aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorshmel1k <shmel1k@ydb.tech>2022-09-02 12:44:59 +0300
committershmel1k <shmel1k@ydb.tech>2022-09-02 12:44:59 +0300
commit90d450f74722da7859d6f510a869f6c6908fd12f (patch)
tree538c718dedc76cdfe37ad6d01ff250dd930d9278
parent01f64c1ecd0d4ffa9e3a74478335f1745f26cc75 (diff)
downloadydb-90d450f74722da7859d6f510a869f6c6908fd12f.tar.gz
[] add metering mode to CLI
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/arrow_to_pandas.cc2299
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/arrow_to_pandas.h124
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/benchmark.cc38
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/benchmark.h36
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/common.cc203
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/common.h360
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/datetime.cc455
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/datetime.h183
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/decimal.cc246
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/decimal.h128
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/deserialize.cc495
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/deserialize.h106
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/extension_type.cc217
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/extension_type.h85
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/filesystem.cc206
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/filesystem.h126
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/helpers.cc436
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/helpers.h156
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/inference.cc660
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/inference.h64
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/init.cc24
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/init.h26
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/io.cc374
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/io.h116
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/ipc.cc67
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/ipc.h52
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/iterators.h155
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_convert.cc559
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_convert.h120
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_internal.h182
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_interop.h96
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_to_arrow.cc865
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_to_arrow.h72
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/platform.h36
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/pyarrow.cc93
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/pyarrow.h87
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/pyarrow_api.h239
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/pyarrow_lib.h82
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/python_to_arrow.cc1041
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/python_to_arrow.h80
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/serialize.cc798
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/serialize.h145
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/type_traits.h350
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/python/visibility.h39
-rw-r--r--contrib/libs/apache/arrow/cpp/src/arrow/util/converter.h411
-rw-r--r--contrib/libs/cblas/COPYING36
-rw-r--r--contrib/libs/cblas/blaswrap.h160
-rw-r--r--contrib/libs/cblas/caxpy.c103
-rw-r--r--contrib/libs/cblas/cblas.h575
-rw-r--r--contrib/libs/cblas/cblas_f77.h701
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_caxpy.c22
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ccopy.c22
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_cdotc_sub.c23
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_cdotu_sub.c23
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_cgbmv.c165
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_cgemm.c109
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_cgemv.c162
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_cgerc.c84
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_cgeru.c45
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_chbmv.c159
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_chemm.c106
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_chemv.c160
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_cher.c116
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_cher2.c152
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_cher2k.c111
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_cherk.c105
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_chpmv.c160
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_chpr.c115
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_chpr2.c149
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_cscal.c21
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_csscal.c21
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_cswap.c22
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_csymm.c106
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_csyr2k.c108
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_csyrk.c108
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ctbmv.c158
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ctbsv.c162
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ctpmv.c152
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ctpsv.c157
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ctrmm.c144
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ctrmv.c155
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ctrsm.c155
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ctrsv.c156
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dasum.c23
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_daxpy.c22
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dcopy.c22
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ddot.c25
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dgbmv.c81
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dgemm.c109
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dgemv.c78
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dger.c47
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dnrm2.c23
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_drot.c23
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_drotg.c14
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_drotm.c14
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_drotmg.c15
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dsbmv.c77
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dscal.c21
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dsdot.c25
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dspmv.c76
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dspr.c70
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dspr2.c70
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dswap.c22
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dsymm.c106
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dsymv.c76
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dsyr.c71
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dsyr2.c76
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dsyr2k.c109
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dsyrk.c108
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dtbmv.c122
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dtbsv.c122
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dtpmv.c117
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dtpsv.c118
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dtrmm.c148
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dtrmv.c122
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dtrsm.c153
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dtrsv.c121
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dzasum.c23
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_dznrm2.c23
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_globals.c2
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_icamax.c23
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_idamax.c23
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_isamax.c23
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_izamax.c23
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_sasum.c23
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_saxpy.c23
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_scasum.c23
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_scnrm2.c23
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_scopy.c22
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_sdot.c25
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_sdsdot.c25
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_sgbmv.c83
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_sgemm.c110
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_sgemv.c78
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_sger.c46
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_snrm2.c23
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_srot.c22
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_srotg.c14
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_srotm.c22
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_srotmg.c15
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ssbmv.c76
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_sscal.c21
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_sspmv.c73
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_sspr.c72
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_sspr2.c71
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_sswap.c22
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ssymm.c108
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ssymv.c76
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ssyr.c70
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ssyr2.c76
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ssyr2k.c111
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ssyrk.c110
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_stbmv.c122
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_stbsv.c122
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_stpmv.c118
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_stpsv.c118
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_strmm.c148
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_strmv.c122
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_strsm.c143
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_strsv.c121
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_xerbla.c68
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zaxpy.c22
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zcopy.c22
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zdotc_sub.c24
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zdotu_sub.c24
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zdscal.c21
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zgbmv.c166
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zgemm.c109
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zgemv.c164
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zgerc.c84
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zgeru.c44
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zhbmv.c159
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zhemm.c106
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zhemv.c160
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zher.c110
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zher2.c153
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zher2k.c110
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zherk.c105
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zhpmv.c160
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zhpr.c115
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zhpr2.c150
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zscal.c21
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zswap.c22
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zsymm.c106
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zsyr2k.c108
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_zsyrk.c107
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ztbmv.c158
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ztbsv.c162
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ztpmv.c152
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ztpsv.c157
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ztrmm.c149
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ztrmv.c156
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ztrsm.c155
-rw-r--r--contrib/libs/cblas/cblas_interface/cblas_ztrsv.c156
-rw-r--r--contrib/libs/cblas/cblas_interface/cdotcsub.c41
-rw-r--r--contrib/libs/cblas/cblas_interface/cdotusub.c41
-rw-r--r--contrib/libs/cblas/cblas_interface/dasumsub.c34
-rw-r--r--contrib/libs/cblas/cblas_interface/ddotsub.c36
-rw-r--r--contrib/libs/cblas/cblas_interface/dnrm2sub.c34
-rw-r--r--contrib/libs/cblas/cblas_interface/dsdotsub.c35
-rw-r--r--contrib/libs/cblas/cblas_interface/dzasumsub.c34
-rw-r--r--contrib/libs/cblas/cblas_interface/dznrm2sub.c34
-rw-r--r--contrib/libs/cblas/cblas_interface/icamaxsub.c34
-rw-r--r--contrib/libs/cblas/cblas_interface/idamaxsub.c34
-rw-r--r--contrib/libs/cblas/cblas_interface/isamaxsub.c34
-rw-r--r--contrib/libs/cblas/cblas_interface/izamaxsub.c34
-rw-r--r--contrib/libs/cblas/cblas_interface/sasumsub.c33
-rw-r--r--contrib/libs/cblas/cblas_interface/scasumsub.c34
-rw-r--r--contrib/libs/cblas/cblas_interface/scnrm2sub.c34
-rw-r--r--contrib/libs/cblas/cblas_interface/sdotsub.c35
-rw-r--r--contrib/libs/cblas/cblas_interface/sdsdotsub.c36
-rw-r--r--contrib/libs/cblas/cblas_interface/snrm2sub.c33
-rw-r--r--contrib/libs/cblas/cblas_interface/zdotcsub.c41
-rw-r--r--contrib/libs/cblas/cblas_interface/zdotusub.c41
-rw-r--r--contrib/libs/cblas/ccopy.c88
-rw-r--r--contrib/libs/cblas/cdotc.c106
-rw-r--r--contrib/libs/cblas/cdotu.c100
-rw-r--r--contrib/libs/cblas/cgbmv.c477
-rw-r--r--contrib/libs/cblas/cgemm.c697
-rw-r--r--contrib/libs/cblas/cgemv.c411
-rw-r--r--contrib/libs/cblas/cgerc.c217
-rw-r--r--contrib/libs/cblas/cgeru.c214
-rw-r--r--contrib/libs/cblas/chbmv.c483
-rw-r--r--contrib/libs/cblas/chemm.c495
-rw-r--r--contrib/libs/cblas/chemv.c433
-rw-r--r--contrib/libs/cblas/cher.c338
-rw-r--r--contrib/libs/cblas/cher2.c446
-rw-r--r--contrib/libs/cblas/cher2k.c671
-rw-r--r--contrib/libs/cblas/cherk.c533
-rw-r--r--contrib/libs/cblas/chpmv.c434
-rw-r--r--contrib/libs/cblas/chpr.c339
-rw-r--r--contrib/libs/cblas/chpr2.c447
-rw-r--r--contrib/libs/cblas/crotg.c72
-rw-r--r--contrib/libs/cblas/cscal.c81
-rw-r--r--contrib/libs/cblas/csrot.c153
-rw-r--r--contrib/libs/cblas/csscal.c88
-rw-r--r--contrib/libs/cblas/cswap.c93
-rw-r--r--contrib/libs/cblas/csymm.c495
-rw-r--r--contrib/libs/cblas/csyr2k.c537
-rw-r--r--contrib/libs/cblas/csyrk.c457
-rw-r--r--contrib/libs/cblas/ctbmv.c641
-rw-r--r--contrib/libs/cblas/ctbsv.c609
-rw-r--r--contrib/libs/cblas/ctpmv.c571
-rw-r--r--contrib/libs/cblas/ctpsv.c539
-rw-r--r--contrib/libs/cblas/ctrmm.c688
-rw-r--r--contrib/libs/cblas/ctrmv.c554
-rw-r--r--contrib/libs/cblas/ctrsm.c698
-rw-r--r--contrib/libs/cblas/ctrsv.c523
-rw-r--r--contrib/libs/cblas/dasum.c101
-rw-r--r--contrib/libs/cblas/daxpy.c107
-rw-r--r--contrib/libs/cblas/dcabs1.c36
-rw-r--r--contrib/libs/cblas/dcopy.c107
-rw-r--r--contrib/libs/cblas/ddot.c110
-rw-r--r--contrib/libs/cblas/dgbmv.c369
-rw-r--r--contrib/libs/cblas/dgemm.c389
-rw-r--r--contrib/libs/cblas/dgemv.c312
-rw-r--r--contrib/libs/cblas/dger.c194
-rw-r--r--contrib/libs/cblas/dnrm2.c95
-rw-r--r--contrib/libs/cblas/drot.c86
-rw-r--r--contrib/libs/cblas/drotg.c79
-rw-r--r--contrib/libs/cblas/drotm.c215
-rw-r--r--contrib/libs/cblas/drotmg.c293
-rw-r--r--contrib/libs/cblas/dsbmv.c364
-rw-r--r--contrib/libs/cblas/dscal.c96
-rw-r--r--contrib/libs/cblas/dsdot.c135
-rw-r--r--contrib/libs/cblas/dspmv.c312
-rw-r--r--contrib/libs/cblas/dspr.c237
-rw-r--r--contrib/libs/cblas/dspr2.c270
-rw-r--r--contrib/libs/cblas/dswap.c114
-rw-r--r--contrib/libs/cblas/dsymm.c362
-rw-r--r--contrib/libs/cblas/dsymv.c313
-rw-r--r--contrib/libs/cblas/dsyr.c238
-rw-r--r--contrib/libs/cblas/dsyr2.c275
-rw-r--r--contrib/libs/cblas/dsyr2k.c407
-rw-r--r--contrib/libs/cblas/dsyrk.c372
-rw-r--r--contrib/libs/cblas/dtbmv.c422
-rw-r--r--contrib/libs/cblas/dtbsv.c426
-rw-r--r--contrib/libs/cblas/dtpmv.c357
-rw-r--r--contrib/libs/cblas/dtpsv.c360
-rw-r--r--contrib/libs/cblas/dtrmm.c453
-rw-r--r--contrib/libs/cblas/dtrmv.c345
-rw-r--r--contrib/libs/cblas/dtrsm.c490
-rw-r--r--contrib/libs/cblas/dtrsv.c348
-rw-r--r--contrib/libs/cblas/dzasum.c80
-rw-r--r--contrib/libs/cblas/dznrm2.c108
-rw-r--r--contrib/libs/cblas/icamax.c93
-rw-r--r--contrib/libs/cblas/idamax.c93
-rw-r--r--contrib/libs/cblas/isamax.c93
-rw-r--r--contrib/libs/cblas/izamax.c93
-rw-r--r--contrib/libs/cblas/lsame.c117
-rw-r--r--contrib/libs/cblas/sasum.c101
-rw-r--r--contrib/libs/cblas/saxpy.c107
-rw-r--r--contrib/libs/cblas/scabs1.c36
-rw-r--r--contrib/libs/cblas/scasum.c87
-rw-r--r--contrib/libs/cblas/scnrm2.c109
-rw-r--r--contrib/libs/cblas/scopy.c107
-rw-r--r--contrib/libs/cblas/sdot.c109
-rw-r--r--contrib/libs/cblas/sdsdot.c144
-rw-r--r--contrib/libs/cblas/sgbmv.c368
-rw-r--r--contrib/libs/cblas/sgemm.c388
-rw-r--r--contrib/libs/cblas/sgemv.c312
-rw-r--r--contrib/libs/cblas/sger.c193
-rw-r--r--contrib/libs/cblas/snrm2.c97
-rw-r--r--contrib/libs/cblas/srot.c90
-rw-r--r--contrib/libs/cblas/srotg.c78
-rw-r--r--contrib/libs/cblas/srotm.c216
-rw-r--r--contrib/libs/cblas/srotmg.c295
-rw-r--r--contrib/libs/cblas/ssbmv.c364
-rw-r--r--contrib/libs/cblas/sscal.c95
-rw-r--r--contrib/libs/cblas/sspmv.c311
-rw-r--r--contrib/libs/cblas/sspr.c237
-rw-r--r--contrib/libs/cblas/sspr2.c269
-rw-r--r--contrib/libs/cblas/sswap.c114
-rw-r--r--contrib/libs/cblas/ssymm.c362
-rw-r--r--contrib/libs/cblas/ssymv.c313
-rw-r--r--contrib/libs/cblas/ssyr.c238
-rw-r--r--contrib/libs/cblas/ssyr2.c274
-rw-r--r--contrib/libs/cblas/ssyr2k.c409
-rw-r--r--contrib/libs/cblas/ssyrk.c372
-rw-r--r--contrib/libs/cblas/stbmv.c422
-rw-r--r--contrib/libs/cblas/stbsv.c426
-rw-r--r--contrib/libs/cblas/stpmv.c357
-rw-r--r--contrib/libs/cblas/stpsv.c360
-rw-r--r--contrib/libs/cblas/strmm.c453
-rw-r--r--contrib/libs/cblas/strmv.c345
-rw-r--r--contrib/libs/cblas/strsm.c490
-rw-r--r--contrib/libs/cblas/strsv.c348
-rw-r--r--contrib/libs/cblas/xerbla.c77
-rw-r--r--contrib/libs/cblas/xerbla_array.c102
-rw-r--r--contrib/libs/cblas/zaxpy.c99
-rw-r--r--contrib/libs/cblas/zcopy.c85
-rw-r--r--contrib/libs/cblas/zdotc.c105
-rw-r--r--contrib/libs/cblas/zdotu.c100
-rw-r--r--contrib/libs/cblas/zdrot.c153
-rw-r--r--contrib/libs/cblas/zdscal.c85
-rw-r--r--contrib/libs/cblas/zgbmv.c478
-rw-r--r--contrib/libs/cblas/zgemm.c698
-rw-r--r--contrib/libs/cblas/zgemv.c412
-rw-r--r--contrib/libs/cblas/zgerc.c218
-rw-r--r--contrib/libs/cblas/zgeru.c215
-rw-r--r--contrib/libs/cblas/zhbmv.c483
-rw-r--r--contrib/libs/cblas/zhemm.c496
-rw-r--r--contrib/libs/cblas/zhemv.c433
-rw-r--r--contrib/libs/cblas/zher.c338
-rw-r--r--contrib/libs/cblas/zher2.c447
-rw-r--r--contrib/libs/cblas/zher2k.c671
-rw-r--r--contrib/libs/cblas/zherk.c533
-rw-r--r--contrib/libs/cblas/zhpmv.c434
-rw-r--r--contrib/libs/cblas/zhpr.c339
-rw-r--r--contrib/libs/cblas/zhpr2.c448
-rw-r--r--contrib/libs/cblas/zrotg.c77
-rw-r--r--contrib/libs/cblas/zscal.c81
-rw-r--r--contrib/libs/cblas/zswap.c93
-rw-r--r--contrib/libs/cblas/zsymm.c496
-rw-r--r--contrib/libs/cblas/zsyr2k.c538
-rw-r--r--contrib/libs/cblas/zsyrk.c457
-rw-r--r--contrib/libs/cblas/ztbmv.c642
-rw-r--r--contrib/libs/cblas/ztbsv.c611
-rw-r--r--contrib/libs/cblas/ztpmv.c571
-rw-r--r--contrib/libs/cblas/ztpsv.c540
-rw-r--r--contrib/libs/cblas/ztrmm.c688
-rw-r--r--contrib/libs/cblas/ztrmv.c554
-rw-r--r--contrib/libs/cblas/ztrsm.c699
-rw-r--r--contrib/libs/cblas/ztrsv.c524
-rw-r--r--contrib/libs/clapack/COPYING36
-rw-r--r--contrib/libs/clapack/cbdsqr.c912
-rw-r--r--contrib/libs/clapack/cgbbrd.c649
-rw-r--r--contrib/libs/clapack/cgbcon.c307
-rw-r--r--contrib/libs/clapack/cgbequ.c329
-rw-r--r--contrib/libs/clapack/cgbequb.c353
-rw-r--r--contrib/libs/clapack/cgbrfs.c492
-rw-r--r--contrib/libs/clapack/cgbsv.c176
-rw-r--r--contrib/libs/clapack/cgbsvx.c675
-rw-r--r--contrib/libs/clapack/cgbtf2.c267
-rw-r--r--contrib/libs/clapack/cgbtrf.c604
-rw-r--r--contrib/libs/clapack/cgbtrs.c281
-rw-r--r--contrib/libs/clapack/cgebak.c236
-rw-r--r--contrib/libs/clapack/cgebal.c414
-rw-r--r--contrib/libs/clapack/cgebd2.c345
-rw-r--r--contrib/libs/clapack/cgebrd.c348
-rw-r--r--contrib/libs/clapack/cgecon.c233
-rw-r--r--contrib/libs/clapack/cgeequ.c306
-rw-r--r--contrib/libs/clapack/cgeequb.c331
-rw-r--r--contrib/libs/clapack/cgees.c404
-rw-r--r--contrib/libs/clapack/cgeesx.c472
-rw-r--r--contrib/libs/clapack/cgeev.c529
-rw-r--r--contrib/libs/clapack/cgeevx.c680
-rw-r--r--contrib/libs/clapack/cgegs.c536
-rw-r--r--contrib/libs/clapack/cgegv.c779
-rw-r--r--contrib/libs/clapack/cgehd2.c198
-rw-r--r--contrib/libs/clapack/cgehrd.c350
-rw-r--r--contrib/libs/clapack/cgelq2.c165
-rw-r--r--contrib/libs/clapack/cgelqf.c252
-rw-r--r--contrib/libs/clapack/cgels.c520
-rw-r--r--contrib/libs/clapack/cgelsd.c717
-rw-r--r--contrib/libs/clapack/cgelss.c822
-rw-r--r--contrib/libs/clapack/cgelsx.c468
-rw-r--r--contrib/libs/clapack/cgelsy.c512
-rw-r--r--contrib/libs/clapack/cgeql2.c167
-rw-r--r--contrib/libs/clapack/cgeqlf.c271
-rw-r--r--contrib/libs/clapack/cgeqp3.c361
-rw-r--r--contrib/libs/clapack/cgeqpf.c315
-rw-r--r--contrib/libs/clapack/cgeqr2.c169
-rw-r--r--contrib/libs/clapack/cgeqrf.c253
-rw-r--r--contrib/libs/clapack/cgerfs.c460
-rw-r--r--contrib/libs/clapack/cgerq2.c162
-rw-r--r--contrib/libs/clapack/cgerqf.c270
-rw-r--r--contrib/libs/clapack/cgesc2.c206
-rw-r--r--contrib/libs/clapack/cgesdd.c2240
-rw-r--r--contrib/libs/clapack/cgesv.c138
-rw-r--r--contrib/libs/clapack/cgesvd.c4164
-rw-r--r--contrib/libs/clapack/cgesvx.c605
-rw-r--r--contrib/libs/clapack/cgetc2.c208
-rw-r--r--contrib/libs/clapack/cgetf2.c202
-rw-r--r--contrib/libs/clapack/cgetrf.c220
-rw-r--r--contrib/libs/clapack/cgetri.c271
-rw-r--r--contrib/libs/clapack/cgetrs.c186
-rw-r--r--contrib/libs/clapack/cggbak.c274
-rw-r--r--contrib/libs/clapack/cggbal.c652
-rw-r--r--contrib/libs/clapack/cgges.c596
-rw-r--r--contrib/libs/clapack/cggesx.c701
-rw-r--r--contrib/libs/clapack/cggev.c592
-rw-r--r--contrib/libs/clapack/cggevx.c802
-rw-r--r--contrib/libs/clapack/cggglm.c334
-rw-r--r--contrib/libs/clapack/cgghrd.c336
-rw-r--r--contrib/libs/clapack/cgglse.c342
-rw-r--r--contrib/libs/clapack/cggqrf.c268
-rw-r--r--contrib/libs/clapack/cggrqf.c269
-rw-r--r--contrib/libs/clapack/cggsvd.c403
-rw-r--r--contrib/libs/clapack/cggsvp.c530
-rw-r--r--contrib/libs/clapack/cgtcon.c207
-rw-r--r--contrib/libs/clapack/cgtrfs.c553
-rw-r--r--contrib/libs/clapack/cgtsv.c287
-rw-r--r--contrib/libs/clapack/cgtsvx.c347
-rw-r--r--contrib/libs/clapack/cgttrf.c274
-rw-r--r--contrib/libs/clapack/cgttrs.c192
-rw-r--r--contrib/libs/clapack/cgtts2.c583
-rw-r--r--contrib/libs/clapack/chbev.c270
-rw-r--r--contrib/libs/clapack/chbevd.c377
-rw-r--r--contrib/libs/clapack/chbevx.c524
-rw-r--r--contrib/libs/clapack/chbgst.c2146
-rw-r--r--contrib/libs/clapack/chbgv.c235
-rw-r--r--contrib/libs/clapack/chbgvd.c355
-rw-r--r--contrib/libs/clapack/chbgvx.c472
-rw-r--r--contrib/libs/clapack/chbtrd.c808
-rw-r--r--contrib/libs/clapack/checon.c201
-rw-r--r--contrib/libs/clapack/cheequb.c440
-rw-r--r--contrib/libs/clapack/cheev.c284
-rw-r--r--contrib/libs/clapack/cheevd.c377
-rw-r--r--contrib/libs/clapack/cheevr.c687
-rw-r--r--contrib/libs/clapack/cheevx.c542
-rw-r--r--contrib/libs/clapack/chegs2.c334
-rw-r--r--contrib/libs/clapack/chegst.c350
-rw-r--r--contrib/libs/clapack/chegv.c286
-rw-r--r--contrib/libs/clapack/chegvd.c364
-rw-r--r--contrib/libs/clapack/chegvx.c394
-rw-r--r--contrib/libs/clapack/cherfs.c472
-rw-r--r--contrib/libs/clapack/chesv.c214
-rw-r--r--contrib/libs/clapack/chesvx.c368
-rw-r--r--contrib/libs/clapack/chetd2.c358
-rw-r--r--contrib/libs/clapack/chetf2.c802
-rw-r--r--contrib/libs/clapack/chetrd.c369
-rw-r--r--contrib/libs/clapack/chetrf.c334
-rw-r--r--contrib/libs/clapack/chetri.c510
-rw-r--r--contrib/libs/clapack/chetrs.c528
-rw-r--r--contrib/libs/clapack/chfrk.c530
-rw-r--r--contrib/libs/clapack/chgeqz.c1143
-rw-r--r--contrib/libs/clapack/chla_transtype.c62
-rw-r--r--contrib/libs/clapack/chpcon.c195
-rw-r--r--contrib/libs/clapack/chpev.c249
-rw-r--r--contrib/libs/clapack/chpevd.c346
-rw-r--r--contrib/libs/clapack/chpevx.c471
-rw-r--r--contrib/libs/clapack/chpgst.c312
-rw-r--r--contrib/libs/clapack/chpgv.c244
-rw-r--r--contrib/libs/clapack/chpgvd.c356
-rw-r--r--contrib/libs/clapack/chpgvx.c343
-rw-r--r--contrib/libs/clapack/chprfs.c462
-rw-r--r--contrib/libs/clapack/chpsv.c176
-rw-r--r--contrib/libs/clapack/chpsvx.c320
-rw-r--r--contrib/libs/clapack/chptrd.c318
-rw-r--r--contrib/libs/clapack/chptrf.c821
-rw-r--r--contrib/libs/clapack/chptri.c512
-rw-r--r--contrib/libs/clapack/chptrs.c530
-rw-r--r--contrib/libs/clapack/chsein.c432
-rw-r--r--contrib/libs/clapack/chseqr.c480
-rw-r--r--contrib/libs/clapack/clabrd.c500
-rw-r--r--contrib/libs/clapack/clacgv.c95
-rw-r--r--contrib/libs/clapack/clacn2.c283
-rw-r--r--contrib/libs/clapack/clacon.c275
-rw-r--r--contrib/libs/clapack/clacp2.c134
-rw-r--r--contrib/libs/clapack/clacpy.c134
-rw-r--r--contrib/libs/clapack/clacrm.c176
-rw-r--r--contrib/libs/clapack/clacrt.c155
-rw-r--r--contrib/libs/clapack/cladiv.c74
-rw-r--r--contrib/libs/clapack/claed0.c367
-rw-r--r--contrib/libs/clapack/claed7.c325
-rw-r--r--contrib/libs/clapack/claed8.c436
-rw-r--r--contrib/libs/clapack/claein.c392
-rw-r--r--contrib/libs/clapack/claesy.c206
-rw-r--r--contrib/libs/clapack/claev2.c123
-rw-r--r--contrib/libs/clapack/clag2z.c101
-rw-r--r--contrib/libs/clapack/clags2.c465
-rw-r--r--contrib/libs/clapack/clagtm.c598
-rw-r--r--contrib/libs/clapack/clahef.c933
-rw-r--r--contrib/libs/clapack/clahqr.c754
-rw-r--r--contrib/libs/clapack/clahr2.c329
-rw-r--r--contrib/libs/clapack/clahrd.c298
-rw-r--r--contrib/libs/clapack/claic1.c448
-rw-r--r--contrib/libs/clapack/clals0.c558
-rw-r--r--contrib/libs/clapack/clalsa.c663
-rw-r--r--contrib/libs/clapack/clalsd.c755
-rw-r--r--contrib/libs/clapack/clangb.c224
-rw-r--r--contrib/libs/clapack/clange.c199
-rw-r--r--contrib/libs/clapack/clangt.c195
-rw-r--r--contrib/libs/clapack/clanhb.c291
-rw-r--r--contrib/libs/clapack/clanhe.c265
-rw-r--r--contrib/libs/clapack/clanhf.c1803
-rw-r--r--contrib/libs/clapack/clanhp.c277
-rw-r--r--contrib/libs/clapack/clanhs.c205
-rw-r--r--contrib/libs/clapack/clanht.c166
-rw-r--r--contrib/libs/clapack/clansb.c261
-rw-r--r--contrib/libs/clapack/clansp.c278
-rw-r--r--contrib/libs/clapack/clansy.c237
-rw-r--r--contrib/libs/clapack/clantb.c426
-rw-r--r--contrib/libs/clapack/clantp.c391
-rw-r--r--contrib/libs/clapack/clantr.c394
-rw-r--r--contrib/libs/clapack/clapll.c143
-rw-r--r--contrib/libs/clapack/clapmt.c185
-rw-r--r--contrib/libs/clapack/claqgb.c227
-rw-r--r--contrib/libs/clapack/claqge.c202
-rw-r--r--contrib/libs/clapack/claqhb.c200
-rw-r--r--contrib/libs/clapack/claqhe.c192
-rw-r--r--contrib/libs/clapack/claqhp.c189
-rw-r--r--contrib/libs/clapack/claqp2.c244
-rw-r--r--contrib/libs/clapack/claqps.c367
-rw-r--r--contrib/libs/clapack/claqr0.c784
-rw-r--r--contrib/libs/clapack/claqr1.c197
-rw-r--r--contrib/libs/clapack/claqr2.c603
-rw-r--r--contrib/libs/clapack/claqr3.c620
-rw-r--r--contrib/libs/clapack/claqr4.c782
-rw-r--r--contrib/libs/clapack/claqr5.c1345
-rw-r--r--contrib/libs/clapack/claqsb.c192
-rw-r--r--contrib/libs/clapack/claqsp.c179
-rw-r--r--contrib/libs/clapack/claqsy.c182
-rw-r--r--contrib/libs/clapack/clar1v.c500
-rw-r--r--contrib/libs/clapack/clar2v.c159
-rw-r--r--contrib/libs/clapack/clarcm.c176
-rw-r--r--contrib/libs/clapack/clarf.c198
-rw-r--r--contrib/libs/clapack/clarfb.c837
-rw-r--r--contrib/libs/clapack/clarfg.c190
-rw-r--r--contrib/libs/clapack/clarfp.c234
-rw-r--r--contrib/libs/clapack/clarft.c361
-rw-r--r--contrib/libs/clapack/clarfx.c2048
-rw-r--r--contrib/libs/clapack/clargv.c335
-rw-r--r--contrib/libs/clapack/clarnv.c190
-rw-r--r--contrib/libs/clapack/clarrv.c1015
-rw-r--r--contrib/libs/clapack/clartg.c284
-rw-r--r--contrib/libs/clapack/clartv.c125
-rw-r--r--contrib/libs/clapack/clarz.c198
-rw-r--r--contrib/libs/clapack/clarzb.c323
-rw-r--r--contrib/libs/clapack/clarzt.c236
-rw-r--r--contrib/libs/clapack/clascl.c377
-rw-r--r--contrib/libs/clapack/claset.c162
-rw-r--r--contrib/libs/clapack/clasr.c609
-rw-r--r--contrib/libs/clapack/classq.c138
-rw-r--r--contrib/libs/clapack/claswp.c166
-rw-r--r--contrib/libs/clapack/clasyf.c829
-rw-r--r--contrib/libs/clapack/clatbs.c1193
-rw-r--r--contrib/libs/clapack/clatdf.c357
-rw-r--r--contrib/libs/clapack/clatps.c1161
-rw-r--r--contrib/libs/clapack/clatrd.c418
-rw-r--r--contrib/libs/clapack/clatrs.c1147
-rw-r--r--contrib/libs/clapack/clatrz.c180
-rw-r--r--contrib/libs/clapack/clatzm.c196
-rw-r--r--contrib/libs/clapack/clauu2.c203
-rw-r--r--contrib/libs/clapack/clauum.c217
-rw-r--r--contrib/libs/clapack/cpbcon.c238
-rw-r--r--contrib/libs/clapack/cpbequ.c204
-rw-r--r--contrib/libs/clapack/cpbrfs.c482
-rw-r--r--contrib/libs/clapack/cpbstf.c334
-rw-r--r--contrib/libs/clapack/cpbsv.c182
-rw-r--r--contrib/libs/clapack/cpbsvx.c523
-rw-r--r--contrib/libs/clapack/cpbtf2.c255
-rw-r--r--contrib/libs/clapack/cpbtrf.c489
-rw-r--r--contrib/libs/clapack/cpbtrs.c184
-rw-r--r--contrib/libs/clapack/cpftrf.c475
-rw-r--r--contrib/libs/clapack/cpftri.c425
-rw-r--r--contrib/libs/clapack/cpftrs.c260
-rw-r--r--contrib/libs/clapack/cpocon.c224
-rw-r--r--contrib/libs/clapack/cpoequ.c176
-rw-r--r--contrib/libs/clapack/cpoequb.c195
-rw-r--r--contrib/libs/clapack/cporfs.c465
-rw-r--r--contrib/libs/clapack/cposv.c151
-rw-r--r--contrib/libs/clapack/cposvx.c458
-rw-r--r--contrib/libs/clapack/cpotf2.c245
-rw-r--r--contrib/libs/clapack/cpotrf.c248
-rw-r--r--contrib/libs/clapack/cpotri.c125
-rw-r--r--contrib/libs/clapack/cpotrs.c165
-rw-r--r--contrib/libs/clapack/cppcon.c222
-rw-r--r--contrib/libs/clapack/cppequ.c210
-rw-r--r--contrib/libs/clapack/cpprfs.c457
-rw-r--r--contrib/libs/clapack/cppsv.c160
-rw-r--r--contrib/libs/clapack/cppsvx.c461
-rw-r--r--contrib/libs/clapack/cpptrf.c234
-rw-r--r--contrib/libs/clapack/cpptri.c180
-rw-r--r--contrib/libs/clapack/cpptrs.c170
-rw-r--r--contrib/libs/clapack/cpstf2.c442
-rw-r--r--contrib/libs/clapack/cpstrf.c521
-rw-r--r--contrib/libs/clapack/cptcon.c186
-rw-r--r--contrib/libs/clapack/cpteqr.c241
-rw-r--r--contrib/libs/clapack/cptrfs.c574
-rw-r--r--contrib/libs/clapack/cptsv.c129
-rw-r--r--contrib/libs/clapack/cptsvx.c285
-rw-r--r--contrib/libs/clapack/cpttrf.c215
-rw-r--r--contrib/libs/clapack/cpttrs.c176
-rw-r--r--contrib/libs/clapack/cptts2.c315
-rw-r--r--contrib/libs/clapack/crot.c155
-rw-r--r--contrib/libs/clapack/cspcon.c195
-rw-r--r--contrib/libs/clapack/cspmv.c428
-rw-r--r--contrib/libs/clapack/cspr.c339
-rw-r--r--contrib/libs/clapack/csprfs.c464
-rw-r--r--contrib/libs/clapack/cspsv.c176
-rw-r--r--contrib/libs/clapack/cspsvx.c323
-rw-r--r--contrib/libs/clapack/csptrf.c763
-rw-r--r--contrib/libs/clapack/csptri.c508
-rw-r--r--contrib/libs/clapack/csptrs.c502
-rw-r--r--contrib/libs/clapack/csrscl.c134
-rw-r--r--contrib/libs/clapack/cstedc.c496
-rw-r--r--contrib/libs/clapack/cstegr.c210
-rw-r--r--contrib/libs/clapack/cstein.c468
-rw-r--r--contrib/libs/clapack/cstemr.c749
-rw-r--r--contrib/libs/clapack/csteqr.c620
-rw-r--r--contrib/libs/clapack/csycon.c201
-rw-r--r--contrib/libs/clapack/csyequb.c451
-rw-r--r--contrib/libs/clapack/csymv.c429
-rw-r--r--contrib/libs/clapack/csyr.c289
-rw-r--r--contrib/libs/clapack/csyrfs.c473
-rw-r--r--contrib/libs/clapack/csysv.c214
-rw-r--r--contrib/libs/clapack/csysvx.c368
-rw-r--r--contrib/libs/clapack/csytf2.c727
-rw-r--r--contrib/libs/clapack/csytrf.c340
-rw-r--r--contrib/libs/clapack/csytri.c489
-rw-r--r--contrib/libs/clapack/csytrs.c502
-rw-r--r--contrib/libs/clapack/ctbcon.c255
-rw-r--r--contrib/libs/clapack/ctbrfs.c584
-rw-r--r--contrib/libs/clapack/ctbtrs.c206
-rw-r--r--contrib/libs/clapack/ctfsm.c1024
-rw-r--r--contrib/libs/clapack/ctftri.c500
-rw-r--r--contrib/libs/clapack/ctfttp.c576
-rw-r--r--contrib/libs/clapack/ctfttr.c580
-rw-r--r--contrib/libs/clapack/ctgevc.c971
-rw-r--r--contrib/libs/clapack/ctgex2.c373
-rw-r--r--contrib/libs/clapack/ctgexc.c248
-rw-r--r--contrib/libs/clapack/ctgsen.c762
-rw-r--r--contrib/libs/clapack/ctgsja.c671
-rw-r--r--contrib/libs/clapack/ctgsna.c484
-rw-r--r--contrib/libs/clapack/ctgsy2.c477
-rw-r--r--contrib/libs/clapack/ctgsyl.c689
-rw-r--r--contrib/libs/clapack/ctpcon.c240
-rw-r--r--contrib/libs/clapack/ctprfs.c565
-rw-r--r--contrib/libs/clapack/ctptri.c236
-rw-r--r--contrib/libs/clapack/ctptrs.c194
-rw-r--r--contrib/libs/clapack/ctpttf.c573
-rw-r--r--contrib/libs/clapack/ctpttr.c148
-rw-r--r--contrib/libs/clapack/ctrcon.c249
-rw-r--r--contrib/libs/clapack/ctrevc.c532
-rw-r--r--contrib/libs/clapack/ctrexc.c215
-rw-r--r--contrib/libs/clapack/ctrrfs.c562
-rw-r--r--contrib/libs/clapack/ctrsen.c422
-rw-r--r--contrib/libs/clapack/ctrsna.c445
-rw-r--r--contrib/libs/clapack/ctrsyl.c544
-rw-r--r--contrib/libs/clapack/ctrti2.c198
-rw-r--r--contrib/libs/clapack/ctrtri.c244
-rw-r--r--contrib/libs/clapack/ctrtrs.c184
-rw-r--r--contrib/libs/clapack/ctrttf.c580
-rw-r--r--contrib/libs/clapack/ctrttp.c148
-rw-r--r--contrib/libs/clapack/ctzrqf.c241
-rw-r--r--contrib/libs/clapack/ctzrzf.c310
-rw-r--r--contrib/libs/clapack/cung2l.c182
-rw-r--r--contrib/libs/clapack/cung2r.c184
-rw-r--r--contrib/libs/clapack/cungbr.c309
-rw-r--r--contrib/libs/clapack/cunghr.c223
-rw-r--r--contrib/libs/clapack/cungl2.c193
-rw-r--r--contrib/libs/clapack/cunglq.c284
-rw-r--r--contrib/libs/clapack/cungql.c293
-rw-r--r--contrib/libs/clapack/cungqr.c285
-rw-r--r--contrib/libs/clapack/cungr2.c192
-rw-r--r--contrib/libs/clapack/cungrq.c293
-rw-r--r--contrib/libs/clapack/cungtr.c260
-rw-r--r--contrib/libs/clapack/cunm2l.c245
-rw-r--r--contrib/libs/clapack/cunm2r.c249
-rw-r--r--contrib/libs/clapack/cunmbr.c373
-rw-r--r--contrib/libs/clapack/cunmhr.c257
-rw-r--r--contrib/libs/clapack/cunml2.c254
-rw-r--r--contrib/libs/clapack/cunmlq.c335
-rw-r--r--contrib/libs/clapack/cunmql.c328
-rw-r--r--contrib/libs/clapack/cunmqr.c328
-rw-r--r--contrib/libs/clapack/cunmr2.c246
-rw-r--r--contrib/libs/clapack/cunmr3.c253
-rw-r--r--contrib/libs/clapack/cunmrq.c336
-rw-r--r--contrib/libs/clapack/cunmrz.c370
-rw-r--r--contrib/libs/clapack/cunmtr.c294
-rw-r--r--contrib/libs/clapack/cupgtr.c219
-rw-r--r--contrib/libs/clapack/cupmtr.c320
-rw-r--r--contrib/libs/clapack/dbdsdc.c514
-rw-r--r--contrib/libs/clapack/dbdsqr.c918
-rw-r--r--contrib/libs/clapack/ddisna.c227
-rw-r--r--contrib/libs/clapack/dgbbrd.c566
-rw-r--r--contrib/libs/clapack/dgbcon.c284
-rw-r--r--contrib/libs/clapack/dgbequ.c320
-rw-r--r--contrib/libs/clapack/dgbequb.c347
-rw-r--r--contrib/libs/clapack/dgbrfs.c455
-rw-r--r--contrib/libs/clapack/dgbsv.c176
-rw-r--r--contrib/libs/clapack/dgbsvx.c650
-rw-r--r--contrib/libs/clapack/dgbtf2.c262
-rw-r--r--contrib/libs/clapack/dgbtrf.c588
-rw-r--r--contrib/libs/clapack/dgbtrs.c244
-rw-r--r--contrib/libs/clapack/dgebak.c237
-rw-r--r--contrib/libs/clapack/dgebal.c402
-rw-r--r--contrib/libs/clapack/dgebd2.c304
-rw-r--r--contrib/libs/clapack/dgebrd.c336
-rw-r--r--contrib/libs/clapack/dgecon.c226
-rw-r--r--contrib/libs/clapack/dgeequ.c296
-rw-r--r--contrib/libs/clapack/dgeequb.c324
-rw-r--r--contrib/libs/clapack/dgees.c549
-rw-r--r--contrib/libs/clapack/dgeesx.c649
-rw-r--r--contrib/libs/clapack/dgeev.c566
-rw-r--r--contrib/libs/clapack/dgeevx.c703
-rw-r--r--contrib/libs/clapack/dgegs.c548
-rw-r--r--contrib/libs/clapack/dgegv.c842
-rw-r--r--contrib/libs/clapack/dgehd2.c191
-rw-r--r--contrib/libs/clapack/dgehrd.c342
-rw-r--r--contrib/libs/clapack/dgejsv.c2218
-rw-r--r--contrib/libs/clapack/dgelq2.c157
-rw-r--r--contrib/libs/clapack/dgelqf.c251
-rw-r--r--contrib/libs/clapack/dgels.c515
-rw-r--r--contrib/libs/clapack/dgelsd.c693
-rw-r--r--contrib/libs/clapack/dgelss.c828
-rw-r--r--contrib/libs/clapack/dgelsx.c438
-rw-r--r--contrib/libs/clapack/dgelsy.c495
-rw-r--r--contrib/libs/clapack/dgeql2.c159
-rw-r--r--contrib/libs/clapack/dgeqlf.c270
-rw-r--r--contrib/libs/clapack/dgeqp3.c358
-rw-r--r--contrib/libs/clapack/dgeqpf.c304
-rw-r--r--contrib/libs/clapack/dgeqr2.c161
-rw-r--r--contrib/libs/clapack/dgeqrf.c252
-rw-r--r--contrib/libs/clapack/dgerfs.c424
-rw-r--r--contrib/libs/clapack/dgerq2.c155
-rw-r--r--contrib/libs/clapack/dgerqf.c269
-rw-r--r--contrib/libs/clapack/dgesc2.c176
-rw-r--r--contrib/libs/clapack/dgesdd.c1609
-rw-r--r--contrib/libs/clapack/dgesv.c138
-rw-r--r--contrib/libs/clapack/dgesvd.c4050
-rw-r--r--contrib/libs/clapack/dgesvj.c1796
-rw-r--r--contrib/libs/clapack/dgesvx.c587
-rw-r--r--contrib/libs/clapack/dgetc2.c199
-rw-r--r--contrib/libs/clapack/dgetf2.c193
-rw-r--r--contrib/libs/clapack/dgetrf.c219
-rw-r--r--contrib/libs/clapack/dgetri.c264
-rw-r--r--contrib/libs/clapack/dgetrs.c186
-rw-r--r--contrib/libs/clapack/dggbak.c276
-rw-r--r--contrib/libs/clapack/dggbal.c627
-rw-r--r--contrib/libs/clapack/dgges.c692
-rw-r--r--contrib/libs/clapack/dggesx.c818
-rw-r--r--contrib/libs/clapack/dggev.c641
-rw-r--r--contrib/libs/clapack/dggevx.c885
-rw-r--r--contrib/libs/clapack/dggglm.c331
-rw-r--r--contrib/libs/clapack/dgghrd.c329
-rw-r--r--contrib/libs/clapack/dgglse.c340
-rw-r--r--contrib/libs/clapack/dggqrf.c267
-rw-r--r--contrib/libs/clapack/dggrqf.c268
-rw-r--r--contrib/libs/clapack/dggsvd.c405
-rw-r--r--contrib/libs/clapack/dggsvp.c512
-rw-r--r--contrib/libs/clapack/dgsvj0.c1159
-rw-r--r--contrib/libs/clapack/dgsvj1.c798
-rw-r--r--contrib/libs/clapack/dgtcon.c209
-rw-r--r--contrib/libs/clapack/dgtrfs.c451
-rw-r--r--contrib/libs/clapack/dgtsv.c315
-rw-r--r--contrib/libs/clapack/dgtsvx.c349
-rw-r--r--contrib/libs/clapack/dgttrf.c203
-rw-r--r--contrib/libs/clapack/dgttrs.c189
-rw-r--r--contrib/libs/clapack/dgtts2.c261
-rw-r--r--contrib/libs/clapack/dhgeqz.c1498
-rw-r--r--contrib/libs/clapack/dhsein.c491
-rw-r--r--contrib/libs/clapack/dhseqr.c487
-rw-r--r--contrib/libs/clapack/disnan.c52
-rw-r--r--contrib/libs/clapack/dlabad.c72
-rw-r--r--contrib/libs/clapack/dlabrd.c434
-rw-r--r--contrib/libs/clapack/dlacn2.c267
-rw-r--r--contrib/libs/clapack/dlacon.c258
-rw-r--r--contrib/libs/clapack/dlacpy.c125
-rw-r--r--contrib/libs/clapack/dladiv.c78
-rw-r--r--contrib/libs/clapack/dlae2.c142
-rw-r--r--contrib/libs/clapack/dlaebz.c640
-rw-r--r--contrib/libs/clapack/dlaed0.c440
-rw-r--r--contrib/libs/clapack/dlaed1.c249
-rw-r--r--contrib/libs/clapack/dlaed2.c532
-rw-r--r--contrib/libs/clapack/dlaed3.c338
-rw-r--r--contrib/libs/clapack/dlaed4.c954
-rw-r--r--contrib/libs/clapack/dlaed5.c148
-rw-r--r--contrib/libs/clapack/dlaed6.c374
-rw-r--r--contrib/libs/clapack/dlaed7.c354
-rw-r--r--contrib/libs/clapack/dlaed8.c475
-rw-r--r--contrib/libs/clapack/dlaed9.c274
-rw-r--r--contrib/libs/clapack/dlaeda.c287
-rw-r--r--contrib/libs/clapack/dlaein.c677
-rw-r--r--contrib/libs/clapack/dlaev2.c188
-rw-r--r--contrib/libs/clapack/dlaexc.c459
-rw-r--r--contrib/libs/clapack/dlag2.c356
-rw-r--r--contrib/libs/clapack/dlag2s.c115
-rw-r--r--contrib/libs/clapack/dlags2.c292
-rw-r--r--contrib/libs/clapack/dlagtf.c224
-rw-r--r--contrib/libs/clapack/dlagtm.c254
-rw-r--r--contrib/libs/clapack/dlagts.c351
-rw-r--r--contrib/libs/clapack/dlagv2.c351
-rw-r--r--contrib/libs/clapack/dlahqr.c631
-rw-r--r--contrib/libs/clapack/dlahr2.c315
-rw-r--r--contrib/libs/clapack/dlahrd.c285
-rw-r--r--contrib/libs/clapack/dlaic1.c326
-rw-r--r--contrib/libs/clapack/dlaisnan.c58
-rw-r--r--contrib/libs/clapack/dlaln2.c575
-rw-r--r--contrib/libs/clapack/dlals0.c473
-rw-r--r--contrib/libs/clapack/dlalsa.c456
-rw-r--r--contrib/libs/clapack/dlalsd.c529
-rw-r--r--contrib/libs/clapack/dlamch.c1001
-rw-r--r--contrib/libs/clapack/dlamrg.c131
-rw-r--r--contrib/libs/clapack/dlaneg.c218
-rw-r--r--contrib/libs/clapack/dlangb.c226
-rw-r--r--contrib/libs/clapack/dlange.c199
-rw-r--r--contrib/libs/clapack/dlangt.c195
-rw-r--r--contrib/libs/clapack/dlanhs.c205
-rw-r--r--contrib/libs/clapack/dlansb.c263
-rw-r--r--contrib/libs/clapack/dlansf.c1012
-rw-r--r--contrib/libs/clapack/dlansp.c263
-rw-r--r--contrib/libs/clapack/dlanst.c166
-rw-r--r--contrib/libs/clapack/dlansy.c239
-rw-r--r--contrib/libs/clapack/dlantb.c434
-rw-r--r--contrib/libs/clapack/dlantp.c391
-rw-r--r--contrib/libs/clapack/dlantr.c398
-rw-r--r--contrib/libs/clapack/dlanv2.c235
-rw-r--r--contrib/libs/clapack/dlapll.c127
-rw-r--r--contrib/libs/clapack/dlapmt.c178
-rw-r--r--contrib/libs/clapack/dlapy2.c73
-rw-r--r--contrib/libs/clapack/dlapy3.c83
-rw-r--r--contrib/libs/clapack/dlaqgb.c216
-rw-r--r--contrib/libs/clapack/dlaqge.c188
-rw-r--r--contrib/libs/clapack/dlaqp2.c237
-rw-r--r--contrib/libs/clapack/dlaqps.c345
-rw-r--r--contrib/libs/clapack/dlaqr0.c758
-rw-r--r--contrib/libs/clapack/dlaqr1.c127
-rw-r--r--contrib/libs/clapack/dlaqr2.c698
-rw-r--r--contrib/libs/clapack/dlaqr3.c715
-rw-r--r--contrib/libs/clapack/dlaqr4.c754
-rw-r--r--contrib/libs/clapack/dlaqr5.c1025
-rw-r--r--contrib/libs/clapack/dlaqsb.c185
-rw-r--r--contrib/libs/clapack/dlaqsp.c169
-rw-r--r--contrib/libs/clapack/dlaqsy.c172
-rw-r--r--contrib/libs/clapack/dlaqtr.c832
-rw-r--r--contrib/libs/clapack/dlar1v.c441
-rw-r--r--contrib/libs/clapack/dlar2v.c121
-rw-r--r--contrib/libs/clapack/dlarf.c193
-rw-r--r--contrib/libs/clapack/dlarfb.c774
-rw-r--r--contrib/libs/clapack/dlarfg.c170
-rw-r--r--contrib/libs/clapack/dlarfp.c192
-rw-r--r--contrib/libs/clapack/dlarft.c325
-rw-r--r--contrib/libs/clapack/dlarfx.c730
-rw-r--r--contrib/libs/clapack/dlargv.c130
-rw-r--r--contrib/libs/clapack/dlarnv.c146
-rw-r--r--contrib/libs/clapack/dlarra.c156
-rw-r--r--contrib/libs/clapack/dlarrb.c350
-rw-r--r--contrib/libs/clapack/dlarrc.c183
-rw-r--r--contrib/libs/clapack/dlarrd.c793
-rw-r--r--contrib/libs/clapack/dlarre.c861
-rw-r--r--contrib/libs/clapack/dlarrf.c423
-rw-r--r--contrib/libs/clapack/dlarrj.c338
-rw-r--r--contrib/libs/clapack/dlarrk.c193
-rw-r--r--contrib/libs/clapack/dlarrr.c176
-rw-r--r--contrib/libs/clapack/dlarrv.c988
-rw-r--r--contrib/libs/clapack/dlartg.c190
-rw-r--r--contrib/libs/clapack/dlartv.c106
-rw-r--r--contrib/libs/clapack/dlaruv.c192
-rw-r--r--contrib/libs/clapack/dlarz.c194
-rw-r--r--contrib/libs/clapack/dlarzb.c288
-rw-r--r--contrib/libs/clapack/dlarzt.c229
-rw-r--r--contrib/libs/clapack/dlas2.c144
-rw-r--r--contrib/libs/clapack/dlascl.c354
-rw-r--r--contrib/libs/clapack/dlasd0.c291
-rw-r--r--contrib/libs/clapack/dlasd1.c288
-rw-r--r--contrib/libs/clapack/dlasd2.c609
-rw-r--r--contrib/libs/clapack/dlasd3.c452
-rw-r--r--contrib/libs/clapack/dlasd4.c1010
-rw-r--r--contrib/libs/clapack/dlasd5.c189
-rw-r--r--contrib/libs/clapack/dlasd6.c367
-rw-r--r--contrib/libs/clapack/dlasd7.c518
-rw-r--r--contrib/libs/clapack/dlasd8.c326
-rw-r--r--contrib/libs/clapack/dlasda.c488
-rw-r--r--contrib/libs/clapack/dlasdq.c380
-rw-r--r--contrib/libs/clapack/dlasdt.c136
-rw-r--r--contrib/libs/clapack/dlaset.c152
-rw-r--r--contrib/libs/clapack/dlasq1.c219
-rw-r--r--contrib/libs/clapack/dlasq2.c602
-rw-r--r--contrib/libs/clapack/dlasq3.c350
-rw-r--r--contrib/libs/clapack/dlasq4.c403
-rw-r--r--contrib/libs/clapack/dlasq5.c240
-rw-r--r--contrib/libs/clapack/dlasq6.c212
-rw-r--r--contrib/libs/clapack/dlasr.c453
-rw-r--r--contrib/libs/clapack/dlasrt.c286
-rw-r--r--contrib/libs/clapack/dlassq.c116
-rw-r--r--contrib/libs/clapack/dlasv2.c274
-rw-r--r--contrib/libs/clapack/dlaswp.c158
-rw-r--r--contrib/libs/clapack/dlasy2.c478
-rw-r--r--contrib/libs/clapack/dlasyf.c721
-rw-r--r--contrib/libs/clapack/dlat2s.c137
-rw-r--r--contrib/libs/clapack/dlatbs.c850
-rw-r--r--contrib/libs/clapack/dlatdf.c303
-rw-r--r--contrib/libs/clapack/dlatps.c824
-rw-r--r--contrib/libs/clapack/dlatrd.c355
-rw-r--r--contrib/libs/clapack/dlatrs.c815
-rw-r--r--contrib/libs/clapack/dlatrz.c163
-rw-r--r--contrib/libs/clapack/dlatzm.c193
-rw-r--r--contrib/libs/clapack/dlauu2.c183
-rw-r--r--contrib/libs/clapack/dlauum.c217
-rw-r--r--contrib/libs/clapack/dopgtr.c210
-rw-r--r--contrib/libs/clapack/dopmtr.c296
-rw-r--r--contrib/libs/clapack/dorg2l.c173
-rw-r--r--contrib/libs/clapack/dorg2r.c175
-rw-r--r--contrib/libs/clapack/dorgbr.c299
-rw-r--r--contrib/libs/clapack/dorghr.c216
-rw-r--r--contrib/libs/clapack/dorgl2.c175
-rw-r--r--contrib/libs/clapack/dorglq.c280
-rw-r--r--contrib/libs/clapack/dorgql.c289
-rw-r--r--contrib/libs/clapack/dorgqr.c281
-rw-r--r--contrib/libs/clapack/dorgr2.c174
-rw-r--r--contrib/libs/clapack/dorgrq.c289
-rw-r--r--contrib/libs/clapack/dorgtr.c250
-rw-r--r--contrib/libs/clapack/dorm2l.c231
-rw-r--r--contrib/libs/clapack/dorm2r.c235
-rw-r--r--contrib/libs/clapack/dormbr.c361
-rw-r--r--contrib/libs/clapack/dormhr.c257
-rw-r--r--contrib/libs/clapack/dorml2.c231
-rw-r--r--contrib/libs/clapack/dormlq.c335
-rw-r--r--contrib/libs/clapack/dormql.c328
-rw-r--r--contrib/libs/clapack/dormqr.c328
-rw-r--r--contrib/libs/clapack/dormr2.c227
-rw-r--r--contrib/libs/clapack/dormr3.c241
-rw-r--r--contrib/libs/clapack/dormrq.c335
-rw-r--r--contrib/libs/clapack/dormrz.c362
-rw-r--r--contrib/libs/clapack/dormtr.c296
-rw-r--r--contrib/libs/clapack/dpbcon.c233
-rw-r--r--contrib/libs/clapack/dpbequ.c203
-rw-r--r--contrib/libs/clapack/dpbrfs.c438
-rw-r--r--contrib/libs/clapack/dpbstf.c312
-rw-r--r--contrib/libs/clapack/dpbsv.c182
-rw-r--r--contrib/libs/clapack/dpbsvx.c515
-rw-r--r--contrib/libs/clapack/dpbtf2.c244
-rw-r--r--contrib/libs/clapack/dpbtrf.c471
-rw-r--r--contrib/libs/clapack/dpbtrs.c184
-rw-r--r--contrib/libs/clapack/dpftrf.c452
-rw-r--r--contrib/libs/clapack/dpftri.c403
-rw-r--r--contrib/libs/clapack/dpftrs.c240
-rw-r--r--contrib/libs/clapack/dpocon.c220
-rw-r--r--contrib/libs/clapack/dpoequ.c174
-rw-r--r--contrib/libs/clapack/dpoequb.c188
-rw-r--r--contrib/libs/clapack/dporfs.c422
-rw-r--r--contrib/libs/clapack/dposv.c151
-rw-r--r--contrib/libs/clapack/dposvx.c450
-rw-r--r--contrib/libs/clapack/dpotf2.c224
-rw-r--r--contrib/libs/clapack/dpotrf.c245
-rw-r--r--contrib/libs/clapack/dpotri.c125
-rw-r--r--contrib/libs/clapack/dpotrs.c166
-rw-r--r--contrib/libs/clapack/dppcon.c215
-rw-r--r--contrib/libs/clapack/dppequ.c208
-rw-r--r--contrib/libs/clapack/dpprfs.c413
-rw-r--r--contrib/libs/clapack/dppsv.c161
-rw-r--r--contrib/libs/clapack/dppsvx.c455
-rw-r--r--contrib/libs/clapack/dpptrf.c223
-rw-r--r--contrib/libs/clapack/dpptri.c173
-rw-r--r--contrib/libs/clapack/dpptrs.c170
-rw-r--r--contrib/libs/clapack/dpstf2.c395
-rw-r--r--contrib/libs/clapack/dpstrf.c471
-rw-r--r--contrib/libs/clapack/dptcon.c184
-rw-r--r--contrib/libs/clapack/dpteqr.c244
-rw-r--r--contrib/libs/clapack/dptrfs.c365
-rw-r--r--contrib/libs/clapack/dptsv.c130
-rw-r--r--contrib/libs/clapack/dptsvx.c283
-rw-r--r--contrib/libs/clapack/dpttrf.c181
-rw-r--r--contrib/libs/clapack/dpttrs.c156
-rw-r--r--contrib/libs/clapack/dptts2.c131
-rw-r--r--contrib/libs/clapack/drscl.c134
-rw-r--r--contrib/libs/clapack/dsbev.c268
-rw-r--r--contrib/libs/clapack/dsbevd.c338
-rw-r--r--contrib/libs/clapack/dsbevx.c520
-rw-r--r--contrib/libs/clapack/dsbgst.c1755
-rw-r--r--contrib/libs/clapack/dsbgv.c234
-rw-r--r--contrib/libs/clapack/dsbgvd.c327
-rw-r--r--contrib/libs/clapack/dsbgvx.c466
-rw-r--r--contrib/libs/clapack/dsbtrd.c713
-rw-r--r--contrib/libs/clapack/dsfrk.c517
-rw-r--r--contrib/libs/clapack/dsgesv.c416
-rw-r--r--contrib/libs/clapack/dspcon.c198
-rw-r--r--contrib/libs/clapack/dspev.c246
-rw-r--r--contrib/libs/clapack/dspevd.c314
-rw-r--r--contrib/libs/clapack/dspevx.c467
-rw-r--r--contrib/libs/clapack/dspgst.c284
-rw-r--r--contrib/libs/clapack/dspgv.c243
-rw-r--r--contrib/libs/clapack/dspgvd.c334
-rw-r--r--contrib/libs/clapack/dspgvx.c341
-rw-r--r--contrib/libs/clapack/dsposv.c418
-rw-r--r--contrib/libs/clapack/dsprfs.c421
-rw-r--r--contrib/libs/clapack/dspsv.c176
-rw-r--r--contrib/libs/clapack/dspsvx.c329
-rw-r--r--contrib/libs/clapack/dsptrd.c277
-rw-r--r--contrib/libs/clapack/dsptrf.c628
-rw-r--r--contrib/libs/clapack/dsptri.c411
-rw-r--r--contrib/libs/clapack/dsptrs.c456
-rw-r--r--contrib/libs/clapack/dstebz.c774
-rw-r--r--contrib/libs/clapack/dstedc.c488
-rw-r--r--contrib/libs/clapack/dstegr.c211
-rw-r--r--contrib/libs/clapack/dstein.c452
-rw-r--r--contrib/libs/clapack/dstemr.c728
-rw-r--r--contrib/libs/clapack/dsteqr.c621
-rw-r--r--contrib/libs/clapack/dsterf.c461
-rw-r--r--contrib/libs/clapack/dstev.c212
-rw-r--r--contrib/libs/clapack/dstevd.c273
-rw-r--r--contrib/libs/clapack/dstevr.c550
-rw-r--r--contrib/libs/clapack/dstevx.c432
-rw-r--r--contrib/libs/clapack/dsycon.c204
-rw-r--r--contrib/libs/clapack/dsyequb.c333
-rw-r--r--contrib/libs/clapack/dsyev.c283
-rw-r--r--contrib/libs/clapack/dsyevd.c353
-rw-r--r--contrib/libs/clapack/dsyevr.c652
-rw-r--r--contrib/libs/clapack/dsyevx.c536
-rw-r--r--contrib/libs/clapack/dsygs2.c299
-rw-r--r--contrib/libs/clapack/dsygst.c347
-rw-r--r--contrib/libs/clapack/dsygv.c285
-rw-r--r--contrib/libs/clapack/dsygvd.c338
-rw-r--r--contrib/libs/clapack/dsygvx.c396
-rw-r--r--contrib/libs/clapack/dsyrfs.c429
-rw-r--r--contrib/libs/clapack/dsysv.c215
-rw-r--r--contrib/libs/clapack/dsysvx.c370
-rw-r--r--contrib/libs/clapack/dsytd2.c306
-rw-r--r--contrib/libs/clapack/dsytf2.c608
-rw-r--r--contrib/libs/clapack/dsytrd.c360
-rw-r--r--contrib/libs/clapack/dsytrf.c341
-rw-r--r--contrib/libs/clapack/dsytri.c396
-rw-r--r--contrib/libs/clapack/dsytrs.c453
-rw-r--r--contrib/libs/clapack/dtbcon.c247
-rw-r--r--contrib/libs/clapack/dtbrfs.c519
-rw-r--r--contrib/libs/clapack/dtbtrs.c204
-rw-r--r--contrib/libs/clapack/dtfsm.c976
-rw-r--r--contrib/libs/clapack/dtftri.c474
-rw-r--r--contrib/libs/clapack/dtfttp.c514
-rw-r--r--contrib/libs/clapack/dtfttr.c491
-rw-r--r--contrib/libs/clapack/dtgevc.c1418
-rw-r--r--contrib/libs/clapack/dtgex2.c711
-rw-r--r--contrib/libs/clapack/dtgexc.c514
-rw-r--r--contrib/libs/clapack/dtgsen.c836
-rw-r--r--contrib/libs/clapack/dtgsja.c625
-rw-r--r--contrib/libs/clapack/dtgsna.c695
-rw-r--r--contrib/libs/clapack/dtgsy2.c1113
-rw-r--r--contrib/libs/clapack/dtgsyl.c692
-rw-r--r--contrib/libs/clapack/dtpcon.c233
-rw-r--r--contrib/libs/clapack/dtprfs.c496
-rw-r--r--contrib/libs/clapack/dtptri.c219
-rw-r--r--contrib/libs/clapack/dtptrs.c193
-rw-r--r--contrib/libs/clapack/dtpttf.c499
-rw-r--r--contrib/libs/clapack/dtpttr.c144
-rw-r--r--contrib/libs/clapack/dtrcon.c241
-rw-r--r--contrib/libs/clapack/dtrevc.c1228
-rw-r--r--contrib/libs/clapack/dtrexc.c403
-rw-r--r--contrib/libs/clapack/dtrrfs.c493
-rw-r--r--contrib/libs/clapack/dtrsen.c530
-rw-r--r--contrib/libs/clapack/dtrsna.c606
-rw-r--r--contrib/libs/clapack/dtrsyl.c1319
-rw-r--r--contrib/libs/clapack/dtrti2.c183
-rw-r--r--contrib/libs/clapack/dtrtri.c243
-rw-r--r--contrib/libs/clapack/dtrtrs.c183
-rw-r--r--contrib/libs/clapack/dtrttf.c489
-rw-r--r--contrib/libs/clapack/dtrttp.c144
-rw-r--r--contrib/libs/clapack/dtzrqf.c221
-rw-r--r--contrib/libs/clapack/dtzrzf.c308
-rw-r--r--contrib/libs/clapack/dzsum1.c114
-rw-r--r--contrib/libs/clapack/icmax1.c127
-rw-r--r--contrib/libs/clapack/ieeeck.c166
-rw-r--r--contrib/libs/clapack/ilaclc.c94
-rw-r--r--contrib/libs/clapack/ilaclr.c96
-rw-r--r--contrib/libs/clapack/iladiag.c65
-rw-r--r--contrib/libs/clapack/iladlc.c88
-rw-r--r--contrib/libs/clapack/iladlr.c90
-rw-r--r--contrib/libs/clapack/ilaenv.c654
-rw-r--r--contrib/libs/clapack/ilaprec.c72
-rw-r--r--contrib/libs/clapack/ilaslc.c88
-rw-r--r--contrib/libs/clapack/ilaslr.c90
-rw-r--r--contrib/libs/clapack/ilatrans.c69
-rw-r--r--contrib/libs/clapack/ilauplo.c65
-rw-r--r--contrib/libs/clapack/ilaver.c50
-rw-r--r--contrib/libs/clapack/ilazlc.c94
-rw-r--r--contrib/libs/clapack/ilazlr.c96
-rw-r--r--contrib/libs/clapack/iparmq.c282
-rw-r--r--contrib/libs/clapack/izmax1.c127
-rw-r--r--contrib/libs/clapack/list.inc8
-rw-r--r--contrib/libs/clapack/lsamen.c98
-rw-r--r--contrib/libs/clapack/maxloc.c71
-rw-r--r--contrib/libs/clapack/sbdsdc.c511
-rw-r--r--contrib/libs/clapack/sbdsqr.c918
-rw-r--r--contrib/libs/clapack/scsum1.c114
-rw-r--r--contrib/libs/clapack/sdisna.c228
-rw-r--r--contrib/libs/clapack/sgbbrd.c562
-rw-r--r--contrib/libs/clapack/sgbcon.c282
-rw-r--r--contrib/libs/clapack/sgbequ.c319
-rw-r--r--contrib/libs/clapack/sgbequb.c346
-rw-r--r--contrib/libs/clapack/sgbrfs.c454
-rw-r--r--contrib/libs/clapack/sgbsv.c176
-rw-r--r--contrib/libs/clapack/sgbsvx.c650
-rw-r--r--contrib/libs/clapack/sgbtf2.c260
-rw-r--r--contrib/libs/clapack/sgbtrf.c583
-rw-r--r--contrib/libs/clapack/sgbtrs.c242
-rw-r--r--contrib/libs/clapack/sgebak.c235
-rw-r--r--contrib/libs/clapack/sgebal.c400
-rw-r--r--contrib/libs/clapack/sgebd2.c303
-rw-r--r--contrib/libs/clapack/sgebrd.c336
-rw-r--r--contrib/libs/clapack/sgecon.c224
-rw-r--r--contrib/libs/clapack/sgeequ.c296
-rw-r--r--contrib/libs/clapack/sgeequb.c324
-rw-r--r--contrib/libs/clapack/sgees.c547
-rw-r--r--contrib/libs/clapack/sgeesx.c643
-rw-r--r--contrib/libs/clapack/sgeev.c558
-rw-r--r--contrib/libs/clapack/sgeevx.c696
-rw-r--r--contrib/libs/clapack/sgegs.c545
-rw-r--r--contrib/libs/clapack/sgegv.c837
-rw-r--r--contrib/libs/clapack/sgehd2.c190
-rw-r--r--contrib/libs/clapack/sgehrd.c338
-rw-r--r--contrib/libs/clapack/sgejsv.c2210
-rw-r--r--contrib/libs/clapack/sgelq2.c157
-rw-r--r--contrib/libs/clapack/sgelqf.c251
-rw-r--r--contrib/libs/clapack/sgels.c513
-rw-r--r--contrib/libs/clapack/sgelsd.c699
-rw-r--r--contrib/libs/clapack/sgelss.c822
-rw-r--r--contrib/libs/clapack/sgelsx.c433
-rw-r--r--contrib/libs/clapack/sgelsy.c488
-rw-r--r--contrib/libs/clapack/sgeql2.c159
-rw-r--r--contrib/libs/clapack/sgeqlf.c270
-rw-r--r--contrib/libs/clapack/sgeqp3.c351
-rw-r--r--contrib/libs/clapack/sgeqpf.c303
-rw-r--r--contrib/libs/clapack/sgeqr2.c161
-rw-r--r--contrib/libs/clapack/sgeqrf.c252
-rw-r--r--contrib/libs/clapack/sgerfs.c422
-rw-r--r--contrib/libs/clapack/sgerq2.c155
-rw-r--r--contrib/libs/clapack/sgerqf.c272
-rw-r--r--contrib/libs/clapack/sgesc2.c176
-rw-r--r--contrib/libs/clapack/sgesdd.c1611
-rw-r--r--contrib/libs/clapack/sgesv.c139
-rw-r--r--contrib/libs/clapack/sgesvd.c4047
-rw-r--r--contrib/libs/clapack/sgesvj.c1785
-rw-r--r--contrib/libs/clapack/sgesvx.c582
-rw-r--r--contrib/libs/clapack/sgetc2.c198
-rw-r--r--contrib/libs/clapack/sgetf2.c192
-rw-r--r--contrib/libs/clapack/sgetrf.c217
-rw-r--r--contrib/libs/clapack/sgetri.c259
-rw-r--r--contrib/libs/clapack/sgetrs.c185
-rw-r--r--contrib/libs/clapack/sggbak.c274
-rw-r--r--contrib/libs/clapack/sggbal.c623
-rw-r--r--contrib/libs/clapack/sgges.c687
-rw-r--r--contrib/libs/clapack/sggesx.c811
-rw-r--r--contrib/libs/clapack/sggev.c640
-rw-r--r--contrib/libs/clapack/sggevx.c879
-rw-r--r--contrib/libs/clapack/sggglm.c326
-rw-r--r--contrib/libs/clapack/sgghrd.c329
-rw-r--r--contrib/libs/clapack/sgglse.c334
-rw-r--r--contrib/libs/clapack/sggqrf.c268
-rw-r--r--contrib/libs/clapack/sggrqf.c269
-rw-r--r--contrib/libs/clapack/sggsvd.c402
-rw-r--r--contrib/libs/clapack/sggsvp.c508
-rw-r--r--contrib/libs/clapack/sgsvj0.c1150
-rw-r--r--contrib/libs/clapack/sgsvj1.c789
-rw-r--r--contrib/libs/clapack/sgtcon.c206
-rw-r--r--contrib/libs/clapack/sgtrfs.c444
-rw-r--r--contrib/libs/clapack/sgtsv.c318
-rw-r--r--contrib/libs/clapack/sgtsvx.c347
-rw-r--r--contrib/libs/clapack/sgttrf.c203
-rw-r--r--contrib/libs/clapack/sgttrs.c189
-rw-r--r--contrib/libs/clapack/sgtts2.c261
-rw-r--r--contrib/libs/clapack/shgeqz.c1494
-rw-r--r--contrib/libs/clapack/shsein.c488
-rw-r--r--contrib/libs/clapack/shseqr.c484
-rw-r--r--contrib/libs/clapack/sisnan.c52
-rw-r--r--contrib/libs/clapack/slabad.c72
-rw-r--r--contrib/libs/clapack/slabrd.c432
-rw-r--r--contrib/libs/clapack/slacn2.c266
-rw-r--r--contrib/libs/clapack/slacon.c256
-rw-r--r--contrib/libs/clapack/slacpy.c125
-rw-r--r--contrib/libs/clapack/sladiv.c78
-rw-r--r--contrib/libs/clapack/slae2.c141
-rw-r--r--contrib/libs/clapack/slaebz.c639
-rw-r--r--contrib/libs/clapack/slaed0.c435
-rw-r--r--contrib/libs/clapack/slaed1.c246
-rw-r--r--contrib/libs/clapack/slaed2.c530
-rw-r--r--contrib/libs/clapack/slaed3.c336
-rw-r--r--contrib/libs/clapack/slaed4.c952
-rw-r--r--contrib/libs/clapack/slaed5.c149
-rw-r--r--contrib/libs/clapack/slaed6.c375
-rw-r--r--contrib/libs/clapack/slaed7.c352
-rw-r--r--contrib/libs/clapack/slaed8.c475
-rw-r--r--contrib/libs/clapack/slaed9.c272
-rw-r--r--contrib/libs/clapack/slaeda.c283
-rw-r--r--contrib/libs/clapack/slaein.c678
-rw-r--r--contrib/libs/clapack/slaev2.c188
-rw-r--r--contrib/libs/clapack/slaexc.c458
-rw-r--r--contrib/libs/clapack/slag2.c356
-rw-r--r--contrib/libs/clapack/slag2d.c100
-rw-r--r--contrib/libs/clapack/slags2.c290
-rw-r--r--contrib/libs/clapack/slagtf.c223
-rw-r--r--contrib/libs/clapack/slagtm.c253
-rw-r--r--contrib/libs/clapack/slagts.c351
-rw-r--r--contrib/libs/clapack/slagv2.c347
-rw-r--r--contrib/libs/clapack/slahqr.c631
-rw-r--r--contrib/libs/clapack/slahr2.c309
-rw-r--r--contrib/libs/clapack/slahrd.c282
-rw-r--r--contrib/libs/clapack/slaic1.c324
-rw-r--r--contrib/libs/clapack/slaisnan.c58
-rw-r--r--contrib/libs/clapack/slaln2.c577
-rw-r--r--contrib/libs/clapack/slals0.c470
-rw-r--r--contrib/libs/clapack/slalsa.c454
-rw-r--r--contrib/libs/clapack/slalsd.c523
-rw-r--r--contrib/libs/clapack/slamch.c1000
-rw-r--r--contrib/libs/clapack/slamrg.c131
-rw-r--r--contrib/libs/clapack/slaneg.c218
-rw-r--r--contrib/libs/clapack/slangb.c226
-rw-r--r--contrib/libs/clapack/slange.c199
-rw-r--r--contrib/libs/clapack/slangt.c196
-rw-r--r--contrib/libs/clapack/slanhs.c204
-rw-r--r--contrib/libs/clapack/slansb.c263
-rw-r--r--contrib/libs/clapack/slansf.c1013
-rw-r--r--contrib/libs/clapack/slansp.c262
-rw-r--r--contrib/libs/clapack/slanst.c166
-rw-r--r--contrib/libs/clapack/slansy.c239
-rw-r--r--contrib/libs/clapack/slantb.c434
-rw-r--r--contrib/libs/clapack/slantp.c391
-rw-r--r--contrib/libs/clapack/slantr.c398
-rw-r--r--contrib/libs/clapack/slanv2.c234
-rw-r--r--contrib/libs/clapack/slapll.c126
-rw-r--r--contrib/libs/clapack/slapmt.c177
-rw-r--r--contrib/libs/clapack/slapy2.c73
-rw-r--r--contrib/libs/clapack/slapy3.c83
-rw-r--r--contrib/libs/clapack/slaqgb.c216
-rw-r--r--contrib/libs/clapack/slaqge.c188
-rw-r--r--contrib/libs/clapack/slaqp2.c238
-rw-r--r--contrib/libs/clapack/slaqps.c342
-rw-r--r--contrib/libs/clapack/slaqr0.c753
-rw-r--r--contrib/libs/clapack/slaqr1.c126
-rw-r--r--contrib/libs/clapack/slaqr2.c694
-rw-r--r--contrib/libs/clapack/slaqr3.c710
-rw-r--r--contrib/libs/clapack/slaqr4.c751
-rw-r--r--contrib/libs/clapack/slaqr5.c1026
-rw-r--r--contrib/libs/clapack/slaqsb.c184
-rw-r--r--contrib/libs/clapack/slaqsp.c169
-rw-r--r--contrib/libs/clapack/slaqsy.c172
-rw-r--r--contrib/libs/clapack/slaqtr.c831
-rw-r--r--contrib/libs/clapack/slar1v.c440
-rw-r--r--contrib/libs/clapack/slar2v.c120
-rw-r--r--contrib/libs/clapack/slarf.c191
-rw-r--r--contrib/libs/clapack/slarfb.c773
-rw-r--r--contrib/libs/clapack/slarfg.c169
-rw-r--r--contrib/libs/clapack/slarfp.c191
-rw-r--r--contrib/libs/clapack/slarft.c323
-rw-r--r--contrib/libs/clapack/slarfx.c729
-rw-r--r--contrib/libs/clapack/slargv.c130
-rw-r--r--contrib/libs/clapack/slarnv.c146
-rw-r--r--contrib/libs/clapack/slarra.c155
-rw-r--r--contrib/libs/clapack/slarrb.c349
-rw-r--r--contrib/libs/clapack/slarrc.c183
-rw-r--r--contrib/libs/clapack/slarrd.c790
-rw-r--r--contrib/libs/clapack/slarre.c857
-rw-r--r--contrib/libs/clapack/slarrf.c422
-rw-r--r--contrib/libs/clapack/slarrj.c337
-rw-r--r--contrib/libs/clapack/slarrk.c193
-rw-r--r--contrib/libs/clapack/slarrr.c175
-rw-r--r--contrib/libs/clapack/slarrv.c980
-rw-r--r--contrib/libs/clapack/slartg.c189
-rw-r--r--contrib/libs/clapack/slartv.c105
-rw-r--r--contrib/libs/clapack/slaruv.c193
-rw-r--r--contrib/libs/clapack/slarz.c190
-rw-r--r--contrib/libs/clapack/slarzb.c287
-rw-r--r--contrib/libs/clapack/slarzt.c227
-rw-r--r--contrib/libs/clapack/slas2.c145
-rw-r--r--contrib/libs/clapack/slascl.c355
-rw-r--r--contrib/libs/clapack/slasd0.c286
-rw-r--r--contrib/libs/clapack/slasd1.c286
-rw-r--r--contrib/libs/clapack/slasd2.c607
-rw-r--r--contrib/libs/clapack/slasd3.c450
-rw-r--r--contrib/libs/clapack/slasd4.c1010
-rw-r--r--contrib/libs/clapack/slasd5.c189
-rw-r--r--contrib/libs/clapack/slasd6.c364
-rw-r--r--contrib/libs/clapack/slasd7.c516
-rw-r--r--contrib/libs/clapack/slasd8.c323
-rw-r--r--contrib/libs/clapack/slasda.c483
-rw-r--r--contrib/libs/clapack/slasdq.c379
-rw-r--r--contrib/libs/clapack/slasdt.c136
-rw-r--r--contrib/libs/clapack/slaset.c152
-rw-r--r--contrib/libs/clapack/slasq1.c216
-rw-r--r--contrib/libs/clapack/slasq2.c599
-rw-r--r--contrib/libs/clapack/slasq3.c346
-rw-r--r--contrib/libs/clapack/slasq4.c402
-rw-r--r--contrib/libs/clapack/slasq5.c239
-rw-r--r--contrib/libs/clapack/slasq6.c212
-rw-r--r--contrib/libs/clapack/slasr.c452
-rw-r--r--contrib/libs/clapack/slasrt.c285
-rw-r--r--contrib/libs/clapack/slassq.c116
-rw-r--r--contrib/libs/clapack/slasv2.c273
-rw-r--r--contrib/libs/clapack/slaswp.c158
-rw-r--r--contrib/libs/clapack/slasy2.c479
-rw-r--r--contrib/libs/clapack/slasyf.c719
-rw-r--r--contrib/libs/clapack/slatbs.c849
-rw-r--r--contrib/libs/clapack/slatdf.c301
-rw-r--r--contrib/libs/clapack/slatps.c822
-rw-r--r--contrib/libs/clapack/slatrd.c351
-rw-r--r--contrib/libs/clapack/slatrs.c813
-rw-r--r--contrib/libs/clapack/slatrz.c162
-rw-r--r--contrib/libs/clapack/slatzm.c189
-rw-r--r--contrib/libs/clapack/slauu2.c180
-rw-r--r--contrib/libs/clapack/slauum.c215
-rw-r--r--contrib/libs/clapack/sopgtr.c209
-rw-r--r--contrib/libs/clapack/sopmtr.c295
-rw-r--r--contrib/libs/clapack/sorg2l.c173
-rw-r--r--contrib/libs/clapack/sorg2r.c175
-rw-r--r--contrib/libs/clapack/sorgbr.c299
-rw-r--r--contrib/libs/clapack/sorghr.c214
-rw-r--r--contrib/libs/clapack/sorgl2.c175
-rw-r--r--contrib/libs/clapack/sorglq.c279
-rw-r--r--contrib/libs/clapack/sorgql.c288
-rw-r--r--contrib/libs/clapack/sorgqr.c280
-rw-r--r--contrib/libs/clapack/sorgr2.c174
-rw-r--r--contrib/libs/clapack/sorgrq.c288
-rw-r--r--contrib/libs/clapack/sorgtr.c250
-rw-r--r--contrib/libs/clapack/sorm2l.c230
-rw-r--r--contrib/libs/clapack/sorm2r.c234
-rw-r--r--contrib/libs/clapack/sormbr.c359
-rw-r--r--contrib/libs/clapack/sormhr.c256
-rw-r--r--contrib/libs/clapack/sorml2.c230
-rw-r--r--contrib/libs/clapack/sormlq.c335
-rw-r--r--contrib/libs/clapack/sormql.c329
-rw-r--r--contrib/libs/clapack/sormqr.c328
-rw-r--r--contrib/libs/clapack/sormr2.c226
-rw-r--r--contrib/libs/clapack/sormr3.c241
-rw-r--r--contrib/libs/clapack/sormrq.c335
-rw-r--r--contrib/libs/clapack/sormrz.c358
-rw-r--r--contrib/libs/clapack/sormtr.c296
-rw-r--r--contrib/libs/clapack/spbcon.c232
-rw-r--r--contrib/libs/clapack/spbequ.c202
-rw-r--r--contrib/libs/clapack/spbrfs.c434
-rw-r--r--contrib/libs/clapack/spbstf.c312
-rw-r--r--contrib/libs/clapack/spbsv.c181
-rw-r--r--contrib/libs/clapack/spbsvx.c512
-rw-r--r--contrib/libs/clapack/spbtf2.c244
-rw-r--r--contrib/libs/clapack/spbtrf.c469
-rw-r--r--contrib/libs/clapack/spbtrs.c182
-rw-r--r--contrib/libs/clapack/spftrf.c451
-rw-r--r--contrib/libs/clapack/spftri.c402
-rw-r--r--contrib/libs/clapack/spftrs.c238
-rw-r--r--contrib/libs/clapack/spocon.c217
-rw-r--r--contrib/libs/clapack/spoequ.c174
-rw-r--r--contrib/libs/clapack/spoequb.c188
-rw-r--r--contrib/libs/clapack/sporfs.c421
-rw-r--r--contrib/libs/clapack/sposv.c151
-rw-r--r--contrib/libs/clapack/sposvx.c446
-rw-r--r--contrib/libs/clapack/spotf2.c221
-rw-r--r--contrib/libs/clapack/spotrf.c243
-rw-r--r--contrib/libs/clapack/spotri.c124
-rw-r--r--contrib/libs/clapack/spotrs.c164
-rw-r--r--contrib/libs/clapack/sppcon.c213
-rw-r--r--contrib/libs/clapack/sppequ.c208
-rw-r--r--contrib/libs/clapack/spprfs.c408
-rw-r--r--contrib/libs/clapack/sppsv.c160
-rw-r--r--contrib/libs/clapack/sppsvx.c452
-rw-r--r--contrib/libs/clapack/spptrf.c221
-rw-r--r--contrib/libs/clapack/spptri.c171
-rw-r--r--contrib/libs/clapack/spptrs.c170
-rw-r--r--contrib/libs/clapack/spstf2.c392
-rw-r--r--contrib/libs/clapack/spstrf.c466
-rw-r--r--contrib/libs/clapack/sptcon.c184
-rw-r--r--contrib/libs/clapack/spteqr.c240
-rw-r--r--contrib/libs/clapack/sptrfs.c365
-rw-r--r--contrib/libs/clapack/sptsv.c129
-rw-r--r--contrib/libs/clapack/sptsvx.c279
-rw-r--r--contrib/libs/clapack/spttrf.c180
-rw-r--r--contrib/libs/clapack/spttrs.c156
-rw-r--r--contrib/libs/clapack/sptts2.c130
-rw-r--r--contrib/libs/clapack/srscl.c133
-rw-r--r--contrib/libs/clapack/ssbev.c265
-rw-r--r--contrib/libs/clapack/ssbevd.c332
-rw-r--r--contrib/libs/clapack/ssbevx.c513
-rw-r--r--contrib/libs/clapack/ssbgst.c1752
-rw-r--r--contrib/libs/clapack/ssbgv.c232
-rw-r--r--contrib/libs/clapack/ssbgvd.c327
-rw-r--r--contrib/libs/clapack/ssbgvx.c461
-rw-r--r--contrib/libs/clapack/ssbtrd.c710
-rw-r--r--contrib/libs/clapack/ssfrk.c516
-rw-r--r--contrib/libs/clapack/sspcon.c196
-rw-r--r--contrib/libs/clapack/sspev.c240
-rw-r--r--contrib/libs/clapack/sspevd.c310
-rw-r--r--contrib/libs/clapack/sspevx.c461
-rw-r--r--contrib/libs/clapack/sspgst.c281
-rw-r--r--contrib/libs/clapack/sspgv.c240
-rw-r--r--contrib/libs/clapack/sspgvd.c330
-rw-r--r--contrib/libs/clapack/sspgvx.c339
-rw-r--r--contrib/libs/clapack/ssprfs.c417
-rw-r--r--contrib/libs/clapack/sspsv.c176
-rw-r--r--contrib/libs/clapack/sspsvx.c325
-rw-r--r--contrib/libs/clapack/ssptrd.c275
-rw-r--r--contrib/libs/clapack/ssptrf.c627
-rw-r--r--contrib/libs/clapack/ssptri.c407
-rw-r--r--contrib/libs/clapack/ssptrs.c452
-rw-r--r--contrib/libs/clapack/sstebz.c773
-rw-r--r--contrib/libs/clapack/sstedc.c484
-rw-r--r--contrib/libs/clapack/sstegr.c209
-rw-r--r--contrib/libs/clapack/sstein.c449
-rw-r--r--contrib/libs/clapack/sstemr.c726
-rw-r--r--contrib/libs/clapack/ssteqr.c617
-rw-r--r--contrib/libs/clapack/ssterf.c460
-rw-r--r--contrib/libs/clapack/sstev.c209
-rw-r--r--contrib/libs/clapack/sstevd.c270
-rw-r--r--contrib/libs/clapack/sstevr.c541
-rw-r--r--contrib/libs/clapack/sstevx.c427
-rw-r--r--contrib/libs/clapack/ssycon.c202
-rw-r--r--contrib/libs/clapack/ssyequb.c334
-rw-r--r--contrib/libs/clapack/ssyev.c276
-rw-r--r--contrib/libs/clapack/ssyevd.c344
-rw-r--r--contrib/libs/clapack/ssyevr.c658
-rw-r--r--contrib/libs/clapack/ssyevx.c531
-rw-r--r--contrib/libs/clapack/ssygs2.c296
-rw-r--r--contrib/libs/clapack/ssygst.c342
-rw-r--r--contrib/libs/clapack/ssygv.c283
-rw-r--r--contrib/libs/clapack/ssygvd.c337
-rw-r--r--contrib/libs/clapack/ssygvx.c395
-rw-r--r--contrib/libs/clapack/ssyrfs.c427
-rw-r--r--contrib/libs/clapack/ssysv.c214
-rw-r--r--contrib/libs/clapack/ssysvx.c368
-rw-r--r--contrib/libs/clapack/ssytd2.c302
-rw-r--r--contrib/libs/clapack/ssytf2.c608
-rw-r--r--contrib/libs/clapack/ssytrd.c360
-rw-r--r--contrib/libs/clapack/ssytrf.c339
-rw-r--r--contrib/libs/clapack/ssytri.c394
-rw-r--r--contrib/libs/clapack/ssytrs.c449
-rw-r--r--contrib/libs/clapack/stbcon.c247
-rw-r--r--contrib/libs/clapack/stbrfs.c519
-rw-r--r--contrib/libs/clapack/stbtrs.c203
-rw-r--r--contrib/libs/clapack/stfsm.c973
-rw-r--r--contrib/libs/clapack/stftri.c473
-rw-r--r--contrib/libs/clapack/stfttp.c514
-rw-r--r--contrib/libs/clapack/stfttr.c491
-rw-r--r--contrib/libs/clapack/stgevc.c1415
-rw-r--r--contrib/libs/clapack/stgex2.c706
-rw-r--r--contrib/libs/clapack/stgexc.c514
-rw-r--r--contrib/libs/clapack/stgsen.c832
-rw-r--r--contrib/libs/clapack/stgsja.c619
-rw-r--r--contrib/libs/clapack/stgsna.c691
-rw-r--r--contrib/libs/clapack/stgsy2.c1106
-rw-r--r--contrib/libs/clapack/stgsyl.c691
-rw-r--r--contrib/libs/clapack/stpcon.c230
-rw-r--r--contrib/libs/clapack/stprfs.c493
-rw-r--r--contrib/libs/clapack/stptri.c218
-rw-r--r--contrib/libs/clapack/stptrs.c192
-rw-r--r--contrib/libs/clapack/stpttf.c499
-rw-r--r--contrib/libs/clapack/stpttr.c144
-rw-r--r--contrib/libs/clapack/strcon.c239
-rw-r--r--contrib/libs/clapack/strevc.c1223
-rw-r--r--contrib/libs/clapack/strexc.c403
-rw-r--r--contrib/libs/clapack/strrfs.c492
-rw-r--r--contrib/libs/clapack/strsen.c530
-rw-r--r--contrib/libs/clapack/strsna.c603
-rw-r--r--contrib/libs/clapack/strsyl.c1316
-rw-r--r--contrib/libs/clapack/strti2.c183
-rw-r--r--contrib/libs/clapack/strtri.c242
-rw-r--r--contrib/libs/clapack/strtrs.c182
-rw-r--r--contrib/libs/clapack/strttf.c489
-rw-r--r--contrib/libs/clapack/strttp.c143
-rw-r--r--contrib/libs/clapack/stzrqf.c219
-rw-r--r--contrib/libs/clapack/stzrzf.c310
-rw-r--r--contrib/libs/clapack/zbdsqr.c909
-rw-r--r--contrib/libs/clapack/zcgesv.c432
-rw-r--r--contrib/libs/clapack/zcposv.c440
-rw-r--r--contrib/libs/clapack/zdrscl.c135
-rw-r--r--contrib/libs/clapack/zgbbrd.c654
-rw-r--r--contrib/libs/clapack/zgbcon.c307
-rw-r--r--contrib/libs/clapack/zgbequ.c330
-rw-r--r--contrib/libs/clapack/zgbequb.c355
-rw-r--r--contrib/libs/clapack/zgbrfs.c494
-rw-r--r--contrib/libs/clapack/zgbsv.c176
-rw-r--r--contrib/libs/clapack/zgbsvx.c678
-rw-r--r--contrib/libs/clapack/zgbtf2.c268
-rw-r--r--contrib/libs/clapack/zgbtrf.c605
-rw-r--r--contrib/libs/clapack/zgbtrs.c281
-rw-r--r--contrib/libs/clapack/zgebak.c235
-rw-r--r--contrib/libs/clapack/zgebal.c414
-rw-r--r--contrib/libs/clapack/zgebd2.c345
-rw-r--r--contrib/libs/clapack/zgebrd.c351
-rw-r--r--contrib/libs/clapack/zgecon.c235
-rw-r--r--contrib/libs/clapack/zgeequ.c306
-rw-r--r--contrib/libs/clapack/zgeequb.c332
-rw-r--r--contrib/libs/clapack/zgees.c409
-rw-r--r--contrib/libs/clapack/zgeesx.c477
-rw-r--r--contrib/libs/clapack/zgeev.c533
-rw-r--r--contrib/libs/clapack/zgeevx.c686
-rw-r--r--contrib/libs/clapack/zgegs.c543
-rw-r--r--contrib/libs/clapack/zgegv.c781
-rw-r--r--contrib/libs/clapack/zgehd2.c199
-rw-r--r--contrib/libs/clapack/zgehrd.c353
-rw-r--r--contrib/libs/clapack/zgelq2.c165
-rw-r--r--contrib/libs/clapack/zgelqf.c257
-rw-r--r--contrib/libs/clapack/zgels.c520
-rw-r--r--contrib/libs/clapack/zgelsd.c724
-rw-r--r--contrib/libs/clapack/zgelss.c828
-rw-r--r--contrib/libs/clapack/zgelsx.c471
-rw-r--r--contrib/libs/clapack/zgelsy.c512
-rw-r--r--contrib/libs/clapack/zgeql2.c167
-rw-r--r--contrib/libs/clapack/zgeqlf.c276
-rw-r--r--contrib/libs/clapack/zgeqp3.c361
-rw-r--r--contrib/libs/clapack/zgeqpf.c316
-rw-r--r--contrib/libs/clapack/zgeqr2.c169
-rw-r--r--contrib/libs/clapack/zgeqrf.c258
-rw-r--r--contrib/libs/clapack/zgerfs.c461
-rw-r--r--contrib/libs/clapack/zgerq2.c162
-rw-r--r--contrib/libs/clapack/zgerqf.c275
-rw-r--r--contrib/libs/clapack/zgesc2.c206
-rw-r--r--contrib/libs/clapack/zgesdd.c2252
-rw-r--r--contrib/libs/clapack/zgesv.c140
-rw-r--r--contrib/libs/clapack/zgesvd.c4173
-rw-r--r--contrib/libs/clapack/zgesvx.c610
-rw-r--r--contrib/libs/clapack/zgetc2.c209
-rw-r--r--contrib/libs/clapack/zgetf2.c202
-rw-r--r--contrib/libs/clapack/zgetrf.c219
-rw-r--r--contrib/libs/clapack/zgetri.c270
-rw-r--r--contrib/libs/clapack/zgetrs.c187
-rw-r--r--contrib/libs/clapack/zggbak.c273
-rw-r--r--contrib/libs/clapack/zggbal.c657
-rw-r--r--contrib/libs/clapack/zgges.c604
-rw-r--r--contrib/libs/clapack/zggesx.c708
-rw-r--r--contrib/libs/clapack/zggev.c599
-rw-r--r--contrib/libs/clapack/zggevx.c812
-rw-r--r--contrib/libs/clapack/zggglm.c337
-rw-r--r--contrib/libs/clapack/zgghrd.c336
-rw-r--r--contrib/libs/clapack/zgglse.c346
-rw-r--r--contrib/libs/clapack/zggqrf.c270
-rw-r--r--contrib/libs/clapack/zggrqf.c271
-rw-r--r--contrib/libs/clapack/zggsvd.c407
-rw-r--r--contrib/libs/clapack/zggsvp.c533
-rw-r--r--contrib/libs/clapack/zgtcon.c209
-rw-r--r--contrib/libs/clapack/zgtrfs.c553
-rw-r--r--contrib/libs/clapack/zgtsv.c288
-rw-r--r--contrib/libs/clapack/zgtsvx.c353
-rw-r--r--contrib/libs/clapack/zgttrf.c275
-rw-r--r--contrib/libs/clapack/zgttrs.c194
-rw-r--r--contrib/libs/clapack/zgtts2.c584
-rw-r--r--contrib/libs/clapack/zhbev.c273
-rw-r--r--contrib/libs/clapack/zhbevd.c381
-rw-r--r--contrib/libs/clapack/zhbevx.c527
-rw-r--r--contrib/libs/clapack/zhbgst.c2152
-rw-r--r--contrib/libs/clapack/zhbgv.c238
-rw-r--r--contrib/libs/clapack/zhbgvd.c359
-rw-r--r--contrib/libs/clapack/zhbgvx.c473
-rw-r--r--contrib/libs/clapack/zhbtrd.c810
-rw-r--r--contrib/libs/clapack/zhecon.c203
-rw-r--r--contrib/libs/clapack/zheequb.c439
-rw-r--r--contrib/libs/clapack/zheev.c289
-rw-r--r--contrib/libs/clapack/zheevd.c382
-rw-r--r--contrib/libs/clapack/zheevr.c696
-rw-r--r--contrib/libs/clapack/zheevx.c548
-rw-r--r--contrib/libs/clapack/zhegs2.c338
-rw-r--r--contrib/libs/clapack/zhegst.c353
-rw-r--r--contrib/libs/clapack/zhegv.c289
-rw-r--r--contrib/libs/clapack/zhegvd.c367
-rw-r--r--contrib/libs/clapack/zhegvx.c397
-rw-r--r--contrib/libs/clapack/zherfs.c473
-rw-r--r--contrib/libs/clapack/zhesv.c213
-rw-r--r--contrib/libs/clapack/zhesvx.c368
-rw-r--r--contrib/libs/clapack/zhetd2.c361
-rw-r--r--contrib/libs/clapack/zhetf2.c802
-rw-r--r--contrib/libs/clapack/zhetrd.c370
-rw-r--r--contrib/libs/clapack/zhetrf.c336
-rw-r--r--contrib/libs/clapack/zhetri.c510
-rw-r--r--contrib/libs/clapack/zhetrs.c529
-rw-r--r--contrib/libs/clapack/zhfrk.c531
-rw-r--r--contrib/libs/clapack/zhgeqz.c1149
-rw-r--r--contrib/libs/clapack/zhpcon.c197
-rw-r--r--contrib/libs/clapack/zhpev.c254
-rw-r--r--contrib/libs/clapack/zhpevd.c349
-rw-r--r--contrib/libs/clapack/zhpevx.c475
-rw-r--r--contrib/libs/clapack/zhpgst.c313
-rw-r--r--contrib/libs/clapack/zhpgv.c246
-rw-r--r--contrib/libs/clapack/zhpgvd.c359
-rw-r--r--contrib/libs/clapack/zhpgvx.c344
-rw-r--r--contrib/libs/clapack/zhprfs.c462
-rw-r--r--contrib/libs/clapack/zhpsv.c177
-rw-r--r--contrib/libs/clapack/zhpsvx.c326
-rw-r--r--contrib/libs/clapack/zhptrd.c319
-rw-r--r--contrib/libs/clapack/zhptrf.c821
-rw-r--r--contrib/libs/clapack/zhptri.c513
-rw-r--r--contrib/libs/clapack/zhptrs.c532
-rw-r--r--contrib/libs/clapack/zhsein.c433
-rw-r--r--contrib/libs/clapack/zhseqr.c483
-rw-r--r--contrib/libs/clapack/zlabrd.c502
-rw-r--r--contrib/libs/clapack/zlacgv.c95
-rw-r--r--contrib/libs/clapack/zlacn2.c283
-rw-r--r--contrib/libs/clapack/zlacon.c275
-rw-r--r--contrib/libs/clapack/zlacp2.c134
-rw-r--r--contrib/libs/clapack/zlacpy.c134
-rw-r--r--contrib/libs/clapack/zlacrm.c177
-rw-r--r--contrib/libs/clapack/zlacrt.c156
-rw-r--r--contrib/libs/clapack/zladiv.c75
-rw-r--r--contrib/libs/clapack/zlaed0.c366
-rw-r--r--contrib/libs/clapack/zlaed7.c327
-rw-r--r--contrib/libs/clapack/zlaed8.c436
-rw-r--r--contrib/libs/clapack/zlaein.c397
-rw-r--r--contrib/libs/clapack/zlaesy.c208
-rw-r--r--contrib/libs/clapack/zlaev2.c125
-rw-r--r--contrib/libs/clapack/zlag2c.c124
-rw-r--r--contrib/libs/clapack/zlags2.c468
-rw-r--r--contrib/libs/clapack/zlagtm.c599
-rw-r--r--contrib/libs/clapack/zlahef.c938
-rw-r--r--contrib/libs/clapack/zlahqr.c755
-rw-r--r--contrib/libs/clapack/zlahr2.c331
-rw-r--r--contrib/libs/clapack/zlahrd.c301
-rw-r--r--contrib/libs/clapack/zlaic1.c451
-rw-r--r--contrib/libs/clapack/zlals0.c563
-rw-r--r--contrib/libs/clapack/zlalsa.c664
-rw-r--r--contrib/libs/clapack/zlalsd.c758
-rw-r--r--contrib/libs/clapack/zlangb.c224
-rw-r--r--contrib/libs/clapack/zlange.c199
-rw-r--r--contrib/libs/clapack/zlangt.c195
-rw-r--r--contrib/libs/clapack/zlanhb.c291
-rw-r--r--contrib/libs/clapack/zlanhe.c265
-rw-r--r--contrib/libs/clapack/zlanhf.c1803
-rw-r--r--contrib/libs/clapack/zlanhp.c277
-rw-r--r--contrib/libs/clapack/zlanhs.c205
-rw-r--r--contrib/libs/clapack/zlanht.c167
-rw-r--r--contrib/libs/clapack/zlansb.c261
-rw-r--r--contrib/libs/clapack/zlansp.c278
-rw-r--r--contrib/libs/clapack/zlansy.c237
-rw-r--r--contrib/libs/clapack/zlantb.c426
-rw-r--r--contrib/libs/clapack/zlantp.c391
-rw-r--r--contrib/libs/clapack/zlantr.c394
-rw-r--r--contrib/libs/clapack/zlapll.c143
-rw-r--r--contrib/libs/clapack/zlapmt.c186
-rw-r--r--contrib/libs/clapack/zlaqgb.c227
-rw-r--r--contrib/libs/clapack/zlaqge.c202
-rw-r--r--contrib/libs/clapack/zlaqhb.c201
-rw-r--r--contrib/libs/clapack/zlaqhe.c193
-rw-r--r--contrib/libs/clapack/zlaqhp.c189
-rw-r--r--contrib/libs/clapack/zlaqp2.c242
-rw-r--r--contrib/libs/clapack/zlaqps.c364
-rw-r--r--contrib/libs/clapack/zlaqr0.c787
-rw-r--r--contrib/libs/clapack/zlaqr1.c197
-rw-r--r--contrib/libs/clapack/zlaqr2.c611
-rw-r--r--contrib/libs/clapack/zlaqr3.c630
-rw-r--r--contrib/libs/clapack/zlaqr4.c785
-rw-r--r--contrib/libs/clapack/zlaqr5.c1349
-rw-r--r--contrib/libs/clapack/zlaqsb.c193
-rw-r--r--contrib/libs/clapack/zlaqsp.c179
-rw-r--r--contrib/libs/clapack/zlaqsy.c183
-rw-r--r--contrib/libs/clapack/zlar1v.c501
-rw-r--r--contrib/libs/clapack/zlar2v.c160
-rw-r--r--contrib/libs/clapack/zlarcm.c177
-rw-r--r--contrib/libs/clapack/zlarf.c200
-rw-r--r--contrib/libs/clapack/zlarfb.c839
-rw-r--r--contrib/libs/clapack/zlarfg.c191
-rw-r--r--contrib/libs/clapack/zlarfp.c236
-rw-r--r--contrib/libs/clapack/zlarft.c362
-rw-r--r--contrib/libs/clapack/zlarfx.c2050
-rw-r--r--contrib/libs/clapack/zlargv.c336
-rw-r--r--contrib/libs/clapack/zlarnv.c190
-rw-r--r--contrib/libs/clapack/zlarrv.c1022
-rw-r--r--contrib/libs/clapack/zlartg.c285
-rw-r--r--contrib/libs/clapack/zlartv.c126
-rw-r--r--contrib/libs/clapack/zlarz.c200
-rw-r--r--contrib/libs/clapack/zlarzb.c323
-rw-r--r--contrib/libs/clapack/zlarzt.c238
-rw-r--r--contrib/libs/clapack/zlascl.c376
-rw-r--r--contrib/libs/clapack/zlaset.c163
-rw-r--r--contrib/libs/clapack/zlasr.c610
-rw-r--r--contrib/libs/clapack/zlassq.c138
-rw-r--r--contrib/libs/clapack/zlaswp.c166
-rw-r--r--contrib/libs/clapack/zlasyf.c831
-rw-r--r--contrib/libs/clapack/zlat2c.c152
-rw-r--r--contrib/libs/clapack/zlatbs.c1195
-rw-r--r--contrib/libs/clapack/zlatdf.c359
-rw-r--r--contrib/libs/clapack/zlatps.c1163
-rw-r--r--contrib/libs/clapack/zlatrd.c420
-rw-r--r--contrib/libs/clapack/zlatrs.c1150
-rw-r--r--contrib/libs/clapack/zlatrz.c181
-rw-r--r--contrib/libs/clapack/zlatzm.c198
-rw-r--r--contrib/libs/clapack/zlauu2.c203
-rw-r--r--contrib/libs/clapack/zlauum.c217
-rw-r--r--contrib/libs/clapack/zpbcon.c236
-rw-r--r--contrib/libs/clapack/zpbequ.c205
-rw-r--r--contrib/libs/clapack/zpbrfs.c483
-rw-r--r--contrib/libs/clapack/zpbstf.c334
-rw-r--r--contrib/libs/clapack/zpbsv.c183
-rw-r--r--contrib/libs/clapack/zpbsvx.c528
-rw-r--r--contrib/libs/clapack/zpbtf2.c255
-rw-r--r--contrib/libs/clapack/zpbtrf.c490
-rw-r--r--contrib/libs/clapack/zpbtrs.c183
-rw-r--r--contrib/libs/clapack/zpftrf.c475
-rw-r--r--contrib/libs/clapack/zpftri.c426
-rw-r--r--contrib/libs/clapack/zpftrs.c260
-rw-r--r--contrib/libs/clapack/zpocon.c225
-rw-r--r--contrib/libs/clapack/zpoequ.c176
-rw-r--r--contrib/libs/clapack/zpoequb.c195
-rw-r--r--contrib/libs/clapack/zporfs.c465
-rw-r--r--contrib/libs/clapack/zposv.c152
-rw-r--r--contrib/libs/clapack/zposvx.c462
-rw-r--r--contrib/libs/clapack/zpotf2.c245
-rw-r--r--contrib/libs/clapack/zpotrf.c248
-rw-r--r--contrib/libs/clapack/zpotri.c125
-rw-r--r--contrib/libs/clapack/zpotrs.c166
-rw-r--r--contrib/libs/clapack/zppcon.c223
-rw-r--r--contrib/libs/clapack/zppequ.c210
-rw-r--r--contrib/libs/clapack/zpprfs.c457
-rw-r--r--contrib/libs/clapack/zppsv.c161
-rw-r--r--contrib/libs/clapack/zppsvx.c465
-rw-r--r--contrib/libs/clapack/zpptrf.c233
-rw-r--r--contrib/libs/clapack/zpptri.c179
-rw-r--r--contrib/libs/clapack/zpptrs.c169
-rw-r--r--contrib/libs/clapack/zpstf2.c443
-rw-r--r--contrib/libs/clapack/zpstrf.c529
-rw-r--r--contrib/libs/clapack/zptcon.c187
-rw-r--r--contrib/libs/clapack/zpteqr.c243
-rw-r--r--contrib/libs/clapack/zptrfs.c576
-rw-r--r--contrib/libs/clapack/zptsv.c130
-rw-r--r--contrib/libs/clapack/zptsvx.c290
-rw-r--r--contrib/libs/clapack/zpttrf.c216
-rw-r--r--contrib/libs/clapack/zpttrs.c178
-rw-r--r--contrib/libs/clapack/zptts2.c315
-rw-r--r--contrib/libs/clapack/zrot.c155
-rw-r--r--contrib/libs/clapack/zspcon.c197
-rw-r--r--contrib/libs/clapack/zspmv.c428
-rw-r--r--contrib/libs/clapack/zspr.c339
-rw-r--r--contrib/libs/clapack/zsprfs.c464
-rw-r--r--contrib/libs/clapack/zspsv.c177
-rw-r--r--contrib/libs/clapack/zspsvx.c326
-rw-r--r--contrib/libs/clapack/zsptrf.c763
-rw-r--r--contrib/libs/clapack/zsptri.c508
-rw-r--r--contrib/libs/clapack/zsptrs.c503
-rw-r--r--contrib/libs/clapack/zstedc.c497
-rw-r--r--contrib/libs/clapack/zstegr.c211
-rw-r--r--contrib/libs/clapack/zstein.c470
-rw-r--r--contrib/libs/clapack/zstemr.c752
-rw-r--r--contrib/libs/clapack/zsteqr.c621
-rw-r--r--contrib/libs/clapack/zsycon.c203
-rw-r--r--contrib/libs/clapack/zsyequb.c450
-rw-r--r--contrib/libs/clapack/zsymv.c429
-rw-r--r--contrib/libs/clapack/zsyr.c289
-rw-r--r--contrib/libs/clapack/zsyrfs.c474
-rw-r--r--contrib/libs/clapack/zsysv.c213
-rw-r--r--contrib/libs/clapack/zsysvx.c368
-rw-r--r--contrib/libs/clapack/zsytf2.c727
-rw-r--r--contrib/libs/clapack/zsytrf.c343
-rw-r--r--contrib/libs/clapack/zsytri.c489
-rw-r--r--contrib/libs/clapack/zsytrs.c502
-rw-r--r--contrib/libs/clapack/ztbcon.c254
-rw-r--r--contrib/libs/clapack/ztbrfs.c586
-rw-r--r--contrib/libs/clapack/ztbtrs.c205
-rw-r--r--contrib/libs/clapack/ztfsm.c1024
-rw-r--r--contrib/libs/clapack/ztftri.c500
-rw-r--r--contrib/libs/clapack/ztfttp.c575
-rw-r--r--contrib/libs/clapack/ztfttr.c580
-rw-r--r--contrib/libs/clapack/ztgevc.c972
-rw-r--r--contrib/libs/clapack/ztgex2.c376
-rw-r--r--contrib/libs/clapack/ztgexc.c248
-rw-r--r--contrib/libs/clapack/ztgsen.c766
-rw-r--r--contrib/libs/clapack/ztgsja.c672
-rw-r--r--contrib/libs/clapack/ztgsna.c487
-rw-r--r--contrib/libs/clapack/ztgsy2.c478
-rw-r--r--contrib/libs/clapack/ztgsyl.c695
-rw-r--r--contrib/libs/clapack/ztpcon.c242
-rw-r--r--contrib/libs/clapack/ztprfs.c561
-rw-r--r--contrib/libs/clapack/ztptri.c235
-rw-r--r--contrib/libs/clapack/ztptrs.c194
-rw-r--r--contrib/libs/clapack/ztpttf.c573
-rw-r--r--contrib/libs/clapack/ztpttr.c148
-rw-r--r--contrib/libs/clapack/ztrcon.c250
-rw-r--r--contrib/libs/clapack/ztrevc.c533
-rw-r--r--contrib/libs/clapack/ztrexc.c216
-rw-r--r--contrib/libs/clapack/ztrrfs.c565
-rw-r--r--contrib/libs/clapack/ztrsen.c422
-rw-r--r--contrib/libs/clapack/ztrsna.c446
-rw-r--r--contrib/libs/clapack/ztrsyl.c547
-rw-r--r--contrib/libs/clapack/ztrti2.c198
-rw-r--r--contrib/libs/clapack/ztrtri.c244
-rw-r--r--contrib/libs/clapack/ztrtrs.c184
-rw-r--r--contrib/libs/clapack/ztrttf.c580
-rw-r--r--contrib/libs/clapack/ztrttp.c149
-rw-r--r--contrib/libs/clapack/ztzrqf.c241
-rw-r--r--contrib/libs/clapack/ztzrzf.c313
-rw-r--r--contrib/libs/clapack/zung2l.c183
-rw-r--r--contrib/libs/clapack/zung2r.c185
-rw-r--r--contrib/libs/clapack/zungbr.c310
-rw-r--r--contrib/libs/clapack/zunghr.c224
-rw-r--r--contrib/libs/clapack/zungl2.c193
-rw-r--r--contrib/libs/clapack/zunglq.c287
-rw-r--r--contrib/libs/clapack/zungql.c296
-rw-r--r--contrib/libs/clapack/zungqr.c288
-rw-r--r--contrib/libs/clapack/zungr2.c192
-rw-r--r--contrib/libs/clapack/zungrq.c296
-rw-r--r--contrib/libs/clapack/zungtr.c262
-rw-r--r--contrib/libs/clapack/zunm2l.c245
-rw-r--r--contrib/libs/clapack/zunm2r.c249
-rw-r--r--contrib/libs/clapack/zunmbr.c371
-rw-r--r--contrib/libs/clapack/zunmhr.c257
-rw-r--r--contrib/libs/clapack/zunml2.c253
-rw-r--r--contrib/libs/clapack/zunmlq.c338
-rw-r--r--contrib/libs/clapack/zunmql.c332
-rw-r--r--contrib/libs/clapack/zunmqr.c332
-rw-r--r--contrib/libs/clapack/zunmr2.c245
-rw-r--r--contrib/libs/clapack/zunmr3.c254
-rw-r--r--contrib/libs/clapack/zunmrq.c339
-rw-r--r--contrib/libs/clapack/zunmrz.c371
-rw-r--r--contrib/libs/clapack/zunmtr.c295
-rw-r--r--contrib/libs/clapack/zupgtr.c221
-rw-r--r--contrib/libs/clapack/zupmtr.c321
-rw-r--r--contrib/libs/libf2c/Notice23
-rw-r--r--contrib/libs/libf2c/README374
-rw-r--r--contrib/libs/libf2c/abort_.c22
-rw-r--r--contrib/libs/libf2c/arith.h8
-rw-r--r--contrib/libs/libf2c/backspac.c76
-rw-r--r--contrib/libs/libf2c/c_abs.c20
-rw-r--r--contrib/libs/libf2c/c_cos.c23
-rw-r--r--contrib/libs/libf2c/c_div.c53
-rw-r--r--contrib/libs/libf2c/c_exp.c25
-rw-r--r--contrib/libs/libf2c/c_log.c23
-rw-r--r--contrib/libs/libf2c/c_sin.c23
-rw-r--r--contrib/libs/libf2c/c_sqrt.c41
-rw-r--r--contrib/libs/libf2c/cabs.c33
-rw-r--r--contrib/libs/libf2c/close.c101
-rw-r--r--contrib/libs/libf2c/ctype.c2
-rw-r--r--contrib/libs/libf2c/ctype_.h47
-rw-r--r--contrib/libs/libf2c/d_abs.c18
-rw-r--r--contrib/libs/libf2c/d_acos.c19
-rw-r--r--contrib/libs/libf2c/d_asin.c19
-rw-r--r--contrib/libs/libf2c/d_atan.c19
-rw-r--r--contrib/libs/libf2c/d_atn2.c19
-rw-r--r--contrib/libs/libf2c/d_cnjg.c19
-rw-r--r--contrib/libs/libf2c/d_cos.c19
-rw-r--r--contrib/libs/libf2c/d_cosh.c19
-rw-r--r--contrib/libs/libf2c/d_dim.c16
-rw-r--r--contrib/libs/libf2c/d_exp.c19
-rw-r--r--contrib/libs/libf2c/d_imag.c16
-rw-r--r--contrib/libs/libf2c/d_int.c19
-rw-r--r--contrib/libs/libf2c/d_lg10.c21
-rw-r--r--contrib/libs/libf2c/d_log.c19
-rw-r--r--contrib/libs/libf2c/d_mod.c46
-rw-r--r--contrib/libs/libf2c/d_nint.c20
-rw-r--r--contrib/libs/libf2c/d_prod.c16
-rw-r--r--contrib/libs/libf2c/d_sign.c18
-rw-r--r--contrib/libs/libf2c/d_sin.c19
-rw-r--r--contrib/libs/libf2c/d_sinh.c19
-rw-r--r--contrib/libs/libf2c/d_sqrt.c19
-rw-r--r--contrib/libs/libf2c/d_tan.c19
-rw-r--r--contrib/libs/libf2c/d_tanh.c19
-rw-r--r--contrib/libs/libf2c/derf_.c18
-rw-r--r--contrib/libs/libf2c/derfc_.c20
-rw-r--r--contrib/libs/libf2c/dfe.c151
-rw-r--r--contrib/libs/libf2c/dolio.c26
-rw-r--r--contrib/libs/libf2c/dtime_.c63
-rw-r--r--contrib/libs/libf2c/due.c77
-rw-r--r--contrib/libs/libf2c/ef1asc_.c25
-rw-r--r--contrib/libs/libf2c/ef1cmc_.c20
-rw-r--r--contrib/libs/libf2c/endfile.c160
-rw-r--r--contrib/libs/libf2c/erf_.c22
-rw-r--r--contrib/libs/libf2c/erfc_.c22
-rw-r--r--contrib/libs/libf2c/err.c293
-rw-r--r--contrib/libs/libf2c/etime_.c57
-rw-r--r--contrib/libs/libf2c/exit_.c43
-rw-r--r--contrib/libs/libf2c/f2c.h248
-rw-r--r--contrib/libs/libf2c/f77_aloc.c44
-rw-r--r--contrib/libs/libf2c/f77vers.c97
-rw-r--r--contrib/libs/libf2c/fio.h141
-rw-r--r--contrib/libs/libf2c/fmt.c530
-rw-r--r--contrib/libs/libf2c/fmt.h105
-rw-r--r--contrib/libs/libf2c/fmtlib.c51
-rw-r--r--contrib/libs/libf2c/fp.h28
-rw-r--r--contrib/libs/libf2c/ftell_.c52
-rw-r--r--contrib/libs/libf2c/getarg_.c36
-rw-r--r--contrib/libs/libf2c/getenv_.c62
-rw-r--r--contrib/libs/libf2c/h_abs.c18
-rw-r--r--contrib/libs/libf2c/h_dim.c16
-rw-r--r--contrib/libs/libf2c/h_dnnt.c19
-rw-r--r--contrib/libs/libf2c/h_indx.c32
-rw-r--r--contrib/libs/libf2c/h_len.c16
-rw-r--r--contrib/libs/libf2c/h_mod.c16
-rw-r--r--contrib/libs/libf2c/h_nint.c19
-rw-r--r--contrib/libs/libf2c/h_sign.c18
-rw-r--r--contrib/libs/libf2c/hl_ge.c18
-rw-r--r--contrib/libs/libf2c/hl_gt.c18
-rw-r--r--contrib/libs/libf2c/hl_le.c18
-rw-r--r--contrib/libs/libf2c/hl_lt.c18
-rw-r--r--contrib/libs/libf2c/i77vers.c343
-rw-r--r--contrib/libs/libf2c/i_abs.c18
-rw-r--r--contrib/libs/libf2c/i_ceiling.c36
-rw-r--r--contrib/libs/libf2c/i_dim.c16
-rw-r--r--contrib/libs/libf2c/i_dnnt.c19
-rw-r--r--contrib/libs/libf2c/i_indx.c32
-rw-r--r--contrib/libs/libf2c/i_len.c16
-rw-r--r--contrib/libs/libf2c/i_len_trim.c22
-rw-r--r--contrib/libs/libf2c/i_mod.c16
-rw-r--r--contrib/libs/libf2c/i_nint.c19
-rw-r--r--contrib/libs/libf2c/i_sign.c18
-rw-r--r--contrib/libs/libf2c/iargc_.c17
-rw-r--r--contrib/libs/libf2c/iio.c159
-rw-r--r--contrib/libs/libf2c/ilnw.c83
-rw-r--r--contrib/libs/libf2c/inquire.c117
-rw-r--r--contrib/libs/libf2c/l_ge.c18
-rw-r--r--contrib/libs/libf2c/l_gt.c18
-rw-r--r--contrib/libs/libf2c/l_le.c18
-rw-r--r--contrib/libs/libf2c/l_lt.c18
-rw-r--r--contrib/libs/libf2c/lbitbits.c68
-rw-r--r--contrib/libs/libf2c/lbitshft.c17
-rw-r--r--contrib/libs/libf2c/lio.h74
-rw-r--r--contrib/libs/libf2c/lread.c806
-rw-r--r--contrib/libs/libf2c/lwrite.c314
-rw-r--r--contrib/libs/libf2c/open.c301
-rw-r--r--contrib/libs/libf2c/pow_ci.c26
-rw-r--r--contrib/libs/libf2c/pow_dd.c19
-rw-r--r--contrib/libs/libf2c/pow_di.c41
-rw-r--r--contrib/libs/libf2c/pow_hh.c39
-rw-r--r--contrib/libs/libf2c/pow_ii.c39
-rw-r--r--contrib/libs/libf2c/pow_ri.c41
-rw-r--r--contrib/libs/libf2c/pow_zi.c60
-rw-r--r--contrib/libs/libf2c/pow_zz.c29
-rw-r--r--contrib/libs/libf2c/r_abs.c18
-rw-r--r--contrib/libs/libf2c/r_acos.c19
-rw-r--r--contrib/libs/libf2c/r_asin.c19
-rw-r--r--contrib/libs/libf2c/r_atan.c19
-rw-r--r--contrib/libs/libf2c/r_atn2.c19
-rw-r--r--contrib/libs/libf2c/r_cnjg.c18
-rw-r--r--contrib/libs/libf2c/r_cos.c19
-rw-r--r--contrib/libs/libf2c/r_cosh.c19
-rw-r--r--contrib/libs/libf2c/r_dim.c16
-rw-r--r--contrib/libs/libf2c/r_exp.c19
-rw-r--r--contrib/libs/libf2c/r_imag.c16
-rw-r--r--contrib/libs/libf2c/r_int.c19
-rw-r--r--contrib/libs/libf2c/r_lg10.c21
-rw-r--r--contrib/libs/libf2c/r_log.c19
-rw-r--r--contrib/libs/libf2c/r_mod.c46
-rw-r--r--contrib/libs/libf2c/r_nint.c20
-rw-r--r--contrib/libs/libf2c/r_sign.c18
-rw-r--r--contrib/libs/libf2c/r_sin.c19
-rw-r--r--contrib/libs/libf2c/r_sinh.c19
-rw-r--r--contrib/libs/libf2c/r_sqrt.c19
-rw-r--r--contrib/libs/libf2c/r_tan.c19
-rw-r--r--contrib/libs/libf2c/r_tanh.c19
-rw-r--r--contrib/libs/libf2c/rdfmt.c553
-rw-r--r--contrib/libs/libf2c/rewind.c30
-rw-r--r--contrib/libs/libf2c/rsfe.c91
-rw-r--r--contrib/libs/libf2c/rsli.c109
-rw-r--r--contrib/libs/libf2c/rsne.c618
-rw-r--r--contrib/libs/libf2c/s_cat.c86
-rw-r--r--contrib/libs/libf2c/s_cmp.c50
-rw-r--r--contrib/libs/libf2c/s_copy.c68
-rw-r--r--contrib/libs/libf2c/s_paus.c96
-rw-r--r--contrib/libs/libf2c/s_rnge.c32
-rw-r--r--contrib/libs/libf2c/s_stop.c48
-rw-r--r--contrib/libs/libf2c/sfe.c47
-rw-r--r--contrib/libs/libf2c/sig_die.c51
-rw-r--r--contrib/libs/libf2c/signal1.h35
-rw-r--r--contrib/libs/libf2c/signal_.c21
-rw-r--r--contrib/libs/libf2c/sue.c90
-rw-r--r--contrib/libs/libf2c/sysdep1.h71
-rw-r--r--contrib/libs/libf2c/system_.c47
-rw-r--r--contrib/libs/libf2c/typesize.c18
-rw-r--r--contrib/libs/libf2c/uio.c75
-rw-r--r--contrib/libs/libf2c/uninit.c379
-rw-r--r--contrib/libs/libf2c/util.c57
-rw-r--r--contrib/libs/libf2c/wref.c294
-rw-r--r--contrib/libs/libf2c/wrtfmt.c377
-rw-r--r--contrib/libs/libf2c/wsfe.c78
-rw-r--r--contrib/libs/libf2c/wsle.c42
-rw-r--r--contrib/libs/libf2c/wsne.c32
-rw-r--r--contrib/libs/libf2c/xwsne.c77
-rw-r--r--contrib/libs/libf2c/z_abs.c18
-rw-r--r--contrib/libs/libf2c/z_cos.c21
-rw-r--r--contrib/libs/libf2c/z_div.c50
-rw-r--r--contrib/libs/libf2c/z_exp.c23
-rw-r--r--contrib/libs/libf2c/z_log.c121
-rw-r--r--contrib/libs/libf2c/z_sin.c21
-rw-r--r--contrib/libs/libf2c/z_sqrt.c35
-rw-r--r--contrib/libs/python/Include/datetime.h7
-rw-r--r--ydb/public/lib/ydb_cli/commands/ydb_service_scheme.cpp1
-rw-r--r--ydb/public/lib/ydb_cli/commands/ydb_service_topic.cpp64
-rw-r--r--ydb/public/lib/ydb_cli/commands/ydb_service_topic.h16
1989 files changed, 665985 insertions, 3 deletions
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/arrow_to_pandas.cc b/contrib/libs/apache/arrow/cpp/src/arrow/python/arrow_to_pandas.cc
new file mode 100644
index 0000000000..cc386f589a
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/arrow_to_pandas.cc
@@ -0,0 +1,2299 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Functions for pandas conversion via NumPy
+
+#include "arrow/python/arrow_to_pandas.h"
+#include "arrow/python/numpy_interop.h" // IWYU pragma: expand
+
+#include <cmath>
+#include <cstdint>
+#include <iostream>
+#include <memory>
+#include <mutex>
+#include <string>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "arrow/array.h"
+#include "arrow/buffer.h"
+#include "arrow/datum.h"
+#include "arrow/status.h"
+#include "arrow/table.h"
+#include "arrow/type.h"
+#include "arrow/type_traits.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/hashing.h"
+#include "arrow/util/int_util.h"
+#include "arrow/util/logging.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/parallel.h"
+#include "arrow/util/string_view.h"
+#include "arrow/visitor_inline.h"
+
+#include "arrow/compute/api.h"
+
+#include "arrow/python/common.h"
+#include "arrow/python/datetime.h"
+#include "arrow/python/decimal.h"
+#include "arrow/python/helpers.h"
+#include "arrow/python/numpy_convert.h"
+#include "arrow/python/numpy_internal.h"
+#include "arrow/python/pyarrow.h"
+#include "arrow/python/python_to_arrow.h"
+#include "arrow/python/type_traits.h"
+
+namespace arrow {
+
+class MemoryPool;
+
+using internal::checked_cast;
+using internal::CheckIndexBounds;
+using internal::GetByteWidth;
+using internal::OptionalParallelFor;
+
+namespace py {
+namespace {
+
+// Fix options for conversion of an inner (child) array.
+PandasOptions MakeInnerOptions(PandasOptions options) {
+ // Make sure conversion of inner dictionary arrays always returns an array,
+ // not a dict {'indices': array, 'dictionary': array, 'ordered': bool}
+ options.decode_dictionaries = true;
+ options.categorical_columns.clear();
+ options.strings_to_categorical = false;
+
+ // In ARROW-7723, we found as a result of ARROW-3789 that second
+ // through microsecond resolution tz-aware timestamps were being promoted to
+ // use the DATETIME_NANO_TZ conversion path, yielding a datetime64[ns] NumPy
+ // array in this function. PyArray_GETITEM returns datetime.datetime for
+ // units second through microsecond but PyLong for nanosecond (because
+ // datetime.datetime does not support nanoseconds).
+ // We force the object conversion to preserve the value of the timezone.
+ // Nanoseconds are returned as integers.
+ options.coerce_temporal_nanoseconds = false;
+
+ return options;
+}
+
+// ----------------------------------------------------------------------
+// PyCapsule code for setting ndarray base to reference C++ object
+
+struct ArrayCapsule {
+ std::shared_ptr<Array> array;
+};
+
+struct BufferCapsule {
+ std::shared_ptr<Buffer> buffer;
+};
+
+void ArrayCapsule_Destructor(PyObject* capsule) {
+ delete reinterpret_cast<ArrayCapsule*>(PyCapsule_GetPointer(capsule, "arrow::Array"));
+}
+
+void BufferCapsule_Destructor(PyObject* capsule) {
+ delete reinterpret_cast<BufferCapsule*>(PyCapsule_GetPointer(capsule, "arrow::Buffer"));
+}
+
+// ----------------------------------------------------------------------
+// pandas 0.x DataFrame conversion internals
+
+using internal::arrow_traits;
+using internal::npy_traits;
+
+template <typename T>
+struct WrapBytes {};
+
+template <>
+struct WrapBytes<StringType> {
+ static inline PyObject* Wrap(const char* data, int64_t length) {
+ return PyUnicode_FromStringAndSize(data, length);
+ }
+};
+
+template <>
+struct WrapBytes<LargeStringType> {
+ static inline PyObject* Wrap(const char* data, int64_t length) {
+ return PyUnicode_FromStringAndSize(data, length);
+ }
+};
+
+template <>
+struct WrapBytes<BinaryType> {
+ static inline PyObject* Wrap(const char* data, int64_t length) {
+ return PyBytes_FromStringAndSize(data, length);
+ }
+};
+
+template <>
+struct WrapBytes<LargeBinaryType> {
+ static inline PyObject* Wrap(const char* data, int64_t length) {
+ return PyBytes_FromStringAndSize(data, length);
+ }
+};
+
+template <>
+struct WrapBytes<FixedSizeBinaryType> {
+ static inline PyObject* Wrap(const char* data, int64_t length) {
+ return PyBytes_FromStringAndSize(data, length);
+ }
+};
+
+static inline bool ListTypeSupported(const DataType& type) {
+ switch (type.id()) {
+ case Type::BOOL:
+ case Type::UINT8:
+ case Type::INT8:
+ case Type::UINT16:
+ case Type::INT16:
+ case Type::UINT32:
+ case Type::INT32:
+ case Type::INT64:
+ case Type::UINT64:
+ case Type::FLOAT:
+ case Type::DOUBLE:
+ case Type::DECIMAL128:
+ case Type::DECIMAL256:
+ case Type::BINARY:
+ case Type::LARGE_BINARY:
+ case Type::STRING:
+ case Type::LARGE_STRING:
+ case Type::DATE32:
+ case Type::DATE64:
+ case Type::STRUCT:
+ case Type::TIME32:
+ case Type::TIME64:
+ case Type::TIMESTAMP:
+ case Type::DURATION:
+ case Type::DICTIONARY:
+ case Type::NA: // empty list
+ // The above types are all supported.
+ return true;
+ case Type::FIXED_SIZE_LIST:
+ case Type::LIST:
+ case Type::LARGE_LIST: {
+ const auto& list_type = checked_cast<const BaseListType&>(type);
+ return ListTypeSupported(*list_type.value_type());
+ }
+ default:
+ break;
+ }
+ return false;
+}
+
+Status CapsulizeArray(const std::shared_ptr<Array>& arr, PyObject** out) {
+ auto capsule = new ArrayCapsule{{arr}};
+ *out = PyCapsule_New(reinterpret_cast<void*>(capsule), "arrow::Array",
+ &ArrayCapsule_Destructor);
+ if (*out == nullptr) {
+ delete capsule;
+ RETURN_IF_PYERROR();
+ }
+ return Status::OK();
+}
+
+Status CapsulizeBuffer(const std::shared_ptr<Buffer>& buffer, PyObject** out) {
+ auto capsule = new BufferCapsule{{buffer}};
+ *out = PyCapsule_New(reinterpret_cast<void*>(capsule), "arrow::Buffer",
+ &BufferCapsule_Destructor);
+ if (*out == nullptr) {
+ delete capsule;
+ RETURN_IF_PYERROR();
+ }
+ return Status::OK();
+}
+
+Status SetNdarrayBase(PyArrayObject* arr, PyObject* base) {
+ if (PyArray_SetBaseObject(arr, base) == -1) {
+ // Error occurred, trust that SetBaseObject sets the error state
+ Py_XDECREF(base);
+ RETURN_IF_PYERROR();
+ }
+ return Status::OK();
+}
+
+Status SetBufferBase(PyArrayObject* arr, const std::shared_ptr<Buffer>& buffer) {
+ PyObject* base;
+ RETURN_NOT_OK(CapsulizeBuffer(buffer, &base));
+ return SetNdarrayBase(arr, base);
+}
+
+inline void set_numpy_metadata(int type, const DataType* datatype, PyArray_Descr* out) {
+ auto metadata = reinterpret_cast<PyArray_DatetimeDTypeMetaData*>(out->c_metadata);
+ if (type == NPY_DATETIME) {
+ if (datatype->id() == Type::TIMESTAMP) {
+ const auto& timestamp_type = checked_cast<const TimestampType&>(*datatype);
+ metadata->meta.base = internal::NumPyFrequency(timestamp_type.unit());
+ } else {
+ DCHECK(false) << "NPY_DATETIME views only supported for Arrow TIMESTAMP types";
+ }
+ } else if (type == NPY_TIMEDELTA) {
+ DCHECK_EQ(datatype->id(), Type::DURATION);
+ const auto& duration_type = checked_cast<const DurationType&>(*datatype);
+ metadata->meta.base = internal::NumPyFrequency(duration_type.unit());
+ }
+}
+
+Status PyArray_NewFromPool(int nd, npy_intp* dims, PyArray_Descr* descr, MemoryPool* pool,
+ PyObject** out) {
+ // ARROW-6570: Allocate memory from MemoryPool for a couple reasons
+ //
+ // * Track allocations
+ // * Get better performance through custom allocators
+ int64_t total_size = descr->elsize;
+ for (int i = 0; i < nd; ++i) {
+ total_size *= dims[i];
+ }
+
+ ARROW_ASSIGN_OR_RAISE(auto buffer, AllocateBuffer(total_size, pool));
+ *out = PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims,
+ /*strides=*/nullptr,
+ /*data=*/buffer->mutable_data(),
+ /*flags=*/NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEABLE,
+ /*obj=*/nullptr);
+ if (*out == nullptr) {
+ RETURN_IF_PYERROR();
+ // Trust that error set if NULL returned
+ }
+ return SetBufferBase(reinterpret_cast<PyArrayObject*>(*out), std::move(buffer));
+}
+
+template <typename T = void>
+inline const T* GetPrimitiveValues(const Array& arr) {
+ if (arr.length() == 0) {
+ return nullptr;
+ }
+ const int elsize = GetByteWidth(*arr.type());
+ const auto& prim_arr = checked_cast<const PrimitiveArray&>(arr);
+ return reinterpret_cast<const T*>(prim_arr.values()->data() + arr.offset() * elsize);
+}
+
+Status MakeNumPyView(std::shared_ptr<Array> arr, PyObject* py_ref, int npy_type, int ndim,
+ npy_intp* dims, PyObject** out) {
+ PyAcquireGIL lock;
+
+ PyArray_Descr* descr = internal::GetSafeNumPyDtype(npy_type);
+ set_numpy_metadata(npy_type, arr->type().get(), descr);
+ PyObject* result = PyArray_NewFromDescr(
+ &PyArray_Type, descr, ndim, dims, /*strides=*/nullptr,
+ const_cast<void*>(GetPrimitiveValues(*arr)), /*flags=*/0, nullptr);
+ PyArrayObject* np_arr = reinterpret_cast<PyArrayObject*>(result);
+ if (np_arr == nullptr) {
+ // Error occurred, trust that error set
+ return Status::OK();
+ }
+
+ PyObject* base;
+ if (py_ref == nullptr) {
+ // Capsule will be owned by the ndarray, no incref necessary. See
+ // ARROW-1973
+ RETURN_NOT_OK(CapsulizeArray(arr, &base));
+ } else {
+ Py_INCREF(py_ref);
+ base = py_ref;
+ }
+ RETURN_NOT_OK(SetNdarrayBase(np_arr, base));
+
+ // Do not allow Arrow data to be mutated
+ PyArray_CLEARFLAGS(np_arr, NPY_ARRAY_WRITEABLE);
+ *out = result;
+ return Status::OK();
+}
+
+class PandasWriter {
+ public:
+ enum type {
+ OBJECT,
+ UINT8,
+ INT8,
+ UINT16,
+ INT16,
+ UINT32,
+ INT32,
+ UINT64,
+ INT64,
+ HALF_FLOAT,
+ FLOAT,
+ DOUBLE,
+ BOOL,
+ DATETIME_DAY,
+ DATETIME_SECOND,
+ DATETIME_MILLI,
+ DATETIME_MICRO,
+ DATETIME_NANO,
+ DATETIME_NANO_TZ,
+ TIMEDELTA_SECOND,
+ TIMEDELTA_MILLI,
+ TIMEDELTA_MICRO,
+ TIMEDELTA_NANO,
+ CATEGORICAL,
+ EXTENSION
+ };
+
+ PandasWriter(const PandasOptions& options, int64_t num_rows, int num_columns)
+ : options_(options), num_rows_(num_rows), num_columns_(num_columns) {}
+ virtual ~PandasWriter() {}
+
+ void SetBlockData(PyObject* arr) {
+ block_arr_.reset(arr);
+ block_data_ =
+ reinterpret_cast<uint8_t*>(PyArray_DATA(reinterpret_cast<PyArrayObject*>(arr)));
+ }
+
+ /// \brief Either copy or wrap single array to create pandas-compatible array
+ /// for Series or DataFrame. num_columns_ can only be 1. Will try to zero
+ /// copy if possible (or error if not possible and zero_copy_only=True)
+ virtual Status TransferSingle(std::shared_ptr<ChunkedArray> data, PyObject* py_ref) = 0;
+
+ /// \brief Copy ChunkedArray into a multi-column block
+ virtual Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) = 0;
+
+ Status EnsurePlacementAllocated() {
+ std::lock_guard<std::mutex> guard(allocation_lock_);
+ if (placement_data_ != nullptr) {
+ return Status::OK();
+ }
+ PyAcquireGIL lock;
+
+ npy_intp placement_dims[1] = {num_columns_};
+ PyObject* placement_arr = PyArray_SimpleNew(1, placement_dims, NPY_INT64);
+ RETURN_IF_PYERROR();
+ placement_arr_.reset(placement_arr);
+ placement_data_ = reinterpret_cast<int64_t*>(
+ PyArray_DATA(reinterpret_cast<PyArrayObject*>(placement_arr)));
+ return Status::OK();
+ }
+
+ Status EnsureAllocated() {
+ std::lock_guard<std::mutex> guard(allocation_lock_);
+ if (block_data_ != nullptr) {
+ return Status::OK();
+ }
+ RETURN_NOT_OK(Allocate());
+ return Status::OK();
+ }
+
+ virtual bool CanZeroCopy(const ChunkedArray& data) const { return false; }
+
+ virtual Status Write(std::shared_ptr<ChunkedArray> data, int64_t abs_placement,
+ int64_t rel_placement) {
+ RETURN_NOT_OK(EnsurePlacementAllocated());
+ if (num_columns_ == 1 && options_.allow_zero_copy_blocks) {
+ RETURN_NOT_OK(TransferSingle(data, /*py_ref=*/nullptr));
+ } else {
+ RETURN_NOT_OK(
+ CheckNoZeroCopy("Cannot do zero copy conversion into "
+ "multi-column DataFrame block"));
+ RETURN_NOT_OK(EnsureAllocated());
+ RETURN_NOT_OK(CopyInto(data, rel_placement));
+ }
+ placement_data_[rel_placement] = abs_placement;
+ return Status::OK();
+ }
+
+ virtual Status GetDataFrameResult(PyObject** out) {
+ PyObject* result = PyDict_New();
+ RETURN_IF_PYERROR();
+
+ PyObject* block;
+ RETURN_NOT_OK(GetResultBlock(&block));
+
+ PyDict_SetItemString(result, "block", block);
+ PyDict_SetItemString(result, "placement", placement_arr_.obj());
+
+ RETURN_NOT_OK(AddResultMetadata(result));
+ *out = result;
+ return Status::OK();
+ }
+
+ // Caller steals the reference to this object
+ virtual Status GetSeriesResult(PyObject** out) {
+ RETURN_NOT_OK(MakeBlock1D());
+ // Caller owns the object now
+ *out = block_arr_.detach();
+ return Status::OK();
+ }
+
+ protected:
+ virtual Status AddResultMetadata(PyObject* result) { return Status::OK(); }
+
+ Status MakeBlock1D() {
+ // For Series or for certain DataFrame block types, we need to shape to a
+ // 1D array when there is only one column
+ PyAcquireGIL lock;
+
+ DCHECK_EQ(1, num_columns_);
+
+ npy_intp new_dims[1] = {static_cast<npy_intp>(num_rows_)};
+ PyArray_Dims dims;
+ dims.ptr = new_dims;
+ dims.len = 1;
+
+ PyObject* reshaped = PyArray_Newshape(
+ reinterpret_cast<PyArrayObject*>(block_arr_.obj()), &dims, NPY_ANYORDER);
+ RETURN_IF_PYERROR();
+
+ // ARROW-8801: Here a PyArrayObject is created that is not being managed by
+ // any OwnedRef object. This object is then put in the resulting object
+ // with PyDict_SetItemString, which increments the reference count, so a
+ // memory leak ensues. There are several ways to fix the memory leak but a
+ // simple one is to put the reshaped 1D block array in this OwnedRefNoGIL
+ // so it will be correctly decref'd when this class is destructed.
+ block_arr_.reset(reshaped);
+ return Status::OK();
+ }
+
+ virtual Status GetResultBlock(PyObject** out) {
+ *out = block_arr_.obj();
+ return Status::OK();
+ }
+
+ Status CheckNoZeroCopy(const std::string& message) {
+ if (options_.zero_copy_only) {
+ return Status::Invalid(message);
+ }
+ return Status::OK();
+ }
+
+ Status CheckNotZeroCopyOnly(const ChunkedArray& data) {
+ if (options_.zero_copy_only) {
+ return Status::Invalid("Needed to copy ", data.num_chunks(), " chunks with ",
+ data.null_count(), " nulls, but zero_copy_only was True");
+ }
+ return Status::OK();
+ }
+
+ virtual Status Allocate() {
+ return Status::NotImplemented("Override Allocate in subclasses");
+ }
+
+ Status AllocateNDArray(int npy_type, int ndim = 2) {
+ PyAcquireGIL lock;
+
+ PyObject* block_arr;
+ npy_intp block_dims[2] = {0, 0};
+
+ if (ndim == 2) {
+ block_dims[0] = num_columns_;
+ block_dims[1] = num_rows_;
+ } else {
+ block_dims[0] = num_rows_;
+ }
+ PyArray_Descr* descr = internal::GetSafeNumPyDtype(npy_type);
+ if (PyDataType_REFCHK(descr)) {
+ // ARROW-6876: if the array has refcounted items, let Numpy
+ // own the array memory so as to decref elements on array destruction
+ block_arr = PyArray_SimpleNewFromDescr(ndim, block_dims, descr);
+ RETURN_IF_PYERROR();
+ } else {
+ RETURN_NOT_OK(
+ PyArray_NewFromPool(ndim, block_dims, descr, options_.pool, &block_arr));
+ }
+
+ SetBlockData(block_arr);
+ return Status::OK();
+ }
+
+ void SetDatetimeUnit(NPY_DATETIMEUNIT unit) {
+ PyAcquireGIL lock;
+ auto date_dtype = reinterpret_cast<PyArray_DatetimeDTypeMetaData*>(
+ PyArray_DESCR(reinterpret_cast<PyArrayObject*>(block_arr_.obj()))->c_metadata);
+ date_dtype->meta.base = unit;
+ }
+
+ PandasOptions options_;
+
+ std::mutex allocation_lock_;
+
+ int64_t num_rows_;
+ int num_columns_;
+
+ OwnedRefNoGIL block_arr_;
+ uint8_t* block_data_ = nullptr;
+
+ // ndarray<int32>
+ OwnedRefNoGIL placement_arr_;
+ int64_t* placement_data_ = nullptr;
+
+ private:
+ ARROW_DISALLOW_COPY_AND_ASSIGN(PandasWriter);
+};
+
+template <typename InType, typename OutType>
+inline void ConvertIntegerWithNulls(const PandasOptions& options,
+ const ChunkedArray& data, OutType* out_values) {
+ for (int c = 0; c < data.num_chunks(); c++) {
+ const auto& arr = *data.chunk(c);
+ const InType* in_values = GetPrimitiveValues<InType>(arr);
+ // Upcast to double, set NaN as appropriate
+
+ for (int i = 0; i < arr.length(); ++i) {
+ *out_values++ =
+ arr.IsNull(i) ? static_cast<OutType>(NAN) : static_cast<OutType>(in_values[i]);
+ }
+ }
+}
+
+template <typename T>
+inline void ConvertIntegerNoNullsSameType(const PandasOptions& options,
+ const ChunkedArray& data, T* out_values) {
+ for (int c = 0; c < data.num_chunks(); c++) {
+ const auto& arr = *data.chunk(c);
+ if (arr.length() > 0) {
+ const T* in_values = GetPrimitiveValues<T>(arr);
+ memcpy(out_values, in_values, sizeof(T) * arr.length());
+ out_values += arr.length();
+ }
+ }
+}
+
+template <typename InType, typename OutType>
+inline void ConvertIntegerNoNullsCast(const PandasOptions& options,
+ const ChunkedArray& data, OutType* out_values) {
+ for (int c = 0; c < data.num_chunks(); c++) {
+ const auto& arr = *data.chunk(c);
+ const InType* in_values = GetPrimitiveValues<InType>(arr);
+ for (int64_t i = 0; i < arr.length(); ++i) {
+ *out_values = in_values[i];
+ }
+ }
+}
+
+// Generic Array -> PyObject** converter that handles object deduplication, if
+// requested
+template <typename ArrayType, typename WriteValue>
+inline Status WriteArrayObjects(const ArrayType& arr, WriteValue&& write_func,
+ PyObject** out_values) {
+ const bool has_nulls = arr.null_count() > 0;
+ for (int64_t i = 0; i < arr.length(); ++i) {
+ if (has_nulls && arr.IsNull(i)) {
+ Py_INCREF(Py_None);
+ *out_values = Py_None;
+ } else {
+ RETURN_NOT_OK(write_func(arr.GetView(i), out_values));
+ }
+ ++out_values;
+ }
+ return Status::OK();
+}
+
+template <typename T, typename Enable = void>
+struct MemoizationTraits {
+ using Scalar = typename T::c_type;
+};
+
+template <typename T>
+struct MemoizationTraits<T, enable_if_has_string_view<T>> {
+ // For binary, we memoize string_view as a scalar value to avoid having to
+ // unnecessarily copy the memory into the memo table data structure
+ using Scalar = util::string_view;
+};
+
+template <typename Type, typename WrapFunction>
+inline Status ConvertAsPyObjects(const PandasOptions& options, const ChunkedArray& data,
+ WrapFunction&& wrap_func, PyObject** out_values) {
+ using ArrayType = typename TypeTraits<Type>::ArrayType;
+ using Scalar = typename MemoizationTraits<Type>::Scalar;
+
+ // TODO(fsaintjacques): propagate memory pool.
+ ::arrow::internal::ScalarMemoTable<Scalar> memo_table(default_memory_pool());
+ std::vector<PyObject*> unique_values;
+ int32_t memo_size = 0;
+
+ auto WrapMemoized = [&](const Scalar& value, PyObject** out_values) {
+ int32_t memo_index;
+ RETURN_NOT_OK(memo_table.GetOrInsert(value, &memo_index));
+ if (memo_index == memo_size) {
+ // New entry
+ RETURN_NOT_OK(wrap_func(value, out_values));
+ unique_values.push_back(*out_values);
+ ++memo_size;
+ } else {
+ // Duplicate entry
+ Py_INCREF(unique_values[memo_index]);
+ *out_values = unique_values[memo_index];
+ }
+ return Status::OK();
+ };
+
+ auto WrapUnmemoized = [&](const Scalar& value, PyObject** out_values) {
+ return wrap_func(value, out_values);
+ };
+
+ for (int c = 0; c < data.num_chunks(); c++) {
+ const auto& arr = checked_cast<const ArrayType&>(*data.chunk(c));
+ if (options.deduplicate_objects) {
+ RETURN_NOT_OK(WriteArrayObjects(arr, WrapMemoized, out_values));
+ } else {
+ RETURN_NOT_OK(WriteArrayObjects(arr, WrapUnmemoized, out_values));
+ }
+ out_values += arr.length();
+ }
+ return Status::OK();
+}
+
+Status ConvertStruct(PandasOptions options, const ChunkedArray& data,
+ PyObject** out_values) {
+ if (data.num_chunks() == 0) {
+ return Status::OK();
+ }
+ // ChunkedArray has at least one chunk
+ auto arr = checked_cast<const StructArray*>(data.chunk(0).get());
+ // Use it to cache the struct type and number of fields for all chunks
+ int32_t num_fields = arr->num_fields();
+ auto array_type = arr->type();
+ std::vector<OwnedRef> fields_data(num_fields * data.num_chunks());
+ OwnedRef dict_item;
+
+ // See notes in MakeInnerOptions.
+ options = MakeInnerOptions(std::move(options));
+ // Don't blindly convert because timestamps in lists are handled differently.
+ options.timestamp_as_object = true;
+
+ for (int c = 0; c < data.num_chunks(); c++) {
+ auto fields_data_offset = c * num_fields;
+ auto arr = checked_cast<const StructArray*>(data.chunk(c).get());
+ // Convert the struct arrays first
+ for (int32_t i = 0; i < num_fields; i++) {
+ const auto field = arr->field(static_cast<int>(i));
+ RETURN_NOT_OK(ConvertArrayToPandas(options, field, nullptr,
+ fields_data[i + fields_data_offset].ref()));
+ DCHECK(PyArray_Check(fields_data[i + fields_data_offset].obj()));
+ }
+
+ // Construct a dictionary for each row
+ const bool has_nulls = data.null_count() > 0;
+ for (int64_t i = 0; i < arr->length(); ++i) {
+ if (has_nulls && arr->IsNull(i)) {
+ Py_INCREF(Py_None);
+ *out_values = Py_None;
+ } else {
+ // Build the new dict object for the row
+ dict_item.reset(PyDict_New());
+ RETURN_IF_PYERROR();
+ for (int32_t field_idx = 0; field_idx < num_fields; ++field_idx) {
+ OwnedRef field_value;
+ auto name = array_type->field(static_cast<int>(field_idx))->name();
+ if (!arr->field(static_cast<int>(field_idx))->IsNull(i)) {
+ // Value exists in child array, obtain it
+ auto array = reinterpret_cast<PyArrayObject*>(
+ fields_data[field_idx + fields_data_offset].obj());
+ auto ptr = reinterpret_cast<const char*>(PyArray_GETPTR1(array, i));
+ field_value.reset(PyArray_GETITEM(array, ptr));
+ RETURN_IF_PYERROR();
+ } else {
+ // Translate the Null to a None
+ Py_INCREF(Py_None);
+ field_value.reset(Py_None);
+ }
+ // PyDict_SetItemString increments reference count
+ auto setitem_result =
+ PyDict_SetItemString(dict_item.obj(), name.c_str(), field_value.obj());
+ RETURN_IF_PYERROR();
+ DCHECK_EQ(setitem_result, 0);
+ }
+ *out_values = dict_item.obj();
+ // Grant ownership to the resulting array
+ Py_INCREF(*out_values);
+ }
+ ++out_values;
+ }
+ }
+ return Status::OK();
+}
+
+Status DecodeDictionaries(MemoryPool* pool, const std::shared_ptr<DataType>& dense_type,
+ ArrayVector* arrays) {
+ compute::ExecContext ctx(pool);
+ compute::CastOptions options;
+ for (size_t i = 0; i < arrays->size(); ++i) {
+ ARROW_ASSIGN_OR_RAISE((*arrays)[i],
+ compute::Cast(*(*arrays)[i], dense_type, options, &ctx));
+ }
+ return Status::OK();
+}
+
+Status DecodeDictionaries(MemoryPool* pool, const std::shared_ptr<DataType>& dense_type,
+ std::shared_ptr<ChunkedArray>* array) {
+ auto chunks = (*array)->chunks();
+ RETURN_NOT_OK(DecodeDictionaries(pool, dense_type, &chunks));
+ *array = std::make_shared<ChunkedArray>(std::move(chunks), dense_type);
+ return Status::OK();
+}
+
+template <typename ListArrayT>
+Status ConvertListsLike(PandasOptions options, const ChunkedArray& data,
+ PyObject** out_values) {
+ // Get column of underlying value arrays
+ ArrayVector value_arrays;
+ for (int c = 0; c < data.num_chunks(); c++) {
+ const auto& arr = checked_cast<const ListArrayT&>(*data.chunk(c));
+ value_arrays.emplace_back(arr.values());
+ }
+ using ListArrayType = typename ListArrayT::TypeClass;
+ const auto& list_type = checked_cast<const ListArrayType&>(*data.type());
+ auto value_type = list_type.value_type();
+
+ auto flat_column = std::make_shared<ChunkedArray>(value_arrays, value_type);
+
+ options = MakeInnerOptions(std::move(options));
+
+ OwnedRefNoGIL owned_numpy_array;
+ RETURN_NOT_OK(ConvertChunkedArrayToPandas(options, flat_column, nullptr,
+ owned_numpy_array.ref()));
+
+ PyObject* numpy_array = owned_numpy_array.obj();
+ DCHECK(PyArray_Check(numpy_array));
+
+ int64_t chunk_offset = 0;
+ for (int c = 0; c < data.num_chunks(); c++) {
+ const auto& arr = checked_cast<const ListArrayT&>(*data.chunk(c));
+
+ const bool has_nulls = data.null_count() > 0;
+ for (int64_t i = 0; i < arr.length(); ++i) {
+ if (has_nulls && arr.IsNull(i)) {
+ Py_INCREF(Py_None);
+ *out_values = Py_None;
+ } else {
+ OwnedRef start(PyLong_FromLongLong(arr.value_offset(i) + chunk_offset));
+ OwnedRef end(PyLong_FromLongLong(arr.value_offset(i + 1) + chunk_offset));
+ OwnedRef slice(PySlice_New(start.obj(), end.obj(), nullptr));
+
+ if (ARROW_PREDICT_FALSE(slice.obj() == nullptr)) {
+ // Fall out of loop, will return from RETURN_IF_PYERROR
+ break;
+ }
+ *out_values = PyObject_GetItem(numpy_array, slice.obj());
+
+ if (*out_values == nullptr) {
+ // Fall out of loop, will return from RETURN_IF_PYERROR
+ break;
+ }
+ }
+ ++out_values;
+ }
+ RETURN_IF_PYERROR();
+
+ chunk_offset += arr.values()->length();
+ }
+
+ return Status::OK();
+}
+
+Status ConvertMap(PandasOptions options, const ChunkedArray& data,
+ PyObject** out_values) {
+ // Get columns of underlying key/item arrays
+ std::vector<std::shared_ptr<Array>> key_arrays;
+ std::vector<std::shared_ptr<Array>> item_arrays;
+ for (int c = 0; c < data.num_chunks(); ++c) {
+ const auto& map_arr = checked_cast<const MapArray&>(*data.chunk(c));
+ key_arrays.emplace_back(map_arr.keys());
+ item_arrays.emplace_back(map_arr.items());
+ }
+
+ const auto& map_type = checked_cast<const MapType&>(*data.type());
+ auto key_type = map_type.key_type();
+ auto item_type = map_type.item_type();
+
+ // ARROW-6899: Convert dictionary-encoded children to dense instead of
+ // failing below. A more efficient conversion than this could be done later
+ if (key_type->id() == Type::DICTIONARY) {
+ auto dense_type = checked_cast<const DictionaryType&>(*key_type).value_type();
+ RETURN_NOT_OK(DecodeDictionaries(options.pool, dense_type, &key_arrays));
+ key_type = dense_type;
+ }
+ if (item_type->id() == Type::DICTIONARY) {
+ auto dense_type = checked_cast<const DictionaryType&>(*item_type).value_type();
+ RETURN_NOT_OK(DecodeDictionaries(options.pool, dense_type, &item_arrays));
+ item_type = dense_type;
+ }
+
+ // See notes in MakeInnerOptions.
+ options = MakeInnerOptions(std::move(options));
+ // Don't blindly convert because timestamps in lists are handled differently.
+ options.timestamp_as_object = true;
+
+ auto flat_keys = std::make_shared<ChunkedArray>(key_arrays, key_type);
+ auto flat_items = std::make_shared<ChunkedArray>(item_arrays, item_type);
+ OwnedRef list_item;
+ OwnedRef key_value;
+ OwnedRef item_value;
+ OwnedRefNoGIL owned_numpy_keys;
+ RETURN_NOT_OK(
+ ConvertChunkedArrayToPandas(options, flat_keys, nullptr, owned_numpy_keys.ref()));
+ OwnedRefNoGIL owned_numpy_items;
+ RETURN_NOT_OK(
+ ConvertChunkedArrayToPandas(options, flat_items, nullptr, owned_numpy_items.ref()));
+ PyArrayObject* py_keys = reinterpret_cast<PyArrayObject*>(owned_numpy_keys.obj());
+ PyArrayObject* py_items = reinterpret_cast<PyArrayObject*>(owned_numpy_items.obj());
+
+ int64_t chunk_offset = 0;
+ for (int c = 0; c < data.num_chunks(); ++c) {
+ const auto& arr = checked_cast<const MapArray&>(*data.chunk(c));
+ const bool has_nulls = data.null_count() > 0;
+
+ // Make a list of key/item pairs for each row in array
+ for (int64_t i = 0; i < arr.length(); ++i) {
+ if (has_nulls && arr.IsNull(i)) {
+ Py_INCREF(Py_None);
+ *out_values = Py_None;
+ } else {
+ int64_t entry_offset = arr.value_offset(i);
+ int64_t num_maps = arr.value_offset(i + 1) - entry_offset;
+
+ // Build the new list object for the row of maps
+ list_item.reset(PyList_New(num_maps));
+ RETURN_IF_PYERROR();
+
+ // Add each key/item pair in the row
+ for (int64_t j = 0; j < num_maps; ++j) {
+ // Get key value, key is non-nullable for a valid row
+ auto ptr_key = reinterpret_cast<const char*>(
+ PyArray_GETPTR1(py_keys, chunk_offset + entry_offset + j));
+ key_value.reset(PyArray_GETITEM(py_keys, ptr_key));
+ RETURN_IF_PYERROR();
+
+ if (item_arrays[c]->IsNull(entry_offset + j)) {
+ // Translate the Null to a None
+ Py_INCREF(Py_None);
+ item_value.reset(Py_None);
+ } else {
+ // Get valid value from item array
+ auto ptr_item = reinterpret_cast<const char*>(
+ PyArray_GETPTR1(py_items, chunk_offset + entry_offset + j));
+ item_value.reset(PyArray_GETITEM(py_items, ptr_item));
+ RETURN_IF_PYERROR();
+ }
+
+ // Add the key/item pair to the list for the row
+ PyList_SET_ITEM(list_item.obj(), j,
+ PyTuple_Pack(2, key_value.obj(), item_value.obj()));
+ RETURN_IF_PYERROR();
+ }
+
+ // Pass ownership to the resulting array
+ *out_values = list_item.detach();
+ }
+ ++out_values;
+ }
+ RETURN_IF_PYERROR();
+
+ chunk_offset += arr.values()->length();
+ }
+
+ return Status::OK();
+}
+
+template <typename InType, typename OutType>
+inline void ConvertNumericNullable(const ChunkedArray& data, InType na_value,
+ OutType* out_values) {
+ for (int c = 0; c < data.num_chunks(); c++) {
+ const auto& arr = *data.chunk(c);
+ const InType* in_values = GetPrimitiveValues<InType>(arr);
+
+ if (arr.null_count() > 0) {
+ for (int64_t i = 0; i < arr.length(); ++i) {
+ *out_values++ = arr.IsNull(i) ? na_value : in_values[i];
+ }
+ } else {
+ memcpy(out_values, in_values, sizeof(InType) * arr.length());
+ out_values += arr.length();
+ }
+ }
+}
+
+template <typename InType, typename OutType>
+inline void ConvertNumericNullableCast(const ChunkedArray& data, InType na_value,
+ OutType* out_values) {
+ for (int c = 0; c < data.num_chunks(); c++) {
+ const auto& arr = *data.chunk(c);
+ const InType* in_values = GetPrimitiveValues<InType>(arr);
+
+ for (int64_t i = 0; i < arr.length(); ++i) {
+ *out_values++ = arr.IsNull(i) ? static_cast<OutType>(na_value)
+ : static_cast<OutType>(in_values[i]);
+ }
+ }
+}
+
+template <int NPY_TYPE>
+class TypedPandasWriter : public PandasWriter {
+ public:
+ using T = typename npy_traits<NPY_TYPE>::value_type;
+
+ using PandasWriter::PandasWriter;
+
+ Status TransferSingle(std::shared_ptr<ChunkedArray> data, PyObject* py_ref) override {
+ if (CanZeroCopy(*data)) {
+ PyObject* wrapped;
+ npy_intp dims[2] = {static_cast<npy_intp>(num_columns_),
+ static_cast<npy_intp>(num_rows_)};
+ RETURN_NOT_OK(
+ MakeNumPyView(data->chunk(0), py_ref, NPY_TYPE, /*ndim=*/2, dims, &wrapped));
+ SetBlockData(wrapped);
+ return Status::OK();
+ } else {
+ RETURN_NOT_OK(CheckNotZeroCopyOnly(*data));
+ RETURN_NOT_OK(EnsureAllocated());
+ return CopyInto(data, /*rel_placement=*/0);
+ }
+ }
+
+ Status CheckTypeExact(const DataType& type, Type::type expected) {
+ if (type.id() != expected) {
+ // TODO(wesm): stringify NumPy / pandas type
+ return Status::NotImplemented("Cannot write Arrow data of type ", type.ToString());
+ }
+ return Status::OK();
+ }
+
+ T* GetBlockColumnStart(int64_t rel_placement) {
+ return reinterpret_cast<T*>(block_data_) + rel_placement * num_rows_;
+ }
+
+ protected:
+ Status Allocate() override { return AllocateNDArray(NPY_TYPE); }
+};
+
// Type visitor that converts a ChunkedArray into a pre-allocated array of
// PyObject* (NumPy object dtype). Dispatched via VisitTypeInline; each Visit
// overload advances out_values one slot per logical element and stores a new
// (owned) reference in every slot.
struct ObjectWriterVisitor {
  const PandasOptions& options;
  const ChunkedArray& data;
  // Destination cursor: one PyObject* slot per element, advanced as we write.
  PyObject** out_values;

  // Null arrays: every slot becomes None.
  Status Visit(const NullType& type) {
    for (int c = 0; c < data.num_chunks(); c++) {
      std::shared_ptr<Array> arr = data.chunk(c);

      for (int64_t i = 0; i < arr->length(); ++i) {
        // All values are null
        Py_INCREF(Py_None);
        *out_values = Py_None;
        ++out_values;
      }
    }
    return Status::OK();
  }

  // Booleans map to the Py_True / Py_False singletons (None for nulls);
  // each store takes its own reference.
  Status Visit(const BooleanType& type) {
    for (int c = 0; c < data.num_chunks(); c++) {
      const auto& arr = checked_cast<const BooleanArray&>(*data.chunk(c));

      for (int64_t i = 0; i < arr.length(); ++i) {
        if (arr.IsNull(i)) {
          Py_INCREF(Py_None);
          *out_values++ = Py_None;
        } else if (arr.Value(i)) {
          // True
          Py_INCREF(Py_True);
          *out_values++ = Py_True;
        } else {
          // False
          Py_INCREF(Py_False);
          *out_values++ = Py_False;
        }
      }
    }
    return Status::OK();
  }

  // Integers become Python ints; the signed/unsigned CPython constructor is
  // chosen at compile time from the Arrow C type.
  template <typename Type>
  enable_if_integer<Type, Status> Visit(const Type& type) {
    using T = typename Type::c_type;
    auto WrapValue = [](T value, PyObject** out) {
      *out = std::is_signed<T>::value ? PyLong_FromLongLong(value)
                                      : PyLong_FromUnsignedLongLong(value);
      RETURN_IF_PYERROR();
      return Status::OK();
    };
    return ConvertAsPyObjects<Type>(options, data, WrapValue, out_values);
  }

  // Binary-like values (string/binary, large variants, fixed-size binary)
  // are wrapped via the per-type WrapBytes helper (bytes or str objects).
  template <typename Type>
  enable_if_t<is_base_binary_type<Type>::value || is_fixed_size_binary_type<Type>::value,
              Status>
  Visit(const Type& type) {
    auto WrapValue = [](const util::string_view& view, PyObject** out) {
      *out = WrapBytes<Type>::Wrap(view.data(), view.length());
      if (*out == nullptr) {
        PyErr_Clear();
        return Status::UnknownError("Wrapping ", view, " failed");
      }
      return Status::OK();
    };
    return ConvertAsPyObjects<Type>(options, data, WrapValue, out_values);
  }

  // date32/date64 -> datetime.date objects.
  template <typename Type>
  enable_if_date<Type, Status> Visit(const Type& type) {
    auto WrapValue = [](typename Type::c_type value, PyObject** out) {
      RETURN_NOT_OK(internal::PyDate_from_int(value, Type::UNIT, out));
      RETURN_IF_PYERROR();
      return Status::OK();
    };
    return ConvertAsPyObjects<Type>(options, data, WrapValue, out_values);
  }

  // time32/time64 -> datetime.time objects, honoring the column's unit.
  template <typename Type>
  enable_if_time<Type, Status> Visit(const Type& type) {
    const TimeUnit::type unit = type.unit();
    auto WrapValue = [unit](typename Type::c_type value, PyObject** out) {
      RETURN_NOT_OK(internal::PyTime_from_int(value, unit, out));
      RETURN_IF_PYERROR();
      return Status::OK();
    };
    return ConvertAsPyObjects<Type>(options, data, WrapValue, out_values);
  }

  // Timestamps -> datetime.datetime objects. When the column carries a
  // timezone (and ignore_timezone is off), the naive datetime is localized
  // with tzinfo.fromutc(); otherwise a naive datetime is produced.
  template <typename Type>
  enable_if_timestamp<Type, Status> Visit(const Type& type) {
    const TimeUnit::type unit = type.unit();
    OwnedRef tzinfo;

    auto ConvertTimezoneNaive = [&](typename Type::c_type value, PyObject** out) {
      RETURN_NOT_OK(internal::PyDateTime_from_int(value, unit, out));
      RETURN_IF_PYERROR();
      return Status::OK();
    };
    auto ConvertTimezoneAware = [&](typename Type::c_type value, PyObject** out) {
      PyObject* naive_datetime;
      RETURN_NOT_OK(ConvertTimezoneNaive(value, &naive_datetime));
      // convert the timezone naive datetime object to timezone aware
      *out = PyObject_CallMethod(tzinfo.obj(), "fromutc", "O", naive_datetime);
      // the timezone naive object is no longer required
      Py_DECREF(naive_datetime);
      RETURN_IF_PYERROR();
      return Status::OK();
    };

    if (!type.timezone().empty() && !options.ignore_timezone) {
      // convert timezone aware
      PyObject* tzobj;
      ARROW_ASSIGN_OR_RAISE(tzobj, internal::StringToTzinfo(type.timezone()));
      tzinfo.reset(tzobj);
      RETURN_IF_PYERROR();
      RETURN_NOT_OK(
          ConvertAsPyObjects<Type>(options, data, ConvertTimezoneAware, out_values));
    } else {
      // convert timezone naive
      RETURN_NOT_OK(
          ConvertAsPyObjects<Type>(options, data, ConvertTimezoneNaive, out_values));
    }

    return Status::OK();
  }

  // decimal128 -> Python decimal.Decimal via string round-trip.
  Status Visit(const Decimal128Type& type) {
    OwnedRef decimal;
    OwnedRef Decimal;
    RETURN_NOT_OK(internal::ImportModule("decimal", &decimal));
    RETURN_NOT_OK(internal::ImportFromModule(decimal.obj(), "Decimal", &Decimal));
    PyObject* decimal_constructor = Decimal.obj();

    for (int c = 0; c < data.num_chunks(); c++) {
      const auto& arr = checked_cast<const arrow::Decimal128Array&>(*data.chunk(c));

      for (int64_t i = 0; i < arr.length(); ++i) {
        if (arr.IsNull(i)) {
          Py_INCREF(Py_None);
          *out_values++ = Py_None;
        } else {
          *out_values++ =
              internal::DecimalFromString(decimal_constructor, arr.FormatValue(i));
          RETURN_IF_PYERROR();
        }
      }
    }

    return Status::OK();
  }

  // decimal256 -> Python decimal.Decimal; same strategy as decimal128.
  Status Visit(const Decimal256Type& type) {
    OwnedRef decimal;
    OwnedRef Decimal;
    RETURN_NOT_OK(internal::ImportModule("decimal", &decimal));
    RETURN_NOT_OK(internal::ImportFromModule(decimal.obj(), "Decimal", &Decimal));
    PyObject* decimal_constructor = Decimal.obj();

    for (int c = 0; c < data.num_chunks(); c++) {
      const auto& arr = checked_cast<const arrow::Decimal256Array&>(*data.chunk(c));

      for (int64_t i = 0; i < arr.length(); ++i) {
        if (arr.IsNull(i)) {
          Py_INCREF(Py_None);
          *out_values++ = Py_None;
        } else {
          *out_values++ =
              internal::DecimalFromString(decimal_constructor, arr.FormatValue(i));
          RETURN_IF_PYERROR();
        }
      }
    }

    return Status::OK();
  }

  // Lists (fixed-size and variable-length) -> Python lists, provided the
  // child value type is itself convertible.
  template <typename T>
  enable_if_t<is_fixed_size_list_type<T>::value || is_var_length_list_type<T>::value,
              Status>
  Visit(const T& type) {
    using ArrayType = typename TypeTraits<T>::ArrayType;
    if (!ListTypeSupported(*type.value_type())) {
      return Status::NotImplemented(
          "Not implemented type for conversion from List to Pandas: ",
          type.value_type()->ToString());
    }
    return ConvertListsLike<ArrayType>(options, data, out_values);
  }

  Status Visit(const MapType& type) { return ConvertMap(options, data, out_values); }

  Status Visit(const StructType& type) {
    return ConvertStruct(options, data, out_values);
  }

  // Catch-all: these types are never converted through the object path
  // (floats use numeric blocks; dictionary/extension/union are handled by
  // dedicated writers or not supported at all).
  template <typename Type>
  enable_if_t<is_floating_type<Type>::value ||
                  std::is_same<DictionaryType, Type>::value ||
                  std::is_same<DurationType, Type>::value ||
                  std::is_same<ExtensionType, Type>::value ||
                  std::is_base_of<IntervalType, Type>::value ||
                  std::is_base_of<UnionType, Type>::value,
              Status>
  Visit(const Type& type) {
    return Status::NotImplemented("No implemented conversion to object dtype: ",
                                  type.ToString());
  }
};
+
+class ObjectWriter : public TypedPandasWriter<NPY_OBJECT> {
+ public:
+ using TypedPandasWriter<NPY_OBJECT>::TypedPandasWriter;
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
+ PyAcquireGIL lock;
+ ObjectWriterVisitor visitor{this->options_, *data,
+ this->GetBlockColumnStart(rel_placement)};
+ return VisitTypeInline(*data->type(), &visitor);
+ }
+};
+
+static inline bool IsNonNullContiguous(const ChunkedArray& data) {
+ return data.num_chunks() == 1 && data.null_count() == 0;
+}
+
+template <int NPY_TYPE>
+class IntWriter : public TypedPandasWriter<NPY_TYPE> {
+ public:
+ using ArrowType = typename npy_traits<NPY_TYPE>::TypeClass;
+ using TypedPandasWriter<NPY_TYPE>::TypedPandasWriter;
+
+ bool CanZeroCopy(const ChunkedArray& data) const override {
+ return IsNonNullContiguous(data);
+ }
+
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
+ RETURN_NOT_OK(this->CheckTypeExact(*data->type(), ArrowType::type_id));
+ ConvertIntegerNoNullsSameType<typename ArrowType::c_type>(
+ this->options_, *data, this->GetBlockColumnStart(rel_placement));
+ return Status::OK();
+ }
+};
+
+template <int NPY_TYPE>
+class FloatWriter : public TypedPandasWriter<NPY_TYPE> {
+ public:
+ using ArrowType = typename npy_traits<NPY_TYPE>::TypeClass;
+ using TypedPandasWriter<NPY_TYPE>::TypedPandasWriter;
+ using T = typename ArrowType::c_type;
+
+ bool CanZeroCopy(const ChunkedArray& data) const override {
+ return IsNonNullContiguous(data) && data.type()->id() == ArrowType::type_id;
+ }
+
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
+ Type::type in_type = data->type()->id();
+ auto out_values = this->GetBlockColumnStart(rel_placement);
+
+#define INTEGER_CASE(IN_TYPE) \
+ ConvertIntegerWithNulls<IN_TYPE, T>(this->options_, *data, out_values); \
+ break;
+
+ switch (in_type) {
+ case Type::UINT8:
+ INTEGER_CASE(uint8_t);
+ case Type::INT8:
+ INTEGER_CASE(int8_t);
+ case Type::UINT16:
+ INTEGER_CASE(uint16_t);
+ case Type::INT16:
+ INTEGER_CASE(int16_t);
+ case Type::UINT32:
+ INTEGER_CASE(uint32_t);
+ case Type::INT32:
+ INTEGER_CASE(int32_t);
+ case Type::UINT64:
+ INTEGER_CASE(uint64_t);
+ case Type::INT64:
+ INTEGER_CASE(int64_t);
+ case Type::HALF_FLOAT:
+ ConvertNumericNullableCast(*data, npy_traits<NPY_TYPE>::na_sentinel, out_values);
+ case Type::FLOAT:
+ ConvertNumericNullableCast(*data, npy_traits<NPY_TYPE>::na_sentinel, out_values);
+ break;
+ case Type::DOUBLE:
+ ConvertNumericNullableCast(*data, npy_traits<NPY_TYPE>::na_sentinel, out_values);
+ break;
+ default:
+ return Status::NotImplemented("Cannot write Arrow data of type ",
+ data->type()->ToString(),
+ " to a Pandas floating point block");
+ }
+
+#undef INTEGER_CASE
+
+ return Status::OK();
+ }
+};
+
// Concrete writer aliases, one per primitive NumPy output dtype.
using UInt8Writer = IntWriter<NPY_UINT8>;
using Int8Writer = IntWriter<NPY_INT8>;
using UInt16Writer = IntWriter<NPY_UINT16>;
using Int16Writer = IntWriter<NPY_INT16>;
using UInt32Writer = IntWriter<NPY_UINT32>;
using Int32Writer = IntWriter<NPY_INT32>;
using UInt64Writer = IntWriter<NPY_UINT64>;
using Int64Writer = IntWriter<NPY_INT64>;
using Float16Writer = FloatWriter<NPY_FLOAT16>;
using Float32Writer = FloatWriter<NPY_FLOAT32>;
using Float64Writer = FloatWriter<NPY_FLOAT64>;
+
+class BoolWriter : public TypedPandasWriter<NPY_BOOL> {
+ public:
+ using TypedPandasWriter<NPY_BOOL>::TypedPandasWriter;
+
+ Status TransferSingle(std::shared_ptr<ChunkedArray> data, PyObject* py_ref) override {
+ RETURN_NOT_OK(
+ CheckNoZeroCopy("Zero copy conversions not possible with "
+ "boolean types"));
+ RETURN_NOT_OK(EnsureAllocated());
+ return CopyInto(data, /*rel_placement=*/0);
+ }
+
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
+ RETURN_NOT_OK(this->CheckTypeExact(*data->type(), Type::BOOL));
+ auto out_values = this->GetBlockColumnStart(rel_placement);
+ for (int c = 0; c < data->num_chunks(); c++) {
+ const auto& arr = checked_cast<const BooleanArray&>(*data->chunk(c));
+ for (int64_t i = 0; i < arr.length(); ++i) {
+ *out_values++ = static_cast<uint8_t>(arr.Value(i));
+ }
+ }
+ return Status::OK();
+ }
+};
+
+// ----------------------------------------------------------------------
+// Date / timestamp types
+
+template <typename T, int64_t SHIFT>
+inline void ConvertDatetimeLikeNanos(const ChunkedArray& data, int64_t* out_values) {
+ for (int c = 0; c < data.num_chunks(); c++) {
+ const auto& arr = *data.chunk(c);
+ const T* in_values = GetPrimitiveValues<T>(arr);
+
+ for (int64_t i = 0; i < arr.length(); ++i) {
+ *out_values++ = arr.IsNull(i) ? kPandasTimestampNull
+ : (static_cast<int64_t>(in_values[i]) * SHIFT);
+ }
+ }
+}
+
+template <typename T, int SHIFT>
+void ConvertDatesShift(const ChunkedArray& data, int64_t* out_values) {
+ for (int c = 0; c < data.num_chunks(); c++) {
+ const auto& arr = *data.chunk(c);
+ const T* in_values = GetPrimitiveValues<T>(arr);
+ for (int64_t i = 0; i < arr.length(); ++i) {
+ *out_values++ = arr.IsNull(i) ? kPandasTimestampNull
+ : static_cast<int64_t>(in_values[i]) / SHIFT;
+ }
+ }
+}
+
+class DatetimeDayWriter : public TypedPandasWriter<NPY_DATETIME> {
+ public:
+ using TypedPandasWriter<NPY_DATETIME>::TypedPandasWriter;
+
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
+ int64_t* out_values = this->GetBlockColumnStart(rel_placement);
+ const auto& type = checked_cast<const DateType&>(*data->type());
+ switch (type.unit()) {
+ case DateUnit::DAY:
+ ConvertDatesShift<int32_t, 1LL>(*data, out_values);
+ break;
+ case DateUnit::MILLI:
+ ConvertDatesShift<int64_t, 86400000LL>(*data, out_values);
+ break;
+ }
+ return Status::OK();
+ }
+
+ protected:
+ Status Allocate() override {
+ RETURN_NOT_OK(this->AllocateNDArray(NPY_DATETIME));
+ SetDatetimeUnit(NPY_FR_D);
+ return Status::OK();
+ }
+};
+
+template <TimeUnit::type UNIT>
+class DatetimeWriter : public TypedPandasWriter<NPY_DATETIME> {
+ public:
+ using TypedPandasWriter<NPY_DATETIME>::TypedPandasWriter;
+
+ bool CanZeroCopy(const ChunkedArray& data) const override {
+ if (data.type()->id() == Type::TIMESTAMP) {
+ const auto& type = checked_cast<const TimestampType&>(*data.type());
+ return IsNonNullContiguous(data) && type.unit() == UNIT;
+ } else {
+ return false;
+ }
+ }
+
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
+ const auto& ts_type = checked_cast<const TimestampType&>(*data->type());
+ DCHECK_EQ(UNIT, ts_type.unit()) << "Should only call instances of this writer "
+ << "with arrays of the correct unit";
+ ConvertNumericNullable<int64_t>(*data, kPandasTimestampNull,
+ this->GetBlockColumnStart(rel_placement));
+ return Status::OK();
+ }
+
+ protected:
+ Status Allocate() override {
+ RETURN_NOT_OK(this->AllocateNDArray(NPY_DATETIME));
+ SetDatetimeUnit(internal::NumPyFrequency(UNIT));
+ return Status::OK();
+ }
+};
+
// Timestamp writers that preserve the original (non-nanosecond) unit.
using DatetimeSecondWriter = DatetimeWriter<TimeUnit::SECOND>;
using DatetimeMilliWriter = DatetimeWriter<TimeUnit::MILLI>;
using DatetimeMicroWriter = DatetimeWriter<TimeUnit::MICRO>;
+
// Writer that coerces any date/timestamp input to datetime64[ns]: date32 and
// date64 are scaled directly, coarser timestamp units go through a compute
// Cast to timestamp(ns).
class DatetimeNanoWriter : public DatetimeWriter<TimeUnit::NANO> {
 public:
  using DatetimeWriter<TimeUnit::NANO>::DatetimeWriter;

  Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
    Type::type type = data->type()->id();
    int64_t* out_values = this->GetBlockColumnStart(rel_placement);
    compute::ExecContext ctx(options_.pool);
    // Overflow behavior of the cast follows the caller's safe_cast option.
    compute::CastOptions options;
    if (options_.safe_cast) {
      options = compute::CastOptions::Safe();
    } else {
      options = compute::CastOptions::Unsafe();
    }
    Datum out;
    auto target_type = timestamp(TimeUnit::NANO);

    if (type == Type::DATE32) {
      // Convert from days since epoch to datetime64[ns]
      ConvertDatetimeLikeNanos<int32_t, kNanosecondsInDay>(*data, out_values);
    } else if (type == Type::DATE64) {
      // Date64Type is millisecond timestamp stored as int64_t
      // TODO(wesm): Do we want to make sure to zero out the milliseconds?
      // Scale milliseconds -> nanoseconds (x 1,000,000).
      ConvertDatetimeLikeNanos<int64_t, 1000000L>(*data, out_values);
    } else if (type == Type::TIMESTAMP) {
      const auto& ts_type = checked_cast<const TimestampType&>(*data->type());

      if (ts_type.unit() == TimeUnit::NANO) {
        // Already nanoseconds: copy with null substitution only.
        ConvertNumericNullable<int64_t>(*data, kPandasTimestampNull, out_values);
      } else if (ts_type.unit() == TimeUnit::MICRO || ts_type.unit() == TimeUnit::MILLI ||
                 ts_type.unit() == TimeUnit::SECOND) {
        // Coarser units: cast to timestamp(ns) first, then copy.
        ARROW_ASSIGN_OR_RAISE(out, compute::Cast(data, target_type, options, &ctx));
        ConvertNumericNullable<int64_t>(*out.chunked_array(), kPandasTimestampNull,
                                        out_values);
      } else {
        return Status::NotImplemented("Unsupported time unit");
      }
    } else {
      return Status::NotImplemented("Cannot write Arrow data of type ",
                                    data->type()->ToString(),
                                    " to a Pandas datetime block.");
    }
    return Status::OK();
  }
};
+
// Nanosecond datetime writer for timezone-aware columns. Always a singleton
// (1-column) block; the timezone string is attached to the result metadata so
// pandas can localize the values.
class DatetimeTZWriter : public DatetimeNanoWriter {
 public:
  DatetimeTZWriter(const PandasOptions& options, const std::string& timezone,
                   int64_t num_rows)
      : DatetimeNanoWriter(options, num_rows, 1), timezone_(timezone) {}

 protected:
  // Timezone-aware blocks are returned 1-D rather than 2-D.
  Status GetResultBlock(PyObject** out) override {
    RETURN_NOT_OK(MakeBlock1D());
    *out = block_arr_.obj();
    return Status::OK();
  }

  Status AddResultMetadata(PyObject* result) override {
    PyObject* py_tz = PyUnicode_FromStringAndSize(
        timezone_.c_str(), static_cast<Py_ssize_t>(timezone_.size()));
    RETURN_IF_PYERROR();
    // NOTE(review): PyDict_SetItemString's return value is not checked here,
    // consistent with the other writers in this file.
    PyDict_SetItemString(result, "timezone", py_tz);
    Py_DECREF(py_tz);
    return Status::OK();
  }

 private:
  std::string timezone_;
};
+
+template <TimeUnit::type UNIT>
+class TimedeltaWriter : public TypedPandasWriter<NPY_TIMEDELTA> {
+ public:
+ using TypedPandasWriter<NPY_TIMEDELTA>::TypedPandasWriter;
+
+ Status AllocateTimedelta(int ndim) {
+ RETURN_NOT_OK(this->AllocateNDArray(NPY_TIMEDELTA, ndim));
+ SetDatetimeUnit(internal::NumPyFrequency(UNIT));
+ return Status::OK();
+ }
+
+ bool CanZeroCopy(const ChunkedArray& data) const override {
+ const auto& type = checked_cast<const DurationType&>(*data.type());
+ return IsNonNullContiguous(data) && type.unit() == UNIT;
+ }
+
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
+ const auto& type = checked_cast<const DurationType&>(*data->type());
+ DCHECK_EQ(UNIT, type.unit()) << "Should only call instances of this writer "
+ << "with arrays of the correct unit";
+ ConvertNumericNullable<int64_t>(*data, kPandasTimestampNull,
+ this->GetBlockColumnStart(rel_placement));
+ return Status::OK();
+ }
+
+ protected:
+ Status Allocate() override { return AllocateTimedelta(2); }
+};
+
// Duration writers that preserve the original (non-nanosecond) unit.
using TimedeltaSecondWriter = TimedeltaWriter<TimeUnit::SECOND>;
using TimedeltaMilliWriter = TimedeltaWriter<TimeUnit::MILLI>;
using TimedeltaMicroWriter = TimedeltaWriter<TimeUnit::MICRO>;
+
// Writer that coerces any duration unit to timedelta64[ns] by multiplying
// values up to nanoseconds.
class TimedeltaNanoWriter : public TimedeltaWriter<TimeUnit::NANO> {
 public:
  using TimedeltaWriter<TimeUnit::NANO>::TimedeltaWriter;

  Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
    Type::type type = data->type()->id();
    int64_t* out_values = this->GetBlockColumnStart(rel_placement);
    if (type == Type::DURATION) {
      const auto& ts_type = checked_cast<const DurationType&>(*data->type());
      if (ts_type.unit() == TimeUnit::NANO) {
        // Already nanoseconds: copy with null substitution only.
        ConvertNumericNullable<int64_t>(*data, kPandasTimestampNull, out_values);
      } else if (ts_type.unit() == TimeUnit::MICRO) {
        // us -> ns: x 1,000
        ConvertDatetimeLikeNanos<int64_t, 1000L>(*data, out_values);
      } else if (ts_type.unit() == TimeUnit::MILLI) {
        // ms -> ns: x 1,000,000
        ConvertDatetimeLikeNanos<int64_t, 1000000L>(*data, out_values);
      } else if (ts_type.unit() == TimeUnit::SECOND) {
        // s -> ns: x 1,000,000,000
        ConvertDatetimeLikeNanos<int64_t, 1000000000L>(*data, out_values);
      } else {
        return Status::NotImplemented("Unsupported time unit");
      }
    } else {
      return Status::NotImplemented("Cannot write Arrow data of type ",
                                    data->type()->ToString(),
                                    " to a Pandas timedelta block.");
    }
    return Status::OK();
  }
};
+
+Status MakeZeroLengthArray(const std::shared_ptr<DataType>& type,
+ std::shared_ptr<Array>* out) {
+ std::unique_ptr<ArrayBuilder> builder;
+ RETURN_NOT_OK(MakeBuilder(default_memory_pool(), type, &builder));
+ RETURN_NOT_OK(builder->Resize(0));
+ return builder->Finish(out);
+}
+
+bool NeedDictionaryUnification(const ChunkedArray& data) {
+ if (data.num_chunks() < 2) {
+ return false;
+ }
+ const auto& arr_first = checked_cast<const DictionaryArray&>(*data.chunk(0));
+ for (int c = 1; c < data.num_chunks(); c++) {
+ const auto& arr = checked_cast<const DictionaryArray&>(*data.chunk(c));
+ if (!(arr_first.dictionary()->Equals(arr.dictionary()))) {
+ return true;
+ }
+ }
+ return false;
+}
+
// Writer for dictionary-encoded columns, producing the pandas Categorical
// layout: an index ndarray plus a "dictionary" of category values and an
// "ordered" flag in the result metadata. Always a singleton (1-column) block.
template <typename IndexType>
class CategoricalWriter
    : public TypedPandasWriter<arrow_traits<IndexType::type_id>::npy_type> {
 public:
  using TRAITS = arrow_traits<IndexType::type_id>;
  using ArrayType = typename TypeTraits<IndexType>::ArrayType;
  using T = typename TRAITS::T;

  explicit CategoricalWriter(const PandasOptions& options, int64_t num_rows)
      : TypedPandasWriter<TRAITS::npy_type>(options, num_rows, 1),
        ordered_(false),
        needs_copy_(false) {}

  // Categorical data is only written via TransferSingle / Write, never into a
  // shared consolidated block.
  Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
    return Status::NotImplemented("categorical type");
  }

  Status TransferSingle(std::shared_ptr<ChunkedArray> data, PyObject* py_ref) override {
    const auto& dict_type = checked_cast<const DictionaryType&>(*data->type());
    std::shared_ptr<Array> dict;
    if (data->num_chunks() == 0) {
      // no dictionary values => create empty array
      RETURN_NOT_OK(this->AllocateNDArray(TRAITS::npy_type, 1));
      RETURN_NOT_OK(MakeZeroLengthArray(dict_type.value_type(), &dict));
    } else {
      DCHECK_EQ(IndexType::type_id, dict_type.index_type()->id());
      RETURN_NOT_OK(WriteIndices(*data, &dict));
    }

    // Convert the dictionary (category values) itself to a pandas object.
    PyObject* pydict;
    RETURN_NOT_OK(ConvertArrayToPandas(this->options_, dict, nullptr, &pydict));
    dictionary_.reset(pydict);
    ordered_ = dict_type.ordered();
    return Status::OK();
  }

  Status Write(std::shared_ptr<ChunkedArray> data, int64_t abs_placement,
               int64_t rel_placement) override {
    RETURN_NOT_OK(this->EnsurePlacementAllocated());
    RETURN_NOT_OK(TransferSingle(data, /*py_ref=*/nullptr));
    this->placement_data_[rel_placement] = abs_placement;
    return Status::OK();
  }

  // Series result: {"indices": ndarray, "dictionary": ..., "ordered": ...}.
  Status GetSeriesResult(PyObject** out) override {
    PyAcquireGIL lock;

    PyObject* result = PyDict_New();
    RETURN_IF_PYERROR();

    // Expected single array dictionary layout
    PyDict_SetItemString(result, "indices", this->block_arr_.obj());
    RETURN_IF_PYERROR();
    RETURN_NOT_OK(AddResultMetadata(result));

    *out = result;
    return Status::OK();
  }

 protected:
  Status AddResultMetadata(PyObject* result) override {
    PyDict_SetItemString(result, "dictionary", dictionary_.obj());
    PyObject* py_ordered = ordered_ ? Py_True : Py_False;
    Py_INCREF(py_ordered);
    PyDict_SetItemString(result, "ordered", py_ordered);
    return Status::OK();
  }

  // All chunks share one dictionary: copy indices straight through, writing
  // -1 for nulls.
  Status WriteIndicesUniform(const ChunkedArray& data) {
    RETURN_NOT_OK(this->AllocateNDArray(TRAITS::npy_type, 1));
    T* out_values = reinterpret_cast<T*>(this->block_data_);

    for (int c = 0; c < data.num_chunks(); c++) {
      const auto& arr = checked_cast<const DictionaryArray&>(*data.chunk(c));
      const auto& indices = checked_cast<const ArrayType&>(*arr.indices());
      auto values = reinterpret_cast<const T*>(indices.raw_values());

      RETURN_NOT_OK(CheckIndexBounds(*indices.data(), arr.dictionary()->length()));
      // Null is -1 in CategoricalBlock
      for (int i = 0; i < arr.length(); ++i) {
        if (indices.IsValid(i)) {
          *out_values++ = values[i];
        } else {
          *out_values++ = -1;
        }
      }
    }
    return Status::OK();
  }

  // Chunks have differing dictionaries: unify them, then remap each chunk's
  // indices through its transpose map. Output indices are int32 regardless of
  // the input index width.
  Status WriteIndicesVarying(const ChunkedArray& data, std::shared_ptr<Array>* out_dict) {
    // Yield int32 indices to allow for dictionary outgrowing the current index
    // type
    RETURN_NOT_OK(this->AllocateNDArray(NPY_INT32, 1));
    auto out_values = reinterpret_cast<int32_t*>(this->block_data_);

    const auto& dict_type = checked_cast<const DictionaryType&>(*data.type());

    ARROW_ASSIGN_OR_RAISE(auto unifier, DictionaryUnifier::Make(dict_type.value_type(),
                                                                this->options_.pool));
    for (int c = 0; c < data.num_chunks(); c++) {
      const auto& arr = checked_cast<const DictionaryArray&>(*data.chunk(c));
      const auto& indices = checked_cast<const ArrayType&>(*arr.indices());
      auto values = reinterpret_cast<const T*>(indices.raw_values());

      std::shared_ptr<Buffer> transpose_buffer;
      RETURN_NOT_OK(unifier->Unify(*arr.dictionary(), &transpose_buffer));

      auto transpose = reinterpret_cast<const int32_t*>(transpose_buffer->data());
      int64_t dict_length = arr.dictionary()->length();

      RETURN_NOT_OK(CheckIndexBounds(*indices.data(), dict_length));

      // Null is -1 in CategoricalBlock
      for (int i = 0; i < arr.length(); ++i) {
        if (indices.IsValid(i)) {
          *out_values++ = transpose[values[i]];
        } else {
          *out_values++ = -1;
        }
      }
    }

    std::shared_ptr<DataType> unused_type;
    return unifier->GetResult(&unused_type, out_dict);
  }

  // Write the index array, choosing between a zero-copy view (single
  // null-free chunk), a uniform copy, or full dictionary unification.
  Status WriteIndices(const ChunkedArray& data, std::shared_ptr<Array>* out_dict) {
    DCHECK_GT(data.num_chunks(), 0);

    // Sniff the first chunk
    const auto& arr_first = checked_cast<const DictionaryArray&>(*data.chunk(0));
    const auto indices_first = std::static_pointer_cast<ArrayType>(arr_first.indices());

    if (data.num_chunks() == 1 && indices_first->null_count() == 0) {
      RETURN_NOT_OK(
          CheckIndexBounds(*indices_first->data(), arr_first.dictionary()->length()));

      PyObject* wrapped;
      npy_intp dims[1] = {static_cast<npy_intp>(this->num_rows_)};
      RETURN_NOT_OK(MakeNumPyView(indices_first, /*py_ref=*/nullptr, TRAITS::npy_type,
                                  /*ndim=*/1, dims, &wrapped));
      this->SetBlockData(wrapped);
      *out_dict = arr_first.dictionary();
    } else {
      RETURN_NOT_OK(this->CheckNotZeroCopyOnly(data));
      if (NeedDictionaryUnification(data)) {
        RETURN_NOT_OK(WriteIndicesVarying(data, out_dict));
      } else {
        RETURN_NOT_OK(WriteIndicesUniform(data));
        *out_dict = arr_first.dictionary();
      }
    }
    return Status::OK();
  }

  // Converted dictionary (category values) as a Python object.
  OwnedRefNoGIL dictionary_;
  bool ordered_;
  bool needs_copy_;
};
+
+class ExtensionWriter : public PandasWriter {
+ public:
+ using PandasWriter::PandasWriter;
+
+ Status Allocate() override {
+ // no-op
+ return Status::OK();
+ }
+
+ Status TransferSingle(std::shared_ptr<ChunkedArray> data, PyObject* py_ref) override {
+ PyAcquireGIL lock;
+ PyObject* py_array;
+ py_array = wrap_chunked_array(data);
+ py_array_.reset(py_array);
+
+ return Status::OK();
+ }
+
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
+ return TransferSingle(data, nullptr);
+ }
+
+ Status GetDataFrameResult(PyObject** out) override {
+ PyAcquireGIL lock;
+ PyObject* result = PyDict_New();
+ RETURN_IF_PYERROR();
+
+ PyDict_SetItemString(result, "py_array", py_array_.obj());
+ PyDict_SetItemString(result, "placement", placement_arr_.obj());
+ *out = result;
+ return Status::OK();
+ }
+
+ Status GetSeriesResult(PyObject** out) override {
+ *out = py_array_.detach();
+ return Status::OK();
+ }
+
+ protected:
+ OwnedRefNoGIL py_array_;
+};
+
// Factory mapping a PandasWriter::type tag (plus the Arrow type, needed for
// categorical index width and timestamp timezone) to a concrete writer
// instance.
Status MakeWriter(const PandasOptions& options, PandasWriter::type writer_type,
                  const DataType& type, int64_t num_rows, int num_columns,
                  std::shared_ptr<PandasWriter>* writer) {
#define BLOCK_CASE(NAME, TYPE)                                            \
  case PandasWriter::NAME:                                                \
    *writer = std::make_shared<TYPE>(options, num_rows, num_columns);     \
    break;

#define CATEGORICAL_CASE(TYPE)                                                 \
  case TYPE::type_id:                                                          \
    *writer = std::make_shared<CategoricalWriter<TYPE>>(options, num_rows);    \
    break;

  switch (writer_type) {
    case PandasWriter::CATEGORICAL: {
      // Dispatch on the dictionary's index type; only signed indices are
      // supported.
      const auto& index_type = *checked_cast<const DictionaryType&>(type).index_type();
      switch (index_type.id()) {
        CATEGORICAL_CASE(Int8Type);
        CATEGORICAL_CASE(Int16Type);
        CATEGORICAL_CASE(Int32Type);
        CATEGORICAL_CASE(Int64Type);
        case Type::UINT8:
        case Type::UINT16:
        case Type::UINT32:
        case Type::UINT64:
          return Status::TypeError(
              "Converting unsigned dictionary indices to pandas",
              " not yet supported, index type: ", index_type.ToString());
        default:
          // Unreachable
          DCHECK(false);
          break;
      }
    } break;
    case PandasWriter::EXTENSION:
      *writer = std::make_shared<ExtensionWriter>(options, num_rows, num_columns);
      break;
      BLOCK_CASE(OBJECT, ObjectWriter);
      BLOCK_CASE(UINT8, UInt8Writer);
      BLOCK_CASE(INT8, Int8Writer);
      BLOCK_CASE(UINT16, UInt16Writer);
      BLOCK_CASE(INT16, Int16Writer);
      BLOCK_CASE(UINT32, UInt32Writer);
      BLOCK_CASE(INT32, Int32Writer);
      BLOCK_CASE(UINT64, UInt64Writer);
      BLOCK_CASE(INT64, Int64Writer);
      BLOCK_CASE(HALF_FLOAT, Float16Writer);
      BLOCK_CASE(FLOAT, Float32Writer);
      BLOCK_CASE(DOUBLE, Float64Writer);
      BLOCK_CASE(BOOL, BoolWriter);
      BLOCK_CASE(DATETIME_DAY, DatetimeDayWriter);
      BLOCK_CASE(DATETIME_SECOND, DatetimeSecondWriter);
      BLOCK_CASE(DATETIME_MILLI, DatetimeMilliWriter);
      BLOCK_CASE(DATETIME_MICRO, DatetimeMicroWriter);
      BLOCK_CASE(DATETIME_NANO, DatetimeNanoWriter);
      BLOCK_CASE(TIMEDELTA_SECOND, TimedeltaSecondWriter);
      BLOCK_CASE(TIMEDELTA_MILLI, TimedeltaMilliWriter);
      BLOCK_CASE(TIMEDELTA_MICRO, TimedeltaMicroWriter);
      BLOCK_CASE(TIMEDELTA_NANO, TimedeltaNanoWriter);
    case PandasWriter::DATETIME_NANO_TZ: {
      // Timezone-aware writer needs the timezone string from the Arrow type.
      const auto& ts_type = checked_cast<const TimestampType&>(type);
      *writer = std::make_shared<DatetimeTZWriter>(options, ts_type.timezone(), num_rows);
    } break;
    default:
      return Status::NotImplemented("Unsupported block type");
  }

#undef BLOCK_CASE
#undef CATEGORICAL_CASE

  return Status::OK();
}
+
// Decide which pandas block type a column converts into, based on the Arrow
// type, null count, and conversion options.
static Status GetPandasWriterType(const ChunkedArray& data, const PandasOptions& options,
                                  PandasWriter::type* output_type) {
// Integers with nulls cannot stay integers: they become object (if the user
// asked for integer_object_nulls) or double (NaN for nulls); otherwise the
// matching integer block is used.
#define INTEGER_CASE(NAME)                                                           \
  *output_type =                                                                     \
      data.null_count() > 0                                                          \
          ? options.integer_object_nulls ? PandasWriter::OBJECT : PandasWriter::DOUBLE \
          : PandasWriter::NAME;                                                      \
  break;

  switch (data.type()->id()) {
    case Type::BOOL:
      // Nullable booleans must go through the object path.
      *output_type = data.null_count() > 0 ? PandasWriter::OBJECT : PandasWriter::BOOL;
      break;
    case Type::UINT8:
      INTEGER_CASE(UINT8);
    case Type::INT8:
      INTEGER_CASE(INT8);
    case Type::UINT16:
      INTEGER_CASE(UINT16);
    case Type::INT16:
      INTEGER_CASE(INT16);
    case Type::UINT32:
      INTEGER_CASE(UINT32);
    case Type::INT32:
      INTEGER_CASE(INT32);
    case Type::UINT64:
      INTEGER_CASE(UINT64);
    case Type::INT64:
      INTEGER_CASE(INT64);
    case Type::HALF_FLOAT:
      *output_type = PandasWriter::HALF_FLOAT;
      break;
    case Type::FLOAT:
      *output_type = PandasWriter::FLOAT;
      break;
    case Type::DOUBLE:
      *output_type = PandasWriter::DOUBLE;
      break;
    // Types with no native pandas block representation become object dtype.
    case Type::STRING:        // fall through
    case Type::LARGE_STRING:  // fall through
    case Type::BINARY:        // fall through
    case Type::LARGE_BINARY:
    case Type::NA:                 // fall through
    case Type::FIXED_SIZE_BINARY:  // fall through
    case Type::STRUCT:             // fall through
    case Type::TIME32:             // fall through
    case Type::TIME64:             // fall through
    case Type::DECIMAL128:         // fall through
    case Type::DECIMAL256:         // fall through
      *output_type = PandasWriter::OBJECT;
      break;
    case Type::DATE32:  // fall through
    case Type::DATE64:
      if (options.date_as_object) {
        *output_type = PandasWriter::OBJECT;
      } else {
        // datetime64[ns] when coercing, else day-resolution datetime64[D].
        *output_type = options.coerce_temporal_nanoseconds ? PandasWriter::DATETIME_NANO
                                                           : PandasWriter::DATETIME_DAY;
      }
      break;
    case Type::TIMESTAMP: {
      const auto& ts_type = checked_cast<const TimestampType&>(*data.type());
      if (options.timestamp_as_object && ts_type.unit() != TimeUnit::NANO) {
        // Nanoseconds are never out of bounds for pandas, so in that case
        // we don't convert to object
        *output_type = PandasWriter::OBJECT;
      } else if (!ts_type.timezone().empty()) {
        *output_type = PandasWriter::DATETIME_NANO_TZ;
      } else if (options.coerce_temporal_nanoseconds) {
        *output_type = PandasWriter::DATETIME_NANO;
      } else {
        // Keep the column's own unit.
        switch (ts_type.unit()) {
          case TimeUnit::SECOND:
            *output_type = PandasWriter::DATETIME_SECOND;
            break;
          case TimeUnit::MILLI:
            *output_type = PandasWriter::DATETIME_MILLI;
            break;
          case TimeUnit::MICRO:
            *output_type = PandasWriter::DATETIME_MICRO;
            break;
          case TimeUnit::NANO:
            *output_type = PandasWriter::DATETIME_NANO;
            break;
        }
      }
    } break;
    case Type::DURATION: {
      const auto& dur_type = checked_cast<const DurationType&>(*data.type());
      if (options.coerce_temporal_nanoseconds) {
        *output_type = PandasWriter::TIMEDELTA_NANO;
      } else {
        // Keep the column's own unit.
        switch (dur_type.unit()) {
          case TimeUnit::SECOND:
            *output_type = PandasWriter::TIMEDELTA_SECOND;
            break;
          case TimeUnit::MILLI:
            *output_type = PandasWriter::TIMEDELTA_MILLI;
            break;
          case TimeUnit::MICRO:
            *output_type = PandasWriter::TIMEDELTA_MICRO;
            break;
          case TimeUnit::NANO:
            *output_type = PandasWriter::TIMEDELTA_NANO;
            break;
        }
      }
    } break;
    case Type::FIXED_SIZE_LIST:
    case Type::LIST:
    case Type::LARGE_LIST:
    case Type::MAP: {
      // Lists become object dtype, but only if the child type is supported.
      auto list_type = std::static_pointer_cast<BaseListType>(data.type());
      if (!ListTypeSupported(*list_type->value_type())) {
        return Status::NotImplemented("Not implemented type for Arrow list to pandas: ",
                                      list_type->value_type()->ToString());
      }
      *output_type = PandasWriter::OBJECT;
    } break;
    case Type::DICTIONARY:
      *output_type = PandasWriter::CATEGORICAL;
      break;
    case Type::EXTENSION:
      *output_type = PandasWriter::EXTENSION;
      break;
    default:
      return Status::NotImplemented(
          "No known equivalent Pandas block for Arrow data of type ",
          data.type()->ToString(), " is known.");
  }
  return Status::OK();
}
+
+// Construct the exact pandas "BlockManager" memory layout
+//
+// * For each column determine the correct output pandas type
+// * Allocate 2D blocks (ncols x nrows) for each distinct data type in output
+// * Allocate block placement arrays
+// * Write Arrow columns out into each slice of memory; populate block
+// * placement arrays as we go
+class PandasBlockCreator {
+ public:
+  using WriterMap = std::unordered_map<int, std::shared_ptr<PandasWriter>>;
+
+  // Takes ownership of the field and column vectors. All columns are assumed
+  // to have the same length; num_rows_ is taken from the first column.
+  explicit PandasBlockCreator(const PandasOptions& options, FieldVector fields,
+                              ChunkedArrayVector arrays)
+      : options_(options), fields_(std::move(fields)), arrays_(std::move(arrays)) {
+    num_columns_ = static_cast<int>(arrays_.size());
+    // NOTE(review): num_rows_ stays uninitialized when there are no columns;
+    // presumably no writer is created in that case — confirm.
+    if (num_columns_ > 0) {
+      num_rows_ = arrays_[0]->length();
+    }
+    column_block_placement_.resize(num_columns_);
+  }
+
+  // Build the Python list of blocks making up the DataFrame result.
+  virtual Status Convert(PyObject** out) = 0;
+
+  // Append each writer's finished DataFrame block to the Python list `list`.
+  // Callers must hold the GIL.
+  Status AppendBlocks(const WriterMap& blocks, PyObject* list) {
+    for (const auto& it : blocks) {
+      PyObject* item;
+      RETURN_NOT_OK(it.second->GetDataFrameResult(&item));
+      if (PyList_Append(list, item) < 0) {
+        RETURN_IF_PYERROR();
+      }
+
+      // ARROW-1017; PyList_Append increments object refcount
+      Py_DECREF(item);
+    }
+    return Status::OK();
+  }
+
+ protected:
+  PandasOptions options_;
+
+  FieldVector fields_;
+  ChunkedArrayVector arrays_;
+  int num_columns_;
+  int64_t num_rows_;
+
+  // column num -> relative placement within internal block
+  std::vector<int> column_block_placement_;
+};
+
+// Creates one consolidated 2D block per output pandas dtype, except for
+// categorical/tz-aware-datetime/extension columns which each get their own
+// single-column block.
+class ConsolidatedBlockCreator : public PandasBlockCreator {
+ public:
+  using PandasBlockCreator::PandasBlockCreator;
+
+  // Allocate all blocks, write every column into its slice, then collect the
+  // finished blocks into a Python list (GIL is taken only for the collection).
+  Status Convert(PyObject** out) override {
+    column_types_.resize(num_columns_);
+    RETURN_NOT_OK(CreateBlocks());
+    RETURN_NOT_OK(WriteTableToBlocks());
+    PyAcquireGIL lock;
+
+    PyObject* result = PyList_New(0);
+    RETURN_IF_PYERROR();
+
+    RETURN_NOT_OK(AppendBlocks(blocks_, result));
+    RETURN_NOT_OK(AppendBlocks(singleton_blocks_, result));
+
+    *out = result;
+    return Status::OK();
+  }
+
+  // Determine the output block type for one column; columns named in
+  // options_.extension_columns are forced to EXTENSION.
+  Status GetBlockType(int column_index, PandasWriter::type* out) {
+    if (options_.extension_columns.count(fields_[column_index]->name())) {
+      *out = PandasWriter::EXTENSION;
+      return Status::OK();
+    } else {
+      return GetPandasWriterType(*arrays_[column_index], options_, out);
+    }
+  }
+
+  // First pass: decide each column's block type and its placement within the
+  // shared block of that type, then allocate one writer per distinct type.
+  Status CreateBlocks() {
+    for (int i = 0; i < num_columns_; ++i) {
+      const DataType& type = *arrays_[i]->type();
+      PandasWriter::type output_type;
+      RETURN_NOT_OK(GetBlockType(i, &output_type));
+
+      int block_placement = 0;
+      std::shared_ptr<PandasWriter> writer;
+      if (output_type == PandasWriter::CATEGORICAL ||
+          output_type == PandasWriter::DATETIME_NANO_TZ ||
+          output_type == PandasWriter::EXTENSION) {
+        // These types cannot share a 2D block; one single-column writer each,
+        // keyed by column index.
+        RETURN_NOT_OK(MakeWriter(options_, output_type, type, num_rows_,
+                                 /*num_columns=*/1, &writer));
+        singleton_blocks_[i] = writer;
+      } else {
+        auto it = block_sizes_.find(output_type);
+        if (it != block_sizes_.end()) {
+          block_placement = it->second;
+          // Increment count
+          ++it->second;
+        } else {
+          // Add key to map
+          block_sizes_[output_type] = 1;
+        }
+      }
+      column_types_[i] = output_type;
+      column_block_placement_[i] = block_placement;
+    }
+
+    // Create normal non-categorical blocks
+    for (const auto& it : this->block_sizes_) {
+      PandasWriter::type output_type = static_cast<PandasWriter::type>(it.first);
+      std::shared_ptr<PandasWriter> block;
+      RETURN_NOT_OK(MakeWriter(this->options_, output_type, /*unused*/ *null(), num_rows_,
+                               it.second, &block));
+      this->blocks_[output_type] = block;
+    }
+    return Status::OK();
+  }
+
+  // Look up the writer for column i: singleton blocks are keyed by column
+  // index, consolidated blocks by block type.
+  Status GetWriter(int i, std::shared_ptr<PandasWriter>* block) {
+    PandasWriter::type output_type = this->column_types_[i];
+    switch (output_type) {
+      case PandasWriter::CATEGORICAL:
+      case PandasWriter::DATETIME_NANO_TZ:
+      case PandasWriter::EXTENSION: {
+        auto it = this->singleton_blocks_.find(i);
+        if (it == this->singleton_blocks_.end()) {
+          return Status::KeyError("No block allocated");
+        }
+        *block = it->second;
+      } break;
+      default:
+        auto it = this->blocks_.find(output_type);
+        if (it == this->blocks_.end()) {
+          return Status::KeyError("No block allocated");
+        }
+        *block = it->second;
+        break;
+    }
+    return Status::OK();
+  }
+
+  // Second pass: write every column into its (block, placement) slot,
+  // optionally in parallel across columns.
+  Status WriteTableToBlocks() {
+    auto WriteColumn = [this](int i) {
+      std::shared_ptr<PandasWriter> block;
+      RETURN_NOT_OK(this->GetWriter(i, &block));
+      // ARROW-3789 Use std::move on the array to permit self-destructing
+      return block->Write(std::move(arrays_[i]), i, this->column_block_placement_[i]);
+    };
+
+    return OptionalParallelFor(options_.use_threads, num_columns_, WriteColumn);
+  }
+
+ private:
+  // column num -> block type id
+  std::vector<PandasWriter::type> column_types_;
+
+  // block type -> type count
+  std::unordered_map<int, int> block_sizes_;
+  std::unordered_map<int, const DataType*> block_types_;
+
+  // block type -> block
+  WriterMap blocks_;
+
+  // column index -> single-column block (categorical/tz-datetime/extension)
+  WriterMap singleton_blocks_;
+};
+
+/// \brief Create blocks for pandas.DataFrame block manager using one block per
+/// column strategy. This permits some zero-copy optimizations as well as the
+/// ability for the table to "self-destruct" if selected by the user.
+class SplitBlockCreator : public PandasBlockCreator {
+ public:
+  using PandasBlockCreator::PandasBlockCreator;
+
+  // Create a fresh single-column writer for column i; the writer type is
+  // EXTENSION for columns named in options_.extension_columns, otherwise
+  // inferred from the column's data.
+  Status GetWriter(int i, std::shared_ptr<PandasWriter>* writer) {
+    PandasWriter::type output_type = PandasWriter::OBJECT;
+    const DataType& type = *arrays_[i]->type();
+    if (options_.extension_columns.count(fields_[i]->name())) {
+      output_type = PandasWriter::EXTENSION;
+    } else {
+      // Null count needed to determine output type
+      RETURN_NOT_OK(GetPandasWriterType(*arrays_[i], options_, &output_type));
+    }
+    return MakeWriter(this->options_, output_type, type, num_rows_, 1, writer);
+  }
+
+  // Convert each column into its own one-column block, appending the block
+  // results to a Python list. Holds the GIL for the whole conversion.
+  Status Convert(PyObject** out) override {
+    PyAcquireGIL lock;
+
+    PyObject* result = PyList_New(0);
+    RETURN_IF_PYERROR();
+
+    for (int i = 0; i < num_columns_; ++i) {
+      std::shared_ptr<PandasWriter> writer;
+      RETURN_NOT_OK(GetWriter(i, &writer));
+      // ARROW-3789 Use std::move on the array to permit self-destructing
+      RETURN_NOT_OK(writer->Write(std::move(arrays_[i]), i, /*rel_placement=*/0));
+
+      PyObject* item;
+      RETURN_NOT_OK(writer->GetDataFrameResult(&item));
+      if (PyList_Append(result, item) < 0) {
+        RETURN_IF_PYERROR();
+      }
+      // PyList_Append increments object refcount
+      Py_DECREF(item);
+    }
+
+    *out = result;
+    return Status::OK();
+  }
+
+ private:
+  // NOTE(review): this member appears unused in this translation unit;
+  // writers are created and dropped per column in Convert() — confirm.
+  std::vector<std::shared_ptr<PandasWriter>> writers_;
+};
+
+// Dictionary-encode, in place, every column that must become a pandas
+// Categorical: columns listed in options.categorical_columns (if not already
+// dictionary-typed) and, when options.strings_to_categorical is set, all
+// binary/string-like columns. Updates the matching fields' types as well.
+// Fails if options.zero_copy_only is set and any column needs encoding.
+Status ConvertCategoricals(const PandasOptions& options, ChunkedArrayVector* arrays,
+                           FieldVector* fields) {
+  std::vector<int> columns_to_encode;
+
+  // For Categorical conversions
+  auto EncodeColumn = [&](int j) {
+    int i = columns_to_encode[j];
+    if (options.zero_copy_only) {
+      return Status::Invalid("Need to dictionary encode a column, but ",
+                             "only zero-copy conversions allowed");
+    }
+    compute::ExecContext ctx(options.pool);
+    ARROW_ASSIGN_OR_RAISE(
+        Datum out, DictionaryEncode((*arrays)[i],
+                                    compute::DictionaryEncodeOptions::Defaults(), &ctx));
+    (*arrays)[i] = out.chunked_array();
+    (*fields)[i] = (*fields)[i]->WithType((*arrays)[i]->type());
+    return Status::OK();
+  };
+
+  if (!options.categorical_columns.empty()) {
+    for (int i = 0; i < static_cast<int>(arrays->size()); i++) {
+      if ((*arrays)[i]->type()->id() != Type::DICTIONARY &&
+          options.categorical_columns.count((*fields)[i]->name())) {
+        columns_to_encode.push_back(i);
+      }
+    }
+  }
+  if (options.strings_to_categorical) {
+    for (int i = 0; i < static_cast<int>(arrays->size()); i++) {
+      if (is_base_binary_like((*arrays)[i]->type()->id())) {
+        columns_to_encode.push_back(i);
+      }
+    }
+  }
+  // Encode the selected columns, optionally in parallel.
+  return OptionalParallelFor(options.use_threads,
+                             static_cast<int>(columns_to_encode.size()), EncodeColumn);
+}
+
+} // namespace
+
+// Convert a single Array to a pandas Series by wrapping it in a one-chunk
+// ChunkedArray and delegating to ConvertChunkedArrayToPandas.
+Status ConvertArrayToPandas(const PandasOptions& options, std::shared_ptr<Array> arr,
+                            PyObject* py_ref, PyObject** out) {
+  return ConvertChunkedArrayToPandas(
+      options, std::make_shared<ChunkedArray>(std::move(arr)), py_ref, out);
+}
+
+// Convert a ChunkedArray to a pandas Series. `py_ref` is the Python object
+// (if any) that keeps the Arrow memory alive for zero-copy results.
+Status ConvertChunkedArrayToPandas(const PandasOptions& options,
+                                   std::shared_ptr<ChunkedArray> arr, PyObject* py_ref,
+                                   PyObject** out) {
+  if (options.decode_dictionaries && arr->type()->id() == Type::DICTIONARY) {
+    const auto& dense_type =
+        checked_cast<const DictionaryType&>(*arr->type()).value_type();
+    RETURN_NOT_OK(DecodeDictionaries(options.pool, dense_type, &arr));
+    DCHECK_NE(arr->type()->id(), Type::DICTIONARY);
+
+    // The original Python DictionaryArray won't own the memory anymore
+    // as we actually built a new array when we decoded the DictionaryArray
+    // thus let the final resulting numpy array own the memory through a Capsule
+    py_ref = nullptr;
+  }
+
+  // Dictionary-encode string-like data here if requested; encoding requires
+  // an allocation, so it is incompatible with zero_copy_only.
+  if (options.strings_to_categorical && is_base_binary_like(arr->type()->id())) {
+    if (options.zero_copy_only) {
+      return Status::Invalid("Need to dictionary encode a column, but ",
+                             "only zero-copy conversions allowed");
+    }
+    compute::ExecContext ctx(options.pool);
+    ARROW_ASSIGN_OR_RAISE(
+        Datum out,
+        DictionaryEncode(arr, compute::DictionaryEncodeOptions::Defaults(), &ctx));
+    arr = out.chunked_array();
+  }
+
+  // Encoding was already handled above; don't redo it downstream.
+  PandasOptions modified_options = options;
+  modified_options.strings_to_categorical = false;
+
+  // ARROW-7596: We permit the hybrid Series/DataFrame code path to do zero copy
+  // optimizations that we do not allow in the default case when converting
+  // Table->DataFrame
+  modified_options.allow_zero_copy_blocks = true;
+
+  PandasWriter::type output_type;
+  RETURN_NOT_OK(GetPandasWriterType(*arr, modified_options, &output_type));
+  if (options.decode_dictionaries) {
+    DCHECK_NE(output_type, PandasWriter::CATEGORICAL);
+  }
+
+  // Single-column writer produces the Series result directly.
+  std::shared_ptr<PandasWriter> writer;
+  RETURN_NOT_OK(MakeWriter(modified_options, output_type, *arr->type(), arr->length(),
+                           /*num_columns=*/1, &writer));
+  RETURN_NOT_OK(writer->TransferSingle(std::move(arr), py_ref));
+  return writer->GetSeriesResult(out);
+}
+
+// Convert a Table to the pandas.DataFrame block structure, choosing between
+// one-block-per-column (split_blocks) and consolidated per-dtype blocks.
+Status ConvertTableToPandas(const PandasOptions& options, std::shared_ptr<Table> table,
+                            PyObject** out) {
+  ChunkedArrayVector arrays = table->columns();
+  FieldVector fields = table->fields();
+
+  // ARROW-3789: allow "self-destructing" by releasing references to columns as
+  // we convert them to pandas
+  table = nullptr;
+
+  RETURN_NOT_OK(ConvertCategoricals(options, &arrays, &fields));
+
+  // Categorical handling is done; clear these so downstream code doesn't
+  // re-encode.
+  PandasOptions modified_options = options;
+  modified_options.strings_to_categorical = false;
+  modified_options.categorical_columns.clear();
+
+  if (options.split_blocks) {
+    modified_options.allow_zero_copy_blocks = true;
+    SplitBlockCreator helper(modified_options, std::move(fields), std::move(arrays));
+    return helper.Convert(out);
+  } else {
+    ConsolidatedBlockCreator helper(modified_options, std::move(fields),
+                                    std::move(arrays));
+    return helper.Convert(out);
+  }
+}
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/arrow_to_pandas.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/arrow_to_pandas.h
new file mode 100644
index 0000000000..6570364b8d
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/arrow_to_pandas.h
@@ -0,0 +1,124 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Functions for converting between pandas's NumPy-based data representation
+// and Arrow data structures
+
+#pragma once
+
+#include "arrow/python/platform.h"
+
+#include <memory>
+#include <string>
+#include <unordered_set>
+
+#include "arrow/memory_pool.h"
+#include "arrow/python/visibility.h"
+
+namespace arrow {
+
+class Array;
+class ChunkedArray;
+class Column;
+class DataType;
+class MemoryPool;
+class Status;
+class Table;
+
+namespace py {
+
+struct PandasOptions {
+  /// arrow::MemoryPool to use for memory allocations
+  MemoryPool* pool = default_memory_pool();
+
+  /// If true, we will convert all string columns to categoricals
+  bool strings_to_categorical = false;
+  /// If true, fail any conversion that would require copying/allocating
+  bool zero_copy_only = false;
+  /// If true, represent integer nulls with Python objects — TODO confirm
+  bool integer_object_nulls = false;
+  /// If true, convert dates to Python date objects — TODO confirm
+  bool date_as_object = false;
+  /// If true, convert timestamps to Python datetime objects — TODO confirm
+  bool timestamp_as_object = false;
+  /// If true, parallelize the conversion across columns
+  bool use_threads = false;
+
+  /// Coerce all date and timestamp to datetime64[ns]
+  bool coerce_temporal_nanoseconds = false;
+
+  /// Used to maintain backwards compatibility for
+  /// timezone bugs (see ARROW-9528). Should be removed
+  /// after Arrow 2.0 release.
+  bool ignore_timezone = false;
+
+  /// \brief If true, do not create duplicate PyObject versions of equal
+  /// objects. This only applies to immutable objects like strings or datetime
+  /// objects
+  bool deduplicate_objects = false;
+
+  /// \brief For certain data types, a cast is needed in order to store the
+  /// data in a pandas DataFrame or Series (e.g. timestamps are always stored
+  /// as nanoseconds in pandas). This option controls whether it is a safe
+  /// cast or not.
+  bool safe_cast = true;
+
+  /// \brief If true, create one block per column rather than consolidated
+  /// blocks (1 per data type). Do zero-copy wrapping when there are no
+  /// nulls. pandas currently will consolidate the blocks on its own, causing
+  /// increased memory use, so keep this in mind if you are working on a
+  /// memory-constrained situation.
+  bool split_blocks = false;
+
+  /// \brief If true, allow non-writable zero-copy views to be created for
+  /// single column blocks. This option is also used to provide zero copy for
+  /// Series data
+  bool allow_zero_copy_blocks = false;
+
+  /// \brief If true, attempt to deallocate buffers in passed Arrow object if
+  /// it is the only remaining shared_ptr copy of it. See ARROW-3789 for
+  /// original context for this feature. Only currently implemented for Table
+  /// conversions
+  bool self_destruct = false;
+
+  // Used internally for nested arrays.
+  bool decode_dictionaries = false;
+
+  // Columns that should be casted to categorical
+  std::unordered_set<std::string> categorical_columns;
+
+  // Columns that should be passed through to be converted to
+  // ExtensionArray/Block
+  std::unordered_set<std::string> extension_columns;
+};
+
+// Convert a single Array to a pandas Series. py_ref, if non-null, is the
+// Python object that owns the Arrow memory (for zero-copy results).
+ARROW_PYTHON_EXPORT
+Status ConvertArrayToPandas(const PandasOptions& options, std::shared_ptr<Array> arr,
+                            PyObject* py_ref, PyObject** out);
+
+// Convert a ChunkedArray to a pandas Series; see ConvertArrayToPandas.
+ARROW_PYTHON_EXPORT
+Status ConvertChunkedArrayToPandas(const PandasOptions& options,
+                                   std::shared_ptr<ChunkedArray> col, PyObject* py_ref,
+                                   PyObject** out);
+
+// Convert a whole table as efficiently as possible to a pandas.DataFrame.
+//
+// The returned Python object is a list of tuples consisting of the exact 2D
+// BlockManager structure of the pandas.DataFrame used as of pandas 0.19.x.
+//
+// tuple item: (indices: ndarray[int32], block: ndarray[TYPE, ndim=2])
+ARROW_PYTHON_EXPORT
+Status ConvertTableToPandas(const PandasOptions& options, std::shared_ptr<Table> table,
+                            PyObject** out);
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/benchmark.cc b/contrib/libs/apache/arrow/cpp/src/arrow/python/benchmark.cc
new file mode 100644
index 0000000000..2d29f69d25
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/benchmark.cc
@@ -0,0 +1,38 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <arrow/python/benchmark.h>
+#include <arrow/python/helpers.h>
+
+namespace arrow {
+namespace py {
+namespace benchmark {
+
+// Run PandasObjectIsNull over every element of a Python list; sets a Python
+// TypeError (and returns) if the argument is not exactly a list. Results are
+// discarded — this exists purely for benchmarking the null check.
+void Benchmark_PandasObjectIsNull(PyObject* list) {
+  if (!PyList_CheckExact(list)) {
+    PyErr_SetString(PyExc_TypeError, "expected a list");
+    return;
+  }
+  Py_ssize_t i, n = PyList_GET_SIZE(list);
+  for (i = 0; i < n; i++) {
+    internal::PandasObjectIsNull(PyList_GET_ITEM(list, i));
+  }
+}
+
+} // namespace benchmark
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/benchmark.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/benchmark.h
new file mode 100644
index 0000000000..8060dd3372
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/benchmark.h
@@ -0,0 +1,36 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/python/platform.h"
+
+#include "arrow/python/visibility.h"
+
+namespace arrow {
+namespace py {
+namespace benchmark {
+
+// Micro-benchmark routines for use from ASV
+
+// Run PandasObjectIsNull() once over every object in *list*
+ARROW_PYTHON_EXPORT
+void Benchmark_PandasObjectIsNull(PyObject* list);
+
+} // namespace benchmark
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/common.cc b/contrib/libs/apache/arrow/cpp/src/arrow/python/common.cc
new file mode 100644
index 0000000000..6fe2ed4dae
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/common.cc
@@ -0,0 +1,203 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/python/common.h"
+
+#include <cstdlib>
+#include <mutex>
+#include <string>
+
+#include "arrow/memory_pool.h"
+#include "arrow/status.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/logging.h"
+
+#include "arrow/python/helpers.h"
+
+namespace arrow {
+
+using internal::checked_cast;
+
+namespace py {
+
+static std::mutex memory_pool_mutex;
+static MemoryPool* default_python_pool = nullptr;
+
+// Install a process-wide default memory pool for PyArrow; thread-safe.
+void set_default_memory_pool(MemoryPool* pool) {
+  std::lock_guard<std::mutex> guard(memory_pool_mutex);
+  default_python_pool = pool;
+}
+
+// Return the pool set via set_default_memory_pool(), falling back to Arrow's
+// global default pool when none was set; thread-safe.
+MemoryPool* get_memory_pool() {
+  std::lock_guard<std::mutex> guard(memory_pool_mutex);
+  if (default_python_pool) {
+    return default_python_pool;
+  } else {
+    return default_memory_pool();
+  }
+}
+
+// ----------------------------------------------------------------------
+// PythonErrorDetail
+
+namespace {
+
+const char kErrorDetailTypeId[] = "arrow::py::PythonErrorDetail";
+
+// Try to match the Python exception type with an appropriate Status code
+// Try to match the Python exception type with an appropriate Status code.
+// Uses PyErr_GivenExceptionMatches, so subclasses of the listed exception
+// types also match; anything unrecognized maps to UnknownError.
+StatusCode MapPyError(PyObject* exc_type) {
+  StatusCode code;
+
+  if (PyErr_GivenExceptionMatches(exc_type, PyExc_MemoryError)) {
+    code = StatusCode::OutOfMemory;
+  } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_IndexError)) {
+    code = StatusCode::IndexError;
+  } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_KeyError)) {
+    code = StatusCode::KeyError;
+  } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_TypeError)) {
+    code = StatusCode::TypeError;
+  } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_ValueError) ||
+             PyErr_GivenExceptionMatches(exc_type, PyExc_OverflowError)) {
+    code = StatusCode::Invalid;
+  } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_EnvironmentError)) {
+    code = StatusCode::IOError;
+  } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_NotImplementedError)) {
+    code = StatusCode::NotImplemented;
+  } else {
+    code = StatusCode::UnknownError;
+  }
+  return code;
+}
+
+// PythonErrorDetail indicates a Python exception was raised.
+// PythonErrorDetail indicates a Python exception was raised. It owns strong
+// references to the fetched (type, value, traceback) triple so the error can
+// later be re-raised via RestorePyError().
+class PythonErrorDetail : public StatusDetail {
+ public:
+  const char* type_id() const override { return kErrorDetailTypeId; }
+
+  std::string ToString() const override {
+    // This is simple enough not to need the GIL
+    const auto ty = reinterpret_cast<const PyTypeObject*>(exc_type_.obj());
+    // XXX Should we also print traceback?
+    return std::string("Python exception: ") + ty->tp_name;
+  }
+
+  // Re-set the stored exception as the current Python error. PyErr_Restore
+  // steals a reference to each argument, hence the INCREFs — the detail
+  // keeps its own references and stays usable. Caller must hold the GIL.
+  void RestorePyError() const {
+    Py_INCREF(exc_type_.obj());
+    Py_INCREF(exc_value_.obj());
+    Py_INCREF(exc_traceback_.obj());
+    PyErr_Restore(exc_type_.obj(), exc_value_.obj(), exc_traceback_.obj());
+  }
+
+  PyObject* exc_type() const { return exc_type_.obj(); }
+
+  PyObject* exc_value() const { return exc_value_.obj(); }
+
+  // Capture the currently-set Python error (clearing it) into a new detail.
+  // Requires an error to be set; normalizes it and substitutes None for a
+  // missing traceback so RestorePyError() always has three objects.
+  static std::shared_ptr<PythonErrorDetail> FromPyError() {
+    PyObject* exc_type = nullptr;
+    PyObject* exc_value = nullptr;
+    PyObject* exc_traceback = nullptr;
+
+    PyErr_Fetch(&exc_type, &exc_value, &exc_traceback);
+    PyErr_NormalizeException(&exc_type, &exc_value, &exc_traceback);
+    ARROW_CHECK(exc_type)
+        << "PythonErrorDetail::FromPyError called without a Python error set";
+    DCHECK(PyType_Check(exc_type));
+    DCHECK(exc_value);  // Ensured by PyErr_NormalizeException, double-check
+    if (exc_traceback == nullptr) {
+      // Needed by PyErr_Restore()
+      Py_INCREF(Py_None);
+      exc_traceback = Py_None;
+    }
+
+    std::shared_ptr<PythonErrorDetail> detail(new PythonErrorDetail);
+    detail->exc_type_.reset(exc_type);
+    detail->exc_value_.reset(exc_value);
+    detail->exc_traceback_.reset(exc_traceback);
+    return detail;
+  }
+
+ protected:
+  PythonErrorDetail() = default;
+
+  OwnedRefNoGIL exc_type_, exc_value_, exc_traceback_;
+};
+
+} // namespace
+
+// ----------------------------------------------------------------------
+// Python exception <-> Status
+
+// Convert the currently-set Python error into a Status carrying a
+// PythonErrorDetail; the Python error state is consumed. If no explicit code
+// is given (UnknownError), one is derived from the exception type.
+Status ConvertPyError(StatusCode code) {
+  auto detail = PythonErrorDetail::FromPyError();
+  if (code == StatusCode::UnknownError) {
+    code = MapPyError(detail->exc_type());
+  }
+
+  std::string message;
+  RETURN_NOT_OK(internal::PyObject_StdStringStr(detail->exc_value(), &message));
+  return Status(code, message, detail);
+}
+
+// True iff `status` wraps a Python exception (i.e. carries a
+// PythonErrorDetail produced by ConvertPyError).
+bool IsPyError(const Status& status) {
+  if (status.ok()) {
+    return false;
+  }
+  auto detail = status.detail();
+  bool result = detail != nullptr && detail->type_id() == kErrorDetailTypeId;
+  return result;
+}
+
+// Re-raise the Python exception wrapped by `status`. Aborts (ARROW_CHECK)
+// if the status is not a Python error; see IsPyError().
+void RestorePyError(const Status& status) {
+  ARROW_CHECK(IsPyError(status));
+  const auto& detail = checked_cast<const PythonErrorDetail&>(*status.detail());
+  detail.RestorePyError();
+}
+
+// ----------------------------------------------------------------------
+// PyBuffer
+
+// Construct an empty buffer; populated later by Init().
+PyBuffer::PyBuffer() : Buffer(nullptr, 0) {}
+
+// Acquire a contiguous buffer view of `obj` via the Python buffer protocol.
+// PyObject_GetBuffer returns 0 on success, so the negated call means
+// "if the buffer was acquired". On failure the pending Python error is
+// converted to an Invalid Status.
+Status PyBuffer::Init(PyObject* obj) {
+  if (!PyObject_GetBuffer(obj, &py_buf_, PyBUF_ANY_CONTIGUOUS)) {
+    data_ = reinterpret_cast<const uint8_t*>(py_buf_.buf);
+    ARROW_CHECK_NE(data_, nullptr) << "Null pointer in Py_buffer";
+    size_ = py_buf_.len;
+    capacity_ = py_buf_.len;
+    is_mutable_ = !py_buf_.readonly;
+    return Status::OK();
+  } else {
+    return ConvertPyError(StatusCode::Invalid);
+  }
+}
+
+// Wrap a Python buffer-protocol object as an Arrow Buffer.
+Result<std::shared_ptr<Buffer>> PyBuffer::FromPyObject(PyObject* obj) {
+  PyBuffer* buf = new PyBuffer();
+  std::shared_ptr<Buffer> res(buf);
+  RETURN_NOT_OK(buf->Init(obj));
+  return res;
+}
+
+// Release the underlying Py_buffer (if Init succeeded); needs the GIL.
+PyBuffer::~PyBuffer() {
+  if (data_ != nullptr) {
+    PyAcquireGIL lock;
+    PyBuffer_Release(&py_buf_);
+  }
+}
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/common.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/common.h
new file mode 100644
index 0000000000..24dcb130a2
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/common.h
@@ -0,0 +1,360 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+#include <utility>
+
+#include "arrow/buffer.h"
+#include "arrow/python/pyarrow.h"
+#include "arrow/python/visibility.h"
+#include "arrow/result.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+
+class MemoryPool;
+template <class T>
+class Result;
+
+namespace py {
+
+// Convert current Python error to a Status. The Python error state is cleared
+// and can be restored with RestorePyError().
+ARROW_PYTHON_EXPORT Status ConvertPyError(StatusCode code = StatusCode::UnknownError);
+// Query whether the given Status is a Python error (as wrapped by ConvertPyError()).
+ARROW_PYTHON_EXPORT bool IsPyError(const Status& status);
+// Restore a Python error wrapped in a Status.
+ARROW_PYTHON_EXPORT void RestorePyError(const Status& status);
+
+// Catch a pending Python exception and return the corresponding Status.
+// If no exception is pending, Status::OK() is returned.
+inline Status CheckPyError(StatusCode code = StatusCode::UnknownError) {
+  // Fast path: no pending exception, nothing to convert.
+  if (ARROW_PREDICT_TRUE(!PyErr_Occurred())) {
+    return Status::OK();
+  } else {
+    return ConvertPyError(code);
+  }
+}
+
+#define RETURN_IF_PYERROR() ARROW_RETURN_NOT_OK(CheckPyError())
+
+#define PY_RETURN_IF_ERROR(CODE) ARROW_RETURN_NOT_OK(CheckPyError(CODE))
+
+// For Cython, as you can't define template C++ functions in Cython, only use them.
+// This function can set a Python exception. It assumes that T has a (cheap)
+// default constructor.
+// Unwrap a Result<T> for Cython callers: on success return the value, on
+// failure raise the corresponding Python exception (via check_status) and
+// return a default-constructed T.
+template <class T>
+T GetResultValue(Result<T> result) {
+  if (ARROW_PREDICT_TRUE(result.ok())) {
+    return *std::move(result);
+  } else {
+    int r = internal::check_status(result.status());  // takes the GIL
+    assert(r == -1);                                  // should have errored out
+    ARROW_UNUSED(r);
+    return {};
+  }
+}
+
+// A RAII-style helper that ensures the GIL is acquired inside a lexical block.
+class ARROW_PYTHON_EXPORT PyAcquireGIL {
+ public:
+  // Acquires the GIL immediately on construction.
+  PyAcquireGIL() : acquired_gil_(false) { acquire(); }
+
+  ~PyAcquireGIL() { release(); }
+
+  // Acquire the GIL if this helper does not already hold it (idempotent).
+  void acquire() {
+    if (!acquired_gil_) {
+      state_ = PyGILState_Ensure();
+      acquired_gil_ = true;
+    }
+  }
+
+  // idempotent
+  void release() {
+    if (acquired_gil_) {
+      PyGILState_Release(state_);
+      acquired_gil_ = false;
+    }
+  }
+
+ private:
+  bool acquired_gil_;
+  PyGILState_STATE state_;
+  ARROW_DISALLOW_COPY_AND_ASSIGN(PyAcquireGIL);
+};
+
+// A RAII-style helper that releases the GIL until the end of a lexical block
+class ARROW_PYTHON_EXPORT PyReleaseGIL {
+ public:
+  // Saves the thread state and releases the GIL on construction.
+  PyReleaseGIL() { saved_state_ = PyEval_SaveThread(); }
+
+  // Re-acquires the GIL and restores the saved thread state.
+  ~PyReleaseGIL() { PyEval_RestoreThread(saved_state_); }
+
+ private:
+  PyThreadState* saved_state_;
+  ARROW_DISALLOW_COPY_AND_ASSIGN(PyReleaseGIL);
+};
+
+// A helper to call safely into the Python interpreter from arbitrary C++ code.
+// The GIL is acquired, and the current thread's error status is preserved.
+// Invoke `func` with the GIL held, preserving any Python error that was
+// already set before the call (unless `func` itself produced a Python-error
+// Status, in which case the new error state is kept).
+template <typename Function>
+auto SafeCallIntoPython(Function&& func) -> decltype(func()) {
+  PyAcquireGIL lock;
+  PyObject* exc_type;
+  PyObject* exc_value;
+  PyObject* exc_traceback;
+  // Stash (and clear) any pre-existing Python error around the call.
+  PyErr_Fetch(&exc_type, &exc_value, &exc_traceback);
+  auto maybe_status = std::forward<Function>(func)();
+  // If the return Status is a "Python error", the current Python error status
+  // describes the error and shouldn't be clobbered.
+  if (!IsPyError(::arrow::internal::GenericToStatus(maybe_status)) &&
+      exc_type != NULLPTR) {
+    PyErr_Restore(exc_type, exc_value, exc_traceback);
+  }
+  return maybe_status;
+}
+
+// A RAII primitive that DECREFs the underlying PyObject* when it
+// goes out of scope.
+class ARROW_PYTHON_EXPORT OwnedRef {
+ public:
+  OwnedRef() : obj_(NULLPTR) {}
+  OwnedRef(OwnedRef&& other) : OwnedRef(other.detach()) {}
+  // Takes ownership of (steals) the given reference.
+  explicit OwnedRef(PyObject* obj) : obj_(obj) {}
+
+  // NOTE(review): unlike reset(), this does not Py_XDECREF the currently
+  // held object — move-assigning over a non-empty OwnedRef would leak a
+  // reference; presumably callers only move into empty refs. Confirm.
+  OwnedRef& operator=(OwnedRef&& other) {
+    obj_ = other.detach();
+    return *this;
+  }
+
+  ~OwnedRef() { reset(); }
+
+  // Release the current reference (if any) and take ownership of `obj`.
+  void reset(PyObject* obj) {
+    Py_XDECREF(obj_);
+    obj_ = obj;
+  }
+
+  void reset() { reset(NULLPTR); }
+
+  // Relinquish ownership: return the raw pointer and become empty.
+  PyObject* detach() {
+    PyObject* result = obj_;
+    obj_ = NULLPTR;
+    return result;
+  }
+
+  PyObject* obj() const { return obj_; }
+
+  PyObject** ref() { return &obj_; }
+
+  operator bool() const { return obj_ != NULLPTR; }
+
+ private:
+  ARROW_DISALLOW_COPY_AND_ASSIGN(OwnedRef);
+
+  PyObject* obj_;
+};
+
+// Same as OwnedRef, but ensures the GIL is taken when it goes out of scope.
+// This is for situations where the GIL is not always known to be held
+// (e.g. if it is released in the middle of a function for performance reasons)
+class ARROW_PYTHON_EXPORT OwnedRefNoGIL : public OwnedRef {
+ public:
+  OwnedRefNoGIL() : OwnedRef() {}
+  OwnedRefNoGIL(OwnedRefNoGIL&& other) : OwnedRef(other.detach()) {}
+  explicit OwnedRefNoGIL(PyObject* obj) : OwnedRef(obj) {}
+
+  // Ensure the GIL is held while the DECREF in reset() runs.
+  ~OwnedRefNoGIL() {
+    PyAcquireGIL lock;
+    reset();
+  }
+};
+
+// Wraps a Cython callback plus one bound PyObject* argument; specialized
+// below on the callback's return type.
+template <typename Fn>
+struct BoundFunction;
+
+template <typename... Args>
+struct BoundFunction<void(PyObject*, Args...)> {
+  // We bind `cdef void fn(object, ...)` to get a `Status(...)`
+  // where the Status contains any Python error raised by `fn`
+  using Unbound = void(PyObject*, Args...);
+  using Bound = Status(Args...);
+
+  BoundFunction(Unbound* unbound, PyObject* bound_arg)
+      : bound_arg_(bound_arg), unbound_(unbound) {}
+
+  // Call the callback with the GIL held and surface any raised Python
+  // exception as a Status.
+  Status Invoke(Args... args) const {
+    PyAcquireGIL lock;
+    unbound_(bound_arg_.obj(), std::forward<Args>(args)...);
+    RETURN_IF_PYERROR();
+    return Status::OK();
+  }
+
+  Unbound* unbound_;
+  OwnedRefNoGIL bound_arg_;
+};
+
+template <typename Return, typename... Args>
+struct BoundFunction<Return(PyObject*, Args...)> {
+  // We bind `cdef Return fn(object, ...)` to get a `Result<Return>(...)`
+  // where the Result contains any Python error raised by `fn` or the
+  // return value from `fn`.
+  using Unbound = Return(PyObject*, Args...);
+  using Bound = Result<Return>(Args...);
+
+  BoundFunction(Unbound* unbound, PyObject* bound_arg)
+      : bound_arg_(bound_arg), unbound_(unbound) {}
+
+  // Call the callback with the GIL held; a raised Python exception takes
+  // precedence over the returned value.
+  Result<Return> Invoke(Args... args) const {
+    PyAcquireGIL lock;
+    Return ret = unbound_(bound_arg_.obj(), std::forward<Args>(args)...);
+    RETURN_IF_PYERROR();
+    return ret;
+  }
+
+  Unbound* unbound_;
+  OwnedRefNoGIL bound_arg_;
+};
+
+// Bind `bound_arg` (a Python object, reference-counted by the closure) as the
+// first argument of `unbound`, producing a std::function of type OutFn that
+// reports Python errors through Status/Result. OutFn must match the Bound
+// signature of the corresponding BoundFunction specialization.
+template <typename OutFn, typename Return, typename... Args>
+std::function<OutFn> BindFunction(Return (*unbound)(PyObject*, Args...),
+                                  PyObject* bound_arg) {
+  using Fn = BoundFunction<Return(PyObject*, Args...)>;
+
+  static_assert(std::is_same<typename Fn::Bound, OutFn>::value,
+                "requested bound function of unsupported type");
+
+  // The BoundFunction's OwnedRefNoGIL takes ownership of this new reference.
+  Py_XINCREF(bound_arg);
+  auto bound_fn = std::make_shared<Fn>(unbound, bound_arg);
+  return
+      [bound_fn](Args... args) { return bound_fn->Invoke(std::forward<Args>(args)...); };
+}
+
+// A temporary conversion of a Python object to a bytes area. The view is
+// only valid while the source object (and `ref`, where used) stays alive.
+struct PyBytesView {
+  const char* bytes;
+  Py_ssize_t size;
+  bool is_utf8;
+
+  static Result<PyBytesView> FromString(PyObject* obj, bool check_utf8 = false) {
+    PyBytesView self;
+    ARROW_RETURN_NOT_OK(self.ParseString(obj, check_utf8));
+    return std::move(self);
+  }
+
+  static Result<PyBytesView> FromUnicode(PyObject* obj) {
+    PyBytesView self;
+    ARROW_RETURN_NOT_OK(self.ParseUnicode(obj));
+    return std::move(self);
+  }
+
+  static Result<PyBytesView> FromBinary(PyObject* obj) {
+    PyBytesView self;
+    ARROW_RETURN_NOT_OK(self.ParseBinary(obj));
+    return std::move(self);
+  }
+
+  // View the given Python object as string-like, i.e. str or (utf8) bytes
+  Status ParseString(PyObject* obj, bool check_utf8 = false) {
+    if (PyUnicode_Check(obj)) {
+      return ParseUnicode(obj);
+    } else {
+      ARROW_RETURN_NOT_OK(ParseBinary(obj));
+      if (check_utf8) {
+        // Check the bytes are valid utf-8 by attempting to decode them
+        OwnedRef decoded(PyUnicode_FromStringAndSize(bytes, size));
+        if (ARROW_PREDICT_TRUE(!PyErr_Occurred())) {
+          is_utf8 = true;
+        } else {
+          // Not utf-8; swallow the decode error and record the fact.
+          PyErr_Clear();
+          is_utf8 = false;
+        }
+      }
+      return Status::OK();
+    }
+  }
+
+  // View the given Python object as unicode string
+  Status ParseUnicode(PyObject* obj) {
+    // The utf-8 representation is cached on the unicode object
+    bytes = PyUnicode_AsUTF8AndSize(obj, &size);
+    RETURN_IF_PYERROR();
+    is_utf8 = true;
+    return Status::OK();
+  }
+
+  // View the given Python object as binary-like, i.e. bytes
+  Status ParseBinary(PyObject* obj) {
+    if (PyBytes_Check(obj)) {
+      bytes = PyBytes_AS_STRING(obj);
+      size = PyBytes_GET_SIZE(obj);
+      is_utf8 = false;
+    } else if (PyByteArray_Check(obj)) {
+      bytes = PyByteArray_AS_STRING(obj);
+      size = PyByteArray_GET_SIZE(obj);
+      is_utf8 = false;
+    } else if (PyMemoryView_Check(obj)) {
+      // NOTE(review): this local `ref` shadows the member `ref` below, so the
+      // new reference from PyMemoryView_GetContiguous is neither stored nor
+      // released here — verify against upstream whether the member was meant
+      // to hold it to keep the buffer alive.
+      PyObject* ref = PyMemoryView_GetContiguous(obj, PyBUF_READ, 'C');
+      RETURN_IF_PYERROR();
+      Py_buffer* buffer = PyMemoryView_GET_BUFFER(ref);
+      bytes = reinterpret_cast<const char*>(buffer->buf);
+      size = buffer->len;
+      is_utf8 = false;
+    } else {
+      return Status::TypeError("Expected bytes, got a '", Py_TYPE(obj)->tp_name,
+                               "' object");
+    }
+    return Status::OK();
+  }
+
+ protected:
+  // Optional owning reference keeping the viewed data alive.
+  OwnedRef ref;
+};
+
+// An Arrow Buffer backed by a Python buffer-protocol object.
+class ARROW_PYTHON_EXPORT PyBuffer : public Buffer {
+ public:
+  /// While memoryview objects support multi-dimensional buffers, PyBuffer only supports
+  /// one-dimensional byte buffers.
+  ~PyBuffer();
+
+  // Wrap a Python object exposing the buffer protocol; fails with an Invalid
+  // Status (carrying the Python error) if no contiguous buffer is available.
+  static Result<std::shared_ptr<Buffer>> FromPyObject(PyObject* obj);
+
+ private:
+  PyBuffer();
+  Status Init(PyObject*);
+
+  Py_buffer py_buf_;
+};
+
+// Set / return the common PyArrow memory pool
+ARROW_PYTHON_EXPORT void set_default_memory_pool(MemoryPool* pool);
+ARROW_PYTHON_EXPORT MemoryPool* get_memory_pool();
+
+// This is annoying: because C++11 does not allow implicit conversion of string
+// literals to non-const char*, we need to go through some gymnastics to use
+// PyObject_CallMethod without a lot of pain (its arguments are non-const
+// char*)
+// Thin wrapper that const_casts the method name and argspec so string
+// literals can be passed to PyObject_CallMethod (whose parameters are
+// non-const char*). Forwards all remaining arguments unchanged.
+template <typename... ArgTypes>
+static inline PyObject* cpp_PyObject_CallMethod(PyObject* obj, const char* method_name,
+                                                const char* argspec, ArgTypes... args) {
+  return PyObject_CallMethod(obj, const_cast<char*>(method_name),
+                             const_cast<char*>(argspec), args...);
+}
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/datetime.cc b/contrib/libs/apache/arrow/cpp/src/arrow/python/datetime.cc
new file mode 100644
index 0000000000..4b18918cbc
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/datetime.cc
@@ -0,0 +1,455 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+#include "arrow/python/datetime.h"
+
+#include <algorithm>
+#include <chrono>
+#include <iomanip>
+
+#include "arrow/python/common.h"
+#include "arrow/python/helpers.h"
+#include "arrow/python/platform.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/util/logging.h"
+#include "arrow/util/value_parsing.h"
+
+namespace arrow {
+namespace py {
+namespace internal {
+
+namespace {
+
+// Same as Regex '([+-])(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9])$'.
+// GCC 4.9 doesn't support regex, so handcode until support for it
+// is dropped.
+bool MatchFixedOffset(const std::string& tz, util::string_view* sign,
+ util::string_view* hour, util::string_view* minute) {
+ if (tz.size() < 5) {
+ return false;
+ }
+ const char* iter = tz.data();
+ if (*iter == '+' || *iter == '-') {
+ *sign = util::string_view(iter, 1);
+ iter++;
+ if (tz.size() < 6) {
+ return false;
+ }
+ }
+ if ((((*iter == '0' || *iter == '1') && *(iter + 1) >= '0' && *(iter + 1) <= '9') ||
+ (*iter == '2' && *(iter + 1) >= '0' && *(iter + 1) <= '3'))) {
+ *hour = util::string_view(iter, 2);
+ iter += 2;
+ } else {
+ return false;
+ }
+ if (*iter != ':') {
+ return false;
+ }
+ iter++;
+
+ if (*iter >= '0' && *iter <= '5' && *(iter + 1) >= '0' && *(iter + 1) <= '9') {
+ *minute = util::string_view(iter, 2);
+ iter += 2;
+ } else {
+ return false;
+ }
+ return iter == (tz.data() + tz.size());
+}
+
+} // namespace
+
// Global pointer to CPython's datetime C API table; datetime.h redefines
// PyDateTimeAPI to this variable so the import happens only once.
PyDateTime_CAPI* datetime_api = nullptr;

// Import the CPython datetime C API capsule into `datetime_api`.
// Must be called before using any of the PyDateTime_* helpers below;
// aborts the process if the import fails.
void InitDatetime() {
  PyAcquireGIL lock;
  datetime_api =
      reinterpret_cast<PyDateTime_CAPI*>(PyCapsule_Import(PyDateTime_CAPSULE_NAME, 0));
  if (datetime_api == nullptr) {
    Py_FatalError("Could not import datetime C API");
  }
}
+
// The following code is adapted from
// https://github.com/numpy/numpy/blob/master/numpy/core/src/multiarray/datetime.c

// Days per month, regular year and leap year
static int64_t _days_per_month_table[2][12] = {
    {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
    {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}};

// Gregorian leap year: divisible by 4, excluding centuries not divisible
// by 400.
static bool is_leapyear(int64_t year) {
  return (year & 0x3) == 0 &&  // year % 4 == 0
         ((year % 100) != 0 || (year % 400) == 0);
}

// Calculates the days offset from the 1970 epoch (1970-01-01 == 0).
// `date_month` and `date_day` are 1-based; dates before the epoch yield
// negative offsets.
static int64_t get_days_from_date(int64_t date_year, int64_t date_month,
                                  int64_t date_day) {
  const int64_t year = date_year - 1970;
  int64_t days = year * 365;

  // Adjust for leap days. The shifted copies of `year` below re-base the
  // truncating integer division on reference years (1968/1900/1600 going
  // backwards, 1972/2000 going forwards) so that exactly the right number
  // of leap days is counted on either side of the epoch.
  if (days >= 0) {
    // 1968 is the closest leap year before 1970.
    // Exclude the current year, so add 1.
    int64_t y = year + 1;
    // Add one day for each 4 years
    days += y / 4;
    // 1900 is the closest previous year divisible by 100
    y += 68;
    // Subtract one day for each 100 years
    days -= y / 100;
    // 1600 is the closest previous year divisible by 400
    y += 300;
    // Add one day for each 400 years
    days += y / 400;
  } else {
    // 1972 is the closest later year after 1970.
    // Include the current year, so subtract 2.
    int64_t y = year - 2;
    // Subtract one day for each 4 years
    days += y / 4;
    // 2000 is the closest later year divisible by 100
    y -= 28;
    // Add one day for each 100 years
    days -= y / 100;
    // 2000 is also the closest later year divisible by 400
    // Subtract one day for each 400 years
    days += y / 400;
  }

  // Add the lengths of all fully elapsed months of the target year.
  const int64_t* month_lengths = _days_per_month_table[is_leapyear(date_year)];
  const int64_t month = date_month - 1;
  for (int64_t i = 0; i < month; ++i) {
    days += month_lengths[i];
  }

  // Add the (1-based) day of the month.
  days += date_day - 1;

  return days;
}
+
// Modifies '*days_' to be the day offset within the year,
// and returns the year.
static int64_t days_to_yearsdays(int64_t* days_) {
  // Number of days in a full 400-year Gregorian cycle.
  const int64_t days_per_400years = (400 * 365 + 100 - 4 + 1);
  // Adjust so it's relative to the year 2000 (divisible by 400)
  int64_t days = (*days_) - (365 * 30 + 7);
  int64_t year;

  // Break down the 400 year cycle to get the year and day within the year
  if (days >= 0) {
    year = 400 * (days / days_per_400years);
    days = days % days_per_400years;
  } else {
    // Pre-2000 dates: round the quotient toward negative infinity, then
    // normalize the remainder into [0, days_per_400years).
    year = 400 * ((days - (days_per_400years - 1)) / days_per_400years);
    days = days % days_per_400years;
    if (days < 0) {
      days += days_per_400years;
    }
  }

  // Work out the year/day within the 400 year cycle.
  // The +/-1 adjustments below account for year 0 of each sub-cycle
  // (e.g. 2000 itself) being a leap year.
  if (days >= 366) {
    year += 100 * ((days - 1) / (100 * 365 + 25 - 1));
    days = (days - 1) % (100 * 365 + 25 - 1);
    if (days >= 365) {
      year += 4 * ((days + 1) / (4 * 365 + 1));
      days = (days + 1) % (4 * 365 + 1);
      if (days >= 366) {
        year += (days - 1) / 365;
        days = (days - 1) % 365;
      }
    }
  }

  *days_ = days;
  return year + 2000;
}
+
// Extracts the month and year and day number from a number of days
// (offset from the 1970 epoch). Month and day outputs are 1-based.
static void get_date_from_days(int64_t days, int64_t* date_year, int64_t* date_month,
                               int64_t* date_day) {
  int64_t *month_lengths, i;

  // Reduce to (year, day-of-year), then walk the months of that year
  // until the remaining day count falls inside the current month.
  *date_year = days_to_yearsdays(&days);
  month_lengths = _days_per_month_table[is_leapyear(*date_year)];

  for (i = 0; i < 12; ++i) {
    if (days < month_lengths[i]) {
      *date_month = i + 1;
      *date_day = days + 1;
      return;
    } else {
      days -= month_lengths[i];
    }
  }

  // Should never get here: days_to_yearsdays leaves days in [0, 365].
  return;
}
+
// Splitting time quantities, for example splitting total seconds into
// minutes and remaining seconds. After we run
//   int64_t remaining = split_time(total, quotient, &next)
// we have total == next * quotient + remaining, with
// 0 <= remaining < quotient even for negative totals (i.e. the quotient
// is effectively rounded toward negative infinity).
static inline int64_t split_time(int64_t total, int64_t quotient, int64_t* next) {
  int64_t div = total / quotient;
  int64_t rem = total % quotient;
  // C++ division truncates toward zero; shift negative remainders back
  // into [0, quotient) and compensate the quotient.
  if (rem < 0) {
    rem += quotient;
    --div;
  }
  *next = div;
  return rem;
}
+
// Split a time value expressed in `unit` into (hour, minute, second,
// microsecond) components. Nanosecond values must be a whole number of
// microseconds (Python's datetime.time has microsecond resolution).
// The case fallthroughs are intentional: NANO reduces to MICRO,
// MILLI reduces to SECOND after extracting the microsecond part.
static inline Status PyTime_convert_int(int64_t val, const TimeUnit::type unit,
                                        int64_t* hour, int64_t* minute, int64_t* second,
                                        int64_t* microsecond) {
  switch (unit) {
    case TimeUnit::NANO:
      if (val % 1000 != 0) {
        return Status::Invalid("Value ", val, " has non-zero nanoseconds");
      }
      val /= 1000;
      // fall through
    case TimeUnit::MICRO:
      *microsecond = split_time(val, 1000000LL, &val);
      *second = split_time(val, 60, &val);
      *minute = split_time(val, 60, hour);
      break;
    case TimeUnit::MILLI:
      *microsecond = split_time(val, 1000, &val) * 1000;
      // fall through
    case TimeUnit::SECOND:
      *second = split_time(val, 60, &val);
      *minute = split_time(val, 60, hour);
      break;
    default:
      // Unhandled unit: outputs are left untouched (callers zero-initialize).
      break;
  }
  return Status::OK();
}
+
// Split a date value expressed in `unit` into (year, month, day).
static inline Status PyDate_convert_int(int64_t val, const DateUnit unit, int64_t* year,
                                        int64_t* month, int64_t* day) {
  switch (unit) {
    case DateUnit::MILLI:
      // NOTE(review): integer division truncates toward zero, so negative
      // (pre-epoch) millisecond values that are not an exact multiple of a
      // day round toward the epoch instead of down -- confirm callers only
      // pass whole-day values.
      val /= 86400000LL;  // fall through
    case DateUnit::DAY:
      get_date_from_days(val, year, month, day);
    default:
      break;
  }
  return Status::OK();
}
+
// Convert a time value in `unit` into a new Python datetime.time object.
Status PyTime_from_int(int64_t val, const TimeUnit::type unit, PyObject** out) {
  int64_t hour = 0, minute = 0, second = 0, microsecond = 0;
  RETURN_NOT_OK(PyTime_convert_int(val, unit, &hour, &minute, &second, &microsecond));
  // NOTE(review): PyTime_FromTime may return NULL with a Python error set;
  // callers are expected to check the error indicator.
  *out = PyTime_FromTime(static_cast<int32_t>(hour), static_cast<int32_t>(minute),
                         static_cast<int32_t>(second), static_cast<int32_t>(microsecond));
  return Status::OK();
}

// Convert a date value in `unit` into a new Python datetime.date object.
Status PyDate_from_int(int64_t val, const DateUnit unit, PyObject** out) {
  int64_t year = 0, month = 0, day = 0;
  RETURN_NOT_OK(PyDate_convert_int(val, unit, &year, &month, &day));
  *out = PyDate_FromDate(static_cast<int32_t>(year), static_cast<int32_t>(month),
                         static_cast<int32_t>(day));
  return Status::OK();
}

// Convert a timestamp in `unit` into a new, naive (timezone-less) Python
// datetime.datetime object.
Status PyDateTime_from_int(int64_t val, const TimeUnit::type unit, PyObject** out) {
  int64_t hour = 0, minute = 0, second = 0, microsecond = 0;
  RETURN_NOT_OK(PyTime_convert_int(val, unit, &hour, &minute, &second, &microsecond));
  int64_t total_days = 0;
  // Fold whole days out of the hour count, then expand them to y/m/d.
  hour = split_time(hour, 24, &total_days);
  int64_t year = 0, month = 0, day = 0;
  get_date_from_days(total_days, &year, &month, &day);
  *out = PyDateTime_FromDateAndTime(
      static_cast<int32_t>(year), static_cast<int32_t>(month), static_cast<int32_t>(day),
      static_cast<int32_t>(hour), static_cast<int32_t>(minute),
      static_cast<int32_t>(second), static_cast<int32_t>(microsecond));
  return Status::OK();
}
+
// Number of days between the 1970 epoch and the given Python date.
int64_t PyDate_to_days(PyDateTime_Date* pydate) {
  return get_days_from_date(PyDateTime_GET_YEAR(pydate), PyDateTime_GET_MONTH(pydate),
                            PyDateTime_GET_DAY(pydate));
}

Result<int64_t> PyDateTime_utcoffset_s(PyObject* obj) {
  // calculate offset from UTC timezone in seconds
  // supports only PyDateTime_DateTime and PyDateTime_Time objects
  OwnedRef pyoffset(PyObject_CallMethod(obj, "utcoffset", NULL));
  RETURN_IF_PYERROR();
  if (pyoffset.obj() != nullptr && pyoffset.obj() != Py_None) {
    auto delta = reinterpret_cast<PyDateTime_Delta*>(pyoffset.obj());
    return internal::PyDelta_to_s(delta);
  } else {
    // Naive (timezone-less) values: report a zero offset.
    return 0;
  }
}
+
+Result<std::string> PyTZInfo_utcoffset_hhmm(PyObject* pytzinfo) {
+ // attempt to convert timezone offset objects to "+/-{hh}:{mm}" format
+ OwnedRef pydelta_object(PyObject_CallMethod(pytzinfo, "utcoffset", "O", Py_None));
+ RETURN_IF_PYERROR();
+
+ if (!PyDelta_Check(pydelta_object.obj())) {
+ return Status::Invalid(
+ "Object returned by tzinfo.utcoffset(None) is not an instance of "
+ "datetime.timedelta");
+ }
+ auto pydelta = reinterpret_cast<PyDateTime_Delta*>(pydelta_object.obj());
+
+ // retrieve the offset as seconds
+ auto total_seconds = internal::PyDelta_to_s(pydelta);
+
+ // determine whether the offset is positive or negative
+ auto sign = (total_seconds < 0) ? "-" : "+";
+ total_seconds = abs(total_seconds);
+
+ // calculate offset components
+ int64_t hours, minutes, seconds;
+ seconds = split_time(total_seconds, 60, &minutes);
+ minutes = split_time(minutes, 60, &hours);
+ if (seconds > 0) {
+ // check there are no remaining seconds
+ return Status::Invalid("Offset must represent whole number of minutes");
+ }
+
+ // construct the timezone string
+ std::stringstream stream;
+ stream << sign << std::setfill('0') << std::setw(2) << hours << ":" << std::setfill('0')
+ << std::setw(2) << minutes;
+ return stream.str();
+}
+
// Converted from python. See https://github.com/apache/arrow/pull/7604
// for details.
//
// Convert a timezone string to a Python tzinfo object (new reference).
// Fixed offsets of the form [+-]HH:MM are built via pytz.FixedOffset;
// anything else is passed to pytz.timezone(tz).
Result<PyObject*> StringToTzinfo(const std::string& tz) {
  util::string_view sign_str, hour_str, minute_str;
  OwnedRef pytz;
  RETURN_NOT_OK(internal::ImportModule("pytz", &pytz));

  if (MatchFixedOffset(tz, &sign_str, &hour_str, &minute_str)) {
    int sign = -1;
    // NOTE(review): MatchFixedOffset can match without an explicit sign,
    // leaving sign_str empty; such inputs are treated as negative offsets
    // here -- confirm that a sign is in fact always present.
    if (sign_str == "+") {
      sign = 1;
    }
    OwnedRef fixed_offset;
    RETURN_NOT_OK(internal::ImportFromModule(pytz.obj(), "FixedOffset", &fixed_offset));
    uint32_t minutes, hours;
    if (!::arrow::internal::ParseUnsigned(hour_str.data(), hour_str.size(), &hours) ||
        !::arrow::internal::ParseUnsigned(minute_str.data(), minute_str.size(),
                                          &minutes)) {
      return Status::Invalid("Invalid timezone: ", tz);
    }
    // pytz.FixedOffset takes the total signed offset in minutes.
    OwnedRef total_minutes(PyLong_FromLong(
        sign * ((static_cast<int>(hours) * 60) + static_cast<int>(minutes))));
    RETURN_IF_PYERROR();
    auto tzinfo =
        PyObject_CallFunctionObjArgs(fixed_offset.obj(), total_minutes.obj(), NULL);
    RETURN_IF_PYERROR();
    return tzinfo;
  }

  OwnedRef timezone;
  RETURN_NOT_OK(internal::ImportFromModule(pytz.obj(), "timezone", &timezone));
  OwnedRef py_tz_string(
      PyUnicode_FromStringAndSize(tz.c_str(), static_cast<Py_ssize_t>(tz.size())));
  auto tzinfo = PyObject_CallFunctionObjArgs(timezone.obj(), py_tz_string.obj(), NULL);
  RETURN_IF_PYERROR();
  return tzinfo;
}
+
// Render a tzinfo object as a string. Preference order, per the branches
// below: "UTC" for datetime.timezone.utc, "+HH:MM"/"-HH:MM" for fixed
// offsets, the pytz `zone` attribute, then tzname(None), then the
// HH:MM offset fallback.
Result<std::string> TzinfoToString(PyObject* tzinfo) {
  OwnedRef module_pytz;        // import pytz
  OwnedRef module_datetime;    // import datetime
  OwnedRef class_timezone;     // from datetime import timezone
  OwnedRef class_fixedoffset;  // from pytz import _FixedOffset

  // import necessary modules
  RETURN_NOT_OK(internal::ImportModule("pytz", &module_pytz));
  RETURN_NOT_OK(internal::ImportModule("datetime", &module_datetime));
  // import necessary classes
  RETURN_NOT_OK(
      internal::ImportFromModule(module_pytz.obj(), "_FixedOffset", &class_fixedoffset));
  RETURN_NOT_OK(
      internal::ImportFromModule(module_datetime.obj(), "timezone", &class_timezone));

  // check that it's a valid tzinfo object
  if (!PyTZInfo_Check(tzinfo)) {
    return Status::TypeError("Not an instance of datetime.tzinfo");
  }

  // if tzinfo is an instance of pytz._FixedOffset or datetime.timezone return the
  // HH:MM offset string representation
  // NOTE(review): PyObject_IsInstance can return -1 on error, which this
  // condition treats the same as "not an instance" -- confirm acceptable.
  if (PyObject_IsInstance(tzinfo, class_timezone.obj()) ||
      PyObject_IsInstance(tzinfo, class_fixedoffset.obj())) {
    // still recognize datetime.timezone.utc as UTC (instead of +00:00)
    OwnedRef tzname_object(PyObject_CallMethod(tzinfo, "tzname", "O", Py_None));
    RETURN_IF_PYERROR();
    if (PyUnicode_Check(tzname_object.obj())) {
      std::string result;
      RETURN_NOT_OK(internal::PyUnicode_AsStdString(tzname_object.obj(), &result));
      if (result == "UTC") {
        return result;
      }
    }
    return PyTZInfo_utcoffset_hhmm(tzinfo);
  }

  // try to look up zone attribute (present on pytz timezone objects)
  if (PyObject_HasAttrString(tzinfo, "zone")) {
    OwnedRef zone(PyObject_GetAttrString(tzinfo, "zone"));
    RETURN_IF_PYERROR();
    std::string result;
    RETURN_NOT_OK(internal::PyUnicode_AsStdString(zone.obj(), &result));
    return result;
  }

  // attempt to call tzinfo.tzname(None)
  OwnedRef tzname_object(PyObject_CallMethod(tzinfo, "tzname", "O", Py_None));
  RETURN_IF_PYERROR();
  if (PyUnicode_Check(tzname_object.obj())) {
    std::string result;
    RETURN_NOT_OK(internal::PyUnicode_AsStdString(tzname_object.obj(), &result));
    return result;
  }

  // fall back to HH:MM offset string representation based on tzinfo.utcoffset(None)
  return PyTZInfo_utcoffset_hhmm(tzinfo);
}
+
+} // namespace internal
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/datetime.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/datetime.h
new file mode 100644
index 0000000000..0072cdda4c
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/datetime.h
@@ -0,0 +1,183 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <algorithm>
+#include <chrono>
+
+#include "arrow/python/platform.h"
+#include "arrow/python/visibility.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/util/logging.h"
+
+// By default, PyDateTimeAPI is a *static* variable. This forces
+// PyDateTime_IMPORT to be called in every C/C++ module using the
+// C datetime API. This is error-prone and potentially costly.
+// Instead, we redefine PyDateTimeAPI to point to a global variable,
+// which is initialized once by calling InitDatetime().
+#define PyDateTimeAPI ::arrow::py::internal::datetime_api
+
+namespace arrow {
+namespace py {
+namespace internal {
+
+extern PyDateTime_CAPI* datetime_api;
+
+ARROW_PYTHON_EXPORT
+void InitDatetime();
+
// Total microseconds since midnight for a datetime.time object.
ARROW_PYTHON_EXPORT
inline int64_t PyTime_to_us(PyObject* pytime) {
  return (PyDateTime_TIME_GET_HOUR(pytime) * 3600000000LL +
          PyDateTime_TIME_GET_MINUTE(pytime) * 60000000LL +
          PyDateTime_TIME_GET_SECOND(pytime) * 1000000LL +
          PyDateTime_TIME_GET_MICROSECOND(pytime));
}

// Seconds since midnight (sub-second part truncated).
ARROW_PYTHON_EXPORT
inline int64_t PyTime_to_s(PyObject* pytime) { return PyTime_to_us(pytime) / 1000000; }

// Milliseconds since midnight (sub-millisecond part truncated).
ARROW_PYTHON_EXPORT
inline int64_t PyTime_to_ms(PyObject* pytime) { return PyTime_to_us(pytime) / 1000; }

// Nanoseconds since midnight.
ARROW_PYTHON_EXPORT
inline int64_t PyTime_to_ns(PyObject* pytime) { return PyTime_to_us(pytime) * 1000; }

// Convert a time value in `unit` to a new datetime.time object.
ARROW_PYTHON_EXPORT
Status PyTime_from_int(int64_t val, const TimeUnit::type unit, PyObject** out);

// Convert a date value in `unit` to a new datetime.date object.
ARROW_PYTHON_EXPORT
Status PyDate_from_int(int64_t val, const DateUnit unit, PyObject** out);

// WARNING: This function returns a naive datetime.
ARROW_PYTHON_EXPORT
Status PyDateTime_from_int(int64_t val, const TimeUnit::type unit, PyObject** out);

// This declaration must be the same as in filesystem/filesystem.h
using TimePoint =
    std::chrono::time_point<std::chrono::system_clock, std::chrono::nanoseconds>;

// Days between the 1970 epoch and the given date object.
ARROW_PYTHON_EXPORT
int64_t PyDate_to_days(PyDateTime_Date* pydate);
+
// Seconds between the epoch and midnight of the given date.
ARROW_PYTHON_EXPORT
inline int64_t PyDate_to_s(PyDateTime_Date* pydate) {
  return PyDate_to_days(pydate) * 86400LL;
}

// Milliseconds between the epoch and midnight of the given date.
ARROW_PYTHON_EXPORT
inline int64_t PyDate_to_ms(PyDateTime_Date* pydate) {
  return PyDate_to_days(pydate) * 86400000LL;
}

// Seconds between the epoch and the given datetime (microseconds ignored).
ARROW_PYTHON_EXPORT
inline int64_t PyDateTime_to_s(PyDateTime_DateTime* pydatetime) {
  return (PyDate_to_s(reinterpret_cast<PyDateTime_Date*>(pydatetime)) +
          PyDateTime_DATE_GET_HOUR(pydatetime) * 3600LL +
          PyDateTime_DATE_GET_MINUTE(pydatetime) * 60LL +
          PyDateTime_DATE_GET_SECOND(pydatetime));
}

// Milliseconds between the epoch and the given datetime
// (sub-millisecond part truncated).
ARROW_PYTHON_EXPORT
inline int64_t PyDateTime_to_ms(PyDateTime_DateTime* pydatetime) {
  return (PyDateTime_to_s(pydatetime) * 1000LL +
          PyDateTime_DATE_GET_MICROSECOND(pydatetime) / 1000);
}

// Microseconds between the epoch and the given datetime.
ARROW_PYTHON_EXPORT
inline int64_t PyDateTime_to_us(PyDateTime_DateTime* pydatetime) {
  return (PyDateTime_to_s(pydatetime) * 1000000LL +
          PyDateTime_DATE_GET_MICROSECOND(pydatetime));
}

// Nanoseconds between the epoch and the given datetime.
ARROW_PYTHON_EXPORT
inline int64_t PyDateTime_to_ns(PyDateTime_DateTime* pydatetime) {
  return PyDateTime_to_us(pydatetime) * 1000LL;
}

// Convert a datetime object to a nanosecond-resolution TimePoint.
ARROW_PYTHON_EXPORT
inline TimePoint PyDateTime_to_TimePoint(PyDateTime_DateTime* pydatetime) {
  return TimePoint(TimePoint::duration(PyDateTime_to_ns(pydatetime)));
}
+
// Nanoseconds since the epoch for a TimePoint.
ARROW_PYTHON_EXPORT
inline int64_t TimePoint_to_ns(TimePoint val) { return val.time_since_epoch().count(); }

// Build a TimePoint from fractional seconds.
// NOTE(review): the cast truncates toward zero, so the sub-nanosecond part
// of negative inputs rounds toward the epoch -- confirm this is acceptable
// for pre-epoch timestamps.
ARROW_PYTHON_EXPORT
inline TimePoint TimePoint_from_s(double val) {
  return TimePoint(TimePoint::duration(static_cast<int64_t>(1e9 * val)));
}

// Build a TimePoint from integral nanoseconds.
ARROW_PYTHON_EXPORT
inline TimePoint TimePoint_from_ns(int64_t val) {
  return TimePoint(TimePoint::duration(val));
}

// Whole seconds in a timedelta (the microsecond field is ignored).
ARROW_PYTHON_EXPORT
inline int64_t PyDelta_to_s(PyDateTime_Delta* pytimedelta) {
  return (PyDateTime_DELTA_GET_DAYS(pytimedelta) * 86400LL +
          PyDateTime_DELTA_GET_SECONDS(pytimedelta));
}

// Milliseconds in a timedelta (sub-millisecond part truncated).
ARROW_PYTHON_EXPORT
inline int64_t PyDelta_to_ms(PyDateTime_Delta* pytimedelta) {
  return (PyDelta_to_s(pytimedelta) * 1000LL +
          PyDateTime_DELTA_GET_MICROSECONDS(pytimedelta) / 1000);
}

// Microseconds in a timedelta.
ARROW_PYTHON_EXPORT
inline int64_t PyDelta_to_us(PyDateTime_Delta* pytimedelta) {
  return (PyDelta_to_s(pytimedelta) * 1000000LL +
          PyDateTime_DELTA_GET_MICROSECONDS(pytimedelta));
}

// Nanoseconds in a timedelta.
ARROW_PYTHON_EXPORT
inline int64_t PyDelta_to_ns(PyDateTime_Delta* pytimedelta) {
  return PyDelta_to_us(pytimedelta) * 1000LL;
}
+
+ARROW_PYTHON_EXPORT
+Result<int64_t> PyDateTime_utcoffset_s(PyObject* pydatetime);
+
+/// \brief Convert a time zone name into a time zone object.
+///
+/// Supported input strings are:
+/// * As used in the Olson time zone database (the "tz database" or
+/// "tzdata"), such as "America/New_York"
+/// * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30
+/// GIL must be held when calling this method.
+ARROW_PYTHON_EXPORT
+Result<PyObject*> StringToTzinfo(const std::string& tz);
+
+/// \brief Convert a time zone object to a string representation.
+///
+/// The output strings are:
+/// * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30
+/// if the input object is either an instance of pytz._FixedOffset or
+/// datetime.timedelta
+/// * The timezone's name if the input object's tzname() method returns with a
+/// non-empty timezone name such as "UTC" or "America/New_York"
+///
+/// GIL must be held when calling this method.
+ARROW_PYTHON_EXPORT
+Result<std::string> TzinfoToString(PyObject* pytzinfo);
+
+} // namespace internal
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/decimal.cc b/contrib/libs/apache/arrow/cpp/src/arrow/python/decimal.cc
new file mode 100644
index 0000000000..0c00fcfaa8
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/decimal.cc
@@ -0,0 +1,246 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <algorithm>
+#include <limits>
+
+#include "arrow/python/common.h"
+#include "arrow/python/decimal.h"
+#include "arrow/python/helpers.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/decimal.h"
+#include "arrow/util/logging.h"
+
+namespace arrow {
+namespace py {
+namespace internal {
+
// Import Python's decimal.Decimal class into *decimal_type.
Status ImportDecimalType(OwnedRef* decimal_type) {
  OwnedRef decimal_module;
  RETURN_NOT_OK(ImportModule("decimal", &decimal_module));
  RETURN_NOT_OK(ImportFromModule(decimal_module.obj(), "Decimal", decimal_type));
  return Status::OK();
}

// Render a decimal.Decimal into *out via str(decimal_object).
Status PythonDecimalToString(PyObject* python_decimal, std::string* out) {
  // Call Python's str(decimal_object)
  return PyObject_StdStringStr(python_decimal, out);
}
+
// \brief Infer the precision and scale of a Python decimal.Decimal instance
// \param python_decimal[in] An instance of decimal.Decimal
// \param precision[out] The value of the inferred precision
// \param scale[out] The value of the inferred scale
// \return The status of the operation
//
// Works off Decimal.as_tuple(): precision is the significant-digit count
// (widened to cover leading zeros of fractional values), scale is
// -exponent, clamped to >= 0.
static Status InferDecimalPrecisionAndScale(PyObject* python_decimal, int32_t* precision,
                                            int32_t* scale) {
  DCHECK_NE(python_decimal, NULLPTR);
  DCHECK_NE(precision, NULLPTR);
  DCHECK_NE(scale, NULLPTR);

  // TODO(phillipc): Make sure we perform PyDecimal_Check(python_decimal) as a DCHECK
  OwnedRef as_tuple(PyObject_CallMethod(python_decimal, const_cast<char*>("as_tuple"),
                                        const_cast<char*>("")));
  RETURN_IF_PYERROR();
  DCHECK(PyTuple_Check(as_tuple.obj()));

  OwnedRef digits(PyObject_GetAttrString(as_tuple.obj(), "digits"));
  RETURN_IF_PYERROR();
  DCHECK(PyTuple_Check(digits.obj()));

  const auto num_digits = static_cast<int32_t>(PyTuple_Size(digits.obj()));
  RETURN_IF_PYERROR();

  OwnedRef py_exponent(PyObject_GetAttrString(as_tuple.obj(), "exponent"));
  RETURN_IF_PYERROR();
  DCHECK(IsPyInteger(py_exponent.obj()));

  const auto exponent = static_cast<int32_t>(PyLong_AsLong(py_exponent.obj()));
  RETURN_IF_PYERROR();

  if (exponent < 0) {
    // If exponent > num_digits, we have a number with leading zeros
    // such as 0.01234. Ensure we have enough precision for leading zeros
    // (which are not included in num_digits).
    *precision = std::max(num_digits, -exponent);
    *scale = -exponent;
  } else {
    // Trailing zeros are not included in num_digits, need to add to precision.
    // Note we don't generate negative scales as they are poorly supported
    // in non-Arrow systems.
    *precision = num_digits + exponent;
    *scale = 0;
  }
  return Status::OK();
}
+
// Build a decimal.Decimal (new reference, or null with a Python error set)
// from a non-empty decimal string.
PyObject* DecimalFromString(PyObject* decimal_constructor,
                            const std::string& decimal_string) {
  DCHECK_NE(decimal_constructor, nullptr);

  auto string_size = decimal_string.size();
  DCHECK_GT(string_size, 0);

  auto string_bytes = decimal_string.c_str();
  DCHECK_NE(string_bytes, nullptr);

  // "s#" passes the character pointer together with an explicit length.
  return PyObject_CallFunction(decimal_constructor, const_cast<char*>("s#"), string_bytes,
                               static_cast<Py_ssize_t>(string_size));
}
+
+namespace {
+
// Parse `decimal_string` into *out, rescaling to arrow_type's scale and
// failing if the rescaled value would not fit arrow_type's precision.
template <typename ArrowDecimal>
Status DecimalFromStdString(const std::string& decimal_string,
                            const DecimalType& arrow_type, ArrowDecimal* out) {
  int32_t inferred_precision;
  int32_t inferred_scale;

  RETURN_NOT_OK(ArrowDecimal::FromString(decimal_string, out, &inferred_precision,
                                         &inferred_scale));

  const int32_t precision = arrow_type.precision();
  const int32_t scale = arrow_type.scale();

  if (scale != inferred_scale) {
    DCHECK_NE(out, NULLPTR);
    ARROW_ASSIGN_OR_RAISE(*out, out->Rescale(inferred_scale, scale));
  }

  auto inferred_scale_delta = inferred_scale - scale;
  if (ARROW_PREDICT_FALSE((inferred_precision - inferred_scale_delta) > precision)) {
    // NOTE(review): the message reads as if the two values were swapped --
    // the first is inferred from the value, the second is the type's
    // precision; confirm the intended wording.
    return Status::Invalid(
        "Decimal type with precision ", inferred_precision,
        " does not fit into precision inferred from first array element: ", precision);
  }

  return Status::OK();
}
+
// Convert a decimal.Decimal to an Arrow decimal via its string form.
template <typename ArrowDecimal>
Status InternalDecimalFromPythonDecimal(PyObject* python_decimal,
                                        const DecimalType& arrow_type,
                                        ArrowDecimal* out) {
  DCHECK_NE(python_decimal, NULLPTR);
  DCHECK_NE(out, NULLPTR);

  std::string string;
  RETURN_NOT_OK(PythonDecimalToString(python_decimal, &string));
  return DecimalFromStdString(string, arrow_type, out);
}

// Convert either a Python int or a decimal.Decimal to an Arrow decimal;
// any other type is a TypeError.
template <typename ArrowDecimal>
Status InternalDecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type,
                                   ArrowDecimal* out) {
  DCHECK_NE(obj, NULLPTR);
  DCHECK_NE(out, NULLPTR);

  if (IsPyInteger(obj)) {
    // TODO: add a fast path for small-ish ints
    std::string string;
    RETURN_NOT_OK(PyObject_StdStringStr(obj, &string));
    return DecimalFromStdString(string, arrow_type, out);
  } else if (PyDecimal_Check(obj)) {
    return InternalDecimalFromPythonDecimal<ArrowDecimal>(obj, arrow_type, out);
  } else {
    return Status::TypeError("int or Decimal object expected, got ",
                             Py_TYPE(obj)->tp_name);
  }
}
+
+} // namespace
+
// Public Decimal128 / Decimal256 entry points; all delegate to the
// templated helpers above.
Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type,
                                Decimal128* out) {
  return InternalDecimalFromPythonDecimal(python_decimal, arrow_type, out);
}

Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type,
                           Decimal128* out) {
  return InternalDecimalFromPyObject(obj, arrow_type, out);
}

Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type,
                                Decimal256* out) {
  return InternalDecimalFromPythonDecimal(python_decimal, arrow_type, out);
}

Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type,
                           Decimal256* out) {
  return InternalDecimalFromPyObject(obj, arrow_type, out);
}
+
// True if obj's type is decimal.Decimal or a (non-virtual) subclass.
// NOTE(review): the static OwnedRef cache is populated on first call and
// never released; presumably safe only while the GIL is held -- confirm.
bool PyDecimal_Check(PyObject* obj) {
  static OwnedRef decimal_type;
  if (!decimal_type.obj()) {
    ARROW_CHECK_OK(ImportDecimalType(&decimal_type));
    DCHECK(PyType_Check(decimal_type.obj()));
  }
  // PyObject_IsInstance() is slower as it has to check for virtual subclasses
  const int result =
      PyType_IsSubtype(Py_TYPE(obj), reinterpret_cast<PyTypeObject*>(decimal_type.obj()));
  ARROW_CHECK_NE(result, -1) << " error during PyType_IsSubtype check";
  return result == 1;
}

// True if the given decimal.Decimal is NaN, per Decimal.is_nan().
// NOTE(review): the is_nan() call result is not checked for a Python
// error before being passed to PyObject_IsTrue -- confirm intended.
bool PyDecimal_ISNAN(PyObject* obj) {
  DCHECK(PyDecimal_Check(obj)) << "obj is not an instance of decimal.Decimal";
  OwnedRef is_nan(
      PyObject_CallMethod(obj, const_cast<char*>("is_nan"), const_cast<char*>("")));
  return PyObject_IsTrue(is_nan.obj()) == 1;
}
+
// Default-constructed metadata uses INT32_MIN as a "no value seen yet"
// sentinel for both precision and scale.
DecimalMetadata::DecimalMetadata()
    : DecimalMetadata(std::numeric_limits<int32_t>::min(),
                      std::numeric_limits<int32_t>::min()) {}

DecimalMetadata::DecimalMetadata(int32_t precision, int32_t scale)
    : precision_(precision), scale_(scale) {}

// Widen the tracked (precision, scale) so that both the previously seen
// values and the suggested one fit.
Status DecimalMetadata::Update(int32_t suggested_precision, int32_t suggested_scale) {
  const int32_t current_scale = scale_;
  scale_ = std::max(current_scale, suggested_scale);

  const int32_t current_precision = precision_;

  if (current_precision == std::numeric_limits<int32_t>::min()) {
    // First observation: take the suggested precision as-is.
    precision_ = suggested_precision;
  } else {
    // Keep enough integer digits for both observations, plus the
    // (possibly grown) scale.
    auto num_digits = std::max(current_precision - current_scale,
                               suggested_precision - suggested_scale);
    precision_ = std::max(num_digits + scale_, current_precision);
  }

  return Status::OK();
}

// Update from a Python object; non-Decimal values and NaNs are ignored.
Status DecimalMetadata::Update(PyObject* object) {
  bool is_decimal = PyDecimal_Check(object);

  if (ARROW_PREDICT_FALSE(!is_decimal || PyDecimal_ISNAN(object))) {
    return Status::OK();
  }

  int32_t precision = 0;
  int32_t scale = 0;
  RETURN_NOT_OK(InferDecimalPrecisionAndScale(object, &precision, &scale));
  return Update(precision, scale);
}
+
+} // namespace internal
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/decimal.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/decimal.h
new file mode 100644
index 0000000000..1187037aed
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/decimal.h
@@ -0,0 +1,128 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <string>
+
+#include "arrow/python/visibility.h"
+#include "arrow/type.h"
+
+namespace arrow {
+
+class Decimal128;
+class Decimal256;
+
+namespace py {
+
+class OwnedRef;
+
+//
+// Python Decimal support
+//
+
+namespace internal {
+
+// \brief Import the Python Decimal type
+ARROW_PYTHON_EXPORT
+Status ImportDecimalType(OwnedRef* decimal_type);
+
+// \brief Convert a Python Decimal object to a C++ string
+// \param[in] python_decimal A Python decimal.Decimal instance
+// \param[out] out The string representation of the Python Decimal instance
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status PythonDecimalToString(PyObject* python_decimal, std::string* out);
+
+// \brief Convert a C++ std::string to a Python Decimal instance
+// \param[in] decimal_constructor The decimal type object
+// \param[in] decimal_string A decimal string
+// \return An instance of decimal.Decimal
+ARROW_PYTHON_EXPORT
+PyObject* DecimalFromString(PyObject* decimal_constructor,
+ const std::string& decimal_string);
+
+// \brief Convert a Python decimal to an Arrow Decimal128 object
+// \param[in] python_decimal A Python decimal.Decimal instance
+// \param[in] arrow_type An instance of arrow::DecimalType
+// \param[out] out A pointer to a Decimal128
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type,
+ Decimal128* out);
+
+// \brief Convert a Python object to an Arrow Decimal128 object
+// \param[in] obj A Python int or decimal.Decimal instance
+// \param[in] arrow_type An instance of arrow::DecimalType
+// \param[out] out A pointer to a Decimal128
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type, Decimal128* out);
+
+// \brief Convert a Python decimal to an Arrow Decimal256 object
+// \param[in] python_decimal A Python decimal.Decimal instance
+// \param[in] arrow_type An instance of arrow::DecimalType
+// \param[out] out A pointer to a Decimal256
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type,
+ Decimal256* out);
+
+// \brief Convert a Python object to an Arrow Decimal256 object
+// \param[in] obj A Python int or decimal.Decimal instance
+// \param[in] arrow_type An instance of arrow::DecimalType
+// \param[out] out A pointer to a Decimal256
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type, Decimal256* out);
+
+// \brief Check whether obj is an instance of Decimal
+ARROW_PYTHON_EXPORT
+bool PyDecimal_Check(PyObject* obj);
+
+// \brief Check whether obj is nan. This function will abort the program if the argument
+// is not a Decimal instance
+ARROW_PYTHON_EXPORT
+bool PyDecimal_ISNAN(PyObject* obj);
+
+// \brief Helper class to track and update the precision and scale of a decimal
+class ARROW_PYTHON_EXPORT DecimalMetadata {
+ public:
+ DecimalMetadata();
+ DecimalMetadata(int32_t precision, int32_t scale);
+
+  // \brief Adjust the precision and scale of a decimal type given candidate values
+  // \param[in] suggested_precision A candidate precision
+  // \param[in] suggested_scale A candidate scale \return The status of the operation
+ Status Update(int32_t suggested_precision, int32_t suggested_scale);
+
+  // \brief Update the precision and scale based on a Python Decimal object
+  // \param object A Python Decimal object
+  // \return The status of the operation
+ Status Update(PyObject* object);
+
+ int32_t precision() const { return precision_; }
+ int32_t scale() const { return scale_; }
+
+ private:
+ int32_t precision_;
+ int32_t scale_;
+};
+
+} // namespace internal
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/deserialize.cc b/contrib/libs/apache/arrow/cpp/src/arrow/python/deserialize.cc
new file mode 100644
index 0000000000..961a1686e0
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/deserialize.cc
@@ -0,0 +1,495 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/python/deserialize.h"
+
+#include "arrow/python/numpy_interop.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <numpy/arrayobject.h>
+#include <numpy/arrayscalars.h>
+
+#include "arrow/array.h"
+#include "arrow/io/interfaces.h"
+#include "arrow/io/memory.h"
+#include "arrow/ipc/options.h"
+#include "arrow/ipc/reader.h"
+#include "arrow/ipc/util.h"
+#include "arrow/ipc/writer.h"
+#include "arrow/table.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/logging.h"
+#include "arrow/util/value_parsing.h"
+
+#include "arrow/python/common.h"
+#include "arrow/python/datetime.h"
+#include "arrow/python/helpers.h"
+#include "arrow/python/numpy_convert.h"
+#include "arrow/python/pyarrow.h"
+#include "arrow/python/serialize.h"
+
+namespace arrow {
+
+using internal::checked_cast;
+using internal::ParseValue;
+
+namespace py {
+
+Status CallDeserializeCallback(PyObject* context, PyObject* value,
+ PyObject** deserialized_object);
+
+Status DeserializeTuple(PyObject* context, const Array& array, int64_t start_idx,
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
+ PyObject** out);
+
+Status DeserializeList(PyObject* context, const Array& array, int64_t start_idx,
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
+ PyObject** out);
+
+Status DeserializeSet(PyObject* context, const Array& array, int64_t start_idx,
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
+ PyObject** out);
+
+Status DeserializeDict(PyObject* context, const Array& array, int64_t start_idx,
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
+ PyObject** out) {
+ const auto& data = checked_cast<const StructArray&>(array);
+ OwnedRef keys, vals;
+ OwnedRef result(PyDict_New());
+ RETURN_IF_PYERROR();
+
+ DCHECK_EQ(2, data.num_fields());
+
+ RETURN_NOT_OK(DeserializeList(context, *data.field(0), start_idx, stop_idx, base, blobs,
+ keys.ref()));
+ RETURN_NOT_OK(DeserializeList(context, *data.field(1), start_idx, stop_idx, base, blobs,
+ vals.ref()));
+ for (int64_t i = start_idx; i < stop_idx; ++i) {
+ // PyDict_SetItem behaves differently from PyList_SetItem and PyTuple_SetItem.
+ // The latter two steal references whereas PyDict_SetItem does not. So we need
+ // to make sure the reference count is decremented by letting the OwnedRef
+ // go out of scope at the end.
+ int ret = PyDict_SetItem(result.obj(), PyList_GET_ITEM(keys.obj(), i - start_idx),
+ PyList_GET_ITEM(vals.obj(), i - start_idx));
+ if (ret != 0) {
+ return ConvertPyError();
+ }
+ }
+ static PyObject* py_type = PyUnicode_FromString("_pytype_");
+ if (PyDict_Contains(result.obj(), py_type)) {
+ RETURN_NOT_OK(CallDeserializeCallback(context, result.obj(), out));
+ } else {
+ *out = result.detach();
+ }
+ return Status::OK();
+}
+
+Status DeserializeArray(int32_t index, PyObject* base, const SerializedPyObject& blobs,
+ PyObject** out) {
+ RETURN_NOT_OK(py::TensorToNdarray(blobs.ndarrays[index], base, out));
+ // Mark the array as immutable
+ OwnedRef flags(PyObject_GetAttrString(*out, "flags"));
+ if (flags.obj() == NULL) {
+ return ConvertPyError();
+ }
+ if (PyObject_SetAttrString(flags.obj(), "writeable", Py_False) < 0) {
+ return ConvertPyError();
+ }
+ return Status::OK();
+}
+
+Status GetValue(PyObject* context, const Array& arr, int64_t index, int8_t type,
+ PyObject* base, const SerializedPyObject& blobs, PyObject** result) {
+ switch (type) {
+ case PythonType::NONE:
+ Py_INCREF(Py_None);
+ *result = Py_None;
+ return Status::OK();
+ case PythonType::BOOL:
+ *result = PyBool_FromLong(checked_cast<const BooleanArray&>(arr).Value(index));
+ return Status::OK();
+ case PythonType::PY2INT:
+ case PythonType::INT: {
+ *result = PyLong_FromSsize_t(checked_cast<const Int64Array&>(arr).Value(index));
+ return Status::OK();
+ }
+ case PythonType::BYTES: {
+ auto view = checked_cast<const BinaryArray&>(arr).GetView(index);
+ *result = PyBytes_FromStringAndSize(view.data(), view.length());
+ return CheckPyError();
+ }
+ case PythonType::STRING: {
+ auto view = checked_cast<const StringArray&>(arr).GetView(index);
+ *result = PyUnicode_FromStringAndSize(view.data(), view.length());
+ return CheckPyError();
+ }
+ case PythonType::HALF_FLOAT: {
+ *result = PyHalf_FromHalf(checked_cast<const HalfFloatArray&>(arr).Value(index));
+ RETURN_IF_PYERROR();
+ return Status::OK();
+ }
+ case PythonType::FLOAT:
+ *result = PyFloat_FromDouble(checked_cast<const FloatArray&>(arr).Value(index));
+ return Status::OK();
+ case PythonType::DOUBLE:
+ *result = PyFloat_FromDouble(checked_cast<const DoubleArray&>(arr).Value(index));
+ return Status::OK();
+ case PythonType::DATE64: {
+ RETURN_NOT_OK(internal::PyDateTime_from_int(
+ checked_cast<const Date64Array&>(arr).Value(index), TimeUnit::MICRO, result));
+ RETURN_IF_PYERROR();
+ return Status::OK();
+ }
+ case PythonType::LIST: {
+ const auto& l = checked_cast<const ListArray&>(arr);
+ return DeserializeList(context, *l.values(), l.value_offset(index),
+ l.value_offset(index + 1), base, blobs, result);
+ }
+ case PythonType::DICT: {
+ const auto& l = checked_cast<const ListArray&>(arr);
+ return DeserializeDict(context, *l.values(), l.value_offset(index),
+ l.value_offset(index + 1), base, blobs, result);
+ }
+ case PythonType::TUPLE: {
+ const auto& l = checked_cast<const ListArray&>(arr);
+ return DeserializeTuple(context, *l.values(), l.value_offset(index),
+ l.value_offset(index + 1), base, blobs, result);
+ }
+ case PythonType::SET: {
+ const auto& l = checked_cast<const ListArray&>(arr);
+ return DeserializeSet(context, *l.values(), l.value_offset(index),
+ l.value_offset(index + 1), base, blobs, result);
+ }
+ case PythonType::TENSOR: {
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
+ *result = wrap_tensor(blobs.tensors[ref]);
+ return Status::OK();
+ }
+ case PythonType::SPARSECOOTENSOR: {
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
+ const std::shared_ptr<SparseCOOTensor>& sparse_coo_tensor =
+ arrow::internal::checked_pointer_cast<SparseCOOTensor>(
+ blobs.sparse_tensors[ref]);
+ *result = wrap_sparse_coo_tensor(sparse_coo_tensor);
+ return Status::OK();
+ }
+ case PythonType::SPARSECSRMATRIX: {
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
+ const std::shared_ptr<SparseCSRMatrix>& sparse_csr_matrix =
+ arrow::internal::checked_pointer_cast<SparseCSRMatrix>(
+ blobs.sparse_tensors[ref]);
+ *result = wrap_sparse_csr_matrix(sparse_csr_matrix);
+ return Status::OK();
+ }
+ case PythonType::SPARSECSCMATRIX: {
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
+ const std::shared_ptr<SparseCSCMatrix>& sparse_csc_matrix =
+ arrow::internal::checked_pointer_cast<SparseCSCMatrix>(
+ blobs.sparse_tensors[ref]);
+ *result = wrap_sparse_csc_matrix(sparse_csc_matrix);
+ return Status::OK();
+ }
+ case PythonType::SPARSECSFTENSOR: {
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
+ const std::shared_ptr<SparseCSFTensor>& sparse_csf_tensor =
+ arrow::internal::checked_pointer_cast<SparseCSFTensor>(
+ blobs.sparse_tensors[ref]);
+ *result = wrap_sparse_csf_tensor(sparse_csf_tensor);
+ return Status::OK();
+ }
+ case PythonType::NDARRAY: {
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
+ return DeserializeArray(ref, base, blobs, result);
+ }
+ case PythonType::BUFFER: {
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
+ *result = wrap_buffer(blobs.buffers[ref]);
+ return Status::OK();
+ }
+ default: {
+      ARROW_CHECK(false) << "union tag '" << type << "' not recognized";
+ }
+ }
+ return Status::OK();
+}
+
+Status GetPythonTypes(const UnionArray& data, std::vector<int8_t>* result) {
+ ARROW_CHECK(result != nullptr);
+ auto type = data.type();
+ for (int i = 0; i < type->num_fields(); ++i) {
+ int8_t tag = 0;
+ const std::string& data = type->field(i)->name();
+ if (!ParseValue<Int8Type>(data.c_str(), data.size(), &tag)) {
+ return Status::SerializationError("Cannot convert string: \"",
+ type->field(i)->name(), "\" to int8_t");
+ }
+ result->push_back(tag);
+ }
+ return Status::OK();
+}
+
+template <typename CreateSequenceFn, typename SetItemFn>
+Status DeserializeSequence(PyObject* context, const Array& array, int64_t start_idx,
+ int64_t stop_idx, PyObject* base,
+ const SerializedPyObject& blobs,
+ CreateSequenceFn&& create_sequence, SetItemFn&& set_item,
+ PyObject** out) {
+ const auto& data = checked_cast<const DenseUnionArray&>(array);
+ OwnedRef result(create_sequence(stop_idx - start_idx));
+ RETURN_IF_PYERROR();
+ const int8_t* type_codes = data.raw_type_codes();
+ const int32_t* value_offsets = data.raw_value_offsets();
+ std::vector<int8_t> python_types;
+ RETURN_NOT_OK(GetPythonTypes(data, &python_types));
+ for (int64_t i = start_idx; i < stop_idx; ++i) {
+ const int64_t offset = value_offsets[i];
+ const uint8_t type = type_codes[i];
+ PyObject* value;
+ RETURN_NOT_OK(GetValue(context, *data.field(type), offset, python_types[type], base,
+ blobs, &value));
+ RETURN_NOT_OK(set_item(result.obj(), i - start_idx, value));
+ }
+ *out = result.detach();
+ return Status::OK();
+}
+
+Status DeserializeList(PyObject* context, const Array& array, int64_t start_idx,
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
+ PyObject** out) {
+ return DeserializeSequence(
+ context, array, start_idx, stop_idx, base, blobs,
+ [](int64_t size) { return PyList_New(size); },
+ [](PyObject* seq, int64_t index, PyObject* item) {
+ PyList_SET_ITEM(seq, index, item);
+ return Status::OK();
+ },
+ out);
+}
+
+Status DeserializeTuple(PyObject* context, const Array& array, int64_t start_idx,
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
+ PyObject** out) {
+ return DeserializeSequence(
+ context, array, start_idx, stop_idx, base, blobs,
+ [](int64_t size) { return PyTuple_New(size); },
+ [](PyObject* seq, int64_t index, PyObject* item) {
+ PyTuple_SET_ITEM(seq, index, item);
+ return Status::OK();
+ },
+ out);
+}
+
+Status DeserializeSet(PyObject* context, const Array& array, int64_t start_idx,
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
+ PyObject** out) {
+ return DeserializeSequence(
+ context, array, start_idx, stop_idx, base, blobs,
+ [](int64_t size) { return PySet_New(nullptr); },
+ [](PyObject* seq, int64_t index, PyObject* item) {
+ int err = PySet_Add(seq, item);
+ Py_DECREF(item);
+ if (err < 0) {
+ RETURN_IF_PYERROR();
+ }
+ return Status::OK();
+ },
+ out);
+}
+
+Status ReadSerializedObject(io::RandomAccessFile* src, SerializedPyObject* out) {
+ int32_t num_tensors;
+ int32_t num_sparse_tensors;
+ int32_t num_ndarrays;
+ int32_t num_buffers;
+
+ // Read number of tensors
+ RETURN_NOT_OK(src->Read(sizeof(int32_t), reinterpret_cast<uint8_t*>(&num_tensors)));
+ RETURN_NOT_OK(
+ src->Read(sizeof(int32_t), reinterpret_cast<uint8_t*>(&num_sparse_tensors)));
+ RETURN_NOT_OK(src->Read(sizeof(int32_t), reinterpret_cast<uint8_t*>(&num_ndarrays)));
+ RETURN_NOT_OK(src->Read(sizeof(int32_t), reinterpret_cast<uint8_t*>(&num_buffers)));
+
+ // Align stream to 8-byte offset
+ RETURN_NOT_OK(ipc::AlignStream(src, ipc::kArrowIpcAlignment));
+ std::shared_ptr<RecordBatchReader> reader;
+ ARROW_ASSIGN_OR_RAISE(reader, ipc::RecordBatchStreamReader::Open(src));
+ RETURN_NOT_OK(reader->ReadNext(&out->batch));
+
+ /// Skip EOS marker
+ RETURN_NOT_OK(src->Advance(4));
+
+ /// Align stream so tensor bodies are 64-byte aligned
+ RETURN_NOT_OK(ipc::AlignStream(src, ipc::kTensorAlignment));
+
+ for (int i = 0; i < num_tensors; ++i) {
+ std::shared_ptr<Tensor> tensor;
+ ARROW_ASSIGN_OR_RAISE(tensor, ipc::ReadTensor(src));
+ RETURN_NOT_OK(ipc::AlignStream(src, ipc::kTensorAlignment));
+ out->tensors.push_back(tensor);
+ }
+
+ for (int i = 0; i < num_sparse_tensors; ++i) {
+ std::shared_ptr<SparseTensor> sparse_tensor;
+ ARROW_ASSIGN_OR_RAISE(sparse_tensor, ipc::ReadSparseTensor(src));
+ RETURN_NOT_OK(ipc::AlignStream(src, ipc::kTensorAlignment));
+ out->sparse_tensors.push_back(sparse_tensor);
+ }
+
+ for (int i = 0; i < num_ndarrays; ++i) {
+ std::shared_ptr<Tensor> ndarray;
+ ARROW_ASSIGN_OR_RAISE(ndarray, ipc::ReadTensor(src));
+ RETURN_NOT_OK(ipc::AlignStream(src, ipc::kTensorAlignment));
+ out->ndarrays.push_back(ndarray);
+ }
+
+ ARROW_ASSIGN_OR_RAISE(int64_t offset, src->Tell());
+ for (int i = 0; i < num_buffers; ++i) {
+ int64_t size;
+ RETURN_NOT_OK(src->ReadAt(offset, sizeof(int64_t), &size));
+ offset += sizeof(int64_t);
+ ARROW_ASSIGN_OR_RAISE(auto buffer, src->ReadAt(offset, size));
+ out->buffers.push_back(buffer);
+ offset += size;
+ }
+
+ return Status::OK();
+}
+
+Status DeserializeObject(PyObject* context, const SerializedPyObject& obj, PyObject* base,
+ PyObject** out) {
+ PyAcquireGIL lock;
+ return DeserializeList(context, *obj.batch->column(0), 0, obj.batch->num_rows(), base,
+ obj, out);
+}
+
+Status GetSerializedFromComponents(int num_tensors,
+ const SparseTensorCounts& num_sparse_tensors,
+ int num_ndarrays, int num_buffers, PyObject* data,
+ SerializedPyObject* out) {
+ PyAcquireGIL gil;
+ const Py_ssize_t data_length = PyList_Size(data);
+ RETURN_IF_PYERROR();
+
+ const Py_ssize_t expected_data_length = 1 + num_tensors * 2 +
+ num_sparse_tensors.num_total_buffers() +
+ num_ndarrays * 2 + num_buffers;
+ if (data_length != expected_data_length) {
+ return Status::Invalid("Invalid number of buffers in data");
+ }
+
+ auto GetBuffer = [&data](Py_ssize_t index, std::shared_ptr<Buffer>* out) {
+ ARROW_CHECK_LE(index, PyList_Size(data));
+ PyObject* py_buf = PyList_GET_ITEM(data, index);
+ return unwrap_buffer(py_buf).Value(out);
+ };
+
+ Py_ssize_t buffer_index = 0;
+
+ // Read the union batch describing object structure
+ {
+ std::shared_ptr<Buffer> data_buffer;
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &data_buffer));
+ gil.release();
+ io::BufferReader buf_reader(data_buffer);
+ std::shared_ptr<RecordBatchReader> reader;
+ ARROW_ASSIGN_OR_RAISE(reader, ipc::RecordBatchStreamReader::Open(&buf_reader));
+ RETURN_NOT_OK(reader->ReadNext(&out->batch));
+ gil.acquire();
+ }
+
+ // Zero-copy reconstruct tensors
+ for (int i = 0; i < num_tensors; ++i) {
+ std::shared_ptr<Buffer> metadata;
+ std::shared_ptr<Buffer> body;
+ std::shared_ptr<Tensor> tensor;
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &metadata));
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &body));
+
+ ipc::Message message(metadata, body);
+
+ ARROW_ASSIGN_OR_RAISE(tensor, ipc::ReadTensor(message));
+ out->tensors.emplace_back(std::move(tensor));
+ }
+
+ // Zero-copy reconstruct sparse tensors
+ for (int i = 0, n = num_sparse_tensors.num_total_tensors(); i < n; ++i) {
+ ipc::IpcPayload payload;
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &payload.metadata));
+
+ ARROW_ASSIGN_OR_RAISE(
+ size_t num_bodies,
+ ipc::internal::ReadSparseTensorBodyBufferCount(*payload.metadata));
+
+ payload.body_buffers.reserve(num_bodies);
+ for (size_t i = 0; i < num_bodies; ++i) {
+ std::shared_ptr<Buffer> body;
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &body));
+ payload.body_buffers.emplace_back(body);
+ }
+
+ std::shared_ptr<SparseTensor> sparse_tensor;
+ ARROW_ASSIGN_OR_RAISE(sparse_tensor, ipc::internal::ReadSparseTensorPayload(payload));
+ out->sparse_tensors.emplace_back(std::move(sparse_tensor));
+ }
+
+ // Zero-copy reconstruct tensors for numpy ndarrays
+ for (int i = 0; i < num_ndarrays; ++i) {
+ std::shared_ptr<Buffer> metadata;
+ std::shared_ptr<Buffer> body;
+ std::shared_ptr<Tensor> tensor;
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &metadata));
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &body));
+
+ ipc::Message message(metadata, body);
+
+ ARROW_ASSIGN_OR_RAISE(tensor, ipc::ReadTensor(message));
+ out->ndarrays.emplace_back(std::move(tensor));
+ }
+
+ // Unwrap and append buffers
+ for (int i = 0; i < num_buffers; ++i) {
+ std::shared_ptr<Buffer> buffer;
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &buffer));
+ out->buffers.emplace_back(std::move(buffer));
+ }
+
+ return Status::OK();
+}
+
+Status DeserializeNdarray(const SerializedPyObject& object,
+ std::shared_ptr<Tensor>* out) {
+ if (object.ndarrays.size() != 1) {
+ return Status::Invalid("Object is not an Ndarray");
+ }
+ *out = object.ndarrays[0];
+ return Status::OK();
+}
+
+Status NdarrayFromBuffer(std::shared_ptr<Buffer> src, std::shared_ptr<Tensor>* out) {
+ io::BufferReader reader(src);
+ SerializedPyObject object;
+ RETURN_NOT_OK(ReadSerializedObject(&reader, &object));
+ return DeserializeNdarray(object, out);
+}
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/deserialize.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/deserialize.h
new file mode 100644
index 0000000000..41b6a13a38
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/deserialize.h
@@ -0,0 +1,106 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "arrow/python/serialize.h"
+#include "arrow/python/visibility.h"
+#include "arrow/status.h"
+
+namespace arrow {
+
+class RecordBatch;
+class Tensor;
+
+namespace io {
+
+class RandomAccessFile;
+
+} // namespace io
+
+namespace py {
+
+struct ARROW_PYTHON_EXPORT SparseTensorCounts {
+ int coo;
+ int csr;
+ int csc;
+ int csf;
+ int ndim_csf;
+
+ int num_total_tensors() const { return coo + csr + csc + csf; }
+ int num_total_buffers() const {
+ return coo * 3 + csr * 4 + csc * 4 + 2 * ndim_csf + csf;
+ }
+};
+
+/// \brief Read serialized Python sequence from file interface using Arrow IPC
+/// \param[in] src a RandomAccessFile
+/// \param[out] out the reconstructed data
+/// \return Status
+ARROW_PYTHON_EXPORT
+Status ReadSerializedObject(io::RandomAccessFile* src, SerializedPyObject* out);
+
+/// \brief Reconstruct SerializedPyObject from representation produced by
+/// SerializedPyObject::GetComponents.
+///
+/// \param[in] num_tensors number of tensors in the object
+/// \param[in] num_sparse_tensors number of sparse tensors in the object
+/// \param[in] num_ndarrays number of numpy Ndarrays in the object
+/// \param[in] num_buffers number of buffers in the object
+/// \param[in] data a list containing pyarrow.Buffer instances. It must be 1 +
+/// num_tensors * 2 + num_coo_tensors * 3 + num_csr_tensors * 4 + num_csc_tensors * 4 +
+/// num_csf_tensors * (2 * ndim_csf + 3) + num_buffers in length
+/// \param[out] out the reconstructed object
+/// \return Status
+ARROW_PYTHON_EXPORT
+Status GetSerializedFromComponents(int num_tensors,
+ const SparseTensorCounts& num_sparse_tensors,
+ int num_ndarrays, int num_buffers, PyObject* data,
+ SerializedPyObject* out);
+
+/// \brief Reconstruct Python object from Arrow-serialized representation
+/// \param[in] context Serialization context which contains custom serialization
+/// and deserialization callbacks. Can be any Python object with a
+/// _serialize_callback method for serialization and a _deserialize_callback
+/// method for deserialization. If context is None, no custom serialization
+/// will be attempted.
+/// \param[in] object Object to deserialize
+/// \param[in] base a Python object holding the underlying data that any NumPy
+/// arrays will reference, to avoid premature deallocation
+/// \param[out] out The returned object
+/// \return Status
+/// This acquires the GIL
+ARROW_PYTHON_EXPORT
+Status DeserializeObject(PyObject* context, const SerializedPyObject& object,
+ PyObject* base, PyObject** out);
+
+/// \brief Reconstruct Ndarray from Arrow-serialized representation
+/// \param[in] object Object to deserialize
+/// \param[out] out The deserialized tensor
+/// \return Status
+ARROW_PYTHON_EXPORT
+Status DeserializeNdarray(const SerializedPyObject& object, std::shared_ptr<Tensor>* out);
+
+ARROW_PYTHON_EXPORT
+Status NdarrayFromBuffer(std::shared_ptr<Buffer> src, std::shared_ptr<Tensor>* out);
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/extension_type.cc b/contrib/libs/apache/arrow/cpp/src/arrow/python/extension_type.cc
new file mode 100644
index 0000000000..3ccc171c87
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/extension_type.cc
@@ -0,0 +1,217 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <memory>
+#include <sstream>
+#include <utility>
+
+#include "arrow/python/extension_type.h"
+#include "arrow/python/helpers.h"
+#include "arrow/python/pyarrow.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/logging.h"
+
+namespace arrow {
+
+using internal::checked_cast;
+
+namespace py {
+
+namespace {
+
+// Serialize a Python ExtensionType instance
+Status SerializeExtInstance(PyObject* type_instance, std::string* out) {
+ OwnedRef res(
+ cpp_PyObject_CallMethod(type_instance, "__arrow_ext_serialize__", nullptr));
+ if (!res) {
+ return ConvertPyError();
+ }
+ if (!PyBytes_Check(res.obj())) {
+ return Status::TypeError(
+ "__arrow_ext_serialize__ should return bytes object, "
+ "got ",
+ internal::PyObject_StdStringRepr(res.obj()));
+ }
+ *out = internal::PyBytes_AsStdString(res.obj());
+ return Status::OK();
+}
+
+// Deserialize a Python ExtensionType instance
+PyObject* DeserializeExtInstance(PyObject* type_class,
+ std::shared_ptr<DataType> storage_type,
+ const std::string& serialized_data) {
+ OwnedRef storage_ref(wrap_data_type(storage_type));
+ if (!storage_ref) {
+ return nullptr;
+ }
+ OwnedRef data_ref(PyBytes_FromStringAndSize(
+ serialized_data.data(), static_cast<Py_ssize_t>(serialized_data.size())));
+ if (!data_ref) {
+ return nullptr;
+ }
+
+ return cpp_PyObject_CallMethod(type_class, "__arrow_ext_deserialize__", "OO",
+ storage_ref.obj(), data_ref.obj());
+}
+
+} // namespace
+
+static const char* kExtensionName = "arrow.py_extension_type";
+
+std::string PyExtensionType::ToString() const {
+ PyAcquireGIL lock;
+
+ std::stringstream ss;
+ OwnedRef instance(GetInstance());
+ ss << "extension<" << this->extension_name() << "<" << Py_TYPE(instance.obj())->tp_name
+ << ">>";
+ return ss.str();
+}
+
+PyExtensionType::PyExtensionType(std::shared_ptr<DataType> storage_type, PyObject* typ,
+ PyObject* inst)
+ : ExtensionType(storage_type),
+ extension_name_(kExtensionName),
+ type_class_(typ),
+ type_instance_(inst) {}
+
+PyExtensionType::PyExtensionType(std::shared_ptr<DataType> storage_type,
+ std::string extension_name, PyObject* typ,
+ PyObject* inst)
+ : ExtensionType(storage_type),
+ extension_name_(std::move(extension_name)),
+ type_class_(typ),
+ type_instance_(inst) {}
+
+bool PyExtensionType::ExtensionEquals(const ExtensionType& other) const {
+ PyAcquireGIL lock;
+
+ if (other.extension_name() != extension_name()) {
+ return false;
+ }
+ const auto& other_ext = checked_cast<const PyExtensionType&>(other);
+ int res = -1;
+ if (!type_instance_) {
+ if (other_ext.type_instance_) {
+ return false;
+ }
+ // Compare Python types
+ res = PyObject_RichCompareBool(type_class_.obj(), other_ext.type_class_.obj(), Py_EQ);
+ } else {
+ if (!other_ext.type_instance_) {
+ return false;
+ }
+ // Compare Python instances
+ OwnedRef left(GetInstance());
+ OwnedRef right(other_ext.GetInstance());
+ if (!left || !right) {
+ goto error;
+ }
+ res = PyObject_RichCompareBool(left.obj(), right.obj(), Py_EQ);
+ }
+ if (res == -1) {
+ goto error;
+ }
+ return res == 1;
+
+error:
+ // Cannot propagate error
+ PyErr_WriteUnraisable(nullptr);
+ return false;
+}
+
+std::shared_ptr<Array> PyExtensionType::MakeArray(std::shared_ptr<ArrayData> data) const {
+ DCHECK_EQ(data->type->id(), Type::EXTENSION);
+ return std::make_shared<ExtensionArray>(data);
+}
+
+std::string PyExtensionType::Serialize() const {
+ DCHECK(type_instance_);
+ return serialized_;
+}
+
+Result<std::shared_ptr<DataType>> PyExtensionType::Deserialize(
+ std::shared_ptr<DataType> storage_type, const std::string& serialized_data) const {
+ PyAcquireGIL lock;
+
+ if (import_pyarrow()) {
+ return ConvertPyError();
+ }
+ OwnedRef res(DeserializeExtInstance(type_class_.obj(), storage_type, serialized_data));
+ if (!res) {
+ return ConvertPyError();
+ }
+ return unwrap_data_type(res.obj());
+}
+
+PyObject* PyExtensionType::GetInstance() const {
+ if (!type_instance_) {
+ PyErr_SetString(PyExc_TypeError, "Not an instance");
+ return nullptr;
+ }
+ DCHECK(PyWeakref_CheckRef(type_instance_.obj()));
+ PyObject* inst = PyWeakref_GET_OBJECT(type_instance_.obj());
+ if (inst != Py_None) {
+ // Cached instance still alive
+ Py_INCREF(inst);
+ return inst;
+ } else {
+ // Must reconstruct from serialized form
+ // XXX cache again?
+ return DeserializeExtInstance(type_class_.obj(), storage_type_, serialized_);
+ }
+}
+
+Status PyExtensionType::SetInstance(PyObject* inst) const {
+ // Check we have the right type
+ PyObject* typ = reinterpret_cast<PyObject*>(Py_TYPE(inst));
+ if (typ != type_class_.obj()) {
+ return Status::TypeError("Unexpected Python ExtensionType class ",
+ internal::PyObject_StdStringRepr(typ), " expected ",
+ internal::PyObject_StdStringRepr(type_class_.obj()));
+ }
+
+ PyObject* wr = PyWeakref_NewRef(inst, nullptr);
+ if (wr == NULL) {
+ return ConvertPyError();
+ }
+ type_instance_.reset(wr);
+ return SerializeExtInstance(inst, &serialized_);
+}
+
+Status PyExtensionType::FromClass(const std::shared_ptr<DataType> storage_type,
+ const std::string extension_name, PyObject* typ,
+ std::shared_ptr<ExtensionType>* out) {
+ Py_INCREF(typ);
+ out->reset(new PyExtensionType(storage_type, std::move(extension_name), typ));
+ return Status::OK();
+}
+
+Status RegisterPyExtensionType(const std::shared_ptr<DataType>& type) {
+ DCHECK_EQ(type->id(), Type::EXTENSION);
+ auto ext_type = std::dynamic_pointer_cast<ExtensionType>(type);
+ return RegisterExtensionType(ext_type);
+}
+
+Status UnregisterPyExtensionType(const std::string& type_name) {
+ return UnregisterExtensionType(type_name);
+}
+
+std::string PyExtensionName() { return kExtensionName; }
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/extension_type.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/extension_type.h
new file mode 100644
index 0000000000..e433d9aca7
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/extension_type.h
@@ -0,0 +1,85 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+#include <string>
+
+#include "arrow/extension_type.h"
+#include "arrow/python/common.h"
+#include "arrow/python/visibility.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+namespace py {
+
+// Concrete ExtensionType that delegates its behavior to a Python-defined
+// extension type class. Holds a strong reference to the Python class and a
+// weakref (plus serialized fallback) to the instance.
+class ARROW_PYTHON_EXPORT PyExtensionType : public ExtensionType {
+ public:
+ // Implement extensionType API
+ std::string extension_name() const override { return extension_name_; }
+
+ std::string ToString() const override;
+
+ bool ExtensionEquals(const ExtensionType& other) const override;
+
+ std::shared_ptr<Array> MakeArray(std::shared_ptr<ArrayData> data) const override;
+
+ Result<std::shared_ptr<DataType>> Deserialize(
+ std::shared_ptr<DataType> storage_type,
+ const std::string& serialized) const override;
+
+ std::string Serialize() const override;
+
+ // For use from Cython
+ // Assumes that `typ` is borrowed
+ static Status FromClass(const std::shared_ptr<DataType> storage_type,
+ const std::string extension_name, PyObject* typ,
+ std::shared_ptr<ExtensionType>* out);
+
+ // Return new ref
+ PyObject* GetInstance() const;
+ Status SetInstance(PyObject*) const;
+
+ protected:
+ PyExtensionType(std::shared_ptr<DataType> storage_type, PyObject* typ,
+ PyObject* inst = NULLPTR);
+ PyExtensionType(std::shared_ptr<DataType> storage_type, std::string extension_name,
+ PyObject* typ, PyObject* inst = NULLPTR);
+
+ std::string extension_name_;
+
+ // These fields are mutable because of two-step initialization.
+ mutable OwnedRefNoGIL type_class_;
+ // A weakref or null. Storing a strong reference to the Python extension type
+ // instance would create an unreclaimable reference cycle between Python and C++
+ // (the Python instance has to keep a strong reference to the C++ ExtensionType
+ // in other direction). Instead, we store a weakref to the instance.
+ // If the weakref is dead, we reconstruct the instance from its serialized form.
+ mutable OwnedRefNoGIL type_instance_;
+ // Empty if type_instance_ is null
+ mutable std::string serialized_;
+};
+
+// Registry name shared by all Python-defined extension types.
+ARROW_PYTHON_EXPORT std::string PyExtensionName();
+
+// Register / unregister an extension type in the global registry.
+ARROW_PYTHON_EXPORT Status RegisterPyExtensionType(const std::shared_ptr<DataType>&);
+
+ARROW_PYTHON_EXPORT Status UnregisterPyExtensionType(const std::string& type_name);
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/filesystem.cc b/contrib/libs/apache/arrow/cpp/src/arrow/python/filesystem.cc
new file mode 100644
index 0000000000..8c12f05a0f
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/filesystem.cc
@@ -0,0 +1,206 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/python/filesystem.h"
+#include "arrow/util/logging.h"
+
+namespace arrow {
+
+using fs::FileInfo;
+using fs::FileSelector;
+
+namespace py {
+namespace fs {
+
+// `handler` is the Python object implementing the filesystem operations; a
+// strong reference is taken here and released by OwnedRefNoGIL on destruction.
+PyFileSystem::PyFileSystem(PyObject* handler, PyFileSystemVtable vtable)
+ : handler_(handler), vtable_(std::move(vtable)) {
+ Py_INCREF(handler);
+}
+
+PyFileSystem::~PyFileSystem() {}
+
+// Convenience factory returning a shared_ptr-owned instance.
+std::shared_ptr<PyFileSystem> PyFileSystem::Make(PyObject* handler,
+ PyFileSystemVtable vtable) {
+ return std::make_shared<PyFileSystem>(handler, std::move(vtable));
+}
+
+// FileSystem::type_name() cannot propagate errors, so a Python exception
+// raised by the handler is reported as "unraisable" and whatever was stored
+// in `result` (possibly empty) is returned.
+std::string PyFileSystem::type_name() const {
+ std::string result;
+ auto st = SafeCallIntoPython([&]() -> Status {
+ vtable_.get_type_name(handler_.obj(), &result);
+ if (PyErr_Occurred()) {
+ PyErr_WriteUnraisable(handler_.obj());
+ }
+ return Status::OK();
+ });
+ ARROW_UNUSED(st);
+ return result;
+}
+
+// Delegate equality to the Python handler. This interface cannot report
+// errors, so a raised Python exception is written out as "unraisable"; if
+// the callback never runs (SafeCallIntoPython failed), false is returned.
+bool PyFileSystem::Equals(const FileSystem& other) const {
+ // Initialized: the original left this indeterminate, which is UB to read
+ // when SafeCallIntoPython fails before the lambda assigns it.
+ bool result = false;
+ auto st = SafeCallIntoPython([&]() -> Status {
+ result = vtable_.equals(handler_.obj(), other);
+ if (PyErr_Occurred()) {
+ PyErr_WriteUnraisable(handler_.obj());
+ }
+ return Status::OK();
+ });
+ ARROW_UNUSED(st);
+ return result;
+}
+
+// The three GetFileInfo overloads follow the same pattern: re-enter Python
+// via SafeCallIntoPython, invoke the handler through the vtable, and convert
+// any raised Python exception into a Status with CheckPyError.
+Result<FileInfo> PyFileSystem::GetFileInfo(const std::string& path) {
+ FileInfo info;
+
+ auto st = SafeCallIntoPython([&]() -> Status {
+ vtable_.get_file_info(handler_.obj(), path, &info);
+ return CheckPyError();
+ });
+ RETURN_NOT_OK(st);
+ return info;
+}
+
+Result<std::vector<FileInfo>> PyFileSystem::GetFileInfo(
+ const std::vector<std::string>& paths) {
+ std::vector<FileInfo> infos;
+
+ auto st = SafeCallIntoPython([&]() -> Status {
+ vtable_.get_file_info_vector(handler_.obj(), paths, &infos);
+ return CheckPyError();
+ });
+ RETURN_NOT_OK(st);
+ return infos;
+}
+
+Result<std::vector<FileInfo>> PyFileSystem::GetFileInfo(const FileSelector& select) {
+ std::vector<FileInfo> infos;
+
+ auto st = SafeCallIntoPython([&]() -> Status {
+ vtable_.get_file_info_selector(handler_.obj(), select, &infos);
+ return CheckPyError();
+ });
+ RETURN_NOT_OK(st);
+ return infos;
+}
+
+// Mutating operations: each re-enters Python via SafeCallIntoPython, calls
+// the handler through the vtable, and maps a raised Python exception to a
+// Status via CheckPyError.
+Status PyFileSystem::CreateDir(const std::string& path, bool recursive) {
+ return SafeCallIntoPython([&]() -> Status {
+ vtable_.create_dir(handler_.obj(), path, recursive);
+ return CheckPyError();
+ });
+}
+
+Status PyFileSystem::DeleteDir(const std::string& path) {
+ return SafeCallIntoPython([&]() -> Status {
+ vtable_.delete_dir(handler_.obj(), path);
+ return CheckPyError();
+ });
+}
+
+// Deletes the directory's contents but keeps the directory itself.
+Status PyFileSystem::DeleteDirContents(const std::string& path) {
+ return SafeCallIntoPython([&]() -> Status {
+ vtable_.delete_dir_contents(handler_.obj(), path);
+ return CheckPyError();
+ });
+}
+
+Status PyFileSystem::DeleteRootDirContents() {
+ return SafeCallIntoPython([&]() -> Status {
+ vtable_.delete_root_dir_contents(handler_.obj());
+ return CheckPyError();
+ });
+}
+
+Status PyFileSystem::DeleteFile(const std::string& path) {
+ return SafeCallIntoPython([&]() -> Status {
+ vtable_.delete_file(handler_.obj(), path);
+ return CheckPyError();
+ });
+}
+
+Status PyFileSystem::Move(const std::string& src, const std::string& dest) {
+ return SafeCallIntoPython([&]() -> Status {
+ vtable_.move(handler_.obj(), src, dest);
+ return CheckPyError();
+ });
+}
+
+Status PyFileSystem::CopyFile(const std::string& src, const std::string& dest) {
+ return SafeCallIntoPython([&]() -> Status {
+ vtable_.copy_file(handler_.obj(), src, dest);
+ return CheckPyError();
+ });
+}
+
+// Stream-opening operations: the Python handler fills in the stream pointer;
+// a raised Python exception is converted to a Status via CheckPyError.
+Result<std::shared_ptr<io::InputStream>> PyFileSystem::OpenInputStream(
+ const std::string& path) {
+ std::shared_ptr<io::InputStream> stream;
+ auto st = SafeCallIntoPython([&]() -> Status {
+ vtable_.open_input_stream(handler_.obj(), path, &stream);
+ return CheckPyError();
+ });
+ RETURN_NOT_OK(st);
+ return stream;
+}
+
+Result<std::shared_ptr<io::RandomAccessFile>> PyFileSystem::OpenInputFile(
+ const std::string& path) {
+ std::shared_ptr<io::RandomAccessFile> stream;
+ auto st = SafeCallIntoPython([&]() -> Status {
+ vtable_.open_input_file(handler_.obj(), path, &stream);
+ return CheckPyError();
+ });
+ RETURN_NOT_OK(st);
+ return stream;
+}
+
+Result<std::shared_ptr<io::OutputStream>> PyFileSystem::OpenOutputStream(
+ const std::string& path, const std::shared_ptr<const KeyValueMetadata>& metadata) {
+ std::shared_ptr<io::OutputStream> stream;
+ auto st = SafeCallIntoPython([&]() -> Status {
+ vtable_.open_output_stream(handler_.obj(), path, metadata, &stream);
+ return CheckPyError();
+ });
+ RETURN_NOT_OK(st);
+ return stream;
+}
+
+Result<std::shared_ptr<io::OutputStream>> PyFileSystem::OpenAppendStream(
+ const std::string& path, const std::shared_ptr<const KeyValueMetadata>& metadata) {
+ std::shared_ptr<io::OutputStream> stream;
+ auto st = SafeCallIntoPython([&]() -> Status {
+ vtable_.open_append_stream(handler_.obj(), path, metadata, &stream);
+ return CheckPyError();
+ });
+ RETURN_NOT_OK(st);
+ return stream;
+}
+
+// Ask the Python handler to normalize `path`; errors propagate as Status.
+Result<std::string> PyFileSystem::NormalizePath(std::string path) {
+ std::string normalized;
+ auto st = SafeCallIntoPython([&]() -> Status {
+ vtable_.normalize_path(handler_.obj(), path, &normalized);
+ return CheckPyError();
+ });
+ RETURN_NOT_OK(st);
+ return normalized;
+}
+
+} // namespace fs
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/filesystem.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/filesystem.h
new file mode 100644
index 0000000000..e1235f8de5
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/filesystem.h
@@ -0,0 +1,126 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "arrow/filesystem/filesystem.h"
+#include "arrow/python/common.h"
+#include "arrow/python/visibility.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+namespace py {
+namespace fs {
+
+// Table of callbacks implementing FileSystem operations in Python. Each
+// callback receives the Python handler object as its first argument; a
+// failure is signaled by setting the Python error indicator, which the
+// calling code converts to a Status (see filesystem.cc).
+class ARROW_PYTHON_EXPORT PyFileSystemVtable {
+ public:
+ std::function<void(PyObject*, std::string* out)> get_type_name;
+ std::function<bool(PyObject*, const arrow::fs::FileSystem& other)> equals;
+
+ // File-info queries (single path, path list, selector).
+ std::function<void(PyObject*, const std::string& path, arrow::fs::FileInfo* out)>
+ get_file_info;
+ std::function<void(PyObject*, const std::vector<std::string>& paths,
+ std::vector<arrow::fs::FileInfo>* out)>
+ get_file_info_vector;
+ std::function<void(PyObject*, const arrow::fs::FileSelector&,
+ std::vector<arrow::fs::FileInfo>* out)>
+ get_file_info_selector;
+
+ // Mutating operations.
+ std::function<void(PyObject*, const std::string& path, bool)> create_dir;
+ std::function<void(PyObject*, const std::string& path)> delete_dir;
+ std::function<void(PyObject*, const std::string& path)> delete_dir_contents;
+ std::function<void(PyObject*)> delete_root_dir_contents;
+ std::function<void(PyObject*, const std::string& path)> delete_file;
+ std::function<void(PyObject*, const std::string& src, const std::string& dest)> move;
+ std::function<void(PyObject*, const std::string& src, const std::string& dest)>
+ copy_file;
+
+ // Stream factories.
+ std::function<void(PyObject*, const std::string& path,
+ std::shared_ptr<io::InputStream>* out)>
+ open_input_stream;
+ std::function<void(PyObject*, const std::string& path,
+ std::shared_ptr<io::RandomAccessFile>* out)>
+ open_input_file;
+ std::function<void(PyObject*, const std::string& path,
+ const std::shared_ptr<const KeyValueMetadata>&,
+ std::shared_ptr<io::OutputStream>* out)>
+ open_output_stream;
+ std::function<void(PyObject*, const std::string& path,
+ const std::shared_ptr<const KeyValueMetadata>&,
+ std::shared_ptr<io::OutputStream>* out)>
+ open_append_stream;
+
+ std::function<void(PyObject*, const std::string& path, std::string* out)>
+ normalize_path;
+};
+
+// A FileSystem implementation that forwards every operation to a Python
+// object (`handler`) through the callbacks in a PyFileSystemVtable.
+class ARROW_PYTHON_EXPORT PyFileSystem : public arrow::fs::FileSystem {
+ public:
+ PyFileSystem(PyObject* handler, PyFileSystemVtable vtable);
+ ~PyFileSystem() override;
+
+ static std::shared_ptr<PyFileSystem> Make(PyObject* handler, PyFileSystemVtable vtable);
+
+ std::string type_name() const override;
+
+ bool Equals(const FileSystem& other) const override;
+
+ Result<arrow::fs::FileInfo> GetFileInfo(const std::string& path) override;
+ Result<std::vector<arrow::fs::FileInfo>> GetFileInfo(
+ const std::vector<std::string>& paths) override;
+ Result<std::vector<arrow::fs::FileInfo>> GetFileInfo(
+ const arrow::fs::FileSelector& select) override;
+
+ Status CreateDir(const std::string& path, bool recursive = true) override;
+
+ Status DeleteDir(const std::string& path) override;
+ Status DeleteDirContents(const std::string& path) override;
+ Status DeleteRootDirContents() override;
+
+ Status DeleteFile(const std::string& path) override;
+
+ Status Move(const std::string& src, const std::string& dest) override;
+
+ Status CopyFile(const std::string& src, const std::string& dest) override;
+
+ Result<std::shared_ptr<io::InputStream>> OpenInputStream(
+ const std::string& path) override;
+ Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
+ const std::string& path) override;
+ Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
+ const std::string& path,
+ const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;
+ Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
+ const std::string& path,
+ const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;
+
+ Result<std::string> NormalizePath(std::string path) override;
+
+ // Borrowed pointer to the underlying Python handler object.
+ PyObject* handler() const { return handler_.obj(); }
+
+ private:
+ OwnedRefNoGIL handler_;
+ PyFileSystemVtable vtable_;
+};
+
+} // namespace fs
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/helpers.cc b/contrib/libs/apache/arrow/cpp/src/arrow/python/helpers.cc
new file mode 100644
index 0000000000..75a77c640b
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/helpers.cc
@@ -0,0 +1,436 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// helpers.h includes a NumPy header, so we include this first
+#include "arrow/python/numpy_interop.h"
+
+#include "arrow/python/helpers.h"
+
+#include <cmath>
+#include <limits>
+#include <sstream>
+#include <type_traits>
+
+#include "arrow/python/common.h"
+#include "arrow/python/decimal.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/logging.h"
+
+namespace arrow {
+
+using internal::checked_cast;
+
+namespace py {
+
+// Expands to a switch case that maps a Type::type enum value to the
+// corresponding parameterless type-factory function.
+#define GET_PRIMITIVE_TYPE(NAME, FACTORY) \
+ case Type::NAME: \
+ return FACTORY()
+
+// Return the DataType singleton for a primitive Type::type value, or nullptr
+// for type ids not covered here (e.g. parametric types, which need
+// parameters to construct).
+std::shared_ptr<DataType> GetPrimitiveType(Type::type type) {
+ switch (type) {
+ case Type::NA:
+ return null();
+ GET_PRIMITIVE_TYPE(UINT8, uint8);
+ GET_PRIMITIVE_TYPE(INT8, int8);
+ GET_PRIMITIVE_TYPE(UINT16, uint16);
+ GET_PRIMITIVE_TYPE(INT16, int16);
+ GET_PRIMITIVE_TYPE(UINT32, uint32);
+ GET_PRIMITIVE_TYPE(INT32, int32);
+ GET_PRIMITIVE_TYPE(UINT64, uint64);
+ GET_PRIMITIVE_TYPE(INT64, int64);
+ GET_PRIMITIVE_TYPE(DATE32, date32);
+ GET_PRIMITIVE_TYPE(DATE64, date64);
+ GET_PRIMITIVE_TYPE(BOOL, boolean);
+ GET_PRIMITIVE_TYPE(HALF_FLOAT, float16);
+ GET_PRIMITIVE_TYPE(FLOAT, float32);
+ GET_PRIMITIVE_TYPE(DOUBLE, float64);
+ GET_PRIMITIVE_TYPE(BINARY, binary);
+ GET_PRIMITIVE_TYPE(STRING, utf8);
+ GET_PRIMITIVE_TYPE(LARGE_BINARY, large_binary);
+ GET_PRIMITIVE_TYPE(LARGE_STRING, large_utf8);
+ default:
+ return nullptr;
+ }
+}
+
+// Construct a NumPy np.float16 scalar object from a raw npy_half value.
+// Returns nullptr with a Python error set on allocation failure.
+PyObject* PyHalf_FromHalf(npy_half value) {
+ PyObject* result = PyArrayScalar_New(Half);
+ if (result != NULL) {
+ PyArrayScalar_ASSIGN(result, Half, value);
+ }
+ return result;
+}
+
+// Extract the raw npy_half from a np.float16 scalar. Only an actual NumPy
+// Half scalar is accepted; no conversion from Python floats is attempted.
+Status PyFloat_AsHalf(PyObject* obj, npy_half* out) {
+ if (PyArray_IsScalar(obj, Half)) {
+ *out = PyArrayScalar_VAL(obj, Half);
+ return Status::OK();
+ } else {
+ // XXX: cannot use npy_double_to_half() without linking with Numpy
+ return Status::TypeError("Expected np.float16 instance");
+ }
+}
+
+namespace internal {
+
+// Copy a Python bytes object into a std::string (bytes-ness checked only in
+// debug builds).
+std::string PyBytes_AsStdString(PyObject* obj) {
+ DCHECK(PyBytes_Check(obj));
+ return std::string(PyBytes_AS_STRING(obj), PyBytes_GET_SIZE(obj));
+}
+
+// UTF-8 encode a unicode object into *out; propagates Python errors.
+Status PyUnicode_AsStdString(PyObject* obj, std::string* out) {
+ DCHECK(PyUnicode_Check(obj));
+ Py_ssize_t size;
+ // The utf-8 representation is cached on the unicode object
+ const char* data = PyUnicode_AsUTF8AndSize(obj, &size);
+ RETURN_IF_PYERROR();
+ *out = std::string(data, size);
+ return Status::OK();
+}
+
+// repr() of an arbitrary object, never failing: on any error a placeholder
+// string is returned and the Python error indicator is cleared.
+std::string PyObject_StdStringRepr(PyObject* obj) {
+ OwnedRef unicode_ref(PyObject_Repr(obj));
+ OwnedRef bytes_ref;
+
+ if (unicode_ref) {
+ bytes_ref.reset(
+ PyUnicode_AsEncodedString(unicode_ref.obj(), "utf8", "backslashreplace"));
+ }
+ if (!bytes_ref) {
+ PyErr_Clear();
+ std::stringstream ss;
+ ss << "<object of type '" << Py_TYPE(obj)->tp_name << "' repr() failed>";
+ return ss.str();
+ }
+ return PyBytes_AsStdString(bytes_ref.obj());
+}
+
+// str() of an arbitrary object; propagates Python errors as Status.
+Status PyObject_StdStringStr(PyObject* obj, std::string* out) {
+ OwnedRef string_ref(PyObject_Str(obj));
+ RETURN_IF_PYERROR();
+ return PyUnicode_AsStdString(string_ref.obj(), out);
+}
+
+// Check whether `module_name` is already present in sys.modules, without
+// triggering an import.
+Result<bool> IsModuleImported(const std::string& module_name) {
+ // PyImport_GetModuleDict returns with a borrowed reference
+ OwnedRef key(PyUnicode_FromString(module_name.c_str()));
+ auto is_imported = PyDict_Contains(PyImport_GetModuleDict(), key.obj());
+ RETURN_IF_PYERROR();
+ return is_imported;
+}
+
+// Import `module_name` and store a new reference to it in *ref.
+Status ImportModule(const std::string& module_name, OwnedRef* ref) {
+ PyObject* module = PyImport_ImportModule(module_name.c_str());
+ RETURN_IF_PYERROR();
+ ref->reset(module);
+ return Status::OK();
+}
+
+// Fetch attribute `name` from `module` and store a new reference in *ref.
+Status ImportFromModule(PyObject* module, const std::string& name, OwnedRef* ref) {
+ PyObject* attr = PyObject_GetAttrString(module, name.c_str());
+ RETURN_IF_PYERROR();
+ ref->reset(attr);
+ return Status::OK();
+}
+
+namespace {
+
+// Build the overflow error Status: use the caller-provided message if any,
+// otherwise embed the offending value's str() in a generic message.
+Status IntegerOverflowStatus(PyObject* obj, const std::string& overflow_message) {
+ if (overflow_message.empty()) {
+ std::string obj_as_stdstring;
+ RETURN_NOT_OK(PyObject_StdStringStr(obj, &obj_as_stdstring));
+ return Status::Invalid("Value ", obj_as_stdstring,
+ " too large to fit in C integer type");
+ } else {
+ return Status::Invalid(overflow_message);
+ }
+}
+
+// Extract C signed int from Python object
+template <typename Int, enable_if_t<std::is_signed<Int>::value, Int> = 0>
+Status CIntFromPythonImpl(PyObject* obj, Int* out, const std::string& overflow_message) {
+ static_assert(sizeof(Int) <= sizeof(long long), // NOLINT
+ "integer type larger than long long");
+
+ // Use the narrowest CPython conversion that can hold Int, then range-check.
+ if (sizeof(Int) > sizeof(long)) { // NOLINT
+ const auto value = PyLong_AsLongLong(obj);
+ // -1 is both a legitimate result and the CPython error sentinel; the
+ // error indicator check tells them apart.
+ if (ARROW_PREDICT_FALSE(value == -1)) {
+ RETURN_IF_PYERROR();
+ }
+ if (ARROW_PREDICT_FALSE(value < std::numeric_limits<Int>::min() ||
+ value > std::numeric_limits<Int>::max())) {
+ return IntegerOverflowStatus(obj, overflow_message);
+ }
+ *out = static_cast<Int>(value);
+ } else {
+ const auto value = PyLong_AsLong(obj);
+ if (ARROW_PREDICT_FALSE(value == -1)) {
+ RETURN_IF_PYERROR();
+ }
+ if (ARROW_PREDICT_FALSE(value < std::numeric_limits<Int>::min() ||
+ value > std::numeric_limits<Int>::max())) {
+ return IntegerOverflowStatus(obj, overflow_message);
+ }
+ *out = static_cast<Int>(value);
+ }
+ return Status::OK();
+}
+
+// Extract C unsigned int from Python object
+template <typename Int, enable_if_t<std::is_unsigned<Int>::value, Int> = 0>
+Status CIntFromPythonImpl(PyObject* obj, Int* out, const std::string& overflow_message) {
+ static_assert(sizeof(Int) <= sizeof(unsigned long long), // NOLINT
+ "integer type larger than unsigned long long");
+
+ OwnedRef ref;
+ // PyLong_AsUnsignedLong() and PyLong_AsUnsignedLongLong() don't handle
+ // conversion from non-ints (e.g. np.uint64), so do it ourselves
+ if (!PyLong_Check(obj)) {
+ // __index__() coerces integer-like objects to an exact Python int.
+ ref.reset(PyNumber_Index(obj));
+ if (!ref) {
+ RETURN_IF_PYERROR();
+ }
+ obj = ref.obj();
+ }
+ if (sizeof(Int) > sizeof(unsigned long)) { // NOLINT
+ const auto value = PyLong_AsUnsignedLongLong(obj);
+ // (Int)-1 is the CPython error sentinel; check the error indicator to
+ // distinguish it from the genuine maximum value.
+ if (ARROW_PREDICT_FALSE(value == static_cast<decltype(value)>(-1))) {
+ RETURN_IF_PYERROR();
+ }
+ if (ARROW_PREDICT_FALSE(value > std::numeric_limits<Int>::max())) {
+ return IntegerOverflowStatus(obj, overflow_message);
+ }
+ *out = static_cast<Int>(value);
+ } else {
+ const auto value = PyLong_AsUnsignedLong(obj);
+ if (ARROW_PREDICT_FALSE(value == static_cast<decltype(value)>(-1))) {
+ RETURN_IF_PYERROR();
+ }
+ if (ARROW_PREDICT_FALSE(value > std::numeric_limits<Int>::max())) {
+ return IntegerOverflowStatus(obj, overflow_message);
+ }
+ *out = static_cast<Int>(value);
+ }
+ return Status::OK();
+}
+
+} // namespace
+
+// Convert a Python integer-like object to a C integer of type Int, rejecting
+// bools and reporting overflow (with `overflow_message` if non-empty).
+template <typename Int>
+Status CIntFromPython(PyObject* obj, Int* out, const std::string& overflow_message) {
+ // bool is a subclass of int in Python, so it must be rejected explicitly.
+ if (PyBool_Check(obj)) {
+ return Status::TypeError("Expected integer, got bool");
+ }
+ return CIntFromPythonImpl(obj, out, overflow_message);
+}
+
+// Explicit instantiations for all fixed-width integer types used by Arrow.
+template Status CIntFromPython(PyObject*, int8_t*, const std::string&);
+template Status CIntFromPython(PyObject*, int16_t*, const std::string&);
+template Status CIntFromPython(PyObject*, int32_t*, const std::string&);
+template Status CIntFromPython(PyObject*, int64_t*, const std::string&);
+template Status CIntFromPython(PyObject*, uint8_t*, const std::string&);
+template Status CIntFromPython(PyObject*, uint16_t*, const std::string&);
+template Status CIntFromPython(PyObject*, uint32_t*, const std::string&);
+template Status CIntFromPython(PyObject*, uint64_t*, const std::string&);
+
+// Quick pre-filter: returns false for core Python types that can never be a
+// NaN-like value, letting callers skip the more expensive checks.
+inline bool MayHaveNaN(PyObject* obj) {
+ // Some core types can be very quickly type-checked and do not allow NaN values
+ const int64_t non_nan_tpflags = Py_TPFLAGS_LONG_SUBCLASS | Py_TPFLAGS_LIST_SUBCLASS |
+ Py_TPFLAGS_TUPLE_SUBCLASS | Py_TPFLAGS_BYTES_SUBCLASS |
+ Py_TPFLAGS_UNICODE_SUBCLASS | Py_TPFLAGS_DICT_SUBCLASS |
+ Py_TPFLAGS_BASE_EXC_SUBCLASS | Py_TPFLAGS_TYPE_SUBCLASS;
+ return !PyType_HasFeature(Py_TYPE(obj), non_nan_tpflags);
+}
+
+// True if `obj` is a Python float whose value is NaN.
+bool PyFloat_IsNaN(PyObject* obj) {
+ return PyFloat_Check(obj) && std::isnan(PyFloat_AsDouble(obj));
+}
+
+namespace {
+
+static bool pandas_static_initialized = false;
+
+// Once initialized, these variables hold borrowed references to Pandas static data.
+// We should not use OwnedRef here because Python destructors would be
+// called on a finalized interpreter.
+// Filled lazily by InitPandasStaticData(); null until then (or if pandas is
+// not importable).
+static PyObject* pandas_NA = nullptr;
+static PyObject* pandas_NaT = nullptr;
+static PyObject* pandas_Timedelta = nullptr;
+static PyObject* pandas_Timestamp = nullptr;
+static PyTypeObject* pandas_NaTType = nullptr;
+
+} // namespace
+
+// Best-effort capture of pandas sentinels (NaT, NA, Timedelta, Timestamp).
+// Silently a no-op if pandas cannot be imported; individual lookups that
+// fail simply leave the corresponding pointer null.
+void InitPandasStaticData() {
+ // NOTE: This is called with the GIL held. We needn't (and shouldn't,
+ // to avoid deadlocks) use an additional C++ lock (ARROW-10519).
+ if (pandas_static_initialized) {
+ return;
+ }
+
+ OwnedRef pandas;
+
+ // Import pandas
+ Status s = ImportModule("pandas", &pandas);
+ if (!s.ok()) {
+ return;
+ }
+
+ // Since ImportModule can release the GIL, another thread could have
+ // already initialized the static data.
+ if (pandas_static_initialized) {
+ return;
+ }
+ OwnedRef ref;
+
+ // set NaT sentinel and its type
+ if (ImportFromModule(pandas.obj(), "NaT", &ref).ok()) {
+ pandas_NaT = ref.obj();
+ // PyObject_Type returns a new reference but we trust that pandas.NaT will
+ // outlive our use of this PyObject*
+ pandas_NaTType = Py_TYPE(ref.obj());
+ }
+
+ // retain a reference to Timedelta
+ if (ImportFromModule(pandas.obj(), "Timedelta", &ref).ok()) {
+ pandas_Timedelta = ref.obj();
+ }
+
+ // retain a reference to Timestamp
+ if (ImportFromModule(pandas.obj(), "Timestamp", &ref).ok()) {
+ pandas_Timestamp = ref.obj();
+ }
+
+ // if pandas.NA exists, retain a reference to it
+ if (ImportFromModule(pandas.obj(), "NA", &ref).ok()) {
+ pandas_NA = ref.obj();
+ }
+
+ pandas_static_initialized = true;
+}
+
+// True if `obj` represents a null/missing value under pandas semantics:
+// None, float NaN, pandas.NA, pandas.NaT, or a decimal NaN.
+bool PandasObjectIsNull(PyObject* obj) {
+ if (!MayHaveNaN(obj)) {
+ return false;
+ }
+ if (obj == Py_None) {
+ return true;
+ }
+ if (PyFloat_IsNaN(obj) || (pandas_NA && obj == pandas_NA) ||
+ (pandas_NaTType && PyObject_TypeCheck(obj, pandas_NaTType)) ||
+ (internal::PyDecimal_Check(obj) && internal::PyDecimal_ISNAN(obj))) {
+ return true;
+ }
+ return false;
+}
+
+// NOTE(review): PyObject_IsInstance returns -1 on error, which is truthy
+// here and leaves a Python error pending — confirm callers tolerate this.
+bool IsPandasTimedelta(PyObject* obj) {
+ return pandas_Timedelta && PyObject_IsInstance(obj, pandas_Timedelta);
+}
+
+// Both predicates return false when pandas was never imported (sentinel null).
+bool IsPandasTimestamp(PyObject* obj) {
+ return pandas_Timestamp && PyObject_IsInstance(obj, pandas_Timestamp);
+}
+
+// Build a Status::Invalid describing a value that could not be converted,
+// embedding the object's repr() and type name.
+Status InvalidValue(PyObject* obj, const std::string& why) {
+ auto obj_as_str = PyObject_StdStringRepr(obj);
+ return Status::Invalid("Could not convert ", std::move(obj_as_str), " with type ",
+ Py_TYPE(obj)->tp_name, ": ", why);
+}
+
+// Same as InvalidValue, but reported as a TypeError status.
+Status InvalidType(PyObject* obj, const std::string& why) {
+ auto obj_as_str = PyObject_StdStringRepr(obj);
+ return Status::TypeError("Could not convert ", std::move(obj_as_str), " with type ",
+ Py_TYPE(obj)->tp_name, ": ", why);
+}
+
+// Extract an int64 from a Python int or any NumPy integer scalar type.
+// Fails on int64 overflow or on unrecognized scalar types.
+Status UnboxIntegerAsInt64(PyObject* obj, int64_t* out) {
+ if (PyLong_Check(obj)) {
+ int overflow = 0;
+ *out = PyLong_AsLongLongAndOverflow(obj, &overflow);
+ if (overflow) {
+ return Status::Invalid("PyLong is too large to fit int64");
+ }
+ } else if (PyArray_IsScalar(obj, Byte)) {
+ *out = reinterpret_cast<PyByteScalarObject*>(obj)->obval;
+ } else if (PyArray_IsScalar(obj, UByte)) {
+ *out = reinterpret_cast<PyUByteScalarObject*>(obj)->obval;
+ } else if (PyArray_IsScalar(obj, Short)) {
+ *out = reinterpret_cast<PyShortScalarObject*>(obj)->obval;
+ } else if (PyArray_IsScalar(obj, UShort)) {
+ *out = reinterpret_cast<PyUShortScalarObject*>(obj)->obval;
+ } else if (PyArray_IsScalar(obj, Int)) {
+ *out = reinterpret_cast<PyIntScalarObject*>(obj)->obval;
+ } else if (PyArray_IsScalar(obj, UInt)) {
+ *out = reinterpret_cast<PyUIntScalarObject*>(obj)->obval;
+ } else if (PyArray_IsScalar(obj, Long)) {
+ *out = reinterpret_cast<PyLongScalarObject*>(obj)->obval;
+ } else if (PyArray_IsScalar(obj, ULong)) {
+ *out = reinterpret_cast<PyULongScalarObject*>(obj)->obval;
+ } else if (PyArray_IsScalar(obj, LongLong)) {
+ *out = reinterpret_cast<PyLongLongScalarObject*>(obj)->obval;
+ // Int64/UInt64 are checked in addition to LongLong/ULongLong —
+ // presumably distinct scalar types on some platforms; TODO confirm.
+ } else if (PyArray_IsScalar(obj, Int64)) {
+ *out = reinterpret_cast<PyInt64ScalarObject*>(obj)->obval;
+ } else if (PyArray_IsScalar(obj, ULongLong)) {
+ *out = reinterpret_cast<PyULongLongScalarObject*>(obj)->obval;
+ } else if (PyArray_IsScalar(obj, UInt64)) {
+ *out = reinterpret_cast<PyUInt64ScalarObject*>(obj)->obval;
+ } else {
+ return Status::Invalid("Integer scalar type not recognized");
+ }
+ return Status::OK();
+}
+
+// Convert an integer scalar to double, failing if it falls outside
+// [-2^53, 2^53] — the range in which a double represents integers exactly.
+Status IntegerScalarToDoubleSafe(PyObject* obj, double* out) {
+ int64_t value = 0;
+ RETURN_NOT_OK(UnboxIntegerAsInt64(obj, &value));
+
+ constexpr int64_t kDoubleMax = 1LL << 53;
+ constexpr int64_t kDoubleMin = -(1LL << 53);
+
+ if (value < kDoubleMin || value > kDoubleMax) {
+ return Status::Invalid("Integer value ", value, " is outside of the range exactly",
+ " representable by a IEEE 754 double precision value");
+ }
+ *out = static_cast<double>(value);
+ return Status::OK();
+}
+
+// Same, for single precision: integers are exact within [-2^24, 2^24].
+Status IntegerScalarToFloat32Safe(PyObject* obj, float* out) {
+ int64_t value = 0;
+ RETURN_NOT_OK(UnboxIntegerAsInt64(obj, &value));
+
+ constexpr int64_t kFloatMax = 1LL << 24;
+ constexpr int64_t kFloatMin = -(1LL << 24);
+
+ if (value < kFloatMin || value > kFloatMax) {
+ return Status::Invalid("Integer value ", value, " is outside of the range exactly",
+ " representable by a IEEE 754 single precision value");
+ }
+ *out = static_cast<float>(value);
+ return Status::OK();
+}
+
+// Write repr(obj) to sys.stderr; debugging aid only.
+void DebugPrint(PyObject* obj) {
+ std::string repr = PyObject_StdStringRepr(obj);
+ PySys_WriteStderr("%s\n", repr.c_str());
+}
+
+} // namespace internal
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/helpers.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/helpers.h
new file mode 100644
index 0000000000..19288756c0
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/helpers.h
@@ -0,0 +1,156 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/python/platform.h"
+
+#include <limits>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "arrow/python/numpy_interop.h"
+
+#include <numpy/halffloat.h>
+
+#include "arrow/python/visibility.h"
+#include "arrow/type.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+
+namespace py {
+
+class OwnedRef;
+
+// \brief Get an arrow DataType instance from Arrow's Type::type enum
+// \param[in] type One of the values of Arrow's Type::type enum
+// \return A shared pointer to DataType
+ARROW_PYTHON_EXPORT std::shared_ptr<DataType> GetPrimitiveType(Type::type type);
+
+// \brief Construct a np.float16 object from a npy_half value.
+ARROW_PYTHON_EXPORT PyObject* PyHalf_FromHalf(npy_half value);
+
+// \brief Convert a Python object to a npy_half value.
+ARROW_PYTHON_EXPORT Status PyFloat_AsHalf(PyObject* obj, npy_half* out);
+
+namespace internal {
+
+// \brief Check that a Python module has been already imported
+// \param[in] module_name The name of the module
+Result<bool> IsModuleImported(const std::string& module_name);
+
+// \brief Import a Python module
+// \param[in] module_name The name of the module
+// \param[out] ref The OwnedRef containing the module PyObject*
+ARROW_PYTHON_EXPORT
+Status ImportModule(const std::string& module_name, OwnedRef* ref);
+
+// \brief Import an object from a Python module
+// \param[in] module A Python module
+// \param[in] name The name of the object to import
+// \param[out] ref The OwnedRef containing the \c name attribute of the Python module \c
+// module
+ARROW_PYTHON_EXPORT
+Status ImportFromModule(PyObject* module, const std::string& name, OwnedRef* ref);
+
+// \brief Check whether obj is a Python integer, independent of Python
+// versions (delegates to PyLong_Check).
+inline bool IsPyInteger(PyObject* obj) { return PyLong_Check(obj); }
+
+// \brief Import symbols from pandas that we need for various type-checking,
+// like pandas.NaT or pandas.NA
+void InitPandasStaticData();
+
+// \brief Use pandas missing value semantics to check if a value is null
+ARROW_PYTHON_EXPORT
+bool PandasObjectIsNull(PyObject* obj);
+
+// \brief Check that obj is a pandas.Timedelta instance
+ARROW_PYTHON_EXPORT
+bool IsPandasTimedelta(PyObject* obj);
+
+// \brief Check that obj is a pandas.Timestamp instance
+bool IsPandasTimestamp(PyObject* obj);
+
+// \brief Check whether obj is a floating-point NaN
+ARROW_PYTHON_EXPORT
+bool PyFloat_IsNaN(PyObject* obj);
+
+// \brief Check whether obj is a binary-like Python object: bytes,
+// bytearray or memoryview.
+inline bool IsPyBinary(PyObject* obj) {
+  return PyBytes_Check(obj) || PyByteArray_Check(obj) || PyMemoryView_Check(obj);
+}
+
+// \brief Convert a Python integer into a C integer
+// \param[in] obj A Python integer
+// \param[out] out A pointer to a C integer to hold the result of the conversion
+// \return The status of the operation
+template <typename Int>
+Status CIntFromPython(PyObject* obj, Int* out, const std::string& overflow_message = "");
+
+// \brief Convert a Python unicode string to a std::string
+ARROW_PYTHON_EXPORT
+Status PyUnicode_AsStdString(PyObject* obj, std::string* out);
+
+// \brief Convert a Python bytes object to a std::string
+ARROW_PYTHON_EXPORT
+std::string PyBytes_AsStdString(PyObject* obj);
+
+// \brief Call str() on the given object and return the result as a std::string
+ARROW_PYTHON_EXPORT
+Status PyObject_StdStringStr(PyObject* obj, std::string* out);
+
+// \brief Return the repr() of the given object (always succeeds)
+ARROW_PYTHON_EXPORT
+std::string PyObject_StdStringRepr(PyObject* obj);
+
+// \brief Cast the given size to int32_t, with error checking
+inline Status CastSize(Py_ssize_t size, int32_t* out,
+                       const char* error_msg = "Maximum size exceeded (2GB)") {
+  // size is assumed to be positive
+  if (size <= std::numeric_limits<int32_t>::max()) {
+    *out = static_cast<int32_t>(size);
+    return Status::OK();
+  }
+  return Status::Invalid(error_msg);
+}
+
+// \brief Cast the given size to int64_t. This overload cannot fail
+// (int64_t is assumed at least as wide as Py_ssize_t); error_msg exists
+// only for signature symmetry with the int32_t overload and is unused.
+inline Status CastSize(Py_ssize_t size, int64_t* out, const char* error_msg = NULLPTR) {
+  // size is assumed to be positive
+  *out = static_cast<int64_t>(size);
+  return Status::OK();
+}
+
+// \brief Print the Python object's __str__ form along with the passed error
+// message
+ARROW_PYTHON_EXPORT
+Status InvalidValue(PyObject* obj, const std::string& why);
+
+ARROW_PYTHON_EXPORT
+Status InvalidType(PyObject* obj, const std::string& why);
+
+ARROW_PYTHON_EXPORT
+Status IntegerScalarToDoubleSafe(PyObject* obj, double* result);
+ARROW_PYTHON_EXPORT
+Status IntegerScalarToFloat32Safe(PyObject* obj, float* result);
+
+// \brief Print Python object __repr__
+void DebugPrint(PyObject* obj);
+
+} // namespace internal
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/inference.cc b/contrib/libs/apache/arrow/cpp/src/arrow/python/inference.cc
new file mode 100644
index 0000000000..5086815f84
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/inference.cc
@@ -0,0 +1,660 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/python/inference.h"
+#include "arrow/python/numpy_interop.h"
+
+#include <datetime.h>
+
+#include <algorithm>
+#include <limits>
+#include <map>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "arrow/status.h"
+#include "arrow/util/decimal.h"
+#include "arrow/util/logging.h"
+
+#include "arrow/python/datetime.h"
+#include "arrow/python/decimal.h"
+#include "arrow/python/helpers.h"
+#include "arrow/python/iterators.h"
+#include "arrow/python/numpy_convert.h"
+
+namespace arrow {
+namespace py {
+
+#define _NUMPY_UNIFY_NOOP(DTYPE) \
+ case NPY_##DTYPE: \
+ return OK;
+
+#define _NUMPY_UNIFY_PROMOTE(DTYPE) \
+ case NPY_##DTYPE: \
+ current_type_num_ = dtype; \
+ current_dtype_ = descr; \
+ return OK;
+
+#define _NUMPY_UNIFY_PROMOTE_TO(DTYPE, NEW_TYPE) \
+ case NPY_##DTYPE: \
+ current_type_num_ = NPY_##NEW_TYPE; \
+ current_dtype_ = PyArray_DescrFromType(current_type_num_); \
+ return OK;
+
+// Form a consensus NumPy dtype to use for Arrow conversion for a
+// collection of dtype objects observed one at a time
+class NumPyDtypeUnifier {
+ public:
+  enum Action { OK, INVALID };
+
+  // Starts with no dtype observed (current_type_num_ == -1 is the sentinel).
+  NumPyDtypeUnifier() : current_type_num_(-1), current_dtype_(nullptr) {}
+
+  // Build an error Status describing a dtype combination that cannot be
+  // unified (e.g. signed mixed with unsigned of insufficient width).
+  Status InvalidMix(int new_dtype) {
+    return Status::Invalid("Cannot mix NumPy dtypes ",
+                           GetNumPyTypeName(current_type_num_), " and ",
+                           GetNumPyTypeName(new_dtype));
+  }
+
+  // Each Observe_<CURRENT> method below encodes one row of the promotion
+  // matrix: given the currently unified dtype (the method name) and a newly
+  // observed dtype, keep the current choice (_NOOP), adopt the observed
+  // dtype (_PROMOTE), switch to a wider common type (_PROMOTE_TO), or report
+  // an unsupported mix (INVALID). Booleans unify with nothing else.
+  int Observe_BOOL(PyArray_Descr* descr, int dtype) { return INVALID; }
+
+  int Observe_INT8(PyArray_Descr* descr, int dtype) {
+    switch (dtype) {
+      _NUMPY_UNIFY_PROMOTE(INT16);
+      _NUMPY_UNIFY_PROMOTE(INT32);
+      _NUMPY_UNIFY_PROMOTE(INT64);
+      _NUMPY_UNIFY_PROMOTE(FLOAT32);
+      _NUMPY_UNIFY_PROMOTE(FLOAT64);
+      default:
+        return INVALID;
+    }
+  }
+
+  int Observe_INT16(PyArray_Descr* descr, int dtype) {
+    switch (dtype) {
+      _NUMPY_UNIFY_NOOP(INT8);
+      _NUMPY_UNIFY_PROMOTE(INT32);
+      _NUMPY_UNIFY_PROMOTE(INT64);
+      _NUMPY_UNIFY_NOOP(UINT8);
+      _NUMPY_UNIFY_PROMOTE(FLOAT32);
+      _NUMPY_UNIFY_PROMOTE(FLOAT64);
+      default:
+        return INVALID;
+    }
+  }
+
+  int Observe_INT32(PyArray_Descr* descr, int dtype) {
+    switch (dtype) {
+      _NUMPY_UNIFY_NOOP(INT8);
+      _NUMPY_UNIFY_NOOP(INT16);
+      _NUMPY_UNIFY_PROMOTE(INT32);
+      _NUMPY_UNIFY_PROMOTE(INT64);
+      _NUMPY_UNIFY_NOOP(UINT8);
+      _NUMPY_UNIFY_NOOP(UINT16);
+      _NUMPY_UNIFY_PROMOTE_TO(FLOAT32, FLOAT64);
+      _NUMPY_UNIFY_PROMOTE(FLOAT64);
+      default:
+        return INVALID;
+    }
+  }
+
+  int Observe_INT64(PyArray_Descr* descr, int dtype) {
+    switch (dtype) {
+      _NUMPY_UNIFY_NOOP(INT8);
+      _NUMPY_UNIFY_NOOP(INT16);
+      _NUMPY_UNIFY_NOOP(INT32);
+      _NUMPY_UNIFY_NOOP(INT64);
+      _NUMPY_UNIFY_NOOP(UINT8);
+      _NUMPY_UNIFY_NOOP(UINT16);
+      _NUMPY_UNIFY_NOOP(UINT32);
+      _NUMPY_UNIFY_PROMOTE_TO(FLOAT32, FLOAT64);
+      _NUMPY_UNIFY_PROMOTE(FLOAT64);
+      default:
+        return INVALID;
+    }
+  }
+
+  int Observe_UINT8(PyArray_Descr* descr, int dtype) {
+    switch (dtype) {
+      _NUMPY_UNIFY_PROMOTE(UINT16);
+      _NUMPY_UNIFY_PROMOTE(UINT32);
+      _NUMPY_UNIFY_PROMOTE(UINT64);
+      _NUMPY_UNIFY_PROMOTE(FLOAT32);
+      _NUMPY_UNIFY_PROMOTE(FLOAT64);
+      default:
+        return INVALID;
+    }
+  }
+
+  int Observe_UINT16(PyArray_Descr* descr, int dtype) {
+    switch (dtype) {
+      _NUMPY_UNIFY_NOOP(UINT8);
+      _NUMPY_UNIFY_PROMOTE(UINT32);
+      _NUMPY_UNIFY_PROMOTE(UINT64);
+      _NUMPY_UNIFY_PROMOTE(FLOAT32);
+      _NUMPY_UNIFY_PROMOTE(FLOAT64);
+      default:
+        return INVALID;
+    }
+  }
+
+  int Observe_UINT32(PyArray_Descr* descr, int dtype) {
+    switch (dtype) {
+      _NUMPY_UNIFY_NOOP(UINT8);
+      _NUMPY_UNIFY_NOOP(UINT16);
+      _NUMPY_UNIFY_PROMOTE(UINT64);
+      _NUMPY_UNIFY_PROMOTE_TO(FLOAT32, FLOAT64);
+      _NUMPY_UNIFY_PROMOTE(FLOAT64);
+      default:
+        return INVALID;
+    }
+  }
+
+  int Observe_UINT64(PyArray_Descr* descr, int dtype) {
+    switch (dtype) {
+      _NUMPY_UNIFY_NOOP(UINT8);
+      _NUMPY_UNIFY_NOOP(UINT16);
+      _NUMPY_UNIFY_NOOP(UINT32);
+      _NUMPY_UNIFY_PROMOTE_TO(FLOAT32, FLOAT64);
+      _NUMPY_UNIFY_PROMOTE(FLOAT64);
+      default:
+        return INVALID;
+    }
+  }
+
+  int Observe_FLOAT16(PyArray_Descr* descr, int dtype) {
+    switch (dtype) {
+      _NUMPY_UNIFY_PROMOTE(FLOAT32);
+      _NUMPY_UNIFY_PROMOTE(FLOAT64);
+      default:
+        return INVALID;
+    }
+  }
+
+  int Observe_FLOAT32(PyArray_Descr* descr, int dtype) {
+    switch (dtype) {
+      _NUMPY_UNIFY_NOOP(INT8);
+      _NUMPY_UNIFY_NOOP(INT16);
+      _NUMPY_UNIFY_NOOP(INT32);
+      _NUMPY_UNIFY_NOOP(INT64);
+      _NUMPY_UNIFY_NOOP(UINT8);
+      _NUMPY_UNIFY_NOOP(UINT16);
+      _NUMPY_UNIFY_NOOP(UINT32);
+      _NUMPY_UNIFY_NOOP(UINT64);
+      _NUMPY_UNIFY_PROMOTE(FLOAT64);
+      default:
+        return INVALID;
+    }
+  }
+
+  int Observe_FLOAT64(PyArray_Descr* descr, int dtype) {
+    switch (dtype) {
+      _NUMPY_UNIFY_NOOP(INT8);
+      _NUMPY_UNIFY_NOOP(INT16);
+      _NUMPY_UNIFY_NOOP(INT32);
+      _NUMPY_UNIFY_NOOP(INT64);
+      _NUMPY_UNIFY_NOOP(UINT8);
+      _NUMPY_UNIFY_NOOP(UINT16);
+      _NUMPY_UNIFY_NOOP(UINT32);
+      _NUMPY_UNIFY_NOOP(UINT64);
+      default:
+        return INVALID;
+    }
+  }
+
+  int Observe_DATETIME(PyArray_Descr* dtype_obj) {
+    // TODO: check that units are all the same
+    return OK;
+  }
+
+  // Fold one more observed dtype into the running consensus. The first
+  // observation is adopted verbatim; later ones dispatch into the
+  // promotion matrix above.
+  Status Observe(PyArray_Descr* descr) {
+    int dtype = fix_numpy_type_num(descr->type_num);
+
+    if (current_type_num_ == -1) {
+      current_dtype_ = descr;
+      current_type_num_ = dtype;
+      return Status::OK();
+    } else if (current_type_num_ == dtype) {
+      return Status::OK();
+    }
+
+#define OBSERVE_CASE(DTYPE)                 \
+  case NPY_##DTYPE:                         \
+    action = Observe_##DTYPE(descr, dtype); \
+    break;
+
+    int action = OK;
+    switch (current_type_num_) {
+      OBSERVE_CASE(BOOL);
+      OBSERVE_CASE(INT8);
+      OBSERVE_CASE(INT16);
+      OBSERVE_CASE(INT32);
+      OBSERVE_CASE(INT64);
+      OBSERVE_CASE(UINT8);
+      OBSERVE_CASE(UINT16);
+      OBSERVE_CASE(UINT32);
+      OBSERVE_CASE(UINT64);
+      OBSERVE_CASE(FLOAT16);
+      OBSERVE_CASE(FLOAT32);
+      OBSERVE_CASE(FLOAT64);
+      case NPY_DATETIME:
+        action = Observe_DATETIME(descr);
+        break;
+      default:
+        return Status::NotImplemented("Unsupported numpy type ", GetNumPyTypeName(dtype));
+    }
+
+    if (action == INVALID) {
+      return InvalidMix(dtype);
+    }
+    return Status::OK();
+  }
+
+  // True once at least one dtype has been observed.
+  bool dtype_was_observed() const { return current_type_num_ != -1; }
+
+  // NOTE(review): a non-owning (borrowed) descr pointer appears to be
+  // stored here; callers must keep the descriptor alive — confirm.
+  PyArray_Descr* current_dtype() const { return current_dtype_; }
+
+  int current_type_num() const { return current_type_num_; }
+
+ private:
+  int current_type_num_;
+  PyArray_Descr* current_dtype_;
+};
+
+class TypeInferrer {
+  // A type inference visitor for Python values
+ public:
+  // \param validate_interval the number of elements to observe before checking
+  // whether the data is mixed type or has other problems. This helps avoid
+  // excess computation for each element while also making sure we "bail out"
+  // early with long sequences that may have problems up front
+  // \param make_unions permit mixed-type data by creating union types (not yet
+  // implemented)
+  explicit TypeInferrer(bool pandas_null_sentinels = false,
+                        int64_t validate_interval = 100, bool make_unions = false)
+      : pandas_null_sentinels_(pandas_null_sentinels),
+        validate_interval_(validate_interval),
+        make_unions_(make_unions),
+        total_count_(0),
+        none_count_(0),
+        bool_count_(0),
+        int_count_(0),
+        date_count_(0),
+        time_count_(0),
+        timestamp_micro_count_(0),
+        duration_count_(0),
+        float_count_(0),
+        binary_count_(0),
+        unicode_count_(0),
+        decimal_count_(0),
+        list_count_(0),
+        struct_count_(0),
+        numpy_dtype_count_(0),
+        max_decimal_metadata_(std::numeric_limits<int32_t>::min(),
+                              std::numeric_limits<int32_t>::min()),
+        decimal_type_() {
+    ARROW_CHECK_OK(internal::ImportDecimalType(&decimal_type_));
+  }
+
+  /// \param[in] obj a Python object in the sequence
+  /// \param[out] keep_going if sufficient information has been gathered to
+  /// attempt to begin converting the sequence, *keep_going will be set to
+  /// false to signal to the calling visitor loop to terminate (it stays true
+  /// while make_unions_ is set, since unions keep accumulating variants)
+  Status Visit(PyObject* obj, bool* keep_going) {
+    ++total_count_;
+
+    // Classify obj and bump exactly one of the per-type counters; GetType()
+    // later resolves the counters into a single Arrow type.
+    if (obj == Py_None || (pandas_null_sentinels_ && internal::PandasObjectIsNull(obj))) {
+      ++none_count_;
+    } else if (PyBool_Check(obj)) {
+      ++bool_count_;
+      *keep_going = make_unions_;
+    } else if (PyFloat_Check(obj)) {
+      ++float_count_;
+      *keep_going = make_unions_;
+    } else if (internal::IsPyInteger(obj)) {
+      ++int_count_;
+    } else if (PyDateTime_Check(obj)) {
+      // infer timezone from the first encountered datetime object
+      if (!timestamp_micro_count_) {
+        OwnedRef tzinfo(PyObject_GetAttrString(obj, "tzinfo"));
+        if (tzinfo.obj() != nullptr && tzinfo.obj() != Py_None) {
+          ARROW_ASSIGN_OR_RAISE(timezone_, internal::TzinfoToString(tzinfo.obj()));
+        }
+      }
+      ++timestamp_micro_count_;
+      *keep_going = make_unions_;
+    } else if (PyDelta_Check(obj)) {
+      ++duration_count_;
+      *keep_going = make_unions_;
+    } else if (PyDate_Check(obj)) {
+      ++date_count_;
+      *keep_going = make_unions_;
+    } else if (PyTime_Check(obj)) {
+      ++time_count_;
+      *keep_going = make_unions_;
+    } else if (internal::IsPyBinary(obj)) {
+      ++binary_count_;
+      *keep_going = make_unions_;
+    } else if (PyUnicode_Check(obj)) {
+      ++unicode_count_;
+      *keep_going = make_unions_;
+    } else if (PyArray_CheckAnyScalarExact(obj)) {
+      RETURN_NOT_OK(VisitDType(PyArray_DescrFromScalar(obj), keep_going));
+    } else if (PyList_Check(obj)) {
+      RETURN_NOT_OK(VisitList(obj, keep_going));
+    } else if (PyArray_Check(obj)) {
+      RETURN_NOT_OK(VisitNdarray(obj, keep_going));
+    } else if (PyDict_Check(obj)) {
+      RETURN_NOT_OK(VisitDict(obj));
+    } else if (PyObject_IsInstance(obj, decimal_type_.obj())) {
+      RETURN_NOT_OK(max_decimal_metadata_.Update(obj));
+      ++decimal_count_;
+    } else {
+      return internal::InvalidValue(obj,
+                                    "did not recognize Python value type when inferring "
+                                    "an Arrow data type");
+    }
+
+    // Periodic consistency check; see validate_interval doc on the ctor.
+    if (total_count_ % validate_interval_ == 0) {
+      RETURN_NOT_OK(Validate());
+    }
+
+    return Status::OK();
+  }
+
+  // Infer value type from a sequence of values
+  Status VisitSequence(PyObject* obj, PyObject* mask = nullptr) {
+    if (mask == nullptr || mask == Py_None) {
+      return internal::VisitSequence(
+          obj, /*offset=*/0,
+          [this](PyObject* value, bool* keep_going) { return Visit(value, keep_going); });
+    } else {
+      // Masked entries are skipped entirely (not even counted as nulls here).
+      return internal::VisitSequenceMasked(
+          obj, mask, /*offset=*/0,
+          [this](PyObject* value, uint8_t masked, bool* keep_going) {
+            if (!masked) {
+              return Visit(value, keep_going);
+            } else {
+              return Status::OK();
+            }
+          });
+    }
+  }
+
+  // Resolve the accumulated counters into a single Arrow type. Resolution
+  // priority: NumPy dtype consensus, then list/struct, decimal, float
+  // (floats win over ints so mixed int/float data widens), int, temporal
+  // types, bool, binary, utf8; an all-null sequence yields null().
+  Status GetType(std::shared_ptr<DataType>* out) {
+    // TODO(wesm): handling forming unions
+    if (make_unions_) {
+      return Status::NotImplemented("Creating union types not yet supported");
+    }
+
+    RETURN_NOT_OK(Validate());
+
+    if (numpy_dtype_count_ > 0) {
+      // All NumPy scalars and Nones/nulls
+      if (numpy_dtype_count_ + none_count_ == total_count_) {
+        std::shared_ptr<DataType> type;
+        RETURN_NOT_OK(NumPyDtypeToArrow(numpy_unifier_.current_dtype(), &type));
+        *out = type;
+        return Status::OK();
+      }
+
+      // The "bad path": data contains a mix of NumPy scalars and
+      // other kinds of scalars. Note this can happen innocuously
+      // because numpy.nan is not a NumPy scalar (it's a built-in
+      // PyFloat)
+
+      // TODO(ARROW-5564): Merge together type unification so this
+      // hack is not necessary
+      switch (numpy_unifier_.current_type_num()) {
+        case NPY_BOOL:
+          bool_count_ += numpy_dtype_count_;
+          break;
+        case NPY_INT8:
+        case NPY_INT16:
+        case NPY_INT32:
+        case NPY_INT64:
+        case NPY_UINT8:
+        case NPY_UINT16:
+        case NPY_UINT32:
+        case NPY_UINT64:
+          int_count_ += numpy_dtype_count_;
+          break;
+        case NPY_FLOAT32:
+        case NPY_FLOAT64:
+          float_count_ += numpy_dtype_count_;
+          break;
+        case NPY_DATETIME:
+          return Status::Invalid(
+              "numpy.datetime64 scalars cannot be mixed "
+              "with other Python scalar values currently");
+      }
+    }
+
+    if (list_count_) {
+      std::shared_ptr<DataType> value_type;
+      RETURN_NOT_OK(list_inferrer_->GetType(&value_type));
+      *out = list(value_type);
+    } else if (struct_count_) {
+      RETURN_NOT_OK(GetStructType(out));
+    } else if (decimal_count_) {
+      if (max_decimal_metadata_.precision() > Decimal128Type::kMaxPrecision) {
+        // the default constructor does not validate the precision and scale
+        ARROW_ASSIGN_OR_RAISE(*out,
+                              Decimal256Type::Make(max_decimal_metadata_.precision(),
+                                                   max_decimal_metadata_.scale()));
+      } else {
+        ARROW_ASSIGN_OR_RAISE(*out,
+                              Decimal128Type::Make(max_decimal_metadata_.precision(),
+                                                   max_decimal_metadata_.scale()));
+      }
+    } else if (float_count_) {
+      // Prioritize floats before integers
+      *out = float64();
+    } else if (int_count_) {
+      *out = int64();
+    } else if (date_count_) {
+      *out = date32();
+    } else if (time_count_) {
+      *out = time64(TimeUnit::MICRO);
+    } else if (timestamp_micro_count_) {
+      *out = timestamp(TimeUnit::MICRO, timezone_);
+    } else if (duration_count_) {
+      *out = duration(TimeUnit::MICRO);
+    } else if (bool_count_) {
+      *out = boolean();
+    } else if (binary_count_) {
+      *out = binary();
+    } else if (unicode_count_) {
+      *out = utf8();
+    } else {
+      *out = null();
+    }
+    return Status::OK();
+  }
+
+  int64_t total_count() const { return total_count_; }
+
+ protected:
+  // Reject sequences that mix list (or struct) values with any other
+  // non-null value kind; recurses into child inferrers.
+  Status Validate() const {
+    if (list_count_ > 0) {
+      if (list_count_ + none_count_ != total_count_) {
+        return Status::Invalid("cannot mix list and non-list, non-null values");
+      }
+      RETURN_NOT_OK(list_inferrer_->Validate());
+    } else if (struct_count_ > 0) {
+      if (struct_count_ + none_count_ != total_count_) {
+        return Status::Invalid("cannot mix struct and non-struct, non-null values");
+      }
+      for (const auto& it : struct_inferrers_) {
+        RETURN_NOT_OK(it.second.Validate());
+      }
+    }
+    return Status::OK();
+  }
+
+  Status VisitDType(PyArray_Descr* dtype, bool* keep_going) {
+    // Continue visiting dtypes for now.
+    // TODO(wesm): devise approach for unions
+    ++numpy_dtype_count_;
+    *keep_going = true;
+    return numpy_unifier_.Observe(dtype);
+  }
+
+  // Visit a Python list: lazily create a child inferrer for the element
+  // type and recurse over the list's contents.
+  Status VisitList(PyObject* obj, bool* keep_going /* unused */) {
+    if (!list_inferrer_) {
+      list_inferrer_.reset(
+          new TypeInferrer(pandas_null_sentinels_, validate_interval_, make_unions_));
+    }
+    ++list_count_;
+    return list_inferrer_->VisitSequence(obj);
+  }
+
+  // Visit an ndarray value: object arrays are treated like lists; typed
+  // arrays feed their dtype straight into the child's dtype unifier.
+  Status VisitNdarray(PyObject* obj, bool* keep_going) {
+    PyArray_Descr* dtype = PyArray_DESCR(reinterpret_cast<PyArrayObject*>(obj));
+    if (dtype->type_num == NPY_OBJECT) {
+      return VisitList(obj, keep_going);
+    }
+    // Not an object array: infer child Arrow type from dtype
+    if (!list_inferrer_) {
+      list_inferrer_.reset(
+          new TypeInferrer(pandas_null_sentinels_, validate_interval_, make_unions_));
+    }
+    ++list_count_;
+
+    // XXX(wesm): In ARROW-4324 I added accounting to check whether
+    // all of the non-null values have NumPy dtypes, but the
+    // total_count was not being properly incremented here
+    ++(*list_inferrer_).total_count_;
+    return list_inferrer_->VisitDType(dtype, keep_going);
+  }
+
+  // Visit a dict: maintains one child inferrer per observed key, so the
+  // inferred struct type is the union of all keys ever seen.
+  Status VisitDict(PyObject* obj) {
+    PyObject* key_obj;
+    PyObject* value_obj;
+    Py_ssize_t pos = 0;
+
+    while (PyDict_Next(obj, &pos, &key_obj, &value_obj)) {
+      std::string key;
+      if (PyUnicode_Check(key_obj)) {
+        RETURN_NOT_OK(internal::PyUnicode_AsStdString(key_obj, &key));
+      } else if (PyBytes_Check(key_obj)) {
+        key = internal::PyBytes_AsStdString(key_obj);
+      } else {
+        return Status::TypeError("Expected dict key of type str or bytes, got '",
+                                 Py_TYPE(key_obj)->tp_name, "'");
+      }
+      // Get or create visitor for this key
+      auto it = struct_inferrers_.find(key);
+      if (it == struct_inferrers_.end()) {
+        it = struct_inferrers_
+                 .insert(
+                     std::make_pair(key, TypeInferrer(pandas_null_sentinels_,
+                                                      validate_interval_, make_unions_)))
+                 .first;
+      }
+      TypeInferrer* visitor = &it->second;
+
+      // We ignore termination signals from child visitors for now
+      //
+      // TODO(wesm): keep track of whether type inference has terminated for
+      // the child visitors to avoid doing unneeded work
+      bool keep_going = true;
+      RETURN_NOT_OK(visitor->Visit(value_obj, &keep_going));
+    }
+
+    // We do not terminate visiting dicts since we want the union of all
+    // observed keys
+    ++struct_count_;
+    return Status::OK();
+  }
+
+  // Assemble the struct type from the per-key child inferrers.
+  Status GetStructType(std::shared_ptr<DataType>* out) {
+    std::vector<std::shared_ptr<Field>> fields;
+    for (auto&& it : struct_inferrers_) {
+      std::shared_ptr<DataType> field_type;
+      RETURN_NOT_OK(it.second.GetType(&field_type));
+      fields.emplace_back(field(it.first, field_type));
+    }
+    *out = struct_(fields);
+    return Status::OK();
+  }
+
+ private:
+  bool pandas_null_sentinels_;
+  int64_t validate_interval_;
+  bool make_unions_;
+  // Per-kind observation counters consumed by GetType()/Validate().
+  int64_t total_count_;
+  int64_t none_count_;
+  int64_t bool_count_;
+  int64_t int_count_;
+  int64_t date_count_;
+  int64_t time_count_;
+  int64_t timestamp_micro_count_;
+  std::string timezone_;
+  int64_t duration_count_;
+  int64_t float_count_;
+  int64_t binary_count_;
+  int64_t unicode_count_;
+  int64_t decimal_count_;
+  int64_t list_count_;
+  int64_t struct_count_;
+  int64_t numpy_dtype_count_;
+  std::unique_ptr<TypeInferrer> list_inferrer_;
+  std::map<std::string, TypeInferrer> struct_inferrers_;
+
+  // If we observe a strongly-typed value in e.g. a NumPy array, we can store
+  // it here to skip the type counting logic above
+  NumPyDtypeUnifier numpy_unifier_;
+
+  internal::DecimalMetadata max_decimal_metadata_;
+
+  // Place to accumulate errors
+  // std::vector<Status> errors_;
+  OwnedRefNoGIL decimal_type_;
+};
+
+// Non-exhaustive type inference over a Python sequence. `mask`, when given,
+// marks entries to skip; `pandas_null_sentinels` enables pandas null checks.
+Result<std::shared_ptr<DataType>> InferArrowType(PyObject* obj, PyObject* mask,
+                                                 bool pandas_null_sentinels) {
+  if (pandas_null_sentinels) {
+    // ARROW-842: If pandas is not installed then null checks will be less
+    // comprehensive, but that is okay.
+    internal::InitPandasStaticData();
+  }
+
+  TypeInferrer inferrer(pandas_null_sentinels);
+  RETURN_NOT_OK(inferrer.VisitSequence(obj, mask));
+
+  std::shared_ptr<DataType> out_type;
+  RETURN_NOT_OK(inferrer.GetType(&out_type));
+  if (out_type == nullptr) {
+    return Status::TypeError("Unable to determine data type");
+  }
+  return std::move(out_type);
+}
+
+// Checks whether the passed Python object is a boolean scalar
+ARROW_PYTHON_EXPORT
+bool IsPyBool(PyObject* obj) { return internal::PyBoolScalar_Check(obj); }
+
+// Checks whether the passed Python object is an integer scalar
+ARROW_PYTHON_EXPORT
+bool IsPyInt(PyObject* obj) { return internal::PyIntScalar_Check(obj); }
+
+// Checks whether the passed Python object is a float scalar
+ARROW_PYTHON_EXPORT
+bool IsPyFloat(PyObject* obj) { return internal::PyFloatScalar_Check(obj); }
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/inference.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/inference.h
new file mode 100644
index 0000000000..eff1836293
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/inference.h
@@ -0,0 +1,64 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Functions for converting between CPython built-in data structures and Arrow
+// data structures
+
+#pragma once
+
+#include "arrow/python/platform.h"
+
+#include <memory>
+
+#include "arrow/python/visibility.h"
+#include "arrow/type.h"
+#include "arrow/util/macros.h"
+
+#include "arrow/python/common.h"
+
+namespace arrow {
+
+class Array;
+class Status;
+
+namespace py {
+
+// These functions take a sequence input, not arbitrary iterables
+
+/// \brief Infer Arrow type from a Python sequence
+/// \param[in] obj the sequence of values
+/// \param[in] mask an optional mask where True values are null. May
+/// be nullptr
+/// \param[in] pandas_null_sentinels use pandas's null value markers
+ARROW_PYTHON_EXPORT
+Result<std::shared_ptr<arrow::DataType>> InferArrowType(PyObject* obj, PyObject* mask,
+ bool pandas_null_sentinels);
+
+/// Checks whether the passed Python object is a boolean scalar
+ARROW_PYTHON_EXPORT
+bool IsPyBool(PyObject* obj);
+
+/// Checks whether the passed Python object is an integer scalar
+ARROW_PYTHON_EXPORT
+bool IsPyInt(PyObject* obj);
+
+/// Checks whether the passed Python object is a float scalar
+ARROW_PYTHON_EXPORT
+bool IsPyFloat(PyObject* obj);
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/init.cc b/contrib/libs/apache/arrow/cpp/src/arrow/python/init.cc
new file mode 100644
index 0000000000..dba293bbe2
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/init.cc
@@ -0,0 +1,24 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Trigger the array import (inversion of NO_IMPORT_ARRAY)
+#define NUMPY_IMPORT_ARRAY
+
+#include "arrow/python/init.h"
+#include "arrow/python/numpy_interop.h"
+
+int arrow_init_numpy() { return arrow::py::import_numpy(); }
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/init.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/init.h
new file mode 100644
index 0000000000..2e6c954862
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/init.h
@@ -0,0 +1,26 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/python/platform.h"
+#include "arrow/python/visibility.h"
+
+extern "C" {
+ARROW_PYTHON_EXPORT
+int arrow_init_numpy();
+}
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/io.cc b/contrib/libs/apache/arrow/cpp/src/arrow/python/io.cc
new file mode 100644
index 0000000000..73525feed3
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/io.cc
@@ -0,0 +1,374 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/python/io.h"
+
+#include <cstdint>
+#include <cstdlib>
+#include <memory>
+#include <mutex>
+#include <string>
+
+#include "arrow/io/memory.h"
+#include "arrow/memory_pool.h"
+#include "arrow/status.h"
+#include "arrow/util/logging.h"
+
+#include "arrow/python/common.h"
+#include "arrow/python/pyarrow.h"
+
+namespace arrow {
+
+using arrow::io::TransformInputStream;
+
+namespace py {
+
+// ----------------------------------------------------------------------
+// Python file
+
+// A common interface to a Python file-like object. Must acquire GIL before
+// calling any methods
+class PythonFile {
+ public:
+  // Takes a new strong reference to `file`; it is released when file_ is
+  // reset (Close/Abort) or when this object is destroyed.
+  explicit PythonFile(PyObject* file) : file_(file), checked_read_buffer_(false) {
+    Py_INCREF(file);
+  }
+
+  // Cheap local check: only verifies that Close()/Abort() has not dropped
+  // our reference.  It does NOT consult the Python-side `closed` attribute
+  // (see closed() below for that).
+  Status CheckClosed() const {
+    if (!file_) {
+      return Status::Invalid("operation on closed Python file");
+    }
+    return Status::OK();
+  }
+
+  // Calls file.close() and drops our reference.  The reference is dropped
+  // even if close() raised, so a failed Close() still leaves the object in
+  // the "closed" state.
+  Status Close() {
+    if (file_) {
+      PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "close", "()");
+      Py_XDECREF(result);
+      file_.reset();
+      PY_RETURN_IF_ERROR(StatusCode::IOError);
+    }
+    return Status::OK();
+  }
+
+  // Drop the reference without calling file.close(): the Python object's
+  // own finalizer is left to clean up.
+  Status Abort() {
+    file_.reset();
+    return Status::OK();
+  }
+
+  // Queries the Python-side `closed` attribute.  This is a bool-returning
+  // API with no error channel, so any Python exception is reported via
+  // PyErr_WriteUnraisable and "closed" is returned as the safe answer.
+  bool closed() const {
+    if (!file_) {
+      return true;
+    }
+    PyObject* result = PyObject_GetAttrString(file_.obj(), "closed");
+    if (result == NULL) {
+      // Can't propagate the error, so write it out and return an arbitrary value
+      PyErr_WriteUnraisable(NULL);
+      return true;
+    }
+    int ret = PyObject_IsTrue(result);
+    Py_XDECREF(result);
+    if (ret < 0) {
+      PyErr_WriteUnraisable(NULL);
+      return true;
+    }
+    return ret != 0;
+  }
+
+  // Calls file.seek(position, whence).  The seek() return value is ignored.
+  Status Seek(int64_t position, int whence) {
+    RETURN_NOT_OK(CheckClosed());
+
+    // whence: 0 for relative to start of file, 2 for end of file
+    PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "seek", "(ni)",
+                                               static_cast<Py_ssize_t>(position), whence);
+    Py_XDECREF(result);
+    PY_RETURN_IF_ERROR(StatusCode::IOError);
+    return Status::OK();
+  }
+
+  // Calls file.read(nbytes); on success *out receives a new reference to
+  // whatever read() returned (the caller validates its type).
+  Status Read(int64_t nbytes, PyObject** out) {
+    RETURN_NOT_OK(CheckClosed());
+
+    PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "read", "(n)",
+                                               static_cast<Py_ssize_t>(nbytes));
+    PY_RETURN_IF_ERROR(StatusCode::IOError);
+    *out = result;
+    return Status::OK();
+  }
+
+  // Calls the optional zero-copy file.read_buffer(nbytes) method (presence
+  // is detected by HasReadBuffer()).
+  // NOTE(review): unlike Read()/Write(), this does not call CheckClosed()
+  // first — presumably callers gate on HasReadBuffer() while open; confirm.
+  Status ReadBuffer(int64_t nbytes, PyObject** out) {
+    PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "read_buffer", "(n)",
+                                               static_cast<Py_ssize_t>(nbytes));
+    PY_RETURN_IF_ERROR(StatusCode::IOError);
+    *out = result;
+    return Status::OK();
+  }
+
+  // Writes `nbytes` of caller-owned memory via file.write(bytes).
+  Status Write(const void* data, int64_t nbytes) {
+    RETURN_NOT_OK(CheckClosed());
+
+    // Since the data isn't owned, we have to make a copy
+    PyObject* py_data =
+        PyBytes_FromStringAndSize(reinterpret_cast<const char*>(data), nbytes);
+    PY_RETURN_IF_ERROR(StatusCode::IOError);
+
+    PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "write", "(O)", py_data);
+    Py_XDECREF(py_data);
+    Py_XDECREF(result);
+    PY_RETURN_IF_ERROR(StatusCode::IOError);
+    return Status::OK();
+  }
+
+  // Writes an Arrow buffer via file.write(); wrap_buffer avoids copying the
+  // buffer contents into a bytes object.
+  Status Write(const std::shared_ptr<Buffer>& buffer) {
+    RETURN_NOT_OK(CheckClosed());
+
+    PyObject* py_data = wrap_buffer(buffer);
+    PY_RETURN_IF_ERROR(StatusCode::IOError);
+
+    PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "write", "(O)", py_data);
+    Py_XDECREF(py_data);
+    Py_XDECREF(result);
+    PY_RETURN_IF_ERROR(StatusCode::IOError);
+    return Status::OK();
+  }
+
+  // Returns file.tell() as an int64.
+  Result<int64_t> Tell() {
+    RETURN_NOT_OK(CheckClosed());
+
+    PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "tell", "()");
+    PY_RETURN_IF_ERROR(StatusCode::IOError);
+
+    int64_t position = PyLong_AsLongLong(result);
+    Py_DECREF(result);
+
+    // PyLong_AsLongLong can raise OverflowError
+    PY_RETURN_IF_ERROR(StatusCode::IOError);
+    return position;
+  }
+
+  // Mutex used by PyReadableFile::ReadAt to serialize seek+read pairs.
+  std::mutex& lock() { return lock_; }
+
+  // True if the Python object exposes the optional read_buffer() method.
+  // The attribute lookup is cached after the first call.
+  bool HasReadBuffer() {
+    if (!checked_read_buffer_) {  // we don't want to check this each time
+      has_read_buffer_ = PyObject_HasAttrString(file_.obj(), "read_buffer") == 1;
+      checked_read_buffer_ = true;
+    }
+    return has_read_buffer_;
+  }
+
+ private:
+  std::mutex lock_;
+  OwnedRefNoGIL file_;
+  bool has_read_buffer_;       // only meaningful once checked_read_buffer_ is true
+  bool checked_read_buffer_;   // lazily-computed cache flag for HasReadBuffer()
+};
+
+// ----------------------------------------------------------------------
+// Seekable input stream
+
+PyReadableFile::PyReadableFile(PyObject* file) { file_.reset(new PythonFile(file)); }
+
+// The destructor does not close the underlying Python file object, as
+// there may be multiple references to it. Instead let the Python
+// destructor do its job.
+PyReadableFile::~PyReadableFile() {}
+
+// All public entry points go through SafeCallIntoPython, which acquires the
+// GIL (and restores any in-flight Python error state) around the PythonFile
+// call.
+Status PyReadableFile::Abort() {
+  return SafeCallIntoPython([this]() { return file_->Abort(); });
+}
+
+Status PyReadableFile::Close() {
+  return SafeCallIntoPython([this]() { return file_->Close(); });
+}
+
+bool PyReadableFile::closed() const {
+  bool res;
+  // The status is deliberately discarded: closed() has no error channel.
+  // NOTE(review): `res` is only assigned inside the lambda — assumes
+  // SafeCallIntoPython always invokes it; confirm, or pre-initialize res.
+  Status st = SafeCallIntoPython([this, &res]() {
+    res = file_->closed();
+    return Status::OK();
+  });
+  return res;
+}
+
+// Absolute seek (whence=0, i.e. from start of file).
+Status PyReadableFile::Seek(int64_t position) {
+  return SafeCallIntoPython([=] { return file_->Seek(position, 0); });
+}
+
+Result<int64_t> PyReadableFile::Tell() const {
+  return SafeCallIntoPython([=]() -> Result<int64_t> { return file_->Tell(); });
+}
+
+// Reads up to `nbytes` into caller-provided memory.  The Python read()
+// result may be any buffer-protocol object; its contents are copied into
+// `out` and the actual byte count (possibly < nbytes at EOF) is returned.
+Result<int64_t> PyReadableFile::Read(int64_t nbytes, void* out) {
+  return SafeCallIntoPython([=]() -> Result<int64_t> {
+    OwnedRef bytes;
+    RETURN_NOT_OK(file_->Read(nbytes, bytes.ref()));
+    PyObject* bytes_obj = bytes.obj();
+    DCHECK(bytes_obj != NULL);
+
+    Py_buffer py_buf;
+    // PyObject_GetBuffer returns 0 on success.
+    if (!PyObject_GetBuffer(bytes_obj, &py_buf, PyBUF_ANY_CONTIGUOUS)) {
+      const uint8_t* data = reinterpret_cast<const uint8_t*>(py_buf.buf);
+      std::memcpy(out, data, py_buf.len);
+      int64_t len = py_buf.len;
+      PyBuffer_Release(&py_buf);
+      return len;
+    } else {
+      return Status::TypeError(
+          "Python file read() should have returned a bytes object or an object "
+          "supporting the buffer protocol, got '",
+          Py_TYPE(bytes_obj)->tp_name, "' (did you open the file in binary mode?)");
+    }
+  });
+}
+
+// Reads up to `nbytes` into a new Buffer.  Prefers the zero-copy
+// read_buffer() method when the Python object provides one; otherwise falls
+// back to read() and wraps the result via PyBuffer::FromPyObject.
+Result<std::shared_ptr<Buffer>> PyReadableFile::Read(int64_t nbytes) {
+  return SafeCallIntoPython([=]() -> Result<std::shared_ptr<Buffer>> {
+    OwnedRef buffer_obj;
+    if (file_->HasReadBuffer()) {
+      RETURN_NOT_OK(file_->ReadBuffer(nbytes, buffer_obj.ref()));
+    } else {
+      RETURN_NOT_OK(file_->Read(nbytes, buffer_obj.ref()));
+    }
+    DCHECK(buffer_obj.obj() != NULL);
+
+    return PyBuffer::FromPyObject(buffer_obj.obj());
+  });
+}
+
+// Thread-safe positional read: the file mutex serializes the seek+read pair
+// so concurrent ReadAt calls cannot interleave.  Note the lock ordering:
+// the file lock is taken first, then the GIL inside SafeCallIntoPython —
+// consistent across both overloads.
+Result<int64_t> PyReadableFile::ReadAt(int64_t position, int64_t nbytes, void* out) {
+  std::lock_guard<std::mutex> guard(file_->lock());
+  return SafeCallIntoPython([=]() -> Result<int64_t> {
+    RETURN_NOT_OK(Seek(position));
+    return Read(nbytes, out);
+  });
+}
+
+// Buffer-returning variant of the above, with the same locking discipline.
+Result<std::shared_ptr<Buffer>> PyReadableFile::ReadAt(int64_t position, int64_t nbytes) {
+  std::lock_guard<std::mutex> guard(file_->lock());
+  return SafeCallIntoPython([=]() -> Result<std::shared_ptr<Buffer>> {
+    RETURN_NOT_OK(Seek(position));
+    return Read(nbytes);
+  });
+}
+
+// Computes the file size by seeking to the end (whence=2) and back,
+// restoring the original position afterwards.
+Result<int64_t> PyReadableFile::GetSize() {
+  return SafeCallIntoPython([=]() -> Result<int64_t> {
+    ARROW_ASSIGN_OR_RAISE(int64_t current_position, file_->Tell());
+    RETURN_NOT_OK(file_->Seek(0, 2));
+
+    ARROW_ASSIGN_OR_RAISE(int64_t file_size, file_->Tell());
+    // Restore previous file position
+    RETURN_NOT_OK(file_->Seek(current_position, 0));
+
+    return file_size;
+  });
+}
+
+// ----------------------------------------------------------------------
+// Output stream
+
+PyOutputStream::PyOutputStream(PyObject* file) : position_(0) {
+  file_.reset(new PythonFile(file));
+}
+
+// The destructor does not close the underlying Python file object, as
+// there may be multiple references to it. Instead let the Python
+// destructor do its job.
+PyOutputStream::~PyOutputStream() {}
+
+Status PyOutputStream::Abort() {
+  return SafeCallIntoPython([=]() { return file_->Abort(); });
+}
+
+Status PyOutputStream::Close() {
+  return SafeCallIntoPython([=]() { return file_->Close(); });
+}
+
+bool PyOutputStream::closed() const {
+  bool res;
+  // Status intentionally discarded: closed() has no error channel.
+  Status st = SafeCallIntoPython([this, &res]() {
+    res = file_->closed();
+    return Status::OK();
+  });
+  return res;
+}
+
+// Tell() is tracked locally (bytes submitted via Write) rather than asking
+// the Python object, so no GIL is needed here.
+Result<int64_t> PyOutputStream::Tell() const { return position_; }
+
+// NOTE(review): position_ is advanced before the underlying write is known
+// to have succeeded, so after a failed Write the reported position may
+// overstate the bytes actually written — confirm this is acceptable.
+Status PyOutputStream::Write(const void* data, int64_t nbytes) {
+  return SafeCallIntoPython([=]() {
+    position_ += nbytes;
+    return file_->Write(data, nbytes);
+  });
+}
+
+Status PyOutputStream::Write(const std::shared_ptr<Buffer>& buffer) {
+  return SafeCallIntoPython([=]() {
+    position_ += buffer->size();
+    return file_->Write(buffer);
+  });
+}
+
+// ----------------------------------------------------------------------
+// Foreign buffer
+
+// Wraps externally-owned memory in a Buffer that keeps `base` (the Python
+// object owning the memory) alive for the buffer's lifetime.
+// NOTE(review): the NULL check is unreachable — plain `new` throws
+// std::bad_alloc on failure rather than returning NULL.
+Status PyForeignBuffer::Make(const uint8_t* data, int64_t size, PyObject* base,
+                             std::shared_ptr<Buffer>* out) {
+  PyForeignBuffer* buf = new PyForeignBuffer(data, size, base);
+  if (buf == NULL) {
+    return Status::OutOfMemory("could not allocate foreign buffer object");
+  } else {
+    *out = std::shared_ptr<Buffer>(buf);
+    return Status::OK();
+  }
+}
+
+// ----------------------------------------------------------------------
+// TransformInputStream::TransformFunc wrapper
+
+// Adapts a Cython-supplied callback + Python handler object into the
+// copyable callable shape that TransformInputStream::TransformFunc
+// (a std::function) requires.
+struct TransformFunctionWrapper {
+  // `arg` is borrowed: Py_INCREF takes the strong reference that the
+  // OwnedRefNoGIL will release when the last wrapper copy is destroyed.
+  TransformFunctionWrapper(TransformCallback cb, PyObject* arg)
+      : cb_(std::move(cb)), arg_(std::make_shared<OwnedRefNoGIL>(arg)) {
+    Py_INCREF(arg);
+  }
+
+  // Invokes the Python-side transform under the GIL; a Python exception set
+  // by the callback is converted to a Status by CheckPyError().
+  Result<std::shared_ptr<Buffer>> operator()(const std::shared_ptr<Buffer>& src) {
+    return SafeCallIntoPython([=]() -> Result<std::shared_ptr<Buffer>> {
+      std::shared_ptr<Buffer> dest;
+      cb_(arg_->obj(), src, &dest);
+      RETURN_NOT_OK(CheckPyError());
+      return dest;
+    });
+  }
+
+ protected:
+  // Need to wrap OwnedRefNoGIL because std::function needs the callable
+  // to be copy-constructible...
+  TransformCallback cb_;
+  std::shared_ptr<OwnedRefNoGIL> arg_;
+};
+
+// Factory entry point used from Cython: wraps `wrapped` in a
+// TransformInputStream whose transform calls back into Python via `handler`.
+std::shared_ptr<::arrow::io::InputStream> MakeTransformInputStream(
+    std::shared_ptr<::arrow::io::InputStream> wrapped, TransformInputStreamVTable vtable,
+    PyObject* handler) {
+  TransformInputStream::TransformFunc transform(
+      TransformFunctionWrapper{std::move(vtable.transform), handler});
+  return std::make_shared<TransformInputStream>(std::move(wrapped), std::move(transform));
+}
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/io.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/io.h
new file mode 100644
index 0000000000..a38d0ca332
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/io.h
@@ -0,0 +1,116 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+
+#include "arrow/io/interfaces.h"
+#include "arrow/io/transform.h"
+
+#include "arrow/python/common.h"
+#include "arrow/python/visibility.h"
+
+namespace arrow {
+namespace py {
+
+class ARROW_NO_EXPORT PythonFile;
+
+// RandomAccessFile implementation backed by a Python file-like object.
+// All methods acquire the GIL internally before calling into Python.
+class ARROW_PYTHON_EXPORT PyReadableFile : public io::RandomAccessFile {
+ public:
+  // `file` is borrowed; a strong reference is taken internally.
+  explicit PyReadableFile(PyObject* file);
+  ~PyReadableFile() override;
+
+  Status Close() override;
+  Status Abort() override;
+  bool closed() const override;
+
+  Result<int64_t> Read(int64_t nbytes, void* out) override;
+  Result<std::shared_ptr<Buffer>> Read(int64_t nbytes) override;
+
+  // Thread-safe version
+  Result<int64_t> ReadAt(int64_t position, int64_t nbytes, void* out) override;
+
+  // Thread-safe version
+  Result<std::shared_ptr<Buffer>> ReadAt(int64_t position, int64_t nbytes) override;
+
+  Result<int64_t> GetSize() override;
+
+  Status Seek(int64_t position) override;
+
+  Result<int64_t> Tell() const override;
+
+ private:
+  std::unique_ptr<PythonFile> file_;
+};
+
+// OutputStream implementation backed by a Python file-like object.
+// Tell() is tracked locally from the bytes passed to Write().
+class ARROW_PYTHON_EXPORT PyOutputStream : public io::OutputStream {
+ public:
+  // `file` is borrowed; a strong reference is taken internally.
+  explicit PyOutputStream(PyObject* file);
+  ~PyOutputStream() override;
+
+  Status Close() override;
+  Status Abort() override;
+  bool closed() const override;
+  Result<int64_t> Tell() const override;
+  Status Write(const void* data, int64_t nbytes) override;
+  Status Write(const std::shared_ptr<Buffer>& buffer) override;
+
+ private:
+  std::unique_ptr<PythonFile> file_;
+  int64_t position_;  // bytes submitted so far; reported by Tell()
+};
+
+// TODO(wesm): seekable output files
+
+// A Buffer subclass that keeps a PyObject reference throughout its
+// lifetime, such that the Python object is kept alive as long as the
+// C++ buffer is still needed.
+// Keeping the reference in a Python wrapper would be incorrect as
+// the Python wrapper can get destroyed even though the wrapped C++
+// buffer is still alive (ARROW-2270).
+class ARROW_PYTHON_EXPORT PyForeignBuffer : public Buffer {
+ public:
+  // `base` is borrowed; a strong reference is taken and held by the buffer.
+  static Status Make(const uint8_t* data, int64_t size, PyObject* base,
+                     std::shared_ptr<Buffer>* out);
+
+ private:
+  // Private: construction goes through Make() so the result is always
+  // managed by a shared_ptr.
+  PyForeignBuffer(const uint8_t* data, int64_t size, PyObject* base)
+      : Buffer(data, size) {
+    Py_INCREF(base);
+    base_.reset(base);
+  }
+
+  OwnedRefNoGIL base_;  // keeps the owning Python object alive
+};
+
+// All this rigamarole because Cython is really poor with std::function<>
+
+using TransformCallback = std::function<void(
+ PyObject*, const std::shared_ptr<Buffer>& src, std::shared_ptr<Buffer>* out)>;
+
+struct TransformInputStreamVTable {
+ TransformCallback transform;
+};
+
+ARROW_PYTHON_EXPORT
+std::shared_ptr<::arrow::io::InputStream> MakeTransformInputStream(
+ std::shared_ptr<::arrow::io::InputStream> wrapped, TransformInputStreamVTable vtable,
+ PyObject* arg);
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/ipc.cc b/contrib/libs/apache/arrow/cpp/src/arrow/python/ipc.cc
new file mode 100644
index 0000000000..2e6c9d9127
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/ipc.cc
@@ -0,0 +1,67 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/python/ipc.h"
+
+#include <memory>
+
+#include "arrow/python/pyarrow.h"
+
+namespace arrow {
+namespace py {
+
+PyRecordBatchReader::PyRecordBatchReader() {}
+
+// Obtains an iterator from `iterable` (borrowed reference) and stores the
+// declared schema.  A Python error from PyObject_GetIter is converted to a
+// Status.
+Status PyRecordBatchReader::Init(std::shared_ptr<Schema> schema, PyObject* iterable) {
+  schema_ = std::move(schema);
+
+  iterator_.reset(PyObject_GetIter(iterable));
+  return CheckPyError();
+}
+
+std::shared_ptr<Schema> PyRecordBatchReader::schema() const { return schema_; }
+
+// Pulls the next batch from the Python iterator.  End of stream is signaled
+// by resetting *batch to null; the iterator reference is dropped at that
+// point so subsequent calls return end-of-stream cheaply.
+Status PyRecordBatchReader::ReadNext(std::shared_ptr<RecordBatch>* batch) {
+  PyAcquireGIL lock;
+
+  if (!iterator_) {
+    // End of stream
+    batch->reset();
+    return Status::OK();
+  }
+
+  OwnedRef py_batch(PyIter_Next(iterator_.obj()));
+  if (!py_batch) {
+    // PyIter_Next returns NULL both on exhaustion and on error;
+    // RETURN_IF_PYERROR distinguishes the two.
+    RETURN_IF_PYERROR();
+    // End of stream
+    batch->reset();
+    iterator_.reset();
+    return Status::OK();
+  }
+
+  return unwrap_batch(py_batch.obj()).Value(batch);
+}
+
+// Cython-facing factory; `iterable` is borrowed.
+Result<std::shared_ptr<RecordBatchReader>> PyRecordBatchReader::Make(
+    std::shared_ptr<Schema> schema, PyObject* iterable) {
+  auto reader = std::shared_ptr<PyRecordBatchReader>(new PyRecordBatchReader());
+  RETURN_NOT_OK(reader->Init(std::move(schema), iterable));
+  return reader;
+}
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/ipc.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/ipc.h
new file mode 100644
index 0000000000..92232ed830
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/ipc.h
@@ -0,0 +1,52 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+
+#include "arrow/python/common.h"
+#include "arrow/python/visibility.h"
+#include "arrow/record_batch.h"
+#include "arrow/result.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+namespace py {
+
+// RecordBatchReader that draws batches from a Python iterable of wrapped
+// record batches.  The schema is supplied up front, not inferred.
+class ARROW_PYTHON_EXPORT PyRecordBatchReader : public RecordBatchReader {
+ public:
+  std::shared_ptr<Schema> schema() const override;
+
+  Status ReadNext(std::shared_ptr<RecordBatch>* batch) override;
+
+  // For use from Cython
+  // Assumes that `iterable` is borrowed
+  static Result<std::shared_ptr<RecordBatchReader>> Make(std::shared_ptr<Schema>,
+                                                         PyObject* iterable);
+
+ protected:
+  // Construction goes through Make(), which also runs Init().
+  PyRecordBatchReader();
+
+  Status Init(std::shared_ptr<Schema>, PyObject* iterable);
+
+  std::shared_ptr<Schema> schema_;
+  OwnedRefNoGIL iterator_;  // null once the stream is exhausted
+};
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/iterators.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/iterators.h
new file mode 100644
index 0000000000..58213ee2db
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/iterators.h
@@ -0,0 +1,155 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <utility>
+
+#include "arrow/python/common.h"
+#include "arrow/python/numpy_internal.h"
+
+namespace arrow {
+namespace py {
+namespace internal {
+
+// Visit the Python sequence, calling the given callable on each element. If
+// the callable returns a non-OK status, iteration stops and the status is
+// returned.
+//
+// The call signature for Visitor must be
+//
+// Visit(PyObject* obj, int64_t index, bool* keep_going)
+//
+// If keep_going is set to false, the iteration terminates
+template <class VisitorFunc>
+inline Status VisitSequenceGeneric(PyObject* obj, int64_t offset, VisitorFunc&& func) {
+  // VisitorFunc may set to false to terminate iteration
+  bool keep_going = true;
+
+  // Fast path for NumPy object arrays: element pointers are read directly
+  // from the array storage instead of going through PySequence_*.
+  if (PyArray_Check(obj)) {
+    PyArrayObject* arr_obj = reinterpret_cast<PyArrayObject*>(obj);
+    if (PyArray_NDIM(arr_obj) != 1) {
+      return Status::Invalid("Only 1D arrays accepted");
+    }
+
+    if (PyArray_DESCR(arr_obj)->type_num == NPY_OBJECT) {
+      // It's an array object, we can fetch object pointers directly
+      const Ndarray1DIndexer<PyObject*> objects(arr_obj);
+      for (int64_t i = offset; keep_going && i < objects.size(); ++i) {
+        RETURN_NOT_OK(func(objects[i], i, &keep_going));
+      }
+      return Status::OK();
+    }
+    // It's a non-object array, fall back on regular sequence access.
+    // (note PyArray_GETITEM() is slightly different: it returns standard
+    // Python types, not Numpy scalar types)
+    // This code path is inefficient: callers should implement dedicated
+    // logic for non-object arrays.
+  }
+  if (PySequence_Check(obj)) {
+    if (PyList_Check(obj) || PyTuple_Check(obj)) {
+      // Use fast item access
+      // (PySequence_Fast_GET_ITEM returns a borrowed reference, so no
+      // OwnedRef is needed on this path)
+      const Py_ssize_t size = PySequence_Fast_GET_SIZE(obj);
+      for (Py_ssize_t i = offset; keep_going && i < size; ++i) {
+        PyObject* value = PySequence_Fast_GET_ITEM(obj, i);
+        RETURN_NOT_OK(func(value, static_cast<int64_t>(i), &keep_going));
+      }
+    } else {
+      // Regular sequence: avoid making a potentially large copy
+      // (PySequence_ITEM returns a new reference, owned per-iteration by
+      // value_ref)
+      const Py_ssize_t size = PySequence_Size(obj);
+      RETURN_IF_PYERROR();
+      for (Py_ssize_t i = offset; keep_going && i < size; ++i) {
+        OwnedRef value_ref(PySequence_ITEM(obj, i));
+        RETURN_IF_PYERROR();
+        RETURN_NOT_OK(func(value_ref.obj(), static_cast<int64_t>(i), &keep_going));
+      }
+    }
+  } else {
+    return Status::TypeError("Object is not a sequence");
+  }
+  return Status::OK();
+}
+
+// Visit sequence with no null mask
+// Adapts a two-argument visitor (value, keep_going) onto the generic
+// three-argument form; the element index is dropped.
+template <class VisitorFunc>
+inline Status VisitSequence(PyObject* obj, int64_t offset, VisitorFunc&& func) {
+  return VisitSequenceGeneric(
+      obj, offset, [&func](PyObject* value, int64_t i /* unused */, bool* keep_going) {
+        return func(value, keep_going);
+      });
+}
+
+/// Visit sequence with null mask
+/// The visitor receives (value, mask_value, keep_going), where mask_value is
+/// the boolean mask entry at the same index as the sequence element.  The
+/// mask must be a 1-D boolean NumPy array with the same length as `obj`.
+template <class VisitorFunc>
+inline Status VisitSequenceMasked(PyObject* obj, PyObject* mo, int64_t offset,
+                                  VisitorFunc&& func) {
+  if (mo == nullptr || !PyArray_Check(mo)) {
+    return Status::Invalid("Null mask must be NumPy array");
+  }
+
+  PyArrayObject* mask = reinterpret_cast<PyArrayObject*>(mo);
+  if (PyArray_NDIM(mask) != 1) {
+    return Status::Invalid("Mask must be 1D array");
+  }
+
+  const Py_ssize_t obj_size = PySequence_Size(obj);
+  if (PyArray_SIZE(mask) != static_cast<int64_t>(obj_size)) {
+    return Status::Invalid("Mask was a different length from sequence being converted");
+  }
+
+  const int dtype = fix_numpy_type_num(PyArray_DESCR(mask)->type_num);
+  if (dtype == NPY_BOOL) {
+    // NPY_BOOL is stored as one byte per element, hence the uint8_t indexer.
+    Ndarray1DIndexer<uint8_t> mask_values(mask);
+
+    return VisitSequenceGeneric(
+        obj, offset, [&func, &mask_values](PyObject* value, int64_t i, bool* keep_going) {
+          return func(value, mask_values[i], keep_going);
+        });
+  } else {
+    return Status::Invalid("Mask must be boolean dtype");
+  }
+}
+
+// Like IterateSequence, but accepts any generic iterable (including
+// non-restartable iterators, e.g. generators).
+//
+// The call signature for VisitorFunc must be Visit(PyObject*, bool*
+// keep_going). If keep_going is set to false, the iteration terminates
+template <class VisitorFunc>
+inline Status VisitIterable(PyObject* obj, VisitorFunc&& func) {
+  if (PySequence_Check(obj)) {
+    // Numpy arrays fall here as well
+    return VisitSequence(obj, /*offset=*/0, std::forward<VisitorFunc>(func));
+  }
+  // Fall back on the iterator protocol
+  OwnedRef iter_ref(PyObject_GetIter(obj));
+  PyObject* iter = iter_ref.obj();
+  RETURN_IF_PYERROR();
+  PyObject* value;
+
+  bool keep_going = true;
+  // PyIter_Next returns a new reference (owned per-iteration by value_ref),
+  // and NULL on both exhaustion and error.
+  while (keep_going && (value = PyIter_Next(iter))) {
+    OwnedRef value_ref(value);
+    RETURN_NOT_OK(func(value_ref.obj(), &keep_going));
+  }
+  RETURN_IF_PYERROR();  // __next__() might have raised
+  return Status::OK();
+}
+
+} // namespace internal
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_convert.cc b/contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_convert.cc
new file mode 100644
index 0000000000..bf4afb2a0a
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_convert.cc
@@ -0,0 +1,559 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/python/numpy_interop.h"
+
+#include "arrow/python/numpy_convert.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "arrow/buffer.h"
+#include "arrow/sparse_tensor.h"
+#include "arrow/tensor.h"
+#include "arrow/type.h"
+#include "arrow/util/logging.h"
+
+#include "arrow/python/common.h"
+#include "arrow/python/pyarrow.h"
+#include "arrow/python/type_traits.h"
+
+namespace arrow {
+namespace py {
+
+// Zero-copy view of a NumPy ndarray as an Arrow Buffer. Takes a strong
+// reference to the Python object to keep the underlying memory alive for
+// the buffer's lifetime. Non-ndarray objects yield an empty buffer.
+NumPyBuffer::NumPyBuffer(PyObject* ao) : Buffer(nullptr, 0) {
+  // Refcount manipulation requires the GIL.
+  PyAcquireGIL lock;
+  arr_ = ao;
+  Py_INCREF(ao);
+
+  if (PyArray_Check(ao)) {
+    PyArrayObject* ndarray = reinterpret_cast<PyArrayObject*>(ao);
+    auto ptr = reinterpret_cast<uint8_t*>(PyArray_DATA(ndarray));
+    data_ = const_cast<const uint8_t*>(ptr);
+    // Total byte size = element count * element byte width.
+    size_ = PyArray_SIZE(ndarray) * PyArray_DESCR(ndarray)->elsize;
+    capacity_ = size_;
+    is_mutable_ = !!(PyArray_FLAGS(ndarray) & NPY_ARRAY_WRITEABLE);
+  }
+}
+
+// Drop the reference taken in the constructor; must hold the GIL.
+NumPyBuffer::~NumPyBuffer() {
+  PyAcquireGIL lock;
+  Py_XDECREF(arr_);
+}
+
+// Switch-case helper: map NPY_<NPY_NAME> to the Arrow type factory FACTORY().
+#define TO_ARROW_TYPE_CASE(NPY_NAME, FACTORY) \
+  case NPY_##NPY_NAME: \
+    *out = FACTORY(); \
+    break;
+
+namespace {
+
+// Map a numpy.dtype Python object to the Arrow DataType used for Tensor
+// conversion. Note: NPY_BOOL maps to uint8 here, unlike NumPyDtypeToArrow
+// below, which maps it to boolean.
+Status GetTensorType(PyObject* dtype, std::shared_ptr<DataType>* out) {
+  if (!PyObject_TypeCheck(dtype, &PyArrayDescr_Type)) {
+    return Status::TypeError("Did not pass numpy.dtype object");
+  }
+  PyArray_Descr* descr = reinterpret_cast<PyArray_Descr*>(dtype);
+  // Normalize platform-dependent NPY_INT/NPY_LONGLONG aliases first.
+  int type_num = fix_numpy_type_num(descr->type_num);
+
+  switch (type_num) {
+    TO_ARROW_TYPE_CASE(BOOL, uint8);
+    TO_ARROW_TYPE_CASE(INT8, int8);
+    TO_ARROW_TYPE_CASE(INT16, int16);
+    TO_ARROW_TYPE_CASE(INT32, int32);
+    TO_ARROW_TYPE_CASE(INT64, int64);
+    TO_ARROW_TYPE_CASE(UINT8, uint8);
+    TO_ARROW_TYPE_CASE(UINT16, uint16);
+    TO_ARROW_TYPE_CASE(UINT32, uint32);
+    TO_ARROW_TYPE_CASE(UINT64, uint64);
+    TO_ARROW_TYPE_CASE(FLOAT16, float16);
+    TO_ARROW_TYPE_CASE(FLOAT32, float32);
+    TO_ARROW_TYPE_CASE(FLOAT64, float64);
+    default: {
+      return Status::NotImplemented("Unsupported numpy type ", descr->type_num);
+    }
+  }
+  return Status::OK();
+}
+
+// Map an Arrow DataType id to the corresponding NumPy type number.
+// Only fixed-width numeric types are supported; anything else is
+// NotImplemented.
+Status GetNumPyType(const DataType& type, int* type_num) {
+#define NUMPY_TYPE_CASE(ARROW_NAME, NPY_NAME) \
+  case Type::ARROW_NAME: \
+    *type_num = NPY_##NPY_NAME; \
+    break;
+
+  switch (type.id()) {
+    NUMPY_TYPE_CASE(UINT8, UINT8);
+    NUMPY_TYPE_CASE(INT8, INT8);
+    NUMPY_TYPE_CASE(UINT16, UINT16);
+    NUMPY_TYPE_CASE(INT16, INT16);
+    NUMPY_TYPE_CASE(UINT32, UINT32);
+    NUMPY_TYPE_CASE(INT32, INT32);
+    NUMPY_TYPE_CASE(UINT64, UINT64);
+    NUMPY_TYPE_CASE(INT64, INT64);
+    NUMPY_TYPE_CASE(HALF_FLOAT, FLOAT16);
+    NUMPY_TYPE_CASE(FLOAT, FLOAT32);
+    NUMPY_TYPE_CASE(DOUBLE, FLOAT64);
+    default: {
+      return Status::NotImplemented("Unsupported tensor type: ", type.ToString());
+    }
+  }
+#undef NUMPY_TYPE_CASE
+
+  return Status::OK();
+}
+
+} // namespace
+
+// Convert a numpy.dtype Python object into the equivalent Arrow DataType.
+// Fails with TypeError when `dtype` is not a numpy.dtype instance; otherwise
+// delegates to the PyArray_Descr* overload below.
+Status NumPyDtypeToArrow(PyObject* dtype, std::shared_ptr<DataType>* out) {
+  const bool is_dtype = PyObject_TypeCheck(dtype, &PyArrayDescr_Type) != 0;
+  if (is_dtype) {
+    return NumPyDtypeToArrow(reinterpret_cast<PyArray_Descr*>(dtype), out);
+  }
+  return Status::TypeError("Did not pass numpy.dtype object");
+}
+
+// Convert a NumPy dtype descriptor into the equivalent Arrow DataType.
+// Fixed-width numerics map directly; NPY_STRING -> binary, NPY_UNICODE ->
+// utf8; datetime64/timedelta64 use the unit stored in the descriptor's
+// c_metadata to pick a timestamp/duration unit (or date32 for day unit).
+Status NumPyDtypeToArrow(PyArray_Descr* descr, std::shared_ptr<DataType>* out) {
+  // Normalize platform-dependent NPY_INT/NPY_LONGLONG aliases first.
+  int type_num = fix_numpy_type_num(descr->type_num);
+
+  switch (type_num) {
+    TO_ARROW_TYPE_CASE(BOOL, boolean);
+    TO_ARROW_TYPE_CASE(INT8, int8);
+    TO_ARROW_TYPE_CASE(INT16, int16);
+    TO_ARROW_TYPE_CASE(INT32, int32);
+    TO_ARROW_TYPE_CASE(INT64, int64);
+    TO_ARROW_TYPE_CASE(UINT8, uint8);
+    TO_ARROW_TYPE_CASE(UINT16, uint16);
+    TO_ARROW_TYPE_CASE(UINT32, uint32);
+    TO_ARROW_TYPE_CASE(UINT64, uint64);
+    TO_ARROW_TYPE_CASE(FLOAT16, float16);
+    TO_ARROW_TYPE_CASE(FLOAT32, float32);
+    TO_ARROW_TYPE_CASE(FLOAT64, float64);
+    TO_ARROW_TYPE_CASE(STRING, binary);
+    TO_ARROW_TYPE_CASE(UNICODE, utf8);
+    case NPY_DATETIME: {
+      // The datetime unit lives in the descriptor's metadata, not type_num.
+      auto date_dtype =
+          reinterpret_cast<PyArray_DatetimeDTypeMetaData*>(descr->c_metadata);
+      switch (date_dtype->meta.base) {
+        case NPY_FR_s:
+          *out = timestamp(TimeUnit::SECOND);
+          break;
+        case NPY_FR_ms:
+          *out = timestamp(TimeUnit::MILLI);
+          break;
+        case NPY_FR_us:
+          *out = timestamp(TimeUnit::MICRO);
+          break;
+        case NPY_FR_ns:
+          *out = timestamp(TimeUnit::NANO);
+          break;
+        case NPY_FR_D:
+          // Day-resolution datetime64 maps to Arrow date32, not timestamp.
+          *out = date32();
+          break;
+        case NPY_FR_GENERIC:
+          return Status::NotImplemented("Unbound or generic datetime64 time unit");
+        default:
+          return Status::NotImplemented("Unsupported datetime64 time unit");
+      }
+    } break;
+    case NPY_TIMEDELTA: {
+      auto timedelta_dtype =
+          reinterpret_cast<PyArray_DatetimeDTypeMetaData*>(descr->c_metadata);
+      switch (timedelta_dtype->meta.base) {
+        case NPY_FR_s:
+          *out = duration(TimeUnit::SECOND);
+          break;
+        case NPY_FR_ms:
+          *out = duration(TimeUnit::MILLI);
+          break;
+        case NPY_FR_us:
+          *out = duration(TimeUnit::MICRO);
+          break;
+        case NPY_FR_ns:
+          *out = duration(TimeUnit::NANO);
+          break;
+        case NPY_FR_GENERIC:
+          return Status::NotImplemented("Unbound or generic timedelta64 time unit");
+        default:
+          return Status::NotImplemented("Unsupported timedelta64 time unit");
+      }
+    } break;
+    default: {
+      return Status::NotImplemented("Unsupported numpy type ", descr->type_num);
+    }
+  }
+
+  return Status::OK();
+}
+
+#undef TO_ARROW_TYPE_CASE
+
+// Wrap an ndarray as an Arrow Tensor without copying. The Tensor's buffer is
+// a NumPyBuffer holding a reference to `ao`, so the ndarray stays alive as
+// long as the Tensor does. Negative strides are rejected.
+Status NdarrayToTensor(MemoryPool* pool, PyObject* ao,
+                       const std::vector<std::string>& dim_names,
+                       std::shared_ptr<Tensor>* out) {
+  if (!PyArray_Check(ao)) {
+    return Status::TypeError("Did not pass ndarray object");
+  }
+
+  PyArrayObject* ndarray = reinterpret_cast<PyArrayObject*>(ao);
+
+  // TODO(wesm): What do we want to do with non-contiguous memory and negative strides?
+
+  int ndim = PyArray_NDIM(ndarray);
+
+  std::shared_ptr<Buffer> data = std::make_shared<NumPyBuffer>(ao);
+  std::vector<int64_t> shape(ndim);
+  std::vector<int64_t> strides(ndim);
+
+  // Copy shape/strides into int64 vectors; strides are in bytes, as in NumPy.
+  npy_intp* array_strides = PyArray_STRIDES(ndarray);
+  npy_intp* array_shape = PyArray_SHAPE(ndarray);
+  for (int i = 0; i < ndim; ++i) {
+    if (array_strides[i] < 0) {
+      return Status::Invalid("Negative ndarray strides not supported");
+    }
+    shape[i] = array_shape[i];
+    strides[i] = array_strides[i];
+  }
+
+  std::shared_ptr<DataType> type;
+  RETURN_NOT_OK(
+      GetTensorType(reinterpret_cast<PyObject*>(PyArray_DESCR(ndarray)), &type));
+  *out = std::make_shared<Tensor>(type, data, shape, strides, dim_names);
+  return Status::OK();
+}
+
+// Wrap an Arrow Tensor as an ndarray without copying. `base` becomes the
+// array's base object (keeping the data alive); if it is None/null, a new
+// Python wrapper of the tensor itself is used instead.
+Status TensorToNdarray(const std::shared_ptr<Tensor>& tensor, PyObject* base,
+                       PyObject** out) {
+  int type_num = 0;
+  RETURN_NOT_OK(GetNumPyType(*tensor->type(), &type_num));
+  // Fresh descriptor; PyArray_NewFromDescr below steals this reference.
+  PyArray_Descr* dtype = PyArray_DescrNewFromType(type_num);
+  RETURN_IF_PYERROR();
+
+  const int ndim = tensor->ndim();
+  std::vector<npy_intp> npy_shape(ndim);
+  std::vector<npy_intp> npy_strides(ndim);
+
+  for (int i = 0; i < ndim; ++i) {
+    npy_shape[i] = tensor->shape()[i];
+    npy_strides[i] = tensor->strides()[i];
+  }
+
+  const void* immutable_data = nullptr;
+  if (tensor->data()) {
+    immutable_data = tensor->data()->data();
+  }
+
+  // Remove const =(
+  void* mutable_data = const_cast<void*>(immutable_data);
+
+  // Mirror the tensor's layout/mutability in the ndarray flags.
+  int array_flags = 0;
+  if (tensor->is_row_major()) {
+    array_flags |= NPY_ARRAY_C_CONTIGUOUS;
+  }
+  if (tensor->is_column_major()) {
+    array_flags |= NPY_ARRAY_F_CONTIGUOUS;
+  }
+  if (tensor->is_mutable()) {
+    array_flags |= NPY_ARRAY_WRITEABLE;
+  }
+
+  PyObject* result =
+      PyArray_NewFromDescr(&PyArray_Type, dtype, ndim, npy_shape.data(),
+                           npy_strides.data(), mutable_data, array_flags, nullptr);
+  RETURN_IF_PYERROR();
+
+  if (base == Py_None || base == nullptr) {
+    // No explicit base: wrap the tensor so it owns the memory lifetime.
+    base = py::wrap_tensor(tensor);
+  } else {
+    // PyArray_SetBaseObject steals a reference, so add one for the caller's base.
+    Py_XINCREF(base);
+  }
+  PyArray_SetBaseObject(reinterpret_cast<PyArrayObject*>(result), base);
+  *out = result;
+  return Status::OK();
+}
+
+// Wrap the dense (non-zero) data of a sparse tensor in a ndarray, with
+// `data_shape` giving the ndarray shape and `base` installed as the array's
+// base object to keep the underlying buffer alive.
+static Status SparseTensorDataToNdarray(const SparseTensor& sparse_tensor,
+                                        std::vector<npy_intp> data_shape, PyObject* base,
+                                        PyObject** out_data) {
+  int type_num_data = 0;
+  RETURN_NOT_OK(GetNumPyType(*sparse_tensor.type(), &type_num_data));
+  // PyArray_NewFromDescr below steals this descriptor reference.
+  PyArray_Descr* dtype_data = PyArray_DescrNewFromType(type_num_data);
+  RETURN_IF_PYERROR();
+
+  const void* immutable_data = sparse_tensor.data()->data();
+  // Remove const =(
+  void* mutable_data = const_cast<void*>(immutable_data);
+  int array_flags = NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS;
+  if (sparse_tensor.is_mutable()) {
+    array_flags |= NPY_ARRAY_WRITEABLE;
+  }
+
+  // strides == nullptr: let NumPy compute contiguous strides.
+  *out_data = PyArray_NewFromDescr(&PyArray_Type, dtype_data,
+                                   static_cast<int>(data_shape.size()), data_shape.data(),
+                                   nullptr, mutable_data, array_flags, nullptr);
+  RETURN_IF_PYERROR();
+  // SetBaseObject steals a reference, so take one on behalf of the array.
+  Py_XINCREF(base);
+  PyArray_SetBaseObject(reinterpret_cast<PyArrayObject*>(*out_data), base);
+  return Status::OK();
+}
+
+// Expose a SparseCOOTensor as two ndarrays: the non-zero values (`out_data`,
+// shaped {nnz, 1}) and the coordinates matrix (`out_coords`).
+Status SparseCOOTensorToNdarray(const std::shared_ptr<SparseCOOTensor>& sparse_tensor,
+                                PyObject* base, PyObject** out_data,
+                                PyObject** out_coords) {
+  const auto& sparse_index = arrow::internal::checked_cast<const SparseCOOIndex&>(
+      *sparse_tensor->sparse_index());
+
+  // Wrap tensor data
+  OwnedRef result_data;
+  RETURN_NOT_OK(SparseTensorDataToNdarray(
+      *sparse_tensor, {sparse_tensor->non_zero_length(), 1}, base, result_data.ref()));
+
+  // Wrap indices
+  PyObject* result_coords;
+  RETURN_NOT_OK(TensorToNdarray(sparse_index.indices(), base, &result_coords));
+
+  // Transfer ownership to the caller only once both wraps have succeeded.
+  *out_data = result_data.detach();
+  *out_coords = result_coords;
+  return Status::OK();
+}
+
+// Shared implementation for CSR/CSC matrices: expose the values, indptr and
+// indices arrays as ndarrays. Only the index type differs between the two
+// formats; any other format id is rejected.
+Status SparseCSXMatrixToNdarray(const std::shared_ptr<SparseTensor>& sparse_tensor,
+                                PyObject* base, PyObject** out_data,
+                                PyObject** out_indptr, PyObject** out_indices) {
+  // Wrap indices
+  OwnedRef result_indptr;
+  OwnedRef result_indices;
+
+  switch (sparse_tensor->format_id()) {
+    case SparseTensorFormat::CSR: {
+      const auto& sparse_index = arrow::internal::checked_cast<const SparseCSRIndex&>(
+          *sparse_tensor->sparse_index());
+      RETURN_NOT_OK(TensorToNdarray(sparse_index.indptr(), base, result_indptr.ref()));
+      RETURN_NOT_OK(TensorToNdarray(sparse_index.indices(), base, result_indices.ref()));
+      break;
+    }
+    case SparseTensorFormat::CSC: {
+      const auto& sparse_index = arrow::internal::checked_cast<const SparseCSCIndex&>(
+          *sparse_tensor->sparse_index());
+      RETURN_NOT_OK(TensorToNdarray(sparse_index.indptr(), base, result_indptr.ref()));
+      RETURN_NOT_OK(TensorToNdarray(sparse_index.indices(), base, result_indices.ref()));
+      break;
+    }
+    default:
+      return Status::NotImplemented("Invalid SparseTensor type.");
+  }
+
+  // Wrap tensor data
+  OwnedRef result_data;
+  RETURN_NOT_OK(SparseTensorDataToNdarray(
+      *sparse_tensor, {sparse_tensor->non_zero_length(), 1}, base, result_data.ref()));
+
+  // Transfer ownership to the caller only after all wraps have succeeded.
+  *out_data = result_data.detach();
+  *out_indptr = result_indptr.detach();
+  *out_indices = result_indices.detach();
+  return Status::OK();
+}
+
+// CSR entry point; shares the implementation with CSC via SparseCSXMatrixToNdarray.
+Status SparseCSRMatrixToNdarray(const std::shared_ptr<SparseCSRMatrix>& sparse_tensor,
+                                PyObject* base, PyObject** out_data,
+                                PyObject** out_indptr, PyObject** out_indices) {
+  return SparseCSXMatrixToNdarray(sparse_tensor, base, out_data, out_indptr, out_indices);
+}
+
+// CSC entry point; shares the implementation with CSR via SparseCSXMatrixToNdarray.
+Status SparseCSCMatrixToNdarray(const std::shared_ptr<SparseCSCMatrix>& sparse_tensor,
+                                PyObject* base, PyObject** out_data,
+                                PyObject** out_indptr, PyObject** out_indices) {
+  return SparseCSXMatrixToNdarray(sparse_tensor, base, out_data, out_indptr, out_indices);
+}
+
+// Expose a SparseCSFTensor as ndarrays: values (`out_data`), plus Python
+// lists of per-dimension indptr (ndim-1 entries) and indices (ndim entries)
+// arrays.
+Status SparseCSFTensorToNdarray(const std::shared_ptr<SparseCSFTensor>& sparse_tensor,
+                                PyObject* base, PyObject** out_data,
+                                PyObject** out_indptr, PyObject** out_indices) {
+  const auto& sparse_index = arrow::internal::checked_cast<const SparseCSFIndex&>(
+      *sparse_tensor->sparse_index());
+
+  // Wrap tensor data
+  OwnedRef result_data;
+  RETURN_NOT_OK(SparseTensorDataToNdarray(
+      *sparse_tensor, {sparse_tensor->non_zero_length(), 1}, base, result_data.ref()));
+
+  // Wrap indices
+  int ndim = static_cast<int>(sparse_index.indices().size());
+  OwnedRef indptr(PyList_New(ndim - 1));
+  OwnedRef indices(PyList_New(ndim));
+  RETURN_IF_PYERROR();
+
+  for (int i = 0; i < ndim - 1; ++i) {
+    PyObject* item;
+    RETURN_NOT_OK(TensorToNdarray(sparse_index.indptr()[i], base, &item));
+    // PyList_SetItem steals `item` on success; on failure we must drop it.
+    if (PyList_SetItem(indptr.obj(), i, item) < 0) {
+      Py_XDECREF(item);
+      RETURN_IF_PYERROR();
+    }
+  }
+  for (int i = 0; i < ndim; ++i) {
+    PyObject* item;
+    RETURN_NOT_OK(TensorToNdarray(sparse_index.indices()[i], base, &item));
+    // Same stealing semantics as above.
+    if (PyList_SetItem(indices.obj(), i, item) < 0) {
+      Py_XDECREF(item);
+      RETURN_IF_PYERROR();
+    }
+  }
+
+  *out_indptr = indptr.detach();
+  *out_indices = indices.detach();
+  *out_data = result_data.detach();
+  return Status::OK();
+}
+
+// Build a SparseCOOTensor from a values ndarray and an int64 coordinates
+// ndarray. The coords dtype must already be int64 (checked with a hard
+// assertion; callers are expected to ensure it).
+Status NdarraysToSparseCOOTensor(MemoryPool* pool, PyObject* data_ao, PyObject* coords_ao,
+                                 const std::vector<int64_t>& shape,
+                                 const std::vector<std::string>& dim_names,
+                                 std::shared_ptr<SparseCOOTensor>* out) {
+  if (!PyArray_Check(data_ao) || !PyArray_Check(coords_ao)) {
+    return Status::TypeError("Did not pass ndarray object");
+  }
+
+  PyArrayObject* ndarray_data = reinterpret_cast<PyArrayObject*>(data_ao);
+  // Zero-copy: the sparse tensor's data buffer references the ndarray.
+  std::shared_ptr<Buffer> data = std::make_shared<NumPyBuffer>(data_ao);
+  std::shared_ptr<DataType> type_data;
+  RETURN_NOT_OK(GetTensorType(reinterpret_cast<PyObject*>(PyArray_DESCR(ndarray_data)),
+                              &type_data));
+
+  std::shared_ptr<Tensor> coords;
+  RETURN_NOT_OK(NdarrayToTensor(pool, coords_ao, {}, &coords));
+  ARROW_CHECK_EQ(coords->type_id(), Type::INT64);  // Should be ensured by caller
+
+  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<SparseCOOIndex> sparse_index,
+                        SparseCOOIndex::Make(coords));
+  *out = std::make_shared<SparseTensorImpl<SparseCOOIndex>>(sparse_index, type_data, data,
+                                                            shape, dim_names);
+  return Status::OK();
+}
+
+// Shared builder for CSR/CSC matrices from values/indptr/indices ndarrays.
+// IndexType selects SparseCSRIndex or SparseCSCIndex; indptr and indices
+// must already be int64 (hard-checked; callers ensure this).
+template <class IndexType>
+Status NdarraysToSparseCSXMatrix(MemoryPool* pool, PyObject* data_ao, PyObject* indptr_ao,
+                                 PyObject* indices_ao, const std::vector<int64_t>& shape,
+                                 const std::vector<std::string>& dim_names,
+                                 std::shared_ptr<SparseTensorImpl<IndexType>>* out) {
+  if (!PyArray_Check(data_ao) || !PyArray_Check(indptr_ao) ||
+      !PyArray_Check(indices_ao)) {
+    return Status::TypeError("Did not pass ndarray object");
+  }
+
+  PyArrayObject* ndarray_data = reinterpret_cast<PyArrayObject*>(data_ao);
+  // Zero-copy: the sparse tensor's data buffer references the ndarray.
+  std::shared_ptr<Buffer> data = std::make_shared<NumPyBuffer>(data_ao);
+  std::shared_ptr<DataType> type_data;
+  RETURN_NOT_OK(GetTensorType(reinterpret_cast<PyObject*>(PyArray_DESCR(ndarray_data)),
+                              &type_data));
+
+  std::shared_ptr<Tensor> indptr, indices;
+  RETURN_NOT_OK(NdarrayToTensor(pool, indptr_ao, {}, &indptr));
+  RETURN_NOT_OK(NdarrayToTensor(pool, indices_ao, {}, &indices));
+  ARROW_CHECK_EQ(indptr->type_id(), Type::INT64);   // Should be ensured by caller
+  ARROW_CHECK_EQ(indices->type_id(), Type::INT64);  // Should be ensured by caller
+
+  auto sparse_index = std::make_shared<IndexType>(
+      std::static_pointer_cast<NumericTensor<Int64Type>>(indptr),
+      std::static_pointer_cast<NumericTensor<Int64Type>>(indices));
+  *out = std::make_shared<SparseTensorImpl<IndexType>>(sparse_index, type_data, data,
+                                                       shape, dim_names);
+  return Status::OK();
+}
+
+// Build a SparseCSFTensor from a values ndarray plus sequences of per-
+// dimension indptr (ndim-1 items) and indices (ndim items) ndarrays.
+// NOTE(review): indptr_ao/indices_ao are accessed with
+// PySequence_Fast_GET_ITEM without a sequence check — callers presumably
+// pass lists/tuples of the right length; confirm at the call sites.
+Status NdarraysToSparseCSFTensor(MemoryPool* pool, PyObject* data_ao, PyObject* indptr_ao,
+                                 PyObject* indices_ao, const std::vector<int64_t>& shape,
+                                 const std::vector<int64_t>& axis_order,
+                                 const std::vector<std::string>& dim_names,
+                                 std::shared_ptr<SparseCSFTensor>* out) {
+  if (!PyArray_Check(data_ao)) {
+    return Status::TypeError("Did not pass ndarray object for data");
+  }
+  const int ndim = static_cast<const int>(shape.size());
+  PyArrayObject* ndarray_data = reinterpret_cast<PyArrayObject*>(data_ao);
+  // Zero-copy: the sparse tensor's data buffer references the ndarray.
+  std::shared_ptr<Buffer> data = std::make_shared<NumPyBuffer>(data_ao);
+  std::shared_ptr<DataType> type_data;
+  RETURN_NOT_OK(GetTensorType(reinterpret_cast<PyObject*>(PyArray_DESCR(ndarray_data)),
+                              &type_data));
+
+  std::vector<std::shared_ptr<Tensor>> indptr(ndim - 1);
+  std::vector<std::shared_ptr<Tensor>> indices(ndim);
+
+  for (int i = 0; i < ndim - 1; ++i) {
+    // Borrowed reference; no decref needed.
+    PyObject* item = PySequence_Fast_GET_ITEM(indptr_ao, i);
+    if (!PyArray_Check(item)) {
+      return Status::TypeError("Did not pass ndarray object for indptr");
+    }
+    RETURN_NOT_OK(NdarrayToTensor(pool, item, {}, &indptr[i]));
+    ARROW_CHECK_EQ(indptr[i]->type_id(), Type::INT64);  // Should be ensured by caller
+  }
+
+  for (int i = 0; i < ndim; ++i) {
+    PyObject* item = PySequence_Fast_GET_ITEM(indices_ao, i);
+    if (!PyArray_Check(item)) {
+      return Status::TypeError("Did not pass ndarray object for indices");
+    }
+    RETURN_NOT_OK(NdarrayToTensor(pool, item, {}, &indices[i]));
+    ARROW_CHECK_EQ(indices[i]->type_id(), Type::INT64);  // Should be ensured by caller
+  }
+
+  auto sparse_index = std::make_shared<SparseCSFIndex>(indptr, indices, axis_order);
+  *out = std::make_shared<SparseTensorImpl<SparseCSFIndex>>(sparse_index, type_data, data,
+                                                            shape, dim_names);
+  return Status::OK();
+}
+
+// CSR builder; shares the implementation with CSC via the templated helper.
+Status NdarraysToSparseCSRMatrix(MemoryPool* pool, PyObject* data_ao, PyObject* indptr_ao,
+                                 PyObject* indices_ao, const std::vector<int64_t>& shape,
+                                 const std::vector<std::string>& dim_names,
+                                 std::shared_ptr<SparseCSRMatrix>* out) {
+  return NdarraysToSparseCSXMatrix<SparseCSRIndex>(pool, data_ao, indptr_ao, indices_ao,
+                                                   shape, dim_names, out);
+}
+
+// CSC builder; shares the implementation with CSR via the templated helper.
+Status NdarraysToSparseCSCMatrix(MemoryPool* pool, PyObject* data_ao, PyObject* indptr_ao,
+                                 PyObject* indices_ao, const std::vector<int64_t>& shape,
+                                 const std::vector<std::string>& dim_names,
+                                 std::shared_ptr<SparseCSCMatrix>* out) {
+  return NdarraysToSparseCSXMatrix<SparseCSCIndex>(pool, data_ao, indptr_ao, indices_ao,
+                                                   shape, dim_names, out);
+}
+
+// Convert a dense Tensor into a SparseCOOTensor.
+Status TensorToSparseCOOTensor(const std::shared_ptr<Tensor>& tensor,
+                               std::shared_ptr<SparseCOOTensor>* out) {
+  auto maybe_sparse = SparseCOOTensor::Make(*tensor);
+  return maybe_sparse.Value(out);
+}
+
+// Convert a dense Tensor into a SparseCSRMatrix.
+Status TensorToSparseCSRMatrix(const std::shared_ptr<Tensor>& tensor,
+                               std::shared_ptr<SparseCSRMatrix>* out) {
+  auto maybe_sparse = SparseCSRMatrix::Make(*tensor);
+  return maybe_sparse.Value(out);
+}
+
+// Convert a dense Tensor into a SparseCSCMatrix.
+Status TensorToSparseCSCMatrix(const std::shared_ptr<Tensor>& tensor,
+                               std::shared_ptr<SparseCSCMatrix>* out) {
+  auto maybe_sparse = SparseCSCMatrix::Make(*tensor);
+  return maybe_sparse.Value(out);
+}
+
+// Convert a dense Tensor into a SparseCSFTensor.
+Status TensorToSparseCSFTensor(const std::shared_ptr<Tensor>& tensor,
+                               std::shared_ptr<SparseCSFTensor>* out) {
+  auto maybe_sparse = SparseCSFTensor::Make(*tensor);
+  return maybe_sparse.Value(out);
+}
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_convert.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_convert.h
new file mode 100644
index 0000000000..10451077a2
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_convert.h
@@ -0,0 +1,120 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Functions for converting between pandas's NumPy-based data representation
+// and Arrow data structures
+
+#pragma once
+
+#include "arrow/python/platform.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "arrow/buffer.h"
+#include "arrow/python/visibility.h"
+#include "arrow/sparse_tensor.h"
+
+namespace arrow {
+
+class DataType;
+class MemoryPool;
+class Status;
+class Tensor;
+
+namespace py {
+
+// Zero-copy Arrow Buffer view over a NumPy ndarray. Keeps a strong
+// reference to the Python object (released in the destructor under the GIL).
+class ARROW_PYTHON_EXPORT NumPyBuffer : public Buffer {
+ public:
+  explicit NumPyBuffer(PyObject* arr);
+  virtual ~NumPyBuffer();
+
+ private:
+  // Wrapped Python object; owns a reference for the buffer's lifetime.
+  PyObject* arr_;
+};
+
+// --- dtype conversion ---------------------------------------------------
+ARROW_PYTHON_EXPORT
+Status NumPyDtypeToArrow(PyObject* dtype, std::shared_ptr<DataType>* out);
+ARROW_PYTHON_EXPORT
+Status NumPyDtypeToArrow(PyArray_Descr* descr, std::shared_ptr<DataType>* out);
+
+// --- dense tensor <-> ndarray (zero-copy) -------------------------------
+ARROW_PYTHON_EXPORT Status NdarrayToTensor(MemoryPool* pool, PyObject* ao,
+                                           const std::vector<std::string>& dim_names,
+                                           std::shared_ptr<Tensor>* out);
+
+ARROW_PYTHON_EXPORT Status TensorToNdarray(const std::shared_ptr<Tensor>& tensor,
+                                           PyObject* base, PyObject** out);
+
+// --- sparse tensor -> ndarrays ------------------------------------------
+ARROW_PYTHON_EXPORT Status
+SparseCOOTensorToNdarray(const std::shared_ptr<SparseCOOTensor>& sparse_tensor,
+                         PyObject* base, PyObject** out_data, PyObject** out_coords);
+
+// Shared CSR/CSC implementation; not exported.
+Status SparseCSXMatrixToNdarray(const std::shared_ptr<SparseTensor>& sparse_tensor,
+                                PyObject* base, PyObject** out_data,
+                                PyObject** out_indptr, PyObject** out_indices);
+
+ARROW_PYTHON_EXPORT Status SparseCSRMatrixToNdarray(
+    const std::shared_ptr<SparseCSRMatrix>& sparse_tensor, PyObject* base,
+    PyObject** out_data, PyObject** out_indptr, PyObject** out_indices);
+
+ARROW_PYTHON_EXPORT Status SparseCSCMatrixToNdarray(
+    const std::shared_ptr<SparseCSCMatrix>& sparse_tensor, PyObject* base,
+    PyObject** out_data, PyObject** out_indptr, PyObject** out_indices);
+
+ARROW_PYTHON_EXPORT Status SparseCSFTensorToNdarray(
+    const std::shared_ptr<SparseCSFTensor>& sparse_tensor, PyObject* base,
+    PyObject** out_data, PyObject** out_indptr, PyObject** out_indices);
+
+// --- ndarrays -> sparse tensors -----------------------------------------
+ARROW_PYTHON_EXPORT Status NdarraysToSparseCOOTensor(
+    MemoryPool* pool, PyObject* data_ao, PyObject* coords_ao,
+    const std::vector<int64_t>& shape, const std::vector<std::string>& dim_names,
+    std::shared_ptr<SparseCOOTensor>* out);
+
+ARROW_PYTHON_EXPORT Status NdarraysToSparseCSRMatrix(
+    MemoryPool* pool, PyObject* data_ao, PyObject* indptr_ao, PyObject* indices_ao,
+    const std::vector<int64_t>& shape, const std::vector<std::string>& dim_names,
+    std::shared_ptr<SparseCSRMatrix>* out);
+
+ARROW_PYTHON_EXPORT Status NdarraysToSparseCSCMatrix(
+    MemoryPool* pool, PyObject* data_ao, PyObject* indptr_ao, PyObject* indices_ao,
+    const std::vector<int64_t>& shape, const std::vector<std::string>& dim_names,
+    std::shared_ptr<SparseCSCMatrix>* out);
+
+ARROW_PYTHON_EXPORT Status NdarraysToSparseCSFTensor(
+    MemoryPool* pool, PyObject* data_ao, PyObject* indptr_ao, PyObject* indices_ao,
+    const std::vector<int64_t>& shape, const std::vector<int64_t>& axis_order,
+    const std::vector<std::string>& dim_names, std::shared_ptr<SparseCSFTensor>* out);
+
+// --- dense tensor -> sparse tensor --------------------------------------
+ARROW_PYTHON_EXPORT Status
+TensorToSparseCOOTensor(const std::shared_ptr<Tensor>& tensor,
+                        std::shared_ptr<SparseCOOTensor>* csparse_tensor);
+
+ARROW_PYTHON_EXPORT Status
+TensorToSparseCSRMatrix(const std::shared_ptr<Tensor>& tensor,
+                        std::shared_ptr<SparseCSRMatrix>* csparse_tensor);
+
+ARROW_PYTHON_EXPORT Status
+TensorToSparseCSCMatrix(const std::shared_ptr<Tensor>& tensor,
+                        std::shared_ptr<SparseCSCMatrix>* csparse_tensor);
+
+ARROW_PYTHON_EXPORT Status
+TensorToSparseCSFTensor(const std::shared_ptr<Tensor>& tensor,
+                        std::shared_ptr<SparseCSFTensor>* csparse_tensor);
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_internal.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_internal.h
new file mode 100644
index 0000000000..973f577cb1
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_internal.h
@@ -0,0 +1,182 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Internal utilities for dealing with NumPy
+
+#pragma once
+
+#include "arrow/python/numpy_interop.h"
+
+#include "arrow/status.h"
+
+#include "arrow/python/platform.h"
+
+#include <cstdint>
+#include <sstream>
+#include <string>
+
+namespace arrow {
+namespace py {
+
+/// Indexing convenience for interacting with strided 1-dim ndarray objects.
+/// Holds a strong reference to the array for its own lifetime. Indexing is
+/// stride-aware (NumPy strides are in bytes), so it works on non-contiguous
+/// 1-D views as well.
+template <typename T>
+class Ndarray1DIndexer {
+ public:
+  typedef int64_t size_type;
+
+  Ndarray1DIndexer() : arr_(NULLPTR), data_(NULLPTR) {}
+
+  // Precondition: arr is 1-dimensional (DCHECK'd, not error-returned).
+  explicit Ndarray1DIndexer(PyArrayObject* arr) : Ndarray1DIndexer() {
+    arr_ = arr;
+    DCHECK_EQ(1, PyArray_NDIM(arr)) << "Only works with 1-dimensional arrays";
+    Py_INCREF(arr);
+    data_ = reinterpret_cast<uint8_t*>(PyArray_DATA(arr));
+    stride_ = PyArray_STRIDES(arr)[0];
+  }
+
+  ~Ndarray1DIndexer() { Py_XDECREF(arr_); }
+
+  int64_t size() const { return PyArray_SIZE(arr_); }
+
+  const T* data() const { return reinterpret_cast<const T*>(data_); }
+
+  // True when elements are not densely packed, i.e. data() cannot be used
+  // as a plain C array of T.
+  bool is_strided() const { return stride_ != sizeof(T); }
+
+  // Stride-aware element access; stride_ is in bytes, hence the uint8_t base.
+  T& operator[](size_type index) {
+    return *reinterpret_cast<T*>(data_ + index * stride_);
+  }
+  const T& operator[](size_type index) const {
+    return *reinterpret_cast<const T*>(data_ + index * stride_);
+  }
+
+ private:
+  PyArrayObject* arr_;
+  uint8_t* data_;
+  int64_t stride_;
+};
+
+// Handling of Numpy Types by their static numbers
+// (the NPY_TYPES enum and related defines)
+
+// Return a human-readable name for a NumPy type number, for use in error
+// messages. Unrecognized numbers produce a descriptive placeholder rather
+// than failing.
+static inline std::string GetNumPyTypeName(int npy_type) {
+#define TYPE_CASE(TYPE, NAME) \
+  case NPY_##TYPE: \
+    return NAME;
+
+  switch (npy_type) {
+    TYPE_CASE(BOOL, "bool")
+    TYPE_CASE(INT8, "int8")
+    TYPE_CASE(INT16, "int16")
+    TYPE_CASE(INT32, "int32")
+    TYPE_CASE(INT64, "int64")
+// Cover the platform-dependent integer alias not already covered by the
+// fixed-width names above (see numpy_interop.h for the macros).
+#if !NPY_INT32_IS_INT
+    TYPE_CASE(INT, "intc")
+#endif
+#if !NPY_INT64_IS_LONG_LONG
+    TYPE_CASE(LONGLONG, "longlong")
+#endif
+    TYPE_CASE(UINT8, "uint8")
+    TYPE_CASE(UINT16, "uint16")
+    TYPE_CASE(UINT32, "uint32")
+    TYPE_CASE(UINT64, "uint64")
+#if !NPY_INT32_IS_INT
+    TYPE_CASE(UINT, "uintc")
+#endif
+#if !NPY_INT64_IS_LONG_LONG
+    TYPE_CASE(ULONGLONG, "ulonglong")
+#endif
+    TYPE_CASE(FLOAT16, "float16")
+    TYPE_CASE(FLOAT32, "float32")
+    TYPE_CASE(FLOAT64, "float64")
+    TYPE_CASE(DATETIME, "datetime64")
+    TYPE_CASE(TIMEDELTA, "timedelta64")
+    TYPE_CASE(OBJECT, "object")
+    TYPE_CASE(VOID, "void")
+    default:
+      break;
+  }
+
+#undef TYPE_CASE
+  std::stringstream ss;
+  ss << "unrecognized type (" << npy_type << ") in GetNumPyTypeName";
+  return ss.str();
+}
+
+#define TYPE_VISIT_INLINE(TYPE) \
+  case NPY_##TYPE: \
+    return visitor->template Visit<NPY_##TYPE>(arr);
+
+// Static dispatch on the ndarray's type number: calls
+// visitor->Visit<NPY_XXX>(arr) for the matching type, or returns
+// NotImplemented for unsupported type numbers.
+template <typename VISITOR>
+inline Status VisitNumpyArrayInline(PyArrayObject* arr, VISITOR* visitor) {
+  switch (PyArray_TYPE(arr)) {
+    TYPE_VISIT_INLINE(BOOL);
+    TYPE_VISIT_INLINE(INT8);
+    TYPE_VISIT_INLINE(UINT8);
+    TYPE_VISIT_INLINE(INT16);
+    TYPE_VISIT_INLINE(UINT16);
+    TYPE_VISIT_INLINE(INT32);
+    TYPE_VISIT_INLINE(UINT32);
+    TYPE_VISIT_INLINE(INT64);
+    TYPE_VISIT_INLINE(UINT64);
+// Handle the platform-dependent integer aliases (see numpy_interop.h).
+#if !NPY_INT32_IS_INT
+    TYPE_VISIT_INLINE(INT);
+    TYPE_VISIT_INLINE(UINT);
+#endif
+#if !NPY_INT64_IS_LONG_LONG
+    TYPE_VISIT_INLINE(LONGLONG);
+    TYPE_VISIT_INLINE(ULONGLONG);
+#endif
+    TYPE_VISIT_INLINE(FLOAT16);
+    TYPE_VISIT_INLINE(FLOAT32);
+    TYPE_VISIT_INLINE(FLOAT64);
+    TYPE_VISIT_INLINE(DATETIME);
+    TYPE_VISIT_INLINE(TIMEDELTA);
+    TYPE_VISIT_INLINE(OBJECT);
+  }
+  return Status::NotImplemented("NumPy type not implemented: ",
+                                GetNumPyTypeName(PyArray_TYPE(arr)));
+}
+
+namespace internal {
+
+// True if `obj` is a Python float or a NumPy floating-point scalar.
+inline bool PyFloatScalar_Check(PyObject* obj) {
+  if (PyFloat_Check(obj)) {
+    return true;
+  }
+  return PyArray_IsScalar(obj, Floating);
+}
+
+// True if `obj` is a Python int or a NumPy integer scalar.
+inline bool PyIntScalar_Check(PyObject* obj) {
+  if (PyLong_Check(obj)) {
+    return true;
+  }
+  return PyArray_IsScalar(obj, Integer);
+}
+
+// True if `obj` is a Python bool or a NumPy bool scalar.
+inline bool PyBoolScalar_Check(PyObject* obj) {
+  if (PyBool_Check(obj)) {
+    return true;
+  }
+  return PyArray_IsScalar(obj, Bool);
+}
+
+// Return a dtype descriptor for `type` that the caller may safely mutate.
+// For NPY_DATETIME a fresh descriptor copy is created, since it is not safe
+// to mutate the result of PyArray_DescrFromType.
+static inline PyArray_Descr* GetSafeNumPyDtype(int type) {
+  return type == NPY_DATETIME ? PyArray_DescrNewFromType(type)
+                              : PyArray_DescrFromType(type);
+}
+
+} // namespace internal
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_interop.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_interop.h
new file mode 100644
index 0000000000..ce7baed259
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_interop.h
@@ -0,0 +1,96 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/python/platform.h" // IWYU pragma: export
+
+#include <numpy/numpyconfig.h> // IWYU pragma: export
+
+// Don't use the deprecated Numpy functions
+#ifdef NPY_1_7_API_VERSION
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+#else
+#define NPY_ARRAY_NOTSWAPPED NPY_NOTSWAPPED
+#define NPY_ARRAY_ALIGNED NPY_ALIGNED
+#define NPY_ARRAY_WRITEABLE NPY_WRITEABLE
+#define NPY_ARRAY_UPDATEIFCOPY NPY_UPDATEIFCOPY
+#endif
+
+// This is required to be able to access the NumPy C API properly in C++ files
+// other than init.cc.
+#define PY_ARRAY_UNIQUE_SYMBOL arrow_ARRAY_API
+#ifndef NUMPY_IMPORT_ARRAY
+#define NO_IMPORT_ARRAY
+#endif
+
+#include <numpy/arrayobject.h> // IWYU pragma: export
+#include <numpy/arrayscalars.h> // IWYU pragma: export
+#include <numpy/ufuncobject.h> // IWYU pragma: export
+
+// A bit subtle. Numpy has 5 canonical integer types:
+// (or, rather, type pairs: signed and unsigned)
+// NPY_BYTE, NPY_SHORT, NPY_INT, NPY_LONG, NPY_LONGLONG
+// It also has 4 fixed-width integer aliases.
+// When mapping Arrow integer types to these 4 fixed-width aliases,
+// we always miss one of the canonical types (even though it may
+// have the same width as one of the aliases).
+// Which one depends on the platform...
+// On a LP64 system, NPY_INT64 maps to NPY_LONG and
+// NPY_LONGLONG needs to be handled separately.
+// On a LLP64 system, NPY_INT32 maps to NPY_LONG and
+// NPY_INT needs to be handled separately.
+
+#if NPY_BITSOF_LONG == 32 && NPY_BITSOF_LONGLONG == 64
+#define NPY_INT64_IS_LONG_LONG 1
+#else
+#define NPY_INT64_IS_LONG_LONG 0
+#endif
+
+#if NPY_BITSOF_INT == 32 && NPY_BITSOF_LONG == 64
+#define NPY_INT32_IS_INT 1
+#else
+#define NPY_INT32_IS_INT 0
+#endif
+
+namespace arrow {
+namespace py {
+
+// Initialize the NumPy C API. Only the translation unit that defines
+// NUMPY_IMPORT_ARRAY (init.cc) actually runs import_array/import_umath;
+// everywhere else this is a no-op returning 0.
+inline int import_numpy() {
+#ifdef NUMPY_IMPORT_ARRAY
+  // The *1(-1) variants return -1 from the enclosing function on failure.
+  import_array1(-1);
+  import_umath1(-1);
+#endif
+
+  return 0;
+}
+
+// See above about the missing Numpy integer type numbers: normalize the
+// platform-dependent NPY_INT/NPY_UINT and NPY_LONGLONG/NPY_ULONGLONG type
+// numbers to their fixed-width equivalents so switch statements only need to
+// handle the fixed-width aliases.
+inline int fix_numpy_type_num(int type_num) {
+#if !NPY_INT32_IS_INT && NPY_BITSOF_INT == 32
+  if (type_num == NPY_INT) return NPY_INT32;
+  if (type_num == NPY_UINT) return NPY_UINT32;
+#endif
+#if !NPY_INT64_IS_LONG_LONG && NPY_BITSOF_LONGLONG == 64
+  if (type_num == NPY_LONGLONG) return NPY_INT64;
+  if (type_num == NPY_ULONGLONG) return NPY_UINT64;
+#endif
+  return type_num;
+}
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_to_arrow.cc b/contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_to_arrow.cc
new file mode 100644
index 0000000000..a382f76633
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_to_arrow.cc
@@ -0,0 +1,865 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Functions for pandas conversion via NumPy
+
+#include "arrow/python/numpy_to_arrow.h"
+#include "arrow/python/numpy_interop.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <cstring>
+#include <limits>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "arrow/array.h"
+#include "arrow/array/builder_binary.h"
+#include "arrow/status.h"
+#include "arrow/table.h"
+#include "arrow/type_fwd.h"
+#include "arrow/type_traits.h"
+#include "arrow/util/bit_util.h"
+#include "arrow/util/bitmap_generate.h"
+#include "arrow/util/bitmap_ops.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/logging.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/string.h"
+#include "arrow/util/utf8.h"
+#include "arrow/visitor_inline.h"
+
+#include "arrow/compute/api_scalar.h"
+
+#include "arrow/python/common.h"
+#include "arrow/python/datetime.h"
+#include "arrow/python/helpers.h"
+#include "arrow/python/iterators.h"
+#include "arrow/python/numpy_convert.h"
+#include "arrow/python/numpy_internal.h"
+#include "arrow/python/python_to_arrow.h"
+#include "arrow/python/type_traits.h"
+
+namespace arrow {
+
+using internal::checked_cast;
+using internal::CopyBitmap;
+using internal::GenerateBitsUnrolled;
+
+namespace py {
+
+using internal::NumPyTypeSize;
+
+// ----------------------------------------------------------------------
+// Conversion utilities
+
+namespace {
+
+// Allocate a zero-initialized validity bitmap with capacity for `length`
+// bits. The buffer is resizable so callers can grow it later if needed.
+Status AllocateNullBitmap(MemoryPool* pool, int64_t length,
+                          std::shared_ptr<ResizableBuffer>* out) {
+  int64_t null_bytes = BitUtil::BytesForBits(length);
+  ARROW_ASSIGN_OR_RAISE(auto null_bitmap, AllocateResizableBuffer(null_bytes, pool));
+
+  // Padding zeroed by AllocateResizableBuffer
+  memset(null_bitmap->mutable_data(), 0, static_cast<size_t>(null_bytes));
+  *out = std::move(null_bitmap);
+  return Status::OK();
+}
+
+// ----------------------------------------------------------------------
+// Conversion from NumPy-in-Pandas to Arrow null bitmap
+
+// Scan a NumPy array for the dtype's null sentinel (per npy_traits::isnull,
+// e.g. NaN / NaT) and set the corresponding bits in `bitmap`
+// (1 = valid, 0 = null). Returns the number of nulls found.
+// `bitmap` must be zero-initialized and sized for the array length.
+template <int TYPE>
+inline int64_t ValuesToBitmap(PyArrayObject* arr, uint8_t* bitmap) {
+  typedef internal::npy_traits<TYPE> traits;
+  typedef typename traits::value_type T;
+
+  int64_t null_count = 0;
+
+  // Ndarray1DIndexer transparently handles strided access
+  Ndarray1DIndexer<T> values(arr);
+  // NOTE(review): loop index is `int` while the array size may be 64-bit —
+  // potential truncation for arrays with more than INT_MAX elements; confirm
+  // against upstream Arrow.
+  for (int i = 0; i < values.size(); ++i) {
+    if (traits::isnull(values[i])) {
+      ++null_count;
+    } else {
+      BitUtil::SetBit(bitmap, i);
+    }
+  }
+
+  return null_count;
+}
+
+// Visitor dispatched over NumPy type numbers that derives an Arrow validity
+// bitmap from a NumPy array's null sentinels (NaT always; NaN and friends
+// only when from_pandas is true).
+class NumPyNullsConverter {
+ public:
+  /// Convert the given array's null values to a null bitmap.
+  /// The null bitmap is only allocated if null values are ever possible.
+  /// On return *out_null_bitmap_ may be null (no sentinels possible) and
+  /// *out_null_count is the number of nulls found (0 if no bitmap).
+  static Status Convert(MemoryPool* pool, PyArrayObject* arr, bool from_pandas,
+                        std::shared_ptr<ResizableBuffer>* out_null_bitmap_,
+                        int64_t* out_null_count) {
+    NumPyNullsConverter converter(pool, arr, from_pandas);
+    RETURN_NOT_OK(VisitNumpyArrayInline(arr, &converter));
+    *out_null_bitmap_ = converter.null_bitmap_;
+    *out_null_count = converter.null_count_;
+    return Status::OK();
+  }
+
+  // Visitor entry point, instantiated once per NumPy type number
+  template <int TYPE>
+  Status Visit(PyArrayObject* arr) {
+    typedef internal::npy_traits<TYPE> traits;
+
+    const bool null_sentinels_possible =
+        // Always treat Numpy's NaT as null
+        TYPE == NPY_DATETIME || TYPE == NPY_TIMEDELTA ||
+        // Observing pandas's null sentinels
+        (from_pandas_ && traits::supports_nulls);
+
+    if (null_sentinels_possible) {
+      RETURN_NOT_OK(AllocateNullBitmap(pool_, PyArray_SIZE(arr), &null_bitmap_));
+      null_count_ = ValuesToBitmap<TYPE>(arr, null_bitmap_->mutable_data());
+    }
+    // Otherwise null_bitmap_ stays null and null_count_ stays 0
+    return Status::OK();
+  }
+
+ protected:
+  NumPyNullsConverter(MemoryPool* pool, PyArrayObject* arr, bool from_pandas)
+      : pool_(pool),
+        arr_(arr),
+        from_pandas_(from_pandas),
+        null_bitmap_data_(nullptr),
+        null_count_(0) {}
+
+  MemoryPool* pool_;
+  // NOTE(review): arr_ and null_bitmap_data_ are stored but never read in
+  // this class; they look like candidates for removal.
+  PyArrayObject* arr_;
+  bool from_pandas_;
+  std::shared_ptr<ResizableBuffer> null_bitmap_;
+  uint8_t* null_bitmap_data_;
+  int64_t null_count_;
+};
+
+// Returns null count
+// Translate a boolean NumPy mask (nonzero = null) into an Arrow validity
+// bitmap (bit set = valid). `bitmap` must be sized for `length` bits.
+// NOTE(review): loop index is `int` while `length` is int64_t — potential
+// truncation for very large arrays; confirm against upstream Arrow.
+int64_t MaskToBitmap(PyArrayObject* mask, int64_t length, uint8_t* bitmap) {
+  int64_t null_count = 0;
+
+  Ndarray1DIndexer<uint8_t> mask_values(mask);
+  for (int i = 0; i < length; ++i) {
+    if (mask_values[i]) {
+      ++null_count;
+      BitUtil::ClearBit(bitmap, i);
+    } else {
+      BitUtil::SetBit(bitmap, i);
+    }
+  }
+  return null_count;
+}
+
+} // namespace
+
+// ----------------------------------------------------------------------
+// Conversion from NumPy arrays (possibly originating from pandas) to Arrow
+// format. Does not handle NPY_OBJECT dtype arrays; use ConvertPySequence for
+// that
+
+class NumPyConverter {
+ public:
+  /// \param arr a 1-dimensional ndarray holding the values
+  /// \param mo optional boolean mask ndarray (nonzero = null); nullptr or
+  ///        Py_None means "no mask"
+  /// \param type target Arrow type; may be null only for NPY_OBJECT input
+  /// \param from_pandas if true, observe pandas's null sentinels
+  /// \param cast_options options used when the dtype must be cast to `type`
+  NumPyConverter(MemoryPool* pool, PyObject* arr, PyObject* mo,
+                 const std::shared_ptr<DataType>& type, bool from_pandas,
+                 const compute::CastOptions& cast_options = compute::CastOptions())
+      : pool_(pool),
+        type_(type),
+        arr_(reinterpret_cast<PyArrayObject*>(arr)),
+        dtype_(PyArray_DESCR(arr_)),
+        mask_(nullptr),
+        from_pandas_(from_pandas),
+        cast_options_(cast_options),
+        null_bitmap_data_(nullptr),
+        null_count_(0) {
+    if (mo != nullptr && mo != Py_None) {
+      mask_ = reinterpret_cast<PyArrayObject*>(mo);
+    }
+    length_ = static_cast<int64_t>(PyArray_SIZE(arr_));
+    itemsize_ = static_cast<int>(PyArray_DESCR(arr_)->elsize);
+    stride_ = static_cast<int64_t>(PyArray_STRIDES(arr_)[0]);
+  }
+
+  // True when elements are not contiguous (byte stride differs from the
+  // element size)
+  bool is_strided() const { return itemsize_ != stride_; }
+
+  Status Convert();
+
+  // Output chunks produced by Convert()
+  const ArrayVector& result() const { return out_arrays_; }
+
+  // Primitive numeric types share the VisitNative fast path
+  template <typename T>
+  enable_if_primitive_ctype<T, Status> Visit(const T& type) {
+    return VisitNative<T>();
+  }
+
+  // Half floats are bit-identical to uint16 in memory
+  Status Visit(const HalfFloatType& type) { return VisitNative<UInt16Type>(); }
+
+  Status Visit(const Date32Type& type) { return VisitNative<Date32Type>(); }
+  Status Visit(const Date64Type& type) { return VisitNative<Date64Type>(); }
+  Status Visit(const TimestampType& type) { return VisitNative<TimestampType>(); }
+  Status Visit(const Time32Type& type) { return VisitNative<Int32Type>(); }
+  Status Visit(const Time64Type& type) { return VisitNative<Int64Type>(); }
+  Status Visit(const DurationType& type) { return VisitNative<DurationType>(); }
+
+  Status Visit(const NullType& type) { return TypeNotImplemented(type.ToString()); }
+
+  // NumPy ascii string arrays
+  Status Visit(const BinaryType& type);
+
+  // NumPy unicode arrays
+  Status Visit(const StringType& type);
+
+  Status Visit(const StructType& type);
+
+  Status Visit(const FixedSizeBinaryType& type);
+
+  // Default case
+  Status Visit(const DataType& type) { return TypeNotImplemented(type.ToString()); }
+
+ protected:
+  // Allocate null_bitmap_ and cache its mutable data pointer
+  Status InitNullBitmap() {
+    RETURN_NOT_OK(AllocateNullBitmap(pool_, length_, &null_bitmap_));
+    null_bitmap_data_ = null_bitmap_->mutable_data();
+    return Status::OK();
+  }
+
+  // Called before ConvertData to ensure Numpy input buffer is in expected
+  // Arrow layout
+  template <typename ArrowType>
+  Status PrepareInputData(std::shared_ptr<Buffer>* data);
+
+  // ----------------------------------------------------------------------
+  // Traditional visitor conversion for non-object arrays
+
+  template <typename ArrowType>
+  Status ConvertData(std::shared_ptr<Buffer>* data);
+
+  // Finish `builder` and append the resulting array to out_arrays_
+  template <typename T>
+  Status PushBuilderResult(T* builder) {
+    std::shared_ptr<Array> out;
+    RETURN_NOT_OK(builder->Finish(&out));
+    out_arrays_.emplace_back(out);
+    return Status::OK();
+  }
+
+  // Wrap ArrayData into an Array and append it to out_arrays_
+  Status PushArray(const std::shared_ptr<ArrayData>& data) {
+    out_arrays_.emplace_back(MakeArray(data));
+    return Status::OK();
+  }
+
+  // Common path for fixed-width types: compute the null bitmap (from the
+  // explicit mask if given, else from null sentinels), convert the value
+  // buffer, and emit a single array chunk.
+  template <typename ArrowType>
+  Status VisitNative() {
+    if (mask_ != nullptr) {
+      RETURN_NOT_OK(InitNullBitmap());
+      null_count_ = MaskToBitmap(mask_, length_, null_bitmap_data_);
+    } else {
+      RETURN_NOT_OK(NumPyNullsConverter::Convert(pool_, arr_, from_pandas_, &null_bitmap_,
+                                                 &null_count_));
+    }
+
+    std::shared_ptr<Buffer> data;
+    RETURN_NOT_OK(ConvertData<ArrowType>(&data));
+
+    auto arr_data = ArrayData::Make(type_, length_, {null_bitmap_, data}, null_count_, 0);
+    return PushArray(arr_data);
+  }
+
+  Status TypeNotImplemented(std::string type_name) {
+    return Status::NotImplemented("NumPyConverter doesn't implement <", type_name,
+                                  "> conversion. ");
+  }
+
+  MemoryPool* pool_;
+  std::shared_ptr<DataType> type_;
+  PyArrayObject* arr_;
+  PyArray_Descr* dtype_;
+  PyArrayObject* mask_;  // may be null (no mask supplied)
+  int64_t length_;       // number of elements
+  int64_t stride_;       // byte stride between consecutive elements
+  int itemsize_;         // dtype element size in bytes
+
+  bool from_pandas_;
+  compute::CastOptions cast_options_;
+
+  // Used in visitor pattern
+  ArrayVector out_arrays_;
+
+  std::shared_ptr<ResizableBuffer> null_bitmap_;
+  uint8_t* null_bitmap_data_;
+  int64_t null_count_;
+};
+
+// Top-level entry point: validate dimensionality, then dispatch on dtype.
+// Object-dtype arrays are delegated to ConvertPySequence; all other dtypes
+// go through the typed Visit overloads via VisitTypeInline.
+Status NumPyConverter::Convert() {
+  if (PyArray_NDIM(arr_) != 1) {
+    return Status::Invalid("only handle 1-dimensional arrays");
+  }
+
+  if (dtype_->type_num == NPY_OBJECT) {
+    // If an object array, convert it like a normal Python sequence
+    PyConversionOptions py_options;
+    py_options.type = type_;
+    py_options.from_pandas = from_pandas_;
+    ARROW_ASSIGN_OR_RAISE(
+        auto chunked_array,
+        ConvertPySequence(reinterpret_cast<PyObject*>(arr_),
+                          reinterpret_cast<PyObject*>(mask_), py_options, pool_));
+    out_arrays_ = chunked_array->chunks();
+    return Status::OK();
+  }
+
+  if (type_ == nullptr) {
+    return Status::Invalid("Must pass data type for non-object arrays");
+  }
+
+  // Visit the type to perform conversion
+  return VisitTypeInline(*type_, this);
+}
+
+namespace {
+
+// Cast a raw value buffer from `in_type` to `out_type` by wrapping it in a
+// temporary ArrayData (reusing the precomputed validity bitmap) and running
+// compute::Cast. On success *out is the casted values buffer (buffers[1]).
+Status CastBuffer(const std::shared_ptr<DataType>& in_type,
+                  const std::shared_ptr<Buffer>& input, const int64_t length,
+                  const std::shared_ptr<Buffer>& valid_bitmap, const int64_t null_count,
+                  const std::shared_ptr<DataType>& out_type,
+                  const compute::CastOptions& cast_options, MemoryPool* pool,
+                  std::shared_ptr<Buffer>* out) {
+  // Must cast
+  auto tmp_data = ArrayData::Make(in_type, length, {valid_bitmap, input}, null_count);
+  compute::ExecContext context(pool);
+  ARROW_ASSIGN_OR_RAISE(
+      std::shared_ptr<Array> casted_array,
+      compute::Cast(*MakeArray(tmp_data), out_type, cast_options, &context));
+  *out = casted_array->data()->buffers[1];
+  return Status::OK();
+}
+
+// Element-wise static_cast of a buffer of FromType values into a freshly
+// allocated buffer of ToType values. No overflow checking is performed.
+template <typename FromType, typename ToType>
+Status StaticCastBuffer(const Buffer& input, const int64_t length, MemoryPool* pool,
+                        std::shared_ptr<Buffer>* out) {
+  ARROW_ASSIGN_OR_RAISE(auto result, AllocateBuffer(sizeof(ToType) * length, pool));
+
+  auto in_values = reinterpret_cast<const FromType*>(input.data());
+  auto out_values = reinterpret_cast<ToType*>(result->mutable_data());
+  for (int64_t i = 0; i < length; ++i) {
+    *out_values++ = static_cast<ToType>(*in_values++);
+  }
+  *out = std::move(result);
+  return Status::OK();
+}
+
+// Gather `length` elements spaced `stride` bytes apart into a contiguous
+// output, copying each element with memcpy (handles strides that are not a
+// multiple of sizeof(T), i.e. misaligned elements).
+template <typename T>
+void CopyStridedBytewise(int8_t* input_data, int64_t length, int64_t stride,
+                         T* output_data) {
+  // Passing input_data as non-const is a concession to PyObject*
+  for (int64_t i = 0; i < length; ++i) {
+    memcpy(output_data + i, input_data, sizeof(T));
+    input_data += stride;
+  }
+}
+
+// Gather `length` elements into a contiguous output when the stride is a
+// whole number of elements (`stride` is in units of T, not bytes).
+template <typename T>
+void CopyStridedNatural(T* input_data, int64_t length, int64_t stride, T* output_data) {
+  // Passing input_data as non-const is a concession to PyObject*
+  int64_t j = 0;
+  for (int64_t i = 0; i < length; ++i) {
+    output_data[i] = input_data[j];
+    j += stride;
+  }
+}
+
+// Compacts a strided NumPy array into a freshly allocated contiguous
+// buffer, dispatched over the NumPy type number.
+class NumPyStridedConverter {
+ public:
+  static Status Convert(PyArrayObject* arr, int64_t length, MemoryPool* pool,
+                        std::shared_ptr<Buffer>* out) {
+    NumPyStridedConverter converter(arr, length, pool);
+    RETURN_NOT_OK(VisitNumpyArrayInline(arr, &converter));
+    *out = converter.buffer_;
+    return Status::OK();
+  }
+  // Visitor entry point, instantiated once per NumPy type number
+  template <int TYPE>
+  Status Visit(PyArrayObject* arr) {
+    using traits = internal::npy_traits<TYPE>;
+    using T = typename traits::value_type;
+
+    ARROW_ASSIGN_OR_RAISE(buffer_, AllocateBuffer(sizeof(T) * length_, pool_));
+
+    const int64_t stride = PyArray_STRIDES(arr)[0];
+    if (stride % sizeof(T) == 0) {
+      // Stride is a whole number of elements: copy element-wise
+      const int64_t stride_elements = stride / sizeof(T);
+      CopyStridedNatural(reinterpret_cast<T*>(PyArray_DATA(arr)), length_,
+                         stride_elements, reinterpret_cast<T*>(buffer_->mutable_data()));
+    } else {
+      // Misaligned stride: fall back to per-element memcpy
+      CopyStridedBytewise(reinterpret_cast<int8_t*>(PyArray_DATA(arr)), length_, stride,
+                          reinterpret_cast<T*>(buffer_->mutable_data()));
+    }
+    return Status::OK();
+  }
+
+ protected:
+  NumPyStridedConverter(PyArrayObject* arr, int64_t length, MemoryPool* pool)
+      : arr_(arr), length_(length), pool_(pool), buffer_(nullptr) {}
+  PyArrayObject* arr_;
+  int64_t length_;
+  MemoryPool* pool_;
+  std::shared_ptr<Buffer> buffer_;
+};
+
+} // namespace
+
+// Produce the raw value buffer, normalizing NumPy layout to Arrow layout:
+// booleans are bit-packed, strided input is compacted into a contiguous
+// buffer, and contiguous native-endian input is zero-copied by wrapping the
+// ndarray (which keeps a reference to the Python object alive).
+template <typename ArrowType>
+inline Status NumPyConverter::PrepareInputData(std::shared_ptr<Buffer>* data) {
+  if (PyArray_ISBYTESWAPPED(arr_)) {
+    // TODO
+    return Status::NotImplemented("Byte-swapped arrays not supported");
+  }
+
+  if (dtype_->type_num == NPY_BOOL) {
+    // NumPy bools are one byte each; Arrow packs them into bits
+    int64_t nbytes = BitUtil::BytesForBits(length_);
+    ARROW_ASSIGN_OR_RAISE(auto buffer, AllocateBuffer(nbytes, pool_));
+
+    Ndarray1DIndexer<uint8_t> values(arr_);
+    int64_t i = 0;
+    const auto generate = [&values, &i]() -> bool { return values[i++] > 0; };
+    GenerateBitsUnrolled(buffer->mutable_data(), 0, length_, generate);
+
+    *data = std::move(buffer);
+  } else if (is_strided()) {
+    RETURN_NOT_OK(NumPyStridedConverter::Convert(arr_, length_, pool_, data));
+  } else {
+    // Can zero-copy
+    *data = std::make_shared<NumPyBuffer>(reinterpret_cast<PyObject*>(arr_));
+  }
+
+  return Status::OK();
+}
+
+// Prepare the value buffer, then cast it when the NumPy dtype's Arrow
+// equivalent does not match the requested target type exactly.
+template <typename ArrowType>
+inline Status NumPyConverter::ConvertData(std::shared_ptr<Buffer>* data) {
+  RETURN_NOT_OK(PrepareInputData<ArrowType>(data));
+
+  std::shared_ptr<DataType> input_type;
+  RETURN_NOT_OK(NumPyDtypeToArrow(reinterpret_cast<PyObject*>(dtype_), &input_type));
+
+  if (!input_type->Equals(*type_)) {
+    // The null bitmap was already computed in VisitNative()
+    RETURN_NOT_OK(CastBuffer(input_type, *data, length_, null_bitmap_, null_count_, type_,
+                             cast_options_, pool_, data));
+  }
+
+  return Status::OK();
+}
+
+// Date32 specialization: inbound datetime64[D] values are stored as int64
+// days and must be narrowed to int32 manually, since that conversion is not
+// supported by compute::Cast. All other dtypes follow the generic cast path.
+template <>
+inline Status NumPyConverter::ConvertData<Date32Type>(std::shared_ptr<Buffer>* data) {
+  std::shared_ptr<DataType> input_type;
+
+  RETURN_NOT_OK(PrepareInputData<Date32Type>(data));
+
+  // Only dereferenced in the NPY_DATETIME branch below, where c_metadata
+  // actually holds datetime metadata
+  auto date_dtype = reinterpret_cast<PyArray_DatetimeDTypeMetaData*>(dtype_->c_metadata);
+  if (dtype_->type_num == NPY_DATETIME) {
+    // If we have inbound datetime64[D] data, this needs to be downcasted
+    // separately here from int64_t to int32_t, because this data is not
+    // supported in compute::Cast
+    if (date_dtype->meta.base == NPY_FR_D) {
+      // TODO(wesm): How pedantic do we really want to be about checking for int32
+      // overflow here?
+      Status s = StaticCastBuffer<int64_t, int32_t>(**data, length_, pool_, data);
+      RETURN_NOT_OK(s);
+    } else {
+      RETURN_NOT_OK(NumPyDtypeToArrow(reinterpret_cast<PyObject*>(dtype_), &input_type));
+      if (!input_type->Equals(*type_)) {
+        // The null bitmap was already computed in VisitNative()
+        RETURN_NOT_OK(CastBuffer(input_type, *data, length_, null_bitmap_, null_count_,
+                                 type_, cast_options_, pool_, data));
+      }
+    }
+  } else {
+    RETURN_NOT_OK(NumPyDtypeToArrow(reinterpret_cast<PyObject*>(dtype_), &input_type));
+    if (!input_type->Equals(*type_)) {
+      RETURN_NOT_OK(CastBuffer(input_type, *data, length_, null_bitmap_, null_count_,
+                               type_, cast_options_, pool_, data));
+    }
+  }
+
+  return Status::OK();
+}
+
+// Date64 specialization: inbound datetime64[D] values (int64 days) are
+// scaled to milliseconds manually, since that conversion is not supported
+// by compute::Cast. All other dtypes follow the generic cast path.
+template <>
+inline Status NumPyConverter::ConvertData<Date64Type>(std::shared_ptr<Buffer>* data) {
+  constexpr int64_t kMillisecondsInDay = 86400000;
+  std::shared_ptr<DataType> input_type;
+
+  RETURN_NOT_OK(PrepareInputData<Date64Type>(data));
+
+  // Only dereferenced in the NPY_DATETIME branch below
+  auto date_dtype = reinterpret_cast<PyArray_DatetimeDTypeMetaData*>(dtype_->c_metadata);
+  if (dtype_->type_num == NPY_DATETIME) {
+    // If we have inbound datetime64[D] data, this needs to be downcasted
+    // separately here from int64_t to int32_t, because this data is not
+    // supported in compute::Cast
+    if (date_dtype->meta.base == NPY_FR_D) {
+      // Scale days to milliseconds (no overflow checking)
+      ARROW_ASSIGN_OR_RAISE(auto result,
+                            AllocateBuffer(sizeof(int64_t) * length_, pool_));
+
+      auto in_values = reinterpret_cast<const int64_t*>((*data)->data());
+      auto out_values = reinterpret_cast<int64_t*>(result->mutable_data());
+      for (int64_t i = 0; i < length_; ++i) {
+        *out_values++ = kMillisecondsInDay * (*in_values++);
+      }
+      *data = std::move(result);
+    } else {
+      RETURN_NOT_OK(NumPyDtypeToArrow(reinterpret_cast<PyObject*>(dtype_), &input_type));
+      if (!input_type->Equals(*type_)) {
+        // The null bitmap was already computed in VisitNative()
+        RETURN_NOT_OK(CastBuffer(input_type, *data, length_, null_bitmap_, null_count_,
+                                 type_, cast_options_, pool_, data));
+      }
+    }
+  } else {
+    RETURN_NOT_OK(NumPyDtypeToArrow(reinterpret_cast<PyObject*>(dtype_), &input_type));
+    if (!input_type->Equals(*type_)) {
+      RETURN_NOT_OK(CastBuffer(input_type, *data, length_, null_bitmap_, null_count_,
+                               type_, cast_options_, pool_, data));
+    }
+  }
+
+  return Status::OK();
+}
+
+// Create 16MB chunks for binary data
+constexpr int32_t kBinaryChunksize = 1 << 24;
+
+// Convert a NumPy fixed-width bytes array to one or more Arrow binary
+// chunks (chunked at kBinaryChunksize), truncating each value at an
+// embedded nul terminator.
+Status NumPyConverter::Visit(const BinaryType& type) {
+  ::arrow::internal::ChunkedBinaryBuilder builder(kBinaryChunksize, pool_);
+
+  auto data = reinterpret_cast<const uint8_t*>(PyArray_DATA(arr_));
+
+  auto AppendNotNull = [&builder, this](const uint8_t* data) {
+    // This is annoying. NumPy allows strings to have nul-terminators, so
+    // we must check for them here
+    const size_t item_size =
+        strnlen(reinterpret_cast<const char*>(data), static_cast<size_t>(itemsize_));
+    return builder.Append(data, static_cast<int32_t>(item_size));
+  };
+
+  if (mask_ != nullptr) {
+    Ndarray1DIndexer<uint8_t> mask_values(mask_);
+    for (int64_t i = 0; i < length_; ++i) {
+      if (mask_values[i]) {
+        RETURN_NOT_OK(builder.AppendNull());
+      } else {
+        RETURN_NOT_OK(AppendNotNull(data));
+      }
+      data += stride_;
+    }
+  } else {
+    for (int64_t i = 0; i < length_; ++i) {
+      RETURN_NOT_OK(AppendNotNull(data));
+      data += stride_;
+    }
+  }
+
+  // The chunked builder may have produced several chunks
+  ArrayVector result;
+  RETURN_NOT_OK(builder.Finish(&result));
+  for (auto arr : result) {
+    RETURN_NOT_OK(PushArray(arr->data()));
+  }
+  return Status::OK();
+}
+
+// Convert a NumPy fixed-width bytes array to an Arrow fixed-size binary
+// array. The NumPy itemsize must match the Arrow byte width exactly.
+Status NumPyConverter::Visit(const FixedSizeBinaryType& type) {
+  auto byte_width = type.byte_width();
+
+  if (itemsize_ != byte_width) {
+    return Status::Invalid("Got bytestring of length ", itemsize_, " (expected ",
+                           byte_width, ")");
+  }
+
+  FixedSizeBinaryBuilder builder(::arrow::fixed_size_binary(byte_width), pool_);
+  auto data = reinterpret_cast<const uint8_t*>(PyArray_DATA(arr_));
+
+  if (mask_ != nullptr) {
+    Ndarray1DIndexer<uint8_t> mask_values(mask_);
+    RETURN_NOT_OK(builder.Reserve(length_));
+    for (int64_t i = 0; i < length_; ++i) {
+      if (mask_values[i]) {
+        RETURN_NOT_OK(builder.AppendNull());
+      } else {
+        RETURN_NOT_OK(builder.Append(data));
+      }
+      data += stride_;
+    }
+  } else {
+    for (int64_t i = 0; i < length_; ++i) {
+      RETURN_NOT_OK(builder.Append(data));
+      data += stride_;
+    }
+  }
+
+  std::shared_ptr<Array> result;
+  RETURN_NOT_OK(builder.Finish(&result));
+  return PushArray(result->data());
+}
+
+namespace {
+
+// NumPy unicode is UCS4/UTF32 always
+constexpr int kNumPyUnicodeSize = 4;
+
+// Decode one fixed-width NumPy UCS4/UTF32 value, transcode it to UTF-8 via
+// the Python C API, and append it to `builder`. `byteorder` follows the
+// Python convention (-1 little-endian, 1 big-endian). Requires the GIL.
+Status AppendUTF32(const char* data, int itemsize, int byteorder,
+                   ::arrow::internal::ChunkedStringBuilder* builder) {
+  // The binary \x00\x00\x00\x00 indicates a nul terminator in NumPy unicode,
+  // so we need to detect that here to truncate if necessary. Yep.
+  int actual_length = 0;
+  for (; actual_length < itemsize / kNumPyUnicodeSize; ++actual_length) {
+    const char* code_point = data + actual_length * kNumPyUnicodeSize;
+    if ((*code_point == '\0') && (*(code_point + 1) == '\0') &&
+        (*(code_point + 2) == '\0') && (*(code_point + 3) == '\0')) {
+      break;
+    }
+  }
+
+  // OwnedRef releases the Python objects when it goes out of scope
+  OwnedRef unicode_obj(PyUnicode_DecodeUTF32(data, actual_length * kNumPyUnicodeSize,
+                                             nullptr, &byteorder));
+  RETURN_IF_PYERROR();
+  OwnedRef utf8_obj(PyUnicode_AsUTF8String(unicode_obj.obj()));
+  if (utf8_obj.obj() == NULL) {
+    // Swallow the Python exception and surface an Arrow status instead
+    PyErr_Clear();
+    return Status::Invalid("failed converting UTF32 to UTF8");
+  }
+
+  const int32_t length = static_cast<int32_t>(PyBytes_GET_SIZE(utf8_obj.obj()));
+  return builder->Append(
+      reinterpret_cast<const uint8_t*>(PyBytes_AS_STRING(utf8_obj.obj())), length);
+}
+
+} // namespace
+
+// Convert a NumPy bytes (validated as UTF-8) or unicode (UCS4, transcoded
+// to UTF-8) array into chunked Arrow string arrays. With from_pandas, an
+// all-NaN float column is accepted and converted to an all-null string
+// array.
+Status NumPyConverter::Visit(const StringType& type) {
+  util::InitializeUTF8();
+
+  ::arrow::internal::ChunkedStringBuilder builder(kBinaryChunksize, pool_);
+
+  auto data = reinterpret_cast<const uint8_t*>(PyArray_DATA(arr_));
+
+  char numpy_byteorder = dtype_->byteorder;
+
+  // For Python C API, -1 is little-endian, 1 is big-endian
+  int byteorder = numpy_byteorder == '>' ? 1 : -1;
+
+  // Needed for the PyUnicode_* calls in AppendUTF32
+  PyAcquireGIL gil_lock;
+
+  const bool is_binary_type = dtype_->type_num == NPY_STRING;
+  const bool is_unicode_type = dtype_->type_num == NPY_UNICODE;
+
+  if (!is_binary_type && !is_unicode_type) {
+    const bool is_float_type = dtype_->kind == 'f';
+    if (from_pandas_ && is_float_type) {
+      // in case of from_pandas=True, accept an all-NaN float array as input
+      RETURN_NOT_OK(NumPyNullsConverter::Convert(pool_, arr_, from_pandas_, &null_bitmap_,
+                                                 &null_count_));
+      if (null_count_ == length_) {
+        // Cast a NullArray to get an all-null array of the target type
+        auto arr = std::make_shared<NullArray>(length_);
+        compute::ExecContext context(pool_);
+        ARROW_ASSIGN_OR_RAISE(
+            std::shared_ptr<Array> out,
+            compute::Cast(*arr, arrow::utf8(), cast_options_, &context));
+        out_arrays_.emplace_back(out);
+        return Status::OK();
+      }
+    }
+    std::string dtype_string;
+    RETURN_NOT_OK(internal::PyObject_StdStringStr(reinterpret_cast<PyObject*>(dtype_),
+                                                  &dtype_string));
+    return Status::TypeError("Expected a string or bytes dtype, got ", dtype_string);
+  }
+
+  auto AppendNonNullValue = [&](const uint8_t* data) {
+    if (is_binary_type) {
+      if (ARROW_PREDICT_TRUE(util::ValidateUTF8(data, itemsize_))) {
+        return builder.Append(data, itemsize_);
+      } else {
+        return Status::Invalid("Encountered non-UTF8 binary value: ",
+                               HexEncode(data, itemsize_));
+      }
+    } else {
+      // is_unicode_type case
+      return AppendUTF32(reinterpret_cast<const char*>(data), itemsize_, byteorder,
+                         &builder);
+    }
+  };
+
+  if (mask_ != nullptr) {
+    Ndarray1DIndexer<uint8_t> mask_values(mask_);
+    for (int64_t i = 0; i < length_; ++i) {
+      if (mask_values[i]) {
+        RETURN_NOT_OK(builder.AppendNull());
+      } else {
+        RETURN_NOT_OK(AppendNonNullValue(data));
+      }
+      data += stride_;
+    }
+  } else {
+    for (int64_t i = 0; i < length_; ++i) {
+      RETURN_NOT_OK(AppendNonNullValue(data));
+      data += stride_;
+    }
+  }
+
+  // The chunked builder may have produced several chunks
+  ArrayVector result;
+  RETURN_NOT_OK(builder.Finish(&result));
+  for (auto arr : result) {
+    RETURN_NOT_OK(PushArray(arr->data()));
+  }
+  return Status::OK();
+}
+
+// Convert a NumPy structured (record) array: each struct field is extracted
+// with PyArray_GetField and converted by a child NumPyConverter, the chunk
+// layouts are aligned with RechunkArraysConsistently, and the chunks are
+// reassembled into StructArray chunks.
+Status NumPyConverter::Visit(const StructType& type) {
+  std::vector<NumPyConverter> sub_converters;
+  std::vector<OwnedRefNoGIL> sub_arrays;
+
+  {
+    // Field extraction calls into the Python C API
+    PyAcquireGIL gil_lock;
+
+    // Create converters for each struct type field
+    if (dtype_->fields == NULL || !PyDict_Check(dtype_->fields)) {
+      return Status::TypeError("Expected struct array");
+    }
+
+    for (auto field : type.fields()) {
+      PyObject* tup = PyDict_GetItemString(dtype_->fields, field->name().c_str());
+      if (tup == NULL) {
+        return Status::Invalid("Missing field '", field->name(), "' in struct array");
+      }
+      PyArray_Descr* sub_dtype =
+          reinterpret_cast<PyArray_Descr*>(PyTuple_GET_ITEM(tup, 0));
+      DCHECK(PyObject_TypeCheck(sub_dtype, &PyArrayDescr_Type));
+      int offset = static_cast<int>(PyLong_AsLong(PyTuple_GET_ITEM(tup, 1)));
+      RETURN_IF_PYERROR();
+      Py_INCREF(sub_dtype); /* PyArray_GetField() steals ref */
+      PyObject* sub_array = PyArray_GetField(arr_, sub_dtype, offset);
+      RETURN_IF_PYERROR();
+      // sub_arrays keeps the field views alive for the converters below
+      sub_arrays.emplace_back(sub_array);
+      sub_converters.emplace_back(pool_, sub_array, nullptr /* mask */, field->type(),
+                                  from_pandas_);
+    }
+  }
+
+  std::vector<ArrayVector> groups;
+  int64_t null_count = 0;
+
+  // Compute null bitmap and store it as a Boolean Array to include it
+  // in the rechunking below
+  {
+    if (mask_ != nullptr) {
+      RETURN_NOT_OK(InitNullBitmap());
+      null_count = MaskToBitmap(mask_, length_, null_bitmap_data_);
+    }
+    groups.push_back({std::make_shared<BooleanArray>(length_, null_bitmap_)});
+  }
+
+  // Convert child data
+  for (auto& converter : sub_converters) {
+    RETURN_NOT_OK(converter.Convert());
+    groups.push_back(converter.result());
+    // NOTE(review): `n` is computed but never used — presumably a leftover
+    // from a removed length DCHECK; dead code.
+    const auto& group = groups.back();
+    int64_t n = 0;
+    for (const auto& array : group) {
+      n += array->length();
+    }
+  }
+  // Ensure the different array groups are chunked consistently
+  groups = ::arrow::internal::RechunkArraysConsistently(groups);
+  for (const auto& group : groups) {
+    // NOTE(review): same dead accumulation as above; candidate for removal.
+    int64_t n = 0;
+    for (const auto& array : group) {
+      n += array->length();
+    }
+  }
+
+  // Make struct array chunks by combining groups
+  size_t ngroups = groups.size();
+  size_t nchunks = groups[0].size();
+  for (size_t chunk = 0; chunk < nchunks; chunk++) {
+    // First group has the null bitmaps as Boolean Arrays
+    const auto& null_data = groups[0][chunk]->data();
+    DCHECK_EQ(null_data->type->id(), Type::BOOL);
+    DCHECK_EQ(null_data->buffers.size(), 2);
+    const auto& null_buffer = null_data->buffers[1];
+    // Careful: the rechunked null bitmap may have a non-zero offset
+    // to its buffer, and it may not even start on a byte boundary
+    int64_t null_offset = null_data->offset;
+    std::shared_ptr<Buffer> fixed_null_buffer;
+
+    if (!null_buffer) {
+      fixed_null_buffer = null_buffer;
+    } else if (null_offset % 8 == 0) {
+      // Byte-aligned: a sliced view of the existing buffer suffices
+      fixed_null_buffer =
+          std::make_shared<Buffer>(null_buffer,
+                                   // byte offset
+                                   null_offset / 8,
+                                   // byte size
+                                   BitUtil::BytesForBits(null_data->length));
+    } else {
+      // Bit-level offset: must copy the bitmap to realign it
+      ARROW_ASSIGN_OR_RAISE(
+          fixed_null_buffer,
+          CopyBitmap(pool_, null_buffer->data(), null_offset, null_data->length));
+    }
+
+    // Create struct array chunk and populate it
+    auto arr_data =
+        ArrayData::Make(type_, null_data->length, null_count ? kUnknownNullCount : 0, 0);
+    arr_data->buffers.push_back(fixed_null_buffer);
+    // Append child chunks
+    for (size_t i = 1; i < ngroups; i++) {
+      arr_data->child_data.push_back(groups[i][chunk]->data());
+    }
+    RETURN_NOT_OK(PushArray(arr_data));
+  }
+
+  return Status::OK();
+}
+
+// Public entry point (see numpy_to_arrow.h): validate that `ao` is a 1-D
+// ndarray, run NumPyConverter, and wrap the resulting chunk(s) in a
+// ChunkedArray.
+Status NdarrayToArrow(MemoryPool* pool, PyObject* ao, PyObject* mo, bool from_pandas,
+                      const std::shared_ptr<DataType>& type,
+                      const compute::CastOptions& cast_options,
+                      std::shared_ptr<ChunkedArray>* out) {
+  if (!PyArray_Check(ao)) {
+    // This code path cannot be reached by Python unit tests currently so this
+    // is only a sanity check.
+    return Status::TypeError("Input object was not a NumPy array");
+  }
+  if (PyArray_NDIM(reinterpret_cast<PyArrayObject*>(ao)) != 1) {
+    return Status::Invalid("only handle 1-dimensional arrays");
+  }
+
+  NumPyConverter converter(pool, ao, mo, type, from_pandas, cast_options);
+  RETURN_NOT_OK(converter.Convert());
+  const auto& output_arrays = converter.result();
+  DCHECK_GT(output_arrays.size(), 0);
+  *out = std::make_shared<ChunkedArray>(output_arrays);
+  return Status::OK();
+}
+
+// Convenience overload using default (safe) cast options.
+Status NdarrayToArrow(MemoryPool* pool, PyObject* ao, PyObject* mo, bool from_pandas,
+                      const std::shared_ptr<DataType>& type,
+                      std::shared_ptr<ChunkedArray>* out) {
+  return NdarrayToArrow(pool, ao, mo, from_pandas, type, compute::CastOptions(), out);
+}
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_to_arrow.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_to_arrow.h
new file mode 100644
index 0000000000..b6cd093e55
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/numpy_to_arrow.h
@@ -0,0 +1,72 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Converting from pandas memory representation to Arrow data structures
+
+#pragma once
+
+#include "arrow/python/platform.h"
+
+#include <memory>
+
+#include "arrow/compute/api.h"
+#include "arrow/python/visibility.h"
+
+namespace arrow {
+
+class Array;
+class ChunkedArray;
+class DataType;
+class MemoryPool;
+class Status;
+
+namespace py {
+
+/// Convert NumPy arrays to Arrow. If target data type is not known, pass a
+/// type with null
+///
+/// \param[in] pool Memory pool for any memory allocations
+/// \param[in] ao an ndarray with the array data
+/// \param[in] mo an ndarray with a null mask (True is null), optional
+/// \param[in] from_pandas If true, use pandas's null sentinels to determine
+/// whether values are null
+/// \param[in] type a specific type to cast to, may be null
+/// \param[in] cast_options casting options
+/// \param[out] out a ChunkedArray, to accommodate chunked output
+ARROW_PYTHON_EXPORT
+Status NdarrayToArrow(MemoryPool* pool, PyObject* ao, PyObject* mo, bool from_pandas,
+ const std::shared_ptr<DataType>& type,
+ const compute::CastOptions& cast_options,
+ std::shared_ptr<ChunkedArray>* out);
+
+/// Safely convert NumPy arrays to Arrow. If target data type is not known,
+/// pass a type with null.
+///
+/// \param[in] pool Memory pool for any memory allocations
+/// \param[in] ao an ndarray with the array data
+/// \param[in] mo an ndarray with a null mask (True is null), optional
+/// \param[in] from_pandas If true, use pandas's null sentinels to determine
+/// whether values are null
+/// \param[in] type a specific type to cast to, may be null
+/// \param[out] out a ChunkedArray, to accommodate chunked output
+ARROW_PYTHON_EXPORT
+Status NdarrayToArrow(MemoryPool* pool, PyObject* ao, PyObject* mo, bool from_pandas,
+ const std::shared_ptr<DataType>& type,
+ std::shared_ptr<ChunkedArray>* out);
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/platform.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/platform.h
new file mode 100644
index 0000000000..80f7e60813
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/platform.h
@@ -0,0 +1,36 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Platform/compatibility shims for embedding CPython: pulls in Python.h
+// (with PY_SSIZE_T_CLEAN defined) and the datetime C API header, and works
+// around an MSVC macro clash.
+// NOTE(review): the original header comment here described pandas<->Arrow
+// conversion; that text was copy-pasted from another file and did not match
+// this header's contents.
+
+#pragma once
+
+// If PY_SSIZE_T_CLEAN is defined, argument parsing functions treat #-specifier
+// to mean Py_ssize_t (defining this to suppress deprecation warning)
+#define PY_SSIZE_T_CLEAN
+
+#include <Python.h>  // IWYU pragma: export
+#include <datetime.h>
+
+// Work around C2528 error: MSVC (VS2015+) headers can define `timezone`,
+// which collides with identifiers used by the Python headers
+#ifdef _MSC_VER
+#if _MSC_VER >= 1900
+#undef timezone
+#endif
+#endif
+
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/pyarrow.cc b/contrib/libs/apache/arrow/cpp/src/arrow/python/pyarrow.cc
new file mode 100644
index 0000000000..bea35ff3b6
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/pyarrow.cc
@@ -0,0 +1,93 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/python/pyarrow.h"
+
+#include <memory>
+#include <utility>
+
+#include "arrow/array.h"
+#include "arrow/table.h"
+#include "arrow/tensor.h"
+#include "arrow/type.h"
+
+#include "arrow/python/common.h"
+#include "arrow/python/datetime.h"
+namespace {
+#include "arrow/python/pyarrow_api.h"
+}
+
+namespace arrow {
+namespace py {
+
+// Build the TypeError returned when a Python object cannot be unwrapped:
+// names the expected Arrow type and the actual Python type of `obj`.
+// Shared by every unwrap_* function defined below.
+static Status UnwrapError(PyObject* obj, const char* expected_type) {
+  return Status::TypeError("Could not unwrap ", expected_type,
+                           " from Python object of type '", Py_TYPE(obj)->tp_name, "'");
+}
+
+// Initialize the C++ <-> pyarrow bridge: prime the CPython datetime C API,
+// then import the `pyarrow.lib` extension module and bind its exported C
+// functions (see pyarrow_api.h). Returns 0 on success, -1 on error (with a
+// Python exception set).
+int import_pyarrow() {
+  internal::InitDatetime();
+  return ::import_pyarrow__lib();
+}
+
+// For each (suffix, type) pair, define the four bridge functions declared in
+// pyarrow.h:
+//   is_*     - check whether a Python object wraps TYPE_NAME
+//   wrap_*   - create a Python object owning the shared_ptr
+//   unwrap_* - Result-returning extraction; a null shared_ptr from the
+//              Cython side is reported as a TypeError via UnwrapError
+//   unwrap_* (Status/out-param) - deprecated overload kept for compatibility
+// Each forwards to the corresponding pyarrow_* function pointer imported
+// from pyarrow.lib (see pyarrow_api.h).
+#define DEFINE_WRAP_FUNCTIONS(FUNC_SUFFIX, TYPE_NAME)                                  \
+  bool is_##FUNC_SUFFIX(PyObject* obj) { return ::pyarrow_is_##FUNC_SUFFIX(obj) != 0; } \
+                                                                                        \
+  PyObject* wrap_##FUNC_SUFFIX(const std::shared_ptr<TYPE_NAME>& src) {                 \
+    return ::pyarrow_wrap_##FUNC_SUFFIX(src);                                           \
+  }                                                                                     \
+  Result<std::shared_ptr<TYPE_NAME>> unwrap_##FUNC_SUFFIX(PyObject* obj) {              \
+    auto out = ::pyarrow_unwrap_##FUNC_SUFFIX(obj);                                     \
+    if (out) {                                                                          \
+      return std::move(out);                                                            \
+    } else {                                                                            \
+      return UnwrapError(obj, #TYPE_NAME);                                              \
+    }                                                                                   \
+  }                                                                                     \
+  Status unwrap_##FUNC_SUFFIX(PyObject* obj, std::shared_ptr<TYPE_NAME>* out) {         \
+    return unwrap_##FUNC_SUFFIX(obj).Value(out);                                        \
+  }
+
+DEFINE_WRAP_FUNCTIONS(buffer, Buffer)
+
+DEFINE_WRAP_FUNCTIONS(data_type, DataType)
+DEFINE_WRAP_FUNCTIONS(field, Field)
+DEFINE_WRAP_FUNCTIONS(schema, Schema)
+
+DEFINE_WRAP_FUNCTIONS(scalar, Scalar)
+
+DEFINE_WRAP_FUNCTIONS(array, Array)
+DEFINE_WRAP_FUNCTIONS(chunked_array, ChunkedArray)
+
+DEFINE_WRAP_FUNCTIONS(sparse_coo_tensor, SparseCOOTensor)
+DEFINE_WRAP_FUNCTIONS(sparse_csc_matrix, SparseCSCMatrix)
+DEFINE_WRAP_FUNCTIONS(sparse_csf_tensor, SparseCSFTensor)
+DEFINE_WRAP_FUNCTIONS(sparse_csr_matrix, SparseCSRMatrix)
+DEFINE_WRAP_FUNCTIONS(tensor, Tensor)
+
+DEFINE_WRAP_FUNCTIONS(batch, RecordBatch)
+DEFINE_WRAP_FUNCTIONS(table, Table)
+
+#undef DEFINE_WRAP_FUNCTIONS
+
+namespace internal {
+
+// Forward a C++ Status to the Cython-side pyarrow_internal_check_status,
+// which raises the matching Python exception for a non-OK status.
+// Returns its result (0 for OK, -1 once a Python error has been set).
+int check_status(const Status& status) { return ::pyarrow_internal_check_status(status); }
+
+}  // namespace internal
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/pyarrow.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/pyarrow.h
new file mode 100644
index 0000000000..8056e700a0
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/pyarrow.h
@@ -0,0 +1,87 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/python/platform.h"
+
+#include <memory>
+
+#include "arrow/python/visibility.h"
+
+#include "arrow/sparse_tensor.h"
+
+// Work around ARROW-2317 (C linkage warning from Cython)
+extern "C++" {
+
+namespace arrow {
+
+class Array;
+class Buffer;
+class DataType;
+class Field;
+class RecordBatch;
+class Schema;
+class Status;
+class Table;
+class Tensor;
+
+namespace py {
+
+// Returns 0 on success, -1 on error.
+ARROW_PYTHON_EXPORT int import_pyarrow();
+
+// Declare the conversion API for each wrapped Arrow type:
+//   is_*     - check whether a Python object wraps TYPE_NAME
+//   unwrap_* - extract the shared_ptr (Result-returning; the deprecated
+//              Status/out-parameter overload is kept for compatibility)
+//   wrap_*   - create a new Python object owning the shared_ptr
+// Definitions live in pyarrow.cc (DEFINE_WRAP_FUNCTIONS).
+#define DECLARE_WRAP_FUNCTIONS(FUNC_SUFFIX, TYPE_NAME)                                  \
+  ARROW_PYTHON_EXPORT bool is_##FUNC_SUFFIX(PyObject*);                                 \
+  ARROW_PYTHON_EXPORT Result<std::shared_ptr<TYPE_NAME>> unwrap_##FUNC_SUFFIX(          \
+      PyObject*);                                                                       \
+  ARROW_PYTHON_EXPORT PyObject* wrap_##FUNC_SUFFIX(const std::shared_ptr<TYPE_NAME>&);  \
+  ARROW_DEPRECATED("Use Result-returning version")                                      \
+  ARROW_PYTHON_EXPORT Status unwrap_##FUNC_SUFFIX(PyObject*,                            \
+                                                  std::shared_ptr<TYPE_NAME>* out);
+
+DECLARE_WRAP_FUNCTIONS(buffer, Buffer)
+
+DECLARE_WRAP_FUNCTIONS(data_type, DataType)
+DECLARE_WRAP_FUNCTIONS(field, Field)
+DECLARE_WRAP_FUNCTIONS(schema, Schema)
+
+DECLARE_WRAP_FUNCTIONS(scalar, Scalar)
+
+DECLARE_WRAP_FUNCTIONS(array, Array)
+DECLARE_WRAP_FUNCTIONS(chunked_array, ChunkedArray)
+
+DECLARE_WRAP_FUNCTIONS(sparse_coo_tensor, SparseCOOTensor)
+DECLARE_WRAP_FUNCTIONS(sparse_csc_matrix, SparseCSCMatrix)
+DECLARE_WRAP_FUNCTIONS(sparse_csf_tensor, SparseCSFTensor)
+DECLARE_WRAP_FUNCTIONS(sparse_csr_matrix, SparseCSRMatrix)
+DECLARE_WRAP_FUNCTIONS(tensor, Tensor)
+
+DECLARE_WRAP_FUNCTIONS(batch, RecordBatch)
+DECLARE_WRAP_FUNCTIONS(table, Table)
+
+#undef DECLARE_WRAP_FUNCTIONS
+
+namespace internal {
+
+ARROW_PYTHON_EXPORT int check_status(const Status& status);
+
+} // namespace internal
+} // namespace py
+} // namespace arrow
+
+} // extern "C++"
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/pyarrow_api.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/pyarrow_api.h
new file mode 100644
index 0000000000..9474312002
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/pyarrow_api.h
@@ -0,0 +1,239 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// DO NOT EDIT THIS FILE. Update from pyarrow/lib_api.h after pyarrow build
+// This is used to be able to call back into Cython code from C++.
+
+/* Generated by Cython 0.29.15 */
+
+#ifndef __PYX_HAVE_API__pyarrow__lib
+#define __PYX_HAVE_API__pyarrow__lib
+#ifdef __MINGW64__
+#define MS_WIN64
+#endif
+#include "Python.h"
+#include "pyarrow_lib.h"
+
+static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar)(std::shared_ptr< arrow::Scalar> const &) = 0;
+#define pyarrow_wrap_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar
+static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array)(std::shared_ptr< arrow::Array> const &) = 0;
+#define pyarrow_wrap_array __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array
+static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array)(std::shared_ptr< arrow::ChunkedArray> const &) = 0;
+#define pyarrow_wrap_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array
+static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch)(std::shared_ptr< arrow::RecordBatch> const &) = 0;
+#define pyarrow_wrap_batch __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch
+static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer)(std::shared_ptr< arrow::Buffer> const &) = 0;
+#define pyarrow_wrap_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer
+static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type)(std::shared_ptr< arrow::DataType> const &) = 0;
+#define pyarrow_wrap_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type
+static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field)(std::shared_ptr< arrow::Field> const &) = 0;
+#define pyarrow_wrap_field __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field
+static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer)(std::shared_ptr< arrow::ResizableBuffer> const &) = 0;
+#define pyarrow_wrap_resizable_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer
+static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema)(std::shared_ptr< arrow::Schema> const &) = 0;
+#define pyarrow_wrap_schema __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema
+static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table)(std::shared_ptr< arrow::Table> const &) = 0;
+#define pyarrow_wrap_table __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table
+static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor)(std::shared_ptr< arrow::Tensor> const &) = 0;
+#define pyarrow_wrap_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor
+static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor)(std::shared_ptr< arrow::SparseCOOTensor> const &) = 0;
+#define pyarrow_wrap_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor
+static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix)(std::shared_ptr< arrow::SparseCSRMatrix> const &) = 0;
+#define pyarrow_wrap_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix
+static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix)(std::shared_ptr< arrow::SparseCSCMatrix> const &) = 0;
+#define pyarrow_wrap_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix
+static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor)(std::shared_ptr< arrow::SparseCSFTensor> const &) = 0;
+#define pyarrow_wrap_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor
+static std::shared_ptr< arrow::Scalar> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar)(PyObject *) = 0;
+#define pyarrow_unwrap_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar
+static std::shared_ptr< arrow::Array> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array)(PyObject *) = 0;
+#define pyarrow_unwrap_array __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array
+static std::shared_ptr< arrow::ChunkedArray> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array)(PyObject *) = 0;
+#define pyarrow_unwrap_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array
+static std::shared_ptr< arrow::RecordBatch> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch)(PyObject *) = 0;
+#define pyarrow_unwrap_batch __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch
+static std::shared_ptr< arrow::Buffer> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer)(PyObject *) = 0;
+#define pyarrow_unwrap_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer
+static std::shared_ptr< arrow::DataType> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type)(PyObject *) = 0;
+#define pyarrow_unwrap_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type
+static std::shared_ptr< arrow::Field> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field)(PyObject *) = 0;
+#define pyarrow_unwrap_field __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field
+static std::shared_ptr< arrow::Schema> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema)(PyObject *) = 0;
+#define pyarrow_unwrap_schema __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema
+static std::shared_ptr< arrow::Table> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table)(PyObject *) = 0;
+#define pyarrow_unwrap_table __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table
+static std::shared_ptr< arrow::Tensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor)(PyObject *) = 0;
+#define pyarrow_unwrap_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor
+static std::shared_ptr< arrow::SparseCOOTensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor)(PyObject *) = 0;
+#define pyarrow_unwrap_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor
+static std::shared_ptr< arrow::SparseCSRMatrix> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix)(PyObject *) = 0;
+#define pyarrow_unwrap_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix
+static std::shared_ptr< arrow::SparseCSCMatrix> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix)(PyObject *) = 0;
+#define pyarrow_unwrap_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix
+static std::shared_ptr< arrow::SparseCSFTensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor)(PyObject *) = 0;
+#define pyarrow_unwrap_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor
+static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status)(arrow::Status const &) = 0;
+#define pyarrow_internal_check_status __pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status
+static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer)(PyObject *) = 0;
+#define pyarrow_is_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer
+static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type)(PyObject *) = 0;
+#define pyarrow_is_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type
+static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata)(PyObject *) = 0;
+#define pyarrow_is_metadata __pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata
+static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_field)(PyObject *) = 0;
+#define pyarrow_is_field __pyx_api_f_7pyarrow_3lib_pyarrow_is_field
+static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_schema)(PyObject *) = 0;
+#define pyarrow_is_schema __pyx_api_f_7pyarrow_3lib_pyarrow_is_schema
+static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_array)(PyObject *) = 0;
+#define pyarrow_is_array __pyx_api_f_7pyarrow_3lib_pyarrow_is_array
+static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array)(PyObject *) = 0;
+#define pyarrow_is_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array
+static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar)(PyObject *) = 0;
+#define pyarrow_is_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar
+static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor)(PyObject *) = 0;
+#define pyarrow_is_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor
+static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor)(PyObject *) = 0;
+#define pyarrow_is_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor
+static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix)(PyObject *) = 0;
+#define pyarrow_is_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix
+static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix)(PyObject *) = 0;
+#define pyarrow_is_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix
+static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor)(PyObject *) = 0;
+#define pyarrow_is_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor
+static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_table)(PyObject *) = 0;
+#define pyarrow_is_table __pyx_api_f_7pyarrow_3lib_pyarrow_is_table
+static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_batch)(PyObject *) = 0;
+#define pyarrow_is_batch __pyx_api_f_7pyarrow_3lib_pyarrow_is_batch
+#if !defined(__Pyx_PyIdentifier_FromString)
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s)
+#else
+ #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s)
+#endif
+#endif
+
+#ifndef __PYX_HAVE_RT_ImportFunction
+#define __PYX_HAVE_RT_ImportFunction
+// (Cython-generated; do not hand-edit the logic.) Look up `funcname` in
+// `module`'s __pyx_capi__ dict, verify that the capsule's recorded C
+// signature string matches `sig`, and store the function pointer through
+// `f`. Returns 0 on success, -1 with a Python exception set on failure.
+static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig) {
+  PyObject *d = 0;
+  PyObject *cobj = 0;
+  union {
+    void (*fp)(void);
+    void *p;
+  } tmp;
+  d = PyObject_GetAttrString(module, (char *)"__pyx_capi__");
+  if (!d)
+    goto bad;
+  cobj = PyDict_GetItemString(d, funcname);
+  if (!cobj) {
+    PyErr_Format(PyExc_ImportError,
+                 "%.200s does not export expected C function %.200s",
+                 PyModule_GetName(module), funcname);
+    goto bad;
+  }
+#if PY_VERSION_HEX >= 0x02070000
+  if (!PyCapsule_IsValid(cobj, sig)) {
+    PyErr_Format(PyExc_TypeError,
+                 "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
+                 PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj));
+    goto bad;
+  }
+  tmp.p = PyCapsule_GetPointer(cobj, sig);
+#else
+  {const char *desc, *s1, *s2;
+  desc = (const char *)PyCObject_GetDesc(cobj);
+  if (!desc)
+    goto bad;
+  s1 = desc; s2 = sig;
+  while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; }
+  if (*s1 != *s2) {
+    PyErr_Format(PyExc_TypeError,
+                 "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
+                 PyModule_GetName(module), funcname, sig, desc);
+    goto bad;
+  }
+  tmp.p = PyCObject_AsVoidPtr(cobj);}
+#endif
+  *f = tmp.fp;
+  if (!(*f))
+    goto bad;
+  Py_DECREF(d);
+  return 0;
+bad:
+  Py_XDECREF(d);
+  return -1;
+}
+#endif
+
+
+// (Cython-generated; do not hand-edit the logic.) Import the `pyarrow.lib`
+// extension module and bind every pyarrow_* C API function pointer used by
+// the wrappers in pyarrow.cc. Returns 0 on success, -1 with a Python
+// exception set on failure (missing module, missing symbol, or signature
+// mismatch).
+static int import_pyarrow__lib(void) {
+  PyObject *module = 0;
+  module = PyImport_ImportModule("pyarrow.lib");
+  if (!module) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_wrap_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar, "PyObject *(std::shared_ptr< arrow::Scalar> const &)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_wrap_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array, "PyObject *(std::shared_ptr< arrow::Array> const &)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_wrap_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array, "PyObject *(std::shared_ptr< arrow::ChunkedArray> const &)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_wrap_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch, "PyObject *(std::shared_ptr< arrow::RecordBatch> const &)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_wrap_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer, "PyObject *(std::shared_ptr< arrow::Buffer> const &)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_wrap_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type, "PyObject *(std::shared_ptr< arrow::DataType> const &)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_wrap_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field, "PyObject *(std::shared_ptr< arrow::Field> const &)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_wrap_resizable_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer, "PyObject *(std::shared_ptr< arrow::ResizableBuffer> const &)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_wrap_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema, "PyObject *(std::shared_ptr< arrow::Schema> const &)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_wrap_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table, "PyObject *(std::shared_ptr< arrow::Table> const &)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_wrap_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor, "PyObject *(std::shared_ptr< arrow::Tensor> const &)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_wrap_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor, "PyObject *(std::shared_ptr< arrow::SparseCOOTensor> const &)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_wrap_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix, "PyObject *(std::shared_ptr< arrow::SparseCSRMatrix> const &)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_wrap_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix, "PyObject *(std::shared_ptr< arrow::SparseCSCMatrix> const &)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_wrap_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor, "PyObject *(std::shared_ptr< arrow::SparseCSFTensor> const &)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_unwrap_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar, "std::shared_ptr< arrow::Scalar> (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_unwrap_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array, "std::shared_ptr< arrow::Array> (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_unwrap_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array, "std::shared_ptr< arrow::ChunkedArray> (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_unwrap_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch, "std::shared_ptr< arrow::RecordBatch> (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_unwrap_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer, "std::shared_ptr< arrow::Buffer> (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_unwrap_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type, "std::shared_ptr< arrow::DataType> (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_unwrap_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field, "std::shared_ptr< arrow::Field> (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_unwrap_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema, "std::shared_ptr< arrow::Schema> (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_unwrap_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table, "std::shared_ptr< arrow::Table> (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_unwrap_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor, "std::shared_ptr< arrow::Tensor> (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_unwrap_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor, "std::shared_ptr< arrow::SparseCOOTensor> (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_unwrap_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix, "std::shared_ptr< arrow::SparseCSRMatrix> (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_unwrap_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix, "std::shared_ptr< arrow::SparseCSCMatrix> (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_unwrap_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor, "std::shared_ptr< arrow::SparseCSFTensor> (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_internal_check_status", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status, "int (arrow::Status const &)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_is_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer, "int (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_is_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type, "int (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_is_metadata", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata, "int (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_is_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_field, "int (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_is_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_schema, "int (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_is_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_array, "int (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_is_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array, "int (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_is_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar, "int (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_is_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor, "int (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_is_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor, "int (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_is_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix, "int (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_is_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix, "int (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_is_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor, "int (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_is_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_table, "int (PyObject *)") < 0) goto bad;
+  if (__Pyx_ImportFunction(module, "pyarrow_is_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_batch, "int (PyObject *)") < 0) goto bad;
+  Py_DECREF(module); module = 0;
+  return 0;
+  bad:
+  Py_XDECREF(module);
+  return -1;
+}
+
+#endif /* !__PYX_HAVE_API__pyarrow__lib */
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/pyarrow_lib.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/pyarrow_lib.h
new file mode 100644
index 0000000000..fa59414474
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/pyarrow_lib.h
@@ -0,0 +1,82 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// DO NOT EDIT THIS FILE. Update from pyarrow/lib.h after pyarrow build
+
+/* Generated by Cython 0.29.15 */
+
+#ifndef __PYX_HAVE__pyarrow__lib
+#define __PYX_HAVE__pyarrow__lib
+
+#include "Python.h"
+
+#ifndef __PYX_HAVE_API__pyarrow__lib
+
+#ifndef __PYX_EXTERN_C
+ #ifdef __cplusplus
+ #define __PYX_EXTERN_C extern "C"
+ #else
+ #define __PYX_EXTERN_C extern
+ #endif
+#endif
+
+#ifndef DL_IMPORT
+ #define DL_IMPORT(_T) _T
+#endif
+
+__PYX_EXTERN_C PyObject *__pyx_f_7pyarrow_3lib_pyarrow_wrap_scalar(std::shared_ptr< arrow::Scalar> const &);
+__PYX_EXTERN_C PyObject *__pyx_f_7pyarrow_3lib_pyarrow_wrap_array(std::shared_ptr< arrow::Array> const &);
+__PYX_EXTERN_C PyObject *__pyx_f_7pyarrow_3lib_pyarrow_wrap_chunked_array(std::shared_ptr< arrow::ChunkedArray> const &);
+__PYX_EXTERN_C PyObject *__pyx_f_7pyarrow_3lib_pyarrow_wrap_batch(std::shared_ptr< arrow::RecordBatch> const &);
+__PYX_EXTERN_C PyObject *__pyx_f_7pyarrow_3lib_pyarrow_wrap_buffer(std::shared_ptr< arrow::Buffer> const &);
+__PYX_EXTERN_C PyObject *__pyx_f_7pyarrow_3lib_pyarrow_wrap_data_type(std::shared_ptr< arrow::DataType> const &);
+__PYX_EXTERN_C PyObject *__pyx_f_7pyarrow_3lib_pyarrow_wrap_field(std::shared_ptr< arrow::Field> const &);
+__PYX_EXTERN_C PyObject *__pyx_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer(std::shared_ptr< arrow::ResizableBuffer> const &);
+__PYX_EXTERN_C PyObject *__pyx_f_7pyarrow_3lib_pyarrow_wrap_schema(std::shared_ptr< arrow::Schema> const &);
+__PYX_EXTERN_C PyObject *__pyx_f_7pyarrow_3lib_pyarrow_wrap_table(std::shared_ptr< arrow::Table> const &);
+__PYX_EXTERN_C PyObject *__pyx_f_7pyarrow_3lib_pyarrow_wrap_tensor(std::shared_ptr< arrow::Tensor> const &);
+__PYX_EXTERN_C PyObject *__pyx_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor(std::shared_ptr< arrow::SparseCOOTensor> const &);
+__PYX_EXTERN_C PyObject *__pyx_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix(std::shared_ptr< arrow::SparseCSRMatrix> const &);
+__PYX_EXTERN_C PyObject *__pyx_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix(std::shared_ptr< arrow::SparseCSCMatrix> const &);
+__PYX_EXTERN_C PyObject *__pyx_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor(std::shared_ptr< arrow::SparseCSFTensor> const &);
+__PYX_EXTERN_C std::shared_ptr< arrow::Scalar> __pyx_f_7pyarrow_3lib_pyarrow_unwrap_scalar(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::Array> __pyx_f_7pyarrow_3lib_pyarrow_unwrap_array(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::ChunkedArray> __pyx_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::RecordBatch> __pyx_f_7pyarrow_3lib_pyarrow_unwrap_batch(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::Buffer> __pyx_f_7pyarrow_3lib_pyarrow_unwrap_buffer(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::DataType> __pyx_f_7pyarrow_3lib_pyarrow_unwrap_data_type(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::Field> __pyx_f_7pyarrow_3lib_pyarrow_unwrap_field(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::Schema> __pyx_f_7pyarrow_3lib_pyarrow_unwrap_schema(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::Table> __pyx_f_7pyarrow_3lib_pyarrow_unwrap_table(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::Tensor> __pyx_f_7pyarrow_3lib_pyarrow_unwrap_tensor(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::SparseCOOTensor> __pyx_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::SparseCSRMatrix> __pyx_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::SparseCSCMatrix> __pyx_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::SparseCSFTensor> __pyx_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor(PyObject *);
+
+#endif /* !__PYX_HAVE_API__pyarrow__lib */
+
+/* WARNING: the interface of the module init function changed in CPython 3.5. */
+/* It now returns a PyModuleDef instance instead of a PyModule instance. */
+
+#if PY_MAJOR_VERSION < 3
+PyMODINIT_FUNC initlib(void);
+#else
+PyMODINIT_FUNC PyInit_lib(void);
+#endif
+
+#endif /* !__PYX_HAVE__pyarrow__lib */
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/python_to_arrow.cc b/contrib/libs/apache/arrow/cpp/src/arrow/python/python_to_arrow.cc
new file mode 100644
index 0000000000..521249fd54
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/python_to_arrow.cc
@@ -0,0 +1,1041 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/python/python_to_arrow.h"
+#include "arrow/python/numpy_interop.h"
+
+#include <datetime.h>
+
+#include <algorithm>
+#include <limits>
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "arrow/array.h"
+#include "arrow/array/builder_binary.h"
+#include "arrow/array/builder_decimal.h"
+#include "arrow/array/builder_dict.h"
+#include "arrow/array/builder_nested.h"
+#include "arrow/array/builder_primitive.h"
+#include "arrow/chunked_array.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/type_traits.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/converter.h"
+#include "arrow/util/decimal.h"
+#include "arrow/util/int_util_internal.h"
+#include "arrow/util/logging.h"
+
+#include "arrow/python/datetime.h"
+#include "arrow/python/decimal.h"
+#include "arrow/python/helpers.h"
+#include "arrow/python/inference.h"
+#include "arrow/python/iterators.h"
+#include "arrow/python/numpy_convert.h"
+#include "arrow/python/type_traits.h"
+#include "arrow/visitor_inline.h"
+
+namespace arrow {
+
+using internal::checked_cast;
+using internal::checked_pointer_cast;
+
+using internal::Converter;
+using internal::DictionaryConverter;
+using internal::ListConverter;
+using internal::PrimitiveConverter;
+using internal::StructConverter;
+
+using internal::MakeChunker;
+using internal::MakeConverter;
+
+namespace py {
+
+// Utility for converting single python objects to their intermediate C representations
+// which can be fed to the typed builders
+class PyValue {
+ public:
+ // Type aliases for shorter signature definitions
+ using I = PyObject*;
+ using O = PyConversionOptions;
+
+ // Used for null checking before actually converting the values
+ static bool IsNull(const O& options, I obj) {
+ if (options.from_pandas) {
+ return internal::PandasObjectIsNull(obj);
+ } else {
+ return obj == Py_None;
+ }
+ }
+
+ // Used for post-conversion numpy NaT sentinel checking
+ static bool IsNaT(const TimestampType*, int64_t value) {
+ return internal::npy_traits<NPY_DATETIME>::isnull(value);
+ }
+
+ // Used for post-conversion numpy NaT sentinel checking
+ static bool IsNaT(const DurationType*, int64_t value) {
+ return internal::npy_traits<NPY_TIMEDELTA>::isnull(value);
+ }
+
+ static Result<std::nullptr_t> Convert(const NullType*, const O&, I obj) {
+ if (obj == Py_None) {
+ return nullptr;
+ } else {
+ return Status::Invalid("Invalid null value");
+ }
+ }
+
+ static Result<bool> Convert(const BooleanType*, const O&, I obj) {
+ if (obj == Py_True) {
+ return true;
+ } else if (obj == Py_False) {
+ return false;
+ } else if (PyArray_IsScalar(obj, Bool)) {
+ return reinterpret_cast<PyBoolScalarObject*>(obj)->obval == NPY_TRUE;
+ } else {
+ return internal::InvalidValue(obj, "tried to convert to boolean");
+ }
+ }
+
+ template <typename T>
+ static enable_if_integer<T, Result<typename T::c_type>> Convert(const T*, const O&,
+ I obj) {
+ typename T::c_type value;
+ auto status = internal::CIntFromPython(obj, &value);
+ if (ARROW_PREDICT_TRUE(status.ok())) {
+ return value;
+ } else if (!internal::PyIntScalar_Check(obj)) {
+ return internal::InvalidValue(obj, "tried to convert to int");
+ } else {
+ return status;
+ }
+ }
+
+ static Result<uint16_t> Convert(const HalfFloatType*, const O&, I obj) {
+ uint16_t value;
+ RETURN_NOT_OK(PyFloat_AsHalf(obj, &value));
+ return value;
+ }
+
+ static Result<float> Convert(const FloatType*, const O&, I obj) {
+ float value;
+ if (internal::PyFloatScalar_Check(obj)) {
+ value = static_cast<float>(PyFloat_AsDouble(obj));
+ RETURN_IF_PYERROR();
+ } else if (internal::PyIntScalar_Check(obj)) {
+ RETURN_NOT_OK(internal::IntegerScalarToFloat32Safe(obj, &value));
+ } else {
+ return internal::InvalidValue(obj, "tried to convert to float32");
+ }
+ return value;
+ }
+
+ static Result<double> Convert(const DoubleType*, const O&, I obj) {
+ double value;
+ if (PyFloat_Check(obj)) {
+ value = PyFloat_AS_DOUBLE(obj);
+ } else if (internal::PyFloatScalar_Check(obj)) {
+ // Other kinds of float-y things
+ value = PyFloat_AsDouble(obj);
+ RETURN_IF_PYERROR();
+ } else if (internal::PyIntScalar_Check(obj)) {
+ RETURN_NOT_OK(internal::IntegerScalarToDoubleSafe(obj, &value));
+ } else {
+ return internal::InvalidValue(obj, "tried to convert to double");
+ }
+ return value;
+ }
+
+ static Result<Decimal128> Convert(const Decimal128Type* type, const O&, I obj) {
+ Decimal128 value;
+ RETURN_NOT_OK(internal::DecimalFromPyObject(obj, *type, &value));
+ return value;
+ }
+
+ static Result<Decimal256> Convert(const Decimal256Type* type, const O&, I obj) {
+ Decimal256 value;
+ RETURN_NOT_OK(internal::DecimalFromPyObject(obj, *type, &value));
+ return value;
+ }
+
+ static Result<int32_t> Convert(const Date32Type*, const O&, I obj) {
+ int32_t value;
+ if (PyDate_Check(obj)) {
+ auto pydate = reinterpret_cast<PyDateTime_Date*>(obj);
+ value = static_cast<int32_t>(internal::PyDate_to_days(pydate));
+ } else {
+ RETURN_NOT_OK(
+ internal::CIntFromPython(obj, &value, "Integer too large for date32"));
+ }
+ return value;
+ }
+
+ static Result<int64_t> Convert(const Date64Type*, const O&, I obj) {
+ int64_t value;
+ if (PyDateTime_Check(obj)) {
+ auto pydate = reinterpret_cast<PyDateTime_DateTime*>(obj);
+ value = internal::PyDateTime_to_ms(pydate);
+ // Truncate any intraday milliseconds
+ // TODO: introduce an option for this
+ value -= value % 86400000LL;
+ } else if (PyDate_Check(obj)) {
+ auto pydate = reinterpret_cast<PyDateTime_Date*>(obj);
+ value = internal::PyDate_to_ms(pydate);
+ } else {
+ RETURN_NOT_OK(
+ internal::CIntFromPython(obj, &value, "Integer too large for date64"));
+ }
+ return value;
+ }
+
+ static Result<int32_t> Convert(const Time32Type* type, const O&, I obj) {
+ int32_t value;
+ if (PyTime_Check(obj)) {
+ switch (type->unit()) {
+ case TimeUnit::SECOND:
+ value = static_cast<int32_t>(internal::PyTime_to_s(obj));
+ break;
+ case TimeUnit::MILLI:
+ value = static_cast<int32_t>(internal::PyTime_to_ms(obj));
+ break;
+ default:
+ return Status::UnknownError("Invalid time unit");
+ }
+ } else {
+ RETURN_NOT_OK(internal::CIntFromPython(obj, &value, "Integer too large for int32"));
+ }
+ return value;
+ }
+
+ static Result<int64_t> Convert(const Time64Type* type, const O&, I obj) {
+ int64_t value;
+ if (PyTime_Check(obj)) {
+ switch (type->unit()) {
+ case TimeUnit::MICRO:
+ value = internal::PyTime_to_us(obj);
+ break;
+ case TimeUnit::NANO:
+ value = internal::PyTime_to_ns(obj);
+ break;
+ default:
+ return Status::UnknownError("Invalid time unit");
+ }
+ } else {
+ RETURN_NOT_OK(internal::CIntFromPython(obj, &value, "Integer too large for int64"));
+ }
+ return value;
+ }
+
+ static Result<int64_t> Convert(const TimestampType* type, const O& options, I obj) {
+ int64_t value, offset;
+ if (PyDateTime_Check(obj)) {
+ if (ARROW_PREDICT_FALSE(options.ignore_timezone)) {
+ offset = 0;
+ } else {
+ ARROW_ASSIGN_OR_RAISE(offset, internal::PyDateTime_utcoffset_s(obj));
+ }
+ auto dt = reinterpret_cast<PyDateTime_DateTime*>(obj);
+ switch (type->unit()) {
+ case TimeUnit::SECOND:
+ value = internal::PyDateTime_to_s(dt) - offset;
+ break;
+ case TimeUnit::MILLI:
+ value = internal::PyDateTime_to_ms(dt) - offset * 1000LL;
+ break;
+ case TimeUnit::MICRO:
+ value = internal::PyDateTime_to_us(dt) - offset * 1000000LL;
+ break;
+ case TimeUnit::NANO:
+ if (internal::IsPandasTimestamp(obj)) {
+ // pd.Timestamp value attribute contains the offset from unix epoch
+ // so no adjustment for timezone is need.
+ OwnedRef nanos(PyObject_GetAttrString(obj, "value"));
+ RETURN_IF_PYERROR();
+ RETURN_NOT_OK(internal::CIntFromPython(nanos.obj(), &value));
+ } else {
+ // Conversion to nanoseconds can overflow -> check multiply of microseconds
+ value = internal::PyDateTime_to_us(dt);
+ if (arrow::internal::MultiplyWithOverflow(value, 1000LL, &value)) {
+ return internal::InvalidValue(obj,
+ "out of bounds for nanosecond resolution");
+ }
+
+ // Adjust with offset and check for overflow
+ if (arrow::internal::SubtractWithOverflow(value, offset * 1000000000LL,
+ &value)) {
+ return internal::InvalidValue(obj,
+ "out of bounds for nanosecond resolution");
+ }
+ }
+ break;
+ default:
+ return Status::UnknownError("Invalid time unit");
+ }
+ } else if (PyArray_CheckAnyScalarExact(obj)) {
+ // validate that the numpy scalar has np.datetime64 dtype
+ std::shared_ptr<DataType> numpy_type;
+ RETURN_NOT_OK(NumPyDtypeToArrow(PyArray_DescrFromScalar(obj), &numpy_type));
+ if (!numpy_type->Equals(*type)) {
+ return Status::NotImplemented("Expected np.datetime64 but got: ",
+ numpy_type->ToString());
+ }
+ return reinterpret_cast<PyDatetimeScalarObject*>(obj)->obval;
+ } else {
+ RETURN_NOT_OK(internal::CIntFromPython(obj, &value));
+ }
+ return value;
+ }
+
+ static Result<int64_t> Convert(const DurationType* type, const O&, I obj) {
+ int64_t value;
+ if (PyDelta_Check(obj)) {
+ auto dt = reinterpret_cast<PyDateTime_Delta*>(obj);
+ switch (type->unit()) {
+ case TimeUnit::SECOND:
+ value = internal::PyDelta_to_s(dt);
+ break;
+ case TimeUnit::MILLI:
+ value = internal::PyDelta_to_ms(dt);
+ break;
+ case TimeUnit::MICRO:
+ value = internal::PyDelta_to_us(dt);
+ break;
+ case TimeUnit::NANO:
+ if (internal::IsPandasTimedelta(obj)) {
+ OwnedRef nanos(PyObject_GetAttrString(obj, "value"));
+ RETURN_IF_PYERROR();
+ RETURN_NOT_OK(internal::CIntFromPython(nanos.obj(), &value));
+ } else {
+ value = internal::PyDelta_to_ns(dt);
+ }
+ break;
+ default:
+ return Status::UnknownError("Invalid time unit");
+ }
+ } else if (PyArray_CheckAnyScalarExact(obj)) {
+ // validate that the numpy scalar has np.datetime64 dtype
+ std::shared_ptr<DataType> numpy_type;
+ RETURN_NOT_OK(NumPyDtypeToArrow(PyArray_DescrFromScalar(obj), &numpy_type));
+ if (!numpy_type->Equals(*type)) {
+ return Status::NotImplemented("Expected np.timedelta64 but got: ",
+ numpy_type->ToString());
+ }
+ return reinterpret_cast<PyTimedeltaScalarObject*>(obj)->obval;
+ } else {
+ RETURN_NOT_OK(internal::CIntFromPython(obj, &value));
+ }
+ return value;
+ }
+
+ // The binary-like intermediate representation is PyBytesView because it keeps temporary
+ // python objects alive (non-contiguous memoryview) and stores whether the original
+ // object was unicode encoded or not, which is used for unicode -> bytes coersion if
+ // there is a non-unicode object observed.
+
+ static Status Convert(const BaseBinaryType*, const O&, I obj, PyBytesView& view) {
+ return view.ParseString(obj);
+ }
+
+ static Status Convert(const FixedSizeBinaryType* type, const O&, I obj,
+ PyBytesView& view) {
+ ARROW_RETURN_NOT_OK(view.ParseString(obj));
+ if (view.size != type->byte_width()) {
+ std::stringstream ss;
+ ss << "expected to be length " << type->byte_width() << " was " << view.size;
+ return internal::InvalidValue(obj, ss.str());
+ } else {
+ return Status::OK();
+ }
+ }
+
+ template <typename T>
+ static enable_if_string<T, Status> Convert(const T*, const O& options, I obj,
+ PyBytesView& view) {
+ if (options.strict) {
+ // Strict conversion, force output to be unicode / utf8 and validate that
+ // any binary values are utf8
+ ARROW_RETURN_NOT_OK(view.ParseString(obj, true));
+ if (!view.is_utf8) {
+ return internal::InvalidValue(obj, "was not a utf8 string");
+ }
+ return Status::OK();
+ } else {
+ // Non-strict conversion; keep track of whether values are unicode or bytes
+ return view.ParseString(obj);
+ }
+ }
+
+ static Result<bool> Convert(const DataType* type, const O&, I obj) {
+ return Status::NotImplemented("PyValue::Convert is not implemented for type ", type);
+ }
+};
+
+// The base Converter class is a mixin with predefined behavior and constructors.
+class PyConverter : public Converter<PyObject*, PyConversionOptions> {
+ public:
+ // Iterate over the input values and defer the conversion to the Append method
+ Status Extend(PyObject* values, int64_t size, int64_t offset = 0) override {
+ DCHECK_GE(size, offset);
+ /// Ensure we've allocated enough space
+ RETURN_NOT_OK(this->Reserve(size - offset));
+ // Iterate over the items adding each one
+ return internal::VisitSequence(
+ values, offset,
+ [this](PyObject* item, bool* /* unused */) { return this->Append(item); });
+ }
+
+ // Convert and append a sequence of values masked with a numpy array
+ Status ExtendMasked(PyObject* values, PyObject* mask, int64_t size,
+ int64_t offset = 0) override {
+ DCHECK_GE(size, offset);
+ /// Ensure we've allocated enough space
+ RETURN_NOT_OK(this->Reserve(size - offset));
+ // Iterate over the items adding each one
+ return internal::VisitSequenceMasked(
+ values, mask, offset, [this](PyObject* item, bool is_masked, bool* /* unused */) {
+ if (is_masked) {
+ return this->AppendNull();
+ } else {
+ // This will also apply the null-checking convention in the event
+ // that the value is not masked
+ return this->Append(item); // perhaps use AppendValue instead?
+ }
+ });
+ }
+};
+
+template <typename T, typename Enable = void>
+class PyPrimitiveConverter;
+
+template <typename T>
+class PyListConverter;
+
+template <typename U, typename Enable = void>
+class PyDictionaryConverter;
+
+class PyStructConverter;
+
+template <typename T, typename Enable = void>
+struct PyConverterTrait;
+
+template <typename T>
+struct PyConverterTrait<
+ T, enable_if_t<!is_nested_type<T>::value && !is_interval_type<T>::value &&
+ !is_extension_type<T>::value>> {
+ using type = PyPrimitiveConverter<T>;
+};
+
+template <typename T>
+struct PyConverterTrait<T, enable_if_list_like<T>> {
+ using type = PyListConverter<T>;
+};
+
+template <>
+struct PyConverterTrait<StructType> {
+ using type = PyStructConverter;
+};
+
+template <>
+struct PyConverterTrait<DictionaryType> {
+ template <typename T>
+ using dictionary_type = PyDictionaryConverter<T>;
+};
+
+template <typename T>
+class PyPrimitiveConverter<T, enable_if_null<T>>
+ : public PrimitiveConverter<T, PyConverter> {
+ public:
+ Status Append(PyObject* value) override {
+ if (PyValue::IsNull(this->options_, value)) {
+ return this->primitive_builder_->AppendNull();
+ } else {
+ ARROW_ASSIGN_OR_RAISE(
+ auto converted, PyValue::Convert(this->primitive_type_, this->options_, value));
+ return this->primitive_builder_->Append(converted);
+ }
+ }
+};
+
+template <typename T>
+class PyPrimitiveConverter<
+ T, enable_if_t<is_boolean_type<T>::value || is_number_type<T>::value ||
+ is_decimal_type<T>::value || is_date_type<T>::value ||
+ is_time_type<T>::value>> : public PrimitiveConverter<T, PyConverter> {
+ public:
+ Status Append(PyObject* value) override {
+ // Since the required space has been already allocated in the Extend functions we can
+ // rely on the Unsafe builder API which improves the performance.
+ if (PyValue::IsNull(this->options_, value)) {
+ this->primitive_builder_->UnsafeAppendNull();
+ } else {
+ ARROW_ASSIGN_OR_RAISE(
+ auto converted, PyValue::Convert(this->primitive_type_, this->options_, value));
+ this->primitive_builder_->UnsafeAppend(converted);
+ }
+ return Status::OK();
+ }
+};
+
+template <typename T>
+class PyPrimitiveConverter<
+ T, enable_if_t<is_timestamp_type<T>::value || is_duration_type<T>::value>>
+ : public PrimitiveConverter<T, PyConverter> {
+ public:
+ Status Append(PyObject* value) override {
+ if (PyValue::IsNull(this->options_, value)) {
+ this->primitive_builder_->UnsafeAppendNull();
+ } else {
+ ARROW_ASSIGN_OR_RAISE(
+ auto converted, PyValue::Convert(this->primitive_type_, this->options_, value));
+ // Numpy NaT sentinels can be checked after the conversion
+ if (PyArray_CheckAnyScalarExact(value) &&
+ PyValue::IsNaT(this->primitive_type_, converted)) {
+ this->primitive_builder_->UnsafeAppendNull();
+ } else {
+ this->primitive_builder_->UnsafeAppend(converted);
+ }
+ }
+ return Status::OK();
+ }
+};
+
+template <typename T>
+class PyPrimitiveConverter<T, enable_if_t<std::is_same<T, FixedSizeBinaryType>::value>>
+ : public PrimitiveConverter<T, PyConverter> {
+ public:
+ Status Append(PyObject* value) override {
+ if (PyValue::IsNull(this->options_, value)) {
+ this->primitive_builder_->UnsafeAppendNull();
+ } else {
+ ARROW_RETURN_NOT_OK(
+ PyValue::Convert(this->primitive_type_, this->options_, value, view_));
+ ARROW_RETURN_NOT_OK(this->primitive_builder_->ReserveData(view_.size));
+ this->primitive_builder_->UnsafeAppend(view_.bytes);
+ }
+ return Status::OK();
+ }
+
+ protected:
+ PyBytesView view_;
+};
+
+template <typename T>
+class PyPrimitiveConverter<T, enable_if_base_binary<T>>
+ : public PrimitiveConverter<T, PyConverter> {
+ public:
+ using OffsetType = typename T::offset_type;
+
+ Status Append(PyObject* value) override {
+ if (PyValue::IsNull(this->options_, value)) {
+ this->primitive_builder_->UnsafeAppendNull();
+ } else {
+ ARROW_RETURN_NOT_OK(
+ PyValue::Convert(this->primitive_type_, this->options_, value, view_));
+ if (!view_.is_utf8) {
+ // observed binary value
+ observed_binary_ = true;
+ }
+ // Since we don't know the varying length input size in advance, we need to
+ // reserve space in the value builder one by one. ReserveData raises CapacityError
+ // if the value would not fit into the array.
+ ARROW_RETURN_NOT_OK(this->primitive_builder_->ReserveData(view_.size));
+ this->primitive_builder_->UnsafeAppend(view_.bytes,
+ static_cast<OffsetType>(view_.size));
+ }
+ return Status::OK();
+ }
+
+ Result<std::shared_ptr<Array>> ToArray() override {
+ ARROW_ASSIGN_OR_RAISE(auto array, (PrimitiveConverter<T, PyConverter>::ToArray()));
+ if (observed_binary_) {
+ // if we saw any non-unicode, cast results to BinaryArray
+ auto binary_type = TypeTraits<typename T::PhysicalType>::type_singleton();
+ return array->View(binary_type);
+ } else {
+ return array;
+ }
+ }
+
+ protected:
+ PyBytesView view_;
+ bool observed_binary_ = false;
+};
+
+template <typename U>
+class PyDictionaryConverter<U, enable_if_has_c_type<U>>
+ : public DictionaryConverter<U, PyConverter> {
+ public:
+ Status Append(PyObject* value) override {
+ if (PyValue::IsNull(this->options_, value)) {
+ return this->value_builder_->AppendNull();
+ } else {
+ ARROW_ASSIGN_OR_RAISE(auto converted,
+ PyValue::Convert(this->value_type_, this->options_, value));
+ return this->value_builder_->Append(converted);
+ }
+ }
+};
+
+template <typename U>
+class PyDictionaryConverter<U, enable_if_has_string_view<U>>
+ : public DictionaryConverter<U, PyConverter> {
+ public:
+ Status Append(PyObject* value) override {
+ if (PyValue::IsNull(this->options_, value)) {
+ return this->value_builder_->AppendNull();
+ } else {
+ ARROW_RETURN_NOT_OK(
+ PyValue::Convert(this->value_type_, this->options_, value, view_));
+ return this->value_builder_->Append(view_.bytes, static_cast<int32_t>(view_.size));
+ }
+ }
+
+ protected:
+ PyBytesView view_;
+};
+
+template <typename T>
+class PyListConverter : public ListConverter<T, PyConverter, PyConverterTrait> {
+ public:
+ Status Append(PyObject* value) override {
+ if (PyValue::IsNull(this->options_, value)) {
+ return this->list_builder_->AppendNull();
+ }
+
+ RETURN_NOT_OK(this->list_builder_->Append());
+ if (PyArray_Check(value)) {
+ RETURN_NOT_OK(AppendNdarray(value));
+ } else if (PySequence_Check(value)) {
+ RETURN_NOT_OK(AppendSequence(value));
+ } else {
+ return internal::InvalidType(
+ value, "was not a sequence or recognized null for conversion to list type");
+ }
+
+ return ValidateBuilder(this->list_type_);
+ }
+
+ protected:
+ Status ValidateBuilder(const MapType*) {
+ if (this->list_builder_->key_builder()->null_count() > 0) {
+ return Status::Invalid("Invalid Map: key field can not contain null values");
+ } else {
+ return Status::OK();
+ }
+ }
+
+ Status ValidateBuilder(const BaseListType*) { return Status::OK(); }
+
+ Status AppendSequence(PyObject* value) {
+ int64_t size = static_cast<int64_t>(PySequence_Size(value));
+ RETURN_NOT_OK(this->list_builder_->ValidateOverflow(size));
+ return this->value_converter_->Extend(value, size);
+ }
+
+ Status AppendNdarray(PyObject* value) {
+ PyArrayObject* ndarray = reinterpret_cast<PyArrayObject*>(value);
+ if (PyArray_NDIM(ndarray) != 1) {
+ return Status::Invalid("Can only convert 1-dimensional array values");
+ }
+ const int64_t size = PyArray_SIZE(ndarray);
+ RETURN_NOT_OK(this->list_builder_->ValidateOverflow(size));
+
+ const auto value_type = this->value_converter_->builder()->type();
+ switch (value_type->id()) {
+// If the value type does not match the expected NumPy dtype, then fall through
+// to a slower PySequence-based path
+#define LIST_FAST_CASE(TYPE_ID, TYPE, NUMPY_TYPE) \
+ case Type::TYPE_ID: { \
+ if (PyArray_DESCR(ndarray)->type_num != NUMPY_TYPE) { \
+ return this->value_converter_->Extend(value, size); \
+ } \
+ return AppendNdarrayTyped<TYPE, NUMPY_TYPE>(ndarray); \
+ }
+ LIST_FAST_CASE(BOOL, BooleanType, NPY_BOOL)
+ LIST_FAST_CASE(UINT8, UInt8Type, NPY_UINT8)
+ LIST_FAST_CASE(INT8, Int8Type, NPY_INT8)
+ LIST_FAST_CASE(UINT16, UInt16Type, NPY_UINT16)
+ LIST_FAST_CASE(INT16, Int16Type, NPY_INT16)
+ LIST_FAST_CASE(UINT32, UInt32Type, NPY_UINT32)
+ LIST_FAST_CASE(INT32, Int32Type, NPY_INT32)
+ LIST_FAST_CASE(UINT64, UInt64Type, NPY_UINT64)
+ LIST_FAST_CASE(INT64, Int64Type, NPY_INT64)
+ LIST_FAST_CASE(HALF_FLOAT, HalfFloatType, NPY_FLOAT16)
+ LIST_FAST_CASE(FLOAT, FloatType, NPY_FLOAT)
+ LIST_FAST_CASE(DOUBLE, DoubleType, NPY_DOUBLE)
+ LIST_FAST_CASE(TIMESTAMP, TimestampType, NPY_DATETIME)
+ LIST_FAST_CASE(DURATION, DurationType, NPY_TIMEDELTA)
+#undef LIST_FAST_CASE
+ default: {
+ return this->value_converter_->Extend(value, size);
+ }
+ }
+ }
+
+ template <typename ArrowType, int NUMPY_TYPE>
+ Status AppendNdarrayTyped(PyArrayObject* ndarray) {
+ // no need to go through the conversion
+ using NumpyTrait = internal::npy_traits<NUMPY_TYPE>;
+ using NumpyType = typename NumpyTrait::value_type;
+ using ValueBuilderType = typename TypeTraits<ArrowType>::BuilderType;
+
+ const bool null_sentinels_possible =
+ // Always treat Numpy's NaT as null
+ NUMPY_TYPE == NPY_DATETIME || NUMPY_TYPE == NPY_TIMEDELTA ||
+ // Observing pandas's null sentinels
+ (this->options_.from_pandas && NumpyTrait::supports_nulls);
+
+ auto value_builder =
+ checked_cast<ValueBuilderType*>(this->value_converter_->builder().get());
+
+ Ndarray1DIndexer<NumpyType> values(ndarray);
+ if (null_sentinels_possible) {
+ for (int64_t i = 0; i < values.size(); ++i) {
+ if (NumpyTrait::isnull(values[i])) {
+ RETURN_NOT_OK(value_builder->AppendNull());
+ } else {
+ RETURN_NOT_OK(value_builder->Append(values[i]));
+ }
+ }
+ } else if (!values.is_strided()) {
+ RETURN_NOT_OK(value_builder->AppendValues(values.data(), values.size()));
+ } else {
+ for (int64_t i = 0; i < values.size(); ++i) {
+ RETURN_NOT_OK(value_builder->Append(values[i]));
+ }
+ }
+ return Status::OK();
+ }
+};
+
+class PyStructConverter : public StructConverter<PyConverter, PyConverterTrait> {
+ public:
+ Status Append(PyObject* value) override {
+ if (PyValue::IsNull(this->options_, value)) {
+ return this->struct_builder_->AppendNull();
+ }
+ switch (input_kind_) {
+ case InputKind::DICT:
+ RETURN_NOT_OK(this->struct_builder_->Append());
+ return AppendDict(value);
+ case InputKind::TUPLE:
+ RETURN_NOT_OK(this->struct_builder_->Append());
+ return AppendTuple(value);
+ case InputKind::ITEMS:
+ RETURN_NOT_OK(this->struct_builder_->Append());
+ return AppendItems(value);
+ default:
+ RETURN_NOT_OK(InferInputKind(value));
+ return Append(value);
+ }
+ }
+
+ protected:
+ Status Init(MemoryPool* pool) override {
+ RETURN_NOT_OK((StructConverter<PyConverter, PyConverterTrait>::Init(pool)));
+
+ // Store the field names as a PyObjects for dict matching
+ num_fields_ = this->struct_type_->num_fields();
+ bytes_field_names_.reset(PyList_New(num_fields_));
+ unicode_field_names_.reset(PyList_New(num_fields_));
+ RETURN_IF_PYERROR();
+
+ for (int i = 0; i < num_fields_; i++) {
+ const auto& field_name = this->struct_type_->field(i)->name();
+ PyObject* bytes = PyBytes_FromStringAndSize(field_name.c_str(), field_name.size());
+ PyObject* unicode =
+ PyUnicode_FromStringAndSize(field_name.c_str(), field_name.size());
+ RETURN_IF_PYERROR();
+ PyList_SET_ITEM(bytes_field_names_.obj(), i, bytes);
+ PyList_SET_ITEM(unicode_field_names_.obj(), i, unicode);
+ }
+ return Status::OK();
+ }
+
+ Status InferInputKind(PyObject* value) {
+ // Infer input object's type, note that heterogeneous sequences are not allowed
+ if (PyDict_Check(value)) {
+ input_kind_ = InputKind::DICT;
+ } else if (PyTuple_Check(value)) {
+ input_kind_ = InputKind::TUPLE;
+ } else if (PySequence_Check(value)) {
+ input_kind_ = InputKind::ITEMS;
+ } else {
+ return internal::InvalidType(value,
+ "was not a dict, tuple, or recognized null value "
+ "for conversion to struct type");
+ }
+ return Status::OK();
+ }
+
+ Status InferKeyKind(PyObject* items) {
+ for (int i = 0; i < PySequence_Length(items); i++) {
+ // retrieve the key from the passed key-value pairs
+ ARROW_ASSIGN_OR_RAISE(auto pair, GetKeyValuePair(items, i));
+
+ // check key exists between the unicode field names
+ bool do_contain = PySequence_Contains(unicode_field_names_.obj(), pair.first);
+ RETURN_IF_PYERROR();
+ if (do_contain) {
+ key_kind_ = KeyKind::UNICODE;
+ return Status::OK();
+ }
+
+ // check key exists between the bytes field names
+ do_contain = PySequence_Contains(bytes_field_names_.obj(), pair.first);
+ RETURN_IF_PYERROR();
+ if (do_contain) {
+ key_kind_ = KeyKind::BYTES;
+ return Status::OK();
+ }
+ }
+ return Status::OK();
+ }
+
+ Status AppendEmpty() {
+ for (int i = 0; i < num_fields_; i++) {
+ RETURN_NOT_OK(this->children_[i]->Append(Py_None));
+ }
+ return Status::OK();
+ }
+
+ Status AppendTuple(PyObject* tuple) {
+ if (!PyTuple_Check(tuple)) {
+ return internal::InvalidType(tuple, "was expecting a tuple");
+ }
+ if (PyTuple_GET_SIZE(tuple) != num_fields_) {
+ return Status::Invalid("Tuple size must be equal to number of struct fields");
+ }
+ for (int i = 0; i < num_fields_; i++) {
+ PyObject* value = PyTuple_GET_ITEM(tuple, i);
+ RETURN_NOT_OK(this->children_[i]->Append(value));
+ }
+ return Status::OK();
+ }
+
+ Status AppendDict(PyObject* dict) {
+ if (!PyDict_Check(dict)) {
+ return internal::InvalidType(dict, "was expecting a dict");
+ }
+ switch (key_kind_) {
+ case KeyKind::UNICODE:
+ return AppendDict(dict, unicode_field_names_.obj());
+ case KeyKind::BYTES:
+ return AppendDict(dict, bytes_field_names_.obj());
+ default:
+ RETURN_NOT_OK(InferKeyKind(PyDict_Items(dict)));
+ if (key_kind_ == KeyKind::UNKNOWN) {
+ // was unable to infer the type which means that all keys are absent
+ return AppendEmpty();
+ } else {
+ return AppendDict(dict);
+ }
+ }
+ }
+
+ Status AppendItems(PyObject* items) {
+ if (!PySequence_Check(items)) {
+ return internal::InvalidType(items, "was expecting a sequence of key-value items");
+ }
+ switch (key_kind_) {
+ case KeyKind::UNICODE:
+ return AppendItems(items, unicode_field_names_.obj());
+ case KeyKind::BYTES:
+ return AppendItems(items, bytes_field_names_.obj());
+ default:
+ RETURN_NOT_OK(InferKeyKind(items));
+ if (key_kind_ == KeyKind::UNKNOWN) {
+ // was unable to infer the type which means that all keys are absent
+ return AppendEmpty();
+ } else {
+ return AppendItems(items);
+ }
+ }
+ }
+
+ Status AppendDict(PyObject* dict, PyObject* field_names) {
+ // NOTE we're ignoring any extraneous dict items
+ for (int i = 0; i < num_fields_; i++) {
+ PyObject* name = PyList_GET_ITEM(field_names, i); // borrowed
+ PyObject* value = PyDict_GetItem(dict, name); // borrowed
+ if (value == NULL) {
+ RETURN_IF_PYERROR();
+ }
+ RETURN_NOT_OK(this->children_[i]->Append(value ? value : Py_None));
+ }
+ return Status::OK();
+ }
+
+ Result<std::pair<PyObject*, PyObject*>> GetKeyValuePair(PyObject* seq, int index) {
+ PyObject* pair = PySequence_GetItem(seq, index);
+ RETURN_IF_PYERROR();
+ if (!PyTuple_Check(pair) || PyTuple_Size(pair) != 2) {
+ return internal::InvalidType(pair, "was expecting tuple of (key, value) pair");
+ }
+ PyObject* key = PyTuple_GetItem(pair, 0);
+ RETURN_IF_PYERROR();
+ PyObject* value = PyTuple_GetItem(pair, 1);
+ RETURN_IF_PYERROR();
+ return std::make_pair(key, value);
+ }
+
+ Status AppendItems(PyObject* items, PyObject* field_names) {
+ auto length = static_cast<int>(PySequence_Size(items));
+ RETURN_IF_PYERROR();
+
+ // append the values for the defined fields
+ for (int i = 0; i < std::min(num_fields_, length); i++) {
+ // retrieve the key-value pair
+ ARROW_ASSIGN_OR_RAISE(auto pair, GetKeyValuePair(items, i));
+
+ // validate that the key and the field name are equal
+ PyObject* name = PyList_GET_ITEM(field_names, i);
+ bool are_equal = PyObject_RichCompareBool(pair.first, name, Py_EQ);
+ RETURN_IF_PYERROR();
+
+ // finally append to the respective child builder
+ if (are_equal) {
+ RETURN_NOT_OK(this->children_[i]->Append(pair.second));
+ } else {
+ ARROW_ASSIGN_OR_RAISE(auto key_view, PyBytesView::FromString(pair.first));
+ ARROW_ASSIGN_OR_RAISE(auto name_view, PyBytesView::FromString(name));
+ return Status::Invalid("The expected field name is `", name_view.bytes, "` but `",
+ key_view.bytes, "` was given");
+ }
+ }
+ // insert null values for missing fields
+ for (int i = length; i < num_fields_; i++) {
+ RETURN_NOT_OK(this->children_[i]->AppendNull());
+ }
+ return Status::OK();
+ }
+
+ // Whether we're converting from a sequence of dicts or tuples or list of pairs
+ enum class InputKind { UNKNOWN, DICT, TUPLE, ITEMS } input_kind_ = InputKind::UNKNOWN;
+ // Whether the input dictionary keys' type is python bytes or unicode
+ enum class KeyKind { UNKNOWN, BYTES, UNICODE } key_kind_ = KeyKind::UNKNOWN;
+ // Store the field names as a PyObjects for dict matching
+ OwnedRef bytes_field_names_;
+ OwnedRef unicode_field_names_;
+ // Store the number of fields for later reuse
+ int num_fields_;
+};
+
+// Convert *obj* to a sequence if necessary
+// Fill *size* to its length. If >= 0 on entry, *size* is an upper size
+// bound that may lead to truncation.
+Status ConvertToSequenceAndInferSize(PyObject* obj, PyObject** seq, int64_t* size) {
+ if (PySequence_Check(obj)) {
+ // obj is already a sequence
+ int64_t real_size = static_cast<int64_t>(PySequence_Size(obj));
+ if (*size < 0) {
+ *size = real_size;
+ } else {
+ *size = std::min(real_size, *size);
+ }
+ Py_INCREF(obj);
+ *seq = obj;
+ } else if (*size < 0) {
+ // unknown size, exhaust iterator
+ *seq = PySequence_List(obj);
+ RETURN_IF_PYERROR();
+ *size = static_cast<int64_t>(PyList_GET_SIZE(*seq));
+ } else {
+ // size is known but iterator could be infinite
+ Py_ssize_t i, n = *size;
+ PyObject* iter = PyObject_GetIter(obj);
+ RETURN_IF_PYERROR();
+ OwnedRef iter_ref(iter);
+ PyObject* lst = PyList_New(n);
+ RETURN_IF_PYERROR();
+ for (i = 0; i < n; i++) {
+ PyObject* item = PyIter_Next(iter);
+ if (!item) break;
+ PyList_SET_ITEM(lst, i, item);
+ }
+ // Shrink list if len(iterator) < size
+ if (i < n && PyList_SetSlice(lst, i, n, NULL)) {
+ Py_DECREF(lst);
+ return Status::UnknownError("failed to resize list");
+ }
+ *seq = lst;
+ *size = std::min<int64_t>(i, *size);
+ }
+ return Status::OK();
+}
+
+Result<std::shared_ptr<ChunkedArray>> ConvertPySequence(PyObject* obj, PyObject* mask,
+ PyConversionOptions options,
+ MemoryPool* pool) {
+ PyAcquireGIL lock;
+
+ PyObject* seq;
+ OwnedRef tmp_seq_nanny;
+
+ ARROW_ASSIGN_OR_RAISE(auto is_pandas_imported, internal::IsModuleImported("pandas"));
+ if (is_pandas_imported) {
+ // If pandas has been already imported initialize the static pandas objects to
+ // support converting from pd.Timedelta and pd.Timestamp objects
+ internal::InitPandasStaticData();
+ }
+
+ int64_t size = options.size;
+ RETURN_NOT_OK(ConvertToSequenceAndInferSize(obj, &seq, &size));
+ tmp_seq_nanny.reset(seq);
+
+ // In some cases, type inference may be "loose", like strings. If the user
+ // passed pa.string(), then we will error if we encounter any non-UTF8
+ // value. If not, then we will allow the result to be a BinaryArray
+ if (options.type == nullptr) {
+ ARROW_ASSIGN_OR_RAISE(options.type, InferArrowType(seq, mask, options.from_pandas));
+ options.strict = false;
+ } else {
+ options.strict = true;
+ }
+ DCHECK_GE(size, 0);
+
+ ARROW_ASSIGN_OR_RAISE(auto converter, (MakeConverter<PyConverter, PyConverterTrait>(
+ options.type, options, pool)));
+ if (converter->may_overflow()) {
+ // The converter hierarchy contains binary- or list-like builders which can overflow
+ // depending on the input values. Wrap the converter with a chunker which detects
+ // the overflow and automatically creates new chunks.
+ ARROW_ASSIGN_OR_RAISE(auto chunked_converter, MakeChunker(std::move(converter)));
+ if (mask != nullptr && mask != Py_None) {
+ RETURN_NOT_OK(chunked_converter->ExtendMasked(seq, mask, size));
+ } else {
+ RETURN_NOT_OK(chunked_converter->Extend(seq, size));
+ }
+ return chunked_converter->ToChunkedArray();
+ } else {
+ // If the converter can't overflow spare the capacity error checking on the hot-path,
+ // this improves the performance roughly by ~10% for primitive types.
+ if (mask != nullptr && mask != Py_None) {
+ RETURN_NOT_OK(converter->ExtendMasked(seq, mask, size));
+ } else {
+ RETURN_NOT_OK(converter->Extend(seq, size));
+ }
+ return converter->ToChunkedArray();
+ }
+}
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/python_to_arrow.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/python_to_arrow.h
new file mode 100644
index 0000000000..d167996ba8
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/python_to_arrow.h
@@ -0,0 +1,80 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Functions for converting between CPython built-in data structures and Arrow
+// data structures
+
+#pragma once
+
+#include "arrow/python/platform.h"
+
+#include <cstdint>
+#include <memory>
+
+#include "arrow/python/visibility.h"
+#include "arrow/type.h"
+#include "arrow/util/macros.h"
+
+#include "arrow/python/common.h"
+
+namespace arrow {
+
+class Array;
+class Status;
+
+namespace py {
+
+/// \brief Options controlling conversion of a Python sequence to Arrow.
+struct PyConversionOptions {
+  PyConversionOptions() = default;
+
+  // NOTE(review): `pool` is accepted but neither stored nor used by this
+  // constructor — presumably kept for signature compatibility; confirm.
+  PyConversionOptions(const std::shared_ptr<DataType>& type, int64_t size,
+                      MemoryPool* pool, bool from_pandas)
+      : type(type), size(size), from_pandas(from_pandas) {}
+
+  // Set to null if to be inferred (ConvertPySequence then runs type inference
+  // over the values)
+  std::shared_ptr<DataType> type;
+
+  // Default is -1, which indicates the size should be the same as the input sequence
+  int64_t size = -1;
+
+  // Forwarded to type inference by ConvertPySequence
+  bool from_pandas = false;
+
+  /// Used to maintain backwards compatibility for
+  /// timezone bugs (see ARROW-9528). Should be removed
+  /// after Arrow 2.0 release.
+  bool ignore_timezone = false;
+
+  // Set by ConvertPySequence itself: true when an explicit type was supplied
+  // (values must match it exactly), false when the type was inferred
+  bool strict = false;
+};
+
+/// \brief Convert sequence (list, generator, NumPy array with dtype object) of
+/// Python objects.
+/// \param[in] obj the sequence to convert
+/// \param[in] mask a NumPy array of true/false values to indicate whether
+/// values in the sequence are null (true) or not null (false). This parameter
+/// may be null
+/// \param[in] options various conversion options
+/// \param[in] pool MemoryPool to use for allocations
+/// \return Result ChunkedArray
+ARROW_PYTHON_EXPORT
+Result<std::shared_ptr<ChunkedArray>> ConvertPySequence(
+    PyObject* obj, PyObject* mask, PyConversionOptions options,
+    MemoryPool* pool = default_memory_pool());
+
+} // namespace py
+
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/serialize.cc b/contrib/libs/apache/arrow/cpp/src/arrow/python/serialize.cc
new file mode 100644
index 0000000000..ad079cbd9c
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/serialize.cc
@@ -0,0 +1,798 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/python/serialize.h"
+#include "arrow/python/numpy_interop.h"
+
+#include <cstdint>
+#include <limits>
+#include <memory>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include <numpy/arrayobject.h>
+#include <numpy/arrayscalars.h>
+
+#include "arrow/array.h"
+#include "arrow/array/builder_binary.h"
+#include "arrow/array/builder_nested.h"
+#include "arrow/array/builder_primitive.h"
+#include "arrow/array/builder_union.h"
+#include "arrow/io/interfaces.h"
+#include "arrow/io/memory.h"
+#include "arrow/ipc/util.h"
+#include "arrow/ipc/writer.h"
+#include "arrow/record_batch.h"
+#include "arrow/result.h"
+#include "arrow/tensor.h"
+#include "arrow/util/logging.h"
+
+#include "arrow/python/common.h"
+#include "arrow/python/datetime.h"
+#include "arrow/python/helpers.h"
+#include "arrow/python/iterators.h"
+#include "arrow/python/numpy_convert.h"
+#include "arrow/python/platform.h"
+#include "arrow/python/pyarrow.h"
+
+constexpr int32_t kMaxRecursionDepth = 100;
+
+namespace arrow {
+
+using internal::checked_cast;
+
+namespace py {
+
+class SequenceBuilder;
+class DictBuilder;
+
+Status Append(PyObject* context, PyObject* elem, SequenceBuilder* builder,
+ int32_t recursion_depth, SerializedPyObject* blobs_out);
+
+// A Sequence is a heterogeneous collection of elements. It can contain
+// scalar Python types, lists, tuples, dictionaries, tensors and sparse tensors.
+// Internally every element lands in a dense union: each distinct Python type
+// gets its own lazily-created child builder, indexed by its PythonType tag.
+class SequenceBuilder {
+ public:
+  explicit SequenceBuilder(MemoryPool* pool = default_memory_pool())
+      : pool_(pool),
+        types_(::arrow::int8(), pool),
+        offsets_(::arrow::int32(), pool),
+        type_map_(PythonType::NUM_PYTHON_TYPES, -1) {
+    // Child 0 of the union is the null type; all other children are appended
+    // on first use via CreateAndUpdate().
+    auto null_builder = std::make_shared<NullBuilder>(pool);
+    auto initial_ty = dense_union({field("0", null())});
+    builder_.reset(new DenseUnionBuilder(pool, {null_builder}, initial_ty));
+  }
+
+  // Appending a none to the sequence
+  Status AppendNone() { return builder_->AppendNull(); }
+
+  // Lazily create *child_builder via make_builder, register it as a new union
+  // child (named with the tag's decimal value, rendered locale-independently),
+  // and record its child index in type_map_. Then append the tag's type code
+  // for the current element.
+  template <typename BuilderType, typename MakeBuilderFn>
+  Status CreateAndUpdate(std::shared_ptr<BuilderType>* child_builder, int8_t tag,
+                         MakeBuilderFn make_builder) {
+    if (!*child_builder) {
+      child_builder->reset(make_builder());
+      std::ostringstream convert;
+      convert.imbue(std::locale::classic());
+      convert << static_cast<int>(tag);
+      type_map_[tag] = builder_->AppendChild(*child_builder, convert.str());
+    }
+    return builder_->Append(type_map_[tag]);
+  }
+
+  // Append a primitive value, creating its child builder on first use.
+  template <typename BuilderType, typename T>
+  Status AppendPrimitive(std::shared_ptr<BuilderType>* child_builder, const T val,
+                         int8_t tag) {
+    RETURN_NOT_OK(
+        CreateAndUpdate(child_builder, tag, [this]() { return new BuilderType(pool_); }));
+    return (*child_builder)->Append(val);
+  }
+
+  // Appending a boolean to the sequence
+  Status AppendBool(const bool data) {
+    return AppendPrimitive(&bools_, data, PythonType::BOOL);
+  }
+
+  // Appending an int64_t to the sequence
+  Status AppendInt64(const int64_t data) {
+    return AppendPrimitive(&ints_, data, PythonType::INT);
+  }
+
+  // Append a list of bytes to the sequence
+  Status AppendBytes(const uint8_t* data, int32_t length) {
+    RETURN_NOT_OK(CreateAndUpdate(&bytes_, PythonType::BYTES,
+                                  [this]() { return new BinaryBuilder(pool_); }));
+    return bytes_->Append(data, length);
+  }
+
+  // Appending a string to the sequence
+  Status AppendString(const char* data, int32_t length) {
+    RETURN_NOT_OK(CreateAndUpdate(&strings_, PythonType::STRING,
+                                  [this]() { return new StringBuilder(pool_); }));
+    return strings_->Append(data, length);
+  }
+
+  // Appending a half_float to the sequence
+  Status AppendHalfFloat(const npy_half data) {
+    return AppendPrimitive(&half_floats_, data, PythonType::HALF_FLOAT);
+  }
+
+  // Appending a float to the sequence
+  Status AppendFloat(const float data) {
+    return AppendPrimitive(&floats_, data, PythonType::FLOAT);
+  }
+
+  // Appending a double to the sequence
+  Status AppendDouble(const double data) {
+    return AppendPrimitive(&doubles_, data, PythonType::DOUBLE);
+  }
+
+  // Appending a Date64 timestamp to the sequence
+  Status AppendDate64(const int64_t timestamp) {
+    return AppendPrimitive(&date64s_, timestamp, PythonType::DATE64);
+  }
+
+  // Appending a tensor to the sequence
+  //
+  // \param tensor_index Index of the tensor in the object.
+  Status AppendTensor(const int32_t tensor_index) {
+    RETURN_NOT_OK(CreateAndUpdate(&tensor_indices_, PythonType::TENSOR,
+                                  [this]() { return new Int32Builder(pool_); }));
+    return tensor_indices_->Append(tensor_index);
+  }
+
+  // Appending a sparse coo tensor to the sequence
+  //
+  // \param sparse_coo_tensor_index Index of the sparse coo tensor in the object.
+  Status AppendSparseCOOTensor(const int32_t sparse_coo_tensor_index) {
+    RETURN_NOT_OK(CreateAndUpdate(&sparse_coo_tensor_indices_,
+                                  PythonType::SPARSECOOTENSOR,
+                                  [this]() { return new Int32Builder(pool_); }));
+    return sparse_coo_tensor_indices_->Append(sparse_coo_tensor_index);
+  }
+
+  // Appending a sparse csr matrix to the sequence
+  //
+  // \param sparse_csr_matrix_index Index of the sparse csr matrix in the object.
+  Status AppendSparseCSRMatrix(const int32_t sparse_csr_matrix_index) {
+    RETURN_NOT_OK(CreateAndUpdate(&sparse_csr_matrix_indices_,
+                                  PythonType::SPARSECSRMATRIX,
+                                  [this]() { return new Int32Builder(pool_); }));
+    return sparse_csr_matrix_indices_->Append(sparse_csr_matrix_index);
+  }
+
+  // Appending a sparse csc matrix to the sequence
+  //
+  // \param sparse_csc_matrix_index Index of the sparse csc matrix in the object.
+  Status AppendSparseCSCMatrix(const int32_t sparse_csc_matrix_index) {
+    RETURN_NOT_OK(CreateAndUpdate(&sparse_csc_matrix_indices_,
+                                  PythonType::SPARSECSCMATRIX,
+                                  [this]() { return new Int32Builder(pool_); }));
+    return sparse_csc_matrix_indices_->Append(sparse_csc_matrix_index);
+  }
+
+  // Appending a sparse csf tensor to the sequence
+  //
+  // \param sparse_csf_tensor_index Index of the sparse csf tensor in the object.
+  Status AppendSparseCSFTensor(const int32_t sparse_csf_tensor_index) {
+    RETURN_NOT_OK(CreateAndUpdate(&sparse_csf_tensor_indices_,
+                                  PythonType::SPARSECSFTENSOR,
+                                  [this]() { return new Int32Builder(pool_); }));
+    return sparse_csf_tensor_indices_->Append(sparse_csf_tensor_index);
+  }
+
+  // Appending a numpy ndarray to the sequence
+  //
+  // \param tensor_index Index of the tensor in the object.
+  Status AppendNdarray(const int32_t ndarray_index) {
+    RETURN_NOT_OK(CreateAndUpdate(&ndarray_indices_, PythonType::NDARRAY,
+                                  [this]() { return new Int32Builder(pool_); }));
+    return ndarray_indices_->Append(ndarray_index);
+  }
+
+  // Appending a buffer to the sequence
+  //
+  // \param buffer_index Index of the buffer in the object.
+  Status AppendBuffer(const int32_t buffer_index) {
+    RETURN_NOT_OK(CreateAndUpdate(&buffer_indices_, PythonType::BUFFER,
+                                  [this]() { return new Int32Builder(pool_); }));
+    return buffer_indices_->Append(buffer_index);
+  }
+
+  // Shared implementation for list/tuple/set: creates (on first use) a
+  // ListBuilder whose values are a nested SequenceBuilder, appends one list
+  // slot, and recursively serializes every element of `sequence` into it.
+  // Fails with NotImplemented once kMaxRecursionDepth is reached, which also
+  // guards against self-referencing containers.
+  Status AppendSequence(PyObject* context, PyObject* sequence, int8_t tag,
+                        std::shared_ptr<ListBuilder>& target_sequence,
+                        std::unique_ptr<SequenceBuilder>& values, int32_t recursion_depth,
+                        SerializedPyObject* blobs_out) {
+    if (recursion_depth >= kMaxRecursionDepth) {
+      return Status::NotImplemented(
+          "This object exceeds the maximum recursion depth. It may contain itself "
+          "recursively.");
+    }
+    RETURN_NOT_OK(CreateAndUpdate(&target_sequence, tag, [this, &values]() {
+      values.reset(new SequenceBuilder(pool_));
+      return new ListBuilder(pool_, values->builder());
+    }));
+    RETURN_NOT_OK(target_sequence->Append());
+    return internal::VisitIterable(
+        sequence, [&](PyObject* obj, bool* keep_going /* unused */) {
+          return Append(context, obj, values.get(), recursion_depth, blobs_out);
+        });
+  }
+
+  Status AppendList(PyObject* context, PyObject* list, int32_t recursion_depth,
+                    SerializedPyObject* blobs_out) {
+    return AppendSequence(context, list, PythonType::LIST, lists_, list_values_,
+                          recursion_depth + 1, blobs_out);
+  }
+
+  Status AppendTuple(PyObject* context, PyObject* tuple, int32_t recursion_depth,
+                     SerializedPyObject* blobs_out) {
+    return AppendSequence(context, tuple, PythonType::TUPLE, tuples_, tuple_values_,
+                          recursion_depth + 1, blobs_out);
+  }
+
+  Status AppendSet(PyObject* context, PyObject* set, int32_t recursion_depth,
+                   SerializedPyObject* blobs_out) {
+    return AppendSequence(context, set, PythonType::SET, sets_, set_values_,
+                          recursion_depth + 1, blobs_out);
+  }
+
+  // Defined after DictBuilder below since it needs the complete type.
+  Status AppendDict(PyObject* context, PyObject* dict, int32_t recursion_depth,
+                    SerializedPyObject* blobs_out);
+
+  // Finish building the sequence and return the result.
+  // Input arrays may be nullptr
+  Status Finish(std::shared_ptr<Array>* out) { return builder_->Finish(out); }
+
+  std::shared_ptr<DenseUnionBuilder> builder() { return builder_; }
+
+ private:
+  MemoryPool* pool_;
+
+  Int8Builder types_;
+  Int32Builder offsets_;
+
+  /// Mapping from PythonType to child index
+  std::vector<int8_t> type_map_;
+
+  // Scalar child builders, created lazily on first append of each type.
+  std::shared_ptr<BooleanBuilder> bools_;
+  std::shared_ptr<Int64Builder> ints_;
+  std::shared_ptr<BinaryBuilder> bytes_;
+  std::shared_ptr<StringBuilder> strings_;
+  std::shared_ptr<HalfFloatBuilder> half_floats_;
+  std::shared_ptr<FloatBuilder> floats_;
+  std::shared_ptr<DoubleBuilder> doubles_;
+  std::shared_ptr<Date64Builder> date64s_;
+
+  // Nested containers: each list-like child owns a nested SequenceBuilder
+  // (or DictBuilder) for its element values.
+  std::unique_ptr<SequenceBuilder> list_values_;
+  std::shared_ptr<ListBuilder> lists_;
+  std::unique_ptr<DictBuilder> dict_values_;
+  std::shared_ptr<ListBuilder> dicts_;
+  std::unique_ptr<SequenceBuilder> tuple_values_;
+  std::shared_ptr<ListBuilder> tuples_;
+  std::unique_ptr<SequenceBuilder> set_values_;
+  std::shared_ptr<ListBuilder> sets_;
+
+  // Out-of-band payloads are stored as int32 indices into the corresponding
+  // vectors of SerializedPyObject.
+  std::shared_ptr<Int32Builder> tensor_indices_;
+  std::shared_ptr<Int32Builder> sparse_coo_tensor_indices_;
+  std::shared_ptr<Int32Builder> sparse_csr_matrix_indices_;
+  std::shared_ptr<Int32Builder> sparse_csc_matrix_indices_;
+  std::shared_ptr<Int32Builder> sparse_csf_tensor_indices_;
+  std::shared_ptr<Int32Builder> ndarray_indices_;
+  std::shared_ptr<Int32Builder> buffer_indices_;
+
+  std::shared_ptr<DenseUnionBuilder> builder_;
+};
+
+// Constructing dictionaries of key/value pairs. Sequences of
+// keys and values are built separately using a pair of
+// SequenceBuilders. The resulting Arrow representation
+// can be obtained via the Finish method.
+class DictBuilder {
+ public:
+  // NOTE(review): the default pool here is nullptr, unlike SequenceBuilder's
+  // default_memory_pool(); when default-constructed, the nested builders are
+  // handed a null pool — confirm this is intended.
+  explicit DictBuilder(MemoryPool* pool = nullptr) : keys_(pool), vals_(pool) {
+    // The struct type is declared with empty dense unions; the actual union
+    // children are registered lazily by the nested SequenceBuilders.
+    builder_.reset(new StructBuilder(struct_({field("keys", dense_union(FieldVector{})),
+                                              field("vals", dense_union(FieldVector{}))}),
+                                     pool, {keys_.builder(), vals_.builder()}));
+  }
+
+  // Builder for the keys of the dictionary
+  SequenceBuilder& keys() { return keys_; }
+  // Builder for the values of the dictionary
+  SequenceBuilder& vals() { return vals_; }
+
+  // Construct an Arrow StructArray representing the dictionary.
+  // Contains a field "keys" for the keys and "vals" for the values.
+  Status Finish(std::shared_ptr<Array>* out) { return builder_->Finish(out); }
+
+  std::shared_ptr<StructBuilder> builder() { return builder_; }
+
+ private:
+  SequenceBuilder keys_;
+  SequenceBuilder vals_;
+  std::shared_ptr<StructBuilder> builder_;
+};
+
+// Append a Python dict. The dict is stored as a list child (PythonType::DICT)
+// of struct<keys, vals>; keys and values are serialized recursively into the
+// nested DictBuilder. Depth is bounded by kMaxRecursionDepth to reject
+// self-referencing structures.
+Status SequenceBuilder::AppendDict(PyObject* context, PyObject* dict,
+                                   int32_t recursion_depth,
+                                   SerializedPyObject* blobs_out) {
+  if (recursion_depth >= kMaxRecursionDepth) {
+    return Status::NotImplemented(
+        "This object exceeds the maximum recursion depth. It may contain itself "
+        "recursively.");
+  }
+  RETURN_NOT_OK(CreateAndUpdate(&dicts_, PythonType::DICT, [this]() {
+    dict_values_.reset(new DictBuilder(pool_));
+    return new ListBuilder(pool_, dict_values_->builder());
+  }));
+  RETURN_NOT_OK(dicts_->Append());
+  PyObject* key;
+  PyObject* value;
+  Py_ssize_t pos = 0;
+  // PyDict_Next yields borrowed references; no refcounting needed here.
+  while (PyDict_Next(dict, &pos, &key, &value)) {
+    RETURN_NOT_OK(dict_values_->builder()->Append());
+    RETURN_NOT_OK(
+        Append(context, key, &dict_values_->keys(), recursion_depth + 1, blobs_out));
+    RETURN_NOT_OK(
+        Append(context, value, &dict_values_->vals(), recursion_depth + 1, blobs_out));
+  }
+
+  // This block is used to decrement the reference counts of the results
+  // returned by the serialization callback, which is called in AppendArray,
+  // in DeserializeDict and in Append
+  static PyObject* py_type = PyUnicode_FromString("_pytype_");  // intentionally immortal, never released
+  if (PyDict_Contains(dict, py_type)) {
+    // If the dictionary contains the key "_pytype_", then the user has to
+    // have registered a callback.
+    if (context == Py_None) {
+      return Status::Invalid("No serialization callback set");
+    }
+    // NOTE(review): releases the reference produced by the serialize
+    // callback; `dict` must therefore be owned (not borrowed) whenever
+    // "_pytype_" is present — confirm all call sites.
+    Py_XDECREF(dict);
+  }
+  return Status::OK();
+}
+
+// Invoke `method_name` on the serialization context with `elem` as the sole
+// argument. On success *result holds a new reference to the callback's return
+// value; if no context is registered (Py_None), *result is set to NULL and a
+// SerializationError is returned.
+Status CallCustomCallback(PyObject* context, PyObject* method_name, PyObject* elem,
+                          PyObject** result) {
+  if (context == Py_None) {
+    *result = NULL;
+    return Status::SerializationError("error while calling callback on ",
+                                      internal::PyObject_StdStringRepr(elem),
+                                      ": handler not registered");
+  } else {
+    *result = PyObject_CallMethodObjArgs(context, method_name, elem, NULL);
+    // Convert any raised Python exception into a Status.
+    return CheckPyError();
+  }
+}
+
+// Call the context's "_serialize_callback" for `value` and require that it
+// returns a dict (which Append/AppendDict then serialize recursively).
+Status CallSerializeCallback(PyObject* context, PyObject* value,
+                             PyObject** serialized_object) {
+  OwnedRef method_name(PyUnicode_FromString("_serialize_callback"));
+  RETURN_NOT_OK(CallCustomCallback(context, method_name.obj(), value, serialized_object));
+  if (!PyDict_Check(*serialized_object)) {
+    // NOTE(review): on this error path the new reference held in
+    // *serialized_object is not released here — confirm callers (or GC)
+    // cover it.
+    return Status::TypeError("serialization callback must return a valid dictionary");
+  }
+  return Status::OK();
+}
+
+// Call the context's "_deserialize_callback" for `value`; *deserialized_object
+// receives a new reference to the reconstructed Python object.
+Status CallDeserializeCallback(PyObject* context, PyObject* value,
+                               PyObject** deserialized_object) {
+  OwnedRef method_name(PyUnicode_FromString("_deserialize_callback"));
+  return CallCustomCallback(context, method_name.obj(), value, deserialized_object);
+}
+
+Status AppendArray(PyObject* context, PyArrayObject* array, SequenceBuilder* builder,
+ int32_t recursion_depth, SerializedPyObject* blobs_out);
+
+// Append a signed Numpy integer scalar, widened to int64 (always fits; no
+// overflow check needed for signed types up to 64 bits).
+template <typename NumpyScalarObject>
+Status AppendIntegerScalar(PyObject* obj, SequenceBuilder* builder) {
+  int64_t value = reinterpret_cast<NumpyScalarObject*>(obj)->obval;
+  return builder->AppendInt64(value);
+}
+
+// Append a potentially 64-bit wide unsigned Numpy scalar.
+// Must check for overflow as we reinterpret it as signed int64.
+template <typename NumpyScalarObject>
+Status AppendLargeUnsignedScalar(PyObject* obj, SequenceBuilder* builder) {
+  // Largest unsigned value representable as an int64.
+  constexpr uint64_t max_value = std::numeric_limits<int64_t>::max();
+
+  uint64_t value = reinterpret_cast<NumpyScalarObject*>(obj)->obval;
+  if (value > max_value) {
+    return Status::Invalid("cannot serialize Numpy uint64 scalar >= 2**63");
+  }
+  return builder->AppendInt64(static_cast<int64_t>(value));
+}
+
+// Dispatch a Numpy scalar object to the matching typed append. Boolean and
+// floating kinds keep their own union children; every integer kind is widened
+// to int64 (unsigned 64-bit kinds are range-checked first).
+Status AppendScalar(PyObject* obj, SequenceBuilder* builder) {
+  if (PyArray_IsScalar(obj, Bool)) {
+    return builder->AppendBool(reinterpret_cast<PyBoolScalarObject*>(obj)->obval != 0);
+  } else if (PyArray_IsScalar(obj, Half)) {
+    return builder->AppendHalfFloat(reinterpret_cast<PyHalfScalarObject*>(obj)->obval);
+  } else if (PyArray_IsScalar(obj, Float)) {
+    return builder->AppendFloat(reinterpret_cast<PyFloatScalarObject*>(obj)->obval);
+  } else if (PyArray_IsScalar(obj, Double)) {
+    return builder->AppendDouble(reinterpret_cast<PyDoubleScalarObject*>(obj)->obval);
+  }
+  // Signed integer kinds: all fit int64.
+  if (PyArray_IsScalar(obj, Byte)) {
+    return AppendIntegerScalar<PyByteScalarObject>(obj, builder);
+  } else if (PyArray_IsScalar(obj, Short)) {
+    return AppendIntegerScalar<PyShortScalarObject>(obj, builder);
+  } else if (PyArray_IsScalar(obj, Int)) {
+    return AppendIntegerScalar<PyIntScalarObject>(obj, builder);
+  } else if (PyArray_IsScalar(obj, Long)) {
+    return AppendIntegerScalar<PyLongScalarObject>(obj, builder);
+  } else if (PyArray_IsScalar(obj, LongLong)) {
+    return AppendIntegerScalar<PyLongLongScalarObject>(obj, builder);
+  } else if (PyArray_IsScalar(obj, Int64)) {
+    return AppendIntegerScalar<PyInt64ScalarObject>(obj, builder);
+  } else if (PyArray_IsScalar(obj, UByte)) {
+    return AppendIntegerScalar<PyUByteScalarObject>(obj, builder);
+  } else if (PyArray_IsScalar(obj, UShort)) {
+    return AppendIntegerScalar<PyUShortScalarObject>(obj, builder);
+  } else if (PyArray_IsScalar(obj, UInt)) {
+    return AppendIntegerScalar<PyUIntScalarObject>(obj, builder);
+  } else if (PyArray_IsScalar(obj, ULong)) {
+    // Unsigned kinds that may exceed int64 range go through the checked path.
+    return AppendLargeUnsignedScalar<PyULongScalarObject>(obj, builder);
+  } else if (PyArray_IsScalar(obj, ULongLong)) {
+    return AppendLargeUnsignedScalar<PyULongLongScalarObject>(obj, builder);
+  } else if (PyArray_IsScalar(obj, UInt64)) {
+    return AppendLargeUnsignedScalar<PyUInt64ScalarObject>(obj, builder);
+  }
+  return Status::NotImplemented("Numpy scalar type not recognized");
+}
+
+// Dispatch a single Python object to the matching SequenceBuilder slot.
+// Out-of-band payloads (tensors, sparse tensors, ndarrays, buffers) are
+// pushed onto `blobs_out` and only their index is stored in the sequence.
+// Any unrecognized type is routed through the context's custom serialization
+// callback, which must produce a dict to serialize instead.
+Status Append(PyObject* context, PyObject* elem, SequenceBuilder* builder,
+              int32_t recursion_depth, SerializedPyObject* blobs_out) {
+  // The bool case must precede the int case (PyInt_Check passes for bools)
+  if (PyBool_Check(elem)) {
+    RETURN_NOT_OK(builder->AppendBool(elem == Py_True));
+  } else if (PyArray_DescrFromScalar(elem)->type_num == NPY_HALF) {  // NOTE(review): called on arbitrary objects, relying on numpy to map non-scalars to a non-HALF descr — confirm
+    npy_half halffloat = reinterpret_cast<PyHalfScalarObject*>(elem)->obval;
+    RETURN_NOT_OK(builder->AppendHalfFloat(halffloat));
+  } else if (PyFloat_Check(elem)) {
+    RETURN_NOT_OK(builder->AppendDouble(PyFloat_AS_DOUBLE(elem)));
+  } else if (PyLong_Check(elem)) {
+    int overflow = 0;
+    int64_t data = PyLong_AsLongLongAndOverflow(elem, &overflow);
+    if (!overflow) {
+      RETURN_NOT_OK(builder->AppendInt64(data));
+    } else {
+      // Integers outside int64 range fall back to the custom callback.
+      // Attempt to serialize the object using the custom callback.
+      PyObject* serialized_object;
+      // The reference count of serialized_object will be decremented in SerializeDict
+      RETURN_NOT_OK(CallSerializeCallback(context, elem, &serialized_object));
+      RETURN_NOT_OK(
+          builder->AppendDict(context, serialized_object, recursion_depth, blobs_out));
+    }
+  } else if (PyBytes_Check(elem)) {
+    auto data = reinterpret_cast<uint8_t*>(PyBytes_AS_STRING(elem));
+    int32_t size = -1;
+    // CastSize guards against payloads exceeding the 32-bit offset limit.
+    RETURN_NOT_OK(internal::CastSize(PyBytes_GET_SIZE(elem), &size));
+    RETURN_NOT_OK(builder->AppendBytes(data, size));
+  } else if (PyUnicode_Check(elem)) {
+    ARROW_ASSIGN_OR_RAISE(auto view, PyBytesView::FromUnicode(elem));
+    int32_t size = -1;
+    RETURN_NOT_OK(internal::CastSize(view.size, &size));
+    RETURN_NOT_OK(builder->AppendString(view.bytes, size));
+  } else if (PyList_CheckExact(elem)) {
+    RETURN_NOT_OK(builder->AppendList(context, elem, recursion_depth, blobs_out));
+  } else if (PyDict_CheckExact(elem)) {
+    RETURN_NOT_OK(builder->AppendDict(context, elem, recursion_depth, blobs_out));
+  } else if (PyTuple_CheckExact(elem)) {
+    RETURN_NOT_OK(builder->AppendTuple(context, elem, recursion_depth, blobs_out));
+  } else if (PySet_Check(elem)) {
+    RETURN_NOT_OK(builder->AppendSet(context, elem, recursion_depth, blobs_out));
+  } else if (PyArray_IsScalar(elem, Generic)) {
+    RETURN_NOT_OK(AppendScalar(elem, builder));
+  } else if (PyArray_CheckExact(elem)) {
+    RETURN_NOT_OK(AppendArray(context, reinterpret_cast<PyArrayObject*>(elem), builder,
+                              recursion_depth, blobs_out));
+  } else if (elem == Py_None) {
+    RETURN_NOT_OK(builder->AppendNone());
+  } else if (PyDateTime_Check(elem)) {
+    // Stored as microseconds since epoch in a Date64 child.
+    PyDateTime_DateTime* datetime = reinterpret_cast<PyDateTime_DateTime*>(elem);
+    RETURN_NOT_OK(builder->AppendDate64(internal::PyDateTime_to_us(datetime)));
+  } else if (is_buffer(elem)) {
+    RETURN_NOT_OK(builder->AppendBuffer(static_cast<int32_t>(blobs_out->buffers.size())));
+    ARROW_ASSIGN_OR_RAISE(auto buffer, unwrap_buffer(elem));
+    blobs_out->buffers.push_back(buffer);
+  } else if (is_tensor(elem)) {
+    RETURN_NOT_OK(builder->AppendTensor(static_cast<int32_t>(blobs_out->tensors.size())));
+    ARROW_ASSIGN_OR_RAISE(auto tensor, unwrap_tensor(elem));
+    blobs_out->tensors.push_back(tensor);
+  } else if (is_sparse_coo_tensor(elem)) {
+    RETURN_NOT_OK(builder->AppendSparseCOOTensor(
+        static_cast<int32_t>(blobs_out->sparse_tensors.size())));
+    ARROW_ASSIGN_OR_RAISE(auto tensor, unwrap_sparse_coo_tensor(elem));
+    blobs_out->sparse_tensors.push_back(tensor);
+  } else if (is_sparse_csr_matrix(elem)) {
+    RETURN_NOT_OK(builder->AppendSparseCSRMatrix(
+        static_cast<int32_t>(blobs_out->sparse_tensors.size())));
+    ARROW_ASSIGN_OR_RAISE(auto matrix, unwrap_sparse_csr_matrix(elem));
+    blobs_out->sparse_tensors.push_back(matrix);
+  } else if (is_sparse_csc_matrix(elem)) {
+    RETURN_NOT_OK(builder->AppendSparseCSCMatrix(
+        static_cast<int32_t>(blobs_out->sparse_tensors.size())));
+    ARROW_ASSIGN_OR_RAISE(auto matrix, unwrap_sparse_csc_matrix(elem));
+    blobs_out->sparse_tensors.push_back(matrix);
+  } else if (is_sparse_csf_tensor(elem)) {
+    RETURN_NOT_OK(builder->AppendSparseCSFTensor(
+        static_cast<int32_t>(blobs_out->sparse_tensors.size())));
+    ARROW_ASSIGN_OR_RAISE(auto tensor, unwrap_sparse_csf_tensor(elem));
+    blobs_out->sparse_tensors.push_back(tensor);
+  } else {
+    // Attempt to serialize the object using the custom callback.
+    PyObject* serialized_object;
+    // The reference count of serialized_object will be decremented in SerializeDict
+    RETURN_NOT_OK(CallSerializeCallback(context, elem, &serialized_object));
+    RETURN_NOT_OK(
+        builder->AppendDict(context, serialized_object, recursion_depth, blobs_out));
+  }
+  return Status::OK();
+}
+
+// Append a numpy ndarray. Supported numeric dtypes are captured as Arrow
+// tensors (zero-copy where possible via NdarrayToTensor) and referenced by
+// index; any other dtype falls back to the custom serialization callback.
+Status AppendArray(PyObject* context, PyArrayObject* array, SequenceBuilder* builder,
+                   int32_t recursion_depth, SerializedPyObject* blobs_out) {
+  int dtype = PyArray_TYPE(array);
+  switch (dtype) {
+    case NPY_UINT8:
+    case NPY_INT8:
+    case NPY_UINT16:
+    case NPY_INT16:
+    case NPY_UINT32:
+    case NPY_INT32:
+    case NPY_UINT64:
+    case NPY_INT64:
+    case NPY_HALF:
+    case NPY_FLOAT:
+    case NPY_DOUBLE: {
+      RETURN_NOT_OK(
+          builder->AppendNdarray(static_cast<int32_t>(blobs_out->ndarrays.size())));
+      std::shared_ptr<Tensor> tensor;
+      // Uses the process default pool regardless of any caller-supplied pool.
+      RETURN_NOT_OK(NdarrayToTensor(default_memory_pool(),
+                                    reinterpret_cast<PyObject*>(array), {}, &tensor));
+      blobs_out->ndarrays.push_back(tensor);
+    } break;
+    default: {
+      PyObject* serialized_object;
+      // The reference count of serialized_object will be decremented in SerializeDict
+      RETURN_NOT_OK(CallSerializeCallback(context, reinterpret_cast<PyObject*>(array),
+                                          &serialized_object));
+      RETURN_NOT_OK(builder->AppendDict(context, serialized_object, recursion_depth + 1,
+                                        blobs_out));
+    }
+  }
+  return Status::OK();
+}
+
+// Wrap the finished union array in a single-column RecordBatch whose only
+// field is named "list".
+std::shared_ptr<RecordBatch> MakeBatch(std::shared_ptr<Array> data) {
+  auto field = std::make_shared<Field>("list", data->type());
+  auto schema = ::arrow::schema({field});
+  return RecordBatch::Make(schema, data->length(), {data});
+}
+
+// Serialize an iterable of Python objects into `out`. Holds the GIL for the
+// whole pass since every element is inspected via the CPython API.
+Status SerializeObject(PyObject* context, PyObject* sequence, SerializedPyObject* out) {
+  PyAcquireGIL lock;
+  SequenceBuilder builder;
+  RETURN_NOT_OK(internal::VisitIterable(
+      sequence, [&](PyObject* obj, bool* keep_going /* unused */) {
+        return Append(context, obj, &builder, 0, out);
+      }));
+  std::shared_ptr<Array> array;
+  RETURN_NOT_OK(builder.Finish(&array));
+  out->batch = MakeBatch(array);
+  return Status::OK();
+}
+
+// Serialize a single tensor as a one-element sequence that references it by
+// ndarray index; the tensor body travels out-of-band in out->ndarrays.
+Status SerializeNdarray(std::shared_ptr<Tensor> tensor, SerializedPyObject* out) {
+  std::shared_ptr<Array> array;
+  SequenceBuilder builder;
+  RETURN_NOT_OK(builder.AppendNdarray(static_cast<int32_t>(out->ndarrays.size())));
+  out->ndarrays.push_back(tensor);
+  RETURN_NOT_OK(builder.Finish(&array));
+  out->batch = MakeBatch(array);
+  return Status::OK();
+}
+
+// Write only the serialization framing for an ndarray of the given
+// type/shape: a placeholder tensor backed by a null buffer of
+// `tensor_num_bytes` is serialized, so the stream describes the body without
+// actually containing its data.
+Status WriteNdarrayHeader(std::shared_ptr<DataType> dtype,
+                          const std::vector<int64_t>& shape, int64_t tensor_num_bytes,
+                          io::OutputStream* dst) {
+  auto empty_tensor = std::make_shared<Tensor>(
+      dtype, std::make_shared<Buffer>(nullptr, tensor_num_bytes), shape);
+  SerializedPyObject serialized_tensor;
+  RETURN_NOT_OK(SerializeNdarray(empty_tensor, &serialized_tensor));
+  return serialized_tensor.WriteTo(dst);
+}
+
+// Capture the default IPC write options at construction time.
+SerializedPyObject::SerializedPyObject()
+    : ipc_options(ipc::IpcWriteOptions::Defaults()) {}
+
+// Write the full serialized payload to `dst`: four raw int32 counts, the
+// structure RecordBatch, then tensors, sparse tensors, ndarrays and raw
+// buffers, each aligned for zero-copy reads.
+Status SerializedPyObject::WriteTo(io::OutputStream* dst) {
+  int32_t num_tensors = static_cast<int32_t>(this->tensors.size());
+  int32_t num_sparse_tensors = static_cast<int32_t>(this->sparse_tensors.size());
+  int32_t num_ndarrays = static_cast<int32_t>(this->ndarrays.size());
+  int32_t num_buffers = static_cast<int32_t>(this->buffers.size());
+  // The counts are written in native byte order/width; the reader must use
+  // the matching layout.
+  RETURN_NOT_OK(
+      dst->Write(reinterpret_cast<const uint8_t*>(&num_tensors), sizeof(int32_t)));
+  RETURN_NOT_OK(
+      dst->Write(reinterpret_cast<const uint8_t*>(&num_sparse_tensors), sizeof(int32_t)));
+  RETURN_NOT_OK(
+      dst->Write(reinterpret_cast<const uint8_t*>(&num_ndarrays), sizeof(int32_t)));
+  RETURN_NOT_OK(
+      dst->Write(reinterpret_cast<const uint8_t*>(&num_buffers), sizeof(int32_t)));
+
+  // Align stream to 8-byte offset
+  RETURN_NOT_OK(ipc::AlignStream(dst, ipc::kArrowIpcAlignment));
+  RETURN_NOT_OK(ipc::WriteRecordBatchStream({this->batch}, this->ipc_options, dst));
+
+  // Align stream to 64-byte offset so tensor bodies are 64-byte aligned
+  RETURN_NOT_OK(ipc::AlignStream(dst, ipc::kTensorAlignment));
+
+  int32_t metadata_length;
+  int64_t body_length;
+  for (const auto& tensor : this->tensors) {
+    RETURN_NOT_OK(ipc::WriteTensor(*tensor, dst, &metadata_length, &body_length));
+    RETURN_NOT_OK(ipc::AlignStream(dst, ipc::kTensorAlignment));
+  }
+
+  for (const auto& sparse_tensor : this->sparse_tensors) {
+    RETURN_NOT_OK(
+        ipc::WriteSparseTensor(*sparse_tensor, dst, &metadata_length, &body_length));
+    RETURN_NOT_OK(ipc::AlignStream(dst, ipc::kTensorAlignment));
+  }
+
+  for (const auto& tensor : this->ndarrays) {
+    RETURN_NOT_OK(ipc::WriteTensor(*tensor, dst, &metadata_length, &body_length));
+    RETURN_NOT_OK(ipc::AlignStream(dst, ipc::kTensorAlignment));
+  }
+
+  // Raw buffers are written unaligned as (int64 size, bytes) pairs.
+  for (const auto& buffer : this->buffers) {
+    int64_t size = buffer->size();
+    RETURN_NOT_OK(dst->Write(reinterpret_cast<const uint8_t*>(&size), sizeof(int64_t)));
+    RETURN_NOT_OK(dst->Write(buffer->data(), size));
+  }
+
+  return Status::OK();
+}
+
+namespace {
+
+// Build a Python dict summarizing sparse tensor counts per format
+// ("coo"/"csr"/"csc"/"csf") plus "ndim_csf", the summed dimensionality of all
+// CSF tensors. On success *out receives a new reference to the dict.
+Status CountSparseTensors(
+    const std::vector<std::shared_ptr<SparseTensor>>& sparse_tensors, PyObject** out) {
+  OwnedRef num_sparse_tensors(PyDict_New());
+  size_t num_coo = 0;
+  size_t num_csr = 0;
+  size_t num_csc = 0;
+  size_t num_csf = 0;
+  size_t ndim_csf = 0;
+
+  for (const auto& sparse_tensor : sparse_tensors) {
+    switch (sparse_tensor->format_id()) {
+      case SparseTensorFormat::COO:
+        ++num_coo;
+        break;
+      case SparseTensorFormat::CSR:
+        ++num_csr;
+        break;
+      case SparseTensorFormat::CSC:
+        ++num_csc;
+        break;
+      case SparseTensorFormat::CSF:
+        ++num_csf;
+        ndim_csf += sparse_tensor->ndim();
+        break;
+    }
+  }
+
+  // NOTE(review): PyDict_SetItemString does not steal its value reference, so
+  // each PyLong_FromSize_t result leaks one reference here — confirm whether
+  // this is accepted or should use OwnedRef per value.
+  PyDict_SetItemString(num_sparse_tensors.obj(), "coo", PyLong_FromSize_t(num_coo));
+  PyDict_SetItemString(num_sparse_tensors.obj(), "csr", PyLong_FromSize_t(num_csr));
+  PyDict_SetItemString(num_sparse_tensors.obj(), "csc", PyLong_FromSize_t(num_csc));
+  PyDict_SetItemString(num_sparse_tensors.obj(), "csf", PyLong_FromSize_t(num_csf));
+  PyDict_SetItemString(num_sparse_tensors.obj(), "ndim_csf", PyLong_FromSize_t(ndim_csf));
+  RETURN_IF_PYERROR();
+
+  *out = num_sparse_tensors.detach();
+  return Status::OK();
+}
+
+} // namespace
+
+// Expose the serialized object as a Python dict of counts plus a "data" list
+// of Buffer components (structure batch, then tensor/sparse/ndarray
+// metadata+body buffers, then raw buffers), with minimal copying.
+Status SerializedPyObject::GetComponents(MemoryPool* memory_pool, PyObject** out) {
+  PyAcquireGIL py_gil;
+
+  OwnedRef result(PyDict_New());
+  PyObject* buffers = PyList_New(0);
+  PyObject* num_sparse_tensors = nullptr;
+
+  // TODO(wesm): Not sure how pedantic we need to be about checking the return
+  // values of these functions. There are other places where we do not check
+  // PyDict_SetItem/SetItemString return value, but these failures would be
+  // quite esoteric
+  PyDict_SetItemString(result.obj(), "num_tensors",
+                       PyLong_FromSize_t(this->tensors.size()));
+  RETURN_NOT_OK(CountSparseTensors(this->sparse_tensors, &num_sparse_tensors));
+  PyDict_SetItemString(result.obj(), "num_sparse_tensors", num_sparse_tensors);
+  PyDict_SetItemString(result.obj(), "ndim_csf", num_sparse_tensors);  // NOTE(review): stores the same counts dict under a second key (not an int) — looks like a leftover; confirm against consumers
+  // NOTE(review): the new reference returned by CountSparseTensors is never
+  // released after these inserts — confirm intended.
+  PyDict_SetItemString(result.obj(), "num_ndarrays",
+                       PyLong_FromSize_t(this->ndarrays.size()));
+  PyDict_SetItemString(result.obj(), "num_buffers",
+                       PyLong_FromSize_t(this->buffers.size()));
+  PyDict_SetItemString(result.obj(), "data", buffers);
+  RETURN_IF_PYERROR();
+
+  // The "data" entry now owns the list; drop our reference.
+  Py_DECREF(buffers);
+
+  // Wrap a C++ Buffer and append it to the Python "data" list.
+  auto PushBuffer = [&buffers](const std::shared_ptr<Buffer>& buffer) {
+    PyObject* wrapped_buffer = wrap_buffer(buffer);
+    RETURN_IF_PYERROR();
+    if (PyList_Append(buffers, wrapped_buffer) < 0) {
+      Py_DECREF(wrapped_buffer);
+      RETURN_IF_PYERROR();
+    }
+    Py_DECREF(wrapped_buffer);
+    return Status::OK();
+  };
+
+  constexpr int64_t kInitialCapacity = 1024;
+
+  // Write the record batch describing the object structure
+  // (GIL released around the pure-C++ IPC serialization below)
+  py_gil.release();
+  ARROW_ASSIGN_OR_RAISE(auto stream,
+                        io::BufferOutputStream::Create(kInitialCapacity, memory_pool));
+  RETURN_NOT_OK(
+      ipc::WriteRecordBatchStream({this->batch}, this->ipc_options, stream.get()));
+  ARROW_ASSIGN_OR_RAISE(auto buffer, stream->Finish());
+  py_gil.acquire();
+
+  RETURN_NOT_OK(PushBuffer(buffer));
+
+  // For each tensor, get a metadata buffer and a buffer for the body
+  for (const auto& tensor : this->tensors) {
+    ARROW_ASSIGN_OR_RAISE(std::unique_ptr<ipc::Message> message,
+                          ipc::GetTensorMessage(*tensor, memory_pool));
+    RETURN_NOT_OK(PushBuffer(message->metadata()));
+    RETURN_NOT_OK(PushBuffer(message->body()));
+  }
+
+  // For each sparse tensor, get a metadata buffer and buffers containing index and data
+  for (const auto& sparse_tensor : this->sparse_tensors) {
+    ipc::IpcPayload payload;
+    RETURN_NOT_OK(ipc::GetSparseTensorPayload(*sparse_tensor, memory_pool, &payload));
+    RETURN_NOT_OK(PushBuffer(payload.metadata));
+    for (const auto& body : payload.body_buffers) {
+      RETURN_NOT_OK(PushBuffer(body));
+    }
+  }
+
+  // For each ndarray, get a metadata buffer and a buffer for the body
+  for (const auto& ndarray : this->ndarrays) {
+    ARROW_ASSIGN_OR_RAISE(std::unique_ptr<ipc::Message> message,
+                          ipc::GetTensorMessage(*ndarray, memory_pool));
+    RETURN_NOT_OK(PushBuffer(message->metadata()));
+    RETURN_NOT_OK(PushBuffer(message->body()));
+  }
+
+  for (const auto& buf : this->buffers) {
+    RETURN_NOT_OK(PushBuffer(buf));
+  }
+
+  *out = result.detach();
+  return Status::OK();
+}
+
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/serialize.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/serialize.h
new file mode 100644
index 0000000000..fd207d3e06
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/serialize.h
@@ -0,0 +1,145 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+#include <vector>
+
+#include "arrow/ipc/options.h"
+#include "arrow/python/visibility.h"
+#include "arrow/sparse_tensor.h"
+#include "arrow/status.h"
+
+// Forward declaring PyObject, see
+// https://mail.python.org/pipermail/python-dev/2003-August/037601.html
+#ifndef PyObject_HEAD
+struct _object;
+typedef _object PyObject;
+#endif
+
+namespace arrow {
+
+class Buffer;
+class DataType;
+class MemoryPool;
+class RecordBatch;
+class Tensor;
+
+namespace io {
+
+class OutputStream;
+
+} // namespace io
+
+namespace py {
+
+struct ARROW_PYTHON_EXPORT SerializedPyObject {
+ std::shared_ptr<RecordBatch> batch;
+ std::vector<std::shared_ptr<Tensor>> tensors;
+ std::vector<std::shared_ptr<SparseTensor>> sparse_tensors;
+ std::vector<std::shared_ptr<Tensor>> ndarrays;
+ std::vector<std::shared_ptr<Buffer>> buffers;
+ ipc::IpcWriteOptions ipc_options;
+
+ SerializedPyObject();
+
+ /// \brief Write serialized Python object to OutputStream
+ /// \param[in,out] dst an OutputStream
+ /// \return Status
+ Status WriteTo(io::OutputStream* dst);
+
+ /// \brief Convert SerializedPyObject to a dict containing the message
+ /// components as Buffer instances with minimal memory allocation
+ ///
+ /// {
+ /// 'num_tensors': M,
+ /// 'num_sparse_tensors': N,
+ /// 'num_buffers': K,
+ /// 'data': [Buffer]
+ /// }
+ ///
+ /// Each tensor is written as two buffers, one for the metadata and one for
+ /// the body. Therefore, the number of buffers in 'data' is 2 * M + 2 * N + K + 1,
+ /// with the first buffer containing the serialized record batch containing
+ /// the UnionArray that describes the whole object
+ Status GetComponents(MemoryPool* pool, PyObject** out);
+};
+
+/// \brief Serialize Python sequence as a SerializedPyObject.
+/// \param[in] context Serialization context which contains custom serialization
+/// and deserialization callbacks. Can be any Python object with a
+/// _serialize_callback method for serialization and a _deserialize_callback
+/// method for deserialization. If context is None, no custom serialization
+/// will be attempted.
+/// \param[in] sequence A Python sequence object to serialize to Arrow data
+/// structures
+/// \param[out] out The serialized representation
+/// \return Status
+///
+/// Release GIL before calling
+ARROW_PYTHON_EXPORT
+Status SerializeObject(PyObject* context, PyObject* sequence, SerializedPyObject* out);
+
+/// \brief Serialize an Arrow Tensor as a SerializedPyObject.
+/// \param[in] tensor Tensor to be serialized
+/// \param[out] out The serialized representation
+/// \return Status
+ARROW_PYTHON_EXPORT
+Status SerializeTensor(std::shared_ptr<Tensor> tensor, py::SerializedPyObject* out);
+
+/// \brief Write the Tensor metadata header to an OutputStream.
+/// \param[in] dtype DataType of the Tensor
+/// \param[in] shape The shape of the tensor
+/// \param[in] tensor_num_bytes The length of the Tensor data in bytes
+/// \param[in] dst The OutputStream to write the Tensor header to
+/// \return Status
+ARROW_PYTHON_EXPORT
+Status WriteNdarrayHeader(std::shared_ptr<DataType> dtype,
+ const std::vector<int64_t>& shape, int64_t tensor_num_bytes,
+ io::OutputStream* dst);
+
+struct PythonType {
+ enum type {
+ NONE,
+ BOOL,
+ INT,
+ PY2INT, // Kept for compatibility
+ BYTES,
+ STRING,
+ HALF_FLOAT,
+ FLOAT,
+ DOUBLE,
+ DATE64,
+ LIST,
+ DICT,
+ TUPLE,
+ SET,
+ TENSOR,
+ NDARRAY,
+ BUFFER,
+ SPARSECOOTENSOR,
+ SPARSECSRMATRIX,
+ SPARSECSCMATRIX,
+ SPARSECSFTENSOR,
+ NUM_PYTHON_TYPES
+ };
+};
+
+} // namespace py
+
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/type_traits.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/type_traits.h
new file mode 100644
index 0000000000..a941577f76
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/type_traits.h
@@ -0,0 +1,350 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Internal header
+
+#pragma once
+
+#include "arrow/python/platform.h"
+
+#include <cstdint>
+#include <limits>
+
+#include "arrow/python/numpy_interop.h"
+
+#include <numpy/halffloat.h>
+
+#include "arrow/type_fwd.h"
+#include "arrow/util/logging.h"
+
+namespace arrow {
+namespace py {
+
+static constexpr int64_t kPandasTimestampNull = std::numeric_limits<int64_t>::min();
+constexpr int64_t kNanosecondsInDay = 86400000000000LL;
+
+namespace internal {
+
+//
+// Type traits for Numpy -> Arrow equivalence
+//
+template <int TYPE>
+struct npy_traits {};
+
+template <>
+struct npy_traits<NPY_BOOL> {
+ typedef uint8_t value_type;
+ using TypeClass = BooleanType;
+ using BuilderClass = BooleanBuilder;
+
+ static constexpr bool supports_nulls = false;
+ static inline bool isnull(uint8_t v) { return false; }
+};
+
+#define NPY_INT_DECL(TYPE, CapType, T) \
+ template <> \
+ struct npy_traits<NPY_##TYPE> { \
+ typedef T value_type; \
+ using TypeClass = CapType##Type; \
+ using BuilderClass = CapType##Builder; \
+ \
+ static constexpr bool supports_nulls = false; \
+ static inline bool isnull(T v) { return false; } \
+ };
+
+NPY_INT_DECL(INT8, Int8, int8_t);
+NPY_INT_DECL(INT16, Int16, int16_t);
+NPY_INT_DECL(INT32, Int32, int32_t);
+NPY_INT_DECL(INT64, Int64, int64_t);
+
+NPY_INT_DECL(UINT8, UInt8, uint8_t);
+NPY_INT_DECL(UINT16, UInt16, uint16_t);
+NPY_INT_DECL(UINT32, UInt32, uint32_t);
+NPY_INT_DECL(UINT64, UInt64, uint64_t);
+
+#if !NPY_INT32_IS_INT && NPY_BITSOF_INT == 32
+NPY_INT_DECL(INT, Int32, int32_t);
+NPY_INT_DECL(UINT, UInt32, uint32_t);
+#endif
+#if !NPY_INT64_IS_LONG_LONG && NPY_BITSOF_LONGLONG == 64
+NPY_INT_DECL(LONGLONG, Int64, int64_t);
+NPY_INT_DECL(ULONGLONG, UInt64, uint64_t);
+#endif
+
+template <>
+struct npy_traits<NPY_FLOAT16> {
+ typedef npy_half value_type;
+ using TypeClass = HalfFloatType;
+ using BuilderClass = HalfFloatBuilder;
+
+ static constexpr npy_half na_sentinel = NPY_HALF_NAN;
+
+ static constexpr bool supports_nulls = true;
+
+ static inline bool isnull(npy_half v) { return v == NPY_HALF_NAN; }
+};
+
+template <>
+struct npy_traits<NPY_FLOAT32> {
+ typedef float value_type;
+ using TypeClass = FloatType;
+ using BuilderClass = FloatBuilder;
+
+ // We need to use quiet_NaN here instead of the NAN macro as on Windows
+ // the NAN macro leads to "division-by-zero" compile-time error with clang.
+ static constexpr float na_sentinel = std::numeric_limits<float>::quiet_NaN();
+
+ static constexpr bool supports_nulls = true;
+
+ static inline bool isnull(float v) { return v != v; }
+};
+
+template <>
+struct npy_traits<NPY_FLOAT64> {
+ typedef double value_type;
+ using TypeClass = DoubleType;
+ using BuilderClass = DoubleBuilder;
+
+ static constexpr double na_sentinel = std::numeric_limits<double>::quiet_NaN();
+
+ static constexpr bool supports_nulls = true;
+
+ static inline bool isnull(double v) { return v != v; }
+};
+
+template <>
+struct npy_traits<NPY_DATETIME> {
+ typedef int64_t value_type;
+ using TypeClass = TimestampType;
+ using BuilderClass = TimestampBuilder;
+
+ static constexpr bool supports_nulls = true;
+
+ static inline bool isnull(int64_t v) {
+ // NaT = -2**63
+ // = -0x8000000000000000
+ // = -9223372036854775808;
+ // = std::numeric_limits<int64_t>::min()
+ return v == std::numeric_limits<int64_t>::min();
+ }
+};
+
+template <>
+struct npy_traits<NPY_TIMEDELTA> {
+ typedef int64_t value_type;
+ using TypeClass = DurationType;
+ using BuilderClass = DurationBuilder;
+
+ static constexpr bool supports_nulls = true;
+
+ static inline bool isnull(int64_t v) {
+ // NaT = -2**63 = std::numeric_limits<int64_t>::min()
+ return v == std::numeric_limits<int64_t>::min();
+ }
+};
+
+template <>
+struct npy_traits<NPY_OBJECT> {
+ typedef PyObject* value_type;
+ static constexpr bool supports_nulls = true;
+
+ static inline bool isnull(PyObject* v) { return v == Py_None; }
+};
+
+//
+// Type traits for Arrow -> Numpy equivalence
+// Note *supports_nulls* means the equivalent Numpy type support nulls
+//
+template <int TYPE>
+struct arrow_traits {};
+
+template <>
+struct arrow_traits<Type::BOOL> {
+ static constexpr int npy_type = NPY_BOOL;
+ static constexpr bool supports_nulls = false;
+ typedef typename npy_traits<NPY_BOOL>::value_type T;
+};
+
+#define INT_DECL(TYPE) \
+ template <> \
+ struct arrow_traits<Type::TYPE> { \
+ static constexpr int npy_type = NPY_##TYPE; \
+ static constexpr bool supports_nulls = false; \
+ static constexpr double na_value = std::numeric_limits<double>::quiet_NaN(); \
+ typedef typename npy_traits<NPY_##TYPE>::value_type T; \
+ };
+
+INT_DECL(INT8);
+INT_DECL(INT16);
+INT_DECL(INT32);
+INT_DECL(INT64);
+INT_DECL(UINT8);
+INT_DECL(UINT16);
+INT_DECL(UINT32);
+INT_DECL(UINT64);
+
+template <>
+struct arrow_traits<Type::HALF_FLOAT> {
+ static constexpr int npy_type = NPY_FLOAT16;
+ static constexpr bool supports_nulls = true;
+ static constexpr uint16_t na_value = NPY_HALF_NAN;
+ typedef typename npy_traits<NPY_FLOAT16>::value_type T;
+};
+
+template <>
+struct arrow_traits<Type::FLOAT> {
+ static constexpr int npy_type = NPY_FLOAT32;
+ static constexpr bool supports_nulls = true;
+ static constexpr float na_value = std::numeric_limits<float>::quiet_NaN();
+ typedef typename npy_traits<NPY_FLOAT32>::value_type T;
+};
+
+template <>
+struct arrow_traits<Type::DOUBLE> {
+ static constexpr int npy_type = NPY_FLOAT64;
+ static constexpr bool supports_nulls = true;
+ static constexpr double na_value = std::numeric_limits<double>::quiet_NaN();
+ typedef typename npy_traits<NPY_FLOAT64>::value_type T;
+};
+
+template <>
+struct arrow_traits<Type::TIMESTAMP> {
+ static constexpr int npy_type = NPY_DATETIME;
+ static constexpr int64_t npy_shift = 1;
+
+ static constexpr bool supports_nulls = true;
+ static constexpr int64_t na_value = kPandasTimestampNull;
+ typedef typename npy_traits<NPY_DATETIME>::value_type T;
+};
+
+template <>
+struct arrow_traits<Type::DURATION> {
+ static constexpr int npy_type = NPY_TIMEDELTA;
+ static constexpr int64_t npy_shift = 1;
+
+ static constexpr bool supports_nulls = true;
+ static constexpr int64_t na_value = kPandasTimestampNull;
+ typedef typename npy_traits<NPY_TIMEDELTA>::value_type T;
+};
+
+template <>
+struct arrow_traits<Type::DATE32> {
+ // Data stores as FR_D day unit
+ static constexpr int npy_type = NPY_DATETIME;
+ static constexpr int64_t npy_shift = 1;
+
+ static constexpr bool supports_nulls = true;
+ typedef typename npy_traits<NPY_DATETIME>::value_type T;
+
+ static constexpr int64_t na_value = kPandasTimestampNull;
+ static inline bool isnull(int64_t v) { return npy_traits<NPY_DATETIME>::isnull(v); }
+};
+
+template <>
+struct arrow_traits<Type::DATE64> {
+ // Data stores as FR_D day unit
+ static constexpr int npy_type = NPY_DATETIME;
+
+ // There are 1000 * 60 * 60 * 24 = 86400000ms in a day
+ static constexpr int64_t npy_shift = 86400000;
+
+ static constexpr bool supports_nulls = true;
+ typedef typename npy_traits<NPY_DATETIME>::value_type T;
+
+ static constexpr int64_t na_value = kPandasTimestampNull;
+ static inline bool isnull(int64_t v) { return npy_traits<NPY_DATETIME>::isnull(v); }
+};
+
+template <>
+struct arrow_traits<Type::TIME32> {
+ static constexpr int npy_type = NPY_OBJECT;
+ static constexpr bool supports_nulls = true;
+ static constexpr int64_t na_value = kPandasTimestampNull;
+ typedef typename npy_traits<NPY_DATETIME>::value_type T;
+};
+
+template <>
+struct arrow_traits<Type::TIME64> {
+ static constexpr int npy_type = NPY_OBJECT;
+ static constexpr bool supports_nulls = true;
+ typedef typename npy_traits<NPY_DATETIME>::value_type T;
+};
+
+template <>
+struct arrow_traits<Type::STRING> {
+ static constexpr int npy_type = NPY_OBJECT;
+ static constexpr bool supports_nulls = true;
+};
+
+template <>
+struct arrow_traits<Type::BINARY> {
+ static constexpr int npy_type = NPY_OBJECT;
+ static constexpr bool supports_nulls = true;
+};
+
+static inline NPY_DATETIMEUNIT NumPyFrequency(TimeUnit::type unit) {
+ switch (unit) {
+ case TimestampType::Unit::SECOND:
+ return NPY_FR_s;
+ case TimestampType::Unit::MILLI:
+ return NPY_FR_ms;
+ break;
+ case TimestampType::Unit::MICRO:
+ return NPY_FR_us;
+ default:
+ // NANO
+ return NPY_FR_ns;
+ }
+}
+
+static inline int NumPyTypeSize(int npy_type) {
+ npy_type = fix_numpy_type_num(npy_type);
+
+ switch (npy_type) {
+ case NPY_BOOL:
+ case NPY_INT8:
+ case NPY_UINT8:
+ return 1;
+ case NPY_INT16:
+ case NPY_UINT16:
+ return 2;
+ case NPY_INT32:
+ case NPY_UINT32:
+ return 4;
+ case NPY_INT64:
+ case NPY_UINT64:
+ return 8;
+ case NPY_FLOAT16:
+ return 2;
+ case NPY_FLOAT32:
+ return 4;
+ case NPY_FLOAT64:
+ return 8;
+ case NPY_DATETIME:
+ return 8;
+ case NPY_OBJECT:
+ return sizeof(void*);
+ default:
+ ARROW_CHECK(false) << "unhandled numpy type";
+ break;
+ }
+ return -1;
+}
+
+} // namespace internal
+} // namespace py
+} // namespace arrow
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/python/visibility.h b/contrib/libs/apache/arrow/cpp/src/arrow/python/visibility.h
new file mode 100644
index 0000000000..c0b343c70e
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/python/visibility.h
@@ -0,0 +1,39 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#if defined(_WIN32) || defined(__CYGWIN__) // Windows
+#if defined(_MSC_VER)
+#pragma warning(disable : 4251)
+#else
+#pragma GCC diagnostic ignored "-Wattributes"
+#endif
+
+#ifdef ARROW_STATIC
+#define ARROW_PYTHON_EXPORT
+#elif defined(ARROW_PYTHON_EXPORTING)
+#define ARROW_PYTHON_EXPORT __declspec(dllexport)
+#else
+#define ARROW_PYTHON_EXPORT __declspec(dllimport)
+#endif
+
+#else // Not Windows
+#ifndef ARROW_PYTHON_EXPORT
+#define ARROW_PYTHON_EXPORT __attribute__((visibility("default")))
+#endif
+#endif // Non-Windows
diff --git a/contrib/libs/apache/arrow/cpp/src/arrow/util/converter.h b/contrib/libs/apache/arrow/cpp/src/arrow/util/converter.h
new file mode 100644
index 0000000000..0b29e0f5bc
--- /dev/null
+++ b/contrib/libs/apache/arrow/cpp/src/arrow/util/converter.h
@@ -0,0 +1,411 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "arrow/array.h"
+#include "arrow/chunked_array.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/type_traits.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/make_unique.h"
+#include "arrow/visitor_inline.h"
+
+namespace arrow {
+namespace internal {
+
+template <typename BaseConverter, template <typename...> class ConverterTrait>
+static Result<std::unique_ptr<BaseConverter>> MakeConverter(
+ std::shared_ptr<DataType> type, typename BaseConverter::OptionsType options,
+ MemoryPool* pool);
+
+template <typename Input, typename Options>
+class Converter {
+ public:
+ using Self = Converter<Input, Options>;
+ using InputType = Input;
+ using OptionsType = Options;
+
+ virtual ~Converter() = default;
+
+ Status Construct(std::shared_ptr<DataType> type, OptionsType options,
+ MemoryPool* pool) {
+ type_ = std::move(type);
+ options_ = std::move(options);
+ return Init(pool);
+ }
+
+ virtual Status Append(InputType value) { return Status::NotImplemented("Append"); }
+
+ virtual Status Extend(InputType values, int64_t size, int64_t offset = 0) {
+ return Status::NotImplemented("Extend");
+ }
+
+ virtual Status ExtendMasked(InputType values, InputType mask, int64_t size,
+ int64_t offset = 0) {
+ return Status::NotImplemented("ExtendMasked");
+ }
+
+ const std::shared_ptr<ArrayBuilder>& builder() const { return builder_; }
+
+ const std::shared_ptr<DataType>& type() const { return type_; }
+
+ OptionsType options() const { return options_; }
+
+ bool may_overflow() const { return may_overflow_; }
+
+ bool rewind_on_overflow() const { return rewind_on_overflow_; }
+
+ virtual Status Reserve(int64_t additional_capacity) {
+ return builder_->Reserve(additional_capacity);
+ }
+
+ Status AppendNull() { return builder_->AppendNull(); }
+
+ virtual Result<std::shared_ptr<Array>> ToArray() { return builder_->Finish(); }
+
+ virtual Result<std::shared_ptr<Array>> ToArray(int64_t length) {
+ ARROW_ASSIGN_OR_RAISE(auto arr, this->ToArray());
+ return arr->Slice(0, length);
+ }
+
+ virtual Result<std::shared_ptr<ChunkedArray>> ToChunkedArray() {
+ ARROW_ASSIGN_OR_RAISE(auto array, ToArray());
+ std::vector<std::shared_ptr<Array>> chunks = {std::move(array)};
+ return std::make_shared<ChunkedArray>(chunks);
+ }
+
+ protected:
+ virtual Status Init(MemoryPool* pool) { return Status::OK(); }
+
+ std::shared_ptr<DataType> type_;
+ std::shared_ptr<ArrayBuilder> builder_;
+ OptionsType options_;
+ bool may_overflow_ = false;
+ bool rewind_on_overflow_ = false;
+};
+
+template <typename ArrowType, typename BaseConverter>
+class PrimitiveConverter : public BaseConverter {
+ public:
+ using BuilderType = typename TypeTraits<ArrowType>::BuilderType;
+
+ protected:
+ Status Init(MemoryPool* pool) override {
+ this->builder_ = std::make_shared<BuilderType>(this->type_, pool);
+ // Narrow variable-sized binary types may overflow
+ this->may_overflow_ = is_binary_like(this->type_->id());
+ primitive_type_ = checked_cast<const ArrowType*>(this->type_.get());
+ primitive_builder_ = checked_cast<BuilderType*>(this->builder_.get());
+ return Status::OK();
+ }
+
+ const ArrowType* primitive_type_;
+ BuilderType* primitive_builder_;
+};
+
+template <typename ArrowType, typename BaseConverter,
+ template <typename...> class ConverterTrait>
+class ListConverter : public BaseConverter {
+ public:
+ using BuilderType = typename TypeTraits<ArrowType>::BuilderType;
+ using ConverterType = typename ConverterTrait<ArrowType>::type;
+
+ protected:
+ Status Init(MemoryPool* pool) override {
+ list_type_ = checked_cast<const ArrowType*>(this->type_.get());
+ ARROW_ASSIGN_OR_RAISE(value_converter_,
+ (MakeConverter<BaseConverter, ConverterTrait>(
+ list_type_->value_type(), this->options_, pool)));
+ this->builder_ =
+ std::make_shared<BuilderType>(pool, value_converter_->builder(), this->type_);
+ list_builder_ = checked_cast<BuilderType*>(this->builder_.get());
+ // Narrow list types may overflow
+ this->may_overflow_ = this->rewind_on_overflow_ =
+ sizeof(typename ArrowType::offset_type) < sizeof(int64_t);
+ return Status::OK();
+ }
+
+ const ArrowType* list_type_;
+ BuilderType* list_builder_;
+ std::unique_ptr<BaseConverter> value_converter_;
+};
+
+template <typename BaseConverter, template <typename...> class ConverterTrait>
+class StructConverter : public BaseConverter {
+ public:
+ using ConverterType = typename ConverterTrait<StructType>::type;
+
+ Status Reserve(int64_t additional_capacity) override {
+ ARROW_RETURN_NOT_OK(this->builder_->Reserve(additional_capacity));
+ for (const auto& child : children_) {
+ ARROW_RETURN_NOT_OK(child->Reserve(additional_capacity));
+ }
+ return Status::OK();
+ }
+
+ protected:
+ Status Init(MemoryPool* pool) override {
+ std::unique_ptr<BaseConverter> child_converter;
+ std::vector<std::shared_ptr<ArrayBuilder>> child_builders;
+
+ struct_type_ = checked_cast<const StructType*>(this->type_.get());
+ for (const auto& field : struct_type_->fields()) {
+ ARROW_ASSIGN_OR_RAISE(child_converter,
+ (MakeConverter<BaseConverter, ConverterTrait>(
+ field->type(), this->options_, pool)));
+ this->may_overflow_ |= child_converter->may_overflow();
+ this->rewind_on_overflow_ = this->may_overflow_;
+ child_builders.push_back(child_converter->builder());
+ children_.push_back(std::move(child_converter));
+ }
+
+ this->builder_ =
+ std::make_shared<StructBuilder>(this->type_, pool, std::move(child_builders));
+ struct_builder_ = checked_cast<StructBuilder*>(this->builder_.get());
+
+ return Status::OK();
+ }
+
+ const StructType* struct_type_;
+ StructBuilder* struct_builder_;
+ std::vector<std::unique_ptr<BaseConverter>> children_;
+};
+
+template <typename ValueType, typename BaseConverter>
+class DictionaryConverter : public BaseConverter {
+ public:
+ using BuilderType = DictionaryBuilder<ValueType>;
+
+ protected:
+ Status Init(MemoryPool* pool) override {
+ std::unique_ptr<ArrayBuilder> builder;
+ ARROW_RETURN_NOT_OK(MakeDictionaryBuilder(pool, this->type_, NULLPTR, &builder));
+ this->builder_ = std::move(builder);
+ this->may_overflow_ = false;
+ dict_type_ = checked_cast<const DictionaryType*>(this->type_.get());
+ value_type_ = checked_cast<const ValueType*>(dict_type_->value_type().get());
+ value_builder_ = checked_cast<BuilderType*>(this->builder_.get());
+ return Status::OK();
+ }
+
+ const DictionaryType* dict_type_;
+ const ValueType* value_type_;
+ BuilderType* value_builder_;
+};
+
+template <typename BaseConverter, template <typename...> class ConverterTrait>
+struct MakeConverterImpl {
+ template <typename T, typename ConverterType = typename ConverterTrait<T>::type>
+ Status Visit(const T&) {
+ out.reset(new ConverterType());
+ return out->Construct(std::move(type), std::move(options), pool);
+ }
+
+ Status Visit(const DictionaryType& t) {
+ switch (t.value_type()->id()) {
+#define DICTIONARY_CASE(TYPE) \
+ case TYPE::type_id: \
+ out = internal::make_unique< \
+ typename ConverterTrait<DictionaryType>::template dictionary_type<TYPE>>(); \
+ break;
+ DICTIONARY_CASE(BooleanType);
+ DICTIONARY_CASE(Int8Type);
+ DICTIONARY_CASE(Int16Type);
+ DICTIONARY_CASE(Int32Type);
+ DICTIONARY_CASE(Int64Type);
+ DICTIONARY_CASE(UInt8Type);
+ DICTIONARY_CASE(UInt16Type);
+ DICTIONARY_CASE(UInt32Type);
+ DICTIONARY_CASE(UInt64Type);
+ DICTIONARY_CASE(FloatType);
+ DICTIONARY_CASE(DoubleType);
+ DICTIONARY_CASE(BinaryType);
+ DICTIONARY_CASE(StringType);
+ DICTIONARY_CASE(FixedSizeBinaryType);
+#undef DICTIONARY_CASE
+ default:
+ return Status::NotImplemented("DictionaryArray converter for type ", t.ToString(),
+ " not implemented");
+ }
+ return out->Construct(std::move(type), std::move(options), pool);
+ }
+
+ Status Visit(const DataType& t) { return Status::NotImplemented(t.name()); }
+
+ std::shared_ptr<DataType> type;
+ typename BaseConverter::OptionsType options;
+ MemoryPool* pool;
+ std::unique_ptr<BaseConverter> out;
+};
+
+template <typename BaseConverter, template <typename...> class ConverterTrait>
+static Result<std::unique_ptr<BaseConverter>> MakeConverter(
+ std::shared_ptr<DataType> type, typename BaseConverter::OptionsType options,
+ MemoryPool* pool) {
+ MakeConverterImpl<BaseConverter, ConverterTrait> visitor{
+ std::move(type), std::move(options), pool, NULLPTR};
+ ARROW_RETURN_NOT_OK(VisitTypeInline(*visitor.type, &visitor));
+ return std::move(visitor.out);
+}
+
+template <typename Converter>
+class Chunker {
+ public:
+ using InputType = typename Converter::InputType;
+
+ explicit Chunker(std::unique_ptr<Converter> converter)
+ : converter_(std::move(converter)) {}
+
+ Status Reserve(int64_t additional_capacity) {
+ ARROW_RETURN_NOT_OK(converter_->Reserve(additional_capacity));
+ reserved_ += additional_capacity;
+ return Status::OK();
+ }
+
+ Status AppendNull() {
+ auto status = converter_->AppendNull();
+ if (ARROW_PREDICT_FALSE(status.IsCapacityError())) {
+ if (converter_->builder()->length() == 0) {
+ // Builder length == 0 means the individual element is too large to append.
+ // In this case, no need to try again.
+ return status;
+ }
+ ARROW_RETURN_NOT_OK(FinishChunk());
+ return converter_->AppendNull();
+ }
+ ++length_;
+ return status;
+ }
+
+ Status Append(InputType value) {
+ auto status = converter_->Append(value);
+ if (ARROW_PREDICT_FALSE(status.IsCapacityError())) {
+ if (converter_->builder()->length() == 0) {
+ return status;
+ }
+ ARROW_RETURN_NOT_OK(FinishChunk());
+ return Append(value);
+ }
+ ++length_;
+ return status;
+ }
+
+ Status Extend(InputType values, int64_t size, int64_t offset = 0) {
+ while (offset < size) {
+ auto length_before = converter_->builder()->length();
+ auto status = converter_->Extend(values, size, offset);
+ auto length_after = converter_->builder()->length();
+ auto num_converted = length_after - length_before;
+
+ offset += num_converted;
+ length_ += num_converted;
+
+ if (status.IsCapacityError()) {
+ if (converter_->builder()->length() == 0) {
+ // Builder length == 0 means the individual element is too large to append.
+ // In this case, no need to try again.
+ return status;
+ } else if (converter_->rewind_on_overflow()) {
+ // The list-like and binary-like conversion paths may raise a capacity error,
+ // we need to handle them differently. While the binary-like converters check
+ // the capacity before append/extend the list-like converters just check after
+ // append/extend. Thus depending on the implementation semantics we may need
+ // to rewind (slice) the output chunk by one.
+ length_ -= 1;
+ offset -= 1;
+ }
+ ARROW_RETURN_NOT_OK(FinishChunk());
+ } else if (!status.ok()) {
+ return status;
+ }
+ }
+ return Status::OK();
+ }
+
+ Status ExtendMasked(InputType values, InputType mask, int64_t size,
+ int64_t offset = 0) {
+ while (offset < size) {
+ auto length_before = converter_->builder()->length();
+ auto status = converter_->ExtendMasked(values, mask, size, offset);
+ auto length_after = converter_->builder()->length();
+ auto num_converted = length_after - length_before;
+
+ offset += num_converted;
+ length_ += num_converted;
+
+ if (status.IsCapacityError()) {
+ if (converter_->builder()->length() == 0) {
+ // Builder length == 0 means the individual element is too large to append.
+ // In this case, no need to try again.
+ return status;
+ } else if (converter_->rewind_on_overflow()) {
+ // The list-like and binary-like conversion paths may raise a capacity error,
+ // we need to handle them differently. While the binary-like converters check
+ // the capacity before append/extend the list-like converters just check after
+ // append/extend. Thus depending on the implementation semantics we may need
+ // to rewind (slice) the output chunk by one.
+ length_ -= 1;
+ offset -= 1;
+ }
+ ARROW_RETURN_NOT_OK(FinishChunk());
+ } else if (!status.ok()) {
+ return status;
+ }
+ }
+ return Status::OK();
+ }
+
+ Status FinishChunk() {
+ ARROW_ASSIGN_OR_RAISE(auto chunk, converter_->ToArray(length_));
+ chunks_.push_back(chunk);
+ // Reserve space for the remaining items.
+ // Besides being an optimization, it is also required if the converter's
+ // implementation relies on unsafe builder methods in converter->Append().
+ auto remaining = reserved_ - length_;
+ Reset();
+ return Reserve(remaining);
+ }
+
+ Result<std::shared_ptr<ChunkedArray>> ToChunkedArray() {
+ ARROW_RETURN_NOT_OK(FinishChunk());
+ return std::make_shared<ChunkedArray>(chunks_);
+ }
+
+ protected:
+ void Reset() {
+ converter_->builder()->Reset();
+ length_ = 0;
+ reserved_ = 0;
+ }
+
+ int64_t length_ = 0;
+ int64_t reserved_ = 0;
+ std::unique_ptr<Converter> converter_;
+ std::vector<std::shared_ptr<Array>> chunks_;
+};
+
+template <typename T>
+static Result<std::unique_ptr<Chunker<T>>> MakeChunker(std::unique_ptr<T> converter) {
+ return internal::make_unique<Chunker<T>>(std::move(converter));
+}
+
+} // namespace internal
+} // namespace arrow
diff --git a/contrib/libs/cblas/COPYING b/contrib/libs/cblas/COPYING
new file mode 100644
index 0000000000..d7bf953820
--- /dev/null
+++ b/contrib/libs/cblas/COPYING
@@ -0,0 +1,36 @@
+Copyright (c) 1992-2008 The University of Tennessee. All rights reserved.
+
+$COPYRIGHT$
+
+Additional copyrights may follow
+
+$HEADER$
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+- Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+- Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer listed
+ in this license in the documentation and/or other materials
+ provided with the distribution.
+
+- Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/contrib/libs/cblas/blaswrap.h b/contrib/libs/cblas/blaswrap.h
new file mode 100644
index 0000000000..4780b4001e
--- /dev/null
+++ b/contrib/libs/cblas/blaswrap.h
@@ -0,0 +1,160 @@
+/* CLAPACK 3.0 BLAS wrapper macros
+ * Feb 5, 2000
+ */
+
+#ifndef __BLASWRAP_H
+#define __BLASWRAP_H
+
+#ifndef NO_BLAS_WRAP
+
+/* BLAS1 routines */
+#define srotg_ f2c_srotg
+#define crotg_ f2c_crotg
+#define drotg_ f2c_drotg
+#define zrotg_ f2c_zrotg
+#define srotmg_ f2c_srotmg
+#define drotmg_ f2c_drotmg
+#define srot_ f2c_srot
+#define drot_ f2c_drot
+#define srotm_ f2c_srotm
+#define drotm_ f2c_drotm
+#define sswap_ f2c_sswap
+#define dswap_ f2c_dswap
+#define cswap_ f2c_cswap
+#define zswap_ f2c_zswap
+#define sscal_ f2c_sscal
+#define dscal_ f2c_dscal
+#define cscal_ f2c_cscal
+#define zscal_ f2c_zscal
+#define csscal_ f2c_csscal
+#define zdscal_ f2c_zdscal
+#define scopy_ f2c_scopy
+#define dcopy_ f2c_dcopy
+#define ccopy_ f2c_ccopy
+#define zcopy_ f2c_zcopy
+#define saxpy_ f2c_saxpy
+#define daxpy_ f2c_daxpy
+#define caxpy_ f2c_caxpy
+#define zaxpy_ f2c_zaxpy
+#define sdot_ f2c_sdot
+#define ddot_ f2c_ddot
+#define cdotu_ f2c_cdotu
+#define zdotu_ f2c_zdotu
+#define cdotc_ f2c_cdotc
+#define zdotc_ f2c_zdotc
+#define snrm2_ f2c_snrm2
+#define dnrm2_ f2c_dnrm2
+#define scnrm2_ f2c_scnrm2
+#define dznrm2_ f2c_dznrm2
+#define sasum_ f2c_sasum
+#define dasum_ f2c_dasum
+#define scasum_ f2c_scasum
+#define dzasum_ f2c_dzasum
+#define isamax_ f2c_isamax
+#define idamax_ f2c_idamax
+#define icamax_ f2c_icamax
+#define izamax_ f2c_izamax
+
+/* BLAS2 routines */
+#define sgemv_ f2c_sgemv
+#define dgemv_ f2c_dgemv
+#define cgemv_ f2c_cgemv
+#define zgemv_ f2c_zgemv
+#define sgbmv_ f2c_sgbmv
+#define dgbmv_ f2c_dgbmv
+#define cgbmv_ f2c_cgbmv
+#define zgbmv_ f2c_zgbmv
+#define chemv_ f2c_chemv
+#define zhemv_ f2c_zhemv
+#define chbmv_ f2c_chbmv
+#define zhbmv_ f2c_zhbmv
+#define chpmv_ f2c_chpmv
+#define zhpmv_ f2c_zhpmv
+#define ssymv_ f2c_ssymv
+#define dsymv_ f2c_dsymv
+#define ssbmv_ f2c_ssbmv
+#define dsbmv_ f2c_dsbmv
+#define sspmv_ f2c_sspmv
+#define dspmv_ f2c_dspmv
+#define strmv_ f2c_strmv
+#define dtrmv_ f2c_dtrmv
+#define ctrmv_ f2c_ctrmv
+#define ztrmv_ f2c_ztrmv
+#define stbmv_ f2c_stbmv
+#define dtbmv_ f2c_dtbmv
+#define ctbmv_ f2c_ctbmv
+#define ztbmv_ f2c_ztbmv
+#define stpmv_ f2c_stpmv
+#define dtpmv_ f2c_dtpmv
+#define ctpmv_ f2c_ctpmv
+#define ztpmv_ f2c_ztpmv
+#define strsv_ f2c_strsv
+#define dtrsv_ f2c_dtrsv
+#define ctrsv_ f2c_ctrsv
+#define ztrsv_ f2c_ztrsv
+#define stbsv_ f2c_stbsv
+#define dtbsv_ f2c_dtbsv
+#define ctbsv_ f2c_ctbsv
+#define ztbsv_ f2c_ztbsv
+#define stpsv_ f2c_stpsv
+#define dtpsv_ f2c_dtpsv
+#define ctpsv_ f2c_ctpsv
+#define ztpsv_ f2c_ztpsv
+#define sger_ f2c_sger
+#define dger_ f2c_dger
+#define cgeru_ f2c_cgeru
+#define zgeru_ f2c_zgeru
+#define cgerc_ f2c_cgerc
+#define zgerc_ f2c_zgerc
+#define cher_ f2c_cher
+#define zher_ f2c_zher
+#define chpr_ f2c_chpr
+#define zhpr_ f2c_zhpr
+#define cher2_ f2c_cher2
+#define zher2_ f2c_zher2
+#define chpr2_ f2c_chpr2
+#define zhpr2_ f2c_zhpr2
+#define ssyr_ f2c_ssyr
+#define dsyr_ f2c_dsyr
+#define sspr_ f2c_sspr
+#define dspr_ f2c_dspr
+#define ssyr2_ f2c_ssyr2
+#define dsyr2_ f2c_dsyr2
+#define sspr2_ f2c_sspr2
+#define dspr2_ f2c_dspr2
+
+/* BLAS3 routines */
+#define sgemm_ f2c_sgemm
+#define dgemm_ f2c_dgemm
+#define cgemm_ f2c_cgemm
+#define zgemm_ f2c_zgemm
+#define ssymm_ f2c_ssymm
+#define dsymm_ f2c_dsymm
+#define csymm_ f2c_csymm
+#define zsymm_ f2c_zsymm
+#define chemm_ f2c_chemm
+#define zhemm_ f2c_zhemm
+#define ssyrk_ f2c_ssyrk
+#define dsyrk_ f2c_dsyrk
+#define csyrk_ f2c_csyrk
+#define zsyrk_ f2c_zsyrk
+#define cherk_ f2c_cherk
+#define zherk_ f2c_zherk
+#define ssyr2k_ f2c_ssyr2k
+#define dsyr2k_ f2c_dsyr2k
+#define csyr2k_ f2c_csyr2k
+#define zsyr2k_ f2c_zsyr2k
+#define cher2k_ f2c_cher2k
+#define zher2k_ f2c_zher2k
+#define strmm_ f2c_strmm
+#define dtrmm_ f2c_dtrmm
+#define ctrmm_ f2c_ctrmm
+#define ztrmm_ f2c_ztrmm
+#define strsm_ f2c_strsm
+#define dtrsm_ f2c_dtrsm
+#define ctrsm_ f2c_ctrsm
+#define ztrsm_ f2c_ztrsm
+
+#endif /* NO_BLAS_WRAP */
+
+#endif /* __BLASWRAP_H */
diff --git a/contrib/libs/cblas/caxpy.c b/contrib/libs/cblas/caxpy.c
new file mode 100644
index 0000000000..fe01b4ecd7
--- /dev/null
+++ b/contrib/libs/cblas/caxpy.c
@@ -0,0 +1,103 @@
+/* caxpy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int caxpy_(integer *n, complex *ca, complex *cx, integer *
+ incx, complex *cy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ complex q__1, q__2;
+
+ /* Local variables */
+ integer i__, ix, iy;
+ extern doublereal scabs1_(complex *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CAXPY constant times a vector plus a vector. */
+
+/* Further Details */
+/* =============== */
+
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --cy;
+ --cx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (scabs1_(ca) == 0.f) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/* code for unequal increments or equal increments */
+/* not equal to 1 */
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = iy;
+ i__4 = ix;
+ q__2.r = ca->r * cx[i__4].r - ca->i * cx[i__4].i, q__2.i = ca->r * cx[
+ i__4].i + ca->i * cx[i__4].r;
+ q__1.r = cy[i__3].r + q__2.r, q__1.i = cy[i__3].i + q__2.i;
+ cy[i__2].r = q__1.r, cy[i__2].i = q__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ i__4 = i__;
+ q__2.r = ca->r * cx[i__4].r - ca->i * cx[i__4].i, q__2.i = ca->r * cx[
+ i__4].i + ca->i * cx[i__4].r;
+ q__1.r = cy[i__3].r + q__2.r, q__1.i = cy[i__3].i + q__2.i;
+ cy[i__2].r = q__1.r, cy[i__2].i = q__1.i;
+/* L30: */
+ }
+ return 0;
+} /* caxpy_ */
diff --git a/contrib/libs/cblas/cblas.h b/contrib/libs/cblas/cblas.h
new file mode 100644
index 0000000000..f91557e74d
--- /dev/null
+++ b/contrib/libs/cblas/cblas.h
@@ -0,0 +1,575 @@
+#ifndef CBLAS_H
+#define CBLAS_H
+#include <stddef.h>
+
+/*
+ * Enumerated and derived types
+ */
+#define CBLAS_INDEX size_t /* this may vary between platforms */
+
+enum CBLAS_ORDER {CblasRowMajor=101, CblasColMajor=102};
+enum CBLAS_TRANSPOSE {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=113};
+enum CBLAS_UPLO {CblasUpper=121, CblasLower=122};
+enum CBLAS_DIAG {CblasNonUnit=131, CblasUnit=132};
+enum CBLAS_SIDE {CblasLeft=141, CblasRight=142};
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * ===========================================================================
+ * Prototypes for level 1 BLAS functions (complex are recast as routines)
+ * ===========================================================================
+ */
+float cblas_sdsdot(const int N, const float alpha, const float *X,
+ const int incX, const float *Y, const int incY);
+double cblas_dsdot(const int N, const float *X, const int incX, const float *Y,
+ const int incY);
+float cblas_sdot(const int N, const float *X, const int incX,
+ const float *Y, const int incY);
+double cblas_ddot(const int N, const double *X, const int incX,
+ const double *Y, const int incY);
+
+/*
+ * Functions having prefixes Z and C only
+ */
+void cblas_cdotu_sub(const int N, const void *X, const int incX,
+ const void *Y, const int incY, void *dotu);
+void cblas_cdotc_sub(const int N, const void *X, const int incX,
+ const void *Y, const int incY, void *dotc);
+
+void cblas_zdotu_sub(const int N, const void *X, const int incX,
+ const void *Y, const int incY, void *dotu);
+void cblas_zdotc_sub(const int N, const void *X, const int incX,
+ const void *Y, const int incY, void *dotc);
+
+
+/*
+ * Functions having prefixes S D SC DZ
+ */
+float cblas_snrm2(const int N, const float *X, const int incX);
+float cblas_sasum(const int N, const float *X, const int incX);
+
+double cblas_dnrm2(const int N, const double *X, const int incX);
+double cblas_dasum(const int N, const double *X, const int incX);
+
+float cblas_scnrm2(const int N, const void *X, const int incX);
+float cblas_scasum(const int N, const void *X, const int incX);
+
+double cblas_dznrm2(const int N, const void *X, const int incX);
+double cblas_dzasum(const int N, const void *X, const int incX);
+
+
+/*
+ * Functions having standard 4 prefixes (S D C Z)
+ */
+CBLAS_INDEX cblas_isamax(const int N, const float *X, const int incX);
+CBLAS_INDEX cblas_idamax(const int N, const double *X, const int incX);
+CBLAS_INDEX cblas_icamax(const int N, const void *X, const int incX);
+CBLAS_INDEX cblas_izamax(const int N, const void *X, const int incX);
+
+/*
+ * ===========================================================================
+ * Prototypes for level 1 BLAS routines
+ * ===========================================================================
+ */
+
+/*
+ * Routines with standard 4 prefixes (s, d, c, z)
+ */
+void cblas_sswap(const int N, float *X, const int incX,
+ float *Y, const int incY);
+void cblas_scopy(const int N, const float *X, const int incX,
+ float *Y, const int incY);
+void cblas_saxpy(const int N, const float alpha, const float *X,
+ const int incX, float *Y, const int incY);
+
+void cblas_dswap(const int N, double *X, const int incX,
+ double *Y, const int incY);
+void cblas_dcopy(const int N, const double *X, const int incX,
+ double *Y, const int incY);
+void cblas_daxpy(const int N, const double alpha, const double *X,
+ const int incX, double *Y, const int incY);
+
+void cblas_cswap(const int N, void *X, const int incX,
+ void *Y, const int incY);
+void cblas_ccopy(const int N, const void *X, const int incX,
+ void *Y, const int incY);
+void cblas_caxpy(const int N, const void *alpha, const void *X,
+ const int incX, void *Y, const int incY);
+
+void cblas_zswap(const int N, void *X, const int incX,
+ void *Y, const int incY);
+void cblas_zcopy(const int N, const void *X, const int incX,
+ void *Y, const int incY);
+void cblas_zaxpy(const int N, const void *alpha, const void *X,
+ const int incX, void *Y, const int incY);
+
+
+/*
+ * Routines with S and D prefix only
+ */
+void cblas_srotg(float *a, float *b, float *c, float *s);
+void cblas_srotmg(float *d1, float *d2, float *b1, const float b2, float *P);
+void cblas_srot(const int N, float *X, const int incX,
+ float *Y, const int incY, const float c, const float s);
+void cblas_srotm(const int N, float *X, const int incX,
+ float *Y, const int incY, const float *P);
+
+void cblas_drotg(double *a, double *b, double *c, double *s);
+void cblas_drotmg(double *d1, double *d2, double *b1, const double b2, double *P);
+void cblas_drot(const int N, double *X, const int incX,
+ double *Y, const int incY, const double c, const double s);
+void cblas_drotm(const int N, double *X, const int incX,
+ double *Y, const int incY, const double *P);
+
+
+/*
+ * Routines with S D C Z CS and ZD prefixes
+ */
+void cblas_sscal(const int N, const float alpha, float *X, const int incX);
+void cblas_dscal(const int N, const double alpha, double *X, const int incX);
+void cblas_cscal(const int N, const void *alpha, void *X, const int incX);
+void cblas_zscal(const int N, const void *alpha, void *X, const int incX);
+void cblas_csscal(const int N, const float alpha, void *X, const int incX);
+void cblas_zdscal(const int N, const double alpha, void *X, const int incX);
+
+/*
+ * ===========================================================================
+ * Prototypes for level 2 BLAS
+ * ===========================================================================
+ */
+
+/*
+ * Routines with standard 4 prefixes (S, D, C, Z)
+ */
+void cblas_sgemv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const float alpha, const float *A, const int lda,
+ const float *X, const int incX, const float beta,
+ float *Y, const int incY);
+void cblas_sgbmv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const int KL, const int KU, const float alpha,
+ const float *A, const int lda, const float *X,
+ const int incX, const float beta, float *Y, const int incY);
+void cblas_strmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const float *A, const int lda,
+ float *X, const int incX);
+void cblas_stbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const float *A, const int lda,
+ float *X, const int incX);
+void cblas_stpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const float *Ap, float *X, const int incX);
+void cblas_strsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const float *A, const int lda, float *X,
+ const int incX);
+void cblas_stbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const float *A, const int lda,
+ float *X, const int incX);
+void cblas_stpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const float *Ap, float *X, const int incX);
+
+void cblas_dgemv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const double alpha, const double *A, const int lda,
+ const double *X, const int incX, const double beta,
+ double *Y, const int incY);
+void cblas_dgbmv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const int KL, const int KU, const double alpha,
+ const double *A, const int lda, const double *X,
+ const int incX, const double beta, double *Y, const int incY);
+void cblas_dtrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const double *A, const int lda,
+ double *X, const int incX);
+void cblas_dtbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const double *A, const int lda,
+ double *X, const int incX);
+void cblas_dtpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const double *Ap, double *X, const int incX);
+void cblas_dtrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const double *A, const int lda, double *X,
+ const int incX);
+void cblas_dtbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const double *A, const int lda,
+ double *X, const int incX);
+void cblas_dtpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const double *Ap, double *X, const int incX);
+
+void cblas_cgemv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ const void *X, const int incX, const void *beta,
+ void *Y, const int incY);
+void cblas_cgbmv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const int KL, const int KU, const void *alpha,
+ const void *A, const int lda, const void *X,
+ const int incX, const void *beta, void *Y, const int incY);
+void cblas_ctrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *A, const int lda,
+ void *X, const int incX);
+void cblas_ctbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const void *A, const int lda,
+ void *X, const int incX);
+void cblas_ctpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *Ap, void *X, const int incX);
+void cblas_ctrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *A, const int lda, void *X,
+ const int incX);
+void cblas_ctbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const void *A, const int lda,
+ void *X, const int incX);
+void cblas_ctpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *Ap, void *X, const int incX);
+
+void cblas_zgemv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ const void *X, const int incX, const void *beta,
+ void *Y, const int incY);
+void cblas_zgbmv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const int KL, const int KU, const void *alpha,
+ const void *A, const int lda, const void *X,
+ const int incX, const void *beta, void *Y, const int incY);
+void cblas_ztrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *A, const int lda,
+ void *X, const int incX);
+void cblas_ztbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const void *A, const int lda,
+ void *X, const int incX);
+void cblas_ztpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *Ap, void *X, const int incX);
+void cblas_ztrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *A, const int lda, void *X,
+ const int incX);
+void cblas_ztbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const void *A, const int lda,
+ void *X, const int incX);
+void cblas_ztpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *Ap, void *X, const int incX);
+
+
+/*
+ * Routines with S and D prefixes only
+ */
+void cblas_ssymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const float *A,
+ const int lda, const float *X, const int incX,
+ const float beta, float *Y, const int incY);
+void cblas_ssbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const int K, const float alpha, const float *A,
+ const int lda, const float *X, const int incX,
+ const float beta, float *Y, const int incY);
+void cblas_sspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const float *Ap,
+ const float *X, const int incX,
+ const float beta, float *Y, const int incY);
+void cblas_sger(const enum CBLAS_ORDER order, const int M, const int N,
+ const float alpha, const float *X, const int incX,
+ const float *Y, const int incY, float *A, const int lda);
+void cblas_ssyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const float *X,
+ const int incX, float *A, const int lda);
+void cblas_sspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const float *X,
+ const int incX, float *Ap);
+void cblas_ssyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const float *X,
+ const int incX, const float *Y, const int incY, float *A,
+ const int lda);
+void cblas_sspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const float *X,
+ const int incX, const float *Y, const int incY, float *A);
+
+void cblas_dsymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const double *A,
+ const int lda, const double *X, const int incX,
+ const double beta, double *Y, const int incY);
+void cblas_dsbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const int K, const double alpha, const double *A,
+ const int lda, const double *X, const int incX,
+ const double beta, double *Y, const int incY);
+void cblas_dspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const double *Ap,
+ const double *X, const int incX,
+ const double beta, double *Y, const int incY);
+void cblas_dger(const enum CBLAS_ORDER order, const int M, const int N,
+ const double alpha, const double *X, const int incX,
+ const double *Y, const int incY, double *A, const int lda);
+void cblas_dsyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const double *X,
+ const int incX, double *A, const int lda);
+void cblas_dspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const double *X,
+ const int incX, double *Ap);
+void cblas_dsyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const double *X,
+ const int incX, const double *Y, const int incY, double *A,
+ const int lda);
+void cblas_dspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const double *X,
+ const int incX, const double *Y, const int incY, double *A);
+
+
+/*
+ * Routines with C and Z prefixes only
+ */
+void cblas_chemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const void *alpha, const void *A,
+ const int lda, const void *X, const int incX,
+ const void *beta, void *Y, const int incY);
+void cblas_chbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const int K, const void *alpha, const void *A,
+ const int lda, const void *X, const int incX,
+ const void *beta, void *Y, const int incY);
+void cblas_chpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const void *alpha, const void *Ap,
+ const void *X, const int incX,
+ const void *beta, void *Y, const int incY);
+void cblas_cgeru(const enum CBLAS_ORDER order, const int M, const int N,
+ const void *alpha, const void *X, const int incX,
+ const void *Y, const int incY, void *A, const int lda);
+void cblas_cgerc(const enum CBLAS_ORDER order, const int M, const int N,
+ const void *alpha, const void *X, const int incX,
+ const void *Y, const int incY, void *A, const int lda);
+void cblas_cher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const void *X, const int incX,
+ void *A, const int lda);
+void cblas_chpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const void *X,
+ const int incX, void *A);
+void cblas_cher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
+ const void *alpha, const void *X, const int incX,
+ const void *Y, const int incY, void *A, const int lda);
+void cblas_chpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
+ const void *alpha, const void *X, const int incX,
+ const void *Y, const int incY, void *Ap);
+
+void cblas_zhemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const void *alpha, const void *A,
+ const int lda, const void *X, const int incX,
+ const void *beta, void *Y, const int incY);
+void cblas_zhbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const int K, const void *alpha, const void *A,
+ const int lda, const void *X, const int incX,
+ const void *beta, void *Y, const int incY);
+void cblas_zhpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const void *alpha, const void *Ap,
+ const void *X, const int incX,
+ const void *beta, void *Y, const int incY);
+void cblas_zgeru(const enum CBLAS_ORDER order, const int M, const int N,
+ const void *alpha, const void *X, const int incX,
+ const void *Y, const int incY, void *A, const int lda);
+void cblas_zgerc(const enum CBLAS_ORDER order, const int M, const int N,
+ const void *alpha, const void *X, const int incX,
+ const void *Y, const int incY, void *A, const int lda);
+void cblas_zher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const void *X, const int incX,
+ void *A, const int lda);
+void cblas_zhpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const void *X,
+ const int incX, void *A);
+void cblas_zher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
+ const void *alpha, const void *X, const int incX,
+ const void *Y, const int incY, void *A, const int lda);
+void cblas_zhpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
+ const void *alpha, const void *X, const int incX,
+ const void *Y, const int incY, void *Ap);
+
+/*
+ * ===========================================================================
+ * Prototypes for level 3 BLAS
+ * ===========================================================================
+ */
+
+/*
+ * Routines with standard 4 prefixes (S, D, C, Z)
+ */
+void cblas_sgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
+ const int K, const float alpha, const float *A,
+ const int lda, const float *B, const int ldb,
+ const float beta, float *C, const int ldc);
+void cblas_ssymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const int M, const int N,
+ const float alpha, const float *A, const int lda,
+ const float *B, const int ldb, const float beta,
+ float *C, const int ldc);
+void cblas_ssyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const float alpha, const float *A, const int lda,
+ const float beta, float *C, const int ldc);
+void cblas_ssyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const float alpha, const float *A, const int lda,
+ const float *B, const int ldb, const float beta,
+ float *C, const int ldc);
+void cblas_strmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const float alpha, const float *A, const int lda,
+ float *B, const int ldb);
+void cblas_strsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const float alpha, const float *A, const int lda,
+ float *B, const int ldb);
+
+void cblas_dgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
+ const int K, const double alpha, const double *A,
+ const int lda, const double *B, const int ldb,
+ const double beta, double *C, const int ldc);
+void cblas_dsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const int M, const int N,
+ const double alpha, const double *A, const int lda,
+ const double *B, const int ldb, const double beta,
+ double *C, const int ldc);
+void cblas_dsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const double alpha, const double *A, const int lda,
+ const double beta, double *C, const int ldc);
+void cblas_dsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const double alpha, const double *A, const int lda,
+ const double *B, const int ldb, const double beta,
+ double *C, const int ldc);
+void cblas_dtrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const double alpha, const double *A, const int lda,
+ double *B, const int ldb);
+void cblas_dtrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const double alpha, const double *A, const int lda,
+ double *B, const int ldb);
+
+void cblas_cgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
+ const int K, const void *alpha, const void *A,
+ const int lda, const void *B, const int ldb,
+ const void *beta, void *C, const int ldc);
+void cblas_csymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const void *beta,
+ void *C, const int ldc);
+void cblas_csyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const void *alpha, const void *A, const int lda,
+ const void *beta, void *C, const int ldc);
+void cblas_csyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const void *beta,
+ void *C, const int ldc);
+void cblas_ctrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ void *B, const int ldb);
+void cblas_ctrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ void *B, const int ldb);
+
+void cblas_zgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
+ const int K, const void *alpha, const void *A,
+ const int lda, const void *B, const int ldb,
+ const void *beta, void *C, const int ldc);
+void cblas_zsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const void *beta,
+ void *C, const int ldc);
+void cblas_zsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const void *alpha, const void *A, const int lda,
+ const void *beta, void *C, const int ldc);
+void cblas_zsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const void *beta,
+ void *C, const int ldc);
+void cblas_ztrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ void *B, const int ldb);
+void cblas_ztrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ void *B, const int ldb);
+
+
+/*
+ * Routines with prefixes C and Z only
+ */
+void cblas_chemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const void *beta,
+ void *C, const int ldc);
+void cblas_cherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const float alpha, const void *A, const int lda,
+ const float beta, void *C, const int ldc);
+void cblas_cher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const float beta,
+ void *C, const int ldc);
+
+void cblas_zhemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const void *beta,
+ void *C, const int ldc);
+void cblas_zherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const double alpha, const void *A, const int lda,
+ const double beta, void *C, const int ldc);
+void cblas_zher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const double beta,
+ void *C, const int ldc);
+
+void cblas_xerbla(int p, const char *rout, const char *form, ...);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/contrib/libs/cblas/cblas_f77.h b/contrib/libs/cblas/cblas_f77.h
new file mode 100644
index 0000000000..18435cd301
--- /dev/null
+++ b/contrib/libs/cblas/cblas_f77.h
@@ -0,0 +1,701 @@
+/*
+ * cblas_f77.h
+ * Written by Keita Teranishi
+ *
+ * Updated by Jeff Horner
+ * Merged cblas_f77.h and cblas_fortran_header.h
+ */
+
+#ifndef CBLAS_F77_H
+#define CBLAS_f77_H
+
+#ifdef CRAY
+ #include <fortran.h>
+ #define F77_CHAR _fcd
+ #define C2F_CHAR(a) ( _cptofcd( (a), 1 ) )
+ #define C2F_STR(a, i) ( _cptofcd( (a), (i) ) )
+ #define F77_STRLEN(a) (_fcdlen)
+#endif
+
+#ifdef WeirdNEC
+ #define F77_INT long
+#endif
+
+#ifdef F77_CHAR
+ #define FCHAR F77_CHAR
+#else
+ #define FCHAR char *
+#endif
+
+#ifdef F77_INT
+ #define FINT const F77_INT *
+ #define FINT2 F77_INT *
+#else
+ #define FINT const int *
+ #define FINT2 int *
+#endif
+
+#if defined(ADD_)
+/*
+ * Level 1 BLAS
+ */
+#define F77_xerbla xerbla_
+ #define F77_srotg srotg_
+ #define F77_srotmg srotmg_
+ #define F77_srot srot_
+ #define F77_srotm srotm_
+ #define F77_drotg drotg_
+ #define F77_drotmg drotmg_
+ #define F77_drot drot_
+ #define F77_drotm drotm_
+ #define F77_sswap sswap_
+ #define F77_scopy scopy_
+ #define F77_saxpy saxpy_
+ #define F77_isamax_sub isamaxsub_
+ #define F77_dswap dswap_
+ #define F77_dcopy dcopy_
+ #define F77_daxpy daxpy_
+ #define F77_idamax_sub idamaxsub_
+ #define F77_cswap cswap_
+ #define F77_ccopy ccopy_
+ #define F77_caxpy caxpy_
+ #define F77_icamax_sub icamaxsub_
+ #define F77_zswap zswap_
+ #define F77_zcopy zcopy_
+ #define F77_zaxpy zaxpy_
+ #define F77_izamax_sub izamaxsub_
+ #define F77_sdot_sub sdotsub_
+ #define F77_ddot_sub ddotsub_
+ #define F77_dsdot_sub dsdotsub_
+ #define F77_sscal sscal_
+ #define F77_dscal dscal_
+ #define F77_cscal cscal_
+ #define F77_zscal zscal_
+ #define F77_csscal csscal_
+ #define F77_zdscal zdscal_
+ #define F77_cdotu_sub cdotusub_
+ #define F77_cdotc_sub cdotcsub_
+ #define F77_zdotu_sub zdotusub_
+ #define F77_zdotc_sub zdotcsub_
+ #define F77_snrm2_sub snrm2sub_
+ #define F77_sasum_sub sasumsub_
+ #define F77_dnrm2_sub dnrm2sub_
+ #define F77_dasum_sub dasumsub_
+ #define F77_scnrm2_sub scnrm2sub_
+ #define F77_scasum_sub scasumsub_
+ #define F77_dznrm2_sub dznrm2sub_
+ #define F77_dzasum_sub dzasumsub_
+ #define F77_sdsdot_sub sdsdotsub_
+/*
+ * Level 2 BLAS
+ */
+ #define F77_ssymv ssymv_
+ #define F77_ssbmv ssbmv_
+ #define F77_sspmv sspmv_
+ #define F77_sger sger_
+ #define F77_ssyr ssyr_
+ #define F77_sspr sspr_
+ #define F77_ssyr2 ssyr2_
+ #define F77_sspr2 sspr2_
+ #define F77_dsymv dsymv_
+ #define F77_dsbmv dsbmv_
+ #define F77_dspmv dspmv_
+ #define F77_dger dger_
+ #define F77_dsyr dsyr_
+ #define F77_dspr dspr_
+ #define F77_dsyr2 dsyr2_
+ #define F77_dspr2 dspr2_
+ #define F77_chemv chemv_
+ #define F77_chbmv chbmv_
+ #define F77_chpmv chpmv_
+ #define F77_cgeru cgeru_
+ #define F77_cgerc cgerc_
+ #define F77_cher cher_
+ #define F77_chpr chpr_
+ #define F77_cher2 cher2_
+ #define F77_chpr2 chpr2_
+ #define F77_zhemv zhemv_
+ #define F77_zhbmv zhbmv_
+ #define F77_zhpmv zhpmv_
+ #define F77_zgeru zgeru_
+ #define F77_zgerc zgerc_
+ #define F77_zher zher_
+ #define F77_zhpr zhpr_
+ #define F77_zher2 zher2_
+ #define F77_zhpr2 zhpr2_
+ #define F77_sgemv sgemv_
+ #define F77_sgbmv sgbmv_
+ #define F77_strmv strmv_
+ #define F77_stbmv stbmv_
+ #define F77_stpmv stpmv_
+ #define F77_strsv strsv_
+ #define F77_stbsv stbsv_
+ #define F77_stpsv stpsv_
+ #define F77_dgemv dgemv_
+ #define F77_dgbmv dgbmv_
+ #define F77_dtrmv dtrmv_
+ #define F77_dtbmv dtbmv_
+ #define F77_dtpmv dtpmv_
+ #define F77_dtrsv dtrsv_
+ #define F77_dtbsv dtbsv_
+ #define F77_dtpsv dtpsv_
+ #define F77_cgemv cgemv_
+ #define F77_cgbmv cgbmv_
+ #define F77_ctrmv ctrmv_
+ #define F77_ctbmv ctbmv_
+ #define F77_ctpmv ctpmv_
+ #define F77_ctrsv ctrsv_
+ #define F77_ctbsv ctbsv_
+ #define F77_ctpsv ctpsv_
+ #define F77_zgemv zgemv_
+ #define F77_zgbmv zgbmv_
+ #define F77_ztrmv ztrmv_
+ #define F77_ztbmv ztbmv_
+ #define F77_ztpmv ztpmv_
+ #define F77_ztrsv ztrsv_
+ #define F77_ztbsv ztbsv_
+ #define F77_ztpsv ztpsv_
+/*
+ * Level 3 BLAS
+ */
+ #define F77_chemm chemm_
+ #define F77_cherk cherk_
+ #define F77_cher2k cher2k_
+ #define F77_zhemm zhemm_
+ #define F77_zherk zherk_
+ #define F77_zher2k zher2k_
+ #define F77_sgemm sgemm_
+ #define F77_ssymm ssymm_
+ #define F77_ssyrk ssyrk_
+ #define F77_ssyr2k ssyr2k_
+ #define F77_strmm strmm_
+ #define F77_strsm strsm_
+ #define F77_dgemm dgemm_
+ #define F77_dsymm dsymm_
+ #define F77_dsyrk dsyrk_
+ #define F77_dsyr2k dsyr2k_
+ #define F77_dtrmm dtrmm_
+ #define F77_dtrsm dtrsm_
+ #define F77_cgemm cgemm_
+ #define F77_csymm csymm_
+ #define F77_csyrk csyrk_
+ #define F77_csyr2k csyr2k_
+ #define F77_ctrmm ctrmm_
+ #define F77_ctrsm ctrsm_
+ #define F77_zgemm zgemm_
+ #define F77_zsymm zsymm_
+ #define F77_zsyrk zsyrk_
+ #define F77_zsyr2k zsyr2k_
+ #define F77_ztrmm ztrmm_
+ #define F77_ztrsm ztrsm_
+#elif defined(UPCASE)
+/*
+ * Level 1 BLAS
+ */
+#define F77_xerbla XERBLA
+ #define F77_srotg SROTG
+ #define F77_srotmg SROTMG
+ #define F77_srot SROT
+ #define F77_srotm SROTM
+ #define F77_drotg DROTG
+ #define F77_drotmg DROTMG
+ #define F77_drot DROT
+ #define F77_drotm DROTM
+ #define F77_sswap SSWAP
+ #define F77_scopy SCOPY
+ #define F77_saxpy SAXPY
+ #define F77_isamax_sub ISAMAXSUB
+ #define F77_dswap DSWAP
+ #define F77_dcopy DCOPY
+ #define F77_daxpy DAXPY
+ #define F77_idamax_sub IDAMAXSUB
+ #define F77_cswap CSWAP
+ #define F77_ccopy CCOPY
+ #define F77_caxpy CAXPY
+ #define F77_icamax_sub ICAMAXSUB
+ #define F77_zswap ZSWAP
+ #define F77_zcopy ZCOPY
+ #define F77_zaxpy ZAXPY
+ #define F77_izamax_sub IZAMAXSUB
+ #define F77_sdot_sub SDOTSUB
+ #define F77_ddot_sub DDOTSUB
+ #define F77_dsdot_sub DSDOTSUB
+ #define F77_sscal SSCAL
+ #define F77_dscal DSCAL
+ #define F77_cscal CSCAL
+ #define F77_zscal ZSCAL
+ #define F77_csscal CSSCAL
+ #define F77_zdscal ZDSCAL
+ #define F77_cdotu_sub CDOTUSUB
+ #define F77_cdotc_sub CDOTCSUB
+ #define F77_zdotu_sub ZDOTUSUB
+ #define F77_zdotc_sub ZDOTCSUB
+ #define F77_snrm2_sub SNRM2SUB
+ #define F77_sasum_sub SASUMSUB
+ #define F77_dnrm2_sub DNRM2SUB
+ #define F77_dasum_sub DASUMSUB
+ #define F77_scnrm2_sub SCNRM2SUB
+ #define F77_scasum_sub SCASUMSUB
+ #define F77_dznrm2_sub DZNRM2SUB
+ #define F77_dzasum_sub DZASUMSUB
+ #define F77_sdsdot_sub SDSDOTSUB
+/*
+ * Level 2 BLAS
+ */
+ #define F77_ssymv SSYMV
+ #define F77_ssbmv SSBMV
+ #define F77_sspmv SSPMV
+ #define F77_sger SGER
+ #define F77_ssyr SSYR
+ #define F77_sspr SSPR
+ #define F77_ssyr2 SSYR2
+ #define F77_sspr2 SSPR2
+ #define F77_dsymv DSYMV
+ #define F77_dsbmv DSBMV
+ #define F77_dspmv DSPMV
+ #define F77_dger DGER
+ #define F77_dsyr DSYR
+ #define F77_dspr DSPR
+ #define F77_dsyr2 DSYR2
+ #define F77_dspr2 DSPR2
+ #define F77_chemv CHEMV
+ #define F77_chbmv CHBMV
+ #define F77_chpmv CHPMV
+ #define F77_cgeru CGERU
+ #define F77_cgerc CGERC
+ #define F77_cher CHER
+ #define F77_chpr CHPR
+ #define F77_cher2 CHER2
+ #define F77_chpr2 CHPR2
+ #define F77_zhemv ZHEMV
+ #define F77_zhbmv ZHBMV
+ #define F77_zhpmv ZHPMV
+ #define F77_zgeru ZGERU
+ #define F77_zgerc ZGERC
+ #define F77_zher ZHER
+ #define F77_zhpr ZHPR
+ #define F77_zher2 ZHER2
+ #define F77_zhpr2 ZHPR2
+ #define F77_sgemv SGEMV
+ #define F77_sgbmv SGBMV
+ #define F77_strmv STRMV
+ #define F77_stbmv STBMV
+ #define F77_stpmv STPMV
+ #define F77_strsv STRSV
+ #define F77_stbsv STBSV
+ #define F77_stpsv STPSV
+ #define F77_dgemv DGEMV
+ #define F77_dgbmv DGBMV
+ #define F77_dtrmv DTRMV
+ #define F77_dtbmv DTBMV
+ #define F77_dtpmv DTPMV
+ #define F77_dtrsv DTRSV
+ #define F77_dtbsv DTBSV
+ #define F77_dtpsv DTPSV
+ #define F77_cgemv CGEMV
+ #define F77_cgbmv CGBMV
+ #define F77_ctrmv CTRMV
+ #define F77_ctbmv CTBMV
+ #define F77_ctpmv CTPMV
+ #define F77_ctrsv CTRSV
+ #define F77_ctbsv CTBSV
+ #define F77_ctpsv CTPSV
+ #define F77_zgemv ZGEMV
+ #define F77_zgbmv ZGBMV
+ #define F77_ztrmv ZTRMV
+ #define F77_ztbmv ZTBMV
+ #define F77_ztpmv ZTPMV
+ #define F77_ztrsv ZTRSV
+ #define F77_ztbsv ZTBSV
+ #define F77_ztpsv ZTPSV
+/*
+ * Level 3 BLAS
+ */
+ #define F77_chemm CHEMM
+ #define F77_cherk CHERK
+ #define F77_cher2k CHER2K
+ #define F77_zhemm ZHEMM
+ #define F77_zherk ZHERK
+ #define F77_zher2k ZHER2K
+ #define F77_sgemm SGEMM
+ #define F77_ssymm SSYMM
+ #define F77_ssyrk SSYRK
+ #define F77_ssyr2k SSYR2K
+ #define F77_strmm STRMM
+ #define F77_strsm STRSM
+ #define F77_dgemm DGEMM
+ #define F77_dsymm DSYMM
+ #define F77_dsyrk DSYRK
+ #define F77_dsyr2k DSYR2K
+ #define F77_dtrmm DTRMM
+ #define F77_dtrsm DTRSM
+ #define F77_cgemm CGEMM
+ #define F77_csymm CSYMM
+ #define F77_csyrk CSYRK
+ #define F77_csyr2k CSYR2K
+ #define F77_ctrmm CTRMM
+ #define F77_ctrsm CTRSM
+ #define F77_zgemm ZGEMM
+ #define F77_zsymm ZSYMM
+ #define F77_zsyrk ZSYRK
+ #define F77_zsyr2k ZSYR2K
+ #define F77_ztrmm ZTRMM
+ #define F77_ztrsm ZTRSM
+#elif defined(NOCHANGE)
+/*
+ * Level 1 BLAS
+ */
+#define F77_xerbla xerbla
+ #define F77_srotg srotg
+ #define F77_srotmg srotmg
+ #define F77_srot srot
+ #define F77_srotm srotm
+ #define F77_drotg drotg
+ #define F77_drotmg drotmg
+ #define F77_drot drot
+ #define F77_drotm drotm
+ #define F77_sswap sswap
+ #define F77_scopy scopy
+ #define F77_saxpy saxpy
+ #define F77_isamax_sub isamaxsub
+ #define F77_dswap dswap
+ #define F77_dcopy dcopy
+ #define F77_daxpy daxpy
+ #define F77_idamax_sub idamaxsub
+ #define F77_cswap cswap
+ #define F77_ccopy ccopy
+ #define F77_caxpy caxpy
+ #define F77_icamax_sub icamaxsub
+ #define F77_zswap zswap
+ #define F77_zcopy zcopy
+ #define F77_zaxpy zaxpy
+ #define F77_izamax_sub izamaxsub
+ #define F77_sdot_sub sdotsub
+ #define F77_ddot_sub ddotsub
+ #define F77_dsdot_sub dsdotsub
+ #define F77_sscal sscal
+ #define F77_dscal dscal
+ #define F77_cscal cscal
+ #define F77_zscal zscal
+ #define F77_csscal csscal
+ #define F77_zdscal zdscal
+ #define F77_cdotu_sub cdotusub
+ #define F77_cdotc_sub cdotcsub
+ #define F77_zdotu_sub zdotusub
+ #define F77_zdotc_sub zdotcsub
+ #define F77_snrm2_sub snrm2sub
+ #define F77_sasum_sub sasumsub
+ #define F77_dnrm2_sub dnrm2sub
+ #define F77_dasum_sub dasumsub
+ #define F77_scnrm2_sub scnrm2sub
+ #define F77_scasum_sub scasumsub
+ #define F77_dznrm2_sub dznrm2sub
+ #define F77_dzasum_sub dzasumsub
+ #define F77_sdsdot_sub sdsdotsub
+/*
+ * Level 2 BLAS
+ */
+ #define F77_ssymv ssymv
+ #define F77_ssbmv ssbmv
+ #define F77_sspmv sspmv
+ #define F77_sger sger
+ #define F77_ssyr ssyr
+ #define F77_sspr sspr
+ #define F77_ssyr2 ssyr2
+ #define F77_sspr2 sspr2
+ #define F77_dsymv dsymv
+ #define F77_dsbmv dsbmv
+ #define F77_dspmv dspmv
+ #define F77_dger dger
+ #define F77_dsyr dsyr
+ #define F77_dspr dspr
+ #define F77_dsyr2 dsyr2
+ #define F77_dspr2 dspr2
+ #define F77_chemv chemv
+ #define F77_chbmv chbmv
+ #define F77_chpmv chpmv
+ #define F77_cgeru cgeru
+ #define F77_cgerc cgerc
+ #define F77_cher cher
+ #define F77_chpr chpr
+ #define F77_cher2 cher2
+ #define F77_chpr2 chpr2
+ #define F77_zhemv zhemv
+ #define F77_zhbmv zhbmv
+ #define F77_zhpmv zhpmv
+ #define F77_zgeru zgeru
+ #define F77_zgerc zgerc
+ #define F77_zher zher
+ #define F77_zhpr zhpr
+ #define F77_zher2 zher2
+ #define F77_zhpr2 zhpr2
+ #define F77_sgemv sgemv
+ #define F77_sgbmv sgbmv
+ #define F77_strmv strmv
+ #define F77_stbmv stbmv
+ #define F77_stpmv stpmv
+ #define F77_strsv strsv
+ #define F77_stbsv stbsv
+ #define F77_stpsv stpsv
+ #define F77_dgemv dgemv
+ #define F77_dgbmv dgbmv
+ #define F77_dtrmv dtrmv
+ #define F77_dtbmv dtbmv
+ #define F77_dtpmv dtpmv
+ #define F77_dtrsv dtrsv
+ #define F77_dtbsv dtbsv
+ #define F77_dtpsv dtpsv
+ #define F77_cgemv cgemv
+ #define F77_cgbmv cgbmv
+ #define F77_ctrmv ctrmv
+ #define F77_ctbmv ctbmv
+ #define F77_ctpmv ctpmv
+ #define F77_ctrsv ctrsv
+ #define F77_ctbsv ctbsv
+ #define F77_ctpsv ctpsv
+ #define F77_zgemv zgemv
+ #define F77_zgbmv zgbmv
+ #define F77_ztrmv ztrmv
+ #define F77_ztbmv ztbmv
+ #define F77_ztpmv ztpmv
+ #define F77_ztrsv ztrsv
+ #define F77_ztbsv ztbsv
+ #define F77_ztpsv ztpsv
+/*
+ * Level 3 BLAS
+ */
+ #define F77_chemm chemm
+ #define F77_cherk cherk
+ #define F77_cher2k cher2k
+ #define F77_zhemm zhemm
+ #define F77_zherk zherk
+ #define F77_zher2k zher2k
+ #define F77_sgemm sgemm
+ #define F77_ssymm ssymm
+ #define F77_ssyrk ssyrk
+ #define F77_ssyr2k ssyr2k
+ #define F77_strmm strmm
+ #define F77_strsm strsm
+ #define F77_dgemm dgemm
+ #define F77_dsymm dsymm
+ #define F77_dsyrk dsyrk
+ #define F77_dsyr2k dsyr2k
+ #define F77_dtrmm dtrmm
+ #define F77_dtrsm dtrsm
+ #define F77_cgemm cgemm
+ #define F77_csymm csymm
+ #define F77_csyrk csyrk
+ #define F77_csyr2k csyr2k
+ #define F77_ctrmm ctrmm
+ #define F77_ctrsm ctrsm
+ #define F77_zgemm zgemm
+ #define F77_zsymm zsymm
+ #define F77_zsyrk zsyrk
+ #define F77_zsyr2k zsyr2k
+ #define F77_ztrmm ztrmm
+ #define F77_ztrsm ztrsm
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ void F77_xerbla(FCHAR, void *);
+/*
+ * Level 1 Fortran Prototypes
+ */
+
+/* Single Precision */
+
+ void F77_srot(FINT, float *, FINT, float *, FINT, const float *, const float *);
+ void F77_srotg(float *,float *,float *,float *);
+ void F77_srotm( FINT, float *, FINT, float *, FINT, const float *);
+ void F77_srotmg(float *,float *,float *,const float *, float *);
+ void F77_sswap( FINT, float *, FINT, float *, FINT);
+ void F77_scopy( FINT, const float *, FINT, float *, FINT);
+ void F77_saxpy( FINT, const float *, const float *, FINT, float *, FINT);
+ void F77_sdot_sub(FINT, const float *, FINT, const float *, FINT, float *);
+ void F77_sdsdot_sub( FINT, const float *, const float *, FINT, const float *, FINT, float *);
+ void F77_sscal( FINT, const float *, float *, FINT);
+ void F77_snrm2_sub( FINT, const float *, FINT, float *);
+ void F77_sasum_sub( FINT, const float *, FINT, float *);
+ void F77_isamax_sub( FINT, const float * , FINT, FINT2);
+
+/* Double Precision */
+
+ void F77_drot(FINT, double *, FINT, double *, FINT, const double *, const double *);
+ void F77_drotg(double *,double *,double *,double *);
+ void F77_drotm( FINT, double *, FINT, double *, FINT, const double *);
+ void F77_drotmg(double *,double *,double *,const double *, double *);
+ void F77_dswap( FINT, double *, FINT, double *, FINT);
+ void F77_dcopy( FINT, const double *, FINT, double *, FINT);
+ void F77_daxpy( FINT, const double *, const double *, FINT, double *, FINT);
+ void F77_dswap( FINT, double *, FINT, double *, FINT);
+ void F77_dsdot_sub(FINT, const float *, FINT, const float *, FINT, double *);
+ void F77_ddot_sub( FINT, const double *, FINT, const double *, FINT, double *);
+ void F77_dscal( FINT, const double *, double *, FINT);
+ void F77_dnrm2_sub( FINT, const double *, FINT, double *);
+ void F77_dasum_sub( FINT, const double *, FINT, double *);
+ void F77_idamax_sub( FINT, const double * , FINT, FINT2);
+
+/* Single Complex Precision */
+
+ void F77_cswap( FINT, void *, FINT, void *, FINT);
+ void F77_ccopy( FINT, const void *, FINT, void *, FINT);
+ void F77_caxpy( FINT, const void *, const void *, FINT, void *, FINT);
+ void F77_cswap( FINT, void *, FINT, void *, FINT);
+ void F77_cdotc_sub( FINT, const void *, FINT, const void *, FINT, void *);
+ void F77_cdotu_sub( FINT, const void *, FINT, const void *, FINT, void *);
+ void F77_cscal( FINT, const void *, void *, FINT);
+ void F77_icamax_sub( FINT, const void *, FINT, FINT2);
+ void F77_csscal( FINT, const float *, void *, FINT);
+ void F77_scnrm2_sub( FINT, const void *, FINT, float *);
+ void F77_scasum_sub( FINT, const void *, FINT, float *);
+
+/* Double Complex Precision */
+
+ void F77_zswap( FINT, void *, FINT, void *, FINT);
+ void F77_zcopy( FINT, const void *, FINT, void *, FINT);
+ void F77_zaxpy( FINT, const void *, const void *, FINT, void *, FINT);
+ void F77_zswap( FINT, void *, FINT, void *, FINT);
+ void F77_zdotc_sub( FINT, const void *, FINT, const void *, FINT, void *);
+ void F77_zdotu_sub( FINT, const void *, FINT, const void *, FINT, void *);
+ void F77_zdscal( FINT, const double *, void *, FINT);
+ void F77_zscal( FINT, const void *, void *, FINT);
+ void F77_dznrm2_sub( FINT, const void *, FINT, double *);
+ void F77_dzasum_sub( FINT, const void *, FINT, double *);
+ void F77_izamax_sub( FINT, const void *, FINT, FINT2);
+
+/*
+ * Level 2 Fortran Prototypes
+ */
+
+/* Single Precision */
+
+ void F77_sgemv(FCHAR, FINT, FINT, const float *, const float *, FINT, const float *, FINT, const float *, float *, FINT);
+ void F77_sgbmv(FCHAR, FINT, FINT, FINT, FINT, const float *, const float *, FINT, const float *, FINT, const float *, float *, FINT);
+ void F77_ssymv(FCHAR, FINT, const float *, const float *, FINT, const float *, FINT, const float *, float *, FINT);
+ void F77_ssbmv(FCHAR, FINT, FINT, const float *, const float *, FINT, const float *, FINT, const float *, float *, FINT);
+ void F77_sspmv(FCHAR, FINT, const float *, const float *, const float *, FINT, const float *, float *, FINT);
+ void F77_strmv( FCHAR, FCHAR, FCHAR, FINT, const float *, FINT, float *, FINT);
+ void F77_stbmv( FCHAR, FCHAR, FCHAR, FINT, FINT, const float *, FINT, float *, FINT);
+ void F77_strsv( FCHAR, FCHAR, FCHAR, FINT, const float *, FINT, float *, FINT);
+ void F77_stbsv( FCHAR, FCHAR, FCHAR, FINT, FINT, const float *, FINT, float *, FINT);
+ void F77_stpmv( FCHAR, FCHAR, FCHAR, FINT, const float *, float *, FINT);
+ void F77_stpsv( FCHAR, FCHAR, FCHAR, FINT, const float *, float *, FINT);
+ void F77_sger( FINT, FINT, const float *, const float *, FINT, const float *, FINT, float *, FINT);
+ void F77_ssyr(FCHAR, FINT, const float *, const float *, FINT, float *, FINT);
+ void F77_sspr(FCHAR, FINT, const float *, const float *, FINT, float *);
+ void F77_sspr2(FCHAR, FINT, const float *, const float *, FINT, const float *, FINT, float *);
+ void F77_ssyr2(FCHAR, FINT, const float *, const float *, FINT, const float *, FINT, float *, FINT);
+
+/* Double Precision */
+
+ void F77_dgemv(FCHAR, FINT, FINT, const double *, const double *, FINT, const double *, FINT, const double *, double *, FINT);
+ void F77_dgbmv(FCHAR, FINT, FINT, FINT, FINT, const double *, const double *, FINT, const double *, FINT, const double *, double *, FINT);
+ void F77_dsymv(FCHAR, FINT, const double *, const double *, FINT, const double *, FINT, const double *, double *, FINT);
+ void F77_dsbmv(FCHAR, FINT, FINT, const double *, const double *, FINT, const double *, FINT, const double *, double *, FINT);
+ void F77_dspmv(FCHAR, FINT, const double *, const double *, const double *, FINT, const double *, double *, FINT);
+ void F77_dtrmv( FCHAR, FCHAR, FCHAR, FINT, const double *, FINT, double *, FINT);
+ void F77_dtbmv( FCHAR, FCHAR, FCHAR, FINT, FINT, const double *, FINT, double *, FINT);
+ void F77_dtrsv( FCHAR, FCHAR, FCHAR, FINT, const double *, FINT, double *, FINT);
+ void F77_dtbsv( FCHAR, FCHAR, FCHAR, FINT, FINT, const double *, FINT, double *, FINT);
+ void F77_dtpmv( FCHAR, FCHAR, FCHAR, FINT, const double *, double *, FINT);
+ void F77_dtpsv( FCHAR, FCHAR, FCHAR, FINT, const double *, double *, FINT);
+ void F77_dger( FINT, FINT, const double *, const double *, FINT, const double *, FINT, double *, FINT);
+ void F77_dsyr(FCHAR, FINT, const double *, const double *, FINT, double *, FINT);
+ void F77_dspr(FCHAR, FINT, const double *, const double *, FINT, double *);
+ void F77_dspr2(FCHAR, FINT, const double *, const double *, FINT, const double *, FINT, double *);
+ void F77_dsyr2(FCHAR, FINT, const double *, const double *, FINT, const double *, FINT, double *, FINT);
+
+/* Single Complex Precision */
+
+ void F77_cgemv(FCHAR, FINT, FINT, const void *, const void *, FINT, const void *, FINT, const void *, void *, FINT);
+ void F77_cgbmv(FCHAR, FINT, FINT, FINT, FINT, const void *, const void *, FINT, const void *, FINT, const void *, void *, FINT);
+ void F77_chemv(FCHAR, FINT, const void *, const void *, FINT, const void *, FINT, const void *, void *, FINT);
+ void F77_chbmv(FCHAR, FINT, FINT, const void *, const void *, FINT, const void *, FINT, const void *, void *, FINT);
+ void F77_chpmv(FCHAR, FINT, const void *, const void *, const void *, FINT, const void *, void *, FINT);
+ void F77_ctrmv( FCHAR, FCHAR, FCHAR, FINT, const void *, FINT, void *, FINT);
+ void F77_ctbmv( FCHAR, FCHAR, FCHAR, FINT, FINT, const void *, FINT, void *, FINT);
+ void F77_ctpmv( FCHAR, FCHAR, FCHAR, FINT, const void *, void *, FINT);
+ void F77_ctrsv( FCHAR, FCHAR, FCHAR, FINT, const void *, FINT, void *, FINT);
+ void F77_ctbsv( FCHAR, FCHAR, FCHAR, FINT, FINT, const void *, FINT, void *, FINT);
+ void F77_ctpsv( FCHAR, FCHAR, FCHAR, FINT, const void *, void *,FINT);
+ void F77_cgerc( FINT, FINT, const void *, const void *, FINT, const void *, FINT, void *, FINT);
+ void F77_cgeru( FINT, FINT, const void *, const void *, FINT, const void *, FINT, void *, FINT);
+ void F77_cher(FCHAR, FINT, const float *, const void *, FINT, void *, FINT);
+ void F77_cher2(FCHAR, FINT, const void *, const void *, FINT, const void *, FINT, void *, FINT);
+ void F77_chpr(FCHAR, FINT, const float *, const void *, FINT, void *);
+ void F77_chpr2(FCHAR, FINT, const float *, const void *, FINT, const void *, FINT, void *);
+
+/* Double Complex Precision */
+
+ void F77_zgemv(FCHAR, FINT, FINT, const void *, const void *, FINT, const void *, FINT, const void *, void *, FINT);
+ void F77_zgbmv(FCHAR, FINT, FINT, FINT, FINT, const void *, const void *, FINT, const void *, FINT, const void *, void *, FINT);
+ void F77_zhemv(FCHAR, FINT, const void *, const void *, FINT, const void *, FINT, const void *, void *, FINT);
+ void F77_zhbmv(FCHAR, FINT, FINT, const void *, const void *, FINT, const void *, FINT, const void *, void *, FINT);
+ void F77_zhpmv(FCHAR, FINT, const void *, const void *, const void *, FINT, const void *, void *, FINT);
+ void F77_ztrmv( FCHAR, FCHAR, FCHAR, FINT, const void *, FINT, void *, FINT);
+ void F77_ztbmv( FCHAR, FCHAR, FCHAR, FINT, FINT, const void *, FINT, void *, FINT);
+ void F77_ztpmv( FCHAR, FCHAR, FCHAR, FINT, const void *, void *, FINT);
+ void F77_ztrsv( FCHAR, FCHAR, FCHAR, FINT, const void *, FINT, void *, FINT);
+ void F77_ztbsv( FCHAR, FCHAR, FCHAR, FINT, FINT, const void *, FINT, void *, FINT);
+ void F77_ztpsv( FCHAR, FCHAR, FCHAR, FINT, const void *, void *,FINT);
+ void F77_zgerc( FINT, FINT, const void *, const void *, FINT, const void *, FINT, void *, FINT);
+ void F77_zgeru( FINT, FINT, const void *, const void *, FINT, const void *, FINT, void *, FINT);
+ void F77_zher(FCHAR, FINT, const double *, const void *, FINT, void *, FINT);
+ void F77_zher2(FCHAR, FINT, const void *, const void *, FINT, const void *, FINT, void *, FINT);
+ void F77_zhpr(FCHAR, FINT, const double *, const void *, FINT, void *);
+ void F77_zhpr2(FCHAR, FINT, const double *, const void *, FINT, const void *, FINT, void *);
+
+/*
+ * Level 3 Fortran Prototypes
+ */
+
+/* Single Precision */
+
+ void F77_sgemm(FCHAR, FCHAR, FINT, FINT, FINT, const float *, const float *, FINT, const float *, FINT, const float *, float *, FINT);
+ void F77_ssymm(FCHAR, FCHAR, FINT, FINT, const float *, const float *, FINT, const float *, FINT, const float *, float *, FINT);
+ void F77_ssyrk(FCHAR, FCHAR, FINT, FINT, const float *, const float *, FINT, const float *, float *, FINT);
+ void F77_ssyr2k(FCHAR, FCHAR, FINT, FINT, const float *, const float *, FINT, const float *, FINT, const float *, float *, FINT);
+ void F77_strmm(FCHAR, FCHAR, FCHAR, FCHAR, FINT, FINT, const float *, const float *, FINT, float *, FINT);
+ void F77_strsm(FCHAR, FCHAR, FCHAR, FCHAR, FINT, FINT, const float *, const float *, FINT, float *, FINT);
+
+/* Double Precision */
+
+ void F77_dgemm(FCHAR, FCHAR, FINT, FINT, FINT, const double *, const double *, FINT, const double *, FINT, const double *, double *, FINT);
+ void F77_dsymm(FCHAR, FCHAR, FINT, FINT, const double *, const double *, FINT, const double *, FINT, const double *, double *, FINT);
+ void F77_dsyrk(FCHAR, FCHAR, FINT, FINT, const double *, const double *, FINT, const double *, double *, FINT);
+ void F77_dsyr2k(FCHAR, FCHAR, FINT, FINT, const double *, const double *, FINT, const double *, FINT, const double *, double *, FINT);
+ void F77_dtrmm(FCHAR, FCHAR, FCHAR, FCHAR, FINT, FINT, const double *, const double *, FINT, double *, FINT);
+ void F77_dtrsm(FCHAR, FCHAR, FCHAR, FCHAR, FINT, FINT, const double *, const double *, FINT, double *, FINT);
+
+/* Single Complex Precision */
+
+ void F77_cgemm(FCHAR, FCHAR, FINT, FINT, FINT, const float *, const float *, FINT, const float *, FINT, const float *, float *, FINT);
+ void F77_csymm(FCHAR, FCHAR, FINT, FINT, const float *, const float *, FINT, const float *, FINT, const float *, float *, FINT);
+ void F77_chemm(FCHAR, FCHAR, FINT, FINT, const float *, const float *, FINT, const float *, FINT, const float *, float *, FINT);
+ void F77_csyrk(FCHAR, FCHAR, FINT, FINT, const float *, const float *, FINT, const float *, float *, FINT);
+ void F77_cherk(FCHAR, FCHAR, FINT, FINT, const float *, const float *, FINT, const float *, float *, FINT);
+ void F77_csyr2k(FCHAR, FCHAR, FINT, FINT, const float *, const float *, FINT, const float *, FINT, const float *, float *, FINT);
+ void F77_cher2k(FCHAR, FCHAR, FINT, FINT, const float *, const float *, FINT, const float *, FINT, const float *, float *, FINT);
+ void F77_ctrmm(FCHAR, FCHAR, FCHAR, FCHAR, FINT, FINT, const float *, const float *, FINT, float *, FINT);
+ void F77_ctrsm(FCHAR, FCHAR, FCHAR, FCHAR, FINT, FINT, const float *, const float *, FINT, float *, FINT);
+
+/* Double Complex Precision */
+
+ void F77_zgemm(FCHAR, FCHAR, FINT, FINT, FINT, const double *, const double *, FINT, const double *, FINT, const double *, double *, FINT);
+ void F77_zsymm(FCHAR, FCHAR, FINT, FINT, const double *, const double *, FINT, const double *, FINT, const double *, double *, FINT);
+ void F77_zhemm(FCHAR, FCHAR, FINT, FINT, const double *, const double *, FINT, const double *, FINT, const double *, double *, FINT);
+ void F77_zsyrk(FCHAR, FCHAR, FINT, FINT, const double *, const double *, FINT, const double *, double *, FINT);
+ void F77_zherk(FCHAR, FCHAR, FINT, FINT, const double *, const double *, FINT, const double *, double *, FINT);
+ void F77_zsyr2k(FCHAR, FCHAR, FINT, FINT, const double *, const double *, FINT, const double *, FINT, const double *, double *, FINT);
+ void F77_zher2k(FCHAR, FCHAR, FINT, FINT, const double *, const double *, FINT, const double *, FINT, const double *, double *, FINT);
+ void F77_ztrmm(FCHAR, FCHAR, FCHAR, FCHAR, FINT, FINT, const double *, const double *, FINT, double *, FINT);
+ void F77_ztrsm(FCHAR, FCHAR, FCHAR, FCHAR, FINT, FINT, const double *, const double *, FINT, double *, FINT);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* CBLAS_F77_H */
diff --git a/contrib/libs/cblas/cblas_interface/cblas_caxpy.c b/contrib/libs/cblas/cblas_interface/cblas_caxpy.c
new file mode 100644
index 0000000000..7579aa707a
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_caxpy.c
@@ -0,0 +1,22 @@
+/*
+ * cblas_caxpy.c
+ *
+ * The program is a C interface to caxpy.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_caxpy( const int N, const void *alpha, const void *X,
+ const int incX, void *Y, const int incY)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ F77_caxpy( &F77_N, alpha, X, &F77_incX, Y, &F77_incY);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ccopy.c b/contrib/libs/cblas/cblas_interface/cblas_ccopy.c
new file mode 100644
index 0000000000..b7bc428473
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ccopy.c
@@ -0,0 +1,22 @@
+/*
+ * cblas_ccopy.c
+ *
+ * The program is a C interface to ccopy.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ccopy( const int N, const void *X,
+ const int incX, void *Y, const int incY)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ F77_ccopy( &F77_N, X, &F77_incX, Y, &F77_incY);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_cdotc_sub.c b/contrib/libs/cblas/cblas_interface/cblas_cdotc_sub.c
new file mode 100644
index 0000000000..d6086814e2
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_cdotc_sub.c
@@ -0,0 +1,23 @@
+/*
+ * cblas_cdotc_sub.c
+ *
+ * The program is a C interface to cdotc.
+ * It calls the fortran wrapper before calling cdotc.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_cdotc_sub( const int N, const void *X, const int incX,
+ const void *Y, const int incY,void *dotc)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ F77_cdotc_sub( &F77_N, X, &F77_incX, Y, &F77_incY, dotc);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_cdotu_sub.c b/contrib/libs/cblas/cblas_interface/cblas_cdotu_sub.c
new file mode 100644
index 0000000000..d06e4e5fa9
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_cdotu_sub.c
@@ -0,0 +1,23 @@
+/*
+ * cblas_cdotu_sub.f
+ *
+ * The program is a C interface to cdotu.
+ * It calls the forteran wrapper before calling cdotu.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_cdotu_sub( const int N, const void *X,
+ const int incX, const void *Y, const int incY,void *dotu)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ F77_cdotu_sub( &F77_N, X, &F77_incX, Y, &F77_incY, dotu);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_cgbmv.c b/contrib/libs/cblas/cblas_interface/cblas_cgbmv.c
new file mode 100644
index 0000000000..e61a31a4ab
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_cgbmv.c
@@ -0,0 +1,165 @@
+/*
+ * cblas_cgbmv.c
+ * The program is a C interface of cgbmv
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_cgbmv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const int KL, const int KU,
+ const void *alpha, const void *A, const int lda,
+ const void *X, const int incX, const void *beta,
+ void *Y, const int incY)
+{
+ char TA;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA;
+#else
+ #define F77_TA &TA
+#endif
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+ F77_INT F77_KL=KL,F77_KU=KU;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_KL KL
+ #define F77_KU KU
+ #define F77_incX incx
+ #define F77_incY incY
+#endif
+ int n=0, i=0, incx=incX;
+ const float *xx= (float *)X, *alp= (float *)alpha, *bet = (float *)beta;
+ float ALPHA[2],BETA[2];
+ int tincY, tincx;
+ float *x=(float *)X, *y=(float *)Y, *st=0, *tx=0;
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(2, "cblas_cgbmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_TA = C2F_CHAR(&TA);
+ #endif
+ F77_cgbmv(F77_TA, &F77_M, &F77_N, &F77_KL, &F77_KU, alpha,
+ A, &F77_lda, X, &F77_incX, beta, Y, &F77_incY);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans)
+ {
+ ALPHA[0]= *alp;
+ ALPHA[1]= -alp[1];
+ BETA[0]= *bet;
+ BETA[1]= -bet[1];
+ TA = 'N';
+ if (M > 0)
+ {
+ n = M << 1;
+ x = malloc(n*sizeof(float));
+ tx = x;
+
+ if( incX > 0 ) {
+ i = incX << 1 ;
+ tincx = 2;
+ st= x+n;
+ } else {
+ i = incX *(-2);
+ tincx = -2;
+ st = x-2;
+ x +=(n-2);
+ }
+ do
+ {
+ *x = *xx;
+ x[1] = -xx[1];
+ x += tincx ;
+ xx += i;
+ }
+ while (x != st);
+ x=tx;
+
+ #ifdef F77_INT
+ F77_incX = 1;
+ #else
+ incx = 1;
+ #endif
+
+ if( incY > 0 )
+ tincY = incY;
+ else
+ tincY = -incY;
+
+ y++;
+
+ if (N > 0)
+ {
+ i = tincY << 1;
+ n = i * N ;
+ st = y + n;
+ do {
+ *y = -(*y);
+ y += i;
+ } while(y != st);
+ y -= n;
+ }
+ }
+ else x = (float *) X;
+
+
+ }
+ else
+ {
+ cblas_xerbla(2, "cblas_cgbmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_TA = C2F_CHAR(&TA);
+ #endif
+ if (TransA == CblasConjTrans)
+ F77_cgbmv(F77_TA, &F77_N, &F77_M, &F77_KU, &F77_KL, ALPHA,
+ A ,&F77_lda, x,&F77_incX, BETA, Y, &F77_incY);
+ else
+ F77_cgbmv(F77_TA, &F77_N, &F77_M, &F77_KU, &F77_KL, alpha,
+ A ,&F77_lda, x,&F77_incX, beta, Y, &F77_incY);
+ if (TransA == CblasConjTrans)
+ {
+ if (x != X) free(x);
+ if (N > 0)
+ {
+ do
+ {
+ *y = -(*y);
+ y += i;
+ }
+ while (y != st);
+ }
+ }
+ }
+ else cblas_xerbla(1, "cblas_cgbmv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_cgemm.c b/contrib/libs/cblas/cblas_interface/cblas_cgemm.c
new file mode 100644
index 0000000000..dee4696eed
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_cgemm.c
@@ -0,0 +1,109 @@
+/*
+ *
+ * cblas_cgemm.c
+ * This program is a C interface to cgemm.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_cgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+                 const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
+                 const int K, const void *alpha, const void *A,
+                 const int lda, const void *B, const int ldb,
+                 const void *beta, void *C, const int ldc)
+{
+   /*
+    * C interface to the Fortran CGEMM routine:
+    *    C = alpha*op(A)*op(B) + beta*C   (single-precision complex).
+    * alpha/beta point to (real, imag) float pairs.  Row-major storage is
+    * handled without copying data: C stored row-major equals C^T stored
+    * column-major, so the call computes op(B)*op(A) with M/N and the
+    * A/B operands (and their leading dimensions) swapped.
+    */
+   char TA, TB;
+#ifdef F77_CHAR
+   F77_CHAR F77_TA, F77_TB;
+#else
+   #define F77_TA &TA
+   #define F77_TB &TB
+#endif
+
+#ifdef F77_INT
+   F77_INT F77_M=M, F77_N=N, F77_K=K, F77_lda=lda, F77_ldb=ldb;
+   F77_INT F77_ldc=ldc;
+#else
+   #define F77_M M
+   #define F77_N N
+   #define F77_K K
+   #define F77_lda lda
+   #define F77_ldb ldb
+   #define F77_ldc ldc
+#endif
+
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+   CBLAS_CallFromC = 1;
+
+   if( Order == CblasColMajor )
+   {
+      if(TransA == CblasTrans) TA='T';
+      else if ( TransA == CblasConjTrans ) TA='C';
+      else if ( TransA == CblasNoTrans )   TA='N';
+      else
+      {
+         cblas_xerbla(2, "cblas_cgemm", "Illegal TransA setting, %d\n", TransA);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      if(TransB == CblasTrans) TB='T';
+      else if ( TransB == CblasConjTrans ) TB='C';
+      else if ( TransB == CblasNoTrans )   TB='N';
+      else
+      {
+         cblas_xerbla(3, "cblas_cgemm", "Illegal TransB setting, %d\n", TransB);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      #ifdef F77_CHAR
+         F77_TA = C2F_CHAR(&TA);
+         F77_TB = C2F_CHAR(&TB);
+      #endif
+
+      F77_cgemm(F77_TA, F77_TB, &F77_M, &F77_N, &F77_K, alpha, A,
+                &F77_lda, B, &F77_ldb, beta, C, &F77_ldc);
+   } else if (Order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      /* Row-major: swap the roles of A and B, so TransA drives TB and
+       * TransB drives TA; the NoTrans/Trans flags flip ('N'<->'T')
+       * because each operand is implicitly transposed by the layout. */
+      if(TransA == CblasTrans) TB='T';
+      else if ( TransA == CblasConjTrans ) TB='C';
+      else if ( TransA == CblasNoTrans )   TB='N';
+      else
+      {
+         cblas_xerbla(2, "cblas_cgemm", "Illegal TransA setting, %d\n", TransA);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      /* NOTE(review): this error reports position 2 while the
+       * column-major branch reports TransB as position 3 — kept as-is
+       * to match the vendored upstream sources. */
+      if(TransB == CblasTrans) TA='T';
+      else if ( TransB == CblasConjTrans ) TA='C';
+      else if ( TransB == CblasNoTrans )   TA='N';
+      else
+      {
+         cblas_xerbla(2, "cblas_cgemm", "Illegal TransB setting, %d\n", TransB);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+         F77_TA = C2F_CHAR(&TA);
+         F77_TB = C2F_CHAR(&TB);
+      #endif
+
+      F77_cgemm(F77_TA, F77_TB, &F77_N, &F77_M, &F77_K, alpha, B,
+                &F77_ldb, A, &F77_lda, beta, C, &F77_ldc);
+   }
+   else  cblas_xerbla(1, "cblas_cgemm", "Illegal Order setting, %d\n", Order);
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_cgemv.c b/contrib/libs/cblas/cblas_interface/cblas_cgemv.c
new file mode 100644
index 0000000000..5e4509a4f1
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_cgemv.c
@@ -0,0 +1,162 @@
+/*
+ * cblas_cgemv.c
+ * The program is a C interface of cgemv
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_cgemv(const enum CBLAS_ORDER order,
+                 const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+                 const void *alpha, const void *A, const int lda,
+                 const void *X, const int incX, const void *beta,
+                 void *Y, const int incY)
+{
+   /*
+    * C interface to the Fortran CGEMV routine:
+    *    Y = alpha*op(A)*X + beta*Y   (single-precision complex).
+    * Row-major NoTrans/Trans are mapped to the transposed column-major
+    * call ('N'<->'T' with M/N swapped).  Row-major ConjTrans has no
+    * direct Fortran equivalent; it is emulated via the identity
+    *    conj(A)^T * x = conj(A^T * conj(x)):
+    * alpha/beta are conjugated, X is copy-conjugated into a scratch
+    * buffer, the imaginary parts of Y are negated in place, the 'N'
+    * call is made, and Y's imaginary parts are negated back afterwards.
+    */
+   char TA;
+#ifdef F77_CHAR
+   F77_CHAR F77_TA;
+#else
+   #define F77_TA &TA
+#endif
+#ifdef F77_INT
+   F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+   #define F77_M M
+   #define F77_N N
+   #define F77_lda lda
+   /* Deliberately maps to the local (mutable) incx, not the const
+    * parameter incX, so the copy path below can force a unit stride. */
+   #define F77_incX incx
+   #define F77_incY incY
+#endif
+
+   int n=0, i=0, incx=incX;
+   const float *xx= (const float *)X;
+   float ALPHA[2],BETA[2];
+   int tincY, tincx;
+   float *x=(float *)X, *y=(float *)Y, *st=0, *tx=0;
+   const float *stx = x;
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+
+   CBLAS_CallFromC = 1;
+
+   if (order == CblasColMajor)
+   {
+      if (TransA == CblasNoTrans) TA = 'N';
+      else if (TransA == CblasTrans) TA = 'T';
+      else if (TransA == CblasConjTrans) TA = 'C';
+      else
+      {
+         cblas_xerbla(2, "cblas_cgemv","Illegal TransA setting, %d\n", TransA);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+         F77_TA = C2F_CHAR(&TA);
+      #endif
+      F77_cgemv(F77_TA, &F77_M, &F77_N, alpha, A, &F77_lda, X, &F77_incX,
+                beta, Y, &F77_incY);
+   }
+   else if (order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+
+      if (TransA == CblasNoTrans) TA = 'T';
+      else if (TransA == CblasTrans) TA = 'N';
+      else if (TransA == CblasConjTrans)
+      {
+         /* Conjugate the scalars (negated imaginary parts). */
+         ALPHA[0]= *( (const float *) alpha );
+         ALPHA[1]= -( *( (const float *) alpha+1) );
+         BETA[0]= *( (const float *) beta );
+         BETA[1]= -( *( (const float *) beta+1 ) );
+         TA = 'N';
+         if (M > 0)
+         {
+            /* M complex elements = 2*M floats; copy conj(X) into a
+             * contiguous scratch buffer (BLAS negative-stride
+             * convention: traversal starts from the far end). */
+            n = M << 1;
+            x = malloc(n*sizeof(float));
+            tx = x;
+
+            if( incX > 0 ) {
+               i = incX << 1 ;
+               tincx = 2;
+               st= x+n;
+            } else {
+               i = incX *(-2);
+               tincx = -2;
+               st = x-2;
+               x +=(n-2);
+            }
+            do
+            {
+               *x = *xx;
+               x[1] = -xx[1];
+               x += tincx ;
+               xx += i;
+            }
+            while (x != st);
+            x=tx;
+
+            /* Scratch copy is contiguous; without F77_INT this expands
+             * to the local variable incx (see macro above). */
+            F77_incX = 1;
+
+            if(incY > 0)
+              tincY = incY;
+            else
+              tincY = -incY;
+
+            /* Point at Im(Y[0]) and negate every imaginary part in
+             * place, so the 'N' call effectively computes conj(Y). */
+            y++;
+
+            if (N > 0)
+            {
+               i = tincY << 1;
+               n = i * N ;
+               st = y + n;
+               do {
+                  *y = -(*y);
+                  y += i;
+               } while(y != st);
+               y -= n;
+            }
+            stx = x;
+         }
+         else stx = (const float *)X;
+
+
+      }
+      else
+      {
+         cblas_xerbla(2, "cblas_cgemv","Illegal TransA setting, %d\n", TransA);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+         F77_TA = C2F_CHAR(&TA);
+      #endif
+      if (TransA == CblasConjTrans)
+         F77_cgemv(F77_TA, &F77_N, &F77_M, ALPHA, A, &F77_lda, stx,
+                   &F77_incX, BETA, Y, &F77_incY);
+      else
+         F77_cgemv(F77_TA, &F77_N, &F77_M, alpha, A, &F77_lda, x,
+                   &F77_incX, beta, Y, &F77_incY);
+
+      if (TransA == CblasConjTrans)
+      {
+         /* Release the scratch copy and undo the in-place conjugation
+          * of Y (restores the caller's sign convention). */
+         if (x != (const float *)X) free(x);
+         if (N > 0)
+         {
+            do
+            {
+               *y = -(*y);
+               y += i;
+            }
+            while (y != st);
+         }
+      }
+   }
+   else cblas_xerbla(1, "cblas_cgemv", "Illegal Order setting, %d\n", order);
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_cgerc.c b/contrib/libs/cblas/cblas_interface/cblas_cgerc.c
new file mode 100644
index 0000000000..29ccde63a8
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_cgerc.c
@@ -0,0 +1,84 @@
+/*
+ * cblas_cgerc.c
+ * The program is a C interface to cgerc.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_cgerc(const enum CBLAS_ORDER order, const int M, const int N,
+                 const void *alpha, const void *X, const int incX,
+                 const void *Y, const int incY, void *A, const int lda)
+{
+   /*
+    * C interface to the Fortran CGERC routine:
+    *    A = alpha*X*conj(Y)^T + A   (single-precision complex rank-1).
+    * Row-major storage is handled by copy-conjugating Y into a scratch
+    * buffer and calling CGERU on the transposed update
+    * (conj(Y)*X^T with M/N swapped), which equals the row-major result.
+    */
+#ifdef F77_INT
+   F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+   #define F77_M M
+   #define F77_N N
+   #define F77_incX incX
+   /* Maps to the local (mutable) incy so the copy path can force a
+    * unit stride for the scratch buffer. */
+   #define F77_incY incy
+   #define F77_lda lda
+#endif
+
+   int n, i, tincy, incy=incY;
+   float *y=(float *)Y, *yy=(float *)Y, *ty, *st;
+
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+
+   CBLAS_CallFromC = 1;
+   if (order == CblasColMajor)
+   {
+      F77_cgerc( &F77_M, &F77_N, alpha, X, &F77_incX, Y, &F77_incY, A,
+                      &F77_lda);
+   }  else if (order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      if (N > 0)
+      {
+         /* N complex elements = 2*N floats; copy conj(Y) contiguously
+          * (negative incY walks the source from its far end, per the
+          * BLAS negative-stride convention). */
+         n = N << 1;
+         y = malloc(n*sizeof(float));
+
+         ty = y;
+         if( incY > 0 ) {
+            i = incY << 1;
+            tincy = 2;
+            st= y+n;
+         } else {
+            i = incY *(-2);
+            tincy = -2;
+            st = y-2;
+            y +=(n-2);
+         }
+         do
+         {
+            *y = *yy;
+            y[1] = -yy[1];
+            y += tincy ;
+            yy += i;
+         }
+         while (y != st);
+         y = ty;
+
+         #ifdef F77_INT
+            F77_incY = 1;
+         #else
+            incy = 1;
+         #endif
+      }
+      else y = (float *) Y;
+
+      /* Conjugation already folded into the scratch y, so the
+       * unconjugated rank-1 update (CGERU) completes the job. */
+      F77_cgeru( &F77_N, &F77_M, alpha, y, &F77_incY, X, &F77_incX, A,
+                      &F77_lda);
+      if(Y!=y)
+         free(y);
+
+   }  else cblas_xerbla(1, "cblas_cgerc", "Illegal Order setting, %d\n", order);
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_cgeru.c b/contrib/libs/cblas/cblas_interface/cblas_cgeru.c
new file mode 100644
index 0000000000..549eae3cf4
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_cgeru.c
@@ -0,0 +1,45 @@
+/*
+ * cblas_cgeru.c
+ * The program is a C interface to cgeru.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_cgeru(const enum CBLAS_ORDER order, const int M, const int N,
+                 const void *alpha, const void *X, const int incX,
+                 const void *Y, const int incY, void *A, const int lda)
+{
+   /*
+    * C interface to the Fortran CGERU routine:
+    *    A = alpha*X*Y^T + A   (single-precision complex rank-1, no
+    * conjugation).  Row-major storage needs no data copy: the update is
+    * symmetric under transposition, so X/Y and M/N are simply swapped.
+    */
+#ifdef F77_INT
+   F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+   #define F77_M M
+   #define F77_N N
+   #define F77_incX incX
+   #define F77_incY incY
+   #define F77_lda lda
+#endif
+
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+
+   CBLAS_CallFromC = 1;
+
+   if (order == CblasColMajor)
+   {
+      F77_cgeru( &F77_M, &F77_N, alpha, X, &F77_incX, Y, &F77_incY, A,
+                      &F77_lda);
+   }
+   else if (order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      /* A row-major equals (alpha*Y*X^T + A^T)^T column-major. */
+      F77_cgeru( &F77_N, &F77_M, alpha, Y, &F77_incY, X, &F77_incX, A,
+                      &F77_lda);
+   }
+   else cblas_xerbla(1, "cblas_cgeru","Illegal Order setting, %d\n", order);
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_chbmv.c b/contrib/libs/cblas/cblas_interface/cblas_chbmv.c
new file mode 100644
index 0000000000..3f33e69c21
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_chbmv.c
@@ -0,0 +1,159 @@
+/*
+ * cblas_chbmv.c
+ * The program is a C interface to chbmv
+ *
+ * Keita Teranishi 5/18/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+#include <stdio.h>
+#include <stdlib.h>
+void cblas_chbmv(const enum CBLAS_ORDER order,
+                 const enum CBLAS_UPLO Uplo,const int N,const int K,
+                 const void *alpha, const void *A, const int lda,
+                 const void *X, const int incX, const void *beta,
+                 void *Y, const int incY)
+{
+   /*
+    * C interface to the Fortran CHBMV routine:
+    *    Y = alpha*A*X + beta*Y, A Hermitian band (bandwidth K).
+    * Row-major is emulated through the conjugate-transpose identity:
+    * A^H = A, so the column-major call is made on the layout-transposed
+    * matrix with Uplo flipped, conjugated alpha/beta, a conjugated
+    * scratch copy of X, and Y's imaginary parts negated in place
+    * (restored after the call).
+    */
+   char UL;
+#ifdef F77_CHAR
+   F77_CHAR F77_UL;
+#else
+   #define F77_UL &UL
+#endif
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_K=K, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+   #define F77_N N
+   #define F77_K K
+   #define F77_lda lda
+   /* Maps to the local (mutable) incx so the copy path can force a
+    * unit stride for the scratch buffer. */
+   #define F77_incX incx
+   #define F77_incY incY
+#endif
+   int n, i=0, incx=incX;
+   const float *xx= (float *)X, *alp= (float *)alpha, *bet = (float *)beta;
+   float ALPHA[2],BETA[2];
+   int tincY, tincx;
+   float *x=(float *)X, *y=(float *)Y, *st=0, *tx;
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+
+   CBLAS_CallFromC = 1;
+   if (order == CblasColMajor)
+   {
+      if (Uplo == CblasLower) UL = 'L';
+      else if (Uplo == CblasUpper) UL = 'U';
+      else
+      {
+         cblas_xerbla(2, "cblas_chbmv","Illegal Uplo setting, %d\n",Uplo );
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+      #endif
+      F77_chbmv(F77_UL, &F77_N, &F77_K, alpha, A, &F77_lda, X,
+                &F77_incX, beta, Y, &F77_incY);
+   }
+   else if (order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      /* Conjugated scalars for the conj-transposed computation. */
+      ALPHA[0]= *alp;
+      ALPHA[1]= -alp[1];
+      BETA[0]= *bet;
+      BETA[1]= -bet[1];
+
+      if (N > 0)
+      {
+         /* Copy conj(X) into a contiguous scratch buffer (negative
+          * incX walks the source from its far end). */
+         n = N << 1;
+         x = malloc(n*sizeof(float));
+
+         tx = x;
+         if( incX > 0 ) {
+            i = incX << 1 ;
+            tincx = 2;
+            st= x+n;
+         } else {
+            i = incX *(-2);
+            tincx = -2;
+            st = x-2;
+            x +=(n-2);
+         }
+
+         do
+         {
+            *x = *xx;
+            x[1] = -xx[1];
+            x += tincx ;
+            xx += i;
+         }
+         while (x != st);
+         x=tx;
+
+
+         #ifdef F77_INT
+            F77_incX = 1;
+         #else
+            incx = 1;
+         #endif
+
+         if(incY > 0)
+           tincY = incY;
+         else
+           tincY = -incY;
+         /* Point at Im(Y[0]) and negate every imaginary part in place,
+          * turning Y into conj(Y) for the Fortran call. */
+         y++;
+
+         i = tincY << 1;
+         n = i * N ;
+         st = y + n;
+         do {
+            *y = -(*y);
+            y += i;
+         } while(y != st);
+         y -= n;
+      }  else
+         x = (float *) X;
+
+      /* Uplo flips because the row-major layout transposes A. */
+      if (Uplo == CblasUpper) UL = 'L';
+      else if (Uplo == CblasLower) UL = 'U';
+      else
+      {
+         cblas_xerbla(2, "cblas_chbmv","Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+      #endif
+      F77_chbmv(F77_UL, &F77_N, &F77_K, ALPHA,
+                A ,&F77_lda, x,&F77_incX, BETA, Y, &F77_incY);
+   }
+   else
+   {
+      cblas_xerbla(1, "cblas_chbmv","Illegal Order setting, %d\n", order);
+      CBLAS_CallFromC = 0;
+      RowMajorStrg = 0;
+      return;
+   }
+   if ( order == CblasRowMajor )
+   {
+      RowMajorStrg = 1;
+      /* Release the scratch copy and undo the in-place conjugation
+       * of Y. */
+      if(X!=x)
+         free(x);
+      if (N > 0)
+      {
+         do
+         {
+            *y = -(*y);
+            y += i;
+         }
+         while (y != st);
+      }
+   }
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_chemm.c b/contrib/libs/cblas/cblas_interface/cblas_chemm.c
new file mode 100644
index 0000000000..89b80f5dc3
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_chemm.c
@@ -0,0 +1,106 @@
+/*
+ *
+ * cblas_chemm.c
+ * This program is a C interface to chemm.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_chemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+                 const enum CBLAS_UPLO Uplo, const int M, const int N,
+                 const void *alpha, const void *A, const int lda,
+                 const void *B, const int ldb, const void *beta,
+                 void *C, const int ldc)
+{
+   /*
+    * C interface to the Fortran CHEMM routine:
+    *    C = alpha*A*B + beta*C or C = alpha*B*A + beta*C,
+    * with A Hermitian.  Row-major storage is handled without copying:
+    * the call is made on the transposed problem, which flips Side
+    * (L<->R), flips Uplo (U<->L, since A^T of the stored triangle
+    * lives in the opposite triangle), and swaps M/N.
+    */
+   char SD, UL;
+#ifdef F77_CHAR
+   F77_CHAR F77_SD, F77_UL;
+#else
+   #define F77_SD &SD
+   #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+   F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_ldb=ldb;
+   F77_INT F77_ldc=ldc;
+#else
+   #define F77_M M
+   #define F77_N N
+   #define F77_lda lda
+   #define F77_ldb ldb
+   #define F77_ldc ldc
+#endif
+
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+   CBLAS_CallFromC = 1;
+
+   if( Order == CblasColMajor )
+   {
+      if( Side == CblasRight) SD='R';
+      else if ( Side == CblasLeft ) SD='L';
+      else
+      {
+         cblas_xerbla(2, "cblas_chemm", "Illegal Side setting, %d\n", Side);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      if( Uplo == CblasUpper) UL='U';
+      else if ( Uplo == CblasLower ) UL='L';
+      else
+      {
+         cblas_xerbla(3, "cblas_chemm", "Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+         F77_SD = C2F_CHAR(&SD);
+      #endif
+
+      F77_chemm(F77_SD, F77_UL, &F77_M, &F77_N, alpha, A, &F77_lda,
+                B, &F77_ldb, beta, C, &F77_ldc);
+   } else if (Order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      /* Transposed problem: Side and Uplo both flip. */
+      if( Side == CblasRight) SD='L';
+      else if ( Side == CblasLeft ) SD='R';
+      else
+      {
+         cblas_xerbla(2, "cblas_chemm", "Illegal Side setting, %d\n", Side);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      if( Uplo == CblasUpper) UL='L';
+      else if ( Uplo == CblasLower ) UL='U';
+      else
+      {
+         cblas_xerbla(3, "cblas_chemm", "Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+         F77_SD = C2F_CHAR(&SD);
+      #endif
+
+      F77_chemm(F77_SD, F77_UL, &F77_N, &F77_M, alpha, A,
+                &F77_lda, B, &F77_ldb, beta, C, &F77_ldc);
+   }
+   else  cblas_xerbla(1, "cblas_chemm", "Illegal Order setting, %d\n", Order);
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_chemv.c b/contrib/libs/cblas/cblas_interface/cblas_chemv.c
new file mode 100644
index 0000000000..f36a00d78e
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_chemv.c
@@ -0,0 +1,160 @@
+/*
+ * cblas_chemv.c
+ * The program is a C interface to chemv
+ *
+ * Keita Teranishi 5/18/98
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_chemv(const enum CBLAS_ORDER order,
+                 const enum CBLAS_UPLO Uplo, const int N,
+                 const void *alpha, const void *A, const int lda,
+                 const void *X, const int incX, const void *beta,
+                 void *Y, const int incY)
+{
+   /*
+    * C interface to the Fortran CHEMV routine:
+    *    Y = alpha*A*X + beta*Y, A Hermitian.
+    * Row-major is emulated via conjugation (A^H = A): flip Uplo for
+    * the layout-transposed matrix, conjugate alpha/beta, copy-conjugate
+    * X into a scratch buffer, negate Y's imaginary parts in place for
+    * the call, then restore them afterwards.
+    */
+   char UL;
+#ifdef F77_CHAR
+   F77_CHAR F77_UL;
+#else
+   #define F77_UL &UL
+#endif
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+   #define F77_N N
+   #define F77_lda lda
+   /* Maps to the local (mutable) incx so the copy path can force a
+    * unit stride for the scratch buffer. */
+   #define F77_incX incx
+   #define F77_incY incY
+#endif
+   int n=0, i=0, incx=incX;
+   const float *xx= (float *)X, *alp= (float *)alpha, *bet = (float *)beta;
+   float ALPHA[2],BETA[2];
+   int tincY, tincx;
+   float *x=(float *)X, *y=(float *)Y, *st=0, *tx;
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+
+
+   CBLAS_CallFromC = 1;
+   if (order == CblasColMajor)
+   {
+      if (Uplo == CblasUpper) UL = 'U';
+      else if (Uplo == CblasLower) UL = 'L';
+      else
+      {
+         cblas_xerbla(2, "cblas_chemv","Illegal Uplo setting, %d\n",Uplo );
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+      #endif
+      F77_chemv(F77_UL, &F77_N, alpha, A, &F77_lda, X, &F77_incX,
+                beta, Y, &F77_incY);
+   }
+   else if (order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      /* Conjugated scalars for the conj-transposed computation. */
+      ALPHA[0]= *alp;
+      ALPHA[1]= -alp[1];
+      BETA[0]= *bet;
+      BETA[1]= -bet[1];
+
+      if (N > 0)
+      {
+         /* Copy conj(X) into a contiguous scratch buffer (negative
+          * incX walks the source from its far end). */
+         n = N << 1;
+         x = malloc(n*sizeof(float));
+
+         tx = x;
+         if( incX > 0 ) {
+            i = incX << 1 ;
+            tincx = 2;
+            st= x+n;
+         } else {
+            i = incX *(-2);
+            tincx = -2;
+            st = x-2;
+            x +=(n-2);
+         }
+
+         do
+         {
+            *x = *xx;
+            x[1] = -xx[1];
+            x += tincx ;
+            xx += i;
+         }
+         while (x != st);
+         x=tx;
+
+
+         #ifdef F77_INT
+            F77_incX = 1;
+         #else
+            incx = 1;
+         #endif
+
+         if(incY > 0)
+           tincY = incY;
+         else
+           tincY = -incY;
+         /* Point at Im(Y[0]) and negate every imaginary part in place,
+          * turning Y into conj(Y) for the Fortran call. */
+         y++;
+
+         i = tincY << 1;
+         n = i * N ;
+         st = y + n;
+         do {
+            *y = -(*y);
+            y += i;
+         } while(y != st);
+         y -= n;
+      } else
+         x = (float *) X;
+
+
+      /* Uplo flips because the row-major layout transposes A. */
+      if (Uplo == CblasUpper) UL = 'L';
+      else if (Uplo == CblasLower) UL = 'U';
+      else
+      {
+         cblas_xerbla(2, "cblas_chemv","Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+      #endif
+      F77_chemv(F77_UL, &F77_N, ALPHA, A, &F77_lda, x, &F77_incX,
+                BETA, Y, &F77_incY);
+   }
+   else
+   {
+      cblas_xerbla(1, "cblas_chemv","Illegal Order setting, %d\n", order);
+      CBLAS_CallFromC = 0;
+      RowMajorStrg = 0;
+      return;
+   }
+   if ( order == CblasRowMajor )
+   {
+      RowMajorStrg = 1;
+      /* Release the scratch copy and undo the in-place conjugation
+       * of Y. */
+      if ( X != x )
+         free(x);
+      if (N > 0)
+      {
+         do
+         {
+            *y = -(*y);
+            y += i;
+         }
+         while (y != st);
+      }
+   }
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_cher.c b/contrib/libs/cblas/cblas_interface/cblas_cher.c
new file mode 100644
index 0000000000..3332868ad7
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_cher.c
@@ -0,0 +1,116 @@
+/*
+ * cblas_cher.c
+ * The program is a C interface to cher.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_cher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+                const int N, const float alpha, const void *X, const int incX
+                ,void *A, const int lda)
+{
+   /*
+    * C interface to the Fortran CHER routine:
+    *    A = alpha*X*conj(X)^T + A, A Hermitian, alpha real.
+    * Row-major is handled by calling on the layout-transposed matrix:
+    * Uplo flips, and X is replaced by a conjugated scratch copy
+    * (conj of a Hermitian update transposes it).
+    */
+   char UL;
+#ifdef F77_CHAR
+   F77_CHAR F77_UL;
+#else
+   #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_lda=lda, F77_incX=incX;
+#else
+   #define F77_N N
+   #define F77_lda lda
+   /* Maps to the local (mutable) incx so the copy path can force a
+    * unit stride for the scratch buffer. */
+   #define F77_incX incx
+#endif
+   int n, i, tincx, incx=incX;
+   float *x=(float *)X, *xx=(float *)X, *tx, *st;
+
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+
+   CBLAS_CallFromC = 1;
+   if (order == CblasColMajor)
+   {
+      if (Uplo == CblasLower) UL = 'L';
+      else if (Uplo == CblasUpper) UL = 'U';
+      else
+      {
+         cblas_xerbla(2, "cblas_cher","Illegal Uplo setting, %d\n",Uplo );
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+      #endif
+
+      F77_cher(F77_UL, &F77_N, &alpha, X, &F77_incX, A, &F77_lda);
+
+   } else if (order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      /* Uplo flips because the row-major layout transposes A. */
+      if (Uplo == CblasUpper) UL = 'L';
+      else if (Uplo == CblasLower) UL = 'U';
+      else
+      {
+         cblas_xerbla(2, "cblas_cher","Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+      #endif
+      if (N > 0)
+      {
+         /* Copy conj(X) into a contiguous scratch buffer (negative
+          * incX walks the source from its far end). */
+         n = N << 1;
+         x = malloc(n*sizeof(float));
+         tx = x;
+         if( incX > 0 ) {
+            i = incX << 1 ;
+            tincx = 2;
+            st= x+n;
+         } else {
+            i = incX *(-2);
+            tincx = -2;
+            st = x-2;
+            x +=(n-2);
+         }
+         do
+         {
+            *x = *xx;
+            x[1] = -xx[1];
+            x += tincx ;
+            xx += i;
+         }
+         while (x != st);
+         x=tx;
+
+         #ifdef F77_INT
+            F77_incX = 1;
+         #else
+            incx = 1;
+         #endif
+      }
+      else x = (float *) X;
+      F77_cher(F77_UL, &F77_N, &alpha, x, &F77_incX, A, &F77_lda);
+   } else
+   {
+      cblas_xerbla(1, "cblas_cher","Illegal Order setting, %d\n", order);
+      CBLAS_CallFromC = 0;
+      RowMajorStrg = 0;
+      return;
+   }
+   /* x only differs from X when the row-major path allocated it. */
+   if(X!=x)
+      free(x);
+
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_cher2.c b/contrib/libs/cblas/cblas_interface/cblas_cher2.c
new file mode 100644
index 0000000000..1bcdd3a6dd
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_cher2.c
@@ -0,0 +1,152 @@
+/*
+ * cblas_cher2.c
+ * The program is a C interface to cher2.
+ *
+ * Keita Teranishi 3/23/98
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_cher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+                 const int N, const void *alpha, const void *X, const int incX,
+                 const void *Y, const int incY, void *A, const int lda)
+{
+   /*
+    * C interface to the Fortran CHER2 routine:
+    *    A = alpha*X*conj(Y)^T + conj(alpha)*Y*conj(X)^T + A, A Hermitian.
+    * Row-major is handled on the layout-transposed matrix: Uplo flips,
+    * both X and Y are replaced by conjugated scratch copies, and their
+    * roles are swapped in the Fortran call.
+    */
+   char UL;
+#ifdef F77_CHAR
+   F77_CHAR F77_UL;
+#else
+   #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+   #define F77_N N
+   #define F77_lda lda
+   /* Map to the local (mutable) strides so the copy path can force
+    * unit strides for the scratch buffers. */
+   #define F77_incX incx
+   #define F77_incY incy
+#endif
+   int n, i, j, tincx, tincy, incx=incX, incy=incY;
+   float *x=(float *)X, *xx=(float *)X, *y=(float *)Y,
+         *yy=(float *)Y, *tx, *ty, *stx, *sty;
+
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+
+   CBLAS_CallFromC = 1;
+   if (order == CblasColMajor)
+   {
+      if (Uplo == CblasLower) UL = 'L';
+      else if (Uplo == CblasUpper) UL = 'U';
+      else
+      {
+         cblas_xerbla(2, "cblas_cher2","Illegal Uplo setting, %d\n",Uplo );
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+      #endif
+
+      F77_cher2(F77_UL, &F77_N, alpha, X, &F77_incX,
+                Y, &F77_incY, A, &F77_lda);
+
+   } else if (order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      /* Uplo flips because the row-major layout transposes A. */
+      if (Uplo == CblasUpper) UL = 'L';
+      else if (Uplo == CblasLower) UL = 'U';
+      else
+      {
+         cblas_xerbla(2, "cblas_cher2","Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+      #endif
+      if (N > 0)
+      {
+         /* Copy conj(X) and conj(Y) into contiguous scratch buffers
+          * (negative strides walk the sources from their far ends). */
+         n = N << 1;
+         x = malloc(n*sizeof(float));
+         y = malloc(n*sizeof(float));
+         tx = x;
+         ty = y;
+         if( incX > 0 ) {
+            i = incX << 1 ;
+            tincx = 2;
+            stx= x+n;
+         } else {
+            i = incX *(-2);
+            tincx = -2;
+            stx = x-2;
+            x +=(n-2);
+         }
+
+         if( incY > 0 ) {
+            j = incY << 1;
+            tincy = 2;
+            sty= y+n;
+         } else {
+            j = incY *(-2);
+            tincy = -2;
+            sty = y-2;
+            y +=(n-2);
+         }
+
+         do
+         {
+            *x = *xx;
+            x[1] = -xx[1];
+            x += tincx ;
+            xx += i;
+         }
+         while (x != stx);
+
+         do
+         {
+            *y = *yy;
+            y[1] = -yy[1];
+            y += tincy ;
+            yy += j;
+         }
+         while (y != sty);
+
+         x=tx;
+         y=ty;
+
+         #ifdef F77_INT
+            F77_incX = 1;
+            F77_incY = 1;
+         #else
+            incx = 1;
+            incy = 1;
+         #endif
+      } else
+      {
+         x = (float *) X;
+         y = (float *) Y;
+      }
+      /* X and Y swap roles in the transposed update. */
+      F77_cher2(F77_UL, &F77_N, alpha, y, &F77_incY, x,
+                &F77_incX, A, &F77_lda);
+   } else
+   {
+      cblas_xerbla(1, "cblas_cher2","Illegal Order setting, %d\n", order);
+      CBLAS_CallFromC = 0;
+      RowMajorStrg = 0;
+      return;
+   }
+   /* Scratch buffers only exist when the row-major path allocated them. */
+   if(X!=x)
+      free(x);
+   if(Y!=y)
+      free(y);
+
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_cher2k.c b/contrib/libs/cblas/cblas_interface/cblas_cher2k.c
new file mode 100644
index 0000000000..b4082ef235
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_cher2k.c
@@ -0,0 +1,111 @@
+/*
+ *
+ * cblas_cher2k.c
+ * This program is a C interface to cher2k.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_cher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+                  const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+                  const void *alpha, const void *A, const int lda,
+                  const void *B, const int ldb, const float beta,
+                  void *C, const int ldc)
+{
+   /*
+    * C interface to the Fortran CHER2K routine:
+    *    C = alpha*A*B^H + conj(alpha)*B*A^H + beta*C (or the ^H form),
+    * with C Hermitian and beta real.  Row-major storage is handled
+    * without copying: Uplo flips, NoTrans<->ConjTrans swap (passing
+    * 'N'/'C' as appropriate), and alpha is conjugated.
+    */
+   char UL, TR;
+#ifdef F77_CHAR
+   F77_CHAR F77_TR, F77_UL;
+#else
+   #define F77_TR &TR
+   #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_K=K, F77_lda=lda, F77_ldb=ldb;
+   F77_INT F77_ldc=ldc;
+#else
+   #define F77_N N
+   #define F77_K K
+   #define F77_lda lda
+   #define F77_ldb ldb
+   #define F77_ldc ldc
+#endif
+
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   float ALPHA[2];
+   const float *alp=(float *)alpha;
+
+   CBLAS_CallFromC = 1;
+   RowMajorStrg = 0;
+
+   if( Order == CblasColMajor )
+   {
+
+      if( Uplo == CblasUpper) UL='U';
+      else if ( Uplo == CblasLower ) UL='L';
+      else
+      {
+         cblas_xerbla(2, "cblas_cher2k", "Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      if( Trans == CblasTrans) TR ='T';
+      else if ( Trans == CblasConjTrans ) TR='C';
+      else if ( Trans == CblasNoTrans )   TR='N';
+      else
+      {
+         cblas_xerbla(3, "cblas_cher2k", "Illegal Trans setting, %d\n", Trans);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+         F77_TR = C2F_CHAR(&TR);
+      #endif
+
+      F77_cher2k(F77_UL, F77_TR, &F77_N, &F77_K, alpha, A, &F77_lda, B, &F77_ldb, &beta, C, &F77_ldc);
+   } else if (Order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+
+      /* Transposed problem: Uplo flips and the operation flips
+       * NoTrans<->ConjTrans ('T' is not a valid her2k op, so the
+       * Trans case is also mapped to 'N'). */
+      if( Uplo == CblasUpper) UL='L';
+      else if ( Uplo == CblasLower ) UL='U';
+      else
+      {
+         cblas_xerbla(2, "cblas_cher2k", "Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      if( Trans == CblasTrans) TR ='N';
+      else if ( Trans == CblasConjTrans ) TR='N';
+      else if ( Trans == CblasNoTrans )   TR='C';
+      else
+      {
+         cblas_xerbla(3, "cblas_cher2k", "Illegal Trans setting, %d\n", Trans);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+         F77_TR = C2F_CHAR(&TR);
+      #endif
+
+      /* Conjugated alpha for the conj-transposed computation. */
+      ALPHA[0]= *alp;
+      ALPHA[1]= -alp[1];
+      F77_cher2k(F77_UL,F77_TR, &F77_N, &F77_K, ALPHA, A, &F77_lda, B, &F77_ldb, &beta, C, &F77_ldc);
+   }
+   else  cblas_xerbla(1, "cblas_cher2k", "Illegal Order setting, %d\n", Order);
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_cherk.c b/contrib/libs/cblas/cblas_interface/cblas_cherk.c
new file mode 100644
index 0000000000..fd0e09b43b
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_cherk.c
@@ -0,0 +1,105 @@
+/*
+ *
+ * cblas_cherk.c
+ * This program is a C interface to cherk.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_cherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+                 const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+                 const float alpha, const void *A, const int lda,
+                 const float beta, void *C, const int ldc)
+{
+   /*
+    * C interface to the Fortran CHERK routine:
+    *    C = alpha*A*A^H + beta*C  (or  alpha*A^H*A + beta*C),
+    * with C Hermitian and alpha/beta real.  Row-major storage is
+    * handled without copying: Uplo flips and NoTrans<->ConjTrans swap.
+    *
+    * Fix vs. original: the row-major F77_CHAR block referenced
+    * F77_SD/SD, which are not declared in this function (a copy-paste
+    * from cblas_chemm) and broke the build whenever F77_CHAR was
+    * defined; it now converts TR, matching the column-major branch.
+    */
+   char UL, TR;
+#ifdef F77_CHAR
+   F77_CHAR F77_TR, F77_UL;
+#else
+   #define F77_TR &TR
+   #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_K=K, F77_lda=lda;
+   F77_INT F77_ldc=ldc;
+#else
+   #define F77_N N
+   #define F77_K K
+   #define F77_lda lda
+   #define F77_ldc ldc
+#endif
+
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+   CBLAS_CallFromC = 1;
+
+   if( Order == CblasColMajor )
+   {
+      if( Uplo == CblasUpper) UL='U';
+      else if ( Uplo == CblasLower ) UL='L';
+      else
+      {
+         cblas_xerbla(2, "cblas_cherk", "Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      if( Trans == CblasTrans) TR ='T';
+      else if ( Trans == CblasConjTrans ) TR='C';
+      else if ( Trans == CblasNoTrans )   TR='N';
+      else
+      {
+         cblas_xerbla(3, "cblas_cherk", "Illegal Trans setting, %d\n", Trans);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+         F77_TR = C2F_CHAR(&TR);
+      #endif
+
+      F77_cherk(F77_UL, F77_TR, &F77_N, &F77_K, &alpha, A, &F77_lda,
+                &beta, C, &F77_ldc);
+   } else if (Order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      /* Transposed problem: Uplo flips, NoTrans<->ConjTrans swap
+       * ('T' is not a valid herk op, so Trans also maps to 'N').
+       * NOTE(review): the argument position 3 for Uplo below differs
+       * from the column-major branch's 2 — kept as-is to match the
+       * vendored upstream sources. */
+      if( Uplo == CblasUpper) UL='L';
+      else if ( Uplo == CblasLower ) UL='U';
+      else
+      {
+         cblas_xerbla(3, "cblas_cherk", "Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      if( Trans == CblasTrans) TR ='N';
+      else if ( Trans == CblasConjTrans ) TR='N';
+      else if ( Trans == CblasNoTrans )   TR='C';
+      else
+      {
+         cblas_xerbla(3, "cblas_cherk", "Illegal Trans setting, %d\n", Trans);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+         F77_TR = C2F_CHAR(&TR);
+      #endif
+
+      F77_cherk(F77_UL, F77_TR, &F77_N, &F77_K, &alpha, A, &F77_lda,
+                &beta, C, &F77_ldc);
+   }
+   else  cblas_xerbla(1, "cblas_cherk", "Illegal Order setting, %d\n", Order);
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_chpmv.c b/contrib/libs/cblas/cblas_interface/cblas_chpmv.c
new file mode 100644
index 0000000000..c805756ebd
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_chpmv.c
@@ -0,0 +1,160 @@
+/*
+ * cblas_chpmv.c
+ * The program is a C interface of chpmv
+ *
+ * Keita Teranishi 5/18/98
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_chpmv(const enum CBLAS_ORDER order,
+                 const enum CBLAS_UPLO Uplo,const int N,
+                 const void *alpha, const void *AP,
+                 const void *X, const int incX, const void *beta,
+                 void *Y, const int incY)
+{
+   /*
+    * C interface to the Fortran CHPMV routine:
+    *    Y = alpha*A*X + beta*Y, A Hermitian in packed storage (AP).
+    * Row-major is emulated via conjugation (A^H = A): flip Uplo,
+    * conjugate alpha/beta, copy-conjugate X into a scratch buffer, and
+    * negate Y's imaginary parts around the call.
+    *
+    * Fix vs. original: the F77_INT declaration initialized F77_K=K and
+    * F77_lda=lda, but chpmv has neither a K nor an lda parameter
+    * (packed storage), so the build broke whenever F77_INT was
+    * defined; only N/incX/incY are converted now, matching the #else
+    * macro branch.
+    */
+   char UL;
+#ifdef F77_CHAR
+   F77_CHAR F77_UL;
+#else
+   #define F77_UL &UL
+#endif
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+   #define F77_N N
+   /* Maps to the local (mutable) incx so the copy path can force a
+    * unit stride for the scratch buffer. */
+   #define F77_incX incx
+   #define F77_incY incY
+#endif
+   int n, i=0, incx=incX;
+   const float *xx= (float *)X, *alp= (float *)alpha, *bet = (float *)beta;
+   float ALPHA[2],BETA[2];
+   int tincY, tincx;
+   float *x=(float *)X, *y=(float *)Y, *st=0, *tx;
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+
+   CBLAS_CallFromC = 1;
+   if (order == CblasColMajor)
+   {
+      if (Uplo == CblasLower) UL = 'L';
+      else if (Uplo == CblasUpper) UL = 'U';
+      else
+      {
+         cblas_xerbla(2, "cblas_chpmv","Illegal Uplo setting, %d\n",Uplo );
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+      #endif
+      F77_chpmv(F77_UL, &F77_N, alpha, AP, X,
+                &F77_incX, beta, Y, &F77_incY);
+   }
+   else if (order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      /* Conjugated scalars for the conj-transposed computation. */
+      ALPHA[0]= *alp;
+      ALPHA[1]= -alp[1];
+      BETA[0]= *bet;
+      BETA[1]= -bet[1];
+
+      if (N > 0)
+      {
+         /* Copy conj(X) into a contiguous scratch buffer (negative
+          * incX walks the source from its far end). */
+         n = N << 1;
+         x = malloc(n*sizeof(float));
+
+         tx = x;
+         if( incX > 0 ) {
+            i = incX << 1;
+            tincx = 2;
+            st= x+n;
+         } else {
+            i = incX *(-2);
+            tincx = -2;
+            st = x-2;
+            x +=(n-2);
+         }
+
+         do
+         {
+            *x = *xx;
+            x[1] = -xx[1];
+            x += tincx ;
+            xx += i;
+         }
+         while (x != st);
+         x=tx;
+
+
+         #ifdef F77_INT
+            F77_incX = 1;
+         #else
+            incx = 1;
+         #endif
+
+         if(incY > 0)
+           tincY = incY;
+         else
+           tincY = -incY;
+         /* Point at Im(Y[0]) and negate every imaginary part in place,
+          * turning Y into conj(Y) for the Fortran call. */
+         y++;
+
+         i = tincY << 1;
+         n = i * N ;
+         st = y + n;
+         do {
+            *y = -(*y);
+            y += i;
+         } while(y != st);
+         y -= n;
+      } else
+         x = (float *) X;
+
+
+      /* Uplo flips because the row-major layout transposes A. */
+      if (Uplo == CblasUpper) UL = 'L';
+      else if (Uplo == CblasLower) UL = 'U';
+      else
+      {
+         cblas_xerbla(2, "cblas_chpmv","Illegal Uplo setting, %d\n", Uplo );
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+      #endif
+
+      F77_chpmv(F77_UL, &F77_N, ALPHA,
+                AP, x, &F77_incX, BETA, Y, &F77_incY);
+   }
+   else
+   {
+      cblas_xerbla(1, "cblas_chpmv","Illegal Order setting, %d\n", order);
+      CBLAS_CallFromC = 0;
+      RowMajorStrg = 0;
+      return;
+   }
+   if ( order == CblasRowMajor )
+   {
+      RowMajorStrg = 1;
+      /* Release the scratch copy and undo the in-place conjugation
+       * of Y. */
+      if(X!=x)
+         free(x);
+      if (N > 0)
+      {
+         do
+         {
+            *y = -(*y);
+            y += i;
+         }
+         while (y != st);
+      }
+   }
+
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_chpr.c b/contrib/libs/cblas/cblas_interface/cblas_chpr.c
new file mode 100644
index 0000000000..9b39f38bdf
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_chpr.c
@@ -0,0 +1,115 @@
+/*
+ * cblas_chpr.c
+ * The program is a C interface to chpr.
+ *
+ * Keita Teranishi 3/23/98
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_chpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const void *X,
+ const int incX, void *A)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incx
+#endif
+ int n, i, tincx, incx=incX;
+ float *x=(float *)X, *xx=(float *)X, *tx, *st;
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasLower) UL = 'L';
+ else if (Uplo == CblasUpper) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_chpr","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+
+ F77_chpr(F77_UL, &F77_N, &alpha, X, &F77_incX, A);
+
+ } else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_chpr","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ if (N > 0)
+ {
+ n = N << 1;
+ x = malloc(n*sizeof(float));
+ tx = x;
+ if( incX > 0 ) {
+ i = incX << 1;
+ tincx = 2;
+ st= x+n;
+ } else {
+ i = incX *(-2);
+ tincx = -2;
+ st = x-2;
+ x +=(n-2);
+ }
+ do
+ {
+ *x = *xx;
+ x[1] = -xx[1];
+ x += tincx ;
+ xx += i;
+ }
+ while (x != st);
+ x=tx;
+ #ifdef F77_INT
+ F77_incX = 1;
+ #else
+ incx = 1;
+ #endif
+ }
+ else x = (float *) X;
+
+ F77_chpr(F77_UL, &F77_N, &alpha, x, &F77_incX, A);
+
+ } else
+ {
+ cblas_xerbla(1, "cblas_chpr","Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if(X!=x)
+ free(x);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_chpr2.c b/contrib/libs/cblas/cblas_interface/cblas_chpr2.c
new file mode 100644
index 0000000000..e43077db6a
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_chpr2.c
@@ -0,0 +1,149 @@
+/*
+ * cblas_chpr2.c
+ * The program is a C interface to chpr2.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_chpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N,const void *alpha, const void *X,
+ const int incX,const void *Y, const int incY, void *Ap)
+
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incx
+ #define F77_incY incy
+#endif
+ int n, i, j, tincx, tincy, incx=incX, incy=incY;
+ float *x=(float *)X, *xx=(float *)X, *y=(float *)Y,
+ *yy=(float *)Y, *tx, *ty, *stx, *sty;
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasLower) UL = 'L';
+ else if (Uplo == CblasUpper) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_chpr2","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+
+ F77_chpr2(F77_UL, &F77_N, alpha, X, &F77_incX, Y, &F77_incY, Ap);
+
+ } else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_chpr2","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ if (N > 0)
+ {
+ n = N << 1;
+ x = malloc(n*sizeof(float));
+ y = malloc(n*sizeof(float));
+ tx = x;
+ ty = y;
+ if( incX > 0 ) {
+ i = incX << 1 ;
+ tincx = 2;
+ stx= x+n;
+ } else {
+ i = incX *(-2);
+ tincx = -2;
+ stx = x-2;
+ x +=(n-2);
+ }
+
+ if( incY > 0 ) {
+ j = incY << 1;
+ tincy = 2;
+ sty= y+n;
+ } else {
+ j = incY *(-2);
+ tincy = -2;
+ sty = y-2;
+ y +=(n-2);
+ }
+
+ do
+ {
+ *x = *xx;
+ x[1] = -xx[1];
+ x += tincx ;
+ xx += i;
+ }
+ while (x != stx);
+ do
+ {
+ *y = *yy;
+ y[1] = -yy[1];
+ y += tincy ;
+ yy += j;
+ }
+ while (y != sty);
+
+ x=tx;
+ y=ty;
+
+ #ifdef F77_INT
+ F77_incX = 1;
+ F77_incY = 1;
+ #else
+ incx = 1;
+ incy = 1;
+ #endif
+
+ } else
+ {
+ x = (float *) X;
+ y = (void *) Y;
+ }
+ F77_chpr2(F77_UL, &F77_N, alpha, y, &F77_incY, x, &F77_incX, Ap);
+ } else
+ {
+ cblas_xerbla(1, "cblas_chpr2","Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if(X!=x)
+ free(x);
+ if(Y!=y)
+ free(y);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_cscal.c b/contrib/libs/cblas/cblas_interface/cblas_cscal.c
new file mode 100644
index 0000000000..a23e6ee577
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_cscal.c
@@ -0,0 +1,21 @@
+/*
+ * cblas_cscal.c
+ *
+ * The program is a C interface to cscal.f.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_cscal( const int N, const void *alpha, void *X,
+ const int incX)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incX
+#endif
+ F77_cscal( &F77_N, alpha, X, &F77_incX);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_csscal.c b/contrib/libs/cblas/cblas_interface/cblas_csscal.c
new file mode 100644
index 0000000000..39983fe071
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_csscal.c
@@ -0,0 +1,21 @@
+/*
+ * cblas_csscal.c
+ *
+ * The program is a C interface to csscal.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_csscal( const int N, const float alpha, void *X,
+ const int incX)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incX
+#endif
+ F77_csscal( &F77_N, &alpha, X, &F77_incX);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_cswap.c b/contrib/libs/cblas/cblas_interface/cblas_cswap.c
new file mode 100644
index 0000000000..1272820727
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_cswap.c
@@ -0,0 +1,22 @@
+/*
+ * cblas_cswap.c
+ *
+ * The program is a C interface to cswap.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_cswap( const int N, void *X, const int incX, void *Y,
+ const int incY)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ F77_cswap( &F77_N, X, &F77_incX, Y, &F77_incY);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_csymm.c b/contrib/libs/cblas/cblas_interface/cblas_csymm.c
new file mode 100644
index 0000000000..4db34e346d
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_csymm.c
@@ -0,0 +1,106 @@
+/*
+ *
+ * cblas_csymm.c
+ * This program is a C interface to csymm.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_csymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const void *beta,
+ void *C, const int ldc)
+{
+ char SD, UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_SD, F77_UL;
+#else
+ #define F77_SD &SD
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_ldb=ldb;
+ F77_INT F77_ldc=ldc;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_ldb ldb
+ #define F77_ldc ldc
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+ if( Side == CblasRight) SD='R';
+ else if ( Side == CblasLeft ) SD='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_csymm", "Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Uplo == CblasUpper) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(3, "cblas_csymm", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_SD = C2F_CHAR(&SD);
+ #endif
+
+ F77_csymm(F77_SD, F77_UL, &F77_M, &F77_N, alpha, A, &F77_lda,
+ B, &F77_ldb, beta, C, &F77_ldc);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if( Side == CblasRight) SD='L';
+ else if ( Side == CblasLeft ) SD='R';
+ else
+ {
+ cblas_xerbla(2, "cblas_csymm", "Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Uplo == CblasUpper) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_csymm", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_SD = C2F_CHAR(&SD);
+ #endif
+
+ F77_csymm(F77_SD, F77_UL, &F77_N, &F77_M, alpha, A, &F77_lda,
+ B, &F77_ldb, beta, C, &F77_ldc);
+ }
+ else cblas_xerbla(1, "cblas_csymm", "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_csyr2k.c b/contrib/libs/cblas/cblas_interface/cblas_csyr2k.c
new file mode 100644
index 0000000000..5ca3f34cda
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_csyr2k.c
@@ -0,0 +1,108 @@
+/*
+ *
+ * cblas_csyr2k.c
+ * This program is a C interface to csyr2k.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_csyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const void *beta,
+ void *C, const int ldc)
+{
+ char UL, TR;
+#ifdef F77_CHAR
+ F77_CHAR F77_TR, F77_UL;
+#else
+ #define F77_TR &TR
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_K=K, F77_lda=lda, F77_ldb=ldb;
+ F77_INT F77_ldc=ldc;
+#else
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_ldb ldb
+ #define F77_ldc ldc
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+
+ if( Uplo == CblasUpper) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_csyr2k", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Trans == CblasTrans) TR ='T';
+ else if ( Trans == CblasConjTrans ) TR='C';
+ else if ( Trans == CblasNoTrans ) TR='N';
+ else
+ {
+ cblas_xerbla(3, "cblas_csyr2k", "Illegal Trans setting, %d\n", Trans);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TR = C2F_CHAR(&TR);
+ #endif
+
+ F77_csyr2k(F77_UL, F77_TR, &F77_N, &F77_K, alpha, A, &F77_lda,
+ B, &F77_ldb, beta, C, &F77_ldc);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if( Uplo == CblasUpper) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_csyr2k", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if( Trans == CblasTrans) TR ='N';
+ else if ( Trans == CblasConjTrans ) TR='N';
+ else if ( Trans == CblasNoTrans ) TR='T';
+ else
+ {
+ cblas_xerbla(3, "cblas_csyr2k", "Illegal Trans setting, %d\n", Trans);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TR = C2F_CHAR(&TR);
+ #endif
+
+ F77_csyr2k(F77_UL, F77_TR, &F77_N, &F77_K, alpha, A, &F77_lda, B, &F77_ldb, beta, C, &F77_ldc);
+ }
+ else cblas_xerbla(1, "cblas_csyr2k", "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_csyrk.c b/contrib/libs/cblas/cblas_interface/cblas_csyrk.c
new file mode 100644
index 0000000000..3f0bb07eac
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_csyrk.c
@@ -0,0 +1,108 @@
+/*
+ *
+ * cblas_csyrk.c
+ * This program is a C interface to csyrk.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_csyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const void *alpha, const void *A, const int lda,
+ const void *beta, void *C, const int ldc)
+{
+ char UL, TR;
+#ifdef F77_CHAR
+ F77_CHAR F77_TR, F77_UL;
+#else
+ #define F77_TR &TR
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_K=K, F77_lda=lda;
+ F77_INT F77_ldc=ldc;
+#else
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_ldc ldc
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+
+ if( Uplo == CblasUpper) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_csyrk", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Trans == CblasTrans) TR ='T';
+ else if ( Trans == CblasConjTrans ) TR='C';
+ else if ( Trans == CblasNoTrans ) TR='N';
+ else
+ {
+ cblas_xerbla(3, "cblas_csyrk", "Illegal Trans setting, %d\n", Trans);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TR = C2F_CHAR(&TR);
+ #endif
+
+ F77_csyrk(F77_UL, F77_TR, &F77_N, &F77_K, alpha, A, &F77_lda,
+ beta, C, &F77_ldc);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if( Uplo == CblasUpper) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_csyrk", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if( Trans == CblasTrans) TR ='N';
+ else if ( Trans == CblasConjTrans ) TR='N';
+ else if ( Trans == CblasNoTrans ) TR='T';
+ else
+ {
+ cblas_xerbla(3, "cblas_csyrk", "Illegal Trans setting, %d\n", Trans);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TR = C2F_CHAR(&TR);
+ #endif
+
+ F77_csyrk(F77_UL, F77_TR, &F77_N, &F77_K, alpha, A, &F77_lda,
+ beta, C, &F77_ldc);
+ }
+ else cblas_xerbla(1, "cblas_csyrk", "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
+
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ctbmv.c b/contrib/libs/cblas/cblas_interface/cblas_ctbmv.c
new file mode 100644
index 0000000000..7845cc8284
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ctbmv.c
@@ -0,0 +1,158 @@
+/*
+ * cblas_ctbmv.c
+ * The program is a C interface to ctbmv.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ctbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const void *A, const int lda,
+ void *X, const int incX)
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_lda=lda, F77_K=K, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_incX incX
+#endif
+ int n, i=0, tincX;
+ float *st=0, *x=(float *)X;
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ctbmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_ctbmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ctbmv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_ctbmv( F77_UL, F77_TA, F77_DI, &F77_N, &F77_K, A, &F77_lda, X,
+ &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_ctbmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans)
+ {
+ TA = 'N';
+ if ( N > 0)
+ {
+ if(incX > 0)
+ tincX = incX;
+ else
+ tincX = -incX;
+ i = tincX << 1;
+ n = i * N;
+ x++;
+ st = x + n;
+ do
+ {
+ *x = -(*x);
+ x+= i;
+ }
+ while (x != st);
+ x -= n;
+ }
+ }
+ else
+ {
+ cblas_xerbla(3, "cblas_ctbmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ctbmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_ctbmv( F77_UL, F77_TA, F77_DI, &F77_N, &F77_K, A, &F77_lda, X,
+ &F77_incX);
+
+ if (TransA == CblasConjTrans)
+ {
+ if (N > 0)
+ {
+ do
+ {
+ *x = -(*x);
+ x += i;
+ }
+ while (x != st);
+ }
+ }
+ }
+ else cblas_xerbla(1, "cblas_ctbmv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ctbsv.c b/contrib/libs/cblas/cblas_interface/cblas_ctbsv.c
new file mode 100644
index 0000000000..ab4646b546
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ctbsv.c
@@ -0,0 +1,162 @@
+/*
+ * cblas_ctbsv.c
+ * The program is a C interface to ctbsv.
+ *
+ * Keita Teranishi 3/23/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ctbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const void *A, const int lda,
+ void *X, const int incX)
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_lda=lda, F77_K=K, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_incX incX
+#endif
+ int n, i=0, tincX;
+ float *st=0,*x=(float *)X;
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ctbsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_ctbsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ctbsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_ctbsv( F77_UL, F77_TA, F77_DI, &F77_N, &F77_K, A, &F77_lda, X,
+ &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_ctbsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans)
+ {
+ TA = 'N';
+ if ( N > 0)
+ {
+ if ( incX > 0 )
+ tincX = incX;
+ else
+ tincX = -incX;
+
+ n = N*2*(tincX);
+
+ x++;
+
+ st=x+n;
+
+ i = tincX << 1;
+ do
+ {
+ *x = -(*x);
+ x+=i;
+ }
+ while (x != st);
+ x -= n;
+ }
+ }
+ else
+ {
+ cblas_xerbla(3, "cblas_ctbsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ctbsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_ctbsv( F77_UL, F77_TA, F77_DI, &F77_N, &F77_K, A, &F77_lda, X,
+ &F77_incX);
+
+ if (TransA == CblasConjTrans)
+ {
+ if (N > 0)
+ {
+ do
+ {
+ *x = -(*x);
+ x+= i;
+ }
+ while (x != st);
+ }
+ }
+ }
+ else cblas_xerbla(1, "cblas_ctbsv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ctpmv.c b/contrib/libs/cblas/cblas_interface/cblas_ctpmv.c
new file mode 100644
index 0000000000..7a4d63af22
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ctpmv.c
@@ -0,0 +1,152 @@
+/*
+ * cblas_ctpmv.c
+ * The program is a C interface to ctpmv.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ctpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *Ap, void *X, const int incX)
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incX
+#endif
+ int n, i=0, tincX;
+ float *st=0,*x=(float *)X;
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ctpmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_ctpmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ctpmv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_ctpmv( F77_UL, F77_TA, F77_DI, &F77_N, Ap, X, &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_ctpmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans)
+ {
+ TA = 'N';
+ if ( N > 0)
+ {
+ if(incX > 0)
+ tincX = incX;
+ else
+ tincX = -incX;
+ i = tincX << 1;
+ n = i * N;
+ x++;
+ st = x + n;
+ do
+ {
+ *x = -(*x);
+ x += i;
+ }
+ while (x != st);
+ x -= n;
+ }
+ }
+ else
+ {
+ cblas_xerbla(3, "cblas_ctpmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ctpmv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_ctpmv( F77_UL, F77_TA, F77_DI, &F77_N, Ap, X,&F77_incX);
+ if (TransA == CblasConjTrans)
+ {
+ if (N > 0)
+ {
+ do
+ {
+ *x = -(*x);
+ x += i;
+ }
+ while (x != st);
+ }
+ }
+ }
+ else cblas_xerbla(1, "cblas_ctpmv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ctpsv.c b/contrib/libs/cblas/cblas_interface/cblas_ctpsv.c
new file mode 100644
index 0000000000..d39687cbf9
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ctpsv.c
@@ -0,0 +1,157 @@
+/*
+ * cblas_ctpsv.c
+ * The program is a C interface to ctpsv.
+ *
+ * Keita Teranishi 3/23/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ctpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *Ap, void *X, const int incX)
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incX
+#endif
+ int n, i=0, tincX;
+ float *st=0, *x=(float*)X;
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ctpsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_ctpsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ctpsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_ctpsv( F77_UL, F77_TA, F77_DI, &F77_N, Ap, X, &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_ctpsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans)
+ {
+ TA = 'N';
+ if ( N > 0)
+ {
+ if ( incX > 0 )
+ tincX = incX;
+ else
+ tincX = -incX;
+
+ n = N*2*(tincX);
+
+ x++;
+
+ st=x+n;
+
+ i = tincX << 1;
+ do
+ {
+ *x = -(*x);
+ x+=i;
+ }
+ while (x != st);
+ x -= n;
+ }
+ }
+ else
+ {
+ cblas_xerbla(3, "cblas_ctpsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ctpsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_ctpsv( F77_UL, F77_TA, F77_DI, &F77_N, Ap, X,&F77_incX);
+
+ if (TransA == CblasConjTrans)
+ {
+ if (N > 0)
+ {
+ do
+ {
+ *x = -(*x);
+ x += i;
+ }
+ while (x != st);
+ }
+ }
+ }
+ else cblas_xerbla(1, "cblas_ctpsv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ctrmm.c b/contrib/libs/cblas/cblas_interface/cblas_ctrmm.c
new file mode 100644
index 0000000000..d70bfd308a
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ctrmm.c
@@ -0,0 +1,144 @@
+/*
+ *
+ * cblas_ctrmm.c
+ * This program is a C interface to ctrmm.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ctrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ void *B, const int ldb)
+{
+ char UL, TA, SD, DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_SD, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_SD &SD
+ #define F77_DI &DI
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_ldb=ldb;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_ldb ldb
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+ if( Side == CblasRight ) SD='R';
+ else if ( Side == CblasLeft ) SD='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ctrmm", "Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if( Uplo == CblasUpper ) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(3, "cblas_ctrmm", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( TransA == CblasTrans ) TA ='T';
+ else if ( TransA == CblasConjTrans ) TA='C';
+ else if ( TransA == CblasNoTrans ) TA='N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ctrmm", "Illegal Trans setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Diag == CblasUnit ) DI='U';
+ else if ( Diag == CblasNonUnit ) DI='N';
+ else cblas_xerbla(5, "cblas_ctrmm",
+ "Illegal Diag setting, %d\n", Diag);
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_SD = C2F_CHAR(&SD);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_ctrmm(F77_SD, F77_UL, F77_TA, F77_DI, &F77_M, &F77_N, alpha, A, &F77_lda, B, &F77_ldb);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if( Side == CblasRight ) SD='L';
+ else if ( Side == CblasLeft ) SD='R';
+ else
+ {
+ cblas_xerbla(2, "cblas_ctrmm", "Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Uplo == CblasUpper ) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_ctrmm", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( TransA == CblasTrans ) TA ='T';
+ else if ( TransA == CblasConjTrans ) TA='C';
+ else if ( TransA == CblasNoTrans ) TA='N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ctrmm", "Illegal Trans setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Diag == CblasUnit ) DI='U';
+ else if ( Diag == CblasNonUnit ) DI='N';
+ else
+ {
+ cblas_xerbla(5, "cblas_ctrmm", "Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_SD = C2F_CHAR(&SD);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_ctrmm(F77_SD, F77_UL, F77_TA, F77_DI, &F77_N, &F77_M, alpha, A, &F77_lda, B, &F77_ldb);
+ }
+ else cblas_xerbla(1, "cblas_ctrmm", "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ctrmv.c b/contrib/libs/cblas/cblas_interface/cblas_ctrmv.c
new file mode 100644
index 0000000000..3d284388ce
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ctrmv.c
@@ -0,0 +1,155 @@
+/*
+ * cblas_ctrmv.c
+ * The program is a C interface to ctrmv.
+ *
+ * Keita Teranishi 3/23/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ctrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *A, const int lda,
+ void *X, const int incX)
+
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_lda=lda, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_incX incX
+#endif
+ int n, i=0, tincX;
+ float *st=0,*x=(float *)X;
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ctrmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_ctrmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ctrmv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_ctrmv( F77_UL, F77_TA, F77_DI, &F77_N, A, &F77_lda, X,
+ &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_ctrmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans)
+ {
+ TA = 'N';
+ if ( N > 0)
+ {
+ if(incX > 0)
+ tincX = incX;
+ else
+ tincX = -incX;
+ i = tincX << 1;
+ n = i * N;
+ st = x + n;
+ do
+ {
+ x[1] = -x[1];
+ x+= i;
+ }
+ while (x != st);
+ x -= n;
+ }
+ }
+ else
+ {
+ cblas_xerbla(3, "cblas_ctrmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ctrmv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_ctrmv( F77_UL, F77_TA, F77_DI, &F77_N, A, &F77_lda, X,
+ &F77_incX);
+ if (TransA == CblasConjTrans)
+ {
+ if (N > 0)
+ {
+ do
+ {
+ x[1] = -x[1];
+ x += i;
+ }
+ while (x != st);
+ }
+ }
+ }
+ else cblas_xerbla(1, "cblas_ctrmv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ctrsm.c b/contrib/libs/cblas/cblas_interface/cblas_ctrsm.c
new file mode 100644
index 0000000000..00c592d56a
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ctrsm.c
@@ -0,0 +1,155 @@
+/*
+ *
+ * cblas_ctrsm.c
+ * This program is a C interface to ctrsm.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ctrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ void *B, const int ldb)
+{
+ char UL, TA, SD, DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_SD, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_SD &SD
+ #define F77_DI &DI
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_ldb=ldb;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_ldb ldb
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+
+ if( Side == CblasRight) SD='R';
+ else if ( Side == CblasLeft ) SD='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ctrsm", "Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Uplo == CblasUpper) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(3, "cblas_ctrsm", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( TransA == CblasTrans) TA ='T';
+ else if ( TransA == CblasConjTrans ) TA='C';
+ else if ( TransA == CblasNoTrans ) TA='N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ctrsm", "Illegal Trans setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Diag == CblasUnit ) DI='U';
+ else if ( Diag == CblasNonUnit ) DI='N';
+ else
+ {
+ cblas_xerbla(5, "cblas_ctrsm", "Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_SD = C2F_CHAR(&SD);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_ctrsm(F77_SD, F77_UL, F77_TA, F77_DI, &F77_M, &F77_N, alpha, A,
+ &F77_lda, B, &F77_ldb);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+
+ if( Side == CblasRight) SD='L';
+ else if ( Side == CblasLeft ) SD='R';
+ else
+ {
+ cblas_xerbla(2, "cblas_ctrsm", "Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Uplo == CblasUpper) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_ctrsm", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( TransA == CblasTrans) TA ='T';
+ else if ( TransA == CblasConjTrans ) TA='C';
+ else if ( TransA == CblasNoTrans ) TA='N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ctrsm", "Illegal Trans setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Diag == CblasUnit ) DI='U';
+ else if ( Diag == CblasNonUnit ) DI='N';
+ else
+ {
+ cblas_xerbla(5, "cblas_ctrsm", "Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_SD = C2F_CHAR(&SD);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+
+ F77_ctrsm(F77_SD, F77_UL, F77_TA, F77_DI, &F77_N, &F77_M, alpha, A,
+ &F77_lda, B, &F77_ldb);
+ }
+ else cblas_xerbla(1, "cblas_ctrsm", "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ctrsv.c b/contrib/libs/cblas/cblas_interface/cblas_ctrsv.c
new file mode 100644
index 0000000000..39ff644cbd
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ctrsv.c
@@ -0,0 +1,156 @@
+/*
+ * cblas_ctrsv.c
+ * The program is a C interface to ctrsv.
+ *
+ * Keita Teranishi 3/23/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ctrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *A, const int lda, void *X,
+ const int incX)
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_lda=lda, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_incX incX
+#endif
+ int n, i=0, tincX;
+ float *st=0,*x=(float *)X;
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ctrsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_ctrsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ctrsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_ctrsv( F77_UL, F77_TA, F77_DI, &F77_N, A, &F77_lda, X,
+ &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_ctrsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans)
+ {
+ TA = 'N';
+ if ( N > 0)
+ {
+ if ( incX > 0 )
+ tincX = incX;
+ else
+ tincX = -incX;
+
+ n = N*2*(tincX);
+ x++;
+ st=x+n;
+ i = tincX << 1;
+ do
+ {
+ *x = -(*x);
+ x+=i;
+ }
+ while (x != st);
+ x -= n;
+ }
+ }
+ else
+ {
+ cblas_xerbla(3, "cblas_ctrsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ctrsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_ctrsv( F77_UL, F77_TA, F77_DI, &F77_N, A, &F77_lda, X,
+ &F77_incX);
+ if (TransA == CblasConjTrans)
+ {
+ if (N > 0)
+ {
+ do
+ {
+ *x = -(*x);
+ x += i;
+ }
+ while (x != st);
+ }
+ }
+ }
+ else cblas_xerbla(1, "cblas_ctrsv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dasum.c b/contrib/libs/cblas/cblas_interface/cblas_dasum.c
new file mode 100644
index 0000000000..1a3667f2d7
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dasum.c
@@ -0,0 +1,23 @@
+/*
+ * cblas_dasum.c
+ *
+ * The program is a C interface to dasum.
+ * It calls the fortran wrapper before calling dasum.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+double cblas_dasum( const int N, const double *X, const int incX)
+{
+ double asum;
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incX
+#endif
+ F77_dasum_sub( &F77_N, X, &F77_incX, &asum);
+ return asum;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_daxpy.c b/contrib/libs/cblas/cblas_interface/cblas_daxpy.c
new file mode 100644
index 0000000000..3678137fb7
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_daxpy.c
@@ -0,0 +1,22 @@
+/*
+ * cblas_daxpy.c
+ *
+ * The program is a C interface to daxpy.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_daxpy( const int N, const double alpha, const double *X,
+ const int incX, double *Y, const int incY)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ F77_daxpy( &F77_N, &alpha, X, &F77_incX, Y, &F77_incY);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dcopy.c b/contrib/libs/cblas/cblas_interface/cblas_dcopy.c
new file mode 100644
index 0000000000..422a55e517
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dcopy.c
@@ -0,0 +1,22 @@
+/*
+ * cblas_dcopy.c
+ *
+ * The program is a C interface to dcopy.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dcopy( const int N, const double *X,
+ const int incX, double *Y, const int incY)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ F77_dcopy( &F77_N, X, &F77_incX, Y, &F77_incY);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ddot.c b/contrib/libs/cblas/cblas_interface/cblas_ddot.c
new file mode 100644
index 0000000000..d773434031
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ddot.c
@@ -0,0 +1,25 @@
+/*
+ * cblas_ddot.c
+ *
+ * The program is a C interface to ddot.
+ * It calls the fortran wrapper before calling ddot.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+double cblas_ddot( const int N, const double *X,
+ const int incX, const double *Y, const int incY)
+{
+ double dot;
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ F77_ddot_sub( &F77_N, X, &F77_incX, Y, &F77_incY, &dot);
+ return dot;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dgbmv.c b/contrib/libs/cblas/cblas_interface/cblas_dgbmv.c
new file mode 100644
index 0000000000..33c481db11
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dgbmv.c
@@ -0,0 +1,81 @@
+/*
+ *
+ * cblas_dgbmv.c
+ * This program is a C interface to dgbmv.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dgbmv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const int KL, const int KU,
+ const double alpha, const double *A, const int lda,
+ const double *X, const int incX, const double beta,
+ double *Y, const int incY)
+{
+ char TA;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA;
+#else
+ #define F77_TA &TA
+#endif
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+ F77_INT F77_KL=KL,F77_KU=KU;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_KL KL
+ #define F77_KU KU
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(2, "cblas_dgbmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_TA = C2F_CHAR(&TA);
+ #endif
+ F77_dgbmv(F77_TA, &F77_M, &F77_N, &F77_KL, &F77_KU, &alpha,
+ A, &F77_lda, X, &F77_incX, &beta, Y, &F77_incY);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans) TA = 'N';
+ else
+ {
+ cblas_xerbla(2, "cblas_dgbmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_TA = C2F_CHAR(&TA);
+ #endif
+ F77_dgbmv(F77_TA, &F77_N, &F77_M, &F77_KU, &F77_KL, &alpha,
+ A ,&F77_lda, X,&F77_incX, &beta, Y, &F77_incY);
+ }
+ else cblas_xerbla(1, "cblas_dgbmv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dgemm.c b/contrib/libs/cblas/cblas_interface/cblas_dgemm.c
new file mode 100644
index 0000000000..d02ac16b32
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dgemm.c
@@ -0,0 +1,109 @@
+/*
+ *
+ * cblas_dgemm.c
+ * This program is a C interface to dgemm.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
+ const int K, const double alpha, const double *A,
+ const int lda, const double *B, const int ldb,
+ const double beta, double *C, const int ldc)
+{
+ char TA, TB;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_TB;
+#else
+ #define F77_TA &TA
+ #define F77_TB &TB
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_K=K, F77_lda=lda, F77_ldb=ldb;
+ F77_INT F77_ldc=ldc;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_ldb ldb
+ #define F77_ldc ldc
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+ if(TransA == CblasTrans) TA='T';
+ else if ( TransA == CblasConjTrans ) TA='C';
+ else if ( TransA == CblasNoTrans ) TA='N';
+ else
+ {
+ cblas_xerbla(2, "cblas_dgemm","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if(TransB == CblasTrans) TB='T';
+ else if ( TransB == CblasConjTrans ) TB='C';
+ else if ( TransB == CblasNoTrans ) TB='N';
+ else
+ {
+ cblas_xerbla(3, "cblas_dgemm","Illegal TransB setting, %d\n", TransB);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_TA = C2F_CHAR(&TA);
+ F77_TB = C2F_CHAR(&TB);
+ #endif
+
+ F77_dgemm(F77_TA, F77_TB, &F77_M, &F77_N, &F77_K, &alpha, A,
+ &F77_lda, B, &F77_ldb, &beta, C, &F77_ldc);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if(TransA == CblasTrans) TB='T';
+ else if ( TransA == CblasConjTrans ) TB='C';
+ else if ( TransA == CblasNoTrans ) TB='N';
+ else
+ {
+ cblas_xerbla(2, "cblas_dgemm","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if(TransB == CblasTrans) TA='T';
+ else if ( TransB == CblasConjTrans ) TA='C';
+ else if ( TransB == CblasNoTrans ) TA='N';
+ else
+ {
+ cblas_xerbla(2, "cblas_dgemm","Illegal TransB setting, %d\n", TransB);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_TA = C2F_CHAR(&TA);
+ F77_TB = C2F_CHAR(&TB);
+ #endif
+
+ F77_dgemm(F77_TA, F77_TB, &F77_N, &F77_M, &F77_K, &alpha, B,
+ &F77_ldb, A, &F77_lda, &beta, C, &F77_ldc);
+ }
+ else cblas_xerbla(1, "cblas_dgemm", "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dgemv.c b/contrib/libs/cblas/cblas_interface/cblas_dgemv.c
new file mode 100644
index 0000000000..9062f3eed4
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dgemv.c
@@ -0,0 +1,78 @@
+/*
+ *
+ * cblas_dgemv.c
+ * This program is a C interface to dgemv.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dgemv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const double alpha, const double *A, const int lda,
+ const double *X, const int incX, const double beta,
+ double *Y, const int incY)
+{
+ char TA;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA;
+#else
+ #define F77_TA &TA
+#endif
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(2, "cblas_dgemv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_TA = C2F_CHAR(&TA);
+ #endif
+ F77_dgemv(F77_TA, &F77_M, &F77_N, &alpha, A, &F77_lda, X, &F77_incX,
+ &beta, Y, &F77_incY);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans) TA = 'N';
+ else
+ {
+ cblas_xerbla(2, "cblas_dgemv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_TA = C2F_CHAR(&TA);
+ #endif
+ F77_dgemv(F77_TA, &F77_N, &F77_M, &alpha, A, &F77_lda, X,
+ &F77_incX, &beta, Y, &F77_incY);
+ }
+ else cblas_xerbla(1, "cblas_dgemv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dger.c b/contrib/libs/cblas/cblas_interface/cblas_dger.c
new file mode 100644
index 0000000000..b2b805b4f7
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dger.c
@@ -0,0 +1,47 @@
+/*
+ *
+ * cblas_dger.c
+ * This program is a C interface to dger.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dger(const enum CBLAS_ORDER order, const int M, const int N,
+ const double alpha, const double *X, const int incX,
+ const double *Y, const int incY, double *A, const int lda)
+{
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+ #define F77_lda lda
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ F77_dger( &F77_M, &F77_N, &alpha, X, &F77_incX, Y, &F77_incY, A,
+ &F77_lda);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ F77_dger( &F77_N, &F77_M ,&alpha, Y, &F77_incY, X, &F77_incX, A,
+ &F77_lda);
+
+ }
+ else cblas_xerbla(1, "cblas_dger", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dnrm2.c b/contrib/libs/cblas/cblas_interface/cblas_dnrm2.c
new file mode 100644
index 0000000000..fe46ad4849
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dnrm2.c
@@ -0,0 +1,23 @@
+/*
+ * cblas_dnrm2.c
+ *
+ * The program is a C interface to dnrm2.
+ * It calls the fortranwrapper before calling dnrm2.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+double cblas_dnrm2( const int N, const double *X, const int incX)
+{
+ double nrm2;
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incX
+#endif
+ F77_dnrm2_sub( &F77_N, X, &F77_incX, &nrm2);
+ return nrm2;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_drot.c b/contrib/libs/cblas/cblas_interface/cblas_drot.c
new file mode 100644
index 0000000000..51dc4ad5ef
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_drot.c
@@ -0,0 +1,23 @@
+/*
+ * cblas_drot.c
+ *
+ * The program is a C interface to drot.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_drot(const int N, double *X, const int incX,
+ double *Y, const int incY, const double c, const double s)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ F77_drot(&F77_N, X, &F77_incX, Y, &F77_incY, &c, &s);
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_drotg.c b/contrib/libs/cblas/cblas_interface/cblas_drotg.c
new file mode 100644
index 0000000000..0cbbd8bc0b
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_drotg.c
@@ -0,0 +1,14 @@
+/*
+ * cblas_drotg.c
+ *
+ * The program is a C interface to drotg.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_drotg( double *a, double *b, double *c, double *s)
+{
+ F77_drotg(a,b,c,s);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_drotm.c b/contrib/libs/cblas/cblas_interface/cblas_drotm.c
new file mode 100644
index 0000000000..ebe20ad627
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_drotm.c
@@ -0,0 +1,14 @@
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_drotm( const int N, double *X, const int incX, double *Y,
+ const int incY, const double *P)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ F77_drotm( &F77_N, X, &F77_incX, Y, &F77_incY, P);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_drotmg.c b/contrib/libs/cblas/cblas_interface/cblas_drotmg.c
new file mode 100644
index 0000000000..13a2208e5f
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_drotmg.c
@@ -0,0 +1,15 @@
+/*
+ * cblas_drotmg.c
+ *
+ * The program is a C interface to drotmg.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_drotmg( double *d1, double *d2, double *b1,
+ const double b2, double *p)
+{
+ F77_drotmg(d1,d2,b1,&b2,p);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dsbmv.c b/contrib/libs/cblas/cblas_interface/cblas_dsbmv.c
new file mode 100644
index 0000000000..95b61820fc
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dsbmv.c
@@ -0,0 +1,77 @@
+/*
+ *
+ * cblas_dsbmv.c
+ * This program is a C interface to dsbmv.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dsbmv(const enum CBLAS_ORDER order,
+ const enum CBLAS_UPLO Uplo, const int N, const int K,
+ const double alpha, const double *A, const int lda,
+ const double *X, const int incX, const double beta,
+ double *Y, const int incY)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_K=K, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_dsbmv","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_dsbmv(F77_UL, &F77_N, &F77_K, &alpha, A, &F77_lda, X,
+ &F77_incX, &beta, Y, &F77_incY);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_dsbmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_dsbmv(F77_UL, &F77_N, &F77_K, &alpha,
+ A ,&F77_lda, X,&F77_incX, &beta, Y, &F77_incY);
+ }
+ else cblas_xerbla(1, "cblas_dsbmv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dscal.c b/contrib/libs/cblas/cblas_interface/cblas_dscal.c
new file mode 100644
index 0000000000..bd04de77d6
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dscal.c
@@ -0,0 +1,21 @@
+/*
+ * cblas_dscal.c
+ *
+ * The program is a C interface to dscal.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dscal( const int N, const double alpha, double *X,
+ const int incX)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incX
+#endif
+ F77_dscal( &F77_N, &alpha, X, &F77_incX);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dsdot.c b/contrib/libs/cblas/cblas_interface/cblas_dsdot.c
new file mode 100644
index 0000000000..52cd877a20
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dsdot.c
@@ -0,0 +1,25 @@
+/*
+ * cblas_dsdot.c
+ *
+ * The program is a C interface to dsdot.
+ * It calls fthe fortran wrapper before calling dsdot.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+double cblas_dsdot( const int N, const float *X,
+ const int incX, const float *Y, const int incY)
+{
+ double dot;
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ F77_dsdot_sub( &F77_N, X, &F77_incX, Y, &F77_incY, &dot);
+ return dot;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dspmv.c b/contrib/libs/cblas/cblas_interface/cblas_dspmv.c
new file mode 100644
index 0000000000..dd1544f9cf
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dspmv.c
@@ -0,0 +1,76 @@
+/*
+ *
+ * cblas_dspmv.c
+ * This program is a C interface to dspmv.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dspmv(const enum CBLAS_ORDER order,
+ const enum CBLAS_UPLO Uplo, const int N,
+ const double alpha, const double *AP,
+ const double *X, const int incX, const double beta,
+ double *Y, const int incY)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_dspmv","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_dspmv(F77_UL, &F77_N, &alpha, AP, X,
+ &F77_incX, &beta, Y, &F77_incY);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_dspmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_dspmv(F77_UL, &F77_N, &alpha,
+ AP, X,&F77_incX, &beta, Y, &F77_incY);
+ }
+ else cblas_xerbla(1, "cblas_dspmv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dspr.c b/contrib/libs/cblas/cblas_interface/cblas_dspr.c
new file mode 100644
index 0000000000..c6300391cb
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dspr.c
@@ -0,0 +1,70 @@
+/*
+ *
+ * cblas_dspr.c
+ * This program is a C interface to dspr.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const double *X,
+ const int incX, double *Ap)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incX
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasLower) UL = 'L';
+ else if (Uplo == CblasUpper) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_dspr","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+
+ F77_dspr(F77_UL, &F77_N, &alpha, X, &F77_incX, Ap);
+
+ } else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasLower) UL = 'U';
+ else if (Uplo == CblasUpper) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_dspr","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_dspr(F77_UL, &F77_N, &alpha, X, &F77_incX, Ap);
+ } else cblas_xerbla(1, "cblas_dspr", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dspr2.c b/contrib/libs/cblas/cblas_interface/cblas_dspr2.c
new file mode 100644
index 0000000000..4f1e7805a0
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dspr2.c
@@ -0,0 +1,70 @@
+/*
+ * cblas_dspr2.c
+ * The program is a C interface to dspr2.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const double *X,
+ const int incX, const double *Y, const int incY, double *A)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasLower) UL = 'L';
+ else if (Uplo == CblasUpper) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_dspr2","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+
+ F77_dspr2(F77_UL, &F77_N, &alpha, X, &F77_incX, Y, &F77_incY, A);
+
+ } else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasLower) UL = 'U';
+ else if (Uplo == CblasUpper) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_dspr2","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_dspr2(F77_UL, &F77_N, &alpha, X, &F77_incX, Y, &F77_incY, A);
+ } else cblas_xerbla(1, "cblas_dspr2", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dswap.c b/contrib/libs/cblas/cblas_interface/cblas_dswap.c
new file mode 100644
index 0000000000..9ae5bb93c0
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dswap.c
@@ -0,0 +1,22 @@
+/*
+ * cblas_dswap.c
+ *
+ * The program is a C interface to dswap.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dswap( const int N, double *X, const int incX, double *Y,
+ const int incY)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ F77_dswap( &F77_N, X, &F77_incX, Y, &F77_incY);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dsymm.c b/contrib/libs/cblas/cblas_interface/cblas_dsymm.c
new file mode 100644
index 0000000000..8b50e9a40b
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dsymm.c
@@ -0,0 +1,106 @@
+/*
+ *
+ * cblas_dsymm.c
+ * This program is a C interface to dsymm.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const int M, const int N,
+ const double alpha, const double *A, const int lda,
+ const double *B, const int ldb, const double beta,
+ double *C, const int ldc)
+{
+ char SD, UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_SD, F77_UL;
+#else
+ #define F77_SD &SD
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_ldb=ldb;
+ F77_INT F77_ldc=ldc;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_ldb ldb
+ #define F77_ldc ldc
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+ if( Side == CblasRight) SD='R';
+ else if ( Side == CblasLeft ) SD='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_dsymm","Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Uplo == CblasUpper) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(3, "cblas_dsymm","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_SD = C2F_CHAR(&SD);
+ #endif
+
+ F77_dsymm(F77_SD, F77_UL, &F77_M, &F77_N, &alpha, A, &F77_lda,
+ B, &F77_ldb, &beta, C, &F77_ldc);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if( Side == CblasRight) SD='L';
+ else if ( Side == CblasLeft ) SD='R';
+ else
+ {
+ cblas_xerbla(2, "cblas_dsymm","Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Uplo == CblasUpper) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_dsymm","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_SD = C2F_CHAR(&SD);
+ #endif
+
+ F77_dsymm(F77_SD, F77_UL, &F77_N, &F77_M, &alpha, A, &F77_lda, B,
+ &F77_ldb, &beta, C, &F77_ldc);
+ }
+ else cblas_xerbla(1, "cblas_dsymm","Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dsymv.c b/contrib/libs/cblas/cblas_interface/cblas_dsymv.c
new file mode 100644
index 0000000000..020adc91d3
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dsymv.c
@@ -0,0 +1,76 @@
+/*
+ *
+ * cblas_dsymv.c
+ * This program is a C interface to dsymv.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dsymv(const enum CBLAS_ORDER order,
+ const enum CBLAS_UPLO Uplo, const int N,
+ const double alpha, const double *A, const int lda,
+ const double *X, const int incX, const double beta,
+ double *Y, const int incY)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_dsymv","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_dsymv(F77_UL, &F77_N, &alpha, A, &F77_lda, X,
+ &F77_incX, &beta, Y, &F77_incY);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_dsymv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_dsymv(F77_UL, &F77_N, &alpha,
+ A ,&F77_lda, X,&F77_incX, &beta, Y, &F77_incY);
+ }
+ else cblas_xerbla(1, "cblas_dsymv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dsyr.c b/contrib/libs/cblas/cblas_interface/cblas_dsyr.c
new file mode 100644
index 0000000000..0d20083481
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dsyr.c
@@ -0,0 +1,71 @@
+/*
+ *
+ * cblas_dsyr.c
+ * This program is a C interface to dsyr.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dsyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const double *X,
+ const int incX, double *A, const int lda)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_lda=lda;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_lda lda
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasLower) UL = 'L';
+ else if (Uplo == CblasUpper) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_dsyr","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+
+ F77_dsyr(F77_UL, &F77_N, &alpha, X, &F77_incX, A, &F77_lda);
+
+ } else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasLower) UL = 'U';
+ else if (Uplo == CblasUpper) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_dsyr","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_dsyr(F77_UL, &F77_N, &alpha, X, &F77_incX, A, &F77_lda);
+ } else cblas_xerbla(1, "cblas_dsyr", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dsyr2.c b/contrib/libs/cblas/cblas_interface/cblas_dsyr2.c
new file mode 100644
index 0000000000..fe4a2920ed
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dsyr2.c
@@ -0,0 +1,76 @@
+/*
+ *
+ * cblas_dsyr2.c
+ * This program is a C interface to dsyr2.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dsyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const double *X,
+ const int incX, const double *Y, const int incY, double *A,
+ const int lda)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_incX=incX, F77_incY=incY, F77_lda=lda;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+ #define F77_lda lda
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasLower) UL = 'L';
+ else if (Uplo == CblasUpper) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_dsyr2","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+
+ F77_dsyr2(F77_UL, &F77_N, &alpha, X, &F77_incX, Y, &F77_incY, A,
+ &F77_lda);
+
+ } else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasLower) UL = 'U';
+ else if (Uplo == CblasUpper) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_dsyr2","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_dsyr2(F77_UL, &F77_N, &alpha, X, &F77_incX, Y, &F77_incY, A,
+ &F77_lda);
+ } else cblas_xerbla(1, "cblas_dsyr2", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dsyr2k.c b/contrib/libs/cblas/cblas_interface/cblas_dsyr2k.c
new file mode 100644
index 0000000000..e50dc11cc9
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dsyr2k.c
@@ -0,0 +1,109 @@
+/*
+ *
+ * cblas_dsyr2k.c
+ * This program is a C interface to dsyr2k.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const double alpha, const double *A, const int lda,
+ const double *B, const int ldb, const double beta,
+ double *C, const int ldc)
+{
+ char UL, TR;
+#ifdef F77_CHAR
+   F77_CHAR F77_TR, F77_UL;
+#else
+ #define F77_TR &TR
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_K=K, F77_lda=lda, F77_ldb=ldb;
+ F77_INT F77_ldc=ldc;
+#else
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_ldb ldb
+ #define F77_ldc ldc
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+
+ if( Uplo == CblasUpper) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_dsyr2k","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Trans == CblasTrans) TR ='T';
+ else if ( Trans == CblasConjTrans ) TR='C';
+ else if ( Trans == CblasNoTrans ) TR='N';
+ else
+ {
+ cblas_xerbla(3, "cblas_dsyr2k","Illegal Trans setting, %d\n", Trans);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TR = C2F_CHAR(&TR);
+ #endif
+
+ F77_dsyr2k(F77_UL, F77_TR, &F77_N, &F77_K, &alpha, A, &F77_lda,
+ B, &F77_ldb, &beta, C, &F77_ldc);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if( Uplo == CblasUpper) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_dsyr2k","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if( Trans == CblasTrans) TR ='N';
+ else if ( Trans == CblasConjTrans ) TR='N';
+ else if ( Trans == CblasNoTrans ) TR='T';
+ else
+ {
+ cblas_xerbla(3, "cblas_dsyr2k","Illegal Trans setting, %d\n", Trans);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TR = C2F_CHAR(&TR);
+ #endif
+
+ F77_dsyr2k(F77_UL, F77_TR, &F77_N, &F77_K, &alpha, A, &F77_lda, B,
+ &F77_ldb, &beta, C, &F77_ldc);
+ }
+ else cblas_xerbla(1, "cblas_dsyr2k","Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dsyrk.c b/contrib/libs/cblas/cblas_interface/cblas_dsyrk.c
new file mode 100644
index 0000000000..469f930df3
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dsyrk.c
@@ -0,0 +1,108 @@
+/*
+ *
+ * cblas_dsyrk.c
+ * This program is a C interface to dsyrk.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const double alpha, const double *A, const int lda,
+ const double beta, double *C, const int ldc)
+{
+ char UL, TR;
+#ifdef F77_CHAR
+ F77_CHAR F77_TR, F77_UL;
+#else
+ #define F77_TR &TR
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_K=K, F77_lda=lda;
+ F77_INT F77_ldc=ldc;
+#else
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_ldc ldc
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+
+ if( Uplo == CblasUpper) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_dsyrk","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Trans == CblasTrans) TR ='T';
+ else if ( Trans == CblasConjTrans ) TR='C';
+ else if ( Trans == CblasNoTrans ) TR='N';
+ else
+ {
+ cblas_xerbla(3, "cblas_dsyrk","Illegal Trans setting, %d\n", Trans);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TR = C2F_CHAR(&TR);
+ #endif
+
+ F77_dsyrk(F77_UL, F77_TR, &F77_N, &F77_K, &alpha, A, &F77_lda,
+ &beta, C, &F77_ldc);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if( Uplo == CblasUpper) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_dsyrk","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if( Trans == CblasTrans) TR ='N';
+ else if ( Trans == CblasConjTrans ) TR='N';
+ else if ( Trans == CblasNoTrans ) TR='T';
+ else
+ {
+ cblas_xerbla(3, "cblas_dsyrk","Illegal Trans setting, %d\n", Trans);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TR = C2F_CHAR(&TR);
+ #endif
+
+ F77_dsyrk(F77_UL, F77_TR, &F77_N, &F77_K, &alpha, A, &F77_lda,
+ &beta, C, &F77_ldc);
+ }
+ else cblas_xerbla(1, "cblas_dsyrk","Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
+
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dtbmv.c b/contrib/libs/cblas/cblas_interface/cblas_dtbmv.c
new file mode 100644
index 0000000000..491f11d475
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dtbmv.c
@@ -0,0 +1,122 @@
+/*
+ * cblas_dtbmv.c
+ * The program is a C interface to dtbmv.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dtbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const double *A, const int lda,
+ double *X, const int incX)
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_lda=lda, F77_K=K, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_incX incX
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_dtbmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_dtbmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_dtbmv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_dtbmv( F77_UL, F77_TA, F77_DI, &F77_N, &F77_K, A, &F77_lda, X,
+ &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_dtbmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans) TA = 'N';
+ else
+ {
+ cblas_xerbla(3, "cblas_dtbmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+         cblas_xerbla(4, "cblas_dtbmv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_dtbmv( F77_UL, F77_TA, F77_DI, &F77_N, &F77_K, A, &F77_lda, X,
+ &F77_incX);
+
+ }
+ else cblas_xerbla(1, "cblas_dtbmv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dtbsv.c b/contrib/libs/cblas/cblas_interface/cblas_dtbsv.c
new file mode 100644
index 0000000000..664822fea4
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dtbsv.c
@@ -0,0 +1,122 @@
+/*
+ * cblas_dtbsv.c
+ * The program is a C interface to dtbsv.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dtbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const double *A, const int lda,
+ double *X, const int incX)
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_lda=lda, F77_K=K, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_incX incX
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_dtbsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_dtbsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_dtbsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_dtbsv( F77_UL, F77_TA, F77_DI, &F77_N, &F77_K, A, &F77_lda, X,
+ &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_dtbsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans) TA = 'N';
+ else
+ {
+ cblas_xerbla(3, "cblas_dtbsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_dtbsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_dtbsv( F77_UL, F77_TA, F77_DI, &F77_N, &F77_K, A, &F77_lda, X,
+ &F77_incX);
+ }
+ else cblas_xerbla(1, "cblas_dtbsv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dtpmv.c b/contrib/libs/cblas/cblas_interface/cblas_dtpmv.c
new file mode 100644
index 0000000000..5b96a2b495
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dtpmv.c
@@ -0,0 +1,117 @@
+/*
+ * cblas_dtpmv.c
+ * The program is a C interface to dtpmv.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dtpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const double *Ap, double *X, const int incX)
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incX
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_dtpmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_dtpmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_dtpmv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_dtpmv( F77_UL, F77_TA, F77_DI, &F77_N, Ap, X, &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_dtpmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans) TA = 'N';
+ else
+ {
+ cblas_xerbla(3, "cblas_dtpmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_dtpmv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_dtpmv( F77_UL, F77_TA, F77_DI, &F77_N, Ap, X,&F77_incX);
+ }
+ else cblas_xerbla(1, "cblas_dtpmv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dtpsv.c b/contrib/libs/cblas/cblas_interface/cblas_dtpsv.c
new file mode 100644
index 0000000000..5555c2174e
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dtpsv.c
@@ -0,0 +1,118 @@
+/*
+ * cblas_dtpsv.c
+ * The program is a C interface to dtpsv.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dtpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const double *Ap, double *X, const int incX)
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incX
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_dtpsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_dtpsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_dtpsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_dtpsv( F77_UL, F77_TA, F77_DI, &F77_N, Ap, X, &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_dtpsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans) TA = 'N';
+ else
+ {
+ cblas_xerbla(3, "cblas_dtpsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_dtpsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_dtpsv( F77_UL, F77_TA, F77_DI, &F77_N, Ap, X,&F77_incX);
+
+ }
+ else cblas_xerbla(1, "cblas_dtpsv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dtrmm.c b/contrib/libs/cblas/cblas_interface/cblas_dtrmm.c
new file mode 100644
index 0000000000..32a5d2bc91
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dtrmm.c
@@ -0,0 +1,148 @@
+/*
+ *
+ * cblas_dtrmm.c
+ * This program is a C interface to dtrmm.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_dtrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const double alpha, const double *A, const int lda,
+ double *B, const int ldb)
+{
+ char UL, TA, SD, DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_SD, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_SD &SD
+ #define F77_DI &DI
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_ldb=ldb;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_ldb ldb
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+ if( Side == CblasRight) SD='R';
+ else if ( Side == CblasLeft ) SD='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_dtrmm","Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if( Uplo == CblasUpper) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(3, "cblas_dtrmm","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( TransA == CblasTrans) TA ='T';
+ else if ( TransA == CblasConjTrans ) TA='C';
+ else if ( TransA == CblasNoTrans ) TA='N';
+ else
+ {
+ cblas_xerbla(4, "cblas_dtrmm","Illegal Trans setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Diag == CblasUnit ) DI='U';
+ else if ( Diag == CblasNonUnit ) DI='N';
+ else
+ {
+ cblas_xerbla(5, "cblas_dtrmm","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_SD = C2F_CHAR(&SD);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_dtrmm(F77_SD, F77_UL, F77_TA, F77_DI, &F77_M, &F77_N, &alpha, A, &F77_lda, B, &F77_ldb);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if( Side == CblasRight) SD='L';
+ else if ( Side == CblasLeft ) SD='R';
+ else
+ {
+ cblas_xerbla(2, "cblas_dtrmm","Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Uplo == CblasUpper) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_dtrmm","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( TransA == CblasTrans) TA ='T';
+ else if ( TransA == CblasConjTrans ) TA='C';
+ else if ( TransA == CblasNoTrans ) TA='N';
+ else
+ {
+ cblas_xerbla(4, "cblas_dtrmm","Illegal Trans setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Diag == CblasUnit ) DI='U';
+ else if ( Diag == CblasNonUnit ) DI='N';
+ else
+ {
+ cblas_xerbla(5, "cblas_dtrmm","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_SD = C2F_CHAR(&SD);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_dtrmm(F77_SD, F77_UL, F77_TA, F77_DI, &F77_N, &F77_M, &alpha, A, &F77_lda, B, &F77_ldb);
+ }
+ else cblas_xerbla(1, "cblas_dtrmm", "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dtrmv.c b/contrib/libs/cblas/cblas_interface/cblas_dtrmv.c
new file mode 100644
index 0000000000..cce150709b
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dtrmv.c
@@ -0,0 +1,122 @@
+/*
+ *
+ * cblas_dtrmv.c
+ * This program is a C interface to dtrmv.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+/*
+ * Triangular matrix-vector product: x := op(A)*x.  Validates the enum
+ * arguments, converts them to the single-character flags expected by the
+ * Fortran routine, and forwards to F77_dtrmv.
+ */
+void cblas_dtrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+                 const int N, const double *A, const int lda,
+                 double *X, const int incX)
+
+{
+   /* Single-character option flags handed to the Fortran routine. */
+   char TA;
+   char UL;
+   char DI;
+#ifdef F77_CHAR
+   F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+   #define F77_TA &TA
+   #define F77_UL &UL
+   #define F77_DI &DI
+#endif
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_lda=lda, F77_incX=incX;
+#else
+   #define F77_N N
+   #define F77_lda lda
+   #define F77_incX incX
+#endif
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+
+   CBLAS_CallFromC = 1;
+   if (order == CblasColMajor)
+   {
+      /* Column-major storage: the enums map directly to Fortran flags. */
+      if (Uplo == CblasUpper) UL = 'U';
+      else if (Uplo == CblasLower) UL = 'L';
+      else
+      {
+         cblas_xerbla(2, "cblas_dtrmv","Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      if (TransA == CblasNoTrans) TA = 'N';
+      else if (TransA == CblasTrans) TA = 'T';
+      else if (TransA == CblasConjTrans) TA = 'C';
+      else
+      {
+         cblas_xerbla(3, "cblas_dtrmv","Illegal TransA setting, %d\n", TransA);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      if (Diag == CblasUnit) DI = 'U';
+      else if (Diag == CblasNonUnit) DI = 'N';
+      else
+      {
+         cblas_xerbla(4, "cblas_dtrmv","Illegal Diag setting, %d\n", Diag);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+      F77_UL = C2F_CHAR(&UL);
+      F77_TA = C2F_CHAR(&TA);
+      F77_DI = C2F_CHAR(&DI);
+      #endif
+      F77_dtrmv( F77_UL, F77_TA, F77_DI, &F77_N, A, &F77_lda, X,
+                 &F77_incX);
+   }
+   else if (order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      /* Row-major A occupies the same memory as the transpose of a
+       * column-major matrix, so swap the triangle and flip the transpose
+       * flag before calling the Fortran code. */
+      if (Uplo == CblasUpper) UL = 'L';
+      else if (Uplo == CblasLower) UL = 'U';
+      else
+      {
+         cblas_xerbla(2, "cblas_dtrmv","Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      /* Real data: ConjTrans is equivalent to Trans, so both map to 'N'
+       * after the row-major flip. */
+      if (TransA == CblasNoTrans) TA = 'T';
+      else if (TransA == CblasTrans) TA = 'N';
+      else if (TransA == CblasConjTrans) TA = 'N';
+      else
+      {
+         cblas_xerbla(3, "cblas_dtrmv","Illegal TransA setting, %d\n", TransA);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      if (Diag == CblasUnit) DI = 'U';
+      else if (Diag == CblasNonUnit) DI = 'N';
+      else
+      {
+         cblas_xerbla(4, "cblas_dtrmv","Illegal Diag setting, %d\n", Diag);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+      F77_UL = C2F_CHAR(&UL);
+      F77_TA = C2F_CHAR(&TA);
+      F77_DI = C2F_CHAR(&DI);
+      #endif
+      F77_dtrmv( F77_UL, F77_TA, F77_DI, &F77_N, A, &F77_lda, X,
+                 &F77_incX);
+   } else cblas_xerbla(1, "cblas_dtrmv", "Illegal order setting, %d\n", order);
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dtrsm.c b/contrib/libs/cblas/cblas_interface/cblas_dtrsm.c
new file mode 100644
index 0000000000..4f47cb193c
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dtrsm.c
@@ -0,0 +1,153 @@
+/*
+ *
+ * cblas_dtrsm.c
+ * This program is a C interface to dtrsm.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+/*
+ * Triangular solve with multiple right-hand sides:
+ * op(A)*X = alpha*B or X*op(A) = alpha*B, overwriting B with X.
+ * Validates the enum arguments, converts them to Fortran character flags,
+ * and forwards to F77_dtrsm.
+ */
+void cblas_dtrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+                 const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+                 const enum CBLAS_DIAG Diag, const int M, const int N,
+                 const double alpha, const double *A, const int lda,
+                 double *B, const int ldb)
+
+{
+   char UL, TA, SD, DI;
+#ifdef F77_CHAR
+   F77_CHAR F77_TA, F77_UL, F77_SD, F77_DI;
+#else
+   #define F77_TA &TA
+   #define F77_UL &UL
+   #define F77_SD &SD
+   #define F77_DI &DI
+#endif
+
+#ifdef F77_INT
+   F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_ldb=ldb;
+#else
+   #define F77_M M
+   #define F77_N N
+   #define F77_lda lda
+   #define F77_ldb ldb
+#endif
+
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+   CBLAS_CallFromC = 1;
+
+   if( Order == CblasColMajor )
+   {
+      /* Column-major storage: enums map directly to Fortran flags. */
+      if ( Side == CblasRight) SD='R';
+      else if ( Side == CblasLeft ) SD='L';
+      else
+      {
+         cblas_xerbla(2, "cblas_dtrsm","Illegal Side setting, %d\n", Side);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      if ( Uplo == CblasUpper) UL='U';
+      else if ( Uplo == CblasLower) UL='L';
+      else
+      {
+         cblas_xerbla(3, "cblas_dtrsm","Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      if ( TransA == CblasTrans ) TA='T';
+      else if ( TransA == CblasConjTrans) TA='C';
+      else if ( TransA == CblasNoTrans ) TA='N';
+      else
+      {
+         cblas_xerbla(4, "cblas_dtrsm","Illegal Trans setting, %d\n", TransA);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      if ( Diag == CblasUnit ) DI='U';
+      else if ( Diag == CblasNonUnit) DI='N';
+      else
+      {
+         cblas_xerbla(5, "cblas_dtrsm","Illegal Diag setting, %d\n", Diag);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      #ifdef F77_CHAR
+      F77_UL = C2F_CHAR(&UL);
+      F77_TA = C2F_CHAR(&TA);
+      F77_SD = C2F_CHAR(&SD);
+      F77_DI = C2F_CHAR(&DI);
+      #endif
+
+      F77_dtrsm(F77_SD, F77_UL, F77_TA, F77_DI, &F77_M, &F77_N, &alpha,
+                A, &F77_lda, B, &F77_ldb);
+   }
+   else if (Order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      /* Row-major is handled by solving the transposed column-major
+       * problem: swap Side, swap the triangle, and swap M/N in the
+       * Fortran call below. */
+      if ( Side == CblasRight) SD='L';
+      else if ( Side == CblasLeft ) SD='R';
+      else
+      {
+         cblas_xerbla(2, "cblas_dtrsm","Illegal Side setting, %d\n", Side);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      if ( Uplo == CblasUpper) UL='L';
+      else if ( Uplo == CblasLower) UL='U';
+      else
+      {
+         cblas_xerbla(3, "cblas_dtrsm","Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      /* The transpose flag itself is NOT flipped here: transposition is
+       * absorbed by the Side/Uplo swap and the M/N exchange. */
+      if ( TransA == CblasTrans ) TA='T';
+      else if ( TransA == CblasConjTrans) TA='C';
+      else if ( TransA == CblasNoTrans ) TA='N';
+      else
+      {
+         cblas_xerbla(4, "cblas_dtrsm","Illegal Trans setting, %d\n", TransA);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      if ( Diag == CblasUnit ) DI='U';
+      else if ( Diag == CblasNonUnit) DI='N';
+      else
+      {
+         cblas_xerbla(5, "cblas_dtrsm","Illegal Diag setting, %d\n", Diag);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      #ifdef F77_CHAR
+      F77_UL = C2F_CHAR(&UL);
+      F77_TA = C2F_CHAR(&TA);
+      F77_SD = C2F_CHAR(&SD);
+      F77_DI = C2F_CHAR(&DI);
+      #endif
+
+      F77_dtrsm(F77_SD, F77_UL, F77_TA, F77_DI, &F77_N, &F77_M, &alpha, A,
+                &F77_lda, B, &F77_ldb);
+   }
+   else cblas_xerbla(1, "cblas_dtrsm","Illegal Order setting, %d\n", Order);
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dtrsv.c b/contrib/libs/cblas/cblas_interface/cblas_dtrsv.c
new file mode 100644
index 0000000000..7299d17d52
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dtrsv.c
@@ -0,0 +1,121 @@
+/*
+ * cblas_dtrsv.c
+ * The program is a C interface to dtrsv.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/*
+ * Triangular solve for a single right-hand side: op(A)*x = b, solving
+ * in place in X.  Validates the enum arguments, converts them to Fortran
+ * character flags, and forwards to F77_dtrsv.
+ */
+void cblas_dtrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+                 const int N, const double *A, const int lda, double *X,
+                 const int incX)
+
+{
+   char TA;
+   char UL;
+   char DI;
+#ifdef F77_CHAR
+   F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+   #define F77_TA &TA
+   #define F77_UL &UL
+   #define F77_DI &DI
+#endif
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_lda=lda, F77_incX=incX;
+#else
+   #define F77_N N
+   #define F77_lda lda
+   #define F77_incX incX
+#endif
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+
+   CBLAS_CallFromC = 1;
+   if (order == CblasColMajor)
+   {
+      /* Column-major storage: enums map directly to Fortran flags. */
+      if (Uplo == CblasUpper) UL = 'U';
+      else if (Uplo == CblasLower) UL = 'L';
+      else
+      {
+         cblas_xerbla(2, "cblas_dtrsv","Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      if (TransA == CblasNoTrans) TA = 'N';
+      else if (TransA == CblasTrans) TA = 'T';
+      else if (TransA == CblasConjTrans) TA = 'C';
+      else
+      {
+         cblas_xerbla(3, "cblas_dtrsv","Illegal TransA setting, %d\n", TransA);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      if (Diag == CblasUnit) DI = 'U';
+      else if (Diag == CblasNonUnit) DI = 'N';
+      else
+      {
+         cblas_xerbla(4, "cblas_dtrsv","Illegal Diag setting, %d\n", Diag);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+      F77_UL = C2F_CHAR(&UL);
+      F77_TA = C2F_CHAR(&TA);
+      F77_DI = C2F_CHAR(&DI);
+      #endif
+      F77_dtrsv( F77_UL, F77_TA, F77_DI, &F77_N, A, &F77_lda, X,
+                 &F77_incX);
+   }
+   else if (order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      /* Row-major: solve the transposed column-major system instead —
+       * swap the triangle and flip the transpose flag. */
+      if (Uplo == CblasUpper) UL = 'L';
+      else if (Uplo == CblasLower) UL = 'U';
+      else
+      {
+         cblas_xerbla(2, "cblas_dtrsv","Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      /* Real data: ConjTrans behaves like Trans, so both become 'N'
+       * after the flip. */
+      if (TransA == CblasNoTrans) TA = 'T';
+      else if (TransA == CblasTrans) TA = 'N';
+      else if (TransA == CblasConjTrans) TA = 'N';
+      else
+      {
+         cblas_xerbla(3, "cblas_dtrsv","Illegal TransA setting, %d\n", TransA);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      if (Diag == CblasUnit) DI = 'U';
+      else if (Diag == CblasNonUnit) DI = 'N';
+      else
+      {
+         cblas_xerbla(4, "cblas_dtrsv","Illegal Diag setting, %d\n", Diag);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+      F77_UL = C2F_CHAR(&UL);
+      F77_TA = C2F_CHAR(&TA);
+      F77_DI = C2F_CHAR(&DI);
+      #endif
+      F77_dtrsv( F77_UL, F77_TA, F77_DI, &F77_N, A, &F77_lda, X,
+                 &F77_incX);
+   }
+   else cblas_xerbla(1, "cblas_dtrsv", "Illegal Order setting, %d\n", order);
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dzasum.c b/contrib/libs/cblas/cblas_interface/cblas_dzasum.c
new file mode 100644
index 0000000000..b32f573e5f
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dzasum.c
@@ -0,0 +1,23 @@
+/*
+ * cblas_dzasum.c
+ *
+ * The program is a C interface to dzasum.
+ * It calls the fortran wrapper before calling dzasum.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/* C wrapper around the Fortran dzasum_sub routine (double-complex
+ * absolute-value sum).  X is the complex vector, viewed as void*. */
+double cblas_dzasum( const int N, const void *X, const int incX)
+{
+   double asum;
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_incX=incX;
+#else
+   #define F77_N N
+   #define F77_incX incX
+#endif
+   /* The Fortran wrapper returns its result through an output argument. */
+   F77_dzasum_sub( &F77_N, X, &F77_incX, &asum);
+   return asum;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_dznrm2.c b/contrib/libs/cblas/cblas_interface/cblas_dznrm2.c
new file mode 100644
index 0000000000..dfa2bfc837
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_dznrm2.c
@@ -0,0 +1,23 @@
+/*
+ * cblas_dznrm2.c
+ *
+ * The program is a C interface to dznrm2.
+ * It calls the fortran wrapper before calling dznrm2.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/* C wrapper around the Fortran dznrm2_sub routine (double-complex
+ * Euclidean norm).  X is the complex vector, viewed as void*. */
+double cblas_dznrm2( const int N, const void *X, const int incX)
+{
+   double nrm2;
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_incX=incX;
+#else
+   #define F77_N N
+   #define F77_incX incX
+#endif
+   /* The Fortran wrapper returns its result through an output argument. */
+   F77_dznrm2_sub( &F77_N, X, &F77_incX, &nrm2);
+   return nrm2;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_globals.c b/contrib/libs/cblas/cblas_interface/cblas_globals.c
new file mode 100644
index 0000000000..ebcd74db3f
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_globals.c
@@ -0,0 +1,2 @@
+/* Set to 1 by every CBLAS wrapper for the duration of the call
+ * (presumably consulted by cblas_xerbla for error reporting — confirm). */
+int CBLAS_CallFromC=0;
+/* Set to 1 while a wrapper is servicing a CblasRowMajor call, 0 otherwise. */
+int RowMajorStrg=0;
diff --git a/contrib/libs/cblas/cblas_interface/cblas_icamax.c b/contrib/libs/cblas/cblas_interface/cblas_icamax.c
new file mode 100644
index 0000000000..f0cdbdb3e7
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_icamax.c
@@ -0,0 +1,23 @@
+/*
+ * cblas_icamax.c
+ *
+ * The program is a C interface to icamax.
+ * It calls the fortran wrapper before calling icamax.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/* C wrapper around the Fortran icamax_sub routine (index of the element
+ * with largest absolute value in a single-complex vector). */
+CBLAS_INDEX cblas_icamax( const int N, const void *X, const int incX)
+{
+   int iamax;
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_incX=incX;
+#else
+   #define F77_N N
+   #define F77_incX incX
+#endif
+   F77_icamax_sub( &F77_N, X, &F77_incX, &iamax);
+   /* Fortran index is 1-based; convert to 0-based, keeping 0 when the
+    * Fortran routine returned 0 (e.g. empty vector). */
+   return iamax ? iamax-1 : 0;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_idamax.c b/contrib/libs/cblas/cblas_interface/cblas_idamax.c
new file mode 100644
index 0000000000..abb70b53cc
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_idamax.c
@@ -0,0 +1,23 @@
+/*
+ * cblas_idamax.c
+ *
+ * The program is a C interface to idamax.
+ * It calls the fortran wrapper before calling idamax.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/* C wrapper around the Fortran idamax_sub routine (index of the element
+ * with largest absolute value in a double vector). */
+CBLAS_INDEX cblas_idamax( const int N, const double *X, const int incX)
+{
+   int iamax;
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_incX=incX;
+#else
+   #define F77_N N
+   #define F77_incX incX
+#endif
+   F77_idamax_sub( &F77_N, X, &F77_incX, &iamax);
+   /* Fortran index is 1-based; convert to 0-based, keeping 0 when the
+    * Fortran routine returned 0 (e.g. empty vector). */
+   return iamax ? iamax-1 : 0;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_isamax.c b/contrib/libs/cblas/cblas_interface/cblas_isamax.c
new file mode 100644
index 0000000000..bfd74e8f9c
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_isamax.c
@@ -0,0 +1,23 @@
+/*
+ * cblas_isamax.c
+ *
+ * The program is a C interface to isamax.
+ * It calls the fortran wrapper before calling isamax.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/* C wrapper around the Fortran isamax_sub routine (index of the element
+ * with largest absolute value in a float vector). */
+CBLAS_INDEX cblas_isamax( const int N, const float *X, const int incX)
+{
+   int iamax;
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_incX=incX;
+#else
+   #define F77_N N
+   #define F77_incX incX
+#endif
+   F77_isamax_sub( &F77_N, X, &F77_incX, &iamax);
+   /* Fortran index is 1-based; convert to 0-based, keeping 0 when the
+    * Fortran routine returned 0 (e.g. empty vector). */
+   return iamax ? iamax-1 : 0;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_izamax.c b/contrib/libs/cblas/cblas_interface/cblas_izamax.c
new file mode 100644
index 0000000000..21fdc396fd
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_izamax.c
@@ -0,0 +1,23 @@
+/*
+ * cblas_izamax.c
+ *
+ * The program is a C interface to izamax.
+ * It calls the fortran wrapper before calling izamax.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/* C wrapper around the Fortran izamax_sub routine (index of the element
+ * with largest absolute value in a double-complex vector). */
+CBLAS_INDEX cblas_izamax( const int N, const void *X, const int incX)
+{
+   int iamax;
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_incX=incX;
+#else
+   #define F77_N N
+   #define F77_incX incX
+#endif
+   F77_izamax_sub( &F77_N, X, &F77_incX, &iamax);
+   /* Fortran index is 1-based; convert to 0-based, keeping 0 when the
+    * Fortran routine returned 0 (e.g. empty vector). */
+   return (iamax ? iamax-1 : 0);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_sasum.c b/contrib/libs/cblas/cblas_interface/cblas_sasum.c
new file mode 100644
index 0000000000..7d4c32cf9e
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_sasum.c
@@ -0,0 +1,23 @@
+/*
+ * cblas_sasum.c
+ *
+ * The program is a C interface to sasum.
+ * It calls the fortran wrapper before calling sasum.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/* C wrapper around the Fortran sasum_sub routine (sum of absolute
+ * values of a float vector). */
+float cblas_sasum( const int N, const float *X, const int incX)
+{
+   float asum;
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_incX=incX;
+#else
+   #define F77_N N
+   #define F77_incX incX
+#endif
+   /* The Fortran wrapper returns its result through an output argument. */
+   F77_sasum_sub( &F77_N, X, &F77_incX, &asum);
+   return asum;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_saxpy.c b/contrib/libs/cblas/cblas_interface/cblas_saxpy.c
new file mode 100644
index 0000000000..2eee8e06e4
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_saxpy.c
@@ -0,0 +1,23 @@
+/*
+ * cblas_saxpy.c
+ *
+ * The program is a C interface to saxpy.
+ * It calls the fortran wrapper before calling saxpy.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/* C wrapper around the Fortran saxpy routine: Y := alpha*X + Y. */
+void cblas_saxpy( const int N, const float alpha, const float *X,
+                  const int incX, float *Y, const int incY)
+{
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+   #define F77_N N
+   #define F77_incX incX
+   #define F77_incY incY
+#endif
+   F77_saxpy( &F77_N, &alpha, X, &F77_incX, Y, &F77_incY);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_scasum.c b/contrib/libs/cblas/cblas_interface/cblas_scasum.c
new file mode 100644
index 0000000000..e1fa53090a
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_scasum.c
@@ -0,0 +1,23 @@
+/*
+ * cblas_scasum.c
+ *
+ * The program is a C interface to scasum.
+ * It calls the fortran wrapper before calling scasum.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/* C wrapper around the Fortran scasum_sub routine (single-complex
+ * absolute-value sum).  X is the complex vector, viewed as void*. */
+float cblas_scasum( const int N, const void *X, const int incX)
+{
+   float asum;
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_incX=incX;
+#else
+   #define F77_N N
+   #define F77_incX incX
+#endif
+   /* The Fortran wrapper returns its result through an output argument. */
+   F77_scasum_sub( &F77_N, X, &F77_incX, &asum);
+   return asum;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_scnrm2.c b/contrib/libs/cblas/cblas_interface/cblas_scnrm2.c
new file mode 100644
index 0000000000..fa48454ed5
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_scnrm2.c
@@ -0,0 +1,23 @@
+/*
+ * cblas_scnrm2.c
+ *
+ * The program is a C interface to scnrm2.
+ * It calls the fortran wrapper before calling scnrm2.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/* C wrapper around the Fortran scnrm2_sub routine (single-complex
+ * Euclidean norm).  X is the complex vector, viewed as void*. */
+float cblas_scnrm2( const int N, const void *X, const int incX)
+{
+   float nrm2;
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_incX=incX;
+#else
+   #define F77_N N
+   #define F77_incX incX
+#endif
+   /* The Fortran wrapper returns its result through an output argument. */
+   F77_scnrm2_sub( &F77_N, X, &F77_incX, &nrm2);
+   return nrm2;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_scopy.c b/contrib/libs/cblas/cblas_interface/cblas_scopy.c
new file mode 100644
index 0000000000..7796959f33
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_scopy.c
@@ -0,0 +1,22 @@
+/*
+ * cblas_scopy.c
+ *
+ * The program is a C interface to scopy.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/* C wrapper around the Fortran scopy routine: Y := X. */
+void cblas_scopy( const int N, const float *X,
+                  const int incX, float *Y, const int incY)
+{
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+   #define F77_N N
+   #define F77_incX incX
+   #define F77_incY incY
+#endif
+   F77_scopy( &F77_N, X, &F77_incX, Y, &F77_incY);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_sdot.c b/contrib/libs/cblas/cblas_interface/cblas_sdot.c
new file mode 100644
index 0000000000..baf859272b
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_sdot.c
@@ -0,0 +1,25 @@
+/*
+ * cblas_sdot.c
+ *
+ * The program is a C interface to sdot.
+ * It calls the fortran wrapper before calling sdot.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/* C wrapper around the Fortran sdot_sub routine (float dot product). */
+float cblas_sdot( const int N, const float *X,
+                  const int incX, const float *Y, const int incY)
+{
+   float dot;
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+   #define F77_N N
+   #define F77_incX incX
+   #define F77_incY incY
+#endif
+   /* The Fortran wrapper returns its result through an output argument. */
+   F77_sdot_sub( &F77_N, X, &F77_incX, Y, &F77_incY, &dot);
+   return dot;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_sdsdot.c b/contrib/libs/cblas/cblas_interface/cblas_sdsdot.c
new file mode 100644
index 0000000000..b824849b99
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_sdsdot.c
@@ -0,0 +1,25 @@
+/*
+ * cblas_sdsdot.c
+ *
+ * The program is a C interface to sdsdot.
+ * It calls the fortran wrapper before calling sdsdot.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/* C wrapper around the Fortran sdsdot_sub routine (alpha plus a dot
+ * product accumulated in double precision, returned as float). */
+float cblas_sdsdot( const int N, const float alpha, const float *X,
+                    const int incX, const float *Y, const int incY)
+{
+   float dot;
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+   #define F77_N N
+   #define F77_incX incX
+   #define F77_incY incY
+#endif
+   /* The Fortran wrapper returns its result through an output argument. */
+   F77_sdsdot_sub( &F77_N, &alpha, X, &F77_incX, Y, &F77_incY, &dot);
+   return dot;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_sgbmv.c b/contrib/libs/cblas/cblas_interface/cblas_sgbmv.c
new file mode 100644
index 0000000000..0af607f20b
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_sgbmv.c
@@ -0,0 +1,83 @@
+/*
+ *
+ * cblas_sgbmv.c
+ * This program is a C interface to sgbmv.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+/*
+ * General banded matrix-vector product:
+ * Y := alpha*op(A)*X + beta*Y, with KL sub- and KU super-diagonals.
+ * Validates TransA, converts it to a Fortran character flag, and
+ * forwards to F77_sgbmv.
+ */
+void cblas_sgbmv(const enum CBLAS_ORDER order,
+                 const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+                 const int KL, const int KU,
+                 const float alpha, const float *A, const int lda,
+                 const float *X, const int incX, const float beta,
+                 float *Y, const int incY)
+{
+   char TA;
+#ifdef F77_CHAR
+   F77_CHAR F77_TA;
+#else
+   #define F77_TA &TA
+#endif
+#ifdef F77_INT
+   F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+   F77_INT F77_KL=KL,F77_KU=KU;
+#else
+   #define F77_M M
+   #define F77_N N
+   #define F77_lda lda
+   #define F77_KL KL
+   #define F77_KU KU
+   #define F77_incX incX
+   #define F77_incY incY
+#endif
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+
+   CBLAS_CallFromC = 1;
+   if (order == CblasColMajor)
+   {
+      /* Column-major storage: enums map directly to Fortran flags. */
+      if (TransA == CblasNoTrans) TA = 'N';
+      else if (TransA == CblasTrans) TA = 'T';
+      else if (TransA == CblasConjTrans) TA = 'C';
+      else
+      {
+         cblas_xerbla(2, "cblas_sgbmv","Illegal TransA setting, %d\n", TransA);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+      F77_TA = C2F_CHAR(&TA);
+      #endif
+      F77_sgbmv(F77_TA, &F77_M, &F77_N, &F77_KL, &F77_KU, &alpha,
+                A, &F77_lda, X, &F77_incX, &beta, Y, &F77_incY);
+   }
+   else if (order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      /* Row-major: compute with the transposed column-major matrix —
+       * flip the transpose flag and swap M/N and KL/KU in the call.
+       * For real data ConjTrans is equivalent to Trans. */
+      if (TransA == CblasNoTrans) TA = 'T';
+      else if (TransA == CblasTrans) TA = 'N';
+      else if (TransA == CblasConjTrans) TA = 'N';
+      else
+      {
+         cblas_xerbla(2, "cblas_sgbmv","Illegal TransA setting, %d\n", TransA);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+      F77_TA = C2F_CHAR(&TA);
+      #endif
+      F77_sgbmv(F77_TA, &F77_N, &F77_M, &F77_KU, &F77_KL, &alpha,
+                A ,&F77_lda, X, &F77_incX, &beta, Y, &F77_incY);
+   }
+   else cblas_xerbla(1, "cblas_sgbmv", "Illegal Order setting, %d\n", order);
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_sgemm.c b/contrib/libs/cblas/cblas_interface/cblas_sgemm.c
new file mode 100644
index 0000000000..73a06e5e16
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_sgemm.c
@@ -0,0 +1,110 @@
+/*
+ *
+ * cblas_sgemm.c
+ * This program is a C interface to sgemm.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+/*
+ * General matrix-matrix product: C := alpha*op(A)*op(B) + beta*C.
+ * Validates the transpose enums, converts them to Fortran character
+ * flags, and forwards to F77_sgemm.  The row-major case uses the
+ * identity C^T = op(B)^T * op(A)^T: A and B (and M and N) are swapped
+ * in the Fortran call, so TransA maps to TB and TransB maps to TA.
+ *
+ * Fixes vs. the original: the row-major TransB validation reported
+ * "Illegal TransA setting" with argument position 2 and printed TransA;
+ * it now reports TransB at position 3, matching the column-major branch.
+ * A trailing return is added for consistency with the sibling routines.
+ */
+void cblas_sgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+                 const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
+                 const int K, const float alpha, const float *A,
+                 const int lda, const float *B, const int ldb,
+                 const float beta, float *C, const int ldc)
+{
+   char TA, TB;
+#ifdef F77_CHAR
+   F77_CHAR F77_TA, F77_TB;
+#else
+   #define F77_TA &TA
+   #define F77_TB &TB
+#endif
+
+#ifdef F77_INT
+   F77_INT F77_M=M, F77_N=N, F77_K=K, F77_lda=lda, F77_ldb=ldb;
+   F77_INT F77_ldc=ldc;
+#else
+   #define F77_M M
+   #define F77_N N
+   #define F77_K K
+   #define F77_lda lda
+   #define F77_ldb ldb
+   #define F77_ldc ldc
+#endif
+
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+   CBLAS_CallFromC = 1;
+   if( Order == CblasColMajor )
+   {
+      if(TransA == CblasTrans) TA='T';
+      else if ( TransA == CblasConjTrans ) TA='C';
+      else if ( TransA == CblasNoTrans )   TA='N';
+      else
+      {
+         cblas_xerbla(2, "cblas_sgemm",
+                       "Illegal TransA setting, %d\n", TransA);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      if(TransB == CblasTrans) TB='T';
+      else if ( TransB == CblasConjTrans ) TB='C';
+      else if ( TransB == CblasNoTrans )   TB='N';
+      else
+      {
+         cblas_xerbla(3, "cblas_sgemm",
+                       "Illegal TransB setting, %d\n", TransB);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      #ifdef F77_CHAR
+      F77_TA = C2F_CHAR(&TA);
+      F77_TB = C2F_CHAR(&TB);
+      #endif
+
+      F77_sgemm(F77_TA, F77_TB, &F77_M, &F77_N, &F77_K, &alpha, A, &F77_lda, B, &F77_ldb, &beta, C, &F77_ldc);
+   } else if (Order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      /* Operands are swapped below, so TransA governs the SECOND Fortran
+       * flag (TB) and TransB the first (TA). */
+      if(TransA == CblasTrans) TB='T';
+      else if ( TransA == CblasConjTrans ) TB='C';
+      else if ( TransA == CblasNoTrans )   TB='N';
+      else
+      {
+         cblas_xerbla(2, "cblas_sgemm",
+                       "Illegal TransA setting, %d\n", TransA);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      if(TransB == CblasTrans) TA='T';
+      else if ( TransB == CblasConjTrans ) TA='C';
+      else if ( TransB == CblasNoTrans )   TA='N';
+      else
+      {
+         /* BUG FIX: originally reported "Illegal TransA setting" at
+          * position 2 and printed TransA while validating TransB. */
+         cblas_xerbla(3, "cblas_sgemm",
+                       "Illegal TransB setting, %d\n", TransB);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+      F77_TA = C2F_CHAR(&TA);
+      F77_TB = C2F_CHAR(&TB);
+      #endif
+
+      F77_sgemm(F77_TA, F77_TB, &F77_N, &F77_M, &F77_K, &alpha, B, &F77_ldb, A, &F77_lda, &beta, C, &F77_ldc);
+   } else
+      cblas_xerbla(1, "cblas_sgemm",
+                    "Illegal Order setting, %d\n", Order);
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_sgemv.c b/contrib/libs/cblas/cblas_interface/cblas_sgemv.c
new file mode 100644
index 0000000000..45b7196484
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_sgemv.c
@@ -0,0 +1,78 @@
+/*
+ *
+ * cblas_sgemv.c
+ * This program is a C interface to sgemv.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/*
+ * General matrix-vector product: Y := alpha*op(A)*X + beta*Y.
+ * Validates TransA, converts it to a Fortran character flag, and
+ * forwards to F77_sgemv.  Row-major is handled by flipping the
+ * transpose flag and swapping M/N in the Fortran call.
+ *
+ * Fix vs. the original: the column-major invalid-TransA branch was
+ * missing its `return;`, so the code fell through and called F77_sgemv
+ * with TA uninitialized.  Every sibling routine (dtrmv, dtrsv, sgbmv)
+ * returns here.
+ */
+void cblas_sgemv(const enum CBLAS_ORDER order,
+                 const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+                 const float alpha, const float *A, const int lda,
+                 const float *X, const int incX, const float beta,
+                 float *Y, const int incY)
+{
+   char TA;
+#ifdef F77_CHAR
+   F77_CHAR F77_TA;
+#else
+   #define F77_TA &TA
+#endif
+#ifdef F77_INT
+   F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+   #define F77_M M
+   #define F77_N N
+   #define F77_lda lda
+   #define F77_incX incX
+   #define F77_incY incY
+#endif
+
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+
+   CBLAS_CallFromC = 1;
+   if (order == CblasColMajor)
+   {
+      if (TransA == CblasNoTrans) TA = 'N';
+      else if (TransA == CblasTrans) TA = 'T';
+      else if (TransA == CblasConjTrans) TA = 'C';
+      else
+      {
+         cblas_xerbla(2, "cblas_sgemv","Illegal TransA setting, %d\n", TransA);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;   /* BUG FIX: was missing, fell through with TA unset */
+      }
+      #ifdef F77_CHAR
+      F77_TA = C2F_CHAR(&TA);
+      #endif
+      F77_sgemv(F77_TA, &F77_M, &F77_N, &alpha, A, &F77_lda, X, &F77_incX,
+                &beta, Y, &F77_incY);
+   }
+   else if (order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      /* Row-major: use the transposed column-major matrix; for real data
+       * ConjTrans is equivalent to Trans, so both flip to 'N'. */
+      if (TransA == CblasNoTrans) TA = 'T';
+      else if (TransA == CblasTrans) TA = 'N';
+      else if (TransA == CblasConjTrans) TA = 'N';
+      else
+      {
+         cblas_xerbla(2, "cblas_sgemv", "Illegal TransA setting, %d\n", TransA);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+      F77_TA = C2F_CHAR(&TA);
+      #endif
+      F77_sgemv(F77_TA, &F77_N, &F77_M, &alpha, A, &F77_lda, X,
+                &F77_incX, &beta, Y, &F77_incY);
+   }
+   else cblas_xerbla(1, "cblas_sgemv", "Illegal Order setting, %d\n", order);
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_sger.c b/contrib/libs/cblas/cblas_interface/cblas_sger.c
new file mode 100644
index 0000000000..368940c74d
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_sger.c
@@ -0,0 +1,46 @@
+/*
+ *
+ * cblas_sger.c
+ * This program is a C interface to sger.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+/*
+ * Rank-1 update: A := alpha*X*Y^T + A.  No character flags are needed,
+ * so this forwards straight to F77_sger after the Order check.
+ */
+void cblas_sger(const enum CBLAS_ORDER order, const int M, const int N,
+                const float alpha, const float *X, const int incX,
+                const float *Y, const int incY, float *A, const int lda)
+{
+#ifdef F77_INT
+   F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+   #define F77_M M
+   #define F77_N N
+   #define F77_incX incX
+   #define F77_incY incY
+   #define F77_lda lda
+#endif
+
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+
+   CBLAS_CallFromC = 1;
+   if (order == CblasColMajor)
+   {
+      F77_sger( &F77_M, &F77_N, &alpha, X, &F77_incX, Y, &F77_incY, A,
+                &F77_lda);
+   }
+   else if (order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      /* Row-major: update the transposed matrix instead, i.e. compute
+       * alpha*Y*X^T by swapping the vectors and M/N. */
+      F77_sger( &F77_N, &F77_M, &alpha, Y, &F77_incY, X, &F77_incX, A,
+                &F77_lda);
+   }
+   else cblas_xerbla(1, "cblas_sger", "Illegal Order setting, %d\n", order);
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_snrm2.c b/contrib/libs/cblas/cblas_interface/cblas_snrm2.c
new file mode 100644
index 0000000000..18161b4fa7
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_snrm2.c
@@ -0,0 +1,23 @@
+/*
+ * cblas_snrm2.c
+ *
+ * The program is a C interface to snrm2.
+ * It calls the fortran wrapper before calling snrm2.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/* C wrapper around the Fortran snrm2_sub routine (Euclidean norm of a
+ * float vector). */
+float cblas_snrm2( const int N, const float *X, const int incX)
+{
+   float nrm2;
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_incX=incX;
+#else
+   #define F77_N N
+   #define F77_incX incX
+#endif
+   /* The Fortran wrapper returns its result through an output argument. */
+   F77_snrm2_sub( &F77_N, X, &F77_incX, &nrm2);
+   return nrm2;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_srot.c b/contrib/libs/cblas/cblas_interface/cblas_srot.c
new file mode 100644
index 0000000000..cbd1c8c90a
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_srot.c
@@ -0,0 +1,22 @@
+/*
+ * cblas_srot.c
+ *
+ * The program is a C interface to srot.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/* C wrapper around the Fortran srot routine: apply the plane rotation
+ * with cosine c and sine s to the vectors X and Y in place. */
+void cblas_srot( const int N, float *X, const int incX, float *Y,
+                 const int incY, const float c, const float s)
+{
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+   #define F77_N N
+   #define F77_incX incX
+   #define F77_incY incY
+#endif
+   F77_srot(&F77_N, X, &F77_incX, Y, &F77_incY, &c, &s);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_srotg.c b/contrib/libs/cblas/cblas_interface/cblas_srotg.c
new file mode 100644
index 0000000000..f6460048d0
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_srotg.c
@@ -0,0 +1,14 @@
+/*
+ * cblas_srotg.c
+ *
+ * The program is a C interface to srotg.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/* C wrapper around the Fortran srotg routine: construct a Givens plane
+ * rotation; all four arguments are in/out. */
+void cblas_srotg( float *a, float *b, float *c, float *s)
+{
+   F77_srotg(a,b,c,s);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_srotm.c b/contrib/libs/cblas/cblas_interface/cblas_srotm.c
new file mode 100644
index 0000000000..4967464544
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_srotm.c
@@ -0,0 +1,22 @@
+/*
+ * cblas_srotm.c
+ *
+ * The program is a C interface to srotm.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/* C wrapper around the Fortran srotm routine: apply the modified Givens
+ * rotation described by the 5-element parameter array P to X and Y. */
+void cblas_srotm( const int N, float *X, const int incX, float *Y,
+                  const int incY, const float *P)
+{
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+   #define F77_N N
+   #define F77_incX incX
+   #define F77_incY incY
+#endif
+   F77_srotm( &F77_N, X, &F77_incX, Y, &F77_incY, P);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_srotmg.c b/contrib/libs/cblas/cblas_interface/cblas_srotmg.c
new file mode 100644
index 0000000000..04f978b405
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_srotmg.c
@@ -0,0 +1,15 @@
+/*
+ * cblas_srotmg.c
+ *
+ * The program is a C interface to srotmg.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+/* C wrapper around the Fortran srotmg routine: construct a modified
+ * Givens rotation.  b2 is input-only, hence passed by address of the
+ * const parameter; the rest are in/out. */
+void cblas_srotmg( float *d1, float *d2, float *b1,
+                   const float b2, float *p)
+{
+   F77_srotmg(d1,d2,b1,&b2,p);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ssbmv.c b/contrib/libs/cblas/cblas_interface/cblas_ssbmv.c
new file mode 100644
index 0000000000..7a18630b61
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ssbmv.c
@@ -0,0 +1,76 @@
+/*
+ *
+ * cblas_ssbmv.c
+ * This program is a C interface to ssbmv.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ssbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const int K, const float alpha, const float *A,
+ const int lda, const float *X, const int incX,
+ const float beta, float *Y, const int incY)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_K=K, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ssbmv","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_ssbmv(F77_UL, &F77_N, &F77_K, &alpha, A, &F77_lda, X,
+ &F77_incX, &beta, Y, &F77_incY);
+ }else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_ssbmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_ssbmv(F77_UL, &F77_N, &F77_K, &alpha, A, &F77_lda, X,
+ &F77_incX, &beta, Y, &F77_incY);
+ }
+ else cblas_xerbla(1, "cblas_ssbmv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_sscal.c b/contrib/libs/cblas/cblas_interface/cblas_sscal.c
new file mode 100644
index 0000000000..1f09abe7a4
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_sscal.c
@@ -0,0 +1,21 @@
+/*
+ * cblas_sscal.c
+ *
+ * The program is a C interface to sscal.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_sscal( const int N, const float alpha, float *X,
+ const int incX)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incX
+#endif
+ F77_sscal( &F77_N, &alpha, X, &F77_incX);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_sspmv.c b/contrib/libs/cblas/cblas_interface/cblas_sspmv.c
new file mode 100644
index 0000000000..aa4a287eb7
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_sspmv.c
@@ -0,0 +1,73 @@
+/*
+ *
+ * cblas_sspmv.c
+ * This program is a C interface to sspmv.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_sspmv(const enum CBLAS_ORDER order,
+ const enum CBLAS_UPLO Uplo, const int N,
+ const float alpha, const float *AP,
+ const float *X, const int incX, const float beta,
+ float *Y, const int incY)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_sspmv","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_sspmv(F77_UL, &F77_N, &alpha, AP, X,
+ &F77_incX, &beta, Y, &F77_incY);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_sspmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_sspmv(F77_UL, &F77_N, &alpha,
+ AP, X,&F77_incX, &beta, Y, &F77_incY);
+ }
+ else cblas_xerbla(1, "cblas_sspmv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_sspr.c b/contrib/libs/cblas/cblas_interface/cblas_sspr.c
new file mode 100644
index 0000000000..c8517ac1cd
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_sspr.c
@@ -0,0 +1,72 @@
+/*
+ *
+ * cblas_sspr.c
+ * This program is a C interface to sspr.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_sspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const float *X,
+ const int incX, float *Ap)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incX
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasLower) UL = 'L';
+ else if (Uplo == CblasUpper) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_sspr","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+
+ F77_sspr(F77_UL, &F77_N, &alpha, X, &F77_incX, Ap);
+
+ } else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasLower) UL = 'U';
+ else if (Uplo == CblasUpper) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_sspr","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_sspr(F77_UL, &F77_N, &alpha, X, &F77_incX, Ap);
+ } else cblas_xerbla(1, "cblas_sspr", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_sspr2.c b/contrib/libs/cblas/cblas_interface/cblas_sspr2.c
new file mode 100644
index 0000000000..4f5afcd85d
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_sspr2.c
@@ -0,0 +1,71 @@
+/*
+ *
+ * cblas_sspr2.c
+ * This program is a C interface to sspr2.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_sspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const float *X,
+ const int incX, const float *Y, const int incY, float *A)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasLower) UL = 'L';
+ else if (Uplo == CblasUpper) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_sspr2","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+
+ F77_sspr2(F77_UL, &F77_N, &alpha, X, &F77_incX, Y, &F77_incY, A);
+
+ } else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasLower) UL = 'U';
+ else if (Uplo == CblasUpper) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_sspr2","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_sspr2(F77_UL, &F77_N, &alpha, X, &F77_incX, Y, &F77_incY, A);
+ } else cblas_xerbla(1, "cblas_sspr2", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_sswap.c b/contrib/libs/cblas/cblas_interface/cblas_sswap.c
new file mode 100644
index 0000000000..b74d8469c3
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_sswap.c
@@ -0,0 +1,22 @@
+/*
+ * cblas_sswap.c
+ *
+ * The program is a C interface to sswap.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_sswap( const int N, float *X, const int incX, float *Y,
+ const int incY)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ F77_sswap( &F77_N, X, &F77_incX, Y, &F77_incY);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ssymm.c b/contrib/libs/cblas/cblas_interface/cblas_ssymm.c
new file mode 100644
index 0000000000..a3b160105d
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ssymm.c
@@ -0,0 +1,108 @@
+/*
+ *
+ * cblas_ssymm.c
+ * This program is a C interface to ssymm.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ssymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const int M, const int N,
+ const float alpha, const float *A, const int lda,
+ const float *B, const int ldb, const float beta,
+ float *C, const int ldc)
+{
+ char SD, UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_SD, F77_UL;
+#else
+ #define F77_SD &SD
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_ldb=ldb;
+ F77_INT F77_ldc=ldc;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_ldb ldb
+ #define F77_ldc ldc
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+ if( Side == CblasRight) SD='R';
+ else if ( Side == CblasLeft ) SD='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ssymm",
+ "Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Uplo == CblasUpper) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(3, "cblas_ssymm",
+ "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_SD = C2F_CHAR(&SD);
+ #endif
+
+ F77_ssymm(F77_SD, F77_UL, &F77_M, &F77_N, &alpha, A, &F77_lda, B, &F77_ldb, &beta, C, &F77_ldc);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if( Side == CblasRight) SD='L';
+ else if ( Side == CblasLeft ) SD='R';
+ else
+ {
+ cblas_xerbla(2, "cblas_ssymm",
+ "Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Uplo == CblasUpper) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_ssymm",
+ "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_SD = C2F_CHAR(&SD);
+ #endif
+
+ F77_ssymm(F77_SD, F77_UL, &F77_N, &F77_M, &alpha, A, &F77_lda, B, &F77_ldb, &beta, C, &F77_ldc);
+ } else cblas_xerbla(1, "cblas_ssymm",
+ "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ssymv.c b/contrib/libs/cblas/cblas_interface/cblas_ssymv.c
new file mode 100644
index 0000000000..89f5cc0cc6
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ssymv.c
@@ -0,0 +1,76 @@
+/*
+ *
+ * cblas_ssymv.c
+ * This program is a C interface to ssymv.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ssymv(const enum CBLAS_ORDER order,
+ const enum CBLAS_UPLO Uplo, const int N,
+ const float alpha, const float *A, const int lda,
+ const float *X, const int incX, const float beta,
+ float *Y, const int incY)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ssymv","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_ssymv(F77_UL, &F77_N, &alpha, A, &F77_lda, X,
+ &F77_incX, &beta, Y, &F77_incY);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_ssymv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_ssymv(F77_UL, &F77_N, &alpha,
+ A ,&F77_lda, X,&F77_incX, &beta, Y, &F77_incY);
+ }
+ else cblas_xerbla(1, "cblas_ssymv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ssyr.c b/contrib/libs/cblas/cblas_interface/cblas_ssyr.c
new file mode 100644
index 0000000000..4e58dba417
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ssyr.c
@@ -0,0 +1,70 @@
+/*
+ *
+ * cblas_ssyr.c
+ * This program is a C interface to ssyr.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ssyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const float alpha, const float *X,
+ const int incX, float *A, const int lda)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_lda=lda;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_lda lda
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasLower) UL = 'L';
+ else if (Uplo == CblasUpper) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_ssyr","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+
+ F77_ssyr(F77_UL, &F77_N, &alpha, X, &F77_incX, A, &F77_lda);
+
+ } else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasLower) UL = 'U';
+ else if (Uplo == CblasUpper) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ssyr","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_ssyr(F77_UL, &F77_N, &alpha, X, &F77_incX, A, &F77_lda);
+ } else cblas_xerbla(1, "cblas_ssyr", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ssyr2.c b/contrib/libs/cblas/cblas_interface/cblas_ssyr2.c
new file mode 100644
index 0000000000..1d990cd413
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ssyr2.c
@@ -0,0 +1,76 @@
+/*
+ *
+ * cblas_ssyr2.c
+ * This program is a C interface to ssyr2.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ssyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+                const int N, const float alpha, const float *X,
+                const int incX, const float *Y, const int incY, float *A,
+                const int lda)
+{
+   char UL;
+#ifdef F77_CHAR
+   F77_CHAR F77_UL;
+#else
+   #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+   /* fixed: was F77__lda (double underscore), which broke the F77_INT build
+      since the calls below reference F77_lda */
+   F77_INT F77_N=N, F77_incX=incX, F77_incY=incY, F77_lda=lda;
+#else
+   #define F77_N N
+   #define F77_incX incX
+   #define F77_incY incY
+   #define F77_lda lda
+#endif
+
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+   CBLAS_CallFromC = 1;
+   if (order == CblasColMajor)
+   {
+      if (Uplo == CblasLower) UL = 'L';
+      else if (Uplo == CblasUpper) UL = 'U';
+      else
+      {
+         cblas_xerbla(2, "cblas_ssyr2","Illegal Uplo setting, %d\n",Uplo );
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+      #endif
+
+      F77_ssyr2(F77_UL, &F77_N, &alpha, X, &F77_incX, Y, &F77_incY, A,
+                      &F77_lda);
+
+   }  else if (order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      if (Uplo == CblasLower) UL = 'U';
+      else if (Uplo == CblasUpper) UL = 'L';
+      else
+      {
+         cblas_xerbla(2, "cblas_ssyr2","Illegal Uplo setting, %d\n",Uplo );
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+      #endif
+      F77_ssyr2(F77_UL, &F77_N, &alpha, X, &F77_incX, Y, &F77_incY, A,
+                      &F77_lda);
+   } else cblas_xerbla(1, "cblas_ssyr2", "Illegal Order setting, %d\n", order);
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ssyr2k.c b/contrib/libs/cblas/cblas_interface/cblas_ssyr2k.c
new file mode 100644
index 0000000000..871dd21a12
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ssyr2k.c
@@ -0,0 +1,111 @@
+/*
+ *
+ * cblas_ssyr2k.c
+ * This program is a C interface to ssyr2k.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ssyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+                  const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+                  const float alpha, const float *A, const int lda,
+                  const float *B, const int ldb, const float beta,
+                  float *C, const int ldc)
+{
+   char UL, TR;
+#ifdef F77_CHAR
+   /* fixed: was F77_TA, which broke the F77_CHAR build since the code
+      below assigns and passes F77_TR */
+   F77_CHAR F77_TR, F77_UL;
+#else
+   #define F77_TR &TR
+   #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_K=K, F77_lda=lda, F77_ldb=ldb;
+   F77_INT F77_ldc=ldc;
+#else
+   #define F77_N N
+   #define F77_K K
+   #define F77_lda lda
+   #define F77_ldb ldb
+   #define F77_ldc ldc
+#endif
+
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+   CBLAS_CallFromC = 1;
+
+   if( Order == CblasColMajor )
+   {
+
+      if( Uplo == CblasUpper) UL='U';
+      else if ( Uplo == CblasLower ) UL='L';
+      else
+      {
+         cblas_xerbla(2, "cblas_ssyr2k",
+                      "Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      if( Trans == CblasTrans) TR ='T';
+      else if ( Trans == CblasConjTrans ) TR='C';
+      else if ( Trans == CblasNoTrans )   TR='N';
+      else
+      {
+         cblas_xerbla(3, "cblas_ssyr2k",
+                      "Illegal Trans setting, %d\n", Trans);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+         F77_TR = C2F_CHAR(&TR);
+      #endif
+
+      F77_ssyr2k(F77_UL, F77_TR, &F77_N, &F77_K, &alpha, A, &F77_lda, B, &F77_ldb, &beta, C, &F77_ldc);
+   } else if (Order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      if( Uplo == CblasUpper) UL='L';
+      else if ( Uplo == CblasLower ) UL='U';
+      else
+      {
+         cblas_xerbla(3, "cblas_ssyr2k",
+                      "Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      if( Trans == CblasTrans) TR ='N';
+      else if ( Trans == CblasConjTrans ) TR='N';
+      else if ( Trans == CblasNoTrans )   TR='T';
+      else
+      {
+         cblas_xerbla(3, "cblas_ssyr2k",
+                      "Illegal Trans setting, %d\n", Trans);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+         F77_TR = C2F_CHAR(&TR);
+      #endif
+
+      F77_ssyr2k(F77_UL, F77_TR, &F77_N, &F77_K, &alpha, A, &F77_lda, B, &F77_ldb, &beta, C, &F77_ldc);
+   } else cblas_xerbla(1, "cblas_ssyr2k",
+                       "Illegal Order setting, %d\n", Order);
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ssyrk.c b/contrib/libs/cblas/cblas_interface/cblas_ssyrk.c
new file mode 100644
index 0000000000..4992c9b266
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ssyrk.c
@@ -0,0 +1,110 @@
+/*
+ *
+ * cblas_ssyrk.c
+ * This program is a C interface to ssyrk.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ssyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const float alpha, const float *A, const int lda,
+ const float beta, float *C, const int ldc)
+{
+ char UL, TR;
+#ifdef F77_CHAR
+ F77_CHAR F77_TR, F77_UL;
+#else
+ #define F77_TR &TR
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_K=K, F77_lda=lda;
+ F77_INT F77_ldc=ldc;
+#else
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_ldc ldc
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+
+ if( Uplo == CblasUpper) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ssyrk",
+ "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Trans == CblasTrans) TR ='T';
+ else if ( Trans == CblasConjTrans ) TR='C';
+ else if ( Trans == CblasNoTrans ) TR='N';
+ else
+ {
+ cblas_xerbla(3, "cblas_ssyrk",
+ "Illegal Trans setting, %d\n", Trans);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TR = C2F_CHAR(&TR);
+ #endif
+
+ F77_ssyrk(F77_UL, F77_TR, &F77_N, &F77_K, &alpha, A, &F77_lda, &beta, C, &F77_ldc);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if( Uplo == CblasUpper) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_ssyrk",
+ "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if( Trans == CblasTrans) TR ='N';
+ else if ( Trans == CblasConjTrans ) TR='N';
+ else if ( Trans == CblasNoTrans ) TR='T';
+ else
+ {
+ cblas_xerbla(3, "cblas_ssyrk",
+ "Illegal Trans setting, %d\n", Trans);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TR = C2F_CHAR(&TR);
+ #endif
+
+ F77_ssyrk(F77_UL, F77_TR, &F77_N, &F77_K, &alpha, A, &F77_lda, &beta, C, &F77_ldc);
+ } else cblas_xerbla(1, "cblas_ssyrk",
+ "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
+
diff --git a/contrib/libs/cblas/cblas_interface/cblas_stbmv.c b/contrib/libs/cblas/cblas_interface/cblas_stbmv.c
new file mode 100644
index 0000000000..9e84bc0198
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_stbmv.c
@@ -0,0 +1,122 @@
+/*
+ * cblas_stbmv.c
+ * This program is a C interface to stbmv.
+ * Written by Keita Teranishi
+ * 3/3/1998
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+
+void cblas_stbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+                 const int N, const int K, const float *A, const int lda,
+                 float *X, const int incX)
+{
+   char TA;
+   char UL;
+   char DI;
+#ifdef F77_CHAR
+   F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+   #define F77_TA &TA
+   #define F77_UL &UL
+   #define F77_DI &DI
+#endif
+#ifdef F77_INT
+   F77_INT F77_N=N, F77_lda=lda, F77_K=K, F77_incX=incX;
+#else
+   #define F77_N N
+   #define F77_K K
+   #define F77_lda lda
+   #define F77_incX incX
+#endif
+   extern int CBLAS_CallFromC;
+   extern int RowMajorStrg;
+   RowMajorStrg = 0;
+
+   CBLAS_CallFromC = 1;
+   if (order == CblasColMajor)
+   {
+      if (Uplo == CblasUpper) UL = 'U';
+      else if (Uplo == CblasLower) UL = 'L';
+      else
+      {
+         cblas_xerbla(2, "cblas_stbmv","Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      if (TransA == CblasNoTrans) TA = 'N';
+      else if (TransA == CblasTrans) TA = 'T';
+      else if (TransA == CblasConjTrans) TA = 'C';
+      else
+      {
+         cblas_xerbla(3, "cblas_stbmv","Illegal TransA setting, %d\n", TransA);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      if (Diag == CblasUnit) DI = 'U';
+      else if (Diag == CblasNonUnit) DI = 'N';
+      else
+      {
+         cblas_xerbla(4, "cblas_stbmv","Illegal Diag setting, %d\n", Diag);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+         F77_TA = C2F_CHAR(&TA);
+         F77_DI = C2F_CHAR(&DI);
+      #endif
+      F77_stbmv( F77_UL, F77_TA, F77_DI, &F77_N, &F77_K, A, &F77_lda, X,
+                 &F77_incX);
+   }
+   else if (order == CblasRowMajor)
+   {
+      RowMajorStrg = 1;
+      if (Uplo == CblasUpper) UL = 'L';
+      else if (Uplo == CblasLower) UL = 'U';
+      else
+      {
+         cblas_xerbla(2, "cblas_stbmv","Illegal Uplo setting, %d\n", Uplo);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      if (TransA == CblasNoTrans) TA = 'T';
+      else if (TransA == CblasTrans) TA = 'N';
+      else if (TransA == CblasConjTrans) TA = 'N';
+      else
+      {
+         cblas_xerbla(3, "cblas_stbmv","Illegal TransA setting, %d\n", TransA);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+
+      if (Diag == CblasUnit) DI = 'U';
+      else if (Diag == CblasNonUnit) DI = 'N';
+      else
+      {
+         /* fixed: this error path reported "Illegal Uplo setting" with Uplo;
+            it validates Diag, so report Diag (matches the column-major branch) */
+         cblas_xerbla(4, "cblas_stbmv","Illegal Diag setting, %d\n", Diag);
+         CBLAS_CallFromC = 0;
+         RowMajorStrg = 0;
+         return;
+      }
+      #ifdef F77_CHAR
+         F77_UL = C2F_CHAR(&UL);
+         F77_TA = C2F_CHAR(&TA);
+         F77_DI = C2F_CHAR(&DI);
+      #endif
+
+      F77_stbmv( F77_UL, F77_TA, F77_DI, &F77_N, &F77_K, A, &F77_lda, X,
+                 &F77_incX);
+   }
+   else cblas_xerbla(1, "cblas_stbmv", "Illegal Order setting, %d\n", order);
+   CBLAS_CallFromC = 0;
+   RowMajorStrg = 0;
+   return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_stbsv.c b/contrib/libs/cblas/cblas_interface/cblas_stbsv.c
new file mode 100644
index 0000000000..fc19089708
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_stbsv.c
@@ -0,0 +1,122 @@
+/*
+ * cblas_stbsv.c
+ * The program is a C interface to stbsv.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_stbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const float *A, const int lda,
+ float *X, const int incX)
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_lda=lda, F77_K=K, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_incX incX
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_stbsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_stbsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_stbsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_stbsv( F77_UL, F77_TA, F77_DI, &F77_N, &F77_K, A, &F77_lda, X,
+ &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_stbsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans) TA = 'N';
+ else
+ {
+ cblas_xerbla(3, "cblas_stbsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_stbsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_stbsv( F77_UL, F77_TA, F77_DI, &F77_N, &F77_K, A, &F77_lda, X,
+ &F77_incX);
+ }
+ else cblas_xerbla(1, "cblas_stbsv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_stpmv.c b/contrib/libs/cblas/cblas_interface/cblas_stpmv.c
new file mode 100644
index 0000000000..8f7fd6acd9
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_stpmv.c
@@ -0,0 +1,118 @@
+/*
+ *
+ * cblas_stpmv.c
+ * This program is a C interface to stpmv.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_stpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const float *Ap, float *X, const int incX)
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incX
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_stpmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_stpmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_stpmv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_stpmv( F77_UL, F77_TA, F77_DI, &F77_N, Ap, X, &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_stpmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans) TA = 'N';
+ else
+ {
+ cblas_xerbla(3, "cblas_stpmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_stpmv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_stpmv( F77_UL, F77_TA, F77_DI, &F77_N, Ap, X,&F77_incX);
+ }
+ else cblas_xerbla(1, "cblas_stpmv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_stpsv.c b/contrib/libs/cblas/cblas_interface/cblas_stpsv.c
new file mode 100644
index 0000000000..acc5f1d5ca
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_stpsv.c
@@ -0,0 +1,118 @@
+/*
+ * cblas_stpsv.c
+ * The program is a C interface to stpsv.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_stpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const float *Ap, float *X, const int incX)
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incX
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_stpsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_stpsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_stpsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_stpsv( F77_UL, F77_TA, F77_DI, &F77_N, Ap, X, &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_stpsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans) TA = 'N';
+ else
+ {
+ cblas_xerbla(3, "cblas_stpsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_stpsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_stpsv( F77_UL, F77_TA, F77_DI, &F77_N, Ap, X,&F77_incX);
+
+ }
+ else cblas_xerbla(1, "cblas_stpsv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_strmm.c b/contrib/libs/cblas/cblas_interface/cblas_strmm.c
new file mode 100644
index 0000000000..9f8ce198dc
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_strmm.c
@@ -0,0 +1,148 @@
+/*
+ *
+ * cblas_strmm.c
+ * This program is a C interface to strmm.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_strmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const float alpha, const float *A, const int lda,
+ float *B, const int ldb)
+{
+ char UL, TA, SD, DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_SD, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_SD &SD
+ #define F77_DI &DI
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_ldb=ldb;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_ldb ldb
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+ if( Side == CblasRight) SD='R';
+ else if ( Side == CblasLeft ) SD='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_strmm","Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if( Uplo == CblasUpper) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(3, "cblas_strmm","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( TransA == CblasTrans) TA ='T';
+ else if ( TransA == CblasConjTrans ) TA='C';
+ else if ( TransA == CblasNoTrans ) TA='N';
+ else
+ {
+ cblas_xerbla(4, "cblas_strmm","Illegal Trans setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Diag == CblasUnit ) DI='U';
+ else if ( Diag == CblasNonUnit ) DI='N';
+ else
+ {
+ cblas_xerbla(5, "cblas_strmm", "Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_SD = C2F_CHAR(&SD);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_strmm(F77_SD, F77_UL, F77_TA, F77_DI, &F77_M, &F77_N, &alpha, A, &F77_lda, B, &F77_ldb);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if( Side == CblasRight) SD='L';
+ else if ( Side == CblasLeft ) SD='R';
+ else
+ {
+ cblas_xerbla(2, "cblas_strmm","Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Uplo == CblasUpper) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_strmm", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( TransA == CblasTrans) TA ='T';
+ else if ( TransA == CblasConjTrans ) TA='C';
+ else if ( TransA == CblasNoTrans ) TA='N';
+ else
+ {
+ cblas_xerbla(4, "cblas_strmm", "Illegal Trans setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Diag == CblasUnit ) DI='U';
+ else if ( Diag == CblasNonUnit ) DI='N';
+ else
+ {
+ cblas_xerbla(5, "cblas_strmm","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+#ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_SD = C2F_CHAR(&SD);
+ F77_DI = C2F_CHAR(&DI);
+#endif
+ F77_strmm(F77_SD, F77_UL, F77_TA, F77_DI, &F77_N, &F77_M, &alpha, A,
+ &F77_lda, B, &F77_ldb);
+ }
+ else cblas_xerbla(1, "cblas_strmm", "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_strmv.c b/contrib/libs/cblas/cblas_interface/cblas_strmv.c
new file mode 100644
index 0000000000..5a85b1dafb
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_strmv.c
@@ -0,0 +1,122 @@
+/*
+ *
+ * cblas_strmv.c
+ * This program is a C interface to strmv.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_strmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const float *A, const int lda,
+ float *X, const int incX)
+
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_lda=lda, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_incX incX
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_strmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_strmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_strmv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_strmv( F77_UL, F77_TA, F77_DI, &F77_N, A, &F77_lda, X,
+ &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_strmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans) TA = 'N';
+ else
+ {
+ cblas_xerbla(3, "cblas_strmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_strmv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_strmv( F77_UL, F77_TA, F77_DI, &F77_N, A, &F77_lda, X,
+ &F77_incX);
+ }
+ else cblas_xerbla(1, "cblas_strmv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_strsm.c b/contrib/libs/cblas/cblas_interface/cblas_strsm.c
new file mode 100644
index 0000000000..5dc3e0bc0d
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_strsm.c
@@ -0,0 +1,143 @@
+/*
+ *
+ * cblas_strsm.c
+ * This program is a C interface to strsm.
+ * Written by Keita Teranishi
+ * 4/6/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_strsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const float alpha, const float *A, const int lda,
+ float *B, const int ldb)
+
+{
+ char UL, TA, SD, DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_SD, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_SD &SD
+ #define F77_DI &DI
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_ldb=ldb;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_ldb ldb
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+ if( Side == CblasRight) SD='R';
+ else if ( Side == CblasLeft ) SD='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_strsm", "Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if( Uplo == CblasUpper) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(3, "cblas_strsm", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if( TransA == CblasTrans) TA ='T';
+ else if ( TransA == CblasConjTrans ) TA='C';
+ else if ( TransA == CblasNoTrans ) TA='N';
+ else
+ {
+ cblas_xerbla(4, "cblas_strsm", "Illegal Trans setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if( Diag == CblasUnit ) DI='U';
+ else if ( Diag == CblasNonUnit ) DI='N';
+ else
+ {
+ cblas_xerbla(5, "cblas_strsm", "Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_SD = C2F_CHAR(&SD);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_strsm(F77_SD, F77_UL, F77_TA, F77_DI, &F77_M, &F77_N, &alpha, A, &F77_lda, B, &F77_ldb);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if( Side == CblasRight) SD='L';
+ else if ( Side == CblasLeft ) SD='R';
+ else
+ {
+ cblas_xerbla(2, "cblas_strsm", "Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if( Uplo == CblasUpper) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_strsm", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if( TransA == CblasTrans) TA ='T';
+ else if ( TransA == CblasConjTrans ) TA='C';
+ else if ( TransA == CblasNoTrans ) TA='N';
+ else
+ {
+ cblas_xerbla(4, "cblas_strsm", "Illegal Trans setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if( Diag == CblasUnit ) DI='U';
+ else if ( Diag == CblasNonUnit ) DI='N';
+ else
+ {
+ cblas_xerbla(5, "cblas_strsm", "Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_SD = C2F_CHAR(&SD);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_strsm(F77_SD, F77_UL, F77_TA, F77_DI, &F77_N, &F77_M, &alpha, A, &F77_lda, B, &F77_ldb);
+ }
+ else cblas_xerbla(1, "cblas_strsm", "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_strsv.c b/contrib/libs/cblas/cblas_interface/cblas_strsv.c
new file mode 100644
index 0000000000..a0509aebd6
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_strsv.c
@@ -0,0 +1,121 @@
+/*
+ * cblas_strsv.c
+ * The program is a C interface to strsv.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_strsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const float *A, const int lda, float *X,
+ const int incX)
+
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_lda=lda, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_incX incX
+#endif
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_strsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_strsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_strsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_strsv( F77_UL, F77_TA, F77_DI, &F77_N, A, &F77_lda, X,
+ &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_strsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans) TA = 'N';
+ else
+ {
+ cblas_xerbla(3, "cblas_strsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_strsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_strsv( F77_UL, F77_TA, F77_DI, &F77_N, A, &F77_lda, X,
+ &F77_incX);
+ }
+ else cblas_xerbla(1, "cblas_strsv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_xerbla.c b/contrib/libs/cblas/cblas_interface/cblas_xerbla.c
new file mode 100644
index 0000000000..3a2bfe6e3b
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_xerbla.c
@@ -0,0 +1,68 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include "cblas.h"
+#include "cblas_f77.h"
+
+void cblas_xerbla(int info, const char *rout, const char *form, ...)
+{
+ extern int RowMajorStrg;
+ char empty[1] = "";
+ va_list argptr;
+
+ va_start(argptr, form);
+
+ if (RowMajorStrg)
+ {
+ if (strstr(rout,"gemm") != 0)
+ {
+ if (info == 5 ) info = 4;
+ else if (info == 4 ) info = 5;
+ else if (info == 11) info = 9;
+ else if (info == 9 ) info = 11;
+ }
+ else if (strstr(rout,"symm") != 0 || strstr(rout,"hemm") != 0)
+ {
+ if (info == 5 ) info = 4;
+ else if (info == 4 ) info = 5;
+ }
+ else if (strstr(rout,"trmm") != 0 || strstr(rout,"trsm") != 0)
+ {
+ if (info == 7 ) info = 6;
+ else if (info == 6 ) info = 7;
+ }
+ else if (strstr(rout,"gemv") != 0)
+ {
+ if (info == 4) info = 3;
+ else if (info == 3) info = 4;
+ }
+ else if (strstr(rout,"gbmv") != 0)
+ {
+ if (info == 4) info = 3;
+ else if (info == 3) info = 4;
+ else if (info == 6) info = 5;
+ else if (info == 5) info = 6;
+ }
+ else if (strstr(rout,"ger") != 0)
+ {
+ if (info == 3) info = 2;
+ else if (info == 2) info = 3;
+ else if (info == 8) info = 6;
+ else if (info == 6) info = 8;
+ }
+ else if ( (strstr(rout,"her2") != 0 || strstr(rout,"hpr2") != 0)
+ && strstr(rout,"her2k") == 0 )
+ {
+ if (info == 8) info = 6;
+ else if (info == 6) info = 8;
+ }
+ }
+ if (info)
+ fprintf(stderr, "Parameter %d to routine %s was incorrect\n", info, rout);
+ vfprintf(stderr, form, argptr);
+ va_end(argptr);
+ if (info && !info)
+ F77_xerbla(empty, &info); /* Force link of our F77 error handler */
+ exit(-1);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zaxpy.c b/contrib/libs/cblas/cblas_interface/cblas_zaxpy.c
new file mode 100644
index 0000000000..f63c4c39bc
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zaxpy.c
@@ -0,0 +1,22 @@
+/*
+ * cblas_zaxpy.c
+ *
+ * The program is a C interface to zaxpy.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zaxpy( const int N, const void *alpha, const void *X,
+ const int incX, void *Y, const int incY)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ F77_zaxpy( &F77_N, alpha, X, &F77_incX, Y, &F77_incY);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zcopy.c b/contrib/libs/cblas/cblas_interface/cblas_zcopy.c
new file mode 100644
index 0000000000..a16be28e7e
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zcopy.c
@@ -0,0 +1,22 @@
+/*
+ * cblas_zcopy.c
+ *
+ * The program is a C interface to zcopy.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zcopy( const int N, const void *X,
+ const int incX, void *Y, const int incY)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ F77_zcopy( &F77_N, X, &F77_incX, Y, &F77_incY);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zdotc_sub.c b/contrib/libs/cblas/cblas_interface/cblas_zdotc_sub.c
new file mode 100644
index 0000000000..29dec6c576
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zdotc_sub.c
@@ -0,0 +1,24 @@
+/*
+ * cblas_zdotc_sub.c
+ *
+ * The program is a C interface to zdotc.
+ * It calls the fortran wrapper before calling zdotc.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zdotc_sub( const int N, const void *X, const int incX,
+ const void *Y, const int incY, void *dotc)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ F77_zdotc_sub( &F77_N, X, &F77_incX, Y, &F77_incY, dotc);
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zdotu_sub.c b/contrib/libs/cblas/cblas_interface/cblas_zdotu_sub.c
new file mode 100644
index 0000000000..48a14bf3d4
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zdotu_sub.c
@@ -0,0 +1,24 @@
+/*
+ * cblas_zdotu_sub.c
+ *
+ * The program is a C interface to zdotu.
+ * It calls the fortran wrapper before calling zdotu.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zdotu_sub( const int N, const void *X, const int incX,
+ const void *Y, const int incY, void *dotu)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ F77_zdotu_sub( &F77_N, X, &F77_incX, Y, &F77_incY, dotu);
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zdscal.c b/contrib/libs/cblas/cblas_interface/cblas_zdscal.c
new file mode 100644
index 0000000000..788365befa
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zdscal.c
@@ -0,0 +1,21 @@
+/*
+ * cblas_zdscal.c
+ *
+ * The program is a C interface to zdscal.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zdscal( const int N, const double alpha, void *X,
+ const int incX)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incX
+#endif
+ F77_zdscal( &F77_N, &alpha, X, &F77_incX);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zgbmv.c b/contrib/libs/cblas/cblas_interface/cblas_zgbmv.c
new file mode 100644
index 0000000000..fb3cabb400
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zgbmv.c
@@ -0,0 +1,166 @@
+/*
+ * cblas_zgbmv.c
+ * The program is a C interface of zgbmv
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zgbmv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const int KL, const int KU,
+ const void *alpha, const void *A, const int lda,
+ const void *X, const int incX, const void *beta,
+ void *Y, const int incY)
+{
+ char TA;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA;
+#else
+ #define F77_TA &TA
+#endif
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+ F77_INT F77_KL=KL,F77_KU=KU;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_KL KL
+ #define F77_KU KU
+ #define F77_incX incx
+ #define F77_incY incY
+#endif
+ int n, i=0, incx=incX;
+ const double *xx= (const double *)X, *alp= (const double *)alpha, *bet = (const double *)beta;
+ double ALPHA[2],BETA[2];
+ int tincY, tincx;
+ double *x=(double *)X, *y=(double *)Y, *st=0, *tx;
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(2, "cblas_zgbmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_TA = C2F_CHAR(&TA);
+ #endif
+ F77_zgbmv(F77_TA, &F77_M, &F77_N, &F77_KL, &F77_KU, alpha,
+ A, &F77_lda, X, &F77_incX, beta, Y, &F77_incY);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans)
+ {
+ ALPHA[0]= *alp;
+ ALPHA[1]= -alp[1];
+ BETA[0]= *bet;
+ BETA[1]= -bet[1];
+ TA = 'N';
+ if (M > 0)
+ {
+ n = M << 1;
+ x = malloc(n*sizeof(double));
+ tx = x;
+
+ if( incX > 0 ) {
+ i = incX << 1 ;
+ tincx = 2;
+ st= x+n;
+ } else {
+ i = incX *(-2);
+ tincx = -2;
+ st = x-2;
+ x +=(n-2);
+ }
+ do
+ {
+ *x = *xx;
+ x[1] = -xx[1];
+ x += tincx ;
+ xx += i;
+ }
+ while (x != st);
+ x=tx;
+
+ #ifdef F77_INT
+ F77_incX = 1;
+ #else
+ incx = 1;
+ #endif
+
+ if( incY > 0 )
+ tincY = incY;
+ else
+ tincY = -incY;
+
+ y++;
+
+ if (N > 0)
+ {
+ i = tincY << 1;
+ n = i * N ;
+ st = y + n;
+ do {
+ *y = -(*y);
+ y += i;
+ } while(y != st);
+ y -= n;
+ }
+ }
+ else x = (double *) X;
+
+
+ }
+ else
+ {
+ cblas_xerbla(2, "cblas_zgbmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_TA = C2F_CHAR(&TA);
+ #endif
+ if (TransA == CblasConjTrans)
+ F77_zgbmv(F77_TA, &F77_N, &F77_M, &F77_KU, &F77_KL, ALPHA,
+ A ,&F77_lda, x,&F77_incX, BETA, Y, &F77_incY);
+ else
+ F77_zgbmv(F77_TA, &F77_N, &F77_M, &F77_KU, &F77_KL, alpha,
+ A ,&F77_lda, x,&F77_incX, beta, Y, &F77_incY);
+ if (TransA == CblasConjTrans)
+ {
+ if (x != (double *)X) free(x);
+ if (N > 0)
+ {
+ do
+ {
+ *y = -(*y);
+ y += i;
+ }
+ while (y != st);
+ }
+ }
+ }
+ else cblas_xerbla(1, "cblas_zgbmv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zgemm.c b/contrib/libs/cblas/cblas_interface/cblas_zgemm.c
new file mode 100644
index 0000000000..f344d83876
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zgemm.c
@@ -0,0 +1,109 @@
+/*
+ *
+ * cblas_zgemm.c
+ * This program is a C interface to zgemm.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
+ const int K, const void *alpha, const void *A,
+ const int lda, const void *B, const int ldb,
+ const void *beta, void *C, const int ldc)
+{
+ char TA, TB;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_TB;
+#else
+ #define F77_TA &TA
+ #define F77_TB &TB
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_K=K, F77_lda=lda, F77_ldb=ldb;
+ F77_INT F77_ldc=ldc;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_ldb ldb
+ #define F77_ldc ldc
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+ if(TransA == CblasTrans) TA='T';
+ else if ( TransA == CblasConjTrans ) TA='C';
+ else if ( TransA == CblasNoTrans ) TA='N';
+ else
+ {
+ cblas_xerbla(2, "cblas_zgemm","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if(TransB == CblasTrans) TB='T';
+ else if ( TransB == CblasConjTrans ) TB='C';
+ else if ( TransB == CblasNoTrans ) TB='N';
+ else
+ {
+ cblas_xerbla(3, "cblas_zgemm","Illegal TransB setting, %d\n", TransB);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_TA = C2F_CHAR(&TA);
+ F77_TB = C2F_CHAR(&TB);
+ #endif
+
+ F77_zgemm(F77_TA, F77_TB, &F77_M, &F77_N, &F77_K, alpha, A,
+ &F77_lda, B, &F77_ldb, beta, C, &F77_ldc);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if(TransA == CblasTrans) TB='T';
+ else if ( TransA == CblasConjTrans ) TB='C';
+ else if ( TransA == CblasNoTrans ) TB='N';
+ else
+ {
+ cblas_xerbla(2, "cblas_zgemm","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if(TransB == CblasTrans) TA='T';
+ else if ( TransB == CblasConjTrans ) TA='C';
+ else if ( TransB == CblasNoTrans ) TA='N';
+ else
+ {
+ cblas_xerbla(2, "cblas_zgemm","Illegal TransB setting, %d\n", TransB);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_TA = C2F_CHAR(&TA);
+ F77_TB = C2F_CHAR(&TB);
+ #endif
+
+ F77_zgemm(F77_TA, F77_TB, &F77_N, &F77_M, &F77_K, alpha, B,
+ &F77_ldb, A, &F77_lda, beta, C, &F77_ldc);
+ }
+ else cblas_xerbla(1, "cblas_zgemm", "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zgemv.c b/contrib/libs/cblas/cblas_interface/cblas_zgemv.c
new file mode 100644
index 0000000000..355d7ef30f
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zgemv.c
@@ -0,0 +1,164 @@
+/*
+ * cblas_zgemv.c
+ * The program is a C interface of zgemv
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zgemv(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ const void *X, const int incX, const void *beta,
+ void *Y, const int incY)
+{
+ char TA;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA;
+#else
+ #define F77_TA &TA
+#endif
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_incX incx
+ #define F77_incY incY
+#endif
+
+ int n, i=0, incx=incX;
+ const double *xx= (const double *)X, *alp= (const double *)alpha, *bet = (const double *)beta;
+ double ALPHA[2],BETA[2];
+ int tincY, tincx;
+ double *x=(double *)X, *y=(double *)Y, *st=0, *tx;
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+
+ if (order == CblasColMajor)
+ {
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(2, "cblas_zgemv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_TA = C2F_CHAR(&TA);
+ #endif
+ F77_zgemv(F77_TA, &F77_M, &F77_N, alpha, A, &F77_lda, X, &F77_incX,
+ beta, Y, &F77_incY);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans)
+ {
+ ALPHA[0]= *alp;
+ ALPHA[1]= -alp[1];
+ BETA[0]= *bet;
+ BETA[1]= -bet[1];
+ TA = 'N';
+ if (M > 0)
+ {
+ n = M << 1;
+ x = malloc(n*sizeof(double));
+ tx = x;
+ if( incX > 0 ) {
+ i = incX << 1 ;
+ tincx = 2;
+ st= x+n;
+ } else {
+ i = incX *(-2);
+ tincx = -2;
+ st = x-2;
+ x +=(n-2);
+ }
+
+ do
+ {
+ *x = *xx;
+ x[1] = -xx[1];
+ x += tincx ;
+ xx += i;
+ }
+ while (x != st);
+ x=tx;
+
+ #ifdef F77_INT
+ F77_incX = 1;
+ #else
+ incx = 1;
+ #endif
+
+ if(incY > 0)
+ tincY = incY;
+ else
+ tincY = -incY;
+
+ y++;
+
+ if (N > 0)
+ {
+ i = tincY << 1;
+ n = i * N ;
+ st = y + n;
+ do {
+ *y = -(*y);
+ y += i;
+ } while(y != st);
+ y -= n;
+ }
+ }
+ else x = (double *) X;
+ }
+ else
+ {
+ cblas_xerbla(2, "cblas_zgemv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_TA = C2F_CHAR(&TA);
+ #endif
+ if (TransA == CblasConjTrans)
+ F77_zgemv(F77_TA, &F77_N, &F77_M, ALPHA, A, &F77_lda, x,
+ &F77_incX, BETA, Y, &F77_incY);
+ else
+ F77_zgemv(F77_TA, &F77_N, &F77_M, alpha, A, &F77_lda, x,
+ &F77_incX, beta, Y, &F77_incY);
+
+ if (TransA == CblasConjTrans)
+ {
+ if (x != (double *)X) free(x);
+ if (N > 0)
+ {
+ do
+ {
+ *y = -(*y);
+ y += i;
+ }
+ while (y != st);
+ }
+ }
+ }
+ else cblas_xerbla(1, "cblas_zgemv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zgerc.c b/contrib/libs/cblas/cblas_interface/cblas_zgerc.c
new file mode 100644
index 0000000000..2acde748e4
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zgerc.c
@@ -0,0 +1,84 @@
+/*
+ * cblas_zgerc.c
+ * The program is a C interface to zgerc.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zgerc(const enum CBLAS_ORDER order, const int M, const int N,
+ const void *alpha, const void *X, const int incX,
+ const void *Y, const int incY, void *A, const int lda)
+{
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incy
+ #define F77_lda lda
+#endif
+
+ int n, i, tincy, incy=incY;
+ double *y=(double *)Y, *yy=(double *)Y, *ty, *st;
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ F77_zgerc( &F77_M, &F77_N, alpha, X, &F77_incX, Y, &F77_incY, A,
+ &F77_lda);
+ } else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (N > 0)
+ {
+ n = N << 1;
+ y = malloc(n*sizeof(double));
+
+ ty = y;
+ if( incY > 0 ) {
+ i = incY << 1;
+ tincy = 2;
+ st= y+n;
+ } else {
+ i = incY *(-2);
+ tincy = -2;
+ st = y-2;
+ y +=(n-2);
+ }
+ do
+ {
+ *y = *yy;
+ y[1] = -yy[1];
+ y += tincy ;
+ yy += i;
+ }
+ while (y != st);
+ y = ty;
+
+ #ifdef F77_INT
+ F77_incY = 1;
+ #else
+ incy = 1;
+ #endif
+ }
+ else y = (double *) Y;
+
+ F77_zgeru( &F77_N, &F77_M, alpha, y, &F77_incY, X, &F77_incX, A,
+ &F77_lda);
+ if(Y!=y)
+ free(y);
+
+ } else cblas_xerbla(1, "cblas_zgerc", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zgeru.c b/contrib/libs/cblas/cblas_interface/cblas_zgeru.c
new file mode 100644
index 0000000000..464ca1539e
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zgeru.c
@@ -0,0 +1,44 @@
+/*
+ * cblas_zgeru.c
+ * The program is a C interface to zgeru.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zgeru(const enum CBLAS_ORDER order, const int M, const int N,
+ const void *alpha, const void *X, const int incX,
+ const void *Y, const int incY, void *A, const int lda)
+{
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+ #define F77_lda lda
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if (order == CblasColMajor)
+ {
+ F77_zgeru( &F77_M, &F77_N, alpha, X, &F77_incX, Y, &F77_incY, A,
+ &F77_lda);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ F77_zgeru( &F77_N, &F77_M, alpha, Y, &F77_incY, X, &F77_incX, A,
+ &F77_lda);
+ }
+ else cblas_xerbla(1, "cblas_zgeru", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zhbmv.c b/contrib/libs/cblas/cblas_interface/cblas_zhbmv.c
new file mode 100644
index 0000000000..de4b96a9b0
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zhbmv.c
@@ -0,0 +1,159 @@
+/*
+ * cblas_zhbmv.c
+ * The program is a C interface to zhbmv
+ *
+ * Keita Teranishi 5/18/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+#include <stdio.h>
+#include <stdlib.h>
+void cblas_zhbmv(const enum CBLAS_ORDER order,
+ const enum CBLAS_UPLO Uplo,const int N,const int K,
+ const void *alpha, const void *A, const int lda,
+ const void *X, const int incX, const void *beta,
+ void *Y, const int incY)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_K=K, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_incX incx
+ #define F77_incY incY
+#endif
+ int n, i=0, incx=incX;
+ const double *xx= (double *)X, *alp= (double *)alpha, *bet = (double *)beta;
+ double ALPHA[2],BETA[2];
+ int tincY, tincx;
+ double *x=(double *)X, *y=(double *)Y, *st=0, *tx;
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasLower) UL = 'L';
+ else if (Uplo == CblasUpper) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_zhbmv","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_zhbmv(F77_UL, &F77_N, &F77_K, alpha, A, &F77_lda, X,
+ &F77_incX, beta, Y, &F77_incY);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ ALPHA[0]= *alp;
+ ALPHA[1]= -alp[1];
+ BETA[0]= *bet;
+ BETA[1]= -bet[1];
+
+ if (N > 0)
+ {
+ n = N << 1;
+ x = malloc(n*sizeof(double));
+
+ tx = x;
+ if( incX > 0 ) {
+ i = incX << 1 ;
+ tincx = 2;
+ st= x+n;
+ } else {
+ i = incX *(-2);
+ tincx = -2;
+ st = x-2;
+ x +=(n-2);
+ }
+
+ do
+ {
+ *x = *xx;
+ x[1] = -xx[1];
+ x += tincx ;
+ xx += i;
+ }
+ while (x != st);
+ x=tx;
+
+
+ #ifdef F77_INT
+ F77_incX = 1;
+ #else
+ incx = 1;
+ #endif
+
+ if(incY > 0)
+ tincY = incY;
+ else
+ tincY = -incY;
+ y++;
+
+ i = tincY << 1;
+ n = i * N ;
+ st = y + n;
+ do {
+ *y = -(*y);
+ y += i;
+ } while(y != st);
+ y -= n;
+ } else
+ x = (double *) X;
+
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_zhbmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_zhbmv(F77_UL, &F77_N, &F77_K, ALPHA,
+ A ,&F77_lda, x,&F77_incX, BETA, Y, &F77_incY);
+ }
+ else
+ {
+ cblas_xerbla(1, "cblas_zhbmv","Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if ( order == CblasRowMajor )
+ {
+ RowMajorStrg = 1;
+ if(X!=x)
+ free(x);
+ if (N > 0)
+ {
+ do
+ {
+ *y = -(*y);
+ y += i;
+ }
+ while (y != st);
+ }
+ }
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zhemm.c b/contrib/libs/cblas/cblas_interface/cblas_zhemm.c
new file mode 100644
index 0000000000..2eb0951d2b
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zhemm.c
@@ -0,0 +1,106 @@
+/*
+ *
+ * cblas_zhemm.c
+ * This program is a C interface to zhemm.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zhemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const void *beta,
+ void *C, const int ldc)
+{
+ char SD, UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_SD, F77_UL;
+#else
+ #define F77_SD &SD
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_ldb=ldb;
+ F77_INT F77_ldc=ldc;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_ldb ldb
+ #define F77_ldc ldc
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+ if( Side == CblasRight) SD='R';
+ else if ( Side == CblasLeft ) SD='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_zhemm", "Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Uplo == CblasUpper) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(3, "cblas_zhemm", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_SD = C2F_CHAR(&SD);
+ #endif
+
+ F77_zhemm(F77_SD, F77_UL, &F77_M, &F77_N, alpha, A, &F77_lda,
+ B, &F77_ldb, beta, C, &F77_ldc);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if( Side == CblasRight) SD='L';
+ else if ( Side == CblasLeft ) SD='R';
+ else
+ {
+ cblas_xerbla(2, "cblas_zhemm", "Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Uplo == CblasUpper) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_zhemm", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_SD = C2F_CHAR(&SD);
+ #endif
+
+ F77_zhemm(F77_SD, F77_UL, &F77_N, &F77_M, alpha, A,
+ &F77_lda, B, &F77_ldb, beta, C, &F77_ldc);
+ }
+ else cblas_xerbla(1, "cblas_zhemm", "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zhemv.c b/contrib/libs/cblas/cblas_interface/cblas_zhemv.c
new file mode 100644
index 0000000000..29cee1f20b
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zhemv.c
@@ -0,0 +1,160 @@
+/*
+ * cblas_zhemv.c
+ * The program is a C interface to zhemv
+ *
+ * Keita Teranishi 5/18/98
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zhemv(const enum CBLAS_ORDER order,
+ const enum CBLAS_UPLO Uplo, const int N,
+ const void *alpha, const void *A, const int lda,
+ const void *X, const int incX, const void *beta,
+ void *Y, const int incY)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_incX incx
+ #define F77_incY incY
+#endif
+ int n, i=0, incx=incX;
+ const double *xx= (double *)X, *alp= (double *)alpha, *bet = (double *)beta;
+ double ALPHA[2],BETA[2];
+ int tincY, tincx;
+ double *x=(double *)X, *y=(double *)Y, *st=0, *tx;
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_zhemv","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_zhemv(F77_UL, &F77_N, alpha, A, &F77_lda, X, &F77_incX,
+ beta, Y, &F77_incY);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ ALPHA[0]= *alp;
+ ALPHA[1]= -alp[1];
+ BETA[0]= *bet;
+ BETA[1]= -bet[1];
+
+ if (N > 0)
+ {
+ n = N << 1;
+ x = malloc(n*sizeof(double));
+
+ tx = x;
+ if( incX > 0 ) {
+ i = incX << 1 ;
+ tincx = 2;
+ st= x+n;
+ } else {
+ i = incX *(-2);
+ tincx = -2;
+ st = x-2;
+ x +=(n-2);
+ }
+
+ do
+ {
+ *x = *xx;
+ x[1] = -xx[1];
+ x += tincx ;
+ xx += i;
+ }
+ while (x != st);
+ x=tx;
+
+
+ #ifdef F77_INT
+ F77_incX = 1;
+ #else
+ incx = 1;
+ #endif
+
+ if(incY > 0)
+ tincY = incY;
+ else
+ tincY = -incY;
+ y++;
+
+ i = tincY << 1;
+ n = i * N ;
+ st = y + n;
+ do {
+ *y = -(*y);
+ y += i;
+ } while(y != st);
+ y -= n;
+ } else
+ x = (double *) X;
+
+
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_zhemv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_zhemv(F77_UL, &F77_N, ALPHA, A, &F77_lda, x, &F77_incX,
+ BETA, Y, &F77_incY);
+ }
+ else
+ {
+ cblas_xerbla(1, "cblas_zhemv","Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if ( order == CblasRowMajor )
+ {
+ RowMajorStrg = 1;
+ if ( X != x )
+ free(x);
+ if (N > 0)
+ {
+ do
+ {
+ *y = -(*y);
+ y += i;
+ }
+ while (y != st);
+ }
+ }
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zher.c b/contrib/libs/cblas/cblas_interface/cblas_zher.c
new file mode 100644
index 0000000000..f688992bf4
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zher.c
@@ -0,0 +1,110 @@
+/*
+ * cblas_zher.c
+ * The program is a C interface to zher.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const void *X, const int incX
+ ,void *A, const int lda)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_lda=lda, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_incX incx
+#endif
+ int n, i, tincx, incx=incX;
+ double *x=(double *)X, *xx=(double *)X, *tx, *st;
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasLower) UL = 'L';
+ else if (Uplo == CblasUpper) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_zher","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+
+ F77_zher(F77_UL, &F77_N, &alpha, X, &F77_incX, A, &F77_lda);
+
+ } else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_zher","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ if (N > 0)
+ {
+ n = N << 1;
+ x = malloc(n*sizeof(double));
+ tx = x;
+ if( incX > 0 ) {
+ i = incX << 1 ;
+ tincx = 2;
+ st= x+n;
+ } else {
+ i = incX *(-2);
+ tincx = -2;
+ st = x-2;
+ x +=(n-2);
+ }
+ do
+ {
+ *x = *xx;
+ x[1] = -xx[1];
+ x += tincx ;
+ xx += i;
+ }
+ while (x != st);
+ x=tx;
+
+ #ifdef F77_INT
+ F77_incX = 1;
+ #else
+ incx = 1;
+ #endif
+ }
+ else x = (double *) X;
+ F77_zher(F77_UL, &F77_N, &alpha, x, &F77_incX, A, &F77_lda);
+ } else cblas_xerbla(1, "cblas_zher", "Illegal Order setting, %d\n", order);
+ if(X!=x)
+ free(x);
+
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zher2.c b/contrib/libs/cblas/cblas_interface/cblas_zher2.c
new file mode 100644
index 0000000000..fa0547453b
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zher2.c
@@ -0,0 +1,153 @@
+/*
+ * cblas_zher2.c
+ * The program is a C interface to zher2.
+ *
+ * Keita Teranishi 3/23/98
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const void *alpha, const void *X, const int incX,
+ const void *Y, const int incY, void *A, const int lda)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_incX incx
+ #define F77_incY incy
+#endif
+ int n, i, j, tincx, tincy, incx=incX, incy=incY;
+ double *x=(double *)X, *xx=(double *)X, *y=(double *)Y,
+ *yy=(double *)Y, *tx, *ty, *stx, *sty;
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasLower) UL = 'L';
+ else if (Uplo == CblasUpper) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_zher2", "Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+
+ F77_zher2(F77_UL, &F77_N, alpha, X, &F77_incX,
+ Y, &F77_incY, A, &F77_lda);
+
+ } else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_zher2", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ if (N > 0)
+ {
+ n = N << 1;
+ x = malloc(n*sizeof(double));
+ y = malloc(n*sizeof(double));
+ tx = x;
+ ty = y;
+ if( incX > 0 ) {
+ i = incX << 1 ;
+ tincx = 2;
+ stx= x+n;
+ } else {
+ i = incX *(-2);
+ tincx = -2;
+ stx = x-2;
+ x +=(n-2);
+ }
+
+ if( incY > 0 ) {
+ j = incY << 1;
+ tincy = 2;
+ sty= y+n;
+ } else {
+ j = incY *(-2);
+ tincy = -2;
+ sty = y-2;
+ y +=(n-2);
+ }
+
+ do
+ {
+ *x = *xx;
+ x[1] = -xx[1];
+ x += tincx ;
+ xx += i;
+ }
+ while (x != stx);
+
+ do
+ {
+ *y = *yy;
+ y[1] = -yy[1];
+ y += tincy ;
+ yy += j;
+ }
+ while (y != sty);
+
+ x=tx;
+ y=ty;
+
+ #ifdef F77_INT
+ F77_incX = 1;
+ F77_incY = 1;
+ #else
+ incx = 1;
+ incy = 1;
+ #endif
+ } else
+ {
+ x = (double *) X;
+ y = (double *) Y;
+ }
+ F77_zher2(F77_UL, &F77_N, alpha, y, &F77_incY, x,
+ &F77_incX, A, &F77_lda);
+ }
+ else
+ {
+ cblas_xerbla(1, "cblas_zher2", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if(X!=x)
+ free(x);
+ if(Y!=y)
+ free(y);
+
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zher2k.c b/contrib/libs/cblas/cblas_interface/cblas_zher2k.c
new file mode 100644
index 0000000000..abd0a4ddcb
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zher2k.c
@@ -0,0 +1,110 @@
+/*
+ *
+ * cblas_zher2k.c
+ * This program is a C interface to zher2k.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const double beta,
+ void *C, const int ldc)
+{
+ char UL, TR;
+#ifdef F77_CHAR
+ F77_CHAR F77_TR, F77_UL;
+#else
+ #define F77_TR &TR
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_K=K, F77_lda=lda, F77_ldb=ldb;
+ F77_INT F77_ldc=ldc;
+#else
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_ldb ldb
+ #define F77_ldc ldc
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ double ALPHA[2];
+ const double *alp=(double *)alpha;
+
+ CBLAS_CallFromC = 1;
+ RowMajorStrg = 0;
+
+ if( Order == CblasColMajor )
+ {
+
+ if( Uplo == CblasUpper) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_zher2k", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Trans == CblasTrans) TR ='T';
+ else if ( Trans == CblasConjTrans ) TR='C';
+ else if ( Trans == CblasNoTrans ) TR='N';
+ else
+ {
+ cblas_xerbla(3, "cblas_zher2k", "Illegal Trans setting, %d\n", Trans);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TR = C2F_CHAR(&TR);
+ #endif
+
+ F77_zher2k(F77_UL, F77_TR, &F77_N, &F77_K, alpha, A, &F77_lda, B, &F77_ldb, &beta, C, &F77_ldc);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+
+ if( Uplo == CblasUpper) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(2, "cblas_zher2k", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if( Trans == CblasTrans) TR ='N';
+ else if ( Trans == CblasConjTrans ) TR='N';
+ else if ( Trans == CblasNoTrans ) TR='C';
+ else
+ {
+ cblas_xerbla(3, "cblas_zher2k", "Illegal Trans setting, %d\n", Trans);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TR = C2F_CHAR(&TR);
+ #endif
+
+ ALPHA[0]= *alp;
+ ALPHA[1]= -alp[1];
+ F77_zher2k(F77_UL,F77_TR, &F77_N, &F77_K, ALPHA, A, &F77_lda, B, &F77_ldb, &beta, C, &F77_ldc);
+ } else cblas_xerbla(1, "cblas_zher2k", "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zherk.c b/contrib/libs/cblas/cblas_interface/cblas_zherk.c
new file mode 100644
index 0000000000..a867788f34
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zherk.c
@@ -0,0 +1,105 @@
+/*
+ *
+ * cblas_zherk.c
+ * This program is a C interface to zherk.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const double alpha, const void *A, const int lda,
+ const double beta, void *C, const int ldc)
+{
+ char UL, TR;
+#ifdef F77_CHAR
+ F77_CHAR F77_TR, F77_UL;
+#else
+ #define F77_TR &TR
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_K=K, F77_lda=lda;
+ F77_INT F77_ldc=ldc;
+#else
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_ldc ldc
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+ if( Uplo == CblasUpper) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_zherk", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Trans == CblasTrans) TR ='T';
+ else if ( Trans == CblasConjTrans ) TR='C';
+ else if ( Trans == CblasNoTrans ) TR='N';
+ else
+ {
+ cblas_xerbla(3, "cblas_zherk", "Illegal Trans setting, %d\n", Trans);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TR = C2F_CHAR(&TR);
+ #endif
+
+ F77_zherk(F77_UL, F77_TR, &F77_N, &F77_K, &alpha, A, &F77_lda,
+ &beta, C, &F77_ldc);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if( Uplo == CblasUpper) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_zherk", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if( Trans == CblasTrans) TR ='N';
+ else if ( Trans == CblasConjTrans ) TR='N';
+ else if ( Trans == CblasNoTrans ) TR='C';
+ else
+ {
+ cblas_xerbla(3, "cblas_zherk", "Illegal Trans setting, %d\n", Trans);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_SD = C2F_CHAR(&SD);
+ #endif
+
+ F77_zherk(F77_UL, F77_TR, &F77_N, &F77_K, &alpha, A, &F77_lda,
+ &beta, C, &F77_ldc);
+ }
+ else cblas_xerbla(1, "cblas_zherk", "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zhpmv.c b/contrib/libs/cblas/cblas_interface/cblas_zhpmv.c
new file mode 100644
index 0000000000..289eb78066
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zhpmv.c
@@ -0,0 +1,160 @@
+/*
+ * cblas_zhpmv.c
+ * The program is a C interface of zhpmv
+ *
+ * Keita Teranishi 5/18/98
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zhpmv(const enum CBLAS_ORDER order,
+ const enum CBLAS_UPLO Uplo,const int N,
+ const void *alpha, const void *AP,
+ const void *X, const int incX, const void *beta,
+ void *Y, const int incY)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_K=K, F77_lda=lda, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incx
+ #define F77_incY incY
+#endif
+ int n, i=0, incx=incX;
+ const double *xx= (double *)X, *alp= (double *)alpha, *bet = (double *)beta;
+ double ALPHA[2],BETA[2];
+ int tincY, tincx;
+ double *x=(double *)X, *y=(double *)Y, *st=0, *tx;
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasLower) UL = 'L';
+ else if (Uplo == CblasUpper) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_zhpmv","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ F77_zhpmv(F77_UL, &F77_N, alpha, AP, X,
+ &F77_incX, beta, Y, &F77_incY);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ ALPHA[0]= *alp;
+ ALPHA[1]= -alp[1];
+ BETA[0]= *bet;
+ BETA[1]= -bet[1];
+
+ if (N > 0)
+ {
+ n = N << 1;
+ x = malloc(n*sizeof(double));
+
+ tx = x;
+ if( incX > 0 ) {
+ i = incX << 1;
+ tincx = 2;
+ st= x+n;
+ } else {
+ i = incX *(-2);
+ tincx = -2;
+ st = x-2;
+ x +=(n-2);
+ }
+
+ do
+ {
+ *x = *xx;
+ x[1] = -xx[1];
+ x += tincx ;
+ xx += i;
+ }
+ while (x != st);
+ x=tx;
+
+
+ #ifdef F77_INT
+ F77_incX = 1;
+ #else
+ incx = 1;
+ #endif
+
+ if(incY > 0)
+ tincY = incY;
+ else
+ tincY = -incY;
+ y++;
+
+ i = tincY << 1;
+ n = i * N ;
+ st = y + n;
+ do {
+ *y = -(*y);
+ y += i;
+ } while(y != st);
+ y -= n;
+ } else
+ x = (double *) X;
+
+
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_zhpmv","Illegal Uplo setting, %d\n", Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+
+ F77_zhpmv(F77_UL, &F77_N, ALPHA,
+ AP, x, &F77_incX, BETA, Y, &F77_incY);
+ }
+ else
+ {
+ cblas_xerbla(1, "cblas_zhpmv","Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if ( order == CblasRowMajor )
+ {
+ RowMajorStrg = 1;
+ if(X!=x)
+ free(x);
+ if (N > 0)
+ {
+ do
+ {
+ *y = -(*y);
+ y += i;
+ }
+ while (y != st);
+ }
+ }
+
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zhpr.c b/contrib/libs/cblas/cblas_interface/cblas_zhpr.c
new file mode 100644
index 0000000000..5517c22d56
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zhpr.c
@@ -0,0 +1,115 @@
+/*
+ * cblas_zhpr.c
+ * The program is a C interface to zhpr.
+ *
+ * Keita Teranishi 3/23/98
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zhpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N, const double alpha, const void *X,
+ const int incX, void *A)
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incx
+#endif
+ int n, i, tincx, incx=incX;
+ double *x=(double *)X, *xx=(double *)X, *tx, *st;
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasLower) UL = 'L';
+ else if (Uplo == CblasUpper) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_zhpr","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+
+ F77_zhpr(F77_UL, &F77_N, &alpha, X, &F77_incX, A);
+
+ } else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_zhpr","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ if (N > 0)
+ {
+ n = N << 1;
+ x = malloc(n*sizeof(double));
+ tx = x;
+ if( incX > 0 ) {
+ i = incX << 1;
+ tincx = 2;
+ st= x+n;
+ } else {
+ i = incX *(-2);
+ tincx = -2;
+ st = x-2;
+ x +=(n-2);
+ }
+ do
+ {
+ *x = *xx;
+ x[1] = -xx[1];
+ x += tincx ;
+ xx += i;
+ }
+ while (x != st);
+ x=tx;
+ #ifdef F77_INT
+ F77_incX = 1;
+ #else
+ incx = 1;
+ #endif
+ }
+ else x = (double *) X;
+
+ F77_zhpr(F77_UL, &F77_N, &alpha, x, &F77_incX, A);
+
+ } else
+ {
+ cblas_xerbla(1, "cblas_zhpr","Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if(X!=x)
+ free(x);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zhpr2.c b/contrib/libs/cblas/cblas_interface/cblas_zhpr2.c
new file mode 100644
index 0000000000..69b9f14a90
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zhpr2.c
@@ -0,0 +1,150 @@
+/*
+ * cblas_zhpr2.c
+ * The program is a C interface to zhpr2.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zhpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const int N,const void *alpha, const void *X,
+ const int incX,const void *Y, const int incY, void *Ap)
+
+{
+ char UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_UL;
+#else
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incx
+ #define F77_incY incy
+#endif
+ int n, i, j, incx=incX, incy=incY;
+ double *x=(double *)X, *xx=(double *)X, *y=(double *)Y,
+ *yy=(double *)Y, *stx, *sty;
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasLower) UL = 'L';
+ else if (Uplo == CblasUpper) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_zhpr2","Illegal Uplo setting, %d\n",Uplo );
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+
+ F77_zhpr2(F77_UL, &F77_N, alpha, X, &F77_incX, Y, &F77_incY, Ap);
+
+ } else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_zhpr2","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ #endif
+ if (N > 0)
+ {
+ n = N << 1;
+ x = malloc(n*sizeof(double));
+ y = malloc(n*sizeof(double));
+ stx = x + n;
+ sty = y + n;
+ if( incX > 0 )
+ i = incX << 1;
+ else
+ i = incX *(-2);
+
+ if( incY > 0 )
+ j = incY << 1;
+ else
+ j = incY *(-2);
+ do
+ {
+ *x = *xx;
+ x[1] = -xx[1];
+ x += 2;
+ xx += i;
+ } while (x != stx);
+ do
+ {
+ *y = *yy;
+ y[1] = -yy[1];
+ y += 2;
+ yy += j;
+ }
+ while (y != sty);
+ x -= n;
+ y -= n;
+
+ #ifdef F77_INT
+ if(incX > 0 )
+ F77_incX = 1;
+ else
+ F77_incX = -1;
+
+ if(incY > 0 )
+ F77_incY = 1;
+ else
+ F77_incY = -1;
+
+ #else
+ if(incX > 0 )
+ incx = 1;
+ else
+ incx = -1;
+
+ if(incY > 0 )
+ incy = 1;
+ else
+ incy = -1;
+ #endif
+
+ } else
+ {
+ x = (double *) X;
+ y = (void *) Y;
+ }
+ F77_zhpr2(F77_UL, &F77_N, alpha, y, &F77_incY, x, &F77_incX, Ap);
+ }
+ else
+ {
+ cblas_xerbla(1, "cblas_zhpr2","Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if(X!=x)
+ free(x);
+ if(Y!=y)
+ free(y);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zscal.c b/contrib/libs/cblas/cblas_interface/cblas_zscal.c
new file mode 100644
index 0000000000..37b319f38f
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zscal.c
@@ -0,0 +1,21 @@
+/*
+ * cblas_zscal.c
+ *
+ * The program is a C interface to zscal.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zscal( const int N, const void *alpha, void *X,
+ const int incX)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incX
+#endif
+ F77_zscal( &F77_N, alpha, X, &F77_incX);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zswap.c b/contrib/libs/cblas/cblas_interface/cblas_zswap.c
new file mode 100644
index 0000000000..dfde2cbd01
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zswap.c
@@ -0,0 +1,22 @@
+/*
+ * cblas_zswap.c
+ *
+ * The program is a C interface to zswap.
+ *
+ * Written by Keita Teranishi. 2/11/1998
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zswap( const int N, void *X, const int incX, void *Y,
+ const int incY)
+{
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX, F77_incY=incY;
+#else
+ #define F77_N N
+ #define F77_incX incX
+ #define F77_incY incY
+#endif
+ F77_zswap( &F77_N, X, &F77_incX, Y, &F77_incY);
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zsymm.c b/contrib/libs/cblas/cblas_interface/cblas_zsymm.c
new file mode 100644
index 0000000000..91aa67d3b0
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zsymm.c
@@ -0,0 +1,106 @@
+/*
+ *
+ * cblas_zsymm.c
+ * This program is a C interface to zsymm.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const void *beta,
+ void *C, const int ldc)
+{
+ char SD, UL;
+#ifdef F77_CHAR
+ F77_CHAR F77_SD, F77_UL;
+#else
+ #define F77_SD &SD
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_ldb=ldb;
+ F77_INT F77_ldc=ldc;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_ldb ldb
+ #define F77_ldc ldc
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+ if( Side == CblasRight) SD='R';
+ else if ( Side == CblasLeft ) SD='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_zsymm", "Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Uplo == CblasUpper) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(3, "cblas_zsymm", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_SD = C2F_CHAR(&SD);
+ #endif
+
+ F77_zsymm(F77_SD, F77_UL, &F77_M, &F77_N, alpha, A, &F77_lda,
+ B, &F77_ldb, beta, C, &F77_ldc);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if( Side == CblasRight) SD='L';
+ else if ( Side == CblasLeft ) SD='R';
+ else
+ {
+ cblas_xerbla(2, "cblas_zsymm", "Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Uplo == CblasUpper) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_zsymm", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_SD = C2F_CHAR(&SD);
+ #endif
+
+ F77_zsymm(F77_SD, F77_UL, &F77_N, &F77_M, alpha, A, &F77_lda,
+ B, &F77_ldb, beta, C, &F77_ldc);
+ }
+ else cblas_xerbla(1, "cblas_zsymm", "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zsyr2k.c b/contrib/libs/cblas/cblas_interface/cblas_zsyr2k.c
new file mode 100644
index 0000000000..def7239ba1
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zsyr2k.c
@@ -0,0 +1,108 @@
+/*
+ *
+ * cblas_zsyr2k.c
+ * This program is a C interface to zsyr2k.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const void *alpha, const void *A, const int lda,
+ const void *B, const int ldb, const void *beta,
+ void *C, const int ldc)
+{
+ char UL, TR;
+#ifdef F77_CHAR
+ F77_CHAR F77_TR, F77_UL;
+#else
+ #define F77_TR &TR
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_K=K, F77_lda=lda, F77_ldb=ldb;
+ F77_INT F77_ldc=ldc;
+#else
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_ldb ldb
+ #define F77_ldc ldc
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+
+ if( Uplo == CblasUpper) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_zsyr2k", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Trans == CblasTrans) TR ='T';
+ else if ( Trans == CblasConjTrans ) TR='C';
+ else if ( Trans == CblasNoTrans ) TR='N';
+ else
+ {
+ cblas_xerbla(3, "cblas_zsyr2k", "Illegal Trans setting, %d\n", Trans);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TR = C2F_CHAR(&TR);
+ #endif
+
+ F77_zsyr2k(F77_UL, F77_TR, &F77_N, &F77_K, alpha, A, &F77_lda,
+ B, &F77_ldb, beta, C, &F77_ldc);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if( Uplo == CblasUpper) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_zsyr2k", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if( Trans == CblasTrans) TR ='N';
+ else if ( Trans == CblasConjTrans ) TR='N';
+ else if ( Trans == CblasNoTrans ) TR='T';
+ else
+ {
+ cblas_xerbla(3, "cblas_zsyr2k", "Illegal Trans setting, %d\n", Trans);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TR = C2F_CHAR(&TR);
+ #endif
+
+ F77_zsyr2k(F77_UL, F77_TR, &F77_N, &F77_K, alpha, A, &F77_lda, B, &F77_ldb, beta, C, &F77_ldc);
+ }
+ else cblas_xerbla(1, "cblas_zsyr2k", "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_zsyrk.c b/contrib/libs/cblas/cblas_interface/cblas_zsyrk.c
new file mode 100644
index 0000000000..7968f90417
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_zsyrk.c
@@ -0,0 +1,107 @@
+/*
+ *
+ * cblas_zsyrk.c
+ * This program is a C interface to zsyrk.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_zsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
+ const void *alpha, const void *A, const int lda,
+ const void *beta, void *C, const int ldc)
+{
+ char UL, TR;
+#ifdef F77_CHAR
+ F77_CHAR F77_TR, F77_UL;
+#else
+ #define F77_TR &TR
+ #define F77_UL &UL
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_K=K, F77_lda=lda;
+ F77_INT F77_ldc=ldc;
+#else
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_ldc ldc
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+
+ if( Uplo == CblasUpper) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_zsyrk", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Trans == CblasTrans) TR ='T';
+ else if ( Trans == CblasConjTrans ) TR='C';
+ else if ( Trans == CblasNoTrans ) TR='N';
+ else
+ {
+ cblas_xerbla(3, "cblas_zsyrk", "Illegal Trans setting, %d\n", Trans);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TR = C2F_CHAR(&TR);
+ #endif
+
+ F77_zsyrk(F77_UL, F77_TR, &F77_N, &F77_K, alpha, A, &F77_lda,
+ beta, C, &F77_ldc);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if( Uplo == CblasUpper) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_zsyrk", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if( Trans == CblasTrans) TR ='N';
+ else if ( Trans == CblasConjTrans ) TR='N';
+ else if ( Trans == CblasNoTrans ) TR='T';
+ else
+ {
+ cblas_xerbla(3, "cblas_zsyrk", "Illegal Trans setting, %d\n", Trans);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TR = C2F_CHAR(&TR);
+ #endif
+
+ F77_zsyrk(F77_UL, F77_TR, &F77_N, &F77_K, alpha, A, &F77_lda,
+ beta, C, &F77_ldc);
+ }
+ else cblas_xerbla(1, "cblas_zsyrk", "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ztbmv.c b/contrib/libs/cblas/cblas_interface/cblas_ztbmv.c
new file mode 100644
index 0000000000..b3dde43813
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ztbmv.c
@@ -0,0 +1,158 @@
+/*
+ * cblas_ztbmv.c
+ * The program is a C interface to ztbmv.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ztbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const void *A, const int lda,
+ void *X, const int incX)
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_lda=lda, F77_K=K, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_incX incX
+#endif
+ int n, i=0, tincX;
+ double *st=0, *x=(double *)X;
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ztbmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_ztbmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ztbmv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_ztbmv( F77_UL, F77_TA, F77_DI, &F77_N, &F77_K, A, &F77_lda, X,
+ &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_ztbmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans)
+ {
+ TA = 'N';
+ if ( N > 0)
+ {
+ if(incX > 0)
+ tincX = incX;
+ else
+ tincX = -incX;
+ i = tincX << 1;
+ n = i * N;
+ x++;
+ st = x + n;
+ do
+ {
+ *x = -(*x);
+ x+= i;
+ }
+ while (x != st);
+ x -= n;
+ }
+ }
+ else
+ {
+ cblas_xerbla(3, "cblas_ztbmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ztbmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_ztbmv( F77_UL, F77_TA, F77_DI, &F77_N, &F77_K, A, &F77_lda, X,
+ &F77_incX);
+
+ if (TransA == CblasConjTrans)
+ {
+ if (N > 0)
+ {
+ do
+ {
+ *x = -(*x);
+ x += i;
+ }
+ while (x != st);
+ }
+ }
+ }
+ else cblas_xerbla(1, "cblas_ztbmv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ztbsv.c b/contrib/libs/cblas/cblas_interface/cblas_ztbsv.c
new file mode 100644
index 0000000000..e3532b35ae
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ztbsv.c
@@ -0,0 +1,162 @@
+/*
+ * cblas_ztbsv.c
+ * The program is a C interface to ztbsv.
+ *
+ * Keita Teranishi 3/23/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ztbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const int K, const void *A, const int lda,
+ void *X, const int incX)
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_lda=lda, F77_K=K, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_K K
+ #define F77_lda lda
+ #define F77_incX incX
+#endif
+ int n, i=0, tincX;
+ double *st=0,*x=(double *)X;
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ztbsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_ztbsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ztbsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_ztbsv( F77_UL, F77_TA, F77_DI, &F77_N, &F77_K, A, &F77_lda, X,
+ &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_ztbsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans)
+ {
+ TA = 'N';
+ if ( N > 0)
+ {
+ if ( incX > 0 )
+ tincX = incX;
+ else
+ tincX = -incX;
+
+ n = N*2*(tincX);
+
+ x++;
+
+ st=x+n;
+
+ i = tincX << 1;
+ do
+ {
+ *x = -(*x);
+ x+=i;
+ }
+ while (x != st);
+ x -= n;
+ }
+ }
+ else
+ {
+ cblas_xerbla(3, "cblas_ztbsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ztbsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_ztbsv( F77_UL, F77_TA, F77_DI, &F77_N, &F77_K, A, &F77_lda, X,
+ &F77_incX);
+
+ if (TransA == CblasConjTrans)
+ {
+ if (N > 0)
+ {
+ do
+ {
+ *x = -(*x);
+ x+= i;
+ }
+ while (x != st);
+ }
+ }
+ }
+ else cblas_xerbla(1, "cblas_ztbsv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ztpmv.c b/contrib/libs/cblas/cblas_interface/cblas_ztpmv.c
new file mode 100644
index 0000000000..f29b7bb32a
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ztpmv.c
@@ -0,0 +1,152 @@
+/*
+ * cblas_ztpmv.c
+ * The program is a C interface to ztpmv.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ztpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *Ap, void *X, const int incX)
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incX
+#endif
+ int n, i=0, tincX;
+ double *st=0,*x=(double *)X;
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ztpmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_ztpmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ztpmv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_ztpmv( F77_UL, F77_TA, F77_DI, &F77_N, Ap, X, &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_ztpmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans)
+ {
+ TA = 'N';
+ if ( N > 0)
+ {
+ if(incX > 0)
+ tincX = incX;
+ else
+ tincX = -incX;
+ i = tincX << 1;
+ n = i * N;
+ x++;
+ st = x + n;
+ do
+ {
+ *x = -(*x);
+ x += i;
+ }
+ while (x != st);
+ x -= n;
+ }
+ }
+ else
+ {
+ cblas_xerbla(3, "cblas_ztpmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ztpmv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_ztpmv( F77_UL, F77_TA, F77_DI, &F77_N, Ap, X,&F77_incX);
+ if (TransA == CblasConjTrans)
+ {
+ if (N > 0)
+ {
+ do
+ {
+ *x = -(*x);
+ x += i;
+ }
+ while (x != st);
+ }
+ }
+ }
+ else cblas_xerbla(1, "cblas_ztpmv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ztpsv.c b/contrib/libs/cblas/cblas_interface/cblas_ztpsv.c
new file mode 100644
index 0000000000..4c72808b0e
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ztpsv.c
@@ -0,0 +1,157 @@
+/*
+ * cblas_ztpsv.c
+ * The program is a C interface to ztpsv.
+ *
+ * Keita Teranishi 3/23/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ztpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *Ap, void *X, const int incX)
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_incX incX
+#endif
+ int n, i=0, tincX;
+ double *st=0, *x=(double*)X;
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ztpsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_ztpsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ztpsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_ztpsv( F77_UL, F77_TA, F77_DI, &F77_N, Ap, X, &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_ztpsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans)
+ {
+ TA = 'N';
+ if ( N > 0)
+ {
+ if ( incX > 0 )
+ tincX = incX;
+ else
+ tincX = -incX;
+
+ n = N*2*(tincX);
+
+ x++;
+
+ st=x+n;
+
+ i = tincX << 1;
+ do
+ {
+ *x = -(*x);
+ x+=i;
+ }
+ while (x != st);
+ x -= n;
+ }
+ }
+ else
+ {
+ cblas_xerbla(3, "cblas_ztpsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ztpsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_ztpsv( F77_UL, F77_TA, F77_DI, &F77_N, Ap, X,&F77_incX);
+
+ if (TransA == CblasConjTrans)
+ {
+ if (N > 0)
+ {
+ do
+ {
+ *x = -(*x);
+ x += i;
+ }
+ while (x != st);
+ }
+ }
+ }
+ else cblas_xerbla(1, "cblas_ztpsv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ztrmm.c b/contrib/libs/cblas/cblas_interface/cblas_ztrmm.c
new file mode 100644
index 0000000000..caeaefa1ed
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ztrmm.c
@@ -0,0 +1,149 @@
+/*
+ *
+ * cblas_ztrmm.c
+ * This program is a C interface to ztrmm.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ztrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ void *B, const int ldb)
+{
+ char UL, TA, SD, DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_SD, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_SD &SD
+ #define F77_DI &DI
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_ldb=ldb;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_ldb ldb
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+ if( Side == CblasRight ) SD='R';
+ else if ( Side == CblasLeft ) SD='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ztrmm", "Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if( Uplo == CblasUpper ) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(3, "cblas_ztrmm", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( TransA == CblasTrans ) TA ='T';
+ else if ( TransA == CblasConjTrans ) TA='C';
+ else if ( TransA == CblasNoTrans ) TA='N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ztrmm", "Illegal Trans setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Diag == CblasUnit ) DI='U';
+ else if ( Diag == CblasNonUnit ) DI='N';
+ else
+ {
+ cblas_xerbla(5, "cblas_ztrmm", "Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_SD = C2F_CHAR(&SD);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_ztrmm(F77_SD, F77_UL, F77_TA, F77_DI, &F77_M, &F77_N, alpha, A, &F77_lda, B, &F77_ldb);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if( Side == CblasRight ) SD='L';
+ else if ( Side == CblasLeft ) SD='R';
+ else
+ {
+ cblas_xerbla(2, "cblas_ztrmm", "Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Uplo == CblasUpper ) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_ztrmm", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( TransA == CblasTrans ) TA ='T';
+ else if ( TransA == CblasConjTrans ) TA='C';
+ else if ( TransA == CblasNoTrans ) TA='N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ztrmm", "Illegal Trans setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Diag == CblasUnit ) DI='U';
+ else if ( Diag == CblasNonUnit ) DI='N';
+ else
+ {
+ cblas_xerbla(5, "cblas_ztrmm", "Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_SD = C2F_CHAR(&SD);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_ztrmm(F77_SD, F77_UL, F77_TA, F77_DI, &F77_N, &F77_M, alpha, A, &F77_lda, B, &F77_ldb);
+ }
+ else cblas_xerbla(1, "cblas_ztrmm", "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ztrmv.c b/contrib/libs/cblas/cblas_interface/cblas_ztrmv.c
new file mode 100644
index 0000000000..c9345afaa3
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ztrmv.c
@@ -0,0 +1,156 @@
+/*
+ * cblas_ztrmv.c
+ * The program is a C interface to ztrmv.
+ *
+ * Keita Teranishi 5/20/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ztrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *A, const int lda,
+ void *X, const int incX)
+
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_lda=lda, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_incX incX
+#endif
+ int n, i=0, tincX;
+ double *st=0,*x=(double *)X;
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ztrmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_ztrmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ztrmv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_ztrmv( F77_UL, F77_TA, F77_DI, &F77_N, A, &F77_lda, X,
+ &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_ztrmv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans)
+ {
+ TA = 'N';
+ if ( N > 0)
+ {
+ if(incX > 0)
+ tincX = incX;
+ else
+ tincX = -incX;
+ i = tincX << 1;
+ n = i * N;
+ x++;
+ st = x + n;
+ do
+ {
+ *x = -(*x);
+ x += i;
+ }
+ while (x != st);
+ x -= n;
+ }
+ }
+ else
+ {
+ cblas_xerbla(3, "cblas_ztrmv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ztrmv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_ztrmv( F77_UL, F77_TA, F77_DI, &F77_N, A, &F77_lda, X,
+ &F77_incX);
+ if (TransA == CblasConjTrans)
+ {
+ if (N > 0)
+ {
+ do
+ {
+ *x = -(*x);
+ x += i;
+ }
+ while (x != st);
+ }
+ }
+ }
+ else cblas_xerbla(1, "cblas_ztrmv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ztrsm.c b/contrib/libs/cblas/cblas_interface/cblas_ztrsm.c
new file mode 100644
index 0000000000..08375d8153
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ztrsm.c
@@ -0,0 +1,155 @@
+/*
+ *
+ * cblas_ztrsm.c
+ * This program is a C interface to ztrsm.
+ * Written by Keita Teranishi
+ * 4/8/1998
+ *
+ */
+
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ztrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const int M, const int N,
+ const void *alpha, const void *A, const int lda,
+ void *B, const int ldb)
+{
+ char UL, TA, SD, DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_SD, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_SD &SD
+ #define F77_DI &DI
+#endif
+
+#ifdef F77_INT
+ F77_INT F77_M=M, F77_N=N, F77_lda=lda, F77_ldb=ldb;
+#else
+ #define F77_M M
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_ldb ldb
+#endif
+
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+ CBLAS_CallFromC = 1;
+
+ if( Order == CblasColMajor )
+ {
+
+ if( Side == CblasRight) SD='R';
+ else if ( Side == CblasLeft ) SD='L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ztrsm", "Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Uplo == CblasUpper) UL='U';
+ else if ( Uplo == CblasLower ) UL='L';
+ else
+ {
+ cblas_xerbla(3, "cblas_ztrsm", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( TransA == CblasTrans) TA ='T';
+ else if ( TransA == CblasConjTrans ) TA='C';
+ else if ( TransA == CblasNoTrans ) TA='N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ztrsm", "Illegal Trans setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Diag == CblasUnit ) DI='U';
+ else if ( Diag == CblasNonUnit ) DI='N';
+ else
+ {
+ cblas_xerbla(5, "cblas_ztrsm", "Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_SD = C2F_CHAR(&SD);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+ F77_ztrsm(F77_SD, F77_UL, F77_TA, F77_DI, &F77_M, &F77_N, alpha, A,
+ &F77_lda, B, &F77_ldb);
+ } else if (Order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+
+ if( Side == CblasRight) SD='L';
+ else if ( Side == CblasLeft ) SD='R';
+ else
+ {
+ cblas_xerbla(2, "cblas_ztrsm", "Illegal Side setting, %d\n", Side);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Uplo == CblasUpper) UL='L';
+ else if ( Uplo == CblasLower ) UL='U';
+ else
+ {
+ cblas_xerbla(3, "cblas_ztrsm", "Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( TransA == CblasTrans) TA ='T';
+ else if ( TransA == CblasConjTrans ) TA='C';
+ else if ( TransA == CblasNoTrans ) TA='N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ztrsm", "Illegal Trans setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if( Diag == CblasUnit ) DI='U';
+ else if ( Diag == CblasNonUnit ) DI='N';
+ else
+ {
+ cblas_xerbla(5, "cblas_ztrsm", "Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_SD = C2F_CHAR(&SD);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+
+
+ F77_ztrsm(F77_SD, F77_UL, F77_TA, F77_DI, &F77_N, &F77_M, alpha, A,
+ &F77_lda, B, &F77_ldb);
+ }
+ else cblas_xerbla(1, "cblas_ztrsm", "Illegal Order setting, %d\n", Order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cblas_ztrsv.c b/contrib/libs/cblas/cblas_interface/cblas_ztrsv.c
new file mode 100644
index 0000000000..621399ba04
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cblas_ztrsv.c
@@ -0,0 +1,156 @@
+/*
+ * cblas_ztrsv.c
+ * The program is a C interface to ztrsv.
+ *
+ * Keita Teranishi 3/23/98
+ *
+ */
+#include "cblas.h"
+#include "cblas_f77.h"
+void cblas_ztrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const int N, const void *A, const int lda, void *X,
+ const int incX)
+{
+ char TA;
+ char UL;
+ char DI;
+#ifdef F77_CHAR
+ F77_CHAR F77_TA, F77_UL, F77_DI;
+#else
+ #define F77_TA &TA
+ #define F77_UL &UL
+ #define F77_DI &DI
+#endif
+#ifdef F77_INT
+ F77_INT F77_N=N, F77_lda=lda, F77_incX=incX;
+#else
+ #define F77_N N
+ #define F77_lda lda
+ #define F77_incX incX
+#endif
+ int n, i=0, tincX;
+ double *st=0,*x=(double *)X;
+ extern int CBLAS_CallFromC;
+ extern int RowMajorStrg;
+ RowMajorStrg = 0;
+
+ CBLAS_CallFromC = 1;
+ if (order == CblasColMajor)
+ {
+ if (Uplo == CblasUpper) UL = 'U';
+ else if (Uplo == CblasLower) UL = 'L';
+ else
+ {
+ cblas_xerbla(2, "cblas_ztrsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(3, "cblas_ztrsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ztrsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_ztrsv( F77_UL, F77_TA, F77_DI, &F77_N, A, &F77_lda, X,
+ &F77_incX);
+ }
+ else if (order == CblasRowMajor)
+ {
+ RowMajorStrg = 1;
+ if (Uplo == CblasUpper) UL = 'L';
+ else if (Uplo == CblasLower) UL = 'U';
+ else
+ {
+ cblas_xerbla(2, "cblas_ztrsv","Illegal Uplo setting, %d\n", Uplo);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans)
+ {
+ TA = 'N';
+ if ( N > 0)
+ {
+ if ( incX > 0 )
+ tincX = incX;
+ else
+ tincX = -incX;
+
+ n = N*2*(tincX);
+ x++;
+ st=x+n;
+ i = tincX << 1;
+ do
+ {
+ *x = -(*x);
+ x+=i;
+ }
+ while (x != st);
+ x -= n;
+ }
+ }
+ else
+ {
+ cblas_xerbla(3, "cblas_ztrsv","Illegal TransA setting, %d\n", TransA);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+
+ if (Diag == CblasUnit) DI = 'U';
+ else if (Diag == CblasNonUnit) DI = 'N';
+ else
+ {
+ cblas_xerbla(4, "cblas_ztrsv","Illegal Diag setting, %d\n", Diag);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+ }
+ #ifdef F77_CHAR
+ F77_UL = C2F_CHAR(&UL);
+ F77_TA = C2F_CHAR(&TA);
+ F77_DI = C2F_CHAR(&DI);
+ #endif
+ F77_ztrsv( F77_UL, F77_TA, F77_DI, &F77_N, A, &F77_lda, X,
+ &F77_incX);
+ if (TransA == CblasConjTrans)
+ {
+ if (N > 0)
+ {
+ do
+ {
+ *x = -(*x);
+ x += i;
+ }
+ while (x != st);
+ }
+ }
+ }
+ else cblas_xerbla(1, "cblas_ztrsv", "Illegal Order setting, %d\n", order);
+ CBLAS_CallFromC = 0;
+ RowMajorStrg = 0;
+ return;
+}
diff --git a/contrib/libs/cblas/cblas_interface/cdotcsub.c b/contrib/libs/cblas/cblas_interface/cdotcsub.c
new file mode 100644
index 0000000000..483a17b679
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cdotcsub.c
@@ -0,0 +1,41 @@
+/* ./cdotcsub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* cdotcsub.f */
+
+/* The program is a fortran wrapper for cdotc. */
+/* Witten by Keita Teranishi. 2/11/1998 */
+
+/* Subroutine */ int cdotcsub_(integer *n, complex *x, integer *incx, complex
+ *y, integer *incy, complex *dotc)
+{
+ /* System generated locals */
+ complex q__1;
+
+ /* Local variables */
+ extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+
+
+
+ /* Parameter adjustments */
+ --y;
+ --x;
+
+ /* Function Body */
+ cdotc_(&q__1, n, &x[1], incx, &y[1], incy);
+ dotc->r = q__1.r, dotc->i = q__1.i;
+ return 0;
+} /* cdotcsub_ */
+
diff --git a/contrib/libs/cblas/cblas_interface/cdotusub.c b/contrib/libs/cblas/cblas_interface/cdotusub.c
new file mode 100644
index 0000000000..00ea66c1d2
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/cdotusub.c
@@ -0,0 +1,41 @@
+/* ./cdotusub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* cdotusub.f */
+
+/* The program is a fortran wrapper for cdotu. */
+/* Witten by Keita Teranishi. 2/11/1998 */
+
+/* Subroutine */ int cdotusub_(integer *n, complex *x, integer *incx, complex
+ *y, integer *incy, complex *dotu)
+{
+ /* System generated locals */
+ complex q__1;
+
+ /* Local variables */
+ extern /* Complex */ VOID cdotu_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+
+
+
+ /* Parameter adjustments */
+ --y;
+ --x;
+
+ /* Function Body */
+ cdotu_(&q__1, n, &x[1], incx, &y[1], incy);
+ dotu->r = q__1.r, dotu->i = q__1.i;
+ return 0;
+} /* cdotusub_ */
+
diff --git a/contrib/libs/cblas/cblas_interface/dasumsub.c b/contrib/libs/cblas/cblas_interface/dasumsub.c
new file mode 100644
index 0000000000..8588f26614
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/dasumsub.c
@@ -0,0 +1,34 @@
+/* ./dasumsub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* dasumsun.f */
+
+/* The program is a fortran wrapper for dasum.. */
+/* Witten by Keita Teranishi. 2/11/1998 */
+
+/* Subroutine */ int dasumsub_(integer *n, doublereal *x, integer *incx,
+ doublereal *asum)
+{
+ extern doublereal dasum_(integer *, doublereal *, integer *);
+
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ *asum = dasum_(n, &x[1], incx);
+ return 0;
+} /* dasumsub_ */
+
diff --git a/contrib/libs/cblas/cblas_interface/ddotsub.c b/contrib/libs/cblas/cblas_interface/ddotsub.c
new file mode 100644
index 0000000000..bac0bbaaaa
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/ddotsub.c
@@ -0,0 +1,36 @@
+/* ./ddotsub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* ddotsub.f */
+
+/* The program is a fortran wrapper for ddot. */
+/* Witten by Keita Teranishi. 2/11/1998 */
+
+/* Subroutine */ int ddotsub_(integer *n, doublereal *x, integer *incx,
+ doublereal *y, integer *incy, doublereal *dot)
+{
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+
+
+
+ /* Parameter adjustments */
+ --y;
+ --x;
+
+ /* Function Body */
+ *dot = ddot_(n, &x[1], incx, &y[1], incy);
+ return 0;
+} /* ddotsub_ */
+
diff --git a/contrib/libs/cblas/cblas_interface/dnrm2sub.c b/contrib/libs/cblas/cblas_interface/dnrm2sub.c
new file mode 100644
index 0000000000..2a43286be0
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/dnrm2sub.c
@@ -0,0 +1,34 @@
+/* ./dnrm2sub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* dnrm2sub.f */
+
+/* The program is a fortran wrapper for dnrm2. */
+/* Witten by Keita Teranishi. 2/11/1998 */
+
+/* Subroutine */ int dnrm2sub_(integer *n, doublereal *x, integer *incx,
+ doublereal *nrm2)
+{
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ *nrm2 = dnrm2_(n, &x[1], incx);
+ return 0;
+} /* dnrm2sub_ */
+
diff --git a/contrib/libs/cblas/cblas_interface/dsdotsub.c b/contrib/libs/cblas/cblas_interface/dsdotsub.c
new file mode 100644
index 0000000000..ffa291e5ab
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/dsdotsub.c
@@ -0,0 +1,35 @@
+/* ./dsdotsub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* dsdotsub.f */
+
+/* The program is a fortran wrapper for dsdot. */
+/* Witten by Keita Teranishi. 2/11/1998 */
+
+/* Subroutine */ int dsdotsub_(integer *n, real *x, integer *incx, real *y,
+ integer *incy, doublereal *dot)
+{
+ extern doublereal dsdot_(integer *, real *, integer *, real *, integer *);
+
+
+
+ /* Parameter adjustments */
+ --y;
+ --x;
+
+ /* Function Body */
+ *dot = dsdot_(n, &x[1], incx, &y[1], incy);
+ return 0;
+} /* dsdotsub_ */
+
diff --git a/contrib/libs/cblas/cblas_interface/dzasumsub.c b/contrib/libs/cblas/cblas_interface/dzasumsub.c
new file mode 100644
index 0000000000..10a60ce966
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/dzasumsub.c
@@ -0,0 +1,34 @@
+/* ./dzasumsub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* dzasumsub.f */
+
+/* The program is a fortran wrapper for dzasum. */
+/* Witten by Keita Teranishi. 2/11/1998 */
+
+/* Subroutine */ int dzasumsub_(integer *n, doublecomplex *x, integer *incx,
+ doublereal *asum)
+{
+ extern doublereal dzasum_(integer *, doublecomplex *, integer *);
+
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ *asum = dzasum_(n, &x[1], incx);
+ return 0;
+} /* dzasumsub_ */
+
diff --git a/contrib/libs/cblas/cblas_interface/dznrm2sub.c b/contrib/libs/cblas/cblas_interface/dznrm2sub.c
new file mode 100644
index 0000000000..6b9f823744
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/dznrm2sub.c
@@ -0,0 +1,34 @@
+/* ./dznrm2sub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* dznrm2sub.f */
+
+/* The program is a fortran wrapper for dznrm2. */
+/* Witten by Keita Teranishi. 2/11/1998 */
+
+/* Subroutine */ int dznrm2sub_(integer *n, doublecomplex *x, integer *incx,
+ doublereal *nrm2)
+{
+ extern doublereal dznrm2_(integer *, doublecomplex *, integer *);
+
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ *nrm2 = dznrm2_(n, &x[1], incx);
+ return 0;
+} /* dznrm2sub_ */
+
diff --git a/contrib/libs/cblas/cblas_interface/icamaxsub.c b/contrib/libs/cblas/cblas_interface/icamaxsub.c
new file mode 100644
index 0000000000..932348cce7
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/icamaxsub.c
@@ -0,0 +1,34 @@
+/* ./icamaxsub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* icamaxsub.f */
+
+/* The program is a fortran wrapper for icamax. */
+/* Witten by Keita Teranishi. 2/11/1998 */
+
+/* Subroutine */ int icamaxsub_(integer *n, complex *x, integer *incx,
+ integer *iamax)
+{
+ extern integer icamax_(integer *, complex *, integer *);
+
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ *iamax = icamax_(n, &x[1], incx);
+ return 0;
+} /* icamaxsub_ */
+
diff --git a/contrib/libs/cblas/cblas_interface/idamaxsub.c b/contrib/libs/cblas/cblas_interface/idamaxsub.c
new file mode 100644
index 0000000000..32163edd3b
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/idamaxsub.c
@@ -0,0 +1,34 @@
+/* ./idamaxsub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* icamaxsub.f */
+
+/* The program is a fortran wrapper for idamax. */
+/* Witten by Keita Teranishi. 2/22/1998 */
+
+/* Subroutine */ int idamaxsub_(integer *n, doublereal *x, integer *incx,
+ integer *iamax)
+{
+ extern integer idamax_(integer *, doublereal *, integer *);
+
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ *iamax = idamax_(n, &x[1], incx);
+ return 0;
+} /* idamaxsub_ */
+
diff --git a/contrib/libs/cblas/cblas_interface/isamaxsub.c b/contrib/libs/cblas/cblas_interface/isamaxsub.c
new file mode 100644
index 0000000000..c937f5902e
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/isamaxsub.c
@@ -0,0 +1,34 @@
+/* ./isamaxsub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* isamaxsub.f */
+
+/* The program is a fortran wrapper for isamax. */
+/* Witten by Keita Teranishi. 2/11/1998 */
+
+/* Subroutine */ int isamaxsub_(integer *n, real *x, integer *incx, integer *
+ iamax)
+{
+ extern integer isamax_(integer *, real *, integer *);
+
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ *iamax = isamax_(n, &x[1], incx);
+ return 0;
+} /* isamaxsub_ */
+
diff --git a/contrib/libs/cblas/cblas_interface/izamaxsub.c b/contrib/libs/cblas/cblas_interface/izamaxsub.c
new file mode 100644
index 0000000000..1e6af907d3
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/izamaxsub.c
@@ -0,0 +1,34 @@
+/* ./izamaxsub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* izamaxsub.f */
+
+/* The program is a fortran wrapper for izamax. */
+/* Witten by Keita Teranishi. 2/11/1998 */
+
+/* Subroutine */ int izamaxsub_(integer *n, doublecomplex *x, integer *incx,
+ integer *iamax)
+{
+ extern integer izamax_(integer *, doublecomplex *, integer *);
+
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ *iamax = izamax_(n, &x[1], incx);
+ return 0;
+} /* izamaxsub_ */
+
diff --git a/contrib/libs/cblas/cblas_interface/sasumsub.c b/contrib/libs/cblas/cblas_interface/sasumsub.c
new file mode 100644
index 0000000000..d8d0efca40
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/sasumsub.c
@@ -0,0 +1,33 @@
+/* ./sasumsub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* sasumsub.f */
+
+/* The program is a fortran wrapper for sasum. */
+/* Witten by Keita Teranishi. 2/11/1998 */
+
+/* Subroutine */ int sasumsub_(integer *n, real *x, integer *incx, real *asum)
+{
+ extern doublereal sasum_(integer *, real *, integer *);
+
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ *asum = sasum_(n, &x[1], incx);
+ return 0;
+} /* sasumsub_ */
+
diff --git a/contrib/libs/cblas/cblas_interface/scasumsub.c b/contrib/libs/cblas/cblas_interface/scasumsub.c
new file mode 100644
index 0000000000..35297da9b4
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/scasumsub.c
@@ -0,0 +1,34 @@
+/* ./scasumsub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* scasumsub.f */
+
+/* The program is a fortran wrapper for scasum. */
+/* Witten by Keita Teranishi. 2/11/1998 */
+
+/* Subroutine */ int scasumsub_(integer *n, complex *x, integer *incx, real *
+ asum)
+{
+ extern doublereal scasum_(integer *, complex *, integer *);
+
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ *asum = scasum_(n, &x[1], incx);
+ return 0;
+} /* scasumsub_ */
+
diff --git a/contrib/libs/cblas/cblas_interface/scnrm2sub.c b/contrib/libs/cblas/cblas_interface/scnrm2sub.c
new file mode 100644
index 0000000000..48a5688469
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/scnrm2sub.c
@@ -0,0 +1,34 @@
+/* ./scnrm2sub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* scnrm2sub.f */
+
+/* The program is a fortran wrapper for scnrm2. */
+/* Witten by Keita Teranishi. 2/11/1998 */
+
+/* Subroutine */ int scnrm2sub_(integer *n, complex *x, integer *incx, real *
+ nrm2)
+{
+ extern doublereal scnrm2_(integer *, complex *, integer *);
+
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ *nrm2 = scnrm2_(n, &x[1], incx);
+ return 0;
+} /* scnrm2sub_ */
+
diff --git a/contrib/libs/cblas/cblas_interface/sdotsub.c b/contrib/libs/cblas/cblas_interface/sdotsub.c
new file mode 100644
index 0000000000..db0e7e4f04
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/sdotsub.c
@@ -0,0 +1,35 @@
+/* ./sdotsub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* sdotsub.f */
+
+/* The program is a fortran wrapper for sdot. */
+/* Witten by Keita Teranishi. 2/11/1998 */
+
+/* Subroutine */ int sdotsub_(integer *n, real *x, integer *incx, real *y,
+ integer *incy, real *dot)
+{
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+
+
+
+ /* Parameter adjustments */
+ --y;
+ --x;
+
+ /* Function Body */
+ *dot = sdot_(n, &x[1], incx, &y[1], incy);
+ return 0;
+} /* sdotsub_ */
+
diff --git a/contrib/libs/cblas/cblas_interface/sdsdotsub.c b/contrib/libs/cblas/cblas_interface/sdsdotsub.c
new file mode 100644
index 0000000000..c1c72c0103
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/sdsdotsub.c
@@ -0,0 +1,36 @@
+/* ./sdsdotsub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* sdsdotsub.f */
+
+/* The program is a fortran wrapper for sdsdot. */
+/* Witten by Keita Teranishi. 2/11/1998 */
+
+/* Subroutine */ int sdsdotsub_(integer *n, real *x, integer *incx, real *y,
+ integer *incy, real *dot)
+{
+ extern doublereal sdsdot_(integer *, real *, integer *, real *, integer *)
+ ;
+
+
+
+ /* Parameter adjustments */
+ --y;
+ --x;
+
+ /* Function Body */
+ *dot = sdsdot_(n, &x[1], incx, &y[1], incy);
+ return 0;
+} /* sdsdotsub_ */
+
diff --git a/contrib/libs/cblas/cblas_interface/snrm2sub.c b/contrib/libs/cblas/cblas_interface/snrm2sub.c
new file mode 100644
index 0000000000..4f3e379d9c
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/snrm2sub.c
@@ -0,0 +1,33 @@
+/* ./snrm2sub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* snrm2sub.f */
+
+/* The program is a fortran wrapper for snrm2. */
+/* Witten by Keita Teranishi. 2/11/1998 */
+
+/* Subroutine */ int snrm2sub_(integer *n, real *x, integer *incx, real *nrm2)
+{
+ extern doublereal snrm2_(integer *, real *, integer *);
+
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ *nrm2 = snrm2_(n, &x[1], incx);
+ return 0;
+} /* snrm2sub_ */
+
diff --git a/contrib/libs/cblas/cblas_interface/zdotcsub.c b/contrib/libs/cblas/cblas_interface/zdotcsub.c
new file mode 100644
index 0000000000..86ae886063
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/zdotcsub.c
@@ -0,0 +1,41 @@
+/* ./zdotcsub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* zdotcsub.f */
+
+/* The program is a fortran wrapper for zdotc. */
+/* Witten by Keita Teranishi. 2/11/1998 */
+
+/* Subroutine */ int zdotcsub_(integer *n, doublecomplex *x, integer *incx,
+ doublecomplex *y, integer *incy, doublecomplex *dotc)
+{
+ /* System generated locals */
+ doublecomplex z__1;
+
+ /* Local variables */
+ extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+
+
+
+ /* Parameter adjustments */
+ --y;
+ --x;
+
+ /* Function Body */
+ zdotc_(&z__1, n, &x[1], incx, &y[1], incy);
+ dotc->r = z__1.r, dotc->i = z__1.i;
+ return 0;
+} /* zdotcsub_ */
+
diff --git a/contrib/libs/cblas/cblas_interface/zdotusub.c b/contrib/libs/cblas/cblas_interface/zdotusub.c
new file mode 100644
index 0000000000..06166c37bd
--- /dev/null
+++ b/contrib/libs/cblas/cblas_interface/zdotusub.c
@@ -0,0 +1,41 @@
+/* ./zdotusub.f -- translated by f2c (version 20100827).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* zdotusub.f */
+
+/* The program is a fortran wrapper for zdotu. */
+/* Witten by Keita Teranishi. 2/11/1998 */
+
+/* Subroutine */ int zdotusub_(integer *n, doublecomplex *x, integer *incx,
+ doublecomplex *y, integer *incy, doublecomplex *dotu)
+{
+ /* System generated locals */
+ doublecomplex z__1;
+
+ /* Local variables */
+ extern /* Double Complex */ VOID zdotu_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+
+
+
+ /* Parameter adjustments */
+ --y;
+ --x;
+
+ /* Function Body */
+ zdotu_(&z__1, n, &x[1], incx, &y[1], incy);
+ dotu->r = z__1.r, dotu->i = z__1.i;
+ return 0;
+} /* zdotusub_ */
+
diff --git a/contrib/libs/cblas/ccopy.c b/contrib/libs/cblas/ccopy.c
new file mode 100644
index 0000000000..32e9952ce2
--- /dev/null
+++ b/contrib/libs/cblas/ccopy.c
@@ -0,0 +1,88 @@
+/* ccopy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ccopy_(integer *n, complex *cx, integer *incx, complex *
+ cy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, ix, iy;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CCOPY copies a vector x to a vector y. */
+
+/* Further Details */
+/* =============== */
+
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+/* .. Local Scalars .. */
+/* .. */
+ /* Parameter adjustments */
+ --cy;
+ --cx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/* code for unequal increments or equal increments */
+/* not equal to 1 */
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = ix;
+ cy[i__2].r = cx[i__3].r, cy[i__2].i = cx[i__3].i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ cy[i__2].r = cx[i__3].r, cy[i__2].i = cx[i__3].i;
+/* L30: */
+ }
+ return 0;
+} /* ccopy_ */
diff --git a/contrib/libs/cblas/cdotc.c b/contrib/libs/cblas/cdotc.c
new file mode 100644
index 0000000000..a471573da4
--- /dev/null
+++ b/contrib/libs/cblas/cdotc.c
@@ -0,0 +1,106 @@
+/* cdotc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Complex */ VOID cdotc_(complex * ret_val, integer *n, complex *cx, integer
+ *incx, complex *cy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, ix, iy;
+ complex ctemp;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* forms the dot product of two vectors, conjugating the first */
+/* vector. */
+
+/* Further Details */
+/* =============== */
+
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --cy;
+ --cx;
+
+ /* Function Body */
+ ctemp.r = 0.f, ctemp.i = 0.f;
+ ret_val->r = 0.f, ret_val->i = 0.f;
+ if (*n <= 0) {
+ return ;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/* code for unequal increments or equal increments */
+/* not equal to 1 */
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r_cnjg(&q__3, &cx[ix]);
+ i__2 = iy;
+ q__2.r = q__3.r * cy[i__2].r - q__3.i * cy[i__2].i, q__2.i = q__3.r *
+ cy[i__2].i + q__3.i * cy[i__2].r;
+ q__1.r = ctemp.r + q__2.r, q__1.i = ctemp.i + q__2.i;
+ ctemp.r = q__1.r, ctemp.i = q__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ ret_val->r = ctemp.r, ret_val->i = ctemp.i;
+ return ;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r_cnjg(&q__3, &cx[i__]);
+ i__2 = i__;
+ q__2.r = q__3.r * cy[i__2].r - q__3.i * cy[i__2].i, q__2.i = q__3.r *
+ cy[i__2].i + q__3.i * cy[i__2].r;
+ q__1.r = ctemp.r + q__2.r, q__1.i = ctemp.i + q__2.i;
+ ctemp.r = q__1.r, ctemp.i = q__1.i;
+/* L30: */
+ }
+ ret_val->r = ctemp.r, ret_val->i = ctemp.i;
+ return ;
+} /* cdotc_ */
diff --git a/contrib/libs/cblas/cdotu.c b/contrib/libs/cblas/cdotu.c
new file mode 100644
index 0000000000..3aa48a2944
--- /dev/null
+++ b/contrib/libs/cblas/cdotu.c
@@ -0,0 +1,100 @@
+/* cdotu.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Complex */ VOID cdotu_(complex * ret_val, integer *n, complex *cx, integer
+	*incx, complex *cy, integer *incy)
+{
+/* f2c convention: a Fortran COMPLEX FUNCTION is emitted as a void C */
+/* function whose result is written through the hidden first argument */
+/* ret_val.  All scalar arguments arrive by pointer, and the --cx/--cy */
+/* adjustments below let the body keep the original 1-based indexing. */
+    /* System generated locals */
+    integer i__1, i__2, i__3;
+    complex q__1, q__2;
+
+    /* Local variables */
+    integer i__, ix, iy;
+    complex ctemp;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CDOTU forms the dot product of two vectors. */
+
+/* Further Details */
+/* =============== */
+
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+/* .. Local Scalars .. */
+/* .. */
+    /* Parameter adjustments */
+    --cy;
+    --cx;
+
+    /* Function Body */
+    ctemp.r = 0.f, ctemp.i = 0.f;
+    ret_val->r = 0.f, ret_val->i = 0.f;
+    if (*n <= 0) {
+	return ;
+    }
+    if (*incx == 1 && *incy == 1) {
+	goto L20;
+    }
+
+/* code for unequal increments or equal increments */
+/* not equal to 1 */
+
+    ix = 1;
+    iy = 1;
+/* For a negative increment, start at the offset that makes the n */
+/* strided accesses sweep the same elements in reverse order. */
+    if (*incx < 0) {
+	ix = (-(*n) + 1) * *incx + 1;
+    }
+    if (*incy < 0) {
+	iy = (-(*n) + 1) * *incy + 1;
+    }
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = ix;
+	i__3 = iy;
+/* Unconjugated complex multiply-accumulate: ctemp += cx[ix]*cy[iy]. */
+	q__2.r = cx[i__2].r * cy[i__3].r - cx[i__2].i * cy[i__3].i, q__2.i =
+		cx[i__2].r * cy[i__3].i + cx[i__2].i * cy[i__3].r;
+	q__1.r = ctemp.r + q__2.r, q__1.i = ctemp.i + q__2.i;
+	ctemp.r = q__1.r, ctemp.i = q__1.i;
+	ix += *incx;
+	iy += *incy;
+/* L10: */
+    }
+    ret_val->r = ctemp.r, ret_val->i = ctemp.i;
+    return ;
+
+/* code for both increments equal to 1 */
+
+L20:
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = i__;
+	i__3 = i__;
+	q__2.r = cx[i__2].r * cy[i__3].r - cx[i__2].i * cy[i__3].i, q__2.i =
+		cx[i__2].r * cy[i__3].i + cx[i__2].i * cy[i__3].r;
+	q__1.r = ctemp.r + q__2.r, q__1.i = ctemp.i + q__2.i;
+	ctemp.r = q__1.r, ctemp.i = q__1.i;
+/* L30: */
+    }
+    ret_val->r = ctemp.r, ret_val->i = ctemp.i;
+    return ;
+} /* cdotu_ */
diff --git a/contrib/libs/cblas/cgbmv.c b/contrib/libs/cblas/cgbmv.c
new file mode 100644
index 0000000000..ec04382a29
--- /dev/null
+++ b/contrib/libs/cblas/cgbmv.c
@@ -0,0 +1,477 @@
+/* cgbmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cgbmv_(char *trans, integer *m, integer *n, integer *kl,
+	integer *ku, complex *alpha, complex *a, integer *lda, complex *x,
+	integer *incx, complex *beta, complex *y, integer *incy)
+{
+/* f2c translation note: all scalar arguments arrive as pointers (the */
+/* Fortran pass-by-reference convention) and the pointer adjustments */
+/* below give every array the original 1-based indexing. */
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+    complex q__1, q__2, q__3;
+
+    /* Builtin functions */
+    void r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    integer i__, j, k, ix, iy, jx, jy, kx, ky, kup1, info;
+    complex temp;
+    integer lenx, leny;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical noconj;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGBMV performs one of the matrix-vector operations */
+
+/* y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y, or */
+
+/* y := alpha*conjg( A' )*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are vectors and A is an */
+/* m by n band matrix, with kl sub-diagonals and ku super-diagonals. */
+
+/* Arguments */
+/* ========== */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' y := alpha*A*x + beta*y. */
+
+/* TRANS = 'T' or 't' y := alpha*A'*x + beta*y. */
+
+/* TRANS = 'C' or 'c' y := alpha*conjg( A' )*x + beta*y. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix A. */
+/* M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* KL - INTEGER. */
+/* On entry, KL specifies the number of sub-diagonals of the */
+/* matrix A. KL must satisfy 0 .le. KL. */
+/* Unchanged on exit. */
+
+/* KU - INTEGER. */
+/* On entry, KU specifies the number of super-diagonals of the */
+/* matrix A. KU must satisfy 0 .le. KU. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX array of DIMENSION ( LDA, n ). */
+/* Before entry, the leading ( kl + ku + 1 ) by n part of the */
+/* array A must contain the matrix of coefficients, supplied */
+/* column by column, with the leading diagonal of the matrix in */
+/* row ( ku + 1 ) of the array, the first super-diagonal */
+/* starting at position 2 in row ku, the first sub-diagonal */
+/* starting at position 1 in row ( ku + 2 ), and so on. */
+/* Elements in the array A that do not correspond to elements */
+/* in the band matrix (such as the top left ku by ku triangle) */
+/* are not referenced. */
+/* The following program segment will transfer a band matrix */
+/* from conventional full matrix storage to band storage: */
+
+/* DO 20, J = 1, N */
+/* K = KU + 1 - J */
+/* DO 10, I = MAX( 1, J - KU ), MIN( M, J + KL ) */
+/* A( K + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* ( kl + ku + 1 ). */
+/* Unchanged on exit. */
+
+/* X - COMPLEX array of DIMENSION at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n' */
+/* and at least */
+/* ( 1 + ( m - 1 )*abs( INCX ) ) otherwise. */
+/* Before entry, the incremented array X must contain the */
+/* vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA - COMPLEX . */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then Y need not be set on input. */
+/* Unchanged on exit. */
+
+/* Y - COMPLEX array of DIMENSION at least */
+/* ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n' */
+/* and at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ) otherwise. */
+/* Before entry, the incremented array Y must contain the */
+/* vector y. On exit, Y is overwritten by the updated vector y. */
+
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --x;
+    --y;
+
+    /* Function Body */
+    info = 0;
+    if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")
+	    ) {
+	info = 1;
+    } else if (*m < 0) {
+	info = 2;
+    } else if (*n < 0) {
+	info = 3;
+    } else if (*kl < 0) {
+	info = 4;
+    } else if (*ku < 0) {
+	info = 5;
+    } else if (*lda < *kl + *ku + 1) {
+	info = 8;
+    } else if (*incx == 0) {
+	info = 10;
+    } else if (*incy == 0) {
+	info = 13;
+    }
+    if (info != 0) {
+	xerbla_("CGBMV ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*m == 0 || *n == 0 || alpha->r == 0.f && alpha->i == 0.f && (beta->r
+	    == 1.f && beta->i == 0.f)) {
+	return 0;
+    }
+
+    noconj = lsame_(trans, "T");
+
+/* Set LENX and LENY, the lengths of the vectors x and y, and set */
+/* up the start points in X and Y. */
+
+    if (lsame_(trans, "N")) {
+	lenx = *n;
+	leny = *m;
+    } else {
+	lenx = *m;
+	leny = *n;
+    }
+    if (*incx > 0) {
+	kx = 1;
+    } else {
+	kx = 1 - (lenx - 1) * *incx;
+    }
+    if (*incy > 0) {
+	ky = 1;
+    } else {
+	ky = 1 - (leny - 1) * *incy;
+    }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through the band part of A. */
+
+/* First form y := beta*y. */
+
+    if (beta->r != 1.f || beta->i != 0.f) {
+	if (*incy == 1) {
+	    if (beta->r == 0.f && beta->i == 0.f) {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = i__;
+		    y[i__2].r = 0.f, y[i__2].i = 0.f;
+/* L10: */
+		}
+	    } else {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = i__;
+		    i__3 = i__;
+		    q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+			    q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+			    .r;
+		    y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+/* L20: */
+		}
+	    }
+	} else {
+	    iy = ky;
+	    if (beta->r == 0.f && beta->i == 0.f) {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = iy;
+		    y[i__2].r = 0.f, y[i__2].i = 0.f;
+		    iy += *incy;
+/* L30: */
+		}
+	    } else {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = iy;
+		    i__3 = iy;
+		    q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+			    q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+			    .r;
+		    y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+		    iy += *incy;
+/* L40: */
+		}
+	    }
+	}
+    }
+    if (alpha->r == 0.f && alpha->i == 0.f) {
+	return 0;
+    }
+/* Band-storage mapping: element A(i,j) lives at a[kup1 - j + i + j*a_dim1], */
+/* per the storage layout documented in the header above. */
+    kup1 = *ku + 1;
+    if (lsame_(trans, "N")) {
+
+/* Form y := alpha*A*x + y. */
+
+	jx = kx;
+	if (*incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+		    i__2 = jx;
+		    q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+			    q__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+			    .r;
+		    temp.r = q__1.r, temp.i = q__1.i;
+		    k = kup1 - j;
+/* Computing MAX */
+		    i__2 = 1, i__3 = j - *ku;
+/* Computing MIN */
+		    i__5 = *m, i__6 = j + *kl;
+		    i__4 = min(i__5,i__6);
+		    for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+			i__2 = i__;
+			i__3 = i__;
+			i__5 = k + i__ + j * a_dim1;
+			q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+				q__2.i = temp.r * a[i__5].i + temp.i * a[i__5]
+				.r;
+			q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i +
+				q__2.i;
+			y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+/* L50: */
+		    }
+		}
+		jx += *incx;
+/* L60: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__4 = jx;
+		if (x[i__4].r != 0.f || x[i__4].i != 0.f) {
+		    i__4 = jx;
+		    q__1.r = alpha->r * x[i__4].r - alpha->i * x[i__4].i,
+			    q__1.i = alpha->r * x[i__4].i + alpha->i * x[i__4]
+			    .r;
+		    temp.r = q__1.r, temp.i = q__1.i;
+		    iy = ky;
+		    k = kup1 - j;
+/* Computing MAX */
+		    i__4 = 1, i__2 = j - *ku;
+/* Computing MIN */
+		    i__5 = *m, i__6 = j + *kl;
+		    i__3 = min(i__5,i__6);
+		    for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+			i__4 = iy;
+			i__2 = iy;
+			i__5 = k + i__ + j * a_dim1;
+			q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+				q__2.i = temp.r * a[i__5].i + temp.i * a[i__5]
+				.r;
+			q__1.r = y[i__2].r + q__2.r, q__1.i = y[i__2].i +
+				q__2.i;
+			y[i__4].r = q__1.r, y[i__4].i = q__1.i;
+			iy += *incy;
+/* L70: */
+		    }
+		}
+		jx += *incx;
+/* Once j exceeds ku the band no longer reaches row 1 (the loop's lower */
+/* bound max(1, j-ku) starts growing), so the first touched y element */
+/* advances with each further column. */
+		if (j > *ku) {
+		    ky += *incy;
+		}
+/* L80: */
+	    }
+	}
+    } else {
+
+/* Form y := alpha*A'*x + y or y := alpha*conjg( A' )*x + y. */
+
+	jy = ky;
+	if (*incx == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp.r = 0.f, temp.i = 0.f;
+		k = kup1 - j;
+		if (noconj) {
+/* Computing MAX */
+		    i__3 = 1, i__4 = j - *ku;
+/* Computing MIN */
+		    i__5 = *m, i__6 = j + *kl;
+		    i__2 = min(i__5,i__6);
+		    for (i__ = max(i__3,i__4); i__ <= i__2; ++i__) {
+			i__3 = k + i__ + j * a_dim1;
+			i__4 = i__;
+			q__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4]
+				.i, q__2.i = a[i__3].r * x[i__4].i + a[i__3]
+				.i * x[i__4].r;
+			q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+			temp.r = q__1.r, temp.i = q__1.i;
+/* L90: */
+		    }
+		} else {
+/* Computing MAX */
+		    i__2 = 1, i__3 = j - *ku;
+/* Computing MIN */
+		    i__5 = *m, i__6 = j + *kl;
+		    i__4 = min(i__5,i__6);
+		    for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+			r_cnjg(&q__3, &a[k + i__ + j * a_dim1]);
+			i__2 = i__;
+			q__2.r = q__3.r * x[i__2].r - q__3.i * x[i__2].i,
+				q__2.i = q__3.r * x[i__2].i + q__3.i * x[i__2]
+				.r;
+			q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+			temp.r = q__1.r, temp.i = q__1.i;
+/* L100: */
+		    }
+		}
+		i__4 = jy;
+		i__2 = jy;
+		q__2.r = alpha->r * temp.r - alpha->i * temp.i, q__2.i =
+			alpha->r * temp.i + alpha->i * temp.r;
+		q__1.r = y[i__2].r + q__2.r, q__1.i = y[i__2].i + q__2.i;
+		y[i__4].r = q__1.r, y[i__4].i = q__1.i;
+		jy += *incy;
+/* L110: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp.r = 0.f, temp.i = 0.f;
+		ix = kx;
+		k = kup1 - j;
+		if (noconj) {
+/* Computing MAX */
+		    i__4 = 1, i__2 = j - *ku;
+/* Computing MIN */
+		    i__5 = *m, i__6 = j + *kl;
+		    i__3 = min(i__5,i__6);
+		    for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+			i__4 = k + i__ + j * a_dim1;
+			i__2 = ix;
+			q__2.r = a[i__4].r * x[i__2].r - a[i__4].i * x[i__2]
+				.i, q__2.i = a[i__4].r * x[i__2].i + a[i__4]
+				.i * x[i__2].r;
+			q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+			temp.r = q__1.r, temp.i = q__1.i;
+			ix += *incx;
+/* L120: */
+		    }
+		} else {
+/* Computing MAX */
+		    i__3 = 1, i__4 = j - *ku;
+/* Computing MIN */
+		    i__5 = *m, i__6 = j + *kl;
+		    i__2 = min(i__5,i__6);
+		    for (i__ = max(i__3,i__4); i__ <= i__2; ++i__) {
+			r_cnjg(&q__3, &a[k + i__ + j * a_dim1]);
+			i__3 = ix;
+			q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i,
+				q__2.i = q__3.r * x[i__3].i + q__3.i * x[i__3]
+				.r;
+			q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+			temp.r = q__1.r, temp.i = q__1.i;
+			ix += *incx;
+/* L130: */
+		    }
+		}
+		i__2 = jy;
+		i__3 = jy;
+		q__2.r = alpha->r * temp.r - alpha->i * temp.i, q__2.i =
+			alpha->r * temp.i + alpha->i * temp.r;
+		q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+		y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+		jy += *incy;
+		if (j > *ku) {
+		    kx += *incx;
+		}
+/* L140: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of CGBMV . */
+
+} /* cgbmv_ */
diff --git a/contrib/libs/cblas/cgemm.c b/contrib/libs/cblas/cgemm.c
new file mode 100644
index 0000000000..7568b5cbfe
--- /dev/null
+++ b/contrib/libs/cblas/cgemm.c
@@ -0,0 +1,697 @@
+/* cgemm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cgemm_(char *transa, char *transb, integer *m, integer *
+	n, integer *k, complex *alpha, complex *a, integer *lda, complex *b,
+	integer *ldb, complex *beta, complex *c__, integer *ldc)
+{
+/* f2c translation note: scalar arguments arrive by pointer and the */
+/* a/b/c__ pointer adjustments below restore 1-based column-major */
+/* indexing (element (i,j) of C is c__[i + j * c_dim1]). */
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+	    i__3, i__4, i__5, i__6;
+    complex q__1, q__2, q__3, q__4;
+
+    /* Builtin functions */
+    void r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    integer i__, j, l, info;
+    logical nota, notb;
+    complex temp;
+    logical conja, conjb;
+    integer ncola;
+    extern logical lsame_(char *, char *);
+    integer nrowa, nrowb;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGEMM performs one of the matrix-matrix operations */
+
+/* C := alpha*op( A )*op( B ) + beta*C, */
+
+/* where op( X ) is one of */
+
+/* op( X ) = X or op( X ) = X' or op( X ) = conjg( X' ), */
+
+/* alpha and beta are scalars, and A, B and C are matrices, with op( A ) */
+/* an m by k matrix, op( B ) a k by n matrix and C an m by n matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* TRANSA - CHARACTER*1. */
+/* On entry, TRANSA specifies the form of op( A ) to be used in */
+/* the matrix multiplication as follows: */
+
+/* TRANSA = 'N' or 'n', op( A ) = A. */
+
+/* TRANSA = 'T' or 't', op( A ) = A'. */
+
+/* TRANSA = 'C' or 'c', op( A ) = conjg( A' ). */
+
+/* Unchanged on exit. */
+
+/* TRANSB - CHARACTER*1. */
+/* On entry, TRANSB specifies the form of op( B ) to be used in */
+/* the matrix multiplication as follows: */
+
+/* TRANSB = 'N' or 'n', op( B ) = B. */
+
+/* TRANSB = 'T' or 't', op( B ) = B'. */
+
+/* TRANSB = 'C' or 'c', op( B ) = conjg( B' ). */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix */
+/* op( A ) and of the matrix C. M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix */
+/* op( B ) and the number of columns of the matrix C. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry, K specifies the number of columns of the matrix */
+/* op( A ) and the number of rows of the matrix op( B ). K must */
+/* be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX array of DIMENSION ( LDA, ka ), where ka is */
+/* k when TRANSA = 'N' or 'n', and is m otherwise. */
+/* Before entry with TRANSA = 'N' or 'n', the leading m by k */
+/* part of the array A must contain the matrix A, otherwise */
+/* the leading k by m part of the array A must contain the */
+/* matrix A. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When TRANSA = 'N' or 'n' then */
+/* LDA must be at least max( 1, m ), otherwise LDA must be at */
+/* least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* B - COMPLEX array of DIMENSION ( LDB, kb ), where kb is */
+/* n when TRANSB = 'N' or 'n', and is k otherwise. */
+/* Before entry with TRANSB = 'N' or 'n', the leading k by n */
+/* part of the array B must contain the matrix B, otherwise */
+/* the leading n by k part of the array B must contain the */
+/* matrix B. */
+/* Unchanged on exit. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. When TRANSB = 'N' or 'n' then */
+/* LDB must be at least max( 1, k ), otherwise LDB must be at */
+/* least max( 1, n ). */
+/* Unchanged on exit. */
+
+/* BETA - COMPLEX . */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then C need not be set on input. */
+/* Unchanged on exit. */
+
+/* C - COMPLEX array of DIMENSION ( LDC, n ). */
+/* Before entry, the leading m by n part of the array C must */
+/* contain the matrix C, except when beta is zero, in which */
+/* case C need not be set on entry. */
+/* On exit, the array C is overwritten by the m by n matrix */
+/* ( alpha*op( A )*op( B ) + beta*C ). */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Set NOTA and NOTB as true if A and B respectively are not */
+/* conjugated or transposed, set CONJA and CONJB as true if A and */
+/* B respectively are to be transposed but not conjugated and set */
+/* NROWA, NCOLA and NROWB as the number of rows and columns of A */
+/* and the number of rows of B respectively. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+
+    /* Function Body */
+    nota = lsame_(transa, "N");
+    notb = lsame_(transb, "N");
+    conja = lsame_(transa, "C");
+    conjb = lsame_(transb, "C");
+    if (nota) {
+	nrowa = *m;
+	ncola = *k;
+    } else {
+	nrowa = *k;
+	ncola = *m;
+    }
+    if (notb) {
+	nrowb = *k;
+    } else {
+	nrowb = *n;
+    }
+
+/* Test the input parameters. */
+
+    info = 0;
+    if (! nota && ! conja && ! lsame_(transa, "T")) {
+	info = 1;
+    } else if (! notb && ! conjb && ! lsame_(transb, "T")) {
+	info = 2;
+    } else if (*m < 0) {
+	info = 3;
+    } else if (*n < 0) {
+	info = 4;
+    } else if (*k < 0) {
+	info = 5;
+    } else if (*lda < max(1,nrowa)) {
+	info = 8;
+    } else if (*ldb < max(1,nrowb)) {
+	info = 10;
+    } else if (*ldc < max(1,*m)) {
+	info = 13;
+    }
+    if (info != 0) {
+	xerbla_("CGEMM ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*m == 0 || *n == 0 || (alpha->r == 0.f && alpha->i == 0.f || *k == 0)
+	    && (beta->r == 1.f && beta->i == 0.f)) {
+	return 0;
+    }
+
+/* And when alpha.eq.zero. */
+
+    if (alpha->r == 0.f && alpha->i == 0.f) {
+	if (beta->r == 0.f && beta->i == 0.f) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * c_dim1;
+		    c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L10: */
+		}
+/* L20: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * c_dim1;
+		    i__4 = i__ + j * c_dim1;
+		    q__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4].i,
+			    q__1.i = beta->r * c__[i__4].i + beta->i * c__[
+			    i__4].r;
+		    c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+	return 0;
+    }
+
+/* Start the operations. */
+
+/* Eight cases below: one per combination of op(A) in {A, A', conjg(A')} */
+/* and op(B) in {B, B', conjg(B')} that reaches this point. */
+    if (notb) {
+	if (nota) {
+
+/* Form C := alpha*A*B + beta*C. */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (beta->r == 0.f && beta->i == 0.f) {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L50: */
+		    }
+		} else if (beta->r != 1.f || beta->i != 0.f) {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			q__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+				.i, q__1.i = beta->r * c__[i__4].i + beta->i *
+				 c__[i__4].r;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L60: */
+		    }
+		}
+		i__2 = *k;
+		for (l = 1; l <= i__2; ++l) {
+		    i__3 = l + j * b_dim1;
+		    if (b[i__3].r != 0.f || b[i__3].i != 0.f) {
+			i__3 = l + j * b_dim1;
+			q__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+				q__1.i = alpha->r * b[i__3].i + alpha->i * b[
+				i__3].r;
+			temp.r = q__1.r, temp.i = q__1.i;
+			i__3 = *m;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    i__4 = i__ + j * c_dim1;
+			    i__5 = i__ + j * c_dim1;
+			    i__6 = i__ + l * a_dim1;
+			    q__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+				    q__2.i = temp.r * a[i__6].i + temp.i * a[
+				    i__6].r;
+			    q__1.r = c__[i__5].r + q__2.r, q__1.i = c__[i__5]
+				    .i + q__2.i;
+			    c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L70: */
+			}
+		    }
+/* L80: */
+		}
+/* L90: */
+	    }
+	} else if (conja) {
+
+/* Form C := alpha*conjg( A' )*B + beta*C. */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    temp.r = 0.f, temp.i = 0.f;
+		    i__3 = *k;
+		    for (l = 1; l <= i__3; ++l) {
+			r_cnjg(&q__3, &a[l + i__ * a_dim1]);
+			i__4 = l + j * b_dim1;
+			q__2.r = q__3.r * b[i__4].r - q__3.i * b[i__4].i,
+				q__2.i = q__3.r * b[i__4].i + q__3.i * b[i__4]
+				.r;
+			q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+			temp.r = q__1.r, temp.i = q__1.i;
+/* L100: */
+		    }
+		    if (beta->r == 0.f && beta->i == 0.f) {
+			i__3 = i__ + j * c_dim1;
+			q__1.r = alpha->r * temp.r - alpha->i * temp.i,
+				q__1.i = alpha->r * temp.i + alpha->i *
+				temp.r;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    } else {
+			i__3 = i__ + j * c_dim1;
+			q__2.r = alpha->r * temp.r - alpha->i * temp.i,
+				q__2.i = alpha->r * temp.i + alpha->i *
+				temp.r;
+			i__4 = i__ + j * c_dim1;
+			q__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+				.i, q__3.i = beta->r * c__[i__4].i + beta->i *
+				 c__[i__4].r;
+			q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    }
+/* L110: */
+		}
+/* L120: */
+	    }
+	} else {
+
+/* Form C := alpha*A'*B + beta*C */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    temp.r = 0.f, temp.i = 0.f;
+		    i__3 = *k;
+		    for (l = 1; l <= i__3; ++l) {
+			i__4 = l + i__ * a_dim1;
+			i__5 = l + j * b_dim1;
+			q__2.r = a[i__4].r * b[i__5].r - a[i__4].i * b[i__5]
+				.i, q__2.i = a[i__4].r * b[i__5].i + a[i__4]
+				.i * b[i__5].r;
+			q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+			temp.r = q__1.r, temp.i = q__1.i;
+/* L130: */
+		    }
+		    if (beta->r == 0.f && beta->i == 0.f) {
+			i__3 = i__ + j * c_dim1;
+			q__1.r = alpha->r * temp.r - alpha->i * temp.i,
+				q__1.i = alpha->r * temp.i + alpha->i *
+				temp.r;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    } else {
+			i__3 = i__ + j * c_dim1;
+			q__2.r = alpha->r * temp.r - alpha->i * temp.i,
+				q__2.i = alpha->r * temp.i + alpha->i *
+				temp.r;
+			i__4 = i__ + j * c_dim1;
+			q__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+				.i, q__3.i = beta->r * c__[i__4].i + beta->i *
+				 c__[i__4].r;
+			q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    }
+/* L140: */
+		}
+/* L150: */
+	    }
+	}
+    } else if (nota) {
+	if (conjb) {
+
+/* Form C := alpha*A*conjg( B' ) + beta*C. */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (beta->r == 0.f && beta->i == 0.f) {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L160: */
+		    }
+		} else if (beta->r != 1.f || beta->i != 0.f) {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			q__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+				.i, q__1.i = beta->r * c__[i__4].i + beta->i *
+				 c__[i__4].r;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L170: */
+		    }
+		}
+		i__2 = *k;
+		for (l = 1; l <= i__2; ++l) {
+		    i__3 = j + l * b_dim1;
+		    if (b[i__3].r != 0.f || b[i__3].i != 0.f) {
+			r_cnjg(&q__2, &b[j + l * b_dim1]);
+			q__1.r = alpha->r * q__2.r - alpha->i * q__2.i,
+				q__1.i = alpha->r * q__2.i + alpha->i *
+				q__2.r;
+			temp.r = q__1.r, temp.i = q__1.i;
+			i__3 = *m;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    i__4 = i__ + j * c_dim1;
+			    i__5 = i__ + j * c_dim1;
+			    i__6 = i__ + l * a_dim1;
+			    q__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+				    q__2.i = temp.r * a[i__6].i + temp.i * a[
+				    i__6].r;
+			    q__1.r = c__[i__5].r + q__2.r, q__1.i = c__[i__5]
+				    .i + q__2.i;
+			    c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L180: */
+			}
+		    }
+/* L190: */
+		}
+/* L200: */
+	    }
+	} else {
+
+/* Form C := alpha*A*B' + beta*C */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (beta->r == 0.f && beta->i == 0.f) {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L210: */
+		    }
+		} else if (beta->r != 1.f || beta->i != 0.f) {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			q__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+				.i, q__1.i = beta->r * c__[i__4].i + beta->i *
+				 c__[i__4].r;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L220: */
+		    }
+		}
+		i__2 = *k;
+		for (l = 1; l <= i__2; ++l) {
+		    i__3 = j + l * b_dim1;
+		    if (b[i__3].r != 0.f || b[i__3].i != 0.f) {
+			i__3 = j + l * b_dim1;
+			q__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+				q__1.i = alpha->r * b[i__3].i + alpha->i * b[
+				i__3].r;
+			temp.r = q__1.r, temp.i = q__1.i;
+			i__3 = *m;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    i__4 = i__ + j * c_dim1;
+			    i__5 = i__ + j * c_dim1;
+			    i__6 = i__ + l * a_dim1;
+			    q__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+				    q__2.i = temp.r * a[i__6].i + temp.i * a[
+				    i__6].r;
+			    q__1.r = c__[i__5].r + q__2.r, q__1.i = c__[i__5]
+				    .i + q__2.i;
+			    c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L230: */
+			}
+		    }
+/* L240: */
+		}
+/* L250: */
+	    }
+	}
+    } else if (conja) {
+	if (conjb) {
+
+/* Form C := alpha*conjg( A' )*conjg( B' ) + beta*C. */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    temp.r = 0.f, temp.i = 0.f;
+		    i__3 = *k;
+		    for (l = 1; l <= i__3; ++l) {
+			r_cnjg(&q__3, &a[l + i__ * a_dim1]);
+			r_cnjg(&q__4, &b[j + l * b_dim1]);
+			q__2.r = q__3.r * q__4.r - q__3.i * q__4.i, q__2.i =
+				q__3.r * q__4.i + q__3.i * q__4.r;
+			q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+			temp.r = q__1.r, temp.i = q__1.i;
+/* L260: */
+		    }
+		    if (beta->r == 0.f && beta->i == 0.f) {
+			i__3 = i__ + j * c_dim1;
+			q__1.r = alpha->r * temp.r - alpha->i * temp.i,
+				q__1.i = alpha->r * temp.i + alpha->i *
+				temp.r;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    } else {
+			i__3 = i__ + j * c_dim1;
+			q__2.r = alpha->r * temp.r - alpha->i * temp.i,
+				q__2.i = alpha->r * temp.i + alpha->i *
+				temp.r;
+			i__4 = i__ + j * c_dim1;
+			q__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+				.i, q__3.i = beta->r * c__[i__4].i + beta->i *
+				 c__[i__4].r;
+			q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    }
+/* L270: */
+		}
+/* L280: */
+	    }
+	} else {
+
+/* Form C := alpha*conjg( A' )*B' + beta*C */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    temp.r = 0.f, temp.i = 0.f;
+		    i__3 = *k;
+		    for (l = 1; l <= i__3; ++l) {
+			r_cnjg(&q__3, &a[l + i__ * a_dim1]);
+			i__4 = j + l * b_dim1;
+			q__2.r = q__3.r * b[i__4].r - q__3.i * b[i__4].i,
+				q__2.i = q__3.r * b[i__4].i + q__3.i * b[i__4]
+				.r;
+			q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+			temp.r = q__1.r, temp.i = q__1.i;
+/* L290: */
+		    }
+		    if (beta->r == 0.f && beta->i == 0.f) {
+			i__3 = i__ + j * c_dim1;
+			q__1.r = alpha->r * temp.r - alpha->i * temp.i,
+				q__1.i = alpha->r * temp.i + alpha->i *
+				temp.r;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    } else {
+			i__3 = i__ + j * c_dim1;
+			q__2.r = alpha->r * temp.r - alpha->i * temp.i,
+				q__2.i = alpha->r * temp.i + alpha->i *
+				temp.r;
+			i__4 = i__ + j * c_dim1;
+			q__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+				.i, q__3.i = beta->r * c__[i__4].i + beta->i *
+				 c__[i__4].r;
+			q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    }
+/* L300: */
+		}
+/* L310: */
+	    }
+	}
+    } else {
+	if (conjb) {
+
+/* Form C := alpha*A'*conjg( B' ) + beta*C */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    temp.r = 0.f, temp.i = 0.f;
+		    i__3 = *k;
+		    for (l = 1; l <= i__3; ++l) {
+			i__4 = l + i__ * a_dim1;
+			r_cnjg(&q__3, &b[j + l * b_dim1]);
+			q__2.r = a[i__4].r * q__3.r - a[i__4].i * q__3.i,
+				q__2.i = a[i__4].r * q__3.i + a[i__4].i *
+				q__3.r;
+			q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+			temp.r = q__1.r, temp.i = q__1.i;
+/* L320: */
+		    }
+		    if (beta->r == 0.f && beta->i == 0.f) {
+			i__3 = i__ + j * c_dim1;
+			q__1.r = alpha->r * temp.r - alpha->i * temp.i,
+				q__1.i = alpha->r * temp.i + alpha->i *
+				temp.r;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    } else {
+			i__3 = i__ + j * c_dim1;
+			q__2.r = alpha->r * temp.r - alpha->i * temp.i,
+				q__2.i = alpha->r * temp.i + alpha->i *
+				temp.r;
+			i__4 = i__ + j * c_dim1;
+			q__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+				.i, q__3.i = beta->r * c__[i__4].i + beta->i *
+				 c__[i__4].r;
+			q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    }
+/* L330: */
+		}
+/* L340: */
+	    }
+	} else {
+
+/* Form C := alpha*A'*B' + beta*C */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    temp.r = 0.f, temp.i = 0.f;
+		    i__3 = *k;
+		    for (l = 1; l <= i__3; ++l) {
+			i__4 = l + i__ * a_dim1;
+			i__5 = j + l * b_dim1;
+			q__2.r = a[i__4].r * b[i__5].r - a[i__4].i * b[i__5]
+				.i, q__2.i = a[i__4].r * b[i__5].i + a[i__4]
+				.i * b[i__5].r;
+			q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+			temp.r = q__1.r, temp.i = q__1.i;
+/* L350: */
+		    }
+		    if (beta->r == 0.f && beta->i == 0.f) {
+			i__3 = i__ + j * c_dim1;
+			q__1.r = alpha->r * temp.r - alpha->i * temp.i,
+				q__1.i = alpha->r * temp.i + alpha->i *
+				temp.r;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    } else {
+			i__3 = i__ + j * c_dim1;
+			q__2.r = alpha->r * temp.r - alpha->i * temp.i,
+				q__2.i = alpha->r * temp.i + alpha->i *
+				temp.r;
+			i__4 = i__ + j * c_dim1;
+			q__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+				.i, q__3.i = beta->r * c__[i__4].i + beta->i *
+				 c__[i__4].r;
+			q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    }
+/* L360: */
+		}
+/* L370: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of CGEMM . */
+
+} /* cgemm_ */
diff --git a/contrib/libs/cblas/cgemv.c b/contrib/libs/cblas/cgemv.c
new file mode 100644
index 0000000000..bca57983f5
--- /dev/null
+++ b/contrib/libs/cblas/cgemv.c
@@ -0,0 +1,411 @@
+/* cgemv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cgemv_(char *trans, integer *m, integer *n, complex *
+ alpha, complex *a, integer *lda, complex *x, integer *incx, complex *
+ beta, complex *y, integer *incy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, ix, iy, jx, jy, kx, ky, info;
+ complex temp;
+ integer lenx, leny;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noconj;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGEMV performs one of the matrix-vector operations */
+
+/* y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y, or */
+
+/* y := alpha*conjg( A' )*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are vectors and A is an */
+/* m by n matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' y := alpha*A*x + beta*y. */
+
+/* TRANS = 'T' or 't' y := alpha*A'*x + beta*y. */
+
+/* TRANS = 'C' or 'c' y := alpha*conjg( A' )*x + beta*y. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix A. */
+/* M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX array of DIMENSION ( LDA, n ). */
+/* Before entry, the leading m by n part of the array A must */
+/* contain the matrix of coefficients. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+/* X - COMPLEX array of DIMENSION at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n' */
+/* and at least */
+/* ( 1 + ( m - 1 )*abs( INCX ) ) otherwise. */
+/* Before entry, the incremented array X must contain the */
+/* vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA - COMPLEX . */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then Y need not be set on input. */
+/* Unchanged on exit. */
+
+/* Y - COMPLEX array of DIMENSION at least */
+/* ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n' */
+/* and at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ) otherwise. */
+/* Before entry with BETA non-zero, the incremented array Y */
+/* must contain the vector y. On exit, Y is overwritten by the */
+/* updated vector y. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --y;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")
+ ) {
+ info = 1;
+ } else if (*m < 0) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*lda < max(1,*m)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ } else if (*incy == 0) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("CGEMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0 || alpha->r == 0.f && alpha->i == 0.f && (beta->r
+ == 1.f && beta->i == 0.f)) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+
+/* Set LENX and LENY, the lengths of the vectors x and y, and set */
+/* up the start points in X and Y. */
+
+ if (lsame_(trans, "N")) {
+ lenx = *n;
+ leny = *m;
+ } else {
+ lenx = *m;
+ leny = *n;
+ }
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (lenx - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (leny - 1) * *incy;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+/* First form y := beta*y. */
+
+ if (beta->r != 1.f || beta->i != 0.f) {
+ if (*incy == 1) {
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ y[i__2].r = 0.f, y[i__2].i = 0.f;
+/* L10: */
+ }
+ } else {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ y[i__2].r = 0.f, y[i__2].i = 0.f;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = iy;
+ q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (alpha->r == 0.f && alpha->i == 0.f) {
+ return 0;
+ }
+ if (lsame_(trans, "N")) {
+
+/* Form y := alpha*A*x + y. */
+
+ jx = kx;
+ if (*incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+ i__2 = jx;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ q__2.i = temp.r * a[i__5].i + temp.i * a[i__5]
+ .r;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i +
+ q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+/* L50: */
+ }
+ }
+ jx += *incx;
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+ i__2 = jx;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ iy = ky;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = iy;
+ i__4 = iy;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ q__2.i = temp.r * a[i__5].i + temp.i * a[i__5]
+ .r;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i +
+ q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+ iy += *incy;
+/* L70: */
+ }
+ }
+ jx += *incx;
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y := alpha*A'*x + y or y := alpha*conjg( A' )*x + y. */
+
+ jy = ky;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp.r = 0.f, temp.i = 0.f;
+ if (noconj) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__;
+ q__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4]
+ .i, q__2.i = a[i__3].r * x[i__4].i + a[i__3]
+ .i * x[i__4].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L90: */
+ }
+ } else {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__3 = i__;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i,
+ q__2.i = q__3.r * x[i__3].i + q__3.i * x[i__3]
+ .r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L100: */
+ }
+ }
+ i__2 = jy;
+ i__3 = jy;
+ q__2.r = alpha->r * temp.r - alpha->i * temp.i, q__2.i =
+ alpha->r * temp.i + alpha->i * temp.r;
+ q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ jy += *incy;
+/* L110: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp.r = 0.f, temp.i = 0.f;
+ ix = kx;
+ if (noconj) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = ix;
+ q__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4]
+ .i, q__2.i = a[i__3].r * x[i__4].i + a[i__3]
+ .i * x[i__4].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix += *incx;
+/* L120: */
+ }
+ } else {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__3 = ix;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i,
+ q__2.i = q__3.r * x[i__3].i + q__3.i * x[i__3]
+ .r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix += *incx;
+/* L130: */
+ }
+ }
+ i__2 = jy;
+ i__3 = jy;
+ q__2.r = alpha->r * temp.r - alpha->i * temp.i, q__2.i =
+ alpha->r * temp.i + alpha->i * temp.r;
+ q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ jy += *incy;
+/* L140: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CGEMV . */
+
+} /* cgemv_ */
diff --git a/contrib/libs/cblas/cgerc.c b/contrib/libs/cblas/cgerc.c
new file mode 100644
index 0000000000..378b9a716f
--- /dev/null
+++ b/contrib/libs/cblas/cgerc.c
@@ -0,0 +1,217 @@
+/* cgerc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cgerc_(integer *m, integer *n, complex *alpha, complex *
+ x, integer *incx, complex *y, integer *incy, complex *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, ix, jy, kx, info;
+ complex temp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGERC performs the rank 1 operation */
+
+/* A := alpha*x*conjg( y' ) + A, */
+
+/* where alpha is a scalar, x is an m element vector, y is an n element */
+/* vector and A is an m by n matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix A. */
+/* M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - COMPLEX array of dimension at least */
+/* ( 1 + ( m - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the m */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* Y - COMPLEX array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. */
+/* Unchanged on exit. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX array of DIMENSION ( LDA, n ). */
+/* Before entry, the leading m by n part of the array A must */
+/* contain the matrix of coefficients. On exit, A is */
+/* overwritten by the updated matrix. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --x;
+ --y;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (*m < 0) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*incy == 0) {
+ info = 7;
+ } else if (*lda < max(1,*m)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("CGERC ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0 || alpha->r == 0.f && alpha->i == 0.f) {
+ return 0;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+ if (*incy > 0) {
+ jy = 1;
+ } else {
+ jy = 1 - (*n - 1) * *incy;
+ }
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jy;
+ if (y[i__2].r != 0.f || y[i__2].i != 0.f) {
+ r_cnjg(&q__2, &y[jy]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i, q__1.i =
+ alpha->r * q__2.i + alpha->i * q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, q__2.i =
+ x[i__5].r * temp.i + x[i__5].i * temp.r;
+ q__1.r = a[i__4].r + q__2.r, q__1.i = a[i__4].i + q__2.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L10: */
+ }
+ }
+ jy += *incy;
+/* L20: */
+ }
+ } else {
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*m - 1) * *incx;
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jy;
+ if (y[i__2].r != 0.f || y[i__2].i != 0.f) {
+ r_cnjg(&q__2, &y[jy]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i, q__1.i =
+ alpha->r * q__2.i + alpha->i * q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix = kx;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, q__2.i =
+ x[i__5].r * temp.i + x[i__5].i * temp.r;
+ q__1.r = a[i__4].r + q__2.r, q__1.i = a[i__4].i + q__2.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ix += *incx;
+/* L30: */
+ }
+ }
+ jy += *incy;
+/* L40: */
+ }
+ }
+
+ return 0;
+
+/* End of CGERC . */
+
+} /* cgerc_ */
diff --git a/contrib/libs/cblas/cgeru.c b/contrib/libs/cblas/cgeru.c
new file mode 100644
index 0000000000..ad87262030
--- /dev/null
+++ b/contrib/libs/cblas/cgeru.c
@@ -0,0 +1,214 @@
+/* cgeru.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cgeru_(integer *m, integer *n, complex *alpha, complex *
+ x, integer *incx, complex *y, integer *incy, complex *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1, q__2;
+
+ /* Local variables */
+ integer i__, j, ix, jy, kx, info;
+ complex temp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGERU performs the rank 1 operation */
+
+/* A := alpha*x*y' + A, */
+
+/* where alpha is a scalar, x is an m element vector, y is an n element */
+/* vector and A is an m by n matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix A. */
+/* M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - COMPLEX array of dimension at least */
+/* ( 1 + ( m - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the m */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* Y - COMPLEX array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. */
+/* Unchanged on exit. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX array of DIMENSION ( LDA, n ). */
+/* Before entry, the leading m by n part of the array A must */
+/* contain the matrix of coefficients. On exit, A is */
+/* overwritten by the updated matrix. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --x;
+ --y;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (*m < 0) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*incy == 0) {
+ info = 7;
+ } else if (*lda < max(1,*m)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("CGERU ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0 || alpha->r == 0.f && alpha->i == 0.f) {
+ return 0;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+ if (*incy > 0) {
+ jy = 1;
+ } else {
+ jy = 1 - (*n - 1) * *incy;
+ }
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jy;
+ if (y[i__2].r != 0.f || y[i__2].i != 0.f) {
+ i__2 = jy;
+ q__1.r = alpha->r * y[i__2].r - alpha->i * y[i__2].i, q__1.i =
+ alpha->r * y[i__2].i + alpha->i * y[i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, q__2.i =
+ x[i__5].r * temp.i + x[i__5].i * temp.r;
+ q__1.r = a[i__4].r + q__2.r, q__1.i = a[i__4].i + q__2.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L10: */
+ }
+ }
+ jy += *incy;
+/* L20: */
+ }
+ } else {
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*m - 1) * *incx;
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jy;
+ if (y[i__2].r != 0.f || y[i__2].i != 0.f) {
+ i__2 = jy;
+ q__1.r = alpha->r * y[i__2].r - alpha->i * y[i__2].i, q__1.i =
+ alpha->r * y[i__2].i + alpha->i * y[i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix = kx;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, q__2.i =
+ x[i__5].r * temp.i + x[i__5].i * temp.r;
+ q__1.r = a[i__4].r + q__2.r, q__1.i = a[i__4].i + q__2.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ix += *incx;
+/* L30: */
+ }
+ }
+ jy += *incy;
+/* L40: */
+ }
+ }
+
+ return 0;
+
+/* End of CGERU . */
+
+} /* cgeru_ */
diff --git a/contrib/libs/cblas/chbmv.c b/contrib/libs/cblas/chbmv.c
new file mode 100644
index 0000000000..81e5da7483
--- /dev/null
+++ b/contrib/libs/cblas/chbmv.c
@@ -0,0 +1,483 @@
+/* chbmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int chbmv_(char *uplo, integer *n, integer *k, complex *
+ alpha, complex *a, integer *lda, complex *x, integer *incx, complex *
+ beta, complex *y, integer *incy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, l, ix, iy, jx, jy, kx, ky, info;
+ complex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ integer kplus1;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHBMV performs the matrix-vector operation */
+
+/* y := alpha*A*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are n element vectors and */
+/* A is an n by n hermitian band matrix, with k super-diagonals. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the band matrix A is being supplied as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' The upper triangular part of A is */
+/* being supplied. */
+
+/* UPLO = 'L' or 'l' The lower triangular part of A is */
+/* being supplied. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry, K specifies the number of super-diagonals of the */
+/* matrix A. K must satisfy 0 .le. K. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */
+/* by n part of the array A must contain the upper triangular */
+/* band part of the hermitian matrix, supplied column by */
+/* column, with the leading diagonal of the matrix in row */
+/* ( k + 1 ) of the array, the first super-diagonal starting at */
+/* position 2 in row k, and so on. The top left k by k triangle */
+/* of the array A is not referenced. */
+/* The following program segment will transfer the upper */
+/* triangular part of a hermitian band matrix from conventional */
+/* full matrix storage to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = K + 1 - J */
+/* DO 10, I = MAX( 1, J - K ), J */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */
+/* by n part of the array A must contain the lower triangular */
+/* band part of the hermitian matrix, supplied column by */
+/* column, with the leading diagonal of the matrix in row 1 of */
+/* the array, the first sub-diagonal starting at position 1 in */
+/* row 2, and so on. The bottom right k by k triangle of the */
+/* array A is not referenced. */
+/* The following program segment will transfer the lower */
+/* triangular part of a hermitian band matrix from conventional */
+/* full matrix storage to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = 1 - J */
+/* DO 10, I = J, MIN( N, J + K ) */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set and are assumed to be zero. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* ( k + 1 ). */
+/* Unchanged on exit. */
+
+/* X - COMPLEX array of DIMENSION at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the */
+/* vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA - COMPLEX . */
+/* On entry, BETA specifies the scalar beta. */
+/* Unchanged on exit. */
+
+/* Y - COMPLEX array of DIMENSION at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the */
+/* vector y. On exit, Y is overwritten by the updated vector y. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --y;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*k < 0) {
+ info = 3;
+ } else if (*lda < *k + 1) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ } else if (*incy == 0) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("CHBMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || alpha->r == 0.f && alpha->i == 0.f && (beta->r == 1.f &&
+ beta->i == 0.f)) {
+ return 0;
+ }
+
+/* Set up the start points in X and Y. */
+
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+
+/* Start the operations. In this version the elements of the array A */
+/* are accessed sequentially with one pass through A. */
+
+/* First form y := beta*y. */
+
+ if (beta->r != 1.f || beta->i != 0.f) {
+ if (*incy == 1) {
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ y[i__2].r = 0.f, y[i__2].i = 0.f;
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ y[i__2].r = 0.f, y[i__2].i = 0.f;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = iy;
+ q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (alpha->r == 0.f && alpha->i == 0.f) {
+ return 0;
+ }
+ if (lsame_(uplo, "U")) {
+
+/* Form y when upper triangle of A is stored. */
+
+ kplus1 = *k + 1;
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ temp2.r = 0.f, temp2.i = 0.f;
+ l = kplus1 - j;
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__4 = j - 1;
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ i__5 = l + i__ + j * a_dim1;
+ q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ r_cnjg(&q__3, &a[l + i__ + j * a_dim1]);
+ i__2 = i__;
+ q__2.r = q__3.r * x[i__2].r - q__3.i * x[i__2].i, q__2.i =
+ q__3.r * x[i__2].i + q__3.i * x[i__2].r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+/* L50: */
+ }
+ i__4 = j;
+ i__2 = j;
+ i__3 = kplus1 + j * a_dim1;
+ r__1 = a[i__3].r;
+ q__3.r = r__1 * temp1.r, q__3.i = r__1 * temp1.i;
+ q__2.r = y[i__2].r + q__3.r, q__2.i = y[i__2].i + q__3.i;
+ q__4.r = alpha->r * temp2.r - alpha->i * temp2.i, q__4.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ y[i__4].r = q__1.r, y[i__4].i = q__1.i;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__4 = jx;
+ q__1.r = alpha->r * x[i__4].r - alpha->i * x[i__4].i, q__1.i =
+ alpha->r * x[i__4].i + alpha->i * x[i__4].r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ temp2.r = 0.f, temp2.i = 0.f;
+ ix = kx;
+ iy = ky;
+ l = kplus1 - j;
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *k;
+ i__3 = j - 1;
+ for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+ i__4 = iy;
+ i__2 = iy;
+ i__5 = l + i__ + j * a_dim1;
+ q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ q__1.r = y[i__2].r + q__2.r, q__1.i = y[i__2].i + q__2.i;
+ y[i__4].r = q__1.r, y[i__4].i = q__1.i;
+ r_cnjg(&q__3, &a[l + i__ + j * a_dim1]);
+ i__4 = ix;
+ q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i, q__2.i =
+ q__3.r * x[i__4].i + q__3.i * x[i__4].r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L70: */
+ }
+ i__3 = jy;
+ i__4 = jy;
+ i__2 = kplus1 + j * a_dim1;
+ r__1 = a[i__2].r;
+ q__3.r = r__1 * temp1.r, q__3.i = r__1 * temp1.i;
+ q__2.r = y[i__4].r + q__3.r, q__2.i = y[i__4].i + q__3.i;
+ q__4.r = alpha->r * temp2.r - alpha->i * temp2.i, q__4.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+ jx += *incx;
+ jy += *incy;
+ if (j > *k) {
+ kx += *incx;
+ ky += *incy;
+ }
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y when lower triangle of A is stored. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__3 = j;
+ q__1.r = alpha->r * x[i__3].r - alpha->i * x[i__3].i, q__1.i =
+ alpha->r * x[i__3].i + alpha->i * x[i__3].r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ temp2.r = 0.f, temp2.i = 0.f;
+ i__3 = j;
+ i__4 = j;
+ i__2 = j * a_dim1 + 1;
+ r__1 = a[i__2].r;
+ q__2.r = r__1 * temp1.r, q__2.i = r__1 * temp1.i;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+ l = 1 - j;
+/* Computing MIN */
+ i__4 = *n, i__2 = j + *k;
+ i__3 = min(i__4,i__2);
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ i__2 = i__;
+ i__5 = l + i__ + j * a_dim1;
+ q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ q__1.r = y[i__2].r + q__2.r, q__1.i = y[i__2].i + q__2.i;
+ y[i__4].r = q__1.r, y[i__4].i = q__1.i;
+ r_cnjg(&q__3, &a[l + i__ + j * a_dim1]);
+ i__4 = i__;
+ q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i, q__2.i =
+ q__3.r * x[i__4].i + q__3.i * x[i__4].r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+/* L90: */
+ }
+ i__3 = j;
+ i__4 = j;
+ q__2.r = alpha->r * temp2.r - alpha->i * temp2.i, q__2.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+/* L100: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__3 = jx;
+ q__1.r = alpha->r * x[i__3].r - alpha->i * x[i__3].i, q__1.i =
+ alpha->r * x[i__3].i + alpha->i * x[i__3].r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ temp2.r = 0.f, temp2.i = 0.f;
+ i__3 = jy;
+ i__4 = jy;
+ i__2 = j * a_dim1 + 1;
+ r__1 = a[i__2].r;
+ q__2.r = r__1 * temp1.r, q__2.i = r__1 * temp1.i;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+ l = 1 - j;
+ ix = jx;
+ iy = jy;
+/* Computing MIN */
+ i__4 = *n, i__2 = j + *k;
+ i__3 = min(i__4,i__2);
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ ix += *incx;
+ iy += *incy;
+ i__4 = iy;
+ i__2 = iy;
+ i__5 = l + i__ + j * a_dim1;
+ q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ q__1.r = y[i__2].r + q__2.r, q__1.i = y[i__2].i + q__2.i;
+ y[i__4].r = q__1.r, y[i__4].i = q__1.i;
+ r_cnjg(&q__3, &a[l + i__ + j * a_dim1]);
+ i__4 = ix;
+ q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i, q__2.i =
+ q__3.r * x[i__4].i + q__3.i * x[i__4].r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+/* L110: */
+ }
+ i__3 = jy;
+ i__4 = jy;
+ q__2.r = alpha->r * temp2.r - alpha->i * temp2.i, q__2.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+ jx += *incx;
+ jy += *incy;
+/* L120: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CHBMV . */
+
+} /* chbmv_ */
diff --git a/contrib/libs/cblas/chemm.c b/contrib/libs/cblas/chemm.c
new file mode 100644
index 0000000000..7404e5da97
--- /dev/null
+++ b/contrib/libs/cblas/chemm.c
@@ -0,0 +1,495 @@
+/* chemm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int chemm_(char *side, char *uplo, integer *m, integer *n,
+ complex *alpha, complex *a, integer *lda, complex *b, integer *ldb,
+ complex *beta, complex *c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5, i__6;
+ real r__1;
+ complex q__1, q__2, q__3, q__4, q__5;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, k, info;
+ complex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ integer nrowa;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHEMM performs one of the matrix-matrix operations */
+
+/* C := alpha*A*B + beta*C, */
+
+/* or */
+
+/* C := alpha*B*A + beta*C, */
+
+/* where alpha and beta are scalars, A is an hermitian matrix and B and */
+/* C are m by n matrices. */
+
+/* Arguments */
+/* ========== */
+
+/* SIDE - CHARACTER*1. */
+/* On entry, SIDE specifies whether the hermitian matrix A */
+/* appears on the left or right in the operation as follows: */
+
+/* SIDE = 'L' or 'l' C := alpha*A*B + beta*C, */
+
+/* SIDE = 'R' or 'r' C := alpha*B*A + beta*C, */
+
+/* Unchanged on exit. */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the hermitian matrix A is to be */
+/* referenced as follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of the */
+/* hermitian matrix is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of the */
+/* hermitian matrix is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix C. */
+/* M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix C. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX array of DIMENSION ( LDA, ka ), where ka is */
+/* m when SIDE = 'L' or 'l' and is n otherwise. */
+/* Before entry with SIDE = 'L' or 'l', the m by m part of */
+/* the array A must contain the hermitian matrix, such that */
+/* when UPLO = 'U' or 'u', the leading m by m upper triangular */
+/* part of the array A must contain the upper triangular part */
+/* of the hermitian matrix and the strictly lower triangular */
+/* part of A is not referenced, and when UPLO = 'L' or 'l', */
+/* the leading m by m lower triangular part of the array A */
+/* must contain the lower triangular part of the hermitian */
+/* matrix and the strictly upper triangular part of A is not */
+/* referenced. */
+/* Before entry with SIDE = 'R' or 'r', the n by n part of */
+/* the array A must contain the hermitian matrix, such that */
+/* when UPLO = 'U' or 'u', the leading n by n upper triangular */
+/* part of the array A must contain the upper triangular part */
+/* of the hermitian matrix and the strictly lower triangular */
+/* part of A is not referenced, and when UPLO = 'L' or 'l', */
+/* the leading n by n lower triangular part of the array A */
+/* must contain the lower triangular part of the hermitian */
+/* matrix and the strictly upper triangular part of A is not */
+/* referenced. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set, they are assumed to be zero. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When SIDE = 'L' or 'l' then */
+/* LDA must be at least max( 1, m ), otherwise LDA must be at */
+/* least max( 1, n ). */
+/* Unchanged on exit. */
+
+/* B - COMPLEX array of DIMENSION ( LDB, n ). */
+/* Before entry, the leading m by n part of the array B must */
+/* contain the matrix B. */
+/* Unchanged on exit. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. LDB must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+/* BETA - COMPLEX . */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then C need not be set on input. */
+/* Unchanged on exit. */
+
+/* C - COMPLEX array of DIMENSION ( LDC, n ). */
+/* Before entry, the leading m by n part of the array C must */
+/* contain the matrix C, except when beta is zero, in which */
+/* case C need not be set on entry. */
+/* On exit, the array C is overwritten by the m by n updated */
+/* matrix. */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Set NROWA as the number of rows of A. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(side, "L")) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ upper = lsame_(uplo, "U");
+
+/* Test the input parameters. */
+
+ info = 0;
+ if (! lsame_(side, "L") && ! lsame_(side, "R")) {
+ info = 1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ info = 2;
+ } else if (*m < 0) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldb < max(1,*m)) {
+ info = 9;
+ } else if (*ldc < max(1,*m)) {
+ info = 12;
+ }
+ if (info != 0) {
+ xerbla_("CHEMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0 || alpha->r == 0.f && alpha->i == 0.f && (beta->r
+ == 1.f && beta->i == 0.f)) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0.f && alpha->i == 0.f) {
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4].i,
+ q__1.i = beta->r * c__[i__4].i + beta->i * c__[
+ i__4].r;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(side, "L")) {
+
+/* Form C := alpha*A*B + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ q__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+ q__1.i = alpha->r * b[i__3].i + alpha->i * b[i__3]
+ .r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ temp2.r = 0.f, temp2.i = 0.f;
+ i__3 = i__ - 1;
+ for (k = 1; k <= i__3; ++k) {
+ i__4 = k + j * c_dim1;
+ i__5 = k + j * c_dim1;
+ i__6 = k + i__ * a_dim1;
+ q__2.r = temp1.r * a[i__6].r - temp1.i * a[i__6].i,
+ q__2.i = temp1.r * a[i__6].i + temp1.i * a[
+ i__6].r;
+ q__1.r = c__[i__5].r + q__2.r, q__1.i = c__[i__5].i +
+ q__2.i;
+ c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+ i__4 = k + j * b_dim1;
+ r_cnjg(&q__3, &a[k + i__ * a_dim1]);
+ q__2.r = b[i__4].r * q__3.r - b[i__4].i * q__3.i,
+ q__2.i = b[i__4].r * q__3.i + b[i__4].i *
+ q__3.r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+/* L50: */
+ }
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + i__ * a_dim1;
+ r__1 = a[i__4].r;
+ q__2.r = r__1 * temp1.r, q__2.i = r__1 * temp1.i;
+ q__3.r = alpha->r * temp2.r - alpha->i * temp2.i,
+ q__3.i = alpha->r * temp2.i + alpha->i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, q__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ i__5 = i__ + i__ * a_dim1;
+ r__1 = a[i__5].r;
+ q__4.r = r__1 * temp1.r, q__4.i = r__1 * temp1.i;
+ q__2.r = q__3.r + q__4.r, q__2.i = q__3.i + q__4.i;
+ q__5.r = alpha->r * temp2.r - alpha->i * temp2.i,
+ q__5.i = alpha->r * temp2.i + alpha->i *
+ temp2.r;
+ q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ }
+/* L60: */
+ }
+/* L70: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ i__2 = i__ + j * b_dim1;
+ q__1.r = alpha->r * b[i__2].r - alpha->i * b[i__2].i,
+ q__1.i = alpha->r * b[i__2].i + alpha->i * b[i__2]
+ .r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ temp2.r = 0.f, temp2.i = 0.f;
+ i__2 = *m;
+ for (k = i__ + 1; k <= i__2; ++k) {
+ i__3 = k + j * c_dim1;
+ i__4 = k + j * c_dim1;
+ i__5 = k + i__ * a_dim1;
+ q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ q__2.i = temp1.r * a[i__5].i + temp1.i * a[
+ i__5].r;
+ q__1.r = c__[i__4].r + q__2.r, q__1.i = c__[i__4].i +
+ q__2.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ i__3 = k + j * b_dim1;
+ r_cnjg(&q__3, &a[k + i__ * a_dim1]);
+ q__2.r = b[i__3].r * q__3.r - b[i__3].i * q__3.i,
+ q__2.i = b[i__3].r * q__3.i + b[i__3].i *
+ q__3.r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+/* L80: */
+ }
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__2 = i__ + j * c_dim1;
+ i__3 = i__ + i__ * a_dim1;
+ r__1 = a[i__3].r;
+ q__2.r = r__1 * temp1.r, q__2.i = r__1 * temp1.i;
+ q__3.r = alpha->r * temp2.r - alpha->i * temp2.i,
+ q__3.i = alpha->r * temp2.i + alpha->i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ } else {
+ i__2 = i__ + j * c_dim1;
+ i__3 = i__ + j * c_dim1;
+ q__3.r = beta->r * c__[i__3].r - beta->i * c__[i__3]
+ .i, q__3.i = beta->r * c__[i__3].i + beta->i *
+ c__[i__3].r;
+ i__4 = i__ + i__ * a_dim1;
+ r__1 = a[i__4].r;
+ q__4.r = r__1 * temp1.r, q__4.i = r__1 * temp1.i;
+ q__2.r = q__3.r + q__4.r, q__2.i = q__3.i + q__4.i;
+ q__5.r = alpha->r * temp2.r - alpha->i * temp2.i,
+ q__5.i = alpha->r * temp2.i + alpha->i *
+ temp2.r;
+ q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ }
+/* L90: */
+ }
+/* L100: */
+ }
+ }
+ } else {
+
+/* Form C := alpha*B*A + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + j * a_dim1;
+ r__1 = a[i__2].r;
+ q__1.r = r__1 * alpha->r, q__1.i = r__1 * alpha->i;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * b_dim1;
+ q__1.r = temp1.r * b[i__4].r - temp1.i * b[i__4].i,
+ q__1.i = temp1.r * b[i__4].i + temp1.i * b[i__4]
+ .r;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L110: */
+ }
+ } else {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__2.r = beta->r * c__[i__4].r - beta->i * c__[i__4].i,
+ q__2.i = beta->r * c__[i__4].i + beta->i * c__[
+ i__4].r;
+ i__5 = i__ + j * b_dim1;
+ q__3.r = temp1.r * b[i__5].r - temp1.i * b[i__5].i,
+ q__3.i = temp1.r * b[i__5].i + temp1.i * b[i__5]
+ .r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L120: */
+ }
+ }
+ i__2 = j - 1;
+ for (k = 1; k <= i__2; ++k) {
+ if (upper) {
+ i__3 = k + j * a_dim1;
+ q__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ q__1.i = alpha->r * a[i__3].i + alpha->i * a[i__3]
+ .r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ } else {
+ r_cnjg(&q__2, &a[j + k * a_dim1]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i, q__1.i =
+ alpha->r * q__2.i + alpha->i * q__2.r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ }
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + k * b_dim1;
+ q__2.r = temp1.r * b[i__6].r - temp1.i * b[i__6].i,
+ q__2.i = temp1.r * b[i__6].i + temp1.i * b[i__6]
+ .r;
+ q__1.r = c__[i__5].r + q__2.r, q__1.i = c__[i__5].i +
+ q__2.i;
+ c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L130: */
+ }
+/* L140: */
+ }
+ i__2 = *n;
+ for (k = j + 1; k <= i__2; ++k) {
+ if (upper) {
+ r_cnjg(&q__2, &a[j + k * a_dim1]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i, q__1.i =
+ alpha->r * q__2.i + alpha->i * q__2.r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ } else {
+ i__3 = k + j * a_dim1;
+ q__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ q__1.i = alpha->r * a[i__3].i + alpha->i * a[i__3]
+ .r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ }
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + k * b_dim1;
+ q__2.r = temp1.r * b[i__6].r - temp1.i * b[i__6].i,
+ q__2.i = temp1.r * b[i__6].i + temp1.i * b[i__6]
+ .r;
+ q__1.r = c__[i__5].r + q__2.r, q__1.i = c__[i__5].i +
+ q__2.i;
+ c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L150: */
+ }
+/* L160: */
+ }
+/* L170: */
+ }
+ }
+
+ return 0;
+
+/* End of CHEMM . */
+
+} /* chemm_ */
diff --git a/contrib/libs/cblas/chemv.c b/contrib/libs/cblas/chemv.c
new file mode 100644
index 0000000000..163cecaf95
--- /dev/null
+++ b/contrib/libs/cblas/chemv.c
@@ -0,0 +1,433 @@
+/* chemv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int chemv_(char *uplo, integer *n, complex *alpha, complex *
+ a, integer *lda, complex *x, integer *incx, complex *beta, complex *y,
+ integer *incy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, ix, iy, jx, jy, kx, ky, info;
+ complex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHEMV performs the matrix-vector operation */
+
+/* y := alpha*A*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are n element vectors and */
+/* A is an n by n hermitian matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array A is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of A */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of A */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular part of the hermitian matrix and the strictly */
+/* lower triangular part of A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular part of the hermitian matrix and the strictly */
+/* upper triangular part of A is not referenced. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set and are assumed to be zero. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+/* X - COMPLEX array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA - COMPLEX . */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then Y need not be set on input. */
+/* Unchanged on exit. */
+
+/* Y - COMPLEX array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. On exit, Y is overwritten by the updated */
+/* vector y. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --y;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*lda < max(1,*n)) {
+ info = 5;
+ } else if (*incx == 0) {
+ info = 7;
+ } else if (*incy == 0) {
+ info = 10;
+ }
+ if (info != 0) {
+ xerbla_("CHEMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || alpha->r == 0.f && alpha->i == 0.f && (beta->r == 1.f &&
+ beta->i == 0.f)) {
+ return 0;
+ }
+
+/* Set up the start points in X and Y. */
+
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through the triangular part */
+/* of A. */
+
+/* First form y := beta*y. */
+
+ if (beta->r != 1.f || beta->i != 0.f) {
+ if (*incy == 1) {
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ y[i__2].r = 0.f, y[i__2].i = 0.f;
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ y[i__2].r = 0.f, y[i__2].i = 0.f;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = iy;
+ q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (alpha->r == 0.f && alpha->i == 0.f) {
+ return 0;
+ }
+ if (lsame_(uplo, "U")) {
+
+/* Form y when A is stored in upper triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ temp2.r = 0.f, temp2.i = 0.f;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__3 = i__;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i, q__2.i =
+ q__3.r * x[i__3].i + q__3.i * x[i__3].r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+/* L50: */
+ }
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ r__1 = a[i__4].r;
+ q__3.r = r__1 * temp1.r, q__3.i = r__1 * temp1.i;
+ q__2.r = y[i__3].r + q__3.r, q__2.i = y[i__3].i + q__3.i;
+ q__4.r = alpha->r * temp2.r - alpha->i * temp2.i, q__4.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ temp2.r = 0.f, temp2.i = 0.f;
+ ix = kx;
+ iy = ky;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = iy;
+ i__4 = iy;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__3 = ix;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i, q__2.i =
+ q__3.r * x[i__3].i + q__3.i * x[i__3].r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L70: */
+ }
+ i__2 = jy;
+ i__3 = jy;
+ i__4 = j + j * a_dim1;
+ r__1 = a[i__4].r;
+ q__3.r = r__1 * temp1.r, q__3.i = r__1 * temp1.i;
+ q__2.r = y[i__3].r + q__3.r, q__2.i = y[i__3].i + q__3.i;
+ q__4.r = alpha->r * temp2.r - alpha->i * temp2.i, q__4.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ jx += *incx;
+ jy += *incy;
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y when A is stored in lower triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ temp2.r = 0.f, temp2.i = 0.f;
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ r__1 = a[i__4].r;
+ q__2.r = r__1 * temp1.r, q__2.i = r__1 * temp1.i;
+ q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__3 = i__;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i, q__2.i =
+ q__3.r * x[i__3].i + q__3.i * x[i__3].r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+/* L90: */
+ }
+ i__2 = j;
+ i__3 = j;
+ q__2.r = alpha->r * temp2.r - alpha->i * temp2.i, q__2.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+/* L100: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ temp2.r = 0.f, temp2.i = 0.f;
+ i__2 = jy;
+ i__3 = jy;
+ i__4 = j + j * a_dim1;
+ r__1 = a[i__4].r;
+ q__2.r = r__1 * temp1.r, q__2.i = r__1 * temp1.i;
+ q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ ix = jx;
+ iy = jy;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ iy += *incy;
+ i__3 = iy;
+ i__4 = iy;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__3 = ix;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i, q__2.i =
+ q__3.r * x[i__3].i + q__3.i * x[i__3].r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+/* L110: */
+ }
+ i__2 = jy;
+ i__3 = jy;
+ q__2.r = alpha->r * temp2.r - alpha->i * temp2.i, q__2.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ jx += *incx;
+ jy += *incy;
+/* L120: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CHEMV . */
+
+} /* chemv_ */
diff --git a/contrib/libs/cblas/cher.c b/contrib/libs/cblas/cher.c
new file mode 100644
index 0000000000..933de87e04
--- /dev/null
+++ b/contrib/libs/cblas/cher.c
@@ -0,0 +1,338 @@
+/* cher.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cher_(char *uplo, integer *n, real *alpha, complex *x,
+ integer *incx, complex *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, ix, jx, kx, info;
+ complex temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHER performs the hermitian rank 1 operation */
+
+/* A := alpha*x*conjg( x' ) + A, */
+
+/* where alpha is a real scalar, x is an n element vector and A is an */
+/* n by n hermitian matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array A is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of A */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of A */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - REAL . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - COMPLEX array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular part of the hermitian matrix and the strictly */
+/* lower triangular part of A is not referenced. On exit, the */
+/* upper triangular part of the array A is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular part of the hermitian matrix and the strictly */
+/* upper triangular part of A is not referenced. On exit, the */
+/* lower triangular part of the array A is overwritten by the */
+/* lower triangular part of the updated matrix. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set, they are assumed to be zero, and on exit they */
+/* are set to zero. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --x;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*lda < max(1,*n)) {
+ info = 7;
+ }
+ if (info != 0) {
+ xerbla_("CHER ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || *alpha == 0.f) {
+ return 0;
+ }
+
+/* Set the start point in X if the increment is not unity. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through the triangular part */
+/* of A. */
+
+ if (lsame_(uplo, "U")) {
+
+/* Form A when A is stored in upper triangle. */
+
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+ r_cnjg(&q__2, &x[j]);
+ q__1.r = *alpha * q__2.r, q__1.i = *alpha * q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ q__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ q__1.r = a[i__4].r + q__2.r, q__1.i = a[i__4].i +
+ q__2.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L10: */
+ }
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = j;
+ q__1.r = x[i__4].r * temp.r - x[i__4].i * temp.i, q__1.i =
+ x[i__4].r * temp.i + x[i__4].i * temp.r;
+ r__1 = a[i__3].r + q__1.r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+ r_cnjg(&q__2, &x[jx]);
+ q__1.r = *alpha * q__2.r, q__1.i = *alpha * q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix = kx;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ q__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ q__1.r = a[i__4].r + q__2.r, q__1.i = a[i__4].i +
+ q__2.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ix += *incx;
+/* L30: */
+ }
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = jx;
+ q__1.r = x[i__4].r * temp.r - x[i__4].i * temp.i, q__1.i =
+ x[i__4].r * temp.i + x[i__4].i * temp.r;
+ r__1 = a[i__3].r + q__1.r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ }
+ jx += *incx;
+/* L40: */
+ }
+ }
+ } else {
+
+/* Form A when A is stored in lower triangle. */
+
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+ r_cnjg(&q__2, &x[j]);
+ q__1.r = *alpha * q__2.r, q__1.i = *alpha * q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = j;
+ q__1.r = temp.r * x[i__4].r - temp.i * x[i__4].i, q__1.i =
+ temp.r * x[i__4].i + temp.i * x[i__4].r;
+ r__1 = a[i__3].r + q__1.r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ q__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ q__1.r = a[i__4].r + q__2.r, q__1.i = a[i__4].i +
+ q__2.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L50: */
+ }
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ }
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+ r_cnjg(&q__2, &x[jx]);
+ q__1.r = *alpha * q__2.r, q__1.i = *alpha * q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = jx;
+ q__1.r = temp.r * x[i__4].r - temp.i * x[i__4].i, q__1.i =
+ temp.r * x[i__4].i + temp.i * x[i__4].r;
+ r__1 = a[i__3].r + q__1.r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ ix = jx;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ q__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ q__1.r = a[i__4].r + q__2.r, q__1.i = a[i__4].i +
+ q__2.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L70: */
+ }
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ }
+ jx += *incx;
+/* L80: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CHER . */
+
+} /* cher_ */
diff --git a/contrib/libs/cblas/cher2.c b/contrib/libs/cblas/cher2.c
new file mode 100644
index 0000000000..2de3bab940
--- /dev/null
+++ b/contrib/libs/cblas/cher2.c
@@ -0,0 +1,446 @@
+/* cher2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cher2_(char *uplo, integer *n, complex *alpha, complex *
+ x, integer *incx, complex *y, integer *incy, complex *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+ real r__1;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, ix, iy, jx, jy, kx, ky, info;
+ complex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHER2 performs the hermitian rank 2 operation */
+
+/* A := alpha*x*conjg( y' ) + conjg( alpha )*y*conjg( x' ) + A, */
+
+/* where alpha is a scalar, x and y are n element vectors and A is an n */
+/* by n hermitian matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array A is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of A */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of A */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - COMPLEX array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* Y - COMPLEX array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. */
+/* Unchanged on exit. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular part of the hermitian matrix and the strictly */
+/* lower triangular part of A is not referenced. On exit, the */
+/* upper triangular part of the array A is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular part of the hermitian matrix and the strictly */
+/* upper triangular part of A is not referenced. On exit, the */
+/* lower triangular part of the array A is overwritten by the */
+/* lower triangular part of the updated matrix. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set, they are assumed to be zero, and on exit they */
+/* are set to zero. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --x;
+ --y;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*incy == 0) {
+ info = 7;
+ } else if (*lda < max(1,*n)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("CHER2 ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || alpha->r == 0.f && alpha->i == 0.f) {
+ return 0;
+ }
+
+/* Set up the start points in X and Y if the increments are not both */
+/* unity. */
+
+ if (*incx != 1 || *incy != 1) {
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+ jx = kx;
+ jy = ky;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through the triangular part */
+/* of A. */
+
+ if (lsame_(uplo, "U")) {
+
+/* Form A when A is stored in the upper triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ i__3 = j;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f || (y[i__3].r != 0.f
+ || y[i__3].i != 0.f)) {
+ r_cnjg(&q__2, &y[j]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i, q__1.i =
+ alpha->r * q__2.i + alpha->i * q__2.r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ i__2 = j;
+ q__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ r_cnjg(&q__1, &q__2);
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ q__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ q__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ q__2.r = a[i__4].r + q__3.r, q__2.i = a[i__4].i +
+ q__3.i;
+ i__6 = i__;
+ q__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ q__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L10: */
+ }
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = j;
+ q__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ q__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = j;
+ q__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ q__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ r__1 = a[i__3].r + q__1.r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ i__3 = jy;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f || (y[i__3].r != 0.f
+ || y[i__3].i != 0.f)) {
+ r_cnjg(&q__2, &y[jy]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i, q__1.i =
+ alpha->r * q__2.i + alpha->i * q__2.r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ i__2 = jx;
+ q__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ r_cnjg(&q__1, &q__2);
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ ix = kx;
+ iy = ky;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ q__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ q__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ q__2.r = a[i__4].r + q__3.r, q__2.i = a[i__4].i +
+ q__3.i;
+ i__6 = iy;
+ q__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ q__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L30: */
+ }
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = jx;
+ q__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ q__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = jy;
+ q__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ q__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ r__1 = a[i__3].r + q__1.r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ }
+ jx += *incx;
+ jy += *incy;
+/* L40: */
+ }
+ }
+ } else {
+
+/* Form A when A is stored in the lower triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ i__3 = j;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f || (y[i__3].r != 0.f
+ || y[i__3].i != 0.f)) {
+ r_cnjg(&q__2, &y[j]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i, q__1.i =
+ alpha->r * q__2.i + alpha->i * q__2.r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ i__2 = j;
+ q__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ r_cnjg(&q__1, &q__2);
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = j;
+ q__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ q__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = j;
+ q__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ q__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ r__1 = a[i__3].r + q__1.r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ q__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ q__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ q__2.r = a[i__4].r + q__3.r, q__2.i = a[i__4].i +
+ q__3.i;
+ i__6 = i__;
+ q__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ q__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L50: */
+ }
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ i__3 = jy;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f || (y[i__3].r != 0.f
+ || y[i__3].i != 0.f)) {
+ r_cnjg(&q__2, &y[jy]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i, q__1.i =
+ alpha->r * q__2.i + alpha->i * q__2.r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ i__2 = jx;
+ q__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ r_cnjg(&q__1, &q__2);
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = jx;
+ q__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ q__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = jy;
+ q__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ q__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ r__1 = a[i__3].r + q__1.r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ ix = jx;
+ iy = jy;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ iy += *incy;
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ q__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ q__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ q__2.r = a[i__4].r + q__3.r, q__2.i = a[i__4].i +
+ q__3.i;
+ i__6 = iy;
+ q__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ q__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L70: */
+ }
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ }
+ jx += *incx;
+ jy += *incy;
+/* L80: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CHER2 . */
+
+} /* cher2_ */
diff --git a/contrib/libs/cblas/cher2k.c b/contrib/libs/cblas/cher2k.c
new file mode 100644
index 0000000000..c5f2d999c1
--- /dev/null
+++ b/contrib/libs/cblas/cher2k.c
@@ -0,0 +1,671 @@
+/* cher2k.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cher2k_(char *uplo, char *trans, integer *n, integer *k,
+ complex *alpha, complex *a, integer *lda, complex *b, integer *ldb,
+ real *beta, complex *c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5, i__6, i__7;
+ real r__1;
+ complex q__1, q__2, q__3, q__4, q__5, q__6;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, l, info;
+ complex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ integer nrowa;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHER2K performs one of the hermitian rank 2k operations */
+
+/* C := alpha*A*conjg( B' ) + conjg( alpha )*B*conjg( A' ) + beta*C, */
+
+/* or */
+
+/* C := alpha*conjg( A' )*B + conjg( alpha )*conjg( B' )*A + beta*C, */
+
+/* where alpha and beta are scalars with beta real, C is an n by n */
+/* hermitian matrix and A and B are n by k matrices in the first case */
+/* and k by n matrices in the second case. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array C is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of C */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of C */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' C := alpha*A*conjg( B' ) + */
+/* conjg( alpha )*B*conjg( A' ) + */
+/* beta*C. */
+
+/* TRANS = 'C' or 'c' C := alpha*conjg( A' )*B + */
+/* conjg( alpha )*conjg( B' )*A + */
+/* beta*C. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix C. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry with TRANS = 'N' or 'n', K specifies the number */
+/* of columns of the matrices A and B, and on entry with */
+/* TRANS = 'C' or 'c', K specifies the number of rows of the */
+/* matrices A and B. K must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX array of DIMENSION ( LDA, ka ), where ka is */
+/* k when TRANS = 'N' or 'n', and is n otherwise. */
+/* Before entry with TRANS = 'N' or 'n', the leading n by k */
+/* part of the array A must contain the matrix A, otherwise */
+/* the leading k by n part of the array A must contain the */
+/* matrix A. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDA must be at least max( 1, n ), otherwise LDA must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* B - COMPLEX array of DIMENSION ( LDB, kb ), where kb is */
+/* k when TRANS = 'N' or 'n', and is n otherwise. */
+/* Before entry with TRANS = 'N' or 'n', the leading n by k */
+/* part of the array B must contain the matrix B, otherwise */
+/* the leading k by n part of the array B must contain the */
+/* matrix B. */
+/* Unchanged on exit. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDB must be at least max( 1, n ), otherwise LDB must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* BETA - REAL . */
+/* On entry, BETA specifies the scalar beta. */
+/* Unchanged on exit. */
+
+/* C - COMPLEX array of DIMENSION ( LDC, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array C must contain the upper */
+/* triangular part of the hermitian matrix and the strictly */
+/* lower triangular part of C is not referenced. On exit, the */
+/* upper triangular part of the array C is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array C must contain the lower */
+/* triangular part of the hermitian matrix and the strictly */
+/* upper triangular part of C is not referenced. On exit, the */
+/* lower triangular part of the array C is overwritten by the */
+/* lower triangular part of the updated matrix. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set, they are assumed to be zero, and on exit they */
+/* are set to zero. */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+/* -- Modified 8-Nov-93 to set C(J,J) to REAL( C(J,J) ) when BETA = 1. */
+/* Ed Anderson, Cray Research Inc. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(trans, "N")) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! upper && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "C")) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*k < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldb < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldc < max(1,*n)) {
+ info = 12;
+ }
+ if (info != 0) {
+ xerbla_("CHER2K", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || (alpha->r == 0.f && alpha->i == 0.f || *k == 0) && *beta ==
+ 1.f) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0.f && alpha->i == 0.f) {
+ if (upper) {
+ if (*beta == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = *beta * c__[i__4].r, q__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L30: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = *beta * c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+/* L40: */
+ }
+ }
+ } else {
+ if (*beta == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = *beta * c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = *beta * c__[i__4].r, q__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*conjg( B' ) + conjg( alpha )*B*conjg( A' ) + */
+/* C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.f) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L90: */
+ }
+ } else if (*beta != 1.f) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = *beta * c__[i__4].r, q__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L100: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = *beta * c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ i__4 = j + l * b_dim1;
+ if (a[i__3].r != 0.f || a[i__3].i != 0.f || (b[i__4].r !=
+ 0.f || b[i__4].i != 0.f)) {
+ r_cnjg(&q__2, &b[j + l * b_dim1]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i,
+ q__1.i = alpha->r * q__2.i + alpha->i *
+ q__2.r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ i__3 = j + l * a_dim1;
+ q__2.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ q__2.i = alpha->r * a[i__3].i + alpha->i * a[
+ i__3].r;
+ r_cnjg(&q__1, &q__2);
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ q__3.r = a[i__6].r * temp1.r - a[i__6].i *
+ temp1.i, q__3.i = a[i__6].r * temp1.i + a[
+ i__6].i * temp1.r;
+ q__2.r = c__[i__5].r + q__3.r, q__2.i = c__[i__5]
+ .i + q__3.i;
+ i__7 = i__ + l * b_dim1;
+ q__4.r = b[i__7].r * temp2.r - b[i__7].i *
+ temp2.i, q__4.i = b[i__7].r * temp2.i + b[
+ i__7].i * temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i +
+ q__4.i;
+ c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L110: */
+ }
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ i__5 = j + l * a_dim1;
+ q__2.r = a[i__5].r * temp1.r - a[i__5].i * temp1.i,
+ q__2.i = a[i__5].r * temp1.i + a[i__5].i *
+ temp1.r;
+ i__6 = j + l * b_dim1;
+ q__3.r = b[i__6].r * temp2.r - b[i__6].i * temp2.i,
+ q__3.i = b[i__6].r * temp2.i + b[i__6].i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ r__1 = c__[i__4].r + q__1.r;
+ c__[i__3].r = r__1, c__[i__3].i = 0.f;
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.f) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L140: */
+ }
+ } else if (*beta != 1.f) {
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = *beta * c__[i__4].r, q__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L150: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = *beta * c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ i__4 = j + l * b_dim1;
+ if (a[i__3].r != 0.f || a[i__3].i != 0.f || (b[i__4].r !=
+ 0.f || b[i__4].i != 0.f)) {
+ r_cnjg(&q__2, &b[j + l * b_dim1]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i,
+ q__1.i = alpha->r * q__2.i + alpha->i *
+ q__2.r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ i__3 = j + l * a_dim1;
+ q__2.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ q__2.i = alpha->r * a[i__3].i + alpha->i * a[
+ i__3].r;
+ r_cnjg(&q__1, &q__2);
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ i__3 = *n;
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ q__3.r = a[i__6].r * temp1.r - a[i__6].i *
+ temp1.i, q__3.i = a[i__6].r * temp1.i + a[
+ i__6].i * temp1.r;
+ q__2.r = c__[i__5].r + q__3.r, q__2.i = c__[i__5]
+ .i + q__3.i;
+ i__7 = i__ + l * b_dim1;
+ q__4.r = b[i__7].r * temp2.r - b[i__7].i *
+ temp2.i, q__4.i = b[i__7].r * temp2.i + b[
+ i__7].i * temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i +
+ q__4.i;
+ c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L160: */
+ }
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ i__5 = j + l * a_dim1;
+ q__2.r = a[i__5].r * temp1.r - a[i__5].i * temp1.i,
+ q__2.i = a[i__5].r * temp1.i + a[i__5].i *
+ temp1.r;
+ i__6 = j + l * b_dim1;
+ q__3.r = b[i__6].r * temp2.r - b[i__6].i * temp2.i,
+ q__3.i = b[i__6].r * temp2.i + b[i__6].i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ r__1 = c__[i__4].r + q__1.r;
+ c__[i__3].r = r__1, c__[i__3].i = 0.f;
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ } else {
+
+/* Form C := alpha*conjg( A' )*B + conjg( alpha )*conjg( B' )*A + */
+/* C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp1.r = 0.f, temp1.i = 0.f;
+ temp2.r = 0.f, temp2.i = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ r_cnjg(&q__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * b_dim1;
+ q__2.r = q__3.r * b[i__4].r - q__3.i * b[i__4].i,
+ q__2.i = q__3.r * b[i__4].i + q__3.i * b[i__4]
+ .r;
+ q__1.r = temp1.r + q__2.r, q__1.i = temp1.i + q__2.i;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ r_cnjg(&q__3, &b[l + i__ * b_dim1]);
+ i__4 = l + j * a_dim1;
+ q__2.r = q__3.r * a[i__4].r - q__3.i * a[i__4].i,
+ q__2.i = q__3.r * a[i__4].i + q__3.i * a[i__4]
+ .r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+/* L190: */
+ }
+ if (i__ == j) {
+ if (*beta == 0.f) {
+ i__3 = j + j * c_dim1;
+ q__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ q__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ r_cnjg(&q__4, alpha);
+ q__3.r = q__4.r * temp2.r - q__4.i * temp2.i,
+ q__3.i = q__4.r * temp2.i + q__4.i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ r__1 = q__1.r;
+ c__[i__3].r = r__1, c__[i__3].i = 0.f;
+ } else {
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ q__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ q__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ r_cnjg(&q__4, alpha);
+ q__3.r = q__4.r * temp2.r - q__4.i * temp2.i,
+ q__3.i = q__4.r * temp2.i + q__4.i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ r__1 = *beta * c__[i__4].r + q__1.r;
+ c__[i__3].r = r__1, c__[i__3].i = 0.f;
+ }
+ } else {
+ if (*beta == 0.f) {
+ i__3 = i__ + j * c_dim1;
+ q__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ q__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ r_cnjg(&q__4, alpha);
+ q__3.r = q__4.r * temp2.r - q__4.i * temp2.i,
+ q__3.i = q__4.r * temp2.i + q__4.i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__3.r = *beta * c__[i__4].r, q__3.i = *beta *
+ c__[i__4].i;
+ q__4.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ q__4.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ q__2.r = q__3.r + q__4.r, q__2.i = q__3.i +
+ q__4.i;
+ r_cnjg(&q__6, alpha);
+ q__5.r = q__6.r * temp2.r - q__6.i * temp2.i,
+ q__5.i = q__6.r * temp2.i + q__6.i *
+ temp2.r;
+ q__1.r = q__2.r + q__5.r, q__1.i = q__2.i +
+ q__5.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ }
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ temp1.r = 0.f, temp1.i = 0.f;
+ temp2.r = 0.f, temp2.i = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ r_cnjg(&q__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * b_dim1;
+ q__2.r = q__3.r * b[i__4].r - q__3.i * b[i__4].i,
+ q__2.i = q__3.r * b[i__4].i + q__3.i * b[i__4]
+ .r;
+ q__1.r = temp1.r + q__2.r, q__1.i = temp1.i + q__2.i;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ r_cnjg(&q__3, &b[l + i__ * b_dim1]);
+ i__4 = l + j * a_dim1;
+ q__2.r = q__3.r * a[i__4].r - q__3.i * a[i__4].i,
+ q__2.i = q__3.r * a[i__4].i + q__3.i * a[i__4]
+ .r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+/* L220: */
+ }
+ if (i__ == j) {
+ if (*beta == 0.f) {
+ i__3 = j + j * c_dim1;
+ q__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ q__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ r_cnjg(&q__4, alpha);
+ q__3.r = q__4.r * temp2.r - q__4.i * temp2.i,
+ q__3.i = q__4.r * temp2.i + q__4.i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ r__1 = q__1.r;
+ c__[i__3].r = r__1, c__[i__3].i = 0.f;
+ } else {
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ q__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ q__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ r_cnjg(&q__4, alpha);
+ q__3.r = q__4.r * temp2.r - q__4.i * temp2.i,
+ q__3.i = q__4.r * temp2.i + q__4.i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ r__1 = *beta * c__[i__4].r + q__1.r;
+ c__[i__3].r = r__1, c__[i__3].i = 0.f;
+ }
+ } else {
+ if (*beta == 0.f) {
+ i__3 = i__ + j * c_dim1;
+ q__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ q__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ r_cnjg(&q__4, alpha);
+ q__3.r = q__4.r * temp2.r - q__4.i * temp2.i,
+ q__3.i = q__4.r * temp2.i + q__4.i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__3.r = *beta * c__[i__4].r, q__3.i = *beta *
+ c__[i__4].i;
+ q__4.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ q__4.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ q__2.r = q__3.r + q__4.r, q__2.i = q__3.i +
+ q__4.i;
+ r_cnjg(&q__6, alpha);
+ q__5.r = q__6.r * temp2.r - q__6.i * temp2.i,
+ q__5.i = q__6.r * temp2.i + q__6.i *
+ temp2.r;
+ q__1.r = q__2.r + q__5.r, q__1.i = q__2.i +
+ q__5.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ }
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CHER2K. */
+
+} /* cher2k_ */
diff --git a/contrib/libs/cblas/cherk.c b/contrib/libs/cblas/cherk.c
new file mode 100644
index 0000000000..fae90fb3b1
--- /dev/null
+++ b/contrib/libs/cblas/cherk.c
@@ -0,0 +1,533 @@
+/* cherk.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cherk_(char *uplo, char *trans, integer *n, integer *k,
+	real *alpha, complex *a, integer *lda, real *beta, complex *c__,
+	integer *ldc)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3, i__4, i__5,
+	    i__6;
+    real r__1;
+    complex q__1, q__2, q__3;
+
+    /* Builtin functions */
+    void r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    integer i__, j, l, info;
+    complex temp;
+    extern logical lsame_(char *, char *);
+    integer nrowa;
+    real rtemp;
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHERK performs one of the hermitian rank k operations */
+
+/* C := alpha*A*conjg( A' ) + beta*C, */
+
+/* or */
+
+/* C := alpha*conjg( A' )*A + beta*C, */
+
+/* where alpha and beta are real scalars, C is an n by n hermitian */
+/* matrix and A is an n by k matrix in the first case and a k by n */
+/* matrix in the second case. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array C is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of C */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of C */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' C := alpha*A*conjg( A' ) + beta*C. */
+
+/* TRANS = 'C' or 'c' C := alpha*conjg( A' )*A + beta*C. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix C. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry with TRANS = 'N' or 'n', K specifies the number */
+/* of columns of the matrix A, and on entry with */
+/* TRANS = 'C' or 'c', K specifies the number of rows of the */
+/* matrix A. K must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - REAL . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX array of DIMENSION ( LDA, ka ), where ka is */
+/* k when TRANS = 'N' or 'n', and is n otherwise. */
+/* Before entry with TRANS = 'N' or 'n', the leading n by k */
+/* part of the array A must contain the matrix A, otherwise */
+/* the leading k by n part of the array A must contain the */
+/* matrix A. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDA must be at least max( 1, n ), otherwise LDA must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* BETA - REAL . */
+/* On entry, BETA specifies the scalar beta. */
+/* Unchanged on exit. */
+
+/* C - COMPLEX array of DIMENSION ( LDC, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array C must contain the upper */
+/* triangular part of the hermitian matrix and the strictly */
+/* lower triangular part of C is not referenced. On exit, the */
+/* upper triangular part of the array C is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array C must contain the lower */
+/* triangular part of the hermitian matrix and the strictly */
+/* upper triangular part of C is not referenced. On exit, the */
+/* lower triangular part of the array C is overwritten by the */
+/* lower triangular part of the updated matrix. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set, they are assumed to be zero, and on exit they */
+/* are set to zero. */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+/* -- Modified 8-Nov-93 to set C(J,J) to REAL( C(J,J) ) when BETA = 1. */
+/* Ed Anderson, Cray Research Inc. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments: offset pointers so 1-based Fortran-style indexing works in C */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+
+    /* Function Body: nrowa is the row count of A implied by TRANS */
+    if (lsame_(trans, "N")) {
+	nrowa = *n;
+    } else {
+	nrowa = *k;
+    }
+    upper = lsame_(uplo, "U");
+
+    info = 0;
+    if (! upper && ! lsame_(uplo, "L")) {
+	info = 1;
+    } else if (! lsame_(trans, "N") && ! lsame_(trans,
+	    "C")) {
+	info = 2;
+    } else if (*n < 0) {
+	info = 3;
+    } else if (*k < 0) {
+	info = 4;
+    } else if (*lda < max(1,nrowa)) {
+	info = 7;
+    } else if (*ldc < max(1,*n)) {
+	info = 10;
+    }
+    if (info != 0) {
+	xerbla_("CHERK ", &info);
+	return 0;
+    }
+
+/* Quick return if possible: the update would leave C unchanged. */
+
+    if (*n == 0 || (*alpha == 0.f || *k == 0) && *beta == 1.f) {
+	return 0;
+    }
+
+/* And when alpha.eq.zero: C := beta*C, with the diagonal forced real. */
+
+    if (*alpha == 0.f) {
+	if (upper) {
+	    if (*beta == 0.f) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L10: */
+		    }
+/* L20: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = j - 1;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			q__1.r = *beta * c__[i__4].r, q__1.i = *beta * c__[
+				i__4].i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L30: */
+		    }
+		    i__2 = j + j * c_dim1;
+		    i__3 = j + j * c_dim1;
+		    r__1 = *beta * c__[i__3].r;
+		    c__[i__2].r = r__1, c__[i__2].i = 0.f;
+/* L40: */
+		}
+	    }
+	} else {
+	    if (*beta == 0.f) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L50: */
+		    }
+/* L60: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = j + j * c_dim1;
+		    i__3 = j + j * c_dim1;
+		    r__1 = *beta * c__[i__3].r;
+		    c__[i__2].r = r__1, c__[i__2].i = 0.f;
+		    i__2 = *n;
+		    for (i__ = j + 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			q__1.r = *beta * c__[i__4].r, q__1.i = *beta * c__[
+				i__4].i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L70: */
+		    }
+/* L80: */
+		}
+	    }
+	}
+	return 0;
+    }
+
+/* Start the operations (alpha != 0 from here on). */
+
+    if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*conjg( A' ) + beta*C. */
+
+	if (upper) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (*beta == 0.f) {
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L90: */
+		    }
+		} else if (*beta != 1.f) {
+		    i__2 = j - 1;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			q__1.r = *beta * c__[i__4].r, q__1.i = *beta * c__[
+				i__4].i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L100: */
+		    }
+		    i__2 = j + j * c_dim1;
+		    i__3 = j + j * c_dim1;
+		    r__1 = *beta * c__[i__3].r;
+		    c__[i__2].r = r__1, c__[i__2].i = 0.f;
+		} else {
+		    i__2 = j + j * c_dim1;
+		    i__3 = j + j * c_dim1;
+		    r__1 = c__[i__3].r;
+		    c__[i__2].r = r__1, c__[i__2].i = 0.f;
+		}
+		i__2 = *k;
+		for (l = 1; l <= i__2; ++l) {
+		    i__3 = j + l * a_dim1;
+		    if (a[i__3].r != 0.f || a[i__3].i != 0.f) {
+			r_cnjg(&q__2, &a[j + l * a_dim1]);
+			q__1.r = *alpha * q__2.r, q__1.i = *alpha * q__2.i;
+			temp.r = q__1.r, temp.i = q__1.i;
+			i__3 = j - 1;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    i__4 = i__ + j * c_dim1;
+			    i__5 = i__ + j * c_dim1;
+			    i__6 = i__ + l * a_dim1;
+			    q__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+				    q__2.i = temp.r * a[i__6].i + temp.i * a[
+				    i__6].r;
+			    q__1.r = c__[i__5].r + q__2.r, q__1.i = c__[i__5]
+				    .i + q__2.i;
+			    c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L110: */
+			}
+			i__3 = j + j * c_dim1;
+			i__4 = j + j * c_dim1;
+			i__5 = i__ + l * a_dim1;
+			q__1.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+				q__1.i = temp.r * a[i__5].i + temp.i * a[i__5]
+				.r;
+			r__1 = c__[i__4].r + q__1.r;
+			c__[i__3].r = r__1, c__[i__3].i = 0.f;
+		    }
+/* L120: */
+		}
+/* L130: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (*beta == 0.f) {
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L140: */
+		    }
+		} else if (*beta != 1.f) {
+		    i__2 = j + j * c_dim1;
+		    i__3 = j + j * c_dim1;
+		    r__1 = *beta * c__[i__3].r;
+		    c__[i__2].r = r__1, c__[i__2].i = 0.f;
+		    i__2 = *n;
+		    for (i__ = j + 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			q__1.r = *beta * c__[i__4].r, q__1.i = *beta * c__[
+				i__4].i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L150: */
+		    }
+		} else {
+		    i__2 = j + j * c_dim1;
+		    i__3 = j + j * c_dim1;
+		    r__1 = c__[i__3].r;
+		    c__[i__2].r = r__1, c__[i__2].i = 0.f;
+		}
+		i__2 = *k;
+		for (l = 1; l <= i__2; ++l) {
+		    i__3 = j + l * a_dim1;
+		    if (a[i__3].r != 0.f || a[i__3].i != 0.f) {
+			r_cnjg(&q__2, &a[j + l * a_dim1]);
+			q__1.r = *alpha * q__2.r, q__1.i = *alpha * q__2.i;
+			temp.r = q__1.r, temp.i = q__1.i;
+			i__3 = j + j * c_dim1;
+			i__4 = j + j * c_dim1;
+			i__5 = j + l * a_dim1;
+			q__1.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+				q__1.i = temp.r * a[i__5].i + temp.i * a[i__5]
+				.r;
+			r__1 = c__[i__4].r + q__1.r;
+			c__[i__3].r = r__1, c__[i__3].i = 0.f;
+			i__3 = *n;
+			for (i__ = j + 1; i__ <= i__3; ++i__) {
+			    i__4 = i__ + j * c_dim1;
+			    i__5 = i__ + j * c_dim1;
+			    i__6 = i__ + l * a_dim1;
+			    q__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+				    q__2.i = temp.r * a[i__6].i + temp.i * a[
+				    i__6].r;
+			    q__1.r = c__[i__5].r + q__2.r, q__1.i = c__[i__5]
+				    .i + q__2.i;
+			    c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L160: */
+			}
+		    }
+/* L170: */
+		}
+/* L180: */
+	    }
+	}
+    } else {
+
+/* Form C := alpha*conjg( A' )*A + beta*C. */
+
+	if (upper) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    temp.r = 0.f, temp.i = 0.f;
+		    i__3 = *k;
+		    for (l = 1; l <= i__3; ++l) {
+			r_cnjg(&q__3, &a[l + i__ * a_dim1]);
+			i__4 = l + j * a_dim1;
+			q__2.r = q__3.r * a[i__4].r - q__3.i * a[i__4].i,
+				q__2.i = q__3.r * a[i__4].i + q__3.i * a[i__4]
+				.r;
+			q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+			temp.r = q__1.r, temp.i = q__1.i;
+/* L190: */
+		    }
+		    if (*beta == 0.f) {
+			i__3 = i__ + j * c_dim1;
+			q__1.r = *alpha * temp.r, q__1.i = *alpha * temp.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    } else {
+			i__3 = i__ + j * c_dim1;
+			q__2.r = *alpha * temp.r, q__2.i = *alpha * temp.i;
+			i__4 = i__ + j * c_dim1;
+			q__3.r = *beta * c__[i__4].r, q__3.i = *beta * c__[
+				i__4].i;
+			q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    }
+/* L200: */
+		}
+		rtemp = 0.f;
+		i__2 = *k;
+		for (l = 1; l <= i__2; ++l) {
+		    r_cnjg(&q__3, &a[l + j * a_dim1]);
+		    i__3 = l + j * a_dim1;
+		    q__2.r = q__3.r * a[i__3].r - q__3.i * a[i__3].i, q__2.i =
+			     q__3.r * a[i__3].i + q__3.i * a[i__3].r;
+		    q__1.r = rtemp + q__2.r, q__1.i = q__2.i;
+		    rtemp = q__1.r;
+/* L210: */
+		}
+		if (*beta == 0.f) {
+		    i__2 = j + j * c_dim1;
+		    r__1 = *alpha * rtemp;
+		    c__[i__2].r = r__1, c__[i__2].i = 0.f;
+		} else {
+		    i__2 = j + j * c_dim1;
+		    i__3 = j + j * c_dim1;
+		    r__1 = *alpha * rtemp + *beta * c__[i__3].r;
+		    c__[i__2].r = r__1, c__[i__2].i = 0.f;
+		}
+/* L220: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		rtemp = 0.f;
+		i__2 = *k;
+		for (l = 1; l <= i__2; ++l) {
+		    r_cnjg(&q__3, &a[l + j * a_dim1]);
+		    i__3 = l + j * a_dim1;
+		    q__2.r = q__3.r * a[i__3].r - q__3.i * a[i__3].i, q__2.i =
+			     q__3.r * a[i__3].i + q__3.i * a[i__3].r;
+		    q__1.r = rtemp + q__2.r, q__1.i = q__2.i;
+		    rtemp = q__1.r;
+/* L230: */
+		}
+		if (*beta == 0.f) {
+		    i__2 = j + j * c_dim1;
+		    r__1 = *alpha * rtemp;
+		    c__[i__2].r = r__1, c__[i__2].i = 0.f;
+		} else {
+		    i__2 = j + j * c_dim1;
+		    i__3 = j + j * c_dim1;
+		    r__1 = *alpha * rtemp + *beta * c__[i__3].r;
+		    c__[i__2].r = r__1, c__[i__2].i = 0.f;
+		}
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    temp.r = 0.f, temp.i = 0.f;
+		    i__3 = *k;
+		    for (l = 1; l <= i__3; ++l) {
+			r_cnjg(&q__3, &a[l + i__ * a_dim1]);
+			i__4 = l + j * a_dim1;
+			q__2.r = q__3.r * a[i__4].r - q__3.i * a[i__4].i,
+				q__2.i = q__3.r * a[i__4].i + q__3.i * a[i__4]
+				.r;
+			q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+			temp.r = q__1.r, temp.i = q__1.i;
+/* L240: */
+		    }
+		    if (*beta == 0.f) {
+			i__3 = i__ + j * c_dim1;
+			q__1.r = *alpha * temp.r, q__1.i = *alpha * temp.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    } else {
+			i__3 = i__ + j * c_dim1;
+			q__2.r = *alpha * temp.r, q__2.i = *alpha * temp.i;
+			i__4 = i__ + j * c_dim1;
+			q__3.r = *beta * c__[i__4].r, q__3.i = *beta * c__[
+				i__4].i;
+			q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    }
+/* L250: */
+		}
+/* L260: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of CHERK . */
+
+} /* cherk_ */
diff --git a/contrib/libs/cblas/chpmv.c b/contrib/libs/cblas/chpmv.c
new file mode 100644
index 0000000000..29d580adb8
--- /dev/null
+++ b/contrib/libs/cblas/chpmv.c
@@ -0,0 +1,434 @@
+/* chpmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int chpmv_(char *uplo, integer *n, complex *alpha, complex *
+	ap, complex *x, integer *incx, complex *beta, complex *y, integer *
+	incy)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3, i__4, i__5;
+    real r__1;
+    complex q__1, q__2, q__3, q__4;
+
+    /* Builtin functions */
+    void r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    integer i__, j, k, kk, ix, iy, jx, jy, kx, ky, info;
+    complex temp1, temp2;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHPMV performs the matrix-vector operation */
+
+/* y := alpha*A*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are n element vectors and */
+/* A is an n by n hermitian matrix, supplied in packed form. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the matrix A is supplied in the packed */
+/* array AP as follows: */
+
+/* UPLO = 'U' or 'u' The upper triangular part of A is */
+/* supplied in AP. */
+
+/* UPLO = 'L' or 'l' The lower triangular part of A is */
+/* supplied in AP. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* AP - COMPLEX array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular part of the hermitian matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */
+/* and a( 2, 2 ) respectively, and so on. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular part of the hermitian matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */
+/* and a( 3, 1 ) respectively, and so on. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set and are assumed to be zero. */
+/* Unchanged on exit. */
+
+/* X - COMPLEX array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA - COMPLEX . */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then Y need not be set on input. */
+/* Unchanged on exit. */
+
+/* Y - COMPLEX array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. On exit, Y is overwritten by the updated */
+/* vector y. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments: offset pointers so 1-based Fortran-style indexing works in C */
+    --y;
+    --x;
+    --ap;
+
+    /* Function Body: validate UPLO, N and the vector increments */
+    info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*incx == 0) {
+	info = 6;
+    } else if (*incy == 0) {
+	info = 9;
+    }
+    if (info != 0) {
+	xerbla_("CHPMV ", &info);
+	return 0;
+    }
+
+/* Quick return if possible: y would be left unchanged. */
+
+    if (*n == 0 || alpha->r == 0.f && alpha->i == 0.f && (beta->r == 1.f &&
+	    beta->i == 0.f)) {
+	return 0;
+    }
+
+/* Set up the start points in X and Y. */
+
+    if (*incx > 0) {
+	kx = 1;
+    } else {
+	kx = 1 - (*n - 1) * *incx;
+    }
+    if (*incy > 0) {
+	ky = 1;
+    } else {
+	ky = 1 - (*n - 1) * *incy;
+    }
+
+/* Start the operations. In this version the elements of the array AP */
+/* are accessed sequentially with one pass through AP. */
+
+/* First form y := beta*y (the only scaling needed before accumulation). */
+
+    if (beta->r != 1.f || beta->i != 0.f) {
+	if (*incy == 1) {
+	    if (beta->r == 0.f && beta->i == 0.f) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = i__;
+		    y[i__2].r = 0.f, y[i__2].i = 0.f;
+/* L10: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = i__;
+		    i__3 = i__;
+		    q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+			    q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+			    .r;
+		    y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+/* L20: */
+		}
+	    }
+	} else {
+	    iy = ky;
+	    if (beta->r == 0.f && beta->i == 0.f) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = iy;
+		    y[i__2].r = 0.f, y[i__2].i = 0.f;
+		    iy += *incy;
+/* L30: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = iy;
+		    i__3 = iy;
+		    q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+			    q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+			    .r;
+		    y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+		    iy += *incy;
+/* L40: */
+		}
+	    }
+	}
+    }
+    if (alpha->r == 0.f && alpha->i == 0.f) {
+	return 0;
+    }
+    kk = 1;
+    if (lsame_(uplo, "U")) {
+
+/* Form y when AP contains the upper triangle. */
+
+	if (*incx == 1 && *incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+			 alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+		temp1.r = q__1.r, temp1.i = q__1.i;
+		temp2.r = 0.f, temp2.i = 0.f;
+		k = kk;
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = k;
+		    q__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i,
+			    q__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5]
+			    .r;
+		    q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+		    y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+		    r_cnjg(&q__3, &ap[k]);
+		    i__3 = i__;
+		    q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i, q__2.i =
+			     q__3.r * x[i__3].i + q__3.i * x[i__3].r;
+		    q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+		    temp2.r = q__1.r, temp2.i = q__1.i;
+		    ++k;
+/* L50: */
+		}
+		i__2 = j;
+		i__3 = j;
+		i__4 = kk + j - 1;
+		r__1 = ap[i__4].r;
+		q__3.r = r__1 * temp1.r, q__3.i = r__1 * temp1.i;
+		q__2.r = y[i__3].r + q__3.r, q__2.i = y[i__3].i + q__3.i;
+		q__4.r = alpha->r * temp2.r - alpha->i * temp2.i, q__4.i =
+			alpha->r * temp2.i + alpha->i * temp2.r;
+		q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+		y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+		kk += j;
+/* L60: */
+	    }
+	} else {
+	    jx = kx;
+	    jy = ky;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+			 alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+		temp1.r = q__1.r, temp1.i = q__1.i;
+		temp2.r = 0.f, temp2.i = 0.f;
+		ix = kx;
+		iy = ky;
+		i__2 = kk + j - 2;
+		for (k = kk; k <= i__2; ++k) {
+		    i__3 = iy;
+		    i__4 = iy;
+		    i__5 = k;
+		    q__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i,
+			    q__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5]
+			    .r;
+		    q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+		    y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+		    r_cnjg(&q__3, &ap[k]);
+		    i__3 = ix;
+		    q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i, q__2.i =
+			     q__3.r * x[i__3].i + q__3.i * x[i__3].r;
+		    q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+		    temp2.r = q__1.r, temp2.i = q__1.i;
+		    ix += *incx;
+		    iy += *incy;
+/* L70: */
+		}
+		i__2 = jy;
+		i__3 = jy;
+		i__4 = kk + j - 1;
+		r__1 = ap[i__4].r;
+		q__3.r = r__1 * temp1.r, q__3.i = r__1 * temp1.i;
+		q__2.r = y[i__3].r + q__3.r, q__2.i = y[i__3].i + q__3.i;
+		q__4.r = alpha->r * temp2.r - alpha->i * temp2.i, q__4.i =
+			alpha->r * temp2.i + alpha->i * temp2.r;
+		q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+		y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+		jx += *incx;
+		jy += *incy;
+		kk += j;
+/* L80: */
+	    }
+	}
+    } else {
+
+/* Form y when AP contains the lower triangle. */
+
+	if (*incx == 1 && *incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+			 alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+		temp1.r = q__1.r, temp1.i = q__1.i;
+		temp2.r = 0.f, temp2.i = 0.f;
+		i__2 = j;
+		i__3 = j;
+		i__4 = kk;
+		r__1 = ap[i__4].r;
+		q__2.r = r__1 * temp1.r, q__2.i = r__1 * temp1.i;
+		q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+		y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+		k = kk + 1;
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = k;
+		    q__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i,
+			    q__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5]
+			    .r;
+		    q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+		    y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+		    r_cnjg(&q__3, &ap[k]);
+		    i__3 = i__;
+		    q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i, q__2.i =
+			     q__3.r * x[i__3].i + q__3.i * x[i__3].r;
+		    q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+		    temp2.r = q__1.r, temp2.i = q__1.i;
+		    ++k;
+/* L90: */
+		}
+		i__2 = j;
+		i__3 = j;
+		q__2.r = alpha->r * temp2.r - alpha->i * temp2.i, q__2.i =
+			alpha->r * temp2.i + alpha->i * temp2.r;
+		q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+		y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+		kk += *n - j + 1;
+/* L100: */
+	    }
+	} else {
+	    jx = kx;
+	    jy = ky;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+			 alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+		temp1.r = q__1.r, temp1.i = q__1.i;
+		temp2.r = 0.f, temp2.i = 0.f;
+		i__2 = jy;
+		i__3 = jy;
+		i__4 = kk;
+		r__1 = ap[i__4].r;
+		q__2.r = r__1 * temp1.r, q__2.i = r__1 * temp1.i;
+		q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+		y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+		ix = jx;
+		iy = jy;
+		i__2 = kk + *n - j;
+		for (k = kk + 1; k <= i__2; ++k) {
+		    ix += *incx;
+		    iy += *incy;
+		    i__3 = iy;
+		    i__4 = iy;
+		    i__5 = k;
+		    q__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i,
+			    q__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5]
+			    .r;
+		    q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+		    y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+		    r_cnjg(&q__3, &ap[k]);
+		    i__3 = ix;
+		    q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i, q__2.i =
+			     q__3.r * x[i__3].i + q__3.i * x[i__3].r;
+		    q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+		    temp2.r = q__1.r, temp2.i = q__1.i;
+/* L110: */
+		}
+		i__2 = jy;
+		i__3 = jy;
+		q__2.r = alpha->r * temp2.r - alpha->i * temp2.i, q__2.i =
+			alpha->r * temp2.i + alpha->i * temp2.r;
+		q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+		y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+		jx += *incx;
+		jy += *incy;
+		kk += *n - j + 1;
+/* L120: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of CHPMV . */
+
+} /* chpmv_ */
diff --git a/contrib/libs/cblas/chpr.c b/contrib/libs/cblas/chpr.c
new file mode 100644
index 0000000000..2622f05531
--- /dev/null
+++ b/contrib/libs/cblas/chpr.c
@@ -0,0 +1,339 @@
+/* chpr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int chpr_(char *uplo, integer *n, real *alpha, complex *x,
+	integer *incx, complex *ap)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3, i__4, i__5;
+    real r__1;
+    complex q__1, q__2;
+
+    /* Builtin functions */
+    void r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    integer i__, j, k, kk, ix, jx, kx, info;
+    complex temp;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHPR performs the hermitian rank 1 operation */
+
+/* A := alpha*x*conjg( x' ) + A, */
+
+/* where alpha is a real scalar, x is an n element vector and A is an */
+/* n by n hermitian matrix, supplied in packed form. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the matrix A is supplied in the packed */
+/* array AP as follows: */
+
+/* UPLO = 'U' or 'u' The upper triangular part of A is */
+/* supplied in AP. */
+
+/* UPLO = 'L' or 'l' The lower triangular part of A is */
+/* supplied in AP. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - REAL . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - COMPLEX array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* AP - COMPLEX array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular part of the hermitian matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */
+/* and a( 2, 2 ) respectively, and so on. On exit, the array */
+/* AP is overwritten by the upper triangular part of the */
+/* updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular part of the hermitian matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */
+/* and a( 3, 1 ) respectively, and so on. On exit, the array */
+/* AP is overwritten by the lower triangular part of the */
+/* updated matrix. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set, they are assumed to be zero, and on exit they */
+/* are set to zero. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments: offset pointers so 1-based Fortran-style indexing works in C */
+    --ap;
+    --x;
+
+    /* Function Body: validate UPLO, N and INCX */
+    info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*incx == 0) {
+	info = 5;
+    }
+    if (info != 0) {
+	xerbla_("CHPR  ", &info);
+	return 0;
+    }
+
+/* Quick return if possible: a zero alpha or empty matrix leaves A unchanged. */
+
+    if (*n == 0 || *alpha == 0.f) {
+	return 0;
+    }
+
+/* Set the start point in X if the increment is not unity. */
+
+    if (*incx <= 0) {
+	kx = 1 - (*n - 1) * *incx;
+    } else if (*incx != 1) {
+	kx = 1;
+    }
+
+/* Start the operations. In this version the elements of the array AP */
+/* are accessed sequentially with one pass through AP. */
+
+    kk = 1;
+    if (lsame_(uplo, "U")) {
+
+/* Form A when upper triangle is stored in AP. */
+
+	if (*incx == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+		    r_cnjg(&q__2, &x[j]);
+		    q__1.r = *alpha * q__2.r, q__1.i = *alpha * q__2.i;
+		    temp.r = q__1.r, temp.i = q__1.i;
+		    k = kk;
+		    i__2 = j - 1;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = k;
+			i__4 = k;
+			i__5 = i__;
+			q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+				q__2.i = x[i__5].r * temp.i + x[i__5].i *
+				temp.r;
+			q__1.r = ap[i__4].r + q__2.r, q__1.i = ap[i__4].i +
+				q__2.i;
+			ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+			++k;
+/* L10: */
+		    }
+		    i__2 = kk + j - 1;
+		    i__3 = kk + j - 1;
+		    i__4 = j;
+		    q__1.r = x[i__4].r * temp.r - x[i__4].i * temp.i, q__1.i =
+			     x[i__4].r * temp.i + x[i__4].i * temp.r;
+		    r__1 = ap[i__3].r + q__1.r;
+		    ap[i__2].r = r__1, ap[i__2].i = 0.f;
+		} else {
+		    i__2 = kk + j - 1;
+		    i__3 = kk + j - 1;
+		    r__1 = ap[i__3].r;
+		    ap[i__2].r = r__1, ap[i__2].i = 0.f;
+		}
+		kk += j;
+/* L20: */
+	    }
+	} else {
+	    jx = kx;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+		    r_cnjg(&q__2, &x[jx]);
+		    q__1.r = *alpha * q__2.r, q__1.i = *alpha * q__2.i;
+		    temp.r = q__1.r, temp.i = q__1.i;
+		    ix = kx;
+		    i__2 = kk + j - 2;
+		    for (k = kk; k <= i__2; ++k) {
+			i__3 = k;
+			i__4 = k;
+			i__5 = ix;
+			q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+				q__2.i = x[i__5].r * temp.i + x[i__5].i *
+				temp.r;
+			q__1.r = ap[i__4].r + q__2.r, q__1.i = ap[i__4].i +
+				q__2.i;
+			ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+			ix += *incx;
+/* L30: */
+		    }
+		    i__2 = kk + j - 1;
+		    i__3 = kk + j - 1;
+		    i__4 = jx;
+		    q__1.r = x[i__4].r * temp.r - x[i__4].i * temp.i, q__1.i =
+			     x[i__4].r * temp.i + x[i__4].i * temp.r;
+		    r__1 = ap[i__3].r + q__1.r;
+		    ap[i__2].r = r__1, ap[i__2].i = 0.f;
+		} else {
+		    i__2 = kk + j - 1;
+		    i__3 = kk + j - 1;
+		    r__1 = ap[i__3].r;
+		    ap[i__2].r = r__1, ap[i__2].i = 0.f;
+		}
+		jx += *incx;
+		kk += j;
+/* L40: */
+	    }
+	}
+    } else {
+
+/* Form A when lower triangle is stored in AP. */
+
+	if (*incx == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+		    r_cnjg(&q__2, &x[j]);
+		    q__1.r = *alpha * q__2.r, q__1.i = *alpha * q__2.i;
+		    temp.r = q__1.r, temp.i = q__1.i;
+		    i__2 = kk;
+		    i__3 = kk;
+		    i__4 = j;
+		    q__1.r = temp.r * x[i__4].r - temp.i * x[i__4].i, q__1.i =
+			     temp.r * x[i__4].i + temp.i * x[i__4].r;
+		    r__1 = ap[i__3].r + q__1.r;
+		    ap[i__2].r = r__1, ap[i__2].i = 0.f;
+		    k = kk + 1;
+		    i__2 = *n;
+		    for (i__ = j + 1; i__ <= i__2; ++i__) {
+			i__3 = k;
+			i__4 = k;
+			i__5 = i__;
+			q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+				q__2.i = x[i__5].r * temp.i + x[i__5].i *
+				temp.r;
+			q__1.r = ap[i__4].r + q__2.r, q__1.i = ap[i__4].i +
+				q__2.i;
+			ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+			++k;
+/* L50: */
+		    }
+		} else {
+		    i__2 = kk;
+		    i__3 = kk;
+		    r__1 = ap[i__3].r;
+		    ap[i__2].r = r__1, ap[i__2].i = 0.f;
+		}
+		kk = kk + *n - j + 1;
+/* L60: */
+	    }
+	} else {
+	    jx = kx;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+		    r_cnjg(&q__2, &x[jx]);
+		    q__1.r = *alpha * q__2.r, q__1.i = *alpha * q__2.i;
+		    temp.r = q__1.r, temp.i = q__1.i;
+		    i__2 = kk;
+		    i__3 = kk;
+		    i__4 = jx;
+		    q__1.r = temp.r * x[i__4].r - temp.i * x[i__4].i, q__1.i =
+			     temp.r * x[i__4].i + temp.i * x[i__4].r;
+		    r__1 = ap[i__3].r + q__1.r;
+		    ap[i__2].r = r__1, ap[i__2].i = 0.f;
+		    ix = jx;
+		    i__2 = kk + *n - j;
+		    for (k = kk + 1; k <= i__2; ++k) {
+			ix += *incx;
+			i__3 = k;
+			i__4 = k;
+			i__5 = ix;
+			q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+				q__2.i = x[i__5].r * temp.i + x[i__5].i *
+				temp.r;
+			q__1.r = ap[i__4].r + q__2.r, q__1.i = ap[i__4].i +
+				q__2.i;
+			ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+/* L70: */
+		    }
+		} else {
+		    i__2 = kk;
+		    i__3 = kk;
+		    r__1 = ap[i__3].r;
+		    ap[i__2].r = r__1, ap[i__2].i = 0.f;
+		}
+		jx += *incx;
+		kk = kk + *n - j + 1;
+/* L80: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of CHPR  . */
+
+} /* chpr_ */
diff --git a/contrib/libs/cblas/chpr2.c b/contrib/libs/cblas/chpr2.c
new file mode 100644
index 0000000000..f16950a3e4
--- /dev/null
+++ b/contrib/libs/cblas/chpr2.c
@@ -0,0 +1,447 @@
+/* chpr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int chpr2_(char *uplo, integer *n, complex *alpha, complex *
+ x, integer *incx, complex *y, integer *incy, complex *ap)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4, i__5, i__6;
+ real r__1;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, k, kk, ix, iy, jx, jy, kx, ky, info;
+ complex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHPR2 performs the hermitian rank 2 operation */
+
+/* A := alpha*x*conjg( y' ) + conjg( alpha )*y*conjg( x' ) + A, */
+
+/* where alpha is a scalar, x and y are n element vectors and A is an */
+/* n by n hermitian matrix, supplied in packed form. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the matrix A is supplied in the packed */
+/* array AP as follows: */
+
+/* UPLO = 'U' or 'u' The upper triangular part of A is */
+/* supplied in AP. */
+
+/* UPLO = 'L' or 'l' The lower triangular part of A is */
+/* supplied in AP. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - COMPLEX array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* Y - COMPLEX array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. */
+/* Unchanged on exit. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+/* AP - COMPLEX array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular part of the hermitian matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */
+/* and a( 2, 2 ) respectively, and so on. On exit, the array */
+/* AP is overwritten by the upper triangular part of the */
+/* updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular part of the hermitian matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */
+/* and a( 3, 1 ) respectively, and so on. On exit, the array */
+/* AP is overwritten by the lower triangular part of the */
+/* updated matrix. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set, they are assumed to be zero, and on exit they */
+/* are set to zero. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --y;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*incy == 0) {
+ info = 7;
+ }
+ if (info != 0) {
+ xerbla_("CHPR2 ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || alpha->r == 0.f && alpha->i == 0.f) {
+ return 0;
+ }
+
+/* Set up the start points in X and Y if the increments are not both */
+/* unity. */
+
+ if (*incx != 1 || *incy != 1) {
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+ jx = kx;
+ jy = ky;
+ }
+
+/* Start the operations. In this version the elements of the array AP */
+/* are accessed sequentially with one pass through AP. */
+
+ kk = 1;
+ if (lsame_(uplo, "U")) {
+
+/* Form A when upper triangle is stored in AP. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ i__3 = j;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f || (y[i__3].r != 0.f
+ || y[i__3].i != 0.f)) {
+ r_cnjg(&q__2, &y[j]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i, q__1.i =
+ alpha->r * q__2.i + alpha->i * q__2.r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ i__2 = j;
+ q__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ r_cnjg(&q__1, &q__2);
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ k = kk;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = k;
+ i__4 = k;
+ i__5 = i__;
+ q__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ q__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ q__2.r = ap[i__4].r + q__3.r, q__2.i = ap[i__4].i +
+ q__3.i;
+ i__6 = i__;
+ q__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ q__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+ ++k;
+/* L10: */
+ }
+ i__2 = kk + j - 1;
+ i__3 = kk + j - 1;
+ i__4 = j;
+ q__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ q__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = j;
+ q__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ q__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ r__1 = ap[i__3].r + q__1.r;
+ ap[i__2].r = r__1, ap[i__2].i = 0.f;
+ } else {
+ i__2 = kk + j - 1;
+ i__3 = kk + j - 1;
+ r__1 = ap[i__3].r;
+ ap[i__2].r = r__1, ap[i__2].i = 0.f;
+ }
+ kk += j;
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ i__3 = jy;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f || (y[i__3].r != 0.f
+ || y[i__3].i != 0.f)) {
+ r_cnjg(&q__2, &y[jy]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i, q__1.i =
+ alpha->r * q__2.i + alpha->i * q__2.r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ i__2 = jx;
+ q__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ r_cnjg(&q__1, &q__2);
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ ix = kx;
+ iy = ky;
+ i__2 = kk + j - 2;
+ for (k = kk; k <= i__2; ++k) {
+ i__3 = k;
+ i__4 = k;
+ i__5 = ix;
+ q__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ q__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ q__2.r = ap[i__4].r + q__3.r, q__2.i = ap[i__4].i +
+ q__3.i;
+ i__6 = iy;
+ q__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ q__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L30: */
+ }
+ i__2 = kk + j - 1;
+ i__3 = kk + j - 1;
+ i__4 = jx;
+ q__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ q__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = jy;
+ q__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ q__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ r__1 = ap[i__3].r + q__1.r;
+ ap[i__2].r = r__1, ap[i__2].i = 0.f;
+ } else {
+ i__2 = kk + j - 1;
+ i__3 = kk + j - 1;
+ r__1 = ap[i__3].r;
+ ap[i__2].r = r__1, ap[i__2].i = 0.f;
+ }
+ jx += *incx;
+ jy += *incy;
+ kk += j;
+/* L40: */
+ }
+ }
+ } else {
+
+/* Form A when lower triangle is stored in AP. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ i__3 = j;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f || (y[i__3].r != 0.f
+ || y[i__3].i != 0.f)) {
+ r_cnjg(&q__2, &y[j]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i, q__1.i =
+ alpha->r * q__2.i + alpha->i * q__2.r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ i__2 = j;
+ q__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ r_cnjg(&q__1, &q__2);
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ i__2 = kk;
+ i__3 = kk;
+ i__4 = j;
+ q__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ q__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = j;
+ q__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ q__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ r__1 = ap[i__3].r + q__1.r;
+ ap[i__2].r = r__1, ap[i__2].i = 0.f;
+ k = kk + 1;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = k;
+ i__4 = k;
+ i__5 = i__;
+ q__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ q__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ q__2.r = ap[i__4].r + q__3.r, q__2.i = ap[i__4].i +
+ q__3.i;
+ i__6 = i__;
+ q__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ q__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+ ++k;
+/* L50: */
+ }
+ } else {
+ i__2 = kk;
+ i__3 = kk;
+ r__1 = ap[i__3].r;
+ ap[i__2].r = r__1, ap[i__2].i = 0.f;
+ }
+ kk = kk + *n - j + 1;
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ i__3 = jy;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f || (y[i__3].r != 0.f
+ || y[i__3].i != 0.f)) {
+ r_cnjg(&q__2, &y[jy]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i, q__1.i =
+ alpha->r * q__2.i + alpha->i * q__2.r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ i__2 = jx;
+ q__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ r_cnjg(&q__1, &q__2);
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ i__2 = kk;
+ i__3 = kk;
+ i__4 = jx;
+ q__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ q__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = jy;
+ q__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ q__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ r__1 = ap[i__3].r + q__1.r;
+ ap[i__2].r = r__1, ap[i__2].i = 0.f;
+ ix = jx;
+ iy = jy;
+ i__2 = kk + *n - j;
+ for (k = kk + 1; k <= i__2; ++k) {
+ ix += *incx;
+ iy += *incy;
+ i__3 = k;
+ i__4 = k;
+ i__5 = ix;
+ q__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ q__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ q__2.r = ap[i__4].r + q__3.r, q__2.i = ap[i__4].i +
+ q__3.i;
+ i__6 = iy;
+ q__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ q__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+/* L70: */
+ }
+ } else {
+ i__2 = kk;
+ i__3 = kk;
+ r__1 = ap[i__3].r;
+ ap[i__2].r = r__1, ap[i__2].i = 0.f;
+ }
+ jx += *incx;
+ jy += *incy;
+ kk = kk + *n - j + 1;
+/* L80: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CHPR2 . */
+
+} /* chpr2_ */
diff --git a/contrib/libs/cblas/crotg.c b/contrib/libs/cblas/crotg.c
new file mode 100644
index 0000000000..8edb25ea22
--- /dev/null
+++ b/contrib/libs/cblas/crotg.c
@@ -0,0 +1,72 @@
+/* crotg.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int crotg_(complex *ca, complex *cb, real *c__, complex *s)
+{
+ /* System generated locals */
+ real r__1, r__2;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ double c_abs(complex *), sqrt(doublereal);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ real norm;
+ complex alpha;
+ real scale;
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CROTG determines a complex Givens rotation. */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+ if (c_abs(ca) != 0.f) {
+ goto L10;
+ }
+ *c__ = 0.f;
+ s->r = 1.f, s->i = 0.f;
+ ca->r = cb->r, ca->i = cb->i;
+ goto L20;
+L10:
+ scale = c_abs(ca) + c_abs(cb);
+ q__1.r = ca->r / scale, q__1.i = ca->i / scale;
+/* Computing 2nd power */
+ r__1 = c_abs(&q__1);
+ q__2.r = cb->r / scale, q__2.i = cb->i / scale;
+/* Computing 2nd power */
+ r__2 = c_abs(&q__2);
+ norm = scale * sqrt(r__1 * r__1 + r__2 * r__2);
+ r__1 = c_abs(ca);
+ q__1.r = ca->r / r__1, q__1.i = ca->i / r__1;
+ alpha.r = q__1.r, alpha.i = q__1.i;
+ *c__ = c_abs(ca) / norm;
+ r_cnjg(&q__3, cb);
+ q__2.r = alpha.r * q__3.r - alpha.i * q__3.i, q__2.i = alpha.r * q__3.i +
+ alpha.i * q__3.r;
+ q__1.r = q__2.r / norm, q__1.i = q__2.i / norm;
+ s->r = q__1.r, s->i = q__1.i;
+ q__1.r = norm * alpha.r, q__1.i = norm * alpha.i;
+ ca->r = q__1.r, ca->i = q__1.i;
+L20:
+ return 0;
+} /* crotg_ */
diff --git a/contrib/libs/cblas/cscal.c b/contrib/libs/cblas/cscal.c
new file mode 100644
index 0000000000..7c22710d04
--- /dev/null
+++ b/contrib/libs/cblas/cscal.c
@@ -0,0 +1,81 @@
+/* cscal.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cscal_(integer *n, complex *ca, complex *cx, integer *
+ incx)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, nincx;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* scales a vector by a constant. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 3/93 to return if incx .le. 0. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+ /* Parameter adjustments */
+ --cx;
+
+ /* Function Body */
+ if (*n <= 0 || *incx <= 0) {
+ return 0;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ nincx = *n * *incx;
+ i__1 = nincx;
+ i__2 = *incx;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ i__3 = i__;
+ i__4 = i__;
+ q__1.r = ca->r * cx[i__4].r - ca->i * cx[i__4].i, q__1.i = ca->r * cx[
+ i__4].i + ca->i * cx[i__4].r;
+ cx[i__3].r = q__1.r, cx[i__3].i = q__1.i;
+/* L10: */
+ }
+ return 0;
+
+/* code for increment equal to 1 */
+
+L20:
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__1 = i__;
+ i__3 = i__;
+ q__1.r = ca->r * cx[i__3].r - ca->i * cx[i__3].i, q__1.i = ca->r * cx[
+ i__3].i + ca->i * cx[i__3].r;
+ cx[i__1].r = q__1.r, cx[i__1].i = q__1.i;
+/* L30: */
+ }
+ return 0;
+} /* cscal_ */
diff --git a/contrib/libs/cblas/csrot.c b/contrib/libs/cblas/csrot.c
new file mode 100644
index 0000000000..0010a2af91
--- /dev/null
+++ b/contrib/libs/cblas/csrot.c
@@ -0,0 +1,153 @@
+/* csrot.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int csrot_(integer *n, complex *cx, integer *incx, complex *
+ cy, integer *incy, real *c__, real *s)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ complex q__1, q__2, q__3;
+
+ /* Local variables */
+ integer i__, ix, iy;
+ complex ctemp;
+
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Applies a plane rotation, where the cos and sin (c and s) are real */
+/* and the vectors cx and cy are complex. */
+/* jack dongarra, linpack, 3/11/78. */
+
+/* Arguments */
+/* ========== */
+
+/* N (input) INTEGER */
+/* On entry, N specifies the order of the vectors cx and cy. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* CX (input) COMPLEX array, dimension at least */
+/* ( 1 + ( N - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array CX must contain the n */
+/* element vector cx. On exit, CX is overwritten by the updated */
+/* vector cx. */
+
+/* INCX (input) INTEGER */
+/* On entry, INCX specifies the increment for the elements of */
+/* CX. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* CY (input) COMPLEX array, dimension at least */
+/* ( 1 + ( N - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array CY must contain the n */
+/* element vector cy. On exit, CY is overwritten by the updated */
+/* vector cy. */
+
+/* INCY (input) INTEGER */
+/* On entry, INCY specifies the increment for the elements of */
+/* CY. INCY must not be zero. */
+/* Unchanged on exit. */
+
+/* C (input) REAL */
+/* On entry, C specifies the cosine, cos. */
+/* Unchanged on exit. */
+
+/* S (input) REAL */
+/* On entry, S specifies the sine, sin. */
+/* Unchanged on exit. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --cy;
+ --cx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/* code for unequal increments or equal increments not equal */
+/* to 1 */
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ q__2.r = *c__ * cx[i__2].r, q__2.i = *c__ * cx[i__2].i;
+ i__3 = iy;
+ q__3.r = *s * cy[i__3].r, q__3.i = *s * cy[i__3].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ ctemp.r = q__1.r, ctemp.i = q__1.i;
+ i__2 = iy;
+ i__3 = iy;
+ q__2.r = *c__ * cy[i__3].r, q__2.i = *c__ * cy[i__3].i;
+ i__4 = ix;
+ q__3.r = *s * cx[i__4].r, q__3.i = *s * cx[i__4].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+ cy[i__2].r = q__1.r, cy[i__2].i = q__1.i;
+ i__2 = ix;
+ cx[i__2].r = ctemp.r, cx[i__2].i = ctemp.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ q__2.r = *c__ * cx[i__2].r, q__2.i = *c__ * cx[i__2].i;
+ i__3 = i__;
+ q__3.r = *s * cy[i__3].r, q__3.i = *s * cy[i__3].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ ctemp.r = q__1.r, ctemp.i = q__1.i;
+ i__2 = i__;
+ i__3 = i__;
+ q__2.r = *c__ * cy[i__3].r, q__2.i = *c__ * cy[i__3].i;
+ i__4 = i__;
+ q__3.r = *s * cx[i__4].r, q__3.i = *s * cx[i__4].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+ cy[i__2].r = q__1.r, cy[i__2].i = q__1.i;
+ i__2 = i__;
+ cx[i__2].r = ctemp.r, cx[i__2].i = ctemp.i;
+/* L30: */
+ }
+ return 0;
+} /* csrot_ */
diff --git a/contrib/libs/cblas/csscal.c b/contrib/libs/cblas/csscal.c
new file mode 100644
index 0000000000..f849d23c71
--- /dev/null
+++ b/contrib/libs/cblas/csscal.c
@@ -0,0 +1,88 @@
+/* csscal.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int csscal_(integer *n, real *sa, complex *cx, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ real r__1, r__2;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer i__, nincx;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* scales a complex vector by a real constant. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 3/93 to return if incx .le. 0. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --cx;
+
+ /* Function Body */
+ if (*n <= 0 || *incx <= 0) {
+ return 0;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ nincx = *n * *incx;
+ i__1 = nincx;
+ i__2 = *incx;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ i__3 = i__;
+ i__4 = i__;
+ r__1 = *sa * cx[i__4].r;
+ r__2 = *sa * r_imag(&cx[i__]);
+ q__1.r = r__1, q__1.i = r__2;
+ cx[i__3].r = q__1.r, cx[i__3].i = q__1.i;
+/* L10: */
+ }
+ return 0;
+
+/* code for increment equal to 1 */
+
+L20:
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__1 = i__;
+ i__3 = i__;
+ r__1 = *sa * cx[i__3].r;
+ r__2 = *sa * r_imag(&cx[i__]);
+ q__1.r = r__1, q__1.i = r__2;
+ cx[i__1].r = q__1.r, cx[i__1].i = q__1.i;
+/* L30: */
+ }
+ return 0;
+} /* csscal_ */
diff --git a/contrib/libs/cblas/cswap.c b/contrib/libs/cblas/cswap.c
new file mode 100644
index 0000000000..b007f86a70
--- /dev/null
+++ b/contrib/libs/cblas/cswap.c
@@ -0,0 +1,93 @@
+/* cswap.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cswap_(integer *n, complex *cx, integer *incx, complex *
+ cy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, ix, iy;
+ complex ctemp;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* interchanges two vectors. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+ /* Parameter adjustments */
+ --cy;
+ --cx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/* code for unequal increments or equal increments not equal */
+/* to 1 */
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ ctemp.r = cx[i__2].r, ctemp.i = cx[i__2].i;
+ i__2 = ix;
+ i__3 = iy;
+ cx[i__2].r = cy[i__3].r, cx[i__2].i = cy[i__3].i;
+ i__2 = iy;
+ cy[i__2].r = ctemp.r, cy[i__2].i = ctemp.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ ctemp.r = cx[i__2].r, ctemp.i = cx[i__2].i;
+ i__2 = i__;
+ i__3 = i__;
+ cx[i__2].r = cy[i__3].r, cx[i__2].i = cy[i__3].i;
+ i__2 = i__;
+ cy[i__2].r = ctemp.r, cy[i__2].i = ctemp.i;
+/* L30: */
+ }
+ return 0;
+} /* cswap_ */
diff --git a/contrib/libs/cblas/csymm.c b/contrib/libs/cblas/csymm.c
new file mode 100644
index 0000000000..595b867325
--- /dev/null
+++ b/contrib/libs/cblas/csymm.c
@@ -0,0 +1,495 @@
+/* csymm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int csymm_(char *side, char *uplo, integer *m, integer *n,
+	complex *alpha, complex *a, integer *lda, complex *b, integer *ldb,
+	complex *beta, complex *c__, integer *ldc)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+	    i__3, i__4, i__5, i__6;
+    complex q__1, q__2, q__3, q__4, q__5;
+
+    /* Local variables */
+    integer i__, j, k, info;
+    complex temp1, temp2;
+    extern logical lsame_(char *, char *);
+    integer nrowa;
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/*  .. Scalar Arguments .. */
+/*  .. */
+/*  .. Array Arguments .. */
+/*  .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CSYMM performs one of the matrix-matrix operations */
+
+/*     C := alpha*A*B + beta*C, */
+
+/*  or */
+
+/*     C := alpha*B*A + beta*C, */
+
+/*  where alpha and beta are scalars, A is a symmetric matrix and B and */
+/*  C are m by n matrices. */
+
+/*  Arguments */
+/*  ========== */
+
+/*  SIDE   - CHARACTER*1. */
+/*           On entry, SIDE specifies whether the symmetric matrix A */
+/*           appears on the left or right in the operation as follows: */
+
+/*              SIDE = 'L' or 'l'   C := alpha*A*B + beta*C, */
+
+/*              SIDE = 'R' or 'r'   C := alpha*B*A + beta*C, */
+
+/*           Unchanged on exit. */
+
+/*  UPLO   - CHARACTER*1. */
+/*           On entry, UPLO specifies whether the upper or lower */
+/*           triangular part of the symmetric matrix A is to be */
+/*           referenced as follows: */
+
+/*              UPLO = 'U' or 'u'   Only the upper triangular part of the */
+/*                                  symmetric matrix is to be referenced. */
+
+/*              UPLO = 'L' or 'l'   Only the lower triangular part of the */
+/*                                  symmetric matrix is to be referenced. */
+
+/*           Unchanged on exit. */
+
+/*  M      - INTEGER. */
+/*           On entry, M specifies the number of rows of the matrix C. */
+/*           M must be at least zero. */
+/*           Unchanged on exit. */
+
+/*  N      - INTEGER. */
+/*           On entry, N specifies the number of columns of the matrix C. */
+/*           N must be at least zero. */
+/*           Unchanged on exit. */
+
+/*  ALPHA  - COMPLEX . */
+/*           On entry, ALPHA specifies the scalar alpha. */
+/*           Unchanged on exit. */
+
+/*  A      - COMPLEX array of DIMENSION ( LDA, ka ), where ka is */
+/*           m when SIDE = 'L' or 'l' and is n otherwise. */
+/*           Before entry with SIDE = 'L' or 'l', the m by m part of */
+/*           the array A must contain the symmetric matrix, such that */
+/*           when UPLO = 'U' or 'u', the leading m by m upper triangular */
+/*           part of the array A must contain the upper triangular part */
+/*           of the symmetric matrix and the strictly lower triangular */
+/*           part of A is not referenced, and when UPLO = 'L' or 'l', */
+/*           the leading m by m lower triangular part of the array A */
+/*           must contain the lower triangular part of the symmetric */
+/*           matrix and the strictly upper triangular part of A is not */
+/*           referenced. */
+/*           Before entry with SIDE = 'R' or 'r', the n by n part of */
+/*           the array A must contain the symmetric matrix, such that */
+/*           when UPLO = 'U' or 'u', the leading n by n upper triangular */
+/*           part of the array A must contain the upper triangular part */
+/*           of the symmetric matrix and the strictly lower triangular */
+/*           part of A is not referenced, and when UPLO = 'L' or 'l', */
+/*           the leading n by n lower triangular part of the array A */
+/*           must contain the lower triangular part of the symmetric */
+/*           matrix and the strictly upper triangular part of A is not */
+/*           referenced. */
+/*           Unchanged on exit. */
+
+/*  LDA    - INTEGER. */
+/*           On entry, LDA specifies the first dimension of A as declared */
+/*           in the calling (sub) program. When SIDE = 'L' or 'l' then */
+/*           LDA must be at least max( 1, m ), otherwise LDA must be at */
+/*           least max( 1, n ). */
+/*           Unchanged on exit. */
+
+/*  B      - COMPLEX array of DIMENSION ( LDB, n ). */
+/*           Before entry, the leading m by n part of the array B must */
+/*           contain the matrix B. */
+/*           Unchanged on exit. */
+
+/*  LDB    - INTEGER. */
+/*           On entry, LDB specifies the first dimension of B as declared */
+/*           in the calling (sub) program. LDB must be at least */
+/*           max( 1, m ). */
+/*           Unchanged on exit. */
+
+/*  BETA   - COMPLEX . */
+/*           On entry, BETA specifies the scalar beta. When BETA is */
+/*           supplied as zero then C need not be set on input. */
+/*           Unchanged on exit. */
+
+/*  C      - COMPLEX array of DIMENSION ( LDC, n ). */
+/*           Before entry, the leading m by n part of the array C must */
+/*           contain the matrix C, except when beta is zero, in which */
+/*           case C need not be set on entry. */
+/*           On exit, the array C is overwritten by the m by n updated */
+/*           matrix. */
+
+/*  LDC    - INTEGER. */
+/*           On entry, LDC specifies the first dimension of C as declared */
+/*           in the calling (sub) program. LDC must be at least */
+/*           max( 1, m ). */
+/*           Unchanged on exit. */
+
+
+/*  Level 3 Blas routine. */
+
+/*  -- Written on 8-February-1989. */
+/*     Jack Dongarra, Argonne National Laboratory. */
+/*     Iain Duff, AERE Harwell. */
+/*     Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/*     Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/*  .. External Functions .. */
+/*  .. */
+/*  .. External Subroutines .. */
+/*  .. */
+/*  .. Intrinsic Functions .. */
+/*  .. */
+/*  .. Local Scalars .. */
+/*  .. */
+/*  .. Parameters .. */
+/*  .. */
+
+/*     Set NROWA as the number of rows of A. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+
+    /* Function Body */
+    if (lsame_(side, "L")) {
+	nrowa = *m;
+    } else {
+	nrowa = *n;
+    }
+    upper = lsame_(uplo, "U");
+
+/*     Test the input parameters. */
+
+    info = 0;
+    if (! lsame_(side, "L") && ! lsame_(side, "R")) {
+	info = 1;
+    } else if (! upper && ! lsame_(uplo, "L")) {
+	info = 2;
+    } else if (*m < 0) {
+	info = 3;
+    } else if (*n < 0) {
+	info = 4;
+    } else if (*lda < max(1,nrowa)) {
+	info = 7;
+    } else if (*ldb < max(1,*m)) {
+	info = 9;
+    } else if (*ldc < max(1,*m)) {
+	info = 12;
+    }
+    if (info != 0) {
+	xerbla_("CSYMM ", &info);
+	return 0;
+    }
+
+/*     Quick return if possible. */
+
+    if (*m == 0 || *n == 0 || alpha->r == 0.f && alpha->i == 0.f && (beta->r
+	    == 1.f && beta->i == 0.f)) {
+	return 0;
+    }
+
+/*     And when alpha.eq.zero: C := beta*C (C is zeroed when beta is zero). */
+
+    if (alpha->r == 0.f && alpha->i == 0.f) {
+	if (beta->r == 0.f && beta->i == 0.f) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * c_dim1;
+		    c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L10: */
+		}
+/* L20: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * c_dim1;
+		    i__4 = i__ + j * c_dim1;
+		    q__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4].i,
+			    q__1.i = beta->r * c__[i__4].i + beta->i * c__[
+			    i__4].r;
+		    c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+	return 0;
+    }
+
+/*     Start the operations. */
+
+    if (lsame_(side, "L")) {
+
+/*        Form  C := alpha*A*B + beta*C. */
+
+	if (upper) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * b_dim1;
+		    q__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+			    q__1.i = alpha->r * b[i__3].i + alpha->i * b[i__3]
+			    .r;
+		    temp1.r = q__1.r, temp1.i = q__1.i;
+		    temp2.r = 0.f, temp2.i = 0.f;
+		    i__3 = i__ - 1;
+		    for (k = 1; k <= i__3; ++k) {
+			i__4 = k + j * c_dim1;
+			i__5 = k + j * c_dim1;
+			i__6 = k + i__ * a_dim1;
+			q__2.r = temp1.r * a[i__6].r - temp1.i * a[i__6].i,
+				q__2.i = temp1.r * a[i__6].i + temp1.i * a[
+				i__6].r;
+			q__1.r = c__[i__5].r + q__2.r, q__1.i = c__[i__5].i +
+				q__2.i;
+			c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+			i__4 = k + j * b_dim1;
+			i__5 = k + i__ * a_dim1;
+			q__2.r = b[i__4].r * a[i__5].r - b[i__4].i * a[i__5]
+				.i, q__2.i = b[i__4].r * a[i__5].i + b[i__4]
+				.i * a[i__5].r;
+			q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+			temp2.r = q__1.r, temp2.i = q__1.i;
+/* L50: */
+		    }
+		    if (beta->r == 0.f && beta->i == 0.f) {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + i__ * a_dim1;
+			q__2.r = temp1.r * a[i__4].r - temp1.i * a[i__4].i,
+				q__2.i = temp1.r * a[i__4].i + temp1.i * a[
+				i__4].r;
+			q__3.r = alpha->r * temp2.r - alpha->i * temp2.i,
+				q__3.i = alpha->r * temp2.i + alpha->i *
+				temp2.r;
+			q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    } else {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			q__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+				.i, q__3.i = beta->r * c__[i__4].i + beta->i *
+				 c__[i__4].r;
+			i__5 = i__ + i__ * a_dim1;
+			q__4.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+				q__4.i = temp1.r * a[i__5].i + temp1.i * a[
+				i__5].r;
+			q__2.r = q__3.r + q__4.r, q__2.i = q__3.i + q__4.i;
+			q__5.r = alpha->r * temp2.r - alpha->i * temp2.i,
+				q__5.i = alpha->r * temp2.i + alpha->i *
+				temp2.r;
+			q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    }
+/* L60: */
+		}
+/* L70: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		for (i__ = *m; i__ >= 1; --i__) {
+		    i__2 = i__ + j * b_dim1;
+		    q__1.r = alpha->r * b[i__2].r - alpha->i * b[i__2].i,
+			    q__1.i = alpha->r * b[i__2].i + alpha->i * b[i__2]
+			    .r;
+		    temp1.r = q__1.r, temp1.i = q__1.i;
+		    temp2.r = 0.f, temp2.i = 0.f;
+		    i__2 = *m;
+		    for (k = i__ + 1; k <= i__2; ++k) {
+			i__3 = k + j * c_dim1;
+			i__4 = k + j * c_dim1;
+			i__5 = k + i__ * a_dim1;
+			q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+				q__2.i = temp1.r * a[i__5].i + temp1.i * a[
+				i__5].r;
+			q__1.r = c__[i__4].r + q__2.r, q__1.i = c__[i__4].i +
+				q__2.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+			i__3 = k + j * b_dim1;
+			i__4 = k + i__ * a_dim1;
+			q__2.r = b[i__3].r * a[i__4].r - b[i__3].i * a[i__4]
+				.i, q__2.i = b[i__3].r * a[i__4].i + b[i__3]
+				.i * a[i__4].r;
+			q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+			temp2.r = q__1.r, temp2.i = q__1.i;
+/* L80: */
+		    }
+		    if (beta->r == 0.f && beta->i == 0.f) {
+			i__2 = i__ + j * c_dim1;
+			i__3 = i__ + i__ * a_dim1;
+			q__2.r = temp1.r * a[i__3].r - temp1.i * a[i__3].i,
+				q__2.i = temp1.r * a[i__3].i + temp1.i * a[
+				i__3].r;
+			q__3.r = alpha->r * temp2.r - alpha->i * temp2.i,
+				q__3.i = alpha->r * temp2.i + alpha->i *
+				temp2.r;
+			q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+			c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+		    } else {
+			i__2 = i__ + j * c_dim1;
+			i__3 = i__ + j * c_dim1;
+			q__3.r = beta->r * c__[i__3].r - beta->i * c__[i__3]
+				.i, q__3.i = beta->r * c__[i__3].i + beta->i *
+				 c__[i__3].r;
+			i__4 = i__ + i__ * a_dim1;
+			q__4.r = temp1.r * a[i__4].r - temp1.i * a[i__4].i,
+				q__4.i = temp1.r * a[i__4].i + temp1.i * a[
+				i__4].r;
+			q__2.r = q__3.r + q__4.r, q__2.i = q__3.i + q__4.i;
+			q__5.r = alpha->r * temp2.r - alpha->i * temp2.i,
+				q__5.i = alpha->r * temp2.i + alpha->i *
+				temp2.r;
+			q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+			c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+		    }
+/* L90: */
+		}
+/* L100: */
+	    }
+	}
+    } else {
+
+/*        Form  C := alpha*B*A + beta*C. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = j + j * a_dim1;
+	    q__1.r = alpha->r * a[i__2].r - alpha->i * a[i__2].i, q__1.i =
+		    alpha->r * a[i__2].i + alpha->i * a[i__2].r;
+	    temp1.r = q__1.r, temp1.i = q__1.i;
+	    if (beta->r == 0.f && beta->i == 0.f) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * c_dim1;
+		    i__4 = i__ + j * b_dim1;
+		    q__1.r = temp1.r * b[i__4].r - temp1.i * b[i__4].i,
+			    q__1.i = temp1.r * b[i__4].i + temp1.i * b[i__4]
+			    .r;
+		    c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L110: */
+		}
+	    } else {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * c_dim1;
+		    i__4 = i__ + j * c_dim1;
+		    q__2.r = beta->r * c__[i__4].r - beta->i * c__[i__4].i,
+			    q__2.i = beta->r * c__[i__4].i + beta->i * c__[
+			    i__4].r;
+		    i__5 = i__ + j * b_dim1;
+		    q__3.r = temp1.r * b[i__5].r - temp1.i * b[i__5].i,
+			    q__3.i = temp1.r * b[i__5].i + temp1.i * b[i__5]
+			    .r;
+		    q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+		    c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L120: */
+		}
+	    }
+	    i__2 = j - 1;
+	    for (k = 1; k <= i__2; ++k) {
+		if (upper) {
+		    i__3 = k + j * a_dim1;
+		    q__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+			    q__1.i = alpha->r * a[i__3].i + alpha->i * a[i__3]
+			    .r;
+		    temp1.r = q__1.r, temp1.i = q__1.i;
+		} else {
+		    i__3 = j + k * a_dim1;
+		    q__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+			    q__1.i = alpha->r * a[i__3].i + alpha->i * a[i__3]
+			    .r;
+		    temp1.r = q__1.r, temp1.i = q__1.i;
+		}
+		i__3 = *m;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    i__4 = i__ + j * c_dim1;
+		    i__5 = i__ + j * c_dim1;
+		    i__6 = i__ + k * b_dim1;
+		    q__2.r = temp1.r * b[i__6].r - temp1.i * b[i__6].i,
+			    q__2.i = temp1.r * b[i__6].i + temp1.i * b[i__6]
+			    .r;
+		    q__1.r = c__[i__5].r + q__2.r, q__1.i = c__[i__5].i +
+			    q__2.i;
+		    c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L130: */
+		}
+/* L140: */
+	    }
+	    i__2 = *n;
+	    for (k = j + 1; k <= i__2; ++k) {
+		if (upper) {
+		    i__3 = j + k * a_dim1;
+		    q__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+			    q__1.i = alpha->r * a[i__3].i + alpha->i * a[i__3]
+			    .r;
+		    temp1.r = q__1.r, temp1.i = q__1.i;
+		} else {
+		    i__3 = k + j * a_dim1;
+		    q__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+			    q__1.i = alpha->r * a[i__3].i + alpha->i * a[i__3]
+			    .r;
+		    temp1.r = q__1.r, temp1.i = q__1.i;
+		}
+		i__3 = *m;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    i__4 = i__ + j * c_dim1;
+		    i__5 = i__ + j * c_dim1;
+		    i__6 = i__ + k * b_dim1;
+		    q__2.r = temp1.r * b[i__6].r - temp1.i * b[i__6].i,
+			    q__2.i = temp1.r * b[i__6].i + temp1.i * b[i__6]
+			    .r;
+		    q__1.r = c__[i__5].r + q__2.r, q__1.i = c__[i__5].i +
+			    q__2.i;
+		    c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L150: */
+		}
+/* L160: */
+	    }
+/* L170: */
+	}
+    }
+
+    return 0;
+
+/*     End of CSYMM . */
+
+} /* csymm_ */
diff --git a/contrib/libs/cblas/csyr2k.c b/contrib/libs/cblas/csyr2k.c
new file mode 100644
index 0000000000..782867892e
--- /dev/null
+++ b/contrib/libs/cblas/csyr2k.c
@@ -0,0 +1,537 @@
+/* csyr2k.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int csyr2k_(char *uplo, char *trans, integer *n, integer *k,
+	complex *alpha, complex *a, integer *lda, complex *b, integer *ldb,
+	complex *beta, complex *c__, integer *ldc)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+	    i__3, i__4, i__5, i__6, i__7;
+    complex q__1, q__2, q__3, q__4, q__5;
+
+    /* Local variables */
+    integer i__, j, l, info;
+    complex temp1, temp2;
+    extern logical lsame_(char *, char *);
+    integer nrowa;
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/*  .. Scalar Arguments .. */
+/*  .. */
+/*  .. Array Arguments .. */
+/*  .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CSYR2K performs one of the symmetric rank 2k operations */
+
+/*     C := alpha*A*B' + alpha*B*A' + beta*C, */
+
+/*  or */
+
+/*     C := alpha*A'*B + alpha*B'*A + beta*C, */
+
+/*  where alpha and beta are scalars, C is an n by n symmetric matrix */
+/*  and A and B are n by k matrices in the first case and k by n */
+/*  matrices in the second case. */
+
+/*  Arguments */
+/*  ========== */
+
+/*  UPLO   - CHARACTER*1. */
+/*           On entry, UPLO specifies whether the upper or lower */
+/*           triangular part of the array C is to be referenced as */
+/*           follows: */
+
+/*              UPLO = 'U' or 'u'   Only the upper triangular part of C */
+/*                                  is to be referenced. */
+
+/*              UPLO = 'L' or 'l'   Only the lower triangular part of C */
+/*                                  is to be referenced. */
+
+/*           Unchanged on exit. */
+
+/*  TRANS  - CHARACTER*1. */
+/*           On entry, TRANS specifies the operation to be performed as */
+/*           follows: */
+
+/*              TRANS = 'N' or 'n'    C := alpha*A*B' + alpha*B*A' + */
+/*                                         beta*C. */
+
+/*              TRANS = 'T' or 't'    C := alpha*A'*B + alpha*B'*A + */
+/*                                         beta*C. */
+
+/*           Unchanged on exit. */
+
+/*  N      - INTEGER. */
+/*           On entry, N specifies the order of the matrix C. N must be */
+/*           at least zero. */
+/*           Unchanged on exit. */
+
+/*  K      - INTEGER. */
+/*           On entry with TRANS = 'N' or 'n', K specifies the number */
+/*           of columns of the matrices A and B, and on entry with */
+/*           TRANS = 'T' or 't', K specifies the number of rows of the */
+/*           matrices A and B. K must be at least zero. */
+/*           Unchanged on exit. */
+
+/*  ALPHA  - COMPLEX . */
+/*           On entry, ALPHA specifies the scalar alpha. */
+/*           Unchanged on exit. */
+
+/*  A      - COMPLEX array of DIMENSION ( LDA, ka ), where ka is */
+/*           k when TRANS = 'N' or 'n', and is n otherwise. */
+/*           Before entry with TRANS = 'N' or 'n', the leading n by k */
+/*           part of the array A must contain the matrix A, otherwise */
+/*           the leading k by n part of the array A must contain the */
+/*           matrix A. */
+/*           Unchanged on exit. */
+
+/*  LDA    - INTEGER. */
+/*           On entry, LDA specifies the first dimension of A as declared */
+/*           in the calling (sub) program. When TRANS = 'N' or 'n' */
+/*           then LDA must be at least max( 1, n ), otherwise LDA must */
+/*           be at least max( 1, k ). */
+/*           Unchanged on exit. */
+
+/*  B      - COMPLEX array of DIMENSION ( LDB, kb ), where kb is */
+/*           k when TRANS = 'N' or 'n', and is n otherwise. */
+/*           Before entry with TRANS = 'N' or 'n', the leading n by k */
+/*           part of the array B must contain the matrix B, otherwise */
+/*           the leading k by n part of the array B must contain the */
+/*           matrix B. */
+/*           Unchanged on exit. */
+
+/*  LDB    - INTEGER. */
+/*           On entry, LDB specifies the first dimension of B as declared */
+/*           in the calling (sub) program. When TRANS = 'N' or 'n' */
+/*           then LDB must be at least max( 1, n ), otherwise LDB must */
+/*           be at least max( 1, k ). */
+/*           Unchanged on exit. */
+
+/*  BETA   - COMPLEX . */
+/*           On entry, BETA specifies the scalar beta. */
+/*           Unchanged on exit. */
+
+/*  C      - COMPLEX array of DIMENSION ( LDC, n ). */
+/*           Before entry with UPLO = 'U' or 'u', the leading n by n */
+/*           upper triangular part of the array C must contain the upper */
+/*           triangular part of the symmetric matrix and the strictly */
+/*           lower triangular part of C is not referenced. On exit, the */
+/*           upper triangular part of the array C is overwritten by the */
+/*           upper triangular part of the updated matrix. */
+/*           Before entry with UPLO = 'L' or 'l', the leading n by n */
+/*           lower triangular part of the array C must contain the lower */
+/*           triangular part of the symmetric matrix and the strictly */
+/*           upper triangular part of C is not referenced. On exit, the */
+/*           lower triangular part of the array C is overwritten by the */
+/*           lower triangular part of the updated matrix. */
+
+/*  LDC    - INTEGER. */
+/*           On entry, LDC specifies the first dimension of C as declared */
+/*           in the calling (sub) program. LDC must be at least */
+/*           max( 1, n ). */
+/*           Unchanged on exit. */
+
+
+/*  Level 3 Blas routine. */
+
+/*  -- Written on 8-February-1989. */
+/*     Jack Dongarra, Argonne National Laboratory. */
+/*     Iain Duff, AERE Harwell. */
+/*     Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/*     Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/*  .. External Functions .. */
+/*  .. */
+/*  .. External Subroutines .. */
+/*  .. */
+/*  .. Intrinsic Functions .. */
+/*  .. */
+/*  .. Local Scalars .. */
+/*  .. */
+/*  .. Parameters .. */
+/*  .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+
+    /* Function Body */
+    if (lsame_(trans, "N")) {
+	nrowa = *n;
+    } else {
+	nrowa = *k;
+    }
+    upper = lsame_(uplo, "U");
+
+    info = 0;
+    if (! upper && ! lsame_(uplo, "L")) {
+	info = 1;
+    } else if (! lsame_(trans, "N") && ! lsame_(trans,
+	    "T")) {
+	info = 2;
+    } else if (*n < 0) {
+	info = 3;
+    } else if (*k < 0) {
+	info = 4;
+    } else if (*lda < max(1,nrowa)) {
+	info = 7;
+    } else if (*ldb < max(1,nrowa)) {
+	info = 9;
+    } else if (*ldc < max(1,*n)) {
+	info = 12;
+    }
+    if (info != 0) {
+	xerbla_("CSYR2K", &info);
+	return 0;
+    }
+
+/*     Quick return if possible. */
+
+    if (*n == 0 || (alpha->r == 0.f && alpha->i == 0.f || *k == 0) && (
+	    beta->r == 1.f && beta->i == 0.f)) {
+	return 0;
+    }
+
+/*     And when alpha.eq.zero: only the referenced triangle of C is */
+/*     scaled by beta (or zeroed when beta is zero). */
+
+    if (alpha->r == 0.f && alpha->i == 0.f) {
+	if (upper) {
+	    if (beta->r == 0.f && beta->i == 0.f) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L10: */
+		    }
+/* L20: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			q__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+				.i, q__1.i = beta->r * c__[i__4].i + beta->i *
+				 c__[i__4].r;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L30: */
+		    }
+/* L40: */
+		}
+	    }
+	} else {
+	    if (beta->r == 0.f && beta->i == 0.f) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L50: */
+		    }
+/* L60: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			q__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+				.i, q__1.i = beta->r * c__[i__4].i + beta->i *
+				 c__[i__4].r;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L70: */
+		    }
+/* L80: */
+		}
+	    }
+	}
+	return 0;
+    }
+
+/*     Start the operations. */
+
+    if (lsame_(trans, "N")) {
+
+/*        Form  C := alpha*A*B' + alpha*B*A' + C. */
+
+	if (upper) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (beta->r == 0.f && beta->i == 0.f) {
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L90: */
+		    }
+		} else if (beta->r != 1.f || beta->i != 0.f) {
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			q__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+				.i, q__1.i = beta->r * c__[i__4].i + beta->i *
+				 c__[i__4].r;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L100: */
+		    }
+		}
+		i__2 = *k;
+		for (l = 1; l <= i__2; ++l) {
+		    i__3 = j + l * a_dim1;
+		    i__4 = j + l * b_dim1;
+		    if (a[i__3].r != 0.f || a[i__3].i != 0.f || (b[i__4].r !=
+			    0.f || b[i__4].i != 0.f)) {
+			i__3 = j + l * b_dim1;
+			q__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+				q__1.i = alpha->r * b[i__3].i + alpha->i * b[
+				i__3].r;
+			temp1.r = q__1.r, temp1.i = q__1.i;
+			i__3 = j + l * a_dim1;
+			q__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+				q__1.i = alpha->r * a[i__3].i + alpha->i * a[
+				i__3].r;
+			temp2.r = q__1.r, temp2.i = q__1.i;
+			i__3 = j;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    i__4 = i__ + j * c_dim1;
+			    i__5 = i__ + j * c_dim1;
+			    i__6 = i__ + l * a_dim1;
+			    q__3.r = a[i__6].r * temp1.r - a[i__6].i *
+				    temp1.i, q__3.i = a[i__6].r * temp1.i + a[
+				    i__6].i * temp1.r;
+			    q__2.r = c__[i__5].r + q__3.r, q__2.i = c__[i__5]
+				    .i + q__3.i;
+			    i__7 = i__ + l * b_dim1;
+			    q__4.r = b[i__7].r * temp2.r - b[i__7].i *
+				    temp2.i, q__4.i = b[i__7].r * temp2.i + b[
+				    i__7].i * temp2.r;
+			    q__1.r = q__2.r + q__4.r, q__1.i = q__2.i +
+				    q__4.i;
+			    c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L110: */
+			}
+		    }
+/* L120: */
+		}
+/* L130: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (beta->r == 0.f && beta->i == 0.f) {
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L140: */
+		    }
+		} else if (beta->r != 1.f || beta->i != 0.f) {
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			q__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+				.i, q__1.i = beta->r * c__[i__4].i + beta->i *
+				 c__[i__4].r;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L150: */
+		    }
+		}
+		i__2 = *k;
+		for (l = 1; l <= i__2; ++l) {
+		    i__3 = j + l * a_dim1;
+		    i__4 = j + l * b_dim1;
+		    if (a[i__3].r != 0.f || a[i__3].i != 0.f || (b[i__4].r !=
+			    0.f || b[i__4].i != 0.f)) {
+			i__3 = j + l * b_dim1;
+			q__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+				q__1.i = alpha->r * b[i__3].i + alpha->i * b[
+				i__3].r;
+			temp1.r = q__1.r, temp1.i = q__1.i;
+			i__3 = j + l * a_dim1;
+			q__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+				q__1.i = alpha->r * a[i__3].i + alpha->i * a[
+				i__3].r;
+			temp2.r = q__1.r, temp2.i = q__1.i;
+			i__3 = *n;
+			for (i__ = j; i__ <= i__3; ++i__) {
+			    i__4 = i__ + j * c_dim1;
+			    i__5 = i__ + j * c_dim1;
+			    i__6 = i__ + l * a_dim1;
+			    q__3.r = a[i__6].r * temp1.r - a[i__6].i *
+				    temp1.i, q__3.i = a[i__6].r * temp1.i + a[
+				    i__6].i * temp1.r;
+			    q__2.r = c__[i__5].r + q__3.r, q__2.i = c__[i__5]
+				    .i + q__3.i;
+			    i__7 = i__ + l * b_dim1;
+			    q__4.r = b[i__7].r * temp2.r - b[i__7].i *
+				    temp2.i, q__4.i = b[i__7].r * temp2.i + b[
+				    i__7].i * temp2.r;
+			    q__1.r = q__2.r + q__4.r, q__1.i = q__2.i +
+				    q__4.i;
+			    c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L160: */
+			}
+		    }
+/* L170: */
+		}
+/* L180: */
+	    }
+	}
+    } else {
+
+/*        Form  C := alpha*A'*B + alpha*B'*A + C. */
+
+	if (upper) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    temp1.r = 0.f, temp1.i = 0.f;
+		    temp2.r = 0.f, temp2.i = 0.f;
+		    i__3 = *k;
+		    for (l = 1; l <= i__3; ++l) {
+			i__4 = l + i__ * a_dim1;
+			i__5 = l + j * b_dim1;
+			q__2.r = a[i__4].r * b[i__5].r - a[i__4].i * b[i__5]
+				.i, q__2.i = a[i__4].r * b[i__5].i + a[i__4]
+				.i * b[i__5].r;
+			q__1.r = temp1.r + q__2.r, q__1.i = temp1.i + q__2.i;
+			temp1.r = q__1.r, temp1.i = q__1.i;
+			i__4 = l + i__ * b_dim1;
+			i__5 = l + j * a_dim1;
+			q__2.r = b[i__4].r * a[i__5].r - b[i__4].i * a[i__5]
+				.i, q__2.i = b[i__4].r * a[i__5].i + b[i__4]
+				.i * a[i__5].r;
+			q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+			temp2.r = q__1.r, temp2.i = q__1.i;
+/* L190: */
+		    }
+		    if (beta->r == 0.f && beta->i == 0.f) {
+			i__3 = i__ + j * c_dim1;
+			q__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+				q__2.i = alpha->r * temp1.i + alpha->i *
+				temp1.r;
+			q__3.r = alpha->r * temp2.r - alpha->i * temp2.i,
+				q__3.i = alpha->r * temp2.i + alpha->i *
+				temp2.r;
+			q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    } else {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			q__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+				.i, q__3.i = beta->r * c__[i__4].i + beta->i *
+				 c__[i__4].r;
+			q__4.r = alpha->r * temp1.r - alpha->i * temp1.i,
+				q__4.i = alpha->r * temp1.i + alpha->i *
+				temp1.r;
+			q__2.r = q__3.r + q__4.r, q__2.i = q__3.i + q__4.i;
+			q__5.r = alpha->r * temp2.r - alpha->i * temp2.i,
+				q__5.i = alpha->r * temp2.i + alpha->i *
+				temp2.r;
+			q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    }
+/* L200: */
+		}
+/* L210: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n;
+		for (i__ = j; i__ <= i__2; ++i__) {
+		    temp1.r = 0.f, temp1.i = 0.f;
+		    temp2.r = 0.f, temp2.i = 0.f;
+		    i__3 = *k;
+		    for (l = 1; l <= i__3; ++l) {
+			i__4 = l + i__ * a_dim1;
+			i__5 = l + j * b_dim1;
+			q__2.r = a[i__4].r * b[i__5].r - a[i__4].i * b[i__5]
+				.i, q__2.i = a[i__4].r * b[i__5].i + a[i__4]
+				.i * b[i__5].r;
+			q__1.r = temp1.r + q__2.r, q__1.i = temp1.i + q__2.i;
+			temp1.r = q__1.r, temp1.i = q__1.i;
+			i__4 = l + i__ * b_dim1;
+			i__5 = l + j * a_dim1;
+			q__2.r = b[i__4].r * a[i__5].r - b[i__4].i * a[i__5]
+				.i, q__2.i = b[i__4].r * a[i__5].i + b[i__4]
+				.i * a[i__5].r;
+			q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+			temp2.r = q__1.r, temp2.i = q__1.i;
+/* L220: */
+		    }
+		    if (beta->r == 0.f && beta->i == 0.f) {
+			i__3 = i__ + j * c_dim1;
+			q__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+				q__2.i = alpha->r * temp1.i + alpha->i *
+				temp1.r;
+			q__3.r = alpha->r * temp2.r - alpha->i * temp2.i,
+				q__3.i = alpha->r * temp2.i + alpha->i *
+				temp2.r;
+			q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    } else {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			q__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+				.i, q__3.i = beta->r * c__[i__4].i + beta->i *
+				 c__[i__4].r;
+			q__4.r = alpha->r * temp1.r - alpha->i * temp1.i,
+				q__4.i = alpha->r * temp1.i + alpha->i *
+				temp1.r;
+			q__2.r = q__3.r + q__4.r, q__2.i = q__3.i + q__4.i;
+			q__5.r = alpha->r * temp2.r - alpha->i * temp2.i,
+				q__5.i = alpha->r * temp2.i + alpha->i *
+				temp2.r;
+			q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+		    }
+/* L230: */
+		}
+/* L240: */
+	    }
+	}
+    }
+
+    return 0;
+
+/*     End of CSYR2K. */
+
+} /* csyr2k_ */
diff --git a/contrib/libs/cblas/csyrk.c b/contrib/libs/cblas/csyrk.c
new file mode 100644
index 0000000000..fdc8692957
--- /dev/null
+++ b/contrib/libs/cblas/csyrk.c
@@ -0,0 +1,457 @@
+/* csyrk.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int csyrk_(char *uplo, char *trans, integer *n, integer *k,
+ complex *alpha, complex *a, integer *lda, complex *beta, complex *c__,
+ integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6;
+ complex q__1, q__2, q__3;
+
+ /* Local variables */
+ integer i__, j, l, info;
+ complex temp;
+ extern logical lsame_(char *, char *);
+ integer nrowa;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSYRK performs one of the symmetric rank k operations */
+
+/* C := alpha*A*A' + beta*C, */
+
+/* or */
+
+/* C := alpha*A'*A + beta*C, */
+
+/* where alpha and beta are scalars, C is an n by n symmetric matrix */
+/* and A is an n by k matrix in the first case and a k by n matrix */
+/* in the second case. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array C is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of C */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of C */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' C := alpha*A*A' + beta*C. */
+
+/* TRANS = 'T' or 't' C := alpha*A'*A + beta*C. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix C. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry with TRANS = 'N' or 'n', K specifies the number */
+/* of columns of the matrix A, and on entry with */
+/* TRANS = 'T' or 't', K specifies the number of rows of the */
+/* matrix A. K must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX array of DIMENSION ( LDA, ka ), where ka is */
+/* k when TRANS = 'N' or 'n', and is n otherwise. */
+/* Before entry with TRANS = 'N' or 'n', the leading n by k */
+/* part of the array A must contain the matrix A, otherwise */
+/* the leading k by n part of the array A must contain the */
+/* matrix A. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDA must be at least max( 1, n ), otherwise LDA must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* BETA - COMPLEX . */
+/* On entry, BETA specifies the scalar beta. */
+/* Unchanged on exit. */
+
+/* C - COMPLEX array of DIMENSION ( LDC, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array C must contain the upper */
+/* triangular part of the symmetric matrix and the strictly */
+/* lower triangular part of C is not referenced. On exit, the */
+/* upper triangular part of the array C is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array C must contain the lower */
+/* triangular part of the symmetric matrix and the strictly */
+/* upper triangular part of C is not referenced. On exit, the */
+/* lower triangular part of the array C is overwritten by the */
+/* lower triangular part of the updated matrix. */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(trans, "N")) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! upper && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T")) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*k < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldc < max(1,*n)) {
+ info = 10;
+ }
+ if (info != 0) {
+ xerbla_("CSYRK ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || (alpha->r == 0.f && alpha->i == 0.f || *k == 0) && (
+ beta->r == 1.f && beta->i == 0.f)) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0.f && alpha->i == 0.f) {
+ if (upper) {
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, q__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else {
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, q__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*A' + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L90: */
+ }
+ } else if (beta->r != 1.f || beta->i != 0.f) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, q__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L100: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ if (a[i__3].r != 0.f || a[i__3].i != 0.f) {
+ i__3 = j + l * a_dim1;
+ q__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ q__1.i = alpha->r * a[i__3].i + alpha->i * a[
+ i__3].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__3 = j;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ q__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ q__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ q__1.r = c__[i__5].r + q__2.r, q__1.i = c__[i__5]
+ .i + q__2.i;
+ c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L140: */
+ }
+ } else if (beta->r != 1.f || beta->i != 0.f) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, q__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L150: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ if (a[i__3].r != 0.f || a[i__3].i != 0.f) {
+ i__3 = j + l * a_dim1;
+ q__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ q__1.i = alpha->r * a[i__3].i + alpha->i * a[
+ i__3].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__3 = *n;
+ for (i__ = j; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ q__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ q__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ q__1.r = c__[i__5].r + q__2.r, q__1.i = c__[i__5]
+ .i + q__2.i;
+ c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L160: */
+ }
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ } else {
+
+/* Form C := alpha*A'*A + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0.f, temp.i = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + i__ * a_dim1;
+ i__5 = l + j * a_dim1;
+ q__2.r = a[i__4].r * a[i__5].r - a[i__4].i * a[i__5]
+ .i, q__2.i = a[i__4].r * a[i__5].i + a[i__4]
+ .i * a[i__5].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L190: */
+ }
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__3 = i__ + j * c_dim1;
+ q__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ q__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ q__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, q__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ temp.r = 0.f, temp.i = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + i__ * a_dim1;
+ i__5 = l + j * a_dim1;
+ q__2.r = a[i__4].r * a[i__5].r - a[i__4].i * a[i__5]
+ .i, q__2.i = a[i__4].r * a[i__5].i + a[i__4]
+ .i * a[i__5].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L220: */
+ }
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__3 = i__ + j * c_dim1;
+ q__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ q__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ q__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, q__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CSYRK . */
+
+} /* csyrk_ */
diff --git a/contrib/libs/cblas/ctbmv.c b/contrib/libs/cblas/ctbmv.c
new file mode 100644
index 0000000000..cbaf63578b
--- /dev/null
+++ b/contrib/libs/cblas/ctbmv.c
@@ -0,0 +1,641 @@
+/* ctbmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ctbmv_(char *uplo, char *trans, char *diag, integer *n,
+ integer *k, complex *a, integer *lda, complex *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, l, ix, jx, kx, info;
+ complex temp;
+ extern logical lsame_(char *, char *);
+ integer kplus1;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noconj, nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTBMV performs one of the matrix-vector operations */
+
+/* x := A*x, or x := A'*x, or x := conjg( A' )*x, */
+
+/* where x is an n element vector and A is an n by n unit, or non-unit, */
+/* upper or lower triangular band matrix, with ( k + 1 ) diagonals. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' x := A*x. */
+
+/* TRANS = 'T' or 't' x := A'*x. */
+
+/* TRANS = 'C' or 'c' x := conjg( A' )*x. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry with UPLO = 'U' or 'u', K specifies the number of */
+/* super-diagonals of the matrix A. */
+/* On entry with UPLO = 'L' or 'l', K specifies the number of */
+/* sub-diagonals of the matrix A. */
+/* K must satisfy 0 .le. K. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */
+/* by n part of the array A must contain the upper triangular */
+/* band part of the matrix of coefficients, supplied column by */
+/* column, with the leading diagonal of the matrix in row */
+/* ( k + 1 ) of the array, the first super-diagonal starting at */
+/* position 2 in row k, and so on. The top left k by k triangle */
+/* of the array A is not referenced. */
+/* The following program segment will transfer an upper */
+/* triangular band matrix from conventional full matrix storage */
+/* to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = K + 1 - J */
+/* DO 10, I = MAX( 1, J - K ), J */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */
+/* by n part of the array A must contain the lower triangular */
+/* band part of the matrix of coefficients, supplied column by */
+/* column, with the leading diagonal of the matrix in row 1 of */
+/* the array, the first sub-diagonal starting at position 1 in */
+/* row 2, and so on. The bottom right k by k triangle of the */
+/* array A is not referenced. */
+/* The following program segment will transfer a lower */
+/* triangular band matrix from conventional full matrix storage */
+/* to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = 1 - J */
+/* DO 10, I = J, MIN( N, J + K ) */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Note that when DIAG = 'U' or 'u' the elements of the array A */
+/* corresponding to the diagonal elements of the matrix are not */
+/* referenced, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* ( k + 1 ). */
+/* Unchanged on exit. */
+
+/* X - COMPLEX array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. On exit, X is overwritten with the */
+/* transformed vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*k < 0) {
+ info = 5;
+ } else if (*lda < *k + 1) {
+ info = 7;
+ } else if (*incx == 0) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("CTBMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := A*x. */
+
+ if (lsame_(uplo, "U")) {
+ kplus1 = *k + 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ l = kplus1 - j;
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__4 = j - 1;
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ i__5 = l + i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ q__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ q__1.r = x[i__3].r + q__2.r, q__1.i = x[i__3].i +
+ q__2.i;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+/* L10: */
+ }
+ if (nounit) {
+ i__4 = j;
+ i__2 = j;
+ i__3 = kplus1 + j * a_dim1;
+ q__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[
+ i__3].i, q__1.i = x[i__2].r * a[i__3].i +
+ x[i__2].i * a[i__3].r;
+ x[i__4].r = q__1.r, x[i__4].i = q__1.i;
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__4 = jx;
+ if (x[i__4].r != 0.f || x[i__4].i != 0.f) {
+ i__4 = jx;
+ temp.r = x[i__4].r, temp.i = x[i__4].i;
+ ix = kx;
+ l = kplus1 - j;
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *k;
+ i__3 = j - 1;
+ for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+ i__4 = ix;
+ i__2 = ix;
+ i__5 = l + i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ q__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ q__1.r = x[i__2].r + q__2.r, q__1.i = x[i__2].i +
+ q__2.i;
+ x[i__4].r = q__1.r, x[i__4].i = q__1.i;
+ ix += *incx;
+/* L30: */
+ }
+ if (nounit) {
+ i__3 = jx;
+ i__4 = jx;
+ i__2 = kplus1 + j * a_dim1;
+ q__1.r = x[i__4].r * a[i__2].r - x[i__4].i * a[
+ i__2].i, q__1.i = x[i__4].r * a[i__2].i +
+ x[i__4].i * a[i__2].r;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ }
+ }
+ jx += *incx;
+ if (j > *k) {
+ kx += *incx;
+ }
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ if (x[i__1].r != 0.f || x[i__1].i != 0.f) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ l = 1 - j;
+/* Computing MIN */
+ i__1 = *n, i__3 = j + *k;
+ i__4 = j + 1;
+ for (i__ = min(i__1,i__3); i__ >= i__4; --i__) {
+ i__1 = i__;
+ i__3 = i__;
+ i__2 = l + i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ q__2.i = temp.r * a[i__2].i + temp.i * a[
+ i__2].r;
+ q__1.r = x[i__3].r + q__2.r, q__1.i = x[i__3].i +
+ q__2.i;
+ x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+/* L50: */
+ }
+ if (nounit) {
+ i__4 = j;
+ i__1 = j;
+ i__3 = j * a_dim1 + 1;
+ q__1.r = x[i__1].r * a[i__3].r - x[i__1].i * a[
+ i__3].i, q__1.i = x[i__1].r * a[i__3].i +
+ x[i__1].i * a[i__3].r;
+ x[i__4].r = q__1.r, x[i__4].i = q__1.i;
+ }
+ }
+/* L60: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ i__4 = jx;
+ if (x[i__4].r != 0.f || x[i__4].i != 0.f) {
+ i__4 = jx;
+ temp.r = x[i__4].r, temp.i = x[i__4].i;
+ ix = kx;
+ l = 1 - j;
+/* Computing MIN */
+ i__4 = *n, i__1 = j + *k;
+ i__3 = j + 1;
+ for (i__ = min(i__4,i__1); i__ >= i__3; --i__) {
+ i__4 = ix;
+ i__1 = ix;
+ i__2 = l + i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ q__2.i = temp.r * a[i__2].i + temp.i * a[
+ i__2].r;
+ q__1.r = x[i__1].r + q__2.r, q__1.i = x[i__1].i +
+ q__2.i;
+ x[i__4].r = q__1.r, x[i__4].i = q__1.i;
+ ix -= *incx;
+/* L70: */
+ }
+ if (nounit) {
+ i__3 = jx;
+ i__4 = jx;
+ i__1 = j * a_dim1 + 1;
+ q__1.r = x[i__4].r * a[i__1].r - x[i__4].i * a[
+ i__1].i, q__1.i = x[i__4].r * a[i__1].i +
+ x[i__4].i * a[i__1].r;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ }
+ }
+ jx -= *incx;
+ if (*n - j >= *k) {
+ kx -= *incx;
+ }
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := A'*x or x := conjg( A' )*x. */
+
+ if (lsame_(uplo, "U")) {
+ kplus1 = *k + 1;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__3 = j;
+ temp.r = x[i__3].r, temp.i = x[i__3].i;
+ l = kplus1 - j;
+ if (noconj) {
+ if (nounit) {
+ i__3 = kplus1 + j * a_dim1;
+ q__1.r = temp.r * a[i__3].r - temp.i * a[i__3].i,
+ q__1.i = temp.r * a[i__3].i + temp.i * a[
+ i__3].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+/* Computing MAX */
+ i__4 = 1, i__1 = j - *k;
+ i__3 = max(i__4,i__1);
+ for (i__ = j - 1; i__ >= i__3; --i__) {
+ i__4 = l + i__ + j * a_dim1;
+ i__1 = i__;
+ q__2.r = a[i__4].r * x[i__1].r - a[i__4].i * x[
+ i__1].i, q__2.i = a[i__4].r * x[i__1].i +
+ a[i__4].i * x[i__1].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L90: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &a[kplus1 + j * a_dim1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+/* Computing MAX */
+ i__4 = 1, i__1 = j - *k;
+ i__3 = max(i__4,i__1);
+ for (i__ = j - 1; i__ >= i__3; --i__) {
+ r_cnjg(&q__3, &a[l + i__ + j * a_dim1]);
+ i__4 = i__;
+ q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i,
+ q__2.i = q__3.r * x[i__4].i + q__3.i * x[
+ i__4].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L100: */
+ }
+ }
+ i__3 = j;
+ x[i__3].r = temp.r, x[i__3].i = temp.i;
+/* L110: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ i__3 = jx;
+ temp.r = x[i__3].r, temp.i = x[i__3].i;
+ kx -= *incx;
+ ix = kx;
+ l = kplus1 - j;
+ if (noconj) {
+ if (nounit) {
+ i__3 = kplus1 + j * a_dim1;
+ q__1.r = temp.r * a[i__3].r - temp.i * a[i__3].i,
+ q__1.i = temp.r * a[i__3].i + temp.i * a[
+ i__3].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+/* Computing MAX */
+ i__4 = 1, i__1 = j - *k;
+ i__3 = max(i__4,i__1);
+ for (i__ = j - 1; i__ >= i__3; --i__) {
+ i__4 = l + i__ + j * a_dim1;
+ i__1 = ix;
+ q__2.r = a[i__4].r * x[i__1].r - a[i__4].i * x[
+ i__1].i, q__2.i = a[i__4].r * x[i__1].i +
+ a[i__4].i * x[i__1].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix -= *incx;
+/* L120: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &a[kplus1 + j * a_dim1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+/* Computing MAX */
+ i__4 = 1, i__1 = j - *k;
+ i__3 = max(i__4,i__1);
+ for (i__ = j - 1; i__ >= i__3; --i__) {
+ r_cnjg(&q__3, &a[l + i__ + j * a_dim1]);
+ i__4 = ix;
+ q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i,
+ q__2.i = q__3.r * x[i__4].i + q__3.i * x[
+ i__4].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix -= *incx;
+/* L130: */
+ }
+ }
+ i__3 = jx;
+ x[i__3].r = temp.r, x[i__3].i = temp.i;
+ jx -= *incx;
+/* L140: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__3 = *n;
+ for (j = 1; j <= i__3; ++j) {
+ i__4 = j;
+ temp.r = x[i__4].r, temp.i = x[i__4].i;
+ l = 1 - j;
+ if (noconj) {
+ if (nounit) {
+ i__4 = j * a_dim1 + 1;
+ q__1.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
+ q__1.i = temp.r * a[i__4].i + temp.i * a[
+ i__4].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = j + *k;
+ i__4 = min(i__1,i__2);
+ for (i__ = j + 1; i__ <= i__4; ++i__) {
+ i__1 = l + i__ + j * a_dim1;
+ i__2 = i__;
+ q__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[
+ i__2].i, q__2.i = a[i__1].r * x[i__2].i +
+ a[i__1].i * x[i__2].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L150: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &a[j * a_dim1 + 1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = j + *k;
+ i__4 = min(i__1,i__2);
+ for (i__ = j + 1; i__ <= i__4; ++i__) {
+ r_cnjg(&q__3, &a[l + i__ + j * a_dim1]);
+ i__1 = i__;
+ q__2.r = q__3.r * x[i__1].r - q__3.i * x[i__1].i,
+ q__2.i = q__3.r * x[i__1].i + q__3.i * x[
+ i__1].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L160: */
+ }
+ }
+ i__4 = j;
+ x[i__4].r = temp.r, x[i__4].i = temp.i;
+/* L170: */
+ }
+ } else {
+ jx = kx;
+ i__3 = *n;
+ for (j = 1; j <= i__3; ++j) {
+ i__4 = jx;
+ temp.r = x[i__4].r, temp.i = x[i__4].i;
+ kx += *incx;
+ ix = kx;
+ l = 1 - j;
+ if (noconj) {
+ if (nounit) {
+ i__4 = j * a_dim1 + 1;
+ q__1.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
+ q__1.i = temp.r * a[i__4].i + temp.i * a[
+ i__4].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = j + *k;
+ i__4 = min(i__1,i__2);
+ for (i__ = j + 1; i__ <= i__4; ++i__) {
+ i__1 = l + i__ + j * a_dim1;
+ i__2 = ix;
+ q__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[
+ i__2].i, q__2.i = a[i__1].r * x[i__2].i +
+ a[i__1].i * x[i__2].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix += *incx;
+/* L180: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &a[j * a_dim1 + 1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = j + *k;
+ i__4 = min(i__1,i__2);
+ for (i__ = j + 1; i__ <= i__4; ++i__) {
+ r_cnjg(&q__3, &a[l + i__ + j * a_dim1]);
+ i__1 = ix;
+ q__2.r = q__3.r * x[i__1].r - q__3.i * x[i__1].i,
+ q__2.i = q__3.r * x[i__1].i + q__3.i * x[
+ i__1].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix += *incx;
+/* L190: */
+ }
+ }
+ i__4 = jx;
+ x[i__4].r = temp.r, x[i__4].i = temp.i;
+ jx += *incx;
+/* L200: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CTBMV . */
+
+} /* ctbmv_ */
diff --git a/contrib/libs/cblas/ctbsv.c b/contrib/libs/cblas/ctbsv.c
new file mode 100644
index 0000000000..0f47dfba02
--- /dev/null
+++ b/contrib/libs/cblas/ctbsv.c
@@ -0,0 +1,609 @@
+/* ctbsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ctbsv_(char *uplo, char *trans, char *diag, integer *n,
+ integer *k, complex *a, integer *lda, complex *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void c_div(complex *, complex *, complex *), r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, l, ix, jx, kx, info;
+ complex temp;
+ extern logical lsame_(char *, char *);
+ integer kplus1;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noconj, nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTBSV solves one of the systems of equations */
+
+/* A*x = b, or A'*x = b, or conjg( A' )*x = b, */
+
+/* where b and x are n element vectors and A is an n by n unit, or */
+/* non-unit, upper or lower triangular band matrix, with ( k + 1 ) */
+/* diagonals. */
+
+/* No test for singularity or near-singularity is included in this */
+/* routine. Such tests must be performed before calling this routine. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the equations to be solved as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' A*x = b. */
+
+/* TRANS = 'T' or 't' A'*x = b. */
+
+/* TRANS = 'C' or 'c' conjg( A' )*x = b. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry with UPLO = 'U' or 'u', K specifies the number of */
+/* super-diagonals of the matrix A. */
+/* On entry with UPLO = 'L' or 'l', K specifies the number of */
+/* sub-diagonals of the matrix A. */
+/* K must satisfy 0 .le. K. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */
+/* by n part of the array A must contain the upper triangular */
+/* band part of the matrix of coefficients, supplied column by */
+/* column, with the leading diagonal of the matrix in row */
+/* ( k + 1 ) of the array, the first super-diagonal starting at */
+/* position 2 in row k, and so on. The top left k by k triangle */
+/* of the array A is not referenced. */
+/* The following program segment will transfer an upper */
+/* triangular band matrix from conventional full matrix storage */
+/* to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = K + 1 - J */
+/* DO 10, I = MAX( 1, J - K ), J */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */
+/* by n part of the array A must contain the lower triangular */
+/* band part of the matrix of coefficients, supplied column by */
+/* column, with the leading diagonal of the matrix in row 1 of */
+/* the array, the first sub-diagonal starting at position 1 in */
+/* row 2, and so on. The bottom right k by k triangle of the */
+/* array A is not referenced. */
+/* The following program segment will transfer a lower */
+/* triangular band matrix from conventional full matrix storage */
+/* to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = 1 - J */
+/* DO 10, I = J, MIN( N, J + K ) */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Note that when DIAG = 'U' or 'u' the elements of the array A */
+/* corresponding to the diagonal elements of the matrix are not */
+/* referenced, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* ( k + 1 ). */
+/* Unchanged on exit. */
+
+/* X - COMPLEX array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element right-hand side vector b. On exit, X is overwritten */
+/* with the solution vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*k < 0) {
+ info = 5;
+ } else if (*lda < *k + 1) {
+ info = 7;
+ } else if (*incx == 0) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("CTBSV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/*  accessed sequentially with one pass through A. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := inv( A )*x. */
+
+ if (lsame_(uplo, "U")) {
+ kplus1 = *k + 1;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ if (x[i__1].r != 0.f || x[i__1].i != 0.f) {
+ l = kplus1 - j;
+ if (nounit) {
+ i__1 = j;
+ c_div(&q__1, &x[j], &a[kplus1 + j * a_dim1]);
+ x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+ }
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__1 = max(i__2,i__3);
+ for (i__ = j - 1; i__ >= i__1; --i__) {
+ i__2 = i__;
+ i__3 = i__;
+ i__4 = l + i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
+ q__2.i = temp.r * a[i__4].i + temp.i * a[
+ i__4].r;
+ q__1.r = x[i__3].r - q__2.r, q__1.i = x[i__3].i -
+ q__2.i;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ kx -= *incx;
+ i__1 = jx;
+ if (x[i__1].r != 0.f || x[i__1].i != 0.f) {
+ ix = kx;
+ l = kplus1 - j;
+ if (nounit) {
+ i__1 = jx;
+ c_div(&q__1, &x[jx], &a[kplus1 + j * a_dim1]);
+ x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+ }
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__1 = max(i__2,i__3);
+ for (i__ = j - 1; i__ >= i__1; --i__) {
+ i__2 = ix;
+ i__3 = ix;
+ i__4 = l + i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
+ q__2.i = temp.r * a[i__4].i + temp.i * a[
+ i__4].r;
+ q__1.r = x[i__3].r - q__2.r, q__1.i = x[i__3].i -
+ q__2.i;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ ix -= *incx;
+/* L30: */
+ }
+ }
+ jx -= *incx;
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+ l = 1 - j;
+ if (nounit) {
+ i__2 = j;
+ c_div(&q__1, &x[j], &a[j * a_dim1 + 1]);
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ }
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+/* Computing MIN */
+ i__3 = *n, i__4 = j + *k;
+ i__2 = min(i__3,i__4);
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = l + i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ q__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ q__1.r = x[i__4].r - q__2.r, q__1.i = x[i__4].i -
+ q__2.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ kx += *incx;
+ i__2 = jx;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+ ix = kx;
+ l = 1 - j;
+ if (nounit) {
+ i__2 = jx;
+ c_div(&q__1, &x[jx], &a[j * a_dim1 + 1]);
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ }
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+/* Computing MIN */
+ i__3 = *n, i__4 = j + *k;
+ i__2 = min(i__3,i__4);
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = ix;
+ i__4 = ix;
+ i__5 = l + i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ q__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ q__1.r = x[i__4].r - q__2.r, q__1.i = x[i__4].i -
+ q__2.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ ix += *incx;
+/* L70: */
+ }
+ }
+ jx += *incx;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := inv( A' )*x or x := inv( conjg( A') )*x. */
+
+ if (lsame_(uplo, "U")) {
+ kplus1 = *k + 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ l = kplus1 - j;
+ if (noconj) {
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__4 = j - 1;
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ i__2 = l + i__ + j * a_dim1;
+ i__3 = i__;
+ q__2.r = a[i__2].r * x[i__3].r - a[i__2].i * x[
+ i__3].i, q__2.i = a[i__2].r * x[i__3].i +
+ a[i__2].i * x[i__3].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L90: */
+ }
+ if (nounit) {
+ c_div(&q__1, &temp, &a[kplus1 + j * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ } else {
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *k;
+ i__3 = j - 1;
+ for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+ r_cnjg(&q__3, &a[l + i__ + j * a_dim1]);
+ i__4 = i__;
+ q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i,
+ q__2.i = q__3.r * x[i__4].i + q__3.i * x[
+ i__4].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L100: */
+ }
+ if (nounit) {
+ r_cnjg(&q__2, &a[kplus1 + j * a_dim1]);
+ c_div(&q__1, &temp, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ i__3 = j;
+ x[i__3].r = temp.r, x[i__3].i = temp.i;
+/* L110: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__3 = jx;
+ temp.r = x[i__3].r, temp.i = x[i__3].i;
+ ix = kx;
+ l = kplus1 - j;
+ if (noconj) {
+/* Computing MAX */
+ i__3 = 1, i__4 = j - *k;
+ i__2 = j - 1;
+ for (i__ = max(i__3,i__4); i__ <= i__2; ++i__) {
+ i__3 = l + i__ + j * a_dim1;
+ i__4 = ix;
+ q__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+ i__4].i, q__2.i = a[i__3].r * x[i__4].i +
+ a[i__3].i * x[i__4].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix += *incx;
+/* L120: */
+ }
+ if (nounit) {
+ c_div(&q__1, &temp, &a[kplus1 + j * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ } else {
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__4 = j - 1;
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ r_cnjg(&q__3, &a[l + i__ + j * a_dim1]);
+ i__2 = ix;
+ q__2.r = q__3.r * x[i__2].r - q__3.i * x[i__2].i,
+ q__2.i = q__3.r * x[i__2].i + q__3.i * x[
+ i__2].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix += *incx;
+/* L130: */
+ }
+ if (nounit) {
+ r_cnjg(&q__2, &a[kplus1 + j * a_dim1]);
+ c_div(&q__1, &temp, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ i__4 = jx;
+ x[i__4].r = temp.r, x[i__4].i = temp.i;
+ jx += *incx;
+ if (j > *k) {
+ kx += *incx;
+ }
+/* L140: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ l = 1 - j;
+ if (noconj) {
+/* Computing MIN */
+ i__1 = *n, i__4 = j + *k;
+ i__2 = j + 1;
+ for (i__ = min(i__1,i__4); i__ >= i__2; --i__) {
+ i__1 = l + i__ + j * a_dim1;
+ i__4 = i__;
+ q__2.r = a[i__1].r * x[i__4].r - a[i__1].i * x[
+ i__4].i, q__2.i = a[i__1].r * x[i__4].i +
+ a[i__1].i * x[i__4].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L150: */
+ }
+ if (nounit) {
+ c_div(&q__1, &temp, &a[j * a_dim1 + 1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ } else {
+/* Computing MIN */
+ i__2 = *n, i__1 = j + *k;
+ i__4 = j + 1;
+ for (i__ = min(i__2,i__1); i__ >= i__4; --i__) {
+ r_cnjg(&q__3, &a[l + i__ + j * a_dim1]);
+ i__2 = i__;
+ q__2.r = q__3.r * x[i__2].r - q__3.i * x[i__2].i,
+ q__2.i = q__3.r * x[i__2].i + q__3.i * x[
+ i__2].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L160: */
+ }
+ if (nounit) {
+ r_cnjg(&q__2, &a[j * a_dim1 + 1]);
+ c_div(&q__1, &temp, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ i__4 = j;
+ x[i__4].r = temp.r, x[i__4].i = temp.i;
+/* L170: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ i__4 = jx;
+ temp.r = x[i__4].r, temp.i = x[i__4].i;
+ ix = kx;
+ l = 1 - j;
+ if (noconj) {
+/* Computing MIN */
+ i__4 = *n, i__2 = j + *k;
+ i__1 = j + 1;
+ for (i__ = min(i__4,i__2); i__ >= i__1; --i__) {
+ i__4 = l + i__ + j * a_dim1;
+ i__2 = ix;
+ q__2.r = a[i__4].r * x[i__2].r - a[i__4].i * x[
+ i__2].i, q__2.i = a[i__4].r * x[i__2].i +
+ a[i__4].i * x[i__2].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix -= *incx;
+/* L180: */
+ }
+ if (nounit) {
+ c_div(&q__1, &temp, &a[j * a_dim1 + 1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ } else {
+/* Computing MIN */
+ i__1 = *n, i__4 = j + *k;
+ i__2 = j + 1;
+ for (i__ = min(i__1,i__4); i__ >= i__2; --i__) {
+ r_cnjg(&q__3, &a[l + i__ + j * a_dim1]);
+ i__1 = ix;
+ q__2.r = q__3.r * x[i__1].r - q__3.i * x[i__1].i,
+ q__2.i = q__3.r * x[i__1].i + q__3.i * x[
+ i__1].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix -= *incx;
+/* L190: */
+ }
+ if (nounit) {
+ r_cnjg(&q__2, &a[j * a_dim1 + 1]);
+ c_div(&q__1, &temp, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ i__2 = jx;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+ jx -= *incx;
+ if (*n - j >= *k) {
+ kx -= *incx;
+ }
+/* L200: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CTBSV . */
+
+} /* ctbsv_ */
diff --git a/contrib/libs/cblas/ctpmv.c b/contrib/libs/cblas/ctpmv.c
new file mode 100644
index 0000000000..1a28e95444
--- /dev/null
+++ b/contrib/libs/cblas/ctpmv.c
@@ -0,0 +1,571 @@
+/* ctpmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ctpmv_(char *uplo, char *trans, char *diag, integer *n,
+ complex *ap, complex *x, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4, i__5;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, k, kk, ix, jx, kx, info;
+ complex temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noconj, nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTPMV performs one of the matrix-vector operations */
+
+/* x := A*x, or x := A'*x, or x := conjg( A' )*x, */
+
+/* where x is an n element vector and A is an n by n unit, or non-unit, */
+/* upper or lower triangular matrix, supplied in packed form. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' x := A*x. */
+
+/* TRANS = 'T' or 't' x := A'*x. */
+
+/* TRANS = 'C' or 'c' x := conjg( A' )*x. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* AP - COMPLEX array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular matrix packed sequentially, */
+/* column by column, so that AP( 1 ) contains a( 1, 1 ), */
+/* AP( 2 ) and AP( 3 ) contain a( 1, 2 ) and a( 2, 2 ) */
+/* respectively, and so on. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular matrix packed sequentially, */
+/* column by column, so that AP( 1 ) contains a( 1, 1 ), */
+/* AP( 2 ) and AP( 3 ) contain a( 2, 1 ) and a( 3, 1 ) */
+/* respectively, and so on. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* X - COMPLEX array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. On exit, X is overwritten with the */
+/*           transformed vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --x;
+ --ap;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*incx == 0) {
+ info = 7;
+ }
+ if (info != 0) {
+ xerbla_("CTPMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of AP are */
+/* accessed sequentially with one pass through AP. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x:= A*x. */
+
+ if (lsame_(uplo, "U")) {
+ kk = 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ k = kk;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = k;
+ q__2.r = temp.r * ap[i__5].r - temp.i * ap[i__5]
+ .i, q__2.i = temp.r * ap[i__5].i + temp.i
+ * ap[i__5].r;
+ q__1.r = x[i__4].r + q__2.r, q__1.i = x[i__4].i +
+ q__2.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ ++k;
+/* L10: */
+ }
+ if (nounit) {
+ i__2 = j;
+ i__3 = j;
+ i__4 = kk + j - 1;
+ q__1.r = x[i__3].r * ap[i__4].r - x[i__3].i * ap[
+ i__4].i, q__1.i = x[i__3].r * ap[i__4].i
+ + x[i__3].i * ap[i__4].r;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ }
+ }
+ kk += j;
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = kx;
+ i__2 = kk + j - 2;
+ for (k = kk; k <= i__2; ++k) {
+ i__3 = ix;
+ i__4 = ix;
+ i__5 = k;
+ q__2.r = temp.r * ap[i__5].r - temp.i * ap[i__5]
+ .i, q__2.i = temp.r * ap[i__5].i + temp.i
+ * ap[i__5].r;
+ q__1.r = x[i__4].r + q__2.r, q__1.i = x[i__4].i +
+ q__2.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ ix += *incx;
+/* L30: */
+ }
+ if (nounit) {
+ i__2 = jx;
+ i__3 = jx;
+ i__4 = kk + j - 1;
+ q__1.r = x[i__3].r * ap[i__4].r - x[i__3].i * ap[
+ i__4].i, q__1.i = x[i__3].r * ap[i__4].i
+ + x[i__3].i * ap[i__4].r;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ }
+ }
+ jx += *incx;
+ kk += j;
+/* L40: */
+ }
+ }
+ } else {
+ kk = *n * (*n + 1) / 2;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ if (x[i__1].r != 0.f || x[i__1].i != 0.f) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ k = kk;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = i__;
+ i__3 = i__;
+ i__4 = k;
+ q__2.r = temp.r * ap[i__4].r - temp.i * ap[i__4]
+ .i, q__2.i = temp.r * ap[i__4].i + temp.i
+ * ap[i__4].r;
+ q__1.r = x[i__3].r + q__2.r, q__1.i = x[i__3].i +
+ q__2.i;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ --k;
+/* L50: */
+ }
+ if (nounit) {
+ i__1 = j;
+ i__2 = j;
+ i__3 = kk - *n + j;
+ q__1.r = x[i__2].r * ap[i__3].r - x[i__2].i * ap[
+ i__3].i, q__1.i = x[i__2].r * ap[i__3].i
+ + x[i__2].i * ap[i__3].r;
+ x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+ }
+ }
+ kk -= *n - j + 1;
+/* L60: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ if (x[i__1].r != 0.f || x[i__1].i != 0.f) {
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = kx;
+ i__1 = kk - (*n - (j + 1));
+ for (k = kk; k >= i__1; --k) {
+ i__2 = ix;
+ i__3 = ix;
+ i__4 = k;
+ q__2.r = temp.r * ap[i__4].r - temp.i * ap[i__4]
+ .i, q__2.i = temp.r * ap[i__4].i + temp.i
+ * ap[i__4].r;
+ q__1.r = x[i__3].r + q__2.r, q__1.i = x[i__3].i +
+ q__2.i;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ ix -= *incx;
+/* L70: */
+ }
+ if (nounit) {
+ i__1 = jx;
+ i__2 = jx;
+ i__3 = kk - *n + j;
+ q__1.r = x[i__2].r * ap[i__3].r - x[i__2].i * ap[
+ i__3].i, q__1.i = x[i__2].r * ap[i__3].i
+ + x[i__2].i * ap[i__3].r;
+ x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+ }
+ }
+ jx -= *incx;
+ kk -= *n - j + 1;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := A'*x or x := conjg( A' )*x. */
+
+ if (lsame_(uplo, "U")) {
+ kk = *n * (*n + 1) / 2;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ k = kk - 1;
+ if (noconj) {
+ if (nounit) {
+ i__1 = kk;
+ q__1.r = temp.r * ap[i__1].r - temp.i * ap[i__1]
+ .i, q__1.i = temp.r * ap[i__1].i + temp.i
+ * ap[i__1].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ i__1 = k;
+ i__2 = i__;
+ q__2.r = ap[i__1].r * x[i__2].r - ap[i__1].i * x[
+ i__2].i, q__2.i = ap[i__1].r * x[i__2].i
+ + ap[i__1].i * x[i__2].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ --k;
+/* L90: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &ap[kk]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ r_cnjg(&q__3, &ap[k]);
+ i__1 = i__;
+ q__2.r = q__3.r * x[i__1].r - q__3.i * x[i__1].i,
+ q__2.i = q__3.r * x[i__1].i + q__3.i * x[
+ i__1].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ --k;
+/* L100: */
+ }
+ }
+ i__1 = j;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+ kk -= j;
+/* L110: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = jx;
+ if (noconj) {
+ if (nounit) {
+ i__1 = kk;
+ q__1.r = temp.r * ap[i__1].r - temp.i * ap[i__1]
+ .i, q__1.i = temp.r * ap[i__1].i + temp.i
+ * ap[i__1].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__1 = kk - j + 1;
+ for (k = kk - 1; k >= i__1; --k) {
+ ix -= *incx;
+ i__2 = k;
+ i__3 = ix;
+ q__2.r = ap[i__2].r * x[i__3].r - ap[i__2].i * x[
+ i__3].i, q__2.i = ap[i__2].r * x[i__3].i
+ + ap[i__2].i * x[i__3].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L120: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &ap[kk]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__1 = kk - j + 1;
+ for (k = kk - 1; k >= i__1; --k) {
+ ix -= *incx;
+ r_cnjg(&q__3, &ap[k]);
+ i__2 = ix;
+ q__2.r = q__3.r * x[i__2].r - q__3.i * x[i__2].i,
+ q__2.i = q__3.r * x[i__2].i + q__3.i * x[
+ i__2].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L130: */
+ }
+ }
+ i__1 = jx;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+ jx -= *incx;
+ kk -= j;
+/* L140: */
+ }
+ }
+ } else {
+ kk = 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ k = kk + 1;
+ if (noconj) {
+ if (nounit) {
+ i__2 = kk;
+ q__1.r = temp.r * ap[i__2].r - temp.i * ap[i__2]
+ .i, q__1.i = temp.r * ap[i__2].i + temp.i
+ * ap[i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = k;
+ i__4 = i__;
+ q__2.r = ap[i__3].r * x[i__4].r - ap[i__3].i * x[
+ i__4].i, q__2.i = ap[i__3].r * x[i__4].i
+ + ap[i__3].i * x[i__4].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ++k;
+/* L150: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &ap[kk]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ r_cnjg(&q__3, &ap[k]);
+ i__3 = i__;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i,
+ q__2.i = q__3.r * x[i__3].i + q__3.i * x[
+ i__3].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ++k;
+/* L160: */
+ }
+ }
+ i__2 = j;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+ kk += *n - j + 1;
+/* L170: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = jx;
+ if (noconj) {
+ if (nounit) {
+ i__2 = kk;
+ q__1.r = temp.r * ap[i__2].r - temp.i * ap[i__2]
+ .i, q__1.i = temp.r * ap[i__2].i + temp.i
+ * ap[i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = kk + *n - j;
+ for (k = kk + 1; k <= i__2; ++k) {
+ ix += *incx;
+ i__3 = k;
+ i__4 = ix;
+ q__2.r = ap[i__3].r * x[i__4].r - ap[i__3].i * x[
+ i__4].i, q__2.i = ap[i__3].r * x[i__4].i
+ + ap[i__3].i * x[i__4].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L180: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &ap[kk]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = kk + *n - j;
+ for (k = kk + 1; k <= i__2; ++k) {
+ ix += *incx;
+ r_cnjg(&q__3, &ap[k]);
+ i__3 = ix;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i,
+ q__2.i = q__3.r * x[i__3].i + q__3.i * x[
+ i__3].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L190: */
+ }
+ }
+ i__2 = jx;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+ jx += *incx;
+ kk += *n - j + 1;
+/* L200: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CTPMV . */
+
+} /* ctpmv_ */
diff --git a/contrib/libs/cblas/ctpsv.c b/contrib/libs/cblas/ctpsv.c
new file mode 100644
index 0000000000..2e810f674c
--- /dev/null
+++ b/contrib/libs/cblas/ctpsv.c
@@ -0,0 +1,539 @@
+/* ctpsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ctpsv_(char *uplo, char *trans, char *diag, integer *n,
+ complex *ap, complex *x, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4, i__5;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void c_div(complex *, complex *, complex *), r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, k, kk, ix, jx, kx, info;
+ complex temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noconj, nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTPSV solves one of the systems of equations */
+
+/* A*x = b, or A'*x = b, or conjg( A' )*x = b, */
+
+/* where b and x are n element vectors and A is an n by n unit, or */
+/* non-unit, upper or lower triangular matrix, supplied in packed form. */
+
+/* No test for singularity or near-singularity is included in this */
+/* routine. Such tests must be performed before calling this routine. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the equations to be solved as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' A*x = b. */
+
+/* TRANS = 'T' or 't' A'*x = b. */
+
+/* TRANS = 'C' or 'c' conjg( A' )*x = b. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* AP - COMPLEX array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular matrix packed sequentially, */
+/* column by column, so that AP( 1 ) contains a( 1, 1 ), */
+/* AP( 2 ) and AP( 3 ) contain a( 1, 2 ) and a( 2, 2 ) */
+/* respectively, and so on. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular matrix packed sequentially, */
+/* column by column, so that AP( 1 ) contains a( 1, 1 ), */
+/* AP( 2 ) and AP( 3 ) contain a( 2, 1 ) and a( 3, 1 ) */
+/* respectively, and so on. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* X - COMPLEX array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element right-hand side vector b. On exit, X is overwritten */
+/* with the solution vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --x;
+ --ap;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*incx == 0) {
+ info = 7;
+ }
+ if (info != 0) {
+ xerbla_("CTPSV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of AP are */
+/* accessed sequentially with one pass through AP. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := inv( A )*x. */
+
+ if (lsame_(uplo, "U")) {
+ kk = *n * (*n + 1) / 2;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ if (x[i__1].r != 0.f || x[i__1].i != 0.f) {
+ if (nounit) {
+ i__1 = j;
+ c_div(&q__1, &x[j], &ap[kk]);
+ x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+ }
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ k = kk - 1;
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ i__1 = i__;
+ i__2 = i__;
+ i__3 = k;
+ q__2.r = temp.r * ap[i__3].r - temp.i * ap[i__3]
+ .i, q__2.i = temp.r * ap[i__3].i + temp.i
+ * ap[i__3].r;
+ q__1.r = x[i__2].r - q__2.r, q__1.i = x[i__2].i -
+ q__2.i;
+ x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+ --k;
+/* L10: */
+ }
+ }
+ kk -= j;
+/* L20: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ if (x[i__1].r != 0.f || x[i__1].i != 0.f) {
+ if (nounit) {
+ i__1 = jx;
+ c_div(&q__1, &x[jx], &ap[kk]);
+ x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+ }
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = jx;
+ i__1 = kk - j + 1;
+ for (k = kk - 1; k >= i__1; --k) {
+ ix -= *incx;
+ i__2 = ix;
+ i__3 = ix;
+ i__4 = k;
+ q__2.r = temp.r * ap[i__4].r - temp.i * ap[i__4]
+ .i, q__2.i = temp.r * ap[i__4].i + temp.i
+ * ap[i__4].r;
+ q__1.r = x[i__3].r - q__2.r, q__1.i = x[i__3].i -
+ q__2.i;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+/* L30: */
+ }
+ }
+ jx -= *incx;
+ kk -= j;
+/* L40: */
+ }
+ }
+ } else {
+ kk = 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+ if (nounit) {
+ i__2 = j;
+ c_div(&q__1, &x[j], &ap[kk]);
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ }
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ k = kk + 1;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = k;
+ q__2.r = temp.r * ap[i__5].r - temp.i * ap[i__5]
+ .i, q__2.i = temp.r * ap[i__5].i + temp.i
+ * ap[i__5].r;
+ q__1.r = x[i__4].r - q__2.r, q__1.i = x[i__4].i -
+ q__2.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ ++k;
+/* L50: */
+ }
+ }
+ kk += *n - j + 1;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+ if (nounit) {
+ i__2 = jx;
+ c_div(&q__1, &x[jx], &ap[kk]);
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ }
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = jx;
+ i__2 = kk + *n - j;
+ for (k = kk + 1; k <= i__2; ++k) {
+ ix += *incx;
+ i__3 = ix;
+ i__4 = ix;
+ i__5 = k;
+ q__2.r = temp.r * ap[i__5].r - temp.i * ap[i__5]
+ .i, q__2.i = temp.r * ap[i__5].i + temp.i
+ * ap[i__5].r;
+ q__1.r = x[i__4].r - q__2.r, q__1.i = x[i__4].i -
+ q__2.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+/* L70: */
+ }
+ }
+ jx += *incx;
+ kk += *n - j + 1;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := inv( A' )*x or x := inv( conjg( A' ) )*x. */
+
+ if (lsame_(uplo, "U")) {
+ kk = 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ k = kk;
+ if (noconj) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = k;
+ i__4 = i__;
+ q__2.r = ap[i__3].r * x[i__4].r - ap[i__3].i * x[
+ i__4].i, q__2.i = ap[i__3].r * x[i__4].i
+ + ap[i__3].i * x[i__4].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ++k;
+/* L90: */
+ }
+ if (nounit) {
+ c_div(&q__1, &temp, &ap[kk + j - 1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ } else {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ r_cnjg(&q__3, &ap[k]);
+ i__3 = i__;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i,
+ q__2.i = q__3.r * x[i__3].i + q__3.i * x[
+ i__3].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ++k;
+/* L100: */
+ }
+ if (nounit) {
+ r_cnjg(&q__2, &ap[kk + j - 1]);
+ c_div(&q__1, &temp, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ i__2 = j;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+ kk += j;
+/* L110: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = kx;
+ if (noconj) {
+ i__2 = kk + j - 2;
+ for (k = kk; k <= i__2; ++k) {
+ i__3 = k;
+ i__4 = ix;
+ q__2.r = ap[i__3].r * x[i__4].r - ap[i__3].i * x[
+ i__4].i, q__2.i = ap[i__3].r * x[i__4].i
+ + ap[i__3].i * x[i__4].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix += *incx;
+/* L120: */
+ }
+ if (nounit) {
+ c_div(&q__1, &temp, &ap[kk + j - 1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ } else {
+ i__2 = kk + j - 2;
+ for (k = kk; k <= i__2; ++k) {
+ r_cnjg(&q__3, &ap[k]);
+ i__3 = ix;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i,
+ q__2.i = q__3.r * x[i__3].i + q__3.i * x[
+ i__3].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix += *incx;
+/* L130: */
+ }
+ if (nounit) {
+ r_cnjg(&q__2, &ap[kk + j - 1]);
+ c_div(&q__1, &temp, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ i__2 = jx;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+ jx += *incx;
+ kk += j;
+/* L140: */
+ }
+ }
+ } else {
+ kk = *n * (*n + 1) / 2;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ k = kk;
+ if (noconj) {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = k;
+ i__3 = i__;
+ q__2.r = ap[i__2].r * x[i__3].r - ap[i__2].i * x[
+ i__3].i, q__2.i = ap[i__2].r * x[i__3].i
+ + ap[i__2].i * x[i__3].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ --k;
+/* L150: */
+ }
+ if (nounit) {
+ c_div(&q__1, &temp, &ap[kk - *n + j]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ } else {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ r_cnjg(&q__3, &ap[k]);
+ i__2 = i__;
+ q__2.r = q__3.r * x[i__2].r - q__3.i * x[i__2].i,
+ q__2.i = q__3.r * x[i__2].i + q__3.i * x[
+ i__2].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ --k;
+/* L160: */
+ }
+ if (nounit) {
+ r_cnjg(&q__2, &ap[kk - *n + j]);
+ c_div(&q__1, &temp, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ i__1 = j;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+ kk -= *n - j + 1;
+/* L170: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = kx;
+ if (noconj) {
+ i__1 = kk - (*n - (j + 1));
+ for (k = kk; k >= i__1; --k) {
+ i__2 = k;
+ i__3 = ix;
+ q__2.r = ap[i__2].r * x[i__3].r - ap[i__2].i * x[
+ i__3].i, q__2.i = ap[i__2].r * x[i__3].i
+ + ap[i__2].i * x[i__3].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix -= *incx;
+/* L180: */
+ }
+ if (nounit) {
+ c_div(&q__1, &temp, &ap[kk - *n + j]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ } else {
+ i__1 = kk - (*n - (j + 1));
+ for (k = kk; k >= i__1; --k) {
+ r_cnjg(&q__3, &ap[k]);
+ i__2 = ix;
+ q__2.r = q__3.r * x[i__2].r - q__3.i * x[i__2].i,
+ q__2.i = q__3.r * x[i__2].i + q__3.i * x[
+ i__2].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix -= *incx;
+/* L190: */
+ }
+ if (nounit) {
+ r_cnjg(&q__2, &ap[kk - *n + j]);
+ c_div(&q__1, &temp, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ i__1 = jx;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+ jx -= *incx;
+ kk -= *n - j + 1;
+/* L200: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CTPSV . */
+
+} /* ctpsv_ */
diff --git a/contrib/libs/cblas/ctrmm.c b/contrib/libs/cblas/ctrmm.c
new file mode 100644
index 0000000000..2b1f231ed6
--- /dev/null
+++ b/contrib/libs/cblas/ctrmm.c
@@ -0,0 +1,688 @@
+/* ctrmm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ctrmm_(char *side, char *uplo, char *transa, char *diag,
+ integer *m, integer *n, complex *alpha, complex *a, integer *lda,
+ complex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, k, info;
+ complex temp;
+ extern logical lsame_(char *, char *);
+ logical lside;
+ integer nrowa;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noconj, nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTRMM performs one of the matrix-matrix operations */
+
+/* B := alpha*op( A )*B, or B := alpha*B*op( A ) */
+
+/* where alpha is a scalar, B is an m by n matrix, A is a unit, or */
+/* non-unit, upper or lower triangular matrix and op( A ) is one of */
+
+/* op( A ) = A or op( A ) = A' or op( A ) = conjg( A' ). */
+
+/* Arguments */
+/* ========== */
+
+/* SIDE - CHARACTER*1. */
+/* On entry, SIDE specifies whether op( A ) multiplies B from */
+/* the left or right as follows: */
+
+/* SIDE = 'L' or 'l' B := alpha*op( A )*B. */
+
+/* SIDE = 'R' or 'r' B := alpha*B*op( A ). */
+
+/* Unchanged on exit. */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix A is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANSA - CHARACTER*1. */
+/* On entry, TRANSA specifies the form of op( A ) to be used in */
+/* the matrix multiplication as follows: */
+
+/* TRANSA = 'N' or 'n' op( A ) = A. */
+
+/* TRANSA = 'T' or 't' op( A ) = A'. */
+
+/* TRANSA = 'C' or 'c' op( A ) = conjg( A' ). */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit triangular */
+/* as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of B. M must be at */
+/* least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of B. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX . */
+/* On entry, ALPHA specifies the scalar alpha. When alpha is */
+/* zero then A is not referenced and B need not be set before */
+/* entry. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX array of DIMENSION ( LDA, k ), where k is m */
+/* when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'. */
+/* Before entry with UPLO = 'U' or 'u', the leading k by k */
+/* upper triangular part of the array A must contain the upper */
+/* triangular matrix and the strictly lower triangular part of */
+/* A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading k by k */
+/* lower triangular part of the array A must contain the lower */
+/* triangular matrix and the strictly upper triangular part of */
+/* A is not referenced. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced either, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When SIDE = 'L' or 'l' then */
+/* LDA must be at least max( 1, m ), when SIDE = 'R' or 'r' */
+/* then LDA must be at least max( 1, n ). */
+/* Unchanged on exit. */
+
+/* B - COMPLEX array of DIMENSION ( LDB, n ). */
+/* Before entry, the leading m by n part of the array B must */
+/* contain the matrix B, and on exit is overwritten by the */
+/* transformed matrix. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. LDB must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ lside = lsame_(side, "L");
+ if (lside) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ noconj = lsame_(transa, "T");
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! lside && ! lsame_(side, "R")) {
+ info = 1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ info = 2;
+ } else if (! lsame_(transa, "N") && ! lsame_(transa,
+ "T") && ! lsame_(transa, "C")) {
+ info = 3;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 4;
+ } else if (*m < 0) {
+ info = 5;
+ } else if (*n < 0) {
+ info = 6;
+ } else if (*lda < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldb < max(1,*m)) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("CTRMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0.f && alpha->i == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0.f, b[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lside) {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*A*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * b_dim1;
+ if (b[i__3].r != 0.f || b[i__3].i != 0.f) {
+ i__3 = k + j * b_dim1;
+ q__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3]
+ .i, q__1.i = alpha->r * b[i__3].i +
+ alpha->i * b[i__3].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * a_dim1;
+ q__2.r = temp.r * a[i__6].r - temp.i * a[i__6]
+ .i, q__2.i = temp.r * a[i__6].i +
+ temp.i * a[i__6].r;
+ q__1.r = b[i__5].r + q__2.r, q__1.i = b[i__5]
+ .i + q__2.i;
+ b[i__4].r = q__1.r, b[i__4].i = q__1.i;
+/* L30: */
+ }
+ if (nounit) {
+ i__3 = k + k * a_dim1;
+ q__1.r = temp.r * a[i__3].r - temp.i * a[i__3]
+ .i, q__1.i = temp.r * a[i__3].i +
+ temp.i * a[i__3].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__3 = k + j * b_dim1;
+ b[i__3].r = temp.r, b[i__3].i = temp.i;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (k = *m; k >= 1; --k) {
+ i__2 = k + j * b_dim1;
+ if (b[i__2].r != 0.f || b[i__2].i != 0.f) {
+ i__2 = k + j * b_dim1;
+ q__1.r = alpha->r * b[i__2].r - alpha->i * b[i__2]
+ .i, q__1.i = alpha->r * b[i__2].i +
+ alpha->i * b[i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = k + j * b_dim1;
+ b[i__2].r = temp.r, b[i__2].i = temp.i;
+ if (nounit) {
+ i__2 = k + j * b_dim1;
+ i__3 = k + j * b_dim1;
+ i__4 = k + k * a_dim1;
+ q__1.r = b[i__3].r * a[i__4].r - b[i__3].i *
+ a[i__4].i, q__1.i = b[i__3].r * a[
+ i__4].i + b[i__3].i * a[i__4].r;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ }
+ i__2 = *m;
+ for (i__ = k + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * a_dim1;
+ q__2.r = temp.r * a[i__5].r - temp.i * a[i__5]
+ .i, q__2.i = temp.r * a[i__5].i +
+ temp.i * a[i__5].r;
+ q__1.r = b[i__4].r + q__2.r, q__1.i = b[i__4]
+ .i + q__2.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L60: */
+ }
+ }
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*A'*B or B := alpha*conjg( A' )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ i__2 = i__ + j * b_dim1;
+ temp.r = b[i__2].r, temp.i = b[i__2].i;
+ if (noconj) {
+ if (nounit) {
+ i__2 = i__ + i__ * a_dim1;
+ q__1.r = temp.r * a[i__2].r - temp.i * a[i__2]
+ .i, q__1.i = temp.r * a[i__2].i +
+ temp.i * a[i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = i__ - 1;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + i__ * a_dim1;
+ i__4 = k + j * b_dim1;
+ q__2.r = a[i__3].r * b[i__4].r - a[i__3].i *
+ b[i__4].i, q__2.i = a[i__3].r * b[
+ i__4].i + a[i__3].i * b[i__4].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L90: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &a[i__ + i__ * a_dim1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = i__ - 1;
+ for (k = 1; k <= i__2; ++k) {
+ r_cnjg(&q__3, &a[k + i__ * a_dim1]);
+ i__3 = k + j * b_dim1;
+ q__2.r = q__3.r * b[i__3].r - q__3.i * b[i__3]
+ .i, q__2.i = q__3.r * b[i__3].i +
+ q__3.i * b[i__3].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L100: */
+ }
+ }
+ i__2 = i__ + j * b_dim1;
+ q__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L110: */
+ }
+/* L120: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ temp.r = b[i__3].r, temp.i = b[i__3].i;
+ if (noconj) {
+ if (nounit) {
+ i__3 = i__ + i__ * a_dim1;
+ q__1.r = temp.r * a[i__3].r - temp.i * a[i__3]
+ .i, q__1.i = temp.r * a[i__3].i +
+ temp.i * a[i__3].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__3 = *m;
+ for (k = i__ + 1; k <= i__3; ++k) {
+ i__4 = k + i__ * a_dim1;
+ i__5 = k + j * b_dim1;
+ q__2.r = a[i__4].r * b[i__5].r - a[i__4].i *
+ b[i__5].i, q__2.i = a[i__4].r * b[
+ i__5].i + a[i__4].i * b[i__5].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L130: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &a[i__ + i__ * a_dim1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__3 = *m;
+ for (k = i__ + 1; k <= i__3; ++k) {
+ r_cnjg(&q__3, &a[k + i__ * a_dim1]);
+ i__4 = k + j * b_dim1;
+ q__2.r = q__3.r * b[i__4].r - q__3.i * b[i__4]
+ .i, q__2.i = q__3.r * b[i__4].i +
+ q__3.i * b[i__4].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L140: */
+ }
+ }
+ i__3 = i__ + j * b_dim1;
+ q__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L150: */
+ }
+/* L160: */
+ }
+ }
+ }
+ } else {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*B*A. */
+
+ if (upper) {
+ for (j = *n; j >= 1; --j) {
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ i__1 = j + j * a_dim1;
+ q__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+ q__1.i = temp.r * a[i__1].i + temp.i * a[i__1]
+ .r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ q__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ q__1.i = temp.r * b[i__3].i + temp.i * b[i__3]
+ .r;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L170: */
+ }
+ i__1 = j - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k + j * a_dim1;
+ if (a[i__2].r != 0.f || a[i__2].i != 0.f) {
+ i__2 = k + j * a_dim1;
+ q__1.r = alpha->r * a[i__2].r - alpha->i * a[i__2]
+ .i, q__1.i = alpha->r * a[i__2].i +
+ alpha->i * a[i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * b_dim1;
+ q__2.r = temp.r * b[i__5].r - temp.i * b[i__5]
+ .i, q__2.i = temp.r * b[i__5].i +
+ temp.i * b[i__5].r;
+ q__1.r = b[i__4].r + q__2.r, q__1.i = b[i__4]
+ .i + q__2.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L180: */
+ }
+ }
+/* L190: */
+ }
+/* L200: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ i__2 = j + j * a_dim1;
+ q__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ q__1.i = temp.r * a[i__2].i + temp.i * a[i__2]
+ .r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ q__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ q__1.i = temp.r * b[i__4].i + temp.i * b[i__4]
+ .r;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L210: */
+ }
+ i__2 = *n;
+ for (k = j + 1; k <= i__2; ++k) {
+ i__3 = k + j * a_dim1;
+ if (a[i__3].r != 0.f || a[i__3].i != 0.f) {
+ i__3 = k + j * a_dim1;
+ q__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3]
+ .i, q__1.i = alpha->r * a[i__3].i +
+ alpha->i * a[i__3].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * b_dim1;
+ q__2.r = temp.r * b[i__6].r - temp.i * b[i__6]
+ .i, q__2.i = temp.r * b[i__6].i +
+ temp.i * b[i__6].r;
+ q__1.r = b[i__5].r + q__2.r, q__1.i = b[i__5]
+ .i + q__2.i;
+ b[i__4].r = q__1.r, b[i__4].i = q__1.i;
+/* L220: */
+ }
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*B*A' or B := alpha*B*conjg( A' ). */
+
+ if (upper) {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k - 1;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = j + k * a_dim1;
+ if (a[i__3].r != 0.f || a[i__3].i != 0.f) {
+ if (noconj) {
+ i__3 = j + k * a_dim1;
+ q__1.r = alpha->r * a[i__3].r - alpha->i * a[
+ i__3].i, q__1.i = alpha->r * a[i__3]
+ .i + alpha->i * a[i__3].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ } else {
+ r_cnjg(&q__2, &a[j + k * a_dim1]);
+ q__1.r = alpha->r * q__2.r - alpha->i *
+ q__2.i, q__1.i = alpha->r * q__2.i +
+ alpha->i * q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * b_dim1;
+ q__2.r = temp.r * b[i__6].r - temp.i * b[i__6]
+ .i, q__2.i = temp.r * b[i__6].i +
+ temp.i * b[i__6].r;
+ q__1.r = b[i__5].r + q__2.r, q__1.i = b[i__5]
+ .i + q__2.i;
+ b[i__4].r = q__1.r, b[i__4].i = q__1.i;
+/* L250: */
+ }
+ }
+/* L260: */
+ }
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ if (noconj) {
+ i__2 = k + k * a_dim1;
+ q__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ q__1.i = temp.r * a[i__2].i + temp.i * a[
+ i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ } else {
+ r_cnjg(&q__2, &a[k + k * a_dim1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ if (temp.r != 1.f || temp.i != 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + k * b_dim1;
+ i__4 = i__ + k * b_dim1;
+ q__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ q__1.i = temp.r * b[i__4].i + temp.i * b[
+ i__4].r;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L270: */
+ }
+ }
+/* L280: */
+ }
+ } else {
+ for (k = *n; k >= 1; --k) {
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+ i__2 = j + k * a_dim1;
+ if (a[i__2].r != 0.f || a[i__2].i != 0.f) {
+ if (noconj) {
+ i__2 = j + k * a_dim1;
+ q__1.r = alpha->r * a[i__2].r - alpha->i * a[
+ i__2].i, q__1.i = alpha->r * a[i__2]
+ .i + alpha->i * a[i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ } else {
+ r_cnjg(&q__2, &a[j + k * a_dim1]);
+ q__1.r = alpha->r * q__2.r - alpha->i *
+ q__2.i, q__1.i = alpha->r * q__2.i +
+ alpha->i * q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * b_dim1;
+ q__2.r = temp.r * b[i__5].r - temp.i * b[i__5]
+ .i, q__2.i = temp.r * b[i__5].i +
+ temp.i * b[i__5].r;
+ q__1.r = b[i__4].r + q__2.r, q__1.i = b[i__4]
+ .i + q__2.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L290: */
+ }
+ }
+/* L300: */
+ }
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ if (noconj) {
+ i__1 = k + k * a_dim1;
+ q__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+ q__1.i = temp.r * a[i__1].i + temp.i * a[
+ i__1].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ } else {
+ r_cnjg(&q__2, &a[k + k * a_dim1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ if (temp.r != 1.f || temp.i != 0.f) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + k * b_dim1;
+ i__3 = i__ + k * b_dim1;
+ q__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ q__1.i = temp.r * b[i__3].i + temp.i * b[
+ i__3].r;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L310: */
+ }
+ }
+/* L320: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CTRMM . */
+
+} /* ctrmm_ */
diff --git a/contrib/libs/cblas/ctrmv.c b/contrib/libs/cblas/ctrmv.c
new file mode 100644
index 0000000000..9380189d23
--- /dev/null
+++ b/contrib/libs/cblas/ctrmv.c
@@ -0,0 +1,554 @@
+/* ctrmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ctrmv_(char *uplo, char *trans, char *diag, integer *n,
+ complex *a, integer *lda, complex *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, ix, jx, kx, info;
+ complex temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noconj, nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTRMV performs one of the matrix-vector operations */
+
+/* x := A*x, or x := A'*x, or x := conjg( A' )*x, */
+
+/* where x is an n element vector and A is an n by n unit, or non-unit, */
+/* upper or lower triangular matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' x := A*x. */
+
+/* TRANS = 'T' or 't' x := A'*x. */
+
+/* TRANS = 'C' or 'c' x := conjg( A' )*x. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular matrix and the strictly lower triangular part of */
+/* A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular matrix and the strictly upper triangular part of */
+/* A is not referenced. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced either, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+/* X - COMPLEX array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. On exit, X is overwritten with the */
+/*           transformed vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*lda < max(1,*n)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ }
+ if (info != 0) {
+ xerbla_("CTRMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := A*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ q__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ q__1.r = x[i__4].r + q__2.r, q__1.i = x[i__4].i +
+ q__2.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+/* L10: */
+ }
+ if (nounit) {
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ q__1.r = x[i__3].r * a[i__4].r - x[i__3].i * a[
+ i__4].i, q__1.i = x[i__3].r * a[i__4].i +
+ x[i__3].i * a[i__4].r;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = kx;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = ix;
+ i__4 = ix;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ q__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ q__1.r = x[i__4].r + q__2.r, q__1.i = x[i__4].i +
+ q__2.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ ix += *incx;
+/* L30: */
+ }
+ if (nounit) {
+ i__2 = jx;
+ i__3 = jx;
+ i__4 = j + j * a_dim1;
+ q__1.r = x[i__3].r * a[i__4].r - x[i__3].i * a[
+ i__4].i, q__1.i = x[i__3].r * a[i__4].i +
+ x[i__3].i * a[i__4].r;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ }
+ }
+ jx += *incx;
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ if (x[i__1].r != 0.f || x[i__1].i != 0.f) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = i__;
+ i__3 = i__;
+ i__4 = i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
+ q__2.i = temp.r * a[i__4].i + temp.i * a[
+ i__4].r;
+ q__1.r = x[i__3].r + q__2.r, q__1.i = x[i__3].i +
+ q__2.i;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+/* L50: */
+ }
+ if (nounit) {
+ i__1 = j;
+ i__2 = j;
+ i__3 = j + j * a_dim1;
+ q__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[
+ i__3].i, q__1.i = x[i__2].r * a[i__3].i +
+ x[i__2].i * a[i__3].r;
+ x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+ }
+ }
+/* L60: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ if (x[i__1].r != 0.f || x[i__1].i != 0.f) {
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = kx;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = ix;
+ i__3 = ix;
+ i__4 = i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
+ q__2.i = temp.r * a[i__4].i + temp.i * a[
+ i__4].r;
+ q__1.r = x[i__3].r + q__2.r, q__1.i = x[i__3].i +
+ q__2.i;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ ix -= *incx;
+/* L70: */
+ }
+ if (nounit) {
+ i__1 = jx;
+ i__2 = jx;
+ i__3 = j + j * a_dim1;
+ q__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[
+ i__3].i, q__1.i = x[i__2].r * a[i__3].i +
+ x[i__2].i * a[i__3].r;
+ x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+ }
+ }
+ jx -= *incx;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := A'*x or x := conjg( A' )*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ if (noconj) {
+ if (nounit) {
+ i__1 = j + j * a_dim1;
+ q__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+ q__1.i = temp.r * a[i__1].i + temp.i * a[
+ i__1].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ i__1 = i__ + j * a_dim1;
+ i__2 = i__;
+ q__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[
+ i__2].i, q__2.i = a[i__1].r * x[i__2].i +
+ a[i__1].i * x[i__2].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L90: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &a[j + j * a_dim1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__1 = i__;
+ q__2.r = q__3.r * x[i__1].r - q__3.i * x[i__1].i,
+ q__2.i = q__3.r * x[i__1].i + q__3.i * x[
+ i__1].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L100: */
+ }
+ }
+ i__1 = j;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+/* L110: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = jx;
+ if (noconj) {
+ if (nounit) {
+ i__1 = j + j * a_dim1;
+ q__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+ q__1.i = temp.r * a[i__1].i + temp.i * a[
+ i__1].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ ix -= *incx;
+ i__1 = i__ + j * a_dim1;
+ i__2 = ix;
+ q__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[
+ i__2].i, q__2.i = a[i__1].r * x[i__2].i +
+ a[i__1].i * x[i__2].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L120: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &a[j + j * a_dim1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ ix -= *incx;
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__1 = ix;
+ q__2.r = q__3.r * x[i__1].r - q__3.i * x[i__1].i,
+ q__2.i = q__3.r * x[i__1].i + q__3.i * x[
+ i__1].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L130: */
+ }
+ }
+ i__1 = jx;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+ jx -= *incx;
+/* L140: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ if (noconj) {
+ if (nounit) {
+ i__2 = j + j * a_dim1;
+ q__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ q__1.i = temp.r * a[i__2].i + temp.i * a[
+ i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__;
+ q__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+ i__4].i, q__2.i = a[i__3].r * x[i__4].i +
+ a[i__3].i * x[i__4].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L150: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &a[j + j * a_dim1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__3 = i__;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i,
+ q__2.i = q__3.r * x[i__3].i + q__3.i * x[
+ i__3].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L160: */
+ }
+ }
+ i__2 = j;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+/* L170: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = jx;
+ if (noconj) {
+ if (nounit) {
+ i__2 = j + j * a_dim1;
+ q__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ q__1.i = temp.r * a[i__2].i + temp.i * a[
+ i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ i__3 = i__ + j * a_dim1;
+ i__4 = ix;
+ q__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+ i__4].i, q__2.i = a[i__3].r * x[i__4].i +
+ a[i__3].i * x[i__4].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L180: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &a[j + j * a_dim1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__3 = ix;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i,
+ q__2.i = q__3.r * x[i__3].i + q__3.i * x[
+ i__3].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L190: */
+ }
+ }
+ i__2 = jx;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+ jx += *incx;
+/* L200: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CTRMV . */
+
+} /* ctrmv_ */
diff --git a/contrib/libs/cblas/ctrsm.c b/contrib/libs/cblas/ctrsm.c
new file mode 100644
index 0000000000..ec893f619a
--- /dev/null
+++ b/contrib/libs/cblas/ctrsm.c
@@ -0,0 +1,698 @@
+/* ctrsm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+
+/* Subroutine */ int ctrsm_(char *side, char *uplo, char *transa, char *diag,
+ integer *m, integer *n, complex *alpha, complex *a, integer *lda,
+ complex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6, i__7;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void c_div(complex *, complex *, complex *), r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, k, info;
+ complex temp;
+ extern logical lsame_(char *, char *);
+ logical lside;
+ integer nrowa;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noconj, nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTRSM solves one of the matrix equations */
+
+/* op( A )*X = alpha*B, or X*op( A ) = alpha*B, */
+
+/* where alpha is a scalar, X and B are m by n matrices, A is a unit, or */
+/* non-unit, upper or lower triangular matrix and op( A ) is one of */
+
+/* op( A ) = A or op( A ) = A' or op( A ) = conjg( A' ). */
+
+/* The matrix X is overwritten on B. */
+
+/* Arguments */
+/* ========== */
+
+/* SIDE - CHARACTER*1. */
+/* On entry, SIDE specifies whether op( A ) appears on the left */
+/* or right of X as follows: */
+
+/* SIDE = 'L' or 'l' op( A )*X = alpha*B. */
+
+/* SIDE = 'R' or 'r' X*op( A ) = alpha*B. */
+
+/* Unchanged on exit. */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix A is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANSA - CHARACTER*1. */
+/* On entry, TRANSA specifies the form of op( A ) to be used in */
+/* the matrix multiplication as follows: */
+
+/* TRANSA = 'N' or 'n' op( A ) = A. */
+
+/* TRANSA = 'T' or 't' op( A ) = A'. */
+
+/* TRANSA = 'C' or 'c' op( A ) = conjg( A' ). */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit triangular */
+/* as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of B. M must be at */
+/* least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of B. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX . */
+/* On entry, ALPHA specifies the scalar alpha. When alpha is */
+/* zero then A is not referenced and B need not be set before */
+/* entry. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX array of DIMENSION ( LDA, k ), where k is m */
+/* when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'. */
+/* Before entry with UPLO = 'U' or 'u', the leading k by k */
+/* upper triangular part of the array A must contain the upper */
+/* triangular matrix and the strictly lower triangular part of */
+/* A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading k by k */
+/* lower triangular part of the array A must contain the lower */
+/* triangular matrix and the strictly upper triangular part of */
+/* A is not referenced. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced either, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When SIDE = 'L' or 'l' then */
+/* LDA must be at least max( 1, m ), when SIDE = 'R' or 'r' */
+/* then LDA must be at least max( 1, n ). */
+/* Unchanged on exit. */
+
+/* B - COMPLEX array of DIMENSION ( LDB, n ). */
+/* Before entry, the leading m by n part of the array B must */
+/* contain the right-hand side matrix B, and on exit is */
+/* overwritten by the solution matrix X. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. LDB must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ lside = lsame_(side, "L");
+ if (lside) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ noconj = lsame_(transa, "T");
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! lside && ! lsame_(side, "R")) {
+ info = 1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ info = 2;
+ } else if (! lsame_(transa, "N") && ! lsame_(transa,
+ "T") && ! lsame_(transa, "C")) {
+ info = 3;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 4;
+ } else if (*m < 0) {
+ info = 5;
+ } else if (*n < 0) {
+ info = 6;
+ } else if (*lda < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldb < max(1,*m)) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("CTRSM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0.f && alpha->i == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0.f, b[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lside) {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*inv( A )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (alpha->r != 1.f || alpha->i != 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ q__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, q__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L30: */
+ }
+ }
+ for (k = *m; k >= 1; --k) {
+ i__2 = k + j * b_dim1;
+ if (b[i__2].r != 0.f || b[i__2].i != 0.f) {
+ if (nounit) {
+ i__2 = k + j * b_dim1;
+ c_div(&q__1, &b[k + j * b_dim1], &a[k + k *
+ a_dim1]);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ }
+ i__2 = k - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = k + j * b_dim1;
+ i__6 = i__ + k * a_dim1;
+ q__2.r = b[i__5].r * a[i__6].r - b[i__5].i *
+ a[i__6].i, q__2.i = b[i__5].r * a[
+ i__6].i + b[i__5].i * a[i__6].r;
+ q__1.r = b[i__4].r - q__2.r, q__1.i = b[i__4]
+ .i - q__2.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L40: */
+ }
+ }
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (alpha->r != 1.f || alpha->i != 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ q__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, q__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L70: */
+ }
+ }
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * b_dim1;
+ if (b[i__3].r != 0.f || b[i__3].i != 0.f) {
+ if (nounit) {
+ i__3 = k + j * b_dim1;
+ c_div(&q__1, &b[k + j * b_dim1], &a[k + k *
+ a_dim1]);
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+ }
+ i__3 = *m;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = k + j * b_dim1;
+ i__7 = i__ + k * a_dim1;
+ q__2.r = b[i__6].r * a[i__7].r - b[i__6].i *
+ a[i__7].i, q__2.i = b[i__6].r * a[
+ i__7].i + b[i__6].i * a[i__7].r;
+ q__1.r = b[i__5].r - q__2.r, q__1.i = b[i__5]
+ .i - q__2.i;
+ b[i__4].r = q__1.r, b[i__4].i = q__1.i;
+/* L80: */
+ }
+ }
+/* L90: */
+ }
+/* L100: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*inv( A' )*B */
+/* or B := alpha*inv( conjg( A' ) )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ q__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+ q__1.i = alpha->r * b[i__3].i + alpha->i * b[
+ i__3].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ if (noconj) {
+ i__3 = i__ - 1;
+ for (k = 1; k <= i__3; ++k) {
+ i__4 = k + i__ * a_dim1;
+ i__5 = k + j * b_dim1;
+ q__2.r = a[i__4].r * b[i__5].r - a[i__4].i *
+ b[i__5].i, q__2.i = a[i__4].r * b[
+ i__5].i + a[i__4].i * b[i__5].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L110: */
+ }
+ if (nounit) {
+ c_div(&q__1, &temp, &a[i__ + i__ * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ } else {
+ i__3 = i__ - 1;
+ for (k = 1; k <= i__3; ++k) {
+ r_cnjg(&q__3, &a[k + i__ * a_dim1]);
+ i__4 = k + j * b_dim1;
+ q__2.r = q__3.r * b[i__4].r - q__3.i * b[i__4]
+ .i, q__2.i = q__3.r * b[i__4].i +
+ q__3.i * b[i__4].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L120: */
+ }
+ if (nounit) {
+ r_cnjg(&q__2, &a[i__ + i__ * a_dim1]);
+ c_div(&q__1, &temp, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = temp.r, b[i__3].i = temp.i;
+/* L130: */
+ }
+/* L140: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ i__2 = i__ + j * b_dim1;
+ q__1.r = alpha->r * b[i__2].r - alpha->i * b[i__2].i,
+ q__1.i = alpha->r * b[i__2].i + alpha->i * b[
+ i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ if (noconj) {
+ i__2 = *m;
+ for (k = i__ + 1; k <= i__2; ++k) {
+ i__3 = k + i__ * a_dim1;
+ i__4 = k + j * b_dim1;
+ q__2.r = a[i__3].r * b[i__4].r - a[i__3].i *
+ b[i__4].i, q__2.i = a[i__3].r * b[
+ i__4].i + a[i__3].i * b[i__4].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L150: */
+ }
+ if (nounit) {
+ c_div(&q__1, &temp, &a[i__ + i__ * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ } else {
+ i__2 = *m;
+ for (k = i__ + 1; k <= i__2; ++k) {
+ r_cnjg(&q__3, &a[k + i__ * a_dim1]);
+ i__3 = k + j * b_dim1;
+ q__2.r = q__3.r * b[i__3].r - q__3.i * b[i__3]
+ .i, q__2.i = q__3.r * b[i__3].i +
+ q__3.i * b[i__3].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L160: */
+ }
+ if (nounit) {
+ r_cnjg(&q__2, &a[i__ + i__ * a_dim1]);
+ c_div(&q__1, &temp, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ i__2 = i__ + j * b_dim1;
+ b[i__2].r = temp.r, b[i__2].i = temp.i;
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ }
+ } else {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*B*inv( A ). */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (alpha->r != 1.f || alpha->i != 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ q__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, q__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L190: */
+ }
+ }
+ i__2 = j - 1;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * a_dim1;
+ if (a[i__3].r != 0.f || a[i__3].i != 0.f) {
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = k + j * a_dim1;
+ i__7 = i__ + k * b_dim1;
+ q__2.r = a[i__6].r * b[i__7].r - a[i__6].i *
+ b[i__7].i, q__2.i = a[i__6].r * b[
+ i__7].i + a[i__6].i * b[i__7].r;
+ q__1.r = b[i__5].r - q__2.r, q__1.i = b[i__5]
+ .i - q__2.i;
+ b[i__4].r = q__1.r, b[i__4].i = q__1.i;
+/* L200: */
+ }
+ }
+/* L210: */
+ }
+ if (nounit) {
+ c_div(&q__1, &c_b1, &a[j + j * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ q__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ q__1.i = temp.r * b[i__4].i + temp.i * b[
+ i__4].r;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L220: */
+ }
+ }
+/* L230: */
+ }
+ } else {
+ for (j = *n; j >= 1; --j) {
+ if (alpha->r != 1.f || alpha->i != 0.f) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ q__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3]
+ .i, q__1.i = alpha->r * b[i__3].i +
+ alpha->i * b[i__3].r;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L240: */
+ }
+ }
+ i__1 = *n;
+ for (k = j + 1; k <= i__1; ++k) {
+ i__2 = k + j * a_dim1;
+ if (a[i__2].r != 0.f || a[i__2].i != 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = k + j * a_dim1;
+ i__6 = i__ + k * b_dim1;
+ q__2.r = a[i__5].r * b[i__6].r - a[i__5].i *
+ b[i__6].i, q__2.i = a[i__5].r * b[
+ i__6].i + a[i__5].i * b[i__6].r;
+ q__1.r = b[i__4].r - q__2.r, q__1.i = b[i__4]
+ .i - q__2.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L250: */
+ }
+ }
+/* L260: */
+ }
+ if (nounit) {
+ c_div(&q__1, &c_b1, &a[j + j * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ q__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ q__1.i = temp.r * b[i__3].i + temp.i * b[
+ i__3].r;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L270: */
+ }
+ }
+/* L280: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*B*inv( A' ) */
+/* or B := alpha*B*inv( conjg( A' ) ). */
+
+ if (upper) {
+ for (k = *n; k >= 1; --k) {
+ if (nounit) {
+ if (noconj) {
+ c_div(&q__1, &c_b1, &a[k + k * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ } else {
+ r_cnjg(&q__2, &a[k + k * a_dim1]);
+ c_div(&q__1, &c_b1, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + k * b_dim1;
+ i__3 = i__ + k * b_dim1;
+ q__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ q__1.i = temp.r * b[i__3].i + temp.i * b[
+ i__3].r;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L290: */
+ }
+ }
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + k * a_dim1;
+ if (a[i__2].r != 0.f || a[i__2].i != 0.f) {
+ if (noconj) {
+ i__2 = j + k * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ } else {
+ r_cnjg(&q__1, &a[j + k * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * b_dim1;
+ q__2.r = temp.r * b[i__5].r - temp.i * b[i__5]
+ .i, q__2.i = temp.r * b[i__5].i +
+ temp.i * b[i__5].r;
+ q__1.r = b[i__4].r - q__2.r, q__1.i = b[i__4]
+ .i - q__2.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L300: */
+ }
+ }
+/* L310: */
+ }
+ if (alpha->r != 1.f || alpha->i != 0.f) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + k * b_dim1;
+ i__3 = i__ + k * b_dim1;
+ q__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3]
+ .i, q__1.i = alpha->r * b[i__3].i +
+ alpha->i * b[i__3].r;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L320: */
+ }
+ }
+/* L330: */
+ }
+ } else {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (nounit) {
+ if (noconj) {
+ c_div(&q__1, &c_b1, &a[k + k * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ } else {
+ r_cnjg(&q__2, &a[k + k * a_dim1]);
+ c_div(&q__1, &c_b1, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + k * b_dim1;
+ i__4 = i__ + k * b_dim1;
+ q__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ q__1.i = temp.r * b[i__4].i + temp.i * b[
+ i__4].r;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L340: */
+ }
+ }
+ i__2 = *n;
+ for (j = k + 1; j <= i__2; ++j) {
+ i__3 = j + k * a_dim1;
+ if (a[i__3].r != 0.f || a[i__3].i != 0.f) {
+ if (noconj) {
+ i__3 = j + k * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ } else {
+ r_cnjg(&q__1, &a[j + k * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * b_dim1;
+ q__2.r = temp.r * b[i__6].r - temp.i * b[i__6]
+ .i, q__2.i = temp.r * b[i__6].i +
+ temp.i * b[i__6].r;
+ q__1.r = b[i__5].r - q__2.r, q__1.i = b[i__5]
+ .i - q__2.i;
+ b[i__4].r = q__1.r, b[i__4].i = q__1.i;
+/* L350: */
+ }
+ }
+/* L360: */
+ }
+ if (alpha->r != 1.f || alpha->i != 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + k * b_dim1;
+ i__4 = i__ + k * b_dim1;
+ q__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, q__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L370: */
+ }
+ }
+/* L380: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CTRSM . */
+
+} /* ctrsm_ */
diff --git a/contrib/libs/cblas/ctrsv.c b/contrib/libs/cblas/ctrsv.c
new file mode 100644
index 0000000000..9c46cacbcc
--- /dev/null
+++ b/contrib/libs/cblas/ctrsv.c
@@ -0,0 +1,523 @@
+/* ctrsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ctrsv_(char *uplo, char *trans, char *diag, integer *n,
+	complex *a, integer *lda, complex *x, integer *incx)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    complex q__1, q__2, q__3;
+
+    /* Builtin functions */
+    void c_div(complex *, complex *, complex *), r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    integer i__, j, ix, jx, kx, info;
+    complex temp;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical noconj, nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTRSV solves one of the systems of equations */
+
+/* A*x = b, or A'*x = b, or conjg( A' )*x = b, */
+
+/* where b and x are n element vectors and A is an n by n unit, or */
+/* non-unit, upper or lower triangular matrix. */
+
+/* No test for singularity or near-singularity is included in this */
+/* routine. Such tests must be performed before calling this routine. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the equations to be solved as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' A*x = b. */
+
+/* TRANS = 'T' or 't' A'*x = b. */
+
+/* TRANS = 'C' or 'c' conjg( A' )*x = b. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular matrix and the strictly lower triangular part of */
+/* A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular matrix and the strictly upper triangular part of */
+/* A is not referenced. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced either, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+/* X - COMPLEX array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element right-hand side vector b. On exit, X is overwritten */
+/* with the solution vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --x;
+
+    /* Function Body */
+    info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	info = 1;
+    } else if (! lsame_(trans, "N") && ! lsame_(trans,
+	    "T") && ! lsame_(trans, "C")) {
+	info = 2;
+    } else if (! lsame_(diag, "U") && ! lsame_(diag,
+	    "N")) {
+	info = 3;
+    } else if (*n < 0) {
+	info = 4;
+    } else if (*lda < max(1,*n)) {
+	info = 6;
+    } else if (*incx == 0) {
+	info = 8;
+    }
+    if (info != 0) {
+	xerbla_("CTRSV ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    noconj = lsame_(trans, "T");  /* 'T' transposes only; 'C' additionally conjugates A */
+    nounit = lsame_(diag, "N");  /* unit-diagonal case ('U') skips the division by a(j,j) */
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+    if (*incx <= 0) {
+	kx = 1 - (*n - 1) * *incx;  /* negative stride: start at the far end */
+    } else if (*incx != 1) {
+	kx = 1;
+    }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+    if (lsame_(trans, "N")) {
+
+/* Form x := inv( A )*x. */
+
+	if (lsame_(uplo, "U")) {
+	    if (*incx == 1) {
+		for (j = *n; j >= 1; --j) {  /* upper triangular: back substitution from the last row */
+		    i__1 = j;
+		    if (x[i__1].r != 0.f || x[i__1].i != 0.f) {
+			if (nounit) {
+			    i__1 = j;
+			    c_div(&q__1, &x[j], &a[j + j * a_dim1]);
+			    x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+			}
+			i__1 = j;
+			temp.r = x[i__1].r, temp.i = x[i__1].i;
+			for (i__ = j - 1; i__ >= 1; --i__) {
+			    i__1 = i__;
+			    i__2 = i__;
+			    i__3 = i__ + j * a_dim1;
+			    q__2.r = temp.r * a[i__3].r - temp.i * a[i__3].i,
+				    q__2.i = temp.r * a[i__3].i + temp.i * a[
+				    i__3].r;
+			    q__1.r = x[i__2].r - q__2.r, q__1.i = x[i__2].i -
+				    q__2.i;
+			    x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+/* L10: */
+			}
+		    }
+/* L20: */
+		}
+	    } else {
+		jx = kx + (*n - 1) * *incx;
+		for (j = *n; j >= 1; --j) {
+		    i__1 = jx;
+		    if (x[i__1].r != 0.f || x[i__1].i != 0.f) {
+			if (nounit) {
+			    i__1 = jx;
+			    c_div(&q__1, &x[jx], &a[j + j * a_dim1]);
+			    x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+			}
+			i__1 = jx;
+			temp.r = x[i__1].r, temp.i = x[i__1].i;
+			ix = jx;
+			for (i__ = j - 1; i__ >= 1; --i__) {
+			    ix -= *incx;
+			    i__1 = ix;
+			    i__2 = ix;
+			    i__3 = i__ + j * a_dim1;
+			    q__2.r = temp.r * a[i__3].r - temp.i * a[i__3].i,
+				    q__2.i = temp.r * a[i__3].i + temp.i * a[
+				    i__3].r;
+			    q__1.r = x[i__2].r - q__2.r, q__1.i = x[i__2].i -
+				    q__2.i;
+			    x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+/* L30: */
+			}
+		    }
+		    jx -= *incx;
+/* L40: */
+		}
+	    }
+	} else {
+	    if (*incx == 1) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {  /* lower triangular: forward substitution from the first row */
+		    i__2 = j;
+		    if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+			if (nounit) {
+			    i__2 = j;
+			    c_div(&q__1, &x[j], &a[j + j * a_dim1]);
+			    x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+			}
+			i__2 = j;
+			temp.r = x[i__2].r, temp.i = x[i__2].i;
+			i__2 = *n;
+			for (i__ = j + 1; i__ <= i__2; ++i__) {
+			    i__3 = i__;
+			    i__4 = i__;
+			    i__5 = i__ + j * a_dim1;
+			    q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+				    q__2.i = temp.r * a[i__5].i + temp.i * a[
+				    i__5].r;
+			    q__1.r = x[i__4].r - q__2.r, q__1.i = x[i__4].i -
+				    q__2.i;
+			    x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+/* L50: */
+			}
+		    }
+/* L60: */
+		}
+	    } else {
+		jx = kx;
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = jx;
+		    if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+			if (nounit) {
+			    i__2 = jx;
+			    c_div(&q__1, &x[jx], &a[j + j * a_dim1]);
+			    x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+			}
+			i__2 = jx;
+			temp.r = x[i__2].r, temp.i = x[i__2].i;
+			ix = jx;
+			i__2 = *n;
+			for (i__ = j + 1; i__ <= i__2; ++i__) {
+			    ix += *incx;
+			    i__3 = ix;
+			    i__4 = ix;
+			    i__5 = i__ + j * a_dim1;
+			    q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+				    q__2.i = temp.r * a[i__5].i + temp.i * a[
+				    i__5].r;
+			    q__1.r = x[i__4].r - q__2.r, q__1.i = x[i__4].i -
+				    q__2.i;
+			    x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+/* L70: */
+			}
+		    }
+		    jx += *incx;
+/* L80: */
+		}
+	    }
+	}
+    } else {
+
+/* Form x := inv( A' )*x or x := inv( conjg( A' ) )*x. */
+
+	if (lsame_(uplo, "U")) {
+	    if (*incx == 1) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = j;
+		    temp.r = x[i__2].r, temp.i = x[i__2].i;
+		    if (noconj) {
+			i__2 = j - 1;
+			for (i__ = 1; i__ <= i__2; ++i__) {
+			    i__3 = i__ + j * a_dim1;
+			    i__4 = i__;
+			    q__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+				    i__4].i, q__2.i = a[i__3].r * x[i__4].i +
+				    a[i__3].i * x[i__4].r;
+			    q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+				    q__2.i;
+			    temp.r = q__1.r, temp.i = q__1.i;
+/* L90: */
+			}
+			if (nounit) {
+			    c_div(&q__1, &temp, &a[j + j * a_dim1]);
+			    temp.r = q__1.r, temp.i = q__1.i;
+			}
+		    } else {
+			i__2 = j - 1;
+			for (i__ = 1; i__ <= i__2; ++i__) {
+			    r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+			    i__3 = i__;
+			    q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i,
+				    q__2.i = q__3.r * x[i__3].i + q__3.i * x[
+				    i__3].r;
+			    q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+				    q__2.i;
+			    temp.r = q__1.r, temp.i = q__1.i;
+/* L100: */
+			}
+			if (nounit) {
+			    r_cnjg(&q__2, &a[j + j * a_dim1]);
+			    c_div(&q__1, &temp, &q__2);
+			    temp.r = q__1.r, temp.i = q__1.i;
+			}
+		    }
+		    i__2 = j;
+		    x[i__2].r = temp.r, x[i__2].i = temp.i;
+/* L110: */
+		}
+	    } else {
+		jx = kx;
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    ix = kx;
+		    i__2 = jx;
+		    temp.r = x[i__2].r, temp.i = x[i__2].i;
+		    if (noconj) {
+			i__2 = j - 1;
+			for (i__ = 1; i__ <= i__2; ++i__) {
+			    i__3 = i__ + j * a_dim1;
+			    i__4 = ix;
+			    q__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+				    i__4].i, q__2.i = a[i__3].r * x[i__4].i +
+				    a[i__3].i * x[i__4].r;
+			    q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+				    q__2.i;
+			    temp.r = q__1.r, temp.i = q__1.i;
+			    ix += *incx;
+/* L120: */
+			}
+			if (nounit) {
+			    c_div(&q__1, &temp, &a[j + j * a_dim1]);
+			    temp.r = q__1.r, temp.i = q__1.i;
+			}
+		    } else {
+			i__2 = j - 1;
+			for (i__ = 1; i__ <= i__2; ++i__) {
+			    r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+			    i__3 = ix;
+			    q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i,
+				    q__2.i = q__3.r * x[i__3].i + q__3.i * x[
+				    i__3].r;
+			    q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+				    q__2.i;
+			    temp.r = q__1.r, temp.i = q__1.i;
+			    ix += *incx;
+/* L130: */
+			}
+			if (nounit) {
+			    r_cnjg(&q__2, &a[j + j * a_dim1]);
+			    c_div(&q__1, &temp, &q__2);
+			    temp.r = q__1.r, temp.i = q__1.i;
+			}
+		    }
+		    i__2 = jx;
+		    x[i__2].r = temp.r, x[i__2].i = temp.i;
+		    jx += *incx;
+/* L140: */
+		}
+	    }
+	} else {
+	    if (*incx == 1) {
+		for (j = *n; j >= 1; --j) {
+		    i__1 = j;
+		    temp.r = x[i__1].r, temp.i = x[i__1].i;
+		    if (noconj) {
+			i__1 = j + 1;
+			for (i__ = *n; i__ >= i__1; --i__) {
+			    i__2 = i__ + j * a_dim1;
+			    i__3 = i__;
+			    q__2.r = a[i__2].r * x[i__3].r - a[i__2].i * x[
+				    i__3].i, q__2.i = a[i__2].r * x[i__3].i +
+				    a[i__2].i * x[i__3].r;
+			    q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+				    q__2.i;
+			    temp.r = q__1.r, temp.i = q__1.i;
+/* L150: */
+			}
+			if (nounit) {
+			    c_div(&q__1, &temp, &a[j + j * a_dim1]);
+			    temp.r = q__1.r, temp.i = q__1.i;
+			}
+		    } else {
+			i__1 = j + 1;
+			for (i__ = *n; i__ >= i__1; --i__) {
+			    r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+			    i__2 = i__;
+			    q__2.r = q__3.r * x[i__2].r - q__3.i * x[i__2].i,
+				    q__2.i = q__3.r * x[i__2].i + q__3.i * x[
+				    i__2].r;
+			    q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+				    q__2.i;
+			    temp.r = q__1.r, temp.i = q__1.i;
+/* L160: */
+			}
+			if (nounit) {
+			    r_cnjg(&q__2, &a[j + j * a_dim1]);
+			    c_div(&q__1, &temp, &q__2);
+			    temp.r = q__1.r, temp.i = q__1.i;
+			}
+		    }
+		    i__1 = j;
+		    x[i__1].r = temp.r, x[i__1].i = temp.i;
+/* L170: */
+		}
+	    } else {
+		kx += (*n - 1) * *incx;
+		jx = kx;
+		for (j = *n; j >= 1; --j) {
+		    ix = kx;
+		    i__1 = jx;
+		    temp.r = x[i__1].r, temp.i = x[i__1].i;
+		    if (noconj) {
+			i__1 = j + 1;
+			for (i__ = *n; i__ >= i__1; --i__) {
+			    i__2 = i__ + j * a_dim1;
+			    i__3 = ix;
+			    q__2.r = a[i__2].r * x[i__3].r - a[i__2].i * x[
+				    i__3].i, q__2.i = a[i__2].r * x[i__3].i +
+				    a[i__2].i * x[i__3].r;
+			    q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+				    q__2.i;
+			    temp.r = q__1.r, temp.i = q__1.i;
+			    ix -= *incx;
+/* L180: */
+			}
+			if (nounit) {
+			    c_div(&q__1, &temp, &a[j + j * a_dim1]);
+			    temp.r = q__1.r, temp.i = q__1.i;
+			}
+		    } else {
+			i__1 = j + 1;
+			for (i__ = *n; i__ >= i__1; --i__) {
+			    r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+			    i__2 = ix;
+			    q__2.r = q__3.r * x[i__2].r - q__3.i * x[i__2].i,
+				    q__2.i = q__3.r * x[i__2].i + q__3.i * x[
+				    i__2].r;
+			    q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+				    q__2.i;
+			    temp.r = q__1.r, temp.i = q__1.i;
+			    ix -= *incx;
+/* L190: */
+			}
+			if (nounit) {
+			    r_cnjg(&q__2, &a[j + j * a_dim1]);
+			    c_div(&q__1, &temp, &q__2);
+			    temp.r = q__1.r, temp.i = q__1.i;
+			}
+		    }
+		    i__1 = jx;
+		    x[i__1].r = temp.r, x[i__1].i = temp.i;
+		    jx -= *incx;
+/* L200: */
+		}
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of CTRSV . */
+
+} /* ctrsv_ */
diff --git a/contrib/libs/cblas/dasum.c b/contrib/libs/cblas/dasum.c
new file mode 100644
index 0000000000..6e5f54cef3
--- /dev/null
+++ b/contrib/libs/cblas/dasum.c
@@ -0,0 +1,101 @@
+/* dasum.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+doublereal dasum_(integer *n, doublereal *dx, integer *incx)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    doublereal ret_val, d__1, d__2, d__3, d__4, d__5, d__6;
+
+    /* Local variables */
+    integer i__, m, mp1;
+    doublereal dtemp;
+    integer nincx;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* takes the sum of the absolute values. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 3/93 to return if incx .le. 0. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+    /* Parameter adjustments */
+    --dx;
+
+    /* Function Body */
+    ret_val = 0.;
+    dtemp = 0.;
+    if (*n <= 0 || *incx <= 0) {
+	return ret_val;  /* non-positive N or INCX: defined to return 0 */
+    }
+    if (*incx == 1) {
+	goto L20;
+    }
+
+/* code for increment not equal to 1 */
+
+    nincx = *n * *incx;  /* last index reached with the non-unit stride */
+    i__1 = nincx;
+    i__2 = *incx;
+    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+	dtemp += (d__1 = dx[i__], abs(d__1));
+/* L10: */
+    }
+    ret_val = dtemp;
+    return ret_val;
+
+/* code for increment equal to 1 */
+
+
+/* clean-up loop */
+
+L20:
+    m = *n % 6;  /* peel the remainder first, then sum six elements per iteration below */
+    if (m == 0) {
+	goto L40;
+    }
+    i__2 = m;
+    for (i__ = 1; i__ <= i__2; ++i__) {
+	dtemp += (d__1 = dx[i__], abs(d__1));
+/* L30: */
+    }
+    if (*n < 6) {
+	goto L60;
+    }
+L40:
+    mp1 = m + 1;
+    i__2 = *n;
+    for (i__ = mp1; i__ <= i__2; i__ += 6) {
+	dtemp = dtemp + (d__1 = dx[i__], abs(d__1)) + (d__2 = dx[i__ + 1],
+		abs(d__2)) + (d__3 = dx[i__ + 2], abs(d__3)) + (d__4 = dx[i__
+		+ 3], abs(d__4)) + (d__5 = dx[i__ + 4], abs(d__5)) + (d__6 =
+		dx[i__ + 5], abs(d__6));
+/* L50: */
+    }
+L60:
+    ret_val = dtemp;
+    return ret_val;
+} /* dasum_ */
diff --git a/contrib/libs/cblas/daxpy.c b/contrib/libs/cblas/daxpy.c
new file mode 100644
index 0000000000..d6ef82815b
--- /dev/null
+++ b/contrib/libs/cblas/daxpy.c
@@ -0,0 +1,107 @@
+/* daxpy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int daxpy_(integer *n, doublereal *da, doublereal *dx,
+	integer *incx, doublereal *dy, integer *incy)
+{
+    /* System generated locals */
+    integer i__1;
+
+    /* Local variables */
+    integer i__, m, ix, iy, mp1;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* constant times a vector plus a vector. */
+/* uses unrolled loops for increments equal to one. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+    /* Parameter adjustments */
+    --dy;
+    --dx;
+
+    /* Function Body */
+    if (*n <= 0) {
+	return 0;
+    }
+    if (*da == 0.) {
+	return 0;  /* alpha == 0 leaves y unchanged; skip the whole pass */
+    }
+    if (*incx == 1 && *incy == 1) {
+	goto L20;
+    }
+
+/* code for unequal increments or equal increments */
+/* not equal to 1 */
+
+    ix = 1;
+    iy = 1;
+    if (*incx < 0) {
+	ix = (-(*n) + 1) * *incx + 1;  /* negative stride: start at the far end */
+    }
+    if (*incy < 0) {
+	iy = (-(*n) + 1) * *incy + 1;
+    }
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dy[iy] += *da * dx[ix];
+	ix += *incx;
+	iy += *incy;
+/* L10: */
+    }
+    return 0;
+
+/* code for both increments equal to 1 */
+
+
+/* clean-up loop */
+
+L20:
+    m = *n % 4;  /* peel the remainder before the 4-way unrolled loop */
+    if (m == 0) {
+	goto L40;
+    }
+    i__1 = m;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dy[i__] += *da * dx[i__];
+/* L30: */
+    }
+    if (*n < 4) {
+	return 0;
+    }
+L40:
+    mp1 = m + 1;
+    i__1 = *n;
+    for (i__ = mp1; i__ <= i__1; i__ += 4) {
+	dy[i__] += *da * dx[i__];
+	dy[i__ + 1] += *da * dx[i__ + 1];
+	dy[i__ + 2] += *da * dx[i__ + 2];
+	dy[i__ + 3] += *da * dx[i__ + 3];
+/* L50: */
+    }
+    return 0;
+} /* daxpy_ */
diff --git a/contrib/libs/cblas/dcabs1.c b/contrib/libs/cblas/dcabs1.c
new file mode 100644
index 0000000000..0e926f0a44
--- /dev/null
+++ b/contrib/libs/cblas/dcabs1.c
@@ -0,0 +1,36 @@
+/* dcabs1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+doublereal dcabs1_(doublecomplex *z__)
+{
+    /* System generated locals */
+    doublereal ret_val, d__1, d__2;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. */
+/* Purpose */
+/* ======= */
+
+/* DCABS1 computes absolute value of a double complex number */
+
+/* .. Intrinsic Functions .. */
+
+    ret_val = (d__1 = z__->r, abs(d__1)) + (d__2 = d_imag(z__), abs(d__2));  /* |Re z| + |Im z|: the BLAS "cabs1" 1-norm, deliberately NOT the modulus */
+    return ret_val;
+} /* dcabs1_ */
diff --git a/contrib/libs/cblas/dcopy.c b/contrib/libs/cblas/dcopy.c
new file mode 100644
index 0000000000..9033cceef2
--- /dev/null
+++ b/contrib/libs/cblas/dcopy.c
@@ -0,0 +1,107 @@
+/* dcopy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dcopy_(integer *n, doublereal *dx, integer *incx,
+	doublereal *dy, integer *incy)
+{
+    /* System generated locals */
+    integer i__1;
+
+    /* Local variables */
+    integer i__, m, ix, iy, mp1;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* copies a vector, x, to a vector, y. */
+/* uses unrolled loops for increments equal to one. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+    /* Parameter adjustments */
+    --dy;
+    --dx;
+
+    /* Function Body */
+    if (*n <= 0) {
+	return 0;
+    }
+    if (*incx == 1 && *incy == 1) {
+	goto L20;
+    }
+
+/* code for unequal increments or equal increments */
+/* not equal to 1 */
+
+    ix = 1;
+    iy = 1;
+    if (*incx < 0) {
+	ix = (-(*n) + 1) * *incx + 1;  /* negative stride: start at the far end */
+    }
+    if (*incy < 0) {
+	iy = (-(*n) + 1) * *incy + 1;
+    }
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dy[iy] = dx[ix];
+	ix += *incx;
+	iy += *incy;
+/* L10: */
+    }
+    return 0;
+
+/* code for both increments equal to 1 */
+
+
+/* clean-up loop */
+
+L20:
+    m = *n % 7;  /* peel the remainder before the 7-way unrolled copy */
+    if (m == 0) {
+	goto L40;
+    }
+    i__1 = m;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dy[i__] = dx[i__];
+/* L30: */
+    }
+    if (*n < 7) {
+	return 0;
+    }
+L40:
+    mp1 = m + 1;
+    i__1 = *n;
+    for (i__ = mp1; i__ <= i__1; i__ += 7) {
+	dy[i__] = dx[i__];
+	dy[i__ + 1] = dx[i__ + 1];
+	dy[i__ + 2] = dx[i__ + 2];
+	dy[i__ + 3] = dx[i__ + 3];
+	dy[i__ + 4] = dx[i__ + 4];
+	dy[i__ + 5] = dx[i__ + 5];
+	dy[i__ + 6] = dx[i__ + 6];
+/* L50: */
+    }
+    return 0;
+} /* dcopy_ */
diff --git a/contrib/libs/cblas/ddot.c b/contrib/libs/cblas/ddot.c
new file mode 100644
index 0000000000..331fe0ab29
--- /dev/null
+++ b/contrib/libs/cblas/ddot.c
@@ -0,0 +1,110 @@
+/* ddot.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+doublereal ddot_(integer *n, doublereal *dx, integer *incx, doublereal *dy,
+	integer *incy)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal ret_val;
+
+    /* Local variables */
+    integer i__, m, ix, iy, mp1;
+    doublereal dtemp;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* forms the dot product of two vectors. */
+/* uses unrolled loops for increments equal to one. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+    /* Parameter adjustments */
+    --dy;
+    --dx;
+
+    /* Function Body */
+    ret_val = 0.;
+    dtemp = 0.;
+    if (*n <= 0) {
+	return ret_val;
+    }
+    if (*incx == 1 && *incy == 1) {
+	goto L20;
+    }
+
+/* code for unequal increments or equal increments */
+/* not equal to 1 */
+
+    ix = 1;
+    iy = 1;
+    if (*incx < 0) {
+	ix = (-(*n) + 1) * *incx + 1;  /* negative stride: start at the far end */
+    }
+    if (*incy < 0) {
+	iy = (-(*n) + 1) * *incy + 1;
+    }
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dtemp += dx[ix] * dy[iy];
+	ix += *incx;
+	iy += *incy;
+/* L10: */
+    }
+    ret_val = dtemp;
+    return ret_val;
+
+/* code for both increments equal to 1 */
+
+
+/* clean-up loop */
+
+L20:
+    m = *n % 5;  /* peel the remainder before the 5-way unrolled loop */
+    if (m == 0) {
+	goto L40;
+    }
+    i__1 = m;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dtemp += dx[i__] * dy[i__];
+/* L30: */
+    }
+    if (*n < 5) {
+	goto L60;
+    }
+L40:
+    mp1 = m + 1;
+    i__1 = *n;
+    for (i__ = mp1; i__ <= i__1; i__ += 5) {
+	dtemp = dtemp + dx[i__] * dy[i__] + dx[i__ + 1] * dy[i__ + 1] + dx[
+		i__ + 2] * dy[i__ + 2] + dx[i__ + 3] * dy[i__ + 3] + dx[i__ +
+		 4] * dy[i__ + 4];
+/* L50: */
+    }
+L60:
+    ret_val = dtemp;
+    return ret_val;
+} /* ddot_ */
diff --git a/contrib/libs/cblas/dgbmv.c b/contrib/libs/cblas/dgbmv.c
new file mode 100644
index 0000000000..d734d2791e
--- /dev/null
+++ b/contrib/libs/cblas/dgbmv.c
@@ -0,0 +1,369 @@
+/* dgbmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dgbmv_(char *trans, integer *m, integer *n, integer *kl,
+	integer *ku, doublereal *alpha, doublereal *a, integer *lda,
+	doublereal *x, integer *incx, doublereal *beta, doublereal *y,
+	integer *incy)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+
+    /* Local variables */
+    integer i__, j, k, ix, iy, jx, jy, kx, ky, kup1, info;
+    doublereal temp;
+    integer lenx, leny;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGBMV performs one of the matrix-vector operations */
+
+/* y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are vectors and A is an */
+/* m by n band matrix, with kl sub-diagonals and ku super-diagonals. */
+
+/* Arguments */
+/* ========== */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' y := alpha*A*x + beta*y. */
+
+/* TRANS = 'T' or 't' y := alpha*A'*x + beta*y. */
+
+/* TRANS = 'C' or 'c' y := alpha*A'*x + beta*y. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix A. */
+/* M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* KL - INTEGER. */
+/* On entry, KL specifies the number of sub-diagonals of the */
+/* matrix A. KL must satisfy 0 .le. KL. */
+/* Unchanged on exit. */
+
+/* KU - INTEGER. */
+/* On entry, KU specifies the number of super-diagonals of the */
+/* matrix A. KU must satisfy 0 .le. KU. */
+/* Unchanged on exit. */
+
+/* ALPHA - DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). */
+/* Before entry, the leading ( kl + ku + 1 ) by n part of the */
+/* array A must contain the matrix of coefficients, supplied */
+/* column by column, with the leading diagonal of the matrix in */
+/* row ( ku + 1 ) of the array, the first super-diagonal */
+/* starting at position 2 in row ku, the first sub-diagonal */
+/* starting at position 1 in row ( ku + 2 ), and so on. */
+/* Elements in the array A that do not correspond to elements */
+/* in the band matrix (such as the top left ku by ku triangle) */
+/* are not referenced. */
+/* The following program segment will transfer a band matrix */
+/* from conventional full matrix storage to band storage: */
+
+/* DO 20, J = 1, N */
+/* K = KU + 1 - J */
+/* DO 10, I = MAX( 1, J - KU ), MIN( M, J + KL ) */
+/* A( K + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* ( kl + ku + 1 ). */
+/* Unchanged on exit. */
+
+/* X - DOUBLE PRECISION array of DIMENSION at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n' */
+/* and at least */
+/* ( 1 + ( m - 1 )*abs( INCX ) ) otherwise. */
+/* Before entry, the incremented array X must contain the */
+/* vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA - DOUBLE PRECISION. */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then Y need not be set on input. */
+/* Unchanged on exit. */
+
+/* Y - DOUBLE PRECISION array of DIMENSION at least */
+/* ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n' */
+/* and at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ) otherwise. */
+/* Before entry, the incremented array Y must contain the */
+/* vector y. On exit, Y is overwritten by the updated vector y. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --x;
+    --y;
+
+    /* Function Body */
+    info = 0;
+    if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")
+	    ) {
+	info = 1;
+    } else if (*m < 0) {
+	info = 2;
+    } else if (*n < 0) {
+	info = 3;
+    } else if (*kl < 0) {
+	info = 4;
+    } else if (*ku < 0) {
+	info = 5;
+    } else if (*lda < *kl + *ku + 1) {
+	info = 8;
+    } else if (*incx == 0) {
+	info = 10;
+    } else if (*incy == 0) {
+	info = 13;
+    }
+    if (info != 0) {
+	xerbla_("DGBMV ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*m == 0 || *n == 0 || *alpha == 0. && *beta == 1.) {
+	return 0;
+    }
+
+/* Set LENX and LENY, the lengths of the vectors x and y, and set */
+/* up the start points in X and Y. */
+
+    if (lsame_(trans, "N")) {
+	lenx = *n;  /* y := alpha*A*x + beta*y: x has n elements, y has m */
+	leny = *m;
+    } else {
+	lenx = *m;  /* transposed product: the roles of the lengths swap */
+	leny = *n;
+    }
+    if (*incx > 0) {
+	kx = 1;
+    } else {
+	kx = 1 - (lenx - 1) * *incx;  /* negative stride: start at the far end */
+    }
+    if (*incy > 0) {
+	ky = 1;
+    } else {
+	ky = 1 - (leny - 1) * *incy;
+    }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through the band part of A. */
+
+/* First form y := beta*y. */
+
+    if (*beta != 1.) {
+	if (*incy == 1) {
+	    if (*beta == 0.) {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[i__] = 0.;
+/* L10: */
+		}
+	    } else {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[i__] = *beta * y[i__];
+/* L20: */
+		}
+	    }
+	} else {
+	    iy = ky;
+	    if (*beta == 0.) {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[iy] = 0.;
+		    iy += *incy;
+/* L30: */
+		}
+	    } else {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[iy] = *beta * y[iy];
+		    iy += *incy;
+/* L40: */
+		}
+	    }
+	}
+    }
+    if (*alpha == 0.) {
+	return 0;
+    }
+    kup1 = *ku + 1;  /* row of the band-storage array holding the main diagonal */
+    if (lsame_(trans, "N")) {
+
+/* Form y := alpha*A*x + y. */
+
+	jx = kx;
+	if (*incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[jx] != 0.) {
+		    temp = *alpha * x[jx];
+		    k = kup1 - j;  /* row offset mapping matrix row i to band row k+i for column j */
+/* Computing MAX */
+		    i__2 = 1, i__3 = j - *ku;
+/* Computing MIN */
+		    i__5 = *m, i__6 = j + *kl;
+		    i__4 = min(i__5,i__6);
+		    for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+			y[i__] += temp * a[k + i__ + j * a_dim1];
+/* L50: */
+		    }
+		}
+		jx += *incx;
+/* L60: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[jx] != 0.) {
+		    temp = *alpha * x[jx];
+		    iy = ky;
+		    k = kup1 - j;
+/* Computing MAX */
+		    i__4 = 1, i__2 = j - *ku;
+/* Computing MIN */
+		    i__5 = *m, i__6 = j + *kl;
+		    i__3 = min(i__5,i__6);
+		    for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+			y[iy] += temp * a[k + i__ + j * a_dim1];
+			iy += *incy;
+/* L70: */
+		    }
+		}
+		jx += *incx;
+		if (j > *ku) {
+		    ky += *incy;
+		}
+/* L80: */
+	    }
+	}
+    } else {
+
+/* Form y := alpha*A'*x + y. */
+
+	jy = ky;
+	if (*incx == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp = 0.;
+		k = kup1 - j;
+/* Computing MAX */
+		i__3 = 1, i__4 = j - *ku;
+/* Computing MIN */
+		i__5 = *m, i__6 = j + *kl;
+		i__2 = min(i__5,i__6);
+		for (i__ = max(i__3,i__4); i__ <= i__2; ++i__) {
+		    temp += a[k + i__ + j * a_dim1] * x[i__];
+/* L90: */
+		}
+		y[jy] += *alpha * temp;
+		jy += *incy;
+/* L100: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp = 0.;
+		ix = kx;
+		k = kup1 - j;
+/* Computing MAX */
+		i__2 = 1, i__3 = j - *ku;
+/* Computing MIN */
+		i__5 = *m, i__6 = j + *kl;
+		i__4 = min(i__5,i__6);
+		for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+		    temp += a[k + i__ + j * a_dim1] * x[ix];
+		    ix += *incx;
+/* L110: */
+		}
+		y[jy] += *alpha * temp;
+		jy += *incy;
+		if (j > *ku) {
+		    kx += *incx;
+		}
+/* L120: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of DGBMV . */
+
+} /* dgbmv_ */
diff --git a/contrib/libs/cblas/dgemm.c b/contrib/libs/cblas/dgemm.c
new file mode 100644
index 0000000000..b802cb0fbd
--- /dev/null
+++ b/contrib/libs/cblas/dgemm.c
@@ -0,0 +1,389 @@
+/* dgemm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* NOTE(review): f2c translation of the Fortran-77 reference BLAS DGEMM. */
+/* After the "Parameter adjustments" below, a/b/c__ are indexed 1-based, */
+/* Fortran-style, via the a[i + j*a_dim1] column-major addressing idiom. */
+/* Subroutine */ int dgemm_(char *transa, char *transb, integer *m, integer *
+	n, integer *k, doublereal *alpha, doublereal *a, integer *lda, 
+	doublereal *b, integer *ldb, doublereal *beta, doublereal *c__, 
+	integer *ldc)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2, 
+	    i__3;
+
+    /* Local variables */
+    integer i__, j, l, info;
+    logical nota, notb;
+    doublereal temp;
+    integer ncola;
+    extern logical lsame_(char *, char *);
+    integer nrowa, nrowb;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGEMM performs one of the matrix-matrix operations */
+
+/* C := alpha*op( A )*op( B ) + beta*C, */
+
+/* where op( X ) is one of */
+
+/* op( X ) = X or op( X ) = X', */
+
+/* alpha and beta are scalars, and A, B and C are matrices, with op( A ) */
+/* an m by k matrix, op( B ) a k by n matrix and C an m by n matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* TRANSA - CHARACTER*1. */
+/* On entry, TRANSA specifies the form of op( A ) to be used in */
+/* the matrix multiplication as follows: */
+
+/* TRANSA = 'N' or 'n', op( A ) = A. */
+
+/* TRANSA = 'T' or 't', op( A ) = A'. */
+
+/* TRANSA = 'C' or 'c', op( A ) = A'. */
+
+/* Unchanged on exit. */
+
+/* TRANSB - CHARACTER*1. */
+/* On entry, TRANSB specifies the form of op( B ) to be used in */
+/* the matrix multiplication as follows: */
+
+/* TRANSB = 'N' or 'n', op( B ) = B. */
+
+/* TRANSB = 'T' or 't', op( B ) = B'. */
+
+/* TRANSB = 'C' or 'c', op( B ) = B'. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix */
+/* op( A ) and of the matrix C. M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix */
+/* op( B ) and the number of columns of the matrix C. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry, K specifies the number of columns of the matrix */
+/* op( A ) and the number of rows of the matrix op( B ). K must */
+/* be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is */
+/* k when TRANSA = 'N' or 'n', and is m otherwise. */
+/* Before entry with TRANSA = 'N' or 'n', the leading m by k */
+/* part of the array A must contain the matrix A, otherwise */
+/* the leading k by m part of the array A must contain the */
+/* matrix A. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When TRANSA = 'N' or 'n' then */
+/* LDA must be at least max( 1, m ), otherwise LDA must be at */
+/* least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* B - DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is */
+/* n when TRANSB = 'N' or 'n', and is k otherwise. */
+/* Before entry with TRANSB = 'N' or 'n', the leading k by n */
+/* part of the array B must contain the matrix B, otherwise */
+/* the leading n by k part of the array B must contain the */
+/* matrix B. */
+/* Unchanged on exit. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. When TRANSB = 'N' or 'n' then */
+/* LDB must be at least max( 1, k ), otherwise LDB must be at */
+/* least max( 1, n ). */
+/* Unchanged on exit. */
+
+/* BETA - DOUBLE PRECISION. */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then C need not be set on input. */
+/* Unchanged on exit. */
+
+/* C - DOUBLE PRECISION array of DIMENSION ( LDC, n ). */
+/* Before entry, the leading m by n part of the array C must */
+/* contain the matrix C, except when beta is zero, in which */
+/* case C need not be set on entry. */
+/* On exit, the array C is overwritten by the m by n matrix */
+/* ( alpha*op( A )*op( B ) + beta*C ). */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Set NOTA and NOTB as true if A and B respectively are not */
+/* transposed and set NROWA, NCOLA and NROWB as the number of rows */
+/* and columns of A and the number of rows of B respectively. */
+
+    /* Parameter adjustments */
+/* Shift each array pointer back by one column+row so that the 1-based */
+/* Fortran subscripts used throughout the body address element (1,1). */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+
+    /* Function Body */
+    nota = lsame_(transa, "N");
+    notb = lsame_(transb, "N");
+    if (nota) {
+	nrowa = *m;
+	ncola = *k;
+    } else {
+	nrowa = *k;
+	ncola = *m;
+    }
+    if (notb) {
+	nrowb = *k;
+    } else {
+	nrowb = *n;
+    }
+
+/* Test the input parameters. */
+
+/* info holds the 1-based index of the first invalid argument; a nonzero */
+/* value is reported through the standard BLAS error handler XERBLA. */
+    info = 0;
+    if (! nota && ! lsame_(transa, "C") && ! lsame_(
+	    transa, "T")) {
+	info = 1;
+    } else if (! notb && ! lsame_(transb, "C") && ! 
+	    lsame_(transb, "T")) {
+	info = 2;
+    } else if (*m < 0) {
+	info = 3;
+    } else if (*n < 0) {
+	info = 4;
+    } else if (*k < 0) {
+	info = 5;
+    } else if (*lda < max(1,nrowa)) {
+	info = 8;
+    } else if (*ldb < max(1,nrowb)) {
+	info = 10;
+    } else if (*ldc < max(1,*m)) {
+	info = 13;
+    }
+    if (info != 0) {
+	xerbla_("DGEMM ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*m == 0 || *n == 0 || (*alpha == 0. || *k == 0) && *beta == 1.) {
+	return 0;
+    }
+
+/* And if alpha.eq.zero. */
+
+/* With alpha == 0 the product term vanishes, so C is only scaled (or */
+/* zeroed when beta == 0, which also clears any NaN/Inf left in C). */
+    if (*alpha == 0.) {
+	if (*beta == 0.) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    c__[i__ + j * c_dim1] = 0.;
+/* L10: */
+		}
+/* L20: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+	return 0;
+    }
+
+/* Start the operations. */
+
+/* Four cases follow, one per combination of op(A)/op(B); each scales */
+/* column j of C by beta first, then accumulates the alpha term. */
+    if (notb) {
+	if (nota) {
+
+/* Form C := alpha*A*B + beta*C. */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (*beta == 0.) {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = 0.;
+/* L50: */
+		    }
+		} else if (*beta != 1.) {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L60: */
+		    }
+		}
+		i__2 = *k;
+		for (l = 1; l <= i__2; ++l) {
+		    if (b[l + j * b_dim1] != 0.) {
+			temp = *alpha * b[l + j * b_dim1];
+			i__3 = *m;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    c__[i__ + j * c_dim1] += temp * a[i__ + l * 
+				    a_dim1];
+/* L70: */
+			}
+		    }
+/* L80: */
+		}
+/* L90: */
+	    }
+	} else {
+
+/* Form C := alpha*A'*B + beta*C */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    temp = 0.;
+		    i__3 = *k;
+		    for (l = 1; l <= i__3; ++l) {
+			temp += a[l + i__ * a_dim1] * b[l + j * b_dim1];
+/* L100: */
+		    }
+		    if (*beta == 0.) {
+			c__[i__ + j * c_dim1] = *alpha * temp;
+		    } else {
+			c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+				i__ + j * c_dim1];
+		    }
+/* L110: */
+		}
+/* L120: */
+	    }
+	}
+    } else {
+	if (nota) {
+
+/* Form C := alpha*A*B' + beta*C */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (*beta == 0.) {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = 0.;
+/* L130: */
+		    }
+		} else if (*beta != 1.) {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L140: */
+		    }
+		}
+		i__2 = *k;
+		for (l = 1; l <= i__2; ++l) {
+		    if (b[j + l * b_dim1] != 0.) {
+			temp = *alpha * b[j + l * b_dim1];
+			i__3 = *m;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    c__[i__ + j * c_dim1] += temp * a[i__ + l * 
+				    a_dim1];
+/* L150: */
+			}
+		    }
+/* L160: */
+		}
+/* L170: */
+	    }
+	} else {
+
+/* Form C := alpha*A'*B' + beta*C */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    temp = 0.;
+		    i__3 = *k;
+		    for (l = 1; l <= i__3; ++l) {
+			temp += a[l + i__ * a_dim1] * b[j + l * b_dim1];
+/* L180: */
+		    }
+		    if (*beta == 0.) {
+			c__[i__ + j * c_dim1] = *alpha * temp;
+		    } else {
+			c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+				i__ + j * c_dim1];
+		    }
+/* L190: */
+		}
+/* L200: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of DGEMM . */
+
+} /* dgemm_ */
diff --git a/contrib/libs/cblas/dgemv.c b/contrib/libs/cblas/dgemv.c
new file mode 100644
index 0000000000..b82a5b3654
--- /dev/null
+++ b/contrib/libs/cblas/dgemv.c
@@ -0,0 +1,312 @@
+/* dgemv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* NOTE(review): f2c translation of the Fortran-77 reference BLAS DGEMV. */
+/* Arrays are addressed 1-based, Fortran-style, after the pointer */
+/* adjustments below; INCX/INCY may be negative (vectors walked backward). */
+/* Subroutine */ int dgemv_(char *trans, integer *m, integer *n, doublereal *
+	alpha, doublereal *a, integer *lda, doublereal *x, integer *incx, 
+	doublereal *beta, doublereal *y, integer *incy)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, ix, iy, jx, jy, kx, ky, info;
+    doublereal temp;
+    integer lenx, leny;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGEMV performs one of the matrix-vector operations */
+
+/* y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are vectors and A is an */
+/* m by n matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' y := alpha*A*x + beta*y. */
+
+/* TRANS = 'T' or 't' y := alpha*A'*x + beta*y. */
+
+/* TRANS = 'C' or 'c' y := alpha*A'*x + beta*y. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix A. */
+/* M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). */
+/* Before entry, the leading m by n part of the array A must */
+/* contain the matrix of coefficients. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+/* X - DOUBLE PRECISION array of DIMENSION at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n' */
+/* and at least */
+/* ( 1 + ( m - 1 )*abs( INCX ) ) otherwise. */
+/* Before entry, the incremented array X must contain the */
+/* vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA - DOUBLE PRECISION. */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then Y need not be set on input. */
+/* Unchanged on exit. */
+
+/* Y - DOUBLE PRECISION array of DIMENSION at least */
+/* ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n' */
+/* and at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ) otherwise. */
+/* Before entry with BETA non-zero, the incremented array Y */
+/* must contain the vector y. On exit, Y is overwritten by the */
+/* updated vector y. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* Shift pointers so 1-based Fortran subscripts address element (1,1). */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --x;
+    --y;
+
+    /* Function Body */
+/* info is the 1-based index of the first bad argument (0 = all valid). */
+    info = 0;
+    if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")
+	    ) {
+	info = 1;
+    } else if (*m < 0) {
+	info = 2;
+    } else if (*n < 0) {
+	info = 3;
+    } else if (*lda < max(1,*m)) {
+	info = 6;
+    } else if (*incx == 0) {
+	info = 8;
+    } else if (*incy == 0) {
+	info = 11;
+    }
+    if (info != 0) {
+	xerbla_("DGEMV ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*m == 0 || *n == 0 || *alpha == 0. && *beta == 1.) {
+	return 0;
+    }
+
+/* Set LENX and LENY, the lengths of the vectors x and y, and set */
+/* up the start points in X and Y. */
+
+    if (lsame_(trans, "N")) {
+	lenx = *n;
+	leny = *m;
+    } else {
+	lenx = *m;
+	leny = *n;
+    }
+/* For a negative increment the walk starts at the far end of the vector. */
+    if (*incx > 0) {
+	kx = 1;
+    } else {
+	kx = 1 - (lenx - 1) * *incx;
+    }
+    if (*incy > 0) {
+	ky = 1;
+    } else {
+	ky = 1 - (leny - 1) * *incy;
+    }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+/* First form y := beta*y. */
+
+    if (*beta != 1.) {
+	if (*incy == 1) {
+	    if (*beta == 0.) {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[i__] = 0.;
+/* L10: */
+		}
+	    } else {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[i__] = *beta * y[i__];
+/* L20: */
+		}
+	    }
+	} else {
+	    iy = ky;
+	    if (*beta == 0.) {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[iy] = 0.;
+		    iy += *incy;
+/* L30: */
+		}
+	    } else {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[iy] = *beta * y[iy];
+		    iy += *incy;
+/* L40: */
+		}
+	    }
+	}
+    }
+    if (*alpha == 0.) {
+	return 0;
+    }
+    if (lsame_(trans, "N")) {
+
+/* Form y := alpha*A*x + y. */
+
+	jx = kx;
+	if (*incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[jx] != 0.) {
+		    temp = *alpha * x[jx];
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			y[i__] += temp * a[i__ + j * a_dim1];
+/* L50: */
+		    }
+		}
+		jx += *incx;
+/* L60: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[jx] != 0.) {
+		    temp = *alpha * x[jx];
+		    iy = ky;
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			y[iy] += temp * a[i__ + j * a_dim1];
+			iy += *incy;
+/* L70: */
+		    }
+		}
+		jx += *incx;
+/* L80: */
+	    }
+	}
+    } else {
+
+/* Form y := alpha*A'*x + y. */
+
+	jy = ky;
+	if (*incx == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp = 0.;
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    temp += a[i__ + j * a_dim1] * x[i__];
+/* L90: */
+		}
+		y[jy] += *alpha * temp;
+		jy += *incy;
+/* L100: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp = 0.;
+		ix = kx;
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    temp += a[i__ + j * a_dim1] * x[ix];
+		    ix += *incx;
+/* L110: */
+		}
+		y[jy] += *alpha * temp;
+		jy += *incy;
+/* L120: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of DGEMV . */
+
+} /* dgemv_ */
diff --git a/contrib/libs/cblas/dger.c b/contrib/libs/cblas/dger.c
new file mode 100644
index 0000000000..085833b90c
--- /dev/null
+++ b/contrib/libs/cblas/dger.c
@@ -0,0 +1,194 @@
+/* dger.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* NOTE(review): f2c translation of the Fortran-77 reference BLAS DGER */
+/* (rank-1 update). Arrays are indexed 1-based after the adjustments below. */
+/* Subroutine */ int dger_(integer *m, integer *n, doublereal *alpha, 
+	doublereal *x, integer *incx, doublereal *y, integer *incy, 
+	doublereal *a, integer *lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, ix, jy, kx, info;
+    doublereal temp;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGER performs the rank 1 operation */
+
+/* A := alpha*x*y' + A, */
+
+/* where alpha is a scalar, x is an m element vector, y is an n element */
+/* vector and A is an m by n matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix A. */
+/* M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - DOUBLE PRECISION array of dimension at least */
+/* ( 1 + ( m - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the m */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* Y - DOUBLE PRECISION array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. */
+/* Unchanged on exit. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+/* A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). */
+/* Before entry, the leading m by n part of the array A must */
+/* contain the matrix of coefficients. On exit, A is */
+/* overwritten by the updated matrix. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* Shift pointers so 1-based Fortran subscripts address element (1,1). */
+    --x;
+    --y;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+/* info is the 1-based index of the first bad argument (0 = all valid). */
+    info = 0;
+    if (*m < 0) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*incx == 0) {
+	info = 5;
+    } else if (*incy == 0) {
+	info = 7;
+    } else if (*lda < max(1,*m)) {
+	info = 9;
+    }
+    if (info != 0) {
+	xerbla_("DGER  ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*m == 0 || *n == 0 || *alpha == 0.) {
+	return 0;
+    }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+/* jy starts at the far end of y when incy is negative. */
+    if (*incy > 0) {
+	jy = 1;
+    } else {
+	jy = 1 - (*n - 1) * *incy;
+    }
+    if (*incx == 1) {
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    if (y[jy] != 0.) {
+		temp = *alpha * y[jy];
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    a[i__ + j * a_dim1] += x[i__] * temp;
+/* L10: */
+		}
+	    }
+	    jy += *incy;
+/* L20: */
+	}
+    } else {
+	if (*incx > 0) {
+	    kx = 1;
+	} else {
+	    kx = 1 - (*m - 1) * *incx;
+	}
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    if (y[jy] != 0.) {
+		temp = *alpha * y[jy];
+		ix = kx;
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    a[i__ + j * a_dim1] += x[ix] * temp;
+		    ix += *incx;
+/* L30: */
+		}
+	    }
+	    jy += *incy;
+/* L40: */
+	}
+    }
+
+    return 0;
+
+/* End of DGER  . */
+
+} /* dger_ */
diff --git a/contrib/libs/cblas/dnrm2.c b/contrib/libs/cblas/dnrm2.c
new file mode 100644
index 0000000000..8c50ff5ddd
--- /dev/null
+++ b/contrib/libs/cblas/dnrm2.c
@@ -0,0 +1,95 @@
+/* dnrm2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* NOTE(review): f2c translation of the Fortran-77 reference BLAS DNRM2. */
+/* Returns the Euclidean norm of x using the scale/ssq formulation, which */
+/* divides by the running maximum before squaring so intermediate squares */
+/* stay in range (the inlined DLASSQ technique noted below). */
+doublereal dnrm2_(integer *n, doublereal *x, integer *incx)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    doublereal ret_val, d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer ix;
+    doublereal ssq, norm, scale, absxi;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DNRM2 returns the euclidean norm of a vector via the function */
+/* name, so that */
+
+/* DNRM2 := sqrt( x'*x ) */
+
+
+/* -- This version written on 25-October-1982. */
+/* Modified on 14-October-1993 to inline the call to DLASSQ. */
+/* Sven Hammarling, Nag Ltd. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+    /* Parameter adjustments */
+    --x;
+
+    /* Function Body */
+/* Degenerate sizes/strides yield 0; a single element is just |x(1)|. */
+    if (*n < 1 || *incx < 1) {
+	norm = 0.;
+    } else if (*n == 1) {
+	norm = abs(x[1]);
+    } else {
+/* Invariant of the loop: scale = max |x(k)| seen so far, and */
+/* ssq = sum over seen k of (x(k)/scale)^2, so norm = scale*sqrt(ssq). */
+	scale = 0.;
+	ssq = 1.;
+/* The following loop is equivalent to this call to the LAPACK */
+/* auxiliary routine: */
+/* CALL DLASSQ( N, X, INCX, SCALE, SSQ ) */
+
+	i__1 = (*n - 1) * *incx + 1;
+	i__2 = *incx;
+	for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
+	    if (x[ix] != 0.) {
+		absxi = (d__1 = x[ix], abs(d__1));
+		if (scale < absxi) {
+/* Computing 2nd power */
+		    d__1 = scale / absxi;
+		    ssq = ssq * (d__1 * d__1) + 1.;
+		    scale = absxi;
+		} else {
+/* Computing 2nd power */
+		    d__1 = absxi / scale;
+		    ssq += d__1 * d__1;
+		}
+	    }
+/* L10: */
+	}
+	norm = scale * sqrt(ssq);
+    }
+
+    ret_val = norm;
+    return ret_val;
+
+/* End of DNRM2. */
+
+} /* dnrm2_ */
diff --git a/contrib/libs/cblas/drot.c b/contrib/libs/cblas/drot.c
new file mode 100644
index 0000000000..d4bc6bde4d
--- /dev/null
+++ b/contrib/libs/cblas/drot.c
@@ -0,0 +1,86 @@
+/* drot.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* NOTE(review): f2c translation of the Fortran-77 reference BLAS DROT. */
+/* Applies the plane rotation [c s; -s c] to the vector pair (dx, dy). */
+/* Subroutine */ int drot_(integer *n, doublereal *dx, integer *incx, 
+	doublereal *dy, integer *incy, doublereal *c__, doublereal *s)
+{
+    /* System generated locals */
+    integer i__1;
+
+    /* Local variables */
+    integer i__, ix, iy;
+    doublereal dtemp;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* applies a plane rotation. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+    /* Parameter adjustments */
+/* Shift pointers for 1-based Fortran indexing. */
+    --dy;
+    --dx;
+
+    /* Function Body */
+    if (*n <= 0) {
+	return 0;
+    }
+/* Unit strides take the simpler loop at L20. */
+    if (*incx == 1 && *incy == 1) {
+	goto L20;
+    }
+
+/* code for unequal increments or equal increments not equal */
+/* to 1 */
+
+/* Negative strides start from the far end of the vector. */
+    ix = 1;
+    iy = 1;
+    if (*incx < 0) {
+	ix = (-(*n) + 1) * *incx + 1;
+    }
+    if (*incy < 0) {
+	iy = (-(*n) + 1) * *incy + 1;
+    }
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dtemp = *c__ * dx[ix] + *s * dy[iy];
+	dy[iy] = *c__ * dy[iy] - *s * dx[ix];
+	dx[ix] = dtemp;
+	ix += *incx;
+	iy += *incy;
+/* L10: */
+    }
+    return 0;
+
+/* code for both increments equal to 1 */
+
+L20:
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dtemp = *c__ * dx[i__] + *s * dy[i__];
+	dy[i__] = *c__ * dy[i__] - *s * dx[i__];
+	dx[i__] = dtemp;
+/* L30: */
+    }
+    return 0;
+} /* drot_ */
diff --git a/contrib/libs/cblas/drotg.c b/contrib/libs/cblas/drotg.c
new file mode 100644
index 0000000000..b2d9aa4aff
--- /dev/null
+++ b/contrib/libs/cblas/drotg.c
@@ -0,0 +1,79 @@
+/* drotg.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* Constant 1.0 whose sign is borrowed via d_sign below. */
+static doublereal c_b4 = 1.;
+
+/* NOTE(review): f2c translation of the Fortran-77 reference BLAS DROTG. */
+/* Constructs a Givens rotation (c, s) for the pair (da, db); on exit da */
+/* holds r and db holds z. */
+/* Subroutine */ int drotg_(doublereal *da, doublereal *db, doublereal *c__, 
+	doublereal *s)
+{
+    /* System generated locals */
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+    /* Local variables */
+    doublereal r__, z__, roe, scale;
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* construct givens plane rotation. */
+/* jack dongarra, linpack, 3/11/78. */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* roe carries the sign-determining component: the larger of da, db. */
+    roe = *db;
+    if (abs(*da) > abs(*db)) {
+	roe = *da;
+    }
+/* scale guards the sqrt below against overflow/underflow; when both */
+/* inputs are zero the rotation degenerates to the identity. */
+    scale = abs(*da) + abs(*db);
+    if (scale != 0.) {
+	goto L10;
+    }
+    *c__ = 1.;
+    *s = 0.;
+    r__ = 0.;
+    z__ = 0.;
+    goto L20;
+L10:
+/* Computing 2nd power */
+    d__1 = *da / scale;
+/* Computing 2nd power */
+    d__2 = *db / scale;
+    r__ = scale * sqrt(d__1 * d__1 + d__2 * d__2);
+    r__ = d_sign(&c_b4, &roe) * r__;
+    *c__ = *da / r__;
+    *s = *db / r__;
+/* NOTE(review): z encodes the rotation compactly (z = s, or 1/c, or 1) */
+/* so it can be stored in place of db — see the DROTG reference docs. */
+    z__ = 1.;
+    if (abs(*da) > abs(*db)) {
+	z__ = *s;
+    }
+    if (abs(*db) >= abs(*da) && *c__ != 0.) {
+	z__ = 1. / *c__;
+    }
+L20:
+    *da = r__;
+    *db = z__;
+    return 0;
+} /* drotg_ */
diff --git a/contrib/libs/cblas/drotm.c b/contrib/libs/cblas/drotm.c
new file mode 100644
index 0000000000..90815c2e88
--- /dev/null
+++ b/contrib/libs/cblas/drotm.c
@@ -0,0 +1,215 @@
+/* drotm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* NOTE(review): f2c translation of the Fortran-77 reference BLAS DROTM. */
+/* Applies the modified Givens matrix H (form selected by dparam[1], see */
+/* the header comments below) to the vector pair (dx, dy). The goto-based */
+/* dispatch mirrors the original Fortran computed branch structure. */
+/* Subroutine */ int drotm_(integer *n, doublereal *dx, integer *incx, 
+	doublereal *dy, integer *incy, doublereal *dparam)
+{
+    /* Initialized data */
+
+    static doublereal zero = 0.;
+    static doublereal two = 2.;
+
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer i__;
+    doublereal w, z__;
+    integer kx, ky;
+    doublereal dh11, dh12, dh21, dh22, dflag;
+    integer nsteps;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* APPLY THE MODIFIED GIVENS TRANSFORMATION, H, TO THE 2 BY N MATRIX */
+
+/* (DX**T) , WHERE **T INDICATES TRANSPOSE. THE ELEMENTS OF DX ARE IN */
+/* (DY**T) */
+
+/* DX(LX+I*INCX), I = 0 TO N-1, WHERE LX = 1 IF INCX .GE. 0, ELSE */
+/* LX = (-INCX)*N, AND SIMILARLY FOR SY USING LY AND INCY. */
+/* WITH DPARAM(1)=DFLAG, H HAS ONE OF THE FOLLOWING FORMS.. */
+
+/* DFLAG=-1.D0 DFLAG=0.D0 DFLAG=1.D0 DFLAG=-2.D0 */
+
+/* (DH11 DH12) (1.D0 DH12) (DH11 1.D0) (1.D0 0.D0) */
+/* H=( ) ( ) ( ) ( ) */
+/* (DH21 DH22), (DH21 1.D0), (-1.D0 DH22), (0.D0 1.D0). */
+/* SEE DROTMG FOR A DESCRIPTION OF DATA STORAGE IN DPARAM. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* number of elements in input vector(s) */
+
+/* DX (input/output) DOUBLE PRECISION array, dimension N */
+/* double precision vector with N elements */
+
+/* INCX (input) INTEGER */
+/* storage spacing between elements of DX */
+
+/* DY (input/output) DOUBLE PRECISION array, dimension N */
+/* double precision vector with N elements */
+
+/* INCY (input) INTEGER */
+/* storage spacing between elements of DY */
+
+/* DPARAM (input/output) DOUBLE PRECISION array, dimension 5 */
+/* DPARAM(1)=DFLAG */
+/* DPARAM(2)=DH11 */
+/* DPARAM(3)=DH21 */
+/* DPARAM(4)=DH12 */
+/* DPARAM(5)=DH22 */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Data statements .. */
+    /* Parameter adjustments */
+/* Shift pointers for 1-based Fortran indexing. */
+    --dparam;
+    --dy;
+    --dx;
+
+    /* Function Body */
+/* .. */
+
+/* dflag == -2 (dflag + two == zero) selects the identity H: no-op. */
+    dflag = dparam[1];
+    if (*n <= 0 || dflag + two == zero) {
+	goto L140;
+    }
+/* Equal positive strides use the direct-index loops (L10/L30/L50); */
+/* anything else goes through the kx/ky stepping loops at L70. */
+    if (! (*incx == *incy && *incx > 0)) {
+	goto L70;
+    }
+
+    nsteps = *n * *incx;
+/* Three-way branch on dflag: <0 full H, ==0 unit-diagonal H, >0 */
+/* unit-antidiagonal H (see the forms in the header comment). */
+    if (dflag < 0.) {
+	goto L50;
+    } else if (dflag == 0) {
+	goto L10;
+    } else {
+	goto L30;
+    }
+L10:
+    dh12 = dparam[4];
+    dh21 = dparam[3];
+    i__1 = nsteps;
+    i__2 = *incx;
+    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+	w = dx[i__];
+	z__ = dy[i__];
+	dx[i__] = w + z__ * dh12;
+	dy[i__] = w * dh21 + z__;
+/* L20: */
+    }
+    goto L140;
+L30:
+    dh11 = dparam[2];
+    dh22 = dparam[5];
+    i__2 = nsteps;
+    i__1 = *incx;
+    for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+	w = dx[i__];
+	z__ = dy[i__];
+	dx[i__] = w * dh11 + z__;
+	dy[i__] = -w + dh22 * z__;
+/* L40: */
+    }
+    goto L140;
+L50:
+    dh11 = dparam[2];
+    dh12 = dparam[4];
+    dh21 = dparam[3];
+    dh22 = dparam[5];
+    i__1 = nsteps;
+    i__2 = *incx;
+    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+	w = dx[i__];
+	z__ = dy[i__];
+	dx[i__] = w * dh11 + z__ * dh12;
+	dy[i__] = w * dh21 + z__ * dh22;
+/* L60: */
+    }
+    goto L140;
+L70:
+/* General-stride path: negative strides start at the far end. */
+    kx = 1;
+    ky = 1;
+    if (*incx < 0) {
+	kx = (1 - *n) * *incx + 1;
+    }
+    if (*incy < 0) {
+	ky = (1 - *n) * *incy + 1;
+    }
+
+    if (dflag < 0.) {
+	goto L120;
+    } else if (dflag == 0) {
+	goto L80;
+    } else {
+	goto L100;
+    }
+L80:
+    dh12 = dparam[4];
+    dh21 = dparam[3];
+    i__2 = *n;
+    for (i__ = 1; i__ <= i__2; ++i__) {
+	w = dx[kx];
+	z__ = dy[ky];
+	dx[kx] = w + z__ * dh12;
+	dy[ky] = w * dh21 + z__;
+	kx += *incx;
+	ky += *incy;
+/* L90: */
+    }
+    goto L140;
+L100:
+    dh11 = dparam[2];
+    dh22 = dparam[5];
+    i__2 = *n;
+    for (i__ = 1; i__ <= i__2; ++i__) {
+	w = dx[kx];
+	z__ = dy[ky];
+	dx[kx] = w * dh11 + z__;
+	dy[ky] = -w + dh22 * z__;
+	kx += *incx;
+	ky += *incy;
+/* L110: */
+    }
+    goto L140;
+L120:
+    dh11 = dparam[2];
+    dh12 = dparam[4];
+    dh21 = dparam[3];
+    dh22 = dparam[5];
+    i__2 = *n;
+    for (i__ = 1; i__ <= i__2; ++i__) {
+	w = dx[kx];
+	z__ = dy[ky];
+	dx[kx] = w * dh11 + z__ * dh12;
+	dy[ky] = w * dh21 + z__ * dh22;
+	kx += *incx;
+	ky += *incy;
+/* L130: */
+    }
+L140:
+    return 0;
+} /* drotm_ */
diff --git a/contrib/libs/cblas/drotmg.c b/contrib/libs/cblas/drotmg.c
new file mode 100644
index 0000000000..94694170bd
--- /dev/null
+++ b/contrib/libs/cblas/drotmg.c
@@ -0,0 +1,293 @@
+/* drotmg.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int drotmg_(doublereal *dd1, doublereal *dd2, doublereal *
+	dx1, doublereal *dy1, doublereal *dparam)
+{
+    /* Initialized data */
+
+    static doublereal zero = 0.;
+    static doublereal one = 1.;
+    static doublereal two = 2.;
+    static doublereal gam = 4096.;
+    static doublereal gamsq = 16777216.;
+    static doublereal rgamsq = 5.9604645e-8;
+
+    /* Format strings */
+    /* NOTE: unused artifacts of f2c's translation of the Fortran ASSIGN
+       statement; only the integer igo below is actually dispatched on. */
+    static char fmt_120[] = "";
+    static char fmt_150[] = "";
+    static char fmt_180[] = "";
+    static char fmt_210[] = "";
+
+    /* System generated locals */
+    doublereal d__1;
+
+    /* Local variables */
+    doublereal du, dp1, dp2, dq1, dq2, dh11, dh12, dh21, dh22;
+    integer igo;
+    doublereal dflag, dtemp;
+
+    /* Assigned format variables */
+    static char *igo_fmt;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CONSTRUCT THE MODIFIED GIVENS TRANSFORMATION MATRIX H WHICH ZEROS */
+/* THE SECOND COMPONENT OF THE 2-VECTOR (DSQRT(DD1)*DX1,DSQRT(DD2)* */
+/* DY2)**T. */
+/* WITH DPARAM(1)=DFLAG, H HAS ONE OF THE FOLLOWING FORMS.. */
+
+/* DFLAG=-1.D0 DFLAG=0.D0 DFLAG=1.D0 DFLAG=-2.D0 */
+
+/* (DH11 DH12) (1.D0 DH12) (DH11 1.D0) (1.D0 0.D0) */
+/* H=( ) ( ) ( ) ( ) */
+/* (DH21 DH22), (DH21 1.D0), (-1.D0 DH22), (0.D0 1.D0). */
+/* LOCATIONS 2-4 OF DPARAM CONTAIN DH11, DH21, DH12, AND DH22 */
+/* RESPECTIVELY. (VALUES OF 1.D0, -1.D0, OR 0.D0 IMPLIED BY THE */
+/* VALUE OF DPARAM(1) ARE NOT STORED IN DPARAM.) */
+
+/* THE VALUES OF GAMSQ AND RGAMSQ SET IN THE DATA STATEMENT MAY BE */
+/* INEXACT. THIS IS OK AS THEY ARE ONLY USED FOR TESTING THE SIZE */
+/* OF DD1 AND DD2. ALL ACTUAL SCALING OF DATA IS DONE USING GAM. */
+
+
+/* Arguments */
+/* ========= */
+
+/* DD1 (input/output) DOUBLE PRECISION */
+
+/* DD2 (input/output) DOUBLE PRECISION */
+
+/* DX1 (input/output) DOUBLE PRECISION */
+
+/* DY1 (input) DOUBLE PRECISION */
+
+/* DPARAM (input/output) DOUBLE PRECISION array, dimension 5 */
+/* DPARAM(1)=DFLAG */
+/* DPARAM(2)=DH11 */
+/* DPARAM(3)=DH21 */
+/* DPARAM(4)=DH12 */
+/* DPARAM(5)=DH22 */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Data statements .. */
+
+    /* Parameter adjustments */
+    /* Shift the base pointer so dparam[1..5] mirrors Fortran's 1-based
+       indexing throughout the body below. */
+    --dparam;
+
+    /* Function Body */
+/* .. */
+    if (! (*dd1 < zero)) {
+	goto L10;
+    }
+/* GO ZERO-H-D-AND-DX1.. */
+    goto L60;
+L10:
+/* CASE-DD1-NONNEGATIVE */
+    dp2 = *dd2 * *dy1;
+    if (! (dp2 == zero)) {
+	goto L20;
+    }
+    /* dd2*dy1 == 0: nothing to annihilate, emit the identity flag -2. */
+    dflag = -two;
+    goto L260;
+/* REGULAR-CASE.. */
+L20:
+    dp1 = *dd1 * *dx1;
+    dq2 = dp2 * *dy1;
+    dq1 = dp1 * *dx1;
+
+    if (! (abs(dq1) > abs(dq2))) {
+	goto L40;
+    }
+    dh21 = -(*dy1) / *dx1;
+    dh12 = dp2 / dp1;
+
+    du = one - dh12 * dh21;
+
+    if (! (du <= zero)) {
+	goto L30;
+    }
+/* GO ZERO-H-D-AND-DX1.. */
+    goto L60;
+L30:
+    dflag = zero;
+    *dd1 /= du;
+    *dd2 /= du;
+    *dx1 *= du;
+/* GO SCALE-CHECK.. */
+    goto L100;
+L40:
+    if (! (dq2 < zero)) {
+	goto L50;
+    }
+/* GO ZERO-H-D-AND-DX1.. */
+    goto L60;
+L50:
+    dflag = one;
+    dh11 = dp1 / dp2;
+    dh22 = *dx1 / *dy1;
+    du = one + dh11 * dh22;
+    dtemp = *dd2 / du;
+    *dd2 = *dd1 / du;
+    *dd1 = dtemp;
+    *dx1 = *dy1 * du;
+/* GO SCALE-CHECK */
+    goto L100;
+/* PROCEDURE..ZERO-H-D-AND-DX1.. */
+L60:
+    dflag = -one;
+    dh11 = zero;
+    dh12 = zero;
+    dh21 = zero;
+    dh22 = zero;
+
+    *dd1 = zero;
+    *dd2 = zero;
+    *dx1 = zero;
+/* RETURN.. */
+    goto L220;
+/* PROCEDURE..FIX-H.. */
+L70:
+    /* Reached only from the scale-check loops below, with igo recording
+       the return site (f2c's rendering of a Fortran assigned GOTO).
+       Expands the implicit 1/-1 entries of H into dh11..dh22 so the
+       rescaling at the return site can touch all four entries. */
+    if (! (dflag >= zero)) {
+	goto L90;
+    }
+
+    if (! (dflag == zero)) {
+	goto L80;
+    }
+    dh11 = one;
+    dh22 = one;
+    dflag = -one;
+    goto L90;
+L80:
+    dh21 = -one;
+    dh12 = one;
+    dflag = -one;
+L90:
+    /* Dispatch back to the call site selected before jumping to L70. */
+    switch (igo) {
+	case 0: goto L120;
+	case 1: goto L150;
+	case 2: goto L180;
+	case 3: goto L210;
+    }
+/* PROCEDURE..SCALE-CHECK */
+L100:
+L110:
+    /* Rescale dd1 into (rgamsq, gamsq) by repeated factors of gam**2,
+       compensating in dx1 and in the first row of H. */
+    if (! (*dd1 <= rgamsq)) {
+	goto L130;
+    }
+    if (*dd1 == zero) {
+	goto L160;
+    }
+    igo = 0;
+    igo_fmt = fmt_120;
+/* FIX-H.. */
+    goto L70;
+L120:
+/* Computing 2nd power */
+    d__1 = gam;
+    *dd1 *= d__1 * d__1;
+    *dx1 /= gam;
+    dh11 /= gam;
+    dh12 /= gam;
+    goto L110;
+L130:
+L140:
+    if (! (*dd1 >= gamsq)) {
+	goto L160;
+    }
+    igo = 1;
+    igo_fmt = fmt_150;
+/* FIX-H.. */
+    goto L70;
+L150:
+/* Computing 2nd power */
+    d__1 = gam;
+    *dd1 /= d__1 * d__1;
+    *dx1 *= gam;
+    dh11 *= gam;
+    dh12 *= gam;
+    goto L140;
+L160:
+L170:
+    /* Same rescaling for dd2; the size tests use abs, compensation goes
+       into the second row of H. */
+    if (! (abs(*dd2) <= rgamsq)) {
+	goto L190;
+    }
+    if (*dd2 == zero) {
+	goto L220;
+    }
+    igo = 2;
+    igo_fmt = fmt_180;
+/* FIX-H.. */
+    goto L70;
+L180:
+/* Computing 2nd power */
+    d__1 = gam;
+    *dd2 *= d__1 * d__1;
+    dh21 /= gam;
+    dh22 /= gam;
+    goto L170;
+L190:
+L200:
+    if (! (abs(*dd2) >= gamsq)) {
+	goto L220;
+    }
+    igo = 3;
+    igo_fmt = fmt_210;
+/* FIX-H.. */
+    goto L70;
+L210:
+/* Computing 2nd power */
+    d__1 = gam;
+    *dd2 /= d__1 * d__1;
+    dh21 *= gam;
+    dh22 *= gam;
+    goto L200;
+L220:
+    /* Pack the result: store only the H entries that dflag marks as
+       non-trivial (see the table in the header comment); dparam[1]
+       always receives dflag. */
+    if (dflag < 0.) {
+	goto L250;
+    } else if (dflag == 0) {
+	goto L230;
+    } else {
+	goto L240;
+    }
+L230:
+    dparam[3] = dh21;
+    dparam[4] = dh12;
+    goto L260;
+L240:
+    dparam[2] = dh11;
+    dparam[5] = dh22;
+    goto L260;
+L250:
+    dparam[2] = dh11;
+    dparam[3] = dh21;
+    dparam[4] = dh12;
+    dparam[5] = dh22;
+L260:
+    dparam[1] = dflag;
+    return 0;
+} /* drotmg_ */
diff --git a/contrib/libs/cblas/dsbmv.c b/contrib/libs/cblas/dsbmv.c
new file mode 100644
index 0000000000..bdce6df450
--- /dev/null
+++ b/contrib/libs/cblas/dsbmv.c
@@ -0,0 +1,364 @@
+/* dsbmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dsbmv_(char *uplo, integer *n, integer *k, doublereal *
+	alpha, doublereal *a, integer *lda, doublereal *x, integer *incx,
+	doublereal *beta, doublereal *y, integer *incy)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, j, l, ix, iy, jx, jy, kx, ky, info;
+    doublereal temp1, temp2;
+    extern logical lsame_(char *, char *);
+    integer kplus1;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSBMV performs the matrix-vector operation */
+
+/* y := alpha*A*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are n element vectors and */
+/* A is an n by n symmetric band matrix, with k super-diagonals. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the band matrix A is being supplied as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' The upper triangular part of A is */
+/* being supplied. */
+
+/* UPLO = 'L' or 'l' The lower triangular part of A is */
+/* being supplied. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry, K specifies the number of super-diagonals of the */
+/* matrix A. K must satisfy 0 .le. K. */
+/* Unchanged on exit. */
+
+/* ALPHA - DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */
+/* by n part of the array A must contain the upper triangular */
+/* band part of the symmetric matrix, supplied column by */
+/* column, with the leading diagonal of the matrix in row */
+/* ( k + 1 ) of the array, the first super-diagonal starting at */
+/* position 2 in row k, and so on. The top left k by k triangle */
+/* of the array A is not referenced. */
+/* The following program segment will transfer the upper */
+/* triangular part of a symmetric band matrix from conventional */
+/* full matrix storage to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = K + 1 - J */
+/* DO 10, I = MAX( 1, J - K ), J */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */
+/* by n part of the array A must contain the lower triangular */
+/* band part of the symmetric matrix, supplied column by */
+/* column, with the leading diagonal of the matrix in row 1 of */
+/* the array, the first sub-diagonal starting at position 1 in */
+/* row 2, and so on. The bottom right k by k triangle of the */
+/* array A is not referenced. */
+/* The following program segment will transfer the lower */
+/* triangular part of a symmetric band matrix from conventional */
+/* full matrix storage to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = 1 - J */
+/* DO 10, I = J, MIN( N, J + K ) */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* ( k + 1 ). */
+/* Unchanged on exit. */
+
+/* X - DOUBLE PRECISION array of DIMENSION at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the */
+/* vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA - DOUBLE PRECISION. */
+/* On entry, BETA specifies the scalar beta. */
+/* Unchanged on exit. */
+
+/* Y - DOUBLE PRECISION array of DIMENSION at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the */
+/* vector y. On exit, Y is overwritten by the updated vector y. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* Rebase for Fortran-style column-major, 1-based indexing: element
+       A(i,j) of the band array is addressed as a[i + j * a_dim1]. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --x;
+    --y;
+
+    /* Function Body */
+    /* info holds the 1-based position of the first bad argument and is
+       reported through xerbla_. */
+    info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*k < 0) {
+	info = 3;
+    } else if (*lda < *k + 1) {
+	info = 6;
+    } else if (*incx == 0) {
+	info = 8;
+    } else if (*incy == 0) {
+	info = 11;
+    }
+    if (info != 0) {
+	xerbla_("DSBMV ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*n == 0 || *alpha == 0. && *beta == 1.) {
+	return 0;
+    }
+
+/* Set up the start points in X and Y. */
+
+    /* For a negative increment, start at the far end so that traversal
+       still visits elements in Fortran order. */
+    if (*incx > 0) {
+	kx = 1;
+    } else {
+	kx = 1 - (*n - 1) * *incx;
+    }
+    if (*incy > 0) {
+	ky = 1;
+    } else {
+	ky = 1 - (*n - 1) * *incy;
+    }
+
+/* Start the operations. In this version the elements of the array A */
+/* are accessed sequentially with one pass through A. */
+
+/* First form y := beta*y. */
+
+    if (*beta != 1.) {
+	if (*incy == 1) {
+	    if (*beta == 0.) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[i__] = 0.;
+/* L10: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[i__] = *beta * y[i__];
+/* L20: */
+		}
+	    }
+	} else {
+	    iy = ky;
+	    if (*beta == 0.) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[iy] = 0.;
+		    iy += *incy;
+/* L30: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[iy] = *beta * y[iy];
+		    iy += *incy;
+/* L40: */
+		}
+	    }
+	}
+    }
+    if (*alpha == 0.) {
+	return 0;
+    }
+    if (lsame_(uplo, "U")) {
+
+/* Form y when upper triangle of A is stored. */
+
+	kplus1 = *k + 1;
+	if (*incx == 1 && *incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp1 = *alpha * x[j];
+		temp2 = 0.;
+		/* l is the row offset into band storage: a[l + i + j*lda]
+		   holds matrix element A(i,j) for this column. */
+		l = kplus1 - j;
+/* Computing MAX */
+		i__2 = 1, i__3 = j - *k;
+		i__4 = j - 1;
+		for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+		    y[i__] += temp1 * a[l + i__ + j * a_dim1];
+		    temp2 += a[l + i__ + j * a_dim1] * x[i__];
+/* L50: */
+		}
+		y[j] = y[j] + temp1 * a[kplus1 + j * a_dim1] + *alpha * temp2;
+/* L60: */
+	    }
+	} else {
+	    jx = kx;
+	    jy = ky;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp1 = *alpha * x[jx];
+		temp2 = 0.;
+		ix = kx;
+		iy = ky;
+		l = kplus1 - j;
+/* Computing MAX */
+		i__4 = 1, i__2 = j - *k;
+		i__3 = j - 1;
+		for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+		    y[iy] += temp1 * a[l + i__ + j * a_dim1];
+		    temp2 += a[l + i__ + j * a_dim1] * x[ix];
+		    ix += *incx;
+		    iy += *incy;
+/* L70: */
+		}
+		y[jy] = y[jy] + temp1 * a[kplus1 + j * a_dim1] + *alpha *
+			temp2;
+		jx += *incx;
+		jy += *incy;
+		/* Once the column extends past the bandwidth, slide the
+		   inner-loop start points forward with it. */
+		if (j > *k) {
+		    kx += *incx;
+		    ky += *incy;
+		}
+/* L80: */
+	    }
+	}
+    } else {
+
+/* Form y when lower triangle of A is stored. */
+
+	if (*incx == 1 && *incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp1 = *alpha * x[j];
+		temp2 = 0.;
+		y[j] += temp1 * a[j * a_dim1 + 1];
+		l = 1 - j;
+/* Computing MIN */
+		i__4 = *n, i__2 = j + *k;
+		i__3 = min(i__4,i__2);
+		for (i__ = j + 1; i__ <= i__3; ++i__) {
+		    y[i__] += temp1 * a[l + i__ + j * a_dim1];
+		    temp2 += a[l + i__ + j * a_dim1] * x[i__];
+/* L90: */
+		}
+		y[j] += *alpha * temp2;
+/* L100: */
+	    }
+	} else {
+	    jx = kx;
+	    jy = ky;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp1 = *alpha * x[jx];
+		temp2 = 0.;
+		y[jy] += temp1 * a[j * a_dim1 + 1];
+		l = 1 - j;
+		ix = jx;
+		iy = jy;
+/* Computing MIN */
+		i__4 = *n, i__2 = j + *k;
+		i__3 = min(i__4,i__2);
+		for (i__ = j + 1; i__ <= i__3; ++i__) {
+		    ix += *incx;
+		    iy += *incy;
+		    y[iy] += temp1 * a[l + i__ + j * a_dim1];
+		    temp2 += a[l + i__ + j * a_dim1] * x[ix];
+/* L110: */
+		}
+		y[jy] += *alpha * temp2;
+		jx += *incx;
+		jy += *incy;
+/* L120: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of DSBMV . */
+
+} /* dsbmv_ */
diff --git a/contrib/libs/cblas/dscal.c b/contrib/libs/cblas/dscal.c
new file mode 100644
index 0000000000..11548be866
--- /dev/null
+++ b/contrib/libs/cblas/dscal.c
@@ -0,0 +1,96 @@
+/* dscal.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dscal_(integer *n, doublereal *da, doublereal *dx,
+	integer *incx)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer i__, m, mp1, nincx;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+/* * */
+/* scales a vector by a constant. */
+/* uses unrolled loops for increment equal to one. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 3/93 to return if incx .le. 0. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+    /* Parameter adjustments */
+    /* Shift the base pointer so dx[1..] mirrors Fortran's 1-based
+       indexing. */
+    --dx;
+
+    /* Function Body */
+    if (*n <= 0 || *incx <= 0) {
+	return 0;
+    }
+    if (*incx == 1) {
+	goto L20;
+    }
+
+/* code for increment not equal to 1 */
+
+    nincx = *n * *incx;
+    i__1 = nincx;
+    i__2 = *incx;
+    /* Generic f2c DO-loop translation: the ternary guard supports either
+       loop direction, though incx is known positive here. */
+    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+	dx[i__] = *da * dx[i__];
+/* L10: */
+    }
+    return 0;
+
+/* code for increment equal to 1 */
+
+
+/* clean-up loop */
+
+L20:
+    /* Handle the n % 5 leftover elements first, then run the unrolled
+       by-5 loop over the rest. */
+    m = *n % 5;
+    if (m == 0) {
+	goto L40;
+    }
+    i__2 = m;
+    for (i__ = 1; i__ <= i__2; ++i__) {
+	dx[i__] = *da * dx[i__];
+/* L30: */
+    }
+    if (*n < 5) {
+	return 0;
+    }
+L40:
+    mp1 = m + 1;
+    i__2 = *n;
+    for (i__ = mp1; i__ <= i__2; i__ += 5) {
+	dx[i__] = *da * dx[i__];
+	dx[i__ + 1] = *da * dx[i__ + 1];
+	dx[i__ + 2] = *da * dx[i__ + 2];
+	dx[i__ + 3] = *da * dx[i__ + 3];
+	dx[i__ + 4] = *da * dx[i__ + 4];
+/* L50: */
+    }
+    return 0;
+} /* dscal_ */
diff --git a/contrib/libs/cblas/dsdot.c b/contrib/libs/cblas/dsdot.c
new file mode 100644
index 0000000000..26e66df9e5
--- /dev/null
+++ b/contrib/libs/cblas/dsdot.c
@@ -0,0 +1,135 @@
+/* dsdot.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+doublereal dsdot_(integer *n, real *sx, integer *incx, real *sy, integer *
+	incy)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    doublereal ret_val;
+
+    /* Local variables */
+    integer i__, ns, kx, ky;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* AUTHORS */
+/* ======= */
+/* Lawson, C. L., (JPL), Hanson, R. J., (SNLA), */
+/* Kincaid, D. R., (U. of Texas), Krogh, F. T., (JPL) */
+
+/* Purpose */
+/* ======= */
+/* Compute the inner product of two vectors with extended */
+/* precision accumulation and result. */
+
+/* Returns D.P. dot product accumulated in D.P., for S.P. SX and SY */
+/* DSDOT = sum for I = 0 to N-1 of SX(LX+I*INCX) * SY(LY+I*INCY), */
+/* where LX = 1 if INCX .GE. 0, else LX = 1+(1-N)*INCX, and LY is */
+/* defined in a similar way using INCY. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* number of elements in input vector(s) */
+
+/* SX (input) REAL array, dimension(N) */
+/* single precision vector with N elements */
+
+/* INCX (input) INTEGER */
+/* storage spacing between elements of SX */
+
+/* SY (input) REAL array, dimension(N) */
+/* single precision vector with N elements */
+
+/* INCY (input) INTEGER */
+/* storage spacing between elements of SY */
+
+/* DSDOT (output) DOUBLE PRECISION */
+/* DSDOT double precision dot product (zero if N.LE.0) */
+
+/* REFERENCES */
+/* ========== */
+
+/* C. L. Lawson, R. J. Hanson, D. R. Kincaid and F. T. */
+/* Krogh, Basic linear algebra subprograms for Fortran */
+/* usage, Algorithm No. 539, Transactions on Mathematical */
+/* Software 5, 3 (September 1979), pp. 308-323. */
+
+/* REVISION HISTORY (YYMMDD) */
+/* ========================== */
+
+/* 791001 DATE WRITTEN */
+/* 890831 Modified array declarations. (WRB) */
+/* 890831 REVISION DATE from Version 3.2 */
+/* 891214 Prologue converted to Version 4.0 format. (BAB) */
+/* 920310 Corrected definition of LX in DESCRIPTION. (WRB) */
+/* 920501 Reformatted the REFERENCES section. (WRB) */
+/* 070118 Reformat to LAPACK style (JL) */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+    /* Parameter adjustments */
+    /* Shift base pointers for Fortran-style 1-based indexing. */
+    --sy;
+    --sx;
+
+    /* Function Body */
+    ret_val = 0.;
+    if (*n <= 0) {
+	return ret_val;
+    }
+    if (*incx == *incy && *incx > 0) {
+	goto L20;
+    }
+
+/* Code for unequal or nonpositive increments. */
+
+    /* A negative increment starts at the far end of its vector so the
+       traversal order matches the Fortran definition of LX/LY above. */
+    kx = 1;
+    ky = 1;
+    if (*incx < 0) {
+	kx = (1 - *n) * *incx + 1;
+    }
+    if (*incy < 0) {
+	ky = (1 - *n) * *incy + 1;
+    }
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	/* Each single-precision product is formed and accumulated in
+	   double precision - the whole point of DSDOT. */
+	ret_val += (doublereal) sx[kx] * (doublereal) sy[ky];
+	kx += *incx;
+	ky += *incy;
+/* L10: */
+    }
+    return ret_val;
+
+/* Code for equal, positive, non-unit increments. */
+
+L20:
+    ns = *n * *incx;
+    i__1 = ns;
+    i__2 = *incx;
+    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+	ret_val += (doublereal) sx[i__] * (doublereal) sy[i__];
+/* L30: */
+    }
+    return ret_val;
+} /* dsdot_ */
diff --git a/contrib/libs/cblas/dspmv.c b/contrib/libs/cblas/dspmv.c
new file mode 100644
index 0000000000..24861dcb39
--- /dev/null
+++ b/contrib/libs/cblas/dspmv.c
@@ -0,0 +1,312 @@
+/* dspmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dspmv_(char *uplo, integer *n, doublereal *alpha,
+	doublereal *ap, doublereal *x, integer *incx, doublereal *beta,
+	doublereal *y, integer *incy)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, k, kk, ix, iy, jx, jy, kx, ky, info;
+    doublereal temp1, temp2;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSPMV performs the matrix-vector operation */
+
+/* y := alpha*A*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are n element vectors and */
+/* A is an n by n symmetric matrix, supplied in packed form. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the matrix A is supplied in the packed */
+/* array AP as follows: */
+
+/* UPLO = 'U' or 'u' The upper triangular part of A is */
+/* supplied in AP. */
+
+/* UPLO = 'L' or 'l' The lower triangular part of A is */
+/* supplied in AP. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* AP - DOUBLE PRECISION array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular part of the symmetric matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */
+/* and a( 2, 2 ) respectively, and so on. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular part of the symmetric matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */
+/* and a( 3, 1 ) respectively, and so on. */
+/* Unchanged on exit. */
+
+/* X - DOUBLE PRECISION array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA - DOUBLE PRECISION. */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then Y need not be set on input. */
+/* Unchanged on exit. */
+
+/* Y - DOUBLE PRECISION array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. On exit, Y is overwritten by the updated */
+/* vector y. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* Shift base pointers for Fortran-style 1-based indexing. */
+    --y;
+    --x;
+    --ap;
+
+    /* Function Body */
+    /* info holds the 1-based position of the first bad argument and is
+       reported through xerbla_. */
+    info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*incx == 0) {
+	info = 6;
+    } else if (*incy == 0) {
+	info = 9;
+    }
+    if (info != 0) {
+	xerbla_("DSPMV ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*n == 0 || *alpha == 0. && *beta == 1.) {
+	return 0;
+    }
+
+/* Set up the start points in X and Y. */
+
+    /* For a negative increment, start at the far end so traversal still
+       visits elements in Fortran order. */
+    if (*incx > 0) {
+	kx = 1;
+    } else {
+	kx = 1 - (*n - 1) * *incx;
+    }
+    if (*incy > 0) {
+	ky = 1;
+    } else {
+	ky = 1 - (*n - 1) * *incy;
+    }
+
+/* Start the operations. In this version the elements of the array AP */
+/* are accessed sequentially with one pass through AP. */
+
+/* First form y := beta*y. */
+
+    if (*beta != 1.) {
+	if (*incy == 1) {
+	    if (*beta == 0.) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[i__] = 0.;
+/* L10: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[i__] = *beta * y[i__];
+/* L20: */
+		}
+	    }
+	} else {
+	    iy = ky;
+	    if (*beta == 0.) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[iy] = 0.;
+		    iy += *incy;
+/* L30: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[iy] = *beta * y[iy];
+		    iy += *incy;
+/* L40: */
+		}
+	    }
+	}
+    }
+    if (*alpha == 0.) {
+	return 0;
+    }
+    /* kk indexes the start of column j within the packed array AP; it
+       advances by j (upper) or n-j+1 (lower) per column. */
+    kk = 1;
+    if (lsame_(uplo, "U")) {
+
+/* Form y when AP contains the upper triangle. */
+
+	if (*incx == 1 && *incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp1 = *alpha * x[j];
+		temp2 = 0.;
+		k = kk;
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    y[i__] += temp1 * ap[k];
+		    temp2 += ap[k] * x[i__];
+		    ++k;
+/* L50: */
+		}
+		/* ap[kk + j - 1] is the diagonal element a(j,j). */
+		y[j] = y[j] + temp1 * ap[kk + j - 1] + *alpha * temp2;
+		kk += j;
+/* L60: */
+	    }
+	} else {
+	    jx = kx;
+	    jy = ky;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp1 = *alpha * x[jx];
+		temp2 = 0.;
+		ix = kx;
+		iy = ky;
+		i__2 = kk + j - 2;
+		for (k = kk; k <= i__2; ++k) {
+		    y[iy] += temp1 * ap[k];
+		    temp2 += ap[k] * x[ix];
+		    ix += *incx;
+		    iy += *incy;
+/* L70: */
+		}
+		y[jy] = y[jy] + temp1 * ap[kk + j - 1] + *alpha * temp2;
+		jx += *incx;
+		jy += *incy;
+		kk += j;
+/* L80: */
+	    }
+	}
+    } else {
+
+/* Form y when AP contains the lower triangle. */
+
+	if (*incx == 1 && *incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp1 = *alpha * x[j];
+		temp2 = 0.;
+		/* ap[kk] is the diagonal element a(j,j). */
+		y[j] += temp1 * ap[kk];
+		k = kk + 1;
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    y[i__] += temp1 * ap[k];
+		    temp2 += ap[k] * x[i__];
+		    ++k;
+/* L90: */
+		}
+		y[j] += *alpha * temp2;
+		kk += *n - j + 1;
+/* L100: */
+	    }
+	} else {
+	    jx = kx;
+	    jy = ky;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp1 = *alpha * x[jx];
+		temp2 = 0.;
+		y[jy] += temp1 * ap[kk];
+		ix = jx;
+		iy = jy;
+		i__2 = kk + *n - j;
+		for (k = kk + 1; k <= i__2; ++k) {
+		    ix += *incx;
+		    iy += *incy;
+		    y[iy] += temp1 * ap[k];
+		    temp2 += ap[k] * x[ix];
+/* L110: */
+		}
+		y[jy] += *alpha * temp2;
+		jx += *incx;
+		jy += *incy;
+		kk += *n - j + 1;
+/* L120: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of DSPMV . */
+
+} /* dspmv_ */
diff --git a/contrib/libs/cblas/dspr.c b/contrib/libs/cblas/dspr.c
new file mode 100644
index 0000000000..7aa62e42be
--- /dev/null
+++ b/contrib/libs/cblas/dspr.c
@@ -0,0 +1,237 @@
+/* dspr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dspr_(char *uplo, integer *n, doublereal *alpha,
+    doublereal *x, integer *incx, doublereal *ap)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, k, kk, ix, jx, kx, info;
+    doublereal temp;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSPR performs the symmetric rank 1 operation */
+
+/* A := alpha*x*x' + A, */
+
+/* where alpha is a real scalar, x is an n element vector and A is an */
+/* n by n symmetric matrix, supplied in packed form. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the matrix A is supplied in the packed */
+/* array AP as follows: */
+
+/* UPLO = 'U' or 'u' The upper triangular part of A is */
+/* supplied in AP. */
+
+/* UPLO = 'L' or 'l' The lower triangular part of A is */
+/* supplied in AP. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - DOUBLE PRECISION array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* AP - DOUBLE PRECISION array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular part of the symmetric matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */
+/* and a( 2, 2 ) respectively, and so on. On exit, the array */
+/* AP is overwritten by the upper triangular part of the */
+/* updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular part of the symmetric matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */
+/* and a( 3, 1 ) respectively, and so on. On exit, the array */
+/* AP is overwritten by the lower triangular part of the */
+/* updated matrix. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    --x;
+
+    /* Function Body */
+    info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+        info = 1;
+    } else if (*n < 0) {
+        info = 2;
+    } else if (*incx == 0) {
+        info = 5;
+    }
+    if (info != 0) {
+        xerbla_("DSPR ", &info);
+        return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*n == 0 || *alpha == 0.) {
+        return 0;
+    }
+
+/* Set the start point in X if the increment is not unity. */
+
+    if (*incx <= 0) {
+        kx = 1 - (*n - 1) * *incx;
+    } else if (*incx != 1) {
+        kx = 1;
+    }
+/* (When INCX = 1, KX is deliberately left unset: only the non-unit-stride */
+/* branches below read it, and those run only when INCX != 1.) */
+
+/* Start the operations. In this version the elements of the array AP */
+/* are accessed sequentially with one pass through AP. */
+
+    kk = 1;
+/* (KK is the packed index of A(1,j) when UPLO = 'U', of A(j,j) when UPLO = 'L'.) */
+    if (lsame_(uplo, "U")) {
+
+/* Form A when upper triangle is stored in AP. */
+
+        if (*incx == 1) {
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+/* Column j of A is unchanged when x(j) = 0, so it is skipped. */
+                if (x[j] != 0.) {
+                    temp = *alpha * x[j];
+                    k = kk;
+                    i__2 = j;
+                    for (i__ = 1; i__ <= i__2; ++i__) {
+                        ap[k] += x[i__] * temp;
+                        ++k;
+/* L10: */
+                    }
+                }
+                kk += j;
+/* L20: */
+            }
+        } else {
+            jx = kx;
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                if (x[jx] != 0.) {
+                    temp = *alpha * x[jx];
+                    ix = kx;
+                    i__2 = kk + j - 1;
+                    for (k = kk; k <= i__2; ++k) {
+                        ap[k] += x[ix] * temp;
+                        ix += *incx;
+/* L30: */
+                    }
+                }
+                jx += *incx;
+                kk += j;
+/* L40: */
+            }
+        }
+    } else {
+
+/* Form A when lower triangle is stored in AP. */
+
+        if (*incx == 1) {
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                if (x[j] != 0.) {
+                    temp = *alpha * x[j];
+                    k = kk;
+                    i__2 = *n;
+                    for (i__ = j; i__ <= i__2; ++i__) {
+                        ap[k] += x[i__] * temp;
+                        ++k;
+/* L50: */
+                    }
+                }
+                kk = kk + *n - j + 1;
+/* L60: */
+            }
+        } else {
+            jx = kx;
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                if (x[jx] != 0.) {
+                    temp = *alpha * x[jx];
+                    ix = jx;
+                    i__2 = kk + *n - j;
+                    for (k = kk; k <= i__2; ++k) {
+                        ap[k] += x[ix] * temp;
+                        ix += *incx;
+/* L70: */
+                    }
+                }
+                jx += *incx;
+                kk = kk + *n - j + 1;
+/* L80: */
+            }
+        }
+    }
+
+    return 0;
+
+/* End of DSPR . */
+
+} /* dspr_ */
diff --git a/contrib/libs/cblas/dspr2.c b/contrib/libs/cblas/dspr2.c
new file mode 100644
index 0000000000..61e753254f
--- /dev/null
+++ b/contrib/libs/cblas/dspr2.c
@@ -0,0 +1,270 @@
+/* dspr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dspr2_(char *uplo, integer *n, doublereal *alpha,
+    doublereal *x, integer *incx, doublereal *y, integer *incy,
+    doublereal *ap)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, k, kk, ix, iy, jx, jy, kx, ky, info;
+    doublereal temp1, temp2;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSPR2 performs the symmetric rank 2 operation */
+
+/* A := alpha*x*y' + alpha*y*x' + A, */
+
+/* where alpha is a scalar, x and y are n element vectors and A is an */
+/* n by n symmetric matrix, supplied in packed form. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the matrix A is supplied in the packed */
+/* array AP as follows: */
+
+/* UPLO = 'U' or 'u' The upper triangular part of A is */
+/* supplied in AP. */
+
+/* UPLO = 'L' or 'l' The lower triangular part of A is */
+/* supplied in AP. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - DOUBLE PRECISION array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* Y - DOUBLE PRECISION array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. */
+/* Unchanged on exit. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+/* AP - DOUBLE PRECISION array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular part of the symmetric matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */
+/* and a( 2, 2 ) respectively, and so on. On exit, the array */
+/* AP is overwritten by the upper triangular part of the */
+/* updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular part of the symmetric matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */
+/* and a( 3, 1 ) respectively, and so on. On exit, the array */
+/* AP is overwritten by the lower triangular part of the */
+/* updated matrix. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    --y;
+    --x;
+
+    /* Function Body */
+    info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+        info = 1;
+    } else if (*n < 0) {
+        info = 2;
+    } else if (*incx == 0) {
+        info = 5;
+    } else if (*incy == 0) {
+        info = 7;
+    }
+    if (info != 0) {
+        xerbla_("DSPR2 ", &info);
+        return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*n == 0 || *alpha == 0.) {
+        return 0;
+    }
+
+/* Set up the start points in X and Y if the increments are not both */
+/* unity. */
+
+    if (*incx != 1 || *incy != 1) {
+        if (*incx > 0) {
+            kx = 1;
+        } else {
+            kx = 1 - (*n - 1) * *incx;
+        }
+        if (*incy > 0) {
+            ky = 1;
+        } else {
+            ky = 1 - (*n - 1) * *incy;
+        }
+        jx = kx;
+        jy = ky;
+    }
+/* (JX and JY are initialized above exactly when the non-unit-stride */
+/* branches below execute, so they are never read uninitialized.) */
+
+/* Start the operations. In this version the elements of the array AP */
+/* are accessed sequentially with one pass through AP. */
+
+    kk = 1;
+/* (KK is the packed index of A(1,j) when UPLO = 'U', of A(j,j) when UPLO = 'L'.) */
+    if (lsame_(uplo, "U")) {
+
+/* Form A when upper triangle is stored in AP. */
+
+        if (*incx == 1 && *incy == 1) {
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+/* Column j of A is unchanged when both x(j) and y(j) are zero. */
+                if (x[j] != 0. || y[j] != 0.) {
+                    temp1 = *alpha * y[j];
+                    temp2 = *alpha * x[j];
+                    k = kk;
+                    i__2 = j;
+                    for (i__ = 1; i__ <= i__2; ++i__) {
+                        ap[k] = ap[k] + x[i__] * temp1 + y[i__] * temp2;
+                        ++k;
+/* L10: */
+                    }
+                }
+                kk += j;
+/* L20: */
+            }
+        } else {
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                if (x[jx] != 0. || y[jy] != 0.) {
+                    temp1 = *alpha * y[jy];
+                    temp2 = *alpha * x[jx];
+                    ix = kx;
+                    iy = ky;
+                    i__2 = kk + j - 1;
+                    for (k = kk; k <= i__2; ++k) {
+                        ap[k] = ap[k] + x[ix] * temp1 + y[iy] * temp2;
+                        ix += *incx;
+                        iy += *incy;
+/* L30: */
+                    }
+                }
+                jx += *incx;
+                jy += *incy;
+                kk += j;
+/* L40: */
+            }
+        }
+    } else {
+
+/* Form A when lower triangle is stored in AP. */
+
+        if (*incx == 1 && *incy == 1) {
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                if (x[j] != 0. || y[j] != 0.) {
+                    temp1 = *alpha * y[j];
+                    temp2 = *alpha * x[j];
+                    k = kk;
+                    i__2 = *n;
+                    for (i__ = j; i__ <= i__2; ++i__) {
+                        ap[k] = ap[k] + x[i__] * temp1 + y[i__] * temp2;
+                        ++k;
+/* L50: */
+                    }
+                }
+                kk = kk + *n - j + 1;
+/* L60: */
+            }
+        } else {
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                if (x[jx] != 0. || y[jy] != 0.) {
+                    temp1 = *alpha * y[jy];
+                    temp2 = *alpha * x[jx];
+                    ix = jx;
+                    iy = jy;
+                    i__2 = kk + *n - j;
+                    for (k = kk; k <= i__2; ++k) {
+                        ap[k] = ap[k] + x[ix] * temp1 + y[iy] * temp2;
+                        ix += *incx;
+                        iy += *incy;
+/* L70: */
+                    }
+                }
+                jx += *incx;
+                jy += *incy;
+                kk = kk + *n - j + 1;
+/* L80: */
+            }
+        }
+    }
+
+    return 0;
+
+/* End of DSPR2 . */
+
+} /* dspr2_ */
diff --git a/contrib/libs/cblas/dswap.c b/contrib/libs/cblas/dswap.c
new file mode 100644
index 0000000000..27a303e607
--- /dev/null
+++ b/contrib/libs/cblas/dswap.c
@@ -0,0 +1,114 @@
+/* dswap.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dswap_(integer *n, doublereal *dx, integer *incx,
+    doublereal *dy, integer *incy)
+{
+    /* System generated locals */
+    integer i__1;
+
+    /* Local variables */
+    integer i__, m, ix, iy, mp1;
+    doublereal dtemp;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* interchanges two vectors. */
+/* uses unrolled loops for increments equal one. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+    /* Parameter adjustments */
+    --dy;
+    --dx;
+
+    /* Function Body */
+    if (*n <= 0) {
+        return 0;
+    }
+    if (*incx == 1 && *incy == 1) {
+        goto L20;
+    }
+
+/* code for unequal increments or equal increments not equal */
+/* to 1 */
+
+/* (A negative increment starts at the far end of the vector so that the */
+/* i-th logical elements of DX and DY are still paired together.) */
+    ix = 1;
+    iy = 1;
+    if (*incx < 0) {
+        ix = (-(*n) + 1) * *incx + 1;
+    }
+    if (*incy < 0) {
+        iy = (-(*n) + 1) * *incy + 1;
+    }
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+        dtemp = dx[ix];
+        dx[ix] = dy[iy];
+        dy[iy] = dtemp;
+        ix += *incx;
+        iy += *incy;
+/* L10: */
+    }
+    return 0;
+
+/* code for both increments equal to 1 */
+
+
+/* clean-up loop */
+
+L20:
+/* Swap the first N mod 3 elements one at a time so the main loop below */
+/* can run in fixed strides of three. */
+    m = *n % 3;
+    if (m == 0) {
+        goto L40;
+    }
+    i__1 = m;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+        dtemp = dx[i__];
+        dx[i__] = dy[i__];
+        dy[i__] = dtemp;
+/* L30: */
+    }
+    if (*n < 3) {
+        return 0;
+    }
+L40:
+/* Main swap loop, manually unrolled by three. */
+    mp1 = m + 1;
+    i__1 = *n;
+    for (i__ = mp1; i__ <= i__1; i__ += 3) {
+        dtemp = dx[i__];
+        dx[i__] = dy[i__];
+        dy[i__] = dtemp;
+        dtemp = dx[i__ + 1];
+        dx[i__ + 1] = dy[i__ + 1];
+        dy[i__ + 1] = dtemp;
+        dtemp = dx[i__ + 2];
+        dx[i__ + 2] = dy[i__ + 2];
+        dy[i__ + 2] = dtemp;
+/* L50: */
+    }
+    return 0;
+} /* dswap_ */
diff --git a/contrib/libs/cblas/dsymm.c b/contrib/libs/cblas/dsymm.c
new file mode 100644
index 0000000000..e5dcd5db93
--- /dev/null
+++ b/contrib/libs/cblas/dsymm.c
@@ -0,0 +1,362 @@
+/* dsymm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dsymm_(char *side, char *uplo, integer *m, integer *n,
+    doublereal *alpha, doublereal *a, integer *lda, doublereal *b,
+    integer *ldb, doublereal *beta, doublereal *c__, integer *ldc)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+        i__3;
+
+    /* Local variables */
+    integer i__, j, k, info;
+    doublereal temp1, temp2;
+    extern logical lsame_(char *, char *);
+    integer nrowa;
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYMM performs one of the matrix-matrix operations */
+
+/* C := alpha*A*B + beta*C, */
+
+/* or */
+
+/* C := alpha*B*A + beta*C, */
+
+/* where alpha and beta are scalars, A is a symmetric matrix and B and */
+/* C are m by n matrices. */
+
+/* Arguments */
+/* ========== */
+
+/* SIDE - CHARACTER*1. */
+/* On entry, SIDE specifies whether the symmetric matrix A */
+/* appears on the left or right in the operation as follows: */
+
+/* SIDE = 'L' or 'l' C := alpha*A*B + beta*C, */
+
+/* SIDE = 'R' or 'r' C := alpha*B*A + beta*C, */
+
+/* Unchanged on exit. */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the symmetric matrix A is to be */
+/* referenced as follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of the */
+/* symmetric matrix is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of the */
+/* symmetric matrix is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix C. */
+/* M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix C. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is */
+/* m when SIDE = 'L' or 'l' and is n otherwise. */
+/* Before entry with SIDE = 'L' or 'l', the m by m part of */
+/* the array A must contain the symmetric matrix, such that */
+/* when UPLO = 'U' or 'u', the leading m by m upper triangular */
+/* part of the array A must contain the upper triangular part */
+/* of the symmetric matrix and the strictly lower triangular */
+/* part of A is not referenced, and when UPLO = 'L' or 'l', */
+/* the leading m by m lower triangular part of the array A */
+/* must contain the lower triangular part of the symmetric */
+/* matrix and the strictly upper triangular part of A is not */
+/* referenced. */
+/* Before entry with SIDE = 'R' or 'r', the n by n part of */
+/* the array A must contain the symmetric matrix, such that */
+/* when UPLO = 'U' or 'u', the leading n by n upper triangular */
+/* part of the array A must contain the upper triangular part */
+/* of the symmetric matrix and the strictly lower triangular */
+/* part of A is not referenced, and when UPLO = 'L' or 'l', */
+/* the leading n by n lower triangular part of the array A */
+/* must contain the lower triangular part of the symmetric */
+/* matrix and the strictly upper triangular part of A is not */
+/* referenced. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When SIDE = 'L' or 'l' then */
+/* LDA must be at least max( 1, m ), otherwise LDA must be at */
+/* least max( 1, n ). */
+/* Unchanged on exit. */
+
+/* B - DOUBLE PRECISION array of DIMENSION ( LDB, n ). */
+/* Before entry, the leading m by n part of the array B must */
+/* contain the matrix B. */
+/* Unchanged on exit. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. LDB must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+/* BETA - DOUBLE PRECISION. */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then C need not be set on input. */
+/* Unchanged on exit. */
+
+/* C - DOUBLE PRECISION array of DIMENSION ( LDC, n ). */
+/* Before entry, the leading m by n part of the array C must */
+/* contain the matrix C, except when beta is zero, in which */
+/* case C need not be set on entry. */
+/* On exit, the array C is overwritten by the m by n updated */
+/* matrix. */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Set NROWA as the number of rows of A. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+
+    /* Function Body */
+    if (lsame_(side, "L")) {
+        nrowa = *m;
+    } else {
+        nrowa = *n;
+    }
+    upper = lsame_(uplo, "U");
+
+/* Test the input parameters. */
+
+    info = 0;
+    if (! lsame_(side, "L") && ! lsame_(side, "R")) {
+        info = 1;
+    } else if (! upper && ! lsame_(uplo, "L")) {
+        info = 2;
+    } else if (*m < 0) {
+        info = 3;
+    } else if (*n < 0) {
+        info = 4;
+    } else if (*lda < max(1,nrowa)) {
+        info = 7;
+    } else if (*ldb < max(1,*m)) {
+        info = 9;
+    } else if (*ldc < max(1,*m)) {
+        info = 12;
+    }
+    if (info != 0) {
+        xerbla_("DSYMM ", &info);
+        return 0;
+    }
+
+/* Quick return if possible. */
+
+/* (Note: && binds tighter than ||, so this returns when M or N is zero, */
+/* or when ALPHA = 0 and BETA = 1, which leaves C untouched.) */
+    if (*m == 0 || *n == 0 || *alpha == 0. && *beta == 1.) {
+        return 0;
+    }
+
+/* And when alpha.eq.zero. */
+
+    if (*alpha == 0.) {
+        if (*beta == 0.) {
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                i__2 = *m;
+                for (i__ = 1; i__ <= i__2; ++i__) {
+                    c__[i__ + j * c_dim1] = 0.;
+/* L10: */
+                }
+/* L20: */
+            }
+        } else {
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                i__2 = *m;
+                for (i__ = 1; i__ <= i__2; ++i__) {
+                    c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L30: */
+                }
+/* L40: */
+            }
+        }
+        return 0;
+    }
+
+/* Start the operations. */
+
+    if (lsame_(side, "L")) {
+
+/* Form C := alpha*A*B + beta*C. */
+
+        if (upper) {
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                i__2 = *m;
+                for (i__ = 1; i__ <= i__2; ++i__) {
+                    temp1 = *alpha * b[i__ + j * b_dim1];
+                    temp2 = 0.;
+                    i__3 = i__ - 1;
+                    for (k = 1; k <= i__3; ++k) {
+                        c__[k + j * c_dim1] += temp1 * a[k + i__ * a_dim1];
+                        temp2 += b[k + j * b_dim1] * a[k + i__ * a_dim1];
+/* L50: */
+                    }
+                    if (*beta == 0.) {
+                        c__[i__ + j * c_dim1] = temp1 * a[i__ + i__ * a_dim1]
+                            + *alpha * temp2;
+                    } else {
+                        c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]
+                            + temp1 * a[i__ + i__ * a_dim1] + *alpha *
+                            temp2;
+                    }
+/* L60: */
+                }
+/* L70: */
+            }
+        } else {
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+/* (i runs from m down to 1, the reference-BLAS ordering for the stored */
+/* lower triangle.) */
+                for (i__ = *m; i__ >= 1; --i__) {
+                    temp1 = *alpha * b[i__ + j * b_dim1];
+                    temp2 = 0.;
+                    i__2 = *m;
+                    for (k = i__ + 1; k <= i__2; ++k) {
+                        c__[k + j * c_dim1] += temp1 * a[k + i__ * a_dim1];
+                        temp2 += b[k + j * b_dim1] * a[k + i__ * a_dim1];
+/* L80: */
+                    }
+                    if (*beta == 0.) {
+                        c__[i__ + j * c_dim1] = temp1 * a[i__ + i__ * a_dim1]
+                            + *alpha * temp2;
+                    } else {
+                        c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]
+                            + temp1 * a[i__ + i__ * a_dim1] + *alpha *
+                            temp2;
+                    }
+/* L90: */
+                }
+/* L100: */
+            }
+        }
+    } else {
+
+/* Form C := alpha*B*A + beta*C. */
+
+        i__1 = *n;
+        for (j = 1; j <= i__1; ++j) {
+            temp1 = *alpha * a[j + j * a_dim1];
+            if (*beta == 0.) {
+                i__2 = *m;
+                for (i__ = 1; i__ <= i__2; ++i__) {
+                    c__[i__ + j * c_dim1] = temp1 * b[i__ + j * b_dim1];
+/* L110: */
+                }
+            } else {
+                i__2 = *m;
+                for (i__ = 1; i__ <= i__2; ++i__) {
+                    c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1] +
+                        temp1 * b[i__ + j * b_dim1];
+/* L120: */
+                }
+            }
+/* Off-diagonal terms alpha*A(k,j)*B(:,k) for k < j, reading A(k,j) from */
+/* whichever triangle is stored. */
+            i__2 = j - 1;
+            for (k = 1; k <= i__2; ++k) {
+                if (upper) {
+                    temp1 = *alpha * a[k + j * a_dim1];
+                } else {
+                    temp1 = *alpha * a[j + k * a_dim1];
+                }
+                i__3 = *m;
+                for (i__ = 1; i__ <= i__3; ++i__) {
+                    c__[i__ + j * c_dim1] += temp1 * b[i__ + k * b_dim1];
+/* L130: */
+                }
+/* L140: */
+            }
+/* ...and the corresponding terms for k > j. */
+            i__2 = *n;
+            for (k = j + 1; k <= i__2; ++k) {
+                if (upper) {
+                    temp1 = *alpha * a[j + k * a_dim1];
+                } else {
+                    temp1 = *alpha * a[k + j * a_dim1];
+                }
+                i__3 = *m;
+                for (i__ = 1; i__ <= i__3; ++i__) {
+                    c__[i__ + j * c_dim1] += temp1 * b[i__ + k * b_dim1];
+/* L150: */
+                }
+/* L160: */
+            }
+/* L170: */
+        }
+    }
+
+    return 0;
+
+/* End of DSYMM . */
+
+} /* dsymm_ */
diff --git a/contrib/libs/cblas/dsymv.c b/contrib/libs/cblas/dsymv.c
new file mode 100644
index 0000000000..a557f1db8c
--- /dev/null
+++ b/contrib/libs/cblas/dsymv.c
@@ -0,0 +1,313 @@
+/* dsymv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dsymv_(char *uplo, integer *n, doublereal *alpha,
+    doublereal *a, integer *lda, doublereal *x, integer *incx, doublereal
+    *beta, doublereal *y, integer *incy)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, ix, iy, jx, jy, kx, ky, info;
+    doublereal temp1, temp2;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYMV performs the matrix-vector operation */
+
+/* y := alpha*A*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are n element vectors and */
+/* A is an n by n symmetric matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array A is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of A */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of A */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular part of the symmetric matrix and the strictly */
+/* lower triangular part of A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular part of the symmetric matrix and the strictly */
+/* upper triangular part of A is not referenced. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+/* X - DOUBLE PRECISION array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA - DOUBLE PRECISION. */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then Y need not be set on input. */
+/* Unchanged on exit. */
+
+/* Y - DOUBLE PRECISION array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. On exit, Y is overwritten by the updated */
+/* vector y. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --x;
+    --y;
+
+    /* Function Body */
+    info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+        info = 1;
+    } else if (*n < 0) {
+        info = 2;
+    } else if (*lda < max(1,*n)) {
+        info = 5;
+    } else if (*incx == 0) {
+        info = 7;
+    } else if (*incy == 0) {
+        info = 10;
+    }
+    if (info != 0) {
+        xerbla_("DSYMV ", &info);
+        return 0;
+    }
+
+/* Quick return if possible. */
+
+/* (Note: && binds tighter than ||, so this returns when N = 0, or when */
+/* ALPHA = 0 and BETA = 1, which leaves y untouched.) */
+    if (*n == 0 || *alpha == 0. && *beta == 1.) {
+        return 0;
+    }
+
+/* Set up the start points in X and Y. */
+
+    if (*incx > 0) {
+        kx = 1;
+    } else {
+        kx = 1 - (*n - 1) * *incx;
+    }
+    if (*incy > 0) {
+        ky = 1;
+    } else {
+        ky = 1 - (*n - 1) * *incy;
+    }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through the triangular part */
+/* of A. */
+
+/* First form y := beta*y. */
+
+    if (*beta != 1.) {
+        if (*incy == 1) {
+            if (*beta == 0.) {
+                i__1 = *n;
+                for (i__ = 1; i__ <= i__1; ++i__) {
+                    y[i__] = 0.;
+/* L10: */
+                }
+            } else {
+                i__1 = *n;
+                for (i__ = 1; i__ <= i__1; ++i__) {
+                    y[i__] = *beta * y[i__];
+/* L20: */
+                }
+            }
+        } else {
+            iy = ky;
+            if (*beta == 0.) {
+                i__1 = *n;
+                for (i__ = 1; i__ <= i__1; ++i__) {
+                    y[iy] = 0.;
+                    iy += *incy;
+/* L30: */
+                }
+            } else {
+                i__1 = *n;
+                for (i__ = 1; i__ <= i__1; ++i__) {
+                    y[iy] = *beta * y[iy];
+                    iy += *incy;
+/* L40: */
+                }
+            }
+        }
+    }
+/* y now holds beta*y; when alpha is zero there is nothing left to add. */
+    if (*alpha == 0.) {
+        return 0;
+    }
+    if (lsame_(uplo, "U")) {
+
+/* Form y when A is stored in upper triangle. */
+
+        if (*incx == 1 && *incy == 1) {
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+/* temp1 spreads x(j) down column j; temp2 accumulates A(1:j-1,j)'x, the */
+/* symmetric contribution to y(j). */
+                temp1 = *alpha * x[j];
+                temp2 = 0.;
+                i__2 = j - 1;
+                for (i__ = 1; i__ <= i__2; ++i__) {
+                    y[i__] += temp1 * a[i__ + j * a_dim1];
+                    temp2 += a[i__ + j * a_dim1] * x[i__];
+/* L50: */
+                }
+                y[j] = y[j] + temp1 * a[j + j * a_dim1] + *alpha * temp2;
+/* L60: */
+            }
+        } else {
+            jx = kx;
+            jy = ky;
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                temp1 = *alpha * x[jx];
+                temp2 = 0.;
+                ix = kx;
+                iy = ky;
+                i__2 = j - 1;
+                for (i__ = 1; i__ <= i__2; ++i__) {
+                    y[iy] += temp1 * a[i__ + j * a_dim1];
+                    temp2 += a[i__ + j * a_dim1] * x[ix];
+                    ix += *incx;
+                    iy += *incy;
+/* L70: */
+                }
+                y[jy] = y[jy] + temp1 * a[j + j * a_dim1] + *alpha * temp2;
+                jx += *incx;
+                jy += *incy;
+/* L80: */
+            }
+        }
+    } else {
+
+/* Form y when A is stored in lower triangle. */
+
+        if (*incx == 1 && *incy == 1) {
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                temp1 = *alpha * x[j];
+                temp2 = 0.;
+                y[j] += temp1 * a[j + j * a_dim1];
+                i__2 = *n;
+                for (i__ = j + 1; i__ <= i__2; ++i__) {
+                    y[i__] += temp1 * a[i__ + j * a_dim1];
+                    temp2 += a[i__ + j * a_dim1] * x[i__];
+/* L90: */
+                }
+                y[j] += *alpha * temp2;
+/* L100: */
+            }
+        } else {
+            jx = kx;
+            jy = ky;
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                temp1 = *alpha * x[jx];
+                temp2 = 0.;
+                y[jy] += temp1 * a[j + j * a_dim1];
+/* ix, iy start at the diagonal position and advance down column j. */
+                ix = jx;
+                iy = jy;
+                i__2 = *n;
+                for (i__ = j + 1; i__ <= i__2; ++i__) {
+                    ix += *incx;
+                    iy += *incy;
+                    y[iy] += temp1 * a[i__ + j * a_dim1];
+                    temp2 += a[i__ + j * a_dim1] * x[ix];
+/* L110: */
+                }
+                y[jy] += *alpha * temp2;
+                jx += *incx;
+                jy += *incy;
+/* L120: */
+            }
+        }
+    }
+
+    return 0;
+
+/* End of DSYMV . */
+
+} /* dsymv_ */
diff --git a/contrib/libs/cblas/dsyr.c b/contrib/libs/cblas/dsyr.c
new file mode 100644
index 0000000000..fa15dcdbe5
--- /dev/null
+++ b/contrib/libs/cblas/dsyr.c
@@ -0,0 +1,238 @@
+/* dsyr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dsyr_(char *uplo, integer *n, doublereal *alpha,
+ doublereal *x, integer *incx, doublereal *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, ix, jx, kx, info;
+ doublereal temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYR performs the symmetric rank 1 operation */
+
+/* A := alpha*x*x' + A, */
+
+/* where alpha is a real scalar, x is an n element vector and A is an */
+/* n by n symmetric matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array A is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of A */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of A */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - DOUBLE PRECISION array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular part of the symmetric matrix and the strictly */
+/* lower triangular part of A is not referenced. On exit, the */
+/* upper triangular part of the array A is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular part of the symmetric matrix and the strictly */
+/* upper triangular part of A is not referenced. On exit, the */
+/* lower triangular part of the array A is overwritten by the */
+/* lower triangular part of the updated matrix. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --x;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*lda < max(1,*n)) {
+ info = 7;
+ }
+ if (info != 0) {
+ xerbla_("DSYR ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || *alpha == 0.) {
+ return 0;
+ }
+
+/* Set the start point in X if the increment is not unity. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through the triangular part */
+/* of A. */
+
+ if (lsame_(uplo, "U")) {
+
+/* Form A when A is stored in upper triangle. */
+
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0.) {
+ temp = *alpha * x[j];
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] += x[i__] * temp;
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.) {
+ temp = *alpha * x[jx];
+ ix = kx;
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] += x[ix] * temp;
+ ix += *incx;
+/* L30: */
+ }
+ }
+ jx += *incx;
+/* L40: */
+ }
+ }
+ } else {
+
+/* Form A when A is stored in lower triangle. */
+
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0.) {
+ temp = *alpha * x[j];
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] += x[i__] * temp;
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.) {
+ temp = *alpha * x[jx];
+ ix = jx;
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] += x[ix] * temp;
+ ix += *incx;
+/* L70: */
+ }
+ }
+ jx += *incx;
+/* L80: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DSYR . */
+
+} /* dsyr_ */
diff --git a/contrib/libs/cblas/dsyr2.c b/contrib/libs/cblas/dsyr2.c
new file mode 100644
index 0000000000..d9dca76ff6
--- /dev/null
+++ b/contrib/libs/cblas/dsyr2.c
@@ -0,0 +1,275 @@
+/* dsyr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dsyr2_(char *uplo, integer *n, doublereal *alpha,
+ doublereal *x, integer *incx, doublereal *y, integer *incy,
+ doublereal *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, ix, iy, jx, jy, kx, ky, info;
+ doublereal temp1, temp2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYR2 performs the symmetric rank 2 operation */
+
+/* A := alpha*x*y' + alpha*y*x' + A, */
+
+/* where alpha is a scalar, x and y are n element vectors and A is an n */
+/* by n symmetric matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array A is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of A */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of A */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - DOUBLE PRECISION array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* Y - DOUBLE PRECISION array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. */
+/* Unchanged on exit. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+/* A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular part of the symmetric matrix and the strictly */
+/* lower triangular part of A is not referenced. On exit, the */
+/* upper triangular part of the array A is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular part of the symmetric matrix and the strictly */
+/* upper triangular part of A is not referenced. On exit, the */
+/* lower triangular part of the array A is overwritten by the */
+/* lower triangular part of the updated matrix. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --x;
+ --y;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*incy == 0) {
+ info = 7;
+ } else if (*lda < max(1,*n)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("DSYR2 ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || *alpha == 0.) {
+ return 0;
+ }
+
+/* Set up the start points in X and Y if the increments are not both */
+/* unity. */
+
+ if (*incx != 1 || *incy != 1) {
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+ jx = kx;
+ jy = ky;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through the triangular part */
+/* of A. */
+
+ if (lsame_(uplo, "U")) {
+
+/* Form A when A is stored in the upper triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0. || y[j] != 0.) {
+ temp1 = *alpha * y[j];
+ temp2 = *alpha * x[j];
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[i__] *
+ temp1 + y[i__] * temp2;
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0. || y[jy] != 0.) {
+ temp1 = *alpha * y[jy];
+ temp2 = *alpha * x[jx];
+ ix = kx;
+ iy = ky;
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[ix] *
+ temp1 + y[iy] * temp2;
+ ix += *incx;
+ iy += *incy;
+/* L30: */
+ }
+ }
+ jx += *incx;
+ jy += *incy;
+/* L40: */
+ }
+ }
+ } else {
+
+/* Form A when A is stored in the lower triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0. || y[j] != 0.) {
+ temp1 = *alpha * y[j];
+ temp2 = *alpha * x[j];
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[i__] *
+ temp1 + y[i__] * temp2;
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0. || y[jy] != 0.) {
+ temp1 = *alpha * y[jy];
+ temp2 = *alpha * x[jx];
+ ix = jx;
+ iy = jy;
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[ix] *
+ temp1 + y[iy] * temp2;
+ ix += *incx;
+ iy += *incy;
+/* L70: */
+ }
+ }
+ jx += *incx;
+ jy += *incy;
+/* L80: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DSYR2 . */
+
+} /* dsyr2_ */
diff --git a/contrib/libs/cblas/dsyr2k.c b/contrib/libs/cblas/dsyr2k.c
new file mode 100644
index 0000000000..cbd684508a
--- /dev/null
+++ b/contrib/libs/cblas/dsyr2k.c
@@ -0,0 +1,407 @@
+/* dsyr2k.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dsyr2k_(char *uplo, char *trans, integer *n, integer *k,
+ doublereal *alpha, doublereal *a, integer *lda, doublereal *b,
+ integer *ldb, doublereal *beta, doublereal *c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3;
+
+ /* Local variables */
+ integer i__, j, l, info;
+ doublereal temp1, temp2;
+ extern logical lsame_(char *, char *);
+ integer nrowa;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYR2K performs one of the symmetric rank 2k operations */
+
+/* C := alpha*A*B' + alpha*B*A' + beta*C, */
+
+/* or */
+
+/* C := alpha*A'*B + alpha*B'*A + beta*C, */
+
+/* where alpha and beta are scalars, C is an n by n symmetric matrix */
+/* and A and B are n by k matrices in the first case and k by n */
+/* matrices in the second case. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array C is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of C */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of C */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' C := alpha*A*B' + alpha*B*A' + */
+/* beta*C. */
+
+/* TRANS = 'T' or 't' C := alpha*A'*B + alpha*B'*A + */
+/* beta*C. */
+
+/* TRANS = 'C' or 'c' C := alpha*A'*B + alpha*B'*A + */
+/* beta*C. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix C. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry with TRANS = 'N' or 'n', K specifies the number */
+/* of columns of the matrices A and B, and on entry with */
+/* TRANS = 'T' or 't' or 'C' or 'c', K specifies the number */
+/* of rows of the matrices A and B. K must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is */
+/* k when TRANS = 'N' or 'n', and is n otherwise. */
+/* Before entry with TRANS = 'N' or 'n', the leading n by k */
+/* part of the array A must contain the matrix A, otherwise */
+/* the leading k by n part of the array A must contain the */
+/* matrix A. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDA must be at least max( 1, n ), otherwise LDA must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* B - DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is */
+/* k when TRANS = 'N' or 'n', and is n otherwise. */
+/* Before entry with TRANS = 'N' or 'n', the leading n by k */
+/* part of the array B must contain the matrix B, otherwise */
+/* the leading k by n part of the array B must contain the */
+/* matrix B. */
+/* Unchanged on exit. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDB must be at least max( 1, n ), otherwise LDB must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* BETA - DOUBLE PRECISION. */
+/* On entry, BETA specifies the scalar beta. */
+/* Unchanged on exit. */
+
+/* C - DOUBLE PRECISION array of DIMENSION ( LDC, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array C must contain the upper */
+/* triangular part of the symmetric matrix and the strictly */
+/* lower triangular part of C is not referenced. On exit, the */
+/* upper triangular part of the array C is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array C must contain the lower */
+/* triangular part of the symmetric matrix and the strictly */
+/* upper triangular part of C is not referenced. On exit, the */
+/* lower triangular part of the array C is overwritten by the */
+/* lower triangular part of the updated matrix. */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(trans, "N")) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! upper && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*k < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldb < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldc < max(1,*n)) {
+ info = 12;
+ }
+ if (info != 0) {
+ xerbla_("DSYR2K", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || (*alpha == 0. || *k == 0) && *beta == 1.) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.) {
+ if (upper) {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*B' + alpha*B*A' + C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L90: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L100: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if (a[j + l * a_dim1] != 0. || b[j + l * b_dim1] != 0.) {
+ temp1 = *alpha * b[j + l * b_dim1];
+ temp2 = *alpha * a[j + l * a_dim1];
+ i__3 = j;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] = c__[i__ + j * c_dim1] + a[
+ i__ + l * a_dim1] * temp1 + b[i__ + l *
+ b_dim1] * temp2;
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L140: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L150: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if (a[j + l * a_dim1] != 0. || b[j + l * b_dim1] != 0.) {
+ temp1 = *alpha * b[j + l * b_dim1];
+ temp2 = *alpha * a[j + l * a_dim1];
+ i__3 = *n;
+ for (i__ = j; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] = c__[i__ + j * c_dim1] + a[
+ i__ + l * a_dim1] * temp1 + b[i__ + l *
+ b_dim1] * temp2;
+/* L160: */
+ }
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ } else {
+
+/* Form C := alpha*A'*B + alpha*B'*A + C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp1 = 0.;
+ temp2 = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp1 += a[l + i__ * a_dim1] * b[l + j * b_dim1];
+ temp2 += b[l + i__ * b_dim1] * a[l + j * a_dim1];
+/* L190: */
+ }
+ if (*beta == 0.) {
+ c__[i__ + j * c_dim1] = *alpha * temp1 + *alpha *
+ temp2;
+ } else {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]
+ + *alpha * temp1 + *alpha * temp2;
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ temp1 = 0.;
+ temp2 = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp1 += a[l + i__ * a_dim1] * b[l + j * b_dim1];
+ temp2 += b[l + i__ * b_dim1] * a[l + j * a_dim1];
+/* L220: */
+ }
+ if (*beta == 0.) {
+ c__[i__ + j * c_dim1] = *alpha * temp1 + *alpha *
+ temp2;
+ } else {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]
+ + *alpha * temp1 + *alpha * temp2;
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DSYR2K. */
+
+} /* dsyr2k_ */
diff --git a/contrib/libs/cblas/dsyrk.c b/contrib/libs/cblas/dsyrk.c
new file mode 100644
index 0000000000..393880daaa
--- /dev/null
+++ b/contrib/libs/cblas/dsyrk.c
@@ -0,0 +1,372 @@
+/* dsyrk.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dsyrk_(char *uplo, char *trans, integer *n, integer *k,
+ doublereal *alpha, doublereal *a, integer *lda, doublereal *beta,
+ doublereal *c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, l, info;
+ doublereal temp;
+ extern logical lsame_(char *, char *);
+ integer nrowa;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYRK performs one of the symmetric rank k operations */
+
+/* C := alpha*A*A' + beta*C, */
+
+/* or */
+
+/* C := alpha*A'*A + beta*C, */
+
+/* where alpha and beta are scalars, C is an n by n symmetric matrix */
+/* and A is an n by k matrix in the first case and a k by n matrix */
+/* in the second case. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array C is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of C */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of C */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' C := alpha*A*A' + beta*C. */
+
+/* TRANS = 'T' or 't' C := alpha*A'*A + beta*C. */
+
+/* TRANS = 'C' or 'c' C := alpha*A'*A + beta*C. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix C. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry with TRANS = 'N' or 'n', K specifies the number */
+/* of columns of the matrix A, and on entry with */
+/* TRANS = 'T' or 't' or 'C' or 'c', K specifies the number */
+/* of rows of the matrix A. K must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is */
+/* k when TRANS = 'N' or 'n', and is n otherwise. */
+/* Before entry with TRANS = 'N' or 'n', the leading n by k */
+/* part of the array A must contain the matrix A, otherwise */
+/* the leading k by n part of the array A must contain the */
+/* matrix A. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDA must be at least max( 1, n ), otherwise LDA must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* BETA - DOUBLE PRECISION. */
+/* On entry, BETA specifies the scalar beta. */
+/* Unchanged on exit. */
+
+/* C - DOUBLE PRECISION array of DIMENSION ( LDC, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array C must contain the upper */
+/* triangular part of the symmetric matrix and the strictly */
+/* lower triangular part of C is not referenced. On exit, the */
+/* upper triangular part of the array C is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array C must contain the lower */
+/* triangular part of the symmetric matrix and the strictly */
+/* upper triangular part of C is not referenced. On exit, the */
+/* lower triangular part of the array C is overwritten by the */
+/* lower triangular part of the updated matrix. */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(trans, "N")) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! upper && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*k < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldc < max(1,*n)) {
+ info = 10;
+ }
+ if (info != 0) {
+ xerbla_("DSYRK ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || (*alpha == 0. || *k == 0) && *beta == 1.) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.) {
+ if (upper) {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*A' + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L90: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L100: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if (a[j + l * a_dim1] != 0.) {
+ temp = *alpha * a[j + l * a_dim1];
+ i__3 = j;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] += temp * a[i__ + l *
+ a_dim1];
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L140: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L150: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if (a[j + l * a_dim1] != 0.) {
+ temp = *alpha * a[j + l * a_dim1];
+ i__3 = *n;
+ for (i__ = j; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] += temp * a[i__ + l *
+ a_dim1];
+/* L160: */
+ }
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ } else {
+
+/* Form C := alpha*A'*A + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp += a[l + i__ * a_dim1] * a[l + j * a_dim1];
+/* L190: */
+ }
+ if (*beta == 0.) {
+ c__[i__ + j * c_dim1] = *alpha * temp;
+ } else {
+ c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+ i__ + j * c_dim1];
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ temp = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp += a[l + i__ * a_dim1] * a[l + j * a_dim1];
+/* L220: */
+ }
+ if (*beta == 0.) {
+ c__[i__ + j * c_dim1] = *alpha * temp;
+ } else {
+ c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+ i__ + j * c_dim1];
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DSYRK . */
+
+} /* dsyrk_ */
diff --git a/contrib/libs/cblas/dtbmv.c b/contrib/libs/cblas/dtbmv.c
new file mode 100644
index 0000000000..2095d82a7a
--- /dev/null
+++ b/contrib/libs/cblas/dtbmv.c
@@ -0,0 +1,422 @@
+/* dtbmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dtbmv_(char *uplo, char *trans, char *diag, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j, l, ix, jx, kx, info;
+ doublereal temp;
+ extern logical lsame_(char *, char *);
+ integer kplus1;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTBMV performs one of the matrix-vector operations */
+
+/* x := A*x, or x := A'*x, */
+
+/* where x is an n element vector and A is an n by n unit, or non-unit, */
+/* upper or lower triangular band matrix, with ( k + 1 ) diagonals. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' x := A*x. */
+
+/* TRANS = 'T' or 't' x := A'*x. */
+
+/* TRANS = 'C' or 'c' x := A'*x. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry with UPLO = 'U' or 'u', K specifies the number of */
+/* super-diagonals of the matrix A. */
+/* On entry with UPLO = 'L' or 'l', K specifies the number of */
+/* sub-diagonals of the matrix A. */
+/* K must satisfy 0 .le. K. */
+/* Unchanged on exit. */
+
+/* A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */
+/* by n part of the array A must contain the upper triangular */
+/* band part of the matrix of coefficients, supplied column by */
+/* column, with the leading diagonal of the matrix in row */
+/* ( k + 1 ) of the array, the first super-diagonal starting at */
+/* position 2 in row k, and so on. The top left k by k triangle */
+/* of the array A is not referenced. */
+/* The following program segment will transfer an upper */
+/* triangular band matrix from conventional full matrix storage */
+/* to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = K + 1 - J */
+/* DO 10, I = MAX( 1, J - K ), J */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */
+/* by n part of the array A must contain the lower triangular */
+/* band part of the matrix of coefficients, supplied column by */
+/* column, with the leading diagonal of the matrix in row 1 of */
+/* the array, the first sub-diagonal starting at position 1 in */
+/* row 2, and so on. The bottom right k by k triangle of the */
+/* array A is not referenced. */
+/* The following program segment will transfer a lower */
+/* triangular band matrix from conventional full matrix storage */
+/* to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = 1 - J */
+/* DO 10, I = J, MIN( N, J + K ) */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Note that when DIAG = 'U' or 'u' the elements of the array A */
+/* corresponding to the diagonal elements of the matrix are not */
+/* referenced, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* ( k + 1 ). */
+/* Unchanged on exit. */
+
+/* X - DOUBLE PRECISION array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. On exit, X is overwritten with the */
+/*           transformed vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*k < 0) {
+ info = 5;
+ } else if (*lda < *k + 1) {
+ info = 7;
+ } else if (*incx == 0) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("DTBMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := A*x. */
+
+ if (lsame_(uplo, "U")) {
+ kplus1 = *k + 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0.) {
+ temp = x[j];
+ l = kplus1 - j;
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__4 = j - 1;
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ x[i__] += temp * a[l + i__ + j * a_dim1];
+/* L10: */
+ }
+ if (nounit) {
+ x[j] *= a[kplus1 + j * a_dim1];
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.) {
+ temp = x[jx];
+ ix = kx;
+ l = kplus1 - j;
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *k;
+ i__3 = j - 1;
+ for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+ x[ix] += temp * a[l + i__ + j * a_dim1];
+ ix += *incx;
+/* L30: */
+ }
+ if (nounit) {
+ x[jx] *= a[kplus1 + j * a_dim1];
+ }
+ }
+ jx += *incx;
+ if (j > *k) {
+ kx += *incx;
+ }
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ if (x[j] != 0.) {
+ temp = x[j];
+ l = 1 - j;
+/* Computing MIN */
+ i__1 = *n, i__3 = j + *k;
+ i__4 = j + 1;
+ for (i__ = min(i__1,i__3); i__ >= i__4; --i__) {
+ x[i__] += temp * a[l + i__ + j * a_dim1];
+/* L50: */
+ }
+ if (nounit) {
+ x[j] *= a[j * a_dim1 + 1];
+ }
+ }
+/* L60: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ if (x[jx] != 0.) {
+ temp = x[jx];
+ ix = kx;
+ l = 1 - j;
+/* Computing MIN */
+ i__4 = *n, i__1 = j + *k;
+ i__3 = j + 1;
+ for (i__ = min(i__4,i__1); i__ >= i__3; --i__) {
+ x[ix] += temp * a[l + i__ + j * a_dim1];
+ ix -= *incx;
+/* L70: */
+ }
+ if (nounit) {
+ x[jx] *= a[j * a_dim1 + 1];
+ }
+ }
+ jx -= *incx;
+ if (*n - j >= *k) {
+ kx -= *incx;
+ }
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := A'*x. */
+
+ if (lsame_(uplo, "U")) {
+ kplus1 = *k + 1;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ temp = x[j];
+ l = kplus1 - j;
+ if (nounit) {
+ temp *= a[kplus1 + j * a_dim1];
+ }
+/* Computing MAX */
+ i__4 = 1, i__1 = j - *k;
+ i__3 = max(i__4,i__1);
+ for (i__ = j - 1; i__ >= i__3; --i__) {
+ temp += a[l + i__ + j * a_dim1] * x[i__];
+/* L90: */
+ }
+ x[j] = temp;
+/* L100: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ temp = x[jx];
+ kx -= *incx;
+ ix = kx;
+ l = kplus1 - j;
+ if (nounit) {
+ temp *= a[kplus1 + j * a_dim1];
+ }
+/* Computing MAX */
+ i__4 = 1, i__1 = j - *k;
+ i__3 = max(i__4,i__1);
+ for (i__ = j - 1; i__ >= i__3; --i__) {
+ temp += a[l + i__ + j * a_dim1] * x[ix];
+ ix -= *incx;
+/* L110: */
+ }
+ x[jx] = temp;
+ jx -= *incx;
+/* L120: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__3 = *n;
+ for (j = 1; j <= i__3; ++j) {
+ temp = x[j];
+ l = 1 - j;
+ if (nounit) {
+ temp *= a[j * a_dim1 + 1];
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = j + *k;
+ i__4 = min(i__1,i__2);
+ for (i__ = j + 1; i__ <= i__4; ++i__) {
+ temp += a[l + i__ + j * a_dim1] * x[i__];
+/* L130: */
+ }
+ x[j] = temp;
+/* L140: */
+ }
+ } else {
+ jx = kx;
+ i__3 = *n;
+ for (j = 1; j <= i__3; ++j) {
+ temp = x[jx];
+ kx += *incx;
+ ix = kx;
+ l = 1 - j;
+ if (nounit) {
+ temp *= a[j * a_dim1 + 1];
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = j + *k;
+ i__4 = min(i__1,i__2);
+ for (i__ = j + 1; i__ <= i__4; ++i__) {
+ temp += a[l + i__ + j * a_dim1] * x[ix];
+ ix += *incx;
+/* L150: */
+ }
+ x[jx] = temp;
+ jx += *incx;
+/* L160: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DTBMV . */
+
+} /* dtbmv_ */
diff --git a/contrib/libs/cblas/dtbsv.c b/contrib/libs/cblas/dtbsv.c
new file mode 100644
index 0000000000..dd40400a49
--- /dev/null
+++ b/contrib/libs/cblas/dtbsv.c
@@ -0,0 +1,426 @@
+/* dtbsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dtbsv_(char *uplo, char *trans, char *diag, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j, l, ix, jx, kx, info;
+ doublereal temp;
+ extern logical lsame_(char *, char *);
+ integer kplus1;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTBSV solves one of the systems of equations */
+
+/* A*x = b, or A'*x = b, */
+
+/* where b and x are n element vectors and A is an n by n unit, or */
+/* non-unit, upper or lower triangular band matrix, with ( k + 1 ) */
+/* diagonals. */
+
+/* No test for singularity or near-singularity is included in this */
+/* routine. Such tests must be performed before calling this routine. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the equations to be solved as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' A*x = b. */
+
+/* TRANS = 'T' or 't' A'*x = b. */
+
+/* TRANS = 'C' or 'c' A'*x = b. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry with UPLO = 'U' or 'u', K specifies the number of */
+/* super-diagonals of the matrix A. */
+/* On entry with UPLO = 'L' or 'l', K specifies the number of */
+/* sub-diagonals of the matrix A. */
+/* K must satisfy 0 .le. K. */
+/* Unchanged on exit. */
+
+/* A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */
+/* by n part of the array A must contain the upper triangular */
+/* band part of the matrix of coefficients, supplied column by */
+/* column, with the leading diagonal of the matrix in row */
+/* ( k + 1 ) of the array, the first super-diagonal starting at */
+/* position 2 in row k, and so on. The top left k by k triangle */
+/* of the array A is not referenced. */
+/* The following program segment will transfer an upper */
+/* triangular band matrix from conventional full matrix storage */
+/* to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = K + 1 - J */
+/* DO 10, I = MAX( 1, J - K ), J */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */
+/* by n part of the array A must contain the lower triangular */
+/* band part of the matrix of coefficients, supplied column by */
+/* column, with the leading diagonal of the matrix in row 1 of */
+/* the array, the first sub-diagonal starting at position 1 in */
+/* row 2, and so on. The bottom right k by k triangle of the */
+/* array A is not referenced. */
+/* The following program segment will transfer a lower */
+/* triangular band matrix from conventional full matrix storage */
+/* to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = 1 - J */
+/* DO 10, I = J, MIN( N, J + K ) */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Note that when DIAG = 'U' or 'u' the elements of the array A */
+/* corresponding to the diagonal elements of the matrix are not */
+/* referenced, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* ( k + 1 ). */
+/* Unchanged on exit. */
+
+/* X - DOUBLE PRECISION array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element right-hand side vector b. On exit, X is overwritten */
+/* with the solution vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*k < 0) {
+ info = 5;
+ } else if (*lda < *k + 1) {
+ info = 7;
+ } else if (*incx == 0) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("DTBSV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/*     accessed sequentially with one pass through A. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := inv( A )*x. */
+
+ if (lsame_(uplo, "U")) {
+ kplus1 = *k + 1;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ if (x[j] != 0.) {
+ l = kplus1 - j;
+ if (nounit) {
+ x[j] /= a[kplus1 + j * a_dim1];
+ }
+ temp = x[j];
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__1 = max(i__2,i__3);
+ for (i__ = j - 1; i__ >= i__1; --i__) {
+ x[i__] -= temp * a[l + i__ + j * a_dim1];
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ kx -= *incx;
+ if (x[jx] != 0.) {
+ ix = kx;
+ l = kplus1 - j;
+ if (nounit) {
+ x[jx] /= a[kplus1 + j * a_dim1];
+ }
+ temp = x[jx];
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__1 = max(i__2,i__3);
+ for (i__ = j - 1; i__ >= i__1; --i__) {
+ x[ix] -= temp * a[l + i__ + j * a_dim1];
+ ix -= *incx;
+/* L30: */
+ }
+ }
+ jx -= *incx;
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0.) {
+ l = 1 - j;
+ if (nounit) {
+ x[j] /= a[j * a_dim1 + 1];
+ }
+ temp = x[j];
+/* Computing MIN */
+ i__3 = *n, i__4 = j + *k;
+ i__2 = min(i__3,i__4);
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ x[i__] -= temp * a[l + i__ + j * a_dim1];
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ kx += *incx;
+ if (x[jx] != 0.) {
+ ix = kx;
+ l = 1 - j;
+ if (nounit) {
+ x[jx] /= a[j * a_dim1 + 1];
+ }
+ temp = x[jx];
+/* Computing MIN */
+ i__3 = *n, i__4 = j + *k;
+ i__2 = min(i__3,i__4);
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ x[ix] -= temp * a[l + i__ + j * a_dim1];
+ ix += *incx;
+/* L70: */
+ }
+ }
+ jx += *incx;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := inv( A')*x. */
+
+ if (lsame_(uplo, "U")) {
+ kplus1 = *k + 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[j];
+ l = kplus1 - j;
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__4 = j - 1;
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ temp -= a[l + i__ + j * a_dim1] * x[i__];
+/* L90: */
+ }
+ if (nounit) {
+ temp /= a[kplus1 + j * a_dim1];
+ }
+ x[j] = temp;
+/* L100: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[jx];
+ ix = kx;
+ l = kplus1 - j;
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *k;
+ i__3 = j - 1;
+ for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+ temp -= a[l + i__ + j * a_dim1] * x[ix];
+ ix += *incx;
+/* L110: */
+ }
+ if (nounit) {
+ temp /= a[kplus1 + j * a_dim1];
+ }
+ x[jx] = temp;
+ jx += *incx;
+ if (j > *k) {
+ kx += *incx;
+ }
+/* L120: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ temp = x[j];
+ l = 1 - j;
+/* Computing MIN */
+ i__1 = *n, i__3 = j + *k;
+ i__4 = j + 1;
+ for (i__ = min(i__1,i__3); i__ >= i__4; --i__) {
+ temp -= a[l + i__ + j * a_dim1] * x[i__];
+/* L130: */
+ }
+ if (nounit) {
+ temp /= a[j * a_dim1 + 1];
+ }
+ x[j] = temp;
+/* L140: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ temp = x[jx];
+ ix = kx;
+ l = 1 - j;
+/* Computing MIN */
+ i__4 = *n, i__1 = j + *k;
+ i__3 = j + 1;
+ for (i__ = min(i__4,i__1); i__ >= i__3; --i__) {
+ temp -= a[l + i__ + j * a_dim1] * x[ix];
+ ix -= *incx;
+/* L150: */
+ }
+ if (nounit) {
+ temp /= a[j * a_dim1 + 1];
+ }
+ x[jx] = temp;
+ jx -= *incx;
+ if (*n - j >= *k) {
+ kx -= *incx;
+ }
+/* L160: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DTBSV . */
+
+} /* dtbsv_ */
diff --git a/contrib/libs/cblas/dtpmv.c b/contrib/libs/cblas/dtpmv.c
new file mode 100644
index 0000000000..4bd5e70b46
--- /dev/null
+++ b/contrib/libs/cblas/dtpmv.c
@@ -0,0 +1,357 @@
+/* dtpmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dtpmv_(char *uplo, char *trans, char *diag, integer *n,
+ doublereal *ap, doublereal *x, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, k, kk, ix, jx, kx, info;
+ doublereal temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTPMV performs one of the matrix-vector operations */
+
+/* x := A*x, or x := A'*x, */
+
+/* where x is an n element vector and A is an n by n unit, or non-unit, */
+/* upper or lower triangular matrix, supplied in packed form. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' x := A*x. */
+
+/* TRANS = 'T' or 't' x := A'*x. */
+
+/* TRANS = 'C' or 'c' x := A'*x. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* AP - DOUBLE PRECISION array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular matrix packed sequentially, */
+/* column by column, so that AP( 1 ) contains a( 1, 1 ), */
+/* AP( 2 ) and AP( 3 ) contain a( 1, 2 ) and a( 2, 2 ) */
+/* respectively, and so on. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular matrix packed sequentially, */
+/* column by column, so that AP( 1 ) contains a( 1, 1 ), */
+/* AP( 2 ) and AP( 3 ) contain a( 2, 1 ) and a( 3, 1 ) */
+/* respectively, and so on. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* X - DOUBLE PRECISION array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. On exit, X is overwritten with the */
+/*           transformed vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --x;
+ --ap;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*incx == 0) {
+ info = 7;
+ }
+ if (info != 0) {
+ xerbla_("DTPMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of AP are */
+/* accessed sequentially with one pass through AP. */
+
+ if (lsame_(trans, "N")) {
+
+/*        Form  x := A*x. */
+
+ if (lsame_(uplo, "U")) {
+ kk = 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0.) {
+ temp = x[j];
+ k = kk;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[i__] += temp * ap[k];
+ ++k;
+/* L10: */
+ }
+ if (nounit) {
+ x[j] *= ap[kk + j - 1];
+ }
+ }
+ kk += j;
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.) {
+ temp = x[jx];
+ ix = kx;
+ i__2 = kk + j - 2;
+ for (k = kk; k <= i__2; ++k) {
+ x[ix] += temp * ap[k];
+ ix += *incx;
+/* L30: */
+ }
+ if (nounit) {
+ x[jx] *= ap[kk + j - 1];
+ }
+ }
+ jx += *incx;
+ kk += j;
+/* L40: */
+ }
+ }
+ } else {
+ kk = *n * (*n + 1) / 2;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ if (x[j] != 0.) {
+ temp = x[j];
+ k = kk;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ x[i__] += temp * ap[k];
+ --k;
+/* L50: */
+ }
+ if (nounit) {
+ x[j] *= ap[kk - *n + j];
+ }
+ }
+ kk -= *n - j + 1;
+/* L60: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ if (x[jx] != 0.) {
+ temp = x[jx];
+ ix = kx;
+ i__1 = kk - (*n - (j + 1));
+ for (k = kk; k >= i__1; --k) {
+ x[ix] += temp * ap[k];
+ ix -= *incx;
+/* L70: */
+ }
+ if (nounit) {
+ x[jx] *= ap[kk - *n + j];
+ }
+ }
+ jx -= *incx;
+ kk -= *n - j + 1;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := A'*x. */
+
+ if (lsame_(uplo, "U")) {
+ kk = *n * (*n + 1) / 2;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ temp = x[j];
+ if (nounit) {
+ temp *= ap[kk];
+ }
+ k = kk - 1;
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ temp += ap[k] * x[i__];
+ --k;
+/* L90: */
+ }
+ x[j] = temp;
+ kk -= j;
+/* L100: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ temp = x[jx];
+ ix = jx;
+ if (nounit) {
+ temp *= ap[kk];
+ }
+ i__1 = kk - j + 1;
+ for (k = kk - 1; k >= i__1; --k) {
+ ix -= *incx;
+ temp += ap[k] * x[ix];
+/* L110: */
+ }
+ x[jx] = temp;
+ jx -= *incx;
+ kk -= j;
+/* L120: */
+ }
+ }
+ } else {
+ kk = 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[j];
+ if (nounit) {
+ temp *= ap[kk];
+ }
+ k = kk + 1;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ temp += ap[k] * x[i__];
+ ++k;
+/* L130: */
+ }
+ x[j] = temp;
+ kk += *n - j + 1;
+/* L140: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[jx];
+ ix = jx;
+ if (nounit) {
+ temp *= ap[kk];
+ }
+ i__2 = kk + *n - j;
+ for (k = kk + 1; k <= i__2; ++k) {
+ ix += *incx;
+ temp += ap[k] * x[ix];
+/* L150: */
+ }
+ x[jx] = temp;
+ jx += *incx;
+ kk += *n - j + 1;
+/* L160: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DTPMV . */
+
+} /* dtpmv_ */
diff --git a/contrib/libs/cblas/dtpsv.c b/contrib/libs/cblas/dtpsv.c
new file mode 100644
index 0000000000..525424560f
--- /dev/null
+++ b/contrib/libs/cblas/dtpsv.c
@@ -0,0 +1,360 @@
+/* dtpsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dtpsv_(char *uplo, char *trans, char *diag, integer *n,
+ doublereal *ap, doublereal *x, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, k, kk, ix, jx, kx, info;
+ doublereal temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTPSV solves one of the systems of equations */
+
+/* A*x = b, or A'*x = b, */
+
+/* where b and x are n element vectors and A is an n by n unit, or */
+/* non-unit, upper or lower triangular matrix, supplied in packed form. */
+
+/* No test for singularity or near-singularity is included in this */
+/* routine. Such tests must be performed before calling this routine. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the equations to be solved as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' A*x = b. */
+
+/* TRANS = 'T' or 't' A'*x = b. */
+
+/* TRANS = 'C' or 'c' A'*x = b. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* AP - DOUBLE PRECISION array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular matrix packed sequentially, */
+/* column by column, so that AP( 1 ) contains a( 1, 1 ), */
+/* AP( 2 ) and AP( 3 ) contain a( 1, 2 ) and a( 2, 2 ) */
+/* respectively, and so on. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular matrix packed sequentially, */
+/* column by column, so that AP( 1 ) contains a( 1, 1 ), */
+/* AP( 2 ) and AP( 3 ) contain a( 2, 1 ) and a( 3, 1 ) */
+/* respectively, and so on. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* X - DOUBLE PRECISION array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element right-hand side vector b. On exit, X is overwritten */
+/* with the solution vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --x;
+ --ap;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*incx == 0) {
+ info = 7;
+ }
+ if (info != 0) {
+ xerbla_("DTPSV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of AP are */
+/* accessed sequentially with one pass through AP. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := inv( A )*x. */
+
+ if (lsame_(uplo, "U")) {
+ kk = *n * (*n + 1) / 2;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ if (x[j] != 0.) {
+ if (nounit) {
+ x[j] /= ap[kk];
+ }
+ temp = x[j];
+ k = kk - 1;
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ x[i__] -= temp * ap[k];
+ --k;
+/* L10: */
+ }
+ }
+ kk -= j;
+/* L20: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ if (x[jx] != 0.) {
+ if (nounit) {
+ x[jx] /= ap[kk];
+ }
+ temp = x[jx];
+ ix = jx;
+ i__1 = kk - j + 1;
+ for (k = kk - 1; k >= i__1; --k) {
+ ix -= *incx;
+ x[ix] -= temp * ap[k];
+/* L30: */
+ }
+ }
+ jx -= *incx;
+ kk -= j;
+/* L40: */
+ }
+ }
+ } else {
+ kk = 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0.) {
+ if (nounit) {
+ x[j] /= ap[kk];
+ }
+ temp = x[j];
+ k = kk + 1;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ x[i__] -= temp * ap[k];
+ ++k;
+/* L50: */
+ }
+ }
+ kk += *n - j + 1;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.) {
+ if (nounit) {
+ x[jx] /= ap[kk];
+ }
+ temp = x[jx];
+ ix = jx;
+ i__2 = kk + *n - j;
+ for (k = kk + 1; k <= i__2; ++k) {
+ ix += *incx;
+ x[ix] -= temp * ap[k];
+/* L70: */
+ }
+ }
+ jx += *incx;
+ kk += *n - j + 1;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := inv( A' )*x. */
+
+ if (lsame_(uplo, "U")) {
+ kk = 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[j];
+ k = kk;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp -= ap[k] * x[i__];
+ ++k;
+/* L90: */
+ }
+ if (nounit) {
+ temp /= ap[kk + j - 1];
+ }
+ x[j] = temp;
+ kk += j;
+/* L100: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[jx];
+ ix = kx;
+ i__2 = kk + j - 2;
+ for (k = kk; k <= i__2; ++k) {
+ temp -= ap[k] * x[ix];
+ ix += *incx;
+/* L110: */
+ }
+ if (nounit) {
+ temp /= ap[kk + j - 1];
+ }
+ x[jx] = temp;
+ jx += *incx;
+ kk += j;
+/* L120: */
+ }
+ }
+ } else {
+ kk = *n * (*n + 1) / 2;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ temp = x[j];
+ k = kk;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ temp -= ap[k] * x[i__];
+ --k;
+/* L130: */
+ }
+ if (nounit) {
+ temp /= ap[kk - *n + j];
+ }
+ x[j] = temp;
+ kk -= *n - j + 1;
+/* L140: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ temp = x[jx];
+ ix = kx;
+ i__1 = kk - (*n - (j + 1));
+ for (k = kk; k >= i__1; --k) {
+ temp -= ap[k] * x[ix];
+ ix -= *incx;
+/* L150: */
+ }
+ if (nounit) {
+ temp /= ap[kk - *n + j];
+ }
+ x[jx] = temp;
+ jx -= *incx;
+ kk -= *n - j + 1;
+/* L160: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DTPSV . */
+
+} /* dtpsv_ */
diff --git a/contrib/libs/cblas/dtrmm.c b/contrib/libs/cblas/dtrmm.c
new file mode 100644
index 0000000000..4bf834f550
--- /dev/null
+++ b/contrib/libs/cblas/dtrmm.c
@@ -0,0 +1,453 @@
+/* dtrmm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dtrmm_(char *side, char *uplo, char *transa, char *diag,
+ integer *m, integer *n, doublereal *alpha, doublereal *a, integer *
+ lda, doublereal *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, k, info;
+ doublereal temp;
+ logical lside;
+ extern logical lsame_(char *, char *);
+ integer nrowa;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTRMM performs one of the matrix-matrix operations */
+
+/* B := alpha*op( A )*B, or B := alpha*B*op( A ), */
+
+/* where alpha is a scalar, B is an m by n matrix, A is a unit, or */
+/* non-unit, upper or lower triangular matrix and op( A ) is one of */
+
+/* op( A ) = A or op( A ) = A'. */
+
+/* Arguments */
+/* ========== */
+
+/* SIDE - CHARACTER*1. */
+/* On entry, SIDE specifies whether op( A ) multiplies B from */
+/* the left or right as follows: */
+
+/* SIDE = 'L' or 'l' B := alpha*op( A )*B. */
+
+/* SIDE = 'R' or 'r' B := alpha*B*op( A ). */
+
+/* Unchanged on exit. */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix A is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANSA - CHARACTER*1. */
+/* On entry, TRANSA specifies the form of op( A ) to be used in */
+/* the matrix multiplication as follows: */
+
+/* TRANSA = 'N' or 'n' op( A ) = A. */
+
+/* TRANSA = 'T' or 't' op( A ) = A'. */
+
+/* TRANSA = 'C' or 'c' op( A ) = A'. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit triangular */
+/* as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of B. M must be at */
+/* least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of B. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. When alpha is */
+/* zero then A is not referenced and B need not be set before */
+/* entry. */
+/* Unchanged on exit. */
+
+/* A - DOUBLE PRECISION array of DIMENSION ( LDA, k ), where k is m */
+/* when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'. */
+/* Before entry with UPLO = 'U' or 'u', the leading k by k */
+/* upper triangular part of the array A must contain the upper */
+/* triangular matrix and the strictly lower triangular part of */
+/* A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading k by k */
+/* lower triangular part of the array A must contain the lower */
+/* triangular matrix and the strictly upper triangular part of */
+/* A is not referenced. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced either, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When SIDE = 'L' or 'l' then */
+/* LDA must be at least max( 1, m ), when SIDE = 'R' or 'r' */
+/* then LDA must be at least max( 1, n ). */
+/* Unchanged on exit. */
+
+/* B - DOUBLE PRECISION array of DIMENSION ( LDB, n ). */
+/* Before entry, the leading m by n part of the array B must */
+/* contain the matrix B, and on exit is overwritten by the */
+/* transformed matrix. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. LDB must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ lside = lsame_(side, "L");
+ if (lside) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! lside && ! lsame_(side, "R")) {
+ info = 1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ info = 2;
+ } else if (! lsame_(transa, "N") && ! lsame_(transa,
+ "T") && ! lsame_(transa, "C")) {
+ info = 3;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 4;
+ } else if (*m < 0) {
+ info = 5;
+ } else if (*n < 0) {
+ info = 6;
+ } else if (*lda < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldb < max(1,*m)) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("DTRMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lside) {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*A*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ if (b[k + j * b_dim1] != 0.) {
+ temp = *alpha * b[k + j * b_dim1];
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] += temp * a[i__ + k *
+ a_dim1];
+/* L30: */
+ }
+ if (nounit) {
+ temp *= a[k + k * a_dim1];
+ }
+ b[k + j * b_dim1] = temp;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (k = *m; k >= 1; --k) {
+ if (b[k + j * b_dim1] != 0.) {
+ temp = *alpha * b[k + j * b_dim1];
+ b[k + j * b_dim1] = temp;
+ if (nounit) {
+ b[k + j * b_dim1] *= a[k + k * a_dim1];
+ }
+ i__2 = *m;
+ for (i__ = k + 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] += temp * a[i__ + k *
+ a_dim1];
+/* L60: */
+ }
+ }
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*A'*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ temp = b[i__ + j * b_dim1];
+ if (nounit) {
+ temp *= a[i__ + i__ * a_dim1];
+ }
+ i__2 = i__ - 1;
+ for (k = 1; k <= i__2; ++k) {
+ temp += a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L90: */
+ }
+ b[i__ + j * b_dim1] = *alpha * temp;
+/* L100: */
+ }
+/* L110: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = b[i__ + j * b_dim1];
+ if (nounit) {
+ temp *= a[i__ + i__ * a_dim1];
+ }
+ i__3 = *m;
+ for (k = i__ + 1; k <= i__3; ++k) {
+ temp += a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L120: */
+ }
+ b[i__ + j * b_dim1] = *alpha * temp;
+/* L130: */
+ }
+/* L140: */
+ }
+ }
+ }
+ } else {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*B*A. */
+
+ if (upper) {
+ for (j = *n; j >= 1; --j) {
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L150: */
+ }
+ i__1 = j - 1;
+ for (k = 1; k <= i__1; ++k) {
+ if (a[k + j * a_dim1] != 0.) {
+ temp = *alpha * a[k + j * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L160: */
+ }
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L190: */
+ }
+ i__2 = *n;
+ for (k = j + 1; k <= i__2; ++k) {
+ if (a[k + j * a_dim1] != 0.) {
+ temp = *alpha * a[k + j * a_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L200: */
+ }
+ }
+/* L210: */
+ }
+/* L220: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*B*A'. */
+
+ if (upper) {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k - 1;
+ for (j = 1; j <= i__2; ++j) {
+ if (a[j + k * a_dim1] != 0.) {
+ temp = *alpha * a[j + k * a_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[k + k * a_dim1];
+ }
+ if (temp != 1.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L250: */
+ }
+ }
+/* L260: */
+ }
+ } else {
+ for (k = *n; k >= 1; --k) {
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+ if (a[j + k * a_dim1] != 0.) {
+ temp = *alpha * a[j + k * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L270: */
+ }
+ }
+/* L280: */
+ }
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[k + k * a_dim1];
+ }
+ if (temp != 1.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L290: */
+ }
+ }
+/* L300: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DTRMM . */
+
+} /* dtrmm_ */
diff --git a/contrib/libs/cblas/dtrmv.c b/contrib/libs/cblas/dtrmv.c
new file mode 100644
index 0000000000..3acaa6d24b
--- /dev/null
+++ b/contrib/libs/cblas/dtrmv.c
@@ -0,0 +1,345 @@
+/* dtrmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dtrmv_(char *uplo, char *trans, char *diag, integer *n,
+ doublereal *a, integer *lda, doublereal *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, ix, jx, kx, info;
+ doublereal temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTRMV performs one of the matrix-vector operations */
+
+/* x := A*x, or x := A'*x, */
+
+/* where x is an n element vector and A is an n by n unit, or non-unit, */
+/* upper or lower triangular matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' x := A*x. */
+
+/* TRANS = 'T' or 't' x := A'*x. */
+
+/* TRANS = 'C' or 'c' x := A'*x. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular matrix and the strictly lower triangular part of */
+/* A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular matrix and the strictly upper triangular part of */
+/* A is not referenced. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced either, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+/* X - DOUBLE PRECISION array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. On exit, X is overwritten with the */
+/* tranformed vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*lda < max(1,*n)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ }
+ if (info != 0) {
+ xerbla_("DTRMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := A*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0.) {
+ temp = x[j];
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[i__] += temp * a[i__ + j * a_dim1];
+/* L10: */
+ }
+ if (nounit) {
+ x[j] *= a[j + j * a_dim1];
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.) {
+ temp = x[jx];
+ ix = kx;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[ix] += temp * a[i__ + j * a_dim1];
+ ix += *incx;
+/* L30: */
+ }
+ if (nounit) {
+ x[jx] *= a[j + j * a_dim1];
+ }
+ }
+ jx += *incx;
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ if (x[j] != 0.) {
+ temp = x[j];
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ x[i__] += temp * a[i__ + j * a_dim1];
+/* L50: */
+ }
+ if (nounit) {
+ x[j] *= a[j + j * a_dim1];
+ }
+ }
+/* L60: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ if (x[jx] != 0.) {
+ temp = x[jx];
+ ix = kx;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ x[ix] += temp * a[i__ + j * a_dim1];
+ ix -= *incx;
+/* L70: */
+ }
+ if (nounit) {
+ x[jx] *= a[j + j * a_dim1];
+ }
+ }
+ jx -= *incx;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := A'*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ temp = x[j];
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ temp += a[i__ + j * a_dim1] * x[i__];
+/* L90: */
+ }
+ x[j] = temp;
+/* L100: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ temp = x[jx];
+ ix = jx;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ ix -= *incx;
+ temp += a[i__ + j * a_dim1] * x[ix];
+/* L110: */
+ }
+ x[jx] = temp;
+ jx -= *incx;
+/* L120: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[j];
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ temp += a[i__ + j * a_dim1] * x[i__];
+/* L130: */
+ }
+ x[j] = temp;
+/* L140: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[jx];
+ ix = jx;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ temp += a[i__ + j * a_dim1] * x[ix];
+/* L150: */
+ }
+ x[jx] = temp;
+ jx += *incx;
+/* L160: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DTRMV . */
+
+} /* dtrmv_ */
diff --git a/contrib/libs/cblas/dtrsm.c b/contrib/libs/cblas/dtrsm.c
new file mode 100644
index 0000000000..84ee3e7172
--- /dev/null
+++ b/contrib/libs/cblas/dtrsm.c
@@ -0,0 +1,490 @@
+/* dtrsm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dtrsm_(char *side, char *uplo, char *transa, char *diag,
+ integer *m, integer *n, doublereal *alpha, doublereal *a, integer *
+ lda, doublereal *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, k, info;
+ doublereal temp;
+ logical lside;
+ extern logical lsame_(char *, char *);
+ integer nrowa;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTRSM solves one of the matrix equations */
+
+/* op( A )*X = alpha*B, or X*op( A ) = alpha*B, */
+
+/* where alpha is a scalar, X and B are m by n matrices, A is a unit, or */
+/* non-unit, upper or lower triangular matrix and op( A ) is one of */
+
+/* op( A ) = A or op( A ) = A'. */
+
+/* The matrix X is overwritten on B. */
+
+/* Arguments */
+/* ========== */
+
+/* SIDE - CHARACTER*1. */
+/* On entry, SIDE specifies whether op( A ) appears on the left */
+/* or right of X as follows: */
+
+/* SIDE = 'L' or 'l' op( A )*X = alpha*B. */
+
+/* SIDE = 'R' or 'r' X*op( A ) = alpha*B. */
+
+/* Unchanged on exit. */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix A is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANSA - CHARACTER*1. */
+/* On entry, TRANSA specifies the form of op( A ) to be used in */
+/* the matrix multiplication as follows: */
+
+/* TRANSA = 'N' or 'n' op( A ) = A. */
+
+/* TRANSA = 'T' or 't' op( A ) = A'. */
+
+/* TRANSA = 'C' or 'c' op( A ) = A'. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit triangular */
+/* as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of B. M must be at */
+/* least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of B. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. When alpha is */
+/* zero then A is not referenced and B need not be set before */
+/* entry. */
+/* Unchanged on exit. */
+
+/* A - DOUBLE PRECISION array of DIMENSION ( LDA, k ), where k is m */
+/* when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'. */
+/* Before entry with UPLO = 'U' or 'u', the leading k by k */
+/* upper triangular part of the array A must contain the upper */
+/* triangular matrix and the strictly lower triangular part of */
+/* A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading k by k */
+/* lower triangular part of the array A must contain the lower */
+/* triangular matrix and the strictly upper triangular part of */
+/* A is not referenced. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced either, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When SIDE = 'L' or 'l' then */
+/* LDA must be at least max( 1, m ), when SIDE = 'R' or 'r' */
+/* then LDA must be at least max( 1, n ). */
+/* Unchanged on exit. */
+
+/* B - DOUBLE PRECISION array of DIMENSION ( LDB, n ). */
+/* Before entry, the leading m by n part of the array B must */
+/* contain the right-hand side matrix B, and on exit is */
+/* overwritten by the solution matrix X. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. LDB must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ lside = lsame_(side, "L");
+ if (lside) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! lside && ! lsame_(side, "R")) {
+ info = 1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ info = 2;
+ } else if (! lsame_(transa, "N") && ! lsame_(transa,
+ "T") && ! lsame_(transa, "C")) {
+ info = 3;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 4;
+ } else if (*m < 0) {
+ info = 5;
+ } else if (*n < 0) {
+ info = 6;
+ } else if (*lda < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldb < max(1,*m)) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("DTRSM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lside) {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*inv( A )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*alpha != 1.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+ ;
+/* L30: */
+ }
+ }
+ for (k = *m; k >= 1; --k) {
+ if (b[k + j * b_dim1] != 0.) {
+ if (nounit) {
+ b[k + j * b_dim1] /= a[k + k * a_dim1];
+ }
+ i__2 = k - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] -= b[k + j * b_dim1] * a[
+ i__ + k * a_dim1];
+/* L40: */
+ }
+ }
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*alpha != 1.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+ ;
+/* L70: */
+ }
+ }
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ if (b[k + j * b_dim1] != 0.) {
+ if (nounit) {
+ b[k + j * b_dim1] /= a[k + k * a_dim1];
+ }
+ i__3 = *m;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] -= b[k + j * b_dim1] * a[
+ i__ + k * a_dim1];
+/* L80: */
+ }
+ }
+/* L90: */
+ }
+/* L100: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*inv( A' )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = *alpha * b[i__ + j * b_dim1];
+ i__3 = i__ - 1;
+ for (k = 1; k <= i__3; ++k) {
+ temp -= a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L110: */
+ }
+ if (nounit) {
+ temp /= a[i__ + i__ * a_dim1];
+ }
+ b[i__ + j * b_dim1] = temp;
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ temp = *alpha * b[i__ + j * b_dim1];
+ i__2 = *m;
+ for (k = i__ + 1; k <= i__2; ++k) {
+ temp -= a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L140: */
+ }
+ if (nounit) {
+ temp /= a[i__ + i__ * a_dim1];
+ }
+ b[i__ + j * b_dim1] = temp;
+/* L150: */
+ }
+/* L160: */
+ }
+ }
+ }
+ } else {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*B*inv( A ). */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*alpha != 1.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+ ;
+/* L170: */
+ }
+ }
+ i__2 = j - 1;
+ for (k = 1; k <= i__2; ++k) {
+ if (a[k + j * a_dim1] != 0.) {
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] -= a[k + j * a_dim1] * b[
+ i__ + k * b_dim1];
+/* L180: */
+ }
+ }
+/* L190: */
+ }
+ if (nounit) {
+ temp = 1. / a[j + j * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L200: */
+ }
+ }
+/* L210: */
+ }
+ } else {
+ for (j = *n; j >= 1; --j) {
+ if (*alpha != 1.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+ ;
+/* L220: */
+ }
+ }
+ i__1 = *n;
+ for (k = j + 1; k <= i__1; ++k) {
+ if (a[k + j * a_dim1] != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] -= a[k + j * a_dim1] * b[
+ i__ + k * b_dim1];
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+ if (nounit) {
+ temp = 1. / a[j + j * a_dim1];
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L250: */
+ }
+ }
+/* L260: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*B*inv( A' ). */
+
+ if (upper) {
+ for (k = *n; k >= 1; --k) {
+ if (nounit) {
+ temp = 1. / a[k + k * a_dim1];
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L270: */
+ }
+ }
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ if (a[j + k * a_dim1] != 0.) {
+ temp = a[j + k * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] -= temp * b[i__ + k *
+ b_dim1];
+/* L280: */
+ }
+ }
+/* L290: */
+ }
+ if (*alpha != 1.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + k * b_dim1] = *alpha * b[i__ + k * b_dim1]
+ ;
+/* L300: */
+ }
+ }
+/* L310: */
+ }
+ } else {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (nounit) {
+ temp = 1. / a[k + k * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L320: */
+ }
+ }
+ i__2 = *n;
+ for (j = k + 1; j <= i__2; ++j) {
+ if (a[j + k * a_dim1] != 0.) {
+ temp = a[j + k * a_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] -= temp * b[i__ + k *
+ b_dim1];
+/* L330: */
+ }
+ }
+/* L340: */
+ }
+ if (*alpha != 1.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + k * b_dim1] = *alpha * b[i__ + k * b_dim1]
+ ;
+/* L350: */
+ }
+ }
+/* L360: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DTRSM . */
+
+} /* dtrsm_ */
diff --git a/contrib/libs/cblas/dtrsv.c b/contrib/libs/cblas/dtrsv.c
new file mode 100644
index 0000000000..ef9f1b44c7
--- /dev/null
+++ b/contrib/libs/cblas/dtrsv.c
@@ -0,0 +1,348 @@
+/* dtrsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dtrsv_(char *uplo, char *trans, char *diag, integer *n,
+ doublereal *a, integer *lda, doublereal *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, ix, jx, kx, info;
+ doublereal temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTRSV solves one of the systems of equations */
+
+/* A*x = b, or A'*x = b, */
+
+/* where b and x are n element vectors and A is an n by n unit, or */
+/* non-unit, upper or lower triangular matrix. */
+
+/* No test for singularity or near-singularity is included in this */
+/* routine. Such tests must be performed before calling this routine. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the equations to be solved as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' A*x = b. */
+
+/* TRANS = 'T' or 't' A'*x = b. */
+
+/* TRANS = 'C' or 'c' A'*x = b. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular matrix and the strictly lower triangular part of */
+/* A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular matrix and the strictly upper triangular part of */
+/* A is not referenced. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced either, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+/* X - DOUBLE PRECISION array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element right-hand side vector b. On exit, X is overwritten */
+/* with the solution vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*lda < max(1,*n)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ }
+ if (info != 0) {
+ xerbla_("DTRSV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := inv( A )*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ if (x[j] != 0.) {
+ if (nounit) {
+ x[j] /= a[j + j * a_dim1];
+ }
+ temp = x[j];
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ x[i__] -= temp * a[i__ + j * a_dim1];
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ if (x[jx] != 0.) {
+ if (nounit) {
+ x[jx] /= a[j + j * a_dim1];
+ }
+ temp = x[jx];
+ ix = jx;
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ ix -= *incx;
+ x[ix] -= temp * a[i__ + j * a_dim1];
+/* L30: */
+ }
+ }
+ jx -= *incx;
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0.) {
+ if (nounit) {
+ x[j] /= a[j + j * a_dim1];
+ }
+ temp = x[j];
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ x[i__] -= temp * a[i__ + j * a_dim1];
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.) {
+ if (nounit) {
+ x[jx] /= a[j + j * a_dim1];
+ }
+ temp = x[jx];
+ ix = jx;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ x[ix] -= temp * a[i__ + j * a_dim1];
+/* L70: */
+ }
+ }
+ jx += *incx;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := inv( A' )*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[j];
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp -= a[i__ + j * a_dim1] * x[i__];
+/* L90: */
+ }
+ if (nounit) {
+ temp /= a[j + j * a_dim1];
+ }
+ x[j] = temp;
+/* L100: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[jx];
+ ix = kx;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp -= a[i__ + j * a_dim1] * x[ix];
+ ix += *incx;
+/* L110: */
+ }
+ if (nounit) {
+ temp /= a[j + j * a_dim1];
+ }
+ x[jx] = temp;
+ jx += *incx;
+/* L120: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ temp = x[j];
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ temp -= a[i__ + j * a_dim1] * x[i__];
+/* L130: */
+ }
+ if (nounit) {
+ temp /= a[j + j * a_dim1];
+ }
+ x[j] = temp;
+/* L140: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ temp = x[jx];
+ ix = kx;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ temp -= a[i__ + j * a_dim1] * x[ix];
+ ix -= *incx;
+/* L150: */
+ }
+ if (nounit) {
+ temp /= a[j + j * a_dim1];
+ }
+ x[jx] = temp;
+ jx -= *incx;
+/* L160: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DTRSV . */
+
+} /* dtrsv_ */
diff --git a/contrib/libs/cblas/dzasum.c b/contrib/libs/cblas/dzasum.c
new file mode 100644
index 0000000000..6a9354455f
--- /dev/null
+++ b/contrib/libs/cblas/dzasum.c
@@ -0,0 +1,80 @@
+/* dzasum.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+doublereal dzasum_(integer *n, doublecomplex *zx, integer *incx)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal ret_val;
+
+ /* Local variables */
+ integer i__, ix;
+ doublereal stemp;
+ extern doublereal dcabs1_(doublecomplex *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* takes the sum of the absolute values. */
+/* jack dongarra, 3/11/78. */
+/* modified 3/93 to return if incx .le. 0. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --zx;
+
+ /* Function Body */
+ ret_val = 0.;
+ stemp = 0.;
+ if (*n <= 0 || *incx <= 0) {
+ return ret_val;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ ix = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ stemp += dcabs1_(&zx[ix]);
+ ix += *incx;
+/* L10: */
+ }
+ ret_val = stemp;
+ return ret_val;
+
+/* code for increment equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ stemp += dcabs1_(&zx[i__]);
+/* L30: */
+ }
+ ret_val = stemp;
+ return ret_val;
+} /* dzasum_ */
diff --git a/contrib/libs/cblas/dznrm2.c b/contrib/libs/cblas/dznrm2.c
new file mode 100644
index 0000000000..e6235447b2
--- /dev/null
+++ b/contrib/libs/cblas/dznrm2.c
@@ -0,0 +1,108 @@
+/* dznrm2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+doublereal dznrm2_(integer *n, doublecomplex *x, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ doublereal ret_val, d__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *), sqrt(doublereal);
+
+ /* Local variables */
+ integer ix;
+ doublereal ssq, temp, norm, scale;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DZNRM2 returns the euclidean norm of a vector via the function */
+/* name, so that */
+
+/* DZNRM2 := sqrt( conjg( x' )*x ) */
+
+
+/* -- This version written on 25-October-1982. */
+/* Modified on 14-October-1993 to inline the call to ZLASSQ. */
+/* Sven Hammarling, Nag Ltd. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*n < 1 || *incx < 1) {
+ norm = 0.;
+ } else {
+ scale = 0.;
+ ssq = 1.;
+/* The following loop is equivalent to this call to the LAPACK */
+/* auxiliary routine: */
+/* CALL ZLASSQ( N, X, INCX, SCALE, SSQ ) */
+
+ i__1 = (*n - 1) * *incx + 1;
+ i__2 = *incx;
+ for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
+ i__3 = ix;
+ if (x[i__3].r != 0.) {
+ i__3 = ix;
+ temp = (d__1 = x[i__3].r, abs(d__1));
+ if (scale < temp) {
+/* Computing 2nd power */
+ d__1 = scale / temp;
+ ssq = ssq * (d__1 * d__1) + 1.;
+ scale = temp;
+ } else {
+/* Computing 2nd power */
+ d__1 = temp / scale;
+ ssq += d__1 * d__1;
+ }
+ }
+ if (d_imag(&x[ix]) != 0.) {
+ temp = (d__1 = d_imag(&x[ix]), abs(d__1));
+ if (scale < temp) {
+/* Computing 2nd power */
+ d__1 = scale / temp;
+ ssq = ssq * (d__1 * d__1) + 1.;
+ scale = temp;
+ } else {
+/* Computing 2nd power */
+ d__1 = temp / scale;
+ ssq += d__1 * d__1;
+ }
+ }
+/* L10: */
+ }
+ norm = scale * sqrt(ssq);
+ }
+
+ ret_val = norm;
+ return ret_val;
+
+/* End of DZNRM2. */
+
+} /* dznrm2_ */
diff --git a/contrib/libs/cblas/icamax.c b/contrib/libs/cblas/icamax.c
new file mode 100644
index 0000000000..30a6277e53
--- /dev/null
+++ b/contrib/libs/cblas/icamax.c
@@ -0,0 +1,93 @@
+/* icamax.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+integer icamax_(integer *n, complex *cx, integer *incx)
+{
+ /* System generated locals */
+ integer ret_val, i__1;
+
+ /* Local variables */
+ integer i__, ix;
+ real smax;
+ extern doublereal scabs1_(complex *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* finds the index of element having max. absolute value. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 3/93 to return if incx .le. 0. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --cx;
+
+ /* Function Body */
+ ret_val = 0;
+ if (*n < 1 || *incx <= 0) {
+ return ret_val;
+ }
+ ret_val = 1;
+ if (*n == 1) {
+ return ret_val;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ ix = 1;
+ smax = scabs1_(&cx[1]);
+ ix += *incx;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ if (scabs1_(&cx[ix]) <= smax) {
+ goto L5;
+ }
+ ret_val = i__;
+ smax = scabs1_(&cx[ix]);
+L5:
+ ix += *incx;
+/* L10: */
+ }
+ return ret_val;
+
+/* code for increment equal to 1 */
+
+L20:
+ smax = scabs1_(&cx[1]);
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ if (scabs1_(&cx[i__]) <= smax) {
+ goto L30;
+ }
+ ret_val = i__;
+ smax = scabs1_(&cx[i__]);
+L30:
+ ;
+ }
+ return ret_val;
+} /* icamax_ */
diff --git a/contrib/libs/cblas/idamax.c b/contrib/libs/cblas/idamax.c
new file mode 100644
index 0000000000..9b9636a2de
--- /dev/null
+++ b/contrib/libs/cblas/idamax.c
@@ -0,0 +1,93 @@
+/* idamax.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+integer idamax_(integer *n, doublereal *dx, integer *incx)
+{
+ /* System generated locals */
+ integer ret_val, i__1;
+ doublereal d__1;
+
+ /* Local variables */
+ integer i__, ix;
+ doublereal dmax__;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* finds the index of element having max. absolute value. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 3/93 to return if incx .le. 0. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --dx;
+
+ /* Function Body */
+ ret_val = 0;
+ if (*n < 1 || *incx <= 0) {
+ return ret_val;
+ }
+ ret_val = 1;
+ if (*n == 1) {
+ return ret_val;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ ix = 1;
+ dmax__ = abs(dx[1]);
+ ix += *incx;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ if ((d__1 = dx[ix], abs(d__1)) <= dmax__) {
+ goto L5;
+ }
+ ret_val = i__;
+ dmax__ = (d__1 = dx[ix], abs(d__1));
+L5:
+ ix += *incx;
+/* L10: */
+ }
+ return ret_val;
+
+/* code for increment equal to 1 */
+
+L20:
+ dmax__ = abs(dx[1]);
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ if ((d__1 = dx[i__], abs(d__1)) <= dmax__) {
+ goto L30;
+ }
+ ret_val = i__;
+ dmax__ = (d__1 = dx[i__], abs(d__1));
+L30:
+ ;
+ }
+ return ret_val;
+} /* idamax_ */
diff --git a/contrib/libs/cblas/isamax.c b/contrib/libs/cblas/isamax.c
new file mode 100644
index 0000000000..227005442d
--- /dev/null
+++ b/contrib/libs/cblas/isamax.c
@@ -0,0 +1,93 @@
+/* isamax.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+integer isamax_(integer *n, real *sx, integer *incx)
+{
+ /* System generated locals */
+ integer ret_val, i__1;
+ real r__1;
+
+ /* Local variables */
+ integer i__, ix;
+ real smax;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* finds the index of element having max. absolute value. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 3/93 to return if incx .le. 0. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --sx;
+
+ /* Function Body */
+ ret_val = 0;
+ if (*n < 1 || *incx <= 0) {
+ return ret_val;
+ }
+ ret_val = 1;
+ if (*n == 1) {
+ return ret_val;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ ix = 1;
+ smax = dabs(sx[1]);
+ ix += *incx;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ if ((r__1 = sx[ix], dabs(r__1)) <= smax) {
+ goto L5;
+ }
+ ret_val = i__;
+ smax = (r__1 = sx[ix], dabs(r__1));
+L5:
+ ix += *incx;
+/* L10: */
+ }
+ return ret_val;
+
+/* code for increment equal to 1 */
+
+L20:
+ smax = dabs(sx[1]);
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ if ((r__1 = sx[i__], dabs(r__1)) <= smax) {
+ goto L30;
+ }
+ ret_val = i__;
+ smax = (r__1 = sx[i__], dabs(r__1));
+L30:
+ ;
+ }
+ return ret_val;
+} /* isamax_ */
diff --git a/contrib/libs/cblas/izamax.c b/contrib/libs/cblas/izamax.c
new file mode 100644
index 0000000000..b4bd3c3409
--- /dev/null
+++ b/contrib/libs/cblas/izamax.c
@@ -0,0 +1,93 @@
+/* izamax.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+integer izamax_(integer *n, doublecomplex *zx, integer *incx)
+{
+ /* System generated locals */
+ integer ret_val, i__1;
+
+ /* Local variables */
+ integer i__, ix;
+ doublereal smax;
+ extern doublereal dcabs1_(doublecomplex *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* finds the index of element having max. absolute value. */
+/* jack dongarra, 1/15/85. */
+/* modified 3/93 to return if incx .le. 0. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --zx;
+
+ /* Function Body */
+ ret_val = 0;
+ if (*n < 1 || *incx <= 0) {
+ return ret_val;
+ }
+ ret_val = 1;
+ if (*n == 1) {
+ return ret_val;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ ix = 1;
+ smax = dcabs1_(&zx[1]);
+ ix += *incx;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ if (dcabs1_(&zx[ix]) <= smax) {
+ goto L5;
+ }
+ ret_val = i__;
+ smax = dcabs1_(&zx[ix]);
+L5:
+ ix += *incx;
+/* L10: */
+ }
+ return ret_val;
+
+/* code for increment equal to 1 */
+
+L20:
+ smax = dcabs1_(&zx[1]);
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ if (dcabs1_(&zx[i__]) <= smax) {
+ goto L30;
+ }
+ ret_val = i__;
+ smax = dcabs1_(&zx[i__]);
+L30:
+ ;
+ }
+ return ret_val;
+} /* izamax_ */
diff --git a/contrib/libs/cblas/lsame.c b/contrib/libs/cblas/lsame.c
new file mode 100644
index 0000000000..0648674fa8
--- /dev/null
+++ b/contrib/libs/cblas/lsame.c
@@ -0,0 +1,117 @@
+/* lsame.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+logical lsame_(char *ca, char *cb)
+{
+ /* System generated locals */
+ logical ret_val;
+
+ /* Local variables */
+ integer inta, intb, zcode;
+
+
+/* -- LAPACK auxiliary routine (version 3.1) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* LSAME returns .TRUE. if CA is the same letter as CB regardless of */
+/* case. */
+
+/* Arguments */
+/* ========= */
+
+/* CA (input) CHARACTER*1 */
+
+/* CB (input) CHARACTER*1 */
+/* CA and CB specify the single characters to be compared. */
+
+/* ===================================================================== */
+
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+
+/* Test if the characters are equal */
+
+ ret_val = *(unsigned char *)ca == *(unsigned char *)cb;
+ if (ret_val) {
+ return ret_val;
+ }
+
+/* Now test for equivalence if both characters are alphabetic. */
+
+ zcode = 'Z';
+
+/* Use 'Z' rather than 'A' so that ASCII can be detected on Prime */
+/* machines, on which ICHAR returns a value with bit 8 set. */
+/* ICHAR('A') on Prime machines returns 193 which is the same as */
+/* ICHAR('A') on an EBCDIC machine. */
+
+ inta = *(unsigned char *)ca;
+ intb = *(unsigned char *)cb;
+
+ if (zcode == 90 || zcode == 122) {
+
+/* ASCII is assumed - ZCODE is the ASCII code of either lower or */
+/* upper case 'Z'. */
+
+ if (inta >= 97 && inta <= 122) {
+ inta += -32;
+ }
+ if (intb >= 97 && intb <= 122) {
+ intb += -32;
+ }
+
+ } else if (zcode == 233 || zcode == 169) {
+
+/* EBCDIC is assumed - ZCODE is the EBCDIC code of either lower or */
+/* upper case 'Z'. */
+
+ if (inta >= 129 && inta <= 137 || inta >= 145 && inta <= 153 || inta
+ >= 162 && inta <= 169) {
+ inta += 64;
+ }
+ if (intb >= 129 && intb <= 137 || intb >= 145 && intb <= 153 || intb
+ >= 162 && intb <= 169) {
+ intb += 64;
+ }
+
+ } else if (zcode == 218 || zcode == 250) {
+
+/* ASCII is assumed, on Prime machines - ZCODE is the ASCII code */
+/* plus 128 of either lower or upper case 'Z'. */
+
+ if (inta >= 225 && inta <= 250) {
+ inta += -32;
+ }
+ if (intb >= 225 && intb <= 250) {
+ intb += -32;
+ }
+ }
+ ret_val = inta == intb;
+
+/* RETURN */
+
+/* End of LSAME */
+
+ return ret_val;
+} /* lsame_ */
diff --git a/contrib/libs/cblas/sasum.c b/contrib/libs/cblas/sasum.c
new file mode 100644
index 0000000000..24d9799cae
--- /dev/null
+++ b/contrib/libs/cblas/sasum.c
@@ -0,0 +1,101 @@
+/* sasum.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+doublereal sasum_(integer *n, real *sx, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ real ret_val, r__1, r__2, r__3, r__4, r__5, r__6;
+
+ /* Local variables */
+ integer i__, m, mp1, nincx;
+ real stemp;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* takes the sum of the absolute values. */
+/* uses unrolled loops for increment equal to one. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 3/93 to return if incx .le. 0. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --sx;
+
+ /* Function Body */
+ ret_val = 0.f;
+ stemp = 0.f;
+ if (*n <= 0 || *incx <= 0) {
+ return ret_val;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ nincx = *n * *incx;
+ i__1 = nincx;
+ i__2 = *incx;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ stemp += (r__1 = sx[i__], dabs(r__1));
+/* L10: */
+ }
+ ret_val = stemp;
+ return ret_val;
+
+/* code for increment equal to 1 */
+
+
+/* clean-up loop */
+
+L20:
+ m = *n % 6;
+ if (m == 0) {
+ goto L40;
+ }
+ i__2 = m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ stemp += (r__1 = sx[i__], dabs(r__1));
+/* L30: */
+ }
+ if (*n < 6) {
+ goto L60;
+ }
+L40:
+ mp1 = m + 1;
+ i__2 = *n;
+ for (i__ = mp1; i__ <= i__2; i__ += 6) {
+ stemp = stemp + (r__1 = sx[i__], dabs(r__1)) + (r__2 = sx[i__ + 1],
+ dabs(r__2)) + (r__3 = sx[i__ + 2], dabs(r__3)) + (r__4 = sx[
+ i__ + 3], dabs(r__4)) + (r__5 = sx[i__ + 4], dabs(r__5)) + (
+ r__6 = sx[i__ + 5], dabs(r__6));
+/* L50: */
+ }
+L60:
+ ret_val = stemp;
+ return ret_val;
+} /* sasum_ */
diff --git a/contrib/libs/cblas/saxpy.c b/contrib/libs/cblas/saxpy.c
new file mode 100644
index 0000000000..f591cca330
--- /dev/null
+++ b/contrib/libs/cblas/saxpy.c
@@ -0,0 +1,107 @@
+/* saxpy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int saxpy_(integer *n, real *sa, real *sx, integer *incx,
+ real *sy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ integer i__, m, ix, iy, mp1;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SAXPY constant times a vector plus a vector. */
+/* uses unrolled loop for increments equal to one. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --sy;
+ --sx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*sa == 0.f) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/* code for unequal increments or equal increments */
+/* not equal to 1 */
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ sy[iy] += *sa * sx[ix];
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+
+
+/* clean-up loop */
+
+L20:
+ m = *n % 4;
+ if (m == 0) {
+ goto L40;
+ }
+ i__1 = m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ sy[i__] += *sa * sx[i__];
+/* L30: */
+ }
+ if (*n < 4) {
+ return 0;
+ }
+L40:
+ mp1 = m + 1;
+ i__1 = *n;
+ for (i__ = mp1; i__ <= i__1; i__ += 4) {
+ sy[i__] += *sa * sx[i__];
+ sy[i__ + 1] += *sa * sx[i__ + 1];
+ sy[i__ + 2] += *sa * sx[i__ + 2];
+ sy[i__ + 3] += *sa * sx[i__ + 3];
+/* L50: */
+ }
+ return 0;
+} /* saxpy_ */
diff --git a/contrib/libs/cblas/scabs1.c b/contrib/libs/cblas/scabs1.c
new file mode 100644
index 0000000000..d6c63fc626
--- /dev/null
+++ b/contrib/libs/cblas/scabs1.c
@@ -0,0 +1,36 @@
+/* scabs1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+doublereal scabs1_(complex *z__)
+{
+ /* System generated locals */
+ real ret_val, r__1, r__2;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SCABS1 computes absolute value of a complex number */
+
+/* .. Intrinsic Functions .. */
+/* .. */
+ ret_val = (r__1 = z__->r, dabs(r__1)) + (r__2 = r_imag(z__), dabs(r__2));
+ return ret_val;
+} /* scabs1_ */
diff --git a/contrib/libs/cblas/scasum.c b/contrib/libs/cblas/scasum.c
new file mode 100644
index 0000000000..d9345df0a7
--- /dev/null
+++ b/contrib/libs/cblas/scasum.c
@@ -0,0 +1,87 @@
+/* scasum.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+doublereal scasum_(integer *n, complex *cx, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ real ret_val, r__1, r__2;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer i__, nincx;
+ real stemp;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* takes the sum of the absolute values of a complex vector and */
+/* returns a single precision result. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 3/93 to return if incx .le. 0. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --cx;
+
+ /* Function Body */
+ ret_val = 0.f;
+ stemp = 0.f;
+ if (*n <= 0 || *incx <= 0) {
+ return ret_val;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ nincx = *n * *incx;
+ i__1 = nincx;
+ i__2 = *incx;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ i__3 = i__;
+ stemp = stemp + (r__1 = cx[i__3].r, dabs(r__1)) + (r__2 = r_imag(&cx[
+ i__]), dabs(r__2));
+/* L10: */
+ }
+ ret_val = stemp;
+ return ret_val;
+
+/* code for increment equal to 1 */
+
+L20:
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__1 = i__;
+ stemp = stemp + (r__1 = cx[i__1].r, dabs(r__1)) + (r__2 = r_imag(&cx[
+ i__]), dabs(r__2));
+/* L30: */
+ }
+ ret_val = stemp;
+ return ret_val;
+} /* scasum_ */
diff --git a/contrib/libs/cblas/scnrm2.c b/contrib/libs/cblas/scnrm2.c
new file mode 100644
index 0000000000..a965801f4b
--- /dev/null
+++ b/contrib/libs/cblas/scnrm2.c
@@ -0,0 +1,109 @@
+/* scnrm2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+doublereal scnrm2_(integer *n, complex *x, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ real ret_val, r__1;
+
+ /* Builtin functions */
+ double r_imag(complex *), sqrt(doublereal);
+
+ /* Local variables */
+ integer ix;
+ real ssq, temp, norm, scale;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SCNRM2 returns the euclidean norm of a vector via the function */
+/* name, so that */
+
+/* SCNRM2 := sqrt( conjg( x' )*x ) */
+
+
+
+/* -- This version written on 25-October-1982. */
+/* Modified on 14-October-1993 to inline the call to CLASSQ. */
+/* Sven Hammarling, Nag Ltd. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*n < 1 || *incx < 1) {
+ norm = 0.f;
+ } else {
+ scale = 0.f;
+ ssq = 1.f;
+/* The following loop is equivalent to this call to the LAPACK */
+/* auxiliary routine: */
+/* CALL CLASSQ( N, X, INCX, SCALE, SSQ ) */
+
+ i__1 = (*n - 1) * *incx + 1;
+ i__2 = *incx;
+ for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
+ i__3 = ix;
+ if (x[i__3].r != 0.f) {
+ i__3 = ix;
+ temp = (r__1 = x[i__3].r, dabs(r__1));
+ if (scale < temp) {
+/* Computing 2nd power */
+ r__1 = scale / temp;
+ ssq = ssq * (r__1 * r__1) + 1.f;
+ scale = temp;
+ } else {
+/* Computing 2nd power */
+ r__1 = temp / scale;
+ ssq += r__1 * r__1;
+ }
+ }
+ if (r_imag(&x[ix]) != 0.f) {
+ temp = (r__1 = r_imag(&x[ix]), dabs(r__1));
+ if (scale < temp) {
+/* Computing 2nd power */
+ r__1 = scale / temp;
+ ssq = ssq * (r__1 * r__1) + 1.f;
+ scale = temp;
+ } else {
+/* Computing 2nd power */
+ r__1 = temp / scale;
+ ssq += r__1 * r__1;
+ }
+ }
+/* L10: */
+ }
+ norm = scale * sqrt(ssq);
+ }
+
+ ret_val = norm;
+ return ret_val;
+
+/* End of SCNRM2. */
+
+} /* scnrm2_ */
diff --git a/contrib/libs/cblas/scopy.c b/contrib/libs/cblas/scopy.c
new file mode 100644
index 0000000000..13651fa118
--- /dev/null
+++ b/contrib/libs/cblas/scopy.c
@@ -0,0 +1,107 @@
+/* scopy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int scopy_(integer *n, real *sx, integer *incx, real *sy,
+ integer *incy)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ integer i__, m, ix, iy, mp1;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* copies a vector, x, to a vector, y. */
+/* uses unrolled loops for increments equal to 1. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --sy;
+ --sx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/* code for unequal increments or equal increments */
+/* not equal to 1 */
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ sy[iy] = sx[ix];
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+
+
+/* clean-up loop */
+
+L20:
+ m = *n % 7;
+ if (m == 0) {
+ goto L40;
+ }
+ i__1 = m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ sy[i__] = sx[i__];
+/* L30: */
+ }
+ if (*n < 7) {
+ return 0;
+ }
+L40:
+ mp1 = m + 1;
+ i__1 = *n;
+ for (i__ = mp1; i__ <= i__1; i__ += 7) {
+ sy[i__] = sx[i__];
+ sy[i__ + 1] = sx[i__ + 1];
+ sy[i__ + 2] = sx[i__ + 2];
+ sy[i__ + 3] = sx[i__ + 3];
+ sy[i__ + 4] = sx[i__ + 4];
+ sy[i__ + 5] = sx[i__ + 5];
+ sy[i__ + 6] = sx[i__ + 6];
+/* L50: */
+ }
+ return 0;
+} /* scopy_ */
diff --git a/contrib/libs/cblas/sdot.c b/contrib/libs/cblas/sdot.c
new file mode 100644
index 0000000000..26e9129bac
--- /dev/null
+++ b/contrib/libs/cblas/sdot.c
@@ -0,0 +1,109 @@
+/* sdot.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+doublereal sdot_(integer *n, real *sx, integer *incx, real *sy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1;
+ real ret_val;
+
+ /* Local variables */
+ integer i__, m, ix, iy, mp1;
+ real stemp;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* forms the dot product of two vectors. */
+/* uses unrolled loops for increments equal to one. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --sy;
+ --sx;
+
+ /* Function Body */
+ stemp = 0.f;
+ ret_val = 0.f;
+ if (*n <= 0) {
+ return ret_val;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/* code for unequal increments or equal increments */
+/* not equal to 1 */
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ stemp += sx[ix] * sy[iy];
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ ret_val = stemp;
+ return ret_val;
+
+/* code for both increments equal to 1 */
+
+
+/* clean-up loop */
+
+L20:
+ m = *n % 5;
+ if (m == 0) {
+ goto L40;
+ }
+ i__1 = m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ stemp += sx[i__] * sy[i__];
+/* L30: */
+ }
+ if (*n < 5) {
+ goto L60;
+ }
+L40:
+ mp1 = m + 1;
+ i__1 = *n;
+ for (i__ = mp1; i__ <= i__1; i__ += 5) {
+ stemp = stemp + sx[i__] * sy[i__] + sx[i__ + 1] * sy[i__ + 1] + sx[
+ i__ + 2] * sy[i__ + 2] + sx[i__ + 3] * sy[i__ + 3] + sx[i__ +
+ 4] * sy[i__ + 4];
+/* L50: */
+ }
+L60:
+ ret_val = stemp;
+ return ret_val;
+} /* sdot_ */
diff --git a/contrib/libs/cblas/sdsdot.c b/contrib/libs/cblas/sdsdot.c
new file mode 100644
index 0000000000..ec5638ab50
--- /dev/null
+++ b/contrib/libs/cblas/sdsdot.c
@@ -0,0 +1,144 @@
+/* sdsdot.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+doublereal sdsdot_(integer *n, real *sb, real *sx, integer *incx, real *sy,
+ integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ real ret_val;
+
+ /* Local variables */
+ integer i__, ns, kx, ky;
+ doublereal dsdot;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* PURPOSE */
+/* ======= */
+
+/* Compute the inner product of two vectors with extended */
+/* precision accumulation. */
+
+/* Returns S.P. result with dot product accumulated in D.P. */
+/* SDSDOT = SB + sum for I = 0 to N-1 of SX(LX+I*INCX)*SY(LY+I*INCY), */
+/* where LX = 1 if INCX .GE. 0, else LX = 1+(1-N)*INCX, and LY is */
+/* defined in a similar way using INCY. */
+
+/* AUTHOR */
+/* ====== */
+/* Lawson, C. L., (JPL), Hanson, R. J., (SNLA), */
+/* Kincaid, D. R., (U. of Texas), Krogh, F. T., (JPL) */
+
+/* ARGUMENTS */
+/* ========= */
+
+/* N (input) INTEGER */
+/* number of elements in input vector(s) */
+
+/* SB (input) REAL */
+/* single precision scalar to be added to inner product */
+
+/* SX (input) REAL array, dimension (N) */
+/* single precision vector with N elements */
+
+/* INCX (input) INTEGER */
+/* storage spacing between elements of SX */
+
+/* SY (input) REAL array, dimension (N) */
+/* single precision vector with N elements */
+
+/* INCY (input) INTEGER */
+/* storage spacing between elements of SY */
+
+/* SDSDOT (output) REAL */
+/* single precision dot product (SB if N .LE. 0) */
+
+/* REFERENCES */
+/* ========== */
+
+/* C. L. Lawson, R. J. Hanson, D. R. Kincaid and F. T. */
+/* Krogh, Basic linear algebra subprograms for Fortran */
+/* usage, Algorithm No. 539, Transactions on Mathematical */
+/* Software 5, 3 (September 1979), pp. 308-323. */
+
+/* REVISION HISTORY (YYMMDD) */
+/* ========================== */
+
+/* 791001 DATE WRITTEN */
+/* 890531 Changed all specific intrinsics to generic. (WRB) */
+/* 890831 Modified array declarations. (WRB) */
+/* 890831 REVISION DATE from Version 3.2 */
+/* 891214 Prologue converted to Version 4.0 format. (BAB) */
+/* 920310 Corrected definition of LX in DESCRIPTION. (WRB) */
+/* 920501 Reformatted the REFERENCES section. (WRB) */
+/* 070118 Reformat to LAPACK coding style */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --sy;
+ --sx;
+
+ /* Function Body */
+ dsdot = *sb;
+ if (*n <= 0) {
+ goto L30;
+ }
+ if (*incx == *incy && *incx > 0) {
+ goto L40;
+ }
+
+/* Code for unequal or nonpositive increments. */
+
+ kx = 1;
+ ky = 1;
+ if (*incx < 0) {
+ kx = (1 - *n) * *incx + 1;
+ }
+ if (*incy < 0) {
+ ky = (1 - *n) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dsdot += (doublereal) sx[kx] * (doublereal) sy[ky];
+ kx += *incx;
+ ky += *incy;
+/* L10: */
+ }
+L30:
+ ret_val = dsdot;
+ return ret_val;
+
+/* Code for equal and positive increments. */
+
+L40:
+ ns = *n * *incx;
+ i__1 = ns;
+ i__2 = *incx;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ dsdot += (doublereal) sx[i__] * (doublereal) sy[i__];
+/* L50: */
+ }
+ ret_val = dsdot;
+ return ret_val;
+} /* sdsdot_ */
diff --git a/contrib/libs/cblas/sgbmv.c b/contrib/libs/cblas/sgbmv.c
new file mode 100644
index 0000000000..11db1d99ca
--- /dev/null
+++ b/contrib/libs/cblas/sgbmv.c
@@ -0,0 +1,368 @@
+/* sgbmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sgbmv_(char *trans, integer *m, integer *n, integer *kl,
+ integer *ku, real *alpha, real *a, integer *lda, real *x, integer *
+ incx, real *beta, real *y, integer *incy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+
+ /* Local variables */
+ integer i__, j, k, ix, iy, jx, jy, kx, ky, kup1, info;
+ real temp;
+ integer lenx, leny;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGBMV performs one of the matrix-vector operations */
+
+/* y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are vectors and A is an */
+/* m by n band matrix, with kl sub-diagonals and ku super-diagonals. */
+
+/* Arguments */
+/* ========== */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' y := alpha*A*x + beta*y. */
+
+/* TRANS = 'T' or 't' y := alpha*A'*x + beta*y. */
+
+/* TRANS = 'C' or 'c' y := alpha*A'*x + beta*y. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix A. */
+/* M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* KL - INTEGER. */
+/* On entry, KL specifies the number of sub-diagonals of the */
+/* matrix A. KL must satisfy 0 .le. KL. */
+/* Unchanged on exit. */
+
+/* KU - INTEGER. */
+/* On entry, KU specifies the number of super-diagonals of the */
+/* matrix A. KU must satisfy 0 .le. KU. */
+/* Unchanged on exit. */
+
+/* ALPHA - REAL . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - REAL array of DIMENSION ( LDA, n ). */
+/* Before entry, the leading ( kl + ku + 1 ) by n part of the */
+/* array A must contain the matrix of coefficients, supplied */
+/* column by column, with the leading diagonal of the matrix in */
+/* row ( ku + 1 ) of the array, the first super-diagonal */
+/* starting at position 2 in row ku, the first sub-diagonal */
+/* starting at position 1 in row ( ku + 2 ), and so on. */
+/* Elements in the array A that do not correspond to elements */
+/* in the band matrix (such as the top left ku by ku triangle) */
+/* are not referenced. */
+/* The following program segment will transfer a band matrix */
+/* from conventional full matrix storage to band storage: */
+
+/* DO 20, J = 1, N */
+/* K = KU + 1 - J */
+/* DO 10, I = MAX( 1, J - KU ), MIN( M, J + KL ) */
+/* A( K + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* ( kl + ku + 1 ). */
+/* Unchanged on exit. */
+
+/* X - REAL array of DIMENSION at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n' */
+/* and at least */
+/* ( 1 + ( m - 1 )*abs( INCX ) ) otherwise. */
+/* Before entry, the incremented array X must contain the */
+/* vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA - REAL . */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then Y need not be set on input. */
+/* Unchanged on exit. */
+
+/* Y - REAL array of DIMENSION at least */
+/* ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n' */
+/* and at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ) otherwise. */
+/* Before entry, the incremented array Y must contain the */
+/* vector y. On exit, Y is overwritten by the updated vector y. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --y;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")
+ ) {
+ info = 1;
+ } else if (*m < 0) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*kl < 0) {
+ info = 4;
+ } else if (*ku < 0) {
+ info = 5;
+ } else if (*lda < *kl + *ku + 1) {
+ info = 8;
+ } else if (*incx == 0) {
+ info = 10;
+ } else if (*incy == 0) {
+ info = 13;
+ }
+ if (info != 0) {
+ xerbla_("SGBMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0 || *alpha == 0.f && *beta == 1.f) {
+ return 0;
+ }
+
+/* Set LENX and LENY, the lengths of the vectors x and y, and set */
+/* up the start points in X and Y. */
+
+ if (lsame_(trans, "N")) {
+ lenx = *n;
+ leny = *m;
+ } else {
+ lenx = *m;
+ leny = *n;
+ }
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (lenx - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (leny - 1) * *incy;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through the band part of A. */
+
+/* First form y := beta*y. */
+
+ if (*beta != 1.f) {
+ if (*incy == 1) {
+ if (*beta == 0.f) {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[i__] = 0.f;
+/* L10: */
+ }
+ } else {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[i__] = *beta * y[i__];
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (*beta == 0.f) {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[iy] = 0.f;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[iy] = *beta * y[iy];
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (*alpha == 0.f) {
+ return 0;
+ }
+ kup1 = *ku + 1;
+ if (lsame_(trans, "N")) {
+
+/* Form y := alpha*A*x + y. */
+
+ jx = kx;
+ if (*incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.f) {
+ temp = *alpha * x[jx];
+ k = kup1 - j;
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *ku;
+/* Computing MIN */
+ i__5 = *m, i__6 = j + *kl;
+ i__4 = min(i__5,i__6);
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ y[i__] += temp * a[k + i__ + j * a_dim1];
+/* L50: */
+ }
+ }
+ jx += *incx;
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.f) {
+ temp = *alpha * x[jx];
+ iy = ky;
+ k = kup1 - j;
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *ku;
+/* Computing MIN */
+ i__5 = *m, i__6 = j + *kl;
+ i__3 = min(i__5,i__6);
+ for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+ y[iy] += temp * a[k + i__ + j * a_dim1];
+ iy += *incy;
+/* L70: */
+ }
+ }
+ jx += *incx;
+ if (j > *ku) {
+ ky += *incy;
+ }
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y := alpha*A'*x + y. */
+
+ jy = ky;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = 0.f;
+ k = kup1 - j;
+/* Computing MAX */
+ i__3 = 1, i__4 = j - *ku;
+/* Computing MIN */
+ i__5 = *m, i__6 = j + *kl;
+ i__2 = min(i__5,i__6);
+ for (i__ = max(i__3,i__4); i__ <= i__2; ++i__) {
+ temp += a[k + i__ + j * a_dim1] * x[i__];
+/* L90: */
+ }
+ y[jy] += *alpha * temp;
+ jy += *incy;
+/* L100: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = 0.f;
+ ix = kx;
+ k = kup1 - j;
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *ku;
+/* Computing MIN */
+ i__5 = *m, i__6 = j + *kl;
+ i__4 = min(i__5,i__6);
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ temp += a[k + i__ + j * a_dim1] * x[ix];
+ ix += *incx;
+/* L110: */
+ }
+ y[jy] += *alpha * temp;
+ jy += *incy;
+ if (j > *ku) {
+ kx += *incx;
+ }
+/* L120: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of SGBMV . */
+
+} /* sgbmv_ */
diff --git a/contrib/libs/cblas/sgemm.c b/contrib/libs/cblas/sgemm.c
new file mode 100644
index 0000000000..527735a6d0
--- /dev/null
+++ b/contrib/libs/cblas/sgemm.c
@@ -0,0 +1,388 @@
+/* sgemm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sgemm_(char *transa, char *transb, integer *m, integer *
+        n, integer *k, real *alpha, real *a, integer *lda, real *b, integer *
+        ldb, real *beta, real *c__, integer *ldc)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+            i__3;
+
+    /* Local variables */
+    integer i__, j, l, info;
+    logical nota, notb;
+    real temp;
+    integer ncola;
+    extern logical lsame_(char *, char *);
+    integer nrowa, nrowb;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGEMM performs one of the matrix-matrix operations */
+
+/* C := alpha*op( A )*op( B ) + beta*C, */
+
+/* where op( X ) is one of */
+
+/* op( X ) = X or op( X ) = X', */
+
+/* alpha and beta are scalars, and A, B and C are matrices, with op( A ) */
+/* an m by k matrix, op( B ) a k by n matrix and C an m by n matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* TRANSA - CHARACTER*1. */
+/* On entry, TRANSA specifies the form of op( A ) to be used in */
+/* the matrix multiplication as follows: */
+
+/* TRANSA = 'N' or 'n', op( A ) = A. */
+
+/* TRANSA = 'T' or 't', op( A ) = A'. */
+
+/* TRANSA = 'C' or 'c', op( A ) = A'. */
+
+/* Unchanged on exit. */
+
+/* TRANSB - CHARACTER*1. */
+/* On entry, TRANSB specifies the form of op( B ) to be used in */
+/* the matrix multiplication as follows: */
+
+/* TRANSB = 'N' or 'n', op( B ) = B. */
+
+/* TRANSB = 'T' or 't', op( B ) = B'. */
+
+/* TRANSB = 'C' or 'c', op( B ) = B'. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix */
+/* op( A ) and of the matrix C. M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix */
+/* op( B ) and the number of columns of the matrix C. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry, K specifies the number of columns of the matrix */
+/* op( A ) and the number of rows of the matrix op( B ). K must */
+/* be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - REAL . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - REAL array of DIMENSION ( LDA, ka ), where ka is */
+/* k when TRANSA = 'N' or 'n', and is m otherwise. */
+/* Before entry with TRANSA = 'N' or 'n', the leading m by k */
+/* part of the array A must contain the matrix A, otherwise */
+/* the leading k by m part of the array A must contain the */
+/* matrix A. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When TRANSA = 'N' or 'n' then */
+/* LDA must be at least max( 1, m ), otherwise LDA must be at */
+/* least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* B - REAL array of DIMENSION ( LDB, kb ), where kb is */
+/* n when TRANSB = 'N' or 'n', and is k otherwise. */
+/* Before entry with TRANSB = 'N' or 'n', the leading k by n */
+/* part of the array B must contain the matrix B, otherwise */
+/* the leading n by k part of the array B must contain the */
+/* matrix B. */
+/* Unchanged on exit. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. When TRANSB = 'N' or 'n' then */
+/* LDB must be at least max( 1, k ), otherwise LDB must be at */
+/* least max( 1, n ). */
+/* Unchanged on exit. */
+
+/* BETA - REAL . */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then C need not be set on input. */
+/* Unchanged on exit. */
+
+/* C - REAL array of DIMENSION ( LDC, n ). */
+/* Before entry, the leading m by n part of the array C must */
+/* contain the matrix C, except when beta is zero, in which */
+/* case C need not be set on entry. */
+/* On exit, the array C is overwritten by the m by n matrix */
+/* ( alpha*op( A )*op( B ) + beta*C ). */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Set NOTA and NOTB as true if A and B respectively are not */
+/* transposed and set NROWA, NCOLA and NROWB as the number of rows */
+/* and columns of A and the number of rows of B respectively. */
+
+/* NOTE(review): f2c translation — every Fortran scalar arrives as a */
+/* pointer (call-by-reference), and the offset adjustments below shift */
+/* each array base so the Fortran 1-based, column-major indexing */
+/* a[i + j*a_dim1] carries over verbatim into C. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+
+    /* Function Body */
+    nota = lsame_(transa, "N");
+    notb = lsame_(transb, "N");
+    if (nota) {
+        nrowa = *m;
+        ncola = *k;
+    } else {
+        nrowa = *k;
+        ncola = *m;
+    }
+    if (notb) {
+        nrowb = *k;
+    } else {
+        nrowb = *n;
+    }
+
+/* Test the input parameters. */
+
+    info = 0;
+    if (! nota && ! lsame_(transa, "C") && ! lsame_(
+            transa, "T")) {
+        info = 1;
+    } else if (! notb && ! lsame_(transb, "C") && !
+            lsame_(transb, "T")) {
+        info = 2;
+    } else if (*m < 0) {
+        info = 3;
+    } else if (*n < 0) {
+        info = 4;
+    } else if (*k < 0) {
+        info = 5;
+    } else if (*lda < max(1,nrowa)) {
+        info = 8;
+    } else if (*ldb < max(1,nrowb)) {
+        info = 10;
+    } else if (*ldc < max(1,*m)) {
+        info = 13;
+    }
+    if (info != 0) {
+        xerbla_("SGEMM ", &info);
+        return 0;
+    }
+
+/* Quick return if possible. */
+
+/* (Nothing to do when C is empty, or when the update degenerates to */
+/* C := 0*X + 1*C, i.e. alpha==0 or k==0 combined with beta==1.) */
+
+    if (*m == 0 || *n == 0 || (*alpha == 0.f || *k == 0) && *beta == 1.f) {
+        return 0;
+    }
+
+/* And if alpha.eq.zero. */
+
+    if (*alpha == 0.f) {
+        if (*beta == 0.f) {
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                i__2 = *m;
+                for (i__ = 1; i__ <= i__2; ++i__) {
+                    c__[i__ + j * c_dim1] = 0.f;
+/* L10: */
+                }
+/* L20: */
+            }
+        } else {
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                i__2 = *m;
+                for (i__ = 1; i__ <= i__2; ++i__) {
+                    c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L30: */
+                }
+/* L40: */
+            }
+        }
+        return 0;
+    }
+
+/* Start the operations. */
+
+    if (notb) {
+        if (nota) {
+
+/* Form C := alpha*A*B + beta*C. */
+
+/* (Column-oriented: each column of C is scaled by beta, then */
+/* accumulated as a linear combination of columns of A.) */
+
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                if (*beta == 0.f) {
+                    i__2 = *m;
+                    for (i__ = 1; i__ <= i__2; ++i__) {
+                        c__[i__ + j * c_dim1] = 0.f;
+/* L50: */
+                    }
+                } else if (*beta != 1.f) {
+                    i__2 = *m;
+                    for (i__ = 1; i__ <= i__2; ++i__) {
+                        c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L60: */
+                    }
+                }
+                i__2 = *k;
+                for (l = 1; l <= i__2; ++l) {
+                    if (b[l + j * b_dim1] != 0.f) {
+                        temp = *alpha * b[l + j * b_dim1];
+                        i__3 = *m;
+                        for (i__ = 1; i__ <= i__3; ++i__) {
+                            c__[i__ + j * c_dim1] += temp * a[i__ + l *
+                                    a_dim1];
+/* L70: */
+                        }
+                    }
+/* L80: */
+                }
+/* L90: */
+            }
+        } else {
+
+/* Form C := alpha*A'*B + beta*C */
+
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                i__2 = *m;
+                for (i__ = 1; i__ <= i__2; ++i__) {
+                    temp = 0.f;
+                    i__3 = *k;
+                    for (l = 1; l <= i__3; ++l) {
+                        temp += a[l + i__ * a_dim1] * b[l + j * b_dim1];
+/* L100: */
+                    }
+                    if (*beta == 0.f) {
+                        c__[i__ + j * c_dim1] = *alpha * temp;
+                    } else {
+                        c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+                                i__ + j * c_dim1];
+                    }
+/* L110: */
+                }
+/* L120: */
+            }
+        }
+    } else {
+        if (nota) {
+
+/* Form C := alpha*A*B' + beta*C */
+
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                if (*beta == 0.f) {
+                    i__2 = *m;
+                    for (i__ = 1; i__ <= i__2; ++i__) {
+                        c__[i__ + j * c_dim1] = 0.f;
+/* L130: */
+                    }
+                } else if (*beta != 1.f) {
+                    i__2 = *m;
+                    for (i__ = 1; i__ <= i__2; ++i__) {
+                        c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L140: */
+                    }
+                }
+                i__2 = *k;
+                for (l = 1; l <= i__2; ++l) {
+                    if (b[j + l * b_dim1] != 0.f) {
+                        temp = *alpha * b[j + l * b_dim1];
+                        i__3 = *m;
+                        for (i__ = 1; i__ <= i__3; ++i__) {
+                            c__[i__ + j * c_dim1] += temp * a[i__ + l *
+                                    a_dim1];
+/* L150: */
+                        }
+                    }
+/* L160: */
+                }
+/* L170: */
+            }
+        } else {
+
+/* Form C := alpha*A'*B' + beta*C */
+
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                i__2 = *m;
+                for (i__ = 1; i__ <= i__2; ++i__) {
+                    temp = 0.f;
+                    i__3 = *k;
+                    for (l = 1; l <= i__3; ++l) {
+                        temp += a[l + i__ * a_dim1] * b[j + l * b_dim1];
+/* L180: */
+                    }
+                    if (*beta == 0.f) {
+                        c__[i__ + j * c_dim1] = *alpha * temp;
+                    } else {
+                        c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+                                i__ + j * c_dim1];
+                    }
+/* L190: */
+                }
+/* L200: */
+            }
+        }
+    }
+
+    return 0;
+
+/* End of SGEMM . */
+
+} /* sgemm_ */
diff --git a/contrib/libs/cblas/sgemv.c b/contrib/libs/cblas/sgemv.c
new file mode 100644
index 0000000000..10eacd3a15
--- /dev/null
+++ b/contrib/libs/cblas/sgemv.c
@@ -0,0 +1,312 @@
+/* sgemv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sgemv_(char *trans, integer *m, integer *n, real *alpha,
+        real *a, integer *lda, real *x, integer *incx, real *beta, real *y,
+        integer *incy)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, ix, iy, jx, jy, kx, ky, info;
+    real temp;
+    integer lenx, leny;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGEMV performs one of the matrix-vector operations */
+
+/* y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are vectors and A is an */
+/* m by n matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' y := alpha*A*x + beta*y. */
+
+/* TRANS = 'T' or 't' y := alpha*A'*x + beta*y. */
+
+/* TRANS = 'C' or 'c' y := alpha*A'*x + beta*y. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix A. */
+/* M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - REAL . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - REAL array of DIMENSION ( LDA, n ). */
+/* Before entry, the leading m by n part of the array A must */
+/* contain the matrix of coefficients. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+/* X - REAL array of DIMENSION at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n' */
+/* and at least */
+/* ( 1 + ( m - 1 )*abs( INCX ) ) otherwise. */
+/* Before entry, the incremented array X must contain the */
+/* vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA - REAL . */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then Y need not be set on input. */
+/* Unchanged on exit. */
+
+/* Y - REAL array of DIMENSION at least */
+/* ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n' */
+/* and at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ) otherwise. */
+/* Before entry with BETA non-zero, the incremented array Y */
+/* must contain the vector y. On exit, Y is overwritten by the */
+/* updated vector y. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+/* NOTE(review): f2c translation — scalars are passed by reference and */
+/* the base-pointer adjustments below preserve the Fortran 1-based, */
+/* column-major indexing a[i + j*a_dim1]. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --x;
+    --y;
+
+    /* Function Body */
+    info = 0;
+    if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")
+            ) {
+        info = 1;
+    } else if (*m < 0) {
+        info = 2;
+    } else if (*n < 0) {
+        info = 3;
+    } else if (*lda < max(1,*m)) {
+        info = 6;
+    } else if (*incx == 0) {
+        info = 8;
+    } else if (*incy == 0) {
+        info = 11;
+    }
+    if (info != 0) {
+        xerbla_("SGEMV ", &info);
+        return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*m == 0 || *n == 0 || *alpha == 0.f && *beta == 1.f) {
+        return 0;
+    }
+
+/* Set LENX and LENY, the lengths of the vectors x and y, and set */
+/* up the start points in X and Y. */
+
+/* (For a negative increment the traversal starts from the far end of */
+/* the vector, mirroring the BLAS convention.) */
+
+    if (lsame_(trans, "N")) {
+        lenx = *n;
+        leny = *m;
+    } else {
+        lenx = *m;
+        leny = *n;
+    }
+    if (*incx > 0) {
+        kx = 1;
+    } else {
+        kx = 1 - (lenx - 1) * *incx;
+    }
+    if (*incy > 0) {
+        ky = 1;
+    } else {
+        ky = 1 - (leny - 1) * *incy;
+    }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+/* First form y := beta*y. */
+
+    if (*beta != 1.f) {
+        if (*incy == 1) {
+            if (*beta == 0.f) {
+                i__1 = leny;
+                for (i__ = 1; i__ <= i__1; ++i__) {
+                    y[i__] = 0.f;
+/* L10: */
+                }
+            } else {
+                i__1 = leny;
+                for (i__ = 1; i__ <= i__1; ++i__) {
+                    y[i__] = *beta * y[i__];
+/* L20: */
+                }
+            }
+        } else {
+            iy = ky;
+            if (*beta == 0.f) {
+                i__1 = leny;
+                for (i__ = 1; i__ <= i__1; ++i__) {
+                    y[iy] = 0.f;
+                    iy += *incy;
+/* L30: */
+                }
+            } else {
+                i__1 = leny;
+                for (i__ = 1; i__ <= i__1; ++i__) {
+                    y[iy] = *beta * y[iy];
+                    iy += *incy;
+/* L40: */
+                }
+            }
+        }
+    }
+    if (*alpha == 0.f) {
+        return 0;
+    }
+    if (lsame_(trans, "N")) {
+
+/* Form y := alpha*A*x + y. */
+
+        jx = kx;
+        if (*incy == 1) {
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                if (x[jx] != 0.f) {
+                    temp = *alpha * x[jx];
+                    i__2 = *m;
+                    for (i__ = 1; i__ <= i__2; ++i__) {
+                        y[i__] += temp * a[i__ + j * a_dim1];
+/* L50: */
+                    }
+                }
+                jx += *incx;
+/* L60: */
+            }
+        } else {
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                if (x[jx] != 0.f) {
+                    temp = *alpha * x[jx];
+                    iy = ky;
+                    i__2 = *m;
+                    for (i__ = 1; i__ <= i__2; ++i__) {
+                        y[iy] += temp * a[i__ + j * a_dim1];
+                        iy += *incy;
+/* L70: */
+                    }
+                }
+                jx += *incx;
+/* L80: */
+            }
+        }
+    } else {
+
+/* Form y := alpha*A'*x + y. */
+
+        jy = ky;
+        if (*incx == 1) {
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                temp = 0.f;
+                i__2 = *m;
+                for (i__ = 1; i__ <= i__2; ++i__) {
+                    temp += a[i__ + j * a_dim1] * x[i__];
+/* L90: */
+                }
+                y[jy] += *alpha * temp;
+                jy += *incy;
+/* L100: */
+            }
+        } else {
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                temp = 0.f;
+                ix = kx;
+                i__2 = *m;
+                for (i__ = 1; i__ <= i__2; ++i__) {
+                    temp += a[i__ + j * a_dim1] * x[ix];
+                    ix += *incx;
+/* L110: */
+                }
+                y[jy] += *alpha * temp;
+                jy += *incy;
+/* L120: */
+            }
+        }
+    }
+
+    return 0;
+
+/* End of SGEMV . */
+
+} /* sgemv_ */
diff --git a/contrib/libs/cblas/sger.c b/contrib/libs/cblas/sger.c
new file mode 100644
index 0000000000..a6e02758d8
--- /dev/null
+++ b/contrib/libs/cblas/sger.c
@@ -0,0 +1,193 @@
+/* sger.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sger_(integer *m, integer *n, real *alpha, real *x,
+        integer *incx, real *y, integer *incy, real *a, integer *lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, ix, jy, kx, info;
+    real temp;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGER performs the rank 1 operation */
+
+/* A := alpha*x*y' + A, */
+
+/* where alpha is a scalar, x is an m element vector, y is an n element */
+/* vector and A is an m by n matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix A. */
+/* M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - REAL . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - REAL array of dimension at least */
+/* ( 1 + ( m - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the m */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* Y - REAL array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. */
+/* Unchanged on exit. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+/* A - REAL array of DIMENSION ( LDA, n ). */
+/* Before entry, the leading m by n part of the array A must */
+/* contain the matrix of coefficients. On exit, A is */
+/* overwritten by the updated matrix. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+/* NOTE(review): f2c translation — the pointer decrements below give */
+/* 1-based indexing; A is addressed column-major as a[i + j*a_dim1]. */
+
+    /* Parameter adjustments */
+    --x;
+    --y;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    info = 0;
+    if (*m < 0) {
+        info = 1;
+    } else if (*n < 0) {
+        info = 2;
+    } else if (*incx == 0) {
+        info = 5;
+    } else if (*incy == 0) {
+        info = 7;
+    } else if (*lda < max(1,*m)) {
+        info = 9;
+    }
+    if (info != 0) {
+        xerbla_("SGER  ", &info);
+        return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*m == 0 || *n == 0 || *alpha == 0.f) {
+        return 0;
+    }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+/* Column j of A receives the axpy update  A(:,j) += (alpha*y(j)) * x. */
+
+    if (*incy > 0) {
+        jy = 1;
+    } else {
+        jy = 1 - (*n - 1) * *incy;
+    }
+    if (*incx == 1) {
+        i__1 = *n;
+        for (j = 1; j <= i__1; ++j) {
+            if (y[jy] != 0.f) {
+                temp = *alpha * y[jy];
+                i__2 = *m;
+                for (i__ = 1; i__ <= i__2; ++i__) {
+                    a[i__ + j * a_dim1] += x[i__] * temp;
+/* L10: */
+                }
+            }
+            jy += *incy;
+/* L20: */
+        }
+    } else {
+        if (*incx > 0) {
+            kx = 1;
+        } else {
+            kx = 1 - (*m - 1) * *incx;
+        }
+        i__1 = *n;
+        for (j = 1; j <= i__1; ++j) {
+            if (y[jy] != 0.f) {
+                temp = *alpha * y[jy];
+                ix = kx;
+                i__2 = *m;
+                for (i__ = 1; i__ <= i__2; ++i__) {
+                    a[i__ + j * a_dim1] += x[ix] * temp;
+                    ix += *incx;
+/* L30: */
+                }
+            }
+            jy += *incy;
+/* L40: */
+        }
+    }
+
+    return 0;
+
+/* End of SGER  . */
+
+} /* sger_ */
diff --git a/contrib/libs/cblas/snrm2.c b/contrib/libs/cblas/snrm2.c
new file mode 100644
index 0000000000..ca5233f35a
--- /dev/null
+++ b/contrib/libs/cblas/snrm2.c
@@ -0,0 +1,97 @@
+/* snrm2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Returns the Euclidean 2-norm of x, computed with a scaled sum of */
+/* squares (an inlined SLASSQ) so intermediate squares cannot overflow */
+/* or underflow.  Returns 0 when n < 1 or incx < 1. */
+doublereal snrm2_(integer *n, real *x, integer *incx)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    real ret_val, r__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer ix;
+    real ssq, norm, scale, absxi;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SNRM2 returns the euclidean norm of a vector via the function */
+/* name, so that */
+
+/* SNRM2 := sqrt( x'*x ). */
+
+/* Further Details */
+/* =============== */
+
+/* -- This version written on 25-October-1982. */
+/* Modified on 14-October-1993 to inline the call to SLASSQ. */
+/* Sven Hammarling, Nag Ltd. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+    /* Parameter adjustments */
+    --x;
+
+    /* Function Body */
+    if (*n < 1 || *incx < 1) {
+        norm = 0.f;
+    } else if (*n == 1) {
+        norm = dabs(x[1]);
+    } else {
+        scale = 0.f;
+        ssq = 1.f;
+/* The following loop is equivalent to this call to the LAPACK */
+/* auxiliary routine: */
+/* CALL SLASSQ( N, X, INCX, SCALE, SSQ ) */
+
+/* Invariant: scale is the largest |x(i)| seen so far and */
+/* scale**2 * ssq is the running sum of squares. */
+
+        i__1 = (*n - 1) * *incx + 1;
+        i__2 = *incx;
+        for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
+            if (x[ix] != 0.f) {
+                absxi = (r__1 = x[ix], dabs(r__1));
+                if (scale < absxi) {
+/* Computing 2nd power */
+                    r__1 = scale / absxi;
+                    ssq = ssq * (r__1 * r__1) + 1.f;
+                    scale = absxi;
+                } else {
+/* Computing 2nd power */
+                    r__1 = absxi / scale;
+                    ssq += r__1 * r__1;
+                }
+            }
+/* L10: */
+        }
+        norm = scale * sqrt(ssq);
+    }
+
+    ret_val = norm;
+    return ret_val;
+
+/* End of SNRM2. */
+
+} /* snrm2_ */
diff --git a/contrib/libs/cblas/srot.c b/contrib/libs/cblas/srot.c
new file mode 100644
index 0000000000..085677c73c
--- /dev/null
+++ b/contrib/libs/cblas/srot.c
@@ -0,0 +1,90 @@
+/* srot.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Applies the plane (Givens) rotation [c s; -s c] to each element */
+/* pair (sx(i), sy(i)) of the two input vectors, in place. */
+/* Subroutine */ int srot_(integer *n, real *sx, integer *incx, real *sy,
+        integer *incy, real *c__, real *s)
+{
+    /* System generated locals */
+    integer i__1;
+
+    /* Local variables */
+    integer i__, ix, iy;
+    real stemp;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* applies a plane rotation. */
+
+/* Further Details */
+/* =============== */
+
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+    /* Parameter adjustments */
+    --sy;
+    --sx;
+
+    /* Function Body */
+    if (*n <= 0) {
+        return 0;
+    }
+    if (*incx == 1 && *incy == 1) {
+        goto L20;
+    }
+
+/* code for unequal increments or equal increments not equal */
+/* to 1 */
+
+/* (Negative increments start the traversal from the far end of the */
+/* vector, per the BLAS convention.) */
+
+    ix = 1;
+    iy = 1;
+    if (*incx < 0) {
+        ix = (-(*n) + 1) * *incx + 1;
+    }
+    if (*incy < 0) {
+        iy = (-(*n) + 1) * *incy + 1;
+    }
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+        stemp = *c__ * sx[ix] + *s * sy[iy];
+        sy[iy] = *c__ * sy[iy] - *s * sx[ix];
+        sx[ix] = stemp;
+        ix += *incx;
+        iy += *incy;
+/* L10: */
+    }
+    return 0;
+
+/* code for both increments equal to 1 */
+
+L20:
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+        stemp = *c__ * sx[i__] + *s * sy[i__];
+        sy[i__] = *c__ * sy[i__] - *s * sx[i__];
+        sx[i__] = stemp;
+/* L30: */
+    }
+    return 0;
+} /* srot_ */
diff --git a/contrib/libs/cblas/srotg.c b/contrib/libs/cblas/srotg.c
new file mode 100644
index 0000000000..99be0f8ec4
--- /dev/null
+++ b/contrib/libs/cblas/srotg.c
@@ -0,0 +1,78 @@
+/* srotg.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* Constant 1.0f used as the magnitude argument to r_sign below. */
+static real c_b4 = 1.f;
+
+/* Constructs a Givens plane rotation (c, s) that annihilates sb: on */
+/* exit *sa holds r and *sb holds the reconstruction value z. */
+/* Subroutine */ int srotg_(real *sa, real *sb, real *c__, real *s)
+{
+    /* System generated locals */
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal), r_sign(real *, real *);
+
+    /* Local variables */
+    real r__, z__, roe, scale;
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* construct givens plane rotation. */
+/* jack dongarra, linpack, 3/11/78. */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* roe is whichever of sa/sb has the larger magnitude; it fixes the */
+/* sign of r. */
+    roe = *sb;
+    if (dabs(*sa) > dabs(*sb)) {
+        roe = *sa;
+    }
+/* Scaling by |sa|+|sb| guards the sqrt against overflow/underflow; */
+/* scale == 0 means both inputs are zero -> identity rotation. */
+    scale = dabs(*sa) + dabs(*sb);
+    if (scale != 0.f) {
+        goto L10;
+    }
+    *c__ = 1.f;
+    *s = 0.f;
+    r__ = 0.f;
+    z__ = 0.f;
+    goto L20;
+L10:
+/* Computing 2nd power */
+    r__1 = *sa / scale;
+/* Computing 2nd power */
+    r__2 = *sb / scale;
+    r__ = scale * sqrt(r__1 * r__1 + r__2 * r__2);
+    r__ = r_sign(&c_b4, &roe) * r__;
+    *c__ = *sa / r__;
+    *s = *sb / r__;
+/* z encodes the rotation compactly: z = s when |sa| > |sb|, else */
+/* z = 1/c (or 1 when c == 0). */
+    z__ = 1.f;
+    if (dabs(*sa) > dabs(*sb)) {
+        z__ = *s;
+    }
+    if (dabs(*sb) >= dabs(*sa) && *c__ != 0.f) {
+        z__ = 1.f / *c__;
+    }
+L20:
+    *sa = r__;
+    *sb = z__;
+    return 0;
+} /* srotg_ */
diff --git a/contrib/libs/cblas/srotm.c b/contrib/libs/cblas/srotm.c
new file mode 100644
index 0000000000..b83bf8628d
--- /dev/null
+++ b/contrib/libs/cblas/srotm.c
@@ -0,0 +1,216 @@
+/* srotm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Applies the modified (fast) Givens transformation H, encoded in */
+/* sparam, to the vector pair (sx, sy).  The form of H is selected by */
+/* sflag = sparam[1]; sflag == -2 is the identity (early exit). */
+/* Subroutine */ int srotm_(integer *n, real *sx, integer *incx, real *sy,
+        integer *incy, real *sparam)
+{
+    /* Initialized data */
+
+    static real zero = 0.f;
+    static real two = 2.f;
+
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer i__;
+    real w, z__;
+    integer kx, ky;
+    real sh11, sh12, sh21, sh22, sflag;
+    integer nsteps;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* APPLY THE MODIFIED GIVENS TRANSFORMATION, H, TO THE 2 BY N MATRIX */
+
+/* (SX**T) , WHERE **T INDICATES TRANSPOSE. THE ELEMENTS OF SX ARE IN */
+/* (DX**T) */
+
+/* SX(LX+I*INCX), I = 0 TO N-1, WHERE LX = 1 IF INCX .GE. 0, ELSE */
+/* LX = (-INCX)*N, AND SIMILARLY FOR SY USING USING LY AND INCY. */
+/* WITH SPARAM(1)=SFLAG, H HAS ONE OF THE FOLLOWING FORMS.. */
+
+/* SFLAG=-1.E0 SFLAG=0.E0 SFLAG=1.E0 SFLAG=-2.E0 */
+
+/* (SH11 SH12) (1.E0 SH12) (SH11 1.E0) (1.E0 0.E0) */
+/* H=( ) ( ) ( ) ( ) */
+/* (SH21 SH22), (SH21 1.E0), (-1.E0 SH22), (0.E0 1.E0). */
+/* SEE SROTMG FOR A DESCRIPTION OF DATA STORAGE IN SPARAM. */
+
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* number of elements in input vector(s) */
+
+/* SX (input/output) REAL array, dimension N */
+/* double precision vector with N elements */
+
+/* INCX (input) INTEGER */
+/* storage spacing between elements of SX */
+
+/* SY (input/output) REAL array, dimension N */
+/* double precision vector with N elements */
+
+/* INCY (input) INTEGER */
+/* storage spacing between elements of SY */
+
+/* SPARAM (input/output) REAL array, dimension 5 */
+/* SPARAM(1)=SFLAG */
+/* SPARAM(2)=SH11 */
+/* SPARAM(3)=SH21 */
+/* SPARAM(4)=SH12 */
+/* SPARAM(5)=SH22 */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Data statements .. */
+    /* Parameter adjustments */
+    --sparam;
+    --sy;
+    --sx;
+
+    /* Function Body */
+/* .. */
+
+/* sflag + two == zero detects sflag == -2 (identity H): nothing to do. */
+    sflag = sparam[1];
+    if (*n <= 0 || sflag + two == zero) {
+        goto L140;
+    }
+/* Fast path (L10/L30/L50) when both vectors share one positive */
+/* stride; otherwise fall through to the general two-index path (L70). */
+    if (! (*incx == *incy && *incx > 0)) {
+        goto L70;
+    }
+
+    nsteps = *n * *incx;
+    if (sflag < 0.f) {
+        goto L50;
+    } else if (sflag == 0) {
+        goto L10;
+    } else {
+        goto L30;
+    }
+L10:
+/* sflag == 0: H has unit diagonal; only sh12/sh21 are stored. */
+    sh12 = sparam[4];
+    sh21 = sparam[3];
+    i__1 = nsteps;
+    i__2 = *incx;
+    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+        w = sx[i__];
+        z__ = sy[i__];
+        sx[i__] = w + z__ * sh12;
+        sy[i__] = w * sh21 + z__;
+/* L20: */
+    }
+    goto L140;
+L30:
+/* sflag > 0: H has unit anti-diagonal (+1/-1); sh11/sh22 are stored. */
+    sh11 = sparam[2];
+    sh22 = sparam[5];
+    i__2 = nsteps;
+    i__1 = *incx;
+    for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+        w = sx[i__];
+        z__ = sy[i__];
+        sx[i__] = w * sh11 + z__;
+        sy[i__] = -w + sh22 * z__;
+/* L40: */
+    }
+    goto L140;
+L50:
+/* sflag < 0 (and != -2): full 2x2 H; all four elements are stored. */
+    sh11 = sparam[2];
+    sh12 = sparam[4];
+    sh21 = sparam[3];
+    sh22 = sparam[5];
+    i__1 = nsteps;
+    i__2 = *incx;
+    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+        w = sx[i__];
+        z__ = sy[i__];
+        sx[i__] = w * sh11 + z__ * sh12;
+        sy[i__] = w * sh21 + z__ * sh22;
+/* L60: */
+    }
+    goto L140;
+L70:
+/* General case: independent strides; negative strides start from the */
+/* far end of the vector. */
+    kx = 1;
+    ky = 1;
+    if (*incx < 0) {
+        kx = (1 - *n) * *incx + 1;
+    }
+    if (*incy < 0) {
+        ky = (1 - *n) * *incy + 1;
+    }
+
+    if (sflag < 0.f) {
+        goto L120;
+    } else if (sflag == 0) {
+        goto L80;
+    } else {
+        goto L100;
+    }
+L80:
+    sh12 = sparam[4];
+    sh21 = sparam[3];
+    i__2 = *n;
+    for (i__ = 1; i__ <= i__2; ++i__) {
+        w = sx[kx];
+        z__ = sy[ky];
+        sx[kx] = w + z__ * sh12;
+        sy[ky] = w * sh21 + z__;
+        kx += *incx;
+        ky += *incy;
+/* L90: */
+    }
+    goto L140;
+L100:
+    sh11 = sparam[2];
+    sh22 = sparam[5];
+    i__2 = *n;
+    for (i__ = 1; i__ <= i__2; ++i__) {
+        w = sx[kx];
+        z__ = sy[ky];
+        sx[kx] = w * sh11 + z__;
+        sy[ky] = -w + sh22 * z__;
+        kx += *incx;
+        ky += *incy;
+/* L110: */
+    }
+    goto L140;
+L120:
+    sh11 = sparam[2];
+    sh12 = sparam[4];
+    sh21 = sparam[3];
+    sh22 = sparam[5];
+    i__2 = *n;
+    for (i__ = 1; i__ <= i__2; ++i__) {
+        w = sx[kx];
+        z__ = sy[ky];
+        sx[kx] = w * sh11 + z__ * sh12;
+        sy[ky] = w * sh21 + z__ * sh22;
+        kx += *incx;
+        ky += *incy;
+/* L130: */
+    }
+L140:
+    return 0;
+} /* srotm_ */
diff --git a/contrib/libs/cblas/srotmg.c b/contrib/libs/cblas/srotmg.c
new file mode 100644
index 0000000000..912778bedb
--- /dev/null
+++ b/contrib/libs/cblas/srotmg.c
@@ -0,0 +1,295 @@
+/* srotmg.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int srotmg_(real *sd1, real *sd2, real *sx1, real *sy1, real
+ *sparam)
+{
+ /* Initialized data */
+
+ static real zero = 0.f;
+ static real one = 1.f;
+ static real two = 2.f;
+ static real gam = 4096.f;
+ static real gamsq = 16777200.f;
+ static real rgamsq = 5.96046e-8f;
+
+ /* Format strings */
+ static char fmt_120[] = "";
+ static char fmt_150[] = "";
+ static char fmt_180[] = "";
+ static char fmt_210[] = "";
+
+ /* System generated locals */
+ real r__1;
+
+ /* Local variables */
+ real su, sp1, sp2, sq1, sq2, sh11, sh12, sh21, sh22;
+ integer igo;
+ real sflag, stemp;
+
+ /* Assigned format variables */
+ static char *igo_fmt;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CONSTRUCT THE MODIFIED GIVENS TRANSFORMATION MATRIX H WHICH ZEROS */
+/* THE SECOND COMPONENT OF THE 2-VECTOR (SQRT(SD1)*SX1,SQRT(SD2)* */
+/* SY2)**T. */
+/* WITH SPARAM(1)=SFLAG, H HAS ONE OF THE FOLLOWING FORMS.. */
+
+/* SFLAG=-1.E0 SFLAG=0.E0 SFLAG=1.E0 SFLAG=-2.E0 */
+
+/* (SH11 SH12) (1.E0 SH12) (SH11 1.E0) (1.E0 0.E0) */
+/* H=( ) ( ) ( ) ( ) */
+/* (SH21 SH22), (SH21 1.E0), (-1.E0 SH22), (0.E0 1.E0). */
+/* LOCATIONS 2-4 OF SPARAM CONTAIN SH11,SH21,SH12, AND SH22 */
+/* RESPECTIVELY. (VALUES OF 1.E0, -1.E0, OR 0.E0 IMPLIED BY THE */
+/* VALUE OF SPARAM(1) ARE NOT STORED IN SPARAM.) */
+
+/* THE VALUES OF GAMSQ AND RGAMSQ SET IN THE DATA STATEMENT MAY BE */
+/* INEXACT. THIS IS OK AS THEY ARE ONLY USED FOR TESTING THE SIZE */
+/* OF SD1 AND SD2. ALL ACTUAL SCALING OF DATA IS DONE USING GAM. */
+
+
+/* Arguments */
+/* ========= */
+
+
+/* SD1 (input/output) REAL */
+
+/* SD2 (input/output) REAL */
+
+/* SX1 (input/output) REAL */
+
+/* SY1 (input) REAL */
+
+
+/* SPARAM (input/output) REAL array, dimension 5 */
+/* SPARAM(1)=SFLAG */
+/* SPARAM(2)=SH11 */
+/* SPARAM(3)=SH21 */
+/* SPARAM(4)=SH12 */
+/* SPARAM(5)=SH22 */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Data statements .. */
+
+ /* Parameter adjustments */
+ --sparam;
+
+ /* Function Body */
+/* .. */
+ if (! (*sd1 < zero)) {
+ goto L10;
+ }
+/* GO ZERO-H-D-AND-SX1.. */
+ goto L60;
+L10:
+/* CASE-SD1-NONNEGATIVE */
+ sp2 = *sd2 * *sy1;
+ if (! (sp2 == zero)) {
+ goto L20;
+ }
+ sflag = -two;
+ goto L260;
+/* REGULAR-CASE.. */
+L20:
+ sp1 = *sd1 * *sx1;
+ sq2 = sp2 * *sy1;
+ sq1 = sp1 * *sx1;
+
+ if (! (dabs(sq1) > dabs(sq2))) {
+ goto L40;
+ }
+ sh21 = -(*sy1) / *sx1;
+ sh12 = sp2 / sp1;
+
+ su = one - sh12 * sh21;
+
+ if (! (su <= zero)) {
+ goto L30;
+ }
+/* GO ZERO-H-D-AND-SX1.. */
+ goto L60;
+L30:
+ sflag = zero;
+ *sd1 /= su;
+ *sd2 /= su;
+ *sx1 *= su;
+/* GO SCALE-CHECK.. */
+ goto L100;
+L40:
+ if (! (sq2 < zero)) {
+ goto L50;
+ }
+/* GO ZERO-H-D-AND-SX1.. */
+ goto L60;
+L50:
+ sflag = one;
+ sh11 = sp1 / sp2;
+ sh22 = *sx1 / *sy1;
+ su = one + sh11 * sh22;
+ stemp = *sd2 / su;
+ *sd2 = *sd1 / su;
+ *sd1 = stemp;
+ *sx1 = *sy1 * su;
+/* GO SCALE-CHECK */
+ goto L100;
+/* PROCEDURE..ZERO-H-D-AND-SX1.. */
+L60:
+ sflag = -one;
+ sh11 = zero;
+ sh12 = zero;
+ sh21 = zero;
+ sh22 = zero;
+
+ *sd1 = zero;
+ *sd2 = zero;
+ *sx1 = zero;
+/* RETURN.. */
+ goto L220;
+/* PROCEDURE..FIX-H.. */
+L70:
+ if (! (sflag >= zero)) {
+ goto L90;
+ }
+
+ if (! (sflag == zero)) {
+ goto L80;
+ }
+ sh11 = one;
+ sh22 = one;
+ sflag = -one;
+ goto L90;
+L80:
+ sh21 = -one;
+ sh12 = one;
+ sflag = -one;
+L90:
+ switch (igo) {
+ case 0: goto L120;
+ case 1: goto L150;
+ case 2: goto L180;
+ case 3: goto L210;
+ }
+/* PROCEDURE..SCALE-CHECK */
+L100:
+L110:
+ if (! (*sd1 <= rgamsq)) {
+ goto L130;
+ }
+ if (*sd1 == zero) {
+ goto L160;
+ }
+ igo = 0;
+ igo_fmt = fmt_120;
+/* FIX-H.. */
+ goto L70;
+L120:
+/* Computing 2nd power */
+ r__1 = gam;
+ *sd1 *= r__1 * r__1;
+ *sx1 /= gam;
+ sh11 /= gam;
+ sh12 /= gam;
+ goto L110;
+L130:
+L140:
+ if (! (*sd1 >= gamsq)) {
+ goto L160;
+ }
+ igo = 1;
+ igo_fmt = fmt_150;
+/* FIX-H.. */
+ goto L70;
+L150:
+/* Computing 2nd power */
+ r__1 = gam;
+ *sd1 /= r__1 * r__1;
+ *sx1 *= gam;
+ sh11 *= gam;
+ sh12 *= gam;
+ goto L140;
+L160:
+L170:
+ if (! (dabs(*sd2) <= rgamsq)) {
+ goto L190;
+ }
+ if (*sd2 == zero) {
+ goto L220;
+ }
+ igo = 2;
+ igo_fmt = fmt_180;
+/* FIX-H.. */
+ goto L70;
+L180:
+/* Computing 2nd power */
+ r__1 = gam;
+ *sd2 *= r__1 * r__1;
+ sh21 /= gam;
+ sh22 /= gam;
+ goto L170;
+L190:
+L200:
+ if (! (dabs(*sd2) >= gamsq)) {
+ goto L220;
+ }
+ igo = 3;
+ igo_fmt = fmt_210;
+/* FIX-H.. */
+ goto L70;
+L210:
+/* Computing 2nd power */
+ r__1 = gam;
+ *sd2 /= r__1 * r__1;
+ sh21 *= gam;
+ sh22 *= gam;
+ goto L200;
+L220:
+ if (sflag < 0.f) {
+ goto L250;
+ } else if (sflag == 0) {
+ goto L230;
+ } else {
+ goto L240;
+ }
+L230:
+ sparam[3] = sh21;
+ sparam[4] = sh12;
+ goto L260;
+L240:
+ sparam[2] = sh11;
+ sparam[5] = sh22;
+ goto L260;
+L250:
+ sparam[2] = sh11;
+ sparam[3] = sh21;
+ sparam[4] = sh12;
+ sparam[5] = sh22;
+L260:
+ sparam[1] = sflag;
+ return 0;
+} /* srotmg_ */
diff --git a/contrib/libs/cblas/ssbmv.c b/contrib/libs/cblas/ssbmv.c
new file mode 100644
index 0000000000..9b22b21c1c
--- /dev/null
+++ b/contrib/libs/cblas/ssbmv.c
@@ -0,0 +1,364 @@
+/* ssbmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ssbmv_(char *uplo, integer *n, integer *k, real *alpha,
+ real *a, integer *lda, real *x, integer *incx, real *beta, real *y,
+ integer *incy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j, l, ix, iy, jx, jy, kx, ky, info;
+ real temp1, temp2;
+ extern logical lsame_(char *, char *);
+ integer kplus1;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSBMV performs the matrix-vector operation */
+
+/* y := alpha*A*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are n element vectors and */
+/* A is an n by n symmetric band matrix, with k super-diagonals. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the band matrix A is being supplied as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' The upper triangular part of A is */
+/* being supplied. */
+
+/* UPLO = 'L' or 'l' The lower triangular part of A is */
+/* being supplied. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry, K specifies the number of super-diagonals of the */
+/* matrix A. K must satisfy 0 .le. K. */
+/* Unchanged on exit. */
+
+/* ALPHA - REAL . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - REAL array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */
+/* by n part of the array A must contain the upper triangular */
+/* band part of the symmetric matrix, supplied column by */
+/* column, with the leading diagonal of the matrix in row */
+/* ( k + 1 ) of the array, the first super-diagonal starting at */
+/* position 2 in row k, and so on. The top left k by k triangle */
+/* of the array A is not referenced. */
+/* The following program segment will transfer the upper */
+/* triangular part of a symmetric band matrix from conventional */
+/* full matrix storage to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = K + 1 - J */
+/* DO 10, I = MAX( 1, J - K ), J */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */
+/* by n part of the array A must contain the lower triangular */
+/* band part of the symmetric matrix, supplied column by */
+/* column, with the leading diagonal of the matrix in row 1 of */
+/* the array, the first sub-diagonal starting at position 1 in */
+/* row 2, and so on. The bottom right k by k triangle of the */
+/* array A is not referenced. */
+/* The following program segment will transfer the lower */
+/* triangular part of a symmetric band matrix from conventional */
+/* full matrix storage to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = 1 - J */
+/* DO 10, I = J, MIN( N, J + K ) */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* ( k + 1 ). */
+/* Unchanged on exit. */
+
+/* X - REAL array of DIMENSION at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the */
+/* vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA - REAL . */
+/* On entry, BETA specifies the scalar beta. */
+/* Unchanged on exit. */
+
+/* Y - REAL array of DIMENSION at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the */
+/* vector y. On exit, Y is overwritten by the updated vector y. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --y;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*k < 0) {
+ info = 3;
+ } else if (*lda < *k + 1) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ } else if (*incy == 0) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("SSBMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || *alpha == 0.f && *beta == 1.f) {
+ return 0;
+ }
+
+/* Set up the start points in X and Y. */
+
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+
+/* Start the operations. In this version the elements of the array A */
+/* are accessed sequentially with one pass through A. */
+
+/* First form y := beta*y. */
+
+ if (*beta != 1.f) {
+ if (*incy == 1) {
+ if (*beta == 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[i__] = 0.f;
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[i__] = *beta * y[i__];
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (*beta == 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[iy] = 0.f;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[iy] = *beta * y[iy];
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (*alpha == 0.f) {
+ return 0;
+ }
+ if (lsame_(uplo, "U")) {
+
+/* Form y when upper triangle of A is stored. */
+
+ kplus1 = *k + 1;
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp1 = *alpha * x[j];
+ temp2 = 0.f;
+ l = kplus1 - j;
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__4 = j - 1;
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ y[i__] += temp1 * a[l + i__ + j * a_dim1];
+ temp2 += a[l + i__ + j * a_dim1] * x[i__];
+/* L50: */
+ }
+ y[j] = y[j] + temp1 * a[kplus1 + j * a_dim1] + *alpha * temp2;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp1 = *alpha * x[jx];
+ temp2 = 0.f;
+ ix = kx;
+ iy = ky;
+ l = kplus1 - j;
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *k;
+ i__3 = j - 1;
+ for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+ y[iy] += temp1 * a[l + i__ + j * a_dim1];
+ temp2 += a[l + i__ + j * a_dim1] * x[ix];
+ ix += *incx;
+ iy += *incy;
+/* L70: */
+ }
+ y[jy] = y[jy] + temp1 * a[kplus1 + j * a_dim1] + *alpha *
+ temp2;
+ jx += *incx;
+ jy += *incy;
+ if (j > *k) {
+ kx += *incx;
+ ky += *incy;
+ }
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y when lower triangle of A is stored. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp1 = *alpha * x[j];
+ temp2 = 0.f;
+ y[j] += temp1 * a[j * a_dim1 + 1];
+ l = 1 - j;
+/* Computing MIN */
+ i__4 = *n, i__2 = j + *k;
+ i__3 = min(i__4,i__2);
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ y[i__] += temp1 * a[l + i__ + j * a_dim1];
+ temp2 += a[l + i__ + j * a_dim1] * x[i__];
+/* L90: */
+ }
+ y[j] += *alpha * temp2;
+/* L100: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp1 = *alpha * x[jx];
+ temp2 = 0.f;
+ y[jy] += temp1 * a[j * a_dim1 + 1];
+ l = 1 - j;
+ ix = jx;
+ iy = jy;
+/* Computing MIN */
+ i__4 = *n, i__2 = j + *k;
+ i__3 = min(i__4,i__2);
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ ix += *incx;
+ iy += *incy;
+ y[iy] += temp1 * a[l + i__ + j * a_dim1];
+ temp2 += a[l + i__ + j * a_dim1] * x[ix];
+/* L110: */
+ }
+ y[jy] += *alpha * temp2;
+ jx += *incx;
+ jy += *incy;
+/* L120: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of SSBMV . */
+
+} /* ssbmv_ */
diff --git a/contrib/libs/cblas/sscal.c b/contrib/libs/cblas/sscal.c
new file mode 100644
index 0000000000..1bd2963e3b
--- /dev/null
+++ b/contrib/libs/cblas/sscal.c
@@ -0,0 +1,95 @@
+/* sscal.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sscal_(integer *n, real *sa, real *sx, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ integer i__, m, mp1, nincx;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* scales a vector by a constant. */
+/* uses unrolled loops for increment equal to 1. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 3/93 to return if incx .le. 0. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --sx;
+
+ /* Function Body */
+ if (*n <= 0 || *incx <= 0) {
+ return 0;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ nincx = *n * *incx;
+ i__1 = nincx;
+ i__2 = *incx;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ sx[i__] = *sa * sx[i__];
+/* L10: */
+ }
+ return 0;
+
+/* code for increment equal to 1 */
+
+
+/* clean-up loop */
+
+L20:
+ m = *n % 5;
+ if (m == 0) {
+ goto L40;
+ }
+ i__2 = m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sx[i__] = *sa * sx[i__];
+/* L30: */
+ }
+ if (*n < 5) {
+ return 0;
+ }
+L40:
+ mp1 = m + 1;
+ i__2 = *n;
+ for (i__ = mp1; i__ <= i__2; i__ += 5) {
+ sx[i__] = *sa * sx[i__];
+ sx[i__ + 1] = *sa * sx[i__ + 1];
+ sx[i__ + 2] = *sa * sx[i__ + 2];
+ sx[i__ + 3] = *sa * sx[i__ + 3];
+ sx[i__ + 4] = *sa * sx[i__ + 4];
+/* L50: */
+ }
+ return 0;
+} /* sscal_ */
diff --git a/contrib/libs/cblas/sspmv.c b/contrib/libs/cblas/sspmv.c
new file mode 100644
index 0000000000..7a5db80c54
--- /dev/null
+++ b/contrib/libs/cblas/sspmv.c
@@ -0,0 +1,311 @@
+/* sspmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sspmv_(char *uplo, integer *n, real *alpha, real *ap,
+ real *x, integer *incx, real *beta, real *y, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, k, kk, ix, iy, jx, jy, kx, ky, info;
+ real temp1, temp2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSPMV performs the matrix-vector operation */
+
+/* y := alpha*A*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are n element vectors and */
+/* A is an n by n symmetric matrix, supplied in packed form. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the matrix A is supplied in the packed */
+/* array AP as follows: */
+
+/* UPLO = 'U' or 'u' The upper triangular part of A is */
+/* supplied in AP. */
+
+/* UPLO = 'L' or 'l' The lower triangular part of A is */
+/* supplied in AP. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - REAL . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* AP - REAL array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular part of the symmetric matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */
+/* and a( 2, 2 ) respectively, and so on. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular part of the symmetric matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */
+/* and a( 3, 1 ) respectively, and so on. */
+/* Unchanged on exit. */
+
+/* X - REAL array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA - REAL . */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then Y need not be set on input. */
+/* Unchanged on exit. */
+
+/* Y - REAL array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. On exit, Y is overwritten by the updated */
+/* vector y. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --y;
+ --x;
+ --ap;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 6;
+ } else if (*incy == 0) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("SSPMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || *alpha == 0.f && *beta == 1.f) {
+ return 0;
+ }
+
+/* Set up the start points in X and Y. */
+
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+
+/* Start the operations. In this version the elements of the array AP */
+/* are accessed sequentially with one pass through AP. */
+
+/* First form y := beta*y. */
+
+ if (*beta != 1.f) {
+ if (*incy == 1) {
+ if (*beta == 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[i__] = 0.f;
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[i__] = *beta * y[i__];
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (*beta == 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[iy] = 0.f;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[iy] = *beta * y[iy];
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (*alpha == 0.f) {
+ return 0;
+ }
+ kk = 1;
+ if (lsame_(uplo, "U")) {
+
+/* Form y when AP contains the upper triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp1 = *alpha * x[j];
+ temp2 = 0.f;
+ k = kk;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ y[i__] += temp1 * ap[k];
+ temp2 += ap[k] * x[i__];
+ ++k;
+/* L50: */
+ }
+ y[j] = y[j] + temp1 * ap[kk + j - 1] + *alpha * temp2;
+ kk += j;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp1 = *alpha * x[jx];
+ temp2 = 0.f;
+ ix = kx;
+ iy = ky;
+ i__2 = kk + j - 2;
+ for (k = kk; k <= i__2; ++k) {
+ y[iy] += temp1 * ap[k];
+ temp2 += ap[k] * x[ix];
+ ix += *incx;
+ iy += *incy;
+/* L70: */
+ }
+ y[jy] = y[jy] + temp1 * ap[kk + j - 1] + *alpha * temp2;
+ jx += *incx;
+ jy += *incy;
+ kk += j;
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y when AP contains the lower triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp1 = *alpha * x[j];
+ temp2 = 0.f;
+ y[j] += temp1 * ap[kk];
+ k = kk + 1;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ y[i__] += temp1 * ap[k];
+ temp2 += ap[k] * x[i__];
+ ++k;
+/* L90: */
+ }
+ y[j] += *alpha * temp2;
+ kk += *n - j + 1;
+/* L100: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp1 = *alpha * x[jx];
+ temp2 = 0.f;
+ y[jy] += temp1 * ap[kk];
+ ix = jx;
+ iy = jy;
+ i__2 = kk + *n - j;
+ for (k = kk + 1; k <= i__2; ++k) {
+ ix += *incx;
+ iy += *incy;
+ y[iy] += temp1 * ap[k];
+ temp2 += ap[k] * x[ix];
+/* L110: */
+ }
+ y[jy] += *alpha * temp2;
+ jx += *incx;
+ jy += *incy;
+ kk += *n - j + 1;
+/* L120: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of SSPMV . */
+
+} /* sspmv_ */
diff --git a/contrib/libs/cblas/sspr.c b/contrib/libs/cblas/sspr.c
new file mode 100644
index 0000000000..97791a8ab3
--- /dev/null
+++ b/contrib/libs/cblas/sspr.c
@@ -0,0 +1,237 @@
+/* sspr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sspr_(char *uplo, integer *n, real *alpha, real *x,
+ integer *incx, real *ap)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, k, kk, ix, jx, kx, info;
+ real temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSPR performs the symmetric rank 1 operation */
+
+/* A := alpha*x*x' + A, */
+
+/* where alpha is a real scalar, x is an n element vector and A is an */
+/* n by n symmetric matrix, supplied in packed form. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the matrix A is supplied in the packed */
+/* array AP as follows: */
+
+/* UPLO = 'U' or 'u' The upper triangular part of A is */
+/* supplied in AP. */
+
+/* UPLO = 'L' or 'l' The lower triangular part of A is */
+/* supplied in AP. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - REAL . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - REAL array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* AP - REAL array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular part of the symmetric matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */
+/* and a( 2, 2 ) respectively, and so on. On exit, the array */
+/* AP is overwritten by the upper triangular part of the */
+/* updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular part of the symmetric matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */
+/* and a( 3, 1 ) respectively, and so on. On exit, the array */
+/* AP is overwritten by the lower triangular part of the */
+/* updated matrix. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ }
+ if (info != 0) {
+ xerbla_("SSPR ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || *alpha == 0.f) {
+ return 0;
+ }
+
+/* Set the start point in X if the increment is not unity. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of the array AP */
+/* are accessed sequentially with one pass through AP. */
+
+ kk = 1;
+ if (lsame_(uplo, "U")) {
+
+/* Form A when upper triangle is stored in AP. */
+
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0.f) {
+ temp = *alpha * x[j];
+ k = kk;
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ ap[k] += x[i__] * temp;
+ ++k;
+/* L10: */
+ }
+ }
+ kk += j;
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.f) {
+ temp = *alpha * x[jx];
+ ix = kx;
+ i__2 = kk + j - 1;
+ for (k = kk; k <= i__2; ++k) {
+ ap[k] += x[ix] * temp;
+ ix += *incx;
+/* L30: */
+ }
+ }
+ jx += *incx;
+ kk += j;
+/* L40: */
+ }
+ }
+ } else {
+
+/* Form A when lower triangle is stored in AP. */
+
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0.f) {
+ temp = *alpha * x[j];
+ k = kk;
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ ap[k] += x[i__] * temp;
+ ++k;
+/* L50: */
+ }
+ }
+ kk = kk + *n - j + 1;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.f) {
+ temp = *alpha * x[jx];
+ ix = jx;
+ i__2 = kk + *n - j;
+ for (k = kk; k <= i__2; ++k) {
+ ap[k] += x[ix] * temp;
+ ix += *incx;
+/* L70: */
+ }
+ }
+ jx += *incx;
+ kk = kk + *n - j + 1;
+/* L80: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of SSPR . */
+
+} /* sspr_ */
diff --git a/contrib/libs/cblas/sspr2.c b/contrib/libs/cblas/sspr2.c
new file mode 100644
index 0000000000..3fb675a6f1
--- /dev/null
+++ b/contrib/libs/cblas/sspr2.c
@@ -0,0 +1,269 @@
+/* sspr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sspr2_(char *uplo, integer *n, real *alpha, real *x,
+	integer *incx, real *y, integer *incy, real *ap)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, k, kk, ix, iy, jx, jy, kx, ky, info;
+    real temp1, temp2;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSPR2 performs the symmetric rank 2 operation */
+
+/* A := alpha*x*y' + alpha*y*x' + A, */
+
+/* where alpha is a scalar, x and y are n element vectors and A is an */
+/* n by n symmetric matrix, supplied in packed form. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the matrix A is supplied in the packed */
+/* array AP as follows: */
+
+/* UPLO = 'U' or 'u' The upper triangular part of A is */
+/* supplied in AP. */
+
+/* UPLO = 'L' or 'l' The lower triangular part of A is */
+/* supplied in AP. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - REAL . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - REAL array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* Y - REAL array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. */
+/* Unchanged on exit. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+/* AP - REAL array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular part of the symmetric matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */
+/* and a( 2, 2 ) respectively, and so on. On exit, the array */
+/* AP is overwritten by the upper triangular part of the */
+/* updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular part of the symmetric matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */
+/* and a( 3, 1 ) respectively, and so on. On exit, the array */
+/* AP is overwritten by the lower triangular part of the */
+/* updated matrix. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    --y;
+    --x;
+
+    /* Function Body */
+    info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*incx == 0) {
+	info = 5;
+    } else if (*incy == 0) {
+	info = 7;
+    }
+    if (info != 0) {
+	xerbla_("SSPR2 ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*n == 0 || *alpha == 0.f) {
+	return 0;
+    }
+
+/* Set up the start points in X and Y if the increments are not both */
+/* unity. */
+
+    if (*incx != 1 || *incy != 1) {
+	if (*incx > 0) {
+	    kx = 1;
+	} else {
+	    kx = 1 - (*n - 1) * *incx;
+	}
+	if (*incy > 0) {
+	    ky = 1;
+	} else {
+	    ky = 1 - (*n - 1) * *incy;
+	}
+	jx = kx;
+	jy = ky;
+    }
+
+/* Start the operations. In this version the elements of the array AP */
+/* are accessed sequentially with one pass through AP. */
+
+    kk = 1;
+    if (lsame_(uplo, "U")) {
+
+/* Form A when upper triangle is stored in AP: packed column j occupies j entries. */
+
+	if (*incx == 1 && *incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[j] != 0.f || y[j] != 0.f) {
+		    temp1 = *alpha * y[j];
+		    temp2 = *alpha * x[j];
+		    k = kk;
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			ap[k] = ap[k] + x[i__] * temp1 + y[i__] * temp2;
+			++k;
+/* L10: */
+		    }
+		}
+		kk += j;
+/* L20: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[jx] != 0.f || y[jy] != 0.f) {
+		    temp1 = *alpha * y[jy];
+		    temp2 = *alpha * x[jx];
+		    ix = kx;
+		    iy = ky;
+		    i__2 = kk + j - 1;
+		    for (k = kk; k <= i__2; ++k) {
+			ap[k] = ap[k] + x[ix] * temp1 + y[iy] * temp2;
+			ix += *incx;
+			iy += *incy;
+/* L30: */
+		    }
+		}
+		jx += *incx;
+		jy += *incy;
+		kk += j;
+/* L40: */
+	    }
+	}
+    } else {
+
+/* Form A when lower triangle is stored in AP: packed column j occupies n-j+1 entries. */
+
+	if (*incx == 1 && *incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[j] != 0.f || y[j] != 0.f) {
+		    temp1 = *alpha * y[j];
+		    temp2 = *alpha * x[j];
+		    k = kk;
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			ap[k] = ap[k] + x[i__] * temp1 + y[i__] * temp2;
+			++k;
+/* L50: */
+		    }
+		}
+		kk = kk + *n - j + 1;
+/* L60: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[jx] != 0.f || y[jy] != 0.f) {
+		    temp1 = *alpha * y[jy];
+		    temp2 = *alpha * x[jx];
+		    ix = jx;
+		    iy = jy;
+		    i__2 = kk + *n - j;
+		    for (k = kk; k <= i__2; ++k) {
+			ap[k] = ap[k] + x[ix] * temp1 + y[iy] * temp2;
+			ix += *incx;
+			iy += *incy;
+/* L70: */
+		    }
+		}
+		jx += *incx;
+		jy += *incy;
+		kk = kk + *n - j + 1;
+/* L80: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of SSPR2 . */
+
+} /* sspr2_ */
diff --git a/contrib/libs/cblas/sswap.c b/contrib/libs/cblas/sswap.c
new file mode 100644
index 0000000000..6d6e9e4371
--- /dev/null
+++ b/contrib/libs/cblas/sswap.c
@@ -0,0 +1,114 @@
+/* sswap.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sswap_(integer *n, real *sx, integer *incx, real *sy,
+	integer *incy)
+{
+    /* System generated locals */
+    integer i__1;
+
+    /* Local variables */
+    integer i__, m, ix, iy, mp1;
+    real stemp;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* interchanges two vectors. */
+/* uses unrolled loops for increments equal to 1. */
+/* jack dongarra, linpack, 3/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+    /* Parameter adjustments */
+    --sy;
+    --sx;
+
+    /* Function Body */
+    if (*n <= 0) {
+	return 0;
+    }
+    if (*incx == 1 && *incy == 1) {
+	goto L20;
+    }
+
+/* code for unequal increments or equal increments not equal */
+/* to 1 */
+
+    ix = 1;
+    iy = 1;
+    if (*incx < 0) {
+	ix = (-(*n) + 1) * *incx + 1;
+    }
+    if (*incy < 0) {
+	iy = (-(*n) + 1) * *incy + 1;
+    }
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	stemp = sx[ix];
+	sx[ix] = sy[iy];
+	sy[iy] = stemp;
+	ix += *incx;
+	iy += *incy;
+/* L10: */
+    }
+    return 0;
+
+/* code for both increments equal to 1; main loop unrolled by 3 */
+
+
+/* clean-up loop: swap the first n mod 3 elements one at a time */
+
+L20:
+    m = *n % 3;
+    if (m == 0) {
+	goto L40;
+    }
+    i__1 = m;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	stemp = sx[i__];
+	sx[i__] = sy[i__];
+	sy[i__] = stemp;
+/* L30: */
+    }
+    if (*n < 3) {
+	return 0;
+    }
+L40:
+    mp1 = m + 1;
+    i__1 = *n;
+    for (i__ = mp1; i__ <= i__1; i__ += 3) {
+	stemp = sx[i__];
+	sx[i__] = sy[i__];
+	sy[i__] = stemp;
+	stemp = sx[i__ + 1];
+	sx[i__ + 1] = sy[i__ + 1];
+	sy[i__ + 1] = stemp;
+	stemp = sx[i__ + 2];
+	sx[i__ + 2] = sy[i__ + 2];
+	sy[i__ + 2] = stemp;
+/* L50: */
+    }
+    return 0;
+} /* sswap_ */
diff --git a/contrib/libs/cblas/ssymm.c b/contrib/libs/cblas/ssymm.c
new file mode 100644
index 0000000000..df3424298e
--- /dev/null
+++ b/contrib/libs/cblas/ssymm.c
@@ -0,0 +1,362 @@
+/* ssymm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ssymm_(char *side, char *uplo, integer *m, integer *n,
+	real *alpha, real *a, integer *lda, real *b, integer *ldb, real *beta,
+	real *c__, integer *ldc)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+	    i__3;
+
+    /* Local variables */
+    integer i__, j, k, info;
+    real temp1, temp2;
+    extern logical lsame_(char *, char *);
+    integer nrowa;
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYMM performs one of the matrix-matrix operations */
+
+/* C := alpha*A*B + beta*C, */
+
+/* or */
+
+/* C := alpha*B*A + beta*C, */
+
+/* where alpha and beta are scalars, A is a symmetric matrix and B and */
+/* C are m by n matrices. */
+
+/* Arguments */
+/* ========== */
+
+/* SIDE - CHARACTER*1. */
+/* On entry, SIDE specifies whether the symmetric matrix A */
+/* appears on the left or right in the operation as follows: */
+
+/* SIDE = 'L' or 'l' C := alpha*A*B + beta*C, */
+
+/* SIDE = 'R' or 'r' C := alpha*B*A + beta*C, */
+
+/* Unchanged on exit. */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the symmetric matrix A is to be */
+/* referenced as follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of the */
+/* symmetric matrix is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of the */
+/* symmetric matrix is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix C. */
+/* M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix C. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - REAL . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - REAL array of DIMENSION ( LDA, ka ), where ka is */
+/* m when SIDE = 'L' or 'l' and is n otherwise. */
+/* Before entry with SIDE = 'L' or 'l', the m by m part of */
+/* the array A must contain the symmetric matrix, such that */
+/* when UPLO = 'U' or 'u', the leading m by m upper triangular */
+/* part of the array A must contain the upper triangular part */
+/* of the symmetric matrix and the strictly lower triangular */
+/* part of A is not referenced, and when UPLO = 'L' or 'l', */
+/* the leading m by m lower triangular part of the array A */
+/* must contain the lower triangular part of the symmetric */
+/* matrix and the strictly upper triangular part of A is not */
+/* referenced. */
+/* Before entry with SIDE = 'R' or 'r', the n by n part of */
+/* the array A must contain the symmetric matrix, such that */
+/* when UPLO = 'U' or 'u', the leading n by n upper triangular */
+/* part of the array A must contain the upper triangular part */
+/* of the symmetric matrix and the strictly lower triangular */
+/* part of A is not referenced, and when UPLO = 'L' or 'l', */
+/* the leading n by n lower triangular part of the array A */
+/* must contain the lower triangular part of the symmetric */
+/* matrix and the strictly upper triangular part of A is not */
+/* referenced. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When SIDE = 'L' or 'l' then */
+/* LDA must be at least max( 1, m ), otherwise LDA must be at */
+/* least max( 1, n ). */
+/* Unchanged on exit. */
+
+/* B - REAL array of DIMENSION ( LDB, n ). */
+/* Before entry, the leading m by n part of the array B must */
+/* contain the matrix B. */
+/* Unchanged on exit. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. LDB must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+/* BETA - REAL . */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then C need not be set on input. */
+/* Unchanged on exit. */
+
+/* C - REAL array of DIMENSION ( LDC, n ). */
+/* Before entry, the leading m by n part of the array C must */
+/* contain the matrix C, except when beta is zero, in which */
+/* case C need not be set on entry. */
+/* On exit, the array C is overwritten by the m by n updated */
+/* matrix. */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Set NROWA as the number of rows of A. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+
+    /* Function Body */
+    if (lsame_(side, "L")) {
+	nrowa = *m;
+    } else {
+	nrowa = *n;
+    }
+    upper = lsame_(uplo, "U");
+
+/* Test the input parameters. */
+
+    info = 0;
+    if (! lsame_(side, "L") && ! lsame_(side, "R")) {
+	info = 1;
+    } else if (! upper && ! lsame_(uplo, "L")) {
+	info = 2;
+    } else if (*m < 0) {
+	info = 3;
+    } else if (*n < 0) {
+	info = 4;
+    } else if (*lda < max(1,nrowa)) {
+	info = 7;
+    } else if (*ldb < max(1,*m)) {
+	info = 9;
+    } else if (*ldc < max(1,*m)) {
+	info = 12;
+    }
+    if (info != 0) {
+	xerbla_("SSYMM ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*m == 0 || *n == 0 || *alpha == 0.f && *beta == 1.f) {
+	return 0;
+    }
+
+/* And when alpha.eq.zero: the update reduces to C := beta*C (or a zero fill). */
+
+    if (*alpha == 0.f) {
+	if (*beta == 0.f) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    c__[i__ + j * c_dim1] = 0.f;
+/* L10: */
+		}
+/* L20: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+	return 0;
+    }
+
+/* Start the operations. */
+
+    if (lsame_(side, "L")) {
+
+/* Form C := alpha*A*B + beta*C. */
+
+	if (upper) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    temp1 = *alpha * b[i__ + j * b_dim1];
+		    temp2 = 0.f;
+		    i__3 = i__ - 1;
+		    for (k = 1; k <= i__3; ++k) {
+			c__[k + j * c_dim1] += temp1 * a[k + i__ * a_dim1];
+			temp2 += b[k + j * b_dim1] * a[k + i__ * a_dim1];
+/* L50: */
+		    }
+		    if (*beta == 0.f) {
+			c__[i__ + j * c_dim1] = temp1 * a[i__ + i__ * a_dim1]
+				+ *alpha * temp2;
+		    } else {
+			c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]
+				+ temp1 * a[i__ + i__ * a_dim1] + *alpha *
+				temp2;
+		    }
+/* L60: */
+		}
+/* L70: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		for (i__ = *m; i__ >= 1; --i__) {
+		    temp1 = *alpha * b[i__ + j * b_dim1];
+		    temp2 = 0.f;
+		    i__2 = *m;
+		    for (k = i__ + 1; k <= i__2; ++k) {
+			c__[k + j * c_dim1] += temp1 * a[k + i__ * a_dim1];
+			temp2 += b[k + j * b_dim1] * a[k + i__ * a_dim1];
+/* L80: */
+		    }
+		    if (*beta == 0.f) {
+			c__[i__ + j * c_dim1] = temp1 * a[i__ + i__ * a_dim1]
+				+ *alpha * temp2;
+		    } else {
+			c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]
+				+ temp1 * a[i__ + i__ * a_dim1] + *alpha *
+				temp2;
+		    }
+/* L90: */
+		}
+/* L100: */
+	    }
+	}
+    } else {
+
+/* Form C := alpha*B*A + beta*C. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    temp1 = *alpha * a[j + j * a_dim1];
+	    if (*beta == 0.f) {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    c__[i__ + j * c_dim1] = temp1 * b[i__ + j * b_dim1];
+/* L110: */
+		}
+	    } else {
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1] +
+			    temp1 * b[i__ + j * b_dim1];
+/* L120: */
+		}
+	    }
+	    i__2 = j - 1;
+	    for (k = 1; k <= i__2; ++k) {
+		if (upper) {
+		    temp1 = *alpha * a[k + j * a_dim1];
+		} else {
+		    temp1 = *alpha * a[j + k * a_dim1];
+		}
+		i__3 = *m;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    c__[i__ + j * c_dim1] += temp1 * b[i__ + k * b_dim1];
+/* L130: */
+		}
+/* L140: */
+	    }
+	    i__2 = *n;
+	    for (k = j + 1; k <= i__2; ++k) {
+		if (upper) {
+		    temp1 = *alpha * a[j + k * a_dim1];
+		} else {
+		    temp1 = *alpha * a[k + j * a_dim1];
+		}
+		i__3 = *m;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    c__[i__ + j * c_dim1] += temp1 * b[i__ + k * b_dim1];
+/* L150: */
+		}
+/* L160: */
+	    }
+/* L170: */
+	}
+    }
+
+    return 0;
+
+/* End of SSYMM . */
+
+} /* ssymm_ */
diff --git a/contrib/libs/cblas/ssymv.c b/contrib/libs/cblas/ssymv.c
new file mode 100644
index 0000000000..3a1502c6f0
--- /dev/null
+++ b/contrib/libs/cblas/ssymv.c
@@ -0,0 +1,313 @@
+/* ssymv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ssymv_(char *uplo, integer *n, real *alpha, real *a,
+	integer *lda, real *x, integer *incx, real *beta, real *y, integer *
+	incy)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, ix, iy, jx, jy, kx, ky, info;
+    real temp1, temp2;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYMV performs the matrix-vector operation */
+
+/* y := alpha*A*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are n element vectors and */
+/* A is an n by n symmetric matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array A is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of A */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of A */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - REAL . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - REAL array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular part of the symmetric matrix and the strictly */
+/* lower triangular part of A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular part of the symmetric matrix and the strictly */
+/* upper triangular part of A is not referenced. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+/* X - REAL array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA - REAL . */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then Y need not be set on input. */
+/* Unchanged on exit. */
+
+/* Y - REAL array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. On exit, Y is overwritten by the updated */
+/* vector y. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --x;
+    --y;
+
+    /* Function Body */
+    info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*lda < max(1,*n)) {
+	info = 5;
+    } else if (*incx == 0) {
+	info = 7;
+    } else if (*incy == 0) {
+	info = 10;
+    }
+    if (info != 0) {
+	xerbla_("SSYMV ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*n == 0 || *alpha == 0.f && *beta == 1.f) {
+	return 0;
+    }
+
+/* Set up the start points in X and Y. */
+
+    if (*incx > 0) {
+	kx = 1;
+    } else {
+	kx = 1 - (*n - 1) * *incx;
+    }
+    if (*incy > 0) {
+	ky = 1;
+    } else {
+	ky = 1 - (*n - 1) * *incy;
+    }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through the triangular part */
+/* of A. */
+
+/* First form y := beta*y (scaling done in a separate pass before the A*x sweep). */
+
+    if (*beta != 1.f) {
+	if (*incy == 1) {
+	    if (*beta == 0.f) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[i__] = 0.f;
+/* L10: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[i__] = *beta * y[i__];
+/* L20: */
+		}
+	    }
+	} else {
+	    iy = ky;
+	    if (*beta == 0.f) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[iy] = 0.f;
+		    iy += *incy;
+/* L30: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    y[iy] = *beta * y[iy];
+		    iy += *incy;
+/* L40: */
+		}
+	    }
+	}
+    }
+    if (*alpha == 0.f) {
+	return 0;
+    }
+    if (lsame_(uplo, "U")) {
+
+/* Form y when A is stored in upper triangle. */
+
+	if (*incx == 1 && *incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp1 = *alpha * x[j];
+		temp2 = 0.f;
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    y[i__] += temp1 * a[i__ + j * a_dim1];
+		    temp2 += a[i__ + j * a_dim1] * x[i__];
+/* L50: */
+		}
+		y[j] = y[j] + temp1 * a[j + j * a_dim1] + *alpha * temp2;
+/* L60: */
+	    }
+	} else {
+	    jx = kx;
+	    jy = ky;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp1 = *alpha * x[jx];
+		temp2 = 0.f;
+		ix = kx;
+		iy = ky;
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    y[iy] += temp1 * a[i__ + j * a_dim1];
+		    temp2 += a[i__ + j * a_dim1] * x[ix];
+		    ix += *incx;
+		    iy += *incy;
+/* L70: */
+		}
+		y[jy] = y[jy] + temp1 * a[j + j * a_dim1] + *alpha * temp2;
+		jx += *incx;
+		jy += *incy;
+/* L80: */
+	    }
+	}
+    } else {
+
+/* Form y when A is stored in lower triangle. */
+
+	if (*incx == 1 && *incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp1 = *alpha * x[j];
+		temp2 = 0.f;
+		y[j] += temp1 * a[j + j * a_dim1];
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    y[i__] += temp1 * a[i__ + j * a_dim1];
+		    temp2 += a[i__ + j * a_dim1] * x[i__];
+/* L90: */
+		}
+		y[j] += *alpha * temp2;
+/* L100: */
+	    }
+	} else {
+	    jx = kx;
+	    jy = ky;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp1 = *alpha * x[jx];
+		temp2 = 0.f;
+		y[jy] += temp1 * a[j + j * a_dim1];
+		ix = jx;
+		iy = jy;
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    ix += *incx;
+		    iy += *incy;
+		    y[iy] += temp1 * a[i__ + j * a_dim1];
+		    temp2 += a[i__ + j * a_dim1] * x[ix];
+/* L110: */
+		}
+		y[jy] += *alpha * temp2;
+		jx += *incx;
+		jy += *incy;
+/* L120: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of SSYMV . */
+
+} /* ssymv_ */
diff --git a/contrib/libs/cblas/ssyr.c b/contrib/libs/cblas/ssyr.c
new file mode 100644
index 0000000000..dd433a3450
--- /dev/null
+++ b/contrib/libs/cblas/ssyr.c
@@ -0,0 +1,238 @@
+/* ssyr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ssyr_(char *uplo, integer *n, real *alpha, real *x,
+	integer *incx, real *a, integer *lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, ix, jx, kx, info;
+    real temp;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYR performs the symmetric rank 1 operation */
+
+/* A := alpha*x*x' + A, */
+
+/* where alpha is a real scalar, x is an n element vector and A is an */
+/* n by n symmetric matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array A is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of A */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of A */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - REAL . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - REAL array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* A - REAL array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular part of the symmetric matrix and the strictly */
+/* lower triangular part of A is not referenced. On exit, the */
+/* upper triangular part of the array A is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular part of the symmetric matrix and the strictly */
+/* upper triangular part of A is not referenced. On exit, the */
+/* lower triangular part of the array A is overwritten by the */
+/* lower triangular part of the updated matrix. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --x;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*incx == 0) {
+	info = 5;
+    } else if (*lda < max(1,*n)) {
+	info = 7;
+    }
+    if (info != 0) {
+	xerbla_("SSYR  ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*n == 0 || *alpha == 0.f) {
+	return 0;
+    }
+
+/* Set the start point in X if the increment is not unity (kx is unused when incx == 1). */
+
+    if (*incx <= 0) {
+	kx = 1 - (*n - 1) * *incx;
+    } else if (*incx != 1) {
+	kx = 1;
+    }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through the triangular part */
+/* of A. */
+
+    if (lsame_(uplo, "U")) {
+
+/* Form A when A is stored in upper triangle. */
+
+	if (*incx == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[j] != 0.f) {
+		    temp = *alpha * x[j];
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			a[i__ + j * a_dim1] += x[i__] * temp;
+/* L10: */
+		    }
+		}
+/* L20: */
+	    }
+	} else {
+	    jx = kx;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[jx] != 0.f) {
+		    temp = *alpha * x[jx];
+		    ix = kx;
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			a[i__ + j * a_dim1] += x[ix] * temp;
+			ix += *incx;
+/* L30: */
+		    }
+		}
+		jx += *incx;
+/* L40: */
+	    }
+	}
+    } else {
+
+/* Form A when A is stored in lower triangle. */
+
+	if (*incx == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[j] != 0.f) {
+		    temp = *alpha * x[j];
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			a[i__ + j * a_dim1] += x[i__] * temp;
+/* L50: */
+		    }
+		}
+/* L60: */
+	    }
+	} else {
+	    jx = kx;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[jx] != 0.f) {
+		    temp = *alpha * x[jx];
+		    ix = jx;
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			a[i__ + j * a_dim1] += x[ix] * temp;
+			ix += *incx;
+/* L70: */
+		    }
+		}
+		jx += *incx;
+/* L80: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of SSYR  . */
+
+} /* ssyr_ */
diff --git a/contrib/libs/cblas/ssyr2.c b/contrib/libs/cblas/ssyr2.c
new file mode 100644
index 0000000000..738250ebcd
--- /dev/null
+++ b/contrib/libs/cblas/ssyr2.c
@@ -0,0 +1,274 @@
+/* ssyr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ssyr2_(char *uplo, integer *n, real *alpha, real *x,
+	integer *incx, real *y, integer *incy, real *a, integer *lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, ix, iy, jx, jy, kx, ky, info;
+    real temp1, temp2;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYR2 performs the symmetric rank 2 operation */
+
+/* A := alpha*x*y' + alpha*y*x' + A, */
+
+/* where alpha is a scalar, x and y are n element vectors and A is an n */
+/* by n symmetric matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array A is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of A */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of A */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - REAL . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - REAL array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* Y - REAL array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. */
+/* Unchanged on exit. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+/* A - REAL array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular part of the symmetric matrix and the strictly */
+/* lower triangular part of A is not referenced. On exit, the */
+/* upper triangular part of the array A is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular part of the symmetric matrix and the strictly */
+/* upper triangular part of A is not referenced. On exit, the */
+/* lower triangular part of the array A is overwritten by the */
+/* lower triangular part of the updated matrix. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* f2c receives 0-based C arrays; shift the base pointers so the loops */
+/* below can keep the original 1-based Fortran indexing unchanged. */
+    --x;
+    --y;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+/* info records the 1-based position of the first invalid argument, */
+/* following the BLAS error-reporting convention used by xerbla_. */
+    info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*incx == 0) {
+	info = 5;
+    } else if (*incy == 0) {
+	info = 7;
+    } else if (*lda < max(1,*n)) {
+	info = 9;
+    }
+    if (info != 0) {
+	xerbla_("SSYR2 ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*n == 0 || *alpha == 0.f) {
+	return 0;
+    }
+
+/* Set up the start points in X and Y if the increments are not both */
+/* unity. */
+
+/* A negative increment walks the vector backwards, so the start point */
+/* is placed at the far end of the addressed storage. */
+    if (*incx != 1 || *incy != 1) {
+	if (*incx > 0) {
+	    kx = 1;
+	} else {
+	    kx = 1 - (*n - 1) * *incx;
+	}
+	if (*incy > 0) {
+	    ky = 1;
+	} else {
+	    ky = 1 - (*n - 1) * *incy;
+	}
+	jx = kx;
+	jy = ky;
+    }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through the triangular part */
+/* of A. */
+
+    if (lsame_(uplo, "U")) {
+
+/* Form A when A is stored in the upper triangle. */
+
+	if (*incx == 1 && *incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[j] != 0.f || y[j] != 0.f) {
+		    temp1 = *alpha * y[j];
+		    temp2 = *alpha * x[j];
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[i__] *
+				temp1 + y[i__] * temp2;
+/* L10: */
+		    }
+		}
+/* L20: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[jx] != 0.f || y[jy] != 0.f) {
+		    temp1 = *alpha * y[jy];
+		    temp2 = *alpha * x[jx];
+		    ix = kx;
+		    iy = ky;
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[ix] *
+				temp1 + y[iy] * temp2;
+			ix += *incx;
+			iy += *incy;
+/* L30: */
+		    }
+		}
+		jx += *incx;
+		jy += *incy;
+/* L40: */
+	    }
+	}
+    } else {
+
+/* Form A when A is stored in the lower triangle. */
+
+	if (*incx == 1 && *incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[j] != 0.f || y[j] != 0.f) {
+		    temp1 = *alpha * y[j];
+		    temp2 = *alpha * x[j];
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[i__] *
+				temp1 + y[i__] * temp2;
+/* L50: */
+		    }
+		}
+/* L60: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (x[jx] != 0.f || y[jy] != 0.f) {
+		    temp1 = *alpha * y[jy];
+		    temp2 = *alpha * x[jx];
+		    ix = jx;
+		    iy = jy;
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[ix] *
+				temp1 + y[iy] * temp2;
+			ix += *incx;
+			iy += *incy;
+/* L70: */
+		    }
+		}
+		jx += *incx;
+		jy += *incy;
+/* L80: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of SSYR2 . */
+
+} /* ssyr2_ */
diff --git a/contrib/libs/cblas/ssyr2k.c b/contrib/libs/cblas/ssyr2k.c
new file mode 100644
index 0000000000..2d082fcf88
--- /dev/null
+++ b/contrib/libs/cblas/ssyr2k.c
@@ -0,0 +1,409 @@
+/* ssyr2k.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ssyr2k_(char *uplo, char *trans, integer *n, integer *k,
+	real *alpha, real *a, integer *lda, real *b, integer *ldb, real *beta,
+	real *c__, integer *ldc)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+	    i__3;
+
+    /* Local variables */
+    integer i__, j, l, info;
+    real temp1, temp2;
+    extern logical lsame_(char *, char *);
+    integer nrowa;
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYR2K performs one of the symmetric rank 2k operations */
+
+/* C := alpha*A*B' + alpha*B*A' + beta*C, */
+
+/* or */
+
+/* C := alpha*A'*B + alpha*B'*A + beta*C, */
+
+/* where alpha and beta are scalars, C is an n by n symmetric matrix */
+/* and A and B are n by k matrices in the first case and k by n */
+/* matrices in the second case. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array C is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of C */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of C */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' C := alpha*A*B' + alpha*B*A' + */
+/* beta*C. */
+
+/* TRANS = 'T' or 't' C := alpha*A'*B + alpha*B'*A + */
+/* beta*C. */
+
+/* TRANS = 'C' or 'c' C := alpha*A'*B + alpha*B'*A + */
+/* beta*C. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix C. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry with TRANS = 'N' or 'n', K specifies the number */
+/* of columns of the matrices A and B, and on entry with */
+/* TRANS = 'T' or 't' or 'C' or 'c', K specifies the number */
+/* of rows of the matrices A and B. K must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - REAL . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - REAL array of DIMENSION ( LDA, ka ), where ka is */
+/* k when TRANS = 'N' or 'n', and is n otherwise. */
+/* Before entry with TRANS = 'N' or 'n', the leading n by k */
+/* part of the array A must contain the matrix A, otherwise */
+/* the leading k by n part of the array A must contain the */
+/* matrix A. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDA must be at least max( 1, n ), otherwise LDA must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* B - REAL array of DIMENSION ( LDB, kb ), where kb is */
+/* k when TRANS = 'N' or 'n', and is n otherwise. */
+/* Before entry with TRANS = 'N' or 'n', the leading n by k */
+/* part of the array B must contain the matrix B, otherwise */
+/* the leading k by n part of the array B must contain the */
+/* matrix B. */
+/* Unchanged on exit. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDB must be at least max( 1, n ), otherwise LDB must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* BETA - REAL . */
+/* On entry, BETA specifies the scalar beta. */
+/* Unchanged on exit. */
+
+/* C - REAL array of DIMENSION ( LDC, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array C must contain the upper */
+/* triangular part of the symmetric matrix and the strictly */
+/* lower triangular part of C is not referenced. On exit, the */
+/* upper triangular part of the array C is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array C must contain the lower */
+/* triangular part of the symmetric matrix and the strictly */
+/* upper triangular part of C is not referenced. On exit, the */
+/* lower triangular part of the array C is overwritten by the */
+/* lower triangular part of the updated matrix. */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* f2c receives 0-based C arrays; shift the base pointers so the loops */
+/* below can keep the original 1-based Fortran indexing unchanged. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+
+    /* Function Body */
+/* nrowa is the stored row count of A (and B): n in the 'N' case, */
+/* k in the transposed cases; it drives the LDA/LDB validation. */
+    if (lsame_(trans, "N")) {
+	nrowa = *n;
+    } else {
+	nrowa = *k;
+    }
+    upper = lsame_(uplo, "U");
+
+/* info records the 1-based position of the first invalid argument, */
+/* following the BLAS error-reporting convention used by xerbla_. */
+    info = 0;
+    if (! upper && ! lsame_(uplo, "L")) {
+	info = 1;
+    } else if (! lsame_(trans, "N") && ! lsame_(trans,
+	    "T") && ! lsame_(trans, "C")) {
+	info = 2;
+    } else if (*n < 0) {
+	info = 3;
+    } else if (*k < 0) {
+	info = 4;
+    } else if (*lda < max(1,nrowa)) {
+	info = 7;
+    } else if (*ldb < max(1,nrowa)) {
+	info = 9;
+    } else if (*ldc < max(1,*n)) {
+	info = 12;
+    }
+    if (info != 0) {
+	xerbla_("SSYR2K", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+/* && binds tighter than ||, matching the Fortran condition */
+/* N.EQ.0 .OR. ((ALPHA.EQ.ZERO .OR. K.EQ.0) .AND. BETA.EQ.ONE). */
+    if (*n == 0 || (*alpha == 0.f || *k == 0) && *beta == 1.f) {
+	return 0;
+    }
+
+/* And when alpha.eq.zero. */
+
+    if (*alpha == 0.f) {
+	if (upper) {
+	    if (*beta == 0.f) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = 0.f;
+/* L10: */
+		    }
+/* L20: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L30: */
+		    }
+/* L40: */
+		}
+	    }
+	} else {
+	    if (*beta == 0.f) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = 0.f;
+/* L50: */
+		    }
+/* L60: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L70: */
+		    }
+/* L80: */
+		}
+	    }
+	}
+	return 0;
+    }
+
+/* Start the operations. */
+
+    if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*B' + alpha*B*A' + C. */
+
+	if (upper) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (*beta == 0.f) {
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = 0.f;
+/* L90: */
+		    }
+		} else if (*beta != 1.f) {
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L100: */
+		    }
+		}
+		i__2 = *k;
+		for (l = 1; l <= i__2; ++l) {
+		    if (a[j + l * a_dim1] != 0.f || b[j + l * b_dim1] != 0.f)
+			    {
+			temp1 = *alpha * b[j + l * b_dim1];
+			temp2 = *alpha * a[j + l * a_dim1];
+			i__3 = j;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    c__[i__ + j * c_dim1] = c__[i__ + j * c_dim1] + a[
+				    i__ + l * a_dim1] * temp1 + b[i__ + l *
+				    b_dim1] * temp2;
+/* L110: */
+			}
+		    }
+/* L120: */
+		}
+/* L130: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (*beta == 0.f) {
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = 0.f;
+/* L140: */
+		    }
+		} else if (*beta != 1.f) {
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L150: */
+		    }
+		}
+		i__2 = *k;
+		for (l = 1; l <= i__2; ++l) {
+		    if (a[j + l * a_dim1] != 0.f || b[j + l * b_dim1] != 0.f)
+			    {
+			temp1 = *alpha * b[j + l * b_dim1];
+			temp2 = *alpha * a[j + l * a_dim1];
+			i__3 = *n;
+			for (i__ = j; i__ <= i__3; ++i__) {
+			    c__[i__ + j * c_dim1] = c__[i__ + j * c_dim1] + a[
+				    i__ + l * a_dim1] * temp1 + b[i__ + l *
+				    b_dim1] * temp2;
+/* L160: */
+			}
+		    }
+/* L170: */
+		}
+/* L180: */
+	    }
+	}
+    } else {
+
+/* Form C := alpha*A'*B + alpha*B'*A + C. */
+
+	if (upper) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    temp1 = 0.f;
+		    temp2 = 0.f;
+		    i__3 = *k;
+		    for (l = 1; l <= i__3; ++l) {
+			temp1 += a[l + i__ * a_dim1] * b[l + j * b_dim1];
+			temp2 += b[l + i__ * b_dim1] * a[l + j * a_dim1];
+/* L190: */
+		    }
+		    if (*beta == 0.f) {
+			c__[i__ + j * c_dim1] = *alpha * temp1 + *alpha *
+				temp2;
+		    } else {
+			c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]
+				+ *alpha * temp1 + *alpha * temp2;
+		    }
+/* L200: */
+		}
+/* L210: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n;
+		for (i__ = j; i__ <= i__2; ++i__) {
+		    temp1 = 0.f;
+		    temp2 = 0.f;
+		    i__3 = *k;
+		    for (l = 1; l <= i__3; ++l) {
+			temp1 += a[l + i__ * a_dim1] * b[l + j * b_dim1];
+			temp2 += b[l + i__ * b_dim1] * a[l + j * a_dim1];
+/* L220: */
+		    }
+		    if (*beta == 0.f) {
+			c__[i__ + j * c_dim1] = *alpha * temp1 + *alpha *
+				temp2;
+		    } else {
+			c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]
+				+ *alpha * temp1 + *alpha * temp2;
+		    }
+/* L230: */
+		}
+/* L240: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of SSYR2K. */
+
+} /* ssyr2k_ */
diff --git a/contrib/libs/cblas/ssyrk.c b/contrib/libs/cblas/ssyrk.c
new file mode 100644
index 0000000000..1c2bc37c2d
--- /dev/null
+++ b/contrib/libs/cblas/ssyrk.c
@@ -0,0 +1,372 @@
+/* ssyrk.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ssyrk_(char *uplo, char *trans, integer *n, integer *k,
+	real *alpha, real *a, integer *lda, real *beta, real *c__, integer *
+	ldc)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__, j, l, info;
+    real temp;
+    extern logical lsame_(char *, char *);
+    integer nrowa;
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYRK performs one of the symmetric rank k operations */
+
+/* C := alpha*A*A' + beta*C, */
+
+/* or */
+
+/* C := alpha*A'*A + beta*C, */
+
+/* where alpha and beta are scalars, C is an n by n symmetric matrix */
+/* and A is an n by k matrix in the first case and a k by n matrix */
+/* in the second case. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array C is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of C */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of C */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' C := alpha*A*A' + beta*C. */
+
+/* TRANS = 'T' or 't' C := alpha*A'*A + beta*C. */
+
+/* TRANS = 'C' or 'c' C := alpha*A'*A + beta*C. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix C. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry with TRANS = 'N' or 'n', K specifies the number */
+/* of columns of the matrix A, and on entry with */
+/* TRANS = 'T' or 't' or 'C' or 'c', K specifies the number */
+/* of rows of the matrix A. K must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - REAL . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - REAL array of DIMENSION ( LDA, ka ), where ka is */
+/* k when TRANS = 'N' or 'n', and is n otherwise. */
+/* Before entry with TRANS = 'N' or 'n', the leading n by k */
+/* part of the array A must contain the matrix A, otherwise */
+/* the leading k by n part of the array A must contain the */
+/* matrix A. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDA must be at least max( 1, n ), otherwise LDA must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* BETA - REAL . */
+/* On entry, BETA specifies the scalar beta. */
+/* Unchanged on exit. */
+
+/* C - REAL array of DIMENSION ( LDC, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array C must contain the upper */
+/* triangular part of the symmetric matrix and the strictly */
+/* lower triangular part of C is not referenced. On exit, the */
+/* upper triangular part of the array C is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array C must contain the lower */
+/* triangular part of the symmetric matrix and the strictly */
+/* upper triangular part of C is not referenced. On exit, the */
+/* lower triangular part of the array C is overwritten by the */
+/* lower triangular part of the updated matrix. */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* f2c receives 0-based C arrays; shift the base pointers so the loops */
+/* below can keep the original 1-based Fortran indexing unchanged. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+
+    /* Function Body */
+/* nrowa is the stored row count of A: n in the 'N' case, k in the */
+/* transposed cases; it drives the LDA validation. */
+    if (lsame_(trans, "N")) {
+	nrowa = *n;
+    } else {
+	nrowa = *k;
+    }
+    upper = lsame_(uplo, "U");
+
+/* info records the 1-based position of the first invalid argument, */
+/* following the BLAS error-reporting convention used by xerbla_. */
+    info = 0;
+    if (! upper && ! lsame_(uplo, "L")) {
+	info = 1;
+    } else if (! lsame_(trans, "N") && ! lsame_(trans,
+	    "T") && ! lsame_(trans, "C")) {
+	info = 2;
+    } else if (*n < 0) {
+	info = 3;
+    } else if (*k < 0) {
+	info = 4;
+    } else if (*lda < max(1,nrowa)) {
+	info = 7;
+    } else if (*ldc < max(1,*n)) {
+	info = 10;
+    }
+    if (info != 0) {
+	xerbla_("SSYRK ", &info);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+/* && binds tighter than ||, matching the Fortran condition */
+/* N.EQ.0 .OR. ((ALPHA.EQ.ZERO .OR. K.EQ.0) .AND. BETA.EQ.ONE). */
+    if (*n == 0 || (*alpha == 0.f || *k == 0) && *beta == 1.f) {
+	return 0;
+    }
+
+/* And when alpha.eq.zero. */
+
+    if (*alpha == 0.f) {
+	if (upper) {
+	    if (*beta == 0.f) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = 0.f;
+/* L10: */
+		    }
+/* L20: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L30: */
+		    }
+/* L40: */
+		}
+	    }
+	} else {
+	    if (*beta == 0.f) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = 0.f;
+/* L50: */
+		    }
+/* L60: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L70: */
+		    }
+/* L80: */
+		}
+	    }
+	}
+	return 0;
+    }
+
+/* Start the operations. */
+
+    if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*A' + beta*C. */
+
+	if (upper) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (*beta == 0.f) {
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = 0.f;
+/* L90: */
+		    }
+		} else if (*beta != 1.f) {
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L100: */
+		    }
+		}
+		i__2 = *k;
+		for (l = 1; l <= i__2; ++l) {
+		    if (a[j + l * a_dim1] != 0.f) {
+			temp = *alpha * a[j + l * a_dim1];
+			i__3 = j;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    c__[i__ + j * c_dim1] += temp * a[i__ + l *
+				    a_dim1];
+/* L110: */
+			}
+		    }
+/* L120: */
+		}
+/* L130: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (*beta == 0.f) {
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = 0.f;
+/* L140: */
+		    }
+		} else if (*beta != 1.f) {
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L150: */
+		    }
+		}
+		i__2 = *k;
+		for (l = 1; l <= i__2; ++l) {
+		    if (a[j + l * a_dim1] != 0.f) {
+			temp = *alpha * a[j + l * a_dim1];
+			i__3 = *n;
+			for (i__ = j; i__ <= i__3; ++i__) {
+			    c__[i__ + j * c_dim1] += temp * a[i__ + l *
+				    a_dim1];
+/* L160: */
+			}
+		    }
+/* L170: */
+		}
+/* L180: */
+	    }
+	}
+    } else {
+
+/* Form C := alpha*A'*A + beta*C. */
+
+	if (upper) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    temp = 0.f;
+		    i__3 = *k;
+		    for (l = 1; l <= i__3; ++l) {
+			temp += a[l + i__ * a_dim1] * a[l + j * a_dim1];
+/* L190: */
+		    }
+		    if (*beta == 0.f) {
+			c__[i__ + j * c_dim1] = *alpha * temp;
+		    } else {
+			c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+				i__ + j * c_dim1];
+		    }
+/* L200: */
+		}
+/* L210: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n;
+		for (i__ = j; i__ <= i__2; ++i__) {
+		    temp = 0.f;
+		    i__3 = *k;
+		    for (l = 1; l <= i__3; ++l) {
+			temp += a[l + i__ * a_dim1] * a[l + j * a_dim1];
+/* L220: */
+		    }
+		    if (*beta == 0.f) {
+			c__[i__ + j * c_dim1] = *alpha * temp;
+		    } else {
+			c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+				i__ + j * c_dim1];
+		    }
+/* L230: */
+		}
+/* L240: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of SSYRK . */
+
+} /* ssyrk_ */
diff --git a/contrib/libs/cblas/stbmv.c b/contrib/libs/cblas/stbmv.c
new file mode 100644
index 0000000000..bea0335e2d
--- /dev/null
+++ b/contrib/libs/cblas/stbmv.c
@@ -0,0 +1,422 @@
+/* stbmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int stbmv_(char *uplo, char *trans, char *diag, integer *n,
+ integer *k, real *a, integer *lda, real *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j, l, ix, jx, kx, info;
+ real temp;
+ extern logical lsame_(char *, char *);
+ integer kplus1;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STBMV performs one of the matrix-vector operations */
+
+/* x := A*x, or x := A'*x, */
+
+/* where x is an n element vector and A is an n by n unit, or non-unit, */
+/* upper or lower triangular band matrix, with ( k + 1 ) diagonals. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' x := A*x. */
+
+/* TRANS = 'T' or 't' x := A'*x. */
+
+/* TRANS = 'C' or 'c' x := A'*x. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry with UPLO = 'U' or 'u', K specifies the number of */
+/* super-diagonals of the matrix A. */
+/* On entry with UPLO = 'L' or 'l', K specifies the number of */
+/* sub-diagonals of the matrix A. */
+/* K must satisfy 0 .le. K. */
+/* Unchanged on exit. */
+
+/* A - REAL array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */
+/* by n part of the array A must contain the upper triangular */
+/* band part of the matrix of coefficients, supplied column by */
+/* column, with the leading diagonal of the matrix in row */
+/* ( k + 1 ) of the array, the first super-diagonal starting at */
+/* position 2 in row k, and so on. The top left k by k triangle */
+/* of the array A is not referenced. */
+/* The following program segment will transfer an upper */
+/* triangular band matrix from conventional full matrix storage */
+/* to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = K + 1 - J */
+/* DO 10, I = MAX( 1, J - K ), J */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */
+/* by n part of the array A must contain the lower triangular */
+/* band part of the matrix of coefficients, supplied column by */
+/* column, with the leading diagonal of the matrix in row 1 of */
+/* the array, the first sub-diagonal starting at position 1 in */
+/* row 2, and so on. The bottom right k by k triangle of the */
+/* array A is not referenced. */
+/* The following program segment will transfer a lower */
+/* triangular band matrix from conventional full matrix storage */
+/* to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = 1 - J */
+/* DO 10, I = J, MIN( N, J + K ) */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Note that when DIAG = 'U' or 'u' the elements of the array A */
+/* corresponding to the diagonal elements of the matrix are not */
+/* referenced, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* ( k + 1 ). */
+/* Unchanged on exit. */
+
+/* X - REAL array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. On exit, X is overwritten with the */
+/* tranformed vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*k < 0) {
+ info = 5;
+ } else if (*lda < *k + 1) {
+ info = 7;
+ } else if (*incx == 0) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("STBMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := A*x. */
+
+ if (lsame_(uplo, "U")) {
+ kplus1 = *k + 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0.f) {
+ temp = x[j];
+ l = kplus1 - j;
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__4 = j - 1;
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ x[i__] += temp * a[l + i__ + j * a_dim1];
+/* L10: */
+ }
+ if (nounit) {
+ x[j] *= a[kplus1 + j * a_dim1];
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.f) {
+ temp = x[jx];
+ ix = kx;
+ l = kplus1 - j;
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *k;
+ i__3 = j - 1;
+ for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+ x[ix] += temp * a[l + i__ + j * a_dim1];
+ ix += *incx;
+/* L30: */
+ }
+ if (nounit) {
+ x[jx] *= a[kplus1 + j * a_dim1];
+ }
+ }
+ jx += *incx;
+ if (j > *k) {
+ kx += *incx;
+ }
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ if (x[j] != 0.f) {
+ temp = x[j];
+ l = 1 - j;
+/* Computing MIN */
+ i__1 = *n, i__3 = j + *k;
+ i__4 = j + 1;
+ for (i__ = min(i__1,i__3); i__ >= i__4; --i__) {
+ x[i__] += temp * a[l + i__ + j * a_dim1];
+/* L50: */
+ }
+ if (nounit) {
+ x[j] *= a[j * a_dim1 + 1];
+ }
+ }
+/* L60: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ if (x[jx] != 0.f) {
+ temp = x[jx];
+ ix = kx;
+ l = 1 - j;
+/* Computing MIN */
+ i__4 = *n, i__1 = j + *k;
+ i__3 = j + 1;
+ for (i__ = min(i__4,i__1); i__ >= i__3; --i__) {
+ x[ix] += temp * a[l + i__ + j * a_dim1];
+ ix -= *incx;
+/* L70: */
+ }
+ if (nounit) {
+ x[jx] *= a[j * a_dim1 + 1];
+ }
+ }
+ jx -= *incx;
+ if (*n - j >= *k) {
+ kx -= *incx;
+ }
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := A'*x. */
+
+ if (lsame_(uplo, "U")) {
+ kplus1 = *k + 1;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ temp = x[j];
+ l = kplus1 - j;
+ if (nounit) {
+ temp *= a[kplus1 + j * a_dim1];
+ }
+/* Computing MAX */
+ i__4 = 1, i__1 = j - *k;
+ i__3 = max(i__4,i__1);
+ for (i__ = j - 1; i__ >= i__3; --i__) {
+ temp += a[l + i__ + j * a_dim1] * x[i__];
+/* L90: */
+ }
+ x[j] = temp;
+/* L100: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ temp = x[jx];
+ kx -= *incx;
+ ix = kx;
+ l = kplus1 - j;
+ if (nounit) {
+ temp *= a[kplus1 + j * a_dim1];
+ }
+/* Computing MAX */
+ i__4 = 1, i__1 = j - *k;
+ i__3 = max(i__4,i__1);
+ for (i__ = j - 1; i__ >= i__3; --i__) {
+ temp += a[l + i__ + j * a_dim1] * x[ix];
+ ix -= *incx;
+/* L110: */
+ }
+ x[jx] = temp;
+ jx -= *incx;
+/* L120: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__3 = *n;
+ for (j = 1; j <= i__3; ++j) {
+ temp = x[j];
+ l = 1 - j;
+ if (nounit) {
+ temp *= a[j * a_dim1 + 1];
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = j + *k;
+ i__4 = min(i__1,i__2);
+ for (i__ = j + 1; i__ <= i__4; ++i__) {
+ temp += a[l + i__ + j * a_dim1] * x[i__];
+/* L130: */
+ }
+ x[j] = temp;
+/* L140: */
+ }
+ } else {
+ jx = kx;
+ i__3 = *n;
+ for (j = 1; j <= i__3; ++j) {
+ temp = x[jx];
+ kx += *incx;
+ ix = kx;
+ l = 1 - j;
+ if (nounit) {
+ temp *= a[j * a_dim1 + 1];
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = j + *k;
+ i__4 = min(i__1,i__2);
+ for (i__ = j + 1; i__ <= i__4; ++i__) {
+ temp += a[l + i__ + j * a_dim1] * x[ix];
+ ix += *incx;
+/* L150: */
+ }
+ x[jx] = temp;
+ jx += *incx;
+/* L160: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of STBMV . */
+
+} /* stbmv_ */
diff --git a/contrib/libs/cblas/stbsv.c b/contrib/libs/cblas/stbsv.c
new file mode 100644
index 0000000000..ef933a1602
--- /dev/null
+++ b/contrib/libs/cblas/stbsv.c
@@ -0,0 +1,426 @@
+/* stbsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int stbsv_(char *uplo, char *trans, char *diag, integer *n,
+ integer *k, real *a, integer *lda, real *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j, l, ix, jx, kx, info;
+ real temp;
+ extern logical lsame_(char *, char *);
+ integer kplus1;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STBSV solves one of the systems of equations */
+
+/* A*x = b, or A'*x = b, */
+
+/* where b and x are n element vectors and A is an n by n unit, or */
+/* non-unit, upper or lower triangular band matrix, with ( k + 1 ) */
+/* diagonals. */
+
+/* No test for singularity or near-singularity is included in this */
+/* routine. Such tests must be performed before calling this routine. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the equations to be solved as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' A*x = b. */
+
+/* TRANS = 'T' or 't' A'*x = b. */
+
+/* TRANS = 'C' or 'c' A'*x = b. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry with UPLO = 'U' or 'u', K specifies the number of */
+/* super-diagonals of the matrix A. */
+/* On entry with UPLO = 'L' or 'l', K specifies the number of */
+/* sub-diagonals of the matrix A. */
+/* K must satisfy 0 .le. K. */
+/* Unchanged on exit. */
+
+/* A - REAL array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */
+/* by n part of the array A must contain the upper triangular */
+/* band part of the matrix of coefficients, supplied column by */
+/* column, with the leading diagonal of the matrix in row */
+/* ( k + 1 ) of the array, the first super-diagonal starting at */
+/* position 2 in row k, and so on. The top left k by k triangle */
+/* of the array A is not referenced. */
+/* The following program segment will transfer an upper */
+/* triangular band matrix from conventional full matrix storage */
+/* to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = K + 1 - J */
+/* DO 10, I = MAX( 1, J - K ), J */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */
+/* by n part of the array A must contain the lower triangular */
+/* band part of the matrix of coefficients, supplied column by */
+/* column, with the leading diagonal of the matrix in row 1 of */
+/* the array, the first sub-diagonal starting at position 1 in */
+/* row 2, and so on. The bottom right k by k triangle of the */
+/* array A is not referenced. */
+/* The following program segment will transfer a lower */
+/* triangular band matrix from conventional full matrix storage */
+/* to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = 1 - J */
+/* DO 10, I = J, MIN( N, J + K ) */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Note that when DIAG = 'U' or 'u' the elements of the array A */
+/* corresponding to the diagonal elements of the matrix are not */
+/* referenced, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* ( k + 1 ). */
+/* Unchanged on exit. */
+
+/* X - REAL array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element right-hand side vector b. On exit, X is overwritten */
+/* with the solution vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*k < 0) {
+ info = 5;
+ } else if (*lda < *k + 1) {
+ info = 7;
+ } else if (*incx == 0) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("STBSV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := inv( A )*x. */
+
+ if (lsame_(uplo, "U")) {
+ kplus1 = *k + 1;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ if (x[j] != 0.f) {
+ l = kplus1 - j;
+ if (nounit) {
+ x[j] /= a[kplus1 + j * a_dim1];
+ }
+ temp = x[j];
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__1 = max(i__2,i__3);
+ for (i__ = j - 1; i__ >= i__1; --i__) {
+ x[i__] -= temp * a[l + i__ + j * a_dim1];
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ kx -= *incx;
+ if (x[jx] != 0.f) {
+ ix = kx;
+ l = kplus1 - j;
+ if (nounit) {
+ x[jx] /= a[kplus1 + j * a_dim1];
+ }
+ temp = x[jx];
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__1 = max(i__2,i__3);
+ for (i__ = j - 1; i__ >= i__1; --i__) {
+ x[ix] -= temp * a[l + i__ + j * a_dim1];
+ ix -= *incx;
+/* L30: */
+ }
+ }
+ jx -= *incx;
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0.f) {
+ l = 1 - j;
+ if (nounit) {
+ x[j] /= a[j * a_dim1 + 1];
+ }
+ temp = x[j];
+/* Computing MIN */
+ i__3 = *n, i__4 = j + *k;
+ i__2 = min(i__3,i__4);
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ x[i__] -= temp * a[l + i__ + j * a_dim1];
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ kx += *incx;
+ if (x[jx] != 0.f) {
+ ix = kx;
+ l = 1 - j;
+ if (nounit) {
+ x[jx] /= a[j * a_dim1 + 1];
+ }
+ temp = x[jx];
+/* Computing MIN */
+ i__3 = *n, i__4 = j + *k;
+ i__2 = min(i__3,i__4);
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ x[ix] -= temp * a[l + i__ + j * a_dim1];
+ ix += *incx;
+/* L70: */
+ }
+ }
+ jx += *incx;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := inv( A')*x. */
+
+ if (lsame_(uplo, "U")) {
+ kplus1 = *k + 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[j];
+ l = kplus1 - j;
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__4 = j - 1;
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ temp -= a[l + i__ + j * a_dim1] * x[i__];
+/* L90: */
+ }
+ if (nounit) {
+ temp /= a[kplus1 + j * a_dim1];
+ }
+ x[j] = temp;
+/* L100: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[jx];
+ ix = kx;
+ l = kplus1 - j;
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *k;
+ i__3 = j - 1;
+ for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+ temp -= a[l + i__ + j * a_dim1] * x[ix];
+ ix += *incx;
+/* L110: */
+ }
+ if (nounit) {
+ temp /= a[kplus1 + j * a_dim1];
+ }
+ x[jx] = temp;
+ jx += *incx;
+ if (j > *k) {
+ kx += *incx;
+ }
+/* L120: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ temp = x[j];
+ l = 1 - j;
+/* Computing MIN */
+ i__1 = *n, i__3 = j + *k;
+ i__4 = j + 1;
+ for (i__ = min(i__1,i__3); i__ >= i__4; --i__) {
+ temp -= a[l + i__ + j * a_dim1] * x[i__];
+/* L130: */
+ }
+ if (nounit) {
+ temp /= a[j * a_dim1 + 1];
+ }
+ x[j] = temp;
+/* L140: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ temp = x[jx];
+ ix = kx;
+ l = 1 - j;
+/* Computing MIN */
+ i__4 = *n, i__1 = j + *k;
+ i__3 = j + 1;
+ for (i__ = min(i__4,i__1); i__ >= i__3; --i__) {
+ temp -= a[l + i__ + j * a_dim1] * x[ix];
+ ix -= *incx;
+/* L150: */
+ }
+ if (nounit) {
+ temp /= a[j * a_dim1 + 1];
+ }
+ x[jx] = temp;
+ jx -= *incx;
+ if (*n - j >= *k) {
+ kx -= *incx;
+ }
+/* L160: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of STBSV . */
+
+} /* stbsv_ */
diff --git a/contrib/libs/cblas/stpmv.c b/contrib/libs/cblas/stpmv.c
new file mode 100644
index 0000000000..9d57e2ea9e
--- /dev/null
+++ b/contrib/libs/cblas/stpmv.c
@@ -0,0 +1,357 @@
+/* stpmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int stpmv_(char *uplo, char *trans, char *diag, integer *n,
+ real *ap, real *x, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, k, kk, ix, jx, kx, info;
+ real temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STPMV performs one of the matrix-vector operations */
+
+/* x := A*x, or x := A'*x, */
+
+/* where x is an n element vector and A is an n by n unit, or non-unit, */
+/* upper or lower triangular matrix, supplied in packed form. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' x := A*x. */
+
+/* TRANS = 'T' or 't' x := A'*x. */
+
+/* TRANS = 'C' or 'c' x := A'*x. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* AP - REAL array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular matrix packed sequentially, */
+/* column by column, so that AP( 1 ) contains a( 1, 1 ), */
+/* AP( 2 ) and AP( 3 ) contain a( 1, 2 ) and a( 2, 2 ) */
+/* respectively, and so on. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular matrix packed sequentially, */
+/* column by column, so that AP( 1 ) contains a( 1, 1 ), */
+/* AP( 2 ) and AP( 3 ) contain a( 2, 1 ) and a( 3, 1 ) */
+/* respectively, and so on. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* X - REAL array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. On exit, X is overwritten with the */
+/*           transformed vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --x;
+ --ap;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*incx == 0) {
+ info = 7;
+ }
+ if (info != 0) {
+ xerbla_("STPMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of AP are */
+/* accessed sequentially with one pass through AP. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := A*x. */
+
+ if (lsame_(uplo, "U")) {
+ kk = 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0.f) {
+ temp = x[j];
+ k = kk;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[i__] += temp * ap[k];
+ ++k;
+/* L10: */
+ }
+ if (nounit) {
+ x[j] *= ap[kk + j - 1];
+ }
+ }
+ kk += j;
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.f) {
+ temp = x[jx];
+ ix = kx;
+ i__2 = kk + j - 2;
+ for (k = kk; k <= i__2; ++k) {
+ x[ix] += temp * ap[k];
+ ix += *incx;
+/* L30: */
+ }
+ if (nounit) {
+ x[jx] *= ap[kk + j - 1];
+ }
+ }
+ jx += *incx;
+ kk += j;
+/* L40: */
+ }
+ }
+ } else {
+ kk = *n * (*n + 1) / 2;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ if (x[j] != 0.f) {
+ temp = x[j];
+ k = kk;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ x[i__] += temp * ap[k];
+ --k;
+/* L50: */
+ }
+ if (nounit) {
+ x[j] *= ap[kk - *n + j];
+ }
+ }
+ kk -= *n - j + 1;
+/* L60: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ if (x[jx] != 0.f) {
+ temp = x[jx];
+ ix = kx;
+ i__1 = kk - (*n - (j + 1));
+ for (k = kk; k >= i__1; --k) {
+ x[ix] += temp * ap[k];
+ ix -= *incx;
+/* L70: */
+ }
+ if (nounit) {
+ x[jx] *= ap[kk - *n + j];
+ }
+ }
+ jx -= *incx;
+ kk -= *n - j + 1;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := A'*x. */
+
+ if (lsame_(uplo, "U")) {
+ kk = *n * (*n + 1) / 2;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ temp = x[j];
+ if (nounit) {
+ temp *= ap[kk];
+ }
+ k = kk - 1;
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ temp += ap[k] * x[i__];
+ --k;
+/* L90: */
+ }
+ x[j] = temp;
+ kk -= j;
+/* L100: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ temp = x[jx];
+ ix = jx;
+ if (nounit) {
+ temp *= ap[kk];
+ }
+ i__1 = kk - j + 1;
+ for (k = kk - 1; k >= i__1; --k) {
+ ix -= *incx;
+ temp += ap[k] * x[ix];
+/* L110: */
+ }
+ x[jx] = temp;
+ jx -= *incx;
+ kk -= j;
+/* L120: */
+ }
+ }
+ } else {
+ kk = 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[j];
+ if (nounit) {
+ temp *= ap[kk];
+ }
+ k = kk + 1;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ temp += ap[k] * x[i__];
+ ++k;
+/* L130: */
+ }
+ x[j] = temp;
+ kk += *n - j + 1;
+/* L140: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[jx];
+ ix = jx;
+ if (nounit) {
+ temp *= ap[kk];
+ }
+ i__2 = kk + *n - j;
+ for (k = kk + 1; k <= i__2; ++k) {
+ ix += *incx;
+ temp += ap[k] * x[ix];
+/* L150: */
+ }
+ x[jx] = temp;
+ jx += *incx;
+ kk += *n - j + 1;
+/* L160: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of STPMV . */
+
+} /* stpmv_ */
diff --git a/contrib/libs/cblas/stpsv.c b/contrib/libs/cblas/stpsv.c
new file mode 100644
index 0000000000..f16be31a77
--- /dev/null
+++ b/contrib/libs/cblas/stpsv.c
@@ -0,0 +1,360 @@
+/* stpsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int stpsv_(char *uplo, char *trans, char *diag, integer *n,
+ real *ap, real *x, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, k, kk, ix, jx, kx, info;
+ real temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STPSV solves one of the systems of equations */
+
+/* A*x = b, or A'*x = b, */
+
+/* where b and x are n element vectors and A is an n by n unit, or */
+/* non-unit, upper or lower triangular matrix, supplied in packed form. */
+
+/* No test for singularity or near-singularity is included in this */
+/* routine. Such tests must be performed before calling this routine. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the equations to be solved as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' A*x = b. */
+
+/* TRANS = 'T' or 't' A'*x = b. */
+
+/* TRANS = 'C' or 'c' A'*x = b. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* AP - REAL array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular matrix packed sequentially, */
+/* column by column, so that AP( 1 ) contains a( 1, 1 ), */
+/* AP( 2 ) and AP( 3 ) contain a( 1, 2 ) and a( 2, 2 ) */
+/* respectively, and so on. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular matrix packed sequentially, */
+/* column by column, so that AP( 1 ) contains a( 1, 1 ), */
+/* AP( 2 ) and AP( 3 ) contain a( 2, 1 ) and a( 3, 1 ) */
+/* respectively, and so on. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* X - REAL array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element right-hand side vector b. On exit, X is overwritten */
+/* with the solution vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --x;
+ --ap;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*incx == 0) {
+ info = 7;
+ }
+ if (info != 0) {
+ xerbla_("STPSV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of AP are */
+/* accessed sequentially with one pass through AP. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := inv( A )*x. */
+
+ if (lsame_(uplo, "U")) {
+ kk = *n * (*n + 1) / 2;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ if (x[j] != 0.f) {
+ if (nounit) {
+ x[j] /= ap[kk];
+ }
+ temp = x[j];
+ k = kk - 1;
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ x[i__] -= temp * ap[k];
+ --k;
+/* L10: */
+ }
+ }
+ kk -= j;
+/* L20: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ if (x[jx] != 0.f) {
+ if (nounit) {
+ x[jx] /= ap[kk];
+ }
+ temp = x[jx];
+ ix = jx;
+ i__1 = kk - j + 1;
+ for (k = kk - 1; k >= i__1; --k) {
+ ix -= *incx;
+ x[ix] -= temp * ap[k];
+/* L30: */
+ }
+ }
+ jx -= *incx;
+ kk -= j;
+/* L40: */
+ }
+ }
+ } else {
+ kk = 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0.f) {
+ if (nounit) {
+ x[j] /= ap[kk];
+ }
+ temp = x[j];
+ k = kk + 1;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ x[i__] -= temp * ap[k];
+ ++k;
+/* L50: */
+ }
+ }
+ kk += *n - j + 1;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.f) {
+ if (nounit) {
+ x[jx] /= ap[kk];
+ }
+ temp = x[jx];
+ ix = jx;
+ i__2 = kk + *n - j;
+ for (k = kk + 1; k <= i__2; ++k) {
+ ix += *incx;
+ x[ix] -= temp * ap[k];
+/* L70: */
+ }
+ }
+ jx += *incx;
+ kk += *n - j + 1;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := inv( A' )*x. */
+
+ if (lsame_(uplo, "U")) {
+ kk = 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[j];
+ k = kk;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp -= ap[k] * x[i__];
+ ++k;
+/* L90: */
+ }
+ if (nounit) {
+ temp /= ap[kk + j - 1];
+ }
+ x[j] = temp;
+ kk += j;
+/* L100: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[jx];
+ ix = kx;
+ i__2 = kk + j - 2;
+ for (k = kk; k <= i__2; ++k) {
+ temp -= ap[k] * x[ix];
+ ix += *incx;
+/* L110: */
+ }
+ if (nounit) {
+ temp /= ap[kk + j - 1];
+ }
+ x[jx] = temp;
+ jx += *incx;
+ kk += j;
+/* L120: */
+ }
+ }
+ } else {
+ kk = *n * (*n + 1) / 2;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ temp = x[j];
+ k = kk;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ temp -= ap[k] * x[i__];
+ --k;
+/* L130: */
+ }
+ if (nounit) {
+ temp /= ap[kk - *n + j];
+ }
+ x[j] = temp;
+ kk -= *n - j + 1;
+/* L140: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ temp = x[jx];
+ ix = kx;
+ i__1 = kk - (*n - (j + 1));
+ for (k = kk; k >= i__1; --k) {
+ temp -= ap[k] * x[ix];
+ ix -= *incx;
+/* L150: */
+ }
+ if (nounit) {
+ temp /= ap[kk - *n + j];
+ }
+ x[jx] = temp;
+ jx -= *incx;
+ kk -= *n - j + 1;
+/* L160: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of STPSV . */
+
+} /* stpsv_ */
diff --git a/contrib/libs/cblas/strmm.c b/contrib/libs/cblas/strmm.c
new file mode 100644
index 0000000000..0dfb68f9a8
--- /dev/null
+++ b/contrib/libs/cblas/strmm.c
@@ -0,0 +1,453 @@
+/* strmm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int strmm_(char *side, char *uplo, char *transa, char *diag,
+ integer *m, integer *n, real *alpha, real *a, integer *lda, real *b,
+ integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, k, info;
+ real temp;
+ logical lside;
+ extern logical lsame_(char *, char *);
+ integer nrowa;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STRMM performs one of the matrix-matrix operations */
+
+/* B := alpha*op( A )*B, or B := alpha*B*op( A ), */
+
+/* where alpha is a scalar, B is an m by n matrix, A is a unit, or */
+/* non-unit, upper or lower triangular matrix and op( A ) is one of */
+
+/* op( A ) = A or op( A ) = A'. */
+
+/* Arguments */
+/* ========== */
+
+/* SIDE - CHARACTER*1. */
+/* On entry, SIDE specifies whether op( A ) multiplies B from */
+/* the left or right as follows: */
+
+/* SIDE = 'L' or 'l' B := alpha*op( A )*B. */
+
+/* SIDE = 'R' or 'r' B := alpha*B*op( A ). */
+
+/* Unchanged on exit. */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix A is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANSA - CHARACTER*1. */
+/* On entry, TRANSA specifies the form of op( A ) to be used in */
+/* the matrix multiplication as follows: */
+
+/* TRANSA = 'N' or 'n' op( A ) = A. */
+
+/* TRANSA = 'T' or 't' op( A ) = A'. */
+
+/* TRANSA = 'C' or 'c' op( A ) = A'. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit triangular */
+/* as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of B. M must be at */
+/* least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of B. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - REAL . */
+/* On entry, ALPHA specifies the scalar alpha. When alpha is */
+/* zero then A is not referenced and B need not be set before */
+/* entry. */
+/* Unchanged on exit. */
+
+/* A - REAL array of DIMENSION ( LDA, k ), where k is m */
+/* when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'. */
+/* Before entry with UPLO = 'U' or 'u', the leading k by k */
+/* upper triangular part of the array A must contain the upper */
+/* triangular matrix and the strictly lower triangular part of */
+/* A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading k by k */
+/* lower triangular part of the array A must contain the lower */
+/* triangular matrix and the strictly upper triangular part of */
+/* A is not referenced. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced either, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When SIDE = 'L' or 'l' then */
+/* LDA must be at least max( 1, m ), when SIDE = 'R' or 'r' */
+/* then LDA must be at least max( 1, n ). */
+/* Unchanged on exit. */
+
+/* B - REAL array of DIMENSION ( LDB, n ). */
+/* Before entry, the leading m by n part of the array B must */
+/* contain the matrix B, and on exit is overwritten by the */
+/* transformed matrix. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. LDB must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ lside = lsame_(side, "L");
+ if (lside) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! lside && ! lsame_(side, "R")) {
+ info = 1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ info = 2;
+ } else if (! lsame_(transa, "N") && ! lsame_(transa,
+ "T") && ! lsame_(transa, "C")) {
+ info = 3;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 4;
+ } else if (*m < 0) {
+ info = 5;
+ } else if (*n < 0) {
+ info = 6;
+ } else if (*lda < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldb < max(1,*m)) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("STRMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lside) {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*A*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ if (b[k + j * b_dim1] != 0.f) {
+ temp = *alpha * b[k + j * b_dim1];
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] += temp * a[i__ + k *
+ a_dim1];
+/* L30: */
+ }
+ if (nounit) {
+ temp *= a[k + k * a_dim1];
+ }
+ b[k + j * b_dim1] = temp;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (k = *m; k >= 1; --k) {
+ if (b[k + j * b_dim1] != 0.f) {
+ temp = *alpha * b[k + j * b_dim1];
+ b[k + j * b_dim1] = temp;
+ if (nounit) {
+ b[k + j * b_dim1] *= a[k + k * a_dim1];
+ }
+ i__2 = *m;
+ for (i__ = k + 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] += temp * a[i__ + k *
+ a_dim1];
+/* L60: */
+ }
+ }
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*A'*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ temp = b[i__ + j * b_dim1];
+ if (nounit) {
+ temp *= a[i__ + i__ * a_dim1];
+ }
+ i__2 = i__ - 1;
+ for (k = 1; k <= i__2; ++k) {
+ temp += a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L90: */
+ }
+ b[i__ + j * b_dim1] = *alpha * temp;
+/* L100: */
+ }
+/* L110: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = b[i__ + j * b_dim1];
+ if (nounit) {
+ temp *= a[i__ + i__ * a_dim1];
+ }
+ i__3 = *m;
+ for (k = i__ + 1; k <= i__3; ++k) {
+ temp += a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L120: */
+ }
+ b[i__ + j * b_dim1] = *alpha * temp;
+/* L130: */
+ }
+/* L140: */
+ }
+ }
+ }
+ } else {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*B*A. */
+
+ if (upper) {
+ for (j = *n; j >= 1; --j) {
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L150: */
+ }
+ i__1 = j - 1;
+ for (k = 1; k <= i__1; ++k) {
+ if (a[k + j * a_dim1] != 0.f) {
+ temp = *alpha * a[k + j * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L160: */
+ }
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L190: */
+ }
+ i__2 = *n;
+ for (k = j + 1; k <= i__2; ++k) {
+ if (a[k + j * a_dim1] != 0.f) {
+ temp = *alpha * a[k + j * a_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L200: */
+ }
+ }
+/* L210: */
+ }
+/* L220: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*B*A'. */
+
+ if (upper) {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k - 1;
+ for (j = 1; j <= i__2; ++j) {
+ if (a[j + k * a_dim1] != 0.f) {
+ temp = *alpha * a[j + k * a_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[k + k * a_dim1];
+ }
+ if (temp != 1.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L250: */
+ }
+ }
+/* L260: */
+ }
+ } else {
+ for (k = *n; k >= 1; --k) {
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+ if (a[j + k * a_dim1] != 0.f) {
+ temp = *alpha * a[j + k * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L270: */
+ }
+ }
+/* L280: */
+ }
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[k + k * a_dim1];
+ }
+ if (temp != 1.f) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L290: */
+ }
+ }
+/* L300: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of STRMM . */
+
+} /* strmm_ */
diff --git a/contrib/libs/cblas/strmv.c b/contrib/libs/cblas/strmv.c
new file mode 100644
index 0000000000..d4ff0b9f5a
--- /dev/null
+++ b/contrib/libs/cblas/strmv.c
@@ -0,0 +1,345 @@
+/* strmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int strmv_(char *uplo, char *trans, char *diag, integer *n,
+ real *a, integer *lda, real *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, ix, jx, kx, info;
+ real temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STRMV performs one of the matrix-vector operations */
+
+/* x := A*x, or x := A'*x, */
+
+/* where x is an n element vector and A is an n by n unit, or non-unit, */
+/* upper or lower triangular matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' x := A*x. */
+
+/* TRANS = 'T' or 't' x := A'*x. */
+
+/* TRANS = 'C' or 'c' x := A'*x. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* A - REAL array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular matrix and the strictly lower triangular part of */
+/* A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular matrix and the strictly upper triangular part of */
+/* A is not referenced. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced either, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+/* X - REAL array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. On exit, X is overwritten with the */
+/* tranformed vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*lda < max(1,*n)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ }
+ if (info != 0) {
+ xerbla_("STRMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := A*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0.f) {
+ temp = x[j];
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[i__] += temp * a[i__ + j * a_dim1];
+/* L10: */
+ }
+ if (nounit) {
+ x[j] *= a[j + j * a_dim1];
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.f) {
+ temp = x[jx];
+ ix = kx;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[ix] += temp * a[i__ + j * a_dim1];
+ ix += *incx;
+/* L30: */
+ }
+ if (nounit) {
+ x[jx] *= a[j + j * a_dim1];
+ }
+ }
+ jx += *incx;
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ if (x[j] != 0.f) {
+ temp = x[j];
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ x[i__] += temp * a[i__ + j * a_dim1];
+/* L50: */
+ }
+ if (nounit) {
+ x[j] *= a[j + j * a_dim1];
+ }
+ }
+/* L60: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ if (x[jx] != 0.f) {
+ temp = x[jx];
+ ix = kx;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ x[ix] += temp * a[i__ + j * a_dim1];
+ ix -= *incx;
+/* L70: */
+ }
+ if (nounit) {
+ x[jx] *= a[j + j * a_dim1];
+ }
+ }
+ jx -= *incx;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := A'*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ temp = x[j];
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ temp += a[i__ + j * a_dim1] * x[i__];
+/* L90: */
+ }
+ x[j] = temp;
+/* L100: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ temp = x[jx];
+ ix = jx;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ ix -= *incx;
+ temp += a[i__ + j * a_dim1] * x[ix];
+/* L110: */
+ }
+ x[jx] = temp;
+ jx -= *incx;
+/* L120: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[j];
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ temp += a[i__ + j * a_dim1] * x[i__];
+/* L130: */
+ }
+ x[j] = temp;
+/* L140: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[jx];
+ ix = jx;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ temp += a[i__ + j * a_dim1] * x[ix];
+/* L150: */
+ }
+ x[jx] = temp;
+ jx += *incx;
+/* L160: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of STRMV . */
+
+} /* strmv_ */
diff --git a/contrib/libs/cblas/strsm.c b/contrib/libs/cblas/strsm.c
new file mode 100644
index 0000000000..8c50307edc
--- /dev/null
+++ b/contrib/libs/cblas/strsm.c
@@ -0,0 +1,490 @@
+/* strsm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int strsm_(char *side, char *uplo, char *transa, char *diag,
+ integer *m, integer *n, real *alpha, real *a, integer *lda, real *b,
+ integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, k, info;
+ real temp;
+ logical lside;
+ extern logical lsame_(char *, char *);
+ integer nrowa;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STRSM solves one of the matrix equations */
+
+/* op( A )*X = alpha*B, or X*op( A ) = alpha*B, */
+
+/* where alpha is a scalar, X and B are m by n matrices, A is a unit, or */
+/* non-unit, upper or lower triangular matrix and op( A ) is one of */
+
+/* op( A ) = A or op( A ) = A'. */
+
+/* The matrix X is overwritten on B. */
+
+/* Arguments */
+/* ========== */
+
+/* SIDE - CHARACTER*1. */
+/* On entry, SIDE specifies whether op( A ) appears on the left */
+/* or right of X as follows: */
+
+/* SIDE = 'L' or 'l' op( A )*X = alpha*B. */
+
+/* SIDE = 'R' or 'r' X*op( A ) = alpha*B. */
+
+/* Unchanged on exit. */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix A is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANSA - CHARACTER*1. */
+/* On entry, TRANSA specifies the form of op( A ) to be used in */
+/* the matrix multiplication as follows: */
+
+/* TRANSA = 'N' or 'n' op( A ) = A. */
+
+/* TRANSA = 'T' or 't' op( A ) = A'. */
+
+/* TRANSA = 'C' or 'c' op( A ) = A'. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit triangular */
+/* as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of B. M must be at */
+/* least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of B. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - REAL . */
+/* On entry, ALPHA specifies the scalar alpha. When alpha is */
+/* zero then A is not referenced and B need not be set before */
+/* entry. */
+/* Unchanged on exit. */
+
+/* A - REAL array of DIMENSION ( LDA, k ), where k is m */
+/* when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'. */
+/* Before entry with UPLO = 'U' or 'u', the leading k by k */
+/* upper triangular part of the array A must contain the upper */
+/* triangular matrix and the strictly lower triangular part of */
+/* A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading k by k */
+/* lower triangular part of the array A must contain the lower */
+/* triangular matrix and the strictly upper triangular part of */
+/* A is not referenced. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced either, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When SIDE = 'L' or 'l' then */
+/* LDA must be at least max( 1, m ), when SIDE = 'R' or 'r' */
+/* then LDA must be at least max( 1, n ). */
+/* Unchanged on exit. */
+
+/* B - REAL array of DIMENSION ( LDB, n ). */
+/* Before entry, the leading m by n part of the array B must */
+/* contain the right-hand side matrix B, and on exit is */
+/* overwritten by the solution matrix X. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. LDB must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+/* After these f2c pointer shifts, a[i + j * a_dim1] and b[i + j * b_dim1] */
+/* address the column-major Fortran elements A(i,j), B(i,j), 1-based. */
+
+ /* Function Body */
+ lside = lsame_(side, "L");
+ if (lside) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! lside && ! lsame_(side, "R")) {
+ info = 1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ info = 2;
+ } else if (! lsame_(transa, "N") && ! lsame_(transa,
+ "T") && ! lsame_(transa, "C")) {
+ info = 3;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 4;
+ } else if (*m < 0) {
+ info = 5;
+ } else if (*n < 0) {
+ info = 6;
+ } else if (*lda < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldb < max(1,*m)) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("STRSM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+/* Four cases below: SIDE = 'L'/'R' crossed with TRANSA = 'N' / ('T','C'), */
+/* each solving the triangular system one column of B at a time. */
+
+ if (lside) {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*inv( A )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*alpha != 1.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+ ;
+/* L30: */
+ }
+ }
+ for (k = *m; k >= 1; --k) {
+ if (b[k + j * b_dim1] != 0.f) {
+ if (nounit) {
+ b[k + j * b_dim1] /= a[k + k * a_dim1];
+ }
+ i__2 = k - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] -= b[k + j * b_dim1] * a[
+ i__ + k * a_dim1];
+/* L40: */
+ }
+ }
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*alpha != 1.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+ ;
+/* L70: */
+ }
+ }
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ if (b[k + j * b_dim1] != 0.f) {
+ if (nounit) {
+ b[k + j * b_dim1] /= a[k + k * a_dim1];
+ }
+ i__3 = *m;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] -= b[k + j * b_dim1] * a[
+ i__ + k * a_dim1];
+/* L80: */
+ }
+ }
+/* L90: */
+ }
+/* L100: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*inv( A' )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = *alpha * b[i__ + j * b_dim1];
+ i__3 = i__ - 1;
+ for (k = 1; k <= i__3; ++k) {
+ temp -= a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L110: */
+ }
+ if (nounit) {
+ temp /= a[i__ + i__ * a_dim1];
+ }
+ b[i__ + j * b_dim1] = temp;
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ temp = *alpha * b[i__ + j * b_dim1];
+ i__2 = *m;
+ for (k = i__ + 1; k <= i__2; ++k) {
+ temp -= a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L140: */
+ }
+ if (nounit) {
+ temp /= a[i__ + i__ * a_dim1];
+ }
+ b[i__ + j * b_dim1] = temp;
+/* L150: */
+ }
+/* L160: */
+ }
+ }
+ }
+ } else {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*B*inv( A ). */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*alpha != 1.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+ ;
+/* L170: */
+ }
+ }
+ i__2 = j - 1;
+ for (k = 1; k <= i__2; ++k) {
+ if (a[k + j * a_dim1] != 0.f) {
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] -= a[k + j * a_dim1] * b[
+ i__ + k * b_dim1];
+/* L180: */
+ }
+ }
+/* L190: */
+ }
+ if (nounit) {
+ temp = 1.f / a[j + j * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L200: */
+ }
+ }
+/* L210: */
+ }
+ } else {
+ for (j = *n; j >= 1; --j) {
+ if (*alpha != 1.f) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+ ;
+/* L220: */
+ }
+ }
+ i__1 = *n;
+ for (k = j + 1; k <= i__1; ++k) {
+ if (a[k + j * a_dim1] != 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] -= a[k + j * a_dim1] * b[
+ i__ + k * b_dim1];
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+ if (nounit) {
+ temp = 1.f / a[j + j * a_dim1];
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L250: */
+ }
+ }
+/* L260: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*B*inv( A' ). */
+
+ if (upper) {
+ for (k = *n; k >= 1; --k) {
+ if (nounit) {
+ temp = 1.f / a[k + k * a_dim1];
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L270: */
+ }
+ }
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ if (a[j + k * a_dim1] != 0.f) {
+ temp = a[j + k * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] -= temp * b[i__ + k *
+ b_dim1];
+/* L280: */
+ }
+ }
+/* L290: */
+ }
+ if (*alpha != 1.f) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + k * b_dim1] = *alpha * b[i__ + k * b_dim1]
+ ;
+/* L300: */
+ }
+ }
+/* L310: */
+ }
+ } else {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (nounit) {
+ temp = 1.f / a[k + k * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L320: */
+ }
+ }
+ i__2 = *n;
+ for (j = k + 1; j <= i__2; ++j) {
+ if (a[j + k * a_dim1] != 0.f) {
+ temp = a[j + k * a_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] -= temp * b[i__ + k *
+ b_dim1];
+/* L330: */
+ }
+ }
+/* L340: */
+ }
+ if (*alpha != 1.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + k * b_dim1] = *alpha * b[i__ + k * b_dim1]
+ ;
+/* L350: */
+ }
+ }
+/* L360: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of STRSM . */
+
+} /* strsm_ */
diff --git a/contrib/libs/cblas/strsv.c b/contrib/libs/cblas/strsv.c
new file mode 100644
index 0000000000..fe61b84b52
--- /dev/null
+++ b/contrib/libs/cblas/strsv.c
@@ -0,0 +1,348 @@
+/* strsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int strsv_(char *uplo, char *trans, char *diag, integer *n,
+ real *a, integer *lda, real *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, ix, jx, kx, info;
+ real temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STRSV solves one of the systems of equations */
+
+/* A*x = b, or A'*x = b, */
+
+/* where b and x are n element vectors and A is an n by n unit, or */
+/* non-unit, upper or lower triangular matrix. */
+
+/* No test for singularity or near-singularity is included in this */
+/* routine. Such tests must be performed before calling this routine. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the equations to be solved as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' A*x = b. */
+
+/* TRANS = 'T' or 't' A'*x = b. */
+
+/* TRANS = 'C' or 'c' A'*x = b. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* A - REAL array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular matrix and the strictly lower triangular part of */
+/* A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular matrix and the strictly upper triangular part of */
+/* A is not referenced. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced either, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+/* X - REAL array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element right-hand side vector b. On exit, X is overwritten */
+/* with the solution vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+/* After these f2c pointer shifts, a[i + j * a_dim1] addresses the */
+/* column-major Fortran element A(i,j) and x[i] the i-th element, 1-based. */
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*lda < max(1,*n)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ }
+ if (info != 0) {
+ xerbla_("STRSV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := inv( A )*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ if (x[j] != 0.f) {
+ if (nounit) {
+ x[j] /= a[j + j * a_dim1];
+ }
+ temp = x[j];
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ x[i__] -= temp * a[i__ + j * a_dim1];
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ if (x[jx] != 0.f) {
+ if (nounit) {
+ x[jx] /= a[j + j * a_dim1];
+ }
+ temp = x[jx];
+ ix = jx;
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ ix -= *incx;
+ x[ix] -= temp * a[i__ + j * a_dim1];
+/* L30: */
+ }
+ }
+ jx -= *incx;
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0.f) {
+ if (nounit) {
+ x[j] /= a[j + j * a_dim1];
+ }
+ temp = x[j];
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ x[i__] -= temp * a[i__ + j * a_dim1];
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.f) {
+ if (nounit) {
+ x[jx] /= a[j + j * a_dim1];
+ }
+ temp = x[jx];
+ ix = jx;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ x[ix] -= temp * a[i__ + j * a_dim1];
+/* L70: */
+ }
+ }
+ jx += *incx;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := inv( A' )*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[j];
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp -= a[i__ + j * a_dim1] * x[i__];
+/* L90: */
+ }
+ if (nounit) {
+ temp /= a[j + j * a_dim1];
+ }
+ x[j] = temp;
+/* L100: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[jx];
+ ix = kx;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp -= a[i__ + j * a_dim1] * x[ix];
+ ix += *incx;
+/* L110: */
+ }
+ if (nounit) {
+ temp /= a[j + j * a_dim1];
+ }
+ x[jx] = temp;
+ jx += *incx;
+/* L120: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ temp = x[j];
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ temp -= a[i__ + j * a_dim1] * x[i__];
+/* L130: */
+ }
+ if (nounit) {
+ temp /= a[j + j * a_dim1];
+ }
+ x[j] = temp;
+/* L140: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ temp = x[jx];
+ ix = kx;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ temp -= a[i__ + j * a_dim1] * x[ix];
+ ix -= *incx;
+/* L150: */
+ }
+ if (nounit) {
+ temp /= a[j + j * a_dim1];
+ }
+ x[jx] = temp;
+ jx -= *incx;
+/* L160: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of STRSV . */
+
+} /* strsv_ */
diff --git a/contrib/libs/cblas/xerbla.c b/contrib/libs/cblas/xerbla.c
new file mode 100644
index 0000000000..1088d0fa8f
--- /dev/null
+++ b/contrib/libs/cblas/xerbla.c
@@ -0,0 +1,77 @@
+/* xerbla.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+#include <stdio.h>
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int xerbla_(char *srname, integer *info)
+{
+ /* Format strings */
+ static char fmt_9999[] = "(\002 ** On entry to \002,a,\002 parameter num"
+ "ber \002,i2,\002 had \002,\002an illegal value\002)";
+
+ /* Builtin functions */
+ integer s_wsfe(cilist *), i_len_trim(char *, ftnlen), do_fio(integer *,
+ char *, ftnlen), e_wsfe(void);
+ /* Subroutine */ int s_stop(char *, ftnlen);
+
+ /* Fortran I/O blocks */
+ static cilist io___1 = { 0, 6, 0, fmt_9999, 0 };
+
+
+
+/* -- LAPACK auxiliary routine (preliminary version) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* XERBLA is an error handler for the LAPACK routines. */
+/* It is called by an LAPACK routine if an input parameter has an */
+/* invalid value. A message is printed and execution stops. */
+
+/* Installers may consider modifying the STOP statement in order to */
+/* call system-specific exception-handling facilities. */
+
+/* Arguments */
+/* ========= */
+
+/* SRNAME (input) CHARACTER*(*) */
+/* The name of the routine which called XERBLA. */
+
+/* INFO (input) INTEGER */
+/* The position of the invalid parameter in the parameter list */
+/* of the calling routine. */
+
+/* ===================================================================== */
+
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* NOTE(review): unlike the Fortran comment above ("execution stops"), */
+/* this translation only prints the message and returns to the caller; */
+/* the declared s_stop is never called. Also confirm the printf format: */
+/* f2c's "integer" is commonly typedef'd to long, while "%2i" expects */
+/* int -- a cast to (int) may be needed on LP64 targets. */
+
+ printf("** On entry to %6s, parameter number %2i had an illegal value\n",
+ srname, *info);
+
+
+/* End of XERBLA */
+
+ return 0;
+} /* xerbla_ */
diff --git a/contrib/libs/cblas/xerbla_array.c b/contrib/libs/cblas/xerbla_array.c
new file mode 100644
index 0000000000..d4e4c24902
--- /dev/null
+++ b/contrib/libs/cblas/xerbla_array.c
@@ -0,0 +1,102 @@
+/* xerbla_array.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int xerbla_array__(char *srname_array__, integer *
+ srname_len__, integer *info, ftnlen srname_array_len)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+
+ /* Builtin functions */
+ /* Subroutine */ int s_copy(char *, char *, ftnlen, ftnlen);
+ integer i_len(char *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ char srname[32];
+
+
+/* -- LAPACK auxiliary routine (version 3.0) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., */
+/* September 19, 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* XERBLA_ARRAY assists other languages in calling XERBLA, the LAPACK */
+/* and BLAS error handler. Rather than taking a Fortran string argument */
+/* as the function's name, XERBLA_ARRAY takes an array of single */
+/* characters along with the array's length. XERBLA_ARRAY then copies */
+/* up to 32 characters of that array into a Fortran string and passes */
+/* that to XERBLA. If called with a non-positive SRNAME_LEN, */
+/* XERBLA_ARRAY will call XERBLA with a string of all blank characters. */
+
+/* Say some macro or other device makes XERBLA_ARRAY available to C99 */
+/* by a name lapack_xerbla and with a common Fortran calling convention. */
+/* Then a C99 program could invoke XERBLA via: */
+/* { */
+/* int flen = strlen(__func__); */
+/* lapack_xerbla(__func__, &flen, &info); */
+/* } */
+
+/* Providing XERBLA_ARRAY is not necessary for intercepting LAPACK */
+/* errors. XERBLA_ARRAY calls XERBLA. */
+
+/* Arguments */
+/* ========= */
+
+/* SRNAME_ARRAY (input) CHARACTER(1) array, dimension (SRNAME_LEN) */
+/* The name of the routine which called XERBLA_ARRAY. */
+
+/* SRNAME_LEN (input) INTEGER */
+/* The length of the name in SRNAME_ARRAY. */
+
+/* INFO (input) INTEGER */
+/* The position of the invalid parameter in the parameter list */
+/* of the calling routine. */
+
+/* ===================================================================== */
+
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+ /* Parameter adjustments */
+ --srname_array__;
+
+/* 1-based indexing into the caller's character array, f2c style. */
+
+ /* Function Body */
+ s_copy(srname, "", (ftnlen)32, (ftnlen)0);
+
+/* s_copy with an empty source blank-fills srname to its full 32-char */
+/* width (f2c fixed-width string semantics), covering the all-blank case. */
+
+/* Computing MIN */
+ i__2 = *srname_len__, i__3 = i_len(srname, (ftnlen)32);
+ i__1 = min(i__2,i__3);
+
+/* Copy at most min( *srname_len__, 32 ) characters into the buffer. */
+
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ *(unsigned char *)&srname[i__ - 1] = *(unsigned char *)&
+ srname_array__[i__];
+ }
+ xerbla_(srname, info);
+ return 0;
+} /* xerbla_array__ */
diff --git a/contrib/libs/cblas/zaxpy.c b/contrib/libs/cblas/zaxpy.c
new file mode 100644
index 0000000000..1bbebbaadd
--- /dev/null
+++ b/contrib/libs/cblas/zaxpy.c
@@ -0,0 +1,99 @@
+/* zaxpy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zaxpy_(integer *n, doublecomplex *za, doublecomplex *zx,
+ integer *incx, doublecomplex *zy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ doublecomplex z__1, z__2;
+
+ /* Local variables */
+ integer i__, ix, iy;
+ extern doublereal dcabs1_(doublecomplex *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* constant times a vector plus a vector. */
+/* jack dongarra, 3/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --zy;
+ --zx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+
+/* Nothing to do when the scalar is zero (as measured by dcabs1_). */
+
+ if (dcabs1_(za) == 0.) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/* code for unequal increments or equal increments */
+/* not equal to 1 */
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+
+/* A negative increment starts the index at the far end of the vector */
+/* so that elements 1..n are still visited. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = iy;
+ i__4 = ix;
+
+/* zy(iy) += za * zx(ix), expanded into real/imaginary parts. */
+
+ z__2.r = za->r * zx[i__4].r - za->i * zx[i__4].i, z__2.i = za->r * zx[
+ i__4].i + za->i * zx[i__4].r;
+ z__1.r = zy[i__3].r + z__2.r, z__1.i = zy[i__3].i + z__2.i;
+ zy[i__2].r = z__1.r, zy[i__2].i = z__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ i__4 = i__;
+
+/* zy(i) += za * zx(i), expanded into real/imaginary parts. */
+
+ z__2.r = za->r * zx[i__4].r - za->i * zx[i__4].i, z__2.i = za->r * zx[
+ i__4].i + za->i * zx[i__4].r;
+ z__1.r = zy[i__3].r + z__2.r, z__1.i = zy[i__3].i + z__2.i;
+ zy[i__2].r = z__1.r, zy[i__2].i = z__1.i;
+/* L30: */
+ }
+ return 0;
+} /* zaxpy_ */
diff --git a/contrib/libs/cblas/zcopy.c b/contrib/libs/cblas/zcopy.c
new file mode 100644
index 0000000000..5999a693b3
--- /dev/null
+++ b/contrib/libs/cblas/zcopy.c
@@ -0,0 +1,85 @@
+/* zcopy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zcopy_(integer *n, doublecomplex *zx, integer *incx,
+ doublecomplex *zy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, ix, iy;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* copies a vector, x, to a vector, y. */
+/* jack dongarra, linpack, 4/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+ /* Parameter adjustments */
+ --zy;
+ --zx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/* code for unequal increments or equal increments */
+/* not equal to 1 */
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+
+/* A negative increment starts the index at the far end of the vector */
+/* so that elements 1..n are still visited. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = ix;
+ zy[i__2].r = zx[i__3].r, zy[i__2].i = zx[i__3].i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+
+L20:
+
+/* Straight element-wise copy: zy(i) = zx(i). */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ zy[i__2].r = zx[i__3].r, zy[i__2].i = zx[i__3].i;
+/* L30: */
+ }
+ return 0;
+} /* zcopy_ */
diff --git a/contrib/libs/cblas/zdotc.c b/contrib/libs/cblas/zdotc.c
new file mode 100644
index 0000000000..383b2c23ae
--- /dev/null
+++ b/contrib/libs/cblas/zdotc.c
@@ -0,0 +1,105 @@
+/* zdotc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Double Complex */ VOID zdotc_(doublecomplex * ret_val, integer *n,
+ doublecomplex *zx, integer *incx, doublecomplex *zy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, ix, iy;
+ doublecomplex ztemp;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZDOTC forms the dot product of a vector. */
+
+/* Further Details */
+/* =============== */
+
+/* jack dongarra, 3/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --zy;
+ --zx;
+
+ /* Function Body */
+ ztemp.r = 0., ztemp.i = 0.;
+ ret_val->r = 0., ret_val->i = 0.;
+ if (*n <= 0) {
+ return ;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/* code for unequal increments or equal increments */
+/* not equal to 1 */
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+
+/* A negative increment starts the index at the far end of the vector */
+/* so that elements 1..n are still visited. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* ztemp += conjg(zx(ix)) * zy(iy); d_cnjg conjugates zx into z__3. */
+
+ d_cnjg(&z__3, &zx[ix]);
+ i__2 = iy;
+ z__2.r = z__3.r * zy[i__2].r - z__3.i * zy[i__2].i, z__2.i = z__3.r *
+ zy[i__2].i + z__3.i * zy[i__2].r;
+ z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i;
+ ztemp.r = z__1.r, ztemp.i = z__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ ret_val->r = ztemp.r, ret_val->i = ztemp.i;
+ return ;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* ztemp += conjg(zx(i)) * zy(i). */
+
+ d_cnjg(&z__3, &zx[i__]);
+ i__2 = i__;
+ z__2.r = z__3.r * zy[i__2].r - z__3.i * zy[i__2].i, z__2.i = z__3.r *
+ zy[i__2].i + z__3.i * zy[i__2].r;
+ z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i;
+ ztemp.r = z__1.r, ztemp.i = z__1.i;
+/* L30: */
+ }
+ ret_val->r = ztemp.r, ret_val->i = ztemp.i;
+ return ;
+} /* zdotc_ */
diff --git a/contrib/libs/cblas/zdotu.c b/contrib/libs/cblas/zdotu.c
new file mode 100644
index 0000000000..e32993aade
--- /dev/null
+++ b/contrib/libs/cblas/zdotu.c
@@ -0,0 +1,100 @@
+/* zdotu.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Double Complex */ VOID zdotu_(doublecomplex * ret_val, integer *n,
+ doublecomplex *zx, integer *incx, doublecomplex *zy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ doublecomplex z__1, z__2;
+
+ /* Local variables */
+ integer i__, ix, iy;
+ doublecomplex ztemp;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZDOTU forms the dot product of two vectors. */
+
+/* Further Details */
+/* =============== */
+
+/* jack dongarra, 3/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+/* .. Local Scalars .. */
+/* .. */
+ /* Parameter adjustments */
+ --zy;
+ --zx;
+
+ /* Function Body */
+ ztemp.r = 0., ztemp.i = 0.;
+ ret_val->r = 0., ret_val->i = 0.;
+ if (*n <= 0) {
+ return ;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/* code for unequal increments or equal increments */
+/* not equal to 1 */
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ i__3 = iy;
+ z__2.r = zx[i__2].r * zy[i__3].r - zx[i__2].i * zy[i__3].i, z__2.i =
+ zx[i__2].r * zy[i__3].i + zx[i__2].i * zy[i__3].r;
+ z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i;
+ ztemp.r = z__1.r, ztemp.i = z__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ ret_val->r = ztemp.r, ret_val->i = ztemp.i;
+ return ;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ z__2.r = zx[i__2].r * zy[i__3].r - zx[i__2].i * zy[i__3].i, z__2.i =
+ zx[i__2].r * zy[i__3].i + zx[i__2].i * zy[i__3].r;
+ z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i;
+ ztemp.r = z__1.r, ztemp.i = z__1.i;
+/* L30: */
+ }
+ ret_val->r = ztemp.r, ret_val->i = ztemp.i;
+ return ;
+} /* zdotu_ */
diff --git a/contrib/libs/cblas/zdrot.c b/contrib/libs/cblas/zdrot.c
new file mode 100644
index 0000000000..8101687338
--- /dev/null
+++ b/contrib/libs/cblas/zdrot.c
@@ -0,0 +1,153 @@
+/* zdrot.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zdrot_(integer *n, doublecomplex *cx, integer *incx,
+ doublecomplex *cy, integer *incy, doublereal *c__, doublereal *s)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Local variables */
+ integer i__, ix, iy;
+ doublecomplex ctemp;
+
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Applies a plane rotation, where the cos and sin (c and s) are real */
+/* and the vectors cx and cy are complex. */
+/* jack dongarra, linpack, 3/11/78. */
+
+/* Arguments */
+/* ========== */
+
+/* N (input) INTEGER */
+/* On entry, N specifies the order of the vectors cx and cy. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* CX (input) COMPLEX*16 array, dimension at least */
+/* ( 1 + ( N - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array CX must contain the n */
+/* element vector cx. On exit, CX is overwritten by the updated */
+/* vector cx. */
+
+/* INCX (input) INTEGER */
+/* On entry, INCX specifies the increment for the elements of */
+/* CX. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* CY (input) COMPLEX*16 array, dimension at least */
+/* ( 1 + ( N - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array CY must contain the n */
+/* element vector cy. On exit, CY is overwritten by the updated */
+/* vector cy. */
+
+/* INCY (input) INTEGER */
+/* On entry, INCY specifies the increment for the elements of */
+/* CY. INCY must not be zero. */
+/* Unchanged on exit. */
+
+/* C (input) DOUBLE PRECISION */
+/* On entry, C specifies the cosine, cos. */
+/* Unchanged on exit. */
+
+/* S (input) DOUBLE PRECISION */
+/* On entry, S specifies the sine, sin. */
+/* Unchanged on exit. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --cy;
+ --cx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/* code for unequal increments or equal increments not equal */
+/* to 1 */
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ z__2.r = *c__ * cx[i__2].r, z__2.i = *c__ * cx[i__2].i;
+ i__3 = iy;
+ z__3.r = *s * cy[i__3].r, z__3.i = *s * cy[i__3].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ ctemp.r = z__1.r, ctemp.i = z__1.i;
+ i__2 = iy;
+ i__3 = iy;
+ z__2.r = *c__ * cy[i__3].r, z__2.i = *c__ * cy[i__3].i;
+ i__4 = ix;
+ z__3.r = *s * cx[i__4].r, z__3.i = *s * cx[i__4].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ cy[i__2].r = z__1.r, cy[i__2].i = z__1.i;
+ i__2 = ix;
+ cx[i__2].r = ctemp.r, cx[i__2].i = ctemp.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ z__2.r = *c__ * cx[i__2].r, z__2.i = *c__ * cx[i__2].i;
+ i__3 = i__;
+ z__3.r = *s * cy[i__3].r, z__3.i = *s * cy[i__3].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ ctemp.r = z__1.r, ctemp.i = z__1.i;
+ i__2 = i__;
+ i__3 = i__;
+ z__2.r = *c__ * cy[i__3].r, z__2.i = *c__ * cy[i__3].i;
+ i__4 = i__;
+ z__3.r = *s * cx[i__4].r, z__3.i = *s * cx[i__4].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ cy[i__2].r = z__1.r, cy[i__2].i = z__1.i;
+ i__2 = i__;
+ cx[i__2].r = ctemp.r, cx[i__2].i = ctemp.i;
+/* L30: */
+ }
+ return 0;
+} /* zdrot_ */
diff --git a/contrib/libs/cblas/zdscal.c b/contrib/libs/cblas/zdscal.c
new file mode 100644
index 0000000000..9cb703011f
--- /dev/null
+++ b/contrib/libs/cblas/zdscal.c
@@ -0,0 +1,85 @@
+/* zdscal.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zdscal_(integer *n, doublereal *da, doublecomplex *zx,
+ integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ doublecomplex z__1, z__2;
+
+ /* Local variables */
+ integer i__, ix;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* scales a vector by a constant. */
+/* jack dongarra, 3/11/78. */
+/* modified 3/93 to return if incx .le. 0. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+ /* Parameter adjustments */
+ --zx;
+
+ /* Function Body */
+ if (*n <= 0 || *incx <= 0) {
+ return 0;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ ix = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ z__2.r = *da, z__2.i = 0.;
+ i__3 = ix;
+ z__1.r = z__2.r * zx[i__3].r - z__2.i * zx[i__3].i, z__1.i = z__2.r *
+ zx[i__3].i + z__2.i * zx[i__3].r;
+ zx[i__2].r = z__1.r, zx[i__2].i = z__1.i;
+ ix += *incx;
+/* L10: */
+ }
+ return 0;
+
+/* code for increment equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ z__2.r = *da, z__2.i = 0.;
+ i__3 = i__;
+ z__1.r = z__2.r * zx[i__3].r - z__2.i * zx[i__3].i, z__1.i = z__2.r *
+ zx[i__3].i + z__2.i * zx[i__3].r;
+ zx[i__2].r = z__1.r, zx[i__2].i = z__1.i;
+/* L30: */
+ }
+ return 0;
+} /* zdscal_ */
diff --git a/contrib/libs/cblas/zgbmv.c b/contrib/libs/cblas/zgbmv.c
new file mode 100644
index 0000000000..22ae8dd497
--- /dev/null
+++ b/contrib/libs/cblas/zgbmv.c
@@ -0,0 +1,478 @@
+/* zgbmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zgbmv_(char *trans, integer *m, integer *n, integer *kl,
+ integer *ku, doublecomplex *alpha, doublecomplex *a, integer *lda,
+ doublecomplex *x, integer *incx, doublecomplex *beta, doublecomplex *
+ y, integer *incy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k, ix, iy, jx, jy, kx, ky, kup1, info;
+ doublecomplex temp;
+ integer lenx, leny;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noconj;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGBMV performs one of the matrix-vector operations */
+
+/* y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y, or */
+
+/* y := alpha*conjg( A' )*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are vectors and A is an */
+/* m by n band matrix, with kl sub-diagonals and ku super-diagonals. */
+
+/* Arguments */
+/* ========== */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' y := alpha*A*x + beta*y. */
+
+/* TRANS = 'T' or 't' y := alpha*A'*x + beta*y. */
+
+/* TRANS = 'C' or 'c' y := alpha*conjg( A' )*x + beta*y. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix A. */
+/* M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* KL - INTEGER. */
+/* On entry, KL specifies the number of sub-diagonals of the */
+/* matrix A. KL must satisfy 0 .le. KL. */
+/* Unchanged on exit. */
+
+/* KU - INTEGER. */
+/* On entry, KU specifies the number of super-diagonals of the */
+/* matrix A. KU must satisfy 0 .le. KU. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX*16 . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX*16 array of DIMENSION ( LDA, n ). */
+/* Before entry, the leading ( kl + ku + 1 ) by n part of the */
+/* array A must contain the matrix of coefficients, supplied */
+/* column by column, with the leading diagonal of the matrix in */
+/* row ( ku + 1 ) of the array, the first super-diagonal */
+/* starting at position 2 in row ku, the first sub-diagonal */
+/* starting at position 1 in row ( ku + 2 ), and so on. */
+/* Elements in the array A that do not correspond to elements */
+/* in the band matrix (such as the top left ku by ku triangle) */
+/* are not referenced. */
+/* The following program segment will transfer a band matrix */
+/* from conventional full matrix storage to band storage: */
+
+/* DO 20, J = 1, N */
+/* K = KU + 1 - J */
+/* DO 10, I = MAX( 1, J - KU ), MIN( M, J + KL ) */
+/* A( K + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* ( kl + ku + 1 ). */
+/* Unchanged on exit. */
+
+/* X - COMPLEX*16 array of DIMENSION at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n' */
+/* and at least */
+/* ( 1 + ( m - 1 )*abs( INCX ) ) otherwise. */
+/* Before entry, the incremented array X must contain the */
+/* vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA - COMPLEX*16 . */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then Y need not be set on input. */
+/* Unchanged on exit. */
+
+/* Y - COMPLEX*16 array of DIMENSION at least */
+/* ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n' */
+/* and at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ) otherwise. */
+/* Before entry, the incremented array Y must contain the */
+/* vector y. On exit, Y is overwritten by the updated vector y. */
+
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --y;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")
+ ) {
+ info = 1;
+ } else if (*m < 0) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*kl < 0) {
+ info = 4;
+ } else if (*ku < 0) {
+ info = 5;
+ } else if (*lda < *kl + *ku + 1) {
+ info = 8;
+ } else if (*incx == 0) {
+ info = 10;
+ } else if (*incy == 0) {
+ info = 13;
+ }
+ if (info != 0) {
+ xerbla_("ZGBMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0 || alpha->r == 0. && alpha->i == 0. && (beta->r ==
+ 1. && beta->i == 0.)) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+
+/* Set LENX and LENY, the lengths of the vectors x and y, and set */
+/* up the start points in X and Y. */
+
+ if (lsame_(trans, "N")) {
+ lenx = *n;
+ leny = *m;
+ } else {
+ lenx = *m;
+ leny = *n;
+ }
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (lenx - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (leny - 1) * *incy;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through the band part of A. */
+
+/* First form y := beta*y. */
+
+ if (beta->r != 1. || beta->i != 0.) {
+ if (*incy == 1) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ y[i__2].r = 0., y[i__2].i = 0.;
+/* L10: */
+ }
+ } else {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ y[i__2].r = 0., y[i__2].i = 0.;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = iy;
+ z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (alpha->r == 0. && alpha->i == 0.) {
+ return 0;
+ }
+ kup1 = *ku + 1;
+ if (lsame_(trans, "N")) {
+
+/* Form y := alpha*A*x + y. */
+
+ jx = kx;
+ if (*incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ i__2 = jx;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ k = kup1 - j;
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *ku;
+/* Computing MIN */
+ i__5 = *m, i__6 = j + *kl;
+ i__4 = min(i__5,i__6);
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ i__5 = k + i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__2.i = temp.r * a[i__5].i + temp.i * a[i__5]
+ .r;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i +
+ z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L50: */
+ }
+ }
+ jx += *incx;
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__4 = jx;
+ if (x[i__4].r != 0. || x[i__4].i != 0.) {
+ i__4 = jx;
+ z__1.r = alpha->r * x[i__4].r - alpha->i * x[i__4].i,
+ z__1.i = alpha->r * x[i__4].i + alpha->i * x[i__4]
+ .r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ iy = ky;
+ k = kup1 - j;
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *ku;
+/* Computing MIN */
+ i__5 = *m, i__6 = j + *kl;
+ i__3 = min(i__5,i__6);
+ for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+ i__4 = iy;
+ i__2 = iy;
+ i__5 = k + i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__2.i = temp.r * a[i__5].i + temp.i * a[i__5]
+ .r;
+ z__1.r = y[i__2].r + z__2.r, z__1.i = y[i__2].i +
+ z__2.i;
+ y[i__4].r = z__1.r, y[i__4].i = z__1.i;
+ iy += *incy;
+/* L70: */
+ }
+ }
+ jx += *incx;
+ if (j > *ku) {
+ ky += *incy;
+ }
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y := alpha*A'*x + y or y := alpha*conjg( A' )*x + y. */
+
+ jy = ky;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp.r = 0., temp.i = 0.;
+ k = kup1 - j;
+ if (noconj) {
+/* Computing MAX */
+ i__3 = 1, i__4 = j - *ku;
+/* Computing MIN */
+ i__5 = *m, i__6 = j + *kl;
+ i__2 = min(i__5,i__6);
+ for (i__ = max(i__3,i__4); i__ <= i__2; ++i__) {
+ i__3 = k + i__ + j * a_dim1;
+ i__4 = i__;
+ z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4]
+ .i, z__2.i = a[i__3].r * x[i__4].i + a[i__3]
+ .i * x[i__4].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L90: */
+ }
+ } else {
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *ku;
+/* Computing MIN */
+ i__5 = *m, i__6 = j + *kl;
+ i__4 = min(i__5,i__6);
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ d_cnjg(&z__3, &a[k + i__ + j * a_dim1]);
+ i__2 = i__;
+ z__2.r = z__3.r * x[i__2].r - z__3.i * x[i__2].i,
+ z__2.i = z__3.r * x[i__2].i + z__3.i * x[i__2]
+ .r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L100: */
+ }
+ }
+ i__4 = jy;
+ i__2 = jy;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i, z__2.i =
+ alpha->r * temp.i + alpha->i * temp.r;
+ z__1.r = y[i__2].r + z__2.r, z__1.i = y[i__2].i + z__2.i;
+ y[i__4].r = z__1.r, y[i__4].i = z__1.i;
+ jy += *incy;
+/* L110: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp.r = 0., temp.i = 0.;
+ ix = kx;
+ k = kup1 - j;
+ if (noconj) {
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *ku;
+/* Computing MIN */
+ i__5 = *m, i__6 = j + *kl;
+ i__3 = min(i__5,i__6);
+ for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+ i__4 = k + i__ + j * a_dim1;
+ i__2 = ix;
+ z__2.r = a[i__4].r * x[i__2].r - a[i__4].i * x[i__2]
+ .i, z__2.i = a[i__4].r * x[i__2].i + a[i__4]
+ .i * x[i__2].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix += *incx;
+/* L120: */
+ }
+ } else {
+/* Computing MAX */
+ i__3 = 1, i__4 = j - *ku;
+/* Computing MIN */
+ i__5 = *m, i__6 = j + *kl;
+ i__2 = min(i__5,i__6);
+ for (i__ = max(i__3,i__4); i__ <= i__2; ++i__) {
+ d_cnjg(&z__3, &a[k + i__ + j * a_dim1]);
+ i__3 = ix;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+ z__2.i = z__3.r * x[i__3].i + z__3.i * x[i__3]
+ .r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix += *incx;
+/* L130: */
+ }
+ }
+ i__2 = jy;
+ i__3 = jy;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i, z__2.i =
+ alpha->r * temp.i + alpha->i * temp.r;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ jy += *incy;
+ if (j > *ku) {
+ kx += *incx;
+ }
+/* L140: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZGBMV . */
+
+} /* zgbmv_ */
diff --git a/contrib/libs/cblas/zgemm.c b/contrib/libs/cblas/zgemm.c
new file mode 100644
index 0000000000..f43a6062d2
--- /dev/null
+++ b/contrib/libs/cblas/zgemm.c
@@ -0,0 +1,698 @@
+/* zgemm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zgemm_(char *transa, char *transb, integer *m, integer *
+ n, integer *k, doublecomplex *alpha, doublecomplex *a, integer *lda,
+ doublecomplex *b, integer *ldb, doublecomplex *beta, doublecomplex *
+ c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5, i__6;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, l, info;
+ logical nota, notb;
+ doublecomplex temp;
+ logical conja, conjb;
+ integer ncola;
+ extern logical lsame_(char *, char *);
+ integer nrowa, nrowb;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGEMM performs one of the matrix-matrix operations */
+
+/* C := alpha*op( A )*op( B ) + beta*C, */
+
+/* where op( X ) is one of */
+
+/* op( X ) = X or op( X ) = X' or op( X ) = conjg( X' ), */
+
+/* alpha and beta are scalars, and A, B and C are matrices, with op( A ) */
+/* an m by k matrix, op( B ) a k by n matrix and C an m by n matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* TRANSA - CHARACTER*1. */
+/* On entry, TRANSA specifies the form of op( A ) to be used in */
+/* the matrix multiplication as follows: */
+
+/* TRANSA = 'N' or 'n', op( A ) = A. */
+
+/* TRANSA = 'T' or 't', op( A ) = A'. */
+
+/* TRANSA = 'C' or 'c', op( A ) = conjg( A' ). */
+
+/* Unchanged on exit. */
+
+/* TRANSB - CHARACTER*1. */
+/* On entry, TRANSB specifies the form of op( B ) to be used in */
+/* the matrix multiplication as follows: */
+
+/* TRANSB = 'N' or 'n', op( B ) = B. */
+
+/* TRANSB = 'T' or 't', op( B ) = B'. */
+
+/* TRANSB = 'C' or 'c', op( B ) = conjg( B' ). */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix */
+/* op( A ) and of the matrix C. M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix */
+/* op( B ) and the number of columns of the matrix C. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry, K specifies the number of columns of the matrix */
+/* op( A ) and the number of rows of the matrix op( B ). K must */
+/* be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX*16 . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX*16 array of DIMENSION ( LDA, ka ), where ka is */
+/* k when TRANSA = 'N' or 'n', and is m otherwise. */
+/* Before entry with TRANSA = 'N' or 'n', the leading m by k */
+/* part of the array A must contain the matrix A, otherwise */
+/* the leading k by m part of the array A must contain the */
+/* matrix A. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When TRANSA = 'N' or 'n' then */
+/* LDA must be at least max( 1, m ), otherwise LDA must be at */
+/* least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* B - COMPLEX*16 array of DIMENSION ( LDB, kb ), where kb is */
+/* n when TRANSB = 'N' or 'n', and is k otherwise. */
+/* Before entry with TRANSB = 'N' or 'n', the leading k by n */
+/* part of the array B must contain the matrix B, otherwise */
+/* the leading n by k part of the array B must contain the */
+/* matrix B. */
+/* Unchanged on exit. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. When TRANSB = 'N' or 'n' then */
+/* LDB must be at least max( 1, k ), otherwise LDB must be at */
+/* least max( 1, n ). */
+/* Unchanged on exit. */
+
+/* BETA - COMPLEX*16 . */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then C need not be set on input. */
+/* Unchanged on exit. */
+
+/* C - COMPLEX*16 array of DIMENSION ( LDC, n ). */
+/* Before entry, the leading m by n part of the array C must */
+/* contain the matrix C, except when beta is zero, in which */
+/* case C need not be set on entry. */
+/* On exit, the array C is overwritten by the m by n matrix */
+/* ( alpha*op( A )*op( B ) + beta*C ). */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Set NOTA and NOTB as true if A and B respectively are not */
+/* conjugated or transposed, set CONJA and CONJB as true if A and */
+/* B respectively are to be transposed but not conjugated and set */
+/* NROWA, NCOLA and NROWB as the number of rows and columns of A */
+/* and the number of rows of B respectively. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ nota = lsame_(transa, "N");
+ notb = lsame_(transb, "N");
+ conja = lsame_(transa, "C");
+ conjb = lsame_(transb, "C");
+ if (nota) {
+ nrowa = *m;
+ ncola = *k;
+ } else {
+ nrowa = *k;
+ ncola = *m;
+ }
+ if (notb) {
+ nrowb = *k;
+ } else {
+ nrowb = *n;
+ }
+
+/* Test the input parameters. */
+
+ info = 0;
+ if (! nota && ! conja && ! lsame_(transa, "T")) {
+ info = 1;
+ } else if (! notb && ! conjb && ! lsame_(transb, "T")) {
+ info = 2;
+ } else if (*m < 0) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*k < 0) {
+ info = 5;
+ } else if (*lda < max(1,nrowa)) {
+ info = 8;
+ } else if (*ldb < max(1,nrowb)) {
+ info = 10;
+ } else if (*ldc < max(1,*m)) {
+ info = 13;
+ }
+ if (info != 0) {
+ xerbla_("ZGEMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0 || (alpha->r == 0. && alpha->i == 0. || *k == 0) &&
+ (beta->r == 1. && beta->i == 0.)) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0. && alpha->i == 0.) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4].i,
+ z__1.i = beta->r * c__[i__4].i + beta->i * c__[
+ i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (notb) {
+ if (nota) {
+
+/* Form C := alpha*A*B + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L50: */
+ }
+ } else if (beta->r != 1. || beta->i != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L60: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = l + j * b_dim1;
+ if (b[i__3].r != 0. || b[i__3].i != 0.) {
+ i__3 = l + j * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+ z__1.i = alpha->r * b[i__3].i + alpha->i * b[
+ i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ z__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
+ .i + z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L70: */
+ }
+ }
+/* L80: */
+ }
+/* L90: */
+ }
+ } else if (conja) {
+
+/* Form C := alpha*conjg( A' )*B + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * b_dim1;
+ z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i,
+ z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4]
+ .r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L100: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L110: */
+ }
+/* L120: */
+ }
+ } else {
+
+/* Form C := alpha*A'*B + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + i__ * a_dim1;
+ i__5 = l + j * b_dim1;
+ z__2.r = a[i__4].r * b[i__5].r - a[i__4].i * b[i__5]
+ .i, z__2.i = a[i__4].r * b[i__5].i + a[i__4]
+ .i * b[i__5].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L130: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L140: */
+ }
+/* L150: */
+ }
+ }
+ } else if (nota) {
+ if (conjb) {
+
+/* Form C := alpha*A*conjg( B' ) + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L160: */
+ }
+ } else if (beta->r != 1. || beta->i != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L170: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * b_dim1;
+ if (b[i__3].r != 0. || b[i__3].i != 0.) {
+ d_cnjg(&z__2, &b[j + l * b_dim1]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i,
+ z__1.i = alpha->r * z__2.i + alpha->i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ z__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
+ .i + z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L180: */
+ }
+ }
+/* L190: */
+ }
+/* L200: */
+ }
+ } else {
+
+/* Form C := alpha*A*B' + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L210: */
+ }
+ } else if (beta->r != 1. || beta->i != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L220: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * b_dim1;
+ if (b[i__3].r != 0. || b[i__3].i != 0.) {
+ i__3 = j + l * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+ z__1.i = alpha->r * b[i__3].i + alpha->i * b[
+ i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ z__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
+ .i + z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+/* L250: */
+ }
+ }
+ } else if (conja) {
+ if (conjb) {
+
+/* Form C := alpha*conjg( A' )*conjg( B' ) + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+ d_cnjg(&z__4, &b[j + l * b_dim1]);
+ z__2.r = z__3.r * z__4.r - z__3.i * z__4.i, z__2.i =
+ z__3.r * z__4.i + z__3.i * z__4.r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L260: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L270: */
+ }
+/* L280: */
+ }
+ } else {
+
+/* Form C := alpha*conjg( A' )*B' + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+ i__4 = j + l * b_dim1;
+ z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i,
+ z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4]
+ .r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L290: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L300: */
+ }
+/* L310: */
+ }
+ }
+ } else {
+ if (conjb) {
+
+/* Form C := alpha*A'*conjg( B' ) + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + i__ * a_dim1;
+ d_cnjg(&z__3, &b[j + l * b_dim1]);
+ z__2.r = a[i__4].r * z__3.r - a[i__4].i * z__3.i,
+ z__2.i = a[i__4].r * z__3.i + a[i__4].i *
+ z__3.r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L320: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L330: */
+ }
+/* L340: */
+ }
+ } else {
+
+/* Form C := alpha*A'*B' + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + i__ * a_dim1;
+ i__5 = j + l * b_dim1;
+ z__2.r = a[i__4].r * b[i__5].r - a[i__4].i * b[i__5]
+ .i, z__2.i = a[i__4].r * b[i__5].i + a[i__4]
+ .i * b[i__5].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L350: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L360: */
+ }
+/* L370: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZGEMM . */
+
+} /* zgemm_ */
diff --git a/contrib/libs/cblas/zgemv.c b/contrib/libs/cblas/zgemv.c
new file mode 100644
index 0000000000..54e83560c2
--- /dev/null
+++ b/contrib/libs/cblas/zgemv.c
@@ -0,0 +1,412 @@
+/* zgemv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zgemv_(char *trans, integer *m, integer *n, 
+	doublecomplex *alpha, doublecomplex *a, integer *lda, doublecomplex *
+	x, integer *incx, doublecomplex *beta, doublecomplex *y, integer *
+	incy)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    doublecomplex z__1, z__2, z__3;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__, j, ix, iy, jx, jy, kx, ky, info;
+    doublecomplex temp;
+    integer lenx, leny;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical noconj;
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZGEMV performs one of the matrix-vector operations */
+
+/*     y := alpha*A*x + beta*y,   or   y := alpha*A'*x + beta*y,   or */
+
+/*     y := alpha*conjg( A' )*x + beta*y, */
+
+/*  where alpha and beta are scalars, x and y are vectors and A is an */
+/*  m by n matrix. */
+
+/*  Arguments */
+/*  ========== */
+
+/*  TRANS  - CHARACTER*1. */
+/*           On entry, TRANS specifies the operation to be performed as */
+/*           follows: */
+
+/*              TRANS = 'N' or 'n'   y := alpha*A*x + beta*y. */
+
+/*              TRANS = 'T' or 't'   y := alpha*A'*x + beta*y. */
+
+/*              TRANS = 'C' or 'c'   y := alpha*conjg( A' )*x + beta*y. */
+
+/*           Unchanged on exit. */
+
+/*  M      - INTEGER. */
+/*           On entry, M specifies the number of rows of the matrix A. */
+/*           M must be at least zero. */
+/*           Unchanged on exit. */
+
+/*  N      - INTEGER. */
+/*           On entry, N specifies the number of columns of the matrix A. */
+/*           N must be at least zero. */
+/*           Unchanged on exit. */
+
+/*  ALPHA  - COMPLEX*16 . */
+/*           On entry, ALPHA specifies the scalar alpha. */
+/*           Unchanged on exit. */
+
+/*  A      - COMPLEX*16 array of DIMENSION ( LDA, n ). */
+/*           Before entry, the leading m by n part of the array A must */
+/*           contain the matrix of coefficients. */
+/*           Unchanged on exit. */
+
+/*  LDA    - INTEGER. */
+/*           On entry, LDA specifies the first dimension of A as declared */
+/*           in the calling (sub) program. LDA must be at least */
+/*           max( 1, m ). */
+/*           Unchanged on exit. */
+
+/*  X      - COMPLEX*16 array of DIMENSION at least */
+/*           ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n' */
+/*           and at least */
+/*           ( 1 + ( m - 1 )*abs( INCX ) ) otherwise. */
+/*           Before entry, the incremented array X must contain the */
+/*           vector x. */
+/*           Unchanged on exit. */
+
+/*  INCX   - INTEGER. */
+/*           On entry, INCX specifies the increment for the elements of */
+/*           X. INCX must not be zero. */
+/*           Unchanged on exit. */
+
+/*  BETA   - COMPLEX*16 . */
+/*           On entry, BETA specifies the scalar beta. When BETA is */
+/*           supplied as zero then Y need not be set on input. */
+/*           Unchanged on exit. */
+
+/*  Y      - COMPLEX*16 array of DIMENSION at least */
+/*           ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n' */
+/*           and at least */
+/*           ( 1 + ( n - 1 )*abs( INCY ) ) otherwise. */
+/*           Before entry with BETA non-zero, the incremented array Y */
+/*           must contain the vector y. On exit, Y is overwritten by the */
+/*           updated vector y. */
+
+/*  INCY   - INTEGER. */
+/*           On entry, INCY specifies the increment for the elements of */
+/*           Y. INCY must not be zero. */
+/*           Unchanged on exit. */
+
+
+/*  Level 2 Blas routine. */
+
+/*  -- Written on 22-October-1986. */
+/*     Jack Dongarra, Argonne National Lab. */
+/*     Jeremy Du Croz, Nag Central Office. */
+/*     Sven Hammarling, Nag Central Office. */
+/*     Richard Hanson, Sandia National Labs. */
+
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --x;
+    --y;
+
+    /* Function Body */
+    info = 0;
+    if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")
+	    ) {
+	info = 1;
+    } else if (*m < 0) {
+	info = 2;
+    } else if (*n < 0) {
+	info = 3;
+    } else if (*lda < max(1,*m)) {
+	info = 6;
+    } else if (*incx == 0) {
+	info = 8;
+    } else if (*incy == 0) {
+	info = 11;
+    }
+    if (info != 0) {
+	xerbla_("ZGEMV ", &info);
+	return 0;
+    }
+
+/*     Quick return if possible. */
+
+    if (*m == 0 || *n == 0 || alpha->r == 0. && alpha->i == 0. && (beta->r ==
+	    1. && beta->i == 0.)) {
+	return 0;
+    }
+
+/*     NOCONJ is true only for TRANS = 'T' (plain transpose); when it is */
+/*     false the transpose branch below conjugates A element-wise via */
+/*     d_cnjg. */
+
+    noconj = lsame_(trans, "T");
+
+/*     Set LENX and LENY, the lengths of the vectors x and y, and set */
+/*     up the start points in X and Y. */
+
+    if (lsame_(trans, "N")) {
+	lenx = *n;
+	leny = *m;
+    } else {
+	lenx = *m;
+	leny = *n;
+    }
+    if (*incx > 0) {
+	kx = 1;
+    } else {
+	kx = 1 - (lenx - 1) * *incx;
+    }
+    if (*incy > 0) {
+	ky = 1;
+    } else {
+	ky = 1 - (leny - 1) * *incy;
+    }
+
+/*     Start the operations. In this version the elements of A are */
+/*     accessed sequentially with one pass through A. */
+
+/*     First form y := beta*y. */
+
+    if (beta->r != 1. || beta->i != 0.) {
+	if (*incy == 1) {
+	    if (beta->r == 0. && beta->i == 0.) {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = i__;
+		    y[i__2].r = 0., y[i__2].i = 0.;
+/* L10: */
+		}
+	    } else {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = i__;
+		    i__3 = i__;
+		    z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+			    z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+			    .r;
+		    y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L20: */
+		}
+	    }
+	} else {
+	    iy = ky;
+	    if (beta->r == 0. && beta->i == 0.) {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = iy;
+		    y[i__2].r = 0., y[i__2].i = 0.;
+		    iy += *incy;
+/* L30: */
+		}
+	    } else {
+		i__1 = leny;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = iy;
+		    i__3 = iy;
+		    z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+			    z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+			    .r;
+		    y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+		    iy += *incy;
+/* L40: */
+		}
+	    }
+	}
+    }
+    if (alpha->r == 0. && alpha->i == 0.) {
+	return 0;
+    }
+    if (lsame_(trans, "N")) {
+
+/*        Form y := alpha*A*x + y. */
+
+/*        Unit-stride Y can be indexed directly; otherwise IY steps by */
+/*        INCY. Columns of A scaled by alpha*x(jx) are accumulated into y. */
+
+	jx = kx;
+	if (*incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		if (x[i__2].r != 0. || x[i__2].i != 0.) {
+		    i__2 = jx;
+		    z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+			    z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+			    .r;
+		    temp.r = z__1.r, temp.i = z__1.i;
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__;
+			i__4 = i__;
+			i__5 = i__ + j * a_dim1;
+			z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+				z__2.i = temp.r * a[i__5].i + temp.i * a[i__5]
+				.r;
+			z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i +
+				z__2.i;
+			y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+/* L50: */
+		    }
+		}
+		jx += *incx;
+/* L60: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		if (x[i__2].r != 0. || x[i__2].i != 0.) {
+		    i__2 = jx;
+		    z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+			    z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+			    .r;
+		    temp.r = z__1.r, temp.i = z__1.i;
+		    iy = ky;
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = iy;
+			i__4 = iy;
+			i__5 = i__ + j * a_dim1;
+			z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+				z__2.i = temp.r * a[i__5].i + temp.i * a[i__5]
+				.r;
+			z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i +
+				z__2.i;
+			y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+			iy += *incy;
+/* L70: */
+		    }
+		}
+		jx += *incx;
+/* L80: */
+	    }
+	}
+    } else {
+
+/*        Form y := alpha*A'*x + y or y := alpha*conjg( A' )*x + y. */
+
+/*        Unit-stride X can be indexed directly; otherwise IX steps by */
+/*        INCX. Each column j of A yields one dot product accumulated in */
+/*        TEMP, then y(jy) += alpha*TEMP. */
+
+	jy = ky;
+	if (*incx == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp.r = 0., temp.i = 0.;
+		if (noconj) {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * a_dim1;
+			i__4 = i__;
+			z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4]
+				.i, z__2.i = a[i__3].r * x[i__4].i + a[i__3]
+				.i * x[i__4].r;
+			z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+			temp.r = z__1.r, temp.i = z__1.i;
+/* L90: */
+		    }
+		} else {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+			i__3 = i__;
+			z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+				z__2.i = z__3.r * x[i__3].i + z__3.i * x[i__3]
+				.r;
+			z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+			temp.r = z__1.r, temp.i = z__1.i;
+/* L100: */
+		    }
+		}
+		i__2 = jy;
+		i__3 = jy;
+		z__2.r = alpha->r * temp.r - alpha->i * temp.i, z__2.i =
+			alpha->r * temp.i + alpha->i * temp.r;
+		z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+		y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+		jy += *incy;
+/* L110: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		temp.r = 0., temp.i = 0.;
+		ix = kx;
+		if (noconj) {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * a_dim1;
+			i__4 = ix;
+			z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4]
+				.i, z__2.i = a[i__3].r * x[i__4].i + a[i__3]
+				.i * x[i__4].r;
+			z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+			temp.r = z__1.r, temp.i = z__1.i;
+			ix += *incx;
+/* L120: */
+		    }
+		} else {
+		    i__2 = *m;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+			i__3 = ix;
+			z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+				z__2.i = z__3.r * x[i__3].i + z__3.i * x[i__3]
+				.r;
+			z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+			temp.r = z__1.r, temp.i = z__1.i;
+			ix += *incx;
+/* L130: */
+		    }
+		}
+		i__2 = jy;
+		i__3 = jy;
+		z__2.r = alpha->r * temp.r - alpha->i * temp.i, z__2.i =
+			alpha->r * temp.i + alpha->i * temp.r;
+		z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+		y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+		jy += *incy;
+/* L140: */
+	    }
+	}
+    }
+
+    return 0;
+
+/*     End of ZGEMV . */
+
+} /* zgemv_ */
diff --git a/contrib/libs/cblas/zgerc.c b/contrib/libs/cblas/zgerc.c
new file mode 100644
index 0000000000..97929453e8
--- /dev/null
+++ b/contrib/libs/cblas/zgerc.c
@@ -0,0 +1,218 @@
+/* zgerc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zgerc_(integer *m, integer *n, doublecomplex *alpha,
+	doublecomplex *x, integer *incx, doublecomplex *y, integer *incy,
+	doublecomplex *a, integer *lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    doublecomplex z__1, z__2;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__, j, ix, jy, kx, info;
+    doublecomplex temp;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZGERC performs the rank 1 operation */
+
+/*     A := alpha*x*conjg( y' ) + A, */
+
+/*  where alpha is a scalar, x is an m element vector, y is an n element */
+/*  vector and A is an m by n matrix. */
+
+/*  Arguments */
+/*  ========== */
+
+/*  M      - INTEGER. */
+/*           On entry, M specifies the number of rows of the matrix A. */
+/*           M must be at least zero. */
+/*           Unchanged on exit. */
+
+/*  N      - INTEGER. */
+/*           On entry, N specifies the number of columns of the matrix A. */
+/*           N must be at least zero. */
+/*           Unchanged on exit. */
+
+/*  ALPHA  - COMPLEX*16 . */
+/*           On entry, ALPHA specifies the scalar alpha. */
+/*           Unchanged on exit. */
+
+/*  X      - COMPLEX*16 array of dimension at least */
+/*           ( 1 + ( m - 1 )*abs( INCX ) ). */
+/*           Before entry, the incremented array X must contain the m */
+/*           element vector x. */
+/*           Unchanged on exit. */
+
+/*  INCX   - INTEGER. */
+/*           On entry, INCX specifies the increment for the elements of */
+/*           X. INCX must not be zero. */
+/*           Unchanged on exit. */
+
+/*  Y      - COMPLEX*16 array of dimension at least */
+/*           ( 1 + ( n - 1 )*abs( INCY ) ). */
+/*           Before entry, the incremented array Y must contain the n */
+/*           element vector y. */
+/*           Unchanged on exit. */
+
+/*  INCY   - INTEGER. */
+/*           On entry, INCY specifies the increment for the elements of */
+/*           Y. INCY must not be zero. */
+/*           Unchanged on exit. */
+
+/*  A      - COMPLEX*16 array of DIMENSION ( LDA, n ). */
+/*           Before entry, the leading m by n part of the array A must */
+/*           contain the matrix of coefficients. On exit, A is */
+/*           overwritten by the updated matrix. */
+
+/*  LDA    - INTEGER. */
+/*           On entry, LDA specifies the first dimension of A as declared */
+/*           in the calling (sub) program. LDA must be at least */
+/*           max( 1, m ). */
+/*           Unchanged on exit. */
+
+
+/*  Level 2 Blas routine. */
+
+/*  -- Written on 22-October-1986. */
+/*     Jack Dongarra, Argonne National Lab. */
+/*     Jeremy Du Croz, Nag Central Office. */
+/*     Sven Hammarling, Nag Central Office. */
+/*     Richard Hanson, Sandia National Labs. */
+
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --x;
+    --y;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    info = 0;
+    if (*m < 0) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*incx == 0) {
+	info = 5;
+    } else if (*incy == 0) {
+	info = 7;
+    } else if (*lda < max(1,*m)) {
+	info = 9;
+    }
+    if (info != 0) {
+	xerbla_("ZGERC ", &info);
+	return 0;
+    }
+
+/*     Quick return if possible. */
+
+    if (*m == 0 || *n == 0 || alpha->r == 0. && alpha->i == 0.) {
+	return 0;
+    }
+
+/*     Start the operations. In this version the elements of A are */
+/*     accessed sequentially with one pass through A. */
+
+/*     JY starts at the first element of Y, allowing for a negative INCY. */
+
+    if (*incy > 0) {
+	jy = 1;
+    } else {
+	jy = 1 - (*n - 1) * *incy;
+    }
+
+/*     Unit-stride X fast path; the general case walks X with IX from KX. */
+/*     For each column j with y(jy) nonzero, TEMP = alpha*conjg( y(jy) ) */
+/*     and column j of A gets x*TEMP added (the conjugation distinguishes */
+/*     ZGERC from ZGERU). */
+
+    if (*incx == 1) {
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = jy;
+	    if (y[i__2].r != 0. || y[i__2].i != 0.) {
+		d_cnjg(&z__2, &y[jy]);
+		z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+			alpha->r * z__2.i + alpha->i * z__2.r;
+		temp.r = z__1.r, temp.i = z__1.i;
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * a_dim1;
+		    i__4 = i__ + j * a_dim1;
+		    i__5 = i__;
+		    z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i =
+			     x[i__5].r * temp.i + x[i__5].i * temp.r;
+		    z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
+		    a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L10: */
+		}
+	    }
+	    jy += *incy;
+/* L20: */
+	}
+    } else {
+	if (*incx > 0) {
+	    kx = 1;
+	} else {
+	    kx = 1 - (*m - 1) * *incx;
+	}
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = jy;
+	    if (y[i__2].r != 0. || y[i__2].i != 0.) {
+		d_cnjg(&z__2, &y[jy]);
+		z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+			alpha->r * z__2.i + alpha->i * z__2.r;
+		temp.r = z__1.r, temp.i = z__1.i;
+		ix = kx;
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * a_dim1;
+		    i__4 = i__ + j * a_dim1;
+		    i__5 = ix;
+		    z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i =
+			     x[i__5].r * temp.i + x[i__5].i * temp.r;
+		    z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
+		    a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+		    ix += *incx;
+/* L30: */
+		}
+	    }
+	    jy += *incy;
+/* L40: */
+	}
+    }
+
+    return 0;
+
+/*     End of ZGERC . */
+
+} /* zgerc_ */
diff --git a/contrib/libs/cblas/zgeru.c b/contrib/libs/cblas/zgeru.c
new file mode 100644
index 0000000000..f0b98c806a
--- /dev/null
+++ b/contrib/libs/cblas/zgeru.c
@@ -0,0 +1,215 @@
+/* zgeru.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zgeru_(integer *m, integer *n, doublecomplex *alpha,
+	doublecomplex *x, integer *incx, doublecomplex *y, integer *incy,
+	doublecomplex *a, integer *lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    doublecomplex z__1, z__2;
+
+    /* Local variables */
+    integer i__, j, ix, jy, kx, info;
+    doublecomplex temp;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZGERU performs the rank 1 operation */
+
+/*     A := alpha*x*y' + A, */
+
+/*  where alpha is a scalar, x is an m element vector, y is an n element */
+/*  vector and A is an m by n matrix. */
+
+/*  Arguments */
+/*  ========== */
+
+/*  M      - INTEGER. */
+/*           On entry, M specifies the number of rows of the matrix A. */
+/*           M must be at least zero. */
+/*           Unchanged on exit. */
+
+/*  N      - INTEGER. */
+/*           On entry, N specifies the number of columns of the matrix A. */
+/*           N must be at least zero. */
+/*           Unchanged on exit. */
+
+/*  ALPHA  - COMPLEX*16 . */
+/*           On entry, ALPHA specifies the scalar alpha. */
+/*           Unchanged on exit. */
+
+/*  X      - COMPLEX*16 array of dimension at least */
+/*           ( 1 + ( m - 1 )*abs( INCX ) ). */
+/*           Before entry, the incremented array X must contain the m */
+/*           element vector x. */
+/*           Unchanged on exit. */
+
+/*  INCX   - INTEGER. */
+/*           On entry, INCX specifies the increment for the elements of */
+/*           X. INCX must not be zero. */
+/*           Unchanged on exit. */
+
+/*  Y      - COMPLEX*16 array of dimension at least */
+/*           ( 1 + ( n - 1 )*abs( INCY ) ). */
+/*           Before entry, the incremented array Y must contain the n */
+/*           element vector y. */
+/*           Unchanged on exit. */
+
+/*  INCY   - INTEGER. */
+/*           On entry, INCY specifies the increment for the elements of */
+/*           Y. INCY must not be zero. */
+/*           Unchanged on exit. */
+
+/*  A      - COMPLEX*16 array of DIMENSION ( LDA, n ). */
+/*           Before entry, the leading m by n part of the array A must */
+/*           contain the matrix of coefficients. On exit, A is */
+/*           overwritten by the updated matrix. */
+
+/*  LDA    - INTEGER. */
+/*           On entry, LDA specifies the first dimension of A as declared */
+/*           in the calling (sub) program. LDA must be at least */
+/*           max( 1, m ). */
+/*           Unchanged on exit. */
+
+
+/*  Level 2 Blas routine. */
+
+/*  -- Written on 22-October-1986. */
+/*     Jack Dongarra, Argonne National Lab. */
+/*     Jeremy Du Croz, Nag Central Office. */
+/*     Sven Hammarling, Nag Central Office. */
+/*     Richard Hanson, Sandia National Labs. */
+
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --x;
+    --y;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    info = 0;
+    if (*m < 0) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*incx == 0) {
+	info = 5;
+    } else if (*incy == 0) {
+	info = 7;
+    } else if (*lda < max(1,*m)) {
+	info = 9;
+    }
+    if (info != 0) {
+	xerbla_("ZGERU ", &info);
+	return 0;
+    }
+
+/*     Quick return if possible. */
+
+    if (*m == 0 || *n == 0 || alpha->r == 0. && alpha->i == 0.) {
+	return 0;
+    }
+
+/*     Start the operations. In this version the elements of A are */
+/*     accessed sequentially with one pass through A. */
+
+/*     JY starts at the first element of Y, allowing for a negative INCY. */
+
+    if (*incy > 0) {
+	jy = 1;
+    } else {
+	jy = 1 - (*n - 1) * *incy;
+    }
+
+/*     Unit-stride X fast path; the general case walks X with IX from KX. */
+/*     For each column j with y(jy) nonzero, TEMP = alpha*y(jy) -- taken */
+/*     unconjugated, unlike ZGERC -- and column j of A gets x*TEMP added. */
+
+    if (*incx == 1) {
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = jy;
+	    if (y[i__2].r != 0. || y[i__2].i != 0.) {
+		i__2 = jy;
+		z__1.r = alpha->r * y[i__2].r - alpha->i * y[i__2].i, z__1.i =
+			alpha->r * y[i__2].i + alpha->i * y[i__2].r;
+		temp.r = z__1.r, temp.i = z__1.i;
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * a_dim1;
+		    i__4 = i__ + j * a_dim1;
+		    i__5 = i__;
+		    z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i =
+			     x[i__5].r * temp.i + x[i__5].i * temp.r;
+		    z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
+		    a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L10: */
+		}
+	    }
+	    jy += *incy;
+/* L20: */
+	}
+    } else {
+	if (*incx > 0) {
+	    kx = 1;
+	} else {
+	    kx = 1 - (*m - 1) * *incx;
+	}
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = jy;
+	    if (y[i__2].r != 0. || y[i__2].i != 0.) {
+		i__2 = jy;
+		z__1.r = alpha->r * y[i__2].r - alpha->i * y[i__2].i, z__1.i =
+			alpha->r * y[i__2].i + alpha->i * y[i__2].r;
+		temp.r = z__1.r, temp.i = z__1.i;
+		ix = kx;
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * a_dim1;
+		    i__4 = i__ + j * a_dim1;
+		    i__5 = ix;
+		    z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i =
+			     x[i__5].r * temp.i + x[i__5].i * temp.r;
+		    z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
+		    a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+		    ix += *incx;
+/* L30: */
+		}
+	    }
+	    jy += *incy;
+/* L40: */
+	}
+    }
+
+    return 0;
+
+/*     End of ZGERU . */
+
+} /* zgeru_ */
diff --git a/contrib/libs/cblas/zhbmv.c b/contrib/libs/cblas/zhbmv.c
new file mode 100644
index 0000000000..01da70107f
--- /dev/null
+++ b/contrib/libs/cblas/zhbmv.c
@@ -0,0 +1,483 @@
+/* zhbmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zhbmv_(char *uplo, integer *n, integer *k, doublecomplex
+ *alpha, doublecomplex *a, integer *lda, doublecomplex *x, integer *
+ incx, doublecomplex *beta, doublecomplex *y, integer *incy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, l, ix, iy, jx, jy, kx, ky, info;
+ doublecomplex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ integer kplus1;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHBMV performs the matrix-vector operation */
+
+/* y := alpha*A*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are n element vectors and */
+/* A is an n by n hermitian band matrix, with k super-diagonals. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the band matrix A is being supplied as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' The upper triangular part of A is */
+/* being supplied. */
+
+/* UPLO = 'L' or 'l' The lower triangular part of A is */
+/* being supplied. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry, K specifies the number of super-diagonals of the */
+/* matrix A. K must satisfy 0 .le. K. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX*16 . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX*16 array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */
+/* by n part of the array A must contain the upper triangular */
+/* band part of the hermitian matrix, supplied column by */
+/* column, with the leading diagonal of the matrix in row */
+/* ( k + 1 ) of the array, the first super-diagonal starting at */
+/* position 2 in row k, and so on. The top left k by k triangle */
+/* of the array A is not referenced. */
+/* The following program segment will transfer the upper */
+/* triangular part of a hermitian band matrix from conventional */
+/* full matrix storage to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = K + 1 - J */
+/* DO 10, I = MAX( 1, J - K ), J */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */
+/* by n part of the array A must contain the lower triangular */
+/* band part of the hermitian matrix, supplied column by */
+/* column, with the leading diagonal of the matrix in row 1 of */
+/* the array, the first sub-diagonal starting at position 1 in */
+/* row 2, and so on. The bottom right k by k triangle of the */
+/* array A is not referenced. */
+/* The following program segment will transfer the lower */
+/* triangular part of a hermitian band matrix from conventional */
+/* full matrix storage to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = 1 - J */
+/* DO 10, I = J, MIN( N, J + K ) */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set and are assumed to be zero. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* ( k + 1 ). */
+/* Unchanged on exit. */
+
+/* X - COMPLEX*16 array of DIMENSION at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the */
+/* vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA - COMPLEX*16 . */
+/* On entry, BETA specifies the scalar beta. */
+/* Unchanged on exit. */
+
+/* Y - COMPLEX*16 array of DIMENSION at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the */
+/* vector y. On exit, Y is overwritten by the updated vector y. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset; /* shift to 1-based (Fortran) column-major indexing */
+ --x; /* 1-based indexing for the vectors too */
+ --y;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*k < 0) {
+ info = 3;
+ } else if (*lda < *k + 1) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ } else if (*incy == 0) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("ZHBMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || alpha->r == 0. && alpha->i == 0. && (beta->r == 1. &&
+ beta->i == 0.)) {
+ return 0;
+ }
+
+/* Set up the start points in X and Y. */
+
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx; /* start at the far end when the stride is negative */
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+
+/* Start the operations. In this version the elements of the array A */
+/* are accessed sequentially with one pass through A. */
+
+/* First form y := beta*y. */
+
+ if (beta->r != 1. || beta->i != 0.) {
+ if (*incy == 1) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ y[i__2].r = 0., y[i__2].i = 0.;
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ y[i__2].r = 0., y[i__2].i = 0.;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = iy;
+ z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (alpha->r == 0. && alpha->i == 0.) {
+ return 0;
+ }
+ if (lsame_(uplo, "U")) {
+
+/* Form y when upper triangle of A is stored. */
+
+ kplus1 = *k + 1; /* row of A holding the main diagonal in band storage */
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ l = kplus1 - j; /* row offset so a[l+i][j] corresponds to matrix(i,j) */
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__4 = j - 1;
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ i__5 = l + i__ + j * a_dim1;
+ z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ d_cnjg(&z__3, &a[l + i__ + j * a_dim1]);
+ i__2 = i__;
+ z__2.r = z__3.r * x[i__2].r - z__3.i * x[i__2].i, z__2.i =
+ z__3.r * x[i__2].i + z__3.i * x[i__2].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L50: */
+ }
+ i__4 = j;
+ i__2 = j;
+ i__3 = kplus1 + j * a_dim1;
+ d__1 = a[i__3].r; /* diagonal of a Hermitian matrix is real */
+ z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i;
+ z__2.r = y[i__2].r + z__3.r, z__2.i = y[i__2].i + z__3.i;
+ z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ y[i__4].r = z__1.r, y[i__4].i = z__1.i;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__4 = jx;
+ z__1.r = alpha->r * x[i__4].r - alpha->i * x[i__4].i, z__1.i =
+ alpha->r * x[i__4].i + alpha->i * x[i__4].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ ix = kx;
+ iy = ky;
+ l = kplus1 - j;
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *k;
+ i__3 = j - 1;
+ for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+ i__4 = iy;
+ i__2 = iy;
+ i__5 = l + i__ + j * a_dim1;
+ z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ z__1.r = y[i__2].r + z__2.r, z__1.i = y[i__2].i + z__2.i;
+ y[i__4].r = z__1.r, y[i__4].i = z__1.i;
+ d_cnjg(&z__3, &a[l + i__ + j * a_dim1]);
+ i__4 = ix;
+ z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i, z__2.i =
+ z__3.r * x[i__4].i + z__3.i * x[i__4].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L70: */
+ }
+ i__3 = jy;
+ i__4 = jy;
+ i__2 = kplus1 + j * a_dim1;
+ d__1 = a[i__2].r; /* real diagonal entry */
+ z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i;
+ z__2.r = y[i__4].r + z__3.r, z__2.i = y[i__4].i + z__3.i;
+ z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ jx += *incx;
+ jy += *incy;
+ if (j > *k) { /* slide the x/y start points once j passes the bandwidth */
+ kx += *incx;
+ ky += *incy;
+ }
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y when lower triangle of A is stored. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__3 = j;
+ z__1.r = alpha->r * x[i__3].r - alpha->i * x[i__3].i, z__1.i =
+ alpha->r * x[i__3].i + alpha->i * x[i__3].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ i__3 = j;
+ i__4 = j;
+ i__2 = j * a_dim1 + 1;
+ d__1 = a[i__2].r; /* diagonal of a Hermitian matrix is real */
+ z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ l = 1 - j; /* lower band storage: diagonal lives in row 1 */
+/* Computing MIN */
+ i__4 = *n, i__2 = j + *k;
+ i__3 = min(i__4,i__2);
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ i__2 = i__;
+ i__5 = l + i__ + j * a_dim1;
+ z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ z__1.r = y[i__2].r + z__2.r, z__1.i = y[i__2].i + z__2.i;
+ y[i__4].r = z__1.r, y[i__4].i = z__1.i;
+ d_cnjg(&z__3, &a[l + i__ + j * a_dim1]);
+ i__4 = i__;
+ z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i, z__2.i =
+ z__3.r * x[i__4].i + z__3.i * x[i__4].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L90: */
+ }
+ i__3 = j;
+ i__4 = j;
+ z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+/* L100: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__3 = jx;
+ z__1.r = alpha->r * x[i__3].r - alpha->i * x[i__3].i, z__1.i =
+ alpha->r * x[i__3].i + alpha->i * x[i__3].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ i__3 = jy;
+ i__4 = jy;
+ i__2 = j * a_dim1 + 1;
+ d__1 = a[i__2].r; /* real diagonal entry */
+ z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ l = 1 - j;
+ ix = jx;
+ iy = jy;
+/* Computing MIN */
+ i__4 = *n, i__2 = j + *k;
+ i__3 = min(i__4,i__2);
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ ix += *incx;
+ iy += *incy;
+ i__4 = iy;
+ i__2 = iy;
+ i__5 = l + i__ + j * a_dim1;
+ z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ z__1.r = y[i__2].r + z__2.r, z__1.i = y[i__2].i + z__2.i;
+ y[i__4].r = z__1.r, y[i__4].i = z__1.i;
+ d_cnjg(&z__3, &a[l + i__ + j * a_dim1]);
+ i__4 = ix;
+ z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i, z__2.i =
+ z__3.r * x[i__4].i + z__3.i * x[i__4].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L110: */
+ }
+ i__3 = jy;
+ i__4 = jy;
+ z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ jx += *incx;
+ jy += *incy;
+/* L120: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZHBMV . */
+
+} /* zhbmv_ */
diff --git a/contrib/libs/cblas/zhemm.c b/contrib/libs/cblas/zhemm.c
new file mode 100644
index 0000000000..dc43c4f0c6
--- /dev/null
+++ b/contrib/libs/cblas/zhemm.c
@@ -0,0 +1,496 @@
+/* zhemm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zhemm_(char *side, char *uplo, integer *m, integer *n,
+ doublecomplex *alpha, doublecomplex *a, integer *lda, doublecomplex *
+ b, integer *ldb, doublecomplex *beta, doublecomplex *c__, integer *
+ ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5, i__6;
+ doublereal d__1;
+ doublecomplex z__1, z__2, z__3, z__4, z__5;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k, info;
+ doublecomplex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ integer nrowa;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHEMM performs one of the matrix-matrix operations */
+
+/* C := alpha*A*B + beta*C, */
+
+/* or */
+
+/* C := alpha*B*A + beta*C, */
+
+/* where alpha and beta are scalars, A is an hermitian matrix and B and */
+/* C are m by n matrices. */
+
+/* Arguments */
+/* ========== */
+
+/* SIDE - CHARACTER*1. */
+/* On entry, SIDE specifies whether the hermitian matrix A */
+/* appears on the left or right in the operation as follows: */
+
+/* SIDE = 'L' or 'l' C := alpha*A*B + beta*C, */
+
+/* SIDE = 'R' or 'r' C := alpha*B*A + beta*C, */
+
+/* Unchanged on exit. */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the hermitian matrix A is to be */
+/* referenced as follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of the */
+/* hermitian matrix is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of the */
+/* hermitian matrix is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix C. */
+/* M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix C. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX*16 . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX*16 array of DIMENSION ( LDA, ka ), where ka is */
+/* m when SIDE = 'L' or 'l' and is n otherwise. */
+/* Before entry with SIDE = 'L' or 'l', the m by m part of */
+/* the array A must contain the hermitian matrix, such that */
+/* when UPLO = 'U' or 'u', the leading m by m upper triangular */
+/* part of the array A must contain the upper triangular part */
+/* of the hermitian matrix and the strictly lower triangular */
+/* part of A is not referenced, and when UPLO = 'L' or 'l', */
+/* the leading m by m lower triangular part of the array A */
+/* must contain the lower triangular part of the hermitian */
+/* matrix and the strictly upper triangular part of A is not */
+/* referenced. */
+/* Before entry with SIDE = 'R' or 'r', the n by n part of */
+/* the array A must contain the hermitian matrix, such that */
+/* when UPLO = 'U' or 'u', the leading n by n upper triangular */
+/* part of the array A must contain the upper triangular part */
+/* of the hermitian matrix and the strictly lower triangular */
+/* part of A is not referenced, and when UPLO = 'L' or 'l', */
+/* the leading n by n lower triangular part of the array A */
+/* must contain the lower triangular part of the hermitian */
+/* matrix and the strictly upper triangular part of A is not */
+/* referenced. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set, they are assumed to be zero. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When SIDE = 'L' or 'l' then */
+/* LDA must be at least max( 1, m ), otherwise LDA must be at */
+/* least max( 1, n ). */
+/* Unchanged on exit. */
+
+/* B - COMPLEX*16 array of DIMENSION ( LDB, n ). */
+/* Before entry, the leading m by n part of the array B must */
+/* contain the matrix B. */
+/* Unchanged on exit. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. LDB must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+/* BETA - COMPLEX*16 . */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then C need not be set on input. */
+/* Unchanged on exit. */
+
+/* C - COMPLEX*16 array of DIMENSION ( LDC, n ). */
+/* Before entry, the leading m by n part of the array C must */
+/* contain the matrix C, except when beta is zero, in which */
+/* case C need not be set on entry. */
+/* On exit, the array C is overwritten by the m by n updated */
+/* matrix. */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Set NROWA as the number of rows of A. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset; /* shift to 1-based (Fortran) column-major indexing */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(side, "L")) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ upper = lsame_(uplo, "U");
+
+/* Test the input parameters. */
+
+ info = 0;
+ if (! lsame_(side, "L") && ! lsame_(side, "R")) {
+ info = 1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ info = 2;
+ } else if (*m < 0) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldb < max(1,*m)) {
+ info = 9;
+ } else if (*ldc < max(1,*m)) {
+ info = 12;
+ }
+ if (info != 0) {
+ xerbla_("ZHEMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0 || alpha->r == 0. && alpha->i == 0. && (beta->r ==
+ 1. && beta->i == 0.)) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0. && alpha->i == 0.) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4].i,
+ z__1.i = beta->r * c__[i__4].i + beta->i * c__[
+ i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(side, "L")) {
+
+/* Form C := alpha*A*B + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+ z__1.i = alpha->r * b[i__3].i + alpha->i * b[i__3]
+ .r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ i__3 = i__ - 1;
+ for (k = 1; k <= i__3; ++k) {
+ i__4 = k + j * c_dim1;
+ i__5 = k + j * c_dim1;
+ i__6 = k + i__ * a_dim1;
+ z__2.r = temp1.r * a[i__6].r - temp1.i * a[i__6].i,
+ z__2.i = temp1.r * a[i__6].i + temp1.i * a[
+ i__6].r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5].i +
+ z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+ i__4 = k + j * b_dim1;
+ d_cnjg(&z__3, &a[k + i__ * a_dim1]);
+ z__2.r = b[i__4].r * z__3.r - b[i__4].i * z__3.i,
+ z__2.i = b[i__4].r * z__3.i + b[i__4].i *
+ z__3.r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L50: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + i__ * a_dim1;
+ d__1 = a[i__4].r; /* diagonal of Hermitian A is real */
+ z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i;
+ z__3.r = alpha->r * temp2.r - alpha->i * temp2.i,
+ z__3.i = alpha->r * temp2.i + alpha->i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ i__5 = i__ + i__ * a_dim1;
+ d__1 = a[i__5].r; /* real diagonal */
+ z__4.r = d__1 * temp1.r, z__4.i = d__1 * temp1.i;
+ z__2.r = z__3.r + z__4.r, z__2.i = z__3.i + z__4.i;
+ z__5.r = alpha->r * temp2.r - alpha->i * temp2.i,
+ z__5.i = alpha->r * temp2.i + alpha->i *
+ temp2.r;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L60: */
+ }
+/* L70: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ i__2 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__2].r - alpha->i * b[i__2].i,
+ z__1.i = alpha->r * b[i__2].i + alpha->i * b[i__2]
+ .r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ i__2 = *m;
+ for (k = i__ + 1; k <= i__2; ++k) {
+ i__3 = k + j * c_dim1;
+ i__4 = k + j * c_dim1;
+ i__5 = k + i__ * a_dim1;
+ z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ z__2.i = temp1.r * a[i__5].i + temp1.i * a[
+ i__5].r;
+ z__1.r = c__[i__4].r + z__2.r, z__1.i = c__[i__4].i +
+ z__2.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ i__3 = k + j * b_dim1;
+ d_cnjg(&z__3, &a[k + i__ * a_dim1]);
+ z__2.r = b[i__3].r * z__3.r - b[i__3].i * z__3.i,
+ z__2.i = b[i__3].r * z__3.i + b[i__3].i *
+ z__3.r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L80: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__2 = i__ + j * c_dim1;
+ i__3 = i__ + i__ * a_dim1;
+ d__1 = a[i__3].r; /* real diagonal */
+ z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i;
+ z__3.r = alpha->r * temp2.r - alpha->i * temp2.i,
+ z__3.i = alpha->r * temp2.i + alpha->i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ } else {
+ i__2 = i__ + j * c_dim1;
+ i__3 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__3].r - beta->i * c__[i__3]
+ .i, z__3.i = beta->r * c__[i__3].i + beta->i *
+ c__[i__3].r;
+ i__4 = i__ + i__ * a_dim1;
+ d__1 = a[i__4].r; /* real diagonal */
+ z__4.r = d__1 * temp1.r, z__4.i = d__1 * temp1.i;
+ z__2.r = z__3.r + z__4.r, z__2.i = z__3.i + z__4.i;
+ z__5.r = alpha->r * temp2.r - alpha->i * temp2.i,
+ z__5.i = alpha->r * temp2.i + alpha->i *
+ temp2.r;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ }
+/* L90: */
+ }
+/* L100: */
+ }
+ }
+ } else {
+
+/* Form C := alpha*B*A + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + j * a_dim1;
+ d__1 = a[i__2].r; /* real diagonal entry a(j,j) */
+ z__1.r = d__1 * alpha->r, z__1.i = d__1 * alpha->i;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ if (beta->r == 0. && beta->i == 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__1.r = temp1.r * b[i__4].r - temp1.i * b[i__4].i,
+ z__1.i = temp1.r * b[i__4].i + temp1.i * b[i__4]
+ .r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L110: */
+ }
+ } else {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__2.r = beta->r * c__[i__4].r - beta->i * c__[i__4].i,
+ z__2.i = beta->r * c__[i__4].i + beta->i * c__[
+ i__4].r;
+ i__5 = i__ + j * b_dim1;
+ z__3.r = temp1.r * b[i__5].r - temp1.i * b[i__5].i,
+ z__3.i = temp1.r * b[i__5].i + temp1.i * b[i__5]
+ .r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L120: */
+ }
+ }
+ i__2 = j - 1;
+ for (k = 1; k <= i__2; ++k) {
+ if (upper) {
+ i__3 = k + j * a_dim1;
+ z__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ z__1.i = alpha->r * a[i__3].i + alpha->i * a[i__3]
+ .r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ } else {
+ d_cnjg(&z__2, &a[j + k * a_dim1]); /* a(k,j) = conj(a(j,k)): only the lower triangle is stored */
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+ alpha->r * z__2.i + alpha->i * z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ }
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + k * b_dim1;
+ z__2.r = temp1.r * b[i__6].r - temp1.i * b[i__6].i,
+ z__2.i = temp1.r * b[i__6].i + temp1.i * b[i__6]
+ .r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5].i +
+ z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L130: */
+ }
+/* L140: */
+ }
+ i__2 = *n;
+ for (k = j + 1; k <= i__2; ++k) {
+ if (upper) {
+ d_cnjg(&z__2, &a[j + k * a_dim1]); /* a(k,j) = conj(a(j,k)): only the upper triangle is stored */
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+ alpha->r * z__2.i + alpha->i * z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ } else {
+ i__3 = k + j * a_dim1;
+ z__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ z__1.i = alpha->r * a[i__3].i + alpha->i * a[i__3]
+ .r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ }
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + k * b_dim1;
+ z__2.r = temp1.r * b[i__6].r - temp1.i * b[i__6].i,
+ z__2.i = temp1.r * b[i__6].i + temp1.i * b[i__6]
+ .r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5].i +
+ z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L150: */
+ }
+/* L160: */
+ }
+/* L170: */
+ }
+ }
+
+ return 0;
+
+/* End of ZHEMM . */
+
+} /* zhemm_ */
diff --git a/contrib/libs/cblas/zhemv.c b/contrib/libs/cblas/zhemv.c
new file mode 100644
index 0000000000..646dc12c10
--- /dev/null
+++ b/contrib/libs/cblas/zhemv.c
@@ -0,0 +1,433 @@
+/* zhemv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zhemv_(char *uplo, integer *n, doublecomplex *alpha,
+ doublecomplex *a, integer *lda, doublecomplex *x, integer *incx,
+ doublecomplex *beta, doublecomplex *y, integer *incy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, ix, iy, jx, jy, kx, ky, info;
+ doublecomplex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHEMV performs the matrix-vector operation */
+
+/* y := alpha*A*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are n element vectors and */
+/* A is an n by n hermitian matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array A is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of A */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of A */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX*16 . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX*16 array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular part of the hermitian matrix and the strictly */
+/* lower triangular part of A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular part of the hermitian matrix and the strictly */
+/* upper triangular part of A is not referenced. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set and are assumed to be zero. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+/* X - COMPLEX*16 array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA - COMPLEX*16 . */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then Y need not be set on input. */
+/* Unchanged on exit. */
+
+/* Y - COMPLEX*16 array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. On exit, Y is overwritten by the updated */
+/* vector y. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --y;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*lda < max(1,*n)) {
+ info = 5;
+ } else if (*incx == 0) {
+ info = 7;
+ } else if (*incy == 0) {
+ info = 10;
+ }
+ if (info != 0) {
+ xerbla_("ZHEMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || alpha->r == 0. && alpha->i == 0. && (beta->r == 1. &&
+ beta->i == 0.)) {
+ return 0;
+ }
+
+/* Set up the start points in X and Y. */
+
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through the triangular part */
+/* of A. */
+
+/* First form y := beta*y. */
+
+ if (beta->r != 1. || beta->i != 0.) {
+ if (*incy == 1) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ y[i__2].r = 0., y[i__2].i = 0.;
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ y[i__2].r = 0., y[i__2].i = 0.;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = iy;
+ z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (alpha->r == 0. && alpha->i == 0.) {
+ return 0;
+ }
+ if (lsame_(uplo, "U")) {
+
+/* Form y when A is stored in upper triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = i__;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
+ z__3.r * x[i__3].i + z__3.i * x[i__3].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L50: */
+ }
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ d__1 = a[i__4].r;
+ z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i;
+ z__2.r = y[i__3].r + z__3.r, z__2.i = y[i__3].i + z__3.i;
+ z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ ix = kx;
+ iy = ky;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = iy;
+ i__4 = iy;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = ix;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
+ z__3.r * x[i__3].i + z__3.i * x[i__3].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L70: */
+ }
+ i__2 = jy;
+ i__3 = jy;
+ i__4 = j + j * a_dim1;
+ d__1 = a[i__4].r;
+ z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i;
+ z__2.r = y[i__3].r + z__3.r, z__2.i = y[i__3].i + z__3.i;
+ z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ jx += *incx;
+ jy += *incy;
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y when A is stored in lower triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ d__1 = a[i__4].r;
+ z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = i__;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
+ z__3.r * x[i__3].i + z__3.i * x[i__3].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L90: */
+ }
+ i__2 = j;
+ i__3 = j;
+ z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L100: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ i__2 = jy;
+ i__3 = jy;
+ i__4 = j + j * a_dim1;
+ d__1 = a[i__4].r;
+ z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ ix = jx;
+ iy = jy;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ iy += *incy;
+ i__3 = iy;
+ i__4 = iy;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = ix;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
+ z__3.r * x[i__3].i + z__3.i * x[i__3].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L110: */
+ }
+ i__2 = jy;
+ i__3 = jy;
+ z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ jx += *incx;
+ jy += *incy;
+/* L120: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZHEMV . */
+
+} /* zhemv_ */
diff --git a/contrib/libs/cblas/zher.c b/contrib/libs/cblas/zher.c
new file mode 100644
index 0000000000..1aedcc6ded
--- /dev/null
+++ b/contrib/libs/cblas/zher.c
@@ -0,0 +1,338 @@
+/* zher.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zher_(char *uplo, integer *n, doublereal *alpha,
+ doublecomplex *x, integer *incx, doublecomplex *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, ix, jx, kx, info;
+ doublecomplex temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHER performs the hermitian rank 1 operation */
+
+/* A := alpha*x*conjg( x' ) + A, */
+
+/* where alpha is a real scalar, x is an n element vector and A is an */
+/* n by n hermitian matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array A is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of A */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of A */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - COMPLEX*16 array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX*16 array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular part of the hermitian matrix and the strictly */
+/* lower triangular part of A is not referenced. On exit, the */
+/* upper triangular part of the array A is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular part of the hermitian matrix and the strictly */
+/* upper triangular part of A is not referenced. On exit, the */
+/* lower triangular part of the array A is overwritten by the */
+/* lower triangular part of the updated matrix. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set, they are assumed to be zero, and on exit they */
+/* are set to zero. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --x;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*lda < max(1,*n)) {
+ info = 7;
+ }
+ if (info != 0) {
+ xerbla_("ZHER ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || *alpha == 0.) {
+ return 0;
+ }
+
+/* Set the start point in X if the increment is not unity. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through the triangular part */
+/* of A. */
+
+ if (lsame_(uplo, "U")) {
+
+/* Form A when A is stored in upper triangle. */
+
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ d_cnjg(&z__2, &x[j]);
+ z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ z__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i +
+ z__2.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L10: */
+ }
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = j;
+ z__1.r = x[i__4].r * temp.r - x[i__4].i * temp.i, z__1.i =
+ x[i__4].r * temp.i + x[i__4].i * temp.r;
+ d__1 = a[i__3].r + z__1.r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ d__1 = a[i__3].r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ d_cnjg(&z__2, &x[jx]);
+ z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix = kx;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ z__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i +
+ z__2.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+ ix += *incx;
+/* L30: */
+ }
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = jx;
+ z__1.r = x[i__4].r * temp.r - x[i__4].i * temp.i, z__1.i =
+ x[i__4].r * temp.i + x[i__4].i * temp.r;
+ d__1 = a[i__3].r + z__1.r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ d__1 = a[i__3].r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ }
+ jx += *incx;
+/* L40: */
+ }
+ }
+ } else {
+
+/* Form A when A is stored in lower triangle. */
+
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ d_cnjg(&z__2, &x[j]);
+ z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = j;
+ z__1.r = temp.r * x[i__4].r - temp.i * x[i__4].i, z__1.i =
+ temp.r * x[i__4].i + temp.i * x[i__4].r;
+ d__1 = a[i__3].r + z__1.r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ z__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i +
+ z__2.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L50: */
+ }
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ d__1 = a[i__3].r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ }
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ d_cnjg(&z__2, &x[jx]);
+ z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = jx;
+ z__1.r = temp.r * x[i__4].r - temp.i * x[i__4].i, z__1.i =
+ temp.r * x[i__4].i + temp.i * x[i__4].r;
+ d__1 = a[i__3].r + z__1.r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ ix = jx;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ z__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i +
+ z__2.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L70: */
+ }
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ d__1 = a[i__3].r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ }
+ jx += *incx;
+/* L80: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZHER . */
+
+} /* zher_ */
diff --git a/contrib/libs/cblas/zher2.c b/contrib/libs/cblas/zher2.c
new file mode 100644
index 0000000000..27b31bce09
--- /dev/null
+++ b/contrib/libs/cblas/zher2.c
@@ -0,0 +1,447 @@
+/* zher2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zher2_(char *uplo, integer *n, doublecomplex *alpha,
+ doublecomplex *x, integer *incx, doublecomplex *y, integer *incy,
+ doublecomplex *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+ doublereal d__1;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, ix, iy, jx, jy, kx, ky, info;
+ doublecomplex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHER2 performs the hermitian rank 2 operation */
+
+/* A := alpha*x*conjg( y' ) + conjg( alpha )*y*conjg( x' ) + A, */
+
+/* where alpha is a scalar, x and y are n element vectors and A is an n */
+/* by n hermitian matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array A is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of A */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of A */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX*16 . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - COMPLEX*16 array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* Y - COMPLEX*16 array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. */
+/* Unchanged on exit. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX*16 array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular part of the hermitian matrix and the strictly */
+/* lower triangular part of A is not referenced. On exit, the */
+/* upper triangular part of the array A is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular part of the hermitian matrix and the strictly */
+/* upper triangular part of A is not referenced. On exit, the */
+/* lower triangular part of the array A is overwritten by the */
+/* lower triangular part of the updated matrix. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set, they are assumed to be zero, and on exit they */
+/* are set to zero. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --x;
+ --y;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*incy == 0) {
+ info = 7;
+ } else if (*lda < max(1,*n)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("ZHER2 ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || alpha->r == 0. && alpha->i == 0.) {
+ return 0;
+ }
+
+/* Set up the start points in X and Y if the increments are not both */
+/* unity. */
+
+ if (*incx != 1 || *incy != 1) {
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+ jx = kx;
+ jy = ky;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through the triangular part */
+/* of A. */
+
+ if (lsame_(uplo, "U")) {
+
+/* Form A when A is stored in the upper triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ i__3 = j;
+ if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. ||
+ y[i__3].i != 0.)) {
+ d_cnjg(&z__2, &y[j]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+ alpha->r * z__2.i + alpha->i * z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__2 = j;
+ z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ d_cnjg(&z__1, &z__2);
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ z__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i +
+ z__3.i;
+ i__6 = i__;
+ z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ z__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L10: */
+ }
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = j;
+ z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ z__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = j;
+ z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ z__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ d__1 = a[i__3].r + z__1.r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ d__1 = a[i__3].r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ i__3 = jy;
+ if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. ||
+ y[i__3].i != 0.)) {
+ d_cnjg(&z__2, &y[jy]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+ alpha->r * z__2.i + alpha->i * z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__2 = jx;
+ z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ d_cnjg(&z__1, &z__2);
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ ix = kx;
+ iy = ky;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ z__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i +
+ z__3.i;
+ i__6 = iy;
+ z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ z__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L30: */
+ }
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = jx;
+ z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ z__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = jy;
+ z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ z__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ d__1 = a[i__3].r + z__1.r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ d__1 = a[i__3].r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ }
+ jx += *incx;
+ jy += *incy;
+/* L40: */
+ }
+ }
+ } else {
+
+/* Form A when A is stored in the lower triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ i__3 = j;
+ if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. ||
+ y[i__3].i != 0.)) {
+ d_cnjg(&z__2, &y[j]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+ alpha->r * z__2.i + alpha->i * z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__2 = j;
+ z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ d_cnjg(&z__1, &z__2);
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = j;
+ z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ z__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = j;
+ z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ z__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ d__1 = a[i__3].r + z__1.r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ z__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i +
+ z__3.i;
+ i__6 = i__;
+ z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ z__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L50: */
+ }
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ d__1 = a[i__3].r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ i__3 = jy;
+ if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. ||
+ y[i__3].i != 0.)) {
+ d_cnjg(&z__2, &y[jy]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+ alpha->r * z__2.i + alpha->i * z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__2 = jx;
+ z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ d_cnjg(&z__1, &z__2);
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = jx;
+ z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ z__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = jy;
+ z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ z__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ d__1 = a[i__3].r + z__1.r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ ix = jx;
+ iy = jy;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ iy += *incy;
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ z__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i +
+ z__3.i;
+ i__6 = iy;
+ z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ z__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L70: */
+ }
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ d__1 = a[i__3].r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ }
+ jx += *incx;
+ jy += *incy;
+/* L80: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZHER2 . */
+
+} /* zher2_ */
diff --git a/contrib/libs/cblas/zher2k.c b/contrib/libs/cblas/zher2k.c
new file mode 100644
index 0000000000..efe4ed6d4f
--- /dev/null
+++ b/contrib/libs/cblas/zher2k.c
@@ -0,0 +1,671 @@
+/* zher2k.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zher2k_(char *uplo, char *trans, integer *n, integer *k,
+ doublecomplex *alpha, doublecomplex *a, integer *lda, doublecomplex *
+ b, integer *ldb, doublereal *beta, doublecomplex *c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5, i__6, i__7;
+ doublereal d__1;
+ doublecomplex z__1, z__2, z__3, z__4, z__5, z__6;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, l, info;
+ doublecomplex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ integer nrowa;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHER2K performs one of the hermitian rank 2k operations */
+
+/* C := alpha*A*conjg( B' ) + conjg( alpha )*B*conjg( A' ) + beta*C, */
+
+/* or */
+
+/* C := alpha*conjg( A' )*B + conjg( alpha )*conjg( B' )*A + beta*C, */
+
+/* where alpha and beta are scalars with beta real, C is an n by n */
+/* hermitian matrix and A and B are n by k matrices in the first case */
+/* and k by n matrices in the second case. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array C is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of C */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of C */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' C := alpha*A*conjg( B' ) + */
+/* conjg( alpha )*B*conjg( A' ) + */
+/* beta*C. */
+
+/* TRANS = 'C' or 'c' C := alpha*conjg( A' )*B + */
+/* conjg( alpha )*conjg( B' )*A + */
+/* beta*C. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix C. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry with TRANS = 'N' or 'n', K specifies the number */
+/* of columns of the matrices A and B, and on entry with */
+/* TRANS = 'C' or 'c', K specifies the number of rows of the */
+/* matrices A and B. K must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX*16 . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX*16 array of DIMENSION ( LDA, ka ), where ka is */
+/* k when TRANS = 'N' or 'n', and is n otherwise. */
+/* Before entry with TRANS = 'N' or 'n', the leading n by k */
+/* part of the array A must contain the matrix A, otherwise */
+/* the leading k by n part of the array A must contain the */
+/* matrix A. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDA must be at least max( 1, n ), otherwise LDA must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* B - COMPLEX*16 array of DIMENSION ( LDB, kb ), where kb is */
+/* k when TRANS = 'N' or 'n', and is n otherwise. */
+/* Before entry with TRANS = 'N' or 'n', the leading n by k */
+/* part of the array B must contain the matrix B, otherwise */
+/* the leading k by n part of the array B must contain the */
+/* matrix B. */
+/* Unchanged on exit. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDB must be at least max( 1, n ), otherwise LDB must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* BETA - DOUBLE PRECISION . */
+/* On entry, BETA specifies the scalar beta. */
+/* Unchanged on exit. */
+
+/* C - COMPLEX*16 array of DIMENSION ( LDC, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array C must contain the upper */
+/* triangular part of the hermitian matrix and the strictly */
+/* lower triangular part of C is not referenced. On exit, the */
+/* upper triangular part of the array C is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array C must contain the lower */
+/* triangular part of the hermitian matrix and the strictly */
+/* upper triangular part of C is not referenced. On exit, the */
+/* lower triangular part of the array C is overwritten by the */
+/* lower triangular part of the updated matrix. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set, they are assumed to be zero, and on exit they */
+/* are set to zero. */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+/* -- Modified 8-Nov-93 to set C(J,J) to DBLE( C(J,J) ) when BETA = 1. */
+/* Ed Anderson, Cray Research Inc. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(trans, "N")) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! upper && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "C")) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*k < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldb < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldc < max(1,*n)) {
+ info = 12;
+ }
+ if (info != 0) {
+ xerbla_("ZHER2K", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || (alpha->r == 0. && alpha->i == 0. || *k == 0) && *beta ==
+ 1.) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0. && alpha->i == 0.) {
+ if (upper) {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L30: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+/* L40: */
+ }
+ }
+ } else {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*conjg( B' ) + conjg( alpha )*B*conjg( A' ) + */
+/* C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L90: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L100: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ i__4 = j + l * b_dim1;
+ if (a[i__3].r != 0. || a[i__3].i != 0. || (b[i__4].r !=
+ 0. || b[i__4].i != 0.)) {
+ d_cnjg(&z__2, &b[j + l * b_dim1]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i,
+ z__1.i = alpha->r * z__2.i + alpha->i *
+ z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__3 = j + l * a_dim1;
+ z__2.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ z__2.i = alpha->r * a[i__3].i + alpha->i * a[
+ i__3].r;
+ d_cnjg(&z__1, &z__2);
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__3.r = a[i__6].r * temp1.r - a[i__6].i *
+ temp1.i, z__3.i = a[i__6].r * temp1.i + a[
+ i__6].i * temp1.r;
+ z__2.r = c__[i__5].r + z__3.r, z__2.i = c__[i__5]
+ .i + z__3.i;
+ i__7 = i__ + l * b_dim1;
+ z__4.r = b[i__7].r * temp2.r - b[i__7].i *
+ temp2.i, z__4.i = b[i__7].r * temp2.i + b[
+ i__7].i * temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i +
+ z__4.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L110: */
+ }
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ i__5 = j + l * a_dim1;
+ z__2.r = a[i__5].r * temp1.r - a[i__5].i * temp1.i,
+ z__2.i = a[i__5].r * temp1.i + a[i__5].i *
+ temp1.r;
+ i__6 = j + l * b_dim1;
+ z__3.r = b[i__6].r * temp2.r - b[i__6].i * temp2.i,
+ z__3.i = b[i__6].r * temp2.i + b[i__6].i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ d__1 = c__[i__4].r + z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L140: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L150: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ i__4 = j + l * b_dim1;
+ if (a[i__3].r != 0. || a[i__3].i != 0. || (b[i__4].r !=
+ 0. || b[i__4].i != 0.)) {
+ d_cnjg(&z__2, &b[j + l * b_dim1]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i,
+ z__1.i = alpha->r * z__2.i + alpha->i *
+ z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__3 = j + l * a_dim1;
+ z__2.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ z__2.i = alpha->r * a[i__3].i + alpha->i * a[
+ i__3].r;
+ d_cnjg(&z__1, &z__2);
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ i__3 = *n;
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__3.r = a[i__6].r * temp1.r - a[i__6].i *
+ temp1.i, z__3.i = a[i__6].r * temp1.i + a[
+ i__6].i * temp1.r;
+ z__2.r = c__[i__5].r + z__3.r, z__2.i = c__[i__5]
+ .i + z__3.i;
+ i__7 = i__ + l * b_dim1;
+ z__4.r = b[i__7].r * temp2.r - b[i__7].i *
+ temp2.i, z__4.i = b[i__7].r * temp2.i + b[
+ i__7].i * temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i +
+ z__4.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L160: */
+ }
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ i__5 = j + l * a_dim1;
+ z__2.r = a[i__5].r * temp1.r - a[i__5].i * temp1.i,
+ z__2.i = a[i__5].r * temp1.i + a[i__5].i *
+ temp1.r;
+ i__6 = j + l * b_dim1;
+ z__3.r = b[i__6].r * temp2.r - b[i__6].i * temp2.i,
+ z__3.i = b[i__6].r * temp2.i + b[i__6].i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ d__1 = c__[i__4].r + z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ } else {
+
+/* Form C := alpha*conjg( A' )*B + conjg( alpha )*conjg( B' )*A + */
+/* C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp1.r = 0., temp1.i = 0.;
+ temp2.r = 0., temp2.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * b_dim1;
+ z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i,
+ z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4]
+ .r;
+ z__1.r = temp1.r + z__2.r, z__1.i = temp1.i + z__2.i;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ d_cnjg(&z__3, &b[l + i__ * b_dim1]);
+ i__4 = l + j * a_dim1;
+ z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i,
+ z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4]
+ .r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L190: */
+ }
+ if (i__ == j) {
+ if (*beta == 0.) {
+ i__3 = j + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ d_cnjg(&z__4, alpha);
+ z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
+ z__3.i = z__4.r * temp2.i + z__4.i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ d__1 = z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ } else {
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ d_cnjg(&z__4, alpha);
+ z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
+ z__3.i = z__4.r * temp2.i + z__4.i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ d__1 = *beta * c__[i__4].r + z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ }
+ } else {
+ if (*beta == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ d_cnjg(&z__4, alpha);
+ z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
+ z__3.i = z__4.r * temp2.i + z__4.i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = *beta * c__[i__4].r, z__3.i = *beta *
+ c__[i__4].i;
+ z__4.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__4.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ z__2.r = z__3.r + z__4.r, z__2.i = z__3.i +
+ z__4.i;
+ d_cnjg(&z__6, alpha);
+ z__5.r = z__6.r * temp2.r - z__6.i * temp2.i,
+ z__5.i = z__6.r * temp2.i + z__6.i *
+ temp2.r;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i +
+ z__5.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ temp1.r = 0., temp1.i = 0.;
+ temp2.r = 0., temp2.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * b_dim1;
+ z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i,
+ z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4]
+ .r;
+ z__1.r = temp1.r + z__2.r, z__1.i = temp1.i + z__2.i;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ d_cnjg(&z__3, &b[l + i__ * b_dim1]);
+ i__4 = l + j * a_dim1;
+ z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i,
+ z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4]
+ .r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L220: */
+ }
+ if (i__ == j) {
+ if (*beta == 0.) {
+ i__3 = j + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ d_cnjg(&z__4, alpha);
+ z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
+ z__3.i = z__4.r * temp2.i + z__4.i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ d__1 = z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ } else {
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ d_cnjg(&z__4, alpha);
+ z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
+ z__3.i = z__4.r * temp2.i + z__4.i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ d__1 = *beta * c__[i__4].r + z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ }
+ } else {
+ if (*beta == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ d_cnjg(&z__4, alpha);
+ z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
+ z__3.i = z__4.r * temp2.i + z__4.i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = *beta * c__[i__4].r, z__3.i = *beta *
+ c__[i__4].i;
+ z__4.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__4.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ z__2.r = z__3.r + z__4.r, z__2.i = z__3.i +
+ z__4.i;
+ d_cnjg(&z__6, alpha);
+ z__5.r = z__6.r * temp2.r - z__6.i * temp2.i,
+ z__5.i = z__6.r * temp2.i + z__6.i *
+ temp2.r;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i +
+ z__5.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZHER2K. */
+
+} /* zher2k_ */
diff --git a/contrib/libs/cblas/zherk.c b/contrib/libs/cblas/zherk.c
new file mode 100644
index 0000000000..611ebb0921
--- /dev/null
+++ b/contrib/libs/cblas/zherk.c
@@ -0,0 +1,533 @@
+/* zherk.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zherk_(char *uplo, char *trans, integer *n, integer *k,
+ doublereal *alpha, doublecomplex *a, integer *lda, doublereal *beta,
+ doublecomplex *c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6;
+ doublereal d__1;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, l, info;
+ doublecomplex temp;
+ extern logical lsame_(char *, char *);
+ integer nrowa;
+ doublereal rtemp;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHERK performs one of the hermitian rank k operations */
+
+/* C := alpha*A*conjg( A' ) + beta*C, */
+
+/* or */
+
+/* C := alpha*conjg( A' )*A + beta*C, */
+
+/* where alpha and beta are real scalars, C is an n by n hermitian */
+/* matrix and A is an n by k matrix in the first case and a k by n */
+/* matrix in the second case. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array C is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of C */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of C */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' C := alpha*A*conjg( A' ) + beta*C. */
+
+/* TRANS = 'C' or 'c' C := alpha*conjg( A' )*A + beta*C. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix C. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry with TRANS = 'N' or 'n', K specifies the number */
+/* of columns of the matrix A, and on entry with */
+/* TRANS = 'C' or 'c', K specifies the number of rows of the */
+/* matrix A. K must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - DOUBLE PRECISION . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX*16 array of DIMENSION ( LDA, ka ), where ka is */
+/* k when TRANS = 'N' or 'n', and is n otherwise. */
+/* Before entry with TRANS = 'N' or 'n', the leading n by k */
+/* part of the array A must contain the matrix A, otherwise */
+/* the leading k by n part of the array A must contain the */
+/* matrix A. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDA must be at least max( 1, n ), otherwise LDA must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* BETA - DOUBLE PRECISION. */
+/* On entry, BETA specifies the scalar beta. */
+/* Unchanged on exit. */
+
+/* C - COMPLEX*16 array of DIMENSION ( LDC, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array C must contain the upper */
+/* triangular part of the hermitian matrix and the strictly */
+/* lower triangular part of C is not referenced. On exit, the */
+/* upper triangular part of the array C is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array C must contain the lower */
+/* triangular part of the hermitian matrix and the strictly */
+/* upper triangular part of C is not referenced. On exit, the */
+/* lower triangular part of the array C is overwritten by the */
+/* lower triangular part of the updated matrix. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set, they are assumed to be zero, and on exit they */
+/* are set to zero. */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+/* -- Modified 8-Nov-93 to set C(J,J) to DBLE( C(J,J) ) when BETA = 1. */
+/* Ed Anderson, Cray Research Inc. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(trans, "N")) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! upper && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "C")) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*k < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldc < max(1,*n)) {
+ info = 10;
+ }
+ if (info != 0) {
+ xerbla_("ZHERK ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || (*alpha == 0. || *k == 0) && *beta == 1.) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.) {
+ if (upper) {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L30: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+/* L40: */
+ }
+ }
+ } else {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*conjg( A' ) + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L90: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L100: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ if (a[i__3].r != 0. || a[i__3].i != 0.) {
+ d_cnjg(&z__2, &a[j + l * a_dim1]);
+ z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ z__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
+ .i + z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L110: */
+ }
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ i__5 = i__ + l * a_dim1;
+ z__1.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__1.i = temp.r * a[i__5].i + temp.i * a[i__5]
+ .r;
+ d__1 = c__[i__4].r + z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L140: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L150: */
+ }
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ if (a[i__3].r != 0. || a[i__3].i != 0.) {
+ d_cnjg(&z__2, &a[j + l * a_dim1]);
+ z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ i__5 = j + l * a_dim1;
+ z__1.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__1.i = temp.r * a[i__5].i + temp.i * a[i__5]
+ .r;
+ d__1 = c__[i__4].r + z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ i__3 = *n;
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ z__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
+ .i + z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L160: */
+ }
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ } else {
+
+/* Form C := alpha*conjg( A' )*A + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * a_dim1;
+ z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i,
+ z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4]
+ .r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L190: */
+ }
+ if (*beta == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = *alpha * temp.r, z__1.i = *alpha * temp.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = *alpha * temp.r, z__2.i = *alpha * temp.i;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = *beta * c__[i__4].r, z__3.i = *beta * c__[
+ i__4].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L200: */
+ }
+ rtemp = 0.;
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ d_cnjg(&z__3, &a[l + j * a_dim1]);
+ i__3 = l + j * a_dim1;
+ z__2.r = z__3.r * a[i__3].r - z__3.i * a[i__3].i, z__2.i =
+ z__3.r * a[i__3].i + z__3.i * a[i__3].r;
+ z__1.r = rtemp + z__2.r, z__1.i = z__2.i;
+ rtemp = z__1.r;
+/* L210: */
+ }
+ if (*beta == 0.) {
+ i__2 = j + j * c_dim1;
+ d__1 = *alpha * rtemp;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *alpha * rtemp + *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ }
+/* L220: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ rtemp = 0.;
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ d_cnjg(&z__3, &a[l + j * a_dim1]);
+ i__3 = l + j * a_dim1;
+ z__2.r = z__3.r * a[i__3].r - z__3.i * a[i__3].i, z__2.i =
+ z__3.r * a[i__3].i + z__3.i * a[i__3].r;
+ z__1.r = rtemp + z__2.r, z__1.i = z__2.i;
+ rtemp = z__1.r;
+/* L230: */
+ }
+ if (*beta == 0.) {
+ i__2 = j + j * c_dim1;
+ d__1 = *alpha * rtemp;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *alpha * rtemp + *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * a_dim1;
+ z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i,
+ z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4]
+ .r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L240: */
+ }
+ if (*beta == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = *alpha * temp.r, z__1.i = *alpha * temp.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = *alpha * temp.r, z__2.i = *alpha * temp.i;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = *beta * c__[i__4].r, z__3.i = *beta * c__[
+ i__4].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L250: */
+ }
+/* L260: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZHERK . */
+
+} /* zherk_ */
diff --git a/contrib/libs/cblas/zhpmv.c b/contrib/libs/cblas/zhpmv.c
new file mode 100644
index 0000000000..c631c86d68
--- /dev/null
+++ b/contrib/libs/cblas/zhpmv.c
@@ -0,0 +1,434 @@
+/* zhpmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zhpmv_(char *uplo, integer *n, doublecomplex *alpha,
+ doublecomplex *ap, doublecomplex *x, integer *incx, doublecomplex *
+ beta, doublecomplex *y, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k, kk, ix, iy, jx, jy, kx, ky, info;
+ doublecomplex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHPMV performs the matrix-vector operation */
+
+/* y := alpha*A*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are n element vectors and */
+/* A is an n by n hermitian matrix, supplied in packed form. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the matrix A is supplied in the packed */
+/* array AP as follows: */
+
+/* UPLO = 'U' or 'u' The upper triangular part of A is */
+/* supplied in AP. */
+
+/* UPLO = 'L' or 'l' The lower triangular part of A is */
+/* supplied in AP. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX*16 . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* AP - COMPLEX*16 array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular part of the hermitian matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */
+/* and a( 2, 2 ) respectively, and so on. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular part of the hermitian matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */
+/* and a( 3, 1 ) respectively, and so on. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set and are assumed to be zero. */
+/* Unchanged on exit. */
+
+/* X - COMPLEX*16 array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA - COMPLEX*16 . */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then Y need not be set on input. */
+/* Unchanged on exit. */
+
+/* Y - COMPLEX*16 array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. On exit, Y is overwritten by the updated */
+/* vector y. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --y;
+ --x;
+ --ap;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 6;
+ } else if (*incy == 0) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("ZHPMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || alpha->r == 0. && alpha->i == 0. && (beta->r == 1. &&
+ beta->i == 0.)) {
+ return 0;
+ }
+
+/* Set up the start points in X and Y. */
+
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+
+/* Start the operations. In this version the elements of the array AP */
+/* are accessed sequentially with one pass through AP. */
+
+/* First form y := beta*y. */
+
+ if (beta->r != 1. || beta->i != 0.) {
+ if (*incy == 1) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ y[i__2].r = 0., y[i__2].i = 0.;
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ y[i__2].r = 0., y[i__2].i = 0.;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = iy;
+ z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (alpha->r == 0. && alpha->i == 0.) {
+ return 0;
+ }
+ kk = 1;
+ if (lsame_(uplo, "U")) {
+
+/* Form y when AP contains the upper triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ k = kk;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = k;
+ z__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i,
+ z__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5]
+ .r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ d_cnjg(&z__3, &ap[k]);
+ i__3 = i__;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
+ z__3.r * x[i__3].i + z__3.i * x[i__3].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ ++k;
+/* L50: */
+ }
+ i__2 = j;
+ i__3 = j;
+ i__4 = kk + j - 1;
+ d__1 = ap[i__4].r;
+ z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i;
+ z__2.r = y[i__3].r + z__3.r, z__2.i = y[i__3].i + z__3.i;
+ z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ kk += j;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ ix = kx;
+ iy = ky;
+ i__2 = kk + j - 2;
+ for (k = kk; k <= i__2; ++k) {
+ i__3 = iy;
+ i__4 = iy;
+ i__5 = k;
+ z__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i,
+ z__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5]
+ .r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ d_cnjg(&z__3, &ap[k]);
+ i__3 = ix;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
+ z__3.r * x[i__3].i + z__3.i * x[i__3].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L70: */
+ }
+ i__2 = jy;
+ i__3 = jy;
+ i__4 = kk + j - 1;
+ d__1 = ap[i__4].r;
+ z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i;
+ z__2.r = y[i__3].r + z__3.r, z__2.i = y[i__3].i + z__3.i;
+ z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ jx += *incx;
+ jy += *incy;
+ kk += j;
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y when AP contains the lower triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ i__2 = j;
+ i__3 = j;
+ i__4 = kk;
+ d__1 = ap[i__4].r;
+ z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ k = kk + 1;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = k;
+ z__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i,
+ z__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5]
+ .r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ d_cnjg(&z__3, &ap[k]);
+ i__3 = i__;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
+ z__3.r * x[i__3].i + z__3.i * x[i__3].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ ++k;
+/* L90: */
+ }
+ i__2 = j;
+ i__3 = j;
+ z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ kk += *n - j + 1;
+/* L100: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ i__2 = jy;
+ i__3 = jy;
+ i__4 = kk;
+ d__1 = ap[i__4].r;
+ z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ ix = jx;
+ iy = jy;
+ i__2 = kk + *n - j;
+ for (k = kk + 1; k <= i__2; ++k) {
+ ix += *incx;
+ iy += *incy;
+ i__3 = iy;
+ i__4 = iy;
+ i__5 = k;
+ z__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i,
+ z__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5]
+ .r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ d_cnjg(&z__3, &ap[k]);
+ i__3 = ix;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
+ z__3.r * x[i__3].i + z__3.i * x[i__3].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L110: */
+ }
+ i__2 = jy;
+ i__3 = jy;
+ z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ jx += *incx;
+ jy += *incy;
+ kk += *n - j + 1;
+/* L120: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZHPMV . */
+
+} /* zhpmv_ */
diff --git a/contrib/libs/cblas/zhpr.c b/contrib/libs/cblas/zhpr.c
new file mode 100644
index 0000000000..a2c189bbff
--- /dev/null
+++ b/contrib/libs/cblas/zhpr.c
@@ -0,0 +1,339 @@
+/* zhpr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zhpr_(char *uplo, integer *n, doublereal *alpha,
+ doublecomplex *x, integer *incx, doublecomplex *ap)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k, kk, ix, jx, kx, info;
+ doublecomplex temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHPR performs the hermitian rank 1 operation */
+
+/* A := alpha*x*conjg( x' ) + A, */
+
+/* where alpha is a real scalar, x is an n element vector and A is an */
+/* n by n hermitian matrix, supplied in packed form. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the matrix A is supplied in the packed */
+/* array AP as follows: */
+
+/* UPLO = 'U' or 'u' The upper triangular part of A is */
+/* supplied in AP. */
+
+/* UPLO = 'L' or 'l' The lower triangular part of A is */
+/* supplied in AP. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - COMPLEX*16 array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* AP - COMPLEX*16 array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular part of the hermitian matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */
+/* and a( 2, 2 ) respectively, and so on. On exit, the array */
+/* AP is overwritten by the upper triangular part of the */
+/* updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular part of the hermitian matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */
+/* and a( 3, 1 ) respectively, and so on. On exit, the array */
+/* AP is overwritten by the lower triangular part of the */
+/* updated matrix. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set, they are assumed to be zero, and on exit they */
+/* are set to zero. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ }
+ if (info != 0) {
+ xerbla_("ZHPR ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || *alpha == 0.) {
+ return 0;
+ }
+
+/* Set the start point in X if the increment is not unity. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of the array AP */
+/* are accessed sequentially with one pass through AP. */
+
+ kk = 1;
+ if (lsame_(uplo, "U")) {
+
+/* Form A when upper triangle is stored in AP. */
+
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ d_cnjg(&z__2, &x[j]);
+ z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ k = kk;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = k;
+ i__4 = k;
+ i__5 = i__;
+ z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ z__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ z__1.r = ap[i__4].r + z__2.r, z__1.i = ap[i__4].i +
+ z__2.i;
+ ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+ ++k;
+/* L10: */
+ }
+ i__2 = kk + j - 1;
+ i__3 = kk + j - 1;
+ i__4 = j;
+ z__1.r = x[i__4].r * temp.r - x[i__4].i * temp.i, z__1.i =
+ x[i__4].r * temp.i + x[i__4].i * temp.r;
+ d__1 = ap[i__3].r + z__1.r;
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+ } else {
+ i__2 = kk + j - 1;
+ i__3 = kk + j - 1;
+ d__1 = ap[i__3].r;
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+ }
+ kk += j;
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ d_cnjg(&z__2, &x[jx]);
+ z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix = kx;
+ i__2 = kk + j - 2;
+ for (k = kk; k <= i__2; ++k) {
+ i__3 = k;
+ i__4 = k;
+ i__5 = ix;
+ z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ z__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ z__1.r = ap[i__4].r + z__2.r, z__1.i = ap[i__4].i +
+ z__2.i;
+ ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+ ix += *incx;
+/* L30: */
+ }
+ i__2 = kk + j - 1;
+ i__3 = kk + j - 1;
+ i__4 = jx;
+ z__1.r = x[i__4].r * temp.r - x[i__4].i * temp.i, z__1.i =
+ x[i__4].r * temp.i + x[i__4].i * temp.r;
+ d__1 = ap[i__3].r + z__1.r;
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+ } else {
+ i__2 = kk + j - 1;
+ i__3 = kk + j - 1;
+ d__1 = ap[i__3].r;
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+ }
+ jx += *incx;
+ kk += j;
+/* L40: */
+ }
+ }
+ } else {
+
+/* Form A when lower triangle is stored in AP. */
+
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ d_cnjg(&z__2, &x[j]);
+ z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = kk;
+ i__3 = kk;
+ i__4 = j;
+ z__1.r = temp.r * x[i__4].r - temp.i * x[i__4].i, z__1.i =
+ temp.r * x[i__4].i + temp.i * x[i__4].r;
+ d__1 = ap[i__3].r + z__1.r;
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+ k = kk + 1;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = k;
+ i__4 = k;
+ i__5 = i__;
+ z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ z__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ z__1.r = ap[i__4].r + z__2.r, z__1.i = ap[i__4].i +
+ z__2.i;
+ ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+ ++k;
+/* L50: */
+ }
+ } else {
+ i__2 = kk;
+ i__3 = kk;
+ d__1 = ap[i__3].r;
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+ }
+ kk = kk + *n - j + 1;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ d_cnjg(&z__2, &x[jx]);
+ z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = kk;
+ i__3 = kk;
+ i__4 = jx;
+ z__1.r = temp.r * x[i__4].r - temp.i * x[i__4].i, z__1.i =
+ temp.r * x[i__4].i + temp.i * x[i__4].r;
+ d__1 = ap[i__3].r + z__1.r;
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+ ix = jx;
+ i__2 = kk + *n - j;
+ for (k = kk + 1; k <= i__2; ++k) {
+ ix += *incx;
+ i__3 = k;
+ i__4 = k;
+ i__5 = ix;
+ z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ z__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ z__1.r = ap[i__4].r + z__2.r, z__1.i = ap[i__4].i +
+ z__2.i;
+ ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+/* L70: */
+ }
+ } else {
+ i__2 = kk;
+ i__3 = kk;
+ d__1 = ap[i__3].r;
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+ }
+ jx += *incx;
+ kk = kk + *n - j + 1;
+/* L80: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZHPR . */
+
+} /* zhpr_ */
diff --git a/contrib/libs/cblas/zhpr2.c b/contrib/libs/cblas/zhpr2.c
new file mode 100644
index 0000000000..e116ba7dc3
--- /dev/null
+++ b/contrib/libs/cblas/zhpr2.c
@@ -0,0 +1,448 @@
+/* zhpr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zhpr2_(char *uplo, integer *n, doublecomplex *alpha,
+ doublecomplex *x, integer *incx, doublecomplex *y, integer *incy,
+ doublecomplex *ap)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4, i__5, i__6;
+ doublereal d__1;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k, kk, ix, iy, jx, jy, kx, ky, info;
+ doublecomplex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHPR2 performs the hermitian rank 2 operation */
+
+/* A := alpha*x*conjg( y' ) + conjg( alpha )*y*conjg( x' ) + A, */
+
+/* where alpha is a scalar, x and y are n element vectors and A is an */
+/* n by n hermitian matrix, supplied in packed form. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the matrix A is supplied in the packed */
+/* array AP as follows: */
+
+/* UPLO = 'U' or 'u' The upper triangular part of A is */
+/* supplied in AP. */
+
+/* UPLO = 'L' or 'l' The lower triangular part of A is */
+/* supplied in AP. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX*16 . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X - COMPLEX*16 array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* Y - COMPLEX*16 array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. */
+/* Unchanged on exit. */
+
+/* INCY - INTEGER. */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+/* AP - COMPLEX*16 array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular part of the hermitian matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */
+/* and a( 2, 2 ) respectively, and so on. On exit, the array */
+/* AP is overwritten by the upper triangular part of the */
+/* updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular part of the hermitian matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */
+/* and a( 3, 1 ) respectively, and so on. On exit, the array */
+/* AP is overwritten by the lower triangular part of the */
+/* updated matrix. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set, they are assumed to be zero, and on exit they */
+/* are set to zero. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --y;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*incy == 0) {
+ info = 7;
+ }
+ if (info != 0) {
+ xerbla_("ZHPR2 ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || alpha->r == 0. && alpha->i == 0.) {
+ return 0;
+ }
+
+/* Set up the start points in X and Y if the increments are not both */
+/* unity. */
+
+ if (*incx != 1 || *incy != 1) {
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+ jx = kx;
+ jy = ky;
+ }
+
+/* Start the operations. In this version the elements of the array AP */
+/* are accessed sequentially with one pass through AP. */
+
+ kk = 1;
+ if (lsame_(uplo, "U")) {
+
+/* Form A when upper triangle is stored in AP. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ i__3 = j;
+ if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. ||
+ y[i__3].i != 0.)) {
+ d_cnjg(&z__2, &y[j]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+ alpha->r * z__2.i + alpha->i * z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__2 = j;
+ z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ d_cnjg(&z__1, &z__2);
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ k = kk;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = k;
+ i__4 = k;
+ i__5 = i__;
+ z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ z__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ z__2.r = ap[i__4].r + z__3.r, z__2.i = ap[i__4].i +
+ z__3.i;
+ i__6 = i__;
+ z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ z__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+ ++k;
+/* L10: */
+ }
+ i__2 = kk + j - 1;
+ i__3 = kk + j - 1;
+ i__4 = j;
+ z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ z__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = j;
+ z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ z__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ d__1 = ap[i__3].r + z__1.r;
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+ } else {
+ i__2 = kk + j - 1;
+ i__3 = kk + j - 1;
+ d__1 = ap[i__3].r;
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+ }
+ kk += j;
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ i__3 = jy;
+ if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. ||
+ y[i__3].i != 0.)) {
+ d_cnjg(&z__2, &y[jy]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+ alpha->r * z__2.i + alpha->i * z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__2 = jx;
+ z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ d_cnjg(&z__1, &z__2);
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ ix = kx;
+ iy = ky;
+ i__2 = kk + j - 2;
+ for (k = kk; k <= i__2; ++k) {
+ i__3 = k;
+ i__4 = k;
+ i__5 = ix;
+ z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ z__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ z__2.r = ap[i__4].r + z__3.r, z__2.i = ap[i__4].i +
+ z__3.i;
+ i__6 = iy;
+ z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ z__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L30: */
+ }
+ i__2 = kk + j - 1;
+ i__3 = kk + j - 1;
+ i__4 = jx;
+ z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ z__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = jy;
+ z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ z__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ d__1 = ap[i__3].r + z__1.r;
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+ } else {
+ i__2 = kk + j - 1;
+ i__3 = kk + j - 1;
+ d__1 = ap[i__3].r;
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+ }
+ jx += *incx;
+ jy += *incy;
+ kk += j;
+/* L40: */
+ }
+ }
+ } else {
+
+/* Form A when lower triangle is stored in AP. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ i__3 = j;
+ if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. ||
+ y[i__3].i != 0.)) {
+ d_cnjg(&z__2, &y[j]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+ alpha->r * z__2.i + alpha->i * z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__2 = j;
+ z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ d_cnjg(&z__1, &z__2);
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ i__2 = kk;
+ i__3 = kk;
+ i__4 = j;
+ z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ z__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = j;
+ z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ z__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ d__1 = ap[i__3].r + z__1.r;
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+ k = kk + 1;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = k;
+ i__4 = k;
+ i__5 = i__;
+ z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ z__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ z__2.r = ap[i__4].r + z__3.r, z__2.i = ap[i__4].i +
+ z__3.i;
+ i__6 = i__;
+ z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ z__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+ ++k;
+/* L50: */
+ }
+ } else {
+ i__2 = kk;
+ i__3 = kk;
+ d__1 = ap[i__3].r;
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+ }
+ kk = kk + *n - j + 1;
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ i__3 = jy;
+ if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. ||
+ y[i__3].i != 0.)) {
+ d_cnjg(&z__2, &y[jy]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+ alpha->r * z__2.i + alpha->i * z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__2 = jx;
+ z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ d_cnjg(&z__1, &z__2);
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ i__2 = kk;
+ i__3 = kk;
+ i__4 = jx;
+ z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ z__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = jy;
+ z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ z__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ d__1 = ap[i__3].r + z__1.r;
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+ ix = jx;
+ iy = jy;
+ i__2 = kk + *n - j;
+ for (k = kk + 1; k <= i__2; ++k) {
+ ix += *incx;
+ iy += *incy;
+ i__3 = k;
+ i__4 = k;
+ i__5 = ix;
+ z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ z__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ z__2.r = ap[i__4].r + z__3.r, z__2.i = ap[i__4].i +
+ z__3.i;
+ i__6 = iy;
+ z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ z__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+/* L70: */
+ }
+ } else {
+ i__2 = kk;
+ i__3 = kk;
+ d__1 = ap[i__3].r;
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+ }
+ jx += *incx;
+ jy += *incy;
+ kk = kk + *n - j + 1;
+/* L80: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZHPR2 . */
+
+} /* zhpr2_ */
diff --git a/contrib/libs/cblas/zrotg.c b/contrib/libs/cblas/zrotg.c
new file mode 100644
index 0000000000..4f41c1afcd
--- /dev/null
+++ b/contrib/libs/cblas/zrotg.c
@@ -0,0 +1,77 @@
+/* zrotg.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zrotg_(doublecomplex *ca, doublecomplex *cb, doublereal *
+ c__, doublecomplex *s)
+{
+ /* System generated locals */
+ doublereal d__1, d__2;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *);
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+ double sqrt(doublereal);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ doublereal norm;
+ doublecomplex alpha;
+ doublereal scale;
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* determines a double complex Givens rotation. */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+ if (z_abs(ca) != 0.) {
+ goto L10;
+ }
+ *c__ = 0.;
+ s->r = 1., s->i = 0.;
+ ca->r = cb->r, ca->i = cb->i;
+ goto L20;
+L10:
+ scale = z_abs(ca) + z_abs(cb);
+ z__2.r = scale, z__2.i = 0.;
+ z_div(&z__1, ca, &z__2);
+/* Computing 2nd power */
+ d__1 = z_abs(&z__1);
+ z__4.r = scale, z__4.i = 0.;
+ z_div(&z__3, cb, &z__4);
+/* Computing 2nd power */
+ d__2 = z_abs(&z__3);
+ norm = scale * sqrt(d__1 * d__1 + d__2 * d__2);
+ d__1 = z_abs(ca);
+ z__1.r = ca->r / d__1, z__1.i = ca->i / d__1;
+ alpha.r = z__1.r, alpha.i = z__1.i;
+ *c__ = z_abs(ca) / norm;
+ d_cnjg(&z__3, cb);
+ z__2.r = alpha.r * z__3.r - alpha.i * z__3.i, z__2.i = alpha.r * z__3.i +
+ alpha.i * z__3.r;
+ z__1.r = z__2.r / norm, z__1.i = z__2.i / norm;
+ s->r = z__1.r, s->i = z__1.i;
+ z__1.r = norm * alpha.r, z__1.i = norm * alpha.i;
+ ca->r = z__1.r, ca->i = z__1.i;
+L20:
+ return 0;
+} /* zrotg_ */
diff --git a/contrib/libs/cblas/zscal.c b/contrib/libs/cblas/zscal.c
new file mode 100644
index 0000000000..0975b43ddb
--- /dev/null
+++ b/contrib/libs/cblas/zscal.c
@@ -0,0 +1,81 @@
+/* zscal.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zscal_(integer *n, doublecomplex *za, doublecomplex *zx,
+ integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__, ix;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* scales a vector by a constant. */
+/* jack dongarra, 3/11/78. */
+/* modified 3/93 to return if incx .le. 0. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+ /* Parameter adjustments */
+ --zx;
+
+ /* Function Body */
+ if (*n <= 0 || *incx <= 0) {
+ return 0;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ ix = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ i__3 = ix;
+ z__1.r = za->r * zx[i__3].r - za->i * zx[i__3].i, z__1.i = za->r * zx[
+ i__3].i + za->i * zx[i__3].r;
+ zx[i__2].r = z__1.r, zx[i__2].i = z__1.i;
+ ix += *incx;
+/* L10: */
+ }
+ return 0;
+
+/* code for increment equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ z__1.r = za->r * zx[i__3].r - za->i * zx[i__3].i, z__1.i = za->r * zx[
+ i__3].i + za->i * zx[i__3].r;
+ zx[i__2].r = z__1.r, zx[i__2].i = z__1.i;
+/* L30: */
+ }
+ return 0;
+} /* zscal_ */
diff --git a/contrib/libs/cblas/zswap.c b/contrib/libs/cblas/zswap.c
new file mode 100644
index 0000000000..0eaed35276
--- /dev/null
+++ b/contrib/libs/cblas/zswap.c
@@ -0,0 +1,93 @@
+/* zswap.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zswap_(integer *n, doublecomplex *zx, integer *incx,
+ doublecomplex *zy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, ix, iy;
+ doublecomplex ztemp;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* interchanges two vectors. */
+/* jack dongarra, 3/11/78. */
+/* modified 12/3/93, array(1) declarations changed to array(*) */
+
+
+/* .. Local Scalars .. */
+/* .. */
+ /* Parameter adjustments */
+ --zy;
+ --zx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/* code for unequal increments or equal increments not equal */
+/* to 1 */
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ ztemp.r = zx[i__2].r, ztemp.i = zx[i__2].i;
+ i__2 = ix;
+ i__3 = iy;
+ zx[i__2].r = zy[i__3].r, zx[i__2].i = zy[i__3].i;
+ i__2 = iy;
+ zy[i__2].r = ztemp.r, zy[i__2].i = ztemp.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ ztemp.r = zx[i__2].r, ztemp.i = zx[i__2].i;
+ i__2 = i__;
+ i__3 = i__;
+ zx[i__2].r = zy[i__3].r, zx[i__2].i = zy[i__3].i;
+ i__2 = i__;
+ zy[i__2].r = ztemp.r, zy[i__2].i = ztemp.i;
+/* L30: */
+ }
+ return 0;
+} /* zswap_ */
diff --git a/contrib/libs/cblas/zsymm.c b/contrib/libs/cblas/zsymm.c
new file mode 100644
index 0000000000..f6c7671c86
--- /dev/null
+++ b/contrib/libs/cblas/zsymm.c
@@ -0,0 +1,496 @@
+/* zsymm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zsymm_(char *side, char *uplo, integer *m, integer *n,
+ doublecomplex *alpha, doublecomplex *a, integer *lda, doublecomplex *
+ b, integer *ldb, doublecomplex *beta, doublecomplex *c__, integer *
+ ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5, i__6;
+ doublecomplex z__1, z__2, z__3, z__4, z__5;
+
+ /* Local variables */
+ integer i__, j, k, info;
+ doublecomplex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ integer nrowa;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSYMM performs one of the matrix-matrix operations */
+
+/* C := alpha*A*B + beta*C, */
+
+/* or */
+
+/* C := alpha*B*A + beta*C, */
+
+/* where alpha and beta are scalars, A is a symmetric matrix and B and */
+/* C are m by n matrices. */
+
+/* Arguments */
+/* ========== */
+
+/* SIDE - CHARACTER*1. */
+/* On entry, SIDE specifies whether the symmetric matrix A */
+/* appears on the left or right in the operation as follows: */
+
+/* SIDE = 'L' or 'l' C := alpha*A*B + beta*C, */
+
+/* SIDE = 'R' or 'r' C := alpha*B*A + beta*C, */
+
+/* Unchanged on exit. */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the symmetric matrix A is to be */
+/* referenced as follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of the */
+/* symmetric matrix is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of the */
+/* symmetric matrix is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of the matrix C. */
+/* M must be at least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of the matrix C. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX*16 . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX*16 array of DIMENSION ( LDA, ka ), where ka is */
+/* m when SIDE = 'L' or 'l' and is n otherwise. */
+/* Before entry with SIDE = 'L' or 'l', the m by m part of */
+/* the array A must contain the symmetric matrix, such that */
+/* when UPLO = 'U' or 'u', the leading m by m upper triangular */
+/* part of the array A must contain the upper triangular part */
+/* of the symmetric matrix and the strictly lower triangular */
+/* part of A is not referenced, and when UPLO = 'L' or 'l', */
+/* the leading m by m lower triangular part of the array A */
+/* must contain the lower triangular part of the symmetric */
+/* matrix and the strictly upper triangular part of A is not */
+/* referenced. */
+/* Before entry with SIDE = 'R' or 'r', the n by n part of */
+/* the array A must contain the symmetric matrix, such that */
+/* when UPLO = 'U' or 'u', the leading n by n upper triangular */
+/* part of the array A must contain the upper triangular part */
+/* of the symmetric matrix and the strictly lower triangular */
+/* part of A is not referenced, and when UPLO = 'L' or 'l', */
+/* the leading n by n lower triangular part of the array A */
+/* must contain the lower triangular part of the symmetric */
+/* matrix and the strictly upper triangular part of A is not */
+/* referenced. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When SIDE = 'L' or 'l' then */
+/* LDA must be at least max( 1, m ), otherwise LDA must be at */
+/* least max( 1, n ). */
+/* Unchanged on exit. */
+
+/* B - COMPLEX*16 array of DIMENSION ( LDB, n ). */
+/* Before entry, the leading m by n part of the array B must */
+/* contain the matrix B. */
+/* Unchanged on exit. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. LDB must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+/* BETA - COMPLEX*16 . */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then C need not be set on input. */
+/* Unchanged on exit. */
+
+/* C - COMPLEX*16 array of DIMENSION ( LDC, n ). */
+/* Before entry, the leading m by n part of the array C must */
+/* contain the matrix C, except when beta is zero, in which */
+/* case C need not be set on entry. */
+/* On exit, the array C is overwritten by the m by n updated */
+/* matrix. */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Set NROWA as the number of rows of A. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(side, "L")) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ upper = lsame_(uplo, "U");
+
+/* Test the input parameters. */
+
+ info = 0;
+ if (! lsame_(side, "L") && ! lsame_(side, "R")) {
+ info = 1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ info = 2;
+ } else if (*m < 0) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldb < max(1,*m)) {
+ info = 9;
+ } else if (*ldc < max(1,*m)) {
+ info = 12;
+ }
+ if (info != 0) {
+ xerbla_("ZSYMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0 || alpha->r == 0. && alpha->i == 0. && (beta->r ==
+ 1. && beta->i == 0.)) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0. && alpha->i == 0.) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4].i,
+ z__1.i = beta->r * c__[i__4].i + beta->i * c__[
+ i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(side, "L")) {
+
+/* Form C := alpha*A*B + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+ z__1.i = alpha->r * b[i__3].i + alpha->i * b[i__3]
+ .r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ i__3 = i__ - 1;
+ for (k = 1; k <= i__3; ++k) {
+ i__4 = k + j * c_dim1;
+ i__5 = k + j * c_dim1;
+ i__6 = k + i__ * a_dim1;
+ z__2.r = temp1.r * a[i__6].r - temp1.i * a[i__6].i,
+ z__2.i = temp1.r * a[i__6].i + temp1.i * a[
+ i__6].r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5].i +
+ z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+ i__4 = k + j * b_dim1;
+ i__5 = k + i__ * a_dim1;
+ z__2.r = b[i__4].r * a[i__5].r - b[i__4].i * a[i__5]
+ .i, z__2.i = b[i__4].r * a[i__5].i + b[i__4]
+ .i * a[i__5].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L50: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + i__ * a_dim1;
+ z__2.r = temp1.r * a[i__4].r - temp1.i * a[i__4].i,
+ z__2.i = temp1.r * a[i__4].i + temp1.i * a[
+ i__4].r;
+ z__3.r = alpha->r * temp2.r - alpha->i * temp2.i,
+ z__3.i = alpha->r * temp2.i + alpha->i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ i__5 = i__ + i__ * a_dim1;
+ z__4.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ z__4.i = temp1.r * a[i__5].i + temp1.i * a[
+ i__5].r;
+ z__2.r = z__3.r + z__4.r, z__2.i = z__3.i + z__4.i;
+ z__5.r = alpha->r * temp2.r - alpha->i * temp2.i,
+ z__5.i = alpha->r * temp2.i + alpha->i *
+ temp2.r;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L60: */
+ }
+/* L70: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ i__2 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__2].r - alpha->i * b[i__2].i,
+ z__1.i = alpha->r * b[i__2].i + alpha->i * b[i__2]
+ .r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ i__2 = *m;
+ for (k = i__ + 1; k <= i__2; ++k) {
+ i__3 = k + j * c_dim1;
+ i__4 = k + j * c_dim1;
+ i__5 = k + i__ * a_dim1;
+ z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ z__2.i = temp1.r * a[i__5].i + temp1.i * a[
+ i__5].r;
+ z__1.r = c__[i__4].r + z__2.r, z__1.i = c__[i__4].i +
+ z__2.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ i__3 = k + j * b_dim1;
+ i__4 = k + i__ * a_dim1;
+ z__2.r = b[i__3].r * a[i__4].r - b[i__3].i * a[i__4]
+ .i, z__2.i = b[i__3].r * a[i__4].i + b[i__3]
+ .i * a[i__4].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L80: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__2 = i__ + j * c_dim1;
+ i__3 = i__ + i__ * a_dim1;
+ z__2.r = temp1.r * a[i__3].r - temp1.i * a[i__3].i,
+ z__2.i = temp1.r * a[i__3].i + temp1.i * a[
+ i__3].r;
+ z__3.r = alpha->r * temp2.r - alpha->i * temp2.i,
+ z__3.i = alpha->r * temp2.i + alpha->i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ } else {
+ i__2 = i__ + j * c_dim1;
+ i__3 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__3].r - beta->i * c__[i__3]
+ .i, z__3.i = beta->r * c__[i__3].i + beta->i *
+ c__[i__3].r;
+ i__4 = i__ + i__ * a_dim1;
+ z__4.r = temp1.r * a[i__4].r - temp1.i * a[i__4].i,
+ z__4.i = temp1.r * a[i__4].i + temp1.i * a[
+ i__4].r;
+ z__2.r = z__3.r + z__4.r, z__2.i = z__3.i + z__4.i;
+ z__5.r = alpha->r * temp2.r - alpha->i * temp2.i,
+ z__5.i = alpha->r * temp2.i + alpha->i *
+ temp2.r;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ }
+/* L90: */
+ }
+/* L100: */
+ }
+ }
+ } else {
+
+/* Form C := alpha*B*A + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + j * a_dim1;
+ z__1.r = alpha->r * a[i__2].r - alpha->i * a[i__2].i, z__1.i =
+ alpha->r * a[i__2].i + alpha->i * a[i__2].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ if (beta->r == 0. && beta->i == 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__1.r = temp1.r * b[i__4].r - temp1.i * b[i__4].i,
+ z__1.i = temp1.r * b[i__4].i + temp1.i * b[i__4]
+ .r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L110: */
+ }
+ } else {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__2.r = beta->r * c__[i__4].r - beta->i * c__[i__4].i,
+ z__2.i = beta->r * c__[i__4].i + beta->i * c__[
+ i__4].r;
+ i__5 = i__ + j * b_dim1;
+ z__3.r = temp1.r * b[i__5].r - temp1.i * b[i__5].i,
+ z__3.i = temp1.r * b[i__5].i + temp1.i * b[i__5]
+ .r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L120: */
+ }
+ }
+ i__2 = j - 1;
+ for (k = 1; k <= i__2; ++k) {
+ if (upper) {
+ i__3 = k + j * a_dim1;
+ z__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ z__1.i = alpha->r * a[i__3].i + alpha->i * a[i__3]
+ .r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ } else {
+ i__3 = j + k * a_dim1;
+ z__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ z__1.i = alpha->r * a[i__3].i + alpha->i * a[i__3]
+ .r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ }
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + k * b_dim1;
+ z__2.r = temp1.r * b[i__6].r - temp1.i * b[i__6].i,
+ z__2.i = temp1.r * b[i__6].i + temp1.i * b[i__6]
+ .r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5].i +
+ z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L130: */
+ }
+/* L140: */
+ }
+ i__2 = *n;
+ for (k = j + 1; k <= i__2; ++k) {
+ if (upper) {
+ i__3 = j + k * a_dim1;
+ z__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ z__1.i = alpha->r * a[i__3].i + alpha->i * a[i__3]
+ .r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ } else {
+ i__3 = k + j * a_dim1;
+ z__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ z__1.i = alpha->r * a[i__3].i + alpha->i * a[i__3]
+ .r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ }
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + k * b_dim1;
+ z__2.r = temp1.r * b[i__6].r - temp1.i * b[i__6].i,
+ z__2.i = temp1.r * b[i__6].i + temp1.i * b[i__6]
+ .r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5].i +
+ z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L150: */
+ }
+/* L160: */
+ }
+/* L170: */
+ }
+ }
+
+ return 0;
+
+/* End of ZSYMM . */
+
+} /* zsymm_ */
diff --git a/contrib/libs/cblas/zsyr2k.c b/contrib/libs/cblas/zsyr2k.c
new file mode 100644
index 0000000000..79c76090b9
--- /dev/null
+++ b/contrib/libs/cblas/zsyr2k.c
@@ -0,0 +1,538 @@
+/* zsyr2k.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zsyr2k_(char *uplo, char *trans, integer *n, integer *k,
+ doublecomplex *alpha, doublecomplex *a, integer *lda, doublecomplex *
+ b, integer *ldb, doublecomplex *beta, doublecomplex *c__, integer *
+ ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5, i__6, i__7;
+ doublecomplex z__1, z__2, z__3, z__4, z__5;
+
+ /* Local variables */
+ integer i__, j, l, info;
+ doublecomplex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ integer nrowa;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSYR2K performs one of the symmetric rank 2k operations */
+
+/* C := alpha*A*B' + alpha*B*A' + beta*C, */
+
+/* or */
+
+/* C := alpha*A'*B + alpha*B'*A + beta*C, */
+
+/* where alpha and beta are scalars, C is an n by n symmetric matrix */
+/* and A and B are n by k matrices in the first case and k by n */
+/* matrices in the second case. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array C is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of C */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of C */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' C := alpha*A*B' + alpha*B*A' + */
+/* beta*C. */
+
+/* TRANS = 'T' or 't' C := alpha*A'*B + alpha*B'*A + */
+/* beta*C. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix C. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry with TRANS = 'N' or 'n', K specifies the number */
+/* of columns of the matrices A and B, and on entry with */
+/* TRANS = 'T' or 't', K specifies the number of rows of the */
+/* matrices A and B. K must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX*16 . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX*16 array of DIMENSION ( LDA, ka ), where ka is */
+/* k when TRANS = 'N' or 'n', and is n otherwise. */
+/* Before entry with TRANS = 'N' or 'n', the leading n by k */
+/* part of the array A must contain the matrix A, otherwise */
+/* the leading k by n part of the array A must contain the */
+/* matrix A. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDA must be at least max( 1, n ), otherwise LDA must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* B - COMPLEX*16 array of DIMENSION ( LDB, kb ), where kb is */
+/* k when TRANS = 'N' or 'n', and is n otherwise. */
+/* Before entry with TRANS = 'N' or 'n', the leading n by k */
+/* part of the array B must contain the matrix B, otherwise */
+/* the leading k by n part of the array B must contain the */
+/* matrix B. */
+/* Unchanged on exit. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDB must be at least max( 1, n ), otherwise LDB must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* BETA - COMPLEX*16 . */
+/* On entry, BETA specifies the scalar beta. */
+/* Unchanged on exit. */
+
+/* C - COMPLEX*16 array of DIMENSION ( LDC, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array C must contain the upper */
+/* triangular part of the symmetric matrix and the strictly */
+/* lower triangular part of C is not referenced. On exit, the */
+/* upper triangular part of the array C is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array C must contain the lower */
+/* triangular part of the symmetric matrix and the strictly */
+/* upper triangular part of C is not referenced. On exit, the */
+/* lower triangular part of the array C is overwritten by the */
+/* lower triangular part of the updated matrix. */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(trans, "N")) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! upper && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T")) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*k < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldb < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldc < max(1,*n)) {
+ info = 12;
+ }
+ if (info != 0) {
+ xerbla_("ZSYR2K", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || (alpha->r == 0. && alpha->i == 0. || *k == 0) && (beta->r
+ == 1. && beta->i == 0.)) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0. && alpha->i == 0.) {
+ if (upper) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*B' + alpha*B*A' + C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L90: */
+ }
+ } else if (beta->r != 1. || beta->i != 0.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L100: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ i__4 = j + l * b_dim1;
+ if (a[i__3].r != 0. || a[i__3].i != 0. || (b[i__4].r !=
+ 0. || b[i__4].i != 0.)) {
+ i__3 = j + l * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+ z__1.i = alpha->r * b[i__3].i + alpha->i * b[
+ i__3].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__3 = j + l * a_dim1;
+ z__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ z__1.i = alpha->r * a[i__3].i + alpha->i * a[
+ i__3].r;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ i__3 = j;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__3.r = a[i__6].r * temp1.r - a[i__6].i *
+ temp1.i, z__3.i = a[i__6].r * temp1.i + a[
+ i__6].i * temp1.r;
+ z__2.r = c__[i__5].r + z__3.r, z__2.i = c__[i__5]
+ .i + z__3.i;
+ i__7 = i__ + l * b_dim1;
+ z__4.r = b[i__7].r * temp2.r - b[i__7].i *
+ temp2.i, z__4.i = b[i__7].r * temp2.i + b[
+ i__7].i * temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i +
+ z__4.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L140: */
+ }
+ } else if (beta->r != 1. || beta->i != 0.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L150: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ i__4 = j + l * b_dim1;
+ if (a[i__3].r != 0. || a[i__3].i != 0. || (b[i__4].r !=
+ 0. || b[i__4].i != 0.)) {
+ i__3 = j + l * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+ z__1.i = alpha->r * b[i__3].i + alpha->i * b[
+ i__3].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__3 = j + l * a_dim1;
+ z__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ z__1.i = alpha->r * a[i__3].i + alpha->i * a[
+ i__3].r;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ i__3 = *n;
+ for (i__ = j; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__3.r = a[i__6].r * temp1.r - a[i__6].i *
+ temp1.i, z__3.i = a[i__6].r * temp1.i + a[
+ i__6].i * temp1.r;
+ z__2.r = c__[i__5].r + z__3.r, z__2.i = c__[i__5]
+ .i + z__3.i;
+ i__7 = i__ + l * b_dim1;
+ z__4.r = b[i__7].r * temp2.r - b[i__7].i *
+ temp2.i, z__4.i = b[i__7].r * temp2.i + b[
+ i__7].i * temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i +
+ z__4.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L160: */
+ }
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ } else {
+
+/* Form C := alpha*A'*B + alpha*B'*A + C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp1.r = 0., temp1.i = 0.;
+ temp2.r = 0., temp2.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + i__ * a_dim1;
+ i__5 = l + j * b_dim1;
+ z__2.r = a[i__4].r * b[i__5].r - a[i__4].i * b[i__5]
+ .i, z__2.i = a[i__4].r * b[i__5].i + a[i__4]
+ .i * b[i__5].r;
+ z__1.r = temp1.r + z__2.r, z__1.i = temp1.i + z__2.i;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__4 = l + i__ * b_dim1;
+ i__5 = l + j * a_dim1;
+ z__2.r = b[i__4].r * a[i__5].r - b[i__4].i * a[i__5]
+ .i, z__2.i = b[i__4].r * a[i__5].i + b[i__4]
+ .i * a[i__5].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L190: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ z__3.r = alpha->r * temp2.r - alpha->i * temp2.i,
+ z__3.i = alpha->r * temp2.i + alpha->i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__4.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__4.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ z__2.r = z__3.r + z__4.r, z__2.i = z__3.i + z__4.i;
+ z__5.r = alpha->r * temp2.r - alpha->i * temp2.i,
+ z__5.i = alpha->r * temp2.i + alpha->i *
+ temp2.r;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ temp1.r = 0., temp1.i = 0.;
+ temp2.r = 0., temp2.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + i__ * a_dim1;
+ i__5 = l + j * b_dim1;
+ z__2.r = a[i__4].r * b[i__5].r - a[i__4].i * b[i__5]
+ .i, z__2.i = a[i__4].r * b[i__5].i + a[i__4]
+ .i * b[i__5].r;
+ z__1.r = temp1.r + z__2.r, z__1.i = temp1.i + z__2.i;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__4 = l + i__ * b_dim1;
+ i__5 = l + j * a_dim1;
+ z__2.r = b[i__4].r * a[i__5].r - b[i__4].i * a[i__5]
+ .i, z__2.i = b[i__4].r * a[i__5].i + b[i__4]
+ .i * a[i__5].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L220: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ z__3.r = alpha->r * temp2.r - alpha->i * temp2.i,
+ z__3.i = alpha->r * temp2.i + alpha->i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__4.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__4.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ z__2.r = z__3.r + z__4.r, z__2.i = z__3.i + z__4.i;
+ z__5.r = alpha->r * temp2.r - alpha->i * temp2.i,
+ z__5.i = alpha->r * temp2.i + alpha->i *
+ temp2.r;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZSYR2K. */
+
+} /* zsyr2k_ */
diff --git a/contrib/libs/cblas/zsyrk.c b/contrib/libs/cblas/zsyrk.c
new file mode 100644
index 0000000000..c1a1c03d98
--- /dev/null
+++ b/contrib/libs/cblas/zsyrk.c
@@ -0,0 +1,457 @@
+/* zsyrk.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zsyrk_(char *uplo, char *trans, integer *n, integer *k,
+ doublecomplex *alpha, doublecomplex *a, integer *lda, doublecomplex *
+ beta, doublecomplex *c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Local variables */
+ integer i__, j, l, info;
+ doublecomplex temp;
+ extern logical lsame_(char *, char *);
+ integer nrowa;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSYRK performs one of the symmetric rank k operations */
+
+/* C := alpha*A*A' + beta*C, */
+
+/* or */
+
+/* C := alpha*A'*A + beta*C, */
+
+/* where alpha and beta are scalars, C is an n by n symmetric matrix */
+/* and A is an n by k matrix in the first case and a k by n matrix */
+/* in the second case. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array C is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of C */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of C */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' C := alpha*A*A' + beta*C. */
+
+/* TRANS = 'T' or 't' C := alpha*A'*A + beta*C. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix C. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry with TRANS = 'N' or 'n', K specifies the number */
+/* of columns of the matrix A, and on entry with */
+/* TRANS = 'T' or 't', K specifies the number of rows of the */
+/* matrix A. K must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX*16 . */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX*16 array of DIMENSION ( LDA, ka ), where ka is */
+/* k when TRANS = 'N' or 'n', and is n otherwise. */
+/* Before entry with TRANS = 'N' or 'n', the leading n by k */
+/* part of the array A must contain the matrix A, otherwise */
+/* the leading k by n part of the array A must contain the */
+/* matrix A. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDA must be at least max( 1, n ), otherwise LDA must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* BETA - COMPLEX*16 . */
+/* On entry, BETA specifies the scalar beta. */
+/* Unchanged on exit. */
+
+/* C - COMPLEX*16 array of DIMENSION ( LDC, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array C must contain the upper */
+/* triangular part of the symmetric matrix and the strictly */
+/* lower triangular part of C is not referenced. On exit, the */
+/* upper triangular part of the array C is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array C must contain the lower */
+/* triangular part of the symmetric matrix and the strictly */
+/* upper triangular part of C is not referenced. On exit, the */
+/* lower triangular part of the array C is overwritten by the */
+/* lower triangular part of the updated matrix. */
+
+/* LDC - INTEGER. */
+/* On entry, LDC specifies the first dimension of C as declared */
+/* in the calling (sub) program. LDC must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(trans, "N")) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! upper && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T")) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*k < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldc < max(1,*n)) {
+ info = 10;
+ }
+ if (info != 0) {
+ xerbla_("ZSYRK ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || (alpha->r == 0. && alpha->i == 0. || *k == 0) && (beta->r
+ == 1. && beta->i == 0.)) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0. && alpha->i == 0.) {
+ if (upper) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*A' + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L90: */
+ }
+ } else if (beta->r != 1. || beta->i != 0.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L100: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ if (a[i__3].r != 0. || a[i__3].i != 0.) {
+ i__3 = j + l * a_dim1;
+ z__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ z__1.i = alpha->r * a[i__3].i + alpha->i * a[
+ i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = j;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ z__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
+ .i + z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L140: */
+ }
+ } else if (beta->r != 1. || beta->i != 0.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L150: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ if (a[i__3].r != 0. || a[i__3].i != 0.) {
+ i__3 = j + l * a_dim1;
+ z__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ z__1.i = alpha->r * a[i__3].i + alpha->i * a[
+ i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = *n;
+ for (i__ = j; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ z__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
+ .i + z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L160: */
+ }
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ } else {
+
+/* Form C := alpha*A'*A + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + i__ * a_dim1;
+ i__5 = l + j * a_dim1;
+ z__2.r = a[i__4].r * a[i__5].r - a[i__4].i * a[i__5]
+ .i, z__2.i = a[i__4].r * a[i__5].i + a[i__4]
+ .i * a[i__5].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L190: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + i__ * a_dim1;
+ i__5 = l + j * a_dim1;
+ z__2.r = a[i__4].r * a[i__5].r - a[i__4].i * a[i__5]
+ .i, z__2.i = a[i__4].r * a[i__5].i + a[i__4]
+ .i * a[i__5].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L220: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZSYRK . */
+
+} /* zsyrk_ */
diff --git a/contrib/libs/cblas/ztbmv.c b/contrib/libs/cblas/ztbmv.c
new file mode 100644
index 0000000000..2585383a7d
--- /dev/null
+++ b/contrib/libs/cblas/ztbmv.c
@@ -0,0 +1,642 @@
+/* ztbmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ztbmv_(char *uplo, char *trans, char *diag, integer *n,
+ integer *k, doublecomplex *a, integer *lda, doublecomplex *x, integer
+ *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, l, ix, jx, kx, info;
+ doublecomplex temp;
+ extern logical lsame_(char *, char *);
+ integer kplus1;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noconj, nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTBMV performs one of the matrix-vector operations */
+
+/* x := A*x, or x := A'*x, or x := conjg( A' )*x, */
+
+/* where x is an n element vector and A is an n by n unit, or non-unit, */
+/* upper or lower triangular band matrix, with ( k + 1 ) diagonals. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' x := A*x. */
+
+/* TRANS = 'T' or 't' x := A'*x. */
+
+/* TRANS = 'C' or 'c' x := conjg( A' )*x. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry with UPLO = 'U' or 'u', K specifies the number of */
+/* super-diagonals of the matrix A. */
+/* On entry with UPLO = 'L' or 'l', K specifies the number of */
+/* sub-diagonals of the matrix A. */
+/* K must satisfy 0 .le. K. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX*16 array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */
+/* by n part of the array A must contain the upper triangular */
+/* band part of the matrix of coefficients, supplied column by */
+/* column, with the leading diagonal of the matrix in row */
+/* ( k + 1 ) of the array, the first super-diagonal starting at */
+/* position 2 in row k, and so on. The top left k by k triangle */
+/* of the array A is not referenced. */
+/* The following program segment will transfer an upper */
+/* triangular band matrix from conventional full matrix storage */
+/* to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = K + 1 - J */
+/* DO 10, I = MAX( 1, J - K ), J */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */
+/* by n part of the array A must contain the lower triangular */
+/* band part of the matrix of coefficients, supplied column by */
+/* column, with the leading diagonal of the matrix in row 1 of */
+/* the array, the first sub-diagonal starting at position 1 in */
+/* row 2, and so on. The bottom right k by k triangle of the */
+/* array A is not referenced. */
+/* The following program segment will transfer a lower */
+/* triangular band matrix from conventional full matrix storage */
+/* to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = 1 - J */
+/* DO 10, I = J, MIN( N, J + K ) */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Note that when DIAG = 'U' or 'u' the elements of the array A */
+/* corresponding to the diagonal elements of the matrix are not */
+/* referenced, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* ( k + 1 ). */
+/* Unchanged on exit. */
+
+/* X - COMPLEX*16 array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. On exit, X is overwritten with the */
+/* tranformed vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*k < 0) {
+ info = 5;
+ } else if (*lda < *k + 1) {
+ info = 7;
+ } else if (*incx == 0) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("ZTBMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := A*x. */
+
+ if (lsame_(uplo, "U")) {
+ kplus1 = *k + 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ l = kplus1 - j;
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__4 = j - 1;
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ i__5 = l + i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i +
+ z__2.i;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+/* L10: */
+ }
+ if (nounit) {
+ i__4 = j;
+ i__2 = j;
+ i__3 = kplus1 + j * a_dim1;
+ z__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[
+ i__3].i, z__1.i = x[i__2].r * a[i__3].i +
+ x[i__2].i * a[i__3].r;
+ x[i__4].r = z__1.r, x[i__4].i = z__1.i;
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__4 = jx;
+ if (x[i__4].r != 0. || x[i__4].i != 0.) {
+ i__4 = jx;
+ temp.r = x[i__4].r, temp.i = x[i__4].i;
+ ix = kx;
+ l = kplus1 - j;
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *k;
+ i__3 = j - 1;
+ for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+ i__4 = ix;
+ i__2 = ix;
+ i__5 = l + i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ z__1.r = x[i__2].r + z__2.r, z__1.i = x[i__2].i +
+ z__2.i;
+ x[i__4].r = z__1.r, x[i__4].i = z__1.i;
+ ix += *incx;
+/* L30: */
+ }
+ if (nounit) {
+ i__3 = jx;
+ i__4 = jx;
+ i__2 = kplus1 + j * a_dim1;
+ z__1.r = x[i__4].r * a[i__2].r - x[i__4].i * a[
+ i__2].i, z__1.i = x[i__4].r * a[i__2].i +
+ x[i__4].i * a[i__2].r;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ }
+ }
+ jx += *incx;
+ if (j > *k) {
+ kx += *incx;
+ }
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ if (x[i__1].r != 0. || x[i__1].i != 0.) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ l = 1 - j;
+/* Computing MIN */
+ i__1 = *n, i__3 = j + *k;
+ i__4 = j + 1;
+ for (i__ = min(i__1,i__3); i__ >= i__4; --i__) {
+ i__1 = i__;
+ i__3 = i__;
+ i__2 = l + i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ z__2.i = temp.r * a[i__2].i + temp.i * a[
+ i__2].r;
+ z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i +
+ z__2.i;
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+/* L50: */
+ }
+ if (nounit) {
+ i__4 = j;
+ i__1 = j;
+ i__3 = j * a_dim1 + 1;
+ z__1.r = x[i__1].r * a[i__3].r - x[i__1].i * a[
+ i__3].i, z__1.i = x[i__1].r * a[i__3].i +
+ x[i__1].i * a[i__3].r;
+ x[i__4].r = z__1.r, x[i__4].i = z__1.i;
+ }
+ }
+/* L60: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ i__4 = jx;
+ if (x[i__4].r != 0. || x[i__4].i != 0.) {
+ i__4 = jx;
+ temp.r = x[i__4].r, temp.i = x[i__4].i;
+ ix = kx;
+ l = 1 - j;
+/* Computing MIN */
+ i__4 = *n, i__1 = j + *k;
+ i__3 = j + 1;
+ for (i__ = min(i__4,i__1); i__ >= i__3; --i__) {
+ i__4 = ix;
+ i__1 = ix;
+ i__2 = l + i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ z__2.i = temp.r * a[i__2].i + temp.i * a[
+ i__2].r;
+ z__1.r = x[i__1].r + z__2.r, z__1.i = x[i__1].i +
+ z__2.i;
+ x[i__4].r = z__1.r, x[i__4].i = z__1.i;
+ ix -= *incx;
+/* L70: */
+ }
+ if (nounit) {
+ i__3 = jx;
+ i__4 = jx;
+ i__1 = j * a_dim1 + 1;
+ z__1.r = x[i__4].r * a[i__1].r - x[i__4].i * a[
+ i__1].i, z__1.i = x[i__4].r * a[i__1].i +
+ x[i__4].i * a[i__1].r;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ }
+ }
+ jx -= *incx;
+ if (*n - j >= *k) {
+ kx -= *incx;
+ }
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := A'*x or x := conjg( A' )*x. */
+
+ if (lsame_(uplo, "U")) {
+ kplus1 = *k + 1;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__3 = j;
+ temp.r = x[i__3].r, temp.i = x[i__3].i;
+ l = kplus1 - j;
+ if (noconj) {
+ if (nounit) {
+ i__3 = kplus1 + j * a_dim1;
+ z__1.r = temp.r * a[i__3].r - temp.i * a[i__3].i,
+ z__1.i = temp.r * a[i__3].i + temp.i * a[
+ i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+/* Computing MAX */
+ i__4 = 1, i__1 = j - *k;
+ i__3 = max(i__4,i__1);
+ for (i__ = j - 1; i__ >= i__3; --i__) {
+ i__4 = l + i__ + j * a_dim1;
+ i__1 = i__;
+ z__2.r = a[i__4].r * x[i__1].r - a[i__4].i * x[
+ i__1].i, z__2.i = a[i__4].r * x[i__1].i +
+ a[i__4].i * x[i__1].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L90: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &a[kplus1 + j * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+/* Computing MAX */
+ i__4 = 1, i__1 = j - *k;
+ i__3 = max(i__4,i__1);
+ for (i__ = j - 1; i__ >= i__3; --i__) {
+ d_cnjg(&z__3, &a[l + i__ + j * a_dim1]);
+ i__4 = i__;
+ z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i,
+ z__2.i = z__3.r * x[i__4].i + z__3.i * x[
+ i__4].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L100: */
+ }
+ }
+ i__3 = j;
+ x[i__3].r = temp.r, x[i__3].i = temp.i;
+/* L110: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ i__3 = jx;
+ temp.r = x[i__3].r, temp.i = x[i__3].i;
+ kx -= *incx;
+ ix = kx;
+ l = kplus1 - j;
+ if (noconj) {
+ if (nounit) {
+ i__3 = kplus1 + j * a_dim1;
+ z__1.r = temp.r * a[i__3].r - temp.i * a[i__3].i,
+ z__1.i = temp.r * a[i__3].i + temp.i * a[
+ i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+/* Computing MAX */
+ i__4 = 1, i__1 = j - *k;
+ i__3 = max(i__4,i__1);
+ for (i__ = j - 1; i__ >= i__3; --i__) {
+ i__4 = l + i__ + j * a_dim1;
+ i__1 = ix;
+ z__2.r = a[i__4].r * x[i__1].r - a[i__4].i * x[
+ i__1].i, z__2.i = a[i__4].r * x[i__1].i +
+ a[i__4].i * x[i__1].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix -= *incx;
+/* L120: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &a[kplus1 + j * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+/* Computing MAX */
+ i__4 = 1, i__1 = j - *k;
+ i__3 = max(i__4,i__1);
+ for (i__ = j - 1; i__ >= i__3; --i__) {
+ d_cnjg(&z__3, &a[l + i__ + j * a_dim1]);
+ i__4 = ix;
+ z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i,
+ z__2.i = z__3.r * x[i__4].i + z__3.i * x[
+ i__4].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix -= *incx;
+/* L130: */
+ }
+ }
+ i__3 = jx;
+ x[i__3].r = temp.r, x[i__3].i = temp.i;
+ jx -= *incx;
+/* L140: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__3 = *n;
+ for (j = 1; j <= i__3; ++j) {
+ i__4 = j;
+ temp.r = x[i__4].r, temp.i = x[i__4].i;
+ l = 1 - j;
+ if (noconj) {
+ if (nounit) {
+ i__4 = j * a_dim1 + 1;
+ z__1.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
+ z__1.i = temp.r * a[i__4].i + temp.i * a[
+ i__4].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = j + *k;
+ i__4 = min(i__1,i__2);
+ for (i__ = j + 1; i__ <= i__4; ++i__) {
+ i__1 = l + i__ + j * a_dim1;
+ i__2 = i__;
+ z__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[
+ i__2].i, z__2.i = a[i__1].r * x[i__2].i +
+ a[i__1].i * x[i__2].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L150: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &a[j * a_dim1 + 1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = j + *k;
+ i__4 = min(i__1,i__2);
+ for (i__ = j + 1; i__ <= i__4; ++i__) {
+ d_cnjg(&z__3, &a[l + i__ + j * a_dim1]);
+ i__1 = i__;
+ z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i,
+ z__2.i = z__3.r * x[i__1].i + z__3.i * x[
+ i__1].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L160: */
+ }
+ }
+ i__4 = j;
+ x[i__4].r = temp.r, x[i__4].i = temp.i;
+/* L170: */
+ }
+ } else {
+ jx = kx;
+ i__3 = *n;
+ for (j = 1; j <= i__3; ++j) {
+ i__4 = jx;
+ temp.r = x[i__4].r, temp.i = x[i__4].i;
+ kx += *incx;
+ ix = kx;
+ l = 1 - j;
+ if (noconj) {
+ if (nounit) {
+ i__4 = j * a_dim1 + 1;
+ z__1.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
+ z__1.i = temp.r * a[i__4].i + temp.i * a[
+ i__4].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = j + *k;
+ i__4 = min(i__1,i__2);
+ for (i__ = j + 1; i__ <= i__4; ++i__) {
+ i__1 = l + i__ + j * a_dim1;
+ i__2 = ix;
+ z__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[
+ i__2].i, z__2.i = a[i__1].r * x[i__2].i +
+ a[i__1].i * x[i__2].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix += *incx;
+/* L180: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &a[j * a_dim1 + 1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = j + *k;
+ i__4 = min(i__1,i__2);
+ for (i__ = j + 1; i__ <= i__4; ++i__) {
+ d_cnjg(&z__3, &a[l + i__ + j * a_dim1]);
+ i__1 = ix;
+ z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i,
+ z__2.i = z__3.r * x[i__1].i + z__3.i * x[
+ i__1].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix += *incx;
+/* L190: */
+ }
+ }
+ i__4 = jx;
+ x[i__4].r = temp.r, x[i__4].i = temp.i;
+ jx += *incx;
+/* L200: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZTBMV . */
+
+} /* ztbmv_ */
diff --git a/contrib/libs/cblas/ztbsv.c b/contrib/libs/cblas/ztbsv.c
new file mode 100644
index 0000000000..c09ac40d41
--- /dev/null
+++ b/contrib/libs/cblas/ztbsv.c
@@ -0,0 +1,611 @@
+/* ztbsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ztbsv_(char *uplo, char *trans, char *diag, integer *n,
+ integer *k, doublecomplex *a, integer *lda, doublecomplex *x, integer
+ *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *), d_cnjg(
+ doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, l, ix, jx, kx, info;
+ doublecomplex temp;
+ extern logical lsame_(char *, char *);
+ integer kplus1;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noconj, nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTBSV solves one of the systems of equations */
+
+/* A*x = b, or A'*x = b, or conjg( A' )*x = b, */
+
+/* where b and x are n element vectors and A is an n by n unit, or */
+/* non-unit, upper or lower triangular band matrix, with ( k + 1 ) */
+/* diagonals. */
+
+/* No test for singularity or near-singularity is included in this */
+/* routine. Such tests must be performed before calling this routine. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the equations to be solved as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' A*x = b. */
+
+/* TRANS = 'T' or 't' A'*x = b. */
+
+/* TRANS = 'C' or 'c' conjg( A' )*x = b. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* K - INTEGER. */
+/* On entry with UPLO = 'U' or 'u', K specifies the number of */
+/* super-diagonals of the matrix A. */
+/* On entry with UPLO = 'L' or 'l', K specifies the number of */
+/* sub-diagonals of the matrix A. */
+/* K must satisfy 0 .le. K. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX*16 array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */
+/* by n part of the array A must contain the upper triangular */
+/* band part of the matrix of coefficients, supplied column by */
+/* column, with the leading diagonal of the matrix in row */
+/* ( k + 1 ) of the array, the first super-diagonal starting at */
+/* position 2 in row k, and so on. The top left k by k triangle */
+/* of the array A is not referenced. */
+/* The following program segment will transfer an upper */
+/* triangular band matrix from conventional full matrix storage */
+/* to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = K + 1 - J */
+/* DO 10, I = MAX( 1, J - K ), J */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */
+/* by n part of the array A must contain the lower triangular */
+/* band part of the matrix of coefficients, supplied column by */
+/* column, with the leading diagonal of the matrix in row 1 of */
+/* the array, the first sub-diagonal starting at position 1 in */
+/* row 2, and so on. The bottom right k by k triangle of the */
+/* array A is not referenced. */
+/* The following program segment will transfer a lower */
+/* triangular band matrix from conventional full matrix storage */
+/* to band storage: */
+
+/* DO 20, J = 1, N */
+/* M = 1 - J */
+/* DO 10, I = J, MIN( N, J + K ) */
+/* A( M + I, J ) = matrix( I, J ) */
+/* 10 CONTINUE */
+/* 20 CONTINUE */
+
+/* Note that when DIAG = 'U' or 'u' the elements of the array A */
+/* corresponding to the diagonal elements of the matrix are not */
+/* referenced, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* ( k + 1 ). */
+/* Unchanged on exit. */
+
+/* X - COMPLEX*16 array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element right-hand side vector b. On exit, X is overwritten */
+/* with the solution vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*k < 0) {
+ info = 5;
+ } else if (*lda < *k + 1) {
+ info = 7;
+ } else if (*incx == 0) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("ZTBSV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed by sequentially with one pass through A. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := inv( A )*x. */
+
+ if (lsame_(uplo, "U")) {
+ kplus1 = *k + 1;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ if (x[i__1].r != 0. || x[i__1].i != 0.) {
+ l = kplus1 - j;
+ if (nounit) {
+ i__1 = j;
+ z_div(&z__1, &x[j], &a[kplus1 + j * a_dim1]);
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+ }
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__1 = max(i__2,i__3);
+ for (i__ = j - 1; i__ >= i__1; --i__) {
+ i__2 = i__;
+ i__3 = i__;
+ i__4 = l + i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
+ z__2.i = temp.r * a[i__4].i + temp.i * a[
+ i__4].r;
+ z__1.r = x[i__3].r - z__2.r, z__1.i = x[i__3].i -
+ z__2.i;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ kx -= *incx;
+ i__1 = jx;
+ if (x[i__1].r != 0. || x[i__1].i != 0.) {
+ ix = kx;
+ l = kplus1 - j;
+ if (nounit) {
+ i__1 = jx;
+ z_div(&z__1, &x[jx], &a[kplus1 + j * a_dim1]);
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+ }
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__1 = max(i__2,i__3);
+ for (i__ = j - 1; i__ >= i__1; --i__) {
+ i__2 = ix;
+ i__3 = ix;
+ i__4 = l + i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
+ z__2.i = temp.r * a[i__4].i + temp.i * a[
+ i__4].r;
+ z__1.r = x[i__3].r - z__2.r, z__1.i = x[i__3].i -
+ z__2.i;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ ix -= *incx;
+/* L30: */
+ }
+ }
+ jx -= *incx;
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ l = 1 - j;
+ if (nounit) {
+ i__2 = j;
+ z_div(&z__1, &x[j], &a[j * a_dim1 + 1]);
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ }
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+/* Computing MIN */
+ i__3 = *n, i__4 = j + *k;
+ i__2 = min(i__3,i__4);
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = l + i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ z__1.r = x[i__4].r - z__2.r, z__1.i = x[i__4].i -
+ z__2.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ kx += *incx;
+ i__2 = jx;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ ix = kx;
+ l = 1 - j;
+ if (nounit) {
+ i__2 = jx;
+ z_div(&z__1, &x[jx], &a[j * a_dim1 + 1]);
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ }
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+/* Computing MIN */
+ i__3 = *n, i__4 = j + *k;
+ i__2 = min(i__3,i__4);
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = ix;
+ i__4 = ix;
+ i__5 = l + i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ z__1.r = x[i__4].r - z__2.r, z__1.i = x[i__4].i -
+ z__2.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ ix += *incx;
+/* L70: */
+ }
+ }
+ jx += *incx;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := inv( A' )*x or x := inv( conjg( A') )*x. */
+
+ if (lsame_(uplo, "U")) {
+ kplus1 = *k + 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ l = kplus1 - j;
+ if (noconj) {
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__4 = j - 1;
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ i__2 = l + i__ + j * a_dim1;
+ i__3 = i__;
+ z__2.r = a[i__2].r * x[i__3].r - a[i__2].i * x[
+ i__3].i, z__2.i = a[i__2].r * x[i__3].i +
+ a[i__2].i * x[i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L90: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[kplus1 + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *k;
+ i__3 = j - 1;
+ for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+ d_cnjg(&z__3, &a[l + i__ + j * a_dim1]);
+ i__4 = i__;
+ z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i,
+ z__2.i = z__3.r * x[i__4].i + z__3.i * x[
+ i__4].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L100: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[kplus1 + j * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__3 = j;
+ x[i__3].r = temp.r, x[i__3].i = temp.i;
+/* L110: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__3 = jx;
+ temp.r = x[i__3].r, temp.i = x[i__3].i;
+ ix = kx;
+ l = kplus1 - j;
+ if (noconj) {
+/* Computing MAX */
+ i__3 = 1, i__4 = j - *k;
+ i__2 = j - 1;
+ for (i__ = max(i__3,i__4); i__ <= i__2; ++i__) {
+ i__3 = l + i__ + j * a_dim1;
+ i__4 = ix;
+ z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+ i__4].i, z__2.i = a[i__3].r * x[i__4].i +
+ a[i__3].i * x[i__4].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix += *incx;
+/* L120: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[kplus1 + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__4 = j - 1;
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ d_cnjg(&z__3, &a[l + i__ + j * a_dim1]);
+ i__2 = ix;
+ z__2.r = z__3.r * x[i__2].r - z__3.i * x[i__2].i,
+ z__2.i = z__3.r * x[i__2].i + z__3.i * x[
+ i__2].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix += *incx;
+/* L130: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[kplus1 + j * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__4 = jx;
+ x[i__4].r = temp.r, x[i__4].i = temp.i;
+ jx += *incx;
+ if (j > *k) {
+ kx += *incx;
+ }
+/* L140: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ l = 1 - j;
+ if (noconj) {
+/* Computing MIN */
+ i__1 = *n, i__4 = j + *k;
+ i__2 = j + 1;
+ for (i__ = min(i__1,i__4); i__ >= i__2; --i__) {
+ i__1 = l + i__ + j * a_dim1;
+ i__4 = i__;
+ z__2.r = a[i__1].r * x[i__4].r - a[i__1].i * x[
+ i__4].i, z__2.i = a[i__1].r * x[i__4].i +
+ a[i__1].i * x[i__4].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L150: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[j * a_dim1 + 1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+/* Computing MIN */
+ i__2 = *n, i__1 = j + *k;
+ i__4 = j + 1;
+ for (i__ = min(i__2,i__1); i__ >= i__4; --i__) {
+ d_cnjg(&z__3, &a[l + i__ + j * a_dim1]);
+ i__2 = i__;
+ z__2.r = z__3.r * x[i__2].r - z__3.i * x[i__2].i,
+ z__2.i = z__3.r * x[i__2].i + z__3.i * x[
+ i__2].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L160: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[j * a_dim1 + 1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__4 = j;
+ x[i__4].r = temp.r, x[i__4].i = temp.i;
+/* L170: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ i__4 = jx;
+ temp.r = x[i__4].r, temp.i = x[i__4].i;
+ ix = kx;
+ l = 1 - j;
+ if (noconj) {
+/* Computing MIN */
+ i__4 = *n, i__2 = j + *k;
+ i__1 = j + 1;
+ for (i__ = min(i__4,i__2); i__ >= i__1; --i__) {
+ i__4 = l + i__ + j * a_dim1;
+ i__2 = ix;
+ z__2.r = a[i__4].r * x[i__2].r - a[i__4].i * x[
+ i__2].i, z__2.i = a[i__4].r * x[i__2].i +
+ a[i__4].i * x[i__2].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix -= *incx;
+/* L180: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[j * a_dim1 + 1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+/* Computing MIN */
+ i__1 = *n, i__4 = j + *k;
+ i__2 = j + 1;
+ for (i__ = min(i__1,i__4); i__ >= i__2; --i__) {
+ d_cnjg(&z__3, &a[l + i__ + j * a_dim1]);
+ i__1 = ix;
+ z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i,
+ z__2.i = z__3.r * x[i__1].i + z__3.i * x[
+ i__1].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix -= *incx;
+/* L190: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[j * a_dim1 + 1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__2 = jx;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+ jx -= *incx;
+ if (*n - j >= *k) {
+ kx -= *incx;
+ }
+/* L200: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZTBSV . */
+
+} /* ztbsv_ */
diff --git a/contrib/libs/cblas/ztpmv.c b/contrib/libs/cblas/ztpmv.c
new file mode 100644
index 0000000000..3983febd60
--- /dev/null
+++ b/contrib/libs/cblas/ztpmv.c
@@ -0,0 +1,571 @@
+/* ztpmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ztpmv_(char *uplo, char *trans, char *diag, integer *n,
+ doublecomplex *ap, doublecomplex *x, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k, kk, ix, jx, kx, info;
+ doublecomplex temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noconj, nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTPMV performs one of the matrix-vector operations */
+
+/* x := A*x, or x := A'*x, or x := conjg( A' )*x, */
+
+/* where x is an n element vector and A is an n by n unit, or non-unit, */
+/* upper or lower triangular matrix, supplied in packed form. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' x := A*x. */
+
+/* TRANS = 'T' or 't' x := A'*x. */
+
+/* TRANS = 'C' or 'c' x := conjg( A' )*x. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* AP - COMPLEX*16 array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular matrix packed sequentially, */
+/* column by column, so that AP( 1 ) contains a( 1, 1 ), */
+/* AP( 2 ) and AP( 3 ) contain a( 1, 2 ) and a( 2, 2 ) */
+/* respectively, and so on. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular matrix packed sequentially, */
+/* column by column, so that AP( 1 ) contains a( 1, 1 ), */
+/* AP( 2 ) and AP( 3 ) contain a( 2, 1 ) and a( 3, 1 ) */
+/* respectively, and so on. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* X - COMPLEX*16 array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. On exit, X is overwritten with the */
+/* tranformed vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --x;
+ --ap;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*incx == 0) {
+ info = 7;
+ }
+ if (info != 0) {
+ xerbla_("ZTPMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of AP are */
+/* accessed sequentially with one pass through AP. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x:= A*x. */
+
+ if (lsame_(uplo, "U")) {
+ kk = 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ k = kk;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = k;
+ z__2.r = temp.r * ap[i__5].r - temp.i * ap[i__5]
+ .i, z__2.i = temp.r * ap[i__5].i + temp.i
+ * ap[i__5].r;
+ z__1.r = x[i__4].r + z__2.r, z__1.i = x[i__4].i +
+ z__2.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ ++k;
+/* L10: */
+ }
+ if (nounit) {
+ i__2 = j;
+ i__3 = j;
+ i__4 = kk + j - 1;
+ z__1.r = x[i__3].r * ap[i__4].r - x[i__3].i * ap[
+ i__4].i, z__1.i = x[i__3].r * ap[i__4].i
+ + x[i__3].i * ap[i__4].r;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ }
+ }
+ kk += j;
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = kx;
+ i__2 = kk + j - 2;
+ for (k = kk; k <= i__2; ++k) {
+ i__3 = ix;
+ i__4 = ix;
+ i__5 = k;
+ z__2.r = temp.r * ap[i__5].r - temp.i * ap[i__5]
+ .i, z__2.i = temp.r * ap[i__5].i + temp.i
+ * ap[i__5].r;
+ z__1.r = x[i__4].r + z__2.r, z__1.i = x[i__4].i +
+ z__2.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ ix += *incx;
+/* L30: */
+ }
+ if (nounit) {
+ i__2 = jx;
+ i__3 = jx;
+ i__4 = kk + j - 1;
+ z__1.r = x[i__3].r * ap[i__4].r - x[i__3].i * ap[
+ i__4].i, z__1.i = x[i__3].r * ap[i__4].i
+ + x[i__3].i * ap[i__4].r;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ }
+ }
+ jx += *incx;
+ kk += j;
+/* L40: */
+ }
+ }
+ } else {
+ kk = *n * (*n + 1) / 2;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ if (x[i__1].r != 0. || x[i__1].i != 0.) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ k = kk;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = i__;
+ i__3 = i__;
+ i__4 = k;
+ z__2.r = temp.r * ap[i__4].r - temp.i * ap[i__4]
+ .i, z__2.i = temp.r * ap[i__4].i + temp.i
+ * ap[i__4].r;
+ z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i +
+ z__2.i;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ --k;
+/* L50: */
+ }
+ if (nounit) {
+ i__1 = j;
+ i__2 = j;
+ i__3 = kk - *n + j;
+ z__1.r = x[i__2].r * ap[i__3].r - x[i__2].i * ap[
+ i__3].i, z__1.i = x[i__2].r * ap[i__3].i
+ + x[i__2].i * ap[i__3].r;
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+ }
+ }
+ kk -= *n - j + 1;
+/* L60: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ if (x[i__1].r != 0. || x[i__1].i != 0.) {
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = kx;
+ i__1 = kk - (*n - (j + 1));
+ for (k = kk; k >= i__1; --k) {
+ i__2 = ix;
+ i__3 = ix;
+ i__4 = k;
+ z__2.r = temp.r * ap[i__4].r - temp.i * ap[i__4]
+ .i, z__2.i = temp.r * ap[i__4].i + temp.i
+ * ap[i__4].r;
+ z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i +
+ z__2.i;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ ix -= *incx;
+/* L70: */
+ }
+ if (nounit) {
+ i__1 = jx;
+ i__2 = jx;
+ i__3 = kk - *n + j;
+ z__1.r = x[i__2].r * ap[i__3].r - x[i__2].i * ap[
+ i__3].i, z__1.i = x[i__2].r * ap[i__3].i
+ + x[i__2].i * ap[i__3].r;
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+ }
+ }
+ jx -= *incx;
+ kk -= *n - j + 1;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := A'*x or x := conjg( A' )*x. */
+
+ if (lsame_(uplo, "U")) {
+ kk = *n * (*n + 1) / 2;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ k = kk - 1;
+ if (noconj) {
+ if (nounit) {
+ i__1 = kk;
+ z__1.r = temp.r * ap[i__1].r - temp.i * ap[i__1]
+ .i, z__1.i = temp.r * ap[i__1].i + temp.i
+ * ap[i__1].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ i__1 = k;
+ i__2 = i__;
+ z__2.r = ap[i__1].r * x[i__2].r - ap[i__1].i * x[
+ i__2].i, z__2.i = ap[i__1].r * x[i__2].i
+ + ap[i__1].i * x[i__2].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ --k;
+/* L90: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &ap[kk]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ d_cnjg(&z__3, &ap[k]);
+ i__1 = i__;
+ z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i,
+ z__2.i = z__3.r * x[i__1].i + z__3.i * x[
+ i__1].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ --k;
+/* L100: */
+ }
+ }
+ i__1 = j;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+ kk -= j;
+/* L110: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = jx;
+ if (noconj) {
+ if (nounit) {
+ i__1 = kk;
+ z__1.r = temp.r * ap[i__1].r - temp.i * ap[i__1]
+ .i, z__1.i = temp.r * ap[i__1].i + temp.i
+ * ap[i__1].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__1 = kk - j + 1;
+ for (k = kk - 1; k >= i__1; --k) {
+ ix -= *incx;
+ i__2 = k;
+ i__3 = ix;
+ z__2.r = ap[i__2].r * x[i__3].r - ap[i__2].i * x[
+ i__3].i, z__2.i = ap[i__2].r * x[i__3].i
+ + ap[i__2].i * x[i__3].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L120: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &ap[kk]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__1 = kk - j + 1;
+ for (k = kk - 1; k >= i__1; --k) {
+ ix -= *incx;
+ d_cnjg(&z__3, &ap[k]);
+ i__2 = ix;
+ z__2.r = z__3.r * x[i__2].r - z__3.i * x[i__2].i,
+ z__2.i = z__3.r * x[i__2].i + z__3.i * x[
+ i__2].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L130: */
+ }
+ }
+ i__1 = jx;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+ jx -= *incx;
+ kk -= j;
+/* L140: */
+ }
+ }
+ } else {
+ kk = 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ k = kk + 1;
+ if (noconj) {
+ if (nounit) {
+ i__2 = kk;
+ z__1.r = temp.r * ap[i__2].r - temp.i * ap[i__2]
+ .i, z__1.i = temp.r * ap[i__2].i + temp.i
+ * ap[i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = k;
+ i__4 = i__;
+ z__2.r = ap[i__3].r * x[i__4].r - ap[i__3].i * x[
+ i__4].i, z__2.i = ap[i__3].r * x[i__4].i
+ + ap[i__3].i * x[i__4].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ++k;
+/* L150: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &ap[kk]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ d_cnjg(&z__3, &ap[k]);
+ i__3 = i__;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+ z__2.i = z__3.r * x[i__3].i + z__3.i * x[
+ i__3].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ++k;
+/* L160: */
+ }
+ }
+ i__2 = j;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+ kk += *n - j + 1;
+/* L170: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = jx;
+ if (noconj) {
+ if (nounit) {
+ i__2 = kk;
+ z__1.r = temp.r * ap[i__2].r - temp.i * ap[i__2]
+ .i, z__1.i = temp.r * ap[i__2].i + temp.i
+ * ap[i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = kk + *n - j;
+ for (k = kk + 1; k <= i__2; ++k) {
+ ix += *incx;
+ i__3 = k;
+ i__4 = ix;
+ z__2.r = ap[i__3].r * x[i__4].r - ap[i__3].i * x[
+ i__4].i, z__2.i = ap[i__3].r * x[i__4].i
+ + ap[i__3].i * x[i__4].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L180: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &ap[kk]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = kk + *n - j;
+ for (k = kk + 1; k <= i__2; ++k) {
+ ix += *incx;
+ d_cnjg(&z__3, &ap[k]);
+ i__3 = ix;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+ z__2.i = z__3.r * x[i__3].i + z__3.i * x[
+ i__3].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L190: */
+ }
+ }
+ i__2 = jx;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+ jx += *incx;
+ kk += *n - j + 1;
+/* L200: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZTPMV . */
+
+} /* ztpmv_ */
diff --git a/contrib/libs/cblas/ztpsv.c b/contrib/libs/cblas/ztpsv.c
new file mode 100644
index 0000000000..9e5b5850e0
--- /dev/null
+++ b/contrib/libs/cblas/ztpsv.c
@@ -0,0 +1,540 @@
+/* ztpsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ztpsv_(char *uplo, char *trans, char *diag, integer *n,
+ doublecomplex *ap, doublecomplex *x, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *), d_cnjg(
+ doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k, kk, ix, jx, kx, info;
+ doublecomplex temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noconj, nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTPSV solves one of the systems of equations */
+
+/* A*x = b, or A'*x = b, or conjg( A' )*x = b, */
+
+/* where b and x are n element vectors and A is an n by n unit, or */
+/* non-unit, upper or lower triangular matrix, supplied in packed form. */
+
+/* No test for singularity or near-singularity is included in this */
+/* routine. Such tests must be performed before calling this routine. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the equations to be solved as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' A*x = b. */
+
+/* TRANS = 'T' or 't' A'*x = b. */
+
+/* TRANS = 'C' or 'c' conjg( A' )*x = b. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* AP - COMPLEX*16 array of DIMENSION at least */
+/* ( ( n*( n + 1 ) )/2 ). */
+/* Before entry with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular matrix packed sequentially, */
+/* column by column, so that AP( 1 ) contains a( 1, 1 ), */
+/* AP( 2 ) and AP( 3 ) contain a( 1, 2 ) and a( 2, 2 ) */
+/* respectively, and so on. */
+/* Before entry with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular matrix packed sequentially, */
+/* column by column, so that AP( 1 ) contains a( 1, 1 ), */
+/* AP( 2 ) and AP( 3 ) contain a( 2, 1 ) and a( 3, 1 ) */
+/* respectively, and so on. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* X - COMPLEX*16 array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element right-hand side vector b. On exit, X is overwritten */
+/* with the solution vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --x;
+ --ap;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*incx == 0) {
+ info = 7;
+ }
+ if (info != 0) {
+ xerbla_("ZTPSV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of AP are */
+/* accessed sequentially with one pass through AP. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := inv( A )*x. */
+
+ if (lsame_(uplo, "U")) {
+ kk = *n * (*n + 1) / 2;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ if (x[i__1].r != 0. || x[i__1].i != 0.) {
+ if (nounit) {
+ i__1 = j;
+ z_div(&z__1, &x[j], &ap[kk]);
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+ }
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ k = kk - 1;
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ i__1 = i__;
+ i__2 = i__;
+ i__3 = k;
+ z__2.r = temp.r * ap[i__3].r - temp.i * ap[i__3]
+ .i, z__2.i = temp.r * ap[i__3].i + temp.i
+ * ap[i__3].r;
+ z__1.r = x[i__2].r - z__2.r, z__1.i = x[i__2].i -
+ z__2.i;
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+ --k;
+/* L10: */
+ }
+ }
+ kk -= j;
+/* L20: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ if (x[i__1].r != 0. || x[i__1].i != 0.) {
+ if (nounit) {
+ i__1 = jx;
+ z_div(&z__1, &x[jx], &ap[kk]);
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+ }
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = jx;
+ i__1 = kk - j + 1;
+ for (k = kk - 1; k >= i__1; --k) {
+ ix -= *incx;
+ i__2 = ix;
+ i__3 = ix;
+ i__4 = k;
+ z__2.r = temp.r * ap[i__4].r - temp.i * ap[i__4]
+ .i, z__2.i = temp.r * ap[i__4].i + temp.i
+ * ap[i__4].r;
+ z__1.r = x[i__3].r - z__2.r, z__1.i = x[i__3].i -
+ z__2.i;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+/* L30: */
+ }
+ }
+ jx -= *incx;
+ kk -= j;
+/* L40: */
+ }
+ }
+ } else {
+ kk = 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ if (nounit) {
+ i__2 = j;
+ z_div(&z__1, &x[j], &ap[kk]);
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ }
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ k = kk + 1;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = k;
+ z__2.r = temp.r * ap[i__5].r - temp.i * ap[i__5]
+ .i, z__2.i = temp.r * ap[i__5].i + temp.i
+ * ap[i__5].r;
+ z__1.r = x[i__4].r - z__2.r, z__1.i = x[i__4].i -
+ z__2.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ ++k;
+/* L50: */
+ }
+ }
+ kk += *n - j + 1;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ if (nounit) {
+ i__2 = jx;
+ z_div(&z__1, &x[jx], &ap[kk]);
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ }
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = jx;
+ i__2 = kk + *n - j;
+ for (k = kk + 1; k <= i__2; ++k) {
+ ix += *incx;
+ i__3 = ix;
+ i__4 = ix;
+ i__5 = k;
+ z__2.r = temp.r * ap[i__5].r - temp.i * ap[i__5]
+ .i, z__2.i = temp.r * ap[i__5].i + temp.i
+ * ap[i__5].r;
+ z__1.r = x[i__4].r - z__2.r, z__1.i = x[i__4].i -
+ z__2.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L70: */
+ }
+ }
+ jx += *incx;
+ kk += *n - j + 1;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := inv( A' )*x or x := inv( conjg( A' ) )*x. */
+
+ if (lsame_(uplo, "U")) {
+ kk = 1;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ k = kk;
+ if (noconj) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = k;
+ i__4 = i__;
+ z__2.r = ap[i__3].r * x[i__4].r - ap[i__3].i * x[
+ i__4].i, z__2.i = ap[i__3].r * x[i__4].i
+ + ap[i__3].i * x[i__4].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ++k;
+/* L90: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &ap[kk + j - 1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ d_cnjg(&z__3, &ap[k]);
+ i__3 = i__;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+ z__2.i = z__3.r * x[i__3].i + z__3.i * x[
+ i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ++k;
+/* L100: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &ap[kk + j - 1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__2 = j;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+ kk += j;
+/* L110: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = kx;
+ if (noconj) {
+ i__2 = kk + j - 2;
+ for (k = kk; k <= i__2; ++k) {
+ i__3 = k;
+ i__4 = ix;
+ z__2.r = ap[i__3].r * x[i__4].r - ap[i__3].i * x[
+ i__4].i, z__2.i = ap[i__3].r * x[i__4].i
+ + ap[i__3].i * x[i__4].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix += *incx;
+/* L120: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &ap[kk + j - 1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__2 = kk + j - 2;
+ for (k = kk; k <= i__2; ++k) {
+ d_cnjg(&z__3, &ap[k]);
+ i__3 = ix;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+ z__2.i = z__3.r * x[i__3].i + z__3.i * x[
+ i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix += *incx;
+/* L130: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &ap[kk + j - 1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__2 = jx;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+ jx += *incx;
+ kk += j;
+/* L140: */
+ }
+ }
+ } else {
+ kk = *n * (*n + 1) / 2;
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ k = kk;
+ if (noconj) {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = k;
+ i__3 = i__;
+ z__2.r = ap[i__2].r * x[i__3].r - ap[i__2].i * x[
+ i__3].i, z__2.i = ap[i__2].r * x[i__3].i
+ + ap[i__2].i * x[i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ --k;
+/* L150: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &ap[kk - *n + j]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ d_cnjg(&z__3, &ap[k]);
+ i__2 = i__;
+ z__2.r = z__3.r * x[i__2].r - z__3.i * x[i__2].i,
+ z__2.i = z__3.r * x[i__2].i + z__3.i * x[
+ i__2].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ --k;
+/* L160: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &ap[kk - *n + j]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__1 = j;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+ kk -= *n - j + 1;
+/* L170: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = kx;
+ if (noconj) {
+ i__1 = kk - (*n - (j + 1));
+ for (k = kk; k >= i__1; --k) {
+ i__2 = k;
+ i__3 = ix;
+ z__2.r = ap[i__2].r * x[i__3].r - ap[i__2].i * x[
+ i__3].i, z__2.i = ap[i__2].r * x[i__3].i
+ + ap[i__2].i * x[i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix -= *incx;
+/* L180: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &ap[kk - *n + j]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__1 = kk - (*n - (j + 1));
+ for (k = kk; k >= i__1; --k) {
+ d_cnjg(&z__3, &ap[k]);
+ i__2 = ix;
+ z__2.r = z__3.r * x[i__2].r - z__3.i * x[i__2].i,
+ z__2.i = z__3.r * x[i__2].i + z__3.i * x[
+ i__2].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix -= *incx;
+/* L190: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &ap[kk - *n + j]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__1 = jx;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+ jx -= *incx;
+ kk -= *n - j + 1;
+/* L200: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZTPSV . */
+
+} /* ztpsv_ */
diff --git a/contrib/libs/cblas/ztrmm.c b/contrib/libs/cblas/ztrmm.c
new file mode 100644
index 0000000000..c2af1d560f
--- /dev/null
+++ b/contrib/libs/cblas/ztrmm.c
@@ -0,0 +1,688 @@
+/* ztrmm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ztrmm_(char *side, char *uplo, char *transa, char *diag,
+ integer *m, integer *n, doublecomplex *alpha, doublecomplex *a,
+ integer *lda, doublecomplex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k, info;
+ doublecomplex temp;
+ logical lside;
+ extern logical lsame_(char *, char *);
+ integer nrowa;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noconj, nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTRMM performs one of the matrix-matrix operations */
+
+/* B := alpha*op( A )*B, or B := alpha*B*op( A ) */
+
+/* where alpha is a scalar, B is an m by n matrix, A is a unit, or */
+/* non-unit, upper or lower triangular matrix and op( A ) is one of */
+
+/* op( A ) = A or op( A ) = A' or op( A ) = conjg( A' ). */
+
+/* Arguments */
+/* ========== */
+
+/* SIDE - CHARACTER*1. */
+/* On entry, SIDE specifies whether op( A ) multiplies B from */
+/* the left or right as follows: */
+
+/* SIDE = 'L' or 'l' B := alpha*op( A )*B. */
+
+/* SIDE = 'R' or 'r' B := alpha*B*op( A ). */
+
+/* Unchanged on exit. */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix A is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANSA - CHARACTER*1. */
+/* On entry, TRANSA specifies the form of op( A ) to be used in */
+/* the matrix multiplication as follows: */
+
+/* TRANSA = 'N' or 'n' op( A ) = A. */
+
+/* TRANSA = 'T' or 't' op( A ) = A'. */
+
+/* TRANSA = 'C' or 'c' op( A ) = conjg( A' ). */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit triangular */
+/* as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of B. M must be at */
+/* least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of B. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX*16 . */
+/* On entry, ALPHA specifies the scalar alpha. When alpha is */
+/* zero then A is not referenced and B need not be set before */
+/* entry. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX*16 array of DIMENSION ( LDA, k ), where k is m */
+/* when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'. */
+/* Before entry with UPLO = 'U' or 'u', the leading k by k */
+/* upper triangular part of the array A must contain the upper */
+/* triangular matrix and the strictly lower triangular part of */
+/* A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading k by k */
+/* lower triangular part of the array A must contain the lower */
+/* triangular matrix and the strictly upper triangular part of */
+/* A is not referenced. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced either, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When SIDE = 'L' or 'l' then */
+/* LDA must be at least max( 1, m ), when SIDE = 'R' or 'r' */
+/* then LDA must be at least max( 1, n ). */
+/* Unchanged on exit. */
+
+/* B - COMPLEX*16 array of DIMENSION ( LDB, n ). */
+/* Before entry, the leading m by n part of the array B must */
+/* contain the matrix B, and on exit is overwritten by the */
+/* transformed matrix. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. LDB must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ lside = lsame_(side, "L");
+ if (lside) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ noconj = lsame_(transa, "T");
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! lside && ! lsame_(side, "R")) {
+ info = 1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ info = 2;
+ } else if (! lsame_(transa, "N") && ! lsame_(transa,
+ "T") && ! lsame_(transa, "C")) {
+ info = 3;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 4;
+ } else if (*m < 0) {
+ info = 5;
+ } else if (*n < 0) {
+ info = 6;
+ } else if (*lda < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldb < max(1,*m)) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("ZTRMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0. && alpha->i == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0., b[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lside) {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*A*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * b_dim1;
+ if (b[i__3].r != 0. || b[i__3].i != 0.) {
+ i__3 = k + j * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3]
+ .i, z__1.i = alpha->r * b[i__3].i +
+ alpha->i * b[i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * a_dim1;
+ z__2.r = temp.r * a[i__6].r - temp.i * a[i__6]
+ .i, z__2.i = temp.r * a[i__6].i +
+ temp.i * a[i__6].r;
+ z__1.r = b[i__5].r + z__2.r, z__1.i = b[i__5]
+ .i + z__2.i;
+ b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L30: */
+ }
+ if (nounit) {
+ i__3 = k + k * a_dim1;
+ z__1.r = temp.r * a[i__3].r - temp.i * a[i__3]
+ .i, z__1.i = temp.r * a[i__3].i +
+ temp.i * a[i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__3 = k + j * b_dim1;
+ b[i__3].r = temp.r, b[i__3].i = temp.i;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (k = *m; k >= 1; --k) {
+ i__2 = k + j * b_dim1;
+ if (b[i__2].r != 0. || b[i__2].i != 0.) {
+ i__2 = k + j * b_dim1;
+ z__1.r = alpha->r * b[i__2].r - alpha->i * b[i__2]
+ .i, z__1.i = alpha->r * b[i__2].i +
+ alpha->i * b[i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = k + j * b_dim1;
+ b[i__2].r = temp.r, b[i__2].i = temp.i;
+ if (nounit) {
+ i__2 = k + j * b_dim1;
+ i__3 = k + j * b_dim1;
+ i__4 = k + k * a_dim1;
+ z__1.r = b[i__3].r * a[i__4].r - b[i__3].i *
+ a[i__4].i, z__1.i = b[i__3].r * a[
+ i__4].i + b[i__3].i * a[i__4].r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ }
+ i__2 = *m;
+ for (i__ = k + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5]
+ .i, z__2.i = temp.r * a[i__5].i +
+ temp.i * a[i__5].r;
+ z__1.r = b[i__4].r + z__2.r, z__1.i = b[i__4]
+ .i + z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L60: */
+ }
+ }
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*A'*B or B := alpha*conjg( A' )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ i__2 = i__ + j * b_dim1;
+ temp.r = b[i__2].r, temp.i = b[i__2].i;
+ if (noconj) {
+ if (nounit) {
+ i__2 = i__ + i__ * a_dim1;
+ z__1.r = temp.r * a[i__2].r - temp.i * a[i__2]
+ .i, z__1.i = temp.r * a[i__2].i +
+ temp.i * a[i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = i__ - 1;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + i__ * a_dim1;
+ i__4 = k + j * b_dim1;
+ z__2.r = a[i__3].r * b[i__4].r - a[i__3].i *
+ b[i__4].i, z__2.i = a[i__3].r * b[
+ i__4].i + a[i__3].i * b[i__4].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L90: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &a[i__ + i__ * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = i__ - 1;
+ for (k = 1; k <= i__2; ++k) {
+ d_cnjg(&z__3, &a[k + i__ * a_dim1]);
+ i__3 = k + j * b_dim1;
+ z__2.r = z__3.r * b[i__3].r - z__3.i * b[i__3]
+ .i, z__2.i = z__3.r * b[i__3].i +
+ z__3.i * b[i__3].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L100: */
+ }
+ }
+ i__2 = i__ + j * b_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L110: */
+ }
+/* L120: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ temp.r = b[i__3].r, temp.i = b[i__3].i;
+ if (noconj) {
+ if (nounit) {
+ i__3 = i__ + i__ * a_dim1;
+ z__1.r = temp.r * a[i__3].r - temp.i * a[i__3]
+ .i, z__1.i = temp.r * a[i__3].i +
+ temp.i * a[i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__3 = *m;
+ for (k = i__ + 1; k <= i__3; ++k) {
+ i__4 = k + i__ * a_dim1;
+ i__5 = k + j * b_dim1;
+ z__2.r = a[i__4].r * b[i__5].r - a[i__4].i *
+ b[i__5].i, z__2.i = a[i__4].r * b[
+ i__5].i + a[i__4].i * b[i__5].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L130: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &a[i__ + i__ * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__3 = *m;
+ for (k = i__ + 1; k <= i__3; ++k) {
+ d_cnjg(&z__3, &a[k + i__ * a_dim1]);
+ i__4 = k + j * b_dim1;
+ z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4]
+ .i, z__2.i = z__3.r * b[i__4].i +
+ z__3.i * b[i__4].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L140: */
+ }
+ }
+ i__3 = i__ + j * b_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L150: */
+ }
+/* L160: */
+ }
+ }
+ }
+ } else {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*B*A. */
+
+ if (upper) {
+ for (j = *n; j >= 1; --j) {
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ i__1 = j + j * a_dim1;
+ z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+ z__1.i = temp.r * a[i__1].i + temp.i * a[i__1]
+ .r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ z__1.i = temp.r * b[i__3].i + temp.i * b[i__3]
+ .r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L170: */
+ }
+ i__1 = j - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k + j * a_dim1;
+ if (a[i__2].r != 0. || a[i__2].i != 0.) {
+ i__2 = k + j * a_dim1;
+ z__1.r = alpha->r * a[i__2].r - alpha->i * a[i__2]
+ .i, z__1.i = alpha->r * a[i__2].i +
+ alpha->i * a[i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * b_dim1;
+ z__2.r = temp.r * b[i__5].r - temp.i * b[i__5]
+ .i, z__2.i = temp.r * b[i__5].i +
+ temp.i * b[i__5].r;
+ z__1.r = b[i__4].r + z__2.r, z__1.i = b[i__4]
+ .i + z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L180: */
+ }
+ }
+/* L190: */
+ }
+/* L200: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ i__2 = j + j * a_dim1;
+ z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ z__1.i = temp.r * a[i__2].i + temp.i * a[i__2]
+ .r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ z__1.i = temp.r * b[i__4].i + temp.i * b[i__4]
+ .r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L210: */
+ }
+ i__2 = *n;
+ for (k = j + 1; k <= i__2; ++k) {
+ i__3 = k + j * a_dim1;
+ if (a[i__3].r != 0. || a[i__3].i != 0.) {
+ i__3 = k + j * a_dim1;
+ z__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3]
+ .i, z__1.i = alpha->r * a[i__3].i +
+ alpha->i * a[i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * b_dim1;
+ z__2.r = temp.r * b[i__6].r - temp.i * b[i__6]
+ .i, z__2.i = temp.r * b[i__6].i +
+ temp.i * b[i__6].r;
+ z__1.r = b[i__5].r + z__2.r, z__1.i = b[i__5]
+ .i + z__2.i;
+ b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L220: */
+ }
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*B*A' or B := alpha*B*conjg( A' ). */
+
+ if (upper) {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k - 1;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = j + k * a_dim1;
+ if (a[i__3].r != 0. || a[i__3].i != 0.) {
+ if (noconj) {
+ i__3 = j + k * a_dim1;
+ z__1.r = alpha->r * a[i__3].r - alpha->i * a[
+ i__3].i, z__1.i = alpha->r * a[i__3]
+ .i + alpha->i * a[i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ } else {
+ d_cnjg(&z__2, &a[j + k * a_dim1]);
+ z__1.r = alpha->r * z__2.r - alpha->i *
+ z__2.i, z__1.i = alpha->r * z__2.i +
+ alpha->i * z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * b_dim1;
+ z__2.r = temp.r * b[i__6].r - temp.i * b[i__6]
+ .i, z__2.i = temp.r * b[i__6].i +
+ temp.i * b[i__6].r;
+ z__1.r = b[i__5].r + z__2.r, z__1.i = b[i__5]
+ .i + z__2.i;
+ b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L250: */
+ }
+ }
+/* L260: */
+ }
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ if (noconj) {
+ i__2 = k + k * a_dim1;
+ z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ z__1.i = temp.r * a[i__2].i + temp.i * a[
+ i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ } else {
+ d_cnjg(&z__2, &a[k + k * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ if (temp.r != 1. || temp.i != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + k * b_dim1;
+ i__4 = i__ + k * b_dim1;
+ z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ z__1.i = temp.r * b[i__4].i + temp.i * b[
+ i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L270: */
+ }
+ }
+/* L280: */
+ }
+ } else {
+ for (k = *n; k >= 1; --k) {
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+ i__2 = j + k * a_dim1;
+ if (a[i__2].r != 0. || a[i__2].i != 0.) {
+ if (noconj) {
+ i__2 = j + k * a_dim1;
+ z__1.r = alpha->r * a[i__2].r - alpha->i * a[
+ i__2].i, z__1.i = alpha->r * a[i__2]
+ .i + alpha->i * a[i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ } else {
+ d_cnjg(&z__2, &a[j + k * a_dim1]);
+ z__1.r = alpha->r * z__2.r - alpha->i *
+ z__2.i, z__1.i = alpha->r * z__2.i +
+ alpha->i * z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * b_dim1;
+ z__2.r = temp.r * b[i__5].r - temp.i * b[i__5]
+ .i, z__2.i = temp.r * b[i__5].i +
+ temp.i * b[i__5].r;
+ z__1.r = b[i__4].r + z__2.r, z__1.i = b[i__4]
+ .i + z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L290: */
+ }
+ }
+/* L300: */
+ }
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ if (noconj) {
+ i__1 = k + k * a_dim1;
+ z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+ z__1.i = temp.r * a[i__1].i + temp.i * a[
+ i__1].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ } else {
+ d_cnjg(&z__2, &a[k + k * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ if (temp.r != 1. || temp.i != 0.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + k * b_dim1;
+ i__3 = i__ + k * b_dim1;
+ z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ z__1.i = temp.r * b[i__3].i + temp.i * b[
+ i__3].r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L310: */
+ }
+ }
+/* L320: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZTRMM . */
+
+} /* ztrmm_ */
diff --git a/contrib/libs/cblas/ztrmv.c b/contrib/libs/cblas/ztrmv.c
new file mode 100644
index 0000000000..a4d8d3ccfb
--- /dev/null
+++ b/contrib/libs/cblas/ztrmv.c
@@ -0,0 +1,554 @@
+/* ztrmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ztrmv_(char *uplo, char *trans, char *diag, integer *n,
+ doublecomplex *a, integer *lda, doublecomplex *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, ix, jx, kx, info;
+ doublecomplex temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noconj, nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTRMV performs one of the matrix-vector operations */
+
+/* x := A*x, or x := A'*x, or x := conjg( A' )*x, */
+
+/* where x is an n element vector and A is an n by n unit, or non-unit, */
+/* upper or lower triangular matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' x := A*x. */
+
+/* TRANS = 'T' or 't' x := A'*x. */
+
+/* TRANS = 'C' or 'c' x := conjg( A' )*x. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX*16 array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular matrix and the strictly lower triangular part of */
+/* A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular matrix and the strictly upper triangular part of */
+/* A is not referenced. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced either, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+/* X - COMPLEX*16 array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element vector x. On exit, X is overwritten with the */
+/* tranformed vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*lda < max(1,*n)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ }
+ if (info != 0) {
+ xerbla_("ZTRMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := A*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ z__1.r = x[i__4].r + z__2.r, z__1.i = x[i__4].i +
+ z__2.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L10: */
+ }
+ if (nounit) {
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ z__1.r = x[i__3].r * a[i__4].r - x[i__3].i * a[
+ i__4].i, z__1.i = x[i__3].r * a[i__4].i +
+ x[i__3].i * a[i__4].r;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = kx;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = ix;
+ i__4 = ix;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ z__1.r = x[i__4].r + z__2.r, z__1.i = x[i__4].i +
+ z__2.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ ix += *incx;
+/* L30: */
+ }
+ if (nounit) {
+ i__2 = jx;
+ i__3 = jx;
+ i__4 = j + j * a_dim1;
+ z__1.r = x[i__3].r * a[i__4].r - x[i__3].i * a[
+ i__4].i, z__1.i = x[i__3].r * a[i__4].i +
+ x[i__3].i * a[i__4].r;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ }
+ }
+ jx += *incx;
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ if (x[i__1].r != 0. || x[i__1].i != 0.) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = i__;
+ i__3 = i__;
+ i__4 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
+ z__2.i = temp.r * a[i__4].i + temp.i * a[
+ i__4].r;
+ z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i +
+ z__2.i;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+/* L50: */
+ }
+ if (nounit) {
+ i__1 = j;
+ i__2 = j;
+ i__3 = j + j * a_dim1;
+ z__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[
+ i__3].i, z__1.i = x[i__2].r * a[i__3].i +
+ x[i__2].i * a[i__3].r;
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+ }
+ }
+/* L60: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ if (x[i__1].r != 0. || x[i__1].i != 0.) {
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = kx;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = ix;
+ i__3 = ix;
+ i__4 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
+ z__2.i = temp.r * a[i__4].i + temp.i * a[
+ i__4].r;
+ z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i +
+ z__2.i;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ ix -= *incx;
+/* L70: */
+ }
+ if (nounit) {
+ i__1 = jx;
+ i__2 = jx;
+ i__3 = j + j * a_dim1;
+ z__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[
+ i__3].i, z__1.i = x[i__2].r * a[i__3].i +
+ x[i__2].i * a[i__3].r;
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+ }
+ }
+ jx -= *incx;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := A'*x or x := conjg( A' )*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ if (noconj) {
+ if (nounit) {
+ i__1 = j + j * a_dim1;
+ z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+ z__1.i = temp.r * a[i__1].i + temp.i * a[
+ i__1].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ i__1 = i__ + j * a_dim1;
+ i__2 = i__;
+ z__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[
+ i__2].i, z__2.i = a[i__1].r * x[i__2].i +
+ a[i__1].i * x[i__2].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L90: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__1 = i__;
+ z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i,
+ z__2.i = z__3.r * x[i__1].i + z__3.i * x[
+ i__1].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L100: */
+ }
+ }
+ i__1 = j;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+/* L110: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = jx;
+ if (noconj) {
+ if (nounit) {
+ i__1 = j + j * a_dim1;
+ z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+ z__1.i = temp.r * a[i__1].i + temp.i * a[
+ i__1].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ ix -= *incx;
+ i__1 = i__ + j * a_dim1;
+ i__2 = ix;
+ z__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[
+ i__2].i, z__2.i = a[i__1].r * x[i__2].i +
+ a[i__1].i * x[i__2].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L120: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ ix -= *incx;
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__1 = ix;
+ z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i,
+ z__2.i = z__3.r * x[i__1].i + z__3.i * x[
+ i__1].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L130: */
+ }
+ }
+ i__1 = jx;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+ jx -= *incx;
+/* L140: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ if (noconj) {
+ if (nounit) {
+ i__2 = j + j * a_dim1;
+ z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ z__1.i = temp.r * a[i__2].i + temp.i * a[
+ i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__;
+ z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+ i__4].i, z__2.i = a[i__3].r * x[i__4].i +
+ a[i__3].i * x[i__4].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L150: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = i__;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+ z__2.i = z__3.r * x[i__3].i + z__3.i * x[
+ i__3].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L160: */
+ }
+ }
+ i__2 = j;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+/* L170: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = jx;
+ if (noconj) {
+ if (nounit) {
+ i__2 = j + j * a_dim1;
+ z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ z__1.i = temp.r * a[i__2].i + temp.i * a[
+ i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ i__3 = i__ + j * a_dim1;
+ i__4 = ix;
+ z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+ i__4].i, z__2.i = a[i__3].r * x[i__4].i +
+ a[i__3].i * x[i__4].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L180: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = ix;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+ z__2.i = z__3.r * x[i__3].i + z__3.i * x[
+ i__3].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L190: */
+ }
+ }
+ i__2 = jx;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+ jx += *incx;
+/* L200: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZTRMV . */
+
+} /* ztrmv_ */
diff --git a/contrib/libs/cblas/ztrsm.c b/contrib/libs/cblas/ztrsm.c
new file mode 100644
index 0000000000..068744c20a
--- /dev/null
+++ b/contrib/libs/cblas/ztrsm.c
@@ -0,0 +1,699 @@
+/* ztrsm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+
+/* Subroutine */ int ztrsm_(char *side, char *uplo, char *transa, char *diag,
+ integer *m, integer *n, doublecomplex *alpha, doublecomplex *a,
+ integer *lda, doublecomplex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6, i__7;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *), d_cnjg(
+ doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k, info;
+ doublecomplex temp;
+ logical lside;
+ extern logical lsame_(char *, char *);
+ integer nrowa;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noconj, nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTRSM solves one of the matrix equations */
+
+/* op( A )*X = alpha*B, or X*op( A ) = alpha*B, */
+
+/* where alpha is a scalar, X and B are m by n matrices, A is a unit, or */
+/* non-unit, upper or lower triangular matrix and op( A ) is one of */
+
+/* op( A ) = A or op( A ) = A' or op( A ) = conjg( A' ). */
+
+/* The matrix X is overwritten on B. */
+
+/* Arguments */
+/* ========== */
+
+/* SIDE - CHARACTER*1. */
+/* On entry, SIDE specifies whether op( A ) appears on the left */
+/* or right of X as follows: */
+
+/* SIDE = 'L' or 'l' op( A )*X = alpha*B. */
+
+/* SIDE = 'R' or 'r' X*op( A ) = alpha*B. */
+
+/* Unchanged on exit. */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix A is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANSA - CHARACTER*1. */
+/* On entry, TRANSA specifies the form of op( A ) to be used in */
+/* the matrix multiplication as follows: */
+
+/* TRANSA = 'N' or 'n' op( A ) = A. */
+
+/* TRANSA = 'T' or 't' op( A ) = A'. */
+
+/* TRANSA = 'C' or 'c' op( A ) = conjg( A' ). */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit triangular */
+/* as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* M - INTEGER. */
+/* On entry, M specifies the number of rows of B. M must be at */
+/* least zero. */
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the number of columns of B. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - COMPLEX*16 . */
+/* On entry, ALPHA specifies the scalar alpha. When alpha is */
+/* zero then A is not referenced and B need not be set before */
+/* entry. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX*16 array of DIMENSION ( LDA, k ), where k is m */
+/* when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'. */
+/* Before entry with UPLO = 'U' or 'u', the leading k by k */
+/* upper triangular part of the array A must contain the upper */
+/* triangular matrix and the strictly lower triangular part of */
+/* A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading k by k */
+/* lower triangular part of the array A must contain the lower */
+/* triangular matrix and the strictly upper triangular part of */
+/* A is not referenced. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced either, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When SIDE = 'L' or 'l' then */
+/* LDA must be at least max( 1, m ), when SIDE = 'R' or 'r' */
+/* then LDA must be at least max( 1, n ). */
+/* Unchanged on exit. */
+
+/* B - COMPLEX*16 array of DIMENSION ( LDB, n ). */
+/* Before entry, the leading m by n part of the array B must */
+/* contain the right-hand side matrix B, and on exit is */
+/* overwritten by the solution matrix X. */
+
+/* LDB - INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. LDB must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+
+/* Level 3 Blas routine. */
+
+/* -- Written on 8-February-1989. */
+/* Jack Dongarra, Argonne National Laboratory. */
+/* Iain Duff, AERE Harwell. */
+/* Jeremy Du Croz, Numerical Algorithms Group Ltd. */
+/* Sven Hammarling, Numerical Algorithms Group Ltd. */
+
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Parameters .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ lside = lsame_(side, "L");
+ if (lside) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ noconj = lsame_(transa, "T");
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! lside && ! lsame_(side, "R")) {
+ info = 1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ info = 2;
+ } else if (! lsame_(transa, "N") && ! lsame_(transa,
+ "T") && ! lsame_(transa, "C")) {
+ info = 3;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 4;
+ } else if (*m < 0) {
+ info = 5;
+ } else if (*n < 0) {
+ info = 6;
+ } else if (*lda < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldb < max(1,*m)) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("ZTRSM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0. && alpha->i == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0., b[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lside) {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*inv( A )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (alpha->r != 1. || alpha->i != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, z__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L30: */
+ }
+ }
+ for (k = *m; k >= 1; --k) {
+ i__2 = k + j * b_dim1;
+ if (b[i__2].r != 0. || b[i__2].i != 0.) {
+ if (nounit) {
+ i__2 = k + j * b_dim1;
+ z_div(&z__1, &b[k + j * b_dim1], &a[k + k *
+ a_dim1]);
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ }
+ i__2 = k - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = k + j * b_dim1;
+ i__6 = i__ + k * a_dim1;
+ z__2.r = b[i__5].r * a[i__6].r - b[i__5].i *
+ a[i__6].i, z__2.i = b[i__5].r * a[
+ i__6].i + b[i__5].i * a[i__6].r;
+ z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4]
+ .i - z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L40: */
+ }
+ }
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (alpha->r != 1. || alpha->i != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, z__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L70: */
+ }
+ }
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * b_dim1;
+ if (b[i__3].r != 0. || b[i__3].i != 0.) {
+ if (nounit) {
+ i__3 = k + j * b_dim1;
+ z_div(&z__1, &b[k + j * b_dim1], &a[k + k *
+ a_dim1]);
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+ }
+ i__3 = *m;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = k + j * b_dim1;
+ i__7 = i__ + k * a_dim1;
+ z__2.r = b[i__6].r * a[i__7].r - b[i__6].i *
+ a[i__7].i, z__2.i = b[i__6].r * a[
+ i__7].i + b[i__6].i * a[i__7].r;
+ z__1.r = b[i__5].r - z__2.r, z__1.i = b[i__5]
+ .i - z__2.i;
+ b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L80: */
+ }
+ }
+/* L90: */
+ }
+/* L100: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*inv( A' )*B */
+/* or B := alpha*inv( conjg( A' ) )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+ z__1.i = alpha->r * b[i__3].i + alpha->i * b[
+ i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ if (noconj) {
+ i__3 = i__ - 1;
+ for (k = 1; k <= i__3; ++k) {
+ i__4 = k + i__ * a_dim1;
+ i__5 = k + j * b_dim1;
+ z__2.r = a[i__4].r * b[i__5].r - a[i__4].i *
+ b[i__5].i, z__2.i = a[i__4].r * b[
+ i__5].i + a[i__4].i * b[i__5].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L110: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[i__ + i__ * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__3 = i__ - 1;
+ for (k = 1; k <= i__3; ++k) {
+ d_cnjg(&z__3, &a[k + i__ * a_dim1]);
+ i__4 = k + j * b_dim1;
+ z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4]
+ .i, z__2.i = z__3.r * b[i__4].i +
+ z__3.i * b[i__4].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L120: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[i__ + i__ * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = temp.r, b[i__3].i = temp.i;
+/* L130: */
+ }
+/* L140: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ i__2 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__2].r - alpha->i * b[i__2].i,
+ z__1.i = alpha->r * b[i__2].i + alpha->i * b[
+ i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ if (noconj) {
+ i__2 = *m;
+ for (k = i__ + 1; k <= i__2; ++k) {
+ i__3 = k + i__ * a_dim1;
+ i__4 = k + j * b_dim1;
+ z__2.r = a[i__3].r * b[i__4].r - a[i__3].i *
+ b[i__4].i, z__2.i = a[i__3].r * b[
+ i__4].i + a[i__3].i * b[i__4].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L150: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[i__ + i__ * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__2 = *m;
+ for (k = i__ + 1; k <= i__2; ++k) {
+ d_cnjg(&z__3, &a[k + i__ * a_dim1]);
+ i__3 = k + j * b_dim1;
+ z__2.r = z__3.r * b[i__3].r - z__3.i * b[i__3]
+ .i, z__2.i = z__3.r * b[i__3].i +
+ z__3.i * b[i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L160: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[i__ + i__ * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__2 = i__ + j * b_dim1;
+ b[i__2].r = temp.r, b[i__2].i = temp.i;
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ }
+ } else {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*B*inv( A ). */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (alpha->r != 1. || alpha->i != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, z__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L190: */
+ }
+ }
+ i__2 = j - 1;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * a_dim1;
+ if (a[i__3].r != 0. || a[i__3].i != 0.) {
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = k + j * a_dim1;
+ i__7 = i__ + k * b_dim1;
+ z__2.r = a[i__6].r * b[i__7].r - a[i__6].i *
+ b[i__7].i, z__2.i = a[i__6].r * b[
+ i__7].i + a[i__6].i * b[i__7].r;
+ z__1.r = b[i__5].r - z__2.r, z__1.i = b[i__5]
+ .i - z__2.i;
+ b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L200: */
+ }
+ }
+/* L210: */
+ }
+ if (nounit) {
+ z_div(&z__1, &c_b1, &a[j + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ z__1.i = temp.r * b[i__4].i + temp.i * b[
+ i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L220: */
+ }
+ }
+/* L230: */
+ }
+ } else {
+ for (j = *n; j >= 1; --j) {
+ if (alpha->r != 1. || alpha->i != 0.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3]
+ .i, z__1.i = alpha->r * b[i__3].i +
+ alpha->i * b[i__3].r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L240: */
+ }
+ }
+ i__1 = *n;
+ for (k = j + 1; k <= i__1; ++k) {
+ i__2 = k + j * a_dim1;
+ if (a[i__2].r != 0. || a[i__2].i != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = k + j * a_dim1;
+ i__6 = i__ + k * b_dim1;
+ z__2.r = a[i__5].r * b[i__6].r - a[i__5].i *
+ b[i__6].i, z__2.i = a[i__5].r * b[
+ i__6].i + a[i__5].i * b[i__6].r;
+ z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4]
+ .i - z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L250: */
+ }
+ }
+/* L260: */
+ }
+ if (nounit) {
+ z_div(&z__1, &c_b1, &a[j + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ z__1.i = temp.r * b[i__3].i + temp.i * b[
+ i__3].r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L270: */
+ }
+ }
+/* L280: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*B*inv( A' ) */
+/* or B := alpha*B*inv( conjg( A' ) ). */
+
+ if (upper) {
+ for (k = *n; k >= 1; --k) {
+ if (nounit) {
+ if (noconj) {
+ z_div(&z__1, &c_b1, &a[k + k * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ } else {
+ d_cnjg(&z__2, &a[k + k * a_dim1]);
+ z_div(&z__1, &c_b1, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + k * b_dim1;
+ i__3 = i__ + k * b_dim1;
+ z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ z__1.i = temp.r * b[i__3].i + temp.i * b[
+ i__3].r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L290: */
+ }
+ }
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + k * a_dim1;
+ if (a[i__2].r != 0. || a[i__2].i != 0.) {
+ if (noconj) {
+ i__2 = j + k * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ } else {
+ d_cnjg(&z__1, &a[j + k * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * b_dim1;
+ z__2.r = temp.r * b[i__5].r - temp.i * b[i__5]
+ .i, z__2.i = temp.r * b[i__5].i +
+ temp.i * b[i__5].r;
+ z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4]
+ .i - z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L300: */
+ }
+ }
+/* L310: */
+ }
+ if (alpha->r != 1. || alpha->i != 0.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + k * b_dim1;
+ i__3 = i__ + k * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3]
+ .i, z__1.i = alpha->r * b[i__3].i +
+ alpha->i * b[i__3].r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L320: */
+ }
+ }
+/* L330: */
+ }
+ } else {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (nounit) {
+ if (noconj) {
+ z_div(&z__1, &c_b1, &a[k + k * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ } else {
+ d_cnjg(&z__2, &a[k + k * a_dim1]);
+ z_div(&z__1, &c_b1, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + k * b_dim1;
+ i__4 = i__ + k * b_dim1;
+ z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ z__1.i = temp.r * b[i__4].i + temp.i * b[
+ i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L340: */
+ }
+ }
+ i__2 = *n;
+ for (j = k + 1; j <= i__2; ++j) {
+ i__3 = j + k * a_dim1;
+ if (a[i__3].r != 0. || a[i__3].i != 0.) {
+ if (noconj) {
+ i__3 = j + k * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ } else {
+ d_cnjg(&z__1, &a[j + k * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * b_dim1;
+ z__2.r = temp.r * b[i__6].r - temp.i * b[i__6]
+ .i, z__2.i = temp.r * b[i__6].i +
+ temp.i * b[i__6].r;
+ z__1.r = b[i__5].r - z__2.r, z__1.i = b[i__5]
+ .i - z__2.i;
+ b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L350: */
+ }
+ }
+/* L360: */
+ }
+ if (alpha->r != 1. || alpha->i != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + k * b_dim1;
+ i__4 = i__ + k * b_dim1;
+ z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, z__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L370: */
+ }
+ }
+/* L380: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZTRSM . */
+
+} /* ztrsm_ */
diff --git a/contrib/libs/cblas/ztrsv.c b/contrib/libs/cblas/ztrsv.c
new file mode 100644
index 0000000000..df3205f8a9
--- /dev/null
+++ b/contrib/libs/cblas/ztrsv.c
@@ -0,0 +1,524 @@
+/* ztrsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ztrsv_(char *uplo, char *trans, char *diag, integer *n,
+ doublecomplex *a, integer *lda, doublecomplex *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *), d_cnjg(
+ doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, ix, jx, kx, info;
+ doublecomplex temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noconj, nounit;
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTRSV solves one of the systems of equations */
+
+/* A*x = b, or A'*x = b, or conjg( A' )*x = b, */
+
+/* where b and x are n element vectors and A is an n by n unit, or */
+/* non-unit, upper or lower triangular matrix. */
+
+/* No test for singularity or near-singularity is included in this */
+/* routine. Such tests must be performed before calling this routine. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO - CHARACTER*1. */
+/* On entry, UPLO specifies whether the matrix is an upper or */
+/* lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
+
+/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
+
+/* Unchanged on exit. */
+
+/* TRANS - CHARACTER*1. */
+/* On entry, TRANS specifies the equations to be solved as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' A*x = b. */
+
+/* TRANS = 'T' or 't' A'*x = b. */
+
+/* TRANS = 'C' or 'c' conjg( A' )*x = b. */
+
+/* Unchanged on exit. */
+
+/* DIAG - CHARACTER*1. */
+/* On entry, DIAG specifies whether or not A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* N - INTEGER. */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* A - COMPLEX*16 array of DIMENSION ( LDA, n ). */
+/* Before entry with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular matrix and the strictly lower triangular part of */
+/* A is not referenced. */
+/* Before entry with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular matrix and the strictly upper triangular part of */
+/* A is not referenced. */
+/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
+/* A are not referenced either, but are assumed to be unity. */
+/* Unchanged on exit. */
+
+/* LDA - INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, n ). */
+/* Unchanged on exit. */
+
+/* X - COMPLEX*16 array of dimension at least */
+/* ( 1 + ( n - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the n */
+/* element right-hand side vector b. On exit, X is overwritten */
+/* with the solution vector x. */
+
+/* INCX - INTEGER. */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+
+/* Level 2 Blas routine. */
+
+/* -- Written on 22-October-1986. */
+/* Jack Dongarra, Argonne National Lab. */
+/* Jeremy Du Croz, Nag Central Office. */
+/* Sven Hammarling, Nag Central Office. */
+/* Richard Hanson, Sandia National Labs. */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*lda < max(1,*n)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ }
+ if (info != 0) {
+ xerbla_("ZTRSV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+ nounit = lsame_(diag, "N");
+
+/* Set up the start point in X if the increment is not unity. This */
+/* will be ( N - 1 )*INCX too small for descending loops. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through A. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := inv( A )*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ if (x[i__1].r != 0. || x[i__1].i != 0.) {
+ if (nounit) {
+ i__1 = j;
+ z_div(&z__1, &x[j], &a[j + j * a_dim1]);
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+ }
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ i__1 = i__;
+ i__2 = i__;
+ i__3 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__3].r - temp.i * a[i__3].i,
+ z__2.i = temp.r * a[i__3].i + temp.i * a[
+ i__3].r;
+ z__1.r = x[i__2].r - z__2.r, z__1.i = x[i__2].i -
+ z__2.i;
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ if (x[i__1].r != 0. || x[i__1].i != 0.) {
+ if (nounit) {
+ i__1 = jx;
+ z_div(&z__1, &x[jx], &a[j + j * a_dim1]);
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+ }
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = jx;
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ ix -= *incx;
+ i__1 = ix;
+ i__2 = ix;
+ i__3 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__3].r - temp.i * a[i__3].i,
+ z__2.i = temp.r * a[i__3].i + temp.i * a[
+ i__3].r;
+ z__1.r = x[i__2].r - z__2.r, z__1.i = x[i__2].i -
+ z__2.i;
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+/* L30: */
+ }
+ }
+ jx -= *incx;
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ if (nounit) {
+ i__2 = j;
+ z_div(&z__1, &x[j], &a[j + j * a_dim1]);
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ }
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ z__1.r = x[i__4].r - z__2.r, z__1.i = x[i__4].i -
+ z__2.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ if (nounit) {
+ i__2 = jx;
+ z_div(&z__1, &x[jx], &a[j + j * a_dim1]);
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ }
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = jx;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ i__3 = ix;
+ i__4 = ix;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ z__1.r = x[i__4].r - z__2.r, z__1.i = x[i__4].i -
+ z__2.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L70: */
+ }
+ }
+ jx += *incx;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := inv( A' )*x or x := inv( conjg( A' ) )*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ if (noconj) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__;
+ z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+ i__4].i, z__2.i = a[i__3].r * x[i__4].i +
+ a[i__3].i * x[i__4].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L90: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[j + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = i__;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+ z__2.i = z__3.r * x[i__3].i + z__3.i * x[
+ i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L100: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__2 = j;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+/* L110: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ ix = kx;
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ if (noconj) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = ix;
+ z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+ i__4].i, z__2.i = a[i__3].r * x[i__4].i +
+ a[i__3].i * x[i__4].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix += *incx;
+/* L120: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[j + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = ix;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+ z__2.i = z__3.r * x[i__3].i + z__3.i * x[
+ i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix += *incx;
+/* L130: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__2 = jx;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+ jx += *incx;
+/* L140: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ if (noconj) {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__;
+ z__2.r = a[i__2].r * x[i__3].r - a[i__2].i * x[
+ i__3].i, z__2.i = a[i__2].r * x[i__3].i +
+ a[i__2].i * x[i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L150: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[j + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__2 = i__;
+ z__2.r = z__3.r * x[i__2].r - z__3.i * x[i__2].i,
+ z__2.i = z__3.r * x[i__2].i + z__3.i * x[
+ i__2].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L160: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__1 = j;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+/* L170: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ ix = kx;
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ if (noconj) {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = i__ + j * a_dim1;
+ i__3 = ix;
+ z__2.r = a[i__2].r * x[i__3].r - a[i__2].i * x[
+ i__3].i, z__2.i = a[i__2].r * x[i__3].i +
+ a[i__2].i * x[i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix -= *incx;
+/* L180: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[j + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__2 = ix;
+ z__2.r = z__3.r * x[i__2].r - z__3.i * x[i__2].i,
+ z__2.i = z__3.r * x[i__2].i + z__3.i * x[
+ i__2].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix -= *incx;
+/* L190: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__1 = jx;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+ jx -= *incx;
+/* L200: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZTRSV . */
+
+} /* ztrsv_ */
diff --git a/contrib/libs/clapack/COPYING b/contrib/libs/clapack/COPYING
new file mode 100644
index 0000000000..d7bf953820
--- /dev/null
+++ b/contrib/libs/clapack/COPYING
@@ -0,0 +1,36 @@
+Copyright (c) 1992-2008 The University of Tennessee. All rights reserved.
+
+$COPYRIGHT$
+
+Additional copyrights may follow
+
+$HEADER$
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+- Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+- Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer listed
+ in this license in the documentation and/or other materials
+ provided with the distribution.
+
+- Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/contrib/libs/clapack/cbdsqr.c b/contrib/libs/clapack/cbdsqr.c
new file mode 100644
index 0000000000..97c02ecca9
--- /dev/null
+++ b/contrib/libs/clapack/cbdsqr.c
@@ -0,0 +1,912 @@
+/* cbdsqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b15 = -.125;
+static integer c__1 = 1;
+static real c_b49 = 1.f;
+static real c_b72 = -1.f;
+
+/* Subroutine */ int cbdsqr_(char *uplo, integer *n, integer *ncvt, integer *
+ nru, integer *ncc, real *d__, real *e, complex *vt, integer *ldvt,
+ complex *u, integer *ldu, complex *c__, integer *ldc, real *rwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+ i__2;
+ real r__1, r__2, r__3, r__4;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double pow_dd(doublereal *, doublereal *), sqrt(doublereal), r_sign(real *
+ , real *);
+
+ /* Local variables */
+ real f, g, h__;
+ integer i__, j, m;
+ real r__, cs;
+ integer ll;
+ real sn, mu;
+ integer nm1, nm12, nm13, lll;
+ real eps, sll, tol, abse;
+ integer idir;
+ real abss;
+ integer oldm;
+ real cosl;
+ integer isub, iter;
+ real unfl, sinl, cosr, smin, smax, sinr;
+ extern /* Subroutine */ int slas2_(real *, real *, real *, real *, real *)
+ ;
+ extern logical lsame_(char *, char *);
+ real oldcs;
+ extern /* Subroutine */ int clasr_(char *, char *, char *, integer *,
+ integer *, real *, real *, complex *, integer *);
+ integer oldll;
+ real shift, sigmn, oldsn;
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ integer maxit;
+ real sminl, sigmx;
+ logical lower;
+ extern /* Subroutine */ int csrot_(integer *, complex *, integer *,
+ complex *, integer *, real *, real *), slasq1_(integer *, real *,
+ real *, real *, integer *), slasv2_(real *, real *, real *, real *
+, real *, real *, real *, real *, real *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *), xerbla_(char *, integer *);
+ real sminoa;
+ extern /* Subroutine */ int slartg_(real *, real *, real *, real *, real *
+);
+ real thresh;
+ logical rotate;
+ real tolmul;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CBDSQR computes the singular values and, optionally, the right and/or */
+/* left singular vectors from the singular value decomposition (SVD) of */
+/* a real N-by-N (upper or lower) bidiagonal matrix B using the implicit */
+/* zero-shift QR algorithm. The SVD of B has the form */
+
+/* B = Q * S * P**H */
+
+/* where S is the diagonal matrix of singular values, Q is an orthogonal */
+/* matrix of left singular vectors, and P is an orthogonal matrix of */
+/* right singular vectors. If left singular vectors are requested, this */
+/* subroutine actually returns U*Q instead of Q, and, if right singular */
+/* vectors are requested, this subroutine returns P**H*VT instead of */
+/* P**H, for given complex input matrices U and VT. When U and VT are */
+/* the unitary matrices that reduce a general matrix A to bidiagonal */
+/* form: A = U*B*VT, as computed by CGEBRD, then */
+
+/* A = (U*Q) * S * (P**H*VT) */
+
+/* is the SVD of A. Optionally, the subroutine may also compute Q**H*C */
+/* for a given complex input matrix C. */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices With */
+/* Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, */
+/* LAPACK Working Note #3 (or SIAM J. Sci. Statist. Comput. vol. 11, */
+/* no. 5, pp. 873-912, Sept 1990) and */
+/* "Accurate singular values and differential qd algorithms," by */
+/* B. Parlett and V. Fernando, Technical Report CPAM-554, Mathematics */
+/* Department, University of California at Berkeley, July 1992 */
+/* for a detailed description of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': B is upper bidiagonal; */
+/* = 'L': B is lower bidiagonal. */
+
+/* N (input) INTEGER */
+/* The order of the matrix B. N >= 0. */
+
+/* NCVT (input) INTEGER */
+/* The number of columns of the matrix VT. NCVT >= 0. */
+
+/* NRU (input) INTEGER */
+/* The number of rows of the matrix U. NRU >= 0. */
+
+/* NCC (input) INTEGER */
+/* The number of columns of the matrix C. NCC >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the n diagonal elements of the bidiagonal matrix B. */
+/* On exit, if INFO=0, the singular values of B in decreasing */
+/* order. */
+
+/* E (input/output) REAL array, dimension (N-1) */
+/* On entry, the N-1 offdiagonal elements of the bidiagonal */
+/* matrix B. */
+/* On exit, if INFO = 0, E is destroyed; if INFO > 0, D and E */
+/* will contain the diagonal and superdiagonal elements of a */
+/* bidiagonal matrix orthogonally equivalent to the one given */
+/* as input. */
+
+/* VT (input/output) COMPLEX array, dimension (LDVT, NCVT) */
+/* On entry, an N-by-NCVT matrix VT. */
+/* On exit, VT is overwritten by P**H * VT. */
+/* Not referenced if NCVT = 0. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. */
+/* LDVT >= max(1,N) if NCVT > 0; LDVT >= 1 if NCVT = 0. */
+
+/* U (input/output) COMPLEX array, dimension (LDU, N) */
+/* On entry, an NRU-by-N matrix U. */
+/* On exit, U is overwritten by U * Q. */
+/* Not referenced if NRU = 0. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= max(1,NRU). */
+
+/* C (input/output) COMPLEX array, dimension (LDC, NCC) */
+/* On entry, an N-by-NCC matrix C. */
+/* On exit, C is overwritten by Q**H * C. */
+/* Not referenced if NCC = 0. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. */
+/* LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0. */
+
+/* RWORK (workspace) REAL array, dimension (2*N) */
+/* if NCVT = NRU = NCC = 0, (max(1, 4*N-4)) otherwise */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: If INFO = -i, the i-th argument had an illegal value */
+/* > 0: the algorithm did not converge; D and E contain the */
+/* elements of a bidiagonal matrix which is orthogonally */
+/* similar to the input matrix B; if INFO = i, i */
+/* elements of E have not converged to zero. */
+
+/* Internal Parameters */
+/* =================== */
+
+/* TOLMUL REAL, default = max(10,min(100,EPS**(-1/8))) */
+/* TOLMUL controls the convergence criterion of the QR loop. */
+/* If it is positive, TOLMUL*EPS is the desired relative */
+/* precision in the computed singular values. */
+/* If it is negative, abs(TOLMUL*EPS*sigma_max) is the */
+/* desired absolute accuracy in the computed singular */
+/* values (corresponds to relative accuracy */
+/* abs(TOLMUL*EPS) in the largest singular value. */
+/* abs(TOLMUL) should be between 1 and 1/EPS, and preferably */
+/* between 10 (for fast convergence) and .1/EPS */
+/* (for there to be some accuracy in the results). */
+/* Default is to lose at either one eighth or 2 of the */
+/* available decimal digits in each computed singular value */
+/* (whichever is smaller). */
+
+/* MAXITR INTEGER, default = 6 */
+/* MAXITR controls the maximum number of passes of the */
+/* algorithm through its inner loop. The algorithms stops */
+/* (and so fails to converge) if the number of passes */
+/* through the inner loop exceeds MAXITR*N**2. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ lower = lsame_(uplo, "L");
+ if (! lsame_(uplo, "U") && ! lower) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ncvt < 0) {
+ *info = -3;
+ } else if (*nru < 0) {
+ *info = -4;
+ } else if (*ncc < 0) {
+ *info = -5;
+ } else if (*ncvt == 0 && *ldvt < 1 || *ncvt > 0 && *ldvt < max(1,*n)) {
+ *info = -9;
+ } else if (*ldu < max(1,*nru)) {
+ *info = -11;
+ } else if (*ncc == 0 && *ldc < 1 || *ncc > 0 && *ldc < max(1,*n)) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CBDSQR", &i__1);
+ return 0;
+ }
+ if (*n == 0) {
+ return 0;
+ }
+ if (*n == 1) {
+ goto L160;
+ }
+
+/* ROTATE is true if any singular vectors desired, false otherwise */
+
+ rotate = *ncvt > 0 || *nru > 0 || *ncc > 0;
+
+/* If no singular vectors desired, use qd algorithm */
+
+ if (! rotate) {
+ slasq1_(n, &d__[1], &e[1], &rwork[1], info);
+ return 0;
+ }
+
+ nm1 = *n - 1;
+ nm12 = nm1 + nm1;
+ nm13 = nm12 + nm1;
+ idir = 0;
+
+/* Get machine constants */
+
+ eps = slamch_("Epsilon");
+ unfl = slamch_("Safe minimum");
+
+/* If matrix lower bidiagonal, rotate to be upper bidiagonal */
+/* by applying Givens rotations on the left */
+
+ if (lower) {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ slartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ rwork[i__] = cs;
+ rwork[nm1 + i__] = sn;
+/* L10: */
+ }
+
+/* Update singular vectors if desired */
+
+ if (*nru > 0) {
+ clasr_("R", "V", "F", nru, n, &rwork[1], &rwork[*n], &u[u_offset],
+ ldu);
+ }
+ if (*ncc > 0) {
+ clasr_("L", "V", "F", n, ncc, &rwork[1], &rwork[*n], &c__[
+ c_offset], ldc);
+ }
+ }
+
+/* Compute singular values to relative accuracy TOL */
+/* (By setting TOL to be negative, algorithm will compute */
+/* singular values to absolute accuracy ABS(TOL)*norm(input matrix)) */
+
+/* Computing MAX */
+/* Computing MIN */
+ d__1 = (doublereal) eps;
+ r__3 = 100.f, r__4 = pow_dd(&d__1, &c_b15);
+ r__1 = 10.f, r__2 = dmin(r__3,r__4);
+ tolmul = dmax(r__1,r__2);
+ tol = tolmul * eps;
+
+/* Compute approximate maximum, minimum singular values */
+
+ smax = 0.f;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__2 = smax, r__3 = (r__1 = d__[i__], dabs(r__1));
+ smax = dmax(r__2,r__3);
+/* L20: */
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__2 = smax, r__3 = (r__1 = e[i__], dabs(r__1));
+ smax = dmax(r__2,r__3);
+/* L30: */
+ }
+ sminl = 0.f;
+ if (tol >= 0.f) {
+
+/* Relative accuracy desired */
+
+ sminoa = dabs(d__[1]);
+ if (sminoa == 0.f) {
+ goto L50;
+ }
+ mu = sminoa;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ mu = (r__2 = d__[i__], dabs(r__2)) * (mu / (mu + (r__1 = e[i__ -
+ 1], dabs(r__1))));
+ sminoa = dmin(sminoa,mu);
+ if (sminoa == 0.f) {
+ goto L50;
+ }
+/* L40: */
+ }
+L50:
+ sminoa /= sqrt((real) (*n));
+/* Computing MAX */
+ r__1 = tol * sminoa, r__2 = *n * 6 * *n * unfl;
+ thresh = dmax(r__1,r__2);
+ } else {
+
+/* Absolute accuracy desired */
+
+/* Computing MAX */
+ r__1 = dabs(tol) * smax, r__2 = *n * 6 * *n * unfl;
+ thresh = dmax(r__1,r__2);
+ }
+
+/* Prepare for main iteration loop for the singular values */
+/* (MAXIT is the maximum number of passes through the inner */
+/* loop permitted before nonconvergence signalled.) */
+
+ maxit = *n * 6 * *n;
+ iter = 0;
+ oldll = -1;
+ oldm = -1;
+
+/* M points to last element of unconverged part of matrix */
+
+ m = *n;
+
+/* Begin main iteration loop */
+
+L60:
+
+/* Check for convergence or exceeding iteration count */
+
+ if (m <= 1) {
+ goto L160;
+ }
+ if (iter > maxit) {
+ goto L200;
+ }
+
+/* Find diagonal block of matrix to work on */
+
+ if (tol < 0.f && (r__1 = d__[m], dabs(r__1)) <= thresh) {
+ d__[m] = 0.f;
+ }
+ smax = (r__1 = d__[m], dabs(r__1));
+ smin = smax;
+ i__1 = m - 1;
+ for (lll = 1; lll <= i__1; ++lll) {
+ ll = m - lll;
+ abss = (r__1 = d__[ll], dabs(r__1));
+ abse = (r__1 = e[ll], dabs(r__1));
+ if (tol < 0.f && abss <= thresh) {
+ d__[ll] = 0.f;
+ }
+ if (abse <= thresh) {
+ goto L80;
+ }
+ smin = dmin(smin,abss);
+/* Computing MAX */
+ r__1 = max(smax,abss);
+ smax = dmax(r__1,abse);
+/* L70: */
+ }
+ ll = 0;
+ goto L90;
+L80:
+ e[ll] = 0.f;
+
+/* Matrix splits since E(LL) = 0 */
+
+ if (ll == m - 1) {
+
+/* Convergence of bottom singular value, return to top of loop */
+
+ --m;
+ goto L60;
+ }
+L90:
+ ++ll;
+
+/* E(LL) through E(M-1) are nonzero, E(LL-1) is zero */
+
+ if (ll == m - 1) {
+
+/* 2 by 2 block, handle separately */
+
+ slasv2_(&d__[m - 1], &e[m - 1], &d__[m], &sigmn, &sigmx, &sinr, &cosr,
+ &sinl, &cosl);
+ d__[m - 1] = sigmx;
+ e[m - 1] = 0.f;
+ d__[m] = sigmn;
+
+/* Compute singular vectors, if desired */
+
+ if (*ncvt > 0) {
+ csrot_(ncvt, &vt[m - 1 + vt_dim1], ldvt, &vt[m + vt_dim1], ldvt, &
+ cosr, &sinr);
+ }
+ if (*nru > 0) {
+ csrot_(nru, &u[(m - 1) * u_dim1 + 1], &c__1, &u[m * u_dim1 + 1], &
+ c__1, &cosl, &sinl);
+ }
+ if (*ncc > 0) {
+ csrot_(ncc, &c__[m - 1 + c_dim1], ldc, &c__[m + c_dim1], ldc, &
+ cosl, &sinl);
+ }
+ m += -2;
+ goto L60;
+ }
+
+/* If working on new submatrix, choose shift direction */
+/* (from larger end diagonal element towards smaller) */
+
+ if (ll > oldm || m < oldll) {
+ if ((r__1 = d__[ll], dabs(r__1)) >= (r__2 = d__[m], dabs(r__2))) {
+
+/* Chase bulge from top (big end) to bottom (small end) */
+
+ idir = 1;
+ } else {
+
+/* Chase bulge from bottom (big end) to top (small end) */
+
+ idir = 2;
+ }
+ }
+
+/* Apply convergence tests */
+
+ if (idir == 1) {
+
+/* Run convergence test in forward direction */
+/* First apply standard test to bottom of matrix */
+
+ if ((r__2 = e[m - 1], dabs(r__2)) <= dabs(tol) * (r__1 = d__[m], dabs(
+ r__1)) || tol < 0.f && (r__3 = e[m - 1], dabs(r__3)) <=
+ thresh) {
+ e[m - 1] = 0.f;
+ goto L60;
+ }
+
+ if (tol >= 0.f) {
+
+/* If relative accuracy desired, */
+/* apply convergence criterion forward */
+
+ mu = (r__1 = d__[ll], dabs(r__1));
+ sminl = mu;
+ i__1 = m - 1;
+ for (lll = ll; lll <= i__1; ++lll) {
+ if ((r__1 = e[lll], dabs(r__1)) <= tol * mu) {
+ e[lll] = 0.f;
+ goto L60;
+ }
+ mu = (r__2 = d__[lll + 1], dabs(r__2)) * (mu / (mu + (r__1 =
+ e[lll], dabs(r__1))));
+ sminl = dmin(sminl,mu);
+/* L100: */
+ }
+ }
+
+ } else {
+
+/* Run convergence test in backward direction */
+/* First apply standard test to top of matrix */
+
+ if ((r__2 = e[ll], dabs(r__2)) <= dabs(tol) * (r__1 = d__[ll], dabs(
+ r__1)) || tol < 0.f && (r__3 = e[ll], dabs(r__3)) <= thresh) {
+ e[ll] = 0.f;
+ goto L60;
+ }
+
+ if (tol >= 0.f) {
+
+/* If relative accuracy desired, */
+/* apply convergence criterion backward */
+
+ mu = (r__1 = d__[m], dabs(r__1));
+ sminl = mu;
+ i__1 = ll;
+ for (lll = m - 1; lll >= i__1; --lll) {
+ if ((r__1 = e[lll], dabs(r__1)) <= tol * mu) {
+ e[lll] = 0.f;
+ goto L60;
+ }
+ mu = (r__2 = d__[lll], dabs(r__2)) * (mu / (mu + (r__1 = e[
+ lll], dabs(r__1))));
+ sminl = dmin(sminl,mu);
+/* L110: */
+ }
+ }
+ }
+ oldll = ll;
+ oldm = m;
+
+/* Compute shift. First, test if shifting would ruin relative */
+/* accuracy, and if so set the shift to zero. */
+
+/* Computing MAX */
+ r__1 = eps, r__2 = tol * .01f;
+ if (tol >= 0.f && *n * tol * (sminl / smax) <= dmax(r__1,r__2)) {
+
+/* Use a zero shift to avoid loss of relative accuracy */
+
+ shift = 0.f;
+ } else {
+
+/* Compute the shift from 2-by-2 block at end of matrix */
+
+ if (idir == 1) {
+ sll = (r__1 = d__[ll], dabs(r__1));
+ slas2_(&d__[m - 1], &e[m - 1], &d__[m], &shift, &r__);
+ } else {
+ sll = (r__1 = d__[m], dabs(r__1));
+ slas2_(&d__[ll], &e[ll], &d__[ll + 1], &shift, &r__);
+ }
+
+/* Test if shift negligible, and if so set to zero */
+
+ if (sll > 0.f) {
+/* Computing 2nd power */
+ r__1 = shift / sll;
+ if (r__1 * r__1 < eps) {
+ shift = 0.f;
+ }
+ }
+ }
+
+/* Increment iteration count */
+
+ iter = iter + m - ll;
+
+/* If SHIFT = 0, do simplified QR iteration */
+
+ if (shift == 0.f) {
+ if (idir == 1) {
+
+/* Chase bulge from top to bottom */
+/* Save cosines and sines for later singular vector updates */
+
+ cs = 1.f;
+ oldcs = 1.f;
+ i__1 = m - 1;
+ for (i__ = ll; i__ <= i__1; ++i__) {
+ r__1 = d__[i__] * cs;
+ slartg_(&r__1, &e[i__], &cs, &sn, &r__);
+ if (i__ > ll) {
+ e[i__ - 1] = oldsn * r__;
+ }
+ r__1 = oldcs * r__;
+ r__2 = d__[i__ + 1] * sn;
+ slartg_(&r__1, &r__2, &oldcs, &oldsn, &d__[i__]);
+ rwork[i__ - ll + 1] = cs;
+ rwork[i__ - ll + 1 + nm1] = sn;
+ rwork[i__ - ll + 1 + nm12] = oldcs;
+ rwork[i__ - ll + 1 + nm13] = oldsn;
+/* L120: */
+ }
+ h__ = d__[m] * cs;
+ d__[m] = h__ * oldcs;
+ e[m - 1] = h__ * oldsn;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ clasr_("L", "V", "F", &i__1, ncvt, &rwork[1], &rwork[*n], &vt[
+ ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ clasr_("R", "V", "F", nru, &i__1, &rwork[nm12 + 1], &rwork[
+ nm13 + 1], &u[ll * u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ clasr_("L", "V", "F", &i__1, ncc, &rwork[nm12 + 1], &rwork[
+ nm13 + 1], &c__[ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((r__1 = e[m - 1], dabs(r__1)) <= thresh) {
+ e[m - 1] = 0.f;
+ }
+
+ } else {
+
+/* Chase bulge from bottom to top */
+/* Save cosines and sines for later singular vector updates */
+
+ cs = 1.f;
+ oldcs = 1.f;
+ i__1 = ll + 1;
+ for (i__ = m; i__ >= i__1; --i__) {
+ r__1 = d__[i__] * cs;
+ slartg_(&r__1, &e[i__ - 1], &cs, &sn, &r__);
+ if (i__ < m) {
+ e[i__] = oldsn * r__;
+ }
+ r__1 = oldcs * r__;
+ r__2 = d__[i__ - 1] * sn;
+ slartg_(&r__1, &r__2, &oldcs, &oldsn, &d__[i__]);
+ rwork[i__ - ll] = cs;
+ rwork[i__ - ll + nm1] = -sn;
+ rwork[i__ - ll + nm12] = oldcs;
+ rwork[i__ - ll + nm13] = -oldsn;
+/* L130: */
+ }
+ h__ = d__[ll] * cs;
+ d__[ll] = h__ * oldcs;
+ e[ll] = h__ * oldsn;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ clasr_("L", "V", "B", &i__1, ncvt, &rwork[nm12 + 1], &rwork[
+ nm13 + 1], &vt[ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ clasr_("R", "V", "B", nru, &i__1, &rwork[1], &rwork[*n], &u[
+ ll * u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ clasr_("L", "V", "B", &i__1, ncc, &rwork[1], &rwork[*n], &c__[
+ ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((r__1 = e[ll], dabs(r__1)) <= thresh) {
+ e[ll] = 0.f;
+ }
+ }
+ } else {
+
+/* Use nonzero shift */
+
+ if (idir == 1) {
+
+/* Chase bulge from top to bottom */
+/* Save cosines and sines for later singular vector updates */
+
+ f = ((r__1 = d__[ll], dabs(r__1)) - shift) * (r_sign(&c_b49, &d__[
+ ll]) + shift / d__[ll]);
+ g = e[ll];
+ i__1 = m - 1;
+ for (i__ = ll; i__ <= i__1; ++i__) {
+ slartg_(&f, &g, &cosr, &sinr, &r__);
+ if (i__ > ll) {
+ e[i__ - 1] = r__;
+ }
+ f = cosr * d__[i__] + sinr * e[i__];
+ e[i__] = cosr * e[i__] - sinr * d__[i__];
+ g = sinr * d__[i__ + 1];
+ d__[i__ + 1] = cosr * d__[i__ + 1];
+ slartg_(&f, &g, &cosl, &sinl, &r__);
+ d__[i__] = r__;
+ f = cosl * e[i__] + sinl * d__[i__ + 1];
+ d__[i__ + 1] = cosl * d__[i__ + 1] - sinl * e[i__];
+ if (i__ < m - 1) {
+ g = sinl * e[i__ + 1];
+ e[i__ + 1] = cosl * e[i__ + 1];
+ }
+ rwork[i__ - ll + 1] = cosr;
+ rwork[i__ - ll + 1 + nm1] = sinr;
+ rwork[i__ - ll + 1 + nm12] = cosl;
+ rwork[i__ - ll + 1 + nm13] = sinl;
+/* L140: */
+ }
+ e[m - 1] = f;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ clasr_("L", "V", "F", &i__1, ncvt, &rwork[1], &rwork[*n], &vt[
+ ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ clasr_("R", "V", "F", nru, &i__1, &rwork[nm12 + 1], &rwork[
+ nm13 + 1], &u[ll * u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ clasr_("L", "V", "F", &i__1, ncc, &rwork[nm12 + 1], &rwork[
+ nm13 + 1], &c__[ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((r__1 = e[m - 1], dabs(r__1)) <= thresh) {
+ e[m - 1] = 0.f;
+ }
+
+ } else {
+
+/* Chase bulge from bottom to top */
+/* Save cosines and sines for later singular vector updates */
+
+ f = ((r__1 = d__[m], dabs(r__1)) - shift) * (r_sign(&c_b49, &d__[
+ m]) + shift / d__[m]);
+ g = e[m - 1];
+ i__1 = ll + 1;
+ for (i__ = m; i__ >= i__1; --i__) {
+ slartg_(&f, &g, &cosr, &sinr, &r__);
+ if (i__ < m) {
+ e[i__] = r__;
+ }
+ f = cosr * d__[i__] + sinr * e[i__ - 1];
+ e[i__ - 1] = cosr * e[i__ - 1] - sinr * d__[i__];
+ g = sinr * d__[i__ - 1];
+ d__[i__ - 1] = cosr * d__[i__ - 1];
+ slartg_(&f, &g, &cosl, &sinl, &r__);
+ d__[i__] = r__;
+ f = cosl * e[i__ - 1] + sinl * d__[i__ - 1];
+ d__[i__ - 1] = cosl * d__[i__ - 1] - sinl * e[i__ - 1];
+ if (i__ > ll + 1) {
+ g = sinl * e[i__ - 2];
+ e[i__ - 2] = cosl * e[i__ - 2];
+ }
+ rwork[i__ - ll] = cosr;
+ rwork[i__ - ll + nm1] = -sinr;
+ rwork[i__ - ll + nm12] = cosl;
+ rwork[i__ - ll + nm13] = -sinl;
+/* L150: */
+ }
+ e[ll] = f;
+
+/* Test convergence */
+
+ if ((r__1 = e[ll], dabs(r__1)) <= thresh) {
+ e[ll] = 0.f;
+ }
+
+/* Update singular vectors if desired */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ clasr_("L", "V", "B", &i__1, ncvt, &rwork[nm12 + 1], &rwork[
+ nm13 + 1], &vt[ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ clasr_("R", "V", "B", nru, &i__1, &rwork[1], &rwork[*n], &u[
+ ll * u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ clasr_("L", "V", "B", &i__1, ncc, &rwork[1], &rwork[*n], &c__[
+ ll + c_dim1], ldc);
+ }
+ }
+ }
+
+/* QR iteration finished, go back and check convergence */
+
+ goto L60;
+
+/* All singular values converged, so make them positive */
+
+L160:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] < 0.f) {
+ d__[i__] = -d__[i__];
+
+/* Change sign of singular vectors, if desired */
+
+ if (*ncvt > 0) {
+ csscal_(ncvt, &c_b72, &vt[i__ + vt_dim1], ldvt);
+ }
+ }
+/* L170: */
+ }
+
+/* Sort the singular values into decreasing order (insertion sort on */
+/* singular values, but only one transposition per singular vector) */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Scan for smallest D(I) */
+
+ isub = 1;
+ smin = d__[1];
+ i__2 = *n + 1 - i__;
+ for (j = 2; j <= i__2; ++j) {
+ if (d__[j] <= smin) {
+ isub = j;
+ smin = d__[j];
+ }
+/* L180: */
+ }
+ if (isub != *n + 1 - i__) {
+
+/* Swap singular values and vectors */
+
+ d__[isub] = d__[*n + 1 - i__];
+ d__[*n + 1 - i__] = smin;
+ if (*ncvt > 0) {
+ cswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[*n + 1 - i__ +
+ vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ cswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[(*n + 1 - i__) *
+ u_dim1 + 1], &c__1);
+ }
+ if (*ncc > 0) {
+ cswap_(ncc, &c__[isub + c_dim1], ldc, &c__[*n + 1 - i__ +
+ c_dim1], ldc);
+ }
+ }
+/* L190: */
+ }
+ goto L220;
+
+/* Maximum number of iterations exceeded, failure to converge */
+
+L200:
+ *info = 0;
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (e[i__] != 0.f) {
+ ++(*info);
+ }
+/* L210: */
+ }
+L220:
+ return 0;
+
+/* End of CBDSQR */
+
+} /* cbdsqr_ */
diff --git a/contrib/libs/clapack/cgbbrd.c b/contrib/libs/clapack/cgbbrd.c
new file mode 100644
index 0000000000..113b54ab59
--- /dev/null
+++ b/contrib/libs/clapack/cgbbrd.c
@@ -0,0 +1,649 @@
+/* cgbbrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int cgbbrd_(char *vect, integer *m, integer *n, integer *ncc,
+ integer *kl, integer *ku, complex *ab, integer *ldab, real *d__,
+ real *e, complex *q, integer *ldq, complex *pt, integer *ldpt,
+ complex *c__, integer *ldc, complex *work, real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, c_dim1, c_offset, pt_dim1, pt_offset, q_dim1,
+ q_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+ double c_abs(complex *);
+
+ /* Local variables */
+ integer i__, j, l;
+ complex t;
+ integer j1, j2, kb;
+ complex ra, rb;
+ real rc;
+ integer kk, ml, nr, mu;
+ complex rs;
+ integer kb1, ml0, mu0, klm, kun, nrt, klu1, inca;
+ real abst;
+ extern /* Subroutine */ int crot_(integer *, complex *, integer *,
+ complex *, integer *, real *, complex *), cscal_(integer *,
+ complex *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ logical wantb, wantc;
+ integer minmn;
+ logical wantq;
+ extern /* Subroutine */ int claset_(char *, integer *, integer *, complex
+ *, complex *, complex *, integer *), clartg_(complex *,
+ complex *, real *, complex *, complex *), xerbla_(char *, integer
+ *), clargv_(integer *, complex *, integer *, complex *,
+ integer *, real *, integer *), clartv_(integer *, complex *,
+ integer *, complex *, integer *, real *, complex *, integer *);
+ logical wantpt;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGBBRD reduces a complex general m-by-n band matrix A to real upper */
+/* bidiagonal form B by a unitary transformation: Q' * A * P = B. */
+
+/* The routine computes B, and optionally forms Q or P', or computes */
+/* Q'*C for a given matrix C. */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* Specifies whether or not the matrices Q and P' are to be */
+/* formed. */
+/* = 'N': do not form Q or P'; */
+/* = 'Q': form Q only; */
+/* = 'P': form P' only; */
+/* = 'B': form both. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NCC (input) INTEGER */
+/* The number of columns of the matrix C. NCC >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals of the matrix A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals of the matrix A. KU >= 0. */
+
+/* AB (input/output) COMPLEX array, dimension (LDAB,N) */
+/* On entry, the m-by-n band matrix A, stored in rows 1 to */
+/* KL+KU+1. The j-th column of A is stored in the j-th column of */
+/* the array AB as follows: */
+/* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl). */
+/* On exit, A is overwritten by values generated during the */
+/* reduction. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array A. LDAB >= KL+KU+1. */
+
+/* D (output) REAL array, dimension (min(M,N)) */
+/* The diagonal elements of the bidiagonal matrix B. */
+
+/* E (output) REAL array, dimension (min(M,N)-1) */
+/* The superdiagonal elements of the bidiagonal matrix B. */
+
+/* Q (output) COMPLEX array, dimension (LDQ,M) */
+/* If VECT = 'Q' or 'B', the m-by-m unitary matrix Q. */
+/* If VECT = 'N' or 'P', the array Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. */
+/* LDQ >= max(1,M) if VECT = 'Q' or 'B'; LDQ >= 1 otherwise. */
+
+/* PT (output) COMPLEX array, dimension (LDPT,N) */
+/* If VECT = 'P' or 'B', the n-by-n unitary matrix P'. */
+/* If VECT = 'N' or 'Q', the array PT is not referenced. */
+
+/* LDPT (input) INTEGER */
+/* The leading dimension of the array PT. */
+/* LDPT >= max(1,N) if VECT = 'P' or 'B'; LDPT >= 1 otherwise. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,NCC) */
+/* On entry, an m-by-ncc matrix C. */
+/* On exit, C is overwritten by Q'*C. */
+/* C is not referenced if NCC = 0. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. */
+/* LDC >= max(1,M) if NCC > 0; LDC >= 1 if NCC = 0. */
+
+/* WORK (workspace) COMPLEX array, dimension (max(M,N)) */
+
+/* RWORK (workspace) REAL array, dimension (max(M,N)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --d__;
+ --e;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ pt_dim1 = *ldpt;
+ pt_offset = 1 + pt_dim1;
+ pt -= pt_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ wantb = lsame_(vect, "B");
+ wantq = lsame_(vect, "Q") || wantb;
+ wantpt = lsame_(vect, "P") || wantb;
+ wantc = *ncc > 0;
+ klu1 = *kl + *ku + 1;
+ *info = 0;
+ if (! wantq && ! wantpt && ! lsame_(vect, "N")) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ncc < 0) {
+ *info = -4;
+ } else if (*kl < 0) {
+ *info = -5;
+ } else if (*ku < 0) {
+ *info = -6;
+ } else if (*ldab < klu1) {
+ *info = -8;
+ } else if (*ldq < 1 || wantq && *ldq < max(1,*m)) {
+ *info = -12;
+ } else if (*ldpt < 1 || wantpt && *ldpt < max(1,*n)) {
+ *info = -14;
+ } else if (*ldc < 1 || wantc && *ldc < max(1,*m)) {
+ *info = -16;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGBBRD", &i__1);
+ return 0;
+ }
+
+/* Initialize Q and P' to the unit matrix, if needed */
+
+ if (wantq) {
+ claset_("Full", m, m, &c_b1, &c_b2, &q[q_offset], ldq);
+ }
+ if (wantpt) {
+ claset_("Full", n, n, &c_b1, &c_b2, &pt[pt_offset], ldpt);
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ minmn = min(*m,*n);
+
+ if (*kl + *ku > 1) {
+
+/* Reduce to upper bidiagonal form if KU > 0; if KU = 0, reduce */
+/* first to lower bidiagonal form and then transform to upper */
+/* bidiagonal */
+
+ if (*ku > 0) {
+ ml0 = 1;
+ mu0 = 2;
+ } else {
+ ml0 = 2;
+ mu0 = 1;
+ }
+
+/* Wherever possible, plane rotations are generated and applied in */
+/* vector operations of length NR over the index set J1:J2:KLU1. */
+
+/* The complex sines of the plane rotations are stored in WORK, */
+/* and the real cosines in RWORK. */
+
+/* Computing MIN */
+ i__1 = *m - 1;
+ klm = min(i__1,*kl);
+/* Computing MIN */
+ i__1 = *n - 1;
+ kun = min(i__1,*ku);
+ kb = klm + kun;
+ kb1 = kb + 1;
+ inca = kb1 * *ldab;
+ nr = 0;
+ j1 = klm + 2;
+ j2 = 1 - kun;
+
+ i__1 = minmn;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Reduce i-th column and i-th row of matrix to bidiagonal form */
+
+ ml = klm + 1;
+ mu = kun + 1;
+ i__2 = kb;
+ for (kk = 1; kk <= i__2; ++kk) {
+ j1 += kb;
+ j2 += kb;
+
+/* generate plane rotations to annihilate nonzero elements */
+/* which have been created below the band */
+
+ if (nr > 0) {
+ clargv_(&nr, &ab[klu1 + (j1 - klm - 1) * ab_dim1], &inca,
+ &work[j1], &kb1, &rwork[j1], &kb1);
+ }
+
+/* apply plane rotations from the left */
+
+ i__3 = kb;
+ for (l = 1; l <= i__3; ++l) {
+ if (j2 - klm + l - 1 > *n) {
+ nrt = nr - 1;
+ } else {
+ nrt = nr;
+ }
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[klu1 - l + (j1 - klm + l - 1) *
+ ab_dim1], &inca, &ab[klu1 - l + 1 + (j1 - klm
+ + l - 1) * ab_dim1], &inca, &rwork[j1], &work[
+ j1], &kb1);
+ }
+/* L10: */
+ }
+
+ if (ml > ml0) {
+ if (ml <= *m - i__ + 1) {
+
+/* generate plane rotation to annihilate a(i+ml-1,i) */
+/* within the band, and apply rotation from the left */
+
+ clartg_(&ab[*ku + ml - 1 + i__ * ab_dim1], &ab[*ku +
+ ml + i__ * ab_dim1], &rwork[i__ + ml - 1], &
+ work[i__ + ml - 1], &ra);
+ i__3 = *ku + ml - 1 + i__ * ab_dim1;
+ ab[i__3].r = ra.r, ab[i__3].i = ra.i;
+ if (i__ < *n) {
+/* Computing MIN */
+ i__4 = *ku + ml - 2, i__5 = *n - i__;
+ i__3 = min(i__4,i__5);
+ i__6 = *ldab - 1;
+ i__7 = *ldab - 1;
+ crot_(&i__3, &ab[*ku + ml - 2 + (i__ + 1) *
+ ab_dim1], &i__6, &ab[*ku + ml - 1 + (i__
+ + 1) * ab_dim1], &i__7, &rwork[i__ + ml -
+ 1], &work[i__ + ml - 1]);
+ }
+ }
+ ++nr;
+ j1 -= kb1;
+ }
+
+ if (wantq) {
+
+/* accumulate product of plane rotations in Q */
+
+ i__3 = j2;
+ i__4 = kb1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4)
+ {
+ r_cnjg(&q__1, &work[j]);
+ crot_(m, &q[(j - 1) * q_dim1 + 1], &c__1, &q[j *
+ q_dim1 + 1], &c__1, &rwork[j], &q__1);
+/* L20: */
+ }
+ }
+
+ if (wantc) {
+
+/* apply plane rotations to C */
+
+ i__4 = j2;
+ i__3 = kb1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3)
+ {
+ crot_(ncc, &c__[j - 1 + c_dim1], ldc, &c__[j + c_dim1]
+, ldc, &rwork[j], &work[j]);
+/* L30: */
+ }
+ }
+
+ if (j2 + kun > *n) {
+
+/* adjust J2 to keep within the bounds of the matrix */
+
+ --nr;
+ j2 -= kb1;
+ }
+
+ i__3 = j2;
+ i__4 = kb1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+
+/* create nonzero element a(j-1,j+ku) above the band */
+/* and store it in WORK(n+1:2*n) */
+
+ i__5 = j + kun;
+ i__6 = j;
+ i__7 = (j + kun) * ab_dim1 + 1;
+ q__1.r = work[i__6].r * ab[i__7].r - work[i__6].i * ab[
+ i__7].i, q__1.i = work[i__6].r * ab[i__7].i +
+ work[i__6].i * ab[i__7].r;
+ work[i__5].r = q__1.r, work[i__5].i = q__1.i;
+ i__5 = (j + kun) * ab_dim1 + 1;
+ i__6 = j;
+ i__7 = (j + kun) * ab_dim1 + 1;
+ q__1.r = rwork[i__6] * ab[i__7].r, q__1.i = rwork[i__6] *
+ ab[i__7].i;
+ ab[i__5].r = q__1.r, ab[i__5].i = q__1.i;
+/* L40: */
+ }
+
+/* generate plane rotations to annihilate nonzero elements */
+/* which have been generated above the band */
+
+ if (nr > 0) {
+ clargv_(&nr, &ab[(j1 + kun - 1) * ab_dim1 + 1], &inca, &
+ work[j1 + kun], &kb1, &rwork[j1 + kun], &kb1);
+ }
+
+/* apply plane rotations from the right */
+
+ i__4 = kb;
+ for (l = 1; l <= i__4; ++l) {
+ if (j2 + l - 1 > *m) {
+ nrt = nr - 1;
+ } else {
+ nrt = nr;
+ }
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[l + 1 + (j1 + kun - 1) * ab_dim1], &
+ inca, &ab[l + (j1 + kun) * ab_dim1], &inca, &
+ rwork[j1 + kun], &work[j1 + kun], &kb1);
+ }
+/* L50: */
+ }
+
+ if (ml == ml0 && mu > mu0) {
+ if (mu <= *n - i__ + 1) {
+
+/* generate plane rotation to annihilate a(i,i+mu-1) */
+/* within the band, and apply rotation from the right */
+
+ clartg_(&ab[*ku - mu + 3 + (i__ + mu - 2) * ab_dim1],
+ &ab[*ku - mu + 2 + (i__ + mu - 1) * ab_dim1],
+ &rwork[i__ + mu - 1], &work[i__ + mu - 1], &
+ ra);
+ i__4 = *ku - mu + 3 + (i__ + mu - 2) * ab_dim1;
+ ab[i__4].r = ra.r, ab[i__4].i = ra.i;
+/* Computing MIN */
+ i__3 = *kl + mu - 2, i__5 = *m - i__;
+ i__4 = min(i__3,i__5);
+ crot_(&i__4, &ab[*ku - mu + 4 + (i__ + mu - 2) *
+ ab_dim1], &c__1, &ab[*ku - mu + 3 + (i__ + mu
+ - 1) * ab_dim1], &c__1, &rwork[i__ + mu - 1],
+ &work[i__ + mu - 1]);
+ }
+ ++nr;
+ j1 -= kb1;
+ }
+
+ if (wantpt) {
+
+/* accumulate product of plane rotations in P' */
+
+ i__4 = j2;
+ i__3 = kb1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3)
+ {
+ r_cnjg(&q__1, &work[j + kun]);
+ crot_(n, &pt[j + kun - 1 + pt_dim1], ldpt, &pt[j +
+ kun + pt_dim1], ldpt, &rwork[j + kun], &q__1);
+/* L60: */
+ }
+ }
+
+ if (j2 + kb > *m) {
+
+/* adjust J2 to keep within the bounds of the matrix */
+
+ --nr;
+ j2 -= kb1;
+ }
+
+ i__3 = j2;
+ i__4 = kb1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+
+/* create nonzero element a(j+kl+ku,j+ku-1) below the */
+/* band and store it in WORK(1:n) */
+
+ i__5 = j + kb;
+ i__6 = j + kun;
+ i__7 = klu1 + (j + kun) * ab_dim1;
+ q__1.r = work[i__6].r * ab[i__7].r - work[i__6].i * ab[
+ i__7].i, q__1.i = work[i__6].r * ab[i__7].i +
+ work[i__6].i * ab[i__7].r;
+ work[i__5].r = q__1.r, work[i__5].i = q__1.i;
+ i__5 = klu1 + (j + kun) * ab_dim1;
+ i__6 = j + kun;
+ i__7 = klu1 + (j + kun) * ab_dim1;
+ q__1.r = rwork[i__6] * ab[i__7].r, q__1.i = rwork[i__6] *
+ ab[i__7].i;
+ ab[i__5].r = q__1.r, ab[i__5].i = q__1.i;
+/* L70: */
+ }
+
+ if (ml > ml0) {
+ --ml;
+ } else {
+ --mu;
+ }
+/* L80: */
+ }
+/* L90: */
+ }
+ }
+
+ if (*ku == 0 && *kl > 0) {
+
+/* A has been reduced to complex lower bidiagonal form */
+
+/* Transform lower bidiagonal form to upper bidiagonal by applying */
+/* plane rotations from the left, overwriting superdiagonal */
+/* elements on subdiagonal elements */
+
+/* Computing MIN */
+ i__2 = *m - 1;
+ i__1 = min(i__2,*n);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ clartg_(&ab[i__ * ab_dim1 + 1], &ab[i__ * ab_dim1 + 2], &rc, &rs,
+ &ra);
+ i__2 = i__ * ab_dim1 + 1;
+ ab[i__2].r = ra.r, ab[i__2].i = ra.i;
+ if (i__ < *n) {
+ i__2 = i__ * ab_dim1 + 2;
+ i__4 = (i__ + 1) * ab_dim1 + 1;
+ q__1.r = rs.r * ab[i__4].r - rs.i * ab[i__4].i, q__1.i = rs.r
+ * ab[i__4].i + rs.i * ab[i__4].r;
+ ab[i__2].r = q__1.r, ab[i__2].i = q__1.i;
+ i__2 = (i__ + 1) * ab_dim1 + 1;
+ i__4 = (i__ + 1) * ab_dim1 + 1;
+ q__1.r = rc * ab[i__4].r, q__1.i = rc * ab[i__4].i;
+ ab[i__2].r = q__1.r, ab[i__2].i = q__1.i;
+ }
+ if (wantq) {
+ r_cnjg(&q__1, &rs);
+ crot_(m, &q[i__ * q_dim1 + 1], &c__1, &q[(i__ + 1) * q_dim1 +
+ 1], &c__1, &rc, &q__1);
+ }
+ if (wantc) {
+ crot_(ncc, &c__[i__ + c_dim1], ldc, &c__[i__ + 1 + c_dim1],
+ ldc, &rc, &rs);
+ }
+/* L100: */
+ }
+ } else {
+
+/* A has been reduced to complex upper bidiagonal form or is */
+/* diagonal */
+
+ if (*ku > 0 && *m < *n) {
+
+/* Annihilate a(m,m+1) by applying plane rotations from the */
+/* right */
+
+ i__1 = *ku + (*m + 1) * ab_dim1;
+ rb.r = ab[i__1].r, rb.i = ab[i__1].i;
+ for (i__ = *m; i__ >= 1; --i__) {
+ clartg_(&ab[*ku + 1 + i__ * ab_dim1], &rb, &rc, &rs, &ra);
+ i__1 = *ku + 1 + i__ * ab_dim1;
+ ab[i__1].r = ra.r, ab[i__1].i = ra.i;
+ if (i__ > 1) {
+ r_cnjg(&q__3, &rs);
+ q__2.r = -q__3.r, q__2.i = -q__3.i;
+ i__1 = *ku + i__ * ab_dim1;
+ q__1.r = q__2.r * ab[i__1].r - q__2.i * ab[i__1].i,
+ q__1.i = q__2.r * ab[i__1].i + q__2.i * ab[i__1]
+ .r;
+ rb.r = q__1.r, rb.i = q__1.i;
+ i__1 = *ku + i__ * ab_dim1;
+ i__2 = *ku + i__ * ab_dim1;
+ q__1.r = rc * ab[i__2].r, q__1.i = rc * ab[i__2].i;
+ ab[i__1].r = q__1.r, ab[i__1].i = q__1.i;
+ }
+ if (wantpt) {
+ r_cnjg(&q__1, &rs);
+ crot_(n, &pt[i__ + pt_dim1], ldpt, &pt[*m + 1 + pt_dim1],
+ ldpt, &rc, &q__1);
+ }
+/* L110: */
+ }
+ }
+ }
+
+/* Make diagonal and superdiagonal elements real, storing them in D */
+/* and E */
+
+ i__1 = *ku + 1 + ab_dim1;
+ t.r = ab[i__1].r, t.i = ab[i__1].i;
+ i__1 = minmn;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ abst = c_abs(&t);
+ d__[i__] = abst;
+ if (abst != 0.f) {
+ q__1.r = t.r / abst, q__1.i = t.i / abst;
+ t.r = q__1.r, t.i = q__1.i;
+ } else {
+ t.r = 1.f, t.i = 0.f;
+ }
+ if (wantq) {
+ cscal_(m, &t, &q[i__ * q_dim1 + 1], &c__1);
+ }
+ if (wantc) {
+ r_cnjg(&q__1, &t);
+ cscal_(ncc, &q__1, &c__[i__ + c_dim1], ldc);
+ }
+ if (i__ < minmn) {
+ if (*ku == 0 && *kl == 0) {
+ e[i__] = 0.f;
+ i__2 = (i__ + 1) * ab_dim1 + 1;
+ t.r = ab[i__2].r, t.i = ab[i__2].i;
+ } else {
+ if (*ku == 0) {
+ i__2 = i__ * ab_dim1 + 2;
+ r_cnjg(&q__2, &t);
+ q__1.r = ab[i__2].r * q__2.r - ab[i__2].i * q__2.i,
+ q__1.i = ab[i__2].r * q__2.i + ab[i__2].i *
+ q__2.r;
+ t.r = q__1.r, t.i = q__1.i;
+ } else {
+ i__2 = *ku + (i__ + 1) * ab_dim1;
+ r_cnjg(&q__2, &t);
+ q__1.r = ab[i__2].r * q__2.r - ab[i__2].i * q__2.i,
+ q__1.i = ab[i__2].r * q__2.i + ab[i__2].i *
+ q__2.r;
+ t.r = q__1.r, t.i = q__1.i;
+ }
+ abst = c_abs(&t);
+ e[i__] = abst;
+ if (abst != 0.f) {
+ q__1.r = t.r / abst, q__1.i = t.i / abst;
+ t.r = q__1.r, t.i = q__1.i;
+ } else {
+ t.r = 1.f, t.i = 0.f;
+ }
+ if (wantpt) {
+ cscal_(n, &t, &pt[i__ + 1 + pt_dim1], ldpt);
+ }
+ i__2 = *ku + 1 + (i__ + 1) * ab_dim1;
+ r_cnjg(&q__2, &t);
+ q__1.r = ab[i__2].r * q__2.r - ab[i__2].i * q__2.i, q__1.i =
+ ab[i__2].r * q__2.i + ab[i__2].i * q__2.r;
+ t.r = q__1.r, t.i = q__1.i;
+ }
+ }
+/* L120: */
+ }
+ return 0;
+
+/* End of CGBBRD */
+
+} /* cgbbrd_ */
diff --git a/contrib/libs/clapack/cgbcon.c b/contrib/libs/clapack/cgbcon.c
new file mode 100644
index 0000000000..3e75842892
--- /dev/null
+++ b/contrib/libs/clapack/cgbcon.c
@@ -0,0 +1,307 @@
+/* cgbcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cgbcon_(char *norm, integer *n, integer *kl, integer *ku,
+ complex *ab, integer *ldab, integer *ipiv, real *anorm, real *rcond,
+ complex *work, real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3;
+ real r__1, r__2;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer j;
+ complex t;
+ integer kd, lm, jp, ix, kase, kase1;
+ real scale;
+ extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int caxpy_(integer *, complex *, complex *,
+ integer *, complex *, integer *);
+ logical lnoti;
+ extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+ *, integer *, integer *);
+ extern integer icamax_(integer *, complex *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clatbs_(char *, char *, char *, char *,
+ integer *, integer *, complex *, integer *, complex *, real *,
+ real *, integer *), xerbla_(char *
+, integer *);
+ real ainvnm;
+ extern /* Subroutine */ int csrscl_(integer *, real *, complex *, integer
+ *);
+ logical onenrm;
+ char normin[1];
+ real smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGBCON estimates the reciprocal of the condition number of a complex */
+/* general band matrix A, in either the 1-norm or the infinity-norm, */
+/* using the LU factorization computed by CGBTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as */
+/* RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input) COMPLEX array, dimension (LDAB,N) */
+/* Details of the LU factorization of the band matrix A, as */
+/* computed by CGBTRF. U is stored as an upper triangular band */
+/* matrix with KL+KU superdiagonals in rows 1 to KL+KU+1, and */
+/* the multipliers used during the factorization are stored in */
+/* rows KL+KU+2 to 2*KL+KU+1. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= 2*KL+KU+1. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= N, row i of the matrix was */
+/* interchanged with row IPIV(i). */
+
+/* ANORM (input) REAL */
+/* If NORM = '1' or 'O', the 1-norm of the original matrix A. */
+/* If NORM = 'I', the infinity-norm of the original matrix A. */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --ipiv;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+ if (! onenrm && ! lsame_(norm, "I")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*ldab < (*kl << 1) + *ku + 1) {
+ *info = -6;
+ } else if (*anorm < 0.f) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGBCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.f;
+ if (*n == 0) {
+ *rcond = 1.f;
+ return 0;
+ } else if (*anorm == 0.f) {
+ return 0;
+ }
+
+ smlnum = slamch_("Safe minimum");
+
+/* Estimate the norm of inv(A). */
+
+ ainvnm = 0.f;
+ *(unsigned char *)normin = 'N';
+ if (onenrm) {
+ kase1 = 1;
+ } else {
+ kase1 = 2;
+ }
+ kd = *kl + *ku + 1;
+ lnoti = *kl > 0;
+ kase = 0;
+L10:
+ clacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+ if (kase == kase1) {
+
+/* Multiply by inv(L). */
+
+ if (lnoti) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *kl, i__3 = *n - j;
+ lm = min(i__2,i__3);
+ jp = ipiv[j];
+ i__2 = jp;
+ t.r = work[i__2].r, t.i = work[i__2].i;
+ if (jp != j) {
+ i__2 = jp;
+ i__3 = j;
+ work[i__2].r = work[i__3].r, work[i__2].i = work[i__3]
+ .i;
+ i__2 = j;
+ work[i__2].r = t.r, work[i__2].i = t.i;
+ }
+ q__1.r = -t.r, q__1.i = -t.i;
+ caxpy_(&lm, &q__1, &ab[kd + 1 + j * ab_dim1], &c__1, &
+ work[j + 1], &c__1);
+/* L20: */
+ }
+ }
+
+/* Multiply by inv(U). */
+
+ i__1 = *kl + *ku;
+ clatbs_("Upper", "No transpose", "Non-unit", normin, n, &i__1, &
+ ab[ab_offset], ldab, &work[1], &scale, &rwork[1], info);
+ } else {
+
+/* Multiply by inv(U'). */
+
+ i__1 = *kl + *ku;
+ clatbs_("Upper", "Conjugate transpose", "Non-unit", normin, n, &
+ i__1, &ab[ab_offset], ldab, &work[1], &scale, &rwork[1],
+ info);
+
+/* Multiply by inv(L'). */
+
+ if (lnoti) {
+ for (j = *n - 1; j >= 1; --j) {
+/* Computing MIN */
+ i__1 = *kl, i__2 = *n - j;
+ lm = min(i__1,i__2);
+ i__1 = j;
+ i__2 = j;
+ cdotc_(&q__2, &lm, &ab[kd + 1 + j * ab_dim1], &c__1, &
+ work[j + 1], &c__1);
+ q__1.r = work[i__2].r - q__2.r, q__1.i = work[i__2].i -
+ q__2.i;
+ work[i__1].r = q__1.r, work[i__1].i = q__1.i;
+ jp = ipiv[j];
+ if (jp != j) {
+ i__1 = jp;
+ t.r = work[i__1].r, t.i = work[i__1].i;
+ i__1 = jp;
+ i__2 = j;
+ work[i__1].r = work[i__2].r, work[i__1].i = work[i__2]
+ .i;
+ i__1 = j;
+ work[i__1].r = t.r, work[i__1].i = t.i;
+ }
+/* L30: */
+ }
+ }
+ }
+
+/* Divide X by 1/SCALE if doing so will not cause overflow. */
+
+ *(unsigned char *)normin = 'Y';
+ if (scale != 1.f) {
+ ix = icamax_(n, &work[1], &c__1);
+ i__1 = ix;
+ if (scale < ((r__1 = work[i__1].r, dabs(r__1)) + (r__2 = r_imag(&
+ work[ix]), dabs(r__2))) * smlnum || scale == 0.f) {
+ goto L40;
+ }
+ csrscl_(n, &scale, &work[1], &c__1);
+ }
+ goto L10;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.f) {
+ *rcond = 1.f / ainvnm / *anorm;
+ }
+
+L40:
+ return 0;
+
+/* End of CGBCON */
+
+} /* cgbcon_ */
diff --git a/contrib/libs/clapack/cgbequ.c b/contrib/libs/clapack/cgbequ.c
new file mode 100644
index 0000000000..146946043f
--- /dev/null
+++ b/contrib/libs/clapack/cgbequ.c
@@ -0,0 +1,329 @@
+/* cgbequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cgbequ_(integer *m, integer *n, integer *kl, integer *ku,
+ complex *ab, integer *ldab, real *r__, real *c__, real *rowcnd, real
+ *colcnd, real *amax, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2, r__3, r__4;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer i__, j, kd;
+ real rcmin, rcmax;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum, smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGBEQU computes row and column scalings intended to equilibrate an */
+/* M-by-N band matrix A and reduce its condition number. R returns the */
+/* row scale factors and C the column scale factors, chosen to try to */
+/* make the largest element in each row and column of the matrix B with */
+/* elements B(i,j)=R(i)*A(i,j)*C(j) have absolute value 1. */
+
+/* R(i) and C(j) are restricted to be between SMLNUM = smallest safe */
+/* number and BIGNUM = largest safe number. Use of these scaling */
+/* factors is not guaranteed to reduce the condition number of A but */
+/* works well in practice. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input) COMPLEX array, dimension (LDAB,N) */
+/* The band matrix A, stored in rows 1 to KL+KU+1. The j-th */
+/* column of A is stored in the j-th column of the array AB as */
+/* follows: */
+/* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KL+KU+1. */
+
+/* R (output) REAL array, dimension (M) */
+/* If INFO = 0, or INFO > M, R contains the row scale factors */
+/* for A. */
+
+/* C (output) REAL array, dimension (N) */
+/* If INFO = 0, C contains the column scale factors for A. */
+
+/* ROWCND (output) REAL */
+/* If INFO = 0 or INFO > M, ROWCND contains the ratio of the */
+/* smallest R(i) to the largest R(i). If ROWCND >= 0.1 and */
+/* AMAX is neither too large nor too small, it is not worth */
+/* scaling by R. */
+
+/* COLCND (output) REAL */
+/* If INFO = 0, COLCND contains the ratio of the smallest */
+/* C(i) to the largest C(i). If COLCND >= 0.1, it is not */
+/* worth scaling by C. */
+
+/* AMAX (output) REAL */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= M: the i-th row of A is exactly zero */
+/* > M: the (i-M)-th column of A is exactly zero */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --r__;
+ --c__;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*ldab < *kl + *ku + 1) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGBEQU", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ *rowcnd = 1.f;
+ *colcnd = 1.f;
+ *amax = 0.f;
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+
+/* Compute row scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__[i__] = 0.f;
+/* L10: */
+ }
+
+/* Find the maximum element in each row. */
+
+ kd = *ku + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j - *ku;
+/* Computing MIN */
+ i__4 = j + *kl;
+ i__3 = min(i__4,*m);
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+ i__2 = kd + i__ - j + j * ab_dim1;
+ r__3 = r__[i__], r__4 = (r__1 = ab[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&ab[kd + i__ - j + j * ab_dim1]), dabs(r__2));
+ r__[i__] = dmax(r__3,r__4);
+/* L20: */
+ }
+/* L30: */
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.f;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = rcmax, r__2 = r__[i__];
+ rcmax = dmax(r__1,r__2);
+/* Computing MIN */
+ r__1 = rcmin, r__2 = r__[i__];
+ rcmin = dmin(r__1,r__2);
+/* L40: */
+ }
+ *amax = rcmax;
+
+ if (rcmin == 0.f) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (r__[i__] == 0.f) {
+ *info = i__;
+ return 0;
+ }
+/* L50: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MIN */
+/* Computing MAX */
+ r__2 = r__[i__];
+ r__1 = dmax(r__2,smlnum);
+ r__[i__] = 1.f / dmin(r__1,bignum);
+/* L60: */
+ }
+
+/* Compute ROWCND = min(R(I)) / max(R(I)) */
+
+ *rowcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+ }
+
+/* Compute column scale factors */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j] = 0.f;
+/* L70: */
+ }
+
+/* Find the maximum element in each column, */
+/* assuming the row scaling computed above. */
+
+ kd = *ku + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__3 = j - *ku;
+/* Computing MIN */
+ i__4 = j + *kl;
+ i__2 = min(i__4,*m);
+ for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = kd + i__ - j + j * ab_dim1;
+ r__3 = c__[j], r__4 = ((r__1 = ab[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&ab[kd + i__ - j + j * ab_dim1]), dabs(r__2))) *
+ r__[i__];
+ c__[j] = dmax(r__3,r__4);
+/* L80: */
+ }
+/* L90: */
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ r__1 = rcmin, r__2 = c__[j];
+ rcmin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = rcmax, r__2 = c__[j];
+ rcmax = dmax(r__1,r__2);
+/* L100: */
+ }
+
+ if (rcmin == 0.f) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (c__[j] == 0.f) {
+ *info = *m + j;
+ return 0;
+ }
+/* L110: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+/* Computing MAX */
+ r__2 = c__[j];
+ r__1 = dmax(r__2,smlnum);
+ c__[j] = 1.f / dmin(r__1,bignum);
+/* L120: */
+ }
+
+/* Compute COLCND = min(C(J)) / max(C(J)) */
+
+ *colcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+ }
+
+ return 0;
+
+/* End of CGBEQU */
+
+} /* cgbequ_ */
diff --git a/contrib/libs/clapack/cgbequb.c b/contrib/libs/clapack/cgbequb.c
new file mode 100644
index 0000000000..76cfdf922c
--- /dev/null
+++ b/contrib/libs/clapack/cgbequb.c
@@ -0,0 +1,353 @@
+/* cgbequb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cgbequb_(integer *m, integer *n, integer *kl, integer *
+ ku, complex *ab, integer *ldab, real *r__, real *c__, real *rowcnd,
+ real *colcnd, real *amax, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2, r__3, r__4;
+
+ /* Builtin functions */
+ double log(doublereal), r_imag(complex *), pow_ri(real *, integer *);
+
+ /* Local variables */
+ integer i__, j, kd;
+ real radix, rcmin, rcmax;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum, logrdx, smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- Contributed by James Demmel, Deaglan Halligan, Yozo Hida and -- */
+/* -- Jason Riedy of Univ. of California Berkeley. -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley and NAG Ltd. -- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGBEQUB computes row and column scalings intended to equilibrate an */
+/* M-by-N matrix A and reduce its condition number. R returns the row */
+/* scale factors and C the column scale factors, chosen to try to make */
+/* the largest element in each row and column of the matrix B with */
+/* elements B(i,j)=R(i)*A(i,j)*C(j) have an absolute value of at most */
+/* the radix. */
+
+/* R(i) and C(j) are restricted to be a power of the radix between */
+/* SMLNUM = smallest safe number and BIGNUM = largest safe number. Use */
+/* of these scaling factors is not guaranteed to reduce the condition */
+/* number of A but works well in practice. */
+
+/* This routine differs from CGEEQU by restricting the scaling factors */
+/* to a power of the radix. Barring over- and underflow, scaling by */
+/* these factors introduces no additional rounding errors. However, the */
+/* scaled entries' magnitudes are no longer approximately 1 but lie */
+/* between sqrt(radix) and 1/sqrt(radix). */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input) COMPLEX array, dimension (LDAB,N) */
+/* On entry, the matrix A in band storage, in rows 1 to KL+KU+1. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(KU+1+i-j,j) = A(i,j) for max(1,j-KU)<=i<=min(M,j+kl) */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KL+KU+1. */
+
+/* R (output) REAL array, dimension (M) */
+/* If INFO = 0 or INFO > M, R contains the row scale factors */
+/* for A. */
+
+/* C (output) REAL array, dimension (N) */
+/* If INFO = 0, C contains the column scale factors for A. */
+
+/* ROWCND (output) REAL */
+/* If INFO = 0 or INFO > M, ROWCND contains the ratio of the */
+/* smallest R(i) to the largest R(i). If ROWCND >= 0.1 and */
+/* AMAX is neither too large nor too small, it is not worth */
+/* scaling by R. */
+
+/* COLCND (output) REAL */
+/* If INFO = 0, COLCND contains the ratio of the smallest */
+/* C(i) to the largest C(i). If COLCND >= 0.1, it is not */
+/* worth scaling by C. */
+
+/* AMAX (output) REAL */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= M: the i-th row of A is exactly zero */
+/* > M: the (i-M)-th column of A is exactly zero */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --r__;
+ --c__;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*ldab < *kl + *ku + 1) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGBEQUB", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ *rowcnd = 1.f;
+ *colcnd = 1.f;
+ *amax = 0.f;
+ return 0;
+ }
+
+/* Get machine constants. Assume SMLNUM is a power of the radix. */
+
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ radix = slamch_("B");
+ logrdx = log(radix);
+
+/* Compute row scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__[i__] = 0.f;
+/* L10: */
+ }
+
+/* Find the maximum element in each row. */
+
+ kd = *ku + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j - *ku;
+/* Computing MIN */
+ i__4 = j + *kl;
+ i__3 = min(i__4,*m);
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+ i__2 = kd + i__ - j + j * ab_dim1;
+ r__3 = r__[i__], r__4 = (r__1 = ab[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&ab[kd + i__ - j + j * ab_dim1]), dabs(r__2));
+ r__[i__] = dmax(r__3,r__4);
+/* L20: */
+ }
+/* L30: */
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (r__[i__] > 0.f) {
+ i__3 = (integer) (log(r__[i__]) / logrdx);
+ r__[i__] = pow_ri(&radix, &i__3);
+ }
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.f;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = rcmax, r__2 = r__[i__];
+ rcmax = dmax(r__1,r__2);
+/* Computing MIN */
+ r__1 = rcmin, r__2 = r__[i__];
+ rcmin = dmin(r__1,r__2);
+/* L40: */
+ }
+ *amax = rcmax;
+
+ if (rcmin == 0.f) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (r__[i__] == 0.f) {
+ *info = i__;
+ return 0;
+ }
+/* L50: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MIN */
+/* Computing MAX */
+ r__2 = r__[i__];
+ r__1 = dmax(r__2,smlnum);
+ r__[i__] = 1.f / dmin(r__1,bignum);
+/* L60: */
+ }
+
+/* Compute ROWCND = min(R(I)) / max(R(I)). */
+
+ *rowcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+ }
+
+/* Compute column scale factors. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j] = 0.f;
+/* L70: */
+ }
+
+/* Find the maximum element in each column, */
+/* assuming the row scaling computed above. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__3 = j - *ku;
+/* Computing MIN */
+ i__4 = j + *kl;
+ i__2 = min(i__4,*m);
+ for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = kd + i__ - j + j * ab_dim1;
+ r__3 = c__[j], r__4 = ((r__1 = ab[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&ab[kd + i__ - j + j * ab_dim1]), dabs(r__2))) *
+ r__[i__];
+ c__[j] = dmax(r__3,r__4);
+/* L80: */
+ }
+ if (c__[j] > 0.f) {
+ i__2 = (integer) (log(c__[j]) / logrdx);
+ c__[j] = pow_ri(&radix, &i__2);
+ }
+/* L90: */
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ r__1 = rcmin, r__2 = c__[j];
+ rcmin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = rcmax, r__2 = c__[j];
+ rcmax = dmax(r__1,r__2);
+/* L100: */
+ }
+
+ if (rcmin == 0.f) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (c__[j] == 0.f) {
+ *info = *m + j;
+ return 0;
+ }
+/* L110: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+/* Computing MAX */
+ r__2 = c__[j];
+ r__1 = dmax(r__2,smlnum);
+ c__[j] = 1.f / dmin(r__1,bignum);
+/* L120: */
+ }
+
+/* Compute COLCND = min(C(J)) / max(C(J)). */
+
+ *colcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+ }
+
+ return 0;
+
+/* End of CGBEQUB */
+
+} /* cgbequb_ */
diff --git a/contrib/libs/clapack/cgbrfs.c b/contrib/libs/clapack/cgbrfs.c
new file mode 100644
index 0000000000..e2a720dc8d
--- /dev/null
+++ b/contrib/libs/clapack/cgbrfs.c
@@ -0,0 +1,492 @@
+/* cgbrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int cgbrfs_(char *trans, integer *n, integer *kl, integer *
+ ku, integer *nrhs, complex *ab, integer *ldab, complex *afb, integer *
+ ldafb, integer *ipiv, complex *b, integer *ldb, complex *x, integer *
+ ldx, real *ferr, real *berr, complex *work, real *rwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, afb_dim1, afb_offset, b_dim1, b_offset,
+ x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7;
+ real r__1, r__2, r__3, r__4;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer i__, j, k;
+ real s;
+ integer kk;
+ real xk;
+ integer nz;
+ real eps;
+ integer kase;
+ real safe1, safe2;
+ extern /* Subroutine */ int cgbmv_(char *, integer *, integer *, integer *
+, integer *, complex *, complex *, integer *, complex *, integer *
+, complex *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), caxpy_(integer *, complex *, complex *,
+ integer *, complex *, integer *);
+ integer count;
+ extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+ *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *), cgbtrs_(
+ char *, integer *, integer *, integer *, integer *, complex *,
+ integer *, integer *, complex *, integer *, integer *);
+ logical notran;
+ char transn[1], transt[1];
+ real lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGBRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is banded, and provides */
+/* error bounds and backward error estimates for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AB (input) COMPLEX array, dimension (LDAB,N) */
+/* The original band matrix A, stored in rows 1 to KL+KU+1. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(n,j+kl). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KL+KU+1. */
+
+/* AFB (input) COMPLEX array, dimension (LDAFB,N) */
+/* Details of the LU factorization of the band matrix A, as */
+/* computed by CGBTRF. U is stored as an upper triangular band */
+/* matrix with KL+KU superdiagonals in rows 1 to KL+KU+1, and */
+/* the multipliers used during the factorization are stored in */
+/* rows KL+KU+2 to 2*KL+KU+1. */
+
+/* LDAFB (input) INTEGER */
+/* The leading dimension of the array AFB. LDAFB >= 2*KL+KU+1. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices from CGBTRF; for 1<=i<=N, row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* B (input) COMPLEX array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by CGBTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ afb_dim1 = *ldafb;
+ afb_offset = 1 + afb_dim1;
+ afb -= afb_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*nrhs < 0) {
+ *info = -5;
+ } else if (*ldab < *kl + *ku + 1) {
+ *info = -7;
+ } else if (*ldafb < (*kl << 1) + *ku + 1) {
+ *info = -9;
+ } else if (*ldb < max(1,*n)) {
+ *info = -12;
+ } else if (*ldx < max(1,*n)) {
+ *info = -14;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGBRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.f;
+ berr[j] = 0.f;
+/* L10: */
+ }
+ return 0;
+ }
+
+ if (notran) {
+ *(unsigned char *)transn = 'N';
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transn = 'C';
+ *(unsigned char *)transt = 'N';
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+/* Computing MIN */
+ i__1 = *kl + *ku + 2, i__2 = *n + 1;
+ nz = min(i__1,i__2);
+ eps = slamch_("Epsilon");
+ safmin = slamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A, A**T, or A**H, depending on TRANS. */
+
+ ccopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgbmv_(trans, n, n, kl, ku, &q__1, &ab[ab_offset], ldab, &x[j *
+ x_dim1 + 1], &c__1, &c_b1, &work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[i__] = (r__1 = b[i__3].r, dabs(r__1)) + (r__2 = r_imag(&b[
+ i__ + j * b_dim1]), dabs(r__2));
+/* L30: */
+ }
+
+/* Compute abs(op(A))*abs(X) + abs(B). */
+
+ if (notran) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ kk = *ku + 1 - k;
+ i__3 = k + j * x_dim1;
+ xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[k + j
+ * x_dim1]), dabs(r__2));
+/* Computing MAX */
+ i__3 = 1, i__4 = k - *ku;
+/* Computing MIN */
+ i__6 = *n, i__7 = k + *kl;
+ i__5 = min(i__6,i__7);
+ for (i__ = max(i__3,i__4); i__ <= i__5; ++i__) {
+ i__3 = kk + i__ + k * ab_dim1;
+ rwork[i__] += ((r__1 = ab[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&ab[kk + i__ + k * ab_dim1]), dabs(r__2)))
+ * xk;
+/* L40: */
+ }
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ kk = *ku + 1 - k;
+/* Computing MAX */
+ i__5 = 1, i__3 = k - *ku;
+/* Computing MIN */
+ i__6 = *n, i__7 = k + *kl;
+ i__4 = min(i__6,i__7);
+ for (i__ = max(i__5,i__3); i__ <= i__4; ++i__) {
+ i__5 = kk + i__ + k * ab_dim1;
+ i__3 = i__ + j * x_dim1;
+ s += ((r__1 = ab[i__5].r, dabs(r__1)) + (r__2 = r_imag(&
+ ab[kk + i__ + k * ab_dim1]), dabs(r__2))) * ((
+ r__3 = x[i__3].r, dabs(r__3)) + (r__4 = r_imag(&x[
+ i__ + j * x_dim1]), dabs(r__4)));
+/* L60: */
+ }
+ rwork[k] += s;
+/* L70: */
+ }
+ }
+ s = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+/* Computing MAX */
+ i__4 = i__;
+ r__3 = s, r__4 = ((r__1 = work[i__4].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2))) / rwork[i__];
+ s = dmax(r__3,r__4);
+ } else {
+/* Computing MAX */
+ i__4 = i__;
+ r__3 = s, r__4 = ((r__1 = work[i__4].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + safe1) / (rwork[i__]
+ + safe1);
+ s = dmax(r__3,r__4);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ cgbtrs_(trans, n, kl, ku, &c__1, &afb[afb_offset], ldafb, &ipiv[1]
+, &work[1], n, info);
+ caxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use CLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+ i__4 = i__;
+ rwork[i__] = (r__1 = work[i__4].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+ i__];
+ } else {
+ i__4 = i__;
+ rwork[i__] = (r__1 = work[i__4].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+ i__] + safe1;
+ }
+/* L90: */
+ }
+
+ kase = 0;
+L100:
+ clacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)**H). */
+
+ cgbtrs_(transt, n, kl, ku, &c__1, &afb[afb_offset], ldafb, &
+ ipiv[1], &work[1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__4 = i__;
+ i__5 = i__;
+ i__3 = i__;
+ q__1.r = rwork[i__5] * work[i__3].r, q__1.i = rwork[i__5]
+ * work[i__3].i;
+ work[i__4].r = q__1.r, work[i__4].i = q__1.i;
+/* L110: */
+ }
+ } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__4 = i__;
+ i__5 = i__;
+ i__3 = i__;
+ q__1.r = rwork[i__5] * work[i__3].r, q__1.i = rwork[i__5]
+ * work[i__3].i;
+ work[i__4].r = q__1.r, work[i__4].i = q__1.i;
+/* L120: */
+ }
+ cgbtrs_(transn, n, kl, ku, &c__1, &afb[afb_offset], ldafb, &
+ ipiv[1], &work[1], n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__4 = i__ + j * x_dim1;
+ r__3 = lstres, r__4 = (r__1 = x[i__4].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[i__ + j * x_dim1]), dabs(r__2));
+ lstres = dmax(r__3,r__4);
+/* L130: */
+ }
+ if (lstres != 0.f) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of CGBRFS */
+
+} /* cgbrfs_ */
diff --git a/contrib/libs/clapack/cgbsv.c b/contrib/libs/clapack/cgbsv.c
new file mode 100644
index 0000000000..fbbdc81aff
--- /dev/null
+++ b/contrib/libs/clapack/cgbsv.c
@@ -0,0 +1,176 @@
+/* cgbsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cgbsv_(integer *n, integer *kl, integer *ku, integer *
+	nrhs, complex *ab, integer *ldab, integer *ipiv, complex *b, integer *
+	ldb, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern /* Subroutine */ int cgbtrf_(integer *, integer *, integer *,
+	    integer *, complex *, integer *, integer *, integer *), xerbla_(
+	    char *, integer *), cgbtrs_(char *, integer *, integer *,
+	    integer *, integer *, complex *, integer *, integer *, complex *,
+	    integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGBSV computes the solution to a complex system of linear equations */
+/* A * X = B, where A is a band matrix of order N with KL subdiagonals */
+/* and KU superdiagonals, and X and B are N-by-NRHS matrices. */
+
+/* The LU decomposition with partial pivoting and row interchanges is */
+/* used to factor A as A = L * U, where L is a product of permutation */
+/* and unit lower triangular matrices with KL subdiagonals, and U is */
+/* upper triangular with KL+KU superdiagonals. The factored form of A */
+/* is then used to solve the system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AB (input/output) COMPLEX array, dimension (LDAB,N) */
+/* On entry, the matrix A in band storage, in rows KL+1 to */
+/* 2*KL+KU+1; rows 1 to KL of the array need not be set. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(KL+KU+1+i-j,j) = A(i,j) for max(1,j-KU)<=i<=min(N,j+KL) */
+/* On exit, details of the factorization: U is stored as an */
+/* upper triangular band matrix with KL+KU superdiagonals in */
+/* rows 1 to KL+KU+1, and the multipliers used during the */
+/* factorization are stored in rows KL+KU+2 to 2*KL+KU+1. */
+/* See below for further details. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= 2*KL+KU+1. */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* The pivot indices that define the permutation matrix P; */
+/* row i of the matrix was interchanged with row IPIV(i). */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and the solution has not been computed. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* M = N = 6, KL = 2, KU = 1: */
+
+/* On entry: On exit: */
+
+/* * * * + + + * * * u14 u25 u36 */
+/* * * + + + + * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+/* a21 a32 a43 a54 a65 * m21 m32 m43 m54 m65 * */
+/* a31 a42 a53 a64 * * m31 m42 m53 m64 * * */
+
+/* Array elements marked * are not used by the routine; elements marked */
+/* + need not be set on entry, but are required by the routine to store */
+/* elements of U because of fill-in resulting from the row interchanges. */
+
+/* ===================================================================== */
+
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*kl < 0) {
+	*info = -2;
+    } else if (*ku < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+/* (*kl << 1) + *ku + 1 is 2*KL+KU+1: AB must carry KL extra rows of */
+/* workspace for the fill-in produced by the row interchanges (see the */
+/* band storage notes above). */
+    } else if (*ldab < (*kl << 1) + *ku + 1) {
+	*info = -6;
+    } else if (*ldb < max(*n,1)) {
+	*info = -9;
+    }
+    if (*info != 0) {
+/* Report the first illegal argument via XERBLA (INFO = -i for */
+/* argument i); the trailing blank pads the name to six characters. */
+	i__1 = -(*info);
+	xerbla_("CGBSV ", &i__1);
+	return 0;
+    }
+
+/* Compute the LU factorization of the band matrix A. */
+
+    cgbtrf_(n, n, kl, ku, &ab[ab_offset], ldab, &ipiv[1], info);
+/* Solve only when the factorization succeeded (INFO = 0, U nonsingular); */
+/* otherwise INFO > 0 from CGBTRF is passed straight back to the caller. */
+    if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+	cgbtrs_("No transpose", n, kl, ku, nrhs, &ab[ab_offset], ldab, &ipiv[
+		1], &b[b_offset], ldb, info);
+    }
+    return 0;
+
+/* End of CGBSV */
+
+} /* cgbsv_ */
diff --git a/contrib/libs/clapack/cgbsvx.c b/contrib/libs/clapack/cgbsvx.c
new file mode 100644
index 0000000000..ed72200d4c
--- /dev/null
+++ b/contrib/libs/clapack/cgbsvx.c
@@ -0,0 +1,675 @@
+/* cgbsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cgbsvx_(char *fact, char *trans, integer *n, integer *kl,
+	 integer *ku, integer *nrhs, complex *ab, integer *ldab, complex *afb,
+	 integer *ldafb, integer *ipiv, char *equed, real *r__, real *c__,
+	complex *b, integer *ldb, complex *x, integer *ldx, real *rcond, real
+	*ferr, real *berr, complex *work, real *rwork, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, afb_dim1, afb_offset, b_dim1, b_offset,
+	    x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+    real r__1, r__2;
+    complex q__1;
+
+    /* Builtin functions */
+    double c_abs(complex *);
+
+    /* Local variables */
+    integer i__, j, j1, j2;
+    real amax;
+    char norm[1];
+    extern logical lsame_(char *, char *);
+    real rcmin, rcmax, anorm;
+    extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+	    complex *, integer *);
+    logical equil;
+    extern doublereal clangb_(char *, integer *, integer *, integer *,
+	    complex *, integer *, real *);
+    extern /* Subroutine */ int claqgb_(integer *, integer *, integer *,
+	    integer *, complex *, integer *, real *, real *, real *, real *,
+	    real *, char *), cgbcon_(char *, integer *, integer *,
+	    integer *, complex *, integer *, integer *, real *, real *,
+	    complex *, real *, integer *);
+    real colcnd;
+    extern doublereal clantb_(char *, char *, char *, integer *, integer *,
+	    complex *, integer *, real *);
+    extern /* Subroutine */ int cgbequ_(integer *, integer *, integer *,
+	    integer *, complex *, integer *, real *, real *, real *, real *,
+	    real *, integer *);
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int cgbrfs_(char *, integer *, integer *, integer
+	    *, integer *, complex *, integer *, complex *, integer *, integer
+	    *, complex *, integer *, complex *, integer *, real *, real *,
+	    complex *, real *, integer *), cgbtrf_(integer *, integer
+	    *, integer *, integer *, complex *, integer *, integer *, integer
+	    *);
+    logical nofact;
+    extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+	    *, integer *, complex *, integer *), xerbla_(char *,
+	    integer *);
+    real bignum;
+    extern /* Subroutine */ int cgbtrs_(char *, integer *, integer *, integer
+	    *, integer *, complex *, integer *, integer *, complex *, integer
+	    *, integer *);
+    integer infequ;
+    logical colequ;
+    real rowcnd;
+    logical notran;
+    real smlnum;
+    logical rowequ;
+    real rpvgrw;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGBSVX uses the LU factorization to compute the solution to a complex */
+/* system of linear equations A * X = B, A**T * X = B, or A**H * X = B, */
+/* where A is a band matrix of order N with KL subdiagonals and KU */
+/* superdiagonals, and X and B are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed by this subroutine: */
+
+/* 1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/* the system: */
+/* TRANS = 'N': diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B */
+/* TRANS = 'T': (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B */
+/* TRANS = 'C': (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B */
+/* Whether or not the system will be equilibrated depends on the */
+/* scaling of the matrix A, but if equilibration is used, A is */
+/* overwritten by diag(R)*A*diag(C) and B by diag(R)*B (if TRANS='N') */
+/* or diag(C)*B (if TRANS = 'T' or 'C'). */
+
+/* 2. If FACT = 'N' or 'E', the LU decomposition is used to factor the */
+/* matrix A (after equilibration if FACT = 'E') as */
+/* A = L * U, */
+/* where L is a product of permutation and unit lower triangular */
+/* matrices with KL subdiagonals, and U is upper triangular with */
+/* KL+KU superdiagonals. */
+
+/* 3. If some U(i,i)=0, so that U is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 4. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 5. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* 6. If equilibration was used, the matrix X is premultiplied by */
+/* diag(C) (if TRANS = 'N') or diag(R) (if TRANS = 'T' or 'C') so */
+/* that it solves the original system before equilibration. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of the matrix A is */
+/* supplied on entry, and if not, whether the matrix A should be */
+/* equilibrated before it is factored. */
+/* = 'F': On entry, AFB and IPIV contain the factored form of */
+/* A. If EQUED is not 'N', the matrix A has been */
+/* equilibrated with scaling factors given by R and C. */
+/* AB, AFB, and IPIV are not modified. */
+/* = 'N': The matrix A will be copied to AFB and factored. */
+/* = 'E': The matrix A will be equilibrated if necessary, then */
+/* copied to AFB and factored. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations. */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AB (input/output) COMPLEX array, dimension (LDAB,N) */
+/* On entry, the matrix A in band storage, in rows 1 to KL+KU+1. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(KU+1+i-j,j) = A(i,j) for max(1,j-KU)<=i<=min(N,j+kl) */
+
+/* If FACT = 'F' and EQUED is not 'N', then A must have been */
+/* equilibrated by the scaling factors in R and/or C. AB is not */
+/* modified if FACT = 'F' or 'N', or if FACT = 'E' and */
+/* EQUED = 'N' on exit. */
+
+/* On exit, if EQUED .ne. 'N', A is scaled as follows: */
+/* EQUED = 'R': A := diag(R) * A */
+/* EQUED = 'C': A := A * diag(C) */
+/* EQUED = 'B': A := diag(R) * A * diag(C). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KL+KU+1. */
+
+/* AFB (input or output) COMPLEX array, dimension (LDAFB,N) */
+/* If FACT = 'F', then AFB is an input argument and on entry */
+/* contains details of the LU factorization of the band matrix */
+/* A, as computed by CGBTRF. U is stored as an upper triangular */
+/* band matrix with KL+KU superdiagonals in rows 1 to KL+KU+1, */
+/* and the multipliers used during the factorization are stored */
+/* in rows KL+KU+2 to 2*KL+KU+1. If EQUED .ne. 'N', then AFB is */
+/* the factored form of the equilibrated matrix A. */
+
+/* If FACT = 'N', then AFB is an output argument and on exit */
+/* returns details of the LU factorization of A. */
+
+/* If FACT = 'E', then AFB is an output argument and on exit */
+/* returns details of the LU factorization of the equilibrated */
+/* matrix A (see the description of AB for the form of the */
+/* equilibrated matrix). */
+
+/* LDAFB (input) INTEGER */
+/* The leading dimension of the array AFB. LDAFB >= 2*KL+KU+1. */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains the pivot indices from the factorization A = L*U */
+/* as computed by CGBTRF; row i of the matrix was interchanged */
+/* with row IPIV(i). */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains the pivot indices from the factorization A = L*U */
+/* of the original matrix A. */
+
+/* If FACT = 'E', then IPIV is an output argument and on exit */
+/* contains the pivot indices from the factorization A = L*U */
+/* of the equilibrated matrix A. */
+
+/* EQUED (input or output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration (always true if FACT = 'N'). */
+/* = 'R': Row equilibration, i.e., A has been premultiplied by */
+/* diag(R). */
+/* = 'C': Column equilibration, i.e., A has been postmultiplied */
+/* by diag(C). */
+/* = 'B': Both row and column equilibration, i.e., A has been */
+/* replaced by diag(R) * A * diag(C). */
+/* EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/* output argument. */
+
+/* R (input or output) REAL array, dimension (N) */
+/* The row scale factors for A. If EQUED = 'R' or 'B', A is */
+/* multiplied on the left by diag(R); if EQUED = 'N' or 'C', R */
+/* is not accessed. R is an input argument if FACT = 'F'; */
+/* otherwise, R is an output argument. If FACT = 'F' and */
+/* EQUED = 'R' or 'B', each element of R must be positive. */
+
+/* C (input or output) REAL array, dimension (N) */
+/* The column scale factors for A. If EQUED = 'C' or 'B', A is */
+/* multiplied on the right by diag(C); if EQUED = 'N' or 'R', C */
+/* is not accessed. C is an input argument if FACT = 'F'; */
+/* otherwise, C is an output argument. If FACT = 'F' and */
+/* EQUED = 'C' or 'B', each element of C must be positive. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, */
+/* if EQUED = 'N', B is not modified; */
+/* if TRANS = 'N' and EQUED = 'R' or 'B', B is overwritten by */
+/* diag(R)*B; */
+/* if TRANS = 'T' or 'C' and EQUED = 'C' or 'B', B is */
+/* overwritten by diag(C)*B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X */
+/* to the original system of equations. Note that A and B are */
+/* modified on exit if EQUED .ne. 'N', and the solution to the */
+/* equilibrated system is inv(diag(C))*X if TRANS = 'N' and */
+/* EQUED = 'C' or 'B', or inv(diag(R))*X if TRANS = 'T' or 'C' */
+/* and EQUED = 'R' or 'B'. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) REAL */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A after equilibration (if done). If RCOND is less than the */
+/* machine precision (in particular, if RCOND = 0), the matrix */
+/* is singular to working precision. This condition is */
+/* indicated by a return code of INFO > 0. */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace/output) REAL array, dimension (N) */
+/* On exit, RWORK(1) contains the reciprocal pivot growth */
+/* factor norm(A)/norm(U). The "max absolute element" norm is */
+/* used. If RWORK(1) is much less than 1, then the stability */
+/* of the LU factorization of the (equilibrated) matrix A */
+/* could be poor. This also means that the solution X, condition */
+/* estimator RCOND, and forward error bound FERR could be */
+/* unreliable. If factorization fails with 0<INFO<=N, then */
+/* RWORK(1) contains the reciprocal pivot growth factor for the */
+/* leading INFO columns of A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: U(i,i) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, so the solution and error bounds */
+/* could not be computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+/* Moved setting of INFO = N+1 so INFO does not subsequently get */
+/* overwritten. Sven, 17 Mar 05. */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    afb_dim1 = *ldafb;
+    afb_offset = 1 + afb_dim1;
+    afb -= afb_offset;
+    --ipiv;
+    --r__;
+    --c__;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    nofact = lsame_(fact, "N");
+    equil = lsame_(fact, "E");
+    notran = lsame_(trans, "N");
+    if (nofact || equil) {
+	*(unsigned char *)equed = 'N';
+	rowequ = FALSE_;
+	colequ = FALSE_;
+    } else {
+	rowequ = lsame_(equed, "R") || lsame_(equed,
+		"B");
+	colequ = lsame_(equed, "C") || lsame_(equed,
+		"B");
+/* SMLNUM/BIGNUM are only needed below to validate the user-supplied */
+/* scale factors R and C, which can only be inputs when FACT = 'F'. */
+	smlnum = slamch_("Safe minimum");
+	bignum = 1.f / smlnum;
+    }
+
+/* Test the input parameters. */
+
+    if (! nofact && ! equil && ! lsame_(fact, "F")) {
+	*info = -1;
+    } else if (! notran && ! lsame_(trans, "T") && !
+	    lsame_(trans, "C")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*kl < 0) {
+	*info = -4;
+    } else if (*ku < 0) {
+	*info = -5;
+    } else if (*nrhs < 0) {
+	*info = -6;
+    } else if (*ldab < *kl + *ku + 1) {
+	*info = -8;
+    } else if (*ldafb < (*kl << 1) + *ku + 1) {
+	*info = -10;
+    } else if (lsame_(fact, "F") && ! (rowequ || colequ
+	    || lsame_(equed, "N"))) {
+	*info = -12;
+    } else {
+	if (rowequ) {
+	    rcmin = bignum;
+	    rcmax = 0.f;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		r__1 = rcmin, r__2 = r__[j];
+		rcmin = dmin(r__1,r__2);
+/* Computing MAX */
+		r__1 = rcmax, r__2 = r__[j];
+		rcmax = dmax(r__1,r__2);
+/* L10: */
+	    }
+	    if (rcmin <= 0.f) {
+		*info = -13;
+	    } else if (*n > 0) {
+		rowcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+	    } else {
+		rowcnd = 1.f;
+	    }
+	}
+	if (colequ && *info == 0) {
+	    rcmin = bignum;
+	    rcmax = 0.f;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		r__1 = rcmin, r__2 = c__[j];
+		rcmin = dmin(r__1,r__2);
+/* Computing MAX */
+		r__1 = rcmax, r__2 = c__[j];
+		rcmax = dmax(r__1,r__2);
+/* L20: */
+	    }
+	    if (rcmin <= 0.f) {
+		*info = -14;
+	    } else if (*n > 0) {
+		colcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+	    } else {
+		colcnd = 1.f;
+	    }
+	}
+	if (*info == 0) {
+	    if (*ldb < max(1,*n)) {
+		*info = -16;
+	    } else if (*ldx < max(1,*n)) {
+		*info = -18;
+	    }
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CGBSVX", &i__1);
+	return 0;
+    }
+
+    if (equil) {
+
+/* Compute row and column scalings to equilibrate the matrix A. */
+
+	cgbequ_(n, n, kl, ku, &ab[ab_offset], ldab, &r__[1], &c__[1], &rowcnd,
+		 &colcnd, &amax, &infequ);
+	if (infequ == 0) {
+
+/* Equilibrate the matrix. */
+
+	    claqgb_(n, n, kl, ku, &ab[ab_offset], ldab, &r__[1], &c__[1], &
+		    rowcnd, &colcnd, &amax, equed);
+	    rowequ = lsame_(equed, "R") || lsame_(equed,
+		     "B");
+	    colequ = lsame_(equed, "C") || lsame_(equed,
+		     "B");
+	}
+    }
+
+/* Scale the right hand side. */
+
+    if (notran) {
+	if (rowequ) {
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * b_dim1;
+		    i__4 = i__;
+		    i__5 = i__ + j * b_dim1;
+		    q__1.r = r__[i__4] * b[i__5].r, q__1.i = r__[i__4] * b[
+			    i__5].i;
+		    b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+    } else if (colequ) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *n;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * b_dim1;
+		i__4 = i__;
+		i__5 = i__ + j * b_dim1;
+		q__1.r = c__[i__4] * b[i__5].r, q__1.i = c__[i__4] * b[i__5]
+			.i;
+		b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L50: */
+	    }
+/* L60: */
+	}
+    }
+
+    if (nofact || equil) {
+
+/* Compute the LU factorization of the band matrix A. */
+
+/* Copy the band of A (stored in rows 1..KL+KU+1 of AB) into AFB, */
+/* shifted down by KL rows so CGBTRF has workspace for the fill-in */
+/* produced by row interchanges (AFB needs 2*KL+KU+1 rows). */
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+	    i__2 = j - *ku;
+	    j1 = max(i__2,1);
+/* Computing MIN */
+	    i__2 = j + *kl;
+	    j2 = min(i__2,*n);
+	    i__2 = j2 - j1 + 1;
+	    ccopy_(&i__2, &ab[*ku + 1 - j + j1 + j * ab_dim1], &c__1, &afb[*
+		    kl + *ku + 1 - j + j1 + j * afb_dim1], &c__1);
+/* L70: */
+	}
+
+	cgbtrf_(n, n, kl, ku, &afb[afb_offset], ldafb, &ipiv[1], info);
+
+/* Return if INFO is non-zero. */
+
+	if (*info > 0) {
+
+/* Compute the reciprocal pivot growth factor of the */
+/* leading rank-deficient INFO columns of A. */
+
+	    anorm = 0.f;
+	    i__1 = *info;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+		i__2 = *ku + 2 - j;
+/* Computing MIN */
+		i__4 = *n + *ku + 1 - j, i__5 = *kl + *ku + 1;
+		i__3 = min(i__4,i__5);
+		for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+		    r__1 = anorm, r__2 = c_abs(&ab[i__ + j * ab_dim1]);
+		    anorm = dmax(r__1,r__2);
+/* L80: */
+		}
+/* L90: */
+	    }
+/* Computing MIN */
+	    i__3 = *info - 1, i__2 = *kl + *ku;
+	    i__1 = min(i__3,i__2);
+/* Computing MAX */
+	    i__4 = 1, i__5 = *kl + *ku + 2 - *info;
+	    rpvgrw = clantb_("M", "U", "N", info, &i__1, &afb[max(i__4, i__5)
+		    + afb_dim1], ldafb, &rwork[1]);
+	    if (rpvgrw == 0.f) {
+		rpvgrw = 1.f;
+	    } else {
+		rpvgrw = anorm / rpvgrw;
+	    }
+	    rwork[1] = rpvgrw;
+	    *rcond = 0.f;
+	    return 0;
+	}
+    }
+
+/* Compute the norm of the matrix A and the */
+/* reciprocal pivot growth factor RPVGRW. */
+
+/* '1' (one-norm) for A*X=B; 'I' (infinity-norm) for the transposed */
+/* and conjugate-transposed systems. */
+    if (notran) {
+	*(unsigned char *)norm = '1';
+    } else {
+	*(unsigned char *)norm = 'I';
+    }
+    anorm = clangb_(norm, n, kl, ku, &ab[ab_offset], ldab, &rwork[1]);
+    i__1 = *kl + *ku;
+    rpvgrw = clantb_("M", "U", "N", n, &i__1, &afb[afb_offset], ldafb, &rwork[
+	    1]);
+    if (rpvgrw == 0.f) {
+	rpvgrw = 1.f;
+    } else {
+	rpvgrw = clangb_("M", n, kl, ku, &ab[ab_offset], ldab, &rwork[1]) / rpvgrw;
+    }
+
+/* Compute the reciprocal of the condition number of A. */
+
+    cgbcon_(norm, n, kl, ku, &afb[afb_offset], ldafb, &ipiv[1], &anorm, rcond,
+	     &work[1], &rwork[1], info);
+
+/* Compute the solution matrix X. */
+
+    clacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    cgbtrs_(trans, n, kl, ku, nrhs, &afb[afb_offset], ldafb, &ipiv[1], &x[
+	    x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solution and */
+/* compute error bounds and backward error estimates for it. */
+
+    cgbrfs_(trans, n, kl, ku, nrhs, &ab[ab_offset], ldab, &afb[afb_offset],
+	    ldafb, &ipiv[1], &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1], &
+	    berr[1], &work[1], &rwork[1], info);
+
+/* Transform the solution matrix X to a solution of the original */
+/* system. */
+
+    if (notran) {
+	if (colequ) {
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		i__3 = *n;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    i__2 = i__ + j * x_dim1;
+		    i__4 = i__;
+		    i__5 = i__ + j * x_dim1;
+		    q__1.r = c__[i__4] * x[i__5].r, q__1.i = c__[i__4] * x[
+			    i__5].i;
+		    x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+/* L100: */
+		}
+/* L110: */
+	    }
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		ferr[j] /= colcnd;
+/* L120: */
+	    }
+	}
+    } else if (rowequ) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    i__3 = *n;
+	    for (i__ = 1; i__ <= i__3; ++i__) {
+		i__2 = i__ + j * x_dim1;
+		i__4 = i__;
+		i__5 = i__ + j * x_dim1;
+		q__1.r = r__[i__4] * x[i__5].r, q__1.i = r__[i__4] * x[i__5]
+			.i;
+		x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+/* L130: */
+	    }
+/* L140: */
+	}
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] /= rowcnd;
+/* L150: */
+	}
+    }
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+    if (*rcond < slamch_("Epsilon")) {
+	*info = *n + 1;
+    }
+
+    rwork[1] = rpvgrw;
+    return 0;
+
+/* End of CGBSVX */
+
+} /* cgbsvx_ */
diff --git a/contrib/libs/clapack/cgbtf2.c b/contrib/libs/clapack/cgbtf2.c
new file mode 100644
index 0000000000..ddf5cf55c1
--- /dev/null
+++ b/contrib/libs/clapack/cgbtf2.c
@@ -0,0 +1,267 @@
+/* cgbtf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int cgbtf2_(integer *m, integer *n, integer *kl, integer *ku,
+ complex *ab, integer *ldab, integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+ complex q__1;
+
+ /* Builtin functions */
+ void c_div(complex *, complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, km, jp, ju, kv;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *), cgeru_(integer *, integer *, complex *, complex *,
+ integer *, complex *, integer *, complex *, integer *), cswap_(
+ integer *, complex *, integer *, complex *, integer *);
+ extern integer icamax_(integer *, complex *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGBTF2 computes an LU factorization of a complex m-by-n band matrix */
+/* A using partial pivoting with row interchanges. */
+
+/* This is the unblocked version of the algorithm, calling Level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input/output) COMPLEX array, dimension (LDAB,N) */
+/* On entry, the matrix A in band storage, in rows KL+1 to */
+/* 2*KL+KU+1; rows 1 to KL of the array need not be set. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(kl+ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl) */
+
+/* On exit, details of the factorization: U is stored as an */
+/* upper triangular band matrix with KL+KU superdiagonals in */
+/* rows 1 to KL+KU+1, and the multipliers used during the */
+/* factorization are stored in rows KL+KU+2 to 2*KL+KU+1. */
+/* See below for further details. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= 2*KL+KU+1. */
+
+/* IPIV (output) INTEGER array, dimension (min(M,N)) */
+/* The pivot indices; for 1 <= i <= min(M,N), row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = +i, U(i,i) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and division by zero will occur if it is used */
+/* to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* M = N = 6, KL = 2, KU = 1: */
+
+/* On entry: On exit: */
+
+/* * * * + + + * * * u14 u25 u36 */
+/* * * + + + + * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+/* a21 a32 a43 a54 a65 * m21 m32 m43 m54 m65 * */
+/* a31 a42 a53 a64 * * m31 m42 m53 m64 * * */
+
+/* Array elements marked * are not used by the routine; elements marked */
+/* + need not be set on entry, but are required by the routine to store */
+/* elements of U, because of fill-in resulting from the row */
+/* interchanges. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* KV is the number of superdiagonals in the factor U, allowing for */
+/* fill-in. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --ipiv;
+
+ /* Function Body */
+ kv = *ku + *kl;
+
+/* Test the input parameters. */
+
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*ldab < *kl + kv + 1) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGBTF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Gaussian elimination with partial pivoting */
+
+/* Set fill-in elements in columns KU+2 to KV to zero. */
+
+ i__1 = min(kv,*n);
+ for (j = *ku + 2; j <= i__1; ++j) {
+ i__2 = *kl;
+ for (i__ = kv - j + 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * ab_dim1;
+ ab[i__3].r = 0.f, ab[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+
+/* JU is the index of the last column affected by the current stage */
+/* of the factorization. */
+
+ ju = 1;
+
+ i__1 = min(*m,*n);
+ for (j = 1; j <= i__1; ++j) {
+
+/* Set fill-in elements in column J+KV to zero. */
+
+ if (j + kv <= *n) {
+ i__2 = *kl;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + (j + kv) * ab_dim1;
+ ab[i__3].r = 0.f, ab[i__3].i = 0.f;
+/* L30: */
+ }
+ }
+
+/* Find pivot and test for singularity. KM is the number of */
+/* subdiagonal elements in the current column. */
+
+/* Computing MIN */
+ i__2 = *kl, i__3 = *m - j;
+ km = min(i__2,i__3);
+ i__2 = km + 1;
+ jp = icamax_(&i__2, &ab[kv + 1 + j * ab_dim1], &c__1);
+ ipiv[j] = jp + j - 1;
+ i__2 = kv + jp + j * ab_dim1;
+ if (ab[i__2].r != 0.f || ab[i__2].i != 0.f) {
+/* Computing MAX */
+/* Computing MIN */
+ i__4 = j + *ku + jp - 1;
+ i__2 = ju, i__3 = min(i__4,*n);
+ ju = max(i__2,i__3);
+
+/* Apply interchange to columns J to JU. */
+
+ if (jp != 1) {
+ i__2 = ju - j + 1;
+ i__3 = *ldab - 1;
+ i__4 = *ldab - 1;
+ cswap_(&i__2, &ab[kv + jp + j * ab_dim1], &i__3, &ab[kv + 1 +
+ j * ab_dim1], &i__4);
+ }
+ if (km > 0) {
+
+/* Compute multipliers. */
+
+ c_div(&q__1, &c_b1, &ab[kv + 1 + j * ab_dim1]);
+ cscal_(&km, &q__1, &ab[kv + 2 + j * ab_dim1], &c__1);
+
+/* Update trailing submatrix within the band. */
+
+ if (ju > j) {
+ i__2 = ju - j;
+ q__1.r = -1.f, q__1.i = -0.f;
+ i__3 = *ldab - 1;
+ i__4 = *ldab - 1;
+ cgeru_(&km, &i__2, &q__1, &ab[kv + 2 + j * ab_dim1], &
+ c__1, &ab[kv + (j + 1) * ab_dim1], &i__3, &ab[kv
+ + 1 + (j + 1) * ab_dim1], &i__4);
+ }
+ }
+ } else {
+
+/* If pivot is zero, set INFO to the index of the pivot */
+/* unless a zero pivot has already been found. */
+
+ if (*info == 0) {
+ *info = j;
+ }
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of CGBTF2 */
+
+} /* cgbtf2_ */
diff --git a/contrib/libs/clapack/cgbtrf.c b/contrib/libs/clapack/cgbtrf.c
new file mode 100644
index 0000000000..491eb8d0e8
--- /dev/null
+++ b/contrib/libs/clapack/cgbtrf.c
@@ -0,0 +1,604 @@
+/* cgbtrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c__65 = 65;
+
+/* Subroutine */ int cgbtrf_(integer *m, integer *n, integer *kl, integer *ku,
+ complex *ab, integer *ldab, integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+ complex q__1;
+
+ /* Builtin functions */
+ void c_div(complex *, complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, i2, i3, j2, j3, k2, jb, nb, ii, jj, jm, ip, jp, km, ju,
+ kv, nw;
+ complex temp;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *), cgemm_(char *, char *, integer *, integer *, integer *
+, complex *, complex *, integer *, complex *, integer *, complex *
+, complex *, integer *), cgeru_(integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, integer *), ccopy_(integer *, complex *, integer *,
+ complex *, integer *), cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ complex work13[4160] /* was [65][64] */, work31[4160] /*
+ was [65][64] */;
+ extern /* Subroutine */ int ctrsm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *), cgbtf2_(integer *,
+ integer *, integer *, integer *, complex *, integer *, integer *,
+ integer *);
+ extern integer icamax_(integer *, complex *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int claswp_(integer *, complex *, integer *,
+ integer *, integer *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGBTRF computes an LU factorization of a complex m-by-n band matrix A */
+/* using partial pivoting with row interchanges. */
+
+/* This is the blocked version of the algorithm, calling Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input/output) COMPLEX array, dimension (LDAB,N) */
+/* On entry, the matrix A in band storage, in rows KL+1 to */
+/* 2*KL+KU+1; rows 1 to KL of the array need not be set. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(kl+ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl) */
+
+/* On exit, details of the factorization: U is stored as an */
+/* upper triangular band matrix with KL+KU superdiagonals in */
+/* rows 1 to KL+KU+1, and the multipliers used during the */
+/* factorization are stored in rows KL+KU+2 to 2*KL+KU+1. */
+/* See below for further details. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= 2*KL+KU+1. */
+
+/* IPIV (output) INTEGER array, dimension (min(M,N)) */
+/* The pivot indices; for 1 <= i <= min(M,N), row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = +i, U(i,i) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and division by zero will occur if it is used */
+/* to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* M = N = 6, KL = 2, KU = 1: */
+
+/* On entry: On exit: */
+
+/* * * * + + + * * * u14 u25 u36 */
+/* * * + + + + * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+/* a21 a32 a43 a54 a65 * m21 m32 m43 m54 m65 * */
+/* a31 a42 a53 a64 * * m31 m42 m53 m64 * * */
+
+/* Array elements marked * are not used by the routine; elements marked */
+/* + need not be set on entry, but are required by the routine to store */
+/* elements of U because of fill-in resulting from the row interchanges. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* KV is the number of superdiagonals in the factor U, allowing for */
+/* fill-in */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --ipiv;
+
+ /* Function Body */
+ kv = *ku + *kl;
+
+/* Test the input parameters. */
+
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*ldab < *kl + kv + 1) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGBTRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment */
+
+ nb = ilaenv_(&c__1, "CGBTRF", " ", m, n, kl, ku);
+
+/* The block size must not exceed the limit set by the size of the */
+/* local arrays WORK13 and WORK31. */
+
+ nb = min(nb,64);
+
+ if (nb <= 1 || nb > *kl) {
+
+/* Use unblocked code */
+
+ cgbtf2_(m, n, kl, ku, &ab[ab_offset], ldab, &ipiv[1], info);
+ } else {
+
+/* Use blocked code */
+
+/* Zero the superdiagonal elements of the work array WORK13 */
+
+ i__1 = nb;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * 65 - 66;
+ work13[i__3].r = 0.f, work13[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+
+/* Zero the subdiagonal elements of the work array WORK31 */
+
+ i__1 = nb;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = nb;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * 65 - 66;
+ work31[i__3].r = 0.f, work31[i__3].i = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Gaussian elimination with partial pivoting */
+
+/* Set fill-in elements in columns KU+2 to KV to zero */
+
+ i__1 = min(kv,*n);
+ for (j = *ku + 2; j <= i__1; ++j) {
+ i__2 = *kl;
+ for (i__ = kv - j + 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * ab_dim1;
+ ab[i__3].r = 0.f, ab[i__3].i = 0.f;
+/* L50: */
+ }
+/* L60: */
+ }
+
+/* JU is the index of the last column affected by the current */
+/* stage of the factorization */
+
+ ju = 1;
+
+ i__1 = min(*m,*n);
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__3 = nb, i__4 = min(*m,*n) - j + 1;
+ jb = min(i__3,i__4);
+
+/* The active part of the matrix is partitioned */
+
+/* A11 A12 A13 */
+/* A21 A22 A23 */
+/* A31 A32 A33 */
+
+/* Here A11, A21 and A31 denote the current block of JB columns */
+/* which is about to be factorized. The number of rows in the */
+/* partitioning are JB, I2, I3 respectively, and the numbers */
+/* of columns are JB, J2, J3. The superdiagonal elements of A13 */
+/* and the subdiagonal elements of A31 lie outside the band. */
+
+/* Computing MIN */
+ i__3 = *kl - jb, i__4 = *m - j - jb + 1;
+ i2 = min(i__3,i__4);
+/* Computing MIN */
+ i__3 = jb, i__4 = *m - j - *kl + 1;
+ i3 = min(i__3,i__4);
+
+/* J2 and J3 are computed after JU has been updated. */
+
+/* Factorize the current block of JB columns */
+
+ i__3 = j + jb - 1;
+ for (jj = j; jj <= i__3; ++jj) {
+
+/* Set fill-in elements in column JJ+KV to zero */
+
+ if (jj + kv <= *n) {
+ i__4 = *kl;
+ for (i__ = 1; i__ <= i__4; ++i__) {
+ i__5 = i__ + (jj + kv) * ab_dim1;
+ ab[i__5].r = 0.f, ab[i__5].i = 0.f;
+/* L70: */
+ }
+ }
+
+/* Find pivot and test for singularity. KM is the number of */
+/* subdiagonal elements in the current column. */
+
+/* Computing MIN */
+ i__4 = *kl, i__5 = *m - jj;
+ km = min(i__4,i__5);
+ i__4 = km + 1;
+ jp = icamax_(&i__4, &ab[kv + 1 + jj * ab_dim1], &c__1);
+ ipiv[jj] = jp + jj - j;
+ i__4 = kv + jp + jj * ab_dim1;
+ if (ab[i__4].r != 0.f || ab[i__4].i != 0.f) {
+/* Computing MAX */
+/* Computing MIN */
+ i__6 = jj + *ku + jp - 1;
+ i__4 = ju, i__5 = min(i__6,*n);
+ ju = max(i__4,i__5);
+ if (jp != 1) {
+
+/* Apply interchange to columns J to J+JB-1 */
+
+ if (jp + jj - 1 < j + *kl) {
+
+ i__4 = *ldab - 1;
+ i__5 = *ldab - 1;
+ cswap_(&jb, &ab[kv + 1 + jj - j + j * ab_dim1], &
+ i__4, &ab[kv + jp + jj - j + j * ab_dim1],
+ &i__5);
+ } else {
+
+/* The interchange affects columns J to JJ-1 of A31 */
+/* which are stored in the work array WORK31 */
+
+ i__4 = jj - j;
+ i__5 = *ldab - 1;
+ cswap_(&i__4, &ab[kv + 1 + jj - j + j * ab_dim1],
+ &i__5, &work31[jp + jj - j - *kl - 1], &
+ c__65);
+ i__4 = j + jb - jj;
+ i__5 = *ldab - 1;
+ i__6 = *ldab - 1;
+ cswap_(&i__4, &ab[kv + 1 + jj * ab_dim1], &i__5, &
+ ab[kv + jp + jj * ab_dim1], &i__6);
+ }
+ }
+
+/* Compute multipliers */
+
+ c_div(&q__1, &c_b1, &ab[kv + 1 + jj * ab_dim1]);
+ cscal_(&km, &q__1, &ab[kv + 2 + jj * ab_dim1], &c__1);
+
+/* Update trailing submatrix within the band and within */
+/* the current block. JM is the index of the last column */
+/* which needs to be updated. */
+
+/* Computing MIN */
+ i__4 = ju, i__5 = j + jb - 1;
+ jm = min(i__4,i__5);
+ if (jm > jj) {
+ i__4 = jm - jj;
+ q__1.r = -1.f, q__1.i = -0.f;
+ i__5 = *ldab - 1;
+ i__6 = *ldab - 1;
+ cgeru_(&km, &i__4, &q__1, &ab[kv + 2 + jj * ab_dim1],
+ &c__1, &ab[kv + (jj + 1) * ab_dim1], &i__5, &
+ ab[kv + 1 + (jj + 1) * ab_dim1], &i__6);
+ }
+ } else {
+
+/* If pivot is zero, set INFO to the index of the pivot */
+/* unless a zero pivot has already been found. */
+
+ if (*info == 0) {
+ *info = jj;
+ }
+ }
+
+/* Copy current column of A31 into the work array WORK31 */
+
+/* Computing MIN */
+ i__4 = jj - j + 1;
+ nw = min(i__4,i3);
+ if (nw > 0) {
+ ccopy_(&nw, &ab[kv + *kl + 1 - jj + j + jj * ab_dim1], &
+ c__1, &work31[(jj - j + 1) * 65 - 65], &c__1);
+ }
+/* L80: */
+ }
+ if (j + jb <= *n) {
+
+/* Apply the row interchanges to the other blocks. */
+
+/* Computing MIN */
+ i__3 = ju - j + 1;
+ j2 = min(i__3,kv) - jb;
+/* Computing MAX */
+ i__3 = 0, i__4 = ju - j - kv + 1;
+ j3 = max(i__3,i__4);
+
+/* Use CLASWP to apply the row interchanges to A12, A22, and */
+/* A32. */
+
+ i__3 = *ldab - 1;
+ claswp_(&j2, &ab[kv + 1 - jb + (j + jb) * ab_dim1], &i__3, &
+ c__1, &jb, &ipiv[j], &c__1);
+
+/* Adjust the pivot indices. */
+
+ i__3 = j + jb - 1;
+ for (i__ = j; i__ <= i__3; ++i__) {
+ ipiv[i__] = ipiv[i__] + j - 1;
+/* L90: */
+ }
+
+/* Apply the row interchanges to A13, A23, and A33 */
+/* columnwise. */
+
+ k2 = j - 1 + jb + j2;
+ i__3 = j3;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ jj = k2 + i__;
+ i__4 = j + jb - 1;
+ for (ii = j + i__ - 1; ii <= i__4; ++ii) {
+ ip = ipiv[ii];
+ if (ip != ii) {
+ i__5 = kv + 1 + ii - jj + jj * ab_dim1;
+ temp.r = ab[i__5].r, temp.i = ab[i__5].i;
+ i__5 = kv + 1 + ii - jj + jj * ab_dim1;
+ i__6 = kv + 1 + ip - jj + jj * ab_dim1;
+ ab[i__5].r = ab[i__6].r, ab[i__5].i = ab[i__6].i;
+ i__5 = kv + 1 + ip - jj + jj * ab_dim1;
+ ab[i__5].r = temp.r, ab[i__5].i = temp.i;
+ }
+/* L100: */
+ }
+/* L110: */
+ }
+
+/* Update the relevant part of the trailing submatrix */
+
+ if (j2 > 0) {
+
+/* Update A12 */
+
+ i__3 = *ldab - 1;
+ i__4 = *ldab - 1;
+ ctrsm_("Left", "Lower", "No transpose", "Unit", &jb, &j2,
+ &c_b1, &ab[kv + 1 + j * ab_dim1], &i__3, &ab[kv +
+ 1 - jb + (j + jb) * ab_dim1], &i__4);
+
+ if (i2 > 0) {
+
+/* Update A22 */
+
+ q__1.r = -1.f, q__1.i = -0.f;
+ i__3 = *ldab - 1;
+ i__4 = *ldab - 1;
+ i__5 = *ldab - 1;
+ cgemm_("No transpose", "No transpose", &i2, &j2, &jb,
+ &q__1, &ab[kv + 1 + jb + j * ab_dim1], &i__3,
+ &ab[kv + 1 - jb + (j + jb) * ab_dim1], &i__4,
+ &c_b1, &ab[kv + 1 + (j + jb) * ab_dim1], &
+ i__5);
+ }
+
+ if (i3 > 0) {
+
+/* Update A32 */
+
+ q__1.r = -1.f, q__1.i = -0.f;
+ i__3 = *ldab - 1;
+ i__4 = *ldab - 1;
+ cgemm_("No transpose", "No transpose", &i3, &j2, &jb,
+ &q__1, work31, &c__65, &ab[kv + 1 - jb + (j +
+ jb) * ab_dim1], &i__3, &c_b1, &ab[kv + *kl +
+ 1 - jb + (j + jb) * ab_dim1], &i__4);
+ }
+ }
+
+ if (j3 > 0) {
+
+/* Copy the lower triangle of A13 into the work array */
+/* WORK13 */
+
+ i__3 = j3;
+ for (jj = 1; jj <= i__3; ++jj) {
+ i__4 = jb;
+ for (ii = jj; ii <= i__4; ++ii) {
+ i__5 = ii + jj * 65 - 66;
+ i__6 = ii - jj + 1 + (jj + j + kv - 1) * ab_dim1;
+ work13[i__5].r = ab[i__6].r, work13[i__5].i = ab[
+ i__6].i;
+/* L120: */
+ }
+/* L130: */
+ }
+
+/* Update A13 in the work array */
+
+ i__3 = *ldab - 1;
+ ctrsm_("Left", "Lower", "No transpose", "Unit", &jb, &j3,
+ &c_b1, &ab[kv + 1 + j * ab_dim1], &i__3, work13, &
+ c__65);
+
+ if (i2 > 0) {
+
+/* Update A23 */
+
+ q__1.r = -1.f, q__1.i = -0.f;
+ i__3 = *ldab - 1;
+ i__4 = *ldab - 1;
+ cgemm_("No transpose", "No transpose", &i2, &j3, &jb,
+ &q__1, &ab[kv + 1 + jb + j * ab_dim1], &i__3,
+ work13, &c__65, &c_b1, &ab[jb + 1 + (j + kv) *
+ ab_dim1], &i__4);
+ }
+
+ if (i3 > 0) {
+
+/* Update A33 */
+
+ q__1.r = -1.f, q__1.i = -0.f;
+ i__3 = *ldab - 1;
+ cgemm_("No transpose", "No transpose", &i3, &j3, &jb,
+ &q__1, work31, &c__65, work13, &c__65, &c_b1,
+ &ab[*kl + 1 + (j + kv) * ab_dim1], &i__3);
+ }
+
+/* Copy the lower triangle of A13 back into place */
+
+ i__3 = j3;
+ for (jj = 1; jj <= i__3; ++jj) {
+ i__4 = jb;
+ for (ii = jj; ii <= i__4; ++ii) {
+ i__5 = ii - jj + 1 + (jj + j + kv - 1) * ab_dim1;
+ i__6 = ii + jj * 65 - 66;
+ ab[i__5].r = work13[i__6].r, ab[i__5].i = work13[
+ i__6].i;
+/* L140: */
+ }
+/* L150: */
+ }
+ }
+ } else {
+
+/* Adjust the pivot indices. */
+
+ i__3 = j + jb - 1;
+ for (i__ = j; i__ <= i__3; ++i__) {
+ ipiv[i__] = ipiv[i__] + j - 1;
+/* L160: */
+ }
+ }
+
+/* Partially undo the interchanges in the current block to */
+/* restore the upper triangular form of A31 and copy the upper */
+/* triangle of A31 back into place */
+
+ i__3 = j;
+ for (jj = j + jb - 1; jj >= i__3; --jj) {
+ jp = ipiv[jj] - jj + 1;
+ if (jp != 1) {
+
+/* Apply interchange to columns J to JJ-1 */
+
+ if (jp + jj - 1 < j + *kl) {
+
+/* The interchange does not affect A31 */
+
+ i__4 = jj - j;
+ i__5 = *ldab - 1;
+ i__6 = *ldab - 1;
+ cswap_(&i__4, &ab[kv + 1 + jj - j + j * ab_dim1], &
+ i__5, &ab[kv + jp + jj - j + j * ab_dim1], &
+ i__6);
+ } else {
+
+/* The interchange does affect A31 */
+
+ i__4 = jj - j;
+ i__5 = *ldab - 1;
+ cswap_(&i__4, &ab[kv + 1 + jj - j + j * ab_dim1], &
+ i__5, &work31[jp + jj - j - *kl - 1], &c__65);
+ }
+ }
+
+/* Copy the current column of A31 back into place */
+
+/* Computing MIN */
+ i__4 = i3, i__5 = jj - j + 1;
+ nw = min(i__4,i__5);
+ if (nw > 0) {
+ ccopy_(&nw, &work31[(jj - j + 1) * 65 - 65], &c__1, &ab[
+ kv + *kl + 1 - jj + j + jj * ab_dim1], &c__1);
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+
+ return 0;
+
+/* End of CGBTRF */
+
+} /* cgbtrf_ */
diff --git a/contrib/libs/clapack/cgbtrs.c b/contrib/libs/clapack/cgbtrs.c
new file mode 100644
index 0000000000..358ed1ae70
--- /dev/null
+++ b/contrib/libs/clapack/cgbtrs.c
@@ -0,0 +1,281 @@
+/* cgbtrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int cgbtrs_(char *trans, integer *n, integer *kl, integer *
+ ku, integer *nrhs, complex *ab, integer *ldab, integer *ipiv, complex
+ *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, b_dim1, b_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, j, l, kd, lm;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *), cgeru_(integer *, integer *, complex *,
+ complex *, integer *, complex *, integer *, complex *, integer *),
+ cswap_(integer *, complex *, integer *, complex *, integer *),
+ ctbsv_(char *, char *, char *, integer *, integer *, complex *,
+ integer *, complex *, integer *);
+ logical lnoti;
+ extern /* Subroutine */ int clacgv_(integer *, complex *, integer *),
+ xerbla_(char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGBTRS solves a system of linear equations */
+/* A * X = B, A**T * X = B, or A**H * X = B */
+/* with a general band matrix A using the LU factorization computed */
+/* by CGBTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations. */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AB (input) COMPLEX array, dimension (LDAB,N) */
+/* Details of the LU factorization of the band matrix A, as */
+/* computed by CGBTRF. U is stored as an upper triangular band */
+/* matrix with KL+KU superdiagonals in rows 1 to KL+KU+1, and */
+/* the multipliers used during the factorization are stored in */
+/* rows KL+KU+2 to 2*KL+KU+1. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= 2*KL+KU+1. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= N, row i of the matrix was */
+/* interchanged with row IPIV(i). */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*nrhs < 0) {
+ *info = -5;
+ } else if (*ldab < (*kl << 1) + *ku + 1) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGBTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+ kd = *ku + *kl + 1;
+ lnoti = *kl > 0;
+
+ if (notran) {
+
+/* Solve A*X = B. */
+
+/* Solve L*X = B, overwriting B with X. */
+
+/* L is represented as a product of permutations and unit lower */
+/* triangular matrices L = P(1) * L(1) * ... * P(n-1) * L(n-1), */
+/* where each transformation L(i) is a rank-one modification of */
+/* the identity matrix. */
+
+ if (lnoti) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *kl, i__3 = *n - j;
+ lm = min(i__2,i__3);
+ l = ipiv[j];
+ if (l != j) {
+ cswap_(nrhs, &b[l + b_dim1], ldb, &b[j + b_dim1], ldb);
+ }
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&lm, nrhs, &q__1, &ab[kd + 1 + j * ab_dim1], &c__1, &b[
+ j + b_dim1], ldb, &b[j + 1 + b_dim1], ldb);
+/* L10: */
+ }
+ }
+
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Solve U*X = B, overwriting B with X. */
+
+ i__2 = *kl + *ku;
+ ctbsv_("Upper", "No transpose", "Non-unit", n, &i__2, &ab[
+ ab_offset], ldab, &b[i__ * b_dim1 + 1], &c__1);
+/* L20: */
+ }
+
+ } else if (lsame_(trans, "T")) {
+
+/* Solve A**T * X = B. */
+
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Solve U**T * X = B, overwriting B with X. */
+
+ i__2 = *kl + *ku;
+ ctbsv_("Upper", "Transpose", "Non-unit", n, &i__2, &ab[ab_offset],
+ ldab, &b[i__ * b_dim1 + 1], &c__1);
+/* L30: */
+ }
+
+/* Solve L**T * X = B, overwriting B with X. */
+
+ if (lnoti) {
+ for (j = *n - 1; j >= 1; --j) {
+/* Computing MIN */
+ i__1 = *kl, i__2 = *n - j;
+ lm = min(i__1,i__2);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Transpose", &lm, nrhs, &q__1, &b[j + 1 + b_dim1], ldb,
+ &ab[kd + 1 + j * ab_dim1], &c__1, &c_b1, &b[j +
+ b_dim1], ldb);
+ l = ipiv[j];
+ if (l != j) {
+ cswap_(nrhs, &b[l + b_dim1], ldb, &b[j + b_dim1], ldb);
+ }
+/* L40: */
+ }
+ }
+
+ } else {
+
+/* Solve A**H * X = B. */
+
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Solve U**H * X = B, overwriting B with X. */
+
+ i__2 = *kl + *ku;
+ ctbsv_("Upper", "Conjugate transpose", "Non-unit", n, &i__2, &ab[
+ ab_offset], ldab, &b[i__ * b_dim1 + 1], &c__1);
+/* L50: */
+ }
+
+/* Solve L**H * X = B, overwriting B with X. */
+
+ if (lnoti) {
+ for (j = *n - 1; j >= 1; --j) {
+/* Computing MIN */
+ i__1 = *kl, i__2 = *n - j;
+ lm = min(i__1,i__2);
+ clacgv_(nrhs, &b[j + b_dim1], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Conjugate transpose", &lm, nrhs, &q__1, &b[j + 1 +
+ b_dim1], ldb, &ab[kd + 1 + j * ab_dim1], &c__1, &c_b1,
+ &b[j + b_dim1], ldb);
+ clacgv_(nrhs, &b[j + b_dim1], ldb);
+ l = ipiv[j];
+ if (l != j) {
+ cswap_(nrhs, &b[l + b_dim1], ldb, &b[j + b_dim1], ldb);
+ }
+/* L60: */
+ }
+ }
+ }
+ return 0;
+
+/* End of CGBTRS */
+
+} /* cgbtrs_ */
diff --git a/contrib/libs/clapack/cgebak.c b/contrib/libs/clapack/cgebak.c
new file mode 100644
index 0000000000..d84eacb0a4
--- /dev/null
+++ b/contrib/libs/clapack/cgebak.c
@@ -0,0 +1,236 @@
+/* cgebak.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cgebak_(char *job, char *side, integer *n, integer *ilo,
+ integer *ihi, real *scale, integer *m, complex *v, integer *ldv,
+ integer *info)
+{
+ /* System generated locals */
+ integer v_dim1, v_offset, i__1;
+
+ /* Local variables */
+ integer i__, k;
+ real s;
+ integer ii;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ logical leftv;
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *), xerbla_(char *, integer *);
+ logical rightv;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGEBAK forms the right or left eigenvectors of a complex general */
+/* matrix by backward transformation on the computed eigenvectors of the */
+/* balanced matrix output by CGEBAL. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies the type of backward transformation required: */
+/* = 'N', do nothing, return immediately; */
+/* = 'P', do backward transformation for permutation only; */
+/* = 'S', do backward transformation for scaling only; */
+/* = 'B', do backward transformations for both permutation and */
+/* scaling. */
+/* JOB must be the same as the argument JOB supplied to CGEBAL. */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'R': V contains right eigenvectors; */
+/* = 'L': V contains left eigenvectors. */
+
+/* N (input) INTEGER */
+/* The number of rows of the matrix V. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* The integers ILO and IHI determined by CGEBAL. */
+/* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/* SCALE (input) REAL array, dimension (N) */
+/* Details of the permutation and scaling factors, as returned */
+/* by CGEBAL. */
+
+/* M (input) INTEGER */
+/* The number of columns of the matrix V. M >= 0. */
+
+/* V (input/output) COMPLEX array, dimension (LDV,M) */
+/* On entry, the matrix of right or left eigenvectors to be */
+/* transformed, as returned by CHSEIN or CTREVC. */
+/* On exit, V is overwritten by the transformed eigenvectors. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. LDV >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and Test the input parameters */
+
+ /* Parameter adjustments */
+ --scale;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+
+ /* Function Body */
+ rightv = lsame_(side, "R");
+ leftv = lsame_(side, "L");
+
+ *info = 0;
+ if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+ && ! lsame_(job, "B")) {
+ *info = -1;
+ } else if (! rightv && ! leftv) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ilo < 1 || *ilo > max(1,*n)) {
+ *info = -4;
+ } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+ *info = -5;
+ } else if (*m < 0) {
+ *info = -7;
+ } else if (*ldv < max(1,*n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEBAK", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*m == 0) {
+ return 0;
+ }
+ if (lsame_(job, "N")) {
+ return 0;
+ }
+
+ if (*ilo == *ihi) {
+ goto L30;
+ }
+
+/* Backward balance */
+
+ if (lsame_(job, "S") || lsame_(job, "B")) {
+
+ if (rightv) {
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ s = scale[i__];
+ csscal_(m, &s, &v[i__ + v_dim1], ldv);
+/* L10: */
+ }
+ }
+
+ if (leftv) {
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ s = 1.f / scale[i__];
+ csscal_(m, &s, &v[i__ + v_dim1], ldv);
+/* L20: */
+ }
+ }
+
+ }
+
+/* Backward permutation */
+
+/* For I = ILO-1 step -1 until 1, */
+/* IHI+1 step 1 until N do -- */
+
+L30:
+ if (lsame_(job, "P") || lsame_(job, "B")) {
+ if (rightv) {
+ i__1 = *n;
+ for (ii = 1; ii <= i__1; ++ii) {
+ i__ = ii;
+ if (i__ >= *ilo && i__ <= *ihi) {
+ goto L40;
+ }
+ if (i__ < *ilo) {
+ i__ = *ilo - ii;
+ }
+ k = scale[i__];
+ if (k == i__) {
+ goto L40;
+ }
+ cswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L40:
+ ;
+ }
+ }
+
+ if (leftv) {
+ i__1 = *n;
+ for (ii = 1; ii <= i__1; ++ii) {
+ i__ = ii;
+ if (i__ >= *ilo && i__ <= *ihi) {
+ goto L50;
+ }
+ if (i__ < *ilo) {
+ i__ = *ilo - ii;
+ }
+ k = scale[i__];
+ if (k == i__) {
+ goto L50;
+ }
+ cswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L50:
+ ;
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CGEBAK */
+
+} /* cgebak_ */
diff --git a/contrib/libs/clapack/cgebal.c b/contrib/libs/clapack/cgebal.c
new file mode 100644
index 0000000000..435e9a72c5
--- /dev/null
+++ b/contrib/libs/clapack/cgebal.c
@@ -0,0 +1,414 @@
+/* cgebal.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cgebal_(char *job, integer *n, complex *a, integer *lda,
+ integer *ilo, integer *ihi, real *scale, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double r_imag(complex *), c_abs(complex *);
+
+ /* Local variables */
+ real c__, f, g;
+ integer i__, j, k, l, m;
+ real r__, s, ca, ra;
+ integer ica, ira, iexc;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ real sfmin1, sfmin2, sfmax1, sfmax2;
+ extern integer icamax_(integer *, complex *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *), xerbla_(char *, integer *);
+ logical noconv;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGEBAL balances a general complex matrix A. This involves, first, */
+/* permuting A by a similarity transformation to isolate eigenvalues */
+/* in the first 1 to ILO-1 and last IHI+1 to N elements on the */
+/* diagonal; and second, applying a diagonal similarity transformation */
+/* to rows and columns ILO to IHI to make the rows and columns as */
+/* close in norm as possible. Both steps are optional. */
+
+/* Balancing may reduce the 1-norm of the matrix, and improve the */
+/* accuracy of the computed eigenvalues and/or eigenvectors. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies the operations to be performed on A: */
+/* = 'N': none: simply set ILO = 1, IHI = N, SCALE(I) = 1.0 */
+/* for i = 1,...,N; */
+/* = 'P': permute only; */
+/* = 'S': scale only; */
+/* = 'B': both permute and scale. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the input matrix A. */
+/* On exit, A is overwritten by the balanced matrix. */
+/* If JOB = 'N', A is not referenced. */
+/* See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* ILO (output) INTEGER */
+/* IHI (output) INTEGER */
+/* ILO and IHI are set to integers such that on exit */
+/* A(i,j) = 0 if i > j and j = 1,...,ILO-1 or I = IHI+1,...,N. */
+/* If JOB = 'N' or 'S', ILO = 1 and IHI = N. */
+
+/* SCALE (output) REAL array, dimension (N) */
+/* Details of the permutations and scaling factors applied to */
+/* A. If P(j) is the index of the row and column interchanged */
+/* with row and column j and D(j) is the scaling factor */
+/* applied to row and column j, then */
+/* SCALE(j) = P(j) for j = 1,...,ILO-1 */
+/* = D(j) for j = ILO,...,IHI */
+/* = P(j) for j = IHI+1,...,N. */
+/* The order in which the interchanges are made is N to IHI+1, */
+/* then 1 to ILO-1. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The permutations consist of row and column interchanges which put */
+/* the matrix in the form */
+
+/* ( T1 X Y ) */
+/* P A P = ( 0 B Z ) */
+/* ( 0 0 T2 ) */
+
+/* where T1 and T2 are upper triangular matrices whose eigenvalues lie */
+/* along the diagonal. The column indices ILO and IHI mark the starting */
+/* and ending columns of the submatrix B. Balancing consists of applying */
+/* a diagonal similarity transformation inv(D) * B * D to make the */
+/* 1-norms of each row of B and its corresponding column nearly equal. */
+/* The output matrix is */
+
+/* ( T1 X*D Y ) */
+/* ( 0 inv(D)*B*D inv(D)*Z ). */
+/* ( 0 0 T2 ) */
+
+/* Information about the permutations P and the diagonal matrix D is */
+/* returned in the vector SCALE. */
+
+/* This subroutine is based on the EISPACK routine CBAL. */
+
+/* Modified by Tzu-Yi Chen, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --scale;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+ && ! lsame_(job, "B")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEBAL", &i__1);
+ return 0;
+ }
+
+ k = 1;
+ l = *n;
+
+ if (*n == 0) {
+ goto L210;
+ }
+
+ if (lsame_(job, "N")) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ scale[i__] = 1.f;
+/* L10: */
+ }
+ goto L210;
+ }
+
+ if (lsame_(job, "S")) {
+ goto L120;
+ }
+
+/* Permutation to isolate eigenvalues if possible */
+
+ goto L50;
+
+/* Row and column exchange. */
+
+L20:
+ scale[m] = (real) j;
+ if (j == m) {
+ goto L30;
+ }
+
+ cswap_(&l, &a[j * a_dim1 + 1], &c__1, &a[m * a_dim1 + 1], &c__1);
+ i__1 = *n - k + 1;
+ cswap_(&i__1, &a[j + k * a_dim1], lda, &a[m + k * a_dim1], lda);
+
+L30:
+ switch (iexc) {
+ case 1: goto L40;
+ case 2: goto L80;
+ }
+
+/* Search for rows isolating an eigenvalue and push them down. */
+
+L40:
+ if (l == 1) {
+ goto L210;
+ }
+ --l;
+
+L50:
+ for (j = l; j >= 1; --j) {
+
+ i__1 = l;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (i__ == j) {
+ goto L60;
+ }
+ i__2 = j + i__ * a_dim1;
+ if (a[i__2].r != 0.f || r_imag(&a[j + i__ * a_dim1]) != 0.f) {
+ goto L70;
+ }
+L60:
+ ;
+ }
+
+ m = l;
+ iexc = 1;
+ goto L20;
+L70:
+ ;
+ }
+
+ goto L90;
+
+/* Search for columns isolating an eigenvalue and push them left. */
+
+L80:
+ ++k;
+
+L90:
+ i__1 = l;
+ for (j = k; j <= i__1; ++j) {
+
+ i__2 = l;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ if (i__ == j) {
+ goto L100;
+ }
+ i__3 = i__ + j * a_dim1;
+ if (a[i__3].r != 0.f || r_imag(&a[i__ + j * a_dim1]) != 0.f) {
+ goto L110;
+ }
+L100:
+ ;
+ }
+
+ m = k;
+ iexc = 2;
+ goto L20;
+L110:
+ ;
+ }
+
+L120:
+ i__1 = l;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ scale[i__] = 1.f;
+/* L130: */
+ }
+
+ if (lsame_(job, "P")) {
+ goto L210;
+ }
+
+/* Balance the submatrix in rows K to L. */
+
+/* Iterative loop for norm reduction */
+
+ sfmin1 = slamch_("S") / slamch_("P");
+ sfmax1 = 1.f / sfmin1;
+ sfmin2 = sfmin1 * 2.f;
+ sfmax2 = 1.f / sfmin2;
+L140:
+ noconv = FALSE_;
+
+ i__1 = l;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ c__ = 0.f;
+ r__ = 0.f;
+
+ i__2 = l;
+ for (j = k; j <= i__2; ++j) {
+ if (j == i__) {
+ goto L150;
+ }
+ i__3 = j + i__ * a_dim1;
+ c__ += (r__1 = a[i__3].r, dabs(r__1)) + (r__2 = r_imag(&a[j + i__
+ * a_dim1]), dabs(r__2));
+ i__3 = i__ + j * a_dim1;
+ r__ += (r__1 = a[i__3].r, dabs(r__1)) + (r__2 = r_imag(&a[i__ + j
+ * a_dim1]), dabs(r__2));
+L150:
+ ;
+ }
+ ica = icamax_(&l, &a[i__ * a_dim1 + 1], &c__1);
+ ca = c_abs(&a[ica + i__ * a_dim1]);
+ i__2 = *n - k + 1;
+ ira = icamax_(&i__2, &a[i__ + k * a_dim1], lda);
+ ra = c_abs(&a[i__ + (ira + k - 1) * a_dim1]);
+
+/* Guard against zero C or R due to underflow. */
+
+ if (c__ == 0.f || r__ == 0.f) {
+ goto L200;
+ }
+ g = r__ / 2.f;
+ f = 1.f;
+ s = c__ + r__;
+L160:
+/* Computing MAX */
+ r__1 = max(f,c__);
+/* Computing MIN */
+ r__2 = min(r__,g);
+ if (c__ >= g || dmax(r__1,ca) >= sfmax2 || dmin(r__2,ra) <= sfmin2) {
+ goto L170;
+ }
+ f *= 2.f;
+ c__ *= 2.f;
+ ca *= 2.f;
+ r__ /= 2.f;
+ g /= 2.f;
+ ra /= 2.f;
+ goto L160;
+
+L170:
+ g = c__ / 2.f;
+L180:
+/* Computing MIN */
+ r__1 = min(f,c__), r__1 = min(r__1,g);
+ if (g < r__ || dmax(r__,ra) >= sfmax2 || dmin(r__1,ca) <= sfmin2) {
+ goto L190;
+ }
+ f /= 2.f;
+ c__ /= 2.f;
+ g /= 2.f;
+ ca /= 2.f;
+ r__ *= 2.f;
+ ra *= 2.f;
+ goto L180;
+
+/* Now balance. */
+
+L190:
+ if (c__ + r__ >= s * .95f) {
+ goto L200;
+ }
+ if (f < 1.f && scale[i__] < 1.f) {
+ if (f * scale[i__] <= sfmin1) {
+ goto L200;
+ }
+ }
+ if (f > 1.f && scale[i__] > 1.f) {
+ if (scale[i__] >= sfmax1 / f) {
+ goto L200;
+ }
+ }
+ g = 1.f / f;
+ scale[i__] *= f;
+ noconv = TRUE_;
+
+ i__2 = *n - k + 1;
+ csscal_(&i__2, &g, &a[i__ + k * a_dim1], lda);
+ csscal_(&l, &f, &a[i__ * a_dim1 + 1], &c__1);
+
+L200:
+ ;
+ }
+
+ if (noconv) {
+ goto L140;
+ }
+
+L210:
+ *ilo = k;
+ *ihi = l;
+
+ return 0;
+
+/* End of CGEBAL */
+
+} /* cgebal_ */
diff --git a/contrib/libs/clapack/cgebd2.c b/contrib/libs/clapack/cgebd2.c
new file mode 100644
index 0000000000..0b301416aa
--- /dev/null
+++ b/contrib/libs/clapack/cgebd2.c
@@ -0,0 +1,345 @@
+/* cgebd2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cgebd2_(integer *m, integer *n, complex *a, integer *lda,
+ real *d__, real *e, complex *tauq, complex *taup, complex *work,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__;
+ complex alpha;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+, integer *, complex *, complex *, integer *, complex *),
+ clarfg_(integer *, complex *, complex *, integer *, complex *),
+ clacgv_(integer *, complex *, integer *), xerbla_(char *, integer
+ *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGEBD2 reduces a complex general m by n matrix A to upper or lower */
+/* real bidiagonal form B by a unitary transformation: Q' * A * P = B. */
+
+/* If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows in the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns in the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the m by n general matrix to be reduced. */
+/* On exit, */
+/* if m >= n, the diagonal and the first superdiagonal are */
+/* overwritten with the upper bidiagonal matrix B; the */
+/* elements below the diagonal, with the array TAUQ, represent */
+/* the unitary matrix Q as a product of elementary */
+/* reflectors, and the elements above the first superdiagonal, */
+/* with the array TAUP, represent the unitary matrix P as */
+/* a product of elementary reflectors; */
+/* if m < n, the diagonal and the first subdiagonal are */
+/* overwritten with the lower bidiagonal matrix B; the */
+/* elements below the first subdiagonal, with the array TAUQ, */
+/* represent the unitary matrix Q as a product of */
+/* elementary reflectors, and the elements above the diagonal, */
+/* with the array TAUP, represent the unitary matrix P as */
+/* a product of elementary reflectors. */
+/* See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* D (output) REAL array, dimension (min(M,N)) */
+/* The diagonal elements of the bidiagonal matrix B: */
+/* D(i) = A(i,i). */
+
+/* E (output) REAL array, dimension (min(M,N)-1) */
+/* The off-diagonal elements of the bidiagonal matrix B: */
+/* if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1; */
+/* if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1. */
+
+/* TAUQ (output) COMPLEX array dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the unitary matrix Q. See Further Details. */
+
+/* TAUP (output) COMPLEX array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the unitary matrix P. See Further Details. */
+
+/* WORK (workspace) COMPLEX array, dimension (max(M,N)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrices Q and P are represented as products of elementary */
+/* reflectors: */
+
+/* If m >= n, */
+
+/* Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1) */
+
+/* Each H(i) and G(i) has the form: */
+
+/* H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' */
+
+/* where tauq and taup are complex scalars, and v and u are complex */
+/* vectors; v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in */
+/* A(i+1:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in */
+/* A(i,i+2:n); tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* If m < n, */
+
+/* Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m) */
+
+/* Each H(i) and G(i) has the form: */
+
+/* H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' */
+
+/* where tauq and taup are complex scalars, v and u are complex vectors; */
+/* v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i); */
+/* u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n); */
+/* tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* The contents of A on exit are illustrated by the following examples: */
+
+/* m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): */
+
+/* ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 ) */
+/* ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 ) */
+/* ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 ) */
+/* ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 ) */
+/* ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 ) */
+/* ( v1 v2 v3 v4 v5 ) */
+
+/* where d and e denote diagonal and off-diagonal elements of B, vi */
+/* denotes an element of the vector defining H(i), and ui an element of */
+/* the vector defining G(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tauq;
+ --taup;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info < 0) {
+ i__1 = -(*info);
+ xerbla_("CGEBD2", &i__1);
+ return 0;
+ }
+
+ if (*m >= *n) {
+
+/* Reduce to upper bidiagonal form */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+
+ i__2 = i__ + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ clarfg_(&i__2, &alpha, &a[min(i__3, *m)+ i__ * a_dim1], &c__1, &
+ tauq[i__]);
+ i__2 = i__;
+ d__[i__2] = alpha.r;
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Apply H(i)' to A(i:m,i+1:n) from the left */
+
+ if (i__ < *n) {
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ r_cnjg(&q__1, &tauq[i__]);
+ clarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &
+ q__1, &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ }
+ i__2 = i__ + i__ * a_dim1;
+ i__3 = i__;
+ a[i__2].r = d__[i__3], a[i__2].i = 0.f;
+
+ if (i__ < *n) {
+
+/* Generate elementary reflector G(i) to annihilate */
+/* A(i,i+2:n) */
+
+ i__2 = *n - i__;
+ clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+ i__2 = i__ + (i__ + 1) * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ clarfg_(&i__2, &alpha, &a[i__ + min(i__3, *n)* a_dim1], lda, &
+ taup[i__]);
+ i__2 = i__;
+ e[i__2] = alpha.r;
+ i__2 = i__ + (i__ + 1) * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Apply G(i) to A(i+1:m,i+1:n) from the right */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ clarf_("Right", &i__2, &i__3, &a[i__ + (i__ + 1) * a_dim1],
+ lda, &taup[i__], &a[i__ + 1 + (i__ + 1) * a_dim1],
+ lda, &work[1]);
+ i__2 = *n - i__;
+ clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+ i__2 = i__ + (i__ + 1) * a_dim1;
+ i__3 = i__;
+ a[i__2].r = e[i__3], a[i__2].i = 0.f;
+ } else {
+ i__2 = i__;
+ taup[i__2].r = 0.f, taup[i__2].i = 0.f;
+ }
+/* L10: */
+ }
+ } else {
+
+/* Reduce to lower bidiagonal form */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector G(i) to annihilate A(i,i+1:n) */
+
+ i__2 = *n - i__ + 1;
+ clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+ i__2 = i__ + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *n - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ clarfg_(&i__2, &alpha, &a[i__ + min(i__3, *n)* a_dim1], lda, &
+ taup[i__]);
+ i__2 = i__;
+ d__[i__2] = alpha.r;
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Apply G(i) to A(i+1:m,i:n) from the right */
+
+ if (i__ < *m) {
+ i__2 = *m - i__;
+ i__3 = *n - i__ + 1;
+ clarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &
+ taup[i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+ }
+ i__2 = *n - i__ + 1;
+ clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+ i__2 = i__ + i__ * a_dim1;
+ i__3 = i__;
+ a[i__2].r = d__[i__3], a[i__2].i = 0.f;
+
+ if (i__ < *m) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(i+2:m,i) */
+
+ i__2 = i__ + 1 + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *m - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ clarfg_(&i__2, &alpha, &a[min(i__3, *m)+ i__ * a_dim1], &c__1,
+ &tauq[i__]);
+ i__2 = i__;
+ e[i__2] = alpha.r;
+ i__2 = i__ + 1 + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Apply H(i)' to A(i+1:m,i+1:n) from the left */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ r_cnjg(&q__1, &tauq[i__]);
+ clarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &
+ c__1, &q__1, &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &
+ work[1]);
+ i__2 = i__ + 1 + i__ * a_dim1;
+ i__3 = i__;
+ a[i__2].r = e[i__3], a[i__2].i = 0.f;
+ } else {
+ i__2 = i__;
+ tauq[i__2].r = 0.f, tauq[i__2].i = 0.f;
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of CGEBD2 */
+
+} /* cgebd2_ */
diff --git a/contrib/libs/clapack/cgebrd.c b/contrib/libs/clapack/cgebrd.c
new file mode 100644
index 0000000000..8e8e753612
--- /dev/null
+++ b/contrib/libs/clapack/cgebrd.c
@@ -0,0 +1,348 @@
+/* cgebrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int cgebrd_(integer *m, integer *n, complex *a, integer *lda,
+ real *d__, real *e, complex *tauq, complex *taup, complex *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, j, nb, nx;
+ real ws;
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *);
+ integer nbmin, iinfo, minmn;
+ extern /* Subroutine */ int cgebd2_(integer *, integer *, complex *,
+ integer *, real *, real *, complex *, complex *, complex *,
+ integer *), clabrd_(integer *, integer *, integer *, complex *,
+ integer *, real *, real *, complex *, complex *, complex *,
+ integer *, complex *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ldwrkx, ldwrky, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGEBRD reduces a general complex M-by-N matrix A to upper or lower */
+/* bidiagonal form B by a unitary transformation: Q**H * A * P = B. */
+
+/* If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows in the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns in the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N general matrix to be reduced. */
+/* On exit, */
+/* if m >= n, the diagonal and the first superdiagonal are */
+/* overwritten with the upper bidiagonal matrix B; the */
+/* elements below the diagonal, with the array TAUQ, represent */
+/* the unitary matrix Q as a product of elementary */
+/* reflectors, and the elements above the first superdiagonal, */
+/* with the array TAUP, represent the unitary matrix P as */
+/* a product of elementary reflectors; */
+/* if m < n, the diagonal and the first subdiagonal are */
+/* overwritten with the lower bidiagonal matrix B; the */
+/* elements below the first subdiagonal, with the array TAUQ, */
+/* represent the unitary matrix Q as a product of */
+/* elementary reflectors, and the elements above the diagonal, */
+/* with the array TAUP, represent the unitary matrix P as */
+/* a product of elementary reflectors. */
+/* See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* D (output) REAL array, dimension (min(M,N)) */
+/* The diagonal elements of the bidiagonal matrix B: */
+/* D(i) = A(i,i). */
+
+/* E (output) REAL array, dimension (min(M,N)-1) */
+/* The off-diagonal elements of the bidiagonal matrix B: */
+/* if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1; */
+/* if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1. */
+
+/* TAUQ (output) COMPLEX array dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the unitary matrix Q. See Further Details. */
+
+/* TAUP (output) COMPLEX array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the unitary matrix P. See Further Details. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,M,N). */
+/* For optimum performance LWORK >= (M+N)*NB, where NB */
+/* is the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrices Q and P are represented as products of elementary */
+/* reflectors: */
+
+/* If m >= n, */
+
+/* Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1) */
+
+/* Each H(i) and G(i) has the form: */
+
+/* H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' */
+
+/* where tauq and taup are complex scalars, and v and u are complex */
+/* vectors; v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in */
+/* A(i+1:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in */
+/* A(i,i+2:n); tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* If m < n, */
+
+/* Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m) */
+
+/* Each H(i) and G(i) has the form: */
+
+/* H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' */
+
+/* where tauq and taup are complex scalars, and v and u are complex */
+/* vectors; v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in */
+/* A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in */
+/* A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* The contents of A on exit are illustrated by the following examples: */
+
+/* m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): */
+
+/* ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 ) */
+/* ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 ) */
+/* ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 ) */
+/* ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 ) */
+/* ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 ) */
+/* ( v1 v2 v3 v4 v5 ) */
+
+/* where d and e denote diagonal and off-diagonal elements of B, vi */
+/* denotes an element of the vector defining H(i), and ui an element of */
+/* the vector defining G(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tauq;
+ --taup;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+/* Computing MAX */
+ i__1 = 1, i__2 = ilaenv_(&c__1, "CGEBRD", " ", m, n, &c_n1, &c_n1);
+ nb = max(i__1,i__2);
+ lwkopt = (*m + *n) * nb;
+ r__1 = (real) lwkopt;
+ work[1].r = r__1, work[1].i = 0.f;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = max(1,*m);
+ if (*lwork < max(i__1,*n) && ! lquery) {
+ *info = -10;
+ }
+ }
+ if (*info < 0) {
+ i__1 = -(*info);
+ xerbla_("CGEBRD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ minmn = min(*m,*n);
+ if (minmn == 0) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ ws = (real) max(*m,*n);
+ ldwrkx = *m;
+ ldwrky = *n;
+
+ if (nb > 1 && nb < minmn) {
+
+/* Set the crossover point NX. */
+
+/* Computing MAX */
+ i__1 = nb, i__2 = ilaenv_(&c__3, "CGEBRD", " ", m, n, &c_n1, &c_n1);
+ nx = max(i__1,i__2);
+
+/* Determine when to switch from blocked to unblocked code. */
+
+ if (nx < minmn) {
+ ws = (real) ((*m + *n) * nb);
+ if ((real) (*lwork) < ws) {
+
+/* Not enough work space for the optimal NB, consider using */
+/* a smaller block size. */
+
+ nbmin = ilaenv_(&c__2, "CGEBRD", " ", m, n, &c_n1, &c_n1);
+ if (*lwork >= (*m + *n) * nbmin) {
+ nb = *lwork / (*m + *n);
+ } else {
+ nb = 1;
+ nx = minmn;
+ }
+ }
+ }
+ } else {
+ nx = minmn;
+ }
+
+ i__1 = minmn - nx;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+
+/* Reduce rows and columns i:i+ib-1 to bidiagonal form and return */
+/* the matrices X and Y which are needed to update the unreduced */
+/* part of the matrix */
+
+ i__3 = *m - i__ + 1;
+ i__4 = *n - i__ + 1;
+ clabrd_(&i__3, &i__4, &nb, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[
+ i__], &tauq[i__], &taup[i__], &work[1], &ldwrkx, &work[ldwrkx
+ * nb + 1], &ldwrky);
+
+/* Update the trailing submatrix A(i+ib:m,i+ib:n), using */
+/* an update of the form A := A - V*Y' - X*U' */
+
+ i__3 = *m - i__ - nb + 1;
+ i__4 = *n - i__ - nb + 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "Conjugate transpose", &i__3, &i__4, &nb, &
+ q__1, &a[i__ + nb + i__ * a_dim1], lda, &work[ldwrkx * nb +
+ nb + 1], &ldwrky, &c_b1, &a[i__ + nb + (i__ + nb) * a_dim1],
+ lda);
+ i__3 = *m - i__ - nb + 1;
+ i__4 = *n - i__ - nb + 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "No transpose", &i__3, &i__4, &nb, &q__1, &
+ work[nb + 1], &ldwrkx, &a[i__ + (i__ + nb) * a_dim1], lda, &
+ c_b1, &a[i__ + nb + (i__ + nb) * a_dim1], lda);
+
+/* Copy diagonal and off-diagonal elements of B back into A */
+
+ if (*m >= *n) {
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ i__4 = j + j * a_dim1;
+ i__5 = j;
+ a[i__4].r = d__[i__5], a[i__4].i = 0.f;
+ i__4 = j + (j + 1) * a_dim1;
+ i__5 = j;
+ a[i__4].r = e[i__5], a[i__4].i = 0.f;
+/* L10: */
+ }
+ } else {
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ i__4 = j + j * a_dim1;
+ i__5 = j;
+ a[i__4].r = d__[i__5], a[i__4].i = 0.f;
+ i__4 = j + 1 + j * a_dim1;
+ i__5 = j;
+ a[i__4].r = e[i__5], a[i__4].i = 0.f;
+/* L20: */
+ }
+ }
+/* L30: */
+ }
+
+/* Use unblocked code to reduce the remainder of the matrix */
+
+ i__2 = *m - i__ + 1;
+ i__1 = *n - i__ + 1;
+ cgebd2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__], &
+ tauq[i__], &taup[i__], &work[1], &iinfo);
+ work[1].r = ws, work[1].i = 0.f;
+ return 0;
+
+/* End of CGEBRD */
+
+} /* cgebrd_ */
diff --git a/contrib/libs/clapack/cgecon.c b/contrib/libs/clapack/cgecon.c
new file mode 100644
index 0000000000..fe9d614657
--- /dev/null
+++ b/contrib/libs/clapack/cgecon.c
@@ -0,0 +1,233 @@
+/* cgecon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cgecon_(char *norm, integer *n, complex *a, integer *lda,
+	 real *anorm, real *rcond, complex *work, real *rwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1;
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double r_imag(complex *);
+
+    /* Local variables */
+    real sl;
+    integer ix;
+    real su;
+    integer kase, kase1;
+    real scale;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+	    *, integer *, integer *);
+    extern integer icamax_(integer *, complex *, integer *);
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real ainvnm;
+    extern /* Subroutine */ int clatrs_(char *, char *, char *, char *,
+	    integer *, complex *, integer *, complex *, real *, real *,
+	    integer *), csrscl_(integer *,
+	    real *, complex *, integer *);
+    logical onenrm;
+    char normin[1];
+    real smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGECON estimates the reciprocal of the condition number of a general */
+/* complex matrix A, in either the 1-norm or the infinity-norm, using */
+/* the LU factorization computed by CGETRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as */
+/* RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The factors L and U from the factorization A = P*L*U */
+/* as computed by CGETRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* ANORM (input) REAL */
+/* If NORM = '1' or 'O', the 1-norm of the original matrix A. */
+/* If NORM = 'I', the infinity-norm of the original matrix A. */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* Shift pointers so the f2c translation can use 1-based Fortran */
+    /* indexing: a(i,j) becomes a[i + j*a_dim1]. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+    if (! onenrm && ! lsame_(norm, "I")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*anorm < 0.f) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CGECON", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *rcond = 0.f;
+    if (*n == 0) {
+	*rcond = 1.f;
+	return 0;
+    } else if (*anorm == 0.f) {
+	return 0;
+    }
+
+/* SMLNUM is used below to decide when rescaling WORK would risk */
+/* overflow of the estimate vector. */
+
+    smlnum = slamch_("Safe minimum");
+
+/* Estimate the norm of inv(A). */
+
+    ainvnm = 0.f;
+/* NORMIN is 'N' for the first CLATRS call and set to 'Y' after each */
+/* pass, so later calls can reuse state from the previous solves. */
+    *(unsigned char *)normin = 'N';
+    if (onenrm) {
+	kase1 = 1;
+    } else {
+	kase1 = 2;
+    }
+    kase = 0;
+/* Reverse-communication loop: each CLACN2 call returns KASE asking */
+/* for a product with inv(A) (KASE == KASE1) or inv(A)' (otherwise), */
+/* which is formed here by triangular solves with the CGETRF factors. */
+L10:
+    clacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+    if (kase != 0) {
+	if (kase == kase1) {
+
+/* Multiply by inv(L). */
+
+	    clatrs_("Lower", "No transpose", "Unit", normin, n, &a[a_offset],
+		    lda, &work[1], &sl, &rwork[1], info);
+
+/* Multiply by inv(U). */
+
+	    clatrs_("Upper", "No transpose", "Non-unit", normin, n, &a[
+		    a_offset], lda, &work[1], &su, &rwork[*n + 1], info);
+	} else {
+
+/* Multiply by inv(U'). */
+
+	    clatrs_("Upper", "Conjugate transpose", "Non-unit", normin, n, &a[
+		    a_offset], lda, &work[1], &su, &rwork[*n + 1], info);
+
+/* Multiply by inv(L'). */
+
+	    clatrs_("Lower", "Conjugate transpose", "Unit", normin, n, &a[
+		    a_offset], lda, &work[1], &sl, &rwork[1], info);
+	}
+
+/* Divide X by 1/(SL*SU) if doing so will not cause overflow. */
+
+	scale = sl * su;
+	*(unsigned char *)normin = 'Y';
+	if (scale != 1.f) {
+	    ix = icamax_(n, &work[1], &c__1);
+	    i__1 = ix;
+	    if (scale < ((r__1 = work[i__1].r, dabs(r__1)) + (r__2 = r_imag(&
+		    work[ix]), dabs(r__2))) * smlnum || scale == 0.f) {
+		goto L20;
+	    }
+	    csrscl_(n, &scale, &work[1], &c__1);
+	}
+	goto L10;
+    }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+    if (ainvnm != 0.f) {
+	*rcond = 1.f / ainvnm / *anorm;
+    }
+
+L20:
+    return 0;
+
+/* End of CGECON */
+
+} /* cgecon_ */
diff --git a/contrib/libs/clapack/cgeequ.c b/contrib/libs/clapack/cgeequ.c
new file mode 100644
index 0000000000..84cf694706
--- /dev/null
+++ b/contrib/libs/clapack/cgeequ.c
@@ -0,0 +1,306 @@
+/* cgeequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cgeequ_(integer *m, integer *n, complex *a, integer *lda,
+	 real *r__, real *c__, real *rowcnd, real *colcnd, real *amax,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    real r__1, r__2, r__3, r__4;
+
+    /* Builtin functions */
+    double r_imag(complex *);
+
+    /* Local variables */
+    integer i__, j;
+    real rcmin, rcmax;
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real bignum, smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGEEQU computes row and column scalings intended to equilibrate an */
+/* M-by-N matrix A and reduce its condition number. R returns the row */
+/* scale factors and C the column scale factors, chosen to try to make */
+/* the largest element in each row and column of the matrix B with */
+/* elements B(i,j)=R(i)*A(i,j)*C(j) have absolute value 1. */
+
+/* R(i) and C(j) are restricted to be between SMLNUM = smallest safe */
+/* number and BIGNUM = largest safe number. Use of these scaling */
+/* factors is not guaranteed to reduce the condition number of A but */
+/* works well in practice. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The M-by-N matrix whose equilibration factors are */
+/* to be computed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* R (output) REAL array, dimension (M) */
+/* If INFO = 0 or INFO > M, R contains the row scale factors */
+/* for A. */
+
+/* C (output) REAL array, dimension (N) */
+/* If INFO = 0, C contains the column scale factors for A. */
+
+/* ROWCND (output) REAL */
+/* If INFO = 0 or INFO > M, ROWCND contains the ratio of the */
+/* smallest R(i) to the largest R(i). If ROWCND >= 0.1 and */
+/* AMAX is neither too large nor too small, it is not worth */
+/* scaling by R. */
+
+/* COLCND (output) REAL */
+/* If INFO = 0, COLCND contains the ratio of the smallest */
+/* C(i) to the largest C(i). If COLCND >= 0.1, it is not */
+/* worth scaling by C. */
+
+/* AMAX (output) REAL */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= M: the i-th row of A is exactly zero */
+/* > M: the (i-M)-th column of A is exactly zero */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* Shift pointers so the f2c translation can use 1-based Fortran */
+    /* indexing: a(i,j) becomes a[i + j*a_dim1]. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --r__;
+    --c__;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CGEEQU", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0) {
+	*rowcnd = 1.f;
+	*colcnd = 1.f;
+	*amax = 0.f;
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    smlnum = slamch_("S");
+    bignum = 1.f / smlnum;
+
+/* Compute row scale factors. */
+
+    i__1 = *m;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	r__[i__] = 0.f;
+/* L10: */
+    }
+
+/* Find the maximum element in each row. */
+/* |a(i,j)| is measured as |Re| + |Im| (cheaper than the true complex */
+/* modulus and within a factor of sqrt(2) of it). */
+
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *m;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    i__3 = i__ + j * a_dim1;
+	    r__3 = r__[i__], r__4 = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 =
+		    r_imag(&a[i__ + j * a_dim1]), dabs(r__2));
+	    r__[i__] = dmax(r__3,r__4);
+/* L20: */
+	}
+/* L30: */
+    }
+
+/* Find the maximum and minimum scale factors. */
+
+    rcmin = bignum;
+    rcmax = 0.f;
+    i__1 = *m;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	r__1 = rcmax, r__2 = r__[i__];
+	rcmax = dmax(r__1,r__2);
+/* Computing MIN */
+	r__1 = rcmin, r__2 = r__[i__];
+	rcmin = dmin(r__1,r__2);
+/* L40: */
+    }
+    *amax = rcmax;
+
+    if (rcmin == 0.f) {
+
+/* Find the first zero scale factor and return an error code. */
+
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (r__[i__] == 0.f) {
+		*info = i__;
+		return 0;
+	    }
+/* L50: */
+	}
+    } else {
+
+/* Invert the scale factors. */
+/* Each row maximum is clamped to [SMLNUM, BIGNUM] before taking the */
+/* reciprocal, so R(i) itself can neither overflow nor underflow. */
+
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MIN */
+/* Computing MAX */
+	    r__2 = r__[i__];
+	    r__1 = dmax(r__2,smlnum);
+	    r__[i__] = 1.f / dmin(r__1,bignum);
+/* L60: */
+	}
+
+/* Compute ROWCND = min(R(I)) / max(R(I)) */
+
+	*rowcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+    }
+
+/* Compute column scale factors */
+
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	c__[j] = 0.f;
+/* L70: */
+    }
+
+/* Find the maximum element in each column, */
+/* assuming the row scaling computed above. */
+
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *m;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    i__3 = i__ + j * a_dim1;
+	    r__3 = c__[j], r__4 = ((r__1 = a[i__3].r, dabs(r__1)) + (r__2 =
+		    r_imag(&a[i__ + j * a_dim1]), dabs(r__2))) * r__[i__];
+	    c__[j] = dmax(r__3,r__4);
+/* L80: */
+	}
+/* L90: */
+    }
+
+/* Find the maximum and minimum scale factors. */
+
+    rcmin = bignum;
+    rcmax = 0.f;
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+	r__1 = rcmin, r__2 = c__[j];
+	rcmin = dmin(r__1,r__2);
+/* Computing MAX */
+	r__1 = rcmax, r__2 = c__[j];
+	rcmax = dmax(r__1,r__2);
+/* L100: */
+    }
+
+    if (rcmin == 0.f) {
+
+/* Find the first zero scale factor and return an error code. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    if (c__[j] == 0.f) {
+		*info = *m + j;
+		return 0;
+	    }
+/* L110: */
+	}
+    } else {
+
+/* Invert the scale factors. */
+/* Same clamp as for R above keeps each C(j) finite and nonzero. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+/* Computing MAX */
+	    r__2 = c__[j];
+	    r__1 = dmax(r__2,smlnum);
+	    c__[j] = 1.f / dmin(r__1,bignum);
+/* L120: */
+	}
+
+/* Compute COLCND = min(C(J)) / max(C(J)) */
+
+	*colcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+    }
+
+    return 0;
+
+/* End of CGEEQU */
+
+} /* cgeequ_ */
diff --git a/contrib/libs/clapack/cgeequb.c b/contrib/libs/clapack/cgeequb.c
new file mode 100644
index 0000000000..93318d0920
--- /dev/null
+++ b/contrib/libs/clapack/cgeequb.c
@@ -0,0 +1,331 @@
+/* cgeequb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cgeequb_(integer *m, integer *n, complex *a, integer *
+	lda, real *r__, real *c__, real *rowcnd, real *colcnd, real *amax,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    real r__1, r__2, r__3, r__4;
+
+    /* Builtin functions */
+    double log(doublereal), r_imag(complex *), pow_ri(real *, integer *);
+
+    /* Local variables */
+    integer i__, j;
+    real radix, rcmin, rcmax;
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real bignum, logrdx, smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- Contributed by James Demmel, Deaglan Halligan, Yozo Hida and -- */
+/* -- Jason Riedy of Univ. of California Berkeley. -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley and NAG Ltd. -- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGEEQUB computes row and column scalings intended to equilibrate an */
+/* M-by-N matrix A and reduce its condition number. R returns the row */
+/* scale factors and C the column scale factors, chosen to try to make */
+/* the largest element in each row and column of the matrix B with */
+/* elements B(i,j)=R(i)*A(i,j)*C(j) have an absolute value of at most */
+/* the radix. */
+
+/* R(i) and C(j) are restricted to be a power of the radix between */
+/* SMLNUM = smallest safe number and BIGNUM = largest safe number. Use */
+/* of these scaling factors is not guaranteed to reduce the condition */
+/* number of A but works well in practice. */
+
+/* This routine differs from CGEEQU by restricting the scaling factors */
+/* to a power of the radix. Barring over- and underflow, scaling by */
+/* these factors introduces no additional rounding errors. However, the */
+/* scaled entries' magnitudes are no longer approximately 1 but lie */
+/* between sqrt(radix) and 1/sqrt(radix). */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The M-by-N matrix whose equilibration factors are */
+/* to be computed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* R (output) REAL array, dimension (M) */
+/* If INFO = 0 or INFO > M, R contains the row scale factors */
+/* for A. */
+
+/* C (output) REAL array, dimension (N) */
+/* If INFO = 0, C contains the column scale factors for A. */
+
+/* ROWCND (output) REAL */
+/* If INFO = 0 or INFO > M, ROWCND contains the ratio of the */
+/* smallest R(i) to the largest R(i). If ROWCND >= 0.1 and */
+/* AMAX is neither too large nor too small, it is not worth */
+/* scaling by R. */
+
+/* COLCND (output) REAL */
+/* If INFO = 0, COLCND contains the ratio of the smallest */
+/* C(i) to the largest C(i). If COLCND >= 0.1, it is not */
+/* worth scaling by C. */
+
+/* AMAX (output) REAL */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= M: the i-th row of A is exactly zero */
+/* > M: the (i-M)-th column of A is exactly zero */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* Shift pointers so the f2c translation can use 1-based Fortran */
+    /* indexing: a(i,j) becomes a[i + j*a_dim1]. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --r__;
+    --c__;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CGEEQUB", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*m == 0 || *n == 0) {
+	*rowcnd = 1.f;
+	*colcnd = 1.f;
+	*amax = 0.f;
+	return 0;
+    }
+
+/* Get machine constants. Assume SMLNUM is a power of the radix. */
+
+    smlnum = slamch_("S");
+    bignum = 1.f / smlnum;
+    radix = slamch_("B");
+    logrdx = log(radix);
+
+/* Compute row scale factors. */
+
+    i__1 = *m;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	r__[i__] = 0.f;
+/* L10: */
+    }
+
+/* Find the maximum element in each row. */
+/* |a(i,j)| is measured as |Re| + |Im|, as in CGEEQU. */
+
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *m;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    i__3 = i__ + j * a_dim1;
+	    r__3 = r__[i__], r__4 = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 =
+		    r_imag(&a[i__ + j * a_dim1]), dabs(r__2));
+	    r__[i__] = dmax(r__3,r__4);
+/* L20: */
+	}
+/* L30: */
+    }
+
+/* Round each row maximum down to a power of the radix (via */
+/* radix**floor(log_radix(r))), so that scaling by 1/R(i) only */
+/* changes exponents and is therefore exact. */
+
+    i__1 = *m;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if (r__[i__] > 0.f) {
+	    i__2 = (integer) (log(r__[i__]) / logrdx);
+	    r__[i__] = pow_ri(&radix, &i__2);
+	}
+    }
+
+/* Find the maximum and minimum scale factors. */
+
+    rcmin = bignum;
+    rcmax = 0.f;
+    i__1 = *m;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	r__1 = rcmax, r__2 = r__[i__];
+	rcmax = dmax(r__1,r__2);
+/* Computing MIN */
+	r__1 = rcmin, r__2 = r__[i__];
+	rcmin = dmin(r__1,r__2);
+/* L40: */
+    }
+    *amax = rcmax;
+
+    if (rcmin == 0.f) {
+
+/* Find the first zero scale factor and return an error code. */
+
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (r__[i__] == 0.f) {
+		*info = i__;
+		return 0;
+	    }
+/* L50: */
+	}
+    } else {
+
+/* Invert the scale factors. */
+/* Clamp to [SMLNUM, BIGNUM] first so R(i) cannot overflow or */
+/* underflow. */
+
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MIN */
+/* Computing MAX */
+	    r__2 = r__[i__];
+	    r__1 = dmax(r__2,smlnum);
+	    r__[i__] = 1.f / dmin(r__1,bignum);
+/* L60: */
+	}
+
+/* Compute ROWCND = min(R(I)) / max(R(I)). */
+
+	*rowcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+    }
+
+/* Compute column scale factors. */
+
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	c__[j] = 0.f;
+/* L70: */
+    }
+
+/* Find the maximum element in each column, */
+/* assuming the row scaling computed above. */
+
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *m;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    i__3 = i__ + j * a_dim1;
+	    r__3 = c__[j], r__4 = ((r__1 = a[i__3].r, dabs(r__1)) + (r__2 =
+		    r_imag(&a[i__ + j * a_dim1]), dabs(r__2))) * r__[i__];
+	    c__[j] = dmax(r__3,r__4);
+/* L80: */
+	}
+/* Round the column maximum down to a power of the radix, as for R. */
+	if (c__[j] > 0.f) {
+	    i__2 = (integer) (log(c__[j]) / logrdx);
+	    c__[j] = pow_ri(&radix, &i__2);
+	}
+/* L90: */
+    }
+
+/* Find the maximum and minimum scale factors. */
+
+    rcmin = bignum;
+    rcmax = 0.f;
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+	r__1 = rcmin, r__2 = c__[j];
+	rcmin = dmin(r__1,r__2);
+/* Computing MAX */
+	r__1 = rcmax, r__2 = c__[j];
+	rcmax = dmax(r__1,r__2);
+/* L100: */
+    }
+
+    if (rcmin == 0.f) {
+
+/* Find the first zero scale factor and return an error code. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    if (c__[j] == 0.f) {
+		*info = *m + j;
+		return 0;
+	    }
+/* L110: */
+	}
+    } else {
+
+/* Invert the scale factors. */
+/* Same [SMLNUM, BIGNUM] clamp as for R above. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+/* Computing MAX */
+	    r__2 = c__[j];
+	    r__1 = dmax(r__2,smlnum);
+	    c__[j] = 1.f / dmin(r__1,bignum);
+/* L120: */
+	}
+
+/* Compute COLCND = min(C(J)) / max(C(J)). */
+
+	*colcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+    }
+
+    return 0;
+
+/* End of CGEEQUB */
+
+} /* cgeequb_ */
diff --git a/contrib/libs/clapack/cgees.c b/contrib/libs/clapack/cgees.c
new file mode 100644
index 0000000000..d113eb3749
--- /dev/null
+++ b/contrib/libs/clapack/cgees.c
@@ -0,0 +1,404 @@
+/* cgees.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cgees_(char *jobvs, char *sort, L_fp select, integer *n,
+ complex *a, integer *lda, integer *sdim, complex *w, complex *vs,
+ integer *ldvs, complex *work, integer *lwork, real *rwork, logical *
+ bwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, vs_dim1, vs_offset, i__1, i__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ real s;
+ integer ihi, ilo;
+ real dum[1], eps, sep;
+ integer ibal;
+ real anrm;
+ integer ierr, itau, iwrk, icond, ieval;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), cgebak_(char *, char *, integer *, integer
+ *, integer *, real *, integer *, complex *, integer *, integer *), cgebal_(char *, integer *, complex *, integer *,
+ integer *, integer *, real *, integer *), slabad_(real *,
+ real *);
+ logical scalea;
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ real cscale;
+ extern /* Subroutine */ int cgehrd_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, integer *),
+ clascl_(char *, integer *, integer *, real *, real *, integer *,
+ integer *, complex *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), xerbla_(char *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ real bignum;
+ extern /* Subroutine */ int chseqr_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *), cunghr_(integer
+ *, integer *, integer *, complex *, integer *, complex *, complex
+ *, integer *, integer *), ctrsen_(char *, char *, logical *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *, real *, real *, complex *, integer *, integer *);
+ integer minwrk, maxwrk;
+ real smlnum;
+ integer hswork;
+ logical wantst, lquery, wantvs;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+/* .. Function Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGEES computes for an N-by-N complex nonsymmetric matrix A, the */
+/* eigenvalues, the Schur form T, and, optionally, the matrix of Schur */
+/* vectors Z. This gives the Schur factorization A = Z*T*(Z**H). */
+
+/* Optionally, it also orders the eigenvalues on the diagonal of the */
+/* Schur form so that selected eigenvalues are at the top left. */
+/* The leading columns of Z then form an orthonormal basis for the */
+/* invariant subspace corresponding to the selected eigenvalues. */
+/* A complex matrix is in Schur form if it is upper triangular. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVS (input) CHARACTER*1 */
+/* = 'N': Schur vectors are not computed; */
+/* = 'V': Schur vectors are computed. */
+
+/* SORT (input) CHARACTER*1 */
+/* Specifies whether or not to order the eigenvalues on the */
+/* diagonal of the Schur form. */
+/* = 'N': Eigenvalues are not ordered: */
+/* = 'S': Eigenvalues are ordered (see SELECT). */
+
+/* SELECT (external procedure) LOGICAL FUNCTION of one COMPLEX argument */
+/* SELECT must be declared EXTERNAL in the calling subroutine. */
+/* If SORT = 'S', SELECT is used to select eigenvalues to order */
+/* to the top left of the Schur form. */
+/* IF SORT = 'N', SELECT is not referenced. */
+/* The eigenvalue W(j) is selected if SELECT(W(j)) is true. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the N-by-N matrix A. */
+/* On exit, A has been overwritten by its Schur form T. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* SDIM (output) INTEGER */
+/* If SORT = 'N', SDIM = 0. */
+/* If SORT = 'S', SDIM = number of eigenvalues for which */
+/* SELECT is true. */
+
+/* W (output) COMPLEX array, dimension (N) */
+/* W contains the computed eigenvalues, in the same order that */
+/* they appear on the diagonal of the output Schur form T. */
+
+/* VS (output) COMPLEX array, dimension (LDVS,N) */
+/* If JOBVS = 'V', VS contains the unitary matrix Z of Schur */
+/* vectors. */
+/* If JOBVS = 'N', VS is not referenced. */
+
+/* LDVS (input) INTEGER */
+/* The leading dimension of the array VS. LDVS >= 1; if */
+/* JOBVS = 'V', LDVS >= N. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,2*N). */
+/* For good performance, LWORK must generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* BWORK (workspace) LOGICAL array, dimension (N) */
+/* Not referenced if SORT = 'N'. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, and i is */
+/* <= N: the QR algorithm failed to compute all the */
+/* eigenvalues; elements 1:ILO-1 and i+1:N of W */
+/* contain those eigenvalues which have converged; */
+/* if JOBVS = 'V', VS contains the matrix which */
+/* reduces A to its partially converged Schur form. */
+/* = N+1: the eigenvalues could not be reordered because */
+/* some eigenvalues were too close to separate (the */
+/* problem is very ill-conditioned); */
+/* = N+2: after reordering, roundoff changed values of */
+/* some complex eigenvalues so that leading */
+/* eigenvalues in the Schur form no longer satisfy */
+/* SELECT = .TRUE.. This could also be caused by */
+/* underflow due to scaling. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --w;
+ vs_dim1 = *ldvs;
+ vs_offset = 1 + vs_dim1;
+ vs -= vs_offset;
+ --work;
+ --rwork;
+ --bwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ wantvs = lsame_(jobvs, "V");
+ wantst = lsame_(sort, "S");
+ if (! wantvs && ! lsame_(jobvs, "N")) {
+ *info = -1;
+ } else if (! wantst && ! lsame_(sort, "N")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else if (*ldvs < 1 || wantvs && *ldvs < *n) {
+ *info = -10;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* CWorkspace refers to complex workspace, and RWorkspace to real */
+/* workspace. NB refers to the optimal block size for the */
+/* immediately following subroutine, as returned by ILAENV. */
+/* HSWORK refers to the workspace preferred by CHSEQR, as */
+/* calculated below. HSWORK is computed assuming ILO=1 and IHI=N, */
+/* the worst case.) */
+
+ if (*info == 0) {
+ if (*n == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ } else {
+ maxwrk = *n + *n * ilaenv_(&c__1, "CGEHRD", " ", n, &c__1, n, &
+ c__0);
+ minwrk = *n << 1;
+
+ chseqr_("S", jobvs, n, &c__1, n, &a[a_offset], lda, &w[1], &vs[
+ vs_offset], ldvs, &work[1], &c_n1, &ieval);
+ hswork = work[1].r;
+
+ if (! wantvs) {
+ maxwrk = max(maxwrk,hswork);
+ } else {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + (*n - 1) * ilaenv_(&c__1, "CUNGHR",
+ " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ maxwrk = max(maxwrk,hswork);
+ }
+ }
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEES ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *sdim = 0;
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = clange_("M", n, n, &a[a_offset], lda, dum);
+ scalea = FALSE_;
+ if (anrm > 0.f && anrm < smlnum) {
+ scalea = TRUE_;
+ cscale = smlnum;
+ } else if (anrm > bignum) {
+ scalea = TRUE_;
+ cscale = bignum;
+ }
+ if (scalea) {
+ clascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Permute the matrix to make it more nearly triangular */
+/* (CWorkspace: none) */
+/* (RWorkspace: need N) */
+
+ ibal = 1;
+ cgebal_("P", n, &a[a_offset], lda, &ilo, &ihi, &rwork[ibal], &ierr);
+
+/* Reduce to upper Hessenberg form */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: none) */
+
+ itau = 1;
+ iwrk = *n + itau;
+ i__1 = *lwork - iwrk + 1;
+ cgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1,
+ &ierr);
+
+ if (wantvs) {
+
+/* Copy Householder vectors to VS */
+
+ clacpy_("L", n, n, &a[a_offset], lda, &vs[vs_offset], ldvs)
+ ;
+
+/* Generate unitary matrix in VS */
+/* (CWorkspace: need 2*N-1, prefer N+(N-1)*NB) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwrk + 1;
+ cunghr_(n, &ilo, &ihi, &vs[vs_offset], ldvs, &work[itau], &work[iwrk],
+ &i__1, &ierr);
+ }
+
+ *sdim = 0;
+
+/* Perform QR iteration, accumulating Schur vectors in VS if desired */
+/* (CWorkspace: need 1, prefer HSWORK (see comments) ) */
+/* (RWorkspace: none) */
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ chseqr_("S", jobvs, n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vs[
+ vs_offset], ldvs, &work[iwrk], &i__1, &ieval);
+ if (ieval > 0) {
+ *info = ieval;
+ }
+
+/* Sort eigenvalues if desired */
+
+ if (wantst && *info == 0) {
+ if (scalea) {
+ clascl_("G", &c__0, &c__0, &cscale, &anrm, n, &c__1, &w[1], n, &
+ ierr);
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ bwork[i__] = (*select)(&w[i__]);
+/* L10: */
+ }
+
+/* Reorder eigenvalues and transform Schur vectors */
+/* (CWorkspace: none) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwrk + 1;
+ ctrsen_("N", jobvs, &bwork[1], n, &a[a_offset], lda, &vs[vs_offset],
+ ldvs, &w[1], sdim, &s, &sep, &work[iwrk], &i__1, &icond);
+ }
+
+ if (wantvs) {
+
+/* Undo balancing */
+/* (CWorkspace: none) */
+/* (RWorkspace: need N) */
+
+ cgebak_("P", "R", n, &ilo, &ihi, &rwork[ibal], n, &vs[vs_offset],
+ ldvs, &ierr);
+ }
+
+ if (scalea) {
+
+/* Undo scaling for the Schur form of A */
+
+ clascl_("U", &c__0, &c__0, &cscale, &anrm, n, n, &a[a_offset], lda, &
+ ierr);
+ i__1 = *lda + 1;
+ ccopy_(n, &a[a_offset], &i__1, &w[1], &c__1);
+ }
+
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+ return 0;
+
+/* End of CGEES */
+
+} /* cgees_ */
diff --git a/contrib/libs/clapack/cgeesx.c b/contrib/libs/clapack/cgeesx.c
new file mode 100644
index 0000000000..ea9c2366c6
--- /dev/null
+++ b/contrib/libs/clapack/cgeesx.c
@@ -0,0 +1,472 @@
+/* cgeesx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cgeesx_(char *jobvs, char *sort, L_fp select, char *
+ sense, integer *n, complex *a, integer *lda, integer *sdim, complex *
+ w, complex *vs, integer *ldvs, real *rconde, real *rcondv, complex *
+ work, integer *lwork, real *rwork, logical *bwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, vs_dim1, vs_offset, i__1, i__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, ihi, ilo;
+ real dum[1], eps;
+ integer ibal;
+ real anrm;
+ integer ierr, itau, iwrk, lwrk, icond, ieval;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), cgebak_(char *, char *, integer *, integer
+ *, integer *, real *, integer *, complex *, integer *, integer *), cgebal_(char *, integer *, complex *, integer *,
+ integer *, integer *, real *, integer *), slabad_(real *,
+ real *);
+ logical scalea;
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ real cscale;
+ extern /* Subroutine */ int cgehrd_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, integer *),
+ clascl_(char *, integer *, integer *, real *, real *, integer *,
+ integer *, complex *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), xerbla_(char *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), chseqr_(char *, char *, integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, complex *,
+ integer *, integer *), cunghr_(integer *, integer
+ *, integer *, complex *, integer *, complex *, complex *, integer
+ *, integer *);
+ logical wantsb;
+ extern /* Subroutine */ int ctrsen_(char *, char *, logical *, integer *,
+ complex *, integer *, complex *, integer *, complex *, integer *,
+ real *, real *, complex *, integer *, integer *);
+ logical wantse;
+ integer minwrk, maxwrk;
+ logical wantsn;
+ real smlnum;
+ integer hswork;
+ logical wantst, wantsv, wantvs;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+/* .. Function Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGEESX computes for an N-by-N complex nonsymmetric matrix A, the */
+/* eigenvalues, the Schur form T, and, optionally, the matrix of Schur */
+/* vectors Z. This gives the Schur factorization A = Z*T*(Z**H). */
+
+/* Optionally, it also orders the eigenvalues on the diagonal of the */
+/* Schur form so that selected eigenvalues are at the top left; */
+/* computes a reciprocal condition number for the average of the */
+/* selected eigenvalues (RCONDE); and computes a reciprocal condition */
+/* number for the right invariant subspace corresponding to the */
+/* selected eigenvalues (RCONDV). The leading columns of Z form an */
+/* orthonormal basis for this invariant subspace. */
+
+/* For further explanation of the reciprocal condition numbers RCONDE */
+/* and RCONDV, see Section 4.10 of the LAPACK Users' Guide (where */
+/* these quantities are called s and sep respectively). */
+
+/* A complex matrix is in Schur form if it is upper triangular. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVS (input) CHARACTER*1 */
+/* = 'N': Schur vectors are not computed; */
+/* = 'V': Schur vectors are computed. */
+
+/* SORT (input) CHARACTER*1 */
+/* Specifies whether or not to order the eigenvalues on the */
+/* diagonal of the Schur form. */
+/* = 'N': Eigenvalues are not ordered; */
+/* = 'S': Eigenvalues are ordered (see SELECT). */
+
+/* SELECT (external procedure) LOGICAL FUNCTION of one COMPLEX argument */
+/* SELECT must be declared EXTERNAL in the calling subroutine. */
+/* If SORT = 'S', SELECT is used to select eigenvalues to order */
+/* to the top left of the Schur form. */
+/* If SORT = 'N', SELECT is not referenced. */
+/* An eigenvalue W(j) is selected if SELECT(W(j)) is true. */
+
+/* SENSE (input) CHARACTER*1 */
+/* Determines which reciprocal condition numbers are computed. */
+/* = 'N': None are computed; */
+/* = 'E': Computed for average of selected eigenvalues only; */
+/* = 'V': Computed for selected right invariant subspace only; */
+/* = 'B': Computed for both. */
+/* If SENSE = 'E', 'V' or 'B', SORT must equal 'S'. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA, N) */
+/* On entry, the N-by-N matrix A. */
+/* On exit, A is overwritten by its Schur form T. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* SDIM (output) INTEGER */
+/* If SORT = 'N', SDIM = 0. */
+/* If SORT = 'S', SDIM = number of eigenvalues for which */
+/* SELECT is true. */
+
+/* W (output) COMPLEX array, dimension (N) */
+/* W contains the computed eigenvalues, in the same order */
+/* that they appear on the diagonal of the output Schur form T. */
+
+/* VS (output) COMPLEX array, dimension (LDVS,N) */
+/* If JOBVS = 'V', VS contains the unitary matrix Z of Schur */
+/* vectors. */
+/* If JOBVS = 'N', VS is not referenced. */
+
+/* LDVS (input) INTEGER */
+/* The leading dimension of the array VS. LDVS >= 1, and if */
+/* JOBVS = 'V', LDVS >= N. */
+
+/* RCONDE (output) REAL */
+/* If SENSE = 'E' or 'B', RCONDE contains the reciprocal */
+/* condition number for the average of the selected eigenvalues. */
+/* Not referenced if SENSE = 'N' or 'V'. */
+
+/* RCONDV (output) REAL */
+/* If SENSE = 'V' or 'B', RCONDV contains the reciprocal */
+/* condition number for the selected right invariant subspace. */
+/* Not referenced if SENSE = 'N' or 'E'. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,2*N). */
+/* Also, if SENSE = 'E' or 'V' or 'B', LWORK >= 2*SDIM*(N-SDIM), */
+/* where SDIM is the number of selected eigenvalues computed by */
+/* this routine. Note that 2*SDIM*(N-SDIM) <= N*N/2. Note also */
+/* that an error is only returned if LWORK < max(1,2*N), but if */
+/* SENSE = 'E' or 'V' or 'B' this may not be large enough. */
+/* For good performance, LWORK must generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates upper bound on the optimal size of the */
+/* array WORK, returns this value as the first entry of the WORK */
+/* array, and no error message related to LWORK is issued by */
+/* XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* BWORK (workspace) LOGICAL array, dimension (N) */
+/* Not referenced if SORT = 'N'. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, and i is */
+/* <= N: the QR algorithm failed to compute all the */
+/* eigenvalues; elements 1:ILO-1 and i+1:N of W */
+/* contain those eigenvalues which have converged; if */
+/* JOBVS = 'V', VS contains the transformation which */
+/* reduces A to its partially converged Schur form. */
+/* = N+1: the eigenvalues could not be reordered because some */
+/* eigenvalues were too close to separate (the problem */
+/* is very ill-conditioned); */
+/* = N+2: after reordering, roundoff changed values of some */
+/* complex eigenvalues so that leading eigenvalues in */
+/* the Schur form no longer satisfy SELECT=.TRUE. This */
+/* could also be caused by underflow due to scaling. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --w;
+ vs_dim1 = *ldvs;
+ vs_offset = 1 + vs_dim1;
+ vs -= vs_offset;
+ --work;
+ --rwork;
+ --bwork;
+
+ /* Function Body */
+ *info = 0;
+ wantvs = lsame_(jobvs, "V");
+ wantst = lsame_(sort, "S");
+ wantsn = lsame_(sense, "N");
+ wantse = lsame_(sense, "E");
+ wantsv = lsame_(sense, "V");
+ wantsb = lsame_(sense, "B");
+ if (! wantvs && ! lsame_(jobvs, "N")) {
+ *info = -1;
+ } else if (! wantst && ! lsame_(sort, "N")) {
+ *info = -2;
+ } else if (! (wantsn || wantse || wantsv || wantsb) || ! wantst && !
+ wantsn) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldvs < 1 || wantvs && *ldvs < *n) {
+ *info = -11;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of real workspace needed at that point in the */
+/* code, as well as the preferred amount for good performance. */
+/* CWorkspace refers to complex workspace, and RWorkspace to real */
+/* workspace. NB refers to the optimal block size for the */
+/* immediately following subroutine, as returned by ILAENV. */
+/* HSWORK refers to the workspace preferred by CHSEQR, as */
+/* calculated below. HSWORK is computed assuming ILO=1 and IHI=N, */
+/* the worst case. */
+/* If SENSE = 'E', 'V' or 'B', then the amount of workspace needed */
+/* depends on SDIM, which is computed by the routine CTRSEN later */
+/* in the code.) */
+
+ if (*info == 0) {
+ if (*n == 0) {
+ minwrk = 1;
+ lwrk = 1;
+ } else {
+ maxwrk = *n + *n * ilaenv_(&c__1, "CGEHRD", " ", n, &c__1, n, &
+ c__0);
+ minwrk = *n << 1;
+
+ chseqr_("S", jobvs, n, &c__1, n, &a[a_offset], lda, &w[1], &vs[
+ vs_offset], ldvs, &work[1], &c_n1, &ieval);
+ hswork = work[1].r;
+
+ if (! wantvs) {
+ maxwrk = max(maxwrk,hswork);
+ } else {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + (*n - 1) * ilaenv_(&c__1, "CUNGHR",
+ " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ maxwrk = max(maxwrk,hswork);
+ }
+ lwrk = maxwrk;
+ if (! wantsn) {
+/* Computing MAX */
+ i__1 = lwrk, i__2 = *n * *n / 2;
+ lwrk = max(i__1,i__2);
+ }
+ }
+ work[1].r = (real) lwrk, work[1].i = 0.f;
+
+ if (*lwork < minwrk) {
+ *info = -15;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEESX", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *sdim = 0;
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = clange_("M", n, n, &a[a_offset], lda, dum);
+ scalea = FALSE_;
+ if (anrm > 0.f && anrm < smlnum) {
+ scalea = TRUE_;
+ cscale = smlnum;
+ } else if (anrm > bignum) {
+ scalea = TRUE_;
+ cscale = bignum;
+ }
+ if (scalea) {
+ clascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+
+/* Permute the matrix to make it more nearly triangular */
+/* (CWorkspace: none) */
+/* (RWorkspace: need N) */
+
+ ibal = 1;
+ cgebal_("P", n, &a[a_offset], lda, &ilo, &ihi, &rwork[ibal], &ierr);
+
+/* Reduce to upper Hessenberg form */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: none) */
+
+ itau = 1;
+ iwrk = *n + itau;
+ i__1 = *lwork - iwrk + 1;
+ cgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1,
+ &ierr);
+
+ if (wantvs) {
+
+/* Copy Householder vectors to VS */
+
+ clacpy_("L", n, n, &a[a_offset], lda, &vs[vs_offset], ldvs)
+ ;
+
+/* Generate unitary matrix in VS */
+/* (CWorkspace: need 2*N-1, prefer N+(N-1)*NB) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwrk + 1;
+ cunghr_(n, &ilo, &ihi, &vs[vs_offset], ldvs, &work[itau], &work[iwrk],
+ &i__1, &ierr);
+ }
+
+ *sdim = 0;
+
+/* Perform QR iteration, accumulating Schur vectors in VS if desired */
+/* (CWorkspace: need 1, prefer HSWORK (see comments) ) */
+/* (RWorkspace: none) */
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ chseqr_("S", jobvs, n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vs[
+ vs_offset], ldvs, &work[iwrk], &i__1, &ieval);
+ if (ieval > 0) {
+ *info = ieval;
+ }
+
+/* Sort eigenvalues if desired */
+
+ if (wantst && *info == 0) {
+ if (scalea) {
+ clascl_("G", &c__0, &c__0, &cscale, &anrm, n, &c__1, &w[1], n, &
+ ierr);
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ bwork[i__] = (*select)(&w[i__]);
+/* L10: */
+ }
+
+/* Reorder eigenvalues, transform Schur vectors, and compute */
+/* reciprocal condition numbers */
+/* (CWorkspace: if SENSE is not 'N', need 2*SDIM*(N-SDIM) */
+/* otherwise, need none ) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwrk + 1;
+ ctrsen_(sense, jobvs, &bwork[1], n, &a[a_offset], lda, &vs[vs_offset],
+ ldvs, &w[1], sdim, rconde, rcondv, &work[iwrk], &i__1, &
+ icond);
+ if (! wantsn) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*sdim << 1) * (*n - *sdim);
+ maxwrk = max(i__1,i__2);
+ }
+ if (icond == -14) {
+
+/* Not enough complex workspace */
+
+ *info = -15;
+ }
+ }
+
+ if (wantvs) {
+
+/* Undo balancing */
+/* (CWorkspace: none) */
+/* (RWorkspace: need N) */
+
+ cgebak_("P", "R", n, &ilo, &ihi, &rwork[ibal], n, &vs[vs_offset],
+ ldvs, &ierr);
+ }
+
+ if (scalea) {
+
+/* Undo scaling for the Schur form of A */
+
+ clascl_("U", &c__0, &c__0, &cscale, &anrm, n, n, &a[a_offset], lda, &
+ ierr);
+ i__1 = *lda + 1;
+ ccopy_(n, &a[a_offset], &i__1, &w[1], &c__1);
+ if ((wantsv || wantsb) && *info == 0) {
+ dum[0] = *rcondv;
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &c__1, &c__1, dum, &
+ c__1, &ierr);
+ *rcondv = dum[0];
+ }
+ }
+
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+ return 0;
+
+/* End of CGEESX */
+
+} /* cgeesx_ */
diff --git a/contrib/libs/clapack/cgeev.c b/contrib/libs/clapack/cgeev.c
new file mode 100644
index 0000000000..81a7b98aef
--- /dev/null
+++ b/contrib/libs/clapack/cgeev.c
@@ -0,0 +1,529 @@
+/* cgeev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cgeev_(char *jobvl, char *jobvr, integer *n, complex *a, 
+	 integer *lda, complex *w, complex *vl, integer *ldvl, complex *vr, 
+	integer *ldvr, complex *work, integer *lwork, real *rwork, integer *
+	info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1, 
+	    i__2, i__3;
+    real r__1, r__2;
+    complex q__1, q__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal), r_imag(complex *);
+    void r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    integer i__, k, ihi;
+    real scl;
+    integer ilo;
+    real dum[1], eps;
+    complex tmp;
+    integer ibal;
+    char side[1];
+    real anrm;
+    integer ierr, itau, iwrk, nout;
+    extern /* Subroutine */ int cscal_(integer *, complex *, complex *, 
+	    integer *);
+    extern logical lsame_(char *, char *);
+    extern doublereal scnrm2_(integer *, complex *, integer *);
+    extern /* Subroutine */ int cgebak_(char *, char *, integer *, integer *, 
+	    integer *, real *, integer *, complex *, integer *, integer *), cgebal_(char *, integer *, complex *, integer *, 
+	    integer *, integer *, real *, integer *), slabad_(real *, 
+	    real *);
+    logical scalea;
+    extern doublereal clange_(char *, integer *, integer *, complex *, 
+	    integer *, real *);
+    real cscale;
+    extern /* Subroutine */ int cgehrd_(integer *, integer *, integer *, 
+	    complex *, integer *, complex *, complex *, integer *, integer *),
+	     clascl_(char *, integer *, integer *, real *, real *, integer *, 
+	    integer *, complex *, integer *, integer *);
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer 
+	    *), clacpy_(char *, integer *, integer *, complex *, integer *, 
+	    complex *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    logical select[1];
+    real bignum;
+    extern integer isamax_(integer *, real *, integer *);
+    extern /* Subroutine */ int chseqr_(char *, char *, integer *, integer *, 
+	    integer *, complex *, integer *, complex *, complex *, integer *, 
+	    complex *, integer *, integer *), ctrevc_(char *, 
+	    char *, logical *, integer *, complex *, integer *, complex *, 
+	    integer *, complex *, integer *, integer *, integer *, complex *, 
+	    real *, integer *), cunghr_(integer *, integer *, 
+	    integer *, complex *, integer *, complex *, complex *, integer *, 
+	    integer *);
+    integer minwrk, maxwrk;
+    logical wantvl;
+    real smlnum;
+    integer hswork, irwork;
+    logical lquery, wantvr;
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CGEEV computes for an N-by-N complex nonsymmetric matrix A, the */
+/*  eigenvalues and, optionally, the left and/or right eigenvectors. */
+
+/*  The right eigenvector v(j) of A satisfies */
+/*                   A * v(j) = lambda(j) * v(j) */
+/*  where lambda(j) is its eigenvalue. */
+/*  The left eigenvector u(j) of A satisfies */
+/*                u(j)**H * A = lambda(j) * u(j)**H */
+/*  where u(j)**H denotes the conjugate transpose of u(j). */
+
+/*  The computed eigenvectors are normalized to have Euclidean norm */
+/*  equal to 1 and largest component real. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOBVL   (input) CHARACTER*1 */
+/*          = 'N': left eigenvectors of A are not computed; */
+/*          = 'V': left eigenvectors of A are computed. */
+
+/*  JOBVR   (input) CHARACTER*1 */
+/*          = 'N': right eigenvectors of A are not computed; */
+/*          = 'V': right eigenvectors of A are computed. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A. N >= 0. */
+
+/*  A       (input/output) COMPLEX array, dimension (LDA,N) */
+/*          On entry, the N-by-N matrix A. */
+/*          On exit, A has been overwritten. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  W       (output) COMPLEX array, dimension (N) */
+/*          W contains the computed eigenvalues. */
+
+/*  VL      (output) COMPLEX array, dimension (LDVL,N) */
+/*          If JOBVL = 'V', the left eigenvectors u(j) are stored one */
+/*          after another in the columns of VL, in the same order */
+/*          as their eigenvalues. */
+/*          If JOBVL = 'N', VL is not referenced. */
+/*          u(j) = VL(:,j), the j-th column of VL. */
+
+/*  LDVL    (input) INTEGER */
+/*          The leading dimension of the array VL.  LDVL >= 1; if */
+/*          JOBVL = 'V', LDVL >= N. */
+
+/*  VR      (output) COMPLEX array, dimension (LDVR,N) */
+/*          If JOBVR = 'V', the right eigenvectors v(j) are stored one */
+/*          after another in the columns of VR, in the same order */
+/*          as their eigenvalues. */
+/*          If JOBVR = 'N', VR is not referenced. */
+/*          v(j) = VR(:,j), the j-th column of VR. */
+
+/*  LDVR    (input) INTEGER */
+/*          The leading dimension of the array VR.  LDVR >= 1; if */
+/*          JOBVR = 'V', LDVR >= N. */
+
+/*  WORK    (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/*          On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/*  LWORK   (input) INTEGER */
+/*          The dimension of the array WORK.  LWORK >= max(1,2*N). */
+/*          For good performance, LWORK must generally be larger. */
+
+/*          If LWORK = -1, then a workspace query is assumed; the routine */
+/*          only calculates the optimal size of the WORK array, returns */
+/*          this value as the first entry of the WORK array, and no error */
+/*          message related to LWORK is issued by XERBLA. */
+
+/*  RWORK   (workspace) REAL array, dimension (2*N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */
+/*          > 0:  if INFO = i, the QR algorithm failed to compute all the */
+/*                eigenvalues, and no eigenvectors have been computed; */
+/*                elements i+1:N of W contain eigenvalues which have */
+/*                converged. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --w;
+    vl_dim1 = *ldvl;
+    vl_offset = 1 + vl_dim1;
+    vl -= vl_offset;
+    vr_dim1 = *ldvr;
+    vr_offset = 1 + vr_dim1;
+    vr -= vr_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1;
+    wantvl = lsame_(jobvl, "V");
+    wantvr = lsame_(jobvr, "V");
+    if (! wantvl && ! lsame_(jobvl, "N")) {
+	*info = -1;
+    } else if (! wantvr && ! lsame_(jobvr, "N")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldvl < 1 || wantvl && *ldvl < *n) {
+	*info = -8;
+    } else if (*ldvr < 1 || wantvr && *ldvr < *n) {
+	*info = -10;
+    }
+
+/*     Compute workspace */
+/*      (Note: Comments in the code beginning "Workspace:" describe the */
+/*       minimal amount of workspace needed at that point in the code, */
+/*       as well as the preferred amount for good performance. */
+/*       CWorkspace refers to complex workspace, and RWorkspace to real */
+/*       workspace. NB refers to the optimal block size for the */
+/*       immediately following subroutine, as returned by ILAENV. */
+/*       HSWORK refers to the workspace preferred by CHSEQR, as */
+/*       calculated below. HSWORK is computed assuming ILO=1 and IHI=N, */
+/*       the worst case.) */
+
+    if (*info == 0) {
+	if (*n == 0) {
+	    minwrk = 1;
+	    maxwrk = 1;
+	} else {
+	    maxwrk = *n + *n * ilaenv_(&c__1, "CGEHRD", " ", n, &c__1, n, &
+		    c__0);
+	    minwrk = *n << 1;
+	    if (wantvl) {
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *n + (*n - 1) * ilaenv_(&c__1, "CUNGHR", 
+			" ", n, &c__1, n, &c_n1);
+		maxwrk = max(i__1,i__2);
+		chseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &w[1], &vl[
+			vl_offset], ldvl, &work[1], &c_n1, info);
+	    } else if (wantvr) {
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *n + (*n - 1) * ilaenv_(&c__1, "CUNGHR", 
+			" ", n, &c__1, n, &c_n1);
+		maxwrk = max(i__1,i__2);
+		chseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &w[1], &vr[
+			vr_offset], ldvr, &work[1], &c_n1, info);
+	    } else {
+		chseqr_("E", "N", n, &c__1, n, &a[a_offset], lda, &w[1], &vr[
+			vr_offset], ldvr, &work[1], &c_n1, info);
+	    }
+	    hswork = work[1].r;
+/* Computing MAX */
+	    i__1 = max(maxwrk,hswork);
+	    maxwrk = max(i__1,minwrk);
+	}
+	work[1].r = (real) maxwrk, work[1].i = 0.f;
+
+	if (*lwork < minwrk && ! lquery) {
+	    *info = -12;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CGEEV ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Get machine constants */
+
+    eps = slamch_("P");
+    smlnum = slamch_("S");
+    bignum = 1.f / smlnum;
+    slabad_(&smlnum, &bignum);
+    smlnum = sqrt(smlnum) / eps;
+    bignum = 1.f / smlnum;
+
+/*     Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+    anrm = clange_("M", n, n, &a[a_offset], lda, dum);
+    scalea = FALSE_;
+    if (anrm > 0.f && anrm < smlnum) {
+	scalea = TRUE_;
+	cscale = smlnum;
+    } else if (anrm > bignum) {
+	scalea = TRUE_;
+	cscale = bignum;
+    }
+    if (scalea) {
+	clascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+		ierr);
+    }
+
+/*     Balance the matrix */
+/*     (CWorkspace: none) */
+/*     (RWorkspace: need N) */
+
+    ibal = 1;
+    cgebal_("B", n, &a[a_offset], lda, &ilo, &ihi, &rwork[ibal], &ierr);
+
+/*     Reduce to upper Hessenberg form */
+/*     (CWorkspace: need 2*N, prefer N+N*NB) */
+/*     (RWorkspace: none) */
+
+    itau = 1;
+    iwrk = itau + *n;
+    i__1 = *lwork - iwrk + 1;
+    cgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1,
+	     &ierr);
+
+    if (wantvl) {
+
+/*        Want left eigenvectors */
+/*        Copy Householder vectors to VL */
+
+	*(unsigned char *)side = 'L';
+	clacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl)
+		;
+
+/*        Generate unitary matrix in VL */
+/*        (CWorkspace: need 2*N-1, prefer N+(N-1)*NB) */
+/*        (RWorkspace: none) */
+
+	i__1 = *lwork - iwrk + 1;
+	cunghr_(n, &ilo, &ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk],
+		 &i__1, &ierr);
+
+/*        Perform QR iteration, accumulating Schur vectors in VL */
+/*        (CWorkspace: need 1, prefer HSWORK (see comments) ) */
+/*        (RWorkspace: none) */
+
+	iwrk = itau;
+	i__1 = *lwork - iwrk + 1;
+	chseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vl[
+		vl_offset], ldvl, &work[iwrk], &i__1, info);
+
+	if (wantvr) {
+
+/*           Want left and right eigenvectors */
+/*           Copy Schur vectors to VR */
+
+	    *(unsigned char *)side = 'B';
+	    clacpy_("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr);
+	}
+
+    } else if (wantvr) {
+
+/*        Want right eigenvectors */
+/*        Copy Householder vectors to VR */
+
+	*(unsigned char *)side = 'R';
+	clacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr)
+		;
+
+/*        Generate unitary matrix in VR */
+/*        (CWorkspace: need 2*N-1, prefer N+(N-1)*NB) */
+/*        (RWorkspace: none) */
+
+	i__1 = *lwork - iwrk + 1;
+	cunghr_(n, &ilo, &ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk],
+		 &i__1, &ierr);
+
+/*        Perform QR iteration, accumulating Schur vectors in VR */
+/*        (CWorkspace: need 1, prefer HSWORK (see comments) ) */
+/*        (RWorkspace: none) */
+
+	iwrk = itau;
+	i__1 = *lwork - iwrk + 1;
+	chseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vr[
+		vr_offset], ldvr, &work[iwrk], &i__1, info);
+
+    } else {
+
+/*        Compute eigenvalues only */
+/*        (CWorkspace: need 1, prefer HSWORK (see comments) ) */
+/*        (RWorkspace: none) */
+
+	iwrk = itau;
+	i__1 = *lwork - iwrk + 1;
+	chseqr_("E", "N", n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vr[
+		vr_offset], ldvr, &work[iwrk], &i__1, info);
+    }
+
+/*     If INFO > 0 from CHSEQR, then quit */
+
+    if (*info > 0) {
+	goto L50;
+    }
+
+    if (wantvl || wantvr) {
+
+/*        Compute left and/or right eigenvectors */
+/*        (CWorkspace: need 2*N) */
+/*        (RWorkspace: need 2*N) */
+
+	irwork = ibal + *n;
+	ctrevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl,
+		 &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &rwork[irwork], 
+		&ierr);
+    }
+
+    if (wantvl) {
+
+/*        Undo balancing of left eigenvectors */
+/*        (CWorkspace: none) */
+/*        (RWorkspace: need N) */
+
+	cgebak_("B", "L", n, &ilo, &ihi, &rwork[ibal], n, &vl[vl_offset], 
+		ldvl, &ierr);
+
+/*        Normalize left eigenvectors and make largest component real */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    scl = 1.f / scnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+	    csscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		i__3 = k + i__ * vl_dim1;
+/* Computing 2nd power */
+		r__1 = vl[i__3].r;
+/* Computing 2nd power */
+		r__2 = r_imag(&vl[k + i__ * vl_dim1]);
+		rwork[irwork + k - 1] = r__1 * r__1 + r__2 * r__2;
+/* L10: */
+	    }
+	    k = isamax_(n, &rwork[irwork], &c__1);
+	    r_cnjg(&q__2, &vl[k + i__ * vl_dim1]);
+	    r__1 = sqrt(rwork[irwork + k - 1]);
+	    q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
+	    tmp.r = q__1.r, tmp.i = q__1.i;
+	    cscal_(n, &tmp, &vl[i__ * vl_dim1 + 1], &c__1);
+	    i__2 = k + i__ * vl_dim1;
+	    i__3 = k + i__ * vl_dim1;
+	    r__1 = vl[i__3].r;
+	    q__1.r = r__1, q__1.i = 0.f;
+	    vl[i__2].r = q__1.r, vl[i__2].i = q__1.i;
+/* L20: */
+	}
+    }
+
+    if (wantvr) {
+
+/*        Undo balancing of right eigenvectors */
+/*        (CWorkspace: none) */
+/*        (RWorkspace: need N) */
+
+	cgebak_("B", "R", n, &ilo, &ihi, &rwork[ibal], n, &vr[vr_offset], 
+		ldvr, &ierr);
+
+/*        Normalize right eigenvectors and make largest component real */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    scl = 1.f / scnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+	    csscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		i__3 = k + i__ * vr_dim1;
+/* Computing 2nd power */
+		r__1 = vr[i__3].r;
+/* Computing 2nd power */
+		r__2 = r_imag(&vr[k + i__ * vr_dim1]);
+		rwork[irwork + k - 1] = r__1 * r__1 + r__2 * r__2;
+/* L30: */
+	    }
+	    k = isamax_(n, &rwork[irwork], &c__1);
+	    r_cnjg(&q__2, &vr[k + i__ * vr_dim1]);
+	    r__1 = sqrt(rwork[irwork + k - 1]);
+	    q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
+	    tmp.r = q__1.r, tmp.i = q__1.i;
+	    cscal_(n, &tmp, &vr[i__ * vr_dim1 + 1], &c__1);
+	    i__2 = k + i__ * vr_dim1;
+	    i__3 = k + i__ * vr_dim1;
+	    r__1 = vr[i__3].r;
+	    q__1.r = r__1, q__1.i = 0.f;
+	    vr[i__2].r = q__1.r, vr[i__2].i = q__1.i;
+/* L40: */
+	}
+    }
+
+/*     Undo scaling if necessary */
+
+L50:
+    if (scalea) {
+	i__1 = *n - *info;
+/* Computing MAX */
+	i__3 = *n - *info;
+	i__2 = max(i__3,1);
+	clascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &w[*info + 1]
+, &i__2, &ierr);
+	if (*info > 0) {
+	    i__1 = ilo - 1;
+	    clascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &w[1], n, 
+		    &ierr);
+	}
+    }
+
+    work[1].r = (real) maxwrk, work[1].i = 0.f;
+    return 0;
+
+/*     End of CGEEV */
+
+} /* cgeev_ */
diff --git a/contrib/libs/clapack/cgeevx.c b/contrib/libs/clapack/cgeevx.c
new file mode 100644
index 0000000000..589c88dc7f
--- /dev/null
+++ b/contrib/libs/clapack/cgeevx.c
@@ -0,0 +1,680 @@
+/* cgeevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cgeevx_(char *balanc, char *jobvl, char *jobvr, char *
+ sense, integer *n, complex *a, integer *lda, complex *w, complex *vl,
+ integer *ldvl, complex *vr, integer *ldvr, integer *ilo, integer *ihi,
+ real *scale, real *abnrm, real *rconde, real *rcondv, complex *work,
+ integer *lwork, real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+ i__2, i__3;
+ real r__1, r__2;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_imag(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, k;
+ char job[1];
+ real scl, dum[1], eps;
+ complex tmp;
+ char side[1];
+ real anrm;
+ integer ierr, itau, iwrk, nout;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *);
+ integer icond;
+ extern logical lsame_(char *, char *);
+ extern doublereal scnrm2_(integer *, complex *, integer *);
+ extern /* Subroutine */ int cgebak_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, complex *, integer *, integer *), cgebal_(char *, integer *, complex *, integer *,
+ integer *, integer *, real *, integer *), slabad_(real *,
+ real *);
+ logical scalea;
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ real cscale;
+ extern /* Subroutine */ int cgehrd_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, integer *),
+ clascl_(char *, integer *, integer *, real *, real *, integer *,
+ integer *, complex *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *), clacpy_(char *, integer *, integer *, complex *, integer *,
+ complex *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ logical select[1];
+ real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int chseqr_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *), ctrevc_(char *,
+ char *, logical *, integer *, complex *, integer *, complex *,
+ integer *, complex *, integer *, integer *, integer *, complex *,
+ real *, integer *), cunghr_(integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ integer *), ctrsna_(char *, char *, logical *, integer *, complex
+ *, integer *, complex *, integer *, complex *, integer *, real *,
+ real *, integer *, integer *, complex *, integer *, real *,
+ integer *);
+ integer minwrk, maxwrk;
+ logical wantvl, wntsnb;
+ integer hswork;
+ logical wntsne;
+ real smlnum;
+ logical lquery, wantvr, wntsnn, wntsnv;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGEEVX computes for an N-by-N complex nonsymmetric matrix A, the */
+/* eigenvalues and, optionally, the left and/or right eigenvectors. */
+
+/* Optionally also, it computes a balancing transformation to improve */
+/* the conditioning of the eigenvalues and eigenvectors (ILO, IHI, */
+/* SCALE, and ABNRM), reciprocal condition numbers for the eigenvalues */
+/* (RCONDE), and reciprocal condition numbers for the right */
+/* eigenvectors (RCONDV). */
+
+/* The right eigenvector v(j) of A satisfies */
+/* A * v(j) = lambda(j) * v(j) */
+/* where lambda(j) is its eigenvalue. */
+/* The left eigenvector u(j) of A satisfies */
+/* u(j)**H * A = lambda(j) * u(j)**H */
+/* where u(j)**H denotes the conjugate transpose of u(j). */
+
+/* The computed eigenvectors are normalized to have Euclidean norm */
+/* equal to 1 and largest component real. */
+
+/* Balancing a matrix means permuting the rows and columns to make it */
+/* more nearly upper triangular, and applying a diagonal similarity */
+/* transformation D * A * D**(-1), where D is a diagonal matrix, to */
+/* make its rows and columns closer in norm and the condition numbers */
+/* of its eigenvalues and eigenvectors smaller. The computed */
+/* reciprocal condition numbers correspond to the balanced matrix. */
+/* Permuting rows and columns will not change the condition numbers */
+/* (in exact arithmetic) but diagonal scaling will. For further */
+/* explanation of balancing, see section 4.10.2 of the LAPACK */
+/* Users' Guide. */
+
+/* Arguments */
+/* ========= */
+
+/* BALANC (input) CHARACTER*1 */
+/* Indicates how the input matrix should be diagonally scaled */
+/* and/or permuted to improve the conditioning of its */
+/* eigenvalues. */
+/* = 'N': Do not diagonally scale or permute; */
+/* = 'P': Perform permutations to make the matrix more nearly */
+/* upper triangular. Do not diagonally scale; */
+/* = 'S': Diagonally scale the matrix, ie. replace A by */
+/* D*A*D**(-1), where D is a diagonal matrix chosen */
+/* to make the rows and columns of A more equal in */
+/* norm. Do not permute; */
+/* = 'B': Both diagonally scale and permute A. */
+
+/* Computed reciprocal condition numbers will be for the matrix */
+/* after balancing and/or permuting. Permuting does not change */
+/* condition numbers (in exact arithmetic), but balancing does. */
+
+/* JOBVL (input) CHARACTER*1 */
+/* = 'N': left eigenvectors of A are not computed; */
+/* = 'V': left eigenvectors of A are computed. */
+/* If SENSE = 'E' or 'B', JOBVL must = 'V'. */
+
+/* JOBVR (input) CHARACTER*1 */
+/* = 'N': right eigenvectors of A are not computed; */
+/* = 'V': right eigenvectors of A are computed. */
+/* If SENSE = 'E' or 'B', JOBVR must = 'V'. */
+
+/* SENSE (input) CHARACTER*1 */
+/* Determines which reciprocal condition numbers are computed. */
+/* = 'N': None are computed; */
+/* = 'E': Computed for eigenvalues only; */
+/* = 'V': Computed for right eigenvectors only; */
+/* = 'B': Computed for eigenvalues and right eigenvectors. */
+
+/* If SENSE = 'E' or 'B', both left and right eigenvectors */
+/* must also be computed (JOBVL = 'V' and JOBVR = 'V'). */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the N-by-N matrix A. */
+/* On exit, A has been overwritten. If JOBVL = 'V' or */
+/* JOBVR = 'V', A contains the Schur form of the balanced */
+/* version of the matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* W (output) COMPLEX array, dimension (N) */
+/* W contains the computed eigenvalues. */
+
+/* VL (output) COMPLEX array, dimension (LDVL,N) */
+/* If JOBVL = 'V', the left eigenvectors u(j) are stored one */
+/* after another in the columns of VL, in the same order */
+/* as their eigenvalues. */
+/* If JOBVL = 'N', VL is not referenced. */
+/* u(j) = VL(:,j), the j-th column of VL. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the array VL. LDVL >= 1; if */
+/* JOBVL = 'V', LDVL >= N. */
+
+/* VR (output) COMPLEX array, dimension (LDVR,N) */
+/* If JOBVR = 'V', the right eigenvectors v(j) are stored one */
+/* after another in the columns of VR, in the same order */
+/* as their eigenvalues. */
+/* If JOBVR = 'N', VR is not referenced. */
+/* v(j) = VR(:,j), the j-th column of VR. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. LDVR >= 1; if */
+/* JOBVR = 'V', LDVR >= N. */
+
+/* ILO (output) INTEGER */
+/* IHI (output) INTEGER */
+/* ILO and IHI are integer values determined when A was */
+/* balanced. The balanced A(i,j) = 0 if I > J and */
+/* J = 1,...,ILO-1 or I = IHI+1,...,N. */
+
+/* SCALE (output) REAL array, dimension (N) */
+/* Details of the permutations and scaling factors applied */
+/* when balancing A. If P(j) is the index of the row and column */
+/* interchanged with row and column j, and D(j) is the scaling */
+/* factor applied to row and column j, then */
+/* SCALE(J) = P(J), for J = 1,...,ILO-1 */
+/* = D(J), for J = ILO,...,IHI */
+/* = P(J) for J = IHI+1,...,N. */
+/* The order in which the interchanges are made is N to IHI+1, */
+/* then 1 to ILO-1. */
+
+/* ABNRM (output) REAL */
+/* The one-norm of the balanced matrix (the maximum */
+/* of the sum of absolute values of elements of any column). */
+
+/* RCONDE (output) REAL array, dimension (N) */
+/* RCONDE(j) is the reciprocal condition number of the j-th */
+/* eigenvalue. */
+
+/* RCONDV (output) REAL array, dimension (N) */
+/* RCONDV(j) is the reciprocal condition number of the j-th */
+/* right eigenvector. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. If SENSE = 'N' or 'E', */
+/* LWORK >= max(1,2*N), and if SENSE = 'V' or 'B', */
+/* LWORK >= N*N+2*N. */
+/* For good performance, LWORK must generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, the QR algorithm failed to compute all the */
+/* eigenvalues, and no eigenvectors or condition numbers */
+/* have been computed; elements 1:ILO-1 and i+1:N of W */
+/* contain eigenvalues which have converged. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --w;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --scale;
+ --rconde;
+ --rcondv;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ wantvl = lsame_(jobvl, "V");
+ wantvr = lsame_(jobvr, "V");
+ wntsnn = lsame_(sense, "N");
+ wntsne = lsame_(sense, "E");
+ wntsnv = lsame_(sense, "V");
+ wntsnb = lsame_(sense, "B");
+ if (! (lsame_(balanc, "N") || lsame_(balanc, "S") || lsame_(balanc, "P")
+ || lsame_(balanc, "B"))) {
+ *info = -1;
+ } else if (! wantvl && ! lsame_(jobvl, "N")) {
+ *info = -2;
+ } else if (! wantvr && ! lsame_(jobvr, "N")) {
+ *info = -3;
+ } else if (! (wntsnn || wntsne || wntsnb || wntsnv) || (wntsne || wntsnb)
+ && ! (wantvl && wantvr)) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldvl < 1 || wantvl && *ldvl < *n) {
+ *info = -10;
+ } else if (*ldvr < 1 || wantvr && *ldvr < *n) {
+ *info = -12;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* CWorkspace refers to complex workspace, and RWorkspace to real */
+/* workspace. NB refers to the optimal block size for the */
+/* immediately following subroutine, as returned by ILAENV. */
+/* HSWORK refers to the workspace preferred by CHSEQR, as */
+/* calculated below. HSWORK is computed assuming ILO=1 and IHI=N, */
+/* the worst case.) */
+
+ if (*info == 0) {
+ if (*n == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ } else {
+ maxwrk = *n + *n * ilaenv_(&c__1, "CGEHRD", " ", n, &c__1, n, &
+ c__0);
+
+ if (wantvl) {
+ chseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &w[1], &vl[
+ vl_offset], ldvl, &work[1], &c_n1, info);
+ } else if (wantvr) {
+ chseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &w[1], &vr[
+ vr_offset], ldvr, &work[1], &c_n1, info);
+ } else {
+ if (wntsnn) {
+ chseqr_("E", "N", n, &c__1, n, &a[a_offset], lda, &w[1], &
+ vr[vr_offset], ldvr, &work[1], &c_n1, info);
+ } else {
+ chseqr_("S", "N", n, &c__1, n, &a[a_offset], lda, &w[1], &
+ vr[vr_offset], ldvr, &work[1], &c_n1, info);
+ }
+ }
+ hswork = work[1].r;
+
+ if (! wantvl && ! wantvr) {
+ minwrk = *n << 1;
+ if (! (wntsnn || wntsne)) {
+/* Computing MAX */
+ i__1 = minwrk, i__2 = *n * *n + (*n << 1);
+ minwrk = max(i__1,i__2);
+ }
+ maxwrk = max(maxwrk,hswork);
+ if (! (wntsnn || wntsne)) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * *n + (*n << 1);
+ maxwrk = max(i__1,i__2);
+ }
+ } else {
+ minwrk = *n << 1;
+ if (! (wntsnn || wntsne)) {
+/* Computing MAX */
+ i__1 = minwrk, i__2 = *n * *n + (*n << 1);
+ minwrk = max(i__1,i__2);
+ }
+ maxwrk = max(maxwrk,hswork);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + (*n - 1) * ilaenv_(&c__1, "CUNGHR",
+ " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ if (! (wntsnn || wntsne)) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * *n + (*n << 1);
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n << 1;
+ maxwrk = max(i__1,i__2);
+ }
+ maxwrk = max(maxwrk,minwrk);
+ }
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -20;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEEVX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ icond = 0;
+ anrm = clange_("M", n, n, &a[a_offset], lda, dum);
+ scalea = FALSE_;
+ if (anrm > 0.f && anrm < smlnum) {
+ scalea = TRUE_;
+ cscale = smlnum;
+ } else if (anrm > bignum) {
+ scalea = TRUE_;
+ cscale = bignum;
+ }
+ if (scalea) {
+ clascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Balance the matrix and compute ABNRM */
+
+ cgebal_(balanc, n, &a[a_offset], lda, ilo, ihi, &scale[1], &ierr);
+ *abnrm = clange_("1", n, n, &a[a_offset], lda, dum);
+ if (scalea) {
+ dum[0] = *abnrm;
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &c__1, &c__1, dum, &c__1, &
+ ierr);
+ *abnrm = dum[0];
+ }
+
+/* Reduce to upper Hessenberg form */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: none) */
+
+ itau = 1;
+ iwrk = itau + *n;
+ i__1 = *lwork - iwrk + 1;
+ cgehrd_(n, ilo, ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1, &
+ ierr);
+
+ if (wantvl) {
+
+/* Want left eigenvectors */
+/* Copy Householder vectors to VL */
+
+ *(unsigned char *)side = 'L';
+ clacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl)
+ ;
+
+/* Generate unitary matrix in VL */
+/* (CWorkspace: need 2*N-1, prefer N+(N-1)*NB) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwrk + 1;
+ cunghr_(n, ilo, ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk], &
+ i__1, &ierr);
+
+/* Perform QR iteration, accumulating Schur vectors in VL */
+/* (CWorkspace: need 1, prefer HSWORK (see comments) ) */
+/* (RWorkspace: none) */
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ chseqr_("S", "V", n, ilo, ihi, &a[a_offset], lda, &w[1], &vl[
+ vl_offset], ldvl, &work[iwrk], &i__1, info);
+
+ if (wantvr) {
+
+/* Want left and right eigenvectors */
+/* Copy Schur vectors to VR */
+
+ *(unsigned char *)side = 'B';
+ clacpy_("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr);
+ }
+
+ } else if (wantvr) {
+
+/* Want right eigenvectors */
+/* Copy Householder vectors to VR */
+
+ *(unsigned char *)side = 'R';
+ clacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr)
+ ;
+
+/* Generate unitary matrix in VR */
+/* (CWorkspace: need 2*N-1, prefer N+(N-1)*NB) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwrk + 1;
+ cunghr_(n, ilo, ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk], &
+ i__1, &ierr);
+
+/* Perform QR iteration, accumulating Schur vectors in VR */
+/* (CWorkspace: need 1, prefer HSWORK (see comments) ) */
+/* (RWorkspace: none) */
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ chseqr_("S", "V", n, ilo, ihi, &a[a_offset], lda, &w[1], &vr[
+ vr_offset], ldvr, &work[iwrk], &i__1, info);
+
+ } else {
+
+/* Compute eigenvalues only */
+/* If condition numbers desired, compute Schur form */
+
+ if (wntsnn) {
+ *(unsigned char *)job = 'E';
+ } else {
+ *(unsigned char *)job = 'S';
+ }
+
+/* (CWorkspace: need 1, prefer HSWORK (see comments) ) */
+/* (RWorkspace: none) */
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ chseqr_(job, "N", n, ilo, ihi, &a[a_offset], lda, &w[1], &vr[
+ vr_offset], ldvr, &work[iwrk], &i__1, info);
+ }
+
+/* If INFO > 0 from CHSEQR, then quit */
+
+ if (*info > 0) {
+ goto L50;
+ }
+
+ if (wantvl || wantvr) {
+
+/* Compute left and/or right eigenvectors */
+/* (CWorkspace: need 2*N) */
+/* (RWorkspace: need N) */
+
+ ctrevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl,
+ &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &rwork[1], &
+ ierr);
+ }
+
+/* Compute condition numbers if desired */
+/* (CWorkspace: need N*N+2*N unless SENSE = 'E') */
+/* (RWorkspace: need 2*N unless SENSE = 'E') */
+
+ if (! wntsnn) {
+ ctrsna_(sense, "A", select, n, &a[a_offset], lda, &vl[vl_offset],
+ ldvl, &vr[vr_offset], ldvr, &rconde[1], &rcondv[1], n, &nout,
+ &work[iwrk], n, &rwork[1], &icond);
+ }
+
+ if (wantvl) {
+
+/* Undo balancing of left eigenvectors */
+
+ cgebak_(balanc, "L", n, ilo, ihi, &scale[1], n, &vl[vl_offset], ldvl,
+ &ierr);
+
+/* Normalize left eigenvectors and make largest component real */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ scl = 1.f / scnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+ csscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + i__ * vl_dim1;
+/* Computing 2nd power */
+ r__1 = vl[i__3].r;
+/* Computing 2nd power */
+ r__2 = r_imag(&vl[k + i__ * vl_dim1]);
+ rwork[k] = r__1 * r__1 + r__2 * r__2;
+/* L10: */
+ }
+ k = isamax_(n, &rwork[1], &c__1);
+ r_cnjg(&q__2, &vl[k + i__ * vl_dim1]);
+ r__1 = sqrt(rwork[k]);
+ q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
+ tmp.r = q__1.r, tmp.i = q__1.i;
+ cscal_(n, &tmp, &vl[i__ * vl_dim1 + 1], &c__1);
+ i__2 = k + i__ * vl_dim1;
+ i__3 = k + i__ * vl_dim1;
+ r__1 = vl[i__3].r;
+ q__1.r = r__1, q__1.i = 0.f;
+ vl[i__2].r = q__1.r, vl[i__2].i = q__1.i;
+/* L20: */
+ }
+ }
+
+ if (wantvr) {
+
+/* Undo balancing of right eigenvectors */
+
+ cgebak_(balanc, "R", n, ilo, ihi, &scale[1], n, &vr[vr_offset], ldvr,
+ &ierr);
+
+/* Normalize right eigenvectors and make largest component real */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ scl = 1.f / scnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+ csscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + i__ * vr_dim1;
+/* Computing 2nd power */
+ r__1 = vr[i__3].r;
+/* Computing 2nd power */
+ r__2 = r_imag(&vr[k + i__ * vr_dim1]);
+ rwork[k] = r__1 * r__1 + r__2 * r__2;
+/* L30: */
+ }
+ k = isamax_(n, &rwork[1], &c__1);
+ r_cnjg(&q__2, &vr[k + i__ * vr_dim1]);
+ r__1 = sqrt(rwork[k]);
+ q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
+ tmp.r = q__1.r, tmp.i = q__1.i;
+ cscal_(n, &tmp, &vr[i__ * vr_dim1 + 1], &c__1);
+ i__2 = k + i__ * vr_dim1;
+ i__3 = k + i__ * vr_dim1;
+ r__1 = vr[i__3].r;
+ q__1.r = r__1, q__1.i = 0.f;
+ vr[i__2].r = q__1.r, vr[i__2].i = q__1.i;
+/* L40: */
+ }
+ }
+
+/* Undo scaling if necessary */
+
+L50:
+ if (scalea) {
+ i__1 = *n - *info;
+/* Computing MAX */
+ i__3 = *n - *info;
+ i__2 = max(i__3,1);
+ clascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &w[*info + 1]
+, &i__2, &ierr);
+ if (*info == 0) {
+ if ((wntsnv || wntsnb) && icond == 0) {
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, n, &c__1, &rcondv[
+ 1], n, &ierr);
+ }
+ } else {
+ i__1 = *ilo - 1;
+ clascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &w[1], n,
+ &ierr);
+ }
+ }
+
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+ return 0;
+
+/* End of CGEEVX */
+
+} /* cgeevx_ */
diff --git a/contrib/libs/clapack/cgegs.c b/contrib/libs/clapack/cgegs.c
new file mode 100644
index 0000000000..f4d2a8e8db
--- /dev/null
+++ b/contrib/libs/clapack/cgegs.c
@@ -0,0 +1,536 @@
+/* cgegs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cgegs_(char *jobvsl, char *jobvsr, integer *n, complex *
+	a, integer *lda, complex *b, integer *ldb, complex *alpha, complex *
+	beta, complex *vsl, integer *ldvsl, complex *vsr, integer *ldvsr,
+	complex *work, integer *lwork, real *rwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, vsl_dim1, vsl_offset,
+	    vsr_dim1, vsr_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer nb, nb1, nb2, nb3, ihi, ilo;
+    real eps, anrm, bnrm;
+    integer itau, lopt;
+    extern logical lsame_(char *, char *);
+    integer ileft, iinfo, icols;
+    logical ilvsl;
+    integer iwork;
+    logical ilvsr;
+    integer irows;
+    extern /* Subroutine */ int cggbak_(char *, char *, integer *, integer *,
+	    integer *, real *, real *, integer *, complex *, integer *,
+	    integer *), cggbal_(char *, integer *, complex *,
+	    integer *, complex *, integer *, integer *, integer *, real *,
+	    real *, real *, integer *);
+    extern doublereal clange_(char *, integer *, integer *, complex *,
+	    integer *, real *);
+    extern /* Subroutine */ int cgghrd_(char *, char *, integer *, integer *,
+	    integer *, complex *, integer *, complex *, integer *, complex *,
+	    integer *, complex *, integer *, integer *),
+	    clascl_(char *, integer *, integer *, real *, real *, integer *,
+	    integer *, complex *, integer *, integer *);
+    logical ilascl, ilbscl;
+    extern /* Subroutine */ int cgeqrf_(integer *, integer *, complex *,
+	    integer *, complex *, complex *, integer *, integer *);
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+	    *, integer *, complex *, integer *), claset_(char *,
+	    integer *, integer *, complex *, complex *, complex *, integer *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    real bignum;
+    extern /* Subroutine */ int chgeqz_(char *, char *, char *, integer *,
+	    integer *, integer *, complex *, integer *, complex *, integer *,
+	    complex *, complex *, complex *, integer *, complex *, integer *,
+	    complex *, integer *, real *, integer *);
+    integer ijobvl, iright, ijobvr;
+    real anrmto;
+    integer lwkmin;
+    real bnrmto;
+    extern /* Subroutine */ int cungqr_(integer *, integer *, integer *,
+	    complex *, integer *, complex *, complex *, integer *, integer *),
+	     cunmqr_(char *, char *, integer *, integer *, integer *, complex
+	    *, integer *, complex *, complex *, integer *, complex *, integer
+	    *, integer *);
+    real smlnum;
+    integer irwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine CGGES. */
+
+/* CGEGS computes the eigenvalues, Schur form, and, optionally, the */
+/* left and or/right Schur vectors of a complex matrix pair (A,B). */
+/* Given two square matrices A and B, the generalized Schur */
+/* factorization has the form */
+
+/* A = Q*S*Z**H, B = Q*T*Z**H */
+
+/* where Q and Z are unitary matrices and S and T are upper triangular. */
+/* The columns of Q are the left Schur vectors */
+/* and the columns of Z are the right Schur vectors. */
+
+/* If only the eigenvalues of (A,B) are needed, the driver routine */
+/* CGEGV should be used instead. See CGEGV for a description of the */
+/* eigenvalues of the generalized nonsymmetric eigenvalue problem */
+/* (GNEP). */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVSL (input) CHARACTER*1 */
+/* = 'N': do not compute the left Schur vectors; */
+/* = 'V': compute the left Schur vectors (returned in VSL). */
+
+/* JOBVSR (input) CHARACTER*1 */
+/* = 'N': do not compute the right Schur vectors; */
+/* = 'V': compute the right Schur vectors (returned in VSR). */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VSL, and VSR. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA, N) */
+/* On entry, the matrix A. */
+/* On exit, the upper triangular matrix S from the generalized */
+/* Schur factorization. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX array, dimension (LDB, N) */
+/* On entry, the matrix B. */
+/* On exit, the upper triangular matrix T from the generalized */
+/* Schur factorization. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* ALPHA (output) COMPLEX array, dimension (N) */
+/* The complex scalars alpha that define the eigenvalues of */
+/* GNEP. ALPHA(j) = S(j,j), the diagonal element of the Schur */
+/* form of A. */
+
+/* BETA (output) COMPLEX array, dimension (N) */
+/* The non-negative real scalars beta that define the */
+/* eigenvalues of GNEP. BETA(j) = T(j,j), the diagonal element */
+/* of the triangular factor T. */
+
+/* Together, the quantities alpha = ALPHA(j) and beta = BETA(j) */
+/* represent the j-th eigenvalue of the matrix pair (A,B), in */
+/* one of the forms lambda = alpha/beta or mu = beta/alpha. */
+/* Since either lambda or mu may overflow, they should not, */
+/* in general, be computed. */
+
+/* VSL (output) COMPLEX array, dimension (LDVSL,N) */
+/* If JOBVSL = 'V', the matrix of left Schur vectors Q. */
+/* Not referenced if JOBVSL = 'N'. */
+
+/* LDVSL (input) INTEGER */
+/* The leading dimension of the matrix VSL. LDVSL >= 1, and */
+/* if JOBVSL = 'V', LDVSL >= N. */
+
+/* VSR (output) COMPLEX array, dimension (LDVSR,N) */
+/* If JOBVSR = 'V', the matrix of right Schur vectors Z. */
+/* Not referenced if JOBVSR = 'N'. */
+
+/* LDVSR (input) INTEGER */
+/* The leading dimension of the matrix VSR. LDVSR >= 1, and */
+/* if JOBVSR = 'V', LDVSR >= N. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,2*N). */
+/* For good performance, LWORK must generally be larger. */
+/* To compute the optimal value of LWORK, call ILAENV to get */
+/* blocksizes (for CGEQRF, CUNMQR, and CUNGQR.) Then compute: */
+/* NB -- MAX of the blocksizes for CGEQRF, CUNMQR, and CUNGQR; */
+/* the optimal LWORK is N*(NB+1). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (3*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* =1,...,N: */
+/* The QZ iteration failed. (A,B) are not in Schur */
+/* form, but ALPHA(j) and BETA(j) should be correct for */
+/* j=INFO+1,...,N. */
+/* > N: errors that usually indicate LAPACK problems: */
+/* =N+1: error return from CGGBAL */
+/* =N+2: error return from CGEQRF */
+/* =N+3: error return from CUNMQR */
+/* =N+4: error return from CUNGQR */
+/* =N+5: error return from CGGHRD */
+/* =N+6: error return from CHGEQZ (other than failed */
+/* iteration) */
+/* =N+7: error return from CGGBAK (computing VSL) */
+/* =N+8: error return from CGGBAK (computing VSR) */
+/* =N+9: error return from CLASCL (various places) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments: map JOBVSL/JOBVSR onto integer codes */
+
+    /* Parameter adjustments (f2c: shift pointers so arrays are 1-based) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --alpha;
+    --beta;
+    vsl_dim1 = *ldvsl;
+    vsl_offset = 1 + vsl_dim1;
+    vsl -= vsl_offset;
+    vsr_dim1 = *ldvsr;
+    vsr_offset = 1 + vsr_dim1;
+    vsr -= vsr_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    if (lsame_(jobvsl, "N")) {
+	ijobvl = 1;
+	ilvsl = FALSE_;
+    } else if (lsame_(jobvsl, "V")) {
+	ijobvl = 2;
+	ilvsl = TRUE_;
+    } else {
+	ijobvl = -1;
+	ilvsl = FALSE_;
+    }
+
+    if (lsame_(jobvsr, "N")) {
+	ijobvr = 1;
+	ilvsr = FALSE_;
+    } else if (lsame_(jobvsr, "V")) {
+	ijobvr = 2;
+	ilvsr = TRUE_;
+    } else {
+	ijobvr = -1;
+	ilvsr = FALSE_;
+    }
+
+/* Test the input arguments; a workspace query (LWORK = -1) is legal */
+
+/* Computing MAX */
+    i__1 = *n << 1;
+    lwkmin = max(i__1,1);
+    lwkopt = lwkmin;
+    work[1].r = (real) lwkopt, work[1].i = 0.f;
+    lquery = *lwork == -1;
+    *info = 0;
+    if (ijobvl <= 0) {
+	*info = -1;
+    } else if (ijobvr <= 0) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    } else if (*ldvsl < 1 || ilvsl && *ldvsl < *n) {
+	*info = -11;
+    } else if (*ldvsr < 1 || ilvsr && *ldvsr < *n) {
+	*info = -13;
+    } else if (*lwork < lwkmin && ! lquery) {
+	*info = -15;
+    }
+
+    if (*info == 0) {
+	nb1 = ilaenv_(&c__1, "CGEQRF", " ", n, n, &c_n1, &c_n1);
+	nb2 = ilaenv_(&c__1, "CUNMQR", " ", n, n, n, &c_n1);
+	nb3 = ilaenv_(&c__1, "CUNGQR", " ", n, n, n, &c_n1);
+/* Computing MAX */
+	i__1 = max(nb1,nb2);
+	nb = max(i__1,nb3);
+	lopt = *n * (nb + 1);
+	work[1].r = (real) lopt, work[1].i = 0.f;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CGEGS ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Get machine constants used for the safe-scaling thresholds below */
+
+    eps = slamch_("E") * slamch_("B");
+    safmin = slamch_("S");
+    smlnum = *n * safmin / eps;
+    bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+    anrm = clange_("M", n, n, &a[a_offset], lda, &rwork[1]);
+    ilascl = FALSE_;
+    if (anrm > 0.f && anrm < smlnum) {
+	anrmto = smlnum;
+	ilascl = TRUE_;
+    } else if (anrm > bignum) {
+	anrmto = bignum;
+	ilascl = TRUE_;
+    }
+
+    if (ilascl) {
+	clascl_("G", &c_n1, &c_n1, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+    }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+    bnrm = clange_("M", n, n, &b[b_offset], ldb, &rwork[1]);
+    ilbscl = FALSE_;
+    if (bnrm > 0.f && bnrm < smlnum) {
+	bnrmto = smlnum;
+	ilbscl = TRUE_;
+    } else if (bnrm > bignum) {
+	bnrmto = bignum;
+	ilbscl = TRUE_;
+    }
+
+    if (ilbscl) {
+	clascl_("G", &c_n1, &c_n1, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+    }
+
+/* Balance: permute the pair (A,B) to make it more nearly triangular */
+
+    ileft = 1;
+    iright = *n + 1;
+    irwork = iright + *n;
+    iwork = 1;
+    cggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &rwork[
+	    ileft], &rwork[iright], &rwork[irwork], &iinfo);
+    if (iinfo != 0) {
+	*info = *n + 1;
+	goto L10;
+    }
+
+/* Reduce B to triangular form, and initialize VSL and/or VSR */
+
+    irows = ihi + 1 - ilo;
+    icols = *n + 1 - ilo;
+    itau = iwork;
+    iwork = itau + irows;
+    i__1 = *lwork + 1 - iwork;
+    cgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+	    iwork], &i__1, &iinfo);
+    if (iinfo >= 0) {
+/* Computing MAX */
+	i__3 = iwork;
+	i__1 = lwkopt, i__2 = (integer) work[i__3].r + iwork - 1;
+	lwkopt = max(i__1,i__2);
+    }
+    if (iinfo != 0) {
+	*info = *n + 2;
+	goto L10;
+    }
+
+    i__1 = *lwork + 1 - iwork;
+    cunmqr_("L", "C", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+	    work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwork], &i__1, &
+	    iinfo);
+    if (iinfo >= 0) {
+/* Computing MAX */
+	i__3 = iwork;
+	i__1 = lwkopt, i__2 = (integer) work[i__3].r + iwork - 1;
+	lwkopt = max(i__1,i__2);
+    }
+    if (iinfo != 0) {
+	*info = *n + 3;
+	goto L10;
+    }
+
+    if (ilvsl) {
+	claset_("Full", n, n, &c_b1, &c_b2, &vsl[vsl_offset], ldvsl);
+	i__1 = irows - 1;
+	i__2 = irows - 1;
+	clacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vsl[ilo
+		+ 1 + ilo * vsl_dim1], ldvsl);
+	i__1 = *lwork + 1 - iwork;
+	cungqr_(&irows, &irows, &irows, &vsl[ilo + ilo * vsl_dim1], ldvsl, &
+		work[itau], &work[iwork], &i__1, &iinfo);
+	if (iinfo >= 0) {
+/* Computing MAX */
+	    i__3 = iwork;
+	    i__1 = lwkopt, i__2 = (integer) work[i__3].r + iwork - 1;
+	    lwkopt = max(i__1,i__2);
+	}
+	if (iinfo != 0) {
+	    *info = *n + 4;
+	    goto L10;
+	}
+    }
+
+    if (ilvsr) {
+	claset_("Full", n, n, &c_b1, &c_b2, &vsr[vsr_offset], ldvsr);
+    }
+
+/* Reduce to generalized Hessenberg form */
+
+    cgghrd_(jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset],
+	    ldb, &vsl[vsl_offset], ldvsl, &vsr[vsr_offset], ldvsr, &iinfo);
+    if (iinfo != 0) {
+	*info = *n + 5;
+	goto L10;
+    }
+
+/* Perform QZ algorithm, computing Schur vectors if desired */
+
+    iwork = itau;
+    i__1 = *lwork + 1 - iwork;
+    chgeqz_("S", jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+	    b_offset], ldb, &alpha[1], &beta[1], &vsl[vsl_offset], ldvsl, &
+	    vsr[vsr_offset], ldvsr, &work[iwork], &i__1, &rwork[irwork], &
+	    iinfo);
+    if (iinfo >= 0) {
+/* Computing MAX */
+	i__3 = iwork;
+	i__1 = lwkopt, i__2 = (integer) work[i__3].r + iwork - 1;
+	lwkopt = max(i__1,i__2);
+    }
+    if (iinfo != 0) {
+	if (iinfo > 0 && iinfo <= *n) {
+	    *info = iinfo;
+	} else if (iinfo > *n && iinfo <= *n << 1) {
+	    *info = iinfo - *n;
+	} else {
+	    *info = *n + 6;
+	}
+	goto L10;
+    }
+
+/* Undo the balancing permutation on VSL and VSR */
+
+    if (ilvsl) {
+	cggbak_("P", "L", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n, &
+		vsl[vsl_offset], ldvsl, &iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 7;
+	    goto L10;
+	}
+    }
+    if (ilvsr) {
+	cggbak_("P", "R", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n, &
+		vsr[vsr_offset], ldvsr, &iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 8;
+	    goto L10;
+	}
+    }
+
+/* Undo scaling of the Schur factors and of ALPHA/BETA */
+
+    if (ilascl) {
+	clascl_("U", &c_n1, &c_n1, &anrmto, &anrm, n, n, &a[a_offset], lda, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+	clascl_("G", &c_n1, &c_n1, &anrmto, &anrm, n, &c__1, &alpha[1], n, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+    }
+
+    if (ilbscl) {
+	clascl_("U", &c_n1, &c_n1, &bnrmto, &bnrm, n, n, &b[b_offset], ldb, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+	clascl_("G", &c_n1, &c_n1, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+    }
+
+L10:
+    work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+    return 0;
+
+/* End of CGEGS */
+
+} /* cgegs_ */
diff --git a/contrib/libs/clapack/cgegv.c b/contrib/libs/clapack/cgegv.c
new file mode 100644
index 0000000000..4841b04904
--- /dev/null
+++ b/contrib/libs/clapack/cgegv.c
@@ -0,0 +1,779 @@
+/* cgegv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b29 = 1.f;
+
+/* Subroutine */ int cgegv_(char *jobvl, char *jobvr, integer *n, complex *a,
+ integer *lda, complex *b, integer *ldb, complex *alpha, complex *beta,
+ complex *vl, integer *ldvl, complex *vr, integer *ldvr, complex *
+ work, integer *lwork, real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, vl_dim1, vl_offset, vr_dim1,
+ vr_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2, r__3, r__4;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer jc, nb, in, jr, nb1, nb2, nb3, ihi, ilo;
+ real eps;
+ logical ilv;
+ real absb, anrm, bnrm;
+ integer itau;
+ real temp;
+ logical ilvl, ilvr;
+ integer lopt;
+ real anrm1, anrm2, bnrm1, bnrm2, absai, scale, absar, sbeta;
+ extern logical lsame_(char *, char *);
+ integer ileft, iinfo, icols, iwork, irows;
+ extern /* Subroutine */ int cggbak_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, complex *, integer *,
+ integer *), cggbal_(char *, integer *, complex *,
+ integer *, complex *, integer *, integer *, integer *, real *,
+ real *, real *, integer *);
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ extern /* Subroutine */ int cgghrd_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *, complex *, integer *, integer *);
+ real salfai;
+ extern /* Subroutine */ int clascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, complex *, integer *, integer *), cgeqrf_(integer *, integer *, complex *, integer *,
+ complex *, complex *, integer *, integer *);
+ real salfar;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), claset_(char *,
+ integer *, integer *, complex *, complex *, complex *, integer *);
+ real safmin;
+ extern /* Subroutine */ int ctgevc_(char *, char *, logical *, integer *,
+ complex *, integer *, complex *, integer *, complex *, integer *,
+ complex *, integer *, integer *, integer *, complex *, real *,
+ integer *);
+ real safmax;
+ char chtemp[1];
+ logical ldumma[1];
+ extern /* Subroutine */ int chgeqz_(char *, char *, char *, integer *,
+ integer *, integer *, complex *, integer *, complex *, integer *,
+ complex *, complex *, complex *, integer *, complex *, integer *,
+ complex *, integer *, real *, integer *),
+ xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ijobvl, iright;
+ logical ilimit;
+ integer ijobvr;
+ extern /* Subroutine */ int cungqr_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, integer *);
+ integer lwkmin;
+ extern /* Subroutine */ int cunmqr_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *);
+ integer irwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine CGGEV. */
+
+/* CGEGV computes the eigenvalues and, optionally, the left and/or right */
+/* eigenvectors of a complex matrix pair (A,B). */
+/* Given two square matrices A and B, */
+/* the generalized nonsymmetric eigenvalue problem (GNEP) is to find the */
+/* eigenvalues lambda and corresponding (non-zero) eigenvectors x such */
+/* that */
+/* A*x = lambda*B*x. */
+
+/* An alternate form is to find the eigenvalues mu and corresponding */
+/* eigenvectors y such that */
+/* mu*A*y = B*y. */
+
+/* These two forms are equivalent with mu = 1/lambda and x = y if */
+/* neither lambda nor mu is zero. In order to deal with the case that */
+/* lambda or mu is zero or small, two values alpha and beta are returned */
+/* for each eigenvalue, such that lambda = alpha/beta and */
+/* mu = beta/alpha. */
+
+/* The vectors x and y in the above equations are right eigenvectors of */
+/* the matrix pair (A,B). Vectors u and v satisfying */
+/* u**H*A = lambda*u**H*B or mu*v**H*A = v**H*B */
+/* are left eigenvectors of (A,B). */
+
+/* Note: this routine performs "full balancing" on A and B -- see */
+/* "Further Details", below. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVL (input) CHARACTER*1 */
+/* = 'N': do not compute the left generalized eigenvectors; */
+/* = 'V': compute the left generalized eigenvectors (returned */
+/* in VL). */
+
+/* JOBVR (input) CHARACTER*1 */
+/* = 'N': do not compute the right generalized eigenvectors; */
+/* = 'V': compute the right generalized eigenvectors (returned */
+/* in VR). */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VL, and VR. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA, N) */
+/* On entry, the matrix A. */
+/* If JOBVL = 'V' or JOBVR = 'V', then on exit A */
+/* contains the Schur form of A from the generalized Schur */
+/* factorization of the pair (A,B) after balancing. If no */
+/* eigenvectors were computed, then only the diagonal elements */
+/* of the Schur form will be correct. See CGGHRD and CHGEQZ */
+/* for details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX array, dimension (LDB, N) */
+/* On entry, the matrix B. */
+/* If JOBVL = 'V' or JOBVR = 'V', then on exit B contains the */
+/* upper triangular matrix obtained from B in the generalized */
+/* Schur factorization of the pair (A,B) after balancing. */
+/* If no eigenvectors were computed, then only the diagonal */
+/* elements of B will be correct. See CGGHRD and CHGEQZ for */
+/* details. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* ALPHA (output) COMPLEX array, dimension (N) */
+/* The complex scalars alpha that define the eigenvalues of */
+/* GNEP. */
+
+/* BETA (output) COMPLEX array, dimension (N) */
+/* The complex scalars beta that define the eigenvalues of GNEP. */
+
+/* Together, the quantities alpha = ALPHA(j) and beta = BETA(j) */
+/* represent the j-th eigenvalue of the matrix pair (A,B), in */
+/* one of the forms lambda = alpha/beta or mu = beta/alpha. */
+/* Since either lambda or mu may overflow, they should not, */
+/* in general, be computed. */
+
+/* VL (output) COMPLEX array, dimension (LDVL,N) */
+/* If JOBVL = 'V', the left eigenvectors u(j) are stored */
+/* in the columns of VL, in the same order as their eigenvalues. */
+/* Each eigenvector is scaled so that its largest component has */
+/* abs(real part) + abs(imag. part) = 1, except for eigenvectors */
+/* corresponding to an eigenvalue with alpha = beta = 0, which */
+/* are set to zero. */
+/* Not referenced if JOBVL = 'N'. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the matrix VL. LDVL >= 1, and */
+/* if JOBVL = 'V', LDVL >= N. */
+
+/* VR (output) COMPLEX array, dimension (LDVR,N) */
+/* If JOBVR = 'V', the right eigenvectors x(j) are stored */
+/* in the columns of VR, in the same order as their eigenvalues. */
+/* Each eigenvector is scaled so that its largest component has */
+/* abs(real part) + abs(imag. part) = 1, except for eigenvectors */
+/* corresponding to an eigenvalue with alpha = beta = 0, which */
+/* are set to zero. */
+/* Not referenced if JOBVR = 'N'. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the matrix VR. LDVR >= 1, and */
+/* if JOBVR = 'V', LDVR >= N. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,2*N). */
+/* For good performance, LWORK must generally be larger. */
+/* To compute the optimal value of LWORK, call ILAENV to get */
+/* blocksizes (for CGEQRF, CUNMQR, and CUNGQR.) Then compute: */
+/* NB -- MAX of the blocksizes for CGEQRF, CUNMQR, and CUNGQR; */
+/* The optimal LWORK is MAX( 2*N, N*(NB+1) ). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace/output) REAL array, dimension (8*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* =1,...,N: */
+/* The QZ iteration failed. No eigenvectors have been */
+/* calculated, but ALPHA(j) and BETA(j) should be */
+/* correct for j=INFO+1,...,N. */
+/* > N: errors that usually indicate LAPACK problems: */
+/* =N+1: error return from CGGBAL */
+/* =N+2: error return from CGEQRF */
+/* =N+3: error return from CUNMQR */
+/* =N+4: error return from CUNGQR */
+/* =N+5: error return from CGGHRD */
+/* =N+6: error return from CHGEQZ (other than failed */
+/* iteration) */
+/* =N+7: error return from CTGEVC */
+/* =N+8: error return from CGGBAK (computing VL) */
+/* =N+9: error return from CGGBAK (computing VR) */
+/* =N+10: error return from CLASCL (various calls) */
+
+/* Further Details */
+/* =============== */
+
+/* Balancing */
+/* --------- */
+
+/* This driver calls CGGBAL to both permute and scale rows and columns */
+/* of A and B. The permutations PL and PR are chosen so that PL*A*PR */
+/* and PL*B*R will be upper triangular except for the diagonal blocks */
+/* A(i:j,i:j) and B(i:j,i:j), with i and j as close together as */
+/* possible. The diagonal scaling matrices DL and DR are chosen so */
+/* that the pair DL*PL*A*PR*DR, DL*PL*B*PR*DR have elements close to */
+/* one (except for the elements that start out zero.) */
+
+/* After the eigenvalues and eigenvectors of the balanced matrices */
+/* have been computed, CGGBAK transforms the eigenvectors back to what */
+/* they would have been (in perfect arithmetic) if they had not been */
+/* balanced. */
+
+/* Contents of A and B on Exit */
+/* -------- -- - --- - -- ---- */
+
+/* If any eigenvectors are computed (either JOBVL='V' or JOBVR='V' or */
+/* both), then on exit the arrays A and B will contain the complex Schur */
+/* form[*] of the "balanced" versions of A and B. If no eigenvectors */
+/* are computed, then only the diagonal blocks will be correct. */
+
+/* [*] In other words, upper triangular form. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alpha;
+ --beta;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ if (lsame_(jobvl, "N")) {
+ ijobvl = 1;
+ ilvl = FALSE_;
+ } else if (lsame_(jobvl, "V")) {
+ ijobvl = 2;
+ ilvl = TRUE_;
+ } else {
+ ijobvl = -1;
+ ilvl = FALSE_;
+ }
+
+ if (lsame_(jobvr, "N")) {
+ ijobvr = 1;
+ ilvr = FALSE_;
+ } else if (lsame_(jobvr, "V")) {
+ ijobvr = 2;
+ ilvr = TRUE_;
+ } else {
+ ijobvr = -1;
+ ilvr = FALSE_;
+ }
+ ilv = ilvl || ilvr;
+
+/* Test the input arguments */
+
+/* Computing MAX */
+ i__1 = *n << 1;
+ lwkmin = max(i__1,1);
+ lwkopt = lwkmin;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ lquery = *lwork == -1;
+ *info = 0;
+ if (ijobvl <= 0) {
+ *info = -1;
+ } else if (ijobvr <= 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ } else if (*ldvl < 1 || ilvl && *ldvl < *n) {
+ *info = -11;
+ } else if (*ldvr < 1 || ilvr && *ldvr < *n) {
+ *info = -13;
+ } else if (*lwork < lwkmin && ! lquery) {
+ *info = -15;
+ }
+
+ if (*info == 0) {
+ nb1 = ilaenv_(&c__1, "CGEQRF", " ", n, n, &c_n1, &c_n1);
+ nb2 = ilaenv_(&c__1, "CUNMQR", " ", n, n, n, &c_n1);
+ nb3 = ilaenv_(&c__1, "CUNGQR", " ", n, n, n, &c_n1);
+/* Computing MAX */
+ i__1 = max(nb1,nb2);
+ nb = max(i__1,nb3);
+/* Computing MAX */
+ i__1 = *n << 1, i__2 = *n * (nb + 1);
+ lopt = max(i__1,i__2);
+ work[1].r = (real) lopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEGV ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("E") * slamch_("B");
+ safmin = slamch_("S");
+ safmin += safmin;
+ safmax = 1.f / safmin;
+
+/* Scale A */
+
+ anrm = clange_("M", n, n, &a[a_offset], lda, &rwork[1]);
+ anrm1 = anrm;
+ anrm2 = 1.f;
+ if (anrm < 1.f) {
+ if (safmax * anrm < 1.f) {
+ anrm1 = safmin;
+ anrm2 = safmax * anrm;
+ }
+ }
+
+ if (anrm > 0.f) {
+ clascl_("G", &c_n1, &c_n1, &anrm, &c_b29, n, n, &a[a_offset], lda, &
+ iinfo);
+ if (iinfo != 0) {
+ *info = *n + 10;
+ return 0;
+ }
+ }
+
+/* Scale B */
+
+ bnrm = clange_("M", n, n, &b[b_offset], ldb, &rwork[1]);
+ bnrm1 = bnrm;
+ bnrm2 = 1.f;
+ if (bnrm < 1.f) {
+ if (safmax * bnrm < 1.f) {
+ bnrm1 = safmin;
+ bnrm2 = safmax * bnrm;
+ }
+ }
+
+ if (bnrm > 0.f) {
+ clascl_("G", &c_n1, &c_n1, &bnrm, &c_b29, n, n, &b[b_offset], ldb, &
+ iinfo);
+ if (iinfo != 0) {
+ *info = *n + 10;
+ return 0;
+ }
+ }
+
+/* Permute the matrix to make it more nearly triangular */
+/* Also "balance" the matrix. */
+
+ ileft = 1;
+ iright = *n + 1;
+ irwork = iright + *n;
+ cggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &rwork[
+ ileft], &rwork[iright], &rwork[irwork], &iinfo);
+ if (iinfo != 0) {
+ *info = *n + 1;
+ goto L80;
+ }
+
+/* Reduce B to triangular form, and initialize VL and/or VR */
+
+ irows = ihi + 1 - ilo;
+ if (ilv) {
+ icols = *n + 1 - ilo;
+ } else {
+ icols = irows;
+ }
+ itau = 1;
+ iwork = itau + irows;
+ i__1 = *lwork + 1 - iwork;
+ cgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+ iwork], &i__1, &iinfo);
+ if (iinfo >= 0) {
+/* Computing MAX */
+ i__3 = iwork;
+ i__1 = lwkopt, i__2 = (integer) work[i__3].r + iwork - 1;
+ lwkopt = max(i__1,i__2);
+ }
+ if (iinfo != 0) {
+ *info = *n + 2;
+ goto L80;
+ }
+
+ i__1 = *lwork + 1 - iwork;
+ cunmqr_("L", "C", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+ work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwork], &i__1, &
+ iinfo);
+ if (iinfo >= 0) {
+/* Computing MAX */
+ i__3 = iwork;
+ i__1 = lwkopt, i__2 = (integer) work[i__3].r + iwork - 1;
+ lwkopt = max(i__1,i__2);
+ }
+ if (iinfo != 0) {
+ *info = *n + 3;
+ goto L80;
+ }
+
+ if (ilvl) {
+ claset_("Full", n, n, &c_b1, &c_b2, &vl[vl_offset], ldvl);
+ i__1 = irows - 1;
+ i__2 = irows - 1;
+ clacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vl[ilo +
+ 1 + ilo * vl_dim1], ldvl);
+ i__1 = *lwork + 1 - iwork;
+ cungqr_(&irows, &irows, &irows, &vl[ilo + ilo * vl_dim1], ldvl, &work[
+ itau], &work[iwork], &i__1, &iinfo);
+ if (iinfo >= 0) {
+/* Computing MAX */
+ i__3 = iwork;
+ i__1 = lwkopt, i__2 = (integer) work[i__3].r + iwork - 1;
+ lwkopt = max(i__1,i__2);
+ }
+ if (iinfo != 0) {
+ *info = *n + 4;
+ goto L80;
+ }
+ }
+
+ if (ilvr) {
+ claset_("Full", n, n, &c_b1, &c_b2, &vr[vr_offset], ldvr);
+ }
+
+/* Reduce to generalized Hessenberg form */
+
+ if (ilv) {
+
+/* Eigenvectors requested -- work on whole matrix. */
+
+ cgghrd_(jobvl, jobvr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset],
+ ldb, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, &iinfo);
+ } else {
+ cgghrd_("N", "N", &irows, &c__1, &irows, &a[ilo + ilo * a_dim1], lda,
+ &b[ilo + ilo * b_dim1], ldb, &vl[vl_offset], ldvl, &vr[
+ vr_offset], ldvr, &iinfo);
+ }
+ if (iinfo != 0) {
+ *info = *n + 5;
+ goto L80;
+ }
+
+/* Perform QZ algorithm */
+
+ iwork = itau;
+ if (ilv) {
+ *(unsigned char *)chtemp = 'S';
+ } else {
+ *(unsigned char *)chtemp = 'E';
+ }
+ i__1 = *lwork + 1 - iwork;
+ chgeqz_(chtemp, jobvl, jobvr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+ b_offset], ldb, &alpha[1], &beta[1], &vl[vl_offset], ldvl, &vr[
+ vr_offset], ldvr, &work[iwork], &i__1, &rwork[irwork], &iinfo);
+ if (iinfo >= 0) {
+/* Computing MAX */
+ i__3 = iwork;
+ i__1 = lwkopt, i__2 = (integer) work[i__3].r + iwork - 1;
+ lwkopt = max(i__1,i__2);
+ }
+ if (iinfo != 0) {
+ if (iinfo > 0 && iinfo <= *n) {
+ *info = iinfo;
+ } else if (iinfo > *n && iinfo <= *n << 1) {
+ *info = iinfo - *n;
+ } else {
+ *info = *n + 6;
+ }
+ goto L80;
+ }
+
+ if (ilv) {
+
+/* Compute Eigenvectors */
+
+ if (ilvl) {
+ if (ilvr) {
+ *(unsigned char *)chtemp = 'B';
+ } else {
+ *(unsigned char *)chtemp = 'L';
+ }
+ } else {
+ *(unsigned char *)chtemp = 'R';
+ }
+
+ ctgevc_(chtemp, "B", ldumma, n, &a[a_offset], lda, &b[b_offset], ldb,
+ &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, n, &in, &work[
+ iwork], &rwork[irwork], &iinfo);
+ if (iinfo != 0) {
+ *info = *n + 7;
+ goto L80;
+ }
+
+/* Undo balancing on VL and VR, rescale */
+
+ if (ilvl) {
+ cggbak_("P", "L", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n,
+ &vl[vl_offset], ldvl, &iinfo);
+ if (iinfo != 0) {
+ *info = *n + 8;
+ goto L80;
+ }
+ i__1 = *n;
+ for (jc = 1; jc <= i__1; ++jc) {
+ temp = 0.f;
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ i__3 = jr + jc * vl_dim1;
+ r__3 = temp, r__4 = (r__1 = vl[i__3].r, dabs(r__1)) + (
+ r__2 = r_imag(&vl[jr + jc * vl_dim1]), dabs(r__2))
+ ;
+ temp = dmax(r__3,r__4);
+/* L10: */
+ }
+ if (temp < safmin) {
+ goto L30;
+ }
+ temp = 1.f / temp;
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ i__3 = jr + jc * vl_dim1;
+ i__4 = jr + jc * vl_dim1;
+ q__1.r = temp * vl[i__4].r, q__1.i = temp * vl[i__4].i;
+ vl[i__3].r = q__1.r, vl[i__3].i = q__1.i;
+/* L20: */
+ }
+L30:
+ ;
+ }
+ }
+ if (ilvr) {
+ cggbak_("P", "R", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n,
+ &vr[vr_offset], ldvr, &iinfo);
+ if (iinfo != 0) {
+ *info = *n + 9;
+ goto L80;
+ }
+ i__1 = *n;
+ for (jc = 1; jc <= i__1; ++jc) {
+ temp = 0.f;
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ i__3 = jr + jc * vr_dim1;
+ r__3 = temp, r__4 = (r__1 = vr[i__3].r, dabs(r__1)) + (
+ r__2 = r_imag(&vr[jr + jc * vr_dim1]), dabs(r__2))
+ ;
+ temp = dmax(r__3,r__4);
+/* L40: */
+ }
+ if (temp < safmin) {
+ goto L60;
+ }
+ temp = 1.f / temp;
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ i__3 = jr + jc * vr_dim1;
+ i__4 = jr + jc * vr_dim1;
+ q__1.r = temp * vr[i__4].r, q__1.i = temp * vr[i__4].i;
+ vr[i__3].r = q__1.r, vr[i__3].i = q__1.i;
+/* L50: */
+ }
+L60:
+ ;
+ }
+ }
+
+/* End of eigenvector calculation */
+
+ }
+
+/* Undo scaling in alpha, beta */
+
+/* Note: this does not give the alpha and beta for the unscaled */
+/* problem. */
+
+/* Un-scaling is limited to avoid underflow in alpha and beta */
+/* if they are significant. */
+
+ i__1 = *n;
+ for (jc = 1; jc <= i__1; ++jc) {
+ i__2 = jc;
+ absar = (r__1 = alpha[i__2].r, dabs(r__1));
+ absai = (r__1 = r_imag(&alpha[jc]), dabs(r__1));
+ i__2 = jc;
+ absb = (r__1 = beta[i__2].r, dabs(r__1));
+ i__2 = jc;
+ salfar = anrm * alpha[i__2].r;
+ salfai = anrm * r_imag(&alpha[jc]);
+ i__2 = jc;
+ sbeta = bnrm * beta[i__2].r;
+ ilimit = FALSE_;
+ scale = 1.f;
+
+/* Check for significant underflow in imaginary part of ALPHA */
+
+/* Computing MAX */
+ r__1 = safmin, r__2 = eps * absar, r__1 = max(r__1,r__2), r__2 = eps *
+ absb;
+ if (dabs(salfai) < safmin && absai >= dmax(r__1,r__2)) {
+ ilimit = TRUE_;
+/* Computing MAX */
+ r__1 = safmin, r__2 = anrm2 * absai;
+ scale = safmin / anrm1 / dmax(r__1,r__2);
+ }
+
+/* Check for significant underflow in real part of ALPHA */
+
+/* Computing MAX */
+ r__1 = safmin, r__2 = eps * absai, r__1 = max(r__1,r__2), r__2 = eps *
+ absb;
+ if (dabs(salfar) < safmin && absar >= dmax(r__1,r__2)) {
+ ilimit = TRUE_;
+/* Computing MAX */
+/* Computing MAX */
+ r__3 = safmin, r__4 = anrm2 * absar;
+ r__1 = scale, r__2 = safmin / anrm1 / dmax(r__3,r__4);
+ scale = dmax(r__1,r__2);
+ }
+
+/* Check for significant underflow in BETA */
+
+/* Computing MAX */
+ r__1 = safmin, r__2 = eps * absar, r__1 = max(r__1,r__2), r__2 = eps *
+ absai;
+ if (dabs(sbeta) < safmin && absb >= dmax(r__1,r__2)) {
+ ilimit = TRUE_;
+/* Computing MAX */
+/* Computing MAX */
+ r__3 = safmin, r__4 = bnrm2 * absb;
+ r__1 = scale, r__2 = safmin / bnrm1 / dmax(r__3,r__4);
+ scale = dmax(r__1,r__2);
+ }
+
+/* Check for possible overflow when limiting scaling */
+
+ if (ilimit) {
+/* Computing MAX */
+ r__1 = dabs(salfar), r__2 = dabs(salfai), r__1 = max(r__1,r__2),
+ r__2 = dabs(sbeta);
+ temp = scale * safmin * dmax(r__1,r__2);
+ if (temp > 1.f) {
+ scale /= temp;
+ }
+ if (scale < 1.f) {
+ ilimit = FALSE_;
+ }
+ }
+
+/* Recompute un-scaled ALPHA, BETA if necessary. */
+
+ if (ilimit) {
+ i__2 = jc;
+ salfar = scale * alpha[i__2].r * anrm;
+ salfai = scale * r_imag(&alpha[jc]) * anrm;
+ i__2 = jc;
+ q__2.r = scale * beta[i__2].r, q__2.i = scale * beta[i__2].i;
+ q__1.r = bnrm * q__2.r, q__1.i = bnrm * q__2.i;
+ sbeta = q__1.r;
+ }
+ i__2 = jc;
+ q__1.r = salfar, q__1.i = salfai;
+ alpha[i__2].r = q__1.r, alpha[i__2].i = q__1.i;
+ i__2 = jc;
+ beta[i__2].r = sbeta, beta[i__2].i = 0.f;
+/* L70: */
+ }
+
+L80:
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CGEGV */
+
+} /* cgegv_ */
diff --git a/contrib/libs/clapack/cgehd2.c b/contrib/libs/clapack/cgehd2.c
new file mode 100644
index 0000000000..8cbc13f01d
--- /dev/null
+++ b/contrib/libs/clapack/cgehd2.c
@@ -0,0 +1,198 @@
+/* cgehd2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cgehd2_(integer *n, integer *ilo, integer *ihi, complex *
+	a, integer *lda, complex *tau, complex *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    complex q__1;
+
+    /* Builtin functions */
+    void r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    integer i__;
+    complex alpha;
+    extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+, integer *, complex *, complex *, integer *, complex *),
+	    clarfg_(integer *, complex *, complex *, integer *, complex *),
+	    xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGEHD2 reduces a complex general matrix A to upper Hessenberg form H */
+/* by a unitary similarity transformation: Q' * A * Q = H . */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that A is already upper triangular in rows */
+/* and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally */
+/* set by a previous call to CGEBAL; otherwise they should be */
+/* set to 1 and N respectively. See Further Details. */
+/* 1 <= ILO <= IHI <= max(1,N). */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the n by n general matrix to be reduced. */
+/* On exit, the upper triangle and the first subdiagonal of A */
+/* are overwritten with the upper Hessenberg matrix H, and the */
+/* elements below the first subdiagonal, with the array TAU, */
+/* represent the unitary matrix Q as a product of elementary */
+/* reflectors. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (output) COMPLEX array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) COMPLEX array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of (ihi-ilo) elementary */
+/* reflectors */
+
+/* Q = H(ilo) H(ilo+1) . . . H(ihi-1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on */
+/* exit in A(i+2:ihi,i), and tau in TAU(i). */
+
+/* The contents of A are illustrated by the following example, with */
+/* n = 7, ilo = 2 and ihi = 6: */
+
+/* on entry, on exit, */
+
+/* ( a a a a a a a ) ( a a h h h h a ) */
+/* ( a a a a a a ) ( a h h h h a ) */
+/* ( a a a a a a ) ( h h h h h h ) */
+/* ( a a a a a a ) ( v2 h h h h h ) */
+/* ( a a a a a a ) ( v2 v3 h h h h ) */
+/* ( a a a a a a ) ( v2 v3 v4 h h h ) */
+/* ( a ) ( a ) */
+
+/* where a denotes an element of the original matrix A, h denotes a */
+/* modified element of the upper Hessenberg matrix H, and vi denotes an */
+/* element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*ilo < 1 || *ilo > max(1,*n)) {
+	*info = -2;
+    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CGEHD2", &i__1);
+	return 0;
+    }
+
+/* Unblocked reduction: one Householder reflector per column ILO..IHI-1. */
+
+    i__1 = *ihi - 1;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+
+/* Compute elementary reflector H(i) to annihilate A(i+2:ihi,i) */
+
+	i__2 = i__ + 1 + i__ * a_dim1;
+	alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+	i__2 = *ihi - i__;
+/* Computing MIN */
+	i__3 = i__ + 2;
+/* min(i+2,n) keeps the tail pointer in range in the degenerate case */
+/* where the reflector has length 1 and no tail elements exist. */
+	clarfg_(&i__2, &alpha, &a[min(i__3, *n)+ i__ * a_dim1], &c__1, &tau[
+		i__]);
+/* Temporarily set A(i+1,i) = 1: CLARF expects v(1) = 1, and v is */
+/* stored in column i of A below the diagonal.  ALPHA (updated in */
+/* place by CLARFG) is written back after both applications of H(i). */
+	i__2 = i__ + 1 + i__ * a_dim1;
+	a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Apply H(i) to A(1:ihi,i+1:ihi) from the right */
+
+	i__2 = *ihi - i__;
+	clarf_("Right", ihi, &i__2, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+		i__], &a[(i__ + 1) * a_dim1 + 1], lda, &work[1]);
+
+/* Apply H(i)' to A(i+1:ihi,i+1:n) from the left */
+
+	i__2 = *ihi - i__;
+	i__3 = *n - i__;
+	r_cnjg(&q__1, &tau[i__]);
+	clarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &c__1, &q__1,
+		 &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &work[1]);
+
+/* Restore the subdiagonal entry A(i+1,i) overwritten by the v(1)=1 */
+/* convention above. */
+	i__2 = i__ + 1 + i__ * a_dim1;
+	a[i__2].r = alpha.r, a[i__2].i = alpha.i;
+/* L10: */
+    }
+
+    return 0;
+
+/* End of CGEHD2 */
+
+} /* cgehd2_ */
diff --git a/contrib/libs/clapack/cgehrd.c b/contrib/libs/clapack/cgehrd.c
new file mode 100644
index 0000000000..0cf4b2d584
--- /dev/null
+++ b/contrib/libs/clapack/cgehrd.c
@@ -0,0 +1,350 @@
+/* cgehrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int cgehrd_(integer *n, integer *ilo, integer *ihi, complex *
+	a, integer *lda, complex *tau, complex *work, integer *lwork, integer
+	*info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+    complex q__1;
+
+    /* Local variables */
+    integer i__, j;
+    complex t[4160] /* was [65][64] */;
+    /* t: fixed workspace for the triangular factor T of each block
+       reflector; used with leading dimension 65 (c__65) in the CLAHR2
+       and CLARFB calls below, so the maximum block size is 64. */
+    integer ib;
+    complex ei;
+    integer nb, nh, nx, iws;
+    extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+	    integer *, complex *, complex *, integer *, complex *, integer *,
+	    complex *, complex *, integer *);
+    integer nbmin, iinfo;
+    extern /* Subroutine */ int ctrmm_(char *, char *, char *, char *,
+	    integer *, integer *, complex *, complex *, integer *, complex *,
+	    integer *), caxpy_(integer *,
+	    complex *, complex *, integer *, complex *, integer *), cgehd2_(
+	    integer *, integer *, integer *, complex *, integer *, complex *,
+	    complex *, integer *), clahr2_(integer *, integer *, integer *,
+	    complex *, integer *, complex *, complex *, integer *, complex *,
+	    integer *), clarfb_(char *, char *, char *, char *, integer *,
+	    integer *, integer *, complex *, integer *, complex *, integer *,
+	    complex *, integer *, complex *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGEHRD reduces a complex general matrix A to upper Hessenberg form H by */
+/* an unitary similarity transformation: Q' * A * Q = H . */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that A is already upper triangular in rows */
+/* and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally */
+/* set by a previous call to CGEBAL; otherwise they should be */
+/* set to 1 and N respectively. See Further Details. */
+/* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the N-by-N general matrix to be reduced. */
+/* On exit, the upper triangle and the first subdiagonal of A */
+/* are overwritten with the upper Hessenberg matrix H, and the */
+/* elements below the first subdiagonal, with the array TAU, */
+/* represent the unitary matrix Q as a product of elementary */
+/* reflectors. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (output) COMPLEX array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). Elements 1:ILO-1 and IHI:N-1 of TAU are set to */
+/* zero. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (LWORK) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of (ihi-ilo) elementary */
+/* reflectors */
+
+/* Q = H(ilo) H(ilo+1) . . . H(ihi-1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on */
+/* exit in A(i+2:ihi,i), and tau in TAU(i). */
+
+/* The contents of A are illustrated by the following example, with */
+/* n = 7, ilo = 2 and ihi = 6: */
+
+/* on entry, on exit, */
+
+/* ( a a a a a a a ) ( a a h h h h a ) */
+/* ( a a a a a a ) ( a h h h h a ) */
+/* ( a a a a a a ) ( h h h h h h ) */
+/* ( a a a a a a ) ( v2 h h h h h ) */
+/* ( a a a a a a ) ( v2 v3 h h h h ) */
+/* ( a a a a a a ) ( v2 v3 v4 h h h ) */
+/* ( a ) ( a ) */
+
+/* where a denotes an element of the original matrix A, h denotes a */
+/* modified element of the upper Hessenberg matrix H, and vi denotes an */
+/* element of the vector defining H(i). */
+
+/* This file is a slight modification of LAPACK-3.0's CGEHRD */
+/* subroutine incorporating improvements proposed by Quintana-Orti and */
+/* Van de Geijn (2005). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+/* Computing MIN */
+/* NB is capped at 64 because the local T workspace is 65x64. */
+    i__1 = 64, i__2 = ilaenv_(&c__1, "CGEHRD", " ", n, ilo, ihi, &c_n1);
+    nb = min(i__1,i__2);
+    lwkopt = *n * nb;
+    work[1].r = (real) lwkopt, work[1].i = 0.f;
+    lquery = *lwork == -1;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*ilo < 1 || *ilo > max(1,*n)) {
+	*info = -2;
+    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*lwork < max(1,*n) && ! lquery) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CGEHRD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Set elements 1:ILO-1 and IHI:N-1 of TAU to zero */
+
+    i__1 = *ilo - 1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = i__;
+	tau[i__2].r = 0.f, tau[i__2].i = 0.f;
+/* L10: */
+    }
+    i__1 = *n - 1;
+/* max(1,ihi) guards the lower bound when IHI = 0 (allowed for N = 0). */
+    for (i__ = max(1,*ihi); i__ <= i__1; ++i__) {
+	i__2 = i__;
+	tau[i__2].r = 0.f, tau[i__2].i = 0.f;
+/* L20: */
+    }
+
+/* Quick return if possible */
+
+    nh = *ihi - *ilo + 1;
+    if (nh <= 1) {
+	work[1].r = 1.f, work[1].i = 0.f;
+	return 0;
+    }
+
+/* Determine the block size */
+
+/* Computing MIN */
+    i__1 = 64, i__2 = ilaenv_(&c__1, "CGEHRD", " ", n, ilo, ihi, &c_n1);
+    nb = min(i__1,i__2);
+    nbmin = 2;
+    iws = 1;
+    if (nb > 1 && nb < nh) {
+
+/* Determine when to cross over from blocked to unblocked code */
+/* (last block is always handled by unblocked code) */
+
+/* Computing MAX */
+	i__1 = nb, i__2 = ilaenv_(&c__3, "CGEHRD", " ", n, ilo, ihi, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < nh) {
+
+/* Determine if workspace is large enough for blocked code */
+
+	    iws = *n * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: determine the */
+/* minimum value of NB, and reduce NB or force use of */
+/* unblocked code */
+
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "CGEHRD", " ", n, ilo, ihi, &
+			c_n1);
+		nbmin = max(i__1,i__2);
+		if (*lwork >= *n * nbmin) {
+		    nb = *lwork / *n;
+		} else {
+		    nb = 1;
+		}
+	    }
+	}
+    }
+    ldwork = *n;
+
+    if (nb < nbmin || nb >= nh) {
+
+/* Use unblocked code below */
+
+	i__ = *ilo;
+
+    } else {
+
+/* Use blocked code */
+
+/* f2c rendering of "DO I = ILO, IHI-1-NX, NB": the step-sign test in */
+/* the for-condition makes the translated loop direction-agnostic. */
+	i__1 = *ihi - 1 - nx;
+	i__2 = nb;
+	for (i__ = *ilo; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+	    i__3 = nb, i__4 = *ihi - i__;
+	    ib = min(i__3,i__4);
+
+/* Reduce columns i:i+ib-1 to Hessenberg form, returning the */
+/* matrices V and T of the block reflector H = I - V*T*V' */
+/* which performs the reduction, and also the matrix Y = A*V*T */
+
+	    clahr2_(ihi, &i__, &ib, &a[i__ * a_dim1 + 1], lda, &tau[i__], t, &
+		    c__65, &work[1], &ldwork);
+
+/* Apply the block reflector H to A(1:ihi,i+ib:ihi) from the */
+/* right, computing A := A - Y * V'. V(i+ib,ib-1) must be set */
+/* to 1 */
+
+/* Save the subdiagonal element EI, overwrite it with 1 for the */
+/* rank-IB update, then restore it after the CGEMM. */
+	    i__3 = i__ + ib + (i__ + ib - 1) * a_dim1;
+	    ei.r = a[i__3].r, ei.i = a[i__3].i;
+	    i__3 = i__ + ib + (i__ + ib - 1) * a_dim1;
+	    a[i__3].r = 1.f, a[i__3].i = 0.f;
+	    i__3 = *ihi - i__ - ib + 1;
+	    q__1.r = -1.f, q__1.i = -0.f;
+	    cgemm_("No transpose", "Conjugate transpose", ihi, &i__3, &ib, &
+		    q__1, &work[1], &ldwork, &a[i__ + ib + i__ * a_dim1], lda,
+		     &c_b2, &a[(i__ + ib) * a_dim1 + 1], lda);
+	    i__3 = i__ + ib + (i__ + ib - 1) * a_dim1;
+	    a[i__3].r = ei.r, a[i__3].i = ei.i;
+
+/* Apply the block reflector H to A(1:i,i+1:i+ib-1) from the */
+/* right */
+
+	    i__3 = ib - 1;
+	    ctrmm_("Right", "Lower", "Conjugate transpose", "Unit", &i__, &
+		    i__3, &c_b2, &a[i__ + 1 + i__ * a_dim1], lda, &work[1], &
+		    ldwork);
+	    i__3 = ib - 2;
+	    for (j = 0; j <= i__3; ++j) {
+		q__1.r = -1.f, q__1.i = -0.f;
+		caxpy_(&i__, &q__1, &work[ldwork * j + 1], &c__1, &a[(i__ + j
+			+ 1) * a_dim1 + 1], &c__1);
+/* L30: */
+	    }
+
+/* Apply the block reflector H to A(i+1:ihi,i+ib:n) from the */
+/* left */
+
+	    i__3 = *ihi - i__;
+	    i__4 = *n - i__ - ib + 1;
+	    clarfb_("Left", "Conjugate transpose", "Forward", "Columnwise", &
+		    i__3, &i__4, &ib, &a[i__ + 1 + i__ * a_dim1], lda, t, &
+		    c__65, &a[i__ + 1 + (i__ + ib) * a_dim1], lda, &work[1], &
+		    ldwork);
+/* L40: */
+	}
+    }
+
+/* Use unblocked code to reduce the rest of the matrix */
+
+    cgehd2_(n, &i__, ihi, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
+/* NOTE(review): IWS stays 1 whenever the blocked path was not entered, */
+/* so WORK(1) may understate the optimal LWORK promised by the header; */
+/* presumably this mirrors the reference CGEHRD -- confirm upstream. */
+    work[1].r = (real) iws, work[1].i = 0.f;
+
+    return 0;
+
+/* End of CGEHRD */
+
+} /* cgehrd_ */
diff --git a/contrib/libs/clapack/cgelq2.c b/contrib/libs/clapack/cgelq2.c
new file mode 100644
index 0000000000..04a23f8d5b
--- /dev/null
+++ b/contrib/libs/clapack/cgelq2.c
@@ -0,0 +1,165 @@
+/* cgelq2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cgelq2_(integer *m, integer *n, complex *a, integer *lda,
+	 complex *tau, complex *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__, k;
+    complex alpha;
+    extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+, integer *, complex *, complex *, integer *, complex *),
+	    clacgv_(integer *, complex *, integer *), clarfp_(integer *,
+	    complex *, complex *, integer *, complex *), xerbla_(char *,
+	    integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGELQ2 computes an LQ factorization of a complex m by n matrix A: */
+/* A = L * Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the m by n matrix A. */
+/* On exit, the elements on and below the diagonal of the array */
+/* contain the m by min(m,n) lower trapezoidal matrix L (L is */
+/* lower triangular if m <= n); the elements above the diagonal, */
+/* with the array TAU, represent the unitary matrix Q as a */
+/* product of elementary reflectors (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) COMPLEX array, dimension (M) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(k)' . . . H(2)' H(1)', where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i-1) = 0 and v(i) = 1; conjg(v(i+1:n)) is stored on exit in */
+/* A(i,i+1:n), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CGELQ2", &i__1);
+	return 0;
+    }
+
+    k = min(*m,*n);
+
+    i__1 = k;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i,i+1:n) */
+
+/* Conjugate row i so the reflector is generated from conjg(A(i,i:n)); */
+/* the conjugation is undone at the bottom of the loop body. */
+	i__2 = *n - i__ + 1;
+	clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+	i__2 = i__ + i__ * a_dim1;
+	alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+	i__2 = *n - i__ + 1;
+/* Computing MIN */
+	i__3 = i__ + 1;
+/* min(i+1,n) keeps the tail pointer in range when i = n. */
+	clarfp_(&i__2, &alpha, &a[i__ + min(i__3, *n)* a_dim1], lda, &tau[i__]
+);
+	if (i__ < *m) {
+
+/* Apply H(i) to A(i+1:m,i:n) from the right */
+
+/* Temporarily set A(i,i) = 1 so row i can serve as v with v(1) = 1; */
+/* ALPHA (updated in place by CLARFP) is restored below. */
+	    i__2 = i__ + i__ * a_dim1;
+	    a[i__2].r = 1.f, a[i__2].i = 0.f;
+	    i__2 = *m - i__;
+	    i__3 = *n - i__ + 1;
+	    clarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[
+		    i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+	}
+/* Restore A(i,i) to the value produced by CLARFP, then un-conjugate */
+/* the row so conjg(v(i+1:n)) is what remains stored in A(i,i+1:n). */
+	i__2 = i__ + i__ * a_dim1;
+	a[i__2].r = alpha.r, a[i__2].i = alpha.i;
+	i__2 = *n - i__ + 1;
+	clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+/* L10: */
+    }
+    return 0;
+
+/* End of CGELQ2 */
+
+} /* cgelq2_ */
diff --git a/contrib/libs/clapack/cgelqf.c b/contrib/libs/clapack/cgelqf.c
new file mode 100644
index 0000000000..9c62f96db6
--- /dev/null
+++ b/contrib/libs/clapack/cgelqf.c
@@ -0,0 +1,252 @@
+/* cgelqf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int cgelqf_(integer *m, integer *n, complex *a, integer *lda,
+	 complex *tau, complex *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int cgelq2_(integer *, integer *, complex *,
+	    integer *, complex *, complex *, integer *), clarfb_(char *, char
+	    *, char *, char *, integer *, integer *, integer *, complex *,
+	    integer *, complex *, integer *, complex *, integer *, complex *,
+	    integer *), clarft_(char *, char *
+, integer *, integer *, complex *, integer *, complex *, complex *
+, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGELQF computes an LQ factorization of a complex M-by-N matrix A: */
+/* A = L * Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the elements on and below the diagonal of the array */
+/* contain the m-by-min(m,n) lower trapezoidal matrix L (L is */
+/* lower triangular if m <= n); the elements above the diagonal, */
+/* with the array TAU, represent the unitary matrix Q as a */
+/* product of elementary reflectors (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(k)' . . . H(2)' H(1)', where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i-1) = 0 and v(i) = 1; conjg(v(i+1:n)) is stored on exit in */
+/* A(i,i+1:n), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments; also computes the optimal workspace size */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    nb = ilaenv_(&c__1, "CGELQF", " ", m, n, &c_n1, &c_n1);
+    lwkopt = *m * nb;
+    work[1].r = (real) lwkopt, work[1].i = 0.f;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    } else if (*lwork < max(1,*m) && ! lquery) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CGELQF", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible: nothing to factor when min(M,N) = 0 */
+
+    k = min(*m,*n);
+    if (k == 0) {
+	work[1].r = 1.f, work[1].i = 0.f;
+	return 0;
+    }
+
+    nbmin = 2;
+    nx = 0;
+    iws = *m;
+    if (nb > 1 && nb < k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+	i__1 = 0, i__2 = ilaenv_(&c__3, "CGELQF", " ", m, n, &c_n1, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *m;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "CGELQF", " ", m, n, &c_n1, &
+			c_n1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially: factor NB rows per iteration */
+
+	i__1 = k - nx;
+	i__2 = nb;
+	for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+	    i__3 = k - i__ + 1;
+	    ib = min(i__3,nb);
+
+/* Compute the LQ factorization of the current block */
+/* A(i:i+ib-1,i:n) */
+
+	    i__3 = *n - i__ + 1;
+	    cgelq2_(&ib, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
+		    1], &iinfo);
+	    if (i__ + ib <= *m) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+		i__3 = *n - i__ + 1;
+		clarft_("Forward", "Rowwise", &i__3, &ib, &a[i__ + i__ *
+			a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(i+ib:m,i:n) from the right */
+
+		i__3 = *m - i__ - ib + 1;
+		i__4 = *n - i__ + 1;
+		clarfb_("Right", "No transpose", "Forward", "Rowwise", &i__3,
+			&i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
+			ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib +
+			1], &ldwork);
+	    }
+/* L10: */
+	}
+    } else {
+	i__ = 1;
+    }
+
+/* Use unblocked code to factor the last or only block. */
+
+    if (i__ <= k) {
+	i__2 = *m - i__ + 1;
+	i__1 = *n - i__ + 1;
+	cgelq2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
+, &iinfo);
+    }
+
+    work[1].r = (real) iws, work[1].i = 0.f;
+    return 0;
+
+/* End of CGELQF */
+
+} /* cgelqf_ */
diff --git a/contrib/libs/clapack/cgels.c b/contrib/libs/clapack/cgels.c
new file mode 100644
index 0000000000..3385ab8c29
--- /dev/null
+++ b/contrib/libs/clapack/cgels.c
@@ -0,0 +1,520 @@
+/* cgels.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__0 = 0;
+
+/* Subroutine */ int cgels_(char *trans, integer *m, integer *n, integer *
+	nrhs, complex *a, integer *lda, complex *b, integer *ldb, complex *
+	work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+    real r__1;
+
+    /* Local variables */
+    integer i__, j, nb, mn;
+    real anrm, bnrm;
+    integer brow;
+    logical tpsd;
+    integer iascl, ibscl;
+    extern logical lsame_(char *, char *);
+    integer wsize;
+    real rwork[1];
+    extern /* Subroutine */ int slabad_(real *, real *);
+    extern doublereal clange_(char *, integer *, integer *, complex *,
+	    integer *, real *);
+    extern /* Subroutine */ int cgelqf_(integer *, integer *, complex *,
+	    integer *, complex *, complex *, integer *, integer *), clascl_(
+	    char *, integer *, integer *, real *, real *, integer *, integer *
+, complex *, integer *, integer *);
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int cgeqrf_(integer *, integer *, complex *,
+	    integer *, complex *, complex *, integer *, integer *), claset_(
+	    char *, integer *, integer *, complex *, complex *, complex *,
+	    integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer scllen;
+    real bignum;
+    extern /* Subroutine */ int cunmlq_(char *, char *, integer *, integer *,
+	    integer *, complex *, integer *, complex *, complex *, integer *,
+	    complex *, integer *, integer *), cunmqr_(char *,
+	    char *, integer *, integer *, integer *, complex *, integer *,
+	    complex *, complex *, integer *, complex *, integer *, integer *);
+    real smlnum;
+    logical lquery;
+    extern /* Subroutine */ int ctrtrs_(char *, char *, char *, integer *,
+	    integer *, complex *, integer *, complex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGELS solves overdetermined or underdetermined complex linear systems */
+/* involving an M-by-N matrix A, or its conjugate-transpose, using a QR */
+/* or LQ factorization of A. It is assumed that A has full rank. */
+
+/* The following options are provided: */
+
+/* 1. If TRANS = 'N' and m >= n: find the least squares solution of */
+/* an overdetermined system, i.e., solve the least squares problem */
+/* minimize || B - A*X ||. */
+
+/* 2. If TRANS = 'N' and m < n: find the minimum norm solution of */
+/* an underdetermined system A * X = B. */
+
+/* 3. If TRANS = 'C' and m >= n: find the minimum norm solution of */
+/* an undetermined system A**H * X = B. */
+
+/* 4. If TRANS = 'C' and m < n: find the least squares solution of */
+/* an overdetermined system, i.e., solve the least squares problem */
+/* minimize || B - A**H * X ||. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution */
+/* matrix X. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': the linear system involves A; */
+/* = 'C': the linear system involves A**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of */
+/* columns of the matrices B and X. NRHS >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* if M >= N, A is overwritten by details of its QR */
+/* factorization as returned by CGEQRF; */
+/* if M < N, A is overwritten by details of its LQ */
+/* factorization as returned by CGELQF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the matrix B of right hand side vectors, stored */
+/* columnwise; B is M-by-NRHS if TRANS = 'N', or N-by-NRHS */
+/* if TRANS = 'C'. */
+/* On exit, if INFO = 0, B is overwritten by the solution */
+/* vectors, stored columnwise: */
+/* if TRANS = 'N' and m >= n, rows 1 to n of B contain the least */
+/* squares solution vectors; the residual sum of squares for the */
+/* solution in each column is given by the sum of squares of the */
+/* modulus of elements N+1 to M in that column; */
+/* if TRANS = 'N' and m < n, rows 1 to N of B contain the */
+/* minimum norm solution vectors; */
+/* if TRANS = 'C' and m >= n, rows 1 to M of B contain the */
+/* minimum norm solution vectors; */
+/* if TRANS = 'C' and m < n, rows 1 to M of B contain the */
+/* least squares solution vectors; the residual sum of squares */
+/* for the solution in each column is given by the sum of */
+/* squares of the modulus of elements M+1 to N in that column. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= MAX(1,M,N). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* LWORK >= max( 1, MN + max( MN, NRHS ) ). */
+/* For optimal performance, */
+/* LWORK >= max( 1, MN + max( MN, NRHS )*NB ). */
+/* where MN = min(M,N) and NB is the optimum block size. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element of the */
+/* triangular factor of A is zero, so that A does not have */
+/* full rank; the least squares solution could not be */
+/* computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    mn = min(*m,*n);
+    lquery = *lwork == -1;
+    if (! (lsame_(trans, "N") || lsame_(trans, "C"))) {
+	*info = -1;
+    } else if (*m < 0) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*m)) {
+	*info = -6;
+    } else /* if(complicated condition) */ {
+/* Computing MAX */
+	i__1 = max(1,*m);
+	if (*ldb < max(i__1,*n)) {
+	    *info = -8;
+	} else /* if(complicated condition) */ {
+/* Computing MAX */
+	    i__1 = 1, i__2 = mn + max(mn,*nrhs);
+	    if (*lwork < max(i__1,i__2) && ! lquery) {
+		*info = -10;
+	    }
+	}
+    }
+
+/* Figure out optimal block size for the factorization and Q applies */
+
+    if (*info == 0 || *info == -10) {
+
+	tpsd = TRUE_;
+	if (lsame_(trans, "N")) {
+	    tpsd = FALSE_;
+	}
+
+	if (*m >= *n) {
+	    nb = ilaenv_(&c__1, "CGEQRF", " ", m, n, &c_n1, &c_n1);
+	    if (tpsd) {
+/* Computing MAX */
+		i__1 = nb, i__2 = ilaenv_(&c__1, "CUNMQR", "LN", m, nrhs, n, &
+			c_n1);
+		nb = max(i__1,i__2);
+	    } else {
+/* Computing MAX */
+		i__1 = nb, i__2 = ilaenv_(&c__1, "CUNMQR", "LC", m, nrhs, n, &
+			c_n1);
+		nb = max(i__1,i__2);
+	    }
+	} else {
+	    nb = ilaenv_(&c__1, "CGELQF", " ", m, n, &c_n1, &c_n1);
+	    if (tpsd) {
+/* Computing MAX */
+		i__1 = nb, i__2 = ilaenv_(&c__1, "CUNMLQ", "LC", n, nrhs, m, &
+			c_n1);
+		nb = max(i__1,i__2);
+	    } else {
+/* Computing MAX */
+		i__1 = nb, i__2 = ilaenv_(&c__1, "CUNMLQ", "LN", n, nrhs, m, &
+			c_n1);
+		nb = max(i__1,i__2);
+	    }
+	}
+
+/* Computing MAX */
+	i__1 = 1, i__2 = mn + max(mn,*nrhs) * nb;
+	wsize = max(i__1,i__2);
+	r__1 = (real) wsize;
+	work[1].r = r__1, work[1].i = 0.f;
+
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CGELS ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible: a zero-sized problem gets a zero solution */
+
+/* Computing MIN */
+    i__1 = min(*m,*n);
+    if (min(i__1,*nrhs) == 0) {
+	i__1 = max(*m,*n);
+	claset_("Full", &i__1, nrhs, &c_b1, &c_b1, &b[b_offset], ldb);
+	return 0;
+    }
+
+/* Get machine parameters for safe scaling */
+
+    smlnum = slamch_("S") / slamch_("P");
+    bignum = 1.f / smlnum;
+    slabad_(&smlnum, &bignum);
+
+/* Scale A, B if max element outside range [SMLNUM,BIGNUM] */
+
+    anrm = clange_("M", m, n, &a[a_offset], lda, rwork);
+    iascl = 0;
+    if (anrm > 0.f && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+	clascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+		info);
+	iascl = 1;
+    } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+	clascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+		info);
+	iascl = 2;
+    } else if (anrm == 0.f) {
+
+/* Matrix all zero. Return zero solution. */
+
+	i__1 = max(*m,*n);
+	claset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[b_offset], ldb);
+	goto L50;
+    }
+
+    brow = *m;
+    if (tpsd) {
+	brow = *n;
+    }
+    bnrm = clange_("M", &brow, nrhs, &b[b_offset], ldb, rwork);
+    ibscl = 0;
+    if (bnrm > 0.f && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+	clascl_("G", &c__0, &c__0, &bnrm, &smlnum, &brow, nrhs, &b[b_offset],
+		ldb, info);
+	ibscl = 1;
+    } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+	clascl_("G", &c__0, &c__0, &bnrm, &bignum, &brow, nrhs, &b[b_offset],
+		ldb, info);
+	ibscl = 2;
+    }
+
+    if (*m >= *n) {
+
+/* compute QR factorization of A */
+
+	i__1 = *lwork - mn;
+	cgeqrf_(m, n, &a[a_offset], lda, &work[1], &work[mn + 1], &i__1, info)
+		;
+
+/* workspace at least N, optimally N*NB */
+
+	if (! tpsd) {
+
+/* Least-Squares Problem min || A * X - B || */
+
+/* B(1:M,1:NRHS) := Q' * B(1:M,1:NRHS) */
+
+	    i__1 = *lwork - mn;
+	    cunmqr_("Left", "Conjugate transpose", m, nrhs, n, &a[a_offset],
+		    lda, &work[1], &b[b_offset], ldb, &work[mn + 1], &i__1,
+		    info);
+
+/* workspace at least NRHS, optimally NRHS*NB */
+
+/* B(1:N,1:NRHS) := inv(R) * B(1:N,1:NRHS) */
+
+	    ctrtrs_("Upper", "No transpose", "Non-unit", n, nrhs, &a[a_offset]
+, lda, &b[b_offset], ldb, info);
+
+	    if (*info > 0) {
+		return 0;
+	    }
+
+	    scllen = *n;
+
+	} else {
+
+/* Overdetermined system of equations A' * X = B */
+
+/* B(1:N,1:NRHS) := inv(R') * B(1:N,1:NRHS) */
+
+	    ctrtrs_("Upper", "Conjugate transpose", "Non-unit", n, nrhs, &a[
+		    a_offset], lda, &b[b_offset], ldb, info);
+
+	    if (*info > 0) {
+		return 0;
+	    }
+
+/* B(N+1:M,1:NRHS) = ZERO */
+
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = *n + 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * b_dim1;
+		    b[i__3].r = 0.f, b[i__3].i = 0.f;
+/* L10: */
+		}
+/* L20: */
+	    }
+
+/* B(1:M,1:NRHS) := Q(1:N,:) * B(1:N,1:NRHS) */
+
+	    i__1 = *lwork - mn;
+	    cunmqr_("Left", "No transpose", m, nrhs, n, &a[a_offset], lda, &
+		    work[1], &b[b_offset], ldb, &work[mn + 1], &i__1, info);
+
+/* workspace at least NRHS, optimally NRHS*NB */
+
+	    scllen = *m;
+
+	}
+
+    } else {
+
+/* Compute LQ factorization of A */
+
+	i__1 = *lwork - mn;
+	cgelqf_(m, n, &a[a_offset], lda, &work[1], &work[mn + 1], &i__1, info)
+		;
+
+/* workspace at least M, optimally M*NB. */
+
+	if (! tpsd) {
+
+/* underdetermined system of equations A * X = B */
+
+/* B(1:M,1:NRHS) := inv(L) * B(1:M,1:NRHS) */
+
+	    ctrtrs_("Lower", "No transpose", "Non-unit", m, nrhs, &a[a_offset]
+, lda, &b[b_offset], ldb, info);
+
+	    if (*info > 0) {
+		return 0;
+	    }
+
+/* B(M+1:N,1:NRHS) = 0 */
+
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n;
+		for (i__ = *m + 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * b_dim1;
+		    b[i__3].r = 0.f, b[i__3].i = 0.f;
+/* L30: */
+		}
+/* L40: */
+	    }
+
+/* B(1:N,1:NRHS) := Q(1:N,:)' * B(1:M,1:NRHS) */
+
+	    i__1 = *lwork - mn;
+	    cunmlq_("Left", "Conjugate transpose", n, nrhs, m, &a[a_offset],
+		    lda, &work[1], &b[b_offset], ldb, &work[mn + 1], &i__1,
+		    info);
+
+/* workspace at least NRHS, optimally NRHS*NB */
+
+	    scllen = *n;
+
+	} else {
+
+/* overdetermined system min || A' * X - B || */
+
+/* B(1:N,1:NRHS) := Q * B(1:N,1:NRHS) */
+
+	    i__1 = *lwork - mn;
+	    cunmlq_("Left", "No transpose", n, nrhs, m, &a[a_offset], lda, &
+		    work[1], &b[b_offset], ldb, &work[mn + 1], &i__1, info);
+
+/* workspace at least NRHS, optimally NRHS*NB */
+
+/* B(1:M,1:NRHS) := inv(L') * B(1:M,1:NRHS) */
+
+	    ctrtrs_("Lower", "Conjugate transpose", "Non-unit", m, nrhs, &a[
+		    a_offset], lda, &b[b_offset], ldb, info);
+
+	    if (*info > 0) {
+		return 0;
+	    }
+
+	    scllen = *m;
+
+	}
+
+    }
+
+/* Undo scaling so the solution matches the original (unscaled) problem */
+
+    if (iascl == 1) {
+	clascl_("G", &c__0, &c__0, &anrm, &smlnum, &scllen, nrhs, &b[b_offset]
+, ldb, info);
+    } else if (iascl == 2) {
+	clascl_("G", &c__0, &c__0, &anrm, &bignum, &scllen, nrhs, &b[b_offset]
+, ldb, info);
+    }
+    if (ibscl == 1) {
+	clascl_("G", &c__0, &c__0, &smlnum, &bnrm, &scllen, nrhs, &b[b_offset]
+, ldb, info);
+    } else if (ibscl == 2) {
+	clascl_("G", &c__0, &c__0, &bignum, &bnrm, &scllen, nrhs, &b[b_offset]
+, ldb, info);
+    }
+
+L50:
+    r__1 = (real) wsize;
+    work[1].r = r__1, work[1].i = 0.f;
+
+    return 0;
+
+/* End of CGELS */
+
+} /* cgels_ */
diff --git a/contrib/libs/clapack/cgelsd.c b/contrib/libs/clapack/cgelsd.c
new file mode 100644
index 0000000000..c949a9371f
--- /dev/null
+++ b/contrib/libs/clapack/cgelsd.c
@@ -0,0 +1,717 @@
+/* cgelsd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static integer c__9 = 9;
+static integer c__0 = 0;
+static integer c__6 = 6;
+static integer c_n1 = -1;
+static integer c__1 = 1;
+static real c_b80 = 0.f;
+
+/* Subroutine */ int cgelsd_(integer *m, integer *n, integer *nrhs, complex *
+ a, integer *lda, complex *b, integer *ldb, real *s, real *rcond,
+ integer *rank, complex *work, integer *lwork, real *rwork, integer *
+ iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+
+ /* Builtin functions */
+ double log(doublereal);
+
+ /* Local variables */
+ integer ie, il, mm;
+ real eps, anrm, bnrm;
+ integer itau, nlvl, iascl, ibscl;
+ real sfmin;
+ integer minmn, maxmn, itaup, itauq, mnthr, nwork;
+ extern /* Subroutine */ int cgebrd_(integer *, integer *, complex *,
+ integer *, real *, real *, complex *, complex *, complex *,
+ integer *, integer *), slabad_(real *, real *);
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ extern /* Subroutine */ int cgelqf_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *, integer *), clalsd_(
+ char *, integer *, integer *, integer *, real *, real *, complex *
+, integer *, real *, integer *, complex *, real *, integer *,
+ integer *), clascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, complex *, integer *, integer *), cgeqrf_(integer *, integer *, complex *, integer *,
+ complex *, complex *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), claset_(char *,
+ integer *, integer *, complex *, complex *, complex *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), cunmbr_(char *, char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *), slaset_(
+ char *, integer *, integer *, real *, real *, real *, integer *), cunmlq_(char *, char *, integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, complex *,
+ integer *, integer *);
+ integer ldwork;
+ extern /* Subroutine */ int cunmqr_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *);
+ integer liwork, minwrk, maxwrk;
+ real smlnum;
+ integer lrwork;
+ logical lquery;
+ integer nrwork, smlsiz;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGELSD computes the minimum-norm solution to a real linear least */
+/* squares problem: */
+/* minimize 2-norm(| b - A*x |) */
+/* using the singular value decomposition (SVD) of A. A is an M-by-N */
+/* matrix which may be rank-deficient. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution */
+/* matrix X. */
+
+/* The problem is solved in three steps: */
+/* (1) Reduce the coefficient matrix A to bidiagonal form with */
+/* Householder tranformations, reducing the original problem */
+/* into a "bidiagonal least squares problem" (BLS) */
+/* (2) Solve the BLS using a divide and conquer approach. */
+/* (3) Apply back all the Householder tranformations to solve */
+/* the original least squares problem. */
+
+/* The effective rank of A is determined by treating as zero those */
+/* singular values which are less than RCOND times the largest singular */
+/* value. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A has been destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the M-by-NRHS right hand side matrix B. */
+/* On exit, B is overwritten by the N-by-NRHS solution matrix X. */
+/* If m >= n and RANK = n, the residual sum-of-squares for */
+/* the solution in the i-th column is given by the sum of */
+/* squares of the modulus of elements n+1:m in that column. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,M,N). */
+
+/* S (output) REAL array, dimension (min(M,N)) */
+/* The singular values of A in decreasing order. */
+/* The condition number of A in the 2-norm = S(1)/S(min(m,n)). */
+
+/* RCOND (input) REAL */
+/* RCOND is used to determine the effective rank of A. */
+/* Singular values S(i) <= RCOND*S(1) are treated as zero. */
+/* If RCOND < 0, machine precision is used instead. */
+
+/* RANK (output) INTEGER */
+/* The effective rank of A, i.e., the number of singular values */
+/* which are greater than RCOND*S(1). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK must be at least 1. */
+/* The exact minimum amount of workspace needed depends on M, */
+/* N and NRHS. As long as LWORK is at least */
+/* 2 * N + N * NRHS */
+/* if M is greater than or equal to N or */
+/* 2 * M + M * NRHS */
+/* if M is less than N, the code will execute correctly. */
+/* For good performance, LWORK should generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the array WORK and the */
+/* minimum sizes of the arrays RWORK and IWORK, and returns */
+/* these values as the first entries of the WORK, RWORK and */
+/* IWORK arrays, and no error message related to LWORK is issued */
+/* by XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (MAX(1,LRWORK)) */
+/* LRWORK >= */
+/* 10*N + 2*N*SMLSIZ + 8*N*NLVL + 3*SMLSIZ*NRHS + */
+/* (SMLSIZ+1)**2 */
+/* if M is greater than or equal to N or */
+/* 10*M + 2*M*SMLSIZ + 8*M*NLVL + 3*SMLSIZ*NRHS + */
+/* (SMLSIZ+1)**2 */
+/* if M is less than N, the code will execute correctly. */
+/* SMLSIZ is returned by ILAENV and is equal to the maximum */
+/* size of the subproblems at the bottom of the computation */
+/* tree (usually about 25), and */
+/* NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 ) */
+/* On exit, if INFO = 0, RWORK(1) returns the minimum LRWORK. */
+
+/* IWORK (workspace) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* LIWORK >= max(1, 3*MINMN*NLVL + 11*MINMN), */
+/* where MINMN = MIN( M,N ). */
+/* On exit, if INFO = 0, IWORK(1) returns the minimum LIWORK. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: the algorithm for computing the SVD failed to converge; */
+/* if INFO = i, i off-diagonal elements of an intermediate */
+/* bidiagonal form did not converge to zero. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Ren-Cang Li, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --s;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ maxmn = max(*m,*n);
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldb < max(1,maxmn)) {
+ *info = -7;
+ }
+
+/* Compute workspace. */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ liwork = 1;
+ lrwork = 1;
+ if (minmn > 0) {
+ smlsiz = ilaenv_(&c__9, "CGELSD", " ", &c__0, &c__0, &c__0, &c__0);
+ mnthr = ilaenv_(&c__6, "CGELSD", " ", m, n, nrhs, &c_n1);
+/* Computing MAX */
+ i__1 = (integer) (log((real) minmn / (real) (smlsiz + 1)) / log(
+ 2.f)) + 1;
+ nlvl = max(i__1,0);
+ liwork = minmn * 3 * nlvl + minmn * 11;
+ mm = *m;
+ if (*m >= *n && *m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than */
+/* columns. */
+
+ mm = *n;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * ilaenv_(&c__1, "CGEQRF", " ", m, n,
+ &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *nrhs * ilaenv_(&c__1, "CUNMQR", "LC",
+ m, nrhs, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ }
+ if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined. */
+
+/* Computing 2nd power */
+ i__1 = smlsiz + 1;
+ lrwork = *n * 10 + (*n << 1) * smlsiz + (*n << 3) * nlvl +
+ smlsiz * 3 * *nrhs + i__1 * i__1;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + (mm + *n) * ilaenv_(&c__1,
+ "CGEBRD", " ", &mm, n, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *nrhs * ilaenv_(&c__1,
+ "CUNMBR", "QLC", &mm, nrhs, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "CUNMBR", "PLN", n, nrhs, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * *nrhs;
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = (*n << 1) + mm, i__2 = (*n << 1) + *n * *nrhs;
+ minwrk = max(i__1,i__2);
+ }
+ if (*n > *m) {
+/* Computing 2nd power */
+ i__1 = smlsiz + 1;
+ lrwork = *m * 10 + (*m << 1) * smlsiz + (*m << 3) * nlvl +
+ smlsiz * 3 * *nrhs + i__1 * i__1;
+ if (*n >= mnthr) {
+
+/* Path 2a - underdetermined, with many more columns */
+/* than rows. */
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m << 1) *
+ ilaenv_(&c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + *nrhs *
+ ilaenv_(&c__1, "CUNMBR", "QLC", m, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m - 1) *
+ ilaenv_(&c__1, "CUNMLQ", "LC", n, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+ if (*nrhs > 1) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 1);
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+/* XXX: Ensure the Path 2a case below is triggered. The workspace */
+/* calculation should use queries for all routines eventually. */
+/* Computing MAX */
+/* Computing MAX */
+ i__3 = *m, i__4 = (*m << 1) - 4, i__3 = max(i__3,i__4),
+ i__3 = max(i__3,*nrhs), i__4 = *n - *m * 3;
+ i__1 = maxwrk, i__2 = (*m << 2) + *m * *m + max(i__3,i__4)
+ ;
+ maxwrk = max(i__1,i__2);
+ } else {
+
+/* Path 2 - underdetermined. */
+
+ maxwrk = (*m << 1) + (*n + *m) * ilaenv_(&c__1, "CGEBRD",
+ " ", m, n, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *nrhs * ilaenv_(&c__1,
+ "CUNMBR", "QLC", m, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNMBR", "PLN", n, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = (*m << 1) + *n, i__2 = (*m << 1) + *m * *nrhs;
+ minwrk = max(i__1,i__2);
+ }
+ }
+ minwrk = min(minwrk,maxwrk);
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+ iwork[1] = liwork;
+ rwork[1] = (real) lrwork;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGELSD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ *rank = 0;
+ return 0;
+ }
+
+/* Get machine parameters. */
+
+ eps = slamch_("P");
+ sfmin = slamch_("S");
+ smlnum = sfmin / eps;
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+
+/* Scale A if max entry outside range [SMLNUM,BIGNUM]. */
+
+ anrm = clange_("M", m, n, &a[a_offset], lda, &rwork[1]);
+ iascl = 0;
+ if (anrm > 0.f && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ clascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+ clascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.f) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ claset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[b_offset], ldb);
+ slaset_("F", &minmn, &c__1, &c_b80, &c_b80, &s[1], &c__1);
+ *rank = 0;
+ goto L10;
+ }
+
+/* Scale B if max entry outside range [SMLNUM,BIGNUM]. */
+
+ bnrm = clange_("M", m, nrhs, &b[b_offset], ldb, &rwork[1]);
+ ibscl = 0;
+ if (bnrm > 0.f && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM. */
+
+ clascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+ clascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 2;
+ }
+
+/* If M < N make sure B(M+1:N,:) = 0 */
+
+ if (*m < *n) {
+ i__1 = *n - *m;
+ claset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[*m + 1 + b_dim1], ldb);
+ }
+
+/* Overdetermined case. */
+
+ if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined. */
+
+ mm = *m;
+ if (*m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than columns */
+
+ mm = *n;
+ itau = 1;
+ nwork = itau + *n;
+
+/* Compute A=Q*R. */
+/* (RWorkspace: need N) */
+/* (CWorkspace: need N, prefer N*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+ info);
+
+/* Multiply B by transpose(Q). */
+/* (RWorkspace: need N) */
+/* (CWorkspace: need NRHS, prefer NRHS*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ cunmqr_("L", "C", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below R. */
+
+ if (*n > 1) {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ claset_("L", &i__1, &i__2, &c_b1, &c_b1, &a[a_dim1 + 2], lda);
+ }
+ }
+
+ itauq = 1;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+ ie = 1;
+ nrwork = ie + *n;
+
+/* Bidiagonalize R in A. */
+/* (RWorkspace: need N) */
+/* (CWorkspace: need 2*N+MM, prefer 2*N+(MM+N)*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(&mm, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors of R. */
+/* (CWorkspace: need 2*N+NRHS, prefer 2*N+NRHS*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "C", &mm, nrhs, n, &a[a_offset], lda, &work[itauq],
+ &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ clalsd_("U", &smlsiz, n, nrhs, &s[1], &rwork[ie], &b[b_offset], ldb,
+ rcond, rank, &work[nwork], &rwork[nrwork], &iwork[1], info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of R. */
+
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "L", "N", n, nrhs, n, &a[a_offset], lda, &work[itaup], &
+ b[b_offset], ldb, &work[nwork], &i__1, info);
+
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = *m, i__2 = (*m << 1) - 4, i__1 = max(i__1,i__2), i__1 = max(
+ i__1,*nrhs), i__2 = *n - *m * 3;
+ if (*n >= mnthr && *lwork >= (*m << 2) + *m * *m + max(i__1,i__2)) {
+
+/* Path 2a - underdetermined, with many more columns than rows */
+/* and sufficient workspace for an efficient algorithm. */
+
+ ldwork = *m;
+/* Computing MAX */
+/* Computing MAX */
+ i__3 = *m, i__4 = (*m << 1) - 4, i__3 = max(i__3,i__4), i__3 =
+ max(i__3,*nrhs), i__4 = *n - *m * 3;
+ i__1 = (*m << 2) + *m * *lda + max(i__3,i__4), i__2 = *m * *lda +
+ *m + *m * *nrhs;
+ if (*lwork >= max(i__1,i__2)) {
+ ldwork = *lda;
+ }
+ itau = 1;
+ nwork = *m + 1;
+
+/* Compute A=L*Q. */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+ info);
+ il = nwork;
+
+/* Copy L to WORK(IL), zeroing out above its diagonal. */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ claset_("U", &i__1, &i__2, &c_b1, &c_b1, &work[il + ldwork], &
+ ldwork);
+ itauq = il + ldwork * *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+ ie = 1;
+ nrwork = ie + *m;
+
+/* Bidiagonalize L in WORK(IL). */
+/* (RWorkspace: need M) */
+/* (CWorkspace: need M*M+4*M, prefer M*M+4*M+2*M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(m, m, &work[il], &ldwork, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors of L. */
+/* (CWorkspace: need M*M+4*M+NRHS, prefer M*M+4*M+NRHS*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "C", m, nrhs, m, &work[il], &ldwork, &work[
+ itauq], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ clalsd_("U", &smlsiz, m, nrhs, &s[1], &rwork[ie], &b[b_offset],
+ ldb, rcond, rank, &work[nwork], &rwork[nrwork], &iwork[1],
+ info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of L. */
+
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "L", "N", m, nrhs, m, &work[il], &ldwork, &work[
+ itaup], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below first M rows of B. */
+
+ i__1 = *n - *m;
+ claset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[*m + 1 + b_dim1], ldb);
+ nwork = itau + *m;
+
+/* Multiply transpose(Q) by B. */
+/* (CWorkspace: need NRHS, prefer NRHS*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ cunmlq_("L", "C", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[nwork], &i__1, info);
+
+ } else {
+
+/* Path 2 - remaining underdetermined cases. */
+
+ itauq = 1;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+ ie = 1;
+ nrwork = ie + *m;
+
+/* Bidiagonalize A. */
+/* (RWorkspace: need M) */
+/* (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors. */
+/* (CWorkspace: need 2*M+NRHS, prefer 2*M+NRHS*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "C", m, nrhs, n, &a[a_offset], lda, &work[itauq]
+, &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ clalsd_("L", &smlsiz, m, nrhs, &s[1], &rwork[ie], &b[b_offset],
+ ldb, rcond, rank, &work[nwork], &rwork[nrwork], &iwork[1],
+ info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of A. */
+
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "L", "N", n, nrhs, m, &a[a_offset], lda, &work[itaup]
+, &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+ }
+ }
+
+/* Undo scaling. */
+
+ if (iascl == 1) {
+ clascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ } else if (iascl == 2) {
+ clascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ }
+ if (ibscl == 1) {
+ clascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ } else if (ibscl == 2) {
+ clascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ }
+
+L10:
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+ iwork[1] = liwork;
+ rwork[1] = (real) lrwork;
+ return 0;
+
+/* End of CGELSD */
+
+} /* cgelsd_ */
diff --git a/contrib/libs/clapack/cgelss.c b/contrib/libs/clapack/cgelss.c
new file mode 100644
index 0000000000..24729a736d
--- /dev/null
+++ b/contrib/libs/clapack/cgelss.c
@@ -0,0 +1,822 @@
+/* cgelss.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__6 = 6;
+static integer c_n1 = -1;
+static integer c__1 = 1;
+static integer c__0 = 0;
+static real c_b78 = 0.f;
+
+/* Subroutine */ int cgelss_(integer *m, integer *n, integer *nrhs, complex *
+ a, integer *lda, complex *b, integer *ldb, real *s, real *rcond,
+ integer *rank, complex *work, integer *lwork, real *rwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+ real r__1;
+
+ /* Local variables */
+ integer i__, bl, ie, il, mm;
+ real eps, thr, anrm, bnrm;
+ integer itau;
+ complex vdum[1];
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *);
+ integer iascl, ibscl;
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *);
+ integer chunk;
+ real sfmin;
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *);
+ integer minmn, maxmn, itaup, itauq, mnthr, iwork;
+ extern /* Subroutine */ int cgebrd_(integer *, integer *, complex *,
+ integer *, real *, real *, complex *, complex *, complex *,
+ integer *, integer *), slabad_(real *, real *);
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ extern /* Subroutine */ int cgelqf_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *, integer *), clascl_(
+ char *, integer *, integer *, real *, real *, integer *, integer *
+, complex *, integer *, integer *), cgeqrf_(integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), claset_(char *,
+ integer *, integer *, complex *, complex *, complex *, integer *), xerbla_(char *, integer *), cbdsqr_(char *,
+ integer *, integer *, integer *, integer *, real *, real *,
+ complex *, integer *, complex *, integer *, complex *, integer *,
+ real *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ real bignum;
+ extern /* Subroutine */ int cungbr_(char *, integer *, integer *, integer
+ *, complex *, integer *, complex *, complex *, integer *, integer
+ *), slascl_(char *, integer *, integer *, real *, real *,
+ integer *, integer *, real *, integer *, integer *),
+ cunmbr_(char *, char *, char *, integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, complex *,
+ integer *, integer *), csrscl_(integer *,
+ real *, complex *, integer *), slaset_(char *, integer *, integer
+ *, real *, real *, real *, integer *), cunmlq_(char *,
+ char *, integer *, integer *, integer *, complex *, integer *,
+ complex *, complex *, integer *, complex *, integer *, integer *);
+ integer ldwork;
+ extern /* Subroutine */ int cunmqr_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *);
+ integer minwrk, maxwrk;
+ real smlnum;
+ integer irwork;
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGELSS computes the minimum norm solution to a complex linear */
+/* least squares problem: */
+
+/* Minimize 2-norm(| b - A*x |). */
+
+/* using the singular value decomposition (SVD) of A. A is an M-by-N */
+/* matrix which may be rank-deficient. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution matrix */
+/* X. */
+
+/* The effective rank of A is determined by treating as zero those */
+/* singular values which are less than RCOND times the largest singular */
+/* value. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the first min(m,n) rows of A are overwritten with */
+/* its right singular vectors, stored rowwise. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the M-by-NRHS right hand side matrix B. */
+/* On exit, B is overwritten by the N-by-NRHS solution matrix X. */
+/* If m >= n and RANK = n, the residual sum-of-squares for */
+/* the solution in the i-th column is given by the sum of */
+/* squares of the modulus of elements n+1:m in that column. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,M,N). */
+
+/* S (output) REAL array, dimension (min(M,N)) */
+/* The singular values of A in decreasing order. */
+/* The condition number of A in the 2-norm = S(1)/S(min(m,n)). */
+
+/* RCOND (input) REAL */
+/* RCOND is used to determine the effective rank of A. */
+/* Singular values S(i) <= RCOND*S(1) are treated as zero. */
+/* If RCOND < 0, machine precision is used instead. */
+
+/* RANK (output) INTEGER */
+/* The effective rank of A, i.e., the number of singular values */
+/* which are greater than RCOND*S(1). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= 1, and also: */
+/* LWORK >= 2*min(M,N) + max(M,N,NRHS) */
+/* For good performance, LWORK should generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (5*min(M,N)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: the algorithm for computing the SVD failed to converge; */
+/* if INFO = i, i off-diagonal elements of an intermediate */
+/* bidiagonal form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --s;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ maxmn = max(*m,*n);
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldb < max(1,maxmn)) {
+ *info = -7;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* CWorkspace refers to complex workspace, and RWorkspace refers */
+/* to real workspace. NB refers to the optimal block size for the */
+/* immediately following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ if (minmn > 0) {
+ mm = *m;
+ mnthr = ilaenv_(&c__6, "CGELSS", " ", m, n, nrhs, &c_n1);
+ if (*m >= *n && *m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than */
+/* columns */
+
+ mm = *n;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "CGEQRF",
+ " ", m, n, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *nrhs * ilaenv_(&c__1, "CUNMQR",
+ "LC", m, nrhs, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ }
+ if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined */
+
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + (mm + *n) * ilaenv_(&c__1,
+ "CGEBRD", " ", &mm, n, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *nrhs * ilaenv_(&c__1,
+ "CUNMBR", "QLC", &mm, nrhs, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "CUNGBR", "P", n, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * *nrhs;
+ maxwrk = max(i__1,i__2);
+ minwrk = (*n << 1) + max(*nrhs,*m);
+ }
+ if (*n > *m) {
+ minwrk = (*m << 1) + max(*nrhs,*n);
+ if (*n >= mnthr) {
+
+/* Path 2a - underdetermined, with many more columns */
+/* than rows */
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + *m * *m + (*m << 1) *
+ ilaenv_(&c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + *m * *m + *nrhs * ilaenv_(&
+ c__1, "CUNMBR", "QLC", m, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + *m * *m + (*m - 1) *
+ ilaenv_(&c__1, "CUNGBR", "P", m, m, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+ if (*nrhs > 1) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 1);
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m + *nrhs * ilaenv_(&c__1, "CUNMLQ"
+, "LC", n, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+ } else {
+
+/* Path 2 - underdetermined */
+
+ maxwrk = (*m << 1) + (*n + *m) * ilaenv_(&c__1, "CGEBRD",
+ " ", m, n, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *nrhs * ilaenv_(&c__1,
+ "CUNMBR", "QLC", m, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNGBR", "P", m, n, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * *nrhs;
+ maxwrk = max(i__1,i__2);
+ }
+ }
+ maxwrk = max(minwrk,maxwrk);
+ }
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGELSS", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ *rank = 0;
+ return 0;
+ }
+
+/* Get machine parameters */
+
+ eps = slamch_("P");
+ sfmin = slamch_("S");
+ smlnum = sfmin / eps;
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = clange_("M", m, n, &a[a_offset], lda, &rwork[1]);
+ iascl = 0;
+ if (anrm > 0.f && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ clascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ clascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.f) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ claset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[b_offset], ldb);
+ slaset_("F", &minmn, &c__1, &c_b78, &c_b78, &s[1], &minmn);
+ *rank = 0;
+ goto L70;
+ }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+ bnrm = clange_("M", m, nrhs, &b[b_offset], ldb, &rwork[1]);
+ ibscl = 0;
+ if (bnrm > 0.f && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ clascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ clascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 2;
+ }
+
+/* Overdetermined case */
+
+ if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined */
+
+ mm = *m;
+ if (*m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than columns */
+
+ mm = *n;
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__1,
+ info);
+
+/* Multiply B by transpose(Q) */
+/* (CWorkspace: need N+NRHS, prefer N+NRHS*NB) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwork + 1;
+ cunmqr_("L", "C", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[iwork], &i__1, info);
+
+/* Zero out below R */
+
+ if (*n > 1) {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ claset_("L", &i__1, &i__2, &c_b1, &c_b1, &a[a_dim1 + 2], lda);
+ }
+ }
+
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in A */
+/* (CWorkspace: need 2*N+MM, prefer 2*N+(MM+N)*NB) */
+/* (RWorkspace: need N) */
+
+ i__1 = *lwork - iwork + 1;
+ cgebrd_(&mm, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq], &
+ work[itaup], &work[iwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors of R */
+/* (CWorkspace: need 2*N+NRHS, prefer 2*N+NRHS*NB) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwork + 1;
+ cunmbr_("Q", "L", "C", &mm, nrhs, n, &a[a_offset], lda, &work[itauq],
+ &b[b_offset], ldb, &work[iwork], &i__1, info);
+
+/* Generate right bidiagonalizing vectors of R in A */
+/* (CWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwork + 1;
+ cungbr_("P", n, n, n, &a[a_offset], lda, &work[itaup], &work[iwork], &
+ i__1, info);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration */
+/* multiply B by transpose of left singular vectors */
+/* compute right singular vectors in A */
+/* (CWorkspace: none) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, n, &c__0, nrhs, &s[1], &rwork[ie], &a[a_offset], lda,
+ vdum, &c__1, &b[b_offset], ldb, &rwork[irwork], info);
+ if (*info != 0) {
+ goto L70;
+ }
+
+/* Multiply B by reciprocals of singular values */
+
+/* Computing MAX */
+ r__1 = *rcond * s[1];
+ thr = dmax(r__1,sfmin);
+ if (*rcond < 0.f) {
+/* Computing MAX */
+ r__1 = eps * s[1];
+ thr = dmax(r__1,sfmin);
+ }
+ *rank = 0;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (s[i__] > thr) {
+ csrscl_(nrhs, &s[i__], &b[i__ + b_dim1], ldb);
+ ++(*rank);
+ } else {
+ claset_("F", &c__1, nrhs, &c_b1, &c_b1, &b[i__ + b_dim1], ldb);
+ }
+/* L10: */
+ }
+
+/* Multiply B by right singular vectors */
+/* (CWorkspace: need N, prefer N*NRHS) */
+/* (RWorkspace: none) */
+
+ if (*lwork >= *ldb * *nrhs && *nrhs > 1) {
+ cgemm_("C", "N", n, nrhs, n, &c_b2, &a[a_offset], lda, &b[
+ b_offset], ldb, &c_b1, &work[1], ldb);
+ clacpy_("G", n, nrhs, &work[1], ldb, &b[b_offset], ldb)
+ ;
+ } else if (*nrhs > 1) {
+ chunk = *lwork / *n;
+ i__1 = *nrhs;
+ i__2 = chunk;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = *nrhs - i__ + 1;
+ bl = min(i__3,chunk);
+ cgemm_("C", "N", n, &bl, n, &c_b2, &a[a_offset], lda, &b[i__ *
+ b_dim1 + 1], ldb, &c_b1, &work[1], n);
+ clacpy_("G", n, &bl, &work[1], n, &b[i__ * b_dim1 + 1], ldb);
+/* L20: */
+ }
+ } else {
+ cgemv_("C", n, n, &c_b2, &a[a_offset], lda, &b[b_offset], &c__1, &
+ c_b1, &work[1], &c__1);
+ ccopy_(n, &work[1], &c__1, &b[b_offset], &c__1);
+ }
+
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__2 = max(*m,*nrhs), i__1 = *n - (*m << 1);
+ if (*n >= mnthr && *lwork >= *m * 3 + *m * *m + max(i__2,i__1)) {
+
+/* Underdetermined case, M much less than N */
+
+/* Path 2a - underdetermined, with many more columns than rows */
+/* and sufficient workspace for an efficient algorithm */
+
+ ldwork = *m;
+/* Computing MAX */
+ i__2 = max(*m,*nrhs), i__1 = *n - (*m << 1);
+ if (*lwork >= *m * 3 + *m * *lda + max(i__2,i__1)) {
+ ldwork = *lda;
+ }
+ itau = 1;
+ iwork = *m + 1;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: none) */
+
+ i__2 = *lwork - iwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2,
+ info);
+ il = iwork;
+
+/* Copy L to WORK(IL), zeroing out above it */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork);
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ claset_("U", &i__2, &i__1, &c_b1, &c_b1, &work[il + ldwork], &
+ ldwork);
+ ie = 1;
+ itauq = il + ldwork * *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IL) */
+/* (CWorkspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(m, m, &work[il], &ldwork, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[iwork], &i__2, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors of L */
+/* (CWorkspace: need M*M+3*M+NRHS, prefer M*M+3*M+NRHS*NB) */
+/* (RWorkspace: none) */
+
+ i__2 = *lwork - iwork + 1;
+ cunmbr_("Q", "L", "C", m, nrhs, m, &work[il], &ldwork, &work[
+ itauq], &b[b_offset], ldb, &work[iwork], &i__2, info);
+
+/* Generate right bidiagonalizing vectors of R in WORK(IL) */
+/* (CWorkspace: need M*M+4*M-1, prefer M*M+3*M+(M-1)*NB) */
+/* (RWorkspace: none) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", m, m, m, &work[il], &ldwork, &work[itaup], &work[
+ iwork], &i__2, info);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right singular */
+/* vectors of L in WORK(IL) and multiplying B by transpose of */
+/* left singular vectors */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", m, m, &c__0, nrhs, &s[1], &rwork[ie], &work[il], &
+ ldwork, &a[a_offset], lda, &b[b_offset], ldb, &rwork[
+ irwork], info);
+ if (*info != 0) {
+ goto L70;
+ }
+
+/* Multiply B by reciprocals of singular values */
+
+/* Computing MAX */
+ r__1 = *rcond * s[1];
+ thr = dmax(r__1,sfmin);
+ if (*rcond < 0.f) {
+/* Computing MAX */
+ r__1 = eps * s[1];
+ thr = dmax(r__1,sfmin);
+ }
+ *rank = 0;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (s[i__] > thr) {
+ csrscl_(nrhs, &s[i__], &b[i__ + b_dim1], ldb);
+ ++(*rank);
+ } else {
+ claset_("F", &c__1, nrhs, &c_b1, &c_b1, &b[i__ + b_dim1],
+ ldb);
+ }
+/* L30: */
+ }
+ iwork = il + *m * ldwork;
+
+/* Multiply B by right singular vectors of L in WORK(IL) */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NRHS) */
+/* (RWorkspace: none) */
+
+ if (*lwork >= *ldb * *nrhs + iwork - 1 && *nrhs > 1) {
+ cgemm_("C", "N", m, nrhs, m, &c_b2, &work[il], &ldwork, &b[
+ b_offset], ldb, &c_b1, &work[iwork], ldb);
+ clacpy_("G", m, nrhs, &work[iwork], ldb, &b[b_offset], ldb);
+ } else if (*nrhs > 1) {
+ chunk = (*lwork - iwork + 1) / *m;
+ i__2 = *nrhs;
+ i__1 = chunk;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *nrhs - i__ + 1;
+ bl = min(i__3,chunk);
+ cgemm_("C", "N", m, &bl, m, &c_b2, &work[il], &ldwork, &b[
+ i__ * b_dim1 + 1], ldb, &c_b1, &work[iwork], m);
+ clacpy_("G", m, &bl, &work[iwork], m, &b[i__ * b_dim1 + 1]
+, ldb);
+/* L40: */
+ }
+ } else {
+ cgemv_("C", m, m, &c_b2, &work[il], &ldwork, &b[b_dim1 + 1], &
+ c__1, &c_b1, &work[iwork], &c__1);
+ ccopy_(m, &work[iwork], &c__1, &b[b_dim1 + 1], &c__1);
+ }
+
+/* Zero out below first M rows of B */
+
+ i__1 = *n - *m;
+ claset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[*m + 1 + b_dim1], ldb);
+ iwork = itau + *m;
+
+/* Multiply transpose(Q) by B */
+/* (CWorkspace: need M+NRHS, prefer M+NRHS*NB) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwork + 1;
+ cunmlq_("L", "C", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[iwork], &i__1, info);
+
+ } else {
+
+/* Path 2 - remaining underdetermined cases */
+
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize A */
+/* (CWorkspace: need 3*M, prefer 2*M+(M+N)*NB) */
+/* (RWorkspace: need N) */
+
+ i__1 = *lwork - iwork + 1;
+ cgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[iwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors */
+/* (CWorkspace: need 2*M+NRHS, prefer 2*M+NRHS*NB) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwork + 1;
+ cunmbr_("Q", "L", "C", m, nrhs, n, &a[a_offset], lda, &work[itauq]
+, &b[b_offset], ldb, &work[iwork], &i__1, info);
+
+/* Generate right bidiagonalizing vectors in A */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwork + 1;
+ cungbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &work[
+ iwork], &i__1, info);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, */
+/* computing right singular vectors of A in A and */
+/* multiplying B by transpose of left singular vectors */
+/* (CWorkspace: none) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("L", m, n, &c__0, nrhs, &s[1], &rwork[ie], &a[a_offset],
+ lda, vdum, &c__1, &b[b_offset], ldb, &rwork[irwork], info);
+ if (*info != 0) {
+ goto L70;
+ }
+
+/* Multiply B by reciprocals of singular values */
+
+/* Computing MAX */
+ r__1 = *rcond * s[1];
+ thr = dmax(r__1,sfmin);
+ if (*rcond < 0.f) {
+/* Computing MAX */
+ r__1 = eps * s[1];
+ thr = dmax(r__1,sfmin);
+ }
+ *rank = 0;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (s[i__] > thr) {
+ csrscl_(nrhs, &s[i__], &b[i__ + b_dim1], ldb);
+ ++(*rank);
+ } else {
+ claset_("F", &c__1, nrhs, &c_b1, &c_b1, &b[i__ + b_dim1],
+ ldb);
+ }
+/* L50: */
+ }
+
+/* Multiply B by right singular vectors of A */
+/* (CWorkspace: need N, prefer N*NRHS) */
+/* (RWorkspace: none) */
+
+ if (*lwork >= *ldb * *nrhs && *nrhs > 1) {
+ cgemm_("C", "N", n, nrhs, m, &c_b2, &a[a_offset], lda, &b[
+ b_offset], ldb, &c_b1, &work[1], ldb);
+ clacpy_("G", n, nrhs, &work[1], ldb, &b[b_offset], ldb);
+ } else if (*nrhs > 1) {
+ chunk = *lwork / *n;
+ i__1 = *nrhs;
+ i__2 = chunk;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *nrhs - i__ + 1;
+ bl = min(i__3,chunk);
+ cgemm_("C", "N", n, &bl, m, &c_b2, &a[a_offset], lda, &b[
+ i__ * b_dim1 + 1], ldb, &c_b1, &work[1], n);
+ clacpy_("F", n, &bl, &work[1], n, &b[i__ * b_dim1 + 1],
+ ldb);
+/* L60: */
+ }
+ } else {
+ cgemv_("C", m, n, &c_b2, &a[a_offset], lda, &b[b_offset], &
+ c__1, &c_b1, &work[1], &c__1);
+ ccopy_(n, &work[1], &c__1, &b[b_offset], &c__1);
+ }
+ }
+ }
+
+/* Undo scaling */
+
+ if (iascl == 1) {
+ clascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ } else if (iascl == 2) {
+ clascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ }
+ if (ibscl == 1) {
+ clascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ } else if (ibscl == 2) {
+ clascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ }
+L70:
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+ return 0;
+
+/* End of CGELSS */
+
+} /* cgelss_ */
diff --git a/contrib/libs/clapack/cgelsx.c b/contrib/libs/clapack/cgelsx.c
new file mode 100644
index 0000000000..d75a1784b2
--- /dev/null
+++ b/contrib/libs/clapack/cgelsx.c
@@ -0,0 +1,468 @@
+/* cgelsx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__0 = 0;
+static integer c__2 = 2;
+static integer c__1 = 1;
+
+/* Subroutine */ int cgelsx_(integer *m, integer *n, integer *nrhs, complex *
+ a, integer *lda, complex *b, integer *ldb, integer *jpvt, real *rcond,
+ integer *rank, complex *work, real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Builtin functions */
+ double c_abs(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, k;
+ complex c1, c2, s1, s2, t1, t2;
+ integer mn;
+ real anrm, bnrm, smin, smax;
+ integer iascl, ibscl, ismin, ismax;
+ extern /* Subroutine */ int ctrsm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *), claic1_(integer *,
+ integer *, complex *, real *, complex *, complex *, real *,
+ complex *, complex *), cunm2r_(char *, char *, integer *, integer
+ *, integer *, complex *, integer *, complex *, complex *, integer
+ *, complex *, integer *), slabad_(real *, real *);
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ extern /* Subroutine */ int clascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, complex *, integer *, integer *), cgeqpf_(integer *, integer *, complex *, integer *,
+ integer *, complex *, complex *, real *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int claset_(char *, integer *, integer *, complex
+ *, complex *, complex *, integer *), xerbla_(char *,
+ integer *);
+ real bignum;
+ extern /* Subroutine */ int clatzm_(char *, integer *, integer *, complex
+ *, integer *, complex *, complex *, complex *, integer *, complex
+ *);
+ real sminpr;
+ extern /* Subroutine */ int ctzrqf_(integer *, integer *, complex *,
+ integer *, complex *, integer *);
+ real smaxpr, smlnum;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine CGELSY. */
+
+/* CGELSX computes the minimum-norm solution to a complex linear least */
+/* squares problem: */
+/* minimize || A * X - B || */
+/* using a complete orthogonal factorization of A. A is an M-by-N */
+/* matrix which may be rank-deficient. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution */
+/* matrix X. */
+
+/* The routine first computes a QR factorization with column pivoting: */
+/* A * P = Q * [ R11 R12 ] */
+/* [ 0 R22 ] */
+/* with R11 defined as the largest leading submatrix whose estimated */
+/* condition number is less than 1/RCOND. The order of R11, RANK, */
+/* is the effective rank of A. */
+
+/* Then, R22 is considered to be negligible, and R12 is annihilated */
+/* by unitary transformations from the right, arriving at the */
+/* complete orthogonal factorization: */
+/* A * P = Q * [ T11 0 ] * Z */
+/* [ 0 0 ] */
+/* The minimum-norm solution is then */
+/* X = P * Z' [ inv(T11)*Q1'*B ] */
+/* [ 0 ] */
+/* where Q1 consists of the first RANK columns of Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of */
+/* columns of matrices B and X. NRHS >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A has been overwritten by details of its */
+/* complete orthogonal factorization. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the M-by-NRHS right hand side matrix B. */
+/* On exit, the N-by-NRHS solution matrix X. */
+/* If m >= n and RANK = n, the residual sum-of-squares for */
+/* the solution in the i-th column is given by the sum of */
+/* squares of elements N+1:M in that column. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,M,N). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(i) .ne. 0, the i-th column of A is an */
+/* initial column, otherwise it is a free column. Before */
+/* the QR factorization of A, all initial columns are */
+/* permuted to the leading positions; only the remaining */
+/* free columns are moved as a result of column pivoting */
+/* during the factorization. */
+/* On exit, if JPVT(i) = k, then the i-th column of A*P */
+/* was the k-th column of A. */
+
+/* RCOND (input) REAL */
+/* RCOND is used to determine the effective rank of A, which */
+/* is defined as the order of the largest leading triangular */
+/* submatrix R11 in the QR factorization with pivoting of A, */
+/* whose estimated condition number < 1/RCOND. */
+
+/* RANK (output) INTEGER */
+/* The effective rank of A, i.e., the order of the submatrix */
+/* R11. This is the same as the order of the submatrix T11 */
+/* in the complete orthogonal factorization of A. */
+
+/* WORK (workspace) COMPLEX array, dimension */
+/* (min(M,N) + max( N, 2*min(M,N)+NRHS )), */
+
+/* RWORK (workspace) REAL array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --jpvt;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ mn = min(*m,*n);
+ ismin = mn + 1;
+ ismax = (mn << 1) + 1;
+
+/* Test the input arguments. */
+
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = max(1,*m);
+ if (*ldb < max(i__1,*n)) {
+ *info = -7;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGELSX", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+/* Computing MIN */
+ i__1 = min(*m,*n);
+ if (min(i__1,*nrhs) == 0) {
+ *rank = 0;
+ return 0;
+ }
+
+/* Get machine parameters */
+
+ smlnum = slamch_("S") / slamch_("P");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+
+/* Scale A, B if max elements outside range [SMLNUM,BIGNUM] */
+
+ anrm = clange_("M", m, n, &a[a_offset], lda, &rwork[1]);
+ iascl = 0;
+ if (anrm > 0.f && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ clascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ clascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.f) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ claset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[b_offset], ldb);
+ *rank = 0;
+ goto L100;
+ }
+
+ bnrm = clange_("M", m, nrhs, &b[b_offset], ldb, &rwork[1]);
+ ibscl = 0;
+ if (bnrm > 0.f && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ clascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ clascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 2;
+ }
+
+/* Compute QR factorization with column pivoting of A: */
+/* A * P = Q * R */
+
+ cgeqpf_(m, n, &a[a_offset], lda, &jpvt[1], &work[1], &work[mn + 1], &
+ rwork[1], info);
+
+/* complex workspace MN+N. Real workspace 2*N. Details of Householder */
+/* rotations stored in WORK(1:MN). */
+
+/* Determine RANK using incremental condition estimation */
+
+ i__1 = ismin;
+ work[i__1].r = 1.f, work[i__1].i = 0.f;
+ i__1 = ismax;
+ work[i__1].r = 1.f, work[i__1].i = 0.f;
+ smax = c_abs(&a[a_dim1 + 1]);
+ smin = smax;
+ if (c_abs(&a[a_dim1 + 1]) == 0.f) {
+ *rank = 0;
+ i__1 = max(*m,*n);
+ claset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[b_offset], ldb);
+ goto L100;
+ } else {
+ *rank = 1;
+ }
+
+L10:
+ if (*rank < mn) {
+ i__ = *rank + 1;
+ claic1_(&c__2, rank, &work[ismin], &smin, &a[i__ * a_dim1 + 1], &a[
+ i__ + i__ * a_dim1], &sminpr, &s1, &c1);
+ claic1_(&c__1, rank, &work[ismax], &smax, &a[i__ * a_dim1 + 1], &a[
+ i__ + i__ * a_dim1], &smaxpr, &s2, &c2);
+
+ if (smaxpr * *rcond <= sminpr) {
+ i__1 = *rank;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ismin + i__ - 1;
+ i__3 = ismin + i__ - 1;
+ q__1.r = s1.r * work[i__3].r - s1.i * work[i__3].i, q__1.i =
+ s1.r * work[i__3].i + s1.i * work[i__3].r;
+ work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+ i__2 = ismax + i__ - 1;
+ i__3 = ismax + i__ - 1;
+ q__1.r = s2.r * work[i__3].r - s2.i * work[i__3].i, q__1.i =
+ s2.r * work[i__3].i + s2.i * work[i__3].r;
+ work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+/* L20: */
+ }
+ i__1 = ismin + *rank;
+ work[i__1].r = c1.r, work[i__1].i = c1.i;
+ i__1 = ismax + *rank;
+ work[i__1].r = c2.r, work[i__1].i = c2.i;
+ smin = sminpr;
+ smax = smaxpr;
+ ++(*rank);
+ goto L10;
+ }
+ }
+
+/* Logically partition R = [ R11 R12 ] */
+/* [ 0 R22 ] */
+/* where R11 = R(1:RANK,1:RANK) */
+
+/* [R11,R12] = [ T11, 0 ] * Y */
+
+ if (*rank < *n) {
+ ctzrqf_(rank, n, &a[a_offset], lda, &work[mn + 1], info);
+ }
+
+/* Details of Householder rotations stored in WORK(MN+1:2*MN) */
+
+/* B(1:M,1:NRHS) := Q' * B(1:M,1:NRHS) */
+
+ cunm2r_("Left", "Conjugate transpose", m, nrhs, &mn, &a[a_offset], lda, &
+ work[1], &b[b_offset], ldb, &work[(mn << 1) + 1], info);
+
+/* workspace NRHS */
+
+/* B(1:RANK,1:NRHS) := inv(T11) * B(1:RANK,1:NRHS) */
+
+ ctrsm_("Left", "Upper", "No transpose", "Non-unit", rank, nrhs, &c_b2, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+ i__1 = *n;
+ for (i__ = *rank + 1; i__ <= i__1; ++i__) {
+ i__2 = *nrhs;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0.f, b[i__3].i = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* B(1:N,1:NRHS) := Y' * B(1:N,1:NRHS) */
+
+ if (*rank < *n) {
+ i__1 = *rank;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *n - *rank + 1;
+ r_cnjg(&q__1, &work[mn + i__]);
+ clatzm_("Left", &i__2, nrhs, &a[i__ + (*rank + 1) * a_dim1], lda,
+ &q__1, &b[i__ + b_dim1], &b[*rank + 1 + b_dim1], ldb, &
+ work[(mn << 1) + 1]);
+/* L50: */
+ }
+ }
+
+/* workspace NRHS */
+
+/* B(1:N,1:NRHS) := P * B(1:N,1:NRHS) */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = (mn << 1) + i__;
+ work[i__3].r = 1.f, work[i__3].i = 0.f;
+/* L60: */
+ }
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = (mn << 1) + i__;
+ if (work[i__3].r == 1.f && work[i__3].i == 0.f) {
+ if (jpvt[i__] != i__) {
+ k = i__;
+ i__3 = k + j * b_dim1;
+ t1.r = b[i__3].r, t1.i = b[i__3].i;
+ i__3 = jpvt[k] + j * b_dim1;
+ t2.r = b[i__3].r, t2.i = b[i__3].i;
+L70:
+ i__3 = jpvt[k] + j * b_dim1;
+ b[i__3].r = t1.r, b[i__3].i = t1.i;
+ i__3 = (mn << 1) + k;
+ work[i__3].r = 0.f, work[i__3].i = 0.f;
+ t1.r = t2.r, t1.i = t2.i;
+ k = jpvt[k];
+ i__3 = jpvt[k] + j * b_dim1;
+ t2.r = b[i__3].r, t2.i = b[i__3].i;
+ if (jpvt[k] != i__) {
+ goto L70;
+ }
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = t1.r, b[i__3].i = t1.i;
+ i__3 = (mn << 1) + k;
+ work[i__3].r = 0.f, work[i__3].i = 0.f;
+ }
+ }
+/* L80: */
+ }
+/* L90: */
+ }
+
+/* Undo scaling */
+
+ if (iascl == 1) {
+ clascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+ info);
+ clascl_("U", &c__0, &c__0, &smlnum, &anrm, rank, rank, &a[a_offset],
+ lda, info);
+ } else if (iascl == 2) {
+ clascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+ info);
+ clascl_("U", &c__0, &c__0, &bignum, &anrm, rank, rank, &a[a_offset],
+ lda, info);
+ }
+ if (ibscl == 1) {
+ clascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ } else if (ibscl == 2) {
+ clascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ }
+
+L100:
+
+ return 0;
+
+/* End of CGELSX */
+
+} /* cgelsx_ */
diff --git a/contrib/libs/clapack/cgelsy.c b/contrib/libs/clapack/cgelsy.c
new file mode 100644
index 0000000000..836bfd08de
--- /dev/null
+++ b/contrib/libs/clapack/cgelsy.c
@@ -0,0 +1,512 @@
+/* cgelsy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__0 = 0;
+static integer c__2 = 2;
+
+/* Subroutine */ int cgelsy_(integer *m, integer *n, integer *nrhs, complex *
+ a, integer *lda, complex *b, integer *ldb, integer *jpvt, real *rcond,
+ integer *rank, complex *work, integer *lwork, real *rwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2;
+ complex q__1;
+
+ /* Builtin functions */
+ double c_abs(complex *);
+
+ /* Local variables */
+ integer i__, j;
+ complex c1, c2, s1, s2;
+ integer nb, mn, nb1, nb2, nb3, nb4;
+ real anrm, bnrm, smin, smax;
+ integer iascl, ibscl;
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *);
+ integer ismin, ismax;
+ extern /* Subroutine */ int ctrsm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *), claic1_(integer *,
+ integer *, complex *, real *, complex *, complex *, real *,
+ complex *, complex *);
+ real wsize;
+ extern /* Subroutine */ int cgeqp3_(integer *, integer *, complex *,
+ integer *, integer *, complex *, complex *, integer *, real *,
+ integer *), slabad_(real *, real *);
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ extern /* Subroutine */ int clascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, complex *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int claset_(char *, integer *, integer *, complex
+ *, complex *, complex *, integer *), xerbla_(char *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ real bignum;
+ extern /* Subroutine */ int cunmqr_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *);
+ real sminpr, smaxpr, smlnum;
+ extern /* Subroutine */ int cunmrz_(char *, char *, integer *, integer *,
+ integer *, integer *, complex *, integer *, complex *, complex *,
+ integer *, complex *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int ctzrzf_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGELSY computes the minimum-norm solution to a complex linear least */
+/* squares problem: */
+/* minimize || A * X - B || */
+/* using a complete orthogonal factorization of A. A is an M-by-N */
+/* matrix which may be rank-deficient. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution */
+/* matrix X. */
+
+/* The routine first computes a QR factorization with column pivoting: */
+/* A * P = Q * [ R11 R12 ] */
+/* [ 0 R22 ] */
+/* with R11 defined as the largest leading submatrix whose estimated */
+/* condition number is less than 1/RCOND. The order of R11, RANK, */
+/* is the effective rank of A. */
+
+/* Then, R22 is considered to be negligible, and R12 is annihilated */
+/* by unitary transformations from the right, arriving at the */
+/* complete orthogonal factorization: */
+/* A * P = Q * [ T11 0 ] * Z */
+/* [ 0 0 ] */
+/* The minimum-norm solution is then */
+/* X = P * Z' [ inv(T11)*Q1'*B ] */
+/* [ 0 ] */
+/* where Q1 consists of the first RANK columns of Q. */
+
+/* This routine is basically identical to the original xGELSX except */
+/* three differences: */
+/* o The permutation of matrix B (the right hand side) is faster and */
+/* simpler. */
+/* o The call to the subroutine xGEQPF has been substituted by the */
+/* the call to the subroutine xGEQP3. This subroutine is a Blas-3 */
+/* version of the QR factorization with column pivoting. */
+/* o Matrix B (the right hand side) is updated with Blas-3. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of */
+/* columns of matrices B and X. NRHS >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A has been overwritten by details of its */
+/* complete orthogonal factorization. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the M-by-NRHS right hand side matrix B. */
+/* On exit, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,M,N). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(i) .ne. 0, the i-th column of A is permuted */
+/* to the front of AP, otherwise column i is a free column. */
+/* On exit, if JPVT(i) = k, then the i-th column of A*P */
+/* was the k-th column of A. */
+
+/* RCOND (input) REAL */
+/* RCOND is used to determine the effective rank of A, which */
+/* is defined as the order of the largest leading triangular */
+/* submatrix R11 in the QR factorization with pivoting of A, */
+/* whose estimated condition number < 1/RCOND. */
+
+/* RANK (output) INTEGER */
+/* The effective rank of A, i.e., the order of the submatrix */
+/* R11. This is the same as the order of the submatrix T11 */
+/* in the complete orthogonal factorization of A. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* The unblocked strategy requires that: */
+/* LWORK >= MN + MAX( 2*MN, N+1, MN+NRHS ) */
+/* where MN = min(M,N). */
+/* The block algorithm requires that: */
+/* LWORK >= MN + MAX( 2*MN, NB*(N+1), MN+MN*NB, MN+NB*NRHS ) */
+/* where NB is an upper bound on the blocksize returned */
+/* by ILAENV for the routines CGEQP3, CTZRZF, CTZRQF, CUNMQR, */
+/* and CUNMRZ. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+/* E. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+/* G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --jpvt;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ mn = min(*m,*n);
+ ismin = mn + 1;
+ ismax = (mn << 1) + 1;
+
+/* Test the input arguments. */
+
+ *info = 0;
+ nb1 = ilaenv_(&c__1, "CGEQRF", " ", m, n, &c_n1, &c_n1);
+ nb2 = ilaenv_(&c__1, "CGERQF", " ", m, n, &c_n1, &c_n1);
+ nb3 = ilaenv_(&c__1, "CUNMQR", " ", m, n, nrhs, &c_n1);
+ nb4 = ilaenv_(&c__1, "CUNMRQ", " ", m, n, nrhs, &c_n1);
+/* Computing MAX */
+ i__1 = max(nb1,nb2), i__1 = max(i__1,nb3);
+ nb = max(i__1,nb4);
+/* Computing MAX */
+ i__1 = 1, i__2 = mn + (*n << 1) + nb * (*n + 1), i__1 = max(i__1,i__2),
+ i__2 = (mn << 1) + nb * *nrhs;
+ lwkopt = max(i__1,i__2);
+ q__1.r = (real) lwkopt, q__1.i = 0.f;
+ work[1].r = q__1.r, work[1].i = q__1.i;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = max(1,*m);
+ if (*ldb < max(i__1,*n)) {
+ *info = -7;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = mn << 1, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = mn +
+ *nrhs;
+ if (*lwork < mn + max(i__1,i__2) && ! lquery) {
+ *info = -12;
+ }
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGELSY", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+/* Computing MIN */
+ i__1 = min(*m,*n);
+ if (min(i__1,*nrhs) == 0) {
+ *rank = 0;
+ return 0;
+ }
+
+/* Get machine parameters */
+
+ smlnum = slamch_("S") / slamch_("P");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+
+/* Scale A, B if max entries outside range [SMLNUM,BIGNUM] */
+
+ anrm = clange_("M", m, n, &a[a_offset], lda, &rwork[1]);
+ iascl = 0;
+ if (anrm > 0.f && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ clascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ clascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.f) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ claset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[b_offset], ldb);
+ *rank = 0;
+ goto L70;
+ }
+
+ bnrm = clange_("M", m, nrhs, &b[b_offset], ldb, &rwork[1]);
+ ibscl = 0;
+ if (bnrm > 0.f && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ clascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ clascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 2;
+ }
+
+/* Compute QR factorization with column pivoting of A: */
+/* A * P = Q * R */
+
+ i__1 = *lwork - mn;
+ cgeqp3_(m, n, &a[a_offset], lda, &jpvt[1], &work[1], &work[mn + 1], &i__1,
+ &rwork[1], info);
+ i__1 = mn + 1;
+ wsize = mn + work[i__1].r;
+
+/* complex workspace: MN+NB*(N+1). real workspace 2*N. */
+/* Details of Householder rotations stored in WORK(1:MN). */
+
+/* Determine RANK using incremental condition estimation */
+
+ i__1 = ismin;
+ work[i__1].r = 1.f, work[i__1].i = 0.f;
+ i__1 = ismax;
+ work[i__1].r = 1.f, work[i__1].i = 0.f;
+ smax = c_abs(&a[a_dim1 + 1]);
+ smin = smax;
+ if (c_abs(&a[a_dim1 + 1]) == 0.f) {
+ *rank = 0;
+ i__1 = max(*m,*n);
+ claset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[b_offset], ldb);
+ goto L70;
+ } else {
+ *rank = 1;
+ }
+
+L10:
+ if (*rank < mn) {
+ i__ = *rank + 1;
+ claic1_(&c__2, rank, &work[ismin], &smin, &a[i__ * a_dim1 + 1], &a[
+ i__ + i__ * a_dim1], &sminpr, &s1, &c1);
+ claic1_(&c__1, rank, &work[ismax], &smax, &a[i__ * a_dim1 + 1], &a[
+ i__ + i__ * a_dim1], &smaxpr, &s2, &c2);
+
+ if (smaxpr * *rcond <= sminpr) {
+ i__1 = *rank;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ismin + i__ - 1;
+ i__3 = ismin + i__ - 1;
+ q__1.r = s1.r * work[i__3].r - s1.i * work[i__3].i, q__1.i =
+ s1.r * work[i__3].i + s1.i * work[i__3].r;
+ work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+ i__2 = ismax + i__ - 1;
+ i__3 = ismax + i__ - 1;
+ q__1.r = s2.r * work[i__3].r - s2.i * work[i__3].i, q__1.i =
+ s2.r * work[i__3].i + s2.i * work[i__3].r;
+ work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+/* L20: */
+ }
+ i__1 = ismin + *rank;
+ work[i__1].r = c1.r, work[i__1].i = c1.i;
+ i__1 = ismax + *rank;
+ work[i__1].r = c2.r, work[i__1].i = c2.i;
+ smin = sminpr;
+ smax = smaxpr;
+ ++(*rank);
+ goto L10;
+ }
+ }
+
+/* complex workspace: 3*MN. */
+
+/* Logically partition R = [ R11 R12 ] */
+/* [ 0 R22 ] */
+/* where R11 = R(1:RANK,1:RANK) */
+
+/* [R11,R12] = [ T11, 0 ] * Y */
+
+ if (*rank < *n) {
+ i__1 = *lwork - (mn << 1);
+ ctzrzf_(rank, n, &a[a_offset], lda, &work[mn + 1], &work[(mn << 1) +
+ 1], &i__1, info);
+ }
+
+/* complex workspace: 2*MN. */
+/* Details of Householder rotations stored in WORK(MN+1:2*MN) */
+
+/* B(1:M,1:NRHS) := Q' * B(1:M,1:NRHS) */
+
+ i__1 = *lwork - (mn << 1);
+ cunmqr_("Left", "Conjugate transpose", m, nrhs, &mn, &a[a_offset], lda, &
+ work[1], &b[b_offset], ldb, &work[(mn << 1) + 1], &i__1, info);
+/* Computing MAX */
+ i__1 = (mn << 1) + 1;
+ r__1 = wsize, r__2 = (mn << 1) + work[i__1].r;
+ wsize = dmax(r__1,r__2);
+
+/* complex workspace: 2*MN+NB*NRHS. */
+
+/* B(1:RANK,1:NRHS) := inv(T11) * B(1:RANK,1:NRHS) */
+
+ ctrsm_("Left", "Upper", "No transpose", "Non-unit", rank, nrhs, &c_b2, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = *rank + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0.f, b[i__3].i = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* B(1:N,1:NRHS) := Y' * B(1:N,1:NRHS) */
+
+ if (*rank < *n) {
+ i__1 = *n - *rank;
+ i__2 = *lwork - (mn << 1);
+ cunmrz_("Left", "Conjugate transpose", n, nrhs, rank, &i__1, &a[
+ a_offset], lda, &work[mn + 1], &b[b_offset], ldb, &work[(mn <<
+ 1) + 1], &i__2, info);
+ }
+
+/* complex workspace: 2*MN+NRHS. */
+
+/* B(1:N,1:NRHS) := P * B(1:N,1:NRHS) */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = jpvt[i__];
+ i__4 = i__ + j * b_dim1;
+ work[i__3].r = b[i__4].r, work[i__3].i = b[i__4].i;
+/* L50: */
+ }
+ ccopy_(n, &work[1], &c__1, &b[j * b_dim1 + 1], &c__1);
+/* L60: */
+ }
+
+/* complex workspace: N. */
+
+/* Undo scaling */
+
+ if (iascl == 1) {
+ clascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+ info);
+ clascl_("U", &c__0, &c__0, &smlnum, &anrm, rank, rank, &a[a_offset],
+ lda, info);
+ } else if (iascl == 2) {
+ clascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+ info);
+ clascl_("U", &c__0, &c__0, &bignum, &anrm, rank, rank, &a[a_offset],
+ lda, info);
+ }
+ if (ibscl == 1) {
+ clascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ } else if (ibscl == 2) {
+ clascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ }
+
+L70:
+ q__1.r = (real) lwkopt, q__1.i = 0.f;
+ work[1].r = q__1.r, work[1].i = q__1.i;
+
+ return 0;
+
+/* End of CGELSY */
+
+} /* cgelsy_ */
diff --git a/contrib/libs/clapack/cgeql2.c b/contrib/libs/clapack/cgeql2.c
new file mode 100644
index 0000000000..e26141146f
--- /dev/null
+++ b/contrib/libs/clapack/cgeql2.c
@@ -0,0 +1,167 @@
+/* cgeql2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cgeql2_(integer *m, integer *n, complex *a, integer *lda,
+ complex *tau, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, k;
+ complex alpha;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+, integer *, complex *, complex *, integer *, complex *),
+ clarfp_(integer *, complex *, complex *, integer *, complex *),
+ xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGEQL2 computes a QL factorization of a complex m by n matrix A: */
+/* A = Q * L. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the m by n matrix A. */
+/* On exit, if m >= n, the lower triangle of the subarray */
+/* A(m-n+1:m,1:n) contains the n by n lower triangular matrix L; */
+/* if m <= n, the elements on and below the (n-m)-th */
+/* superdiagonal contain the m by n lower trapezoidal matrix L; */
+/* the remaining elements, with the array TAU, represent the */
+/* unitary matrix Q as a product of elementary reflectors */
+/* (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) COMPLEX array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(m-k+i+1:m) = 0 and v(m-k+i) = 1; v(1:m-k+i-1) is stored on exit in */
+/* A(1:m-k+i-1,n-k+i), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEQL2", &i__1);
+ return 0;
+ }
+
+ k = min(*m,*n);
+
+ for (i__ = k; i__ >= 1; --i__) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(1:m-k+i-1,n-k+i) */
+
+ i__1 = *m - k + i__ + (*n - k + i__) * a_dim1;
+ alpha.r = a[i__1].r, alpha.i = a[i__1].i;
+ i__1 = *m - k + i__;
+ clarfp_(&i__1, &alpha, &a[(*n - k + i__) * a_dim1 + 1], &c__1, &tau[
+ i__]);
+
+/* Apply H(i)' to A(1:m-k+i,1:n-k+i-1) from the left */
+
+ i__1 = *m - k + i__ + (*n - k + i__) * a_dim1;
+ a[i__1].r = 1.f, a[i__1].i = 0.f;
+ i__1 = *m - k + i__;
+ i__2 = *n - k + i__ - 1;
+ r_cnjg(&q__1, &tau[i__]);
+ clarf_("Left", &i__1, &i__2, &a[(*n - k + i__) * a_dim1 + 1], &c__1, &
+ q__1, &a[a_offset], lda, &work[1]);
+ i__1 = *m - k + i__ + (*n - k + i__) * a_dim1;
+ a[i__1].r = alpha.r, a[i__1].i = alpha.i;
+/* L10: */
+ }
+ return 0;
+
+/* End of CGEQL2 */
+
+} /* cgeql2_ */
diff --git a/contrib/libs/clapack/cgeqlf.c b/contrib/libs/clapack/cgeqlf.c
new file mode 100644
index 0000000000..9437d05243
--- /dev/null
+++ b/contrib/libs/clapack/cgeqlf.c
@@ -0,0 +1,271 @@
+/* cgeqlf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int cgeqlf_(integer *m, integer *n, complex *a, integer *lda,
+ complex *tau, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, k, ib, nb, ki, kk, mu, nu, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int cgeql2_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *), clarfb_(char *, char
+ *, char *, char *, integer *, integer *, integer *, complex *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *), clarft_(char *, char *
+, integer *, integer *, complex *, integer *, complex *, complex *
+, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGEQLF computes a QL factorization of a complex M-by-N matrix A: */
+/* A = Q * L. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* if m >= n, the lower triangle of the subarray */
+/* A(m-n+1:m,1:n) contains the N-by-N lower triangular matrix L; */
+/* if m <= n, the elements on and below the (n-m)-th */
+/* superdiagonal contain the M-by-N lower trapezoidal matrix L; */
+/* the remaining elements, with the array TAU, represent the */
+/* unitary matrix Q as a product of elementary reflectors */
+/* (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(m-k+i+1:m) = 0 and v(m-k+i) = 1; v(1:m-k+i-1) is stored on exit in */
+/* A(1:m-k+i-1,n-k+i), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+
+ if (*info == 0) {
+ k = min(*m,*n);
+ if (k == 0) {
+ lwkopt = 1;
+ } else {
+ nb = ilaenv_(&c__1, "CGEQLF", " ", m, n, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ if (*lwork < max(1,*n) && ! lquery) {
+ *info = -7;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEQLF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (k == 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 1;
+ iws = *n;
+ if (nb > 1 && nb < k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "CGEQLF", " ", m, n, &c_n1, &c_n1);
+ nx = max(i__1,i__2);
+ if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "CGEQLF", " ", m, n, &c_n1, &
+ c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially. */
+/* The last kk columns are handled by the block method. */
+
+ ki = (k - nx - 1) / nb * nb;
+/* Computing MIN */
+ i__1 = k, i__2 = ki + nb;
+ kk = min(i__1,i__2);
+
+ i__1 = k - kk + 1;
+ i__2 = -nb;
+ for (i__ = k - kk + ki + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__
+ += i__2) {
+/* Computing MIN */
+ i__3 = k - i__ + 1;
+ ib = min(i__3,nb);
+
+/* Compute the QL factorization of the current block */
+/* A(1:m-k+i+ib-1,n-k+i:n-k+i+ib-1) */
+
+ i__3 = *m - k + i__ + ib - 1;
+ cgeql2_(&i__3, &ib, &a[(*n - k + i__) * a_dim1 + 1], lda, &tau[
+ i__], &work[1], &iinfo);
+ if (*n - k + i__ > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ i__3 = *m - k + i__ + ib - 1;
+ clarft_("Backward", "Columnwise", &i__3, &ib, &a[(*n - k +
+ i__) * a_dim1 + 1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(1:m-k+i+ib-1,1:n-k+i-1) from the left */
+
+ i__3 = *m - k + i__ + ib - 1;
+ i__4 = *n - k + i__ - 1;
+ clarfb_("Left", "Conjugate transpose", "Backward", "Columnwi"
+ "se", &i__3, &i__4, &ib, &a[(*n - k + i__) * a_dim1 +
+ 1], lda, &work[1], &ldwork, &a[a_offset], lda, &work[
+ ib + 1], &ldwork);
+ }
+/* L10: */
+ }
+ mu = *m - k + i__ + nb - 1;
+ nu = *n - k + i__ + nb - 1;
+ } else {
+ mu = *m;
+ nu = *n;
+ }
+
+/* Use unblocked code to factor the last or only block */
+
+ if (mu > 0 && nu > 0) {
+ cgeql2_(&mu, &nu, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
+ }
+
+ work[1].r = (real) iws, work[1].i = 0.f;
+ return 0;
+
+/* End of CGEQLF */
+
+} /* cgeqlf_ */
diff --git a/contrib/libs/clapack/cgeqp3.c b/contrib/libs/clapack/cgeqp3.c
new file mode 100644
index 0000000000..f07fa2ded3
--- /dev/null
+++ b/contrib/libs/clapack/cgeqp3.c
@@ -0,0 +1,361 @@
+/* cgeqp3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int cgeqp3_(integer *m, integer *n, complex *a, integer *lda,
+ integer *jpvt, complex *tau, complex *work, integer *lwork, real *
+ rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer j, jb, na, nb, sm, sn, nx, fjb, iws, nfxd, nbmin;
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ integer minmn, minws;
+ extern /* Subroutine */ int claqp2_(integer *, integer *, integer *,
+ complex *, integer *, integer *, complex *, real *, real *,
+ complex *);
+ extern doublereal scnrm2_(integer *, complex *, integer *);
+ extern /* Subroutine */ int cgeqrf_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *, integer *), xerbla_(
+ char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int claqps_(integer *, integer *, integer *,
+ integer *, integer *, complex *, integer *, integer *, complex *,
+ real *, real *, complex *, complex *, integer *);
+ integer topbmn, sminmn;
+ extern /* Subroutine */ int cunmqr_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGEQP3 computes a QR factorization with column pivoting of a */
+/* matrix A: A*P = Q*R using Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the upper triangle of the array contains the */
+/* min(M,N)-by-N upper trapezoidal matrix R; the elements below */
+/* the diagonal, together with the array TAU, represent the */
+/* unitary matrix Q as a product of min(M,N) elementary */
+/* reflectors. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(J).ne.0, the J-th column of A is permuted */
+/* to the front of A*P (a leading column); if JPVT(J)=0, */
+/* the J-th column of A is a free column. */
+/* On exit, if JPVT(J)=K, then the J-th column of A*P was the */
+/* the K-th column of A. */
+
+/* TAU (output) COMPLEX array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO=0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= N+1. */
+/* For optimal performance LWORK >= ( N+1 )*NB, where NB */
+/* is the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real/complex scalar, and v is a real/complex vector */
+/* with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in */
+/* A(i+1:m,i), and tau in TAU(i). */
+
+/* Based on contributions by */
+/* G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+/* X. Sun, Computer Science Dept., Duke University, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test input arguments */
+/* ==================== */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --jpvt;
+ --tau;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+
+ if (*info == 0) {
+ minmn = min(*m,*n);
+ if (minmn == 0) {
+ iws = 1;
+ lwkopt = 1;
+ } else {
+ iws = *n + 1;
+ nb = ilaenv_(&c__1, "CGEQRF", " ", m, n, &c_n1, &c_n1);
+ lwkopt = (*n + 1) * nb;
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ if (*lwork < iws && ! lquery) {
+ *info = -8;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEQP3", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (minmn == 0) {
+ return 0;
+ }
+
+/* Move initial columns up front. */
+
+ nfxd = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (jpvt[j] != 0) {
+ if (j != nfxd) {
+ cswap_(m, &a[j * a_dim1 + 1], &c__1, &a[nfxd * a_dim1 + 1], &
+ c__1);
+ jpvt[j] = jpvt[nfxd];
+ jpvt[nfxd] = j;
+ } else {
+ jpvt[j] = j;
+ }
+ ++nfxd;
+ } else {
+ jpvt[j] = j;
+ }
+/* L10: */
+ }
+ --nfxd;
+
+/* Factorize fixed columns */
+/* ======================= */
+
+/* Compute the QR factorization of fixed columns and update */
+/* remaining columns. */
+
+ if (nfxd > 0) {
+ na = min(*m,nfxd);
+/* CC CALL CGEQR2( M, NA, A, LDA, TAU, WORK, INFO ) */
+ cgeqrf_(m, &na, &a[a_offset], lda, &tau[1], &work[1], lwork, info);
+/* Computing MAX */
+ i__1 = iws, i__2 = (integer) work[1].r;
+ iws = max(i__1,i__2);
+ if (na < *n) {
+/* CC CALL CUNM2R( 'Left', 'Conjugate Transpose', M, N-NA, */
+/* CC $ NA, A, LDA, TAU, A( 1, NA+1 ), LDA, WORK, */
+/* CC $ INFO ) */
+ i__1 = *n - na;
+ cunmqr_("Left", "Conjugate Transpose", m, &i__1, &na, &a[a_offset]
+, lda, &tau[1], &a[(na + 1) * a_dim1 + 1], lda, &work[1],
+ lwork, info);
+/* Computing MAX */
+ i__1 = iws, i__2 = (integer) work[1].r;
+ iws = max(i__1,i__2);
+ }
+ }
+
+/* Factorize free columns */
+/* ====================== */
+
+ if (nfxd < minmn) {
+
+ sm = *m - nfxd;
+ sn = *n - nfxd;
+ sminmn = minmn - nfxd;
+
+/* Determine the block size. */
+
+ nb = ilaenv_(&c__1, "CGEQRF", " ", &sm, &sn, &c_n1, &c_n1);
+ nbmin = 2;
+ nx = 0;
+
+ if (nb > 1 && nb < sminmn) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "CGEQRF", " ", &sm, &sn, &c_n1, &
+ c_n1);
+ nx = max(i__1,i__2);
+
+
+ if (nx < sminmn) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ minws = (sn + 1) * nb;
+ iws = max(iws,minws);
+ if (*lwork < minws) {
+
+/* Not enough workspace to use optimal NB: Reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / (sn + 1);
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "CGEQRF", " ", &sm, &sn, &
+ c_n1, &c_n1);
+ nbmin = max(i__1,i__2);
+
+
+ }
+ }
+ }
+
+/* Initialize partial column norms. The first N elements of work */
+/* store the exact column norms. */
+
+ i__1 = *n;
+ for (j = nfxd + 1; j <= i__1; ++j) {
+ rwork[j] = scnrm2_(&sm, &a[nfxd + 1 + j * a_dim1], &c__1);
+ rwork[*n + j] = rwork[j];
+/* L20: */
+ }
+
+ if (nb >= nbmin && nb < sminmn && nx < sminmn) {
+
+/* Use blocked code initially. */
+
+ j = nfxd + 1;
+
+/* Compute factorization: while loop. */
+
+
+ topbmn = minmn - nx;
+L30:
+ if (j <= topbmn) {
+/* Computing MIN */
+ i__1 = nb, i__2 = topbmn - j + 1;
+ jb = min(i__1,i__2);
+
+/* Factorize JB columns among columns J:N. */
+
+ i__1 = *n - j + 1;
+ i__2 = j - 1;
+ i__3 = *n - j + 1;
+ claqps_(m, &i__1, &i__2, &jb, &fjb, &a[j * a_dim1 + 1], lda, &
+ jpvt[j], &tau[j], &rwork[j], &rwork[*n + j], &work[1],
+ &work[jb + 1], &i__3);
+
+ j += fjb;
+ goto L30;
+ }
+ } else {
+ j = nfxd + 1;
+ }
+
+/* Use unblocked code to factor the last or only block. */
+
+
+ if (j <= minmn) {
+ i__1 = *n - j + 1;
+ i__2 = j - 1;
+ claqp2_(m, &i__1, &i__2, &a[j * a_dim1 + 1], lda, &jpvt[j], &tau[
+ j], &rwork[j], &rwork[*n + j], &work[1]);
+ }
+
+ }
+
+ work[1].r = (real) iws, work[1].i = 0.f;
+ return 0;
+
+/* End of CGEQP3 */
+
+} /* cgeqp3_ */
diff --git a/contrib/libs/clapack/cgeqpf.c b/contrib/libs/clapack/cgeqpf.c
new file mode 100644
index 0000000000..b341824174
--- /dev/null
+++ b/contrib/libs/clapack/cgeqpf.c
@@ -0,0 +1,315 @@
+/* cgeqpf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cgeqpf_(integer *m, integer *n, complex *a, integer *lda,
+ integer *jpvt, complex *tau, complex *work, real *rwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1, r__2;
+ complex q__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+ void r_cnjg(complex *, complex *);
+ double c_abs(complex *);
+
+ /* Local variables */
+ integer i__, j, ma, mn;
+ complex aii;
+ integer pvt;
+ real temp, temp2, tol3z;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+, integer *, complex *, complex *, integer *, complex *),
+ cswap_(integer *, complex *, integer *, complex *, integer *);
+ integer itemp;
+ extern /* Subroutine */ int cgeqr2_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *);
+ extern doublereal scnrm2_(integer *, complex *, integer *);
+ extern /* Subroutine */ int cunm2r_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *), clarfp_(integer *, complex
+ *, complex *, integer *, complex *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+
+
+/* -- LAPACK deprecated driver routine (version 3.2) -- */
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine CGEQP3. */
+
+/* CGEQPF computes a QR factorization with column pivoting of a */
+/* complex M-by-N matrix A: A*P = Q*R. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0 */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the upper triangle of the array contains the */
+/* min(M,N)-by-N upper triangular matrix R; the elements */
+/* below the diagonal, together with the array TAU, */
+/* represent the unitary matrix Q as a product of */
+/* min(m,n) elementary reflectors. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(i) .ne. 0, the i-th column of A is permuted */
+/* to the front of A*P (a leading column); if JPVT(i) = 0, */
+/* the i-th column of A is a free column. */
+/* On exit, if JPVT(i) = k, then the i-th column of A*P */
+/* was the k-th column of A. */
+
+/* TAU (output) COMPLEX array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors. */
+
+/* WORK (workspace) COMPLEX array, dimension (N) */
+
+/* RWORK (workspace) REAL array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(n) */
+
+/* Each H(i) has the form */
+
+/* H = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i). */
+
+/* The matrix P is represented in jpvt as follows: If */
+/* jpvt(j) = i */
+/* then the jth column of P is the ith canonical unit vector. */
+
+/* Partial column norm updating strategy modified by */
+/* Z. Drmac and Z. Bujanovic, Dept. of Mathematics, */
+/* University of Zagreb, Croatia. */
+/* June 2006. */
+/* For more details see LAPACK Working Note 176. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --jpvt;
+ --tau;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEQPF", &i__1);
+ return 0;
+ }
+
+ mn = min(*m,*n);
+ tol3z = sqrt(slamch_("Epsilon"));
+
+/* Move initial columns up front */
+
+ itemp = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (jpvt[i__] != 0) {
+ if (i__ != itemp) {
+ cswap_(m, &a[i__ * a_dim1 + 1], &c__1, &a[itemp * a_dim1 + 1],
+ &c__1);
+ jpvt[i__] = jpvt[itemp];
+ jpvt[itemp] = i__;
+ } else {
+ jpvt[i__] = i__;
+ }
+ ++itemp;
+ } else {
+ jpvt[i__] = i__;
+ }
+/* L10: */
+ }
+ --itemp;
+
+/* Compute the QR factorization and update remaining columns */
+
+ if (itemp > 0) {
+ ma = min(itemp,*m);
+ cgeqr2_(m, &ma, &a[a_offset], lda, &tau[1], &work[1], info);
+ if (ma < *n) {
+ i__1 = *n - ma;
+ cunm2r_("Left", "Conjugate transpose", m, &i__1, &ma, &a[a_offset]
+, lda, &tau[1], &a[(ma + 1) * a_dim1 + 1], lda, &work[1],
+ info);
+ }
+ }
+
+ if (itemp < mn) {
+
+/* Initialize partial column norms. The first n elements of */
+/* work store the exact column norms. */
+
+ i__1 = *n;
+ for (i__ = itemp + 1; i__ <= i__1; ++i__) {
+ i__2 = *m - itemp;
+ rwork[i__] = scnrm2_(&i__2, &a[itemp + 1 + i__ * a_dim1], &c__1);
+ rwork[*n + i__] = rwork[i__];
+/* L20: */
+ }
+
+/* Compute factorization */
+
+ i__1 = mn;
+ for (i__ = itemp + 1; i__ <= i__1; ++i__) {
+
+/* Determine ith pivot column and swap if necessary */
+
+ i__2 = *n - i__ + 1;
+ pvt = i__ - 1 + isamax_(&i__2, &rwork[i__], &c__1);
+
+ if (pvt != i__) {
+ cswap_(m, &a[pvt * a_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &
+ c__1);
+ itemp = jpvt[pvt];
+ jpvt[pvt] = jpvt[i__];
+ jpvt[i__] = itemp;
+ rwork[pvt] = rwork[i__];
+ rwork[*n + pvt] = rwork[*n + i__];
+ }
+
+/* Generate elementary reflector H(i) */
+
+ i__2 = i__ + i__ * a_dim1;
+ aii.r = a[i__2].r, aii.i = a[i__2].i;
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ clarfp_(&i__2, &aii, &a[min(i__3, *m)+ i__ * a_dim1], &c__1, &tau[
+ i__]);
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = aii.r, a[i__2].i = aii.i;
+
+ if (i__ < *n) {
+
+/* Apply H(i) to A(i:m,i+1:n) from the left */
+
+ i__2 = i__ + i__ * a_dim1;
+ aii.r = a[i__2].r, aii.i = a[i__2].i;
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ r_cnjg(&q__1, &tau[i__]);
+ clarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &
+ q__1, &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = aii.r, a[i__2].i = aii.i;
+ }
+
+/* Update partial column norms */
+
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ if (rwork[j] != 0.f) {
+
+/* NOTE: The following 4 lines follow from the analysis in */
+/* Lapack Working Note 176. */
+
+ temp = c_abs(&a[i__ + j * a_dim1]) / rwork[j];
+/* Computing MAX */
+ r__1 = 0.f, r__2 = (temp + 1.f) * (1.f - temp);
+ temp = dmax(r__1,r__2);
+/* Computing 2nd power */
+ r__1 = rwork[j] / rwork[*n + j];
+ temp2 = temp * (r__1 * r__1);
+ if (temp2 <= tol3z) {
+ if (*m - i__ > 0) {
+ i__3 = *m - i__;
+ rwork[j] = scnrm2_(&i__3, &a[i__ + 1 + j * a_dim1]
+, &c__1);
+ rwork[*n + j] = rwork[j];
+ } else {
+ rwork[j] = 0.f;
+ rwork[*n + j] = 0.f;
+ }
+ } else {
+ rwork[j] *= sqrt(temp);
+ }
+ }
+/* L30: */
+ }
+
+/* L40: */
+ }
+ }
+ return 0;
+
+/* End of CGEQPF */
+
+} /* cgeqpf_ */
diff --git a/contrib/libs/clapack/cgeqr2.c b/contrib/libs/clapack/cgeqr2.c
new file mode 100644
index 0000000000..8c46a05238
--- /dev/null
+++ b/contrib/libs/clapack/cgeqr2.c
@@ -0,0 +1,169 @@
+/* cgeqr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cgeqr2_(integer *m, integer *n, complex *a, integer *lda,
+ complex *tau, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, k;
+ complex alpha;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+, integer *, complex *, complex *, integer *, complex *),
+ clarfp_(integer *, complex *, complex *, integer *, complex *),
+ xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGEQR2 computes a QR factorization of a complex m by n matrix A: */
+/* A = Q * R. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the m by n matrix A. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(m,n) by n upper trapezoidal matrix R (R is */
+/* upper triangular if m >= n); the elements below the diagonal, */
+/* with the array TAU, represent the unitary matrix Q as a */
+/* product of elementary reflectors (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) COMPLEX array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), */
+/* and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEQR2", &i__1);
+ return 0;
+ }
+
+ k = min(*m,*n);
+
+ i__1 = k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ clarfp_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3, *m)+ i__ * a_dim1]
+, &c__1, &tau[i__]);
+ if (i__ < *n) {
+
+/* Apply H(i)' to A(i:m,i+1:n) from the left */
+
+ i__2 = i__ + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ r_cnjg(&q__1, &tau[i__]);
+ clarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &q__1,
+ &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = alpha.r, a[i__2].i = alpha.i;
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of CGEQR2 */
+
+} /* cgeqr2_ */
diff --git a/contrib/libs/clapack/cgeqrf.c b/contrib/libs/clapack/cgeqrf.c
new file mode 100644
index 0000000000..c473abe387
--- /dev/null
+++ b/contrib/libs/clapack/cgeqrf.c
@@ -0,0 +1,253 @@
+/* cgeqrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int cgeqrf_(integer *m, integer *n, complex *a, integer *lda,
+ complex *tau, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int cgeqr2_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *), clarfb_(char *, char
+ *, char *, char *, integer *, integer *, integer *, complex *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *), clarft_(char *, char *
+, integer *, integer *, complex *, integer *, complex *, complex *
+, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGEQRF computes a QR factorization of a complex M-by-N matrix A: */
+/* A = Q * R. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(M,N)-by-N upper trapezoidal matrix R (R is */
+/* upper triangular if m >= n); the elements below the diagonal, */
+/* with the array TAU, represent the unitary matrix Q as a */
+/* product of min(m,n) elementary reflectors (see Further */
+/* Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), */
+/* and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "CGEQRF", " ", m, n, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEQRF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ k = min(*m,*n);
+ if (k == 0) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *n;
+ if (nb > 1 && nb < k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "CGEQRF", " ", m, n, &c_n1, &c_n1);
+ nx = max(i__1,i__2);
+ if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "CGEQRF", " ", m, n, &c_n1, &
+ c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially */
+
+ i__1 = k - nx;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = k - i__ + 1;
+ ib = min(i__3,nb);
+
+/* Compute the QR factorization of the current block */
+/* A(i:m,i:i+ib-1) */
+
+ i__3 = *m - i__ + 1;
+ cgeqr2_(&i__3, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
+ 1], &iinfo);
+ if (i__ + ib <= *n) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+ i__3 = *m - i__ + 1;
+ clarft_("Forward", "Columnwise", &i__3, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(i:m,i+ib:n) from the left */
+
+ i__3 = *m - i__ + 1;
+ i__4 = *n - i__ - ib + 1;
+ clarfb_("Left", "Conjugate transpose", "Forward", "Columnwise"
+, &i__3, &i__4, &ib, &a[i__ + i__ * a_dim1], lda, &
+ work[1], &ldwork, &a[i__ + (i__ + ib) * a_dim1], lda,
+ &work[ib + 1], &ldwork);
+ }
+/* L10: */
+ }
+ } else {
+ i__ = 1;
+ }
+
+/* Use unblocked code to factor the last or only block. */
+
+ if (i__ <= k) {
+ i__2 = *m - i__ + 1;
+ i__1 = *n - i__ + 1;
+ cgeqr2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
+, &iinfo);
+ }
+
+ work[1].r = (real) iws, work[1].i = 0.f;
+ return 0;
+
+/* End of CGEQRF */
+
+} /* cgeqrf_ */
diff --git a/contrib/libs/clapack/cgerfs.c b/contrib/libs/clapack/cgerfs.c
new file mode 100644
index 0000000000..c9f854c548
--- /dev/null
+++ b/contrib/libs/clapack/cgerfs.c
@@ -0,0 +1,460 @@
+/* cgerfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int cgerfs_(char *trans, integer *n, integer *nrhs, complex *
+ a, integer *lda, complex *af, integer *ldaf, integer *ipiv, complex *
+ b, integer *ldb, complex *x, integer *ldx, real *ferr, real *berr,
+ complex *work, real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+ x_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer i__, j, k;
+ real s, xk;
+ integer nz;
+ real eps;
+ integer kase;
+ real safe1, safe2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *);
+ integer isave[3];
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), caxpy_(integer *, complex *, complex *,
+ integer *, complex *, integer *);
+ integer count;
+ extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+ *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *), cgetrs_(
+ char *, integer *, integer *, complex *, integer *, integer *,
+ complex *, integer *, integer *);
+ logical notran;
+ char transn[1], transt[1];
+ real lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGERFS improves the computed solution to a system of linear */
+/* equations and provides error bounds and backward error estimates for */
+/* the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The original N-by-N matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input) COMPLEX array, dimension (LDAF,N) */
+/* The factors L and U from the factorization A = P*L*U */
+/* as computed by CGETRF. */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices from CGETRF; for 1<=i<=N, row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* B (input) COMPLEX array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by CGETRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ af_dim1 = *ldaf;
+ af_offset = 1 + af_dim1;
+ af -= af_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldaf < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldx < max(1,*n)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGERFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.f;
+ berr[j] = 0.f;
+/* L10: */
+ }
+ return 0;
+ }
+
+ if (notran) {
+ *(unsigned char *)transn = 'N';
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transn = 'C';
+ *(unsigned char *)transt = 'N';
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = *n + 1;
+ eps = slamch_("Epsilon");
+ safmin = slamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A, A**T, or A**H, depending on TRANS. */
+
+ ccopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_(trans, n, n, &q__1, &a[a_offset], lda, &x[j * x_dim1 + 1], &
+ c__1, &c_b1, &work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[i__] = (r__1 = b[i__3].r, dabs(r__1)) + (r__2 = r_imag(&b[
+ i__ + j * b_dim1]), dabs(r__2));
+/* L30: */
+ }
+
+/* Compute abs(op(A))*abs(X) + abs(B). */
+
+ if (notran) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * x_dim1;
+ xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[k + j
+ * x_dim1]), dabs(r__2));
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ rwork[i__] += ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[i__ + k * a_dim1]), dabs(r__2))) * xk;
+/* L40: */
+ }
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ i__5 = i__ + j * x_dim1;
+ s += ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 = r_imag(&a[
+ i__ + k * a_dim1]), dabs(r__2))) * ((r__3 = x[
+ i__5].r, dabs(r__3)) + (r__4 = r_imag(&x[i__ + j *
+ x_dim1]), dabs(r__4)));
+/* L60: */
+ }
+ rwork[k] += s;
+/* L70: */
+ }
+ }
+ s = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+/* Computing MAX */
+ i__3 = i__;
+ r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2))) / rwork[i__];
+ s = dmax(r__3,r__4);
+ } else {
+/* Computing MAX */
+ i__3 = i__;
+ r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + safe1) / (rwork[i__]
+ + safe1);
+ s = dmax(r__3,r__4);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ cgetrs_(trans, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[1],
+ n, info);
+ caxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use CLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+ i__3 = i__;
+ rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+ i__];
+ } else {
+ i__3 = i__;
+ rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+ i__] + safe1;
+ }
+/* L90: */
+ }
+
+ kase = 0;
+L100:
+ clacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)**H). */
+
+ cgetrs_(transt, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &
+ work[1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L110: */
+ }
+ } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L120: */
+ }
+ cgetrs_(transn, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &
+ work[1], n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * x_dim1;
+ r__3 = lstres, r__4 = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[i__ + j * x_dim1]), dabs(r__2));
+ lstres = dmax(r__3,r__4);
+/* L130: */
+ }
+ if (lstres != 0.f) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of CGERFS */
+
+} /* cgerfs_ */
diff --git a/contrib/libs/clapack/cgerq2.c b/contrib/libs/clapack/cgerq2.c
new file mode 100644
index 0000000000..885c71660e
--- /dev/null
+++ b/contrib/libs/clapack/cgerq2.c
@@ -0,0 +1,162 @@
+/* cgerq2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cgerq2_(integer *m, integer *n, complex *a, integer *lda,
+ complex *tau, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, k;
+ complex alpha;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+, integer *, complex *, complex *, integer *, complex *),
+ clacgv_(integer *, complex *, integer *), clarfp_(integer *,
+ complex *, complex *, integer *, complex *), xerbla_(char *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGERQ2 computes an RQ factorization of a complex m by n matrix A: */
+/* A = R * Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the m by n matrix A. */
+/* On exit, if m <= n, the upper triangle of the subarray */
+/* A(1:m,n-m+1:n) contains the m by m upper triangular matrix R; */
+/* if m >= n, the elements on and above the (m-n)-th subdiagonal */
+/* contain the m by n upper trapezoidal matrix R; the remaining */
+/* elements, with the array TAU, represent the unitary matrix */
+/* Q as a product of elementary reflectors (see Further */
+/* Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) COMPLEX array, dimension (M) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1)' H(2)' . . . H(k)', where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(n-k+i+1:n) = 0 and v(n-k+i) = 1; conjg(v(1:n-k+i-1)) is stored on */
+/* exit in A(m-k+i,1:n-k+i-1), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGERQ2", &i__1);
+ return 0;
+ }
+
+ k = min(*m,*n);
+
+ for (i__ = k; i__ >= 1; --i__) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(m-k+i,1:n-k+i-1) */
+
+ i__1 = *n - k + i__;
+ clacgv_(&i__1, &a[*m - k + i__ + a_dim1], lda);
+ i__1 = *m - k + i__ + (*n - k + i__) * a_dim1;
+ alpha.r = a[i__1].r, alpha.i = a[i__1].i;
+ i__1 = *n - k + i__;
+ clarfp_(&i__1, &alpha, &a[*m - k + i__ + a_dim1], lda, &tau[i__]);
+
+/* Apply H(i) to A(1:m-k+i-1,1:n-k+i) from the right */
+
+ i__1 = *m - k + i__ + (*n - k + i__) * a_dim1;
+ a[i__1].r = 1.f, a[i__1].i = 0.f;
+ i__1 = *m - k + i__ - 1;
+ i__2 = *n - k + i__;
+ clarf_("Right", &i__1, &i__2, &a[*m - k + i__ + a_dim1], lda, &tau[
+ i__], &a[a_offset], lda, &work[1]);
+ i__1 = *m - k + i__ + (*n - k + i__) * a_dim1;
+ a[i__1].r = alpha.r, a[i__1].i = alpha.i;
+ i__1 = *n - k + i__ - 1;
+ clacgv_(&i__1, &a[*m - k + i__ + a_dim1], lda);
+/* L10: */
+ }
+ return 0;
+
+/* End of CGERQ2 */
+
+} /* cgerq2_ */
diff --git a/contrib/libs/clapack/cgerqf.c b/contrib/libs/clapack/cgerqf.c
new file mode 100644
index 0000000000..204b487b5a
--- /dev/null
+++ b/contrib/libs/clapack/cgerqf.c
@@ -0,0 +1,270 @@
+/* cgerqf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int cgerqf_(integer *m, integer *n, complex *a, integer *lda,
+ complex *tau, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, k, ib, nb, ki, kk, mu, nu, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int cgerq2_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *), clarfb_(char *, char
+ *, char *, char *, integer *, integer *, integer *, complex *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *), clarft_(char *, char *
+, integer *, integer *, complex *, integer *, complex *, complex *
+, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGERQF computes an RQ factorization of a complex M-by-N matrix A: */
+/* A = R * Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* if m <= n, the upper triangle of the subarray */
+/* A(1:m,n-m+1:n) contains the M-by-M upper triangular matrix R; */
+/* if m >= n, the elements on and above the (m-n)-th subdiagonal */
+/* contain the M-by-N upper trapezoidal matrix R; */
+/* the remaining elements, with the array TAU, represent the */
+/* unitary matrix Q as a product of min(m,n) elementary */
+/* reflectors (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1)' H(2)' . . . H(k)', where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(n-k+i+1:n) = 0 and v(n-k+i) = 1; conjg(v(1:n-k+i-1)) is stored on */
+/* exit in A(m-k+i,1:n-k+i-1), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+
+ if (*info == 0) {
+ k = min(*m,*n);
+ if (k == 0) {
+ lwkopt = 1;
+ } else {
+ nb = ilaenv_(&c__1, "CGERQF", " ", m, n, &c_n1, &c_n1);
+ lwkopt = *m * nb;
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ if (*lwork < max(1,*m) && ! lquery) {
+ *info = -7;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGERQF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (k == 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 1;
+ iws = *m;
+ if (nb > 1 && nb < k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "CGERQF", " ", m, n, &c_n1, &c_n1);
+ nx = max(i__1,i__2);
+ if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *m;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "CGERQF", " ", m, n, &c_n1, &
+ c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially. */
+/* The last kk rows are handled by the block method. */
+
+ ki = (k - nx - 1) / nb * nb;
+/* Computing MIN */
+ i__1 = k, i__2 = ki + nb;
+ kk = min(i__1,i__2);
+
+ i__1 = k - kk + 1;
+ i__2 = -nb;
+ for (i__ = k - kk + ki + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__
+ += i__2) {
+/* Computing MIN */
+ i__3 = k - i__ + 1;
+ ib = min(i__3,nb);
+
+/* Compute the RQ factorization of the current block */
+/* A(m-k+i:m-k+i+ib-1,1:n-k+i+ib-1) */
+
+ i__3 = *n - k + i__ + ib - 1;
+ cgerq2_(&ib, &i__3, &a[*m - k + i__ + a_dim1], lda, &tau[i__], &
+ work[1], &iinfo);
+ if (*m - k + i__ > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ i__3 = *n - k + i__ + ib - 1;
+ clarft_("Backward", "Rowwise", &i__3, &ib, &a[*m - k + i__ +
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(1:m-k+i-1,1:n-k+i+ib-1) from the right */
+
+ i__3 = *m - k + i__ - 1;
+ i__4 = *n - k + i__ + ib - 1;
+ clarfb_("Right", "No transpose", "Backward", "Rowwise", &i__3,
+ &i__4, &ib, &a[*m - k + i__ + a_dim1], lda, &work[1],
+ &ldwork, &a[a_offset], lda, &work[ib + 1], &ldwork);
+ }
+/* L10: */
+ }
+ mu = *m - k + i__ + nb - 1;
+ nu = *n - k + i__ + nb - 1;
+ } else {
+ mu = *m;
+ nu = *n;
+ }
+
+/* Use unblocked code to factor the last or only block */
+
+ if (mu > 0 && nu > 0) {
+ cgerq2_(&mu, &nu, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
+ }
+
+ work[1].r = (real) iws, work[1].i = 0.f;
+ return 0;
+
+/* End of CGERQF */
+
+} /* cgerqf_ */
diff --git a/contrib/libs/clapack/cgesc2.c b/contrib/libs/clapack/cgesc2.c
new file mode 100644
index 0000000000..111103e767
--- /dev/null
+++ b/contrib/libs/clapack/cgesc2.c
@@ -0,0 +1,206 @@
+/* cgesc2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static complex c_b13 = {1.f,0.f};
+static integer c_n1 = -1;
+
+/* Subroutine */ int cgesc2_(integer *n, complex *a, integer *lda, complex *
+ rhs, integer *ipiv, integer *jpiv, real *scale)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+ real r__1;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ double c_abs(complex *);
+ void c_div(complex *, complex *, complex *);
+
+ /* Local variables */
+ integer i__, j;
+ real eps;
+ complex temp;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *), slabad_(real *, real *);
+ extern integer icamax_(integer *, complex *, integer *);
+ extern doublereal slamch_(char *);
+ real bignum;
+ extern /* Subroutine */ int claswp_(integer *, complex *, integer *,
+ integer *, integer *, integer *, integer *);
+ real smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGESC2 solves a system of linear equations */
+
+/* A * X = scale* RHS */
+
+/* with a general N-by-N matrix A using the LU factorization with */
+/* complete pivoting computed by CGETC2. */
+
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. */
+
+/* A (input) COMPLEX array, dimension (LDA, N) */
+/* On entry, the LU part of the factorization of the n-by-n */
+/* matrix A computed by CGETC2: A = P * L * U * Q */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1, N). */
+
+/* RHS (input/output) COMPLEX array, dimension N. */
+/* On entry, the right hand side vector b. */
+/* On exit, the solution vector X. */
+
+/* IPIV (input) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= i <= N, row i of the */
+/* matrix has been interchanged with row IPIV(i). */
+
+/* JPIV (input) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= j <= N, column j of the */
+/* matrix has been interchanged with column JPIV(j). */
+
+/* SCALE (output) REAL */
+/* On exit, SCALE contains the scale factor. SCALE is chosen */
+/* 0 <= SCALE <= 1 to prevent owerflow in the solution. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Set constant to control overflow */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --rhs;
+ --ipiv;
+ --jpiv;
+
+ /* Function Body */
+ eps = slamch_("P");
+ smlnum = slamch_("S") / eps;
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+
+/* Apply permutations IPIV to RHS */
+
+ i__1 = *n - 1;
+ claswp_(&c__1, &rhs[1], lda, &c__1, &i__1, &ipiv[1], &c__1);
+
+/* Solve for L part */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ i__3 = j;
+ i__4 = j;
+ i__5 = j + i__ * a_dim1;
+ i__6 = i__;
+ q__2.r = a[i__5].r * rhs[i__6].r - a[i__5].i * rhs[i__6].i,
+ q__2.i = a[i__5].r * rhs[i__6].i + a[i__5].i * rhs[i__6]
+ .r;
+ q__1.r = rhs[i__4].r - q__2.r, q__1.i = rhs[i__4].i - q__2.i;
+ rhs[i__3].r = q__1.r, rhs[i__3].i = q__1.i;
+/* L10: */
+ }
+/* L20: */
+ }
+
+/* Solve for U part */
+
+ *scale = 1.f;
+
+/* Check for scaling */
+
+ i__ = icamax_(n, &rhs[1], &c__1);
+ if (smlnum * 2.f * c_abs(&rhs[i__]) > c_abs(&a[*n + *n * a_dim1])) {
+ r__1 = c_abs(&rhs[i__]);
+ q__1.r = .5f / r__1, q__1.i = 0.f / r__1;
+ temp.r = q__1.r, temp.i = q__1.i;
+ cscal_(n, &temp, &rhs[1], &c__1);
+ *scale *= temp.r;
+ }
+ for (i__ = *n; i__ >= 1; --i__) {
+ c_div(&q__1, &c_b13, &a[i__ + i__ * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__1 = i__;
+ i__2 = i__;
+ q__1.r = rhs[i__2].r * temp.r - rhs[i__2].i * temp.i, q__1.i = rhs[
+ i__2].r * temp.i + rhs[i__2].i * temp.r;
+ rhs[i__1].r = q__1.r, rhs[i__1].i = q__1.i;
+ i__1 = *n;
+ for (j = i__ + 1; j <= i__1; ++j) {
+ i__2 = i__;
+ i__3 = i__;
+ i__4 = j;
+ i__5 = i__ + j * a_dim1;
+ q__3.r = a[i__5].r * temp.r - a[i__5].i * temp.i, q__3.i = a[i__5]
+ .r * temp.i + a[i__5].i * temp.r;
+ q__2.r = rhs[i__4].r * q__3.r - rhs[i__4].i * q__3.i, q__2.i =
+ rhs[i__4].r * q__3.i + rhs[i__4].i * q__3.r;
+ q__1.r = rhs[i__3].r - q__2.r, q__1.i = rhs[i__3].i - q__2.i;
+ rhs[i__2].r = q__1.r, rhs[i__2].i = q__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Apply permutations JPIV to the solution (RHS) */
+
+ i__1 = *n - 1;
+ claswp_(&c__1, &rhs[1], lda, &c__1, &i__1, &jpiv[1], &c_n1);
+ return 0;
+
+/* End of CGESC2 */
+
+} /* cgesc2_ */
diff --git a/contrib/libs/clapack/cgesdd.c b/contrib/libs/clapack/cgesdd.c
new file mode 100644
index 0000000000..6149ae7bc5
--- /dev/null
+++ b/contrib/libs/clapack/cgesdd.c
@@ -0,0 +1,2240 @@
+/* cgesdd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__0 = 0;
+
+/* Subroutine */ int cgesdd_(char *jobz, integer *m, integer *n, complex *a,
+ integer *lda, real *s, complex *u, integer *ldu, complex *vt, integer
+ *ldvt, complex *work, integer *lwork, real *rwork, integer *iwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+ i__2, i__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, ie, il, ir, iu, blk;
+ real dum[1], eps;
+ integer iru, ivt, iscl;
+ real anrm;
+ integer idum[1], ierr, itau, irvt;
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ integer chunk, minmn, wrkbl, itaup, itauq;
+ logical wntqa;
+ integer nwork;
+ extern /* Subroutine */ int clacp2_(char *, integer *, integer *, real *,
+ integer *, complex *, integer *);
+ logical wntqn, wntqo, wntqs;
+ integer mnthr1, mnthr2;
+ extern /* Subroutine */ int cgebrd_(integer *, integer *, complex *,
+ integer *, real *, real *, complex *, complex *, complex *,
+ integer *, integer *);
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ extern /* Subroutine */ int cgelqf_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *, integer *), clacrm_(
+ integer *, integer *, complex *, integer *, real *, integer *,
+ complex *, integer *, real *), clarcm_(integer *, integer *, real
+ *, integer *, complex *, integer *, complex *, integer *, real *),
+ clascl_(char *, integer *, integer *, real *, real *, integer *,
+ integer *, complex *, integer *, integer *), sbdsdc_(char
+ *, char *, integer *, real *, real *, real *, integer *, real *,
+ integer *, real *, integer *, real *, integer *, integer *), cgeqrf_(integer *, integer *, complex *, integer
+ *, complex *, complex *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), claset_(char *,
+ integer *, integer *, complex *, complex *, complex *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int cungbr_(char *, integer *, integer *, integer
+ *, complex *, integer *, complex *, complex *, integer *, integer
+ *);
+ real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), cunmbr_(char *, char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *), cunglq_(
+ integer *, integer *, integer *, complex *, integer *, complex *,
+ complex *, integer *, integer *);
+ integer ldwrkl;
+ extern /* Subroutine */ int cungqr_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, integer *);
+ integer ldwrkr, minwrk, ldwrku, maxwrk, ldwkvt;
+ real smlnum;
+ logical wntqas;
+ integer nrwork;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+/* 8-15-00: Improve consistency of WS calculations (eca) */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGESDD computes the singular value decomposition (SVD) of a complex */
+/* M-by-N matrix A, optionally computing the left and/or right singular */
+/* vectors, by using divide-and-conquer method. The SVD is written */
+
+/* A = U * SIGMA * conjugate-transpose(V) */
+
+/* where SIGMA is an M-by-N matrix which is zero except for its */
+/* min(m,n) diagonal elements, U is an M-by-M unitary matrix, and */
+/* V is an N-by-N unitary matrix. The diagonal elements of SIGMA */
+/* are the singular values of A; they are real and non-negative, and */
+/* are returned in descending order. The first min(m,n) columns of */
+/* U and V are the left and right singular vectors of A. */
+
+/* Note that the routine returns VT = V**H, not V. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* Specifies options for computing all or part of the matrix U: */
+/* = 'A': all M columns of U and all N rows of V**H are */
+/* returned in the arrays U and VT; */
+/* = 'S': the first min(M,N) columns of U and the first */
+/* min(M,N) rows of V**H are returned in the arrays U */
+/* and VT; */
+/* = 'O': If M >= N, the first N columns of U are overwritten */
+/* in the array A and all rows of V**H are returned in */
+/* the array VT; */
+/* otherwise, all columns of U are returned in the */
+/* array U and the first M rows of V**H are overwritten */
+/* in the array A; */
+/* = 'N': no columns of U or rows of V**H are computed. */
+
+/* M (input) INTEGER */
+/* The number of rows of the input matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the input matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* if JOBZ = 'O', A is overwritten with the first N columns */
+/* of U (the left singular vectors, stored */
+/* columnwise) if M >= N; */
+/* A is overwritten with the first M rows */
+/* of V**H (the right singular vectors, stored */
+/* rowwise) otherwise. */
+/* if JOBZ .ne. 'O', the contents of A are destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* S (output) REAL array, dimension (min(M,N)) */
+/* The singular values of A, sorted so that S(i) >= S(i+1). */
+
+/* U (output) COMPLEX array, dimension (LDU,UCOL) */
+/* UCOL = M if JOBZ = 'A' or JOBZ = 'O' and M < N; */
+/* UCOL = min(M,N) if JOBZ = 'S'. */
+/* If JOBZ = 'A' or JOBZ = 'O' and M < N, U contains the M-by-M */
+/* unitary matrix U; */
+/* if JOBZ = 'S', U contains the first min(M,N) columns of U */
+/* (the left singular vectors, stored columnwise); */
+/* if JOBZ = 'O' and M >= N, or JOBZ = 'N', U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= 1; if */
+/* JOBZ = 'S' or 'A' or JOBZ = 'O' and M < N, LDU >= M. */
+
+/* VT (output) COMPLEX array, dimension (LDVT,N) */
+/* If JOBZ = 'A' or JOBZ = 'O' and M >= N, VT contains the */
+/* N-by-N unitary matrix V**H; */
+/* if JOBZ = 'S', VT contains the first min(M,N) rows of */
+/* V**H (the right singular vectors, stored rowwise); */
+/* if JOBZ = 'O' and M < N, or JOBZ = 'N', VT is not referenced. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. LDVT >= 1; if */
+/* JOBZ = 'A' or JOBZ = 'O' and M >= N, LDVT >= N; */
+/* if JOBZ = 'S', LDVT >= min(M,N). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= 1. */
+/* if JOBZ = 'N', LWORK >= 2*min(M,N)+max(M,N). */
+/* if JOBZ = 'O', */
+/* LWORK >= 2*min(M,N)*min(M,N)+2*min(M,N)+max(M,N). */
+/* if JOBZ = 'S' or 'A', */
+/* LWORK >= min(M,N)*min(M,N)+2*min(M,N)+max(M,N). */
+/* For good performance, LWORK should generally be larger. */
+
+/* If LWORK = -1, a workspace query is assumed. The optimal */
+/* size for the WORK array is calculated and stored in WORK(1), */
+/* and no other work except argument checking is performed. */
+
+/* RWORK (workspace) REAL array, dimension (MAX(1,LRWORK)) */
+/* If JOBZ = 'N', LRWORK >= 5*min(M,N). */
+/* Otherwise, LRWORK >= 5*min(M,N)*min(M,N) + 7*min(M,N) */
+
+/* IWORK (workspace) INTEGER array, dimension (8*min(M,N)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: The updating process of SBDSDC did not converge. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ mnthr1 = (integer) (minmn * 17.f / 9.f);
+ mnthr2 = (integer) (minmn * 5.f / 3.f);
+ wntqa = lsame_(jobz, "A");
+ wntqs = lsame_(jobz, "S");
+ wntqas = wntqa || wntqs;
+ wntqo = lsame_(jobz, "O");
+ wntqn = lsame_(jobz, "N");
+ minwrk = 1;
+ maxwrk = 1;
+
+ if (! (wntqa || wntqs || wntqo || wntqn)) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldu < 1 || wntqas && *ldu < *m || wntqo && *m < *n && *ldu < *
+ m) {
+ *info = -8;
+ } else if (*ldvt < 1 || wntqa && *ldvt < *n || wntqs && *ldvt < minmn ||
+ wntqo && *m >= *n && *ldvt < *n) {
+ *info = -10;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* CWorkspace refers to complex workspace, and RWorkspace to */
+/* real workspace. NB refers to the optimal block size for the */
+/* immediately following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0 && *m > 0 && *n > 0) {
+ if (*m >= *n) {
+
+/* There is no complex work space needed for bidiagonal SVD */
+/* The real work space needed for bidiagonal SVD is BDSPAC */
+/* for computing singular values and singular vectors; BDSPAN */
+/* for computing singular values only. */
+/* BDSPAC = 5*N*N + 7*N */
+/* BDSPAN = MAX(7*N+4, 3*N+2+SMLSIZ*(SMLSIZ+8)) */
+
+ if (*m >= mnthr1) {
+ if (wntqn) {
+
+/* Path 1 (M much larger than N, JOBZ='N') */
+
+ maxwrk = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", n, n, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+ minwrk = *n * 3;
+ } else if (wntqo) {
+
+/* Path 2 (M much larger than N, JOBZ='O') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "CUNGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNMBR", "QLN", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNMBR", "PRC", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *m * *n + *n * *n + wrkbl;
+ minwrk = (*n << 1) * *n + *n * 3;
+ } else if (wntqs) {
+
+/* Path 3 (M much larger than N, JOBZ='S') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "CUNGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNMBR", "QLN", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNMBR", "PRC", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *n * *n + wrkbl;
+ minwrk = *n * *n + *n * 3;
+ } else if (wntqa) {
+
+/* Path 4 (M much larger than N, JOBZ='A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *m * ilaenv_(&c__1, "CUNGQR",
+ " ", m, m, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNMBR", "QLN", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNMBR", "PRC", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *n * *n + wrkbl;
+ minwrk = *n * *n + (*n << 1) + *m;
+ }
+ } else if (*m >= mnthr2) {
+
+/* Path 5 (M much larger than N, but not as much as MNTHR1) */
+
+ maxwrk = (*n << 1) + (*m + *n) * ilaenv_(&c__1, "CGEBRD",
+ " ", m, n, &c_n1, &c_n1);
+ minwrk = (*n << 1) + *m;
+ if (wntqo) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNGBR", "P", n, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ maxwrk += *m * *n;
+ minwrk += *n * *n;
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNGBR", "P", n, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNGBR", "P", n, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *m * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, m, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ }
+ } else {
+
+/* Path 6 (M at least N, but not much larger) */
+
+ maxwrk = (*n << 1) + (*m + *n) * ilaenv_(&c__1, "CGEBRD",
+ " ", m, n, &c_n1, &c_n1);
+ minwrk = (*n << 1) + *m;
+ if (wntqo) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNMBR", "PRC", n, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNMBR", "QLN", m, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ maxwrk += *m * *n;
+ minwrk += *n * *n;
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNMBR", "PRC", n, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNMBR", "QLN", m, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNGBR", "PRC", n, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *m * ilaenv_(&c__1,
+ "CUNGBR", "QLN", m, m, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ }
+ }
+ } else {
+
+/* There is no complex work space needed for bidiagonal SVD */
+/* The real work space needed for bidiagonal SVD is BDSPAC */
+/* for computing singular values and singular vectors; BDSPAN */
+/* for computing singular values only. */
+/* BDSPAC = 5*M*M + 7*M */
+/* BDSPAN = MAX(7*M+4, 3*M+2+SMLSIZ*(SMLSIZ+8)) */
+
+ if (*n >= mnthr1) {
+ if (wntqn) {
+
+/* Path 1t (N much larger than M, JOBZ='N') */
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+ minwrk = *m * 3;
+ } else if (wntqo) {
+
+/* Path 2t (N much larger than M, JOBZ='O') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "CUNGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNMBR", "PRC", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNMBR", "QLN", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *m * *n + *m * *m + wrkbl;
+ minwrk = (*m << 1) * *m + *m * 3;
+ } else if (wntqs) {
+
+/* Path 3t (N much larger than M, JOBZ='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "CUNGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNMBR", "PRC", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNMBR", "QLN", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *m * *m + wrkbl;
+ minwrk = *m * *m + *m * 3;
+ } else if (wntqa) {
+
+/* Path 4t (N much larger than M, JOBZ='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *n * ilaenv_(&c__1, "CUNGLQ",
+ " ", n, n, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNMBR", "PRC", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNMBR", "QLN", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *m * *m + wrkbl;
+ minwrk = *m * *m + (*m << 1) + *n;
+ }
+ } else if (*n >= mnthr2) {
+
+/* Path 5t (N much larger than M, but not as much as MNTHR1) */
+
+ maxwrk = (*m << 1) + (*m + *n) * ilaenv_(&c__1, "CGEBRD",
+ " ", m, n, &c_n1, &c_n1);
+ minwrk = (*m << 1) + *n;
+ if (wntqo) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNGBR", "P", m, n, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, m, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ maxwrk += *m * *n;
+ minwrk += *m * *m;
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNGBR", "P", m, n, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, m, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *n * ilaenv_(&c__1,
+ "CUNGBR", "P", n, n, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, m, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ }
+ } else {
+
+/* Path 6t (N greater than M, but not much larger) */
+
+ maxwrk = (*m << 1) + (*m + *n) * ilaenv_(&c__1, "CGEBRD",
+ " ", m, n, &c_n1, &c_n1);
+ minwrk = (*m << 1) + *n;
+ if (wntqo) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNMBR", "PRC", m, n, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNMBR", "QLN", m, m, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ maxwrk += *m * *n;
+ minwrk += *m * *m;
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNGBR", "PRC", m, n, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNGBR", "QLN", m, m, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *n * ilaenv_(&c__1,
+ "CUNGBR", "PRC", n, n, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNGBR", "QLN", m, m, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ }
+ }
+ }
+ maxwrk = max(maxwrk,minwrk);
+ }
+ if (*info == 0) {
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+ if (*lwork < minwrk && *lwork != -1) {
+ *info = -13;
+ }
+ }
+
+/* Quick returns */
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGESDD", &i__1);
+ return 0;
+ }
+ if (*lwork == -1) {
+ return 0;
+ }
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = sqrt(slamch_("S")) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = clange_("M", m, n, &a[a_offset], lda, dum);
+ iscl = 0;
+ if (anrm > 0.f && anrm < smlnum) {
+ iscl = 1;
+ clascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, &
+ ierr);
+ } else if (anrm > bignum) {
+ iscl = 1;
+ clascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+ if (*m >= *n) {
+
+/* A has at least as many rows as columns. If A has sufficiently */
+/* more rows than columns, first reduce using the QR */
+/* decomposition (if sufficient workspace available) */
+
+ if (*m >= mnthr1) {
+
+ if (wntqn) {
+
+/* Path 1 (M much larger than N, JOBZ='N') */
+/* No singular vectors to be computed */
+
+ itau = 1;
+ nwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: need 0) */
+
+ i__1 = *lwork - nwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Zero out below R */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ claset_("L", &i__1, &i__2, &c_b1, &c_b1, &a[a_dim1 + 2], lda);
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize R in A */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+ nrwork = ie + *n;
+
+/* Perform bidiagonal SVD, compute singular values only */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAN) */
+
+ sbdsdc_("U", "N", n, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+
+ } else if (wntqo) {
+
+/* Path 2 (M much larger than N, JOBZ='O') */
+/* N left singular vectors to be overwritten on A and */
+/* N right singular vectors to be computed in VT */
+
+ iu = 1;
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ ir = iu + ldwrku * *n;
+ if (*lwork >= *m * *n + *n * *n + *n * 3) {
+
+/* WORK(IR) is M by N */
+
+ ldwrkr = *m;
+ } else {
+ ldwrkr = (*lwork - *n * *n - *n * 3) / *n;
+ }
+ itau = ir + ldwrkr * *n;
+ nwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need N*N+2*N, prefer M*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__1 = *lwork - nwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Copy R to WORK( IR ), zeroing out below it */
+
+ clacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ claset_("L", &i__1, &i__2, &c_b1, &c_b1, &work[ir + 1], &
+ ldwrkr);
+
+/* Generate Q in A */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__1 = *lwork - nwork + 1;
+ cungqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__1, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IR) */
+/* (CWorkspace: need N*N+3*N, prefer M*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of R in WORK(IRU) and computing right singular vectors */
+/* of R in WORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ iru = ie + *n;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ sbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix WORK(IU) */
+/* Overwrite WORK(IU) by the left singular vectors of R */
+/* (CWorkspace: need 2*N*N+3*N, prefer M*N+N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ clacp2_("F", n, n, &rwork[iru], n, &work[iu], &ldwrku);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__1, &
+ ierr);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix VT */
+/* Overwrite VT by the right singular vectors of R */
+/* (CWorkspace: need N*N+3*N, prefer M*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ clacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", n, n, n, &work[ir], &ldwrkr, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IU), storing result in WORK(IR) and copying to A */
+/* (CWorkspace: need 2*N*N, prefer N*N+M*N) */
+/* (RWorkspace: 0) */
+
+ i__1 = *m;
+ i__2 = ldwrkr;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrkr);
+ cgemm_("N", "N", &chunk, n, n, &c_b2, &a[i__ + a_dim1],
+ lda, &work[iu], &ldwrku, &c_b1, &work[ir], &
+ ldwrkr);
+ clacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ +
+ a_dim1], lda);
+/* L10: */
+ }
+
+ } else if (wntqs) {
+
+/* Path 3 (M much larger than N, JOBZ='S') */
+/* N left singular vectors to be computed in U and */
+/* N right singular vectors to be computed in VT */
+
+ ir = 1;
+
+/* WORK(IR) is N by N */
+
+ ldwrkr = *n;
+ itau = ir + ldwrkr * *n;
+ nwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - nwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ clacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__2 = *n - 1;
+ i__1 = *n - 1;
+ claset_("L", &i__2, &i__1, &c_b1, &c_b1, &work[ir + 1], &
+ ldwrkr);
+
+/* Generate Q in A */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - nwork + 1;
+ cungqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IR) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - nwork + 1;
+ cgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ iru = ie + *n;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ sbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix U */
+/* Overwrite U by left singular vectors of R */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ clacp2_("F", n, n, &rwork[iru], n, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix VT */
+/* Overwrite VT by right singular vectors of R */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ clacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", n, n, n, &work[ir], &ldwrkr, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IR), storing result in U */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: 0) */
+
+ clacpy_("F", n, n, &u[u_offset], ldu, &work[ir], &ldwrkr);
+ cgemm_("N", "N", m, n, n, &c_b2, &a[a_offset], lda, &work[ir],
+ &ldwrkr, &c_b1, &u[u_offset], ldu);
+
+ } else if (wntqa) {
+
+/* Path 4 (M much larger than N, JOBZ='A') */
+/* M left singular vectors to be computed in U and */
+/* N right singular vectors to be computed in VT */
+
+ iu = 1;
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ itau = iu + ldwrku * *n;
+ nwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - nwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+ clacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+
+/* Generate Q in U */
+/* (CWorkspace: need N+M, prefer N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - nwork + 1;
+ cungqr_(m, m, n, &u[u_offset], ldu, &work[itau], &work[nwork],
+ &i__2, &ierr);
+
+/* Produce R in A, zeroing out below it */
+
+ i__2 = *n - 1;
+ i__1 = *n - 1;
+ claset_("L", &i__2, &i__1, &c_b1, &c_b1, &a[a_dim1 + 2], lda);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize R in A */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - nwork + 1;
+ cgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+ iru = ie + *n;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ sbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix WORK(IU) */
+/* Overwrite WORK(IU) by left singular vectors of R */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ clacp2_("F", n, n, &rwork[iru], n, &work[iu], &ldwrku);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", n, n, n, &a[a_offset], lda, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__2, &
+ ierr);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix VT */
+/* Overwrite VT by right singular vectors of R */
+/* (CWorkspace: need 3*N, prefer 2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ clacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/* Multiply Q in U by left singular vectors of R in */
+/* WORK(IU), storing result in A */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: 0) */
+
+ cgemm_("N", "N", m, n, n, &c_b2, &u[u_offset], ldu, &work[iu],
+ &ldwrku, &c_b1, &a[a_offset], lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ clacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+
+ }
+
+ } else if (*m >= mnthr2) {
+
+/* MNTHR2 <= M < MNTHR1 */
+
+/* Path 5 (M much larger than N, but not as much as MNTHR1) */
+/* Reduce to bidiagonal form without QR decomposition, use */
+/* CUNGBR and matrix multiplication to compute singular vectors */
+
+ ie = 1;
+ nrwork = ie + *n;
+ itauq = 1;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize A */
+/* (CWorkspace: need 2*N+M, prefer 2*N+(M+N)*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - nwork + 1;
+ cgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__2, &ierr);
+ if (wntqn) {
+
+/* Compute singular values only */
+/* (Cworkspace: 0) */
+/* (Rworkspace: need BDSPAN) */
+
+ sbdsdc_("U", "N", n, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+ } else if (wntqo) {
+ iu = nwork;
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+
+/* Copy A to VT, generate P**H */
+/* (Cworkspace: need 2*N, prefer N+N*NB) */
+/* (Rworkspace: 0) */
+
+ clacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ cungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/* Generate Q in A */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - nwork + 1;
+ cungbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &work[
+ nwork], &i__2, &ierr);
+
+ if (*lwork >= *m * *n + *n * 3) {
+
+/* WORK( IU ) is M by N */
+
+ ldwrku = *m;
+ } else {
+
+/* WORK(IU) is LDWRKU by N */
+
+ ldwrku = (*lwork - *n * 3) / *n;
+ }
+ nwork = iu + ldwrku * *n;
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ sbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Multiply real matrix RWORK(IRVT) by P**H in VT, */
+/* storing the result in WORK(IU), copying to VT */
+/* (Cworkspace: need 0) */
+/* (Rworkspace: need 3*N*N) */
+
+ clarcm_(n, n, &rwork[irvt], n, &vt[vt_offset], ldvt, &work[iu]
+, &ldwrku, &rwork[nrwork]);
+ clacpy_("F", n, n, &work[iu], &ldwrku, &vt[vt_offset], ldvt);
+
+/* Multiply Q in A by real matrix RWORK(IRU), storing the */
+/* result in WORK(IU), copying to A */
+/* (CWorkspace: need N*N, prefer M*N) */
+/* (Rworkspace: need 3*N*N, prefer N*N+2*M*N) */
+
+ nrwork = irvt;
+ i__2 = *m;
+ i__1 = ldwrku;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrku);
+ clacrm_(&chunk, n, &a[i__ + a_dim1], lda, &rwork[iru], n,
+ &work[iu], &ldwrku, &rwork[nrwork]);
+ clacpy_("F", &chunk, n, &work[iu], &ldwrku, &a[i__ +
+ a_dim1], lda);
+/* L20: */
+ }
+
+ } else if (wntqs) {
+
+/* Copy A to VT, generate P**H */
+/* (Cworkspace: need 2*N, prefer N+N*NB) */
+/* (Rworkspace: 0) */
+
+ clacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ cungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__1, &ierr);
+
+/* Copy A to U, generate Q */
+/* (Cworkspace: need 2*N, prefer N+N*NB) */
+/* (Rworkspace: 0) */
+
+ clacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ cungbr_("Q", m, n, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__1, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ sbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Multiply real matrix RWORK(IRVT) by P**H in VT, */
+/* storing the result in A, copying to VT */
+/* (Cworkspace: need 0) */
+/* (Rworkspace: need 3*N*N) */
+
+ clarcm_(n, n, &rwork[irvt], n, &vt[vt_offset], ldvt, &a[
+ a_offset], lda, &rwork[nrwork]);
+ clacpy_("F", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+/* Multiply Q in U by real matrix RWORK(IRU), storing the */
+/* result in A, copying to U */
+/* (CWorkspace: need 0) */
+/* (Rworkspace: need N*N+2*M*N) */
+
+ nrwork = irvt;
+ clacrm_(m, n, &u[u_offset], ldu, &rwork[iru], n, &a[a_offset],
+ lda, &rwork[nrwork]);
+ clacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ } else {
+
+/* Copy A to VT, generate P**H */
+/* (Cworkspace: need 2*N, prefer N+N*NB) */
+/* (Rworkspace: 0) */
+
+ clacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ cungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__1, &ierr);
+
+/* Copy A to U, generate Q */
+/* (Cworkspace: need 2*N, prefer N+N*NB) */
+/* (Rworkspace: 0) */
+
+ clacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ cungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__1, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ sbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Multiply real matrix RWORK(IRVT) by P**H in VT, */
+/* storing the result in A, copying to VT */
+/* (Cworkspace: need 0) */
+/* (Rworkspace: need 3*N*N) */
+
+ clarcm_(n, n, &rwork[irvt], n, &vt[vt_offset], ldvt, &a[
+ a_offset], lda, &rwork[nrwork]);
+ clacpy_("F", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+/* Multiply Q in U by real matrix RWORK(IRU), storing the */
+/* result in A, copying to U */
+/* (CWorkspace: 0) */
+/* (Rworkspace: need 3*N*N) */
+
+ nrwork = irvt;
+ clacrm_(m, n, &u[u_offset], ldu, &rwork[iru], n, &a[a_offset],
+ lda, &rwork[nrwork]);
+ clacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ }
+
+ } else {
+
+/* M .LT. MNTHR2 */
+
+/* Path 6 (M at least N, but not much larger) */
+/* Reduce to bidiagonal form without QR decomposition */
+/* Use CUNMBR to compute singular vectors */
+
+ ie = 1;
+ nrwork = ie + *n;
+ itauq = 1;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize A */
+/* (CWorkspace: need 2*N+M, prefer 2*N+(M+N)*NB) */
+/* (RWorkspace: need N) */
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, &ierr);
+ if (wntqn) {
+
+/* Compute singular values only */
+/* (Cworkspace: 0) */
+/* (Rworkspace: need BDSPAN) */
+
+ sbdsdc_("U", "N", n, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+ } else if (wntqo) {
+ iu = nwork;
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ if (*lwork >= *m * *n + *n * 3) {
+
+/* WORK( IU ) is M by N */
+
+ ldwrku = *m;
+ } else {
+
+/* WORK( IU ) is LDWRKU by N */
+
+ ldwrku = (*lwork - *n * 3) / *n;
+ }
+ nwork = iu + ldwrku * *n;
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ sbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix VT */
+/* Overwrite VT by right singular vectors of A */
+/* (Cworkspace: need 2*N, prefer N+N*NB) */
+/* (Rworkspace: need 0) */
+
+ clacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+
+ if (*lwork >= *m * *n + *n * 3) {
+
+/* Copy real matrix RWORK(IRU) to complex matrix WORK(IU) */
+/* Overwrite WORK(IU) by left singular vectors of A, copying */
+/* to A */
+/* (Cworkspace: need M*N+2*N, prefer M*N+N+N*NB) */
+/* (Rworkspace: need 0) */
+
+ claset_("F", m, n, &c_b1, &c_b1, &work[iu], &ldwrku);
+ clacp2_("F", n, n, &rwork[iru], n, &work[iu], &ldwrku);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__1, &
+ ierr);
+ clacpy_("F", m, n, &work[iu], &ldwrku, &a[a_offset], lda);
+ } else {
+
+/* Generate Q in A */
+/* (Cworkspace: need 2*N, prefer N+N*NB) */
+/* (Rworkspace: need 0) */
+
+ i__1 = *lwork - nwork + 1;
+ cungbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &
+ work[nwork], &i__1, &ierr);
+
+/* Multiply Q in A by real matrix RWORK(IRU), storing the */
+/* result in WORK(IU), copying to A */
+/* (CWorkspace: need N*N, prefer M*N) */
+/* (Rworkspace: need 3*N*N, prefer N*N+2*M*N) */
+
+ nrwork = irvt;
+ i__1 = *m;
+ i__2 = ldwrku;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrku);
+ clacrm_(&chunk, n, &a[i__ + a_dim1], lda, &rwork[iru],
+ n, &work[iu], &ldwrku, &rwork[nrwork]);
+ clacpy_("F", &chunk, n, &work[iu], &ldwrku, &a[i__ +
+ a_dim1], lda);
+/* L30: */
+ }
+ }
+
+ } else if (wntqs) {
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ sbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix U */
+/* Overwrite U by left singular vectors of A */
+/* (CWorkspace: need 3*N, prefer 2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ claset_("F", m, n, &c_b1, &c_b1, &u[u_offset], ldu)
+ ;
+ clacp2_("F", n, n, &rwork[iru], n, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix VT */
+/* Overwrite VT by right singular vectors of A */
+/* (CWorkspace: need 3*N, prefer 2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ clacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+ } else {
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ sbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Set the right corner of U to identity matrix */
+
+ claset_("F", m, m, &c_b1, &c_b1, &u[u_offset], ldu)
+ ;
+ if (*m > *n) {
+ i__2 = *m - *n;
+ i__1 = *m - *n;
+ claset_("F", &i__2, &i__1, &c_b1, &c_b2, &u[*n + 1 + (*n
+ + 1) * u_dim1], ldu);
+ }
+
+/* Copy real matrix RWORK(IRU) to complex matrix U */
+/* Overwrite U by left singular vectors of A */
+/* (CWorkspace: need 2*N+M, prefer 2*N+M*NB) */
+/* (RWorkspace: 0) */
+
+ clacp2_("F", n, n, &rwork[iru], n, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix VT */
+/* Overwrite VT by right singular vectors of A */
+/* (CWorkspace: need 3*N, prefer 2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ clacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+ }
+
+ }
+
+ } else {
+
+/* A has more columns than rows. If A has sufficiently more */
+/* columns than rows, first reduce using the LQ decomposition (if */
+/* sufficient workspace available) */
+
+ if (*n >= mnthr1) {
+
+ if (wntqn) {
+
+/* Path 1t (N much larger than M, JOBZ='N') */
+/* No singular vectors to be computed */
+
+ itau = 1;
+ nwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - nwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Zero out above L */
+
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ claset_("U", &i__2, &i__1, &c_b1, &c_b1, &a[(a_dim1 << 1) + 1]
+, lda);
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize L in A */
+/* (CWorkspace: need 3*M, prefer 2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - nwork + 1;
+ cgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+ nrwork = ie + *m;
+
+/* Perform bidiagonal SVD, compute singular values only */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAN) */
+
+ sbdsdc_("U", "N", m, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+
+ } else if (wntqo) {
+
+/* Path 2t (N much larger than M, JOBZ='O') */
+/* M right singular vectors to be overwritten on A and */
+/* M left singular vectors to be computed in U */
+
+ ivt = 1;
+ ldwkvt = *m;
+
+/* WORK(IVT) is M by M */
+
+ il = ivt + ldwkvt * *m;
+ if (*lwork >= *m * *n + *m * *m + *m * 3) {
+
+/* WORK(IL) M by N */
+
+ ldwrkl = *m;
+ chunk = *n;
+ } else {
+
+/* WORK(IL) is M by CHUNK */
+
+ ldwrkl = *m;
+ chunk = (*lwork - *m * *m - *m * 3) / *m;
+ }
+ itau = il + ldwrkl * chunk;
+ nwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - nwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Copy L to WORK(IL), zeroing about above it */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ claset_("U", &i__2, &i__1, &c_b1, &c_b1, &work[il + ldwrkl], &
+ ldwrkl);
+
+/* Generate Q in A */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - nwork + 1;
+ cunglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IL) */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - nwork + 1;
+ cgebrd_(m, m, &work[il], &ldwrkl, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ iru = ie + *m;
+ irvt = iru + *m * *m;
+ nrwork = irvt + *m * *m;
+ sbdsdc_("U", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix WORK(IU) */
+/* Overwrite WORK(IU) by the left singular vectors of L */
+/* (CWorkspace: need N*N+3*N, prefer M*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ clacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix WORK(IVT) */
+/* Overwrite WORK(IVT) by the right singular vectors of L */
+/* (CWorkspace: need N*N+3*N, prefer M*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ clacp2_("F", m, m, &rwork[irvt], m, &work[ivt], &ldwkvt);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", m, m, m, &work[il], &ldwrkl, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2, &
+ ierr);
+
+/* Multiply right singular vectors of L in WORK(IL) by Q */
+/* in A, storing result in WORK(IL) and copying to A */
+/* (CWorkspace: need 2*M*M, prefer M*M+M*N)) */
+/* (RWorkspace: 0) */
+
+ i__2 = *n;
+ i__1 = chunk;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ cgemm_("N", "N", m, &blk, m, &c_b2, &work[ivt], m, &a[i__
+ * a_dim1 + 1], lda, &c_b1, &work[il], &ldwrkl);
+ clacpy_("F", m, &blk, &work[il], &ldwrkl, &a[i__ * a_dim1
+ + 1], lda);
+/* L40: */
+ }
+
+ } else if (wntqs) {
+
+/* Path 3t (N much larger than M, JOBZ='S') */
+/* M right singular vectors to be computed in VT and */
+/* M left singular vectors to be computed in U */
+
+ il = 1;
+
+/* WORK(IL) is M by M */
+
+ ldwrkl = *m;
+ itau = il + ldwrkl * *m;
+ nwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__1 = *lwork - nwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Copy L to WORK(IL), zeroing out above it */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ claset_("U", &i__1, &i__2, &c_b1, &c_b1, &work[il + ldwrkl], &
+ ldwrkl);
+
+/* Generate Q in A */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__1 = *lwork - nwork + 1;
+ cunglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__1, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IL) */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(m, m, &work[il], &ldwrkl, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ iru = ie + *m;
+ irvt = iru + *m * *m;
+ nrwork = irvt + *m * *m;
+ sbdsdc_("U", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix U */
+/* Overwrite U by left singular vectors of L */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ clacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix VT */
+/* Overwrite VT by left singular vectors of L */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ clacp2_("F", m, m, &rwork[irvt], m, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", m, m, m, &work[il], &ldwrkl, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+
+/* Copy VT to WORK(IL), multiply right singular vectors of L */
+/* in WORK(IL) by Q in A, storing result in VT */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: 0) */
+
+ clacpy_("F", m, m, &vt[vt_offset], ldvt, &work[il], &ldwrkl);
+ cgemm_("N", "N", m, n, m, &c_b2, &work[il], &ldwrkl, &a[
+ a_offset], lda, &c_b1, &vt[vt_offset], ldvt);
+
+ } else if (wntqa) {
+
+/* Path 9t (N much larger than M, JOBZ='A') */
+/* N right singular vectors to be computed in VT and */
+/* M left singular vectors to be computed in U */
+
+ ivt = 1;
+
+/* WORK(IVT) is M by M */
+
+ ldwkvt = *m;
+ itau = ivt + ldwkvt * *m;
+ nwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__1 = *lwork - nwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+ clacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+/* Generate Q in VT */
+/* (CWorkspace: need M+N, prefer M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__1 = *lwork - nwork + 1;
+ cunglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[
+ nwork], &i__1, &ierr);
+
+/* Produce L in A, zeroing out above it */
+
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ claset_("U", &i__1, &i__2, &c_b1, &c_b1, &a[(a_dim1 << 1) + 1]
+, lda);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize L in A */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ iru = ie + *m;
+ irvt = iru + *m * *m;
+ nrwork = irvt + *m * *m;
+ sbdsdc_("U", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix U */
+/* Overwrite U by left singular vectors of L */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ clacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", m, m, m, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix WORK(IVT) */
+/* Overwrite WORK(IVT) by right singular vectors of L */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ clacp2_("F", m, m, &rwork[irvt], m, &work[ivt], &ldwkvt);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", m, m, m, &a[a_offset], lda, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__1, &
+ ierr);
+
+/* Multiply right singular vectors of L in WORK(IVT) by */
+/* Q in VT, storing result in A */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: 0) */
+
+ cgemm_("N", "N", m, n, m, &c_b2, &work[ivt], &ldwkvt, &vt[
+ vt_offset], ldvt, &c_b1, &a[a_offset], lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ clacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+ }
+
+ } else if (*n >= mnthr2) {
+
+/* MNTHR2 <= N < MNTHR1 */
+
+/* Path 5t (N much larger than M, but not as much as MNTHR1) */
+/* Reduce to bidiagonal form without QR decomposition, use */
+/* CUNGBR and matrix multiplication to compute singular vectors */
+
+
+ ie = 1;
+ nrwork = ie + *m;
+ itauq = 1;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize A */
+/* (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB) */
+/* (RWorkspace: M) */
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, &ierr);
+
+ if (wntqn) {
+
+/* Compute singular values only */
+/* (Cworkspace: 0) */
+/* (Rworkspace: need BDSPAN) */
+
+ sbdsdc_("L", "N", m, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+ } else if (wntqo) {
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ ivt = nwork;
+
+/* Copy A to U, generate Q */
+/* (Cworkspace: need 2*M, prefer M+M*NB) */
+/* (Rworkspace: 0) */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ cungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__1, &ierr);
+
+/* Generate P**H in A */
+/* (Cworkspace: need 2*M, prefer M+M*NB) */
+/* (Rworkspace: 0) */
+
+ i__1 = *lwork - nwork + 1;
+ cungbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &work[
+ nwork], &i__1, &ierr);
+
+ ldwkvt = *m;
+ if (*lwork >= *m * *n + *m * 3) {
+
+/* WORK( IVT ) is M by N */
+
+ nwork = ivt + ldwkvt * *n;
+ chunk = *n;
+ } else {
+
+/* WORK( IVT ) is M by CHUNK */
+
+ chunk = (*lwork - *m * 3) / *m;
+ nwork = ivt + ldwkvt * chunk;
+ }
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ sbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Multiply Q in U by real matrix RWORK(IRVT) */
+/* storing the result in WORK(IVT), copying to U */
+/* (Cworkspace: need 0) */
+/* (Rworkspace: need 2*M*M) */
+
+ clacrm_(m, m, &u[u_offset], ldu, &rwork[iru], m, &work[ivt], &
+ ldwkvt, &rwork[nrwork]);
+ clacpy_("F", m, m, &work[ivt], &ldwkvt, &u[u_offset], ldu);
+
+/* Multiply RWORK(IRVT) by P**H in A, storing the */
+/* result in WORK(IVT), copying to A */
+/* (CWorkspace: need M*M, prefer M*N) */
+/* (Rworkspace: need 2*M*M, prefer 2*M*N) */
+
+ nrwork = iru;
+ i__1 = *n;
+ i__2 = chunk;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ clarcm_(m, &blk, &rwork[irvt], m, &a[i__ * a_dim1 + 1],
+ lda, &work[ivt], &ldwkvt, &rwork[nrwork]);
+ clacpy_("F", m, &blk, &work[ivt], &ldwkvt, &a[i__ *
+ a_dim1 + 1], lda);
+/* L50: */
+ }
+ } else if (wntqs) {
+
+/* Copy A to U, generate Q */
+/* (Cworkspace: need 2*M, prefer M+M*NB) */
+/* (Rworkspace: 0) */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ cungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__2, &ierr);
+
+/* Copy A to VT, generate P**H */
+/* (Cworkspace: need 2*M, prefer M+M*NB) */
+/* (Rworkspace: 0) */
+
+ clacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ cungbr_("P", m, n, m, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ sbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Multiply Q in U by real matrix RWORK(IRU), storing the */
+/* result in A, copying to U */
+/* (CWorkspace: need 0) */
+/* (Rworkspace: need 3*M*M) */
+
+ clacrm_(m, m, &u[u_offset], ldu, &rwork[iru], m, &a[a_offset],
+ lda, &rwork[nrwork]);
+ clacpy_("F", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+
+/* Multiply real matrix RWORK(IRVT) by P**H in VT, */
+/* storing the result in A, copying to VT */
+/* (Cworkspace: need 0) */
+/* (Rworkspace: need M*M+2*M*N) */
+
+ nrwork = iru;
+ clarcm_(m, n, &rwork[irvt], m, &vt[vt_offset], ldvt, &a[
+ a_offset], lda, &rwork[nrwork]);
+ clacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ } else {
+
+/* Copy A to U, generate Q */
+/* (Cworkspace: need 2*M, prefer M+M*NB) */
+/* (Rworkspace: 0) */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ cungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__2, &ierr);
+
+/* Copy A to VT, generate P**H */
+/* (Cworkspace: need 2*M, prefer M+M*NB) */
+/* (Rworkspace: 0) */
+
+ clacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ cungbr_("P", n, n, m, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ sbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Multiply Q in U by real matrix RWORK(IRU), storing the */
+/* result in A, copying to U */
+/* (CWorkspace: need 0) */
+/* (Rworkspace: need 3*M*M) */
+
+ clacrm_(m, m, &u[u_offset], ldu, &rwork[iru], m, &a[a_offset],
+ lda, &rwork[nrwork]);
+ clacpy_("F", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+
+/* Multiply real matrix RWORK(IRVT) by P**H in VT, */
+/* storing the result in A, copying to VT */
+/* (Cworkspace: need 0) */
+/* (Rworkspace: need M*M+2*M*N) */
+
+ clarcm_(m, n, &rwork[irvt], m, &vt[vt_offset], ldvt, &a[
+ a_offset], lda, &rwork[nrwork]);
+ clacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ }
+
+ } else {
+
+/* N .LT. MNTHR2 */
+
+/* Path 6t (N greater than M, but not much larger) */
+/* Reduce to bidiagonal form without LQ decomposition */
+/* Use CUNMBR to compute singular vectors */
+
+ ie = 1;
+ nrwork = ie + *m;
+ itauq = 1;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize A */
+/* (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB) */
+/* (RWorkspace: M) */
+
+ i__2 = *lwork - nwork + 1;
+ cgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__2, &ierr);
+ if (wntqn) {
+
+/* Compute singular values only */
+/* (Cworkspace: 0) */
+/* (Rworkspace: need BDSPAN) */
+
+ sbdsdc_("L", "N", m, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+ } else if (wntqo) {
+ ldwkvt = *m;
+ ivt = nwork;
+ if (*lwork >= *m * *n + *m * 3) {
+
+/* WORK( IVT ) is M by N */
+
+ claset_("F", m, n, &c_b1, &c_b1, &work[ivt], &ldwkvt);
+ nwork = ivt + ldwkvt * *n;
+ } else {
+
+/* WORK( IVT ) is M by CHUNK */
+
+ chunk = (*lwork - *m * 3) / *m;
+ nwork = ivt + ldwkvt * chunk;
+ }
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ sbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix U */
+/* Overwrite U by left singular vectors of A */
+/* (Cworkspace: need 2*M, prefer M+M*NB) */
+/* (Rworkspace: need 0) */
+
+ clacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+ if (*lwork >= *m * *n + *m * 3) {
+
+/* Copy real matrix RWORK(IRVT) to complex matrix WORK(IVT) */
+/* Overwrite WORK(IVT) by right singular vectors of A, */
+/* copying to A */
+/* (Cworkspace: need M*N+2*M, prefer M*N+M+M*NB) */
+/* (Rworkspace: need 0) */
+
+ clacp2_("F", m, m, &rwork[irvt], m, &work[ivt], &ldwkvt);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", m, n, m, &a[a_offset], lda, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2,
+ &ierr);
+ clacpy_("F", m, n, &work[ivt], &ldwkvt, &a[a_offset], lda);
+ } else {
+
+/* Generate P**H in A */
+/* (Cworkspace: need 2*M, prefer M+M*NB) */
+/* (Rworkspace: need 0) */
+
+ i__2 = *lwork - nwork + 1;
+ cungbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/* Multiply Q in A by real matrix RWORK(IRU), storing the */
+/* result in WORK(IU), copying to A */
+/* (CWorkspace: need M*M, prefer M*N) */
+/* (Rworkspace: need 3*M*M, prefer M*M+2*M*N) */
+
+ nrwork = iru;
+ i__2 = *n;
+ i__1 = chunk;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ clarcm_(m, &blk, &rwork[irvt], m, &a[i__ * a_dim1 + 1]
+, lda, &work[ivt], &ldwkvt, &rwork[nrwork]);
+ clacpy_("F", m, &blk, &work[ivt], &ldwkvt, &a[i__ *
+ a_dim1 + 1], lda);
+/* L60: */
+ }
+ }
+ } else if (wntqs) {
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ sbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix U */
+/* Overwrite U by left singular vectors of A */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: M*M) */
+
+ clacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix VT */
+/* Overwrite VT by right singular vectors of A */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: M*M) */
+
+ claset_("F", m, n, &c_b1, &c_b1, &vt[vt_offset], ldvt);
+ clacp2_("F", m, m, &rwork[irvt], m, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", m, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ } else {
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+
+ sbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix U */
+/* Overwrite U by left singular vectors of A */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: M*M) */
+
+ clacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+
+/* Set all of VT to identity matrix */
+
+ claset_("F", n, n, &c_b1, &c_b2, &vt[vt_offset], ldvt);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix VT */
+/* Overwrite VT by right singular vectors of A */
+/* (CWorkspace: need 2*M+N, prefer 2*M+N*NB) */
+/* (RWorkspace: M*M) */
+
+ clacp2_("F", m, m, &rwork[irvt], m, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", n, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ }
+
+ }
+
+ }
+
+/* Undo scaling if necessary */
+
+ if (iscl == 1) {
+ if (anrm > bignum) {
+ slascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ if (*info != 0 && anrm > bignum) {
+ i__1 = minmn - 1;
+ slascl_("G", &c__0, &c__0, &bignum, &anrm, &i__1, &c__1, &rwork[
+ ie], &minmn, &ierr);
+ }
+ if (anrm < smlnum) {
+ slascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ if (*info != 0 && anrm < smlnum) {
+ i__1 = minmn - 1;
+ slascl_("G", &c__0, &c__0, &smlnum, &anrm, &i__1, &c__1, &rwork[
+ ie], &minmn, &ierr);
+ }
+ }
+
+/* Return optimal workspace in WORK(1) */
+
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CGESDD */
+
+} /* cgesdd_ */
diff --git a/contrib/libs/clapack/cgesv.c b/contrib/libs/clapack/cgesv.c
new file mode 100644
index 0000000000..1d3ea37ef1
--- /dev/null
+++ b/contrib/libs/clapack/cgesv.c
@@ -0,0 +1,138 @@
+/* cgesv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cgesv_(integer *n, integer *nrhs, complex *a, integer *
+	lda, integer *ipiv, complex *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern /* Subroutine */ int cgetrf_(integer *, integer *, complex *, 
+	    integer *, integer *, integer *), xerbla_(char *, integer *), cgetrs_(char *, integer *, integer *, complex *, integer 
+	    *, integer *, complex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGESV computes the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N matrix and X and B are N-by-NRHS matrices. */
+
+/* The LU decomposition with partial pivoting and row interchanges is */
+/* used to factor A as */
+/* A = P * L * U, */
+/* where P is a permutation matrix, L is unit lower triangular, and U is */
+/* upper triangular. The factored form of A is then used to solve the */
+/* system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the N-by-N coefficient matrix A. */
+/* On exit, the factors L and U from the factorization */
+/* A = P*L*U; the unit diagonal elements of L are not stored. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* The pivot indices that define the permutation matrix P; */
+/* row i of the matrix was interchanged with row IPIV(i). */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, so the solution could not be computed. */
+
+/* ===================================================================== */
+
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* f2c translates 1-based Fortran indexing by shifting each array
+       pointer back by its first-element offset; A(i,j) becomes
+       a[i + j*a_dim1] after these adjustments. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*nrhs < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	/* XERBLA expects the negated argument index and the 6-character
+	   routine name (hence the trailing blank in "CGESV "). */
+	i__1 = -(*info);
+	xerbla_("CGESV ", &i__1);
+	return 0;
+    }
+
+/* Compute the LU factorization of A. */
+
+    cgetrf_(n, n, &a[a_offset], lda, &ipiv[1], info);
+    if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+	cgetrs_("No transpose", n, nrhs, &a[a_offset], lda, &ipiv[1], &b[
+		b_offset], ldb, info);
+    }
+    return 0;
+
+/* End of CGESV */
+
+} /* cgesv_ */
diff --git a/contrib/libs/clapack/cgesvd.c b/contrib/libs/clapack/cgesvd.c
new file mode 100644
index 0000000000..f6b5b7ca55
--- /dev/null
+++ b/contrib/libs/clapack/cgesvd.c
@@ -0,0 +1,4164 @@
+/* cgesvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__6 = 6;
+static integer c__0 = 0;
+static integer c__2 = 2;
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cgesvd_(char *jobu, char *jobvt, integer *m, integer *n,
+ complex *a, integer *lda, real *s, complex *u, integer *ldu, complex *
+ vt, integer *ldvt, complex *work, integer *lwork, real *rwork,
+ integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1[2],
+ i__2, i__3, i__4;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, ie, ir, iu, blk, ncu;
+ real dum[1], eps;
+ integer nru;
+ complex cdum[1];
+ integer iscl;
+ real anrm;
+ integer ierr, itau, ncvt, nrvt;
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ integer chunk, minmn, wrkbl, itaup, itauq, mnthr, iwork;
+ logical wntua, wntva, wntun, wntuo, wntvn, wntvo, wntus, wntvs;
+ extern /* Subroutine */ int cgebrd_(integer *, integer *, complex *,
+ integer *, real *, real *, complex *, complex *, complex *,
+ integer *, integer *);
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ extern /* Subroutine */ int cgelqf_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *, integer *), clascl_(
+ char *, integer *, integer *, real *, real *, integer *, integer *
+, complex *, integer *, integer *), cgeqrf_(integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), claset_(char *,
+ integer *, integer *, complex *, complex *, complex *, integer *), cbdsqr_(char *, integer *, integer *, integer *, integer
+ *, real *, real *, complex *, integer *, complex *, integer *,
+ complex *, integer *, real *, integer *), xerbla_(char *,
+ integer *), cungbr_(char *, integer *, integer *, integer
+ *, complex *, integer *, complex *, complex *, integer *, integer
+ *);
+ real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int cunmbr_(char *, char *, char *, integer *,
+ integer *, integer *, complex *, integer *, complex *, complex *,
+ integer *, complex *, integer *, integer *), cunglq_(integer *, integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *, integer *), cungqr_(
+ integer *, integer *, integer *, complex *, integer *, complex *,
+ complex *, integer *, integer *);
+ integer ldwrkr, minwrk, ldwrku, maxwrk;
+ real smlnum;
+ integer irwork;
+ logical lquery, wntuas, wntvas;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGESVD computes the singular value decomposition (SVD) of a complex */
+/* M-by-N matrix A, optionally computing the left and/or right singular */
+/* vectors. The SVD is written */
+
+/* A = U * SIGMA * conjugate-transpose(V) */
+
+/* where SIGMA is an M-by-N matrix which is zero except for its */
+/* min(m,n) diagonal elements, U is an M-by-M unitary matrix, and */
+/* V is an N-by-N unitary matrix. The diagonal elements of SIGMA */
+/* are the singular values of A; they are real and non-negative, and */
+/* are returned in descending order. The first min(m,n) columns of */
+/* U and V are the left and right singular vectors of A. */
+
+/* Note that the routine returns V**H, not V. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBU (input) CHARACTER*1 */
+/* Specifies options for computing all or part of the matrix U: */
+/* = 'A': all M columns of U are returned in array U: */
+/* = 'S': the first min(m,n) columns of U (the left singular */
+/* vectors) are returned in the array U; */
+/* = 'O': the first min(m,n) columns of U (the left singular */
+/* vectors) are overwritten on the array A; */
+/* = 'N': no columns of U (no left singular vectors) are */
+/* computed. */
+
+/* JOBVT (input) CHARACTER*1 */
+/* Specifies options for computing all or part of the matrix */
+/* V**H: */
+/* = 'A': all N rows of V**H are returned in the array VT; */
+/* = 'S': the first min(m,n) rows of V**H (the right singular */
+/* vectors) are returned in the array VT; */
+/* = 'O': the first min(m,n) rows of V**H (the right singular */
+/* vectors) are overwritten on the array A; */
+/* = 'N': no rows of V**H (no right singular vectors) are */
+/* computed. */
+
+/* JOBVT and JOBU cannot both be 'O'. */
+
+/* M (input) INTEGER */
+/* The number of rows of the input matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the input matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* if JOBU = 'O', A is overwritten with the first min(m,n) */
+/* columns of U (the left singular vectors, */
+/* stored columnwise); */
+/* if JOBVT = 'O', A is overwritten with the first min(m,n) */
+/* rows of V**H (the right singular vectors, */
+/* stored rowwise); */
+/* if JOBU .ne. 'O' and JOBVT .ne. 'O', the contents of A */
+/* are destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* S (output) REAL array, dimension (min(M,N)) */
+/* The singular values of A, sorted so that S(i) >= S(i+1). */
+
+/* U (output) COMPLEX array, dimension (LDU,UCOL) */
+/* (LDU,M) if JOBU = 'A' or (LDU,min(M,N)) if JOBU = 'S'. */
+/* If JOBU = 'A', U contains the M-by-M unitary matrix U; */
+/* if JOBU = 'S', U contains the first min(m,n) columns of U */
+/* (the left singular vectors, stored columnwise); */
+/* if JOBU = 'N' or 'O', U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= 1; if */
+/* JOBU = 'S' or 'A', LDU >= M. */
+
+/* VT (output) COMPLEX array, dimension (LDVT,N) */
+/* If JOBVT = 'A', VT contains the N-by-N unitary matrix */
+/* V**H; */
+/* if JOBVT = 'S', VT contains the first min(m,n) rows of */
+/* V**H (the right singular vectors, stored rowwise); */
+/* if JOBVT = 'N' or 'O', VT is not referenced. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. LDVT >= 1; if */
+/* JOBVT = 'A', LDVT >= N; if JOBVT = 'S', LDVT >= min(M,N). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* LWORK >= MAX(1,2*MIN(M,N)+MAX(M,N)). */
+/* For good performance, LWORK should generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (5*min(M,N)) */
+/* On exit, if INFO > 0, RWORK(1:MIN(M,N)-1) contains the */
+/* unconverged superdiagonal elements of an upper bidiagonal */
+/* matrix B whose diagonal is in S (not necessarily sorted). */
+/* B satisfies A = U * B * VT, so it has the same singular */
+/* values as A, and singular vectors related by U and VT. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if CBDSQR did not converge, INFO specifies how many */
+/* superdiagonals of an intermediate bidiagonal form B */
+/* did not converge to zero. See the description of RWORK */
+/* above for details. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ wntua = lsame_(jobu, "A");
+ wntus = lsame_(jobu, "S");
+ wntuas = wntua || wntus;
+ wntuo = lsame_(jobu, "O");
+ wntun = lsame_(jobu, "N");
+ wntva = lsame_(jobvt, "A");
+ wntvs = lsame_(jobvt, "S");
+ wntvas = wntva || wntvs;
+ wntvo = lsame_(jobvt, "O");
+ wntvn = lsame_(jobvt, "N");
+ lquery = *lwork == -1;
+
+ if (! (wntua || wntus || wntuo || wntun)) {
+ *info = -1;
+ } else if (! (wntva || wntvs || wntvo || wntvn) || wntvo && wntuo) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*m)) {
+ *info = -6;
+ } else if (*ldu < 1 || wntuas && *ldu < *m) {
+ *info = -9;
+ } else if (*ldvt < 1 || wntva && *ldvt < *n || wntvs && *ldvt < minmn) {
+ *info = -11;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* CWorkspace refers to complex workspace, and RWorkspace to */
+/* real workspace. NB refers to the optimal block size for the */
+/* immediately following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ if (*m >= *n && minmn > 0) {
+
+/* Space needed for CBDSQR is BDSPAC = 5*N */
+
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = jobu;
+ i__1[1] = 1, a__1[1] = jobvt;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ mnthr = ilaenv_(&c__6, "CGESVD", ch__1, m, n, &c__0, &c__0);
+ if (*m >= mnthr) {
+ if (wntun) {
+
+/* Path 1 (M much larger than N, JOBU='N') */
+
+ maxwrk = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", n, n, &c_n1, &c_n1);
+ maxwrk = max(i__2,i__3);
+ if (wntvo || wntvas) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*n << 1) + (*n - 1) * ilaenv_(&
+ c__1, "CUNGBR", "P", n, n, n, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ minwrk = *n * 3;
+ } else if (wntuo && wntvn) {
+
+/* Path 2 (M much larger than N, JOBU='O', JOBVT='N') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "CUNGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNGBR", "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = *n * *n + wrkbl, i__3 = *n * *n + *m * *n;
+ maxwrk = max(i__2,i__3);
+ minwrk = (*n << 1) + *m;
+ } else if (wntuo && wntvas) {
+
+/* Path 3 (M much larger than N, JOBU='O', JOBVT='S' or */
+/* 'A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "CUNGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNGBR", "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "CUNGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = *n * *n + wrkbl, i__3 = *n * *n + *m * *n;
+ maxwrk = max(i__2,i__3);
+ minwrk = (*n << 1) + *m;
+ } else if (wntus && wntvn) {
+
+/* Path 4 (M much larger than N, JOBU='S', JOBVT='N') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "CUNGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNGBR", "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = *n * *n + wrkbl;
+ minwrk = (*n << 1) + *m;
+ } else if (wntus && wntvo) {
+
+/* Path 5 (M much larger than N, JOBU='S', JOBVT='O') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "CUNGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNGBR", "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "CUNGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = (*n << 1) * *n + wrkbl;
+ minwrk = (*n << 1) + *m;
+ } else if (wntus && wntvas) {
+
+/* Path 6 (M much larger than N, JOBU='S', JOBVT='S' or */
+/* 'A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "CUNGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNGBR", "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "CUNGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = *n * *n + wrkbl;
+ minwrk = (*n << 1) + *m;
+ } else if (wntua && wntvn) {
+
+/* Path 7 (M much larger than N, JOBU='A', JOBVT='N') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *m * ilaenv_(&c__1, "CUNGQR",
+ " ", m, m, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNGBR", "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = *n * *n + wrkbl;
+ minwrk = (*n << 1) + *m;
+ } else if (wntua && wntvo) {
+
+/* Path 8 (M much larger than N, JOBU='A', JOBVT='O') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *m * ilaenv_(&c__1, "CUNGQR",
+ " ", m, m, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNGBR", "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "CUNGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = (*n << 1) * *n + wrkbl;
+ minwrk = (*n << 1) + *m;
+ } else if (wntua && wntvas) {
+
+/* Path 9 (M much larger than N, JOBU='A', JOBVT='S' or */
+/* 'A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *m * ilaenv_(&c__1, "CUNGQR",
+ " ", m, m, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNGBR", "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "CUNGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = *n * *n + wrkbl;
+ minwrk = (*n << 1) + *m;
+ }
+ } else {
+
+/* Path 10 (M at least N, but not much larger) */
+
+ maxwrk = (*n << 1) + (*m + *n) * ilaenv_(&c__1, "CGEBRD",
+ " ", m, n, &c_n1, &c_n1);
+ if (wntus || wntuo) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*n << 1) + *n * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, n, n, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ if (wntua) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*n << 1) + *m * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, m, n, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ if (! wntvn) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*n << 1) + (*n - 1) * ilaenv_(&
+ c__1, "CUNGBR", "P", n, n, n, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ minwrk = (*n << 1) + *m;
+ }
+ } else if (minmn > 0) {
+
+/* Space needed for CBDSQR is BDSPAC = 5*M */
+
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = jobu;
+ i__1[1] = 1, a__1[1] = jobvt;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ mnthr = ilaenv_(&c__6, "CGESVD", ch__1, m, n, &c__0, &c__0);
+ if (*n >= mnthr) {
+ if (wntvn) {
+
+/* Path 1t(N much larger than M, JOBVT='N') */
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1);
+ maxwrk = max(i__2,i__3);
+ if (wntuo || wntuas) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, m, m, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ minwrk = *m * 3;
+ } else if (wntvo && wntun) {
+
+/* Path 2t(N much larger than M, JOBU='N', JOBVT='O') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "CUNGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m - 1) * ilaenv_(&c__1,
+ "CUNGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = *m * *m + wrkbl, i__3 = *m * *m + *m * *n;
+ maxwrk = max(i__2,i__3);
+ minwrk = (*m << 1) + *n;
+ } else if (wntvo && wntuas) {
+
+/* Path 3t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='O') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "CUNGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m - 1) * ilaenv_(&c__1,
+ "CUNGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = *m * *m + wrkbl, i__3 = *m * *m + *m * *n;
+ maxwrk = max(i__2,i__3);
+ minwrk = (*m << 1) + *n;
+ } else if (wntvs && wntun) {
+
+/* Path 4t(N much larger than M, JOBU='N', JOBVT='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "CUNGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m - 1) * ilaenv_(&c__1,
+ "CUNGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = *m * *m + wrkbl;
+ minwrk = (*m << 1) + *n;
+ } else if (wntvs && wntuo) {
+
+/* Path 5t(N much larger than M, JOBU='O', JOBVT='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "CUNGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m - 1) * ilaenv_(&c__1,
+ "CUNGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = (*m << 1) * *m + wrkbl;
+ minwrk = (*m << 1) + *n;
+ } else if (wntvs && wntuas) {
+
+/* Path 6t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "CUNGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m - 1) * ilaenv_(&c__1,
+ "CUNGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = *m * *m + wrkbl;
+ minwrk = (*m << 1) + *n;
+ } else if (wntva && wntun) {
+
+/* Path 7t(N much larger than M, JOBU='N', JOBVT='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *n * ilaenv_(&c__1, "CUNGLQ",
+ " ", n, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m - 1) * ilaenv_(&c__1,
+ "CUNGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = *m * *m + wrkbl;
+ minwrk = (*m << 1) + *n;
+ } else if (wntva && wntuo) {
+
+/* Path 8t(N much larger than M, JOBU='O', JOBVT='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *n * ilaenv_(&c__1, "CUNGLQ",
+ " ", n, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m - 1) * ilaenv_(&c__1,
+ "CUNGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = (*m << 1) * *m + wrkbl;
+ minwrk = (*m << 1) + *n;
+ } else if (wntva && wntuas) {
+
+/* Path 9t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *n * ilaenv_(&c__1, "CUNGLQ",
+ " ", n, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m - 1) * ilaenv_(&c__1,
+ "CUNGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = *m * *m + wrkbl;
+ minwrk = (*m << 1) + *n;
+ }
+ } else {
+
+/* Path 10t(N greater than M, but not much larger) */
+
+ maxwrk = (*m << 1) + (*m + *n) * ilaenv_(&c__1, "CGEBRD",
+ " ", m, n, &c_n1, &c_n1);
+ if (wntvs || wntvo) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNGBR", "P", m, n, m, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ if (wntva) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*m << 1) + *n * ilaenv_(&c__1,
+ "CUNGBR", "P", n, n, m, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ if (! wntun) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*m << 1) + (*m - 1) * ilaenv_(&
+ c__1, "CUNGBR", "Q", m, m, m, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ minwrk = (*m << 1) + *n;
+ }
+ }
+ maxwrk = max(minwrk,maxwrk);
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -13;
+ }
+ }
+
+ if (*info != 0) {
+ i__2 = -(*info);
+ xerbla_("CGESVD", &i__2);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = sqrt(slamch_("S")) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = clange_("M", m, n, &a[a_offset], lda, dum);
+ iscl = 0;
+ if (anrm > 0.f && anrm < smlnum) {
+ iscl = 1;
+ clascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, &
+ ierr);
+ } else if (anrm > bignum) {
+ iscl = 1;
+ clascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+ if (*m >= *n) {
+
+/* A has at least as many rows as columns. If A has sufficiently */
+/* more rows than columns, first reduce using the QR */
+/* decomposition (if sufficient workspace available) */
+
+ if (*m >= mnthr) {
+
+ if (wntun) {
+
+/* Path 1 (M much larger than N, JOBU='N') */
+/* No left singular vectors to be computed */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: need 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork], &
+ i__2, &ierr);
+
+/* Zero out below R */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ claset_("L", &i__2, &i__3, &c_b1, &c_b1, &a[a_dim1 + 2], lda);
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in A */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__2, &ierr);
+ ncvt = 0;
+ if (wntvo || wntvas) {
+
+/* If right singular vectors desired, generate P'. */
+/* (CWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", n, n, n, &a[a_offset], lda, &work[itaup], &
+ work[iwork], &i__2, &ierr);
+ ncvt = *n;
+ }
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of A in A if desired */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, &ncvt, &c__0, &c__0, &s[1], &rwork[ie], &a[
+ a_offset], lda, cdum, &c__1, cdum, &c__1, &rwork[
+ irwork], info);
+
+/* If right singular vectors desired in VT, copy them there */
+
+ if (wntvas) {
+ clacpy_("F", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ }
+
+ } else if (wntuo && wntvn) {
+
+/* Path 2 (M much larger than N, JOBU='O', JOBVT='N') */
+/* N left singular vectors to be overwritten on A and */
+/* no right singular vectors to be computed */
+
+ if (*lwork >= *n * *n + *n * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *lda * *n;
+ if (*lwork >= max(i__2,i__3) + *lda * *n) {
+
+/* WORK(IU) is LDA by N, WORK(IR) is LDA by N */
+
+ ldwrku = *lda;
+ ldwrkr = *lda;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *lda * *n;
+ if (*lwork >= max(i__2,i__3) + *n * *n) {
+
+/* WORK(IU) is LDA by N, WORK(IR) is N by N */
+
+ ldwrku = *lda;
+ ldwrkr = *n;
+ } else {
+
+/* WORK(IU) is LDWRKU by N, WORK(IR) is N by N */
+
+ ldwrku = (*lwork - *n * *n) / *n;
+ ldwrkr = *n;
+ }
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__2, &ierr);
+
+/* Copy R to WORK(IR) and zero out below it */
+
+ clacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ claset_("L", &i__2, &i__3, &c_b1, &c_b1, &work[ir + 1], &
+ ldwrkr);
+
+/* Generate Q in A */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IR) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &i__2, &
+ ierr);
+
+/* Generate left vectors bidiagonalizing R */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+/* (RWorkspace: need 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq], &
+ work[iwork], &i__2, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IR) */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, &c__0, n, &c__0, &s[1], &rwork[ie], cdum,
+ &c__1, &work[ir], &ldwrkr, cdum, &c__1, &rwork[
+ irwork], info);
+ iu = itauq;
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IR), storing result in WORK(IU) and copying to A */
+/* (CWorkspace: need N*N+N, prefer N*N+M*N) */
+/* (RWorkspace: 0) */
+
+ i__2 = *m;
+ i__3 = ldwrku;
+ for (i__ = 1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__3) {
+/* Computing MIN */
+ i__4 = *m - i__ + 1;
+ chunk = min(i__4,ldwrku);
+ cgemm_("N", "N", &chunk, n, n, &c_b2, &a[i__ + a_dim1]
+, lda, &work[ir], &ldwrkr, &c_b1, &work[iu], &
+ ldwrku);
+ clacpy_("F", &chunk, n, &work[iu], &ldwrku, &a[i__ +
+ a_dim1], lda);
+/* L10: */
+ }
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize A */
+/* (CWorkspace: need 2*N+M, prefer 2*N+(M+N)*NB) */
+/* (RWorkspace: N) */
+
+ i__3 = *lwork - iwork + 1;
+ cgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__3, &ierr);
+
+/* Generate left vectors bidiagonalizing A */
+/* (CWorkspace: need 3*N, prefer 2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ cungbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &
+ work[iwork], &i__3, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in A */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, &c__0, m, &c__0, &s[1], &rwork[ie], cdum,
+ &c__1, &a[a_offset], lda, cdum, &c__1, &rwork[
+ irwork], info);
+
+ }
+
+ } else if (wntuo && wntvas) {
+
+/* Path 3 (M much larger than N, JOBU='O', JOBVT='S' or 'A') */
+/* N left singular vectors to be overwritten on A and */
+/* N right singular vectors to be computed in VT */
+
+ if (*lwork >= *n * *n + *n * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+/* Computing MAX */
+ i__3 = wrkbl, i__2 = *lda * *n;
+ if (*lwork >= max(i__3,i__2) + *lda * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by N */
+
+ ldwrku = *lda;
+ ldwrkr = *lda;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__3 = wrkbl, i__2 = *lda * *n;
+ if (*lwork >= max(i__3,i__2) + *n * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is N by N */
+
+ ldwrku = *lda;
+ ldwrkr = *n;
+ } else {
+
+/* WORK(IU) is LDWRKU by N and WORK(IR) is N by N */
+
+ ldwrku = (*lwork - *n * *n) / *n;
+ ldwrkr = *n;
+ }
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__3, &ierr);
+
+/* Copy R to VT, zeroing out below it */
+
+ clacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ if (*n > 1) {
+ i__3 = *n - 1;
+ i__2 = *n - 1;
+ claset_("L", &i__3, &i__2, &c_b1, &c_b1, &vt[vt_dim1
+ + 2], ldvt);
+ }
+
+/* Generate Q in A */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ cungqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__3, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in VT, copying result to WORK(IR) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__3 = *lwork - iwork + 1;
+ cgebrd_(n, n, &vt[vt_offset], ldvt, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &i__3, &
+ ierr);
+ clacpy_("L", n, n, &vt[vt_offset], ldvt, &work[ir], &
+ ldwrkr);
+
+/* Generate left vectors bidiagonalizing R in WORK(IR) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ cungbr_("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq], &
+ work[iwork], &i__3, &ierr);
+
+/* Generate right vectors bidiagonalizing R in VT */
+/* (CWorkspace: need N*N+3*N-1, prefer N*N+2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ cungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup],
+ &work[iwork], &i__3, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IR) and computing right */
+/* singular vectors of R in VT */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, n, n, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &work[ir], &ldwrkr, cdum, &c__1,
+ &rwork[irwork], info);
+ iu = itauq;
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IR), storing result in WORK(IU) and copying to A */
+/* (CWorkspace: need N*N+N, prefer N*N+M*N) */
+/* (RWorkspace: 0) */
+
+ i__3 = *m;
+ i__2 = ldwrku;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__3 : i__ <= i__3; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__4 = *m - i__ + 1;
+ chunk = min(i__4,ldwrku);
+ cgemm_("N", "N", &chunk, n, n, &c_b2, &a[i__ + a_dim1]
+, lda, &work[ir], &ldwrkr, &c_b1, &work[iu], &
+ ldwrku);
+ clacpy_("F", &chunk, n, &work[iu], &ldwrku, &a[i__ +
+ a_dim1], lda);
+/* L20: */
+ }
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__2, &ierr);
+
+/* Copy R to VT, zeroing out below it */
+
+ clacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ if (*n > 1) {
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ claset_("L", &i__2, &i__3, &c_b1, &c_b1, &vt[vt_dim1
+ + 2], ldvt);
+ }
+
+/* Generate Q in A */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in VT */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: N) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(n, n, &vt[vt_offset], ldvt, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &i__2, &
+ ierr);
+
+/* Multiply Q in A by left vectors bidiagonalizing R */
+/* (CWorkspace: need 2*N+M, prefer 2*N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunmbr_("Q", "R", "N", m, n, n, &vt[vt_offset], ldvt, &
+ work[itauq], &a[a_offset], lda, &work[iwork], &
+ i__2, &ierr);
+
+/* Generate right vectors bidiagonalizing R in VT */
+/* (CWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup],
+ &work[iwork], &i__2, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in A and computing right */
+/* singular vectors of A in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, n, m, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, cdum, &c__1,
+ &rwork[irwork], info);
+
+ }
+
+ } else if (wntus) {
+
+ if (wntvn) {
+
+/* Path 4 (M much larger than N, JOBU='S', JOBVT='N') */
+/* N left singular vectors to be computed in U and */
+/* no right singular vectors to be computed */
+
+ if (*lwork >= *n * *n + *n * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+ if (*lwork >= wrkbl + *lda * *n) {
+
+/* WORK(IR) is LDA by N */
+
+ ldwrkr = *lda;
+ } else {
+
+/* WORK(IR) is N by N */
+
+ ldwrkr = *n;
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ clacpy_("U", n, n, &a[a_offset], lda, &work[ir], &
+ ldwrkr);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ claset_("L", &i__2, &i__3, &c_b1, &c_b1, &work[ir + 1]
+, &ldwrkr);
+
+/* Generate Q in A */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungqr_(m, n, n, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IR) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Generate left vectors bidiagonalizing R in WORK(IR) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IR) */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, &c__0, n, &c__0, &s[1], &rwork[ie],
+ cdum, &c__1, &work[ir], &ldwrkr, cdum, &c__1,
+ &rwork[irwork], info);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IR), storing result in U */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: 0) */
+
+ cgemm_("N", "N", m, n, n, &c_b2, &a[a_offset], lda, &
+ work[ir], &ldwrkr, &c_b1, &u[u_offset], ldu);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ clacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungqr_(m, n, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Zero out below R in A */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ claset_("L", &i__2, &i__3, &c_b1, &c_b1, &a[a_dim1 +
+ 2], lda);
+
+/* Bidiagonalize R in A */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left vectors bidiagonalizing R */
+/* (CWorkspace: need 2*N+M, prefer 2*N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunmbr_("Q", "R", "N", m, n, n, &a[a_offset], lda, &
+ work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr)
+ ;
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, &c__0, m, &c__0, &s[1], &rwork[ie],
+ cdum, &c__1, &u[u_offset], ldu, cdum, &c__1, &
+ rwork[irwork], info);
+
+ }
+
+ } else if (wntvo) {
+
+/* Path 5 (M much larger than N, JOBU='S', JOBVT='O') */
+/* N left singular vectors to be computed in U and */
+/* N right singular vectors to be overwritten on A */
+
+ if (*lwork >= (*n << 1) * *n + *n * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + (*lda << 1) * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by N */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *lda;
+ } else if (*lwork >= wrkbl + (*lda + *n) * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is N by N */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *n;
+ } else {
+
+/* WORK(IU) is N by N and WORK(IR) is N by N */
+
+ ldwrku = *n;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *n;
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need 2*N*N+2*N, prefer 2*N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IU), zeroing out below it */
+
+ clacpy_("U", n, n, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ claset_("L", &i__2, &i__3, &c_b1, &c_b1, &work[iu + 1]
+, &ldwrku);
+
+/* Generate Q in A */
+/* (CWorkspace: need 2*N*N+2*N, prefer 2*N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungqr_(m, n, n, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IU), copying result to */
+/* WORK(IR) */
+/* (CWorkspace: need 2*N*N+3*N, */
+/* prefer 2*N*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(n, n, &work[iu], &ldwrku, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ clacpy_("U", n, n, &work[iu], &ldwrku, &work[ir], &
+ ldwrkr);
+
+/* Generate left bidiagonalizing vectors in WORK(IU) */
+/* (CWorkspace: need 2*N*N+3*N, prefer 2*N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", n, n, n, &work[iu], &ldwrku, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in WORK(IR) */
+/* (CWorkspace: need 2*N*N+3*N-1, */
+/* prefer 2*N*N+2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", n, n, n, &work[ir], &ldwrkr, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IU) and computing */
+/* right singular vectors of R in WORK(IR) */
+/* (CWorkspace: need 2*N*N) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, n, n, &c__0, &s[1], &rwork[ie], &work[
+ ir], &ldwrkr, &work[iu], &ldwrku, cdum, &c__1,
+ &rwork[irwork], info);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IU), storing result in U */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: 0) */
+
+ cgemm_("N", "N", m, n, n, &c_b2, &a[a_offset], lda, &
+ work[iu], &ldwrku, &c_b1, &u[u_offset], ldu);
+
+/* Copy right singular vectors of R to A */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: 0) */
+
+ clacpy_("F", n, n, &work[ir], &ldwrkr, &a[a_offset],
+ lda);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ clacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungqr_(m, n, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Zero out below R in A */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ claset_("L", &i__2, &i__3, &c_b1, &c_b1, &a[a_dim1 +
+ 2], lda);
+
+/* Bidiagonalize R in A */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left vectors bidiagonalizing R */
+/* (CWorkspace: need 2*N+M, prefer 2*N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunmbr_("Q", "R", "N", m, n, n, &a[a_offset], lda, &
+ work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr)
+ ;
+
+/* Generate right vectors bidiagonalizing R in A */
+/* (CWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", n, n, n, &a[a_offset], lda, &work[itaup],
+ &work[iwork], &i__2, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in A */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, n, m, &c__0, &s[1], &rwork[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, cdum, &
+ c__1, &rwork[irwork], info);
+
+ }
+
+ } else if (wntvas) {
+
+/* Path 6 (M much larger than N, JOBU='S', JOBVT='S' */
+/* or 'A') */
+/* N left singular vectors to be computed in U and */
+/* N right singular vectors to be computed in VT */
+
+ if (*lwork >= *n * *n + *n * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + *lda * *n) {
+
+/* WORK(IU) is LDA by N */
+
+ ldwrku = *lda;
+ } else {
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ }
+ itau = iu + ldwrku * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IU), zeroing out below it */
+
+ clacpy_("U", n, n, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ claset_("L", &i__2, &i__3, &c_b1, &c_b1, &work[iu + 1]
+, &ldwrku);
+
+/* Generate Q in A */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungqr_(m, n, n, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IU), copying result to VT */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(n, n, &work[iu], &ldwrku, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ clacpy_("U", n, n, &work[iu], &ldwrku, &vt[vt_offset],
+ ldvt);
+
+/* Generate left bidiagonalizing vectors in WORK(IU) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", n, n, n, &work[iu], &ldwrku, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in VT */
+/* (CWorkspace: need N*N+3*N-1, */
+/* prefer N*N+2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[
+ itaup], &work[iwork], &i__2, &ierr)
+ ;
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IU) and computing */
+/* right singular vectors of R in VT */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, n, n, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &work[iu], &ldwrku, cdum, &
+ c__1, &rwork[irwork], info);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IU), storing result in U */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: 0) */
+
+ cgemm_("N", "N", m, n, n, &c_b2, &a[a_offset], lda, &
+ work[iu], &ldwrku, &c_b1, &u[u_offset], ldu);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ clacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungqr_(m, n, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy R to VT, zeroing out below it */
+
+ clacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ if (*n > 1) {
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ claset_("L", &i__2, &i__3, &c_b1, &c_b1, &vt[
+ vt_dim1 + 2], ldvt);
+ }
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in VT */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(n, n, &vt[vt_offset], ldvt, &s[1], &rwork[ie],
+ &work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left bidiagonalizing vectors */
+/* in VT */
+/* (CWorkspace: need 2*N+M, prefer 2*N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunmbr_("Q", "R", "N", m, n, n, &vt[vt_offset], ldvt,
+ &work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in VT */
+/* (CWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[
+ itaup], &work[iwork], &i__2, &ierr)
+ ;
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, n, m, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, cdum, &
+ c__1, &rwork[irwork], info);
+
+ }
+
+ }
+
+ } else if (wntua) {
+
+ if (wntvn) {
+
+/* Path 7 (M much larger than N, JOBU='A', JOBVT='N') */
+/* M left singular vectors to be computed in U and */
+/* no right singular vectors to be computed */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *n * 3;
+ if (*lwork >= *n * *n + max(i__2,i__3)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+ if (*lwork >= wrkbl + *lda * *n) {
+
+/* WORK(IR) is LDA by N */
+
+ ldwrkr = *lda;
+ } else {
+
+/* WORK(IR) is N by N */
+
+ ldwrkr = *n;
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ clacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ clacpy_("U", n, n, &a[a_offset], lda, &work[ir], &
+ ldwrkr);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ claset_("L", &i__2, &i__3, &c_b1, &c_b1, &work[ir + 1]
+, &ldwrkr);
+
+/* Generate Q in U */
+/* (CWorkspace: need N*N+N+M, prefer N*N+N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IR) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in WORK(IR) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IR) */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, &c__0, n, &c__0, &s[1], &rwork[ie],
+ cdum, &c__1, &work[ir], &ldwrkr, cdum, &c__1,
+ &rwork[irwork], info);
+
+/* Multiply Q in U by left singular vectors of R in */
+/* WORK(IR), storing result in A */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: 0) */
+
+ cgemm_("N", "N", m, n, n, &c_b2, &u[u_offset], ldu, &
+ work[ir], &ldwrkr, &c_b1, &a[a_offset], lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ clacpy_("F", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ clacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (CWorkspace: need N+M, prefer N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Zero out below R in A */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ claset_("L", &i__2, &i__3, &c_b1, &c_b1, &a[a_dim1 +
+ 2], lda);
+
+/* Bidiagonalize R in A */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left bidiagonalizing vectors */
+/* in A */
+/* (CWorkspace: need 2*N+M, prefer 2*N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunmbr_("Q", "R", "N", m, n, n, &a[a_offset], lda, &
+ work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr)
+ ;
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, &c__0, m, &c__0, &s[1], &rwork[ie],
+ cdum, &c__1, &u[u_offset], ldu, cdum, &c__1, &
+ rwork[irwork], info);
+
+ }
+
+ } else if (wntvo) {
+
+/* Path 8 (M much larger than N, JOBU='A', JOBVT='O') */
+/* M left singular vectors to be computed in U and */
+/* N right singular vectors to be overwritten on A */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *n * 3;
+ if (*lwork >= (*n << 1) * *n + max(i__2,i__3)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + (*lda << 1) * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by N */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *lda;
+ } else if (*lwork >= wrkbl + (*lda + *n) * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is N by N */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *n;
+ } else {
+
+/* WORK(IU) is N by N and WORK(IR) is N by N */
+
+ ldwrku = *n;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *n;
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need 2*N*N+2*N, prefer 2*N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ clacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (CWorkspace: need 2*N*N+N+M, prefer 2*N*N+N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IU), zeroing out below it */
+
+ clacpy_("U", n, n, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ claset_("L", &i__2, &i__3, &c_b1, &c_b1, &work[iu + 1]
+, &ldwrku);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IU), copying result to */
+/* WORK(IR) */
+/* (CWorkspace: need 2*N*N+3*N, */
+/* prefer 2*N*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(n, n, &work[iu], &ldwrku, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ clacpy_("U", n, n, &work[iu], &ldwrku, &work[ir], &
+ ldwrkr);
+
+/* Generate left bidiagonalizing vectors in WORK(IU) */
+/* (CWorkspace: need 2*N*N+3*N, prefer 2*N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", n, n, n, &work[iu], &ldwrku, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in WORK(IR) */
+/* (CWorkspace: need 2*N*N+3*N-1, */
+/* prefer 2*N*N+2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", n, n, n, &work[ir], &ldwrkr, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IU) and computing */
+/* right singular vectors of R in WORK(IR) */
+/* (CWorkspace: need 2*N*N) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, n, n, &c__0, &s[1], &rwork[ie], &work[
+ ir], &ldwrkr, &work[iu], &ldwrku, cdum, &c__1,
+ &rwork[irwork], info);
+
+/* Multiply Q in U by left singular vectors of R in */
+/* WORK(IU), storing result in A */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: 0) */
+
+ cgemm_("N", "N", m, n, n, &c_b2, &u[u_offset], ldu, &
+ work[iu], &ldwrku, &c_b1, &a[a_offset], lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ clacpy_("F", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Copy right singular vectors of R from WORK(IR) to A */
+
+ clacpy_("F", n, n, &work[ir], &ldwrkr, &a[a_offset],
+ lda);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ clacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (CWorkspace: need N+M, prefer N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Zero out below R in A */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ claset_("L", &i__2, &i__3, &c_b1, &c_b1, &a[a_dim1 +
+ 2], lda);
+
+/* Bidiagonalize R in A */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left bidiagonalizing vectors */
+/* in A */
+/* (CWorkspace: need 2*N+M, prefer 2*N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunmbr_("Q", "R", "N", m, n, n, &a[a_offset], lda, &
+ work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr)
+ ;
+
+/* Generate right bidiagonalizing vectors in A */
+/* (CWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", n, n, n, &a[a_offset], lda, &work[itaup],
+ &work[iwork], &i__2, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in A */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, n, m, &c__0, &s[1], &rwork[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, cdum, &
+ c__1, &rwork[irwork], info);
+
+ }
+
+ } else if (wntvas) {
+
+/* Path 9 (M much larger than N, JOBU='A', JOBVT='S' */
+/* or 'A') */
+/* M left singular vectors to be computed in U and */
+/* N right singular vectors to be computed in VT */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *n * 3;
+ if (*lwork >= *n * *n + max(i__2,i__3)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + *lda * *n) {
+
+/* WORK(IU) is LDA by N */
+
+ ldwrku = *lda;
+ } else {
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ }
+ itau = iu + ldwrku * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ clacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (CWorkspace: need N*N+N+M, prefer N*N+N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IU), zeroing out below it */
+
+ clacpy_("U", n, n, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ claset_("L", &i__2, &i__3, &c_b1, &c_b1, &work[iu + 1]
+, &ldwrku);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IU), copying result to VT */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(n, n, &work[iu], &ldwrku, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ clacpy_("U", n, n, &work[iu], &ldwrku, &vt[vt_offset],
+ ldvt);
+
+/* Generate left bidiagonalizing vectors in WORK(IU) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", n, n, n, &work[iu], &ldwrku, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in VT */
+/* (CWorkspace: need N*N+3*N-1, */
+/* prefer N*N+2*N+(N-1)*NB) */
+/* (RWorkspace: need 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[
+ itaup], &work[iwork], &i__2, &ierr)
+ ;
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IU) and computing */
+/* right singular vectors of R in VT */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, n, n, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &work[iu], &ldwrku, cdum, &
+ c__1, &rwork[irwork], info);
+
+/* Multiply Q in U by left singular vectors of R in */
+/* WORK(IU), storing result in A */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: 0) */
+
+ cgemm_("N", "N", m, n, n, &c_b2, &u[u_offset], ldu, &
+ work[iu], &ldwrku, &c_b1, &a[a_offset], lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ clacpy_("F", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ clacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (CWorkspace: need N+M, prefer N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy R from A to VT, zeroing out below it */
+
+ clacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ if (*n > 1) {
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ claset_("L", &i__2, &i__3, &c_b1, &c_b1, &vt[
+ vt_dim1 + 2], ldvt);
+ }
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in VT */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(n, n, &vt[vt_offset], ldvt, &s[1], &rwork[ie],
+ &work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left bidiagonalizing vectors */
+/* in VT */
+/* (CWorkspace: need 2*N+M, prefer 2*N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunmbr_("Q", "R", "N", m, n, n, &vt[vt_offset], ldvt,
+ &work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in VT */
+/* (CWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[
+ itaup], &work[iwork], &i__2, &ierr)
+ ;
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, n, m, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, cdum, &
+ c__1, &rwork[irwork], info);
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* M .LT. MNTHR */
+
+/* Path 10 (M at least N, but not much larger) */
+/* Reduce to bidiagonal form without QR decomposition */
+
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize A */
+/* (CWorkspace: need 2*N+M, prefer 2*N+(M+N)*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[iwork], &i__2, &ierr);
+ if (wntuas) {
+
+/* If left singular vectors desired in U, copy result to U */
+/* and generate left bidiagonalizing vectors in U */
+/* (CWorkspace: need 2*N+NCU, prefer 2*N+NCU*NB) */
+/* (RWorkspace: 0) */
+
+ clacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ if (wntus) {
+ ncu = *n;
+ }
+ if (wntua) {
+ ncu = *m;
+ }
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", m, &ncu, n, &u[u_offset], ldu, &work[itauq], &
+ work[iwork], &i__2, &ierr);
+ }
+ if (wntvas) {
+
+/* If right singular vectors desired in VT, copy result to */
+/* VT and generate right bidiagonalizing vectors in VT */
+/* (CWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ clacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &
+ work[iwork], &i__2, &ierr);
+ }
+ if (wntuo) {
+
+/* If left singular vectors desired in A, generate left */
+/* bidiagonalizing vectors in A */
+/* (CWorkspace: need 3*N, prefer 2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &work[
+ iwork], &i__2, &ierr);
+ }
+ if (wntvo) {
+
+/* If right singular vectors desired in A, generate right */
+/* bidiagonalizing vectors in A */
+/* (CWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", n, n, n, &a[a_offset], lda, &work[itaup], &work[
+ iwork], &i__2, &ierr);
+ }
+ irwork = ie + *n;
+ if (wntuas || wntuo) {
+ nru = *m;
+ }
+ if (wntun) {
+ nru = 0;
+ }
+ if (wntvas || wntvo) {
+ ncvt = *n;
+ }
+ if (wntvn) {
+ ncvt = 0;
+ }
+ if (! wntuo && ! wntvo) {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in U and computing right singular */
+/* vectors in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, &ncvt, &nru, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, cdum, &c__1, &
+ rwork[irwork], info);
+ } else if (! wntuo && wntvo) {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in U and computing right singular */
+/* vectors in A */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, &ncvt, &nru, &c__0, &s[1], &rwork[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, cdum, &c__1, &
+ rwork[irwork], info);
+ } else {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in A and computing right singular */
+/* vectors in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", n, &ncvt, &nru, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, cdum, &c__1, &
+ rwork[irwork], info);
+ }
+
+ }
+
+ } else {
+
+/* A has more columns than rows. If A has sufficiently more */
+/* columns than rows, first reduce using the LQ decomposition (if */
+/* sufficient workspace available) */
+
+ if (*n >= mnthr) {
+
+ if (wntvn) {
+
+/* Path 1t(N much larger than M, JOBVT='N') */
+/* No right singular vectors to be computed */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork], &
+ i__2, &ierr);
+
+/* Zero out above L */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ claset_("U", &i__2, &i__3, &c_b1, &c_b1, &a[(a_dim1 << 1) + 1]
+, lda);
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in A */
+/* (CWorkspace: need 3*M, prefer 2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__2, &ierr);
+ if (wntuo || wntuas) {
+
+/* If left singular vectors desired, generate Q */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", m, m, m, &a[a_offset], lda, &work[itauq], &
+ work[iwork], &i__2, &ierr);
+ }
+ irwork = ie + *m;
+ nru = 0;
+ if (wntuo || wntuas) {
+ nru = *m;
+ }
+
+/* Perform bidiagonal QR iteration, computing left singular */
+/* vectors of A in A if desired */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", m, &c__0, &nru, &c__0, &s[1], &rwork[ie], cdum, &
+ c__1, &a[a_offset], lda, cdum, &c__1, &rwork[irwork],
+ info);
+
+/* If left singular vectors desired in U, copy them there */
+
+ if (wntuas) {
+ clacpy_("F", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ }
+
+ } else if (wntvo && wntun) {
+
+/* Path 2t(N much larger than M, JOBU='N', JOBVT='O') */
+/* M right singular vectors to be overwritten on A and */
+/* no left singular vectors to be computed */
+
+ if (*lwork >= *m * *m + *m * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *lda * *n;
+ if (*lwork >= max(i__2,i__3) + *lda * *m) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by M */
+
+ ldwrku = *lda;
+ chunk = *n;
+ ldwrkr = *lda;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *lda * *n;
+ if (*lwork >= max(i__2,i__3) + *m * *m) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is M by M */
+
+ ldwrku = *lda;
+ chunk = *n;
+ ldwrkr = *m;
+ } else {
+
+/* WORK(IU) is M by CHUNK and WORK(IR) is M by M */
+
+ ldwrku = *m;
+ chunk = (*lwork - *m * *m) / *m;
+ ldwrkr = *m;
+ }
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__2, &ierr);
+
+/* Copy L to WORK(IR) and zero out above it */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ claset_("U", &i__2, &i__3, &c_b1, &c_b1, &work[ir +
+ ldwrkr], &ldwrkr);
+
+/* Generate Q in A */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IR) */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(m, m, &work[ir], &ldwrkr, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &i__2, &
+ ierr);
+
+/* Generate right vectors bidiagonalizing L */
+/* (CWorkspace: need M*M+3*M-1, prefer M*M+2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", m, m, m, &work[ir], &ldwrkr, &work[itaup], &
+ work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of L in WORK(IR) */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", m, m, &c__0, &c__0, &s[1], &rwork[ie], &work[
+ ir], &ldwrkr, cdum, &c__1, cdum, &c__1, &rwork[
+ irwork], info);
+ iu = itauq;
+
+/* Multiply right singular vectors of L in WORK(IR) by Q */
+/* in A, storing result in WORK(IU) and copying to A */
+/* (CWorkspace: need M*M+M, prefer M*M+M*N) */
+/* (RWorkspace: 0) */
+
+ i__2 = *n;
+ i__3 = chunk;
+ for (i__ = 1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__3) {
+/* Computing MIN */
+ i__4 = *n - i__ + 1;
+ blk = min(i__4,chunk);
+ cgemm_("N", "N", m, &blk, m, &c_b2, &work[ir], &
+ ldwrkr, &a[i__ * a_dim1 + 1], lda, &c_b1, &
+ work[iu], &ldwrku);
+ clacpy_("F", m, &blk, &work[iu], &ldwrku, &a[i__ *
+ a_dim1 + 1], lda);
+/* L30: */
+ }
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize A */
+/* (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB) */
+/* (RWorkspace: need M) */
+
+ i__3 = *lwork - iwork + 1;
+ cgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__3, &ierr);
+
+/* Generate right vectors bidiagonalizing A */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ cungbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &
+ work[iwork], &i__3, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of A in A */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("L", m, n, &c__0, &c__0, &s[1], &rwork[ie], &a[
+ a_offset], lda, cdum, &c__1, cdum, &c__1, &rwork[
+ irwork], info);
+
+ }
+
+ } else if (wntvo && wntuas) {
+
+/* Path 3t(N much larger than M, JOBU='S' or 'A', JOBVT='O') */
+/* M right singular vectors to be overwritten on A and */
+/* M left singular vectors to be computed in U */
+
+ if (*lwork >= *m * *m + *m * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+/* Computing MAX */
+ i__3 = wrkbl, i__2 = *lda * *n;
+ if (*lwork >= max(i__3,i__2) + *lda * *m) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by M */
+
+ ldwrku = *lda;
+ chunk = *n;
+ ldwrkr = *lda;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__3 = wrkbl, i__2 = *lda * *n;
+ if (*lwork >= max(i__3,i__2) + *m * *m) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is M by M */
+
+ ldwrku = *lda;
+ chunk = *n;
+ ldwrkr = *m;
+ } else {
+
+/* WORK(IU) is M by CHUNK and WORK(IR) is M by M */
+
+ ldwrku = *m;
+ chunk = (*lwork - *m * *m) / *m;
+ ldwrkr = *m;
+ }
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__3, &ierr);
+
+/* Copy L to U, zeroing out above it */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__3 = *m - 1;
+ i__2 = *m - 1;
+ claset_("U", &i__3, &i__2, &c_b1, &c_b1, &u[(u_dim1 << 1)
+ + 1], ldu);
+
+/* Generate Q in A */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ cunglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__3, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in U, copying result to WORK(IR) */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__3 = *lwork - iwork + 1;
+ cgebrd_(m, m, &u[u_offset], ldu, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__3, &ierr);
+ clacpy_("U", m, m, &u[u_offset], ldu, &work[ir], &ldwrkr);
+
+/* Generate right vectors bidiagonalizing L in WORK(IR) */
+/* (CWorkspace: need M*M+3*M-1, prefer M*M+2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ cungbr_("P", m, m, m, &work[ir], &ldwrkr, &work[itaup], &
+ work[iwork], &i__3, &ierr);
+
+/* Generate left vectors bidiagonalizing L in U */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ cungbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq], &
+ work[iwork], &i__3, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in U, and computing right */
+/* singular vectors of L in WORK(IR) */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", m, m, m, &c__0, &s[1], &rwork[ie], &work[ir],
+ &ldwrkr, &u[u_offset], ldu, cdum, &c__1, &rwork[
+ irwork], info);
+ iu = itauq;
+
+/* Multiply right singular vectors of L in WORK(IR) by Q */
+/* in A, storing result in WORK(IU) and copying to A */
+/* (CWorkspace: need M*M+M, prefer M*M+M*N)) */
+/* (RWorkspace: 0) */
+
+ i__3 = *n;
+ i__2 = chunk;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__3 : i__ <= i__3; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__4 = *n - i__ + 1;
+ blk = min(i__4,chunk);
+ cgemm_("N", "N", m, &blk, m, &c_b2, &work[ir], &
+ ldwrkr, &a[i__ * a_dim1 + 1], lda, &c_b1, &
+ work[iu], &ldwrku);
+ clacpy_("F", m, &blk, &work[iu], &ldwrku, &a[i__ *
+ a_dim1 + 1], lda);
+/* L40: */
+ }
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__2, &ierr);
+
+/* Copy L to U, zeroing out above it */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ claset_("U", &i__2, &i__3, &c_b1, &c_b1, &u[(u_dim1 << 1)
+ + 1], ldu);
+
+/* Generate Q in A */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in U */
+/* (CWorkspace: need 3*M, prefer 2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(m, m, &u[u_offset], ldu, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__2, &ierr);
+
+/* Multiply right vectors bidiagonalizing L by Q in A */
+/* (CWorkspace: need 2*M+N, prefer 2*M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunmbr_("P", "L", "C", m, n, m, &u[u_offset], ldu, &work[
+ itaup], &a[a_offset], lda, &work[iwork], &i__2, &
+ ierr);
+
+/* Generate left vectors bidiagonalizing L in U */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq], &
+ work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in A */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", m, n, m, &c__0, &s[1], &rwork[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, cdum, &c__1, &
+ rwork[irwork], info);
+
+ }
+
+ } else if (wntvs) {
+
+ if (wntun) {
+
+/* Path 4t(N much larger than M, JOBU='N', JOBVT='S') */
+/* M right singular vectors to be computed in VT and */
+/* no left singular vectors to be computed */
+
+ if (*lwork >= *m * *m + *m * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+ if (*lwork >= wrkbl + *lda * *m) {
+
+/* WORK(IR) is LDA by M */
+
+ ldwrkr = *lda;
+ } else {
+
+/* WORK(IR) is M by M */
+
+ ldwrkr = *m;
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IR), zeroing out above it */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &work[ir], &
+ ldwrkr);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ claset_("U", &i__2, &i__3, &c_b1, &c_b1, &work[ir +
+ ldwrkr], &ldwrkr);
+
+/* Generate Q in A */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunglq_(m, n, m, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IR) */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(m, m, &work[ir], &ldwrkr, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Generate right vectors bidiagonalizing L in */
+/* WORK(IR) */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", m, m, m, &work[ir], &ldwrkr, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of L in WORK(IR) */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", m, m, &c__0, &c__0, &s[1], &rwork[ie], &
+ work[ir], &ldwrkr, cdum, &c__1, cdum, &c__1, &
+ rwork[irwork], info);
+
+/* Multiply right singular vectors of L in WORK(IR) by */
+/* Q in A, storing result in VT */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: 0) */
+
+ cgemm_("N", "N", m, n, m, &c_b2, &work[ir], &ldwrkr, &
+ a[a_offset], lda, &c_b1, &vt[vt_offset], ldvt);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy result to VT */
+
+ clacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunglq_(m, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Zero out above L in A */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ claset_("U", &i__2, &i__3, &c_b1, &c_b1, &a[(a_dim1 <<
+ 1) + 1], lda);
+
+/* Bidiagonalize L in A */
+/* (CWorkspace: need 3*M, prefer 2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right vectors bidiagonalizing L by Q in VT */
+/* (CWorkspace: need 2*M+N, prefer 2*M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunmbr_("P", "L", "C", m, n, m, &a[a_offset], lda, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of A in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", m, n, &c__0, &c__0, &s[1], &rwork[ie], &
+ vt[vt_offset], ldvt, cdum, &c__1, cdum, &c__1,
+ &rwork[irwork], info);
+
+ }
+
+ } else if (wntuo) {
+
+/* Path 5t(N much larger than M, JOBU='O', JOBVT='S') */
+/* M right singular vectors to be computed in VT and */
+/* M left singular vectors to be overwritten on A */
+
+ if (*lwork >= (*m << 1) * *m + *m * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + (*lda << 1) * *m) {
+
+/* WORK(IU) is LDA by M and WORK(IR) is LDA by M */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *lda;
+ } else if (*lwork >= wrkbl + (*lda + *m) * *m) {
+
+/* WORK(IU) is LDA by M and WORK(IR) is M by M */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *m;
+ } else {
+
+/* WORK(IU) is M by M and WORK(IR) is M by M */
+
+ ldwrku = *m;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *m;
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need 2*M*M+2*M, prefer 2*M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IU), zeroing out below it */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ claset_("U", &i__2, &i__3, &c_b1, &c_b1, &work[iu +
+ ldwrku], &ldwrku);
+
+/* Generate Q in A */
+/* (CWorkspace: need 2*M*M+2*M, prefer 2*M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunglq_(m, n, m, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IU), copying result to */
+/* WORK(IR) */
+/* (CWorkspace: need 2*M*M+3*M, */
+/* prefer 2*M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(m, m, &work[iu], &ldwrku, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ clacpy_("L", m, m, &work[iu], &ldwrku, &work[ir], &
+ ldwrkr);
+
+/* Generate right bidiagonalizing vectors in WORK(IU) */
+/* (CWorkspace: need 2*M*M+3*M-1, */
+/* prefer 2*M*M+2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", m, m, m, &work[iu], &ldwrku, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in WORK(IR) */
+/* (CWorkspace: need 2*M*M+3*M, prefer 2*M*M+2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", m, m, m, &work[ir], &ldwrkr, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in WORK(IR) and computing */
+/* right singular vectors of L in WORK(IU) */
+/* (CWorkspace: need 2*M*M) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", m, m, m, &c__0, &s[1], &rwork[ie], &work[
+ iu], &ldwrku, &work[ir], &ldwrkr, cdum, &c__1,
+ &rwork[irwork], info);
+
+/* Multiply right singular vectors of L in WORK(IU) by */
+/* Q in A, storing result in VT */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: 0) */
+
+ cgemm_("N", "N", m, n, m, &c_b2, &work[iu], &ldwrku, &
+ a[a_offset], lda, &c_b1, &vt[vt_offset], ldvt);
+
+/* Copy left singular vectors of L to A */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: 0) */
+
+ clacpy_("F", m, m, &work[ir], &ldwrkr, &a[a_offset],
+ lda);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ clacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunglq_(m, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Zero out above L in A */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ claset_("U", &i__2, &i__3, &c_b1, &c_b1, &a[(a_dim1 <<
+ 1) + 1], lda);
+
+/* Bidiagonalize L in A */
+/* (CWorkspace: need 3*M, prefer 2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right vectors bidiagonalizing L by Q in VT */
+/* (CWorkspace: need 2*M+N, prefer 2*M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunmbr_("P", "L", "C", m, n, m, &a[a_offset], lda, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors of L in A */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", m, m, m, &a[a_offset], lda, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in A and computing right */
+/* singular vectors of A in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", m, n, m, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, cdum, &
+ c__1, &rwork[irwork], info);
+
+ }
+
+ } else if (wntuas) {
+
+/* Path 6t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='S') */
+/* M right singular vectors to be computed in VT and */
+/* M left singular vectors to be computed in U */
+
+ if (*lwork >= *m * *m + *m * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + *lda * *m) {
+
+/* WORK(IU) is LDA by N */
+
+ ldwrku = *lda;
+ } else {
+
+/* WORK(IU) is LDA by M */
+
+ ldwrku = *m;
+ }
+ itau = iu + ldwrku * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IU), zeroing out above it */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ claset_("U", &i__2, &i__3, &c_b1, &c_b1, &work[iu +
+ ldwrku], &ldwrku);
+
+/* Generate Q in A */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunglq_(m, n, m, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IU), copying result to U */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(m, m, &work[iu], &ldwrku, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ clacpy_("L", m, m, &work[iu], &ldwrku, &u[u_offset],
+ ldu);
+
+/* Generate right bidiagonalizing vectors in WORK(IU) */
+/* (CWorkspace: need M*M+3*M-1, */
+/* prefer M*M+2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", m, m, m, &work[iu], &ldwrku, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in U */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in U and computing right */
+/* singular vectors of L in WORK(IU) */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", m, m, m, &c__0, &s[1], &rwork[ie], &work[
+ iu], &ldwrku, &u[u_offset], ldu, cdum, &c__1,
+ &rwork[irwork], info);
+
+/* Multiply right singular vectors of L in WORK(IU) by */
+/* Q in A, storing result in VT */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: 0) */
+
+ cgemm_("N", "N", m, n, m, &c_b2, &work[iu], &ldwrku, &
+ a[a_offset], lda, &c_b1, &vt[vt_offset], ldvt);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ clacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunglq_(m, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy L to U, zeroing out above it */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &u[u_offset],
+ ldu);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ claset_("U", &i__2, &i__3, &c_b1, &c_b1, &u[(u_dim1 <<
+ 1) + 1], ldu);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in U */
+/* (CWorkspace: need 3*M, prefer 2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(m, m, &u[u_offset], ldu, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right bidiagonalizing vectors in U by Q */
+/* in VT */
+/* (CWorkspace: need 2*M+N, prefer 2*M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunmbr_("P", "L", "C", m, n, m, &u[u_offset], ldu, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in U */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", m, n, m, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, cdum, &
+ c__1, &rwork[irwork], info);
+
+ }
+
+ }
+
+ } else if (wntva) {
+
+ if (wntun) {
+
+/* Path 7t(N much larger than M, JOBU='N', JOBVT='A') */
+/* N right singular vectors to be computed in VT and */
+/* no left singular vectors to be computed */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *m * 3;
+ if (*lwork >= *m * *m + max(i__2,i__3)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+ if (*lwork >= wrkbl + *lda * *m) {
+
+/* WORK(IR) is LDA by M */
+
+ ldwrkr = *lda;
+ } else {
+
+/* WORK(IR) is M by M */
+
+ ldwrkr = *m;
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ clacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Copy L to WORK(IR), zeroing out above it */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &work[ir], &
+ ldwrkr);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ claset_("U", &i__2, &i__3, &c_b1, &c_b1, &work[ir +
+ ldwrkr], &ldwrkr);
+
+/* Generate Q in VT */
+/* (CWorkspace: need M*M+M+N, prefer M*M+M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IR) */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(m, m, &work[ir], &ldwrkr, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in WORK(IR) */
+/* (CWorkspace: need M*M+3*M-1, */
+/* prefer M*M+2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", m, m, m, &work[ir], &ldwrkr, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of L in WORK(IR) */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", m, m, &c__0, &c__0, &s[1], &rwork[ie], &
+ work[ir], &ldwrkr, cdum, &c__1, cdum, &c__1, &
+ rwork[irwork], info);
+
+/* Multiply right singular vectors of L in WORK(IR) by */
+/* Q in VT, storing result in A */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: 0) */
+
+ cgemm_("N", "N", m, n, m, &c_b2, &work[ir], &ldwrkr, &
+ vt[vt_offset], ldvt, &c_b1, &a[a_offset], lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ clacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ clacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (CWorkspace: need M+N, prefer M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Zero out above L in A */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ claset_("U", &i__2, &i__3, &c_b1, &c_b1, &a[(a_dim1 <<
+ 1) + 1], lda);
+
+/* Bidiagonalize L in A */
+/* (CWorkspace: need 3*M, prefer 2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right bidiagonalizing vectors in A by Q */
+/* in VT */
+/* (CWorkspace: need 2*M+N, prefer 2*M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunmbr_("P", "L", "C", m, n, m, &a[a_offset], lda, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of A in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", m, n, &c__0, &c__0, &s[1], &rwork[ie], &
+ vt[vt_offset], ldvt, cdum, &c__1, cdum, &c__1,
+ &rwork[irwork], info);
+
+ }
+
+ } else if (wntuo) {
+
+/* Path 8t(N much larger than M, JOBU='O', JOBVT='A') */
+/* N right singular vectors to be computed in VT and */
+/* M left singular vectors to be overwritten on A */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *m * 3;
+ if (*lwork >= (*m << 1) * *m + max(i__2,i__3)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + (*lda << 1) * *m) {
+
+/* WORK(IU) is LDA by M and WORK(IR) is LDA by M */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *lda;
+ } else if (*lwork >= wrkbl + (*lda + *m) * *m) {
+
+/* WORK(IU) is LDA by M and WORK(IR) is M by M */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *m;
+ } else {
+
+/* WORK(IU) is M by M and WORK(IR) is M by M */
+
+ ldwrku = *m;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *m;
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (CWorkspace: need 2*M*M+2*M, prefer 2*M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ clacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (CWorkspace: need 2*M*M+M+N, prefer 2*M*M+M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IU), zeroing out above it */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ claset_("U", &i__2, &i__3, &c_b1, &c_b1, &work[iu +
+ ldwrku], &ldwrku);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IU), copying result to */
+/* WORK(IR) */
+/* (CWorkspace: need 2*M*M+3*M, */
+/* prefer 2*M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(m, m, &work[iu], &ldwrku, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ clacpy_("L", m, m, &work[iu], &ldwrku, &work[ir], &
+ ldwrkr);
+
+/* Generate right bidiagonalizing vectors in WORK(IU) */
+/* (CWorkspace: need 2*M*M+3*M-1, */
+/* prefer 2*M*M+2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", m, m, m, &work[iu], &ldwrku, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in WORK(IR) */
+/* (CWorkspace: need 2*M*M+3*M, prefer 2*M*M+2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", m, m, m, &work[ir], &ldwrkr, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in WORK(IR) and computing */
+/* right singular vectors of L in WORK(IU) */
+/* (CWorkspace: need 2*M*M) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", m, m, m, &c__0, &s[1], &rwork[ie], &work[
+ iu], &ldwrku, &work[ir], &ldwrkr, cdum, &c__1,
+ &rwork[irwork], info);
+
+/* Multiply right singular vectors of L in WORK(IU) by */
+/* Q in VT, storing result in A */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: 0) */
+
+ cgemm_("N", "N", m, n, m, &c_b2, &work[iu], &ldwrku, &
+ vt[vt_offset], ldvt, &c_b1, &a[a_offset], lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ clacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Copy left singular vectors of A from WORK(IR) to A */
+
+ clacpy_("F", m, m, &work[ir], &ldwrkr, &a[a_offset],
+ lda);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ clacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (CWorkspace: need M+N, prefer M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Zero out above L in A */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ claset_("U", &i__2, &i__3, &c_b1, &c_b1, &a[(a_dim1 <<
+ 1) + 1], lda);
+
+/* Bidiagonalize L in A */
+/* (CWorkspace: need 3*M, prefer 2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right bidiagonalizing vectors in A by Q */
+/* in VT */
+/* (CWorkspace: need 2*M+N, prefer 2*M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunmbr_("P", "L", "C", m, n, m, &a[a_offset], lda, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in A */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", m, m, m, &a[a_offset], lda, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in A and computing right */
+/* singular vectors of A in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", m, n, m, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, cdum, &
+ c__1, &rwork[irwork], info);
+
+ }
+
+ } else if (wntuas) {
+
+/* Path 9t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='A') */
+/* N right singular vectors to be computed in VT and */
+/* M left singular vectors to be computed in U */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *m * 3;
+ if (*lwork >= *m * *m + max(i__2,i__3)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + *lda * *m) {
+
+/* WORK(IU) is LDA by M */
+
+ ldwrku = *lda;
+ } else {
+
+/* WORK(IU) is M by M */
+
+ ldwrku = *m;
+ }
+ itau = iu + ldwrku * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ clacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (CWorkspace: need M*M+M+N, prefer M*M+M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IU), zeroing out above it */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ claset_("U", &i__2, &i__3, &c_b1, &c_b1, &work[iu +
+ ldwrku], &ldwrku);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IU), copying result to U */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(m, m, &work[iu], &ldwrku, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ clacpy_("L", m, m, &work[iu], &ldwrku, &u[u_offset],
+ ldu);
+
+/* Generate right bidiagonalizing vectors in WORK(IU) */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", m, m, m, &work[iu], &ldwrku, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in U */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in U and computing right */
+/* singular vectors of L in WORK(IU) */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", m, m, m, &c__0, &s[1], &rwork[ie], &work[
+ iu], &ldwrku, &u[u_offset], ldu, cdum, &c__1,
+ &rwork[irwork], info);
+
+/* Multiply right singular vectors of L in WORK(IU) by */
+/* Q in VT, storing result in A */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: 0) */
+
+ cgemm_("N", "N", m, n, m, &c_b2, &work[iu], &ldwrku, &
+ vt[vt_offset], ldvt, &c_b1, &a[a_offset], lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ clacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ clacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (CWorkspace: need M+N, prefer M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy L to U, zeroing out above it */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &u[u_offset],
+ ldu);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ claset_("U", &i__2, &i__3, &c_b1, &c_b1, &u[(u_dim1 <<
+ 1) + 1], ldu);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in U */
+/* (CWorkspace: need 3*M, prefer 2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(m, m, &u[u_offset], ldu, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right bidiagonalizing vectors in U by Q */
+/* in VT */
+/* (CWorkspace: need 2*M+N, prefer 2*M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cunmbr_("P", "L", "C", m, n, m, &u[u_offset], ldu, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in U */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("U", m, n, m, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, cdum, &
+ c__1, &rwork[irwork], info);
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* N .LT. MNTHR */
+
+/* Path 10t(N greater than M, but not much larger) */
+/* Reduce to bidiagonal form without LQ decomposition */
+
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize A */
+/* (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB) */
+/* (RWorkspace: M) */
+
+ i__2 = *lwork - iwork + 1;
+ cgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[iwork], &i__2, &ierr);
+ if (wntuas) {
+
+/* If left singular vectors desired in U, copy result to U */
+/* and generate left bidiagonalizing vectors in U */
+/* (CWorkspace: need 3*M-1, prefer 2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ iwork], &i__2, &ierr);
+ }
+ if (wntvas) {
+
+/* If right singular vectors desired in VT, copy result to */
+/* VT and generate right bidiagonalizing vectors in VT */
+/* (CWorkspace: need 2*M+NRVT, prefer 2*M+NRVT*NB) */
+/* (RWorkspace: 0) */
+
+ clacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ if (wntva) {
+ nrvt = *n;
+ }
+ if (wntvs) {
+ nrvt = *m;
+ }
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", &nrvt, n, m, &vt[vt_offset], ldvt, &work[itaup],
+ &work[iwork], &i__2, &ierr);
+ }
+ if (wntuo) {
+
+/* If left singular vectors desired in A, generate left */
+/* bidiagonalizing vectors in A */
+/* (CWorkspace: need 3*M-1, prefer 2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("Q", m, m, n, &a[a_offset], lda, &work[itauq], &work[
+ iwork], &i__2, &ierr);
+ }
+ if (wntvo) {
+
+/* If right singular vectors desired in A, generate right */
+/* bidiagonalizing vectors in A */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ cungbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &work[
+ iwork], &i__2, &ierr);
+ }
+ irwork = ie + *m;
+ if (wntuas || wntuo) {
+ nru = *m;
+ }
+ if (wntun) {
+ nru = 0;
+ }
+ if (wntvas || wntvo) {
+ ncvt = *n;
+ }
+ if (wntvn) {
+ ncvt = 0;
+ }
+ if (! wntuo && ! wntvo) {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in U and computing right singular */
+/* vectors in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("L", m, &ncvt, &nru, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, cdum, &c__1, &
+ rwork[irwork], info);
+ } else if (! wntuo && wntvo) {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in U and computing right singular */
+/* vectors in A */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("L", m, &ncvt, &nru, &c__0, &s[1], &rwork[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, cdum, &c__1, &
+ rwork[irwork], info);
+ } else {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in A and computing right singular */
+/* vectors in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ cbdsqr_("L", m, &ncvt, &nru, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, cdum, &c__1, &
+ rwork[irwork], info);
+ }
+
+ }
+
+ }
+
+/* Undo scaling if necessary */
+
+ if (iscl == 1) {
+ if (anrm > bignum) {
+ slascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ if (*info != 0 && anrm > bignum) {
+ i__2 = minmn - 1;
+ slascl_("G", &c__0, &c__0, &bignum, &anrm, &i__2, &c__1, &rwork[
+ ie], &minmn, &ierr);
+ }
+ if (anrm < smlnum) {
+ slascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ if (*info != 0 && anrm < smlnum) {
+ i__2 = minmn - 1;
+ slascl_("G", &c__0, &c__0, &smlnum, &anrm, &i__2, &c__1, &rwork[
+ ie], &minmn, &ierr);
+ }
+ }
+
+/* Return optimal workspace in WORK(1) */
+
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CGESVD */
+
+} /* cgesvd_ */
diff --git a/contrib/libs/clapack/cgesvx.c b/contrib/libs/clapack/cgesvx.c
new file mode 100644
index 0000000000..3dffd5265e
--- /dev/null
+++ b/contrib/libs/clapack/cgesvx.c
@@ -0,0 +1,605 @@
+/* cgesvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cgesvx_(char *fact, char *trans, integer *n, integer *
+ nrhs, complex *a, integer *lda, complex *af, integer *ldaf, integer *
+ ipiv, char *equed, real *r__, real *c__, complex *b, integer *ldb,
+ complex *x, integer *ldx, real *rcond, real *ferr, real *berr,
+ complex *work, real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+ x_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, j;
+ real amax;
+ char norm[1];
+ extern logical lsame_(char *, char *);
+ real rcmin, rcmax, anorm;
+ logical equil;
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ extern /* Subroutine */ int claqge_(integer *, integer *, complex *,
+ integer *, real *, real *, real *, real *, real *, char *)
+ , cgecon_(char *, integer *, complex *, integer *, real *, real *,
+ complex *, real *, integer *);
+ real colcnd;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int cgeequ_(integer *, integer *, complex *,
+ integer *, real *, real *, real *, real *, real *, integer *);
+ logical nofact;
+ extern /* Subroutine */ int cgerfs_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *, integer *, complex *, integer
+ *, complex *, integer *, real *, real *, complex *, real *,
+ integer *), cgetrf_(integer *, integer *, complex *,
+ integer *, integer *, integer *), clacpy_(char *, integer *,
+ integer *, complex *, integer *, complex *, integer *),
+ xerbla_(char *, integer *);
+ real bignum;
+ extern doublereal clantr_(char *, char *, char *, integer *, integer *,
+ complex *, integer *, real *);
+ integer infequ;
+ logical colequ;
+ extern /* Subroutine */ int cgetrs_(char *, integer *, integer *, complex
+ *, integer *, integer *, complex *, integer *, integer *);
+ real rowcnd;
+ logical notran;
+ real smlnum;
+ logical rowequ;
+ real rpvgrw;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGESVX uses the LU factorization to compute the solution to a complex */
+/* system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N matrix and X and B are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/* the system: */
+/* TRANS = 'N': diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B */
+/* TRANS = 'T': (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B */
+/* TRANS = 'C': (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B */
+/* Whether or not the system will be equilibrated depends on the */
+/* scaling of the matrix A, but if equilibration is used, A is */
+/* overwritten by diag(R)*A*diag(C) and B by diag(R)*B (if TRANS='N') */
+/* or diag(C)*B (if TRANS = 'T' or 'C'). */
+
+/* 2. If FACT = 'N' or 'E', the LU decomposition is used to factor the */
+/* matrix A (after equilibration if FACT = 'E') as */
+/* A = P * L * U, */
+/* where P is a permutation matrix, L is a unit lower triangular */
+/* matrix, and U is upper triangular. */
+
+/* 3. If some U(i,i)=0, so that U is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 4. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 5. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* 6. If equilibration was used, the matrix X is premultiplied by */
+/* diag(C) (if TRANS = 'N') or diag(R) (if TRANS = 'T' or 'C') so */
+/* that it solves the original system before equilibration. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of the matrix A is */
+/* supplied on entry, and if not, whether the matrix A should be */
+/* equilibrated before it is factored. */
+/* = 'F': On entry, AF and IPIV contain the factored form of A. */
+/* If EQUED is not 'N', the matrix A has been */
+/* equilibrated with scaling factors given by R and C. */
+/* A, AF, and IPIV are not modified. */
+/* = 'N': The matrix A will be copied to AF and factored. */
+/* = 'E': The matrix A will be equilibrated if necessary, then */
+/* copied to AF and factored. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the N-by-N matrix A. If FACT = 'F' and EQUED is */
+/* not 'N', then A must have been equilibrated by the scaling */
+/* factors in R and/or C. A is not modified if FACT = 'F' or */
+/* 'N', or if FACT = 'E' and EQUED = 'N' on exit. */
+
+/* On exit, if EQUED .ne. 'N', A is scaled as follows: */
+/* EQUED = 'R': A := diag(R) * A */
+/* EQUED = 'C': A := A * diag(C) */
+/* EQUED = 'B': A := diag(R) * A * diag(C). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input or output) COMPLEX array, dimension (LDAF,N) */
+/* If FACT = 'F', then AF is an input argument and on entry */
+/* contains the factors L and U from the factorization */
+/* A = P*L*U as computed by CGETRF. If EQUED .ne. 'N', then */
+/* AF is the factored form of the equilibrated matrix A. */
+
+/* If FACT = 'N', then AF is an output argument and on exit */
+/* returns the factors L and U from the factorization A = P*L*U */
+/* of the original matrix A. */
+
+/* If FACT = 'E', then AF is an output argument and on exit */
+/* returns the factors L and U from the factorization A = P*L*U */
+/* of the equilibrated matrix A (see the description of A for */
+/* the form of the equilibrated matrix). */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains the pivot indices from the factorization A = P*L*U */
+/* as computed by CGETRF; row i of the matrix was interchanged */
+/* with row IPIV(i). */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains the pivot indices from the factorization A = P*L*U */
+/* of the original matrix A. */
+
+/* If FACT = 'E', then IPIV is an output argument and on exit */
+/* contains the pivot indices from the factorization A = P*L*U */
+/* of the equilibrated matrix A. */
+
+/* EQUED (input or output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration (always true if FACT = 'N'). */
+/* = 'R': Row equilibration, i.e., A has been premultiplied by */
+/* diag(R). */
+/* = 'C': Column equilibration, i.e., A has been postmultiplied */
+/* by diag(C). */
+/* = 'B': Both row and column equilibration, i.e., A has been */
+/* replaced by diag(R) * A * diag(C). */
+/* EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/* output argument. */
+
+/* R (input or output) REAL array, dimension (N) */
+/* The row scale factors for A. If EQUED = 'R' or 'B', A is */
+/* multiplied on the left by diag(R); if EQUED = 'N' or 'C', R */
+/* is not accessed. R is an input argument if FACT = 'F'; */
+/* otherwise, R is an output argument. If FACT = 'F' and */
+/* EQUED = 'R' or 'B', each element of R must be positive. */
+
+/* C (input or output) REAL array, dimension (N) */
+/* The column scale factors for A. If EQUED = 'C' or 'B', A is */
+/* multiplied on the right by diag(C); if EQUED = 'N' or 'R', C */
+/* is not accessed. C is an input argument if FACT = 'F'; */
+/* otherwise, C is an output argument. If FACT = 'F' and */
+/* EQUED = 'C' or 'B', each element of C must be positive. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, */
+/* if EQUED = 'N', B is not modified; */
+/* if TRANS = 'N' and EQUED = 'R' or 'B', B is overwritten by */
+/* diag(R)*B; */
+/* if TRANS = 'T' or 'C' and EQUED = 'C' or 'B', B is */
+/* overwritten by diag(C)*B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X */
+/* to the original system of equations. Note that A and B are */
+/* modified on exit if EQUED .ne. 'N', and the solution to the */
+/* equilibrated system is inv(diag(C))*X if TRANS = 'N' and */
+/* EQUED = 'C' or 'B', or inv(diag(R))*X if TRANS = 'T' or 'C' */
+/* and EQUED = 'R' or 'B'. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) REAL */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A after equilibration (if done). If RCOND is less than the */
+/* machine precision (in particular, if RCOND = 0), the matrix */
+/* is singular to working precision. This condition is */
+/* indicated by a return code of INFO > 0. */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace/output) REAL array, dimension (2*N) */
+/* On exit, RWORK(1) contains the reciprocal pivot growth */
+/* factor norm(A)/norm(U). The "max absolute element" norm is */
+/* used. If RWORK(1) is much less than 1, then the stability */
+/* of the LU factorization of the (equilibrated) matrix A */
+/* could be poor. This also means that the solution X, condition */
+/* estimator RCOND, and forward error bound FERR could be */
+/* unreliable. If factorization fails with 0<INFO<=N, then */
+/* RWORK(1) contains the reciprocal pivot growth factor for the */
+/* leading INFO columns of A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: U(i,i) is exactly zero. The factorization has */
+/* been completed, but the factor U is exactly */
+/* singular, so the solution and error bounds */
+/* could not be computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ af_dim1 = *ldaf;
+ af_offset = 1 + af_dim1;
+ af -= af_offset;
+ --ipiv;
+ --r__;
+ --c__;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ equil = lsame_(fact, "E");
+ notran = lsame_(trans, "N");
+ if (nofact || equil) {
+ *(unsigned char *)equed = 'N';
+ rowequ = FALSE_;
+ colequ = FALSE_;
+ } else {
+ rowequ = lsame_(equed, "R") || lsame_(equed,
+ "B");
+ colequ = lsame_(equed, "C") || lsame_(equed,
+ "B");
+ smlnum = slamch_("Safe minimum");
+ bignum = 1.f / smlnum;
+ }
+
+/* Test the input parameters. */
+
+ if (! nofact && ! equil && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else if (*ldaf < max(1,*n)) {
+ *info = -8;
+ } else if (lsame_(fact, "F") && ! (rowequ || colequ
+ || lsame_(equed, "N"))) {
+ *info = -10;
+ } else {
+ if (rowequ) {
+ rcmin = bignum;
+ rcmax = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ r__1 = rcmin, r__2 = r__[j];
+ rcmin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = rcmax, r__2 = r__[j];
+ rcmax = dmax(r__1,r__2);
+/* L10: */
+ }
+ if (rcmin <= 0.f) {
+ *info = -11;
+ } else if (*n > 0) {
+ rowcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+ } else {
+ rowcnd = 1.f;
+ }
+ }
+ if (colequ && *info == 0) {
+ rcmin = bignum;
+ rcmax = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ r__1 = rcmin, r__2 = c__[j];
+ rcmin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = rcmax, r__2 = c__[j];
+ rcmax = dmax(r__1,r__2);
+/* L20: */
+ }
+ if (rcmin <= 0.f) {
+ *info = -12;
+ } else if (*n > 0) {
+ colcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+ } else {
+ colcnd = 1.f;
+ }
+ }
+ if (*info == 0) {
+ if (*ldb < max(1,*n)) {
+ *info = -14;
+ } else if (*ldx < max(1,*n)) {
+ *info = -16;
+ }
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGESVX", &i__1);
+ return 0;
+ }
+
+ if (equil) {
+
+/* Compute row and column scalings to equilibrate the matrix A. */
+
+ cgeequ_(n, n, &a[a_offset], lda, &r__[1], &c__[1], &rowcnd, &colcnd, &
+ amax, &infequ);
+ if (infequ == 0) {
+
+/* Equilibrate the matrix. */
+
+ claqge_(n, n, &a[a_offset], lda, &r__[1], &c__[1], &rowcnd, &
+ colcnd, &amax, equed);
+ rowequ = lsame_(equed, "R") || lsame_(equed,
+ "B");
+ colequ = lsame_(equed, "C") || lsame_(equed,
+ "B");
+ }
+ }
+
+/* Scale the right hand side. */
+
+ if (notran) {
+ if (rowequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__;
+ i__5 = i__ + j * b_dim1;
+ q__1.r = r__[i__4] * b[i__5].r, q__1.i = r__[i__4] * b[
+ i__5].i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else if (colequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__;
+ i__5 = i__ + j * b_dim1;
+ q__1.r = c__[i__4] * b[i__5].r, q__1.i = c__[i__4] * b[i__5]
+ .i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+ if (nofact || equil) {
+
+/* Compute the LU factorization of A. */
+
+ clacpy_("Full", n, n, &a[a_offset], lda, &af[af_offset], ldaf);
+ cgetrf_(n, n, &af[af_offset], ldaf, &ipiv[1], info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+
+/* Compute the reciprocal pivot growth factor of the */
+/* leading rank-deficient INFO columns of A. */
+
+ rpvgrw = clantr_("M", "U", "N", info, info, &af[af_offset], ldaf,
+ &rwork[1]);
+ if (rpvgrw == 0.f) {
+ rpvgrw = 1.f;
+ } else {
+ rpvgrw = clange_("M", n, info, &a[a_offset], lda, &rwork[1]) / rpvgrw;
+ }
+ rwork[1] = rpvgrw;
+ *rcond = 0.f;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A and the */
+/* reciprocal pivot growth factor RPVGRW. */
+
+ if (notran) {
+ *(unsigned char *)norm = '1';
+ } else {
+ *(unsigned char *)norm = 'I';
+ }
+ anorm = clange_(norm, n, n, &a[a_offset], lda, &rwork[1]);
+ rpvgrw = clantr_("M", "U", "N", n, n, &af[af_offset], ldaf, &rwork[1]);
+ if (rpvgrw == 0.f) {
+ rpvgrw = 1.f;
+ } else {
+ rpvgrw = clange_("M", n, n, &a[a_offset], lda, &rwork[1]) /
+ rpvgrw;
+ }
+
+/* Compute the reciprocal of the condition number of A. */
+
+ cgecon_(norm, n, &af[af_offset], ldaf, &anorm, rcond, &work[1], &rwork[1],
+ info);
+
+/* Compute the solution matrix X. */
+
+ clacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ cgetrs_(trans, n, nrhs, &af[af_offset], ldaf, &ipiv[1], &x[x_offset], ldx,
+ info);
+
+/* Use iterative refinement to improve the computed solution and */
+/* compute error bounds and backward error estimates for it. */
+
+ cgerfs_(trans, n, nrhs, &a[a_offset], lda, &af[af_offset], ldaf, &ipiv[1],
+ &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[
+ 1], &rwork[1], info);
+
+/* Transform the solution matrix X to a solution of the original */
+/* system. */
+
+ if (notran) {
+ if (colequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * x_dim1;
+ i__4 = i__;
+ i__5 = i__ + j * x_dim1;
+ q__1.r = c__[i__4] * x[i__5].r, q__1.i = c__[i__4] * x[
+ i__5].i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] /= colcnd;
+/* L90: */
+ }
+ }
+ } else if (rowequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * x_dim1;
+ i__4 = i__;
+ i__5 = i__ + j * x_dim1;
+ q__1.r = r__[i__4] * x[i__5].r, q__1.i = r__[i__4] * x[i__5]
+ .i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+/* L100: */
+ }
+/* L110: */
+ }
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] /= rowcnd;
+/* L120: */
+ }
+ }
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < slamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ rwork[1] = rpvgrw;
+ return 0;
+
+/* End of CGESVX */
+
+} /* cgesvx_ */
diff --git a/contrib/libs/clapack/cgetc2.c b/contrib/libs/clapack/cgetc2.c
new file mode 100644
index 0000000000..bf2921ec15
--- /dev/null
+++ b/contrib/libs/clapack/cgetc2.c
@@ -0,0 +1,208 @@
+/* cgetc2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static complex c_b10 = {-1.f,-0.f};
+
+/* Subroutine */ int cgetc2_(integer *n, complex *a, integer *lda, integer *
+ ipiv, integer *jpiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1;
+ complex q__1;
+
+ /* Builtin functions */
+ double c_abs(complex *);
+ void c_div(complex *, complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, ip, jp;
+ real eps;
+ integer ipv, jpv;
+ real smin, xmax;
+ extern /* Subroutine */ int cgeru_(integer *, integer *, complex *,
+ complex *, integer *, complex *, integer *, complex *, integer *),
+ cswap_(integer *, complex *, integer *, complex *, integer *),
+ slabad_(real *, real *);
+ extern doublereal slamch_(char *);
+ real bignum, smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGETC2 computes an LU factorization, using complete pivoting, of the */
+/* n-by-n matrix A. The factorization has the form A = P * L * U * Q, */
+/* where P and Q are permutation matrices, L is lower triangular with */
+/* unit diagonal elements and U is upper triangular. */
+
+/* This is a level 1 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA, N) */
+/* On entry, the n-by-n matrix to be factored. */
+/* On exit, the factors L and U from the factorization */
+/* A = P*L*U*Q; the unit diagonal elements of L are not stored. */
+/* If U(k, k) appears to be less than SMIN, U(k, k) is given the */
+/* value of SMIN, giving a nonsingular perturbed system. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1, N). */
+
+/* IPIV (output) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= i <= N, row i of the */
+/* matrix has been interchanged with row IPIV(i). */
+
+/* JPIV (output) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= j <= N, column j of the */
+/* matrix has been interchanged with column JPIV(j). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* > 0: if INFO = k, U(k, k) is likely to produce overflow if */
+/* one tries to solve for x in Ax = b. So U is perturbed */
+/* to avoid the overflow. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Set constants to control overflow */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ --jpiv;
+
+ /* Function Body */
+ *info = 0;
+ eps = slamch_("P");
+ smlnum = slamch_("S") / eps;
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+
+/* Factorize A using complete pivoting. */
+/* Set pivots less than SMIN to SMIN */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Find max element in matrix A */
+
+ xmax = 0.f;
+ i__2 = *n;
+ for (ip = i__; ip <= i__2; ++ip) {
+ i__3 = *n;
+ for (jp = i__; jp <= i__3; ++jp) {
+ if (c_abs(&a[ip + jp * a_dim1]) >= xmax) {
+ xmax = c_abs(&a[ip + jp * a_dim1]);
+ ipv = ip;
+ jpv = jp;
+ }
+/* L10: */
+ }
+/* L20: */
+ }
+ if (i__ == 1) {
+/* Computing MAX */
+ r__1 = eps * xmax;
+ smin = dmax(r__1,smlnum);
+ }
+
+/* Swap rows */
+
+ if (ipv != i__) {
+ cswap_(n, &a[ipv + a_dim1], lda, &a[i__ + a_dim1], lda);
+ }
+ ipiv[i__] = ipv;
+
+/* Swap columns */
+
+ if (jpv != i__) {
+ cswap_(n, &a[jpv * a_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &
+ c__1);
+ }
+ jpiv[i__] = jpv;
+
+/* Check for singularity */
+
+ if (c_abs(&a[i__ + i__ * a_dim1]) < smin) {
+ *info = i__;
+ i__2 = i__ + i__ * a_dim1;
+ q__1.r = smin, q__1.i = 0.f;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ }
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ i__3 = j + i__ * a_dim1;
+ c_div(&q__1, &a[j + i__ * a_dim1], &a[i__ + i__ * a_dim1]);
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L30: */
+ }
+ i__2 = *n - i__;
+ i__3 = *n - i__;
+ cgeru_(&i__2, &i__3, &c_b10, &a[i__ + 1 + i__ * a_dim1], &c__1, &a[
+ i__ + (i__ + 1) * a_dim1], lda, &a[i__ + 1 + (i__ + 1) *
+ a_dim1], lda);
+/* L40: */
+ }
+
+ if (c_abs(&a[*n + *n * a_dim1]) < smin) {
+ *info = *n;
+ i__1 = *n + *n * a_dim1;
+ q__1.r = smin, q__1.i = 0.f;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ }
+ return 0;
+
+/* End of CGETC2 */
+
+} /* cgetc2_ */
diff --git a/contrib/libs/clapack/cgetf2.c b/contrib/libs/clapack/cgetf2.c
new file mode 100644
index 0000000000..e41b6957c6
--- /dev/null
+++ b/contrib/libs/clapack/cgetf2.c
@@ -0,0 +1,202 @@
+/* cgetf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int cgetf2_(integer *m, integer *n, complex *a, integer *lda,
+ integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Builtin functions */
+ double c_abs(complex *);
+ void c_div(complex *, complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, jp;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *), cgeru_(integer *, integer *, complex *, complex *,
+ integer *, complex *, integer *, complex *, integer *);
+ real sfmin;
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ extern integer icamax_(integer *, complex *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGETF2 computes an LU factorization of a general m-by-n matrix A */
+/* using partial pivoting with row interchanges. */
+
+/* The factorization has the form */
+/* A = P * L * U */
+/* where P is a permutation matrix, L is lower triangular with unit */
+/* diagonal elements (lower trapezoidal if m > n), and U is upper */
+/* triangular (upper trapezoidal if m < n). */
+
+/* This is the right-looking Level 2 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the m by n matrix to be factored. */
+/* On exit, the factors L and U from the factorization */
+/* A = P*L*U; the unit diagonal elements of L are not stored. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* IPIV (output) INTEGER array, dimension (min(M,N)) */
+/* The pivot indices; for 1 <= i <= min(M,N), row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, U(k,k) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and division by zero will occur if it is used */
+/* to solve a system of equations. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGETF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Compute machine safe minimum */
+
+ sfmin = slamch_("S");
+
+ i__1 = min(*m,*n);
+ for (j = 1; j <= i__1; ++j) {
+
+/* Find pivot and test for singularity. */
+
+ i__2 = *m - j + 1;
+ jp = j - 1 + icamax_(&i__2, &a[j + j * a_dim1], &c__1);
+ ipiv[j] = jp;
+ i__2 = jp + j * a_dim1;
+ if (a[i__2].r != 0.f || a[i__2].i != 0.f) {
+
+/* Apply the interchange to columns 1:N. */
+
+ if (jp != j) {
+ cswap_(n, &a[j + a_dim1], lda, &a[jp + a_dim1], lda);
+ }
+
+/* Compute elements J+1:M of J-th column. */
+
+ if (j < *m) {
+ if (c_abs(&a[j + j * a_dim1]) >= sfmin) {
+ i__2 = *m - j;
+ c_div(&q__1, &c_b1, &a[j + j * a_dim1]);
+ cscal_(&i__2, &q__1, &a[j + 1 + j * a_dim1], &c__1);
+ } else {
+ i__2 = *m - j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + i__ + j * a_dim1;
+ c_div(&q__1, &a[j + i__ + j * a_dim1], &a[j + j *
+ a_dim1]);
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L20: */
+ }
+ }
+ }
+
+ } else if (*info == 0) {
+
+ *info = j;
+ }
+
+ if (j < min(*m,*n)) {
+
+/* Update trailing submatrix. */
+
+ i__2 = *m - j;
+ i__3 = *n - j;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__2, &i__3, &q__1, &a[j + 1 + j * a_dim1], &c__1, &a[j +
+ (j + 1) * a_dim1], lda, &a[j + 1 + (j + 1) * a_dim1], lda)
+ ;
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of CGETF2 */
+
+} /* cgetf2_ */
diff --git a/contrib/libs/clapack/cgetrf.c b/contrib/libs/clapack/cgetrf.c
new file mode 100644
index 0000000000..0798e8f638
--- /dev/null
+++ b/contrib/libs/clapack/cgetrf.c
@@ -0,0 +1,220 @@
+/* cgetrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cgetrf_(integer *m, integer *n, complex *a, integer *lda,
+ integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, j, jb, nb;
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *);
+ integer iinfo;
+ extern /* Subroutine */ int ctrsm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *), cgetf2_(integer *,
+ integer *, complex *, integer *, integer *, integer *), xerbla_(
+ char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int claswp_(integer *, complex *, integer *,
+ integer *, integer *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGETRF computes an LU factorization of a general M-by-N matrix A */
+/* using partial pivoting with row interchanges. */
+
+/* The factorization has the form */
+/* A = P * L * U */
+/* where P is a permutation matrix, L is lower triangular with unit */
+/* diagonal elements (lower trapezoidal if m > n), and U is upper */
+/* triangular (upper trapezoidal if m < n). */
+
+/* This is the right-looking Level 3 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix to be factored. */
+/* On exit, the factors L and U from the factorization */
+/* A = P*L*U; the unit diagonal elements of L are not stored. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* IPIV (output) INTEGER array, dimension (min(M,N)) */
+/* The pivot indices; for 1 <= i <= min(M,N), row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and division by zero will occur if it is used */
+/* to solve a system of equations. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGETRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "CGETRF", " ", m, n, &c_n1, &c_n1);
+ if (nb <= 1 || nb >= min(*m,*n)) {
+
+/* Use unblocked code. */
+
+ cgetf2_(m, n, &a[a_offset], lda, &ipiv[1], info);
+ } else {
+
+/* Use blocked code. */
+
+ i__1 = min(*m,*n);
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__3 = min(*m,*n) - j + 1;
+ jb = min(i__3,nb);
+
+/* Factor diagonal and subdiagonal blocks and test for exact */
+/* singularity. */
+
+ i__3 = *m - j + 1;
+ cgetf2_(&i__3, &jb, &a[j + j * a_dim1], lda, &ipiv[j], &iinfo);
+
+/* Adjust INFO and the pivot indices. */
+
+ if (*info == 0 && iinfo > 0) {
+ *info = iinfo + j - 1;
+ }
+/* Computing MIN */
+ i__4 = *m, i__5 = j + jb - 1;
+ i__3 = min(i__4,i__5);
+ for (i__ = j; i__ <= i__3; ++i__) {
+ ipiv[i__] = j - 1 + ipiv[i__];
+/* L10: */
+ }
+
+/* Apply interchanges to columns 1:J-1. */
+
+ i__3 = j - 1;
+ i__4 = j + jb - 1;
+ claswp_(&i__3, &a[a_offset], lda, &j, &i__4, &ipiv[1], &c__1);
+
+ if (j + jb <= *n) {
+
+/* Apply interchanges to columns J+JB:N. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j + jb - 1;
+ claswp_(&i__3, &a[(j + jb) * a_dim1 + 1], lda, &j, &i__4, &
+ ipiv[1], &c__1);
+
+/* Compute block row of U. */
+
+ i__3 = *n - j - jb + 1;
+ ctrsm_("Left", "Lower", "No transpose", "Unit", &jb, &i__3, &
+ c_b1, &a[j + j * a_dim1], lda, &a[j + (j + jb) *
+ a_dim1], lda);
+ if (j + jb <= *m) {
+
+/* Update trailing submatrix. */
+
+ i__3 = *m - j - jb + 1;
+ i__4 = *n - j - jb + 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "No transpose", &i__3, &i__4, &jb,
+ &q__1, &a[j + jb + j * a_dim1], lda, &a[j + (j +
+ jb) * a_dim1], lda, &c_b1, &a[j + jb + (j + jb) *
+ a_dim1], lda);
+ }
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of CGETRF */
+
+} /* cgetrf_ */
diff --git a/contrib/libs/clapack/cgetri.c b/contrib/libs/clapack/cgetri.c
new file mode 100644
index 0000000000..e849bfdc37
--- /dev/null
+++ b/contrib/libs/clapack/cgetri.c
@@ -0,0 +1,271 @@
+/* cgetri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int cgetri_(integer *n, complex *a, integer *lda, integer *
+ ipiv, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, j, jb, nb, jj, jp, nn, iws;
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *), cgemv_(char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *, complex *, complex *, integer *);
+ integer nbmin;
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *), ctrsm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ integer ldwork;
+ extern /* Subroutine */ int ctrtri_(char *, char *, integer *, complex *,
+ integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGETRI computes the inverse of a matrix using the LU factorization */
+/* computed by CGETRF. */
+
+/* This method inverts U and then computes inv(A) by solving the system */
+/* inv(A)*L = inv(U) for inv(A). */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the factors L and U from the factorization */
+/* A = P*L*U as computed by CGETRF. */
+/* On exit, if INFO = 0, the inverse of the original matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices from CGETRF; for 1<=i<=N, row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO=0, then WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimal performance LWORK >= N*NB, where NB is */
+/* the optimal blocksize returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) is exactly zero; the matrix is */
+/* singular and its inverse could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "CGETRI", " ", n, &c_n1, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ lquery = *lwork == -1;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*lda < max(1,*n)) {
+ *info = -3;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGETRI", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form inv(U). If INFO > 0 from CTRTRI, then U is singular, */
+/* and the inverse is not computed. */
+
+ ctrtri_("Upper", "Non-unit", n, &a[a_offset], lda, info);
+ if (*info > 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = *n;
+ if (nb > 1 && nb < *n) {
+/* Computing MAX */
+ i__1 = ldwork * nb;
+ iws = max(i__1,1);
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "CGETRI", " ", n, &c_n1, &c_n1, &
+ c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = *n;
+ }
+
+/* Solve the equation inv(A)*L = inv(U) for inv(A). */
+
+ if (nb < nbmin || nb >= *n) {
+
+/* Use unblocked code. */
+
+ for (j = *n; j >= 1; --j) {
+
+/* Copy current column of L to WORK and replace with zeros. */
+
+ i__1 = *n;
+ for (i__ = j + 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__ + j * a_dim1;
+ work[i__2].r = a[i__3].r, work[i__2].i = a[i__3].i;
+ i__2 = i__ + j * a_dim1;
+ a[i__2].r = 0.f, a[i__2].i = 0.f;
+/* L10: */
+ }
+
+/* Compute current column of inv(A). */
+
+ if (j < *n) {
+ i__1 = *n - j;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", n, &i__1, &q__1, &a[(j + 1) * a_dim1 +
+ 1], lda, &work[j + 1], &c__1, &c_b2, &a[j * a_dim1 +
+ 1], &c__1);
+ }
+/* L20: */
+ }
+ } else {
+
+/* Use blocked code. */
+
+ nn = (*n - 1) / nb * nb + 1;
+ i__1 = -nb;
+ for (j = nn; i__1 < 0 ? j >= 1 : j <= 1; j += i__1) {
+/* Computing MIN */
+ i__2 = nb, i__3 = *n - j + 1;
+ jb = min(i__2,i__3);
+
+/* Copy current block column of L to WORK and replace with */
+/* zeros. */
+
+ i__2 = j + jb - 1;
+ for (jj = j; jj <= i__2; ++jj) {
+ i__3 = *n;
+ for (i__ = jj + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + (jj - j) * ldwork;
+ i__5 = i__ + jj * a_dim1;
+ work[i__4].r = a[i__5].r, work[i__4].i = a[i__5].i;
+ i__4 = i__ + jj * a_dim1;
+ a[i__4].r = 0.f, a[i__4].i = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Compute current block column of inv(A). */
+
+ if (j + jb <= *n) {
+ i__2 = *n - j - jb + 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "No transpose", n, &jb, &i__2, &q__1, &
+ a[(j + jb) * a_dim1 + 1], lda, &work[j + jb], &ldwork,
+ &c_b2, &a[j * a_dim1 + 1], lda);
+ }
+ ctrsm_("Right", "Lower", "No transpose", "Unit", n, &jb, &c_b2, &
+ work[j], &ldwork, &a[j * a_dim1 + 1], lda);
+/* L50: */
+ }
+ }
+
+/* Apply column interchanges. */
+
+ for (j = *n - 1; j >= 1; --j) {
+ jp = ipiv[j];
+ if (jp != j) {
+ cswap_(n, &a[j * a_dim1 + 1], &c__1, &a[jp * a_dim1 + 1], &c__1);
+ }
+/* L60: */
+ }
+
+ work[1].r = (real) iws, work[1].i = 0.f;
+ return 0;
+
+/* End of CGETRI */
+
+} /* cgetri_ */
diff --git a/contrib/libs/clapack/cgetrs.c b/contrib/libs/clapack/cgetrs.c
new file mode 100644
index 0000000000..e0bcf1f900
--- /dev/null
+++ b/contrib/libs/clapack/cgetrs.c
@@ -0,0 +1,186 @@
+/* cgetrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cgetrs_(char *trans, integer *n, integer *nrhs, complex *
+ a, integer *lda, integer *ipiv, complex *b, integer *ldb, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ctrsm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *), xerbla_(char *,
+ integer *), claswp_(integer *, complex *, integer *,
+ integer *, integer *, integer *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGETRS solves a system of linear equations */
+/* A * X = B, A**T * X = B, or A**H * X = B */
+/* with a general N-by-N matrix A using the LU factorization computed */
+/* by CGETRF. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The factors L and U from the factorization A = P*L*U */
+/* as computed by CGETRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices from CGETRF; for 1<=i<=N, row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGETRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+ if (notran) {
+
+/* Solve A * X = B. */
+
+/* Apply row interchanges to the right hand sides. */
+
+ claswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c__1);
+
+/* Solve L*X = B, overwriting B with X. */
+
+ ctrsm_("Left", "Lower", "No transpose", "Unit", n, nrhs, &c_b1, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+/* Solve U*X = B, overwriting B with X. */
+
+ ctrsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b1, &
+ a[a_offset], lda, &b[b_offset], ldb);
+ } else {
+
+/* Solve A**T * X = B or A**H * X = B. */
+
+/* Solve U'*X = B, overwriting B with X. */
+
+ ctrsm_("Left", "Upper", trans, "Non-unit", n, nrhs, &c_b1, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+/* Solve L'*X = B, overwriting B with X. */
+
+ ctrsm_("Left", "Lower", trans, "Unit", n, nrhs, &c_b1, &a[a_offset],
+ lda, &b[b_offset], ldb);
+
+/* Apply row interchanges to the solution vectors. */
+
+ claswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c_n1);
+ }
+
+ return 0;
+
+/* End of CGETRS */
+
+} /* cgetrs_ */
diff --git a/contrib/libs/clapack/cggbak.c b/contrib/libs/clapack/cggbak.c
new file mode 100644
index 0000000000..643170b0c3
--- /dev/null
+++ b/contrib/libs/clapack/cggbak.c
@@ -0,0 +1,274 @@
+/* cggbak.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cggbak_(char *job, char *side, integer *n, integer *ilo,
+ integer *ihi, real *lscale, real *rscale, integer *m, complex *v,
+ integer *ldv, integer *info)
+{
+ /* System generated locals */
+ integer v_dim1, v_offset, i__1;
+
+ /* Local variables */
+ integer i__, k;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ logical leftv;
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *), xerbla_(char *, integer *);
+ logical rightv;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGGBAK forms the right or left eigenvectors of a complex generalized */
+/* eigenvalue problem A*x = lambda*B*x, by backward transformation on */
+/* the computed eigenvectors of the balanced pair of matrices output by */
+/* CGGBAL. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies the type of backward transformation required: */
+/* = 'N': do nothing, return immediately; */
+/* = 'P': do backward transformation for permutation only; */
+/* = 'S': do backward transformation for scaling only; */
+/* = 'B': do backward transformations for both permutation and */
+/* scaling. */
+/* JOB must be the same as the argument JOB supplied to CGGBAL. */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'R': V contains right eigenvectors; */
+/* = 'L': V contains left eigenvectors. */
+
+/* N (input) INTEGER */
+/* The number of rows of the matrix V. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* The integers ILO and IHI determined by CGGBAL. */
+/* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/* LSCALE (input) REAL array, dimension (N) */
+/* Details of the permutations and/or scaling factors applied */
+/* to the left side of A and B, as returned by CGGBAL. */
+
+/* RSCALE (input) REAL array, dimension (N) */
+/* Details of the permutations and/or scaling factors applied */
+/* to the right side of A and B, as returned by CGGBAL. */
+
+/* M (input) INTEGER */
+/* The number of columns of the matrix V. M >= 0. */
+
+/* V (input/output) COMPLEX array, dimension (LDV,M) */
+/* On entry, the matrix of right or left eigenvectors to be */
+/* transformed, as returned by CTGEVC. */
+/* On exit, V is overwritten by the transformed eigenvectors. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the matrix V. LDV >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* See R.C. Ward, Balancing the generalized eigenvalue problem, */
+/* SIAM J. Sci. Stat. Comp. 2 (1981), 141-152. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ --lscale;
+ --rscale;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+
+ /* Function Body */
+ rightv = lsame_(side, "R");
+ leftv = lsame_(side, "L");
+
+ *info = 0;
+ if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+ && ! lsame_(job, "B")) {
+ *info = -1;
+ } else if (! rightv && ! leftv) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ilo < 1) {
+ *info = -4;
+ } else if (*n == 0 && *ihi == 0 && *ilo != 1) {
+ *info = -4;
+ } else if (*n > 0 && (*ihi < *ilo || *ihi > max(1,*n))) {
+ *info = -5;
+ } else if (*n == 0 && *ilo == 1 && *ihi != 0) {
+ *info = -5;
+ } else if (*m < 0) {
+ *info = -8;
+ } else if (*ldv < max(1,*n)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGGBAK", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*m == 0) {
+ return 0;
+ }
+ if (lsame_(job, "N")) {
+ return 0;
+ }
+
+ if (*ilo == *ihi) {
+ goto L30;
+ }
+
+/* Backward balance */
+
+ if (lsame_(job, "S") || lsame_(job, "B")) {
+
+/* Backward transformation on right eigenvectors */
+
+ if (rightv) {
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ csscal_(m, &rscale[i__], &v[i__ + v_dim1], ldv);
+/* L10: */
+ }
+ }
+
+/* Backward transformation on left eigenvectors */
+
+ if (leftv) {
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ csscal_(m, &lscale[i__], &v[i__ + v_dim1], ldv);
+/* L20: */
+ }
+ }
+ }
+
+/* Backward permutation */
+
+L30:
+ if (lsame_(job, "P") || lsame_(job, "B")) {
+
+/* Backward permutation on right eigenvectors */
+
+ if (rightv) {
+ if (*ilo == 1) {
+ goto L50;
+ }
+ for (i__ = *ilo - 1; i__ >= 1; --i__) {
+ k = rscale[i__];
+ if (k == i__) {
+ goto L40;
+ }
+ cswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L40:
+ ;
+ }
+
+L50:
+ if (*ihi == *n) {
+ goto L70;
+ }
+ i__1 = *n;
+ for (i__ = *ihi + 1; i__ <= i__1; ++i__) {
+ k = rscale[i__];
+ if (k == i__) {
+ goto L60;
+ }
+ cswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L60:
+ ;
+ }
+ }
+
+/* Backward permutation on left eigenvectors */
+
+L70:
+ if (leftv) {
+ if (*ilo == 1) {
+ goto L90;
+ }
+ for (i__ = *ilo - 1; i__ >= 1; --i__) {
+ k = lscale[i__];
+ if (k == i__) {
+ goto L80;
+ }
+ cswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L80:
+ ;
+ }
+
+L90:
+ if (*ihi == *n) {
+ goto L110;
+ }
+ i__1 = *n;
+ for (i__ = *ihi + 1; i__ <= i__1; ++i__) {
+ k = lscale[i__];
+ if (k == i__) {
+ goto L100;
+ }
+ cswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L100:
+ ;
+ }
+ }
+ }
+
+L110:
+
+ return 0;
+
+/* End of CGGBAK */
+
+} /* cggbak_ */
diff --git a/contrib/libs/clapack/cggbal.c b/contrib/libs/clapack/cggbal.c
new file mode 100644
index 0000000000..c12f5ea94f
--- /dev/null
+++ b/contrib/libs/clapack/cggbal.c
@@ -0,0 +1,652 @@
+/* cggbal.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b36 = 10.f;
+static real c_b72 = .5f;
+
+/* Subroutine */ int cggbal_(char *job, integer *n, complex *a, integer *lda,
+ complex *b, integer *ldb, integer *ilo, integer *ihi, real *lscale,
+ real *rscale, real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double r_lg10(real *), r_imag(complex *), c_abs(complex *), r_sign(real *,
+ real *), pow_ri(real *, integer *);
+
+ /* Local variables */
+ integer i__, j, k, l, m;
+ real t;
+ integer jc;
+ real ta, tb, tc;
+ integer ir;
+ real ew;
+ integer it, nr, ip1, jp1, lm1;
+ real cab, rab, ewc, cor, sum;
+ integer nrp2, icab, lcab;
+ real beta, coef;
+ integer irab, lrab;
+ real basl, cmax;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ real coef2, coef5, gamma, alpha;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ real sfmin;
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ real sfmax;
+ integer iflow, kount;
+ extern /* Subroutine */ int saxpy_(integer *, real *, real *, integer *,
+ real *, integer *);
+ real pgamma;
+ extern integer icamax_(integer *, complex *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *), xerbla_(char *, integer *);
+ integer lsfmin, lsfmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGGBAL balances a pair of general complex matrices (A,B). This */
+/* involves, first, permuting A and B by similarity transformations to */
+/* isolate eigenvalues in the first 1 to ILO-1 and last IHI+1 to N */
+/* elements on the diagonal; and second, applying a diagonal similarity */
+/* transformation to rows and columns ILO to IHI to make the rows */
+/* and columns as close in norm as possible. Both steps are optional. */
+
+/* Balancing may reduce the 1-norm of the matrices, and improve the */
+/* accuracy of the computed eigenvalues and/or eigenvectors in the */
+/* generalized eigenvalue problem A*x = lambda*B*x. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies the operations to be performed on A and B: */
+/* = 'N': none: simply set ILO = 1, IHI = N, LSCALE(I) = 1.0 */
+/* and RSCALE(I) = 1.0 for i=1,...,N; */
+/* = 'P': permute only; */
+/* = 'S': scale only; */
+/* = 'B': both permute and scale. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the input matrix A. */
+/* On exit, A is overwritten by the balanced matrix. */
+/* If JOB = 'N', A is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX array, dimension (LDB,N) */
+/* On entry, the input matrix B. */
+/* On exit, B is overwritten by the balanced matrix. */
+/* If JOB = 'N', B is not referenced. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* ILO (output) INTEGER */
+/* IHI (output) INTEGER */
+/* ILO and IHI are set to integers such that on exit */
+/* A(i,j) = 0 and B(i,j) = 0 if i > j and */
+/* j = 1,...,ILO-1 or i = IHI+1,...,N. */
+/* If JOB = 'N' or 'S', ILO = 1 and IHI = N. */
+
+/* LSCALE (output) REAL array, dimension (N) */
+/* Details of the permutations and scaling factors applied */
+/* to the left side of A and B. If P(j) is the index of the */
+/* row interchanged with row j, and D(j) is the scaling factor */
+/* applied to row j, then */
+/* LSCALE(j) = P(j) for J = 1,...,ILO-1 */
+/* = D(j) for J = ILO,...,IHI */
+/* = P(j) for J = IHI+1,...,N. */
+/* The order in which the interchanges are made is N to IHI+1, */
+/* then 1 to ILO-1. */
+
+/* RSCALE (output) REAL array, dimension (N) */
+/* Details of the permutations and scaling factors applied */
+/* to the right side of A and B. If P(j) is the index of the */
+/* column interchanged with column j, and D(j) is the scaling */
+/* factor applied to column j, then */
+/* RSCALE(j) = P(j) for J = 1,...,ILO-1 */
+/* = D(j) for J = ILO,...,IHI */
+/* = P(j) for J = IHI+1,...,N. */
+/* The order in which the interchanges are made is N to IHI+1, */
+/* then 1 to ILO-1. */
+
+/* WORK (workspace) REAL array, dimension (lwork) */
+/* lwork must be at least max(1,6*N) when JOB = 'S' or 'B', and */
+/* at least 1 when JOB = 'N' or 'P'. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* See R.C. WARD, Balancing the generalized eigenvalue problem, */
+/* SIAM J. Sci. Stat. Comp. 2 (1981), 141-152. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --lscale;
+ --rscale;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+ && ! lsame_(job, "B")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else if (*ldb < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGGBAL", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *ilo = 1;
+ *ihi = *n;
+ return 0;
+ }
+
+ if (*n == 1) {
+ *ilo = 1;
+ *ihi = *n;
+ lscale[1] = 1.f;
+ rscale[1] = 1.f;
+ return 0;
+ }
+
+ if (lsame_(job, "N")) {
+ *ilo = 1;
+ *ihi = *n;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ lscale[i__] = 1.f;
+ rscale[i__] = 1.f;
+/* L10: */
+ }
+ return 0;
+ }
+
+ k = 1;
+ l = *n;
+ if (lsame_(job, "S")) {
+ goto L190;
+ }
+
+ goto L30;
+
+/* Permute the matrices A and B to isolate the eigenvalues. */
+
+/* Find row with one nonzero in columns 1 through L */
+
+L20:
+ l = lm1;
+ if (l != 1) {
+ goto L30;
+ }
+
+ rscale[1] = 1.f;
+ lscale[1] = 1.f;
+ goto L190;
+
+L30:
+ lm1 = l - 1;
+ for (i__ = l; i__ >= 1; --i__) {
+ i__1 = lm1;
+ for (j = 1; j <= i__1; ++j) {
+ jp1 = j + 1;
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__ + j * b_dim1;
+ if (a[i__2].r != 0.f || a[i__2].i != 0.f || (b[i__3].r != 0.f ||
+ b[i__3].i != 0.f)) {
+ goto L50;
+ }
+/* L40: */
+ }
+ j = l;
+ goto L70;
+
+L50:
+ i__1 = l;
+ for (j = jp1; j <= i__1; ++j) {
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__ + j * b_dim1;
+ if (a[i__2].r != 0.f || a[i__2].i != 0.f || (b[i__3].r != 0.f ||
+ b[i__3].i != 0.f)) {
+ goto L80;
+ }
+/* L60: */
+ }
+ j = jp1 - 1;
+
+L70:
+ m = l;
+ iflow = 1;
+ goto L160;
+L80:
+ ;
+ }
+ goto L100;
+
+/* Find column with one nonzero in rows K through N */
+
+L90:
+ ++k;
+
+L100:
+ i__1 = l;
+ for (j = k; j <= i__1; ++j) {
+ i__2 = lm1;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ ip1 = i__ + 1;
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * b_dim1;
+ if (a[i__3].r != 0.f || a[i__3].i != 0.f || (b[i__4].r != 0.f ||
+ b[i__4].i != 0.f)) {
+ goto L120;
+ }
+/* L110: */
+ }
+ i__ = l;
+ goto L140;
+L120:
+ i__2 = l;
+ for (i__ = ip1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * b_dim1;
+ if (a[i__3].r != 0.f || a[i__3].i != 0.f || (b[i__4].r != 0.f ||
+ b[i__4].i != 0.f)) {
+ goto L150;
+ }
+/* L130: */
+ }
+ i__ = ip1 - 1;
+L140:
+ m = k;
+ iflow = 2;
+ goto L160;
+L150:
+ ;
+ }
+ goto L190;
+
+/* Permute rows M and I */
+
+L160:
+ lscale[m] = (real) i__;
+ if (i__ == m) {
+ goto L170;
+ }
+ i__1 = *n - k + 1;
+ cswap_(&i__1, &a[i__ + k * a_dim1], lda, &a[m + k * a_dim1], lda);
+ i__1 = *n - k + 1;
+ cswap_(&i__1, &b[i__ + k * b_dim1], ldb, &b[m + k * b_dim1], ldb);
+
+/* Permute columns M and J */
+
+L170:
+ rscale[m] = (real) j;
+ if (j == m) {
+ goto L180;
+ }
+ cswap_(&l, &a[j * a_dim1 + 1], &c__1, &a[m * a_dim1 + 1], &c__1);
+ cswap_(&l, &b[j * b_dim1 + 1], &c__1, &b[m * b_dim1 + 1], &c__1);
+
+L180:
+ switch (iflow) {
+ case 1: goto L20;
+ case 2: goto L90;
+ }
+
+L190:
+ *ilo = k;
+ *ihi = l;
+
+ if (lsame_(job, "P")) {
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ lscale[i__] = 1.f;
+ rscale[i__] = 1.f;
+/* L195: */
+ }
+ return 0;
+ }
+
+ if (*ilo == *ihi) {
+ return 0;
+ }
+
+/* Balance the submatrix in rows ILO to IHI. */
+
+ nr = *ihi - *ilo + 1;
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ rscale[i__] = 0.f;
+ lscale[i__] = 0.f;
+
+ work[i__] = 0.f;
+ work[i__ + *n] = 0.f;
+ work[i__ + (*n << 1)] = 0.f;
+ work[i__ + *n * 3] = 0.f;
+ work[i__ + (*n << 2)] = 0.f;
+ work[i__ + *n * 5] = 0.f;
+/* L200: */
+ }
+
+/* Compute right side vector in resulting linear equations */
+
+ basl = r_lg10(&c_b36);
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ i__2 = *ihi;
+ for (j = *ilo; j <= i__2; ++j) {
+ i__3 = i__ + j * a_dim1;
+ if (a[i__3].r == 0.f && a[i__3].i == 0.f) {
+ ta = 0.f;
+ goto L210;
+ }
+ i__3 = i__ + j * a_dim1;
+ r__3 = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 = r_imag(&a[i__ + j
+ * a_dim1]), dabs(r__2));
+ ta = r_lg10(&r__3) / basl;
+
+L210:
+ i__3 = i__ + j * b_dim1;
+ if (b[i__3].r == 0.f && b[i__3].i == 0.f) {
+ tb = 0.f;
+ goto L220;
+ }
+ i__3 = i__ + j * b_dim1;
+ r__3 = (r__1 = b[i__3].r, dabs(r__1)) + (r__2 = r_imag(&b[i__ + j
+ * b_dim1]), dabs(r__2));
+ tb = r_lg10(&r__3) / basl;
+
+L220:
+ work[i__ + (*n << 2)] = work[i__ + (*n << 2)] - ta - tb;
+ work[j + *n * 5] = work[j + *n * 5] - ta - tb;
+/* L230: */
+ }
+/* L240: */
+ }
+
+ coef = 1.f / (real) (nr << 1);
+ coef2 = coef * coef;
+ coef5 = coef2 * .5f;
+ nrp2 = nr + 2;
+ beta = 0.f;
+ it = 1;
+
+/* Start generalized conjugate gradient iteration */
+
+L250:
+
+ gamma = sdot_(&nr, &work[*ilo + (*n << 2)], &c__1, &work[*ilo + (*n << 2)]
+, &c__1) + sdot_(&nr, &work[*ilo + *n * 5], &c__1, &work[*ilo + *
+ n * 5], &c__1);
+
+ ew = 0.f;
+ ewc = 0.f;
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ ew += work[i__ + (*n << 2)];
+ ewc += work[i__ + *n * 5];
+/* L260: */
+ }
+
+/* Computing 2nd power */
+ r__1 = ew;
+/* Computing 2nd power */
+ r__2 = ewc;
+/* Computing 2nd power */
+ r__3 = ew - ewc;
+ gamma = coef * gamma - coef2 * (r__1 * r__1 + r__2 * r__2) - coef5 * (
+ r__3 * r__3);
+ if (gamma == 0.f) {
+ goto L350;
+ }
+ if (it != 1) {
+ beta = gamma / pgamma;
+ }
+ t = coef5 * (ewc - ew * 3.f);
+ tc = coef5 * (ew - ewc * 3.f);
+
+ sscal_(&nr, &beta, &work[*ilo], &c__1);
+ sscal_(&nr, &beta, &work[*ilo + *n], &c__1);
+
+ saxpy_(&nr, &coef, &work[*ilo + (*n << 2)], &c__1, &work[*ilo + *n], &
+ c__1);
+ saxpy_(&nr, &coef, &work[*ilo + *n * 5], &c__1, &work[*ilo], &c__1);
+
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ work[i__] += tc;
+ work[i__ + *n] += t;
+/* L270: */
+ }
+
+/* Apply matrix to vector */
+
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ kount = 0;
+ sum = 0.f;
+ i__2 = *ihi;
+ for (j = *ilo; j <= i__2; ++j) {
+ i__3 = i__ + j * a_dim1;
+ if (a[i__3].r == 0.f && a[i__3].i == 0.f) {
+ goto L280;
+ }
+ ++kount;
+ sum += work[j];
+L280:
+ i__3 = i__ + j * b_dim1;
+ if (b[i__3].r == 0.f && b[i__3].i == 0.f) {
+ goto L290;
+ }
+ ++kount;
+ sum += work[j];
+L290:
+ ;
+ }
+ work[i__ + (*n << 1)] = (real) kount * work[i__ + *n] + sum;
+/* L300: */
+ }
+
+ i__1 = *ihi;
+ for (j = *ilo; j <= i__1; ++j) {
+ kount = 0;
+ sum = 0.f;
+ i__2 = *ihi;
+ for (i__ = *ilo; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ if (a[i__3].r == 0.f && a[i__3].i == 0.f) {
+ goto L310;
+ }
+ ++kount;
+ sum += work[i__ + *n];
+L310:
+ i__3 = i__ + j * b_dim1;
+ if (b[i__3].r == 0.f && b[i__3].i == 0.f) {
+ goto L320;
+ }
+ ++kount;
+ sum += work[i__ + *n];
+L320:
+ ;
+ }
+ work[j + *n * 3] = (real) kount * work[j] + sum;
+/* L330: */
+ }
+
+ sum = sdot_(&nr, &work[*ilo + *n], &c__1, &work[*ilo + (*n << 1)], &c__1)
+ + sdot_(&nr, &work[*ilo], &c__1, &work[*ilo + *n * 3], &c__1);
+ alpha = gamma / sum;
+
+/* Determine correction to current iteration */
+
+ cmax = 0.f;
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ cor = alpha * work[i__ + *n];
+ if (dabs(cor) > cmax) {
+ cmax = dabs(cor);
+ }
+ lscale[i__] += cor;
+ cor = alpha * work[i__];
+ if (dabs(cor) > cmax) {
+ cmax = dabs(cor);
+ }
+ rscale[i__] += cor;
+/* L340: */
+ }
+ if (cmax < .5f) {
+ goto L350;
+ }
+
+ r__1 = -alpha;
+ saxpy_(&nr, &r__1, &work[*ilo + (*n << 1)], &c__1, &work[*ilo + (*n << 2)]
+, &c__1);
+ r__1 = -alpha;
+ saxpy_(&nr, &r__1, &work[*ilo + *n * 3], &c__1, &work[*ilo + *n * 5], &
+ c__1);
+
+ pgamma = gamma;
+ ++it;
+ if (it <= nrp2) {
+ goto L250;
+ }
+
+/* End generalized conjugate gradient iteration */
+
+L350:
+ sfmin = slamch_("S");
+ sfmax = 1.f / sfmin;
+ lsfmin = (integer) (r_lg10(&sfmin) / basl + 1.f);
+ lsfmax = (integer) (r_lg10(&sfmax) / basl);
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ i__2 = *n - *ilo + 1;
+ irab = icamax_(&i__2, &a[i__ + *ilo * a_dim1], lda);
+ rab = c_abs(&a[i__ + (irab + *ilo - 1) * a_dim1]);
+ i__2 = *n - *ilo + 1;
+ irab = icamax_(&i__2, &b[i__ + *ilo * b_dim1], ldb);
+/* Computing MAX */
+ r__1 = rab, r__2 = c_abs(&b[i__ + (irab + *ilo - 1) * b_dim1]);
+ rab = dmax(r__1,r__2);
+ r__1 = rab + sfmin;
+ lrab = (integer) (r_lg10(&r__1) / basl + 1.f);
+ ir = lscale[i__] + r_sign(&c_b72, &lscale[i__]);
+/* Computing MIN */
+ i__2 = max(ir,lsfmin), i__2 = min(i__2,lsfmax), i__3 = lsfmax - lrab;
+ ir = min(i__2,i__3);
+ lscale[i__] = pow_ri(&c_b36, &ir);
+ icab = icamax_(ihi, &a[i__ * a_dim1 + 1], &c__1);
+ cab = c_abs(&a[icab + i__ * a_dim1]);
+ icab = icamax_(ihi, &b[i__ * b_dim1 + 1], &c__1);
+/* Computing MAX */
+ r__1 = cab, r__2 = c_abs(&b[icab + i__ * b_dim1]);
+ cab = dmax(r__1,r__2);
+ r__1 = cab + sfmin;
+ lcab = (integer) (r_lg10(&r__1) / basl + 1.f);
+ jc = rscale[i__] + r_sign(&c_b72, &rscale[i__]);
+/* Computing MIN */
+ i__2 = max(jc,lsfmin), i__2 = min(i__2,lsfmax), i__3 = lsfmax - lcab;
+ jc = min(i__2,i__3);
+ rscale[i__] = pow_ri(&c_b36, &jc);
+/* L360: */
+ }
+
+/* Row scaling of matrices A and B */
+
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ i__2 = *n - *ilo + 1;
+ csscal_(&i__2, &lscale[i__], &a[i__ + *ilo * a_dim1], lda);
+ i__2 = *n - *ilo + 1;
+ csscal_(&i__2, &lscale[i__], &b[i__ + *ilo * b_dim1], ldb);
+/* L370: */
+ }
+
+/* Column scaling of matrices A and B */
+
+ i__1 = *ihi;
+ for (j = *ilo; j <= i__1; ++j) {
+ csscal_(ihi, &rscale[j], &a[j * a_dim1 + 1], &c__1);
+ csscal_(ihi, &rscale[j], &b[j * b_dim1 + 1], &c__1);
+/* L380: */
+ }
+
+ return 0;
+
+/* End of CGGBAL */
+
+} /* cggbal_ */
diff --git a/contrib/libs/clapack/cgges.c b/contrib/libs/clapack/cgges.c
new file mode 100644
index 0000000000..b19048daca
--- /dev/null
+++ b/contrib/libs/clapack/cgges.c
@@ -0,0 +1,596 @@
+/* cgges.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cgges_(char *jobvsl, char *jobvsr, char *sort, L_fp
+ selctg, integer *n, complex *a, integer *lda, complex *b, integer *
+ ldb, integer *sdim, complex *alpha, complex *beta, complex *vsl,
+ integer *ldvsl, complex *vsr, integer *ldvsr, complex *work, integer *
+ lwork, real *rwork, logical *bwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, vsl_dim1, vsl_offset,
+ vsr_dim1, vsr_offset, i__1, i__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ real dif[2];
+ integer ihi, ilo;
+ real eps, anrm, bnrm;
+ integer idum[1], ierr, itau, iwrk;
+ real pvsl, pvsr;
+ extern logical lsame_(char *, char *);
+ integer ileft, icols;
+ logical cursl, ilvsl, ilvsr;
+ integer irwrk, irows;
+ extern /* Subroutine */ int cggbak_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, complex *, integer *,
+ integer *), cggbal_(char *, integer *, complex *,
+ integer *, complex *, integer *, integer *, integer *, real *,
+ real *, real *, integer *), slabad_(real *, real *);
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ extern /* Subroutine */ int cgghrd_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *, complex *, integer *, integer *),
+ clascl_(char *, integer *, integer *, real *, real *, integer *,
+ integer *, complex *, integer *, integer *);
+ logical ilascl, ilbscl;
+ extern /* Subroutine */ int cgeqrf_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), claset_(char *,
+ integer *, integer *, complex *, complex *, complex *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ real bignum;
+ extern /* Subroutine */ int chgeqz_(char *, char *, char *, integer *,
+ integer *, integer *, complex *, integer *, complex *, integer *,
+ complex *, complex *, complex *, integer *, complex *, integer *,
+ complex *, integer *, real *, integer *),
+ ctgsen_(integer *, logical *, logical *, logical *, integer *,
+ complex *, integer *, complex *, integer *, complex *, complex *,
+ complex *, integer *, complex *, integer *, integer *, real *,
+ real *, real *, complex *, integer *, integer *, integer *,
+ integer *);
+ integer ijobvl, iright, ijobvr;
+ real anrmto;
+ integer lwkmin;
+ logical lastsl;
+ real bnrmto;
+ extern /* Subroutine */ int cungqr_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, integer *),
+ cunmqr_(char *, char *, integer *, integer *, integer *, complex
+ *, integer *, complex *, complex *, integer *, complex *, integer
+ *, integer *);
+ real smlnum;
+ logical wantst, lquery;
+ integer lwkopt;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+/* .. Function Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGGES computes for a pair of N-by-N complex nonsymmetric matrices */
+/* (A,B), the generalized eigenvalues, the generalized complex Schur */
+/* form (S, T), and optionally left and/or right Schur vectors (VSL */
+/* and VSR). This gives the generalized Schur factorization */
+
+/* (A,B) = ( (VSL)*S*(VSR)**H, (VSL)*T*(VSR)**H ) */
+
+/* where (VSR)**H is the conjugate-transpose of VSR. */
+
+/* Optionally, it also orders the eigenvalues so that a selected cluster */
+/* of eigenvalues appears in the leading diagonal blocks of the upper */
+/* triangular matrix S and the upper triangular matrix T. The leading */
+/* columns of VSL and VSR then form a unitary basis for the */
+/* corresponding left and right eigenspaces (deflating subspaces). */
+
+/* (If only the generalized eigenvalues are needed, use the driver */
+/* CGGEV instead, which is faster.) */
+
+/* A generalized eigenvalue for a pair of matrices (A,B) is a scalar w */
+/* or a ratio alpha/beta = w, such that A - w*B is singular. It is */
+/* usually represented as the pair (alpha,beta), as there is a */
+/* reasonable interpretation for beta=0, and even for both being zero. */
+
+/* A pair of matrices (S,T) is in generalized complex Schur form if S */
+/* and T are upper triangular and, in addition, the diagonal elements */
+/* of T are non-negative real numbers. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVSL (input) CHARACTER*1 */
+/* = 'N': do not compute the left Schur vectors; */
+/* = 'V': compute the left Schur vectors. */
+
+/* JOBVSR (input) CHARACTER*1 */
+/* = 'N': do not compute the right Schur vectors; */
+/* = 'V': compute the right Schur vectors. */
+
+/* SORT (input) CHARACTER*1 */
+/* Specifies whether or not to order the eigenvalues on the */
+/* diagonal of the generalized Schur form. */
+/* = 'N': Eigenvalues are not ordered; */
+/* = 'S': Eigenvalues are ordered (see SELCTG). */
+
+/* SELCTG (external procedure) LOGICAL FUNCTION of two COMPLEX arguments */
+/* SELCTG must be declared EXTERNAL in the calling subroutine. */
+/* If SORT = 'N', SELCTG is not referenced. */
+/* If SORT = 'S', SELCTG is used to select eigenvalues to sort */
+/* to the top left of the Schur form. */
+/* An eigenvalue ALPHA(j)/BETA(j) is selected if */
+/* SELCTG(ALPHA(j),BETA(j)) is true. */
+
+/* Note that a selected complex eigenvalue may no longer satisfy */
+/* SELCTG(ALPHA(j),BETA(j)) = .TRUE. after ordering, since */
+/* ordering may change the value of complex eigenvalues */
+/* (especially if the eigenvalue is ill-conditioned), in this */
+/* case INFO is set to N+2 (See INFO below). */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VSL, and VSR. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA, N) */
+/* On entry, the first of the pair of matrices. */
+/* On exit, A has been overwritten by its generalized Schur */
+/* form S. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX array, dimension (LDB, N) */
+/* On entry, the second of the pair of matrices. */
+/* On exit, B has been overwritten by its generalized Schur */
+/* form T. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* SDIM (output) INTEGER */
+/* If SORT = 'N', SDIM = 0. */
+/* If SORT = 'S', SDIM = number of eigenvalues (after sorting) */
+/* for which SELCTG is true. */
+
+/* ALPHA (output) COMPLEX array, dimension (N) */
+/* BETA (output) COMPLEX array, dimension (N) */
+/* On exit, ALPHA(j)/BETA(j), j=1,...,N, will be the */
+/* generalized eigenvalues. ALPHA(j), j=1,...,N and BETA(j), */
+/* j=1,...,N are the diagonals of the complex Schur form (A,B) */
+/* output by CGGES. The BETA(j) will be non-negative real. */
+
+/* Note: the quotients ALPHA(j)/BETA(j) may easily over- or */
+/* underflow, and BETA(j) may even be zero. Thus, the user */
+/* should avoid naively computing the ratio alpha/beta. */
+/* However, ALPHA will be always less than and usually */
+/* comparable with norm(A) in magnitude, and BETA always less */
+/* than and usually comparable with norm(B). */
+
+/* VSL (output) COMPLEX array, dimension (LDVSL,N) */
+/* If JOBVSL = 'V', VSL will contain the left Schur vectors. */
+/* Not referenced if JOBVSL = 'N'. */
+
+/* LDVSL (input) INTEGER */
+/* The leading dimension of the matrix VSL. LDVSL >= 1, and */
+/* if JOBVSL = 'V', LDVSL >= N. */
+
+/* VSR (output) COMPLEX array, dimension (LDVSR,N) */
+/* If JOBVSR = 'V', VSR will contain the right Schur vectors. */
+/* Not referenced if JOBVSR = 'N'. */
+
+/* LDVSR (input) INTEGER */
+/* The leading dimension of the matrix VSR. LDVSR >= 1, and */
+/* if JOBVSR = 'V', LDVSR >= N. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,2*N). */
+/* For good performance, LWORK must generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (8*N) */
+
+/* BWORK (workspace) LOGICAL array, dimension (N) */
+/* Not referenced if SORT = 'N'. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* =1,...,N: */
+/* The QZ iteration failed. (A,B) are not in Schur */
+/* form, but ALPHA(j) and BETA(j) should be correct for */
+/* j=INFO+1,...,N. */
+/* > N: =N+1: other than QZ iteration failed in CHGEQZ */
+/* =N+2: after reordering, roundoff changed values of */
+/* some complex eigenvalues so that leading */
+/* eigenvalues in the Generalized Schur form no */
+/* longer satisfy SELCTG=.TRUE. This could also */
+/* be caused due to scaling. */
+/*                =N+3: reordering failed in CTGSEN. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alpha;
+ --beta;
+ vsl_dim1 = *ldvsl;
+ vsl_offset = 1 + vsl_dim1;
+ vsl -= vsl_offset;
+ vsr_dim1 = *ldvsr;
+ vsr_offset = 1 + vsr_dim1;
+ vsr -= vsr_offset;
+ --work;
+ --rwork;
+ --bwork;
+
+ /* Function Body */
+ if (lsame_(jobvsl, "N")) {
+ ijobvl = 1;
+ ilvsl = FALSE_;
+ } else if (lsame_(jobvsl, "V")) {
+ ijobvl = 2;
+ ilvsl = TRUE_;
+ } else {
+ ijobvl = -1;
+ ilvsl = FALSE_;
+ }
+
+ if (lsame_(jobvsr, "N")) {
+ ijobvr = 1;
+ ilvsr = FALSE_;
+ } else if (lsame_(jobvsr, "V")) {
+ ijobvr = 2;
+ ilvsr = TRUE_;
+ } else {
+ ijobvr = -1;
+ ilvsr = FALSE_;
+ }
+
+ wantst = lsame_(sort, "S");
+
+/* Test the input arguments */
+
+ *info = 0;
+ lquery = *lwork == -1;
+ if (ijobvl <= 0) {
+ *info = -1;
+ } else if (ijobvr <= 0) {
+ *info = -2;
+ } else if (! wantst && ! lsame_(sort, "N")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldvsl < 1 || ilvsl && *ldvsl < *n) {
+ *info = -14;
+ } else if (*ldvsr < 1 || ilvsr && *ldvsr < *n) {
+ *info = -16;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n << 1;
+ lwkmin = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = 1, i__2 = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", n, &c__1, n,
+ &c__0);
+ lwkopt = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = lwkopt, i__2 = *n + *n * ilaenv_(&c__1, "CUNMQR", " ", n, &
+ c__1, n, &c_n1);
+ lwkopt = max(i__1,i__2);
+ if (ilvsl) {
+/* Computing MAX */
+ i__1 = lwkopt, i__2 = *n + *n * ilaenv_(&c__1, "CUNGQR", " ", n, &
+ c__1, n, &c_n1);
+ lwkopt = max(i__1,i__2);
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ if (*lwork < lwkmin && ! lquery) {
+ *info = -18;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGGES ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *sdim = 0;
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = clange_("M", n, n, &a[a_offset], lda, &rwork[1]);
+ ilascl = FALSE_;
+ if (anrm > 0.f && anrm < smlnum) {
+ anrmto = smlnum;
+ ilascl = TRUE_;
+ } else if (anrm > bignum) {
+ anrmto = bignum;
+ ilascl = TRUE_;
+ }
+
+ if (ilascl) {
+ clascl_("G", &c__0, &c__0, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+ bnrm = clange_("M", n, n, &b[b_offset], ldb, &rwork[1]);
+ ilbscl = FALSE_;
+ if (bnrm > 0.f && bnrm < smlnum) {
+ bnrmto = smlnum;
+ ilbscl = TRUE_;
+ } else if (bnrm > bignum) {
+ bnrmto = bignum;
+ ilbscl = TRUE_;
+ }
+
+ if (ilbscl) {
+ clascl_("G", &c__0, &c__0, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+ ierr);
+ }
+
+/* Permute the matrix to make it more nearly triangular */
+/* (Real Workspace: need 6*N) */
+
+ ileft = 1;
+ iright = *n + 1;
+ irwrk = iright + *n;
+ cggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &rwork[
+ ileft], &rwork[iright], &rwork[irwrk], &ierr);
+
+/* Reduce B to triangular form (QR decomposition of B) */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+ irows = ihi + 1 - ilo;
+ icols = *n + 1 - ilo;
+ itau = 1;
+ iwrk = itau + irows;
+ i__1 = *lwork + 1 - iwrk;
+ cgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+ iwrk], &i__1, &ierr);
+
+/* Apply the orthogonal transformation to matrix A */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+ i__1 = *lwork + 1 - iwrk;
+ cunmqr_("L", "C", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+ work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwrk], &i__1, &
+ ierr);
+
+/* Initialize VSL */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+ if (ilvsl) {
+ claset_("Full", n, n, &c_b1, &c_b2, &vsl[vsl_offset], ldvsl);
+ if (irows > 1) {
+ i__1 = irows - 1;
+ i__2 = irows - 1;
+ clacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vsl[
+ ilo + 1 + ilo * vsl_dim1], ldvsl);
+ }
+ i__1 = *lwork + 1 - iwrk;
+ cungqr_(&irows, &irows, &irows, &vsl[ilo + ilo * vsl_dim1], ldvsl, &
+ work[itau], &work[iwrk], &i__1, &ierr);
+ }
+
+/* Initialize VSR */
+
+ if (ilvsr) {
+ claset_("Full", n, n, &c_b1, &c_b2, &vsr[vsr_offset], ldvsr);
+ }
+
+/* Reduce to generalized Hessenberg form */
+/* (Workspace: none needed) */
+
+ cgghrd_(jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset],
+ ldb, &vsl[vsl_offset], ldvsl, &vsr[vsr_offset], ldvsr, &ierr);
+
+ *sdim = 0;
+
+/* Perform QZ algorithm, computing Schur vectors if desired */
+/* (Complex Workspace: need N) */
+/* (Real Workspace: need N) */
+
+ iwrk = itau;
+ i__1 = *lwork + 1 - iwrk;
+ chgeqz_("S", jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+ b_offset], ldb, &alpha[1], &beta[1], &vsl[vsl_offset], ldvsl, &
+ vsr[vsr_offset], ldvsr, &work[iwrk], &i__1, &rwork[irwrk], &ierr);
+ if (ierr != 0) {
+ if (ierr > 0 && ierr <= *n) {
+ *info = ierr;
+ } else if (ierr > *n && ierr <= *n << 1) {
+ *info = ierr - *n;
+ } else {
+ *info = *n + 1;
+ }
+ goto L30;
+ }
+
+/* Sort eigenvalues ALPHA/BETA if desired */
+/* (Workspace: none needed) */
+
+ if (wantst) {
+
+/* Undo scaling on eigenvalues before selecting */
+
+ if (ilascl) {
+ clascl_("G", &c__0, &c__0, &anrm, &anrmto, n, &c__1, &alpha[1], n,
+ &ierr);
+ }
+ if (ilbscl) {
+ clascl_("G", &c__0, &c__0, &bnrm, &bnrmto, n, &c__1, &beta[1], n,
+ &ierr);
+ }
+
+/* Select eigenvalues */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ bwork[i__] = (*selctg)(&alpha[i__], &beta[i__]);
+/* L10: */
+ }
+
+ i__1 = *lwork - iwrk + 1;
+ ctgsen_(&c__0, &ilvsl, &ilvsr, &bwork[1], n, &a[a_offset], lda, &b[
+ b_offset], ldb, &alpha[1], &beta[1], &vsl[vsl_offset], ldvsl,
+ &vsr[vsr_offset], ldvsr, sdim, &pvsl, &pvsr, dif, &work[iwrk],
+ &i__1, idum, &c__1, &ierr);
+ if (ierr == 1) {
+ *info = *n + 3;
+ }
+
+ }
+
+/* Apply back-permutation to VSL and VSR */
+/* (Workspace: none needed) */
+
+ if (ilvsl) {
+ cggbak_("P", "L", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n, &
+ vsl[vsl_offset], ldvsl, &ierr);
+ }
+ if (ilvsr) {
+ cggbak_("P", "R", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n, &
+ vsr[vsr_offset], ldvsr, &ierr);
+ }
+
+/* Undo scaling */
+
+ if (ilascl) {
+ clascl_("U", &c__0, &c__0, &anrmto, &anrm, n, n, &a[a_offset], lda, &
+ ierr);
+ clascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alpha[1], n, &
+ ierr);
+ }
+
+ if (ilbscl) {
+ clascl_("U", &c__0, &c__0, &bnrmto, &bnrm, n, n, &b[b_offset], ldb, &
+ ierr);
+ clascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+ ierr);
+ }
+
+ if (wantst) {
+
+/* Check if reordering is correct */
+
+ lastsl = TRUE_;
+ *sdim = 0;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ cursl = (*selctg)(&alpha[i__], &beta[i__]);
+ if (cursl) {
+ ++(*sdim);
+ }
+ if (cursl && ! lastsl) {
+ *info = *n + 2;
+ }
+ lastsl = cursl;
+/* L20: */
+ }
+
+ }
+
+L30:
+
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CGGES */
+
+} /* cgges_ */
diff --git a/contrib/libs/clapack/cggesx.c b/contrib/libs/clapack/cggesx.c
new file mode 100644
index 0000000000..57667e1256
--- /dev/null
+++ b/contrib/libs/clapack/cggesx.c
@@ -0,0 +1,701 @@
+/* cggesx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cggesx_(char *jobvsl, char *jobvsr, char *sort, L_fp
+ selctg, char *sense, integer *n, complex *a, integer *lda, complex *b,
+ integer *ldb, integer *sdim, complex *alpha, complex *beta, complex *
+ vsl, integer *ldvsl, complex *vsr, integer *ldvsr, real *rconde, real
+ *rcondv, complex *work, integer *lwork, real *rwork, integer *iwork,
+ integer *liwork, logical *bwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, vsl_dim1, vsl_offset,
+ vsr_dim1, vsr_offset, i__1, i__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ real pl, pr, dif[2];
+ integer ihi, ilo;
+ real eps;
+ integer ijob;
+ real anrm, bnrm;
+ integer ierr, itau, iwrk, lwrk;
+ extern logical lsame_(char *, char *);
+ integer ileft, icols;
+ logical cursl, ilvsl, ilvsr;
+ integer irwrk, irows;
+ extern /* Subroutine */ int cggbak_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, complex *, integer *,
+ integer *), cggbal_(char *, integer *, complex *,
+ integer *, complex *, integer *, integer *, integer *, real *,
+ real *, real *, integer *), slabad_(real *, real *);
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ extern /* Subroutine */ int cgghrd_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *, complex *, integer *, integer *),
+ clascl_(char *, integer *, integer *, real *, real *, integer *,
+ integer *, complex *, integer *, integer *);
+ logical ilascl, ilbscl;
+ extern /* Subroutine */ int cgeqrf_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *, integer *), clacpy_(
+ char *, integer *, integer *, complex *, integer *, complex *,
+ integer *), claset_(char *, integer *, integer *, complex
+ *, complex *, complex *, integer *), xerbla_(char *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern doublereal slamch_(char *);
+ real bignum;
+ extern /* Subroutine */ int chgeqz_(char *, char *, char *, integer *,
+ integer *, integer *, complex *, integer *, complex *, integer *,
+ complex *, complex *, complex *, integer *, complex *, integer *,
+ complex *, integer *, real *, integer *),
+ ctgsen_(integer *, logical *, logical *, logical *, integer *,
+ complex *, integer *, complex *, integer *, complex *, complex *,
+ complex *, integer *, complex *, integer *, integer *, real *,
+ real *, real *, complex *, integer *, integer *, integer *,
+ integer *);
+ integer ijobvl, iright, ijobvr;
+ logical wantsb;
+ integer liwmin;
+ logical wantse, lastsl;
+ real anrmto, bnrmto;
+ extern /* Subroutine */ int cungqr_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, integer *);
+ integer minwrk, maxwrk;
+ logical wantsn;
+ real smlnum;
+ extern /* Subroutine */ int cunmqr_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *);
+ logical wantst, lquery, wantsv;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+/* .. Function Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGGESX computes for a pair of N-by-N complex nonsymmetric matrices */
+/* (A,B), the generalized eigenvalues, the complex Schur form (S,T), */
+/* and, optionally, the left and/or right matrices of Schur vectors (VSL */
+/* and VSR). This gives the generalized Schur factorization */
+
+/* (A,B) = ( (VSL) S (VSR)**H, (VSL) T (VSR)**H ) */
+
+/* where (VSR)**H is the conjugate-transpose of VSR. */
+
+/* Optionally, it also orders the eigenvalues so that a selected cluster */
+/* of eigenvalues appears in the leading diagonal blocks of the upper */
+/* triangular matrix S and the upper triangular matrix T; computes */
+/* a reciprocal condition number for the average of the selected */
+/* eigenvalues (RCONDE); and computes a reciprocal condition number for */
+/* the right and left deflating subspaces corresponding to the selected */
+/* eigenvalues (RCONDV). The leading columns of VSL and VSR then form */
+/* an orthonormal basis for the corresponding left and right eigenspaces */
+/* (deflating subspaces). */
+
+/* A generalized eigenvalue for a pair of matrices (A,B) is a scalar w */
+/* or a ratio alpha/beta = w, such that A - w*B is singular. It is */
+/* usually represented as the pair (alpha,beta), as there is a */
+/* reasonable interpretation for beta=0 or for both being zero. */
+
+/* A pair of matrices (S,T) is in generalized complex Schur form if T is */
+/* upper triangular with non-negative diagonal and S is upper */
+/* triangular. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVSL (input) CHARACTER*1 */
+/* = 'N': do not compute the left Schur vectors; */
+/* = 'V': compute the left Schur vectors. */
+
+/* JOBVSR (input) CHARACTER*1 */
+/* = 'N': do not compute the right Schur vectors; */
+/* = 'V': compute the right Schur vectors. */
+
+/* SORT (input) CHARACTER*1 */
+/* Specifies whether or not to order the eigenvalues on the */
+/* diagonal of the generalized Schur form. */
+/* = 'N': Eigenvalues are not ordered; */
+/* = 'S': Eigenvalues are ordered (see SELCTG). */
+
+/* SELCTG (external procedure) LOGICAL FUNCTION of two COMPLEX arguments */
+/* SELCTG must be declared EXTERNAL in the calling subroutine. */
+/* If SORT = 'N', SELCTG is not referenced. */
+/* If SORT = 'S', SELCTG is used to select eigenvalues to sort */
+/* to the top left of the Schur form. */
+/* Note that a selected complex eigenvalue may no longer satisfy */
+/* SELCTG(ALPHA(j),BETA(j)) = .TRUE. after ordering, since */
+/* ordering may change the value of complex eigenvalues */
+/* (especially if the eigenvalue is ill-conditioned), in this */
+/* case INFO is set to N+3 (see INFO below). */
+
+/* SENSE (input) CHARACTER*1 */
+/* Determines which reciprocal condition numbers are computed. */
+/* = 'N' : None are computed; */
+/* = 'E' : Computed for average of selected eigenvalues only; */
+/* = 'V' : Computed for selected deflating subspaces only; */
+/* = 'B' : Computed for both. */
+/* If SENSE = 'E', 'V', or 'B', SORT must equal 'S'. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VSL, and VSR. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA, N) */
+/* On entry, the first of the pair of matrices. */
+/* On exit, A has been overwritten by its generalized Schur */
+/* form S. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX array, dimension (LDB, N) */
+/* On entry, the second of the pair of matrices. */
+/* On exit, B has been overwritten by its generalized Schur */
+/* form T. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* SDIM (output) INTEGER */
+/* If SORT = 'N', SDIM = 0. */
+/* If SORT = 'S', SDIM = number of eigenvalues (after sorting) */
+/* for which SELCTG is true. */
+
+/* ALPHA (output) COMPLEX array, dimension (N) */
+/* BETA (output) COMPLEX array, dimension (N) */
+/* On exit, ALPHA(j)/BETA(j), j=1,...,N, will be the */
+/* generalized eigenvalues. ALPHA(j) and BETA(j),j=1,...,N are */
+/* the diagonals of the complex Schur form (S,T). BETA(j) will */
+/* be non-negative real. */
+
+/* Note: the quotients ALPHA(j)/BETA(j) may easily over- or */
+/* underflow, and BETA(j) may even be zero. Thus, the user */
+/* should avoid naively computing the ratio alpha/beta. */
+/* However, ALPHA will be always less than and usually */
+/* comparable with norm(A) in magnitude, and BETA always less */
+/* than and usually comparable with norm(B). */
+
+/* VSL (output) COMPLEX array, dimension (LDVSL,N) */
+/* If JOBVSL = 'V', VSL will contain the left Schur vectors. */
+/* Not referenced if JOBVSL = 'N'. */
+
+/* LDVSL (input) INTEGER */
+/* The leading dimension of the matrix VSL. LDVSL >=1, and */
+/* if JOBVSL = 'V', LDVSL >= N. */
+
+/* VSR (output) COMPLEX array, dimension (LDVSR,N) */
+/* If JOBVSR = 'V', VSR will contain the right Schur vectors. */
+/* Not referenced if JOBVSR = 'N'. */
+
+/* LDVSR (input) INTEGER */
+/* The leading dimension of the matrix VSR. LDVSR >= 1, and */
+/* if JOBVSR = 'V', LDVSR >= N. */
+
+/* RCONDE (output) REAL array, dimension ( 2 ) */
+/* If SENSE = 'E' or 'B', RCONDE(1) and RCONDE(2) contain the */
+/* reciprocal condition numbers for the average of the selected */
+/* eigenvalues. */
+/* Not referenced if SENSE = 'N' or 'V'. */
+
+/* RCONDV (output) REAL array, dimension ( 2 ) */
+/* If SENSE = 'V' or 'B', RCONDV(1) and RCONDV(2) contain the */
+/* reciprocal condition number for the selected deflating */
+/* subspaces. */
+/* Not referenced if SENSE = 'N' or 'E'. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If N = 0, LWORK >= 1, else if SENSE = 'E', 'V', or 'B', */
+/* LWORK >= MAX(1,2*N,2*SDIM*(N-SDIM)), else */
+/* LWORK >= MAX(1,2*N). Note that 2*SDIM*(N-SDIM) <= N*N/2. */
+/* Note also that an error is only returned if */
+/* LWORK < MAX(1,2*N), but if SENSE = 'E' or 'V' or 'B' this may */
+/* not be large enough. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the bound on the optimal size of the WORK */
+/* array and the minimum size of the IWORK array, returns these */
+/* values as the first entries of the WORK and IWORK arrays, and */
+/* no error message related to LWORK or LIWORK is issued by */
+/* XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension ( 8*N ) */
+/* Real workspace. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the minimum LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If SENSE = 'N' or N = 0, LIWORK >= 1, otherwise */
+/* LIWORK >= N+2. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the bound on the optimal size of the */
+/* WORK array and the minimum size of the IWORK array, returns */
+/* these values as the first entries of the WORK and IWORK */
+/* arrays, and no error message related to LWORK or LIWORK is */
+/* issued by XERBLA. */
+
+/* BWORK (workspace) LOGICAL array, dimension (N) */
+/* Not referenced if SORT = 'N'. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1,...,N: */
+/* The QZ iteration failed. (A,B) are not in Schur */
+/* form, but ALPHA(j) and BETA(j) should be correct for */
+/* j=INFO+1,...,N. */
+/* > N: =N+1: other than QZ iteration failed in CHGEQZ */
+/* =N+2: after reordering, roundoff changed values of */
+/* some complex eigenvalues so that leading */
+/* eigenvalues in the Generalized Schur form no */
+/* longer satisfy SELCTG=.TRUE. This could also */
+/* be caused due to scaling. */
+/* =N+3: reordering failed in CTGSEN. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alpha;
+ --beta;
+ vsl_dim1 = *ldvsl;
+ vsl_offset = 1 + vsl_dim1;
+ vsl -= vsl_offset;
+ vsr_dim1 = *ldvsr;
+ vsr_offset = 1 + vsr_dim1;
+ vsr -= vsr_offset;
+ --rconde;
+ --rcondv;
+ --work;
+ --rwork;
+ --iwork;
+ --bwork;
+
+ /* Function Body */
+ if (lsame_(jobvsl, "N")) {
+ ijobvl = 1;
+ ilvsl = FALSE_;
+ } else if (lsame_(jobvsl, "V")) {
+ ijobvl = 2;
+ ilvsl = TRUE_;
+ } else {
+ ijobvl = -1;
+ ilvsl = FALSE_;
+ }
+
+ if (lsame_(jobvsr, "N")) {
+ ijobvr = 1;
+ ilvsr = FALSE_;
+ } else if (lsame_(jobvsr, "V")) {
+ ijobvr = 2;
+ ilvsr = TRUE_;
+ } else {
+ ijobvr = -1;
+ ilvsr = FALSE_;
+ }
+
+ wantst = lsame_(sort, "S");
+ wantsn = lsame_(sense, "N");
+ wantse = lsame_(sense, "E");
+ wantsv = lsame_(sense, "V");
+ wantsb = lsame_(sense, "B");
+ lquery = *lwork == -1 || *liwork == -1;
+ if (wantsn) {
+ ijob = 0;
+ } else if (wantse) {
+ ijob = 1;
+ } else if (wantsv) {
+ ijob = 2;
+ } else if (wantsb) {
+ ijob = 4;
+ }
+
+/* Test the input arguments */
+
+ *info = 0;
+ if (ijobvl <= 0) {
+ *info = -1;
+ } else if (ijobvr <= 0) {
+ *info = -2;
+ } else if (! wantst && ! lsame_(sort, "N")) {
+ *info = -3;
+ } else if (! (wantsn || wantse || wantsv || wantsb) || ! wantst && !
+ wantsn) {
+ *info = -5;
+ } else if (*n < 0) {
+ *info = -6;
+ } else if (*lda < max(1,*n)) {
+ *info = -8;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldvsl < 1 || ilvsl && *ldvsl < *n) {
+ *info = -15;
+ } else if (*ldvsr < 1 || ilvsr && *ldvsr < *n) {
+ *info = -17;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+ if (*n > 0) {
+ minwrk = *n << 1;
+ maxwrk = *n * (ilaenv_(&c__1, "CGEQRF", " ", n, &c__1, n, &c__0) + 1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * (ilaenv_(&c__1, "CUNMQR", " ", n, &
+ c__1, n, &c_n1) + 1);
+ maxwrk = max(i__1,i__2);
+ if (ilvsl) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * (ilaenv_(&c__1, "CUNGQR", " ", n, &
+ c__1, n, &c_n1) + 1);
+ maxwrk = max(i__1,i__2);
+ }
+ lwrk = maxwrk;
+ if (ijob >= 1) {
+/* Computing MAX */
+ i__1 = lwrk, i__2 = *n * *n / 2;
+ lwrk = max(i__1,i__2);
+ }
+ } else {
+ minwrk = 1;
+ maxwrk = 1;
+ lwrk = 1;
+ }
+ work[1].r = (real) lwrk, work[1].i = 0.f;
+ if (wantsn || *n == 0) {
+ liwmin = 1;
+ } else {
+ liwmin = *n + 2;
+ }
+ iwork[1] = liwmin;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -21;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -24;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGGESX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *sdim = 0;
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = clange_("M", n, n, &a[a_offset], lda, &rwork[1]);
+ ilascl = FALSE_;
+ if (anrm > 0.f && anrm < smlnum) {
+ anrmto = smlnum;
+ ilascl = TRUE_;
+ } else if (anrm > bignum) {
+ anrmto = bignum;
+ ilascl = TRUE_;
+ }
+ if (ilascl) {
+ clascl_("G", &c__0, &c__0, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+ bnrm = clange_("M", n, n, &b[b_offset], ldb, &rwork[1]);
+ ilbscl = FALSE_;
+ if (bnrm > 0.f && bnrm < smlnum) {
+ bnrmto = smlnum;
+ ilbscl = TRUE_;
+ } else if (bnrm > bignum) {
+ bnrmto = bignum;
+ ilbscl = TRUE_;
+ }
+ if (ilbscl) {
+ clascl_("G", &c__0, &c__0, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+ ierr);
+ }
+
+/* Permute the matrix to make it more nearly triangular */
+/* (Real Workspace: need 6*N) */
+
+ ileft = 1;
+ iright = *n + 1;
+ irwrk = iright + *n;
+ cggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &rwork[
+ ileft], &rwork[iright], &rwork[irwrk], &ierr);
+
+/* Reduce B to triangular form (QR decomposition of B) */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+ irows = ihi + 1 - ilo;
+ icols = *n + 1 - ilo;
+ itau = 1;
+ iwrk = itau + irows;
+ i__1 = *lwork + 1 - iwrk;
+ cgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+ iwrk], &i__1, &ierr);
+
+/* Apply the unitary transformation to matrix A */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+ i__1 = *lwork + 1 - iwrk;
+ cunmqr_("L", "C", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+ work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwrk], &i__1, &
+ ierr);
+
+/* Initialize VSL */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+ if (ilvsl) {
+ claset_("Full", n, n, &c_b1, &c_b2, &vsl[vsl_offset], ldvsl);
+ if (irows > 1) {
+ i__1 = irows - 1;
+ i__2 = irows - 1;
+ clacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vsl[
+ ilo + 1 + ilo * vsl_dim1], ldvsl);
+ }
+ i__1 = *lwork + 1 - iwrk;
+ cungqr_(&irows, &irows, &irows, &vsl[ilo + ilo * vsl_dim1], ldvsl, &
+ work[itau], &work[iwrk], &i__1, &ierr);
+ }
+
+/* Initialize VSR */
+
+ if (ilvsr) {
+ claset_("Full", n, n, &c_b1, &c_b2, &vsr[vsr_offset], ldvsr);
+ }
+
+/* Reduce to generalized Hessenberg form */
+/* (Workspace: none needed) */
+
+ cgghrd_(jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset],
+ ldb, &vsl[vsl_offset], ldvsl, &vsr[vsr_offset], ldvsr, &ierr);
+
+ *sdim = 0;
+
+/* Perform QZ algorithm, computing Schur vectors if desired */
+/* (Complex Workspace: need N) */
+/* (Real Workspace: need N) */
+
+ iwrk = itau;
+ i__1 = *lwork + 1 - iwrk;
+ chgeqz_("S", jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+ b_offset], ldb, &alpha[1], &beta[1], &vsl[vsl_offset], ldvsl, &
+ vsr[vsr_offset], ldvsr, &work[iwrk], &i__1, &rwork[irwrk], &ierr);
+ if (ierr != 0) {
+ if (ierr > 0 && ierr <= *n) {
+ *info = ierr;
+ } else if (ierr > *n && ierr <= *n << 1) {
+ *info = ierr - *n;
+ } else {
+ *info = *n + 1;
+ }
+ goto L40;
+ }
+
+/* Sort eigenvalues ALPHA/BETA and compute the reciprocal of */
+/* condition number(s) */
+
+ if (wantst) {
+
+/* Undo scaling on eigenvalues before SELCTGing */
+
+ if (ilascl) {
+ clascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alpha[1], n,
+ &ierr);
+ }
+ if (ilbscl) {
+ clascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n,
+ &ierr);
+ }
+
+/* Select eigenvalues */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ bwork[i__] = (*selctg)(&alpha[i__], &beta[i__]);
+/* L10: */
+ }
+
+/* Reorder eigenvalues, transform Generalized Schur vectors, and */
+/* compute reciprocal condition numbers */
+/* (Complex Workspace: If IJOB >= 1, need MAX(1, 2*SDIM*(N-SDIM)) */
+/* otherwise, need 1 ) */
+
+ i__1 = *lwork - iwrk + 1;
+ ctgsen_(&ijob, &ilvsl, &ilvsr, &bwork[1], n, &a[a_offset], lda, &b[
+ b_offset], ldb, &alpha[1], &beta[1], &vsl[vsl_offset], ldvsl,
+ &vsr[vsr_offset], ldvsr, sdim, &pl, &pr, dif, &work[iwrk], &
+ i__1, &iwork[1], liwork, &ierr);
+
+ if (ijob >= 1) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*sdim << 1) * (*n - *sdim);
+ maxwrk = max(i__1,i__2);
+ }
+ if (ierr == -21) {
+
+/* not enough complex workspace */
+
+ *info = -21;
+ } else {
+ if (ijob == 1 || ijob == 4) {
+ rconde[1] = pl;
+ rconde[2] = pr;
+ }
+ if (ijob == 2 || ijob == 4) {
+ rcondv[1] = dif[0];
+ rcondv[2] = dif[1];
+ }
+ if (ierr == 1) {
+ *info = *n + 3;
+ }
+ }
+
+ }
+
+/* Apply permutation to VSL and VSR */
+/* (Workspace: none needed) */
+
+ if (ilvsl) {
+ cggbak_("P", "L", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n, &
+ vsl[vsl_offset], ldvsl, &ierr);
+ }
+
+ if (ilvsr) {
+ cggbak_("P", "R", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n, &
+ vsr[vsr_offset], ldvsr, &ierr);
+ }
+
+/* Undo scaling */
+
+ if (ilascl) {
+ clascl_("U", &c__0, &c__0, &anrmto, &anrm, n, n, &a[a_offset], lda, &
+ ierr);
+ clascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alpha[1], n, &
+ ierr);
+ }
+
+ if (ilbscl) {
+ clascl_("U", &c__0, &c__0, &bnrmto, &bnrm, n, n, &b[b_offset], ldb, &
+ ierr);
+ clascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+ ierr);
+ }
+
+ if (wantst) {
+
+/* Check if reordering is correct */
+
+ lastsl = TRUE_;
+ *sdim = 0;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ cursl = (*selctg)(&alpha[i__], &beta[i__]);
+ if (cursl) {
+ ++(*sdim);
+ }
+ if (cursl && ! lastsl) {
+ *info = *n + 2;
+ }
+ lastsl = cursl;
+/* L30: */
+ }
+
+ }
+
+L40:
+
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of CGGESX */
+
+} /* cggesx_ */
diff --git a/contrib/libs/clapack/cggev.c b/contrib/libs/clapack/cggev.c
new file mode 100644
index 0000000000..615ac575ee
--- /dev/null
+++ b/contrib/libs/clapack/cggev.c
@@ -0,0 +1,592 @@
+/* cggev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cggev_(char *jobvl, char *jobvr, integer *n, complex *a,
+ integer *lda, complex *b, integer *ldb, complex *alpha, complex *beta,
+ complex *vl, integer *ldvl, complex *vr, integer *ldvr, complex *
+ work, integer *lwork, real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, vl_dim1, vl_offset, vr_dim1,
+ vr_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2, r__3, r__4;
+ complex q__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_imag(complex *);
+
+ /* Local variables */
+ integer jc, in, jr, ihi, ilo;
+ real eps;
+ logical ilv;
+ real anrm, bnrm;
+ integer ierr, itau;
+ real temp;
+ logical ilvl, ilvr;
+ integer iwrk;
+ extern logical lsame_(char *, char *);
+ integer ileft, icols, irwrk, irows;
+ extern /* Subroutine */ int cggbak_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, complex *, integer *,
+ integer *), cggbal_(char *, integer *, complex *,
+ integer *, complex *, integer *, integer *, integer *, real *,
+ real *, real *, integer *), slabad_(real *, real *);
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ extern /* Subroutine */ int cgghrd_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *, complex *, integer *, integer *),
+ clascl_(char *, integer *, integer *, real *, real *, integer *,
+ integer *, complex *, integer *, integer *);
+ logical ilascl, ilbscl;
+ extern /* Subroutine */ int cgeqrf_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), claset_(char *,
+ integer *, integer *, complex *, complex *, complex *, integer *), ctgevc_(char *, char *, logical *, integer *, complex *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *, integer *, integer *, complex *, real *, integer *), xerbla_(char *, integer *);
+ logical ldumma[1];
+ char chtemp[1];
+ real bignum;
+ extern /* Subroutine */ int chgeqz_(char *, char *, char *, integer *,
+ integer *, integer *, complex *, integer *, complex *, integer *,
+ complex *, complex *, complex *, integer *, complex *, integer *,
+ complex *, integer *, real *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ijobvl, iright, ijobvr;
+ extern /* Subroutine */ int cungqr_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, integer *);
+ real anrmto;
+ integer lwkmin;
+ real bnrmto;
+ extern /* Subroutine */ int cunmqr_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *);
+ real smlnum;
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGGEV computes for a pair of N-by-N complex nonsymmetric matrices */
+/* (A,B), the generalized eigenvalues, and optionally, the left and/or */
+/* right generalized eigenvectors. */
+
+/* A generalized eigenvalue for a pair of matrices (A,B) is a scalar */
+/* lambda or a ratio alpha/beta = lambda, such that A - lambda*B is */
+/* singular. It is usually represented as the pair (alpha,beta), as */
+/* there is a reasonable interpretation for beta=0, and even for both */
+/* being zero. */
+
+/* The right generalized eigenvector v(j) corresponding to the */
+/* generalized eigenvalue lambda(j) of (A,B) satisfies */
+
+/* A * v(j) = lambda(j) * B * v(j). */
+
+/* The left generalized eigenvector u(j) corresponding to the */
+/* generalized eigenvalues lambda(j) of (A,B) satisfies */
+
+/* u(j)**H * A = lambda(j) * u(j)**H * B */
+
+/* where u(j)**H is the conjugate-transpose of u(j). */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVL (input) CHARACTER*1 */
+/* = 'N': do not compute the left generalized eigenvectors; */
+/* = 'V': compute the left generalized eigenvectors. */
+
+/* JOBVR (input) CHARACTER*1 */
+/* = 'N': do not compute the right generalized eigenvectors; */
+/* = 'V': compute the right generalized eigenvectors. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VL, and VR. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA, N) */
+/* On entry, the matrix A in the pair (A,B). */
+/* On exit, A has been overwritten. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX array, dimension (LDB, N) */
+/* On entry, the matrix B in the pair (A,B). */
+/* On exit, B has been overwritten. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* ALPHA (output) COMPLEX array, dimension (N) */
+/* BETA (output) COMPLEX array, dimension (N) */
+/* On exit, ALPHA(j)/BETA(j), j=1,...,N, will be the */
+/* generalized eigenvalues. */
+
+/* Note: the quotients ALPHA(j)/BETA(j) may easily over- or */
+/* underflow, and BETA(j) may even be zero. Thus, the user */
+/* should avoid naively computing the ratio alpha/beta. */
+/* However, ALPHA will be always less than and usually */
+/* comparable with norm(A) in magnitude, and BETA always less */
+/* than and usually comparable with norm(B). */
+
+/* VL (output) COMPLEX array, dimension (LDVL,N) */
+/* If JOBVL = 'V', the left generalized eigenvectors u(j) are */
+/* stored one after another in the columns of VL, in the same */
+/* order as their eigenvalues. */
+/* Each eigenvector is scaled so the largest component has */
+/* abs(real part) + abs(imag. part) = 1. */
+/* Not referenced if JOBVL = 'N'. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the matrix VL. LDVL >= 1, and */
+/* if JOBVL = 'V', LDVL >= N. */
+
+/* VR (output) COMPLEX array, dimension (LDVR,N) */
+/* If JOBVR = 'V', the right generalized eigenvectors v(j) are */
+/* stored one after another in the columns of VR, in the same */
+/* order as their eigenvalues. */
+/* Each eigenvector is scaled so the largest component has */
+/* abs(real part) + abs(imag. part) = 1. */
+/* Not referenced if JOBVR = 'N'. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the matrix VR. LDVR >= 1, and */
+/* if JOBVR = 'V', LDVR >= N. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,2*N). */
+/* For good performance, LWORK must generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace/output) REAL array, dimension (8*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* =1,...,N: */
+/* The QZ iteration failed. No eigenvectors have been */
+/* calculated, but ALPHA(j) and BETA(j) should be */
+/* correct for j=INFO+1,...,N. */
+/* > N: =N+1: other than QZ iteration failed in CHGEQZ, */
+/* =N+2: error return from CTGEVC. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alpha;
+ --beta;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ if (lsame_(jobvl, "N")) {
+ ijobvl = 1;
+ ilvl = FALSE_;
+ } else if (lsame_(jobvl, "V")) {
+ ijobvl = 2;
+ ilvl = TRUE_;
+ } else {
+ ijobvl = -1;
+ ilvl = FALSE_;
+ }
+
+ if (lsame_(jobvr, "N")) {
+ ijobvr = 1;
+ ilvr = FALSE_;
+ } else if (lsame_(jobvr, "V")) {
+ ijobvr = 2;
+ ilvr = TRUE_;
+ } else {
+ ijobvr = -1;
+ ilvr = FALSE_;
+ }
+ ilv = ilvl || ilvr;
+
+/* Test the input arguments */
+
+ *info = 0;
+ lquery = *lwork == -1;
+ if (ijobvl <= 0) {
+ *info = -1;
+ } else if (ijobvr <= 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ } else if (*ldvl < 1 || ilvl && *ldvl < *n) {
+ *info = -11;
+ } else if (*ldvr < 1 || ilvr && *ldvr < *n) {
+ *info = -13;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV. The workspace is */
+/* computed assuming ILO = 1 and IHI = N, the worst case.) */
+
+ if (*info == 0) {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n << 1;
+ lwkmin = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = 1, i__2 = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", n, &c__1, n,
+ &c__0);
+ lwkopt = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = lwkopt, i__2 = *n + *n * ilaenv_(&c__1, "CUNMQR", " ", n, &
+ c__1, n, &c__0);
+ lwkopt = max(i__1,i__2);
+ if (ilvl) {
+/* Computing MAX */
+ i__1 = lwkopt, i__2 = *n + *n * ilaenv_(&c__1, "CUNGQR", " ", n, &
+ c__1, n, &c_n1);
+ lwkopt = max(i__1,i__2);
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ if (*lwork < lwkmin && ! lquery) {
+ *info = -15;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGGEV ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("E") * slamch_("B");
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = clange_("M", n, n, &a[a_offset], lda, &rwork[1]);
+ ilascl = FALSE_;
+ if (anrm > 0.f && anrm < smlnum) {
+ anrmto = smlnum;
+ ilascl = TRUE_;
+ } else if (anrm > bignum) {
+ anrmto = bignum;
+ ilascl = TRUE_;
+ }
+ if (ilascl) {
+ clascl_("G", &c__0, &c__0, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+ bnrm = clange_("M", n, n, &b[b_offset], ldb, &rwork[1]);
+ ilbscl = FALSE_;
+ if (bnrm > 0.f && bnrm < smlnum) {
+ bnrmto = smlnum;
+ ilbscl = TRUE_;
+ } else if (bnrm > bignum) {
+ bnrmto = bignum;
+ ilbscl = TRUE_;
+ }
+ if (ilbscl) {
+ clascl_("G", &c__0, &c__0, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+ ierr);
+ }
+
+/* Permute the matrices A, B to isolate eigenvalues if possible */
+/* (Real Workspace: need 6*N) */
+
+ ileft = 1;
+ iright = *n + 1;
+ irwrk = iright + *n;
+ cggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &rwork[
+ ileft], &rwork[iright], &rwork[irwrk], &ierr);
+
+/* Reduce B to triangular form (QR decomposition of B) */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+ irows = ihi + 1 - ilo;
+ if (ilv) {
+ icols = *n + 1 - ilo;
+ } else {
+ icols = irows;
+ }
+ itau = 1;
+ iwrk = itau + irows;
+ i__1 = *lwork + 1 - iwrk;
+ cgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+ iwrk], &i__1, &ierr);
+
+/* Apply the orthogonal transformation to matrix A */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+ i__1 = *lwork + 1 - iwrk;
+ cunmqr_("L", "C", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+ work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwrk], &i__1, &
+ ierr);
+
+/* Initialize VL */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+ if (ilvl) {
+ claset_("Full", n, n, &c_b1, &c_b2, &vl[vl_offset], ldvl);
+ if (irows > 1) {
+ i__1 = irows - 1;
+ i__2 = irows - 1;
+ clacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vl[
+ ilo + 1 + ilo * vl_dim1], ldvl);
+ }
+ i__1 = *lwork + 1 - iwrk;
+ cungqr_(&irows, &irows, &irows, &vl[ilo + ilo * vl_dim1], ldvl, &work[
+ itau], &work[iwrk], &i__1, &ierr);
+ }
+
+/* Initialize VR */
+
+ if (ilvr) {
+ claset_("Full", n, n, &c_b1, &c_b2, &vr[vr_offset], ldvr);
+ }
+
+/* Reduce to generalized Hessenberg form */
+
+ if (ilv) {
+
+/* Eigenvectors requested -- work on whole matrix. */
+
+ cgghrd_(jobvl, jobvr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset],
+ ldb, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, &ierr);
+ } else {
+ cgghrd_("N", "N", &irows, &c__1, &irows, &a[ilo + ilo * a_dim1], lda,
+ &b[ilo + ilo * b_dim1], ldb, &vl[vl_offset], ldvl, &vr[
+ vr_offset], ldvr, &ierr);
+ }
+
+/* Perform QZ algorithm (Compute eigenvalues, and optionally, the */
+/* Schur form and Schur vectors) */
+/* (Complex Workspace: need N) */
+/* (Real Workspace: need N) */
+
+ iwrk = itau;
+ if (ilv) {
+ *(unsigned char *)chtemp = 'S';
+ } else {
+ *(unsigned char *)chtemp = 'E';
+ }
+ i__1 = *lwork + 1 - iwrk;
+ chgeqz_(chtemp, jobvl, jobvr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+ b_offset], ldb, &alpha[1], &beta[1], &vl[vl_offset], ldvl, &vr[
+ vr_offset], ldvr, &work[iwrk], &i__1, &rwork[irwrk], &ierr);
+ if (ierr != 0) {
+ if (ierr > 0 && ierr <= *n) {
+ *info = ierr;
+ } else if (ierr > *n && ierr <= *n << 1) {
+ *info = ierr - *n;
+ } else {
+ *info = *n + 1;
+ }
+ goto L70;
+ }
+
+/* Compute Eigenvectors */
+/* (Real Workspace: need 2*N) */
+/* (Complex Workspace: need 2*N) */
+
+ if (ilv) {
+ if (ilvl) {
+ if (ilvr) {
+ *(unsigned char *)chtemp = 'B';
+ } else {
+ *(unsigned char *)chtemp = 'L';
+ }
+ } else {
+ *(unsigned char *)chtemp = 'R';
+ }
+
+ ctgevc_(chtemp, "B", ldumma, n, &a[a_offset], lda, &b[b_offset], ldb,
+ &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, n, &in, &work[
+ iwrk], &rwork[irwrk], &ierr);
+ if (ierr != 0) {
+ *info = *n + 2;
+ goto L70;
+ }
+
+/* Undo balancing on VL and VR and normalization */
+/* (Workspace: none needed) */
+
+ if (ilvl) {
+ cggbak_("P", "L", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n,
+ &vl[vl_offset], ldvl, &ierr);
+ i__1 = *n;
+ for (jc = 1; jc <= i__1; ++jc) {
+ temp = 0.f;
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ i__3 = jr + jc * vl_dim1;
+ r__3 = temp, r__4 = (r__1 = vl[i__3].r, dabs(r__1)) + (
+ r__2 = r_imag(&vl[jr + jc * vl_dim1]), dabs(r__2))
+ ;
+ temp = dmax(r__3,r__4);
+/* L10: */
+ }
+ if (temp < smlnum) {
+ goto L30;
+ }
+ temp = 1.f / temp;
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ i__3 = jr + jc * vl_dim1;
+ i__4 = jr + jc * vl_dim1;
+ q__1.r = temp * vl[i__4].r, q__1.i = temp * vl[i__4].i;
+ vl[i__3].r = q__1.r, vl[i__3].i = q__1.i;
+/* L20: */
+ }
+L30:
+ ;
+ }
+ }
+ if (ilvr) {
+ cggbak_("P", "R", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n,
+ &vr[vr_offset], ldvr, &ierr);
+ i__1 = *n;
+ for (jc = 1; jc <= i__1; ++jc) {
+ temp = 0.f;
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ i__3 = jr + jc * vr_dim1;
+ r__3 = temp, r__4 = (r__1 = vr[i__3].r, dabs(r__1)) + (
+ r__2 = r_imag(&vr[jr + jc * vr_dim1]), dabs(r__2))
+ ;
+ temp = dmax(r__3,r__4);
+/* L40: */
+ }
+ if (temp < smlnum) {
+ goto L60;
+ }
+ temp = 1.f / temp;
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ i__3 = jr + jc * vr_dim1;
+ i__4 = jr + jc * vr_dim1;
+ q__1.r = temp * vr[i__4].r, q__1.i = temp * vr[i__4].i;
+ vr[i__3].r = q__1.r, vr[i__3].i = q__1.i;
+/* L50: */
+ }
+L60:
+ ;
+ }
+ }
+ }
+
+/* Undo scaling if necessary */
+
+ if (ilascl) {
+ clascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alpha[1], n, &
+ ierr);
+ }
+
+ if (ilbscl) {
+ clascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+ ierr);
+ }
+
+L70:
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CGGEV */
+
+} /* cggev_ */
diff --git a/contrib/libs/clapack/cggevx.c b/contrib/libs/clapack/cggevx.c
new file mode 100644
index 0000000000..5c1a1b3e13
--- /dev/null
+++ b/contrib/libs/clapack/cggevx.c
@@ -0,0 +1,802 @@
+/* cggevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c__0 = 0;
+
+/* Subroutine */ int cggevx_(char *balanc, char *jobvl, char *jobvr, char *
+ sense, integer *n, complex *a, integer *lda, complex *b, integer *ldb,
+ complex *alpha, complex *beta, complex *vl, integer *ldvl, complex *
+ vr, integer *ldvr, integer *ilo, integer *ihi, real *lscale, real *
+ rscale, real *abnrm, real *bbnrm, real *rconde, real *rcondv, complex
+ *work, integer *lwork, real *rwork, integer *iwork, logical *bwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, vl_dim1, vl_offset, vr_dim1,
+ vr_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2, r__3, r__4;
+ complex q__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_imag(complex *);
+
+ /* Local variables */
+ integer i__, j, m, jc, in, jr;
+ real eps;
+ logical ilv;
+ real anrm, bnrm;
+ integer ierr, itau;
+ real temp;
+ logical ilvl, ilvr;
+ integer iwrk, iwrk1;
+ extern logical lsame_(char *, char *);
+ integer icols;
+ logical noscl;
+ integer irows;
+ extern /* Subroutine */ int cggbak_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, complex *, integer *,
+ integer *), cggbal_(char *, integer *, complex *,
+ integer *, complex *, integer *, integer *, integer *, real *,
+ real *, real *, integer *), slabad_(real *, real *);
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ extern /* Subroutine */ int cgghrd_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *, complex *, integer *, integer *),
+ clascl_(char *, integer *, integer *, real *, real *, integer *,
+ integer *, complex *, integer *, integer *);
+ logical ilascl, ilbscl;
+ extern /* Subroutine */ int cgeqrf_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *, integer *), clacpy_(
+ char *, integer *, integer *, complex *, integer *, complex *,
+ integer *), claset_(char *, integer *, integer *, complex
+ *, complex *, complex *, integer *), ctgevc_(char *, char
+ *, logical *, integer *, complex *, integer *, complex *, integer
+ *, complex *, integer *, complex *, integer *, integer *, integer
+ *, complex *, real *, integer *);
+ logical ldumma[1];
+ char chtemp[1];
+ real bignum;
+ extern /* Subroutine */ int chgeqz_(char *, char *, char *, integer *,
+ integer *, integer *, complex *, integer *, complex *, integer *,
+ complex *, complex *, complex *, integer *, complex *, integer *,
+ complex *, integer *, real *, integer *),
+ ctgsna_(char *, char *, logical *, integer *, complex *, integer *
+, complex *, integer *, complex *, integer *, complex *, integer *
+, real *, real *, integer *, integer *, complex *, integer *,
+ integer *, integer *);
+ integer ijobvl;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern doublereal slamch_(char *);
+ integer ijobvr;
+ logical wantsb;
+ extern /* Subroutine */ int cungqr_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, integer *);
+ real anrmto;
+ logical wantse;
+ real bnrmto;
+ extern /* Subroutine */ int cunmqr_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *);
+ integer minwrk, maxwrk;
+ logical wantsn;
+ real smlnum;
+ logical lquery, wantsv;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGGEVX computes for a pair of N-by-N complex nonsymmetric matrices */
+/* (A,B) the generalized eigenvalues, and optionally, the left and/or */
+/* right generalized eigenvectors. */
+
+/* Optionally, it also computes a balancing transformation to improve */
+/* the conditioning of the eigenvalues and eigenvectors (ILO, IHI, */
+/* LSCALE, RSCALE, ABNRM, and BBNRM), reciprocal condition numbers for */
+/* the eigenvalues (RCONDE), and reciprocal condition numbers for the */
+/* right eigenvectors (RCONDV). */
+
+/* A generalized eigenvalue for a pair of matrices (A,B) is a scalar */
+/* lambda or a ratio alpha/beta = lambda, such that A - lambda*B is */
+/* singular. It is usually represented as the pair (alpha,beta), as */
+/* there is a reasonable interpretation for beta=0, and even for both */
+/* being zero. */
+
+/* The right eigenvector v(j) corresponding to the eigenvalue lambda(j) */
+/* of (A,B) satisfies */
+/* A * v(j) = lambda(j) * B * v(j) . */
+/* The left eigenvector u(j) corresponding to the eigenvalue lambda(j) */
+/* of (A,B) satisfies */
+/* u(j)**H * A = lambda(j) * u(j)**H * B. */
+/* where u(j)**H is the conjugate-transpose of u(j). */
+
+
+/* Arguments */
+/* ========= */
+
+/* BALANC (input) CHARACTER*1 */
+/* Specifies the balance option to be performed: */
+/* = 'N': do not diagonally scale or permute; */
+/* = 'P': permute only; */
+/* = 'S': scale only; */
+/* = 'B': both permute and scale. */
+/* Computed reciprocal condition numbers will be for the */
+/* matrices after permuting and/or balancing. Permuting does */
+/* not change condition numbers (in exact arithmetic), but */
+/* balancing does. */
+
+/* JOBVL (input) CHARACTER*1 */
+/* = 'N': do not compute the left generalized eigenvectors; */
+/* = 'V': compute the left generalized eigenvectors. */
+
+/* JOBVR (input) CHARACTER*1 */
+/* = 'N': do not compute the right generalized eigenvectors; */
+/* = 'V': compute the right generalized eigenvectors. */
+
+/* SENSE (input) CHARACTER*1 */
+/* Determines which reciprocal condition numbers are computed. */
+/* = 'N': none are computed; */
+/* = 'E': computed for eigenvalues only; */
+/* = 'V': computed for eigenvectors only; */
+/* = 'B': computed for eigenvalues and eigenvectors. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VL, and VR. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA, N) */
+/* On entry, the matrix A in the pair (A,B). */
+/* On exit, A has been overwritten. If JOBVL='V' or JOBVR='V' */
+/* or both, then A contains the first part of the complex Schur */
+/* form of the "balanced" versions of the input A and B. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX array, dimension (LDB, N) */
+/* On entry, the matrix B in the pair (A,B). */
+/* On exit, B has been overwritten. If JOBVL='V' or JOBVR='V' */
+/* or both, then B contains the second part of the complex */
+/* Schur form of the "balanced" versions of the input A and B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* ALPHA (output) COMPLEX array, dimension (N) */
+/* BETA (output) COMPLEX array, dimension (N) */
+/* On exit, ALPHA(j)/BETA(j), j=1,...,N, will be the generalized */
+/* eigenvalues. */
+
+/* Note: the quotient ALPHA(j)/BETA(j) ) may easily over- or */
+/* underflow, and BETA(j) may even be zero. Thus, the user */
+/* should avoid naively computing the ratio ALPHA/BETA. */
+/* However, ALPHA will be always less than and usually */
+/* comparable with norm(A) in magnitude, and BETA always less */
+/* than and usually comparable with norm(B). */
+
+/* VL (output) COMPLEX array, dimension (LDVL,N) */
+/* If JOBVL = 'V', the left generalized eigenvectors u(j) are */
+/* stored one after another in the columns of VL, in the same */
+/* order as their eigenvalues. */
+/* Each eigenvector will be scaled so the largest component */
+/* will have abs(real part) + abs(imag. part) = 1. */
+/* Not referenced if JOBVL = 'N'. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the matrix VL. LDVL >= 1, and */
+/* if JOBVL = 'V', LDVL >= N. */
+
+/* VR (output) COMPLEX array, dimension (LDVR,N) */
+/* If JOBVR = 'V', the right generalized eigenvectors v(j) are */
+/* stored one after another in the columns of VR, in the same */
+/* order as their eigenvalues. */
+/* Each eigenvector will be scaled so the largest component */
+/* will have abs(real part) + abs(imag. part) = 1. */
+/* Not referenced if JOBVR = 'N'. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the matrix VR. LDVR >= 1, and */
+/* if JOBVR = 'V', LDVR >= N. */
+
+/* ILO (output) INTEGER */
+/* IHI (output) INTEGER */
+/* ILO and IHI are integer values such that on exit */
+/* A(i,j) = 0 and B(i,j) = 0 if i > j and */
+/* j = 1,...,ILO-1 or i = IHI+1,...,N. */
+/* If BALANC = 'N' or 'S', ILO = 1 and IHI = N. */
+
+/* LSCALE (output) REAL array, dimension (N) */
+/* Details of the permutations and scaling factors applied */
+/* to the left side of A and B. If PL(j) is the index of the */
+/* row interchanged with row j, and DL(j) is the scaling */
+/* factor applied to row j, then */
+/* LSCALE(j) = PL(j) for j = 1,...,ILO-1 */
+/* = DL(j) for j = ILO,...,IHI */
+/* = PL(j) for j = IHI+1,...,N. */
+/* The order in which the interchanges are made is N to IHI+1, */
+/* then 1 to ILO-1. */
+
+/* RSCALE (output) REAL array, dimension (N) */
+/* Details of the permutations and scaling factors applied */
+/* to the right side of A and B. If PR(j) is the index of the */
+/* column interchanged with column j, and DR(j) is the scaling */
+/* factor applied to column j, then */
+/* RSCALE(j) = PR(j) for j = 1,...,ILO-1 */
+/* = DR(j) for j = ILO,...,IHI */
+/* = PR(j) for j = IHI+1,...,N */
+/* The order in which the interchanges are made is N to IHI+1, */
+/* then 1 to ILO-1. */
+
+/* ABNRM (output) REAL */
+/* The one-norm of the balanced matrix A. */
+
+/* BBNRM (output) REAL */
+/* The one-norm of the balanced matrix B. */
+
+/* RCONDE (output) REAL array, dimension (N) */
+/* If SENSE = 'E' or 'B', the reciprocal condition numbers of */
+/* the eigenvalues, stored in consecutive elements of the array. */
+/* If SENSE = 'N' or 'V', RCONDE is not referenced. */
+
+/* RCONDV (output) REAL array, dimension (N) */
+/* If SENSE = 'V' or 'B', the estimated reciprocal condition */
+/* numbers of the eigenvectors, stored in consecutive elements */
+/* of the array. If the eigenvalues cannot be reordered to */
+/* compute RCONDV(j), RCONDV(j) is set to 0; this can only occur */
+/* when the true value would be very small anyway. */
+/* If SENSE = 'N' or 'E', RCONDV is not referenced. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,2*N). */
+/* If SENSE = 'E', LWORK >= max(1,4*N). */
+/* If SENSE = 'V' or 'B', LWORK >= max(1,2*N*N+2*N). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (lrwork) */
+/* lrwork must be at least max(1,6*N) if BALANC = 'S' or 'B', */
+/* and at least max(1,2*N) otherwise. */
+/* Real workspace. */
+
+/* IWORK (workspace) INTEGER array, dimension (N+2) */
+/* If SENSE = 'E', IWORK is not referenced. */
+
+/* BWORK (workspace) LOGICAL array, dimension (N) */
+/* If SENSE = 'N', BWORK is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1,...,N: */
+/* The QZ iteration failed. No eigenvectors have been */
+/* calculated, but ALPHA(j) and BETA(j) should be correct */
+/* for j=INFO+1,...,N. */
+/* > N: =N+1: other than QZ iteration failed in CHGEQZ. */
+/* =N+2: error return from CTGEVC. */
+
+/* Further Details */
+/* =============== */
+
+/* Balancing a matrix pair (A,B) includes, first, permuting rows and */
+/* columns to isolate eigenvalues, second, applying diagonal similarity */
+/* transformation to the rows and columns to make the rows and columns */
+/* as close in norm as possible. The computed reciprocal condition */
+/* numbers correspond to the balanced matrix. Permuting rows and columns */
+/* will not change the condition numbers (in exact arithmetic) but */
+/* diagonal scaling will. For further explanation of balancing, see */
+/* section 4.11.1.2 of LAPACK Users' Guide. */
+
+/* An approximate error bound on the chordal distance between the i-th */
+/* computed generalized eigenvalue w and the corresponding exact */
+/* eigenvalue lambda is */
+
+/* chord(w, lambda) <= EPS * norm(ABNRM, BBNRM) / RCONDE(I) */
+
+/* An approximate error bound for the angle between the i-th computed */
+/* eigenvector VL(i) or VR(i) is given by */
+
+/* EPS * norm(ABNRM, BBNRM) / DIF(i). */
+
+/* For further explanation of the reciprocal condition numbers RCONDE */
+/* and RCONDV, see section 4.11 of LAPACK User's Guide. */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alpha;
+ --beta;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --lscale;
+ --rscale;
+ --rconde;
+ --rcondv;
+ --work;
+ --rwork;
+ --iwork;
+ --bwork;
+
+ /* Function Body */
+ if (lsame_(jobvl, "N")) {
+ ijobvl = 1;
+ ilvl = FALSE_;
+ } else if (lsame_(jobvl, "V")) {
+ ijobvl = 2;
+ ilvl = TRUE_;
+ } else {
+ ijobvl = -1;
+ ilvl = FALSE_;
+ }
+
+ if (lsame_(jobvr, "N")) {
+ ijobvr = 1;
+ ilvr = FALSE_;
+ } else if (lsame_(jobvr, "V")) {
+ ijobvr = 2;
+ ilvr = TRUE_;
+ } else {
+ ijobvr = -1;
+ ilvr = FALSE_;
+ }
+ ilv = ilvl || ilvr;
+
+ noscl = lsame_(balanc, "N") || lsame_(balanc, "P");
+ wantsn = lsame_(sense, "N");
+ wantse = lsame_(sense, "E");
+ wantsv = lsame_(sense, "V");
+ wantsb = lsame_(sense, "B");
+
+/* Test the input arguments */
+
+ *info = 0;
+ lquery = *lwork == -1;
+ if (! (noscl || lsame_(balanc, "S") || lsame_(
+ balanc, "B"))) {
+ *info = -1;
+ } else if (ijobvl <= 0) {
+ *info = -2;
+ } else if (ijobvr <= 0) {
+ *info = -3;
+ } else if (! (wantsn || wantse || wantsb || wantsv)) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldvl < 1 || ilvl && *ldvl < *n) {
+ *info = -13;
+ } else if (*ldvr < 1 || ilvr && *ldvr < *n) {
+ *info = -15;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV. The workspace is */
+/* computed assuming ILO = 1 and IHI = N, the worst case.) */
+
+ if (*info == 0) {
+ if (*n == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ } else {
+ minwrk = *n << 1;
+ if (wantse) {
+ minwrk = *n << 2;
+ } else if (wantsv || wantsb) {
+ minwrk = (*n << 1) * (*n + 1);
+ }
+ maxwrk = minwrk;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", n, &
+ c__1, n, &c__0);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "CUNMQR", " ", n, &
+ c__1, n, &c__0);
+ maxwrk = max(i__1,i__2);
+ if (ilvl) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "CUNGQR",
+ " ", n, &c__1, n, &c__0);
+ maxwrk = max(i__1,i__2);
+ }
+ }
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -25;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGGEVX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = clange_("M", n, n, &a[a_offset], lda, &rwork[1]);
+ ilascl = FALSE_;
+ if (anrm > 0.f && anrm < smlnum) {
+ anrmto = smlnum;
+ ilascl = TRUE_;
+ } else if (anrm > bignum) {
+ anrmto = bignum;
+ ilascl = TRUE_;
+ }
+ if (ilascl) {
+ clascl_("G", &c__0, &c__0, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+ bnrm = clange_("M", n, n, &b[b_offset], ldb, &rwork[1]);
+ ilbscl = FALSE_;
+ if (bnrm > 0.f && bnrm < smlnum) {
+ bnrmto = smlnum;
+ ilbscl = TRUE_;
+ } else if (bnrm > bignum) {
+ bnrmto = bignum;
+ ilbscl = TRUE_;
+ }
+ if (ilbscl) {
+ clascl_("G", &c__0, &c__0, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+ ierr);
+ }
+
+/* Permute and/or balance the matrix pair (A,B) */
+/* (Real Workspace: need 6*N if BALANC = 'S' or 'B', 1 otherwise) */
+
+ cggbal_(balanc, n, &a[a_offset], lda, &b[b_offset], ldb, ilo, ihi, &
+ lscale[1], &rscale[1], &rwork[1], &ierr);
+
+/* Compute ABNRM and BBNRM */
+
+ *abnrm = clange_("1", n, n, &a[a_offset], lda, &rwork[1]);
+ if (ilascl) {
+ rwork[1] = *abnrm;
+ slascl_("G", &c__0, &c__0, &anrmto, &anrm, &c__1, &c__1, &rwork[1], &
+ c__1, &ierr);
+ *abnrm = rwork[1];
+ }
+
+ *bbnrm = clange_("1", n, n, &b[b_offset], ldb, &rwork[1]);
+ if (ilbscl) {
+ rwork[1] = *bbnrm;
+ slascl_("G", &c__0, &c__0, &bnrmto, &bnrm, &c__1, &c__1, &rwork[1], &
+ c__1, &ierr);
+ *bbnrm = rwork[1];
+ }
+
+/* Reduce B to triangular form (QR decomposition of B) */
+/* (Complex Workspace: need N, prefer N*NB ) */
+
+ irows = *ihi + 1 - *ilo;
+ if (ilv || ! wantsn) {
+ icols = *n + 1 - *ilo;
+ } else {
+ icols = irows;
+ }
+ itau = 1;
+ iwrk = itau + irows;
+ i__1 = *lwork + 1 - iwrk;
+ cgeqrf_(&irows, &icols, &b[*ilo + *ilo * b_dim1], ldb, &work[itau], &work[
+ iwrk], &i__1, &ierr);
+
+/* Apply the unitary transformation to A */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+ i__1 = *lwork + 1 - iwrk;
+ cunmqr_("L", "C", &irows, &icols, &irows, &b[*ilo + *ilo * b_dim1], ldb, &
+ work[itau], &a[*ilo + *ilo * a_dim1], lda, &work[iwrk], &i__1, &
+ ierr);
+
+/* Initialize VL and/or VR */
+/* (Workspace: need N, prefer N*NB) */
+
+ if (ilvl) {
+ claset_("Full", n, n, &c_b1, &c_b2, &vl[vl_offset], ldvl);
+ if (irows > 1) {
+ i__1 = irows - 1;
+ i__2 = irows - 1;
+ clacpy_("L", &i__1, &i__2, &b[*ilo + 1 + *ilo * b_dim1], ldb, &vl[
+ *ilo + 1 + *ilo * vl_dim1], ldvl);
+ }
+ i__1 = *lwork + 1 - iwrk;
+ cungqr_(&irows, &irows, &irows, &vl[*ilo + *ilo * vl_dim1], ldvl, &
+ work[itau], &work[iwrk], &i__1, &ierr);
+ }
+
+ if (ilvr) {
+ claset_("Full", n, n, &c_b1, &c_b2, &vr[vr_offset], ldvr);
+ }
+
+/* Reduce to generalized Hessenberg form */
+/* (Workspace: none needed) */
+
+ if (ilv || ! wantsn) {
+
+/* Eigenvectors requested -- work on whole matrix. */
+
+ cgghrd_(jobvl, jobvr, n, ilo, ihi, &a[a_offset], lda, &b[b_offset],
+ ldb, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, &ierr);
+ } else {
+ cgghrd_("N", "N", &irows, &c__1, &irows, &a[*ilo + *ilo * a_dim1],
+ lda, &b[*ilo + *ilo * b_dim1], ldb, &vl[vl_offset], ldvl, &vr[
+ vr_offset], ldvr, &ierr);
+ }
+
+/* Perform QZ algorithm (Compute eigenvalues, and optionally, the */
+/* Schur forms and Schur vectors) */
+/* (Complex Workspace: need N) */
+/* (Real Workspace: need N) */
+
+ iwrk = itau;
+ if (ilv || ! wantsn) {
+ *(unsigned char *)chtemp = 'S';
+ } else {
+ *(unsigned char *)chtemp = 'E';
+ }
+
+ i__1 = *lwork + 1 - iwrk;
+ chgeqz_(chtemp, jobvl, jobvr, n, ilo, ihi, &a[a_offset], lda, &b[b_offset]
+, ldb, &alpha[1], &beta[1], &vl[vl_offset], ldvl, &vr[vr_offset],
+ ldvr, &work[iwrk], &i__1, &rwork[1], &ierr);
+ if (ierr != 0) {
+ if (ierr > 0 && ierr <= *n) {
+ *info = ierr;
+ } else if (ierr > *n && ierr <= *n << 1) {
+ *info = ierr - *n;
+ } else {
+ *info = *n + 1;
+ }
+ goto L90;
+ }
+
+/* Compute Eigenvectors and estimate condition numbers if desired */
+/* CTGEVC: (Complex Workspace: need 2*N ) */
+/* (Real Workspace: need 2*N ) */
+/* CTGSNA: (Complex Workspace: need 2*N*N if SENSE='V' or 'B') */
+/* (Integer Workspace: need N+2 ) */
+
+ if (ilv || ! wantsn) {
+ if (ilv) {
+ if (ilvl) {
+ if (ilvr) {
+ *(unsigned char *)chtemp = 'B';
+ } else {
+ *(unsigned char *)chtemp = 'L';
+ }
+ } else {
+ *(unsigned char *)chtemp = 'R';
+ }
+
+ ctgevc_(chtemp, "B", ldumma, n, &a[a_offset], lda, &b[b_offset],
+ ldb, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, n, &in, &
+ work[iwrk], &rwork[1], &ierr);
+ if (ierr != 0) {
+ *info = *n + 2;
+ goto L90;
+ }
+ }
+
+ if (! wantsn) {
+
+/* compute eigenvectors (STGEVC) and estimate condition */
+/* numbers (STGSNA). Note that the definition of the condition */
+/* number is not invariant under transformation (u,v) to */
+/* (Q*u, Z*v), where (u,v) are eigenvectors of the generalized */
+/* Schur form (S,T), Q and Z are orthogonal matrices. In order */
+/* to avoid using extra 2*N*N workspace, we have to */
+/* re-calculate eigenvectors and estimate the condition numbers */
+/* one at a time. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ bwork[j] = FALSE_;
+/* L10: */
+ }
+ bwork[i__] = TRUE_;
+
+ iwrk = *n + 1;
+ iwrk1 = iwrk + *n;
+
+ if (wantse || wantsb) {
+ ctgevc_("B", "S", &bwork[1], n, &a[a_offset], lda, &b[
+ b_offset], ldb, &work[1], n, &work[iwrk], n, &
+ c__1, &m, &work[iwrk1], &rwork[1], &ierr);
+ if (ierr != 0) {
+ *info = *n + 2;
+ goto L90;
+ }
+ }
+
+ i__2 = *lwork - iwrk1 + 1;
+ ctgsna_(sense, "S", &bwork[1], n, &a[a_offset], lda, &b[
+ b_offset], ldb, &work[1], n, &work[iwrk], n, &rconde[
+ i__], &rcondv[i__], &c__1, &m, &work[iwrk1], &i__2, &
+ iwork[1], &ierr);
+
+/* L20: */
+ }
+ }
+ }
+
+/* Undo balancing on VL and VR and normalization */
+/* (Workspace: none needed) */
+
+ if (ilvl) {
+ cggbak_(balanc, "L", n, ilo, ihi, &lscale[1], &rscale[1], n, &vl[
+ vl_offset], ldvl, &ierr);
+
+ i__1 = *n;
+ for (jc = 1; jc <= i__1; ++jc) {
+ temp = 0.f;
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ i__3 = jr + jc * vl_dim1;
+ r__3 = temp, r__4 = (r__1 = vl[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&vl[jr + jc * vl_dim1]), dabs(r__2));
+ temp = dmax(r__3,r__4);
+/* L30: */
+ }
+ if (temp < smlnum) {
+ goto L50;
+ }
+ temp = 1.f / temp;
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ i__3 = jr + jc * vl_dim1;
+ i__4 = jr + jc * vl_dim1;
+ q__1.r = temp * vl[i__4].r, q__1.i = temp * vl[i__4].i;
+ vl[i__3].r = q__1.r, vl[i__3].i = q__1.i;
+/* L40: */
+ }
+L50:
+ ;
+ }
+ }
+
+ if (ilvr) {
+ cggbak_(balanc, "R", n, ilo, ihi, &lscale[1], &rscale[1], n, &vr[
+ vr_offset], ldvr, &ierr);
+ i__1 = *n;
+ for (jc = 1; jc <= i__1; ++jc) {
+ temp = 0.f;
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ i__3 = jr + jc * vr_dim1;
+ r__3 = temp, r__4 = (r__1 = vr[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&vr[jr + jc * vr_dim1]), dabs(r__2));
+ temp = dmax(r__3,r__4);
+/* L60: */
+ }
+ if (temp < smlnum) {
+ goto L80;
+ }
+ temp = 1.f / temp;
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ i__3 = jr + jc * vr_dim1;
+ i__4 = jr + jc * vr_dim1;
+ q__1.r = temp * vr[i__4].r, q__1.i = temp * vr[i__4].i;
+ vr[i__3].r = q__1.r, vr[i__3].i = q__1.i;
+/* L70: */
+ }
+L80:
+ ;
+ }
+ }
+
+/* Undo scaling if necessary */
+
+ if (ilascl) {
+ clascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alpha[1], n, &
+ ierr);
+ }
+
+ if (ilbscl) {
+ clascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+ ierr);
+ }
+
+L90:
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CGGEVX */
+
+} /* cggevx_ */
diff --git a/contrib/libs/clapack/cggglm.c b/contrib/libs/clapack/cggglm.c
new file mode 100644
index 0000000000..a8ec88d5cc
--- /dev/null
+++ b/contrib/libs/clapack/cggglm.c
@@ -0,0 +1,334 @@
+/* cggglm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cggglm_(integer *n, integer *m, integer *p, complex *a,
+ integer *lda, complex *b, integer *ldb, complex *d__, complex *x,
+ complex *y, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, nb, np, nb1, nb2, nb3, nb4, lopt;
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *), ccopy_(integer *, complex *, integer *,
+ complex *, integer *), cggqrf_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, complex *,
+ complex *, integer *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer lwkmin;
+ extern /* Subroutine */ int cunmqr_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *), cunmrq_(char *,
+ char *, integer *, integer *, integer *, complex *, integer *,
+ complex *, complex *, integer *, complex *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int ctrtrs_(char *, char *, char *, integer *,
+ integer *, complex *, integer *, complex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGGGLM solves a general Gauss-Markov linear model (GLM) problem: */
+
+/* minimize || y ||_2 subject to d = A*x + B*y */
+/* x */
+
+/* where A is an N-by-M matrix, B is an N-by-P matrix, and d is a */
+/* given N-vector. It is assumed that M <= N <= M+P, and */
+
+/* rank(A) = M and rank( A B ) = N. */
+
+/* Under these assumptions, the constrained equation is always */
+/* consistent, and there is a unique solution x and a minimal 2-norm */
+/* solution y, which is obtained using a generalized QR factorization */
+/* of the matrices (A, B) given by */
+
+/* A = Q*(R), B = Q*T*Z. */
+/* (0) */
+
+/* In particular, if matrix B is square nonsingular, then the problem */
+/* GLM is equivalent to the following weighted linear least squares */
+/* problem */
+
+/* minimize || inv(B)*(d-A*x) ||_2 */
+/* x */
+
+/* where inv(B) denotes the inverse of B. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of rows of the matrices A and B. N >= 0. */
+
+/* M (input) INTEGER */
+/* The number of columns of the matrix A. 0 <= M <= N. */
+
+/* P (input) INTEGER */
+/* The number of columns of the matrix B. P >= N-M. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,M) */
+/* On entry, the N-by-M matrix A. */
+/* On exit, the upper triangular part of the array A contains */
+/* the M-by-M upper triangular matrix R. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX array, dimension (LDB,P) */
+/* On entry, the N-by-P matrix B. */
+/* On exit, if N <= P, the upper triangle of the subarray */
+/* B(1:N,P-N+1:P) contains the N-by-N upper triangular matrix T; */
+/* if N > P, the elements on and above the (N-P)th subdiagonal */
+/* contain the N-by-P upper trapezoidal matrix T. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* D (input/output) COMPLEX array, dimension (N) */
+/* On entry, D is the left hand side of the GLM equation. */
+/* On exit, D is destroyed. */
+
+/* X (output) COMPLEX array, dimension (M) */
+/* Y (output) COMPLEX array, dimension (P) */
+/* On exit, X and Y are the solutions of the GLM problem. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N+M+P). */
+/* For optimum performance, LWORK >= M+min(N,P)+max(N,P)*NB, */
+/* where NB is an upper bound for the optimal blocksizes for */
+/* CGEQRF, CGERQF, CUNMQR and CUNMRQ. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1: the upper triangular factor R associated with A in the */
+/* generalized QR factorization of the pair (A, B) is */
+/* singular, so that rank(A) < M; the least squares */
+/* solution could not be computed. */
+/* = 2: the bottom (N-M) by (N-M) part of the upper trapezoidal */
+/* factor T associated with B in the generalized QR */
+/* factorization of the pair (A, B) is singular, so that */
+/* rank( A B ) < N; the least squares solution could not */
+/* be computed. */
+
+/* =================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters (a negative *info = -i flags argument i) */
+
+ /* Parameter adjustments: shift array bases for 1-based Fortran indexing */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --d__;
+ --x;
+ --y;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ np = min(*n,*p);
+ lquery = *lwork == -1;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*m < 0 || *m > *n) {
+ *info = -2;
+ } else if (*p < 0 || *p < *n - *m) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+
+/* Calculate workspace: minimal is N+M+P, optimal uses ILAENV block sizes */
+
+ if (*info == 0) {
+ if (*n == 0) {
+ lwkmin = 1;
+ lwkopt = 1;
+ } else {
+ nb1 = ilaenv_(&c__1, "CGEQRF", " ", n, m, &c_n1, &c_n1);
+ nb2 = ilaenv_(&c__1, "CGERQF", " ", n, m, &c_n1, &c_n1);
+ nb3 = ilaenv_(&c__1, "CUNMQR", " ", n, m, p, &c_n1);
+ nb4 = ilaenv_(&c__1, "CUNMRQ", " ", n, m, p, &c_n1);
+/* Computing MAX */
+ i__1 = max(nb1,nb2), i__1 = max(i__1,nb3);
+ nb = max(i__1,nb4);
+ lwkmin = *m + *n + *p;
+ lwkopt = *m + np + max(*n,*p) * nb;
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ if (*lwork < lwkmin && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGGGLM", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible (N = 0 leaves nothing to solve) */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Compute the GQR factorization of matrices A and B: */
+
+/* Q'*A = ( R11 ) M, Q'*B*Z' = ( T11 T12 ) M */
+/* ( 0 ) N-M ( 0 T22 ) N-M */
+/* M M+P-N N-M */
+
+/* where R11 and T22 are upper triangular, and Q and Z are */
+/* unitary. */
+
+ i__1 = *lwork - *m - np;
+ cggqrf_(n, m, p, &a[a_offset], lda, &work[1], &b[b_offset], ldb, &work[*m
+ + 1], &work[*m + np + 1], &i__1, info);
+ i__1 = *m + np + 1;
+ lopt = work[i__1].r;
+
+/* Update left-hand-side vector d = Q'*d = ( d1 ) M */
+/* ( d2 ) N-M */
+
+ i__1 = max(1,*n);
+ i__2 = *lwork - *m - np;
+ cunmqr_("Left", "Conjugate transpose", n, &c__1, m, &a[a_offset], lda, &
+ work[1], &d__[1], &i__1, &work[*m + np + 1], &i__2, info);
+/* Computing MAX */
+ i__3 = *m + np + 1;
+ i__1 = lopt, i__2 = (integer) work[i__3].r;
+ lopt = max(i__1,i__2);
+
+/* Solve T22*y2 = d2 for y2 */
+
+ if (*n > *m) {
+ i__1 = *n - *m;
+ i__2 = *n - *m;
+ ctrtrs_("Upper", "No transpose", "Non unit", &i__1, &c__1, &b[*m + 1
+ + (*m + *p - *n + 1) * b_dim1], ldb, &d__[*m + 1], &i__2,
+ info);
+
+ if (*info > 0) {
+ *info = 1;
+ return 0;
+ }
+
+ i__1 = *n - *m;
+ ccopy_(&i__1, &d__[*m + 1], &c__1, &y[*m + *p - *n + 1], &c__1);
+ }
+
+/* Set y1 = 0 (the leading M+P-N entries of the minimal-norm y) */
+
+ i__1 = *m + *p - *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ y[i__2].r = 0.f, y[i__2].i = 0.f;
+/* L10: */
+ }
+
+/* Update d1 = d1 - T12*y2 */
+
+ i__1 = *n - *m;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", m, &i__1, &q__1, &b[(*m + *p - *n + 1) * b_dim1 +
+ 1], ldb, &y[*m + *p - *n + 1], &c__1, &c_b2, &d__[1], &c__1);
+
+/* Solve triangular system: R11*x = d1 */
+
+ if (*m > 0) {
+ ctrtrs_("Upper", "No Transpose", "Non unit", m, &c__1, &a[a_offset],
+ lda, &d__[1], m, info);
+
+ if (*info > 0) {
+ *info = 2;
+ return 0;
+ }
+
+/* Copy D to X (D now holds x = inv(R11)*d1) */
+
+ ccopy_(m, &d__[1], &c__1, &x[1], &c__1);
+ }
+
+/* Backward transformation y = Z'*y */
+
+/* Computing MAX */
+ i__1 = 1, i__2 = *n - *p + 1;
+ i__3 = max(1,*p);
+ i__4 = *lwork - *m - np;
+ cunmrq_("Left", "Conjugate transpose", p, &c__1, &np, &b[max(i__1, i__2)+
+ b_dim1], ldb, &work[*m + 1], &y[1], &i__3, &work[*m + np + 1], &
+ i__4, info);
+/* Computing MAX */
+ i__4 = *m + np + 1;
+ i__2 = lopt, i__3 = (integer) work[i__4].r;
+ i__1 = *m + np + max(i__2,i__3);
+ work[1].r = (real) i__1, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CGGGLM */
+
+} /* cggglm_ */
diff --git a/contrib/libs/clapack/cgghrd.c b/contrib/libs/clapack/cgghrd.c
new file mode 100644
index 0000000000..18bc1207e2
--- /dev/null
+++ b/contrib/libs/clapack/cgghrd.c
@@ -0,0 +1,336 @@
+/* cgghrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static complex c_b2 = {0.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int cgghrd_(char *compq, char *compz, integer *n, integer *
+ ilo, integer *ihi, complex *a, integer *lda, complex *b, integer *ldb,
+ complex *q, integer *ldq, complex *z__, integer *ldz, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1,
+ z_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ real c__;
+ complex s;
+ logical ilq, ilz;
+ integer jcol;
+ extern /* Subroutine */ int crot_(integer *, complex *, integer *,
+ complex *, integer *, real *, complex *);
+ integer jrow;
+ extern logical lsame_(char *, char *);
+ complex ctemp;
+ extern /* Subroutine */ int claset_(char *, integer *, integer *, complex
+ *, complex *, complex *, integer *), clartg_(complex *,
+ complex *, real *, complex *, complex *), xerbla_(char *, integer
+ *);
+ integer icompq, icompz;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGGHRD reduces a pair of complex matrices (A,B) to generalized upper */
+/* Hessenberg form using unitary transformations, where A is a */
+/* general matrix and B is upper triangular. The form of the generalized */
+/* eigenvalue problem is */
+/* A*x = lambda*B*x, */
+/* and B is typically made upper triangular by computing its QR */
+/* factorization and moving the unitary matrix Q to the left side */
+/* of the equation. */
+
+/* This subroutine simultaneously reduces A to a Hessenberg matrix H: */
+/* Q**H*A*Z = H */
+/* and transforms B to another upper triangular matrix T: */
+/* Q**H*B*Z = T */
+/* in order to reduce the problem to its standard form */
+/* H*y = lambda*T*y */
+/* where y = Z**H*x. */
+
+/* The unitary matrices Q and Z are determined as products of Givens */
+/* rotations. They may either be formed explicitly, or they may be */
+/* postmultiplied into input matrices Q1 and Z1, so that */
+/* Q1 * A * Z1**H = (Q1*Q) * H * (Z1*Z)**H */
+/* Q1 * B * Z1**H = (Q1*Q) * T * (Z1*Z)**H */
+/* If Q1 is the unitary matrix from the QR factorization of B in the */
+/* original equation A*x = lambda*B*x, then CGGHRD reduces the original */
+/* problem to generalized Hessenberg form. */
+
+/* Arguments */
+/* ========= */
+
+/* COMPQ (input) CHARACTER*1 */
+/* = 'N': do not compute Q; */
+/* = 'I': Q is initialized to the unit matrix, and the */
+/* unitary matrix Q is returned; */
+/* = 'V': Q must contain a unitary matrix Q1 on entry, */
+/* and the product Q1*Q is returned. */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': do not compute Z; */
+/* = 'I': Z is initialized to the unit matrix, and the */
+/* unitary matrix Z is returned; */
+/* = 'V': Z must contain a unitary matrix Z1 on entry, */
+/* and the product Z1*Z is returned. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* ILO and IHI mark the rows and columns of A which are to be */
+/* reduced. It is assumed that A is already upper triangular */
+/* in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are */
+/* normally set by a previous call to CGGBAL; otherwise they */
+/* should be set to 1 and N respectively. */
+/* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA, N) */
+/* On entry, the N-by-N general matrix to be reduced. */
+/* On exit, the upper triangle and the first subdiagonal of A */
+/* are overwritten with the upper Hessenberg matrix H, and the */
+/* rest is set to zero. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX array, dimension (LDB, N) */
+/* On entry, the N-by-N upper triangular matrix B. */
+/* On exit, the upper triangular matrix T = Q**H B Z. The */
+/* elements below the diagonal are set to zero. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* Q (input/output) COMPLEX array, dimension (LDQ, N) */
+/* On entry, if COMPQ = 'V', the unitary matrix Q1, typically */
+/* from the QR factorization of B. */
+/* On exit, if COMPQ='I', the unitary matrix Q, and if */
+/* COMPQ = 'V', the product Q1*Q. */
+/* Not referenced if COMPQ='N'. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. */
+/* LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise. */
+
+/* Z (input/output) COMPLEX array, dimension (LDZ, N) */
+/* On entry, if COMPZ = 'V', the unitary matrix Z1. */
+/* On exit, if COMPZ='I', the unitary matrix Z, and if */
+/* COMPZ = 'V', the product Z1*Z. */
+/* Not referenced if COMPZ='N'. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. */
+/* LDZ >= N if COMPZ='V' or 'I'; LDZ >= 1 otherwise. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* This routine reduces A to Hessenberg and B to triangular form by */
+/* an unblocked reduction, as described in _Matrix_Computations_, */
+/* by Golub and van Loan (Johns Hopkins Press). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode COMPQ (ilq: accumulate/return Q; icompq stays 0 when invalid) */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+
+ /* Function Body */
+ if (lsame_(compq, "N")) {
+ ilq = FALSE_;
+ icompq = 1;
+ } else if (lsame_(compq, "V")) {
+ ilq = TRUE_;
+ icompq = 2;
+ } else if (lsame_(compq, "I")) {
+ ilq = TRUE_;
+ icompq = 3;
+ } else {
+ icompq = 0;
+ }
+
+/* Decode COMPZ (ilz: accumulate/return Z; icompz stays 0 when invalid) */
+
+ if (lsame_(compz, "N")) {
+ ilz = FALSE_;
+ icompz = 1;
+ } else if (lsame_(compz, "V")) {
+ ilz = TRUE_;
+ icompz = 2;
+ } else if (lsame_(compz, "I")) {
+ ilz = TRUE_;
+ icompz = 3;
+ } else {
+ icompz = 0;
+ }
+
+/* Test the input parameters. */
+
+ *info = 0;
+ if (icompq <= 0) {
+ *info = -1;
+ } else if (icompz <= 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ilo < 1) {
+ *info = -4;
+ } else if (*ihi > *n || *ihi < *ilo - 1) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (ilq && *ldq < *n || *ldq < 1) {
+ *info = -11;
+ } else if (ilz && *ldz < *n || *ldz < 1) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGGHRD", &i__1);
+ return 0;
+ }
+
+/* Initialize Q and Z if desired. */
+
+ if (icompq == 3) {
+ claset_("Full", n, n, &c_b2, &c_b1, &q[q_offset], ldq);
+ }
+ if (icompz == 3) {
+ claset_("Full", n, n, &c_b2, &c_b1, &z__[z_offset], ldz);
+ }
+
+/* Quick return if possible (order 0 or 1 is already in the target form) */
+
+ if (*n <= 1) {
+ return 0;
+ }
+
+/* Zero out lower triangle of B (B is upper triangular on entry) */
+
+ i__1 = *n - 1;
+ for (jcol = 1; jcol <= i__1; ++jcol) {
+ i__2 = *n;
+ for (jrow = jcol + 1; jrow <= i__2; ++jrow) {
+ i__3 = jrow + jcol * b_dim1;
+ b[i__3].r = 0.f, b[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+
+/* Reduce A and B */
+
+ i__1 = *ihi - 2;
+ for (jcol = *ilo; jcol <= i__1; ++jcol) {
+
+ i__2 = jcol + 2;
+ for (jrow = *ihi; jrow >= i__2; --jrow) {
+
+/* Step 1: rotate rows JROW-1, JROW to kill A(JROW,JCOL) */
+
+ i__3 = jrow - 1 + jcol * a_dim1;
+ ctemp.r = a[i__3].r, ctemp.i = a[i__3].i;
+ clartg_(&ctemp, &a[jrow + jcol * a_dim1], &c__, &s, &a[jrow - 1 +
+ jcol * a_dim1]);
+ i__3 = jrow + jcol * a_dim1;
+ a[i__3].r = 0.f, a[i__3].i = 0.f;
+ i__3 = *n - jcol;
+ crot_(&i__3, &a[jrow - 1 + (jcol + 1) * a_dim1], lda, &a[jrow + (
+ jcol + 1) * a_dim1], lda, &c__, &s);
+ i__3 = *n + 2 - jrow;
+ crot_(&i__3, &b[jrow - 1 + (jrow - 1) * b_dim1], ldb, &b[jrow + (
+ jrow - 1) * b_dim1], ldb, &c__, &s);
+ if (ilq) {
+ r_cnjg(&q__1, &s);
+ crot_(n, &q[(jrow - 1) * q_dim1 + 1], &c__1, &q[jrow * q_dim1
+ + 1], &c__1, &c__, &q__1);
+ }
+
+/* Step 2: rotate columns JROW, JROW-1 to kill B(JROW,JROW-1) */
+
+ i__3 = jrow + jrow * b_dim1;
+ ctemp.r = b[i__3].r, ctemp.i = b[i__3].i;
+ clartg_(&ctemp, &b[jrow + (jrow - 1) * b_dim1], &c__, &s, &b[jrow
+ + jrow * b_dim1]);
+ i__3 = jrow + (jrow - 1) * b_dim1;
+ b[i__3].r = 0.f, b[i__3].i = 0.f;
+ crot_(ihi, &a[jrow * a_dim1 + 1], &c__1, &a[(jrow - 1) * a_dim1 +
+ 1], &c__1, &c__, &s);
+ i__3 = jrow - 1;
+ crot_(&i__3, &b[jrow * b_dim1 + 1], &c__1, &b[(jrow - 1) * b_dim1
+ + 1], &c__1, &c__, &s);
+ if (ilz) {
+ crot_(n, &z__[jrow * z_dim1 + 1], &c__1, &z__[(jrow - 1) *
+ z_dim1 + 1], &c__1, &c__, &s);
+ }
+/* L30: */
+ }
+/* L40: */
+ }
+
+ return 0;
+
+/* End of CGGHRD */
+
+} /* cgghrd_ */
diff --git a/contrib/libs/clapack/cgglse.c b/contrib/libs/clapack/cgglse.c
new file mode 100644
index 0000000000..a57f3d4e0b
--- /dev/null
+++ b/contrib/libs/clapack/cgglse.c
@@ -0,0 +1,342 @@
+/* cgglse.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cgglse_(integer *m, integer *n, integer *p, complex *a,
+ integer *lda, complex *b, integer *ldb, complex *c__, complex *d__,
+ complex *x, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+ complex q__1;
+
+ /* Local variables */
+ integer nb, mn, nr, nb1, nb2, nb3, nb4, lopt;
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *), ccopy_(integer *, complex *, integer *,
+ complex *, integer *), caxpy_(integer *, complex *, complex *,
+ integer *, complex *, integer *), ctrmv_(char *, char *, char *,
+ integer *, complex *, integer *, complex *, integer *), cggrqf_(integer *, integer *, integer *, complex
+ *, integer *, complex *, complex *, integer *, complex *, complex
+ *, integer *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer lwkmin;
+ extern /* Subroutine */ int cunmqr_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *), cunmrq_(char *,
+ char *, integer *, integer *, integer *, complex *, integer *,
+ complex *, complex *, integer *, complex *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int ctrtrs_(char *, char *, char *, integer *,
+ integer *, complex *, integer *, complex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGGLSE solves the linear equality-constrained least squares (LSE) */
+/* problem: */
+
+/* minimize || c - A*x ||_2 subject to B*x = d */
+
+/* where A is an M-by-N matrix, B is a P-by-N matrix, c is a given */
+/* M-vector, and d is a given P-vector. It is assumed that */
+/* P <= N <= M+P, and */
+
+/* rank(B) = P and rank( (A) ) = N. */
+/* ( (B) ) */
+
+/* These conditions ensure that the LSE problem has a unique solution, */
+/* which is obtained using a generalized RQ factorization of the */
+/* matrices (B, A) given by */
+
+/* B = (0 R)*Q, A = Z*T*Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrices A and B. N >= 0. */
+
+/* P (input) INTEGER */
+/* The number of rows of the matrix B. 0 <= P <= N <= M+P. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(M,N)-by-N upper trapezoidal matrix T. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) COMPLEX array, dimension (LDB,N) */
+/* On entry, the P-by-N matrix B. */
+/* On exit, the upper triangle of the subarray B(1:P,N-P+1:N) */
+/* contains the P-by-P upper triangular matrix R. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,P). */
+
+/* C (input/output) COMPLEX array, dimension (M) */
+/* On entry, C contains the right hand side vector for the */
+/* least squares part of the LSE problem. */
+/* On exit, the residual sum of squares for the solution */
+/* is given by the sum of squares of elements N-P+1 to M of */
+/* vector C. */
+
+/* D (input/output) COMPLEX array, dimension (P) */
+/* On entry, D contains the right hand side vector for the */
+/* constrained equation. */
+/* On exit, D is destroyed. */
+
+/* X (output) COMPLEX array, dimension (N) */
+/* On exit, X is the solution of the LSE problem. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M+N+P). */
+/* For optimum performance LWORK >= P+min(M,N)+max(M,N)*NB, */
+/* where NB is an upper bound for the optimal blocksizes for */
+/* CGEQRF, CGERQF, CUNMQR and CUNMRQ. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1: the upper triangular factor R associated with B in the */
+/* generalized RQ factorization of the pair (B, A) is */
+/* singular, so that rank(B) < P; the least squares */
+/* solution could not be computed. */
+/* = 2: the (N-P) by (N-P) part of the upper trapezoidal factor */
+/* T associated with A in the generalized RQ factorization */
+/* of the pair (B, A) is singular, so that */
+/* rank( (A) ) < N; the least squares solution could not */
+/* ( (B) ) */
+/* be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters (a negative *info = -i flags argument i) */
+
+ /* Parameter adjustments: shift array bases for 1-based Fortran indexing */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --c__;
+ --d__;
+ --x;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ mn = min(*m,*n);
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*p < 0 || *p > *n || *p < *n - *m) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldb < max(1,*p)) {
+ *info = -7;
+ }
+
+/* Calculate workspace: minimal is M+N+P, optimal uses ILAENV block sizes */
+
+ if (*info == 0) {
+ if (*n == 0) {
+ lwkmin = 1;
+ lwkopt = 1;
+ } else {
+ nb1 = ilaenv_(&c__1, "CGEQRF", " ", m, n, &c_n1, &c_n1);
+ nb2 = ilaenv_(&c__1, "CGERQF", " ", m, n, &c_n1, &c_n1);
+ nb3 = ilaenv_(&c__1, "CUNMQR", " ", m, n, p, &c_n1);
+ nb4 = ilaenv_(&c__1, "CUNMRQ", " ", m, n, p, &c_n1);
+/* Computing MAX */
+ i__1 = max(nb1,nb2), i__1 = max(i__1,nb3);
+ nb = max(i__1,nb4);
+ lwkmin = *m + *n + *p;
+ lwkopt = *p + mn + max(*m,*n) * nb;
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ if (*lwork < lwkmin && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGGLSE", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible (N = 0 leaves nothing to solve) */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Compute the GRQ factorization of matrices B and A: */
+
+/* B*Q' = ( 0 T12 ) P Z'*A*Q' = ( R11 R12 ) N-P */
+/* N-P P ( 0 R22 ) M+P-N */
+/* N-P P */
+
+/* where T12 and R11 are upper triangular, and Q and Z are */
+/* unitary. */
+
+ i__1 = *lwork - *p - mn;
+ cggrqf_(p, m, n, &b[b_offset], ldb, &work[1], &a[a_offset], lda, &work[*p
+ + 1], &work[*p + mn + 1], &i__1, info);
+ i__1 = *p + mn + 1;
+ lopt = work[i__1].r;
+
+/* Update c = Z'*c = ( c1 ) N-P */
+/* ( c2 ) M+P-N */
+
+ i__1 = max(1,*m);
+ i__2 = *lwork - *p - mn;
+ cunmqr_("Left", "Conjugate Transpose", m, &c__1, &mn, &a[a_offset], lda, &
+ work[*p + 1], &c__[1], &i__1, &work[*p + mn + 1], &i__2, info);
+/* Computing MAX */
+ i__3 = *p + mn + 1;
+ i__1 = lopt, i__2 = (integer) work[i__3].r;
+ lopt = max(i__1,i__2);
+
+/* Solve T12*x2 = d for x2 */
+
+ if (*p > 0) {
+ ctrtrs_("Upper", "No transpose", "Non-unit", p, &c__1, &b[(*n - *p +
+ 1) * b_dim1 + 1], ldb, &d__[1], p, info);
+
+ if (*info > 0) {
+ *info = 1;
+ return 0;
+ }
+
+/* Put the solution in X (the trailing P entries of x) */
+
+ ccopy_(p, &d__[1], &c__1, &x[*n - *p + 1], &c__1);
+
+/* Update c1 */
+
+ i__1 = *n - *p;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__1, p, &q__1, &a[(*n - *p + 1) * a_dim1 + 1]
+, lda, &d__[1], &c__1, &c_b1, &c__[1], &c__1);
+ }
+
+/* Solve R11*x1 = c1 for x1 */
+
+ if (*n > *p) {
+ i__1 = *n - *p;
+ i__2 = *n - *p;
+ ctrtrs_("Upper", "No transpose", "Non-unit", &i__1, &c__1, &a[
+ a_offset], lda, &c__[1], &i__2, info);
+
+ if (*info > 0) {
+ *info = 2;
+ return 0;
+ }
+
+/* Put the solutions in X */
+
+ i__1 = *n - *p;
+ ccopy_(&i__1, &c__[1], &c__1, &x[1], &c__1);
+ }
+
+/* Compute the residual vector: */
+
+ if (*m < *n) {
+ nr = *m + *p - *n;
+ if (nr > 0) {
+ i__1 = *n - *m;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &nr, &i__1, &q__1, &a[*n - *p + 1 + (*m +
+ 1) * a_dim1], lda, &d__[nr + 1], &c__1, &c_b1, &c__[*n - *
+ p + 1], &c__1);
+ }
+ } else {
+ nr = *p;
+ }
+ if (nr > 0) {
+ ctrmv_("Upper", "No transpose", "Non unit", &nr, &a[*n - *p + 1 + (*n
+ - *p + 1) * a_dim1], lda, &d__[1], &c__1);
+ q__1.r = -1.f, q__1.i = -0.f;
+ caxpy_(&nr, &q__1, &d__[1], &c__1, &c__[*n - *p + 1], &c__1);
+ }
+
+/* Backward transformation x = Q'*x */
+
+ i__1 = *lwork - *p - mn;
+ cunmrq_("Left", "Conjugate Transpose", n, &c__1, p, &b[b_offset], ldb, &
+ work[1], &x[1], n, &work[*p + mn + 1], &i__1, info);
+/* Computing MAX */
+ i__4 = *p + mn + 1;
+ i__2 = lopt, i__3 = (integer) work[i__4].r;
+ i__1 = *p + mn + max(i__2,i__3);
+ work[1].r = (real) i__1, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CGGLSE */
+
+} /* cgglse_ */
diff --git a/contrib/libs/clapack/cggqrf.c b/contrib/libs/clapack/cggqrf.c
new file mode 100644
index 0000000000..41c861a021
--- /dev/null
+++ b/contrib/libs/clapack/cggqrf.c
@@ -0,0 +1,268 @@
+/* cggqrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cggqrf_(integer *n, integer *m, integer *p, complex *a,
+        integer *lda, complex *taua, complex *b, integer *ldb, complex *taub,
+        complex *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer nb, nb1, nb2, nb3, lopt;
+    extern /* Subroutine */ int cgeqrf_(integer *, integer *, complex *,
+            integer *, complex *, complex *, integer *, integer *), cgerqf_(
+            integer *, integer *, complex *, integer *, complex *, complex *,
+            integer *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+            integer *, integer *);
+    extern /* Subroutine */ int cunmqr_(char *, char *, integer *, integer *,
+            integer *, complex *, integer *, complex *, complex *, integer *,
+            complex *, integer *, integer *);
+    integer lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGGQRF computes a generalized QR factorization of an N-by-M matrix A */
+/* and an N-by-P matrix B: */
+
+/* A = Q*R, B = Q*T*Z, */
+
+/* where Q is an N-by-N unitary matrix, Z is a P-by-P unitary matrix, */
+/* and R and T assume one of the forms: */
+
+/* if N >= M, R = ( R11 ) M , or if N < M, R = ( R11 R12 ) N, */
+/* ( 0 ) N-M N M-N */
+/* M */
+
+/* where R11 is upper triangular, and */
+
+/* if N <= P, T = ( 0 T12 ) N, or if N > P, T = ( T11 ) N-P, */
+/* P-N N ( T21 ) P */
+/* P */
+
+/* where T12 or T21 is upper triangular. */
+
+/* In particular, if B is square and nonsingular, the GQR factorization */
+/* of A and B implicitly gives the QR factorization of inv(B)*A: */
+
+/* inv(B)*A = Z'*(inv(T)*R) */
+
+/* where inv(B) denotes the inverse of the matrix B, and Z' denotes the */
+/* conjugate transpose of matrix Z. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of rows of the matrices A and B. N >= 0. */
+
+/* M (input) INTEGER */
+/* The number of columns of the matrix A. M >= 0. */
+
+/* P (input) INTEGER */
+/* The number of columns of the matrix B. P >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,M) */
+/* On entry, the N-by-M matrix A. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(N,M)-by-M upper trapezoidal matrix R (R is */
+/* upper triangular if N >= M); the elements below the diagonal, */
+/* with the array TAUA, represent the unitary matrix Q as a */
+/* product of min(N,M) elementary reflectors (see Further */
+/* Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAUA (output) COMPLEX array, dimension (min(N,M)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the unitary matrix Q (see Further Details). */
+
+/* B (input/output) COMPLEX array, dimension (LDB,P) */
+/* On entry, the N-by-P matrix B. */
+/* On exit, if N <= P, the upper triangle of the subarray */
+/* B(1:N,P-N+1:P) contains the N-by-N upper triangular matrix T; */
+/* if N > P, the elements on and above the (N-P)-th subdiagonal */
+/* contain the N-by-P upper trapezoidal matrix T; the remaining */
+/* elements, with the array TAUB, represent the unitary */
+/* matrix Z as a product of elementary reflectors (see Further */
+/* Details). */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* TAUB (output) COMPLEX array, dimension (min(N,P)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the unitary matrix Z (see Further Details). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N,M,P). */
+/* For optimum performance LWORK >= max(N,M,P)*max(NB1,NB2,NB3), */
+/* where NB1 is the optimal blocksize for the QR factorization */
+/* of an N-by-M matrix, NB2 is the optimal blocksize for the */
+/* RQ factorization of an N-by-P matrix, and NB3 is the optimal */
+/* blocksize for a call of CUNMQR. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(n,m). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - taua * v * v' */
+
+/* where taua is a complex scalar, and v is a complex vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i+1:n,i), */
+/* and taua in TAUA(i). */
+/* To form Q explicitly, use LAPACK subroutine CUNGQR. */
+/* To use Q to update another matrix, use LAPACK subroutine CUNMQR. */
+
+/* The matrix Z is represented as a product of elementary reflectors */
+
+/* Z = H(1) H(2) . . . H(k), where k = min(n,p). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - taub * v * v' */
+
+/* where taub is a complex scalar, and v is a complex vector with */
+/* v(p-k+i+1:p) = 0 and v(p-k+i) = 1; v(1:p-k+i-1) is stored on exit in */
+/* B(n-k+i,1:p-k+i-1), and taub in TAUB(i). */
+/* To form Z explicitly, use LAPACK subroutine CUNGRQ. */
+/* To use Z to update another matrix, use LAPACK subroutine CUNMRQ. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --taua;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --taub;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    nb1 = ilaenv_(&c__1, "CGEQRF", " ", n, m, &c_n1, &c_n1); /* blocksize for QR of N-by-M A */
+    nb2 = ilaenv_(&c__1, "CGERQF", " ", n, p, &c_n1, &c_n1); /* blocksize for RQ of N-by-P B */
+    nb3 = ilaenv_(&c__1, "CUNMQR", " ", n, m, p, &c_n1); /* blocksize for applying Q to B */
+/* Computing MAX */
+    i__1 = max(nb1,nb2);
+    nb = max(i__1,nb3);
+/* Computing MAX */
+    i__1 = max(*n,*m);
+    lwkopt = max(i__1,*p) * nb; /* optimal LWORK = max(N,M,P) * largest blocksize */
+    work[1].r = (real) lwkopt, work[1].i = 0.f;
+    lquery = *lwork == -1; /* LWORK = -1 requests a workspace-size query only */
+    if (*n < 0) {
+        *info = -1;
+    } else if (*m < 0) {
+        *info = -2;
+    } else if (*p < 0) {
+        *info = -3;
+    } else if (*lda < max(1,*n)) {
+        *info = -5;
+    } else if (*ldb < max(1,*n)) {
+        *info = -8;
+    } else /* if(complicated condition) */ {
+/* Computing MAX */
+        i__1 = max(1,*n), i__1 = max(i__1,*m);
+        if (*lwork < max(i__1,*p) && ! lquery) {
+            *info = -11;
+        }
+    }
+    if (*info != 0) {
+        i__1 = -(*info);
+        xerbla_("CGGQRF", &i__1);
+        return 0;
+    } else if (lquery) {
+        return 0;
+    }
+
+/* QR factorization of N-by-M matrix A: A = Q*R */
+
+    cgeqrf_(n, m, &a[a_offset], lda, &taua[1], &work[1], lwork, info);
+    lopt = work[1].r; /* optimal LWORK reported by CGEQRF */
+
+/* Update B := Q'*B. */
+
+    i__1 = min(*n,*m);
+    cunmqr_("Left", "Conjugate Transpose", n, p, &i__1, &a[a_offset], lda, &
+            taua[1], &b[b_offset], ldb, &work[1], lwork, info);
+/* Computing MAX */
+    i__1 = lopt, i__2 = (integer) work[1].r;
+    lopt = max(i__1,i__2);
+
+/* RQ factorization of N-by-P matrix B: B = T*Z. */
+
+    cgerqf_(n, p, &b[b_offset], ldb, &taub[1], &work[1], lwork, info);
+/* Computing MAX */
+    i__2 = lopt, i__3 = (integer) work[1].r;
+    i__1 = max(i__2,i__3);
+    work[1].r = (real) i__1, work[1].i = 0.f; /* return overall optimal LWORK in WORK(1) */
+
+    return 0;
+
+/* End of CGGQRF */
+
+} /* cggqrf_ */
diff --git a/contrib/libs/clapack/cggrqf.c b/contrib/libs/clapack/cggrqf.c
new file mode 100644
index 0000000000..2653aeac19
--- /dev/null
+++ b/contrib/libs/clapack/cggrqf.c
@@ -0,0 +1,269 @@
+/* cggrqf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cggrqf_(integer *m, integer *p, integer *n, complex *a,
+        integer *lda, complex *taua, complex *b, integer *ldb, complex *taub,
+        complex *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer nb, nb1, nb2, nb3, lopt;
+    extern /* Subroutine */ int cgeqrf_(integer *, integer *, complex *,
+            integer *, complex *, complex *, integer *, integer *), cgerqf_(
+            integer *, integer *, complex *, integer *, complex *, complex *,
+            integer *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+            integer *, integer *);
+    extern /* Subroutine */ int cunmrq_(char *, char *, integer *, integer *,
+            integer *, complex *, integer *, complex *, complex *, integer *,
+            complex *, integer *, integer *);
+    integer lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGGRQF computes a generalized RQ factorization of an M-by-N matrix A */
+/* and a P-by-N matrix B: */
+
+/* A = R*Q, B = Z*T*Q, */
+
+/* where Q is an N-by-N unitary matrix, Z is a P-by-P unitary */
+/* matrix, and R and T assume one of the forms: */
+
+/* if M <= N, R = ( 0 R12 ) M, or if M > N, R = ( R11 ) M-N, */
+/* N-M M ( R21 ) N */
+/* N */
+
+/* where R12 or R21 is upper triangular, and */
+
+/* if P >= N, T = ( T11 ) N , or if P < N, T = ( T11 T12 ) P, */
+/* ( 0 ) P-N P N-P */
+/* N */
+
+/* where T11 is upper triangular. */
+
+/* In particular, if B is square and nonsingular, the GRQ factorization */
+/* of A and B implicitly gives the RQ factorization of A*inv(B): */
+
+/* A*inv(B) = (R*inv(T))*Z' */
+
+/* where inv(B) denotes the inverse of the matrix B, and Z' denotes the */
+/* conjugate transpose of the matrix Z. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* P (input) INTEGER */
+/* The number of rows of the matrix B. P >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrices A and B. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, if M <= N, the upper triangle of the subarray */
+/* A(1:M,N-M+1:N) contains the M-by-M upper triangular matrix R; */
+/* if M > N, the elements on and above the (M-N)-th subdiagonal */
+/* contain the M-by-N upper trapezoidal matrix R; the remaining */
+/* elements, with the array TAUA, represent the unitary */
+/* matrix Q as a product of elementary reflectors (see Further */
+/* Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAUA (output) COMPLEX array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the unitary matrix Q (see Further Details). */
+
+/* B (input/output) COMPLEX array, dimension (LDB,N) */
+/* On entry, the P-by-N matrix B. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(P,N)-by-N upper trapezoidal matrix T (T is */
+/* upper triangular if P >= N); the elements below the diagonal, */
+/* with the array TAUB, represent the unitary matrix Z as a */
+/* product of elementary reflectors (see Further Details). */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,P). */
+
+/* TAUB (output) COMPLEX array, dimension (min(P,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the unitary matrix Z (see Further Details). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N,M,P). */
+/* For optimum performance LWORK >= max(N,M,P)*max(NB1,NB2,NB3), */
+/* where NB1 is the optimal blocksize for the RQ factorization */
+/* of an M-by-N matrix, NB2 is the optimal blocksize for the */
+/* QR factorization of a P-by-N matrix, and NB3 is the optimal */
+/* blocksize for a call of CUNMRQ. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO=-i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - taua * v * v' */
+
+/* where taua is a complex scalar, and v is a complex vector with */
+/* v(n-k+i+1:n) = 0 and v(n-k+i) = 1; v(1:n-k+i-1) is stored on exit in */
+/* A(m-k+i,1:n-k+i-1), and taua in TAUA(i). */
+/* To form Q explicitly, use LAPACK subroutine CUNGRQ. */
+/* To use Q to update another matrix, use LAPACK subroutine CUNMRQ. */
+
+/* The matrix Z is represented as a product of elementary reflectors */
+
+/* Z = H(1) H(2) . . . H(k), where k = min(p,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - taub * v * v' */
+
+/* where taub is a complex scalar, and v is a complex vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:p) is stored on exit in B(i+1:p,i), */
+/* and taub in TAUB(i). */
+/* To form Z explicitly, use LAPACK subroutine CUNGQR. */
+/* To use Z to update another matrix, use LAPACK subroutine CUNMQR. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --taua;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --taub;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    nb1 = ilaenv_(&c__1, "CGERQF", " ", m, n, &c_n1, &c_n1); /* blocksize for RQ of M-by-N A */
+    nb2 = ilaenv_(&c__1, "CGEQRF", " ", p, n, &c_n1, &c_n1); /* blocksize for QR of P-by-N B */
+    nb3 = ilaenv_(&c__1, "CUNMRQ", " ", m, n, p, &c_n1); /* blocksize for applying Q to B */
+/* Computing MAX */
+    i__1 = max(nb1,nb2);
+    nb = max(i__1,nb3);
+/* Computing MAX */
+    i__1 = max(*n,*m);
+    lwkopt = max(i__1,*p) * nb; /* optimal LWORK = max(N,M,P) * largest blocksize */
+    work[1].r = (real) lwkopt, work[1].i = 0.f;
+    lquery = *lwork == -1; /* LWORK = -1 requests a workspace-size query only */
+    if (*m < 0) {
+        *info = -1;
+    } else if (*p < 0) {
+        *info = -2;
+    } else if (*n < 0) {
+        *info = -3;
+    } else if (*lda < max(1,*m)) {
+        *info = -5;
+    } else if (*ldb < max(1,*p)) {
+        *info = -8;
+    } else /* if(complicated condition) */ {
+/* Computing MAX */
+        i__1 = max(1,*m), i__1 = max(i__1,*p);
+        if (*lwork < max(i__1,*n) && ! lquery) {
+            *info = -11;
+        }
+    }
+    if (*info != 0) {
+        i__1 = -(*info);
+        xerbla_("CGGRQF", &i__1);
+        return 0;
+    } else if (lquery) {
+        return 0;
+    }
+
+/* RQ factorization of M-by-N matrix A: A = R*Q */
+
+    cgerqf_(m, n, &a[a_offset], lda, &taua[1], &work[1], lwork, info);
+    lopt = work[1].r; /* optimal LWORK reported by CGERQF */
+
+/* Update B := B*Q' */
+
+    i__1 = min(*m,*n);
+/* Computing MAX */
+    i__2 = 1, i__3 = *m - *n + 1;
+    cunmrq_("Right", "Conjugate Transpose", p, n, &i__1, &a[max(i__2, i__3)+
+            a_dim1], lda, &taua[1], &b[b_offset], ldb, &work[1], lwork, info);
+/* Computing MAX */
+    i__1 = lopt, i__2 = (integer) work[1].r;
+    lopt = max(i__1,i__2);
+
+/* QR factorization of P-by-N matrix B: B = Z*T */
+
+    cgeqrf_(p, n, &b[b_offset], ldb, &taub[1], &work[1], lwork, info);
+/* Computing MAX */
+    i__2 = lopt, i__3 = (integer) work[1].r;
+    i__1 = max(i__2,i__3);
+    work[1].r = (real) i__1, work[1].i = 0.f; /* return overall optimal LWORK in WORK(1) */
+
+    return 0;
+
+/* End of CGGRQF */
+
+} /* cggrqf_ */
diff --git a/contrib/libs/clapack/cggsvd.c b/contrib/libs/clapack/cggsvd.c
new file mode 100644
index 0000000000..9415098a22
--- /dev/null
+++ b/contrib/libs/clapack/cggsvd.c
@@ -0,0 +1,403 @@
+/* cggsvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cggsvd_(char *jobu, char *jobv, char *jobq, integer *m,
+        integer *n, integer *p, integer *k, integer *l, complex *a, integer *
+        lda, complex *b, integer *ldb, real *alpha, real *beta, complex *u,
+        integer *ldu, complex *v, integer *ldv, complex *q, integer *ldq,
+        complex *work, real *rwork, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, u_dim1,
+            u_offset, v_dim1, v_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j;
+    real ulp;
+    integer ibnd;
+    real tola;
+    integer isub;
+    real tolb, unfl, temp, smax;
+    extern logical lsame_(char *, char *);
+    real anorm, bnorm;
+    logical wantq;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+            integer *);
+    logical wantu, wantv;
+    extern doublereal clange_(char *, integer *, integer *, complex *,
+            integer *, real *), slamch_(char *);
+    extern /* Subroutine */ int ctgsja_(char *, char *, char *, integer *,
+            integer *, integer *, integer *, integer *, complex *, integer *,
+            complex *, integer *, real *, real *, real *, real *, complex *,
+            integer *, complex *, integer *, complex *, integer *, complex *,
+            integer *, integer *);
+    integer ncycle;
+    extern /* Subroutine */ int xerbla_(char *, integer *), cggsvp_(
+            char *, char *, char *, integer *, integer *, integer *, complex *
+, integer *, complex *, integer *, real *, real *, integer *,
+            integer *, complex *, integer *, complex *, integer *, complex *,
+            integer *, integer *, real *, complex *, complex *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGGSVD computes the generalized singular value decomposition (GSVD) */
+/* of an M-by-N complex matrix A and P-by-N complex matrix B: */
+
+/* U'*A*Q = D1*( 0 R ), V'*B*Q = D2*( 0 R ) */
+
+/* where U, V and Q are unitary matrices, and Z' means the conjugate */
+/* transpose of Z. Let K+L = the effective numerical rank of the */
+/* matrix (A',B')', then R is a (K+L)-by-(K+L) nonsingular upper */
+/* triangular matrix, D1 and D2 are M-by-(K+L) and P-by-(K+L) "diagonal" */
+/* matrices and of the following structures, respectively: */
+
+/* If M-K-L >= 0, */
+
+/* K L */
+/* D1 = K ( I 0 ) */
+/* L ( 0 C ) */
+/* M-K-L ( 0 0 ) */
+
+/* K L */
+/* D2 = L ( 0 S ) */
+/* P-L ( 0 0 ) */
+
+/* N-K-L K L */
+/* ( 0 R ) = K ( 0 R11 R12 ) */
+/* L ( 0 0 R22 ) */
+/* where */
+
+/* C = diag( ALPHA(K+1), ... , ALPHA(K+L) ), */
+/* S = diag( BETA(K+1), ... , BETA(K+L) ), */
+/* C**2 + S**2 = I. */
+
+/* R is stored in A(1:K+L,N-K-L+1:N) on exit. */
+
+/* If M-K-L < 0, */
+
+/* K M-K K+L-M */
+/* D1 = K ( I 0 0 ) */
+/* M-K ( 0 C 0 ) */
+
+/* K M-K K+L-M */
+/* D2 = M-K ( 0 S 0 ) */
+/* K+L-M ( 0 0 I ) */
+/* P-L ( 0 0 0 ) */
+
+/* N-K-L K M-K K+L-M */
+/* ( 0 R ) = K ( 0 R11 R12 R13 ) */
+/* M-K ( 0 0 R22 R23 ) */
+/* K+L-M ( 0 0 0 R33 ) */
+
+/* where */
+
+/* C = diag( ALPHA(K+1), ... , ALPHA(M) ), */
+/* S = diag( BETA(K+1), ... , BETA(M) ), */
+/* C**2 + S**2 = I. */
+
+/* (R11 R12 R13 ) is stored in A(1:M, N-K-L+1:N), and R33 is stored */
+/* ( 0 R22 R23 ) */
+/* in B(M-K+1:L,N+M-K-L+1:N) on exit. */
+
+/* The routine computes C, S, R, and optionally the unitary */
+/* transformation matrices U, V and Q. */
+
+/* In particular, if B is an N-by-N nonsingular matrix, then the GSVD of */
+/* A and B implicitly gives the SVD of A*inv(B): */
+/* A*inv(B) = U*(D1*inv(D2))*V'. */
+/* If ( A',B')' has orthnormal columns, then the GSVD of A and B is also */
+/* equal to the CS decomposition of A and B. Furthermore, the GSVD can */
+/* be used to derive the solution of the eigenvalue problem: */
+/* A'*A x = lambda* B'*B x. */
+/* In some literature, the GSVD of A and B is presented in the form */
+/* U'*A*X = ( 0 D1 ), V'*B*X = ( 0 D2 ) */
+/* where U and V are orthogonal and X is nonsingular, and D1 and D2 are */
+/* ``diagonal''. The former GSVD form can be converted to the latter */
+/* form by taking the nonsingular matrix X as */
+
+/* X = Q*( I 0 ) */
+/* ( 0 inv(R) ) */
+
+/* Arguments */
+/* ========= */
+
+/* JOBU (input) CHARACTER*1 */
+/* = 'U': Unitary matrix U is computed; */
+/* = 'N': U is not computed. */
+
+/* JOBV (input) CHARACTER*1 */
+/* = 'V': Unitary matrix V is computed; */
+/* = 'N': V is not computed. */
+
+/* JOBQ (input) CHARACTER*1 */
+/* = 'Q': Unitary matrix Q is computed; */
+/* = 'N': Q is not computed. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrices A and B. N >= 0. */
+
+/* P (input) INTEGER */
+/* The number of rows of the matrix B. P >= 0. */
+
+/* K (output) INTEGER */
+/* L (output) INTEGER */
+/* On exit, K and L specify the dimension of the subblocks */
+/* described in Purpose. */
+/* K + L = effective numerical rank of (A',B')'. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A contains the triangular matrix R, or part of R. */
+/* See Purpose for details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) COMPLEX array, dimension (LDB,N) */
+/* On entry, the P-by-N matrix B. */
+/* On exit, B contains part of the triangular matrix R if */
+/* M-K-L < 0. See Purpose for details. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,P). */
+
+/* ALPHA (output) REAL array, dimension (N) */
+/* BETA (output) REAL array, dimension (N) */
+/* On exit, ALPHA and BETA contain the generalized singular */
+/* value pairs of A and B; */
+/* ALPHA(1:K) = 1, */
+/* BETA(1:K) = 0, */
+/* and if M-K-L >= 0, */
+/* ALPHA(K+1:K+L) = C, */
+/* BETA(K+1:K+L) = S, */
+/* or if M-K-L < 0, */
+/* ALPHA(K+1:M)= C, ALPHA(M+1:K+L)= 0 */
+/* BETA(K+1:M) = S, BETA(M+1:K+L) = 1 */
+/* and */
+/* ALPHA(K+L+1:N) = 0 */
+/* BETA(K+L+1:N) = 0 */
+
+/* U (output) COMPLEX array, dimension (LDU,M) */
+/* If JOBU = 'U', U contains the M-by-M unitary matrix U. */
+/* If JOBU = 'N', U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= max(1,M) if */
+/* JOBU = 'U'; LDU >= 1 otherwise. */
+
+/* V (output) COMPLEX array, dimension (LDV,P) */
+/* If JOBV = 'V', V contains the P-by-P unitary matrix V. */
+/* If JOBV = 'N', V is not referenced. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. LDV >= max(1,P) if */
+/* JOBV = 'V'; LDV >= 1 otherwise. */
+
+/* Q (output) COMPLEX array, dimension (LDQ,N) */
+/* If JOBQ = 'Q', Q contains the N-by-N unitary matrix Q. */
+/* If JOBQ = 'N', Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N) if */
+/* JOBQ = 'Q'; LDQ >= 1 otherwise. */
+
+/* WORK (workspace) COMPLEX array, dimension (max(3*N,M,P)+N) */
+
+/* RWORK (workspace) REAL array, dimension (2*N) */
+
+/* IWORK (workspace/output) INTEGER array, dimension (N) */
+/* On exit, IWORK stores the sorting information. More */
+/* precisely, the following loop will sort ALPHA */
+/* for I = K+1, min(M,K+L) */
+/* swap ALPHA(I) and ALPHA(IWORK(I)) */
+/* endfor */
+/* such that ALPHA(1) >= ALPHA(2) >= ... >= ALPHA(N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, the Jacobi-type procedure failed to */
+/* converge. For further details, see subroutine CTGSJA. */
+
+/* Internal Parameters */
+/* =================== */
+
+/* TOLA REAL */
+/* TOLB REAL */
+/* TOLA and TOLB are the thresholds to determine the effective */
+/* rank of (A',B')'. Generally, they are set to */
+/* TOLA = MAX(M,N)*norm(A)*MACHEPS, */
+/* TOLB = MAX(P,N)*norm(B)*MACHEPS. */
+/* The size of TOLA and TOLB may affect the size of backward */
+/* errors of the decomposition. */
+
+/* Further Details */
+/* =============== */
+
+/* 2-96 Based on modifications by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --alpha;
+    --beta;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1;
+    u -= u_offset;
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    --work;
+    --rwork;
+    --iwork;
+
+    /* Function Body */
+    wantu = lsame_(jobu, "U"); /* caller wants U accumulated */
+    wantv = lsame_(jobv, "V"); /* caller wants V accumulated */
+    wantq = lsame_(jobq, "Q"); /* caller wants Q accumulated */
+
+    *info = 0;
+    if (! (wantu || lsame_(jobu, "N"))) {
+        *info = -1;
+    } else if (! (wantv || lsame_(jobv, "N"))) {
+        *info = -2;
+    } else if (! (wantq || lsame_(jobq, "N"))) {
+        *info = -3;
+    } else if (*m < 0) {
+        *info = -4;
+    } else if (*n < 0) {
+        *info = -5;
+    } else if (*p < 0) {
+        *info = -6;
+    } else if (*lda < max(1,*m)) {
+        *info = -10;
+    } else if (*ldb < max(1,*p)) {
+        *info = -12;
+    } else if (*ldu < 1 || wantu && *ldu < *m) {
+        *info = -16;
+    } else if (*ldv < 1 || wantv && *ldv < *p) {
+        *info = -18;
+    } else if (*ldq < 1 || wantq && *ldq < *n) {
+        *info = -20;
+    }
+    if (*info != 0) {
+        i__1 = -(*info);
+        xerbla_("CGGSVD", &i__1);
+        return 0;
+    }
+
+/* Compute norms of A and B (NOTE(review): upstream comment says Frobenius, */
+/* but CLANGE('1',...) actually computes the one-norm) */
+
+    anorm = clange_("1", m, n, &a[a_offset], lda, &rwork[1]);
+    bnorm = clange_("1", p, n, &b[b_offset], ldb, &rwork[1]);
+
+/* Get machine precision and set up threshold for determining */
+/* the effective numerical rank of the matrices A and B. */
+
+    ulp = slamch_("Precision");
+    unfl = slamch_("Safe Minimum");
+    tola = max(*m,*n) * dmax(anorm,unfl) * ulp; /* rank threshold for A */
+    tolb = max(*p,*n) * dmax(bnorm,unfl) * ulp; /* rank threshold for B */
+
+    cggsvp_(jobu, jobv, jobq, m, p, n, &a[a_offset], lda, &b[b_offset], ldb, &
+            tola, &tolb, k, l, &u[u_offset], ldu, &v[v_offset], ldv, &q[
+            q_offset], ldq, &iwork[1], &rwork[1], &work[1], &work[*n + 1],
+            info);
+
+/* Compute the GSVD of two upper "triangular" matrices */
+
+    ctgsja_(jobu, jobv, jobq, m, p, n, k, l, &a[a_offset], lda, &b[b_offset],
+            ldb, &tola, &tolb, &alpha[1], &beta[1], &u[u_offset], ldu, &v[
+            v_offset], ldv, &q[q_offset], ldq, &work[1], &ncycle, info);
+
+/* Sort the singular values and store the pivot indices in IWORK */
+/* Copy ALPHA to RWORK, then sort ALPHA in RWORK */
+
+    scopy_(n, &alpha[1], &c__1, &rwork[1], &c__1);
+/* Computing MIN */
+    i__1 = *l, i__2 = *m - *k;
+    ibnd = min(i__1,i__2);
+    i__1 = ibnd;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Scan for largest ALPHA(K+I) */
+
+        isub = i__;
+        smax = rwork[*k + i__];
+        i__2 = ibnd;
+        for (j = i__ + 1; j <= i__2; ++j) {
+            temp = rwork[*k + j];
+            if (temp > smax) {
+                isub = j;
+                smax = temp;
+            }
+/* L10: */
+        }
+        if (isub != i__) {
+            rwork[*k + isub] = rwork[*k + i__];
+            rwork[*k + i__] = smax;
+            iwork[*k + i__] = *k + isub; /* record the swap so callers can permute */
+        } else {
+            iwork[*k + i__] = *k + i__; /* identity: ALPHA(K+I) already largest */
+        }
+/* L20: */
+    }
+
+    return 0;
+
+/* End of CGGSVD */
+
+} /* cggsvd_ */
diff --git a/contrib/libs/clapack/cggsvp.c b/contrib/libs/clapack/cggsvp.c
new file mode 100644
index 0000000000..6f21946f35
--- /dev/null
+++ b/contrib/libs/clapack/cggsvp.c
@@ -0,0 +1,530 @@
+/* cggsvp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+
+/* Subroutine */ int cggsvp_(char *jobu, char *jobv, char *jobq, integer *m,
+	 integer *p, integer *n, complex *a, integer *lda, complex *b, integer
+	*ldb, real *tola, real *tolb, integer *k, integer *l, complex *u,
+	integer *ldu, complex *v, integer *ldv, complex *q, integer *ldq,
+	integer *iwork, real *rwork, complex *tau, complex *work, integer *
+	info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, u_dim1,
+	    u_offset, v_dim1, v_offset, i__1, i__2, i__3;
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double r_imag(complex *);
+
+    /* Local variables */
+    integer i__, j;
+    extern logical lsame_(char *, char *);
+    logical wantq, wantu, wantv;
+    extern /* Subroutine */ int cgeqr2_(integer *, integer *, complex *,
+	    integer *, complex *, complex *, integer *), cgerq2_(integer *,
+	    integer *, complex *, integer *, complex *, complex *, integer *),
+	     cung2r_(integer *, integer *, integer *, complex *, integer *,
+	    complex *, complex *, integer *), cunm2r_(char *, char *, integer
+	    *, integer *, integer *, complex *, integer *, complex *, complex
+	    *, integer *, complex *, integer *), cunmr2_(char
+	    *, char *, integer *, integer *, integer *, complex *, integer *,
+	    complex *, complex *, integer *, complex *, integer *), cgeqpf_(integer *, integer *, complex *, integer *,
+	    integer *, complex *, complex *, real *, integer *), clacpy_(char
+	    *, integer *, integer *, complex *, integer *, complex *, integer
+	    *), claset_(char *, integer *, integer *, complex *,
+	    complex *, complex *, integer *), xerbla_(char *, integer
+	    *), clapmt_(logical *, integer *, integer *, complex *,
+	    integer *, integer *);
+    logical forwrd;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CGGSVP computes unitary matrices U, V and Q such that */
+
+/*                   N-K-L  K    L */
+/*   U'*A*Q =     K ( 0    A12  A13 )  if M-K-L >= 0; */
+/*                L ( 0     0   A23 ) */
+/*            M-K-L ( 0     0    0  ) */
+
+/*                   N-K-L  K    L */
+/*          =     K ( 0    A12  A13 )  if M-K-L < 0; */
+/*              M-K ( 0     0   A23 ) */
+
+/*                 N-K-L  K    L */
+/*   V'*B*Q =   L ( 0     0   B13 ) */
+/*            P-L ( 0     0    0  ) */
+
+/*  where the K-by-K matrix A12 and L-by-L matrix B13 are nonsingular */
+/*  upper triangular; A23 is L-by-L upper triangular if M-K-L >= 0, */
+/*  otherwise A23 is (M-K)-by-L upper trapezoidal.  K+L = the effective */
+/*  numerical rank of the (M+P)-by-N matrix (A',B')'.  Z' denotes the */
+/*  conjugate transpose of Z. */
+
+/*  This decomposition is the preprocessing step for computing the */
+/*  Generalized Singular Value Decomposition (GSVD), see subroutine */
+/*  CGGSVD. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOBU    (input) CHARACTER*1 */
+/*          = 'U':  Unitary matrix U is computed; */
+/*          = 'N':  U is not computed. */
+
+/*  JOBV    (input) CHARACTER*1 */
+/*          = 'V':  Unitary matrix V is computed; */
+/*          = 'N':  V is not computed. */
+
+/*  JOBQ    (input) CHARACTER*1 */
+/*          = 'Q':  Unitary matrix Q is computed; */
+/*          = 'N':  Q is not computed. */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix A.  M >= 0. */
+
+/*  P       (input) INTEGER */
+/*          The number of rows of the matrix B.  P >= 0. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrices A and B.  N >= 0. */
+
+/*  A       (input/output) COMPLEX array, dimension (LDA,N) */
+/*          On entry, the M-by-N matrix A. */
+/*          On exit, A contains the triangular (or trapezoidal) matrix */
+/*          described in the Purpose section. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A. LDA >= max(1,M). */
+
+/*  B       (input/output) COMPLEX array, dimension (LDB,N) */
+/*          On entry, the P-by-N matrix B. */
+/*          On exit, B contains the triangular matrix described in */
+/*          the Purpose section. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B. LDB >= max(1,P). */
+
+/*  TOLA    (input) REAL */
+/*  TOLB    (input) REAL */
+/*          TOLA and TOLB are the thresholds to determine the effective */
+/*          numerical rank of matrix B and a subblock of A. Generally, */
+/*          they are set to */
+/*             TOLA = MAX(M,N)*norm(A)*MACHEPS, */
+/*             TOLB = MAX(P,N)*norm(B)*MACHEPS. */
+/*          The size of TOLA and TOLB may affect the size of backward */
+/*          errors of the decomposition. */
+
+/*  K       (output) INTEGER */
+/*  L       (output) INTEGER */
+/*          On exit, K and L specify the dimension of the subblocks */
+/*          described in Purpose section. */
+/*          K + L = effective numerical rank of (A',B')'. */
+
+/*  U       (output) COMPLEX array, dimension (LDU,M) */
+/*          If JOBU = 'U', U contains the unitary matrix U. */
+/*          If JOBU = 'N', U is not referenced. */
+
+/*  LDU     (input) INTEGER */
+/*          The leading dimension of the array U. LDU >= max(1,M) if */
+/*          JOBU = 'U'; LDU >= 1 otherwise. */
+
+/*  V       (output) COMPLEX array, dimension (LDV,P) */
+/*          If JOBV = 'V', V contains the unitary matrix V. */
+/*          If JOBV = 'N', V is not referenced. */
+
+/*  LDV     (input) INTEGER */
+/*          The leading dimension of the array V. LDV >= max(1,P) if */
+/*          JOBV = 'V'; LDV >= 1 otherwise. */
+
+/*  Q       (output) COMPLEX array, dimension (LDQ,N) */
+/*          If JOBQ = 'Q', Q contains the unitary matrix Q. */
+/*          If JOBQ = 'N', Q is not referenced. */
+
+/*  LDQ     (input) INTEGER */
+/*          The leading dimension of the array Q. LDQ >= max(1,N) if */
+/*          JOBQ = 'Q'; LDQ >= 1 otherwise. */
+
+/*  IWORK   (workspace) INTEGER array, dimension (N) */
+
+/*  RWORK   (workspace) REAL array, dimension (2*N) */
+
+/*  TAU     (workspace) COMPLEX array, dimension (N) */
+
+/*  WORK    (workspace) COMPLEX array, dimension (max(3*N,M,P)) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  The subroutine uses LAPACK subroutine CGEQPF for the QR factorization */
+/*  with column pivoting to detect the effective numerical rank of the */
+/*  a matrix. It may be replaced by a better rank determination strategy. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Statement Functions .. */
+/*     .. */
+/*     .. Statement Function definitions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters */
+
+/*     Shift each array pointer so that Fortran-style 1-based indexing */
+/*     (a[i + j*a_dim1], iwork[i], ...) can be used below; standard f2c */
+/*     translation idiom. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1;
+    u -= u_offset;
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    --iwork;
+    --rwork;
+    --tau;
+    --work;
+
+/*     Decode the job options; FORWRD selects the forward permutation */
+/*     direction in the CLAPMT calls below. */
+
+    /* Function Body */
+    wantu = lsame_(jobu, "U");
+    wantv = lsame_(jobv, "V");
+    wantq = lsame_(jobq, "Q");
+    forwrd = TRUE_;
+
+    *info = 0;
+    if (! (wantu || lsame_(jobu, "N"))) {
+	*info = -1;
+    } else if (! (wantv || lsame_(jobv, "N"))) {
+	*info = -2;
+    } else if (! (wantq || lsame_(jobq, "N"))) {
+	*info = -3;
+    } else if (*m < 0) {
+	*info = -4;
+    } else if (*p < 0) {
+	*info = -5;
+    } else if (*n < 0) {
+	*info = -6;
+    } else if (*lda < max(1,*m)) {
+	*info = -8;
+    } else if (*ldb < max(1,*p)) {
+	*info = -10;
+    } else if (*ldu < 1 || wantu && *ldu < *m) {
+	*info = -16;
+    } else if (*ldv < 1 || wantv && *ldv < *p) {
+	*info = -18;
+    } else if (*ldq < 1 || wantq && *ldq < *n) {
+	*info = -20;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CGGSVP", &i__1);
+	return 0;
+    }
+
+/*     QR with column pivoting of B: B*P = V*( S11 S12 ) */
+/*                                           (  0   0  ) */
+
+/*     Zero IWORK first so that CGEQPF treats every column of B as a */
+/*     free (non-fixed) pivot candidate. */
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	iwork[i__] = 0;
+/* L10: */
+    }
+    cgeqpf_(p, n, &b[b_offset], ldb, &iwork[1], &tau[1], &work[1], &rwork[1],
+	    info);
+
+/*     Update A := A*P */
+
+    clapmt_(&forwrd, m, n, &a[a_offset], lda, &iwork[1]);
+
+/*     Determine the effective rank of matrix B. */
+
+/*     L counts the diagonal entries of the pivoted R factor whose */
+/*     1-norm modulus |Re| + |Im| exceeds TOLB. */
+
+    *l = 0;
+    i__1 = min(*p,*n);
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = i__ + i__ * b_dim1;
+	if ((r__1 = b[i__2].r, dabs(r__1)) + (r__2 = r_imag(&b[i__ + i__ *
+		b_dim1]), dabs(r__2)) > *tolb) {
+	    ++(*l);
+	}
+/* L20: */
+    }
+
+    if (wantv) {
+
+/*        Copy the details of V, and form V. */
+
+	claset_("Full", p, p, &c_b1, &c_b1, &v[v_offset], ldv);
+	if (*p > 1) {
+	    i__1 = *p - 1;
+	    clacpy_("Lower", &i__1, n, &b[b_dim1 + 2], ldb, &v[v_dim1 + 2],
+		    ldv);
+	}
+	i__1 = min(*p,*n);
+	cung2r_(p, p, &i__1, &v[v_offset], ldv, &tau[1], &work[1], info);
+    }
+
+/*     Clean up B */
+
+/*     Zero the strict lower triangle of the leading L-by-L block and */
+/*     the rows below row L, leaving the triangular factor only. */
+
+    i__1 = *l - 1;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *l;
+	for (i__ = j + 1; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * b_dim1;
+	    b[i__3].r = 0.f, b[i__3].i = 0.f;
+/* L30: */
+	}
+/* L40: */
+    }
+    if (*p > *l) {
+	i__1 = *p - *l;
+	claset_("Full", &i__1, n, &c_b1, &c_b1, &b[*l + 1 + b_dim1], ldb);
+    }
+
+    if (wantq) {
+
+/*        Set Q = I and Update Q := Q*P */
+
+	claset_("Full", n, n, &c_b1, &c_b2, &q[q_offset], ldq);
+	clapmt_(&forwrd, n, n, &q[q_offset], ldq, &iwork[1]);
+    }
+
+    if (*p >= *l && *n != *l) {
+
+/*        RQ factorization of ( S11 S12 ) = ( 0 S12 )*Z */
+
+	cgerq2_(l, n, &b[b_offset], ldb, &tau[1], &work[1], info);
+
+/*        Update A := A*Z' */
+
+	cunmr2_("Right", "Conjugate transpose", m, n, l, &b[b_offset], ldb, &
+		tau[1], &a[a_offset], lda, &work[1], info);
+	if (wantq) {
+
+/*           Update Q := Q*Z' */
+
+	    cunmr2_("Right", "Conjugate transpose", n, n, l, &b[b_offset],
+		    ldb, &tau[1], &q[q_offset], ldq, &work[1], info);
+	}
+
+/*        Clean up B */
+
+/*        Keep only the upper triangle of the trailing L-by-L block */
+/*        B(1:L, N-L+1:N); everything to its left is set to zero. */
+
+	i__1 = *n - *l;
+	claset_("Full", l, &i__1, &c_b1, &c_b1, &b[b_offset], ldb);
+	i__1 = *n;
+	for (j = *n - *l + 1; j <= i__1; ++j) {
+	    i__2 = *l;
+	    for (i__ = j - *n + *l + 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * b_dim1;
+		b[i__3].r = 0.f, b[i__3].i = 0.f;
+/* L50: */
+	    }
+/* L60: */
+	}
+
+    }
+
+/*     Let      N-L     L */
+/*       A = ( A11    A12 ) M, */
+
+/*     then the following does the complete QR decomposition of A11: */
+
+/*              A11 = U*(  0  T12 )*P1' */
+/*                      (  0   0  ) */
+
+    i__1 = *n - *l;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	iwork[i__] = 0;
+/* L70: */
+    }
+    i__1 = *n - *l;
+    cgeqpf_(m, &i__1, &a[a_offset], lda, &iwork[1], &tau[1], &work[1], &rwork[
+	    1], info);
+
+/*     Determine the effective rank of A11 */
+
+/*     K counts the diagonal entries of the pivoted factor of A11 whose */
+/*     1-norm modulus exceeds TOLA (same scheme as for L above). */
+
+    *k = 0;
+/* Computing MIN */
+    i__2 = *m, i__3 = *n - *l;
+    i__1 = min(i__2,i__3);
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = i__ + i__ * a_dim1;
+	if ((r__1 = a[i__2].r, dabs(r__1)) + (r__2 = r_imag(&a[i__ + i__ *
+		a_dim1]), dabs(r__2)) > *tola) {
+	    ++(*k);
+	}
+/* L80: */
+    }
+
+/*     Update A12 := U'*A12, where A12 = A( 1:M, N-L+1:N ) */
+
+/* Computing MIN */
+    i__2 = *m, i__3 = *n - *l;
+    i__1 = min(i__2,i__3);
+    cunm2r_("Left", "Conjugate transpose", m, l, &i__1, &a[a_offset], lda, &
+	    tau[1], &a[(*n - *l + 1) * a_dim1 + 1], lda, &work[1], info);
+
+    if (wantu) {
+
+/*        Copy the details of U, and form U */
+
+	claset_("Full", m, m, &c_b1, &c_b1, &u[u_offset], ldu);
+	if (*m > 1) {
+	    i__1 = *m - 1;
+	    i__2 = *n - *l;
+	    clacpy_("Lower", &i__1, &i__2, &a[a_dim1 + 2], lda, &u[u_dim1 + 2]
+, ldu);
+	}
+/* Computing MIN */
+	i__2 = *m, i__3 = *n - *l;
+	i__1 = min(i__2,i__3);
+	cung2r_(m, m, &i__1, &u[u_offset], ldu, &tau[1], &work[1], info);
+    }
+
+    if (wantq) {
+
+/*        Update Q( 1:N, 1:N-L )  = Q( 1:N, 1:N-L )*P1 */
+
+	i__1 = *n - *l;
+	clapmt_(&forwrd, n, &i__1, &q[q_offset], ldq, &iwork[1]);
+    }
+
+/*     Clean up A: set the strictly lower triangular part of */
+/*     A(1:K, 1:K) = 0, and A( K+1:M, 1:N-L ) = 0. */
+
+    i__1 = *k - 1;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *k;
+	for (i__ = j + 1; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * a_dim1;
+	    a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L90: */
+	}
+/* L100: */
+    }
+    if (*m > *k) {
+	i__1 = *m - *k;
+	i__2 = *n - *l;
+	claset_("Full", &i__1, &i__2, &c_b1, &c_b1, &a[*k + 1 + a_dim1], lda);
+    }
+
+    if (*n - *l > *k) {
+
+/*        RQ factorization of ( T11 T12 ) = ( 0 T12 )*Z1 */
+
+	i__1 = *n - *l;
+	cgerq2_(k, &i__1, &a[a_offset], lda, &tau[1], &work[1], info);
+
+	if (wantq) {
+
+/*           Update Q( 1:N,1:N-L )  = Q( 1:N,1:N-L )*Z1' */
+
+	    i__1 = *n - *l;
+	    cunmr2_("Right", "Conjugate transpose", n, &i__1, k, &a[a_offset],
+		     lda, &tau[1], &q[q_offset], ldq, &work[1], info);
+	}
+
+/*        Clean up A */
+
+/*        Keep only the upper triangle of A(1:K, N-L-K+1:N-L); zero the */
+/*        columns to its left and the subdiagonal entries inside it. */
+
+	i__1 = *n - *l - *k;
+	claset_("Full", k, &i__1, &c_b1, &c_b1, &a[a_offset], lda);
+	i__1 = *n - *l;
+	for (j = *n - *l - *k + 1; j <= i__1; ++j) {
+	    i__2 = *k;
+	    for (i__ = j - *n + *l + *k + 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L110: */
+	    }
+/* L120: */
+	}
+
+    }
+
+    if (*m > *k) {
+
+/*        QR factorization of A( K+1:M,N-L+1:N ) */
+
+	i__1 = *m - *k;
+	cgeqr2_(&i__1, l, &a[*k + 1 + (*n - *l + 1) * a_dim1], lda, &tau[1], &
+		work[1], info);
+
+	if (wantu) {
+
+/*           Update U(:,K+1:M) := U(:,K+1:M)*U1 */
+
+	    i__1 = *m - *k;
+/* Computing MIN */
+	    i__3 = *m - *k;
+	    i__2 = min(i__3,*l);
+	    cunm2r_("Right", "No transpose", m, &i__1, &i__2, &a[*k + 1 + (*n
+		    - *l + 1) * a_dim1], lda, &tau[1], &u[(*k + 1) * u_dim1 +
+		    1], ldu, &work[1], info);
+	}
+
+/*        Clean up */
+
+/*        Zero the entries below the diagonal of the newly formed */
+/*        triangular/trapezoidal block in A( K+1:M, N-L+1:N ). */
+
+	i__1 = *n;
+	for (j = *n - *l + 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = j - *n + *k + *l + 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L130: */
+	    }
+/* L140: */
+	}
+
+    }
+
+    return 0;
+
+/*     End of CGGSVP */
+
+} /* cggsvp_ */
diff --git a/contrib/libs/clapack/cgtcon.c b/contrib/libs/clapack/cgtcon.c
new file mode 100644
index 0000000000..462a36a57c
--- /dev/null
+++ b/contrib/libs/clapack/cgtcon.c
@@ -0,0 +1,207 @@
+/* cgtcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cgtcon_(char *norm, integer *n, complex *dl, complex *
+	d__, complex *du, complex *du2, integer *ipiv, real *anorm, real *
+	rcond, complex *work, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer i__, kase, kase1;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+	    *, integer *, integer *), xerbla_(char *, integer *);
+    real ainvnm;
+    logical onenrm;
+    extern /* Subroutine */ int cgttrs_(char *, integer *, integer *, complex
+	    *, complex *, complex *, complex *, integer *, complex *, integer
+	    *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CGTCON estimates the reciprocal of the condition number of a complex */
+/*  tridiagonal matrix A using the LU factorization as computed by */
+/*  CGTTRF. */
+
+/*  An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/*  condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/*  Arguments */
+/*  ========= */
+
+/*  NORM    (input) CHARACTER*1 */
+/*          Specifies whether the 1-norm condition number or the */
+/*          infinity-norm condition number is required: */
+/*          = '1' or 'O':  1-norm; */
+/*          = 'I':         Infinity-norm. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  DL      (input) COMPLEX array, dimension (N-1) */
+/*          The (n-1) multipliers that define the matrix L from the */
+/*          LU factorization of A as computed by CGTTRF. */
+
+/*  D       (input) COMPLEX array, dimension (N) */
+/*          The n diagonal elements of the upper triangular matrix U from */
+/*          the LU factorization of A. */
+
+/*  DU      (input) COMPLEX array, dimension (N-1) */
+/*          The (n-1) elements of the first superdiagonal of U. */
+
+/*  DU2     (input) COMPLEX array, dimension (N-2) */
+/*          The (n-2) elements of the second superdiagonal of U. */
+
+/*  IPIV    (input) INTEGER array, dimension (N) */
+/*          The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/*          interchanged with row IPIV(i).  IPIV(i) will always be either */
+/*          i or i+1; IPIV(i) = i indicates a row interchange was not */
+/*          required. */
+
+/*  ANORM   (input) REAL */
+/*          If NORM = '1' or 'O', the 1-norm of the original matrix A. */
+/*          If NORM = 'I', the infinity-norm of the original matrix A. */
+
+/*  RCOND   (output) REAL */
+/*          The reciprocal of the condition number of the matrix A, */
+/*          computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/*          estimate of the 1-norm of inv(A) computed in this routine. */
+
+/*  WORK    (workspace) COMPLEX array, dimension (2*N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input arguments. */
+
+/*     Shift pointers for Fortran-style 1-based indexing (f2c idiom). */
+
+    /* Parameter adjustments */
+    --work;
+    --ipiv;
+    --du2;
+    --du;
+    --d__;
+    --dl;
+
+/*     ONENRM is true for the 1-norm ('1' or 'O'); otherwise the */
+/*     infinity-norm ('I') is requested. */
+
+    /* Function Body */
+    *info = 0;
+    onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+    if (! onenrm && ! lsame_(norm, "I")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*anorm < 0.f) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CGTCON", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    *rcond = 0.f;
+    if (*n == 0) {
+	*rcond = 1.f;
+	return 0;
+    } else if (*anorm == 0.f) {
+	return 0;
+    }
+
+/*     Check that D(1:N) is non-zero. */
+
+/*     A zero diagonal entry of U means A is exactly singular; return */
+/*     with RCOND = 0 (set above) and INFO = 0. */
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = i__;
+	if (d__[i__2].r == 0.f && d__[i__2].i == 0.f) {
+	    return 0;
+	}
+/* L10: */
+    }
+
+/*     KASE1 selects which system the estimator asks us to solve first: */
+/*     inv(A) for the 1-norm, inv(A') for the infinity-norm. */
+
+    ainvnm = 0.f;
+    if (onenrm) {
+	kase1 = 1;
+    } else {
+	kase1 = 2;
+    }
+    kase = 0;
+
+/*     Reverse-communication loop with CLACN2: on each return with */
+/*     KASE != 0 we apply inv(A) or inv(A') to the vector in WORK using */
+/*     the CGTTRF factors, until CLACN2 signals convergence (KASE = 0). */
+
+L20:
+    clacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+    if (kase != 0) {
+	if (kase == kase1) {
+
+/*           Multiply by inv(U)*inv(L). */
+
+	    cgttrs_("No transpose", n, &c__1, &dl[1], &d__[1], &du[1], &du2[1]
+, &ipiv[1], &work[1], n, info);
+	} else {
+
+/*           Multiply by inv(L')*inv(U'). */
+
+	    cgttrs_("Conjugate transpose", n, &c__1, &dl[1], &d__[1], &du[1],
+		     &du2[1], &ipiv[1], &work[1], n, info);
+	}
+	goto L20;
+    }
+
+/*     Compute the estimate of the reciprocal condition number. */
+
+    if (ainvnm != 0.f) {
+	*rcond = 1.f / ainvnm / *anorm;
+    }
+
+    return 0;
+
+/*     End of CGTCON */
+
+} /* cgtcon_ */
diff --git a/contrib/libs/clapack/cgtrfs.c b/contrib/libs/clapack/cgtrfs.c
new file mode 100644
index 0000000000..85898224ae
--- /dev/null
+++ b/contrib/libs/clapack/cgtrfs.c
@@ -0,0 +1,553 @@
+/* cgtrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b18 = -1.f;
+static real c_b19 = 1.f;
+static complex c_b26 = {1.f,0.f};
+
+/* Subroutine */ int cgtrfs_(char *trans, integer *n, integer *nrhs, complex *
+ dl, complex *d__, complex *du, complex *dlf, complex *df, complex *
+ duf, complex *du2, integer *ipiv, complex *b, integer *ldb, complex *
+ x, integer *ldx, real *ferr, real *berr, complex *work, real *rwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6, i__7, i__8, i__9;
+ real r__1, r__2, r__3, r__4, r__5, r__6, r__7, r__8, r__9, r__10, r__11,
+ r__12, r__13, r__14;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer i__, j;
+ real s;
+ integer nz;
+ real eps;
+ integer kase;
+ real safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), caxpy_(integer *, complex *, complex *,
+ integer *, complex *, integer *);
+ integer count;
+ extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+ *, integer *, integer *), clagtm_(char *, integer *, integer *,
+ real *, complex *, complex *, complex *, complex *, integer *,
+ real *, complex *, integer *);
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran;
+ char transn[1];
+ extern /* Subroutine */ int cgttrs_(char *, integer *, integer *, complex
+ *, complex *, complex *, complex *, integer *, complex *, integer
+ *, integer *);
+ char transt[1];
+ real lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGTRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is tridiagonal, and provides */
+/* error bounds and backward error estimates for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input) COMPLEX array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of A. */
+
+/* D (input) COMPLEX array, dimension (N) */
+/* The diagonal elements of A. */
+
+/* DU (input) COMPLEX array, dimension (N-1) */
+/* The (n-1) superdiagonal elements of A. */
+
+/* DLF (input) COMPLEX array, dimension (N-1) */
+/* The (n-1) multipliers that define the matrix L from the */
+/* LU factorization of A as computed by CGTTRF. */
+
+/* DF (input) COMPLEX array, dimension (N) */
+/* The n diagonal elements of the upper triangular matrix U from */
+/* the LU factorization of A. */
+
+/* DUF (input) COMPLEX array, dimension (N-1) */
+/* The (n-1) elements of the first superdiagonal of U. */
+
+/* DU2 (input) COMPLEX array, dimension (N-2) */
+/* The (n-2) elements of the second superdiagonal of U. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/* interchanged with row IPIV(i). IPIV(i) will always be either */
+/* i or i+1; IPIV(i) = i indicates a row interchange was not */
+/* required. */
+
+/* B (input) COMPLEX array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by CGTTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --dl;
+ --d__;
+ --du;
+ --dlf;
+ --df;
+ --duf;
+ --du2;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -13;
+ } else if (*ldx < max(1,*n)) {
+ *info = -15;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGTRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.f;
+ berr[j] = 0.f;
+/* L10: */
+ }
+ return 0;
+ }
+
+ if (notran) {
+ *(unsigned char *)transn = 'N';
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transn = 'C';
+ *(unsigned char *)transt = 'N';
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = 4;
+ eps = slamch_("Epsilon");
+ safmin = slamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A, A**T, or A**H, depending on TRANS. */
+
+ ccopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+ clagtm_(trans, n, &c__1, &c_b18, &dl[1], &d__[1], &du[1], &x[j *
+ x_dim1 + 1], ldx, &c_b19, &work[1], n);
+
+/* Compute abs(op(A))*abs(x) + abs(b) for use in the backward */
+/* error bound. */
+
+ if (notran) {
+ if (*n == 1) {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * x_dim1 + 1;
+ rwork[1] = (r__1 = b[i__2].r, dabs(r__1)) + (r__2 = r_imag(&b[
+ j * b_dim1 + 1]), dabs(r__2)) + ((r__3 = d__[1].r,
+ dabs(r__3)) + (r__4 = r_imag(&d__[1]), dabs(r__4))) *
+ ((r__5 = x[i__3].r, dabs(r__5)) + (r__6 = r_imag(&x[j
+ * x_dim1 + 1]), dabs(r__6)));
+ } else {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * x_dim1 + 1;
+ i__4 = j * x_dim1 + 2;
+ rwork[1] = (r__1 = b[i__2].r, dabs(r__1)) + (r__2 = r_imag(&b[
+ j * b_dim1 + 1]), dabs(r__2)) + ((r__3 = d__[1].r,
+ dabs(r__3)) + (r__4 = r_imag(&d__[1]), dabs(r__4))) *
+ ((r__5 = x[i__3].r, dabs(r__5)) + (r__6 = r_imag(&x[j
+ * x_dim1 + 1]), dabs(r__6))) + ((r__7 = du[1].r, dabs(
+ r__7)) + (r__8 = r_imag(&du[1]), dabs(r__8))) * ((
+ r__9 = x[i__4].r, dabs(r__9)) + (r__10 = r_imag(&x[j *
+ x_dim1 + 2]), dabs(r__10)));
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ - 1;
+ i__5 = i__ - 1 + j * x_dim1;
+ i__6 = i__;
+ i__7 = i__ + j * x_dim1;
+ i__8 = i__;
+ i__9 = i__ + 1 + j * x_dim1;
+ rwork[i__] = (r__1 = b[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&b[i__ + j * b_dim1]), dabs(r__2)) + ((
+ r__3 = dl[i__4].r, dabs(r__3)) + (r__4 = r_imag(&
+ dl[i__ - 1]), dabs(r__4))) * ((r__5 = x[i__5].r,
+ dabs(r__5)) + (r__6 = r_imag(&x[i__ - 1 + j *
+ x_dim1]), dabs(r__6))) + ((r__7 = d__[i__6].r,
+ dabs(r__7)) + (r__8 = r_imag(&d__[i__]), dabs(
+ r__8))) * ((r__9 = x[i__7].r, dabs(r__9)) + (
+ r__10 = r_imag(&x[i__ + j * x_dim1]), dabs(r__10))
+ ) + ((r__11 = du[i__8].r, dabs(r__11)) + (r__12 =
+ r_imag(&du[i__]), dabs(r__12))) * ((r__13 = x[
+ i__9].r, dabs(r__13)) + (r__14 = r_imag(&x[i__ +
+ 1 + j * x_dim1]), dabs(r__14)));
+/* L30: */
+ }
+ i__2 = *n + j * b_dim1;
+ i__3 = *n - 1;
+ i__4 = *n - 1 + j * x_dim1;
+ i__5 = *n;
+ i__6 = *n + j * x_dim1;
+ rwork[*n] = (r__1 = b[i__2].r, dabs(r__1)) + (r__2 = r_imag(&
+ b[*n + j * b_dim1]), dabs(r__2)) + ((r__3 = dl[i__3]
+ .r, dabs(r__3)) + (r__4 = r_imag(&dl[*n - 1]), dabs(
+ r__4))) * ((r__5 = x[i__4].r, dabs(r__5)) + (r__6 =
+ r_imag(&x[*n - 1 + j * x_dim1]), dabs(r__6))) + ((
+ r__7 = d__[i__5].r, dabs(r__7)) + (r__8 = r_imag(&d__[
+ *n]), dabs(r__8))) * ((r__9 = x[i__6].r, dabs(r__9))
+ + (r__10 = r_imag(&x[*n + j * x_dim1]), dabs(r__10)));
+ }
+ } else {
+ if (*n == 1) {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * x_dim1 + 1;
+ rwork[1] = (r__1 = b[i__2].r, dabs(r__1)) + (r__2 = r_imag(&b[
+ j * b_dim1 + 1]), dabs(r__2)) + ((r__3 = d__[1].r,
+ dabs(r__3)) + (r__4 = r_imag(&d__[1]), dabs(r__4))) *
+ ((r__5 = x[i__3].r, dabs(r__5)) + (r__6 = r_imag(&x[j
+ * x_dim1 + 1]), dabs(r__6)));
+ } else {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * x_dim1 + 1;
+ i__4 = j * x_dim1 + 2;
+ rwork[1] = (r__1 = b[i__2].r, dabs(r__1)) + (r__2 = r_imag(&b[
+ j * b_dim1 + 1]), dabs(r__2)) + ((r__3 = d__[1].r,
+ dabs(r__3)) + (r__4 = r_imag(&d__[1]), dabs(r__4))) *
+ ((r__5 = x[i__3].r, dabs(r__5)) + (r__6 = r_imag(&x[j
+ * x_dim1 + 1]), dabs(r__6))) + ((r__7 = dl[1].r, dabs(
+ r__7)) + (r__8 = r_imag(&dl[1]), dabs(r__8))) * ((
+ r__9 = x[i__4].r, dabs(r__9)) + (r__10 = r_imag(&x[j *
+ x_dim1 + 2]), dabs(r__10)));
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ - 1;
+ i__5 = i__ - 1 + j * x_dim1;
+ i__6 = i__;
+ i__7 = i__ + j * x_dim1;
+ i__8 = i__;
+ i__9 = i__ + 1 + j * x_dim1;
+ rwork[i__] = (r__1 = b[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&b[i__ + j * b_dim1]), dabs(r__2)) + ((
+ r__3 = du[i__4].r, dabs(r__3)) + (r__4 = r_imag(&
+ du[i__ - 1]), dabs(r__4))) * ((r__5 = x[i__5].r,
+ dabs(r__5)) + (r__6 = r_imag(&x[i__ - 1 + j *
+ x_dim1]), dabs(r__6))) + ((r__7 = d__[i__6].r,
+ dabs(r__7)) + (r__8 = r_imag(&d__[i__]), dabs(
+ r__8))) * ((r__9 = x[i__7].r, dabs(r__9)) + (
+ r__10 = r_imag(&x[i__ + j * x_dim1]), dabs(r__10))
+ ) + ((r__11 = dl[i__8].r, dabs(r__11)) + (r__12 =
+ r_imag(&dl[i__]), dabs(r__12))) * ((r__13 = x[
+ i__9].r, dabs(r__13)) + (r__14 = r_imag(&x[i__ +
+ 1 + j * x_dim1]), dabs(r__14)));
+/* L40: */
+ }
+ i__2 = *n + j * b_dim1;
+ i__3 = *n - 1;
+ i__4 = *n - 1 + j * x_dim1;
+ i__5 = *n;
+ i__6 = *n + j * x_dim1;
+ rwork[*n] = (r__1 = b[i__2].r, dabs(r__1)) + (r__2 = r_imag(&
+ b[*n + j * b_dim1]), dabs(r__2)) + ((r__3 = du[i__3]
+ .r, dabs(r__3)) + (r__4 = r_imag(&du[*n - 1]), dabs(
+ r__4))) * ((r__5 = x[i__4].r, dabs(r__5)) + (r__6 =
+ r_imag(&x[*n - 1 + j * x_dim1]), dabs(r__6))) + ((
+ r__7 = d__[i__5].r, dabs(r__7)) + (r__8 = r_imag(&d__[
+ *n]), dabs(r__8))) * ((r__9 = x[i__6].r, dabs(r__9))
+ + (r__10 = r_imag(&x[*n + j * x_dim1]), dabs(r__10)));
+ }
+ }
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ s = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+/* Computing MAX */
+ i__3 = i__;
+ r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2))) / rwork[i__];
+ s = dmax(r__3,r__4);
+ } else {
+/* Computing MAX */
+ i__3 = i__;
+ r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + safe1) / (rwork[i__]
+ + safe1);
+ s = dmax(r__3,r__4);
+ }
+/* L50: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ cgttrs_(trans, n, &c__1, &dlf[1], &df[1], &duf[1], &du2[1], &ipiv[
+ 1], &work[1], n, info);
+ caxpy_(n, &c_b26, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use CLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+ i__3 = i__;
+ rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+ i__];
+ } else {
+ i__3 = i__;
+ rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+ i__] + safe1;
+ }
+/* L60: */
+ }
+
+ kase = 0;
+L70:
+ clacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)**H). */
+
+ cgttrs_(transt, n, &c__1, &dlf[1], &df[1], &duf[1], &du2[1], &
+ ipiv[1], &work[1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L80: */
+ }
+ } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L90: */
+ }
+ cgttrs_(transn, n, &c__1, &dlf[1], &df[1], &duf[1], &du2[1], &
+ ipiv[1], &work[1], n, info);
+ }
+ goto L70;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * x_dim1;
+ r__3 = lstres, r__4 = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[i__ + j * x_dim1]), dabs(r__2));
+ lstres = dmax(r__3,r__4);
+/* L100: */
+ }
+ if (lstres != 0.f) {
+ ferr[j] /= lstres;
+ }
+
+/* L110: */
+ }
+
+ return 0;
+
+/* End of CGTRFS */
+
+} /* cgtrfs_ */
diff --git a/contrib/libs/clapack/cgtsv.c b/contrib/libs/clapack/cgtsv.c
new file mode 100644
index 0000000000..57a1e32d87
--- /dev/null
+++ b/contrib/libs/clapack/cgtsv.c
@@ -0,0 +1,287 @@
+/* cgtsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cgtsv_(integer *n, integer *nrhs, complex *dl, complex *
+ d__, complex *du, complex *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7;
+ real r__1, r__2, r__3, r__4;
+ complex q__1, q__2, q__3, q__4, q__5;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+ void c_div(complex *, complex *, complex *);
+
+ /* Local variables */
+ integer j, k;
+ complex temp, mult;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGTSV solves the equation */
+
+/* A*X = B, */
+
+/* where A is an N-by-N tridiagonal matrix, by Gaussian elimination with */
+/* partial pivoting. */
+
+/* Note that the equation A'*X = B may be solved by interchanging the */
+/* order of the arguments DU and DL. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input/output) COMPLEX array, dimension (N-1) */
+/* On entry, DL must contain the (n-1) subdiagonal elements of */
+/* A. */
+/* On exit, DL is overwritten by the (n-2) elements of the */
+/* second superdiagonal of the upper triangular matrix U from */
+/* the LU factorization of A, in DL(1), ..., DL(n-2). */
+
+/* D (input/output) COMPLEX array, dimension (N) */
+/* On entry, D must contain the diagonal elements of A. */
+/* On exit, D is overwritten by the n diagonal elements of U. */
+
+/* DU (input/output) COMPLEX array, dimension (N-1) */
+/* On entry, DU must contain the (n-1) superdiagonal elements */
+/* of A. */
+/* On exit, DU is overwritten by the (n-1) elements of the first */
+/* superdiagonal of U. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) is exactly zero, and the solution */
+/* has not been computed. The factorization has not been */
+/* completed unless i = N. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --dl;
+ --d__;
+ --du;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*nrhs < 0) {
+ *info = -2;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGTSV ", &i__1);
+ return 0;
+ }
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ i__1 = *n - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k;
+ if (dl[i__2].r == 0.f && dl[i__2].i == 0.f) {
+
+/* Subdiagonal is zero, no elimination is required. */
+
+ i__2 = k;
+ if (d__[i__2].r == 0.f && d__[i__2].i == 0.f) {
+
+/* Diagonal is zero: set INFO = K and return; a unique */
+/* solution can not be found. */
+
+ *info = k;
+ return 0;
+ }
+ } else /* if(complicated condition) */ {
+ i__2 = k;
+ i__3 = k;
+ if ((r__1 = d__[i__2].r, dabs(r__1)) + (r__2 = r_imag(&d__[k]),
+ dabs(r__2)) >= (r__3 = dl[i__3].r, dabs(r__3)) + (r__4 =
+ r_imag(&dl[k]), dabs(r__4))) {
+
+/* No row interchange required */
+
+ c_div(&q__1, &dl[k], &d__[k]);
+ mult.r = q__1.r, mult.i = q__1.i;
+ i__2 = k + 1;
+ i__3 = k + 1;
+ i__4 = k;
+ q__2.r = mult.r * du[i__4].r - mult.i * du[i__4].i, q__2.i =
+ mult.r * du[i__4].i + mult.i * du[i__4].r;
+ q__1.r = d__[i__3].r - q__2.r, q__1.i = d__[i__3].i - q__2.i;
+ d__[i__2].r = q__1.r, d__[i__2].i = q__1.i;
+ i__2 = *nrhs;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = k + 1 + j * b_dim1;
+ i__4 = k + 1 + j * b_dim1;
+ i__5 = k + j * b_dim1;
+ q__2.r = mult.r * b[i__5].r - mult.i * b[i__5].i, q__2.i =
+ mult.r * b[i__5].i + mult.i * b[i__5].r;
+ q__1.r = b[i__4].r - q__2.r, q__1.i = b[i__4].i - q__2.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L10: */
+ }
+ if (k < *n - 1) {
+ i__2 = k;
+ dl[i__2].r = 0.f, dl[i__2].i = 0.f;
+ }
+ } else {
+
+/* Interchange rows K and K+1 */
+
+ c_div(&q__1, &d__[k], &dl[k]);
+ mult.r = q__1.r, mult.i = q__1.i;
+ i__2 = k;
+ i__3 = k;
+ d__[i__2].r = dl[i__3].r, d__[i__2].i = dl[i__3].i;
+ i__2 = k + 1;
+ temp.r = d__[i__2].r, temp.i = d__[i__2].i;
+ i__2 = k + 1;
+ i__3 = k;
+ q__2.r = mult.r * temp.r - mult.i * temp.i, q__2.i = mult.r *
+ temp.i + mult.i * temp.r;
+ q__1.r = du[i__3].r - q__2.r, q__1.i = du[i__3].i - q__2.i;
+ d__[i__2].r = q__1.r, d__[i__2].i = q__1.i;
+ if (k < *n - 1) {
+ i__2 = k;
+ i__3 = k + 1;
+ dl[i__2].r = du[i__3].r, dl[i__2].i = du[i__3].i;
+ i__2 = k + 1;
+ q__2.r = -mult.r, q__2.i = -mult.i;
+ i__3 = k;
+ q__1.r = q__2.r * dl[i__3].r - q__2.i * dl[i__3].i,
+ q__1.i = q__2.r * dl[i__3].i + q__2.i * dl[i__3]
+ .r;
+ du[i__2].r = q__1.r, du[i__2].i = q__1.i;
+ }
+ i__2 = k;
+ du[i__2].r = temp.r, du[i__2].i = temp.i;
+ i__2 = *nrhs;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = k + j * b_dim1;
+ temp.r = b[i__3].r, temp.i = b[i__3].i;
+ i__3 = k + j * b_dim1;
+ i__4 = k + 1 + j * b_dim1;
+ b[i__3].r = b[i__4].r, b[i__3].i = b[i__4].i;
+ i__3 = k + 1 + j * b_dim1;
+ i__4 = k + 1 + j * b_dim1;
+ q__2.r = mult.r * b[i__4].r - mult.i * b[i__4].i, q__2.i =
+ mult.r * b[i__4].i + mult.i * b[i__4].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i - q__2.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L20: */
+ }
+ }
+ }
+/* L30: */
+ }
+ i__1 = *n;
+ if (d__[i__1].r == 0.f && d__[i__1].i == 0.f) {
+ *info = *n;
+ return 0;
+ }
+
+/* Back solve with the matrix U from the factorization. */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n + j * b_dim1;
+ c_div(&q__1, &b[*n + j * b_dim1], &d__[*n]);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ if (*n > 1) {
+ i__2 = *n - 1 + j * b_dim1;
+ i__3 = *n - 1 + j * b_dim1;
+ i__4 = *n - 1;
+ i__5 = *n + j * b_dim1;
+ q__3.r = du[i__4].r * b[i__5].r - du[i__4].i * b[i__5].i, q__3.i =
+ du[i__4].r * b[i__5].i + du[i__4].i * b[i__5].r;
+ q__2.r = b[i__3].r - q__3.r, q__2.i = b[i__3].i - q__3.i;
+ c_div(&q__1, &q__2, &d__[*n - 1]);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ }
+ for (k = *n - 2; k >= 1; --k) {
+ i__2 = k + j * b_dim1;
+ i__3 = k + j * b_dim1;
+ i__4 = k;
+ i__5 = k + 1 + j * b_dim1;
+ q__4.r = du[i__4].r * b[i__5].r - du[i__4].i * b[i__5].i, q__4.i =
+ du[i__4].r * b[i__5].i + du[i__4].i * b[i__5].r;
+ q__3.r = b[i__3].r - q__4.r, q__3.i = b[i__3].i - q__4.i;
+ i__6 = k;
+ i__7 = k + 2 + j * b_dim1;
+ q__5.r = dl[i__6].r * b[i__7].r - dl[i__6].i * b[i__7].i, q__5.i =
+ dl[i__6].r * b[i__7].i + dl[i__6].i * b[i__7].r;
+ q__2.r = q__3.r - q__5.r, q__2.i = q__3.i - q__5.i;
+ c_div(&q__1, &q__2, &d__[k]);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L40: */
+ }
+/* L50: */
+ }
+
+ return 0;
+
+/* End of CGTSV */
+
+} /* cgtsv_ */
diff --git a/contrib/libs/clapack/cgtsvx.c b/contrib/libs/clapack/cgtsvx.c
new file mode 100644
index 0000000000..9e99fb002e
--- /dev/null
+++ b/contrib/libs/clapack/cgtsvx.c
@@ -0,0 +1,347 @@
+/* cgtsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cgtsvx_(char *fact, char *trans, integer *n, integer *
+ nrhs, complex *dl, complex *d__, complex *du, complex *dlf, complex *
+ df, complex *duf, complex *du2, integer *ipiv, complex *b, integer *
+ ldb, complex *x, integer *ldx, real *rcond, real *ferr, real *berr,
+ complex *work, real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1;
+
+ /* Local variables */
+ char norm[1];
+ extern logical lsame_(char *, char *);
+ real anorm;
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *);
+ extern doublereal slamch_(char *), clangt_(char *, integer *,
+ complex *, complex *, complex *);
+ logical nofact;
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), cgtcon_(char *,
+ integer *, complex *, complex *, complex *, complex *, integer *,
+ real *, real *, complex *, integer *), xerbla_(char *,
+ integer *), cgtrfs_(char *, integer *, integer *, complex
+ *, complex *, complex *, complex *, complex *, complex *, complex
+ *, integer *, complex *, integer *, complex *, integer *, real *,
+ real *, complex *, real *, integer *), cgttrf_(integer *,
+ complex *, complex *, complex *, complex *, integer *, integer *);
+ logical notran;
+ extern /* Subroutine */ int cgttrs_(char *, integer *, integer *, complex
+ *, complex *, complex *, complex *, integer *, complex *, integer
+ *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGTSVX uses the LU factorization to compute the solution to a complex */
+/* system of linear equations A * X = B, A**T * X = B, or A**H * X = B, */
+/* where A is a tridiagonal matrix of order N and X and B are N-by-NRHS */
+/* matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the LU decomposition is used to factor the matrix A */
+/* as A = L * U, where L is a product of permutation and unit lower */
+/* bidiagonal matrices and U is upper triangular with nonzeros in */
+/* only the main diagonal and first two superdiagonals. */
+
+/* 2. If some U(i,i)=0, so that U is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of A has been */
+/* supplied on entry. */
+/* = 'F': DLF, DF, DUF, DU2, and IPIV contain the factored form */
+/* of A; DL, D, DU, DLF, DF, DUF, DU2 and IPIV will not */
+/* be modified. */
+/* = 'N': The matrix will be copied to DLF, DF, and DUF */
+/* and factored. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input) COMPLEX array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of A. */
+
+/* D (input) COMPLEX array, dimension (N) */
+/* The n diagonal elements of A. */
+
+/* DU (input) COMPLEX array, dimension (N-1) */
+/* The (n-1) superdiagonal elements of A. */
+
+/* DLF (input or output) COMPLEX array, dimension (N-1) */
+/* If FACT = 'F', then DLF is an input argument and on entry */
+/* contains the (n-1) multipliers that define the matrix L from */
+/* the LU factorization of A as computed by CGTTRF. */
+
+/* If FACT = 'N', then DLF is an output argument and on exit */
+/* contains the (n-1) multipliers that define the matrix L from */
+/* the LU factorization of A. */
+
+/* DF (input or output) COMPLEX array, dimension (N) */
+/* If FACT = 'F', then DF is an input argument and on entry */
+/* contains the n diagonal elements of the upper triangular */
+/* matrix U from the LU factorization of A. */
+
+/* If FACT = 'N', then DF is an output argument and on exit */
+/* contains the n diagonal elements of the upper triangular */
+/* matrix U from the LU factorization of A. */
+
+/* DUF (input or output) COMPLEX array, dimension (N-1) */
+/* If FACT = 'F', then DUF is an input argument and on entry */
+/* contains the (n-1) elements of the first superdiagonal of U. */
+
+/* If FACT = 'N', then DUF is an output argument and on exit */
+/* contains the (n-1) elements of the first superdiagonal of U. */
+
+/* DU2 (input or output) COMPLEX array, dimension (N-2) */
+/* If FACT = 'F', then DU2 is an input argument and on entry */
+/* contains the (n-2) elements of the second superdiagonal of */
+/* U. */
+
+/* If FACT = 'N', then DU2 is an output argument and on exit */
+/* contains the (n-2) elements of the second superdiagonal of */
+/* U. */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains the pivot indices from the LU factorization of A as */
+/* computed by CGTTRF. */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains the pivot indices from the LU factorization of A; */
+/* row i of the matrix was interchanged with row IPIV(i). */
+/* IPIV(i) will always be either i or i+1; IPIV(i) = i indicates */
+/* a row interchange was not required. */
+
+/* B (input) COMPLEX array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) REAL */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A. If RCOND is less than the machine precision (in */
+/* particular, if RCOND = 0), the matrix is singular to working */
+/* precision. This condition is indicated by a return code of */
+/* INFO > 0. */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: U(i,i) is exactly zero. The factorization */
+/* has not been completed unless i = N, but the */
+/* factor U is exactly singular, so the solution */
+/* and error bounds could not be computed. */
+/* RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --dl;
+ --d__;
+ --du;
+ --dlf;
+ --df;
+ --duf;
+ --du2;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ notran = lsame_(trans, "N");
+ if (! nofact && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*ldb < max(1,*n)) {
+ *info = -14;
+ } else if (*ldx < max(1,*n)) {
+ *info = -16;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGTSVX", &i__1);
+ return 0;
+ }
+
+ if (nofact) {
+
+/* Compute the LU factorization of A. */
+
+ ccopy_(n, &d__[1], &c__1, &df[1], &c__1);
+ if (*n > 1) {
+ i__1 = *n - 1;
+ ccopy_(&i__1, &dl[1], &c__1, &dlf[1], &c__1);
+ i__1 = *n - 1;
+ ccopy_(&i__1, &du[1], &c__1, &duf[1], &c__1);
+ }
+ cgttrf_(n, &dlf[1], &df[1], &duf[1], &du2[1], &ipiv[1], info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.f;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ if (notran) {
+ *(unsigned char *)norm = '1';
+ } else {
+ *(unsigned char *)norm = 'I';
+ }
+ anorm = clangt_(norm, n, &dl[1], &d__[1], &du[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ cgtcon_(norm, n, &dlf[1], &df[1], &duf[1], &du2[1], &ipiv[1], &anorm,
+ rcond, &work[1], info);
+
+/* Compute the solution vectors X. */
+
+ clacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ cgttrs_(trans, n, nrhs, &dlf[1], &df[1], &duf[1], &du2[1], &ipiv[1], &x[
+ x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+ cgtrfs_(trans, n, nrhs, &dl[1], &d__[1], &du[1], &dlf[1], &df[1], &duf[1],
+ &du2[1], &ipiv[1], &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1]
+, &berr[1], &work[1], &rwork[1], info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < slamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ return 0;
+
+/* End of CGTSVX */
+
+} /* cgtsvx_ */
diff --git a/contrib/libs/clapack/cgttrf.c b/contrib/libs/clapack/cgttrf.c
new file mode 100644
index 0000000000..b6a012b200
--- /dev/null
+++ b/contrib/libs/clapack/cgttrf.c
@@ -0,0 +1,274 @@
+/* cgttrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cgttrf_(integer *n, complex *dl, complex *d__, complex *
+ du, complex *du2, integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ real r__1, r__2, r__3, r__4;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+ void c_div(complex *, complex *, complex *);
+
+ /* Local variables */
+ integer i__;
+ complex fact, temp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGTTRF computes an LU factorization of a complex tridiagonal matrix A */
+/* using elimination with partial pivoting and row interchanges. */
+
+/* The factorization has the form */
+/* A = L * U */
+/* where L is a product of permutation and unit lower bidiagonal */
+/* matrices and U is upper triangular with nonzeros in only the main */
+/* diagonal and first two superdiagonals. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* DL (input/output) COMPLEX array, dimension (N-1) */
+/* On entry, DL must contain the (n-1) sub-diagonal elements of */
+/* A. */
+
+/* On exit, DL is overwritten by the (n-1) multipliers that */
+/* define the matrix L from the LU factorization of A. */
+
+/* D (input/output) COMPLEX array, dimension (N) */
+/* On entry, D must contain the diagonal elements of A. */
+
+/* On exit, D is overwritten by the n diagonal elements of the */
+/* upper triangular matrix U from the LU factorization of A. */
+
+/* DU (input/output) COMPLEX array, dimension (N-1) */
+/* On entry, DU must contain the (n-1) super-diagonal elements */
+/* of A. */
+
+/* On exit, DU is overwritten by the (n-1) elements of the first */
+/* super-diagonal of U. */
+
+/* DU2 (output) COMPLEX array, dimension (N-2) */
+/* On exit, DU2 is overwritten by the (n-2) elements of the */
+/* second super-diagonal of U. */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/* interchanged with row IPIV(i). IPIV(i) will always be either */
+/* i or i+1; IPIV(i) = i indicates a row interchange was not */
+/* required. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, U(k,k) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and division by zero will occur if it is used */
+/* to solve a system of equations. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --ipiv;
+ --du2;
+ --du;
+ --d__;
+ --dl;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ i__1 = -(*info);
+ xerbla_("CGTTRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Initialize IPIV(i) = i and DU2(i) = 0 */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ipiv[i__] = i__;
+/* L10: */
+ }
+ i__1 = *n - 2;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ du2[i__2].r = 0.f, du2[i__2].i = 0.f;
+/* L20: */
+ }
+
+ i__1 = *n - 2;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ if ((r__1 = d__[i__2].r, dabs(r__1)) + (r__2 = r_imag(&d__[i__]),
+ dabs(r__2)) >= (r__3 = dl[i__3].r, dabs(r__3)) + (r__4 =
+ r_imag(&dl[i__]), dabs(r__4))) {
+
+/* No row interchange required, eliminate DL(I) */
+
+ i__2 = i__;
+ if ((r__1 = d__[i__2].r, dabs(r__1)) + (r__2 = r_imag(&d__[i__]),
+ dabs(r__2)) != 0.f) {
+ c_div(&q__1, &dl[i__], &d__[i__]);
+ fact.r = q__1.r, fact.i = q__1.i;
+ i__2 = i__;
+ dl[i__2].r = fact.r, dl[i__2].i = fact.i;
+ i__2 = i__ + 1;
+ i__3 = i__ + 1;
+ i__4 = i__;
+ q__2.r = fact.r * du[i__4].r - fact.i * du[i__4].i, q__2.i =
+ fact.r * du[i__4].i + fact.i * du[i__4].r;
+ q__1.r = d__[i__3].r - q__2.r, q__1.i = d__[i__3].i - q__2.i;
+ d__[i__2].r = q__1.r, d__[i__2].i = q__1.i;
+ }
+ } else {
+
+/* Interchange rows I and I+1, eliminate DL(I) */
+
+ c_div(&q__1, &d__[i__], &dl[i__]);
+ fact.r = q__1.r, fact.i = q__1.i;
+ i__2 = i__;
+ i__3 = i__;
+ d__[i__2].r = dl[i__3].r, d__[i__2].i = dl[i__3].i;
+ i__2 = i__;
+ dl[i__2].r = fact.r, dl[i__2].i = fact.i;
+ i__2 = i__;
+ temp.r = du[i__2].r, temp.i = du[i__2].i;
+ i__2 = i__;
+ i__3 = i__ + 1;
+ du[i__2].r = d__[i__3].r, du[i__2].i = d__[i__3].i;
+ i__2 = i__ + 1;
+ i__3 = i__ + 1;
+ q__2.r = fact.r * d__[i__3].r - fact.i * d__[i__3].i, q__2.i =
+ fact.r * d__[i__3].i + fact.i * d__[i__3].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i - q__2.i;
+ d__[i__2].r = q__1.r, d__[i__2].i = q__1.i;
+ i__2 = i__;
+ i__3 = i__ + 1;
+ du2[i__2].r = du[i__3].r, du2[i__2].i = du[i__3].i;
+ i__2 = i__ + 1;
+ q__2.r = -fact.r, q__2.i = -fact.i;
+ i__3 = i__ + 1;
+ q__1.r = q__2.r * du[i__3].r - q__2.i * du[i__3].i, q__1.i =
+ q__2.r * du[i__3].i + q__2.i * du[i__3].r;
+ du[i__2].r = q__1.r, du[i__2].i = q__1.i;
+ ipiv[i__] = i__ + 1;
+ }
+/* L30: */
+ }
+ if (*n > 1) {
+ i__ = *n - 1;
+ i__1 = i__;
+ i__2 = i__;
+ if ((r__1 = d__[i__1].r, dabs(r__1)) + (r__2 = r_imag(&d__[i__]),
+ dabs(r__2)) >= (r__3 = dl[i__2].r, dabs(r__3)) + (r__4 =
+ r_imag(&dl[i__]), dabs(r__4))) {
+ i__1 = i__;
+ if ((r__1 = d__[i__1].r, dabs(r__1)) + (r__2 = r_imag(&d__[i__]),
+ dabs(r__2)) != 0.f) {
+ c_div(&q__1, &dl[i__], &d__[i__]);
+ fact.r = q__1.r, fact.i = q__1.i;
+ i__1 = i__;
+ dl[i__1].r = fact.r, dl[i__1].i = fact.i;
+ i__1 = i__ + 1;
+ i__2 = i__ + 1;
+ i__3 = i__;
+ q__2.r = fact.r * du[i__3].r - fact.i * du[i__3].i, q__2.i =
+ fact.r * du[i__3].i + fact.i * du[i__3].r;
+ q__1.r = d__[i__2].r - q__2.r, q__1.i = d__[i__2].i - q__2.i;
+ d__[i__1].r = q__1.r, d__[i__1].i = q__1.i;
+ }
+ } else {
+ c_div(&q__1, &d__[i__], &dl[i__]);
+ fact.r = q__1.r, fact.i = q__1.i;
+ i__1 = i__;
+ i__2 = i__;
+ d__[i__1].r = dl[i__2].r, d__[i__1].i = dl[i__2].i;
+ i__1 = i__;
+ dl[i__1].r = fact.r, dl[i__1].i = fact.i;
+ i__1 = i__;
+ temp.r = du[i__1].r, temp.i = du[i__1].i;
+ i__1 = i__;
+ i__2 = i__ + 1;
+ du[i__1].r = d__[i__2].r, du[i__1].i = d__[i__2].i;
+ i__1 = i__ + 1;
+ i__2 = i__ + 1;
+ q__2.r = fact.r * d__[i__2].r - fact.i * d__[i__2].i, q__2.i =
+ fact.r * d__[i__2].i + fact.i * d__[i__2].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i - q__2.i;
+ d__[i__1].r = q__1.r, d__[i__1].i = q__1.i;
+ ipiv[i__] = i__ + 1;
+ }
+ }
+
+/* Check for a zero on the diagonal of U. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ if ((r__1 = d__[i__2].r, dabs(r__1)) + (r__2 = r_imag(&d__[i__]),
+ dabs(r__2)) == 0.f) {
+ *info = i__;
+ goto L50;
+ }
+/* L40: */
+ }
+L50:
+
+ return 0;
+
+/* End of CGTTRF */
+
+} /* cgttrf_ */
diff --git a/contrib/libs/clapack/cgttrs.c b/contrib/libs/clapack/cgttrs.c
new file mode 100644
index 0000000000..8bd8c7aeb1
--- /dev/null
+++ b/contrib/libs/clapack/cgttrs.c
@@ -0,0 +1,192 @@
+/* cgttrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cgttrs_(char *trans, integer *n, integer *nrhs, complex *
+	dl, complex *d__, complex *du, complex *du2, integer *ipiv, complex *
+	b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer j, jb, nb;
+    extern /* Subroutine */ int cgtts2_(integer *, integer *, integer *,
+	    complex *, complex *, complex *, complex *, integer *, complex *,
+	    integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer itrans;
+    logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGTTRS solves one of the systems of equations */
+/* A * X = B, A**T * X = B, or A**H * X = B, */
+/* with a tridiagonal matrix A using the LU factorization computed */
+/* by CGTTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations. */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input) COMPLEX array, dimension (N-1) */
+/* The (n-1) multipliers that define the matrix L from the */
+/* LU factorization of A. */
+
+/* D (input) COMPLEX array, dimension (N) */
+/* The n diagonal elements of the upper triangular matrix U from */
+/* the LU factorization of A. */
+
+/* DU (input) COMPLEX array, dimension (N-1) */
+/* The (n-1) elements of the first super-diagonal of U. */
+
+/* DU2 (input) COMPLEX array, dimension (N-2) */
+/* The (n-2) elements of the second super-diagonal of U. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/* interchanged with row IPIV(i). IPIV(i) will always be either */
+/* i or i+1; IPIV(i) = i indicates a row interchange was not */
+/* required. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the matrix of right hand side vectors B. */
+/* On exit, B is overwritten by the solution vectors X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Shift the array pointers down by one (f2c convention) so the body can */
+/* use 1-based Fortran-style subscripts; b is offset to its (1,1) element. */
+
+    /* Parameter adjustments */
+    --dl;
+    --d__;
+    --du;
+    --du2;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+
+/* Validate arguments: TRANS must be 'N', 'T' or 'C' (case-insensitive). */
+
+    *info = 0;
+    notran = *(unsigned char *)trans == 'N' || *(unsigned char *)trans == 'n';
+    if (! notran && ! (*(unsigned char *)trans == 'T' || *(unsigned char *)
+	    trans == 't') && ! (*(unsigned char *)trans == 'C' || *(unsigned
+	    char *)trans == 'c')) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*ldb < max(*n,1)) {
+	*info = -10;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CGTTRS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+/* Decode TRANS */
+
+    if (notran) {
+	itrans = 0;
+    } else if (*(unsigned char *)trans == 'T' || *(unsigned char *)trans ==
+	    't') {
+	itrans = 1;
+    } else {
+	itrans = 2;
+    }
+
+/* Determine the number of right-hand sides to solve at a time. */
+/* The block size nb comes from the ILAENV tuning query (at least 1). */
+
+    if (*nrhs == 1) {
+	nb = 1;
+    } else {
+/* Computing MAX */
+	i__1 = 1, i__2 = ilaenv_(&c__1, "CGTTRS", trans, n, nrhs, &c_n1, &
+		c_n1);
+	nb = max(i__1,i__2);
+    }
+
+/* Delegate the actual solve to CGTTS2, either in one call or in */
+/* blocks of nb columns of B at a time. */
+
+    if (nb >= *nrhs) {
+	cgtts2_(&itrans, n, nrhs, &dl[1], &d__[1], &du[1], &du2[1], &ipiv[1],
+		 &b[b_offset], ldb);
+    } else {
+	i__1 = *nrhs;
+	i__2 = nb;
+	for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+	    i__3 = *nrhs - j + 1;
+	    jb = min(i__3,nb);
+	    cgtts2_(&itrans, n, &jb, &dl[1], &d__[1], &du[1], &du2[1], &ipiv[
+		    1], &b[j * b_dim1 + 1], ldb);
+/* L10: */
+	}
+    }
+
+/* End of CGTTRS */
+
+    return 0;
+} /* cgttrs_ */
diff --git a/contrib/libs/clapack/cgtts2.c b/contrib/libs/clapack/cgtts2.c
new file mode 100644
index 0000000000..dae49c82a1
--- /dev/null
+++ b/contrib/libs/clapack/cgtts2.c
@@ -0,0 +1,583 @@
+/* cgtts2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cgtts2_(integer *itrans, integer *n, integer *nrhs,
+	complex *dl, complex *d__, complex *du, complex *du2, integer *ipiv,
+	complex *b, integer *ldb)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7, i__8;
+    complex q__1, q__2, q__3, q__4, q__5, q__6, q__7, q__8;
+
+    /* Builtin functions */
+    void c_div(complex *, complex *, complex *), r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    integer i__, j;
+    complex temp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CGTTS2 solves one of the systems of equations */
+/* A * X = B, A**T * X = B, or A**H * X = B, */
+/* with a tridiagonal matrix A using the LU factorization computed */
+/* by CGTTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* ITRANS (input) INTEGER */
+/* Specifies the form of the system of equations. */
+/* = 0: A * X = B (No transpose) */
+/* = 1: A**T * X = B (Transpose) */
+/* = 2: A**H * X = B (Conjugate transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input) COMPLEX array, dimension (N-1) */
+/* The (n-1) multipliers that define the matrix L from the */
+/* LU factorization of A. */
+
+/* D (input) COMPLEX array, dimension (N) */
+/* The n diagonal elements of the upper triangular matrix U from */
+/* the LU factorization of A. */
+
+/* DU (input) COMPLEX array, dimension (N-1) */
+/* The (n-1) elements of the first super-diagonal of U. */
+
+/* DU2 (input) COMPLEX array, dimension (N-2) */
+/* The (n-2) elements of the second super-diagonal of U. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/* interchanged with row IPIV(i). IPIV(i) will always be either */
+/* i or i+1; IPIV(i) = i indicates a row interchange was not */
+/* required. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the matrix of right hand side vectors B. */
+/* On exit, B is overwritten by the solution vectors X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+/* Shift the array pointers down by one (f2c convention) so the body can */
+/* use 1-based Fortran-style subscripts; b is offset to its (1,1) element. */
+
+    /* Parameter adjustments */
+    --dl;
+    --d__;
+    --du;
+    --du2;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+/* Each ITRANS case below has two variants: a single-right-hand-side */
+/* path (nrhs <= 1) written as a goto loop, and a general path with an */
+/* explicit loop over the columns j of B. */
+
+    if (*itrans == 0) {
+
+/* Solve A*X = B using the LU factorization of A, */
+/* overwriting each right hand side vector with its solution. */
+
+	if (*nrhs <= 1) {
+	    j = 1;
+L10:
+
+/* Solve L*x = b. */
+
+	    i__1 = *n - 1;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		if (ipiv[i__] == i__) {
+		    i__2 = i__ + 1 + j * b_dim1;
+		    i__3 = i__ + 1 + j * b_dim1;
+		    i__4 = i__;
+		    i__5 = i__ + j * b_dim1;
+		    q__2.r = dl[i__4].r * b[i__5].r - dl[i__4].i * b[i__5].i,
+			    q__2.i = dl[i__4].r * b[i__5].i + dl[i__4].i * b[
+			    i__5].r;
+		    q__1.r = b[i__3].r - q__2.r, q__1.i = b[i__3].i - q__2.i;
+		    b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+		} else {
+		    i__2 = i__ + j * b_dim1;
+		    temp.r = b[i__2].r, temp.i = b[i__2].i;
+		    i__2 = i__ + j * b_dim1;
+		    i__3 = i__ + 1 + j * b_dim1;
+		    b[i__2].r = b[i__3].r, b[i__2].i = b[i__3].i;
+		    i__2 = i__ + 1 + j * b_dim1;
+		    i__3 = i__;
+		    i__4 = i__ + j * b_dim1;
+		    q__2.r = dl[i__3].r * b[i__4].r - dl[i__3].i * b[i__4].i,
+			    q__2.i = dl[i__3].r * b[i__4].i + dl[i__3].i * b[
+			    i__4].r;
+		    q__1.r = temp.r - q__2.r, q__1.i = temp.i - q__2.i;
+		    b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+		}
+/* L20: */
+	    }
+
+/* Solve U*x = b. */
+
+	    i__1 = *n + j * b_dim1;
+	    c_div(&q__1, &b[*n + j * b_dim1], &d__[*n]);
+	    b[i__1].r = q__1.r, b[i__1].i = q__1.i;
+	    if (*n > 1) {
+		i__1 = *n - 1 + j * b_dim1;
+		i__2 = *n - 1 + j * b_dim1;
+		i__3 = *n - 1;
+		i__4 = *n + j * b_dim1;
+		q__3.r = du[i__3].r * b[i__4].r - du[i__3].i * b[i__4].i,
+			q__3.i = du[i__3].r * b[i__4].i + du[i__3].i * b[i__4]
+			.r;
+		q__2.r = b[i__2].r - q__3.r, q__2.i = b[i__2].i - q__3.i;
+		c_div(&q__1, &q__2, &d__[*n - 1]);
+		b[i__1].r = q__1.r, b[i__1].i = q__1.i;
+	    }
+	    for (i__ = *n - 2; i__ >= 1; --i__) {
+		i__1 = i__ + j * b_dim1;
+		i__2 = i__ + j * b_dim1;
+		i__3 = i__;
+		i__4 = i__ + 1 + j * b_dim1;
+		q__4.r = du[i__3].r * b[i__4].r - du[i__3].i * b[i__4].i,
+			q__4.i = du[i__3].r * b[i__4].i + du[i__3].i * b[i__4]
+			.r;
+		q__3.r = b[i__2].r - q__4.r, q__3.i = b[i__2].i - q__4.i;
+		i__5 = i__;
+		i__6 = i__ + 2 + j * b_dim1;
+		q__5.r = du2[i__5].r * b[i__6].r - du2[i__5].i * b[i__6].i,
+			q__5.i = du2[i__5].r * b[i__6].i + du2[i__5].i * b[
+			i__6].r;
+		q__2.r = q__3.r - q__5.r, q__2.i = q__3.i - q__5.i;
+		c_div(&q__1, &q__2, &d__[i__]);
+		b[i__1].r = q__1.r, b[i__1].i = q__1.i;
+/* L30: */
+	    }
+	    if (j < *nrhs) {
+		++j;
+		goto L10;
+	    }
+	} else {
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+
+/* Solve L*x = b. */
+
+		i__2 = *n - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    if (ipiv[i__] == i__) {
+			i__3 = i__ + 1 + j * b_dim1;
+			i__4 = i__ + 1 + j * b_dim1;
+			i__5 = i__;
+			i__6 = i__ + j * b_dim1;
+			q__2.r = dl[i__5].r * b[i__6].r - dl[i__5].i * b[i__6]
+				.i, q__2.i = dl[i__5].r * b[i__6].i + dl[i__5]
+				.i * b[i__6].r;
+			q__1.r = b[i__4].r - q__2.r, q__1.i = b[i__4].i -
+				q__2.i;
+			b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+		    } else {
+			i__3 = i__ + j * b_dim1;
+			temp.r = b[i__3].r, temp.i = b[i__3].i;
+			i__3 = i__ + j * b_dim1;
+			i__4 = i__ + 1 + j * b_dim1;
+			b[i__3].r = b[i__4].r, b[i__3].i = b[i__4].i;
+			i__3 = i__ + 1 + j * b_dim1;
+			i__4 = i__;
+			i__5 = i__ + j * b_dim1;
+			q__2.r = dl[i__4].r * b[i__5].r - dl[i__4].i * b[i__5]
+				.i, q__2.i = dl[i__4].r * b[i__5].i + dl[i__4]
+				.i * b[i__5].r;
+			q__1.r = temp.r - q__2.r, q__1.i = temp.i - q__2.i;
+			b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+		    }
+/* L40: */
+		}
+
+/* Solve U*x = b. */
+
+		i__2 = *n + j * b_dim1;
+		c_div(&q__1, &b[*n + j * b_dim1], &d__[*n]);
+		b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+		if (*n > 1) {
+		    i__2 = *n - 1 + j * b_dim1;
+		    i__3 = *n - 1 + j * b_dim1;
+		    i__4 = *n - 1;
+		    i__5 = *n + j * b_dim1;
+		    q__3.r = du[i__4].r * b[i__5].r - du[i__4].i * b[i__5].i,
+			    q__3.i = du[i__4].r * b[i__5].i + du[i__4].i * b[
+			    i__5].r;
+		    q__2.r = b[i__3].r - q__3.r, q__2.i = b[i__3].i - q__3.i;
+		    c_div(&q__1, &q__2, &d__[*n - 1]);
+		    b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+		}
+		for (i__ = *n - 2; i__ >= 1; --i__) {
+		    i__2 = i__ + j * b_dim1;
+		    i__3 = i__ + j * b_dim1;
+		    i__4 = i__;
+		    i__5 = i__ + 1 + j * b_dim1;
+		    q__4.r = du[i__4].r * b[i__5].r - du[i__4].i * b[i__5].i,
+			    q__4.i = du[i__4].r * b[i__5].i + du[i__4].i * b[
+			    i__5].r;
+		    q__3.r = b[i__3].r - q__4.r, q__3.i = b[i__3].i - q__4.i;
+		    i__6 = i__;
+		    i__7 = i__ + 2 + j * b_dim1;
+		    q__5.r = du2[i__6].r * b[i__7].r - du2[i__6].i * b[i__7]
+			    .i, q__5.i = du2[i__6].r * b[i__7].i + du2[i__6]
+			    .i * b[i__7].r;
+		    q__2.r = q__3.r - q__5.r, q__2.i = q__3.i - q__5.i;
+		    c_div(&q__1, &q__2, &d__[i__]);
+		    b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L50: */
+		}
+/* L60: */
+	    }
+	}
+    } else if (*itrans == 1) {
+
+/* Solve A**T * X = B. */
+
+	if (*nrhs <= 1) {
+	    j = 1;
+L70:
+
+/* Solve U**T * x = b. */
+
+	    i__1 = j * b_dim1 + 1;
+	    c_div(&q__1, &b[j * b_dim1 + 1], &d__[1]);
+	    b[i__1].r = q__1.r, b[i__1].i = q__1.i;
+	    if (*n > 1) {
+		i__1 = j * b_dim1 + 2;
+		i__2 = j * b_dim1 + 2;
+		i__3 = j * b_dim1 + 1;
+		q__3.r = du[1].r * b[i__3].r - du[1].i * b[i__3].i, q__3.i =
+			du[1].r * b[i__3].i + du[1].i * b[i__3].r;
+		q__2.r = b[i__2].r - q__3.r, q__2.i = b[i__2].i - q__3.i;
+		c_div(&q__1, &q__2, &d__[2]);
+		b[i__1].r = q__1.r, b[i__1].i = q__1.i;
+	    }
+	    i__1 = *n;
+	    for (i__ = 3; i__ <= i__1; ++i__) {
+		i__2 = i__ + j * b_dim1;
+		i__3 = i__ + j * b_dim1;
+		i__4 = i__ - 1;
+		i__5 = i__ - 1 + j * b_dim1;
+		q__4.r = du[i__4].r * b[i__5].r - du[i__4].i * b[i__5].i,
+			q__4.i = du[i__4].r * b[i__5].i + du[i__4].i * b[i__5]
+			.r;
+		q__3.r = b[i__3].r - q__4.r, q__3.i = b[i__3].i - q__4.i;
+		i__6 = i__ - 2;
+		i__7 = i__ - 2 + j * b_dim1;
+		q__5.r = du2[i__6].r * b[i__7].r - du2[i__6].i * b[i__7].i,
+			q__5.i = du2[i__6].r * b[i__7].i + du2[i__6].i * b[
+			i__7].r;
+		q__2.r = q__3.r - q__5.r, q__2.i = q__3.i - q__5.i;
+		c_div(&q__1, &q__2, &d__[i__]);
+		b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L80: */
+	    }
+
+/* Solve L**T * x = b. */
+
+	    for (i__ = *n - 1; i__ >= 1; --i__) {
+		if (ipiv[i__] == i__) {
+		    i__1 = i__ + j * b_dim1;
+		    i__2 = i__ + j * b_dim1;
+		    i__3 = i__;
+		    i__4 = i__ + 1 + j * b_dim1;
+		    q__2.r = dl[i__3].r * b[i__4].r - dl[i__3].i * b[i__4].i,
+			    q__2.i = dl[i__3].r * b[i__4].i + dl[i__3].i * b[
+			    i__4].r;
+		    q__1.r = b[i__2].r - q__2.r, q__1.i = b[i__2].i - q__2.i;
+		    b[i__1].r = q__1.r, b[i__1].i = q__1.i;
+		} else {
+		    i__1 = i__ + 1 + j * b_dim1;
+		    temp.r = b[i__1].r, temp.i = b[i__1].i;
+		    i__1 = i__ + 1 + j * b_dim1;
+		    i__2 = i__ + j * b_dim1;
+		    i__3 = i__;
+		    q__2.r = dl[i__3].r * temp.r - dl[i__3].i * temp.i,
+			    q__2.i = dl[i__3].r * temp.i + dl[i__3].i *
+			    temp.r;
+		    q__1.r = b[i__2].r - q__2.r, q__1.i = b[i__2].i - q__2.i;
+		    b[i__1].r = q__1.r, b[i__1].i = q__1.i;
+		    i__1 = i__ + j * b_dim1;
+		    b[i__1].r = temp.r, b[i__1].i = temp.i;
+		}
+/* L90: */
+	    }
+	    if (j < *nrhs) {
+		++j;
+		goto L70;
+	    }
+	} else {
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+
+/* Solve U**T * x = b. */
+
+		i__2 = j * b_dim1 + 1;
+		c_div(&q__1, &b[j * b_dim1 + 1], &d__[1]);
+		b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+		if (*n > 1) {
+		    i__2 = j * b_dim1 + 2;
+		    i__3 = j * b_dim1 + 2;
+		    i__4 = j * b_dim1 + 1;
+		    q__3.r = du[1].r * b[i__4].r - du[1].i * b[i__4].i,
+			    q__3.i = du[1].r * b[i__4].i + du[1].i * b[i__4]
+			    .r;
+		    q__2.r = b[i__3].r - q__3.r, q__2.i = b[i__3].i - q__3.i;
+		    c_div(&q__1, &q__2, &d__[2]);
+		    b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+		}
+		i__2 = *n;
+		for (i__ = 3; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * b_dim1;
+		    i__4 = i__ + j * b_dim1;
+		    i__5 = i__ - 1;
+		    i__6 = i__ - 1 + j * b_dim1;
+		    q__4.r = du[i__5].r * b[i__6].r - du[i__5].i * b[i__6].i,
+			    q__4.i = du[i__5].r * b[i__6].i + du[i__5].i * b[
+			    i__6].r;
+		    q__3.r = b[i__4].r - q__4.r, q__3.i = b[i__4].i - q__4.i;
+		    i__7 = i__ - 2;
+		    i__8 = i__ - 2 + j * b_dim1;
+		    q__5.r = du2[i__7].r * b[i__8].r - du2[i__7].i * b[i__8]
+			    .i, q__5.i = du2[i__7].r * b[i__8].i + du2[i__7]
+			    .i * b[i__8].r;
+		    q__2.r = q__3.r - q__5.r, q__2.i = q__3.i - q__5.i;
+		    c_div(&q__1, &q__2, &d__[i__]);
+		    b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L100: */
+		}
+
+/* Solve L**T * x = b. */
+
+		for (i__ = *n - 1; i__ >= 1; --i__) {
+		    if (ipiv[i__] == i__) {
+			i__2 = i__ + j * b_dim1;
+			i__3 = i__ + j * b_dim1;
+			i__4 = i__;
+			i__5 = i__ + 1 + j * b_dim1;
+			q__2.r = dl[i__4].r * b[i__5].r - dl[i__4].i * b[i__5]
+				.i, q__2.i = dl[i__4].r * b[i__5].i + dl[i__4]
+				.i * b[i__5].r;
+			q__1.r = b[i__3].r - q__2.r, q__1.i = b[i__3].i -
+				q__2.i;
+			b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+		    } else {
+			i__2 = i__ + 1 + j * b_dim1;
+			temp.r = b[i__2].r, temp.i = b[i__2].i;
+			i__2 = i__ + 1 + j * b_dim1;
+			i__3 = i__ + j * b_dim1;
+			i__4 = i__;
+			q__2.r = dl[i__4].r * temp.r - dl[i__4].i * temp.i,
+				q__2.i = dl[i__4].r * temp.i + dl[i__4].i *
+				temp.r;
+			q__1.r = b[i__3].r - q__2.r, q__1.i = b[i__3].i -
+				q__2.i;
+			b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+			i__2 = i__ + j * b_dim1;
+			b[i__2].r = temp.r, b[i__2].i = temp.i;
+		    }
+/* L110: */
+		}
+/* L120: */
+	    }
+	}
+    } else {
+
+/* Solve A**H * X = B. */
+/* Same structure as the transpose case, but every multiplier and */
+/* diagonal entry is conjugated via r_cnjg before use. */
+
+	if (*nrhs <= 1) {
+	    j = 1;
+L130:
+
+/* Solve U**H * x = b. */
+
+	    i__1 = j * b_dim1 + 1;
+	    r_cnjg(&q__2, &d__[1]);
+	    c_div(&q__1, &b[j * b_dim1 + 1], &q__2);
+	    b[i__1].r = q__1.r, b[i__1].i = q__1.i;
+	    if (*n > 1) {
+		i__1 = j * b_dim1 + 2;
+		i__2 = j * b_dim1 + 2;
+		r_cnjg(&q__4, &du[1]);
+		i__3 = j * b_dim1 + 1;
+		q__3.r = q__4.r * b[i__3].r - q__4.i * b[i__3].i, q__3.i =
+			q__4.r * b[i__3].i + q__4.i * b[i__3].r;
+		q__2.r = b[i__2].r - q__3.r, q__2.i = b[i__2].i - q__3.i;
+		r_cnjg(&q__5, &d__[2]);
+		c_div(&q__1, &q__2, &q__5);
+		b[i__1].r = q__1.r, b[i__1].i = q__1.i;
+	    }
+	    i__1 = *n;
+	    for (i__ = 3; i__ <= i__1; ++i__) {
+		i__2 = i__ + j * b_dim1;
+		i__3 = i__ + j * b_dim1;
+		r_cnjg(&q__5, &du[i__ - 1]);
+		i__4 = i__ - 1 + j * b_dim1;
+		q__4.r = q__5.r * b[i__4].r - q__5.i * b[i__4].i, q__4.i =
+			q__5.r * b[i__4].i + q__5.i * b[i__4].r;
+		q__3.r = b[i__3].r - q__4.r, q__3.i = b[i__3].i - q__4.i;
+		r_cnjg(&q__7, &du2[i__ - 2]);
+		i__5 = i__ - 2 + j * b_dim1;
+		q__6.r = q__7.r * b[i__5].r - q__7.i * b[i__5].i, q__6.i =
+			q__7.r * b[i__5].i + q__7.i * b[i__5].r;
+		q__2.r = q__3.r - q__6.r, q__2.i = q__3.i - q__6.i;
+		r_cnjg(&q__8, &d__[i__]);
+		c_div(&q__1, &q__2, &q__8);
+		b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L140: */
+	    }
+
+/* Solve L**H * x = b. */
+
+	    for (i__ = *n - 1; i__ >= 1; --i__) {
+		if (ipiv[i__] == i__) {
+		    i__1 = i__ + j * b_dim1;
+		    i__2 = i__ + j * b_dim1;
+		    r_cnjg(&q__3, &dl[i__]);
+		    i__3 = i__ + 1 + j * b_dim1;
+		    q__2.r = q__3.r * b[i__3].r - q__3.i * b[i__3].i, q__2.i =
+			     q__3.r * b[i__3].i + q__3.i * b[i__3].r;
+		    q__1.r = b[i__2].r - q__2.r, q__1.i = b[i__2].i - q__2.i;
+		    b[i__1].r = q__1.r, b[i__1].i = q__1.i;
+		} else {
+		    i__1 = i__ + 1 + j * b_dim1;
+		    temp.r = b[i__1].r, temp.i = b[i__1].i;
+		    i__1 = i__ + 1 + j * b_dim1;
+		    i__2 = i__ + j * b_dim1;
+		    r_cnjg(&q__3, &dl[i__]);
+		    q__2.r = q__3.r * temp.r - q__3.i * temp.i, q__2.i =
+			    q__3.r * temp.i + q__3.i * temp.r;
+		    q__1.r = b[i__2].r - q__2.r, q__1.i = b[i__2].i - q__2.i;
+		    b[i__1].r = q__1.r, b[i__1].i = q__1.i;
+		    i__1 = i__ + j * b_dim1;
+		    b[i__1].r = temp.r, b[i__1].i = temp.i;
+		}
+/* L150: */
+	    }
+	    if (j < *nrhs) {
+		++j;
+		goto L130;
+	    }
+	} else {
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+
+/* Solve U**H * x = b. */
+
+		i__2 = j * b_dim1 + 1;
+		r_cnjg(&q__2, &d__[1]);
+		c_div(&q__1, &b[j * b_dim1 + 1], &q__2);
+		b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+		if (*n > 1) {
+		    i__2 = j * b_dim1 + 2;
+		    i__3 = j * b_dim1 + 2;
+		    r_cnjg(&q__4, &du[1]);
+		    i__4 = j * b_dim1 + 1;
+		    q__3.r = q__4.r * b[i__4].r - q__4.i * b[i__4].i, q__3.i =
+			     q__4.r * b[i__4].i + q__4.i * b[i__4].r;
+		    q__2.r = b[i__3].r - q__3.r, q__2.i = b[i__3].i - q__3.i;
+		    r_cnjg(&q__5, &d__[2]);
+		    c_div(&q__1, &q__2, &q__5);
+		    b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+		}
+		i__2 = *n;
+		for (i__ = 3; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * b_dim1;
+		    i__4 = i__ + j * b_dim1;
+		    r_cnjg(&q__5, &du[i__ - 1]);
+		    i__5 = i__ - 1 + j * b_dim1;
+		    q__4.r = q__5.r * b[i__5].r - q__5.i * b[i__5].i, q__4.i =
+			     q__5.r * b[i__5].i + q__5.i * b[i__5].r;
+		    q__3.r = b[i__4].r - q__4.r, q__3.i = b[i__4].i - q__4.i;
+		    r_cnjg(&q__7, &du2[i__ - 2]);
+		    i__6 = i__ - 2 + j * b_dim1;
+		    q__6.r = q__7.r * b[i__6].r - q__7.i * b[i__6].i, q__6.i =
+			     q__7.r * b[i__6].i + q__7.i * b[i__6].r;
+		    q__2.r = q__3.r - q__6.r, q__2.i = q__3.i - q__6.i;
+		    r_cnjg(&q__8, &d__[i__]);
+		    c_div(&q__1, &q__2, &q__8);
+		    b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L160: */
+		}
+
+/* Solve L**H * x = b. */
+
+		for (i__ = *n - 1; i__ >= 1; --i__) {
+		    if (ipiv[i__] == i__) {
+			i__2 = i__ + j * b_dim1;
+			i__3 = i__ + j * b_dim1;
+			r_cnjg(&q__3, &dl[i__]);
+			i__4 = i__ + 1 + j * b_dim1;
+			q__2.r = q__3.r * b[i__4].r - q__3.i * b[i__4].i,
+				q__2.i = q__3.r * b[i__4].i + q__3.i * b[i__4]
+				.r;
+			q__1.r = b[i__3].r - q__2.r, q__1.i = b[i__3].i -
+				q__2.i;
+			b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+		    } else {
+			i__2 = i__ + 1 + j * b_dim1;
+			temp.r = b[i__2].r, temp.i = b[i__2].i;
+			i__2 = i__ + 1 + j * b_dim1;
+			i__3 = i__ + j * b_dim1;
+			r_cnjg(&q__3, &dl[i__]);
+			q__2.r = q__3.r * temp.r - q__3.i * temp.i, q__2.i =
+				q__3.r * temp.i + q__3.i * temp.r;
+			q__1.r = b[i__3].r - q__2.r, q__1.i = b[i__3].i -
+				q__2.i;
+			b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+			i__2 = i__ + j * b_dim1;
+			b[i__2].r = temp.r, b[i__2].i = temp.i;
+		    }
+/* L170: */
+		}
+/* L180: */
+	    }
+	}
+    }
+
+/* End of CGTTS2 */
+
+    return 0;
+} /* cgtts2_ */
diff --git a/contrib/libs/clapack/chbev.c b/contrib/libs/clapack/chbev.c
new file mode 100644
index 0000000000..45c3ef8db9
--- /dev/null
+++ b/contrib/libs/clapack/chbev.c
@@ -0,0 +1,270 @@
+/* chbev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b11 = 1.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int chbev_(char *jobz, char *uplo, integer *n, integer *kd,
+	complex *ab, integer *ldab, real *w, complex *z__, integer *ldz,
+	complex *work, real *rwork, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, z_dim1, z_offset, i__1;
+    real r__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    real eps;
+    integer inde;
+    real anrm;
+    integer imax;
+    real rmin, rmax, sigma;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+    logical lower, wantz;
+    extern doublereal clanhb_(char *, char *, integer *, integer *, complex *,
+	     integer *, real *);
+    integer iscale;
+    extern /* Subroutine */ int clascl_(char *, integer *, integer *, real *,
+	    real *, integer *, integer *, complex *, integer *, integer *), chbtrd_(char *, char *, integer *, integer *, complex *,
+	    integer *, real *, real *, complex *, integer *, complex *,
+	    integer *);
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real bignum;
+    integer indrwk;
+    extern /* Subroutine */ int csteqr_(char *, integer *, real *, real *,
+	    complex *, integer *, real *, integer *), ssterf_(integer
+	    *, real *, real *, integer *);
+    real smlnum;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHBEV computes all the eigenvalues and, optionally, eigenvectors of */
+/* a complex Hermitian band matrix A. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) COMPLEX array, dimension (LDAB, N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, AB is overwritten by values generated during the */
+/* reduction to tridiagonal form. If UPLO = 'U', the first */
+/* superdiagonal and the diagonal of the tridiagonal matrix T */
+/* are returned in rows KD and KD+1 of AB, and if UPLO = 'L', */
+/* the diagonal and first subdiagonal of T are returned in the */
+/* first two rows of AB. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD + 1. */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/* eigenvectors of the matrix A, with the i-th column of Z */
+/* holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) COMPLEX array, dimension (N) */
+
+/* RWORK (workspace) REAL array, dimension (max(1,3*N-2)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, the algorithm failed to converge; i */
+/* off-diagonal elements of an intermediate tridiagonal */
+/* form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* Shift the array pointers (f2c convention) so the body can use */
+/* 1-based Fortran-style subscripts; ab and z are offset to (1,1). */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    lower = lsame_(uplo, "L");
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (lower || lsame_(uplo, "U"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*kd < 0) {
+	*info = -4;
+    } else if (*ldab < *kd + 1) {
+	*info = -6;
+    } else if (*ldz < 1 || wantz && *ldz < *n) {
+	*info = -9;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CHBEV ", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* N = 1: the single eigenvalue is the (real) diagonal entry, read from */
+/* row 1 (lower storage) or row KD+1 (upper storage) of AB. */
+
+    if (*n == 1) {
+	if (lower) {
+	    i__1 = ab_dim1 + 1;
+	    w[1] = ab[i__1].r;
+	} else {
+	    i__1 = *kd + 1 + ab_dim1;
+	    w[1] = ab[i__1].r;
+	}
+	if (wantz) {
+	    i__1 = z_dim1 + 1;
+	    z__[i__1].r = 1.f, z__[i__1].i = 0.f;
+	}
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    safmin = slamch_("Safe minimum");
+    eps = slamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1.f / smlnum;
+    rmin = sqrt(smlnum);
+    rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+/* sigma scales the max-norm anrm into [rmin, rmax] to avoid */
+/* overflow/underflow during the reduction; iscale records whether */
+/* scaling was applied so the eigenvalues can be rescaled at the end. */
+
+    anrm = clanhb_("M", uplo, n, kd, &ab[ab_offset], ldab, &rwork[1]);
+    iscale = 0;
+    if (anrm > 0.f && anrm < rmin) {
+	iscale = 1;
+	sigma = rmin / anrm;
+    } else if (anrm > rmax) {
+	iscale = 1;
+	sigma = rmax / anrm;
+    }
+    if (iscale == 1) {
+	if (lower) {
+	    clascl_("B", kd, kd, &c_b11, &sigma, n, n, &ab[ab_offset], ldab,
+		    info);
+	} else {
+	    clascl_("Q", kd, kd, &c_b11, &sigma, n, n, &ab[ab_offset], ldab,
+		    info);
+	}
+    }
+
+/* Call CHBTRD to reduce Hermitian band matrix to tridiagonal form. */
+/* Diagonal goes to w, off-diagonal to rwork(inde..inde+n-2). */
+
+    inde = 1;
+    chbtrd_(jobz, uplo, n, kd, &ab[ab_offset], ldab, &w[1], &rwork[inde], &
+	    z__[z_offset], ldz, &work[1], &iinfo);
+
+/* For eigenvalues only, call SSTERF. For eigenvectors, call CSTEQR. */
+
+    if (! wantz) {
+	ssterf_(n, &w[1], &rwork[inde], info);
+    } else {
+	indrwk = inde + *n;
+	csteqr_(jobz, n, &w[1], &rwork[inde], &z__[z_offset], ldz, &rwork[
+		indrwk], info);
+    }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+/* If INFO > 0, only the imax = INFO-1 eigenvalues computed before the */
+/* convergence failure are rescaled. */
+
+    if (iscale == 1) {
+	if (*info == 0) {
+	    imax = *n;
+	} else {
+	    imax = *info - 1;
+	}
+	r__1 = 1.f / sigma;
+	sscal_(&imax, &r__1, &w[1], &c__1);
+    }
+
+    return 0;
+
+/* End of CHBEV */
+
+} /* chbev_ */
diff --git a/contrib/libs/clapack/chbevd.c b/contrib/libs/clapack/chbevd.c
new file mode 100644
index 0000000000..148aafd8b2
--- /dev/null
+++ b/contrib/libs/clapack/chbevd.c
@@ -0,0 +1,377 @@
+/* chbevd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static real c_b13 = 1.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int chbevd_(char *jobz, char *uplo, integer *n, integer *kd,
+ complex *ab, integer *ldab, real *w, complex *z__, integer *ldz,
+ complex *work, integer *lwork, real *rwork, integer *lrwork, integer *
+ iwork, integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, z_dim1, z_offset, i__1;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ real eps;
+ integer inde;
+ real anrm;
+ integer imax;
+ real rmin, rmax;
+ integer llwk2;
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *);
+ real sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ integer lwmin;
+ logical lower;
+ integer llrwk;
+ logical wantz;
+ integer indwk2;
+ extern doublereal clanhb_(char *, char *, integer *, integer *, complex *,
+ integer *, real *);
+ integer iscale;
+ extern /* Subroutine */ int clascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, complex *, integer *, integer *), cstedc_(char *, integer *, real *, real *, complex *,
+ integer *, complex *, integer *, real *, integer *, integer *,
+ integer *, integer *), chbtrd_(char *, char *, integer *,
+ integer *, complex *, integer *, real *, real *, complex *,
+ integer *, complex *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ integer indwrk, liwmin;
+ extern /* Subroutine */ int ssterf_(integer *, real *, real *, integer *);
+ integer lrwmin;
+ real smlnum;
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHBEVD computes all the eigenvalues and, optionally, eigenvectors of */
+/* a complex Hermitian band matrix A. If eigenvectors are desired, it */
+/* uses a divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) COMPLEX array, dimension (LDAB, N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, AB is overwritten by values generated during the */
+/* reduction to tridiagonal form. If UPLO = 'U', the first */
+/* superdiagonal and the diagonal of the tridiagonal matrix T */
+/* are returned in rows KD and KD+1 of AB, and if UPLO = 'L', */
+/* the diagonal and first subdiagonal of T are returned in the */
+/* first two rows of AB. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD + 1. */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/* eigenvectors of the matrix A, with the i-th column of Z */
+/* holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If N <= 1, LWORK must be at least 1. */
+/* If JOBZ = 'N' and N > 1, LWORK must be at least N. */
+/* If JOBZ = 'V' and N > 1, LWORK must be at least 2*N**2. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK, RWORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* RWORK (workspace/output) REAL array, */
+/* dimension (LRWORK) */
+/* On exit, if INFO = 0, RWORK(1) returns the optimal LRWORK. */
+
+/* LRWORK (input) INTEGER */
+/* The dimension of array RWORK. */
+/* If N <= 1, LRWORK must be at least 1. */
+/* If JOBZ = 'N' and N > 1, LRWORK must be at least N. */
+/* If JOBZ = 'V' and N > 1, LRWORK must be at least */
+/* 1 + 5*N + 2*N**2. */
+
+/* If LRWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of array IWORK. */
+/* If JOBZ = 'N' or N <= 1, LIWORK must be at least 1. */
+/* If JOBZ = 'V' and N > 1, LIWORK must be at least 3 + 5*N . */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, the algorithm failed to converge; i */
+/* off-diagonal elements of an intermediate tridiagonal */
+/* form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ lower = lsame_(uplo, "L");
+ lquery = *lwork == -1 || *liwork == -1 || *lrwork == -1;
+
+ *info = 0;
+ if (*n <= 1) {
+ lwmin = 1;
+ lrwmin = 1;
+ liwmin = 1;
+ } else {
+ if (wantz) {
+/* Computing 2nd power */
+ i__1 = *n;
+ lwmin = i__1 * i__1 << 1;
+/* Computing 2nd power */
+ i__1 = *n;
+ lrwmin = *n * 5 + 1 + (i__1 * i__1 << 1);
+ liwmin = *n * 5 + 3;
+ } else {
+ lwmin = *n;
+ lrwmin = *n;
+ liwmin = 1;
+ }
+ }
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (lower || lsame_(uplo, "U"))) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*kd < 0) {
+ *info = -4;
+ } else if (*ldab < *kd + 1) {
+ *info = -6;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -9;
+ }
+
+ if (*info == 0) {
+ work[1].r = (real) lwmin, work[1].i = 0.f;
+ rwork[1] = (real) lrwmin;
+ iwork[1] = liwmin;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -11;
+ } else if (*lrwork < lrwmin && ! lquery) {
+ *info = -13;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -15;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHBEVD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ i__1 = ab_dim1 + 1;
+ w[1] = ab[i__1].r;
+ if (wantz) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1.f, z__[i__1].i = 0.f;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = slamch_("Safe minimum");
+ eps = slamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1.f / smlnum;
+ rmin = sqrt(smlnum);
+ rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ anrm = clanhb_("M", uplo, n, kd, &ab[ab_offset], ldab, &rwork[1]);
+ iscale = 0;
+ if (anrm > 0.f && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ if (lower) {
+ clascl_("B", kd, kd, &c_b13, &sigma, n, n, &ab[ab_offset], ldab,
+ info);
+ } else {
+ clascl_("Q", kd, kd, &c_b13, &sigma, n, n, &ab[ab_offset], ldab,
+ info);
+ }
+ }
+
+/* Call CHBTRD to reduce Hermitian band matrix to tridiagonal form. */
+
+ inde = 1;
+ indwrk = inde + *n;
+ indwk2 = *n * *n + 1;
+ llwk2 = *lwork - indwk2 + 1;
+ llrwk = *lrwork - indwrk + 1;
+ chbtrd_(jobz, uplo, n, kd, &ab[ab_offset], ldab, &w[1], &rwork[inde], &
+ z__[z_offset], ldz, &work[1], &iinfo);
+
+/* For eigenvalues only, call SSTERF. For eigenvectors, call CSTEDC. */
+
+ if (! wantz) {
+ ssterf_(n, &w[1], &rwork[inde], info);
+ } else {
+ cstedc_("I", n, &w[1], &rwork[inde], &work[1], n, &work[indwk2], &
+ llwk2, &rwork[indwrk], &llrwk, &iwork[1], liwork, info);
+ cgemm_("N", "N", n, n, n, &c_b2, &z__[z_offset], ldz, &work[1], n, &
+ c_b1, &work[indwk2], n);
+ clacpy_("A", n, n, &work[indwk2], n, &z__[z_offset], ldz);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *n;
+ } else {
+ imax = *info - 1;
+ }
+ r__1 = 1.f / sigma;
+ sscal_(&imax, &r__1, &w[1], &c__1);
+ }
+
+ work[1].r = (real) lwmin, work[1].i = 0.f;
+ rwork[1] = (real) lrwmin;
+ iwork[1] = liwmin;
+ return 0;
+
+/* End of CHBEVD */
+
+} /* chbevd_ */
diff --git a/contrib/libs/clapack/chbevx.c b/contrib/libs/clapack/chbevx.c
new file mode 100644
index 0000000000..0a395a250b
--- /dev/null
+++ b/contrib/libs/clapack/chbevx.c
@@ -0,0 +1,524 @@
+/* chbevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static real c_b16 = 1.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int chbevx_(char *jobz, char *range, char *uplo, integer *n,
+ integer *kd, complex *ab, integer *ldab, complex *q, integer *ldq,
+ real *vl, real *vu, integer *il, integer *iu, real *abstol, integer *
+ m, real *w, complex *z__, integer *ldz, complex *work, real *rwork,
+ integer *iwork, integer *ifail, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, q_dim1, q_offset, z_dim1, z_offset, i__1,
+ i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, jj;
+ real eps, vll, vuu, tmp1;
+ integer indd, inde;
+ real anrm;
+ integer imax;
+ real rmin, rmax;
+ logical test;
+ complex ctmp1;
+ integer itmp1, indee;
+ real sigma;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *);
+ integer iinfo;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ char order[1];
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ logical lower;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ logical wantz;
+ extern doublereal clanhb_(char *, char *, integer *, integer *, complex *,
+ integer *, real *);
+ logical alleig, indeig;
+ integer iscale, indibl;
+ extern /* Subroutine */ int clascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, complex *, integer *, integer *), chbtrd_(char *, char *, integer *, integer *, complex *,
+ integer *, real *, real *, complex *, integer *, complex *,
+ integer *);
+ logical valeig;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real abstll, bignum;
+ integer indiwk, indisp;
+ extern /* Subroutine */ int cstein_(integer *, real *, real *, integer *,
+ real *, integer *, integer *, complex *, integer *, real *,
+ integer *, integer *, integer *);
+ integer indrwk, indwrk;
+ extern /* Subroutine */ int csteqr_(char *, integer *, real *, real *,
+ complex *, integer *, real *, integer *), ssterf_(integer
+ *, real *, real *, integer *);
+ integer nsplit;
+ extern /* Subroutine */ int sstebz_(char *, char *, integer *, real *,
+ real *, integer *, integer *, real *, real *, real *, integer *,
+ integer *, real *, integer *, integer *, real *, integer *,
+ integer *);
+ real smlnum;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHBEVX computes selected eigenvalues and, optionally, eigenvectors */
+/* of a complex Hermitian band matrix A. Eigenvalues and eigenvectors */
+/* can be selected by specifying either a range of values or a range of */
+/* indices for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found; */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found; */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) COMPLEX array, dimension (LDAB, N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, AB is overwritten by values generated during the */
+/* reduction to tridiagonal form. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD + 1. */
+
+/* Q (output) COMPLEX array, dimension (LDQ, N) */
+/* If JOBZ = 'V', the N-by-N unitary matrix used in the */
+/* reduction to tridiagonal form. */
+/* If JOBZ = 'N', the array Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. If JOBZ = 'V', then */
+/* LDQ >= max(1,N). */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) REAL */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing AB to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*SLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*SLAMCH('S'). */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices */
+/* with Guaranteed High Relative Accuracy," by Demmel and */
+/* Kahan, LAPACK Working Note #3. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) REAL array, dimension (N) */
+/* The first M elements contain the selected eigenvalues in */
+/* ascending order. */
+
+/* Z (output) COMPLEX array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and the */
+/* index of the eigenvector is returned in IFAIL. */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) COMPLEX array, dimension (N) */
+
+/* RWORK (workspace) REAL array, dimension (7*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, then i eigenvectors failed to converge. */
+/* Their indices are stored in array IFAIL. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+ lower = lsame_(uplo, "L");
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -2;
+ } else if (! (lower || lsame_(uplo, "U"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*kd < 0) {
+ *info = -5;
+ } else if (*ldab < *kd + 1) {
+ *info = -7;
+ } else if (wantz && *ldq < max(1,*n)) {
+ *info = -9;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -11;
+ }
+ } else if (indeig) {
+ if (*il < 1 || *il > max(1,*n)) {
+ *info = -12;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -13;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -18;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHBEVX", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ *m = 1;
+ if (lower) {
+ i__1 = ab_dim1 + 1;
+ ctmp1.r = ab[i__1].r, ctmp1.i = ab[i__1].i;
+ } else {
+ i__1 = *kd + 1 + ab_dim1;
+ ctmp1.r = ab[i__1].r, ctmp1.i = ab[i__1].i;
+ }
+ tmp1 = ctmp1.r;
+ if (valeig) {
+ if (! (*vl < tmp1 && *vu >= tmp1)) {
+ *m = 0;
+ }
+ }
+ if (*m == 1) {
+ w[1] = ctmp1.r;
+ if (wantz) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1.f, z__[i__1].i = 0.f;
+ }
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = slamch_("Safe minimum");
+ eps = slamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1.f / smlnum;
+ rmin = sqrt(smlnum);
+/* Computing MIN */
+ r__1 = sqrt(bignum), r__2 = 1.f / sqrt(sqrt(safmin));
+ rmax = dmin(r__1,r__2);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ iscale = 0;
+ abstll = *abstol;
+ if (valeig) {
+ vll = *vl;
+ vuu = *vu;
+ } else {
+ vll = 0.f;
+ vuu = 0.f;
+ }
+ anrm = clanhb_("M", uplo, n, kd, &ab[ab_offset], ldab, &rwork[1]);
+ if (anrm > 0.f && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ if (lower) {
+ clascl_("B", kd, kd, &c_b16, &sigma, n, n, &ab[ab_offset], ldab,
+ info);
+ } else {
+ clascl_("Q", kd, kd, &c_b16, &sigma, n, n, &ab[ab_offset], ldab,
+ info);
+ }
+ if (*abstol > 0.f) {
+ abstll = *abstol * sigma;
+ }
+ if (valeig) {
+ vll = *vl * sigma;
+ vuu = *vu * sigma;
+ }
+ }
+
+/* Call CHBTRD to reduce Hermitian band matrix to tridiagonal form. */
+
+ indd = 1;
+ inde = indd + *n;
+ indrwk = inde + *n;
+ indwrk = 1;
+ chbtrd_(jobz, uplo, n, kd, &ab[ab_offset], ldab, &rwork[indd], &rwork[
+ inde], &q[q_offset], ldq, &work[indwrk], &iinfo);
+
+/* If all eigenvalues are desired and ABSTOL is less than or equal */
+/* to zero, then call SSTERF or CSTEQR. If this fails for some */
+/* eigenvalue, then try SSTEBZ. */
+
+ test = FALSE_;
+ if (indeig) {
+ if (*il == 1 && *iu == *n) {
+ test = TRUE_;
+ }
+ }
+ if ((alleig || test) && *abstol <= 0.f) {
+ scopy_(n, &rwork[indd], &c__1, &w[1], &c__1);
+ indee = indrwk + (*n << 1);
+ if (! wantz) {
+ i__1 = *n - 1;
+ scopy_(&i__1, &rwork[inde], &c__1, &rwork[indee], &c__1);
+ ssterf_(n, &w[1], &rwork[indee], info);
+ } else {
+ clacpy_("A", n, n, &q[q_offset], ldq, &z__[z_offset], ldz);
+ i__1 = *n - 1;
+ scopy_(&i__1, &rwork[inde], &c__1, &rwork[indee], &c__1);
+ csteqr_(jobz, n, &w[1], &rwork[indee], &z__[z_offset], ldz, &
+ rwork[indrwk], info);
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ifail[i__] = 0;
+/* L10: */
+ }
+ }
+ }
+ if (*info == 0) {
+ *m = *n;
+ goto L30;
+ }
+ *info = 0;
+ }
+
+/* Otherwise, call SSTEBZ and, if eigenvectors are desired, CSTEIN. */
+
+ if (wantz) {
+ *(unsigned char *)order = 'B';
+ } else {
+ *(unsigned char *)order = 'E';
+ }
+ indibl = 1;
+ indisp = indibl + *n;
+ indiwk = indisp + *n;
+ sstebz_(range, order, n, &vll, &vuu, il, iu, &abstll, &rwork[indd], &
+ rwork[inde], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &
+ rwork[indrwk], &iwork[indiwk], info);
+
+ if (wantz) {
+ cstein_(n, &rwork[indd], &rwork[inde], m, &w[1], &iwork[indibl], &
+ iwork[indisp], &z__[z_offset], ldz, &rwork[indrwk], &iwork[
+ indiwk], &ifail[1], info);
+
+/* Apply unitary matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by CSTEIN. */
+
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ ccopy_(n, &z__[j * z_dim1 + 1], &c__1, &work[1], &c__1);
+ cgemv_("N", n, n, &c_b2, &q[q_offset], ldq, &work[1], &c__1, &
+ c_b1, &z__[j * z_dim1 + 1], &c__1);
+/* L20: */
+ }
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+L30:
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *m;
+ } else {
+ imax = *info - 1;
+ }
+ r__1 = 1.f / sigma;
+ sscal_(&imax, &r__1, &w[1], &c__1);
+ }
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+ if (wantz) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = 0;
+ tmp1 = w[j];
+ i__2 = *m;
+ for (jj = j + 1; jj <= i__2; ++jj) {
+ if (w[jj] < tmp1) {
+ i__ = jj;
+ tmp1 = w[jj];
+ }
+/* L40: */
+ }
+
+ if (i__ != 0) {
+ itmp1 = iwork[indibl + i__ - 1];
+ w[i__] = w[j];
+ iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+ w[j] = tmp1;
+ iwork[indibl + j - 1] = itmp1;
+ cswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+ &c__1);
+ if (*info != 0) {
+ itmp1 = ifail[i__];
+ ifail[i__] = ifail[j];
+ ifail[j] = itmp1;
+ }
+ }
+/* L50: */
+ }
+ }
+
+ return 0;
+
+/* End of CHBEVX */
+
+} /* chbevx_ */
diff --git a/contrib/libs/clapack/chbgst.c b/contrib/libs/clapack/chbgst.c
new file mode 100644
index 0000000000..a4d1581c30
--- /dev/null
+++ b/contrib/libs/clapack/chbgst.c
@@ -0,0 +1,2146 @@
+/* chbgst.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int chbgst_(char *vect, char *uplo, integer *n, integer *ka,
+ integer *kb, complex *ab, integer *ldab, complex *bb, integer *ldbb,
+ complex *x, integer *ldx, complex *work, real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, bb_dim1, bb_offset, x_dim1, x_offset, i__1,
+ i__2, i__3, i__4, i__5, i__6, i__7, i__8;
+ real r__1;
+ complex q__1, q__2, q__3, q__4, q__5, q__6, q__7, q__8, q__9, q__10;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, k, l, m;
+ complex t;
+ integer i0, i1, i2, j1, j2;
+ complex ra;
+ integer nr, nx, ka1, kb1;
+ complex ra1;
+ integer j1t, j2t;
+ real bii;
+ integer kbt, nrt, inca;
+ extern /* Subroutine */ int crot_(integer *, complex *, integer *,
+ complex *, integer *, real *, complex *), cgerc_(integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cgeru_(integer *, integer *, complex *,
+ complex *, integer *, complex *, integer *, complex *, integer *);
+ logical upper, wantx;
+ extern /* Subroutine */ int clar2v_(integer *, complex *, complex *,
+ complex *, integer *, real *, complex *, integer *), clacgv_(
+ integer *, complex *, integer *), csscal_(integer *, real *,
+ complex *, integer *), claset_(char *, integer *, integer *,
+ complex *, complex *, complex *, integer *), clartg_(
+ complex *, complex *, real *, complex *, complex *), xerbla_(char
+ *, integer *), clargv_(integer *, complex *, integer *,
+ complex *, integer *, real *, integer *);
+ logical update;
+ extern /* Subroutine */ int clartv_(integer *, complex *, integer *,
+ complex *, integer *, real *, complex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHBGST reduces a complex Hermitian-definite banded generalized */
+/* eigenproblem A*x = lambda*B*x to standard form C*y = lambda*y, */
+/* such that C has the same bandwidth as A. */
+
+/* B must have been previously factorized as S**H*S by CPBSTF, using a */
+/* split Cholesky factorization. A is overwritten by C = X**H*A*X, where */
+/* X = S**(-1)*Q and Q is a unitary matrix chosen to preserve the */
+/* bandwidth of A. */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* = 'N': do not form the transformation matrix X; */
+/* = 'V': form X. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* KA (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KA >= 0. */
+
+/* KB (input) INTEGER */
+/* The number of superdiagonals of the matrix B if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KA >= KB >= 0. */
+
+/* AB (input/output) COMPLEX array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first ka+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(ka+1+i-j,j) = A(i,j) for max(1,j-ka)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+ka). */
+
+/* On exit, the transformed matrix X**H*A*X, stored in the same */
+/* format as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KA+1. */
+
+/* BB (input) COMPLEX array, dimension (LDBB,N) */
+/* The banded factor S from the split Cholesky factorization of */
+/* B, as returned by CPBSTF, stored in the first kb+1 rows of */
+/* the array. */
+
+/* LDBB (input) INTEGER */
+/* The leading dimension of the array BB. LDBB >= KB+1. */
+
+/* X (output) COMPLEX array, dimension (LDX,N) */
+/* If VECT = 'V', the n-by-n matrix X. */
+/* If VECT = 'N', the array X is not referenced. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. */
+/* LDX >= max(1,N) if VECT = 'V'; LDX >= 1 otherwise. */
+
+/* WORK (workspace) COMPLEX array, dimension (N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ bb_dim1 = *ldbb;
+ bb_offset = 1 + bb_dim1;
+ bb -= bb_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ wantx = lsame_(vect, "V");
+ upper = lsame_(uplo, "U");
+ ka1 = *ka + 1;
+ kb1 = *kb + 1;
+ *info = 0;
+ if (! wantx && ! lsame_(vect, "N")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ka < 0) {
+ *info = -4;
+ } else if (*kb < 0 || *kb > *ka) {
+ *info = -5;
+ } else if (*ldab < *ka + 1) {
+ *info = -7;
+ } else if (*ldbb < *kb + 1) {
+ *info = -9;
+ } else if (*ldx < 1 || wantx && *ldx < max(1,*n)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHBGST", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ inca = *ldab * ka1;
+
+/* Initialize X to the unit matrix, if needed */
+
+ if (wantx) {
+ claset_("Full", n, n, &c_b1, &c_b2, &x[x_offset], ldx);
+ }
+
+/* Set M to the splitting point m. It must be the same value as is */
+/* used in CPBSTF. The chosen value allows the arrays WORK and RWORK */
+/* to be of dimension (N). */
+
+ m = (*n + *kb) / 2;
+
+/* The routine works in two phases, corresponding to the two halves */
+/* of the split Cholesky factorization of B as S**H*S where */
+
+/* S = ( U ) */
+/* ( M L ) */
+
+/* with U upper triangular of order m, and L lower triangular of */
+/* order n-m. S has the same bandwidth as B. */
+
+/* S is treated as a product of elementary matrices: */
+
+/* S = S(m)*S(m-1)*...*S(2)*S(1)*S(m+1)*S(m+2)*...*S(n-1)*S(n) */
+
+/* where S(i) is determined by the i-th row of S. */
+
+/* In phase 1, the index i takes the values n, n-1, ... , m+1; */
+/* in phase 2, it takes the values 1, 2, ... , m. */
+
+/* For each value of i, the current matrix A is updated by forming */
+/* inv(S(i))**H*A*inv(S(i)). This creates a triangular bulge outside */
+/* the band of A. The bulge is then pushed down toward the bottom of */
+/* A in phase 1, and up toward the top of A in phase 2, by applying */
+/* plane rotations. */
+
+/* There are kb*(kb+1)/2 elements in the bulge, but at most 2*kb-1 */
+/* of them are linearly independent, so annihilating a bulge requires */
+/* only 2*kb-1 plane rotations. The rotations are divided into a 1st */
+/* set of kb-1 rotations, and a 2nd set of kb rotations. */
+
+/* Wherever possible, rotations are generated and applied in vector */
+/* operations of length NR between the indices J1 and J2 (sometimes */
+/* replaced by modified values NRT, J1T or J2T). */
+
+/* The real cosines and complex sines of the rotations are stored in */
+/* the arrays RWORK and WORK, those of the 1st set in elements */
+/* 2:m-kb-1, and those of the 2nd set in elements m-kb+1:n. */
+
+/* The bulges are not formed explicitly; nonzero elements outside the */
+/* band are created only when they are required for generating new */
+/* rotations; they are stored in the array WORK, in positions where */
+/* they are later overwritten by the sines of the rotations which */
+/* annihilate them. */
+
+/* **************************** Phase 1 ***************************** */
+
+/* The logical structure of this phase is: */
+
+/* UPDATE = .TRUE. */
+/* DO I = N, M + 1, -1 */
+/* use S(i) to update A and create a new bulge */
+/* apply rotations to push all bulges KA positions downward */
+/* END DO */
+/* UPDATE = .FALSE. */
+/* DO I = M + KA + 1, N - 1 */
+/* apply rotations to push all bulges KA positions downward */
+/* END DO */
+
+/* To avoid duplicating code, the two loops are merged. */
+
+ update = TRUE_;
+ i__ = *n + 1;
+L10:
+ if (update) {
+ --i__;
+/* Computing MIN */
+ i__1 = *kb, i__2 = i__ - 1;
+ kbt = min(i__1,i__2);
+ i0 = i__ - 1;
+/* Computing MIN */
+ i__1 = *n, i__2 = i__ + *ka;
+ i1 = min(i__1,i__2);
+ i2 = i__ - kbt + ka1;
+ if (i__ < m + 1) {
+ update = FALSE_;
+ ++i__;
+ i0 = m;
+ if (*ka == 0) {
+ goto L480;
+ }
+ goto L10;
+ }
+ } else {
+ i__ += *ka;
+ if (i__ > *n - 1) {
+ goto L480;
+ }
+ }
+
+ if (upper) {
+
+/* Transform A, working with the upper triangle */
+
+ if (update) {
+
+/* Form inv(S(i))**H * A * inv(S(i)) */
+
+ i__1 = kb1 + i__ * bb_dim1;
+ bii = bb[i__1].r;
+ i__1 = ka1 + i__ * ab_dim1;
+ i__2 = ka1 + i__ * ab_dim1;
+ r__1 = ab[i__2].r / bii / bii;
+ ab[i__1].r = r__1, ab[i__1].i = 0.f;
+ i__1 = i1;
+ for (j = i__ + 1; j <= i__1; ++j) {
+ i__2 = i__ - j + ka1 + j * ab_dim1;
+ i__3 = i__ - j + ka1 + j * ab_dim1;
+ q__1.r = ab[i__3].r / bii, q__1.i = ab[i__3].i / bii;
+ ab[i__2].r = q__1.r, ab[i__2].i = q__1.i;
+/* L20: */
+ }
+/* Computing MAX */
+ i__1 = 1, i__2 = i__ - *ka;
+ i__3 = i__ - 1;
+ for (j = max(i__1,i__2); j <= i__3; ++j) {
+ i__1 = j - i__ + ka1 + i__ * ab_dim1;
+ i__2 = j - i__ + ka1 + i__ * ab_dim1;
+ q__1.r = ab[i__2].r / bii, q__1.i = ab[i__2].i / bii;
+ ab[i__1].r = q__1.r, ab[i__1].i = q__1.i;
+/* L30: */
+ }
+ i__3 = i__ - 1;
+ for (k = i__ - kbt; k <= i__3; ++k) {
+ i__1 = k;
+ for (j = i__ - kbt; j <= i__1; ++j) {
+ i__2 = j - k + ka1 + k * ab_dim1;
+ i__4 = j - k + ka1 + k * ab_dim1;
+ i__5 = j - i__ + kb1 + i__ * bb_dim1;
+ r_cnjg(&q__5, &ab[k - i__ + ka1 + i__ * ab_dim1]);
+ q__4.r = bb[i__5].r * q__5.r - bb[i__5].i * q__5.i,
+ q__4.i = bb[i__5].r * q__5.i + bb[i__5].i *
+ q__5.r;
+ q__3.r = ab[i__4].r - q__4.r, q__3.i = ab[i__4].i -
+ q__4.i;
+ r_cnjg(&q__7, &bb[k - i__ + kb1 + i__ * bb_dim1]);
+ i__6 = j - i__ + ka1 + i__ * ab_dim1;
+ q__6.r = q__7.r * ab[i__6].r - q__7.i * ab[i__6].i,
+ q__6.i = q__7.r * ab[i__6].i + q__7.i * ab[i__6]
+ .r;
+ q__2.r = q__3.r - q__6.r, q__2.i = q__3.i - q__6.i;
+ i__7 = ka1 + i__ * ab_dim1;
+ r__1 = ab[i__7].r;
+ i__8 = j - i__ + kb1 + i__ * bb_dim1;
+ q__9.r = r__1 * bb[i__8].r, q__9.i = r__1 * bb[i__8].i;
+ r_cnjg(&q__10, &bb[k - i__ + kb1 + i__ * bb_dim1]);
+ q__8.r = q__9.r * q__10.r - q__9.i * q__10.i, q__8.i =
+ q__9.r * q__10.i + q__9.i * q__10.r;
+ q__1.r = q__2.r + q__8.r, q__1.i = q__2.i + q__8.i;
+ ab[i__2].r = q__1.r, ab[i__2].i = q__1.i;
+/* L40: */
+ }
+/* Computing MAX */
+ i__1 = 1, i__2 = i__ - *ka;
+ i__4 = i__ - kbt - 1;
+ for (j = max(i__1,i__2); j <= i__4; ++j) {
+ i__1 = j - k + ka1 + k * ab_dim1;
+ i__2 = j - k + ka1 + k * ab_dim1;
+ r_cnjg(&q__3, &bb[k - i__ + kb1 + i__ * bb_dim1]);
+ i__5 = j - i__ + ka1 + i__ * ab_dim1;
+ q__2.r = q__3.r * ab[i__5].r - q__3.i * ab[i__5].i,
+ q__2.i = q__3.r * ab[i__5].i + q__3.i * ab[i__5]
+ .r;
+ q__1.r = ab[i__2].r - q__2.r, q__1.i = ab[i__2].i -
+ q__2.i;
+ ab[i__1].r = q__1.r, ab[i__1].i = q__1.i;
+/* L50: */
+ }
+/* L60: */
+ }
+ i__3 = i1;
+ for (j = i__; j <= i__3; ++j) {
+/* Computing MAX */
+ i__4 = j - *ka, i__1 = i__ - kbt;
+ i__2 = i__ - 1;
+ for (k = max(i__4,i__1); k <= i__2; ++k) {
+ i__4 = k - j + ka1 + j * ab_dim1;
+ i__1 = k - j + ka1 + j * ab_dim1;
+ i__5 = k - i__ + kb1 + i__ * bb_dim1;
+ i__6 = i__ - j + ka1 + j * ab_dim1;
+ q__2.r = bb[i__5].r * ab[i__6].r - bb[i__5].i * ab[i__6]
+ .i, q__2.i = bb[i__5].r * ab[i__6].i + bb[i__5].i
+ * ab[i__6].r;
+ q__1.r = ab[i__1].r - q__2.r, q__1.i = ab[i__1].i -
+ q__2.i;
+ ab[i__4].r = q__1.r, ab[i__4].i = q__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by inv(S(i)) */
+
+ i__3 = *n - m;
+ r__1 = 1.f / bii;
+ csscal_(&i__3, &r__1, &x[m + 1 + i__ * x_dim1], &c__1);
+ if (kbt > 0) {
+ i__3 = *n - m;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgerc_(&i__3, &kbt, &q__1, &x[m + 1 + i__ * x_dim1], &
+ c__1, &bb[kb1 - kbt + i__ * bb_dim1], &c__1, &x[m
+ + 1 + (i__ - kbt) * x_dim1], ldx);
+ }
+ }
+
+/* store a(i,i1) in RA1 for use in next loop over K */
+
+ i__3 = i__ - i1 + ka1 + i1 * ab_dim1;
+ ra1.r = ab[i__3].r, ra1.i = ab[i__3].i;
+ }
+
+/* Generate and apply vectors of rotations to chase all the */
+/* existing bulges KA positions down toward the bottom of the */
+/* band */
+
+ i__3 = *kb - 1;
+ for (k = 1; k <= i__3; ++k) {
+ if (update) {
+
+/* Determine the rotations which would annihilate the bulge */
+/* which has in theory just been created */
+
+ if (i__ - k + *ka < *n && i__ - k > 1) {
+
+/* generate rotation to annihilate a(i,i-k+ka+1) */
+
+ clartg_(&ab[k + 1 + (i__ - k + *ka) * ab_dim1], &ra1, &
+ rwork[i__ - k + *ka - m], &work[i__ - k + *ka - m]
+, &ra);
+
+/* create nonzero element a(i-k,i-k+ka+1) outside the */
+/* band and store it in WORK(i-k) */
+
+ i__2 = kb1 - k + i__ * bb_dim1;
+ q__2.r = -bb[i__2].r, q__2.i = -bb[i__2].i;
+ q__1.r = q__2.r * ra1.r - q__2.i * ra1.i, q__1.i = q__2.r
+ * ra1.i + q__2.i * ra1.r;
+ t.r = q__1.r, t.i = q__1.i;
+ i__2 = i__ - k;
+ i__4 = i__ - k + *ka - m;
+ q__2.r = rwork[i__4] * t.r, q__2.i = rwork[i__4] * t.i;
+ r_cnjg(&q__4, &work[i__ - k + *ka - m]);
+ i__1 = (i__ - k + *ka) * ab_dim1 + 1;
+ q__3.r = q__4.r * ab[i__1].r - q__4.i * ab[i__1].i,
+ q__3.i = q__4.r * ab[i__1].i + q__4.i * ab[i__1]
+ .r;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+ work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+ i__2 = (i__ - k + *ka) * ab_dim1 + 1;
+ i__4 = i__ - k + *ka - m;
+ q__2.r = work[i__4].r * t.r - work[i__4].i * t.i, q__2.i =
+ work[i__4].r * t.i + work[i__4].i * t.r;
+ i__1 = i__ - k + *ka - m;
+ i__5 = (i__ - k + *ka) * ab_dim1 + 1;
+ q__3.r = rwork[i__1] * ab[i__5].r, q__3.i = rwork[i__1] *
+ ab[i__5].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ ab[i__2].r = q__1.r, ab[i__2].i = q__1.i;
+ ra1.r = ra.r, ra1.i = ra.i;
+ }
+ }
+/* Computing MAX */
+ i__2 = 1, i__4 = k - i0 + 2;
+ j2 = i__ - k - 1 + max(i__2,i__4) * ka1;
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ if (update) {
+/* Computing MAX */
+ i__2 = j2, i__4 = i__ + (*ka << 1) - k + 1;
+ j2t = max(i__2,i__4);
+ } else {
+ j2t = j2;
+ }
+ nrt = (*n - j2t + *ka) / ka1;
+ i__2 = j1;
+ i__4 = ka1;
+ for (j = j2t; i__4 < 0 ? j >= i__2 : j <= i__2; j += i__4) {
+
+/* create nonzero element a(j-ka,j+1) outside the band */
+/* and store it in WORK(j-m) */
+
+ i__1 = j - m;
+ i__5 = j - m;
+ i__6 = (j + 1) * ab_dim1 + 1;
+ q__1.r = work[i__5].r * ab[i__6].r - work[i__5].i * ab[i__6]
+ .i, q__1.i = work[i__5].r * ab[i__6].i + work[i__5].i
+ * ab[i__6].r;
+ work[i__1].r = q__1.r, work[i__1].i = q__1.i;
+ i__1 = (j + 1) * ab_dim1 + 1;
+ i__5 = j - m;
+ i__6 = (j + 1) * ab_dim1 + 1;
+ q__1.r = rwork[i__5] * ab[i__6].r, q__1.i = rwork[i__5] * ab[
+ i__6].i;
+ ab[i__1].r = q__1.r, ab[i__1].i = q__1.i;
+/* L90: */
+ }
+
+/* generate rotations in 1st set to annihilate elements which */
+/* have been created outside the band */
+
+ if (nrt > 0) {
+ clargv_(&nrt, &ab[j2t * ab_dim1 + 1], &inca, &work[j2t - m], &
+ ka1, &rwork[j2t - m], &ka1);
+ }
+ if (nr > 0) {
+
+/* apply rotations in 1st set from the right */
+
+ i__4 = *ka - 1;
+ for (l = 1; l <= i__4; ++l) {
+ clartv_(&nr, &ab[ka1 - l + j2 * ab_dim1], &inca, &ab[*ka
+ - l + (j2 + 1) * ab_dim1], &inca, &rwork[j2 - m],
+ &work[j2 - m], &ka1);
+/* L100: */
+ }
+
+/* apply rotations in 1st set from both sides to diagonal */
+/* blocks */
+
+ clar2v_(&nr, &ab[ka1 + j2 * ab_dim1], &ab[ka1 + (j2 + 1) *
+ ab_dim1], &ab[*ka + (j2 + 1) * ab_dim1], &inca, &
+ rwork[j2 - m], &work[j2 - m], &ka1);
+
+ clacgv_(&nr, &work[j2 - m], &ka1);
+ }
+
+/* start applying rotations in 1st set from the left */
+
+ i__4 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__4; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[l + (j2 + ka1 - l) * ab_dim1], &inca, &
+ ab[l + 1 + (j2 + ka1 - l) * ab_dim1], &inca, &
+ rwork[j2 - m], &work[j2 - m], &ka1);
+ }
+/* L110: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 1st set */
+
+ i__4 = j1;
+ i__2 = ka1;
+ for (j = j2; i__2 < 0 ? j >= i__4 : j <= i__4; j += i__2) {
+ i__1 = *n - m;
+ r_cnjg(&q__1, &work[j - m]);
+ crot_(&i__1, &x[m + 1 + j * x_dim1], &c__1, &x[m + 1 + (j
+ + 1) * x_dim1], &c__1, &rwork[j - m], &q__1);
+/* L120: */
+ }
+ }
+/* L130: */
+ }
+
+ if (update) {
+ if (i2 <= *n && kbt > 0) {
+
+/* create nonzero element a(i-kbt,i-kbt+ka+1) outside the */
+/* band and store it in WORK(i-kbt) */
+
+ i__3 = i__ - kbt;
+ i__2 = kb1 - kbt + i__ * bb_dim1;
+ q__2.r = -bb[i__2].r, q__2.i = -bb[i__2].i;
+ q__1.r = q__2.r * ra1.r - q__2.i * ra1.i, q__1.i = q__2.r *
+ ra1.i + q__2.i * ra1.r;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ }
+ }
+
+ for (k = *kb; k >= 1; --k) {
+ if (update) {
+/* Computing MAX */
+ i__3 = 2, i__2 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__3,i__2) * ka1;
+ } else {
+/* Computing MAX */
+ i__3 = 1, i__2 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__3,i__2) * ka1;
+ }
+
+/* finish applying rotations in 2nd set from the left */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (*n - j2 + *ka + l) / ka1;
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[l + (j2 - l + 1) * ab_dim1], &inca, &ab[
+ l + 1 + (j2 - l + 1) * ab_dim1], &inca, &rwork[j2
+ - *ka], &work[j2 - *ka], &ka1);
+ }
+/* L140: */
+ }
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ i__3 = j2;
+ i__2 = -ka1;
+ for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j += i__2) {
+ i__4 = j;
+ i__1 = j - *ka;
+ work[i__4].r = work[i__1].r, work[i__4].i = work[i__1].i;
+ rwork[j] = rwork[j - *ka];
+/* L150: */
+ }
+ i__2 = j1;
+ i__3 = ka1;
+ for (j = j2; i__3 < 0 ? j >= i__2 : j <= i__2; j += i__3) {
+
+/* create nonzero element a(j-ka,j+1) outside the band */
+/* and store it in WORK(j) */
+
+ i__4 = j;
+ i__1 = j;
+ i__5 = (j + 1) * ab_dim1 + 1;
+ q__1.r = work[i__1].r * ab[i__5].r - work[i__1].i * ab[i__5]
+ .i, q__1.i = work[i__1].r * ab[i__5].i + work[i__1].i
+ * ab[i__5].r;
+ work[i__4].r = q__1.r, work[i__4].i = q__1.i;
+ i__4 = (j + 1) * ab_dim1 + 1;
+ i__1 = j;
+ i__5 = (j + 1) * ab_dim1 + 1;
+ q__1.r = rwork[i__1] * ab[i__5].r, q__1.i = rwork[i__1] * ab[
+ i__5].i;
+ ab[i__4].r = q__1.r, ab[i__4].i = q__1.i;
+/* L160: */
+ }
+ if (update) {
+ if (i__ - k < *n - *ka && k <= kbt) {
+ i__3 = i__ - k + *ka;
+ i__2 = i__ - k;
+ work[i__3].r = work[i__2].r, work[i__3].i = work[i__2].i;
+ }
+ }
+/* L170: */
+ }
+
+ for (k = *kb; k >= 1; --k) {
+/* Computing MAX */
+ i__3 = 1, i__2 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__3,i__2) * ka1;
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ if (nr > 0) {
+
+/* generate rotations in 2nd set to annihilate elements */
+/* which have been created outside the band */
+
+ clargv_(&nr, &ab[j2 * ab_dim1 + 1], &inca, &work[j2], &ka1, &
+ rwork[j2], &ka1);
+
+/* apply rotations in 2nd set from the right */
+
+ i__3 = *ka - 1;
+ for (l = 1; l <= i__3; ++l) {
+ clartv_(&nr, &ab[ka1 - l + j2 * ab_dim1], &inca, &ab[*ka
+ - l + (j2 + 1) * ab_dim1], &inca, &rwork[j2], &
+ work[j2], &ka1);
+/* L180: */
+ }
+
+/* apply rotations in 2nd set from both sides to diagonal */
+/* blocks */
+
+ clar2v_(&nr, &ab[ka1 + j2 * ab_dim1], &ab[ka1 + (j2 + 1) *
+ ab_dim1], &ab[*ka + (j2 + 1) * ab_dim1], &inca, &
+ rwork[j2], &work[j2], &ka1);
+
+ clacgv_(&nr, &work[j2], &ka1);
+ }
+
+/* start applying rotations in 2nd set from the left */
+
+ i__3 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__3; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[l + (j2 + ka1 - l) * ab_dim1], &inca, &
+ ab[l + 1 + (j2 + ka1 - l) * ab_dim1], &inca, &
+ rwork[j2], &work[j2], &ka1);
+ }
+/* L190: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 2nd set */
+
+ i__3 = j1;
+ i__2 = ka1;
+ for (j = j2; i__2 < 0 ? j >= i__3 : j <= i__3; j += i__2) {
+ i__4 = *n - m;
+ r_cnjg(&q__1, &work[j]);
+ crot_(&i__4, &x[m + 1 + j * x_dim1], &c__1, &x[m + 1 + (j
+ + 1) * x_dim1], &c__1, &rwork[j], &q__1);
+/* L200: */
+ }
+ }
+/* L210: */
+ }
+
+ i__2 = *kb - 1;
+ for (k = 1; k <= i__2; ++k) {
+/* Computing MAX */
+ i__3 = 1, i__4 = k - i0 + 2;
+ j2 = i__ - k - 1 + max(i__3,i__4) * ka1;
+
+/* finish applying rotations in 1st set from the left */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[l + (j2 + ka1 - l) * ab_dim1], &inca, &
+ ab[l + 1 + (j2 + ka1 - l) * ab_dim1], &inca, &
+ rwork[j2 - m], &work[j2 - m], &ka1);
+ }
+/* L220: */
+ }
+/* L230: */
+ }
+
+ if (*kb > 1) {
+ i__2 = i2 + *ka;
+ for (j = *n - 1; j >= i__2; --j) {
+ rwork[j - m] = rwork[j - *ka - m];
+ i__3 = j - m;
+ i__4 = j - *ka - m;
+ work[i__3].r = work[i__4].r, work[i__3].i = work[i__4].i;
+/* L240: */
+ }
+ }
+
+ } else {
+
+/* Transform A, working with the lower triangle */
+
+ if (update) {
+
+/* Form inv(S(i))**H * A * inv(S(i)) */
+
+ i__2 = i__ * bb_dim1 + 1;
+ bii = bb[i__2].r;
+ i__2 = i__ * ab_dim1 + 1;
+ i__3 = i__ * ab_dim1 + 1;
+ r__1 = ab[i__3].r / bii / bii;
+ ab[i__2].r = r__1, ab[i__2].i = 0.f;
+ i__2 = i1;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ i__3 = j - i__ + 1 + i__ * ab_dim1;
+ i__4 = j - i__ + 1 + i__ * ab_dim1;
+ q__1.r = ab[i__4].r / bii, q__1.i = ab[i__4].i / bii;
+ ab[i__3].r = q__1.r, ab[i__3].i = q__1.i;
+/* L250: */
+ }
+/* Computing MAX */
+ i__2 = 1, i__3 = i__ - *ka;
+ i__4 = i__ - 1;
+ for (j = max(i__2,i__3); j <= i__4; ++j) {
+ i__2 = i__ - j + 1 + j * ab_dim1;
+ i__3 = i__ - j + 1 + j * ab_dim1;
+ q__1.r = ab[i__3].r / bii, q__1.i = ab[i__3].i / bii;
+ ab[i__2].r = q__1.r, ab[i__2].i = q__1.i;
+/* L260: */
+ }
+ i__4 = i__ - 1;
+ for (k = i__ - kbt; k <= i__4; ++k) {
+ i__2 = k;
+ for (j = i__ - kbt; j <= i__2; ++j) {
+ i__3 = k - j + 1 + j * ab_dim1;
+ i__1 = k - j + 1 + j * ab_dim1;
+ i__5 = i__ - j + 1 + j * bb_dim1;
+ r_cnjg(&q__5, &ab[i__ - k + 1 + k * ab_dim1]);
+ q__4.r = bb[i__5].r * q__5.r - bb[i__5].i * q__5.i,
+ q__4.i = bb[i__5].r * q__5.i + bb[i__5].i *
+ q__5.r;
+ q__3.r = ab[i__1].r - q__4.r, q__3.i = ab[i__1].i -
+ q__4.i;
+ r_cnjg(&q__7, &bb[i__ - k + 1 + k * bb_dim1]);
+ i__6 = i__ - j + 1 + j * ab_dim1;
+ q__6.r = q__7.r * ab[i__6].r - q__7.i * ab[i__6].i,
+ q__6.i = q__7.r * ab[i__6].i + q__7.i * ab[i__6]
+ .r;
+ q__2.r = q__3.r - q__6.r, q__2.i = q__3.i - q__6.i;
+ i__7 = i__ * ab_dim1 + 1;
+ r__1 = ab[i__7].r;
+ i__8 = i__ - j + 1 + j * bb_dim1;
+ q__9.r = r__1 * bb[i__8].r, q__9.i = r__1 * bb[i__8].i;
+ r_cnjg(&q__10, &bb[i__ - k + 1 + k * bb_dim1]);
+ q__8.r = q__9.r * q__10.r - q__9.i * q__10.i, q__8.i =
+ q__9.r * q__10.i + q__9.i * q__10.r;
+ q__1.r = q__2.r + q__8.r, q__1.i = q__2.i + q__8.i;
+ ab[i__3].r = q__1.r, ab[i__3].i = q__1.i;
+/* L270: */
+ }
+/* Computing MAX */
+ i__2 = 1, i__3 = i__ - *ka;
+ i__1 = i__ - kbt - 1;
+ for (j = max(i__2,i__3); j <= i__1; ++j) {
+ i__2 = k - j + 1 + j * ab_dim1;
+ i__3 = k - j + 1 + j * ab_dim1;
+ r_cnjg(&q__3, &bb[i__ - k + 1 + k * bb_dim1]);
+ i__5 = i__ - j + 1 + j * ab_dim1;
+ q__2.r = q__3.r * ab[i__5].r - q__3.i * ab[i__5].i,
+ q__2.i = q__3.r * ab[i__5].i + q__3.i * ab[i__5]
+ .r;
+ q__1.r = ab[i__3].r - q__2.r, q__1.i = ab[i__3].i -
+ q__2.i;
+ ab[i__2].r = q__1.r, ab[i__2].i = q__1.i;
+/* L280: */
+ }
+/* L290: */
+ }
+ i__4 = i1;
+ for (j = i__; j <= i__4; ++j) {
+/* Computing MAX */
+ i__1 = j - *ka, i__2 = i__ - kbt;
+ i__3 = i__ - 1;
+ for (k = max(i__1,i__2); k <= i__3; ++k) {
+ i__1 = j - k + 1 + k * ab_dim1;
+ i__2 = j - k + 1 + k * ab_dim1;
+ i__5 = i__ - k + 1 + k * bb_dim1;
+ i__6 = j - i__ + 1 + i__ * ab_dim1;
+ q__2.r = bb[i__5].r * ab[i__6].r - bb[i__5].i * ab[i__6]
+ .i, q__2.i = bb[i__5].r * ab[i__6].i + bb[i__5].i
+ * ab[i__6].r;
+ q__1.r = ab[i__2].r - q__2.r, q__1.i = ab[i__2].i -
+ q__2.i;
+ ab[i__1].r = q__1.r, ab[i__1].i = q__1.i;
+/* L300: */
+ }
+/* L310: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by inv(S(i)) */
+
+ i__4 = *n - m;
+ r__1 = 1.f / bii;
+ csscal_(&i__4, &r__1, &x[m + 1 + i__ * x_dim1], &c__1);
+ if (kbt > 0) {
+ i__4 = *n - m;
+ q__1.r = -1.f, q__1.i = -0.f;
+ i__3 = *ldbb - 1;
+ cgeru_(&i__4, &kbt, &q__1, &x[m + 1 + i__ * x_dim1], &
+ c__1, &bb[kbt + 1 + (i__ - kbt) * bb_dim1], &i__3,
+ &x[m + 1 + (i__ - kbt) * x_dim1], ldx);
+ }
+ }
+
+/* store a(i1,i) in RA1 for use in next loop over K */
+
+ i__4 = i1 - i__ + 1 + i__ * ab_dim1;
+ ra1.r = ab[i__4].r, ra1.i = ab[i__4].i;
+ }
+
+/* Generate and apply vectors of rotations to chase all the */
+/* existing bulges KA positions down toward the bottom of the */
+/* band */
+
+ i__4 = *kb - 1;
+ for (k = 1; k <= i__4; ++k) {
+ if (update) {
+
+/* Determine the rotations which would annihilate the bulge */
+/* which has in theory just been created */
+
+ if (i__ - k + *ka < *n && i__ - k > 1) {
+
+/* generate rotation to annihilate a(i-k+ka+1,i) */
+
+ clartg_(&ab[ka1 - k + i__ * ab_dim1], &ra1, &rwork[i__ -
+ k + *ka - m], &work[i__ - k + *ka - m], &ra);
+
+/* create nonzero element a(i-k+ka+1,i-k) outside the */
+/* band and store it in WORK(i-k) */
+
+ i__3 = k + 1 + (i__ - k) * bb_dim1;
+ q__2.r = -bb[i__3].r, q__2.i = -bb[i__3].i;
+ q__1.r = q__2.r * ra1.r - q__2.i * ra1.i, q__1.i = q__2.r
+ * ra1.i + q__2.i * ra1.r;
+ t.r = q__1.r, t.i = q__1.i;
+ i__3 = i__ - k;
+ i__1 = i__ - k + *ka - m;
+ q__2.r = rwork[i__1] * t.r, q__2.i = rwork[i__1] * t.i;
+ r_cnjg(&q__4, &work[i__ - k + *ka - m]);
+ i__2 = ka1 + (i__ - k) * ab_dim1;
+ q__3.r = q__4.r * ab[i__2].r - q__4.i * ab[i__2].i,
+ q__3.i = q__4.r * ab[i__2].i + q__4.i * ab[i__2]
+ .r;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ i__3 = ka1 + (i__ - k) * ab_dim1;
+ i__1 = i__ - k + *ka - m;
+ q__2.r = work[i__1].r * t.r - work[i__1].i * t.i, q__2.i =
+ work[i__1].r * t.i + work[i__1].i * t.r;
+ i__2 = i__ - k + *ka - m;
+ i__5 = ka1 + (i__ - k) * ab_dim1;
+ q__3.r = rwork[i__2] * ab[i__5].r, q__3.i = rwork[i__2] *
+ ab[i__5].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ ab[i__3].r = q__1.r, ab[i__3].i = q__1.i;
+ ra1.r = ra.r, ra1.i = ra.i;
+ }
+ }
+/* Computing MAX */
+ i__3 = 1, i__1 = k - i0 + 2;
+ j2 = i__ - k - 1 + max(i__3,i__1) * ka1;
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ if (update) {
+/* Computing MAX */
+ i__3 = j2, i__1 = i__ + (*ka << 1) - k + 1;
+ j2t = max(i__3,i__1);
+ } else {
+ j2t = j2;
+ }
+ nrt = (*n - j2t + *ka) / ka1;
+ i__3 = j1;
+ i__1 = ka1;
+ for (j = j2t; i__1 < 0 ? j >= i__3 : j <= i__3; j += i__1) {
+
+/* create nonzero element a(j+1,j-ka) outside the band */
+/* and store it in WORK(j-m) */
+
+ i__2 = j - m;
+ i__5 = j - m;
+ i__6 = ka1 + (j - *ka + 1) * ab_dim1;
+ q__1.r = work[i__5].r * ab[i__6].r - work[i__5].i * ab[i__6]
+ .i, q__1.i = work[i__5].r * ab[i__6].i + work[i__5].i
+ * ab[i__6].r;
+ work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+ i__2 = ka1 + (j - *ka + 1) * ab_dim1;
+ i__5 = j - m;
+ i__6 = ka1 + (j - *ka + 1) * ab_dim1;
+ q__1.r = rwork[i__5] * ab[i__6].r, q__1.i = rwork[i__5] * ab[
+ i__6].i;
+ ab[i__2].r = q__1.r, ab[i__2].i = q__1.i;
+/* L320: */
+ }
+
+/* generate rotations in 1st set to annihilate elements which */
+/* have been created outside the band */
+
+ if (nrt > 0) {
+ clargv_(&nrt, &ab[ka1 + (j2t - *ka) * ab_dim1], &inca, &work[
+ j2t - m], &ka1, &rwork[j2t - m], &ka1);
+ }
+ if (nr > 0) {
+
+/* apply rotations in 1st set from the left */
+
+ i__1 = *ka - 1;
+ for (l = 1; l <= i__1; ++l) {
+ clartv_(&nr, &ab[l + 1 + (j2 - l) * ab_dim1], &inca, &ab[
+ l + 2 + (j2 - l) * ab_dim1], &inca, &rwork[j2 - m]
+, &work[j2 - m], &ka1);
+/* L330: */
+ }
+
+/* apply rotations in 1st set from both sides to diagonal */
+/* blocks */
+
+ clar2v_(&nr, &ab[j2 * ab_dim1 + 1], &ab[(j2 + 1) * ab_dim1 +
+ 1], &ab[j2 * ab_dim1 + 2], &inca, &rwork[j2 - m], &
+ work[j2 - m], &ka1);
+
+ clacgv_(&nr, &work[j2 - m], &ka1);
+ }
+
+/* start applying rotations in 1st set from the right */
+
+ i__1 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__1; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[ka1 - l + 1 + j2 * ab_dim1], &inca, &ab[
+ ka1 - l + (j2 + 1) * ab_dim1], &inca, &rwork[j2 -
+ m], &work[j2 - m], &ka1);
+ }
+/* L340: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 1st set */
+
+ i__1 = j1;
+ i__3 = ka1;
+ for (j = j2; i__3 < 0 ? j >= i__1 : j <= i__1; j += i__3) {
+ i__2 = *n - m;
+ crot_(&i__2, &x[m + 1 + j * x_dim1], &c__1, &x[m + 1 + (j
+ + 1) * x_dim1], &c__1, &rwork[j - m], &work[j - m]
+);
+/* L350: */
+ }
+ }
+/* L360: */
+ }
+
+ if (update) {
+ if (i2 <= *n && kbt > 0) {
+
+/* create nonzero element a(i-kbt+ka+1,i-kbt) outside the */
+/* band and store it in WORK(i-kbt) */
+
+ i__4 = i__ - kbt;
+ i__3 = kbt + 1 + (i__ - kbt) * bb_dim1;
+ q__2.r = -bb[i__3].r, q__2.i = -bb[i__3].i;
+ q__1.r = q__2.r * ra1.r - q__2.i * ra1.i, q__1.i = q__2.r *
+ ra1.i + q__2.i * ra1.r;
+ work[i__4].r = q__1.r, work[i__4].i = q__1.i;
+ }
+ }
+
+ for (k = *kb; k >= 1; --k) {
+ if (update) {
+/* Computing MAX */
+ i__4 = 2, i__3 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__4,i__3) * ka1;
+ } else {
+/* Computing MAX */
+ i__4 = 1, i__3 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__4,i__3) * ka1;
+ }
+
+/* finish applying rotations in 2nd set from the right */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (*n - j2 + *ka + l) / ka1;
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[ka1 - l + 1 + (j2 - *ka) * ab_dim1], &
+ inca, &ab[ka1 - l + (j2 - *ka + 1) * ab_dim1], &
+ inca, &rwork[j2 - *ka], &work[j2 - *ka], &ka1);
+ }
+/* L370: */
+ }
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ i__4 = j2;
+ i__3 = -ka1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+ i__1 = j;
+ i__2 = j - *ka;
+ work[i__1].r = work[i__2].r, work[i__1].i = work[i__2].i;
+ rwork[j] = rwork[j - *ka];
+/* L380: */
+ }
+ i__3 = j1;
+ i__4 = ka1;
+ for (j = j2; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+
+/* create nonzero element a(j+1,j-ka) outside the band */
+/* and store it in WORK(j) */
+
+ i__1 = j;
+ i__2 = j;
+ i__5 = ka1 + (j - *ka + 1) * ab_dim1;
+ q__1.r = work[i__2].r * ab[i__5].r - work[i__2].i * ab[i__5]
+ .i, q__1.i = work[i__2].r * ab[i__5].i + work[i__2].i
+ * ab[i__5].r;
+ work[i__1].r = q__1.r, work[i__1].i = q__1.i;
+ i__1 = ka1 + (j - *ka + 1) * ab_dim1;
+ i__2 = j;
+ i__5 = ka1 + (j - *ka + 1) * ab_dim1;
+ q__1.r = rwork[i__2] * ab[i__5].r, q__1.i = rwork[i__2] * ab[
+ i__5].i;
+ ab[i__1].r = q__1.r, ab[i__1].i = q__1.i;
+/* L390: */
+ }
+ if (update) {
+ if (i__ - k < *n - *ka && k <= kbt) {
+ i__4 = i__ - k + *ka;
+ i__3 = i__ - k;
+ work[i__4].r = work[i__3].r, work[i__4].i = work[i__3].i;
+ }
+ }
+/* L400: */
+ }
+
+ for (k = *kb; k >= 1; --k) {
+/* Computing MAX */
+ i__4 = 1, i__3 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__4,i__3) * ka1;
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ if (nr > 0) {
+
+/* generate rotations in 2nd set to annihilate elements */
+/* which have been created outside the band */
+
+ clargv_(&nr, &ab[ka1 + (j2 - *ka) * ab_dim1], &inca, &work[j2]
+, &ka1, &rwork[j2], &ka1);
+
+/* apply rotations in 2nd set from the left */
+
+ i__4 = *ka - 1;
+ for (l = 1; l <= i__4; ++l) {
+ clartv_(&nr, &ab[l + 1 + (j2 - l) * ab_dim1], &inca, &ab[
+ l + 2 + (j2 - l) * ab_dim1], &inca, &rwork[j2], &
+ work[j2], &ka1);
+/* L410: */
+ }
+
+/* apply rotations in 2nd set from both sides to diagonal */
+/* blocks */
+
+ clar2v_(&nr, &ab[j2 * ab_dim1 + 1], &ab[(j2 + 1) * ab_dim1 +
+ 1], &ab[j2 * ab_dim1 + 2], &inca, &rwork[j2], &work[
+ j2], &ka1);
+
+ clacgv_(&nr, &work[j2], &ka1);
+ }
+
+/* start applying rotations in 2nd set from the right */
+
+ i__4 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__4; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[ka1 - l + 1 + j2 * ab_dim1], &inca, &ab[
+ ka1 - l + (j2 + 1) * ab_dim1], &inca, &rwork[j2],
+ &work[j2], &ka1);
+ }
+/* L420: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 2nd set */
+
+ i__4 = j1;
+ i__3 = ka1;
+ for (j = j2; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+ i__1 = *n - m;
+ crot_(&i__1, &x[m + 1 + j * x_dim1], &c__1, &x[m + 1 + (j
+ + 1) * x_dim1], &c__1, &rwork[j], &work[j]);
+/* L430: */
+ }
+ }
+/* L440: */
+ }
+
+ i__3 = *kb - 1;
+ for (k = 1; k <= i__3; ++k) {
+/* Computing MAX */
+ i__4 = 1, i__1 = k - i0 + 2;
+ j2 = i__ - k - 1 + max(i__4,i__1) * ka1;
+
+/* finish applying rotations in 1st set from the right */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[ka1 - l + 1 + j2 * ab_dim1], &inca, &ab[
+ ka1 - l + (j2 + 1) * ab_dim1], &inca, &rwork[j2 -
+ m], &work[j2 - m], &ka1);
+ }
+/* L450: */
+ }
+/* L460: */
+ }
+
+ if (*kb > 1) {
+ i__3 = i2 + *ka;
+ for (j = *n - 1; j >= i__3; --j) {
+ rwork[j - m] = rwork[j - *ka - m];
+ i__4 = j - m;
+ i__1 = j - *ka - m;
+ work[i__4].r = work[i__1].r, work[i__4].i = work[i__1].i;
+/* L470: */
+ }
+ }
+
+ }
+
+ goto L10;
+
+L480:
+
+/* **************************** Phase 2 ***************************** */
+
+/* The logical structure of this phase is: */
+
+/* UPDATE = .TRUE. */
+/* DO I = 1, M */
+/* use S(i) to update A and create a new bulge */
+/* apply rotations to push all bulges KA positions upward */
+/* END DO */
+/* UPDATE = .FALSE. */
+/* DO I = M - KA - 1, 2, -1 */
+/* apply rotations to push all bulges KA positions upward */
+/* END DO */
+
+/* To avoid duplicating code, the two loops are merged. */
+
+ update = TRUE_;
+ i__ = 0;
+L490:
+ if (update) {
+ ++i__;
+/* Computing MIN */
+ i__3 = *kb, i__4 = m - i__;
+ kbt = min(i__3,i__4);
+ i0 = i__ + 1;
+/* Computing MAX */
+ i__3 = 1, i__4 = i__ - *ka;
+ i1 = max(i__3,i__4);
+ i2 = i__ + kbt - ka1;
+ if (i__ > m) {
+ update = FALSE_;
+ --i__;
+ i0 = m + 1;
+ if (*ka == 0) {
+ return 0;
+ }
+ goto L490;
+ }
+ } else {
+ i__ -= *ka;
+ if (i__ < 2) {
+ return 0;
+ }
+ }
+
+ if (i__ < m - kbt) {
+ nx = m;
+ } else {
+ nx = *n;
+ }
+
+ if (upper) {
+
+/* Transform A, working with the upper triangle */
+
+ if (update) {
+
+/* Form inv(S(i))**H * A * inv(S(i)) */
+
+ i__3 = kb1 + i__ * bb_dim1;
+ bii = bb[i__3].r;
+ i__3 = ka1 + i__ * ab_dim1;
+ i__4 = ka1 + i__ * ab_dim1;
+ r__1 = ab[i__4].r / bii / bii;
+ ab[i__3].r = r__1, ab[i__3].i = 0.f;
+ i__3 = i__ - 1;
+ for (j = i1; j <= i__3; ++j) {
+ i__4 = j - i__ + ka1 + i__ * ab_dim1;
+ i__1 = j - i__ + ka1 + i__ * ab_dim1;
+ q__1.r = ab[i__1].r / bii, q__1.i = ab[i__1].i / bii;
+ ab[i__4].r = q__1.r, ab[i__4].i = q__1.i;
+/* L500: */
+ }
+/* Computing MIN */
+ i__4 = *n, i__1 = i__ + *ka;
+ i__3 = min(i__4,i__1);
+ for (j = i__ + 1; j <= i__3; ++j) {
+ i__4 = i__ - j + ka1 + j * ab_dim1;
+ i__1 = i__ - j + ka1 + j * ab_dim1;
+ q__1.r = ab[i__1].r / bii, q__1.i = ab[i__1].i / bii;
+ ab[i__4].r = q__1.r, ab[i__4].i = q__1.i;
+/* L510: */
+ }
+ i__3 = i__ + kbt;
+ for (k = i__ + 1; k <= i__3; ++k) {
+ i__4 = i__ + kbt;
+ for (j = k; j <= i__4; ++j) {
+ i__1 = k - j + ka1 + j * ab_dim1;
+ i__2 = k - j + ka1 + j * ab_dim1;
+ i__5 = i__ - j + kb1 + j * bb_dim1;
+ r_cnjg(&q__5, &ab[i__ - k + ka1 + k * ab_dim1]);
+ q__4.r = bb[i__5].r * q__5.r - bb[i__5].i * q__5.i,
+ q__4.i = bb[i__5].r * q__5.i + bb[i__5].i *
+ q__5.r;
+ q__3.r = ab[i__2].r - q__4.r, q__3.i = ab[i__2].i -
+ q__4.i;
+ r_cnjg(&q__7, &bb[i__ - k + kb1 + k * bb_dim1]);
+ i__6 = i__ - j + ka1 + j * ab_dim1;
+ q__6.r = q__7.r * ab[i__6].r - q__7.i * ab[i__6].i,
+ q__6.i = q__7.r * ab[i__6].i + q__7.i * ab[i__6]
+ .r;
+ q__2.r = q__3.r - q__6.r, q__2.i = q__3.i - q__6.i;
+ i__7 = ka1 + i__ * ab_dim1;
+ r__1 = ab[i__7].r;
+ i__8 = i__ - j + kb1 + j * bb_dim1;
+ q__9.r = r__1 * bb[i__8].r, q__9.i = r__1 * bb[i__8].i;
+ r_cnjg(&q__10, &bb[i__ - k + kb1 + k * bb_dim1]);
+ q__8.r = q__9.r * q__10.r - q__9.i * q__10.i, q__8.i =
+ q__9.r * q__10.i + q__9.i * q__10.r;
+ q__1.r = q__2.r + q__8.r, q__1.i = q__2.i + q__8.i;
+ ab[i__1].r = q__1.r, ab[i__1].i = q__1.i;
+/* L520: */
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = i__ + *ka;
+ i__4 = min(i__1,i__2);
+ for (j = i__ + kbt + 1; j <= i__4; ++j) {
+ i__1 = k - j + ka1 + j * ab_dim1;
+ i__2 = k - j + ka1 + j * ab_dim1;
+ r_cnjg(&q__3, &bb[i__ - k + kb1 + k * bb_dim1]);
+ i__5 = i__ - j + ka1 + j * ab_dim1;
+ q__2.r = q__3.r * ab[i__5].r - q__3.i * ab[i__5].i,
+ q__2.i = q__3.r * ab[i__5].i + q__3.i * ab[i__5]
+ .r;
+ q__1.r = ab[i__2].r - q__2.r, q__1.i = ab[i__2].i -
+ q__2.i;
+ ab[i__1].r = q__1.r, ab[i__1].i = q__1.i;
+/* L530: */
+ }
+/* L540: */
+ }
+ i__3 = i__;
+ for (j = i1; j <= i__3; ++j) {
+/* Computing MIN */
+ i__1 = j + *ka, i__2 = i__ + kbt;
+ i__4 = min(i__1,i__2);
+ for (k = i__ + 1; k <= i__4; ++k) {
+ i__1 = j - k + ka1 + k * ab_dim1;
+ i__2 = j - k + ka1 + k * ab_dim1;
+ i__5 = i__ - k + kb1 + k * bb_dim1;
+ i__6 = j - i__ + ka1 + i__ * ab_dim1;
+ q__2.r = bb[i__5].r * ab[i__6].r - bb[i__5].i * ab[i__6]
+ .i, q__2.i = bb[i__5].r * ab[i__6].i + bb[i__5].i
+ * ab[i__6].r;
+ q__1.r = ab[i__2].r - q__2.r, q__1.i = ab[i__2].i -
+ q__2.i;
+ ab[i__1].r = q__1.r, ab[i__1].i = q__1.i;
+/* L550: */
+ }
+/* L560: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by inv(S(i)) */
+
+ r__1 = 1.f / bii;
+ csscal_(&nx, &r__1, &x[i__ * x_dim1 + 1], &c__1);
+ if (kbt > 0) {
+ q__1.r = -1.f, q__1.i = -0.f;
+ i__3 = *ldbb - 1;
+ cgeru_(&nx, &kbt, &q__1, &x[i__ * x_dim1 + 1], &c__1, &bb[
+ *kb + (i__ + 1) * bb_dim1], &i__3, &x[(i__ + 1) *
+ x_dim1 + 1], ldx);
+ }
+ }
+
+/* store a(i1,i) in RA1 for use in next loop over K */
+
+ i__3 = i1 - i__ + ka1 + i__ * ab_dim1;
+ ra1.r = ab[i__3].r, ra1.i = ab[i__3].i;
+ }
+
+/* Generate and apply vectors of rotations to chase all the */
+/* existing bulges KA positions up toward the top of the band */
+
+ i__3 = *kb - 1;
+ for (k = 1; k <= i__3; ++k) {
+ if (update) {
+
+/* Determine the rotations which would annihilate the bulge */
+/* which has in theory just been created */
+
+ if (i__ + k - ka1 > 0 && i__ + k < m) {
+
+/* generate rotation to annihilate a(i+k-ka-1,i) */
+
+ clartg_(&ab[k + 1 + i__ * ab_dim1], &ra1, &rwork[i__ + k
+ - *ka], &work[i__ + k - *ka], &ra);
+
+/* create nonzero element a(i+k-ka-1,i+k) outside the */
+/* band and store it in WORK(m-kb+i+k) */
+
+ i__4 = kb1 - k + (i__ + k) * bb_dim1;
+ q__2.r = -bb[i__4].r, q__2.i = -bb[i__4].i;
+ q__1.r = q__2.r * ra1.r - q__2.i * ra1.i, q__1.i = q__2.r
+ * ra1.i + q__2.i * ra1.r;
+ t.r = q__1.r, t.i = q__1.i;
+ i__4 = m - *kb + i__ + k;
+ i__1 = i__ + k - *ka;
+ q__2.r = rwork[i__1] * t.r, q__2.i = rwork[i__1] * t.i;
+ r_cnjg(&q__4, &work[i__ + k - *ka]);
+ i__2 = (i__ + k) * ab_dim1 + 1;
+ q__3.r = q__4.r * ab[i__2].r - q__4.i * ab[i__2].i,
+ q__3.i = q__4.r * ab[i__2].i + q__4.i * ab[i__2]
+ .r;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+ work[i__4].r = q__1.r, work[i__4].i = q__1.i;
+ i__4 = (i__ + k) * ab_dim1 + 1;
+ i__1 = i__ + k - *ka;
+ q__2.r = work[i__1].r * t.r - work[i__1].i * t.i, q__2.i =
+ work[i__1].r * t.i + work[i__1].i * t.r;
+ i__2 = i__ + k - *ka;
+ i__5 = (i__ + k) * ab_dim1 + 1;
+ q__3.r = rwork[i__2] * ab[i__5].r, q__3.i = rwork[i__2] *
+ ab[i__5].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ ab[i__4].r = q__1.r, ab[i__4].i = q__1.i;
+ ra1.r = ra.r, ra1.i = ra.i;
+ }
+ }
+/* Computing MAX */
+ i__4 = 1, i__1 = k + i0 - m + 1;
+ j2 = i__ + k + 1 - max(i__4,i__1) * ka1;
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ if (update) {
+/* Computing MIN */
+ i__4 = j2, i__1 = i__ - (*ka << 1) + k - 1;
+ j2t = min(i__4,i__1);
+ } else {
+ j2t = j2;
+ }
+ nrt = (j2t + *ka - 1) / ka1;
+ i__4 = j2t;
+ i__1 = ka1;
+ for (j = j1; i__1 < 0 ? j >= i__4 : j <= i__4; j += i__1) {
+
+/* create nonzero element a(j-1,j+ka) outside the band */
+/* and store it in WORK(j) */
+
+ i__2 = j;
+ i__5 = j;
+ i__6 = (j + *ka - 1) * ab_dim1 + 1;
+ q__1.r = work[i__5].r * ab[i__6].r - work[i__5].i * ab[i__6]
+ .i, q__1.i = work[i__5].r * ab[i__6].i + work[i__5].i
+ * ab[i__6].r;
+ work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+ i__2 = (j + *ka - 1) * ab_dim1 + 1;
+ i__5 = j;
+ i__6 = (j + *ka - 1) * ab_dim1 + 1;
+ q__1.r = rwork[i__5] * ab[i__6].r, q__1.i = rwork[i__5] * ab[
+ i__6].i;
+ ab[i__2].r = q__1.r, ab[i__2].i = q__1.i;
+/* L570: */
+ }
+
+/* generate rotations in 1st set to annihilate elements which */
+/* have been created outside the band */
+
+ if (nrt > 0) {
+ clargv_(&nrt, &ab[(j1 + *ka) * ab_dim1 + 1], &inca, &work[j1],
+ &ka1, &rwork[j1], &ka1);
+ }
+ if (nr > 0) {
+
+/* apply rotations in 1st set from the left */
+
+ i__1 = *ka - 1;
+ for (l = 1; l <= i__1; ++l) {
+ clartv_(&nr, &ab[ka1 - l + (j1 + l) * ab_dim1], &inca, &
+ ab[*ka - l + (j1 + l) * ab_dim1], &inca, &rwork[
+ j1], &work[j1], &ka1);
+/* L580: */
+ }
+
+/* apply rotations in 1st set from both sides to diagonal */
+/* blocks */
+
+ clar2v_(&nr, &ab[ka1 + j1 * ab_dim1], &ab[ka1 + (j1 - 1) *
+ ab_dim1], &ab[*ka + j1 * ab_dim1], &inca, &rwork[j1],
+ &work[j1], &ka1);
+
+ clacgv_(&nr, &work[j1], &ka1);
+ }
+
+/* start applying rotations in 1st set from the right */
+
+ i__1 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__1; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[l + j1t * ab_dim1], &inca, &ab[l + 1 + (
+ j1t - 1) * ab_dim1], &inca, &rwork[j1t], &work[
+ j1t], &ka1);
+ }
+/* L590: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 1st set */
+
+ i__1 = j2;
+ i__4 = ka1;
+ for (j = j1; i__4 < 0 ? j >= i__1 : j <= i__1; j += i__4) {
+ crot_(&nx, &x[j * x_dim1 + 1], &c__1, &x[(j - 1) * x_dim1
+ + 1], &c__1, &rwork[j], &work[j]);
+/* L600: */
+ }
+ }
+/* L610: */
+ }
+
+ if (update) {
+ if (i2 > 0 && kbt > 0) {
+
+/* create nonzero element a(i+kbt-ka-1,i+kbt) outside the */
+/* band and store it in WORK(m-kb+i+kbt) */
+
+ i__3 = m - *kb + i__ + kbt;
+ i__4 = kb1 - kbt + (i__ + kbt) * bb_dim1;
+ q__2.r = -bb[i__4].r, q__2.i = -bb[i__4].i;
+ q__1.r = q__2.r * ra1.r - q__2.i * ra1.i, q__1.i = q__2.r *
+ ra1.i + q__2.i * ra1.r;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ }
+ }
+
+ for (k = *kb; k >= 1; --k) {
+ if (update) {
+/* Computing MAX */
+ i__3 = 2, i__4 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__3,i__4) * ka1;
+ } else {
+/* Computing MAX */
+ i__3 = 1, i__4 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__3,i__4) * ka1;
+ }
+
+/* finish applying rotations in 2nd set from the right */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (j2 + *ka + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[l + (j1t + *ka) * ab_dim1], &inca, &ab[
+ l + 1 + (j1t + *ka - 1) * ab_dim1], &inca, &rwork[
+ m - *kb + j1t + *ka], &work[m - *kb + j1t + *ka],
+ &ka1);
+ }
+/* L620: */
+ }
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ i__3 = j2;
+ i__4 = ka1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+ i__1 = m - *kb + j;
+ i__2 = m - *kb + j + *ka;
+ work[i__1].r = work[i__2].r, work[i__1].i = work[i__2].i;
+ rwork[m - *kb + j] = rwork[m - *kb + j + *ka];
+/* L630: */
+ }
+ i__4 = j2;
+ i__3 = ka1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+
+/* create nonzero element a(j-1,j+ka) outside the band */
+/* and store it in WORK(m-kb+j) */
+
+ i__1 = m - *kb + j;
+ i__2 = m - *kb + j;
+ i__5 = (j + *ka - 1) * ab_dim1 + 1;
+ q__1.r = work[i__2].r * ab[i__5].r - work[i__2].i * ab[i__5]
+ .i, q__1.i = work[i__2].r * ab[i__5].i + work[i__2].i
+ * ab[i__5].r;
+ work[i__1].r = q__1.r, work[i__1].i = q__1.i;
+ i__1 = (j + *ka - 1) * ab_dim1 + 1;
+ i__2 = m - *kb + j;
+ i__5 = (j + *ka - 1) * ab_dim1 + 1;
+ q__1.r = rwork[i__2] * ab[i__5].r, q__1.i = rwork[i__2] * ab[
+ i__5].i;
+ ab[i__1].r = q__1.r, ab[i__1].i = q__1.i;
+/* L640: */
+ }
+ if (update) {
+ if (i__ + k > ka1 && k <= kbt) {
+ i__3 = m - *kb + i__ + k - *ka;
+ i__4 = m - *kb + i__ + k;
+ work[i__3].r = work[i__4].r, work[i__3].i = work[i__4].i;
+ }
+ }
+/* L650: */
+ }
+
+ for (k = *kb; k >= 1; --k) {
+/* Computing MAX */
+ i__3 = 1, i__4 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__3,i__4) * ka1;
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ if (nr > 0) {
+
+/* generate rotations in 2nd set to annihilate elements */
+/* which have been created outside the band */
+
+ clargv_(&nr, &ab[(j1 + *ka) * ab_dim1 + 1], &inca, &work[m - *
+ kb + j1], &ka1, &rwork[m - *kb + j1], &ka1);
+
+/* apply rotations in 2nd set from the left */
+
+ i__3 = *ka - 1;
+ for (l = 1; l <= i__3; ++l) {
+ clartv_(&nr, &ab[ka1 - l + (j1 + l) * ab_dim1], &inca, &
+ ab[*ka - l + (j1 + l) * ab_dim1], &inca, &rwork[m
+ - *kb + j1], &work[m - *kb + j1], &ka1);
+/* L660: */
+ }
+
+/* apply rotations in 2nd set from both sides to diagonal */
+/* blocks */
+
+ clar2v_(&nr, &ab[ka1 + j1 * ab_dim1], &ab[ka1 + (j1 - 1) *
+ ab_dim1], &ab[*ka + j1 * ab_dim1], &inca, &rwork[m - *
+ kb + j1], &work[m - *kb + j1], &ka1);
+
+ clacgv_(&nr, &work[m - *kb + j1], &ka1);
+ }
+
+/* start applying rotations in 2nd set from the right */
+
+ i__3 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__3; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[l + j1t * ab_dim1], &inca, &ab[l + 1 + (
+ j1t - 1) * ab_dim1], &inca, &rwork[m - *kb + j1t],
+ &work[m - *kb + j1t], &ka1);
+ }
+/* L670: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 2nd set */
+
+ i__3 = j2;
+ i__4 = ka1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+ crot_(&nx, &x[j * x_dim1 + 1], &c__1, &x[(j - 1) * x_dim1
+ + 1], &c__1, &rwork[m - *kb + j], &work[m - *kb +
+ j]);
+/* L680: */
+ }
+ }
+/* L690: */
+ }
+
+ i__4 = *kb - 1;
+ for (k = 1; k <= i__4; ++k) {
+/* Computing MAX */
+ i__3 = 1, i__1 = k + i0 - m + 1;
+ j2 = i__ + k + 1 - max(i__3,i__1) * ka1;
+
+/* finish applying rotations in 1st set from the right */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[l + j1t * ab_dim1], &inca, &ab[l + 1 + (
+ j1t - 1) * ab_dim1], &inca, &rwork[j1t], &work[
+ j1t], &ka1);
+ }
+/* L700: */
+ }
+/* L710: */
+ }
+
+ if (*kb > 1) {
+ i__4 = i2 - *ka;
+ for (j = 2; j <= i__4; ++j) {
+ rwork[j] = rwork[j + *ka];
+ i__3 = j;
+ i__1 = j + *ka;
+ work[i__3].r = work[i__1].r, work[i__3].i = work[i__1].i;
+/* L720: */
+ }
+ }
+
+ } else {
+
+/* Transform A, working with the lower triangle */
+
+ if (update) {
+
+/* Form inv(S(i))**H * A * inv(S(i)) */
+
+ i__4 = i__ * bb_dim1 + 1;
+ bii = bb[i__4].r;
+ i__4 = i__ * ab_dim1 + 1;
+ i__3 = i__ * ab_dim1 + 1;
+ r__1 = ab[i__3].r / bii / bii;
+ ab[i__4].r = r__1, ab[i__4].i = 0.f;
+ i__4 = i__ - 1;
+ for (j = i1; j <= i__4; ++j) {
+ i__3 = i__ - j + 1 + j * ab_dim1;
+ i__1 = i__ - j + 1 + j * ab_dim1;
+ q__1.r = ab[i__1].r / bii, q__1.i = ab[i__1].i / bii;
+ ab[i__3].r = q__1.r, ab[i__3].i = q__1.i;
+/* L730: */
+ }
+/* Computing MIN */
+ i__3 = *n, i__1 = i__ + *ka;
+ i__4 = min(i__3,i__1);
+ for (j = i__ + 1; j <= i__4; ++j) {
+ i__3 = j - i__ + 1 + i__ * ab_dim1;
+ i__1 = j - i__ + 1 + i__ * ab_dim1;
+ q__1.r = ab[i__1].r / bii, q__1.i = ab[i__1].i / bii;
+ ab[i__3].r = q__1.r, ab[i__3].i = q__1.i;
+/* L740: */
+ }
+ i__4 = i__ + kbt;
+ for (k = i__ + 1; k <= i__4; ++k) {
+ i__3 = i__ + kbt;
+ for (j = k; j <= i__3; ++j) {
+ i__1 = j - k + 1 + k * ab_dim1;
+ i__2 = j - k + 1 + k * ab_dim1;
+ i__5 = j - i__ + 1 + i__ * bb_dim1;
+ r_cnjg(&q__5, &ab[k - i__ + 1 + i__ * ab_dim1]);
+ q__4.r = bb[i__5].r * q__5.r - bb[i__5].i * q__5.i,
+ q__4.i = bb[i__5].r * q__5.i + bb[i__5].i *
+ q__5.r;
+ q__3.r = ab[i__2].r - q__4.r, q__3.i = ab[i__2].i -
+ q__4.i;
+ r_cnjg(&q__7, &bb[k - i__ + 1 + i__ * bb_dim1]);
+ i__6 = j - i__ + 1 + i__ * ab_dim1;
+ q__6.r = q__7.r * ab[i__6].r - q__7.i * ab[i__6].i,
+ q__6.i = q__7.r * ab[i__6].i + q__7.i * ab[i__6]
+ .r;
+ q__2.r = q__3.r - q__6.r, q__2.i = q__3.i - q__6.i;
+ i__7 = i__ * ab_dim1 + 1;
+ r__1 = ab[i__7].r;
+ i__8 = j - i__ + 1 + i__ * bb_dim1;
+ q__9.r = r__1 * bb[i__8].r, q__9.i = r__1 * bb[i__8].i;
+ r_cnjg(&q__10, &bb[k - i__ + 1 + i__ * bb_dim1]);
+ q__8.r = q__9.r * q__10.r - q__9.i * q__10.i, q__8.i =
+ q__9.r * q__10.i + q__9.i * q__10.r;
+ q__1.r = q__2.r + q__8.r, q__1.i = q__2.i + q__8.i;
+ ab[i__1].r = q__1.r, ab[i__1].i = q__1.i;
+/* L750: */
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = i__ + *ka;
+ i__3 = min(i__1,i__2);
+ for (j = i__ + kbt + 1; j <= i__3; ++j) {
+ i__1 = j - k + 1 + k * ab_dim1;
+ i__2 = j - k + 1 + k * ab_dim1;
+ r_cnjg(&q__3, &bb[k - i__ + 1 + i__ * bb_dim1]);
+ i__5 = j - i__ + 1 + i__ * ab_dim1;
+ q__2.r = q__3.r * ab[i__5].r - q__3.i * ab[i__5].i,
+ q__2.i = q__3.r * ab[i__5].i + q__3.i * ab[i__5]
+ .r;
+ q__1.r = ab[i__2].r - q__2.r, q__1.i = ab[i__2].i -
+ q__2.i;
+ ab[i__1].r = q__1.r, ab[i__1].i = q__1.i;
+/* L760: */
+ }
+/* L770: */
+ }
+ i__4 = i__;
+ for (j = i1; j <= i__4; ++j) {
+/* Computing MIN */
+ i__1 = j + *ka, i__2 = i__ + kbt;
+ i__3 = min(i__1,i__2);
+ for (k = i__ + 1; k <= i__3; ++k) {
+ i__1 = k - j + 1 + j * ab_dim1;
+ i__2 = k - j + 1 + j * ab_dim1;
+ i__5 = k - i__ + 1 + i__ * bb_dim1;
+ i__6 = i__ - j + 1 + j * ab_dim1;
+ q__2.r = bb[i__5].r * ab[i__6].r - bb[i__5].i * ab[i__6]
+ .i, q__2.i = bb[i__5].r * ab[i__6].i + bb[i__5].i
+ * ab[i__6].r;
+ q__1.r = ab[i__2].r - q__2.r, q__1.i = ab[i__2].i -
+ q__2.i;
+ ab[i__1].r = q__1.r, ab[i__1].i = q__1.i;
+/* L780: */
+ }
+/* L790: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by inv(S(i)) */
+
+ r__1 = 1.f / bii;
+ csscal_(&nx, &r__1, &x[i__ * x_dim1 + 1], &c__1);
+ if (kbt > 0) {
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgerc_(&nx, &kbt, &q__1, &x[i__ * x_dim1 + 1], &c__1, &bb[
+ i__ * bb_dim1 + 2], &c__1, &x[(i__ + 1) * x_dim1
+ + 1], ldx);
+ }
+ }
+
+/* store a(i,i1) in RA1 for use in next loop over K */
+
+ i__4 = i__ - i1 + 1 + i1 * ab_dim1;
+ ra1.r = ab[i__4].r, ra1.i = ab[i__4].i;
+ }
+
+/* Generate and apply vectors of rotations to chase all the */
+/* existing bulges KA positions up toward the top of the band */
+
+ i__4 = *kb - 1;
+ for (k = 1; k <= i__4; ++k) {
+ if (update) {
+
+/* Determine the rotations which would annihilate the bulge */
+/* which has in theory just been created */
+
+ if (i__ + k - ka1 > 0 && i__ + k < m) {
+
+/* generate rotation to annihilate a(i,i+k-ka-1) */
+
+ clartg_(&ab[ka1 - k + (i__ + k - *ka) * ab_dim1], &ra1, &
+ rwork[i__ + k - *ka], &work[i__ + k - *ka], &ra);
+
+/* create nonzero element a(i+k,i+k-ka-1) outside the */
+/* band and store it in WORK(m-kb+i+k) */
+
+ i__3 = k + 1 + i__ * bb_dim1;
+ q__2.r = -bb[i__3].r, q__2.i = -bb[i__3].i;
+ q__1.r = q__2.r * ra1.r - q__2.i * ra1.i, q__1.i = q__2.r
+ * ra1.i + q__2.i * ra1.r;
+ t.r = q__1.r, t.i = q__1.i;
+ i__3 = m - *kb + i__ + k;
+ i__1 = i__ + k - *ka;
+ q__2.r = rwork[i__1] * t.r, q__2.i = rwork[i__1] * t.i;
+ r_cnjg(&q__4, &work[i__ + k - *ka]);
+ i__2 = ka1 + (i__ + k - *ka) * ab_dim1;
+ q__3.r = q__4.r * ab[i__2].r - q__4.i * ab[i__2].i,
+ q__3.i = q__4.r * ab[i__2].i + q__4.i * ab[i__2]
+ .r;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ i__3 = ka1 + (i__ + k - *ka) * ab_dim1;
+ i__1 = i__ + k - *ka;
+ q__2.r = work[i__1].r * t.r - work[i__1].i * t.i, q__2.i =
+ work[i__1].r * t.i + work[i__1].i * t.r;
+ i__2 = i__ + k - *ka;
+ i__5 = ka1 + (i__ + k - *ka) * ab_dim1;
+ q__3.r = rwork[i__2] * ab[i__5].r, q__3.i = rwork[i__2] *
+ ab[i__5].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ ab[i__3].r = q__1.r, ab[i__3].i = q__1.i;
+ ra1.r = ra.r, ra1.i = ra.i;
+ }
+ }
+/* Computing MAX */
+ i__3 = 1, i__1 = k + i0 - m + 1;
+ j2 = i__ + k + 1 - max(i__3,i__1) * ka1;
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ if (update) {
+/* Computing MIN */
+ i__3 = j2, i__1 = i__ - (*ka << 1) + k - 1;
+ j2t = min(i__3,i__1);
+ } else {
+ j2t = j2;
+ }
+ nrt = (j2t + *ka - 1) / ka1;
+ i__3 = j2t;
+ i__1 = ka1;
+ for (j = j1; i__1 < 0 ? j >= i__3 : j <= i__3; j += i__1) {
+
+/* create nonzero element a(j+ka,j-1) outside the band */
+/* and store it in WORK(j) */
+
+ i__2 = j;
+ i__5 = j;
+ i__6 = ka1 + (j - 1) * ab_dim1;
+ q__1.r = work[i__5].r * ab[i__6].r - work[i__5].i * ab[i__6]
+ .i, q__1.i = work[i__5].r * ab[i__6].i + work[i__5].i
+ * ab[i__6].r;
+ work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+ i__2 = ka1 + (j - 1) * ab_dim1;
+ i__5 = j;
+ i__6 = ka1 + (j - 1) * ab_dim1;
+ q__1.r = rwork[i__5] * ab[i__6].r, q__1.i = rwork[i__5] * ab[
+ i__6].i;
+ ab[i__2].r = q__1.r, ab[i__2].i = q__1.i;
+/* L800: */
+ }
+
+/* generate rotations in 1st set to annihilate elements which */
+/* have been created outside the band */
+
+ if (nrt > 0) {
+ clargv_(&nrt, &ab[ka1 + j1 * ab_dim1], &inca, &work[j1], &ka1,
+ &rwork[j1], &ka1);
+ }
+ if (nr > 0) {
+
+/* apply rotations in 1st set from the right */
+
+ i__1 = *ka - 1;
+ for (l = 1; l <= i__1; ++l) {
+ clartv_(&nr, &ab[l + 1 + j1 * ab_dim1], &inca, &ab[l + 2
+ + (j1 - 1) * ab_dim1], &inca, &rwork[j1], &work[
+ j1], &ka1);
+/* L810: */
+ }
+
+/* apply rotations in 1st set from both sides to diagonal */
+/* blocks */
+
+ clar2v_(&nr, &ab[j1 * ab_dim1 + 1], &ab[(j1 - 1) * ab_dim1 +
+ 1], &ab[(j1 - 1) * ab_dim1 + 2], &inca, &rwork[j1], &
+ work[j1], &ka1);
+
+ clacgv_(&nr, &work[j1], &ka1);
+ }
+
+/* start applying rotations in 1st set from the left */
+
+ i__1 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__1; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[ka1 - l + 1 + (j1t - ka1 + l) * ab_dim1]
+, &inca, &ab[ka1 - l + (j1t - ka1 + l) * ab_dim1],
+ &inca, &rwork[j1t], &work[j1t], &ka1);
+ }
+/* L820: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 1st set */
+
+ i__1 = j2;
+ i__3 = ka1;
+ for (j = j1; i__3 < 0 ? j >= i__1 : j <= i__1; j += i__3) {
+ r_cnjg(&q__1, &work[j]);
+ crot_(&nx, &x[j * x_dim1 + 1], &c__1, &x[(j - 1) * x_dim1
+ + 1], &c__1, &rwork[j], &q__1);
+/* L830: */
+ }
+ }
+/* L840: */
+ }
+
+ if (update) {
+ if (i2 > 0 && kbt > 0) {
+
+/* create nonzero element a(i+kbt,i+kbt-ka-1) outside the */
+/* band and store it in WORK(m-kb+i+kbt) */
+
+ i__4 = m - *kb + i__ + kbt;
+ i__3 = kbt + 1 + i__ * bb_dim1;
+ q__2.r = -bb[i__3].r, q__2.i = -bb[i__3].i;
+ q__1.r = q__2.r * ra1.r - q__2.i * ra1.i, q__1.i = q__2.r *
+ ra1.i + q__2.i * ra1.r;
+ work[i__4].r = q__1.r, work[i__4].i = q__1.i;
+ }
+ }
+
+ for (k = *kb; k >= 1; --k) {
+ if (update) {
+/* Computing MAX */
+ i__4 = 2, i__3 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__4,i__3) * ka1;
+ } else {
+/* Computing MAX */
+ i__4 = 1, i__3 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__4,i__3) * ka1;
+ }
+
+/* finish applying rotations in 2nd set from the left */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (j2 + *ka + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[ka1 - l + 1 + (j1t + l - 1) * ab_dim1],
+ &inca, &ab[ka1 - l + (j1t + l - 1) * ab_dim1], &
+ inca, &rwork[m - *kb + j1t + *ka], &work[m - *kb
+ + j1t + *ka], &ka1);
+ }
+/* L850: */
+ }
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ i__4 = j2;
+ i__3 = ka1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+ i__1 = m - *kb + j;
+ i__2 = m - *kb + j + *ka;
+ work[i__1].r = work[i__2].r, work[i__1].i = work[i__2].i;
+ rwork[m - *kb + j] = rwork[m - *kb + j + *ka];
+/* L860: */
+ }
+ i__3 = j2;
+ i__4 = ka1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+
+/* create nonzero element a(j+ka,j-1) outside the band */
+/* and store it in WORK(m-kb+j) */
+
+ i__1 = m - *kb + j;
+ i__2 = m - *kb + j;
+ i__5 = ka1 + (j - 1) * ab_dim1;
+ q__1.r = work[i__2].r * ab[i__5].r - work[i__2].i * ab[i__5]
+ .i, q__1.i = work[i__2].r * ab[i__5].i + work[i__2].i
+ * ab[i__5].r;
+ work[i__1].r = q__1.r, work[i__1].i = q__1.i;
+ i__1 = ka1 + (j - 1) * ab_dim1;
+ i__2 = m - *kb + j;
+ i__5 = ka1 + (j - 1) * ab_dim1;
+ q__1.r = rwork[i__2] * ab[i__5].r, q__1.i = rwork[i__2] * ab[
+ i__5].i;
+ ab[i__1].r = q__1.r, ab[i__1].i = q__1.i;
+/* L870: */
+ }
+ if (update) {
+ if (i__ + k > ka1 && k <= kbt) {
+ i__4 = m - *kb + i__ + k - *ka;
+ i__3 = m - *kb + i__ + k;
+ work[i__4].r = work[i__3].r, work[i__4].i = work[i__3].i;
+ }
+ }
+/* L880: */
+ }
+
+ for (k = *kb; k >= 1; --k) {
+/* Computing MAX */
+ i__4 = 1, i__3 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__4,i__3) * ka1;
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ if (nr > 0) {
+
+/* generate rotations in 2nd set to annihilate elements */
+/* which have been created outside the band */
+
+ clargv_(&nr, &ab[ka1 + j1 * ab_dim1], &inca, &work[m - *kb +
+ j1], &ka1, &rwork[m - *kb + j1], &ka1);
+
+/* apply rotations in 2nd set from the right */
+
+ i__4 = *ka - 1;
+ for (l = 1; l <= i__4; ++l) {
+ clartv_(&nr, &ab[l + 1 + j1 * ab_dim1], &inca, &ab[l + 2
+ + (j1 - 1) * ab_dim1], &inca, &rwork[m - *kb + j1]
+, &work[m - *kb + j1], &ka1);
+/* L890: */
+ }
+
+/* apply rotations in 2nd set from both sides to diagonal */
+/* blocks */
+
+ clar2v_(&nr, &ab[j1 * ab_dim1 + 1], &ab[(j1 - 1) * ab_dim1 +
+ 1], &ab[(j1 - 1) * ab_dim1 + 2], &inca, &rwork[m - *
+ kb + j1], &work[m - *kb + j1], &ka1);
+
+ clacgv_(&nr, &work[m - *kb + j1], &ka1);
+ }
+
+/* start applying rotations in 2nd set from the left */
+
+ i__4 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__4; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[ka1 - l + 1 + (j1t - ka1 + l) * ab_dim1]
+, &inca, &ab[ka1 - l + (j1t - ka1 + l) * ab_dim1],
+ &inca, &rwork[m - *kb + j1t], &work[m - *kb +
+ j1t], &ka1);
+ }
+/* L900: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 2nd set */
+
+ i__4 = j2;
+ i__3 = ka1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+ r_cnjg(&q__1, &work[m - *kb + j]);
+ crot_(&nx, &x[j * x_dim1 + 1], &c__1, &x[(j - 1) * x_dim1
+ + 1], &c__1, &rwork[m - *kb + j], &q__1);
+/* L910: */
+ }
+ }
+/* L920: */
+ }
+
+ i__3 = *kb - 1;
+ for (k = 1; k <= i__3; ++k) {
+/* Computing MAX */
+ i__4 = 1, i__1 = k + i0 - m + 1;
+ j2 = i__ + k + 1 - max(i__4,i__1) * ka1;
+
+/* finish applying rotations in 1st set from the left */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[ka1 - l + 1 + (j1t - ka1 + l) * ab_dim1]
+, &inca, &ab[ka1 - l + (j1t - ka1 + l) * ab_dim1],
+ &inca, &rwork[j1t], &work[j1t], &ka1);
+ }
+/* L930: */
+ }
+/* L940: */
+ }
+
+ if (*kb > 1) {
+ i__3 = i2 - *ka;
+ for (j = 2; j <= i__3; ++j) {
+ rwork[j] = rwork[j + *ka];
+ i__4 = j;
+ i__1 = j + *ka;
+ work[i__4].r = work[i__1].r, work[i__4].i = work[i__1].i;
+/* L950: */
+ }
+ }
+
+ }
+
+ goto L490;
+
+/* End of CHBGST */
+
+} /* chbgst_ */
diff --git a/contrib/libs/clapack/chbgv.c b/contrib/libs/clapack/chbgv.c
new file mode 100644
index 0000000000..b4e70a4053
--- /dev/null
+++ b/contrib/libs/clapack/chbgv.c
@@ -0,0 +1,235 @@
+/* chbgv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int chbgv_(char *jobz, char *uplo, integer *n, integer *ka, 
+	integer *kb, complex *ab, integer *ldab, complex *bb, integer *ldbb, 
+	real *w, complex *z__, integer *ldz, complex *work, real *rwork, 
+	integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, bb_dim1, bb_offset, z_dim1, z_offset, i__1;
+
+    /* Local variables */
+    integer inde;
+    char vect[1];
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    logical upper, wantz;
+    extern /* Subroutine */ int chbtrd_(char *, char *, integer *, integer *, 
+	    complex *, integer *, real *, real *, complex *, integer *, 
+	    complex *, integer *), chbgst_(char *, char *, 
+	    integer *, integer *, integer *, complex *, integer *, complex *, 
+	    integer *, complex *, integer *, complex *, real *, integer *), xerbla_(char *, integer *), cpbstf_(char 
+	    *, integer *, integer *, complex *, integer *, integer *);
+    integer indwrk;
+    extern /* Subroutine */ int csteqr_(char *, integer *, real *, real *, 
+	    complex *, integer *, real *, integer *), ssterf_(integer 
+	    *, real *, real *, integer *);
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CHBGV computes all the eigenvalues, and optionally, the eigenvectors */
+/*  of a complex generalized Hermitian-definite banded eigenproblem, of */
+/*  the form A*x=(lambda)*B*x. Here A and B are assumed to be Hermitian */
+/*  and banded, and B is also positive definite. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOBZ    (input) CHARACTER*1 */
+/*          = 'N':  Compute eigenvalues only; */
+/*          = 'V':  Compute eigenvalues and eigenvectors. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangles of A and B are stored; */
+/*          = 'L':  Lower triangles of A and B are stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrices A and B.  N >= 0. */
+
+/*  KA      (input) INTEGER */
+/*          The number of superdiagonals of the matrix A if UPLO = 'U', */
+/*          or the number of subdiagonals if UPLO = 'L'. KA >= 0. */
+
+/*  KB      (input) INTEGER */
+/*          The number of superdiagonals of the matrix B if UPLO = 'U', */
+/*          or the number of subdiagonals if UPLO = 'L'. KB >= 0. */
+
+/*  AB      (input/output) COMPLEX array, dimension (LDAB, N) */
+/*          On entry, the upper or lower triangle of the Hermitian band */
+/*          matrix A, stored in the first ka+1 rows of the array.  The */
+/*          j-th column of A is stored in the j-th column of the array AB */
+/*          as follows: */
+/*          if UPLO = 'U', AB(ka+1+i-j,j) = A(i,j) for max(1,j-ka)<=i<=j; */
+/*          if UPLO = 'L', AB(1+i-j,j)    = A(i,j) for j<=i<=min(n,j+ka). */
+
+/*          On exit, the contents of AB are destroyed. */
+
+/*  LDAB    (input) INTEGER */
+/*          The leading dimension of the array AB.  LDAB >= KA+1. */
+
+/*  BB      (input/output) COMPLEX array, dimension (LDBB, N) */
+/*          On entry, the upper or lower triangle of the Hermitian band */
+/*          matrix B, stored in the first kb+1 rows of the array.  The */
+/*          j-th column of B is stored in the j-th column of the array BB */
+/*          as follows: */
+/*          if UPLO = 'U', BB(kb+1+i-j,j) = B(i,j) for max(1,j-kb)<=i<=j; */
+/*          if UPLO = 'L', BB(1+i-j,j)    = B(i,j) for j<=i<=min(n,j+kb). */
+
+/*          On exit, the factor S from the split Cholesky factorization */
+/*          B = S**H*S, as returned by CPBSTF. */
+
+/*  LDBB    (input) INTEGER */
+/*          The leading dimension of the array BB.  LDBB >= KB+1. */
+
+/*  W       (output) REAL array, dimension (N) */
+/*          If INFO = 0, the eigenvalues in ascending order. */
+
+/*  Z       (output) COMPLEX array, dimension (LDZ, N) */
+/*          If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/*          eigenvectors, with the i-th column of Z holding the */
+/*          eigenvector associated with W(i). The eigenvectors are */
+/*          normalized so that Z**H*B*Z = I. */
+/*          If JOBZ = 'N', then Z is not referenced. */
+
+/*  LDZ     (input) INTEGER */
+/*          The leading dimension of the array Z.  LDZ >= 1, and if */
+/*          JOBZ = 'V', LDZ >= N. */
+
+/*  WORK    (workspace) COMPLEX array, dimension (N) */
+
+/*  RWORK   (workspace) REAL array, dimension (3*N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, and i is: */
+/*             <= N:  the algorithm failed to converge: */
+/*                    i off-diagonal elements of an intermediate */
+/*                    tridiagonal form did not converge to zero; */
+/*             > N:   if INFO = N + i, for 1 <= i <= N, then CPBSTF */
+/*                    returned INFO = i: B is not positive definite. */
+/*                    The factorization of B could not be completed and */
+/*                    no eigenvalues or eigenvectors were computed. */
+
+/*  ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    bb_dim1 = *ldbb;
+    bb_offset = 1 + bb_dim1;
+    bb -= bb_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    upper = lsame_(uplo, "U");
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (upper || lsame_(uplo, "L"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ka < 0) {
+	*info = -4;
+    } else if (*kb < 0 || *kb > *ka) {
+	*info = -5;
+    } else if (*ldab < *ka + 1) {
+	*info = -7;
+    } else if (*ldbb < *kb + 1) {
+	*info = -9;
+    } else if (*ldz < 1 || wantz && *ldz < *n) {
+	*info = -12;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CHBGV ", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Form a split Cholesky factorization of B. */
+
+    cpbstf_(uplo, n, kb, &bb[bb_offset], ldbb, info);
+    if (*info != 0) {
+	*info = *n + *info;
+	return 0;
+    }
+
+/*     Transform problem to standard eigenvalue problem. */
+
+/*     RWORK layout: RWORK(INDE..INDE+N-1) will receive the tridiagonal */
+/*     off-diagonal from CHBTRD below; RWORK(INDWRK..) is scratch for */
+/*     CHBGST and later CSTEQR (3*N total, as documented above). */
+
+    inde = 1;
+    indwrk = inde + *n;
+    chbgst_(jobz, uplo, n, ka, kb, &ab[ab_offset], ldab, &bb[bb_offset], ldbb, 
+	     &z__[z_offset], ldz, &work[1], &rwork[indwrk], &iinfo);
+
+/*     Reduce to tridiagonal form. */
+
+/*     VECT = 'U': CHBTRD updates the matrix already stored in Z (the */
+/*     transformation written there by CHBGST when JOBZ = 'V'), so Z */
+/*     accumulates the full back-transformation; VECT = 'N' skips it. */
+
+    if (wantz) {
+	*(unsigned char *)vect = 'U';
+    } else {
+	*(unsigned char *)vect = 'N';
+    }
+    chbtrd_(vect, uplo, n, ka, &ab[ab_offset], ldab, &w[1], &rwork[inde], &
+	    z__[z_offset], ldz, &work[1], &iinfo);
+
+/*     For eigenvalues only, call SSTERF.  For eigenvectors, call CSTEQR. */
+
+    if (! wantz) {
+	ssterf_(n, &w[1], &rwork[inde], info);
+    } else {
+	csteqr_(jobz, n, &w[1], &rwork[inde], &z__[z_offset], ldz, &rwork[
+		indwrk], info);
+    }
+    return 0;
+
+/*     End of CHBGV */
+
+} /* chbgv_ */
diff --git a/contrib/libs/clapack/chbgvd.c b/contrib/libs/clapack/chbgvd.c
new file mode 100644
index 0000000000..780f128dce
--- /dev/null
+++ b/contrib/libs/clapack/chbgvd.c
@@ -0,0 +1,355 @@
+/* chbgvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static complex c_b2 = {0.f,0.f};
+
+/* Subroutine */ int chbgvd_(char *jobz, char *uplo, integer *n, integer *ka, 
+	integer *kb, complex *ab, integer *ldab, complex *bb, integer *ldbb, 
+	real *w, complex *z__, integer *ldz, complex *work, integer *lwork, 
+	real *rwork, integer *lrwork, integer *iwork, integer *liwork, 
+	integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, bb_dim1, bb_offset, z_dim1, z_offset, i__1;
+
+    /* Local variables */
+    integer inde;
+    char vect[1];
+    integer llwk2;
+    extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *, 
+	    integer *, complex *, complex *, integer *, complex *, integer *, 
+	    complex *, complex *, integer *);
+    extern logical lsame_(char *, char *);
+    integer iinfo, lwmin;
+    logical upper;
+    integer llrwk;
+    logical wantz;
+    integer indwk2;
+    extern /* Subroutine */ int cstedc_(char *, integer *, real *, real *, 
+	    complex *, integer *, complex *, integer *, real *, integer *, 
+	    integer *, integer *, integer *), chbtrd_(char *, char *, 
+	    integer *, integer *, complex *, integer *, real *, real *, 
+	    complex *, integer *, complex *, integer *), 
+	    chbgst_(char *, char *, integer *, integer *, integer *, complex *
+, integer *, complex *, integer *, complex *, integer *, complex *
+, real *, integer *), clacpy_(char *, integer *, 
+	    integer *, complex *, integer *, complex *, integer *), 
+	    xerbla_(char *, integer *), cpbstf_(char *, integer *, 
+	    integer *, complex *, integer *, integer *);
+    integer indwrk, liwmin;
+    extern /* Subroutine */ int ssterf_(integer *, real *, real *, integer *);
+    integer lrwmin;
+    logical lquery;
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CHBGVD computes all the eigenvalues, and optionally, the eigenvectors */
+/*  of a complex generalized Hermitian-definite banded eigenproblem, of */
+/*  the form A*x=(lambda)*B*x. Here A and B are assumed to be Hermitian */
+/*  and banded, and B is also positive definite.  If eigenvectors are */
+/*  desired, it uses a divide and conquer algorithm. */
+
+/*  The divide and conquer algorithm makes very mild assumptions about */
+/*  floating point arithmetic. It will work on machines with a guard */
+/*  digit in add/subtract, or on those binary machines without guard */
+/*  digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/*  Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/*  without guard digits, but we know of none. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOBZ    (input) CHARACTER*1 */
+/*          = 'N':  Compute eigenvalues only; */
+/*          = 'V':  Compute eigenvalues and eigenvectors. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangles of A and B are stored; */
+/*          = 'L':  Lower triangles of A and B are stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrices A and B.  N >= 0. */
+
+/*  KA      (input) INTEGER */
+/*          The number of superdiagonals of the matrix A if UPLO = 'U', */
+/*          or the number of subdiagonals if UPLO = 'L'. KA >= 0. */
+
+/*  KB      (input) INTEGER */
+/*          The number of superdiagonals of the matrix B if UPLO = 'U', */
+/*          or the number of subdiagonals if UPLO = 'L'. KB >= 0. */
+
+/*  AB      (input/output) COMPLEX array, dimension (LDAB, N) */
+/*          On entry, the upper or lower triangle of the Hermitian band */
+/*          matrix A, stored in the first ka+1 rows of the array.  The */
+/*          j-th column of A is stored in the j-th column of the array AB */
+/*          as follows: */
+/*          if UPLO = 'U', AB(ka+1+i-j,j) = A(i,j) for max(1,j-ka)<=i<=j; */
+/*          if UPLO = 'L', AB(1+i-j,j)    = A(i,j) for j<=i<=min(n,j+ka). */
+
+/*          On exit, the contents of AB are destroyed. */
+
+/*  LDAB    (input) INTEGER */
+/*          The leading dimension of the array AB.  LDAB >= KA+1. */
+
+/*  BB      (input/output) COMPLEX array, dimension (LDBB, N) */
+/*          On entry, the upper or lower triangle of the Hermitian band */
+/*          matrix B, stored in the first kb+1 rows of the array.  The */
+/*          j-th column of B is stored in the j-th column of the array BB */
+/*          as follows: */
+/*          if UPLO = 'U', BB(kb+1+i-j,j) = B(i,j) for max(1,j-kb)<=i<=j; */
+/*          if UPLO = 'L', BB(1+i-j,j)    = B(i,j) for j<=i<=min(n,j+kb). */
+
+/*          On exit, the factor S from the split Cholesky factorization */
+/*          B = S**H*S, as returned by CPBSTF. */
+
+/*  LDBB    (input) INTEGER */
+/*          The leading dimension of the array BB.  LDBB >= KB+1. */
+
+/*  W       (output) REAL array, dimension (N) */
+/*          If INFO = 0, the eigenvalues in ascending order. */
+
+/*  Z       (output) COMPLEX array, dimension (LDZ, N) */
+/*          If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/*          eigenvectors, with the i-th column of Z holding the */
+/*          eigenvector associated with W(i). The eigenvectors are */
+/*          normalized so that Z**H*B*Z = I. */
+/*          If JOBZ = 'N', then Z is not referenced. */
+
+/*  LDZ     (input) INTEGER */
+/*          The leading dimension of the array Z.  LDZ >= 1, and if */
+/*          JOBZ = 'V', LDZ >= N. */
+
+/*  WORK    (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/*          On exit, if INFO=0, WORK(1) returns the optimal LWORK. */
+
+/*  LWORK   (input) INTEGER */
+/*          The dimension of the array WORK. */
+/*          If N <= 1,               LWORK >= 1. */
+/*          If JOBZ = 'N' and N > 1, LWORK >= N. */
+/*          If JOBZ = 'V' and N > 1, LWORK >= 2*N**2. */
+
+/*          If LWORK = -1, then a workspace query is assumed; the routine */
+/*          only calculates the optimal sizes of the WORK, RWORK and */
+/*          IWORK arrays, returns these values as the first entries of */
+/*          the WORK, RWORK and IWORK arrays, and no error message */
+/*          related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/*  RWORK   (workspace/output) REAL array, dimension (MAX(1,LRWORK)) */
+/*          On exit, if INFO=0, RWORK(1) returns the optimal LRWORK. */
+
+/*  LRWORK  (input) INTEGER */
+/*          The dimension of array RWORK. */
+/*          If N <= 1,               LRWORK >= 1. */
+/*          If JOBZ = 'N' and N > 1, LRWORK >= N. */
+/*          If JOBZ = 'V' and N > 1, LRWORK >= 1 + 5*N + 2*N**2. */
+
+/*          If LRWORK = -1, then a workspace query is assumed; the */
+/*          routine only calculates the optimal sizes of the WORK, RWORK */
+/*          and IWORK arrays, returns these values as the first entries */
+/*          of the WORK, RWORK and IWORK arrays, and no error message */
+/*          related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/*  IWORK   (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/*          On exit, if INFO=0, IWORK(1) returns the optimal LIWORK. */
+
+/*  LIWORK  (input) INTEGER */
+/*          The dimension of array IWORK. */
+/*          If JOBZ = 'N' or N <= 1, LIWORK >= 1. */
+/*          If JOBZ = 'V' and N > 1, LIWORK >= 3 + 5*N. */
+
+/*          If LIWORK = -1, then a workspace query is assumed; the */
+/*          routine only calculates the optimal sizes of the WORK, RWORK */
+/*          and IWORK arrays, returns these values as the first entries */
+/*          of the WORK, RWORK and IWORK arrays, and no error message */
+/*          related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, and i is: */
+/*             <= N:  the algorithm failed to converge: */
+/*                    i off-diagonal elements of an intermediate */
+/*                    tridiagonal form did not converge to zero; */
+/*             > N:   if INFO = N + i, for 1 <= i <= N, then CPBSTF */
+/*                    returned INFO = i: B is not positive definite. */
+/*                    The factorization of B could not be completed and */
+/*                    no eigenvalues or eigenvectors were computed. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*     Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    bb_dim1 = *ldbb;
+    bb_offset = 1 + bb_dim1;
+    bb -= bb_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --rwork;
+    --iwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    upper = lsame_(uplo, "U");
+    lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1;
+
+    *info = 0;
+    if (*n <= 1) {
+	lwmin = 1;
+	lrwmin = 1;
+	liwmin = 1;
+    } else if (wantz) {
+/*        Computing 2nd power */
+	i__1 = *n;
+	lwmin = i__1 * i__1 << 1;
+/*        Computing 2nd power */
+	i__1 = *n;
+	lrwmin = *n * 5 + 1 + (i__1 * i__1 << 1);
+	liwmin = *n * 5 + 3;
+    } else {
+	lwmin = *n;
+	lrwmin = *n;
+	liwmin = 1;
+    }
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (upper || lsame_(uplo, "L"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ka < 0) {
+	*info = -4;
+    } else if (*kb < 0 || *kb > *ka) {
+	*info = -5;
+    } else if (*ldab < *ka + 1) {
+	*info = -7;
+    } else if (*ldbb < *kb + 1) {
+	*info = -9;
+    } else if (*ldz < 1 || wantz && *ldz < *n) {
+	*info = -12;
+    }
+
+    if (*info == 0) {
+	work[1].r = (real) lwmin, work[1].i = 0.f;
+	rwork[1] = (real) lrwmin;
+	iwork[1] = liwmin;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -14;
+	} else if (*lrwork < lrwmin && ! lquery) {
+	    *info = -16;
+	} else if (*liwork < liwmin && ! lquery) {
+	    *info = -18;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CHBGVD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Form a split Cholesky factorization of B. */
+
+    cpbstf_(uplo, n, kb, &bb[bb_offset], ldbb, info);
+    if (*info != 0) {
+	*info = *n + *info;
+	return 0;
+    }
+
+/*     Transform problem to standard eigenvalue problem. */
+
+/*     Workspace layout: WORK(1..N*N) will hold the tridiagonal */
+/*     eigenvector matrix from CSTEDC; WORK(INDWK2..) and */
+/*     RWORK(INDWRK..) are scratch. */
+/*     NOTE(review): the "+ 2" in the remaining-length computations */
+/*     below matches the original LAPACK 3.2 f2c translation (later */
+/*     LAPACK releases use "+ 1"); kept as-is in this vendored */
+/*     generated code. */
+
+    inde = 1;
+    indwrk = inde + *n;
+    indwk2 = *n * *n + 1;
+    llwk2 = *lwork - indwk2 + 2;
+    llrwk = *lrwork - indwrk + 2;
+    chbgst_(jobz, uplo, n, ka, kb, &ab[ab_offset], ldab, &bb[bb_offset], ldbb, 
+	     &z__[z_offset], ldz, &work[1], &rwork[indwrk], &iinfo);
+
+/*     Reduce Hermitian band matrix to tridiagonal form. */
+
+    if (wantz) {
+	*(unsigned char *)vect = 'U';
+    } else {
+	*(unsigned char *)vect = 'N';
+    }
+    chbtrd_(vect, uplo, n, ka, &ab[ab_offset], ldab, &w[1], &rwork[inde], &
+	    z__[z_offset], ldz, &work[1], &iinfo);
+
+/*     For eigenvalues only, call SSTERF.  For eigenvectors, call CSTEDC. */
+
+    if (! wantz) {
+	ssterf_(n, &w[1], &rwork[inde], info);
+    } else {
+	cstedc_("I", n, &w[1], &rwork[inde], &work[1], n, &work[indwk2], &
+		llwk2, &rwork[indwrk], &llrwk, &iwork[1], liwork, info);
+
+/*        Back-transform: Z := Z * S, where S (in WORK) holds the */
+/*        eigenvectors of the tridiagonal matrix; the CGEMM product is */
+/*        formed in WORK(INDWK2..) and then copied back into Z. */
+
+	cgemm_("N", "N", n, n, n, &c_b1, &z__[z_offset], ldz, &work[1], n, &
+		c_b2, &work[indwk2], n);
+	clacpy_("A", n, n, &work[indwk2], n, &z__[z_offset], ldz);
+    }
+
+    work[1].r = (real) lwmin, work[1].i = 0.f;
+    rwork[1] = (real) lrwmin;
+    iwork[1] = liwmin;
+    return 0;
+
+/*     End of CHBGVD */
+
+} /* chbgvd_ */
diff --git a/contrib/libs/clapack/chbgvx.c b/contrib/libs/clapack/chbgvx.c
new file mode 100644
index 0000000000..295bc8610d
--- /dev/null
+++ b/contrib/libs/clapack/chbgvx.c
@@ -0,0 +1,472 @@
+/* chbgvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int chbgvx_(char *jobz, char *range, char *uplo, integer *n, 
+	integer *ka, integer *kb, complex *ab, integer *ldab, complex *bb, 
+	integer *ldbb, complex *q, integer *ldq, real *vl, real *vu, integer *
+	il, integer *iu, real *abstol, integer *m, real *w, complex *z__, 
+	integer *ldz, complex *work, real *rwork, integer *iwork, integer *
+	ifail, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, bb_dim1, bb_offset, q_dim1, q_offset, z_dim1, 
+	    z_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, jj;
+    real tmp1;
+    integer indd, inde;
+    char vect[1];
+    logical test;
+    integer itmp1, indee;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *);
+    integer iinfo;
+    char order[1];
+    extern /* Subroutine */ int ccopy_(integer *, complex *, integer *, 
+	    complex *, integer *), cswap_(integer *, complex *, integer *, 
+	    complex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *, 
+	    integer *);
+    logical wantz, alleig, indeig;
+    integer indibl;
+    extern /* Subroutine */ int chbtrd_(char *, char *, integer *, integer *, 
+	    complex *, integer *, real *, real *, complex *, integer *, 
+	    complex *, integer *);
+    logical valeig;
+    extern /* Subroutine */ int chbgst_(char *, char *, integer *, integer *, 
+	    integer *, complex *, integer *, complex *, integer *, complex *, 
+	    integer *, complex *, real *, integer *), clacpy_(
+	    char *, integer *, integer *, complex *, integer *, complex *, 
+	    integer *), xerbla_(char *, integer *), cpbstf_(
+	    char *, integer *, integer *, complex *, integer *, integer *);
+    integer indiwk, indisp;
+    extern /* Subroutine */ int cstein_(integer *, real *, real *, integer *, 
+	    real *, integer *, integer *, complex *, integer *, real *, 
+	    integer *, integer *, integer *);
+    integer indrwk, indwrk;
+    extern /* Subroutine */ int csteqr_(char *, integer *, real *, real *, 
+	    complex *, integer *, real *, integer *), ssterf_(integer 
+	    *, real *, real *, integer *);
+    integer nsplit;
+    extern /* Subroutine */ int sstebz_(char *, char *, integer *, real *, 
+	    real *, integer *, integer *, real *, real *, real *, integer *, 
+	    integer *, real *, integer *, integer *, real *, integer *, 
+	    integer *);
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CHBGVX computes all the eigenvalues, and optionally, the eigenvectors */
+/*  of a complex generalized Hermitian-definite banded eigenproblem, of */
+/*  the form A*x=(lambda)*B*x. Here A and B are assumed to be Hermitian */
+/*  and banded, and B is also positive definite.  Eigenvalues and */
+/*  eigenvectors can be selected by specifying either all eigenvalues, */
+/*  a range of values or a range of indices for the desired eigenvalues. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOBZ    (input) CHARACTER*1 */
+/*          = 'N':  Compute eigenvalues only; */
+/*          = 'V':  Compute eigenvalues and eigenvectors. */
+
+/*  RANGE   (input) CHARACTER*1 */
+/*          = 'A': all eigenvalues will be found; */
+/*          = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/*                 will be found; */
+/*          = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangles of A and B are stored; */
+/*          = 'L':  Lower triangles of A and B are stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrices A and B.  N >= 0. */
+
+/*  KA      (input) INTEGER */
+/*          The number of superdiagonals of the matrix A if UPLO = 'U', */
+/*          or the number of subdiagonals if UPLO = 'L'. KA >= 0. */
+
+/*  KB      (input) INTEGER */
+/*          The number of superdiagonals of the matrix B if UPLO = 'U', */
+/*          or the number of subdiagonals if UPLO = 'L'. KB >= 0. */
+
+/*  AB      (input/output) COMPLEX array, dimension (LDAB, N) */
+/*          On entry, the upper or lower triangle of the Hermitian band */
+/*          matrix A, stored in the first ka+1 rows of the array.  The */
+/*          j-th column of A is stored in the j-th column of the array AB */
+/*          as follows: */
+/*          if UPLO = 'U', AB(ka+1+i-j,j) = A(i,j) for max(1,j-ka)<=i<=j; */
+/*          if UPLO = 'L', AB(1+i-j,j)    = A(i,j) for j<=i<=min(n,j+ka). */
+
+/*          On exit, the contents of AB are destroyed. */
+
+/*  LDAB    (input) INTEGER */
+/*          The leading dimension of the array AB.  LDAB >= KA+1. */
+
+/*  BB      (input/output) COMPLEX array, dimension (LDBB, N) */
+/*          On entry, the upper or lower triangle of the Hermitian band */
+/*          matrix B, stored in the first kb+1 rows of the array.  The */
+/*          j-th column of B is stored in the j-th column of the array BB */
+/*          as follows: */
+/*          if UPLO = 'U', BB(kb+1+i-j,j) = B(i,j) for max(1,j-kb)<=i<=j; */
+/*          if UPLO = 'L', BB(1+i-j,j)    = B(i,j) for j<=i<=min(n,j+kb). */
+
+/*          On exit, the factor S from the split Cholesky factorization */
+/*          B = S**H*S, as returned by CPBSTF. */
+
+/*  LDBB    (input) INTEGER */
+/*          The leading dimension of the array BB.  LDBB >= KB+1. */
+
+/*  Q       (output) COMPLEX array, dimension (LDQ, N) */
+/*          If JOBZ = 'V', the n-by-n matrix used in the reduction of */
+/*          A*x = (lambda)*B*x to standard form, i.e. C*x = (lambda)*x, */
+/*          and consequently C to tridiagonal form. */
+/*          If JOBZ = 'N', the array Q is not referenced. */
+
+/*  LDQ     (input) INTEGER */
+/*          The leading dimension of the array Q.  If JOBZ = 'N', */
+/*          LDQ >= 1. If JOBZ = 'V', LDQ >= max(1,N). */
+
+/*  VL      (input) REAL */
+/*  VU      (input) REAL */
+/*          If RANGE='V', the lower and upper bounds of the interval to */
+/*          be searched for eigenvalues. VL < VU. */
+/*          Not referenced if RANGE = 'A' or 'I'. */
+
+/*  IL      (input) INTEGER */
+/*  IU      (input) INTEGER */
+/*          If RANGE='I', the indices (in ascending order) of the */
+/*          smallest and largest eigenvalues to be returned. */
+/*          1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/*          Not referenced if RANGE = 'A' or 'V'. */
+
+/*  ABSTOL  (input) REAL */
+/*          The absolute error tolerance for the eigenvalues. */
+/*          An approximate eigenvalue is accepted as converged */
+/*          when it is determined to lie in an interval [a,b] */
+/*          of width less than or equal to */
+
+/*                  ABSTOL + EPS *   max( |a|,|b| ) , */
+
+/*          where EPS is the machine precision.  If ABSTOL is less than */
+/*          or equal to zero, then  EPS*|T|  will be used in its place, */
+/*          where |T| is the 1-norm of the tridiagonal matrix obtained */
+/*          by reducing AP to tridiagonal form. */
+
+/*          Eigenvalues will be computed most accurately when ABSTOL is */
+/*          set to twice the underflow threshold 2*SLAMCH('S'), not zero. */
+/*          If this routine returns with INFO>0, indicating that some */
+/*          eigenvectors did not converge, try setting ABSTOL to */
+/*          2*SLAMCH('S'). */
+
+/*  M       (output) INTEGER */
+/*          The total number of eigenvalues found.  0 <= M <= N. */
+/*          If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/*  W       (output) REAL array, dimension (N) */
+/*          If INFO = 0, the eigenvalues in ascending order. */
+
+/*  Z       (output) COMPLEX array, dimension (LDZ, N) */
+/*          If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/*          eigenvectors, with the i-th column of Z holding the */
+/*          eigenvector associated with W(i). The eigenvectors are */
+/*          normalized so that Z**H*B*Z = I. */
+/*          If JOBZ = 'N', then Z is not referenced. */
+
+/*  LDZ     (input) INTEGER */
+/*          The leading dimension of the array Z.  LDZ >= 1, and if */
+/*          JOBZ = 'V', LDZ >= N. */
+
+/*  WORK    (workspace) COMPLEX array, dimension (N) */
+
+/*  RWORK   (workspace) REAL array, dimension (7*N) */
+
+/*  IWORK   (workspace) INTEGER array, dimension (5*N) */
+
+/*  IFAIL   (output) INTEGER array, dimension (N) */
+/*          If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/*          IFAIL are zero.  If INFO > 0, then IFAIL contains the */
+/*          indices of the eigenvectors that failed to converge. */
+/*          If JOBZ = 'N', then IFAIL is not referenced. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, and i is: */
+/*             <= N:  then i eigenvectors failed to converge.  Their */
+/*                    indices are stored in array IFAIL. */
+/*             > N:   if INFO = N + i, for 1 <= i <= N, then CPBSTF */
+/*                    returned INFO = i: B is not positive definite. */
+/*                    The factorization of B could not be completed and */
+/*                    no eigenvalues or eigenvectors were computed. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*     Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    bb_dim1 = *ldbb;
+    bb_offset = 1 + bb_dim1;
+    bb -= bb_offset;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --rwork;
+    --iwork;
+    --ifail;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    upper = lsame_(uplo, "U");
+    alleig = lsame_(range, "A");
+    valeig = lsame_(range, "V");
+    indeig = lsame_(range, "I");
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (alleig || valeig || indeig)) {
+	*info = -2;
+    } else if (! (upper || lsame_(uplo, "L"))) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*ka < 0) {
+	*info = -5;
+    } else if (*kb < 0 || *kb > *ka) {
+	*info = -6;
+    } else if (*ldab < *ka + 1) {
+	*info = -8;
+    } else if (*ldbb < *kb + 1) {
+	*info = -10;
+    } else if (*ldq < 1 || wantz && *ldq < *n) {
+	*info = -12;
+    } else {
+	if (valeig) {
+	    if (*n > 0 && *vu <= *vl) {
+		*info = -14;
+	    }
+	} else if (indeig) {
+	    if (*il < 1 || *il > max(1,*n)) {
+		*info = -15;
+	    } else if (*iu < min(*n,*il) || *iu > *n) {
+		*info = -16;
+	    }
+	}
+    }
+    if (*info == 0) {
+	if (*ldz < 1 || wantz && *ldz < *n) {
+	    *info = -21;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CHBGVX", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    *m = 0;
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Form a split Cholesky factorization of B. */
+
+    cpbstf_(uplo, n, kb, &bb[bb_offset], ldbb, info);
+    if (*info != 0) {
+	*info = *n + *info;
+	return 0;
+    }
+
+/*     Transform problem to standard eigenvalue problem. */
+
+    chbgst_(jobz, uplo, n, ka, kb, &ab[ab_offset], ldab, &bb[bb_offset], ldbb, 
+	     &q[q_offset], ldq, &work[1], &rwork[1], &iinfo);
+
+/*     Solve the standard eigenvalue problem. */
+/*     Reduce Hermitian band matrix to tridiagonal form. */
+
+/*     RWORK layout: tridiagonal diagonal D at INDD, off-diagonal E at */
+/*     INDE, remaining scratch at INDRWK (7*N total, as documented */
+/*     above). */
+
+    indd = 1;
+    inde = indd + *n;
+    indrwk = inde + *n;
+    indwrk = 1;
+    if (wantz) {
+	*(unsigned char *)vect = 'U';
+    } else {
+	*(unsigned char *)vect = 'N';
+    }
+    chbtrd_(vect, uplo, n, ka, &ab[ab_offset], ldab, &rwork[indd], &rwork[
+	    inde], &q[q_offset], ldq, &work[indwrk], &iinfo);
+
+/*     If all eigenvalues are desired and ABSTOL is less than or equal */
+/*     to zero, then call SSTERF or CSTEQR.  If this fails for some */
+/*     eigenvalue, then try SSTEBZ. */
+
+    test = FALSE_;
+    if (indeig) {
+	if (*il == 1 && *iu == *n) {
+	    test = TRUE_;
+	}
+    }
+    if ((alleig || test) && *abstol <= 0.f) {
+	scopy_(n, &rwork[indd], &c__1, &w[1], &c__1);
+	indee = indrwk + (*n << 1);
+	i__1 = *n - 1;
+	scopy_(&i__1, &rwork[inde], &c__1, &rwork[indee], &c__1);
+	if (! wantz) {
+	    ssterf_(n, &w[1], &rwork[indee], info);
+	} else {
+	    clacpy_("A", n, n, &q[q_offset], ldq, &z__[z_offset], ldz);
+	    csteqr_(jobz, n, &w[1], &rwork[indee], &z__[z_offset], ldz, &
+		    rwork[indrwk], info);
+	    if (*info == 0) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    ifail[i__] = 0;
+/* L10: */
+		}
+	    }
+	}
+	if (*info == 0) {
+	    *m = *n;
+	    goto L30;
+	}
+	*info = 0;
+    }
+
+/*     Otherwise, call SSTEBZ and, if eigenvectors are desired, */
+/*     call CSTEIN. */
+
+    if (wantz) {
+	*(unsigned char *)order = 'B';
+    } else {
+	*(unsigned char *)order = 'E';
+    }
+
+/*     IWORK layout: eigenvalue block indices at INDIBL, submatrix */
+/*     split points at INDISP, SSTEBZ/CSTEIN scratch at INDIWK */
+/*     (5*N total). */
+
+    indibl = 1;
+    indisp = indibl + *n;
+    indiwk = indisp + *n;
+    sstebz_(range, order, n, vl, vu, il, iu, abstol, &rwork[indd], &rwork[
+	    inde], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &rwork[
+	    indrwk], &iwork[indiwk], info);
+
+    if (wantz) {
+	cstein_(n, &rwork[indd], &rwork[inde], m, &w[1], &iwork[indibl], &
+		iwork[indisp], &z__[z_offset], ldz, &rwork[indrwk], &iwork[
+		indiwk], &ifail[1], info);
+
+/*        Apply unitary matrix used in reduction to tridiagonal */
+/*        form to eigenvectors returned by CSTEIN. */
+
+	i__1 = *m;
+	for (j = 1; j <= i__1; ++j) {
+	    ccopy_(n, &z__[j * z_dim1 + 1], &c__1, &work[1], &c__1);
+	    cgemv_("N", n, n, &c_b2, &q[q_offset], ldq, &work[1], &c__1, &
+		    c_b1, &z__[j * z_dim1 + 1], &c__1);
+/* L20: */
+	}
+    }
+
+L30:
+
+/*     If eigenvalues are not in order, then sort them, along with */
+/*     eigenvectors. */
+
+    if (wantz) {
+	i__1 = *m - 1;
+	for (j = 1; j <= i__1; ++j) {
+	    i__ = 0;
+	    tmp1 = w[j];
+	    i__2 = *m;
+	    for (jj = j + 1; jj <= i__2; ++jj) {
+		if (w[jj] < tmp1) {
+		    i__ = jj;
+		    tmp1 = w[jj];
+		}
+/* L40: */
+	    }
+
+	    if (i__ != 0) {
+		itmp1 = iwork[indibl + i__ - 1];
+		w[i__] = w[j];
+		iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+		w[j] = tmp1;
+		iwork[indibl + j - 1] = itmp1;
+		cswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+			 &c__1);
+		if (*info != 0) {
+		    itmp1 = ifail[i__];
+		    ifail[i__] = ifail[j];
+		    ifail[j] = itmp1;
+		}
+	    }
+/* L50: */
+	}
+    }
+
+    return 0;
+
+/*     End of CHBGVX */
+
+} /* chbgvx_ */
diff --git a/contrib/libs/clapack/chbtrd.c b/contrib/libs/clapack/chbtrd.c
new file mode 100644
index 0000000000..a62bd642d7
--- /dev/null
+++ b/contrib/libs/clapack/chbtrd.c
@@ -0,0 +1,808 @@
+/* chbtrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int chbtrd_(char *vect, char *uplo, integer *n, integer *kd,
+ complex *ab, integer *ldab, real *d__, real *e, complex *q, integer *
+ ldq, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, q_dim1, q_offset, i__1, i__2, i__3, i__4,
+ i__5, i__6;
+ real r__1;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+ double c_abs(complex *);
+
+ /* Local variables */
+ integer i__, j, k, l;
+ complex t;
+ integer i2, j1, j2, nq, nr, kd1, ibl, iqb, kdn, jin, nrt, kdm1, inca,
+ jend, lend, jinc;
+ real abst;
+ integer incx, last;
+ complex temp;
+ extern /* Subroutine */ int crot_(integer *, complex *, integer *,
+ complex *, integer *, real *, complex *);
+ integer j1end, j1inc;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *);
+ integer iqend;
+ extern logical lsame_(char *, char *);
+ logical initq, wantq, upper;
+ extern /* Subroutine */ int clar2v_(integer *, complex *, complex *,
+ complex *, integer *, real *, complex *, integer *), clacgv_(
+ integer *, complex *, integer *);
+ integer iqaend;
+ extern /* Subroutine */ int claset_(char *, integer *, integer *, complex
+ *, complex *, complex *, integer *), clartg_(complex *,
+ complex *, real *, complex *, complex *), xerbla_(char *, integer
+ *), clargv_(integer *, complex *, integer *, complex *,
+ integer *, real *, integer *), clartv_(integer *, complex *,
+ integer *, complex *, integer *, real *, complex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHBTRD reduces a complex Hermitian band matrix A to real symmetric */
+/* tridiagonal form T by a unitary similarity transformation: */
+/* Q**H * A * Q = T. */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* = 'N': do not form Q; */
+/* = 'V': form Q; */
+/* = 'U': update a matrix X, by forming X*Q. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) COMPLEX array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+/* On exit, the diagonal elements of AB are overwritten by the */
+/* diagonal elements of the tridiagonal matrix T; if KD > 0, the */
+/* elements on the first superdiagonal (if UPLO = 'U') or the */
+/* first subdiagonal (if UPLO = 'L') are overwritten by the */
+/* off-diagonal elements of T; the rest of AB is overwritten by */
+/* values generated during the reduction. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* D (output) REAL array, dimension (N) */
+/* The diagonal elements of the tridiagonal matrix T. */
+
+/* E (output) REAL array, dimension (N-1) */
+/* The off-diagonal elements of the tridiagonal matrix T: */
+/* E(i) = T(i,i+1) if UPLO = 'U'; E(i) = T(i+1,i) if UPLO = 'L'. */
+
+/* Q (input/output) COMPLEX array, dimension (LDQ,N) */
+/* On entry, if VECT = 'U', then Q must contain an N-by-N */
+/* matrix X; if VECT = 'N' or 'V', then Q need not be set. */
+
+/* On exit: */
+/* if VECT = 'V', Q contains the N-by-N unitary matrix Q; */
+/* if VECT = 'U', Q contains the product X*Q; */
+/* if VECT = 'N', the array Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. */
+/* LDQ >= 1, and LDQ >= N if VECT = 'V' or 'U'. */
+
+/* WORK (workspace) COMPLEX array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* Modified by Linda Kaufman, Bell Labs. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --d__;
+ --e;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --work;
+
+ /* Function Body */
+ initq = lsame_(vect, "V");
+ wantq = initq || lsame_(vect, "U");
+ upper = lsame_(uplo, "U");
+ kd1 = *kd + 1;
+ kdm1 = *kd - 1;
+ incx = *ldab - 1;
+ iqend = 1;
+
+ *info = 0;
+ if (! wantq && ! lsame_(vect, "N")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*kd < 0) {
+ *info = -4;
+ } else if (*ldab < kd1) {
+ *info = -6;
+ } else if (*ldq < max(1,*n) && wantq) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHBTRD", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Initialize Q to the unit matrix, if needed */
+
+ if (initq) {
+ claset_("Full", n, n, &c_b1, &c_b2, &q[q_offset], ldq);
+ }
+
+/* Wherever possible, plane rotations are generated and applied in */
+/* vector operations of length NR over the index set J1:J2:KD1. */
+
+/* The real cosines and complex sines of the plane rotations are */
+/* stored in the arrays D and WORK. */
+
+ inca = kd1 * *ldab;
+/* Computing MIN */
+ i__1 = *n - 1;
+ kdn = min(i__1,*kd);
+ if (upper) {
+
+ if (*kd > 1) {
+
+/* Reduce to complex Hermitian tridiagonal form, working with */
+/* the upper triangle */
+
+ nr = 0;
+ j1 = kdn + 2;
+ j2 = 1;
+
+ i__1 = kd1 + ab_dim1;
+ i__2 = kd1 + ab_dim1;
+ r__1 = ab[i__2].r;
+ ab[i__1].r = r__1, ab[i__1].i = 0.f;
+ i__1 = *n - 2;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Reduce i-th row of matrix to tridiagonal form */
+
+ for (k = kdn + 1; k >= 2; --k) {
+ j1 += kdn;
+ j2 += kdn;
+
+ if (nr > 0) {
+
+/* generate plane rotations to annihilate nonzero */
+/* elements which have been created outside the band */
+
+ clargv_(&nr, &ab[(j1 - 1) * ab_dim1 + 1], &inca, &
+ work[j1], &kd1, &d__[j1], &kd1);
+
+/* apply rotations from the right */
+
+
+/* Dependent on the number of diagonals either */
+/* CLARTV or CROT is used */
+
+ if (nr >= (*kd << 1) - 1) {
+ i__2 = *kd - 1;
+ for (l = 1; l <= i__2; ++l) {
+ clartv_(&nr, &ab[l + 1 + (j1 - 1) * ab_dim1],
+ &inca, &ab[l + j1 * ab_dim1], &inca, &
+ d__[j1], &work[j1], &kd1);
+/* L10: */
+ }
+
+ } else {
+ jend = j1 + (nr - 1) * kd1;
+ i__2 = jend;
+ i__3 = kd1;
+ for (jinc = j1; i__3 < 0 ? jinc >= i__2 : jinc <=
+ i__2; jinc += i__3) {
+ crot_(&kdm1, &ab[(jinc - 1) * ab_dim1 + 2], &
+ c__1, &ab[jinc * ab_dim1 + 1], &c__1,
+ &d__[jinc], &work[jinc]);
+/* L20: */
+ }
+ }
+ }
+
+
+ if (k > 2) {
+ if (k <= *n - i__ + 1) {
+
+/* generate plane rotation to annihilate a(i,i+k-1) */
+/* within the band */
+
+ clartg_(&ab[*kd - k + 3 + (i__ + k - 2) * ab_dim1]
+, &ab[*kd - k + 2 + (i__ + k - 1) *
+ ab_dim1], &d__[i__ + k - 1], &work[i__ +
+ k - 1], &temp);
+ i__3 = *kd - k + 3 + (i__ + k - 2) * ab_dim1;
+ ab[i__3].r = temp.r, ab[i__3].i = temp.i;
+
+/* apply rotation from the right */
+
+ i__3 = k - 3;
+ crot_(&i__3, &ab[*kd - k + 4 + (i__ + k - 2) *
+ ab_dim1], &c__1, &ab[*kd - k + 3 + (i__ +
+ k - 1) * ab_dim1], &c__1, &d__[i__ + k -
+ 1], &work[i__ + k - 1]);
+ }
+ ++nr;
+ j1 = j1 - kdn - 1;
+ }
+
+/* apply plane rotations from both sides to diagonal */
+/* blocks */
+
+ if (nr > 0) {
+ clar2v_(&nr, &ab[kd1 + (j1 - 1) * ab_dim1], &ab[kd1 +
+ j1 * ab_dim1], &ab[*kd + j1 * ab_dim1], &inca,
+ &d__[j1], &work[j1], &kd1);
+ }
+
+/* apply plane rotations from the left */
+
+ if (nr > 0) {
+ clacgv_(&nr, &work[j1], &kd1);
+ if ((*kd << 1) - 1 < nr) {
+
+/* Dependent on the number of diagonals either */
+/* CLARTV or CROT is used */
+
+ i__3 = *kd - 1;
+ for (l = 1; l <= i__3; ++l) {
+ if (j2 + l > *n) {
+ nrt = nr - 1;
+ } else {
+ nrt = nr;
+ }
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[*kd - l + (j1 + l) *
+ ab_dim1], &inca, &ab[*kd - l + 1
+ + (j1 + l) * ab_dim1], &inca, &
+ d__[j1], &work[j1], &kd1);
+ }
+/* L30: */
+ }
+ } else {
+ j1end = j1 + kd1 * (nr - 2);
+ if (j1end >= j1) {
+ i__3 = j1end;
+ i__2 = kd1;
+ for (jin = j1; i__2 < 0 ? jin >= i__3 : jin <=
+ i__3; jin += i__2) {
+ i__4 = *kd - 1;
+ crot_(&i__4, &ab[*kd - 1 + (jin + 1) *
+ ab_dim1], &incx, &ab[*kd + (jin +
+ 1) * ab_dim1], &incx, &d__[jin], &
+ work[jin]);
+/* L40: */
+ }
+ }
+/* Computing MIN */
+ i__2 = kdm1, i__3 = *n - j2;
+ lend = min(i__2,i__3);
+ last = j1end + kd1;
+ if (lend > 0) {
+ crot_(&lend, &ab[*kd - 1 + (last + 1) *
+ ab_dim1], &incx, &ab[*kd + (last + 1)
+ * ab_dim1], &incx, &d__[last], &work[
+ last]);
+ }
+ }
+ }
+
+ if (wantq) {
+
+/* accumulate product of plane rotations in Q */
+
+ if (initq) {
+
+/* take advantage of the fact that Q was */
+/* initially the Identity matrix */
+
+ iqend = max(iqend,j2);
+/* Computing MAX */
+ i__2 = 0, i__3 = k - 3;
+ i2 = max(i__2,i__3);
+ iqaend = i__ * *kd + 1;
+ if (k == 2) {
+ iqaend += *kd;
+ }
+ iqaend = min(iqaend,iqend);
+ i__2 = j2;
+ i__3 = kd1;
+ for (j = j1; i__3 < 0 ? j >= i__2 : j <= i__2; j
+ += i__3) {
+ ibl = i__ - i2 / kdm1;
+ ++i2;
+/* Computing MAX */
+ i__4 = 1, i__5 = j - ibl;
+ iqb = max(i__4,i__5);
+ nq = iqaend + 1 - iqb;
+/* Computing MIN */
+ i__4 = iqaend + *kd;
+ iqaend = min(i__4,iqend);
+ r_cnjg(&q__1, &work[j]);
+ crot_(&nq, &q[iqb + (j - 1) * q_dim1], &c__1,
+ &q[iqb + j * q_dim1], &c__1, &d__[j],
+ &q__1);
+/* L50: */
+ }
+ } else {
+
+ i__3 = j2;
+ i__2 = kd1;
+ for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j
+ += i__2) {
+ r_cnjg(&q__1, &work[j]);
+ crot_(n, &q[(j - 1) * q_dim1 + 1], &c__1, &q[
+ j * q_dim1 + 1], &c__1, &d__[j], &
+ q__1);
+/* L60: */
+ }
+ }
+
+ }
+
+ if (j2 + kdn > *n) {
+
+/* adjust J2 to keep within the bounds of the matrix */
+
+ --nr;
+ j2 = j2 - kdn - 1;
+ }
+
+ i__2 = j2;
+ i__3 = kd1;
+ for (j = j1; i__3 < 0 ? j >= i__2 : j <= i__2; j += i__3)
+ {
+
+/* create nonzero element a(j-1,j+kd) outside the band */
+/* and store it in WORK */
+
+ i__4 = j + *kd;
+ i__5 = j;
+ i__6 = (j + *kd) * ab_dim1 + 1;
+ q__1.r = work[i__5].r * ab[i__6].r - work[i__5].i *
+ ab[i__6].i, q__1.i = work[i__5].r * ab[i__6]
+ .i + work[i__5].i * ab[i__6].r;
+ work[i__4].r = q__1.r, work[i__4].i = q__1.i;
+ i__4 = (j + *kd) * ab_dim1 + 1;
+ i__5 = j;
+ i__6 = (j + *kd) * ab_dim1 + 1;
+ q__1.r = d__[i__5] * ab[i__6].r, q__1.i = d__[i__5] *
+ ab[i__6].i;
+ ab[i__4].r = q__1.r, ab[i__4].i = q__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+/* L90: */
+ }
+ }
+
+ if (*kd > 0) {
+
+/* make off-diagonal elements real and copy them to E */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__3 = *kd + (i__ + 1) * ab_dim1;
+ t.r = ab[i__3].r, t.i = ab[i__3].i;
+ abst = c_abs(&t);
+ i__3 = *kd + (i__ + 1) * ab_dim1;
+ ab[i__3].r = abst, ab[i__3].i = 0.f;
+ e[i__] = abst;
+ if (abst != 0.f) {
+ q__1.r = t.r / abst, q__1.i = t.i / abst;
+ t.r = q__1.r, t.i = q__1.i;
+ } else {
+ t.r = 1.f, t.i = 0.f;
+ }
+ if (i__ < *n - 1) {
+ i__3 = *kd + (i__ + 2) * ab_dim1;
+ i__2 = *kd + (i__ + 2) * ab_dim1;
+ q__1.r = ab[i__2].r * t.r - ab[i__2].i * t.i, q__1.i = ab[
+ i__2].r * t.i + ab[i__2].i * t.r;
+ ab[i__3].r = q__1.r, ab[i__3].i = q__1.i;
+ }
+ if (wantq) {
+ r_cnjg(&q__1, &t);
+ cscal_(n, &q__1, &q[(i__ + 1) * q_dim1 + 1], &c__1);
+ }
+/* L100: */
+ }
+ } else {
+
+/* set E to zero if original matrix was diagonal */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ e[i__] = 0.f;
+/* L110: */
+ }
+ }
+
+/* copy diagonal elements to D */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__3 = i__;
+ i__2 = kd1 + i__ * ab_dim1;
+ d__[i__3] = ab[i__2].r;
+/* L120: */
+ }
+
+ } else {
+
+ if (*kd > 1) {
+
+/* Reduce to complex Hermitian tridiagonal form, working with */
+/* the lower triangle */
+
+ nr = 0;
+ j1 = kdn + 2;
+ j2 = 1;
+
+ i__1 = ab_dim1 + 1;
+ i__3 = ab_dim1 + 1;
+ r__1 = ab[i__3].r;
+ ab[i__1].r = r__1, ab[i__1].i = 0.f;
+ i__1 = *n - 2;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Reduce i-th column of matrix to tridiagonal form */
+
+ for (k = kdn + 1; k >= 2; --k) {
+ j1 += kdn;
+ j2 += kdn;
+
+ if (nr > 0) {
+
+/* generate plane rotations to annihilate nonzero */
+/* elements which have been created outside the band */
+
+ clargv_(&nr, &ab[kd1 + (j1 - kd1) * ab_dim1], &inca, &
+ work[j1], &kd1, &d__[j1], &kd1);
+
+/* apply plane rotations from one side */
+
+
+/* Dependent on the number of diagonals either */
+/* CLARTV or CROT is used */
+
+ if (nr > (*kd << 1) - 1) {
+ i__3 = *kd - 1;
+ for (l = 1; l <= i__3; ++l) {
+ clartv_(&nr, &ab[kd1 - l + (j1 - kd1 + l) *
+ ab_dim1], &inca, &ab[kd1 - l + 1 + (
+ j1 - kd1 + l) * ab_dim1], &inca, &d__[
+ j1], &work[j1], &kd1);
+/* L130: */
+ }
+ } else {
+ jend = j1 + kd1 * (nr - 1);
+ i__3 = jend;
+ i__2 = kd1;
+ for (jinc = j1; i__2 < 0 ? jinc >= i__3 : jinc <=
+ i__3; jinc += i__2) {
+ crot_(&kdm1, &ab[*kd + (jinc - *kd) * ab_dim1]
+, &incx, &ab[kd1 + (jinc - *kd) *
+ ab_dim1], &incx, &d__[jinc], &work[
+ jinc]);
+/* L140: */
+ }
+ }
+
+ }
+
+ if (k > 2) {
+ if (k <= *n - i__ + 1) {
+
+/* generate plane rotation to annihilate a(i+k-1,i) */
+/* within the band */
+
+ clartg_(&ab[k - 1 + i__ * ab_dim1], &ab[k + i__ *
+ ab_dim1], &d__[i__ + k - 1], &work[i__ +
+ k - 1], &temp);
+ i__2 = k - 1 + i__ * ab_dim1;
+ ab[i__2].r = temp.r, ab[i__2].i = temp.i;
+
+/* apply rotation from the left */
+
+ i__2 = k - 3;
+ i__3 = *ldab - 1;
+ i__4 = *ldab - 1;
+ crot_(&i__2, &ab[k - 2 + (i__ + 1) * ab_dim1], &
+ i__3, &ab[k - 1 + (i__ + 1) * ab_dim1], &
+ i__4, &d__[i__ + k - 1], &work[i__ + k -
+ 1]);
+ }
+ ++nr;
+ j1 = j1 - kdn - 1;
+ }
+
+/* apply plane rotations from both sides to diagonal */
+/* blocks */
+
+ if (nr > 0) {
+ clar2v_(&nr, &ab[(j1 - 1) * ab_dim1 + 1], &ab[j1 *
+ ab_dim1 + 1], &ab[(j1 - 1) * ab_dim1 + 2], &
+ inca, &d__[j1], &work[j1], &kd1);
+ }
+
+/* apply plane rotations from the right */
+
+
+/* Dependent on the number of diagonals either */
+/* CLARTV or CROT is used */
+
+ if (nr > 0) {
+ clacgv_(&nr, &work[j1], &kd1);
+ if (nr > (*kd << 1) - 1) {
+ i__2 = *kd - 1;
+ for (l = 1; l <= i__2; ++l) {
+ if (j2 + l > *n) {
+ nrt = nr - 1;
+ } else {
+ nrt = nr;
+ }
+ if (nrt > 0) {
+ clartv_(&nrt, &ab[l + 2 + (j1 - 1) *
+ ab_dim1], &inca, &ab[l + 1 + j1 *
+ ab_dim1], &inca, &d__[j1], &work[
+ j1], &kd1);
+ }
+/* L150: */
+ }
+ } else {
+ j1end = j1 + kd1 * (nr - 2);
+ if (j1end >= j1) {
+ i__2 = j1end;
+ i__3 = kd1;
+ for (j1inc = j1; i__3 < 0 ? j1inc >= i__2 :
+ j1inc <= i__2; j1inc += i__3) {
+ crot_(&kdm1, &ab[(j1inc - 1) * ab_dim1 +
+ 3], &c__1, &ab[j1inc * ab_dim1 +
+ 2], &c__1, &d__[j1inc], &work[
+ j1inc]);
+/* L160: */
+ }
+ }
+/* Computing MIN */
+ i__3 = kdm1, i__2 = *n - j2;
+ lend = min(i__3,i__2);
+ last = j1end + kd1;
+ if (lend > 0) {
+ crot_(&lend, &ab[(last - 1) * ab_dim1 + 3], &
+ c__1, &ab[last * ab_dim1 + 2], &c__1,
+ &d__[last], &work[last]);
+ }
+ }
+ }
+
+
+
+ if (wantq) {
+
+/* accumulate product of plane rotations in Q */
+
+ if (initq) {
+
+/* take advantage of the fact that Q was */
+/* initially the Identity matrix */
+
+ iqend = max(iqend,j2);
+/* Computing MAX */
+ i__3 = 0, i__2 = k - 3;
+ i2 = max(i__3,i__2);
+ iqaend = i__ * *kd + 1;
+ if (k == 2) {
+ iqaend += *kd;
+ }
+ iqaend = min(iqaend,iqend);
+ i__3 = j2;
+ i__2 = kd1;
+ for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j
+ += i__2) {
+ ibl = i__ - i2 / kdm1;
+ ++i2;
+/* Computing MAX */
+ i__4 = 1, i__5 = j - ibl;
+ iqb = max(i__4,i__5);
+ nq = iqaend + 1 - iqb;
+/* Computing MIN */
+ i__4 = iqaend + *kd;
+ iqaend = min(i__4,iqend);
+ crot_(&nq, &q[iqb + (j - 1) * q_dim1], &c__1,
+ &q[iqb + j * q_dim1], &c__1, &d__[j],
+ &work[j]);
+/* L170: */
+ }
+ } else {
+
+ i__2 = j2;
+ i__3 = kd1;
+ for (j = j1; i__3 < 0 ? j >= i__2 : j <= i__2; j
+ += i__3) {
+ crot_(n, &q[(j - 1) * q_dim1 + 1], &c__1, &q[
+ j * q_dim1 + 1], &c__1, &d__[j], &
+ work[j]);
+/* L180: */
+ }
+ }
+ }
+
+ if (j2 + kdn > *n) {
+
+/* adjust J2 to keep within the bounds of the matrix */
+
+ --nr;
+ j2 = j2 - kdn - 1;
+ }
+
+ i__3 = j2;
+ i__2 = kd1;
+ for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j += i__2)
+ {
+
+/* create nonzero element a(j+kd,j-1) outside the */
+/* band and store it in WORK */
+
+ i__4 = j + *kd;
+ i__5 = j;
+ i__6 = kd1 + j * ab_dim1;
+ q__1.r = work[i__5].r * ab[i__6].r - work[i__5].i *
+ ab[i__6].i, q__1.i = work[i__5].r * ab[i__6]
+ .i + work[i__5].i * ab[i__6].r;
+ work[i__4].r = q__1.r, work[i__4].i = q__1.i;
+ i__4 = kd1 + j * ab_dim1;
+ i__5 = j;
+ i__6 = kd1 + j * ab_dim1;
+ q__1.r = d__[i__5] * ab[i__6].r, q__1.i = d__[i__5] *
+ ab[i__6].i;
+ ab[i__4].r = q__1.r, ab[i__4].i = q__1.i;
+/* L190: */
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ }
+
+ if (*kd > 0) {
+
+/* make off-diagonal elements real and copy them to E */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ * ab_dim1 + 2;
+ t.r = ab[i__2].r, t.i = ab[i__2].i;
+ abst = c_abs(&t);
+ i__2 = i__ * ab_dim1 + 2;
+ ab[i__2].r = abst, ab[i__2].i = 0.f;
+ e[i__] = abst;
+ if (abst != 0.f) {
+ q__1.r = t.r / abst, q__1.i = t.i / abst;
+ t.r = q__1.r, t.i = q__1.i;
+ } else {
+ t.r = 1.f, t.i = 0.f;
+ }
+ if (i__ < *n - 1) {
+ i__2 = (i__ + 1) * ab_dim1 + 2;
+ i__3 = (i__ + 1) * ab_dim1 + 2;
+ q__1.r = ab[i__3].r * t.r - ab[i__3].i * t.i, q__1.i = ab[
+ i__3].r * t.i + ab[i__3].i * t.r;
+ ab[i__2].r = q__1.r, ab[i__2].i = q__1.i;
+ }
+ if (wantq) {
+ cscal_(n, &t, &q[(i__ + 1) * q_dim1 + 1], &c__1);
+ }
+/* L220: */
+ }
+ } else {
+
+/* set E to zero if original matrix was diagonal */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ e[i__] = 0.f;
+/* L230: */
+ }
+ }
+
+/* copy diagonal elements to D */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__ * ab_dim1 + 1;
+ d__[i__2] = ab[i__3].r;
+/* L240: */
+ }
+ }
+
+ return 0;
+
+/* End of CHBTRD */
+
+} /* chbtrd_ */
diff --git a/contrib/libs/clapack/checon.c b/contrib/libs/clapack/checon.c
new file mode 100644
index 0000000000..af7eeedddd
--- /dev/null
+++ b/contrib/libs/clapack/checon.c
@@ -0,0 +1,201 @@
+/* checon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int checon_(char *uplo, integer *n, complex *a, integer *lda,
+ integer *ipiv, real *anorm, real *rcond, complex *work, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, kase;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ logical upper;
+ extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+ *, integer *, integer *), xerbla_(char *, integer *);
+ real ainvnm;
+ extern /* Subroutine */ int chetrs_(char *, integer *, integer *, complex
+ *, integer *, integer *, complex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHECON estimates the reciprocal of the condition number of a complex */
+/* Hermitian matrix A using the factorization A = U*D*U**H or */
+/* A = L*D*L**H computed by CHETRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**H; */
+/* = 'L': Lower triangular, form is A = L*D*L**H. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The block diagonal matrix D and the multipliers used to */
+/* obtain the factor U or L as computed by CHETRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by CHETRF. */
+
+/* ANORM (input) REAL */
+/* The 1-norm of the original matrix A. */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else if (*anorm < 0.f) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHECON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.f;
+ if (*n == 0) {
+ *rcond = 1.f;
+ return 0;
+ } else if (*anorm <= 0.f) {
+ return 0;
+ }
+
+/* Check that the diagonal matrix D is nonsingular. */
+
+ if (upper) {
+
+/* Upper triangular storage: examine D from bottom to top */
+
+ for (i__ = *n; i__ >= 1; --i__) {
+ i__1 = i__ + i__ * a_dim1;
+ if (ipiv[i__] > 0 && (a[i__1].r == 0.f && a[i__1].i == 0.f)) {
+ return 0;
+ }
+/* L10: */
+ }
+ } else {
+
+/* Lower triangular storage: examine D from top to bottom. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ if (ipiv[i__] > 0 && (a[i__2].r == 0.f && a[i__2].i == 0.f)) {
+ return 0;
+ }
+/* L20: */
+ }
+ }
+
+/* Estimate the 1-norm of the inverse. */
+
+ kase = 0;
+L30:
+ clacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+
+/* Multiply by inv(L*D*L') or inv(U*D*U'). */
+
+ chetrs_(uplo, n, &c__1, &a[a_offset], lda, &ipiv[1], &work[1], n,
+ info);
+ goto L30;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.f) {
+ *rcond = 1.f / ainvnm / *anorm;
+ }
+
+ return 0;
+
+/* End of CHECON */
+
+} /* checon_ */
diff --git a/contrib/libs/clapack/cheequb.c b/contrib/libs/clapack/cheequb.c
new file mode 100644
index 0000000000..dd51a2b048
--- /dev/null
+++ b/contrib/libs/clapack/cheequb.c
@@ -0,0 +1,440 @@
+/* cheequb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cheequb_(char *uplo, integer *n, complex *a, integer *
+ lda, real *s, real *scond, real *amax, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4;
+ doublereal d__1;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Builtin functions */
+ double r_imag(complex *), sqrt(doublereal), log(doublereal), pow_ri(real *
+ , integer *);
+
+ /* Local variables */
+ real d__;
+ integer i__, j;
+ real t, u, c0, c1, c2, si;
+ logical up;
+ real avg, std, tol, base;
+ integer iter;
+ real smin, smax, scale;
+ extern logical lsame_(char *, char *);
+ real sumsq;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ extern /* Subroutine */ int classq_(integer *, complex *, integer *, real
+ *, real *);
+ real smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- Contributed by James Demmel, Deaglan Halligan, Yozo Hida and -- */
+/* -- Jason Riedy of Univ. of California Berkeley. -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley and NAG Ltd. -- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSYEQUB computes row and column scalings intended to equilibrate a */
+/* symmetric matrix A and reduce its condition number */
+/* (with respect to the two-norm). S contains the scale factors, */
+/* S(i) = 1/sqrt(A(i,i)), chosen so that the scaled matrix B with */
+/* elements B(i,j) = S(i)*A(i,j)*S(j) has ones on the diagonal. This */
+/* choice of S puts the condition number of B within a factor N of the */
+/* smallest possible condition number over all possible diagonal */
+/* scalings. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The N-by-N symmetric matrix whose scaling */
+/* factors are to be computed. Only the diagonal elements of A */
+/* are referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* S (output) REAL array, dimension (N) */
+/* If INFO = 0, S contains the scale factors for A. */
+
+/* SCOND (output) REAL */
+/* If INFO = 0, S contains the ratio of the smallest S(i) to */
+/* the largest S(i). If SCOND >= 0.1 and AMAX is neither too */
+/* large nor too small, it is not worth scaling by S. */
+
+/* AMAX (output) REAL */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element is nonpositive. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function Definitions .. */
+
+/* Test input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (! (lsame_(uplo, "U") || lsame_(uplo, "L"))) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHEEQUB", &i__1);
+ return 0;
+ }
+ up = lsame_(uplo, "U");
+ *amax = 0.f;
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ *scond = 1.f;
+ return 0;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ s[i__] = 0.f;
+ }
+ *amax = 0.f;
+ if (up) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ r__3 = s[i__], r__4 = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[i__ + j * a_dim1]), dabs(r__2));
+ s[i__] = dmax(r__3,r__4);
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ r__3 = s[j], r__4 = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[i__ + j * a_dim1]), dabs(r__2));
+ s[j] = dmax(r__3,r__4);
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ r__3 = *amax, r__4 = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[i__ + j * a_dim1]), dabs(r__2));
+ *amax = dmax(r__3,r__4);
+ }
+/* Computing MAX */
+ i__2 = j + j * a_dim1;
+ r__3 = s[j], r__4 = (r__1 = a[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[j + j * a_dim1]), dabs(r__2));
+ s[j] = dmax(r__3,r__4);
+/* Computing MAX */
+ i__2 = j + j * a_dim1;
+ r__3 = *amax, r__4 = (r__1 = a[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[j + j * a_dim1]), dabs(r__2));
+ *amax = dmax(r__3,r__4);
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j + j * a_dim1;
+ r__3 = s[j], r__4 = (r__1 = a[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[j + j * a_dim1]), dabs(r__2));
+ s[j] = dmax(r__3,r__4);
+/* Computing MAX */
+ i__2 = j + j * a_dim1;
+ r__3 = *amax, r__4 = (r__1 = a[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[j + j * a_dim1]), dabs(r__2));
+ *amax = dmax(r__3,r__4);
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ r__3 = s[i__], r__4 = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[i__ + j * a_dim1]), dabs(r__2));
+ s[i__] = dmax(r__3,r__4);
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ r__3 = s[j], r__4 = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[i__ + j * a_dim1]), dabs(r__2));
+ s[j] = dmax(r__3,r__4);
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ r__3 = *amax, r__4 = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[i__ + j * a_dim1]), dabs(r__2));
+ *amax = dmax(r__3,r__4);
+ }
+ }
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ s[j] = 1.f / s[j];
+ }
+ tol = 1.f / sqrt(*n * 2.f);
+ for (iter = 1; iter <= 100; ++iter) {
+ scale = 0.f;
+ sumsq = 0.f;
+/* beta = |A|s */
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ work[i__2].r = 0.f, work[i__2].i = 0.f;
+ }
+ if (up) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ t = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 = r_imag(&a[
+ i__ + j * a_dim1]), dabs(r__2));
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ r__3 = ((r__1 = a[i__5].r, dabs(r__1)) + (r__2 = r_imag(&
+ a[i__ + j * a_dim1]), dabs(r__2))) * s[j];
+ q__1.r = work[i__4].r + r__3, q__1.i = work[i__4].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ i__3 = j;
+ i__4 = j;
+ i__5 = i__ + j * a_dim1;
+ r__3 = ((r__1 = a[i__5].r, dabs(r__1)) + (r__2 = r_imag(&
+ a[i__ + j * a_dim1]), dabs(r__2))) * s[i__];
+ q__1.r = work[i__4].r + r__3, q__1.i = work[i__4].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ }
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ r__3 = ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 = r_imag(&a[j
+ + j * a_dim1]), dabs(r__2))) * s[j];
+ q__1.r = work[i__3].r + r__3, q__1.i = work[i__3].i;
+ work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ r__3 = ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 = r_imag(&a[j
+ + j * a_dim1]), dabs(r__2))) * s[j];
+ q__1.r = work[i__3].r + r__3, q__1.i = work[i__3].i;
+ work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ t = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 = r_imag(&a[
+ i__ + j * a_dim1]), dabs(r__2));
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ r__3 = ((r__1 = a[i__5].r, dabs(r__1)) + (r__2 = r_imag(&
+ a[i__ + j * a_dim1]), dabs(r__2))) * s[j];
+ q__1.r = work[i__4].r + r__3, q__1.i = work[i__4].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ i__3 = j;
+ i__4 = j;
+ i__5 = i__ + j * a_dim1;
+ r__3 = ((r__1 = a[i__5].r, dabs(r__1)) + (r__2 = r_imag(&
+ a[i__ + j * a_dim1]), dabs(r__2))) * s[i__];
+ q__1.r = work[i__4].r + r__3, q__1.i = work[i__4].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ }
+ }
+ }
+/* avg = s^T beta / n */
+ avg = 0.f;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ q__2.r = s[i__2] * work[i__3].r, q__2.i = s[i__2] * work[i__3].i;
+ q__1.r = avg + q__2.r, q__1.i = q__2.i;
+ avg = q__1.r;
+ }
+ avg /= *n;
+ std = 0.f;
+ i__1 = *n * 3;
+ for (i__ = (*n << 1) + 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__ - (*n << 1);
+ i__4 = i__ - (*n << 1);
+ q__2.r = s[i__3] * work[i__4].r, q__2.i = s[i__3] * work[i__4].i;
+ q__1.r = q__2.r - avg, q__1.i = q__2.i;
+ work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+ }
+ classq_(n, &work[(*n << 1) + 1], &c__1, &scale, &sumsq);
+ std = scale * sqrt(sumsq / *n);
+ if (std < tol * avg) {
+ goto L999;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ t = (r__1 = a[i__2].r, dabs(r__1)) + (r__2 = r_imag(&a[i__ + i__ *
+ a_dim1]), dabs(r__2));
+ si = s[i__];
+ c2 = (*n - 1) * t;
+ i__2 = *n - 2;
+ i__3 = i__;
+ r__1 = t * si;
+ q__2.r = work[i__3].r - r__1, q__2.i = work[i__3].i;
+ d__1 = (doublereal) i__2;
+ q__1.r = d__1 * q__2.r, q__1.i = d__1 * q__2.i;
+ c1 = q__1.r;
+ r__1 = -(t * si) * si;
+ i__2 = i__;
+ d__1 = 2.;
+ q__4.r = d__1 * work[i__2].r, q__4.i = d__1 * work[i__2].i;
+ q__3.r = si * q__4.r, q__3.i = si * q__4.i;
+ q__2.r = r__1 + q__3.r, q__2.i = q__3.i;
+ r__2 = *n * avg;
+ q__1.r = q__2.r - r__2, q__1.i = q__2.i;
+ c0 = q__1.r;
+ d__ = c1 * c1 - c0 * 4 * c2;
+ if (d__ <= 0.f) {
+ *info = -1;
+ return 0;
+ }
+ si = c0 * -2 / (c1 + sqrt(d__));
+ d__ = si - s[i__];
+ u = 0.f;
+ if (up) {
+ i__2 = i__;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = j + i__ * a_dim1;
+ t = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 = r_imag(&a[j
+ + i__ * a_dim1]), dabs(r__2));
+ u += s[j] * t;
+ i__3 = j;
+ i__4 = j;
+ r__1 = d__ * t;
+ q__1.r = work[i__4].r + r__1, q__1.i = work[i__4].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ }
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ i__3 = i__ + j * a_dim1;
+ t = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 = r_imag(&a[
+ i__ + j * a_dim1]), dabs(r__2));
+ u += s[j] * t;
+ i__3 = j;
+ i__4 = j;
+ r__1 = d__ * t;
+ q__1.r = work[i__4].r + r__1, q__1.i = work[i__4].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ }
+ } else {
+ i__2 = i__;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = i__ + j * a_dim1;
+ t = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 = r_imag(&a[
+ i__ + j * a_dim1]), dabs(r__2));
+ u += s[j] * t;
+ i__3 = j;
+ i__4 = j;
+ r__1 = d__ * t;
+ q__1.r = work[i__4].r + r__1, q__1.i = work[i__4].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ }
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ i__3 = j + i__ * a_dim1;
+ t = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 = r_imag(&a[j
+ + i__ * a_dim1]), dabs(r__2));
+ u += s[j] * t;
+ i__3 = j;
+ i__4 = j;
+ r__1 = d__ * t;
+ q__1.r = work[i__4].r + r__1, q__1.i = work[i__4].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ }
+ }
+ i__2 = i__;
+ q__4.r = u + work[i__2].r, q__4.i = work[i__2].i;
+ q__3.r = d__ * q__4.r, q__3.i = d__ * q__4.i;
+ d__1 = (doublereal) (*n);
+ q__2.r = q__3.r / d__1, q__2.i = q__3.i / d__1;
+ q__1.r = avg + q__2.r, q__1.i = q__2.i;
+ avg = q__1.r;
+ s[i__] = si;
+ }
+ }
+L999:
+ smlnum = slamch_("SAFEMIN");
+ bignum = 1.f / smlnum;
+ smin = bignum;
+ smax = 0.f;
+ t = 1.f / sqrt(avg);
+ base = slamch_("B");
+ u = 1.f / log(base);
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = (integer) (u * log(s[i__] * t));
+ s[i__] = pow_ri(&base, &i__2);
+/* Computing MIN */
+ r__1 = smin, r__2 = s[i__];
+ smin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = smax, r__2 = s[i__];
+ smax = dmax(r__1,r__2);
+ }
+ *scond = dmax(smin,smlnum) / dmin(smax,bignum);
+ return 0;
+} /* cheequb_ */
diff --git a/contrib/libs/clapack/cheev.c b/contrib/libs/clapack/cheev.c
new file mode 100644
index 0000000000..58e2c93ea0
--- /dev/null
+++ b/contrib/libs/clapack/cheev.c
@@ -0,0 +1,284 @@
+/* cheev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__0 = 0;
+static real c_b18 = 1.f;
+
+/* Subroutine */ int cheev_(char *jobz, char *uplo, integer *n, complex *a,
+ integer *lda, real *w, complex *work, integer *lwork, real *rwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer nb;
+ real eps;
+ integer inde;
+ real anrm;
+ integer imax;
+ real rmin, rmax, sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ logical lower, wantz;
+ extern doublereal clanhe_(char *, char *, integer *, complex *, integer *,
+ real *);
+ integer iscale;
+ extern /* Subroutine */ int clascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, complex *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int chetrd_(char *, integer *, complex *, integer
+ *, real *, real *, complex *, complex *, integer *, integer *);
+ real safmin;
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ integer indtau, indwrk;
+ extern /* Subroutine */ int csteqr_(char *, integer *, real *, real *,
+ complex *, integer *, real *, integer *), cungtr_(char *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ integer *), ssterf_(integer *, real *, real *, integer *);
+ integer llwork;
+ real smlnum;
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHEEV computes all eigenvalues and, optionally, eigenvectors of a */
+/* complex Hermitian matrix A. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA, N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+/* On exit, if JOBZ = 'V', then if INFO = 0, A contains the */
+/* orthonormal eigenvectors of the matrix A. */
+/* If JOBZ = 'N', then on exit the lower triangle (if UPLO='L') */
+/* or the upper triangle (if UPLO='U') of A, including the */
+/* diagonal, is destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,2*N-1). */
+/* For optimal efficiency, LWORK >= (NB+1)*N, */
+/* where NB is the blocksize for CHETRD returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (max(1, 3*N-2)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the algorithm failed to converge; i */
+/* off-diagonal elements of an intermediate tridiagonal */
+/* form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --w;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ lower = lsame_(uplo, "L");
+ lquery = *lwork == -1;
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (lower || lsame_(uplo, "U"))) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+
+/* Workspace check: optimal LWORK is (NB+1)*N (NB from ILAENV for CHETRD); */
+/* the hard minimum enforced below is max(1, 2*N-1). */
+ if (*info == 0) {
+ nb = ilaenv_(&c__1, "CHETRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = 1, i__2 = (nb + 1) * *n;
+ lwkopt = max(i__1,i__2);
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+/* Computing MAX */
+ i__1 = 1, i__2 = (*n << 1) - 1;
+ if (*lwork < max(i__1,i__2) && ! lquery) {
+ *info = -8;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHEEV ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ i__1 = a_dim1 + 1;
+ w[1] = a[i__1].r;
+ work[1].r = 1.f, work[1].i = 0.f;
+ if (wantz) {
+ i__1 = a_dim1 + 1;
+ a[i__1].r = 1.f, a[i__1].i = 0.f;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = slamch_("Safe minimum");
+ eps = slamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1.f / smlnum;
+ rmin = sqrt(smlnum);
+ rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+/* NOTE(review): CLANHE with norm "M" returns max(|A(i,j)|) per LAPACK; */
+/* SIGMA scales that value into [RMIN, RMAX] to avoid over/underflow. */
+ anrm = clanhe_("M", uplo, n, &a[a_offset], lda, &rwork[1]);
+ iscale = 0;
+ if (anrm > 0.f && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ clascl_(uplo, &c__0, &c__0, &c_b18, &sigma, n, n, &a[a_offset], lda,
+ info);
+ }
+
+/* Call CHETRD to reduce Hermitian matrix to tridiagonal form. */
+
+/* Workspace layout: WORK(INDTAU..) receives the CHETRD Householder */
+/* scalars; WORK(INDWRK..) is scratch of length LLWORK. */
+ inde = 1;
+ indtau = 1;
+ indwrk = indtau + *n;
+ llwork = *lwork - indwrk + 1;
+ chetrd_(uplo, n, &a[a_offset], lda, &w[1], &rwork[inde], &work[indtau], &
+ work[indwrk], &llwork, &iinfo);
+
+/* For eigenvalues only, call SSTERF. For eigenvectors, first call */
+/* CUNGTR to generate the unitary matrix, then call CSTEQR. */
+
+ if (! wantz) {
+ ssterf_(n, &w[1], &rwork[inde], info);
+ } else {
+ cungtr_(uplo, n, &a[a_offset], lda, &work[indtau], &work[indwrk], &
+ llwork, &iinfo);
+ indwrk = inde + *n;
+ csteqr_(jobz, n, &w[1], &rwork[inde], &a[a_offset], lda, &rwork[
+ indwrk], info);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *n;
+ } else {
+/* Convergence failed: rescale only the entries W(1..INFO-1). */
+ imax = *info - 1;
+ }
+ r__1 = 1.f / sigma;
+ sscal_(&imax, &r__1, &w[1], &c__1);
+ }
+
+/* Set WORK(1) to optimal complex workspace size. */
+
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CHEEV */
+
+} /* cheev_ */
diff --git a/contrib/libs/clapack/cheevd.c b/contrib/libs/clapack/cheevd.c
new file mode 100644
index 0000000000..0cade89b4e
--- /dev/null
+++ b/contrib/libs/clapack/cheevd.c
@@ -0,0 +1,377 @@
+/* cheevd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__0 = 0;
+static real c_b18 = 1.f;
+
+/* Subroutine */ int cheevd_(char *jobz, char *uplo, integer *n, complex *a,
+ integer *lda, real *w, complex *work, integer *lwork, real *rwork,
+ integer *lrwork, integer *iwork, integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ real eps;
+ integer inde;
+ real anrm;
+ integer imax;
+ real rmin, rmax;
+ integer lopt;
+ real sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ integer lwmin, liopt;
+ logical lower;
+ integer llrwk, lropt;
+ logical wantz;
+ integer indwk2, llwrk2;
+ extern doublereal clanhe_(char *, char *, integer *, complex *, integer *,
+ real *);
+ integer iscale;
+ extern /* Subroutine */ int clascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, complex *, integer *, integer *), cstedc_(char *, integer *, real *, real *, complex *,
+ integer *, complex *, integer *, real *, integer *, integer *,
+ integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int chetrd_(char *, integer *, complex *, integer
+ *, real *, real *, complex *, complex *, integer *, integer *), clacpy_(char *, integer *, integer *, complex *, integer
+ *, complex *, integer *);
+ real safmin;
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ integer indtau, indrwk, indwrk, liwmin;
+ extern /* Subroutine */ int ssterf_(integer *, real *, real *, integer *);
+ integer lrwmin;
+ extern /* Subroutine */ int cunmtr_(char *, char *, char *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *);
+ integer llwork;
+ real smlnum;
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHEEVD computes all eigenvalues and, optionally, eigenvectors of a */
+/* complex Hermitian matrix A. If eigenvectors are desired, it uses a */
+/* divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA, N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+/* On exit, if JOBZ = 'V', then if INFO = 0, A contains the */
+/* orthonormal eigenvectors of the matrix A. */
+/* If JOBZ = 'N', then on exit the lower triangle (if UPLO='L') */
+/* or the upper triangle (if UPLO='U') of A, including the */
+/* diagonal, is destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. */
+/* If N <= 1, LWORK must be at least 1. */
+/* If JOBZ = 'N' and N > 1, LWORK must be at least N + 1. */
+/* If JOBZ = 'V' and N > 1, LWORK must be at least 2*N + N**2. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK, RWORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* RWORK (workspace/output) REAL array, */
+/* dimension (LRWORK) */
+/* On exit, if INFO = 0, RWORK(1) returns the optimal LRWORK. */
+
+/* LRWORK (input) INTEGER */
+/* The dimension of the array RWORK. */
+/* If N <= 1, LRWORK must be at least 1. */
+/* If JOBZ = 'N' and N > 1, LRWORK must be at least N. */
+/* If JOBZ = 'V' and N > 1, LRWORK must be at least */
+/* 1 + 5*N + 2*N**2. */
+
+/* If LRWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If N <= 1, LIWORK must be at least 1. */
+/* If JOBZ = 'N' and N > 1, LIWORK must be at least 1. */
+/* If JOBZ = 'V' and N > 1, LIWORK must be at least 3 + 5*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i and JOBZ = 'N', then the algorithm failed */
+/* to converge; i off-diagonal elements of an intermediate */
+/* tridiagonal form did not converge to zero; */
+/* if INFO = i and JOBZ = 'V', then the algorithm failed */
+/* to compute an eigenvalue while working on the submatrix */
+/* lying in rows and columns INFO/(N+1) through */
+/* mod(INFO,N+1). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+
+/* Modified description of INFO. Sven, 16 Feb 05. */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --w;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ lower = lsame_(uplo, "L");
+ lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1;
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (lower || lsame_(uplo, "U"))) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+
+/* Determine the minimal (LWMIN/LRWMIN/LIWMIN) and optimal */
+/* (LOPT/LROPT/LIOPT) workspace sizes; see the LWORK/LRWORK/LIWORK */
+/* argument descriptions above for the formulas. */
+ if (*info == 0) {
+ if (*n <= 1) {
+ lwmin = 1;
+ lrwmin = 1;
+ liwmin = 1;
+ lopt = lwmin;
+ lropt = lrwmin;
+ liopt = liwmin;
+ } else {
+ if (wantz) {
+ lwmin = (*n << 1) + *n * *n;
+/* Computing 2nd power */
+ i__1 = *n;
+ lrwmin = *n * 5 + 1 + (i__1 * i__1 << 1);
+ liwmin = *n * 5 + 3;
+ } else {
+ lwmin = *n + 1;
+ lrwmin = *n;
+ liwmin = 1;
+ }
+/* Computing MAX */
+ i__1 = lwmin, i__2 = *n + ilaenv_(&c__1, "CHETRD", uplo, n, &c_n1,
+ &c_n1, &c_n1);
+ lopt = max(i__1,i__2);
+ lropt = lrwmin;
+ liopt = liwmin;
+ }
+ work[1].r = (real) lopt, work[1].i = 0.f;
+ rwork[1] = (real) lropt;
+ iwork[1] = liopt;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -8;
+ } else if (*lrwork < lrwmin && ! lquery) {
+ *info = -10;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHEEVD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ i__1 = a_dim1 + 1;
+ w[1] = a[i__1].r;
+ if (wantz) {
+ i__1 = a_dim1 + 1;
+ a[i__1].r = 1.f, a[i__1].i = 0.f;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = slamch_("Safe minimum");
+ eps = slamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1.f / smlnum;
+ rmin = sqrt(smlnum);
+ rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+/* NOTE(review): CLANHE with norm "M" returns max(|A(i,j)|) per LAPACK; */
+/* SIGMA scales that value into [RMIN, RMAX] to avoid over/underflow. */
+ anrm = clanhe_("M", uplo, n, &a[a_offset], lda, &rwork[1]);
+ iscale = 0;
+ if (anrm > 0.f && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ clascl_(uplo, &c__0, &c__0, &c_b18, &sigma, n, n, &a[a_offset], lda,
+ info);
+ }
+
+/* Call CHETRD to reduce Hermitian matrix to tridiagonal form. */
+
+/* Workspace layout: WORK(INDTAU..) = CHETRD Householder scalars, */
+/* WORK(INDWRK..INDWRK+N*N-1) = N-by-N eigenvector matrix written by */
+/* CSTEDC, WORK(INDWK2..) = scratch; RWORK(INDRWK..) = real scratch. */
+ inde = 1;
+ indtau = 1;
+ indwrk = indtau + *n;
+ indrwk = inde + *n;
+ indwk2 = indwrk + *n * *n;
+ llwork = *lwork - indwrk + 1;
+ llwrk2 = *lwork - indwk2 + 1;
+ llrwk = *lrwork - indrwk + 1;
+ chetrd_(uplo, n, &a[a_offset], lda, &w[1], &rwork[inde], &work[indtau], &
+ work[indwrk], &llwork, &iinfo);
+
+/* For eigenvalues only, call SSTERF. For eigenvectors, first call */
+/* CSTEDC to generate the eigenvector matrix, WORK(INDWRK), of the */
+/* tridiagonal matrix, then call CUNMTR to multiply it to the */
+/* Householder transformations represented as Householder vectors in */
+/* A. */
+
+ if (! wantz) {
+ ssterf_(n, &w[1], &rwork[inde], info);
+ } else {
+ cstedc_("I", n, &w[1], &rwork[inde], &work[indwrk], n, &work[indwk2],
+ &llwrk2, &rwork[indrwk], &llrwk, &iwork[1], liwork, info);
+ cunmtr_("L", uplo, "N", n, n, &a[a_offset], lda, &work[indtau], &work[
+ indwrk], n, &work[indwk2], &llwrk2, &iinfo);
+ clacpy_("A", n, n, &work[indwrk], n, &a[a_offset], lda);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *n;
+ } else {
+/* Convergence failed: rescale only the entries W(1..INFO-1). */
+ imax = *info - 1;
+ }
+ r__1 = 1.f / sigma;
+ sscal_(&imax, &r__1, &w[1], &c__1);
+ }
+
+ work[1].r = (real) lopt, work[1].i = 0.f;
+ rwork[1] = (real) lropt;
+ iwork[1] = liopt;
+
+ return 0;
+
+/* End of CHEEVD */
+
+} /* cheevd_ */
diff --git a/contrib/libs/clapack/cheevr.c b/contrib/libs/clapack/cheevr.c
new file mode 100644
index 0000000000..20e21ba30b
--- /dev/null
+++ b/contrib/libs/clapack/cheevr.c
@@ -0,0 +1,687 @@
+/* cheevr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__10 = 10;
+static integer c__1 = 1;
+static integer c__2 = 2;
+static integer c__3 = 3;
+static integer c__4 = 4;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cheevr_(char *jobz, char *range, char *uplo, integer *n,
+ complex *a, integer *lda, real *vl, real *vu, integer *il, integer *
+ iu, real *abstol, integer *m, real *w, complex *z__, integer *ldz,
+ integer *isuppz, complex *work, integer *lwork, real *rwork, integer *
+ lrwork, integer *iwork, integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, z_dim1, z_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, nb, jj;
+ real eps, vll, vuu, tmp1, anrm;
+ integer imax;
+ real rmin, rmax;
+ logical test;
+ integer itmp1, indrd, indre;
+ real sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ char order[1];
+ integer indwk;
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ integer lwmin;
+ logical lower;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ logical wantz, alleig, indeig;
+ integer iscale, ieeeok, indibl, indrdd, indifl, indree;
+ logical valeig;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int chetrd_(char *, integer *, complex *, integer
+ *, real *, real *, complex *, complex *, integer *, integer *), csscal_(integer *, real *, complex *, integer *);
+ real safmin;
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real abstll, bignum;
+ integer indtau, indisp;
+ extern /* Subroutine */ int cstein_(integer *, real *, real *, integer *,
+ real *, integer *, integer *, complex *, integer *, real *,
+ integer *, integer *, integer *);
+ integer indiwo, indwkn;
+ extern doublereal clansy_(char *, char *, integer *, complex *, integer *,
+ real *);
+ extern /* Subroutine */ int cstemr_(char *, char *, integer *, real *,
+ real *, real *, real *, integer *, integer *, integer *, real *,
+ complex *, integer *, integer *, integer *, logical *, real *,
+ integer *, integer *, integer *, integer *);
+ integer indrwk, liwmin;
+ logical tryrac;
+ extern /* Subroutine */ int ssterf_(integer *, real *, real *, integer *);
+ integer lrwmin, llwrkn, llwork, nsplit;
+ real smlnum;
+ extern /* Subroutine */ int cunmtr_(char *, char *, char *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *), sstebz_(
+ char *, char *, integer *, real *, real *, integer *, integer *,
+ real *, real *, real *, integer *, integer *, real *, integer *,
+ integer *, real *, integer *, integer *);
+ logical lquery;
+ integer lwkopt, llrwork;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHEEVR computes selected eigenvalues and, optionally, eigenvectors */
+/* of a complex Hermitian matrix A. Eigenvalues and eigenvectors can */
+/* be selected by specifying either a range of values or a range of */
+/* indices for the desired eigenvalues. */
+
+/* CHEEVR first reduces the matrix A to tridiagonal form T with a call */
+/* to CHETRD. Then, whenever possible, CHEEVR calls CSTEMR to compute */
+/* the eigenspectrum using Relatively Robust Representations. CSTEMR */
+/* computes eigenvalues by the dqds algorithm, while orthogonal */
+/* eigenvectors are computed from various "good" L D L^T representations */
+/* (also known as Relatively Robust Representations). Gram-Schmidt */
+/* orthogonalization is avoided as far as possible. More specifically, */
+/* the various steps of the algorithm are as follows. */
+
+/* For each unreduced block (submatrix) of T, */
+/* (a) Compute T - sigma I = L D L^T, so that L and D */
+/* define all the wanted eigenvalues to high relative accuracy. */
+/* This means that small relative changes in the entries of D and L */
+/* cause only small relative changes in the eigenvalues and */
+/* eigenvectors. The standard (unfactored) representation of the */
+/* tridiagonal matrix T does not have this property in general. */
+/* (b) Compute the eigenvalues to suitable accuracy. */
+/* If the eigenvectors are desired, the algorithm attains full */
+/* accuracy of the computed eigenvalues only right before */
+/* the corresponding vectors have to be computed, see steps c) and d). */
+/* (c) For each cluster of close eigenvalues, select a new */
+/* shift close to the cluster, find a new factorization, and refine */
+/* the shifted eigenvalues to suitable accuracy. */
+/* (d) For each eigenvalue with a large enough relative separation compute */
+/* the corresponding eigenvector by forming a rank revealing twisted */
+/* factorization. Go back to (c) for any clusters that remain. */
+
+/* The desired accuracy of the output can be specified by the input */
+/* parameter ABSTOL. */
+
+/* For more details, see DSTEMR's documentation and: */
+/* - Inderjit S. Dhillon and Beresford N. Parlett: "Multiple representations */
+/* to compute orthogonal eigenvectors of symmetric tridiagonal matrices," */
+/* Linear Algebra and its Applications, 387(1), pp. 1-28, August 2004. */
+/* - Inderjit Dhillon and Beresford Parlett: "Orthogonal Eigenvectors and */
+/* Relative Gaps," SIAM Journal on Matrix Analysis and Applications, Vol. 25, */
+/* 2004. Also LAPACK Working Note 154. */
+/* - Inderjit Dhillon: "A new O(n^2) algorithm for the symmetric */
+/* tridiagonal eigenvalue/eigenvector problem", */
+/* Computer Science Division Technical Report No. UCB/CSD-97-971, */
+/* UC Berkeley, May 1997. */
+
+
+/* Note 1 : CHEEVR calls CSTEMR when the full spectrum is requested */
+/* on machines which conform to the ieee-754 floating point standard. */
+/* CHEEVR calls SSTEBZ and CSTEIN on non-ieee machines and */
+/* when partial spectrum requests are made. */
+
+/* Normal execution of CSTEMR may create NaNs and infinities and */
+/* hence may abort due to a floating point exception in environments */
+/* which do not handle NaNs and infinities in the ieee standard default */
+/* manner. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+/* ********* For RANGE = 'V' or 'I' and IU - IL < N - 1, SSTEBZ and */
+/* ********* CSTEIN are called */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA, N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+/* On exit, the lower triangle (if UPLO='L') or the upper */
+/* triangle (if UPLO='U') of A, including the diagonal, is */
+/* destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) REAL */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing A to tridiagonal form. */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices */
+/* with Guaranteed High Relative Accuracy," by Demmel and */
+/* Kahan, LAPACK Working Note #3. */
+
+/* If high relative accuracy is important, set ABSTOL to */
+/* SLAMCH( 'Safe minimum' ). Doing so will guarantee that */
+/* eigenvalues are computed to high relative accuracy when */
+/* possible in future releases. The current code does not */
+/* make any guarantees about high relative accuracy, but */
+/* furutre releases will. See J. Barlow and J. Demmel, */
+/* "Computing Accurate Eigensystems of Scaled Diagonally */
+/* Dominant Matrices", LAPACK Working Note #7, for a discussion */
+/* of which matrices define their eigenvalues to high relative */
+/* accuracy. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) REAL array, dimension (N) */
+/* The first M elements contain the selected eigenvalues in */
+/* ascending order. */
+
+/* Z (output) COMPLEX array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* ISUPPZ (output) INTEGER array, dimension ( 2*max(1,M) ) */
+/* The support of the eigenvectors in Z, i.e., the indices */
+/* indicating the nonzero elements in Z. The i-th eigenvector */
+/* is nonzero only in elements ISUPPZ( 2*i-1 ) through */
+/* ISUPPZ( 2*i ). */
+/* ********* Implemented only for RANGE = 'A' or 'I' and IU - IL = N - 1 */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,2*N). */
+/* For optimal efficiency, LWORK >= (NB+1)*N, */
+/* where NB is the max of the blocksize for CHETRD and for */
+/* CUNMTR as returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK, RWORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* RWORK (workspace/output) REAL array, dimension (MAX(1,LRWORK)) */
+/* On exit, if INFO = 0, RWORK(1) returns the optimal */
+/* (and minimal) LRWORK. */
+
+/* LRWORK (input) INTEGER */
+/* The length of the array RWORK. LRWORK >= max(1,24*N). */
+
+/* If LRWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal */
+/* (and minimal) LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. LIWORK >= max(1,10*N). */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: Internal error */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Inderjit Dhillon, IBM Almaden, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Ken Stanley, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Jason Riedy, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --isuppz;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ ieeeok = ilaenv_(&c__10, "CHEEVR", "N", &c__1, &c__2, &c__3, &c__4);
+
+ lower = lsame_(uplo, "L");
+ wantz = lsame_(jobz, "V");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+
+ lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1;
+
+/* Computing MAX */
+ i__1 = 1, i__2 = *n * 24;
+ lrwmin = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = 1, i__2 = *n * 10;
+ liwmin = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = 1, i__2 = *n << 1;
+ lwmin = max(i__1,i__2);
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -2;
+ } else if (! (lower || lsame_(uplo, "U"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -8;
+ }
+ } else if (indeig) {
+ if (*il < 1 || *il > max(1,*n)) {
+ *info = -9;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -10;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -15;
+ }
+ }
+
+ if (*info == 0) {
+ nb = ilaenv_(&c__1, "CHETRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = nb, i__2 = ilaenv_(&c__1, "CUNMTR", uplo, n, &c_n1, &c_n1, &
+ c_n1);
+ nb = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = (nb + 1) * *n;
+ lwkopt = max(i__1,lwmin);
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ rwork[1] = (real) lrwmin;
+ iwork[1] = liwmin;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -18;
+ } else if (*lrwork < lrwmin && ! lquery) {
+ *info = -20;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -22;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHEEVR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ if (*n == 1) {
+ work[1].r = 2.f, work[1].i = 0.f;
+ if (alleig || indeig) {
+ *m = 1;
+ i__1 = a_dim1 + 1;
+ w[1] = a[i__1].r;
+ } else {
+ i__1 = a_dim1 + 1;
+ i__2 = a_dim1 + 1;
+ if (*vl < a[i__1].r && *vu >= a[i__2].r) {
+ *m = 1;
+ i__1 = a_dim1 + 1;
+ w[1] = a[i__1].r;
+ }
+ }
+ if (wantz) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1.f, z__[i__1].i = 0.f;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = slamch_("Safe minimum");
+ eps = slamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1.f / smlnum;
+ rmin = sqrt(smlnum);
+/* Computing MIN */
+ r__1 = sqrt(bignum), r__2 = 1.f / sqrt(sqrt(safmin));
+ rmax = dmin(r__1,r__2);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ iscale = 0;
+ abstll = *abstol;
+ if (valeig) {
+ vll = *vl;
+ vuu = *vu;
+ }
+ anrm = clansy_("M", uplo, n, &a[a_offset], lda, &rwork[1]);
+ if (anrm > 0.f && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ if (lower) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j + 1;
+ csscal_(&i__2, &sigma, &a[j + j * a_dim1], &c__1);
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ csscal_(&j, &sigma, &a[j * a_dim1 + 1], &c__1);
+/* L20: */
+ }
+ }
+ if (*abstol > 0.f) {
+ abstll = *abstol * sigma;
+ }
+ if (valeig) {
+ vll = *vl * sigma;
+ vuu = *vu * sigma;
+ }
+ }
+/* Initialize indices into workspaces. Note: The IWORK indices are */
+/* used only if SSTERF or CSTEMR fail. */
+/* WORK(INDTAU:INDTAU+N-1) stores the complex scalar factors of the */
+/* elementary reflectors used in CHETRD. */
+ indtau = 1;
+/* INDWK is the starting offset of the remaining complex workspace, */
+/* and LLWORK is the remaining complex workspace size. */
+ indwk = indtau + *n;
+ llwork = *lwork - indwk + 1;
+/* RWORK(INDRD:INDRD+N-1) stores the real tridiagonal's diagonal */
+/* entries. */
+ indrd = 1;
+/* RWORK(INDRE:INDRE+N-1) stores the off-diagonal entries of the */
+/* tridiagonal matrix from CHETRD. */
+ indre = indrd + *n;
+/* RWORK(INDRDD:INDRDD+N-1) is a copy of the diagonal entries over */
+/* -written by CSTEMR (the SSTERF path copies the diagonal to W). */
+ indrdd = indre + *n;
+/* RWORK(INDREE:INDREE+N-1) is a copy of the off-diagonal entries over */
+/* -written while computing the eigenvalues in SSTERF and CSTEMR. */
+ indree = indrdd + *n;
+/* INDRWK is the starting offset of the left-over real workspace, and */
+/* LLRWORK is the remaining workspace size. */
+ indrwk = indree + *n;
+ llrwork = *lrwork - indrwk + 1;
+/* IWORK(INDIBL:INDIBL+M-1) corresponds to IBLOCK in SSTEBZ and */
+/* stores the block indices of each of the M<=N eigenvalues. */
+ indibl = 1;
+/* IWORK(INDISP:INDISP+NSPLIT-1) corresponds to ISPLIT in SSTEBZ and */
+/* stores the starting and finishing indices of each block. */
+ indisp = indibl + *n;
+/* IWORK(INDIFL:INDIFL+N-1) stores the indices of eigenvectors */
+/* that corresponding to eigenvectors that fail to converge in */
+/* SSTEIN. This information is discarded; if any fail, the driver */
+/* returns INFO > 0. */
+ indifl = indisp + *n;
+/* INDIWO is the offset of the remaining integer workspace. */
+ indiwo = indisp + *n;
+
+/* Call CHETRD to reduce Hermitian matrix to tridiagonal form. */
+
+ chetrd_(uplo, n, &a[a_offset], lda, &rwork[indrd], &rwork[indre], &work[
+ indtau], &work[indwk], &llwork, &iinfo);
+
+/* If all eigenvalues are desired */
+/* then call SSTERF or CSTEMR and CUNMTR. */
+
+ test = FALSE_;
+ if (indeig) {
+ if (*il == 1 && *iu == *n) {
+ test = TRUE_;
+ }
+ }
+ if ((alleig || test) && ieeeok == 1) {
+ if (! wantz) {
+ scopy_(n, &rwork[indrd], &c__1, &w[1], &c__1);
+ i__1 = *n - 1;
+ scopy_(&i__1, &rwork[indre], &c__1, &rwork[indree], &c__1);
+ ssterf_(n, &w[1], &rwork[indree], info);
+ } else {
+ i__1 = *n - 1;
+ scopy_(&i__1, &rwork[indre], &c__1, &rwork[indree], &c__1);
+ scopy_(n, &rwork[indrd], &c__1, &rwork[indrdd], &c__1);
+
+ if (*abstol <= *n * 2.f * eps) {
+ tryrac = TRUE_;
+ } else {
+ tryrac = FALSE_;
+ }
+ cstemr_(jobz, "A", n, &rwork[indrdd], &rwork[indree], vl, vu, il,
+ iu, m, &w[1], &z__[z_offset], ldz, n, &isuppz[1], &tryrac,
+ &rwork[indrwk], &llrwork, &iwork[1], liwork, info);
+
+/* Apply unitary matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by CSTEIN. */
+
+ if (wantz && *info == 0) {
+ indwkn = indwk;
+ llwrkn = *lwork - indwkn + 1;
+ cunmtr_("L", uplo, "N", n, m, &a[a_offset], lda, &work[indtau]
+, &z__[z_offset], ldz, &work[indwkn], &llwrkn, &iinfo);
+ }
+ }
+
+
+ if (*info == 0) {
+ *m = *n;
+ goto L30;
+ }
+ *info = 0;
+ }
+
+/* Otherwise, call SSTEBZ and, if eigenvectors are desired, CSTEIN. */
+/* Also call SSTEBZ and CSTEIN if CSTEMR fails. */
+
+ if (wantz) {
+ *(unsigned char *)order = 'B';
+ } else {
+ *(unsigned char *)order = 'E';
+ }
+ sstebz_(range, order, n, &vll, &vuu, il, iu, &abstll, &rwork[indrd], &
+ rwork[indre], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &
+ rwork[indrwk], &iwork[indiwo], info);
+
+ if (wantz) {
+ cstein_(n, &rwork[indrd], &rwork[indre], m, &w[1], &iwork[indibl], &
+ iwork[indisp], &z__[z_offset], ldz, &rwork[indrwk], &iwork[
+ indiwo], &iwork[indifl], info);
+
+/* Apply unitary matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by CSTEIN. */
+
+ indwkn = indwk;
+ llwrkn = *lwork - indwkn + 1;
+ cunmtr_("L", uplo, "N", n, m, &a[a_offset], lda, &work[indtau], &z__[
+ z_offset], ldz, &work[indwkn], &llwrkn, &iinfo);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+L30:
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *m;
+ } else {
+ imax = *info - 1;
+ }
+ r__1 = 1.f / sigma;
+ sscal_(&imax, &r__1, &w[1], &c__1);
+ }
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+ if (wantz) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = 0;
+ tmp1 = w[j];
+ i__2 = *m;
+ for (jj = j + 1; jj <= i__2; ++jj) {
+ if (w[jj] < tmp1) {
+ i__ = jj;
+ tmp1 = w[jj];
+ }
+/* L40: */
+ }
+
+ if (i__ != 0) {
+ itmp1 = iwork[indibl + i__ - 1];
+ w[i__] = w[j];
+ iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+ w[j] = tmp1;
+ iwork[indibl + j - 1] = itmp1;
+ cswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+ &c__1);
+ }
+/* L50: */
+ }
+ }
+
+/* Set WORK(1) to optimal workspace size. */
+
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ rwork[1] = (real) lrwmin;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of CHEEVR */
+
+} /* cheevr_ */
diff --git a/contrib/libs/clapack/cheevx.c b/contrib/libs/clapack/cheevx.c
new file mode 100644
index 0000000000..3381e87283
--- /dev/null
+++ b/contrib/libs/clapack/cheevx.c
@@ -0,0 +1,542 @@
+/* cheevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cheevx_(char *jobz, char *range, char *uplo, integer *n,
+ complex *a, integer *lda, real *vl, real *vu, integer *il, integer *
+ iu, real *abstol, integer *m, real *w, complex *z__, integer *ldz,
+ complex *work, integer *lwork, real *rwork, integer *iwork, integer *
+ ifail, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, z_dim1, z_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, nb, jj;
+ real eps, vll, vuu, tmp1;
+ integer indd, inde;
+ real anrm;
+ integer imax;
+ real rmin, rmax;
+ logical test;
+ integer itmp1, indee;
+ real sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ char order[1];
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ logical lower;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ logical wantz;
+ extern doublereal clanhe_(char *, char *, integer *, complex *, integer *,
+ real *);
+ logical alleig, indeig;
+ integer iscale, indibl;
+ logical valeig;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int chetrd_(char *, integer *, complex *, integer
+ *, real *, real *, complex *, complex *, integer *, integer *), csscal_(integer *, real *, complex *, integer *),
+ clacpy_(char *, integer *, integer *, complex *, integer *,
+ complex *, integer *);
+ real safmin;
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real abstll, bignum;
+ integer indiwk, indisp, indtau;
+ extern /* Subroutine */ int cstein_(integer *, real *, real *, integer *,
+ real *, integer *, integer *, complex *, integer *, real *,
+ integer *, integer *, integer *);
+ integer indrwk, indwrk, lwkmin;
+ extern /* Subroutine */ int csteqr_(char *, integer *, real *, real *,
+ complex *, integer *, real *, integer *), cungtr_(char *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ integer *), ssterf_(integer *, real *, real *, integer *),
+ cunmtr_(char *, char *, char *, integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ integer *);
+ integer nsplit, llwork;
+ real smlnum;
+ extern /* Subroutine */ int sstebz_(char *, char *, integer *, real *,
+ real *, integer *, integer *, real *, real *, real *, integer *,
+ integer *, real *, integer *, integer *, real *, integer *,
+ integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHEEVX computes selected eigenvalues and, optionally, eigenvectors */
+/* of a complex Hermitian matrix A. Eigenvalues and eigenvectors can */
+/* be selected by specifying either a range of values or a range of */
+/* indices for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA, N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+/* On exit, the lower triangle (if UPLO='L') or the upper */
+/* triangle (if UPLO='U') of A, including the diagonal, is */
+/* destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) REAL */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing A to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*SLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*SLAMCH('S'). */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices */
+/* with Guaranteed High Relative Accuracy," by Demmel and */
+/* Kahan, LAPACK Working Note #3. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) REAL array, dimension (N) */
+/* On normal exit, the first M elements contain the selected */
+/* eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and the */
+/* index of the eigenvector is returned in IFAIL. */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= 1, when N <= 1; */
+/* otherwise 2*N. */
+/* For optimal efficiency, LWORK >= (NB+1)*N, */
+/* where NB is the max of the blocksize for CHETRD and for */
+/* CUNMTR as returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (7*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, then i eigenvectors failed to converge. */
+/* Their indices are stored in array IFAIL. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ lower = lsame_(uplo, "L");
+ wantz = lsame_(jobz, "V");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+ lquery = *lwork == -1;
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -2;
+ } else if (! (lower || lsame_(uplo, "U"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -8;
+ }
+ } else if (indeig) {
+ if (*il < 1 || *il > max(1,*n)) {
+ *info = -9;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -10;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -15;
+ }
+ }
+
+ if (*info == 0) {
+ if (*n <= 1) {
+ lwkmin = 1;
+ work[1].r = (real) lwkmin, work[1].i = 0.f;
+ } else {
+ lwkmin = *n << 1;
+ nb = ilaenv_(&c__1, "CHETRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = nb, i__2 = ilaenv_(&c__1, "CUNMTR", uplo, n, &c_n1, &c_n1,
+ &c_n1);
+ nb = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = 1, i__2 = (nb + 1) * *n;
+ lwkopt = max(i__1,i__2);
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*lwork < lwkmin && ! lquery) {
+ *info = -17;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHEEVX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (alleig || indeig) {
+ *m = 1;
+ i__1 = a_dim1 + 1;
+ w[1] = a[i__1].r;
+ } else if (valeig) {
+ i__1 = a_dim1 + 1;
+ i__2 = a_dim1 + 1;
+ if (*vl < a[i__1].r && *vu >= a[i__2].r) {
+ *m = 1;
+ i__1 = a_dim1 + 1;
+ w[1] = a[i__1].r;
+ }
+ }
+ if (wantz) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1.f, z__[i__1].i = 0.f;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = slamch_("Safe minimum");
+ eps = slamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1.f / smlnum;
+ rmin = sqrt(smlnum);
+/* Computing MIN */
+ r__1 = sqrt(bignum), r__2 = 1.f / sqrt(sqrt(safmin));
+ rmax = dmin(r__1,r__2);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ iscale = 0;
+ abstll = *abstol;
+ if (valeig) {
+ vll = *vl;
+ vuu = *vu;
+ }
+ anrm = clanhe_("M", uplo, n, &a[a_offset], lda, &rwork[1]);
+ if (anrm > 0.f && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ if (lower) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j + 1;
+ csscal_(&i__2, &sigma, &a[j + j * a_dim1], &c__1);
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ csscal_(&j, &sigma, &a[j * a_dim1 + 1], &c__1);
+/* L20: */
+ }
+ }
+ if (*abstol > 0.f) {
+ abstll = *abstol * sigma;
+ }
+ if (valeig) {
+ vll = *vl * sigma;
+ vuu = *vu * sigma;
+ }
+ }
+
+/* Call CHETRD to reduce Hermitian matrix to tridiagonal form. */
+
+ indd = 1;
+ inde = indd + *n;
+ indrwk = inde + *n;
+ indtau = 1;
+ indwrk = indtau + *n;
+ llwork = *lwork - indwrk + 1;
+ chetrd_(uplo, n, &a[a_offset], lda, &rwork[indd], &rwork[inde], &work[
+ indtau], &work[indwrk], &llwork, &iinfo);
+
+/* If all eigenvalues are desired and ABSTOL is less than or equal to */
+/* zero, then call SSTERF or CUNGTR and CSTEQR. If this fails for */
+/* some eigenvalue, then try SSTEBZ. */
+
+ test = FALSE_;
+ if (indeig) {
+ if (*il == 1 && *iu == *n) {
+ test = TRUE_;
+ }
+ }
+ if ((alleig || test) && *abstol <= 0.f) {
+ scopy_(n, &rwork[indd], &c__1, &w[1], &c__1);
+ indee = indrwk + (*n << 1);
+ if (! wantz) {
+ i__1 = *n - 1;
+ scopy_(&i__1, &rwork[inde], &c__1, &rwork[indee], &c__1);
+ ssterf_(n, &w[1], &rwork[indee], info);
+ } else {
+ clacpy_("A", n, n, &a[a_offset], lda, &z__[z_offset], ldz);
+ cungtr_(uplo, n, &z__[z_offset], ldz, &work[indtau], &work[indwrk]
+, &llwork, &iinfo);
+ i__1 = *n - 1;
+ scopy_(&i__1, &rwork[inde], &c__1, &rwork[indee], &c__1);
+ csteqr_(jobz, n, &w[1], &rwork[indee], &z__[z_offset], ldz, &
+ rwork[indrwk], info);
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ifail[i__] = 0;
+/* L30: */
+ }
+ }
+ }
+ if (*info == 0) {
+ *m = *n;
+ goto L40;
+ }
+ *info = 0;
+ }
+
+/* Otherwise, call SSTEBZ and, if eigenvectors are desired, CSTEIN. */
+
+ if (wantz) {
+ *(unsigned char *)order = 'B';
+ } else {
+ *(unsigned char *)order = 'E';
+ }
+ indibl = 1;
+ indisp = indibl + *n;
+ indiwk = indisp + *n;
+ sstebz_(range, order, n, &vll, &vuu, il, iu, &abstll, &rwork[indd], &
+ rwork[inde], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &
+ rwork[indrwk], &iwork[indiwk], info);
+
+ if (wantz) {
+ cstein_(n, &rwork[indd], &rwork[inde], m, &w[1], &iwork[indibl], &
+ iwork[indisp], &z__[z_offset], ldz, &rwork[indrwk], &iwork[
+ indiwk], &ifail[1], info);
+
+/* Apply unitary matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by CSTEIN. */
+
+ cunmtr_("L", uplo, "N", n, m, &a[a_offset], lda, &work[indtau], &z__[
+ z_offset], ldz, &work[indwrk], &llwork, &iinfo);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+L40:
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *m;
+ } else {
+ imax = *info - 1;
+ }
+ r__1 = 1.f / sigma;
+ sscal_(&imax, &r__1, &w[1], &c__1);
+ }
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+ if (wantz) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = 0;
+ tmp1 = w[j];
+ i__2 = *m;
+ for (jj = j + 1; jj <= i__2; ++jj) {
+ if (w[jj] < tmp1) {
+ i__ = jj;
+ tmp1 = w[jj];
+ }
+/* L50: */
+ }
+
+ if (i__ != 0) {
+ itmp1 = iwork[indibl + i__ - 1];
+ w[i__] = w[j];
+ iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+ w[j] = tmp1;
+ iwork[indibl + j - 1] = itmp1;
+ cswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+ &c__1);
+ if (*info != 0) {
+ itmp1 = ifail[i__];
+ ifail[i__] = ifail[j];
+ ifail[j] = itmp1;
+ }
+ }
+/* L60: */
+ }
+ }
+
+/* Set WORK(1) to optimal complex workspace size. */
+
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CHEEVX */
+
+} /* cheevx_ */
diff --git a/contrib/libs/clapack/chegs2.c b/contrib/libs/clapack/chegs2.c
new file mode 100644
index 0000000000..198841829b
--- /dev/null
+++ b/contrib/libs/clapack/chegs2.c
@@ -0,0 +1,334 @@
+/* chegs2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int chegs2_(integer *itype, char *uplo, integer *n, complex *
+	a, integer *lda, complex *b, integer *ldb, integer *info)
+{
+/* NOTE(review): machine-generated by f2c from chegs2.f (see the file */
+/* header); fix defects in the Fortran source and regenerate rather than */
+/* hand-editing this translation. */
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+    real r__1, r__2;
+    complex q__1;
+
+    /* Local variables */
+    integer k;
+    complex ct;
+    real akk, bkk;
+    extern /* Subroutine */ int cher2_(char *, integer *, complex *, complex *
+, integer *, complex *, integer *, complex *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int caxpy_(integer *, complex *, complex *, 
+	    integer *, complex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int ctrmv_(char *, char *, char *, integer *, 
+	    complex *, integer *, complex *, integer *), ctrsv_(char *, char *, char *, integer *, complex *, 
+	    integer *, complex *, integer *), clacgv_(
+	    integer *, complex *, integer *), csscal_(integer *, real *, 
+	    complex *, integer *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHEGS2 reduces a complex Hermitian-definite generalized */
+/* eigenproblem to standard form. */
+
+/* If ITYPE = 1, the problem is A*x = lambda*B*x, */
+/* and A is overwritten by inv(U')*A*inv(U) or inv(L)*A*inv(L') */
+
+/* If ITYPE = 2 or 3, the problem is A*B*x = lambda*x or */
+/* B*A*x = lambda*x, and A is overwritten by U*A*U` or L'*A*L. */
+
+/* B must have been previously factorized as U'*U or L*L' by CPOTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* = 1: compute inv(U')*A*inv(U) or inv(L)*A*inv(L'); */
+/* = 2 or 3: compute U*A*U' or L'*A*L. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* Hermitian matrix A is stored, and how B has been factorized. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the transformed matrix, stored in the */
+/* same format as A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input) COMPLEX array, dimension (LDB,N) */
+/* The triangular factor from the Cholesky factorization of B, */
+/* as returned by CPOTRF. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* Shift the base pointers so 1-based Fortran indexing a[i + j*a_dim1] */
+    /* works directly in C (standard f2c idiom). */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (*itype < 1 || *itype > 3) {
+	*info = -1;
+    } else if (! upper && ! lsame_(uplo, "L")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CHEGS2", &i__1);
+	return 0;
+    }
+
+/* ITYPE = 1: unblocked reduction, one row/column of A at a time. */
+    if (*itype == 1) {
+	if (upper) {
+
+/* Compute inv(U')*A*inv(U) */
+
+	    i__1 = *n;
+	    for (k = 1; k <= i__1; ++k) {
+
+/* Update the upper triangle of A(k:n,k:n) */
+
+		i__2 = k + k * a_dim1;
+		akk = a[i__2].r;
+		i__2 = k + k * b_dim1;
+		bkk = b[i__2].r;
+/* Computing 2nd power */
+		r__1 = bkk;
+		akk /= r__1 * r__1;
+		i__2 = k + k * a_dim1;
+/* Diagonal entries of a Hermitian matrix are real: imag part forced to 0. */
+		a[i__2].r = akk, a[i__2].i = 0.f;
+		if (k < *n) {
+		    i__2 = *n - k;
+		    r__1 = 1.f / bkk;
+		    csscal_(&i__2, &r__1, &a[k + (k + 1) * a_dim1], lda);
+/* ct = -akk/2; added before and after the rank-2 update so the */
+/* diagonal contribution is split symmetrically (same scheme as the */
+/* Fortran source). */
+		    r__1 = akk * -.5f;
+		    ct.r = r__1, ct.i = 0.f;
+/* A(k,k+1:n) and B(k,k+1:n) are stored rows (stride LDA/LDB); the */
+/* CLACGV calls bracket the updates so the BLAS below operate on the */
+/* conjugated vectors, mirroring the CLACGV usage in the Fortran. */
+		    i__2 = *n - k;
+		    clacgv_(&i__2, &a[k + (k + 1) * a_dim1], lda);
+		    i__2 = *n - k;
+		    clacgv_(&i__2, &b[k + (k + 1) * b_dim1], ldb);
+		    i__2 = *n - k;
+		    caxpy_(&i__2, &ct, &b[k + (k + 1) * b_dim1], ldb, &a[k + (
+			    k + 1) * a_dim1], lda);
+		    i__2 = *n - k;
+/* alpha = -1 for the Hermitian rank-2 update. */
+		    q__1.r = -1.f, q__1.i = -0.f;
+		    cher2_(uplo, &i__2, &q__1, &a[k + (k + 1) * a_dim1], lda, 
+			    &b[k + (k + 1) * b_dim1], ldb, &a[k + 1 + (k + 1) 
+			    * a_dim1], lda);
+		    i__2 = *n - k;
+		    caxpy_(&i__2, &ct, &b[k + (k + 1) * b_dim1], ldb, &a[k + (
+			    k + 1) * a_dim1], lda);
+		    i__2 = *n - k;
+		    clacgv_(&i__2, &b[k + (k + 1) * b_dim1], ldb);
+		    i__2 = *n - k;
+		    ctrsv_(uplo, "Conjugate transpose", "Non-unit", &i__2, &b[
+			    k + 1 + (k + 1) * b_dim1], ldb, &a[k + (k + 1) * 
+			    a_dim1], lda);
+		    i__2 = *n - k;
+		    clacgv_(&i__2, &a[k + (k + 1) * a_dim1], lda);
+		}
+/* L10: */
+	    }
+	} else {
+
+/* Compute inv(L)*A*inv(L') */
+
+	    i__1 = *n;
+	    for (k = 1; k <= i__1; ++k) {
+
+/* Update the lower triangle of A(k:n,k:n) */
+
+		i__2 = k + k * a_dim1;
+		akk = a[i__2].r;
+		i__2 = k + k * b_dim1;
+		bkk = b[i__2].r;
+/* Computing 2nd power */
+		r__1 = bkk;
+		akk /= r__1 * r__1;
+		i__2 = k + k * a_dim1;
+		a[i__2].r = akk, a[i__2].i = 0.f;
+		if (k < *n) {
+		    i__2 = *n - k;
+		    r__1 = 1.f / bkk;
+/* Column vectors here (unit stride), so no CLACGV bracketing is needed. */
+		    csscal_(&i__2, &r__1, &a[k + 1 + k * a_dim1], &c__1);
+		    r__1 = akk * -.5f;
+		    ct.r = r__1, ct.i = 0.f;
+		    i__2 = *n - k;
+		    caxpy_(&i__2, &ct, &b[k + 1 + k * b_dim1], &c__1, &a[k + 
+			    1 + k * a_dim1], &c__1);
+		    i__2 = *n - k;
+		    q__1.r = -1.f, q__1.i = -0.f;
+		    cher2_(uplo, &i__2, &q__1, &a[k + 1 + k * a_dim1], &c__1, 
+			    &b[k + 1 + k * b_dim1], &c__1, &a[k + 1 + (k + 1) 
+			    * a_dim1], lda);
+		    i__2 = *n - k;
+		    caxpy_(&i__2, &ct, &b[k + 1 + k * b_dim1], &c__1, &a[k + 
+			    1 + k * a_dim1], &c__1);
+		    i__2 = *n - k;
+		    ctrsv_(uplo, "No transpose", "Non-unit", &i__2, &b[k + 1 
+			    + (k + 1) * b_dim1], ldb, &a[k + 1 + k * a_dim1], 
+			    &c__1);
+		}
+/* L20: */
+	    }
+	}
+    } else {
+/* ITYPE = 2 or 3: multiply by the Cholesky factor instead of its inverse. */
+	if (upper) {
+
+/* Compute U*A*U' */
+
+	    i__1 = *n;
+	    for (k = 1; k <= i__1; ++k) {
+
+/* Update the upper triangle of A(1:k,1:k) */
+
+		i__2 = k + k * a_dim1;
+		akk = a[i__2].r;
+		i__2 = k + k * b_dim1;
+		bkk = b[i__2].r;
+		i__2 = k - 1;
+		ctrmv_(uplo, "No transpose", "Non-unit", &i__2, &b[b_offset], 
+			ldb, &a[k * a_dim1 + 1], &c__1);
+/* ct = akk/2; applied symmetrically around the CHER2 (see Fortran). */
+		r__1 = akk * .5f;
+		ct.r = r__1, ct.i = 0.f;
+		i__2 = k - 1;
+		caxpy_(&i__2, &ct, &b[k * b_dim1 + 1], &c__1, &a[k * a_dim1 + 
+			1], &c__1);
+		i__2 = k - 1;
+		cher2_(uplo, &i__2, &c_b1, &a[k * a_dim1 + 1], &c__1, &b[k * 
+			b_dim1 + 1], &c__1, &a[a_offset], lda);
+		i__2 = k - 1;
+		caxpy_(&i__2, &ct, &b[k * b_dim1 + 1], &c__1, &a[k * a_dim1 + 
+			1], &c__1);
+		i__2 = k - 1;
+		csscal_(&i__2, &bkk, &a[k * a_dim1 + 1], &c__1);
+		i__2 = k + k * a_dim1;
+/* Computing 2nd power */
+		r__2 = bkk;
+		r__1 = akk * (r__2 * r__2);
+		a[i__2].r = r__1, a[i__2].i = 0.f;
+/* L30: */
+	    }
+	} else {
+
+/* Compute L'*A*L */
+
+	    i__1 = *n;
+	    for (k = 1; k <= i__1; ++k) {
+
+/* Update the lower triangle of A(1:k,1:k) */
+
+		i__2 = k + k * a_dim1;
+		akk = a[i__2].r;
+		i__2 = k + k * b_dim1;
+		bkk = b[i__2].r;
+		i__2 = k - 1;
+/* Row-stored vectors again: conjugate A(k,1:k-1)/B(k,1:k-1) around the */
+/* updates, as in the upper ITYPE=1 case. */
+		clacgv_(&i__2, &a[k + a_dim1], lda);
+		i__2 = k - 1;
+		ctrmv_(uplo, "Conjugate transpose", "Non-unit", &i__2, &b[
+			b_offset], ldb, &a[k + a_dim1], lda);
+		r__1 = akk * .5f;
+		ct.r = r__1, ct.i = 0.f;
+		i__2 = k - 1;
+		clacgv_(&i__2, &b[k + b_dim1], ldb);
+		i__2 = k - 1;
+		caxpy_(&i__2, &ct, &b[k + b_dim1], ldb, &a[k + a_dim1], lda);
+		i__2 = k - 1;
+		cher2_(uplo, &i__2, &c_b1, &a[k + a_dim1], lda, &b[k + b_dim1]
+, ldb, &a[a_offset], lda);
+		i__2 = k - 1;
+		caxpy_(&i__2, &ct, &b[k + b_dim1], ldb, &a[k + a_dim1], lda);
+		i__2 = k - 1;
+		clacgv_(&i__2, &b[k + b_dim1], ldb);
+		i__2 = k - 1;
+		csscal_(&i__2, &bkk, &a[k + a_dim1], lda);
+		i__2 = k - 1;
+		clacgv_(&i__2, &a[k + a_dim1], lda);
+		i__2 = k + k * a_dim1;
+/* Computing 2nd power */
+		r__2 = bkk;
+		r__1 = akk * (r__2 * r__2);
+		a[i__2].r = r__1, a[i__2].i = 0.f;
+/* L40: */
+	    }
+	}
+    }
+    return 0;
+
+/* End of CHEGS2 */
+
+} /* chegs2_ */
diff --git a/contrib/libs/clapack/chegst.c b/contrib/libs/clapack/chegst.c
new file mode 100644
index 0000000000..783d0ace20
--- /dev/null
+++ b/contrib/libs/clapack/chegst.c
@@ -0,0 +1,350 @@
+/* chegst.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static complex c_b2 = {.5f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b18 = 1.f;
+
+/* Subroutine */ int chegst_(integer *itype, char *uplo, integer *n, complex *
+	a, integer *lda, complex *b, integer *ldb, integer *info)
+{
+/* NOTE(review): machine-generated by f2c from chegst.f; prefer fixing the */
+/* Fortran source and regenerating over hand-editing this translation. */
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+    complex q__1;
+
+    /* Local variables */
+    integer k, kb, nb;
+    extern /* Subroutine */ int chemm_(char *, char *, integer *, integer *, 
+	    complex *, complex *, integer *, complex *, integer *, complex *, 
+	    complex *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int ctrmm_(char *, char *, char *, char *, 
+	    integer *, integer *, complex *, complex *, integer *, complex *, 
+	    integer *), ctrsm_(char *, char *, 
+	    char *, char *, integer *, integer *, complex *, complex *, 
+	    integer *, complex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int chegs2_(integer *, char *, integer *, complex 
+	    *, integer *, complex *, integer *, integer *), cher2k_(
+	    char *, char *, integer *, integer *, complex *, complex *, 
+	    integer *, complex *, integer *, real *, complex *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHEGST reduces a complex Hermitian-definite generalized */
+/* eigenproblem to standard form. */
+
+/* If ITYPE = 1, the problem is A*x = lambda*B*x, */
+/* and A is overwritten by inv(U**H)*A*inv(U) or inv(L)*A*inv(L**H) */
+
+/* If ITYPE = 2 or 3, the problem is A*B*x = lambda*x or */
+/* B*A*x = lambda*x, and A is overwritten by U*A*U**H or L**H*A*L. */
+
+/* B must have been previously factorized as U**H*U or L*L**H by CPOTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* = 1: compute inv(U**H)*A*inv(U) or inv(L)*A*inv(L**H); */
+/* = 2 or 3: compute U*A*U**H or L**H*A*L. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored and B is factored as */
+/* U**H*U; */
+/* = 'L': Lower triangle of A is stored and B is factored as */
+/* L*L**H. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the transformed matrix, stored in the */
+/* same format as A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input) COMPLEX array, dimension (LDB,N) */
+/* The triangular factor from the Cholesky factorization of B, */
+/* as returned by CPOTRF. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* Shift base pointers for 1-based Fortran-style indexing (f2c idiom). */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (*itype < 1 || *itype > 3) {
+	*info = -1;
+    } else if (! upper && ! lsame_(uplo, "L")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CHEGST", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Determine the block size for this environment. */
+
+    nb = ilaenv_(&c__1, "CHEGST", uplo, n, &c_n1, &c_n1, &c_n1);
+
+    if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code */
+
+	chegs2_(itype, uplo, n, &a[a_offset], lda, &b[b_offset], ldb, info);
+    } else {
+
+/* Use blocked code */
+
+/* Each pass transforms the KB-by-KB diagonal block with the unblocked */
+/* CHEGS2, then applies the remaining updates with level-3 BLAS */
+/* (CTRSM/CHEMM/CHER2K for ITYPE=1, CTRMM/CHEMM/CHER2K for ITYPE=2,3). */
+	if (*itype == 1) {
+	    if (upper) {
+
+/* Compute inv(U')*A*inv(U) */
+
+		i__1 = *n;
+		i__2 = nb;
+/* f2c rendering of "DO 10 K = 1, N, NB"; nb > 1 on this path, so the */
+/* negative-step (k >= i__1) arm of the condition is never taken. */
+		for (k = 1; i__2 < 0 ? k >= i__1 : k <= i__1; k += i__2) {
+/* Computing MIN */
+		    i__3 = *n - k + 1;
+		    kb = min(i__3,nb);
+
+/* Update the upper triangle of A(k:n,k:n) */
+
+		    chegs2_(itype, uplo, &kb, &a[k + k * a_dim1], lda, &b[k + 
+			    k * b_dim1], ldb, info);
+		    if (k + kb <= *n) {
+			i__3 = *n - k - kb + 1;
+			ctrsm_("Left", uplo, "Conjugate transpose", "Non-unit"
+, &kb, &i__3, &c_b1, &b[k + k * b_dim1], ldb, 
+				&a[k + (k + kb) * a_dim1], lda);
+			i__3 = *n - k - kb + 1;
+/* alpha = -1/2 for the CHEMM half-update (applied again below). */
+			q__1.r = -.5f, q__1.i = -0.f;
+			chemm_("Left", uplo, &kb, &i__3, &q__1, &a[k + k * 
+				a_dim1], lda, &b[k + (k + kb) * b_dim1], ldb, 
+				&c_b1, &a[k + (k + kb) * a_dim1], lda);
+			i__3 = *n - k - kb + 1;
+			q__1.r = -1.f, q__1.i = -0.f;
+			cher2k_(uplo, "Conjugate transpose", &i__3, &kb, &
+				q__1, &a[k + (k + kb) * a_dim1], lda, &b[k + (
+				k + kb) * b_dim1], ldb, &c_b18, &a[k + kb + (
+				k + kb) * a_dim1], lda)
+				;
+			i__3 = *n - k - kb + 1;
+			q__1.r = -.5f, q__1.i = -0.f;
+			chemm_("Left", uplo, &kb, &i__3, &q__1, &a[k + k * 
+				a_dim1], lda, &b[k + (k + kb) * b_dim1], ldb, 
+				&c_b1, &a[k + (k + kb) * a_dim1], lda);
+			i__3 = *n - k - kb + 1;
+			ctrsm_("Right", uplo, "No transpose", "Non-unit", &kb, 
+				&i__3, &c_b1, &b[k + kb + (k + kb) * b_dim1], 
+				ldb, &a[k + (k + kb) * a_dim1], lda);
+		    }
+/* L10: */
+		}
+	    } else {
+
+/* Compute inv(L)*A*inv(L') */
+
+		i__2 = *n;
+		i__1 = nb;
+		for (k = 1; i__1 < 0 ? k >= i__2 : k <= i__2; k += i__1) {
+/* Computing MIN */
+		    i__3 = *n - k + 1;
+		    kb = min(i__3,nb);
+
+/* Update the lower triangle of A(k:n,k:n) */
+
+		    chegs2_(itype, uplo, &kb, &a[k + k * a_dim1], lda, &b[k + 
+			    k * b_dim1], ldb, info);
+		    if (k + kb <= *n) {
+			i__3 = *n - k - kb + 1;
+			ctrsm_("Right", uplo, "Conjugate transpose", "Non-un"
+				"it", &i__3, &kb, &c_b1, &b[k + k * b_dim1], 
+				ldb, &a[k + kb + k * a_dim1], lda);
+			i__3 = *n - k - kb + 1;
+			q__1.r = -.5f, q__1.i = -0.f;
+			chemm_("Right", uplo, &i__3, &kb, &q__1, &a[k + k * 
+				a_dim1], lda, &b[k + kb + k * b_dim1], ldb, &
+				c_b1, &a[k + kb + k * a_dim1], lda);
+			i__3 = *n - k - kb + 1;
+			q__1.r = -1.f, q__1.i = -0.f;
+			cher2k_(uplo, "No transpose", &i__3, &kb, &q__1, &a[k 
+				+ kb + k * a_dim1], lda, &b[k + kb + k * 
+				b_dim1], ldb, &c_b18, &a[k + kb + (k + kb) * 
+				a_dim1], lda);
+			i__3 = *n - k - kb + 1;
+			q__1.r = -.5f, q__1.i = -0.f;
+			chemm_("Right", uplo, &i__3, &kb, &q__1, &a[k + k * 
+				a_dim1], lda, &b[k + kb + k * b_dim1], ldb, &
+				c_b1, &a[k + kb + k * a_dim1], lda);
+			i__3 = *n - k - kb + 1;
+			ctrsm_("Left", uplo, "No transpose", "Non-unit", &
+				i__3, &kb, &c_b1, &b[k + kb + (k + kb) * 
+				b_dim1], ldb, &a[k + kb + k * a_dim1], lda);
+		    }
+/* L20: */
+		}
+	    }
+	} else {
+/* ITYPE = 2 or 3: leading block updated first, then CHEGS2 on the */
+/* diagonal block (reverse of the ITYPE=1 ordering). */
+	    if (upper) {
+
+/* Compute U*A*U' */
+
+		i__1 = *n;
+		i__2 = nb;
+		for (k = 1; i__2 < 0 ? k >= i__1 : k <= i__1; k += i__2) {
+/* Computing MIN */
+		    i__3 = *n - k + 1;
+		    kb = min(i__3,nb);
+
+/* Update the upper triangle of A(1:k+kb-1,1:k+kb-1) */
+
+		    i__3 = k - 1;
+		    ctrmm_("Left", uplo, "No transpose", "Non-unit", &i__3, &
+			    kb, &c_b1, &b[b_offset], ldb, &a[k * a_dim1 + 1], 
+			    lda);
+		    i__3 = k - 1;
+/* c_b2 = 1/2: the CHEMM half-update applied before and after CHER2K. */
+		    chemm_("Right", uplo, &i__3, &kb, &c_b2, &a[k + k * 
+			    a_dim1], lda, &b[k * b_dim1 + 1], ldb, &c_b1, &a[
+			    k * a_dim1 + 1], lda);
+		    i__3 = k - 1;
+		    cher2k_(uplo, "No transpose", &i__3, &kb, &c_b1, &a[k * 
+			    a_dim1 + 1], lda, &b[k * b_dim1 + 1], ldb, &c_b18, 
+			    &a[a_offset], lda);
+		    i__3 = k - 1;
+		    chemm_("Right", uplo, &i__3, &kb, &c_b2, &a[k + k * 
+			    a_dim1], lda, &b[k * b_dim1 + 1], ldb, &c_b1, &a[
+			    k * a_dim1 + 1], lda);
+		    i__3 = k - 1;
+		    ctrmm_("Right", uplo, "Conjugate transpose", "Non-unit", &
+			    i__3, &kb, &c_b1, &b[k + k * b_dim1], ldb, &a[k * 
+			    a_dim1 + 1], lda);
+		    chegs2_(itype, uplo, &kb, &a[k + k * a_dim1], lda, &b[k + 
+			    k * b_dim1], ldb, info);
+/* L30: */
+		}
+	    } else {
+
+/* Compute L'*A*L */
+
+		i__2 = *n;
+		i__1 = nb;
+		for (k = 1; i__1 < 0 ? k >= i__2 : k <= i__2; k += i__1) {
+/* Computing MIN */
+		    i__3 = *n - k + 1;
+		    kb = min(i__3,nb);
+
+/* Update the lower triangle of A(1:k+kb-1,1:k+kb-1) */
+
+		    i__3 = k - 1;
+		    ctrmm_("Right", uplo, "No transpose", "Non-unit", &kb, &
+			    i__3, &c_b1, &b[b_offset], ldb, &a[k + a_dim1], 
+			    lda);
+		    i__3 = k - 1;
+		    chemm_("Left", uplo, &kb, &i__3, &c_b2, &a[k + k * a_dim1]
+, lda, &b[k + b_dim1], ldb, &c_b1, &a[k + a_dim1], 
+			    lda);
+		    i__3 = k - 1;
+		    cher2k_(uplo, "Conjugate transpose", &i__3, &kb, &c_b1, &
+			    a[k + a_dim1], lda, &b[k + b_dim1], ldb, &c_b18, &
+			    a[a_offset], lda);
+		    i__3 = k - 1;
+		    chemm_("Left", uplo, &kb, &i__3, &c_b2, &a[k + k * a_dim1]
+, lda, &b[k + b_dim1], ldb, &c_b1, &a[k + a_dim1], 
+			    lda);
+		    i__3 = k - 1;
+		    ctrmm_("Left", uplo, "Conjugate transpose", "Non-unit", &
+			    kb, &i__3, &c_b1, &b[k + k * b_dim1], ldb, &a[k + 
+			    a_dim1], lda);
+		    chegs2_(itype, uplo, &kb, &a[k + k * a_dim1], lda, &b[k + 
+			    k * b_dim1], ldb, info);
+/* L40: */
+		}
+	    }
+	}
+    }
+    return 0;
+
+/* End of CHEGST */
+
+} /* chegst_ */
diff --git a/contrib/libs/clapack/chegv.c b/contrib/libs/clapack/chegv.c
new file mode 100644
index 0000000000..0888e89b3f
--- /dev/null
+++ b/contrib/libs/clapack/chegv.c
@@ -0,0 +1,286 @@
+/* chegv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int chegv_(integer *itype, char *jobz, char *uplo, integer *
+ n, complex *a, integer *lda, complex *b, integer *ldb, real *w,
+ complex *work, integer *lwork, real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+
+ /* Local variables */
+ integer nb, neig;
+ extern /* Subroutine */ int cheev_(char *, char *, integer *, complex *,
+ integer *, real *, complex *, integer *, real *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ctrmm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *);
+ char trans[1];
+ extern /* Subroutine */ int ctrsm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *);
+ logical upper, wantz;
+ extern /* Subroutine */ int chegst_(integer *, char *, integer *, complex
+ *, integer *, complex *, integer *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), cpotrf_(
+ char *, integer *, complex *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHEGV computes all the eigenvalues, and optionally, the eigenvectors */
+/* of a complex generalized Hermitian-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. */
+/* Here A and B are assumed to be Hermitian and B is also */
+/* positive definite. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA, N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+
+/* On exit, if JOBZ = 'V', then if INFO = 0, A contains the */
+/* matrix Z of eigenvectors. The eigenvectors are normalized */
+/* as follows: */
+/* if ITYPE = 1 or 2, Z**H*B*Z = I; */
+/* if ITYPE = 3, Z**H*inv(B)*Z = I. */
+/* If JOBZ = 'N', then on exit the upper triangle (if UPLO='U') */
+/* or the lower triangle (if UPLO='L') of A, including the */
+/* diagonal, is destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX array, dimension (LDB, N) */
+/* On entry, the Hermitian positive definite matrix B. */
+/* If UPLO = 'U', the leading N-by-N upper triangular part of B */
+/* contains the upper triangular part of the matrix B. */
+/* If UPLO = 'L', the leading N-by-N lower triangular part of B */
+/* contains the lower triangular part of the matrix B. */
+
+/* On exit, if INFO <= N, the part of B containing the matrix is */
+/* overwritten by the triangular factor U or L from the Cholesky */
+/* factorization B = U**H*U or B = L*L**H. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,2*N-1). */
+/* For optimal efficiency, LWORK >= (NB+1)*N, */
+/* where NB is the blocksize for CHETRD returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (max(1, 3*N-2)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: CPOTRF or CHEEV returned an error code: */
+/* <= N: if INFO = i, CHEEV failed to converge; */
+/* i off-diagonal elements of an intermediate */
+/* tridiagonal form did not converge to zero; */
+/* > N: if INFO = N + i, for 1 <= i <= N, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --w;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1;
+
+ *info = 0;
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -2;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ }
+
+ if (*info == 0) {
+ nb = ilaenv_(&c__1, "CHETRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = 1, i__2 = (nb + 1) * *n;
+ lwkopt = max(i__1,i__2);
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+/* Computing MAX */
+ i__1 = 1, i__2 = (*n << 1) - 1;
+ if (*lwork < max(i__1,i__2) && ! lquery) {
+ *info = -11;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHEGV ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a Cholesky factorization of B. */
+
+ cpotrf_(uplo, n, &b[b_offset], ldb, info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+ chegst_(itype, uplo, n, &a[a_offset], lda, &b[b_offset], ldb, info);
+ cheev_(jobz, uplo, n, &a[a_offset], lda, &w[1], &work[1], lwork, &rwork[1]
+, info);
+
+ if (wantz) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+ neig = *n;
+ if (*info > 0) {
+ neig = *info - 1;
+ }
+ if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'N';
+ } else {
+ *(unsigned char *)trans = 'C';
+ }
+
+ ctrsm_("Left", uplo, trans, "Non-unit", n, &neig, &c_b1, &b[
+ b_offset], ldb, &a[a_offset], lda);
+
+ } else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'C';
+ } else {
+ *(unsigned char *)trans = 'N';
+ }
+
+ ctrmm_("Left", uplo, trans, "Non-unit", n, &neig, &c_b1, &b[
+ b_offset], ldb, &a[a_offset], lda);
+ }
+ }
+
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CHEGV */
+
+} /* chegv_ */
diff --git a/contrib/libs/clapack/chegvd.c b/contrib/libs/clapack/chegvd.c
new file mode 100644
index 0000000000..5f19c37e1b
--- /dev/null
+++ b/contrib/libs/clapack/chegvd.c
@@ -0,0 +1,364 @@
+/* chegvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+
+/* Subroutine */ int chegvd_(integer *itype, char *jobz, char *uplo, integer *
+ n, complex *a, integer *lda, complex *b, integer *ldb, real *w,
+ complex *work, integer *lwork, real *rwork, integer *lrwork, integer *
+ iwork, integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+ real r__1, r__2;
+
+ /* Local variables */
+ integer lopt;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ctrmm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *);
+ integer lwmin;
+ char trans[1];
+ integer liopt;
+ extern /* Subroutine */ int ctrsm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *);
+ logical upper;
+ integer lropt;
+ logical wantz;
+ extern /* Subroutine */ int cheevd_(char *, char *, integer *, complex *,
+ integer *, real *, complex *, integer *, real *, integer *,
+ integer *, integer *, integer *), chegst_(integer
+ *, char *, integer *, complex *, integer *, complex *, integer *,
+ integer *), xerbla_(char *, integer *), cpotrf_(
+ char *, integer *, complex *, integer *, integer *);
+ integer liwmin, lrwmin;
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHEGVD computes all the eigenvalues, and optionally, the eigenvectors */
+/* of a complex generalized Hermitian-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. Here A and */
+/* B are assumed to be Hermitian and B is also positive definite. */
+/* If eigenvectors are desired, it uses a divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA, N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+
+/* On exit, if JOBZ = 'V', then if INFO = 0, A contains the */
+/* matrix Z of eigenvectors. The eigenvectors are normalized */
+/* as follows: */
+/* if ITYPE = 1 or 2, Z**H*B*Z = I; */
+/* if ITYPE = 3, Z**H*inv(B)*Z = I. */
+/* If JOBZ = 'N', then on exit the upper triangle (if UPLO='U') */
+/* or the lower triangle (if UPLO='L') of A, including the */
+/* diagonal, is destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX array, dimension (LDB, N) */
+/* On entry, the Hermitian matrix B. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of B contains the */
+/* upper triangular part of the matrix B. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of B contains */
+/* the lower triangular part of the matrix B. */
+
+/* On exit, if INFO <= N, the part of B containing the matrix is */
+/* overwritten by the triangular factor U or L from the Cholesky */
+/* factorization B = U**H*U or B = L*L**H. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. */
+/* If N <= 1, LWORK >= 1. */
+/* If JOBZ = 'N' and N > 1, LWORK >= N + 1. */
+/* If JOBZ = 'V' and N > 1, LWORK >= 2*N + N**2. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK, RWORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* RWORK (workspace/output) REAL array, dimension (MAX(1,LRWORK)) */
+/* On exit, if INFO = 0, RWORK(1) returns the optimal LRWORK. */
+
+/* LRWORK (input) INTEGER */
+/* The dimension of the array RWORK. */
+/* If N <= 1, LRWORK >= 1. */
+/* If JOBZ = 'N' and N > 1, LRWORK >= N. */
+/* If JOBZ = 'V' and N > 1, LRWORK >= 1 + 5*N + 2*N**2. */
+
+/* If LRWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If N <= 1, LIWORK >= 1. */
+/* If JOBZ = 'N' and N > 1, LIWORK >= 1. */
+/* If JOBZ = 'V' and N > 1, LIWORK >= 3 + 5*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: CPOTRF or CHEEVD returned an error code: */
+/* <= N: if INFO = i and JOBZ = 'N', then the algorithm */
+/* failed to converge; i off-diagonal elements of an */
+/* intermediate tridiagonal form did not converge to */
+/* zero; */
+/* if INFO = i and JOBZ = 'V', then the algorithm */
+/* failed to compute an eigenvalue while working on */
+/* the submatrix lying in rows and columns INFO/(N+1) */
+/* through mod(INFO,N+1); */
+/* > N: if INFO = N + i, for 1 <= i <= N, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* Modified so that no backsubstitution is performed if CHEEVD fails to */
+/* converge (NEIG in old code could be greater than N causing out of */
+/* bounds reference to A - reported by Ralf Meyer). Also corrected the */
+/* description of INFO and the test on ITYPE. Sven, 16 Feb 05. */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --w;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1;
+
+ *info = 0;
+ if (*n <= 1) {
+ lwmin = 1;
+ lrwmin = 1;
+ liwmin = 1;
+ } else if (wantz) {
+ lwmin = (*n << 1) + *n * *n;
+ lrwmin = *n * 5 + 1 + (*n << 1) * *n;
+ liwmin = *n * 5 + 3;
+ } else {
+ lwmin = *n + 1;
+ lrwmin = *n;
+ liwmin = 1;
+ }
+ lopt = lwmin;
+ lropt = lrwmin;
+ liopt = liwmin;
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -2;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ }
+
+ if (*info == 0) {
+ work[1].r = (real) lopt, work[1].i = 0.f;
+ rwork[1] = (real) lropt;
+ iwork[1] = liopt;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -11;
+ } else if (*lrwork < lrwmin && ! lquery) {
+ *info = -13;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -15;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHEGVD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a Cholesky factorization of B. */
+
+ cpotrf_(uplo, n, &b[b_offset], ldb, info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+ chegst_(itype, uplo, n, &a[a_offset], lda, &b[b_offset], ldb, info);
+ cheevd_(jobz, uplo, n, &a[a_offset], lda, &w[1], &work[1], lwork, &rwork[
+ 1], lrwork, &iwork[1], liwork, info);
+/* Computing MAX */
+ r__1 = (real) lopt, r__2 = work[1].r;
+ lopt = dmax(r__1,r__2);
+/* Computing MAX */
+ r__1 = (real) lropt;
+ lropt = dmax(r__1,rwork[1]);
+/* Computing MAX */
+ r__1 = (real) liopt, r__2 = (real) iwork[1];
+ liopt = dmax(r__1,r__2);
+
+ if (wantz && *info == 0) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+ if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'N';
+ } else {
+ *(unsigned char *)trans = 'C';
+ }
+
+ ctrsm_("Left", uplo, trans, "Non-unit", n, n, &c_b1, &b[b_offset],
+ ldb, &a[a_offset], lda);
+
+ } else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'C';
+ } else {
+ *(unsigned char *)trans = 'N';
+ }
+
+ ctrmm_("Left", uplo, trans, "Non-unit", n, n, &c_b1, &b[b_offset],
+ ldb, &a[a_offset], lda);
+ }
+ }
+
+ work[1].r = (real) lopt, work[1].i = 0.f;
+ rwork[1] = (real) lropt;
+ iwork[1] = liopt;
+
+ return 0;
+
+/* End of CHEGVD */
+
+} /* chegvd_ */
diff --git a/contrib/libs/clapack/chegvx.c b/contrib/libs/clapack/chegvx.c
new file mode 100644
index 0000000000..89d91defd7
--- /dev/null
+++ b/contrib/libs/clapack/chegvx.c
@@ -0,0 +1,394 @@
+/* chegvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int chegvx_(integer *itype, char *jobz, char *range, char *
+ uplo, integer *n, complex *a, integer *lda, complex *b, integer *ldb,
+ real *vl, real *vu, integer *il, integer *iu, real *abstol, integer *
+ m, real *w, complex *z__, integer *ldz, complex *work, integer *lwork,
+ real *rwork, integer *iwork, integer *ifail, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, z_dim1, z_offset, i__1, i__2;
+
+ /* Local variables */
+ integer nb;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ctrmm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *);
+ char trans[1];
+ extern /* Subroutine */ int ctrsm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *);
+ logical upper, wantz, alleig, indeig, valeig;
+ extern /* Subroutine */ int chegst_(integer *, char *, integer *, complex
+ *, integer *, complex *, integer *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), cheevx_(
+ char *, char *, char *, integer *, complex *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, real *, complex *
+, integer *, complex *, integer *, real *, integer *, integer *,
+ integer *), cpotrf_(char *, integer *,
+ complex *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHEGVX computes selected eigenvalues, and optionally, eigenvectors */
+/* of a complex generalized Hermitian-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. Here A and */
+/* B are assumed to be Hermitian and B is also positive definite. */
+/* Eigenvalues and eigenvectors can be selected by specifying either a */
+/* range of values or a range of indices for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+/* * */
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA, N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+
+/* On exit, the lower triangle (if UPLO='L') or the upper */
+/* triangle (if UPLO='U') of A, including the diagonal, is */
+/* destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX array, dimension (LDB, N) */
+/* On entry, the Hermitian matrix B. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of B contains the */
+/* upper triangular part of the matrix B. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of B contains */
+/* the lower triangular part of the matrix B. */
+
+/* On exit, if INFO <= N, the part of B containing the matrix is */
+/* overwritten by the triangular factor U or L from the Cholesky */
+/* factorization B = U**H*U or B = L*L**H. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) REAL */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing A to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*SLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*SLAMCH('S'). */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) REAL array, dimension (N) */
+/* The first M elements contain the selected */
+/* eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* The eigenvectors are normalized as follows: */
+/* if ITYPE = 1 or 2, Z**T*B*Z = I; */
+/* if ITYPE = 3, Z**T*inv(B)*Z = I. */
+
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and the */
+/* index of the eigenvector is returned in IFAIL. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,2*N). */
+/* For optimal efficiency, LWORK >= (NB+1)*N, */
+/* where NB is the blocksize for CHETRD returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (7*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: CPOTRF or CHEEVX returned an error code: */
+/* <= N: if INFO = i, CHEEVX failed to converge; */
+/* i eigenvectors failed to converge. Their indices */
+/* are stored in array IFAIL. */
+/* > N: if INFO = N + i, for 1 <= i <= N, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+ lquery = *lwork == -1;
+
+ *info = 0;
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -2;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -3;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -11;
+ }
+ } else if (indeig) {
+ if (*il < 1 || *il > max(1,*n)) {
+ *info = -12;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -13;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -18;
+ }
+ }
+
+ if (*info == 0) {
+ nb = ilaenv_(&c__1, "CHETRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = 1, i__2 = (nb + 1) * *n;
+ lwkopt = max(i__1,i__2);
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+/* Computing MAX */
+ i__1 = 1, i__2 = *n << 1;
+ if (*lwork < max(i__1,i__2) && ! lquery) {
+ *info = -20;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHEGVX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a Cholesky factorization of B. */
+
+ cpotrf_(uplo, n, &b[b_offset], ldb, info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+ chegst_(itype, uplo, n, &a[a_offset], lda, &b[b_offset], ldb, info);
+ cheevx_(jobz, range, uplo, n, &a[a_offset], lda, vl, vu, il, iu, abstol,
+ m, &w[1], &z__[z_offset], ldz, &work[1], lwork, &rwork[1], &iwork[
+ 1], &ifail[1], info);
+
+ if (wantz) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+ if (*info > 0) {
+ *m = *info - 1;
+ }
+ if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'N';
+ } else {
+ *(unsigned char *)trans = 'C';
+ }
+
+ ctrsm_("Left", uplo, trans, "Non-unit", n, m, &c_b1, &b[b_offset],
+ ldb, &z__[z_offset], ldz);
+
+ } else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'C';
+ } else {
+ *(unsigned char *)trans = 'N';
+ }
+
+ ctrmm_("Left", uplo, trans, "Non-unit", n, m, &c_b1, &b[b_offset],
+ ldb, &z__[z_offset], ldz);
+ }
+ }
+
+/* Set WORK(1) to optimal complex workspace size. */
+
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CHEGVX */
+
+} /* chegvx_ */
diff --git a/contrib/libs/clapack/cherfs.c b/contrib/libs/clapack/cherfs.c
new file mode 100644
index 0000000000..4811c600a5
--- /dev/null
+++ b/contrib/libs/clapack/cherfs.c
@@ -0,0 +1,472 @@
+/* cherfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int cherfs_(char *uplo, integer *n, integer *nrhs, complex *
+ a, integer *lda, complex *af, integer *ldaf, integer *ipiv, complex *
+ b, integer *ldb, complex *x, integer *ldx, real *ferr, real *berr,
+ complex *work, real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+ x_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer i__, j, k;
+ real s, xk;
+ integer nz;
+ real eps;
+ integer kase;
+ real safe1, safe2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int chemv_(char *, integer *, complex *, complex *
+, integer *, complex *, integer *, complex *, complex *, integer *
+);
+ integer isave[3];
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), caxpy_(integer *, complex *, complex *,
+ integer *, complex *, integer *);
+ integer count;
+ logical upper;
+ extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+ *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *), chetrs_(
+ char *, integer *, integer *, complex *, integer *, integer *,
+ complex *, integer *, integer *);
+ real lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHERFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is Hermitian indefinite, and */
+/* provides error bounds and backward error estimates for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The Hermitian matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input) COMPLEX array, dimension (LDAF,N) */
+/* The factored form of the matrix A. AF contains the block */
+/* diagonal matrix D and the multipliers used to obtain the */
+/* factor U or L from the factorization A = U*D*U**H or */
+/* A = L*D*L**H as computed by CHETRF. */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by CHETRF. */
+
+/* B (input) COMPLEX array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by CHETRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ af_dim1 = *ldaf;
+ af_offset = 1 + af_dim1;
+ af -= af_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldaf < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldx < max(1,*n)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHERFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.f;
+ berr[j] = 0.f;
+/* L10: */
+ }
+ return 0;
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = *n + 1;
+ eps = slamch_("Epsilon");
+ safmin = slamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+ ccopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+ q__1.r = -1.f, q__1.i = -0.f;
+ chemv_(uplo, n, &q__1, &a[a_offset], lda, &x[j * x_dim1 + 1], &c__1, &
+ c_b1, &work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[i__] = (r__1 = b[i__3].r, dabs(r__1)) + (r__2 = r_imag(&b[
+ i__ + j * b_dim1]), dabs(r__2));
+/* L30: */
+ }
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+ if (upper) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ i__3 = k + j * x_dim1;
+ xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[k + j
+ * x_dim1]), dabs(r__2));
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ rwork[i__] += ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[i__ + k * a_dim1]), dabs(r__2))) * xk;
+ i__4 = i__ + k * a_dim1;
+ i__5 = i__ + j * x_dim1;
+ s += ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 = r_imag(&a[
+ i__ + k * a_dim1]), dabs(r__2))) * ((r__3 = x[
+ i__5].r, dabs(r__3)) + (r__4 = r_imag(&x[i__ + j *
+ x_dim1]), dabs(r__4)));
+/* L40: */
+ }
+ i__3 = k + k * a_dim1;
+ rwork[k] = rwork[k] + (r__1 = a[i__3].r, dabs(r__1)) * xk + s;
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ i__3 = k + j * x_dim1;
+ xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[k + j
+ * x_dim1]), dabs(r__2));
+ i__3 = k + k * a_dim1;
+ rwork[k] += (r__1 = a[i__3].r, dabs(r__1)) * xk;
+ i__3 = *n;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ rwork[i__] += ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[i__ + k * a_dim1]), dabs(r__2))) * xk;
+ i__4 = i__ + k * a_dim1;
+ i__5 = i__ + j * x_dim1;
+ s += ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 = r_imag(&a[
+ i__ + k * a_dim1]), dabs(r__2))) * ((r__3 = x[
+ i__5].r, dabs(r__3)) + (r__4 = r_imag(&x[i__ + j *
+ x_dim1]), dabs(r__4)));
+/* L60: */
+ }
+ rwork[k] += s;
+/* L70: */
+ }
+ }
+ s = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+/* Computing MAX */
+ i__3 = i__;
+ r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2))) / rwork[i__];
+ s = dmax(r__3,r__4);
+ } else {
+/* Computing MAX */
+ i__3 = i__;
+ r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + safe1) / (rwork[i__]
+ + safe1);
+ s = dmax(r__3,r__4);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ chetrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[1],
+ n, info);
+ caxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use CLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+ i__3 = i__;
+ rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+ i__];
+ } else {
+ i__3 = i__;
+ rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+ i__] + safe1;
+ }
+/* L90: */
+ }
+
+ kase = 0;
+L100:
+ clacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+ chetrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[
+ 1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L110: */
+ }
+ } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L120: */
+ }
+ chetrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[
+ 1], n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * x_dim1;
+ r__3 = lstres, r__4 = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[i__ + j * x_dim1]), dabs(r__2));
+ lstres = dmax(r__3,r__4);
+/* L130: */
+ }
+ if (lstres != 0.f) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of CHERFS */
+
+} /* cherfs_ */
diff --git a/contrib/libs/clapack/chesv.c b/contrib/libs/clapack/chesv.c
new file mode 100644
index 0000000000..a0e4d1f0f4
--- /dev/null
+++ b/contrib/libs/clapack/chesv.c
@@ -0,0 +1,214 @@
+/* chesv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int chesv_(char *uplo, integer *n, integer *nrhs, complex *a,
+ integer *lda, integer *ipiv, complex *b, integer *ldb, complex *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ integer nb;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int chetrf_(char *, integer *, complex *, integer
+ *, integer *, complex *, integer *, integer *), xerbla_(
+ char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int chetrs_(char *, integer *, integer *, complex
+ *, integer *, integer *, complex *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHESV computes the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N Hermitian matrix and X and B are N-by-NRHS */
+/* matrices. */
+
+/* The diagonal pivoting method is used to factor A as */
+/* A = U * D * U**H, if UPLO = 'U', or */
+/* A = L * D * L**H, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is Hermitian and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. The factored form of A is then */
+/* used to solve the system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the block diagonal matrix D and the */
+/* multipliers used to obtain the factor U or L from the */
+/* factorization A = U*D*U**H or A = L*D*L**H as computed by */
+/* CHETRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D, as */
+/* determined by CHETRF. If IPIV(k) > 0, then rows and columns */
+/* k and IPIV(k) were interchanged, and D(k,k) is a 1-by-1 */
+/* diagonal block. If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, */
+/* then rows and columns k-1 and -IPIV(k) were interchanged and */
+/* D(k-1:k,k-1:k) is a 2-by-2 diagonal block. If UPLO = 'L' and */
+/* IPIV(k) = IPIV(k+1) < 0, then rows and columns k+1 and */
+/* -IPIV(k) were interchanged and D(k:k+1,k:k+1) is a 2-by-2 */
+/* diagonal block. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of WORK. LWORK >= 1, and for best performance */
+/* LWORK >= max(1,N*NB), where NB is the optimal blocksize for */
+/* CHETRF. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, so the solution could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ } else if (*lwork < 1 && ! lquery) {
+ *info = -10;
+ }
+
+ if (*info == 0) {
+ if (*n == 0) {
+ lwkopt = 1;
+ } else {
+ nb = ilaenv_(&c__1, "CHETRF", uplo, n, &c_n1, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHESV ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+ chetrf_(uplo, n, &a[a_offset], lda, &ipiv[1], &work[1], lwork, info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ chetrs_(uplo, n, nrhs, &a[a_offset], lda, &ipiv[1], &b[b_offset], ldb,
+ info);
+
+ }
+
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CHESV */
+
+} /* chesv_ */
diff --git a/contrib/libs/clapack/chesvx.c b/contrib/libs/clapack/chesvx.c
new file mode 100644
index 0000000000..f0252377ff
--- /dev/null
+++ b/contrib/libs/clapack/chesvx.c
@@ -0,0 +1,368 @@
+/* chesvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int chesvx_(char *fact, char *uplo, integer *n, integer *
+ nrhs, complex *a, integer *lda, complex *af, integer *ldaf, integer *
+ ipiv, complex *b, integer *ldb, complex *x, integer *ldx, real *rcond,
+ real *ferr, real *berr, complex *work, integer *lwork, real *rwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+ x_offset, i__1, i__2;
+
+ /* Local variables */
+ integer nb;
+ extern logical lsame_(char *, char *);
+ real anorm;
+ extern doublereal clanhe_(char *, char *, integer *, complex *, integer *,
+ real *);
+ extern /* Subroutine */ int checon_(char *, integer *, complex *, integer
+ *, integer *, real *, real *, complex *, integer *);
+ extern doublereal slamch_(char *);
+ logical nofact;
+ extern /* Subroutine */ int cherfs_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *, integer *, complex *, integer
+ *, complex *, integer *, real *, real *, complex *, real *,
+ integer *), chetrf_(char *, integer *, complex *, integer
+ *, integer *, complex *, integer *, integer *), clacpy_(
+ char *, integer *, integer *, complex *, integer *, complex *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), chetrs_(
+ char *, integer *, integer *, complex *, integer *, integer *,
+ complex *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHESVX uses the diagonal pivoting factorization to compute the */
+/* solution to a complex system of linear equations A * X = B, */
+/* where A is an N-by-N Hermitian matrix and X and B are N-by-NRHS */
+/* matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the diagonal pivoting method is used to factor A. */
+/* The form of the factorization is */
+/* A = U * D * U**H, if UPLO = 'U', or */
+/* A = L * D * L**H, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is Hermitian and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* 2. If some D(i,i)=0, so that D is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of A has been */
+/* supplied on entry. */
+/* = 'F': On entry, AF and IPIV contain the factored form */
+/* of A. A, AF and IPIV will not be modified. */
+/* = 'N': The matrix A will be copied to AF and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The Hermitian matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input or output) COMPLEX array, dimension (LDAF,N) */
+/* If FACT = 'F', then AF is an input argument and on entry */
+/* contains the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**H or A = L*D*L**H as computed by CHETRF. */
+
+/* If FACT = 'N', then AF is an output argument and on exit */
+/* returns the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**H or A = L*D*L**H. */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by CHETRF. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by CHETRF. */
+
+/* B (input) COMPLEX array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) REAL */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A. If RCOND is less than the machine precision (in */
+/* particular, if RCOND = 0), the matrix is singular to working */
+/* precision. This condition is indicated by a return code of */
+/* INFO > 0. */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of WORK. LWORK >= max(1,2*N), and for best */
+/* performance, when FACT = 'N', LWORK >= max(1,2*N,N*NB), where */
+/* NB is the optimal blocksize for CHETRF. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: D(i,i) is exactly zero. The factorization */
+/* has been completed but the factor D is exactly */
+/* singular, so the solution and error bounds could */
+/* not be computed. RCOND = 0 is returned. */
+/* = N+1: D is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ af_dim1 = *ldaf;
+ af_offset = 1 + af_dim1;
+ af -= af_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ lquery = *lwork == -1;
+ if (! nofact && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! lsame_(uplo, "U") && ! lsame_(uplo,
+ "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else if (*ldaf < max(1,*n)) {
+ *info = -8;
+ } else if (*ldb < max(1,*n)) {
+ *info = -11;
+ } else if (*ldx < max(1,*n)) {
+ *info = -13;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n << 1;
+ if (*lwork < max(i__1,i__2) && ! lquery) {
+ *info = -18;
+ }
+ }
+
+ if (*info == 0) {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n << 1;
+ lwkopt = max(i__1,i__2);
+ if (nofact) {
+ nb = ilaenv_(&c__1, "CHETRF", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = lwkopt, i__2 = *n * nb;
+ lwkopt = max(i__1,i__2);
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHESVX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+ if (nofact) {
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+ clacpy_(uplo, n, n, &a[a_offset], lda, &af[af_offset], ldaf);
+ chetrf_(uplo, n, &af[af_offset], ldaf, &ipiv[1], &work[1], lwork,
+ info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.f;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ anorm = clanhe_("I", uplo, n, &a[a_offset], lda, &rwork[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ checon_(uplo, n, &af[af_offset], ldaf, &ipiv[1], &anorm, rcond, &work[1],
+ info);
+
+/* Compute the solution vectors X. */
+
+ clacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ chetrs_(uplo, n, nrhs, &af[af_offset], ldaf, &ipiv[1], &x[x_offset], ldx,
+ info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+ cherfs_(uplo, n, nrhs, &a[a_offset], lda, &af[af_offset], ldaf, &ipiv[1],
+ &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[1]
+, &rwork[1], info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < slamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CHESVX */
+
+} /* chesvx_ */
diff --git a/contrib/libs/clapack/chetd2.c b/contrib/libs/clapack/chetd2.c
new file mode 100644
index 0000000000..a18ee716fc
--- /dev/null
+++ b/contrib/libs/clapack/chetd2.c
@@ -0,0 +1,358 @@
+/* chetd2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b2 = {0.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int chetd2_(char *uplo, integer *n, complex *a, integer *lda,
+ real *d__, real *e, complex *tau, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Local variables */
+ integer i__;
+ complex taui;
+ extern /* Subroutine */ int cher2_(char *, integer *, complex *, complex *
+, integer *, complex *, integer *, complex *, integer *);
+ complex alpha;
+ extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int chemv_(char *, integer *, complex *, complex *
+, integer *, complex *, integer *, complex *, complex *, integer *
+), caxpy_(integer *, complex *, complex *, integer *,
+ complex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int clarfg_(integer *, complex *, complex *,
+ integer *, complex *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHETD2 reduces a complex Hermitian matrix A to real symmetric */
+/* tridiagonal form T by a unitary similarity transformation: */
+/* Q' * A * Q = T. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* Hermitian matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+/* On exit, if UPLO = 'U', the diagonal and first superdiagonal */
+/* of A are overwritten by the corresponding elements of the */
+/* tridiagonal matrix T, and the elements above the first */
+/* superdiagonal, with the array TAU, represent the unitary */
+/* matrix Q as a product of elementary reflectors; if UPLO */
+/* = 'L', the diagonal and first subdiagonal of A are over- */
+/* written by the corresponding elements of the tridiagonal */
+/* matrix T, and the elements below the first subdiagonal, with */
+/* the array TAU, represent the unitary matrix Q as a product */
+/* of elementary reflectors. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* D (output) REAL array, dimension (N) */
+/* The diagonal elements of the tridiagonal matrix T: */
+/* D(i) = A(i,i). */
+
+/* E (output) REAL array, dimension (N-1) */
+/* The off-diagonal elements of the tridiagonal matrix T: */
+/* E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. */
+
+/* TAU (output) COMPLEX array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(n-1) . . . H(2) H(1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in */
+/* A(1:i-1,i+1), and tau in TAU(i). */
+
+/* If UPLO = 'L', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(1) H(2) . . . H(n-1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i), */
+/* and tau in TAU(i). */
+
+/* The contents of A on exit are illustrated by the following examples */
+/* with n = 5: */
+
+/* if UPLO = 'U': if UPLO = 'L': */
+
+/* ( d e v2 v3 v4 ) ( d ) */
+/* ( d e v3 v4 ) ( e d ) */
+/* ( d e v4 ) ( v1 e d ) */
+/* ( d e ) ( v1 v2 e d ) */
+/* ( d ) ( v1 v2 v3 e d ) */
+
+/* where d and e denote diagonal and off-diagonal elements of T, and vi */
+/* denotes an element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tau;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHETD2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Reduce the upper triangle of A */
+
+ i__1 = *n + *n * a_dim1;
+ i__2 = *n + *n * a_dim1;
+ r__1 = a[i__2].r;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ for (i__ = *n - 1; i__ >= 1; --i__) {
+
+/* Generate elementary reflector H(i) = I - tau * v * v' */
+/* to annihilate A(1:i-1,i+1) */
+
+ i__1 = i__ + (i__ + 1) * a_dim1;
+ alpha.r = a[i__1].r, alpha.i = a[i__1].i;
+ clarfg_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &taui);
+ i__1 = i__;
+ e[i__1] = alpha.r;
+
+ if (taui.r != 0.f || taui.i != 0.f) {
+
+/* Apply H(i) from both sides to A(1:i,1:i) */
+
+ i__1 = i__ + (i__ + 1) * a_dim1;
+ a[i__1].r = 1.f, a[i__1].i = 0.f;
+
+/* Compute x := tau * A * v storing x in TAU(1:i) */
+
+ chemv_(uplo, &i__, &taui, &a[a_offset], lda, &a[(i__ + 1) *
+ a_dim1 + 1], &c__1, &c_b2, &tau[1], &c__1);
+
+/* Compute w := x - 1/2 * tau * (x'*v) * v */
+
+ q__3.r = -.5f, q__3.i = -0.f;
+ q__2.r = q__3.r * taui.r - q__3.i * taui.i, q__2.i = q__3.r *
+ taui.i + q__3.i * taui.r;
+ cdotc_(&q__4, &i__, &tau[1], &c__1, &a[(i__ + 1) * a_dim1 + 1]
+, &c__1);
+ q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r *
+ q__4.i + q__2.i * q__4.r;
+ alpha.r = q__1.r, alpha.i = q__1.i;
+ caxpy_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &tau[
+ 1], &c__1);
+
+/* Apply the transformation as a rank-2 update: */
+/* A := A - v * w' - w * v' */
+
+ q__1.r = -1.f, q__1.i = -0.f;
+ cher2_(uplo, &i__, &q__1, &a[(i__ + 1) * a_dim1 + 1], &c__1, &
+ tau[1], &c__1, &a[a_offset], lda);
+
+ } else {
+ i__1 = i__ + i__ * a_dim1;
+ i__2 = i__ + i__ * a_dim1;
+ r__1 = a[i__2].r;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ }
+ i__1 = i__ + (i__ + 1) * a_dim1;
+ i__2 = i__;
+ a[i__1].r = e[i__2], a[i__1].i = 0.f;
+ i__1 = i__ + 1;
+ i__2 = i__ + 1 + (i__ + 1) * a_dim1;
+ d__[i__1] = a[i__2].r;
+ i__1 = i__;
+ tau[i__1].r = taui.r, tau[i__1].i = taui.i;
+/* L10: */
+ }
+ i__1 = a_dim1 + 1;
+ d__[1] = a[i__1].r;
+ } else {
+
+/* Reduce the lower triangle of A */
+
+ i__1 = a_dim1 + 1;
+ i__2 = a_dim1 + 1;
+ r__1 = a[i__2].r;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) = I - tau * v * v' */
+/* to annihilate A(i+2:n,i) */
+
+ i__2 = i__ + 1 + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ clarfg_(&i__2, &alpha, &a[min(i__3, *n)+ i__ * a_dim1], &c__1, &
+ taui);
+ i__2 = i__;
+ e[i__2] = alpha.r;
+
+ if (taui.r != 0.f || taui.i != 0.f) {
+
+/* Apply H(i) from both sides to A(i+1:n,i+1:n) */
+
+ i__2 = i__ + 1 + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Compute x := tau * A * v storing y in TAU(i:n-1) */
+
+ i__2 = *n - i__;
+ chemv_(uplo, &i__2, &taui, &a[i__ + 1 + (i__ + 1) * a_dim1],
+ lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b2, &tau[
+ i__], &c__1);
+
+/* Compute w := x - 1/2 * tau * (x'*v) * v */
+
+ q__3.r = -.5f, q__3.i = -0.f;
+ q__2.r = q__3.r * taui.r - q__3.i * taui.i, q__2.i = q__3.r *
+ taui.i + q__3.i * taui.r;
+ i__2 = *n - i__;
+ cdotc_(&q__4, &i__2, &tau[i__], &c__1, &a[i__ + 1 + i__ *
+ a_dim1], &c__1);
+ q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r *
+ q__4.i + q__2.i * q__4.r;
+ alpha.r = q__1.r, alpha.i = q__1.i;
+ i__2 = *n - i__;
+ caxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+ i__], &c__1);
+
+/* Apply the transformation as a rank-2 update: */
+/* A := A - v * w' - w * v' */
+
+ i__2 = *n - i__;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cher2_(uplo, &i__2, &q__1, &a[i__ + 1 + i__ * a_dim1], &c__1,
+ &tau[i__], &c__1, &a[i__ + 1 + (i__ + 1) * a_dim1],
+ lda);
+
+ } else {
+ i__2 = i__ + 1 + (i__ + 1) * a_dim1;
+ i__3 = i__ + 1 + (i__ + 1) * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ }
+ i__2 = i__ + 1 + i__ * a_dim1;
+ i__3 = i__;
+ a[i__2].r = e[i__3], a[i__2].i = 0.f;
+ i__2 = i__;
+ i__3 = i__ + i__ * a_dim1;
+ d__[i__2] = a[i__3].r;
+ i__2 = i__;
+ tau[i__2].r = taui.r, tau[i__2].i = taui.i;
+/* L20: */
+ }
+ i__1 = *n;
+ i__2 = *n + *n * a_dim1;
+ d__[i__1] = a[i__2].r;
+ }
+
+ return 0;
+
+/* End of CHETD2 */
+
+} /* chetd2_ */
diff --git a/contrib/libs/clapack/chetf2.c b/contrib/libs/clapack/chetf2.c
new file mode 100644
index 0000000000..8e980515e4
--- /dev/null
+++ b/contrib/libs/clapack/chetf2.c
@@ -0,0 +1,802 @@
+/* chetf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int chetf2_(char *uplo, integer *n, complex *a, integer *lda,
+ integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+ real r__1, r__2, r__3, r__4;
+ complex q__1, q__2, q__3, q__4, q__5, q__6;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_imag(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ real d__;
+ integer i__, j, k;
+ complex t;
+ real r1, d11;
+ complex d12;
+ real d22;
+ complex d21;
+ integer kk, kp;
+ complex wk;
+ real tt;
+ complex wkm1, wkp1;
+ extern /* Subroutine */ int cher_(char *, integer *, real *, complex *,
+ integer *, complex *, integer *);
+ integer imax, jmax;
+ real alpha;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ integer kstep;
+ logical upper;
+ extern doublereal slapy2_(real *, real *);
+ real absakk;
+ extern integer icamax_(integer *, complex *, integer *);
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *), xerbla_(char *, integer *);
+ real colmax;
+ extern logical sisnan_(real *);
+ real rowmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHETF2 computes the factorization of a complex Hermitian matrix A */
+/* using the Bunch-Kaufman diagonal pivoting method: */
+
+/* A = U*D*U' or A = L*D*L' */
+
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, U' is the conjugate transpose of U, and D is */
+/* Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* This is the unblocked version of the algorithm, calling Level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* Hermitian matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L (see below for further details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, D(k,k) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, and division by zero will occur if it */
+/* is used to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* 09-29-06 - patch from */
+/* Bobby Cheng, MathWorks */
+
+/* Replace l.210 and l.392 */
+/* IF( MAX( ABSAKK, COLMAX ).EQ.ZERO ) THEN */
+/* by */
+/* IF( (MAX( ABSAKK, COLMAX ).EQ.ZERO) .OR. SISNAN(ABSAKK) ) THEN */
+
+/* 01-01-96 - Based on modifications by */
+/* J. Lewis, Boeing Computer Services Company */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* If UPLO = 'U', then A = U*D*U', where */
+/* U = P(n)*U(n)* ... *P(k)U(k)* ..., */
+/* i.e., U is a product of terms P(k)*U(k), where k decreases from n to */
+/* 1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I v 0 ) k-s */
+/* U(k) = ( 0 I 0 ) s */
+/* ( 0 0 I ) n-k */
+/* k-s s n-k */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */
+/* If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */
+/* and A(k,k), and v overwrites A(1:k-2,k-1:k). */
+
+/* If UPLO = 'L', then A = L*D*L', where */
+/* L = P(1)*L(1)* ... *P(k)*L(k)* ..., */
+/* i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */
+/* n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I 0 0 ) k-1 */
+/* L(k) = ( 0 I 0 ) s */
+/* ( 0 v I ) n-k-s+1 */
+/* k-1 s n-k-s+1 */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */
+/* If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */
+/* and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHETF2", &i__1);
+ return 0;
+ }
+
+/* Initialize ALPHA for use in choosing pivot block size. */
+
+ alpha = (sqrt(17.f) + 1.f) / 8.f;
+
+ if (upper) {
+
+/* Factorize A as U*D*U' using the upper triangle of A */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2 */
+
+ k = *n;
+L10:
+
+/* If K < 1, exit from loop */
+
+ if (k < 1) {
+ goto L90;
+ }
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ i__1 = k + k * a_dim1;
+ absakk = (r__1 = a[i__1].r, dabs(r__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ imax = icamax_(&i__1, &a[k * a_dim1 + 1], &c__1);
+ i__1 = imax + k * a_dim1;
+ colmax = (r__1 = a[i__1].r, dabs(r__1)) + (r__2 = r_imag(&a[imax
+ + k * a_dim1]), dabs(r__2));
+ } else {
+ colmax = 0.f;
+ }
+
+ if (dmax(absakk,colmax) == 0.f || sisnan_(&absakk)) {
+
+/* Column K is zero or contains a NaN: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ r__1 = a[i__2].r;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = k - imax;
+ jmax = imax + icamax_(&i__1, &a[imax + (imax + 1) * a_dim1],
+ lda);
+ i__1 = imax + jmax * a_dim1;
+ rowmax = (r__1 = a[i__1].r, dabs(r__1)) + (r__2 = r_imag(&a[
+ imax + jmax * a_dim1]), dabs(r__2));
+ if (imax > 1) {
+ i__1 = imax - 1;
+ jmax = icamax_(&i__1, &a[imax * a_dim1 + 1], &c__1);
+/* Computing MAX */
+ i__1 = jmax + imax * a_dim1;
+ r__3 = rowmax, r__4 = (r__1 = a[i__1].r, dabs(r__1)) + (
+ r__2 = r_imag(&a[jmax + imax * a_dim1]), dabs(
+ r__2));
+ rowmax = dmax(r__3,r__4);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else /* if(complicated condition) */ {
+ i__1 = imax + imax * a_dim1;
+ if ((r__1 = a[i__1].r, dabs(r__1)) >= alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+ } else {
+
+/* interchange rows and columns K-1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+ }
+
+ kk = k - kstep + 1;
+ if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the leading */
+/* submatrix A(1:k,1:k) */
+
+ i__1 = kp - 1;
+ cswap_(&i__1, &a[kk * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1],
+ &c__1);
+ i__1 = kk - 1;
+ for (j = kp + 1; j <= i__1; ++j) {
+ r_cnjg(&q__1, &a[j + kk * a_dim1]);
+ t.r = q__1.r, t.i = q__1.i;
+ i__2 = j + kk * a_dim1;
+ r_cnjg(&q__1, &a[kp + j * a_dim1]);
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ i__2 = kp + j * a_dim1;
+ a[i__2].r = t.r, a[i__2].i = t.i;
+/* L20: */
+ }
+ i__1 = kp + kk * a_dim1;
+ r_cnjg(&q__1, &a[kp + kk * a_dim1]);
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ i__1 = kk + kk * a_dim1;
+ r1 = a[i__1].r;
+ i__1 = kk + kk * a_dim1;
+ i__2 = kp + kp * a_dim1;
+ r__1 = a[i__2].r;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ i__1 = kp + kp * a_dim1;
+ a[i__1].r = r1, a[i__1].i = 0.f;
+ if (kstep == 2) {
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ r__1 = a[i__2].r;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ i__1 = k - 1 + k * a_dim1;
+ t.r = a[i__1].r, t.i = a[i__1].i;
+ i__1 = k - 1 + k * a_dim1;
+ i__2 = kp + k * a_dim1;
+ a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+ i__1 = kp + k * a_dim1;
+ a[i__1].r = t.r, a[i__1].i = t.i;
+ }
+ } else {
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ r__1 = a[i__2].r;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ if (kstep == 2) {
+ i__1 = k - 1 + (k - 1) * a_dim1;
+ i__2 = k - 1 + (k - 1) * a_dim1;
+ r__1 = a[i__2].r;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ }
+ }
+
+/* Update the leading submatrix */
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = U(k)*D(k) */
+
+/* where U(k) is the k-th column of U */
+
+/* Perform a rank-1 update of A(1:k-1,1:k-1) as */
+
+/* A := A - U(k)*D(k)*U(k)' = A - W(k)*1/D(k)*W(k)' */
+
+ i__1 = k + k * a_dim1;
+ r1 = 1.f / a[i__1].r;
+ i__1 = k - 1;
+ r__1 = -r1;
+ cher_(uplo, &i__1, &r__1, &a[k * a_dim1 + 1], &c__1, &a[
+ a_offset], lda);
+
+/* Store U(k) in column k */
+
+ i__1 = k - 1;
+ csscal_(&i__1, &r1, &a[k * a_dim1 + 1], &c__1);
+ } else {
+
+/* 2-by-2 pivot block D(k): columns k and k-1 now hold */
+
+/* ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */
+
+/* where U(k) and U(k-1) are the k-th and (k-1)-th columns */
+/* of U */
+
+/* Perform a rank-2 update of A(1:k-2,1:k-2) as */
+
+/* A := A - ( U(k-1) U(k) )*D(k)*( U(k-1) U(k) )' */
+/* = A - ( W(k-1) W(k) )*inv(D(k))*( W(k-1) W(k) )' */
+
+ if (k > 2) {
+
+ i__1 = k - 1 + k * a_dim1;
+ r__1 = a[i__1].r;
+ r__2 = r_imag(&a[k - 1 + k * a_dim1]);
+ d__ = slapy2_(&r__1, &r__2);
+ i__1 = k - 1 + (k - 1) * a_dim1;
+ d22 = a[i__1].r / d__;
+ i__1 = k + k * a_dim1;
+ d11 = a[i__1].r / d__;
+ tt = 1.f / (d11 * d22 - 1.f);
+ i__1 = k - 1 + k * a_dim1;
+ q__1.r = a[i__1].r / d__, q__1.i = a[i__1].i / d__;
+ d12.r = q__1.r, d12.i = q__1.i;
+ d__ = tt / d__;
+
+ for (j = k - 2; j >= 1; --j) {
+ i__1 = j + (k - 1) * a_dim1;
+ q__3.r = d11 * a[i__1].r, q__3.i = d11 * a[i__1].i;
+ r_cnjg(&q__5, &d12);
+ i__2 = j + k * a_dim1;
+ q__4.r = q__5.r * a[i__2].r - q__5.i * a[i__2].i,
+ q__4.i = q__5.r * a[i__2].i + q__5.i * a[i__2]
+ .r;
+ q__2.r = q__3.r - q__4.r, q__2.i = q__3.i - q__4.i;
+ q__1.r = d__ * q__2.r, q__1.i = d__ * q__2.i;
+ wkm1.r = q__1.r, wkm1.i = q__1.i;
+ i__1 = j + k * a_dim1;
+ q__3.r = d22 * a[i__1].r, q__3.i = d22 * a[i__1].i;
+ i__2 = j + (k - 1) * a_dim1;
+ q__4.r = d12.r * a[i__2].r - d12.i * a[i__2].i,
+ q__4.i = d12.r * a[i__2].i + d12.i * a[i__2]
+ .r;
+ q__2.r = q__3.r - q__4.r, q__2.i = q__3.i - q__4.i;
+ q__1.r = d__ * q__2.r, q__1.i = d__ * q__2.i;
+ wk.r = q__1.r, wk.i = q__1.i;
+ for (i__ = j; i__ >= 1; --i__) {
+ i__1 = i__ + j * a_dim1;
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__ + k * a_dim1;
+ r_cnjg(&q__4, &wk);
+ q__3.r = a[i__3].r * q__4.r - a[i__3].i * q__4.i,
+ q__3.i = a[i__3].r * q__4.i + a[i__3].i *
+ q__4.r;
+ q__2.r = a[i__2].r - q__3.r, q__2.i = a[i__2].i -
+ q__3.i;
+ i__4 = i__ + (k - 1) * a_dim1;
+ r_cnjg(&q__6, &wkm1);
+ q__5.r = a[i__4].r * q__6.r - a[i__4].i * q__6.i,
+ q__5.i = a[i__4].r * q__6.i + a[i__4].i *
+ q__6.r;
+ q__1.r = q__2.r - q__5.r, q__1.i = q__2.i -
+ q__5.i;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+/* L30: */
+ }
+ i__1 = j + k * a_dim1;
+ a[i__1].r = wk.r, a[i__1].i = wk.i;
+ i__1 = j + (k - 1) * a_dim1;
+ a[i__1].r = wkm1.r, a[i__1].i = wkm1.i;
+ i__1 = j + j * a_dim1;
+ i__2 = j + j * a_dim1;
+ r__1 = a[i__2].r;
+ q__1.r = r__1, q__1.i = 0.f;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+/* L40: */
+ }
+
+ }
+
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k - 1] = -kp;
+ }
+
+/* Decrease K and return to the start of the main loop */
+
+ k -= kstep;
+ goto L10;
+
+ } else {
+
+/* Factorize A as L*D*L' using the lower triangle of A */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2 */
+
+ k = 1;
+L50:
+
+/* If K > N, exit from loop */
+
+ if (k > *n) {
+ goto L90;
+ }
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ i__1 = k + k * a_dim1;
+ absakk = (r__1 = a[i__1].r, dabs(r__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ imax = k + icamax_(&i__1, &a[k + 1 + k * a_dim1], &c__1);
+ i__1 = imax + k * a_dim1;
+ colmax = (r__1 = a[i__1].r, dabs(r__1)) + (r__2 = r_imag(&a[imax
+ + k * a_dim1]), dabs(r__2));
+ } else {
+ colmax = 0.f;
+ }
+
+ if (dmax(absakk,colmax) == 0.f || sisnan_(&absakk)) {
+
+/* Column K is zero or contains a NaN: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ r__1 = a[i__2].r;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = imax - k;
+ jmax = k - 1 + icamax_(&i__1, &a[imax + k * a_dim1], lda);
+ i__1 = imax + jmax * a_dim1;
+ rowmax = (r__1 = a[i__1].r, dabs(r__1)) + (r__2 = r_imag(&a[
+ imax + jmax * a_dim1]), dabs(r__2));
+ if (imax < *n) {
+ i__1 = *n - imax;
+ jmax = imax + icamax_(&i__1, &a[imax + 1 + imax * a_dim1],
+ &c__1);
+/* Computing MAX */
+ i__1 = jmax + imax * a_dim1;
+ r__3 = rowmax, r__4 = (r__1 = a[i__1].r, dabs(r__1)) + (
+ r__2 = r_imag(&a[jmax + imax * a_dim1]), dabs(
+ r__2));
+ rowmax = dmax(r__3,r__4);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else /* if(complicated condition) */ {
+ i__1 = imax + imax * a_dim1;
+ if ((r__1 = a[i__1].r, dabs(r__1)) >= alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+ } else {
+
+/* interchange rows and columns K+1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+ }
+
+ kk = k + kstep - 1;
+ if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the trailing */
+/* submatrix A(k:n,k:n) */
+
+ if (kp < *n) {
+ i__1 = *n - kp;
+ cswap_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + 1
+ + kp * a_dim1], &c__1);
+ }
+ i__1 = kp - 1;
+ for (j = kk + 1; j <= i__1; ++j) {
+ r_cnjg(&q__1, &a[j + kk * a_dim1]);
+ t.r = q__1.r, t.i = q__1.i;
+ i__2 = j + kk * a_dim1;
+ r_cnjg(&q__1, &a[kp + j * a_dim1]);
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ i__2 = kp + j * a_dim1;
+ a[i__2].r = t.r, a[i__2].i = t.i;
+/* L60: */
+ }
+ i__1 = kp + kk * a_dim1;
+ r_cnjg(&q__1, &a[kp + kk * a_dim1]);
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ i__1 = kk + kk * a_dim1;
+ r1 = a[i__1].r;
+ i__1 = kk + kk * a_dim1;
+ i__2 = kp + kp * a_dim1;
+ r__1 = a[i__2].r;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ i__1 = kp + kp * a_dim1;
+ a[i__1].r = r1, a[i__1].i = 0.f;
+ if (kstep == 2) {
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ r__1 = a[i__2].r;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ i__1 = k + 1 + k * a_dim1;
+ t.r = a[i__1].r, t.i = a[i__1].i;
+ i__1 = k + 1 + k * a_dim1;
+ i__2 = kp + k * a_dim1;
+ a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+ i__1 = kp + k * a_dim1;
+ a[i__1].r = t.r, a[i__1].i = t.i;
+ }
+ } else {
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ r__1 = a[i__2].r;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ if (kstep == 2) {
+ i__1 = k + 1 + (k + 1) * a_dim1;
+ i__2 = k + 1 + (k + 1) * a_dim1;
+ r__1 = a[i__2].r;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ }
+ }
+
+/* Update the trailing submatrix */
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = L(k)*D(k) */
+
+/* where L(k) is the k-th column of L */
+
+ if (k < *n) {
+
+/* Perform a rank-1 update of A(k+1:n,k+1:n) as */
+
+/* A := A - L(k)*D(k)*L(k)' = A - W(k)*(1/D(k))*W(k)' */
+
+ i__1 = k + k * a_dim1;
+ r1 = 1.f / a[i__1].r;
+ i__1 = *n - k;
+ r__1 = -r1;
+ cher_(uplo, &i__1, &r__1, &a[k + 1 + k * a_dim1], &c__1, &
+ a[k + 1 + (k + 1) * a_dim1], lda);
+
+/* Store L(k) in column K */
+
+ i__1 = *n - k;
+ csscal_(&i__1, &r1, &a[k + 1 + k * a_dim1], &c__1);
+ }
+ } else {
+
+/* 2-by-2 pivot block D(k) */
+
+ if (k < *n - 1) {
+
+/* Perform a rank-2 update of A(k+2:n,k+2:n) as */
+
+/* A := A - ( L(k) L(k+1) )*D(k)*( L(k) L(k+1) )' */
+/* = A - ( W(k) W(k+1) )*inv(D(k))*( W(k) W(k+1) )' */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th */
+/* columns of L */
+
+ i__1 = k + 1 + k * a_dim1;
+ r__1 = a[i__1].r;
+ r__2 = r_imag(&a[k + 1 + k * a_dim1]);
+ d__ = slapy2_(&r__1, &r__2);
+ i__1 = k + 1 + (k + 1) * a_dim1;
+ d11 = a[i__1].r / d__;
+ i__1 = k + k * a_dim1;
+ d22 = a[i__1].r / d__;
+ tt = 1.f / (d11 * d22 - 1.f);
+ i__1 = k + 1 + k * a_dim1;
+ q__1.r = a[i__1].r / d__, q__1.i = a[i__1].i / d__;
+ d21.r = q__1.r, d21.i = q__1.i;
+ d__ = tt / d__;
+
+ i__1 = *n;
+ for (j = k + 2; j <= i__1; ++j) {
+ i__2 = j + k * a_dim1;
+ q__3.r = d11 * a[i__2].r, q__3.i = d11 * a[i__2].i;
+ i__3 = j + (k + 1) * a_dim1;
+ q__4.r = d21.r * a[i__3].r - d21.i * a[i__3].i,
+ q__4.i = d21.r * a[i__3].i + d21.i * a[i__3]
+ .r;
+ q__2.r = q__3.r - q__4.r, q__2.i = q__3.i - q__4.i;
+ q__1.r = d__ * q__2.r, q__1.i = d__ * q__2.i;
+ wk.r = q__1.r, wk.i = q__1.i;
+ i__2 = j + (k + 1) * a_dim1;
+ q__3.r = d22 * a[i__2].r, q__3.i = d22 * a[i__2].i;
+ r_cnjg(&q__5, &d21);
+ i__3 = j + k * a_dim1;
+ q__4.r = q__5.r * a[i__3].r - q__5.i * a[i__3].i,
+ q__4.i = q__5.r * a[i__3].i + q__5.i * a[i__3]
+ .r;
+ q__2.r = q__3.r - q__4.r, q__2.i = q__3.i - q__4.i;
+ q__1.r = d__ * q__2.r, q__1.i = d__ * q__2.i;
+ wkp1.r = q__1.r, wkp1.i = q__1.i;
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__ + k * a_dim1;
+ r_cnjg(&q__4, &wk);
+ q__3.r = a[i__5].r * q__4.r - a[i__5].i * q__4.i,
+ q__3.i = a[i__5].r * q__4.i + a[i__5].i *
+ q__4.r;
+ q__2.r = a[i__4].r - q__3.r, q__2.i = a[i__4].i -
+ q__3.i;
+ i__6 = i__ + (k + 1) * a_dim1;
+ r_cnjg(&q__6, &wkp1);
+ q__5.r = a[i__6].r * q__6.r - a[i__6].i * q__6.i,
+ q__5.i = a[i__6].r * q__6.i + a[i__6].i *
+ q__6.r;
+ q__1.r = q__2.r - q__5.r, q__1.i = q__2.i -
+ q__5.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L70: */
+ }
+ i__2 = j + k * a_dim1;
+ a[i__2].r = wk.r, a[i__2].i = wk.i;
+ i__2 = j + (k + 1) * a_dim1;
+ a[i__2].r = wkp1.r, a[i__2].i = wkp1.i;
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ r__1 = a[i__3].r;
+ q__1.r = r__1, q__1.i = 0.f;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+/* L80: */
+ }
+ }
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k + 1] = -kp;
+ }
+
+/* Increase K and return to the start of the main loop */
+
+ k += kstep;
+ goto L50;
+
+ }
+
+L90:
+ return 0;
+
+/* End of CHETF2 */
+
+} /* chetf2_ */
diff --git a/contrib/libs/clapack/chetrd.c b/contrib/libs/clapack/chetrd.c
new file mode 100644
index 0000000000..38df87aba0
--- /dev/null
+++ b/contrib/libs/clapack/chetrd.c
@@ -0,0 +1,369 @@
+/* chetrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+static real c_b23 = 1.f;
+
+/* Subroutine */ int chetrd_(char *uplo, integer *n, complex *a, integer *lda,
+ real *d__, real *e, complex *tau, complex *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, j, nb, kk, nx, iws;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ logical upper;
+ extern /* Subroutine */ int chetd2_(char *, integer *, complex *, integer
+ *, real *, real *, complex *, integer *), cher2k_(char *,
+ char *, integer *, integer *, complex *, complex *, integer *,
+ complex *, integer *, real *, complex *, integer *), clatrd_(char *, integer *, integer *, complex *, integer
+ *, real *, complex *, complex *, integer *), xerbla_(char
+ *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHETRD reduces a complex Hermitian matrix A to real symmetric */
+/* tridiagonal form T by a unitary similarity transformation: */
+/* Q**H * A * Q = T. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+/* On exit, if UPLO = 'U', the diagonal and first superdiagonal */
+/* of A are overwritten by the corresponding elements of the */
+/* tridiagonal matrix T, and the elements above the first */
+/* superdiagonal, with the array TAU, represent the unitary */
+/* matrix Q as a product of elementary reflectors; if UPLO */
+/* = 'L', the diagonal and first subdiagonal of A are over- */
+/* written by the corresponding elements of the tridiagonal */
+/* matrix T, and the elements below the first subdiagonal, with */
+/* the array TAU, represent the unitary matrix Q as a product */
+/* of elementary reflectors. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* D (output) REAL array, dimension (N) */
+/* The diagonal elements of the tridiagonal matrix T: */
+/* D(i) = A(i,i). */
+
+/* E (output) REAL array, dimension (N-1) */
+/* The off-diagonal elements of the tridiagonal matrix T: */
+/* E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. */
+
+/* TAU (output) COMPLEX array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= 1. */
+/* For optimum performance LWORK >= N*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(n-1) . . . H(2) H(1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in */
+/* A(1:i-1,i+1), and tau in TAU(i). */
+
+/* If UPLO = 'L', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(1) H(2) . . . H(n-1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i), */
+/* and tau in TAU(i). */
+
+/* The contents of A on exit are illustrated by the following examples */
+/* with n = 5: */
+
+/* if UPLO = 'U': if UPLO = 'L': */
+
+/* ( d e v2 v3 v4 ) ( d ) */
+/* ( d e v3 v4 ) ( e d ) */
+/* ( d e v4 ) ( v1 e d ) */
+/* ( d e ) ( v1 v2 e d ) */
+/* ( d ) ( v1 v2 v3 e d ) */
+
+/* where d and e denote diagonal and off-diagonal elements of T, and vi */
+/* denotes an element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1;
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else if (*lwork < 1 && ! lquery) {
+ *info = -9;
+ }
+
+ if (*info == 0) {
+
+/* Determine the block size. */
+
+ nb = ilaenv_(&c__1, "CHETRD", uplo, n, &c_n1, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHETRD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ nx = *n;
+ iws = 1;
+ if (nb > 1 && nb < *n) {
+
+/* Determine when to cross over from blocked to unblocked code */
+/* (last block is always handled by unblocked code). */
+
+/* Computing MAX */
+ i__1 = nb, i__2 = ilaenv_(&c__3, "CHETRD", uplo, n, &c_n1, &c_n1, &
+ c_n1);
+ nx = max(i__1,i__2);
+ if (nx < *n) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: determine the */
+/* minimum value of NB, and reduce NB or force use of */
+/* unblocked code by setting NX = N. */
+
+/* Computing MAX */
+ i__1 = *lwork / ldwork;
+ nb = max(i__1,1);
+ nbmin = ilaenv_(&c__2, "CHETRD", uplo, n, &c_n1, &c_n1, &c_n1);
+ if (nb < nbmin) {
+ nx = *n;
+ }
+ }
+ } else {
+ nx = *n;
+ }
+ } else {
+ nb = 1;
+ }
+
+ if (upper) {
+
+/* Reduce the upper triangle of A. */
+/* Columns 1:kk are handled by the unblocked method. */
+
+ kk = *n - (*n - nx + nb - 1) / nb * nb;
+ i__1 = kk + 1;
+ i__2 = -nb;
+ for (i__ = *n - nb + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+
+/* Reduce columns i:i+nb-1 to tridiagonal form and form the */
+/* matrix W which is needed to update the unreduced part of */
+/* the matrix */
+
+ i__3 = i__ + nb - 1;
+ clatrd_(uplo, &i__3, &nb, &a[a_offset], lda, &e[1], &tau[1], &
+ work[1], &ldwork);
+
+/* Update the unreduced submatrix A(1:i-1,1:i-1), using an */
+/* update of the form: A := A - V*W' - W*V' */
+
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cher2k_(uplo, "No transpose", &i__3, &nb, &q__1, &a[i__ * a_dim1
+ + 1], lda, &work[1], &ldwork, &c_b23, &a[a_offset], lda);
+
+/* Copy superdiagonal elements back into A, and diagonal */
+/* elements into D */
+
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ i__4 = j - 1 + j * a_dim1;
+ i__5 = j - 1;
+ a[i__4].r = e[i__5], a[i__4].i = 0.f;
+ i__4 = j;
+ i__5 = j + j * a_dim1;
+ d__[i__4] = a[i__5].r;
+/* L10: */
+ }
+/* L20: */
+ }
+
+/* Use unblocked code to reduce the last or only block */
+
+ chetd2_(uplo, &kk, &a[a_offset], lda, &d__[1], &e[1], &tau[1], &iinfo);
+ } else {
+
+/* Reduce the lower triangle of A */
+
+ i__2 = *n - nx;
+ i__1 = nb;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+
+/* Reduce columns i:i+nb-1 to tridiagonal form and form the */
+/* matrix W which is needed to update the unreduced part of */
+/* the matrix */
+
+ i__3 = *n - i__ + 1;
+ clatrd_(uplo, &i__3, &nb, &a[i__ + i__ * a_dim1], lda, &e[i__], &
+ tau[i__], &work[1], &ldwork);
+
+/* Update the unreduced submatrix A(i+nb:n,i+nb:n), using */
+/* an update of the form: A := A - V*W' - W*V' */
+
+ i__3 = *n - i__ - nb + 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cher2k_(uplo, "No transpose", &i__3, &nb, &q__1, &a[i__ + nb +
+ i__ * a_dim1], lda, &work[nb + 1], &ldwork, &c_b23, &a[
+ i__ + nb + (i__ + nb) * a_dim1], lda);
+
+/* Copy subdiagonal elements back into A, and diagonal */
+/* elements into D */
+
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ i__4 = j + 1 + j * a_dim1;
+ i__5 = j;
+ a[i__4].r = e[i__5], a[i__4].i = 0.f;
+ i__4 = j;
+ i__5 = j + j * a_dim1;
+ d__[i__4] = a[i__5].r;
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Use unblocked code to reduce the last or only block */
+
+ i__1 = *n - i__ + 1;
+ chetd2_(uplo, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__],
+ &tau[i__], &iinfo);
+ }
+
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ return 0;
+
+/* End of CHETRD */
+
+} /* chetrd_ */
diff --git a/contrib/libs/clapack/chetrf.c b/contrib/libs/clapack/chetrf.c
new file mode 100644
index 0000000000..75c17bb4db
--- /dev/null
+++ b/contrib/libs/clapack/chetrf.c
@@ -0,0 +1,334 @@
+/* chetrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int chetrf_(char *uplo, integer *n, complex *a, integer *lda,
+ integer *ipiv, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer j, k, kb, nb, iws;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ logical upper;
+ extern /* Subroutine */ int chetf2_(char *, integer *, complex *, integer
+ *, integer *, integer *), clahef_(char *, integer *,
+ integer *, integer *, complex *, integer *, integer *, complex *,
+ integer *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHETRF computes the factorization of a complex Hermitian matrix A */
+/* using the Bunch-Kaufman diagonal pivoting method. The form of the */
+/* factorization is */
+
+/* A = U*D*U**H or A = L*D*L**H */
+
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is Hermitian and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* This is the blocked version of the algorithm, calling Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L (see below for further details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of WORK. LWORK >=1. For best performance */
+/* LWORK >= N*NB, where NB is the block size returned by ILAENV. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, and division by zero will occur if it */
+/* is used to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', then A = U*D*U', where */
+/* U = P(n)*U(n)* ... *P(k)U(k)* ..., */
+/* i.e., U is a product of terms P(k)*U(k), where k decreases from n to */
+/* 1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I v 0 ) k-s */
+/* U(k) = ( 0 I 0 ) s */
+/* ( 0 0 I ) n-k */
+/* k-s s n-k */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */
+/* If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */
+/* and A(k,k), and v overwrites A(1:k-2,k-1:k). */
+
+/* If UPLO = 'L', then A = L*D*L', where */
+/* L = P(1)*L(1)* ... *P(k)*L(k)* ..., */
+/* i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */
+/* n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I 0 0 ) k-1 */
+/* L(k) = ( 0 I 0 ) s */
+/* ( 0 v I ) n-k-s+1 */
+/* k-1 s n-k-s+1 */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */
+/* If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */
+/* and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1;
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else if (*lwork < 1 && ! lquery) {
+ *info = -7;
+ }
+
+ if (*info == 0) {
+
+/* Determine the block size */
+
+ nb = ilaenv_(&c__1, "CHETRF", uplo, n, &c_n1, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHETRF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = *n;
+ if (nb > 1 && nb < *n) {
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+/* Computing MAX */
+ i__1 = *lwork / ldwork;
+ nb = max(i__1,1);
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "CHETRF", uplo, n, &c_n1, &c_n1, &
+ c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = 1;
+ }
+ if (nb < nbmin) {
+ nb = *n;
+ }
+
+ if (upper) {
+
+/* Factorize A as U*D*U' using the upper triangle of A */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* KB, where KB is the number of columns factorized by CLAHEF; */
+/* KB is either NB or NB-1, or K for the last block */
+
+ k = *n;
+L10:
+
+/* If K < 1, exit from loop */
+
+ if (k < 1) {
+ goto L40;
+ }
+
+ if (k > nb) {
+
+/* Factorize columns k-kb+1:k of A and use blocked code to */
+/* update columns 1:k-kb */
+
+ clahef_(uplo, &k, &nb, &kb, &a[a_offset], lda, &ipiv[1], &work[1],
+ n, &iinfo);
+ } else {
+
+/* Use unblocked code to factorize columns 1:k of A */
+
+ chetf2_(uplo, &k, &a[a_offset], lda, &ipiv[1], &iinfo);
+ kb = k;
+ }
+
+/* Set INFO on the first occurrence of a zero pivot */
+
+ if (*info == 0 && iinfo > 0) {
+ *info = iinfo;
+ }
+
+/* Decrease K and return to the start of the main loop */
+
+ k -= kb;
+ goto L10;
+
+ } else {
+
+/* Factorize A as L*D*L' using the lower triangle of A */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* KB, where KB is the number of columns factorized by CLAHEF; */
+/* KB is either NB or NB-1, or N-K+1 for the last block */
+
+ k = 1;
+L20:
+
+/* If K > N, exit from loop */
+
+ if (k > *n) {
+ goto L40;
+ }
+
+ if (k <= *n - nb) {
+
+/* Factorize columns k:k+kb-1 of A and use blocked code to */
+/* update columns k+kb:n */
+
+ i__1 = *n - k + 1;
+ clahef_(uplo, &i__1, &nb, &kb, &a[k + k * a_dim1], lda, &ipiv[k],
+ &work[1], n, &iinfo);
+ } else {
+
+/* Use unblocked code to factorize columns k:n of A */
+
+ i__1 = *n - k + 1;
+ chetf2_(uplo, &i__1, &a[k + k * a_dim1], lda, &ipiv[k], &iinfo);
+ kb = *n - k + 1;
+ }
+
+/* Set INFO on the first occurrence of a zero pivot */
+
+ if (*info == 0 && iinfo > 0) {
+ *info = iinfo + k - 1;
+ }
+
+/* Adjust IPIV */
+
+ i__1 = k + kb - 1;
+ for (j = k; j <= i__1; ++j) {
+ if (ipiv[j] > 0) {
+ ipiv[j] = ipiv[j] + k - 1;
+ } else {
+ ipiv[j] = ipiv[j] - k + 1;
+ }
+/* L30: */
+ }
+
+/* Increase K and return to the start of the main loop */
+
+ k += kb;
+ goto L20;
+
+ }
+
+L40:
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ return 0;
+
+/* End of CHETRF */
+
+} /* chetrf_ */
diff --git a/contrib/libs/clapack/chetri.c b/contrib/libs/clapack/chetri.c
new file mode 100644
index 0000000000..ac9c6bfa53
--- /dev/null
+++ b/contrib/libs/clapack/chetri.c
@@ -0,0 +1,510 @@
+/* chetri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b2 = {0.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int chetri_(char *uplo, integer *n, complex *a, integer *lda,
+ integer *ipiv, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ double c_abs(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ real d__;
+ integer j, k;
+ real t, ak;
+ integer kp;
+ real akp1;
+ complex temp, akkp1;
+ extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int chemv_(char *, integer *, complex *, complex *
+, integer *, complex *, integer *, complex *, complex *, integer *
+), ccopy_(integer *, complex *, integer *, complex *,
+ integer *), cswap_(integer *, complex *, integer *, complex *,
+ integer *);
+ integer kstep;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHETRI computes the inverse of a complex Hermitian indefinite matrix */
+/* A using the factorization A = U*D*U**H or A = L*D*L**H computed by */
+/* CHETRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**H; */
+/* = 'L': Lower triangular, form is A = L*D*L**H. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the block diagonal matrix D and the multipliers */
+/* used to obtain the factor U or L as computed by CHETRF. */
+
+/* On exit, if INFO = 0, the (Hermitian) inverse of the original */
+/* matrix. If UPLO = 'U', the upper triangular part of the */
+/* inverse is formed and the part of A below the diagonal is not */
+/* referenced; if UPLO = 'L' the lower triangular part of the */
+/* inverse is formed and the part of A above the diagonal is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by CHETRF. */
+
+/* WORK (workspace) COMPLEX array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) = 0; the matrix is singular and its */
+/* inverse could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHETRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check that the diagonal matrix D is nonsingular. */
+
+ if (upper) {
+
+/* Upper triangular storage: examine D from bottom to top */
+
+ for (*info = *n; *info >= 1; --(*info)) {
+ i__1 = *info + *info * a_dim1;
+ if (ipiv[*info] > 0 && (a[i__1].r == 0.f && a[i__1].i == 0.f)) {
+ return 0;
+ }
+/* L10: */
+ }
+ } else {
+
+/* Lower triangular storage: examine D from top to bottom. */
+
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ i__2 = *info + *info * a_dim1;
+ if (ipiv[*info] > 0 && (a[i__2].r == 0.f && a[i__2].i == 0.f)) {
+ return 0;
+ }
+/* L20: */
+ }
+ }
+ *info = 0;
+
+ if (upper) {
+
+/* Compute inv(A) from the factorization A = U*D*U'. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+L30:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L50;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Invert the diagonal block. */
+
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ r__1 = 1.f / a[i__2].r;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+
+/* Compute column K of the inverse. */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ ccopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &work[1], &c__1);
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ chemv_(uplo, &i__1, &q__1, &a[a_offset], lda, &work[1], &c__1,
+ &c_b2, &a[k * a_dim1 + 1], &c__1);
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ i__3 = k - 1;
+ cdotc_(&q__2, &i__3, &work[1], &c__1, &a[k * a_dim1 + 1], &
+ c__1);
+ r__1 = q__2.r;
+ q__1.r = a[i__2].r - r__1, q__1.i = a[i__2].i;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ }
+ kstep = 1;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Invert the diagonal block. */
+
+ t = c_abs(&a[k + (k + 1) * a_dim1]);
+ i__1 = k + k * a_dim1;
+ ak = a[i__1].r / t;
+ i__1 = k + 1 + (k + 1) * a_dim1;
+ akp1 = a[i__1].r / t;
+ i__1 = k + (k + 1) * a_dim1;
+ q__1.r = a[i__1].r / t, q__1.i = a[i__1].i / t;
+ akkp1.r = q__1.r, akkp1.i = q__1.i;
+ d__ = t * (ak * akp1 - 1.f);
+ i__1 = k + k * a_dim1;
+ r__1 = akp1 / d__;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ i__1 = k + 1 + (k + 1) * a_dim1;
+ r__1 = ak / d__;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ i__1 = k + (k + 1) * a_dim1;
+ q__2.r = -akkp1.r, q__2.i = -akkp1.i;
+ q__1.r = q__2.r / d__, q__1.i = q__2.i / d__;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+
+/* Compute columns K and K+1 of the inverse. */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ ccopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &work[1], &c__1);
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ chemv_(uplo, &i__1, &q__1, &a[a_offset], lda, &work[1], &c__1,
+ &c_b2, &a[k * a_dim1 + 1], &c__1);
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ i__3 = k - 1;
+ cdotc_(&q__2, &i__3, &work[1], &c__1, &a[k * a_dim1 + 1], &
+ c__1);
+ r__1 = q__2.r;
+ q__1.r = a[i__2].r - r__1, q__1.i = a[i__2].i;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ i__1 = k + (k + 1) * a_dim1;
+ i__2 = k + (k + 1) * a_dim1;
+ i__3 = k - 1;
+ cdotc_(&q__2, &i__3, &a[k * a_dim1 + 1], &c__1, &a[(k + 1) *
+ a_dim1 + 1], &c__1);
+ q__1.r = a[i__2].r - q__2.r, q__1.i = a[i__2].i - q__2.i;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ i__1 = k - 1;
+ ccopy_(&i__1, &a[(k + 1) * a_dim1 + 1], &c__1, &work[1], &
+ c__1);
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ chemv_(uplo, &i__1, &q__1, &a[a_offset], lda, &work[1], &c__1,
+ &c_b2, &a[(k + 1) * a_dim1 + 1], &c__1);
+ i__1 = k + 1 + (k + 1) * a_dim1;
+ i__2 = k + 1 + (k + 1) * a_dim1;
+ i__3 = k - 1;
+ cdotc_(&q__2, &i__3, &work[1], &c__1, &a[(k + 1) * a_dim1 + 1]
+, &c__1);
+ r__1 = q__2.r;
+ q__1.r = a[i__2].r - r__1, q__1.i = a[i__2].i;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ }
+ kstep = 2;
+ }
+
+ kp = (i__1 = ipiv[k], abs(i__1));
+ if (kp != k) {
+
+/* Interchange rows and columns K and KP in the leading */
+/* submatrix A(1:k+1,1:k+1) */
+
+ i__1 = kp - 1;
+ cswap_(&i__1, &a[k * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1], &
+ c__1);
+ i__1 = k - 1;
+ for (j = kp + 1; j <= i__1; ++j) {
+ r_cnjg(&q__1, &a[j + k * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = j + k * a_dim1;
+ r_cnjg(&q__1, &a[kp + j * a_dim1]);
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ i__2 = kp + j * a_dim1;
+ a[i__2].r = temp.r, a[i__2].i = temp.i;
+/* L40: */
+ }
+ i__1 = kp + k * a_dim1;
+ r_cnjg(&q__1, &a[kp + k * a_dim1]);
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ i__1 = k + k * a_dim1;
+ temp.r = a[i__1].r, temp.i = a[i__1].i;
+ i__1 = k + k * a_dim1;
+ i__2 = kp + kp * a_dim1;
+ a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+ i__1 = kp + kp * a_dim1;
+ a[i__1].r = temp.r, a[i__1].i = temp.i;
+ if (kstep == 2) {
+ i__1 = k + (k + 1) * a_dim1;
+ temp.r = a[i__1].r, temp.i = a[i__1].i;
+ i__1 = k + (k + 1) * a_dim1;
+ i__2 = kp + (k + 1) * a_dim1;
+ a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+ i__1 = kp + (k + 1) * a_dim1;
+ a[i__1].r = temp.r, a[i__1].i = temp.i;
+ }
+ }
+
+ k += kstep;
+ goto L30;
+L50:
+
+ ;
+ } else {
+
+/* Compute inv(A) from the factorization A = L*D*L'. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = *n;
+L60:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L80;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Invert the diagonal block. */
+
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ r__1 = 1.f / a[i__2].r;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+
+/* Compute column K of the inverse. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ ccopy_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &work[1], &c__1);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ chemv_(uplo, &i__1, &q__1, &a[k + 1 + (k + 1) * a_dim1], lda,
+ &work[1], &c__1, &c_b2, &a[k + 1 + k * a_dim1], &c__1);
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ i__3 = *n - k;
+ cdotc_(&q__2, &i__3, &work[1], &c__1, &a[k + 1 + k * a_dim1],
+ &c__1);
+ r__1 = q__2.r;
+ q__1.r = a[i__2].r - r__1, q__1.i = a[i__2].i;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ }
+ kstep = 1;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Invert the diagonal block. */
+
+ t = c_abs(&a[k + (k - 1) * a_dim1]);
+ i__1 = k - 1 + (k - 1) * a_dim1;
+ ak = a[i__1].r / t;
+ i__1 = k + k * a_dim1;
+ akp1 = a[i__1].r / t;
+ i__1 = k + (k - 1) * a_dim1;
+ q__1.r = a[i__1].r / t, q__1.i = a[i__1].i / t;
+ akkp1.r = q__1.r, akkp1.i = q__1.i;
+ d__ = t * (ak * akp1 - 1.f);
+ i__1 = k - 1 + (k - 1) * a_dim1;
+ r__1 = akp1 / d__;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ i__1 = k + k * a_dim1;
+ r__1 = ak / d__;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ i__1 = k + (k - 1) * a_dim1;
+ q__2.r = -akkp1.r, q__2.i = -akkp1.i;
+ q__1.r = q__2.r / d__, q__1.i = q__2.i / d__;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+
+/* Compute columns K-1 and K of the inverse. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ ccopy_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &work[1], &c__1);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ chemv_(uplo, &i__1, &q__1, &a[k + 1 + (k + 1) * a_dim1], lda,
+ &work[1], &c__1, &c_b2, &a[k + 1 + k * a_dim1], &c__1);
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ i__3 = *n - k;
+ cdotc_(&q__2, &i__3, &work[1], &c__1, &a[k + 1 + k * a_dim1],
+ &c__1);
+ r__1 = q__2.r;
+ q__1.r = a[i__2].r - r__1, q__1.i = a[i__2].i;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ i__1 = k + (k - 1) * a_dim1;
+ i__2 = k + (k - 1) * a_dim1;
+ i__3 = *n - k;
+ cdotc_(&q__2, &i__3, &a[k + 1 + k * a_dim1], &c__1, &a[k + 1
+ + (k - 1) * a_dim1], &c__1);
+ q__1.r = a[i__2].r - q__2.r, q__1.i = a[i__2].i - q__2.i;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ i__1 = *n - k;
+ ccopy_(&i__1, &a[k + 1 + (k - 1) * a_dim1], &c__1, &work[1], &
+ c__1);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ chemv_(uplo, &i__1, &q__1, &a[k + 1 + (k + 1) * a_dim1], lda,
+ &work[1], &c__1, &c_b2, &a[k + 1 + (k - 1) * a_dim1],
+ &c__1);
+ i__1 = k - 1 + (k - 1) * a_dim1;
+ i__2 = k - 1 + (k - 1) * a_dim1;
+ i__3 = *n - k;
+ cdotc_(&q__2, &i__3, &work[1], &c__1, &a[k + 1 + (k - 1) *
+ a_dim1], &c__1);
+ r__1 = q__2.r;
+ q__1.r = a[i__2].r - r__1, q__1.i = a[i__2].i;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ }
+ kstep = 2;
+ }
+
+ kp = (i__1 = ipiv[k], abs(i__1));
+ if (kp != k) {
+
+/* Interchange rows and columns K and KP in the trailing */
+/* submatrix A(k-1:n,k-1:n) */
+
+ if (kp < *n) {
+ i__1 = *n - kp;
+ cswap_(&i__1, &a[kp + 1 + k * a_dim1], &c__1, &a[kp + 1 + kp *
+ a_dim1], &c__1);
+ }
+ i__1 = kp - 1;
+ for (j = k + 1; j <= i__1; ++j) {
+ r_cnjg(&q__1, &a[j + k * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = j + k * a_dim1;
+ r_cnjg(&q__1, &a[kp + j * a_dim1]);
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ i__2 = kp + j * a_dim1;
+ a[i__2].r = temp.r, a[i__2].i = temp.i;
+/* L70: */
+ }
+ i__1 = kp + k * a_dim1;
+ r_cnjg(&q__1, &a[kp + k * a_dim1]);
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ i__1 = k + k * a_dim1;
+ temp.r = a[i__1].r, temp.i = a[i__1].i;
+ i__1 = k + k * a_dim1;
+ i__2 = kp + kp * a_dim1;
+ a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+ i__1 = kp + kp * a_dim1;
+ a[i__1].r = temp.r, a[i__1].i = temp.i;
+ if (kstep == 2) {
+ i__1 = k + (k - 1) * a_dim1;
+ temp.r = a[i__1].r, temp.i = a[i__1].i;
+ i__1 = k + (k - 1) * a_dim1;
+ i__2 = kp + (k - 1) * a_dim1;
+ a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+ i__1 = kp + (k - 1) * a_dim1;
+ a[i__1].r = temp.r, a[i__1].i = temp.i;
+ }
+ }
+
+ k -= kstep;
+ goto L60;
+L80:
+ ;
+ }
+
+ return 0;
+
+/* End of CHETRI */
+
+} /* chetri_ */
diff --git a/contrib/libs/clapack/chetrs.c b/contrib/libs/clapack/chetrs.c
new file mode 100644
index 0000000000..fcb0eefc18
--- /dev/null
+++ b/contrib/libs/clapack/chetrs.c
@@ -0,0 +1,528 @@
+/* chetrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int chetrs_(char *uplo, integer *n, integer *nrhs, complex *
+ a, integer *lda, integer *ipiv, complex *b, integer *ldb, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void c_div(complex *, complex *, complex *), r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer j, k;
+ real s;
+ complex ak, bk;
+ integer kp;
+ complex akm1, bkm1, akm1k;
+ extern logical lsame_(char *, char *);
+ complex denom;
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *), cgeru_(integer *, integer *, complex *,
+ complex *, integer *, complex *, integer *, complex *, integer *),
+ cswap_(integer *, complex *, integer *, complex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int clacgv_(integer *, complex *, integer *),
+ csscal_(integer *, real *, complex *, integer *), xerbla_(char *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHETRS solves a system of linear equations A*X = B with a complex */
+/* Hermitian matrix A using the factorization A = U*D*U**H or */
+/* A = L*D*L**H computed by CHETRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**H; */
+/* = 'L': Lower triangular, form is A = L*D*L**H. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The block diagonal matrix D and the multipliers used to */
+/* obtain the factor U or L as computed by CHETRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by CHETRF. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHETRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Solve A*X = B, where A = U*D*U'. */
+
+/* First solve U*D*X = B, overwriting B with X. */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = *n;
+L10:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L30;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(U(K)), where U(K) is the transformation */
+/* stored in column K of A. */
+
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &a[k * a_dim1 + 1], &c__1, &b[k +
+ b_dim1], ldb, &b[b_dim1 + 1], ldb);
+
+/* Multiply by the inverse of the diagonal block. */
+
+ i__1 = k + k * a_dim1;
+ s = 1.f / a[i__1].r;
+ csscal_(nrhs, &s, &b[k + b_dim1], ldb);
+ --k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Interchange rows K-1 and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k - 1) {
+ cswap_(nrhs, &b[k - 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(U(K)), where U(K) is the transformation */
+/* stored in columns K-1 and K of A. */
+
+ i__1 = k - 2;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &a[k * a_dim1 + 1], &c__1, &b[k +
+ b_dim1], ldb, &b[b_dim1 + 1], ldb);
+ i__1 = k - 2;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &a[(k - 1) * a_dim1 + 1], &c__1, &b[k
+ - 1 + b_dim1], ldb, &b[b_dim1 + 1], ldb);
+
+/* Multiply by the inverse of the diagonal block. */
+
+ i__1 = k - 1 + k * a_dim1;
+ akm1k.r = a[i__1].r, akm1k.i = a[i__1].i;
+ c_div(&q__1, &a[k - 1 + (k - 1) * a_dim1], &akm1k);
+ akm1.r = q__1.r, akm1.i = q__1.i;
+ r_cnjg(&q__2, &akm1k);
+ c_div(&q__1, &a[k + k * a_dim1], &q__2);
+ ak.r = q__1.r, ak.i = q__1.i;
+ q__2.r = akm1.r * ak.r - akm1.i * ak.i, q__2.i = akm1.r * ak.i +
+ akm1.i * ak.r;
+ q__1.r = q__2.r - 1.f, q__1.i = q__2.i - 0.f;
+ denom.r = q__1.r, denom.i = q__1.i;
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ c_div(&q__1, &b[k - 1 + j * b_dim1], &akm1k);
+ bkm1.r = q__1.r, bkm1.i = q__1.i;
+ r_cnjg(&q__2, &akm1k);
+ c_div(&q__1, &b[k + j * b_dim1], &q__2);
+ bk.r = q__1.r, bk.i = q__1.i;
+ i__2 = k - 1 + j * b_dim1;
+ q__3.r = ak.r * bkm1.r - ak.i * bkm1.i, q__3.i = ak.r *
+ bkm1.i + ak.i * bkm1.r;
+ q__2.r = q__3.r - bk.r, q__2.i = q__3.i - bk.i;
+ c_div(&q__1, &q__2, &denom);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ i__2 = k + j * b_dim1;
+ q__3.r = akm1.r * bk.r - akm1.i * bk.i, q__3.i = akm1.r *
+ bk.i + akm1.i * bk.r;
+ q__2.r = q__3.r - bkm1.r, q__2.i = q__3.i - bkm1.i;
+ c_div(&q__1, &q__2, &denom);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L20: */
+ }
+ k += -2;
+ }
+
+ goto L10;
+L30:
+
+/* Next solve U'*X = B, overwriting B with X. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+L40:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L50;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Multiply by inv(U'(K)), where U(K) is the transformation */
+/* stored in column K of A. */
+
+ if (k > 1) {
+ clacgv_(nrhs, &b[k + b_dim1], ldb);
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Conjugate transpose", &i__1, nrhs, &q__1, &b[b_offset]
+, ldb, &a[k * a_dim1 + 1], &c__1, &c_b1, &b[k +
+ b_dim1], ldb);
+ clacgv_(nrhs, &b[k + b_dim1], ldb);
+ }
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ ++k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Multiply by inv(U'(K+1)), where U(K+1) is the transformation */
+/* stored in columns K and K+1 of A. */
+
+ if (k > 1) {
+ clacgv_(nrhs, &b[k + b_dim1], ldb);
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Conjugate transpose", &i__1, nrhs, &q__1, &b[b_offset]
+, ldb, &a[k * a_dim1 + 1], &c__1, &c_b1, &b[k +
+ b_dim1], ldb);
+ clacgv_(nrhs, &b[k + b_dim1], ldb);
+
+ clacgv_(nrhs, &b[k + 1 + b_dim1], ldb);
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Conjugate transpose", &i__1, nrhs, &q__1, &b[b_offset]
+, ldb, &a[(k + 1) * a_dim1 + 1], &c__1, &c_b1, &b[k +
+ 1 + b_dim1], ldb);
+ clacgv_(nrhs, &b[k + 1 + b_dim1], ldb);
+ }
+
+/* Interchange rows K and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ k += 2;
+ }
+
+ goto L40;
+L50:
+
+ ;
+ } else {
+
+/* Solve A*X = B, where A = L*D*L'. */
+
+/* First solve L*D*X = B, overwriting B with X. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+L60:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L80;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(L(K)), where L(K) is the transformation */
+/* stored in column K of A. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &a[k + 1 + k * a_dim1], &c__1, &b[
+ k + b_dim1], ldb, &b[k + 1 + b_dim1], ldb);
+ }
+
+/* Multiply by the inverse of the diagonal block. */
+
+ i__1 = k + k * a_dim1;
+ s = 1.f / a[i__1].r;
+ csscal_(nrhs, &s, &b[k + b_dim1], ldb);
+ ++k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Interchange rows K+1 and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k + 1) {
+ cswap_(nrhs, &b[k + 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(L(K)), where L(K) is the transformation */
+/* stored in columns K and K+1 of A. */
+
+ if (k < *n - 1) {
+ i__1 = *n - k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &a[k + 2 + k * a_dim1], &c__1, &b[
+ k + b_dim1], ldb, &b[k + 2 + b_dim1], ldb);
+ i__1 = *n - k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &a[k + 2 + (k + 1) * a_dim1], &
+ c__1, &b[k + 1 + b_dim1], ldb, &b[k + 2 + b_dim1],
+ ldb);
+ }
+
+/* Multiply by the inverse of the diagonal block. */
+
+ i__1 = k + 1 + k * a_dim1;
+ akm1k.r = a[i__1].r, akm1k.i = a[i__1].i;
+ r_cnjg(&q__2, &akm1k);
+ c_div(&q__1, &a[k + k * a_dim1], &q__2);
+ akm1.r = q__1.r, akm1.i = q__1.i;
+ c_div(&q__1, &a[k + 1 + (k + 1) * a_dim1], &akm1k);
+ ak.r = q__1.r, ak.i = q__1.i;
+ q__2.r = akm1.r * ak.r - akm1.i * ak.i, q__2.i = akm1.r * ak.i +
+ akm1.i * ak.r;
+ q__1.r = q__2.r - 1.f, q__1.i = q__2.i - 0.f;
+ denom.r = q__1.r, denom.i = q__1.i;
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ r_cnjg(&q__2, &akm1k);
+ c_div(&q__1, &b[k + j * b_dim1], &q__2);
+ bkm1.r = q__1.r, bkm1.i = q__1.i;
+ c_div(&q__1, &b[k + 1 + j * b_dim1], &akm1k);
+ bk.r = q__1.r, bk.i = q__1.i;
+ i__2 = k + j * b_dim1;
+ q__3.r = ak.r * bkm1.r - ak.i * bkm1.i, q__3.i = ak.r *
+ bkm1.i + ak.i * bkm1.r;
+ q__2.r = q__3.r - bk.r, q__2.i = q__3.i - bk.i;
+ c_div(&q__1, &q__2, &denom);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ i__2 = k + 1 + j * b_dim1;
+ q__3.r = akm1.r * bk.r - akm1.i * bk.i, q__3.i = akm1.r *
+ bk.i + akm1.i * bk.r;
+ q__2.r = q__3.r - bkm1.r, q__2.i = q__3.i - bkm1.i;
+ c_div(&q__1, &q__2, &denom);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L70: */
+ }
+ k += 2;
+ }
+
+ goto L60;
+L80:
+
+/* Next solve L'*X = B, overwriting B with X. */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = *n;
+L90:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L100;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Multiply by inv(L'(K)), where L(K) is the transformation */
+/* stored in column K of A. */
+
+ if (k < *n) {
+ clacgv_(nrhs, &b[k + b_dim1], ldb);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Conjugate transpose", &i__1, nrhs, &q__1, &b[k + 1 +
+ b_dim1], ldb, &a[k + 1 + k * a_dim1], &c__1, &c_b1, &
+ b[k + b_dim1], ldb);
+ clacgv_(nrhs, &b[k + b_dim1], ldb);
+ }
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ --k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Multiply by inv(L'(K-1)), where L(K-1) is the transformation */
+/* stored in columns K-1 and K of A. */
+
+ if (k < *n) {
+ clacgv_(nrhs, &b[k + b_dim1], ldb);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Conjugate transpose", &i__1, nrhs, &q__1, &b[k + 1 +
+ b_dim1], ldb, &a[k + 1 + k * a_dim1], &c__1, &c_b1, &
+ b[k + b_dim1], ldb);
+ clacgv_(nrhs, &b[k + b_dim1], ldb);
+
+ clacgv_(nrhs, &b[k - 1 + b_dim1], ldb);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Conjugate transpose", &i__1, nrhs, &q__1, &b[k + 1 +
+ b_dim1], ldb, &a[k + 1 + (k - 1) * a_dim1], &c__1, &
+ c_b1, &b[k - 1 + b_dim1], ldb);
+ clacgv_(nrhs, &b[k - 1 + b_dim1], ldb);
+ }
+
+/* Interchange rows K and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ k += -2;
+ }
+
+ goto L90;
+L100:
+ ;
+ }
+
+ return 0;
+
+/* End of CHETRS */
+
+} /* chetrs_ */
diff --git a/contrib/libs/clapack/chfrk.c b/contrib/libs/clapack/chfrk.c
new file mode 100644
index 0000000000..d980a7e152
--- /dev/null
+++ b/contrib/libs/clapack/chfrk.c
@@ -0,0 +1,530 @@
+/* chfrk.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int chfrk_(char *transr, char *uplo, char *trans, integer *n,
+ integer *k, real *alpha, complex *a, integer *lda, real *beta,
+ complex *c__)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ complex q__1;
+
+ /* Local variables */
+ integer j, n1, n2, nk, info;
+ complex cbeta;
+ logical normaltransr;
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *), cherk_(char *,
+ char *, integer *, integer *, real *, complex *, integer *, real *
+, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ integer nrowa;
+ logical lower;
+ complex calpha;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nisodd, notrans;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Julien Langou of the Univ. of Colorado Denver -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Level 3 BLAS like routine for C in RFP Format. */
+
+/* CHFRK performs one of the Hermitian rank--k operations */
+
+/* C := alpha*A*conjg( A' ) + beta*C, */
+
+/* or */
+
+/* C := alpha*conjg( A' )*A + beta*C, */
+
+/* where alpha and beta are real scalars, C is an n--by--n Hermitian */
+/* matrix and A is an n--by--k matrix in the first case and a k--by--n */
+/* matrix in the second case. */
+
+/* Arguments */
+/* ========== */
+
+/* TRANSR - (input) CHARACTER. */
+/* = 'N': The Normal Form of RFP A is stored; */
+/* = 'C': The Conjugate-transpose Form of RFP A is stored. */
+
+/* UPLO - (input) CHARACTER. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array C is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of C */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of C */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* TRANS - (input) CHARACTER. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' C := alpha*A*conjg( A' ) + beta*C. */
+
+/* TRANS = 'C' or 'c' C := alpha*conjg( A' )*A + beta*C. */
+
+/* Unchanged on exit. */
+
+/* N - (input) INTEGER. */
+/* On entry, N specifies the order of the matrix C. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* K - (input) INTEGER. */
+/* On entry with TRANS = 'N' or 'n', K specifies the number */
+/* of columns of the matrix A, and on entry with */
+/* TRANS = 'C' or 'c', K specifies the number of rows of the */
+/* matrix A. K must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - (input) REAL. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - (input) COMPLEX array of DIMENSION ( LDA, ka ), where KA */
+/* is K when TRANS = 'N' or 'n', and is N otherwise. Before */
+/* entry with TRANS = 'N' or 'n', the leading N--by--K part of */
+/* the array A must contain the matrix A, otherwise the leading */
+/* K--by--N part of the array A must contain the matrix A. */
+/* Unchanged on exit. */
+
+/* LDA - (input) INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDA must be at least max( 1, n ), otherwise LDA must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* BETA - (input) REAL. */
+/* On entry, BETA specifies the scalar beta. */
+/* Unchanged on exit. */
+
+/* C - (input/output) COMPLEX array, dimension ( N*(N+1)/2 ). */
+/* On entry, the matrix A in RFP Format. RFP Format is */
+/* described by TRANSR, UPLO and N. Note that the imaginary */
+/* parts of the diagonal elements need not be set, they are */
+/* assumed to be zero, and on exit they are set to zero. */
+
+/* Arguments */
+/* ========== */
+
+/* .. */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --c__;
+
+ /* Function Body */
+ info = 0;
+ normaltransr = lsame_(transr, "N");
+ lower = lsame_(uplo, "L");
+ notrans = lsame_(trans, "N");
+
+ if (notrans) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+
+ if (! normaltransr && ! lsame_(transr, "C")) {
+ info = -1;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ info = -2;
+ } else if (! notrans && ! lsame_(trans, "C")) {
+ info = -3;
+ } else if (*n < 0) {
+ info = -4;
+ } else if (*k < 0) {
+ info = -5;
+ } else if (*lda < max(1,nrowa)) {
+ info = -8;
+ }
+ if (info != 0) {
+ i__1 = -info;
+ xerbla_("CHFRK ", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+/* The quick return case: ((ALPHA.EQ.0).AND.(BETA.NE.ZERO)) is not */
+/* done (it is in CHERK for example) and left in the general case. */
+
+ if (*n == 0 || (*alpha == 0.f || *k == 0) && *beta == 1.f) {
+ return 0;
+ }
+
+ if (*alpha == 0.f && *beta == 0.f) {
+ i__1 = *n * (*n + 1) / 2;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ c__[i__2].r = 0.f, c__[i__2].i = 0.f;
+ }
+ return 0;
+ }
+
+ q__1.r = *alpha, q__1.i = 0.f;
+ calpha.r = q__1.r, calpha.i = q__1.i;
+ q__1.r = *beta, q__1.i = 0.f;
+ cbeta.r = q__1.r, cbeta.i = q__1.i;
+
+/* C is N-by-N. */
+/* If N is odd, set NISODD = .TRUE., and N1 and N2. */
+/* If N is even, NISODD = .FALSE., and NK. */
+
+ if (*n % 2 == 0) {
+ nisodd = FALSE_;
+ nk = *n / 2;
+ } else {
+ nisodd = TRUE_;
+ if (lower) {
+ n2 = *n / 2;
+ n1 = *n - n2;
+ } else {
+ n1 = *n / 2;
+ n2 = *n - n1;
+ }
+ }
+
+ if (nisodd) {
+
+/* N is odd */
+
+ if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+ if (lower) {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* N is odd, TRANSR = 'N', UPLO = 'L', and TRANS = 'N' */
+
+ cherk_("L", "N", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[1], n);
+ cherk_("U", "N", &n2, k, alpha, &a[n1 + 1 + a_dim1], lda,
+ beta, &c__[*n + 1], n);
+ cgemm_("N", "C", &n2, &n1, k, &calpha, &a[n1 + 1 + a_dim1]
+, lda, &a[a_dim1 + 1], lda, &cbeta, &c__[n1 + 1],
+ n);
+
+ } else {
+
+/* N is odd, TRANSR = 'N', UPLO = 'L', and TRANS = 'C' */
+
+ cherk_("L", "C", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[1], n);
+ cherk_("U", "C", &n2, k, alpha, &a[(n1 + 1) * a_dim1 + 1],
+ lda, beta, &c__[*n + 1], n)
+ ;
+ cgemm_("C", "N", &n2, &n1, k, &calpha, &a[(n1 + 1) *
+ a_dim1 + 1], lda, &a[a_dim1 + 1], lda, &cbeta, &
+ c__[n1 + 1], n);
+
+ }
+
+ } else {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* N is odd, TRANSR = 'N', UPLO = 'U', and TRANS = 'N' */
+
+ cherk_("L", "N", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[n2 + 1], n);
+ cherk_("U", "N", &n2, k, alpha, &a[n2 + a_dim1], lda,
+ beta, &c__[n1 + 1], n);
+ cgemm_("N", "C", &n1, &n2, k, &calpha, &a[a_dim1 + 1],
+ lda, &a[n2 + a_dim1], lda, &cbeta, &c__[1], n);
+
+ } else {
+
+/* N is odd, TRANSR = 'N', UPLO = 'U', and TRANS = 'C' */
+
+ cherk_("L", "C", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[n2 + 1], n);
+ cherk_("U", "C", &n2, k, alpha, &a[n2 * a_dim1 + 1], lda,
+ beta, &c__[n1 + 1], n);
+ cgemm_("C", "N", &n1, &n2, k, &calpha, &a[a_dim1 + 1],
+ lda, &a[n2 * a_dim1 + 1], lda, &cbeta, &c__[1], n);
+
+ }
+
+ }
+
+ } else {
+
+/* N is odd, and TRANSR = 'C' */
+
+ if (lower) {
+
+/* N is odd, TRANSR = 'C', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* N is odd, TRANSR = 'C', UPLO = 'L', and TRANS = 'N' */
+
+ cherk_("U", "N", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[1], &n1);
+ cherk_("L", "N", &n2, k, alpha, &a[n1 + 1 + a_dim1], lda,
+ beta, &c__[2], &n1);
+ cgemm_("N", "C", &n1, &n2, k, &calpha, &a[a_dim1 + 1],
+ lda, &a[n1 + 1 + a_dim1], lda, &cbeta, &c__[n1 *
+ n1 + 1], &n1);
+
+ } else {
+
+/* N is odd, TRANSR = 'C', UPLO = 'L', and TRANS = 'C' */
+
+ cherk_("U", "C", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[1], &n1);
+ cherk_("L", "C", &n2, k, alpha, &a[(n1 + 1) * a_dim1 + 1],
+ lda, beta, &c__[2], &n1);
+ cgemm_("C", "N", &n1, &n2, k, &calpha, &a[a_dim1 + 1],
+ lda, &a[(n1 + 1) * a_dim1 + 1], lda, &cbeta, &c__[
+ n1 * n1 + 1], &n1);
+
+ }
+
+ } else {
+
+/* N is odd, TRANSR = 'C', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* N is odd, TRANSR = 'C', UPLO = 'U', and TRANS = 'N' */
+
+ cherk_("U", "N", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[n2 * n2 + 1], &n2);
+ cherk_("L", "N", &n2, k, alpha, &a[n1 + 1 + a_dim1], lda,
+ beta, &c__[n1 * n2 + 1], &n2);
+ cgemm_("N", "C", &n2, &n1, k, &calpha, &a[n1 + 1 + a_dim1]
+, lda, &a[a_dim1 + 1], lda, &cbeta, &c__[1], &n2);
+
+ } else {
+
+/* N is odd, TRANSR = 'C', UPLO = 'U', and TRANS = 'C' */
+
+ cherk_("U", "C", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[n2 * n2 + 1], &n2);
+ cherk_("L", "C", &n2, k, alpha, &a[(n1 + 1) * a_dim1 + 1],
+ lda, beta, &c__[n1 * n2 + 1], &n2);
+ cgemm_("C", "N", &n2, &n1, k, &calpha, &a[(n1 + 1) *
+ a_dim1 + 1], lda, &a[a_dim1 + 1], lda, &cbeta, &
+ c__[1], &n2);
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* N is even */
+
+ if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+ if (lower) {
+
+/* N is even, TRANSR = 'N', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* N is even, TRANSR = 'N', UPLO = 'L', and TRANS = 'N' */
+
+ i__1 = *n + 1;
+ cherk_("L", "N", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[2], &i__1);
+ i__1 = *n + 1;
+ cherk_("U", "N", &nk, k, alpha, &a[nk + 1 + a_dim1], lda,
+ beta, &c__[1], &i__1);
+ i__1 = *n + 1;
+ cgemm_("N", "C", &nk, &nk, k, &calpha, &a[nk + 1 + a_dim1]
+, lda, &a[a_dim1 + 1], lda, &cbeta, &c__[nk + 2],
+ &i__1);
+
+ } else {
+
+/* N is even, TRANSR = 'N', UPLO = 'L', and TRANS = 'C' */
+
+ i__1 = *n + 1;
+ cherk_("L", "C", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[2], &i__1);
+ i__1 = *n + 1;
+ cherk_("U", "C", &nk, k, alpha, &a[(nk + 1) * a_dim1 + 1],
+ lda, beta, &c__[1], &i__1);
+ i__1 = *n + 1;
+ cgemm_("C", "N", &nk, &nk, k, &calpha, &a[(nk + 1) *
+ a_dim1 + 1], lda, &a[a_dim1 + 1], lda, &cbeta, &
+ c__[nk + 2], &i__1);
+
+ }
+
+ } else {
+
+/* N is even, TRANSR = 'N', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* N is even, TRANSR = 'N', UPLO = 'U', and TRANS = 'N' */
+
+ i__1 = *n + 1;
+ cherk_("L", "N", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[nk + 2], &i__1);
+ i__1 = *n + 1;
+ cherk_("U", "N", &nk, k, alpha, &a[nk + 1 + a_dim1], lda,
+ beta, &c__[nk + 1], &i__1);
+ i__1 = *n + 1;
+ cgemm_("N", "C", &nk, &nk, k, &calpha, &a[a_dim1 + 1],
+ lda, &a[nk + 1 + a_dim1], lda, &cbeta, &c__[1], &
+ i__1);
+
+ } else {
+
+/* N is even, TRANSR = 'N', UPLO = 'U', and TRANS = 'C' */
+
+ i__1 = *n + 1;
+ cherk_("L", "C", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[nk + 2], &i__1);
+ i__1 = *n + 1;
+ cherk_("U", "C", &nk, k, alpha, &a[(nk + 1) * a_dim1 + 1],
+ lda, beta, &c__[nk + 1], &i__1);
+ i__1 = *n + 1;
+ cgemm_("C", "N", &nk, &nk, k, &calpha, &a[a_dim1 + 1],
+ lda, &a[(nk + 1) * a_dim1 + 1], lda, &cbeta, &c__[
+ 1], &i__1);
+
+ }
+
+ }
+
+ } else {
+
+/* N is even, and TRANSR = 'C' */
+
+ if (lower) {
+
+/* N is even, TRANSR = 'C', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* N is even, TRANSR = 'C', UPLO = 'L', and TRANS = 'N' */
+
+ cherk_("U", "N", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[nk + 1], &nk);
+ cherk_("L", "N", &nk, k, alpha, &a[nk + 1 + a_dim1], lda,
+ beta, &c__[1], &nk);
+ cgemm_("N", "C", &nk, &nk, k, &calpha, &a[a_dim1 + 1],
+ lda, &a[nk + 1 + a_dim1], lda, &cbeta, &c__[(nk +
+ 1) * nk + 1], &nk);
+
+ } else {
+
+/* N is even, TRANSR = 'C', UPLO = 'L', and TRANS = 'C' */
+
+ cherk_("U", "C", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[nk + 1], &nk);
+ cherk_("L", "C", &nk, k, alpha, &a[(nk + 1) * a_dim1 + 1],
+ lda, beta, &c__[1], &nk);
+ cgemm_("C", "N", &nk, &nk, k, &calpha, &a[a_dim1 + 1],
+ lda, &a[(nk + 1) * a_dim1 + 1], lda, &cbeta, &c__[
+ (nk + 1) * nk + 1], &nk);
+
+ }
+
+ } else {
+
+/* N is even, TRANSR = 'C', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* N is even, TRANSR = 'C', UPLO = 'U', and TRANS = 'N' */
+
+ cherk_("U", "N", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[nk * (nk + 1) + 1], &nk);
+ cherk_("L", "N", &nk, k, alpha, &a[nk + 1 + a_dim1], lda,
+ beta, &c__[nk * nk + 1], &nk);
+ cgemm_("N", "C", &nk, &nk, k, &calpha, &a[nk + 1 + a_dim1]
+, lda, &a[a_dim1 + 1], lda, &cbeta, &c__[1], &nk);
+
+ } else {
+
+/* N is even, TRANSR = 'C', UPLO = 'U', and TRANS = 'C' */
+
+ cherk_("U", "C", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[nk * (nk + 1) + 1], &nk);
+ cherk_("L", "C", &nk, k, alpha, &a[(nk + 1) * a_dim1 + 1],
+ lda, beta, &c__[nk * nk + 1], &nk);
+ cgemm_("C", "N", &nk, &nk, k, &calpha, &a[(nk + 1) *
+ a_dim1 + 1], lda, &a[a_dim1 + 1], lda, &cbeta, &
+ c__[1], &nk);
+
+ }
+
+ }
+
+ }
+
+ }
+
+ return 0;
+
+/* End of CHFRK */
+
+} /* chfrk_ */
diff --git a/contrib/libs/clapack/chgeqz.c b/contrib/libs/clapack/chgeqz.c
new file mode 100644
index 0000000000..a728058dec
--- /dev/null
+++ b/contrib/libs/clapack/chgeqz.c
@@ -0,0 +1,1143 @@
+/* chgeqz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int chgeqz_(char *job, char *compq, char *compz, integer *n,
+ integer *ilo, integer *ihi, complex *h__, integer *ldh, complex *t,
+ integer *ldt, complex *alpha, complex *beta, complex *q, integer *ldq,
+ complex *z__, integer *ldz, complex *work, integer *lwork, real *
+ rwork, integer *info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, q_dim1, q_offset, t_dim1, t_offset, z_dim1,
+ z_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+ real r__1, r__2, r__3, r__4, r__5, r__6;
+ complex q__1, q__2, q__3, q__4, q__5, q__6;
+
+ /* Builtin functions */
+ double c_abs(complex *);
+ void r_cnjg(complex *, complex *);
+ double r_imag(complex *);
+ void c_div(complex *, complex *, complex *), pow_ci(complex *, complex *,
+ integer *), c_sqrt(complex *, complex *);
+
+ /* Local variables */
+ real c__;
+ integer j;
+ complex s, t1;
+ integer jc, in;
+ complex u12;
+ integer jr;
+ complex ad11, ad12, ad21, ad22;
+ integer jch;
+ logical ilq, ilz;
+ real ulp;
+ complex abi22;
+ real absb, atol, btol, temp;
+ extern /* Subroutine */ int crot_(integer *, complex *, integer *,
+ complex *, integer *, real *, complex *);
+ real temp2;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ complex ctemp;
+ integer iiter, ilast, jiter;
+ real anorm, bnorm;
+ integer maxit;
+ complex shift;
+ real tempr;
+ complex ctemp2, ctemp3;
+ logical ilazr2;
+ real ascale, bscale;
+ complex signbc;
+ extern doublereal slamch_(char *), clanhs_(char *, integer *,
+ complex *, integer *, real *);
+ extern /* Subroutine */ int claset_(char *, integer *, integer *, complex
+ *, complex *, complex *, integer *), clartg_(complex *,
+ complex *, real *, complex *, complex *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ complex eshift;
+ logical ilschr;
+ integer icompq, ilastm;
+ complex rtdisc;
+ integer ischur;
+ logical ilazro;
+ integer icompz, ifirst, ifrstm, istart;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHGEQZ computes the eigenvalues of a complex matrix pair (H,T), */
+/* where H is an upper Hessenberg matrix and T is upper triangular, */
+/* using the single-shift QZ method. */
+/* Matrix pairs of this type are produced by the reduction to */
+/* generalized upper Hessenberg form of a complex matrix pair (A,B): */
+
+/* A = Q1*H*Z1**H, B = Q1*T*Z1**H, */
+
+/* as computed by CGGHRD. */
+
+/* If JOB='S', then the Hessenberg-triangular pair (H,T) is */
+/* also reduced to generalized Schur form, */
+
+/* H = Q*S*Z**H, T = Q*P*Z**H, */
+
+/* where Q and Z are unitary matrices and S and P are upper triangular. */
+
+/* Optionally, the unitary matrix Q from the generalized Schur */
+/* factorization may be postmultiplied into an input matrix Q1, and the */
+/* unitary matrix Z may be postmultiplied into an input matrix Z1. */
+/* If Q1 and Z1 are the unitary matrices from CGGHRD that reduced */
+/* the matrix pair (A,B) to generalized Hessenberg form, then the output */
+/* matrices Q1*Q and Z1*Z are the unitary factors from the generalized */
+/* Schur factorization of (A,B): */
+
+/* A = (Q1*Q)*S*(Z1*Z)**H, B = (Q1*Q)*P*(Z1*Z)**H. */
+
+/* To avoid overflow, eigenvalues of the matrix pair (H,T) */
+/* (equivalently, of (A,B)) are computed as a pair of complex values */
+/* (alpha,beta). If beta is nonzero, lambda = alpha / beta is an */
+/* eigenvalue of the generalized nonsymmetric eigenvalue problem (GNEP) */
+/* A*x = lambda*B*x */
+/* and if alpha is nonzero, mu = beta / alpha is an eigenvalue of the */
+/* alternate form of the GNEP */
+/* mu*A*y = B*y. */
+/* The values of alpha and beta for the i-th eigenvalue can be read */
+/* directly from the generalized Schur form: alpha = S(i,i), */
+/* beta = P(i,i). */
+
+/* Ref: C.B. Moler & G.W. Stewart, "An Algorithm for Generalized Matrix */
+/* Eigenvalue Problems", SIAM J. Numer. Anal., 10(1973), */
+/* pp. 241--256. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* = 'E': Compute eigenvalues only; */
+/* = 'S': Compute eigenvalues and the Schur form. */
+
+/* COMPQ (input) CHARACTER*1 */
+/* = 'N': Left Schur vectors (Q) are not computed; */
+/* = 'I': Q is initialized to the unit matrix and the matrix Q */
+/* of left Schur vectors of (H,T) is returned; */
+/* = 'V': Q must contain a unitary matrix Q1 on entry and */
+/* the product Q1*Q is returned. */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': Right Schur vectors (Z) are not computed; */
+/* = 'I': Z is initialized to the unit matrix and the matrix Z */
+/* of right Schur vectors of (H,T) is returned; */
+/* = 'V': Z must contain a unitary matrix Z1 on entry and */
+/* the product Z1*Z is returned. */
+
+/* N (input) INTEGER */
+/* The order of the matrices H, T, Q, and Z. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* ILO and IHI mark the rows and columns of H which are in */
+/* Hessenberg form. It is assumed that A is already upper */
+/* triangular in rows and columns 1:ILO-1 and IHI+1:N. */
+/* If N > 0, 1 <= ILO <= IHI <= N; if N = 0, ILO=1 and IHI=0. */
+
+/* H (input/output) COMPLEX array, dimension (LDH, N) */
+/* On entry, the N-by-N upper Hessenberg matrix H. */
+/* On exit, if JOB = 'S', H contains the upper triangular */
+/* matrix S from the generalized Schur factorization. */
+/* If JOB = 'E', the diagonal of H matches that of S, but */
+/* the rest of H is unspecified. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH >= max( 1, N ). */
+
+/* T (input/output) COMPLEX array, dimension (LDT, N) */
+/* On entry, the N-by-N upper triangular matrix T. */
+/* On exit, if JOB = 'S', T contains the upper triangular */
+/* matrix P from the generalized Schur factorization. */
+/* If JOB = 'E', the diagonal of T matches that of P, but */
+/* the rest of T is unspecified. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max( 1, N ). */
+
+/* ALPHA (output) COMPLEX array, dimension (N) */
+/* The complex scalars alpha that define the eigenvalues of */
+/* GNEP. ALPHA(i) = S(i,i) in the generalized Schur */
+/* factorization. */
+
+/* BETA (output) COMPLEX array, dimension (N) */
+/* The real non-negative scalars beta that define the */
+/* eigenvalues of GNEP. BETA(i) = P(i,i) in the generalized */
+/* Schur factorization. */
+
+/* Together, the quantities alpha = ALPHA(j) and beta = BETA(j) */
+/* represent the j-th eigenvalue of the matrix pair (A,B), in */
+/* one of the forms lambda = alpha/beta or mu = beta/alpha. */
+/* Since either lambda or mu may overflow, they should not, */
+/* in general, be computed. */
+
+/* Q (input/output) COMPLEX array, dimension (LDQ, N) */
+/* On entry, if COMPQ = 'V', the unitary matrix Q1 used in the */
+/* reduction of (A,B) to generalized Hessenberg form. */
+/* On exit, if COMPQ = 'I', the unitary matrix of left Schur */
+/* vectors of (H,T), and if COMPQ = 'V', the unitary matrix of */
+/* left Schur vectors of (A,B). */
+/* Not referenced if COMPQ = 'N'. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= 1. */
+/* If COMPQ='V' or 'I', then LDQ >= N. */
+
+/* Z (input/output) COMPLEX array, dimension (LDZ, N) */
+/* On entry, if COMPZ = 'V', the unitary matrix Z1 used in the */
+/* reduction of (A,B) to generalized Hessenberg form. */
+/* On exit, if COMPZ = 'I', the unitary matrix of right Schur */
+/* vectors of (H,T), and if COMPZ = 'V', the unitary matrix of */
+/* right Schur vectors of (A,B). */
+/* Not referenced if COMPZ = 'N'. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1. */
+/* If COMPZ='V' or 'I', then LDZ >= N. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO >= 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* = 1,...,N: the QZ iteration did not converge. (H,T) is not */
+/* in Schur form, but ALPHA(i) and BETA(i), */
+/* i=INFO+1,...,N should be correct. */
+/* = N+1,...,2*N: the shift calculation failed. (H,T) is not */
+/* in Schur form, but ALPHA(i) and BETA(i), */
+/* i=INFO-N+1,...,N should be correct. */
+
+/* Further Details */
+/* =============== */
+
+/* We assume that complex ABS works as long as its value is less than */
+/* overflow. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode JOB, COMPQ, COMPZ */
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ --alpha;
+ --beta;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ if (lsame_(job, "E")) {
+ ilschr = FALSE_;
+ ischur = 1;
+ } else if (lsame_(job, "S")) {
+ ilschr = TRUE_;
+ ischur = 2;
+ } else {
+ ischur = 0;
+ }
+
+ if (lsame_(compq, "N")) {
+ ilq = FALSE_;
+ icompq = 1;
+ } else if (lsame_(compq, "V")) {
+ ilq = TRUE_;
+ icompq = 2;
+ } else if (lsame_(compq, "I")) {
+ ilq = TRUE_;
+ icompq = 3;
+ } else {
+ icompq = 0;
+ }
+
+ if (lsame_(compz, "N")) {
+ ilz = FALSE_;
+ icompz = 1;
+ } else if (lsame_(compz, "V")) {
+ ilz = TRUE_;
+ icompz = 2;
+ } else if (lsame_(compz, "I")) {
+ ilz = TRUE_;
+ icompz = 3;
+ } else {
+ icompz = 0;
+ }
+
+/* Check Argument Values */
+
+ *info = 0;
+ i__1 = max(1,*n);
+ work[1].r = (real) i__1, work[1].i = 0.f;
+ lquery = *lwork == -1;
+ if (ischur == 0) {
+ *info = -1;
+ } else if (icompq == 0) {
+ *info = -2;
+ } else if (icompz == 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ilo < 1) {
+ *info = -5;
+ } else if (*ihi > *n || *ihi < *ilo - 1) {
+ *info = -6;
+ } else if (*ldh < *n) {
+ *info = -8;
+ } else if (*ldt < *n) {
+ *info = -10;
+ } else if (*ldq < 1 || ilq && *ldq < *n) {
+ *info = -14;
+ } else if (*ldz < 1 || ilz && *ldz < *n) {
+ *info = -16;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -18;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHGEQZ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+/* WORK( 1 ) = CMPLX( 1 ) */
+ if (*n <= 0) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+/* Initialize Q and Z */
+
+ if (icompq == 3) {
+ claset_("Full", n, n, &c_b1, &c_b2, &q[q_offset], ldq);
+ }
+ if (icompz == 3) {
+ claset_("Full", n, n, &c_b1, &c_b2, &z__[z_offset], ldz);
+ }
+
+/* Machine Constants */
+
+ in = *ihi + 1 - *ilo;
+ safmin = slamch_("S");
+ ulp = slamch_("E") * slamch_("B");
+ anorm = clanhs_("F", &in, &h__[*ilo + *ilo * h_dim1], ldh, &rwork[1]);
+ bnorm = clanhs_("F", &in, &t[*ilo + *ilo * t_dim1], ldt, &rwork[1]);
+/* Computing MAX */
+ r__1 = safmin, r__2 = ulp * anorm;
+ atol = dmax(r__1,r__2);
+/* Computing MAX */
+ r__1 = safmin, r__2 = ulp * bnorm;
+ btol = dmax(r__1,r__2);
+ ascale = 1.f / dmax(safmin,anorm);
+ bscale = 1.f / dmax(safmin,bnorm);
+
+
+/* Set Eigenvalues IHI+1:N */
+
+ i__1 = *n;
+ for (j = *ihi + 1; j <= i__1; ++j) {
+ absb = c_abs(&t[j + j * t_dim1]);
+ if (absb > safmin) {
+ i__2 = j + j * t_dim1;
+ q__2.r = t[i__2].r / absb, q__2.i = t[i__2].i / absb;
+ r_cnjg(&q__1, &q__2);
+ signbc.r = q__1.r, signbc.i = q__1.i;
+ i__2 = j + j * t_dim1;
+ t[i__2].r = absb, t[i__2].i = 0.f;
+ if (ilschr) {
+ i__2 = j - 1;
+ cscal_(&i__2, &signbc, &t[j * t_dim1 + 1], &c__1);
+ cscal_(&j, &signbc, &h__[j * h_dim1 + 1], &c__1);
+ } else {
+ i__2 = j + j * h_dim1;
+ i__3 = j + j * h_dim1;
+ q__1.r = h__[i__3].r * signbc.r - h__[i__3].i * signbc.i,
+ q__1.i = h__[i__3].r * signbc.i + h__[i__3].i *
+ signbc.r;
+ h__[i__2].r = q__1.r, h__[i__2].i = q__1.i;
+ }
+ if (ilz) {
+ cscal_(n, &signbc, &z__[j * z_dim1 + 1], &c__1);
+ }
+ } else {
+ i__2 = j + j * t_dim1;
+ t[i__2].r = 0.f, t[i__2].i = 0.f;
+ }
+ i__2 = j;
+ i__3 = j + j * h_dim1;
+ alpha[i__2].r = h__[i__3].r, alpha[i__2].i = h__[i__3].i;
+ i__2 = j;
+ i__3 = j + j * t_dim1;
+ beta[i__2].r = t[i__3].r, beta[i__2].i = t[i__3].i;
+/* L10: */
+ }
+
+/* If IHI < ILO, skip QZ steps */
+
+ if (*ihi < *ilo) {
+ goto L190;
+ }
+
+/* MAIN QZ ITERATION LOOP */
+
+/* Initialize dynamic indices */
+
+/* Eigenvalues ILAST+1:N have been found. */
+/* Column operations modify rows IFRSTM:whatever */
+/* Row operations modify columns whatever:ILASTM */
+
+/* If only eigenvalues are being computed, then */
+/* IFRSTM is the row of the last splitting row above row ILAST; */
+/* this is always at least ILO. */
+/* IITER counts iterations since the last eigenvalue was found, */
+/* to tell when to use an extraordinary shift. */
+/* MAXIT is the maximum number of QZ sweeps allowed. */
+
+ ilast = *ihi;
+ if (ilschr) {
+ ifrstm = 1;
+ ilastm = *n;
+ } else {
+ ifrstm = *ilo;
+ ilastm = *ihi;
+ }
+ iiter = 0;
+ eshift.r = 0.f, eshift.i = 0.f;
+ maxit = (*ihi - *ilo + 1) * 30;
+
+ i__1 = maxit;
+ for (jiter = 1; jiter <= i__1; ++jiter) {
+
+/* Check for too many iterations. */
+
+ if (jiter > maxit) {
+ goto L180;
+ }
+
+/* Split the matrix if possible. */
+
+/* Two tests: */
+/* 1: H(j,j-1)=0 or j=ILO */
+/* 2: T(j,j)=0 */
+
+/* Special case: j=ILAST */
+
+ if (ilast == *ilo) {
+ goto L60;
+ } else {
+ i__2 = ilast + (ilast - 1) * h_dim1;
+ if ((r__1 = h__[i__2].r, dabs(r__1)) + (r__2 = r_imag(&h__[ilast
+ + (ilast - 1) * h_dim1]), dabs(r__2)) <= atol) {
+ i__2 = ilast + (ilast - 1) * h_dim1;
+ h__[i__2].r = 0.f, h__[i__2].i = 0.f;
+ goto L60;
+ }
+ }
+
+ if (c_abs(&t[ilast + ilast * t_dim1]) <= btol) {
+ i__2 = ilast + ilast * t_dim1;
+ t[i__2].r = 0.f, t[i__2].i = 0.f;
+ goto L50;
+ }
+
+/* General case: j<ILAST */
+
+ i__2 = *ilo;
+ for (j = ilast - 1; j >= i__2; --j) {
+
+/* Test 1: for H(j,j-1)=0 or j=ILO */
+
+ if (j == *ilo) {
+ ilazro = TRUE_;
+ } else {
+ i__3 = j + (j - 1) * h_dim1;
+ if ((r__1 = h__[i__3].r, dabs(r__1)) + (r__2 = r_imag(&h__[j
+ + (j - 1) * h_dim1]), dabs(r__2)) <= atol) {
+ i__3 = j + (j - 1) * h_dim1;
+ h__[i__3].r = 0.f, h__[i__3].i = 0.f;
+ ilazro = TRUE_;
+ } else {
+ ilazro = FALSE_;
+ }
+ }
+
+/* Test 2: for T(j,j)=0 */
+
+ if (c_abs(&t[j + j * t_dim1]) < btol) {
+ i__3 = j + j * t_dim1;
+ t[i__3].r = 0.f, t[i__3].i = 0.f;
+
+/* Test 1a: Check for 2 consecutive small subdiagonals in A */
+
+ ilazr2 = FALSE_;
+ if (! ilazro) {
+ i__3 = j + (j - 1) * h_dim1;
+ i__4 = j + 1 + j * h_dim1;
+ i__5 = j + j * h_dim1;
+ if (((r__1 = h__[i__3].r, dabs(r__1)) + (r__2 = r_imag(&
+ h__[j + (j - 1) * h_dim1]), dabs(r__2))) * (
+ ascale * ((r__3 = h__[i__4].r, dabs(r__3)) + (
+ r__4 = r_imag(&h__[j + 1 + j * h_dim1]), dabs(
+ r__4)))) <= ((r__5 = h__[i__5].r, dabs(r__5)) + (
+ r__6 = r_imag(&h__[j + j * h_dim1]), dabs(r__6)))
+ * (ascale * atol)) {
+ ilazr2 = TRUE_;
+ }
+ }
+
+/* If both tests pass (1 & 2), i.e., the leading diagonal */
+/* element of B in the block is zero, split a 1x1 block off */
+/* at the top. (I.e., at the J-th row/column) The leading */
+/* diagonal element of the remainder can also be zero, so */
+/* this may have to be done repeatedly. */
+
+ if (ilazro || ilazr2) {
+ i__3 = ilast - 1;
+ for (jch = j; jch <= i__3; ++jch) {
+ i__4 = jch + jch * h_dim1;
+ ctemp.r = h__[i__4].r, ctemp.i = h__[i__4].i;
+ clartg_(&ctemp, &h__[jch + 1 + jch * h_dim1], &c__, &
+ s, &h__[jch + jch * h_dim1]);
+ i__4 = jch + 1 + jch * h_dim1;
+ h__[i__4].r = 0.f, h__[i__4].i = 0.f;
+ i__4 = ilastm - jch;
+ crot_(&i__4, &h__[jch + (jch + 1) * h_dim1], ldh, &
+ h__[jch + 1 + (jch + 1) * h_dim1], ldh, &c__,
+ &s);
+ i__4 = ilastm - jch;
+ crot_(&i__4, &t[jch + (jch + 1) * t_dim1], ldt, &t[
+ jch + 1 + (jch + 1) * t_dim1], ldt, &c__, &s);
+ if (ilq) {
+ r_cnjg(&q__1, &s);
+ crot_(n, &q[jch * q_dim1 + 1], &c__1, &q[(jch + 1)
+ * q_dim1 + 1], &c__1, &c__, &q__1);
+ }
+ if (ilazr2) {
+ i__4 = jch + (jch - 1) * h_dim1;
+ i__5 = jch + (jch - 1) * h_dim1;
+ q__1.r = c__ * h__[i__5].r, q__1.i = c__ * h__[
+ i__5].i;
+ h__[i__4].r = q__1.r, h__[i__4].i = q__1.i;
+ }
+ ilazr2 = FALSE_;
+ i__4 = jch + 1 + (jch + 1) * t_dim1;
+ if ((r__1 = t[i__4].r, dabs(r__1)) + (r__2 = r_imag(&
+ t[jch + 1 + (jch + 1) * t_dim1]), dabs(r__2))
+ >= btol) {
+ if (jch + 1 >= ilast) {
+ goto L60;
+ } else {
+ ifirst = jch + 1;
+ goto L70;
+ }
+ }
+ i__4 = jch + 1 + (jch + 1) * t_dim1;
+ t[i__4].r = 0.f, t[i__4].i = 0.f;
+/* L20: */
+ }
+ goto L50;
+ } else {
+
+/* Only test 2 passed -- chase the zero to T(ILAST,ILAST) */
+/* Then process as in the case T(ILAST,ILAST)=0 */
+
+ i__3 = ilast - 1;
+ for (jch = j; jch <= i__3; ++jch) {
+ i__4 = jch + (jch + 1) * t_dim1;
+ ctemp.r = t[i__4].r, ctemp.i = t[i__4].i;
+ clartg_(&ctemp, &t[jch + 1 + (jch + 1) * t_dim1], &
+ c__, &s, &t[jch + (jch + 1) * t_dim1]);
+ i__4 = jch + 1 + (jch + 1) * t_dim1;
+ t[i__4].r = 0.f, t[i__4].i = 0.f;
+ if (jch < ilastm - 1) {
+ i__4 = ilastm - jch - 1;
+ crot_(&i__4, &t[jch + (jch + 2) * t_dim1], ldt, &
+ t[jch + 1 + (jch + 2) * t_dim1], ldt, &
+ c__, &s);
+ }
+ i__4 = ilastm - jch + 2;
+ crot_(&i__4, &h__[jch + (jch - 1) * h_dim1], ldh, &
+ h__[jch + 1 + (jch - 1) * h_dim1], ldh, &c__,
+ &s);
+ if (ilq) {
+ r_cnjg(&q__1, &s);
+ crot_(n, &q[jch * q_dim1 + 1], &c__1, &q[(jch + 1)
+ * q_dim1 + 1], &c__1, &c__, &q__1);
+ }
+ i__4 = jch + 1 + jch * h_dim1;
+ ctemp.r = h__[i__4].r, ctemp.i = h__[i__4].i;
+ clartg_(&ctemp, &h__[jch + 1 + (jch - 1) * h_dim1], &
+ c__, &s, &h__[jch + 1 + jch * h_dim1]);
+ i__4 = jch + 1 + (jch - 1) * h_dim1;
+ h__[i__4].r = 0.f, h__[i__4].i = 0.f;
+ i__4 = jch + 1 - ifrstm;
+ crot_(&i__4, &h__[ifrstm + jch * h_dim1], &c__1, &h__[
+ ifrstm + (jch - 1) * h_dim1], &c__1, &c__, &s)
+ ;
+ i__4 = jch - ifrstm;
+ crot_(&i__4, &t[ifrstm + jch * t_dim1], &c__1, &t[
+ ifrstm + (jch - 1) * t_dim1], &c__1, &c__, &s)
+ ;
+ if (ilz) {
+ crot_(n, &z__[jch * z_dim1 + 1], &c__1, &z__[(jch
+ - 1) * z_dim1 + 1], &c__1, &c__, &s);
+ }
+/* L30: */
+ }
+ goto L50;
+ }
+ } else if (ilazro) {
+
+/* Only test 1 passed -- work on J:ILAST */
+
+ ifirst = j;
+ goto L70;
+ }
+
+/* Neither test passed -- try next J */
+
+/* L40: */
+ }
+
+/* (Drop-through is "impossible") */
+
+ *info = (*n << 1) + 1;
+ goto L210;
+
+/* T(ILAST,ILAST)=0 -- clear H(ILAST,ILAST-1) to split off a */
+/* 1x1 block. */
+
+L50:
+ i__2 = ilast + ilast * h_dim1;
+ ctemp.r = h__[i__2].r, ctemp.i = h__[i__2].i;
+ clartg_(&ctemp, &h__[ilast + (ilast - 1) * h_dim1], &c__, &s, &h__[
+ ilast + ilast * h_dim1]);
+ i__2 = ilast + (ilast - 1) * h_dim1;
+ h__[i__2].r = 0.f, h__[i__2].i = 0.f;
+ i__2 = ilast - ifrstm;
+ crot_(&i__2, &h__[ifrstm + ilast * h_dim1], &c__1, &h__[ifrstm + (
+ ilast - 1) * h_dim1], &c__1, &c__, &s);
+ i__2 = ilast - ifrstm;
+ crot_(&i__2, &t[ifrstm + ilast * t_dim1], &c__1, &t[ifrstm + (ilast -
+ 1) * t_dim1], &c__1, &c__, &s);
+ if (ilz) {
+ crot_(n, &z__[ilast * z_dim1 + 1], &c__1, &z__[(ilast - 1) *
+ z_dim1 + 1], &c__1, &c__, &s);
+ }
+
+/* H(ILAST,ILAST-1)=0 -- Standardize B, set ALPHA and BETA */
+
+L60:
+ absb = c_abs(&t[ilast + ilast * t_dim1]);
+ if (absb > safmin) {
+ i__2 = ilast + ilast * t_dim1;
+ q__2.r = t[i__2].r / absb, q__2.i = t[i__2].i / absb;
+ r_cnjg(&q__1, &q__2);
+ signbc.r = q__1.r, signbc.i = q__1.i;
+ i__2 = ilast + ilast * t_dim1;
+ t[i__2].r = absb, t[i__2].i = 0.f;
+ if (ilschr) {
+ i__2 = ilast - ifrstm;
+ cscal_(&i__2, &signbc, &t[ifrstm + ilast * t_dim1], &c__1);
+ i__2 = ilast + 1 - ifrstm;
+ cscal_(&i__2, &signbc, &h__[ifrstm + ilast * h_dim1], &c__1);
+ } else {
+ i__2 = ilast + ilast * h_dim1;
+ i__3 = ilast + ilast * h_dim1;
+ q__1.r = h__[i__3].r * signbc.r - h__[i__3].i * signbc.i,
+ q__1.i = h__[i__3].r * signbc.i + h__[i__3].i *
+ signbc.r;
+ h__[i__2].r = q__1.r, h__[i__2].i = q__1.i;
+ }
+ if (ilz) {
+ cscal_(n, &signbc, &z__[ilast * z_dim1 + 1], &c__1);
+ }
+ } else {
+ i__2 = ilast + ilast * t_dim1;
+ t[i__2].r = 0.f, t[i__2].i = 0.f;
+ }
+ i__2 = ilast;
+ i__3 = ilast + ilast * h_dim1;
+ alpha[i__2].r = h__[i__3].r, alpha[i__2].i = h__[i__3].i;
+ i__2 = ilast;
+ i__3 = ilast + ilast * t_dim1;
+ beta[i__2].r = t[i__3].r, beta[i__2].i = t[i__3].i;
+
+/* Go to next block -- exit if finished. */
+
+ --ilast;
+ if (ilast < *ilo) {
+ goto L190;
+ }
+
+/* Reset counters */
+
+ iiter = 0;
+ eshift.r = 0.f, eshift.i = 0.f;
+ if (! ilschr) {
+ ilastm = ilast;
+ if (ifrstm > ilast) {
+ ifrstm = *ilo;
+ }
+ }
+ goto L160;
+
+/* QZ step */
+
+/* This iteration only involves rows/columns IFIRST:ILAST. We */
+/* assume IFIRST < ILAST, and that the diagonal of B is non-zero. */
+
+L70:
+ ++iiter;
+ if (! ilschr) {
+ ifrstm = ifirst;
+ }
+
+/* Compute the Shift. */
+
+/* At this point, IFIRST < ILAST, and the diagonal elements of */
+/* T(IFIRST:ILAST,IFIRST:ILAST) are larger than BTOL (in */
+/* magnitude) */
+
+ if (iiter / 10 * 10 != iiter) {
+
+/* The Wilkinson shift (AEP p.512), i.e., the eigenvalue of */
+/* the bottom-right 2x2 block of A inv(B) which is nearest to */
+/* the bottom-right element. */
+
+/* We factor B as U*D, where U has unit diagonals, and */
+/* compute (A*inv(D))*inv(U). */
+
+ i__2 = ilast - 1 + ilast * t_dim1;
+ q__2.r = bscale * t[i__2].r, q__2.i = bscale * t[i__2].i;
+ i__3 = ilast + ilast * t_dim1;
+ q__3.r = bscale * t[i__3].r, q__3.i = bscale * t[i__3].i;
+ c_div(&q__1, &q__2, &q__3);
+ u12.r = q__1.r, u12.i = q__1.i;
+ i__2 = ilast - 1 + (ilast - 1) * h_dim1;
+ q__2.r = ascale * h__[i__2].r, q__2.i = ascale * h__[i__2].i;
+ i__3 = ilast - 1 + (ilast - 1) * t_dim1;
+ q__3.r = bscale * t[i__3].r, q__3.i = bscale * t[i__3].i;
+ c_div(&q__1, &q__2, &q__3);
+ ad11.r = q__1.r, ad11.i = q__1.i;
+ i__2 = ilast + (ilast - 1) * h_dim1;
+ q__2.r = ascale * h__[i__2].r, q__2.i = ascale * h__[i__2].i;
+ i__3 = ilast - 1 + (ilast - 1) * t_dim1;
+ q__3.r = bscale * t[i__3].r, q__3.i = bscale * t[i__3].i;
+ c_div(&q__1, &q__2, &q__3);
+ ad21.r = q__1.r, ad21.i = q__1.i;
+ i__2 = ilast - 1 + ilast * h_dim1;
+ q__2.r = ascale * h__[i__2].r, q__2.i = ascale * h__[i__2].i;
+ i__3 = ilast + ilast * t_dim1;
+ q__3.r = bscale * t[i__3].r, q__3.i = bscale * t[i__3].i;
+ c_div(&q__1, &q__2, &q__3);
+ ad12.r = q__1.r, ad12.i = q__1.i;
+ i__2 = ilast + ilast * h_dim1;
+ q__2.r = ascale * h__[i__2].r, q__2.i = ascale * h__[i__2].i;
+ i__3 = ilast + ilast * t_dim1;
+ q__3.r = bscale * t[i__3].r, q__3.i = bscale * t[i__3].i;
+ c_div(&q__1, &q__2, &q__3);
+ ad22.r = q__1.r, ad22.i = q__1.i;
+ q__2.r = u12.r * ad21.r - u12.i * ad21.i, q__2.i = u12.r * ad21.i
+ + u12.i * ad21.r;
+ q__1.r = ad22.r - q__2.r, q__1.i = ad22.i - q__2.i;
+ abi22.r = q__1.r, abi22.i = q__1.i;
+
+ q__2.r = ad11.r + abi22.r, q__2.i = ad11.i + abi22.i;
+ q__1.r = q__2.r * .5f, q__1.i = q__2.i * .5f;
+ t1.r = q__1.r, t1.i = q__1.i;
+ pow_ci(&q__4, &t1, &c__2);
+ q__5.r = ad12.r * ad21.r - ad12.i * ad21.i, q__5.i = ad12.r *
+ ad21.i + ad12.i * ad21.r;
+ q__3.r = q__4.r + q__5.r, q__3.i = q__4.i + q__5.i;
+ q__6.r = ad11.r * ad22.r - ad11.i * ad22.i, q__6.i = ad11.r *
+ ad22.i + ad11.i * ad22.r;
+ q__2.r = q__3.r - q__6.r, q__2.i = q__3.i - q__6.i;
+ c_sqrt(&q__1, &q__2);
+ rtdisc.r = q__1.r, rtdisc.i = q__1.i;
+ q__1.r = t1.r - abi22.r, q__1.i = t1.i - abi22.i;
+ q__2.r = t1.r - abi22.r, q__2.i = t1.i - abi22.i;
+ temp = q__1.r * rtdisc.r + r_imag(&q__2) * r_imag(&rtdisc);
+ if (temp <= 0.f) {
+ q__1.r = t1.r + rtdisc.r, q__1.i = t1.i + rtdisc.i;
+ shift.r = q__1.r, shift.i = q__1.i;
+ } else {
+ q__1.r = t1.r - rtdisc.r, q__1.i = t1.i - rtdisc.i;
+ shift.r = q__1.r, shift.i = q__1.i;
+ }
+ } else {
+
+/* Exceptional shift. Chosen for no particularly good reason. */
+
+ i__2 = ilast - 1 + ilast * h_dim1;
+ q__4.r = ascale * h__[i__2].r, q__4.i = ascale * h__[i__2].i;
+ i__3 = ilast - 1 + (ilast - 1) * t_dim1;
+ q__5.r = bscale * t[i__3].r, q__5.i = bscale * t[i__3].i;
+ c_div(&q__3, &q__4, &q__5);
+ r_cnjg(&q__2, &q__3);
+ q__1.r = eshift.r + q__2.r, q__1.i = eshift.i + q__2.i;
+ eshift.r = q__1.r, eshift.i = q__1.i;
+ shift.r = eshift.r, shift.i = eshift.i;
+ }
+
+/* Now check for two consecutive small subdiagonals. */
+
+ i__2 = ifirst + 1;
+ for (j = ilast - 1; j >= i__2; --j) {
+ istart = j;
+ i__3 = j + j * h_dim1;
+ q__2.r = ascale * h__[i__3].r, q__2.i = ascale * h__[i__3].i;
+ i__4 = j + j * t_dim1;
+ q__4.r = bscale * t[i__4].r, q__4.i = bscale * t[i__4].i;
+ q__3.r = shift.r * q__4.r - shift.i * q__4.i, q__3.i = shift.r *
+ q__4.i + shift.i * q__4.r;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+ ctemp.r = q__1.r, ctemp.i = q__1.i;
+ temp = (r__1 = ctemp.r, dabs(r__1)) + (r__2 = r_imag(&ctemp),
+ dabs(r__2));
+ i__3 = j + 1 + j * h_dim1;
+ temp2 = ascale * ((r__1 = h__[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&h__[j + 1 + j * h_dim1]), dabs(r__2)));
+ tempr = dmax(temp,temp2);
+ if (tempr < 1.f && tempr != 0.f) {
+ temp /= tempr;
+ temp2 /= tempr;
+ }
+ i__3 = j + (j - 1) * h_dim1;
+ if (((r__1 = h__[i__3].r, dabs(r__1)) + (r__2 = r_imag(&h__[j + (
+ j - 1) * h_dim1]), dabs(r__2))) * temp2 <= temp * atol) {
+ goto L90;
+ }
+/* L80: */
+ }
+
+ istart = ifirst;
+ i__2 = ifirst + ifirst * h_dim1;
+ q__2.r = ascale * h__[i__2].r, q__2.i = ascale * h__[i__2].i;
+ i__3 = ifirst + ifirst * t_dim1;
+ q__4.r = bscale * t[i__3].r, q__4.i = bscale * t[i__3].i;
+ q__3.r = shift.r * q__4.r - shift.i * q__4.i, q__3.i = shift.r *
+ q__4.i + shift.i * q__4.r;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+ ctemp.r = q__1.r, ctemp.i = q__1.i;
+L90:
+
+/* Do an implicit-shift QZ sweep. */
+
+/* Initial Q */
+
+ i__2 = istart + 1 + istart * h_dim1;
+ q__1.r = ascale * h__[i__2].r, q__1.i = ascale * h__[i__2].i;
+ ctemp2.r = q__1.r, ctemp2.i = q__1.i;
+ clartg_(&ctemp, &ctemp2, &c__, &s, &ctemp3);
+
+/* Sweep */
+
+ i__2 = ilast - 1;
+ for (j = istart; j <= i__2; ++j) {
+ if (j > istart) {
+ i__3 = j + (j - 1) * h_dim1;
+ ctemp.r = h__[i__3].r, ctemp.i = h__[i__3].i;
+ clartg_(&ctemp, &h__[j + 1 + (j - 1) * h_dim1], &c__, &s, &
+ h__[j + (j - 1) * h_dim1]);
+ i__3 = j + 1 + (j - 1) * h_dim1;
+ h__[i__3].r = 0.f, h__[i__3].i = 0.f;
+ }
+
+ i__3 = ilastm;
+ for (jc = j; jc <= i__3; ++jc) {
+ i__4 = j + jc * h_dim1;
+ q__2.r = c__ * h__[i__4].r, q__2.i = c__ * h__[i__4].i;
+ i__5 = j + 1 + jc * h_dim1;
+ q__3.r = s.r * h__[i__5].r - s.i * h__[i__5].i, q__3.i = s.r *
+ h__[i__5].i + s.i * h__[i__5].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ ctemp.r = q__1.r, ctemp.i = q__1.i;
+ i__4 = j + 1 + jc * h_dim1;
+ r_cnjg(&q__4, &s);
+ q__3.r = -q__4.r, q__3.i = -q__4.i;
+ i__5 = j + jc * h_dim1;
+ q__2.r = q__3.r * h__[i__5].r - q__3.i * h__[i__5].i, q__2.i =
+ q__3.r * h__[i__5].i + q__3.i * h__[i__5].r;
+ i__6 = j + 1 + jc * h_dim1;
+ q__5.r = c__ * h__[i__6].r, q__5.i = c__ * h__[i__6].i;
+ q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+ h__[i__4].r = q__1.r, h__[i__4].i = q__1.i;
+ i__4 = j + jc * h_dim1;
+ h__[i__4].r = ctemp.r, h__[i__4].i = ctemp.i;
+ i__4 = j + jc * t_dim1;
+ q__2.r = c__ * t[i__4].r, q__2.i = c__ * t[i__4].i;
+ i__5 = j + 1 + jc * t_dim1;
+ q__3.r = s.r * t[i__5].r - s.i * t[i__5].i, q__3.i = s.r * t[
+ i__5].i + s.i * t[i__5].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ ctemp2.r = q__1.r, ctemp2.i = q__1.i;
+ i__4 = j + 1 + jc * t_dim1;
+ r_cnjg(&q__4, &s);
+ q__3.r = -q__4.r, q__3.i = -q__4.i;
+ i__5 = j + jc * t_dim1;
+ q__2.r = q__3.r * t[i__5].r - q__3.i * t[i__5].i, q__2.i =
+ q__3.r * t[i__5].i + q__3.i * t[i__5].r;
+ i__6 = j + 1 + jc * t_dim1;
+ q__5.r = c__ * t[i__6].r, q__5.i = c__ * t[i__6].i;
+ q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+ t[i__4].r = q__1.r, t[i__4].i = q__1.i;
+ i__4 = j + jc * t_dim1;
+ t[i__4].r = ctemp2.r, t[i__4].i = ctemp2.i;
+/* L100: */
+ }
+ if (ilq) {
+ i__3 = *n;
+ for (jr = 1; jr <= i__3; ++jr) {
+ i__4 = jr + j * q_dim1;
+ q__2.r = c__ * q[i__4].r, q__2.i = c__ * q[i__4].i;
+ r_cnjg(&q__4, &s);
+ i__5 = jr + (j + 1) * q_dim1;
+ q__3.r = q__4.r * q[i__5].r - q__4.i * q[i__5].i, q__3.i =
+ q__4.r * q[i__5].i + q__4.i * q[i__5].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ ctemp.r = q__1.r, ctemp.i = q__1.i;
+ i__4 = jr + (j + 1) * q_dim1;
+ q__3.r = -s.r, q__3.i = -s.i;
+ i__5 = jr + j * q_dim1;
+ q__2.r = q__3.r * q[i__5].r - q__3.i * q[i__5].i, q__2.i =
+ q__3.r * q[i__5].i + q__3.i * q[i__5].r;
+ i__6 = jr + (j + 1) * q_dim1;
+ q__4.r = c__ * q[i__6].r, q__4.i = c__ * q[i__6].i;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ q[i__4].r = q__1.r, q[i__4].i = q__1.i;
+ i__4 = jr + j * q_dim1;
+ q[i__4].r = ctemp.r, q[i__4].i = ctemp.i;
+/* L110: */
+ }
+ }
+
+ i__3 = j + 1 + (j + 1) * t_dim1;
+ ctemp.r = t[i__3].r, ctemp.i = t[i__3].i;
+ clartg_(&ctemp, &t[j + 1 + j * t_dim1], &c__, &s, &t[j + 1 + (j +
+ 1) * t_dim1]);
+ i__3 = j + 1 + j * t_dim1;
+ t[i__3].r = 0.f, t[i__3].i = 0.f;
+
+/* Computing MIN */
+ i__4 = j + 2;
+ i__3 = min(i__4,ilast);
+ for (jr = ifrstm; jr <= i__3; ++jr) {
+ i__4 = jr + (j + 1) * h_dim1;
+ q__2.r = c__ * h__[i__4].r, q__2.i = c__ * h__[i__4].i;
+ i__5 = jr + j * h_dim1;
+ q__3.r = s.r * h__[i__5].r - s.i * h__[i__5].i, q__3.i = s.r *
+ h__[i__5].i + s.i * h__[i__5].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ ctemp.r = q__1.r, ctemp.i = q__1.i;
+ i__4 = jr + j * h_dim1;
+ r_cnjg(&q__4, &s);
+ q__3.r = -q__4.r, q__3.i = -q__4.i;
+ i__5 = jr + (j + 1) * h_dim1;
+ q__2.r = q__3.r * h__[i__5].r - q__3.i * h__[i__5].i, q__2.i =
+ q__3.r * h__[i__5].i + q__3.i * h__[i__5].r;
+ i__6 = jr + j * h_dim1;
+ q__5.r = c__ * h__[i__6].r, q__5.i = c__ * h__[i__6].i;
+ q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+ h__[i__4].r = q__1.r, h__[i__4].i = q__1.i;
+ i__4 = jr + (j + 1) * h_dim1;
+ h__[i__4].r = ctemp.r, h__[i__4].i = ctemp.i;
+/* L120: */
+ }
+ i__3 = j;
+ for (jr = ifrstm; jr <= i__3; ++jr) {
+ i__4 = jr + (j + 1) * t_dim1;
+ q__2.r = c__ * t[i__4].r, q__2.i = c__ * t[i__4].i;
+ i__5 = jr + j * t_dim1;
+ q__3.r = s.r * t[i__5].r - s.i * t[i__5].i, q__3.i = s.r * t[
+ i__5].i + s.i * t[i__5].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ ctemp.r = q__1.r, ctemp.i = q__1.i;
+ i__4 = jr + j * t_dim1;
+ r_cnjg(&q__4, &s);
+ q__3.r = -q__4.r, q__3.i = -q__4.i;
+ i__5 = jr + (j + 1) * t_dim1;
+ q__2.r = q__3.r * t[i__5].r - q__3.i * t[i__5].i, q__2.i =
+ q__3.r * t[i__5].i + q__3.i * t[i__5].r;
+ i__6 = jr + j * t_dim1;
+ q__5.r = c__ * t[i__6].r, q__5.i = c__ * t[i__6].i;
+ q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+ t[i__4].r = q__1.r, t[i__4].i = q__1.i;
+ i__4 = jr + (j + 1) * t_dim1;
+ t[i__4].r = ctemp.r, t[i__4].i = ctemp.i;
+/* L130: */
+ }
+ if (ilz) {
+ i__3 = *n;
+ for (jr = 1; jr <= i__3; ++jr) {
+ i__4 = jr + (j + 1) * z_dim1;
+ q__2.r = c__ * z__[i__4].r, q__2.i = c__ * z__[i__4].i;
+ i__5 = jr + j * z_dim1;
+ q__3.r = s.r * z__[i__5].r - s.i * z__[i__5].i, q__3.i =
+ s.r * z__[i__5].i + s.i * z__[i__5].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ ctemp.r = q__1.r, ctemp.i = q__1.i;
+ i__4 = jr + j * z_dim1;
+ r_cnjg(&q__4, &s);
+ q__3.r = -q__4.r, q__3.i = -q__4.i;
+ i__5 = jr + (j + 1) * z_dim1;
+ q__2.r = q__3.r * z__[i__5].r - q__3.i * z__[i__5].i,
+ q__2.i = q__3.r * z__[i__5].i + q__3.i * z__[i__5]
+ .r;
+ i__6 = jr + j * z_dim1;
+ q__5.r = c__ * z__[i__6].r, q__5.i = c__ * z__[i__6].i;
+ q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+ z__[i__4].r = q__1.r, z__[i__4].i = q__1.i;
+ i__4 = jr + (j + 1) * z_dim1;
+ z__[i__4].r = ctemp.r, z__[i__4].i = ctemp.i;
+/* L140: */
+ }
+ }
+/* L150: */
+ }
+
+L160:
+
+/* L170: */
+ ;
+ }
+
+/* Drop-through = non-convergence */
+
+L180:
+ *info = ilast;
+ goto L210;
+
+/* Successful completion of all QZ steps */
+
+L190:
+
+/* Set Eigenvalues 1:ILO-1 */
+
+ i__1 = *ilo - 1;
+ for (j = 1; j <= i__1; ++j) {
+ absb = c_abs(&t[j + j * t_dim1]);
+ if (absb > safmin) {
+ i__2 = j + j * t_dim1;
+ q__2.r = t[i__2].r / absb, q__2.i = t[i__2].i / absb;
+ r_cnjg(&q__1, &q__2);
+ signbc.r = q__1.r, signbc.i = q__1.i;
+ i__2 = j + j * t_dim1;
+ t[i__2].r = absb, t[i__2].i = 0.f;
+ if (ilschr) {
+ i__2 = j - 1;
+ cscal_(&i__2, &signbc, &t[j * t_dim1 + 1], &c__1);
+ cscal_(&j, &signbc, &h__[j * h_dim1 + 1], &c__1);
+ } else {
+ i__2 = j + j * h_dim1;
+ i__3 = j + j * h_dim1;
+ q__1.r = h__[i__3].r * signbc.r - h__[i__3].i * signbc.i,
+ q__1.i = h__[i__3].r * signbc.i + h__[i__3].i *
+ signbc.r;
+ h__[i__2].r = q__1.r, h__[i__2].i = q__1.i;
+ }
+ if (ilz) {
+ cscal_(n, &signbc, &z__[j * z_dim1 + 1], &c__1);
+ }
+ } else {
+ i__2 = j + j * t_dim1;
+ t[i__2].r = 0.f, t[i__2].i = 0.f;
+ }
+ i__2 = j;
+ i__3 = j + j * h_dim1;
+ alpha[i__2].r = h__[i__3].r, alpha[i__2].i = h__[i__3].i;
+ i__2 = j;
+ i__3 = j + j * t_dim1;
+ beta[i__2].r = t[i__3].r, beta[i__2].i = t[i__3].i;
+/* L200: */
+ }
+
+/* Normal Termination */
+
+ *info = 0;
+
+/* Exit (other than argument error) -- return optimal workspace size */
+
+L210:
+ q__1.r = (real) (*n), q__1.i = 0.f;
+ work[1].r = q__1.r, work[1].i = q__1.i;
+ return 0;
+
+/* End of CHGEQZ */
+
+} /* chgeqz_ */
diff --git a/contrib/libs/clapack/chla_transtype.c b/contrib/libs/clapack/chla_transtype.c
new file mode 100644
index 0000000000..f616fbf174
--- /dev/null
+++ b/contrib/libs/clapack/chla_transtype.c
@@ -0,0 +1,62 @@
+/* chla_transtype.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Character */ VOID chla_transtype__(char *ret_val, ftnlen ret_val_len,
+	integer *trans)
+{
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     October 2008 */
+/*     .. Scalar Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  This subroutine translates from a BLAST-specified integer constant to */
+/*  the character string specifying a transposition operation. */
+
+/*  CHLA_TRANSTYPE returns an CHARACTER*1.  If CHLA_TRANSTYPE is 'X', */
+/*  then input is not an integer indicating a transposition operator. */
+/*  Otherwise CHLA_TRANSTYPE returns the constant value corresponding to */
+/*  TRANS. */
+
+/*  Arguments */
+/*  ========= */
+/*  TRANS   (input) INTEGER */
+/*          Specifies the form of the system of equations: */
+/*          = BLAS_NO_TRANS   = 111 :  No Transpose */
+/*          = BLAS_TRANS     = 112 :  Transpose */
+/*          = BLAS_CONJ_TRANS = 113 :  Conjugate Transpose */
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     The CHARACTER*1 result is written through RET_VAL (f2c's calling */
+/*     convention for Fortran character functions); RET_VAL_LEN carries */
+/*     the result length and is not otherwise used here. */
+    if (*trans == 111) {
+	*(unsigned char *)ret_val = 'N';
+    } else if (*trans == 112) {
+	*(unsigned char *)ret_val = 'T';
+    } else if (*trans == 113) {
+	*(unsigned char *)ret_val = 'C';
+    } else {
+	*(unsigned char *)ret_val = 'X';
+    }
+    return ;
+
+/*  End of CHLA_TRANSTYPE */
+
+} /* chla_transtype__ */
diff --git a/contrib/libs/clapack/chpcon.c b/contrib/libs/clapack/chpcon.c
new file mode 100644
index 0000000000..8fdf9f3500
--- /dev/null
+++ b/contrib/libs/clapack/chpcon.c
@@ -0,0 +1,195 @@
+/* chpcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int chpcon_(char *uplo, integer *n, complex *ap, integer *
+	ipiv, real *anorm, real *rcond, complex *work, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer i__, ip, kase;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    logical upper;
+    extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+	    *, integer *, integer *), xerbla_(char *, integer *);
+    real ainvnm;
+    extern /* Subroutine */ int chptrs_(char *, integer *, integer *, complex
+	    *, integer *, complex *, integer *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CHPCON estimates the reciprocal of the condition number of a complex */
+/*  Hermitian packed matrix A using the factorization A = U*D*U**H or */
+/*  A = L*D*L**H computed by CHPTRF. */
+
+/*  An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/*  condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the details of the factorization are stored */
+/*          as an upper or lower triangular matrix. */
+/*          = 'U':  Upper triangular, form is A = U*D*U**H; */
+/*          = 'L':  Lower triangular, form is A = L*D*L**H. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  AP      (input) COMPLEX array, dimension (N*(N+1)/2) */
+/*          The block diagonal matrix D and the multipliers used to */
+/*          obtain the factor U or L as computed by CHPTRF, stored as a */
+/*          packed triangular matrix. */
+
+/*  IPIV    (input) INTEGER array, dimension (N) */
+/*          Details of the interchanges and the block structure of D */
+/*          as determined by CHPTRF. */
+
+/*  ANORM   (input) REAL */
+/*          The 1-norm of the original matrix A. */
+
+/*  RCOND   (output) REAL */
+/*          The reciprocal of the condition number of the matrix A, */
+/*          computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/*          estimate of the 1-norm of inv(A) computed in this routine. */
+
+/*  WORK    (workspace) COMPLEX array, dimension (2*N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --work;
+    --ipiv;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*anorm < 0.f) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CHPCON", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    *rcond = 0.f;
+    if (*n == 0) {
+	*rcond = 1.f;
+	return 0;
+    } else if (*anorm <= 0.f) {
+	return 0;
+    }
+
+/*     Check that the diagonal matrix D is nonsingular. */
+/*     A zero 1-by-1 pivot (IPIV(I) > 0) makes D, and hence A, exactly */
+/*     singular, so RCOND is left at 0 and we return early. */
+
+    if (upper) {
+
+/*        Upper triangular storage: examine D from bottom to top */
+
+/*        IP is the packed-storage index of the diagonal entry D(I,I); */
+/*        stepping by -I moves it to D(I-1,I-1). */
+	ip = *n * (*n + 1) / 2;
+	for (i__ = *n; i__ >= 1; --i__) {
+	    i__1 = ip;
+	    if (ipiv[i__] > 0 && (ap[i__1].r == 0.f && ap[i__1].i == 0.f)) {
+		return 0;
+	    }
+	    ip -= i__;
+/* L10: */
+	}
+    } else {
+
+/*        Lower triangular storage: examine D from top to bottom. */
+
+/*        Here the step N-I+1 advances IP to the next diagonal entry. */
+	ip = 1;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = ip;
+	    if (ipiv[i__] > 0 && (ap[i__2].r == 0.f && ap[i__2].i == 0.f)) {
+		return 0;
+	    }
+	    ip = ip + *n - i__ + 1;
+/* L20: */
+	}
+    }
+
+/*     Estimate the 1-norm of the inverse. */
+
+/*     CLACN2 is a reverse-communication estimator: each nonzero KASE */
+/*     asks us to apply inv(A) to the vector in WORK(1:N), which CHPTRS */
+/*     supplies from the CHPTRF factorization held in AP/IPIV. */
+    kase = 0;
+L30:
+    clacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+    if (kase != 0) {
+
+/*        Multiply by inv(L*D*L') or inv(U*D*U'). */
+
+	chptrs_(uplo, n, &c__1, &ap[1], &ipiv[1], &work[1], n, info);
+	goto L30;
+    }
+
+/*     Compute the estimate of the reciprocal condition number. */
+
+    if (ainvnm != 0.f) {
+	*rcond = 1.f / ainvnm / *anorm;
+    }
+
+    return 0;
+
+/*     End of CHPCON */
+
+} /* chpcon_ */
diff --git a/contrib/libs/clapack/chpev.c b/contrib/libs/clapack/chpev.c
new file mode 100644
index 0000000000..12442c0706
--- /dev/null
+++ b/contrib/libs/clapack/chpev.c
@@ -0,0 +1,249 @@
+/* chpev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int chpev_(char *jobz, char *uplo, integer *n, complex *ap,
+	real *w, complex *z__, integer *ldz, complex *work, real *rwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1;
+    real r__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    real eps;
+    integer inde;
+    real anrm;
+    integer imax;
+    real rmin, rmax, sigma;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+    logical wantz;
+    integer iscale;
+    extern doublereal clanhp_(char *, char *, integer *, complex *, real *), slamch_(char *);
+    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+	    *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real bignum;
+    integer indtau;
+    extern /* Subroutine */ int chptrd_(char *, integer *, complex *, real *,
+	    real *, complex *, integer *);
+    integer indrwk, indwrk;
+    extern /* Subroutine */ int csteqr_(char *, integer *, real *, real *,
+	    complex *, integer *, real *, integer *), cupgtr_(char *,
+	    integer *, complex *, complex *, complex *, integer *, complex *,
+	    integer *), ssterf_(integer *, real *, real *, integer *);
+    real smlnum;
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CHPEV computes all the eigenvalues and, optionally, eigenvectors of a */
+/*  complex Hermitian matrix in packed storage. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOBZ    (input) CHARACTER*1 */
+/*          = 'N':  Compute eigenvalues only; */
+/*          = 'V':  Compute eigenvalues and eigenvectors. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangle of A is stored; */
+/*          = 'L':  Lower triangle of A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  AP      (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/*          On entry, the upper or lower triangle of the Hermitian matrix */
+/*          A, packed columnwise in a linear array.  The j-th column of A */
+/*          is stored in the array AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/*          On exit, AP is overwritten by values generated during the */
+/*          reduction to tridiagonal form.  If UPLO = 'U', the diagonal */
+/*          and first superdiagonal of the tridiagonal matrix T overwrite */
+/*          the corresponding elements of A, and if UPLO = 'L', the */
+/*          diagonal and first subdiagonal of T overwrite the */
+/*          corresponding elements of A. */
+
+/*  W       (output) REAL array, dimension (N) */
+/*          If INFO = 0, the eigenvalues in ascending order. */
+
+/*  Z       (output) COMPLEX array, dimension (LDZ, N) */
+/*          If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/*          eigenvectors of the matrix A, with the i-th column of Z */
+/*          holding the eigenvector associated with W(i). */
+/*          If JOBZ = 'N', then Z is not referenced. */
+
+/*  LDZ     (input) INTEGER */
+/*          The leading dimension of the array Z.  LDZ >= 1, and if */
+/*          JOBZ = 'V', LDZ >= max(1,N). */
+
+/*  WORK    (workspace) COMPLEX array, dimension (max(1, 2*N-1)) */
+
+/*  RWORK   (workspace) REAL array, dimension (max(1, 3*N-2)) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit. */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */
+/*          > 0:  if INFO = i, the algorithm failed to converge; i */
+/*                off-diagonal elements of an intermediate tridiagonal */
+/*                form did not converge to zero. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (lsame_(uplo, "L") || lsame_(uplo,
+	    "U"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ldz < 1 || wantz && *ldz < *n) {
+	*info = -7;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CHPEV ", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     N = 1: the eigenvalue is the (real) diagonal entry itself and the */
+/*     eigenvector is trivially 1. */
+    if (*n == 1) {
+	w[1] = ap[1].r;
+	rwork[1] = 1.f;
+	if (wantz) {
+	    i__1 = z_dim1 + 1;
+	    z__[i__1].r = 1.f, z__[i__1].i = 0.f;
+	}
+	return 0;
+    }
+
+/*     Get machine constants. */
+
+    safmin = slamch_("Safe minimum");
+    eps = slamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1.f / smlnum;
+    rmin = sqrt(smlnum);
+    rmax = sqrt(bignum);
+
+/*     Scale matrix to allowable range, if necessary. */
+
+/*     ANRM is the max-abs element; SIGMA rescales A into [RMIN, RMAX] */
+/*     so the reduction cannot under/overflow.  Eigenvalues are scaled */
+/*     back by 1/SIGMA at the end. */
+    anrm = clanhp_("M", uplo, n, &ap[1], &rwork[1]);
+    iscale = 0;
+    if (anrm > 0.f && anrm < rmin) {
+	iscale = 1;
+	sigma = rmin / anrm;
+    } else if (anrm > rmax) {
+	iscale = 1;
+	sigma = rmax / anrm;
+    }
+    if (iscale == 1) {
+	i__1 = *n * (*n + 1) / 2;
+	csscal_(&i__1, &sigma, &ap[1], &c__1);
+    }
+
+/*     Call CHPTRD to reduce Hermitian packed matrix to tridiagonal form. */
+
+/*     INDE and INDTAU may both be 1 because they index different */
+/*     workspaces: RWORK holds the off-diagonal E, WORK holds TAU. */
+    inde = 1;
+    indtau = 1;
+    chptrd_(uplo, n, &ap[1], &w[1], &rwork[inde], &work[indtau], &iinfo);
+
+/*     For eigenvalues only, call SSTERF.  For eigenvectors, first call */
+/*     CUPGTR to generate the orthogonal matrix, then call CSTEQR. */
+
+    if (! wantz) {
+	ssterf_(n, &w[1], &rwork[inde], info);
+    } else {
+	indwrk = indtau + *n;
+	cupgtr_(uplo, n, &ap[1], &work[indtau], &z__[z_offset], ldz, &work[
+		indwrk], &iinfo);
+	indrwk = inde + *n;
+	csteqr_(jobz, n, &w[1], &rwork[inde], &z__[z_offset], ldz, &rwork[
+		indrwk], info);
+    }
+
+/*     If matrix was scaled, then rescale eigenvalues appropriately. */
+
+/*     On convergence failure (INFO = i > 0) only W(1:i-1) are valid, so */
+/*     only those entries are rescaled. */
+    if (iscale == 1) {
+	if (*info == 0) {
+	    imax = *n;
+	} else {
+	    imax = *info - 1;
+	}
+	r__1 = 1.f / sigma;
+	sscal_(&imax, &r__1, &w[1], &c__1);
+    }
+
+    return 0;
+
+/*     End of CHPEV */
+
+} /* chpev_ */
diff --git a/contrib/libs/clapack/chpevd.c b/contrib/libs/clapack/chpevd.c
new file mode 100644
index 0000000000..e69b3f080c
--- /dev/null
+++ b/contrib/libs/clapack/chpevd.c
@@ -0,0 +1,346 @@
+/* chpevd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int chpevd_(char *jobz, char *uplo, integer *n, complex *ap,
+	real *w, complex *z__, integer *ldz, complex *work, integer *lwork,
+	real *rwork, integer *lrwork, integer *iwork, integer *liwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1;
+    real r__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    real eps;
+    integer inde;
+    real anrm;
+    integer imax;
+    real rmin, rmax, sigma;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+    integer lwmin, llrwk, llwrk;
+    logical wantz;
+    integer iscale;
+    extern doublereal clanhp_(char *, char *, integer *, complex *, real *);
+    extern /* Subroutine */ int cstedc_(char *, integer *, real *, real *,
+	    complex *, integer *, complex *, integer *, real *, integer *,
+	    integer *, integer *, integer *);
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+	    *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real bignum;
+    integer indtau;
+    extern /* Subroutine */ int chptrd_(char *, integer *, complex *, real *,
+	    real *, complex *, integer *);
+    integer indrwk, indwrk, liwmin;
+    extern /* Subroutine */ int ssterf_(integer *, real *, real *, integer *);
+    integer lrwmin;
+    extern /* Subroutine */ int cupmtr_(char *, char *, char *, integer *,
+	    integer *, complex *, complex *, complex *, integer *, complex *,
+	    integer *);
+    real smlnum;
+    logical lquery;
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CHPEVD computes all the eigenvalues and, optionally, eigenvectors of */
+/*  a complex Hermitian matrix A in packed storage.  If eigenvectors are */
+/*  desired, it uses a divide and conquer algorithm. */
+
+/*  The divide and conquer algorithm makes very mild assumptions about */
+/*  floating point arithmetic.  It will work on machines with a guard */
+/*  digit in add/subtract, or on those binary machines without guard */
+/*  digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/*  Cray-2.  It could conceivably fail on hexadecimal or decimal machines */
+/*  without guard digits, but we know of none. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOBZ    (input) CHARACTER*1 */
+/*          = 'N':  Compute eigenvalues only; */
+/*          = 'V':  Compute eigenvalues and eigenvectors. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangle of A is stored; */
+/*          = 'L':  Lower triangle of A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  AP      (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/*          On entry, the upper or lower triangle of the Hermitian matrix */
+/*          A, packed columnwise in a linear array.  The j-th column of A */
+/*          is stored in the array AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/*          On exit, AP is overwritten by values generated during the */
+/*          reduction to tridiagonal form.  If UPLO = 'U', the diagonal */
+/*          and first superdiagonal of the tridiagonal matrix T overwrite */
+/*          the corresponding elements of A, and if UPLO = 'L', the */
+/*          diagonal and first subdiagonal of T overwrite the */
+/*          corresponding elements of A. */
+
+/*  W       (output) REAL array, dimension (N) */
+/*          If INFO = 0, the eigenvalues in ascending order. */
+
+/*  Z       (output) COMPLEX array, dimension (LDZ, N) */
+/*          If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/*          eigenvectors of the matrix A, with the i-th column of Z */
+/*          holding the eigenvector associated with W(i). */
+/*          If JOBZ = 'N', then Z is not referenced. */
+
+/*  LDZ     (input) INTEGER */
+/*          The leading dimension of the array Z.  LDZ >= 1, and if */
+/*          JOBZ = 'V', LDZ >= max(1,N). */
+
+/*  WORK    (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/*          On exit, if INFO = 0, WORK(1) returns the required LWORK. */
+
+/*  LWORK   (input) INTEGER */
+/*          The dimension of array WORK. */
+/*          If N <= 1,               LWORK must be at least 1. */
+/*          If JOBZ = 'N' and N > 1, LWORK must be at least N. */
+/*          If JOBZ = 'V' and N > 1, LWORK must be at least 2*N. */
+
+/*          If LWORK = -1, then a workspace query is assumed; the routine */
+/*          only calculates the required sizes of the WORK, RWORK and */
+/*          IWORK arrays, returns these values as the first entries of */
+/*          the WORK, RWORK and IWORK arrays, and no error message */
+/*          related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/*  RWORK   (workspace/output) REAL array, dimension (MAX(1,LRWORK)) */
+/*          On exit, if INFO = 0, RWORK(1) returns the required LRWORK. */
+
+/*  LRWORK  (input) INTEGER */
+/*          The dimension of array RWORK. */
+/*          If N <= 1,               LRWORK must be at least 1. */
+/*          If JOBZ = 'N' and N > 1, LRWORK must be at least N. */
+/*          If JOBZ = 'V' and N > 1, LRWORK must be at least */
+/*                    1 + 5*N + 2*N**2. */
+
+/*          If LRWORK = -1, then a workspace query is assumed; the */
+/*          routine only calculates the required sizes of the WORK, RWORK */
+/*          and IWORK arrays, returns these values as the first entries */
+/*          of the WORK, RWORK and IWORK arrays, and no error message */
+/*          related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/*  IWORK   (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/*          On exit, if INFO = 0, IWORK(1) returns the required LIWORK. */
+
+/*  LIWORK  (input) INTEGER */
+/*          The dimension of array IWORK. */
+/*          If JOBZ = 'N' or N <= 1, LIWORK must be at least 1. */
+/*          If JOBZ = 'V' and N > 1, LIWORK must be at least 3 + 5*N. */
+
+/*          If LIWORK = -1, then a workspace query is assumed; the */
+/*          routine only calculates the required sizes of the WORK, RWORK */
+/*          and IWORK arrays, returns these values as the first entries */
+/*          of the WORK, RWORK and IWORK arrays, and no error message */
+/*          related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */
+/*          > 0:  if INFO = i, the algorithm failed to converge; i */
+/*                off-diagonal elements of an intermediate tridiagonal */
+/*                form did not converge to zero. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --rwork;
+    --iwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+/*     Any workspace length of -1 requests a size query only. */
+    lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1;
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (lsame_(uplo, "L") || lsame_(uplo,
+	    "U"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ldz < 1 || wantz && *ldz < *n) {
+	*info = -7;
+    }
+
+/*     Compute the minimal workspace sizes (they depend on JOBZ and N) */
+/*     and report them in WORK(1)/RWORK(1)/IWORK(1) before validating */
+/*     the caller-supplied lengths. */
+    if (*info == 0) {
+	if (*n <= 1) {
+	    lwmin = 1;
+	    liwmin = 1;
+	    lrwmin = 1;
+	} else {
+	    if (wantz) {
+		lwmin = *n << 1;
+/* Computing 2nd power */
+		i__1 = *n;
+		lrwmin = *n * 5 + 1 + (i__1 * i__1 << 1);
+		liwmin = *n * 5 + 3;
+	    } else {
+		lwmin = *n;
+		lrwmin = *n;
+		liwmin = 1;
+	    }
+	}
+	work[1].r = (real) lwmin, work[1].i = 0.f;
+	rwork[1] = (real) lrwmin;
+	iwork[1] = liwmin;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -9;
+	} else if (*lrwork < lrwmin && ! lquery) {
+	    *info = -11;
+	} else if (*liwork < liwmin && ! lquery) {
+	    *info = -13;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CHPEVD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     N = 1: the eigenvalue is the (real) diagonal entry itself. */
+    if (*n == 1) {
+	w[1] = ap[1].r;
+	if (wantz) {
+	    i__1 = z_dim1 + 1;
+	    z__[i__1].r = 1.f, z__[i__1].i = 0.f;
+	}
+	return 0;
+    }
+
+/*     Get machine constants. */
+
+    safmin = slamch_("Safe minimum");
+    eps = slamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1.f / smlnum;
+    rmin = sqrt(smlnum);
+    rmax = sqrt(bignum);
+
+/*     Scale matrix to allowable range, if necessary. */
+
+/*     SIGMA rescales A into [RMIN, RMAX] to avoid under/overflow; the */
+/*     eigenvalues are scaled back by 1/SIGMA at the end. */
+    anrm = clanhp_("M", uplo, n, &ap[1], &rwork[1]);
+    iscale = 0;
+    if (anrm > 0.f && anrm < rmin) {
+	iscale = 1;
+	sigma = rmin / anrm;
+    } else if (anrm > rmax) {
+	iscale = 1;
+	sigma = rmax / anrm;
+    }
+    if (iscale == 1) {
+	i__1 = *n * (*n + 1) / 2;
+	csscal_(&i__1, &sigma, &ap[1], &c__1);
+    }
+
+/*     Call CHPTRD to reduce Hermitian packed matrix to tridiagonal form. */
+
+/*     Workspace layout: RWORK = [E (N) | CSTEDC real work], */
+/*     WORK = [TAU (N) | CSTEDC/CUPMTR complex work]. */
+    inde = 1;
+    indtau = 1;
+    indrwk = inde + *n;
+    indwrk = indtau + *n;
+    llwrk = *lwork - indwrk + 1;
+    llrwk = *lrwork - indrwk + 1;
+    chptrd_(uplo, n, &ap[1], &w[1], &rwork[inde], &work[indtau], &iinfo);
+
+/*     For eigenvalues only, call SSTERF.  For eigenvectors, call CSTEDC */
+/*     to compute the eigenvectors of the tridiagonal form, then CUPMTR */
+/*     to multiply them by the unitary matrix from the CHPTRD reduction. */
+
+    if (! wantz) {
+	ssterf_(n, &w[1], &rwork[inde], info);
+    } else {
+	cstedc_("I", n, &w[1], &rwork[inde], &z__[z_offset], ldz, &work[
+		indwrk], &llwrk, &rwork[indrwk], &llrwk, &iwork[1], liwork,
+		info);
+	cupmtr_("L", uplo, "N", n, n, &ap[1], &work[indtau], &z__[z_offset],
+		ldz, &work[indwrk], &iinfo);
+    }
+
+/*     If matrix was scaled, then rescale eigenvalues appropriately. */
+
+/*     On convergence failure (INFO = i > 0) only W(1:i-1) are valid, so */
+/*     only those entries are rescaled. */
+    if (iscale == 1) {
+	if (*info == 0) {
+	    imax = *n;
+	} else {
+	    imax = *info - 1;
+	}
+	r__1 = 1.f / sigma;
+	sscal_(&imax, &r__1, &w[1], &c__1);
+    }
+
+/*     Return the required workspace sizes, as documented above. */
+    work[1].r = (real) lwmin, work[1].i = 0.f;
+    rwork[1] = (real) lrwmin;
+    iwork[1] = liwmin;
+    return 0;
+
+/*     End of CHPEVD */
+
+} /* chpevd_ */
diff --git a/contrib/libs/clapack/chpevx.c b/contrib/libs/clapack/chpevx.c
new file mode 100644
index 0000000000..77334b2039
--- /dev/null
+++ b/contrib/libs/clapack/chpevx.c
@@ -0,0 +1,471 @@
+/* chpevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int chpevx_(char *jobz, char *range, char *uplo, integer *n,
+ complex *ap, real *vl, real *vu, integer *il, integer *iu, real *
+ abstol, integer *m, real *w, complex *z__, integer *ldz, complex *
+ work, real *rwork, integer *iwork, integer *ifail, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, jj;
+ real eps, vll, vuu, tmp1;
+ integer indd, inde;
+ real anrm;
+ integer imax;
+ real rmin, rmax;
+ logical test;
+ integer itmp1, indee;
+ real sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ char order[1];
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *), scopy_(integer *, real *, integer *, real *
+, integer *);
+ logical wantz, alleig, indeig;
+ integer iscale, indibl;
+ extern doublereal clanhp_(char *, char *, integer *, complex *, real *);
+ logical valeig;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real abstll, bignum;
+ integer indiwk, indisp, indtau;
+ extern /* Subroutine */ int chptrd_(char *, integer *, complex *, real *,
+ real *, complex *, integer *), cstein_(integer *, real *,
+ real *, integer *, real *, integer *, integer *, complex *,
+ integer *, real *, integer *, integer *, integer *);
+ integer indrwk, indwrk;
+ extern /* Subroutine */ int csteqr_(char *, integer *, real *, real *,
+ complex *, integer *, real *, integer *), cupgtr_(char *,
+ integer *, complex *, complex *, complex *, integer *, complex *,
+ integer *), ssterf_(integer *, real *, real *, integer *);
+ integer nsplit;
+ extern /* Subroutine */ int cupmtr_(char *, char *, char *, integer *,
+ integer *, complex *, complex *, complex *, integer *, complex *,
+ integer *);
+ real smlnum;
+ extern /* Subroutine */ int sstebz_(char *, char *, integer *, real *,
+ real *, integer *, integer *, real *, real *, real *, integer *,
+ integer *, real *, integer *, integer *, real *, integer *,
+ integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHPEVX computes selected eigenvalues and, optionally, eigenvectors */
+/* of a complex Hermitian matrix A in packed storage. */
+/* Eigenvalues/vectors can be selected by specifying either a range of */
+/* values or a range of indices for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found; */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found; */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, AP is overwritten by values generated during the */
+/* reduction to tridiagonal form. If UPLO = 'U', the diagonal */
+/* and first superdiagonal of the tridiagonal matrix T overwrite */
+/* the corresponding elements of A, and if UPLO = 'L', the */
+/* diagonal and first subdiagonal of T overwrite the */
+/* corresponding elements of A. */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) REAL */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing AP to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*SLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*SLAMCH('S'). */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices */
+/* with Guaranteed High Relative Accuracy," by Demmel and */
+/* Kahan, LAPACK Working Note #3. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the selected eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and */
+/* the index of the eigenvector is returned in IFAIL. */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (7*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, then i eigenvectors failed to converge. */
+/* Their indices are stored in array IFAIL. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -2;
+ } else if (! (lsame_(uplo, "L") || lsame_(uplo,
+ "U"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -7;
+ }
+ } else if (indeig) {
+ if (*il < 1 || *il > max(1,*n)) {
+ *info = -8;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -9;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -14;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHPEVX", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (alleig || indeig) {
+ *m = 1;
+ w[1] = ap[1].r;
+ } else {
+ if (*vl < ap[1].r && *vu >= ap[1].r) {
+ *m = 1;
+ w[1] = ap[1].r;
+ }
+ }
+ if (wantz) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1.f, z__[i__1].i = 0.f;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = slamch_("Safe minimum");
+ eps = slamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1.f / smlnum;
+ rmin = sqrt(smlnum);
+/* Computing MIN */
+ r__1 = sqrt(bignum), r__2 = 1.f / sqrt(sqrt(safmin));
+ rmax = dmin(r__1,r__2);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ iscale = 0;
+ abstll = *abstol;
+ if (valeig) {
+ vll = *vl;
+ vuu = *vu;
+ } else {
+ vll = 0.f;
+ vuu = 0.f;
+ }
+ anrm = clanhp_("M", uplo, n, &ap[1], &rwork[1]);
+ if (anrm > 0.f && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ i__1 = *n * (*n + 1) / 2;
+ csscal_(&i__1, &sigma, &ap[1], &c__1);
+ if (*abstol > 0.f) {
+ abstll = *abstol * sigma;
+ }
+ if (valeig) {
+ vll = *vl * sigma;
+ vuu = *vu * sigma;
+ }
+ }
+
+/* Call CHPTRD to reduce Hermitian packed matrix to tridiagonal form. */
+
+ indd = 1;
+ inde = indd + *n;
+ indrwk = inde + *n;
+ indtau = 1;
+ indwrk = indtau + *n;
+ chptrd_(uplo, n, &ap[1], &rwork[indd], &rwork[inde], &work[indtau], &
+ iinfo);
+
+/* If all eigenvalues are desired and ABSTOL is less than or equal */
+/* to zero, then call SSTERF or CUPGTR and CSTEQR. If this fails */
+/* for some eigenvalue, then try SSTEBZ. */
+
+ test = FALSE_;
+ if (indeig) {
+ if (*il == 1 && *iu == *n) {
+ test = TRUE_;
+ }
+ }
+ if ((alleig || test) && *abstol <= 0.f) {
+ scopy_(n, &rwork[indd], &c__1, &w[1], &c__1);
+ indee = indrwk + (*n << 1);
+ if (! wantz) {
+ i__1 = *n - 1;
+ scopy_(&i__1, &rwork[inde], &c__1, &rwork[indee], &c__1);
+ ssterf_(n, &w[1], &rwork[indee], info);
+ } else {
+ cupgtr_(uplo, n, &ap[1], &work[indtau], &z__[z_offset], ldz, &
+ work[indwrk], &iinfo);
+ i__1 = *n - 1;
+ scopy_(&i__1, &rwork[inde], &c__1, &rwork[indee], &c__1);
+ csteqr_(jobz, n, &w[1], &rwork[indee], &z__[z_offset], ldz, &
+ rwork[indrwk], info);
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ifail[i__] = 0;
+/* L10: */
+ }
+ }
+ }
+ if (*info == 0) {
+ *m = *n;
+ goto L20;
+ }
+ *info = 0;
+ }
+
+/* Otherwise, call SSTEBZ and, if eigenvectors are desired, CSTEIN. */
+
+ if (wantz) {
+ *(unsigned char *)order = 'B';
+ } else {
+ *(unsigned char *)order = 'E';
+ }
+ indibl = 1;
+ indisp = indibl + *n;
+ indiwk = indisp + *n;
+ sstebz_(range, order, n, &vll, &vuu, il, iu, &abstll, &rwork[indd], &
+ rwork[inde], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &
+ rwork[indrwk], &iwork[indiwk], info);
+
+ if (wantz) {
+ cstein_(n, &rwork[indd], &rwork[inde], m, &w[1], &iwork[indibl], &
+ iwork[indisp], &z__[z_offset], ldz, &rwork[indrwk], &iwork[
+ indiwk], &ifail[1], info);
+
+/* Apply unitary matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by CSTEIN. */
+
+ indwrk = indtau + *n;
+ cupmtr_("L", uplo, "N", n, m, &ap[1], &work[indtau], &z__[z_offset],
+ ldz, &work[indwrk], &iinfo);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+L20:
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *m;
+ } else {
+ imax = *info - 1;
+ }
+ r__1 = 1.f / sigma;
+ sscal_(&imax, &r__1, &w[1], &c__1);
+ }
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+ if (wantz) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = 0;
+ tmp1 = w[j];
+ i__2 = *m;
+ for (jj = j + 1; jj <= i__2; ++jj) {
+ if (w[jj] < tmp1) {
+ i__ = jj;
+ tmp1 = w[jj];
+ }
+/* L30: */
+ }
+
+ if (i__ != 0) {
+ itmp1 = iwork[indibl + i__ - 1];
+ w[i__] = w[j];
+ iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+ w[j] = tmp1;
+ iwork[indibl + j - 1] = itmp1;
+ cswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+ &c__1);
+ if (*info != 0) {
+ itmp1 = ifail[i__];
+ ifail[i__] = ifail[j];
+ ifail[j] = itmp1;
+ }
+ }
+/* L40: */
+ }
+ }
+
+ return 0;
+
+/* End of CHPEVX */
+
+} /* chpevx_ */
diff --git a/contrib/libs/clapack/chpgst.c b/contrib/libs/clapack/chpgst.c
new file mode 100644
index 0000000000..b830930f3c
--- /dev/null
+++ b/contrib/libs/clapack/chpgst.c
@@ -0,0 +1,312 @@
+/* chpgst.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int chpgst_(integer *itype, char *uplo, integer *n, complex *
+ ap, complex *bp, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ real r__1, r__2;
+ complex q__1, q__2, q__3;
+
+ /* Local variables */
+ integer j, k, j1, k1, jj, kk;
+ complex ct;
+ real ajj;
+ integer j1j1;
+ real akk;
+ integer k1k1;
+ real bjj, bkk;
+ extern /* Subroutine */ int chpr2_(char *, integer *, complex *, complex *
+, integer *, complex *, integer *, complex *);
+ extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int chpmv_(char *, integer *, complex *, complex *
+, complex *, integer *, complex *, complex *, integer *),
+ caxpy_(integer *, complex *, complex *, integer *, complex *,
+ integer *), ctpmv_(char *, char *, char *, integer *, complex *,
+ complex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int ctpsv_(char *, char *, char *, integer *,
+ complex *, complex *, integer *), csscal_(
+ integer *, real *, complex *, integer *), xerbla_(char *, integer
+ *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHPGST reduces a complex Hermitian-definite generalized */
+/* eigenproblem to standard form, using packed storage. */
+
+/* If ITYPE = 1, the problem is A*x = lambda*B*x, */
+/* and A is overwritten by inv(U**H)*A*inv(U) or inv(L)*A*inv(L**H) */
+
+/* If ITYPE = 2 or 3, the problem is A*B*x = lambda*x or */
+/* B*A*x = lambda*x, and A is overwritten by U*A*U**H or L**H*A*L. */
+
+/* B must have been previously factorized as U**H*U or L*L**H by CPPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* = 1: compute inv(U**H)*A*inv(U) or inv(L)*A*inv(L**H); */
+/* = 2 or 3: compute U*A*U**H or L**H*A*L. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored and B is factored as */
+/* U**H*U; */
+/* = 'L': Lower triangle of A is stored and B is factored as */
+/* L*L**H. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* AP (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, if INFO = 0, the transformed matrix, stored in the */
+/* same format as A. */
+
+/* BP (input) COMPLEX array, dimension (N*(N+1)/2) */
+/* The triangular factor from the Cholesky factorization of B, */
+/* stored in the same format as A, as returned by CPPTRF. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --bp;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHPGST", &i__1);
+ return 0;
+ }
+
+ if (*itype == 1) {
+ if (upper) {
+
+/* Compute inv(U')*A*inv(U) */
+
+/* J1 and JJ are the indices of A(1,j) and A(j,j) */
+
+ jj = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ j1 = jj + 1;
+ jj += j;
+
+/* Compute the j-th column of the upper triangle of A */
+
+ i__2 = jj;
+ i__3 = jj;
+ r__1 = ap[i__3].r;
+ ap[i__2].r = r__1, ap[i__2].i = 0.f;
+ i__2 = jj;
+ bjj = bp[i__2].r;
+ ctpsv_(uplo, "Conjugate transpose", "Non-unit", &j, &bp[1], &
+ ap[j1], &c__1);
+ i__2 = j - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ chpmv_(uplo, &i__2, &q__1, &ap[1], &bp[j1], &c__1, &c_b1, &ap[
+ j1], &c__1);
+ i__2 = j - 1;
+ r__1 = 1.f / bjj;
+ csscal_(&i__2, &r__1, &ap[j1], &c__1);
+ i__2 = jj;
+ i__3 = jj;
+ i__4 = j - 1;
+ cdotc_(&q__3, &i__4, &ap[j1], &c__1, &bp[j1], &c__1);
+ q__2.r = ap[i__3].r - q__3.r, q__2.i = ap[i__3].i - q__3.i;
+ q__1.r = q__2.r / bjj, q__1.i = q__2.i / bjj;
+ ap[i__2].r = q__1.r, ap[i__2].i = q__1.i;
+/* L10: */
+ }
+ } else {
+
+/* Compute inv(L)*A*inv(L') */
+
+/* KK and K1K1 are the indices of A(k,k) and A(k+1,k+1) */
+
+ kk = 1;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ k1k1 = kk + *n - k + 1;
+
+/* Update the lower triangle of A(k:n,k:n) */
+
+ i__2 = kk;
+ akk = ap[i__2].r;
+ i__2 = kk;
+ bkk = bp[i__2].r;
+/* Computing 2nd power */
+ r__1 = bkk;
+ akk /= r__1 * r__1;
+ i__2 = kk;
+ ap[i__2].r = akk, ap[i__2].i = 0.f;
+ if (k < *n) {
+ i__2 = *n - k;
+ r__1 = 1.f / bkk;
+ csscal_(&i__2, &r__1, &ap[kk + 1], &c__1);
+ r__1 = akk * -.5f;
+ ct.r = r__1, ct.i = 0.f;
+ i__2 = *n - k;
+ caxpy_(&i__2, &ct, &bp[kk + 1], &c__1, &ap[kk + 1], &c__1)
+ ;
+ i__2 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ chpr2_(uplo, &i__2, &q__1, &ap[kk + 1], &c__1, &bp[kk + 1]
+, &c__1, &ap[k1k1]);
+ i__2 = *n - k;
+ caxpy_(&i__2, &ct, &bp[kk + 1], &c__1, &ap[kk + 1], &c__1)
+ ;
+ i__2 = *n - k;
+ ctpsv_(uplo, "No transpose", "Non-unit", &i__2, &bp[k1k1],
+ &ap[kk + 1], &c__1);
+ }
+ kk = k1k1;
+/* L20: */
+ }
+ }
+ } else {
+ if (upper) {
+
+/* Compute U*A*U' */
+
+/* K1 and KK are the indices of A(1,k) and A(k,k) */
+
+ kk = 0;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ k1 = kk + 1;
+ kk += k;
+
+/* Update the upper triangle of A(1:k,1:k) */
+
+ i__2 = kk;
+ akk = ap[i__2].r;
+ i__2 = kk;
+ bkk = bp[i__2].r;
+ i__2 = k - 1;
+ ctpmv_(uplo, "No transpose", "Non-unit", &i__2, &bp[1], &ap[
+ k1], &c__1);
+ r__1 = akk * .5f;
+ ct.r = r__1, ct.i = 0.f;
+ i__2 = k - 1;
+ caxpy_(&i__2, &ct, &bp[k1], &c__1, &ap[k1], &c__1);
+ i__2 = k - 1;
+ chpr2_(uplo, &i__2, &c_b1, &ap[k1], &c__1, &bp[k1], &c__1, &
+ ap[1]);
+ i__2 = k - 1;
+ caxpy_(&i__2, &ct, &bp[k1], &c__1, &ap[k1], &c__1);
+ i__2 = k - 1;
+ csscal_(&i__2, &bkk, &ap[k1], &c__1);
+ i__2 = kk;
+/* Computing 2nd power */
+ r__2 = bkk;
+ r__1 = akk * (r__2 * r__2);
+ ap[i__2].r = r__1, ap[i__2].i = 0.f;
+/* L30: */
+ }
+ } else {
+
+/* Compute L'*A*L */
+
+/* JJ and J1J1 are the indices of A(j,j) and A(j+1,j+1) */
+
+ jj = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ j1j1 = jj + *n - j + 1;
+
+/* Compute the j-th column of the lower triangle of A */
+
+ i__2 = jj;
+ ajj = ap[i__2].r;
+ i__2 = jj;
+ bjj = bp[i__2].r;
+ i__2 = jj;
+ r__1 = ajj * bjj;
+ i__3 = *n - j;
+ cdotc_(&q__2, &i__3, &ap[jj + 1], &c__1, &bp[jj + 1], &c__1);
+ q__1.r = r__1 + q__2.r, q__1.i = q__2.i;
+ ap[i__2].r = q__1.r, ap[i__2].i = q__1.i;
+ i__2 = *n - j;
+ csscal_(&i__2, &bjj, &ap[jj + 1], &c__1);
+ i__2 = *n - j;
+ chpmv_(uplo, &i__2, &c_b1, &ap[j1j1], &bp[jj + 1], &c__1, &
+ c_b1, &ap[jj + 1], &c__1);
+ i__2 = *n - j + 1;
+ ctpmv_(uplo, "Conjugate transpose", "Non-unit", &i__2, &bp[jj]
+, &ap[jj], &c__1);
+ jj = j1j1;
+/* L40: */
+ }
+ }
+ }
+ return 0;
+
+/* End of CHPGST */
+
+} /* chpgst_ */
diff --git a/contrib/libs/clapack/chpgv.c b/contrib/libs/clapack/chpgv.c
new file mode 100644
index 0000000000..bf69356e1d
--- /dev/null
+++ b/contrib/libs/clapack/chpgv.c
@@ -0,0 +1,244 @@
+/* chpgv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int chpgv_(integer *itype, char *jobz, char *uplo, integer *
+ n, complex *ap, complex *bp, real *w, complex *z__, integer *ldz,
+ complex *work, real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1;
+
+ /* Local variables */
+ integer j, neig;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int chpev_(char *, char *, integer *, complex *,
+ real *, complex *, integer *, complex *, real *, integer *);
+ char trans[1];
+ extern /* Subroutine */ int ctpmv_(char *, char *, char *, integer *,
+ complex *, complex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int ctpsv_(char *, char *, char *, integer *,
+ complex *, complex *, integer *);
+ logical wantz;
+ extern /* Subroutine */ int xerbla_(char *, integer *), chpgst_(
+ integer *, char *, integer *, complex *, complex *, integer *), cpptrf_(char *, integer *, complex *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHPGV computes all the eigenvalues and, optionally, the eigenvectors */
+/* of a complex generalized Hermitian-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. */
+/* Here A and B are assumed to be Hermitian, stored in packed format, */
+/* and B is also positive definite. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* AP (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the contents of AP are destroyed. */
+
+/* BP (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* B, packed columnwise in a linear array. The j-th column of B */
+/* is stored in the array BP as follows: */
+/* if UPLO = 'U', BP(i + (j-1)*j/2) = B(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', BP(i + (j-1)*(2*n-j)/2) = B(i,j) for j<=i<=n. */
+
+/* On exit, the triangular factor U or L from the Cholesky */
+/* factorization B = U**H*U or B = L*L**H, in the same storage */
+/* format as B. */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/* eigenvectors. The eigenvectors are normalized as follows: */
+/* if ITYPE = 1 or 2, Z**H*B*Z = I; */
+/* if ITYPE = 3, Z**H*inv(B)*Z = I. */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) COMPLEX array, dimension (max(1, 2*N-1)) */
+
+/* RWORK (workspace) REAL array, dimension (max(1, 3*N-2)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: CPPTRF or CHPEV returned an error code: */
+/* <= N: if INFO = i, CHPEV failed to converge; */
+/* i off-diagonal elements of an intermediate */
+/* tridiagonal form did not convergeto zero; */
+/* > N: if INFO = N + i, for 1 <= i <= n, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --bp;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+
+ *info = 0;
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -2;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHPGV ", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a Cholesky factorization of B. */
+
+ cpptrf_(uplo, n, &bp[1], info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+ chpgst_(itype, uplo, n, &ap[1], &bp[1], info);
+ chpev_(jobz, uplo, n, &ap[1], &w[1], &z__[z_offset], ldz, &work[1], &
+ rwork[1], info);
+
+ if (wantz) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+ neig = *n;
+ if (*info > 0) {
+ neig = *info - 1;
+ }
+ if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'N';
+ } else {
+ *(unsigned char *)trans = 'C';
+ }
+
+ i__1 = neig;
+ for (j = 1; j <= i__1; ++j) {
+ ctpsv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L10: */
+ }
+
+ } else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'C';
+ } else {
+ *(unsigned char *)trans = 'N';
+ }
+
+ i__1 = neig;
+ for (j = 1; j <= i__1; ++j) {
+ ctpmv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L20: */
+ }
+ }
+ }
+ return 0;
+
+/* End of CHPGV */
+
+} /* chpgv_ */
diff --git a/contrib/libs/clapack/chpgvd.c b/contrib/libs/clapack/chpgvd.c
new file mode 100644
index 0000000000..58fe02aecc
--- /dev/null
+++ b/contrib/libs/clapack/chpgvd.c
@@ -0,0 +1,356 @@
+/* chpgvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int chpgvd_(integer *itype, char *jobz, char *uplo, integer *
+ n, complex *ap, complex *bp, real *w, complex *z__, integer *ldz,
+ complex *work, integer *lwork, real *rwork, integer *lrwork, integer *
+ iwork, integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1;
+ real r__1, r__2;
+
+ /* Local variables */
+ integer j, neig;
+ extern logical lsame_(char *, char *);
+ integer lwmin;
+ char trans[1];
+ extern /* Subroutine */ int ctpmv_(char *, char *, char *, integer *,
+ complex *, complex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int ctpsv_(char *, char *, char *, integer *,
+ complex *, complex *, integer *);
+ logical wantz;
+ extern /* Subroutine */ int chpevd_(char *, char *, integer *, complex *,
+ real *, complex *, integer *, complex *, integer *, real *,
+ integer *, integer *, integer *, integer *),
+ xerbla_(char *, integer *), chpgst_(integer *, char *,
+ integer *, complex *, complex *, integer *), cpptrf_(char
+ *, integer *, complex *, integer *);
+ integer liwmin, lrwmin;
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHPGVD computes all the eigenvalues and, optionally, the eigenvectors */
+/* of a complex generalized Hermitian-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. Here A and */
+/* B are assumed to be Hermitian, stored in packed format, and B is also */
+/* positive definite. */
+/* If eigenvectors are desired, it uses a divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* AP (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the contents of AP are destroyed. */
+
+/* BP (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* B, packed columnwise in a linear array. The j-th column of B */
+/* is stored in the array BP as follows: */
+/* if UPLO = 'U', BP(i + (j-1)*j/2) = B(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', BP(i + (j-1)*(2*n-j)/2) = B(i,j) for j<=i<=n. */
+
+/* On exit, the triangular factor U or L from the Cholesky */
+/* factorization B = U**H*U or B = L*L**H, in the same storage */
+/* format as B. */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/* eigenvectors. The eigenvectors are normalized as follows: */
+/* if ITYPE = 1 or 2, Z**H*B*Z = I; */
+/* if ITYPE = 3, Z**H*inv(B)*Z = I. */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the required LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of array WORK. */
+/* If N <= 1, LWORK >= 1. */
+/* If JOBZ = 'N' and N > 1, LWORK >= N. */
+/* If JOBZ = 'V' and N > 1, LWORK >= 2*N. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the required sizes of the WORK, RWORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (MAX(1,LRWORK)) */
+/* On exit, if INFO = 0, RWORK(1) returns the required LRWORK. */
+
+/* LRWORK (input) INTEGER */
+/* The dimension of array RWORK. */
+/* If N <= 1, LRWORK >= 1. */
+/* If JOBZ = 'N' and N > 1, LRWORK >= N. */
+/* If JOBZ = 'V' and N > 1, LRWORK >= 1 + 5*N + 2*N**2. */
+
+/* If LRWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the required sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the required LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of array IWORK. */
+/* If JOBZ = 'N' or N <= 1, LIWORK >= 1. */
+/* If JOBZ = 'V' and N > 1, LIWORK >= 3 + 5*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the required sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: CPPTRF or CHPEVD returned an error code: */
+/* <= N: if INFO = i, CHPEVD failed to converge; */
+/* i off-diagonal elements of an intermediate */
+/*                tridiagonal form did not converge to zero; */
+/* > N: if INFO = N + i, for 1 <= i <= n, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --bp;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1;
+
+ *info = 0;
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -2;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -9;
+ }
+
+ if (*info == 0) {
+ if (*n <= 1) {
+ lwmin = 1;
+ liwmin = 1;
+ lrwmin = 1;
+ } else {
+ if (wantz) {
+ lwmin = *n << 1;
+/* Computing 2nd power */
+ i__1 = *n;
+ lrwmin = *n * 5 + 1 + (i__1 * i__1 << 1);
+ liwmin = *n * 5 + 3;
+ } else {
+ lwmin = *n;
+ lrwmin = *n;
+ liwmin = 1;
+ }
+ }
+ work[1].r = (real) lwmin, work[1].i = 0.f;
+ rwork[1] = (real) lrwmin;
+ iwork[1] = liwmin;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -11;
+ } else if (*lrwork < lrwmin && ! lquery) {
+ *info = -13;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -15;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHPGVD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a Cholesky factorization of B. */
+
+ cpptrf_(uplo, n, &bp[1], info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+ chpgst_(itype, uplo, n, &ap[1], &bp[1], info);
+ chpevd_(jobz, uplo, n, &ap[1], &w[1], &z__[z_offset], ldz, &work[1],
+ lwork, &rwork[1], lrwork, &iwork[1], liwork, info);
+/* Computing MAX */
+ r__1 = (real) lwmin, r__2 = work[1].r;
+ lwmin = dmax(r__1,r__2);
+/* Computing MAX */
+ r__1 = (real) lrwmin;
+ lrwmin = dmax(r__1,rwork[1]);
+/* Computing MAX */
+ r__1 = (real) liwmin, r__2 = (real) iwork[1];
+ liwmin = dmax(r__1,r__2);
+
+ if (wantz) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+ neig = *n;
+ if (*info > 0) {
+ neig = *info - 1;
+ }
+ if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'N';
+ } else {
+ *(unsigned char *)trans = 'C';
+ }
+
+ i__1 = neig;
+ for (j = 1; j <= i__1; ++j) {
+ ctpsv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L10: */
+ }
+
+ } else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'C';
+ } else {
+ *(unsigned char *)trans = 'N';
+ }
+
+ i__1 = neig;
+ for (j = 1; j <= i__1; ++j) {
+ ctpmv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L20: */
+ }
+ }
+ }
+
+ work[1].r = (real) lwmin, work[1].i = 0.f;
+ rwork[1] = (real) lrwmin;
+ iwork[1] = liwmin;
+ return 0;
+
+/* End of CHPGVD */
+
+} /* chpgvd_ */
diff --git a/contrib/libs/clapack/chpgvx.c b/contrib/libs/clapack/chpgvx.c
new file mode 100644
index 0000000000..838b5584a0
--- /dev/null
+++ b/contrib/libs/clapack/chpgvx.c
@@ -0,0 +1,343 @@
+/* chpgvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int chpgvx_(integer *itype, char *jobz, char *range, char *
+ uplo, integer *n, complex *ap, complex *bp, real *vl, real *vu,
+ integer *il, integer *iu, real *abstol, integer *m, real *w, complex *
+ z__, integer *ldz, complex *work, real *rwork, integer *iwork,
+ integer *ifail, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1;
+
+ /* Local variables */
+ integer j;
+ extern logical lsame_(char *, char *);
+ char trans[1];
+ extern /* Subroutine */ int ctpmv_(char *, char *, char *, integer *,
+ complex *, complex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int ctpsv_(char *, char *, char *, integer *,
+ complex *, complex *, integer *);
+ logical wantz, alleig, indeig, valeig;
+ extern /* Subroutine */ int xerbla_(char *, integer *), chpgst_(
+ integer *, char *, integer *, complex *, complex *, integer *), chpevx_(char *, char *, char *, integer *, complex *,
+ real *, real *, integer *, integer *, real *, integer *, real *,
+ complex *, integer *, complex *, real *, integer *, integer *,
+ integer *), cpptrf_(char *, integer *,
+ complex *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHPGVX computes selected eigenvalues and, optionally, eigenvectors */
+/* of a complex generalized Hermitian-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. Here A and */
+/* B are assumed to be Hermitian, stored in packed format, and B is also */
+/* positive definite. Eigenvalues and eigenvectors can be selected by */
+/* specifying either a range of values or a range of indices for the */
+/* desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found; */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found; */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* AP (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the contents of AP are destroyed. */
+
+/* BP (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* B, packed columnwise in a linear array. The j-th column of B */
+/* is stored in the array BP as follows: */
+/* if UPLO = 'U', BP(i + (j-1)*j/2) = B(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', BP(i + (j-1)*(2*n-j)/2) = B(i,j) for j<=i<=n. */
+
+/* On exit, the triangular factor U or L from the Cholesky */
+/* factorization B = U**H*U or B = L*L**H, in the same storage */
+/* format as B. */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) REAL */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing AP to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*SLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*SLAMCH('S'). */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) REAL array, dimension (N) */
+/* On normal exit, the first M elements contain the selected */
+/* eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX array, dimension (LDZ, N) */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* The eigenvectors are normalized as follows: */
+/* if ITYPE = 1 or 2, Z**H*B*Z = I; */
+/* if ITYPE = 3, Z**H*inv(B)*Z = I. */
+
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and the */
+/* index of the eigenvector is returned in IFAIL. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (7*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: CPPTRF or CHPEVX returned an error code: */
+/* <= N: if INFO = i, CHPEVX failed to converge; */
+/* i eigenvectors failed to converge. Their indices */
+/* are stored in array IFAIL. */
+/* > N: if INFO = N + i, for 1 <= i <= n, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --bp;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+
+ *info = 0;
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -2;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -3;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -9;
+ }
+ } else if (indeig) {
+ if (*il < 1) {
+ *info = -10;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -11;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -16;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHPGVX", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a Cholesky factorization of B. */
+
+ cpptrf_(uplo, n, &bp[1], info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+ chpgst_(itype, uplo, n, &ap[1], &bp[1], info);
+ chpevx_(jobz, range, uplo, n, &ap[1], vl, vu, il, iu, abstol, m, &w[1], &
+ z__[z_offset], ldz, &work[1], &rwork[1], &iwork[1], &ifail[1],
+ info);
+
+ if (wantz) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+ if (*info > 0) {
+ *m = *info - 1;
+ }
+ if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'N';
+ } else {
+ *(unsigned char *)trans = 'C';
+ }
+
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ ctpsv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L10: */
+ }
+
+ } else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'C';
+ } else {
+ *(unsigned char *)trans = 'N';
+ }
+
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ ctpmv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L20: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CHPGVX */
+
+} /* chpgvx_ */
diff --git a/contrib/libs/clapack/chprfs.c b/contrib/libs/clapack/chprfs.c
new file mode 100644
index 0000000000..6f73ec37a4
--- /dev/null
+++ b/contrib/libs/clapack/chprfs.c
@@ -0,0 +1,462 @@
+/* chprfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int chprfs_(char *uplo, integer *n, integer *nrhs, complex *
+ ap, complex *afp, integer *ipiv, complex *b, integer *ldb, complex *x,
+ integer *ldx, real *ferr, real *berr, complex *work, real *rwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer i__, j, k;
+ real s;
+ integer ik, kk;
+ real xk;
+ integer nz;
+ real eps;
+ integer kase;
+ real safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), chpmv_(char *, integer *, complex *,
+ complex *, complex *, integer *, complex *, complex *, integer *), caxpy_(integer *, complex *, complex *, integer *,
+ complex *, integer *);
+ integer count;
+ logical upper;
+ extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+ *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *), chptrs_(
+ char *, integer *, integer *, complex *, integer *, complex *,
+ integer *, integer *);
+ real lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHPRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is Hermitian indefinite */
+/* and packed, and provides error bounds and backward error estimates */
+/* for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AP (input) COMPLEX array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the Hermitian matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* AFP (input) COMPLEX array, dimension (N*(N+1)/2) */
+/* The factored form of the matrix A. AFP contains the block */
+/* diagonal matrix D and the multipliers used to obtain the */
+/* factor U or L from the factorization A = U*D*U**H or */
+/* A = L*D*L**H as computed by CHPTRF, stored as a packed */
+/* triangular matrix. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by CHPTRF. */
+
+/* B (input) COMPLEX array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by CHPTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --afp;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ } else if (*ldx < max(1,*n)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHPRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.f;
+ berr[j] = 0.f;
+/* L10: */
+ }
+ return 0;
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = *n + 1;
+ eps = slamch_("Epsilon");
+ safmin = slamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+ ccopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+ q__1.r = -1.f, q__1.i = -0.f;
+ chpmv_(uplo, n, &q__1, &ap[1], &x[j * x_dim1 + 1], &c__1, &c_b1, &
+ work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[i__] = (r__1 = b[i__3].r, dabs(r__1)) + (r__2 = r_imag(&b[
+ i__ + j * b_dim1]), dabs(r__2));
+/* L30: */
+ }
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+ kk = 1;
+ if (upper) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ i__3 = k + j * x_dim1;
+ xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[k + j
+ * x_dim1]), dabs(r__2));
+ ik = kk;
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = ik;
+ rwork[i__] += ((r__1 = ap[i__4].r, dabs(r__1)) + (r__2 =
+ r_imag(&ap[ik]), dabs(r__2))) * xk;
+ i__4 = ik;
+ i__5 = i__ + j * x_dim1;
+ s += ((r__1 = ap[i__4].r, dabs(r__1)) + (r__2 = r_imag(&
+ ap[ik]), dabs(r__2))) * ((r__3 = x[i__5].r, dabs(
+ r__3)) + (r__4 = r_imag(&x[i__ + j * x_dim1]),
+ dabs(r__4)));
+ ++ik;
+/* L40: */
+ }
+ i__3 = kk + k - 1;
+ rwork[k] = rwork[k] + (r__1 = ap[i__3].r, dabs(r__1)) * xk +
+ s;
+ kk += k;
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ i__3 = k + j * x_dim1;
+ xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[k + j
+ * x_dim1]), dabs(r__2));
+ i__3 = kk;
+ rwork[k] += (r__1 = ap[i__3].r, dabs(r__1)) * xk;
+ ik = kk + 1;
+ i__3 = *n;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ i__4 = ik;
+ rwork[i__] += ((r__1 = ap[i__4].r, dabs(r__1)) + (r__2 =
+ r_imag(&ap[ik]), dabs(r__2))) * xk;
+ i__4 = ik;
+ i__5 = i__ + j * x_dim1;
+ s += ((r__1 = ap[i__4].r, dabs(r__1)) + (r__2 = r_imag(&
+ ap[ik]), dabs(r__2))) * ((r__3 = x[i__5].r, dabs(
+ r__3)) + (r__4 = r_imag(&x[i__ + j * x_dim1]),
+ dabs(r__4)));
+ ++ik;
+/* L60: */
+ }
+ rwork[k] += s;
+ kk += *n - k + 1;
+/* L70: */
+ }
+ }
+ s = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+/* Computing MAX */
+ i__3 = i__;
+ r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2))) / rwork[i__];
+ s = dmax(r__3,r__4);
+ } else {
+/* Computing MAX */
+ i__3 = i__;
+ r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + safe1) / (rwork[i__]
+ + safe1);
+ s = dmax(r__3,r__4);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ chptrs_(uplo, n, &c__1, &afp[1], &ipiv[1], &work[1], n, info);
+ caxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use CLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+ i__3 = i__;
+ rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+ i__];
+ } else {
+ i__3 = i__;
+ rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+ i__] + safe1;
+ }
+/* L90: */
+ }
+
+ kase = 0;
+L100:
+ clacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+ chptrs_(uplo, n, &c__1, &afp[1], &ipiv[1], &work[1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L110: */
+ }
+ } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L120: */
+ }
+ chptrs_(uplo, n, &c__1, &afp[1], &ipiv[1], &work[1], n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * x_dim1;
+ r__3 = lstres, r__4 = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[i__ + j * x_dim1]), dabs(r__2));
+ lstres = dmax(r__3,r__4);
+/* L130: */
+ }
+ if (lstres != 0.f) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of CHPRFS */
+
+} /* chprfs_ */
diff --git a/contrib/libs/clapack/chpsv.c b/contrib/libs/clapack/chpsv.c
new file mode 100644
index 0000000000..977b0cfd35
--- /dev/null
+++ b/contrib/libs/clapack/chpsv.c
@@ -0,0 +1,176 @@
+/* chpsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int chpsv_(char *uplo, integer *n, integer *nrhs, complex *
+	ap, integer *ipiv, complex *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *), chptrf_(
+	    char *, integer *, complex *, integer *, integer *),
+	    chptrs_(char *, integer *, integer *, complex *, integer *,
+	    complex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHPSV computes the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N Hermitian matrix stored in packed format and X */
+/* and B are N-by-NRHS matrices. */
+
+/* The diagonal pivoting method is used to factor A as */
+/* A = U * D * U**H, if UPLO = 'U', or */
+/* A = L * D * L**H, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, D is Hermitian and block diagonal with 1-by-1 */
+/* and 2-by-2 diagonal blocks. The factored form of A is then used to */
+/* solve the system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**H or A = L*D*L**H as computed by CHPTRF, stored as */
+/* a packed triangular matrix in the same storage format as A. */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D, as */
+/* determined by CHPTRF. If IPIV(k) > 0, then rows and columns */
+/* k and IPIV(k) were interchanged, and D(k,k) is a 1-by-1 */
+/* diagonal block. If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, */
+/* then rows and columns k-1 and -IPIV(k) were interchanged and */
+/* D(k-1:k,k-1:k) is a 2-by-2 diagonal block. If UPLO = 'L' and */
+/* IPIV(k) = IPIV(k+1) < 0, then rows and columns k+1 and */
+/* -IPIV(k) were interchanged and D(k:k+1,k:k+1) is a 2-by-2 */
+/* diagonal block. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, so the solution could not be */
+/* computed. */
+
+/* Further Details */
+/* =============== */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the Hermitian matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = conjg(aji)) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* (f2c idiom) Shift each array pointer back by one element so the */
+/* 1-based Fortran subscripts AP(1), IPIV(1), B(1,1) used below map */
+/* directly onto the C arrays. */
+    --ap;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+/* Report the first illegal argument (negated INFO) and quit. */
+	i__1 = -(*info);
+	xerbla_("CHPSV ", &i__1);
+	return 0;
+    }
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+    chptrf_(uplo, n, &ap[1], &ipiv[1], info);
+    if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+	chptrs_(uplo, n, nrhs, &ap[1], &ipiv[1], &b[b_offset], ldb, info);
+
+    }
+/* A positive INFO from CHPTRF (singular D) is passed through to the */
+/* caller unchanged; in that case B is left unmodified. */
+    return 0;
+
+/* End of CHPSV */
+
+} /* chpsv_ */
diff --git a/contrib/libs/clapack/chpsvx.c b/contrib/libs/clapack/chpsvx.c
new file mode 100644
index 0000000000..d75ccae1b7
--- /dev/null
+++ b/contrib/libs/clapack/chpsvx.c
@@ -0,0 +1,320 @@
+/* chpsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int chpsvx_(char *fact, char *uplo, integer *n, integer *
+	nrhs, complex *ap, complex *afp, integer *ipiv, complex *b, integer *
+	ldb, complex *x, integer *ldx, real *rcond, real *ferr, real *berr,
+	complex *work, real *rwork, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, x_dim1, x_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    real anorm;
+    extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+	    complex *, integer *);
+    extern doublereal clanhp_(char *, char *, integer *, complex *, real *), slamch_(char *);
+    logical nofact;
+    extern /* Subroutine */ int chpcon_(char *, integer *, complex *, integer
+	    *, real *, real *, complex *, integer *), clacpy_(char *,
+	    integer *, integer *, complex *, integer *, complex *, integer *), xerbla_(char *, integer *), chprfs_(char *,
+	    integer *, integer *, complex *, complex *, integer *, complex *,
+	    integer *, complex *, integer *, real *, real *, complex *, real *
+, integer *), chptrf_(char *, integer *, complex *,
+	    integer *, integer *), chptrs_(char *, integer *, integer
+	    *, complex *, integer *, complex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHPSVX uses the diagonal pivoting factorization A = U*D*U**H or */
+/* A = L*D*L**H to compute the solution to a complex system of linear */
+/* equations A * X = B, where A is an N-by-N Hermitian matrix stored */
+/* in packed format and X and B are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the diagonal pivoting method is used to factor A as */
+/* A = U * D * U**H, if UPLO = 'U', or */
+/* A = L * D * L**H, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices and D is Hermitian and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* 2. If some D(i,i)=0, so that D is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of A has been */
+/* supplied on entry. */
+/* = 'F': On entry, AFP and IPIV contain the factored form of */
+/* A. AFP and IPIV will not be modified. */
+/* = 'N': The matrix A will be copied to AFP and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AP (input) COMPLEX array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the Hermitian matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. */
+
+/* AFP (input or output) COMPLEX array, dimension (N*(N+1)/2) */
+/* If FACT = 'F', then AFP is an input argument and on entry */
+/* contains the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**H or A = L*D*L**H as computed by CHPTRF, stored as */
+/* a packed triangular matrix in the same storage format as A. */
+
+/* If FACT = 'N', then AFP is an output argument and on exit */
+/* contains the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**H or A = L*D*L**H as computed by CHPTRF, stored as */
+/* a packed triangular matrix in the same storage format as A. */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by CHPTRF. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by CHPTRF. */
+
+/* B (input) COMPLEX array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) REAL */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A. If RCOND is less than the machine precision (in */
+/* particular, if RCOND = 0), the matrix is singular to working */
+/* precision. This condition is indicated by a return code of */
+/* INFO > 0. */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: D(i,i) is exactly zero. The factorization */
+/* has been completed but the factor D is exactly */
+/* singular, so the solution and error bounds could */
+/* not be computed. RCOND = 0 is returned. */
+/* = N+1: D is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* Further Details */
+/* =============== */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the Hermitian matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = conjg(aji)) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* (f2c idiom) Shift array pointers so the 1-based Fortran subscripts */
+/* used below index the C arrays correctly. */
+    --ap;
+    --afp;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+/* nofact == TRUE means this routine must factor A itself (FACT = 'N'); */
+/* otherwise AFP/IPIV already hold the CHPTRF factorization. */
+    nofact = lsame_(fact, "N");
+    if (! nofact && ! lsame_(fact, "F")) {
+	*info = -1;
+    } else if (! lsame_(uplo, "U") && ! lsame_(uplo,
+	    "L")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (*ldb < max(1,*n)) {
+	*info = -9;
+    } else if (*ldx < max(1,*n)) {
+	*info = -11;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CHPSVX", &i__1);
+	return 0;
+    }
+
+    if (nofact) {
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+/* Copy the packed triangle (n*(n+1)/2 elements) of A into AFP, then */
+/* factor AFP in place, leaving AP untouched for the refinement step. */
+	i__1 = *n * (*n + 1) / 2;
+	ccopy_(&i__1, &ap[1], &c__1, &afp[1], &c__1);
+	chptrf_(uplo, n, &afp[1], &ipiv[1], info);
+
+/* Return if INFO is non-zero. */
+
+	if (*info > 0) {
+	    *rcond = 0.f;
+	    return 0;
+	}
+    }
+
+/* Compute the norm of the matrix A. */
+
+/* The infinity-norm is taken from the original matrix AP, not the */
+/* factorization. */
+    anorm = clanhp_("I", uplo, n, &ap[1], &rwork[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+    chpcon_(uplo, n, &afp[1], &ipiv[1], &anorm, rcond, &work[1], info);
+
+/* Compute the solution vectors X. */
+
+/* Copy B into X, then overwrite X with the solution of A*X = B. */
+    clacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    chptrs_(uplo, n, nrhs, &afp[1], &ipiv[1], &x[x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+    chprfs_(uplo, n, nrhs, &ap[1], &afp[1], &ipiv[1], &b[b_offset], ldb, &x[
+	    x_offset], ldx, &ferr[1], &berr[1], &work[1], &rwork[1], info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+    if (*rcond < slamch_("Epsilon")) {
+	*info = *n + 1;
+    }
+
+    return 0;
+
+/* End of CHPSVX */
+
+} /* chpsvx_ */
diff --git a/contrib/libs/clapack/chptrd.c b/contrib/libs/clapack/chptrd.c
new file mode 100644
index 0000000000..b08a545f04
--- /dev/null
+++ b/contrib/libs/clapack/chptrd.c
@@ -0,0 +1,318 @@
+/* chptrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b2 = {0.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int chptrd_(char *uplo, integer *n, complex *ap, real *d__,
+	real *e, complex *tau, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3;
+    real r__1;
+    complex q__1, q__2, q__3, q__4;
+
+    /* Local variables */
+    integer i__, i1, ii, i1i1;
+    complex taui;
+    extern /* Subroutine */ int chpr2_(char *, integer *, complex *, complex *
+, integer *, complex *, integer *, complex *);
+    complex alpha;
+    extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+	    *, complex *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int chpmv_(char *, integer *, complex *, complex *
+, complex *, integer *, complex *, complex *, integer *),
+	    caxpy_(integer *, complex *, complex *, integer *, complex *,
+	    integer *);
+    logical upper;
+    extern /* Subroutine */ int clarfg_(integer *, complex *, complex *,
+	    integer *, complex *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHPTRD reduces a complex Hermitian matrix A stored in packed form to */
+/* real symmetric tridiagonal form T by a unitary similarity */
+/* transformation: Q**H * A * Q = T. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+/* On exit, if UPLO = 'U', the diagonal and first superdiagonal */
+/* of A are overwritten by the corresponding elements of the */
+/* tridiagonal matrix T, and the elements above the first */
+/* superdiagonal, with the array TAU, represent the unitary */
+/* matrix Q as a product of elementary reflectors; if UPLO */
+/* = 'L', the diagonal and first subdiagonal of A are over- */
+/* written by the corresponding elements of the tridiagonal */
+/* matrix T, and the elements below the first subdiagonal, with */
+/* the array TAU, represent the unitary matrix Q as a product */
+/* of elementary reflectors. See Further Details. */
+
+/* D (output) REAL array, dimension (N) */
+/* The diagonal elements of the tridiagonal matrix T: */
+/* D(i) = A(i,i). */
+
+/* E (output) REAL array, dimension (N-1) */
+/* The off-diagonal elements of the tridiagonal matrix T: */
+/* E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. */
+
+/* TAU (output) COMPLEX array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(n-1) . . . H(2) H(1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in AP, */
+/* overwriting A(1:i-1,i+1), and tau is stored in TAU(i). */
+
+/* If UPLO = 'L', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(1) H(2) . . . H(n-1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in AP, */
+/* overwriting A(i+2:n,i), and tau is stored in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+/* (f2c idiom) Shift pointers so 1-based Fortran subscripts work below. */
+    --tau;
+    --e;
+    --d__;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CHPTRD", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n <= 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/* Reduce the upper triangle of A. */
+/* I1 is the index in AP of A(1,I+1). */
+
+	i1 = *n * (*n - 1) / 2 + 1;
+/* Zero the imaginary part of A(n,n): the diagonal of a Hermitian */
+/* matrix is real. */
+	i__1 = i1 + *n - 1;
+	i__2 = i1 + *n - 1;
+	r__1 = ap[i__2].r;
+	ap[i__1].r = r__1, ap[i__1].i = 0.f;
+	for (i__ = *n - 1; i__ >= 1; --i__) {
+
+/* Generate elementary reflector H(i) = I - tau * v * v' */
+/* to annihilate A(1:i-1,i+1) */
+
+	    i__1 = i1 + i__ - 1;
+	    alpha.r = ap[i__1].r, alpha.i = ap[i__1].i;
+	    clarfg_(&i__, &alpha, &ap[i1], &c__1, &taui);
+	    i__1 = i__;
+	    e[i__1] = alpha.r;
+
+	    if (taui.r != 0.f || taui.i != 0.f) {
+
+/* Apply H(i) from both sides to A(1:i,1:i) */
+
+/* Temporarily set A(i,i+1) = 1 so AP(i1..) holds the full */
+/* reflector vector v; it is restored to E(i) after the update. */
+		i__1 = i1 + i__ - 1;
+		ap[i__1].r = 1.f, ap[i__1].i = 0.f;
+
+/* Compute y := tau * A * v storing y in TAU(1:i) */
+
+		chpmv_(uplo, &i__, &taui, &ap[1], &ap[i1], &c__1, &c_b2, &tau[
+			1], &c__1);
+
+/* Compute w := y - 1/2 * tau * (y'*v) * v */
+
+		q__3.r = -.5f, q__3.i = -0.f;
+		q__2.r = q__3.r * taui.r - q__3.i * taui.i, q__2.i = q__3.r *
+			taui.i + q__3.i * taui.r;
+		cdotc_(&q__4, &i__, &tau[1], &c__1, &ap[i1], &c__1);
+		q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r *
+			q__4.i + q__2.i * q__4.r;
+		alpha.r = q__1.r, alpha.i = q__1.i;
+		caxpy_(&i__, &alpha, &ap[i1], &c__1, &tau[1], &c__1);
+
+/* Apply the transformation as a rank-2 update: */
+/* A := A - v * w' - w * v' */
+
+		q__1.r = -1.f, q__1.i = -0.f;
+		chpr2_(uplo, &i__, &q__1, &ap[i1], &c__1, &tau[1], &c__1, &ap[
+			1]);
+
+	    }
+/* Restore the superdiagonal entry to E(i), record D(i+1) and */
+/* TAU(i), and step I1 back to the start of the previous column. */
+	    i__1 = i1 + i__ - 1;
+	    i__2 = i__;
+	    ap[i__1].r = e[i__2], ap[i__1].i = 0.f;
+	    i__1 = i__ + 1;
+	    i__2 = i1 + i__;
+	    d__[i__1] = ap[i__2].r;
+	    i__1 = i__;
+	    tau[i__1].r = taui.r, tau[i__1].i = taui.i;
+	    i1 -= i__;
+/* L10: */
+	}
+	d__[1] = ap[1].r;
+    } else {
+
+/* Reduce the lower triangle of A. II is the index in AP of */
+/* A(i,i) and I1I1 is the index of A(i+1,i+1). */
+
+	ii = 1;
+/* Zero the imaginary part of A(1,1) (Hermitian diagonal is real). */
+	r__1 = ap[1].r;
+	ap[1].r = r__1, ap[1].i = 0.f;
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i1i1 = ii + *n - i__ + 1;
+
+/* Generate elementary reflector H(i) = I - tau * v * v' */
+/* to annihilate A(i+2:n,i) */
+
+	    i__2 = ii + 1;
+	    alpha.r = ap[i__2].r, alpha.i = ap[i__2].i;
+	    i__2 = *n - i__;
+	    clarfg_(&i__2, &alpha, &ap[ii + 2], &c__1, &taui);
+	    i__2 = i__;
+	    e[i__2] = alpha.r;
+
+	    if (taui.r != 0.f || taui.i != 0.f) {
+
+/* Apply H(i) from both sides to A(i+1:n,i+1:n) */
+
+/* Temporarily set A(i+1,i) = 1 so AP(ii+1..) holds the full */
+/* reflector vector v; it is restored to E(i) after the update. */
+		i__2 = ii + 1;
+		ap[i__2].r = 1.f, ap[i__2].i = 0.f;
+
+/* Compute y := tau * A * v storing y in TAU(i:n-1) */
+
+		i__2 = *n - i__;
+		chpmv_(uplo, &i__2, &taui, &ap[i1i1], &ap[ii + 1], &c__1, &
+			c_b2, &tau[i__], &c__1);
+
+/* Compute w := y - 1/2 * tau * (y'*v) * v */
+
+		q__3.r = -.5f, q__3.i = -0.f;
+		q__2.r = q__3.r * taui.r - q__3.i * taui.i, q__2.i = q__3.r *
+			taui.i + q__3.i * taui.r;
+		i__2 = *n - i__;
+		cdotc_(&q__4, &i__2, &tau[i__], &c__1, &ap[ii + 1], &c__1);
+		q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r *
+			q__4.i + q__2.i * q__4.r;
+		alpha.r = q__1.r, alpha.i = q__1.i;
+		i__2 = *n - i__;
+		caxpy_(&i__2, &alpha, &ap[ii + 1], &c__1, &tau[i__], &c__1);
+
+/* Apply the transformation as a rank-2 update: */
+/* A := A - v * w' - w * v' */
+
+		i__2 = *n - i__;
+		q__1.r = -1.f, q__1.i = -0.f;
+		chpr2_(uplo, &i__2, &q__1, &ap[ii + 1], &c__1, &tau[i__], &
+			c__1, &ap[i1i1]);
+
+	    }
+/* Restore the subdiagonal entry to E(i), record D(i) and TAU(i), */
+/* and advance II to the diagonal of the next column. */
+	    i__2 = ii + 1;
+	    i__3 = i__;
+	    ap[i__2].r = e[i__3], ap[i__2].i = 0.f;
+	    i__2 = i__;
+	    i__3 = ii;
+	    d__[i__2] = ap[i__3].r;
+	    i__2 = i__;
+	    tau[i__2].r = taui.r, tau[i__2].i = taui.i;
+	    ii = i1i1;
+/* L20: */
+	}
+	i__1 = *n;
+	i__2 = ii;
+	d__[i__1] = ap[i__2].r;
+    }
+
+    return 0;
+
+/* End of CHPTRD */
+
+} /* chptrd_ */
diff --git a/contrib/libs/clapack/chptrf.c b/contrib/libs/clapack/chptrf.c
new file mode 100644
index 0000000000..47d93370b6
--- /dev/null
+++ b/contrib/libs/clapack/chptrf.c
@@ -0,0 +1,821 @@
+/* chptrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int chptrf_(char *uplo, integer *n, complex *ap, integer *
+ ipiv, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4, i__5, i__6;
+ real r__1, r__2, r__3, r__4;
+ complex q__1, q__2, q__3, q__4, q__5, q__6;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_imag(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ real d__;
+ integer i__, j, k;
+ complex t;
+ real r1, d11;
+ complex d12;
+ real d22;
+ complex d21;
+ integer kc, kk, kp;
+ complex wk;
+ integer kx;
+ real tt;
+ integer knc, kpc, npp;
+ complex wkm1, wkp1;
+ extern /* Subroutine */ int chpr_(char *, integer *, real *, complex *,
+ integer *, complex *);
+ integer imax, jmax;
+ real alpha;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ integer kstep;
+ logical upper;
+ extern doublereal slapy2_(real *, real *);
+ real absakk;
+ extern integer icamax_(integer *, complex *, integer *);
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *), xerbla_(char *, integer *);
+ real colmax, rowmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHPTRF computes the factorization of a complex Hermitian packed */
+/* matrix A using the Bunch-Kaufman diagonal pivoting method: */
+
+/* A = U*D*U**H or A = L*D*L**H */
+
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is Hermitian and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L, stored as a packed triangular */
+/* matrix overwriting A (see below for further details). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, and division by zero will occur if it */
+/* is used to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* 5-96 - Based on modifications by J. Lewis, Boeing Computer Services */
+/* Company */
+
+/* If UPLO = 'U', then A = U*D*U', where */
+/* U = P(n)*U(n)* ... *P(k)U(k)* ..., */
+/* i.e., U is a product of terms P(k)*U(k), where k decreases from n to */
+/* 1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I v 0 ) k-s */
+/* U(k) = ( 0 I 0 ) s */
+/* ( 0 0 I ) n-k */
+/* k-s s n-k */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */
+/* If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */
+/* and A(k,k), and v overwrites A(1:k-2,k-1:k). */
+
+/* If UPLO = 'L', then A = L*D*L', where */
+/* L = P(1)*L(1)* ... *P(k)*L(k)* ..., */
+/* i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */
+/* n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I 0 0 ) k-1 */
+/* L(k) = ( 0 I 0 ) s */
+/* ( 0 v I ) n-k-s+1 */
+/* k-1 s n-k-s+1 */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */
+/* If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */
+/* and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ipiv;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHPTRF", &i__1);
+ return 0;
+ }
+
+/* Initialize ALPHA for use in choosing pivot block size. */
+
+ alpha = (sqrt(17.f) + 1.f) / 8.f;
+
+ if (upper) {
+
+/* Factorize A as U*D*U' using the upper triangle of A */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2 */
+
+ k = *n;
+ kc = (*n - 1) * *n / 2 + 1;
+L10:
+ knc = kc;
+
+/* If K < 1, exit from loop */
+
+ if (k < 1) {
+ goto L110;
+ }
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ i__1 = kc + k - 1;
+ absakk = (r__1 = ap[i__1].r, dabs(r__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ imax = icamax_(&i__1, &ap[kc], &c__1);
+ i__1 = kc + imax - 1;
+ colmax = (r__1 = ap[i__1].r, dabs(r__1)) + (r__2 = r_imag(&ap[kc
+ + imax - 1]), dabs(r__2));
+ } else {
+ colmax = 0.f;
+ }
+
+ if (dmax(absakk,colmax) == 0.f) {
+
+/* Column K is zero: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ i__1 = kc + k - 1;
+ i__2 = kc + k - 1;
+ r__1 = ap[i__2].r;
+ ap[i__1].r = r__1, ap[i__1].i = 0.f;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ rowmax = 0.f;
+ jmax = imax;
+ kx = imax * (imax + 1) / 2 + imax;
+ i__1 = k;
+ for (j = imax + 1; j <= i__1; ++j) {
+ i__2 = kx;
+ if ((r__1 = ap[i__2].r, dabs(r__1)) + (r__2 = r_imag(&ap[
+ kx]), dabs(r__2)) > rowmax) {
+ i__2 = kx;
+ rowmax = (r__1 = ap[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&ap[kx]), dabs(r__2));
+ jmax = j;
+ }
+ kx += j;
+/* L20: */
+ }
+ kpc = (imax - 1) * imax / 2 + 1;
+ if (imax > 1) {
+ i__1 = imax - 1;
+ jmax = icamax_(&i__1, &ap[kpc], &c__1);
+/* Computing MAX */
+ i__1 = kpc + jmax - 1;
+ r__3 = rowmax, r__4 = (r__1 = ap[i__1].r, dabs(r__1)) + (
+ r__2 = r_imag(&ap[kpc + jmax - 1]), dabs(r__2));
+ rowmax = dmax(r__3,r__4);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else /* if(complicated condition) */ {
+ i__1 = kpc + imax - 1;
+ if ((r__1 = ap[i__1].r, dabs(r__1)) >= alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+ } else {
+
+/* interchange rows and columns K-1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+ }
+
+ kk = k - kstep + 1;
+ if (kstep == 2) {
+ knc = knc - k + 1;
+ }
+ if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the leading */
+/* submatrix A(1:k,1:k) */
+
+ i__1 = kp - 1;
+ cswap_(&i__1, &ap[knc], &c__1, &ap[kpc], &c__1);
+ kx = kpc + kp - 1;
+ i__1 = kk - 1;
+ for (j = kp + 1; j <= i__1; ++j) {
+ kx = kx + j - 1;
+ r_cnjg(&q__1, &ap[knc + j - 1]);
+ t.r = q__1.r, t.i = q__1.i;
+ i__2 = knc + j - 1;
+ r_cnjg(&q__1, &ap[kx]);
+ ap[i__2].r = q__1.r, ap[i__2].i = q__1.i;
+ i__2 = kx;
+ ap[i__2].r = t.r, ap[i__2].i = t.i;
+/* L30: */
+ }
+ i__1 = kx + kk - 1;
+ r_cnjg(&q__1, &ap[kx + kk - 1]);
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ i__1 = knc + kk - 1;
+ r1 = ap[i__1].r;
+ i__1 = knc + kk - 1;
+ i__2 = kpc + kp - 1;
+ r__1 = ap[i__2].r;
+ ap[i__1].r = r__1, ap[i__1].i = 0.f;
+ i__1 = kpc + kp - 1;
+ ap[i__1].r = r1, ap[i__1].i = 0.f;
+ if (kstep == 2) {
+ i__1 = kc + k - 1;
+ i__2 = kc + k - 1;
+ r__1 = ap[i__2].r;
+ ap[i__1].r = r__1, ap[i__1].i = 0.f;
+ i__1 = kc + k - 2;
+ t.r = ap[i__1].r, t.i = ap[i__1].i;
+ i__1 = kc + k - 2;
+ i__2 = kc + kp - 1;
+ ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+ i__1 = kc + kp - 1;
+ ap[i__1].r = t.r, ap[i__1].i = t.i;
+ }
+ } else {
+ i__1 = kc + k - 1;
+ i__2 = kc + k - 1;
+ r__1 = ap[i__2].r;
+ ap[i__1].r = r__1, ap[i__1].i = 0.f;
+ if (kstep == 2) {
+ i__1 = kc - 1;
+ i__2 = kc - 1;
+ r__1 = ap[i__2].r;
+ ap[i__1].r = r__1, ap[i__1].i = 0.f;
+ }
+ }
+
+/* Update the leading submatrix */
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = U(k)*D(k) */
+
+/* where U(k) is the k-th column of U */
+
+/* Perform a rank-1 update of A(1:k-1,1:k-1) as */
+
+/* A := A - U(k)*D(k)*U(k)' = A - W(k)*1/D(k)*W(k)' */
+
+ i__1 = kc + k - 1;
+ r1 = 1.f / ap[i__1].r;
+ i__1 = k - 1;
+ r__1 = -r1;
+ chpr_(uplo, &i__1, &r__1, &ap[kc], &c__1, &ap[1]);
+
+/* Store U(k) in column k */
+
+ i__1 = k - 1;
+ csscal_(&i__1, &r1, &ap[kc], &c__1);
+ } else {
+
+/* 2-by-2 pivot block D(k): columns k and k-1 now hold */
+
+/* ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */
+
+/* where U(k) and U(k-1) are the k-th and (k-1)-th columns */
+/* of U */
+
+/* Perform a rank-2 update of A(1:k-2,1:k-2) as */
+
+/* A := A - ( U(k-1) U(k) )*D(k)*( U(k-1) U(k) )' */
+/* = A - ( W(k-1) W(k) )*inv(D(k))*( W(k-1) W(k) )' */
+
+ if (k > 2) {
+
+ i__1 = k - 1 + (k - 1) * k / 2;
+ r__1 = ap[i__1].r;
+ r__2 = r_imag(&ap[k - 1 + (k - 1) * k / 2]);
+ d__ = slapy2_(&r__1, &r__2);
+ i__1 = k - 1 + (k - 2) * (k - 1) / 2;
+ d22 = ap[i__1].r / d__;
+ i__1 = k + (k - 1) * k / 2;
+ d11 = ap[i__1].r / d__;
+ tt = 1.f / (d11 * d22 - 1.f);
+ i__1 = k - 1 + (k - 1) * k / 2;
+ q__1.r = ap[i__1].r / d__, q__1.i = ap[i__1].i / d__;
+ d12.r = q__1.r, d12.i = q__1.i;
+ d__ = tt / d__;
+
+ for (j = k - 2; j >= 1; --j) {
+ i__1 = j + (k - 2) * (k - 1) / 2;
+ q__3.r = d11 * ap[i__1].r, q__3.i = d11 * ap[i__1].i;
+ r_cnjg(&q__5, &d12);
+ i__2 = j + (k - 1) * k / 2;
+ q__4.r = q__5.r * ap[i__2].r - q__5.i * ap[i__2].i,
+ q__4.i = q__5.r * ap[i__2].i + q__5.i * ap[
+ i__2].r;
+ q__2.r = q__3.r - q__4.r, q__2.i = q__3.i - q__4.i;
+ q__1.r = d__ * q__2.r, q__1.i = d__ * q__2.i;
+ wkm1.r = q__1.r, wkm1.i = q__1.i;
+ i__1 = j + (k - 1) * k / 2;
+ q__3.r = d22 * ap[i__1].r, q__3.i = d22 * ap[i__1].i;
+ i__2 = j + (k - 2) * (k - 1) / 2;
+ q__4.r = d12.r * ap[i__2].r - d12.i * ap[i__2].i,
+ q__4.i = d12.r * ap[i__2].i + d12.i * ap[i__2]
+ .r;
+ q__2.r = q__3.r - q__4.r, q__2.i = q__3.i - q__4.i;
+ q__1.r = d__ * q__2.r, q__1.i = d__ * q__2.i;
+ wk.r = q__1.r, wk.i = q__1.i;
+ for (i__ = j; i__ >= 1; --i__) {
+ i__1 = i__ + (j - 1) * j / 2;
+ i__2 = i__ + (j - 1) * j / 2;
+ i__3 = i__ + (k - 1) * k / 2;
+ r_cnjg(&q__4, &wk);
+ q__3.r = ap[i__3].r * q__4.r - ap[i__3].i *
+ q__4.i, q__3.i = ap[i__3].r * q__4.i + ap[
+ i__3].i * q__4.r;
+ q__2.r = ap[i__2].r - q__3.r, q__2.i = ap[i__2].i
+ - q__3.i;
+ i__4 = i__ + (k - 2) * (k - 1) / 2;
+ r_cnjg(&q__6, &wkm1);
+ q__5.r = ap[i__4].r * q__6.r - ap[i__4].i *
+ q__6.i, q__5.i = ap[i__4].r * q__6.i + ap[
+ i__4].i * q__6.r;
+ q__1.r = q__2.r - q__5.r, q__1.i = q__2.i -
+ q__5.i;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+/* L40: */
+ }
+ i__1 = j + (k - 1) * k / 2;
+ ap[i__1].r = wk.r, ap[i__1].i = wk.i;
+ i__1 = j + (k - 2) * (k - 1) / 2;
+ ap[i__1].r = wkm1.r, ap[i__1].i = wkm1.i;
+ i__1 = j + (j - 1) * j / 2;
+ i__2 = j + (j - 1) * j / 2;
+ r__1 = ap[i__2].r;
+ q__1.r = r__1, q__1.i = 0.f;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+/* L50: */
+ }
+
+ }
+
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k - 1] = -kp;
+ }
+
+/* Decrease K and return to the start of the main loop */
+
+ k -= kstep;
+ kc = knc - k;
+ goto L10;
+
+ } else {
+
+/* Factorize A as L*D*L' using the lower triangle of A */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2 */
+
+ k = 1;
+ kc = 1;
+ npp = *n * (*n + 1) / 2;
+L60:
+ knc = kc;
+
+/* If K > N, exit from loop */
+
+ if (k > *n) {
+ goto L110;
+ }
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ i__1 = kc;
+ absakk = (r__1 = ap[i__1].r, dabs(r__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ imax = k + icamax_(&i__1, &ap[kc + 1], &c__1);
+ i__1 = kc + imax - k;
+ colmax = (r__1 = ap[i__1].r, dabs(r__1)) + (r__2 = r_imag(&ap[kc
+ + imax - k]), dabs(r__2));
+ } else {
+ colmax = 0.f;
+ }
+
+ if (dmax(absakk,colmax) == 0.f) {
+
+/* Column K is zero: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ i__1 = kc;
+ i__2 = kc;
+ r__1 = ap[i__2].r;
+ ap[i__1].r = r__1, ap[i__1].i = 0.f;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ rowmax = 0.f;
+ kx = kc + imax - k;
+ i__1 = imax - 1;
+ for (j = k; j <= i__1; ++j) {
+ i__2 = kx;
+ if ((r__1 = ap[i__2].r, dabs(r__1)) + (r__2 = r_imag(&ap[
+ kx]), dabs(r__2)) > rowmax) {
+ i__2 = kx;
+ rowmax = (r__1 = ap[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&ap[kx]), dabs(r__2));
+ jmax = j;
+ }
+ kx = kx + *n - j;
+/* L70: */
+ }
+ kpc = npp - (*n - imax + 1) * (*n - imax + 2) / 2 + 1;
+ if (imax < *n) {
+ i__1 = *n - imax;
+ jmax = imax + icamax_(&i__1, &ap[kpc + 1], &c__1);
+/* Computing MAX */
+ i__1 = kpc + jmax - imax;
+ r__3 = rowmax, r__4 = (r__1 = ap[i__1].r, dabs(r__1)) + (
+ r__2 = r_imag(&ap[kpc + jmax - imax]), dabs(r__2))
+ ;
+ rowmax = dmax(r__3,r__4);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else /* if(complicated condition) */ {
+ i__1 = kpc;
+ if ((r__1 = ap[i__1].r, dabs(r__1)) >= alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+ } else {
+
+/* interchange rows and columns K+1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+ }
+
+ kk = k + kstep - 1;
+ if (kstep == 2) {
+ knc = knc + *n - k + 1;
+ }
+ if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the trailing */
+/* submatrix A(k:n,k:n) */
+
+ if (kp < *n) {
+ i__1 = *n - kp;
+ cswap_(&i__1, &ap[knc + kp - kk + 1], &c__1, &ap[kpc + 1],
+ &c__1);
+ }
+ kx = knc + kp - kk;
+ i__1 = kp - 1;
+ for (j = kk + 1; j <= i__1; ++j) {
+ kx = kx + *n - j + 1;
+ r_cnjg(&q__1, &ap[knc + j - kk]);
+ t.r = q__1.r, t.i = q__1.i;
+ i__2 = knc + j - kk;
+ r_cnjg(&q__1, &ap[kx]);
+ ap[i__2].r = q__1.r, ap[i__2].i = q__1.i;
+ i__2 = kx;
+ ap[i__2].r = t.r, ap[i__2].i = t.i;
+/* L80: */
+ }
+ i__1 = knc + kp - kk;
+ r_cnjg(&q__1, &ap[knc + kp - kk]);
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ i__1 = knc;
+ r1 = ap[i__1].r;
+ i__1 = knc;
+ i__2 = kpc;
+ r__1 = ap[i__2].r;
+ ap[i__1].r = r__1, ap[i__1].i = 0.f;
+ i__1 = kpc;
+ ap[i__1].r = r1, ap[i__1].i = 0.f;
+ if (kstep == 2) {
+ i__1 = kc;
+ i__2 = kc;
+ r__1 = ap[i__2].r;
+ ap[i__1].r = r__1, ap[i__1].i = 0.f;
+ i__1 = kc + 1;
+ t.r = ap[i__1].r, t.i = ap[i__1].i;
+ i__1 = kc + 1;
+ i__2 = kc + kp - k;
+ ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+ i__1 = kc + kp - k;
+ ap[i__1].r = t.r, ap[i__1].i = t.i;
+ }
+ } else {
+ i__1 = kc;
+ i__2 = kc;
+ r__1 = ap[i__2].r;
+ ap[i__1].r = r__1, ap[i__1].i = 0.f;
+ if (kstep == 2) {
+ i__1 = knc;
+ i__2 = knc;
+ r__1 = ap[i__2].r;
+ ap[i__1].r = r__1, ap[i__1].i = 0.f;
+ }
+ }
+
+/* Update the trailing submatrix */
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = L(k)*D(k) */
+
+/* where L(k) is the k-th column of L */
+
+ if (k < *n) {
+
+/* Perform a rank-1 update of A(k+1:n,k+1:n) as */
+
+/* A := A - L(k)*D(k)*L(k)' = A - W(k)*(1/D(k))*W(k)' */
+
+ i__1 = kc;
+ r1 = 1.f / ap[i__1].r;
+ i__1 = *n - k;
+ r__1 = -r1;
+ chpr_(uplo, &i__1, &r__1, &ap[kc + 1], &c__1, &ap[kc + *n
+ - k + 1]);
+
+/* Store L(k) in column K */
+
+ i__1 = *n - k;
+ csscal_(&i__1, &r1, &ap[kc + 1], &c__1);
+ }
+ } else {
+
+/* 2-by-2 pivot block D(k): columns K and K+1 now hold */
+
+/* ( W(k) W(k+1) ) = ( L(k) L(k+1) )*D(k) */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th columns */
+/* of L */
+
+ if (k < *n - 1) {
+
+/* Perform a rank-2 update of A(k+2:n,k+2:n) as */
+
+/* A := A - ( L(k) L(k+1) )*D(k)*( L(k) L(k+1) )' */
+/* = A - ( W(k) W(k+1) )*inv(D(k))*( W(k) W(k+1) )' */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th */
+/* columns of L */
+
+ i__1 = k + 1 + (k - 1) * ((*n << 1) - k) / 2;
+ r__1 = ap[i__1].r;
+ r__2 = r_imag(&ap[k + 1 + (k - 1) * ((*n << 1) - k) / 2]);
+ d__ = slapy2_(&r__1, &r__2);
+ i__1 = k + 1 + k * ((*n << 1) - k - 1) / 2;
+ d11 = ap[i__1].r / d__;
+ i__1 = k + (k - 1) * ((*n << 1) - k) / 2;
+ d22 = ap[i__1].r / d__;
+ tt = 1.f / (d11 * d22 - 1.f);
+ i__1 = k + 1 + (k - 1) * ((*n << 1) - k) / 2;
+ q__1.r = ap[i__1].r / d__, q__1.i = ap[i__1].i / d__;
+ d21.r = q__1.r, d21.i = q__1.i;
+ d__ = tt / d__;
+
+ i__1 = *n;
+ for (j = k + 2; j <= i__1; ++j) {
+ i__2 = j + (k - 1) * ((*n << 1) - k) / 2;
+ q__3.r = d11 * ap[i__2].r, q__3.i = d11 * ap[i__2].i;
+ i__3 = j + k * ((*n << 1) - k - 1) / 2;
+ q__4.r = d21.r * ap[i__3].r - d21.i * ap[i__3].i,
+ q__4.i = d21.r * ap[i__3].i + d21.i * ap[i__3]
+ .r;
+ q__2.r = q__3.r - q__4.r, q__2.i = q__3.i - q__4.i;
+ q__1.r = d__ * q__2.r, q__1.i = d__ * q__2.i;
+ wk.r = q__1.r, wk.i = q__1.i;
+ i__2 = j + k * ((*n << 1) - k - 1) / 2;
+ q__3.r = d22 * ap[i__2].r, q__3.i = d22 * ap[i__2].i;
+ r_cnjg(&q__5, &d21);
+ i__3 = j + (k - 1) * ((*n << 1) - k) / 2;
+ q__4.r = q__5.r * ap[i__3].r - q__5.i * ap[i__3].i,
+ q__4.i = q__5.r * ap[i__3].i + q__5.i * ap[
+ i__3].r;
+ q__2.r = q__3.r - q__4.r, q__2.i = q__3.i - q__4.i;
+ q__1.r = d__ * q__2.r, q__1.i = d__ * q__2.i;
+ wkp1.r = q__1.r, wkp1.i = q__1.i;
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + (j - 1) * ((*n << 1) - j) / 2;
+ i__4 = i__ + (j - 1) * ((*n << 1) - j) / 2;
+ i__5 = i__ + (k - 1) * ((*n << 1) - k) / 2;
+ r_cnjg(&q__4, &wk);
+ q__3.r = ap[i__5].r * q__4.r - ap[i__5].i *
+ q__4.i, q__3.i = ap[i__5].r * q__4.i + ap[
+ i__5].i * q__4.r;
+ q__2.r = ap[i__4].r - q__3.r, q__2.i = ap[i__4].i
+ - q__3.i;
+ i__6 = i__ + k * ((*n << 1) - k - 1) / 2;
+ r_cnjg(&q__6, &wkp1);
+ q__5.r = ap[i__6].r * q__6.r - ap[i__6].i *
+ q__6.i, q__5.i = ap[i__6].r * q__6.i + ap[
+ i__6].i * q__6.r;
+ q__1.r = q__2.r - q__5.r, q__1.i = q__2.i -
+ q__5.i;
+ ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+/* L90: */
+ }
+ i__2 = j + (k - 1) * ((*n << 1) - k) / 2;
+ ap[i__2].r = wk.r, ap[i__2].i = wk.i;
+ i__2 = j + k * ((*n << 1) - k - 1) / 2;
+ ap[i__2].r = wkp1.r, ap[i__2].i = wkp1.i;
+ i__2 = j + (j - 1) * ((*n << 1) - j) / 2;
+ i__3 = j + (j - 1) * ((*n << 1) - j) / 2;
+ r__1 = ap[i__3].r;
+ q__1.r = r__1, q__1.i = 0.f;
+ ap[i__2].r = q__1.r, ap[i__2].i = q__1.i;
+/* L100: */
+ }
+ }
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k + 1] = -kp;
+ }
+
+/* Increase K and return to the start of the main loop */
+
+ k += kstep;
+ kc = knc + *n - k + 2;
+ goto L60;
+
+ }
+
+L110:
+ return 0;
+
+/* End of CHPTRF */
+
+} /* chptrf_ */
diff --git a/contrib/libs/clapack/chptri.c b/contrib/libs/clapack/chptri.c
new file mode 100644
index 0000000000..069bb2ae0d
--- /dev/null
+++ b/contrib/libs/clapack/chptri.c
@@ -0,0 +1,512 @@
+/* chptri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b2 = {0.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int chptri_(char *uplo, integer *n, complex *ap, integer *
+ ipiv, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ real r__1;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ double c_abs(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ real d__;
+ integer j, k;
+ real t, ak;
+ integer kc, kp, kx, kpc, npp;
+ real akp1;
+ complex temp, akkp1;
+ extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), chpmv_(char *, integer *, complex *,
+ complex *, complex *, integer *, complex *, complex *, integer *), cswap_(integer *, complex *, integer *, complex *,
+ integer *);
+ integer kstep;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ integer kcnext;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHPTRI computes the inverse of a complex Hermitian indefinite matrix */
+/* A in packed storage using the factorization A = U*D*U**H or */
+/* A = L*D*L**H computed by CHPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**H; */
+/* = 'L': Lower triangular, form is A = L*D*L**H. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/* On entry, the block diagonal matrix D and the multipliers */
+/* used to obtain the factor U or L as computed by CHPTRF, */
+/* stored as a packed triangular matrix. */
+
+/* On exit, if INFO = 0, the (Hermitian) inverse of the original */
+/* matrix, stored as a packed triangular matrix. The j-th column */
+/* of inv(A) is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = inv(A)(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', */
+/* AP(i + (j-1)*(2n-j)/2) = inv(A)(i,j) for j<=i<=n. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by CHPTRF. */
+
+/* WORK (workspace) COMPLEX array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) = 0; the matrix is singular and its */
+/* inverse could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --work;
+ --ipiv;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHPTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check that the diagonal matrix D is nonsingular. */
+
+ if (upper) {
+
+/* Upper triangular storage: examine D from bottom to top */
+
+ kp = *n * (*n + 1) / 2;
+ for (*info = *n; *info >= 1; --(*info)) {
+ i__1 = kp;
+ if (ipiv[*info] > 0 && (ap[i__1].r == 0.f && ap[i__1].i == 0.f)) {
+ return 0;
+ }
+ kp -= *info;
+/* L10: */
+ }
+ } else {
+
+/* Lower triangular storage: examine D from top to bottom. */
+
+ kp = 1;
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ i__2 = kp;
+ if (ipiv[*info] > 0 && (ap[i__2].r == 0.f && ap[i__2].i == 0.f)) {
+ return 0;
+ }
+ kp = kp + *n - *info + 1;
+/* L20: */
+ }
+ }
+ *info = 0;
+
+ if (upper) {
+
+/* Compute inv(A) from the factorization A = U*D*U'. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+ kc = 1;
+L30:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L50;
+ }
+
+ kcnext = kc + k;
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Invert the diagonal block. */
+
+ i__1 = kc + k - 1;
+ i__2 = kc + k - 1;
+ r__1 = 1.f / ap[i__2].r;
+ ap[i__1].r = r__1, ap[i__1].i = 0.f;
+
+/* Compute column K of the inverse. */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ ccopy_(&i__1, &ap[kc], &c__1, &work[1], &c__1);
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ chpmv_(uplo, &i__1, &q__1, &ap[1], &work[1], &c__1, &c_b2, &
+ ap[kc], &c__1);
+ i__1 = kc + k - 1;
+ i__2 = kc + k - 1;
+ i__3 = k - 1;
+ cdotc_(&q__2, &i__3, &work[1], &c__1, &ap[kc], &c__1);
+ r__1 = q__2.r;
+ q__1.r = ap[i__2].r - r__1, q__1.i = ap[i__2].i;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ }
+ kstep = 1;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Invert the diagonal block. */
+
+ t = c_abs(&ap[kcnext + k - 1]);
+ i__1 = kc + k - 1;
+ ak = ap[i__1].r / t;
+ i__1 = kcnext + k;
+ akp1 = ap[i__1].r / t;
+ i__1 = kcnext + k - 1;
+ q__1.r = ap[i__1].r / t, q__1.i = ap[i__1].i / t;
+ akkp1.r = q__1.r, akkp1.i = q__1.i;
+ d__ = t * (ak * akp1 - 1.f);
+ i__1 = kc + k - 1;
+ r__1 = akp1 / d__;
+ ap[i__1].r = r__1, ap[i__1].i = 0.f;
+ i__1 = kcnext + k;
+ r__1 = ak / d__;
+ ap[i__1].r = r__1, ap[i__1].i = 0.f;
+ i__1 = kcnext + k - 1;
+ q__2.r = -akkp1.r, q__2.i = -akkp1.i;
+ q__1.r = q__2.r / d__, q__1.i = q__2.i / d__;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+
+/* Compute columns K and K+1 of the inverse. */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ ccopy_(&i__1, &ap[kc], &c__1, &work[1], &c__1);
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ chpmv_(uplo, &i__1, &q__1, &ap[1], &work[1], &c__1, &c_b2, &
+ ap[kc], &c__1);
+ i__1 = kc + k - 1;
+ i__2 = kc + k - 1;
+ i__3 = k - 1;
+ cdotc_(&q__2, &i__3, &work[1], &c__1, &ap[kc], &c__1);
+ r__1 = q__2.r;
+ q__1.r = ap[i__2].r - r__1, q__1.i = ap[i__2].i;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ i__1 = kcnext + k - 1;
+ i__2 = kcnext + k - 1;
+ i__3 = k - 1;
+ cdotc_(&q__2, &i__3, &ap[kc], &c__1, &ap[kcnext], &c__1);
+ q__1.r = ap[i__2].r - q__2.r, q__1.i = ap[i__2].i - q__2.i;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ i__1 = k - 1;
+ ccopy_(&i__1, &ap[kcnext], &c__1, &work[1], &c__1);
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ chpmv_(uplo, &i__1, &q__1, &ap[1], &work[1], &c__1, &c_b2, &
+ ap[kcnext], &c__1);
+ i__1 = kcnext + k;
+ i__2 = kcnext + k;
+ i__3 = k - 1;
+ cdotc_(&q__2, &i__3, &work[1], &c__1, &ap[kcnext], &c__1);
+ r__1 = q__2.r;
+ q__1.r = ap[i__2].r - r__1, q__1.i = ap[i__2].i;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ }
+ kstep = 2;
+ kcnext = kcnext + k + 1;
+ }
+
+ kp = (i__1 = ipiv[k], abs(i__1));
+ if (kp != k) {
+
+/* Interchange rows and columns K and KP in the leading */
+/* submatrix A(1:k+1,1:k+1) */
+
+ kpc = (kp - 1) * kp / 2 + 1;
+ i__1 = kp - 1;
+ cswap_(&i__1, &ap[kc], &c__1, &ap[kpc], &c__1);
+ kx = kpc + kp - 1;
+ i__1 = k - 1;
+ for (j = kp + 1; j <= i__1; ++j) {
+ kx = kx + j - 1;
+ r_cnjg(&q__1, &ap[kc + j - 1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = kc + j - 1;
+ r_cnjg(&q__1, &ap[kx]);
+ ap[i__2].r = q__1.r, ap[i__2].i = q__1.i;
+ i__2 = kx;
+ ap[i__2].r = temp.r, ap[i__2].i = temp.i;
+/* L40: */
+ }
+ i__1 = kc + kp - 1;
+ r_cnjg(&q__1, &ap[kc + kp - 1]);
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ i__1 = kc + k - 1;
+ temp.r = ap[i__1].r, temp.i = ap[i__1].i;
+ i__1 = kc + k - 1;
+ i__2 = kpc + kp - 1;
+ ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+ i__1 = kpc + kp - 1;
+ ap[i__1].r = temp.r, ap[i__1].i = temp.i;
+ if (kstep == 2) {
+ i__1 = kc + k + k - 1;
+ temp.r = ap[i__1].r, temp.i = ap[i__1].i;
+ i__1 = kc + k + k - 1;
+ i__2 = kc + k + kp - 1;
+ ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+ i__1 = kc + k + kp - 1;
+ ap[i__1].r = temp.r, ap[i__1].i = temp.i;
+ }
+ }
+
+ k += kstep;
+ kc = kcnext;
+ goto L30;
+L50:
+
+ ;
+ } else {
+
+/* Compute inv(A) from the factorization A = L*D*L'. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ npp = *n * (*n + 1) / 2;
+ k = *n;
+ kc = npp;
+L60:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L80;
+ }
+
+ kcnext = kc - (*n - k + 2);
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Invert the diagonal block. */
+
+ i__1 = kc;
+ i__2 = kc;
+ r__1 = 1.f / ap[i__2].r;
+ ap[i__1].r = r__1, ap[i__1].i = 0.f;
+
+/* Compute column K of the inverse. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ ccopy_(&i__1, &ap[kc + 1], &c__1, &work[1], &c__1);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ chpmv_(uplo, &i__1, &q__1, &ap[kc + *n - k + 1], &work[1], &
+ c__1, &c_b2, &ap[kc + 1], &c__1);
+ i__1 = kc;
+ i__2 = kc;
+ i__3 = *n - k;
+ cdotc_(&q__2, &i__3, &work[1], &c__1, &ap[kc + 1], &c__1);
+ r__1 = q__2.r;
+ q__1.r = ap[i__2].r - r__1, q__1.i = ap[i__2].i;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ }
+ kstep = 1;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Invert the diagonal block. */
+
+ t = c_abs(&ap[kcnext + 1]);
+ i__1 = kcnext;
+ ak = ap[i__1].r / t;
+ i__1 = kc;
+ akp1 = ap[i__1].r / t;
+ i__1 = kcnext + 1;
+ q__1.r = ap[i__1].r / t, q__1.i = ap[i__1].i / t;
+ akkp1.r = q__1.r, akkp1.i = q__1.i;
+ d__ = t * (ak * akp1 - 1.f);
+ i__1 = kcnext;
+ r__1 = akp1 / d__;
+ ap[i__1].r = r__1, ap[i__1].i = 0.f;
+ i__1 = kc;
+ r__1 = ak / d__;
+ ap[i__1].r = r__1, ap[i__1].i = 0.f;
+ i__1 = kcnext + 1;
+ q__2.r = -akkp1.r, q__2.i = -akkp1.i;
+ q__1.r = q__2.r / d__, q__1.i = q__2.i / d__;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+
+/* Compute columns K-1 and K of the inverse. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ ccopy_(&i__1, &ap[kc + 1], &c__1, &work[1], &c__1);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ chpmv_(uplo, &i__1, &q__1, &ap[kc + (*n - k + 1)], &work[1], &
+ c__1, &c_b2, &ap[kc + 1], &c__1);
+ i__1 = kc;
+ i__2 = kc;
+ i__3 = *n - k;
+ cdotc_(&q__2, &i__3, &work[1], &c__1, &ap[kc + 1], &c__1);
+ r__1 = q__2.r;
+ q__1.r = ap[i__2].r - r__1, q__1.i = ap[i__2].i;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ i__1 = kcnext + 1;
+ i__2 = kcnext + 1;
+ i__3 = *n - k;
+ cdotc_(&q__2, &i__3, &ap[kc + 1], &c__1, &ap[kcnext + 2], &
+ c__1);
+ q__1.r = ap[i__2].r - q__2.r, q__1.i = ap[i__2].i - q__2.i;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ i__1 = *n - k;
+ ccopy_(&i__1, &ap[kcnext + 2], &c__1, &work[1], &c__1);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ chpmv_(uplo, &i__1, &q__1, &ap[kc + (*n - k + 1)], &work[1], &
+ c__1, &c_b2, &ap[kcnext + 2], &c__1);
+ i__1 = kcnext;
+ i__2 = kcnext;
+ i__3 = *n - k;
+ cdotc_(&q__2, &i__3, &work[1], &c__1, &ap[kcnext + 2], &c__1);
+ r__1 = q__2.r;
+ q__1.r = ap[i__2].r - r__1, q__1.i = ap[i__2].i;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ }
+ kstep = 2;
+ kcnext -= *n - k + 3;
+ }
+
+ kp = (i__1 = ipiv[k], abs(i__1));
+ if (kp != k) {
+
+/* Interchange rows and columns K and KP in the trailing */
+/* submatrix A(k-1:n,k-1:n) */
+
+ kpc = npp - (*n - kp + 1) * (*n - kp + 2) / 2 + 1;
+ if (kp < *n) {
+ i__1 = *n - kp;
+ cswap_(&i__1, &ap[kc + kp - k + 1], &c__1, &ap[kpc + 1], &
+ c__1);
+ }
+ kx = kc + kp - k;
+ i__1 = kp - 1;
+ for (j = k + 1; j <= i__1; ++j) {
+ kx = kx + *n - j + 1;
+ r_cnjg(&q__1, &ap[kc + j - k]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = kc + j - k;
+ r_cnjg(&q__1, &ap[kx]);
+ ap[i__2].r = q__1.r, ap[i__2].i = q__1.i;
+ i__2 = kx;
+ ap[i__2].r = temp.r, ap[i__2].i = temp.i;
+/* L70: */
+ }
+ i__1 = kc + kp - k;
+ r_cnjg(&q__1, &ap[kc + kp - k]);
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ i__1 = kc;
+ temp.r = ap[i__1].r, temp.i = ap[i__1].i;
+ i__1 = kc;
+ i__2 = kpc;
+ ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+ i__1 = kpc;
+ ap[i__1].r = temp.r, ap[i__1].i = temp.i;
+ if (kstep == 2) {
+ i__1 = kc - *n + k - 1;
+ temp.r = ap[i__1].r, temp.i = ap[i__1].i;
+ i__1 = kc - *n + k - 1;
+ i__2 = kc - *n + kp - 1;
+ ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+ i__1 = kc - *n + kp - 1;
+ ap[i__1].r = temp.r, ap[i__1].i = temp.i;
+ }
+ }
+
+ k -= kstep;
+ kc = kcnext;
+ goto L60;
+L80:
+ ;
+ }
+
+ return 0;
+
+/* End of CHPTRI */
+
+} /* chptri_ */
diff --git a/contrib/libs/clapack/chptrs.c b/contrib/libs/clapack/chptrs.c
new file mode 100644
index 0000000000..e5c75362c7
--- /dev/null
+++ b/contrib/libs/clapack/chptrs.c
@@ -0,0 +1,530 @@
+/* chptrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int chptrs_(char *uplo, integer *n, integer *nrhs, complex *
+ ap, integer *ipiv, complex *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void c_div(complex *, complex *, complex *), r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer j, k;
+ real s;
+ complex ak, bk;
+ integer kc, kp;
+ complex akm1, bkm1, akm1k;
+ extern logical lsame_(char *, char *);
+ complex denom;
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *), cgeru_(integer *, integer *, complex *,
+ complex *, integer *, complex *, integer *, complex *, integer *),
+ cswap_(integer *, complex *, integer *, complex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int clacgv_(integer *, complex *, integer *),
+ csscal_(integer *, real *, complex *, integer *), xerbla_(char *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHPTRS solves a system of linear equations A*X = B with a complex */
+/* Hermitian matrix A stored in packed format using the factorization */
+/* A = U*D*U**H or A = L*D*L**H computed by CHPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**H; */
+/* = 'L': Lower triangular, form is A = L*D*L**H. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input) COMPLEX array, dimension (N*(N+1)/2) */
+/* The block diagonal matrix D and the multipliers used to */
+/* obtain the factor U or L as computed by CHPTRF, stored as a */
+/* packed triangular matrix. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by CHPTRF. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --ap;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHPTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Solve A*X = B, where A = U*D*U'. */
+
+/* First solve U*D*X = B, overwriting B with X. */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = *n;
+ kc = *n * (*n + 1) / 2 + 1;
+L10:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L30;
+ }
+
+ kc -= k;
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(U(K)), where U(K) is the transformation */
+/* stored in column K of A. */
+
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &ap[kc], &c__1, &b[k + b_dim1], ldb, &
+ b[b_dim1 + 1], ldb);
+
+/* Multiply by the inverse of the diagonal block. */
+
+ i__1 = kc + k - 1;
+ s = 1.f / ap[i__1].r;
+ csscal_(nrhs, &s, &b[k + b_dim1], ldb);
+ --k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Interchange rows K-1 and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k - 1) {
+ cswap_(nrhs, &b[k - 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(U(K)), where U(K) is the transformation */
+/* stored in columns K-1 and K of A. */
+
+ i__1 = k - 2;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &ap[kc], &c__1, &b[k + b_dim1], ldb, &
+ b[b_dim1 + 1], ldb);
+ i__1 = k - 2;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &ap[kc - (k - 1)], &c__1, &b[k - 1 +
+ b_dim1], ldb, &b[b_dim1 + 1], ldb);
+
+/* Multiply by the inverse of the diagonal block. */
+
+ i__1 = kc + k - 2;
+ akm1k.r = ap[i__1].r, akm1k.i = ap[i__1].i;
+ c_div(&q__1, &ap[kc - 1], &akm1k);
+ akm1.r = q__1.r, akm1.i = q__1.i;
+ r_cnjg(&q__2, &akm1k);
+ c_div(&q__1, &ap[kc + k - 1], &q__2);
+ ak.r = q__1.r, ak.i = q__1.i;
+ q__2.r = akm1.r * ak.r - akm1.i * ak.i, q__2.i = akm1.r * ak.i +
+ akm1.i * ak.r;
+ q__1.r = q__2.r - 1.f, q__1.i = q__2.i - 0.f;
+ denom.r = q__1.r, denom.i = q__1.i;
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ c_div(&q__1, &b[k - 1 + j * b_dim1], &akm1k);
+ bkm1.r = q__1.r, bkm1.i = q__1.i;
+ r_cnjg(&q__2, &akm1k);
+ c_div(&q__1, &b[k + j * b_dim1], &q__2);
+ bk.r = q__1.r, bk.i = q__1.i;
+ i__2 = k - 1 + j * b_dim1;
+ q__3.r = ak.r * bkm1.r - ak.i * bkm1.i, q__3.i = ak.r *
+ bkm1.i + ak.i * bkm1.r;
+ q__2.r = q__3.r - bk.r, q__2.i = q__3.i - bk.i;
+ c_div(&q__1, &q__2, &denom);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ i__2 = k + j * b_dim1;
+ q__3.r = akm1.r * bk.r - akm1.i * bk.i, q__3.i = akm1.r *
+ bk.i + akm1.i * bk.r;
+ q__2.r = q__3.r - bkm1.r, q__2.i = q__3.i - bkm1.i;
+ c_div(&q__1, &q__2, &denom);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L20: */
+ }
+ kc = kc - k + 1;
+ k += -2;
+ }
+
+ goto L10;
+L30:
+
+/* Next solve U'*X = B, overwriting B with X. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+ kc = 1;
+L40:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L50;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Multiply by inv(U'(K)), where U(K) is the transformation */
+/* stored in column K of A. */
+
+ if (k > 1) {
+ clacgv_(nrhs, &b[k + b_dim1], ldb);
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Conjugate transpose", &i__1, nrhs, &q__1, &b[b_offset]
+, ldb, &ap[kc], &c__1, &c_b1, &b[k + b_dim1], ldb);
+ clacgv_(nrhs, &b[k + b_dim1], ldb);
+ }
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ kc += k;
+ ++k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Multiply by inv(U'(K+1)), where U(K+1) is the transformation */
+/* stored in columns K and K+1 of A. */
+
+ if (k > 1) {
+ clacgv_(nrhs, &b[k + b_dim1], ldb);
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Conjugate transpose", &i__1, nrhs, &q__1, &b[b_offset]
+, ldb, &ap[kc], &c__1, &c_b1, &b[k + b_dim1], ldb);
+ clacgv_(nrhs, &b[k + b_dim1], ldb);
+
+ clacgv_(nrhs, &b[k + 1 + b_dim1], ldb);
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Conjugate transpose", &i__1, nrhs, &q__1, &b[b_offset]
+, ldb, &ap[kc + k], &c__1, &c_b1, &b[k + 1 + b_dim1],
+ ldb);
+ clacgv_(nrhs, &b[k + 1 + b_dim1], ldb);
+ }
+
+/* Interchange rows K and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ kc = kc + (k << 1) + 1;
+ k += 2;
+ }
+
+ goto L40;
+L50:
+
+ ;
+ } else {
+
+/* Solve A*X = B, where A = L*D*L'. */
+
+/* First solve L*D*X = B, overwriting B with X. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+ kc = 1;
+L60:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L80;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(L(K)), where L(K) is the transformation */
+/* stored in column K of A. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &ap[kc + 1], &c__1, &b[k + b_dim1],
+ ldb, &b[k + 1 + b_dim1], ldb);
+ }
+
+/* Multiply by the inverse of the diagonal block. */
+
+ i__1 = kc;
+ s = 1.f / ap[i__1].r;
+ csscal_(nrhs, &s, &b[k + b_dim1], ldb);
+ kc = kc + *n - k + 1;
+ ++k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Interchange rows K+1 and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k + 1) {
+ cswap_(nrhs, &b[k + 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(L(K)), where L(K) is the transformation */
+/* stored in columns K and K+1 of A. */
+
+ if (k < *n - 1) {
+ i__1 = *n - k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &ap[kc + 2], &c__1, &b[k + b_dim1],
+ ldb, &b[k + 2 + b_dim1], ldb);
+ i__1 = *n - k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &ap[kc + *n - k + 2], &c__1, &b[k
+ + 1 + b_dim1], ldb, &b[k + 2 + b_dim1], ldb);
+ }
+
+/* Multiply by the inverse of the diagonal block. */
+
+ i__1 = kc + 1;
+ akm1k.r = ap[i__1].r, akm1k.i = ap[i__1].i;
+ r_cnjg(&q__2, &akm1k);
+ c_div(&q__1, &ap[kc], &q__2);
+ akm1.r = q__1.r, akm1.i = q__1.i;
+ c_div(&q__1, &ap[kc + *n - k + 1], &akm1k);
+ ak.r = q__1.r, ak.i = q__1.i;
+ q__2.r = akm1.r * ak.r - akm1.i * ak.i, q__2.i = akm1.r * ak.i +
+ akm1.i * ak.r;
+ q__1.r = q__2.r - 1.f, q__1.i = q__2.i - 0.f;
+ denom.r = q__1.r, denom.i = q__1.i;
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ r_cnjg(&q__2, &akm1k);
+ c_div(&q__1, &b[k + j * b_dim1], &q__2);
+ bkm1.r = q__1.r, bkm1.i = q__1.i;
+ c_div(&q__1, &b[k + 1 + j * b_dim1], &akm1k);
+ bk.r = q__1.r, bk.i = q__1.i;
+ i__2 = k + j * b_dim1;
+ q__3.r = ak.r * bkm1.r - ak.i * bkm1.i, q__3.i = ak.r *
+ bkm1.i + ak.i * bkm1.r;
+ q__2.r = q__3.r - bk.r, q__2.i = q__3.i - bk.i;
+ c_div(&q__1, &q__2, &denom);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ i__2 = k + 1 + j * b_dim1;
+ q__3.r = akm1.r * bk.r - akm1.i * bk.i, q__3.i = akm1.r *
+ bk.i + akm1.i * bk.r;
+ q__2.r = q__3.r - bkm1.r, q__2.i = q__3.i - bkm1.i;
+ c_div(&q__1, &q__2, &denom);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L70: */
+ }
+ kc = kc + (*n - k << 1) + 1;
+ k += 2;
+ }
+
+ goto L60;
+L80:
+
+/* Next solve L'*X = B, overwriting B with X. */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = *n;
+ kc = *n * (*n + 1) / 2 + 1;
+L90:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L100;
+ }
+
+ kc -= *n - k + 1;
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Multiply by inv(L'(K)), where L(K) is the transformation */
+/* stored in column K of A. */
+
+ if (k < *n) {
+ clacgv_(nrhs, &b[k + b_dim1], ldb);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Conjugate transpose", &i__1, nrhs, &q__1, &b[k + 1 +
+ b_dim1], ldb, &ap[kc + 1], &c__1, &c_b1, &b[k +
+ b_dim1], ldb);
+ clacgv_(nrhs, &b[k + b_dim1], ldb);
+ }
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ --k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Multiply by inv(L'(K-1)), where L(K-1) is the transformation */
+/* stored in columns K-1 and K of A. */
+
+ if (k < *n) {
+ clacgv_(nrhs, &b[k + b_dim1], ldb);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Conjugate transpose", &i__1, nrhs, &q__1, &b[k + 1 +
+ b_dim1], ldb, &ap[kc + 1], &c__1, &c_b1, &b[k +
+ b_dim1], ldb);
+ clacgv_(nrhs, &b[k + b_dim1], ldb);
+
+ clacgv_(nrhs, &b[k - 1 + b_dim1], ldb);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Conjugate transpose", &i__1, nrhs, &q__1, &b[k + 1 +
+ b_dim1], ldb, &ap[kc - (*n - k)], &c__1, &c_b1, &b[k
+ - 1 + b_dim1], ldb);
+ clacgv_(nrhs, &b[k - 1 + b_dim1], ldb);
+ }
+
+/* Interchange rows K and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ kc -= *n - k + 2;
+ k += -2;
+ }
+
+ goto L90;
+L100:
+ ;
+ }
+
+ return 0;
+
+/* End of CHPTRS */
+
+} /* chptrs_ */
diff --git a/contrib/libs/clapack/chsein.c b/contrib/libs/clapack/chsein.c
new file mode 100644
index 0000000000..bc49ab1ee8
--- /dev/null
+++ b/contrib/libs/clapack/chsein.c
@@ -0,0 +1,432 @@
+/* chsein.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static logical c_false = FALSE_;
+static logical c_true = TRUE_;
+
+/* Subroutine */ int chsein_(char *side, char *eigsrc, char *initv, logical *
+ select, integer *n, complex *h__, integer *ldh, complex *w, complex *
+ vl, integer *ldvl, complex *vr, integer *ldvr, integer *mm, integer *
+ m, complex *work, real *rwork, integer *ifaill, integer *ifailr,
+ integer *info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+ i__2, i__3;
+ real r__1, r__2;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer i__, k, kl, kr, ks;
+ complex wk;
+ integer kln;
+ real ulp, eps3, unfl;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical leftv, bothv;
+ real hnorm;
+ extern /* Subroutine */ int claein_(logical *, logical *, integer *,
+ complex *, integer *, complex *, complex *, complex *, integer *,
+ real *, real *, real *, integer *);
+ extern doublereal slamch_(char *), clanhs_(char *, integer *,
+ complex *, integer *, real *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noinit;
+ integer ldwork;
+ logical rightv, fromqr;
+ real smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CHSEIN uses inverse iteration to find specified right and/or left */
+/* eigenvectors of a complex upper Hessenberg matrix H. */
+
+/* The right eigenvector x and the left eigenvector y of the matrix H */
+/* corresponding to an eigenvalue w are defined by: */
+
+/* H * x = w * x, y**h * H = w * y**h */
+
+/* where y**h denotes the conjugate transpose of the vector y. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'R': compute right eigenvectors only; */
+/* = 'L': compute left eigenvectors only; */
+/* = 'B': compute both right and left eigenvectors. */
+
+/* EIGSRC (input) CHARACTER*1 */
+/* Specifies the source of eigenvalues supplied in W: */
+/* = 'Q': the eigenvalues were found using CHSEQR; thus, if */
+/* H has zero subdiagonal elements, and so is */
+/* block-triangular, then the j-th eigenvalue can be */
+/* assumed to be an eigenvalue of the block containing */
+/* the j-th row/column. This property allows CHSEIN to */
+/* perform inverse iteration on just one diagonal block. */
+/* = 'N': no assumptions are made on the correspondence */
+/* between eigenvalues and diagonal blocks. In this */
+/* case, CHSEIN must always perform inverse iteration */
+/* using the whole matrix H. */
+
+/* INITV (input) CHARACTER*1 */
+/* = 'N': no initial vectors are supplied; */
+/* = 'U': user-supplied initial vectors are stored in the arrays */
+/* VL and/or VR. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* Specifies the eigenvectors to be computed. To select the */
+/* eigenvector corresponding to the eigenvalue W(j), */
+/* SELECT(j) must be set to .TRUE.. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N >= 0. */
+
+/* H (input) COMPLEX array, dimension (LDH,N) */
+/* The upper Hessenberg matrix H. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH >= max(1,N). */
+
+/* W (input/output) COMPLEX array, dimension (N) */
+/* On entry, the eigenvalues of H. */
+/* On exit, the real parts of W may have been altered since */
+/* close eigenvalues are perturbed slightly in searching for */
+/* independent eigenvectors. */
+
+/* VL (input/output) COMPLEX array, dimension (LDVL,MM) */
+/* On entry, if INITV = 'U' and SIDE = 'L' or 'B', VL must */
+/* contain starting vectors for the inverse iteration for the */
+/* left eigenvectors; the starting vector for each eigenvector */
+/* must be in the same column in which the eigenvector will be */
+/* stored. */
+/* On exit, if SIDE = 'L' or 'B', the left eigenvectors */
+/* specified by SELECT will be stored consecutively in the */
+/* columns of VL, in the same order as their eigenvalues. */
+/* If SIDE = 'R', VL is not referenced. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the array VL. */
+/* LDVL >= max(1,N) if SIDE = 'L' or 'B'; LDVL >= 1 otherwise. */
+
+/* VR (input/output) COMPLEX array, dimension (LDVR,MM) */
+/* On entry, if INITV = 'U' and SIDE = 'R' or 'B', VR must */
+/* contain starting vectors for the inverse iteration for the */
+/* right eigenvectors; the starting vector for each eigenvector */
+/* must be in the same column in which the eigenvector will be */
+/* stored. */
+/* On exit, if SIDE = 'R' or 'B', the right eigenvectors */
+/* specified by SELECT will be stored consecutively in the */
+/* columns of VR, in the same order as their eigenvalues. */
+/* If SIDE = 'L', VR is not referenced. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. */
+/* LDVR >= max(1,N) if SIDE = 'R' or 'B'; LDVR >= 1 otherwise. */
+
+/* MM (input) INTEGER */
+/* The number of columns in the arrays VL and/or VR. MM >= M. */
+
+/* M (output) INTEGER */
+/* The number of columns in the arrays VL and/or VR required to */
+/* store the eigenvectors (= the number of .TRUE. elements in */
+/* SELECT). */
+
+/* WORK (workspace) COMPLEX array, dimension (N*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* IFAILL (output) INTEGER array, dimension (MM) */
+/* If SIDE = 'L' or 'B', IFAILL(i) = j > 0 if the left */
+/* eigenvector in the i-th column of VL (corresponding to the */
+/* eigenvalue w(j)) failed to converge; IFAILL(i) = 0 if the */
+/* eigenvector converged satisfactorily. */
+/* If SIDE = 'R', IFAILL is not referenced. */
+
+/* IFAILR (output) INTEGER array, dimension (MM) */
+/* If SIDE = 'R' or 'B', IFAILR(i) = j > 0 if the right */
+/* eigenvector in the i-th column of VR (corresponding to the */
+/* eigenvalue w(j)) failed to converge; IFAILR(i) = 0 if the */
+/* eigenvector converged satisfactorily. */
+/* If SIDE = 'L', IFAILR is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, i is the number of eigenvectors which */
+/* failed to converge; see IFAILL and IFAILR for further */
+/* details. */
+
+/* Further Details */
+/* =============== */
+
+/* Each eigenvector is normalized so that the element of largest */
+/* magnitude has magnitude 1; here the magnitude of a complex number */
+/* (x,y) is taken to be |x|+|y|. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters. */
+
+ /* Parameter adjustments */
+ --select;
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --w;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+ --rwork;
+ --ifaill;
+ --ifailr;
+
+ /* Function Body */
+ bothv = lsame_(side, "B");
+ rightv = lsame_(side, "R") || bothv;
+ leftv = lsame_(side, "L") || bothv;
+
+ fromqr = lsame_(eigsrc, "Q");
+
+ noinit = lsame_(initv, "N");
+
+/* Set M to the number of columns required to store the selected */
+/* eigenvectors. */
+
+ *m = 0;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (select[k]) {
+ ++(*m);
+ }
+/* L10: */
+ }
+
+ *info = 0;
+ if (! rightv && ! leftv) {
+ *info = -1;
+ } else if (! fromqr && ! lsame_(eigsrc, "N")) {
+ *info = -2;
+ } else if (! noinit && ! lsame_(initv, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*ldh < max(1,*n)) {
+ *info = -7;
+ } else if (*ldvl < 1 || leftv && *ldvl < *n) {
+ *info = -10;
+ } else if (*ldvr < 1 || rightv && *ldvr < *n) {
+ *info = -12;
+ } else if (*mm < *m) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHSEIN", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Set machine-dependent constants. */
+
+ unfl = slamch_("Safe minimum");
+ ulp = slamch_("Precision");
+ smlnum = unfl * (*n / ulp);
+
+ ldwork = *n;
+
+ kl = 1;
+ kln = 0;
+ if (fromqr) {
+ kr = 0;
+ } else {
+ kr = *n;
+ }
+ ks = 1;
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (select[k]) {
+
+/* Compute eigenvector(s) corresponding to W(K). */
+
+ if (fromqr) {
+
+/* If affiliation of eigenvalues is known, check whether */
+/* the matrix splits. */
+
+/* Determine KL and KR such that 1 <= KL <= K <= KR <= N */
+/* and H(KL,KL-1) and H(KR+1,KR) are zero (or KL = 1 or */
+/* KR = N). */
+
+/* Then inverse iteration can be performed with the */
+/* submatrix H(KL:N,KL:N) for a left eigenvector, and with */
+/* the submatrix H(1:KR,1:KR) for a right eigenvector. */
+
+ i__2 = kl + 1;
+ for (i__ = k; i__ >= i__2; --i__) {
+ i__3 = i__ + (i__ - 1) * h_dim1;
+ if (h__[i__3].r == 0.f && h__[i__3].i == 0.f) {
+ goto L30;
+ }
+/* L20: */
+ }
+L30:
+ kl = i__;
+ if (k > kr) {
+ i__2 = *n - 1;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ i__3 = i__ + 1 + i__ * h_dim1;
+ if (h__[i__3].r == 0.f && h__[i__3].i == 0.f) {
+ goto L50;
+ }
+/* L40: */
+ }
+L50:
+ kr = i__;
+ }
+ }
+
+ if (kl != kln) {
+ kln = kl;
+
+/* Compute infinity-norm of submatrix H(KL:KR,KL:KR) if it */
+/* has not ben computed before. */
+
+ i__2 = kr - kl + 1;
+ hnorm = clanhs_("I", &i__2, &h__[kl + kl * h_dim1], ldh, &
+ rwork[1]);
+ if (hnorm > 0.f) {
+ eps3 = hnorm * ulp;
+ } else {
+ eps3 = smlnum;
+ }
+ }
+
+/* Perturb eigenvalue if it is close to any previous */
+/* selected eigenvalues affiliated to the submatrix */
+/* H(KL:KR,KL:KR). Close roots are modified by EPS3. */
+
+ i__2 = k;
+ wk.r = w[i__2].r, wk.i = w[i__2].i;
+L60:
+ i__2 = kl;
+ for (i__ = k - 1; i__ >= i__2; --i__) {
+ i__3 = i__;
+ q__2.r = w[i__3].r - wk.r, q__2.i = w[i__3].i - wk.i;
+ q__1.r = q__2.r, q__1.i = q__2.i;
+ if (select[i__] && (r__1 = q__1.r, dabs(r__1)) + (r__2 =
+ r_imag(&q__1), dabs(r__2)) < eps3) {
+ q__1.r = wk.r + eps3, q__1.i = wk.i;
+ wk.r = q__1.r, wk.i = q__1.i;
+ goto L60;
+ }
+/* L70: */
+ }
+ i__2 = k;
+ w[i__2].r = wk.r, w[i__2].i = wk.i;
+
+ if (leftv) {
+
+/* Compute left eigenvector. */
+
+ i__2 = *n - kl + 1;
+ claein_(&c_false, &noinit, &i__2, &h__[kl + kl * h_dim1], ldh,
+ &wk, &vl[kl + ks * vl_dim1], &work[1], &ldwork, &
+ rwork[1], &eps3, &smlnum, &iinfo);
+ if (iinfo > 0) {
+ ++(*info);
+ ifaill[ks] = k;
+ } else {
+ ifaill[ks] = 0;
+ }
+ i__2 = kl - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + ks * vl_dim1;
+ vl[i__3].r = 0.f, vl[i__3].i = 0.f;
+/* L80: */
+ }
+ }
+ if (rightv) {
+
+/* Compute right eigenvector. */
+
+ claein_(&c_true, &noinit, &kr, &h__[h_offset], ldh, &wk, &vr[
+ ks * vr_dim1 + 1], &work[1], &ldwork, &rwork[1], &
+ eps3, &smlnum, &iinfo);
+ if (iinfo > 0) {
+ ++(*info);
+ ifailr[ks] = k;
+ } else {
+ ifailr[ks] = 0;
+ }
+ i__2 = *n;
+ for (i__ = kr + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + ks * vr_dim1;
+ vr[i__3].r = 0.f, vr[i__3].i = 0.f;
+/* L90: */
+ }
+ }
+ ++ks;
+ }
+/* L100: */
+ }
+
+ return 0;
+
+/* End of CHSEIN */
+
+} /* chsein_ */
diff --git a/contrib/libs/clapack/chseqr.c b/contrib/libs/clapack/chseqr.c
new file mode 100644
index 0000000000..0a02ec5721
--- /dev/null
+++ b/contrib/libs/clapack/chseqr.c
@@ -0,0 +1,480 @@
+/* chseqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c__12 = 12;
+static integer c__2 = 2;
+static integer c__49 = 49;
+
+/* Subroutine */ int chseqr_(char *job, char *compz, integer *n, integer *ilo,
+	 integer *ihi, complex *h__, integer *ldh, complex *w, complex *z__, 
+	integer *ldz, complex *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    address a__1[2];
+    integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3[2];
+    real r__1, r__2, r__3;
+    complex q__1;
+    char ch__1[2];
+
+    /* Builtin functions */
+    /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+    /* Local variables */
+    complex hl[2401]	/* was [49][49] */;
+    integer kbot, nmin;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int ccopy_(integer *, complex *, integer *, 
+	    complex *, integer *);
+    logical initz;
+    complex workl[49];
+    logical wantt, wantz;
+    extern /* Subroutine */ int claqr0_(logical *, logical *, integer *, 
+	    integer *, integer *, complex *, integer *, complex *, integer *, 
+	    integer *, complex *, integer *, complex *, integer *, integer *),
+	     clahqr_(logical *, logical *, integer *, integer *, integer *, 
+	    complex *, integer *, complex *, integer *, integer *, complex *, 
+	    integer *, integer *), clacpy_(char *, integer *, integer *, 
+	    complex *, integer *, complex *, integer *), claset_(char 
+	    *, integer *, integer *, complex *, complex *, complex *, integer 
+	    *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    logical lquery;
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+/*     Purpose */
+/*     ======= */
+
+/*     CHSEQR computes the eigenvalues of a Hessenberg matrix H */
+/*     and, optionally, the matrices T and Z from the Schur decomposition */
+/*     H = Z T Z**H, where T is an upper triangular matrix (the */
+/*     Schur form), and Z is the unitary matrix of Schur vectors. */
+
+/*     Optionally Z may be postmultiplied into an input unitary */
+/*     matrix Q so that this routine can give the Schur factorization */
+/*     of a matrix A which has been reduced to the Hessenberg form H */
+/*     by the unitary matrix Q:  A = Q*H*Q**H = (QZ)*H*(QZ)**H. */
+
+/*     Arguments */
+/*     ========= */
+
+/*     JOB   (input) CHARACTER*1 */
+/*           = 'E':  compute eigenvalues only; */
+/*           = 'S':  compute eigenvalues and the Schur form T. */
+
+/*     COMPZ (input) CHARACTER*1 */
+/*           = 'N':  no Schur vectors are computed; */
+/*           = 'I':  Z is initialized to the unit matrix and the matrix Z */
+/*                   of Schur vectors of H is returned; */
+/*           = 'V':  Z must contain an unitary matrix Q on entry, and */
+/*                   the product Q*Z is returned. */
+
+/*     N     (input) INTEGER */
+/*           The order of the matrix H.  N .GE. 0. */
+
+/*     ILO   (input) INTEGER */
+/*     IHI   (input) INTEGER */
+/*           It is assumed that H is already upper triangular in rows */
+/*           and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally */
+/*           set by a previous call to CGEBAL, and then passed to CGEHRD */
+/*           when the matrix output by CGEBAL is reduced to Hessenberg */
+/*           form. Otherwise ILO and IHI should be set to 1 and N */
+/*           respectively.  If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. */
+/*           If N = 0, then ILO = 1 and IHI = 0. */
+
+/*     H     (input/output) COMPLEX array, dimension (LDH,N) */
+/*           On entry, the upper Hessenberg matrix H. */
+/*           On exit, if INFO = 0 and JOB = 'S', H contains the upper */
+/*           triangular matrix T from the Schur decomposition (the */
+/*           Schur form). If INFO = 0 and JOB = 'E', the contents of */
+/*           H are unspecified on exit.  (The output value of H when */
+/*           INFO.GT.0 is given under the description of INFO below.) */
+
+/*           Unlike earlier versions of CHSEQR, this subroutine may */
+/*           explicitly H(i,j) = 0 for i.GT.j and j = 1, 2, ... ILO-1 */
+/*           or j = IHI+1, IHI+2, ... N. */
+
+/*     LDH   (input) INTEGER */
+/*           The leading dimension of the array H. LDH .GE. max(1,N). */
+
+/*     W     (output) COMPLEX array, dimension (N) */
+/*           The computed eigenvalues. If JOB = 'S', the eigenvalues are */
+/*           stored in the same order as on the diagonal of the Schur */
+/*           form returned in H, with W(i) = H(i,i). */
+
+/*     Z     (input/output) COMPLEX array, dimension (LDZ,N) */
+/*           If COMPZ = 'N', Z is not referenced. */
+/*           If COMPZ = 'I', on entry Z need not be set and on exit, */
+/*           if INFO = 0, Z contains the unitary matrix Z of the Schur */
+/*           vectors of H.  If COMPZ = 'V', on entry Z must contain an */
+/*           N-by-N matrix Q, which is assumed to be equal to the unit */
+/*           matrix except for the submatrix Z(ILO:IHI,ILO:IHI). On exit, */
+/*           if INFO = 0, Z contains Q*Z. */
+/*           Normally Q is the unitary matrix generated by CUNGHR */
+/*           after the call to CGEHRD which formed the Hessenberg matrix */
+/*           H. (The output value of Z when INFO.GT.0 is given under */
+/*           the description of INFO below.) */
+
+/*     LDZ   (input) INTEGER */
+/*           The leading dimension of the array Z.  if COMPZ = 'I' or */
+/*           COMPZ = 'V', then LDZ.GE.MAX(1,N).  Otherwize, LDZ.GE.1. */
+
+/*     WORK  (workspace/output) COMPLEX array, dimension (LWORK) */
+/*           On exit, if INFO = 0, WORK(1) returns an estimate of */
+/*           the optimal value for LWORK. */
+
+/*     LWORK (input) INTEGER */
+/*           The dimension of the array WORK.  LWORK .GE. max(1,N) */
+/*           is sufficient and delivers very good and sometimes */
+/*           optimal performance.  However, LWORK as large as 11*N */
+/*           may be required for optimal performance.  A workspace */
+/*           query is recommended to determine the optimal workspace */
+/*           size. */
+
+/*           If LWORK = -1, then CHSEQR does a workspace query. */
+/*           In this case, CHSEQR checks the input parameters and */
+/*           estimates the optimal workspace size for the given */
+/*           values of N, ILO and IHI.  The estimate is returned */
+/*           in WORK(1).  No error message related to LWORK is */
+/*           issued by XERBLA.  Neither H nor Z are accessed. */
+
+
+/*     INFO  (output) INTEGER */
+/*             =  0:  successful exit */
+/*           .LT. 0:  if INFO = -i, the i-th argument had an illegal */
+/*                    value */
+/*           .GT. 0:  if INFO = i, CHSEQR failed to compute all of */
+/*                    the eigenvalues.  Elements 1:ilo-1 and i+1:n of WR */
+/*                    and WI contain those eigenvalues which have been */
+/*                    successfully computed.  (Failures are rare.) */
+
+/*                    If INFO .GT. 0 and JOB = 'E', then on exit, the */
+/*                    remaining unconverged eigenvalues are the eigen- */
+/*                    values of the upper Hessenberg matrix rows and */
+/*                    columns ILO through INFO of the final, output */
+/*                    value of H. */
+
+/*                    If INFO .GT. 0 and JOB   = 'S', then on exit */
+
+/*               (*)  (initial value of H)*U  = U*(final value of H) */
+
+/*                    where U is a unitary matrix.  The final */
+/*                    value of  H is upper Hessenberg and triangular in */
+/*                    rows and columns INFO+1 through IHI. */
+
+/*                    If INFO .GT. 0 and COMPZ = 'V', then on exit */
+
+/*                      (final value of Z)  =  (initial value of Z)*U */
+
+/*                    where U is the unitary matrix in (*) (regard- */
+/*                    less of the value of JOB.) */
+
+/*                    If INFO .GT. 0 and COMPZ = 'I', then on exit */
+/*                      (final value of Z)  = U */
+/*                    where U is the unitary matrix in (*) (regard- */
+/*                    less of the value of JOB.) */
+
+/*                    If INFO .GT. 0 and COMPZ = 'N', then Z is not */
+/*                    accessed. */
+
+/*     ================================================================ */
+/*             Default values supplied by */
+/*             ILAENV(ISPEC,'CHSEQR',JOB(:1)//COMPZ(:1),N,ILO,IHI,LWORK). */
+/*             It is suggested that these defaults be adjusted in order */
+/*             to attain best performance in each particular */
+/*             computational environment. */
+
+/*            ISPEC=12: The CLAHQR vs CLAQR0 crossover point. */
+/*                      Default: 75. (Must be at least 11.) */
+
+/*            ISPEC=13: Recommended deflation window size. */
+/*                      This depends on ILO, IHI and NS.  NS is the */
+/*                      number of simultaneous shifts returned */
+/*                      by ILAENV(ISPEC=15).  (See ISPEC=15 below.) */
+/*                      The default for (IHI-ILO+1).LE.500 is NS. */
+/*                      The default for (IHI-ILO+1).GT.500 is 3*NS/2. */
+
+/*            ISPEC=14: Nibble crossover point. (See IPARMQ for */
+/*                      details.)  Default: 14% of deflation window */
+/*                      size. */
+
+/*            ISPEC=15: Number of simultaneous shifts in a multishift */
+/*                      QR iteration. */
+
+/*                      If IHI-ILO+1 is ... */
+
+/*                      greater than      ...but less    ... the */
+/*                      or equal to ...      than        default is */
+
+/*                           1               30          NS =   2(+) */
+/*                          30               60          NS =   4(+) */
+/*                          60              150          NS =  10(+) */
+/*                         150              590          NS =  ** */
+/*                         590             3000          NS =  64 */
+/*                        3000             6000          NS = 128 */
+/*                        6000             infinity      NS = 256 */
+
+/*                  (+)  By default some or all matrices of this order */
+/*                       are passed to the implicit double shift routine */
+/*                       CLAHQR and this parameter is ignored.  See */
+/*                       ISPEC=12 above and comments in IPARMQ for */
+/*                       details. */
+
+/*                 (**)  The asterisks (**) indicate an ad-hoc */
+/*                       function of N increasing from 10 to 64. */
+
+/*            ISPEC=16: Select structured matrix multiply. */
+/*                      If the number of simultaneous shifts (specified */
+/*                      by ISPEC=15) is less than 14, then the default */
+/*                      for ISPEC=16 is 0.  Otherwise the default for */
+/*                      ISPEC=16 is 2. */
+
+/*     ================================================================ */
+/*     Based on contributions by */
+/*        Karen Braman and Ralph Byers, Department of Mathematics, */
+/*        University of Kansas, USA */
+
+/*     ================================================================ */
+/*     References: */
+/*       K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/*       Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 */
+/*       Performance, SIAM Journal of Matrix Analysis, volume 23, pages */
+/*       929--947, 2002. */
+
+/*       K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/*       Algorithm Part II: Aggressive Early Deflation, SIAM Journal */
+/*       of Matrix Analysis, volume 23, pages 948--973, 2002. */
+
+/*     ================================================================ */
+/*     .. Parameters .. */
+
+/*     ==== Matrices of order NTINY or smaller must be processed by */
+/*     .    CLAHQR because of insufficient subdiagonal scratch space. */
+/*     .    (This is a hard limit.) ==== */
+
+/*     ==== NL allocates some local workspace to help small matrices */
+/*     .    through a rare CLAHQR failure.  NL .GT. NTINY = 11 is */
+/*     .    required and NL .LE. NMIN = ILAENV(ISPEC=12,...) is recom- */
+/*     .    mended.  (The default value of NMIN is 75.)  Using NL = 49 */
+/*     .    allows up to six simultaneous shifts and a 16-by-16 */
+/*     .    deflation window.  ==== */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     ==== Decode and check the input parameters. ==== */
+
+    /* Parameter adjustments */
+    h_dim1 = *ldh;
+    h_offset = 1 + h_dim1;
+    h__ -= h_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+
+    /* Function Body */
+    wantt = lsame_(job, "S");
+    initz = lsame_(compz, "I");
+    wantz = initz || lsame_(compz, "V");
+
+/*     ==== WORK(1) is preset here to MAX(1,N) so that a valid */
+/*     .    workspace estimate is returned even on an early exit */
+/*     .    (invalid argument or N = 0). ==== */
+
+    r__1 = (real) max(1,*n);
+    q__1.r = r__1, q__1.i = 0.f;
+    work[1].r = q__1.r, work[1].i = q__1.i;
+    lquery = *lwork == -1;
+
+    *info = 0;
+    if (! lsame_(job, "E") && ! wantt) {
+	*info = -1;
+    } else if (! lsame_(compz, "N") && ! wantz) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ilo < 1 || *ilo > max(1,*n)) {
+	*info = -4;
+    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+	*info = -5;
+    } else if (*ldh < max(1,*n)) {
+	*info = -7;
+    } else if (*ldz < 1 || wantz && *ldz < max(1,*n)) {
+	*info = -10;
+    } else if (*lwork < max(1,*n) && ! lquery) {
+	*info = -12;
+    }
+
+    if (*info != 0) {
+
+/*        ==== Quick return in case of invalid argument. ==== */
+
+	i__1 = -(*info);
+	xerbla_("CHSEQR", &i__1);
+	return 0;
+
+    } else if (*n == 0) {
+
+/*        ==== Quick return in case N = 0; nothing to do. ==== */
+
+	return 0;
+
+    } else if (lquery) {
+
+/*        ==== Quick return in case of a workspace query ==== */
+
+	claqr0_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &w[1], ilo, 
+		ihi, &z__[z_offset], ldz, &work[1], lwork, info);
+/*        ==== Ensure reported workspace size is backward-compatible with */
+/*        .    previous LAPACK versions. ==== */
+/* Computing MAX */
+	r__2 = work[1].r, r__3 = (real) max(1,*n);
+	r__1 = dmax(r__2,r__3);
+	q__1.r = r__1, q__1.i = 0.f;
+	work[1].r = q__1.r, work[1].i = q__1.i;
+	return 0;
+
+    } else {
+
+/*        ==== copy eigenvalues isolated by CGEBAL ==== */
+
+	if (*ilo > 1) {
+	    i__1 = *ilo - 1;
+	    i__2 = *ldh + 1;
+	    ccopy_(&i__1, &h__[h_offset], &i__2, &w[1], &c__1);
+	}
+	if (*ihi < *n) {
+	    i__1 = *n - *ihi;
+	    i__2 = *ldh + 1;
+	    ccopy_(&i__1, &h__[*ihi + 1 + (*ihi + 1) * h_dim1], &i__2, &w[*
+		    ihi + 1], &c__1);
+	}
+
+/*        ==== Initialize Z, if requested ==== */
+
+	if (initz) {
+	    claset_("A", n, n, &c_b1, &c_b2, &z__[z_offset], ldz);
+	}
+
+/*        ==== Quick return if possible ==== */
+
+	if (*ilo == *ihi) {
+	    i__1 = *ilo;
+	    i__2 = *ilo + *ilo * h_dim1;
+	    w[i__1].r = h__[i__2].r, w[i__1].i = h__[i__2].i;
+	    return 0;
+	}
+
+/*        ==== CLAHQR/CLAQR0 crossover point ==== */
+
+/* Writing concatenation */
+	i__3[0] = 1, a__1[0] = job;
+	i__3[1] = 1, a__1[1] = compz;
+	s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+	nmin = ilaenv_(&c__12, "CHSEQR", ch__1, n, ilo, ihi, lwork);
+	nmin = max(11,nmin);
+
+/*        ==== CLAQR0 for big matrices; CLAHQR for small ones ==== */
+
+	if (*n > nmin) {
+	    claqr0_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &w[1], 
+		    ilo, ihi, &z__[z_offset], ldz, &work[1], lwork, info);
+	} else {
+
+/*           ==== Small matrix ==== */
+
+	    clahqr_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &w[1], 
+		    ilo, ihi, &z__[z_offset], ldz, info);
+
+	    if (*info > 0) {
+
+/*              ==== A rare CLAHQR failure!  CLAQR0 sometimes succeeds */
+/*              .    when CLAHQR fails. ==== */
+
+		kbot = *info;
+
+		if (*n >= 49) {
+
+/*                 ==== Larger matrices have enough subdiagonal scratch */
+/*                 .    space to call CLAQR0 directly. ==== */
+
+		    claqr0_(&wantt, &wantz, n, ilo, &kbot, &h__[h_offset], 
+			    ldh, &w[1], ilo, ihi, &z__[z_offset], ldz, &work[
+			    1], lwork, info);
+
+		} else {
+
+/*                 ==== Tiny matrices don't have enough subdiagonal */
+/*                 .    scratch space to benefit from CLAQR0.  Hence, */
+/*                 .    tiny matrices must be copied into a larger */
+/*                 .    array before calling CLAQR0. ==== */
+
+/*                 ==== Copy H into the leading N-by-N corner of the */
+/*                 .    49-by-49 local array HL, zero the scratch entry */
+/*                 .    HL(N+1,N) just below the copied matrix (0-based */
+/*                 .    offset N + 49*N - 50), clear the trailing 49-N */
+/*                 .    columns, and retry CLAQR0 on the padded matrix */
+/*                 .    using the local workspace WORKL. ==== */
+
+		    clacpy_("A", n, n, &h__[h_offset], ldh, hl, &c__49);
+		    i__1 = *n + 1 + *n * 49 - 50;
+		    hl[i__1].r = 0.f, hl[i__1].i = 0.f;
+		    i__1 = 49 - *n;
+		    claset_("A", &c__49, &i__1, &c_b1, &c_b1, &hl[(*n + 1) * 
+			    49 - 49], &c__49);
+		    claqr0_(&wantt, &wantz, &c__49, ilo, &kbot, hl, &c__49, &
+			    w[1], ilo, ihi, &z__[z_offset], ldz, workl, &
+			    c__49, info);
+		    if (wantt || *info != 0) {
+			clacpy_("A", n, n, hl, &c__49, &h__[h_offset], ldh);
+		    }
+		}
+	    }
+	}
+
+/*        ==== Clear out the trash, if necessary. ==== */
+
+	if ((wantt || *info != 0) && *n > 2) {
+	    i__1 = *n - 2;
+	    i__2 = *n - 2;
+	    claset_("L", &i__1, &i__2, &c_b1, &c_b1, &h__[h_dim1 + 3], ldh);
+	}
+
+/*        ==== Ensure reported workspace size is backward-compatible with */
+/*        .    previous LAPACK versions. ==== */
+
+/* Computing MAX */
+	r__2 = (real) max(1,*n), r__3 = work[1].r;
+	r__1 = dmax(r__2,r__3);
+	q__1.r = r__1, q__1.i = 0.f;
+	work[1].r = q__1.r, work[1].i = q__1.i;
+    }
+
+/*     ==== End of CHSEQR ==== */
+
+    return 0;
+} /* chseqr_ */
diff --git a/contrib/libs/clapack/clabrd.c b/contrib/libs/clapack/clabrd.c
new file mode 100644
index 0000000000..e32d91da16
--- /dev/null
+++ b/contrib/libs/clapack/clabrd.c
@@ -0,0 +1,500 @@
+/* clabrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int clabrd_(integer *m, integer *n, integer *nb, complex *a, 
+	integer *lda, real *d__, real *e, complex *tauq, complex *taup, 
+	complex *x, integer *ldx, complex *y, integer *ldy)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, x_dim1, x_offset, y_dim1, y_offset, i__1, i__2, 
+	    i__3;
+    complex q__1;
+
+    /* Local variables */
+    integer i__;
+    complex alpha;
+    extern /* Subroutine */ int cscal_(integer *, complex *, complex *, 
+	    integer *), cgemv_(char *, integer *, integer *, complex *, 
+	    complex *, integer *, complex *, integer *, complex *, complex *, 
+	    integer *), clarfg_(integer *, complex *, complex *, 
+	    integer *, complex *), clacgv_(integer *, complex *, integer *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CLABRD reduces the first NB rows and columns of a complex general */
+/*  m by n matrix A to upper or lower real bidiagonal form by a unitary */
+/*  transformation Q' * A * P, and returns the matrices X and Y which */
+/*  are needed to apply the transformation to the unreduced part of A. */
+
+/*  If m >= n, A is reduced to upper bidiagonal form; if m < n, to lower */
+/*  bidiagonal form. */
+
+/*  This is an auxiliary routine called by CGEBRD */
+
+/*  Arguments */
+/*  ========= */
+
+/*  M       (input) INTEGER */
+/*          The number of rows in the matrix A. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns in the matrix A. */
+
+/*  NB      (input) INTEGER */
+/*          The number of leading rows and columns of A to be reduced. */
+
+/*  A       (input/output) COMPLEX array, dimension (LDA,N) */
+/*          On entry, the m by n general matrix to be reduced. */
+/*          On exit, the first NB rows and columns of the matrix are */
+/*          overwritten; the rest of the array is unchanged. */
+/*          If m >= n, elements on and below the diagonal in the first NB */
+/*          columns, with the array TAUQ, represent the unitary */
+/*          matrix Q as a product of elementary reflectors; and */
+/*          elements above the diagonal in the first NB rows, with the */
+/*          array TAUP, represent the unitary matrix P as a product */
+/*          of elementary reflectors. */
+/*          If m < n, elements below the diagonal in the first NB */
+/*          columns, with the array TAUQ, represent the unitary */
+/*          matrix Q as a product of elementary reflectors, and */
+/*          elements on and above the diagonal in the first NB rows, */
+/*          with the array TAUP, represent the unitary matrix P as */
+/*          a product of elementary reflectors. */
+/*          See Further Details. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,M). */
+
+/*  D       (output) REAL array, dimension (NB) */
+/*          The diagonal elements of the first NB rows and columns of */
+/*          the reduced matrix.  D(i) = A(i,i). */
+
+/*  E       (output) REAL array, dimension (NB) */
+/*          The off-diagonal elements of the first NB rows and columns of */
+/*          the reduced matrix. */
+
+/*  TAUQ    (output) COMPLEX array dimension (NB) */
+/*          The scalar factors of the elementary reflectors which */
+/*          represent the unitary matrix Q. See Further Details. */
+
+/*  TAUP    (output) COMPLEX array, dimension (NB) */
+/*          The scalar factors of the elementary reflectors which */
+/*          represent the unitary matrix P. See Further Details. */
+
+/*  X       (output) COMPLEX array, dimension (LDX,NB) */
+/*          The m-by-nb matrix X required to update the unreduced part */
+/*          of A. */
+
+/*  LDX     (input) INTEGER */
+/*          The leading dimension of the array X. LDX >= max(1,M). */
+
+/*  Y       (output) COMPLEX array, dimension (LDY,NB) */
+/*          The n-by-nb matrix Y required to update the unreduced part */
+/*          of A. */
+
+/*  LDY     (input) INTEGER */
+/*          The leading dimension of the array Y. LDY >= max(1,N). */
+
+/*  Further Details */
+/*  =============== */
+
+/*  The matrices Q and P are represented as products of elementary */
+/*  reflectors: */
+
+/*     Q = H(1) H(2) . . . H(nb)  and  P = G(1) G(2) . . . G(nb) */
+
+/*  Each H(i) and G(i) has the form: */
+
+/*     H(i) = I - tauq * v * v'  and G(i) = I - taup * u * u' */
+
+/*  where tauq and taup are complex scalars, and v and u are complex */
+/*  vectors. */
+
+/*  If m >= n, v(1:i-1) = 0, v(i) = 1, and v(i:m) is stored on exit in */
+/*  A(i:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+1:n) is stored on exit in */
+/*  A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/*  If m < n, v(1:i) = 0, v(i+1) = 1, and v(i+1:m) is stored on exit in */
+/*  A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i:n) is stored on exit in */
+/*  A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/*  The elements of the vectors v and u together form the m-by-nb matrix */
+/*  V and the nb-by-n matrix U' which are needed, with X and Y, to apply */
+/*  the transformation to the unreduced part of the matrix, using a block */
+/*  update of the form:  A := A - V*Y' - X*U'. */
+
+/*  The contents of A on exit are illustrated by the following examples */
+/*  with nb = 2: */
+
+/*  m = 6 and n = 5 (m > n):          m = 5 and n = 6 (m < n): */
+
+/*    (  1   1   u1  u1  u1 )           (  1   u1  u1  u1  u1  u1 ) */
+/*    (  v1  1   1   u2  u2 )           (  1   1   u2  u2  u2  u2 ) */
+/*    (  v1  v2  a   a   a  )           (  v1  1   a   a   a   a  ) */
+/*    (  v1  v2  a   a   a  )           (  v1  v2  a   a   a   a  ) */
+/*    (  v1  v2  a   a   a  )           (  v1  v2  a   a   a   a  ) */
+/*    (  v1  v2  a   a   a  ) */
+
+/*  where a denotes an element of the original matrix which is unchanged, */
+/*  vi denotes an element of the vector defining H(i), and ui an element */
+/*  of the vector defining G(i). */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Quick return if possible */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --d__;
+    --e;
+    --tauq;
+    --taup;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    y_dim1 = *ldy;
+    y_offset = 1 + y_dim1;
+    y -= y_offset;
+
+    /* Function Body */
+    if (*m <= 0 || *n <= 0) {
+	return 0;
+    }
+
+/*     Note: throughout this routine q__1 is repeatedly set to the */
+/*     complex constant -1 and passed as the ALPHA argument of CGEMV, */
+/*     implementing the subtractive updates A := A - V*Y' - X*U'. */
+/*     The CLACGV calls before/after each CGEMV temporarily conjugate */
+/*     a row vector so that the real-arithmetic GEMV pattern of the */
+/*     original Fortran carries over to complex arithmetic. */
+
+    if (*m >= *n) {
+
+/*        Reduce to upper bidiagonal form */
+
+	i__1 = *nb;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*           Update A(i:m,i) */
+
+	    i__2 = i__ - 1;
+	    clacgv_(&i__2, &y[i__ + y_dim1], ldy);
+	    i__2 = *m - i__ + 1;
+	    i__3 = i__ - 1;
+	    q__1.r = -1.f, q__1.i = -0.f;
+	    cgemv_("No transpose", &i__2, &i__3, &q__1, &a[i__ + a_dim1], lda, 
+		     &y[i__ + y_dim1], ldy, &c_b2, &a[i__ + i__ * a_dim1], &
+		    c__1);
+	    i__2 = i__ - 1;
+	    clacgv_(&i__2, &y[i__ + y_dim1], ldy);
+	    i__2 = *m - i__ + 1;
+	    i__3 = i__ - 1;
+	    q__1.r = -1.f, q__1.i = -0.f;
+	    cgemv_("No transpose", &i__2, &i__3, &q__1, &x[i__ + x_dim1], ldx, 
+		     &a[i__ * a_dim1 + 1], &c__1, &c_b2, &a[i__ + i__ * 
+		    a_dim1], &c__1);
+
+/*           Generate reflection Q(i) to annihilate A(i+1:m,i) */
+
+	    i__2 = i__ + i__ * a_dim1;
+	    alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+	    i__2 = *m - i__ + 1;
+/* Computing MIN */
+	    i__3 = i__ + 1;
+	    clarfg_(&i__2, &alpha, &a[min(i__3, *m)+ i__ * a_dim1], &c__1, &
+		    tauq[i__]);
+	    i__2 = i__;
+	    d__[i__2] = alpha.r;
+	    if (i__ < *n) {
+		i__2 = i__ + i__ * a_dim1;
+		a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/*              Compute Y(i+1:n,i) */
+
+		i__2 = *m - i__ + 1;
+		i__3 = *n - i__;
+		cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + (
+			i__ + 1) * a_dim1], lda, &a[i__ + i__ * a_dim1], &
+			c__1, &c_b1, &y[i__ + 1 + i__ * y_dim1], &c__1);
+		i__2 = *m - i__ + 1;
+		i__3 = i__ - 1;
+		cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + 
+			a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &c_b1, &
+			y[i__ * y_dim1 + 1], &c__1);
+		i__2 = *n - i__;
+		i__3 = i__ - 1;
+		q__1.r = -1.f, q__1.i = -0.f;
+		cgemv_("No transpose", &i__2, &i__3, &q__1, &y[i__ + 1 + 
+			y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b2, &y[
+			i__ + 1 + i__ * y_dim1], &c__1);
+		i__2 = *m - i__ + 1;
+		i__3 = i__ - 1;
+		cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &x[i__ + 
+			x_dim1], ldx, &a[i__ + i__ * a_dim1], &c__1, &c_b1, &
+			y[i__ * y_dim1 + 1], &c__1);
+		i__2 = i__ - 1;
+		i__3 = *n - i__;
+		q__1.r = -1.f, q__1.i = -0.f;
+		cgemv_("Conjugate transpose", &i__2, &i__3, &q__1, &a[(i__ + 
+			1) * a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &
+			c_b2, &y[i__ + 1 + i__ * y_dim1], &c__1);
+		i__2 = *n - i__;
+		cscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
+
+/*              Update A(i,i+1:n) */
+
+		i__2 = *n - i__;
+		clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+		clacgv_(&i__, &a[i__ + a_dim1], lda);
+		i__2 = *n - i__;
+		q__1.r = -1.f, q__1.i = -0.f;
+		cgemv_("No transpose", &i__2, &i__, &q__1, &y[i__ + 1 + 
+			y_dim1], ldy, &a[i__ + a_dim1], lda, &c_b2, &a[i__ + (
+			i__ + 1) * a_dim1], lda);
+		clacgv_(&i__, &a[i__ + a_dim1], lda);
+		i__2 = i__ - 1;
+		clacgv_(&i__2, &x[i__ + x_dim1], ldx);
+		i__2 = i__ - 1;
+		i__3 = *n - i__;
+		q__1.r = -1.f, q__1.i = -0.f;
+		cgemv_("Conjugate transpose", &i__2, &i__3, &q__1, &a[(i__ + 
+			1) * a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b2, &
+			a[i__ + (i__ + 1) * a_dim1], lda);
+		i__2 = i__ - 1;
+		clacgv_(&i__2, &x[i__ + x_dim1], ldx);
+
+/*              Generate reflection P(i) to annihilate A(i,i+2:n) */
+
+		i__2 = i__ + (i__ + 1) * a_dim1;
+		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+		i__2 = *n - i__;
+/* Computing MIN */
+		i__3 = i__ + 2;
+		clarfg_(&i__2, &alpha, &a[i__ + min(i__3, *n)* a_dim1], lda, &
+			taup[i__]);
+		i__2 = i__;
+		e[i__2] = alpha.r;
+		i__2 = i__ + (i__ + 1) * a_dim1;
+		a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/*              Compute X(i+1:m,i) */
+
+		i__2 = *m - i__;
+		i__3 = *n - i__;
+		cgemv_("No transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 + (i__ 
+			+ 1) * a_dim1], lda, &a[i__ + (i__ + 1) * a_dim1], 
+			lda, &c_b1, &x[i__ + 1 + i__ * x_dim1], &c__1);
+		i__2 = *n - i__;
+		cgemv_("Conjugate transpose", &i__2, &i__, &c_b2, &y[i__ + 1 
+			+ y_dim1], ldy, &a[i__ + (i__ + 1) * a_dim1], lda, &
+			c_b1, &x[i__ * x_dim1 + 1], &c__1);
+		i__2 = *m - i__;
+		q__1.r = -1.f, q__1.i = -0.f;
+		cgemv_("No transpose", &i__2, &i__, &q__1, &a[i__ + 1 + 
+			a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b2, &x[
+			i__ + 1 + i__ * x_dim1], &c__1);
+		i__2 = i__ - 1;
+		i__3 = *n - i__;
+		cgemv_("No transpose", &i__2, &i__3, &c_b2, &a[(i__ + 1) * 
+			a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
+			c_b1, &x[i__ * x_dim1 + 1], &c__1);
+		i__2 = *m - i__;
+		i__3 = i__ - 1;
+		q__1.r = -1.f, q__1.i = -0.f;
+		cgemv_("No transpose", &i__2, &i__3, &q__1, &x[i__ + 1 + 
+			x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b2, &x[
+			i__ + 1 + i__ * x_dim1], &c__1);
+		i__2 = *m - i__;
+		cscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
+		i__2 = *n - i__;
+		clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+	    }
+/* L10: */
+	}
+    } else {
+
+/*        Reduce to lower bidiagonal form */
+
+	i__1 = *nb;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*           Update A(i,i:n) */
+
+	    i__2 = *n - i__ + 1;
+	    clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+	    i__2 = i__ - 1;
+	    clacgv_(&i__2, &a[i__ + a_dim1], lda);
+	    i__2 = *n - i__ + 1;
+	    i__3 = i__ - 1;
+	    q__1.r = -1.f, q__1.i = -0.f;
+	    cgemv_("No transpose", &i__2, &i__3, &q__1, &y[i__ + y_dim1], ldy, 
+		     &a[i__ + a_dim1], lda, &c_b2, &a[i__ + i__ * a_dim1], 
+		    lda);
+	    i__2 = i__ - 1;
+	    clacgv_(&i__2, &a[i__ + a_dim1], lda);
+	    i__2 = i__ - 1;
+	    clacgv_(&i__2, &x[i__ + x_dim1], ldx);
+	    i__2 = i__ - 1;
+	    i__3 = *n - i__ + 1;
+	    q__1.r = -1.f, q__1.i = -0.f;
+	    cgemv_("Conjugate transpose", &i__2, &i__3, &q__1, &a[i__ * 
+		    a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b2, &a[i__ + 
+		    i__ * a_dim1], lda);
+	    i__2 = i__ - 1;
+	    clacgv_(&i__2, &x[i__ + x_dim1], ldx);
+
+/*           Generate reflection P(i) to annihilate A(i,i+1:n) */
+
+	    i__2 = i__ + i__ * a_dim1;
+	    alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+	    i__2 = *n - i__ + 1;
+/* Computing MIN */
+	    i__3 = i__ + 1;
+	    clarfg_(&i__2, &alpha, &a[i__ + min(i__3, *n)* a_dim1], lda, &
+		    taup[i__]);
+	    i__2 = i__;
+	    d__[i__2] = alpha.r;
+	    if (i__ < *m) {
+		i__2 = i__ + i__ * a_dim1;
+		a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/*              Compute X(i+1:m,i) */
+
+		i__2 = *m - i__;
+		i__3 = *n - i__ + 1;
+		cgemv_("No transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 + i__ *
+			 a_dim1], lda, &a[i__ + i__ * a_dim1], lda, &c_b1, &x[
+			i__ + 1 + i__ * x_dim1], &c__1);
+		i__2 = *n - i__ + 1;
+		i__3 = i__ - 1;
+		cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &y[i__ + 
+			y_dim1], ldy, &a[i__ + i__ * a_dim1], lda, &c_b1, &x[
+			i__ * x_dim1 + 1], &c__1);
+		i__2 = *m - i__;
+		i__3 = i__ - 1;
+		q__1.r = -1.f, q__1.i = -0.f;
+		cgemv_("No transpose", &i__2, &i__3, &q__1, &a[i__ + 1 + 
+			a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b2, &x[
+			i__ + 1 + i__ * x_dim1], &c__1);
+		i__2 = i__ - 1;
+		i__3 = *n - i__ + 1;
+		cgemv_("No transpose", &i__2, &i__3, &c_b2, &a[i__ * a_dim1 + 
+			1], lda, &a[i__ + i__ * a_dim1], lda, &c_b1, &x[i__ * 
+			x_dim1 + 1], &c__1);
+		i__2 = *m - i__;
+		i__3 = i__ - 1;
+		q__1.r = -1.f, q__1.i = -0.f;
+		cgemv_("No transpose", &i__2, &i__3, &q__1, &x[i__ + 1 + 
+			x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b2, &x[
+			i__ + 1 + i__ * x_dim1], &c__1);
+		i__2 = *m - i__;
+		cscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
+		i__2 = *n - i__ + 1;
+		clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+
+/*              Update A(i+1:m,i) */
+
+		i__2 = i__ - 1;
+		clacgv_(&i__2, &y[i__ + y_dim1], ldy);
+		i__2 = *m - i__;
+		i__3 = i__ - 1;
+		q__1.r = -1.f, q__1.i = -0.f;
+		cgemv_("No transpose", &i__2, &i__3, &q__1, &a[i__ + 1 + 
+			a_dim1], lda, &y[i__ + y_dim1], ldy, &c_b2, &a[i__ + 
+			1 + i__ * a_dim1], &c__1);
+		i__2 = i__ - 1;
+		clacgv_(&i__2, &y[i__ + y_dim1], ldy);
+		i__2 = *m - i__;
+		q__1.r = -1.f, q__1.i = -0.f;
+		cgemv_("No transpose", &i__2, &i__, &q__1, &x[i__ + 1 + 
+			x_dim1], ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b2, &a[
+			i__ + 1 + i__ * a_dim1], &c__1);
+
+/*              Generate reflection Q(i) to annihilate A(i+2:m,i) */
+
+		i__2 = i__ + 1 + i__ * a_dim1;
+		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+		i__2 = *m - i__;
+/* Computing MIN */
+		i__3 = i__ + 2;
+		clarfg_(&i__2, &alpha, &a[min(i__3, *m)+ i__ * a_dim1], &c__1, 
+			 &tauq[i__]);
+		i__2 = i__;
+		e[i__2] = alpha.r;
+		i__2 = i__ + 1 + i__ * a_dim1;
+		a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/*              Compute Y(i+1:n,i) */
+
+		i__2 = *m - i__;
+		i__3 = *n - i__;
+		cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 
+			+ (i__ + 1) * a_dim1], lda, &a[i__ + 1 + i__ * a_dim1]
+, &c__1, &c_b1, &y[i__ + 1 + i__ * y_dim1], &c__1);
+		i__2 = *m - i__;
+		i__3 = i__ - 1;
+		cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 
+			+ a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+			c_b1, &y[i__ * y_dim1 + 1], &c__1);
+		i__2 = *n - i__;
+		i__3 = i__ - 1;
+		q__1.r = -1.f, q__1.i = -0.f;
+		cgemv_("No transpose", &i__2, &i__3, &q__1, &y[i__ + 1 + 
+			y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b2, &y[
+			i__ + 1 + i__ * y_dim1], &c__1);
+		i__2 = *m - i__;
+		cgemv_("Conjugate transpose", &i__2, &i__, &c_b2, &x[i__ + 1 
+			+ x_dim1], ldx, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+			c_b1, &y[i__ * y_dim1 + 1], &c__1);
+		i__2 = *n - i__;
+		q__1.r = -1.f, q__1.i = -0.f;
+		cgemv_("Conjugate transpose", &i__, &i__2, &q__1, &a[(i__ + 1)
+			 * a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &
+			c_b2, &y[i__ + 1 + i__ * y_dim1], &c__1);
+		i__2 = *n - i__;
+		cscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
+	    } else {
+
+/*              No reflection Q(i) is generated in this case (the */
+/*              I < M branch was skipped).  Row A(i,i:n) was conjugated */
+/*              at the top of this iteration; restore it here, as the */
+/*              I < M branch above does before its A(i+1:m,i) update. */
+
+		i__2 = *n - i__ + 1;
+		clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+	    }
+/* L20: */
+	}
+    }
+    return 0;
+
+/*     End of CLABRD */
+
+} /* clabrd_ */
diff --git a/contrib/libs/clapack/clacgv.c b/contrib/libs/clapack/clacgv.c
new file mode 100644
index 0000000000..31fa8ff9ae
--- /dev/null
+++ b/contrib/libs/clapack/clacgv.c
@@ -0,0 +1,95 @@
+/* clacgv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int clacgv_(integer *n, complex *x, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, ioff;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLACGV conjugates a complex vector of length N. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The length of the vector X. N >= 0. */
+
+/* X (input/output) COMPLEX array, dimension */
+/* (1+(N-1)*abs(INCX)) */
+/* On entry, the vector of length N to be conjugated. */
+/* On exit, X is overwritten with conjg(X). */
+
+/* INCX (input) INTEGER */
+/* The spacing between successive elements of X. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*incx == 1) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ r_cnjg(&q__1, &x[i__]);
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+/* L10: */
+ }
+ } else {
+ ioff = 1;
+ if (*incx < 0) {
+ ioff = 1 - (*n - 1) * *incx;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ioff;
+ r_cnjg(&q__1, &x[ioff]);
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ ioff += *incx;
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of CLACGV */
+
+} /* clacgv_ */
diff --git a/contrib/libs/clapack/clacn2.c b/contrib/libs/clapack/clacn2.c
new file mode 100644
index 0000000000..221f4e3064
--- /dev/null
+++ b/contrib/libs/clapack/clacn2.c
@@ -0,0 +1,283 @@
+/* clacn2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int clacn2_(integer *n, complex *v, complex *x, real *est,
+ integer *kase, integer *isave)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ real r__1, r__2;
+ complex q__1;
+
+ /* Builtin functions */
+ double c_abs(complex *), r_imag(complex *);
+
+ /* Local variables */
+ integer i__;
+ real temp, absxi;
+ integer jlast;
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *);
+ extern integer icmax1_(integer *, complex *, integer *);
+ extern doublereal scsum1_(integer *, complex *, integer *), slamch_(char *
+);
+ real safmin, altsgn, estold;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLACN2 estimates the 1-norm of a square, complex matrix A. */
+/* Reverse communication is used for evaluating matrix-vector products. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 1. */
+
+/* V (workspace) COMPLEX array, dimension (N) */
+/* On the final return, V = A*W, where EST = norm(V)/norm(W) */
+/* (W is not returned). */
+
+/* X (input/output) COMPLEX array, dimension (N) */
+/* On an intermediate return, X should be overwritten by */
+/* A * X, if KASE=1, */
+/* A' * X, if KASE=2, */
+/* where A' is the conjugate transpose of A, and CLACN2 must be */
+/* re-called with all the other parameters unchanged. */
+
+/* EST (input/output) REAL */
+/* On entry with KASE = 1 or 2 and ISAVE(1) = 3, EST should be */
+/* unchanged from the previous call to CLACN2. */
+/* On exit, EST is an estimate (a lower bound) for norm(A). */
+
+/* KASE (input/output) INTEGER */
+/* On the initial call to CLACN2, KASE should be 0. */
+/* On an intermediate return, KASE will be 1 or 2, indicating */
+/* whether X should be overwritten by A * X or A' * X. */
+/* On the final return from CLACN2, KASE will again be 0. */
+
+/* ISAVE (input/output) INTEGER array, dimension (3) */
+/* ISAVE is used to save variables between calls to SLACN2 */
+
+/* Further Details */
+/* ======= ======= */
+
+/* Contributed by Nick Higham, University of Manchester. */
+/* Originally named CONEST, dated March 16, 1988. */
+
+/* Reference: N.J. Higham, "FORTRAN codes for estimating the one-norm of */
+/* a real or complex matrix, with applications to condition estimation", */
+/* ACM Trans. Math. Soft., vol. 14, no. 4, pp. 381-396, December 1988. */
+
+/* Last modified: April, 1999 */
+
+/* This is a thread safe version of CLACON, which uses the array ISAVE */
+/* in place of a SAVE statement, as follows: */
+
+/* CLACON CLACN2 */
+/* JUMP ISAVE(1) */
+/* J ISAVE(2) */
+/* ITER ISAVE(3) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --isave;
+ --x;
+ --v;
+
+ /* Function Body */
+ safmin = slamch_("Safe minimum");
+ if (*kase == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ r__1 = 1.f / (real) (*n);
+ q__1.r = r__1, q__1.i = 0.f;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+/* L10: */
+ }
+ *kase = 1;
+ isave[1] = 1;
+ return 0;
+ }
+
+ switch (isave[1]) {
+ case 1: goto L20;
+ case 2: goto L40;
+ case 3: goto L70;
+ case 4: goto L90;
+ case 5: goto L120;
+ }
+
+/* ................ ENTRY (ISAVE( 1 ) = 1) */
+/* FIRST ITERATION. X HAS BEEN OVERWRITTEN BY A*X. */
+
+L20:
+ if (*n == 1) {
+ v[1].r = x[1].r, v[1].i = x[1].i;
+ *est = c_abs(&v[1]);
+/* ... QUIT */
+ goto L130;
+ }
+ *est = scsum1_(n, &x[1], &c__1);
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ absxi = c_abs(&x[i__]);
+ if (absxi > safmin) {
+ i__2 = i__;
+ i__3 = i__;
+ r__1 = x[i__3].r / absxi;
+ r__2 = r_imag(&x[i__]) / absxi;
+ q__1.r = r__1, q__1.i = r__2;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ } else {
+ i__2 = i__;
+ x[i__2].r = 1.f, x[i__2].i = 0.f;
+ }
+/* L30: */
+ }
+ *kase = 2;
+ isave[1] = 2;
+ return 0;
+
+/* ................ ENTRY (ISAVE( 1 ) = 2) */
+/* FIRST ITERATION. X HAS BEEN OVERWRITTEN BY CTRANS(A)*X. */
+
+L40:
+ isave[2] = icmax1_(n, &x[1], &c__1);
+ isave[3] = 2;
+
+/* MAIN LOOP - ITERATIONS 2,3,...,ITMAX. */
+
+L50:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ x[i__2].r = 0.f, x[i__2].i = 0.f;
+/* L60: */
+ }
+ i__1 = isave[2];
+ x[i__1].r = 1.f, x[i__1].i = 0.f;
+ *kase = 1;
+ isave[1] = 3;
+ return 0;
+
+/* ................ ENTRY (ISAVE( 1 ) = 3) */
+/* X HAS BEEN OVERWRITTEN BY A*X. */
+
+L70:
+ ccopy_(n, &x[1], &c__1, &v[1], &c__1);
+ estold = *est;
+ *est = scsum1_(n, &v[1], &c__1);
+
+/* TEST FOR CYCLING. */
+ if (*est <= estold) {
+ goto L100;
+ }
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ absxi = c_abs(&x[i__]);
+ if (absxi > safmin) {
+ i__2 = i__;
+ i__3 = i__;
+ r__1 = x[i__3].r / absxi;
+ r__2 = r_imag(&x[i__]) / absxi;
+ q__1.r = r__1, q__1.i = r__2;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ } else {
+ i__2 = i__;
+ x[i__2].r = 1.f, x[i__2].i = 0.f;
+ }
+/* L80: */
+ }
+ *kase = 2;
+ isave[1] = 4;
+ return 0;
+
+/* ................ ENTRY (ISAVE( 1 ) = 4) */
+/* X HAS BEEN OVERWRITTEN BY CTRANS(A)*X. */
+
+L90:
+ jlast = isave[2];
+ isave[2] = icmax1_(n, &x[1], &c__1);
+ if (c_abs(&x[jlast]) != c_abs(&x[isave[2]]) && isave[3] < 5) {
+ ++isave[3];
+ goto L50;
+ }
+
+/* ITERATION COMPLETE. FINAL STAGE. */
+
+L100:
+ altsgn = 1.f;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ r__1 = altsgn * ((real) (i__ - 1) / (real) (*n - 1) + 1.f);
+ q__1.r = r__1, q__1.i = 0.f;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ altsgn = -altsgn;
+/* L110: */
+ }
+ *kase = 1;
+ isave[1] = 5;
+ return 0;
+
+/* ................ ENTRY (ISAVE( 1 ) = 5) */
+/* X HAS BEEN OVERWRITTEN BY A*X. */
+
+L120:
+ temp = scsum1_(n, &x[1], &c__1) / (real) (*n * 3) * 2.f;
+ if (temp > *est) {
+ ccopy_(n, &x[1], &c__1, &v[1], &c__1);
+ *est = temp;
+ }
+
+L130:
+ *kase = 0;
+ return 0;
+
+/* End of CLACN2 */
+
+} /* clacn2_ */
diff --git a/contrib/libs/clapack/clacon.c b/contrib/libs/clapack/clacon.c
new file mode 100644
index 0000000000..f77e3dc7bd
--- /dev/null
+++ b/contrib/libs/clapack/clacon.c
@@ -0,0 +1,275 @@
+/* clacon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int clacon_(integer *n, complex *v, complex *x, real *est,
+ integer *kase)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ real r__1, r__2;
+ complex q__1;
+
+ /* Builtin functions */
+ double c_abs(complex *), r_imag(complex *);
+
+ /* Local variables */
+ static integer i__, j, iter;
+ static real temp;
+ static integer jump;
+ static real absxi;
+ static integer jlast;
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *);
+ extern integer icmax1_(integer *, complex *, integer *);
+ extern doublereal scsum1_(integer *, complex *, integer *), slamch_(char *
+);
+ static real safmin, altsgn, estold;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLACON estimates the 1-norm of a square, complex matrix A. */
+/* Reverse communication is used for evaluating matrix-vector products. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 1. */
+
+/* V (workspace) COMPLEX array, dimension (N) */
+/* On the final return, V = A*W, where EST = norm(V)/norm(W) */
+/* (W is not returned). */
+
+/* X (input/output) COMPLEX array, dimension (N) */
+/* On an intermediate return, X should be overwritten by */
+/* A * X, if KASE=1, */
+/* A' * X, if KASE=2, */
+/* where A' is the conjugate transpose of A, and CLACON must be */
+/* re-called with all the other parameters unchanged. */
+
+/* EST (input/output) REAL */
+/* On entry with KASE = 1 or 2 and JUMP = 3, EST should be */
+/* unchanged from the previous call to CLACON. */
+/* On exit, EST is an estimate (a lower bound) for norm(A). */
+
+/* KASE (input/output) INTEGER */
+/* On the initial call to CLACON, KASE should be 0. */
+/* On an intermediate return, KASE will be 1 or 2, indicating */
+/* whether X should be overwritten by A * X or A' * X. */
+/* On the final return from CLACON, KASE will again be 0. */
+
+/* Further Details */
+/* ======= ======= */
+
+/* Contributed by Nick Higham, University of Manchester. */
+/* Originally named CONEST, dated March 16, 1988. */
+
+/* Reference: N.J. Higham, "FORTRAN codes for estimating the one-norm of */
+/* a real or complex matrix, with applications to condition estimation", */
+/* ACM Trans. Math. Soft., vol. 14, no. 4, pp. 381-396, December 1988. */
+
+/* Last modified: April, 1999 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Save statement .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --x;
+ --v;
+
+ /* Function Body */
+ safmin = slamch_("Safe minimum");
+ if (*kase == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ r__1 = 1.f / (real) (*n);
+ q__1.r = r__1, q__1.i = 0.f;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+/* L10: */
+ }
+ *kase = 1;
+ jump = 1;
+ return 0;
+ }
+
+ switch (jump) {
+ case 1: goto L20;
+ case 2: goto L40;
+ case 3: goto L70;
+ case 4: goto L90;
+ case 5: goto L120;
+ }
+
+/* ................ ENTRY (JUMP = 1) */
+/* FIRST ITERATION. X HAS BEEN OVERWRITTEN BY A*X. */
+
+L20:
+ if (*n == 1) {
+ v[1].r = x[1].r, v[1].i = x[1].i;
+ *est = c_abs(&v[1]);
+/* ... QUIT */
+ goto L130;
+ }
+ *est = scsum1_(n, &x[1], &c__1);
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ absxi = c_abs(&x[i__]);
+ if (absxi > safmin) {
+ i__2 = i__;
+ i__3 = i__;
+ r__1 = x[i__3].r / absxi;
+ r__2 = r_imag(&x[i__]) / absxi;
+ q__1.r = r__1, q__1.i = r__2;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ } else {
+ i__2 = i__;
+ x[i__2].r = 1.f, x[i__2].i = 0.f;
+ }
+/* L30: */
+ }
+ *kase = 2;
+ jump = 2;
+ return 0;
+
+/* ................ ENTRY (JUMP = 2) */
+/* FIRST ITERATION. X HAS BEEN OVERWRITTEN BY CTRANS(A)*X. */
+
+L40:
+ j = icmax1_(n, &x[1], &c__1);
+ iter = 2;
+
+/* MAIN LOOP - ITERATIONS 2,3,...,ITMAX. */
+
+L50:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ x[i__2].r = 0.f, x[i__2].i = 0.f;
+/* L60: */
+ }
+ i__1 = j;
+ x[i__1].r = 1.f, x[i__1].i = 0.f;
+ *kase = 1;
+ jump = 3;
+ return 0;
+
+/* ................ ENTRY (JUMP = 3) */
+/* X HAS BEEN OVERWRITTEN BY A*X. */
+
+L70:
+ ccopy_(n, &x[1], &c__1, &v[1], &c__1);
+ estold = *est;
+ *est = scsum1_(n, &v[1], &c__1);
+
+/* TEST FOR CYCLING. */
+ if (*est <= estold) {
+ goto L100;
+ }
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ absxi = c_abs(&x[i__]);
+ if (absxi > safmin) {
+ i__2 = i__;
+ i__3 = i__;
+ r__1 = x[i__3].r / absxi;
+ r__2 = r_imag(&x[i__]) / absxi;
+ q__1.r = r__1, q__1.i = r__2;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ } else {
+ i__2 = i__;
+ x[i__2].r = 1.f, x[i__2].i = 0.f;
+ }
+/* L80: */
+ }
+ *kase = 2;
+ jump = 4;
+ return 0;
+
+/* ................ ENTRY (JUMP = 4) */
+/* X HAS BEEN OVERWRITTEN BY CTRANS(A)*X. */
+
+L90:
+ jlast = j;
+ j = icmax1_(n, &x[1], &c__1);
+ if (c_abs(&x[jlast]) != c_abs(&x[j]) && iter < 5) {
+ ++iter;
+ goto L50;
+ }
+
+/* ITERATION COMPLETE. FINAL STAGE. */
+
+L100:
+ altsgn = 1.f;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ r__1 = altsgn * ((real) (i__ - 1) / (real) (*n - 1) + 1.f);
+ q__1.r = r__1, q__1.i = 0.f;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ altsgn = -altsgn;
+/* L110: */
+ }
+ *kase = 1;
+ jump = 5;
+ return 0;
+
+/* ................ ENTRY (JUMP = 5) */
+/* X HAS BEEN OVERWRITTEN BY A*X. */
+
+L120:
+ temp = scsum1_(n, &x[1], &c__1) / (real) (*n * 3) * 2.f;
+ if (temp > *est) {
+ ccopy_(n, &x[1], &c__1, &v[1], &c__1);
+ *est = temp;
+ }
+
+L130:
+ *kase = 0;
+ return 0;
+
+/* End of CLACON */
+
+} /* clacon_ */
diff --git a/contrib/libs/clapack/clacp2.c b/contrib/libs/clapack/clacp2.c
new file mode 100644
index 0000000000..379eb8f837
--- /dev/null
+++ b/contrib/libs/clapack/clacp2.c
@@ -0,0 +1,134 @@
+/* clacp2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int clacp2_(char *uplo, integer *m, integer *n, real *a,
+ integer *lda, complex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLACP2 copies all or part of a real two-dimensional matrix A to a */
+/* complex matrix B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies the part of the matrix A to be copied to B. */
+/* = 'U': Upper triangular part */
+/* = 'L': Lower triangular part */
+/* Otherwise: All of the matrix A */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The m by n matrix A. If UPLO = 'U', only the upper trapezium */
+/* is accessed; if UPLO = 'L', only the lower trapezium is */
+/* accessed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (output) COMPLEX array, dimension (LDB,N) */
+/* On exit, B = A in the locations specified by UPLO. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(j,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4], b[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+
+ } else if (lsame_(uplo, "L")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4], b[i__3].i = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4], b[i__3].i = 0.f;
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+ return 0;
+
+/* End of CLACP2 */
+
+} /* clacp2_ */
diff --git a/contrib/libs/clapack/clacpy.c b/contrib/libs/clapack/clacpy.c
new file mode 100644
index 0000000000..6d3f585053
--- /dev/null
+++ b/contrib/libs/clapack/clacpy.c
@@ -0,0 +1,134 @@
+/* clacpy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int clacpy_(char *uplo, integer *m, integer *n, complex *a,
+ integer *lda, complex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLACPY copies all or part of a two-dimensional matrix A to another */
+/* matrix B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies the part of the matrix A to be copied to B. */
+/* = 'U': Upper triangular part */
+/* = 'L': Lower triangular part */
+/* Otherwise: All of the matrix A */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The m by n matrix A. If UPLO = 'U', only the upper trapezium */
+/* is accessed; if UPLO = 'L', only the lower trapezium is */
+/* accessed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (output) COMPLEX array, dimension (LDB,N) */
+/* On exit, B = A in the locations specified by UPLO. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(j,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4].r, b[i__3].i = a[i__4].i;
+/* L10: */
+ }
+/* L20: */
+ }
+
+ } else if (lsame_(uplo, "L")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4].r, b[i__3].i = a[i__4].i;
+/* L30: */
+ }
+/* L40: */
+ }
+
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4].r, b[i__3].i = a[i__4].i;
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+ return 0;
+
+/* End of CLACPY */
+
+} /* clacpy_ */
diff --git a/contrib/libs/clapack/clacrm.c b/contrib/libs/clapack/clacrm.c
new file mode 100644
index 0000000000..ea759a183c
--- /dev/null
+++ b/contrib/libs/clapack/clacrm.c
@@ -0,0 +1,176 @@
+/* clacrm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b6 = 1.f;
+static real c_b7 = 0.f;
+
+/* Subroutine */ int clacrm_(integer *m, integer *n, complex *a, integer *lda,
+ real *b, integer *ldb, complex *c__, integer *ldc, real *rwork)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, a_dim1, a_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5;
+ real r__1;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer i__, j, l;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLACRM performs a very simple matrix-matrix multiplication: */
+/* C := A * B, */
+/* where A is M by N and complex; B is N by N and real; */
+/* C is M by N and complex. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A and of the matrix C. */
+/* M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns and rows of the matrix B and */
+/* the number of columns of the matrix C. */
+/* N >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA, N) */
+/* A contains the M by N matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >=max(1,M). */
+
+/* B (input) REAL array, dimension (LDB, N) */
+/* B contains the N by N matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >=max(1,N). */
+
+/* C (input) COMPLEX array, dimension (LDC, N) */
+/* C contains the M by N matrix C. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >=max(1,N). */
+
+/* RWORK (workspace) REAL array, dimension (2*M*N) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --rwork;
+
+ /* Function Body */
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ rwork[(j - 1) * *m + i__] = a[i__3].r;
+/* L10: */
+ }
+/* L20: */
+ }
+
+ l = *m * *n + 1;
+ sgemm_("N", "N", m, n, n, &c_b6, &rwork[1], m, &b[b_offset], ldb, &c_b7, &
+ rwork[l], m);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = l + (j - 1) * *m + i__ - 1;
+ c__[i__3].r = rwork[i__4], c__[i__3].i = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ rwork[(j - 1) * *m + i__] = r_imag(&a[i__ + j * a_dim1]);
+/* L50: */
+ }
+/* L60: */
+ }
+ sgemm_("N", "N", m, n, n, &c_b6, &rwork[1], m, &b[b_offset], ldb, &c_b7, &
+ rwork[l], m);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ r__1 = c__[i__4].r;
+ i__5 = l + (j - 1) * *m + i__ - 1;
+ q__1.r = r__1, q__1.i = rwork[i__5];
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+
+ return 0;
+
+/* End of CLACRM */
+
+} /* clacrm_ */
diff --git a/contrib/libs/clapack/clacrt.c b/contrib/libs/clapack/clacrt.c
new file mode 100644
index 0000000000..45bd6776c4
--- /dev/null
+++ b/contrib/libs/clapack/clacrt.c
@@ -0,0 +1,155 @@
+/* clacrt.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int clacrt_(integer *n, complex *cx, integer *incx, complex *
+ cy, integer *incy, complex *c__, complex *s)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ complex q__1, q__2, q__3;
+
+ /* Local variables */
+ integer i__, ix, iy;
+ complex ctemp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLACRT performs the operation */
+
+/* ( c s )( x ) ==> ( x ) */
+/* ( -s c )( y ) ( y ) */
+
+/* where c and s are complex and the vectors x and y are complex. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of elements in the vectors CX and CY. */
+
+/* CX (input/output) COMPLEX array, dimension (N) */
+/* On input, the vector x. */
+/* On output, CX is overwritten with c*x + s*y. */
+
+/* INCX (input) INTEGER */
+/* The increment between successive values of CX. INCX <> 0. */
+
+/* CY (input/output) COMPLEX array, dimension (N) */
+/* On input, the vector y. */
+/* On output, CY is overwritten with -s*x + c*y. */
+
+/* INCY (input) INTEGER */
+/* The increment between successive values of CY. INCY <> 0. */
+
+/* C (input) COMPLEX */
+/* S (input) COMPLEX */
+/* C and S define the matrix */
+/* [ C S ]. */
+/* [ -S C ] */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --cy;
+ --cx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/* Code for unequal increments or equal increments not equal to 1 */
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ q__2.r = c__->r * cx[i__2].r - c__->i * cx[i__2].i, q__2.i = c__->r *
+ cx[i__2].i + c__->i * cx[i__2].r;
+ i__3 = iy;
+ q__3.r = s->r * cy[i__3].r - s->i * cy[i__3].i, q__3.i = s->r * cy[
+ i__3].i + s->i * cy[i__3].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ ctemp.r = q__1.r, ctemp.i = q__1.i;
+ i__2 = iy;
+ i__3 = iy;
+ q__2.r = c__->r * cy[i__3].r - c__->i * cy[i__3].i, q__2.i = c__->r *
+ cy[i__3].i + c__->i * cy[i__3].r;
+ i__4 = ix;
+ q__3.r = s->r * cx[i__4].r - s->i * cx[i__4].i, q__3.i = s->r * cx[
+ i__4].i + s->i * cx[i__4].r;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+ cy[i__2].r = q__1.r, cy[i__2].i = q__1.i;
+ i__2 = ix;
+ cx[i__2].r = ctemp.r, cx[i__2].i = ctemp.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* Code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ q__2.r = c__->r * cx[i__2].r - c__->i * cx[i__2].i, q__2.i = c__->r *
+ cx[i__2].i + c__->i * cx[i__2].r;
+ i__3 = i__;
+ q__3.r = s->r * cy[i__3].r - s->i * cy[i__3].i, q__3.i = s->r * cy[
+ i__3].i + s->i * cy[i__3].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ ctemp.r = q__1.r, ctemp.i = q__1.i;
+ i__2 = i__;
+ i__3 = i__;
+ q__2.r = c__->r * cy[i__3].r - c__->i * cy[i__3].i, q__2.i = c__->r *
+ cy[i__3].i + c__->i * cy[i__3].r;
+ i__4 = i__;
+ q__3.r = s->r * cx[i__4].r - s->i * cx[i__4].i, q__3.i = s->r * cx[
+ i__4].i + s->i * cx[i__4].r;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+ cy[i__2].r = q__1.r, cy[i__2].i = q__1.i;
+ i__2 = i__;
+ cx[i__2].r = ctemp.r, cx[i__2].i = ctemp.i;
+/* L30: */
+ }
+ return 0;
+} /* clacrt_ */
diff --git a/contrib/libs/clapack/cladiv.c b/contrib/libs/clapack/cladiv.c
new file mode 100644
index 0000000000..d10c9595fe
--- /dev/null
+++ b/contrib/libs/clapack/cladiv.c
@@ -0,0 +1,74 @@
+/* cladiv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Complex */ VOID cladiv_(complex * ret_val, complex *x, complex *y)
+{
+ /* System generated locals */
+ real r__1, r__2, r__3, r__4;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ real zi, zr;
+ extern /* Subroutine */ int sladiv_(real *, real *, real *, real *, real *
+, real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLADIV := X / Y, where X and Y are complex. The computation of X / Y */
+/* will not overflow on an intermediary step unless the results */
+/* overflows. */
+
+/* Arguments */
+/* ========= */
+
+/* X (input) COMPLEX */
+/* Y (input) COMPLEX */
+/* The complex scalars X and Y. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Unpack the real and imaginary parts of X and Y into addressable */
+/* temporaries (f2c passes all arguments by reference). */
+ r__1 = x->r;
+ r__2 = r_imag(x);
+ r__3 = y->r;
+ r__4 = r_imag(y);
+/* SLADIV computes (ZR,ZI) = X/Y in real arithmetic; per the header */
+/* above it avoids overflow on intermediate quantities (presumably */
+/* Smith's algorithm -- see sladiv.f for the actual method). */
+ sladiv_(&r__1, &r__2, &r__3, &r__4, &zr, &zi);
+/* Pack the quotient back into the complex return slot. */
+ q__1.r = zr, q__1.i = zi;
+ ret_val->r = q__1.r, ret_val->i = q__1.i;
+
+ return ;
+
+/* End of CLADIV */
+
+} /* cladiv_ */
diff --git a/contrib/libs/clapack/claed0.c b/contrib/libs/clapack/claed0.c
new file mode 100644
index 0000000000..a1f2a9bcb5
--- /dev/null
+++ b/contrib/libs/clapack/claed0.c
@@ -0,0 +1,367 @@
+/* claed0.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__9 = 9;
+static integer c__0 = 0;
+static integer c__2 = 2;
+static integer c__1 = 1;
+
+/* Subroutine */ int claed0_(integer *qsiz, integer *n, real *d__, real *e,
+ complex *q, integer *ldq, complex *qstore, integer *ldqs, real *rwork,
+ integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, qstore_dim1, qstore_offset, i__1, i__2;
+ real r__1;
+
+ /* Builtin functions */
+ double log(doublereal);
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ integer i__, j, k, ll, iq, lgn, msd2, smm1, spm1, spm2;
+ real temp;
+ integer curr, iperm;
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *);
+ integer indxq, iwrem;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ integer iqptr;
+ extern /* Subroutine */ int claed7_(integer *, integer *, integer *,
+ integer *, integer *, integer *, real *, complex *, integer *,
+ real *, integer *, real *, integer *, integer *, integer *,
+ integer *, integer *, real *, complex *, real *, integer *,
+ integer *);
+ integer tlvls;
+ extern /* Subroutine */ int clacrm_(integer *, integer *, complex *,
+ integer *, real *, integer *, complex *, integer *, real *);
+ integer igivcl;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer igivnm, submat, curprb, subpbs, igivpt, curlvl, matsiz, iprmpt,
+ smlsiz;
+ extern /* Subroutine */ int ssteqr_(char *, integer *, real *, real *,
+ real *, integer *, real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Using the divide and conquer method, CLAED0 computes all eigenvalues */
+/* of a symmetric tridiagonal matrix which is one diagonal block of */
+/* those from reducing a dense or band Hermitian matrix and */
+/* corresponding eigenvectors of the dense or band matrix. */
+
+/* Arguments */
+/* ========= */
+
+/* QSIZ (input) INTEGER */
+/* The dimension of the unitary matrix used to reduce */
+/* the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1. */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the diagonal elements of the tridiagonal matrix. */
+/* On exit, the eigenvalues in ascending order. */
+
+/* E (input/output) REAL array, dimension (N-1) */
+/* On entry, the off-diagonal elements of the tridiagonal matrix. */
+/* On exit, E has been destroyed. */
+
+/* Q (input/output) COMPLEX array, dimension (LDQ,N) */
+/* On entry, Q must contain an QSIZ x N matrix whose columns */
+/* unitarily orthonormal. It is a part of the unitary matrix */
+/* that reduces the full dense Hermitian matrix to a */
+/* (reducible) symmetric tridiagonal matrix. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* IWORK (workspace) INTEGER array, */
+/* the dimension of IWORK must be at least */
+/* 6 + 6*N + 5*N*lg N */
+/* ( lg( N ) = smallest integer k */
+/* such that 2^k >= N ) */
+
+/* RWORK (workspace) REAL array, */
+/* dimension (1 + 3*N + 2*N*lg N + 3*N**2) */
+/* ( lg( N ) = smallest integer k */
+/* such that 2^k >= N ) */
+
+/* QSTORE (workspace) COMPLEX array, dimension (LDQS, N) */
+/* Used to store parts of */
+/* the eigenvector matrix when the updating matrix multiplies */
+/* take place. */
+
+/* LDQS (input) INTEGER */
+/* The leading dimension of the array QSTORE. */
+/* LDQS >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: The algorithm failed to compute an eigenvalue while */
+/* working on the submatrix lying in rows and columns */
+/* INFO/(N+1) through mod(INFO,N+1). */
+
+/* ===================================================================== */
+
+/* Warning: N could be as big as QSIZ! */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ qstore_dim1 = *ldqs;
+ qstore_offset = 1 + qstore_dim1;
+ qstore -= qstore_offset;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+/* IF( ICOMPQ .LT. 0 .OR. ICOMPQ .GT. 2 ) THEN */
+/* INFO = -1 */
+/* ELSE IF( ( ICOMPQ .EQ. 1 ) .AND. ( QSIZ .LT. MAX( 0, N ) ) ) */
+/* $ THEN */
+ if (*qsiz < max(0,*n)) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldq < max(1,*n)) {
+ *info = -6;
+ } else if (*ldqs < max(1,*n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLAED0", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* SMLSIZ: crossover size below which subproblems are solved directly */
+/* by SSTEQR (queried from ILAENV, ispec 9). */
+ smlsiz = ilaenv_(&c__9, "CLAED0", " ", &c__0, &c__0, &c__0, &c__0);
+
+/* Determine the size and placement of the submatrices, and save in */
+/* the leading elements of IWORK. */
+
+ iwork[1] = *n;
+ subpbs = 1;
+ tlvls = 0;
+L10:
+ if (iwork[subpbs] > smlsiz) {
+ for (j = subpbs; j >= 1; --j) {
+ iwork[j * 2] = (iwork[j] + 1) / 2;
+ iwork[(j << 1) - 1] = iwork[j] / 2;
+/* L20: */
+ }
+ ++tlvls;
+ subpbs <<= 1;
+ goto L10;
+ }
+ i__1 = subpbs;
+ for (j = 2; j <= i__1; ++j) {
+ iwork[j] += iwork[j - 1];
+/* L30: */
+ }
+
+/* Divide the matrix into SUBPBS submatrices of size at most SMLSIZ+1 */
+/* using rank-1 modifications (cuts). */
+
+ spm1 = subpbs - 1;
+ i__1 = spm1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ submat = iwork[i__] + 1;
+ smm1 = submat - 1;
+ d__[smm1] -= (r__1 = e[smm1], dabs(r__1));
+ d__[submat] -= (r__1 = e[smm1], dabs(r__1));
+/* L40: */
+ }
+
+ indxq = (*n << 2) + 3;
+
+/* Set up workspaces for eigenvalues only/accumulate new vectors */
+/* routine */
+
+/* LGN = smallest integer with 2**LGN >= N. The guard below appears */
+/* twice in the upstream Fortran source and is kept verbatim here. */
+ temp = log((real) (*n)) / log(2.f);
+ lgn = (integer) temp;
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ iprmpt = indxq + *n + 1;
+ iperm = iprmpt + *n * lgn;
+ iqptr = iperm + *n * lgn;
+ igivpt = iqptr + *n + 2;
+ igivcl = igivpt + *n * lgn;
+
+ igivnm = 1;
+ iq = igivnm + (*n << 1) * lgn;
+/* Computing 2nd power */
+ i__1 = *n;
+ iwrem = iq + i__1 * i__1 + 1;
+/* Initialize pointers */
+ i__1 = subpbs;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ iwork[iprmpt + i__] = 1;
+ iwork[igivpt + i__] = 1;
+/* L50: */
+ }
+ iwork[iqptr] = 1;
+
+/* Solve each submatrix eigenproblem at the bottom of the divide and */
+/* conquer tree. */
+
+ curr = 0;
+ i__1 = spm1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ if (i__ == 0) {
+ submat = 1;
+ matsiz = iwork[1];
+ } else {
+ submat = iwork[i__] + 1;
+ matsiz = iwork[i__ + 1] - iwork[i__];
+ }
+ ll = iq - 1 + iwork[iqptr + curr];
+ ssteqr_("I", &matsiz, &d__[submat], &e[submat], &rwork[ll], &matsiz, &
+ rwork[1], info);
+ clacrm_(qsiz, &matsiz, &q[submat * q_dim1 + 1], ldq, &rwork[ll], &
+ matsiz, &qstore[submat * qstore_dim1 + 1], ldqs, &rwork[iwrem]
+);
+/* Computing 2nd power */
+ i__2 = matsiz;
+ iwork[iqptr + curr + 1] = iwork[iqptr + curr] + i__2 * i__2;
+ ++curr;
+ if (*info > 0) {
+ *info = submat * (*n + 1) + submat + matsiz - 1;
+ return 0;
+ }
+ k = 1;
+ i__2 = iwork[i__ + 1];
+ for (j = submat; j <= i__2; ++j) {
+ iwork[indxq + j] = k;
+ ++k;
+/* L60: */
+ }
+/* L70: */
+ }
+
+/* Successively merge eigensystems of adjacent submatrices */
+/* into eigensystem for the corresponding larger matrix. */
+
+/* while ( SUBPBS > 1 ) */
+
+ curlvl = 1;
+L80:
+ if (subpbs > 1) {
+ spm2 = subpbs - 2;
+ i__1 = spm2;
+ for (i__ = 0; i__ <= i__1; i__ += 2) {
+ if (i__ == 0) {
+ submat = 1;
+ matsiz = iwork[2];
+ msd2 = iwork[1];
+ curprb = 0;
+ } else {
+ submat = iwork[i__] + 1;
+ matsiz = iwork[i__ + 2] - iwork[i__];
+ msd2 = matsiz / 2;
+ ++curprb;
+ }
+
+/* Merge lower order eigensystems (of size MSD2 and MATSIZ - MSD2) */
+/* into an eigensystem of size MATSIZ. CLAED7 handles the case */
+/* when the eigenvectors of a full or band Hermitian matrix (which */
+/* was reduced to tridiagonal form) are desired. */
+
+/* I am free to use Q as a valuable working space until Loop 150. */
+
+ claed7_(&matsiz, &msd2, qsiz, &tlvls, &curlvl, &curprb, &d__[
+ submat], &qstore[submat * qstore_dim1 + 1], ldqs, &e[
+ submat + msd2 - 1], &iwork[indxq + submat], &rwork[iq], &
+ iwork[iqptr], &iwork[iprmpt], &iwork[iperm], &iwork[
+ igivpt], &iwork[igivcl], &rwork[igivnm], &q[submat *
+ q_dim1 + 1], &rwork[iwrem], &iwork[subpbs + 1], info);
+ if (*info > 0) {
+ *info = submat * (*n + 1) + submat + matsiz - 1;
+ return 0;
+ }
+/* Record the merged submatrix boundary for the next coarser level. */
+ iwork[i__ / 2 + 1] = iwork[i__ + 2];
+/* L90: */
+ }
+ subpbs /= 2;
+ ++curlvl;
+ goto L80;
+ }
+
+/* end while */
+
+/* Re-merge the eigenvalues/vectors which were deflated at the final */
+/* merge step. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ j = iwork[indxq + i__];
+ rwork[i__] = d__[j];
+ ccopy_(qsiz, &qstore[j * qstore_dim1 + 1], &c__1, &q[i__ * q_dim1 + 1]
+, &c__1);
+/* L100: */
+ }
+ scopy_(n, &rwork[1], &c__1, &d__[1], &c__1);
+
+ return 0;
+
+/* End of CLAED0 */
+
+} /* claed0_ */
diff --git a/contrib/libs/clapack/claed7.c b/contrib/libs/clapack/claed7.c
new file mode 100644
index 0000000000..14728f8bc3
--- /dev/null
+++ b/contrib/libs/clapack/claed7.c
@@ -0,0 +1,325 @@
+/* claed7.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__2 = 2;
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int claed7_(integer *n, integer *cutpnt, integer *qsiz,
+ integer *tlvls, integer *curlvl, integer *curpbm, real *d__, complex *
+ q, integer *ldq, real *rho, integer *indxq, real *qstore, integer *
+ qptr, integer *prmptr, integer *perm, integer *givptr, integer *
+ givcol, real *givnum, complex *work, real *rwork, integer *iwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ integer i__, k, n1, n2, iq, iw, iz, ptr, indx, curr, indxc, indxp;
+ extern /* Subroutine */ int claed8_(integer *, integer *, integer *,
+ complex *, integer *, real *, real *, integer *, real *, real *,
+ complex *, integer *, real *, integer *, integer *, integer *,
+ integer *, integer *, integer *, real *, integer *), slaed9_(
+ integer *, integer *, integer *, integer *, real *, real *,
+ integer *, real *, real *, real *, real *, integer *, integer *),
+ slaeda_(integer *, integer *, integer *, integer *, integer *,
+ integer *, integer *, integer *, real *, real *, integer *, real *
+, real *, integer *);
+ integer idlmda;
+ extern /* Subroutine */ int clacrm_(integer *, integer *, complex *,
+ integer *, real *, integer *, complex *, integer *, real *),
+ xerbla_(char *, integer *), slamrg_(integer *, integer *,
+ real *, integer *, integer *, integer *);
+ integer coltyp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAED7 computes the updated eigensystem of a diagonal */
+/* matrix after modification by a rank-one symmetric matrix. This */
+/* routine is used only for the eigenproblem which requires all */
+/* eigenvalues and optionally eigenvectors of a dense or banded */
+/* Hermitian matrix that has been reduced to tridiagonal form. */
+
+/* T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out) */
+
+/* where Z = Q'u, u is a vector of length N with ones in the */
+/* CUTPNT and CUTPNT + 1 th elements and zeros elsewhere. */
+
+/* The eigenvectors of the original matrix are stored in Q, and the */
+/* eigenvalues are in D. The algorithm consists of three stages: */
+
+/* The first stage consists of deflating the size of the problem */
+/* when there are multiple eigenvalues or if there is a zero in */
+/* the Z vector. For each such occurence the dimension of the */
+/* secular equation problem is reduced by one. This stage is */
+/* performed by the routine SLAED2. */
+
+/* The second stage consists of calculating the updated */
+/* eigenvalues. This is done by finding the roots of the secular */
+/* equation via the routine SLAED4 (as called by SLAED3). */
+/* This routine also calculates the eigenvectors of the current */
+/* problem. */
+
+/* The final stage consists of computing the updated eigenvectors */
+/* directly using the updated eigenvalues. The eigenvectors for */
+/* the current problem are multiplied with the eigenvectors from */
+/* the overall problem. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* CUTPNT (input) INTEGER */
+/* Contains the location of the last eigenvalue in the leading */
+/* sub-matrix. min(1,N) <= CUTPNT <= N. */
+
+/* QSIZ (input) INTEGER */
+/* The dimension of the unitary matrix used to reduce */
+/* the full matrix to tridiagonal form. QSIZ >= N. */
+
+/* TLVLS (input) INTEGER */
+/* The total number of merging levels in the overall divide and */
+/* conquer tree. */
+
+/* CURLVL (input) INTEGER */
+/* The current level in the overall merge routine, */
+/* 0 <= curlvl <= tlvls. */
+
+/* CURPBM (input) INTEGER */
+/* The current problem in the current level in the overall */
+/* merge routine (counting from upper left to lower right). */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the eigenvalues of the rank-1-perturbed matrix. */
+/* On exit, the eigenvalues of the repaired matrix. */
+
+/* Q (input/output) COMPLEX array, dimension (LDQ,N) */
+/* On entry, the eigenvectors of the rank-1-perturbed matrix. */
+/* On exit, the eigenvectors of the repaired tridiagonal matrix. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* RHO (input) REAL */
+/* Contains the subdiagonal element used to create the rank-1 */
+/* modification. */
+
+/* INDXQ (output) INTEGER array, dimension (N) */
+/* This contains the permutation which will reintegrate the */
+/* subproblem just solved back into sorted order, */
+/* ie. D( INDXQ( I = 1, N ) ) will be in ascending order. */
+
+/* IWORK (workspace) INTEGER array, dimension (4*N) */
+
+/* RWORK (workspace) REAL array, */
+/* dimension (3*N+2*QSIZ*N) */
+
+/* WORK (workspace) COMPLEX array, dimension (QSIZ*N) */
+
+/* QSTORE (input/output) REAL array, dimension (N**2+1) */
+/* Stores eigenvectors of submatrices encountered during */
+/* divide and conquer, packed together. QPTR points to */
+/* beginning of the submatrices. */
+
+/* QPTR (input/output) INTEGER array, dimension (N+2) */
+/* List of indices pointing to beginning of submatrices stored */
+/* in QSTORE. The submatrices are numbered starting at the */
+/* bottom left of the divide and conquer tree, from left to */
+/* right and bottom to top. */
+
+/* PRMPTR (input) INTEGER array, dimension (N lg N) */
+/* Contains a list of pointers which indicate where in PERM a */
+/* level's permutation is stored. PRMPTR(i+1) - PRMPTR(i) */
+/* indicates the size of the permutation and also the size of */
+/* the full, non-deflated problem. */
+
+/* PERM (input) INTEGER array, dimension (N lg N) */
+/* Contains the permutations (from deflation and sorting) to be */
+/* applied to each eigenblock. */
+
+/* GIVPTR (input) INTEGER array, dimension (N lg N) */
+/* Contains a list of pointers which indicate where in GIVCOL a */
+/* level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i) */
+/* indicates the number of Givens rotations. */
+
+/* GIVCOL (input) INTEGER array, dimension (2, N lg N) */
+/* Each pair of numbers indicates a pair of columns to take place */
+/* in a Givens rotation. */
+
+/* GIVNUM (input) REAL array, dimension (2, N lg N) */
+/* Each number indicates the S value to be used in the */
+/* corresponding Givens rotation. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, an eigenvalue did not converge */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --indxq;
+ --qstore;
+ --qptr;
+ --prmptr;
+ --perm;
+ --givptr;
+ givcol -= 3;
+ givnum -= 3;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+/* IF( ICOMPQ.LT.0 .OR. ICOMPQ.GT.1 ) THEN */
+/* INFO = -1 */
+/* ELSE IF( N.LT.0 ) THEN */
+ if (*n < 0) {
+ *info = -1;
+ } else if (min(1,*n) > *cutpnt || *n < *cutpnt) {
+ *info = -2;
+ } else if (*qsiz < *n) {
+ *info = -3;
+ } else if (*ldq < max(1,*n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLAED7", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* The following values are for bookkeeping purposes only. They are */
+/* integer pointers which indicate the portion of the workspace */
+/* used by a particular array in SLAED2 and SLAED3. */
+
+ iz = 1;
+ idlmda = iz + *n;
+ iw = idlmda + *n;
+ iq = iw + *n;
+
+ indx = 1;
+ indxc = indx + *n;
+ coltyp = indxc + *n;
+ indxp = coltyp + *n;
+
+/* Form the z-vector which consists of the last row of Q_1 and the */
+/* first row of Q_2. */
+
+/* Locate this subproblem's slot CURR in the bookkeeping arrays by */
+/* walking the merge tree from the root down to level CURLVL. */
+ ptr = pow_ii(&c__2, tlvls) + 1;
+ i__1 = *curlvl - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *tlvls - i__;
+ ptr += pow_ii(&c__2, &i__2);
+/* L10: */
+ }
+ curr = ptr + *curpbm;
+ slaeda_(n, tlvls, curlvl, curpbm, &prmptr[1], &perm[1], &givptr[1], &
+ givcol[3], &givnum[3], &qstore[1], &qptr[1], &rwork[iz], &rwork[
+ iz + *n], info);
+
+/* When solving the final problem, we no longer need the stored data, */
+/* so we will overwrite the data from this level onto the previously */
+/* used storage space. */
+
+ if (*curlvl == *tlvls) {
+ qptr[curr] = 1;
+ prmptr[curr] = 1;
+ givptr[curr] = 1;
+ }
+
+/* Sort and Deflate eigenvalues. */
+
+ claed8_(&k, n, qsiz, &q[q_offset], ldq, &d__[1], rho, cutpnt, &rwork[iz],
+ &rwork[idlmda], &work[1], qsiz, &rwork[iw], &iwork[indxp], &iwork[
+ indx], &indxq[1], &perm[prmptr[curr]], &givptr[curr + 1], &givcol[
+ (givptr[curr] << 1) + 1], &givnum[(givptr[curr] << 1) + 1], info);
+/* Advance the bookkeeping pointers past the data stored by CLAED8. */
+ prmptr[curr + 1] = prmptr[curr] + *n;
+ givptr[curr + 1] += givptr[curr];
+
+/* Solve Secular Equation. */
+
+ if (k != 0) {
+ slaed9_(&k, &c__1, &k, n, &d__[1], &rwork[iq], &k, rho, &rwork[idlmda]
+, &rwork[iw], &qstore[qptr[curr]], &k, info);
+ clacrm_(qsiz, &k, &work[1], qsiz, &qstore[qptr[curr]], &k, &q[
+ q_offset], ldq, &rwork[iq]);
+/* Computing 2nd power */
+ i__1 = k;
+ qptr[curr + 1] = qptr[curr] + i__1 * i__1;
+ if (*info != 0) {
+ return 0;
+ }
+
+/* Prepare the INDXQ sorting premutation. */
+
+ n1 = k;
+ n2 = *n - k;
+ slamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]);
+ } else {
+/* Everything deflated: eigenvalues are already sorted, so the */
+/* reintegration permutation is the identity. */
+ qptr[curr + 1] = qptr[curr];
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ indxq[i__] = i__;
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of CLAED7 */
+
+} /* claed7_ */
diff --git a/contrib/libs/clapack/claed8.c b/contrib/libs/clapack/claed8.c
new file mode 100644
index 0000000000..74e9416a4b
--- /dev/null
+++ b/contrib/libs/clapack/claed8.c
@@ -0,0 +1,436 @@
+/* claed8.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b3 = -1.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int claed8_(integer *k, integer *n, integer *qsiz, complex *
+ q, integer *ldq, real *d__, real *rho, integer *cutpnt, real *z__,
+ real *dlamda, complex *q2, integer *ldq2, real *w, integer *indxp,
+ integer *indx, integer *indxq, integer *perm, integer *givptr,
+ integer *givcol, real *givnum, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, q2_dim1, q2_offset, i__1;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ real c__;
+ integer i__, j;
+ real s, t;
+ integer k2, n1, n2, jp, n1p1;
+ real eps, tau, tol;
+ integer jlam, imax, jmax;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ ccopy_(integer *, complex *, integer *, complex *, integer *),
+ csrot_(integer *, complex *, integer *, complex *, integer *,
+ real *, real *), scopy_(integer *, real *, integer *, real *,
+ integer *);
+ extern doublereal slapy2_(real *, real *), slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), xerbla_(char *,
+ integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int slamrg_(integer *, integer *, real *, integer
+ *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAED8 merges the two sets of eigenvalues together into a single */
+/* sorted set. Then it tries to deflate the size of the problem. */
+/* There are two ways in which deflation can occur: when two or more */
+/* eigenvalues are close together or if there is a tiny element in the */
+/* Z vector. For each such occurrence the order of the related secular */
+/* equation problem is reduced by one. */
+
+/* Arguments */
+/* ========= */
+
+/* K (output) INTEGER */
+/* Contains the number of non-deflated eigenvalues. */
+/* This is the order of the related secular equation. */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* QSIZ (input) INTEGER */
+/* The dimension of the unitary matrix used to reduce */
+/* the dense or band matrix to tridiagonal form. */
+/* QSIZ >= N if ICOMPQ = 1. */
+
+/* Q (input/output) COMPLEX array, dimension (LDQ,N) */
+/* On entry, Q contains the eigenvectors of the partially solved */
+/* system which has been previously updated in matrix */
+/* multiplies with other partially solved eigensystems. */
+/* On exit, Q contains the trailing (N-K) updated eigenvectors */
+/* (those which were deflated) in its last N-K columns. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max( 1, N ). */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, D contains the eigenvalues of the two submatrices to */
+/* be combined. On exit, D contains the trailing (N-K) updated */
+/* eigenvalues (those which were deflated) sorted into increasing */
+/* order. */
+
+/* RHO (input/output) REAL */
+/* Contains the off diagonal element associated with the rank-1 */
+/* cut which originally split the two submatrices which are now */
+/* being recombined. RHO is modified during the computation to */
+/* the value required by SLAED3. */
+
+/* CUTPNT (input) INTEGER */
+/* Contains the location of the last eigenvalue in the leading */
+/* sub-matrix. MIN(1,N) <= CUTPNT <= N. */
+
+/* Z (input) REAL array, dimension (N) */
+/* On input this vector contains the updating vector (the last */
+/* row of the first sub-eigenvector matrix and the first row of */
+/* the second sub-eigenvector matrix). The contents of Z are */
+/* destroyed during the updating process. */
+
+/* DLAMDA (output) REAL array, dimension (N) */
+/* Contains a copy of the first K eigenvalues which will be used */
+/* by SLAED3 to form the secular equation. */
+
+/* Q2 (output) COMPLEX array, dimension (LDQ2,N) */
+/* If ICOMPQ = 0, Q2 is not referenced. Otherwise, */
+/* Contains a copy of the first K eigenvectors which will be used */
+/* by SLAED7 in a matrix multiply (SGEMM) to update the new */
+/* eigenvectors. */
+
+/* LDQ2 (input) INTEGER */
+/* The leading dimension of the array Q2. LDQ2 >= max( 1, N ). */
+
+/* W (output) REAL array, dimension (N) */
+/* This will hold the first k values of the final */
+/* deflation-altered z-vector and will be passed to SLAED3. */
+
+/* INDXP (workspace) INTEGER array, dimension (N) */
+/* This will contain the permutation used to place deflated */
+/* values of D at the end of the array. On output INDXP(1:K) */
+/* points to the nondeflated D-values and INDXP(K+1:N) */
+/* points to the deflated eigenvalues. */
+
+/* INDX (workspace) INTEGER array, dimension (N) */
+/* This will contain the permutation used to sort the contents of */
+/* D into ascending order. */
+
+/* INDXQ (input) INTEGER array, dimension (N) */
+/* This contains the permutation which separately sorts the two */
+/* sub-problems in D into ascending order. Note that elements in */
+/* the second half of this permutation must first have CUTPNT */
+/* added to their values in order to be accurate. */
+
+/* PERM (output) INTEGER array, dimension (N) */
+/* Contains the permutations (from deflation and sorting) to be */
+/* applied to each eigenblock. */
+
+/* GIVPTR (output) INTEGER */
+/* Contains the number of Givens rotations which took place in */
+/* this subproblem. */
+
+/* GIVCOL (output) INTEGER array, dimension (2, N) */
+/* Each pair of numbers indicates a pair of columns to take place */
+/* in a Givens rotation. */
+
+/* GIVNUM (output) REAL array, dimension (2, N) */
+/* Each number indicates the S value to be used in the */
+/* corresponding Givens rotation. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --d__;
+ --z__;
+ --dlamda;
+ q2_dim1 = *ldq2;
+ q2_offset = 1 + q2_dim1;
+ q2 -= q2_offset;
+ --w;
+ --indxp;
+ --indx;
+ --indxq;
+ --perm;
+ givcol -= 3;
+ givnum -= 3;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -2;
+ } else if (*qsiz < *n) {
+ *info = -3;
+ } else if (*ldq < max(1,*n)) {
+ *info = -5;
+ } else if (*cutpnt < min(1,*n) || *cutpnt > *n) {
+ *info = -8;
+ } else if (*ldq2 < max(1,*n)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLAED8", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ n1 = *cutpnt;
+ n2 = *n - n1;
+ n1p1 = n1 + 1;
+
+/* A negative RHO is absorbed by flipping the sign of the trailing */
+/* half of Z (RHO itself is made positive below). */
+ if (*rho < 0.f) {
+ sscal_(&n2, &c_b3, &z__[n1p1], &c__1);
+ }
+
+/* Normalize z so that norm(z) = 1 */
+
+/* The two halves of Z are rows of unitary matrices (see the Z */
+/* argument description), so each has unit norm; scaling by */
+/* 1/sqrt(2) normalizes the concatenated vector. */
+ t = 1.f / sqrt(2.f);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ indx[j] = j;
+/* L10: */
+ }
+ sscal_(n, &t, &z__[1], &c__1);
+ *rho = (r__1 = *rho * 2.f, dabs(r__1));
+
+/* Sort the eigenvalues into increasing order */
+
+ i__1 = *n;
+ for (i__ = *cutpnt + 1; i__ <= i__1; ++i__) {
+ indxq[i__] += *cutpnt;
+/* L20: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlamda[i__] = d__[indxq[i__]];
+ w[i__] = z__[indxq[i__]];
+/* L30: */
+ }
+ i__ = 1;
+ j = *cutpnt + 1;
+ slamrg_(&n1, &n2, &dlamda[1], &c__1, &c__1, &indx[1]);
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = dlamda[indx[i__]];
+ z__[i__] = w[indx[i__]];
+/* L40: */
+ }
+
+/* Calculate the allowable deflation tolerance */
+
+ imax = isamax_(n, &z__[1], &c__1);
+ jmax = isamax_(n, &d__[1], &c__1);
+ eps = slamch_("Epsilon");
+ tol = eps * 8.f * (r__1 = d__[jmax], dabs(r__1));
+
+/* If the rank-1 modifier is small enough, no more needs to be done */
+/* -- except to reorganize Q so that its columns correspond with the */
+/* elements in D. */
+
+ if (*rho * (r__1 = z__[imax], dabs(r__1)) <= tol) {
+ *k = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ perm[j] = indxq[indx[j]];
+ ccopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1]
+, &c__1);
+/* L50: */
+ }
+ clacpy_("A", qsiz, n, &q2[q2_dim1 + 1], ldq2, &q[q_dim1 + 1], ldq);
+ return 0;
+ }
+
+/* If there are multiple eigenvalues then the problem deflates. Here */
+/* the number of equal eigenvalues are found. As each equal */
+/* eigenvalue is found, an elementary reflector is computed to rotate */
+/* the corresponding eigensubspace so that the corresponding */
+/* components of Z are zero in this new basis. */
+
+ *k = 0;
+ *givptr = 0;
+ k2 = *n + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*rho * (r__1 = z__[j], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ indxp[k2] = j;
+ if (j == *n) {
+ goto L100;
+ }
+ } else {
+ jlam = j;
+ goto L70;
+ }
+/* L60: */
+ }
+L70:
+ ++j;
+ if (j > *n) {
+ goto L90;
+ }
+ if (*rho * (r__1 = z__[j], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ indxp[k2] = j;
+ } else {
+
+/* Check if eigenvalues are close enough to allow deflation. */
+
+ s = z__[jlam];
+ c__ = z__[j];
+
+/* Find sqrt(a**2+b**2) without overflow or */
+/* destructive underflow. */
+
+ tau = slapy2_(&c__, &s);
+ t = d__[j] - d__[jlam];
+ c__ /= tau;
+ s = -s / tau;
+ if ((r__1 = t * c__ * s, dabs(r__1)) <= tol) {
+
+/* Deflation is possible. */
+
+ z__[j] = tau;
+ z__[jlam] = 0.f;
+
+/* Record the appropriate Givens rotation */
+
+ ++(*givptr);
+ givcol[(*givptr << 1) + 1] = indxq[indx[jlam]];
+ givcol[(*givptr << 1) + 2] = indxq[indx[j]];
+ givnum[(*givptr << 1) + 1] = c__;
+ givnum[(*givptr << 1) + 2] = s;
+ csrot_(qsiz, &q[indxq[indx[jlam]] * q_dim1 + 1], &c__1, &q[indxq[
+ indx[j]] * q_dim1 + 1], &c__1, &c__, &s);
+ t = d__[jlam] * c__ * c__ + d__[j] * s * s;
+ d__[j] = d__[jlam] * s * s + d__[j] * c__ * c__;
+ d__[jlam] = t;
+ --k2;
+ i__ = 1;
+/* Insertion step: keep the deflated eigenvalues stored in */
+/* INDXP(K2+I:N) in ascending order. */
+L80:
+ if (k2 + i__ <= *n) {
+ if (d__[jlam] < d__[indxp[k2 + i__]]) {
+ indxp[k2 + i__ - 1] = indxp[k2 + i__];
+ indxp[k2 + i__] = jlam;
+ ++i__;
+ goto L80;
+ } else {
+ indxp[k2 + i__ - 1] = jlam;
+ }
+ } else {
+ indxp[k2 + i__ - 1] = jlam;
+ }
+ jlam = j;
+ } else {
+ ++(*k);
+ w[*k] = z__[jlam];
+ dlamda[*k] = d__[jlam];
+ indxp[*k] = jlam;
+ jlam = j;
+ }
+ }
+ goto L70;
+L90:
+
+/* Record the last eigenvalue. */
+
+ ++(*k);
+ w[*k] = z__[jlam];
+ dlamda[*k] = d__[jlam];
+ indxp[*k] = jlam;
+
+L100:
+
+/* Sort the eigenvalues and corresponding eigenvectors into DLAMDA */
+/* and Q2 respectively. The eigenvalues/vectors which were not */
+/* deflated go into the first K slots of DLAMDA and Q2 respectively, */
+/* while those which were deflated go into the last N - K slots. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ jp = indxp[j];
+ dlamda[j] = d__[jp];
+ perm[j] = indxq[indx[jp]];
+ ccopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1], &
+ c__1);
+/* L110: */
+ }
+
+/* The deflated eigenvalues and their corresponding vectors go back */
+/* into the last N - K slots of D and Q respectively. */
+
+ if (*k < *n) {
+ i__1 = *n - *k;
+ scopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1);
+ i__1 = *n - *k;
+ clacpy_("A", qsiz, &i__1, &q2[(*k + 1) * q2_dim1 + 1], ldq2, &q[(*k +
+ 1) * q_dim1 + 1], ldq);
+ }
+
+ return 0;
+
+/* End of CLAED8 */
+
+} /* claed8_ */
diff --git a/contrib/libs/clapack/claein.c b/contrib/libs/clapack/claein.c
new file mode 100644
index 0000000000..48c0b2099f
--- /dev/null
+++ b/contrib/libs/clapack/claein.c
@@ -0,0 +1,392 @@
+/* claein.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int claein_(logical *rightv, logical *noinit, integer *n, 
+	complex *h__, integer *ldh, complex *w, complex *v, complex *b, 
+	integer *ldb, real *rwork, real *eps3, real *smlnum, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, h_dim1, h_offset, i__1, i__2, i__3, i__4, i__5;
+    real r__1, r__2, r__3, r__4;
+    complex q__1, q__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal), r_imag(complex *);
+
+    /* Local variables */
+    integer i__, j;
+    complex x, ei, ej;
+    integer its, ierr;
+    complex temp;
+    real scale;
+    char trans[1];
+    real rtemp, rootn, vnorm;
+    extern doublereal scnrm2_(integer *, complex *, integer *);
+    extern integer icamax_(integer *, complex *, integer *);
+    extern /* Complex */ VOID cladiv_(complex *, complex *, complex *);
+    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer 
+	    *), clatrs_(char *, char *, char *, char *, integer *, complex *, 
+	    integer *, complex *, real *, real *, integer *);
+    extern doublereal scasum_(integer *, complex *, integer *);
+    char normin[1];
+    real nrmsml, growto;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAEIN uses inverse iteration to find a right or left eigenvector */
+/* corresponding to the eigenvalue W of a complex upper Hessenberg */
+/* matrix H. */
+
+/* Arguments */
+/* ========= */
+
+/* RIGHTV (input) LOGICAL */
+/* = .TRUE. : compute right eigenvector; */
+/* = .FALSE.: compute left eigenvector. */
+
+/* NOINIT (input) LOGICAL */
+/* = .TRUE. : no initial vector supplied in V */
+/* = .FALSE.: initial vector supplied in V. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N >= 0. */
+
+/* H (input) COMPLEX array, dimension (LDH,N) */
+/* The upper Hessenberg matrix H. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH >= max(1,N). */
+
+/* W (input) COMPLEX */
+/* The eigenvalue of H whose corresponding right or left */
+/* eigenvector is to be computed. */
+
+/* V (input/output) COMPLEX array, dimension (N) */
+/* On entry, if NOINIT = .FALSE., V must contain a starting */
+/* vector for inverse iteration; otherwise V need not be set. */
+/* On exit, V contains the computed eigenvector, normalized so */
+/* that the component of largest magnitude has magnitude 1; here */
+/* the magnitude of a complex number (x,y) is taken to be */
+/* |x| + |y|. */
+
+/* B (workspace) COMPLEX array, dimension (LDB,N) */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* EPS3 (input) REAL */
+/* A small machine-dependent value which is used to perturb */
+/* close eigenvalues, and to replace zero pivots. */
+
+/* SMLNUM (input) REAL */
+/* A machine-dependent value close to the underflow threshold. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* = 1: inverse iteration did not converge; V is set to the */
+/* last iterate. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    h_dim1 = *ldh;
+    h_offset = 1 + h_dim1;
+    h__ -= h_offset;
+    --v;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+
+/* GROWTO is the threshold used in the acceptance test for an */
+/* eigenvector. */
+
+    rootn = sqrt((real) (*n));
+    growto = .1f / rootn;
+/* Computing MAX */
+    r__1 = 1.f, r__2 = *eps3 * rootn;
+    nrmsml = dmax(r__1,r__2) * *smlnum;
+
+/* Form B = H - W*I (except that the subdiagonal elements are not */
+/* stored). */
+
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = j - 1;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * b_dim1;
+	    i__4 = i__ + j * h_dim1;
+	    b[i__3].r = h__[i__4].r, b[i__3].i = h__[i__4].i;
+/* L10: end copy of the strict upper part of column j */
+	}
+	i__2 = j + j * b_dim1;
+	i__3 = j + j * h_dim1;
+	q__1.r = h__[i__3].r - w->r, q__1.i = h__[i__3].i - w->i;
+	b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L20: end column loop forming B = H - W*I */
+    }
+
+    if (*noinit) {
+
+/* Initialize V. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__;
+	    v[i__2].r = *eps3, v[i__2].i = 0.f;
+/* L30: end fill of V with the uniform starting value EPS3 */
+	}
+    } else {
+
+/* Scale supplied initial vector. */
+
+	vnorm = scnrm2_(n, &v[1], &c__1);
+	r__1 = *eps3 * rootn / dmax(vnorm,nrmsml);
+	csscal_(n, &r__1, &v[1], &c__1);
+    }
+
+    if (*rightv) {
+
+/* LU decomposition with partial pivoting of B, replacing zero */
+/* pivots by EPS3. */
+
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__ + 1 + i__ * h_dim1;
+	    ei.r = h__[i__2].r, ei.i = h__[i__2].i;
+	    i__2 = i__ + i__ * b_dim1;
+	    if ((r__1 = b[i__2].r, dabs(r__1)) + (r__2 = r_imag(&b[i__ + i__ *
+		     b_dim1]), dabs(r__2)) < (r__3 = ei.r, dabs(r__3)) + (
+		    r__4 = r_imag(&ei), dabs(r__4))) {
+
+/* Interchange rows and eliminate. */
+
+		cladiv_(&q__1, &b[i__ + i__ * b_dim1], &ei);
+		x.r = q__1.r, x.i = q__1.i;
+		i__2 = i__ + i__ * b_dim1;
+		b[i__2].r = ei.r, b[i__2].i = ei.i;
+		i__2 = *n;
+		for (j = i__ + 1; j <= i__2; ++j) {
+		    i__3 = i__ + 1 + j * b_dim1;
+		    temp.r = b[i__3].r, temp.i = b[i__3].i;
+		    i__3 = i__ + 1 + j * b_dim1;
+		    i__4 = i__ + j * b_dim1;
+		    q__2.r = x.r * temp.r - x.i * temp.i, q__2.i = x.r * 
+			    temp.i + x.i * temp.r;
+		    q__1.r = b[i__4].r - q__2.r, q__1.i = b[i__4].i - q__2.i;
+		    b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+		    i__3 = i__ + j * b_dim1;
+		    b[i__3].r = temp.r, b[i__3].i = temp.i;
+/* L40: end row update after interchange */
+		}
+	    } else {
+
+/* Eliminate without interchange. */
+
+		i__2 = i__ + i__ * b_dim1;
+		if (b[i__2].r == 0.f && b[i__2].i == 0.f) {
+		    i__3 = i__ + i__ * b_dim1;
+		    b[i__3].r = *eps3, b[i__3].i = 0.f;
+		}
+		cladiv_(&q__1, &ei, &b[i__ + i__ * b_dim1]);
+		x.r = q__1.r, x.i = q__1.i;
+		if (x.r != 0.f || x.i != 0.f) {
+		    i__2 = *n;
+		    for (j = i__ + 1; j <= i__2; ++j) {
+			i__3 = i__ + 1 + j * b_dim1;
+			i__4 = i__ + 1 + j * b_dim1;
+			i__5 = i__ + j * b_dim1;
+			q__2.r = x.r * b[i__5].r - x.i * b[i__5].i, q__2.i = 
+				x.r * b[i__5].i + x.i * b[i__5].r;
+			q__1.r = b[i__4].r - q__2.r, q__1.i = b[i__4].i - 
+				q__2.i;
+			b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L50: end row update without interchange */
+		    }
+		}
+	    }
+/* L60: end LU elimination step i */
+	}
+	i__1 = *n + *n * b_dim1;
+	if (b[i__1].r == 0.f && b[i__1].i == 0.f) {
+	    i__2 = *n + *n * b_dim1;
+	    b[i__2].r = *eps3, b[i__2].i = 0.f;
+	}
+
+	*(unsigned char *)trans = 'N';
+
+    } else {
+
+/* UL decomposition with partial pivoting of B, replacing zero */
+/* pivots by EPS3. */
+
+	for (j = *n; j >= 2; --j) {
+	    i__1 = j + (j - 1) * h_dim1;
+	    ej.r = h__[i__1].r, ej.i = h__[i__1].i;
+	    i__1 = j + j * b_dim1;
+	    if ((r__1 = b[i__1].r, dabs(r__1)) + (r__2 = r_imag(&b[j + j * 
+		    b_dim1]), dabs(r__2)) < (r__3 = ej.r, dabs(r__3)) + (r__4 
+		    = r_imag(&ej), dabs(r__4))) {
+
+/* Interchange columns and eliminate. */
+
+		cladiv_(&q__1, &b[j + j * b_dim1], &ej);
+		x.r = q__1.r, x.i = q__1.i;
+		i__1 = j + j * b_dim1;
+		b[i__1].r = ej.r, b[i__1].i = ej.i;
+		i__1 = j - 1;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = i__ + (j - 1) * b_dim1;
+		    temp.r = b[i__2].r, temp.i = b[i__2].i;
+		    i__2 = i__ + (j - 1) * b_dim1;
+		    i__3 = i__ + j * b_dim1;
+		    q__2.r = x.r * temp.r - x.i * temp.i, q__2.i = x.r * 
+			    temp.i + x.i * temp.r;
+		    q__1.r = b[i__3].r - q__2.r, q__1.i = b[i__3].i - q__2.i;
+		    b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+		    i__2 = i__ + j * b_dim1;
+		    b[i__2].r = temp.r, b[i__2].i = temp.i;
+/* L70: end column update after interchange */
+		}
+	    } else {
+
+/* Eliminate without interchange. */
+
+		i__1 = j + j * b_dim1;
+		if (b[i__1].r == 0.f && b[i__1].i == 0.f) {
+		    i__2 = j + j * b_dim1;
+		    b[i__2].r = *eps3, b[i__2].i = 0.f;
+		}
+		cladiv_(&q__1, &ej, &b[j + j * b_dim1]);
+		x.r = q__1.r, x.i = q__1.i;
+		if (x.r != 0.f || x.i != 0.f) {
+		    i__1 = j - 1;
+		    for (i__ = 1; i__ <= i__1; ++i__) {
+			i__2 = i__ + (j - 1) * b_dim1;
+			i__3 = i__ + (j - 1) * b_dim1;
+			i__4 = i__ + j * b_dim1;
+			q__2.r = x.r * b[i__4].r - x.i * b[i__4].i, q__2.i = 
+				x.r * b[i__4].i + x.i * b[i__4].r;
+			q__1.r = b[i__3].r - q__2.r, q__1.i = b[i__3].i - 
+				q__2.i;
+			b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L80: end column update without interchange */
+		    }
+		}
+	    }
+/* L90: end UL elimination step j */
+	}
+	i__1 = b_dim1 + 1;
+	if (b[i__1].r == 0.f && b[i__1].i == 0.f) {
+	    i__2 = b_dim1 + 1;
+	    b[i__2].r = *eps3, b[i__2].i = 0.f;
+	}
+
+	*(unsigned char *)trans = 'C';
+
+    }
+
+    *(unsigned char *)normin = 'N';
+    i__1 = *n;
+    for (its = 1; its <= i__1; ++its) {
+
+/* Solve U*x = scale*v for a right eigenvector */
+/* or U'*x = scale*v for a left eigenvector, */
+/* overwriting x on v. */
+
+	clatrs_("Upper", trans, "Nonunit", normin, n, &b[b_offset], ldb, &v[1]
+, &scale, &rwork[1], &ierr);
+	*(unsigned char *)normin = 'Y';
+
+/* Test for sufficient growth in the norm of v. */
+
+	vnorm = scasum_(n, &v[1], &c__1);
+	if (vnorm >= growto * scale) {
+	    goto L120;
+	}
+
+/* Choose new orthogonal starting vector and try again. */
+
+	rtemp = *eps3 / (rootn + 1.f);
+	v[1].r = *eps3, v[1].i = 0.f;
+	i__2 = *n;
+	for (i__ = 2; i__ <= i__2; ++i__) {
+	    i__3 = i__;
+	    v[i__3].r = rtemp, v[i__3].i = 0.f;
+/* L100: end fill of the restart vector */
+	}
+	i__2 = *n - its + 1;
+	i__3 = *n - its + 1;
+	r__1 = *eps3 * rootn;
+	q__1.r = v[i__3].r - r__1, q__1.i = v[i__3].i;
+	v[i__2].r = q__1.r, v[i__2].i = q__1.i;
+/* L110: end inverse-iteration loop (at most N attempts) */
+    }
+
+/* Failure to find eigenvector in N iterations. */
+
+    *info = 1;
+
+L120:
+
+/* Normalize eigenvector. */
+
+    i__ = icamax_(n, &v[1], &c__1);
+    i__1 = i__;
+    r__3 = 1.f / ((r__1 = v[i__1].r, dabs(r__1)) + (r__2 = r_imag(&v[i__]), 
+	    dabs(r__2)));
+    csscal_(n, &r__3, &v[1], &c__1);
+
+    return 0;
+
+/* End of CLAEIN */
+
+} /* claein_ */
diff --git a/contrib/libs/clapack/claesy.c b/contrib/libs/clapack/claesy.c
new file mode 100644
index 0000000000..9a60e4d3db
--- /dev/null
+++ b/contrib/libs/clapack/claesy.c
@@ -0,0 +1,206 @@
+/* claesy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__2 = 2;
+
+/* Subroutine */ int claesy_(complex *a, complex *b, complex *c__, complex *
+	rt1, complex *rt2, complex *evscal, complex *cs1, complex *sn1)
+{
+    /* System generated locals */
+    real r__1, r__2;
+    complex q__1, q__2, q__3, q__4, q__5, q__6, q__7;
+
+    /* Builtin functions */
+    double c_abs(complex *);
+    void pow_ci(complex *, complex *, integer *), c_sqrt(complex *, complex *)
+	    , c_div(complex *, complex *, complex *);
+
+    /* Local variables */
+    complex s, t;
+    real z__;
+    complex tmp;
+    real babs, tabs, evnorm;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAESY computes the eigendecomposition of a 2-by-2 symmetric matrix */
+/* ( ( A, B );( B, C ) ) */
+/* provided the norm of the matrix of eigenvectors is larger than */
+/* some threshold value. */
+
+/* RT1 is the eigenvalue of larger absolute value, and RT2 of */
+/* smaller absolute value. If the eigenvectors are computed, then */
+/* on return ( CS1, SN1 ) is the unit eigenvector for RT1, hence */
+
+/* [ CS1 SN1 ] . [ A B ] . [ CS1 -SN1 ] = [ RT1 0 ] */
+/* [ -SN1 CS1 ] [ B C ] [ SN1 CS1 ] [ 0 RT2 ] */
+
+/* Arguments */
+/* ========= */
+
+/* A (input) COMPLEX */
+/* The ( 1, 1 ) element of input matrix. */
+
+/* B (input) COMPLEX */
+/* The ( 1, 2 ) element of input matrix. The ( 2, 1 ) element */
+/* is also given by B, since the 2-by-2 matrix is symmetric. */
+
+/* C (input) COMPLEX */
+/* The ( 2, 2 ) element of input matrix. */
+
+/* RT1 (output) COMPLEX */
+/* The eigenvalue of larger modulus. */
+
+/* RT2 (output) COMPLEX */
+/* The eigenvalue of smaller modulus. */
+
+/* EVSCAL (output) COMPLEX */
+/* The complex value by which the eigenvector matrix was scaled */
+/* to make it orthonormal. If EVSCAL is zero, the eigenvectors */
+/* were not computed. This means one of two things: the 2-by-2 */
+/* matrix could not be diagonalized, or the norm of the matrix */
+/* of eigenvectors before scaling was larger than the threshold */
+/* value THRESH (set below). */
+
+/* CS1 (output) COMPLEX */
+/* SN1 (output) COMPLEX */
+/* If EVSCAL .NE. 0, ( CS1, SN1 ) is the unit right eigenvector */
+/* for RT1. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+
+/* Special case: The matrix is actually diagonal. */
+/* To avoid divide by zero later, we treat this case separately. */
+
+    if (c_abs(b) == 0.f) {
+	rt1->r = a->r, rt1->i = a->i;
+	rt2->r = c__->r, rt2->i = c__->i;
+	if (c_abs(rt1) < c_abs(rt2)) {
+	    tmp.r = rt1->r, tmp.i = rt1->i;
+	    rt1->r = rt2->r, rt1->i = rt2->i;
+	    rt2->r = tmp.r, rt2->i = tmp.i;
+	    cs1->r = 0.f, cs1->i = 0.f;
+	    sn1->r = 1.f, sn1->i = 0.f;
+	} else {
+	    cs1->r = 1.f, cs1->i = 0.f;
+	    sn1->r = 0.f, sn1->i = 0.f;
+	}
+    } else {
+
+/* Compute the eigenvalues and eigenvectors. */
+/* The characteristic equation is */
+/* lambda **2 - (A+C) lambda + (A*C - B*B) */
+/* and we solve it using the quadratic formula. */
+
+	q__2.r = a->r + c__->r, q__2.i = a->i + c__->i;
+	q__1.r = q__2.r * .5f, q__1.i = q__2.i * .5f;
+	s.r = q__1.r, s.i = q__1.i;
+	q__2.r = a->r - c__->r, q__2.i = a->i - c__->i;
+	q__1.r = q__2.r * .5f, q__1.i = q__2.i * .5f;
+	t.r = q__1.r, t.i = q__1.i;
+
+/* Take the square root carefully to avoid over/under flow. */
+
+	babs = c_abs(b);
+	tabs = c_abs(&t);
+	z__ = dmax(babs,tabs);
+	if (z__ > 0.f) {
+	    q__5.r = t.r / z__, q__5.i = t.i / z__;
+	    pow_ci(&q__4, &q__5, &c__2);
+	    q__7.r = b->r / z__, q__7.i = b->i / z__;
+	    pow_ci(&q__6, &q__7, &c__2);
+	    q__3.r = q__4.r + q__6.r, q__3.i = q__4.i + q__6.i;
+	    c_sqrt(&q__2, &q__3);
+	    q__1.r = z__ * q__2.r, q__1.i = z__ * q__2.i;
+	    t.r = q__1.r, t.i = q__1.i;
+	}
+
+/* Compute the two eigenvalues. RT1 and RT2 are exchanged */
+/* if necessary so that RT1 will have the greater magnitude. */
+
+	q__1.r = s.r + t.r, q__1.i = s.i + t.i;
+	rt1->r = q__1.r, rt1->i = q__1.i;
+	q__1.r = s.r - t.r, q__1.i = s.i - t.i;
+	rt2->r = q__1.r, rt2->i = q__1.i;
+	if (c_abs(rt1) < c_abs(rt2)) {
+	    tmp.r = rt1->r, tmp.i = rt1->i;
+	    rt1->r = rt2->r, rt1->i = rt2->i;
+	    rt2->r = tmp.r, rt2->i = tmp.i;
+	}
+
+/* Choose CS1 = 1 and SN1 to satisfy the first equation, then */
+/* scale the components of this eigenvector so that the matrix */
+/* of eigenvectors X satisfies X * X' = I . (No scaling is */
+/* done if the norm of the eigenvalue matrix is less than THRESH.) */
+
+	q__2.r = rt1->r - a->r, q__2.i = rt1->i - a->i;
+	c_div(&q__1, &q__2, b);
+	sn1->r = q__1.r, sn1->i = q__1.i;
+	tabs = c_abs(sn1);
+	if (tabs > 1.f) {
+/* Computing 2nd power, scaled by 1/TABS to avoid overflow in SN1**2 */
+	    r__2 = 1.f / tabs;
+	    r__1 = r__2 * r__2;
+	    q__5.r = sn1->r / tabs, q__5.i = sn1->i / tabs;
+	    pow_ci(&q__4, &q__5, &c__2);
+	    q__3.r = r__1 + q__4.r, q__3.i = q__4.i;
+	    c_sqrt(&q__2, &q__3);
+	    q__1.r = tabs * q__2.r, q__1.i = tabs * q__2.i;
+	    t.r = q__1.r, t.i = q__1.i;
+	} else {
+	    q__3.r = sn1->r * sn1->r - sn1->i * sn1->i, q__3.i = sn1->r * 
+		    sn1->i + sn1->i * sn1->r;
+	    q__2.r = q__3.r + 1.f, q__2.i = q__3.i + 0.f;
+	    c_sqrt(&q__1, &q__2);
+	    t.r = q__1.r, t.i = q__1.i;
+	}
+	evnorm = c_abs(&t);
+	if (evnorm >= .1f) {
+	    c_div(&q__1, &c_b1, &t);
+	    evscal->r = q__1.r, evscal->i = q__1.i;
+	    cs1->r = evscal->r, cs1->i = evscal->i;
+	    q__1.r = sn1->r * evscal->r - sn1->i * evscal->i, q__1.i = sn1->r 
+		    * evscal->i + sn1->i * evscal->r;
+	    sn1->r = q__1.r, sn1->i = q__1.i;
+	} else {
+	    evscal->r = 0.f, evscal->i = 0.f;
+	}
+    }
+    return 0;
+
+/* End of CLAESY */
+
+} /* claesy_ */
diff --git a/contrib/libs/clapack/claev2.c b/contrib/libs/clapack/claev2.c
new file mode 100644
index 0000000000..f2d1cc0eb3
--- /dev/null
+++ b/contrib/libs/clapack/claev2.c
@@ -0,0 +1,123 @@
+/* claev2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int claev2_(complex *a, complex *b, complex *c__, real *rt1, 
+	real *rt2, real *cs1, complex *sn1)
+{
+    /* System generated locals */
+    real r__1, r__2, r__3;
+    complex q__1, q__2;
+
+    /* Builtin functions */
+    double c_abs(complex *);
+    void r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    real t;
+    complex w;
+    extern /* Subroutine */ int slaev2_(real *, real *, real *, real *, real *
+, real *, real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAEV2 computes the eigendecomposition of a 2-by-2 Hermitian matrix */
+/* [ A B ] */
+/* [ CONJG(B) C ]. */
+/* On return, RT1 is the eigenvalue of larger absolute value, RT2 is the */
+/* eigenvalue of smaller absolute value, and (CS1,SN1) is the unit right */
+/* eigenvector for RT1, giving the decomposition */
+
+/* [ CS1 CONJG(SN1) ] [ A B ] [ CS1 -CONJG(SN1) ] = [ RT1 0 ] */
+/* [-SN1 CS1 ] [ CONJG(B) C ] [ SN1 CS1 ] [ 0 RT2 ]. */
+
+/* Arguments */
+/* ========= */
+
+/* A (input) COMPLEX */
+/* The (1,1) element of the 2-by-2 matrix. */
+
+/* B (input) COMPLEX */
+/* The (1,2) element and the conjugate of the (2,1) element of */
+/* the 2-by-2 matrix. */
+
+/* C (input) COMPLEX */
+/* The (2,2) element of the 2-by-2 matrix. */
+
+/* RT1 (output) REAL */
+/* The eigenvalue of larger absolute value. */
+
+/* RT2 (output) REAL */
+/* The eigenvalue of smaller absolute value. */
+
+/* CS1 (output) REAL */
+/* SN1 (output) COMPLEX */
+/* The vector (CS1, SN1) is a unit right eigenvector for RT1. */
+
+/* Further Details */
+/* =============== */
+
+/* RT1 is accurate to a few ulps barring over/underflow. */
+
+/* RT2 may be inaccurate if there is massive cancellation in the */
+/* determinant A*C-B*B; higher precision or correctly rounded or */
+/* correctly truncated arithmetic would be needed to compute RT2 */
+/* accurately in all cases. */
+
+/* CS1 and SN1 are accurate to a few ulps barring over/underflow. */
+
+/* Overflow is possible only if RT1 is within a factor of 5 of overflow. */
+/* Underflow is harmless if the input data is 0 or exceeds */
+/* underflow_threshold / macheps. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements: w = CONJG(B)/|B| makes the off-diagonal */
+/* real, so the real routine slaev2_ can solve the reduced problem .. */
+
+    if (c_abs(b) == 0.f) {
+	w.r = 1.f, w.i = 0.f;
+    } else {
+	r_cnjg(&q__2, b);
+	r__1 = c_abs(b);
+	q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
+	w.r = q__1.r, w.i = q__1.i;
+    }
+    r__1 = a->r;
+    r__2 = c_abs(b);
+    r__3 = c__->r;
+    slaev2_(&r__1, &r__2, &r__3, rt1, rt2, cs1, &t);
+    q__1.r = t * w.r, q__1.i = t * w.i;
+    sn1->r = q__1.r, sn1->i = q__1.i;
+    return 0;
+
+/* End of CLAEV2 */
+
+} /* claev2_ */
diff --git a/contrib/libs/clapack/clag2z.c b/contrib/libs/clapack/clag2z.c
new file mode 100644
index 0000000000..664b6ae9f2
--- /dev/null
+++ b/contrib/libs/clapack/clag2z.c
@@ -0,0 +1,101 @@
+/* clag2z.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int clag2z_(integer *m, integer *n, complex *sa, integer *
+	ldsa, doublecomplex *a, integer *lda, integer *info)
+{
+    /* System generated locals */
+    integer sa_dim1, sa_offset, a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, j;
+
+
+/* -- LAPACK PROTOTYPE auxiliary routine (version 3.1.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* August 2007 */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAG2Z converts a COMPLEX matrix, SA, to a COMPLEX*16 matrix, A. */
+
+/* Note that while it is possible to overflow while converting */
+/* from double to single, it is not possible to overflow when */
+/* converting from single to double. */
+
+/* This is an auxiliary routine so there is no argument checking. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of lines of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* SA (input) COMPLEX array, dimension (LDSA,N) */
+/* On entry, the M-by-N coefficient matrix SA. */
+
+/* LDSA (input) INTEGER */
+/* The leading dimension of the array SA. LDSA >= max(1,M). */
+
+/* A (output) COMPLEX*16 array, dimension (LDA,N) */
+/* On exit, the M-by-N coefficient matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* ========= */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    sa_dim1 = *ldsa;
+    sa_offset = 1 + sa_dim1;
+    sa -= sa_offset;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *m;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * a_dim1;
+	    i__4 = i__ + j * sa_dim1;
+	    a[i__3].r = sa[i__4].r, a[i__3].i = sa[i__4].i;
+/* L10: end row loop; single->double widening is exact, no check needed */
+	}
+/* L20: end column loop */
+    }
+    return 0;
+
+/* End of CLAG2Z */
+
+} /* clag2z_ */
diff --git a/contrib/libs/clapack/clags2.c b/contrib/libs/clapack/clags2.c
new file mode 100644
index 0000000000..61f6c9f8d7
--- /dev/null
+++ b/contrib/libs/clapack/clags2.c
@@ -0,0 +1,465 @@
+/* clags2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int clags2_(logical *upper, real *a1, complex *a2, real *a3,
+ real *b1, complex *b2, real *b3, real *csu, complex *snu, real *csv,
+ complex *snv, real *csq, complex *snq)
+{
+ /* System generated locals */
+ real r__1, r__2, r__3, r__4, r__5, r__6, r__7, r__8;
+ complex q__1, q__2, q__3, q__4, q__5;
+
+ /* Builtin functions */
+ double c_abs(complex *), r_imag(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ real a;
+ complex b, c__;
+ real d__;
+ complex r__, d1;
+ real s1, s2, fb, fc;
+ complex ua11, ua12, ua21, ua22, vb11, vb12, vb21, vb22;
+ real csl, csr, snl, snr, aua11, aua12, aua21, aua22, avb11, avb12, avb21,
+ avb22, ua11r, ua22r, vb11r, vb22r;
+ extern /* Subroutine */ int slasv2_(real *, real *, real *, real *, real *
+, real *, real *, real *, real *), clartg_(complex *, complex *,
+ real *, complex *, complex *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAGS2 computes 2-by-2 unitary matrices U, V and Q, such */
+/* that if ( UPPER ) then */
+
+/* U'*A*Q = U'*( A1 A2 )*Q = ( x 0 ) */
+/* ( 0 A3 ) ( x x ) */
+/* and */
+/* V'*B*Q = V'*( B1 B2 )*Q = ( x 0 ) */
+/* ( 0 B3 ) ( x x ) */
+
+/* or if ( .NOT.UPPER ) then */
+
+/* U'*A*Q = U'*( A1 0 )*Q = ( x x ) */
+/* ( A2 A3 ) ( 0 x ) */
+/* and */
+/* V'*B*Q = V'*( B1 0 )*Q = ( x x ) */
+/* ( B2 B3 ) ( 0 x ) */
+/* where */
+
+/* U = ( CSU SNU ), V = ( CSV SNV ), */
+/* ( -CONJG(SNU) CSU ) ( -CONJG(SNV) CSV ) */
+
+/* Q = ( CSQ SNQ ) */
+/* ( -CONJG(SNQ) CSQ ) */
+
+/* Z' denotes the conjugate transpose of Z. */
+
+/* The rows of the transformed A and B are parallel. Moreover, if the */
+/* input 2-by-2 matrix A is not zero, then the transformed (1,1) entry */
+/* of A is not zero. If the input matrices A and B are both not zero, */
+/* then the transformed (2,2) element of B is not zero, except when the */
+/* first rows of input A and B are parallel and the second rows are */
+/* zero. */
+
+/* Arguments */
+/* ========= */
+
+/* UPPER (input) LOGICAL */
+/* = .TRUE.: the input matrices A and B are upper triangular. */
+/* = .FALSE.: the input matrices A and B are lower triangular. */
+
+/* A1 (input) REAL */
+/* A2 (input) COMPLEX */
+/* A3 (input) REAL */
+/* On entry, A1, A2 and A3 are elements of the input 2-by-2 */
+/* upper (lower) triangular matrix A. */
+
+/* B1 (input) REAL */
+/* B2 (input) COMPLEX */
+/* B3 (input) REAL */
+/* On entry, B1, B2 and B3 are elements of the input 2-by-2 */
+/* upper (lower) triangular matrix B. */
+
+/* CSU (output) REAL */
+/* SNU (output) COMPLEX */
+/* The desired unitary matrix U. */
+
+/* CSV (output) REAL */
+/* SNV (output) COMPLEX */
+/* The desired unitary matrix V. */
+
+/* CSQ (output) REAL */
+/* SNQ (output) COMPLEX */
+/* The desired unitary matrix Q. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ if (*upper) {
+
+/* Input matrices A and B are upper triangular matrices */
+
+/* Form matrix C = A*adj(B) = ( a b ) */
+/* ( 0 d ) */
+
+ a = *a1 * *b3;
+ d__ = *a3 * *b1;
+ q__2.r = *b1 * a2->r, q__2.i = *b1 * a2->i;
+ q__3.r = *a1 * b2->r, q__3.i = *a1 * b2->i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+ b.r = q__1.r, b.i = q__1.i;
+ fb = c_abs(&b);
+
+/* Transform complex 2-by-2 matrix C to real matrix by unitary */
+/* diagonal matrix diag(1,D1). */
+
+ d1.r = 1.f, d1.i = 0.f;
+ if (fb != 0.f) {
+ q__1.r = b.r / fb, q__1.i = b.i / fb;
+ d1.r = q__1.r, d1.i = q__1.i;
+ }
+
+/* The SVD of real 2 by 2 triangular C */
+
+/* ( CSL -SNL )*( A B )*( CSR SNR ) = ( R 0 ) */
+/* ( SNL CSL ) ( 0 D ) ( -SNR CSR ) ( 0 T ) */
+
+ slasv2_(&a, &fb, &d__, &s1, &s2, &snr, &csr, &snl, &csl);
+
+ if (dabs(csl) >= dabs(snl) || dabs(csr) >= dabs(snr)) {
+
+/* Compute the (1,1) and (1,2) elements of U'*A and V'*B, */
+/* and (1,2) element of |U|'*|A| and |V|'*|B|. */
+
+ ua11r = csl * *a1;
+ q__2.r = csl * a2->r, q__2.i = csl * a2->i;
+ q__4.r = snl * d1.r, q__4.i = snl * d1.i;
+ q__3.r = *a3 * q__4.r, q__3.i = *a3 * q__4.i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ ua12.r = q__1.r, ua12.i = q__1.i;
+
+ vb11r = csr * *b1;
+ q__2.r = csr * b2->r, q__2.i = csr * b2->i;
+ q__4.r = snr * d1.r, q__4.i = snr * d1.i;
+ q__3.r = *b3 * q__4.r, q__3.i = *b3 * q__4.i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ vb12.r = q__1.r, vb12.i = q__1.i;
+
+ aua12 = dabs(csl) * ((r__1 = a2->r, dabs(r__1)) + (r__2 = r_imag(
+ a2), dabs(r__2))) + dabs(snl) * dabs(*a3);
+ avb12 = dabs(csr) * ((r__1 = b2->r, dabs(r__1)) + (r__2 = r_imag(
+ b2), dabs(r__2))) + dabs(snr) * dabs(*b3);
+
+/* zero (1,2) elements of U'*A and V'*B */
+
+ if (dabs(ua11r) + ((r__1 = ua12.r, dabs(r__1)) + (r__2 = r_imag(&
+ ua12), dabs(r__2))) == 0.f) {
+ q__2.r = vb11r, q__2.i = 0.f;
+ q__1.r = -q__2.r, q__1.i = -q__2.i;
+ r_cnjg(&q__3, &vb12);
+ clartg_(&q__1, &q__3, csq, snq, &r__);
+ } else if (dabs(vb11r) + ((r__1 = vb12.r, dabs(r__1)) + (r__2 =
+ r_imag(&vb12), dabs(r__2))) == 0.f) {
+ q__2.r = ua11r, q__2.i = 0.f;
+ q__1.r = -q__2.r, q__1.i = -q__2.i;
+ r_cnjg(&q__3, &ua12);
+ clartg_(&q__1, &q__3, csq, snq, &r__);
+ } else if (aua12 / (dabs(ua11r) + ((r__1 = ua12.r, dabs(r__1)) + (
+ r__2 = r_imag(&ua12), dabs(r__2)))) <= avb12 / (dabs(
+ vb11r) + ((r__3 = vb12.r, dabs(r__3)) + (r__4 = r_imag(&
+ vb12), dabs(r__4))))) {
+ q__2.r = ua11r, q__2.i = 0.f;
+ q__1.r = -q__2.r, q__1.i = -q__2.i;
+ r_cnjg(&q__3, &ua12);
+ clartg_(&q__1, &q__3, csq, snq, &r__);
+ } else {
+ q__2.r = vb11r, q__2.i = 0.f;
+ q__1.r = -q__2.r, q__1.i = -q__2.i;
+ r_cnjg(&q__3, &vb12);
+ clartg_(&q__1, &q__3, csq, snq, &r__);
+ }
+
+ *csu = csl;
+ q__2.r = -d1.r, q__2.i = -d1.i;
+ q__1.r = snl * q__2.r, q__1.i = snl * q__2.i;
+ snu->r = q__1.r, snu->i = q__1.i;
+ *csv = csr;
+ q__2.r = -d1.r, q__2.i = -d1.i;
+ q__1.r = snr * q__2.r, q__1.i = snr * q__2.i;
+ snv->r = q__1.r, snv->i = q__1.i;
+
+ } else {
+
+/* Compute the (2,1) and (2,2) elements of U'*A and V'*B, */
+/* and (2,2) element of |U|'*|A| and |V|'*|B|. */
+
+ r_cnjg(&q__4, &d1);
+ q__3.r = -q__4.r, q__3.i = -q__4.i;
+ q__2.r = snl * q__3.r, q__2.i = snl * q__3.i;
+ q__1.r = *a1 * q__2.r, q__1.i = *a1 * q__2.i;
+ ua21.r = q__1.r, ua21.i = q__1.i;
+ r_cnjg(&q__5, &d1);
+ q__4.r = -q__5.r, q__4.i = -q__5.i;
+ q__3.r = snl * q__4.r, q__3.i = snl * q__4.i;
+ q__2.r = q__3.r * a2->r - q__3.i * a2->i, q__2.i = q__3.r * a2->i
+ + q__3.i * a2->r;
+ r__1 = csl * *a3;
+ q__1.r = q__2.r + r__1, q__1.i = q__2.i;
+ ua22.r = q__1.r, ua22.i = q__1.i;
+
+ r_cnjg(&q__4, &d1);
+ q__3.r = -q__4.r, q__3.i = -q__4.i;
+ q__2.r = snr * q__3.r, q__2.i = snr * q__3.i;
+ q__1.r = *b1 * q__2.r, q__1.i = *b1 * q__2.i;
+ vb21.r = q__1.r, vb21.i = q__1.i;
+ r_cnjg(&q__5, &d1);
+ q__4.r = -q__5.r, q__4.i = -q__5.i;
+ q__3.r = snr * q__4.r, q__3.i = snr * q__4.i;
+ q__2.r = q__3.r * b2->r - q__3.i * b2->i, q__2.i = q__3.r * b2->i
+ + q__3.i * b2->r;
+ r__1 = csr * *b3;
+ q__1.r = q__2.r + r__1, q__1.i = q__2.i;
+ vb22.r = q__1.r, vb22.i = q__1.i;
+
+ aua22 = dabs(snl) * ((r__1 = a2->r, dabs(r__1)) + (r__2 = r_imag(
+ a2), dabs(r__2))) + dabs(csl) * dabs(*a3);
+ avb22 = dabs(snr) * ((r__1 = b2->r, dabs(r__1)) + (r__2 = r_imag(
+ b2), dabs(r__2))) + dabs(csr) * dabs(*b3);
+
+/* zero (2,2) elements of U'*A and V'*B, and then swap. */
+
+ if ((r__1 = ua21.r, dabs(r__1)) + (r__2 = r_imag(&ua21), dabs(
+ r__2)) + ((r__3 = ua22.r, dabs(r__3)) + (r__4 = r_imag(&
+ ua22), dabs(r__4))) == 0.f) {
+ r_cnjg(&q__2, &vb21);
+ q__1.r = -q__2.r, q__1.i = -q__2.i;
+ r_cnjg(&q__3, &vb22);
+ clartg_(&q__1, &q__3, csq, snq, &r__);
+ } else if ((r__1 = vb21.r, dabs(r__1)) + (r__2 = r_imag(&vb21),
+ dabs(r__2)) + c_abs(&vb22) == 0.f) {
+ r_cnjg(&q__2, &ua21);
+ q__1.r = -q__2.r, q__1.i = -q__2.i;
+ r_cnjg(&q__3, &ua22);
+ clartg_(&q__1, &q__3, csq, snq, &r__);
+ } else if (aua22 / ((r__1 = ua21.r, dabs(r__1)) + (r__2 = r_imag(&
+ ua21), dabs(r__2)) + ((r__3 = ua22.r, dabs(r__3)) + (r__4
+ = r_imag(&ua22), dabs(r__4)))) <= avb22 / ((r__5 = vb21.r,
+ dabs(r__5)) + (r__6 = r_imag(&vb21), dabs(r__6)) + ((
+ r__7 = vb22.r, dabs(r__7)) + (r__8 = r_imag(&vb22), dabs(
+ r__8))))) {
+ r_cnjg(&q__2, &ua21);
+ q__1.r = -q__2.r, q__1.i = -q__2.i;
+ r_cnjg(&q__3, &ua22);
+ clartg_(&q__1, &q__3, csq, snq, &r__);
+ } else {
+ r_cnjg(&q__2, &vb21);
+ q__1.r = -q__2.r, q__1.i = -q__2.i;
+ r_cnjg(&q__3, &vb22);
+ clartg_(&q__1, &q__3, csq, snq, &r__);
+ }
+
+ *csu = snl;
+ q__1.r = csl * d1.r, q__1.i = csl * d1.i;
+ snu->r = q__1.r, snu->i = q__1.i;
+ *csv = snr;
+ q__1.r = csr * d1.r, q__1.i = csr * d1.i;
+ snv->r = q__1.r, snv->i = q__1.i;
+
+ }
+
+ } else {
+
+/* Input matrices A and B are lower triangular matrices */
+
+/* Form matrix C = A*adj(B) = ( a 0 ) */
+/* ( c d ) */
+
+ a = *a1 * *b3;
+ d__ = *a3 * *b1;
+ q__2.r = *b3 * a2->r, q__2.i = *b3 * a2->i;
+ q__3.r = *a3 * b2->r, q__3.i = *a3 * b2->i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+ c__.r = q__1.r, c__.i = q__1.i;
+ fc = c_abs(&c__);
+
+/* Transform complex 2-by-2 matrix C to real matrix by unitary */
+/* diagonal matrix diag(d1,1). */
+
+ d1.r = 1.f, d1.i = 0.f;
+ if (fc != 0.f) {
+ q__1.r = c__.r / fc, q__1.i = c__.i / fc;
+ d1.r = q__1.r, d1.i = q__1.i;
+ }
+
+/* The SVD of real 2 by 2 triangular C */
+
+/* ( CSL -SNL )*( A 0 )*( CSR SNR ) = ( R 0 ) */
+/* ( SNL CSL ) ( C D ) ( -SNR CSR ) ( 0 T ) */
+
+ slasv2_(&a, &fc, &d__, &s1, &s2, &snr, &csr, &snl, &csl);
+
+ if (dabs(csr) >= dabs(snr) || dabs(csl) >= dabs(snl)) {
+
+/* Compute the (2,1) and (2,2) elements of U'*A and V'*B, */
+/* and (2,1) element of |U|'*|A| and |V|'*|B|. */
+
+ q__4.r = -d1.r, q__4.i = -d1.i;
+ q__3.r = snr * q__4.r, q__3.i = snr * q__4.i;
+ q__2.r = *a1 * q__3.r, q__2.i = *a1 * q__3.i;
+ q__5.r = csr * a2->r, q__5.i = csr * a2->i;
+ q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+ ua21.r = q__1.r, ua21.i = q__1.i;
+ ua22r = csr * *a3;
+
+ q__4.r = -d1.r, q__4.i = -d1.i;
+ q__3.r = snl * q__4.r, q__3.i = snl * q__4.i;
+ q__2.r = *b1 * q__3.r, q__2.i = *b1 * q__3.i;
+ q__5.r = csl * b2->r, q__5.i = csl * b2->i;
+ q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+ vb21.r = q__1.r, vb21.i = q__1.i;
+ vb22r = csl * *b3;
+
+ aua21 = dabs(snr) * dabs(*a1) + dabs(csr) * ((r__1 = a2->r, dabs(
+ r__1)) + (r__2 = r_imag(a2), dabs(r__2)));
+ avb21 = dabs(snl) * dabs(*b1) + dabs(csl) * ((r__1 = b2->r, dabs(
+ r__1)) + (r__2 = r_imag(b2), dabs(r__2)));
+
+/* zero (2,1) elements of U'*A and V'*B. */
+
+ if ((r__1 = ua21.r, dabs(r__1)) + (r__2 = r_imag(&ua21), dabs(
+ r__2)) + dabs(ua22r) == 0.f) {
+ q__1.r = vb22r, q__1.i = 0.f;
+ clartg_(&q__1, &vb21, csq, snq, &r__);
+ } else if ((r__1 = vb21.r, dabs(r__1)) + (r__2 = r_imag(&vb21),
+ dabs(r__2)) + dabs(vb22r) == 0.f) {
+ q__1.r = ua22r, q__1.i = 0.f;
+ clartg_(&q__1, &ua21, csq, snq, &r__);
+ } else if (aua21 / ((r__1 = ua21.r, dabs(r__1)) + (r__2 = r_imag(&
+ ua21), dabs(r__2)) + dabs(ua22r)) <= avb21 / ((r__3 =
+ vb21.r, dabs(r__3)) + (r__4 = r_imag(&vb21), dabs(r__4))
+ + dabs(vb22r))) {
+ q__1.r = ua22r, q__1.i = 0.f;
+ clartg_(&q__1, &ua21, csq, snq, &r__);
+ } else {
+ q__1.r = vb22r, q__1.i = 0.f;
+ clartg_(&q__1, &vb21, csq, snq, &r__);
+ }
+
+ *csu = csr;
+ r_cnjg(&q__3, &d1);
+ q__2.r = -q__3.r, q__2.i = -q__3.i;
+ q__1.r = snr * q__2.r, q__1.i = snr * q__2.i;
+ snu->r = q__1.r, snu->i = q__1.i;
+ *csv = csl;
+ r_cnjg(&q__3, &d1);
+ q__2.r = -q__3.r, q__2.i = -q__3.i;
+ q__1.r = snl * q__2.r, q__1.i = snl * q__2.i;
+ snv->r = q__1.r, snv->i = q__1.i;
+
+ } else {
+
+/* Compute the (1,1) and (1,2) elements of U'*A and V'*B, */
+/* and (1,1) element of |U|'*|A| and |V|'*|B|. */
+
+ r__1 = csr * *a1;
+ r_cnjg(&q__4, &d1);
+ q__3.r = snr * q__4.r, q__3.i = snr * q__4.i;
+ q__2.r = q__3.r * a2->r - q__3.i * a2->i, q__2.i = q__3.r * a2->i
+ + q__3.i * a2->r;
+ q__1.r = r__1 + q__2.r, q__1.i = q__2.i;
+ ua11.r = q__1.r, ua11.i = q__1.i;
+ r_cnjg(&q__3, &d1);
+ q__2.r = snr * q__3.r, q__2.i = snr * q__3.i;
+ q__1.r = *a3 * q__2.r, q__1.i = *a3 * q__2.i;
+ ua12.r = q__1.r, ua12.i = q__1.i;
+
+ r__1 = csl * *b1;
+ r_cnjg(&q__4, &d1);
+ q__3.r = snl * q__4.r, q__3.i = snl * q__4.i;
+ q__2.r = q__3.r * b2->r - q__3.i * b2->i, q__2.i = q__3.r * b2->i
+ + q__3.i * b2->r;
+ q__1.r = r__1 + q__2.r, q__1.i = q__2.i;
+ vb11.r = q__1.r, vb11.i = q__1.i;
+ r_cnjg(&q__3, &d1);
+ q__2.r = snl * q__3.r, q__2.i = snl * q__3.i;
+ q__1.r = *b3 * q__2.r, q__1.i = *b3 * q__2.i;
+ vb12.r = q__1.r, vb12.i = q__1.i;
+
+ aua11 = dabs(csr) * dabs(*a1) + dabs(snr) * ((r__1 = a2->r, dabs(
+ r__1)) + (r__2 = r_imag(a2), dabs(r__2)));
+ avb11 = dabs(csl) * dabs(*b1) + dabs(snl) * ((r__1 = b2->r, dabs(
+ r__1)) + (r__2 = r_imag(b2), dabs(r__2)));
+
+/* zero (1,1) elements of U'*A and V'*B, and then swap. */
+
+ if ((r__1 = ua11.r, dabs(r__1)) + (r__2 = r_imag(&ua11), dabs(
+ r__2)) + ((r__3 = ua12.r, dabs(r__3)) + (r__4 = r_imag(&
+ ua12), dabs(r__4))) == 0.f) {
+ clartg_(&vb12, &vb11, csq, snq, &r__);
+ } else if ((r__1 = vb11.r, dabs(r__1)) + (r__2 = r_imag(&vb11),
+ dabs(r__2)) + ((r__3 = vb12.r, dabs(r__3)) + (r__4 =
+ r_imag(&vb12), dabs(r__4))) == 0.f) {
+ clartg_(&ua12, &ua11, csq, snq, &r__);
+ } else if (aua11 / ((r__1 = ua11.r, dabs(r__1)) + (r__2 = r_imag(&
+ ua11), dabs(r__2)) + ((r__3 = ua12.r, dabs(r__3)) + (r__4
+ = r_imag(&ua12), dabs(r__4)))) <= avb11 / ((r__5 = vb11.r,
+ dabs(r__5)) + (r__6 = r_imag(&vb11), dabs(r__6)) + ((
+ r__7 = vb12.r, dabs(r__7)) + (r__8 = r_imag(&vb12), dabs(
+ r__8))))) {
+ clartg_(&ua12, &ua11, csq, snq, &r__);
+ } else {
+ clartg_(&vb12, &vb11, csq, snq, &r__);
+ }
+
+ *csu = snr;
+ r_cnjg(&q__2, &d1);
+ q__1.r = csr * q__2.r, q__1.i = csr * q__2.i;
+ snu->r = q__1.r, snu->i = q__1.i;
+ *csv = snl;
+ r_cnjg(&q__2, &d1);
+ q__1.r = csl * q__2.r, q__1.i = csl * q__2.i;
+ snv->r = q__1.r, snv->i = q__1.i;
+
+ }
+
+ }
+
+ return 0;
+
+/* End of CLAGS2 */
+
+} /* clags2_ */
diff --git a/contrib/libs/clapack/clagtm.c b/contrib/libs/clapack/clagtm.c
new file mode 100644
index 0000000000..f50ddf04c5
--- /dev/null
+++ b/contrib/libs/clapack/clagtm.c
@@ -0,0 +1,598 @@
+/* clagtm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int clagtm_(char *trans, integer *n, integer *nrhs, real *
+ alpha, complex *dl, complex *d__, complex *du, complex *x, integer *
+ ldx, real *beta, complex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6, i__7, i__8, i__9, i__10;
+ complex q__1, q__2, q__3, q__4, q__5, q__6, q__7, q__8, q__9;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAGTM performs a matrix-vector product of the form */
+
+/* B := alpha * A * X + beta * B */
+
+/* where A is a tridiagonal matrix of order N, B and X are N by NRHS */
+/* matrices, and alpha and beta are real scalars, each of which may be */
+/* 0., 1., or -1. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the operation applied to A. */
+/* = 'N': No transpose, B := alpha * A * X + beta * B */
+/* = 'T': Transpose, B := alpha * A**T * X + beta * B */
+/* = 'C': Conjugate transpose, B := alpha * A**H * X + beta * B */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices X and B. */
+
+/* ALPHA (input) REAL */
+/* The scalar alpha. ALPHA must be 0., 1., or -1.; otherwise, */
+/* it is assumed to be 0. */
+
+/* DL (input) COMPLEX array, dimension (N-1) */
+/* The (n-1) sub-diagonal elements of T. */
+
+/* D (input) COMPLEX array, dimension (N) */
+/* The diagonal elements of T. */
+
+/* DU (input) COMPLEX array, dimension (N-1) */
+/* The (n-1) super-diagonal elements of T. */
+
+/* X (input) COMPLEX array, dimension (LDX,NRHS) */
+/* The N by NRHS matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(N,1). */
+
+/* BETA (input) REAL */
+/* The scalar beta. BETA must be 0., 1., or -1.; otherwise, */
+/* it is assumed to be 1. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the N by NRHS matrix B. */
+/* On exit, B is overwritten by the matrix expression */
+/* B := alpha * A * X + beta * B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(N,1). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --dl;
+ --d__;
+ --du;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Multiply B by BETA if BETA.NE.1. */
+/* (Any BETA other than 0.f and -1.f is treated as 1.f: B is left as is.) */
+
+ if (*beta == 0.f) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0.f, b[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if (*beta == -1.f) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ q__1.r = -b[i__4].r, q__1.i = -b[i__4].i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+
+/* (Any ALPHA other than 1.f and -1.f is treated as 0.f: only the BETA */
+/* scaling above is applied.) */
+
+ if (*alpha == 1.f) {
+ if (lsame_(trans, "N")) {
+
+/* Compute B := B + A*X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ i__4 = j * x_dim1 + 1;
+ q__2.r = d__[1].r * x[i__4].r - d__[1].i * x[i__4].i,
+ q__2.i = d__[1].r * x[i__4].i + d__[1].i * x[i__4]
+ .r;
+ q__1.r = b[i__3].r + q__2.r, q__1.i = b[i__3].i + q__2.i;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ } else {
+/* First row: B(1,j) += D(1)*X(1,j) + DU(1)*X(2,j) */
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ i__4 = j * x_dim1 + 1;
+ q__3.r = d__[1].r * x[i__4].r - d__[1].i * x[i__4].i,
+ q__3.i = d__[1].r * x[i__4].i + d__[1].i * x[i__4]
+ .r;
+ q__2.r = b[i__3].r + q__3.r, q__2.i = b[i__3].i + q__3.i;
+ i__5 = j * x_dim1 + 2;
+ q__4.r = du[1].r * x[i__5].r - du[1].i * x[i__5].i,
+ q__4.i = du[1].r * x[i__5].i + du[1].i * x[i__5]
+ .r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* Last row: B(n,j) += DL(n-1)*X(n-1,j) + D(n)*X(n,j) */
+ i__2 = *n + j * b_dim1;
+ i__3 = *n + j * b_dim1;
+ i__4 = *n - 1;
+ i__5 = *n - 1 + j * x_dim1;
+ q__3.r = dl[i__4].r * x[i__5].r - dl[i__4].i * x[i__5].i,
+ q__3.i = dl[i__4].r * x[i__5].i + dl[i__4].i * x[
+ i__5].r;
+ q__2.r = b[i__3].r + q__3.r, q__2.i = b[i__3].i + q__3.i;
+ i__6 = *n;
+ i__7 = *n + j * x_dim1;
+ q__4.r = d__[i__6].r * x[i__7].r - d__[i__6].i * x[i__7]
+ .i, q__4.i = d__[i__6].r * x[i__7].i + d__[i__6]
+ .i * x[i__7].r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* Interior rows: B(i,j) += DL(i-1)*X(i-1,j) + D(i)*X(i,j) + DU(i)*X(i+1,j) */
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ - 1;
+ i__6 = i__ - 1 + j * x_dim1;
+ q__4.r = dl[i__5].r * x[i__6].r - dl[i__5].i * x[i__6]
+ .i, q__4.i = dl[i__5].r * x[i__6].i + dl[i__5]
+ .i * x[i__6].r;
+ q__3.r = b[i__4].r + q__4.r, q__3.i = b[i__4].i +
+ q__4.i;
+ i__7 = i__;
+ i__8 = i__ + j * x_dim1;
+ q__5.r = d__[i__7].r * x[i__8].r - d__[i__7].i * x[
+ i__8].i, q__5.i = d__[i__7].r * x[i__8].i +
+ d__[i__7].i * x[i__8].r;
+ q__2.r = q__3.r + q__5.r, q__2.i = q__3.i + q__5.i;
+ i__9 = i__;
+ i__10 = i__ + 1 + j * x_dim1;
+ q__6.r = du[i__9].r * x[i__10].r - du[i__9].i * x[
+ i__10].i, q__6.i = du[i__9].r * x[i__10].i +
+ du[i__9].i * x[i__10].r;
+ q__1.r = q__2.r + q__6.r, q__1.i = q__2.i + q__6.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else if (lsame_(trans, "T")) {
+
+/* Compute B := B + A**T * X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ i__4 = j * x_dim1 + 1;
+ q__2.r = d__[1].r * x[i__4].r - d__[1].i * x[i__4].i,
+ q__2.i = d__[1].r * x[i__4].i + d__[1].i * x[i__4]
+ .r;
+ q__1.r = b[i__3].r + q__2.r, q__1.i = b[i__3].i + q__2.i;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ } else {
+/* First row: B(1,j) += D(1)*X(1,j) + DL(1)*X(2,j) */
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ i__4 = j * x_dim1 + 1;
+ q__3.r = d__[1].r * x[i__4].r - d__[1].i * x[i__4].i,
+ q__3.i = d__[1].r * x[i__4].i + d__[1].i * x[i__4]
+ .r;
+ q__2.r = b[i__3].r + q__3.r, q__2.i = b[i__3].i + q__3.i;
+ i__5 = j * x_dim1 + 2;
+ q__4.r = dl[1].r * x[i__5].r - dl[1].i * x[i__5].i,
+ q__4.i = dl[1].r * x[i__5].i + dl[1].i * x[i__5]
+ .r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* Last row: B(n,j) += DU(n-1)*X(n-1,j) + D(n)*X(n,j) */
+ i__2 = *n + j * b_dim1;
+ i__3 = *n + j * b_dim1;
+ i__4 = *n - 1;
+ i__5 = *n - 1 + j * x_dim1;
+ q__3.r = du[i__4].r * x[i__5].r - du[i__4].i * x[i__5].i,
+ q__3.i = du[i__4].r * x[i__5].i + du[i__4].i * x[
+ i__5].r;
+ q__2.r = b[i__3].r + q__3.r, q__2.i = b[i__3].i + q__3.i;
+ i__6 = *n;
+ i__7 = *n + j * x_dim1;
+ q__4.r = d__[i__6].r * x[i__7].r - d__[i__6].i * x[i__7]
+ .i, q__4.i = d__[i__6].r * x[i__7].i + d__[i__6]
+ .i * x[i__7].r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* Interior rows: B(i,j) += DU(i-1)*X(i-1,j) + D(i)*X(i,j) + DL(i)*X(i+1,j) */
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ - 1;
+ i__6 = i__ - 1 + j * x_dim1;
+ q__4.r = du[i__5].r * x[i__6].r - du[i__5].i * x[i__6]
+ .i, q__4.i = du[i__5].r * x[i__6].i + du[i__5]
+ .i * x[i__6].r;
+ q__3.r = b[i__4].r + q__4.r, q__3.i = b[i__4].i +
+ q__4.i;
+ i__7 = i__;
+ i__8 = i__ + j * x_dim1;
+ q__5.r = d__[i__7].r * x[i__8].r - d__[i__7].i * x[
+ i__8].i, q__5.i = d__[i__7].r * x[i__8].i +
+ d__[i__7].i * x[i__8].r;
+ q__2.r = q__3.r + q__5.r, q__2.i = q__3.i + q__5.i;
+ i__9 = i__;
+ i__10 = i__ + 1 + j * x_dim1;
+ q__6.r = dl[i__9].r * x[i__10].r - dl[i__9].i * x[
+ i__10].i, q__6.i = dl[i__9].r * x[i__10].i +
+ dl[i__9].i * x[i__10].r;
+ q__1.r = q__2.r + q__6.r, q__1.i = q__2.i + q__6.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L70: */
+ }
+ }
+/* L80: */
+ }
+ } else if (lsame_(trans, "C")) {
+
+/* Compute B := B + A**H * X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ r_cnjg(&q__3, &d__[1]);
+ i__4 = j * x_dim1 + 1;
+ q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i, q__2.i =
+ q__3.r * x[i__4].i + q__3.i * x[i__4].r;
+ q__1.r = b[i__3].r + q__2.r, q__1.i = b[i__3].i + q__2.i;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ } else {
+/* First row: B(1,j) += conj(D(1))*X(1,j) + conj(DL(1))*X(2,j) */
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ r_cnjg(&q__4, &d__[1]);
+ i__4 = j * x_dim1 + 1;
+ q__3.r = q__4.r * x[i__4].r - q__4.i * x[i__4].i, q__3.i =
+ q__4.r * x[i__4].i + q__4.i * x[i__4].r;
+ q__2.r = b[i__3].r + q__3.r, q__2.i = b[i__3].i + q__3.i;
+ r_cnjg(&q__6, &dl[1]);
+ i__5 = j * x_dim1 + 2;
+ q__5.r = q__6.r * x[i__5].r - q__6.i * x[i__5].i, q__5.i =
+ q__6.r * x[i__5].i + q__6.i * x[i__5].r;
+ q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* Last row: B(n,j) += conj(DU(n-1))*X(n-1,j) + conj(D(n))*X(n,j) */
+ i__2 = *n + j * b_dim1;
+ i__3 = *n + j * b_dim1;
+ r_cnjg(&q__4, &du[*n - 1]);
+ i__4 = *n - 1 + j * x_dim1;
+ q__3.r = q__4.r * x[i__4].r - q__4.i * x[i__4].i, q__3.i =
+ q__4.r * x[i__4].i + q__4.i * x[i__4].r;
+ q__2.r = b[i__3].r + q__3.r, q__2.i = b[i__3].i + q__3.i;
+ r_cnjg(&q__6, &d__[*n]);
+ i__5 = *n + j * x_dim1;
+ q__5.r = q__6.r * x[i__5].r - q__6.i * x[i__5].i, q__5.i =
+ q__6.r * x[i__5].i + q__6.i * x[i__5].r;
+ q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* Interior rows: B(i,j) += conj(DU(i-1))*X(i-1,j) + conj(D(i))*X(i,j) */
+/* + conj(DL(i))*X(i+1,j) */
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ r_cnjg(&q__5, &du[i__ - 1]);
+ i__5 = i__ - 1 + j * x_dim1;
+ q__4.r = q__5.r * x[i__5].r - q__5.i * x[i__5].i,
+ q__4.i = q__5.r * x[i__5].i + q__5.i * x[i__5]
+ .r;
+ q__3.r = b[i__4].r + q__4.r, q__3.i = b[i__4].i +
+ q__4.i;
+ r_cnjg(&q__7, &d__[i__]);
+ i__6 = i__ + j * x_dim1;
+ q__6.r = q__7.r * x[i__6].r - q__7.i * x[i__6].i,
+ q__6.i = q__7.r * x[i__6].i + q__7.i * x[i__6]
+ .r;
+ q__2.r = q__3.r + q__6.r, q__2.i = q__3.i + q__6.i;
+ r_cnjg(&q__9, &dl[i__]);
+ i__7 = i__ + 1 + j * x_dim1;
+ q__8.r = q__9.r * x[i__7].r - q__9.i * x[i__7].i,
+ q__8.i = q__9.r * x[i__7].i + q__9.i * x[i__7]
+ .r;
+ q__1.r = q__2.r + q__8.r, q__1.i = q__2.i + q__8.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L90: */
+ }
+ }
+/* L100: */
+ }
+ }
+ } else if (*alpha == -1.f) {
+ if (lsame_(trans, "N")) {
+
+/* Compute B := B - A*X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ i__4 = j * x_dim1 + 1;
+ q__2.r = d__[1].r * x[i__4].r - d__[1].i * x[i__4].i,
+ q__2.i = d__[1].r * x[i__4].i + d__[1].i * x[i__4]
+ .r;
+ q__1.r = b[i__3].r - q__2.r, q__1.i = b[i__3].i - q__2.i;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ } else {
+/* First row: B(1,j) -= D(1)*X(1,j) + DU(1)*X(2,j) */
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ i__4 = j * x_dim1 + 1;
+ q__3.r = d__[1].r * x[i__4].r - d__[1].i * x[i__4].i,
+ q__3.i = d__[1].r * x[i__4].i + d__[1].i * x[i__4]
+ .r;
+ q__2.r = b[i__3].r - q__3.r, q__2.i = b[i__3].i - q__3.i;
+ i__5 = j * x_dim1 + 2;
+ q__4.r = du[1].r * x[i__5].r - du[1].i * x[i__5].i,
+ q__4.i = du[1].r * x[i__5].i + du[1].i * x[i__5]
+ .r;
+ q__1.r = q__2.r - q__4.r, q__1.i = q__2.i - q__4.i;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* Last row: B(n,j) -= DL(n-1)*X(n-1,j) + D(n)*X(n,j) */
+ i__2 = *n + j * b_dim1;
+ i__3 = *n + j * b_dim1;
+ i__4 = *n - 1;
+ i__5 = *n - 1 + j * x_dim1;
+ q__3.r = dl[i__4].r * x[i__5].r - dl[i__4].i * x[i__5].i,
+ q__3.i = dl[i__4].r * x[i__5].i + dl[i__4].i * x[
+ i__5].r;
+ q__2.r = b[i__3].r - q__3.r, q__2.i = b[i__3].i - q__3.i;
+ i__6 = *n;
+ i__7 = *n + j * x_dim1;
+ q__4.r = d__[i__6].r * x[i__7].r - d__[i__6].i * x[i__7]
+ .i, q__4.i = d__[i__6].r * x[i__7].i + d__[i__6]
+ .i * x[i__7].r;
+ q__1.r = q__2.r - q__4.r, q__1.i = q__2.i - q__4.i;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* Interior rows: B(i,j) -= DL(i-1)*X(i-1,j) + D(i)*X(i,j) + DU(i)*X(i+1,j) */
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ - 1;
+ i__6 = i__ - 1 + j * x_dim1;
+ q__4.r = dl[i__5].r * x[i__6].r - dl[i__5].i * x[i__6]
+ .i, q__4.i = dl[i__5].r * x[i__6].i + dl[i__5]
+ .i * x[i__6].r;
+ q__3.r = b[i__4].r - q__4.r, q__3.i = b[i__4].i -
+ q__4.i;
+ i__7 = i__;
+ i__8 = i__ + j * x_dim1;
+ q__5.r = d__[i__7].r * x[i__8].r - d__[i__7].i * x[
+ i__8].i, q__5.i = d__[i__7].r * x[i__8].i +
+ d__[i__7].i * x[i__8].r;
+ q__2.r = q__3.r - q__5.r, q__2.i = q__3.i - q__5.i;
+ i__9 = i__;
+ i__10 = i__ + 1 + j * x_dim1;
+ q__6.r = du[i__9].r * x[i__10].r - du[i__9].i * x[
+ i__10].i, q__6.i = du[i__9].r * x[i__10].i +
+ du[i__9].i * x[i__10].r;
+ q__1.r = q__2.r - q__6.r, q__1.i = q__2.i - q__6.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+ } else if (lsame_(trans, "T")) {
+
+/* Compute B := B - A**T * X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ i__4 = j * x_dim1 + 1;
+ q__2.r = d__[1].r * x[i__4].r - d__[1].i * x[i__4].i,
+ q__2.i = d__[1].r * x[i__4].i + d__[1].i * x[i__4]
+ .r;
+ q__1.r = b[i__3].r - q__2.r, q__1.i = b[i__3].i - q__2.i;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ } else {
+/* First row: B(1,j) -= D(1)*X(1,j) + DL(1)*X(2,j) */
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ i__4 = j * x_dim1 + 1;
+ q__3.r = d__[1].r * x[i__4].r - d__[1].i * x[i__4].i,
+ q__3.i = d__[1].r * x[i__4].i + d__[1].i * x[i__4]
+ .r;
+ q__2.r = b[i__3].r - q__3.r, q__2.i = b[i__3].i - q__3.i;
+ i__5 = j * x_dim1 + 2;
+ q__4.r = dl[1].r * x[i__5].r - dl[1].i * x[i__5].i,
+ q__4.i = dl[1].r * x[i__5].i + dl[1].i * x[i__5]
+ .r;
+ q__1.r = q__2.r - q__4.r, q__1.i = q__2.i - q__4.i;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* Last row: B(n,j) -= DU(n-1)*X(n-1,j) + D(n)*X(n,j) */
+ i__2 = *n + j * b_dim1;
+ i__3 = *n + j * b_dim1;
+ i__4 = *n - 1;
+ i__5 = *n - 1 + j * x_dim1;
+ q__3.r = du[i__4].r * x[i__5].r - du[i__4].i * x[i__5].i,
+ q__3.i = du[i__4].r * x[i__5].i + du[i__4].i * x[
+ i__5].r;
+ q__2.r = b[i__3].r - q__3.r, q__2.i = b[i__3].i - q__3.i;
+ i__6 = *n;
+ i__7 = *n + j * x_dim1;
+ q__4.r = d__[i__6].r * x[i__7].r - d__[i__6].i * x[i__7]
+ .i, q__4.i = d__[i__6].r * x[i__7].i + d__[i__6]
+ .i * x[i__7].r;
+ q__1.r = q__2.r - q__4.r, q__1.i = q__2.i - q__4.i;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* Interior rows: B(i,j) -= DU(i-1)*X(i-1,j) + D(i)*X(i,j) + DL(i)*X(i+1,j) */
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ - 1;
+ i__6 = i__ - 1 + j * x_dim1;
+ q__4.r = du[i__5].r * x[i__6].r - du[i__5].i * x[i__6]
+ .i, q__4.i = du[i__5].r * x[i__6].i + du[i__5]
+ .i * x[i__6].r;
+ q__3.r = b[i__4].r - q__4.r, q__3.i = b[i__4].i -
+ q__4.i;
+ i__7 = i__;
+ i__8 = i__ + j * x_dim1;
+ q__5.r = d__[i__7].r * x[i__8].r - d__[i__7].i * x[
+ i__8].i, q__5.i = d__[i__7].r * x[i__8].i +
+ d__[i__7].i * x[i__8].r;
+ q__2.r = q__3.r - q__5.r, q__2.i = q__3.i - q__5.i;
+ i__9 = i__;
+ i__10 = i__ + 1 + j * x_dim1;
+ q__6.r = dl[i__9].r * x[i__10].r - dl[i__9].i * x[
+ i__10].i, q__6.i = dl[i__9].r * x[i__10].i +
+ dl[i__9].i * x[i__10].r;
+ q__1.r = q__2.r - q__6.r, q__1.i = q__2.i - q__6.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L130: */
+ }
+ }
+/* L140: */
+ }
+ } else if (lsame_(trans, "C")) {
+
+/* Compute B := B - A**H * X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ r_cnjg(&q__3, &d__[1]);
+ i__4 = j * x_dim1 + 1;
+ q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i, q__2.i =
+ q__3.r * x[i__4].i + q__3.i * x[i__4].r;
+ q__1.r = b[i__3].r - q__2.r, q__1.i = b[i__3].i - q__2.i;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ } else {
+/* First row: B(1,j) -= conj(D(1))*X(1,j) + conj(DL(1))*X(2,j) */
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ r_cnjg(&q__4, &d__[1]);
+ i__4 = j * x_dim1 + 1;
+ q__3.r = q__4.r * x[i__4].r - q__4.i * x[i__4].i, q__3.i =
+ q__4.r * x[i__4].i + q__4.i * x[i__4].r;
+ q__2.r = b[i__3].r - q__3.r, q__2.i = b[i__3].i - q__3.i;
+ r_cnjg(&q__6, &dl[1]);
+ i__5 = j * x_dim1 + 2;
+ q__5.r = q__6.r * x[i__5].r - q__6.i * x[i__5].i, q__5.i =
+ q__6.r * x[i__5].i + q__6.i * x[i__5].r;
+ q__1.r = q__2.r - q__5.r, q__1.i = q__2.i - q__5.i;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* Last row: B(n,j) -= conj(DU(n-1))*X(n-1,j) + conj(D(n))*X(n,j) */
+ i__2 = *n + j * b_dim1;
+ i__3 = *n + j * b_dim1;
+ r_cnjg(&q__4, &du[*n - 1]);
+ i__4 = *n - 1 + j * x_dim1;
+ q__3.r = q__4.r * x[i__4].r - q__4.i * x[i__4].i, q__3.i =
+ q__4.r * x[i__4].i + q__4.i * x[i__4].r;
+ q__2.r = b[i__3].r - q__3.r, q__2.i = b[i__3].i - q__3.i;
+ r_cnjg(&q__6, &d__[*n]);
+ i__5 = *n + j * x_dim1;
+ q__5.r = q__6.r * x[i__5].r - q__6.i * x[i__5].i, q__5.i =
+ q__6.r * x[i__5].i + q__6.i * x[i__5].r;
+ q__1.r = q__2.r - q__5.r, q__1.i = q__2.i - q__5.i;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* Interior rows: B(i,j) -= conj(DU(i-1))*X(i-1,j) + conj(D(i))*X(i,j) */
+/* + conj(DL(i))*X(i+1,j) */
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ r_cnjg(&q__5, &du[i__ - 1]);
+ i__5 = i__ - 1 + j * x_dim1;
+ q__4.r = q__5.r * x[i__5].r - q__5.i * x[i__5].i,
+ q__4.i = q__5.r * x[i__5].i + q__5.i * x[i__5]
+ .r;
+ q__3.r = b[i__4].r - q__4.r, q__3.i = b[i__4].i -
+ q__4.i;
+ r_cnjg(&q__7, &d__[i__]);
+ i__6 = i__ + j * x_dim1;
+ q__6.r = q__7.r * x[i__6].r - q__7.i * x[i__6].i,
+ q__6.i = q__7.r * x[i__6].i + q__7.i * x[i__6]
+ .r;
+ q__2.r = q__3.r - q__6.r, q__2.i = q__3.i - q__6.i;
+ r_cnjg(&q__9, &dl[i__]);
+ i__7 = i__ + 1 + j * x_dim1;
+ q__8.r = q__9.r * x[i__7].r - q__9.i * x[i__7].i,
+ q__8.i = q__9.r * x[i__7].i + q__9.i * x[i__7]
+ .r;
+ q__1.r = q__2.r - q__8.r, q__1.i = q__2.i - q__8.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L150: */
+ }
+ }
+/* L160: */
+ }
+ }
+ }
+ return 0;
+
+/* End of CLAGTM */
+
+} /* clagtm_ */
diff --git a/contrib/libs/clapack/clahef.c b/contrib/libs/clapack/clahef.c
new file mode 100644
index 0000000000..2a9a9219dd
--- /dev/null
+++ b/contrib/libs/clapack/clahef.c
@@ -0,0 +1,933 @@
+/* clahef.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int clahef_(char *uplo, integer *n, integer *nb, integer *kb,
+	 complex *a, integer *lda, integer *ipiv, complex *w, integer *ldw, 
+	integer *info)
+{
+/* NOTE(review): f2c-generated translation of LAPACK clahef.f; every scalar */
+/* argument is passed by pointer per f2c calling convention. */
+    /* System generated locals */
+    integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3, i__4, i__5;
+    real r__1, r__2, r__3, r__4;
+    complex q__1, q__2, q__3, q__4;
+
+    /* Builtin functions */
+    double sqrt(doublereal), r_imag(complex *);
+    void r_cnjg(complex *, complex *), c_div(complex *, complex *, complex *);
+
+    /* Local variables */
+    integer j, k;
+    real t, r1;
+    complex d11, d21, d22;
+    integer jb, jj, kk, jp, kp, kw, kkw, imax, jmax;
+    real alpha;
+    extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *, 
+	    integer *, complex *, complex *, integer *, complex *, integer *, 
+	    complex *, complex *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *), ccopy_(integer *, complex *, integer *, 
+	    complex *, integer *), cswap_(integer *, complex *, integer *, 
+	    complex *, integer *);
+    integer kstep;
+    real absakk;
+    extern /* Subroutine */ int clacgv_(integer *, complex *, integer *);
+    extern integer icamax_(integer *, complex *, integer *);
+    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer 
+	    *);
+    real colmax, rowmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAHEF computes a partial factorization of a complex Hermitian */
+/* matrix A using the Bunch-Kaufman diagonal pivoting method. The */
+/* partial factorization has the form: */
+
+/* A = ( I U12 ) ( A11 0 ) ( I 0 ) if UPLO = 'U', or: */
+/* ( 0 U22 ) ( 0 D ) ( U12' U22' ) */
+
+/* A = ( L11 0 ) ( D 0 ) ( L11' L21' ) if UPLO = 'L' */
+/* ( L21 I ) ( 0 A22 ) ( 0 I ) */
+
+/* where the order of D is at most NB. The actual order is returned in */
+/* the argument KB, and is either NB or NB-1, or N if N <= NB. */
+/* Note that U' denotes the conjugate transpose of U. */
+
+/* CLAHEF is an auxiliary routine called by CHETRF. It uses blocked code */
+/* (calling Level 3 BLAS) to update the submatrix A11 (if UPLO = 'U') or */
+/* A22 (if UPLO = 'L'). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* Hermitian matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NB (input) INTEGER */
+/* The maximum number of columns of the matrix A that should be */
+/* factored. NB should be at least 2 to allow for 2-by-2 pivot */
+/* blocks. */
+
+/* KB (output) INTEGER */
+/* The number of columns of A that were actually factored. */
+/* KB is either NB-1 or NB, or N if N <= NB. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+/* On exit, A contains details of the partial factorization. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If UPLO = 'U', only the last KB elements of IPIV are set; */
+/* if UPLO = 'L', only the first KB elements are set. */
+
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* W (workspace) COMPLEX array, dimension (LDW,NB) */
+
+/* LDW (input) INTEGER */
+/* The leading dimension of the array W. LDW >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* > 0: if INFO = k, D(k,k) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+/* Shift the array bases so that 1-based Fortran indexing carries over. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    w_dim1 = *ldw;
+    w_offset = 1 + w_dim1;
+    w -= w_offset;
+
+    /* Function Body */
+    *info = 0;
+
+/* Initialize ALPHA for use in choosing pivot block size. */
+
+    alpha = (sqrt(17.f) + 1.f) / 8.f;
+
+    if (lsame_(uplo, "U")) {
+
+/* Factorize the trailing columns of A using the upper triangle */
+/* of A and working backwards, and compute the matrix W = U12*D */
+/* for use in updating A11 (note that conjg(W) is actually stored) */
+
+/* K is the main loop index, decreasing from N in steps of 1 or 2 */
+
+/* KW is the column of W which corresponds to column K of A */
+
+	k = *n;
+L10:
+	kw = *nb + k - *n;
+
+/* Exit from loop */
+/* NOTE(review): && binds tighter than ||, i.e. exit when */
+/* (k <= n-nb+1 AND nb < n) OR k < 1. */
+
+	if (k <= *n - *nb + 1 && *nb < *n || k < 1) {
+	    goto L30;
+	}
+
+/* Copy column K of A to column KW of W and update it */
+
+	i__1 = k - 1;
+	ccopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &w[kw * w_dim1 + 1], &c__1);
+	i__1 = k + kw * w_dim1;
+	i__2 = k + k * a_dim1;
+	r__1 = a[i__2].r;
+	w[i__1].r = r__1, w[i__1].i = 0.f;
+	if (k < *n) {
+	    i__1 = *n - k;
+/* q__1 = -1 (complex): alpha for the rank-update GEMV below. */
+	    q__1.r = -1.f, q__1.i = -0.f;
+	    cgemv_("No transpose", &k, &i__1, &q__1, &a[(k + 1) * a_dim1 + 1],
+		     lda, &w[k + (kw + 1) * w_dim1], ldw, &c_b1, &w[kw * 
+		    w_dim1 + 1], &c__1);
+	    i__1 = k + kw * w_dim1;
+	    i__2 = k + kw * w_dim1;
+	    r__1 = w[i__2].r;
+	    w[i__1].r = r__1, w[i__1].i = 0.f;
+	}
+
+	kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+	i__1 = k + kw * w_dim1;
+	absakk = (r__1 = w[i__1].r, dabs(r__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+	if (k > 1) {
+	    i__1 = k - 1;
+	    imax = icamax_(&i__1, &w[kw * w_dim1 + 1], &c__1);
+	    i__1 = imax + kw * w_dim1;
+	    colmax = (r__1 = w[i__1].r, dabs(r__1)) + (r__2 = r_imag(&w[imax 
+		    + kw * w_dim1]), dabs(r__2));
+	} else {
+	    colmax = 0.f;
+	}
+
+	if (dmax(absakk,colmax) == 0.f) {
+
+/* Column K is zero: set INFO and continue */
+
+	    if (*info == 0) {
+		*info = k;
+	    }
+	    kp = k;
+	    i__1 = k + k * a_dim1;
+	    i__2 = k + k * a_dim1;
+	    r__1 = a[i__2].r;
+	    a[i__1].r = r__1, a[i__1].i = 0.f;
+	} else {
+	    if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+		kp = k;
+	    } else {
+
+/* Copy column IMAX to column KW-1 of W and update it */
+
+		i__1 = imax - 1;
+		ccopy_(&i__1, &a[imax * a_dim1 + 1], &c__1, &w[(kw - 1) * 
+			w_dim1 + 1], &c__1);
+		i__1 = imax + (kw - 1) * w_dim1;
+		i__2 = imax + imax * a_dim1;
+		r__1 = a[i__2].r;
+		w[i__1].r = r__1, w[i__1].i = 0.f;
+		i__1 = k - imax;
+		ccopy_(&i__1, &a[imax + (imax + 1) * a_dim1], lda, &w[imax + 
+			1 + (kw - 1) * w_dim1], &c__1);
+		i__1 = k - imax;
+		clacgv_(&i__1, &w[imax + 1 + (kw - 1) * w_dim1], &c__1);
+		if (k < *n) {
+		    i__1 = *n - k;
+		    q__1.r = -1.f, q__1.i = -0.f;
+		    cgemv_("No transpose", &k, &i__1, &q__1, &a[(k + 1) * 
+			    a_dim1 + 1], lda, &w[imax + (kw + 1) * w_dim1], 
+			    ldw, &c_b1, &w[(kw - 1) * w_dim1 + 1], &c__1);
+		    i__1 = imax + (kw - 1) * w_dim1;
+		    i__2 = imax + (kw - 1) * w_dim1;
+		    r__1 = w[i__2].r;
+		    w[i__1].r = r__1, w[i__1].i = 0.f;
+		}
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+		i__1 = k - imax;
+		jmax = imax + icamax_(&i__1, &w[imax + 1 + (kw - 1) * w_dim1],
+			 &c__1);
+		i__1 = jmax + (kw - 1) * w_dim1;
+		rowmax = (r__1 = w[i__1].r, dabs(r__1)) + (r__2 = r_imag(&w[
+			jmax + (kw - 1) * w_dim1]), dabs(r__2));
+		if (imax > 1) {
+		    i__1 = imax - 1;
+		    jmax = icamax_(&i__1, &w[(kw - 1) * w_dim1 + 1], &c__1);
+/* Computing MAX */
+		    i__1 = jmax + (kw - 1) * w_dim1;
+		    r__3 = rowmax, r__4 = (r__1 = w[i__1].r, dabs(r__1)) + (
+			    r__2 = r_imag(&w[jmax + (kw - 1) * w_dim1]), dabs(
+			    r__2));
+		    rowmax = dmax(r__3,r__4);
+		}
+
+		if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+		    kp = k;
+		} else /* if(complicated condition) */ {
+		    i__1 = imax + (kw - 1) * w_dim1;
+		    if ((r__1 = w[i__1].r, dabs(r__1)) >= alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+			kp = imax;
+
+/* copy column KW-1 of W to column KW */
+
+			ccopy_(&k, &w[(kw - 1) * w_dim1 + 1], &c__1, &w[kw * 
+				w_dim1 + 1], &c__1);
+		    } else {
+
+/* interchange rows and columns K-1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+			kp = imax;
+			kstep = 2;
+		    }
+		}
+	    }
+
+	    kk = k - kstep + 1;
+	    kkw = *nb + kk - *n;
+
+/* Updated column KP is already stored in column KKW of W */
+
+	    if (kp != kk) {
+
+/* Copy non-updated column KK to column KP */
+
+		i__1 = kp + kp * a_dim1;
+		i__2 = kk + kk * a_dim1;
+		r__1 = a[i__2].r;
+		a[i__1].r = r__1, a[i__1].i = 0.f;
+		i__1 = kk - 1 - kp;
+		ccopy_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + (kp + 
+			1) * a_dim1], lda);
+		i__1 = kk - 1 - kp;
+		clacgv_(&i__1, &a[kp + (kp + 1) * a_dim1], lda);
+		i__1 = kp - 1;
+		ccopy_(&i__1, &a[kk * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1],
+			 &c__1);
+
+/* Interchange rows KK and KP in last KK columns of A and W */
+
+		if (kk < *n) {
+		    i__1 = *n - kk;
+		    cswap_(&i__1, &a[kk + (kk + 1) * a_dim1], lda, &a[kp + (
+			    kk + 1) * a_dim1], lda);
+		}
+		i__1 = *n - kk + 1;
+		cswap_(&i__1, &w[kk + kkw * w_dim1], ldw, &w[kp + kkw * 
+			w_dim1], ldw);
+	    }
+
+	    if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column KW of W now holds */
+
+/* W(k) = U(k)*D(k) */
+
+/* where U(k) is the k-th column of U */
+
+/* Store U(k) in column k of A */
+
+		ccopy_(&k, &w[kw * w_dim1 + 1], &c__1, &a[k * a_dim1 + 1], &
+			c__1);
+		i__1 = k + k * a_dim1;
+/* Diagonal of a Hermitian matrix is real (only .r is referenced), */
+/* so a real reciprocal suffices here. */
+		r1 = 1.f / a[i__1].r;
+		i__1 = k - 1;
+		csscal_(&i__1, &r1, &a[k * a_dim1 + 1], &c__1);
+
+/* Conjugate W(k) */
+
+		i__1 = k - 1;
+		clacgv_(&i__1, &w[kw * w_dim1 + 1], &c__1);
+	    } else {
+
+/* 2-by-2 pivot block D(k): columns KW and KW-1 of W now */
+/* hold */
+
+/* ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */
+
+/* where U(k) and U(k-1) are the k-th and (k-1)-th columns */
+/* of U */
+
+		if (k > 2) {
+
+/* Store U(k) and U(k-1) in columns k and k-1 of A */
+
+		    i__1 = k - 1 + kw * w_dim1;
+		    d21.r = w[i__1].r, d21.i = w[i__1].i;
+		    r_cnjg(&q__2, &d21);
+		    c_div(&q__1, &w[k + kw * w_dim1], &q__2);
+		    d11.r = q__1.r, d11.i = q__1.i;
+		    c_div(&q__1, &w[k - 1 + (kw - 1) * w_dim1], &d21);
+		    d22.r = q__1.r, d22.i = q__1.i;
+		    q__1.r = d11.r * d22.r - d11.i * d22.i, q__1.i = d11.r * 
+			    d22.i + d11.i * d22.r;
+/* t = 1/(d11*d22 - 1): real scale used when applying the inverse */
+/* of the scaled 2-by-2 pivot. */
+		    t = 1.f / (q__1.r - 1.f);
+		    q__2.r = t, q__2.i = 0.f;
+		    c_div(&q__1, &q__2, &d21);
+		    d21.r = q__1.r, d21.i = q__1.i;
+		    i__1 = k - 2;
+		    for (j = 1; j <= i__1; ++j) {
+			i__2 = j + (k - 1) * a_dim1;
+			i__3 = j + (kw - 1) * w_dim1;
+			q__3.r = d11.r * w[i__3].r - d11.i * w[i__3].i, 
+				q__3.i = d11.r * w[i__3].i + d11.i * w[i__3]
+				.r;
+			i__4 = j + kw * w_dim1;
+			q__2.r = q__3.r - w[i__4].r, q__2.i = q__3.i - w[i__4]
+				.i;
+			q__1.r = d21.r * q__2.r - d21.i * q__2.i, q__1.i = 
+				d21.r * q__2.i + d21.i * q__2.r;
+			a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+			i__2 = j + k * a_dim1;
+			r_cnjg(&q__2, &d21);
+			i__3 = j + kw * w_dim1;
+			q__4.r = d22.r * w[i__3].r - d22.i * w[i__3].i, 
+				q__4.i = d22.r * w[i__3].i + d22.i * w[i__3]
+				.r;
+			i__4 = j + (kw - 1) * w_dim1;
+			q__3.r = q__4.r - w[i__4].r, q__3.i = q__4.i - w[i__4]
+				.i;
+			q__1.r = q__2.r * q__3.r - q__2.i * q__3.i, q__1.i = 
+				q__2.r * q__3.i + q__2.i * q__3.r;
+			a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+/* L20: */
+		    }
+		}
+
+/* Copy D(k) to A */
+
+		i__1 = k - 1 + (k - 1) * a_dim1;
+		i__2 = k - 1 + (kw - 1) * w_dim1;
+		a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+		i__1 = k - 1 + k * a_dim1;
+		i__2 = k - 1 + kw * w_dim1;
+		a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+		i__1 = k + k * a_dim1;
+		i__2 = k + kw * w_dim1;
+		a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+
+/* Conjugate W(k) and W(k-1) */
+
+		i__1 = k - 1;
+		clacgv_(&i__1, &w[kw * w_dim1 + 1], &c__1);
+		i__1 = k - 2;
+		clacgv_(&i__1, &w[(kw - 1) * w_dim1 + 1], &c__1);
+	    }
+	}
+
+/* Store details of the interchanges in IPIV */
+
+	if (kstep == 1) {
+	    ipiv[k] = kp;
+	} else {
+	    ipiv[k] = -kp;
+	    ipiv[k - 1] = -kp;
+	}
+
+/* Decrease K and return to the start of the main loop */
+
+	k -= kstep;
+	goto L10;
+
+L30:
+
+/* Update the upper triangle of A11 (= A(1:k,1:k)) as */
+
+/* A11 := A11 - U12*D*U12' = A11 - U12*W' */
+
+/* computing blocks of NB columns at a time (note that conjg(W) is */
+/* actually stored) */
+
+	i__1 = -(*nb);
+	for (j = (k - 1) / *nb * *nb + 1; i__1 < 0 ? j >= 1 : j <= 1; j += 
+		i__1) {
+/* Computing MIN */
+	    i__2 = *nb, i__3 = k - j + 1;
+	    jb = min(i__2,i__3);
+
+/* Update the upper triangle of the diagonal block */
+
+	    i__2 = j + jb - 1;
+	    for (jj = j; jj <= i__2; ++jj) {
+		i__3 = jj + jj * a_dim1;
+		i__4 = jj + jj * a_dim1;
+		r__1 = a[i__4].r;
+		a[i__3].r = r__1, a[i__3].i = 0.f;
+		i__3 = jj - j + 1;
+		i__4 = *n - k;
+		q__1.r = -1.f, q__1.i = -0.f;
+		cgemv_("No transpose", &i__3, &i__4, &q__1, &a[j + (k + 1) * 
+			a_dim1], lda, &w[jj + (kw + 1) * w_dim1], ldw, &c_b1, 
+			&a[j + jj * a_dim1], &c__1);
+		i__3 = jj + jj * a_dim1;
+		i__4 = jj + jj * a_dim1;
+		r__1 = a[i__4].r;
+		a[i__3].r = r__1, a[i__3].i = 0.f;
+/* L40: */
+	    }
+
+/* Update the rectangular superdiagonal block */
+
+	    i__2 = j - 1;
+	    i__3 = *n - k;
+	    q__1.r = -1.f, q__1.i = -0.f;
+	    cgemm_("No transpose", "Transpose", &i__2, &jb, &i__3, &q__1, &a[(
+		    k + 1) * a_dim1 + 1], lda, &w[j + (kw + 1) * w_dim1], ldw,
+		     &c_b1, &a[j * a_dim1 + 1], lda);
+/* L50: */
+	}
+
+/* Put U12 in standard form by partially undoing the interchanges */
+/* in columns k+1:n */
+
+	j = k + 1;
+L60:
+	jj = j;
+	jp = ipiv[j];
+	if (jp < 0) {
+	    jp = -jp;
+	    ++j;
+	}
+	++j;
+	if (jp != jj && j <= *n) {
+	    i__1 = *n - j + 1;
+	    cswap_(&i__1, &a[jp + j * a_dim1], lda, &a[jj + j * a_dim1], lda);
+	}
+	if (j <= *n) {
+	    goto L60;
+	}
+
+/* Set KB to the number of columns factorized */
+
+	*kb = *n - k;
+
+    } else {
+
+/* Factorize the leading columns of A using the lower triangle */
+/* of A and working forwards, and compute the matrix W = L21*D */
+/* for use in updating A22 (note that conjg(W) is actually stored) */
+
+/* K is the main loop index, increasing from 1 in steps of 1 or 2 */
+
+	k = 1;
+L70:
+
+/* Exit from loop */
+/* NOTE(review): && binds tighter than ||, i.e. exit when */
+/* (k >= nb AND nb < n) OR k > n. */
+
+	if (k >= *nb && *nb < *n || k > *n) {
+	    goto L90;
+	}
+
+/* Copy column K of A to column K of W and update it */
+
+	i__1 = k + k * w_dim1;
+	i__2 = k + k * a_dim1;
+	r__1 = a[i__2].r;
+	w[i__1].r = r__1, w[i__1].i = 0.f;
+	if (k < *n) {
+	    i__1 = *n - k;
+	    ccopy_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &w[k + 1 + k * 
+		    w_dim1], &c__1);
+	}
+	i__1 = *n - k + 1;
+	i__2 = k - 1;
+	q__1.r = -1.f, q__1.i = -0.f;
+	cgemv_("No transpose", &i__1, &i__2, &q__1, &a[k + a_dim1], lda, &w[k 
+		+ w_dim1], ldw, &c_b1, &w[k + k * w_dim1], &c__1);
+	i__1 = k + k * w_dim1;
+	i__2 = k + k * w_dim1;
+	r__1 = w[i__2].r;
+	w[i__1].r = r__1, w[i__1].i = 0.f;
+
+	kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+	i__1 = k + k * w_dim1;
+	absakk = (r__1 = w[i__1].r, dabs(r__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+	if (k < *n) {
+	    i__1 = *n - k;
+	    imax = k + icamax_(&i__1, &w[k + 1 + k * w_dim1], &c__1);
+	    i__1 = imax + k * w_dim1;
+	    colmax = (r__1 = w[i__1].r, dabs(r__1)) + (r__2 = r_imag(&w[imax 
+		    + k * w_dim1]), dabs(r__2));
+	} else {
+	    colmax = 0.f;
+	}
+
+	if (dmax(absakk,colmax) == 0.f) {
+
+/* Column K is zero: set INFO and continue */
+
+	    if (*info == 0) {
+		*info = k;
+	    }
+	    kp = k;
+	    i__1 = k + k * a_dim1;
+	    i__2 = k + k * a_dim1;
+	    r__1 = a[i__2].r;
+	    a[i__1].r = r__1, a[i__1].i = 0.f;
+	} else {
+	    if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+		kp = k;
+	    } else {
+
+/* Copy column IMAX to column K+1 of W and update it */
+
+		i__1 = imax - k;
+		ccopy_(&i__1, &a[imax + k * a_dim1], lda, &w[k + (k + 1) * 
+			w_dim1], &c__1);
+		i__1 = imax - k;
+		clacgv_(&i__1, &w[k + (k + 1) * w_dim1], &c__1);
+		i__1 = imax + (k + 1) * w_dim1;
+		i__2 = imax + imax * a_dim1;
+		r__1 = a[i__2].r;
+		w[i__1].r = r__1, w[i__1].i = 0.f;
+		if (imax < *n) {
+		    i__1 = *n - imax;
+		    ccopy_(&i__1, &a[imax + 1 + imax * a_dim1], &c__1, &w[
+			    imax + 1 + (k + 1) * w_dim1], &c__1);
+		}
+		i__1 = *n - k + 1;
+		i__2 = k - 1;
+		q__1.r = -1.f, q__1.i = -0.f;
+		cgemv_("No transpose", &i__1, &i__2, &q__1, &a[k + a_dim1], 
+			lda, &w[imax + w_dim1], ldw, &c_b1, &w[k + (k + 1) * 
+			w_dim1], &c__1);
+		i__1 = imax + (k + 1) * w_dim1;
+		i__2 = imax + (k + 1) * w_dim1;
+		r__1 = w[i__2].r;
+		w[i__1].r = r__1, w[i__1].i = 0.f;
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+		i__1 = imax - k;
+		jmax = k - 1 + icamax_(&i__1, &w[k + (k + 1) * w_dim1], &c__1)
+			;
+		i__1 = jmax + (k + 1) * w_dim1;
+		rowmax = (r__1 = w[i__1].r, dabs(r__1)) + (r__2 = r_imag(&w[
+			jmax + (k + 1) * w_dim1]), dabs(r__2));
+		if (imax < *n) {
+		    i__1 = *n - imax;
+		    jmax = imax + icamax_(&i__1, &w[imax + 1 + (k + 1) * 
+			    w_dim1], &c__1);
+/* Computing MAX */
+		    i__1 = jmax + (k + 1) * w_dim1;
+		    r__3 = rowmax, r__4 = (r__1 = w[i__1].r, dabs(r__1)) + (
+			    r__2 = r_imag(&w[jmax + (k + 1) * w_dim1]), dabs(
+			    r__2));
+		    rowmax = dmax(r__3,r__4);
+		}
+
+		if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+		    kp = k;
+		} else /* if(complicated condition) */ {
+		    i__1 = imax + (k + 1) * w_dim1;
+		    if ((r__1 = w[i__1].r, dabs(r__1)) >= alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+			kp = imax;
+
+/* copy column K+1 of W to column K */
+
+			i__1 = *n - k + 1;
+			ccopy_(&i__1, &w[k + (k + 1) * w_dim1], &c__1, &w[k + 
+				k * w_dim1], &c__1);
+		    } else {
+
+/* interchange rows and columns K+1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+			kp = imax;
+			kstep = 2;
+		    }
+		}
+	    }
+
+	    kk = k + kstep - 1;
+
+/* Updated column KP is already stored in column KK of W */
+
+	    if (kp != kk) {
+
+/* Copy non-updated column KK to column KP */
+
+		i__1 = kp + kp * a_dim1;
+		i__2 = kk + kk * a_dim1;
+		r__1 = a[i__2].r;
+		a[i__1].r = r__1, a[i__1].i = 0.f;
+		i__1 = kp - kk - 1;
+		ccopy_(&i__1, &a[kk + 1 + kk * a_dim1], &c__1, &a[kp + (kk + 
+			1) * a_dim1], lda);
+		i__1 = kp - kk - 1;
+		clacgv_(&i__1, &a[kp + (kk + 1) * a_dim1], lda);
+		if (kp < *n) {
+		    i__1 = *n - kp;
+		    ccopy_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + 1 
+			    + kp * a_dim1], &c__1);
+		}
+
+/* Interchange rows KK and KP in first KK columns of A and W */
+
+		i__1 = kk - 1;
+		cswap_(&i__1, &a[kk + a_dim1], lda, &a[kp + a_dim1], lda);
+		cswap_(&kk, &w[kk + w_dim1], ldw, &w[kp + w_dim1], ldw);
+	    }
+
+	    if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k of W now holds */
+
+/* W(k) = L(k)*D(k) */
+
+/* where L(k) is the k-th column of L */
+
+/* Store L(k) in column k of A */
+
+		i__1 = *n - k + 1;
+		ccopy_(&i__1, &w[k + k * w_dim1], &c__1, &a[k + k * a_dim1], &
+			c__1);
+		if (k < *n) {
+		    i__1 = k + k * a_dim1;
+/* Diagonal of a Hermitian matrix is real (only .r is referenced), */
+/* so a real reciprocal suffices here. */
+		    r1 = 1.f / a[i__1].r;
+		    i__1 = *n - k;
+		    csscal_(&i__1, &r1, &a[k + 1 + k * a_dim1], &c__1);
+
+/* Conjugate W(k) */
+
+		    i__1 = *n - k;
+		    clacgv_(&i__1, &w[k + 1 + k * w_dim1], &c__1);
+		}
+	    } else {
+
+/* 2-by-2 pivot block D(k): columns k and k+1 of W now hold */
+
+/* ( W(k) W(k+1) ) = ( L(k) L(k+1) )*D(k) */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th columns */
+/* of L */
+
+		if (k < *n - 1) {
+
+/* Store L(k) and L(k+1) in columns k and k+1 of A */
+
+		    i__1 = k + 1 + k * w_dim1;
+		    d21.r = w[i__1].r, d21.i = w[i__1].i;
+		    c_div(&q__1, &w[k + 1 + (k + 1) * w_dim1], &d21);
+		    d11.r = q__1.r, d11.i = q__1.i;
+		    r_cnjg(&q__2, &d21);
+		    c_div(&q__1, &w[k + k * w_dim1], &q__2);
+		    d22.r = q__1.r, d22.i = q__1.i;
+		    q__1.r = d11.r * d22.r - d11.i * d22.i, q__1.i = d11.r * 
+			    d22.i + d11.i * d22.r;
+/* t = 1/(d11*d22 - 1): real scale used when applying the inverse */
+/* of the scaled 2-by-2 pivot. */
+		    t = 1.f / (q__1.r - 1.f);
+		    q__2.r = t, q__2.i = 0.f;
+		    c_div(&q__1, &q__2, &d21);
+		    d21.r = q__1.r, d21.i = q__1.i;
+		    i__1 = *n;
+		    for (j = k + 2; j <= i__1; ++j) {
+			i__2 = j + k * a_dim1;
+			r_cnjg(&q__2, &d21);
+			i__3 = j + k * w_dim1;
+			q__4.r = d11.r * w[i__3].r - d11.i * w[i__3].i, 
+				q__4.i = d11.r * w[i__3].i + d11.i * w[i__3]
+				.r;
+			i__4 = j + (k + 1) * w_dim1;
+			q__3.r = q__4.r - w[i__4].r, q__3.i = q__4.i - w[i__4]
+				.i;
+			q__1.r = q__2.r * q__3.r - q__2.i * q__3.i, q__1.i = 
+				q__2.r * q__3.i + q__2.i * q__3.r;
+			a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+			i__2 = j + (k + 1) * a_dim1;
+			i__3 = j + (k + 1) * w_dim1;
+			q__3.r = d22.r * w[i__3].r - d22.i * w[i__3].i, 
+				q__3.i = d22.r * w[i__3].i + d22.i * w[i__3]
+				.r;
+			i__4 = j + k * w_dim1;
+			q__2.r = q__3.r - w[i__4].r, q__2.i = q__3.i - w[i__4]
+				.i;
+			q__1.r = d21.r * q__2.r - d21.i * q__2.i, q__1.i = 
+				d21.r * q__2.i + d21.i * q__2.r;
+			a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+/* L80: */
+		    }
+		}
+
+/* Copy D(k) to A */
+
+		i__1 = k + k * a_dim1;
+		i__2 = k + k * w_dim1;
+		a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+		i__1 = k + 1 + k * a_dim1;
+		i__2 = k + 1 + k * w_dim1;
+		a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+		i__1 = k + 1 + (k + 1) * a_dim1;
+		i__2 = k + 1 + (k + 1) * w_dim1;
+		a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+
+/* Conjugate W(k) and W(k+1) */
+
+		i__1 = *n - k;
+		clacgv_(&i__1, &w[k + 1 + k * w_dim1], &c__1);
+		i__1 = *n - k - 1;
+		clacgv_(&i__1, &w[k + 2 + (k + 1) * w_dim1], &c__1);
+	    }
+	}
+
+/* Store details of the interchanges in IPIV */
+
+	if (kstep == 1) {
+	    ipiv[k] = kp;
+	} else {
+	    ipiv[k] = -kp;
+	    ipiv[k + 1] = -kp;
+	}
+
+/* Increase K and return to the start of the main loop */
+
+	k += kstep;
+	goto L70;
+
+L90:
+
+/* Update the lower triangle of A22 (= A(k:n,k:n)) as */
+
+/* A22 := A22 - L21*D*L21' = A22 - L21*W' */
+
+/* computing blocks of NB columns at a time (note that conjg(W) is */
+/* actually stored) */
+
+	i__1 = *n;
+	i__2 = *nb;
+	for (j = k; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+	    i__3 = *nb, i__4 = *n - j + 1;
+	    jb = min(i__3,i__4);
+
+/* Update the lower triangle of the diagonal block */
+
+	    i__3 = j + jb - 1;
+	    for (jj = j; jj <= i__3; ++jj) {
+		i__4 = jj + jj * a_dim1;
+		i__5 = jj + jj * a_dim1;
+		r__1 = a[i__5].r;
+		a[i__4].r = r__1, a[i__4].i = 0.f;
+		i__4 = j + jb - jj;
+		i__5 = k - 1;
+		q__1.r = -1.f, q__1.i = -0.f;
+		cgemv_("No transpose", &i__4, &i__5, &q__1, &a[jj + a_dim1], 
+			lda, &w[jj + w_dim1], ldw, &c_b1, &a[jj + jj * a_dim1]
+, &c__1);
+		i__4 = jj + jj * a_dim1;
+		i__5 = jj + jj * a_dim1;
+		r__1 = a[i__5].r;
+		a[i__4].r = r__1, a[i__4].i = 0.f;
+/* L100: */
+	    }
+
+/* Update the rectangular subdiagonal block */
+
+	    if (j + jb <= *n) {
+		i__3 = *n - j - jb + 1;
+		i__4 = k - 1;
+		q__1.r = -1.f, q__1.i = -0.f;
+		cgemm_("No transpose", "Transpose", &i__3, &jb, &i__4, &q__1, 
+			&a[j + jb + a_dim1], lda, &w[j + w_dim1], ldw, &c_b1, 
+			&a[j + jb + j * a_dim1], lda);
+	    }
+/* L110: */
+	}
+
+/* Put L21 in standard form by partially undoing the interchanges */
+/* in columns 1:k-1 */
+
+	j = k - 1;
+L120:
+	jj = j;
+	jp = ipiv[j];
+	if (jp < 0) {
+	    jp = -jp;
+	    --j;
+	}
+	--j;
+	if (jp != jj && j >= 1) {
+	    cswap_(&j, &a[jp + a_dim1], lda, &a[jj + a_dim1], lda);
+	}
+	if (j >= 1) {
+	    goto L120;
+	}
+
+/* Set KB to the number of columns factorized */
+
+	*kb = k - 1;
+
+    }
+    return 0;
+
+/* End of CLAHEF */
+
+} /* clahef_ */
diff --git a/contrib/libs/clapack/clahqr.c b/contrib/libs/clapack/clahqr.c
new file mode 100644
index 0000000000..98674fc324
--- /dev/null
+++ b/contrib/libs/clapack/clahqr.c
@@ -0,0 +1,754 @@
+/* clahqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int clahqr_(logical *wantt, logical *wantz, integer *n,
+ integer *ilo, integer *ihi, complex *h__, integer *ldh, complex *w,
+ integer *iloz, integer *ihiz, complex *z__, integer *ldz, integer *
+ info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2, r__3, r__4, r__5, r__6;
+ complex q__1, q__2, q__3, q__4, q__5, q__6, q__7;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+ void r_cnjg(complex *, complex *);
+ double c_abs(complex *);
+ void c_sqrt(complex *, complex *), pow_ci(complex *, complex *, integer *)
+ ;
+
+ /* Local variables */
+ integer i__, j, k, l, m;
+ real s;
+ complex t, u, v[2], x, y;
+ integer i1, i2;
+ complex t1;
+ real t2;
+ complex v2;
+ real aa, ab, ba, bb, h10;
+ complex h11;
+ real h21;
+ complex h22, sc;
+ integer nh, nz;
+ real sx;
+ integer jhi;
+ complex h11s;
+ integer jlo, its;
+ real ulp;
+ complex sum;
+ real tst;
+ complex temp;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *), ccopy_(integer *, complex *, integer *, complex *,
+ integer *);
+ real rtemp;
+ extern /* Subroutine */ int slabad_(real *, real *), clarfg_(integer *,
+ complex *, complex *, integer *, complex *);
+ extern /* Complex */ VOID cladiv_(complex *, complex *, complex *);
+ extern doublereal slamch_(char *);
+ real safmin, safmax, smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAHQR is an auxiliary routine called by CHSEQR to update the */
+/* eigenvalues and Schur decomposition already computed by CHSEQR, by */
+/* dealing with the Hessenberg submatrix in rows and columns ILO to */
+/* IHI. */
+
+/* Arguments */
+/* ========= */
+
+/* WANTT (input) LOGICAL */
+/* = .TRUE. : the full Schur form T is required; */
+/* = .FALSE.: only eigenvalues are required. */
+
+/* WANTZ (input) LOGICAL */
+/* = .TRUE. : the matrix of Schur vectors Z is required; */
+/* = .FALSE.: Schur vectors are not required. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that H is already upper triangular in rows and */
+/* columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless ILO = 1). */
+/* CLAHQR works primarily with the Hessenberg submatrix in rows */
+/* and columns ILO to IHI, but applies transformations to all of */
+/* H if WANTT is .TRUE.. */
+/* 1 <= ILO <= max(1,IHI); IHI <= N. */
+
+/* H (input/output) COMPLEX array, dimension (LDH,N) */
+/* On entry, the upper Hessenberg matrix H. */
+/* On exit, if INFO is zero and if WANTT is .TRUE., then H */
+/* is upper triangular in rows and columns ILO:IHI. If INFO */
+/* is zero and if WANTT is .FALSE., then the contents of H */
+/* are unspecified on exit. The output state of H in case */
+/* INFO is positive is below under the description of INFO. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH >= max(1,N). */
+
+/* W (output) COMPLEX array, dimension (N) */
+/* The computed eigenvalues ILO to IHI are stored in the */
+/* corresponding elements of W. If WANTT is .TRUE., the */
+/* eigenvalues are stored in the same order as on the diagonal */
+/* of the Schur form returned in H, with W(i) = H(i,i). */
+
+/* ILOZ (input) INTEGER */
+/* IHIZ (input) INTEGER */
+/* Specify the rows of Z to which transformations must be */
+/* applied if WANTZ is .TRUE.. */
+/* 1 <= ILOZ <= ILO; IHI <= IHIZ <= N. */
+
+/* Z (input/output) COMPLEX array, dimension (LDZ,N) */
+/* If WANTZ is .TRUE., on entry Z must contain the current */
+/* matrix Z of transformations accumulated by CHSEQR, and on */
+/* exit Z has been updated; transformations are applied only to */
+/* the submatrix Z(ILOZ:IHIZ,ILO:IHI). */
+/* If WANTZ is .FALSE., Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* .GT. 0: if INFO = i, CLAHQR failed to compute all the */
+/* eigenvalues ILO to IHI in a total of 30 iterations */
+/* per eigenvalue; elements i+1:ihi of W contain */
+/* those eigenvalues which have been successfully */
+/* computed. */
+
+/* If INFO .GT. 0 and WANTT is .FALSE., then on exit, */
+/* the remaining unconverged eigenvalues are the */
+/* eigenvalues of the upper Hessenberg matrix */
+/* rows and columns ILO through INFO of the final, */
+/* output value of H. */
+
+/* If INFO .GT. 0 and WANTT is .TRUE., then on exit */
+/* (*) (initial value of H)*U = U*(final value of H) */
+/* where U is an orthogonal matrix. The final */
+/* value of H is upper Hessenberg and triangular in */
+/* rows and columns INFO+1 through IHI. */
+
+/* If INFO .GT. 0 and WANTZ is .TRUE., then on exit */
+/* (final value of Z) = (initial value of Z)*U */
+/* where U is the orthogonal matrix in (*) */
+/* (regardless of the value of WANTT.) */
+
+/* Further Details */
+/* =============== */
+
+/* 02-96 Based on modifications by */
+/* David Day, Sandia National Laboratory, USA */
+
+/* 12-04 Further modifications by */
+/* Ralph Byers, University of Kansas, USA */
+/* This is a modified version of CLAHQR from LAPACK version 3.0. */
+/* It is (1) more robust against overflow and underflow and */
+/* (2) adopts the more conservative Ahues & Tisseur stopping */
+/* criterion (LAWN 122, 1997). */
+
+/* ========================================================= */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+
+ /* Function Body */
+ *info = 0;
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*ilo == *ihi) {
+ i__1 = *ilo;
+ i__2 = *ilo + *ilo * h_dim1;
+ w[i__1].r = h__[i__2].r, w[i__1].i = h__[i__2].i;
+ return 0;
+ }
+
+/* ==== clear out the trash ==== */
+ i__1 = *ihi - 3;
+ for (j = *ilo; j <= i__1; ++j) {
+ i__2 = j + 2 + j * h_dim1;
+ h__[i__2].r = 0.f, h__[i__2].i = 0.f;
+ i__2 = j + 3 + j * h_dim1;
+ h__[i__2].r = 0.f, h__[i__2].i = 0.f;
+/* L10: */
+ }
+ if (*ilo <= *ihi - 2) {
+ i__1 = *ihi + (*ihi - 2) * h_dim1;
+ h__[i__1].r = 0.f, h__[i__1].i = 0.f;
+ }
+/* ==== ensure that subdiagonal entries are real ==== */
+ if (*wantt) {
+ jlo = 1;
+ jhi = *n;
+ } else {
+ jlo = *ilo;
+ jhi = *ihi;
+ }
+ i__1 = *ihi;
+ for (i__ = *ilo + 1; i__ <= i__1; ++i__) {
+ if (r_imag(&h__[i__ + (i__ - 1) * h_dim1]) != 0.f) {
+/* ==== The following redundant normalization */
+/* . avoids problems with both gradual and */
+/* . sudden underflow in ABS(H(I,I-1)) ==== */
+ i__2 = i__ + (i__ - 1) * h_dim1;
+ i__3 = i__ + (i__ - 1) * h_dim1;
+ r__3 = (r__1 = h__[i__3].r, dabs(r__1)) + (r__2 = r_imag(&h__[i__
+ + (i__ - 1) * h_dim1]), dabs(r__2));
+ q__1.r = h__[i__2].r / r__3, q__1.i = h__[i__2].i / r__3;
+ sc.r = q__1.r, sc.i = q__1.i;
+ r_cnjg(&q__2, &sc);
+ r__1 = c_abs(&sc);
+ q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
+ sc.r = q__1.r, sc.i = q__1.i;
+ i__2 = i__ + (i__ - 1) * h_dim1;
+ r__1 = c_abs(&h__[i__ + (i__ - 1) * h_dim1]);
+ h__[i__2].r = r__1, h__[i__2].i = 0.f;
+ i__2 = jhi - i__ + 1;
+ cscal_(&i__2, &sc, &h__[i__ + i__ * h_dim1], ldh);
+/* Computing MIN */
+ i__3 = jhi, i__4 = i__ + 1;
+ i__2 = min(i__3,i__4) - jlo + 1;
+ r_cnjg(&q__1, &sc);
+ cscal_(&i__2, &q__1, &h__[jlo + i__ * h_dim1], &c__1);
+ if (*wantz) {
+ i__2 = *ihiz - *iloz + 1;
+ r_cnjg(&q__1, &sc);
+ cscal_(&i__2, &q__1, &z__[*iloz + i__ * z_dim1], &c__1);
+ }
+ }
+/* L20: */
+ }
+
+ nh = *ihi - *ilo + 1;
+ nz = *ihiz - *iloz + 1;
+
+/* Set machine-dependent constants for the stopping criterion. */
+
+ safmin = slamch_("SAFE MINIMUM");
+ safmax = 1.f / safmin;
+ slabad_(&safmin, &safmax);
+ ulp = slamch_("PRECISION");
+ smlnum = safmin * ((real) nh / ulp);
+
+/* I1 and I2 are the indices of the first row and last column of H */
+/* to which transformations must be applied. If eigenvalues only are */
+/* being computed, I1 and I2 are set inside the main loop. */
+
+ if (*wantt) {
+ i1 = 1;
+ i2 = *n;
+ }
+
+/* The main loop begins here. I is the loop index and decreases from */
+/* IHI to ILO in steps of 1. Each iteration of the loop works */
+/* with the active submatrix in rows and columns L to I. */
+/* Eigenvalues I+1 to IHI have already converged. Either L = ILO, or */
+/* H(L,L-1) is negligible so that the matrix splits. */
+
+ i__ = *ihi;
+L30:
+ if (i__ < *ilo) {
+ goto L150;
+ }
+
+/* Perform QR iterations on rows and columns ILO to I until a */
+/* submatrix of order 1 splits off at the bottom because a */
+/* subdiagonal element has become negligible. */
+
+ l = *ilo;
+ for (its = 0; its <= 30; ++its) {
+
+/* Look for a single small subdiagonal element. */
+
+ i__1 = l + 1;
+ for (k = i__; k >= i__1; --k) {
+ i__2 = k + (k - 1) * h_dim1;
+ if ((r__1 = h__[i__2].r, dabs(r__1)) + (r__2 = r_imag(&h__[k + (k
+ - 1) * h_dim1]), dabs(r__2)) <= smlnum) {
+ goto L50;
+ }
+ i__2 = k - 1 + (k - 1) * h_dim1;
+ i__3 = k + k * h_dim1;
+ tst = (r__1 = h__[i__2].r, dabs(r__1)) + (r__2 = r_imag(&h__[k -
+ 1 + (k - 1) * h_dim1]), dabs(r__2)) + ((r__3 = h__[i__3]
+ .r, dabs(r__3)) + (r__4 = r_imag(&h__[k + k * h_dim1]),
+ dabs(r__4)));
+ if (tst == 0.f) {
+ if (k - 2 >= *ilo) {
+ i__2 = k - 1 + (k - 2) * h_dim1;
+ tst += (r__1 = h__[i__2].r, dabs(r__1));
+ }
+ if (k + 1 <= *ihi) {
+ i__2 = k + 1 + k * h_dim1;
+ tst += (r__1 = h__[i__2].r, dabs(r__1));
+ }
+ }
+/* ==== The following is a conservative small subdiagonal */
+/* . deflation criterion due to Ahues & Tisseur (LAWN 122, */
+/* . 1997). It has better mathematical foundation and */
+/* . improves accuracy in some examples. ==== */
+ i__2 = k + (k - 1) * h_dim1;
+ if ((r__1 = h__[i__2].r, dabs(r__1)) <= ulp * tst) {
+/* Computing MAX */
+ i__2 = k + (k - 1) * h_dim1;
+ i__3 = k - 1 + k * h_dim1;
+ r__5 = (r__1 = h__[i__2].r, dabs(r__1)) + (r__2 = r_imag(&h__[
+ k + (k - 1) * h_dim1]), dabs(r__2)), r__6 = (r__3 =
+ h__[i__3].r, dabs(r__3)) + (r__4 = r_imag(&h__[k - 1
+ + k * h_dim1]), dabs(r__4));
+ ab = dmax(r__5,r__6);
+/* Computing MIN */
+ i__2 = k + (k - 1) * h_dim1;
+ i__3 = k - 1 + k * h_dim1;
+ r__5 = (r__1 = h__[i__2].r, dabs(r__1)) + (r__2 = r_imag(&h__[
+ k + (k - 1) * h_dim1]), dabs(r__2)), r__6 = (r__3 =
+ h__[i__3].r, dabs(r__3)) + (r__4 = r_imag(&h__[k - 1
+ + k * h_dim1]), dabs(r__4));
+ ba = dmin(r__5,r__6);
+ i__2 = k - 1 + (k - 1) * h_dim1;
+ i__3 = k + k * h_dim1;
+ q__2.r = h__[i__2].r - h__[i__3].r, q__2.i = h__[i__2].i -
+ h__[i__3].i;
+ q__1.r = q__2.r, q__1.i = q__2.i;
+/* Computing MAX */
+ i__4 = k + k * h_dim1;
+ r__5 = (r__1 = h__[i__4].r, dabs(r__1)) + (r__2 = r_imag(&h__[
+ k + k * h_dim1]), dabs(r__2)), r__6 = (r__3 = q__1.r,
+ dabs(r__3)) + (r__4 = r_imag(&q__1), dabs(r__4));
+ aa = dmax(r__5,r__6);
+ i__2 = k - 1 + (k - 1) * h_dim1;
+ i__3 = k + k * h_dim1;
+ q__2.r = h__[i__2].r - h__[i__3].r, q__2.i = h__[i__2].i -
+ h__[i__3].i;
+ q__1.r = q__2.r, q__1.i = q__2.i;
+/* Computing MIN */
+ i__4 = k + k * h_dim1;
+ r__5 = (r__1 = h__[i__4].r, dabs(r__1)) + (r__2 = r_imag(&h__[
+ k + k * h_dim1]), dabs(r__2)), r__6 = (r__3 = q__1.r,
+ dabs(r__3)) + (r__4 = r_imag(&q__1), dabs(r__4));
+ bb = dmin(r__5,r__6);
+ s = aa + ab;
+/* Computing MAX */
+ r__1 = smlnum, r__2 = ulp * (bb * (aa / s));
+ if (ba * (ab / s) <= dmax(r__1,r__2)) {
+ goto L50;
+ }
+ }
+/* L40: */
+ }
+L50:
+ l = k;
+ if (l > *ilo) {
+
+/* H(L,L-1) is negligible */
+
+ i__1 = l + (l - 1) * h_dim1;
+ h__[i__1].r = 0.f, h__[i__1].i = 0.f;
+ }
+
+/* Exit from loop if a submatrix of order 1 has split off. */
+
+ if (l >= i__) {
+ goto L140;
+ }
+
+/* Now the active submatrix is in rows and columns L to I. If */
+/* eigenvalues only are being computed, only the active submatrix */
+/* need be transformed. */
+
+ if (! (*wantt)) {
+ i1 = l;
+ i2 = i__;
+ }
+
+ if (its == 10) {
+
+/* Exceptional shift. */
+
+ i__1 = l + 1 + l * h_dim1;
+ s = (r__1 = h__[i__1].r, dabs(r__1)) * .75f;
+ i__1 = l + l * h_dim1;
+ q__1.r = s + h__[i__1].r, q__1.i = h__[i__1].i;
+ t.r = q__1.r, t.i = q__1.i;
+ } else if (its == 20) {
+
+/* Exceptional shift. */
+
+ i__1 = i__ + (i__ - 1) * h_dim1;
+ s = (r__1 = h__[i__1].r, dabs(r__1)) * .75f;
+ i__1 = i__ + i__ * h_dim1;
+ q__1.r = s + h__[i__1].r, q__1.i = h__[i__1].i;
+ t.r = q__1.r, t.i = q__1.i;
+ } else {
+
+/* Wilkinson's shift. */
+
+ i__1 = i__ + i__ * h_dim1;
+ t.r = h__[i__1].r, t.i = h__[i__1].i;
+ c_sqrt(&q__2, &h__[i__ - 1 + i__ * h_dim1]);
+ c_sqrt(&q__3, &h__[i__ + (i__ - 1) * h_dim1]);
+ q__1.r = q__2.r * q__3.r - q__2.i * q__3.i, q__1.i = q__2.r *
+ q__3.i + q__2.i * q__3.r;
+ u.r = q__1.r, u.i = q__1.i;
+ s = (r__1 = u.r, dabs(r__1)) + (r__2 = r_imag(&u), dabs(r__2));
+ if (s != 0.f) {
+ i__1 = i__ - 1 + (i__ - 1) * h_dim1;
+ q__2.r = h__[i__1].r - t.r, q__2.i = h__[i__1].i - t.i;
+ q__1.r = q__2.r * .5f, q__1.i = q__2.i * .5f;
+ x.r = q__1.r, x.i = q__1.i;
+ sx = (r__1 = x.r, dabs(r__1)) + (r__2 = r_imag(&x), dabs(r__2)
+ );
+/* Computing MAX */
+ r__3 = s, r__4 = (r__1 = x.r, dabs(r__1)) + (r__2 = r_imag(&x)
+ , dabs(r__2));
+ s = dmax(r__3,r__4);
+ q__5.r = x.r / s, q__5.i = x.i / s;
+ pow_ci(&q__4, &q__5, &c__2);
+ q__7.r = u.r / s, q__7.i = u.i / s;
+ pow_ci(&q__6, &q__7, &c__2);
+ q__3.r = q__4.r + q__6.r, q__3.i = q__4.i + q__6.i;
+ c_sqrt(&q__2, &q__3);
+ q__1.r = s * q__2.r, q__1.i = s * q__2.i;
+ y.r = q__1.r, y.i = q__1.i;
+ if (sx > 0.f) {
+ q__1.r = x.r / sx, q__1.i = x.i / sx;
+ q__2.r = x.r / sx, q__2.i = x.i / sx;
+ if (q__1.r * y.r + r_imag(&q__2) * r_imag(&y) < 0.f) {
+ q__3.r = -y.r, q__3.i = -y.i;
+ y.r = q__3.r, y.i = q__3.i;
+ }
+ }
+ q__4.r = x.r + y.r, q__4.i = x.i + y.i;
+ cladiv_(&q__3, &u, &q__4);
+ q__2.r = u.r * q__3.r - u.i * q__3.i, q__2.i = u.r * q__3.i +
+ u.i * q__3.r;
+ q__1.r = t.r - q__2.r, q__1.i = t.i - q__2.i;
+ t.r = q__1.r, t.i = q__1.i;
+ }
+ }
+
+/* Look for two consecutive small subdiagonal elements. */
+
+ i__1 = l + 1;
+ for (m = i__ - 1; m >= i__1; --m) {
+
+/* Determine the effect of starting the single-shift QR */
+/* iteration at row M, and see if this would make H(M,M-1) */
+/* negligible. */
+
+ i__2 = m + m * h_dim1;
+ h11.r = h__[i__2].r, h11.i = h__[i__2].i;
+ i__2 = m + 1 + (m + 1) * h_dim1;
+ h22.r = h__[i__2].r, h22.i = h__[i__2].i;
+ q__1.r = h11.r - t.r, q__1.i = h11.i - t.i;
+ h11s.r = q__1.r, h11s.i = q__1.i;
+ i__2 = m + 1 + m * h_dim1;
+ h21 = h__[i__2].r;
+ s = (r__1 = h11s.r, dabs(r__1)) + (r__2 = r_imag(&h11s), dabs(
+ r__2)) + dabs(h21);
+ q__1.r = h11s.r / s, q__1.i = h11s.i / s;
+ h11s.r = q__1.r, h11s.i = q__1.i;
+ h21 /= s;
+ v[0].r = h11s.r, v[0].i = h11s.i;
+ v[1].r = h21, v[1].i = 0.f;
+ i__2 = m + (m - 1) * h_dim1;
+ h10 = h__[i__2].r;
+ if (dabs(h10) * dabs(h21) <= ulp * (((r__1 = h11s.r, dabs(r__1))
+ + (r__2 = r_imag(&h11s), dabs(r__2))) * ((r__3 = h11.r,
+ dabs(r__3)) + (r__4 = r_imag(&h11), dabs(r__4)) + ((r__5 =
+ h22.r, dabs(r__5)) + (r__6 = r_imag(&h22), dabs(r__6)))))
+ ) {
+ goto L70;
+ }
+/* L60: */
+ }
+ i__1 = l + l * h_dim1;
+ h11.r = h__[i__1].r, h11.i = h__[i__1].i;
+ i__1 = l + 1 + (l + 1) * h_dim1;
+ h22.r = h__[i__1].r, h22.i = h__[i__1].i;
+ q__1.r = h11.r - t.r, q__1.i = h11.i - t.i;
+ h11s.r = q__1.r, h11s.i = q__1.i;
+ i__1 = l + 1 + l * h_dim1;
+ h21 = h__[i__1].r;
+ s = (r__1 = h11s.r, dabs(r__1)) + (r__2 = r_imag(&h11s), dabs(r__2))
+ + dabs(h21);
+ q__1.r = h11s.r / s, q__1.i = h11s.i / s;
+ h11s.r = q__1.r, h11s.i = q__1.i;
+ h21 /= s;
+ v[0].r = h11s.r, v[0].i = h11s.i;
+ v[1].r = h21, v[1].i = 0.f;
+L70:
+
+/* Single-shift QR step */
+
+ i__1 = i__ - 1;
+ for (k = m; k <= i__1; ++k) {
+
+/* The first iteration of this loop determines a reflection G */
+/* from the vector V and applies it from left and right to H, */
+/* thus creating a nonzero bulge below the subdiagonal. */
+
+/* Each subsequent iteration determines a reflection G to */
+/* restore the Hessenberg form in the (K-1)th column, and thus */
+/* chases the bulge one step toward the bottom of the active */
+/* submatrix. */
+
+/* V(2) is always real before the call to CLARFG, and hence */
+/* after the call T2 ( = T1*V(2) ) is also real. */
+
+ if (k > m) {
+ ccopy_(&c__2, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1);
+ }
+ clarfg_(&c__2, v, &v[1], &c__1, &t1);
+ if (k > m) {
+ i__2 = k + (k - 1) * h_dim1;
+ h__[i__2].r = v[0].r, h__[i__2].i = v[0].i;
+ i__2 = k + 1 + (k - 1) * h_dim1;
+ h__[i__2].r = 0.f, h__[i__2].i = 0.f;
+ }
+ v2.r = v[1].r, v2.i = v[1].i;
+ q__1.r = t1.r * v2.r - t1.i * v2.i, q__1.i = t1.r * v2.i + t1.i *
+ v2.r;
+ t2 = q__1.r;
+
+/* Apply G from the left to transform the rows of the matrix */
+/* in columns K to I2. */
+
+ i__2 = i2;
+ for (j = k; j <= i__2; ++j) {
+ r_cnjg(&q__3, &t1);
+ i__3 = k + j * h_dim1;
+ q__2.r = q__3.r * h__[i__3].r - q__3.i * h__[i__3].i, q__2.i =
+ q__3.r * h__[i__3].i + q__3.i * h__[i__3].r;
+ i__4 = k + 1 + j * h_dim1;
+ q__4.r = t2 * h__[i__4].r, q__4.i = t2 * h__[i__4].i;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__3 = k + j * h_dim1;
+ i__4 = k + j * h_dim1;
+ q__1.r = h__[i__4].r - sum.r, q__1.i = h__[i__4].i - sum.i;
+ h__[i__3].r = q__1.r, h__[i__3].i = q__1.i;
+ i__3 = k + 1 + j * h_dim1;
+ i__4 = k + 1 + j * h_dim1;
+ q__2.r = sum.r * v2.r - sum.i * v2.i, q__2.i = sum.r * v2.i +
+ sum.i * v2.r;
+ q__1.r = h__[i__4].r - q__2.r, q__1.i = h__[i__4].i - q__2.i;
+ h__[i__3].r = q__1.r, h__[i__3].i = q__1.i;
+/* L80: */
+ }
+
+/* Apply G from the right to transform the columns of the */
+/* matrix in rows I1 to min(K+2,I). */
+
+/* Computing MIN */
+ i__3 = k + 2;
+ i__2 = min(i__3,i__);
+ for (j = i1; j <= i__2; ++j) {
+ i__3 = j + k * h_dim1;
+ q__2.r = t1.r * h__[i__3].r - t1.i * h__[i__3].i, q__2.i =
+ t1.r * h__[i__3].i + t1.i * h__[i__3].r;
+ i__4 = j + (k + 1) * h_dim1;
+ q__3.r = t2 * h__[i__4].r, q__3.i = t2 * h__[i__4].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__3 = j + k * h_dim1;
+ i__4 = j + k * h_dim1;
+ q__1.r = h__[i__4].r - sum.r, q__1.i = h__[i__4].i - sum.i;
+ h__[i__3].r = q__1.r, h__[i__3].i = q__1.i;
+ i__3 = j + (k + 1) * h_dim1;
+ i__4 = j + (k + 1) * h_dim1;
+ r_cnjg(&q__3, &v2);
+ q__2.r = sum.r * q__3.r - sum.i * q__3.i, q__2.i = sum.r *
+ q__3.i + sum.i * q__3.r;
+ q__1.r = h__[i__4].r - q__2.r, q__1.i = h__[i__4].i - q__2.i;
+ h__[i__3].r = q__1.r, h__[i__3].i = q__1.i;
+/* L90: */
+ }
+
+ if (*wantz) {
+
+/* Accumulate transformations in the matrix Z */
+
+ i__2 = *ihiz;
+ for (j = *iloz; j <= i__2; ++j) {
+ i__3 = j + k * z_dim1;
+ q__2.r = t1.r * z__[i__3].r - t1.i * z__[i__3].i, q__2.i =
+ t1.r * z__[i__3].i + t1.i * z__[i__3].r;
+ i__4 = j + (k + 1) * z_dim1;
+ q__3.r = t2 * z__[i__4].r, q__3.i = t2 * z__[i__4].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__3 = j + k * z_dim1;
+ i__4 = j + k * z_dim1;
+ q__1.r = z__[i__4].r - sum.r, q__1.i = z__[i__4].i -
+ sum.i;
+ z__[i__3].r = q__1.r, z__[i__3].i = q__1.i;
+ i__3 = j + (k + 1) * z_dim1;
+ i__4 = j + (k + 1) * z_dim1;
+ r_cnjg(&q__3, &v2);
+ q__2.r = sum.r * q__3.r - sum.i * q__3.i, q__2.i = sum.r *
+ q__3.i + sum.i * q__3.r;
+ q__1.r = z__[i__4].r - q__2.r, q__1.i = z__[i__4].i -
+ q__2.i;
+ z__[i__3].r = q__1.r, z__[i__3].i = q__1.i;
+/* L100: */
+ }
+ }
+
+ if (k == m && m > l) {
+
+/* If the QR step was started at row M > L because two */
+/* consecutive small subdiagonals were found, then extra */
+/* scaling must be performed to ensure that H(M,M-1) remains */
+/* real. */
+
+ q__1.r = 1.f - t1.r, q__1.i = 0.f - t1.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ r__1 = c_abs(&temp);
+ q__1.r = temp.r / r__1, q__1.i = temp.i / r__1;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = m + 1 + m * h_dim1;
+ i__3 = m + 1 + m * h_dim1;
+ r_cnjg(&q__2, &temp);
+ q__1.r = h__[i__3].r * q__2.r - h__[i__3].i * q__2.i, q__1.i =
+ h__[i__3].r * q__2.i + h__[i__3].i * q__2.r;
+ h__[i__2].r = q__1.r, h__[i__2].i = q__1.i;
+ if (m + 2 <= i__) {
+ i__2 = m + 2 + (m + 1) * h_dim1;
+ i__3 = m + 2 + (m + 1) * h_dim1;
+ q__1.r = h__[i__3].r * temp.r - h__[i__3].i * temp.i,
+ q__1.i = h__[i__3].r * temp.i + h__[i__3].i *
+ temp.r;
+ h__[i__2].r = q__1.r, h__[i__2].i = q__1.i;
+ }
+ i__2 = i__;
+ for (j = m; j <= i__2; ++j) {
+ if (j != m + 1) {
+ if (i2 > j) {
+ i__3 = i2 - j;
+ cscal_(&i__3, &temp, &h__[j + (j + 1) * h_dim1],
+ ldh);
+ }
+ i__3 = j - i1;
+ r_cnjg(&q__1, &temp);
+ cscal_(&i__3, &q__1, &h__[i1 + j * h_dim1], &c__1);
+ if (*wantz) {
+ r_cnjg(&q__1, &temp);
+ cscal_(&nz, &q__1, &z__[*iloz + j * z_dim1], &
+ c__1);
+ }
+ }
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+
+/* Ensure that H(I,I-1) is real. */
+
+ i__1 = i__ + (i__ - 1) * h_dim1;
+ temp.r = h__[i__1].r, temp.i = h__[i__1].i;
+ if (r_imag(&temp) != 0.f) {
+ rtemp = c_abs(&temp);
+ i__1 = i__ + (i__ - 1) * h_dim1;
+ h__[i__1].r = rtemp, h__[i__1].i = 0.f;
+ q__1.r = temp.r / rtemp, q__1.i = temp.i / rtemp;
+ temp.r = q__1.r, temp.i = q__1.i;
+ if (i2 > i__) {
+ i__1 = i2 - i__;
+ r_cnjg(&q__1, &temp);
+ cscal_(&i__1, &q__1, &h__[i__ + (i__ + 1) * h_dim1], ldh);
+ }
+ i__1 = i__ - i1;
+ cscal_(&i__1, &temp, &h__[i1 + i__ * h_dim1], &c__1);
+ if (*wantz) {
+ cscal_(&nz, &temp, &z__[*iloz + i__ * z_dim1], &c__1);
+ }
+ }
+
+/* L130: */
+ }
+
+/* Failure to converge in remaining number of iterations */
+
+ *info = i__;
+ return 0;
+
+L140:
+
+/* H(I,I-1) is negligible: one eigenvalue has converged. */
+
+ i__1 = i__;
+ i__2 = i__ + i__ * h_dim1;
+ w[i__1].r = h__[i__2].r, w[i__1].i = h__[i__2].i;
+
+/* return to start of the main loop with new value of I. */
+
+ i__ = l - 1;
+ goto L30;
+
+L150:
+ return 0;
+
+/* End of CLAHQR */
+
+} /* clahqr_ */
diff --git a/contrib/libs/clapack/clahr2.c b/contrib/libs/clapack/clahr2.c
new file mode 100644
index 0000000000..22d3bbf083
--- /dev/null
+++ b/contrib/libs/clapack/clahr2.c
@@ -0,0 +1,329 @@
+/* clahr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int clahr2_(integer *n, integer *k, integer *nb, complex *a,
+ integer *lda, complex *tau, complex *t, integer *ldt, complex *y,
+ integer *ldy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2,
+ i__3;
+ complex q__1;
+
+ /* Local variables */
+ integer i__;
+ complex ei;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *), cgemm_(char *, char *, integer *, integer *, integer *
+, complex *, complex *, integer *, complex *, integer *, complex *
+, complex *, integer *), cgemv_(char *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *), ccopy_(integer *,
+ complex *, integer *, complex *, integer *), ctrmm_(char *, char *
+, char *, char *, integer *, integer *, complex *, complex *,
+ integer *, complex *, integer *),
+ caxpy_(integer *, complex *, complex *, integer *, complex *,
+ integer *), ctrmv_(char *, char *, char *, integer *, complex *,
+ integer *, complex *, integer *), clarfg_(
+ integer *, complex *, complex *, integer *, complex *), clacgv_(
+ integer *, complex *, integer *), clacpy_(char *, integer *,
+ integer *, complex *, integer *, complex *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAHR2 reduces the first NB columns of a complex general n-BY-(n-k+1) */
+/* matrix A so that elements below the k-th subdiagonal are zero. The */
+/* reduction is performed by a unitary similarity transformation */
+/* Q' * A * Q. The routine returns the matrices V and T which determine */
+/* Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T. */
+
+/* This is an auxiliary routine called by CGEHRD. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* K (input) INTEGER */
+/* The offset for the reduction. Elements below the k-th */
+/* subdiagonal in the first NB columns are reduced to zero. */
+/* K < N. */
+
+/* NB (input) INTEGER */
+/* The number of columns to be reduced. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N-K+1) */
+/* On entry, the n-by-(n-k+1) general matrix A. */
+/* On exit, the elements on and above the k-th subdiagonal in */
+/* the first NB columns are overwritten with the corresponding */
+/* elements of the reduced matrix; the elements below the k-th */
+/* subdiagonal, with the array TAU, represent the matrix Q as a */
+/* product of elementary reflectors. The other columns of A are */
+/* unchanged. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (output) COMPLEX array, dimension (NB) */
+/* The scalar factors of the elementary reflectors. See Further */
+/* Details. */
+
+/* T (output) COMPLEX array, dimension (LDT,NB) */
+/* The upper triangular matrix T. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= NB. */
+
+/* Y (output) COMPLEX array, dimension (LDY,NB) */
+/* The n-by-nb matrix Y. */
+
+/* LDY (input) INTEGER */
+/* The leading dimension of the array Y. LDY >= N. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of nb elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(nb). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in */
+/* A(i+k+1:n,i), and tau in TAU(i). */
+
+/* The elements of the vectors v together form the (n-k+1)-by-nb matrix */
+/* V which is needed, with T and Y, to apply the transformation to the */
+/* unreduced part of the matrix, using an update of the form: */
+/* A := (I - V*T*V') * (A - Y*V'). */
+
+/* The contents of A on exit are illustrated by the following example */
+/* with n = 7, k = 3 and nb = 2: */
+
+/* ( a a a a a ) */
+/* ( a a a a a ) */
+/* ( a a a a a ) */
+/* ( h h a a a ) */
+/* ( v1 h a a a ) */
+/* ( v1 v2 a a a ) */
+/* ( v1 v2 a a a ) */
+
+/* where a denotes an element of the original matrix A, h denotes a */
+/* modified element of the upper Hessenberg matrix H, and vi denotes an */
+/* element of the vector defining H(i). */
+
+/* This file is a slight modification of LAPACK-3.0's CLAHRD */
+/* incorporating improvements proposed by Quintana-Orti and Van de */
+/* Geijn. Note that the entries of A(1:K,2:NB) differ from those */
+/* returned by the original LAPACK routine. This function is */
+/* not backward compatible with LAPACK3.0. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ --tau;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ y_dim1 = *ldy;
+ y_offset = 1 + y_dim1;
+ y -= y_offset;
+
+ /* Function Body */
+ if (*n <= 1) {
+ return 0;
+ }
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (i__ > 1) {
+
+/* Update A(K+1:N,I) */
+
+/* Update I-th column of A - Y * V' */
+
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &a[*k + i__ - 1 + a_dim1], lda);
+ i__2 = *n - *k;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("NO TRANSPOSE", &i__2, &i__3, &q__1, &y[*k + 1 + y_dim1],
+ ldy, &a[*k + i__ - 1 + a_dim1], lda, &c_b2, &a[*k + 1 +
+ i__ * a_dim1], &c__1);
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &a[*k + i__ - 1 + a_dim1], lda);
+
+/* Apply I - V * T' * V' to this column (call it b) from the */
+/* left, using the last column of T as workspace */
+
+/* Let V = ( V1 ) and b = ( b1 ) (first I-1 rows) */
+/* ( V2 ) ( b2 ) */
+
+/* where V1 is unit lower triangular */
+
+/* w := V1' * b1 */
+
+ i__2 = i__ - 1;
+ ccopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 +
+ 1], &c__1);
+ i__2 = i__ - 1;
+ ctrmv_("Lower", "Conjugate transpose", "UNIT", &i__2, &a[*k + 1 +
+ a_dim1], lda, &t[*nb * t_dim1 + 1], &c__1);
+
+/* w := w + V2'*b2 */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*k + i__ +
+ a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b2, &
+ t[*nb * t_dim1 + 1], &c__1);
+
+/* w := T'*w */
+
+ i__2 = i__ - 1;
+ ctrmv_("Upper", "Conjugate transpose", "NON-UNIT", &i__2, &t[
+ t_offset], ldt, &t[*nb * t_dim1 + 1], &c__1);
+
+/* b2 := b2 - V2*w */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("NO TRANSPOSE", &i__2, &i__3, &q__1, &a[*k + i__ + a_dim1],
+ lda, &t[*nb * t_dim1 + 1], &c__1, &c_b2, &a[*k + i__ +
+ i__ * a_dim1], &c__1);
+
+/* b1 := b1 - V1*w */
+
+ i__2 = i__ - 1;
+ ctrmv_("Lower", "NO TRANSPOSE", "UNIT", &i__2, &a[*k + 1 + a_dim1]
+, lda, &t[*nb * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ caxpy_(&i__2, &q__1, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 + i__
+ * a_dim1], &c__1);
+
+ i__2 = *k + i__ - 1 + (i__ - 1) * a_dim1;
+ a[i__2].r = ei.r, a[i__2].i = ei.i;
+ }
+
+/* Generate the elementary reflector H(I) to annihilate */
+/* A(K+I+1:N,I) */
+
+ i__2 = *n - *k - i__ + 1;
+/* Computing MIN */
+ i__3 = *k + i__ + 1;
+ clarfg_(&i__2, &a[*k + i__ + i__ * a_dim1], &a[min(i__3, *n)+ i__ *
+ a_dim1], &c__1, &tau[i__]);
+ i__2 = *k + i__ + i__ * a_dim1;
+ ei.r = a[i__2].r, ei.i = a[i__2].i;
+ i__2 = *k + i__ + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Compute Y(K+1:N,I) */
+
+ i__2 = *n - *k;
+ i__3 = *n - *k - i__ + 1;
+ cgemv_("NO TRANSPOSE", &i__2, &i__3, &c_b2, &a[*k + 1 + (i__ + 1) *
+ a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b1, &y[*
+ k + 1 + i__ * y_dim1], &c__1);
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*k + i__ +
+ a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b1, &t[
+ i__ * t_dim1 + 1], &c__1);
+ i__2 = *n - *k;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("NO TRANSPOSE", &i__2, &i__3, &q__1, &y[*k + 1 + y_dim1], ldy,
+ &t[i__ * t_dim1 + 1], &c__1, &c_b2, &y[*k + 1 + i__ * y_dim1],
+ &c__1);
+ i__2 = *n - *k;
+ cscal_(&i__2, &tau[i__], &y[*k + 1 + i__ * y_dim1], &c__1);
+
+/* Compute T(1:I,I) */
+
+ i__2 = i__ - 1;
+ i__3 = i__;
+ q__1.r = -tau[i__3].r, q__1.i = -tau[i__3].i;
+ cscal_(&i__2, &q__1, &t[i__ * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ ctrmv_("Upper", "No Transpose", "NON-UNIT", &i__2, &t[t_offset], ldt,
+ &t[i__ * t_dim1 + 1], &c__1)
+ ;
+ i__2 = i__ + i__ * t_dim1;
+ i__3 = i__;
+ t[i__2].r = tau[i__3].r, t[i__2].i = tau[i__3].i;
+
+/* L10: */
+ }
+ i__1 = *k + *nb + *nb * a_dim1;
+ a[i__1].r = ei.r, a[i__1].i = ei.i;
+
+/* Compute Y(1:K,1:NB) */
+
+ clacpy_("ALL", k, nb, &a[(a_dim1 << 1) + 1], lda, &y[y_offset], ldy);
+ ctrmm_("RIGHT", "Lower", "NO TRANSPOSE", "UNIT", k, nb, &c_b2, &a[*k + 1
+ + a_dim1], lda, &y[y_offset], ldy);
+ if (*n > *k + *nb) {
+ i__1 = *n - *k - *nb;
+ cgemm_("NO TRANSPOSE", "NO TRANSPOSE", k, nb, &i__1, &c_b2, &a[(*nb +
+ 2) * a_dim1 + 1], lda, &a[*k + 1 + *nb + a_dim1], lda, &c_b2,
+ &y[y_offset], ldy);
+ }
+ ctrmm_("RIGHT", "Upper", "NO TRANSPOSE", "NON-UNIT", k, nb, &c_b2, &t[
+ t_offset], ldt, &y[y_offset], ldy);
+
+ return 0;
+
+/* End of CLAHR2 */
+
+} /* clahr2_ */
diff --git a/contrib/libs/clapack/clahrd.c b/contrib/libs/clapack/clahrd.c
new file mode 100644
index 0000000000..00ac3ab769
--- /dev/null
+++ b/contrib/libs/clapack/clahrd.c
@@ -0,0 +1,298 @@
+/* clahrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int clahrd_(integer *n, integer *k, integer *nb, complex *a, 
+	integer *lda, complex *tau, complex *t, integer *ldt, complex *y, 
+	integer *ldy)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2, 
+	    i__3;
+    complex q__1;
+
+    /* Local variables */
+    integer i__;
+    complex ei;
+    extern /* Subroutine */ int cscal_(integer *, complex *, complex *, 
+	    integer *), cgemv_(char *, integer *, integer *, complex *, 
+	    complex *, integer *, complex *, integer *, complex *, complex *, 
+	    integer *), ccopy_(integer *, complex *, integer *, 
+	    complex *, integer *), caxpy_(integer *, complex *, complex *, 
+	    integer *, complex *, integer *), ctrmv_(char *, char *, char *, 
+	    integer *, complex *, integer *, complex *, integer *), clarfg_(integer *, complex *, complex *, integer 
+	    *, complex *), clacgv_(integer *, complex *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAHRD reduces the first NB columns of a complex general n-by-(n-k+1) */
+/* matrix A so that elements below the k-th subdiagonal are zero. The */
+/* reduction is performed by a unitary similarity transformation */
+/* Q' * A * Q. The routine returns the matrices V and T which determine */
+/* Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T. */
+
+/* This is an OBSOLETE auxiliary routine. */
+/* This routine will be 'deprecated' in a future release. */
+/* Please use the new routine CLAHR2 instead. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* K (input) INTEGER */
+/* The offset for the reduction. Elements below the k-th */
+/* subdiagonal in the first NB columns are reduced to zero. */
+
+/* NB (input) INTEGER */
+/* The number of columns to be reduced. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N-K+1) */
+/* On entry, the n-by-(n-k+1) general matrix A. */
+/* On exit, the elements on and above the k-th subdiagonal in */
+/* the first NB columns are overwritten with the corresponding */
+/* elements of the reduced matrix; the elements below the k-th */
+/* subdiagonal, with the array TAU, represent the matrix Q as a */
+/* product of elementary reflectors. The other columns of A are */
+/* unchanged. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (output) COMPLEX array, dimension (NB) */
+/* The scalar factors of the elementary reflectors. See Further */
+/* Details. */
+
+/* T (output) COMPLEX array, dimension (LDT,NB) */
+/* The upper triangular matrix T. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= NB. */
+
+/* Y (output) COMPLEX array, dimension (LDY,NB) */
+/* The n-by-nb matrix Y. */
+
+/* LDY (input) INTEGER */
+/* The leading dimension of the array Y. LDY >= max(1,N). */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of nb elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(nb). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in */
+/* A(i+k+1:n,i), and tau in TAU(i). */
+
+/* The elements of the vectors v together form the (n-k+1)-by-nb matrix */
+/* V which is needed, with T and Y, to apply the transformation to the */
+/* unreduced part of the matrix, using an update of the form: */
+/* A := (I - V*T*V') * (A - Y*V'). */
+
+/* The contents of A on exit are illustrated by the following example */
+/* with n = 7, k = 3 and nb = 2: */
+
+/* ( a h a a a ) */
+/* ( a h a a a ) */
+/* ( a h a a a ) */
+/* ( h h a a a ) */
+/* ( v1 h a a a ) */
+/* ( v1 v2 a a a ) */
+/* ( v1 v2 a a a ) */
+
+/* where a denotes an element of the original matrix A, h denotes a */
+/* modified element of the upper Hessenberg matrix H, and vi denotes an */
+/* element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible (nothing to reduce for N <= 1) */
+
+    /* Parameter adjustments */
+    --tau;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    t_dim1 = *ldt;
+    t_offset = 1 + t_dim1;
+    t -= t_offset;
+    y_dim1 = *ldy;
+    y_offset = 1 + y_dim1;
+    y -= y_offset;
+
+    /* Function Body */
+    if (*n <= 1) {
+	return 0;
+    }
+
+    i__1 = *nb;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if (i__ > 1) {
+
+/* Update A(1:n,i): apply the i-1 previous reflectors to column i */
+
+/* Compute i-th column of A - Y * V' */
+
+	    i__2 = i__ - 1;
+	    clacgv_(&i__2, &a[*k + i__ - 1 + a_dim1], lda);
+	    i__2 = i__ - 1;
+	    q__1.r = -1.f, q__1.i = -0.f;
+	    cgemv_("No transpose", n, &i__2, &q__1, &y[y_offset], ldy, &a[*k 
+		    + i__ - 1 + a_dim1], lda, &c_b2, &a[i__ * a_dim1 + 1], &
+		    c__1);
+	    i__2 = i__ - 1;
+	    clacgv_(&i__2, &a[*k + i__ - 1 + a_dim1], lda);
+
+/* Apply I - V * T' * V' to this column (call it b) from the */
+/* left, using the last column of T as workspace */
+
+/* Let V = ( V1 ) and b = ( b1 ) (first I-1 rows) */
+/* ( V2 ) ( b2 ) */
+
+/* where V1 is unit lower triangular */
+
+/* w := V1' * b1 */
+
+	    i__2 = i__ - 1;
+	    ccopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 + 
+		    1], &c__1);
+	    i__2 = i__ - 1;
+	    ctrmv_("Lower", "Conjugate transpose", "Unit", &i__2, &a[*k + 1 + 
+		    a_dim1], lda, &t[*nb * t_dim1 + 1], &c__1);
+
+/* w := w + V2'*b2 */
+
+	    i__2 = *n - *k - i__ + 1;
+	    i__3 = i__ - 1;
+	    cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*k + i__ + 
+		    a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b2, &
+		    t[*nb * t_dim1 + 1], &c__1);
+
+/* w := T'*w */
+
+	    i__2 = i__ - 1;
+	    ctrmv_("Upper", "Conjugate transpose", "Non-unit", &i__2, &t[
+		    t_offset], ldt, &t[*nb * t_dim1 + 1], &c__1);
+
+/* b2 := b2 - V2*w */
+
+	    i__2 = *n - *k - i__ + 1;
+	    i__3 = i__ - 1;
+	    q__1.r = -1.f, q__1.i = -0.f;
+	    cgemv_("No transpose", &i__2, &i__3, &q__1, &a[*k + i__ + a_dim1], 
+		     lda, &t[*nb * t_dim1 + 1], &c__1, &c_b2, &a[*k + i__ + 
+		    i__ * a_dim1], &c__1);
+
+/* b1 := b1 - V1*w */
+
+	    i__2 = i__ - 1;
+	    ctrmv_("Lower", "No transpose", "Unit", &i__2, &a[*k + 1 + a_dim1]
+, lda, &t[*nb * t_dim1 + 1], &c__1);
+	    i__2 = i__ - 1;
+	    q__1.r = -1.f, q__1.i = -0.f;
+	    caxpy_(&i__2, &q__1, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 + i__ 
+		    * a_dim1], &c__1);
+
+	    i__2 = *k + i__ - 1 + (i__ - 1) * a_dim1;
+	    a[i__2].r = ei.r, a[i__2].i = ei.i;
+	}
+
+/* Generate the elementary reflector H(i) to annihilate */
+/* A(k+i+1:n,i) */
+
+	i__2 = *k + i__ + i__ * a_dim1;
+	ei.r = a[i__2].r, ei.i = a[i__2].i;
+	i__2 = *n - *k - i__ + 1;
+/* Computing MIN */
+	i__3 = *k + i__ + 1;
+	clarfg_(&i__2, &ei, &a[min(i__3, *n)+ i__ * a_dim1], &c__1, &tau[i__])
+	;
+	i__2 = *k + i__ + i__ * a_dim1;
+	a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Compute Y(1:n,i), the i-th column of Y = A * V * T */
+
+	i__2 = *n - *k - i__ + 1;
+	cgemv_("No transpose", n, &i__2, &c_b2, &a[(i__ + 1) * a_dim1 + 1], 
+		lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b1, &y[i__ * 
+		y_dim1 + 1], &c__1);
+	i__2 = *n - *k - i__ + 1;
+	i__3 = i__ - 1;
+	cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*k + i__ + 
+		a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b1, &t[
+		i__ * t_dim1 + 1], &c__1);
+	i__2 = i__ - 1;
+	q__1.r = -1.f, q__1.i = -0.f;
+	cgemv_("No transpose", n, &i__2, &q__1, &y[y_offset], ldy, &t[i__ * 
+		t_dim1 + 1], &c__1, &c_b2, &y[i__ * y_dim1 + 1], &c__1);
+	cscal_(n, &tau[i__], &y[i__ * y_dim1 + 1], &c__1);
+
+/* Compute T(1:i,i), the i-th column of the triangular factor T */
+
+	i__2 = i__ - 1;
+	i__3 = i__;
+	q__1.r = -tau[i__3].r, q__1.i = -tau[i__3].i;
+	cscal_(&i__2, &q__1, &t[i__ * t_dim1 + 1], &c__1);
+	i__2 = i__ - 1;
+	ctrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[t_offset], ldt, 
+		&t[i__ * t_dim1 + 1], &c__1)
+		;
+	i__2 = i__ + i__ * t_dim1;
+	i__3 = i__;
+	t[i__2].r = tau[i__3].r, t[i__2].i = tau[i__3].i;
+
+/* L10: */
+    }
+    i__1 = *k + *nb + *nb * a_dim1;
+    a[i__1].r = ei.r, a[i__1].i = ei.i;
+
+    return 0;
+
+/* End of CLAHRD */
+
+} /* clahrd_ */
diff --git a/contrib/libs/clapack/claic1.c b/contrib/libs/clapack/claic1.c
new file mode 100644
index 0000000000..06e41da3a2
--- /dev/null
+++ b/contrib/libs/clapack/claic1.c
@@ -0,0 +1,448 @@
+/* claic1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int claic1_(integer *job, integer *j, complex *x, real *sest, 
+	 complex *w, complex *gamma, real *sestpr, complex *s, complex *c__)
+{
+    /* System generated locals */
+    real r__1, r__2;
+    complex q__1, q__2, q__3, q__4, q__5, q__6;
+
+    /* Builtin functions */
+    double c_abs(complex *);
+    void r_cnjg(complex *, complex *), c_sqrt(complex *, complex *);
+    double sqrt(doublereal);
+    void c_div(complex *, complex *, complex *);
+
+    /* Local variables */
+    real b, t, s1, s2, scl, eps, tmp;
+    complex sine;
+    real test, zeta1, zeta2;
+    complex alpha;
+    extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer 
+	    *, complex *, integer *);
+    real norma, absgam, absalp;
+    extern doublereal slamch_(char *);
+    complex cosine;
+    real absest;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAIC1 applies one step of incremental condition estimation in */
+/* its simplest version: */
+
+/* Let x, twonorm(x) = 1, be an approximate singular vector of an j-by-j */
+/* lower triangular matrix L, such that */
+/* twonorm(L*x) = sest */
+/* Then CLAIC1 computes sestpr, s, c such that */
+/* the vector */
+/* [ s*x ] */
+/* xhat = [ c ] */
+/* is an approximate singular vector of */
+/* [ L 0 ] */
+/* Lhat = [ w' gamma ] */
+/* in the sense that */
+/* twonorm(Lhat*xhat) = sestpr. */
+
+/* Depending on JOB, an estimate for the largest or smallest singular */
+/* value is computed. */
+
+/* Note that [s c]' and sestpr**2 is an eigenpair of the system */
+
+/* diag(sest*sest, 0) + [alpha gamma] * [ conjg(alpha) ] */
+/* [ conjg(gamma) ] */
+
+/* where alpha = conjg(x)'*w. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) INTEGER */
+/* = 1: an estimate for the largest singular value is computed. */
+/* = 2: an estimate for the smallest singular value is computed. */
+
+/* J (input) INTEGER */
+/* Length of X and W */
+
+/* X (input) COMPLEX array, dimension (J) */
+/* The j-vector x. */
+
+/* SEST (input) REAL */
+/* Estimated singular value of j by j matrix L */
+
+/* W (input) COMPLEX array, dimension (J) */
+/* The j-vector w. */
+
+/* GAMMA (input) COMPLEX */
+/* The diagonal element gamma. */
+
+/* SESTPR (output) REAL */
+/* Estimated singular value of (j+1) by (j+1) matrix Lhat. */
+
+/* S (output) COMPLEX */
+/* Sine needed in forming xhat. */
+
+/* C (output) COMPLEX */
+/* Cosine needed in forming xhat. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --w;
+    --x;
+
+    /* Function Body */
+    eps = slamch_("Epsilon");
+    cdotc_(&q__1, j, &x[1], &c__1, &w[1], &c__1);
+    alpha.r = q__1.r, alpha.i = q__1.i;
+
+    absalp = c_abs(&alpha);
+    absgam = c_abs(gamma);
+    absest = dabs(*sest);
+
+    if (*job == 1) {
+
+/* Estimating largest singular value */
+
+/* special cases: sest, |alpha| or |gamma| relatively negligible */
+
+	if (*sest == 0.f) {
+	    s1 = dmax(absgam,absalp);
+	    if (s1 == 0.f) {
+		s->r = 0.f, s->i = 0.f;
+		c__->r = 1.f, c__->i = 0.f;
+		*sestpr = 0.f;
+	    } else {
+		q__1.r = alpha.r / s1, q__1.i = alpha.i / s1;
+		s->r = q__1.r, s->i = q__1.i;
+		q__1.r = gamma->r / s1, q__1.i = gamma->i / s1;
+		c__->r = q__1.r, c__->i = q__1.i;
+		r_cnjg(&q__4, s);
+		q__3.r = s->r * q__4.r - s->i * q__4.i, q__3.i = s->r * 
+			q__4.i + s->i * q__4.r;
+		r_cnjg(&q__6, c__);
+		q__5.r = c__->r * q__6.r - c__->i * q__6.i, q__5.i = c__->r * 
+			q__6.i + c__->i * q__6.r;
+		q__2.r = q__3.r + q__5.r, q__2.i = q__3.i + q__5.i;
+		c_sqrt(&q__1, &q__2);
+		tmp = q__1.r;
+		q__1.r = s->r / tmp, q__1.i = s->i / tmp;
+		s->r = q__1.r, s->i = q__1.i;
+		q__1.r = c__->r / tmp, q__1.i = c__->i / tmp;
+		c__->r = q__1.r, c__->i = q__1.i;
+		*sestpr = s1 * tmp;
+	    }
+	    return 0;
+	} else if (absgam <= eps * absest) {
+	    s->r = 1.f, s->i = 0.f;
+	    c__->r = 0.f, c__->i = 0.f;
+	    tmp = dmax(absest,absalp);
+	    s1 = absest / tmp;
+	    s2 = absalp / tmp;
+	    *sestpr = tmp * sqrt(s1 * s1 + s2 * s2);
+	    return 0;
+	} else if (absalp <= eps * absest) {
+	    s1 = absgam;
+	    s2 = absest;
+	    if (s1 <= s2) {
+		s->r = 1.f, s->i = 0.f;
+		c__->r = 0.f, c__->i = 0.f;
+		*sestpr = s2;
+	    } else {
+		s->r = 0.f, s->i = 0.f;
+		c__->r = 1.f, c__->i = 0.f;
+		*sestpr = s1;
+	    }
+	    return 0;
+	} else if (absest <= eps * absalp || absest <= eps * absgam) {
+	    s1 = absgam;
+	    s2 = absalp;
+	    if (s1 <= s2) {
+		tmp = s1 / s2;
+		scl = sqrt(tmp * tmp + 1.f);
+		*sestpr = s2 * scl;
+		q__2.r = alpha.r / s2, q__2.i = alpha.i / s2;
+		q__1.r = q__2.r / scl, q__1.i = q__2.i / scl;
+		s->r = q__1.r, s->i = q__1.i;
+		q__2.r = gamma->r / s2, q__2.i = gamma->i / s2;
+		q__1.r = q__2.r / scl, q__1.i = q__2.i / scl;
+		c__->r = q__1.r, c__->i = q__1.i;
+	    } else {
+		tmp = s2 / s1;
+		scl = sqrt(tmp * tmp + 1.f);
+		*sestpr = s1 * scl;
+		q__2.r = alpha.r / s1, q__2.i = alpha.i / s1;
+		q__1.r = q__2.r / scl, q__1.i = q__2.i / scl;
+		s->r = q__1.r, s->i = q__1.i;
+		q__2.r = gamma->r / s1, q__2.i = gamma->i / s1;
+		q__1.r = q__2.r / scl, q__1.i = q__2.i / scl;
+		c__->r = q__1.r, c__->i = q__1.i;
+	    }
+	    return 0;
+	} else {
+
+/* normal case: no quantity is negligible; compute the root directly */
+
+	    zeta1 = absalp / absest;
+	    zeta2 = absgam / absest;
+
+	    b = (1.f - zeta1 * zeta1 - zeta2 * zeta2) * .5f;
+	    r__1 = zeta1 * zeta1;
+	    c__->r = r__1, c__->i = 0.f;
+	    if (b > 0.f) {
+		r__1 = b * b;
+		q__4.r = r__1 + c__->r, q__4.i = c__->i;
+		c_sqrt(&q__3, &q__4);
+		q__2.r = b + q__3.r, q__2.i = q__3.i;
+		c_div(&q__1, c__, &q__2);
+		t = q__1.r;
+	    } else {
+		r__1 = b * b;
+		q__3.r = r__1 + c__->r, q__3.i = c__->i;
+		c_sqrt(&q__2, &q__3);
+		q__1.r = q__2.r - b, q__1.i = q__2.i;
+		t = q__1.r;
+	    }
+
+	    q__3.r = alpha.r / absest, q__3.i = alpha.i / absest;
+	    q__2.r = -q__3.r, q__2.i = -q__3.i;
+	    q__1.r = q__2.r / t, q__1.i = q__2.i / t;
+	    sine.r = q__1.r, sine.i = q__1.i;
+	    q__3.r = gamma->r / absest, q__3.i = gamma->i / absest;
+	    q__2.r = -q__3.r, q__2.i = -q__3.i;
+	    r__1 = t + 1.f;
+	    q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
+	    cosine.r = q__1.r, cosine.i = q__1.i;
+	    r_cnjg(&q__4, &sine);
+	    q__3.r = sine.r * q__4.r - sine.i * q__4.i, q__3.i = sine.r * 
+		    q__4.i + sine.i * q__4.r;
+	    r_cnjg(&q__6, &cosine);
+	    q__5.r = cosine.r * q__6.r - cosine.i * q__6.i, q__5.i = cosine.r 
+		    * q__6.i + cosine.i * q__6.r;
+	    q__2.r = q__3.r + q__5.r, q__2.i = q__3.i + q__5.i;
+	    c_sqrt(&q__1, &q__2);
+	    tmp = q__1.r;
+	    q__1.r = sine.r / tmp, q__1.i = sine.i / tmp;
+	    s->r = q__1.r, s->i = q__1.i;
+	    q__1.r = cosine.r / tmp, q__1.i = cosine.i / tmp;
+	    c__->r = q__1.r, c__->i = q__1.i;
+	    *sestpr = sqrt(t + 1.f) * absest;
+	    return 0;
+	}
+
+    } else if (*job == 2) {
+
+/* Estimating smallest singular value */
+
+/* special cases: sest, |alpha| or |gamma| relatively negligible */
+
+	if (*sest == 0.f) {
+	    *sestpr = 0.f;
+	    if (dmax(absgam,absalp) == 0.f) {
+		sine.r = 1.f, sine.i = 0.f;
+		cosine.r = 0.f, cosine.i = 0.f;
+	    } else {
+		r_cnjg(&q__2, gamma);
+		q__1.r = -q__2.r, q__1.i = -q__2.i;
+		sine.r = q__1.r, sine.i = q__1.i;
+		r_cnjg(&q__1, &alpha);
+		cosine.r = q__1.r, cosine.i = q__1.i;
+	    }
+/* Computing MAX */
+	    r__1 = c_abs(&sine), r__2 = c_abs(&cosine);
+	    s1 = dmax(r__1,r__2);
+	    q__1.r = sine.r / s1, q__1.i = sine.i / s1;
+	    s->r = q__1.r, s->i = q__1.i;
+	    q__1.r = cosine.r / s1, q__1.i = cosine.i / s1;
+	    c__->r = q__1.r, c__->i = q__1.i;
+	    r_cnjg(&q__4, s);
+	    q__3.r = s->r * q__4.r - s->i * q__4.i, q__3.i = s->r * q__4.i + 
+		    s->i * q__4.r;
+	    r_cnjg(&q__6, c__);
+	    q__5.r = c__->r * q__6.r - c__->i * q__6.i, q__5.i = c__->r * 
+		    q__6.i + c__->i * q__6.r;
+	    q__2.r = q__3.r + q__5.r, q__2.i = q__3.i + q__5.i;
+	    c_sqrt(&q__1, &q__2);
+	    tmp = q__1.r;
+	    q__1.r = s->r / tmp, q__1.i = s->i / tmp;
+	    s->r = q__1.r, s->i = q__1.i;
+	    q__1.r = c__->r / tmp, q__1.i = c__->i / tmp;
+	    c__->r = q__1.r, c__->i = q__1.i;
+	    return 0;
+	} else if (absgam <= eps * absest) {
+	    s->r = 0.f, s->i = 0.f;
+	    c__->r = 1.f, c__->i = 0.f;
+	    *sestpr = absgam;
+	    return 0;
+	} else if (absalp <= eps * absest) {
+	    s1 = absgam;
+	    s2 = absest;
+	    if (s1 <= s2) {
+		s->r = 0.f, s->i = 0.f;
+		c__->r = 1.f, c__->i = 0.f;
+		*sestpr = s1;
+	    } else {
+		s->r = 1.f, s->i = 0.f;
+		c__->r = 0.f, c__->i = 0.f;
+		*sestpr = s2;
+	    }
+	    return 0;
+	} else if (absest <= eps * absalp || absest <= eps * absgam) {
+	    s1 = absgam;
+	    s2 = absalp;
+	    if (s1 <= s2) {
+		tmp = s1 / s2;
+		scl = sqrt(tmp * tmp + 1.f);
+		*sestpr = absest * (tmp / scl);
+		r_cnjg(&q__4, gamma);
+		q__3.r = q__4.r / s2, q__3.i = q__4.i / s2;
+		q__2.r = -q__3.r, q__2.i = -q__3.i;
+		q__1.r = q__2.r / scl, q__1.i = q__2.i / scl;
+		s->r = q__1.r, s->i = q__1.i;
+		r_cnjg(&q__3, &alpha);
+		q__2.r = q__3.r / s2, q__2.i = q__3.i / s2;
+		q__1.r = q__2.r / scl, q__1.i = q__2.i / scl;
+		c__->r = q__1.r, c__->i = q__1.i;
+	    } else {
+		tmp = s2 / s1;
+		scl = sqrt(tmp * tmp + 1.f);
+		*sestpr = absest / scl;
+		r_cnjg(&q__4, gamma);
+		q__3.r = q__4.r / s1, q__3.i = q__4.i / s1;
+		q__2.r = -q__3.r, q__2.i = -q__3.i;
+		q__1.r = q__2.r / scl, q__1.i = q__2.i / scl;
+		s->r = q__1.r, s->i = q__1.i;
+		r_cnjg(&q__3, &alpha);
+		q__2.r = q__3.r / s1, q__2.i = q__3.i / s1;
+		q__1.r = q__2.r / scl, q__1.i = q__2.i / scl;
+		c__->r = q__1.r, c__->i = q__1.i;
+	    }
+	    return 0;
+	} else {
+
+/* normal case: no quantity is negligible; compute the root directly */
+
+	    zeta1 = absalp / absest;
+	    zeta2 = absgam / absest;
+
+/* Computing MAX */
+	    r__1 = zeta1 * zeta1 + 1.f + zeta1 * zeta2, r__2 = zeta1 * zeta2 
+		    + zeta2 * zeta2;
+	    norma = dmax(r__1,r__2);
+
+/* See if root is closer to zero or to ONE */
+
+	    test = (zeta1 - zeta2) * 2.f * (zeta1 + zeta2) + 1.f;
+	    if (test >= 0.f) {
+
+/* root is close to zero, compute directly */
+
+		b = (zeta1 * zeta1 + zeta2 * zeta2 + 1.f) * .5f;
+		r__1 = zeta2 * zeta2;
+		c__->r = r__1, c__->i = 0.f;
+		r__2 = b * b;
+		q__2.r = r__2 - c__->r, q__2.i = -c__->i;
+		r__1 = b + sqrt(c_abs(&q__2));
+		q__1.r = c__->r / r__1, q__1.i = c__->i / r__1;
+		t = q__1.r;
+		q__2.r = alpha.r / absest, q__2.i = alpha.i / absest;
+		r__1 = 1.f - t;
+		q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
+		sine.r = q__1.r, sine.i = q__1.i;
+		q__3.r = gamma->r / absest, q__3.i = gamma->i / absest;
+		q__2.r = -q__3.r, q__2.i = -q__3.i;
+		q__1.r = q__2.r / t, q__1.i = q__2.i / t;
+		cosine.r = q__1.r, cosine.i = q__1.i;
+		*sestpr = sqrt(t + eps * 4.f * eps * norma) * absest;
+	    } else {
+
+/* root is closer to ONE, shift by that amount */
+
+		b = (zeta2 * zeta2 + zeta1 * zeta1 - 1.f) * .5f;
+		r__1 = zeta1 * zeta1;
+		c__->r = r__1, c__->i = 0.f;
+		if (b >= 0.f) {
+		    q__2.r = -c__->r, q__2.i = -c__->i;
+		    r__1 = b * b;
+		    q__5.r = r__1 + c__->r, q__5.i = c__->i;
+		    c_sqrt(&q__4, &q__5);
+		    q__3.r = b + q__4.r, q__3.i = q__4.i;
+		    c_div(&q__1, &q__2, &q__3);
+		    t = q__1.r;
+		} else {
+		    r__1 = b * b;
+		    q__3.r = r__1 + c__->r, q__3.i = c__->i;
+		    c_sqrt(&q__2, &q__3);
+		    q__1.r = b - q__2.r, q__1.i = -q__2.i;
+		    t = q__1.r;
+		}
+		q__3.r = alpha.r / absest, q__3.i = alpha.i / absest;
+		q__2.r = -q__3.r, q__2.i = -q__3.i;
+		q__1.r = q__2.r / t, q__1.i = q__2.i / t;
+		sine.r = q__1.r, sine.i = q__1.i;
+		q__3.r = gamma->r / absest, q__3.i = gamma->i / absest;
+		q__2.r = -q__3.r, q__2.i = -q__3.i;
+		r__1 = t + 1.f;
+		q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
+		cosine.r = q__1.r, cosine.i = q__1.i;
+		*sestpr = sqrt(t + 1.f + eps * 4.f * eps * norma) * absest;
+	    }
+	    r_cnjg(&q__4, &sine);
+	    q__3.r = sine.r * q__4.r - sine.i * q__4.i, q__3.i = sine.r * 
+		    q__4.i + sine.i * q__4.r;
+	    r_cnjg(&q__6, &cosine);
+	    q__5.r = cosine.r * q__6.r - cosine.i * q__6.i, q__5.i = cosine.r 
+		    * q__6.i + cosine.i * q__6.r;
+	    q__2.r = q__3.r + q__5.r, q__2.i = q__3.i + q__5.i;
+	    c_sqrt(&q__1, &q__2);
+	    tmp = q__1.r;
+	    q__1.r = sine.r / tmp, q__1.i = sine.i / tmp;
+	    s->r = q__1.r, s->i = q__1.i;
+	    q__1.r = cosine.r / tmp, q__1.i = cosine.i / tmp;
+	    c__->r = q__1.r, c__->i = q__1.i;
+	    return 0;
+
+	}
+    }
+    return 0;
+
+/* End of CLAIC1 */
+
+} /* claic1_ */
diff --git a/contrib/libs/clapack/clals0.c b/contrib/libs/clapack/clals0.c
new file mode 100644
index 0000000000..0e83601707
--- /dev/null
+++ b/contrib/libs/clapack/clals0.c
@@ -0,0 +1,558 @@
+/* clals0.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b5 = -1.f;
+static integer c__1 = 1;
+static real c_b13 = 1.f;
+static real c_b15 = 0.f;
+static integer c__0 = 0;
+
+/* Subroutine */ int clals0_(integer *icompq, integer *nl, integer *nr,
+ integer *sqre, integer *nrhs, complex *b, integer *ldb, complex *bx,
+ integer *ldbx, integer *perm, integer *givptr, integer *givcol,
+ integer *ldgcol, real *givnum, integer *ldgnum, real *poles, real *
+ difl, real *difr, real *z__, integer *k, real *c__, real *s, real *
+ rwork, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, difr_dim1, difr_offset, givnum_dim1,
+ givnum_offset, poles_dim1, poles_offset, b_dim1, b_offset,
+ bx_dim1, bx_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer i__, j, m, n;
+ real dj;
+ integer nlp1, jcol;
+ real temp;
+ integer jrow;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ real diflj, difrj, dsigj;
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), sgemv_(char *, integer *, integer *, real *
+, real *, integer *, real *, integer *, real *, real *, integer *), csrot_(integer *, complex *, integer *, complex *,
+ integer *, real *, real *);
+ extern doublereal slamc3_(real *, real *);
+ extern /* Subroutine */ int clascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, complex *, integer *, integer *), csscal_(integer *, real *, complex *, integer *),
+ clacpy_(char *, integer *, integer *, complex *, integer *,
+ complex *, integer *), xerbla_(char *, integer *);
+ real dsigjp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLALS0 applies back the multiplying factors of either the left or the */
+/* right singular vector matrix of a diagonal matrix appended by a row */
+/* to the right hand side matrix B in solving the least squares problem */
+/* using the divide-and-conquer SVD approach. */
+
+/* For the left singular vector matrix, three types of orthogonal */
+/* matrices are involved: */
+
+/* (1L) Givens rotations: the number of such rotations is GIVPTR; the */
+/* pairs of columns/rows they were applied to are stored in GIVCOL; */
+/* and the C- and S-values of these rotations are stored in GIVNUM. */
+
+/* (2L) Permutation. The (NL+1)-st row of B is to be moved to the first */
+/* row, and for J=2:N, PERM(J)-th row of B is to be moved to the */
+/* J-th row. */
+
+/* (3L) The left singular vector matrix of the remaining matrix. */
+
+/* For the right singular vector matrix, four types of orthogonal */
+/* matrices are involved: */
+
+/* (1R) The right singular vector matrix of the remaining matrix. */
+
+/* (2R) If SQRE = 1, one extra Givens rotation to generate the right */
+/* null space. */
+
+/* (3R) The inverse transformation of (2L). */
+
+/* (4R) The inverse transformation of (1L). */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* Specifies whether singular vectors are to be computed in */
+/* factored form: */
+/* = 0: Left singular vector matrix. */
+/* = 1: Right singular vector matrix. */
+
+/* NL (input) INTEGER */
+/* The row dimension of the upper block. NL >= 1. */
+
+/* NR (input) INTEGER */
+/* The row dimension of the lower block. NR >= 1. */
+
+/* SQRE (input) INTEGER */
+/* = 0: the lower block is an NR-by-NR square matrix. */
+/* = 1: the lower block is an NR-by-(NR+1) rectangular matrix. */
+
+/* The bidiagonal matrix has row dimension N = NL + NR + 1, */
+/* and column dimension M = N + SQRE. */
+
+/* NRHS (input) INTEGER */
+/* The number of columns of B and BX. NRHS must be at least 1. */
+
+/* B (input/output) COMPLEX array, dimension ( LDB, NRHS ) */
+/* On input, B contains the right hand sides of the least */
+/* squares problem in rows 1 through M. On output, B contains */
+/* the solution X in rows 1 through N. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB must be at least */
+/* max(1,MAX( M, N ) ). */
+
+/* BX (workspace) COMPLEX array, dimension ( LDBX, NRHS ) */
+
+/* LDBX (input) INTEGER */
+/* The leading dimension of BX. */
+
+/* PERM (input) INTEGER array, dimension ( N ) */
+/* The permutations (from deflation and sorting) applied */
+/* to the two blocks. */
+
+/* GIVPTR (input) INTEGER */
+/* The number of Givens rotations which took place in this */
+/* subproblem. */
+
+/* GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 ) */
+/* Each pair of numbers indicates a pair of rows/columns */
+/* involved in a Givens rotation. */
+
+/* LDGCOL (input) INTEGER */
+/* The leading dimension of GIVCOL, must be at least N. */
+
+/* GIVNUM (input) REAL array, dimension ( LDGNUM, 2 ) */
+/* Each number indicates the C or S value used in the */
+/* corresponding Givens rotation. */
+
+/* LDGNUM (input) INTEGER */
+/* The leading dimension of arrays DIFR, POLES and */
+/* GIVNUM, must be at least K. */
+
+/* POLES (input) REAL array, dimension ( LDGNUM, 2 ) */
+/* On entry, POLES(1:K, 1) contains the new singular */
+/* values obtained from solving the secular equation, and */
+/* POLES(1:K, 2) is an array containing the poles in the secular */
+/* equation. */
+
+/* DIFL (input) REAL array, dimension ( K ). */
+/* On entry, DIFL(I) is the distance between I-th updated */
+/* (undeflated) singular value and the I-th (undeflated) old */
+/* singular value. */
+
+/* DIFR (input) REAL array, dimension ( LDGNUM, 2 ). */
+/* On entry, DIFR(I, 1) contains the distances between I-th */
+/* updated (undeflated) singular value and the I+1-th */
+/* (undeflated) old singular value. And DIFR(I, 2) is the */
+/* normalizing factor for the I-th right singular vector. */
+
+/* Z (input) REAL array, dimension ( K ) */
+/* Contain the components of the deflation-adjusted updating row */
+/* vector. */
+
+/* K (input) INTEGER */
+/* Contains the dimension of the non-deflated matrix, */
+/* This is the order of the related secular equation. 1 <= K <=N. */
+
+/* C (input) REAL */
+/* C contains garbage if SQRE =0 and the C-value of a Givens */
+/* rotation related to the right null space if SQRE = 1. */
+
+/* S (input) REAL */
+/* S contains garbage if SQRE =0 and the S-value of a Givens */
+/* rotation related to the right null space if SQRE = 1. */
+
+/* RWORK (workspace) REAL array, dimension */
+/* ( K*(1+NRHS) + 2*NRHS ) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Ren-Cang Li, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ bx_dim1 = *ldbx;
+ bx_offset = 1 + bx_dim1;
+ bx -= bx_offset;
+ --perm;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ difr_dim1 = *ldgnum;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ poles_dim1 = *ldgnum;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ givnum_dim1 = *ldgnum;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ --difl;
+ --z__;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*nl < 1) {
+ *info = -2;
+ } else if (*nr < 1) {
+ *info = -3;
+ } else if (*sqre < 0 || *sqre > 1) {
+ *info = -4;
+ }
+
+ n = *nl + *nr + 1;
+
+ if (*nrhs < 1) {
+ *info = -5;
+ } else if (*ldb < n) {
+ *info = -7;
+ } else if (*ldbx < n) {
+ *info = -9;
+ } else if (*givptr < 0) {
+ *info = -11;
+ } else if (*ldgcol < n) {
+ *info = -13;
+ } else if (*ldgnum < n) {
+ *info = -15;
+ } else if (*k < 1) {
+ *info = -20;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLALS0", &i__1);
+ return 0;
+ }
+
+ m = n + *sqre;
+ nlp1 = *nl + 1;
+
+ if (*icompq == 0) {
+
+/* Apply back orthogonal transformations from the left. */
+
+/* Step (1L): apply back the Givens rotations performed. */
+
+ i__1 = *givptr;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ csrot_(nrhs, &b[givcol[i__ + (givcol_dim1 << 1)] + b_dim1], ldb, &
+ b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[i__ +
+ (givnum_dim1 << 1)], &givnum[i__ + givnum_dim1]);
+/* L10: */
+ }
+
+/* Step (2L): permute rows of B. */
+
+ ccopy_(nrhs, &b[nlp1 + b_dim1], ldb, &bx[bx_dim1 + 1], ldbx);
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ ccopy_(nrhs, &b[perm[i__] + b_dim1], ldb, &bx[i__ + bx_dim1],
+ ldbx);
+/* L20: */
+ }
+
+/* Step (3L): apply the inverse of the left singular vector */
+/* matrix to BX. */
+
+ if (*k == 1) {
+ ccopy_(nrhs, &bx[bx_offset], ldbx, &b[b_offset], ldb);
+ if (z__[1] < 0.f) {
+ csscal_(nrhs, &c_b5, &b[b_offset], ldb);
+ }
+ } else {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ diflj = difl[j];
+ dj = poles[j + poles_dim1];
+ dsigj = -poles[j + (poles_dim1 << 1)];
+ if (j < *k) {
+ difrj = -difr[j + difr_dim1];
+ dsigjp = -poles[j + 1 + (poles_dim1 << 1)];
+ }
+ if (z__[j] == 0.f || poles[j + (poles_dim1 << 1)] == 0.f) {
+ rwork[j] = 0.f;
+ } else {
+ rwork[j] = -poles[j + (poles_dim1 << 1)] * z__[j] / diflj
+ / (poles[j + (poles_dim1 << 1)] + dj);
+ }
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (z__[i__] == 0.f || poles[i__ + (poles_dim1 << 1)] ==
+ 0.f) {
+ rwork[i__] = 0.f;
+ } else {
+ rwork[i__] = poles[i__ + (poles_dim1 << 1)] * z__[i__]
+ / (slamc3_(&poles[i__ + (poles_dim1 << 1)], &
+ dsigj) - diflj) / (poles[i__ + (poles_dim1 <<
+ 1)] + dj);
+ }
+/* L30: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ if (z__[i__] == 0.f || poles[i__ + (poles_dim1 << 1)] ==
+ 0.f) {
+ rwork[i__] = 0.f;
+ } else {
+ rwork[i__] = poles[i__ + (poles_dim1 << 1)] * z__[i__]
+ / (slamc3_(&poles[i__ + (poles_dim1 << 1)], &
+ dsigjp) + difrj) / (poles[i__ + (poles_dim1 <<
+ 1)] + dj);
+ }
+/* L40: */
+ }
+ rwork[1] = -1.f;
+ temp = snrm2_(k, &rwork[1], &c__1);
+
+/* Since B and BX are complex, the following call to SGEMV */
+/* is performed in two steps (real and imaginary parts). */
+
+/* CALL SGEMV( 'T', K, NRHS, ONE, BX, LDBX, WORK, 1, ZERO, */
+/* $ B( J, 1 ), LDB ) */
+
+ i__ = *k + (*nrhs << 1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = *k;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++i__;
+ i__4 = jrow + jcol * bx_dim1;
+ rwork[i__] = bx[i__4].r;
+/* L50: */
+ }
+/* L60: */
+ }
+ sgemv_("T", k, nrhs, &c_b13, &rwork[*k + 1 + (*nrhs << 1)], k,
+ &rwork[1], &c__1, &c_b15, &rwork[*k + 1], &c__1);
+ i__ = *k + (*nrhs << 1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = *k;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++i__;
+ rwork[i__] = r_imag(&bx[jrow + jcol * bx_dim1]);
+/* L70: */
+ }
+/* L80: */
+ }
+ sgemv_("T", k, nrhs, &c_b13, &rwork[*k + 1 + (*nrhs << 1)], k,
+ &rwork[1], &c__1, &c_b15, &rwork[*k + 1 + *nrhs], &
+ c__1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = j + jcol * b_dim1;
+ i__4 = jcol + *k;
+ i__5 = jcol + *k + *nrhs;
+ q__1.r = rwork[i__4], q__1.i = rwork[i__5];
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L90: */
+ }
+ clascl_("G", &c__0, &c__0, &temp, &c_b13, &c__1, nrhs, &b[j +
+ b_dim1], ldb, info);
+/* L100: */
+ }
+ }
+
+/* Move the deflated rows of BX to B also. */
+
+ if (*k < max(m,n)) {
+ i__1 = n - *k;
+ clacpy_("A", &i__1, nrhs, &bx[*k + 1 + bx_dim1], ldbx, &b[*k + 1
+ + b_dim1], ldb);
+ }
+ } else {
+
+/* Apply back the right orthogonal transformations. */
+
+/* Step (1R): apply back the new right singular vector matrix */
+/* to B. */
+
+ if (*k == 1) {
+ ccopy_(nrhs, &b[b_offset], ldb, &bx[bx_offset], ldbx);
+ } else {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dsigj = poles[j + (poles_dim1 << 1)];
+ if (z__[j] == 0.f) {
+ rwork[j] = 0.f;
+ } else {
+ rwork[j] = -z__[j] / difl[j] / (dsigj + poles[j +
+ poles_dim1]) / difr[j + (difr_dim1 << 1)];
+ }
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (z__[j] == 0.f) {
+ rwork[i__] = 0.f;
+ } else {
+ r__1 = -poles[i__ + 1 + (poles_dim1 << 1)];
+ rwork[i__] = z__[j] / (slamc3_(&dsigj, &r__1) - difr[
+ i__ + difr_dim1]) / (dsigj + poles[i__ +
+ poles_dim1]) / difr[i__ + (difr_dim1 << 1)];
+ }
+/* L110: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ if (z__[j] == 0.f) {
+ rwork[i__] = 0.f;
+ } else {
+ r__1 = -poles[i__ + (poles_dim1 << 1)];
+ rwork[i__] = z__[j] / (slamc3_(&dsigj, &r__1) - difl[
+ i__]) / (dsigj + poles[i__ + poles_dim1]) /
+ difr[i__ + (difr_dim1 << 1)];
+ }
+/* L120: */
+ }
+
+/* Since B and BX are complex, the following call to SGEMV */
+/* is performed in two steps (real and imaginary parts). */
+
+/* CALL SGEMV( 'T', K, NRHS, ONE, B, LDB, WORK, 1, ZERO, */
+/* $ BX( J, 1 ), LDBX ) */
+
+ i__ = *k + (*nrhs << 1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = *k;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++i__;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[i__] = b[i__4].r;
+/* L130: */
+ }
+/* L140: */
+ }
+ sgemv_("T", k, nrhs, &c_b13, &rwork[*k + 1 + (*nrhs << 1)], k,
+ &rwork[1], &c__1, &c_b15, &rwork[*k + 1], &c__1);
+ i__ = *k + (*nrhs << 1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = *k;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++i__;
+ rwork[i__] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L150: */
+ }
+/* L160: */
+ }
+ sgemv_("T", k, nrhs, &c_b13, &rwork[*k + 1 + (*nrhs << 1)], k,
+ &rwork[1], &c__1, &c_b15, &rwork[*k + 1 + *nrhs], &
+ c__1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = j + jcol * bx_dim1;
+ i__4 = jcol + *k;
+ i__5 = jcol + *k + *nrhs;
+ q__1.r = rwork[i__4], q__1.i = rwork[i__5];
+ bx[i__3].r = q__1.r, bx[i__3].i = q__1.i;
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+
+/* Step (2R): if SQRE = 1, apply back the rotation that is */
+/* related to the right null space of the subproblem. */
+
+ if (*sqre == 1) {
+ ccopy_(nrhs, &b[m + b_dim1], ldb, &bx[m + bx_dim1], ldbx);
+ csrot_(nrhs, &bx[bx_dim1 + 1], ldbx, &bx[m + bx_dim1], ldbx, c__,
+ s);
+ }
+ if (*k < max(m,n)) {
+ i__1 = n - *k;
+ clacpy_("A", &i__1, nrhs, &b[*k + 1 + b_dim1], ldb, &bx[*k + 1 +
+ bx_dim1], ldbx);
+ }
+
+/* Step (3R): permute rows of B. */
+
+ ccopy_(nrhs, &bx[bx_dim1 + 1], ldbx, &b[nlp1 + b_dim1], ldb);
+ if (*sqre == 1) {
+ ccopy_(nrhs, &bx[m + bx_dim1], ldbx, &b[m + b_dim1], ldb);
+ }
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ ccopy_(nrhs, &bx[i__ + bx_dim1], ldbx, &b[perm[i__] + b_dim1],
+ ldb);
+/* L190: */
+ }
+
+/* Step (4R): apply back the Givens rotations performed. */
+
+ for (i__ = *givptr; i__ >= 1; --i__) {
+ r__1 = -givnum[i__ + givnum_dim1];
+ csrot_(nrhs, &b[givcol[i__ + (givcol_dim1 << 1)] + b_dim1], ldb, &
+ b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[i__ +
+ (givnum_dim1 << 1)], &r__1);
+/* L200: */
+ }
+ }
+
+ return 0;
+
+/* End of CLALS0 */
+
+} /* clals0_ */
diff --git a/contrib/libs/clapack/clalsa.c b/contrib/libs/clapack/clalsa.c
new file mode 100644
index 0000000000..ca12b37cdc
--- /dev/null
+++ b/contrib/libs/clapack/clalsa.c
@@ -0,0 +1,663 @@
+/* clalsa.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b9 = 1.f;
+static real c_b10 = 0.f;
+static integer c__2 = 2;
+
+/* Subroutine */ int clalsa_(integer *icompq, integer *smlsiz, integer *n,
+ integer *nrhs, complex *b, integer *ldb, complex *bx, integer *ldbx,
+ real *u, integer *ldu, real *vt, integer *k, real *difl, real *difr,
+ real *z__, real *poles, integer *givptr, integer *givcol, integer *
+ ldgcol, integer *perm, real *givnum, real *c__, real *s, real *rwork,
+ integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, difl_dim1,
+ difl_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset,
+ poles_dim1, poles_offset, u_dim1, u_offset, vt_dim1, vt_offset,
+ z_dim1, z_offset, b_dim1, b_offset, bx_dim1, bx_offset, i__1,
+ i__2, i__3, i__4, i__5, i__6;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ integer i__, j, i1, ic, lf, nd, ll, nl, nr, im1, nlf, nrf, lvl, ndb1,
+ nlp1, lvl2, nrp1, jcol, nlvl, sqre, jrow, jimag, jreal, inode,
+ ndiml;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ integer ndimr;
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), clals0_(integer *, integer *, integer *,
+ integer *, integer *, complex *, integer *, complex *, integer *,
+ integer *, integer *, integer *, integer *, real *, integer *,
+ real *, real *, real *, real *, integer *, real *, real *, real *,
+ integer *), xerbla_(char *, integer *), slasdt_(integer *
+, integer *, integer *, integer *, integer *, integer *, integer *
+);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLALSA is an itermediate step in solving the least squares problem */
+/* by computing the SVD of the coefficient matrix in compact form (The */
+/* singular vectors are computed as products of simple orthorgonal */
+/* matrices.). */
+
+/* If ICOMPQ = 0, CLALSA applies the inverse of the left singular vector */
+/* matrix of an upper bidiagonal matrix to the right hand side; and if */
+/* ICOMPQ = 1, CLALSA applies the right singular vector matrix to the */
+/* right hand side. The singular vector matrices were generated in */
+/* compact form by CLALSA. */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* Specifies whether the left or the right singular vector */
+/* matrix is involved. */
+/* = 0: Left singular vector matrix */
+/* = 1: Right singular vector matrix */
+
+/* SMLSIZ (input) INTEGER */
+/* The maximum size of the subproblems at the bottom of the */
+/* computation tree. */
+
+/* N (input) INTEGER */
+/* The row and column dimensions of the upper bidiagonal matrix. */
+
+/* NRHS (input) INTEGER */
+/* The number of columns of B and BX. NRHS must be at least 1. */
+
+/* B (input/output) COMPLEX array, dimension ( LDB, NRHS ) */
+/* On input, B contains the right hand sides of the least */
+/* squares problem in rows 1 through M. */
+/* On output, B contains the solution X in rows 1 through N. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B in the calling subprogram. */
+/* LDB must be at least max(1,MAX( M, N ) ). */
+
+/* BX (output) COMPLEX array, dimension ( LDBX, NRHS ) */
+/* On exit, the result of applying the left or right singular */
+/* vector matrix to B. */
+
+/* LDBX (input) INTEGER */
+/* The leading dimension of BX. */
+
+/* U (input) REAL array, dimension ( LDU, SMLSIZ ). */
+/* On entry, U contains the left singular vector matrices of all */
+/* subproblems at the bottom level. */
+
+/* LDU (input) INTEGER, LDU = > N. */
+/* The leading dimension of arrays U, VT, DIFL, DIFR, */
+/* POLES, GIVNUM, and Z. */
+
+/* VT (input) REAL array, dimension ( LDU, SMLSIZ+1 ). */
+/* On entry, VT' contains the right singular vector matrices of */
+/* all subproblems at the bottom level. */
+
+/* K (input) INTEGER array, dimension ( N ). */
+
+/* DIFL (input) REAL array, dimension ( LDU, NLVL ). */
+/* where NLVL = INT(log_2 (N/(SMLSIZ+1))) + 1. */
+
+/* DIFR (input) REAL array, dimension ( LDU, 2 * NLVL ). */
+/* On entry, DIFL(*, I) and DIFR(*, 2 * I -1) record */
+/* distances between singular values on the I-th level and */
+/* singular values on the (I -1)-th level, and DIFR(*, 2 * I) */
+/* record the normalizing factors of the right singular vectors */
+/* matrices of subproblems on I-th level. */
+
+/* Z (input) REAL array, dimension ( LDU, NLVL ). */
+/* On entry, Z(1, I) contains the components of the deflation- */
+/* adjusted updating row vector for subproblems on the I-th */
+/* level. */
+
+/* POLES (input) REAL array, dimension ( LDU, 2 * NLVL ). */
+/* On entry, POLES(*, 2 * I -1: 2 * I) contains the new and old */
+/* singular values involved in the secular equations on the I-th */
+/* level. */
+
+/* GIVPTR (input) INTEGER array, dimension ( N ). */
+/* On entry, GIVPTR( I ) records the number of Givens */
+/* rotations performed on the I-th problem on the computation */
+/* tree. */
+
+/* GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 * NLVL ). */
+/* On entry, for each I, GIVCOL(*, 2 * I - 1: 2 * I) records the */
+/* locations of Givens rotations performed on the I-th level on */
+/* the computation tree. */
+
+/* LDGCOL (input) INTEGER, LDGCOL = > N. */
+/* The leading dimension of arrays GIVCOL and PERM. */
+
+/* PERM (input) INTEGER array, dimension ( LDGCOL, NLVL ). */
+/* On entry, PERM(*, I) records permutations done on the I-th */
+/* level of the computation tree. */
+
+/* GIVNUM (input) REAL array, dimension ( LDU, 2 * NLVL ). */
+/* On entry, GIVNUM(*, 2 *I -1 : 2 * I) records the C- and S- */
+/* values of Givens rotations performed on the I-th level on the */
+/* computation tree. */
+
+/* C (input) REAL array, dimension ( N ). */
+/* On entry, if the I-th subproblem is not square, */
+/* C( I ) contains the C-value of a Givens rotation related to */
+/* the right null space of the I-th subproblem. */
+
+/* S (input) REAL array, dimension ( N ). */
+/* On entry, if the I-th subproblem is not square, */
+/* S( I ) contains the S-value of a Givens rotation related to */
+/* the right null space of the I-th subproblem. */
+
+/* RWORK (workspace) REAL array, dimension at least */
+/* max ( N, (SMLSZ+1)*NRHS*3 ). */
+
+/* IWORK (workspace) INTEGER array. */
+/* The dimension must be at least 3 * N */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Ren-Cang Li, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ bx_dim1 = *ldbx;
+ bx_offset = 1 + bx_dim1;
+ bx -= bx_offset;
+ givnum_dim1 = *ldu;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ poles_dim1 = *ldu;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ z_dim1 = *ldu;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ difr_dim1 = *ldu;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ difl_dim1 = *ldu;
+ difl_offset = 1 + difl_dim1;
+ difl -= difl_offset;
+ vt_dim1 = *ldu;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ --k;
+ --givptr;
+ perm_dim1 = *ldgcol;
+ perm_offset = 1 + perm_dim1;
+ perm -= perm_offset;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ --c__;
+ --s;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*smlsiz < 3) {
+ *info = -2;
+ } else if (*n < *smlsiz) {
+ *info = -3;
+ } else if (*nrhs < 1) {
+ *info = -4;
+ } else if (*ldb < *n) {
+ *info = -6;
+ } else if (*ldbx < *n) {
+ *info = -8;
+ } else if (*ldu < *n) {
+ *info = -10;
+ } else if (*ldgcol < *n) {
+ *info = -19;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLALSA", &i__1);
+ return 0;
+ }
+
+/* Book-keeping and setting up the computation tree. */
+
+ inode = 1;
+ ndiml = inode + *n;
+ ndimr = ndiml + *n;
+
+ slasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+ smlsiz);
+
+/* The following code applies back the left singular vector factors. */
+/* For applying back the right singular vector factors, go to 170. */
+
+ if (*icompq == 1) {
+ goto L170;
+ }
+
+/* The nodes on the bottom level of the tree were solved */
+/* by SLASDQ. The corresponding left and right singular vector */
+/* matrices are in explicit form. First apply back the left */
+/* singular vector matrices. */
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/* IC : center row of each node */
+/* NL : number of rows of left subproblem */
+/* NR : number of rows of right subproblem */
+/* NLF: starting row of the left subproblem */
+/* NRF: starting row of the right subproblem */
+
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nr = iwork[ndimr + i1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+
+/* Since B and BX are complex, the following call to SGEMM */
+/* is performed in two steps (real and imaginary parts). */
+
+/* CALL SGEMM( 'T', 'N', NL, NRHS, NL, ONE, U( NLF, 1 ), LDU, */
+/* $ B( NLF, 1 ), LDB, ZERO, BX( NLF, 1 ), LDBX ) */
+
+ j = nl * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nl - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++j;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__4].r;
+/* L10: */
+ }
+/* L20: */
+ }
+ sgemm_("T", "N", &nl, nrhs, &nl, &c_b9, &u[nlf + u_dim1], ldu, &rwork[
+ (nl * *nrhs << 1) + 1], &nl, &c_b10, &rwork[1], &nl);
+ j = nl * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nl - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++j;
+ rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L30: */
+ }
+/* L40: */
+ }
+ sgemm_("T", "N", &nl, nrhs, &nl, &c_b9, &u[nlf + u_dim1], ldu, &rwork[
+ (nl * *nrhs << 1) + 1], &nl, &c_b10, &rwork[nl * *nrhs + 1], &
+ nl);
+ jreal = 0;
+ jimag = nl * *nrhs;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nl - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * bx_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ q__1.r = rwork[i__5], q__1.i = rwork[i__6];
+ bx[i__4].r = q__1.r, bx[i__4].i = q__1.i;
+/* L50: */
+ }
+/* L60: */
+ }
+
+/* Since B and BX are complex, the following call to SGEMM */
+/* is performed in two steps (real and imaginary parts). */
+
+/* CALL SGEMM( 'T', 'N', NR, NRHS, NR, ONE, U( NRF, 1 ), LDU, */
+/* $ B( NRF, 1 ), LDB, ZERO, BX( NRF, 1 ), LDBX ) */
+
+ j = nr * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nr - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++j;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__4].r;
+/* L70: */
+ }
+/* L80: */
+ }
+ sgemm_("T", "N", &nr, nrhs, &nr, &c_b9, &u[nrf + u_dim1], ldu, &rwork[
+ (nr * *nrhs << 1) + 1], &nr, &c_b10, &rwork[1], &nr);
+ j = nr * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nr - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++j;
+ rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L90: */
+ }
+/* L100: */
+ }
+ sgemm_("T", "N", &nr, nrhs, &nr, &c_b9, &u[nrf + u_dim1], ldu, &rwork[
+ (nr * *nrhs << 1) + 1], &nr, &c_b10, &rwork[nr * *nrhs + 1], &
+ nr);
+ jreal = 0;
+ jimag = nr * *nrhs;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nr - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * bx_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ q__1.r = rwork[i__5], q__1.i = rwork[i__6];
+ bx[i__4].r = q__1.r, bx[i__4].i = q__1.i;
+/* L110: */
+ }
+/* L120: */
+ }
+
+/* L130: */
+ }
+
+/* Next copy the rows of B that correspond to unchanged rows */
+/* in the bidiagonal matrix to BX. */
+
+ i__1 = nd;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ic = iwork[inode + i__ - 1];
+ ccopy_(nrhs, &b[ic + b_dim1], ldb, &bx[ic + bx_dim1], ldbx);
+/* L140: */
+ }
+
+/* Finally go through the left singular vector matrices of all */
+/* the other subproblems bottom-up on the tree. */
+
+ j = pow_ii(&c__2, &nlvl);
+ sqre = 0;
+
+ for (lvl = nlvl; lvl >= 1; --lvl) {
+ lvl2 = (lvl << 1) - 1;
+
+/* find the first node LF and last node LL on */
+/* the current level LVL */
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__1 = lvl - 1;
+ lf = pow_ii(&c__2, &i__1);
+ ll = (lf << 1) - 1;
+ }
+ i__1 = ll;
+ for (i__ = lf; i__ <= i__1; ++i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ --j;
+ clals0_(icompq, &nl, &nr, &sqre, nrhs, &bx[nlf + bx_dim1], ldbx, &
+ b[nlf + b_dim1], ldb, &perm[nlf + lvl * perm_dim1], &
+ givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+ givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+ poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+ lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+ j], &s[j], &rwork[1], info);
+/* L150: */
+ }
+/* L160: */
+ }
+ goto L330;
+
+/* ICOMPQ = 1: applying back the right singular vector factors. */
+
+L170:
+
+/* First now go through the right singular vector matrices of all */
+/* the tree nodes top-down. */
+
+ j = 0;
+ i__1 = nlvl;
+ for (lvl = 1; lvl <= i__1; ++lvl) {
+ lvl2 = (lvl << 1) - 1;
+
+/* Find the first node LF and last node LL on */
+/* the current level LVL. */
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__2 = lvl - 1;
+ lf = pow_ii(&c__2, &i__2);
+ ll = (lf << 1) - 1;
+ }
+ i__2 = lf;
+ for (i__ = ll; i__ >= i__2; --i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ if (i__ == ll) {
+ sqre = 0;
+ } else {
+ sqre = 1;
+ }
+ ++j;
+ clals0_(icompq, &nl, &nr, &sqre, nrhs, &b[nlf + b_dim1], ldb, &bx[
+ nlf + bx_dim1], ldbx, &perm[nlf + lvl * perm_dim1], &
+ givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+ givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+ poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+ lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+ j], &s[j], &rwork[1], info);
+/* L180: */
+ }
+/* L190: */
+ }
+
+/* The nodes on the bottom level of the tree were solved */
+/* by SLASDQ. The corresponding right singular vector */
+/* matrices are in explicit form. Apply them back. */
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nr = iwork[ndimr + i1];
+ nlp1 = nl + 1;
+ if (i__ == nd) {
+ nrp1 = nr;
+ } else {
+ nrp1 = nr + 1;
+ }
+ nlf = ic - nl;
+ nrf = ic + 1;
+
+/* Since B and BX are complex, the following call to SGEMM is */
+/* performed in two steps (real and imaginary parts). */
+
+/* CALL SGEMM( 'T', 'N', NLP1, NRHS, NLP1, ONE, VT( NLF, 1 ), LDU, */
+/* $ B( NLF, 1 ), LDB, ZERO, BX( NLF, 1 ), LDBX ) */
+
+ j = nlp1 * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nlp1 - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++j;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__4].r;
+/* L200: */
+ }
+/* L210: */
+ }
+ sgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b9, &vt[nlf + vt_dim1], ldu, &
+ rwork[(nlp1 * *nrhs << 1) + 1], &nlp1, &c_b10, &rwork[1], &
+ nlp1);
+ j = nlp1 * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nlp1 - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++j;
+ rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L220: */
+ }
+/* L230: */
+ }
+ sgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b9, &vt[nlf + vt_dim1], ldu, &
+ rwork[(nlp1 * *nrhs << 1) + 1], &nlp1, &c_b10, &rwork[nlp1 * *
+ nrhs + 1], &nlp1);
+ jreal = 0;
+ jimag = nlp1 * *nrhs;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nlp1 - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * bx_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ q__1.r = rwork[i__5], q__1.i = rwork[i__6];
+ bx[i__4].r = q__1.r, bx[i__4].i = q__1.i;
+/* L240: */
+ }
+/* L250: */
+ }
+
+/* Since B and BX are complex, the following call to SGEMM is */
+/* performed in two steps (real and imaginary parts). */
+
+/* CALL SGEMM( 'T', 'N', NRP1, NRHS, NRP1, ONE, VT( NRF, 1 ), LDU, */
+/* $ B( NRF, 1 ), LDB, ZERO, BX( NRF, 1 ), LDBX ) */
+
+ j = nrp1 * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nrp1 - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++j;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__4].r;
+/* L260: */
+ }
+/* L270: */
+ }
+ sgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b9, &vt[nrf + vt_dim1], ldu, &
+ rwork[(nrp1 * *nrhs << 1) + 1], &nrp1, &c_b10, &rwork[1], &
+ nrp1);
+ j = nrp1 * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nrp1 - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++j;
+ rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L280: */
+ }
+/* L290: */
+ }
+ sgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b9, &vt[nrf + vt_dim1], ldu, &
+ rwork[(nrp1 * *nrhs << 1) + 1], &nrp1, &c_b10, &rwork[nrp1 * *
+ nrhs + 1], &nrp1);
+ jreal = 0;
+ jimag = nrp1 * *nrhs;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nrp1 - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * bx_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ q__1.r = rwork[i__5], q__1.i = rwork[i__6];
+ bx[i__4].r = q__1.r, bx[i__4].i = q__1.i;
+/* L300: */
+ }
+/* L310: */
+ }
+
+/* L320: */
+ }
+
+L330:
+
+ return 0;
+
+/* End of CLALSA */
+
+} /* clalsa_ */
diff --git a/contrib/libs/clapack/clalsd.c b/contrib/libs/clapack/clalsd.c
new file mode 100644
index 0000000000..0ae828fc8d
--- /dev/null
+++ b/contrib/libs/clapack/clalsd.c
@@ -0,0 +1,755 @@
+/* clalsd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static integer c__1 = 1;
+static integer c__0 = 0;
+static real c_b10 = 1.f;
+static real c_b35 = 0.f;
+
+/* Subroutine */ int clalsd_(char *uplo, integer *smlsiz, integer *n, integer
+	*nrhs, real *d__, real *e, complex *b, integer *ldb, real *rcond,
+	integer *rank, complex *work, real *rwork, integer *iwork, integer *
+	info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+    real r__1;
+    complex q__1;
+
+    /* Builtin functions */
+    double r_imag(complex *), log(doublereal), r_sign(real *, real *);
+
+    /* Local variables */
+    integer c__, i__, j, k;
+    real r__;
+    integer s, u, z__;
+    real cs;
+    integer bx;
+    real sn;
+    integer st, vt, nm1, st1;
+    real eps;
+    integer iwk;
+    real tol;
+    integer difl, difr;
+    real rcnd;
+    integer jcol, irwb, perm, nsub, nlvl, sqre, bxst, jrow, irwu, jimag,
+	    jreal;
+    extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+	    integer *, real *, real *, integer *, real *, integer *, real *,
+	    real *, integer *);
+    integer irwib;
+    extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+	    complex *, integer *);
+    integer poles, sizei, irwrb, nsize;
+    extern /* Subroutine */ int csrot_(integer *, complex *, integer *,
+	    complex *, integer *, real *, real *);
+    integer irwvt, icmpq1, icmpq2;
+    extern /* Subroutine */ int clalsa_(integer *, integer *, integer *,
+	    integer *, complex *, integer *, complex *, integer *, real *,
+	    integer *, real *, integer *, real *, real *, real *, real *,
+	    integer *, integer *, integer *, integer *, real *, real *, real *
+, real *, integer *, integer *), clascl_(char *, integer *,
+	    integer *, real *, real *, integer *, integer *, complex *,
+	    integer *, integer *);
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int slasda_(integer *, integer *, integer *,
+	    integer *, real *, real *, real *, integer *, real *, integer *,
+	    real *, real *, real *, real *, integer *, integer *, integer *,
+	    integer *, real *, real *, real *, real *, integer *, integer *),
+	    clacpy_(char *, integer *, integer *, complex *, integer *,
+	    complex *, integer *), claset_(char *, integer *, integer
+	    *, complex *, complex *, complex *, integer *), xerbla_(
+	    char *, integer *), slascl_(char *, integer *, integer *,
+	    real *, real *, integer *, integer *, real *, integer *, integer *
+);
+    extern integer isamax_(integer *, real *, integer *);
+    integer givcol;
+    extern /* Subroutine */ int slasdq_(char *, integer *, integer *, integer
+	    *, integer *, integer *, real *, real *, real *, integer *, real *
+, integer *, real *, integer *, real *, integer *),
+	    slaset_(char *, integer *, integer *, real *, real *, real *,
+	    integer *), slartg_(real *, real *, real *, real *, real *
+);
+    real orgnrm;
+    integer givnum;
+    extern doublereal slanst_(char *, integer *, real *, real *);
+    extern /* Subroutine */ int slasrt_(char *, integer *, real *, integer *);
+    integer givptr, nrwork, irwwrk, smlszp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLALSD uses the singular value decomposition of A to solve the least */
+/* squares problem of finding X to minimize the Euclidean norm of each */
+/* column of A*X-B, where A is N-by-N upper bidiagonal, and X and B */
+/* are N-by-NRHS. The solution X overwrites B. */
+
+/* The singular values of A smaller than RCOND times the largest */
+/* singular value are treated as zero in solving the least squares */
+/* problem; in this case a minimum norm solution is returned. */
+/* The actual singular values are returned in D in ascending order. */
+
+/* This code makes very mild assumptions about floating point */
+/* arithmetic. It will work on machines with a guard digit in */
+/* add/subtract, or on those binary machines without guard digits */
+/* which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2. */
+/* It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': D and E define an upper bidiagonal matrix. */
+/* = 'L': D and E define a lower bidiagonal matrix. */
+
+/* SMLSIZ (input) INTEGER */
+/* The maximum size of the subproblems at the bottom of the */
+/* computation tree. */
+
+/* N (input) INTEGER */
+/* The dimension of the bidiagonal matrix. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of columns of B. NRHS must be at least 1. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry D contains the main diagonal of the bidiagonal */
+/* matrix. On exit, if INFO = 0, D contains its singular values. */
+
+/* E (input/output) REAL array, dimension (N-1) */
+/* Contains the super-diagonal entries of the bidiagonal matrix. */
+/* On exit, E has been destroyed. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On input, B contains the right hand sides of the least */
+/* squares problem. On output, B contains the solution X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B in the calling subprogram. */
+/* LDB must be at least max(1,N). */
+
+/* RCOND (input) REAL */
+/* The singular values of A less than or equal to RCOND times */
+/* the largest singular value are treated as zero in solving */
+/* the least squares problem. If RCOND is negative, */
+/* machine precision is used instead. */
+/* For example, if diag(S)*X=B were the least squares problem, */
+/* where diag(S) is a diagonal matrix of singular values, the */
+/* solution would be X(i) = B(i) / S(i) if S(i) is greater than */
+/* RCOND*max(S), and X(i) = 0 if S(i) is less than or equal to */
+/* RCOND*max(S). */
+
+/* RANK (output) INTEGER */
+/* The number of singular values of A greater than RCOND times */
+/* the largest singular value. */
+
+/* WORK (workspace) COMPLEX array, dimension (N * NRHS). */
+
+/* RWORK (workspace) REAL array, dimension at least */
+/* (9*N + 2*N*SMLSIZ + 8*N*NLVL + 3*SMLSIZ*NRHS + (SMLSIZ+1)**2), */
+/* where */
+/* NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 ) */
+
+/* IWORK (workspace) INTEGER array, dimension (3*N*NLVL + 11*N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: The algorithm failed to compute an singular value while */
+/* working on the submatrix lying in rows and columns */
+/* INFO/(N+1) through MOD(INFO,N+1). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Ren-Cang Li, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments: shift pointers so 1-based Fortran indexing works. */
+    --d__;
+    --e;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --work;
+    --rwork;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*n < 0) {
+	*info = -3;
+    } else if (*nrhs < 1) {
+	*info = -4;
+    } else if (*ldb < 1 || *ldb < *n) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CLALSD", &i__1);
+	return 0;
+    }
+
+    eps = slamch_("Epsilon");
+
+/* Set up the tolerance: out-of-range RCOND falls back to machine eps. */
+
+    if (*rcond <= 0.f || *rcond >= 1.f) {
+	rcnd = eps;
+    } else {
+	rcnd = *rcond;
+    }
+
+    *rank = 0;
+
+/* Quick return if possible. */
+
+    if (*n == 0) {
+	return 0;
+    } else if (*n == 1) {
+	if (d__[1] == 0.f) {
+	    claset_("A", &c__1, nrhs, &c_b1, &c_b1, &b[b_offset], ldb); /* singular 1x1: X = 0 */
+	} else {
+	    *rank = 1;
+	    clascl_("G", &c__0, &c__0, &d__[1], &c_b10, &c__1, nrhs, &b[
+		    b_offset], ldb, info); /* X = B / d(1) */
+	    d__[1] = dabs(d__[1]);
+	}
+	return 0;
+    }
+
+/* Rotate the matrix if it is lower bidiagonal. */
+
+    if (*(unsigned char *)uplo == 'L') {
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    slartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+	    d__[i__] = r__;
+	    e[i__] = sn * d__[i__ + 1];
+	    d__[i__ + 1] = cs * d__[i__ + 1];
+	    if (*nrhs == 1) {
+		csrot_(&c__1, &b[i__ + b_dim1], &c__1, &b[i__ + 1 + b_dim1], &
+			c__1, &cs, &sn);
+	    } else {
+		rwork[(i__ << 1) - 1] = cs; /* save (cs,sn) pairs to re-apply per RHS below */
+		rwork[i__ * 2] = sn;
+	    }
+/* L10: */
+	}
+	if (*nrhs > 1) {
+	    i__1 = *nrhs;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		i__2 = *n - 1;
+		for (j = 1; j <= i__2; ++j) {
+		    cs = rwork[(j << 1) - 1];
+		    sn = rwork[j * 2];
+		    csrot_(&c__1, &b[j + i__ * b_dim1], &c__1, &b[j + 1 + i__
+			    * b_dim1], &c__1, &cs, &sn);
+/* L20: */
+		}
+/* L30: */
+	    }
+	}
+    }
+
+/* Scale. */
+
+    nm1 = *n - 1;
+    orgnrm = slanst_("M", n, &d__[1], &e[1]); /* max-abs norm of the bidiagonal */
+    if (orgnrm == 0.f) {
+	claset_("A", n, nrhs, &c_b1, &c_b1, &b[b_offset], ldb); /* A == 0: minimum-norm solution is X = 0 */
+	return 0;
+    }
+
+    slascl_("G", &c__0, &c__0, &orgnrm, &c_b10, n, &c__1, &d__[1], n, info);
+    slascl_("G", &c__0, &c__0, &orgnrm, &c_b10, &nm1, &c__1, &e[1], &nm1,
+	    info);
+
+/* If N is smaller than the minimum divide size SMLSIZ, then solve */
+/* the problem with another solver. */
+
+    if (*n <= *smlsiz) {
+	irwu = 1; /* RWORK layout: U (n*n), VT (n*n), then scratch/real/imag panels */
+	irwvt = irwu + *n * *n;
+	irwwrk = irwvt + *n * *n;
+	irwrb = irwwrk;
+	irwib = irwrb + *n * *nrhs;
+	irwb = irwib + *n * *nrhs;
+	slaset_("A", n, n, &c_b35, &c_b10, &rwork[irwu], n);
+	slaset_("A", n, n, &c_b35, &c_b10, &rwork[irwvt], n);
+	slasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &rwork[irwvt], n,
+		&rwork[irwu], n, &rwork[irwwrk], &c__1, &rwork[irwwrk], info);
+	if (*info != 0) {
+	    return 0;
+	}
+
+/* In the real version, B is passed to SLASDQ and multiplied */
+/* internally by Q'. Here B is complex and that product is */
+/* computed below in two steps (real and imaginary parts). */
+
+	j = irwb - 1;
+	i__1 = *nrhs;
+	for (jcol = 1; jcol <= i__1; ++jcol) {
+	    i__2 = *n;
+	    for (jrow = 1; jrow <= i__2; ++jrow) {
+		++j;
+		i__3 = jrow + jcol * b_dim1;
+		rwork[j] = b[i__3].r; /* pack Re(B) column-major into rwork[irwb..] */
+/* L40: */
+	    }
+/* L50: */
+	}
+	sgemm_("T", "N", n, nrhs, n, &c_b10, &rwork[irwu], n, &rwork[irwb], n,
+		 &c_b35, &rwork[irwrb], n);
+	j = irwb - 1;
+	i__1 = *nrhs;
+	for (jcol = 1; jcol <= i__1; ++jcol) {
+	    i__2 = *n;
+	    for (jrow = 1; jrow <= i__2; ++jrow) {
+		++j;
+		rwork[j] = r_imag(&b[jrow + jcol * b_dim1]); /* pack Im(B) */
+/* L60: */
+	    }
+/* L70: */
+	}
+	sgemm_("T", "N", n, nrhs, n, &c_b10, &rwork[irwu], n, &rwork[irwb], n,
+		 &c_b35, &rwork[irwib], n);
+	jreal = irwrb - 1;
+	jimag = irwib - 1;
+	i__1 = *nrhs;
+	for (jcol = 1; jcol <= i__1; ++jcol) {
+	    i__2 = *n;
+	    for (jrow = 1; jrow <= i__2; ++jrow) {
+		++jreal;
+		++jimag;
+		i__3 = jrow + jcol * b_dim1;
+		i__4 = jreal;
+		i__5 = jimag;
+		q__1.r = rwork[i__4], q__1.i = rwork[i__5]; /* recombine: B = U' * B */
+		b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L80: */
+	    }
+/* L90: */
+	}
+
+	tol = rcnd * (r__1 = d__[isamax_(n, &d__[1], &c__1)], dabs(r__1)); /* tol = rcnd * max|d| */
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (d__[i__] <= tol) {
+		claset_("A", &c__1, nrhs, &c_b1, &c_b1, &b[i__ + b_dim1], ldb); /* zero tiny singular value's row */
+	    } else {
+		clascl_("G", &c__0, &c__0, &d__[i__], &c_b10, &c__1, nrhs, &b[
+			i__ + b_dim1], ldb, info);
+		++(*rank);
+	    }
+/* L100: */
+	}
+
+/* Since B is complex, the following call to SGEMM is performed */
+/* in two steps (real and imaginary parts). That is for V * B */
+/* (in the real version of the code V' is stored in WORK). */
+
+/* CALL SGEMM( 'T', 'N', N, NRHS, N, ONE, WORK, N, B, LDB, ZERO, */
+/* $ WORK( NWORK ), N ) */
+
+	j = irwb - 1;
+	i__1 = *nrhs;
+	for (jcol = 1; jcol <= i__1; ++jcol) {
+	    i__2 = *n;
+	    for (jrow = 1; jrow <= i__2; ++jrow) {
+		++j;
+		i__3 = jrow + jcol * b_dim1;
+		rwork[j] = b[i__3].r;
+/* L110: */
+	    }
+/* L120: */
+	}
+	sgemm_("T", "N", n, nrhs, n, &c_b10, &rwork[irwvt], n, &rwork[irwb],
+		n, &c_b35, &rwork[irwrb], n);
+	j = irwb - 1;
+	i__1 = *nrhs;
+	for (jcol = 1; jcol <= i__1; ++jcol) {
+	    i__2 = *n;
+	    for (jrow = 1; jrow <= i__2; ++jrow) {
+		++j;
+		rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L130: */
+	    }
+/* L140: */
+	}
+	sgemm_("T", "N", n, nrhs, n, &c_b10, &rwork[irwvt], n, &rwork[irwb],
+		n, &c_b35, &rwork[irwib], n);
+	jreal = irwrb - 1;
+	jimag = irwib - 1;
+	i__1 = *nrhs;
+	for (jcol = 1; jcol <= i__1; ++jcol) {
+	    i__2 = *n;
+	    for (jrow = 1; jrow <= i__2; ++jrow) {
+		++jreal;
+		++jimag;
+		i__3 = jrow + jcol * b_dim1;
+		i__4 = jreal;
+		i__5 = jimag;
+		q__1.r = rwork[i__4], q__1.i = rwork[i__5]; /* recombine: B = V * B */
+		b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L150: */
+	    }
+/* L160: */
+	}
+
+/* Unscale. */
+
+	slascl_("G", &c__0, &c__0, &c_b10, &orgnrm, n, &c__1, &d__[1], n,
+		info);
+	slasrt_("D", n, &d__[1], info);
+	clascl_("G", &c__0, &c__0, &orgnrm, &c_b10, n, nrhs, &b[b_offset],
+		ldb, info);
+
+	return 0;
+    }
+
+/* Book-keeping and setting up some constants. */
+
+    nlvl = (integer) (log((real) (*n) / (real) (*smlsiz + 1)) / log(2.f)) + 1; /* depth of the divide-and-conquer tree */
+
+    smlszp = *smlsiz + 1;
+
+    u = 1; /* RWORK partition offsets for SLASDA/CLALSA data */
+    vt = *smlsiz * *n + 1;
+    difl = vt + smlszp * *n;
+    difr = difl + nlvl * *n;
+    z__ = difr + (nlvl * *n << 1);
+    c__ = z__ + nlvl * *n;
+    s = c__ + *n;
+    poles = s + *n;
+    givnum = poles + (nlvl << 1) * *n;
+    nrwork = givnum + (nlvl << 1) * *n;
+    bx = 1;
+
+    irwrb = nrwork;
+    irwib = irwrb + *smlsiz * *nrhs;
+    irwb = irwib + *smlsiz * *nrhs;
+
+    sizei = *n + 1; /* IWORK partition offsets */
+    k = sizei + *n;
+    givptr = k + *n;
+    perm = givptr + *n;
+    givcol = perm + nlvl * *n;
+    iwk = givcol + (nlvl * *n << 1);
+
+    st = 1;
+    sqre = 0;
+    icmpq1 = 1;
+    icmpq2 = 0;
+    nsub = 0;
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((r__1 = d__[i__], dabs(r__1)) < eps) {
+	    d__[i__] = r_sign(&eps, &d__[i__]); /* clamp tiny diagonal away from zero, keep sign */
+	}
+/* L170: */
+    }
+
+    i__1 = nm1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((r__1 = e[i__], dabs(r__1)) < eps || i__ == nm1) {
+	    ++nsub;
+	    iwork[nsub] = st;
+
+/* Subproblem found. First determine its size and then */
+/* apply divide and conquer on it. */
+
+	    if (i__ < nm1) {
+
+/* A subproblem with E(I) small for I < NM1. */
+
+		nsize = i__ - st + 1;
+		iwork[sizei + nsub - 1] = nsize;
+	    } else if ((r__1 = e[i__], dabs(r__1)) >= eps) {
+
+/* A subproblem with E(NM1) not too small but I = NM1. */
+
+		nsize = *n - st + 1;
+		iwork[sizei + nsub - 1] = nsize;
+	    } else {
+
+/* A subproblem with E(NM1) small. This implies an */
+/* 1-by-1 subproblem at D(N), which is not solved */
+/* explicitly. */
+
+		nsize = i__ - st + 1;
+		iwork[sizei + nsub - 1] = nsize;
+		++nsub;
+		iwork[nsub] = *n;
+		iwork[sizei + nsub - 1] = 1;
+		ccopy_(nrhs, &b[*n + b_dim1], ldb, &work[bx + nm1], n);
+	    }
+	    st1 = st - 1;
+	    if (nsize == 1) {
+
+/* This is a 1-by-1 subproblem and is not solved */
+/* explicitly. */
+
+		ccopy_(nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n);
+	    } else if (nsize <= *smlsiz) {
+
+/* This is a small subproblem and is solved by SLASDQ. */
+
+		slaset_("A", &nsize, &nsize, &c_b35, &c_b10, &rwork[vt + st1],
+			 n);
+		slaset_("A", &nsize, &nsize, &c_b35, &c_b10, &rwork[u + st1],
+			n);
+		slasdq_("U", &c__0, &nsize, &nsize, &nsize, &c__0, &d__[st], &
+			e[st], &rwork[vt + st1], n, &rwork[u + st1], n, &
+			rwork[nrwork], &c__1, &rwork[nrwork], info)
+			;
+		if (*info != 0) {
+		    return 0;
+		}
+
+/* In the real version, B is passed to SLASDQ and multiplied */
+/* internally by Q'. Here B is complex and that product is */
+/* computed below in two steps (real and imaginary parts). */
+
+		j = irwb - 1;
+		i__2 = *nrhs;
+		for (jcol = 1; jcol <= i__2; ++jcol) {
+		    i__3 = st + nsize - 1;
+		    for (jrow = st; jrow <= i__3; ++jrow) {
+			++j;
+			i__4 = jrow + jcol * b_dim1;
+			rwork[j] = b[i__4].r;
+/* L180: */
+		    }
+/* L190: */
+		}
+		sgemm_("T", "N", &nsize, nrhs, &nsize, &c_b10, &rwork[u + st1]
+, n, &rwork[irwb], &nsize, &c_b35, &rwork[irwrb], &
+			nsize);
+		j = irwb - 1;
+		i__2 = *nrhs;
+		for (jcol = 1; jcol <= i__2; ++jcol) {
+		    i__3 = st + nsize - 1;
+		    for (jrow = st; jrow <= i__3; ++jrow) {
+			++j;
+			rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L200: */
+		    }
+/* L210: */
+		}
+		sgemm_("T", "N", &nsize, nrhs, &nsize, &c_b10, &rwork[u + st1]
+, n, &rwork[irwb], &nsize, &c_b35, &rwork[irwib], &
+			nsize);
+		jreal = irwrb - 1;
+		jimag = irwib - 1;
+		i__2 = *nrhs;
+		for (jcol = 1; jcol <= i__2; ++jcol) {
+		    i__3 = st + nsize - 1;
+		    for (jrow = st; jrow <= i__3; ++jrow) {
+			++jreal;
+			++jimag;
+			i__4 = jrow + jcol * b_dim1;
+			i__5 = jreal;
+			i__6 = jimag;
+			q__1.r = rwork[i__5], q__1.i = rwork[i__6];
+			b[i__4].r = q__1.r, b[i__4].i = q__1.i;
+/* L220: */
+		    }
+/* L230: */
+		}
+
+		clacpy_("A", &nsize, nrhs, &b[st + b_dim1], ldb, &work[bx +
+			st1], n);
+	    } else {
+
+/* A large problem. Solve it using divide and conquer. */
+
+		slasda_(&icmpq1, smlsiz, &nsize, &sqre, &d__[st], &e[st], &
+			rwork[u + st1], n, &rwork[vt + st1], &iwork[k + st1],
+			&rwork[difl + st1], &rwork[difr + st1], &rwork[z__ +
+			st1], &rwork[poles + st1], &iwork[givptr + st1], &
+			iwork[givcol + st1], n, &iwork[perm + st1], &rwork[
+			givnum + st1], &rwork[c__ + st1], &rwork[s + st1], &
+			rwork[nrwork], &iwork[iwk], info);
+		if (*info != 0) {
+		    return 0;
+		}
+		bxst = bx + st1;
+		clalsa_(&icmpq2, smlsiz, &nsize, nrhs, &b[st + b_dim1], ldb, &
+			work[bxst], n, &rwork[u + st1], n, &rwork[vt + st1], &
+			iwork[k + st1], &rwork[difl + st1], &rwork[difr + st1]
+, &rwork[z__ + st1], &rwork[poles + st1], &iwork[
+			givptr + st1], &iwork[givcol + st1], n, &iwork[perm +
+			st1], &rwork[givnum + st1], &rwork[c__ + st1], &rwork[
+			s + st1], &rwork[nrwork], &iwork[iwk], info);
+		if (*info != 0) {
+		    return 0;
+		}
+	    }
+	    st = i__ + 1;
+	}
+/* L240: */
+    }
+
+/* Apply the singular values and treat the tiny ones as zero. */
+
+    tol = rcnd * (r__1 = d__[isamax_(n, &d__[1], &c__1)], dabs(r__1));
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Some of the elements in D can be negative because 1-by-1 */
+/* subproblems were not solved explicitly. */
+
+	if ((r__1 = d__[i__], dabs(r__1)) <= tol) {
+	    claset_("A", &c__1, nrhs, &c_b1, &c_b1, &work[bx + i__ - 1], n);
+	} else {
+	    ++(*rank);
+	    clascl_("G", &c__0, &c__0, &d__[i__], &c_b10, &c__1, nrhs, &work[
+		    bx + i__ - 1], n, info);
+	}
+	d__[i__] = (r__1 = d__[i__], dabs(r__1));
+/* L250: */
+    }
+
+/* Now apply back the right singular vectors. */
+
+    icmpq2 = 1;
+    i__1 = nsub;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	st = iwork[i__];
+	st1 = st - 1;
+	nsize = iwork[sizei + i__ - 1];
+	bxst = bx + st1;
+	if (nsize == 1) {
+	    ccopy_(nrhs, &work[bxst], n, &b[st + b_dim1], ldb);
+	} else if (nsize <= *smlsiz) {
+
+/* Since B and BX are complex, the following call to SGEMM */
+/* is performed in two steps (real and imaginary parts). */
+
+/* CALL SGEMM( 'T', 'N', NSIZE, NRHS, NSIZE, ONE, */
+/* $ RWORK( VT+ST1 ), N, RWORK( BXST ), N, ZERO, */
+/* $ B( ST, 1 ), LDB ) */
+
+	    j = bxst - *n - 1;
+	    jreal = irwb - 1;
+	    i__2 = *nrhs;
+	    for (jcol = 1; jcol <= i__2; ++jcol) {
+		j += *n; /* advance to next RHS column of WORK (leading dim n) */
+		i__3 = nsize;
+		for (jrow = 1; jrow <= i__3; ++jrow) {
+		    ++jreal;
+		    i__4 = j + jrow;
+		    rwork[jreal] = work[i__4].r;
+/* L260: */
+		}
+/* L270: */
+	    }
+	    sgemm_("T", "N", &nsize, nrhs, &nsize, &c_b10, &rwork[vt + st1],
+		    n, &rwork[irwb], &nsize, &c_b35, &rwork[irwrb], &nsize);
+	    j = bxst - *n - 1;
+	    jimag = irwb - 1;
+	    i__2 = *nrhs;
+	    for (jcol = 1; jcol <= i__2; ++jcol) {
+		j += *n;
+		i__3 = nsize;
+		for (jrow = 1; jrow <= i__3; ++jrow) {
+		    ++jimag;
+		    rwork[jimag] = r_imag(&work[j + jrow]);
+/* L280: */
+		}
+/* L290: */
+	    }
+	    sgemm_("T", "N", &nsize, nrhs, &nsize, &c_b10, &rwork[vt + st1],
+		    n, &rwork[irwb], &nsize, &c_b35, &rwork[irwib], &nsize);
+	    jreal = irwrb - 1;
+	    jimag = irwib - 1;
+	    i__2 = *nrhs;
+	    for (jcol = 1; jcol <= i__2; ++jcol) {
+		i__3 = st + nsize - 1;
+		for (jrow = st; jrow <= i__3; ++jrow) {
+		    ++jreal;
+		    ++jimag;
+		    i__4 = jrow + jcol * b_dim1;
+		    i__5 = jreal;
+		    i__6 = jimag;
+		    q__1.r = rwork[i__5], q__1.i = rwork[i__6];
+		    b[i__4].r = q__1.r, b[i__4].i = q__1.i;
+/* L300: */
+		}
+/* L310: */
+	    }
+	} else {
+	    clalsa_(&icmpq2, smlsiz, &nsize, nrhs, &work[bxst], n, &b[st +
+		    b_dim1], ldb, &rwork[u + st1], n, &rwork[vt + st1], &
+		    iwork[k + st1], &rwork[difl + st1], &rwork[difr + st1], &
+		    rwork[z__ + st1], &rwork[poles + st1], &iwork[givptr +
+		    st1], &iwork[givcol + st1], n, &iwork[perm + st1], &rwork[
+		    givnum + st1], &rwork[c__ + st1], &rwork[s + st1], &rwork[
+		    nrwork], &iwork[iwk], info);
+	    if (*info != 0) {
+		return 0;
+	    }
+	}
+/* L320: */
+    }
+
+/* Unscale and sort the singular values. */
+
+    slascl_("G", &c__0, &c__0, &c_b10, &orgnrm, n, &c__1, &d__[1], n, info);
+    slasrt_("D", n, &d__[1], info);
+    clascl_("G", &c__0, &c__0, &orgnrm, &c_b10, n, nrhs, &b[b_offset], ldb,
+	    info);
+
+    return 0;
+
+/* End of CLALSD */
+
+} /* clalsd_ */
diff --git a/contrib/libs/clapack/clangb.c b/contrib/libs/clapack/clangb.c
new file mode 100644
index 0000000000..f192af55d7
--- /dev/null
+++ b/contrib/libs/clapack/clangb.c
@@ -0,0 +1,224 @@
+/* clangb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal clangb_(char *norm, integer *n, integer *kl, integer *ku, complex *
+	ab, integer *ldab, real *work)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+    real ret_val, r__1, r__2;
+
+    /* Builtin functions */
+    double c_abs(complex *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, k, l;
+    real sum, scale;
+    extern logical lsame_(char *, char *);
+    real value;
+    extern /* Subroutine */ int classq_(integer *, complex *, integer *, real
+	    *, real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLANGB returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of an */
+/* n by n band matrix A, with kl sub-diagonals and ku super-diagonals. */
+
+/* Description */
+/* =========== */
+
+/* CLANGB returns the value */
+
+/* CLANGB = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in CLANGB as described */
+/* above. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, CLANGB is */
+/* set to zero. */
+
+/* KL (input) INTEGER */
+/* The number of sub-diagonals of the matrix A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of super-diagonals of the matrix A. KU >= 0. */
+
+/* AB (input) COMPLEX array, dimension (LDAB,N) */
+/* The band matrix A, stored in rows 1 to KL+KU+1. The j-th */
+/* column of A is stored in the j-th column of the array AB as */
+/* follows: */
+/* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(n,j+kl). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KL+KU+1. */
+
+/* WORK (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments: shift pointers so 1-based Fortran indexing works. */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --work;
+
+    /* Function Body */
+    if (*n == 0) {
+	value = 0.f;
+    } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+	value = 0.f;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MAX: banded row range of column j in AB storage */
+	    i__2 = *ku + 2 - j;
+/* Computing MIN */
+	    i__4 = *n + *ku + 1 - j, i__5 = *kl + *ku + 1;
+	    i__3 = min(i__4,i__5);
+	    for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+		r__1 = value, r__2 = c_abs(&ab[i__ + j * ab_dim1]);
+		value = dmax(r__1,r__2);
+/* L10: */
+	    }
+/* L20: */
+	}
+    } else if (lsame_(norm, "O") || *(unsigned char *)
+	    norm == '1') {
+
+/* Find norm1(A): maximum column sum. */
+
+	value = 0.f;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = 0.f;
+/* Computing MAX */
+	    i__3 = *ku + 2 - j;
+/* Computing MIN */
+	    i__4 = *n + *ku + 1 - j, i__5 = *kl + *ku + 1;
+	    i__2 = min(i__4,i__5);
+	    for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+		sum += c_abs(&ab[i__ + j * ab_dim1]);
+/* L30: */
+	    }
+	    value = dmax(value,sum);
+/* L40: */
+	}
+    } else if (lsame_(norm, "I")) {
+
+/* Find normI(A): maximum row sum, accumulated per row in WORK. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    work[i__] = 0.f;
+/* L50: */
+	}
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    k = *ku + 1 - j; /* offset mapping matrix row i to AB row k+i */
+/* Computing MAX */
+	    i__2 = 1, i__3 = j - *ku;
+/* Computing MIN */
+	    i__5 = *n, i__6 = j + *kl;
+	    i__4 = min(i__5,i__6);
+	    for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+		work[i__] += c_abs(&ab[k + i__ + j * ab_dim1]);
+/* L60: */
+	    }
+/* L70: */
+	}
+	value = 0.f;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	    r__1 = value, r__2 = work[i__];
+	    value = dmax(r__1,r__2);
+/* L80: */
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A): scaled sum of squares via CLASSQ to avoid overflow. */
+
+	scale = 0.f;
+	sum = 1.f;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+	    i__4 = 1, i__2 = j - *ku;
+	    l = max(i__4,i__2); /* first matrix row present in column j */
+	    k = *ku + 1 - j + l;
+/* Computing MIN */
+	    i__2 = *n, i__3 = j + *kl;
+	    i__4 = min(i__2,i__3) - l + 1; /* number of stored entries in column j */
+	    classq_(&i__4, &ab[k + j * ab_dim1], &c__1, &scale, &sum);
+/* L90: */
+	}
+	value = scale * sqrt(sum);
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/* End of CLANGB */
+
+} /* clangb_ */
diff --git a/contrib/libs/clapack/clange.c b/contrib/libs/clapack/clange.c
new file mode 100644
index 0000000000..0530406088
--- /dev/null
+++ b/contrib/libs/clapack/clange.c
@@ -0,0 +1,199 @@
+/* clange.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal clange_(char *norm, integer *m, integer *n, complex *a, integer *
+	lda, real *work)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    real ret_val, r__1, r__2;
+
+    /* Builtin functions */
+    double c_abs(complex *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j;
+    real sum, scale;
+    extern logical lsame_(char *, char *);
+    real value;
+    extern /* Subroutine */ int classq_(integer *, complex *, integer *, real
+	    *, real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLANGE returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* complex matrix A. */
+
+/* Description */
+/* =========== */
+
+/* CLANGE returns the value */
+
+/* CLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in CLANGE as described */
+/* above. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. When M = 0, */
+/* CLANGE is set to zero. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. When N = 0, */
+/* CLANGE is set to zero. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The m by n matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(M,1). */
+
+/* WORK (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= M when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments: shift pointers so 1-based Fortran indexing works. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --work;
+
+    /* Function Body */
+    if (min(*m,*n) == 0) {
+	value = 0.f;
+    } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+	value = 0.f;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		r__1 = value, r__2 = c_abs(&a[i__ + j * a_dim1]);
+		value = dmax(r__1,r__2);
+/* L10: */
+	    }
+/* L20: */
+	}
+    } else if (lsame_(norm, "O") || *(unsigned char *)
+	    norm == '1') {
+
+/* Find norm1(A): maximum column sum. */
+
+	value = 0.f;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = 0.f;
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		sum += c_abs(&a[i__ + j * a_dim1]);
+/* L30: */
+	    }
+	    value = dmax(value,sum);
+/* L40: */
+	}
+    } else if (lsame_(norm, "I")) {
+
+/* Find normI(A): maximum row sum, accumulated per row in WORK. */
+
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    work[i__] = 0.f;
+/* L50: */
+	}
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		work[i__] += c_abs(&a[i__ + j * a_dim1]);
+/* L60: */
+	    }
+/* L70: */
+	}
+	value = 0.f;
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	    r__1 = value, r__2 = work[i__];
+	    value = dmax(r__1,r__2);
+/* L80: */
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A): scaled sum of squares via CLASSQ to avoid overflow. */
+
+	scale = 0.f;
+	sum = 1.f;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    classq_(m, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L90: */
+	}
+	value = scale * sqrt(sum);
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/* End of CLANGE */
+
+} /* clange_ */
diff --git a/contrib/libs/clapack/clangt.c b/contrib/libs/clapack/clangt.c
new file mode 100644
index 0000000000..ac8e57c6c2
--- /dev/null
+++ b/contrib/libs/clapack/clangt.c
@@ -0,0 +1,195 @@
+/* clangt.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal clangt_(char *norm, integer *n, complex *dl, complex *d__, complex 
+	*du)
+{
+    /* System generated locals (f2c scratch temporaries) */
+    integer i__1;
+    real ret_val, r__1, r__2;
+
+    /* Builtin functions (supplied by libf2c) */
+    double c_abs(complex *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__;
+    real sum, scale;
+    extern logical lsame_(char *, char *);
+    real anorm;
+    extern /* Subroutine */ int classq_(integer *, complex *, integer *, real 
+	    *, real *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CLANGT returns the value of the one norm, or the Frobenius norm, or */
+/*  the infinity norm, or the element of largest absolute value of a */
+/*  complex tridiagonal matrix A. */
+
+/*  Description */
+/*  =========== */
+
+/*  CLANGT returns the value */
+
+/*     CLANGT = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/*              ( */
+/*              ( norm1(A),         NORM = '1', 'O' or 'o' */
+/*              ( */
+/*              ( normI(A),         NORM = 'I' or 'i' */
+/*              ( */
+/*              ( normF(A),         NORM = 'F', 'f', 'E' or 'e' */
+
+/*  where norm1 denotes the one norm of a matrix (maximum column sum), */
+/*  normI denotes the infinity norm of a matrix (maximum row sum) and */
+/*  normF denotes the Frobenius norm of a matrix (square root of sum of */
+/*  squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  NORM    (input) CHARACTER*1 */
+/*          Specifies the value to be returned in CLANGT as described */
+/*          above. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0.  When N = 0, CLANGT is */
+/*          set to zero. */
+
+/*  DL      (input) COMPLEX array, dimension (N-1) */
+/*          The (n-1) sub-diagonal elements of A. */
+
+/*  D       (input) COMPLEX array, dimension (N) */
+/*          The diagonal elements of A. */
+
+/*  DU      (input) COMPLEX array, dimension (N-1) */
+/*          The (n-1) super-diagonal elements of A. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments: shift base pointers so the 1-based Fortran
+       indexing dl[1..n-1], d__[1..n], du[1..n-1] carries over to C */
+    --du;
+    --d__;
+    --dl;
+
+    /* Function Body -- N <= 0 yields a zero norm by convention */
+    if (*n <= 0) {
+	anorm = 0.f;
+    } else if (lsame_(norm, "M")) {
+
+/*        Find max(abs(A(i,j))) over the diagonal and both off-diagonals. */
+
+	anorm = c_abs(&d__[*n]);
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX (dmax is an f2c macro, hence the r__ temporaries) */
+	    r__1 = anorm, r__2 = c_abs(&dl[i__]);
+	    anorm = dmax(r__1,r__2);
+/* Computing MAX */
+	    r__1 = anorm, r__2 = c_abs(&d__[i__]);
+	    anorm = dmax(r__1,r__2);
+/* Computing MAX */
+	    r__1 = anorm, r__2 = c_abs(&du[i__]);
+	    anorm = dmax(r__1,r__2);
+/* L10: end of element scan */
+	}
+    } else if (lsame_(norm, "O") || *(unsigned char *)
+	    norm == '1') {
+
+/*        Find norm1(A): maximum absolute column sum. */
+
+	if (*n == 1) {
+	    anorm = c_abs(&d__[1]);
+	} else {
+/* Computing MAX of the first and last column sums (two-element columns) */
+	    r__1 = c_abs(&d__[1]) + c_abs(&dl[1]), r__2 = c_abs(&d__[*n]) + 
+		    c_abs(&du[*n - 1]);
+	    anorm = dmax(r__1,r__2);
+	    i__1 = *n - 1;
+	    for (i__ = 2; i__ <= i__1; ++i__) {
+/* Computing MAX against the interior (three-element) column sums */
+		r__1 = anorm, r__2 = c_abs(&d__[i__]) + c_abs(&dl[i__]) + 
+			c_abs(&du[i__ - 1]);
+		anorm = dmax(r__1,r__2);
+/* L20: */
+	    }
+	}
+    } else if (lsame_(norm, "I")) {
+
+/*        Find normI(A): maximum absolute row sum. */
+
+	if (*n == 1) {
+	    anorm = c_abs(&d__[1]);
+	} else {
+/* Computing MAX of the first and last row sums (two-element rows) */
+	    r__1 = c_abs(&d__[1]) + c_abs(&du[1]), r__2 = c_abs(&d__[*n]) + 
+		    c_abs(&dl[*n - 1]);
+	    anorm = dmax(r__1,r__2);
+	    i__1 = *n - 1;
+	    for (i__ = 2; i__ <= i__1; ++i__) {
+/* Computing MAX against the interior (three-element) row sums */
+		r__1 = anorm, r__2 = c_abs(&d__[i__]) + c_abs(&du[i__]) + 
+			c_abs(&dl[i__ - 1]);
+		anorm = dmax(r__1,r__2);
+/* L30: */
+	    }
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/*        Find normF(A) via CLASSQ's scaled sum of squares (overflow-safe). */
+
+	scale = 0.f;
+	sum = 1.f;
+	classq_(n, &d__[1], &c__1, &scale, &sum);
+	if (*n > 1) {
+	    i__1 = *n - 1;
+	    classq_(&i__1, &dl[1], &c__1, &scale, &sum);
+	    i__1 = *n - 1;
+	    classq_(&i__1, &du[1], &c__1, &scale, &sum);
+	}
+	anorm = scale * sqrt(sum);
+    }
+
+    ret_val = anorm;
+    return ret_val;
+
+/*     End of CLANGT */
+
+} /* clangt_ */
diff --git a/contrib/libs/clapack/clanhb.c b/contrib/libs/clapack/clanhb.c
new file mode 100644
index 0000000000..7305aed334
--- /dev/null
+++ b/contrib/libs/clapack/clanhb.c
@@ -0,0 +1,291 @@
+/* clanhb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal clanhb_(char *norm, char *uplo, integer *n, integer *k, complex *
+	ab, integer *ldab, real *work)
+{
+    /* System generated locals (f2c scratch temporaries) */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+    real ret_val, r__1, r__2, r__3;
+
+    /* Builtin functions (supplied by libf2c) */
+    double c_abs(complex *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, l;
+    real sum, absa, scale;
+    extern logical lsame_(char *, char *);
+    real value;
+    extern /* Subroutine */ int classq_(integer *, complex *, integer *, real 
+	    *, real *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CLANHB returns the value of the one norm, or the Frobenius norm, or */
+/*  the infinity norm, or the element of largest absolute value of an */
+/*  n by n hermitian band matrix A, with k super-diagonals. */
+
+/*  Description */
+/*  =========== */
+
+/*  CLANHB returns the value */
+
+/*     CLANHB = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/*              ( */
+/*              ( norm1(A),         NORM = '1', 'O' or 'o' */
+/*              ( */
+/*              ( normI(A),         NORM = 'I' or 'i' */
+/*              ( */
+/*              ( normF(A),         NORM = 'F', 'f', 'E' or 'e' */
+
+/*  where norm1 denotes the one norm of a matrix (maximum column sum), */
+/*  normI denotes the infinity norm of a matrix (maximum row sum) and */
+/*  normF denotes the Frobenius norm of a matrix (square root of sum of */
+/*  squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  NORM    (input) CHARACTER*1 */
+/*          Specifies the value to be returned in CLANHB as described */
+/*          above. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the upper or lower triangular part of the */
+/*          band matrix A is supplied. */
+/*          = 'U':  Upper triangular */
+/*          = 'L':  Lower triangular */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0.  When N = 0, CLANHB is */
+/*          set to zero. */
+
+/*  K       (input) INTEGER */
+/*          The number of super-diagonals or sub-diagonals of the */
+/*          band matrix A.  K >= 0. */
+
+/*  AB      (input) COMPLEX array, dimension (LDAB,N) */
+/*          The upper or lower triangle of the hermitian band matrix A, */
+/*          stored in the first K+1 rows of AB.  The j-th column of A is */
+/*          stored in the j-th column of the array AB as follows: */
+/*          if UPLO = 'U', AB(k+1+i-j,j) = A(i,j) for max(1,j-k)<=i<=j; */
+/*          if UPLO = 'L', AB(1+i-j,j)   = A(i,j) for j<=i<=min(n,j+k). */
+/*          Note that the imaginary parts of the diagonal elements need */
+/*          not be set and are assumed to be zero. */
+
+/*  LDAB    (input) INTEGER */
+/*          The leading dimension of the array AB.  LDAB >= K+1. */
+
+/*  WORK    (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/*          where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/*          WORK is not referenced. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments: shift pointers so 1-based ab[i + j*ab_dim1]
+       and work[i] match the Fortran band-storage indexing */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --work;
+
+    /* Function Body */
+    if (*n == 0) {
+	value = 0.f;
+    } else if (lsame_(norm, "M")) {
+
+/*        Find max(abs(A(i,j))); diagonal entries contribute only their
+          real part (imaginary part assumed zero for a Hermitian matrix). */
+
+	value = 0.f;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MAX over the strictly-super-diagonal band of column j */
+		i__2 = *k + 2 - j;
+		i__3 = *k;
+		for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+		    r__1 = value, r__2 = c_abs(&ab[i__ + j * ab_dim1]);
+		    value = dmax(r__1,r__2);
+/* L10: */
+		}
+/* Computing MAX against the diagonal entry, stored in row k+1 */
+		i__3 = *k + 1 + j * ab_dim1;
+		r__2 = value, r__3 = (r__1 = ab[i__3].r, dabs(r__1));
+		value = dmax(r__2,r__3);
+/* L20: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MAX against the diagonal entry, stored in row 1 */
+		i__3 = j * ab_dim1 + 1;
+		r__2 = value, r__3 = (r__1 = ab[i__3].r, dabs(r__1));
+		value = dmax(r__2,r__3);
+/* Computing MIN: band of column j may be clipped by the matrix edge */
+		i__2 = *n + 1 - j, i__4 = *k + 1;
+		i__3 = min(i__2,i__4);
+		for (i__ = 2; i__ <= i__3; ++i__) {
+/* Computing MAX */
+		    r__1 = value, r__2 = c_abs(&ab[i__ + j * ab_dim1]);
+		    value = dmax(r__1,r__2);
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+    } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/*        Find normI(A) ( = norm1(A), since A is hermitian). */
+
+	value = 0.f;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		sum = 0.f;
+		l = *k + 1 - j;
+/* Computing MAX: first stored row of column j */
+		i__3 = 1, i__2 = j - *k;
+		i__4 = j - 1;
+		for (i__ = max(i__3,i__2); i__ <= i__4; ++i__) {
+		    absa = c_abs(&ab[l + i__ + j * ab_dim1]);
+		    sum += absa;
+		    work[i__] += absa;
+/* L50: work[i] (i < j) was assigned at column i, so this is safe */
+		}
+		i__4 = *k + 1 + j * ab_dim1;
+		work[j] = sum + (r__1 = ab[i__4].r, dabs(r__1));
+/* L60: */
+	    }
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX over the accumulated row sums */
+		r__1 = value, r__2 = work[i__];
+		value = dmax(r__1,r__2);
+/* L70: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		work[i__] = 0.f;
+/* L80: */
+	    }
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__4 = j * ab_dim1 + 1;
+		sum = work[j] + (r__1 = ab[i__4].r, dabs(r__1));
+		l = 1 - j;
+/* Computing MIN: band of column j may be clipped by the matrix edge */
+		i__3 = *n, i__2 = j + *k;
+		i__4 = min(i__3,i__2);
+		for (i__ = j + 1; i__ <= i__4; ++i__) {
+		    absa = c_abs(&ab[l + i__ + j * ab_dim1]);
+		    sum += absa;
+		    work[i__] += absa;
+/* L90: */
+		}
+		value = dmax(value,sum);
+/* L100: */
+	    }
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/*        Find normF(A): off-diagonal squares are counted twice (Hermitian
+          symmetry), then the real diagonal is folded in by hand below. */
+
+	scale = 0.f;
+	sum = 1.f;
+	if (*k > 0) {
+	    if (lsame_(uplo, "U")) {
+		i__1 = *n;
+		for (j = 2; j <= i__1; ++j) {
+/* Computing MIN: number of stored super-diagonal entries in column j */
+		    i__3 = j - 1;
+		    i__4 = min(i__3,*k);
+/* Computing MAX: first stored row of column j */
+		    i__2 = *k + 2 - j;
+		    classq_(&i__4, &ab[max(i__2, 1)+ j * ab_dim1], &c__1, &
+			    scale, &sum);
+/* L110: */
+		}
+		l = *k + 1;
+	    } else {
+		i__1 = *n - 1;
+		for (j = 1; j <= i__1; ++j) {
+/* Computing MIN: number of stored sub-diagonal entries in column j */
+		    i__3 = *n - j;
+		    i__4 = min(i__3,*k);
+		    classq_(&i__4, &ab[j * ab_dim1 + 2], &c__1, &scale, &sum);
+/* L120: */
+		}
+		l = 1;
+	    }
+	    sum *= 2;
+	} else {
+	    l = 1;
+	}
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__4 = l + j * ab_dim1;
+	    if (ab[i__4].r != 0.f) {
+		i__4 = l + j * ab_dim1;
+		absa = (r__1 = ab[i__4].r, dabs(r__1));
+		if (scale < absa) {
+/* Computing 2nd power: rescale the running sum by (scale/absa)^2 */
+		    r__1 = scale / absa;
+		    sum = sum * (r__1 * r__1) + 1.f;
+		    scale = absa;
+		} else {
+/* Computing 2nd power */
+		    r__1 = absa / scale;
+		    sum += r__1 * r__1;
+		}
+	    }
+/* L130: */
+	}
+	value = scale * sqrt(sum);
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/*     End of CLANHB */
+
+} /* clanhb_ */
diff --git a/contrib/libs/clapack/clanhe.c b/contrib/libs/clapack/clanhe.c
new file mode 100644
index 0000000000..9eae42c0f1
--- /dev/null
+++ b/contrib/libs/clapack/clanhe.c
@@ -0,0 +1,265 @@
+/* clanhe.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal clanhe_(char *norm, char *uplo, integer *n, complex *a, integer *
+	lda, real *work)
+{
+    /* System generated locals (f2c scratch temporaries) */
+    integer a_dim1, a_offset, i__1, i__2;
+    real ret_val, r__1, r__2, r__3;
+
+    /* Builtin functions (supplied by libf2c) */
+    double c_abs(complex *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j;
+    real sum, absa, scale;
+    extern logical lsame_(char *, char *);
+    real value;
+    extern /* Subroutine */ int classq_(integer *, complex *, integer *, real 
+	    *, real *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CLANHE returns the value of the one norm, or the Frobenius norm, or */
+/*  the infinity norm, or the element of largest absolute value of a */
+/*  complex hermitian matrix A. */
+
+/*  Description */
+/*  =========== */
+
+/*  CLANHE returns the value */
+
+/*     CLANHE = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/*              ( */
+/*              ( norm1(A),         NORM = '1', 'O' or 'o' */
+/*              ( */
+/*              ( normI(A),         NORM = 'I' or 'i' */
+/*              ( */
+/*              ( normF(A),         NORM = 'F', 'f', 'E' or 'e' */
+
+/*  where norm1 denotes the one norm of a matrix (maximum column sum), */
+/*  normI denotes the infinity norm of a matrix (maximum row sum) and */
+/*  normF denotes the Frobenius norm of a matrix (square root of sum of */
+/*  squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  NORM    (input) CHARACTER*1 */
+/*          Specifies the value to be returned in CLANHE as described */
+/*          above. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the upper or lower triangular part of the */
+/*          hermitian matrix A is to be referenced. */
+/*          = 'U':  Upper triangular part of A is referenced */
+/*          = 'L':  Lower triangular part of A is referenced */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0.  When N = 0, CLANHE is */
+/*          set to zero. */
+
+/*  A       (input) COMPLEX array, dimension (LDA,N) */
+/*          The hermitian matrix A.  If UPLO = 'U', the leading n by n */
+/*          upper triangular part of A contains the upper triangular part */
+/*          of the matrix A, and the strictly lower triangular part of A */
+/*          is not referenced.  If UPLO = 'L', the leading n by n lower */
+/*          triangular part of A contains the lower triangular part of */
+/*          the matrix A, and the strictly upper triangular part of A is */
+/*          not referenced.  Note that the imaginary parts of the diagonal */
+/*          elements need not be set and are assumed to be zero. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(N,1). */
+
+/*  WORK    (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/*          where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/*          WORK is not referenced. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments: shift pointers so 1-based a[i + j*a_dim1]
+       and work[i] match the Fortran column-major indexing */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --work;
+
+    /* Function Body */
+    if (*n == 0) {
+	value = 0.f;
+    } else if (lsame_(norm, "M")) {
+
+/*        Find max(abs(A(i,j))); diagonal entries contribute only their
+          real part (imaginary part assumed zero for a Hermitian matrix). */
+
+	value = 0.f;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX (dmax is an f2c macro, hence the r__ temporaries) */
+		    r__1 = value, r__2 = c_abs(&a[i__ + j * a_dim1]);
+		    value = dmax(r__1,r__2);
+/* L10: */
+		}
+/* Computing MAX against the diagonal entry A(j,j) */
+		i__2 = j + j * a_dim1;
+		r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+		value = dmax(r__2,r__3);
+/* L20: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MAX against the diagonal entry A(j,j) */
+		i__2 = j + j * a_dim1;
+		r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+		value = dmax(r__2,r__3);
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		    r__1 = value, r__2 = c_abs(&a[i__ + j * a_dim1]);
+		    value = dmax(r__1,r__2);
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+    } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/*        Find normI(A) ( = norm1(A), since A is hermitian). */
+
+	value = 0.f;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		sum = 0.f;
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    absa = c_abs(&a[i__ + j * a_dim1]);
+		    sum += absa;
+		    work[i__] += absa;
+/* L50: work[i] (i < j) was assigned at column i, so this is safe */
+		}
+		i__2 = j + j * a_dim1;
+		work[j] = sum + (r__1 = a[i__2].r, dabs(r__1));
+/* L60: */
+	    }
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX over the accumulated row sums */
+		r__1 = value, r__2 = work[i__];
+		value = dmax(r__1,r__2);
+/* L70: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		work[i__] = 0.f;
+/* L80: */
+	    }
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j + j * a_dim1;
+		sum = work[j] + (r__1 = a[i__2].r, dabs(r__1));
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    absa = c_abs(&a[i__ + j * a_dim1]);
+		    sum += absa;
+		    work[i__] += absa;
+/* L90: */
+		}
+		value = dmax(value,sum);
+/* L100: */
+	    }
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/*        Find normF(A): off-diagonal squares are counted twice (Hermitian
+          symmetry), then the real diagonal is folded in by hand below. */
+
+	scale = 0.f;
+	sum = 1.f;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 2; j <= i__1; ++j) {
+		i__2 = j - 1;
+		classq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L110: */
+	    }
+	} else {
+	    i__1 = *n - 1;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n - j;
+		classq_(&i__2, &a[j + 1 + j * a_dim1], &c__1, &scale, &sum);
+/* L120: */
+	    }
+	}
+	sum *= 2;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__ + i__ * a_dim1;
+	    if (a[i__2].r != 0.f) {
+		i__2 = i__ + i__ * a_dim1;
+		absa = (r__1 = a[i__2].r, dabs(r__1));
+		if (scale < absa) {
+/* Computing 2nd power: rescale the running sum by (scale/absa)^2 */
+		    r__1 = scale / absa;
+		    sum = sum * (r__1 * r__1) + 1.f;
+		    scale = absa;
+		} else {
+/* Computing 2nd power */
+		    r__1 = absa / scale;
+		    sum += r__1 * r__1;
+		}
+	    }
+/* L130: */
+	}
+	value = scale * sqrt(sum);
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/*     End of CLANHE */
+
+} /* clanhe_ */
diff --git a/contrib/libs/clapack/clanhf.c b/contrib/libs/clapack/clanhf.c
new file mode 100644
index 0000000000..e598dae650
--- /dev/null
+++ b/contrib/libs/clapack/clanhf.c
@@ -0,0 +1,1803 @@
+/* clanhf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal clanhf_(char *norm, char *transr, char *uplo, integer *n, complex *
+ a, real *work)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ real ret_val, r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double c_abs(complex *), sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k, l;
+ real s;
+ integer n1;
+ real aa;
+ integer lda, ifm, noe, ilu;
+ real scale;
+ extern logical lsame_(char *, char *);
+ real value;
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int classq_(integer *, complex *, integer *, real
+ *, real *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLANHF returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* complex Hermitian matrix A in RFP format. */
+
+/* Description */
+/* =========== */
+
+/* CLANHF returns the value */
+
+/* CLANHF = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER */
+/* Specifies the value to be returned in CLANHF as described */
+/* above. */
+
+/* TRANSR (input) CHARACTER */
+/* Specifies whether the RFP format of A is normal or */
+/* conjugate-transposed format. */
+/* = 'N': RFP format is Normal */
+/* = 'C': RFP format is Conjugate-transposed */
+
+/* UPLO (input) CHARACTER */
+/* On entry, UPLO specifies whether the RFP matrix A came from */
+/* an upper or lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' RFP A came from an upper triangular */
+/* matrix */
+
+/* UPLO = 'L' or 'l' RFP A came from a lower triangular */
+/* matrix */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, CLANHF is */
+/* set to zero. */
+
+/* A (input) COMPLEX*16 array, dimension ( N*(N+1)/2 ); */
+/* On entry, the matrix A in RFP Format. */
+/* RFP Format is described by TRANSR, UPLO and N as follows: */
+/* If TRANSR='N' then RFP A is (0:N,0:K-1) when N is even; */
+/* K=N/2. RFP A is (0:N-1,0:K) when N is odd; K=N/2. If */
+/* TRANSR = 'C' then RFP is the Conjugate-transpose of RFP A */
+/* as defined when TRANSR = 'N'. The contents of RFP A are */
+/* defined by UPLO as follows: If UPLO = 'U' the RFP A */
+/* contains the ( N*(N+1)/2 ) elements of upper packed A */
+/* either in normal or conjugate-transpose Format. If */
+/* UPLO = 'L' the RFP A contains the ( N*(N+1) /2 ) elements */
+/* of lower packed A either in normal or conjugate-transpose */
+/* Format. The LDA of RFP A is (N+1)/2 when TRANSR = 'C'. When */
+/* TRANSR is 'N' the LDA is N+1 when N is even and is N when */
+/* is odd. See the Note below for more details. */
+/* Unchanged on exit. */
+
+/* WORK (workspace) REAL array, dimension (LWORK), */
+/* where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/* WORK is not referenced. */
+
+/* Note: */
+/* ===== */
+
+/* We first consider Standard Packed Format when N is even. */
+/* We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* conjugate-transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* conjugate-transpose of the last three columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- -- */
+/* 03 04 05 33 43 53 */
+/* -- -- */
+/* 13 14 15 00 44 54 */
+/* -- */
+/* 23 24 25 10 11 55 */
+
+/* 33 34 35 20 21 22 */
+/* -- */
+/* 00 44 45 30 31 32 */
+/* -- -- */
+/* 01 11 55 40 41 42 */
+/* -- -- -- */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We next consider Standard Packed Format when N is odd. */
+/* We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* conjugate-transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* conjugate-transpose of the last two columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- */
+/* 02 03 04 00 33 43 */
+/* -- */
+/* 12 13 14 10 11 44 */
+
+/* 22 23 24 20 21 22 */
+/* -- */
+/* 00 33 34 30 31 32 */
+/* -- -- */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- */
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ if (*n == 0) {
+ ret_val = 0.f;
+ return ret_val;
+ }
+
+/* set noe = 1 if n is odd. if n is even set noe=0 */
+
+ noe = 1;
+ if (*n % 2 == 0) {
+ noe = 0;
+ }
+
+/* set ifm = 0 when form='C' or 'c' and 1 otherwise */
+
+ ifm = 1;
+ if (lsame_(transr, "C")) {
+ ifm = 0;
+ }
+
+/* set ilu = 0 when uplo='U or 'u' and 1 otherwise */
+
+ ilu = 1;
+ if (lsame_(uplo, "U")) {
+ ilu = 0;
+ }
+
+/* set lda = (n+1)/2 when ifm = 0 */
+/* set lda = n when ifm = 1 and noe = 1 */
+/* set lda = n+1 when ifm = 1 and noe = 0 */
+
+ if (ifm == 1) {
+ if (noe == 1) {
+ lda = *n;
+ } else {
+/* noe=0 */
+ lda = *n + 1;
+ }
+ } else {
+/* ifm=0 */
+ lda = (*n + 1) / 2;
+ }
+
+ if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ k = (*n + 1) / 2;
+ value = 0.f;
+ if (noe == 1) {
+/* n is odd & n = k + k - 1 */
+ if (ifm == 1) {
+/* A is n by k */
+ if (ilu == 1) {
+/* uplo ='L' */
+ j = 0;
+/* -> L(0,0) */
+/* Computing MAX */
+ i__1 = j + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__1].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 2;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ i__ = j - 1;
+/* L(k+j,k+j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__ = j;
+/* -> L(j,j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__2 = *n - 1;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ }
+ } else {
+/* uplo = 'U' */
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k + j - 2;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ i__ = k + j - 1;
+/* -> U(i,i) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ ++i__;
+/* =k+j; i -> U(j,j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__2 = *n - 1;
+ for (i__ = k + j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ }
+ i__1 = *n - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+/* j=k-1 */
+ }
+/* i=n-1 -> U(n-1,n-1) */
+/* Computing MAX */
+ i__1 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__1].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ }
+ } else {
+/* xpose case; A is k by n */
+ if (ilu == 1) {
+/* uplo ='L' */
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ i__ = j;
+/* L(i,i) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__ = j + 1;
+/* L(j+k,j+k) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__2 = k - 1;
+ for (i__ = j + 2; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ }
+ j = k - 1;
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ i__ = k - 1;
+/* -> L(i,i) is at A(i,j) */
+/* Computing MAX */
+ i__1 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__1].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ }
+ } else {
+/* uplo = 'U' */
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ }
+ j = k - 1;
+/* -> U(j,j) is at A(0,j) */
+/* Computing MAX */
+ i__1 = j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__1].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+ i__2 = j - k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ i__ = j - k;
+/* -> U(i,i) at A(i,j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__ = j - k + 1;
+/* U(j,j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__2 = k - 1;
+ for (i__ = j - k + 2; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ }
+ }
+ }
+ } else {
+/* n is even & k = n/2 */
+ if (ifm == 1) {
+/* A is n+1 by k */
+ if (ilu == 1) {
+/* uplo ='L' */
+ j = 0;
+/* -> L(k,k) & j=1 -> L(0,0) */
+/* Computing MAX */
+ i__1 = j + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__1].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+/* Computing MAX */
+ i__1 = j + 1 + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__1].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ i__ = j;
+/* L(k+j,k+j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__ = j + 1;
+/* -> L(j,j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__2 = *n;
+ for (i__ = j + 2; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ }
+ } else {
+/* uplo = 'U' */
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k + j - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ i__ = k + j;
+/* -> U(i,i) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ ++i__;
+/* =k+j+1; i -> U(j,j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__2 = *n;
+ for (i__ = k + j + 2; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ }
+ i__1 = *n - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+/* j=k-1 */
+ }
+/* i=n-1 -> U(n-1,n-1) */
+/* Computing MAX */
+ i__1 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__1].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__ = *n;
+/* -> U(k-1,k-1) */
+/* Computing MAX */
+ i__1 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__1].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ }
+ } else {
+/* xpose case; A is k by n+1 */
+ if (ilu == 1) {
+/* uplo ='L' */
+ j = 0;
+/* -> L(k,k) at A(0,0) */
+/* Computing MAX */
+ i__1 = j + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__1].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 2;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ i__ = j - 1;
+/* L(i,i) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__ = j;
+/* L(j+k,j+k) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__2 = k - 1;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ }
+ j = k;
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ i__ = k - 1;
+/* -> L(i,i) is at A(i,j) */
+/* Computing MAX */
+ i__1 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__1].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ }
+ } else {
+/* uplo = 'U' */
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ }
+ j = k;
+/* -> U(j,j) is at A(0,j) */
+/* Computing MAX */
+ i__1 = j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__1].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ i__1 = *n - 1;
+ for (j = k + 1; j <= i__1; ++j) {
+ i__2 = j - k - 2;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ i__ = j - k - 1;
+/* -> U(i,i) at A(i,j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__ = j - k;
+/* U(j,j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__2 = k - 1;
+ for (i__ = j - k + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ }
+ j = *n;
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * lda]);
+ value = dmax(r__1,r__2);
+ }
+ i__ = k - 1;
+/* U(k,k) at A(i,j) */
+/* Computing MAX */
+ i__1 = i__ + j * lda;
+ r__2 = value, r__3 = (r__1 = a[i__1].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ }
+ }
+ }
+ } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/* Find normI(A) ( = norm1(A), since A is Hermitian). */
+
+ if (ifm == 1) {
+/* A is 'N' */
+ k = *n / 2;
+ if (noe == 1) {
+/* n is odd & A is n by (n+1)/2 */
+ if (ilu == 0) {
+/* uplo = 'U' */
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+ }
+ i__1 = k;
+ for (j = 0; j <= i__1; ++j) {
+ s = 0.f;
+ i__2 = k + j - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = c_abs(&a[i__ + j * lda]);
+/* -> A(i,j+k) */
+ s += aa;
+ work[i__] += aa;
+ }
+ i__2 = i__ + j * lda;
+ aa = (r__1 = a[i__2].r, dabs(r__1));
+/* -> A(j+k,j+k) */
+ work[j + k] = s + aa;
+ if (i__ == k + k) {
+ goto L10;
+ }
+ ++i__;
+ i__2 = i__ + j * lda;
+ aa = (r__1 = a[i__2].r, dabs(r__1));
+/* -> A(j,j) */
+ work[j] += aa;
+ s = 0.f;
+ i__2 = k - 1;
+ for (l = j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = c_abs(&a[i__ + j * lda]);
+/* -> A(l,j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[j] += s;
+ }
+L10:
+ i__ = isamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ } else {
+/* ilu = 1 & uplo = 'L' */
+ ++k;
+/* k=(n+1)/2 for n odd and ilu=1 */
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+ }
+ for (j = k - 1; j >= 0; --j) {
+ s = 0.f;
+ i__1 = j - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = c_abs(&a[i__ + j * lda]);
+/* -> A(j+k,i+k) */
+ s += aa;
+ work[i__ + k] += aa;
+ }
+ if (j > 0) {
+ i__1 = i__ + j * lda;
+ aa = (r__1 = a[i__1].r, dabs(r__1));
+/* -> A(j+k,j+k) */
+ s += aa;
+ work[i__ + k] += s;
+/* i=j */
+ ++i__;
+ }
+ i__1 = i__ + j * lda;
+ aa = (r__1 = a[i__1].r, dabs(r__1));
+/* -> A(j,j) */
+ work[j] = aa;
+ s = 0.f;
+ i__1 = *n - 1;
+ for (l = j + 1; l <= i__1; ++l) {
+ ++i__;
+ aa = c_abs(&a[i__ + j * lda]);
+/* -> A(l,j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[j] += s;
+ }
+ i__ = isamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ }
+ } else {
+/* n is even & A is n+1 by k = n/2 */
+ if (ilu == 0) {
+/* uplo = 'U' */
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ s = 0.f;
+ i__2 = k + j - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = c_abs(&a[i__ + j * lda]);
+/* -> A(i,j+k) */
+ s += aa;
+ work[i__] += aa;
+ }
+ i__2 = i__ + j * lda;
+ aa = (r__1 = a[i__2].r, dabs(r__1));
+/* -> A(j+k,j+k) */
+ work[j + k] = s + aa;
+ ++i__;
+ i__2 = i__ + j * lda;
+ aa = (r__1 = a[i__2].r, dabs(r__1));
+/* -> A(j,j) */
+ work[j] += aa;
+ s = 0.f;
+ i__2 = k - 1;
+ for (l = j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = c_abs(&a[i__ + j * lda]);
+/* -> A(l,j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[j] += s;
+ }
+ i__ = isamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ } else {
+/* ilu = 1 & uplo = 'L' */
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+ }
+ for (j = k - 1; j >= 0; --j) {
+ s = 0.f;
+ i__1 = j - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = c_abs(&a[i__ + j * lda]);
+/* -> A(j+k,i+k) */
+ s += aa;
+ work[i__ + k] += aa;
+ }
+ i__1 = i__ + j * lda;
+ aa = (r__1 = a[i__1].r, dabs(r__1));
+/* -> A(j+k,j+k) */
+ s += aa;
+ work[i__ + k] += s;
+/* i=j */
+ ++i__;
+ i__1 = i__ + j * lda;
+ aa = (r__1 = a[i__1].r, dabs(r__1));
+/* -> A(j,j) */
+ work[j] = aa;
+ s = 0.f;
+ i__1 = *n - 1;
+ for (l = j + 1; l <= i__1; ++l) {
+ ++i__;
+ aa = c_abs(&a[i__ + j * lda]);
+/* -> A(l,j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[j] += s;
+ }
+ i__ = isamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ }
+ }
+ } else {
+/* ifm=0 */
+ k = *n / 2;
+ if (noe == 1) {
+/* n is odd & A is (n+1)/2 by n */
+ if (ilu == 0) {
+/* uplo = 'U' */
+ n1 = k;
+/* n/2 */
+ ++k;
+/* k is the row size and lda */
+ i__1 = *n - 1;
+ for (i__ = n1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+ }
+ i__1 = n1 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ s = 0.f;
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = c_abs(&a[i__ + j * lda]);
+/* A(j,n1+i) */
+ work[i__ + n1] += aa;
+ s += aa;
+ }
+ work[j] = s;
+ }
+/* j=n1=k-1 is special */
+ i__1 = j * lda;
+ s = (r__1 = a[i__1].r, dabs(r__1));
+/* A(k-1,k-1) */
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ aa = c_abs(&a[i__ + j * lda]);
+/* A(k-1,i+n1) */
+ work[i__ + n1] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+ s = 0.f;
+ i__2 = j - k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = c_abs(&a[i__ + j * lda]);
+/* A(i,j-k) */
+ work[i__] += aa;
+ s += aa;
+ }
+/* i=j-k */
+ i__2 = i__ + j * lda;
+ aa = (r__1 = a[i__2].r, dabs(r__1));
+/* A(j-k,j-k) */
+ s += aa;
+ work[j - k] += s;
+ ++i__;
+ i__2 = i__ + j * lda;
+ s = (r__1 = a[i__2].r, dabs(r__1));
+/* A(j,j) */
+ i__2 = *n - 1;
+ for (l = j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = c_abs(&a[i__ + j * lda]);
+/* A(j,l) */
+ work[l] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ }
+ i__ = isamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ } else {
+/* ilu=1 & uplo = 'L' */
+ ++k;
+/* k=(n+1)/2 for n odd and ilu=1 */
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+/* process */
+ s = 0.f;
+ i__2 = j - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = c_abs(&a[i__ + j * lda]);
+/* A(j,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+ i__2 = i__ + j * lda;
+ aa = (r__1 = a[i__2].r, dabs(r__1));
+/* i=j so process of A(j,j) */
+ s += aa;
+ work[j] = s;
+/* is initialised here */
+ ++i__;
+/* i=j process A(j+k,j+k) */
+ i__2 = i__ + j * lda;
+ aa = (r__1 = a[i__2].r, dabs(r__1));
+ s = aa;
+ i__2 = *n - 1;
+ for (l = k + j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = c_abs(&a[i__ + j * lda]);
+/* A(l,k+j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[k + j] += s;
+ }
+/* j=k-1 is special :process col A(k-1,0:k-1) */
+ s = 0.f;
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = c_abs(&a[i__ + j * lda]);
+/* A(k,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+/* i=k-1 */
+ i__1 = i__ + j * lda;
+ aa = (r__1 = a[i__1].r, dabs(r__1));
+/* A(k-1,k-1) */
+ s += aa;
+ work[i__] = s;
+/* done with col j=k+1 */
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+/* process col j of A = A(j,0:k-1) */
+ s = 0.f;
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = c_abs(&a[i__ + j * lda]);
+/* A(j,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ }
+ i__ = isamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ }
+ } else {
+/* n is even & A is k=n/2 by n+1 */
+ if (ilu == 0) {
+/* uplo = 'U' */
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ s = 0.f;
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = c_abs(&a[i__ + j * lda]);
+/* A(j,i+k) */
+ work[i__ + k] += aa;
+ s += aa;
+ }
+ work[j] = s;
+ }
+/* j=k */
+ i__1 = j * lda;
+ aa = (r__1 = a[i__1].r, dabs(r__1));
+/* A(k,k) */
+ s = aa;
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ aa = c_abs(&a[i__ + j * lda]);
+/* A(k,k+i) */
+ work[i__ + k] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ i__1 = *n - 1;
+ for (j = k + 1; j <= i__1; ++j) {
+ s = 0.f;
+ i__2 = j - 2 - k;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = c_abs(&a[i__ + j * lda]);
+/* A(i,j-k-1) */
+ work[i__] += aa;
+ s += aa;
+ }
+/* i=j-1-k */
+ i__2 = i__ + j * lda;
+ aa = (r__1 = a[i__2].r, dabs(r__1));
+/* A(j-k-1,j-k-1) */
+ s += aa;
+ work[j - k - 1] += s;
+ ++i__;
+ i__2 = i__ + j * lda;
+ aa = (r__1 = a[i__2].r, dabs(r__1));
+/* A(j,j) */
+ s = aa;
+ i__2 = *n - 1;
+ for (l = j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = c_abs(&a[i__ + j * lda]);
+/* A(j,l) */
+ work[l] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ }
+/* j=n */
+ s = 0.f;
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = c_abs(&a[i__ + j * lda]);
+/* A(i,k-1) */
+ work[i__] += aa;
+ s += aa;
+ }
+/* i=k-1 */
+ i__1 = i__ + j * lda;
+ aa = (r__1 = a[i__1].r, dabs(r__1));
+/* A(k-1,k-1) */
+ s += aa;
+ work[i__] += s;
+ i__ = isamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ } else {
+/* ilu=1 & uplo = 'L' */
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+ }
+/* j=0 is special :process col A(k:n-1,k) */
+ s = (r__1 = a[0].r, dabs(r__1));
+/* A(k,k) */
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ aa = c_abs(&a[i__]);
+/* A(k+i,k) */
+ work[i__ + k] += aa;
+ s += aa;
+ }
+ work[k] += s;
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+/* process */
+ s = 0.f;
+ i__2 = j - 2;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = c_abs(&a[i__ + j * lda]);
+/* A(j-1,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+ i__2 = i__ + j * lda;
+ aa = (r__1 = a[i__2].r, dabs(r__1));
+/* i=j-1 so process of A(j-1,j-1) */
+ s += aa;
+ work[j - 1] = s;
+/* is initialised here */
+ ++i__;
+/* i=j process A(j+k,j+k) */
+ i__2 = i__ + j * lda;
+ aa = (r__1 = a[i__2].r, dabs(r__1));
+ s = aa;
+ i__2 = *n - 1;
+ for (l = k + j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = c_abs(&a[i__ + j * lda]);
+/* A(l,k+j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[k + j] += s;
+ }
+/* j=k is special :process col A(k,0:k-1) */
+ s = 0.f;
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = c_abs(&a[i__ + j * lda]);
+/* A(k,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+
+/* i=k-1 */
+ i__1 = i__ + j * lda;
+ aa = (r__1 = a[i__1].r, dabs(r__1));
+/* A(k-1,k-1) */
+ s += aa;
+ work[i__] = s;
+/* done with col j=k+1 */
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+
+/* process col j-1 of A = A(j-1,0:k-1) */
+ s = 0.f;
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = c_abs(&a[i__ + j * lda]);
+/* A(j-1,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+ work[j - 1] += s;
+ }
+ i__ = isamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ }
+ }
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ k = (*n + 1) / 2;
+ scale = 0.f;
+ s = 1.f;
+ if (noe == 1) {
+/* n is odd */
+ if (ifm == 1) {
+/* A is normal & A is n by k */
+ if (ilu == 0) {
+/* A is upper */
+ i__1 = k - 3;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 2;
+ classq_(&i__2, &a[k + j + 1 + j * lda], &c__1, &scale,
+ &s);
+/* L at A(k,0) */
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k + j - 1;
+ classq_(&i__2, &a[j * lda], &c__1, &scale, &s);
+/* trap U at A(0,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ l = k - 1;
+/* -> U(k,k) at A(k-1,0) */
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = l;
+ aa = a[i__2].r;
+/* U(k+i,k+i) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ i__2 = l + 1;
+ aa = a[i__2].r;
+/* U(i,i) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ l = l + lda + 1;
+ }
+ i__1 = l;
+ aa = a[i__1].r;
+/* U(n-1,n-1) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ } else {
+/* ilu=1 & A is lower */
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - j - 1;
+ classq_(&i__2, &a[j + 1 + j * lda], &c__1, &scale, &s)
+ ;
+/* trap L at A(0,0) */
+ }
+ i__1 = k - 2;
+ for (j = 1; j <= i__1; ++j) {
+ classq_(&j, &a[(j + 1) * lda], &c__1, &scale, &s);
+/* U at A(0,1) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ aa = a[0].r;
+/* L(0,0) at A(0,0) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ l = lda;
+/* -> L(k,k) at A(0,1) */
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = l;
+ aa = a[i__2].r;
+/* L(k-1+i,k-1+i) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ i__2 = l + 1;
+ aa = a[i__2].r;
+/* L(i,i) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ l = l + lda + 1;
+ }
+ }
+ } else {
+/* A is xpose & A is k by n */
+ if (ilu == 0) {
+/* A' is upper */
+ i__1 = k - 2;
+ for (j = 1; j <= i__1; ++j) {
+ classq_(&j, &a[(k + j) * lda], &c__1, &scale, &s);
+/* U at A(0,k) */
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ classq_(&k, &a[j * lda], &c__1, &scale, &s);
+/* k by k-1 rect. at A(0,0) */
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 1;
+ classq_(&i__2, &a[j + 1 + (j + k - 1) * lda], &c__1, &
+ scale, &s);
+/* L at A(0,k-1) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ l = k * lda - lda;
+/* -> U(k-1,k-1) at A(0,k-1) */
+ i__1 = l;
+ aa = a[i__1].r;
+/* U(k-1,k-1) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ l += lda;
+/* -> U(0,0) at A(0,k) */
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+ i__2 = l;
+ aa = a[i__2].r;
+/* -> U(j-k,j-k) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ i__2 = l + 1;
+ aa = a[i__2].r;
+/* -> U(j,j) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ l = l + lda + 1;
+ }
+ } else {
+/* A' is lower */
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ classq_(&j, &a[j * lda], &c__1, &scale, &s);
+/* U at A(0,0) */
+ }
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+ classq_(&k, &a[j * lda], &c__1, &scale, &s);
+/* k by k-1 rect. at A(0,k) */
+ }
+ i__1 = k - 3;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 2;
+ classq_(&i__2, &a[j + 2 + j * lda], &c__1, &scale, &s)
+ ;
+/* L at A(1,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ l = 0;
+/* -> L(0,0) at A(0,0) */
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = l;
+ aa = a[i__2].r;
+/* L(i,i) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ i__2 = l + 1;
+ aa = a[i__2].r;
+/* L(k+i,k+i) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ l = l + lda + 1;
+ }
+/* L-> k-1 + (k-1)*lda or L(k-1,k-1) at A(k-1,k-1) */
+ i__1 = l;
+ aa = a[i__1].r;
+/* L(k-1,k-1) at A(k-1,k-1) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ }
+ }
+ } else {
+/* n is even */
+ if (ifm == 1) {
+/* A is normal */
+ if (ilu == 0) {
+/* A is upper */
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 1;
+ classq_(&i__2, &a[k + j + 2 + j * lda], &c__1, &scale,
+ &s);
+/* L at A(k+1,0) */
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k + j;
+ classq_(&i__2, &a[j * lda], &c__1, &scale, &s);
+/* trap U at A(0,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ l = k;
+/* -> U(k,k) at A(k,0) */
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = l;
+ aa = a[i__2].r;
+/* U(k+i,k+i) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ i__2 = l + 1;
+ aa = a[i__2].r;
+/* U(i,i) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ l = l + lda + 1;
+ }
+ } else {
+/* ilu=1 & A is lower */
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - j - 1;
+ classq_(&i__2, &a[j + 2 + j * lda], &c__1, &scale, &s)
+ ;
+/* trap L at A(1,0) */
+ }
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ classq_(&j, &a[j * lda], &c__1, &scale, &s);
+/* U at A(0,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ l = 0;
+/* -> L(k,k) at A(0,0) */
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = l;
+ aa = a[i__2].r;
+/* L(k-1+i,k-1+i) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ i__2 = l + 1;
+ aa = a[i__2].r;
+/* L(i,i) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ l = l + lda + 1;
+ }
+ }
+ } else {
+/* A is xpose */
+ if (ilu == 0) {
+/* A' is upper */
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ classq_(&j, &a[(k + 1 + j) * lda], &c__1, &scale, &s);
+/* U at A(0,k+1) */
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ classq_(&k, &a[j * lda], &c__1, &scale, &s);
+/* k by k rect. at A(0,0) */
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 1;
+ classq_(&i__2, &a[j + 1 + (j + k) * lda], &c__1, &
+ scale, &s);
+/* L at A(0,k) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ l = k * lda;
+/* -> U(k,k) at A(0,k) */
+ i__1 = l;
+ aa = a[i__1].r;
+/* U(k,k) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ l += lda;
+/* -> U(0,0) at A(0,k+1) */
+ i__1 = *n - 1;
+ for (j = k + 1; j <= i__1; ++j) {
+ i__2 = l;
+ aa = a[i__2].r;
+/* -> U(j-k-1,j-k-1) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ i__2 = l + 1;
+ aa = a[i__2].r;
+/* -> U(j,j) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ l = l + lda + 1;
+ }
+/* L=k-1+n*lda */
+/* -> U(k-1,k-1) at A(k-1,n) */
+ i__1 = l;
+ aa = a[i__1].r;
+/* U(k,k) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ } else {
+/* A' is lower */
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ classq_(&j, &a[(j + 1) * lda], &c__1, &scale, &s);
+/* U at A(0,1) */
+ }
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+ classq_(&k, &a[j * lda], &c__1, &scale, &s);
+/* k by k rect. at A(0,k+1) */
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 1;
+ classq_(&i__2, &a[j + 1 + j * lda], &c__1, &scale, &s)
+ ;
+/* L at A(0,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ l = 0;
+/* -> L(k,k) at A(0,0) */
+ i__1 = l;
+ aa = a[i__1].r;
+/* L(k,k) at A(0,0) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ l = lda;
+/* -> L(0,0) at A(0,1) */
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = l;
+ aa = a[i__2].r;
+/* L(i,i) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ i__2 = l + 1;
+ aa = a[i__2].r;
+/* L(k+i+1,k+i+1) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ l = l + lda + 1;
+ }
+/* L-> k - 1 + k*lda or L(k-1,k-1) at A(k-1,k) */
+ i__1 = l;
+ aa = a[i__1].r;
+/* L(k-1,k-1) at A(k-1,k) */
+ if (aa != 0.f) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ r__1 = scale / aa;
+ s = s * (r__1 * r__1) + 1.f;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ r__1 = aa / scale;
+ s += r__1 * r__1;
+ }
+ }
+ }
+ }
+ }
+ value = scale * sqrt(s);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of CLANHF */
+
+} /* clanhf_ */
diff --git a/contrib/libs/clapack/clanhp.c b/contrib/libs/clapack/clanhp.c
new file mode 100644
index 0000000000..824b178920
--- /dev/null
+++ b/contrib/libs/clapack/clanhp.c
@@ -0,0 +1,277 @@
+/* clanhp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal clanhp_(char *norm, char *uplo, integer *n, complex *ap, real *
+ work)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ real ret_val, r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double c_abs(complex *), sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k;
+ real sum, absa, scale;
+ extern logical lsame_(char *, char *);
+ real value;
+ extern /* Subroutine */ int classq_(integer *, complex *, integer *, real
+ *, real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLANHP returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* complex hermitian matrix A, supplied in packed form. */
+
+/* Description */
+/* =========== */
+
+/* CLANHP returns the value */
+
+/* CLANHP = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in CLANHP as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* hermitian matrix A is supplied. */
+/* = 'U': Upper triangular part of A is supplied */
+/* = 'L': Lower triangular part of A is supplied */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, CLANHP is */
+/* set to zero. */
+
+/* AP (input) COMPLEX array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the hermitian matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set and are assumed to be zero. */
+
+/* WORK (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/* WORK is not referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --work;
+ --ap;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.f;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.f;
+ if (lsame_(uplo, "U")) {
+ k = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = k + j - 1;
+ for (i__ = k + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&ap[i__]);
+ value = dmax(r__1,r__2);
+/* L10: */
+ }
+ k += j;
+/* Computing MAX */
+ i__2 = k;
+ r__2 = value, r__3 = (r__1 = ap[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L20: */
+ }
+ } else {
+ k = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = k;
+ r__2 = value, r__3 = (r__1 = ap[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__2 = k + *n - j;
+ for (i__ = k + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&ap[i__]);
+ value = dmax(r__1,r__2);
+/* L30: */
+ }
+ k = k + *n - j + 1;
+/* L40: */
+ }
+ }
+ } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/* Find normI(A) ( = norm1(A), since A is hermitian). */
+
+ value = 0.f;
+ k = 1;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.f;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ absa = c_abs(&ap[k]);
+ sum += absa;
+ work[i__] += absa;
+ ++k;
+/* L50: */
+ }
+ i__2 = k;
+ work[j] = sum + (r__1 = ap[i__2].r, dabs(r__1));
+ ++k;
+/* L60: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = work[i__];
+ value = dmax(r__1,r__2);
+/* L70: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L80: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = k;
+ sum = work[j] + (r__1 = ap[i__2].r, dabs(r__1));
+ ++k;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ absa = c_abs(&ap[k]);
+ sum += absa;
+ work[i__] += absa;
+ ++k;
+/* L90: */
+ }
+ value = dmax(value,sum);
+/* L100: */
+ }
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.f;
+ sum = 1.f;
+ k = 2;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j - 1;
+ classq_(&i__2, &ap[k], &c__1, &scale, &sum);
+ k += j;
+/* L110: */
+ }
+ } else {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ classq_(&i__2, &ap[k], &c__1, &scale, &sum);
+ k = k + *n - j + 1;
+/* L120: */
+ }
+ }
+ sum *= 2;
+ k = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = k;
+ if (ap[i__2].r != 0.f) {
+ i__2 = k;
+ absa = (r__1 = ap[i__2].r, dabs(r__1));
+ if (scale < absa) {
+/* Computing 2nd power */
+ r__1 = scale / absa;
+ sum = sum * (r__1 * r__1) + 1.f;
+ scale = absa;
+ } else {
+/* Computing 2nd power */
+ r__1 = absa / scale;
+ sum += r__1 * r__1;
+ }
+ }
+ if (lsame_(uplo, "U")) {
+ k = k + i__ + 1;
+ } else {
+ k = k + *n - i__ + 1;
+ }
+/* L130: */
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of CLANHP */
+
+} /* clanhp_ */
diff --git a/contrib/libs/clapack/clanhs.c b/contrib/libs/clapack/clanhs.c
new file mode 100644
index 0000000000..850ca81462
--- /dev/null
+++ b/contrib/libs/clapack/clanhs.c
@@ -0,0 +1,205 @@
+/* clanhs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal clanhs_(char *norm, integer *n, complex *a, integer *lda, real *
+	work)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+    real ret_val, r__1, r__2;
+
+    /* Builtin functions */
+    double c_abs(complex *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j;
+    real sum, scale;
+    extern logical lsame_(char *, char *);
+    real value;
+    extern /* Subroutine */ int classq_(integer *, complex *, integer *, real
+	    *, real *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CLANHS returns the value of the one norm, or the Frobenius norm, or */
+/*  the infinity norm, or the element of largest absolute value of a */
+/*  Hessenberg matrix A. */
+
+/*  Description */
+/*  =========== */
+
+/*  CLANHS returns the value */
+
+/*     CLANHS = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/*              ( */
+/*              ( norm1(A),         NORM = '1', 'O' or 'o' */
+/*              ( */
+/*              ( normI(A),         NORM = 'I' or 'i' */
+/*              ( */
+/*              ( normF(A),         NORM = 'F', 'f', 'E' or 'e' */
+
+/*  where norm1 denotes the one norm of a matrix (maximum column sum), */
+/*  normI denotes the infinity norm of a matrix (maximum row sum) and */
+/*  normF denotes the Frobenius norm of a matrix (square root of sum of */
+/*  squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  NORM    (input) CHARACTER*1 */
+/*          Specifies the value to be returned in CLANHS as described */
+/*          above. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0.  When N = 0, CLANHS is */
+/*          set to zero. */
+
+/*  A       (input) COMPLEX array, dimension (LDA,N) */
+/*          The n by n upper Hessenberg matrix A; the part of A below the */
+/*          first sub-diagonal is not referenced. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(N,1). */
+
+/*  WORK    (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/*          where LWORK >= N when NORM = 'I'; otherwise, WORK is not */
+/*          referenced. */
+
+/* ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     f2c shifts the array bases so that the 1-based Fortran indexing */
+/*     A(i,j) becomes a[i + j*a_dim1] and work[1..n] below. */
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --work;
+
+    /* Function Body */
+    if (*n == 0) {
+	value = 0.f;
+    } else if (lsame_(norm, "M")) {
+
+/*        Find max(abs(A(i,j))). */
+
+	value = 0.f;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/*           Column j of a Hessenberg matrix is nonzero only in rows */
+/*           1..min(n,j+1); rows below the first subdiagonal are skipped. */
+/* Computing MIN */
+	    i__3 = *n, i__4 = j + 1;
+	    i__2 = min(i__3,i__4);
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		r__1 = value, r__2 = c_abs(&a[i__ + j * a_dim1]);
+		value = dmax(r__1,r__2);
+/* L10: */
+	    }
+/* L20: */
+	}
+    } else if (lsame_(norm, "O") || *(unsigned char *)
+	    norm == '1') {
+
+/*        Find norm1(A):  maximum over j of the absolute column sum. */
+
+	value = 0.f;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = 0.f;
+/* Computing MIN */
+	    i__3 = *n, i__4 = j + 1;
+	    i__2 = min(i__3,i__4);
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		sum += c_abs(&a[i__ + j * a_dim1]);
+/* L30: */
+	    }
+	    value = dmax(value,sum);
+/* L40: */
+	}
+    } else if (lsame_(norm, "I")) {
+
+/*        Find normI(A):  work[i] accumulates the absolute sum of row i */
+/*        across all columns, then the maximum row sum is taken. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    work[i__] = 0.f;
+/* L50: */
+	}
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+	    i__3 = *n, i__4 = j + 1;
+	    i__2 = min(i__3,i__4);
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		work[i__] += c_abs(&a[i__ + j * a_dim1]);
+/* L60: */
+	    }
+/* L70: */
+	}
+	value = 0.f;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	    r__1 = value, r__2 = work[i__];
+	    value = dmax(r__1,r__2);
+/* L80: */
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/*        Find normF(A).  classq_ maintains the running sum of squares */
+/*        in the overflow-safe form scale**2 * sum, updated one column */
+/*        (of height min(n,j+1)) at a time; the norm is scale*sqrt(sum). */
+
+	scale = 0.f;
+	sum = 1.f;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+	    i__3 = *n, i__4 = j + 1;
+	    i__2 = min(i__3,i__4);
+	    classq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L90: */
+	}
+	value = scale * sqrt(sum);
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/*     End of CLANHS */
+
+} /* clanhs_ */
diff --git a/contrib/libs/clapack/clanht.c b/contrib/libs/clapack/clanht.c
new file mode 100644
index 0000000000..834e68fb97
--- /dev/null
+++ b/contrib/libs/clapack/clanht.c
@@ -0,0 +1,166 @@
+/* clanht.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal clanht_(char *norm, integer *n, real *d__, complex *e)
+{
+    /* System generated locals */
+    integer i__1;
+    real ret_val, r__1, r__2, r__3;
+
+    /* Builtin functions */
+    double c_abs(complex *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__;
+    real sum, scale;
+    extern logical lsame_(char *, char *);
+    real anorm;
+    extern /* Subroutine */ int classq_(integer *, complex *, integer *, real
+	    *, real *), slassq_(integer *, real *, integer *, real *, real *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CLANHT returns the value of the one norm, or the Frobenius norm, or */
+/*  the infinity norm, or the element of largest absolute value of a */
+/*  complex Hermitian tridiagonal matrix A. */
+
+/*  Description */
+/*  =========== */
+
+/*  CLANHT returns the value */
+
+/*     CLANHT = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/*              ( */
+/*              ( norm1(A),         NORM = '1', 'O' or 'o' */
+/*              ( */
+/*              ( normI(A),         NORM = 'I' or 'i' */
+/*              ( */
+/*              ( normF(A),         NORM = 'F', 'f', 'E' or 'e' */
+
+/*  where norm1 denotes the one norm of a matrix (maximum column sum), */
+/*  normI denotes the infinity norm of a matrix (maximum row sum) and */
+/*  normF denotes the Frobenius norm of a matrix (square root of sum of */
+/*  squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  NORM    (input) CHARACTER*1 */
+/*          Specifies the value to be returned in CLANHT as described */
+/*          above. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0.  When N = 0, CLANHT is */
+/*          set to zero. */
+
+/*  D       (input) REAL array, dimension (N) */
+/*          The diagonal elements of A. */
+
+/*  E       (input) COMPLEX array, dimension (N-1) */
+/*          The (n-1) sub-diagonal or super-diagonal elements of A. */
+
+/* ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Shift bases for 1-based Fortran-style indexing: d__[1..n], e[1..n-1]. */
+/*     The diagonal of a Hermitian matrix is real, hence D is a real array. */
+    /* Parameter adjustments */
+    --e;
+    --d__;
+
+    /* Function Body */
+    if (*n <= 0) {
+	anorm = 0.f;
+    } else if (lsame_(norm, "M")) {
+
+/*        Find max(abs(A(i,j))):  scan the diagonal and the (single) */
+/*        off-diagonal together; d__[n] seeds the maximum. */
+
+	anorm = (r__1 = d__[*n], dabs(r__1));
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	    r__2 = anorm, r__3 = (r__1 = d__[i__], dabs(r__1));
+	    anorm = dmax(r__2,r__3);
+/* Computing MAX */
+	    r__1 = anorm, r__2 = c_abs(&e[i__]);
+	    anorm = dmax(r__1,r__2);
+/* L10: */
+	}
+    } else if (lsame_(norm, "O") || *(unsigned char *)
+	    norm == '1' || lsame_(norm, "I")) {
+
+/*        Find norm1(A).  The matrix is Hermitian, so the one norm and */
+/*        the infinity norm coincide and share this branch.  Column j */
+/*        holds e(j-1), d(j), e(j); the first and last columns are the */
+/*        two-term boundary cases handled before the loop. */
+
+	if (*n == 1) {
+	    anorm = dabs(d__[1]);
+	} else {
+/* Computing MAX */
+	    r__2 = dabs(d__[1]) + c_abs(&e[1]), r__3 = c_abs(&e[*n - 1]) + (
+		    r__1 = d__[*n], dabs(r__1));
+	    anorm = dmax(r__2,r__3);
+	    i__1 = *n - 1;
+	    for (i__ = 2; i__ <= i__1; ++i__) {
+/* Computing MAX */
+		r__2 = anorm, r__3 = (r__1 = d__[i__], dabs(r__1)) + c_abs(&e[
+			i__]) + c_abs(&e[i__ - 1]);
+		anorm = dmax(r__2,r__3);
+/* L20: */
+	    }
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/*        Find normF(A).  The off-diagonal squares are accumulated once */
+/*        via classq_ and doubled (each e(i) appears as both sub- and */
+/*        super-diagonal entry); the real diagonal goes through slassq_. */
+/*        (scale,sum) represent the sum of squares as scale**2 * sum. */
+
+	scale = 0.f;
+	sum = 1.f;
+	if (*n > 1) {
+	    i__1 = *n - 1;
+	    classq_(&i__1, &e[1], &c__1, &scale, &sum);
+	    sum *= 2;
+	}
+	slassq_(n, &d__[1], &c__1, &scale, &sum);
+	anorm = scale * sqrt(sum);
+    }
+
+    ret_val = anorm;
+    return ret_val;
+
+/*     End of CLANHT */
+
+} /* clanht_ */
diff --git a/contrib/libs/clapack/clansb.c b/contrib/libs/clapack/clansb.c
new file mode 100644
index 0000000000..fa0c88511a
--- /dev/null
+++ b/contrib/libs/clapack/clansb.c
@@ -0,0 +1,261 @@
+/* clansb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal clansb_(char *norm, char *uplo, integer *n, integer *k, complex *
+	ab, integer *ldab, real *work)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+    real ret_val, r__1, r__2;
+
+    /* Builtin functions */
+    double c_abs(complex *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, l;
+    real sum, absa, scale;
+    extern logical lsame_(char *, char *);
+    real value;
+    extern /* Subroutine */ int classq_(integer *, complex *, integer *, real
+	    *, real *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CLANSB returns the value of the one norm, or the Frobenius norm, or */
+/*  the infinity norm, or the element of largest absolute value of an */
+/*  n by n symmetric band matrix A, with k super-diagonals. */
+
+/*  Description */
+/*  =========== */
+
+/*  CLANSB returns the value */
+
+/*     CLANSB = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/*              ( */
+/*              ( norm1(A),         NORM = '1', 'O' or 'o' */
+/*              ( */
+/*              ( normI(A),         NORM = 'I' or 'i' */
+/*              ( */
+/*              ( normF(A),         NORM = 'F', 'f', 'E' or 'e' */
+
+/*  where norm1 denotes the one norm of a matrix (maximum column sum), */
+/*  normI denotes the infinity norm of a matrix (maximum row sum) and */
+/*  normF denotes the Frobenius norm of a matrix (square root of sum of */
+/*  squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  NORM    (input) CHARACTER*1 */
+/*          Specifies the value to be returned in CLANSB as described */
+/*          above. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the upper or lower triangular part of the */
+/*          band matrix A is supplied. */
+/*          = 'U':  Upper triangular part is supplied */
+/*          = 'L':  Lower triangular part is supplied */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0.  When N = 0, CLANSB is */
+/*          set to zero. */
+
+/*  K       (input) INTEGER */
+/*          The number of super-diagonals or sub-diagonals of the */
+/*          band matrix A.  K >= 0. */
+
+/*  AB      (input) COMPLEX array, dimension (LDAB,N) */
+/*          The upper or lower triangle of the symmetric band matrix A, */
+/*          stored in the first K+1 rows of AB.  The j-th column of A is */
+/*          stored in the j-th column of the array AB as follows: */
+/*          if UPLO = 'U', AB(k+1+i-j,j) = A(i,j) for max(1,j-k)<=i<=j; */
+/*          if UPLO = 'L', AB(1+i-j,j)   = A(i,j) for j<=i<=min(n,j+k). */
+
+/*  LDAB    (input) INTEGER */
+/*          The leading dimension of the array AB.  LDAB >= K+1. */
+
+/*  WORK    (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/*          where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/*          WORK is not referenced. */
+
+/* ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Shift bases for 1-based indexing:  A(i,j) of the band lives at */
+/*     ab[storage_row + j*ab_dim1] per the AB mapping documented above. */
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --work;
+
+    /* Function Body */
+    if (*n == 0) {
+	value = 0.f;
+    } else if (lsame_(norm, "M")) {
+
+/*        Find max(abs(A(i,j))) over the stored rows of each column. */
+
+	value = 0.f;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+		i__2 = *k + 2 - j;
+		i__3 = *k + 1;
+		for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+		    r__1 = value, r__2 = c_abs(&ab[i__ + j * ab_dim1]);
+		    value = dmax(r__1,r__2);
+/* L10: */
+		}
+/* L20: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		i__2 = *n + 1 - j, i__4 = *k + 1;
+		i__3 = min(i__2,i__4);
+		for (i__ = 1; i__ <= i__3; ++i__) {
+/* Computing MAX */
+		    r__1 = value, r__2 = c_abs(&ab[i__ + j * ab_dim1]);
+		    value = dmax(r__1,r__2);
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+    } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/*        Find normI(A) ( = norm1(A), since A is symmetric). */
+/*        work[i] accumulates row sum i using only the stored triangle; */
+/*        the mirrored (unstored) entries are folded in via symmetry. */
+/*        l translates matrix row i to band-storage row l+i in column j. */
+
+	value = 0.f;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		sum = 0.f;
+		l = *k + 1 - j;
+/* Computing MAX */
+		i__3 = 1, i__2 = j - *k;
+		i__4 = j - 1;
+		for (i__ = max(i__3,i__2); i__ <= i__4; ++i__) {
+		    absa = c_abs(&ab[l + i__ + j * ab_dim1]);
+		    sum += absa;
+		    work[i__] += absa;
+/* L50: */
+		}
+/*              work[j] is assigned (not accumulated) here, before any */
+/*              later column j' > j adds to it, so no zero-init is */
+/*              needed in this branch. */
+		work[j] = sum + c_abs(&ab[*k + 1 + j * ab_dim1]);
+/* L60: */
+	    }
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+		r__1 = value, r__2 = work[i__];
+		value = dmax(r__1,r__2);
+/* L70: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		work[i__] = 0.f;
+/* L80: */
+	    }
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		sum = work[j] + c_abs(&ab[j * ab_dim1 + 1]);
+		l = 1 - j;
+/* Computing MIN */
+		i__3 = *n, i__2 = j + *k;
+		i__4 = min(i__3,i__2);
+		for (i__ = j + 1; i__ <= i__4; ++i__) {
+		    absa = c_abs(&ab[l + i__ + j * ab_dim1]);
+		    sum += absa;
+		    work[i__] += absa;
+/* L90: */
+		}
+		value = dmax(value,sum);
+/* L100: */
+	    }
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/*        Find normF(A).  Off-diagonal band entries are accumulated via */
+/*        classq_ (which keeps the sum of squares as scale**2 * sum) and */
+/*        doubled by symmetry; the diagonal, which lives in storage row */
+/*        l (k+1 for 'U', 1 for 'L') with stride LDAB, is added once. */
+
+	scale = 0.f;
+	sum = 1.f;
+	if (*k > 0) {
+	    if (lsame_(uplo, "U")) {
+		i__1 = *n;
+		for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+		    i__3 = j - 1;
+		    i__4 = min(i__3,*k);
+/* Computing MAX */
+		    i__2 = *k + 2 - j;
+		    classq_(&i__4, &ab[max(i__2, 1)+ j * ab_dim1], &c__1, &
+			    scale, &sum);
+/* L110: */
+		}
+		l = *k + 1;
+	    } else {
+		i__1 = *n - 1;
+		for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		    i__3 = *n - j;
+		    i__4 = min(i__3,*k);
+		    classq_(&i__4, &ab[j * ab_dim1 + 2], &c__1, &scale, &sum);
+/* L120: */
+		}
+		l = 1;
+	    }
+	    sum *= 2;
+	} else {
+	    l = 1;
+	}
+	classq_(n, &ab[l + ab_dim1], ldab, &scale, &sum);
+	value = scale * sqrt(sum);
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/*     End of CLANSB */
+
+} /* clansb_ */
diff --git a/contrib/libs/clapack/clansp.c b/contrib/libs/clapack/clansp.c
new file mode 100644
index 0000000000..906ea81420
--- /dev/null
+++ b/contrib/libs/clapack/clansp.c
@@ -0,0 +1,278 @@
+/* clansp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal clansp_(char *norm, char *uplo, integer *n, complex *ap, real *
+	work)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    real ret_val, r__1, r__2;
+
+    /* Builtin functions */
+    double c_abs(complex *), r_imag(complex *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, k;
+    real sum, absa, scale;
+    extern logical lsame_(char *, char *);
+    real value;
+    extern /* Subroutine */ int classq_(integer *, complex *, integer *, real
+	    *, real *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CLANSP returns the value of the one norm, or the Frobenius norm, or */
+/*  the infinity norm, or the element of largest absolute value of a */
+/*  complex symmetric matrix A, supplied in packed form. */
+
+/*  Description */
+/*  =========== */
+
+/*  CLANSP returns the value */
+
+/*     CLANSP = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/*              ( */
+/*              ( norm1(A),         NORM = '1', 'O' or 'o' */
+/*              ( */
+/*              ( normI(A),         NORM = 'I' or 'i' */
+/*              ( */
+/*              ( normF(A),         NORM = 'F', 'f', 'E' or 'e' */
+
+/*  where norm1 denotes the one norm of a matrix (maximum column sum), */
+/*  normI denotes the infinity norm of a matrix (maximum row sum) and */
+/*  normF denotes the Frobenius norm of a matrix (square root of sum of */
+/*  squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  NORM    (input) CHARACTER*1 */
+/*          Specifies the value to be returned in CLANSP as described */
+/*          above. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the upper or lower triangular part of the */
+/*          symmetric matrix A is supplied. */
+/*          = 'U':  Upper triangular part of A is supplied */
+/*          = 'L':  Lower triangular part of A is supplied */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0.  When N = 0, CLANSP is */
+/*          set to zero. */
+
+/*  AP      (input) COMPLEX array, dimension (N*(N+1)/2) */
+/*          The upper or lower triangle of the symmetric matrix A, packed */
+/*          columnwise in a linear array.  The j-th column of A is stored */
+/*          in the array AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/*  WORK    (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/*          where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/*          WORK is not referenced. */
+
+/* ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Shift bases for 1-based indexing.  Throughout, k is a linear */
+/*     cursor into the packed array AP, advanced column by column. */
+    /* Parameter adjustments */
+    --work;
+    --ap;
+
+    /* Function Body */
+    if (*n == 0) {
+	value = 0.f;
+    } else if (lsame_(norm, "M")) {
+
+/*        Find max(abs(A(i,j))):  a single pass over the packed triangle. */
+
+	value = 0.f;
+	if (lsame_(uplo, "U")) {
+	    k = 1;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = k + j - 1;
+		for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		    r__1 = value, r__2 = c_abs(&ap[i__]);
+		    value = dmax(r__1,r__2);
+/* L10: */
+		}
+		k += j;
+/* L20: */
+	    }
+	} else {
+	    k = 1;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = k + *n - j;
+		for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		    r__1 = value, r__2 = c_abs(&ap[i__]);
+		    value = dmax(r__1,r__2);
+/* L30: */
+		}
+		k = k + *n - j + 1;
+/* L40: */
+	    }
+	}
+    } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/*        Find normI(A) ( = norm1(A), since A is symmetric). */
+/*        work[i] accumulates row sum i; entries of the unstored */
+/*        triangle are supplied by symmetry while walking the stored one. */
+
+	value = 0.f;
+	k = 1;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		sum = 0.f;
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    absa = c_abs(&ap[k]);
+		    sum += absa;
+		    work[i__] += absa;
+		    ++k;
+/* L50: */
+		}
+/*              work[j] is assigned here before any later column adds */
+/*              to it, so this branch needs no explicit zeroing; ap[k] */
+/*              is the diagonal element A(j,j). */
+		work[j] = sum + c_abs(&ap[k]);
+		++k;
+/* L60: */
+	    }
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+		r__1 = value, r__2 = work[i__];
+		value = dmax(r__1,r__2);
+/* L70: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		work[i__] = 0.f;
+/* L80: */
+	    }
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		sum = work[j] + c_abs(&ap[k]);
+		++k;
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    absa = c_abs(&ap[k]);
+		    sum += absa;
+		    work[i__] += absa;
+		    ++k;
+/* L90: */
+		}
+		value = dmax(value,sum);
+/* L100: */
+	    }
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/*        Find normF(A).  Strictly off-diagonal entries go through */
+/*        classq_ (sum of squares kept as scale**2 * sum) and are */
+/*        doubled by symmetry; k starts at 2 to skip A(1,1). */
+
+	scale = 0.f;
+	sum = 1.f;
+	k = 2;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 2; j <= i__1; ++j) {
+		i__2 = j - 1;
+		classq_(&i__2, &ap[k], &c__1, &scale, &sum);
+		k += j;
+/* L110: */
+	    }
+	} else {
+	    i__1 = *n - 1;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n - j;
+		classq_(&i__2, &ap[k], &c__1, &scale, &sum);
+		k = k + *n - j + 1;
+/* L120: */
+	    }
+	}
+	sum *= 2;
+/*        Add the diagonal once.  A is symmetric (not Hermitian), so the */
+/*        diagonal may be complex: fold its real and imaginary parts into */
+/*        (scale,sum) by hand, using the same rescaling rule as classq_. */
+	k = 1;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = k;
+	    if (ap[i__2].r != 0.f) {
+		i__2 = k;
+		absa = (r__1 = ap[i__2].r, dabs(r__1));
+		if (scale < absa) {
+/* Computing 2nd power */
+		    r__1 = scale / absa;
+		    sum = sum * (r__1 * r__1) + 1.f;
+		    scale = absa;
+		} else {
+/* Computing 2nd power */
+		    r__1 = absa / scale;
+		    sum += r__1 * r__1;
+		}
+	    }
+	    if (r_imag(&ap[k]) != 0.f) {
+		absa = (r__1 = r_imag(&ap[k]), dabs(r__1));
+		if (scale < absa) {
+/* Computing 2nd power */
+		    r__1 = scale / absa;
+		    sum = sum * (r__1 * r__1) + 1.f;
+		    scale = absa;
+		} else {
+/* Computing 2nd power */
+		    r__1 = absa / scale;
+		    sum += r__1 * r__1;
+		}
+	    }
+/*           Step k to the next diagonal element in packed storage. */
+	    if (lsame_(uplo, "U")) {
+		k = k + i__ + 1;
+	    } else {
+		k = k + *n - i__ + 1;
+	    }
+/* L130: */
+	}
+	value = scale * sqrt(sum);
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/*     End of CLANSP */
+
+} /* clansp_ */
diff --git a/contrib/libs/clapack/clansy.c b/contrib/libs/clapack/clansy.c
new file mode 100644
index 0000000000..874e77eec2
--- /dev/null
+++ b/contrib/libs/clapack/clansy.c
@@ -0,0 +1,237 @@
+/* clansy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal clansy_(char *norm, char *uplo, integer *n, complex *a, integer *
+	lda, real *work)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    real ret_val, r__1, r__2;
+
+    /* Builtin functions */
+    double c_abs(complex *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j;
+    real sum, absa, scale;
+    extern logical lsame_(char *, char *);
+    real value;
+    extern /* Subroutine */ int classq_(integer *, complex *, integer *, real
+	    *, real *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CLANSY returns the value of the one norm, or the Frobenius norm, or */
+/*  the infinity norm, or the element of largest absolute value of a */
+/*  complex symmetric matrix A. */
+
+/*  Description */
+/*  =========== */
+
+/*  CLANSY returns the value */
+
+/*     CLANSY = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/*              ( */
+/*              ( norm1(A),         NORM = '1', 'O' or 'o' */
+/*              ( */
+/*              ( normI(A),         NORM = 'I' or 'i' */
+/*              ( */
+/*              ( normF(A),         NORM = 'F', 'f', 'E' or 'e' */
+
+/*  where norm1 denotes the one norm of a matrix (maximum column sum), */
+/*  normI denotes the infinity norm of a matrix (maximum row sum) and */
+/*  normF denotes the Frobenius norm of a matrix (square root of sum of */
+/*  squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  NORM    (input) CHARACTER*1 */
+/*          Specifies the value to be returned in CLANSY as described */
+/*          above. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the upper or lower triangular part of the */
+/*          symmetric matrix A is to be referenced. */
+/*          = 'U':  Upper triangular part of A is referenced */
+/*          = 'L':  Lower triangular part of A is referenced */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0.  When N = 0, CLANSY is */
+/*          set to zero. */
+
+/*  A       (input) COMPLEX array, dimension (LDA,N) */
+/*          The symmetric matrix A.  If UPLO = 'U', the leading n by n */
+/*          upper triangular part of A contains the upper triangular part */
+/*          of the matrix A, and the strictly lower triangular part of A */
+/*          is not referenced.  If UPLO = 'L', the leading n by n lower */
+/*          triangular part of A contains the lower triangular part of */
+/*          the matrix A, and the strictly upper triangular part of A is */
+/*          not referenced. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(N,1). */
+
+/*  WORK    (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/*          where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/*          WORK is not referenced. */
+
+/* ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     f2c shifts the array bases so the 1-based Fortran element A(i,j) */
+/*     is addressed as a[i + j*a_dim1] and the workspace as work[1..n]. */
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --work;
+
+    /* Function Body */
+    if (*n == 0) {
+	value = 0.f;
+    } else if (lsame_(norm, "M")) {
+
+/*        Find max(abs(A(i,j))) over the referenced triangle only. */
+
+	value = 0.f;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		    r__1 = value, r__2 = c_abs(&a[i__ + j * a_dim1]);
+		    value = dmax(r__1,r__2);
+/* L10: */
+		}
+/* L20: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n;
+		for (i__ = j; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		    r__1 = value, r__2 = c_abs(&a[i__ + j * a_dim1]);
+		    value = dmax(r__1,r__2);
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+    } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/*        Find normI(A) ( = norm1(A), since A is symmetric). */
+/*        work[i] accumulates row sum i from the stored triangle; the */
+/*        mirrored entries are supplied by symmetry. */
+
+	value = 0.f;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		sum = 0.f;
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    absa = c_abs(&a[i__ + j * a_dim1]);
+		    sum += absa;
+		    work[i__] += absa;
+/* L50: */
+		}
+/*              work[j] is assigned here before later columns j' > j */
+/*              accumulate into it, so no zero-init is needed in this */
+/*              branch. */
+		work[j] = sum + c_abs(&a[j + j * a_dim1]);
+/* L60: */
+	    }
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+		r__1 = value, r__2 = work[i__];
+		value = dmax(r__1,r__2);
+/* L70: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		work[i__] = 0.f;
+/* L80: */
+	    }
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		sum = work[j] + c_abs(&a[j + j * a_dim1]);
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    absa = c_abs(&a[i__ + j * a_dim1]);
+		    sum += absa;
+		    work[i__] += absa;
+/* L90: */
+		}
+		value = dmax(value,sum);
+/* L100: */
+	    }
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/*        Find normF(A).  classq_ keeps the running sum of squares in */
+/*        the overflow-safe form scale**2 * sum.  The strict triangle is */
+/*        accumulated column by column and doubled by symmetry, then the */
+/*        diagonal is added once with stride lda+1 starting at A(1,1). */
+
+	scale = 0.f;
+	sum = 1.f;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 2; j <= i__1; ++j) {
+		i__2 = j - 1;
+		classq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L110: */
+	    }
+	} else {
+	    i__1 = *n - 1;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n - j;
+		classq_(&i__2, &a[j + 1 + j * a_dim1], &c__1, &scale, &sum);
+/* L120: */
+	    }
+	}
+	sum *= 2;
+	i__1 = *lda + 1;
+	classq_(n, &a[a_offset], &i__1, &scale, &sum);
+	value = scale * sqrt(sum);
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/*     End of CLANSY */
+
+} /* clansy_ */
diff --git a/contrib/libs/clapack/clantb.c b/contrib/libs/clapack/clantb.c
new file mode 100644
index 0000000000..98d865714c
--- /dev/null
+++ b/contrib/libs/clapack/clantb.c
@@ -0,0 +1,426 @@
+/* clantb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal clantb_(char *norm, char *uplo, char *diag, integer *n, integer *k,
+	 complex *ab, integer *ldab, real *work)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5;
+    real ret_val, r__1, r__2;
+
+    /* Builtin functions */
+    double c_abs(complex *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, l;
+    real sum, scale;
+    logical udiag;
+    extern logical lsame_(char *, char *);
+    real value;
+    extern /* Subroutine */ int classq_(integer *, complex *, integer *, real
+	    *, real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLANTB returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of an */
+/* n by n triangular band matrix A, with ( k + 1 ) diagonals. */
+
+/* Description */
+/* =========== */
+
+/* CLANTB returns the value */
+
+/* CLANTB = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in CLANTB as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, CLANTB is */
+/* set to zero. */
+
+/* K (input) INTEGER */
+/* The number of super-diagonals of the matrix A if UPLO = 'U', */
+/* or the number of sub-diagonals of the matrix A if UPLO = 'L'. */
+/* K >= 0. */
+
+/* AB (input) COMPLEX array, dimension (LDAB,N) */
+/* The upper or lower triangular band matrix A, stored in the */
+/* first k+1 rows of AB. The j-th column of A is stored */
+/* in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(k+1+i-j,j) = A(i,j) for max(1,j-k)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+k). */
+/* Note that when DIAG = 'U', the elements of the array AB */
+/* corresponding to the diagonal elements of the matrix A are */
+/* not referenced, but are assumed to be one. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= K+1. */
+
+/* WORK (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab; /* column stride of the band storage AB(LDAB,N) */
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset; /* shift base pointer so 1-based Fortran indexing carries over */
+    --work;
+
+    /* Function Body */
+    if (*n == 0) {
+	value = 0.f;
+    } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+	if (lsame_(diag, "U")) {
+	    value = 1.f; /* unit diagonal: diagonal entries are 1, so the max starts at 1 */
+	    if (lsame_(uplo, "U")) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+		    i__2 = *k + 2 - j;
+		    i__3 = *k;
+		    for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+			r__1 = value, r__2 = c_abs(&ab[i__ + j * ab_dim1]);
+			value = dmax(r__1,r__2);
+/* L10: */
+		    }
+/* L20: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		    i__2 = *n + 1 - j, i__4 = *k + 1;
+		    i__3 = min(i__2,i__4);
+		    for (i__ = 2; i__ <= i__3; ++i__) {
+/* Computing MAX */
+			r__1 = value, r__2 = c_abs(&ab[i__ + j * ab_dim1]);
+			value = dmax(r__1,r__2);
+/* L30: */
+		    }
+/* L40: */
+		}
+	    }
+	} else {
+	    value = 0.f;
+	    if (lsame_(uplo, "U")) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+		    i__3 = *k + 2 - j;
+		    i__2 = *k + 1;
+		    for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+/* Computing MAX */
+			r__1 = value, r__2 = c_abs(&ab[i__ + j * ab_dim1]);
+			value = dmax(r__1,r__2);
+/* L50: */
+		    }
+/* L60: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		    i__3 = *n + 1 - j, i__4 = *k + 1;
+		    i__2 = min(i__3,i__4);
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+			r__1 = value, r__2 = c_abs(&ab[i__ + j * ab_dim1]);
+			value = dmax(r__1,r__2);
+/* L70: */
+		    }
+/* L80: */
+		}
+	    }
+	}
+    } else if (lsame_(norm, "O") || *(unsigned char *)
+	    norm == '1') {
+
+/* Find norm1(A). */
+
+	value = 0.f;
+	udiag = lsame_(diag, "U");
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (udiag) {
+		    sum = 1.f; /* implicit unit diagonal contributes 1 to the column sum */
+/* Computing MAX */
+		    i__2 = *k + 2 - j;
+		    i__3 = *k;
+		    for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+			sum += c_abs(&ab[i__ + j * ab_dim1]);
+/* L90: */
+		    }
+		} else {
+		    sum = 0.f;
+/* Computing MAX */
+		    i__3 = *k + 2 - j;
+		    i__2 = *k + 1;
+		    for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+			sum += c_abs(&ab[i__ + j * ab_dim1]);
+/* L100: */
+		    }
+		}
+		value = dmax(value,sum);
+/* L110: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (udiag) {
+		    sum = 1.f;
+/* Computing MIN */
+		    i__3 = *n + 1 - j, i__4 = *k + 1;
+		    i__2 = min(i__3,i__4);
+		    for (i__ = 2; i__ <= i__2; ++i__) {
+			sum += c_abs(&ab[i__ + j * ab_dim1]);
+/* L120: */
+		    }
+		} else {
+		    sum = 0.f;
+/* Computing MIN */
+		    i__3 = *n + 1 - j, i__4 = *k + 1;
+		    i__2 = min(i__3,i__4);
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			sum += c_abs(&ab[i__ + j * ab_dim1]);
+/* L130: */
+		    }
+		}
+		value = dmax(value,sum);
+/* L140: */
+	    }
+	}
+    } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+	value = 0.f;
+	if (lsame_(uplo, "U")) {
+	    if (lsame_(diag, "U")) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 1.f; /* seed each row sum with the implicit unit diagonal */
+/* L150: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    l = *k + 1 - j; /* row offset: ab[l + i + j*ab_dim1] addresses A(i,j) in the upper band */
+/* Computing MAX */
+		    i__2 = 1, i__3 = j - *k;
+		    i__4 = j - 1;
+		    for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+			work[i__] += c_abs(&ab[l + i__ + j * ab_dim1]);
+/* L160: */
+		    }
+/* L170: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 0.f;
+/* L180: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    l = *k + 1 - j;
+/* Computing MAX */
+		    i__4 = 1, i__2 = j - *k;
+		    i__3 = j;
+		    for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+			work[i__] += c_abs(&ab[l + i__ + j * ab_dim1]);
+/* L190: */
+		    }
+/* L200: */
+		}
+	    }
+	} else {
+	    if (lsame_(diag, "U")) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 1.f;
+/* L210: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    l = 1 - j; /* row offset: ab[l + i + j*ab_dim1] addresses A(i,j) in the lower band */
+/* Computing MIN */
+		    i__4 = *n, i__2 = j + *k;
+		    i__3 = min(i__4,i__2);
+		    for (i__ = j + 1; i__ <= i__3; ++i__) {
+			work[i__] += c_abs(&ab[l + i__ + j * ab_dim1]);
+/* L220: */
+		    }
+/* L230: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 0.f;
+/* L240: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    l = 1 - j;
+/* Computing MIN */
+		    i__4 = *n, i__2 = j + *k;
+		    i__3 = min(i__4,i__2);
+		    for (i__ = j; i__ <= i__3; ++i__) {
+			work[i__] += c_abs(&ab[l + i__ + j * ab_dim1]);
+/* L250: */
+		    }
+/* L260: */
+		}
+	    }
+	}
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	    r__1 = value, r__2 = work[i__];
+	    value = dmax(r__1,r__2);
+/* L270: */
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+	if (lsame_(uplo, "U")) {
+	    if (lsame_(diag, "U")) {
+		scale = 1.f;
+		sum = (real) (*n); /* n implicit unit diagonal entries contribute n to the sum of squares */
+		if (*k > 0) {
+		    i__1 = *n;
+		    for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+			i__4 = j - 1;
+			i__3 = min(i__4,*k);
+/* Computing MAX */
+			i__2 = *k + 2 - j;
+			classq_(&i__3, &ab[max(i__2, 1)+ j * ab_dim1], &c__1,
+				&scale, &sum);
+/* L280: */
+		    }
+		}
+	    } else {
+		scale = 0.f;
+		sum = 1.f;
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		    i__4 = j, i__2 = *k + 1;
+		    i__3 = min(i__4,i__2);
+/* Computing MAX */
+		    i__5 = *k + 2 - j;
+		    classq_(&i__3, &ab[max(i__5, 1)+ j * ab_dim1], &c__1, &
+			    scale, &sum);
+/* L290: */
+		}
+	    }
+	} else {
+	    if (lsame_(diag, "U")) {
+		scale = 1.f;
+		sum = (real) (*n);
+		if (*k > 0) {
+		    i__1 = *n - 1;
+		    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+			i__4 = *n - j;
+			i__3 = min(i__4,*k);
+			classq_(&i__3, &ab[j * ab_dim1 + 2], &c__1, &scale, &
+				sum);
+/* L300: */
+		    }
+		}
+	    } else {
+		scale = 0.f;
+		sum = 1.f;
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		    i__4 = *n - j + 1, i__2 = *k + 1;
+		    i__3 = min(i__4,i__2);
+		    classq_(&i__3, &ab[j * ab_dim1 + 1], &c__1, &scale, &sum);
+/* L310: */
+		}
+	    }
+	}
+	value = scale * sqrt(sum); /* classq_ kept sum scaled, so this recombination avoids overflow/underflow */
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/* End of CLANTB */
+
+} /* clantb_ */
diff --git a/contrib/libs/clapack/clantp.c b/contrib/libs/clapack/clantp.c
new file mode 100644
index 0000000000..720f2ccad0
--- /dev/null
+++ b/contrib/libs/clapack/clantp.c
@@ -0,0 +1,391 @@
+/* clantp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal clantp_(char *norm, char *uplo, char *diag, integer *n, complex *
+	ap, real *work)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    real ret_val, r__1, r__2;
+
+    /* Builtin functions */
+    double c_abs(complex *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, k;
+    real sum, scale;
+    logical udiag;
+    extern logical lsame_(char *, char *);
+    real value;
+    extern /* Subroutine */ int classq_(integer *, complex *, integer *, real
+	    *, real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLANTP returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* triangular matrix A, supplied in packed form. */
+
+/* Description */
+/* =========== */
+
+/* CLANTP returns the value */
+
+/* CLANTP = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in CLANTP as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, CLANTP is */
+/* set to zero. */
+
+/* AP (input) COMPLEX array, dimension (N*(N+1)/2) */
+/* The upper or lower triangular matrix A, packed columnwise in */
+/* a linear array. The j-th column of A is stored in the array */
+/* AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* Note that when DIAG = 'U', the elements of the array AP */
+/* corresponding to the diagonal elements of the matrix A are */
+/* not referenced, but are assumed to be one. */
+
+/* WORK (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --work;
+    --ap;
+
+    /* Function Body */
+    if (*n == 0) {
+	value = 0.f;
+    } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+	k = 1; /* k walks the packed array; it always points at the start of column j */
+	if (lsame_(diag, "U")) {
+	    value = 1.f; /* unit diagonal: diagonal entries are 1, so the max starts at 1 */
+	    if (lsame_(uplo, "U")) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = k + j - 2; /* stop one short of the (implicit) diagonal entry */
+		    for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+			r__1 = value, r__2 = c_abs(&ap[i__]);
+			value = dmax(r__1,r__2);
+/* L10: */
+		    }
+		    k += j; /* column j of the upper triangle occupies j packed entries */
+/* L20: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = k + *n - j;
+		    for (i__ = k + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+			r__1 = value, r__2 = c_abs(&ap[i__]);
+			value = dmax(r__1,r__2);
+/* L30: */
+		    }
+		    k = k + *n - j + 1; /* column j of the lower triangle occupies n-j+1 packed entries */
+/* L40: */
+		}
+	    }
+	} else {
+	    value = 0.f;
+	    if (lsame_(uplo, "U")) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = k + j - 1;
+		    for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+			r__1 = value, r__2 = c_abs(&ap[i__]);
+			value = dmax(r__1,r__2);
+/* L50: */
+		    }
+		    k += j;
+/* L60: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = k + *n - j;
+		    for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+			r__1 = value, r__2 = c_abs(&ap[i__]);
+			value = dmax(r__1,r__2);
+/* L70: */
+		    }
+		    k = k + *n - j + 1;
+/* L80: */
+		}
+	    }
+	}
+    } else if (lsame_(norm, "O") || *(unsigned char *)
+	    norm == '1') {
+
+/* Find norm1(A). */
+
+	value = 0.f;
+	k = 1;
+	udiag = lsame_(diag, "U");
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (udiag) {
+		    sum = 1.f; /* implicit unit diagonal contributes 1 to the column sum */
+		    i__2 = k + j - 2;
+		    for (i__ = k; i__ <= i__2; ++i__) {
+			sum += c_abs(&ap[i__]);
+/* L90: */
+		    }
+		} else {
+		    sum = 0.f;
+		    i__2 = k + j - 1;
+		    for (i__ = k; i__ <= i__2; ++i__) {
+			sum += c_abs(&ap[i__]);
+/* L100: */
+		    }
+		}
+		k += j;
+		value = dmax(value,sum);
+/* L110: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (udiag) {
+		    sum = 1.f;
+		    i__2 = k + *n - j;
+		    for (i__ = k + 1; i__ <= i__2; ++i__) {
+			sum += c_abs(&ap[i__]);
+/* L120: */
+		    }
+		} else {
+		    sum = 0.f;
+		    i__2 = k + *n - j;
+		    for (i__ = k; i__ <= i__2; ++i__) {
+			sum += c_abs(&ap[i__]);
+/* L130: */
+		    }
+		}
+		k = k + *n - j + 1;
+		value = dmax(value,sum);
+/* L140: */
+	    }
+	}
+    } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+	k = 1;
+	if (lsame_(uplo, "U")) {
+	    if (lsame_(diag, "U")) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 1.f; /* seed each row sum with the implicit unit diagonal */
+/* L150: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = j - 1;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			work[i__] += c_abs(&ap[k]);
+			++k;
+/* L160: */
+		    }
+		    ++k; /* skip the stored-but-unreferenced diagonal slot */
+/* L170: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 0.f;
+/* L180: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			work[i__] += c_abs(&ap[k]);
+			++k;
+/* L190: */
+		    }
+/* L200: */
+		}
+	    }
+	} else {
+	    if (lsame_(diag, "U")) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 1.f;
+/* L210: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    ++k; /* skip the stored-but-unreferenced diagonal slot */
+		    i__2 = *n;
+		    for (i__ = j + 1; i__ <= i__2; ++i__) {
+			work[i__] += c_abs(&ap[k]);
+			++k;
+/* L220: */
+		    }
+/* L230: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 0.f;
+/* L240: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			work[i__] += c_abs(&ap[k]);
+			++k;
+/* L250: */
+		    }
+/* L260: */
+		}
+	    }
+	}
+	value = 0.f;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	    r__1 = value, r__2 = work[i__];
+	    value = dmax(r__1,r__2);
+/* L270: */
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+	if (lsame_(uplo, "U")) {
+	    if (lsame_(diag, "U")) {
+		scale = 1.f;
+		sum = (real) (*n); /* n implicit unit diagonal entries contribute n to the sum of squares */
+		k = 2; /* start at the first strictly-upper entry (column 2) */
+		i__1 = *n;
+		for (j = 2; j <= i__1; ++j) {
+		    i__2 = j - 1;
+		    classq_(&i__2, &ap[k], &c__1, &scale, &sum);
+		    k += j;
+/* L280: */
+		}
+	    } else {
+		scale = 0.f;
+		sum = 1.f;
+		k = 1;
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    classq_(&j, &ap[k], &c__1, &scale, &sum);
+		    k += j;
+/* L290: */
+		}
+	    }
+	} else {
+	    if (lsame_(diag, "U")) {
+		scale = 1.f;
+		sum = (real) (*n);
+		k = 2; /* start at the first strictly-lower entry (below A(1,1)) */
+		i__1 = *n - 1;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *n - j;
+		    classq_(&i__2, &ap[k], &c__1, &scale, &sum);
+		    k = k + *n - j + 1;
+/* L300: */
+		}
+	    } else {
+		scale = 0.f;
+		sum = 1.f;
+		k = 1;
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *n - j + 1;
+		    classq_(&i__2, &ap[k], &c__1, &scale, &sum);
+		    k = k + *n - j + 1;
+/* L310: */
+		}
+	    }
+	}
+	value = scale * sqrt(sum); /* classq_ kept sum scaled, so this recombination avoids overflow/underflow */
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/* End of CLANTP */
+
+} /* clantp_ */
diff --git a/contrib/libs/clapack/clantr.c b/contrib/libs/clapack/clantr.c
new file mode 100644
index 0000000000..adf6e51ca8
--- /dev/null
+++ b/contrib/libs/clapack/clantr.c
@@ -0,0 +1,394 @@
+/* clantr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal clantr_(char *norm, char *uplo, char *diag, integer *m, integer *n,
+	 complex *a, integer *lda, real *work)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+    real ret_val, r__1, r__2;
+
+    /* Builtin functions */
+    double c_abs(complex *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j;
+    real sum, scale;
+    logical udiag;
+    extern logical lsame_(char *, char *);
+    real value;
+    extern /* Subroutine */ int classq_(integer *, complex *, integer *, real
+	    *, real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLANTR returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* trapezoidal or triangular matrix A. */
+
+/* Description */
+/* =========== */
+
+/* CLANTR returns the value */
+
+/* CLANTR = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in CLANTR as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower trapezoidal. */
+/* = 'U': Upper trapezoidal */
+/* = 'L': Lower trapezoidal */
+/* Note that A is triangular instead of trapezoidal if M = N. */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A has unit diagonal. */
+/* = 'N': Non-unit diagonal */
+/* = 'U': Unit diagonal */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0, and if */
+/* UPLO = 'U', M <= N. When M = 0, CLANTR is set to zero. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0, and if */
+/* UPLO = 'L', N <= M. When N = 0, CLANTR is set to zero. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The trapezoidal matrix A (A is triangular if M = N). */
+/* If UPLO = 'U', the leading m by n upper trapezoidal part of */
+/* the array A contains the upper trapezoidal matrix, and the */
+/* strictly lower triangular part of A is not referenced. */
+/* If UPLO = 'L', the leading m by n lower trapezoidal part of */
+/* the array A contains the lower trapezoidal matrix, and the */
+/* strictly upper triangular part of A is not referenced. Note */
+/* that when DIAG = 'U', the diagonal elements of A are not */
+/* referenced and are assumed to be one. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(M,1). */
+
+/* WORK (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= M when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda; /* column stride of the (LDA x N) storage of A */
+    a_offset = 1 + a_dim1;
+    a -= a_offset; /* shift base pointer so 1-based Fortran indexing carries over */
+    --work;
+
+    /* Function Body */
+    if (min(*m,*n) == 0) {
+	value = 0.f; /* empty trapezoid: the norm is zero by definition */
+    } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+	if (lsame_(diag, "U")) {
+	    value = 1.f; /* unit diagonal: diagonal entries are 1, so the max starts at 1 */
+	    if (lsame_(uplo, "U")) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		    i__3 = *m, i__4 = j - 1;
+		    i__2 = min(i__3,i__4);
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+			r__1 = value, r__2 = c_abs(&a[i__ + j * a_dim1]);
+			value = dmax(r__1,r__2);
+/* L10: */
+		    }
+/* L20: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *m;
+		    for (i__ = j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+			r__1 = value, r__2 = c_abs(&a[i__ + j * a_dim1]);
+			value = dmax(r__1,r__2);
+/* L30: */
+		    }
+/* L40: */
+		}
+	    }
+	} else {
+	    value = 0.f;
+	    if (lsame_(uplo, "U")) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = min(*m,j);
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+			r__1 = value, r__2 = c_abs(&a[i__ + j * a_dim1]);
+			value = dmax(r__1,r__2);
+/* L50: */
+		    }
+/* L60: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *m;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+/* Computing MAX */
+			r__1 = value, r__2 = c_abs(&a[i__ + j * a_dim1]);
+			value = dmax(r__1,r__2);
+/* L70: */
+		    }
+/* L80: */
+		}
+	    }
+	}
+    } else if (lsame_(norm, "O") || *(unsigned char *)
+	    norm == '1') {
+
+/* Find norm1(A). */
+
+	value = 0.f;
+	udiag = lsame_(diag, "U");
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (udiag && j <= *m) { /* the diagonal entry A(j,j) exists only while j <= m */
+		    sum = 1.f;
+		    i__2 = j - 1;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			sum += c_abs(&a[i__ + j * a_dim1]);
+/* L90: */
+		    }
+		} else {
+		    sum = 0.f;
+		    i__2 = min(*m,j);
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			sum += c_abs(&a[i__ + j * a_dim1]);
+/* L100: */
+		    }
+		}
+		value = dmax(value,sum);
+/* L110: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (udiag) {
+		    sum = 1.f; /* implicit unit diagonal contributes 1 to the column sum */
+		    i__2 = *m;
+		    for (i__ = j + 1; i__ <= i__2; ++i__) {
+			sum += c_abs(&a[i__ + j * a_dim1]);
+/* L120: */
+		    }
+		} else {
+		    sum = 0.f;
+		    i__2 = *m;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			sum += c_abs(&a[i__ + j * a_dim1]);
+/* L130: */
+		    }
+		}
+		value = dmax(value,sum);
+/* L140: */
+	    }
+	}
+    } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+	if (lsame_(uplo, "U")) {
+	    if (lsame_(diag, "U")) {
+		i__1 = *m;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 1.f; /* seed each row sum with the implicit unit diagonal */
+/* L150: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		    i__3 = *m, i__4 = j - 1;
+		    i__2 = min(i__3,i__4);
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			work[i__] += c_abs(&a[i__ + j * a_dim1]);
+/* L160: */
+		    }
+/* L170: */
+		}
+	    } else {
+		i__1 = *m;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 0.f;
+/* L180: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = min(*m,j);
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			work[i__] += c_abs(&a[i__ + j * a_dim1]);
+/* L190: */
+		    }
+/* L200: */
+		}
+	    }
+	} else {
+	    if (lsame_(diag, "U")) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 1.f;
+/* L210: */
+		}
+		i__1 = *m;
+		for (i__ = *n + 1; i__ <= i__1; ++i__) {
+		    work[i__] = 0.f; /* rows below the square part carry no unit-diagonal entry */
+/* L220: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *m;
+		    for (i__ = j + 1; i__ <= i__2; ++i__) {
+			work[i__] += c_abs(&a[i__ + j * a_dim1]);
+/* L230: */
+		    }
+/* L240: */
+		}
+	    } else {
+		i__1 = *m;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 0.f;
+/* L250: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *m;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			work[i__] += c_abs(&a[i__ + j * a_dim1]);
+/* L260: */
+		    }
+/* L270: */
+		}
+	    }
+	}
+	value = 0.f;
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	    r__1 = value, r__2 = work[i__];
+	    value = dmax(r__1,r__2);
+/* L280: */
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+	if (lsame_(uplo, "U")) {
+	    if (lsame_(diag, "U")) {
+		scale = 1.f;
+		sum = (real) min(*m,*n); /* min(m,n) implicit unit diagonal entries contribute min(m,n) to the sum of squares */
+		i__1 = *n;
+		for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+		    i__3 = *m, i__4 = j - 1;
+		    i__2 = min(i__3,i__4);
+		    classq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L290: */
+		}
+	    } else {
+		scale = 0.f;
+		sum = 1.f;
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = min(*m,j);
+		    classq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L300: */
+		}
+	    }
+	} else {
+	    if (lsame_(diag, "U")) {
+		scale = 1.f;
+		sum = (real) min(*m,*n);
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *m - j;
+/* Computing MIN */
+		    i__3 = *m, i__4 = j + 1;
+		    classq_(&i__2, &a[min(i__3, i__4)+ j * a_dim1], &c__1, &
+			    scale, &sum);
+/* L310: */
+		}
+	    } else {
+		scale = 0.f;
+		sum = 1.f;
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *m - j + 1;
+		    classq_(&i__2, &a[j + j * a_dim1], &c__1, &scale, &sum);
+/* L320: */
+		}
+	    }
+	}
+	value = scale * sqrt(sum); /* classq_ kept sum scaled, so this recombination avoids overflow/underflow */
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/* End of CLANTR */
+
+} /* clantr_ */
diff --git a/contrib/libs/clapack/clapll.c b/contrib/libs/clapack/clapll.c
new file mode 100644
index 0000000000..e398c67b2f
--- /dev/null
+++ b/contrib/libs/clapack/clapll.c
@@ -0,0 +1,143 @@
+/* clapll.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int clapll_(integer *n, complex *x, integer *incx, complex *
+	y, integer *incy, real *ssmin)
+{
+    /* System generated locals */
+    integer i__1;
+    real r__1, r__2, r__3;
+    complex q__1, q__2, q__3, q__4;
+
+    /* Builtin functions */
+    void r_cnjg(complex *, complex *);
+    double c_abs(complex *);
+
+    /* Local variables */
+    complex c__, a11, a12, a22, tau;
+    extern /* Subroutine */ int slas2_(real *, real *, real *, real *, real *)
+	    ;
+    extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+	    *, complex *, integer *);
+    extern /* Subroutine */ int caxpy_(integer *, complex *, complex *,
+	    integer *, complex *, integer *);
+    real ssmax;
+    extern /* Subroutine */ int clarfg_(integer *, complex *, complex *,
+	    integer *, complex *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Given two column vectors X and Y, let */
+
+/* A = ( X Y ). */
+
+/* The subroutine first computes the QR factorization of A = Q*R, */
+/* and then computes the SVD of the 2-by-2 upper triangular matrix R. */
+/* The smaller singular value of R is returned in SSMIN, which is used */
+/* as the measurement of the linear dependency of the vectors X and Y. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The length of the vectors X and Y. */
+
+/* X (input/output) COMPLEX array, dimension (1+(N-1)*INCX) */
+/* On entry, X contains the N-vector X. */
+/* On exit, X is overwritten. */
+
+/* INCX (input) INTEGER */
+/* The increment between successive elements of X. INCX > 0. */
+
+/* Y (input/output) COMPLEX array, dimension (1+(N-1)*INCY) */
+/* On entry, Y contains the N-vector Y. */
+/* On exit, Y is overwritten. */
+
+/* INCY (input) INTEGER */
+/* The increment between successive elements of Y. INCY > 0. */
+
+/* SSMIN (output) REAL */
+/* The smallest singular value of the N-by-2 matrix A = ( X Y ). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+    /* Parameter adjustments */
+    --y;
+    --x;
+
+    /* Function Body */
+    if (*n <= 1) {
+	*ssmin = 0.f; /* with at most one row, the 2-column matrix is rank-deficient */
+	return 0;
+    }
+
+/* Compute the QR factorization of the N-by-2 matrix ( X Y ) */
+
+    clarfg_(n, &x[1], &x[*incx + 1], incx, &tau);
+    a11.r = x[1].r, a11.i = x[1].i; /* clarfg_ left R(1,1) in x[1]; save it before overwriting */
+    x[1].r = 1.f, x[1].i = 0.f; /* x now holds the Householder vector v with v(1) = 1 */
+
+    r_cnjg(&q__3, &tau);
+    q__2.r = -q__3.r, q__2.i = -q__3.i;
+    cdotc_(&q__4, n, &x[1], incx, &y[1], incy);
+    q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r * q__4.i +
+	    q__2.i * q__4.r;
+    c__.r = q__1.r, c__.i = q__1.i; /* c = -conjg(tau) * (v' * y) */
+    caxpy_(n, &c__, &x[1], incx, &y[1], incy); /* y := y + c*v, i.e. apply the reflector to y */
+
+    i__1 = *n - 1;
+    clarfg_(&i__1, &y[*incy + 1], &y[(*incy << 1) + 1], incy, &tau);
+
+    a12.r = y[1].r, a12.i = y[1].i; /* R(1,2) */
+    i__1 = *incy + 1;
+    a22.r = y[i__1].r, a22.i = y[i__1].i; /* R(2,2) left by the second reflector */
+
+/* Compute the SVD of 2-by-2 Upper triangular matrix. */
+
+    r__1 = c_abs(&a11);
+    r__2 = c_abs(&a12);
+    r__3 = c_abs(&a22);
+    slas2_(&r__1, &r__2, &r__3, ssmin, &ssmax); /* slas2_ is real-valued; the moduli of R's entries suffice for its singular values */
+
+    return 0;
+
+/* End of CLAPLL */
+
+} /* clapll_ */
diff --git a/contrib/libs/clapack/clapmt.c b/contrib/libs/clapack/clapmt.c
new file mode 100644
index 0000000000..7e63e4dbc3
--- /dev/null
+++ b/contrib/libs/clapack/clapmt.c
@@ -0,0 +1,185 @@
+/* clapmt.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int clapmt_(logical *forwrd, integer *m, integer *n, complex
+ *x, integer *ldx, integer *k)
+{
+ /* System generated locals */
+ integer x_dim1, x_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j, ii, in;
+ complex temp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAPMT rearranges the columns of the M by N matrix X as specified */
+/* by the permutation K(1),K(2),...,K(N) of the integers 1,...,N. */
+/* If FORWRD = .TRUE., forward permutation: */
+
+/* X(*,K(J)) is moved X(*,J) for J = 1,2,...,N. */
+
+/* If FORWRD = .FALSE., backward permutation: */
+
+/* X(*,J) is moved to X(*,K(J)) for J = 1,2,...,N. */
+
+/* Arguments */
+/* ========= */
+
+/* FORWRD (input) LOGICAL */
+/* = .TRUE., forward permutation */
+/* = .FALSE., backward permutation */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix X. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix X. N >= 0. */
+
+/* X (input/output) COMPLEX array, dimension (LDX,N) */
+/* On entry, the M by N matrix X. */
+/* On exit, X contains the permuted matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X, LDX >= MAX(1,M). */
+
+/* K (input/output) INTEGER array, dimension (N) */
+/* On entry, K contains the permutation vector. K is used as */
+/* internal workspace, but reset to its original value on */
+/* output. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --k;
+
+ /* Function Body */
+ if (*n <= 1) {
+ return 0;
+ }
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ k[i__] = -k[i__];
+/* L10: */
+ }
+
+ if (*forwrd) {
+
+/* Forward permutation */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+ if (k[i__] > 0) {
+ goto L40;
+ }
+
+ j = i__;
+ k[j] = -k[j];
+ in = k[j];
+
+L20:
+ if (k[in] > 0) {
+ goto L40;
+ }
+
+ i__2 = *m;
+ for (ii = 1; ii <= i__2; ++ii) {
+ i__3 = ii + j * x_dim1;
+ temp.r = x[i__3].r, temp.i = x[i__3].i;
+ i__3 = ii + j * x_dim1;
+ i__4 = ii + in * x_dim1;
+ x[i__3].r = x[i__4].r, x[i__3].i = x[i__4].i;
+ i__3 = ii + in * x_dim1;
+ x[i__3].r = temp.r, x[i__3].i = temp.i;
+/* L30: */
+ }
+
+ k[in] = -k[in];
+ j = in;
+ in = k[in];
+ goto L20;
+
+L40:
+
+/* L60: */
+ ;
+ }
+
+ } else {
+
+/* Backward permutation */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+ if (k[i__] > 0) {
+ goto L100;
+ }
+
+ k[i__] = -k[i__];
+ j = k[i__];
+L80:
+ if (j == i__) {
+ goto L100;
+ }
+
+ i__2 = *m;
+ for (ii = 1; ii <= i__2; ++ii) {
+ i__3 = ii + i__ * x_dim1;
+ temp.r = x[i__3].r, temp.i = x[i__3].i;
+ i__3 = ii + i__ * x_dim1;
+ i__4 = ii + j * x_dim1;
+ x[i__3].r = x[i__4].r, x[i__3].i = x[i__4].i;
+ i__3 = ii + j * x_dim1;
+ x[i__3].r = temp.r, x[i__3].i = temp.i;
+/* L90: */
+ }
+
+ k[j] = -k[j];
+ j = k[j];
+ goto L80;
+
+L100:
+/* L110: */
+ ;
+ }
+
+ }
+
+ return 0;
+
+/* End of CLAPMT */
+
+} /* clapmt_ */
diff --git a/contrib/libs/clapack/claqgb.c b/contrib/libs/clapack/claqgb.c
new file mode 100644
index 0000000000..a323247341
--- /dev/null
+++ b/contrib/libs/clapack/claqgb.c
@@ -0,0 +1,227 @@
+/* claqgb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int claqgb_(integer *m, integer *n, integer *kl, integer *ku,
+ complex *ab, integer *ldab, real *r__, real *c__, real *rowcnd, real
+ *colcnd, real *amax, char *equed)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+ real r__1;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, j;
+ real cj, large, small;
+ extern doublereal slamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAQGB equilibrates a general M by N band matrix A with KL */
+/* subdiagonals and KU superdiagonals using the row and column scaling */
+/* factors in the vectors R and C. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input/output) COMPLEX array, dimension (LDAB,N) */
+/* On entry, the matrix A in band storage, in rows 1 to KL+KU+1. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl) */
+
+/* On exit, the equilibrated matrix, in the same storage format */
+/* as A. See EQUED for the form of the equilibrated matrix. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KL+KU+1. */
+
+/* R (input) REAL array, dimension (M) */
+/* The row scale factors for A. */
+
+/* C (input) REAL array, dimension (N) */
+/* The column scale factors for A. */
+
+/* ROWCND (input) REAL */
+/* Ratio of the smallest R(i) to the largest R(i). */
+
+/* COLCND (input) REAL */
+/* Ratio of the smallest C(i) to the largest C(i). */
+
+/* AMAX (input) REAL */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration */
+/* = 'R': Row equilibration, i.e., A has been premultiplied by */
+/* diag(R). */
+/* = 'C': Column equilibration, i.e., A has been postmultiplied */
+/* by diag(C). */
+/* = 'B': Both row and column equilibration, i.e., A has been */
+/* replaced by diag(R) * A * diag(C). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if row or column scaling */
+/* should be done based on the ratio of the row or column scaling */
+/* factors. If ROWCND < THRESH, row scaling is done, and if */
+/* COLCND < THRESH, column scaling is done. */
+
+/* LARGE and SMALL are threshold values used to decide if row scaling */
+/* should be done based on the absolute size of the largest matrix */
+/* element. If AMAX > LARGE or AMAX < SMALL, row scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --r__;
+ --c__;
+
+ /* Function Body */
+ if (*m <= 0 || *n <= 0) {
+ *(unsigned char *)equed = 'N';
+ return 0;
+ }
+
+/* Initialize LARGE and SMALL. */
+
+ small = slamch_("Safe minimum") / slamch_("Precision");
+ large = 1.f / small;
+
+ if (*rowcnd >= .1f && *amax >= small && *amax <= large) {
+
+/* No row scaling */
+
+ if (*colcnd >= .1f) {
+
+/* No column scaling */
+
+ *(unsigned char *)equed = 'N';
+ } else {
+
+/* Column scaling */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = c__[j];
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *ku;
+/* Computing MIN */
+ i__5 = *m, i__6 = j + *kl;
+ i__4 = min(i__5,i__6);
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ i__2 = *ku + 1 + i__ - j + j * ab_dim1;
+ i__3 = *ku + 1 + i__ - j + j * ab_dim1;
+ q__1.r = cj * ab[i__3].r, q__1.i = cj * ab[i__3].i;
+ ab[i__2].r = q__1.r, ab[i__2].i = q__1.i;
+/* L10: */
+ }
+/* L20: */
+ }
+ *(unsigned char *)equed = 'C';
+ }
+ } else if (*colcnd >= .1f) {
+
+/* Row scaling, no column scaling */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *ku;
+/* Computing MIN */
+ i__5 = *m, i__6 = j + *kl;
+ i__3 = min(i__5,i__6);
+ for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+ i__4 = *ku + 1 + i__ - j + j * ab_dim1;
+ i__2 = i__;
+ i__5 = *ku + 1 + i__ - j + j * ab_dim1;
+ q__1.r = r__[i__2] * ab[i__5].r, q__1.i = r__[i__2] * ab[i__5]
+ .i;
+ ab[i__4].r = q__1.r, ab[i__4].i = q__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ *(unsigned char *)equed = 'R';
+ } else {
+
+/* Row and column scaling */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = c__[j];
+/* Computing MAX */
+ i__3 = 1, i__4 = j - *ku;
+/* Computing MIN */
+ i__5 = *m, i__6 = j + *kl;
+ i__2 = min(i__5,i__6);
+ for (i__ = max(i__3,i__4); i__ <= i__2; ++i__) {
+ i__3 = *ku + 1 + i__ - j + j * ab_dim1;
+ r__1 = cj * r__[i__];
+ i__4 = *ku + 1 + i__ - j + j * ab_dim1;
+ q__1.r = r__1 * ab[i__4].r, q__1.i = r__1 * ab[i__4].i;
+ ab[i__3].r = q__1.r, ab[i__3].i = q__1.i;
+/* L50: */
+ }
+/* L60: */
+ }
+ *(unsigned char *)equed = 'B';
+ }
+
+ return 0;
+
+/* End of CLAQGB */
+
+} /* claqgb_ */
diff --git a/contrib/libs/clapack/claqge.c b/contrib/libs/clapack/claqge.c
new file mode 100644
index 0000000000..ed87bacadc
--- /dev/null
+++ b/contrib/libs/clapack/claqge.c
@@ -0,0 +1,202 @@
+/* claqge.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int claqge_(integer *m, integer *n, complex *a, integer *lda,
+ real *r__, real *c__, real *rowcnd, real *colcnd, real *amax, char *
+ equed)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, j;
+ real cj, large, small;
+ extern doublereal slamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAQGE equilibrates a general M by N matrix A using the row and */
+/* column scaling factors in the vectors R and C. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M by N matrix A. */
+/* On exit, the equilibrated matrix. See EQUED for the form of */
+/* the equilibrated matrix. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(M,1). */
+
+/* R (input) REAL array, dimension (M) */
+/* The row scale factors for A. */
+
+/* C (input) REAL array, dimension (N) */
+/* The column scale factors for A. */
+
+/* ROWCND (input) REAL */
+/* Ratio of the smallest R(i) to the largest R(i). */
+
+/* COLCND (input) REAL */
+/* Ratio of the smallest C(i) to the largest C(i). */
+
+/* AMAX (input) REAL */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration */
+/* = 'R': Row equilibration, i.e., A has been premultiplied by */
+/* diag(R). */
+/* = 'C': Column equilibration, i.e., A has been postmultiplied */
+/* by diag(C). */
+/* = 'B': Both row and column equilibration, i.e., A has been */
+/* replaced by diag(R) * A * diag(C). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if row or column scaling */
+/* should be done based on the ratio of the row or column scaling */
+/* factors. If ROWCND < THRESH, row scaling is done, and if */
+/* COLCND < THRESH, column scaling is done. */
+
+/* LARGE and SMALL are threshold values used to decide if row scaling */
+/* should be done based on the absolute size of the largest matrix */
+/* element. If AMAX > LARGE or AMAX < SMALL, row scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --r__;
+ --c__;
+
+ /* Function Body */
+ if (*m <= 0 || *n <= 0) {
+ *(unsigned char *)equed = 'N';
+ return 0;
+ }
+
+/* Initialize LARGE and SMALL. */
+
+ small = slamch_("Safe minimum") / slamch_("Precision");
+ large = 1.f / small;
+
+ if (*rowcnd >= .1f && *amax >= small && *amax <= large) {
+
+/* No row scaling */
+
+ if (*colcnd >= .1f) {
+
+/* No column scaling */
+
+ *(unsigned char *)equed = 'N';
+ } else {
+
+/* Column scaling */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = c__[j];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ q__1.r = cj * a[i__4].r, q__1.i = cj * a[i__4].i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L10: */
+ }
+/* L20: */
+ }
+ *(unsigned char *)equed = 'C';
+ }
+ } else if (*colcnd >= .1f) {
+
+/* Row scaling, no column scaling */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ q__1.r = r__[i__4] * a[i__5].r, q__1.i = r__[i__4] * a[i__5]
+ .i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ *(unsigned char *)equed = 'R';
+ } else {
+
+/* Row and column scaling */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = c__[j];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ r__1 = cj * r__[i__];
+ i__4 = i__ + j * a_dim1;
+ q__1.r = r__1 * a[i__4].r, q__1.i = r__1 * a[i__4].i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L50: */
+ }
+/* L60: */
+ }
+ *(unsigned char *)equed = 'B';
+ }
+
+ return 0;
+
+/* End of CLAQGE */
+
+} /* claqge_ */
diff --git a/contrib/libs/clapack/claqhb.c b/contrib/libs/clapack/claqhb.c
new file mode 100644
index 0000000000..e4dc7f8b2b
--- /dev/null
+++ b/contrib/libs/clapack/claqhb.c
@@ -0,0 +1,200 @@
+/* claqhb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int claqhb_(char *uplo, integer *n, integer *kd, complex *ab,
+ integer *ldab, real *s, real *scond, real *amax, char *equed)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+ real r__1;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, j;
+ real cj, large;
+ extern logical lsame_(char *, char *);
+ real small;
+ extern doublereal slamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAQHB equilibrates an Hermitian band matrix A using the scaling */
+/* factors in the vector S. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* Hermitian matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of super-diagonals of the matrix A if UPLO = 'U', */
+/* or the number of sub-diagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) COMPLEX array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, if EQUED = 'Y', the equilibrated matrix */
+/* diag(S) * A * diag(S), stored in the same band */
+/* storage format as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* S (input) REAL array, dimension (N) */
+/* The scale factors for A. */
+
+/* SCOND (input) REAL */
+/* Ratio of the smallest S(i) to the largest S(i). */
+
+/* AMAX (input) REAL */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies whether or not equilibration was done. */
+/* = 'N': No equilibration. */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if scaling should be done */
+/* based on the ratio of the scaling factors. If SCOND < THRESH, */
+/* scaling is done. */
+
+/* LARGE and SMALL are threshold values used to decide if scaling should */
+/* be done based on the absolute size of the largest matrix element. */
+/* If AMAX > LARGE or AMAX < SMALL, scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --s;
+
+ /* Function Body */
+ if (*n <= 0) {
+ *(unsigned char *)equed = 'N';
+ return 0;
+ }
+
+/* Initialize LARGE and SMALL. */
+
+ small = slamch_("Safe minimum") / slamch_("Precision");
+ large = 1.f / small;
+
+ if (*scond >= .1f && *amax >= small && *amax <= large) {
+
+/* No equilibration */
+
+ *(unsigned char *)equed = 'N';
+ } else {
+
+/* Replace A by diag(S) * A * diag(S). */
+
+ if (lsame_(uplo, "U")) {
+
+/* Upper triangle of A is stored in band format. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *kd;
+ i__4 = j - 1;
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ i__2 = *kd + 1 + i__ - j + j * ab_dim1;
+ r__1 = cj * s[i__];
+ i__3 = *kd + 1 + i__ - j + j * ab_dim1;
+ q__1.r = r__1 * ab[i__3].r, q__1.i = r__1 * ab[i__3].i;
+ ab[i__2].r = q__1.r, ab[i__2].i = q__1.i;
+/* L10: */
+ }
+ i__4 = *kd + 1 + j * ab_dim1;
+ i__2 = *kd + 1 + j * ab_dim1;
+ r__1 = cj * cj * ab[i__2].r;
+ ab[i__4].r = r__1, ab[i__4].i = 0.f;
+/* L20: */
+ }
+ } else {
+
+/* Lower triangle of A is stored. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+ i__4 = j * ab_dim1 + 1;
+ i__2 = j * ab_dim1 + 1;
+ r__1 = cj * cj * ab[i__2].r;
+ ab[i__4].r = r__1, ab[i__4].i = 0.f;
+/* Computing MIN */
+ i__2 = *n, i__3 = j + *kd;
+ i__4 = min(i__2,i__3);
+ for (i__ = j + 1; i__ <= i__4; ++i__) {
+ i__2 = i__ + 1 - j + j * ab_dim1;
+ r__1 = cj * s[i__];
+ i__3 = i__ + 1 - j + j * ab_dim1;
+ q__1.r = r__1 * ab[i__3].r, q__1.i = r__1 * ab[i__3].i;
+ ab[i__2].r = q__1.r, ab[i__2].i = q__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ *(unsigned char *)equed = 'Y';
+ }
+
+ return 0;
+
+/* End of CLAQHB */
+
+} /* claqhb_ */
diff --git a/contrib/libs/clapack/claqhe.c b/contrib/libs/clapack/claqhe.c
new file mode 100644
index 0000000000..507a7ac3c6
--- /dev/null
+++ b/contrib/libs/clapack/claqhe.c
@@ -0,0 +1,192 @@
+/* claqhe.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int claqhe_(char *uplo, integer *n, complex *a, integer *lda,
+ real *s, real *scond, real *amax, char *equed)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ real r__1;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, j;
+ real cj, large;
+ extern logical lsame_(char *, char *);
+ real small;
+ extern doublereal slamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAQHE equilibrates a Hermitian matrix A using the scaling factors */
+/* in the vector S. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* Hermitian matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if EQUED = 'Y', the equilibrated matrix: */
+/* diag(S) * A * diag(S). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(N,1). */
+
+/* S (input) REAL array, dimension (N) */
+/* The scale factors for A. */
+
+/* SCOND (input) REAL */
+/* Ratio of the smallest S(i) to the largest S(i). */
+
+/* AMAX (input) REAL */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies whether or not equilibration was done. */
+/* = 'N': No equilibration. */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if scaling should be done */
+/* based on the ratio of the scaling factors. If SCOND < THRESH, */
+/* scaling is done. */
+
+/* LARGE and SMALL are threshold values used to decide if scaling should */
+/* be done based on the absolute size of the largest matrix element. */
+/* If AMAX > LARGE or AMAX < SMALL, scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+
+ /* Function Body */
+ if (*n <= 0) {
+ *(unsigned char *)equed = 'N';
+ return 0;
+ }
+
+/* Initialize LARGE and SMALL. */
+
+ small = slamch_("Safe minimum") / slamch_("Precision");
+ large = 1.f / small;
+
+ if (*scond >= .1f && *amax >= small && *amax <= large) {
+
+/* No equilibration */
+
+ *(unsigned char *)equed = 'N';
+ } else {
+
+/* Replace A by diag(S) * A * diag(S). */
+
+ if (lsame_(uplo, "U")) {
+
+/* Upper triangle of A is stored. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ r__1 = cj * s[i__];
+ i__4 = i__ + j * a_dim1;
+ q__1.r = r__1 * a[i__4].r, q__1.i = r__1 * a[i__4].i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L10: */
+ }
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ r__1 = cj * cj * a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+/* L20: */
+ }
+ } else {
+
+/* Lower triangle of A is stored. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ r__1 = cj * cj * a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ r__1 = cj * s[i__];
+ i__4 = i__ + j * a_dim1;
+ q__1.r = r__1 * a[i__4].r, q__1.i = r__1 * a[i__4].i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ *(unsigned char *)equed = 'Y';
+ }
+
+ return 0;
+
+/* End of CLAQHE */
+
+} /* claqhe_ */
diff --git a/contrib/libs/clapack/claqhp.c b/contrib/libs/clapack/claqhp.c
new file mode 100644
index 0000000000..2676a25fb0
--- /dev/null
+++ b/contrib/libs/clapack/claqhp.c
@@ -0,0 +1,189 @@
+/* claqhp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int claqhp_(char *uplo, integer *n, complex *ap, real *s,
+ real *scond, real *amax, char *equed)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ real r__1;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, j, jc;
+ real cj, large;
+ extern logical lsame_(char *, char *);
+ real small;
+ extern doublereal slamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAQHP equilibrates a Hermitian matrix A using the scaling factors */
+/* in the vector S. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* Hermitian matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the equilibrated matrix: diag(S) * A * diag(S), in */
+/* the same storage format as A. */
+
+/* S (input) REAL array, dimension (N) */
+/* The scale factors for A. */
+
+/* SCOND (input) REAL */
+/* Ratio of the smallest S(i) to the largest S(i). */
+
+/* AMAX (input) REAL */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies whether or not equilibration was done. */
+/* = 'N': No equilibration. */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if scaling should be done */
+/* based on the ratio of the scaling factors. If SCOND < THRESH, */
+/* scaling is done. */
+
+/* LARGE and SMALL are threshold values used to decide if scaling should */
+/* be done based on the absolute size of the largest matrix element. */
+/* If AMAX > LARGE or AMAX < SMALL, scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ --s;
+ --ap;
+
+ /* Function Body */
+ if (*n <= 0) {
+ *(unsigned char *)equed = 'N';
+ return 0;
+ }
+
+/* Initialize LARGE and SMALL. */
+
+ small = slamch_("Safe minimum") / slamch_("Precision");
+ large = 1.f / small;
+
+ if (*scond >= .1f && *amax >= small && *amax <= large) {
+
+/* No equilibration */
+
+ *(unsigned char *)equed = 'N';
+ } else {
+
+/* Replace A by diag(S) * A * diag(S). */
+
+ if (lsame_(uplo, "U")) {
+
+/* Upper triangle of A is stored. */
+
+ jc = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = jc + i__ - 1;
+ r__1 = cj * s[i__];
+ i__4 = jc + i__ - 1;
+ q__1.r = r__1 * ap[i__4].r, q__1.i = r__1 * ap[i__4].i;
+ ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+/* L10: */
+ }
+ i__2 = jc + j - 1;
+ i__3 = jc + j - 1;
+ r__1 = cj * cj * ap[i__3].r;
+ ap[i__2].r = r__1, ap[i__2].i = 0.f;
+ jc += j;
+/* L20: */
+ }
+ } else {
+
+/* Lower triangle of A is stored. */
+
+ jc = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+ i__2 = jc;
+ i__3 = jc;
+ r__1 = cj * cj * ap[i__3].r;
+ ap[i__2].r = r__1, ap[i__2].i = 0.f;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = jc + i__ - j;
+ r__1 = cj * s[i__];
+ i__4 = jc + i__ - j;
+ q__1.r = r__1 * ap[i__4].r, q__1.i = r__1 * ap[i__4].i;
+ ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+/* L30: */
+ }
+ jc = jc + *n - j + 1;
+/* L40: */
+ }
+ }
+ *(unsigned char *)equed = 'Y';
+ }
+
+ return 0;
+
+/* End of CLAQHP */
+
+} /* claqhp_ */
diff --git a/contrib/libs/clapack/claqp2.c b/contrib/libs/clapack/claqp2.c
new file mode 100644
index 0000000000..6dc8ed8de0
--- /dev/null
+++ b/contrib/libs/clapack/claqp2.c
@@ -0,0 +1,244 @@
+/* claqp2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int claqp2_(integer *m, integer *n, integer *offset, complex
+ *a, integer *lda, integer *jpvt, complex *tau, real *vn1, real *vn2,
+ complex *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1;
+ complex q__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+ void r_cnjg(complex *, complex *);
+ double c_abs(complex *);
+
+ /* Local variables */
+ integer i__, j, mn;
+ complex aii;
+ integer pvt;
+ real temp, temp2, tol3z;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+, integer *, complex *, complex *, integer *, complex *);
+ integer offpi;
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ integer itemp;
+ extern doublereal scnrm2_(integer *, complex *, integer *);
+ extern /* Subroutine */ int clarfp_(integer *, complex *, complex *,
+ integer *, complex *);
+ extern doublereal slamch_(char *);
+ extern integer isamax_(integer *, real *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAQP2 computes a QR factorization with column pivoting of */
+/* the block A(OFFSET+1:M,1:N). */
+/* The block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* OFFSET (input) INTEGER */
+/* The number of rows of the matrix A that must be pivoted */
+/* but not factorized. OFFSET >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the upper triangle of block A(OFFSET+1:M,1:N) is */
+/* the triangular factor obtained; the elements in block */
+/* A(OFFSET+1:M,1:N) below the diagonal, together with the */
+/* array TAU, represent the orthogonal matrix Q as a product of */
+/* elementary reflectors. Block A(1:OFFSET,1:N) has been */
+/* accordingly pivoted, but not factorized. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(i) .ne. 0, the i-th column of A is permuted */
+/* to the front of A*P (a leading column); if JPVT(i) = 0, */
+/* the i-th column of A is a free column. */
+/* On exit, if JPVT(i) = k, then the i-th column of A*P */
+/* was the k-th column of A. */
+
+/* TAU (output) COMPLEX array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors. */
+
+/* VN1 (input/output) REAL array, dimension (N) */
+/* The vector with the partial column norms. */
+
+/* VN2 (input/output) REAL array, dimension (N) */
+/* The vector with the exact column norms. */
+
+/* WORK (workspace) COMPLEX array, dimension (N) */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+/* X. Sun, Computer Science Dept., Duke University, USA */
+
+/* Partial column norm updating strategy modified by */
+/* Z. Drmac and Z. Bujanovic, Dept. of Mathematics, */
+/* University of Zagreb, Croatia. */
+/* June 2006. */
+/* For more details see LAPACK Working Note 176. */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --jpvt;
+ --tau;
+ --vn1;
+ --vn2;
+ --work;
+
+ /* Function Body */
+/* Computing MIN */
+ i__1 = *m - *offset;
+ mn = min(i__1,*n);
+ tol3z = sqrt(slamch_("Epsilon"));
+
+/* Compute factorization. */
+
+ i__1 = mn;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+ offpi = *offset + i__;
+
+/* Determine ith pivot column and swap if necessary. */
+
+ i__2 = *n - i__ + 1;
+ pvt = i__ - 1 + isamax_(&i__2, &vn1[i__], &c__1);
+
+ if (pvt != i__) {
+ cswap_(m, &a[pvt * a_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &
+ c__1);
+ itemp = jpvt[pvt];
+ jpvt[pvt] = jpvt[i__];
+ jpvt[i__] = itemp;
+ vn1[pvt] = vn1[i__];
+ vn2[pvt] = vn2[i__];
+ }
+
+/* Generate elementary reflector H(i). */
+
+ if (offpi < *m) {
+ i__2 = *m - offpi + 1;
+ clarfp_(&i__2, &a[offpi + i__ * a_dim1], &a[offpi + 1 + i__ *
+ a_dim1], &c__1, &tau[i__]);
+ } else {
+ clarfp_(&c__1, &a[*m + i__ * a_dim1], &a[*m + i__ * a_dim1], &
+ c__1, &tau[i__]);
+ }
+
+ if (i__ < *n) {
+
+/* Apply H(i)' to A(offset+i:m,i+1:n) from the left. */
+
+ i__2 = offpi + i__ * a_dim1;
+ aii.r = a[i__2].r, aii.i = a[i__2].i;
+ i__2 = offpi + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+ i__2 = *m - offpi + 1;
+ i__3 = *n - i__;
+ r_cnjg(&q__1, &tau[i__]);
+ clarf_("Left", &i__2, &i__3, &a[offpi + i__ * a_dim1], &c__1, &
+ q__1, &a[offpi + (i__ + 1) * a_dim1], lda, &work[1]);
+ i__2 = offpi + i__ * a_dim1;
+ a[i__2].r = aii.r, a[i__2].i = aii.i;
+ }
+
+/* Update partial column norms. */
+
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ if (vn1[j] != 0.f) {
+
+/* NOTE: The following 4 lines follow from the analysis in */
+/* Lapack Working Note 176. */
+
+/* Computing 2nd power */
+ r__1 = c_abs(&a[offpi + j * a_dim1]) / vn1[j];
+ temp = 1.f - r__1 * r__1;
+ temp = dmax(temp,0.f);
+/* Computing 2nd power */
+ r__1 = vn1[j] / vn2[j];
+ temp2 = temp * (r__1 * r__1);
+ if (temp2 <= tol3z) {
+ if (offpi < *m) {
+ i__3 = *m - offpi;
+ vn1[j] = scnrm2_(&i__3, &a[offpi + 1 + j * a_dim1], &
+ c__1);
+ vn2[j] = vn1[j];
+ } else {
+ vn1[j] = 0.f;
+ vn2[j] = 0.f;
+ }
+ } else {
+ vn1[j] *= sqrt(temp);
+ }
+ }
+/* L10: */
+ }
+
+/* L20: */
+ }
+
+ return 0;
+
+/* End of CLAQP2 */
+
+} /* claqp2_ */
diff --git a/contrib/libs/clapack/claqps.c b/contrib/libs/clapack/claqps.c
new file mode 100644
index 0000000000..ae14316fa8
--- /dev/null
+++ b/contrib/libs/clapack/claqps.c
@@ -0,0 +1,367 @@
+/* claqps.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int claqps_(integer *m, integer *n, integer *offset, integer
+ *nb, integer *kb, complex *a, integer *lda, integer *jpvt, complex *
+ tau, real *vn1, real *vn2, complex *auxv, complex *f, integer *ldf)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, f_dim1, f_offset, i__1, i__2, i__3;
+ real r__1, r__2;
+ complex q__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+ void r_cnjg(complex *, complex *);
+ double c_abs(complex *);
+ integer i_nint(real *);
+
+ /* Local variables */
+ integer j, k, rk;
+ complex akk;
+ integer pvt;
+ real temp, temp2, tol3z;
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *), cgemv_(char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *, complex *, complex *, integer *), cswap_(
+ integer *, complex *, integer *, complex *, integer *);
+ integer itemp;
+ extern doublereal scnrm2_(integer *, complex *, integer *);
+ extern /* Subroutine */ int clarfp_(integer *, complex *, complex *,
+ integer *, complex *);
+ extern doublereal slamch_(char *);
+ integer lsticc;
+ extern integer isamax_(integer *, real *, integer *);
+ integer lastrk;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAQPS computes a step of QR factorization with column pivoting */
+/* of a complex M-by-N matrix A by using Blas-3. It tries to factorize */
+/* NB columns from A starting from the row OFFSET+1, and updates all */
+/* of the matrix with Blas-3 xGEMM. */
+
+/* In some cases, due to catastrophic cancellations, it cannot */
+/* factorize NB columns. Hence, the actual number of factorized */
+/* columns is returned in KB. */
+
+/* Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0 */
+
+/* OFFSET (input) INTEGER */
+/* The number of rows of A that have been factorized in */
+/* previous steps. */
+
+/* NB (input) INTEGER */
+/* The number of columns to factorize. */
+
+/* KB (output) INTEGER */
+/* The number of columns actually factorized. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, block A(OFFSET+1:M,1:KB) is the triangular */
+/* factor obtained and block A(1:OFFSET,1:N) has been */
+/* accordingly pivoted, but not factorized. */
+/* The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has */
+/* been updated. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* JPVT(I) = K <==> Column K of the full matrix A has been */
+/* permuted into position I in AP. */
+
+/* TAU (output) COMPLEX array, dimension (KB) */
+/* The scalar factors of the elementary reflectors. */
+
+/* VN1 (input/output) REAL array, dimension (N) */
+/* The vector with the partial column norms. */
+
+/* VN2 (input/output) REAL array, dimension (N) */
+/* The vector with the exact column norms. */
+
+/* AUXV (input/output) COMPLEX array, dimension (NB) */
+/* Auxiliary vector. */
+
+/* F (input/output) COMPLEX array, dimension (LDF,NB) */
+/* Matrix F' = L*Y'*A. */
+
+/* LDF (input) INTEGER */
+/* The leading dimension of the array F. LDF >= max(1,N). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+/* X. Sun, Computer Science Dept., Duke University, USA */
+
+/* Partial column norm updating strategy modified by */
+/* Z. Drmac and Z. Bujanovic, Dept. of Mathematics, */
+/* University of Zagreb, Croatia. */
+/* June 2006. */
+/* For more details see LAPACK Working Note 176. */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --jpvt;
+ --tau;
+ --vn1;
+ --vn2;
+ --auxv;
+ f_dim1 = *ldf;
+ f_offset = 1 + f_dim1;
+ f -= f_offset;
+
+ /* Function Body */
+/* Computing MIN */
+ i__1 = *m, i__2 = *n + *offset;
+ lastrk = min(i__1,i__2);
+ lsticc = 0;
+ k = 0;
+ tol3z = sqrt(slamch_("Epsilon"));
+
+/* Beginning of while loop. */
+
+L10:
+ if (k < *nb && lsticc == 0) {
+ ++k;
+ rk = *offset + k;
+
+/* Determine ith pivot column and swap if necessary */
+
+ i__1 = *n - k + 1;
+ pvt = k - 1 + isamax_(&i__1, &vn1[k], &c__1);
+ if (pvt != k) {
+ cswap_(m, &a[pvt * a_dim1 + 1], &c__1, &a[k * a_dim1 + 1], &c__1);
+ i__1 = k - 1;
+ cswap_(&i__1, &f[pvt + f_dim1], ldf, &f[k + f_dim1], ldf);
+ itemp = jpvt[pvt];
+ jpvt[pvt] = jpvt[k];
+ jpvt[k] = itemp;
+ vn1[pvt] = vn1[k];
+ vn2[pvt] = vn2[k];
+ }
+
+/* Apply previous Householder reflectors to column K: */
+/* A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'. */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = k + j * f_dim1;
+ r_cnjg(&q__1, &f[k + j * f_dim1]);
+ f[i__2].r = q__1.r, f[i__2].i = q__1.i;
+/* L20: */
+ }
+ i__1 = *m - rk + 1;
+ i__2 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__1, &i__2, &q__1, &a[rk + a_dim1], lda,
+ &f[k + f_dim1], ldf, &c_b2, &a[rk + k * a_dim1], &c__1);
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = k + j * f_dim1;
+ r_cnjg(&q__1, &f[k + j * f_dim1]);
+ f[i__2].r = q__1.r, f[i__2].i = q__1.i;
+/* L30: */
+ }
+ }
+
+/* Generate elementary reflector H(k). */
+
+ if (rk < *m) {
+ i__1 = *m - rk + 1;
+ clarfp_(&i__1, &a[rk + k * a_dim1], &a[rk + 1 + k * a_dim1], &
+ c__1, &tau[k]);
+ } else {
+ clarfp_(&c__1, &a[rk + k * a_dim1], &a[rk + k * a_dim1], &c__1, &
+ tau[k]);
+ }
+
+ i__1 = rk + k * a_dim1;
+ akk.r = a[i__1].r, akk.i = a[i__1].i;
+ i__1 = rk + k * a_dim1;
+ a[i__1].r = 1.f, a[i__1].i = 0.f;
+
+/* Compute Kth column of F: */
+
+/* Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K). */
+
+ if (k < *n) {
+ i__1 = *m - rk + 1;
+ i__2 = *n - k;
+ cgemv_("Conjugate transpose", &i__1, &i__2, &tau[k], &a[rk + (k +
+ 1) * a_dim1], lda, &a[rk + k * a_dim1], &c__1, &c_b1, &f[
+ k + 1 + k * f_dim1], &c__1);
+ }
+
+/* Padding F(1:K,K) with zeros. */
+
+ i__1 = k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + k * f_dim1;
+ f[i__2].r = 0.f, f[i__2].i = 0.f;
+/* L40: */
+ }
+
+/* Incremental updating of F: */
+/* F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)' */
+/* *A(RK:M,K). */
+
+ if (k > 1) {
+ i__1 = *m - rk + 1;
+ i__2 = k - 1;
+ i__3 = k;
+ q__1.r = -tau[i__3].r, q__1.i = -tau[i__3].i;
+ cgemv_("Conjugate transpose", &i__1, &i__2, &q__1, &a[rk + a_dim1]
+, lda, &a[rk + k * a_dim1], &c__1, &c_b1, &auxv[1], &c__1);
+
+ i__1 = k - 1;
+ cgemv_("No transpose", n, &i__1, &c_b2, &f[f_dim1 + 1], ldf, &
+ auxv[1], &c__1, &c_b2, &f[k * f_dim1 + 1], &c__1);
+ }
+
+/* Update the current row of A: */
+/* A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "Conjugate transpose", &c__1, &i__1, &k, &
+ q__1, &a[rk + a_dim1], lda, &f[k + 1 + f_dim1], ldf, &
+ c_b2, &a[rk + (k + 1) * a_dim1], lda);
+ }
+
+/* Update partial column norms. */
+
+ if (rk < lastrk) {
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+ if (vn1[j] != 0.f) {
+
+/* NOTE: The following 4 lines follow from the analysis in */
+/* Lapack Working Note 176. */
+
+ temp = c_abs(&a[rk + j * a_dim1]) / vn1[j];
+/* Computing MAX */
+ r__1 = 0.f, r__2 = (temp + 1.f) * (1.f - temp);
+ temp = dmax(r__1,r__2);
+/* Computing 2nd power */
+ r__1 = vn1[j] / vn2[j];
+ temp2 = temp * (r__1 * r__1);
+ if (temp2 <= tol3z) {
+ vn2[j] = (real) lsticc;
+ lsticc = j;
+ } else {
+ vn1[j] *= sqrt(temp);
+ }
+ }
+/* L50: */
+ }
+ }
+
+ i__1 = rk + k * a_dim1;
+ a[i__1].r = akk.r, a[i__1].i = akk.i;
+
+/* End of while loop. */
+
+ goto L10;
+ }
+ *kb = k;
+ rk = *offset + *kb;
+
+/* Apply the block reflector to the rest of the matrix: */
+/* A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) - */
+/* A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)'. */
+
+/* Computing MIN */
+ i__1 = *n, i__2 = *m - *offset;
+ if (*kb < min(i__1,i__2)) {
+ i__1 = *m - rk;
+ i__2 = *n - *kb;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "Conjugate transpose", &i__1, &i__2, kb, &q__1,
+ &a[rk + 1 + a_dim1], lda, &f[*kb + 1 + f_dim1], ldf, &c_b2, &
+ a[rk + 1 + (*kb + 1) * a_dim1], lda);
+ }
+
+/* Recomputation of difficult columns. */
+
+L60:
+ if (lsticc > 0) {
+ itemp = i_nint(&vn2[lsticc]);
+ i__1 = *m - rk;
+ vn1[lsticc] = scnrm2_(&i__1, &a[rk + 1 + lsticc * a_dim1], &c__1);
+
+/* NOTE: The computation of VN1( LSTICC ) relies on the fact that */
+/* SNRM2 does not fail on vectors with norm below the value of */
+/* SQRT(DLAMCH('S')) */
+
+ vn2[lsticc] = vn1[lsticc];
+ lsticc = itemp;
+ goto L60;
+ }
+
+ return 0;
+
+/* End of CLAQPS */
+
+} /* claqps_ */
diff --git a/contrib/libs/clapack/claqr0.c b/contrib/libs/clapack/claqr0.c
new file mode 100644
index 0000000000..13b139b361
--- /dev/null
+++ b/contrib/libs/clapack/claqr0.c
@@ -0,0 +1,784 @@
+/* claqr0.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__13 = 13;
+static integer c__15 = 15;
+static integer c_n1 = -1;
+static integer c__12 = 12;
+static integer c__14 = 14;
+static integer c__16 = 16;
+static logical c_false = FALSE_;
+static integer c__1 = 1;
+static integer c__3 = 3;
+
+/* Subroutine */ int claqr0_(logical *wantt, logical *wantz, integer *n,
+ integer *ilo, integer *ihi, complex *h__, integer *ldh, complex *w,
+ integer *iloz, integer *ihiz, complex *z__, integer *ldz, complex *
+ work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4, r__5, r__6, r__7, r__8;
+ complex q__1, q__2, q__3, q__4, q__5;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+ void c_sqrt(complex *, complex *);
+
+ /* Local variables */
+ integer i__, k;
+ real s;
+ complex aa, bb, cc, dd;
+ integer ld, nh, it, ks, kt, ku, kv, ls, ns, nw;
+ complex tr2, det;
+ integer inf, kdu, nho, nve, kwh, nsr, nwr, kwv, ndec, ndfl, kbot, nmin;
+ complex swap;
+ integer ktop;
+ complex zdum[1] /* was [1][1] */;
+ integer kacc22, itmax, nsmax, nwmax, kwtop;
+ extern /* Subroutine */ int claqr3_(logical *, logical *, integer *,
+ integer *, integer *, integer *, complex *, integer *, integer *,
+ integer *, complex *, integer *, integer *, integer *, complex *,
+ complex *, integer *, integer *, complex *, integer *, integer *,
+ complex *, integer *, complex *, integer *), claqr4_(logical *,
+ logical *, integer *, integer *, integer *, complex *, integer *,
+ complex *, integer *, integer *, complex *, integer *, complex *,
+ integer *, integer *), claqr5_(logical *, logical *, integer *,
+ integer *, integer *, integer *, integer *, complex *, complex *,
+ integer *, integer *, integer *, complex *, integer *, complex *,
+ integer *, complex *, integer *, integer *, complex *, integer *,
+ integer *, complex *, integer *);
+ integer nibble;
+ extern /* Subroutine */ int clahqr_(logical *, logical *, integer *,
+ integer *, integer *, complex *, integer *, complex *, integer *,
+ integer *, complex *, integer *, integer *), clacpy_(char *,
+ integer *, integer *, complex *, integer *, complex *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ char jbcmpz[1];
+ complex rtdisc;
+ integer nwupbd;
+ logical sorted;
+ integer lwkopt;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAQR0 computes the eigenvalues of a Hessenberg matrix H */
+/* and, optionally, the matrices T and Z from the Schur decomposition */
+/* H = Z T Z**H, where T is an upper triangular matrix (the */
+/* Schur form), and Z is the unitary matrix of Schur vectors. */
+
+/* Optionally Z may be postmultiplied into an input unitary */
+/* matrix Q so that this routine can give the Schur factorization */
+/* of a matrix A which has been reduced to the Hessenberg form H */
+/* by the unitary matrix Q: A = Q*H*Q**H = (QZ)*H*(QZ)**H. */
+
+/* Arguments */
+/* ========= */
+
+/* WANTT (input) LOGICAL */
+/* = .TRUE. : the full Schur form T is required; */
+/* = .FALSE.: only eigenvalues are required. */
+
+/* WANTZ (input) LOGICAL */
+/* = .TRUE. : the matrix of Schur vectors Z is required; */
+/* = .FALSE.: Schur vectors are not required. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N .GE. 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that H is already upper triangular in rows */
+/* and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, */
+/* H(ILO,ILO-1) is zero. ILO and IHI are normally set by a */
+/* previous call to CGEBAL, and then passed to CGEHRD when the */
+/* matrix output by CGEBAL is reduced to Hessenberg form. */
+/* Otherwise, ILO and IHI should be set to 1 and N, */
+/* respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. */
+/* If N = 0, then ILO = 1 and IHI = 0. */
+
+/* H (input/output) COMPLEX array, dimension (LDH,N) */
+/* On entry, the upper Hessenberg matrix H. */
+/* On exit, if INFO = 0 and WANTT is .TRUE., then H */
+/* contains the upper triangular matrix T from the Schur */
+/* decomposition (the Schur form). If INFO = 0 and WANTT is */
+/* .FALSE., then the contents of H are unspecified on exit. */
+/* (The output value of H when INFO.GT.0 is given under the */
+/* description of INFO below.) */
+
+/* This subroutine may explicitly set H(i,j) = 0 for i.GT.j and */
+/* j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH .GE. max(1,N). */
+
+/* W (output) COMPLEX array, dimension (N) */
+/* The computed eigenvalues of H(ILO:IHI,ILO:IHI) are stored */
+/* in W(ILO:IHI). If WANTT is .TRUE., then the eigenvalues are */
+/* stored in the same order as on the diagonal of the Schur */
+/* form returned in H, with W(i) = H(i,i). */
+
+/* Z (input/output) COMPLEX array, dimension (LDZ,IHI) */
+/* If WANTZ is .FALSE., then Z is not referenced. */
+/* If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is */
+/* replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the */
+/* orthogonal Schur factor of H(ILO:IHI,ILO:IHI). */
+/* (The output value of Z when INFO.GT.0 is given under */
+/* the description of INFO below.) */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. if WANTZ is .TRUE. */
+/* then LDZ.GE.MAX(1,IHIZ). Otherwise, LDZ.GE.1. */
+
+/* WORK (workspace/output) COMPLEX array, dimension LWORK */
+/* On exit, if LWORK = -1, WORK(1) returns an estimate of */
+/* the optimal value for LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK .GE. max(1,N) */
+/* is sufficient, but LWORK typically as large as 6*N may */
+/* be required for optimal performance. A workspace query */
+/* to determine the optimal workspace size is recommended. */
+
+/* If LWORK = -1, then CLAQR0 does a workspace query. */
+/* In this case, CLAQR0 checks the input parameters and */
+/* estimates the optimal workspace size for the given */
+/* values of N, ILO and IHI. The estimate is returned */
+/* in WORK(1). No error message related to LWORK is */
+/* issued by XERBLA. Neither H nor Z are accessed. */
+
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* .GT. 0: if INFO = i, CLAQR0 failed to compute all of */
+/* the eigenvalues. Elements 1:ilo-1 and i+1:n of WR */
+/* and WI contain those eigenvalues which have been */
+/* successfully computed. (Failures are rare.) */
+
+/* If INFO .GT. 0 and WANTT is .FALSE., then on exit, */
+/* the remaining unconverged eigenvalues are the eigen- */
+/* values of the upper Hessenberg matrix rows and */
+/* columns ILO through INFO of the final, output */
+/* value of H. */
+
+/* If INFO .GT. 0 and WANTT is .TRUE., then on exit */
+
+/* (*) (initial value of H)*U = U*(final value of H) */
+
+/* where U is a unitary matrix. The final */
+/* value of H is upper Hessenberg and triangular in */
+/* rows and columns INFO+1 through IHI. */
+
+/* If INFO .GT. 0 and WANTZ is .TRUE., then on exit */
+
+/* (final value of Z(ILO:IHI,ILOZ:IHIZ) */
+/* = (initial value of Z(ILO:IHI,ILOZ:IHIZ)*U */
+
+/* where U is the unitary matrix in (*) (regard- */
+/* less of the value of WANTT.) */
+
+/* If INFO .GT. 0 and WANTZ is .FALSE., then Z is not */
+/* accessed. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* References: */
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 */
+/* Performance, SIAM Journal of Matrix Analysis, volume 23, pages */
+/* 929--947, 2002. */
+
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part II: Aggressive Early Deflation, SIAM Journal */
+/* of Matrix Analysis, volume 23, pages 948--973, 2002. */
+
+/* ================================================================ */
+/* .. Parameters .. */
+
+/* ==== Matrices of order NTINY or smaller must be processed by */
+/* . CLAHQR because of insufficient subdiagonal scratch space. */
+/* . (This is a hard limit.) ==== */
+
+/* ==== Exceptional deflation windows: try to cure rare */
+/* . slow convergence by varying the size of the */
+/* . deflation window after KEXNW iterations. ==== */
+
+/* ==== Exceptional shifts: try to cure rare slow convergence */
+/* . with ad-hoc exceptional shifts every KEXSH iterations. */
+/* . ==== */
+
+/* ==== The constant WILK1 is used to form the exceptional */
+/* . shifts. ==== */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+/* ==== Quick return for N = 0: nothing to do. ==== */
+
+ if (*n == 0) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ if (*n <= 11) {
+
+/* ==== Tiny matrices must use CLAHQR. ==== */
+
+ lwkopt = 1;
+ if (*lwork != -1) {
+ clahqr_(wantt, wantz, n, ilo, ihi, &h__[h_offset], ldh, &w[1],
+ iloz, ihiz, &z__[z_offset], ldz, info);
+ }
+ } else {
+
+/* ==== Use small bulge multi-shift QR with aggressive early */
+/* . deflation on larger-than-tiny matrices. ==== */
+
+/* ==== Hope for the best. ==== */
+
+ *info = 0;
+
+/* ==== Set up job flags for ILAENV. ==== */
+
+ if (*wantt) {
+ *(unsigned char *)jbcmpz = 'S';
+ } else {
+ *(unsigned char *)jbcmpz = 'E';
+ }
+ if (*wantz) {
+ *(unsigned char *)&jbcmpz[1] = 'V';
+ } else {
+ *(unsigned char *)&jbcmpz[1] = 'N';
+ }
+
+/* ==== NWR = recommended deflation window size. At this */
+/* . point, N .GT. NTINY = 11, so there is enough */
+/* . subdiagonal workspace for NWR.GE.2 as required. */
+/* . (In fact, there is enough subdiagonal space for */
+/* . NWR.GE.3.) ==== */
+
+ nwr = ilaenv_(&c__13, "CLAQR0", jbcmpz, n, ilo, ihi, lwork);
+ nwr = max(2,nwr);
+/* Computing MIN */
+ i__1 = *ihi - *ilo + 1, i__2 = (*n - 1) / 3, i__1 = min(i__1,i__2);
+ nwr = min(i__1,nwr);
+
+/* ==== NSR = recommended number of simultaneous shifts. */
+/* . At this point N .GT. NTINY = 11, so there is */
+/* . enough subdiagonal workspace for NSR to be even */
+/* . and greater than or equal to two as required. ==== */
+
+ nsr = ilaenv_(&c__15, "CLAQR0", jbcmpz, n, ilo, ihi, lwork);
+/* Computing MIN */
+ i__1 = nsr, i__2 = (*n + 6) / 9, i__1 = min(i__1,i__2), i__2 = *ihi -
+ *ilo;
+ nsr = min(i__1,i__2);
+/* Computing MAX */
+ i__1 = 2, i__2 = nsr - nsr % 2;
+ nsr = max(i__1,i__2);
+
+/* ==== Estimate optimal workspace ==== */
+
+/* ==== Workspace query call to CLAQR3 ==== */
+
+ i__1 = nwr + 1;
+ claqr3_(wantt, wantz, n, ilo, ihi, &i__1, &h__[h_offset], ldh, iloz,
+ ihiz, &z__[z_offset], ldz, &ls, &ld, &w[1], &h__[h_offset],
+ ldh, n, &h__[h_offset], ldh, n, &h__[h_offset], ldh, &work[1],
+ &c_n1);
+
+/* ==== Optimal workspace = MAX(CLAQR5, CLAQR3) ==== */
+
+/* Computing MAX */
+ i__1 = nsr * 3 / 2, i__2 = (integer) work[1].r;
+ lwkopt = max(i__1,i__2);
+
+/* ==== Quick return in case of workspace query. ==== */
+
+ if (*lwork == -1) {
+ r__1 = (real) lwkopt;
+ q__1.r = r__1, q__1.i = 0.f;
+ work[1].r = q__1.r, work[1].i = q__1.i;
+ return 0;
+ }
+
+/* ==== CLAHQR/CLAQR0 crossover point ==== */
+
+ nmin = ilaenv_(&c__12, "CLAQR0", jbcmpz, n, ilo, ihi, lwork);
+ nmin = max(11,nmin);
+
+/* ==== Nibble crossover point ==== */
+
+ nibble = ilaenv_(&c__14, "CLAQR0", jbcmpz, n, ilo, ihi, lwork);
+ nibble = max(0,nibble);
+
+/* ==== Accumulate reflections during ttswp? Use block */
+/* . 2-by-2 structure during matrix-matrix multiply? ==== */
+
+ kacc22 = ilaenv_(&c__16, "CLAQR0", jbcmpz, n, ilo, ihi, lwork);
+ kacc22 = max(0,kacc22);
+ kacc22 = min(2,kacc22);
+
+/* ==== NWMAX = the largest possible deflation window for */
+/* . which there is sufficient workspace. ==== */
+
+/* Computing MIN */
+ i__1 = (*n - 1) / 3, i__2 = *lwork / 2;
+ nwmax = min(i__1,i__2);
+ nw = nwmax;
+
+/* ==== NSMAX = the Largest number of simultaneous shifts */
+/* . for which there is sufficient workspace. ==== */
+
+/* Computing MIN */
+ i__1 = (*n + 6) / 9, i__2 = (*lwork << 1) / 3;
+ nsmax = min(i__1,i__2);
+ nsmax -= nsmax % 2;
+
+/* ==== NDFL: an iteration count restarted at deflation. ==== */
+
+ ndfl = 1;
+
+/* ==== ITMAX = iteration limit ==== */
+
+/* Computing MAX */
+ i__1 = 10, i__2 = *ihi - *ilo + 1;
+ itmax = max(i__1,i__2) * 30;
+
+/* ==== Last row and column in the active block ==== */
+
+ kbot = *ihi;
+
+/* ==== Main Loop ==== */
+
+ i__1 = itmax;
+ for (it = 1; it <= i__1; ++it) {
+
+/* ==== Done when KBOT falls below ILO ==== */
+
+ if (kbot < *ilo) {
+ goto L80;
+ }
+
+/* ==== Locate active block ==== */
+
+ i__2 = *ilo + 1;
+ for (k = kbot; k >= i__2; --k) {
+ i__3 = k + (k - 1) * h_dim1;
+ if (h__[i__3].r == 0.f && h__[i__3].i == 0.f) {
+ goto L20;
+ }
+/* L10: */
+ }
+ k = *ilo;
+L20:
+ ktop = k;
+
+/* ==== Select deflation window size: */
+/* . Typical Case: */
+/* . If possible and advisable, nibble the entire */
+/* . active block. If not, use size MIN(NWR,NWMAX) */
+/* . or MIN(NWR+1,NWMAX) depending upon which has */
+/* . the smaller corresponding subdiagonal entry */
+/* . (a heuristic). */
+/* . */
+/* . Exceptional Case: */
+/* . If there have been no deflations in KEXNW or */
+/* . more iterations, then vary the deflation window */
+/* . size. At first, because, larger windows are, */
+/* . in general, more powerful than smaller ones, */
+/* . rapidly increase the window to the maximum possible. */
+/* . Then, gradually reduce the window size. ==== */
+
+ nh = kbot - ktop + 1;
+ nwupbd = min(nh,nwmax);
+ if (ndfl < 5) {
+ nw = min(nwupbd,nwr);
+ } else {
+/* Computing MIN */
+ i__2 = nwupbd, i__3 = nw << 1;
+ nw = min(i__2,i__3);
+ }
+ if (nw < nwmax) {
+ if (nw >= nh - 1) {
+ nw = nh;
+ } else {
+ kwtop = kbot - nw + 1;
+ i__2 = kwtop + (kwtop - 1) * h_dim1;
+ i__3 = kwtop - 1 + (kwtop - 2) * h_dim1;
+ if ((r__1 = h__[i__2].r, dabs(r__1)) + (r__2 = r_imag(&
+ h__[kwtop + (kwtop - 1) * h_dim1]), dabs(r__2)) >
+ (r__3 = h__[i__3].r, dabs(r__3)) + (r__4 = r_imag(
+ &h__[kwtop - 1 + (kwtop - 2) * h_dim1]), dabs(
+ r__4))) {
+ ++nw;
+ }
+ }
+ }
+ if (ndfl < 5) {
+ ndec = -1;
+ } else if (ndec >= 0 || nw >= nwupbd) {
+ ++ndec;
+ if (nw - ndec < 2) {
+ ndec = 0;
+ }
+ nw -= ndec;
+ }
+
+/* ==== Aggressive early deflation: */
+/* . split workspace under the subdiagonal into */
+/* . - an nw-by-nw work array V in the lower */
+/* . left-hand-corner, */
+/* . - an NW-by-at-least-NW-but-more-is-better */
+/* . (NW-by-NHO) horizontal work array along */
+/* . the bottom edge, */
+/* . - an at-least-NW-but-more-is-better (NHV-by-NW) */
+/* . vertical work array along the left-hand-edge. */
+/* . ==== */
+
+ kv = *n - nw + 1;
+ kt = nw + 1;
+ nho = *n - nw - 1 - kt + 1;
+ kwv = nw + 2;
+ nve = *n - nw - kwv + 1;
+
+/* ==== Aggressive early deflation ==== */
+
+ claqr3_(wantt, wantz, n, &ktop, &kbot, &nw, &h__[h_offset], ldh,
+ iloz, ihiz, &z__[z_offset], ldz, &ls, &ld, &w[1], &h__[kv
+ + h_dim1], ldh, &nho, &h__[kv + kt * h_dim1], ldh, &nve, &
+ h__[kwv + h_dim1], ldh, &work[1], lwork);
+
+/* ==== Adjust KBOT accounting for new deflations. ==== */
+
+ kbot -= ld;
+
+/* ==== KS points to the shifts. ==== */
+
+ ks = kbot - ls + 1;
+
+/* ==== Skip an expensive QR sweep if there is a (partly */
+/* . heuristic) reason to expect that many eigenvalues */
+/* . will deflate without it. Here, the QR sweep is */
+/* . skipped if many eigenvalues have just been deflated */
+/* . or if the remaining active block is small. */
+
+ if (ld == 0 || ld * 100 <= nw * nibble && kbot - ktop + 1 > min(
+ nmin,nwmax)) {
+
+/* ==== NS = nominal number of simultaneous shifts. */
+/* . This may be lowered (slightly) if CLAQR3 */
+/* . did not provide that many shifts. ==== */
+
+/* Computing MIN */
+/* Computing MAX */
+ i__4 = 2, i__5 = kbot - ktop;
+ i__2 = min(nsmax,nsr), i__3 = max(i__4,i__5);
+ ns = min(i__2,i__3);
+ ns -= ns % 2;
+
+/* ==== If there have been no deflations */
+/* . in a multiple of KEXSH iterations, */
+/* . then try exceptional shifts. */
+/* . Otherwise use shifts provided by */
+/* . CLAQR3 above or from the eigenvalues */
+/* . of a trailing principal submatrix. ==== */
+
+ if (ndfl % 6 == 0) {
+ ks = kbot - ns + 1;
+ i__2 = ks + 1;
+ for (i__ = kbot; i__ >= i__2; i__ += -2) {
+ i__3 = i__;
+ i__4 = i__ + i__ * h_dim1;
+ i__5 = i__ + (i__ - 1) * h_dim1;
+ r__3 = ((r__1 = h__[i__5].r, dabs(r__1)) + (r__2 =
+ r_imag(&h__[i__ + (i__ - 1) * h_dim1]), dabs(
+ r__2))) * .75f;
+ q__1.r = h__[i__4].r + r__3, q__1.i = h__[i__4].i;
+ w[i__3].r = q__1.r, w[i__3].i = q__1.i;
+ i__3 = i__ - 1;
+ i__4 = i__;
+ w[i__3].r = w[i__4].r, w[i__3].i = w[i__4].i;
+/* L30: */
+ }
+ } else {
+
+/* ==== Got NS/2 or fewer shifts? Use CLAQR4 or */
+/* . CLAHQR on a trailing principal submatrix to */
+/* . get more. (Since NS.LE.NSMAX.LE.(N+6)/9, */
+/* . there is enough space below the subdiagonal */
+/* . to fit an NS-by-NS scratch array.) ==== */
+
+ if (kbot - ks + 1 <= ns / 2) {
+ ks = kbot - ns + 1;
+ kt = *n - ns + 1;
+ clacpy_("A", &ns, &ns, &h__[ks + ks * h_dim1], ldh, &
+ h__[kt + h_dim1], ldh);
+ if (ns > nmin) {
+ claqr4_(&c_false, &c_false, &ns, &c__1, &ns, &h__[
+ kt + h_dim1], ldh, &w[ks], &c__1, &c__1,
+ zdum, &c__1, &work[1], lwork, &inf);
+ } else {
+ clahqr_(&c_false, &c_false, &ns, &c__1, &ns, &h__[
+ kt + h_dim1], ldh, &w[ks], &c__1, &c__1,
+ zdum, &c__1, &inf);
+ }
+ ks += inf;
+
+/* ==== In case of a rare QR failure use */
+/* . eigenvalues of the trailing 2-by-2 */
+/* . principal submatrix. Scale to avoid */
+/* . overflows, underflows and subnormals. */
+/* . (The scale factor S can not be zero, */
+/* . because H(KBOT,KBOT-1) is nonzero.) ==== */
+
+ if (ks >= kbot) {
+ i__2 = kbot - 1 + (kbot - 1) * h_dim1;
+ i__3 = kbot + (kbot - 1) * h_dim1;
+ i__4 = kbot - 1 + kbot * h_dim1;
+ i__5 = kbot + kbot * h_dim1;
+ s = (r__1 = h__[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&h__[kbot - 1 + (kbot - 1) *
+ h_dim1]), dabs(r__2)) + ((r__3 = h__[i__3]
+ .r, dabs(r__3)) + (r__4 = r_imag(&h__[
+ kbot + (kbot - 1) * h_dim1]), dabs(r__4)))
+ + ((r__5 = h__[i__4].r, dabs(r__5)) + (
+ r__6 = r_imag(&h__[kbot - 1 + kbot *
+ h_dim1]), dabs(r__6))) + ((r__7 = h__[
+ i__5].r, dabs(r__7)) + (r__8 = r_imag(&
+ h__[kbot + kbot * h_dim1]), dabs(r__8)));
+ i__2 = kbot - 1 + (kbot - 1) * h_dim1;
+ q__1.r = h__[i__2].r / s, q__1.i = h__[i__2].i /
+ s;
+ aa.r = q__1.r, aa.i = q__1.i;
+ i__2 = kbot + (kbot - 1) * h_dim1;
+ q__1.r = h__[i__2].r / s, q__1.i = h__[i__2].i /
+ s;
+ cc.r = q__1.r, cc.i = q__1.i;
+ i__2 = kbot - 1 + kbot * h_dim1;
+ q__1.r = h__[i__2].r / s, q__1.i = h__[i__2].i /
+ s;
+ bb.r = q__1.r, bb.i = q__1.i;
+ i__2 = kbot + kbot * h_dim1;
+ q__1.r = h__[i__2].r / s, q__1.i = h__[i__2].i /
+ s;
+ dd.r = q__1.r, dd.i = q__1.i;
+ q__2.r = aa.r + dd.r, q__2.i = aa.i + dd.i;
+ q__1.r = q__2.r / 2.f, q__1.i = q__2.i / 2.f;
+ tr2.r = q__1.r, tr2.i = q__1.i;
+ q__3.r = aa.r - tr2.r, q__3.i = aa.i - tr2.i;
+ q__4.r = dd.r - tr2.r, q__4.i = dd.i - tr2.i;
+ q__2.r = q__3.r * q__4.r - q__3.i * q__4.i,
+ q__2.i = q__3.r * q__4.i + q__3.i *
+ q__4.r;
+ q__5.r = bb.r * cc.r - bb.i * cc.i, q__5.i = bb.r
+ * cc.i + bb.i * cc.r;
+ q__1.r = q__2.r - q__5.r, q__1.i = q__2.i -
+ q__5.i;
+ det.r = q__1.r, det.i = q__1.i;
+ q__2.r = -det.r, q__2.i = -det.i;
+ c_sqrt(&q__1, &q__2);
+ rtdisc.r = q__1.r, rtdisc.i = q__1.i;
+ i__2 = kbot - 1;
+ q__2.r = tr2.r + rtdisc.r, q__2.i = tr2.i +
+ rtdisc.i;
+ q__1.r = s * q__2.r, q__1.i = s * q__2.i;
+ w[i__2].r = q__1.r, w[i__2].i = q__1.i;
+ i__2 = kbot;
+ q__2.r = tr2.r - rtdisc.r, q__2.i = tr2.i -
+ rtdisc.i;
+ q__1.r = s * q__2.r, q__1.i = s * q__2.i;
+ w[i__2].r = q__1.r, w[i__2].i = q__1.i;
+
+ ks = kbot - 1;
+ }
+ }
+
+ if (kbot - ks + 1 > ns) {
+
+/* ==== Sort the shifts (Helps a little) ==== */
+
+ sorted = FALSE_;
+ i__2 = ks + 1;
+ for (k = kbot; k >= i__2; --k) {
+ if (sorted) {
+ goto L60;
+ }
+ sorted = TRUE_;
+ i__3 = k - 1;
+ for (i__ = ks; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ i__5 = i__ + 1;
+ if ((r__1 = w[i__4].r, dabs(r__1)) + (r__2 =
+ r_imag(&w[i__]), dabs(r__2)) < (r__3 =
+ w[i__5].r, dabs(r__3)) + (r__4 =
+ r_imag(&w[i__ + 1]), dabs(r__4))) {
+ sorted = FALSE_;
+ i__4 = i__;
+ swap.r = w[i__4].r, swap.i = w[i__4].i;
+ i__4 = i__;
+ i__5 = i__ + 1;
+ w[i__4].r = w[i__5].r, w[i__4].i = w[i__5]
+ .i;
+ i__4 = i__ + 1;
+ w[i__4].r = swap.r, w[i__4].i = swap.i;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+L60:
+ ;
+ }
+ }
+
+/* ==== If there are only two shifts, then use */
+/* . only one. ==== */
+
+ if (kbot - ks + 1 == 2) {
+ i__2 = kbot;
+ i__3 = kbot + kbot * h_dim1;
+ q__2.r = w[i__2].r - h__[i__3].r, q__2.i = w[i__2].i -
+ h__[i__3].i;
+ q__1.r = q__2.r, q__1.i = q__2.i;
+ i__4 = kbot - 1;
+ i__5 = kbot + kbot * h_dim1;
+ q__4.r = w[i__4].r - h__[i__5].r, q__4.i = w[i__4].i -
+ h__[i__5].i;
+ q__3.r = q__4.r, q__3.i = q__4.i;
+ if ((r__1 = q__1.r, dabs(r__1)) + (r__2 = r_imag(&q__1),
+ dabs(r__2)) < (r__3 = q__3.r, dabs(r__3)) + (r__4
+ = r_imag(&q__3), dabs(r__4))) {
+ i__2 = kbot - 1;
+ i__3 = kbot;
+ w[i__2].r = w[i__3].r, w[i__2].i = w[i__3].i;
+ } else {
+ i__2 = kbot;
+ i__3 = kbot - 1;
+ w[i__2].r = w[i__3].r, w[i__2].i = w[i__3].i;
+ }
+ }
+
+/* ==== Use up to NS of the the smallest magnatiude */
+/* . shifts. If there aren't NS shifts available, */
+/* . then use them all, possibly dropping one to */
+/* . make the number of shifts even. ==== */
+
+/* Computing MIN */
+ i__2 = ns, i__3 = kbot - ks + 1;
+ ns = min(i__2,i__3);
+ ns -= ns % 2;
+ ks = kbot - ns + 1;
+
+/* ==== Small-bulge multi-shift QR sweep: */
+/* . split workspace under the subdiagonal into */
+/* . - a KDU-by-KDU work array U in the lower */
+/* . left-hand-corner, */
+/* . - a KDU-by-at-least-KDU-but-more-is-better */
+/* . (KDU-by-NHo) horizontal work array WH along */
+/* . the bottom edge, */
+/* . - and an at-least-KDU-but-more-is-better-by-KDU */
+/* . (NVE-by-KDU) vertical work WV arrow along */
+/* . the left-hand-edge. ==== */
+
+ kdu = ns * 3 - 3;
+ ku = *n - kdu + 1;
+ kwh = kdu + 1;
+ nho = *n - kdu - 3 - (kdu + 1) + 1;
+ kwv = kdu + 4;
+ nve = *n - kdu - kwv + 1;
+
+/* ==== Small-bulge multi-shift QR sweep ==== */
+
+ claqr5_(wantt, wantz, &kacc22, n, &ktop, &kbot, &ns, &w[ks], &
+ h__[h_offset], ldh, iloz, ihiz, &z__[z_offset], ldz, &
+ work[1], &c__3, &h__[ku + h_dim1], ldh, &nve, &h__[
+ kwv + h_dim1], ldh, &nho, &h__[ku + kwh * h_dim1],
+ ldh);
+ }
+
+/* ==== Note progress (or the lack of it). ==== */
+
+ if (ld > 0) {
+ ndfl = 1;
+ } else {
+ ++ndfl;
+ }
+
+/* ==== End of main loop ==== */
+/* L70: */
+ }
+
+/* ==== Iteration limit exceeded. Set INFO to show where */
+/* . the problem occurred and exit. ==== */
+
+ *info = kbot;
+L80:
+ ;
+ }
+
+/* ==== Return the optimal value of LWORK. ==== */
+
+ r__1 = (real) lwkopt;
+ q__1.r = r__1, q__1.i = 0.f;
+ work[1].r = q__1.r, work[1].i = q__1.i;
+
+/* ==== End of CLAQR0 ==== */
+
+ return 0;
+} /* claqr0_ */
diff --git a/contrib/libs/clapack/claqr1.c b/contrib/libs/clapack/claqr1.c
new file mode 100644
index 0000000000..3f370be67e
--- /dev/null
+++ b/contrib/libs/clapack/claqr1.c
@@ -0,0 +1,197 @@
+/* claqr1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int claqr1_(integer *n, complex *h__, integer *ldh, complex *
+ s1, complex *s2, complex *v)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2, r__3, r__4, r__5, r__6;
+ complex q__1, q__2, q__3, q__4, q__5, q__6, q__7, q__8;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ real s;
+ complex h21s, h31s;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Given a 2-by-2 or 3-by-3 matrix H, CLAQR1 sets v to a */
+/* scalar multiple of the first column of the product */
+
+/* (*) K = (H - s1*I)*(H - s2*I) */
+
+/* scaling to avoid overflows and most underflows. */
+
+/* This is useful for starting double implicit shift bulges */
+/* in the QR algorithm. */
+
+
+/* N (input) integer */
+/* Order of the matrix H. N must be either 2 or 3. */
+
+/* H (input) COMPLEX array of dimension (LDH,N) */
+/* The 2-by-2 or 3-by-3 matrix H in (*). */
+
+/* LDH (input) integer */
+/* The leading dimension of H as declared in */
+/* the calling procedure. LDH.GE.N */
+
+/* S1 (input) COMPLEX */
+/* S2 S1 and S2 are the shifts defining K in (*) above. */
+
+/* V (output) COMPLEX array of dimension N */
+/* A scalar multiple of the first column of the */
+/* matrix K in (*). */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --v;
+
+ /* Function Body */
+ if (*n == 2) {
+ i__1 = h_dim1 + 1;
+ q__2.r = h__[i__1].r - s2->r, q__2.i = h__[i__1].i - s2->i;
+ q__1.r = q__2.r, q__1.i = q__2.i;
+ i__2 = h_dim1 + 2;
+ s = (r__1 = q__1.r, dabs(r__1)) + (r__2 = r_imag(&q__1), dabs(r__2))
+ + ((r__3 = h__[i__2].r, dabs(r__3)) + (r__4 = r_imag(&h__[
+ h_dim1 + 2]), dabs(r__4)));
+ if (s == 0.f) {
+ v[1].r = 0.f, v[1].i = 0.f;
+ v[2].r = 0.f, v[2].i = 0.f;
+ } else {
+ i__1 = h_dim1 + 2;
+ q__1.r = h__[i__1].r / s, q__1.i = h__[i__1].i / s;
+ h21s.r = q__1.r, h21s.i = q__1.i;
+ i__1 = (h_dim1 << 1) + 1;
+ q__2.r = h21s.r * h__[i__1].r - h21s.i * h__[i__1].i, q__2.i =
+ h21s.r * h__[i__1].i + h21s.i * h__[i__1].r;
+ i__2 = h_dim1 + 1;
+ q__4.r = h__[i__2].r - s1->r, q__4.i = h__[i__2].i - s1->i;
+ i__3 = h_dim1 + 1;
+ q__6.r = h__[i__3].r - s2->r, q__6.i = h__[i__3].i - s2->i;
+ q__5.r = q__6.r / s, q__5.i = q__6.i / s;
+ q__3.r = q__4.r * q__5.r - q__4.i * q__5.i, q__3.i = q__4.r *
+ q__5.i + q__4.i * q__5.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ v[1].r = q__1.r, v[1].i = q__1.i;
+ i__1 = h_dim1 + 1;
+ i__2 = (h_dim1 << 1) + 2;
+ q__4.r = h__[i__1].r + h__[i__2].r, q__4.i = h__[i__1].i + h__[
+ i__2].i;
+ q__3.r = q__4.r - s1->r, q__3.i = q__4.i - s1->i;
+ q__2.r = q__3.r - s2->r, q__2.i = q__3.i - s2->i;
+ q__1.r = h21s.r * q__2.r - h21s.i * q__2.i, q__1.i = h21s.r *
+ q__2.i + h21s.i * q__2.r;
+ v[2].r = q__1.r, v[2].i = q__1.i;
+ }
+ } else {
+ i__1 = h_dim1 + 1;
+ q__2.r = h__[i__1].r - s2->r, q__2.i = h__[i__1].i - s2->i;
+ q__1.r = q__2.r, q__1.i = q__2.i;
+ i__2 = h_dim1 + 2;
+ i__3 = h_dim1 + 3;
+ s = (r__1 = q__1.r, dabs(r__1)) + (r__2 = r_imag(&q__1), dabs(r__2))
+ + ((r__3 = h__[i__2].r, dabs(r__3)) + (r__4 = r_imag(&h__[
+ h_dim1 + 2]), dabs(r__4))) + ((r__5 = h__[i__3].r, dabs(r__5))
+ + (r__6 = r_imag(&h__[h_dim1 + 3]), dabs(r__6)));
+ if (s == 0.f) {
+ v[1].r = 0.f, v[1].i = 0.f;
+ v[2].r = 0.f, v[2].i = 0.f;
+ v[3].r = 0.f, v[3].i = 0.f;
+ } else {
+ i__1 = h_dim1 + 2;
+ q__1.r = h__[i__1].r / s, q__1.i = h__[i__1].i / s;
+ h21s.r = q__1.r, h21s.i = q__1.i;
+ i__1 = h_dim1 + 3;
+ q__1.r = h__[i__1].r / s, q__1.i = h__[i__1].i / s;
+ h31s.r = q__1.r, h31s.i = q__1.i;
+ i__1 = h_dim1 + 1;
+ q__4.r = h__[i__1].r - s1->r, q__4.i = h__[i__1].i - s1->i;
+ i__2 = h_dim1 + 1;
+ q__6.r = h__[i__2].r - s2->r, q__6.i = h__[i__2].i - s2->i;
+ q__5.r = q__6.r / s, q__5.i = q__6.i / s;
+ q__3.r = q__4.r * q__5.r - q__4.i * q__5.i, q__3.i = q__4.r *
+ q__5.i + q__4.i * q__5.r;
+ i__3 = (h_dim1 << 1) + 1;
+ q__7.r = h__[i__3].r * h21s.r - h__[i__3].i * h21s.i, q__7.i =
+ h__[i__3].r * h21s.i + h__[i__3].i * h21s.r;
+ q__2.r = q__3.r + q__7.r, q__2.i = q__3.i + q__7.i;
+ i__4 = h_dim1 * 3 + 1;
+ q__8.r = h__[i__4].r * h31s.r - h__[i__4].i * h31s.i, q__8.i =
+ h__[i__4].r * h31s.i + h__[i__4].i * h31s.r;
+ q__1.r = q__2.r + q__8.r, q__1.i = q__2.i + q__8.i;
+ v[1].r = q__1.r, v[1].i = q__1.i;
+ i__1 = h_dim1 + 1;
+ i__2 = (h_dim1 << 1) + 2;
+ q__5.r = h__[i__1].r + h__[i__2].r, q__5.i = h__[i__1].i + h__[
+ i__2].i;
+ q__4.r = q__5.r - s1->r, q__4.i = q__5.i - s1->i;
+ q__3.r = q__4.r - s2->r, q__3.i = q__4.i - s2->i;
+ q__2.r = h21s.r * q__3.r - h21s.i * q__3.i, q__2.i = h21s.r *
+ q__3.i + h21s.i * q__3.r;
+ i__3 = h_dim1 * 3 + 2;
+ q__6.r = h__[i__3].r * h31s.r - h__[i__3].i * h31s.i, q__6.i =
+ h__[i__3].r * h31s.i + h__[i__3].i * h31s.r;
+ q__1.r = q__2.r + q__6.r, q__1.i = q__2.i + q__6.i;
+ v[2].r = q__1.r, v[2].i = q__1.i;
+ i__1 = h_dim1 + 1;
+ i__2 = h_dim1 * 3 + 3;
+ q__5.r = h__[i__1].r + h__[i__2].r, q__5.i = h__[i__1].i + h__[
+ i__2].i;
+ q__4.r = q__5.r - s1->r, q__4.i = q__5.i - s1->i;
+ q__3.r = q__4.r - s2->r, q__3.i = q__4.i - s2->i;
+ q__2.r = h31s.r * q__3.r - h31s.i * q__3.i, q__2.i = h31s.r *
+ q__3.i + h31s.i * q__3.r;
+ i__3 = (h_dim1 << 1) + 3;
+ q__6.r = h21s.r * h__[i__3].r - h21s.i * h__[i__3].i, q__6.i =
+ h21s.r * h__[i__3].i + h21s.i * h__[i__3].r;
+ q__1.r = q__2.r + q__6.r, q__1.i = q__2.i + q__6.i;
+ v[3].r = q__1.r, v[3].i = q__1.i;
+ }
+ }
+ return 0;
+} /* claqr1_ */
diff --git a/contrib/libs/clapack/claqr2.c b/contrib/libs/clapack/claqr2.c
new file mode 100644
index 0000000000..ec288cfeb6
--- /dev/null
+++ b/contrib/libs/clapack/claqr2.c
@@ -0,0 +1,603 @@
+/* claqr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static logical c_true = TRUE_;
+
+/* Subroutine */ int claqr2_(logical *wantt, logical *wantz, integer *n,
+ integer *ktop, integer *kbot, integer *nw, complex *h__, integer *ldh,
+ integer *iloz, integer *ihiz, complex *z__, integer *ldz, integer *
+ ns, integer *nd, complex *sh, complex *v, integer *ldv, integer *nh,
+ complex *t, integer *ldt, integer *nv, complex *wv, integer *ldwv,
+ complex *work, integer *lwork)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, t_dim1, t_offset, v_dim1, v_offset, wv_dim1,
+ wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2, r__3, r__4, r__5, r__6;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j;
+ complex s;
+ integer jw;
+ real foo;
+ integer kln;
+ complex tau;
+ integer knt;
+ real ulp;
+ integer lwk1, lwk2;
+ complex beta;
+ integer kcol, info, ifst, ilst, ltop, krow;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+, integer *, complex *, complex *, integer *, complex *),
+ cgemm_(char *, char *, integer *, integer *, integer *, complex *,
+ complex *, integer *, complex *, integer *, complex *, complex *,
+ integer *), ccopy_(integer *, complex *, integer
+ *, complex *, integer *);
+ integer infqr, kwtop;
+ extern /* Subroutine */ int slabad_(real *, real *), cgehrd_(integer *,
+ integer *, integer *, complex *, integer *, complex *, complex *,
+ integer *, integer *), clarfg_(integer *, complex *, complex *,
+ integer *, complex *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clahqr_(logical *, logical *, integer *,
+ integer *, integer *, complex *, integer *, complex *, integer *,
+ integer *, complex *, integer *, integer *), clacpy_(char *,
+ integer *, integer *, complex *, integer *, complex *, integer *), claset_(char *, integer *, integer *, complex *, complex
+ *, complex *, integer *);
+ real safmin, safmax;
+ extern /* Subroutine */ int ctrexc_(char *, integer *, complex *, integer
+ *, complex *, integer *, integer *, integer *, integer *),
+ cunmhr_(char *, char *, integer *, integer *, integer *, integer
+ *, complex *, integer *, complex *, complex *, integer *, complex
+ *, integer *, integer *);
+ real smlnum;
+ integer lwkopt;
+
+
+/* -- LAPACK auxiliary routine (version 3.2.1) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* -- April 2009 -- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* This subroutine is identical to CLAQR3 except that it avoids */
+/* recursion by calling CLAHQR instead of CLAQR4. */
+
+
+/* ****************************************************************** */
+/* Aggressive early deflation: */
+
+/* This subroutine accepts as input an upper Hessenberg matrix */
+/* H and performs an unitary similarity transformation */
+/* designed to detect and deflate fully converged eigenvalues from */
+/* a trailing principal submatrix. On output H has been over- */
+/* written by a new Hessenberg matrix that is a perturbation of */
+/* an unitary similarity transformation of H. It is to be */
+/* hoped that the final version of H has many zero subdiagonal */
+/* entries. */
+
+/* ****************************************************************** */
+/* WANTT (input) LOGICAL */
+/* If .TRUE., then the Hessenberg matrix H is fully updated */
+/* so that the triangular Schur factor may be */
+/* computed (in cooperation with the calling subroutine). */
+/* If .FALSE., then only enough of H is updated to preserve */
+/* the eigenvalues. */
+
+/* WANTZ (input) LOGICAL */
+/* If .TRUE., then the unitary matrix Z is updated so */
+/* so that the unitary Schur factor may be computed */
+/* (in cooperation with the calling subroutine). */
+/* If .FALSE., then Z is not referenced. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H and (if WANTZ is .TRUE.) the */
+/* order of the unitary matrix Z. */
+
+/* KTOP (input) INTEGER */
+/* It is assumed that either KTOP = 1 or H(KTOP,KTOP-1)=0. */
+/* KBOT and KTOP together determine an isolated block */
+/* along the diagonal of the Hessenberg matrix. */
+
+/* KBOT (input) INTEGER */
+/* It is assumed without a check that either */
+/* KBOT = N or H(KBOT+1,KBOT)=0. KBOT and KTOP together */
+/* determine an isolated block along the diagonal of the */
+/* Hessenberg matrix. */
+
+/* NW (input) INTEGER */
+/* Deflation window size. 1 .LE. NW .LE. (KBOT-KTOP+1). */
+
+/* H (input/output) COMPLEX array, dimension (LDH,N) */
+/* On input the initial N-by-N section of H stores the */
+/* Hessenberg matrix undergoing aggressive early deflation. */
+/* On output H has been transformed by a unitary */
+/* similarity transformation, perturbed, and the returned */
+/* to Hessenberg form that (it is to be hoped) has some */
+/* zero subdiagonal entries. */
+
+/* LDH (input) integer */
+/* Leading dimension of H just as declared in the calling */
+/* subroutine. N .LE. LDH */
+
+/* ILOZ (input) INTEGER */
+/* IHIZ (input) INTEGER */
+/* Specify the rows of Z to which transformations must be */
+/* applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. */
+
+/* Z (input/output) COMPLEX array, dimension (LDZ,N) */
+/* IF WANTZ is .TRUE., then on output, the unitary */
+/* similarity transformation mentioned above has been */
+/* accumulated into Z(ILOZ:IHIZ,ILO:IHI) from the right. */
+/* If WANTZ is .FALSE., then Z is unreferenced. */
+
+/* LDZ (input) integer */
+/* The leading dimension of Z just as declared in the */
+/* calling subroutine. 1 .LE. LDZ. */
+
+/* NS (output) integer */
+/* The number of unconverged (ie approximate) eigenvalues */
+/* returned in SR and SI that may be used as shifts by the */
+/* calling subroutine. */
+
+/* ND (output) integer */
+/* The number of converged eigenvalues uncovered by this */
+/* subroutine. */
+
+/* SH (output) COMPLEX array, dimension KBOT */
+/* On output, approximate eigenvalues that may */
+/* be used for shifts are stored in SH(KBOT-ND-NS+1) */
+/* through SR(KBOT-ND). Converged eigenvalues are */
+/* stored in SH(KBOT-ND+1) through SH(KBOT). */
+
+/* V (workspace) COMPLEX array, dimension (LDV,NW) */
+/* An NW-by-NW work array. */
+
+/* LDV (input) integer scalar */
+/* The leading dimension of V just as declared in the */
+/* calling subroutine. NW .LE. LDV */
+
+/* NH (input) integer scalar */
+/* The number of columns of T. NH.GE.NW. */
+
+/* T (workspace) COMPLEX array, dimension (LDT,NW) */
+
+/* LDT (input) integer */
+/* The leading dimension of T just as declared in the */
+/* calling subroutine. NW .LE. LDT */
+
+/* NV (input) integer */
+/* The number of rows of work array WV available for */
+/* workspace. NV.GE.NW. */
+
+/* WV (workspace) COMPLEX array, dimension (LDWV,NW) */
+
+/* LDWV (input) integer */
+/* The leading dimension of W just as declared in the */
+/* calling subroutine. NW .LE. LDV */
+
+/* WORK (workspace) COMPLEX array, dimension LWORK. */
+/* On exit, WORK(1) is set to an estimate of the optimal value */
+/* of LWORK for the given values of N, NW, KTOP and KBOT. */
+
+/* LWORK (input) integer */
+/* The dimension of the work array WORK. LWORK = 2*NW */
+/* suffices, but greater efficiency may result from larger */
+/* values of LWORK. */
+
+/* If LWORK = -1, then a workspace query is assumed; CLAQR2 */
+/* only estimates the optimal workspace size for the given */
+/* values of N, NW, KTOP and KBOT. The estimate is returned */
+/* in WORK(1). No error message related to LWORK is issued */
+/* by XERBLA. Neither H nor Z are accessed. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* ==== Estimate optimal workspace. ==== */
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --sh;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ wv_dim1 = *ldwv;
+ wv_offset = 1 + wv_dim1;
+ wv -= wv_offset;
+ --work;
+
+ /* Function Body */
+/* Computing MIN */
+ i__1 = *nw, i__2 = *kbot - *ktop + 1;
+ jw = min(i__1,i__2);
+ if (jw <= 2) {
+ lwkopt = 1;
+ } else {
+
+/* ==== Workspace query call to CGEHRD ==== */
+
+ i__1 = jw - 1;
+ cgehrd_(&jw, &c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], &
+ c_n1, &info);
+ lwk1 = (integer) work[1].r;
+
+/* ==== Workspace query call to CUNMHR ==== */
+
+ i__1 = jw - 1;
+ cunmhr_("R", "N", &jw, &jw, &c__1, &i__1, &t[t_offset], ldt, &work[1],
+ &v[v_offset], ldv, &work[1], &c_n1, &info);
+ lwk2 = (integer) work[1].r;
+
+/* ==== Optimal workspace ==== */
+
+ lwkopt = jw + max(lwk1,lwk2);
+ }
+
+/* ==== Quick return in case of workspace query. ==== */
+
+ if (*lwork == -1) {
+ r__1 = (real) lwkopt;
+ q__1.r = r__1, q__1.i = 0.f;
+ work[1].r = q__1.r, work[1].i = q__1.i;
+ return 0;
+ }
+
+/* ==== Nothing to do ... */
+/* ... for an empty active block ... ==== */
+ *ns = 0;
+ *nd = 0;
+ work[1].r = 1.f, work[1].i = 0.f;
+ if (*ktop > *kbot) {
+ return 0;
+ }
+/* ... nor for an empty deflation window. ==== */
+ if (*nw < 1) {
+ return 0;
+ }
+
+/* ==== Machine constants ==== */
+
+ safmin = slamch_("SAFE MINIMUM");
+ safmax = 1.f / safmin;
+ slabad_(&safmin, &safmax);
+ ulp = slamch_("PRECISION");
+ smlnum = safmin * ((real) (*n) / ulp);
+
+/* ==== Setup deflation window ==== */
+
+/* Computing MIN */
+ i__1 = *nw, i__2 = *kbot - *ktop + 1;
+ jw = min(i__1,i__2);
+ kwtop = *kbot - jw + 1;
+ if (kwtop == *ktop) {
+ s.r = 0.f, s.i = 0.f;
+ } else {
+ i__1 = kwtop + (kwtop - 1) * h_dim1;
+ s.r = h__[i__1].r, s.i = h__[i__1].i;
+ }
+
+ if (*kbot == kwtop) {
+
+/* ==== 1-by-1 deflation window: not much to do ==== */
+
+ i__1 = kwtop;
+ i__2 = kwtop + kwtop * h_dim1;
+ sh[i__1].r = h__[i__2].r, sh[i__1].i = h__[i__2].i;
+ *ns = 1;
+ *nd = 0;
+/* Computing MAX */
+ i__1 = kwtop + kwtop * h_dim1;
+ r__5 = smlnum, r__6 = ulp * ((r__1 = h__[i__1].r, dabs(r__1)) + (r__2
+ = r_imag(&h__[kwtop + kwtop * h_dim1]), dabs(r__2)));
+ if ((r__3 = s.r, dabs(r__3)) + (r__4 = r_imag(&s), dabs(r__4)) <=
+ dmax(r__5,r__6)) {
+ *ns = 0;
+ *nd = 1;
+ if (kwtop > *ktop) {
+ i__1 = kwtop + (kwtop - 1) * h_dim1;
+ h__[i__1].r = 0.f, h__[i__1].i = 0.f;
+ }
+ }
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+/* ==== Convert to spike-triangular form. (In case of a */
+/* . rare QR failure, this routine continues to do */
+/* . aggressive early deflation using that part of */
+/* . the deflation window that converged using INFQR */
+/* . here and there to keep track.) ==== */
+
+ clacpy_("U", &jw, &jw, &h__[kwtop + kwtop * h_dim1], ldh, &t[t_offset],
+ ldt);
+ i__1 = jw - 1;
+ i__2 = *ldh + 1;
+ i__3 = *ldt + 1;
+ ccopy_(&i__1, &h__[kwtop + 1 + kwtop * h_dim1], &i__2, &t[t_dim1 + 2], &
+ i__3);
+
+ claset_("A", &jw, &jw, &c_b1, &c_b2, &v[v_offset], ldv);
+ clahqr_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sh[kwtop],
+ &c__1, &jw, &v[v_offset], ldv, &infqr);
+
+/* ==== Deflation detection loop ==== */
+
+ *ns = jw;
+ ilst = infqr + 1;
+ i__1 = jw;
+ for (knt = infqr + 1; knt <= i__1; ++knt) {
+
+/* ==== Small spike tip deflation test ==== */
+
+ i__2 = *ns + *ns * t_dim1;
+ foo = (r__1 = t[i__2].r, dabs(r__1)) + (r__2 = r_imag(&t[*ns + *ns *
+ t_dim1]), dabs(r__2));
+ if (foo == 0.f) {
+ foo = (r__1 = s.r, dabs(r__1)) + (r__2 = r_imag(&s), dabs(r__2));
+ }
+ i__2 = *ns * v_dim1 + 1;
+/* Computing MAX */
+ r__5 = smlnum, r__6 = ulp * foo;
+ if (((r__1 = s.r, dabs(r__1)) + (r__2 = r_imag(&s), dabs(r__2))) * ((
+ r__3 = v[i__2].r, dabs(r__3)) + (r__4 = r_imag(&v[*ns *
+ v_dim1 + 1]), dabs(r__4))) <= dmax(r__5,r__6)) {
+
+/* ==== One more converged eigenvalue ==== */
+
+ --(*ns);
+ } else {
+
+/* ==== One undeflatable eigenvalue. Move it up out of the */
+/* . way. (CTREXC can not fail in this case.) ==== */
+
+ ifst = *ns;
+ ctrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &
+ ilst, &info);
+ ++ilst;
+ }
+/* L10: */
+ }
+
+/* ==== Return to Hessenberg form ==== */
+
+ if (*ns == 0) {
+ s.r = 0.f, s.i = 0.f;
+ }
+
+ if (*ns < jw) {
+
+/* ==== sorting the diagonal of T improves accuracy for */
+/* . graded matrices. ==== */
+
+ i__1 = *ns;
+ for (i__ = infqr + 1; i__ <= i__1; ++i__) {
+ ifst = i__;
+ i__2 = *ns;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ i__3 = j + j * t_dim1;
+ i__4 = ifst + ifst * t_dim1;
+ if ((r__1 = t[i__3].r, dabs(r__1)) + (r__2 = r_imag(&t[j + j *
+ t_dim1]), dabs(r__2)) > (r__3 = t[i__4].r, dabs(r__3)
+ ) + (r__4 = r_imag(&t[ifst + ifst * t_dim1]), dabs(
+ r__4))) {
+ ifst = j;
+ }
+/* L20: */
+ }
+ ilst = i__;
+ if (ifst != ilst) {
+ ctrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst,
+ &ilst, &info);
+ }
+/* L30: */
+ }
+ }
+
+/* ==== Restore shift/eigenvalue array from T ==== */
+
+ i__1 = jw;
+ for (i__ = infqr + 1; i__ <= i__1; ++i__) {
+ i__2 = kwtop + i__ - 1;
+ i__3 = i__ + i__ * t_dim1;
+ sh[i__2].r = t[i__3].r, sh[i__2].i = t[i__3].i;
+/* L40: */
+ }
+
+
+ if (*ns < jw || s.r == 0.f && s.i == 0.f) {
+ if (*ns > 1 && (s.r != 0.f || s.i != 0.f)) {
+
+/* ==== Reflect spike back into lower triangle ==== */
+
+ ccopy_(ns, &v[v_offset], ldv, &work[1], &c__1);
+ i__1 = *ns;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ r_cnjg(&q__1, &work[i__]);
+ work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+/* L50: */
+ }
+ beta.r = work[1].r, beta.i = work[1].i;
+ clarfg_(ns, &beta, &work[2], &c__1, &tau);
+ work[1].r = 1.f, work[1].i = 0.f;
+
+ i__1 = jw - 2;
+ i__2 = jw - 2;
+ claset_("L", &i__1, &i__2, &c_b1, &c_b1, &t[t_dim1 + 3], ldt);
+
+ r_cnjg(&q__1, &tau);
+ clarf_("L", ns, &jw, &work[1], &c__1, &q__1, &t[t_offset], ldt, &
+ work[jw + 1]);
+ clarf_("R", ns, ns, &work[1], &c__1, &tau, &t[t_offset], ldt, &
+ work[jw + 1]);
+ clarf_("R", &jw, ns, &work[1], &c__1, &tau, &v[v_offset], ldv, &
+ work[jw + 1]);
+
+ i__1 = *lwork - jw;
+ cgehrd_(&jw, &c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1]
+, &i__1, &info);
+ }
+
+/* ==== Copy updated reduced window into place ==== */
+
+ if (kwtop > 1) {
+ i__1 = kwtop + (kwtop - 1) * h_dim1;
+ r_cnjg(&q__2, &v[v_dim1 + 1]);
+ q__1.r = s.r * q__2.r - s.i * q__2.i, q__1.i = s.r * q__2.i + s.i
+ * q__2.r;
+ h__[i__1].r = q__1.r, h__[i__1].i = q__1.i;
+ }
+ clacpy_("U", &jw, &jw, &t[t_offset], ldt, &h__[kwtop + kwtop * h_dim1]
+, ldh);
+ i__1 = jw - 1;
+ i__2 = *ldt + 1;
+ i__3 = *ldh + 1;
+ ccopy_(&i__1, &t[t_dim1 + 2], &i__2, &h__[kwtop + 1 + kwtop * h_dim1],
+ &i__3);
+
+/* ==== Accumulate orthogonal matrix in order update */
+/* . H and Z, if requested. ==== */
+
+ if (*ns > 1 && (s.r != 0.f || s.i != 0.f)) {
+ i__1 = *lwork - jw;
+ cunmhr_("R", "N", &jw, ns, &c__1, ns, &t[t_offset], ldt, &work[1],
+ &v[v_offset], ldv, &work[jw + 1], &i__1, &info);
+ }
+
+/* ==== Update vertical slab in H ==== */
+
+ if (*wantt) {
+ ltop = 1;
+ } else {
+ ltop = *ktop;
+ }
+ i__1 = kwtop - 1;
+ i__2 = *nv;
+ for (krow = ltop; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *nv, i__4 = kwtop - krow;
+ kln = min(i__3,i__4);
+ cgemm_("N", "N", &kln, &jw, &jw, &c_b2, &h__[krow + kwtop *
+ h_dim1], ldh, &v[v_offset], ldv, &c_b1, &wv[wv_offset],
+ ldwv);
+ clacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &h__[krow + kwtop *
+ h_dim1], ldh);
+/* L60: */
+ }
+
+/* ==== Update horizontal slab in H ==== */
+
+ if (*wantt) {
+ i__2 = *n;
+ i__1 = *nh;
+ for (kcol = *kbot + 1; i__1 < 0 ? kcol >= i__2 : kcol <= i__2;
+ kcol += i__1) {
+/* Computing MIN */
+ i__3 = *nh, i__4 = *n - kcol + 1;
+ kln = min(i__3,i__4);
+ cgemm_("C", "N", &jw, &kln, &jw, &c_b2, &v[v_offset], ldv, &
+ h__[kwtop + kcol * h_dim1], ldh, &c_b1, &t[t_offset],
+ ldt);
+ clacpy_("A", &jw, &kln, &t[t_offset], ldt, &h__[kwtop + kcol *
+ h_dim1], ldh);
+/* L70: */
+ }
+ }
+
+/* ==== Update vertical slab in Z ==== */
+
+ if (*wantz) {
+ i__1 = *ihiz;
+ i__2 = *nv;
+ for (krow = *iloz; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *nv, i__4 = *ihiz - krow + 1;
+ kln = min(i__3,i__4);
+ cgemm_("N", "N", &kln, &jw, &jw, &c_b2, &z__[krow + kwtop *
+ z_dim1], ldz, &v[v_offset], ldv, &c_b1, &wv[wv_offset]
+, ldwv);
+ clacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &z__[krow +
+ kwtop * z_dim1], ldz);
+/* L80: */
+ }
+ }
+ }
+
+/* ==== Return the number of deflations ... ==== */
+
+ *nd = jw - *ns;
+
+/* ==== ... and the number of shifts. (Subtracting */
+/* . INFQR from the spike length takes care */
+/* . of the case of a rare QR failure while */
+/* . calculating eigenvalues of the deflation */
+/* . window.) ==== */
+
+ *ns -= infqr;
+
+/* ==== Return optimal workspace. ==== */
+
+ r__1 = (real) lwkopt;
+ q__1.r = r__1, q__1.i = 0.f;
+ work[1].r = q__1.r, work[1].i = q__1.i;
+
+/* ==== End of CLAQR2 ==== */
+
+ return 0;
+} /* claqr2_ */
diff --git a/contrib/libs/clapack/claqr3.c b/contrib/libs/clapack/claqr3.c
new file mode 100644
index 0000000000..0a3044a5cc
--- /dev/null
+++ b/contrib/libs/clapack/claqr3.c
@@ -0,0 +1,620 @@
+/* claqr3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static logical c_true = TRUE_;
+static integer c__12 = 12;
+
+/* Subroutine */ int claqr3_(logical *wantt, logical *wantz, integer *n,
+ integer *ktop, integer *kbot, integer *nw, complex *h__, integer *ldh,
+ integer *iloz, integer *ihiz, complex *z__, integer *ldz, integer *
+ ns, integer *nd, complex *sh, complex *v, integer *ldv, integer *nh,
+ complex *t, integer *ldt, integer *nv, complex *wv, integer *ldwv,
+ complex *work, integer *lwork)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, t_dim1, t_offset, v_dim1, v_offset, wv_dim1,
+ wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2, r__3, r__4, r__5, r__6;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j;
+ complex s;
+ integer jw;
+ real foo;
+ integer kln;
+ complex tau;
+ integer knt;
+ real ulp;
+ integer lwk1, lwk2, lwk3;
+ complex beta;
+ integer kcol, info, nmin, ifst, ilst, ltop, krow;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+, integer *, complex *, complex *, integer *, complex *),
+ cgemm_(char *, char *, integer *, integer *, integer *, complex *,
+ complex *, integer *, complex *, integer *, complex *, complex *,
+ integer *), ccopy_(integer *, complex *, integer
+ *, complex *, integer *);
+ integer infqr, kwtop;
+ extern /* Subroutine */ int claqr4_(logical *, logical *, integer *,
+ integer *, integer *, complex *, integer *, complex *, integer *,
+ integer *, complex *, integer *, complex *, integer *, integer *),
+ slabad_(real *, real *), cgehrd_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, integer *)
+ , clarfg_(integer *, complex *, complex *, integer *, complex *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clahqr_(logical *, logical *, integer *,
+ integer *, integer *, complex *, integer *, complex *, integer *,
+ integer *, complex *, integer *, integer *), clacpy_(char *,
+ integer *, integer *, complex *, integer *, complex *, integer *), claset_(char *, integer *, integer *, complex *, complex
+ *, complex *, integer *);
+ real safmin;
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ real safmax;
+ extern /* Subroutine */ int ctrexc_(char *, integer *, complex *, integer
+ *, complex *, integer *, integer *, integer *, integer *),
+ cunmhr_(char *, char *, integer *, integer *, integer *, integer
+ *, complex *, integer *, complex *, complex *, integer *, complex
+ *, integer *, integer *);
+ real smlnum;
+ integer lwkopt;
+
+
+/* -- LAPACK auxiliary routine (version 3.2.1) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* -- April 2009 -- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* ****************************************************************** */
+/* Aggressive early deflation: */
+
+/* This subroutine accepts as input an upper Hessenberg matrix */
+/* H and performs an unitary similarity transformation */
+/* designed to detect and deflate fully converged eigenvalues from */
+/* a trailing principal submatrix. On output H has been over- */
+/* written by a new Hessenberg matrix that is a perturbation of */
+/* an unitary similarity transformation of H. It is to be */
+/* hoped that the final version of H has many zero subdiagonal */
+/* entries. */
+
+/* ****************************************************************** */
+/* WANTT (input) LOGICAL */
+/* If .TRUE., then the Hessenberg matrix H is fully updated */
+/* so that the triangular Schur factor may be */
+/* computed (in cooperation with the calling subroutine). */
+/* If .FALSE., then only enough of H is updated to preserve */
+/* the eigenvalues. */
+
+/* WANTZ (input) LOGICAL */
+/* If .TRUE., then the unitary matrix Z is updated so */
+/* so that the unitary Schur factor may be computed */
+/* (in cooperation with the calling subroutine). */
+/* If .FALSE., then Z is not referenced. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H and (if WANTZ is .TRUE.) the */
+/* order of the unitary matrix Z. */
+
+/* KTOP (input) INTEGER */
+/* It is assumed that either KTOP = 1 or H(KTOP,KTOP-1)=0. */
+/* KBOT and KTOP together determine an isolated block */
+/* along the diagonal of the Hessenberg matrix. */
+
+/* KBOT (input) INTEGER */
+/* It is assumed without a check that either */
+/* KBOT = N or H(KBOT+1,KBOT)=0. KBOT and KTOP together */
+/* determine an isolated block along the diagonal of the */
+/* Hessenberg matrix. */
+
+/* NW (input) INTEGER */
+/* Deflation window size. 1 .LE. NW .LE. (KBOT-KTOP+1). */
+
+/* H (input/output) COMPLEX array, dimension (LDH,N) */
+/* On input the initial N-by-N section of H stores the */
+/* Hessenberg matrix undergoing aggressive early deflation. */
+/* On output H has been transformed by a unitary */
+/* similarity transformation, perturbed, and the returned */
+/* to Hessenberg form that (it is to be hoped) has some */
+/* zero subdiagonal entries. */
+
+/* LDH (input) integer */
+/* Leading dimension of H just as declared in the calling */
+/* subroutine. N .LE. LDH */
+
+/* ILOZ (input) INTEGER */
+/* IHIZ (input) INTEGER */
+/* Specify the rows of Z to which transformations must be */
+/* applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. */
+
+/* Z (input/output) COMPLEX array, dimension (LDZ,N) */
+/* IF WANTZ is .TRUE., then on output, the unitary */
+/* similarity transformation mentioned above has been */
+/* accumulated into Z(ILOZ:IHIZ,ILO:IHI) from the right. */
+/* If WANTZ is .FALSE., then Z is unreferenced. */
+
+/* LDZ (input) integer */
+/* The leading dimension of Z just as declared in the */
+/* calling subroutine. 1 .LE. LDZ. */
+
+/* NS (output) integer */
+/* The number of unconverged (ie approximate) eigenvalues */
+/* returned in SR and SI that may be used as shifts by the */
+/* calling subroutine. */
+
+/* ND (output) integer */
+/* The number of converged eigenvalues uncovered by this */
+/* subroutine. */
+
+/* SH (output) COMPLEX array, dimension KBOT */
+/* On output, approximate eigenvalues that may */
+/* be used for shifts are stored in SH(KBOT-ND-NS+1) */
+/* through SR(KBOT-ND). Converged eigenvalues are */
+/* stored in SH(KBOT-ND+1) through SH(KBOT). */
+
+/* V (workspace) COMPLEX array, dimension (LDV,NW) */
+/* An NW-by-NW work array. */
+
+/* LDV (input) integer scalar */
+/* The leading dimension of V just as declared in the */
+/* calling subroutine. NW .LE. LDV */
+
+/* NH (input) integer scalar */
+/* The number of columns of T. NH.GE.NW. */
+
+/* T (workspace) COMPLEX array, dimension (LDT,NW) */
+
+/* LDT (input) integer */
+/* The leading dimension of T just as declared in the */
+/* calling subroutine. NW .LE. LDT */
+
+/* NV (input) integer */
+/* The number of rows of work array WV available for */
+/* workspace. NV.GE.NW. */
+
+/* WV (workspace) COMPLEX array, dimension (LDWV,NW) */
+
+/* LDWV (input) integer */
+/* The leading dimension of W just as declared in the */
+/* calling subroutine. NW .LE. LDV */
+
+/* WORK (workspace) COMPLEX array, dimension LWORK. */
+/* On exit, WORK(1) is set to an estimate of the optimal value */
+/* of LWORK for the given values of N, NW, KTOP and KBOT. */
+
+/* LWORK (input) integer */
+/* The dimension of the work array WORK. LWORK = 2*NW */
+/* suffices, but greater efficiency may result from larger */
+/* values of LWORK. */
+
+/* If LWORK = -1, then a workspace query is assumed; CLAQR3 */
+/* only estimates the optimal workspace size for the given */
+/* values of N, NW, KTOP and KBOT. The estimate is returned */
+/* in WORK(1). No error message related to LWORK is issued */
+/* by XERBLA. Neither H nor Z are accessed. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* ==== Estimate optimal workspace. ==== */
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --sh;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ wv_dim1 = *ldwv;
+ wv_offset = 1 + wv_dim1;
+ wv -= wv_offset;
+ --work;
+
+ /* Function Body */
+/* Computing MIN */
+ i__1 = *nw, i__2 = *kbot - *ktop + 1;
+ jw = min(i__1,i__2);
+ if (jw <= 2) {
+ lwkopt = 1;
+ } else {
+
+/* ==== Workspace query call to CGEHRD ==== */
+
+ i__1 = jw - 1;
+ cgehrd_(&jw, &c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], &
+ c_n1, &info);
+ lwk1 = (integer) work[1].r;
+
+/* ==== Workspace query call to CUNMHR ==== */
+
+ i__1 = jw - 1;
+ cunmhr_("R", "N", &jw, &jw, &c__1, &i__1, &t[t_offset], ldt, &work[1],
+ &v[v_offset], ldv, &work[1], &c_n1, &info);
+ lwk2 = (integer) work[1].r;
+
+/* ==== Workspace query call to CLAQR4 ==== */
+
+ claqr4_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sh[1],
+ &c__1, &jw, &v[v_offset], ldv, &work[1], &c_n1, &infqr);
+ lwk3 = (integer) work[1].r;
+
+/* ==== Optimal workspace ==== */
+
+/* Computing MAX */
+ i__1 = jw + max(lwk1,lwk2);
+ lwkopt = max(i__1,lwk3);
+ }
+
+/* ==== Quick return in case of workspace query. ==== */
+
+ if (*lwork == -1) {
+ r__1 = (real) lwkopt;
+ q__1.r = r__1, q__1.i = 0.f;
+ work[1].r = q__1.r, work[1].i = q__1.i;
+ return 0;
+ }
+
+/* ==== Nothing to do ... */
+/* ... for an empty active block ... ==== */
+ *ns = 0;
+ *nd = 0;
+ work[1].r = 1.f, work[1].i = 0.f;
+ if (*ktop > *kbot) {
+ return 0;
+ }
+/* ... nor for an empty deflation window. ==== */
+ if (*nw < 1) {
+ return 0;
+ }
+
+/* ==== Machine constants ==== */
+
+ safmin = slamch_("SAFE MINIMUM");
+ safmax = 1.f / safmin;
+ slabad_(&safmin, &safmax);
+ ulp = slamch_("PRECISION");
+ smlnum = safmin * ((real) (*n) / ulp);
+
+/* ==== Setup deflation window ==== */
+
+/* Computing MIN */
+ i__1 = *nw, i__2 = *kbot - *ktop + 1;
+ jw = min(i__1,i__2);
+ kwtop = *kbot - jw + 1;
+ if (kwtop == *ktop) {
+ s.r = 0.f, s.i = 0.f;
+ } else {
+ i__1 = kwtop + (kwtop - 1) * h_dim1;
+ s.r = h__[i__1].r, s.i = h__[i__1].i;
+ }
+
+ if (*kbot == kwtop) {
+
+/* ==== 1-by-1 deflation window: not much to do ==== */
+
+ i__1 = kwtop;
+ i__2 = kwtop + kwtop * h_dim1;
+ sh[i__1].r = h__[i__2].r, sh[i__1].i = h__[i__2].i;
+ *ns = 1;
+ *nd = 0;
+/* Computing MAX */
+ i__1 = kwtop + kwtop * h_dim1;
+ r__5 = smlnum, r__6 = ulp * ((r__1 = h__[i__1].r, dabs(r__1)) + (r__2
+ = r_imag(&h__[kwtop + kwtop * h_dim1]), dabs(r__2)));
+ if ((r__3 = s.r, dabs(r__3)) + (r__4 = r_imag(&s), dabs(r__4)) <=
+ dmax(r__5,r__6)) {
+ *ns = 0;
+ *nd = 1;
+ if (kwtop > *ktop) {
+ i__1 = kwtop + (kwtop - 1) * h_dim1;
+ h__[i__1].r = 0.f, h__[i__1].i = 0.f;
+ }
+ }
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+/* ==== Convert to spike-triangular form. (In case of a */
+/* . rare QR failure, this routine continues to do */
+/* . aggressive early deflation using that part of */
+/* . the deflation window that converged using INFQR */
+/* . here and there to keep track.) ==== */
+
+ clacpy_("U", &jw, &jw, &h__[kwtop + kwtop * h_dim1], ldh, &t[t_offset],
+ ldt);
+ i__1 = jw - 1;
+ i__2 = *ldh + 1;
+ i__3 = *ldt + 1;
+ ccopy_(&i__1, &h__[kwtop + 1 + kwtop * h_dim1], &i__2, &t[t_dim1 + 2], &
+ i__3);
+
+ claset_("A", &jw, &jw, &c_b1, &c_b2, &v[v_offset], ldv);
+ nmin = ilaenv_(&c__12, "CLAQR3", "SV", &jw, &c__1, &jw, lwork);
+ if (jw > nmin) {
+ claqr4_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sh[
+ kwtop], &c__1, &jw, &v[v_offset], ldv, &work[1], lwork, &
+ infqr);
+ } else {
+ clahqr_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sh[
+ kwtop], &c__1, &jw, &v[v_offset], ldv, &infqr);
+ }
+
+/* ==== Deflation detection loop ==== */
+
+ *ns = jw;
+ ilst = infqr + 1;
+ i__1 = jw;
+ for (knt = infqr + 1; knt <= i__1; ++knt) {
+
+/* ==== Small spike tip deflation test ==== */
+
+ i__2 = *ns + *ns * t_dim1;
+ foo = (r__1 = t[i__2].r, dabs(r__1)) + (r__2 = r_imag(&t[*ns + *ns *
+ t_dim1]), dabs(r__2));
+ if (foo == 0.f) {
+ foo = (r__1 = s.r, dabs(r__1)) + (r__2 = r_imag(&s), dabs(r__2));
+ }
+ i__2 = *ns * v_dim1 + 1;
+/* Computing MAX */
+ r__5 = smlnum, r__6 = ulp * foo;
+ if (((r__1 = s.r, dabs(r__1)) + (r__2 = r_imag(&s), dabs(r__2))) * ((
+ r__3 = v[i__2].r, dabs(r__3)) + (r__4 = r_imag(&v[*ns *
+ v_dim1 + 1]), dabs(r__4))) <= dmax(r__5,r__6)) {
+
+/* ==== One more converged eigenvalue ==== */
+
+ --(*ns);
+ } else {
+
+/* ==== One undeflatable eigenvalue. Move it up out of the */
+/* . way. (CTREXC can not fail in this case.) ==== */
+
+ ifst = *ns;
+ ctrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &
+ ilst, &info);
+ ++ilst;
+ }
+/* L10: */
+ }
+
+/* ==== Return to Hessenberg form ==== */
+
+ if (*ns == 0) {
+ s.r = 0.f, s.i = 0.f;
+ }
+
+ if (*ns < jw) {
+
+/* ==== sorting the diagonal of T improves accuracy for */
+/* . graded matrices. ==== */
+
+ i__1 = *ns;
+ for (i__ = infqr + 1; i__ <= i__1; ++i__) {
+ ifst = i__;
+ i__2 = *ns;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ i__3 = j + j * t_dim1;
+ i__4 = ifst + ifst * t_dim1;
+ if ((r__1 = t[i__3].r, dabs(r__1)) + (r__2 = r_imag(&t[j + j *
+ t_dim1]), dabs(r__2)) > (r__3 = t[i__4].r, dabs(r__3)
+ ) + (r__4 = r_imag(&t[ifst + ifst * t_dim1]), dabs(
+ r__4))) {
+ ifst = j;
+ }
+/* L20: */
+ }
+ ilst = i__;
+ if (ifst != ilst) {
+ ctrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst,
+ &ilst, &info);
+ }
+/* L30: */
+ }
+ }
+
+/* ==== Restore shift/eigenvalue array from T ==== */
+
+ i__1 = jw;
+ for (i__ = infqr + 1; i__ <= i__1; ++i__) {
+ i__2 = kwtop + i__ - 1;
+ i__3 = i__ + i__ * t_dim1;
+ sh[i__2].r = t[i__3].r, sh[i__2].i = t[i__3].i;
+/* L40: */
+ }
+
+
+ if (*ns < jw || s.r == 0.f && s.i == 0.f) {
+ if (*ns > 1 && (s.r != 0.f || s.i != 0.f)) {
+
+/* ==== Reflect spike back into lower triangle ==== */
+
+ ccopy_(ns, &v[v_offset], ldv, &work[1], &c__1);
+ i__1 = *ns;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ r_cnjg(&q__1, &work[i__]);
+ work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+/* L50: */
+ }
+ beta.r = work[1].r, beta.i = work[1].i;
+ clarfg_(ns, &beta, &work[2], &c__1, &tau);
+ work[1].r = 1.f, work[1].i = 0.f;
+
+ i__1 = jw - 2;
+ i__2 = jw - 2;
+ claset_("L", &i__1, &i__2, &c_b1, &c_b1, &t[t_dim1 + 3], ldt);
+
+ r_cnjg(&q__1, &tau);
+ clarf_("L", ns, &jw, &work[1], &c__1, &q__1, &t[t_offset], ldt, &
+ work[jw + 1]);
+ clarf_("R", ns, ns, &work[1], &c__1, &tau, &t[t_offset], ldt, &
+ work[jw + 1]);
+ clarf_("R", &jw, ns, &work[1], &c__1, &tau, &v[v_offset], ldv, &
+ work[jw + 1]);
+
+ i__1 = *lwork - jw;
+ cgehrd_(&jw, &c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1]
+, &i__1, &info);
+ }
+
+/* ==== Copy updated reduced window into place ==== */
+
+ if (kwtop > 1) {
+ i__1 = kwtop + (kwtop - 1) * h_dim1;
+ r_cnjg(&q__2, &v[v_dim1 + 1]);
+ q__1.r = s.r * q__2.r - s.i * q__2.i, q__1.i = s.r * q__2.i + s.i
+ * q__2.r;
+ h__[i__1].r = q__1.r, h__[i__1].i = q__1.i;
+ }
+ clacpy_("U", &jw, &jw, &t[t_offset], ldt, &h__[kwtop + kwtop * h_dim1]
+, ldh);
+ i__1 = jw - 1;
+ i__2 = *ldt + 1;
+ i__3 = *ldh + 1;
+ ccopy_(&i__1, &t[t_dim1 + 2], &i__2, &h__[kwtop + 1 + kwtop * h_dim1],
+ &i__3);
+
+/* ==== Accumulate orthogonal matrix in order update */
+/* . H and Z, if requested. ==== */
+
+ if (*ns > 1 && (s.r != 0.f || s.i != 0.f)) {
+ i__1 = *lwork - jw;
+ cunmhr_("R", "N", &jw, ns, &c__1, ns, &t[t_offset], ldt, &work[1],
+ &v[v_offset], ldv, &work[jw + 1], &i__1, &info);
+ }
+
+/* ==== Update vertical slab in H ==== */
+
+ if (*wantt) {
+ ltop = 1;
+ } else {
+ ltop = *ktop;
+ }
+ i__1 = kwtop - 1;
+ i__2 = *nv;
+ for (krow = ltop; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *nv, i__4 = kwtop - krow;
+ kln = min(i__3,i__4);
+ cgemm_("N", "N", &kln, &jw, &jw, &c_b2, &h__[krow + kwtop *
+ h_dim1], ldh, &v[v_offset], ldv, &c_b1, &wv[wv_offset],
+ ldwv);
+ clacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &h__[krow + kwtop *
+ h_dim1], ldh);
+/* L60: */
+ }
+
+/* ==== Update horizontal slab in H ==== */
+
+ if (*wantt) {
+ i__2 = *n;
+ i__1 = *nh;
+ for (kcol = *kbot + 1; i__1 < 0 ? kcol >= i__2 : kcol <= i__2;
+ kcol += i__1) {
+/* Computing MIN */
+ i__3 = *nh, i__4 = *n - kcol + 1;
+ kln = min(i__3,i__4);
+ cgemm_("C", "N", &jw, &kln, &jw, &c_b2, &v[v_offset], ldv, &
+ h__[kwtop + kcol * h_dim1], ldh, &c_b1, &t[t_offset],
+ ldt);
+ clacpy_("A", &jw, &kln, &t[t_offset], ldt, &h__[kwtop + kcol *
+ h_dim1], ldh);
+/* L70: */
+ }
+ }
+
+/* ==== Update vertical slab in Z ==== */
+
+ if (*wantz) {
+ i__1 = *ihiz;
+ i__2 = *nv;
+ for (krow = *iloz; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *nv, i__4 = *ihiz - krow + 1;
+ kln = min(i__3,i__4);
+ cgemm_("N", "N", &kln, &jw, &jw, &c_b2, &z__[krow + kwtop *
+ z_dim1], ldz, &v[v_offset], ldv, &c_b1, &wv[wv_offset]
+, ldwv);
+ clacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &z__[krow +
+ kwtop * z_dim1], ldz);
+/* L80: */
+ }
+ }
+ }
+
+/* ==== Return the number of deflations ... ==== */
+
+ *nd = jw - *ns;
+
+/* ==== ... and the number of shifts. (Subtracting */
+/* . INFQR from the spike length takes care */
+/* . of the case of a rare QR failure while */
+/* . calculating eigenvalues of the deflation */
+/* . window.) ==== */
+
+ *ns -= infqr;
+
+/* ==== Return optimal workspace. ==== */
+
+ r__1 = (real) lwkopt;
+ q__1.r = r__1, q__1.i = 0.f;
+ work[1].r = q__1.r, work[1].i = q__1.i;
+
+/* ==== End of CLAQR3 ==== */
+
+ return 0;
+} /* claqr3_ */
diff --git a/contrib/libs/clapack/claqr4.c b/contrib/libs/clapack/claqr4.c
new file mode 100644
index 0000000000..10ff1595e5
--- /dev/null
+++ b/contrib/libs/clapack/claqr4.c
@@ -0,0 +1,782 @@
+/* claqr4.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__13 = 13;
+static integer c__15 = 15;
+static integer c_n1 = -1;
+static integer c__12 = 12;
+static integer c__14 = 14;
+static integer c__16 = 16;
+static logical c_false = FALSE_;
+static integer c__1 = 1;
+static integer c__3 = 3;
+
+/* Subroutine */ int claqr4_(logical *wantt, logical *wantz, integer *n,
+ integer *ilo, integer *ihi, complex *h__, integer *ldh, complex *w,
+ integer *iloz, integer *ihiz, complex *z__, integer *ldz, complex *
+ work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4, r__5, r__6, r__7, r__8;
+ complex q__1, q__2, q__3, q__4, q__5;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+ void c_sqrt(complex *, complex *);
+
+ /* Local variables */
+ integer i__, k;
+ real s;
+ complex aa, bb, cc, dd;
+ integer ld, nh, it, ks, kt, ku, kv, ls, ns, nw;
+ complex tr2, det;
+ integer inf, kdu, nho, nve, kwh, nsr, nwr, kwv, ndec, ndfl, kbot, nmin;
+ complex swap;
+ integer ktop;
+ complex zdum[1] /* was [1][1] */;
+ integer kacc22, itmax, nsmax, nwmax, kwtop;
+ extern /* Subroutine */ int claqr2_(logical *, logical *, integer *,
+ integer *, integer *, integer *, complex *, integer *, integer *,
+ integer *, complex *, integer *, integer *, integer *, complex *,
+ complex *, integer *, integer *, complex *, integer *, integer *,
+ complex *, integer *, complex *, integer *), claqr5_(logical *,
+ logical *, integer *, integer *, integer *, integer *, integer *,
+ complex *, complex *, integer *, integer *, integer *, complex *,
+ integer *, complex *, integer *, complex *, integer *, integer *,
+ complex *, integer *, integer *, complex *, integer *);
+ integer nibble;
+ extern /* Subroutine */ int clahqr_(logical *, logical *, integer *,
+ integer *, integer *, complex *, integer *, complex *, integer *,
+ integer *, complex *, integer *, integer *), clacpy_(char *,
+ integer *, integer *, complex *, integer *, complex *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ char jbcmpz[1];
+ complex rtdisc;
+ integer nwupbd;
+ logical sorted;
+ integer lwkopt;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* This subroutine implements one level of recursion for CLAQR0. */
+/* It is a complete implementation of the small bulge multi-shift */
+/* QR algorithm. It may be called by CLAQR0 and, for large enough */
+/* deflation window size, it may be called by CLAQR3. This */
+/* subroutine is identical to CLAQR0 except that it calls CLAQR2 */
+/* instead of CLAQR3. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAQR4 computes the eigenvalues of a Hessenberg matrix H */
+/* and, optionally, the matrices T and Z from the Schur decomposition */
+/* H = Z T Z**H, where T is an upper triangular matrix (the */
+/* Schur form), and Z is the unitary matrix of Schur vectors. */
+
+/* Optionally Z may be postmultiplied into an input unitary */
+/* matrix Q so that this routine can give the Schur factorization */
+/* of a matrix A which has been reduced to the Hessenberg form H */
+/* by the unitary matrix Q: A = Q*H*Q**H = (QZ)*H*(QZ)**H. */
+
+/* Arguments */
+/* ========= */
+
+/* WANTT (input) LOGICAL */
+/* = .TRUE. : the full Schur form T is required; */
+/* = .FALSE.: only eigenvalues are required. */
+
+/* WANTZ (input) LOGICAL */
+/* = .TRUE. : the matrix of Schur vectors Z is required; */
+/* = .FALSE.: Schur vectors are not required. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N .GE. 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that H is already upper triangular in rows */
+/* and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, */
+/* H(ILO,ILO-1) is zero. ILO and IHI are normally set by a */
+/* previous call to CGEBAL, and then passed to CGEHRD when the */
+/* matrix output by CGEBAL is reduced to Hessenberg form. */
+/* Otherwise, ILO and IHI should be set to 1 and N, */
+/* respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. */
+/* If N = 0, then ILO = 1 and IHI = 0. */
+
+/* H (input/output) COMPLEX array, dimension (LDH,N) */
+/* On entry, the upper Hessenberg matrix H. */
+/* On exit, if INFO = 0 and WANTT is .TRUE., then H */
+/* contains the upper triangular matrix T from the Schur */
+/* decomposition (the Schur form). If INFO = 0 and WANT is */
+/* .FALSE., then the contents of H are unspecified on exit. */
+/* (The output value of H when INFO.GT.0 is given under the */
+/* description of INFO below.) */
+
+/* This subroutine may explicitly set H(i,j) = 0 for i.GT.j and */
+/* j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH .GE. max(1,N). */
+
+/* W (output) COMPLEX array, dimension (N) */
+/* The computed eigenvalues of H(ILO:IHI,ILO:IHI) are stored */
+/* in W(ILO:IHI). If WANTT is .TRUE., then the eigenvalues are */
+/* stored in the same order as on the diagonal of the Schur */
+/* form returned in H, with W(i) = H(i,i). */
+
+/* Z (input/output) COMPLEX array, dimension (LDZ,IHI) */
+/* If WANTZ is .FALSE., then Z is not referenced. */
+/* If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is */
+/* replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the */
+/* orthogonal Schur factor of H(ILO:IHI,ILO:IHI). */
+/* (The output value of Z when INFO.GT.0 is given under */
+/* the description of INFO below.) */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. if WANTZ is .TRUE. */
+/* then LDZ.GE.MAX(1,IHIZ). Otherwize, LDZ.GE.1. */
+
+/* WORK (workspace/output) COMPLEX array, dimension LWORK */
+/* On exit, if LWORK = -1, WORK(1) returns an estimate of */
+/* the optimal value for LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK .GE. max(1,N) */
+/* is sufficient, but LWORK typically as large as 6*N may */
+/* be required for optimal performance. A workspace query */
+/* to determine the optimal workspace size is recommended. */
+
+/* If LWORK = -1, then CLAQR4 does a workspace query. */
+/* In this case, CLAQR4 checks the input parameters and */
+/* estimates the optimal workspace size for the given */
+/* values of N, ILO and IHI. The estimate is returned */
+/* in WORK(1). No error message related to LWORK is */
+/* issued by XERBLA. Neither H nor Z are accessed. */
+
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* .GT. 0: if INFO = i, CLAQR4 failed to compute all of */
+/* the eigenvalues. Elements 1:ilo-1 and i+1:n of WR */
+/* and WI contain those eigenvalues which have been */
+/* successfully computed. (Failures are rare.) */
+
+/* If INFO .GT. 0 and WANT is .FALSE., then on exit, */
+/* the remaining unconverged eigenvalues are the eigen- */
+/* values of the upper Hessenberg matrix rows and */
+/* columns ILO through INFO of the final, output */
+/* value of H. */
+
+/* If INFO .GT. 0 and WANTT is .TRUE., then on exit */
+
+/* (*) (initial value of H)*U = U*(final value of H) */
+
+/* where U is a unitary matrix. The final */
+/* value of H is upper Hessenberg and triangular in */
+/* rows and columns INFO+1 through IHI. */
+
+/* If INFO .GT. 0 and WANTZ is .TRUE., then on exit */
+
+/* (final value of Z(ILO:IHI,ILOZ:IHIZ) */
+/* = (initial value of Z(ILO:IHI,ILOZ:IHIZ)*U */
+
+/* where U is the unitary matrix in (*) (regard- */
+/* less of the value of WANTT.) */
+
+/* If INFO .GT. 0 and WANTZ is .FALSE., then Z is not */
+/* accessed. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* References: */
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 */
+/* Performance, SIAM Journal of Matrix Analysis, volume 23, pages */
+/* 929--947, 2002. */
+
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part II: Aggressive Early Deflation, SIAM Journal */
+/* of Matrix Analysis, volume 23, pages 948--973, 2002. */
+
+/* ================================================================ */
+/* .. Parameters .. */
+
+/* ==== Matrices of order NTINY or smaller must be processed by */
+/* . CLAHQR because of insufficient subdiagonal scratch space. */
+/* . (This is a hard limit.) ==== */
+
+/* ==== Exceptional deflation windows: try to cure rare */
+/* . slow convergence by varying the size of the */
+/* . deflation window after KEXNW iterations. ==== */
+
+/* ==== Exceptional shifts: try to cure rare slow convergence */
+/* . with ad-hoc exceptional shifts every KEXSH iterations. */
+/* . ==== */
+
+/* ==== The constant WILK1 is used to form the exceptional */
+/* . shifts. ==== */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+/* ==== Quick return for N = 0: nothing to do. ==== */
+
+ if (*n == 0) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ if (*n <= 11) {
+
+/* ==== Tiny matrices must use CLAHQR. ==== */
+
+ lwkopt = 1;
+ if (*lwork != -1) {
+ clahqr_(wantt, wantz, n, ilo, ihi, &h__[h_offset], ldh, &w[1],
+ iloz, ihiz, &z__[z_offset], ldz, info);
+ }
+ } else {
+
+/* ==== Use small bulge multi-shift QR with aggressive early */
+/* . deflation on larger-than-tiny matrices. ==== */
+
+/* ==== Hope for the best. ==== */
+
+ *info = 0;
+
+/* ==== Set up job flags for ILAENV. ==== */
+
+ if (*wantt) {
+ *(unsigned char *)jbcmpz = 'S';
+ } else {
+ *(unsigned char *)jbcmpz = 'E';
+ }
+ if (*wantz) {
+ *(unsigned char *)&jbcmpz[1] = 'V';
+ } else {
+ *(unsigned char *)&jbcmpz[1] = 'N';
+ }
+
+/* ==== NWR = recommended deflation window size. At this */
+/* . point, N .GT. NTINY = 11, so there is enough */
+/* . subdiagonal workspace for NWR.GE.2 as required. */
+/* . (In fact, there is enough subdiagonal space for */
+/* . NWR.GE.3.) ==== */
+
+ nwr = ilaenv_(&c__13, "CLAQR4", jbcmpz, n, ilo, ihi, lwork);
+ nwr = max(2,nwr);
+/* Computing MIN */
+ i__1 = *ihi - *ilo + 1, i__2 = (*n - 1) / 3, i__1 = min(i__1,i__2);
+ nwr = min(i__1,nwr);
+
+/* ==== NSR = recommended number of simultaneous shifts. */
+/* . At this point N .GT. NTINY = 11, so there is at */
+/* . enough subdiagonal workspace for NSR to be even */
+/* . and greater than or equal to two as required. ==== */
+
+ nsr = ilaenv_(&c__15, "CLAQR4", jbcmpz, n, ilo, ihi, lwork);
+/* Computing MIN */
+ i__1 = nsr, i__2 = (*n + 6) / 9, i__1 = min(i__1,i__2), i__2 = *ihi -
+ *ilo;
+ nsr = min(i__1,i__2);
+/* Computing MAX */
+ i__1 = 2, i__2 = nsr - nsr % 2;
+ nsr = max(i__1,i__2);
+
+/* ==== Estimate optimal workspace ==== */
+
+/* ==== Workspace query call to CLAQR2 ==== */
+
+ i__1 = nwr + 1;
+ claqr2_(wantt, wantz, n, ilo, ihi, &i__1, &h__[h_offset], ldh, iloz,
+ ihiz, &z__[z_offset], ldz, &ls, &ld, &w[1], &h__[h_offset],
+ ldh, n, &h__[h_offset], ldh, n, &h__[h_offset], ldh, &work[1],
+ &c_n1);
+
+/* ==== Optimal workspace = MAX(CLAQR5, CLAQR2) ==== */
+
+/* Computing MAX */
+ i__1 = nsr * 3 / 2, i__2 = (integer) work[1].r;
+ lwkopt = max(i__1,i__2);
+
+/* ==== Quick return in case of workspace query. ==== */
+
+ if (*lwork == -1) {
+ r__1 = (real) lwkopt;
+ q__1.r = r__1, q__1.i = 0.f;
+ work[1].r = q__1.r, work[1].i = q__1.i;
+ return 0;
+ }
+
+/* ==== CLAHQR/CLAQR0 crossover point ==== */
+
+ nmin = ilaenv_(&c__12, "CLAQR4", jbcmpz, n, ilo, ihi, lwork);
+ nmin = max(11,nmin);
+
+/* ==== Nibble crossover point ==== */
+
+ nibble = ilaenv_(&c__14, "CLAQR4", jbcmpz, n, ilo, ihi, lwork);
+ nibble = max(0,nibble);
+
+/* ==== Accumulate reflections during ttswp? Use block */
+/* . 2-by-2 structure during matrix-matrix multiply? ==== */
+
+ kacc22 = ilaenv_(&c__16, "CLAQR4", jbcmpz, n, ilo, ihi, lwork);
+ kacc22 = max(0,kacc22);
+ kacc22 = min(2,kacc22);
+
+/* ==== NWMAX = the largest possible deflation window for */
+/* . which there is sufficient workspace. ==== */
+
+/* Computing MIN */
+ i__1 = (*n - 1) / 3, i__2 = *lwork / 2;
+ nwmax = min(i__1,i__2);
+ nw = nwmax;
+
+/* ==== NSMAX = the Largest number of simultaneous shifts */
+/* . for which there is sufficient workspace. ==== */
+
+/* Computing MIN */
+ i__1 = (*n + 6) / 9, i__2 = (*lwork << 1) / 3;
+ nsmax = min(i__1,i__2);
+ nsmax -= nsmax % 2;
+
+/* ==== NDFL: an iteration count restarted at deflation. ==== */
+
+ ndfl = 1;
+
+/* ==== ITMAX = iteration limit ==== */
+
+/* Computing MAX */
+ i__1 = 10, i__2 = *ihi - *ilo + 1;
+ itmax = max(i__1,i__2) * 30;
+
+/* ==== Last row and column in the active block ==== */
+
+ kbot = *ihi;
+
+/* ==== Main Loop ==== */
+
+ i__1 = itmax;
+ for (it = 1; it <= i__1; ++it) {
+
+/* ==== Done when KBOT falls below ILO ==== */
+
+ if (kbot < *ilo) {
+ goto L80;
+ }
+
+/* ==== Locate active block ==== */
+
+ i__2 = *ilo + 1;
+ for (k = kbot; k >= i__2; --k) {
+ i__3 = k + (k - 1) * h_dim1;
+ if (h__[i__3].r == 0.f && h__[i__3].i == 0.f) {
+ goto L20;
+ }
+/* L10: */
+ }
+ k = *ilo;
+L20:
+ ktop = k;
+
+/* ==== Select deflation window size: */
+/* . Typical Case: */
+/* . If possible and advisable, nibble the entire */
+/* . active block. If not, use size MIN(NWR,NWMAX) */
+/* . or MIN(NWR+1,NWMAX) depending upon which has */
+/* . the smaller corresponding subdiagonal entry */
+/* . (a heuristic). */
+/* . */
+/* . Exceptional Case: */
+/* . If there have been no deflations in KEXNW or */
+/* . more iterations, then vary the deflation window */
+/* . size. At first, because, larger windows are, */
+/* . in general, more powerful than smaller ones, */
+/* . rapidly increase the window to the maximum possible. */
+/* . Then, gradually reduce the window size. ==== */
+
+ nh = kbot - ktop + 1;
+ nwupbd = min(nh,nwmax);
+ if (ndfl < 5) {
+ nw = min(nwupbd,nwr);
+ } else {
+/* Computing MIN */
+ i__2 = nwupbd, i__3 = nw << 1;
+ nw = min(i__2,i__3);
+ }
+ if (nw < nwmax) {
+ if (nw >= nh - 1) {
+ nw = nh;
+ } else {
+ kwtop = kbot - nw + 1;
+ i__2 = kwtop + (kwtop - 1) * h_dim1;
+ i__3 = kwtop - 1 + (kwtop - 2) * h_dim1;
+ if ((r__1 = h__[i__2].r, dabs(r__1)) + (r__2 = r_imag(&
+ h__[kwtop + (kwtop - 1) * h_dim1]), dabs(r__2)) >
+ (r__3 = h__[i__3].r, dabs(r__3)) + (r__4 = r_imag(
+ &h__[kwtop - 1 + (kwtop - 2) * h_dim1]), dabs(
+ r__4))) {
+ ++nw;
+ }
+ }
+ }
+ if (ndfl < 5) {
+ ndec = -1;
+ } else if (ndec >= 0 || nw >= nwupbd) {
+ ++ndec;
+ if (nw - ndec < 2) {
+ ndec = 0;
+ }
+ nw -= ndec;
+ }
+
+/* ==== Aggressive early deflation: */
+/* . split workspace under the subdiagonal into */
+/* . - an nw-by-nw work array V in the lower */
+/* . left-hand-corner, */
+/* . - an NW-by-at-least-NW-but-more-is-better */
+/* . (NW-by-NHO) horizontal work array along */
+/* . the bottom edge, */
+/* . - an at-least-NW-but-more-is-better (NHV-by-NW) */
+/* . vertical work array along the left-hand-edge. */
+/* . ==== */
+
+ kv = *n - nw + 1;
+ kt = nw + 1;
+ nho = *n - nw - 1 - kt + 1;
+ kwv = nw + 2;
+ nve = *n - nw - kwv + 1;
+
+/* ==== Aggressive early deflation ==== */
+
+ claqr2_(wantt, wantz, n, &ktop, &kbot, &nw, &h__[h_offset], ldh,
+ iloz, ihiz, &z__[z_offset], ldz, &ls, &ld, &w[1], &h__[kv
+ + h_dim1], ldh, &nho, &h__[kv + kt * h_dim1], ldh, &nve, &
+ h__[kwv + h_dim1], ldh, &work[1], lwork);
+
+/* ==== Adjust KBOT accounting for new deflations. ==== */
+
+ kbot -= ld;
+
+/* ==== KS points to the shifts. ==== */
+
+ ks = kbot - ls + 1;
+
+/* ==== Skip an expensive QR sweep if there is a (partly */
+/* . heuristic) reason to expect that many eigenvalues */
+/* . will deflate without it. Here, the QR sweep is */
+/* . skipped if many eigenvalues have just been deflated */
+/* . or if the remaining active block is small. */
+
+ if (ld == 0 || ld * 100 <= nw * nibble && kbot - ktop + 1 > min(
+ nmin,nwmax)) {
+
+/* ==== NS = nominal number of simultaneous shifts. */
+/* . This may be lowered (slightly) if CLAQR2 */
+/* . did not provide that many shifts. ==== */
+
+/* Computing MIN */
+/* Computing MAX */
+ i__4 = 2, i__5 = kbot - ktop;
+ i__2 = min(nsmax,nsr), i__3 = max(i__4,i__5);
+ ns = min(i__2,i__3);
+ ns -= ns % 2;
+
+/* ==== If there have been no deflations */
+/* . in a multiple of KEXSH iterations, */
+/* . then try exceptional shifts. */
+/* . Otherwise use shifts provided by */
+/* . CLAQR2 above or from the eigenvalues */
+/* . of a trailing principal submatrix. ==== */
+
+ if (ndfl % 6 == 0) {
+ ks = kbot - ns + 1;
+ i__2 = ks + 1;
+ for (i__ = kbot; i__ >= i__2; i__ += -2) {
+ i__3 = i__;
+ i__4 = i__ + i__ * h_dim1;
+ i__5 = i__ + (i__ - 1) * h_dim1;
+ r__3 = ((r__1 = h__[i__5].r, dabs(r__1)) + (r__2 =
+ r_imag(&h__[i__ + (i__ - 1) * h_dim1]), dabs(
+ r__2))) * .75f;
+ q__1.r = h__[i__4].r + r__3, q__1.i = h__[i__4].i;
+ w[i__3].r = q__1.r, w[i__3].i = q__1.i;
+ i__3 = i__ - 1;
+ i__4 = i__;
+ w[i__3].r = w[i__4].r, w[i__3].i = w[i__4].i;
+/* L30: */
+ }
+ } else {
+
+/* ==== Got NS/2 or fewer shifts? Use CLAHQR */
+/* . on a trailing principal submatrix to */
+/* . get more. (Since NS.LE.NSMAX.LE.(N+6)/9, */
+/* . there is enough space below the subdiagonal */
+/* . to fit an NS-by-NS scratch array.) ==== */
+
+ if (kbot - ks + 1 <= ns / 2) {
+ ks = kbot - ns + 1;
+ kt = *n - ns + 1;
+ clacpy_("A", &ns, &ns, &h__[ks + ks * h_dim1], ldh, &
+ h__[kt + h_dim1], ldh);
+ clahqr_(&c_false, &c_false, &ns, &c__1, &ns, &h__[kt
+ + h_dim1], ldh, &w[ks], &c__1, &c__1, zdum, &
+ c__1, &inf);
+ ks += inf;
+
+/* ==== In case of a rare QR failure use */
+/* . eigenvalues of the trailing 2-by-2 */
+/* . principal submatrix. Scale to avoid */
+/* . overflows, underflows and subnormals. */
+/* . (The scale factor S can not be zero, */
+/* . because H(KBOT,KBOT-1) is nonzero.) ==== */
+
+ if (ks >= kbot) {
+ i__2 = kbot - 1 + (kbot - 1) * h_dim1;
+ i__3 = kbot + (kbot - 1) * h_dim1;
+ i__4 = kbot - 1 + kbot * h_dim1;
+ i__5 = kbot + kbot * h_dim1;
+ s = (r__1 = h__[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&h__[kbot - 1 + (kbot - 1) *
+ h_dim1]), dabs(r__2)) + ((r__3 = h__[i__3]
+ .r, dabs(r__3)) + (r__4 = r_imag(&h__[
+ kbot + (kbot - 1) * h_dim1]), dabs(r__4)))
+ + ((r__5 = h__[i__4].r, dabs(r__5)) + (
+ r__6 = r_imag(&h__[kbot - 1 + kbot *
+ h_dim1]), dabs(r__6))) + ((r__7 = h__[
+ i__5].r, dabs(r__7)) + (r__8 = r_imag(&
+ h__[kbot + kbot * h_dim1]), dabs(r__8)));
+ i__2 = kbot - 1 + (kbot - 1) * h_dim1;
+ q__1.r = h__[i__2].r / s, q__1.i = h__[i__2].i /
+ s;
+ aa.r = q__1.r, aa.i = q__1.i;
+ i__2 = kbot + (kbot - 1) * h_dim1;
+ q__1.r = h__[i__2].r / s, q__1.i = h__[i__2].i /
+ s;
+ cc.r = q__1.r, cc.i = q__1.i;
+ i__2 = kbot - 1 + kbot * h_dim1;
+ q__1.r = h__[i__2].r / s, q__1.i = h__[i__2].i /
+ s;
+ bb.r = q__1.r, bb.i = q__1.i;
+ i__2 = kbot + kbot * h_dim1;
+ q__1.r = h__[i__2].r / s, q__1.i = h__[i__2].i /
+ s;
+ dd.r = q__1.r, dd.i = q__1.i;
+ q__2.r = aa.r + dd.r, q__2.i = aa.i + dd.i;
+ q__1.r = q__2.r / 2.f, q__1.i = q__2.i / 2.f;
+ tr2.r = q__1.r, tr2.i = q__1.i;
+ q__3.r = aa.r - tr2.r, q__3.i = aa.i - tr2.i;
+ q__4.r = dd.r - tr2.r, q__4.i = dd.i - tr2.i;
+ q__2.r = q__3.r * q__4.r - q__3.i * q__4.i,
+ q__2.i = q__3.r * q__4.i + q__3.i *
+ q__4.r;
+ q__5.r = bb.r * cc.r - bb.i * cc.i, q__5.i = bb.r
+ * cc.i + bb.i * cc.r;
+ q__1.r = q__2.r - q__5.r, q__1.i = q__2.i -
+ q__5.i;
+ det.r = q__1.r, det.i = q__1.i;
+ q__2.r = -det.r, q__2.i = -det.i;
+ c_sqrt(&q__1, &q__2);
+ rtdisc.r = q__1.r, rtdisc.i = q__1.i;
+ i__2 = kbot - 1;
+ q__2.r = tr2.r + rtdisc.r, q__2.i = tr2.i +
+ rtdisc.i;
+ q__1.r = s * q__2.r, q__1.i = s * q__2.i;
+ w[i__2].r = q__1.r, w[i__2].i = q__1.i;
+ i__2 = kbot;
+ q__2.r = tr2.r - rtdisc.r, q__2.i = tr2.i -
+ rtdisc.i;
+ q__1.r = s * q__2.r, q__1.i = s * q__2.i;
+ w[i__2].r = q__1.r, w[i__2].i = q__1.i;
+
+ ks = kbot - 1;
+ }
+ }
+
+ if (kbot - ks + 1 > ns) {
+
+/* ==== Sort the shifts (Helps a little) ==== */
+
+ sorted = FALSE_;
+ i__2 = ks + 1;
+ for (k = kbot; k >= i__2; --k) {
+ if (sorted) {
+ goto L60;
+ }
+ sorted = TRUE_;
+ i__3 = k - 1;
+ for (i__ = ks; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ i__5 = i__ + 1;
+ if ((r__1 = w[i__4].r, dabs(r__1)) + (r__2 =
+ r_imag(&w[i__]), dabs(r__2)) < (r__3 =
+ w[i__5].r, dabs(r__3)) + (r__4 =
+ r_imag(&w[i__ + 1]), dabs(r__4))) {
+ sorted = FALSE_;
+ i__4 = i__;
+ swap.r = w[i__4].r, swap.i = w[i__4].i;
+ i__4 = i__;
+ i__5 = i__ + 1;
+ w[i__4].r = w[i__5].r, w[i__4].i = w[i__5]
+ .i;
+ i__4 = i__ + 1;
+ w[i__4].r = swap.r, w[i__4].i = swap.i;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+L60:
+ ;
+ }
+ }
+
+/* ==== If there are only two shifts, then use */
+/* . only one. ==== */
+
+ if (kbot - ks + 1 == 2) {
+ i__2 = kbot;
+ i__3 = kbot + kbot * h_dim1;
+ q__2.r = w[i__2].r - h__[i__3].r, q__2.i = w[i__2].i -
+ h__[i__3].i;
+ q__1.r = q__2.r, q__1.i = q__2.i;
+ i__4 = kbot - 1;
+ i__5 = kbot + kbot * h_dim1;
+ q__4.r = w[i__4].r - h__[i__5].r, q__4.i = w[i__4].i -
+ h__[i__5].i;
+ q__3.r = q__4.r, q__3.i = q__4.i;
+ if ((r__1 = q__1.r, dabs(r__1)) + (r__2 = r_imag(&q__1),
+ dabs(r__2)) < (r__3 = q__3.r, dabs(r__3)) + (r__4
+ = r_imag(&q__3), dabs(r__4))) {
+ i__2 = kbot - 1;
+ i__3 = kbot;
+ w[i__2].r = w[i__3].r, w[i__2].i = w[i__3].i;
+ } else {
+ i__2 = kbot;
+ i__3 = kbot - 1;
+ w[i__2].r = w[i__3].r, w[i__2].i = w[i__3].i;
+ }
+ }
+
+/* ==== Use up to NS of the smallest magnitude */
+/* . shifts. If there aren't NS shifts available, */
+/* . then use them all, possibly dropping one to */
+/* . make the number of shifts even. ==== */
+
+/* Computing MIN */
+ i__2 = ns, i__3 = kbot - ks + 1;
+ ns = min(i__2,i__3);
+ ns -= ns % 2;
+ ks = kbot - ns + 1;
+
+/* ==== Small-bulge multi-shift QR sweep: */
+/* . split workspace under the subdiagonal into */
+/* . - a KDU-by-KDU work array U in the lower */
+/* . left-hand-corner, */
+/* . - a KDU-by-at-least-KDU-but-more-is-better */
+/* . (KDU-by-NHo) horizontal work array WH along */
+/* . the bottom edge, */
+/* . - and an at-least-KDU-but-more-is-better-by-KDU */
+/* . (NVE-by-KDU) vertical work WV arrow along */
+/* . the left-hand-edge. ==== */
+
+ kdu = ns * 3 - 3;
+ ku = *n - kdu + 1;
+ kwh = kdu + 1;
+ nho = *n - kdu - 3 - (kdu + 1) + 1;
+ kwv = kdu + 4;
+ nve = *n - kdu - kwv + 1;
+
+/* ==== Small-bulge multi-shift QR sweep ==== */
+
+ claqr5_(wantt, wantz, &kacc22, n, &ktop, &kbot, &ns, &w[ks], &
+ h__[h_offset], ldh, iloz, ihiz, &z__[z_offset], ldz, &
+ work[1], &c__3, &h__[ku + h_dim1], ldh, &nve, &h__[
+ kwv + h_dim1], ldh, &nho, &h__[ku + kwh * h_dim1],
+ ldh);
+ }
+
+/* ==== Note progress (or the lack of it). ==== */
+
+ if (ld > 0) {
+ ndfl = 1;
+ } else {
+ ++ndfl;
+ }
+
+/* ==== End of main loop ==== */
+/* L70: */
+ }
+
+/* ==== Iteration limit exceeded. Set INFO to show where */
+/* . the problem occurred and exit. ==== */
+
+ *info = kbot;
+L80:
+ ;
+ }
+
+/* ==== Return the optimal value of LWORK. ==== */
+
+ r__1 = (real) lwkopt;
+ q__1.r = r__1, q__1.i = 0.f;
+ work[1].r = q__1.r, work[1].i = q__1.i;
+
+/* ==== End of CLAQR4 ==== */
+
+ return 0;
+} /* claqr4_ */
diff --git a/contrib/libs/clapack/claqr5.c b/contrib/libs/clapack/claqr5.c
new file mode 100644
index 0000000000..56552c3eb0
--- /dev/null
+++ b/contrib/libs/clapack/claqr5.c
@@ -0,0 +1,1345 @@
+/* claqr5.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__3 = 3;
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int claqr5_(logical *wantt, logical *wantz, integer *kacc22,
+ integer *n, integer *ktop, integer *kbot, integer *nshfts, complex *s,
+ complex *h__, integer *ldh, integer *iloz, integer *ihiz, complex *
+ z__, integer *ldz, complex *v, integer *ldv, complex *u, integer *ldu,
+ integer *nv, complex *wv, integer *ldwv, integer *nh, complex *wh,
+ integer *ldwh)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, u_dim1, u_offset, v_dim1, v_offset, wh_dim1,
+ wh_offset, wv_dim1, wv_offset, z_dim1, z_offset, i__1, i__2, i__3,
+ i__4, i__5, i__6, i__7, i__8, i__9, i__10, i__11;
+ real r__1, r__2, r__3, r__4, r__5, r__6, r__7, r__8, r__9, r__10;
+ complex q__1, q__2, q__3, q__4, q__5, q__6, q__7, q__8;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer j, k, m, i2, j2, i4, j4, k1;
+ real h11, h12, h21, h22;
+ integer m22, ns, nu;
+ complex vt[3];
+ real scl;
+ integer kdu, kms;
+ real ulp;
+ integer knz, kzs;
+ real tst1, tst2;
+ complex beta;
+ logical blk22, bmp22;
+ integer mend, jcol, jlen, jbot, mbot, jtop, jrow, mtop;
+ complex alpha;
+ logical accum;
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *);
+ integer ndcol, incol, krcol, nbmps;
+ extern /* Subroutine */ int ctrmm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *), claqr1_(integer *,
+ complex *, integer *, complex *, complex *, complex *), slabad_(
+ real *, real *), clarfg_(integer *, complex *, complex *, integer
+ *, complex *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), claset_(char *,
+ integer *, integer *, complex *, complex *, complex *, integer *);
+ real safmin, safmax;
+ complex refsum;
+ integer mstart;
+ real smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* This auxiliary subroutine called by CLAQR0 performs a */
+/* single small-bulge multi-shift QR sweep. */
+
+/* WANTT (input) logical scalar */
+/* WANTT = .true. if the triangular Schur factor */
+/* is being computed. WANTT is set to .false. otherwise. */
+
+/* WANTZ (input) logical scalar */
+/* WANTZ = .true. if the unitary Schur factor is being */
+/* computed. WANTZ is set to .false. otherwise. */
+
+/* KACC22 (input) integer with value 0, 1, or 2. */
+/* Specifies the computation mode of far-from-diagonal */
+/* orthogonal updates. */
+/* = 0: CLAQR5 does not accumulate reflections and does not */
+/* use matrix-matrix multiply to update far-from-diagonal */
+/* matrix entries. */
+/* = 1: CLAQR5 accumulates reflections and uses matrix-matrix */
+/* multiply to update the far-from-diagonal matrix entries. */
+/* = 2: CLAQR5 accumulates reflections, uses matrix-matrix */
+/* multiply to update the far-from-diagonal matrix entries, */
+/* and takes advantage of 2-by-2 block structure during */
+/* matrix multiplies. */
+
+/* N (input) integer scalar */
+/* N is the order of the Hessenberg matrix H upon which this */
+/* subroutine operates. */
+
+/* KTOP (input) integer scalar */
+/* KBOT (input) integer scalar */
+/* These are the first and last rows and columns of an */
+/* isolated diagonal block upon which the QR sweep is to be */
+/* applied. It is assumed without a check that */
+/* either KTOP = 1 or H(KTOP,KTOP-1) = 0 */
+/* and */
+/* either KBOT = N or H(KBOT+1,KBOT) = 0. */
+
+/* NSHFTS (input) integer scalar */
+/* NSHFTS gives the number of simultaneous shifts. NSHFTS */
+/* must be positive and even. */
+
+/* S (input/output) COMPLEX array of size (NSHFTS) */
+/* S contains the shifts of origin that define the multi- */
+/* shift QR sweep. On output S may be reordered. */
+
+/* H (input/output) COMPLEX array of size (LDH,N) */
+/* On input H contains a Hessenberg matrix. On output a */
+/* multi-shift QR sweep with shifts SR(J)+i*SI(J) is applied */
+/* to the isolated diagonal block in rows and columns KTOP */
+/* through KBOT. */
+
+/* LDH (input) integer scalar */
+/* LDH is the leading dimension of H just as declared in the */
+/* calling procedure. LDH.GE.MAX(1,N). */
+
+/* ILOZ (input) INTEGER */
+/* IHIZ (input) INTEGER */
+/* Specify the rows of Z to which transformations must be */
+/* applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N */
+
+/* Z (input/output) COMPLEX array of size (LDZ,IHI) */
+/* If WANTZ = .TRUE., then the QR Sweep unitary */
+/* similarity transformation is accumulated into */
+/* Z(ILOZ:IHIZ,ILO:IHI) from the right. */
+/* If WANTZ = .FALSE., then Z is unreferenced. */
+
+/* LDZ (input) integer scalar */
+/* LDA is the leading dimension of Z just as declared in */
+/* the calling procedure. LDZ.GE.N. */
+
+/* V (workspace) COMPLEX array of size (LDV,NSHFTS/2) */
+
+/* LDV (input) integer scalar */
+/* LDV is the leading dimension of V as declared in the */
+/* calling procedure. LDV.GE.3. */
+
+/* U (workspace) COMPLEX array of size */
+/* (LDU,3*NSHFTS-3) */
+
+/* LDU (input) integer scalar */
+/* LDU is the leading dimension of U just as declared in the */
+/* in the calling subroutine. LDU.GE.3*NSHFTS-3. */
+
+/* NH (input) integer scalar */
+/* NH is the number of columns in array WH available for */
+/* workspace. NH.GE.1. */
+
+/* WH (workspace) COMPLEX array of size (LDWH,NH) */
+
+/* LDWH (input) integer scalar */
+/* Leading dimension of WH just as declared in the */
+/* calling procedure. LDWH.GE.3*NSHFTS-3. */
+
+/* NV (input) integer scalar */
+/* NV is the number of rows in WV available for workspace. */
+/* NV.GE.1. */
+
+/* WV (workspace) COMPLEX array of size */
+/* (LDWV,3*NSHFTS-3) */
+
+/* LDWV (input) integer scalar */
+/* LDWV is the leading dimension of WV as declared in the */
+/* in the calling subroutine. LDWV.GE.NV. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* Reference: */
+
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part I: Maintaining Well Focused Shifts, and */
+/* Level 3 Performance, SIAM Journal of Matrix Analysis, */
+/* volume 23, pages 929--947, 2002. */
+
+/* ================================================================ */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* ==== If there are no shifts, then there is nothing to do. ==== */
+
+ /* Parameter adjustments */
+ --s;
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ wv_dim1 = *ldwv;
+ wv_offset = 1 + wv_dim1;
+ wv -= wv_offset;
+ wh_dim1 = *ldwh;
+ wh_offset = 1 + wh_dim1;
+ wh -= wh_offset;
+
+ /* Function Body */
+ if (*nshfts < 2) {
+ return 0;
+ }
+
+/* ==== If the active block is empty or 1-by-1, then there */
+/* . is nothing to do. ==== */
+
+ if (*ktop >= *kbot) {
+ return 0;
+ }
+
+/* ==== NSHFTS is supposed to be even, but if it is odd, */
+/* . then simply reduce it by one. ==== */
+
+ ns = *nshfts - *nshfts % 2;
+
+/* ==== Machine constants for deflation ==== */
+
+ safmin = slamch_("SAFE MINIMUM");
+ safmax = 1.f / safmin;
+ slabad_(&safmin, &safmax);
+ ulp = slamch_("PRECISION");
+ smlnum = safmin * ((real) (*n) / ulp);
+
+/* ==== Use accumulated reflections to update far-from-diagonal */
+/* . entries ? ==== */
+
+ accum = *kacc22 == 1 || *kacc22 == 2;
+
+/* ==== If so, exploit the 2-by-2 block structure? ==== */
+
+ blk22 = ns > 2 && *kacc22 == 2;
+
+/* ==== clear trash ==== */
+
+ if (*ktop + 2 <= *kbot) {
+ i__1 = *ktop + 2 + *ktop * h_dim1;
+ h__[i__1].r = 0.f, h__[i__1].i = 0.f;
+ }
+
+/* ==== NBMPS = number of 2-shift bulges in the chain ==== */
+
+ nbmps = ns / 2;
+
+/* ==== KDU = width of slab ==== */
+
+ kdu = nbmps * 6 - 3;
+
+/* ==== Create and chase chains of NBMPS bulges ==== */
+
+ i__1 = *kbot - 2;
+ i__2 = nbmps * 3 - 2;
+ for (incol = (1 - nbmps) * 3 + *ktop - 1; i__2 < 0 ? incol >= i__1 :
+ incol <= i__1; incol += i__2) {
+ ndcol = incol + kdu;
+ if (accum) {
+ claset_("ALL", &kdu, &kdu, &c_b1, &c_b2, &u[u_offset], ldu);
+ }
+
+/* ==== Near-the-diagonal bulge chase. The following loop */
+/* . performs the near-the-diagonal part of a small bulge */
+/* . multi-shift QR sweep. Each 6*NBMPS-2 column diagonal */
+/* . chunk extends from column INCOL to column NDCOL */
+/* . (including both column INCOL and column NDCOL). The */
+/* . following loop chases a 3*NBMPS column long chain of */
+/* . NBMPS bulges 3*NBMPS-2 columns to the right. (INCOL */
+/* . may be less than KTOP and NDCOL may be greater than */
+/* . KBOT indicating phantom columns from which to chase */
+/* . bulges before they are actually introduced or to which */
+/* . to chase bulges beyond column KBOT.) ==== */
+
+/* Computing MIN */
+ i__4 = incol + nbmps * 3 - 3, i__5 = *kbot - 2;
+ i__3 = min(i__4,i__5);
+ for (krcol = incol; krcol <= i__3; ++krcol) {
+
+/* ==== Bulges number MTOP to MBOT are active double implicit */
+/* . shift bulges. There may or may not also be small */
+/* . 2-by-2 bulge, if there is room. The inactive bulges */
+/* . (if any) must wait until the active bulges have moved */
+/* . down the diagonal to make room. The phantom matrix */
+/* . paradigm described above helps keep track. ==== */
+
+/* Computing MAX */
+ i__4 = 1, i__5 = (*ktop - 1 - krcol + 2) / 3 + 1;
+ mtop = max(i__4,i__5);
+/* Computing MIN */
+ i__4 = nbmps, i__5 = (*kbot - krcol) / 3;
+ mbot = min(i__4,i__5);
+ m22 = mbot + 1;
+ bmp22 = mbot < nbmps && krcol + (m22 - 1) * 3 == *kbot - 2;
+
+/* ==== Generate reflections to chase the chain right */
+/* . one column. (The minimum value of K is KTOP-1.) ==== */
+
+ i__4 = mbot;
+ for (m = mtop; m <= i__4; ++m) {
+ k = krcol + (m - 1) * 3;
+ if (k == *ktop - 1) {
+ claqr1_(&c__3, &h__[*ktop + *ktop * h_dim1], ldh, &s[(m <<
+ 1) - 1], &s[m * 2], &v[m * v_dim1 + 1]);
+ i__5 = m * v_dim1 + 1;
+ alpha.r = v[i__5].r, alpha.i = v[i__5].i;
+ clarfg_(&c__3, &alpha, &v[m * v_dim1 + 2], &c__1, &v[m *
+ v_dim1 + 1]);
+ } else {
+ i__5 = k + 1 + k * h_dim1;
+ beta.r = h__[i__5].r, beta.i = h__[i__5].i;
+ i__5 = m * v_dim1 + 2;
+ i__6 = k + 2 + k * h_dim1;
+ v[i__5].r = h__[i__6].r, v[i__5].i = h__[i__6].i;
+ i__5 = m * v_dim1 + 3;
+ i__6 = k + 3 + k * h_dim1;
+ v[i__5].r = h__[i__6].r, v[i__5].i = h__[i__6].i;
+ clarfg_(&c__3, &beta, &v[m * v_dim1 + 2], &c__1, &v[m *
+ v_dim1 + 1]);
+
+/* ==== A Bulge may collapse because of vigilant */
+/* . deflation or destructive underflow. In the */
+/* . underflow case, try the two-small-subdiagonals */
+/* . trick to try to reinflate the bulge. ==== */
+
+ i__5 = k + 3 + k * h_dim1;
+ i__6 = k + 3 + (k + 1) * h_dim1;
+ i__7 = k + 3 + (k + 2) * h_dim1;
+ if (h__[i__5].r != 0.f || h__[i__5].i != 0.f || (h__[i__6]
+ .r != 0.f || h__[i__6].i != 0.f) || h__[i__7].r ==
+ 0.f && h__[i__7].i == 0.f) {
+
+/* ==== Typical case: not collapsed (yet). ==== */
+
+ i__5 = k + 1 + k * h_dim1;
+ h__[i__5].r = beta.r, h__[i__5].i = beta.i;
+ i__5 = k + 2 + k * h_dim1;
+ h__[i__5].r = 0.f, h__[i__5].i = 0.f;
+ i__5 = k + 3 + k * h_dim1;
+ h__[i__5].r = 0.f, h__[i__5].i = 0.f;
+ } else {
+
+/* ==== Atypical case: collapsed. Attempt to */
+/* . reintroduce ignoring H(K+1,K) and H(K+2,K). */
+/* . If the fill resulting from the new */
+/* . reflector is too large, then abandon it. */
+/* . Otherwise, use the new one. ==== */
+
+ claqr1_(&c__3, &h__[k + 1 + (k + 1) * h_dim1], ldh, &
+ s[(m << 1) - 1], &s[m * 2], vt);
+ alpha.r = vt[0].r, alpha.i = vt[0].i;
+ clarfg_(&c__3, &alpha, &vt[1], &c__1, vt);
+ r_cnjg(&q__2, vt);
+ i__5 = k + 1 + k * h_dim1;
+ r_cnjg(&q__5, &vt[1]);
+ i__6 = k + 2 + k * h_dim1;
+ q__4.r = q__5.r * h__[i__6].r - q__5.i * h__[i__6].i,
+ q__4.i = q__5.r * h__[i__6].i + q__5.i * h__[
+ i__6].r;
+ q__3.r = h__[i__5].r + q__4.r, q__3.i = h__[i__5].i +
+ q__4.i;
+ q__1.r = q__2.r * q__3.r - q__2.i * q__3.i, q__1.i =
+ q__2.r * q__3.i + q__2.i * q__3.r;
+ refsum.r = q__1.r, refsum.i = q__1.i;
+
+ i__5 = k + 2 + k * h_dim1;
+ q__3.r = refsum.r * vt[1].r - refsum.i * vt[1].i,
+ q__3.i = refsum.r * vt[1].i + refsum.i * vt[1]
+ .r;
+ q__2.r = h__[i__5].r - q__3.r, q__2.i = h__[i__5].i -
+ q__3.i;
+ q__1.r = q__2.r, q__1.i = q__2.i;
+ q__5.r = refsum.r * vt[2].r - refsum.i * vt[2].i,
+ q__5.i = refsum.r * vt[2].i + refsum.i * vt[2]
+ .r;
+ q__4.r = q__5.r, q__4.i = q__5.i;
+ i__6 = k + k * h_dim1;
+ i__7 = k + 1 + (k + 1) * h_dim1;
+ i__8 = k + 2 + (k + 2) * h_dim1;
+ if ((r__1 = q__1.r, dabs(r__1)) + (r__2 = r_imag(&
+ q__1), dabs(r__2)) + ((r__3 = q__4.r, dabs(
+ r__3)) + (r__4 = r_imag(&q__4), dabs(r__4)))
+ > ulp * ((r__5 = h__[i__6].r, dabs(r__5)) + (
+ r__6 = r_imag(&h__[k + k * h_dim1]), dabs(
+ r__6)) + ((r__7 = h__[i__7].r, dabs(r__7)) + (
+ r__8 = r_imag(&h__[k + 1 + (k + 1) * h_dim1]),
+ dabs(r__8))) + ((r__9 = h__[i__8].r, dabs(
+ r__9)) + (r__10 = r_imag(&h__[k + 2 + (k + 2)
+ * h_dim1]), dabs(r__10))))) {
+
+/* ==== Starting a new bulge here would */
+/* . create non-negligible fill. Use */
+/* . the old one with trepidation. ==== */
+
+ i__5 = k + 1 + k * h_dim1;
+ h__[i__5].r = beta.r, h__[i__5].i = beta.i;
+ i__5 = k + 2 + k * h_dim1;
+ h__[i__5].r = 0.f, h__[i__5].i = 0.f;
+ i__5 = k + 3 + k * h_dim1;
+ h__[i__5].r = 0.f, h__[i__5].i = 0.f;
+ } else {
+
+/* ==== Starting a new bulge here would */
+/* . create only negligible fill. */
+/* . Replace the old reflector with */
+/* . the new one. ==== */
+
+ i__5 = k + 1 + k * h_dim1;
+ i__6 = k + 1 + k * h_dim1;
+ q__1.r = h__[i__6].r - refsum.r, q__1.i = h__[
+ i__6].i - refsum.i;
+ h__[i__5].r = q__1.r, h__[i__5].i = q__1.i;
+ i__5 = k + 2 + k * h_dim1;
+ h__[i__5].r = 0.f, h__[i__5].i = 0.f;
+ i__5 = k + 3 + k * h_dim1;
+ h__[i__5].r = 0.f, h__[i__5].i = 0.f;
+ i__5 = m * v_dim1 + 1;
+ v[i__5].r = vt[0].r, v[i__5].i = vt[0].i;
+ i__5 = m * v_dim1 + 2;
+ v[i__5].r = vt[1].r, v[i__5].i = vt[1].i;
+ i__5 = m * v_dim1 + 3;
+ v[i__5].r = vt[2].r, v[i__5].i = vt[2].i;
+ }
+ }
+ }
+/* L10: */
+ }
+
+/* ==== Generate a 2-by-2 reflection, if needed. ==== */
+
+ k = krcol + (m22 - 1) * 3;
+ if (bmp22) {
+ if (k == *ktop - 1) {
+ claqr1_(&c__2, &h__[k + 1 + (k + 1) * h_dim1], ldh, &s[(
+ m22 << 1) - 1], &s[m22 * 2], &v[m22 * v_dim1 + 1])
+ ;
+ i__4 = m22 * v_dim1 + 1;
+ beta.r = v[i__4].r, beta.i = v[i__4].i;
+ clarfg_(&c__2, &beta, &v[m22 * v_dim1 + 2], &c__1, &v[m22
+ * v_dim1 + 1]);
+ } else {
+ i__4 = k + 1 + k * h_dim1;
+ beta.r = h__[i__4].r, beta.i = h__[i__4].i;
+ i__4 = m22 * v_dim1 + 2;
+ i__5 = k + 2 + k * h_dim1;
+ v[i__4].r = h__[i__5].r, v[i__4].i = h__[i__5].i;
+ clarfg_(&c__2, &beta, &v[m22 * v_dim1 + 2], &c__1, &v[m22
+ * v_dim1 + 1]);
+ i__4 = k + 1 + k * h_dim1;
+ h__[i__4].r = beta.r, h__[i__4].i = beta.i;
+ i__4 = k + 2 + k * h_dim1;
+ h__[i__4].r = 0.f, h__[i__4].i = 0.f;
+ }
+ }
+
+/* ==== Multiply H by reflections from the left ==== */
+
+ if (accum) {
+ jbot = min(ndcol,*kbot);
+ } else if (*wantt) {
+ jbot = *n;
+ } else {
+ jbot = *kbot;
+ }
+ i__4 = jbot;
+ for (j = max(*ktop,krcol); j <= i__4; ++j) {
+/* Computing MIN */
+ i__5 = mbot, i__6 = (j - krcol + 2) / 3;
+ mend = min(i__5,i__6);
+ i__5 = mend;
+ for (m = mtop; m <= i__5; ++m) {
+ k = krcol + (m - 1) * 3;
+ r_cnjg(&q__2, &v[m * v_dim1 + 1]);
+ i__6 = k + 1 + j * h_dim1;
+ r_cnjg(&q__6, &v[m * v_dim1 + 2]);
+ i__7 = k + 2 + j * h_dim1;
+ q__5.r = q__6.r * h__[i__7].r - q__6.i * h__[i__7].i,
+ q__5.i = q__6.r * h__[i__7].i + q__6.i * h__[i__7]
+ .r;
+ q__4.r = h__[i__6].r + q__5.r, q__4.i = h__[i__6].i +
+ q__5.i;
+ r_cnjg(&q__8, &v[m * v_dim1 + 3]);
+ i__8 = k + 3 + j * h_dim1;
+ q__7.r = q__8.r * h__[i__8].r - q__8.i * h__[i__8].i,
+ q__7.i = q__8.r * h__[i__8].i + q__8.i * h__[i__8]
+ .r;
+ q__3.r = q__4.r + q__7.r, q__3.i = q__4.i + q__7.i;
+ q__1.r = q__2.r * q__3.r - q__2.i * q__3.i, q__1.i =
+ q__2.r * q__3.i + q__2.i * q__3.r;
+ refsum.r = q__1.r, refsum.i = q__1.i;
+ i__6 = k + 1 + j * h_dim1;
+ i__7 = k + 1 + j * h_dim1;
+ q__1.r = h__[i__7].r - refsum.r, q__1.i = h__[i__7].i -
+ refsum.i;
+ h__[i__6].r = q__1.r, h__[i__6].i = q__1.i;
+ i__6 = k + 2 + j * h_dim1;
+ i__7 = k + 2 + j * h_dim1;
+ i__8 = m * v_dim1 + 2;
+ q__2.r = refsum.r * v[i__8].r - refsum.i * v[i__8].i,
+ q__2.i = refsum.r * v[i__8].i + refsum.i * v[i__8]
+ .r;
+ q__1.r = h__[i__7].r - q__2.r, q__1.i = h__[i__7].i -
+ q__2.i;
+ h__[i__6].r = q__1.r, h__[i__6].i = q__1.i;
+ i__6 = k + 3 + j * h_dim1;
+ i__7 = k + 3 + j * h_dim1;
+ i__8 = m * v_dim1 + 3;
+ q__2.r = refsum.r * v[i__8].r - refsum.i * v[i__8].i,
+ q__2.i = refsum.r * v[i__8].i + refsum.i * v[i__8]
+ .r;
+ q__1.r = h__[i__7].r - q__2.r, q__1.i = h__[i__7].i -
+ q__2.i;
+ h__[i__6].r = q__1.r, h__[i__6].i = q__1.i;
+/* L20: */
+ }
+/* L30: */
+ }
+ if (bmp22) {
+ k = krcol + (m22 - 1) * 3;
+/* Computing MAX */
+ i__4 = k + 1;
+ i__5 = jbot;
+ for (j = max(i__4,*ktop); j <= i__5; ++j) {
+ r_cnjg(&q__2, &v[m22 * v_dim1 + 1]);
+ i__4 = k + 1 + j * h_dim1;
+ r_cnjg(&q__5, &v[m22 * v_dim1 + 2]);
+ i__6 = k + 2 + j * h_dim1;
+ q__4.r = q__5.r * h__[i__6].r - q__5.i * h__[i__6].i,
+ q__4.i = q__5.r * h__[i__6].i + q__5.i * h__[i__6]
+ .r;
+ q__3.r = h__[i__4].r + q__4.r, q__3.i = h__[i__4].i +
+ q__4.i;
+ q__1.r = q__2.r * q__3.r - q__2.i * q__3.i, q__1.i =
+ q__2.r * q__3.i + q__2.i * q__3.r;
+ refsum.r = q__1.r, refsum.i = q__1.i;
+ i__4 = k + 1 + j * h_dim1;
+ i__6 = k + 1 + j * h_dim1;
+ q__1.r = h__[i__6].r - refsum.r, q__1.i = h__[i__6].i -
+ refsum.i;
+ h__[i__4].r = q__1.r, h__[i__4].i = q__1.i;
+ i__4 = k + 2 + j * h_dim1;
+ i__6 = k + 2 + j * h_dim1;
+ i__7 = m22 * v_dim1 + 2;
+ q__2.r = refsum.r * v[i__7].r - refsum.i * v[i__7].i,
+ q__2.i = refsum.r * v[i__7].i + refsum.i * v[i__7]
+ .r;
+ q__1.r = h__[i__6].r - q__2.r, q__1.i = h__[i__6].i -
+ q__2.i;
+ h__[i__4].r = q__1.r, h__[i__4].i = q__1.i;
+/* L40: */
+ }
+ }
+
+/* ==== Multiply H by reflections from the right. */
+/* . Delay filling in the last row until the */
+/* . vigilant deflation check is complete. ==== */
+
+ if (accum) {
+ jtop = max(*ktop,incol);
+ } else if (*wantt) {
+ jtop = 1;
+ } else {
+ jtop = *ktop;
+ }
+ i__5 = mbot;
+ for (m = mtop; m <= i__5; ++m) {
+ i__4 = m * v_dim1 + 1;
+ if (v[i__4].r != 0.f || v[i__4].i != 0.f) {
+ k = krcol + (m - 1) * 3;
+/* Computing MIN */
+ i__6 = *kbot, i__7 = k + 3;
+ i__4 = min(i__6,i__7);
+ for (j = jtop; j <= i__4; ++j) {
+ i__6 = m * v_dim1 + 1;
+ i__7 = j + (k + 1) * h_dim1;
+ i__8 = m * v_dim1 + 2;
+ i__9 = j + (k + 2) * h_dim1;
+ q__4.r = v[i__8].r * h__[i__9].r - v[i__8].i * h__[
+ i__9].i, q__4.i = v[i__8].r * h__[i__9].i + v[
+ i__8].i * h__[i__9].r;
+ q__3.r = h__[i__7].r + q__4.r, q__3.i = h__[i__7].i +
+ q__4.i;
+ i__10 = m * v_dim1 + 3;
+ i__11 = j + (k + 3) * h_dim1;
+ q__5.r = v[i__10].r * h__[i__11].r - v[i__10].i * h__[
+ i__11].i, q__5.i = v[i__10].r * h__[i__11].i
+ + v[i__10].i * h__[i__11].r;
+ q__2.r = q__3.r + q__5.r, q__2.i = q__3.i + q__5.i;
+ q__1.r = v[i__6].r * q__2.r - v[i__6].i * q__2.i,
+ q__1.i = v[i__6].r * q__2.i + v[i__6].i *
+ q__2.r;
+ refsum.r = q__1.r, refsum.i = q__1.i;
+ i__6 = j + (k + 1) * h_dim1;
+ i__7 = j + (k + 1) * h_dim1;
+ q__1.r = h__[i__7].r - refsum.r, q__1.i = h__[i__7].i
+ - refsum.i;
+ h__[i__6].r = q__1.r, h__[i__6].i = q__1.i;
+ i__6 = j + (k + 2) * h_dim1;
+ i__7 = j + (k + 2) * h_dim1;
+ r_cnjg(&q__3, &v[m * v_dim1 + 2]);
+ q__2.r = refsum.r * q__3.r - refsum.i * q__3.i,
+ q__2.i = refsum.r * q__3.i + refsum.i *
+ q__3.r;
+ q__1.r = h__[i__7].r - q__2.r, q__1.i = h__[i__7].i -
+ q__2.i;
+ h__[i__6].r = q__1.r, h__[i__6].i = q__1.i;
+ i__6 = j + (k + 3) * h_dim1;
+ i__7 = j + (k + 3) * h_dim1;
+ r_cnjg(&q__3, &v[m * v_dim1 + 3]);
+ q__2.r = refsum.r * q__3.r - refsum.i * q__3.i,
+ q__2.i = refsum.r * q__3.i + refsum.i *
+ q__3.r;
+ q__1.r = h__[i__7].r - q__2.r, q__1.i = h__[i__7].i -
+ q__2.i;
+ h__[i__6].r = q__1.r, h__[i__6].i = q__1.i;
+/* L50: */
+ }
+
+ if (accum) {
+
+/* ==== Accumulate U. (If necessary, update Z later */
+/* . with with an efficient matrix-matrix */
+/* . multiply.) ==== */
+
+ kms = k - incol;
+/* Computing MAX */
+ i__4 = 1, i__6 = *ktop - incol;
+ i__7 = kdu;
+ for (j = max(i__4,i__6); j <= i__7; ++j) {
+ i__4 = m * v_dim1 + 1;
+ i__6 = j + (kms + 1) * u_dim1;
+ i__8 = m * v_dim1 + 2;
+ i__9 = j + (kms + 2) * u_dim1;
+ q__4.r = v[i__8].r * u[i__9].r - v[i__8].i * u[
+ i__9].i, q__4.i = v[i__8].r * u[i__9].i +
+ v[i__8].i * u[i__9].r;
+ q__3.r = u[i__6].r + q__4.r, q__3.i = u[i__6].i +
+ q__4.i;
+ i__10 = m * v_dim1 + 3;
+ i__11 = j + (kms + 3) * u_dim1;
+ q__5.r = v[i__10].r * u[i__11].r - v[i__10].i * u[
+ i__11].i, q__5.i = v[i__10].r * u[i__11]
+ .i + v[i__10].i * u[i__11].r;
+ q__2.r = q__3.r + q__5.r, q__2.i = q__3.i +
+ q__5.i;
+ q__1.r = v[i__4].r * q__2.r - v[i__4].i * q__2.i,
+ q__1.i = v[i__4].r * q__2.i + v[i__4].i *
+ q__2.r;
+ refsum.r = q__1.r, refsum.i = q__1.i;
+ i__4 = j + (kms + 1) * u_dim1;
+ i__6 = j + (kms + 1) * u_dim1;
+ q__1.r = u[i__6].r - refsum.r, q__1.i = u[i__6].i
+ - refsum.i;
+ u[i__4].r = q__1.r, u[i__4].i = q__1.i;
+ i__4 = j + (kms + 2) * u_dim1;
+ i__6 = j + (kms + 2) * u_dim1;
+ r_cnjg(&q__3, &v[m * v_dim1 + 2]);
+ q__2.r = refsum.r * q__3.r - refsum.i * q__3.i,
+ q__2.i = refsum.r * q__3.i + refsum.i *
+ q__3.r;
+ q__1.r = u[i__6].r - q__2.r, q__1.i = u[i__6].i -
+ q__2.i;
+ u[i__4].r = q__1.r, u[i__4].i = q__1.i;
+ i__4 = j + (kms + 3) * u_dim1;
+ i__6 = j + (kms + 3) * u_dim1;
+ r_cnjg(&q__3, &v[m * v_dim1 + 3]);
+ q__2.r = refsum.r * q__3.r - refsum.i * q__3.i,
+ q__2.i = refsum.r * q__3.i + refsum.i *
+ q__3.r;
+ q__1.r = u[i__6].r - q__2.r, q__1.i = u[i__6].i -
+ q__2.i;
+ u[i__4].r = q__1.r, u[i__4].i = q__1.i;
+/* L60: */
+ }
+ } else if (*wantz) {
+
+/* ==== U is not accumulated, so update Z */
+/* . now by multiplying by reflections */
+/* . from the right. ==== */
+
+ i__7 = *ihiz;
+ for (j = *iloz; j <= i__7; ++j) {
+ i__4 = m * v_dim1 + 1;
+ i__6 = j + (k + 1) * z_dim1;
+ i__8 = m * v_dim1 + 2;
+ i__9 = j + (k + 2) * z_dim1;
+ q__4.r = v[i__8].r * z__[i__9].r - v[i__8].i *
+ z__[i__9].i, q__4.i = v[i__8].r * z__[
+ i__9].i + v[i__8].i * z__[i__9].r;
+ q__3.r = z__[i__6].r + q__4.r, q__3.i = z__[i__6]
+ .i + q__4.i;
+ i__10 = m * v_dim1 + 3;
+ i__11 = j + (k + 3) * z_dim1;
+ q__5.r = v[i__10].r * z__[i__11].r - v[i__10].i *
+ z__[i__11].i, q__5.i = v[i__10].r * z__[
+ i__11].i + v[i__10].i * z__[i__11].r;
+ q__2.r = q__3.r + q__5.r, q__2.i = q__3.i +
+ q__5.i;
+ q__1.r = v[i__4].r * q__2.r - v[i__4].i * q__2.i,
+ q__1.i = v[i__4].r * q__2.i + v[i__4].i *
+ q__2.r;
+ refsum.r = q__1.r, refsum.i = q__1.i;
+ i__4 = j + (k + 1) * z_dim1;
+ i__6 = j + (k + 1) * z_dim1;
+ q__1.r = z__[i__6].r - refsum.r, q__1.i = z__[
+ i__6].i - refsum.i;
+ z__[i__4].r = q__1.r, z__[i__4].i = q__1.i;
+ i__4 = j + (k + 2) * z_dim1;
+ i__6 = j + (k + 2) * z_dim1;
+ r_cnjg(&q__3, &v[m * v_dim1 + 2]);
+ q__2.r = refsum.r * q__3.r - refsum.i * q__3.i,
+ q__2.i = refsum.r * q__3.i + refsum.i *
+ q__3.r;
+ q__1.r = z__[i__6].r - q__2.r, q__1.i = z__[i__6]
+ .i - q__2.i;
+ z__[i__4].r = q__1.r, z__[i__4].i = q__1.i;
+ i__4 = j + (k + 3) * z_dim1;
+ i__6 = j + (k + 3) * z_dim1;
+ r_cnjg(&q__3, &v[m * v_dim1 + 3]);
+ q__2.r = refsum.r * q__3.r - refsum.i * q__3.i,
+ q__2.i = refsum.r * q__3.i + refsum.i *
+ q__3.r;
+ q__1.r = z__[i__6].r - q__2.r, q__1.i = z__[i__6]
+ .i - q__2.i;
+ z__[i__4].r = q__1.r, z__[i__4].i = q__1.i;
+/* L70: */
+ }
+ }
+ }
+/* L80: */
+ }
+
+/* ==== Special case: 2-by-2 reflection (if needed) ==== */
+
+ k = krcol + (m22 - 1) * 3;
+ i__5 = m22 * v_dim1 + 1;
+ if (bmp22 && (v[i__5].r != 0.f || v[i__5].i != 0.f)) {
+/* Computing MIN */
+ i__7 = *kbot, i__4 = k + 3;
+ i__5 = min(i__7,i__4);
+ for (j = jtop; j <= i__5; ++j) {
+ i__7 = m22 * v_dim1 + 1;
+ i__4 = j + (k + 1) * h_dim1;
+ i__6 = m22 * v_dim1 + 2;
+ i__8 = j + (k + 2) * h_dim1;
+ q__3.r = v[i__6].r * h__[i__8].r - v[i__6].i * h__[i__8]
+ .i, q__3.i = v[i__6].r * h__[i__8].i + v[i__6].i *
+ h__[i__8].r;
+ q__2.r = h__[i__4].r + q__3.r, q__2.i = h__[i__4].i +
+ q__3.i;
+ q__1.r = v[i__7].r * q__2.r - v[i__7].i * q__2.i, q__1.i =
+ v[i__7].r * q__2.i + v[i__7].i * q__2.r;
+ refsum.r = q__1.r, refsum.i = q__1.i;
+ i__7 = j + (k + 1) * h_dim1;
+ i__4 = j + (k + 1) * h_dim1;
+ q__1.r = h__[i__4].r - refsum.r, q__1.i = h__[i__4].i -
+ refsum.i;
+ h__[i__7].r = q__1.r, h__[i__7].i = q__1.i;
+ i__7 = j + (k + 2) * h_dim1;
+ i__4 = j + (k + 2) * h_dim1;
+ r_cnjg(&q__3, &v[m22 * v_dim1 + 2]);
+ q__2.r = refsum.r * q__3.r - refsum.i * q__3.i, q__2.i =
+ refsum.r * q__3.i + refsum.i * q__3.r;
+ q__1.r = h__[i__4].r - q__2.r, q__1.i = h__[i__4].i -
+ q__2.i;
+ h__[i__7].r = q__1.r, h__[i__7].i = q__1.i;
+/* L90: */
+ }
+
+ if (accum) {
+ kms = k - incol;
+/* Computing MAX */
+ i__5 = 1, i__7 = *ktop - incol;
+ i__4 = kdu;
+ for (j = max(i__5,i__7); j <= i__4; ++j) {
+ i__5 = m22 * v_dim1 + 1;
+ i__7 = j + (kms + 1) * u_dim1;
+ i__6 = m22 * v_dim1 + 2;
+ i__8 = j + (kms + 2) * u_dim1;
+ q__3.r = v[i__6].r * u[i__8].r - v[i__6].i * u[i__8]
+ .i, q__3.i = v[i__6].r * u[i__8].i + v[i__6]
+ .i * u[i__8].r;
+ q__2.r = u[i__7].r + q__3.r, q__2.i = u[i__7].i +
+ q__3.i;
+ q__1.r = v[i__5].r * q__2.r - v[i__5].i * q__2.i,
+ q__1.i = v[i__5].r * q__2.i + v[i__5].i *
+ q__2.r;
+ refsum.r = q__1.r, refsum.i = q__1.i;
+ i__5 = j + (kms + 1) * u_dim1;
+ i__7 = j + (kms + 1) * u_dim1;
+ q__1.r = u[i__7].r - refsum.r, q__1.i = u[i__7].i -
+ refsum.i;
+ u[i__5].r = q__1.r, u[i__5].i = q__1.i;
+ i__5 = j + (kms + 2) * u_dim1;
+ i__7 = j + (kms + 2) * u_dim1;
+ r_cnjg(&q__3, &v[m22 * v_dim1 + 2]);
+ q__2.r = refsum.r * q__3.r - refsum.i * q__3.i,
+ q__2.i = refsum.r * q__3.i + refsum.i *
+ q__3.r;
+ q__1.r = u[i__7].r - q__2.r, q__1.i = u[i__7].i -
+ q__2.i;
+ u[i__5].r = q__1.r, u[i__5].i = q__1.i;
+/* L100: */
+ }
+ } else if (*wantz) {
+ i__4 = *ihiz;
+ for (j = *iloz; j <= i__4; ++j) {
+ i__5 = m22 * v_dim1 + 1;
+ i__7 = j + (k + 1) * z_dim1;
+ i__6 = m22 * v_dim1 + 2;
+ i__8 = j + (k + 2) * z_dim1;
+ q__3.r = v[i__6].r * z__[i__8].r - v[i__6].i * z__[
+ i__8].i, q__3.i = v[i__6].r * z__[i__8].i + v[
+ i__6].i * z__[i__8].r;
+ q__2.r = z__[i__7].r + q__3.r, q__2.i = z__[i__7].i +
+ q__3.i;
+ q__1.r = v[i__5].r * q__2.r - v[i__5].i * q__2.i,
+ q__1.i = v[i__5].r * q__2.i + v[i__5].i *
+ q__2.r;
+ refsum.r = q__1.r, refsum.i = q__1.i;
+ i__5 = j + (k + 1) * z_dim1;
+ i__7 = j + (k + 1) * z_dim1;
+ q__1.r = z__[i__7].r - refsum.r, q__1.i = z__[i__7].i
+ - refsum.i;
+ z__[i__5].r = q__1.r, z__[i__5].i = q__1.i;
+ i__5 = j + (k + 2) * z_dim1;
+ i__7 = j + (k + 2) * z_dim1;
+ r_cnjg(&q__3, &v[m22 * v_dim1 + 2]);
+ q__2.r = refsum.r * q__3.r - refsum.i * q__3.i,
+ q__2.i = refsum.r * q__3.i + refsum.i *
+ q__3.r;
+ q__1.r = z__[i__7].r - q__2.r, q__1.i = z__[i__7].i -
+ q__2.i;
+ z__[i__5].r = q__1.r, z__[i__5].i = q__1.i;
+/* L110: */
+ }
+ }
+ }
+
+/* ==== Vigilant deflation check ==== */
+
+ mstart = mtop;
+ if (krcol + (mstart - 1) * 3 < *ktop) {
+ ++mstart;
+ }
+ mend = mbot;
+ if (bmp22) {
+ ++mend;
+ }
+ if (krcol == *kbot - 2) {
+ ++mend;
+ }
+ i__4 = mend;
+ for (m = mstart; m <= i__4; ++m) {
+/* Computing MIN */
+ i__5 = *kbot - 1, i__7 = krcol + (m - 1) * 3;
+ k = min(i__5,i__7);
+
+/* ==== The following convergence test requires that */
+/* . the tradition small-compared-to-nearby-diagonals */
+/* . criterion and the Ahues & Tisseur (LAWN 122, 1997) */
+/* . criteria both be satisfied. The latter improves */
+/* . accuracy in some examples. Falling back on an */
+/* . alternate convergence criterion when TST1 or TST2 */
+/* . is zero (as done here) is traditional but probably */
+/* . unnecessary. ==== */
+
+ i__5 = k + 1 + k * h_dim1;
+ if (h__[i__5].r != 0.f || h__[i__5].i != 0.f) {
+ i__5 = k + k * h_dim1;
+ i__7 = k + 1 + (k + 1) * h_dim1;
+ tst1 = (r__1 = h__[i__5].r, dabs(r__1)) + (r__2 = r_imag(&
+ h__[k + k * h_dim1]), dabs(r__2)) + ((r__3 = h__[
+ i__7].r, dabs(r__3)) + (r__4 = r_imag(&h__[k + 1
+ + (k + 1) * h_dim1]), dabs(r__4)));
+ if (tst1 == 0.f) {
+ if (k >= *ktop + 1) {
+ i__5 = k + (k - 1) * h_dim1;
+ tst1 += (r__1 = h__[i__5].r, dabs(r__1)) + (r__2 =
+ r_imag(&h__[k + (k - 1) * h_dim1]), dabs(
+ r__2));
+ }
+ if (k >= *ktop + 2) {
+ i__5 = k + (k - 2) * h_dim1;
+ tst1 += (r__1 = h__[i__5].r, dabs(r__1)) + (r__2 =
+ r_imag(&h__[k + (k - 2) * h_dim1]), dabs(
+ r__2));
+ }
+ if (k >= *ktop + 3) {
+ i__5 = k + (k - 3) * h_dim1;
+ tst1 += (r__1 = h__[i__5].r, dabs(r__1)) + (r__2 =
+ r_imag(&h__[k + (k - 3) * h_dim1]), dabs(
+ r__2));
+ }
+ if (k <= *kbot - 2) {
+ i__5 = k + 2 + (k + 1) * h_dim1;
+ tst1 += (r__1 = h__[i__5].r, dabs(r__1)) + (r__2 =
+ r_imag(&h__[k + 2 + (k + 1) * h_dim1]),
+ dabs(r__2));
+ }
+ if (k <= *kbot - 3) {
+ i__5 = k + 3 + (k + 1) * h_dim1;
+ tst1 += (r__1 = h__[i__5].r, dabs(r__1)) + (r__2 =
+ r_imag(&h__[k + 3 + (k + 1) * h_dim1]),
+ dabs(r__2));
+ }
+ if (k <= *kbot - 4) {
+ i__5 = k + 4 + (k + 1) * h_dim1;
+ tst1 += (r__1 = h__[i__5].r, dabs(r__1)) + (r__2 =
+ r_imag(&h__[k + 4 + (k + 1) * h_dim1]),
+ dabs(r__2));
+ }
+ }
+ i__5 = k + 1 + k * h_dim1;
+/* Computing MAX */
+ r__3 = smlnum, r__4 = ulp * tst1;
+ if ((r__1 = h__[i__5].r, dabs(r__1)) + (r__2 = r_imag(&
+ h__[k + 1 + k * h_dim1]), dabs(r__2)) <= dmax(
+ r__3,r__4)) {
+/* Computing MAX */
+ i__5 = k + 1 + k * h_dim1;
+ i__7 = k + (k + 1) * h_dim1;
+ r__5 = (r__1 = h__[i__5].r, dabs(r__1)) + (r__2 =
+ r_imag(&h__[k + 1 + k * h_dim1]), dabs(r__2)),
+ r__6 = (r__3 = h__[i__7].r, dabs(r__3)) + (
+ r__4 = r_imag(&h__[k + (k + 1) * h_dim1]),
+ dabs(r__4));
+ h12 = dmax(r__5,r__6);
+/* Computing MIN */
+ i__5 = k + 1 + k * h_dim1;
+ i__7 = k + (k + 1) * h_dim1;
+ r__5 = (r__1 = h__[i__5].r, dabs(r__1)) + (r__2 =
+ r_imag(&h__[k + 1 + k * h_dim1]), dabs(r__2)),
+ r__6 = (r__3 = h__[i__7].r, dabs(r__3)) + (
+ r__4 = r_imag(&h__[k + (k + 1) * h_dim1]),
+ dabs(r__4));
+ h21 = dmin(r__5,r__6);
+ i__5 = k + k * h_dim1;
+ i__7 = k + 1 + (k + 1) * h_dim1;
+ q__2.r = h__[i__5].r - h__[i__7].r, q__2.i = h__[i__5]
+ .i - h__[i__7].i;
+ q__1.r = q__2.r, q__1.i = q__2.i;
+/* Computing MAX */
+ i__6 = k + 1 + (k + 1) * h_dim1;
+ r__5 = (r__1 = h__[i__6].r, dabs(r__1)) + (r__2 =
+ r_imag(&h__[k + 1 + (k + 1) * h_dim1]), dabs(
+ r__2)), r__6 = (r__3 = q__1.r, dabs(r__3)) + (
+ r__4 = r_imag(&q__1), dabs(r__4));
+ h11 = dmax(r__5,r__6);
+ i__5 = k + k * h_dim1;
+ i__7 = k + 1 + (k + 1) * h_dim1;
+ q__2.r = h__[i__5].r - h__[i__7].r, q__2.i = h__[i__5]
+ .i - h__[i__7].i;
+ q__1.r = q__2.r, q__1.i = q__2.i;
+/* Computing MIN */
+ i__6 = k + 1 + (k + 1) * h_dim1;
+ r__5 = (r__1 = h__[i__6].r, dabs(r__1)) + (r__2 =
+ r_imag(&h__[k + 1 + (k + 1) * h_dim1]), dabs(
+ r__2)), r__6 = (r__3 = q__1.r, dabs(r__3)) + (
+ r__4 = r_imag(&q__1), dabs(r__4));
+ h22 = dmin(r__5,r__6);
+ scl = h11 + h12;
+ tst2 = h22 * (h11 / scl);
+
+/* Computing MAX */
+ r__1 = smlnum, r__2 = ulp * tst2;
+ if (tst2 == 0.f || h21 * (h12 / scl) <= dmax(r__1,
+ r__2)) {
+ i__5 = k + 1 + k * h_dim1;
+ h__[i__5].r = 0.f, h__[i__5].i = 0.f;
+ }
+ }
+ }
+/* L120: */
+ }
+
+/* ==== Fill in the last row of each bulge. ==== */
+
+/* Computing MIN */
+ i__4 = nbmps, i__5 = (*kbot - krcol - 1) / 3;
+ mend = min(i__4,i__5);
+ i__4 = mend;
+ for (m = mtop; m <= i__4; ++m) {
+ k = krcol + (m - 1) * 3;
+ i__5 = m * v_dim1 + 1;
+ i__7 = m * v_dim1 + 3;
+ q__2.r = v[i__5].r * v[i__7].r - v[i__5].i * v[i__7].i,
+ q__2.i = v[i__5].r * v[i__7].i + v[i__5].i * v[i__7]
+ .r;
+ i__6 = k + 4 + (k + 3) * h_dim1;
+ q__1.r = q__2.r * h__[i__6].r - q__2.i * h__[i__6].i, q__1.i =
+ q__2.r * h__[i__6].i + q__2.i * h__[i__6].r;
+ refsum.r = q__1.r, refsum.i = q__1.i;
+ i__5 = k + 4 + (k + 1) * h_dim1;
+ q__1.r = -refsum.r, q__1.i = -refsum.i;
+ h__[i__5].r = q__1.r, h__[i__5].i = q__1.i;
+ i__5 = k + 4 + (k + 2) * h_dim1;
+ q__2.r = -refsum.r, q__2.i = -refsum.i;
+ r_cnjg(&q__3, &v[m * v_dim1 + 2]);
+ q__1.r = q__2.r * q__3.r - q__2.i * q__3.i, q__1.i = q__2.r *
+ q__3.i + q__2.i * q__3.r;
+ h__[i__5].r = q__1.r, h__[i__5].i = q__1.i;
+ i__5 = k + 4 + (k + 3) * h_dim1;
+ i__7 = k + 4 + (k + 3) * h_dim1;
+ r_cnjg(&q__3, &v[m * v_dim1 + 3]);
+ q__2.r = refsum.r * q__3.r - refsum.i * q__3.i, q__2.i =
+ refsum.r * q__3.i + refsum.i * q__3.r;
+ q__1.r = h__[i__7].r - q__2.r, q__1.i = h__[i__7].i - q__2.i;
+ h__[i__5].r = q__1.r, h__[i__5].i = q__1.i;
+/* L130: */
+ }
+
+/* ==== End of near-the-diagonal bulge chase. ==== */
+
+/* L140: */
+ }
+
+/* ==== Use U (if accumulated) to update far-from-diagonal */
+/* . entries in H. If required, use U to update Z as */
+/* . well. ==== */
+
+ if (accum) {
+ if (*wantt) {
+ jtop = 1;
+ jbot = *n;
+ } else {
+ jtop = *ktop;
+ jbot = *kbot;
+ }
+ if (! blk22 || incol < *ktop || ndcol > *kbot || ns <= 2) {
+
+/* ==== Updates not exploiting the 2-by-2 block */
+/* . structure of U. K1 and NU keep track of */
+/* . the location and size of U in the special */
+/* . cases of introducing bulges and chasing */
+/* . bulges off the bottom. In these special */
+/* . cases and in case the number of shifts */
+/* . is NS = 2, there is no 2-by-2 block */
+/* . structure to exploit. ==== */
+
+/* Computing MAX */
+ i__3 = 1, i__4 = *ktop - incol;
+ k1 = max(i__3,i__4);
+/* Computing MAX */
+ i__3 = 0, i__4 = ndcol - *kbot;
+ nu = kdu - max(i__3,i__4) - k1 + 1;
+
+/* ==== Horizontal Multiply ==== */
+
+ i__3 = jbot;
+ i__4 = *nh;
+ for (jcol = min(ndcol,*kbot) + 1; i__4 < 0 ? jcol >= i__3 :
+ jcol <= i__3; jcol += i__4) {
+/* Computing MIN */
+ i__5 = *nh, i__7 = jbot - jcol + 1;
+ jlen = min(i__5,i__7);
+ cgemm_("C", "N", &nu, &jlen, &nu, &c_b2, &u[k1 + k1 *
+ u_dim1], ldu, &h__[incol + k1 + jcol * h_dim1],
+ ldh, &c_b1, &wh[wh_offset], ldwh);
+ clacpy_("ALL", &nu, &jlen, &wh[wh_offset], ldwh, &h__[
+ incol + k1 + jcol * h_dim1], ldh);
+/* L150: */
+ }
+
+/* ==== Vertical multiply ==== */
+
+ i__4 = max(*ktop,incol) - 1;
+ i__3 = *nv;
+ for (jrow = jtop; i__3 < 0 ? jrow >= i__4 : jrow <= i__4;
+ jrow += i__3) {
+/* Computing MIN */
+ i__5 = *nv, i__7 = max(*ktop,incol) - jrow;
+ jlen = min(i__5,i__7);
+ cgemm_("N", "N", &jlen, &nu, &nu, &c_b2, &h__[jrow + (
+ incol + k1) * h_dim1], ldh, &u[k1 + k1 * u_dim1],
+ ldu, &c_b1, &wv[wv_offset], ldwv);
+ clacpy_("ALL", &jlen, &nu, &wv[wv_offset], ldwv, &h__[
+ jrow + (incol + k1) * h_dim1], ldh);
+/* L160: */
+ }
+
+/* ==== Z multiply (also vertical) ==== */
+
+ if (*wantz) {
+ i__3 = *ihiz;
+ i__4 = *nv;
+ for (jrow = *iloz; i__4 < 0 ? jrow >= i__3 : jrow <= i__3;
+ jrow += i__4) {
+/* Computing MIN */
+ i__5 = *nv, i__7 = *ihiz - jrow + 1;
+ jlen = min(i__5,i__7);
+ cgemm_("N", "N", &jlen, &nu, &nu, &c_b2, &z__[jrow + (
+ incol + k1) * z_dim1], ldz, &u[k1 + k1 *
+ u_dim1], ldu, &c_b1, &wv[wv_offset], ldwv);
+ clacpy_("ALL", &jlen, &nu, &wv[wv_offset], ldwv, &z__[
+ jrow + (incol + k1) * z_dim1], ldz)
+ ;
+/* L170: */
+ }
+ }
+ } else {
+
+/* ==== Updates exploiting U's 2-by-2 block structure. */
+/* . (I2, I4, J2, J4 are the last rows and columns */
+/* . of the blocks.) ==== */
+
+ i2 = (kdu + 1) / 2;
+ i4 = kdu;
+ j2 = i4 - i2;
+ j4 = kdu;
+
+/* ==== KZS and KNZ deal with the band of zeros */
+/* . along the diagonal of one of the triangular */
+/* . blocks. ==== */
+
+ kzs = j4 - j2 - (ns + 1);
+ knz = ns + 1;
+
+/* ==== Horizontal multiply ==== */
+
+ i__4 = jbot;
+ i__3 = *nh;
+ for (jcol = min(ndcol,*kbot) + 1; i__3 < 0 ? jcol >= i__4 :
+ jcol <= i__4; jcol += i__3) {
+/* Computing MIN */
+ i__5 = *nh, i__7 = jbot - jcol + 1;
+ jlen = min(i__5,i__7);
+
+/* ==== Copy bottom of H to top+KZS of scratch ==== */
+/* (The first KZS rows get multiplied by zero.) ==== */
+
+ clacpy_("ALL", &knz, &jlen, &h__[incol + 1 + j2 + jcol *
+ h_dim1], ldh, &wh[kzs + 1 + wh_dim1], ldwh);
+
+/* ==== Multiply by U21' ==== */
+
+ claset_("ALL", &kzs, &jlen, &c_b1, &c_b1, &wh[wh_offset],
+ ldwh);
+ ctrmm_("L", "U", "C", "N", &knz, &jlen, &c_b2, &u[j2 + 1
+ + (kzs + 1) * u_dim1], ldu, &wh[kzs + 1 + wh_dim1]
+, ldwh);
+
+/* ==== Multiply top of H by U11' ==== */
+
+ cgemm_("C", "N", &i2, &jlen, &j2, &c_b2, &u[u_offset],
+ ldu, &h__[incol + 1 + jcol * h_dim1], ldh, &c_b2,
+ &wh[wh_offset], ldwh);
+
+/* ==== Copy top of H to bottom of WH ==== */
+
+ clacpy_("ALL", &j2, &jlen, &h__[incol + 1 + jcol * h_dim1]
+, ldh, &wh[i2 + 1 + wh_dim1], ldwh);
+
+/* ==== Multiply by U21' ==== */
+
+ ctrmm_("L", "L", "C", "N", &j2, &jlen, &c_b2, &u[(i2 + 1)
+ * u_dim1 + 1], ldu, &wh[i2 + 1 + wh_dim1], ldwh);
+
+/* ==== Multiply by U22 ==== */
+
+ i__5 = i4 - i2;
+ i__7 = j4 - j2;
+ cgemm_("C", "N", &i__5, &jlen, &i__7, &c_b2, &u[j2 + 1 + (
+ i2 + 1) * u_dim1], ldu, &h__[incol + 1 + j2 +
+ jcol * h_dim1], ldh, &c_b2, &wh[i2 + 1 + wh_dim1],
+ ldwh);
+
+/* ==== Copy it back ==== */
+
+ clacpy_("ALL", &kdu, &jlen, &wh[wh_offset], ldwh, &h__[
+ incol + 1 + jcol * h_dim1], ldh);
+/* L180: */
+ }
+
+/* ==== Vertical multiply ==== */
+
+ i__3 = max(incol,*ktop) - 1;
+ i__4 = *nv;
+ for (jrow = jtop; i__4 < 0 ? jrow >= i__3 : jrow <= i__3;
+ jrow += i__4) {
+/* Computing MIN */
+ i__5 = *nv, i__7 = max(incol,*ktop) - jrow;
+ jlen = min(i__5,i__7);
+
+/* ==== Copy right of H to scratch (the first KZS */
+/* . columns get multiplied by zero) ==== */
+
+ clacpy_("ALL", &jlen, &knz, &h__[jrow + (incol + 1 + j2) *
+ h_dim1], ldh, &wv[(kzs + 1) * wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U21 ==== */
+
+ claset_("ALL", &jlen, &kzs, &c_b1, &c_b1, &wv[wv_offset],
+ ldwv);
+ ctrmm_("R", "U", "N", "N", &jlen, &knz, &c_b2, &u[j2 + 1
+ + (kzs + 1) * u_dim1], ldu, &wv[(kzs + 1) *
+ wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U11 ==== */
+
+ cgemm_("N", "N", &jlen, &i2, &j2, &c_b2, &h__[jrow + (
+ incol + 1) * h_dim1], ldh, &u[u_offset], ldu, &
+ c_b2, &wv[wv_offset], ldwv);
+
+/* ==== Copy left of H to right of scratch ==== */
+
+ clacpy_("ALL", &jlen, &j2, &h__[jrow + (incol + 1) *
+ h_dim1], ldh, &wv[(i2 + 1) * wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U21 ==== */
+
+ i__5 = i4 - i2;
+ ctrmm_("R", "L", "N", "N", &jlen, &i__5, &c_b2, &u[(i2 +
+ 1) * u_dim1 + 1], ldu, &wv[(i2 + 1) * wv_dim1 + 1]
+, ldwv);
+
+/* ==== Multiply by U22 ==== */
+
+ i__5 = i4 - i2;
+ i__7 = j4 - j2;
+ cgemm_("N", "N", &jlen, &i__5, &i__7, &c_b2, &h__[jrow + (
+ incol + 1 + j2) * h_dim1], ldh, &u[j2 + 1 + (i2 +
+ 1) * u_dim1], ldu, &c_b2, &wv[(i2 + 1) * wv_dim1
+ + 1], ldwv);
+
+/* ==== Copy it back ==== */
+
+ clacpy_("ALL", &jlen, &kdu, &wv[wv_offset], ldwv, &h__[
+ jrow + (incol + 1) * h_dim1], ldh);
+/* L190: */
+ }
+
+/* ==== Multiply Z (also vertical) ==== */
+
+ if (*wantz) {
+ i__4 = *ihiz;
+ i__3 = *nv;
+ for (jrow = *iloz; i__3 < 0 ? jrow >= i__4 : jrow <= i__4;
+ jrow += i__3) {
+/* Computing MIN */
+ i__5 = *nv, i__7 = *ihiz - jrow + 1;
+ jlen = min(i__5,i__7);
+
+/* ==== Copy right of Z to left of scratch (first */
+/* . KZS columns get multiplied by zero) ==== */
+
+ clacpy_("ALL", &jlen, &knz, &z__[jrow + (incol + 1 +
+ j2) * z_dim1], ldz, &wv[(kzs + 1) * wv_dim1 +
+ 1], ldwv);
+
+/* ==== Multiply by U12 ==== */
+
+ claset_("ALL", &jlen, &kzs, &c_b1, &c_b1, &wv[
+ wv_offset], ldwv);
+ ctrmm_("R", "U", "N", "N", &jlen, &knz, &c_b2, &u[j2
+ + 1 + (kzs + 1) * u_dim1], ldu, &wv[(kzs + 1)
+ * wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U11 ==== */
+
+ cgemm_("N", "N", &jlen, &i2, &j2, &c_b2, &z__[jrow + (
+ incol + 1) * z_dim1], ldz, &u[u_offset], ldu,
+ &c_b2, &wv[wv_offset], ldwv);
+
+/* ==== Copy left of Z to right of scratch ==== */
+
+ clacpy_("ALL", &jlen, &j2, &z__[jrow + (incol + 1) *
+ z_dim1], ldz, &wv[(i2 + 1) * wv_dim1 + 1],
+ ldwv);
+
+/* ==== Multiply by U21 ==== */
+
+ i__5 = i4 - i2;
+ ctrmm_("R", "L", "N", "N", &jlen, &i__5, &c_b2, &u[(
+ i2 + 1) * u_dim1 + 1], ldu, &wv[(i2 + 1) *
+ wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U22 ==== */
+
+ i__5 = i4 - i2;
+ i__7 = j4 - j2;
+ cgemm_("N", "N", &jlen, &i__5, &i__7, &c_b2, &z__[
+ jrow + (incol + 1 + j2) * z_dim1], ldz, &u[j2
+ + 1 + (i2 + 1) * u_dim1], ldu, &c_b2, &wv[(i2
+ + 1) * wv_dim1 + 1], ldwv);
+
+/* ==== Copy the result back to Z ==== */
+
+ clacpy_("ALL", &jlen, &kdu, &wv[wv_offset], ldwv, &
+ z__[jrow + (incol + 1) * z_dim1], ldz);
+/* L200: */
+ }
+ }
+ }
+ }
+/* L210: */
+ }
+
+/* ==== End of CLAQR5 ==== */
+
+ return 0;
+} /* claqr5_ */
diff --git a/contrib/libs/clapack/claqsb.c b/contrib/libs/clapack/claqsb.c
new file mode 100644
index 0000000000..040ba50cc6
--- /dev/null
+++ b/contrib/libs/clapack/claqsb.c
@@ -0,0 +1,192 @@
+/* claqsb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int claqsb_(char *uplo, integer *n, integer *kd, complex *ab, 
+	 integer *ldab, real *s, real *scond, real *amax, char *equed)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+    real r__1;
+    complex q__1;
+
+    /* Local variables */
+    integer i__, j;
+    real cj, large;
+    extern logical lsame_(char *, char *);
+    real small;
+    extern doublereal slamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAQSB equilibrates a symmetric band matrix A using the scaling */
+/* factors in the vector S. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of super-diagonals of the matrix A if UPLO = 'U', */
+/* or the number of sub-diagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) COMPLEX array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, if INFO = 0, the triangular factor U or L from the */
+/* Cholesky factorization A = U'*U or A = L*L' of the band */
+/* matrix A, in the same storage format as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* S (input) REAL array, dimension (N) */
+/* The scale factors for A. */
+
+/* SCOND (input) REAL */
+/* Ratio of the smallest S(i) to the largest S(i). */
+
+/* AMAX (input) REAL */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies whether or not equilibration was done. */
+/* = 'N': No equilibration. */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if scaling should be done */
+/* based on the ratio of the scaling factors. If SCOND < THRESH, */
+/* scaling is done. */
+
+/* LARGE and SMALL are threshold values used to decide if scaling should */
+/* be done based on the absolute size of the largest matrix element. */
+/* If AMAX > LARGE or AMAX < SMALL, scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+    /* Parameter adjustments */
+    /* f2c convention: shift base pointers so 1-based Fortran indexing */
+    /* AB(i,j) -> ab[i + j*ab_dim1] and S(i) -> s[i] works directly. */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --s;
+
+    /* Function Body */
+    if (*n <= 0) {
+	*(unsigned char *)equed = 'N';
+	return 0;
+    }
+
+/* Initialize LARGE and SMALL. */
+
+    small = slamch_("Safe minimum") / slamch_("Precision");
+    large = 1.f / small;
+
+/* Skip scaling when the scale factors are nearly uniform (SCOND >= */
+/* THRESH = 0.1) and AMAX lies in [SMALL, LARGE]. */
+
+    if (*scond >= .1f && *amax >= small && *amax <= large) {
+
+/* No equilibration */
+
+	*(unsigned char *)equed = 'N';
+    } else {
+
+/* Replace A by diag(S) * A * diag(S). */
+
+	if (lsame_(uplo, "U")) {
+
+/* Upper triangle of A is stored in band format. */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = s[j];
+/* Computing MAX */
+		i__2 = 1, i__3 = j - *kd;
+		i__4 = j;
+		for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+/* A(i,j) := S(i)*A(i,j)*S(j); upper band storage: A(i,j) = AB(kd+1+i-j,j) */
+		    i__2 = *kd + 1 + i__ - j + j * ab_dim1;
+		    r__1 = cj * s[i__];
+		    i__3 = *kd + 1 + i__ - j + j * ab_dim1;
+		    q__1.r = r__1 * ab[i__3].r, q__1.i = r__1 * ab[i__3].i;
+		    ab[i__2].r = q__1.r, ab[i__2].i = q__1.i;
+/* L10: */
+		}
+/* L20: */
+	    }
+	} else {
+
+/* Lower triangle of A is stored. */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = s[j];
+/* Computing MIN */
+		i__2 = *n, i__3 = j + *kd;
+		i__4 = min(i__2,i__3);
+		for (i__ = j; i__ <= i__4; ++i__) {
+/* A(i,j) := S(i)*A(i,j)*S(j); lower band storage: A(i,j) = AB(1+i-j,j) */
+		    i__2 = i__ + 1 - j + j * ab_dim1;
+		    r__1 = cj * s[i__];
+		    i__3 = i__ + 1 - j + j * ab_dim1;
+		    q__1.r = r__1 * ab[i__3].r, q__1.i = r__1 * ab[i__3].i;
+		    ab[i__2].r = q__1.r, ab[i__2].i = q__1.i;
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+
+/* Report to the caller that A was overwritten by diag(S) * A * diag(S). */
+
+	*(unsigned char *)equed = 'Y';
+    }
+
+    return 0;
+
+/* End of CLAQSB */
+
+} /* claqsb_ */
diff --git a/contrib/libs/clapack/claqsp.c b/contrib/libs/clapack/claqsp.c
new file mode 100644
index 0000000000..f89a5f3467
--- /dev/null
+++ b/contrib/libs/clapack/claqsp.c
@@ -0,0 +1,179 @@
+/* claqsp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int claqsp_(char *uplo, integer *n, complex *ap, real *s, 
+	 real *scond, real *amax, char *equed)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3, i__4;
+    real r__1;
+    complex q__1;
+
+    /* Local variables */
+    integer i__, j, jc;
+    real cj, large;
+    extern logical lsame_(char *, char *);
+    real small;
+    extern doublereal slamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAQSP equilibrates a symmetric matrix A using the scaling factors */
+/* in the vector S. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the equilibrated matrix: diag(S) * A * diag(S), in */
+/* the same storage format as A. */
+
+/* S (input) REAL array, dimension (N) */
+/* The scale factors for A. */
+
+/* SCOND (input) REAL */
+/* Ratio of the smallest S(i) to the largest S(i). */
+
+/* AMAX (input) REAL */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies whether or not equilibration was done. */
+/* = 'N': No equilibration. */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if scaling should be done */
+/* based on the ratio of the scaling factors. If SCOND < THRESH, */
+/* scaling is done. */
+
+/* LARGE and SMALL are threshold values used to decide if scaling should */
+/* be done based on the absolute size of the largest matrix element. */
+/* If AMAX > LARGE or AMAX < SMALL, scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+    /* Parameter adjustments */
+    /* f2c convention: shift base pointers so 1-based Fortran indexing */
+    /* AP(k) -> ap[k] and S(i) -> s[i] works directly. */
+    --s;
+    --ap;
+
+    /* Function Body */
+    if (*n <= 0) {
+	*(unsigned char *)equed = 'N';
+	return 0;
+    }
+
+/* Initialize LARGE and SMALL. */
+
+    small = slamch_("Safe minimum") / slamch_("Precision");
+    large = 1.f / small;
+
+/* Skip scaling when the scale factors are nearly uniform (SCOND >= */
+/* THRESH = 0.1) and AMAX lies in [SMALL, LARGE]. */
+
+    if (*scond >= .1f && *amax >= small && *amax <= large) {
+
+/* No equilibration */
+
+	*(unsigned char *)equed = 'N';
+    } else {
+
+/* Replace A by diag(S) * A * diag(S). */
+
+	if (lsame_(uplo, "U")) {
+
+/* Upper triangle of A is stored. */
+
+/* JC is the packed-storage index of the first element of column j. */
+	    jc = 1;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = s[j];
+		i__2 = j;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+/* A(i,j) := S(i)*A(i,j)*S(j); packed upper: A(i,j) = AP(jc+i-1) */
+		    i__3 = jc + i__ - 1;
+		    r__1 = cj * s[i__];
+		    i__4 = jc + i__ - 1;
+		    q__1.r = r__1 * ap[i__4].r, q__1.i = r__1 * ap[i__4].i;
+		    ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+/* L10: */
+		}
+/* Column j of the upper triangle holds j elements. */
+		jc += j;
+/* L20: */
+	    }
+	} else {
+
+/* Lower triangle of A is stored. */
+
+/* JC is the packed-storage index of the diagonal element of column j. */
+	    jc = 1;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = s[j];
+		i__2 = *n;
+		for (i__ = j; i__ <= i__2; ++i__) {
+/* A(i,j) := S(i)*A(i,j)*S(j); packed lower: A(i,j) = AP(jc+i-j) */
+		    i__3 = jc + i__ - j;
+		    r__1 = cj * s[i__];
+		    i__4 = jc + i__ - j;
+		    q__1.r = r__1 * ap[i__4].r, q__1.i = r__1 * ap[i__4].i;
+		    ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+/* L30: */
+		}
+/* Column j of the lower triangle holds n-j+1 elements. */
+		jc = jc + *n - j + 1;
+/* L40: */
+	    }
+	}
+
+/* Report to the caller that A was overwritten by diag(S) * A * diag(S). */
+
+	*(unsigned char *)equed = 'Y';
+    }
+
+    return 0;
+
+/* End of CLAQSP */
+
+} /* claqsp_ */
diff --git a/contrib/libs/clapack/claqsy.c b/contrib/libs/clapack/claqsy.c
new file mode 100644
index 0000000000..eefd906968
--- /dev/null
+++ b/contrib/libs/clapack/claqsy.c
@@ -0,0 +1,182 @@
+/* claqsy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int claqsy_(char *uplo, integer *n, complex *a, integer *lda,
+ real *s, real *scond, real *amax, char *equed)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ real r__1;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, j;
+ real cj, large;
+ extern logical lsame_(char *, char *);
+ real small;
+ extern doublereal slamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAQSY equilibrates a symmetric matrix A using the scaling factors */
+/* in the vector S. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if EQUED = 'Y', the equilibrated matrix: */
+/* diag(S) * A * diag(S). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(N,1). */
+
+/* S (input) REAL array, dimension (N) */
+/* The scale factors for A. */
+
+/* SCOND (input) REAL */
+/* Ratio of the smallest S(i) to the largest S(i). */
+
+/* AMAX (input) REAL */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies whether or not equilibration was done. */
+/* = 'N': No equilibration. */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if scaling should be done */
+/* based on the ratio of the scaling factors. If SCOND < THRESH, */
+/* scaling is done. */
+
+/* LARGE and SMALL are threshold values used to decide if scaling should */
+/* be done based on the absolute size of the largest matrix element. */
+/* If AMAX > LARGE or AMAX < SMALL, scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+
+ /* Function Body */
+ if (*n <= 0) {
+ *(unsigned char *)equed = 'N';
+ return 0;
+ }
+
+/* Initialize LARGE and SMALL. */
+
+ small = slamch_("Safe minimum") / slamch_("Precision");
+ large = 1.f / small;
+
+ if (*scond >= .1f && *amax >= small && *amax <= large) {
+
+/* No equilibration */
+
+ *(unsigned char *)equed = 'N';
+ } else {
+
+/* Replace A by diag(S) * A * diag(S). */
+
+ if (lsame_(uplo, "U")) {
+
+/* Upper triangle of A is stored. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ r__1 = cj * s[i__];
+ i__4 = i__ + j * a_dim1;
+ q__1.r = r__1 * a[i__4].r, q__1.i = r__1 * a[i__4].i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+
+/* Lower triangle of A is stored. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ r__1 = cj * s[i__];
+ i__4 = i__ + j * a_dim1;
+ q__1.r = r__1 * a[i__4].r, q__1.i = r__1 * a[i__4].i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ *(unsigned char *)equed = 'Y';
+ }
+
+ return 0;
+
+/* End of CLAQSY */
+
+} /* claqsy_ */
diff --git a/contrib/libs/clapack/clar1v.c b/contrib/libs/clapack/clar1v.c
new file mode 100644
index 0000000000..b952776ea4
--- /dev/null
+++ b/contrib/libs/clapack/clar1v.c
@@ -0,0 +1,500 @@
+/* clar1v.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int clar1v_(integer *n, integer *b1, integer *bn, real *
+ lambda, real *d__, real *l, real *ld, real *lld, real *pivmin, real *
+ gaptol, complex *z__, logical *wantnc, integer *negcnt, real *ztz,
+ real *mingma, integer *r__, integer *isuppz, real *nrminv, real *
+ resid, real *rqcorr, real *work)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ real r__1;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ double c_abs(complex *), sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ real s;
+ integer r1, r2;
+ real eps, tmp;
+ integer neg1, neg2, indp, inds;
+ real dplus;
+ extern doublereal slamch_(char *);
+ integer indlpl, indumn;
+ extern logical sisnan_(real *);
+ real dminus;
+ logical sawnan1, sawnan2;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAR1V computes the (scaled) r-th column of the inverse of */
+/* the sumbmatrix in rows B1 through BN of the tridiagonal matrix */
+/* L D L^T - sigma I. When sigma is close to an eigenvalue, the */
+/* computed vector is an accurate eigenvector. Usually, r corresponds */
+/* to the index where the eigenvector is largest in magnitude. */
+/* The following steps accomplish this computation : */
+/* (a) Stationary qd transform, L D L^T - sigma I = L(+) D(+) L(+)^T, */
+/* (b) Progressive qd transform, L D L^T - sigma I = U(-) D(-) U(-)^T, */
+/* (c) Computation of the diagonal elements of the inverse of */
+/* L D L^T - sigma I by combining the above transforms, and choosing */
+/* r as the index where the diagonal of the inverse is (one of the) */
+/* largest in magnitude. */
+/* (d) Computation of the (scaled) r-th column of the inverse using the */
+/* twisted factorization obtained by combining the top part of the */
+/* the stationary and the bottom part of the progressive transform. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix L D L^T. */
+
+/* B1 (input) INTEGER */
+/* First index of the submatrix of L D L^T. */
+
+/* BN (input) INTEGER */
+/* Last index of the submatrix of L D L^T. */
+
+/* LAMBDA (input) REAL */
+/* The shift. In order to compute an accurate eigenvector, */
+/* LAMBDA should be a good approximation to an eigenvalue */
+/* of L D L^T. */
+
+/* L (input) REAL array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of the unit bidiagonal matrix */
+/* L, in elements 1 to N-1. */
+
+/* D (input) REAL array, dimension (N) */
+/* The n diagonal elements of the diagonal matrix D. */
+
+/* LD (input) REAL array, dimension (N-1) */
+/* The n-1 elements L(i)*D(i). */
+
+/* LLD (input) REAL array, dimension (N-1) */
+/* The n-1 elements L(i)*L(i)*D(i). */
+
+/* PIVMIN (input) REAL */
+/* The minimum pivot in the Sturm sequence. */
+
+/* GAPTOL (input) REAL */
+/* Tolerance that indicates when eigenvector entries are negligible */
+/* w.r.t. their contribution to the residual. */
+
+/* Z (input/output) COMPLEX array, dimension (N) */
+/* On input, all entries of Z must be set to 0. */
+/* On output, Z contains the (scaled) r-th column of the */
+/* inverse. The scaling is such that Z(R) equals 1. */
+
+/* WANTNC (input) LOGICAL */
+/* Specifies whether NEGCNT has to be computed. */
+
+/* NEGCNT (output) INTEGER */
+/* If WANTNC is .TRUE. then NEGCNT = the number of pivots < pivmin */
+/* in the matrix factorization L D L^T, and NEGCNT = -1 otherwise. */
+
+/* ZTZ (output) REAL */
+/* The square of the 2-norm of Z. */
+
+/* MINGMA (output) REAL */
+/* The reciprocal of the largest (in magnitude) diagonal */
+/* element of the inverse of L D L^T - sigma I. */
+
+/* R (input/output) INTEGER */
+/* The twist index for the twisted factorization used to */
+/* compute Z. */
+/* On input, 0 <= R <= N. If R is input as 0, R is set to */
+/* the index where (L D L^T - sigma I)^{-1} is largest */
+/* in magnitude. If 1 <= R <= N, R is unchanged. */
+/* On output, R contains the twist index used to compute Z. */
+/* Ideally, R designates the position of the maximum entry in the */
+/* eigenvector. */
+
+/* ISUPPZ (output) INTEGER array, dimension (2) */
+/* The support of the vector in Z, i.e., the vector Z is */
+/* nonzero only in elements ISUPPZ(1) through ISUPPZ( 2 ). */
+
+/* NRMINV (output) REAL */
+/* NRMINV = 1/SQRT( ZTZ ) */
+
+/* RESID (output) REAL */
+/* The residual of the FP vector. */
+/* RESID = ABS( MINGMA )/SQRT( ZTZ ) */
+
+/* RQCORR (output) REAL */
+/* The Rayleigh Quotient correction to LAMBDA. */
+/* RQCORR = MINGMA*TMP */
+
+/* WORK (workspace) REAL array, dimension (4*N) */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --work;
+ --isuppz;
+ --z__;
+ --lld;
+ --ld;
+ --l;
+ --d__;
+
+ /* Function Body */
+ eps = slamch_("Precision");
+ if (*r__ == 0) {
+ r1 = *b1;
+ r2 = *bn;
+ } else {
+ r1 = *r__;
+ r2 = *r__;
+ }
+/* Storage for LPLUS */
+ indlpl = 0;
+/* Storage for UMINUS */
+ indumn = *n;
+ inds = (*n << 1) + 1;
+ indp = *n * 3 + 1;
+ if (*b1 == 1) {
+ work[inds] = 0.f;
+ } else {
+ work[inds + *b1 - 1] = lld[*b1 - 1];
+ }
+
+/* Compute the stationary transform (using the differential form) */
+/* until the index R2. */
+
+ sawnan1 = FALSE_;
+ neg1 = 0;
+ s = work[inds + *b1 - 1] - *lambda;
+ i__1 = r1 - 1;
+ for (i__ = *b1; i__ <= i__1; ++i__) {
+ dplus = d__[i__] + s;
+ work[indlpl + i__] = ld[i__] / dplus;
+ if (dplus < 0.f) {
+ ++neg1;
+ }
+ work[inds + i__] = s * work[indlpl + i__] * l[i__];
+ s = work[inds + i__] - *lambda;
+/* L50: */
+ }
+ sawnan1 = sisnan_(&s);
+ if (sawnan1) {
+ goto L60;
+ }
+ i__1 = r2 - 1;
+ for (i__ = r1; i__ <= i__1; ++i__) {
+ dplus = d__[i__] + s;
+ work[indlpl + i__] = ld[i__] / dplus;
+ work[inds + i__] = s * work[indlpl + i__] * l[i__];
+ s = work[inds + i__] - *lambda;
+/* L51: */
+ }
+ sawnan1 = sisnan_(&s);
+
+L60:
+ if (sawnan1) {
+/* Runs a slower version of the above loop if a NaN is detected */
+ neg1 = 0;
+ s = work[inds + *b1 - 1] - *lambda;
+ i__1 = r1 - 1;
+ for (i__ = *b1; i__ <= i__1; ++i__) {
+ dplus = d__[i__] + s;
+ if (dabs(dplus) < *pivmin) {
+ dplus = -(*pivmin);
+ }
+ work[indlpl + i__] = ld[i__] / dplus;
+ if (dplus < 0.f) {
+ ++neg1;
+ }
+ work[inds + i__] = s * work[indlpl + i__] * l[i__];
+ if (work[indlpl + i__] == 0.f) {
+ work[inds + i__] = lld[i__];
+ }
+ s = work[inds + i__] - *lambda;
+/* L70: */
+ }
+ i__1 = r2 - 1;
+ for (i__ = r1; i__ <= i__1; ++i__) {
+ dplus = d__[i__] + s;
+ if (dabs(dplus) < *pivmin) {
+ dplus = -(*pivmin);
+ }
+ work[indlpl + i__] = ld[i__] / dplus;
+ work[inds + i__] = s * work[indlpl + i__] * l[i__];
+ if (work[indlpl + i__] == 0.f) {
+ work[inds + i__] = lld[i__];
+ }
+ s = work[inds + i__] - *lambda;
+/* L71: */
+ }
+ }
+
+/* Compute the progressive transform (using the differential form) */
+/* until the index R1 */
+
+ sawnan2 = FALSE_;
+ neg2 = 0;
+ work[indp + *bn - 1] = d__[*bn] - *lambda;
+ i__1 = r1;
+ for (i__ = *bn - 1; i__ >= i__1; --i__) {
+ dminus = lld[i__] + work[indp + i__];
+ tmp = d__[i__] / dminus;
+ if (dminus < 0.f) {
+ ++neg2;
+ }
+ work[indumn + i__] = l[i__] * tmp;
+ work[indp + i__ - 1] = work[indp + i__] * tmp - *lambda;
+/* L80: */
+ }
+ tmp = work[indp + r1 - 1];
+ sawnan2 = sisnan_(&tmp);
+ if (sawnan2) {
+/* Runs a slower version of the above loop if a NaN is detected */
+ neg2 = 0;
+ i__1 = r1;
+ for (i__ = *bn - 1; i__ >= i__1; --i__) {
+ dminus = lld[i__] + work[indp + i__];
+ if (dabs(dminus) < *pivmin) {
+ dminus = -(*pivmin);
+ }
+ tmp = d__[i__] / dminus;
+ if (dminus < 0.f) {
+ ++neg2;
+ }
+ work[indumn + i__] = l[i__] * tmp;
+ work[indp + i__ - 1] = work[indp + i__] * tmp - *lambda;
+ if (tmp == 0.f) {
+ work[indp + i__ - 1] = d__[i__] - *lambda;
+ }
+/* L100: */
+ }
+ }
+
+/* Find the index (from R1 to R2) of the largest (in magnitude) */
+/* diagonal element of the inverse */
+
+ *mingma = work[inds + r1 - 1] + work[indp + r1 - 1];
+ if (*mingma < 0.f) {
+ ++neg1;
+ }
+ if (*wantnc) {
+ *negcnt = neg1 + neg2;
+ } else {
+ *negcnt = -1;
+ }
+ if (dabs(*mingma) == 0.f) {
+ *mingma = eps * work[inds + r1 - 1];
+ }
+ *r__ = r1;
+ i__1 = r2 - 1;
+ for (i__ = r1; i__ <= i__1; ++i__) {
+ tmp = work[inds + i__] + work[indp + i__];
+ if (tmp == 0.f) {
+ tmp = eps * work[inds + i__];
+ }
+ if (dabs(tmp) <= dabs(*mingma)) {
+ *mingma = tmp;
+ *r__ = i__ + 1;
+ }
+/* L110: */
+ }
+
+/* Compute the FP vector: solve N^T v = e_r */
+
+ isuppz[1] = *b1;
+ isuppz[2] = *bn;
+ i__1 = *r__;
+ z__[i__1].r = 1.f, z__[i__1].i = 0.f;
+ *ztz = 1.f;
+
+/* Compute the FP vector upwards from R */
+
+ if (! sawnan1 && ! sawnan2) {
+ i__1 = *b1;
+ for (i__ = *r__ - 1; i__ >= i__1; --i__) {
+ i__2 = i__;
+ i__3 = indlpl + i__;
+ i__4 = i__ + 1;
+ q__2.r = work[i__3] * z__[i__4].r, q__2.i = work[i__3] * z__[i__4]
+ .i;
+ q__1.r = -q__2.r, q__1.i = -q__2.i;
+ z__[i__2].r = q__1.r, z__[i__2].i = q__1.i;
+ if ((c_abs(&z__[i__]) + c_abs(&z__[i__ + 1])) * (r__1 = ld[i__],
+ dabs(r__1)) < *gaptol) {
+ i__2 = i__;
+ z__[i__2].r = 0.f, z__[i__2].i = 0.f;
+ isuppz[1] = i__ + 1;
+ goto L220;
+ }
+ i__2 = i__;
+ i__3 = i__;
+ q__1.r = z__[i__2].r * z__[i__3].r - z__[i__2].i * z__[i__3].i,
+ q__1.i = z__[i__2].r * z__[i__3].i + z__[i__2].i * z__[
+ i__3].r;
+ *ztz += q__1.r;
+/* L210: */
+ }
+L220:
+ ;
+ } else {
+/* Run slower loop if NaN occurred. */
+ i__1 = *b1;
+ for (i__ = *r__ - 1; i__ >= i__1; --i__) {
+ i__2 = i__ + 1;
+ if (z__[i__2].r == 0.f && z__[i__2].i == 0.f) {
+ i__2 = i__;
+ r__1 = -(ld[i__ + 1] / ld[i__]);
+ i__3 = i__ + 2;
+ q__1.r = r__1 * z__[i__3].r, q__1.i = r__1 * z__[i__3].i;
+ z__[i__2].r = q__1.r, z__[i__2].i = q__1.i;
+ } else {
+ i__2 = i__;
+ i__3 = indlpl + i__;
+ i__4 = i__ + 1;
+ q__2.r = work[i__3] * z__[i__4].r, q__2.i = work[i__3] * z__[
+ i__4].i;
+ q__1.r = -q__2.r, q__1.i = -q__2.i;
+ z__[i__2].r = q__1.r, z__[i__2].i = q__1.i;
+ }
+ if ((c_abs(&z__[i__]) + c_abs(&z__[i__ + 1])) * (r__1 = ld[i__],
+ dabs(r__1)) < *gaptol) {
+ i__2 = i__;
+ z__[i__2].r = 0.f, z__[i__2].i = 0.f;
+ isuppz[1] = i__ + 1;
+ goto L240;
+ }
+ i__2 = i__;
+ i__3 = i__;
+ q__1.r = z__[i__2].r * z__[i__3].r - z__[i__2].i * z__[i__3].i,
+ q__1.i = z__[i__2].r * z__[i__3].i + z__[i__2].i * z__[
+ i__3].r;
+ *ztz += q__1.r;
+/* L230: */
+ }
+L240:
+ ;
+ }
+/* Compute the FP vector downwards from R in blocks of size BLKSIZ */
+ if (! sawnan1 && ! sawnan2) {
+ i__1 = *bn - 1;
+ for (i__ = *r__; i__ <= i__1; ++i__) {
+ i__2 = i__ + 1;
+ i__3 = indumn + i__;
+ i__4 = i__;
+ q__2.r = work[i__3] * z__[i__4].r, q__2.i = work[i__3] * z__[i__4]
+ .i;
+ q__1.r = -q__2.r, q__1.i = -q__2.i;
+ z__[i__2].r = q__1.r, z__[i__2].i = q__1.i;
+ if ((c_abs(&z__[i__]) + c_abs(&z__[i__ + 1])) * (r__1 = ld[i__],
+ dabs(r__1)) < *gaptol) {
+ i__2 = i__ + 1;
+ z__[i__2].r = 0.f, z__[i__2].i = 0.f;
+ isuppz[2] = i__;
+ goto L260;
+ }
+ i__2 = i__ + 1;
+ i__3 = i__ + 1;
+ q__1.r = z__[i__2].r * z__[i__3].r - z__[i__2].i * z__[i__3].i,
+ q__1.i = z__[i__2].r * z__[i__3].i + z__[i__2].i * z__[
+ i__3].r;
+ *ztz += q__1.r;
+/* L250: */
+ }
+L260:
+ ;
+ } else {
+/* Run slower loop if NaN occurred. */
+ i__1 = *bn - 1;
+ for (i__ = *r__; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ if (z__[i__2].r == 0.f && z__[i__2].i == 0.f) {
+ i__2 = i__ + 1;
+ r__1 = -(ld[i__ - 1] / ld[i__]);
+ i__3 = i__ - 1;
+ q__1.r = r__1 * z__[i__3].r, q__1.i = r__1 * z__[i__3].i;
+ z__[i__2].r = q__1.r, z__[i__2].i = q__1.i;
+ } else {
+ i__2 = i__ + 1;
+ i__3 = indumn + i__;
+ i__4 = i__;
+ q__2.r = work[i__3] * z__[i__4].r, q__2.i = work[i__3] * z__[
+ i__4].i;
+ q__1.r = -q__2.r, q__1.i = -q__2.i;
+ z__[i__2].r = q__1.r, z__[i__2].i = q__1.i;
+ }
+ if ((c_abs(&z__[i__]) + c_abs(&z__[i__ + 1])) * (r__1 = ld[i__],
+ dabs(r__1)) < *gaptol) {
+ i__2 = i__ + 1;
+ z__[i__2].r = 0.f, z__[i__2].i = 0.f;
+ isuppz[2] = i__;
+ goto L280;
+ }
+ i__2 = i__ + 1;
+ i__3 = i__ + 1;
+ q__1.r = z__[i__2].r * z__[i__3].r - z__[i__2].i * z__[i__3].i,
+ q__1.i = z__[i__2].r * z__[i__3].i + z__[i__2].i * z__[
+ i__3].r;
+ *ztz += q__1.r;
+/* L270: */
+ }
+L280:
+ ;
+ }
+
+/* Compute quantities for convergence test */
+
+ tmp = 1.f / *ztz;
+ *nrminv = sqrt(tmp);
+ *resid = dabs(*mingma) * *nrminv;
+ *rqcorr = *mingma * tmp;
+
+
+ return 0;
+
+/* End of CLAR1V */
+
+} /* clar1v_ */
diff --git a/contrib/libs/clapack/clar2v.c b/contrib/libs/clapack/clar2v.c
new file mode 100644
index 0000000000..636d5bef3d
--- /dev/null
+++ b/contrib/libs/clapack/clar2v.c
@@ -0,0 +1,159 @@
+/* clar2v.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int clar2v_(integer *n, complex *x, complex *y, complex *z__,
+ integer *incx, real *c__, complex *s, integer *incc)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ real r__1;
+ complex q__1, q__2, q__3, q__4, q__5;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__;
+ complex t2, t3, t4;
+ real t5, t6;
+ integer ic;
+ real ci;
+ complex si;
+ integer ix;
+ real xi, yi;
+ complex zi;
+ real t1i, t1r, sii, zii, sir, zir;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAR2V applies a vector of complex plane rotations with real cosines */
+/* from both sides to a sequence of 2-by-2 complex Hermitian matrices, */
+/* defined by the elements of the vectors x, y and z. For i = 1,2,...,n */
+
+/* ( x(i) z(i) ) := */
+/* ( conjg(z(i)) y(i) ) */
+
+/* ( c(i) conjg(s(i)) ) ( x(i) z(i) ) ( c(i) -conjg(s(i)) ) */
+/* ( -s(i) c(i) ) ( conjg(z(i)) y(i) ) ( s(i) c(i) ) */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of plane rotations to be applied. */
+
+/* X (input/output) COMPLEX array, dimension (1+(N-1)*INCX) */
+/* The vector x; the elements of x are assumed to be real. */
+
+/* Y (input/output) COMPLEX array, dimension (1+(N-1)*INCX) */
+/* The vector y; the elements of y are assumed to be real. */
+
+/* Z (input/output) COMPLEX array, dimension (1+(N-1)*INCX) */
+/* The vector z. */
+
+/* INCX (input) INTEGER */
+/* The increment between elements of X, Y and Z. INCX > 0. */
+
+/* C (input) REAL array, dimension (1+(N-1)*INCC) */
+/* The cosines of the plane rotations. */
+
+/* S (input) COMPLEX array, dimension (1+(N-1)*INCC) */
+/* The sines of the plane rotations. */
+
+/* INCC (input) INTEGER */
+/* The increment between elements of C and S. INCC > 0. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --s;
+ --c__;
+ --z__;
+ --y;
+ --x;
+
+ /* Function Body */
+ ix = 1;
+ ic = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ xi = x[i__2].r;
+ i__2 = ix;
+ yi = y[i__2].r;
+ i__2 = ix;
+ zi.r = z__[i__2].r, zi.i = z__[i__2].i;
+ zir = zi.r;
+ zii = r_imag(&zi);
+ ci = c__[ic];
+ i__2 = ic;
+ si.r = s[i__2].r, si.i = s[i__2].i;
+ sir = si.r;
+ sii = r_imag(&si);
+ t1r = sir * zir - sii * zii;
+ t1i = sir * zii + sii * zir;
+ q__1.r = ci * zi.r, q__1.i = ci * zi.i;
+ t2.r = q__1.r, t2.i = q__1.i;
+ r_cnjg(&q__3, &si);
+ q__2.r = xi * q__3.r, q__2.i = xi * q__3.i;
+ q__1.r = t2.r - q__2.r, q__1.i = t2.i - q__2.i;
+ t3.r = q__1.r, t3.i = q__1.i;
+ r_cnjg(&q__2, &t2);
+ q__3.r = yi * si.r, q__3.i = yi * si.i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ t4.r = q__1.r, t4.i = q__1.i;
+ t5 = ci * xi + t1r;
+ t6 = ci * yi - t1r;
+ i__2 = ix;
+ r__1 = ci * t5 + (sir * t4.r + sii * r_imag(&t4));
+ x[i__2].r = r__1, x[i__2].i = 0.f;
+ i__2 = ix;
+ r__1 = ci * t6 - (sir * t3.r - sii * r_imag(&t3));
+ y[i__2].r = r__1, y[i__2].i = 0.f;
+ i__2 = ix;
+ q__2.r = ci * t3.r, q__2.i = ci * t3.i;
+ r_cnjg(&q__4, &si);
+ q__5.r = t6, q__5.i = t1i;
+ q__3.r = q__4.r * q__5.r - q__4.i * q__5.i, q__3.i = q__4.r * q__5.i
+ + q__4.i * q__5.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ z__[i__2].r = q__1.r, z__[i__2].i = q__1.i;
+ ix += *incx;
+ ic += *incc;
+/* L10: */
+ }
+ return 0;
+
+/* End of CLAR2V */
+
+} /* clar2v_ */
diff --git a/contrib/libs/clapack/clarcm.c b/contrib/libs/clapack/clarcm.c
new file mode 100644
index 0000000000..657e1650a6
--- /dev/null
+++ b/contrib/libs/clapack/clarcm.c
@@ -0,0 +1,176 @@
+/* clarcm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b6 = 1.f;
+static real c_b7 = 0.f;
+
+/* Subroutine */ int clarcm_(integer *m, integer *n, real *a, integer *lda,
+ complex *b, integer *ldb, complex *c__, integer *ldc, real *rwork)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5;
+ real r__1;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer i__, j, l;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLARCM performs a very simple matrix-matrix multiplication: */
+/* C := A * B, */
+/* where A is M by M and real; B is M by N and complex; */
+/* C is M by N and complex. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A and of the matrix C. */
+/* M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns and rows of the matrix B and */
+/* the number of columns of the matrix C. */
+/* N >= 0. */
+
+/* A (input) REAL array, dimension (LDA, M) */
+/* A contains the M by M matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >=max(1,M). */
+
+/* B (input) REAL array, dimension (LDB, N) */
+/* B contains the M by N matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >=max(1,M). */
+
+/* C (input) COMPLEX array, dimension (LDC, N) */
+/* C contains the M by N matrix C. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >=max(1,M). */
+
+/* RWORK (workspace) REAL array, dimension (2*M*N) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --rwork;
+
+ /* Function Body */
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[(j - 1) * *m + i__] = b[i__3].r;
+/* L10: */
+ }
+/* L20: */
+ }
+
+ l = *m * *n + 1;
+ sgemm_("N", "N", m, n, m, &c_b6, &a[a_offset], lda, &rwork[1], m, &c_b7, &
+ rwork[l], m);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = l + (j - 1) * *m + i__ - 1;
+ c__[i__3].r = rwork[i__4], c__[i__3].i = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ rwork[(j - 1) * *m + i__] = r_imag(&b[i__ + j * b_dim1]);
+/* L50: */
+ }
+/* L60: */
+ }
+ sgemm_("N", "N", m, n, m, &c_b6, &a[a_offset], lda, &rwork[1], m, &c_b7, &
+ rwork[l], m);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ r__1 = c__[i__4].r;
+ i__5 = l + (j - 1) * *m + i__ - 1;
+ q__1.r = r__1, q__1.i = rwork[i__5];
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+
+ return 0;
+
+/* End of CLARCM */
+
+} /* clarcm_ */
diff --git a/contrib/libs/clapack/clarf.c b/contrib/libs/clapack/clarf.c
new file mode 100644
index 0000000000..53da4d311a
--- /dev/null
+++ b/contrib/libs/clapack/clarf.c
@@ -0,0 +1,198 @@
+/* clarf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static complex c_b2 = {0.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int clarf_(char *side, integer *m, integer *n, complex *v,
+ integer *incv, complex *tau, complex *c__, integer *ldc, complex *
+ work)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, i__1;
+ complex q__1;
+
+ /* Local variables */
+ integer i__;
+ logical applyleft;
+ extern /* Subroutine */ int cgerc_(integer *, integer *, complex *,
+ complex *, integer *, complex *, integer *, complex *, integer *),
+ cgemv_(char *, integer *, integer *, complex *, complex *,
+ integer *, complex *, integer *, complex *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ integer lastc, lastv;
+ extern integer ilaclc_(integer *, integer *, complex *, integer *),
+ ilaclr_(integer *, integer *, complex *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLARF applies a complex elementary reflector H to a complex M-by-N */
+/* matrix C, from either the left or the right. H is represented in the */
+/* form */
+
+/* H = I - tau * v * v' */
+
+/* where tau is a complex scalar and v is a complex vector. */
+
+/* If tau = 0, then H is taken to be the unit matrix. */
+
+/* To apply H' (the conjugate transpose of H), supply conjg(tau) instead */
+/* tau. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': form H * C */
+/* = 'R': form C * H */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* V (input) COMPLEX array, dimension */
+/* (1 + (M-1)*abs(INCV)) if SIDE = 'L' */
+/* or (1 + (N-1)*abs(INCV)) if SIDE = 'R' */
+/* The vector v in the representation of H. V is not used if */
+/* TAU = 0. */
+
+/* INCV (input) INTEGER */
+/* The increment between elements of v. INCV <> 0. */
+
+/* TAU (input) COMPLEX */
+/* The value tau in the representation of H. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by the matrix H * C if SIDE = 'L', */
+/* or C * H if SIDE = 'R'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX array, dimension */
+/* (N) if SIDE = 'L' */
+/* or (M) if SIDE = 'R' */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --v;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ applyleft = lsame_(side, "L");
+ lastv = 0;
+ lastc = 0;
+ if (tau->r != 0.f || tau->i != 0.f) {
+/* Set up variables for scanning V. LASTV begins pointing to the end */
+/* of V. */
+ if (applyleft) {
+ lastv = *m;
+ } else {
+ lastv = *n;
+ }
+ if (*incv > 0) {
+ i__ = (lastv - 1) * *incv + 1;
+ } else {
+ i__ = 1;
+ }
+/* Look for the last non-zero row in V. */
+ for(;;) { /* while(complicated condition) */
+ i__1 = i__;
+ if (!(lastv > 0 && (v[i__1].r == 0.f && v[i__1].i == 0.f)))
+ break;
+ --lastv;
+ i__ -= *incv;
+ }
+ if (applyleft) {
+/* Scan for the last non-zero column in C(1:lastv,:). */
+ lastc = ilaclc_(&lastv, n, &c__[c_offset], ldc);
+ } else {
+/* Scan for the last non-zero row in C(:,1:lastv). */
+ lastc = ilaclr_(m, &lastv, &c__[c_offset], ldc);
+ }
+ }
+/* Note that lastc.eq.0 renders the BLAS operations null; no special */
+/* case is needed at this level. */
+ if (applyleft) {
+
+/* Form H * C */
+
+ if (lastv > 0) {
+
+/* w(1:lastc,1) := C(1:lastv,1:lastc)' * v(1:lastv,1) */
+
+ cgemv_("Conjugate transpose", &lastv, &lastc, &c_b1, &c__[
+ c_offset], ldc, &v[1], incv, &c_b2, &work[1], &c__1);
+
+/* C(1:lastv,1:lastc) := C(...) - v(1:lastv,1) * w(1:lastc,1)' */
+
+ q__1.r = -tau->r, q__1.i = -tau->i;
+ cgerc_(&lastv, &lastc, &q__1, &v[1], incv, &work[1], &c__1, &c__[
+ c_offset], ldc);
+ }
+ } else {
+
+/* Form C * H */
+
+ if (lastv > 0) {
+
+/* w(1:lastc,1) := C(1:lastc,1:lastv) * v(1:lastv,1) */
+
+ cgemv_("No transpose", &lastc, &lastv, &c_b1, &c__[c_offset], ldc,
+ &v[1], incv, &c_b2, &work[1], &c__1);
+
+/* C(1:lastc,1:lastv) := C(...) - w(1:lastc,1) * v(1:lastv,1)' */
+
+ q__1.r = -tau->r, q__1.i = -tau->i;
+ cgerc_(&lastc, &lastv, &q__1, &work[1], &c__1, &v[1], incv, &c__[
+ c_offset], ldc);
+ }
+ }
+ return 0;
+
+/* End of CLARF */
+
+} /* clarf_ */
diff --git a/contrib/libs/clapack/clarfb.c b/contrib/libs/clapack/clarfb.c
new file mode 100644
index 0000000000..6b31aa17ed
--- /dev/null
+++ b/contrib/libs/clapack/clarfb.c
@@ -0,0 +1,837 @@
+/* clarfb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int clarfb_(char *side, char *trans, char *direct, char *
+	storev, integer *m, integer *n, integer *k, complex *v, integer *ldv, 
+	complex *t, integer *ldt, complex *c__, integer *ldc, complex *work, 
+	integer *ldwork)
+{
+    /* System generated locals */
+    integer c_dim1, c_offset, t_dim1, t_offset, v_dim1, v_offset, work_dim1, 
+	    work_offset, i__1, i__2, i__3, i__4, i__5;
+    complex q__1, q__2;
+
+    /* Builtin functions */
+    void r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    integer i__, j;
+    extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *, 
+	    integer *, complex *, complex *, integer *, complex *, integer *, 
+	    complex *, complex *, integer *);
+    extern logical lsame_(char *, char *);
+    integer lastc;
+    extern /* Subroutine */ int ccopy_(integer *, complex *, integer *, 
+	    complex *, integer *), ctrmm_(char *, char *, char *, char *, 
+	    integer *, integer *, complex *, complex *, integer *, complex *, 
+	    integer *);
+    integer lastv;
+    extern integer ilaclc_(integer *, integer *, complex *, integer *);
+    extern /* Subroutine */ int clacgv_(integer *, complex *, integer *);
+    extern integer ilaclr_(integer *, integer *, complex *, integer *);
+    char transt[1];
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLARFB applies a complex block reflector H or its transpose H' to a */
+/* complex M-by-N matrix C, from either the left or the right. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply H or H' from the Left */
+/* = 'R': apply H or H' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply H (No transpose) */
+/* = 'C': apply H' (Conjugate transpose) */
+
+/* DIRECT (input) CHARACTER*1 */
+/* Indicates how H is formed from a product of elementary */
+/* reflectors */
+/* = 'F': H = H(1) H(2) . . . H(k) (Forward) */
+/* = 'B': H = H(k) . . . H(2) H(1) (Backward) */
+
+/* STOREV (input) CHARACTER*1 */
+/* Indicates how the vectors which define the elementary */
+/* reflectors are stored: */
+/* = 'C': Columnwise */
+/* = 'R': Rowwise */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* K (input) INTEGER */
+/* The order of the matrix T (= the number of elementary */
+/* reflectors whose product defines the block reflector). */
+
+/* V (input) COMPLEX array, dimension */
+/* (LDV,K) if STOREV = 'C' */
+/* (LDV,M) if STOREV = 'R' and SIDE = 'L' */
+/* (LDV,N) if STOREV = 'R' and SIDE = 'R' */
+/* The matrix V. See further details. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. */
+/* If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M); */
+/* if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N); */
+/* if STOREV = 'R', LDV >= K. */
+
+/* T (input) COMPLEX array, dimension (LDT,K) */
+/* The triangular K-by-K matrix T in the representation of the */
+/* block reflector. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= K. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by H*C or H'*C or C*H or C*H'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX array, dimension (LDWORK,K) */
+
+/* LDWORK (input) INTEGER */
+/* The leading dimension of the array WORK. */
+/* If SIDE = 'L', LDWORK >= max(1,N); */
+/* if SIDE = 'R', LDWORK >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+/* NOTE: f2c translation -- the pointer adjustments below shift every array */
+/* so that 1-based Fortran-style indexing can be used throughout the body. */
+
+    /* Parameter adjustments */
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+    t_dim1 = *ldt;
+    t_offset = 1 + t_dim1;
+    t -= t_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+    work_dim1 = *ldwork;
+    work_offset = 1 + work_dim1;
+    work -= work_offset;
+
+    /* Function Body */
+    if (*m <= 0 || *n <= 0) {
+	return 0;
+    }
+
+/* TRANST holds the opposite operation of TRANS; the left-side updates */
+/* below apply the triangular factor as W*T' when TRANS='N' and as W*T */
+/* when TRANS='C'. */
+
+    if (lsame_(trans, "N")) {
+	*(unsigned char *)transt = 'C';
+    } else {
+	*(unsigned char *)transt = 'N';
+    }
+
+    if (lsame_(storev, "C")) {
+
+	if (lsame_(direct, "F")) {
+
+/* Let V = ( V1 ) (first K rows) */
+/* ( V2 ) */
+/* where V1 is unit lower triangular. */
+
+	    if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C where C = ( C1 ) */
+/* ( C2 ) */
+
+/* LASTV/LASTC bound the trailing non-zero structure of V and C (via */
+/* ilaclr/ilaclc), so the BLAS updates below touch only the leading */
+/* submatrices that can actually change. */
+
+/* Computing MAX */
+		i__1 = *k, i__2 = ilaclr_(m, k, &v[v_offset], ldv);
+		lastv = max(i__1,i__2);
+		lastc = ilaclc_(&lastv, n, &c__[c_offset], ldc);
+
+/* W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK) */
+
+/* W := C1' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    ccopy_(&lastc, &c__[j + c_dim1], ldc, &work[j * work_dim1 
+			    + 1], &c__1);
+		    clacgv_(&lastc, &work[j * work_dim1 + 1], &c__1);
+/* L10: */
+		}
+
+/* W := W * V1 */
+
+		ctrmm_("Right", "Lower", "No transpose", "Unit", &lastc, k, &
+			c_b1, &v[v_offset], ldv, &work[work_offset], ldwork);
+		if (lastv > *k) {
+
+/* W := W + C2'*V2 */
+
+		    i__1 = lastv - *k;
+		    cgemm_("Conjugate transpose", "No transpose", &lastc, k, &
+			    i__1, &c_b1, &c__[*k + 1 + c_dim1], ldc, &v[*k + 
+			    1 + v_dim1], ldv, &c_b1, &work[work_offset], 
+			    ldwork);
+		}
+
+/* W := W * T' or W * T */
+
+		ctrmm_("Right", "Upper", transt, "Non-unit", &lastc, k, &c_b1, 
+			&t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V * W' */
+
+		if (*m > *k) {
+
+/* C2 := C2 - V2 * W' */
+
+		    i__1 = lastv - *k;
+		    q__1.r = -1.f, q__1.i = -0.f;
+		    cgemm_("No transpose", "Conjugate transpose", &i__1, &
+			    lastc, k, &q__1, &v[*k + 1 + v_dim1], ldv, &work[
+			    work_offset], ldwork, &c_b1, &c__[*k + 1 + c_dim1]
+, ldc);
+		}
+
+/* W := W * V1' */
+
+		ctrmm_("Right", "Lower", "Conjugate transpose", "Unit", &
+			lastc, k, &c_b1, &v[v_offset], ldv, &work[work_offset]
+, ldwork)
+			;
+
+/* C1 := C1 - W' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = lastc;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = j + i__ * c_dim1;
+			i__4 = j + i__ * c_dim1;
+			r_cnjg(&q__2, &work[i__ + j * work_dim1]);
+			q__1.r = c__[i__4].r - q__2.r, q__1.i = c__[i__4].i - 
+				q__2.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L20: */
+		    }
+/* L30: */
+		}
+
+	    } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' where C = ( C1 C2 ) */
+
+/* Computing MAX */
+		i__1 = *k, i__2 = ilaclr_(n, k, &v[v_offset], ldv);
+		lastv = max(i__1,i__2);
+		lastc = ilaclr_(m, &lastv, &c__[c_offset], ldc);
+
+/* W := C * V = (C1*V1 + C2*V2) (stored in WORK) */
+
+/* W := C1 */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    ccopy_(&lastc, &c__[j * c_dim1 + 1], &c__1, &work[j * 
+			    work_dim1 + 1], &c__1);
+/* L40: */
+		}
+
+/* W := W * V1 */
+
+		ctrmm_("Right", "Lower", "No transpose", "Unit", &lastc, k, &
+			c_b1, &v[v_offset], ldv, &work[work_offset], ldwork);
+		if (lastv > *k) {
+
+/* W := W + C2 * V2 */
+
+		    i__1 = lastv - *k;
+		    cgemm_("No transpose", "No transpose", &lastc, k, &i__1, &
+			    c_b1, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[*k + 1 
+			    + v_dim1], ldv, &c_b1, &work[work_offset], ldwork);
+		}
+
+/* W := W * T or W * T' */
+
+		ctrmm_("Right", "Upper", trans, "Non-unit", &lastc, k, &c_b1, 
+			&t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V' */
+
+		if (lastv > *k) {
+
+/* C2 := C2 - W * V2' */
+
+		    i__1 = lastv - *k;
+		    q__1.r = -1.f, q__1.i = -0.f;
+		    cgemm_("No transpose", "Conjugate transpose", &lastc, &
+			    i__1, k, &q__1, &work[work_offset], ldwork, &v[*k 
+			    + 1 + v_dim1], ldv, &c_b1, &c__[(*k + 1) * c_dim1 
+			    + 1], ldc);
+		}
+
+/* W := W * V1' */
+
+		ctrmm_("Right", "Lower", "Conjugate transpose", "Unit", &
+			lastc, k, &c_b1, &v[v_offset], ldv, &work[work_offset]
+, ldwork)
+			;
+
+/* C1 := C1 - W */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = lastc;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			i__5 = i__ + j * work_dim1;
+			q__1.r = c__[i__4].r - work[i__5].r, q__1.i = c__[
+				i__4].i - work[i__5].i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L50: */
+		    }
+/* L60: */
+		}
+	    }
+
+	} else {
+
+/* Let V = ( V1 ) */
+/* ( V2 ) (last K rows) */
+/* where V2 is unit upper triangular. */
+
+	    if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C where C = ( C1 ) */
+/* ( C2 ) */
+
+/* Computing MAX */
+		i__1 = *k, i__2 = ilaclr_(m, k, &v[v_offset], ldv);
+		lastv = max(i__1,i__2);
+		lastc = ilaclc_(&lastv, n, &c__[c_offset], ldc);
+
+/* W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK) */
+
+/* W := C2' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    ccopy_(&lastc, &c__[lastv - *k + j + c_dim1], ldc, &work[
+			    j * work_dim1 + 1], &c__1);
+		    clacgv_(&lastc, &work[j * work_dim1 + 1], &c__1);
+/* L70: */
+		}
+
+/* W := W * V2 */
+
+		ctrmm_("Right", "Upper", "No transpose", "Unit", &lastc, k, &
+			c_b1, &v[lastv - *k + 1 + v_dim1], ldv, &work[
+			work_offset], ldwork);
+		if (lastv > *k) {
+
+/* W := W + C1'*V1 */
+
+		    i__1 = lastv - *k;
+		    cgemm_("Conjugate transpose", "No transpose", &lastc, k, &
+			    i__1, &c_b1, &c__[c_offset], ldc, &v[v_offset], 
+			    ldv, &c_b1, &work[work_offset], ldwork);
+		}
+
+/* W := W * T' or W * T */
+
+		ctrmm_("Right", "Lower", transt, "Non-unit", &lastc, k, &c_b1, 
+			&t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V * W' */
+
+		if (lastv > *k) {
+
+/* C1 := C1 - V1 * W' */
+
+		    i__1 = lastv - *k;
+		    q__1.r = -1.f, q__1.i = -0.f;
+		    cgemm_("No transpose", "Conjugate transpose", &i__1, &
+			    lastc, k, &q__1, &v[v_offset], ldv, &work[
+			    work_offset], ldwork, &c_b1, &c__[c_offset], ldc);
+		}
+
+/* W := W * V2' */
+
+		ctrmm_("Right", "Upper", "Conjugate transpose", "Unit", &
+			lastc, k, &c_b1, &v[lastv - *k + 1 + v_dim1], ldv, &
+			work[work_offset], ldwork);
+
+/* C2 := C2 - W' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = lastc;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = lastv - *k + j + i__ * c_dim1;
+			i__4 = lastv - *k + j + i__ * c_dim1;
+			r_cnjg(&q__2, &work[i__ + j * work_dim1]);
+			q__1.r = c__[i__4].r - q__2.r, q__1.i = c__[i__4].i - 
+				q__2.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L80: */
+		    }
+/* L90: */
+		}
+
+	    } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' where C = ( C1 C2 ) */
+
+/* Computing MAX */
+		i__1 = *k, i__2 = ilaclr_(n, k, &v[v_offset], ldv);
+		lastv = max(i__1,i__2);
+		lastc = ilaclr_(m, &lastv, &c__[c_offset], ldc);
+
+/* W := C * V = (C1*V1 + C2*V2) (stored in WORK) */
+
+/* W := C2 */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    ccopy_(&lastc, &c__[(lastv - *k + j) * c_dim1 + 1], &c__1, 
+			    &work[j * work_dim1 + 1], &c__1);
+/* L100: */
+		}
+
+/* W := W * V2 */
+
+		ctrmm_("Right", "Upper", "No transpose", "Unit", &lastc, k, &
+			c_b1, &v[lastv - *k + 1 + v_dim1], ldv, &work[
+			work_offset], ldwork);
+		if (lastv > *k) {
+
+/* W := W + C1 * V1 */
+
+		    i__1 = lastv - *k;
+		    cgemm_("No transpose", "No transpose", &lastc, k, &i__1, &
+			    c_b1, &c__[c_offset], ldc, &v[v_offset], ldv, &
+			    c_b1, &work[work_offset], ldwork);
+		}
+
+/* W := W * T or W * T' */
+
+		ctrmm_("Right", "Lower", trans, "Non-unit", &lastc, k, &c_b1, 
+			&t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V' */
+
+		if (lastv > *k) {
+
+/* C1 := C1 - W * V1' */
+
+		    i__1 = lastv - *k;
+		    q__1.r = -1.f, q__1.i = -0.f;
+		    cgemm_("No transpose", "Conjugate transpose", &lastc, &
+			    i__1, k, &q__1, &work[work_offset], ldwork, &v[
+			    v_offset], ldv, &c_b1, &c__[c_offset], ldc);
+		}
+
+/* W := W * V2' */
+
+		ctrmm_("Right", "Upper", "Conjugate transpose", "Unit", &
+			lastc, k, &c_b1, &v[lastv - *k + 1 + v_dim1], ldv, &
+			work[work_offset], ldwork);
+
+/* C2 := C2 - W */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = lastc;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + (lastv - *k + j) * c_dim1;
+			i__4 = i__ + (lastv - *k + j) * c_dim1;
+			i__5 = i__ + j * work_dim1;
+			q__1.r = c__[i__4].r - work[i__5].r, q__1.i = c__[
+				i__4].i - work[i__5].i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L110: */
+		    }
+/* L120: */
+		}
+	    }
+	}
+
+    } else if (lsame_(storev, "R")) {
+
+	if (lsame_(direct, "F")) {
+
+/* Let V = ( V1 V2 ) (V1: first K columns) */
+/* where V1 is unit upper triangular. */
+
+	    if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C where C = ( C1 ) */
+/* ( C2 ) */
+
+/* Computing MAX */
+		i__1 = *k, i__2 = ilaclc_(k, m, &v[v_offset], ldv);
+		lastv = max(i__1,i__2);
+		lastc = ilaclc_(&lastv, n, &c__[c_offset], ldc);
+
+/* W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK) */
+
+/* W := C1' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    ccopy_(&lastc, &c__[j + c_dim1], ldc, &work[j * work_dim1 
+			    + 1], &c__1);
+		    clacgv_(&lastc, &work[j * work_dim1 + 1], &c__1);
+/* L130: */
+		}
+
+/* W := W * V1' */
+
+		ctrmm_("Right", "Upper", "Conjugate transpose", "Unit", &
+			lastc, k, &c_b1, &v[v_offset], ldv, &work[work_offset]
+, ldwork)
+			;
+		if (lastv > *k) {
+
+/* W := W + C2'*V2' */
+
+		    i__1 = lastv - *k;
+		    cgemm_("Conjugate transpose", "Conjugate transpose", &
+			    lastc, k, &i__1, &c_b1, &c__[*k + 1 + c_dim1], 
+			    ldc, &v[(*k + 1) * v_dim1 + 1], ldv, &c_b1, &work[
+			    work_offset], ldwork);
+		}
+
+/* W := W * T' or W * T */
+
+		ctrmm_("Right", "Upper", transt, "Non-unit", &lastc, k, &c_b1, 
+			&t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V' * W' */
+
+		if (lastv > *k) {
+
+/* C2 := C2 - V2' * W' */
+
+		    i__1 = lastv - *k;
+		    q__1.r = -1.f, q__1.i = -0.f;
+		    cgemm_("Conjugate transpose", "Conjugate transpose", &
+			    i__1, &lastc, k, &q__1, &v[(*k + 1) * v_dim1 + 1], 
+			    ldv, &work[work_offset], ldwork, &c_b1, &c__[*k 
+			    + 1 + c_dim1], ldc);
+		}
+
+/* W := W * V1 */
+
+		ctrmm_("Right", "Upper", "No transpose", "Unit", &lastc, k, &
+			c_b1, &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = lastc;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = j + i__ * c_dim1;
+			i__4 = j + i__ * c_dim1;
+			r_cnjg(&q__2, &work[i__ + j * work_dim1]);
+			q__1.r = c__[i__4].r - q__2.r, q__1.i = c__[i__4].i - 
+				q__2.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L140: */
+		    }
+/* L150: */
+		}
+
+	    } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' where C = ( C1 C2 ) */
+
+/* Computing MAX */
+		i__1 = *k, i__2 = ilaclc_(k, n, &v[v_offset], ldv);
+		lastv = max(i__1,i__2);
+		lastc = ilaclr_(m, &lastv, &c__[c_offset], ldc);
+
+/* W := C * V' = (C1*V1' + C2*V2') (stored in WORK) */
+
+/* W := C1 */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    ccopy_(&lastc, &c__[j * c_dim1 + 1], &c__1, &work[j * 
+			    work_dim1 + 1], &c__1);
+/* L160: */
+		}
+
+/* W := W * V1' */
+
+		ctrmm_("Right", "Upper", "Conjugate transpose", "Unit", &
+			lastc, k, &c_b1, &v[v_offset], ldv, &work[work_offset]
+, ldwork)
+			;
+		if (lastv > *k) {
+
+/* W := W + C2 * V2' */
+
+		    i__1 = lastv - *k;
+		    cgemm_("No transpose", "Conjugate transpose", &lastc, k, &
+			    i__1, &c_b1, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[
+			    (*k + 1) * v_dim1 + 1], ldv, &c_b1, &work[
+			    work_offset], ldwork);
+		}
+
+/* W := W * T or W * T' */
+
+		ctrmm_("Right", "Upper", trans, "Non-unit", &lastc, k, &c_b1, 
+			&t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V */
+
+		if (lastv > *k) {
+
+/* C2 := C2 - W * V2 */
+
+		    i__1 = lastv - *k;
+		    q__1.r = -1.f, q__1.i = -0.f;
+		    cgemm_("No transpose", "No transpose", &lastc, &i__1, k, &
+			    q__1, &work[work_offset], ldwork, &v[(*k + 1) * 
+			    v_dim1 + 1], ldv, &c_b1, &c__[(*k + 1) * c_dim1 + 
+			    1], ldc);
+		}
+
+/* W := W * V1 */
+
+		ctrmm_("Right", "Upper", "No transpose", "Unit", &lastc, k, &
+			c_b1, &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = lastc;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * c_dim1;
+			i__4 = i__ + j * c_dim1;
+			i__5 = i__ + j * work_dim1;
+			q__1.r = c__[i__4].r - work[i__5].r, q__1.i = c__[
+				i__4].i - work[i__5].i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L170: */
+		    }
+/* L180: */
+		}
+
+	    }
+
+	} else {
+
+/* Let V = ( V1 V2 ) (V2: last K columns) */
+/* where V2 is unit lower triangular. */
+
+	    if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C where C = ( C1 ) */
+/* ( C2 ) */
+
+/* Computing MAX */
+		i__1 = *k, i__2 = ilaclc_(k, m, &v[v_offset], ldv);
+		lastv = max(i__1,i__2);
+		lastc = ilaclc_(&lastv, n, &c__[c_offset], ldc);
+
+/* W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK) */
+
+/* W := C2' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    ccopy_(&lastc, &c__[lastv - *k + j + c_dim1], ldc, &work[
+			    j * work_dim1 + 1], &c__1);
+		    clacgv_(&lastc, &work[j * work_dim1 + 1], &c__1);
+/* L190: */
+		}
+
+/* W := W * V2' */
+
+		ctrmm_("Right", "Lower", "Conjugate transpose", "Unit", &
+			lastc, k, &c_b1, &v[(lastv - *k + 1) * v_dim1 + 1], 
+			ldv, &work[work_offset], ldwork);
+		if (lastv > *k) {
+
+/* W := W + C1'*V1' */
+
+		    i__1 = lastv - *k;
+		    cgemm_("Conjugate transpose", "Conjugate transpose", &
+			    lastc, k, &i__1, &c_b1, &c__[c_offset], ldc, &v[
+			    v_offset], ldv, &c_b1, &work[work_offset], ldwork);
+		}
+
+/* W := W * T' or W * T */
+
+		ctrmm_("Right", "Lower", transt, "Non-unit", &lastc, k, &c_b1, 
+			&t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V' * W' */
+
+		if (lastv > *k) {
+
+/* C1 := C1 - V1' * W' */
+
+		    i__1 = lastv - *k;
+		    q__1.r = -1.f, q__1.i = -0.f;
+		    cgemm_("Conjugate transpose", "Conjugate transpose", &
+			    i__1, &lastc, k, &q__1, &v[v_offset], ldv, &work[
+			    work_offset], ldwork, &c_b1, &c__[c_offset], ldc);
+		}
+
+/* W := W * V2 */
+
+		ctrmm_("Right", "Lower", "No transpose", "Unit", &lastc, k, &
+			c_b1, &v[(lastv - *k + 1) * v_dim1 + 1], ldv, &work[
+			work_offset], ldwork);
+
+/* C2 := C2 - W' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = lastc;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = lastv - *k + j + i__ * c_dim1;
+			i__4 = lastv - *k + j + i__ * c_dim1;
+			r_cnjg(&q__2, &work[i__ + j * work_dim1]);
+			q__1.r = c__[i__4].r - q__2.r, q__1.i = c__[i__4].i - 
+				q__2.i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L200: */
+		    }
+/* L210: */
+		}
+
+	    } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' where C = ( C1 C2 ) */
+
+/* Computing MAX */
+		i__1 = *k, i__2 = ilaclc_(k, n, &v[v_offset], ldv);
+		lastv = max(i__1,i__2);
+		lastc = ilaclr_(m, &lastv, &c__[c_offset], ldc);
+
+/* W := C * V' = (C1*V1' + C2*V2') (stored in WORK) */
+
+/* W := C2 */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    ccopy_(&lastc, &c__[(lastv - *k + j) * c_dim1 + 1], &c__1, 
+			    &work[j * work_dim1 + 1], &c__1);
+/* L220: */
+		}
+
+/* W := W * V2' */
+
+		ctrmm_("Right", "Lower", "Conjugate transpose", "Unit", &
+			lastc, k, &c_b1, &v[(lastv - *k + 1) * v_dim1 + 1], 
+			ldv, &work[work_offset], ldwork);
+		if (lastv > *k) {
+
+/* W := W + C1 * V1' */
+
+		    i__1 = lastv - *k;
+		    cgemm_("No transpose", "Conjugate transpose", &lastc, k, &
+			    i__1, &c_b1, &c__[c_offset], ldc, &v[v_offset], 
+			    ldv, &c_b1, &work[work_offset], ldwork);
+		}
+
+/* W := W * T or W * T' */
+
+		ctrmm_("Right", "Lower", trans, "Non-unit", &lastc, k, &c_b1, 
+			&t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V */
+
+		if (lastv > *k) {
+
+/* C1 := C1 - W * V1 */
+
+		    i__1 = lastv - *k;
+		    q__1.r = -1.f, q__1.i = -0.f;
+		    cgemm_("No transpose", "No transpose", &lastc, &i__1, k, &
+			    q__1, &work[work_offset], ldwork, &v[v_offset], 
+			    ldv, &c_b1, &c__[c_offset], ldc);
+		}
+
+/* W := W * V2 */
+
+		ctrmm_("Right", "Lower", "No transpose", "Unit", &lastc, k, &
+			c_b1, &v[(lastv - *k + 1) * v_dim1 + 1], ldv, &work[
+			work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = lastc;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + (lastv - *k + j) * c_dim1;
+			i__4 = i__ + (lastv - *k + j) * c_dim1;
+			i__5 = i__ + j * work_dim1;
+			q__1.r = c__[i__4].r - work[i__5].r, q__1.i = c__[
+				i__4].i - work[i__5].i;
+			c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L230: */
+		    }
+/* L240: */
+		}
+
+	    }
+
+	}
+    }
+
+    return 0;
+
+/* End of CLARFB */
+
+} /* clarfb_ */
diff --git a/contrib/libs/clapack/clarfg.c b/contrib/libs/clapack/clarfg.c
new file mode 100644
index 0000000000..2a2c3eb6e5
--- /dev/null
+++ b/contrib/libs/clapack/clarfg.c
@@ -0,0 +1,190 @@
+/* clarfg.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b5 = {1.f,0.f};
+
+/* Subroutine */ int clarfg_(integer *n, complex *alpha, complex *x, integer *
+	incx, complex *tau)
+{
+    /* System generated locals */
+    integer i__1;
+    real r__1, r__2;
+    complex q__1, q__2;
+
+    /* Builtin functions */
+    double r_imag(complex *), r_sign(real *, real *);
+
+    /* Local variables */
+    integer j, knt;
+    real beta;
+    extern /* Subroutine */ int cscal_(integer *, complex *, complex *, 
+	    integer *);
+    real alphi, alphr, xnorm;
+    extern doublereal scnrm2_(integer *, complex *, integer *), slapy3_(real *
+, real *, real *);
+    extern /* Complex */ VOID cladiv_(complex *, complex *, complex *);
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer 
+	    *);
+    real safmin, rsafmn;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLARFG generates a complex elementary reflector H of order n, such */
+/* that */
+
+/* H' * ( alpha ) = ( beta ), H' * H = I. */
+/* ( x ) ( 0 ) */
+
+/* where alpha and beta are scalars, with beta real, and x is an */
+/* (n-1)-element complex vector. H is represented in the form */
+
+/* H = I - tau * ( 1 ) * ( 1 v' ) , */
+/* ( v ) */
+
+/* where tau is a complex scalar and v is a complex (n-1)-element */
+/* vector. Note that H is not hermitian. */
+
+/* If the elements of x are all zero and alpha is real, then tau = 0 */
+/* and H is taken to be the unit matrix. */
+
+/* Otherwise 1 <= real(tau) <= 2 and abs(tau-1) <= 1 . */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the elementary reflector. */
+
+/* ALPHA (input/output) COMPLEX */
+/* On entry, the value alpha. */
+/* On exit, it is overwritten with the value beta. */
+
+/* X (input/output) COMPLEX array, dimension */
+/* (1+(N-2)*abs(INCX)) */
+/* On entry, the vector x. */
+/* On exit, it is overwritten with the vector v. */
+
+/* INCX (input) INTEGER */
+/* The increment between elements of X. INCX > 0. */
+
+/* TAU (output) COMPLEX */
+/* The value tau. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --x;
+
+    /* Function Body */
+    if (*n <= 0) {
+	tau->r = 0.f, tau->i = 0.f;
+	return 0;
+    }
+
+    i__1 = *n - 1;
+    xnorm = scnrm2_(&i__1, &x[1], incx);
+    alphr = alpha->r;
+    alphi = r_imag(alpha);
+
+    if (xnorm == 0.f && alphi == 0.f) {
+
+/* H = I */
+
+	tau->r = 0.f, tau->i = 0.f;
+    } else {
+
+/* general case */
+
+	r__1 = slapy3_(&alphr, &alphi, &xnorm);
+	beta = -r_sign(&r__1, &alphr);
+	safmin = slamch_("S") / slamch_("E");
+	rsafmn = 1.f / safmin;
+
+	knt = 0;
+	if (dabs(beta) < safmin) {
+
+/* XNORM, BETA may be inaccurate; scale X and recompute them */
+
+L10:
+	    ++knt;
+	    i__1 = *n - 1;
+	    csscal_(&i__1, &rsafmn, &x[1], incx);
+	    beta *= rsafmn;
+	    alphi *= rsafmn;
+	    alphr *= rsafmn;
+	    if (dabs(beta) < safmin) {
+		goto L10;
+	    }
+
+/* New BETA is at most 1, at least SAFMIN */
+
+	    i__1 = *n - 1;
+	    xnorm = scnrm2_(&i__1, &x[1], incx);
+	    q__1.r = alphr, q__1.i = alphi;
+	    alpha->r = q__1.r, alpha->i = q__1.i;
+	    r__1 = slapy3_(&alphr, &alphi, &xnorm);
+	    beta = -r_sign(&r__1, &alphr);
+	}
+
+/* tau = (beta - alpha)/beta with beta real; v = x/(alpha - beta), */
+/* computed below as x * (1/(alpha - beta)) via cladiv and cscal. */
+
+	r__1 = (beta - alphr) / beta;
+	r__2 = -alphi / beta;
+	q__1.r = r__1, q__1.i = r__2;
+	tau->r = q__1.r, tau->i = q__1.i;
+	q__2.r = alpha->r - beta, q__2.i = alpha->i;
+	cladiv_(&q__1, &c_b5, &q__2);
+	alpha->r = q__1.r, alpha->i = q__1.i;
+	i__1 = *n - 1;
+	cscal_(&i__1, alpha, &x[1], incx);
+
+/* If ALPHA is subnormal, it may lose relative accuracy */
+
+/* Undo the KNT rescalings so BETA is returned on the original scale. */
+
+	i__1 = knt;
+	for (j = 1; j <= i__1; ++j) {
+	    beta *= safmin;
+/* L20: */
+	}
+	alpha->r = beta, alpha->i = 0.f;
+    }
+
+    return 0;
+
+/* End of CLARFG */
+
+} /* clarfg_ */
diff --git a/contrib/libs/clapack/clarfp.c b/contrib/libs/clapack/clarfp.c
new file mode 100644
index 0000000000..5e08c2aceb
--- /dev/null
+++ b/contrib/libs/clapack/clarfp.c
@@ -0,0 +1,234 @@
+/* clarfp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b5 = {1.f,0.f};
+
+/* Subroutine */ int clarfp_(integer *n, complex *alpha, complex *x, integer *
+	incx, complex *tau)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    real r__1, r__2;
+    complex q__1, q__2;
+
+    /* Builtin functions */
+    double r_imag(complex *), r_sign(real *, real *);
+
+    /* Local variables */
+    integer j, knt;
+    real beta;
+    extern /* Subroutine */ int cscal_(integer *, complex *, complex *, 
+	    integer *);
+    real alphi, alphr, xnorm;
+    extern doublereal scnrm2_(integer *, complex *, integer *), slapy2_(real *
+, real *), slapy3_(real *, real *, real *);
+    extern /* Complex */ VOID cladiv_(complex *, complex *, complex *);
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer 
+	    *);
+    real safmin, rsafmn;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLARFP generates a complex elementary reflector H of order n, such */
+/* that */
+
+/* H' * ( alpha ) = ( beta ), H' * H = I. */
+/* ( x ) ( 0 ) */
+
+/* where alpha and beta are scalars, beta is real and non-negative, and */
+/* x is an (n-1)-element complex vector. H is represented in the form */
+
+/* H = I - tau * ( 1 ) * ( 1 v' ) , */
+/* ( v ) */
+
+/* where tau is a complex scalar and v is a complex (n-1)-element */
+/* vector. Note that H is not hermitian. */
+
+/* If the elements of x are all zero and alpha is real, then tau = 0 */
+/* and H is taken to be the unit matrix. */
+
+/* Otherwise 1 <= real(tau) <= 2 and abs(tau-1) <= 1 . */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the elementary reflector. */
+
+/* ALPHA (input/output) COMPLEX */
+/* On entry, the value alpha. */
+/* On exit, it is overwritten with the value beta. */
+
+/* X (input/output) COMPLEX array, dimension */
+/* (1+(N-2)*abs(INCX)) */
+/* On entry, the vector x. */
+/* On exit, it is overwritten with the vector v. */
+
+/* INCX (input) INTEGER */
+/* The increment between elements of X. INCX > 0. */
+
+/* TAU (output) COMPLEX */
+/* The value tau. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --x;
+
+    /* Function Body */
+    if (*n <= 0) {
+	tau->r = 0.f, tau->i = 0.f;
+	return 0;
+    }
+
+    i__1 = *n - 1;
+    xnorm = scnrm2_(&i__1, &x[1], incx);
+    alphr = alpha->r;
+    alphi = r_imag(alpha);
+
+    if (xnorm == 0.f && alphi == 0.f) {
+
+/* H = [1-alpha/abs(alpha) 0; 0 I], sign chosen so ALPHA >= 0. */
+
+	if (alphi == 0.f) {
+	    if (alphr >= 0.f) {
+/* When TAU.eq.ZERO, the vector is special-cased to be */
+/* all zeros in the application routines. We do not need */
+/* to clear it. */
+		tau->r = 0.f, tau->i = 0.f;
+	    } else {
+/* However, the application routines rely on explicit */
+/* zero checks when TAU.ne.ZERO, and we must clear X. */
+		tau->r = 2.f, tau->i = 0.f;
+		i__1 = *n - 1;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = (j - 1) * *incx + 1;
+		    x[i__2].r = 0.f, x[i__2].i = 0.f;
+		}
+		q__1.r = -alpha->r, q__1.i = -alpha->i;
+		alpha->r = q__1.r, alpha->i = q__1.i;
+	    }
+	} else {
+/* Only "reflecting" the diagonal entry to be real and non-negative. */
+	    xnorm = slapy2_(&alphr, &alphi);
+	    r__1 = 1.f - alphr / xnorm;
+	    r__2 = -alphi / xnorm;
+	    q__1.r = r__1, q__1.i = r__2;
+	    tau->r = q__1.r, tau->i = q__1.i;
+	    i__1 = *n - 1;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = (j - 1) * *incx + 1;
+		x[i__2].r = 0.f, x[i__2].i = 0.f;
+	    }
+	    alpha->r = xnorm, alpha->i = 0.f;
+	}
+    } else {
+
+/* general case */
+
+	r__1 = slapy3_(&alphr, &alphi, &xnorm);
+	beta = r_sign(&r__1, &alphr);
+	safmin = slamch_("S") / slamch_("E");
+	rsafmn = 1.f / safmin;
+
+	knt = 0;
+	if (dabs(beta) < safmin) {
+
+/* XNORM, BETA may be inaccurate; scale X and recompute them */
+
+L10:
+	    ++knt;
+	    i__1 = *n - 1;
+	    csscal_(&i__1, &rsafmn, &x[1], incx);
+	    beta *= rsafmn;
+	    alphi *= rsafmn;
+	    alphr *= rsafmn;
+	    if (dabs(beta) < safmin) {
+		goto L10;
+	    }
+
+/* New BETA is at most 1, at least SAFMIN */
+
+	    i__1 = *n - 1;
+	    xnorm = scnrm2_(&i__1, &x[1], incx);
+	    q__1.r = alphr, q__1.i = alphi;
+	    alpha->r = q__1.r, alpha->i = q__1.i;
+	    r__1 = slapy3_(&alphr, &alphi, &xnorm);
+	    beta = r_sign(&r__1, &alphr);
+	}
+
+/* beta carries the sign of Re(alpha) here; the branch below ensures */
+/* the BETA finally returned is non-negative. */
+
+	q__1.r = alpha->r + beta, q__1.i = alpha->i;
+	alpha->r = q__1.r, alpha->i = q__1.i;
+	if (beta < 0.f) {
+	    beta = -beta;
+	    q__2.r = -alpha->r, q__2.i = -alpha->i;
+	    q__1.r = q__2.r / beta, q__1.i = q__2.i / beta;
+	    tau->r = q__1.r, tau->i = q__1.i;
+	} else {
+	    alphr = alphi * (alphi / alpha->r);
+	    alphr += xnorm * (xnorm / alpha->r);
+	    r__1 = alphr / beta;
+	    r__2 = -alphi / beta;
+	    q__1.r = r__1, q__1.i = r__2;
+	    tau->r = q__1.r, tau->i = q__1.i;
+	    r__1 = -alphr;
+	    q__1.r = r__1, q__1.i = alphi;
+	    alpha->r = q__1.r, alpha->i = q__1.i;
+	}
+	cladiv_(&q__1, &c_b5, alpha);
+	alpha->r = q__1.r, alpha->i = q__1.i;
+	i__1 = *n - 1;
+	cscal_(&i__1, alpha, &x[1], incx);
+
+/* If BETA is subnormal, it may lose relative accuracy */
+
+/* Undo the KNT rescalings so BETA is returned on the original scale. */
+
+	i__1 = knt;
+	for (j = 1; j <= i__1; ++j) {
+	    beta *= safmin;
+/* L20: */
+	}
+	alpha->r = beta, alpha->i = 0.f;
+    }
+
+    return 0;
+
+/* End of CLARFP */
+
+} /* clarfp_ */
diff --git a/contrib/libs/clapack/clarft.c b/contrib/libs/clapack/clarft.c
new file mode 100644
index 0000000000..ca573e4ade
--- /dev/null
+++ b/contrib/libs/clapack/clarft.c
@@ -0,0 +1,361 @@
+/* clarft.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b2 = {0.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int clarft_(char *direct, char *storev, integer *n, integer *
+	k, complex *v, integer *ldv, complex *tau, complex *t, integer *ldt)
+{
+    /* System generated locals */
+    integer t_dim1, t_offset, v_dim1, v_offset, i__1, i__2, i__3, i__4;
+    complex q__1;
+
+    /* Local variables */
+    integer i__, j, prevlastv;
+    complex vii;
+    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *);
+    extern logical lsame_(char *, char *);
+    integer lastv;
+    extern /* Subroutine */ int ctrmv_(char *, char *, char *, integer *, 
+	    complex *, integer *, complex *, integer *), clacgv_(integer *, complex *, integer *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CLARFT forms the triangular factor T of a complex block reflector H */
+/*  of order n, which is defined as a product of k elementary reflectors. */
+
+/*  If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular; */
+
+/*  If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. */
+
+/*  If STOREV = 'C', the vector which defines the elementary reflector */
+/*  H(i) is stored in the i-th column of the array V, and */
+
+/*     H  =  I - V * T * V' */
+
+/*  If STOREV = 'R', the vector which defines the elementary reflector */
+/*  H(i) is stored in the i-th row of the array V, and */
+
+/*     H  =  I - V' * T * V */
+
+/*  Arguments */
+/*  ========= */
+
+/*  DIRECT  (input) CHARACTER*1 */
+/*          Specifies the order in which the elementary reflectors are */
+/*          multiplied to form the block reflector: */
+/*          = 'F': H = H(1) H(2) . . . H(k) (Forward) */
+/*          = 'B': H = H(k) . . . H(2) H(1) (Backward) */
+
+/*  STOREV  (input) CHARACTER*1 */
+/*          Specifies how the vectors which define the elementary */
+/*          reflectors are stored (see also Further Details): */
+/*          = 'C': columnwise */
+/*          = 'R': rowwise */
+
+/*  N       (input) INTEGER */
+/*          The order of the block reflector H. N >= 0. */
+
+/*  K       (input) INTEGER */
+/*          The order of the triangular factor T (= the number of */
+/*          elementary reflectors). K >= 1. */
+
+/*  V       (input/output) COMPLEX array, dimension */
+/*                               (LDV,K) if STOREV = 'C' */
+/*                               (LDV,N) if STOREV = 'R' */
+/*          The matrix V. See further details. */
+
+/*  LDV     (input) INTEGER */
+/*          The leading dimension of the array V. */
+/*          If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. */
+
+/*  TAU     (input) COMPLEX array, dimension (K) */
+/*          TAU(i) must contain the scalar factor of the elementary */
+/*          reflector H(i). */
+
+/*  T       (output) COMPLEX array, dimension (LDT,K) */
+/*          The k by k triangular factor T of the block reflector. */
+/*          If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is */
+/*          lower triangular. The rest of the array is not used. */
+
+/*  LDT     (input) INTEGER */
+/*          The leading dimension of the array T. LDT >= K. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  The shape of the matrix V and the storage of the vectors which define */
+/*  the H(i) is best illustrated by the following example with n = 5 and */
+/*  k = 3. The elements equal to 1 are not stored; the corresponding */
+/*  array elements are modified but restored on exit. The rest of the */
+/*  array is not used. */
+
+/*  DIRECT = 'F' and STOREV = 'C':         DIRECT = 'F' and STOREV = 'R': */
+
+/*               V = (  1       )                 V = (  1 v1 v1 v1 v1 ) */
+/*                   ( v1  1    )                     (     1 v2 v2 v2 ) */
+/*                   ( v1 v2  1 )                     (        1 v3 v3 ) */
+/*                   ( v1 v2 v3 ) */
+/*                   ( v1 v2 v3 ) */
+
+/*  DIRECT = 'B' and STOREV = 'C':         DIRECT = 'B' and STOREV = 'R': */
+
+/*               V = ( v1 v2 v3 )                 V = ( v1 v1  1       ) */
+/*                   ( v1 v2 v3 )                     ( v2 v2 v2  1    ) */
+/*                   (  1 v2 v3 )                     ( v3 v3 v3 v3  1 ) */
+/*                   (     1 v3 ) */
+/*                   (        1 ) */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Quick return if possible */
+
+    /* Parameter adjustments */
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+    --tau;
+    t_dim1 = *ldt;
+    t_offset = 1 + t_dim1;
+    t -= t_offset;
+
+    /* Function Body */
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (lsame_(direct, "F")) {
+
+/*        DIRECT = 'F': build T one column per reflector; T comes out */
+/*        upper triangular.  LASTV/PREVLASTV bound the effective */
+/*        (nonzero) length of the reflector vectors, and J = min of the */
+/*        two is used below to shorten the CGEMV operand sizes. */
+
+	prevlastv = *n;
+	i__1 = *k;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    prevlastv = max(prevlastv,i__);
+	    i__2 = i__;
+	    if (tau[i__2].r == 0.f && tau[i__2].i == 0.f) {
+
+/*              H(i)  =  I : the corresponding column of T is zeroed. */
+
+		i__2 = i__;
+		for (j = 1; j <= i__2; ++j) {
+		    i__3 = j + i__ * t_dim1;
+		    t[i__3].r = 0.f, t[i__3].i = 0.f;
+/* L10: */
+		}
+	    } else {
+
+/*              general case */
+
+/*              Temporarily set V(i,i) = 1, the implicit unit entry of */
+/*              the elementary reflector; the saved value VII is put */
+/*              back once the products below are formed. */
+
+		i__2 = i__ + i__ * v_dim1;
+		vii.r = v[i__2].r, vii.i = v[i__2].i;
+		i__2 = i__ + i__ * v_dim1;
+		v[i__2].r = 1.f, v[i__2].i = 0.f;
+		if (lsame_(storev, "C")) {
+/*                 Skip any trailing zeros. */
+		    i__2 = i__ + 1;
+		    for (lastv = *n; lastv >= i__2; --lastv) {
+			i__3 = lastv + i__ * v_dim1;
+			if (v[i__3].r != 0.f || v[i__3].i != 0.f) {
+			    break;
+			}
+		    }
+		    j = min(lastv,prevlastv);
+
+/*                 T(1:i-1,i) := - tau(i) * V(i:j,1:i-1)' * V(i:j,i) */
+
+		    i__2 = j - i__ + 1;
+		    i__3 = i__ - 1;
+		    i__4 = i__;
+		    q__1.r = -tau[i__4].r, q__1.i = -tau[i__4].i;
+		    cgemv_("Conjugate transpose", &i__2, &i__3, &q__1, &v[i__ 
+			    + v_dim1], ldv, &v[i__ + i__ * v_dim1], &c__1, &
+			    c_b2, &t[i__ * t_dim1 + 1], &c__1);
+		} else {
+/*                 Skip any trailing zeros. */
+		    i__2 = i__ + 1;
+		    for (lastv = *n; lastv >= i__2; --lastv) {
+			i__3 = i__ + lastv * v_dim1;
+			if (v[i__3].r != 0.f || v[i__3].i != 0.f) {
+			    break;
+			}
+		    }
+		    j = min(lastv,prevlastv);
+
+/*                 T(1:i-1,i) := - tau(i) * V(1:i-1,i:j) * V(i,i:j)' */
+
+/*                 Row storage: conjugate the row of V in place, */
+/*                 multiply, then conjugate back to restore V. */
+
+		    if (i__ < j) {
+			i__2 = j - i__;
+			clacgv_(&i__2, &v[i__ + (i__ + 1) * v_dim1], ldv);
+		    }
+		    i__2 = i__ - 1;
+		    i__3 = j - i__ + 1;
+		    i__4 = i__;
+		    q__1.r = -tau[i__4].r, q__1.i = -tau[i__4].i;
+		    cgemv_("No transpose", &i__2, &i__3, &q__1, &v[i__ * 
+			    v_dim1 + 1], ldv, &v[i__ + i__ * v_dim1], ldv, &
+			    c_b2, &t[i__ * t_dim1 + 1], &c__1);
+		    if (i__ < j) {
+			i__2 = j - i__;
+			clacgv_(&i__2, &v[i__ + (i__ + 1) * v_dim1], ldv);
+		    }
+		}
+		i__2 = i__ + i__ * v_dim1;
+		v[i__2].r = vii.r, v[i__2].i = vii.i;
+
+/*              T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) */
+
+		i__2 = i__ - 1;
+		ctrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[
+			t_offset], ldt, &t[i__ * t_dim1 + 1], &c__1);
+		i__2 = i__ + i__ * t_dim1;
+		i__3 = i__;
+		t[i__2].r = tau[i__3].r, t[i__2].i = tau[i__3].i;
+		if (i__ > 1) {
+		    prevlastv = max(prevlastv,lastv);
+		} else {
+		    prevlastv = lastv;
+		}
+	    }
+/* L20: */
+	}
+    } else {
+
+/*        DIRECT = 'B': process reflectors from the last one backwards; */
+/*        T is filled in as a lower triangular matrix. */
+
+	prevlastv = 1;
+	for (i__ = *k; i__ >= 1; --i__) {
+	    i__1 = i__;
+	    if (tau[i__1].r == 0.f && tau[i__1].i == 0.f) {
+
+/*              H(i)  =  I : the corresponding column of T is zeroed. */
+
+		i__1 = *k;
+		for (j = i__; j <= i__1; ++j) {
+		    i__2 = j + i__ * t_dim1;
+		    t[i__2].r = 0.f, t[i__2].i = 0.f;
+/* L30: */
+		}
+	    } else {
+
+/*              general case */
+
+		if (i__ < *k) {
+		    if (lsame_(storev, "C")) {
+/*                    Save V(n-k+i,i), set it to the implicit unit */
+/*                    entry 1, and restore it after the CGEMV below. */
+			i__1 = *n - *k + i__ + i__ * v_dim1;
+			vii.r = v[i__1].r, vii.i = v[i__1].i;
+			i__1 = *n - *k + i__ + i__ * v_dim1;
+			v[i__1].r = 1.f, v[i__1].i = 0.f;
+/*                    Skip any leading zeros. */
+			i__1 = i__ - 1;
+			for (lastv = 1; lastv <= i__1; ++lastv) {
+			    i__2 = lastv + i__ * v_dim1;
+			    if (v[i__2].r != 0.f || v[i__2].i != 0.f) {
+				break;
+			    }
+			}
+			j = max(lastv,prevlastv);
+
+/*                    T(i+1:k,i) := */
+/*                            - tau(i) * V(j:n-k+i,i+1:k)' * V(j:n-k+i,i) */
+
+			i__1 = *n - *k + i__ - j + 1;
+			i__2 = *k - i__;
+			i__3 = i__;
+			q__1.r = -tau[i__3].r, q__1.i = -tau[i__3].i;
+			cgemv_("Conjugate transpose", &i__1, &i__2, &q__1, &v[
+				j + (i__ + 1) * v_dim1], ldv, &v[j + i__ * 
+				v_dim1], &c__1, &c_b2, &t[i__ + 1 + i__ * 
+				t_dim1], &c__1);
+			i__1 = *n - *k + i__ + i__ * v_dim1;
+			v[i__1].r = vii.r, v[i__1].i = vii.i;
+		    } else {
+/*                    Row storage: same save/set/restore of the unit */
+/*                    entry, now at V(i,n-k+i). */
+			i__1 = i__ + (*n - *k + i__) * v_dim1;
+			vii.r = v[i__1].r, vii.i = v[i__1].i;
+			i__1 = i__ + (*n - *k + i__) * v_dim1;
+			v[i__1].r = 1.f, v[i__1].i = 0.f;
+/*                    Skip any leading zeros. */
+			i__1 = i__ - 1;
+			for (lastv = 1; lastv <= i__1; ++lastv) {
+			    i__2 = i__ + lastv * v_dim1;
+			    if (v[i__2].r != 0.f || v[i__2].i != 0.f) {
+				break;
+			    }
+			}
+			j = max(lastv,prevlastv);
+
+/*                    T(i+1:k,i) := */
+/*                            - tau(i) * V(i+1:k,j:n-k+i) * V(i,j:n-k+i)' */
+
+			i__1 = *n - *k + i__ - 1 - j + 1;
+			clacgv_(&i__1, &v[i__ + j * v_dim1], ldv);
+			i__1 = *k - i__;
+			i__2 = *n - *k + i__ - j + 1;
+			i__3 = i__;
+			q__1.r = -tau[i__3].r, q__1.i = -tau[i__3].i;
+			cgemv_("No transpose", &i__1, &i__2, &q__1, &v[i__ + 
+				1 + j * v_dim1], ldv, &v[i__ + j * v_dim1], 
+				ldv, &c_b2, &t[i__ + 1 + i__ * t_dim1], &c__1);
+			i__1 = *n - *k + i__ - 1 - j + 1;
+			clacgv_(&i__1, &v[i__ + j * v_dim1], ldv);
+			i__1 = i__ + (*n - *k + i__) * v_dim1;
+			v[i__1].r = vii.r, v[i__1].i = vii.i;
+		    }
+
+/*                 T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) */
+
+		    i__1 = *k - i__;
+		    ctrmv_("Lower", "No transpose", "Non-unit", &i__1, &t[i__ 
+			    + 1 + (i__ + 1) * t_dim1], ldt, &t[i__ + 1 + i__ *
+			     t_dim1], &c__1)
+			    ;
+		    if (i__ > 1) {
+			prevlastv = min(prevlastv,lastv);
+		    } else {
+			prevlastv = lastv;
+		    }
+		}
+		i__1 = i__ + i__ * t_dim1;
+		i__2 = i__;
+		t[i__1].r = tau[i__2].r, t[i__1].i = tau[i__2].i;
+	    }
+/* L40: */
+	}
+    }
+    return 0;
+
+/*     End of CLARFT */
+
+} /* clarft_ */
diff --git a/contrib/libs/clapack/clarfx.c b/contrib/libs/clapack/clarfx.c
new file mode 100644
index 0000000000..538bd47f0b
--- /dev/null
+++ b/contrib/libs/clapack/clarfx.c
@@ -0,0 +1,2048 @@
+/* clarfx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int clarfx_(char *side, integer *m, integer *n, complex *v,
+ complex *tau, complex *c__, integer *ldc, complex *work)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7, i__8,
+ i__9, i__10, i__11;
+ complex q__1, q__2, q__3, q__4, q__5, q__6, q__7, q__8, q__9, q__10,
+ q__11, q__12, q__13, q__14, q__15, q__16, q__17, q__18, q__19;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer j;
+ complex t1, t2, t3, t4, t5, t6, t7, t8, t9, v1, v2, v3, v4, v5, v6, v7,
+ v8, v9, t10, v10, sum;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+, integer *, complex *, complex *, integer *, complex *);
+ extern logical lsame_(char *, char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLARFX applies a complex elementary reflector H to a complex m by n */
+/* matrix C, from either the left or the right. H is represented in the */
+/* form */
+
+/* H = I - tau * v * v' */
+
+/* where tau is a complex scalar and v is a complex vector. */
+
+/* If tau = 0, then H is taken to be the unit matrix */
+
+/* This version uses inline code if H has order < 11. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': form H * C */
+/* = 'R': form C * H */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* V (input) COMPLEX array, dimension (M) if SIDE = 'L' */
+/* or (N) if SIDE = 'R' */
+/* The vector v in the representation of H. */
+
+/* TAU (input) COMPLEX */
+/* The value tau in the representation of H. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the m by n matrix C. */
+/* On exit, C is overwritten by the matrix H * C if SIDE = 'L', */
+/* or C * H if SIDE = 'R'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDA >= max(1,M). */
+
+/* WORK (workspace) COMPLEX array, dimension (N) if SIDE = 'L' */
+/* or (M) if SIDE = 'R' */
+/* WORK is not referenced if H has order < 11. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --v;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ if (tau->r == 0.f && tau->i == 0.f) {
+ return 0;
+ }
+ if (lsame_(side, "L")) {
+
+/* Form H * C, where H has order m. */
+
+ switch (*m) {
+ case 1: goto L10;
+ case 2: goto L30;
+ case 3: goto L50;
+ case 4: goto L70;
+ case 5: goto L90;
+ case 6: goto L110;
+ case 7: goto L130;
+ case 8: goto L150;
+ case 9: goto L170;
+ case 10: goto L190;
+ }
+
+/* Code for general M */
+
+ clarf_(side, m, n, &v[1], &c__1, tau, &c__[c_offset], ldc, &work[1]);
+ goto L410;
+L10:
+
+/* Special code for 1 x 1 Householder */
+
+ q__3.r = tau->r * v[1].r - tau->i * v[1].i, q__3.i = tau->r * v[1].i
+ + tau->i * v[1].r;
+ r_cnjg(&q__4, &v[1]);
+ q__2.r = q__3.r * q__4.r - q__3.i * q__4.i, q__2.i = q__3.r * q__4.i
+ + q__3.i * q__4.r;
+ q__1.r = 1.f - q__2.r, q__1.i = 0.f - q__2.i;
+ t1.r = q__1.r, t1.i = q__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__1.r = t1.r * c__[i__3].r - t1.i * c__[i__3].i, q__1.i = t1.r *
+ c__[i__3].i + t1.i * c__[i__3].r;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L20: */
+ }
+ goto L410;
+L30:
+
+/* Special code for 2 x 2 Householder */
+
+ r_cnjg(&q__1, &v[1]);
+ v1.r = q__1.r, v1.i = q__1.i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ r_cnjg(&q__1, &v[2]);
+ v2.r = q__1.r, v2.i = q__1.i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ q__2.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__2.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ q__3.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__3.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L40: */
+ }
+ goto L410;
+L50:
+
+/* Special code for 3 x 3 Householder */
+
+ r_cnjg(&q__1, &v[1]);
+ v1.r = q__1.r, v1.i = q__1.i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ r_cnjg(&q__1, &v[2]);
+ v2.r = q__1.r, v2.i = q__1.i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ r_cnjg(&q__1, &v[3]);
+ v3.r = q__1.r, v3.i = q__1.i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ q__3.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__3.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ q__4.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__4.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__2.r = q__3.r + q__4.r, q__2.i = q__3.i + q__4.i;
+ i__4 = j * c_dim1 + 3;
+ q__5.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__5.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L60: */
+ }
+ goto L410;
+L70:
+
+/* Special code for 4 x 4 Householder */
+
+ r_cnjg(&q__1, &v[1]);
+ v1.r = q__1.r, v1.i = q__1.i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ r_cnjg(&q__1, &v[2]);
+ v2.r = q__1.r, v2.i = q__1.i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ r_cnjg(&q__1, &v[3]);
+ v3.r = q__1.r, v3.i = q__1.i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ r_cnjg(&q__1, &v[4]);
+ v4.r = q__1.r, v4.i = q__1.i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ q__4.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__4.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ q__5.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__5.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__3.r = q__4.r + q__5.r, q__3.i = q__4.i + q__5.i;
+ i__4 = j * c_dim1 + 3;
+ q__6.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__6.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__2.r = q__3.r + q__6.r, q__2.i = q__3.i + q__6.i;
+ i__5 = j * c_dim1 + 4;
+ q__7.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__7.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ q__1.r = q__2.r + q__7.r, q__1.i = q__2.i + q__7.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L80: */
+ }
+ goto L410;
+L90:
+
+/* Special code for 5 x 5 Householder */
+
+ r_cnjg(&q__1, &v[1]);
+ v1.r = q__1.r, v1.i = q__1.i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ r_cnjg(&q__1, &v[2]);
+ v2.r = q__1.r, v2.i = q__1.i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ r_cnjg(&q__1, &v[3]);
+ v3.r = q__1.r, v3.i = q__1.i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ r_cnjg(&q__1, &v[4]);
+ v4.r = q__1.r, v4.i = q__1.i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ r_cnjg(&q__1, &v[5]);
+ v5.r = q__1.r, v5.i = q__1.i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ q__5.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__5.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ q__6.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__6.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__4.r = q__5.r + q__6.r, q__4.i = q__5.i + q__6.i;
+ i__4 = j * c_dim1 + 3;
+ q__7.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__7.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__3.r = q__4.r + q__7.r, q__3.i = q__4.i + q__7.i;
+ i__5 = j * c_dim1 + 4;
+ q__8.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__8.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ q__2.r = q__3.r + q__8.r, q__2.i = q__3.i + q__8.i;
+ i__6 = j * c_dim1 + 5;
+ q__9.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__9.i = v5.r *
+ c__[i__6].i + v5.i * c__[i__6].r;
+ q__1.r = q__2.r + q__9.r, q__1.i = q__2.i + q__9.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L100: */
+ }
+ goto L410;
+L110:
+
+/* Special code for 6 x 6 Householder */
+
+ r_cnjg(&q__1, &v[1]);
+ v1.r = q__1.r, v1.i = q__1.i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ r_cnjg(&q__1, &v[2]);
+ v2.r = q__1.r, v2.i = q__1.i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ r_cnjg(&q__1, &v[3]);
+ v3.r = q__1.r, v3.i = q__1.i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ r_cnjg(&q__1, &v[4]);
+ v4.r = q__1.r, v4.i = q__1.i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ r_cnjg(&q__1, &v[5]);
+ v5.r = q__1.r, v5.i = q__1.i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ r_cnjg(&q__1, &v[6]);
+ v6.r = q__1.r, v6.i = q__1.i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ q__6.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__6.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ q__7.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__7.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__5.r = q__6.r + q__7.r, q__5.i = q__6.i + q__7.i;
+ i__4 = j * c_dim1 + 3;
+ q__8.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__8.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__4.r = q__5.r + q__8.r, q__4.i = q__5.i + q__8.i;
+ i__5 = j * c_dim1 + 4;
+ q__9.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__9.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ q__3.r = q__4.r + q__9.r, q__3.i = q__4.i + q__9.i;
+ i__6 = j * c_dim1 + 5;
+ q__10.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__10.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__2.r = q__3.r + q__10.r, q__2.i = q__3.i + q__10.i;
+ i__7 = j * c_dim1 + 6;
+ q__11.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__11.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__1.r = q__2.r + q__11.r, q__1.i = q__2.i + q__11.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L120: */
+ }
+ goto L410;
+L130:
+
+/* Special code for 7 x 7 Householder */
+
+ r_cnjg(&q__1, &v[1]);
+ v1.r = q__1.r, v1.i = q__1.i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ r_cnjg(&q__1, &v[2]);
+ v2.r = q__1.r, v2.i = q__1.i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ r_cnjg(&q__1, &v[3]);
+ v3.r = q__1.r, v3.i = q__1.i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ r_cnjg(&q__1, &v[4]);
+ v4.r = q__1.r, v4.i = q__1.i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ r_cnjg(&q__1, &v[5]);
+ v5.r = q__1.r, v5.i = q__1.i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ r_cnjg(&q__1, &v[6]);
+ v6.r = q__1.r, v6.i = q__1.i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ r_cnjg(&q__1, &v[7]);
+ v7.r = q__1.r, v7.i = q__1.i;
+ r_cnjg(&q__2, &v7);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t7.r = q__1.r, t7.i = q__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ q__7.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__7.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ q__8.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__8.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__6.r = q__7.r + q__8.r, q__6.i = q__7.i + q__8.i;
+ i__4 = j * c_dim1 + 3;
+ q__9.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__9.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__5.r = q__6.r + q__9.r, q__5.i = q__6.i + q__9.i;
+ i__5 = j * c_dim1 + 4;
+ q__10.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__10.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ q__4.r = q__5.r + q__10.r, q__4.i = q__5.i + q__10.i;
+ i__6 = j * c_dim1 + 5;
+ q__11.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__11.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__3.r = q__4.r + q__11.r, q__3.i = q__4.i + q__11.i;
+ i__7 = j * c_dim1 + 6;
+ q__12.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__12.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__2.r = q__3.r + q__12.r, q__2.i = q__3.i + q__12.i;
+ i__8 = j * c_dim1 + 7;
+ q__13.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, q__13.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ q__1.r = q__2.r + q__13.r, q__1.i = q__2.i + q__13.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 7;
+ i__3 = j * c_dim1 + 7;
+ q__2.r = sum.r * t7.r - sum.i * t7.i, q__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L140: */
+ }
+ goto L410;
+L150:
+
+/* Special code for 8 x 8 Householder */
+
+ r_cnjg(&q__1, &v[1]);
+ v1.r = q__1.r, v1.i = q__1.i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ r_cnjg(&q__1, &v[2]);
+ v2.r = q__1.r, v2.i = q__1.i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ r_cnjg(&q__1, &v[3]);
+ v3.r = q__1.r, v3.i = q__1.i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ r_cnjg(&q__1, &v[4]);
+ v4.r = q__1.r, v4.i = q__1.i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ r_cnjg(&q__1, &v[5]);
+ v5.r = q__1.r, v5.i = q__1.i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ r_cnjg(&q__1, &v[6]);
+ v6.r = q__1.r, v6.i = q__1.i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ r_cnjg(&q__1, &v[7]);
+ v7.r = q__1.r, v7.i = q__1.i;
+ r_cnjg(&q__2, &v7);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t7.r = q__1.r, t7.i = q__1.i;
+ r_cnjg(&q__1, &v[8]);
+ v8.r = q__1.r, v8.i = q__1.i;
+ r_cnjg(&q__2, &v8);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t8.r = q__1.r, t8.i = q__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ q__8.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__8.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ q__9.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__9.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__7.r = q__8.r + q__9.r, q__7.i = q__8.i + q__9.i;
+ i__4 = j * c_dim1 + 3;
+ q__10.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__10.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ q__6.r = q__7.r + q__10.r, q__6.i = q__7.i + q__10.i;
+ i__5 = j * c_dim1 + 4;
+ q__11.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__11.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ q__5.r = q__6.r + q__11.r, q__5.i = q__6.i + q__11.i;
+ i__6 = j * c_dim1 + 5;
+ q__12.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__12.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__4.r = q__5.r + q__12.r, q__4.i = q__5.i + q__12.i;
+ i__7 = j * c_dim1 + 6;
+ q__13.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__13.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__3.r = q__4.r + q__13.r, q__3.i = q__4.i + q__13.i;
+ i__8 = j * c_dim1 + 7;
+ q__14.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, q__14.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ q__2.r = q__3.r + q__14.r, q__2.i = q__3.i + q__14.i;
+ i__9 = j * c_dim1 + 8;
+ q__15.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, q__15.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ q__1.r = q__2.r + q__15.r, q__1.i = q__2.i + q__15.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 7;
+ i__3 = j * c_dim1 + 7;
+ q__2.r = sum.r * t7.r - sum.i * t7.i, q__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 8;
+ i__3 = j * c_dim1 + 8;
+ q__2.r = sum.r * t8.r - sum.i * t8.i, q__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L160: */
+ }
+ goto L410;
+L170:
+
+/* Special code for 9 x 9 Householder */
+
+ r_cnjg(&q__1, &v[1]);
+ v1.r = q__1.r, v1.i = q__1.i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ r_cnjg(&q__1, &v[2]);
+ v2.r = q__1.r, v2.i = q__1.i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ r_cnjg(&q__1, &v[3]);
+ v3.r = q__1.r, v3.i = q__1.i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ r_cnjg(&q__1, &v[4]);
+ v4.r = q__1.r, v4.i = q__1.i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ r_cnjg(&q__1, &v[5]);
+ v5.r = q__1.r, v5.i = q__1.i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ r_cnjg(&q__1, &v[6]);
+ v6.r = q__1.r, v6.i = q__1.i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ r_cnjg(&q__1, &v[7]);
+ v7.r = q__1.r, v7.i = q__1.i;
+ r_cnjg(&q__2, &v7);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t7.r = q__1.r, t7.i = q__1.i;
+ r_cnjg(&q__1, &v[8]);
+ v8.r = q__1.r, v8.i = q__1.i;
+ r_cnjg(&q__2, &v8);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t8.r = q__1.r, t8.i = q__1.i;
+ r_cnjg(&q__1, &v[9]);
+ v9.r = q__1.r, v9.i = q__1.i;
+ r_cnjg(&q__2, &v9);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t9.r = q__1.r, t9.i = q__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ q__9.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__9.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ q__10.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__10.i = v2.r
+ * c__[i__3].i + v2.i * c__[i__3].r;
+ q__8.r = q__9.r + q__10.r, q__8.i = q__9.i + q__10.i;
+ i__4 = j * c_dim1 + 3;
+ q__11.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__11.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ q__7.r = q__8.r + q__11.r, q__7.i = q__8.i + q__11.i;
+ i__5 = j * c_dim1 + 4;
+ q__12.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__12.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ q__6.r = q__7.r + q__12.r, q__6.i = q__7.i + q__12.i;
+ i__6 = j * c_dim1 + 5;
+ q__13.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__13.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__5.r = q__6.r + q__13.r, q__5.i = q__6.i + q__13.i;
+ i__7 = j * c_dim1 + 6;
+ q__14.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__14.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__4.r = q__5.r + q__14.r, q__4.i = q__5.i + q__14.i;
+ i__8 = j * c_dim1 + 7;
+ q__15.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, q__15.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ q__3.r = q__4.r + q__15.r, q__3.i = q__4.i + q__15.i;
+ i__9 = j * c_dim1 + 8;
+ q__16.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, q__16.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ q__2.r = q__3.r + q__16.r, q__2.i = q__3.i + q__16.i;
+ i__10 = j * c_dim1 + 9;
+ q__17.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, q__17.i =
+ v9.r * c__[i__10].i + v9.i * c__[i__10].r;
+ q__1.r = q__2.r + q__17.r, q__1.i = q__2.i + q__17.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 7;
+ i__3 = j * c_dim1 + 7;
+ q__2.r = sum.r * t7.r - sum.i * t7.i, q__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 8;
+ i__3 = j * c_dim1 + 8;
+ q__2.r = sum.r * t8.r - sum.i * t8.i, q__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 9;
+ i__3 = j * c_dim1 + 9;
+ q__2.r = sum.r * t9.r - sum.i * t9.i, q__2.i = sum.r * t9.i +
+ sum.i * t9.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L180: */
+ }
+ goto L410;
+L190:
+
+/* Special code for 10 x 10 Householder */
+
+ r_cnjg(&q__1, &v[1]);
+ v1.r = q__1.r, v1.i = q__1.i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ r_cnjg(&q__1, &v[2]);
+ v2.r = q__1.r, v2.i = q__1.i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ r_cnjg(&q__1, &v[3]);
+ v3.r = q__1.r, v3.i = q__1.i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ r_cnjg(&q__1, &v[4]);
+ v4.r = q__1.r, v4.i = q__1.i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ r_cnjg(&q__1, &v[5]);
+ v5.r = q__1.r, v5.i = q__1.i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ r_cnjg(&q__1, &v[6]);
+ v6.r = q__1.r, v6.i = q__1.i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ r_cnjg(&q__1, &v[7]);
+ v7.r = q__1.r, v7.i = q__1.i;
+ r_cnjg(&q__2, &v7);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t7.r = q__1.r, t7.i = q__1.i;
+ r_cnjg(&q__1, &v[8]);
+ v8.r = q__1.r, v8.i = q__1.i;
+ r_cnjg(&q__2, &v8);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t8.r = q__1.r, t8.i = q__1.i;
+ r_cnjg(&q__1, &v[9]);
+ v9.r = q__1.r, v9.i = q__1.i;
+ r_cnjg(&q__2, &v9);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t9.r = q__1.r, t9.i = q__1.i;
+ r_cnjg(&q__1, &v[10]);
+ v10.r = q__1.r, v10.i = q__1.i;
+ r_cnjg(&q__2, &v10);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t10.r = q__1.r, t10.i = q__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ q__10.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__10.i = v1.r
+ * c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ q__11.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__11.i = v2.r
+ * c__[i__3].i + v2.i * c__[i__3].r;
+ q__9.r = q__10.r + q__11.r, q__9.i = q__10.i + q__11.i;
+ i__4 = j * c_dim1 + 3;
+ q__12.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__12.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ q__8.r = q__9.r + q__12.r, q__8.i = q__9.i + q__12.i;
+ i__5 = j * c_dim1 + 4;
+ q__13.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__13.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ q__7.r = q__8.r + q__13.r, q__7.i = q__8.i + q__13.i;
+ i__6 = j * c_dim1 + 5;
+ q__14.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__14.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__6.r = q__7.r + q__14.r, q__6.i = q__7.i + q__14.i;
+ i__7 = j * c_dim1 + 6;
+ q__15.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__15.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__5.r = q__6.r + q__15.r, q__5.i = q__6.i + q__15.i;
+ i__8 = j * c_dim1 + 7;
+ q__16.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, q__16.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ q__4.r = q__5.r + q__16.r, q__4.i = q__5.i + q__16.i;
+ i__9 = j * c_dim1 + 8;
+ q__17.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, q__17.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ q__3.r = q__4.r + q__17.r, q__3.i = q__4.i + q__17.i;
+ i__10 = j * c_dim1 + 9;
+ q__18.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, q__18.i =
+ v9.r * c__[i__10].i + v9.i * c__[i__10].r;
+ q__2.r = q__3.r + q__18.r, q__2.i = q__3.i + q__18.i;
+ i__11 = j * c_dim1 + 10;
+ q__19.r = v10.r * c__[i__11].r - v10.i * c__[i__11].i, q__19.i =
+ v10.r * c__[i__11].i + v10.i * c__[i__11].r;
+ q__1.r = q__2.r + q__19.r, q__1.i = q__2.i + q__19.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 7;
+ i__3 = j * c_dim1 + 7;
+ q__2.r = sum.r * t7.r - sum.i * t7.i, q__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 8;
+ i__3 = j * c_dim1 + 8;
+ q__2.r = sum.r * t8.r - sum.i * t8.i, q__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 9;
+ i__3 = j * c_dim1 + 9;
+ q__2.r = sum.r * t9.r - sum.i * t9.i, q__2.i = sum.r * t9.i +
+ sum.i * t9.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 10;
+ i__3 = j * c_dim1 + 10;
+ q__2.r = sum.r * t10.r - sum.i * t10.i, q__2.i = sum.r * t10.i +
+ sum.i * t10.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L200: */
+ }
+ goto L410;
+ } else {
+
+/* Form C * H, where H has order n. */
+
+ switch (*n) {
+ case 1: goto L210;
+ case 2: goto L230;
+ case 3: goto L250;
+ case 4: goto L270;
+ case 5: goto L290;
+ case 6: goto L310;
+ case 7: goto L330;
+ case 8: goto L350;
+ case 9: goto L370;
+ case 10: goto L390;
+ }
+
+/* Code for general N */
+
+ clarf_(side, m, n, &v[1], &c__1, tau, &c__[c_offset], ldc, &work[1]);
+ goto L410;
+L210:
+
+/* Special code for 1 x 1 Householder */
+
+ q__3.r = tau->r * v[1].r - tau->i * v[1].i, q__3.i = tau->r * v[1].i
+ + tau->i * v[1].r;
+ r_cnjg(&q__4, &v[1]);
+ q__2.r = q__3.r * q__4.r - q__3.i * q__4.i, q__2.i = q__3.r * q__4.i
+ + q__3.i * q__4.r;
+ q__1.r = 1.f - q__2.r, q__1.i = 0.f - q__2.i;
+ t1.r = q__1.r, t1.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__1.r = t1.r * c__[i__3].r - t1.i * c__[i__3].i, q__1.i = t1.r *
+ c__[i__3].i + t1.i * c__[i__3].r;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L220: */
+ }
+ goto L410;
+L230:
+
+/* Special code for 2 x 2 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ q__2.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__2.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + (c_dim1 << 1);
+ q__3.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__3.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + (c_dim1 << 1);
+ i__3 = j + (c_dim1 << 1);
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L240: */
+ }
+ goto L410;
+L250:
+
+/* Special code for 3 x 3 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ q__3.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__3.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + (c_dim1 << 1);
+ q__4.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__4.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__2.r = q__3.r + q__4.r, q__2.i = q__3.i + q__4.i;
+ i__4 = j + c_dim1 * 3;
+ q__5.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__5.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + (c_dim1 << 1);
+ i__3 = j + (c_dim1 << 1);
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L260: */
+ }
+ goto L410;
+L270:
+
+/* Special code for 4 x 4 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ q__4.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__4.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + (c_dim1 << 1);
+ q__5.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__5.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__3.r = q__4.r + q__5.r, q__3.i = q__4.i + q__5.i;
+ i__4 = j + c_dim1 * 3;
+ q__6.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__6.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__2.r = q__3.r + q__6.r, q__2.i = q__3.i + q__6.i;
+ i__5 = j + (c_dim1 << 2);
+ q__7.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__7.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ q__1.r = q__2.r + q__7.r, q__1.i = q__2.i + q__7.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + (c_dim1 << 1);
+ i__3 = j + (c_dim1 << 1);
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + (c_dim1 << 2);
+ i__3 = j + (c_dim1 << 2);
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L280: */
+ }
+ goto L410;
+L290:
+
+/* Special code for 5 x 5 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ q__5.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__5.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + (c_dim1 << 1);
+ q__6.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__6.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__4.r = q__5.r + q__6.r, q__4.i = q__5.i + q__6.i;
+ i__4 = j + c_dim1 * 3;
+ q__7.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__7.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__3.r = q__4.r + q__7.r, q__3.i = q__4.i + q__7.i;
+ i__5 = j + (c_dim1 << 2);
+ q__8.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__8.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ q__2.r = q__3.r + q__8.r, q__2.i = q__3.i + q__8.i;
+ i__6 = j + c_dim1 * 5;
+ q__9.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__9.i = v5.r *
+ c__[i__6].i + v5.i * c__[i__6].r;
+ q__1.r = q__2.r + q__9.r, q__1.i = q__2.i + q__9.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + (c_dim1 << 1);
+ i__3 = j + (c_dim1 << 1);
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + (c_dim1 << 2);
+ i__3 = j + (c_dim1 << 2);
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L300: */
+ }
+ goto L410;
+L310:
+
+/* Special code for 6 x 6 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ q__6.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__6.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + (c_dim1 << 1);
+ q__7.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__7.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__5.r = q__6.r + q__7.r, q__5.i = q__6.i + q__7.i;
+ i__4 = j + c_dim1 * 3;
+ q__8.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__8.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__4.r = q__5.r + q__8.r, q__4.i = q__5.i + q__8.i;
+ i__5 = j + (c_dim1 << 2);
+ q__9.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__9.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ q__3.r = q__4.r + q__9.r, q__3.i = q__4.i + q__9.i;
+ i__6 = j + c_dim1 * 5;
+ q__10.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__10.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__2.r = q__3.r + q__10.r, q__2.i = q__3.i + q__10.i;
+ i__7 = j + c_dim1 * 6;
+ q__11.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__11.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__1.r = q__2.r + q__11.r, q__1.i = q__2.i + q__11.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + (c_dim1 << 1);
+ i__3 = j + (c_dim1 << 1);
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + (c_dim1 << 2);
+ i__3 = j + (c_dim1 << 2);
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L320: */
+ }
+ goto L410;
+L330:
+
+/* Special code for 7 x 7 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ v7.r = v[7].r, v7.i = v[7].i;
+ r_cnjg(&q__2, &v7);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t7.r = q__1.r, t7.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ q__7.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__7.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + (c_dim1 << 1);
+ q__8.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__8.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__6.r = q__7.r + q__8.r, q__6.i = q__7.i + q__8.i;
+ i__4 = j + c_dim1 * 3;
+ q__9.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__9.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__5.r = q__6.r + q__9.r, q__5.i = q__6.i + q__9.i;
+ i__5 = j + (c_dim1 << 2);
+ q__10.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__10.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ q__4.r = q__5.r + q__10.r, q__4.i = q__5.i + q__10.i;
+ i__6 = j + c_dim1 * 5;
+ q__11.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__11.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__3.r = q__4.r + q__11.r, q__3.i = q__4.i + q__11.i;
+ i__7 = j + c_dim1 * 6;
+ q__12.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__12.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__2.r = q__3.r + q__12.r, q__2.i = q__3.i + q__12.i;
+ i__8 = j + c_dim1 * 7;
+ q__13.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, q__13.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ q__1.r = q__2.r + q__13.r, q__1.i = q__2.i + q__13.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + (c_dim1 << 1);
+ i__3 = j + (c_dim1 << 1);
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + (c_dim1 << 2);
+ i__3 = j + (c_dim1 << 2);
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 7;
+ i__3 = j + c_dim1 * 7;
+ q__2.r = sum.r * t7.r - sum.i * t7.i, q__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L340: */
+ }
+ goto L410;
+L350:
+
+/* Special code for 8 x 8 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ v7.r = v[7].r, v7.i = v[7].i;
+ r_cnjg(&q__2, &v7);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t7.r = q__1.r, t7.i = q__1.i;
+ v8.r = v[8].r, v8.i = v[8].i;
+ r_cnjg(&q__2, &v8);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t8.r = q__1.r, t8.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ q__8.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__8.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + (c_dim1 << 1);
+ q__9.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__9.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__7.r = q__8.r + q__9.r, q__7.i = q__8.i + q__9.i;
+ i__4 = j + c_dim1 * 3;
+ q__10.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__10.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ q__6.r = q__7.r + q__10.r, q__6.i = q__7.i + q__10.i;
+ i__5 = j + (c_dim1 << 2);
+ q__11.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__11.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ q__5.r = q__6.r + q__11.r, q__5.i = q__6.i + q__11.i;
+ i__6 = j + c_dim1 * 5;
+ q__12.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__12.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__4.r = q__5.r + q__12.r, q__4.i = q__5.i + q__12.i;
+ i__7 = j + c_dim1 * 6;
+ q__13.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__13.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__3.r = q__4.r + q__13.r, q__3.i = q__4.i + q__13.i;
+ i__8 = j + c_dim1 * 7;
+ q__14.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, q__14.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ q__2.r = q__3.r + q__14.r, q__2.i = q__3.i + q__14.i;
+ i__9 = j + (c_dim1 << 3);
+ q__15.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, q__15.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ q__1.r = q__2.r + q__15.r, q__1.i = q__2.i + q__15.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + (c_dim1 << 1);
+ i__3 = j + (c_dim1 << 1);
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + (c_dim1 << 2);
+ i__3 = j + (c_dim1 << 2);
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 7;
+ i__3 = j + c_dim1 * 7;
+ q__2.r = sum.r * t7.r - sum.i * t7.i, q__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + (c_dim1 << 3);
+ i__3 = j + (c_dim1 << 3);
+ q__2.r = sum.r * t8.r - sum.i * t8.i, q__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L360: */
+ }
+ goto L410;
+L370:
+
+/* Special code for 9 x 9 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ v7.r = v[7].r, v7.i = v[7].i;
+ r_cnjg(&q__2, &v7);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t7.r = q__1.r, t7.i = q__1.i;
+ v8.r = v[8].r, v8.i = v[8].i;
+ r_cnjg(&q__2, &v8);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t8.r = q__1.r, t8.i = q__1.i;
+ v9.r = v[9].r, v9.i = v[9].i;
+ r_cnjg(&q__2, &v9);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t9.r = q__1.r, t9.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ q__9.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__9.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + (c_dim1 << 1);
+ q__10.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__10.i = v2.r
+ * c__[i__3].i + v2.i * c__[i__3].r;
+ q__8.r = q__9.r + q__10.r, q__8.i = q__9.i + q__10.i;
+ i__4 = j + c_dim1 * 3;
+ q__11.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__11.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ q__7.r = q__8.r + q__11.r, q__7.i = q__8.i + q__11.i;
+ i__5 = j + (c_dim1 << 2);
+ q__12.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__12.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ q__6.r = q__7.r + q__12.r, q__6.i = q__7.i + q__12.i;
+ i__6 = j + c_dim1 * 5;
+ q__13.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__13.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__5.r = q__6.r + q__13.r, q__5.i = q__6.i + q__13.i;
+ i__7 = j + c_dim1 * 6;
+ q__14.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__14.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__4.r = q__5.r + q__14.r, q__4.i = q__5.i + q__14.i;
+ i__8 = j + c_dim1 * 7;
+ q__15.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, q__15.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ q__3.r = q__4.r + q__15.r, q__3.i = q__4.i + q__15.i;
+ i__9 = j + (c_dim1 << 3);
+ q__16.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, q__16.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ q__2.r = q__3.r + q__16.r, q__2.i = q__3.i + q__16.i;
+ i__10 = j + c_dim1 * 9;
+ q__17.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, q__17.i =
+ v9.r * c__[i__10].i + v9.i * c__[i__10].r;
+ q__1.r = q__2.r + q__17.r, q__1.i = q__2.i + q__17.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + (c_dim1 << 1);
+ i__3 = j + (c_dim1 << 1);
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + (c_dim1 << 2);
+ i__3 = j + (c_dim1 << 2);
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 7;
+ i__3 = j + c_dim1 * 7;
+ q__2.r = sum.r * t7.r - sum.i * t7.i, q__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + (c_dim1 << 3);
+ i__3 = j + (c_dim1 << 3);
+ q__2.r = sum.r * t8.r - sum.i * t8.i, q__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 9;
+ i__3 = j + c_dim1 * 9;
+ q__2.r = sum.r * t9.r - sum.i * t9.i, q__2.i = sum.r * t9.i +
+ sum.i * t9.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L380: */
+ }
+ goto L410;
+L390:
+
+/* Special code for 10 x 10 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ v7.r = v[7].r, v7.i = v[7].i;
+ r_cnjg(&q__2, &v7);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t7.r = q__1.r, t7.i = q__1.i;
+ v8.r = v[8].r, v8.i = v[8].i;
+ r_cnjg(&q__2, &v8);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t8.r = q__1.r, t8.i = q__1.i;
+ v9.r = v[9].r, v9.i = v[9].i;
+ r_cnjg(&q__2, &v9);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t9.r = q__1.r, t9.i = q__1.i;
+ v10.r = v[10].r, v10.i = v[10].i;
+ r_cnjg(&q__2, &v10);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t10.r = q__1.r, t10.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ q__10.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__10.i = v1.r
+ * c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + (c_dim1 << 1);
+ q__11.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__11.i = v2.r
+ * c__[i__3].i + v2.i * c__[i__3].r;
+ q__9.r = q__10.r + q__11.r, q__9.i = q__10.i + q__11.i;
+ i__4 = j + c_dim1 * 3;
+ q__12.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__12.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ q__8.r = q__9.r + q__12.r, q__8.i = q__9.i + q__12.i;
+ i__5 = j + (c_dim1 << 2);
+ q__13.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__13.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ q__7.r = q__8.r + q__13.r, q__7.i = q__8.i + q__13.i;
+ i__6 = j + c_dim1 * 5;
+ q__14.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__14.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__6.r = q__7.r + q__14.r, q__6.i = q__7.i + q__14.i;
+ i__7 = j + c_dim1 * 6;
+ q__15.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__15.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__5.r = q__6.r + q__15.r, q__5.i = q__6.i + q__15.i;
+ i__8 = j + c_dim1 * 7;
+ q__16.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, q__16.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ q__4.r = q__5.r + q__16.r, q__4.i = q__5.i + q__16.i;
+ i__9 = j + (c_dim1 << 3);
+ q__17.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, q__17.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ q__3.r = q__4.r + q__17.r, q__3.i = q__4.i + q__17.i;
+ i__10 = j + c_dim1 * 9;
+ q__18.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, q__18.i =
+ v9.r * c__[i__10].i + v9.i * c__[i__10].r;
+ q__2.r = q__3.r + q__18.r, q__2.i = q__3.i + q__18.i;
+ i__11 = j + c_dim1 * 10;
+ q__19.r = v10.r * c__[i__11].r - v10.i * c__[i__11].i, q__19.i =
+ v10.r * c__[i__11].i + v10.i * c__[i__11].r;
+ q__1.r = q__2.r + q__19.r, q__1.i = q__2.i + q__19.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + (c_dim1 << 1);
+ i__3 = j + (c_dim1 << 1);
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + (c_dim1 << 2);
+ i__3 = j + (c_dim1 << 2);
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 7;
+ i__3 = j + c_dim1 * 7;
+ q__2.r = sum.r * t7.r - sum.i * t7.i, q__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + (c_dim1 << 3);
+ i__3 = j + (c_dim1 << 3);
+ q__2.r = sum.r * t8.r - sum.i * t8.i, q__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 9;
+ i__3 = j + c_dim1 * 9;
+ q__2.r = sum.r * t9.r - sum.i * t9.i, q__2.i = sum.r * t9.i +
+ sum.i * t9.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 10;
+ i__3 = j + c_dim1 * 10;
+ q__2.r = sum.r * t10.r - sum.i * t10.i, q__2.i = sum.r * t10.i +
+ sum.i * t10.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L400: */
+ }
+ goto L410;
+ }
+L410:
+ return 0;
+
+/* End of CLARFX */
+
+} /* clarfx_ */
diff --git a/contrib/libs/clapack/clargv.c b/contrib/libs/clapack/clargv.c
new file mode 100644
index 0000000000..166806e001
--- /dev/null
+++ b/contrib/libs/clapack/clargv.c
@@ -0,0 +1,335 @@
+/* clargv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int clargv_(integer *n, complex *x, integer *incx, complex *
+ y, integer *incy, real *c__, integer *incc)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ real r__1, r__2, r__3, r__4, r__5, r__6, r__7, r__8, r__9, r__10;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ double log(doublereal), pow_ri(real *, integer *), r_imag(complex *),
+ sqrt(doublereal);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ real d__;
+ complex f, g;
+ integer i__, j;
+ complex r__;
+ real f2, g2;
+ integer ic;
+ real di;
+ complex ff;
+ real cs, dr;
+ complex fs, gs;
+ integer ix, iy;
+ complex sn;
+ real f2s, g2s, eps, scale;
+ integer count;
+ real safmn2, safmx2;
+ extern doublereal slapy2_(real *, real *), slamch_(char *);
+ real safmin;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLARGV generates a vector of complex plane rotations with real */
+/* cosines, determined by elements of the complex vectors x and y. */
+/* For i = 1,2,...,n */
+
+/* ( c(i) s(i) ) ( x(i) ) = ( r(i) ) */
+/* ( -conjg(s(i)) c(i) ) ( y(i) ) = ( 0 ) */
+
+/* where c(i)**2 + ABS(s(i))**2 = 1 */
+
+/* The following conventions are used (these are the same as in CLARTG, */
+/* but differ from the BLAS1 routine CROTG): */
+/* If y(i)=0, then c(i)=1 and s(i)=0. */
+/* If x(i)=0, then c(i)=0 and s(i) is chosen so that r(i) is real. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of plane rotations to be generated. */
+
+/* X (input/output) COMPLEX array, dimension (1+(N-1)*INCX) */
+/* On entry, the vector x. */
+/* On exit, x(i) is overwritten by r(i), for i = 1,...,n. */
+
+/* INCX (input) INTEGER */
+/* The increment between elements of X. INCX > 0. */
+
+/* Y (input/output) COMPLEX array, dimension (1+(N-1)*INCY) */
+/* On entry, the vector y. */
+/* On exit, the sines of the plane rotations. */
+
+/* INCY (input) INTEGER */
+/* The increment between elements of Y. INCY > 0. */
+
+/* C (output) REAL array, dimension (1+(N-1)*INCC) */
+/* The cosines of the plane rotations. */
+
+/* INCC (input) INTEGER */
+/* The increment between elements of C. INCC > 0. */
+
+/* Further Details */
+/* ======= ======= */
+
+/* 6-6-96 - Modified with a new algorithm by W. Kahan and J. Demmel */
+
+/* This version has a few statements commented out for thread safety */
+/* (machine parameters are computed on each entry). 10 feb 03, SJH. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* LOGICAL FIRST */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Save statement .. */
+/* SAVE FIRST, SAFMX2, SAFMIN, SAFMN2 */
+/* .. */
+/* .. Data statements .. */
+/* DATA FIRST / .TRUE. / */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* IF( FIRST ) THEN */
+/* FIRST = .FALSE. */
+ /* Parameter adjustments */
+ --c__;
+ --y;
+ --x;
+
+ /* Function Body */
+ safmin = slamch_("S");
+ eps = slamch_("E");
+ r__1 = slamch_("B");
+ i__1 = (integer) (log(safmin / eps) / log(slamch_("B")) / 2.f);
+ safmn2 = pow_ri(&r__1, &i__1);
+ safmx2 = 1.f / safmn2;
+/* END IF */
+ ix = 1;
+ iy = 1;
+ ic = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ f.r = x[i__2].r, f.i = x[i__2].i;
+ i__2 = iy;
+ g.r = y[i__2].r, g.i = y[i__2].i;
+
+/* Use identical algorithm as in CLARTG */
+
+/* Computing MAX */
+/* Computing MAX */
+ r__7 = (r__1 = f.r, dabs(r__1)), r__8 = (r__2 = r_imag(&f), dabs(r__2)
+ );
+/* Computing MAX */
+ r__9 = (r__3 = g.r, dabs(r__3)), r__10 = (r__4 = r_imag(&g), dabs(
+ r__4));
+ r__5 = dmax(r__7,r__8), r__6 = dmax(r__9,r__10);
+ scale = dmax(r__5,r__6);
+ fs.r = f.r, fs.i = f.i;
+ gs.r = g.r, gs.i = g.i;
+ count = 0;
+ if (scale >= safmx2) {
+L10:
+ ++count;
+ q__1.r = safmn2 * fs.r, q__1.i = safmn2 * fs.i;
+ fs.r = q__1.r, fs.i = q__1.i;
+ q__1.r = safmn2 * gs.r, q__1.i = safmn2 * gs.i;
+ gs.r = q__1.r, gs.i = q__1.i;
+ scale *= safmn2;
+ if (scale >= safmx2) {
+ goto L10;
+ }
+ } else if (scale <= safmn2) {
+ if (g.r == 0.f && g.i == 0.f) {
+ cs = 1.f;
+ sn.r = 0.f, sn.i = 0.f;
+ r__.r = f.r, r__.i = f.i;
+ goto L50;
+ }
+L20:
+ --count;
+ q__1.r = safmx2 * fs.r, q__1.i = safmx2 * fs.i;
+ fs.r = q__1.r, fs.i = q__1.i;
+ q__1.r = safmx2 * gs.r, q__1.i = safmx2 * gs.i;
+ gs.r = q__1.r, gs.i = q__1.i;
+ scale *= safmx2;
+ if (scale <= safmn2) {
+ goto L20;
+ }
+ }
+/* Computing 2nd power */
+ r__1 = fs.r;
+/* Computing 2nd power */
+ r__2 = r_imag(&fs);
+ f2 = r__1 * r__1 + r__2 * r__2;
+/* Computing 2nd power */
+ r__1 = gs.r;
+/* Computing 2nd power */
+ r__2 = r_imag(&gs);
+ g2 = r__1 * r__1 + r__2 * r__2;
+ if (f2 <= dmax(g2,1.f) * safmin) {
+
+/* This is a rare case: F is very small. */
+
+ if (f.r == 0.f && f.i == 0.f) {
+ cs = 0.f;
+ r__2 = g.r;
+ r__3 = r_imag(&g);
+ r__1 = slapy2_(&r__2, &r__3);
+ r__.r = r__1, r__.i = 0.f;
+/* Do complex/real division explicitly with two real */
+/* divisions */
+ r__1 = gs.r;
+ r__2 = r_imag(&gs);
+ d__ = slapy2_(&r__1, &r__2);
+ r__1 = gs.r / d__;
+ r__2 = -r_imag(&gs) / d__;
+ q__1.r = r__1, q__1.i = r__2;
+ sn.r = q__1.r, sn.i = q__1.i;
+ goto L50;
+ }
+ r__1 = fs.r;
+ r__2 = r_imag(&fs);
+ f2s = slapy2_(&r__1, &r__2);
+/* G2 and G2S are accurate */
+/* G2 is at least SAFMIN, and G2S is at least SAFMN2 */
+ g2s = sqrt(g2);
+/* Error in CS from underflow in F2S is at most */
+/* UNFL / SAFMN2 .lt. sqrt(UNFL*EPS) .lt. EPS */
+/* If MAX(G2,ONE)=G2, then F2 .lt. G2*SAFMIN, */
+/* and so CS .lt. sqrt(SAFMIN) */
+/* If MAX(G2,ONE)=ONE, then F2 .lt. SAFMIN */
+/* and so CS .lt. sqrt(SAFMIN)/SAFMN2 = sqrt(EPS) */
+/* Therefore, CS = F2S/G2S / sqrt( 1 + (F2S/G2S)**2 ) = F2S/G2S */
+ cs = f2s / g2s;
+/* Make sure abs(FF) = 1 */
+/* Do complex/real division explicitly with 2 real divisions */
+/* Computing MAX */
+ r__3 = (r__1 = f.r, dabs(r__1)), r__4 = (r__2 = r_imag(&f), dabs(
+ r__2));
+ if (dmax(r__3,r__4) > 1.f) {
+ r__1 = f.r;
+ r__2 = r_imag(&f);
+ d__ = slapy2_(&r__1, &r__2);
+ r__1 = f.r / d__;
+ r__2 = r_imag(&f) / d__;
+ q__1.r = r__1, q__1.i = r__2;
+ ff.r = q__1.r, ff.i = q__1.i;
+ } else {
+ dr = safmx2 * f.r;
+ di = safmx2 * r_imag(&f);
+ d__ = slapy2_(&dr, &di);
+ r__1 = dr / d__;
+ r__2 = di / d__;
+ q__1.r = r__1, q__1.i = r__2;
+ ff.r = q__1.r, ff.i = q__1.i;
+ }
+ r__1 = gs.r / g2s;
+ r__2 = -r_imag(&gs) / g2s;
+ q__2.r = r__1, q__2.i = r__2;
+ q__1.r = ff.r * q__2.r - ff.i * q__2.i, q__1.i = ff.r * q__2.i +
+ ff.i * q__2.r;
+ sn.r = q__1.r, sn.i = q__1.i;
+ q__2.r = cs * f.r, q__2.i = cs * f.i;
+ q__3.r = sn.r * g.r - sn.i * g.i, q__3.i = sn.r * g.i + sn.i *
+ g.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ r__.r = q__1.r, r__.i = q__1.i;
+ } else {
+
+/* This is the most common case. */
+/* Neither F2 nor F2/G2 are less than SAFMIN */
+/* F2S cannot overflow, and it is accurate */
+
+ f2s = sqrt(g2 / f2 + 1.f);
+/* Do the F2S(real)*FS(complex) multiply with two real */
+/* multiplies */
+ r__1 = f2s * fs.r;
+ r__2 = f2s * r_imag(&fs);
+ q__1.r = r__1, q__1.i = r__2;
+ r__.r = q__1.r, r__.i = q__1.i;
+ cs = 1.f / f2s;
+ d__ = f2 + g2;
+/* Do complex/real division explicitly with two real divisions */
+ r__1 = r__.r / d__;
+ r__2 = r_imag(&r__) / d__;
+ q__1.r = r__1, q__1.i = r__2;
+ sn.r = q__1.r, sn.i = q__1.i;
+ r_cnjg(&q__2, &gs);
+ q__1.r = sn.r * q__2.r - sn.i * q__2.i, q__1.i = sn.r * q__2.i +
+ sn.i * q__2.r;
+ sn.r = q__1.r, sn.i = q__1.i;
+ if (count != 0) {
+ if (count > 0) {
+ i__2 = count;
+ for (j = 1; j <= i__2; ++j) {
+ q__1.r = safmx2 * r__.r, q__1.i = safmx2 * r__.i;
+ r__.r = q__1.r, r__.i = q__1.i;
+/* L30: */
+ }
+ } else {
+ i__2 = -count;
+ for (j = 1; j <= i__2; ++j) {
+ q__1.r = safmn2 * r__.r, q__1.i = safmn2 * r__.i;
+ r__.r = q__1.r, r__.i = q__1.i;
+/* L40: */
+ }
+ }
+ }
+ }
+L50:
+ c__[ic] = cs;
+ i__2 = iy;
+ y[i__2].r = sn.r, y[i__2].i = sn.i;
+ i__2 = ix;
+ x[i__2].r = r__.r, x[i__2].i = r__.i;
+ ic += *incc;
+ iy += *incy;
+ ix += *incx;
+/* L60: */
+ }
+ return 0;
+
+/* End of CLARGV */
+
+} /* clargv_ */
diff --git a/contrib/libs/clapack/clarnv.c b/contrib/libs/clapack/clarnv.c
new file mode 100644
index 0000000000..b4f2076fc4
--- /dev/null
+++ b/contrib/libs/clapack/clarnv.c
@@ -0,0 +1,190 @@
+/* clarnv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int clarnv_(integer *idist, integer *iseed, integer *n,
+ complex *x)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ double log(doublereal), sqrt(doublereal);
+ void c_exp(complex *, complex *);
+
+ /* Local variables */
+ integer i__;
+ real u[128];
+ integer il, iv;
+ extern /* Subroutine */ int slaruv_(integer *, integer *, real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLARNV returns a vector of n random complex numbers from a uniform or */
+/* normal distribution. */
+
+/* Arguments */
+/* ========= */
+
+/* IDIST (input) INTEGER */
+/* Specifies the distribution of the random numbers: */
+/* = 1: real and imaginary parts each uniform (0,1) */
+/* = 2: real and imaginary parts each uniform (-1,1) */
+/* = 3: real and imaginary parts each normal (0,1) */
+/* = 4: uniformly distributed on the disc abs(z) < 1 */
+/* = 5: uniformly distributed on the circle abs(z) = 1 */
+
+/* ISEED (input/output) INTEGER array, dimension (4) */
+/* On entry, the seed of the random number generator; the array */
+/* elements must be between 0 and 4095, and ISEED(4) must be */
+/* odd. */
+/* On exit, the seed is updated. */
+
+/* N (input) INTEGER */
+/* The number of random numbers to be generated. */
+
+/* X (output) COMPLEX array, dimension (N) */
+/* The generated random numbers. */
+
+/* Further Details */
+/* =============== */
+
+/* This routine calls the auxiliary routine SLARUV to generate random */
+/* real numbers from a uniform (0,1) distribution, in batches of up to */
+/* 128 using vectorisable code. The Box-Muller method is used to */
+/* transform numbers from a uniform to a normal distribution. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --x;
+ --iseed;
+
+ /* Function Body */
+ i__1 = *n;
+ for (iv = 1; iv <= i__1; iv += 64) {
+/* Computing MIN */
+ i__2 = 64, i__3 = *n - iv + 1;
+ il = min(i__2,i__3);
+
+/* Call SLARUV to generate 2*IL real numbers from a uniform (0,1) */
+/* distribution (2*IL <= LV) */
+
+ i__2 = il << 1;
+ slaruv_(&iseed[1], &i__2, u);
+
+ if (*idist == 1) {
+
+/* Copy generated numbers */
+
+ i__2 = il;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = iv + i__ - 1;
+ i__4 = (i__ << 1) - 2;
+ i__5 = (i__ << 1) - 1;
+ q__1.r = u[i__4], q__1.i = u[i__5];
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+/* L10: */
+ }
+ } else if (*idist == 2) {
+
+/* Convert generated numbers to uniform (-1,1) distribution */
+
+ i__2 = il;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = iv + i__ - 1;
+ r__1 = u[(i__ << 1) - 2] * 2.f - 1.f;
+ r__2 = u[(i__ << 1) - 1] * 2.f - 1.f;
+ q__1.r = r__1, q__1.i = r__2;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+/* L20: */
+ }
+ } else if (*idist == 3) {
+
+/* Convert generated numbers to normal (0,1) distribution */
+
+ i__2 = il;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = iv + i__ - 1;
+ r__1 = sqrt(log(u[(i__ << 1) - 2]) * -2.f);
+ r__2 = u[(i__ << 1) - 1] * 6.2831853071795864769252867663f;
+ q__3.r = 0.f, q__3.i = r__2;
+ c_exp(&q__2, &q__3);
+ q__1.r = r__1 * q__2.r, q__1.i = r__1 * q__2.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+/* L30: */
+ }
+ } else if (*idist == 4) {
+
+/* Convert generated numbers to complex numbers uniformly */
+/* distributed on the unit disk */
+
+ i__2 = il;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = iv + i__ - 1;
+ r__1 = sqrt(u[(i__ << 1) - 2]);
+ r__2 = u[(i__ << 1) - 1] * 6.2831853071795864769252867663f;
+ q__3.r = 0.f, q__3.i = r__2;
+ c_exp(&q__2, &q__3);
+ q__1.r = r__1 * q__2.r, q__1.i = r__1 * q__2.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+/* L40: */
+ }
+ } else if (*idist == 5) {
+
+/* Convert generated numbers to complex numbers uniformly */
+/* distributed on the unit circle */
+
+ i__2 = il;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = iv + i__ - 1;
+ r__1 = u[(i__ << 1) - 1] * 6.2831853071795864769252867663f;
+ q__2.r = 0.f, q__2.i = r__1;
+ c_exp(&q__1, &q__2);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ return 0;
+
+/* End of CLARNV */
+
+} /* clarnv_ */
diff --git a/contrib/libs/clapack/clarrv.c b/contrib/libs/clapack/clarrv.c
new file mode 100644
index 0000000000..a5bf8a79d8
--- /dev/null
+++ b/contrib/libs/clapack/clarrv.c
@@ -0,0 +1,1015 @@
+/* clarrv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static integer c__1 = 1;
+static integer c__2 = 2;
+static real c_b28 = 0.f;
+
+/* Subroutine */ int clarrv_(integer *n, real *vl, real *vu, real *d__, real *
+ l, real *pivmin, integer *isplit, integer *m, integer *dol, integer *
+ dou, real *minrgp, real *rtol1, real *rtol2, real *w, real *werr,
+ real *wgap, integer *iblock, integer *indexw, real *gers, complex *
+ z__, integer *ldz, integer *isuppz, real *work, integer *iwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+ real r__1, r__2;
+ complex q__1;
+ logical L__1;
+
+ /* Builtin functions */
+ double log(doublereal);
+
+ /* Local variables */
+ integer minwsize, i__, j, k, p, q, miniwsize, ii;
+ real gl;
+ integer im, in;
+ real gu, gap, eps, tau, tol, tmp;
+ integer zto;
+ real ztz;
+ integer iend, jblk;
+ real lgap;
+ integer done;
+ real rgap, left;
+ integer wend, iter;
+ real bstw;
+ integer itmp1, indld;
+ real fudge;
+ integer idone;
+ real sigma;
+ integer iinfo, iindr;
+ real resid;
+ logical eskip;
+ real right;
+ integer nclus, zfrom;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ real rqtol;
+ integer iindc1, iindc2, indin1, indin2;
+ extern /* Subroutine */ int clar1v_(integer *, integer *, integer *, real
+ *, real *, real *, real *, real *, real *, real *, complex *,
+ logical *, integer *, real *, real *, integer *, integer *, real *
+, real *, real *, real *);
+ logical stp2ii;
+ real lambda;
+ integer ibegin, indeig;
+ logical needbs;
+ integer indlld;
+ real sgndef, mingma;
+ extern doublereal slamch_(char *);
+ integer oldien, oldncl, wbegin;
+ real spdiam;
+ integer negcnt;
+ extern /* Subroutine */ int claset_(char *, integer *, integer *, complex
+ *, complex *, complex *, integer *);
+ integer oldcls;
+ real savgap;
+ integer ndepth;
+ real ssigma;
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *);
+ logical usedbs;
+ integer iindwk, offset;
+ real gaptol;
+ extern /* Subroutine */ int slarrb_(integer *, real *, real *, integer *,
+ integer *, real *, real *, integer *, real *, real *, real *,
+ real *, integer *, real *, real *, integer *, integer *);
+ integer newcls, oldfst, indwrk, windex, oldlst;
+ logical usedrq;
+ integer newfst, newftt, parity, windmn, windpl, isupmn, newlst, zusedl;
+ real bstres;
+ integer newsiz, zusedu, zusedw;
+ real nrminv, rqcorr;
+ logical tryrqc;
+ integer isupmx;
+ extern /* Subroutine */ int slarrf_(integer *, real *, real *, real *,
+ integer *, integer *, real *, real *, real *, real *, real *,
+ real *, real *, real *, real *, real *, real *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLARRV computes the eigenvectors of the tridiagonal matrix */
+/* T = L D L^T given L, D and APPROXIMATIONS to the eigenvalues of L D L^T. */
+/* The input eigenvalues should have been computed by SLARRE. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* Lower and upper bounds of the interval that contains the desired */
+/* eigenvalues. VL < VU. Needed to compute gaps on the left or right */
+/* end of the extremal eigenvalues in the desired RANGE. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the N diagonal elements of the diagonal matrix D. */
+/* On exit, D may be overwritten. */
+
+/* L (input/output) REAL array, dimension (N) */
+/* On entry, the (N-1) subdiagonal elements of the unit */
+/* bidiagonal matrix L are in elements 1 to N-1 of L */
+/* (if the matrix is not split.) At the end of each block */
+/* is stored the corresponding shift as given by SLARRE. */
+/* On exit, L is overwritten. */
+
+/* PIVMIN (in) DOUBLE PRECISION */
+/* The minimum pivot allowed in the Sturm sequence. */
+
+/* ISPLIT (input) INTEGER array, dimension (N) */
+/* The splitting points, at which T breaks up into blocks. */
+/* The first block consists of rows/columns 1 to */
+/* ISPLIT( 1 ), the second of rows/columns ISPLIT( 1 )+1 */
+/* through ISPLIT( 2 ), etc. */
+
+/* M (input) INTEGER */
+/* The total number of input eigenvalues. 0 <= M <= N. */
+
+/* DOL (input) INTEGER */
+/* DOU (input) INTEGER */
+/* If the user wants to compute only selected eigenvectors from all */
+/* the eigenvalues supplied, he can specify an index range DOL:DOU. */
+/* Or else the setting DOL=1, DOU=M should be applied. */
+/* Note that DOL and DOU refer to the order in which the eigenvalues */
+/* are stored in W. */
+/* If the user wants to compute only selected eigenpairs, then */
+/* the columns DOL-1 to DOU+1 of the eigenvector space Z contain the */
+/* computed eigenvectors. All other columns of Z are set to zero. */
+
+/* MINRGP (input) REAL */
+
+/* RTOL1 (input) REAL */
+/* RTOL2 (input) REAL */
+/* Parameters for bisection. */
+/* An interval [LEFT,RIGHT] has converged if */
+/* RIGHT-LEFT.LT.MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) */
+
+/* W (input/output) REAL array, dimension (N) */
+/* The first M elements of W contain the APPROXIMATE eigenvalues for */
+/* which eigenvectors are to be computed. The eigenvalues */
+/* should be grouped by split-off block and ordered from */
+/* smallest to largest within the block ( The output array */
+/* W from SLARRE is expected here ). Furthermore, they are with */
+/* respect to the shift of the corresponding root representation */
+/* for their block. On exit, W holds the eigenvalues of the */
+/* UNshifted matrix. */
+
+/* WERR (input/output) REAL array, dimension (N) */
+/* The first M elements contain the semiwidth of the uncertainty */
+/* interval of the corresponding eigenvalue in W */
+
+/* WGAP (input/output) REAL array, dimension (N) */
+/* The separation from the right neighbor eigenvalue in W. */
+
+/* IBLOCK (input) INTEGER array, dimension (N) */
+/* The indices of the blocks (submatrices) associated with the */
+/* corresponding eigenvalues in W; IBLOCK(i)=1 if eigenvalue */
+/* W(i) belongs to the first block from the top, =2 if W(i) */
+/* belongs to the second block, etc. */
+
+/* INDEXW (input) INTEGER array, dimension (N) */
+/* The indices of the eigenvalues within each block (submatrix); */
+/* for example, INDEXW(i)= 10 and IBLOCK(i)=2 imply that the */
+/* i-th eigenvalue W(i) is the 10-th eigenvalue in the second block. */
+
+/* GERS (input) REAL array, dimension (2*N) */
+/* The N Gerschgorin intervals (the i-th Gerschgorin interval */
+/* is (GERS(2*i-1), GERS(2*i)). The Gerschgorin intervals should */
+/* be computed from the original UNshifted matrix. */
+
+/* Z (output) COMPLEX array, dimension (LDZ, max(1,M) ) */
+/* If INFO = 0, the first M columns of Z contain the */
+/* orthonormal eigenvectors of the matrix T */
+/* corresponding to the input eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* ISUPPZ (output) INTEGER array, dimension ( 2*max(1,M) ) */
+/* The support of the eigenvectors in Z, i.e., the indices */
+/* indicating the nonzero elements in Z. The I-th eigenvector */
+/* is nonzero only in elements ISUPPZ( 2*I-1 ) through */
+/* ISUPPZ( 2*I ). */
+
+/* WORK (workspace) REAL array, dimension (12*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (7*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+
+/* > 0: A problem occurred in CLARRV. */
+/* < 0: One of the called subroutines signaled an internal problem. */
+/* Needs inspection of the corresponding parameter IINFO */
+/* for further information. */
+
+/* =-1: Problem in SLARRB when refining a child's eigenvalues. */
+/* =-2: Problem in SLARRF when computing the RRR of a child. */
+/* When a child is inside a tight cluster, it can be difficult */
+/* to find an RRR. A partial remedy from the user's point of */
+/* view is to make the parameter MINRGP smaller and recompile. */
+/* However, as the orthogonality of the computed vectors is */
+/* proportional to 1/MINRGP, the user should be aware that */
+/* he might be trading in precision when he decreases MINRGP. */
+/* =-3: Problem in SLARRB when refining a single eigenvalue */
+/* after the Rayleigh correction was rejected. */
+/* = 5: The Rayleigh Quotient Iteration failed to converge to */
+/* full accuracy in MAXITR steps. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+/* .. */
+/* The first N entries of WORK are reserved for the eigenvalues */
+ /* Parameter adjustments */
+ --d__;
+ --l;
+ --isplit;
+ --w;
+ --werr;
+ --wgap;
+ --iblock;
+ --indexw;
+ --gers;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --isuppz;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ indld = *n + 1;
+ indlld = (*n << 1) + 1;
+ indin1 = *n * 3 + 1;
+ indin2 = (*n << 2) + 1;
+ indwrk = *n * 5 + 1;
+ minwsize = *n * 12;
+ i__1 = minwsize;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L5: */
+ }
+/* IWORK(IINDR+1:IINDR+N) hold the twist indices R for the */
+/* factorization used to compute the FP vector */
+ iindr = 0;
+/* IWORK(IINDC1+1:IINC2+N) are used to store the clusters of the current */
+/* layer and the one above. */
+ iindc1 = *n;
+ iindc2 = *n << 1;
+ iindwk = *n * 3 + 1;
+ miniwsize = *n * 7;
+ i__1 = miniwsize;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ iwork[i__] = 0;
+/* L10: */
+ }
+ zusedl = 1;
+ if (*dol > 1) {
+/* Set lower bound for use of Z */
+ zusedl = *dol - 1;
+ }
+ zusedu = *m;
+ if (*dou < *m) {
+/* Set lower bound for use of Z */
+ zusedu = *dou + 1;
+ }
+/* The width of the part of Z that is used */
+ zusedw = zusedu - zusedl + 1;
+ claset_("Full", n, &zusedw, &c_b1, &c_b1, &z__[zusedl * z_dim1 + 1], ldz);
+ eps = slamch_("Precision");
+ rqtol = eps * 2.f;
+
+/* Set expert flags for standard code. */
+ tryrqc = TRUE_;
+ if (*dol == 1 && *dou == *m) {
+ } else {
+/* Only selected eigenpairs are computed. Since the other evalues */
+/* are not refined by RQ iteration, bisection has to compute to full */
+/* accuracy. */
+ *rtol1 = eps * 4.f;
+ *rtol2 = eps * 4.f;
+ }
+/* The entries WBEGIN:WEND in W, WERR, WGAP correspond to the */
+/* desired eigenvalues. The support of the nonzero eigenvector */
+/* entries is contained in the interval IBEGIN:IEND. */
+/* Remark that if k eigenpairs are desired, then the eigenvectors */
+/* are stored in k contiguous columns of Z. */
+/* DONE is the number of eigenvectors already computed */
+ done = 0;
+ ibegin = 1;
+ wbegin = 1;
+ i__1 = iblock[*m];
+ for (jblk = 1; jblk <= i__1; ++jblk) {
+ iend = isplit[jblk];
+ sigma = l[iend];
+/* Find the eigenvectors of the submatrix indexed IBEGIN */
+/* through IEND. */
+ wend = wbegin - 1;
+L15:
+ if (wend < *m) {
+ if (iblock[wend + 1] == jblk) {
+ ++wend;
+ goto L15;
+ }
+ }
+ if (wend < wbegin) {
+ ibegin = iend + 1;
+ goto L170;
+ } else if (wend < *dol || wbegin > *dou) {
+ ibegin = iend + 1;
+ wbegin = wend + 1;
+ goto L170;
+ }
+/* Find local spectral diameter of the block */
+ gl = gers[(ibegin << 1) - 1];
+ gu = gers[ibegin * 2];
+ i__2 = iend;
+ for (i__ = ibegin + 1; i__ <= i__2; ++i__) {
+/* Computing MIN */
+ r__1 = gers[(i__ << 1) - 1];
+ gl = dmin(r__1,gl);
+/* Computing MAX */
+ r__1 = gers[i__ * 2];
+ gu = dmax(r__1,gu);
+/* L20: */
+ }
+ spdiam = gu - gl;
+/* OLDIEN is the last index of the previous block */
+ oldien = ibegin - 1;
+/* Calculate the size of the current block */
+ in = iend - ibegin + 1;
+/* The number of eigenvalues in the current block */
+ im = wend - wbegin + 1;
+/* This is for a 1x1 block */
+ if (ibegin == iend) {
+ ++done;
+ i__2 = ibegin + wbegin * z_dim1;
+ z__[i__2].r = 1.f, z__[i__2].i = 0.f;
+ isuppz[(wbegin << 1) - 1] = ibegin;
+ isuppz[wbegin * 2] = ibegin;
+ w[wbegin] += sigma;
+ work[wbegin] = w[wbegin];
+ ibegin = iend + 1;
+ ++wbegin;
+ goto L170;
+ }
+/* The desired (shifted) eigenvalues are stored in W(WBEGIN:WEND) */
+/* Note that these can be approximations, in this case, the corresp. */
+/* entries of WERR give the size of the uncertainty interval. */
+/* The eigenvalue approximations will be refined when necessary as */
+/* high relative accuracy is required for the computation of the */
+/* corresponding eigenvectors. */
+ scopy_(&im, &w[wbegin], &c__1, &work[wbegin], &c__1);
+/* We store in W the eigenvalue approximations w.r.t. the original */
+/* matrix T. */
+ i__2 = im;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ w[wbegin + i__ - 1] += sigma;
+/* L30: */
+ }
+/* NDEPTH is the current depth of the representation tree */
+ ndepth = 0;
+/* PARITY is either 1 or 0 */
+ parity = 1;
+/* NCLUS is the number of clusters for the next level of the */
+/* representation tree, we start with NCLUS = 1 for the root */
+ nclus = 1;
+ iwork[iindc1 + 1] = 1;
+ iwork[iindc1 + 2] = im;
+/* IDONE is the number of eigenvectors already computed in the current */
+/* block */
+ idone = 0;
+/* loop while( IDONE.LT.IM ) */
+/* generate the representation tree for the current block and */
+/* compute the eigenvectors */
+L40:
+ if (idone < im) {
+/* This is a crude protection against infinitely deep trees */
+ if (ndepth > *m) {
+ *info = -2;
+ return 0;
+ }
+/* breadth first processing of the current level of the representation */
+/* tree: OLDNCL = number of clusters on current level */
+ oldncl = nclus;
+/* reset NCLUS to count the number of child clusters */
+ nclus = 0;
+
+ parity = 1 - parity;
+ if (parity == 0) {
+ oldcls = iindc1;
+ newcls = iindc2;
+ } else {
+ oldcls = iindc2;
+ newcls = iindc1;
+ }
+/* Process the clusters on the current level */
+ i__2 = oldncl;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ j = oldcls + (i__ << 1);
+/* OLDFST, OLDLST = first, last index of current cluster. */
+/* cluster indices start with 1 and are relative */
+/* to WBEGIN when accessing W, WGAP, WERR, Z */
+ oldfst = iwork[j - 1];
+ oldlst = iwork[j];
+ if (ndepth > 0) {
+/* Retrieve relatively robust representation (RRR) of cluster */
+/* that has been computed at the previous level */
+/* The RRR is stored in Z and overwritten once the eigenvectors */
+/* have been computed or when the cluster is refined */
+ if (*dol == 1 && *dou == *m) {
+/* Get representation from location of the leftmost evalue */
+/* of the cluster */
+ j = wbegin + oldfst - 1;
+ } else {
+ if (wbegin + oldfst - 1 < *dol) {
+/* Get representation from the left end of Z array */
+ j = *dol - 1;
+ } else if (wbegin + oldfst - 1 > *dou) {
+/* Get representation from the right end of Z array */
+ j = *dou;
+ } else {
+ j = wbegin + oldfst - 1;
+ }
+ }
+ i__3 = in - 1;
+ for (k = 1; k <= i__3; ++k) {
+ i__4 = ibegin + k - 1 + j * z_dim1;
+ d__[ibegin + k - 1] = z__[i__4].r;
+ i__4 = ibegin + k - 1 + (j + 1) * z_dim1;
+ l[ibegin + k - 1] = z__[i__4].r;
+/* L45: */
+ }
+ i__3 = iend + j * z_dim1;
+ d__[iend] = z__[i__3].r;
+ i__3 = iend + (j + 1) * z_dim1;
+ sigma = z__[i__3].r;
+/* Set the corresponding entries in Z to zero */
+ claset_("Full", &in, &c__2, &c_b1, &c_b1, &z__[ibegin + j
+ * z_dim1], ldz);
+ }
+/* Compute DL and DLL of current RRR */
+ i__3 = iend - 1;
+ for (j = ibegin; j <= i__3; ++j) {
+ tmp = d__[j] * l[j];
+ work[indld - 1 + j] = tmp;
+ work[indlld - 1 + j] = tmp * l[j];
+/* L50: */
+ }
+ if (ndepth > 0) {
+/* P and Q are index of the first and last eigenvalue to compute */
+/* within the current block */
+ p = indexw[wbegin - 1 + oldfst];
+ q = indexw[wbegin - 1 + oldlst];
+/* Offset for the arrays WORK, WGAP and WERR, i.e., the P-OFFSET */
+/* thru' Q-OFFSET elements of these arrays are to be used. */
+/* OFFSET = P-OLDFST */
+ offset = indexw[wbegin] - 1;
+/* perform limited bisection (if necessary) to get approximate */
+/* eigenvalues to the precision needed. */
+ slarrb_(&in, &d__[ibegin], &work[indlld + ibegin - 1], &p,
+ &q, rtol1, rtol2, &offset, &work[wbegin], &wgap[
+ wbegin], &werr[wbegin], &work[indwrk], &iwork[
+ iindwk], pivmin, &spdiam, &in, &iinfo);
+ if (iinfo != 0) {
+ *info = -1;
+ return 0;
+ }
+/* We also recompute the extremal gaps. W holds all eigenvalues */
+/* of the unshifted matrix and must be used for computation */
+/* of WGAP, the entries of WORK might stem from RRRs with */
+/* different shifts. The gaps from WBEGIN-1+OLDFST to */
+/* WBEGIN-1+OLDLST are correctly computed in SLARRB. */
+/* However, we only allow the gaps to become greater since */
+/* this is what should happen when we decrease WERR */
+ if (oldfst > 1) {
+/* Computing MAX */
+ r__1 = wgap[wbegin + oldfst - 2], r__2 = w[wbegin +
+ oldfst - 1] - werr[wbegin + oldfst - 1] - w[
+ wbegin + oldfst - 2] - werr[wbegin + oldfst -
+ 2];
+ wgap[wbegin + oldfst - 2] = dmax(r__1,r__2);
+ }
+ if (wbegin + oldlst - 1 < wend) {
+/* Computing MAX */
+ r__1 = wgap[wbegin + oldlst - 1], r__2 = w[wbegin +
+ oldlst] - werr[wbegin + oldlst] - w[wbegin +
+ oldlst - 1] - werr[wbegin + oldlst - 1];
+ wgap[wbegin + oldlst - 1] = dmax(r__1,r__2);
+ }
+/* Each time the eigenvalues in WORK get refined, we store */
+/* the newly found approximation with all shifts applied in W */
+ i__3 = oldlst;
+ for (j = oldfst; j <= i__3; ++j) {
+ w[wbegin + j - 1] = work[wbegin + j - 1] + sigma;
+/* L53: */
+ }
+ }
+/* Process the current node. */
+ newfst = oldfst;
+ i__3 = oldlst;
+ for (j = oldfst; j <= i__3; ++j) {
+ if (j == oldlst) {
+/* we are at the right end of the cluster, this is also the */
+/* boundary of the child cluster */
+ newlst = j;
+ } else if (wgap[wbegin + j - 1] >= *minrgp * (r__1 = work[
+ wbegin + j - 1], dabs(r__1))) {
+/* the right relative gap is big enough, the child cluster */
+/* (NEWFST,..,NEWLST) is well separated from the following */
+ newlst = j;
+ } else {
+/* inside a child cluster, the relative gap is not */
+/* big enough. */
+ goto L140;
+ }
+/* Compute size of child cluster found */
+ newsiz = newlst - newfst + 1;
+/* NEWFTT is the place in Z where the new RRR or the computed */
+/* eigenvector is to be stored */
+ if (*dol == 1 && *dou == *m) {
+/* Store representation at location of the leftmost evalue */
+/* of the cluster */
+ newftt = wbegin + newfst - 1;
+ } else {
+ if (wbegin + newfst - 1 < *dol) {
+/* Store representation at the left end of Z array */
+ newftt = *dol - 1;
+ } else if (wbegin + newfst - 1 > *dou) {
+/* Store representation at the right end of Z array */
+ newftt = *dou;
+ } else {
+ newftt = wbegin + newfst - 1;
+ }
+ }
+ if (newsiz > 1) {
+
+/* Current child is not a singleton but a cluster. */
+/* Compute and store new representation of child. */
+
+
+/* Compute left and right cluster gap. */
+
+/* LGAP and RGAP are not computed from WORK because */
+/* the eigenvalue approximations may stem from RRRs */
+/* different shifts. However, W hold all eigenvalues */
+/* of the unshifted matrix. Still, the entries in WGAP */
+/* have to be computed from WORK since the entries */
+/* in W might be of the same order so that gaps are not */
+/* exhibited correctly for very close eigenvalues. */
+ if (newfst == 1) {
+/* Computing MAX */
+ r__1 = 0.f, r__2 = w[wbegin] - werr[wbegin] - *vl;
+ lgap = dmax(r__1,r__2);
+ } else {
+ lgap = wgap[wbegin + newfst - 2];
+ }
+ rgap = wgap[wbegin + newlst - 1];
+
+/* Compute left- and rightmost eigenvalue of child */
+/* to high precision in order to shift as close */
+/* as possible and obtain as large relative gaps */
+/* as possible */
+
+ for (k = 1; k <= 2; ++k) {
+ if (k == 1) {
+ p = indexw[wbegin - 1 + newfst];
+ } else {
+ p = indexw[wbegin - 1 + newlst];
+ }
+ offset = indexw[wbegin] - 1;
+ slarrb_(&in, &d__[ibegin], &work[indlld + ibegin
+ - 1], &p, &p, &rqtol, &rqtol, &offset, &
+ work[wbegin], &wgap[wbegin], &werr[wbegin]
+, &work[indwrk], &iwork[iindwk], pivmin, &
+ spdiam, &in, &iinfo);
+/* L55: */
+ }
+
+ if (wbegin + newlst - 1 < *dol || wbegin + newfst - 1
+ > *dou) {
+/* if the cluster contains no desired eigenvalues */
+/* skip the computation of that branch of the rep. tree */
+
+/* We could skip before the refinement of the extremal */
+/* eigenvalues of the child, but then the representation */
+/* tree could be different from the one when nothing is */
+/* skipped. For this reason we skip at this place. */
+ idone = idone + newlst - newfst + 1;
+ goto L139;
+ }
+
+/* Compute RRR of child cluster. */
+/* Note that the new RRR is stored in Z */
+
+/* SLARRF needs LWORK = 2*N */
+ slarrf_(&in, &d__[ibegin], &l[ibegin], &work[indld +
+ ibegin - 1], &newfst, &newlst, &work[wbegin],
+ &wgap[wbegin], &werr[wbegin], &spdiam, &lgap,
+ &rgap, pivmin, &tau, &work[indin1], &work[
+ indin2], &work[indwrk], &iinfo);
+/* In the complex case, SLARRF cannot write */
+/* the new RRR directly into Z and needs an intermediate */
+/* workspace */
+ i__4 = in - 1;
+ for (k = 1; k <= i__4; ++k) {
+ i__5 = ibegin + k - 1 + newftt * z_dim1;
+ i__6 = indin1 + k - 1;
+ q__1.r = work[i__6], q__1.i = 0.f;
+ z__[i__5].r = q__1.r, z__[i__5].i = q__1.i;
+ i__5 = ibegin + k - 1 + (newftt + 1) * z_dim1;
+ i__6 = indin2 + k - 1;
+ q__1.r = work[i__6], q__1.i = 0.f;
+ z__[i__5].r = q__1.r, z__[i__5].i = q__1.i;
+/* L56: */
+ }
+ i__4 = iend + newftt * z_dim1;
+ i__5 = indin1 + in - 1;
+ q__1.r = work[i__5], q__1.i = 0.f;
+ z__[i__4].r = q__1.r, z__[i__4].i = q__1.i;
+ if (iinfo == 0) {
+/* a new RRR for the cluster was found by SLARRF */
+/* update shift and store it */
+ ssigma = sigma + tau;
+ i__4 = iend + (newftt + 1) * z_dim1;
+ q__1.r = ssigma, q__1.i = 0.f;
+ z__[i__4].r = q__1.r, z__[i__4].i = q__1.i;
+/* WORK() are the midpoints and WERR() the semi-width */
+/* Note that the entries in W are unchanged. */
+ i__4 = newlst;
+ for (k = newfst; k <= i__4; ++k) {
+ fudge = eps * 3.f * (r__1 = work[wbegin + k -
+ 1], dabs(r__1));
+ work[wbegin + k - 1] -= tau;
+ fudge += eps * 4.f * (r__1 = work[wbegin + k
+ - 1], dabs(r__1));
+/* Fudge errors */
+ werr[wbegin + k - 1] += fudge;
+/* Gaps are not fudged. Provided that WERR is small */
+/* when eigenvalues are close, a zero gap indicates */
+/* that a new representation is needed for resolving */
+/* the cluster. A fudge could lead to a wrong decision */
+/* of judging eigenvalues 'separated' which in */
+/* reality are not. This could have a negative impact */
+/* on the orthogonality of the computed eigenvectors. */
+/* L116: */
+ }
+ ++nclus;
+ k = newcls + (nclus << 1);
+ iwork[k - 1] = newfst;
+ iwork[k] = newlst;
+ } else {
+ *info = -2;
+ return 0;
+ }
+ } else {
+
+/* Compute eigenvector of singleton */
+
+ iter = 0;
+
+ tol = log((real) in) * 4.f * eps;
+
+ k = newfst;
+ windex = wbegin + k - 1;
+/* Computing MAX */
+ i__4 = windex - 1;
+ windmn = max(i__4,1);
+/* Computing MIN */
+ i__4 = windex + 1;
+ windpl = min(i__4,*m);
+ lambda = work[windex];
+ ++done;
+/* Check if eigenvector computation is to be skipped */
+ if (windex < *dol || windex > *dou) {
+ eskip = TRUE_;
+ goto L125;
+ } else {
+ eskip = FALSE_;
+ }
+ left = work[windex] - werr[windex];
+ right = work[windex] + werr[windex];
+ indeig = indexw[windex];
+/* Note that since we compute the eigenpairs for a child, */
+/* all eigenvalue approximations are w.r.t the same shift. */
+/* In this case, the entries in WORK should be used for */
+/* computing the gaps since they exhibit even very small */
+/* differences in the eigenvalues, as opposed to the */
+/* entries in W which might "look" the same. */
+ if (k == 1) {
+/* In the case RANGE='I' and with not much initial */
+/* accuracy in LAMBDA and VL, the formula */
+/* LGAP = MAX( ZERO, (SIGMA - VL) + LAMBDA ) */
+/* can lead to an overestimation of the left gap and */
+/* thus to inadequately early RQI 'convergence'. */
+/* Prevent this by forcing a small left gap. */
+/* Computing MAX */
+ r__1 = dabs(left), r__2 = dabs(right);
+ lgap = eps * dmax(r__1,r__2);
+ } else {
+ lgap = wgap[windmn];
+ }
+ if (k == im) {
+/* In the case RANGE='I' and with not much initial */
+/* accuracy in LAMBDA and VU, the formula */
+/* can lead to an overestimation of the right gap and */
+/* thus to inadequately early RQI 'convergence'. */
+/* Prevent this by forcing a small right gap. */
+/* Computing MAX */
+ r__1 = dabs(left), r__2 = dabs(right);
+ rgap = eps * dmax(r__1,r__2);
+ } else {
+ rgap = wgap[windex];
+ }
+ gap = dmin(lgap,rgap);
+ if (k == 1 || k == im) {
+/* The eigenvector support can become wrong */
+/* because significant entries could be cut off due to a */
+/* large GAPTOL parameter in LAR1V. Prevent this. */
+ gaptol = 0.f;
+ } else {
+ gaptol = gap * eps;
+ }
+ isupmn = in;
+ isupmx = 1;
+/* Update WGAP so that it holds the minimum gap */
+/* to the left or the right. This is crucial in the */
+/* case where bisection is used to ensure that the */
+/* eigenvalue is refined up to the required precision. */
+/* The correct value is restored afterwards. */
+ savgap = wgap[windex];
+ wgap[windex] = gap;
+/* We want to use the Rayleigh Quotient Correction */
+/* as often as possible since it converges quadratically */
+/* when we are close enough to the desired eigenvalue. */
+/* However, the Rayleigh Quotient can have the wrong sign */
+/* and lead us away from the desired eigenvalue. In this */
+/* case, the best we can do is to use bisection. */
+ usedbs = FALSE_;
+ usedrq = FALSE_;
+/* Bisection is initially turned off unless it is forced */
+ needbs = ! tryrqc;
+L120:
+/* Check if bisection should be used to refine eigenvalue */
+ if (needbs) {
+/* Take the bisection as new iterate */
+ usedbs = TRUE_;
+ itmp1 = iwork[iindr + windex];
+ offset = indexw[wbegin] - 1;
+ r__1 = eps * 2.f;
+ slarrb_(&in, &d__[ibegin], &work[indlld + ibegin
+ - 1], &indeig, &indeig, &c_b28, &r__1, &
+ offset, &work[wbegin], &wgap[wbegin], &
+ werr[wbegin], &work[indwrk], &iwork[
+ iindwk], pivmin, &spdiam, &itmp1, &iinfo);
+ if (iinfo != 0) {
+ *info = -3;
+ return 0;
+ }
+ lambda = work[windex];
+/* Reset twist index from inaccurate LAMBDA to */
+/* force computation of true MINGMA */
+ iwork[iindr + windex] = 0;
+ }
+/* Given LAMBDA, compute the eigenvector. */
+ L__1 = ! usedbs;
+ clar1v_(&in, &c__1, &in, &lambda, &d__[ibegin], &l[
+ ibegin], &work[indld + ibegin - 1], &work[
+ indlld + ibegin - 1], pivmin, &gaptol, &z__[
+ ibegin + windex * z_dim1], &L__1, &negcnt, &
+ ztz, &mingma, &iwork[iindr + windex], &isuppz[
+ (windex << 1) - 1], &nrminv, &resid, &rqcorr,
+ &work[indwrk]);
+ if (iter == 0) {
+ bstres = resid;
+ bstw = lambda;
+ } else if (resid < bstres) {
+ bstres = resid;
+ bstw = lambda;
+ }
+/* Computing MIN */
+ i__4 = isupmn, i__5 = isuppz[(windex << 1) - 1];
+ isupmn = min(i__4,i__5);
+/* Computing MAX */
+ i__4 = isupmx, i__5 = isuppz[windex * 2];
+ isupmx = max(i__4,i__5);
+ ++iter;
+/* sin alpha <= |resid|/gap */
+/* Note that both the residual and the gap are */
+/* proportional to the matrix, so ||T|| doesn't play */
+/* a role in the quotient */
+
+/* Convergence test for Rayleigh-Quotient iteration */
+/* (omitted when Bisection has been used) */
+
+ if (resid > tol * gap && dabs(rqcorr) > rqtol * dabs(
+ lambda) && ! usedbs) {
+/* We need to check that the RQCORR update doesn't */
+/* move the eigenvalue away from the desired one and */
+/* towards a neighbor. -> protection with bisection */
+ if (indeig <= negcnt) {
+/* The wanted eigenvalue lies to the left */
+ sgndef = -1.f;
+ } else {
+/* The wanted eigenvalue lies to the right */
+ sgndef = 1.f;
+ }
+/* We only use the RQCORR if it improves the */
+/* the iterate reasonably. */
+ if (rqcorr * sgndef >= 0.f && lambda + rqcorr <=
+ right && lambda + rqcorr >= left) {
+ usedrq = TRUE_;
+/* Store new midpoint of bisection interval in WORK */
+ if (sgndef == 1.f) {
+/* The current LAMBDA is on the left of the true */
+/* eigenvalue */
+ left = lambda;
+/* We prefer to assume that the error estimate */
+/* is correct. We could make the interval not */
+/* as a bracket but to be modified if the RQCORR */
+/* chooses to. In this case, the RIGHT side should */
+/* be modified as follows: */
+/* RIGHT = MAX(RIGHT, LAMBDA + RQCORR) */
+ } else {
+/* The current LAMBDA is on the right of the true */
+/* eigenvalue */
+ right = lambda;
+/* See comment about assuming the error estimate is */
+/* correct above. */
+/* LEFT = MIN(LEFT, LAMBDA + RQCORR) */
+ }
+ work[windex] = (right + left) * .5f;
+/* Take RQCORR since it has the correct sign and */
+/* improves the iterate reasonably */
+ lambda += rqcorr;
+/* Update width of error interval */
+ werr[windex] = (right - left) * .5f;
+ } else {
+ needbs = TRUE_;
+ }
+ if (right - left < rqtol * dabs(lambda)) {
+/* The eigenvalue is computed to bisection accuracy */
+/* compute eigenvector and stop */
+ usedbs = TRUE_;
+ goto L120;
+ } else if (iter < 10) {
+ goto L120;
+ } else if (iter == 10) {
+ needbs = TRUE_;
+ goto L120;
+ } else {
+ *info = 5;
+ return 0;
+ }
+ } else {
+ stp2ii = FALSE_;
+ if (usedrq && usedbs && bstres <= resid) {
+ lambda = bstw;
+ stp2ii = TRUE_;
+ }
+ if (stp2ii) {
+/* improve error angle by second step */
+ L__1 = ! usedbs;
+ clar1v_(&in, &c__1, &in, &lambda, &d__[ibegin]
+, &l[ibegin], &work[indld + ibegin -
+ 1], &work[indlld + ibegin - 1],
+ pivmin, &gaptol, &z__[ibegin + windex
+ * z_dim1], &L__1, &negcnt, &ztz, &
+ mingma, &iwork[iindr + windex], &
+ isuppz[(windex << 1) - 1], &nrminv, &
+ resid, &rqcorr, &work[indwrk]);
+ }
+ work[windex] = lambda;
+ }
+
+/* Compute FP-vector support w.r.t. whole matrix */
+
+ isuppz[(windex << 1) - 1] += oldien;
+ isuppz[windex * 2] += oldien;
+ zfrom = isuppz[(windex << 1) - 1];
+ zto = isuppz[windex * 2];
+ isupmn += oldien;
+ isupmx += oldien;
+/* Ensure vector is ok if support in the RQI has changed */
+ if (isupmn < zfrom) {
+ i__4 = zfrom - 1;
+ for (ii = isupmn; ii <= i__4; ++ii) {
+ i__5 = ii + windex * z_dim1;
+ z__[i__5].r = 0.f, z__[i__5].i = 0.f;
+/* L122: */
+ }
+ }
+ if (isupmx > zto) {
+ i__4 = isupmx;
+ for (ii = zto + 1; ii <= i__4; ++ii) {
+ i__5 = ii + windex * z_dim1;
+ z__[i__5].r = 0.f, z__[i__5].i = 0.f;
+/* L123: */
+ }
+ }
+ i__4 = zto - zfrom + 1;
+ csscal_(&i__4, &nrminv, &z__[zfrom + windex * z_dim1],
+ &c__1);
+L125:
+/* Update W */
+ w[windex] = lambda + sigma;
+/* Recompute the gaps on the left and right */
+/* But only allow them to become larger and not */
+/* smaller (which can only happen through "bad" */
+/* cancellation and doesn't reflect the theory */
+/* where the initial gaps are underestimated due */
+/* to WERR being too crude.) */
+ if (! eskip) {
+ if (k > 1) {
+/* Computing MAX */
+ r__1 = wgap[windmn], r__2 = w[windex] - werr[
+ windex] - w[windmn] - werr[windmn];
+ wgap[windmn] = dmax(r__1,r__2);
+ }
+ if (windex < wend) {
+/* Computing MAX */
+ r__1 = savgap, r__2 = w[windpl] - werr[windpl]
+ - w[windex] - werr[windex];
+ wgap[windex] = dmax(r__1,r__2);
+ }
+ }
+ ++idone;
+ }
+/* here ends the code for the current child */
+
+L139:
+/* Proceed to any remaining child nodes */
+ newfst = j + 1;
+L140:
+ ;
+ }
+/* L150: */
+ }
+ ++ndepth;
+ goto L40;
+ }
+ ibegin = iend + 1;
+ wbegin = wend + 1;
+L170:
+ ;
+ }
+
+ return 0;
+
+/* End of CLARRV */
+
+} /* clarrv_ */
diff --git a/contrib/libs/clapack/clartg.c b/contrib/libs/clapack/clartg.c
new file mode 100644
index 0000000000..75456abe97
--- /dev/null
+++ b/contrib/libs/clapack/clartg.c
@@ -0,0 +1,284 @@
+/* clartg.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int clartg_(complex *f, complex *g, real *cs, complex *sn,
+ complex *r__)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2, r__3, r__4, r__5, r__6, r__7, r__8, r__9, r__10;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ double log(doublereal), pow_ri(real *, integer *), r_imag(complex *),
+ sqrt(doublereal);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ real d__;
+ integer i__;
+ real f2, g2;
+ complex ff;
+ real di, dr;
+ complex fs, gs;
+ real f2s, g2s, eps, scale;
+ integer count;
+ real safmn2, safmx2;
+ extern doublereal slapy2_(real *, real *), slamch_(char *);
+ real safmin;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLARTG generates a plane rotation so that */
+
+/* [ CS SN ] [ F ] [ R ] */
+/* [ __ ] . [ ] = [ ] where CS**2 + |SN|**2 = 1. */
+/* [ -SN CS ] [ G ] [ 0 ] */
+
+/* This is a faster version of the BLAS1 routine CROTG, except for */
+/* the following differences: */
+/* F and G are unchanged on return. */
+/* If G=0, then CS=1 and SN=0. */
+/* If F=0, then CS=0 and SN is chosen so that R is real. */
+
+/* Arguments */
+/* ========= */
+
+/* F (input) COMPLEX */
+/* The first component of vector to be rotated. */
+
+/* G (input) COMPLEX */
+/* The second component of vector to be rotated. */
+
+/* CS (output) REAL */
+/* The cosine of the rotation. */
+
+/* SN (output) COMPLEX */
+/* The sine of the rotation. */
+
+/* R (output) COMPLEX */
+/* The nonzero component of the rotated vector. */
+
+/* Further Details */
+/* ======= ======= */
+
+/* 3-5-96 - Modified with a new algorithm by W. Kahan and J. Demmel */
+
+/* This version has a few statements commented out for thread safety */
+/* (machine parameters are computed on each entry). 10 feb 03, SJH. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* LOGICAL FIRST */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Save statement .. */
+/* SAVE FIRST, SAFMX2, SAFMIN, SAFMN2 */
+/* .. */
+/* .. Data statements .. */
+/* DATA FIRST / .TRUE. / */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* IF( FIRST ) THEN */
+ safmin = slamch_("S");
+ eps = slamch_("E");
+ r__1 = slamch_("B");
+ i__1 = (integer) (log(safmin / eps) / log(slamch_("B")) / 2.f);
+ safmn2 = pow_ri(&r__1, &i__1);
+ safmx2 = 1.f / safmn2;
+/* FIRST = .FALSE. */
+/* END IF */
+/* Computing MAX */
+/* Computing MAX */
+ r__7 = (r__1 = f->r, dabs(r__1)), r__8 = (r__2 = r_imag(f), dabs(r__2));
+/* Computing MAX */
+ r__9 = (r__3 = g->r, dabs(r__3)), r__10 = (r__4 = r_imag(g), dabs(r__4));
+ r__5 = dmax(r__7,r__8), r__6 = dmax(r__9,r__10);
+ scale = dmax(r__5,r__6);
+ fs.r = f->r, fs.i = f->i;
+ gs.r = g->r, gs.i = g->i;
+ count = 0;
+ if (scale >= safmx2) {
+L10:
+ ++count;
+ q__1.r = safmn2 * fs.r, q__1.i = safmn2 * fs.i;
+ fs.r = q__1.r, fs.i = q__1.i;
+ q__1.r = safmn2 * gs.r, q__1.i = safmn2 * gs.i;
+ gs.r = q__1.r, gs.i = q__1.i;
+ scale *= safmn2;
+ if (scale >= safmx2) {
+ goto L10;
+ }
+ } else if (scale <= safmn2) {
+ if (g->r == 0.f && g->i == 0.f) {
+ *cs = 1.f;
+ sn->r = 0.f, sn->i = 0.f;
+ r__->r = f->r, r__->i = f->i;
+ return 0;
+ }
+L20:
+ --count;
+ q__1.r = safmx2 * fs.r, q__1.i = safmx2 * fs.i;
+ fs.r = q__1.r, fs.i = q__1.i;
+ q__1.r = safmx2 * gs.r, q__1.i = safmx2 * gs.i;
+ gs.r = q__1.r, gs.i = q__1.i;
+ scale *= safmx2;
+ if (scale <= safmn2) {
+ goto L20;
+ }
+ }
+/* Computing 2nd power */
+ r__1 = fs.r;
+/* Computing 2nd power */
+ r__2 = r_imag(&fs);
+ f2 = r__1 * r__1 + r__2 * r__2;
+/* Computing 2nd power */
+ r__1 = gs.r;
+/* Computing 2nd power */
+ r__2 = r_imag(&gs);
+ g2 = r__1 * r__1 + r__2 * r__2;
+ if (f2 <= dmax(g2,1.f) * safmin) {
+
+/* This is a rare case: F is very small. */
+
+ if (f->r == 0.f && f->i == 0.f) {
+ *cs = 0.f;
+ r__2 = g->r;
+ r__3 = r_imag(g);
+ r__1 = slapy2_(&r__2, &r__3);
+ r__->r = r__1, r__->i = 0.f;
+/* Do complex/real division explicitly with two real divisions */
+ r__1 = gs.r;
+ r__2 = r_imag(&gs);
+ d__ = slapy2_(&r__1, &r__2);
+ r__1 = gs.r / d__;
+ r__2 = -r_imag(&gs) / d__;
+ q__1.r = r__1, q__1.i = r__2;
+ sn->r = q__1.r, sn->i = q__1.i;
+ return 0;
+ }
+ r__1 = fs.r;
+ r__2 = r_imag(&fs);
+ f2s = slapy2_(&r__1, &r__2);
+/* G2 and G2S are accurate */
+/* G2 is at least SAFMIN, and G2S is at least SAFMN2 */
+ g2s = sqrt(g2);
+/* Error in CS from underflow in F2S is at most */
+/* UNFL / SAFMN2 .lt. sqrt(UNFL*EPS) .lt. EPS */
+/* If MAX(G2,ONE)=G2, then F2 .lt. G2*SAFMIN, */
+/* and so CS .lt. sqrt(SAFMIN) */
+/* If MAX(G2,ONE)=ONE, then F2 .lt. SAFMIN */
+/* and so CS .lt. sqrt(SAFMIN)/SAFMN2 = sqrt(EPS) */
+/* Therefore, CS = F2S/G2S / sqrt( 1 + (F2S/G2S)**2 ) = F2S/G2S */
+ *cs = f2s / g2s;
+/* Make sure abs(FF) = 1 */
+/* Do complex/real division explicitly with 2 real divisions */
+/* Computing MAX */
+ r__3 = (r__1 = f->r, dabs(r__1)), r__4 = (r__2 = r_imag(f), dabs(r__2)
+ );
+ if (dmax(r__3,r__4) > 1.f) {
+ r__1 = f->r;
+ r__2 = r_imag(f);
+ d__ = slapy2_(&r__1, &r__2);
+ r__1 = f->r / d__;
+ r__2 = r_imag(f) / d__;
+ q__1.r = r__1, q__1.i = r__2;
+ ff.r = q__1.r, ff.i = q__1.i;
+ } else {
+ dr = safmx2 * f->r;
+ di = safmx2 * r_imag(f);
+ d__ = slapy2_(&dr, &di);
+ r__1 = dr / d__;
+ r__2 = di / d__;
+ q__1.r = r__1, q__1.i = r__2;
+ ff.r = q__1.r, ff.i = q__1.i;
+ }
+ r__1 = gs.r / g2s;
+ r__2 = -r_imag(&gs) / g2s;
+ q__2.r = r__1, q__2.i = r__2;
+ q__1.r = ff.r * q__2.r - ff.i * q__2.i, q__1.i = ff.r * q__2.i + ff.i
+ * q__2.r;
+ sn->r = q__1.r, sn->i = q__1.i;
+ q__2.r = *cs * f->r, q__2.i = *cs * f->i;
+ q__3.r = sn->r * g->r - sn->i * g->i, q__3.i = sn->r * g->i + sn->i *
+ g->r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ r__->r = q__1.r, r__->i = q__1.i;
+ } else {
+
+/* This is the most common case. */
+/* Neither F2 nor F2/G2 are less than SAFMIN */
+/* F2S cannot overflow, and it is accurate */
+
+ f2s = sqrt(g2 / f2 + 1.f);
+/* Do the F2S(real)*FS(complex) multiply with two real multiplies */
+ r__1 = f2s * fs.r;
+ r__2 = f2s * r_imag(&fs);
+ q__1.r = r__1, q__1.i = r__2;
+ r__->r = q__1.r, r__->i = q__1.i;
+ *cs = 1.f / f2s;
+ d__ = f2 + g2;
+/* Do complex/real division explicitly with two real divisions */
+ r__1 = r__->r / d__;
+ r__2 = r_imag(r__) / d__;
+ q__1.r = r__1, q__1.i = r__2;
+ sn->r = q__1.r, sn->i = q__1.i;
+ r_cnjg(&q__2, &gs);
+ q__1.r = sn->r * q__2.r - sn->i * q__2.i, q__1.i = sn->r * q__2.i +
+ sn->i * q__2.r;
+ sn->r = q__1.r, sn->i = q__1.i;
+ if (count != 0) {
+ if (count > 0) {
+ i__1 = count;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ q__1.r = safmx2 * r__->r, q__1.i = safmx2 * r__->i;
+ r__->r = q__1.r, r__->i = q__1.i;
+/* L30: */
+ }
+ } else {
+ i__1 = -count;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ q__1.r = safmn2 * r__->r, q__1.i = safmn2 * r__->i;
+ r__->r = q__1.r, r__->i = q__1.i;
+/* L40: */
+ }
+ }
+ }
+ }
+ return 0;
+
+/* End of CLARTG */
+
+} /* clartg_ */
diff --git a/contrib/libs/clapack/clartv.c b/contrib/libs/clapack/clartv.c
new file mode 100644
index 0000000000..d77d54dcd8
--- /dev/null
+++ b/contrib/libs/clapack/clartv.c
@@ -0,0 +1,125 @@
+/* clartv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int clartv_(integer *n, complex *x, integer *incx, complex *
+ y, integer *incy, real *c__, complex *s, integer *incc)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, ic, ix, iy;
+ complex xi, yi;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLARTV applies a vector of complex plane rotations with real cosines */
+/* to elements of the complex vectors x and y. For i = 1,2,...,n */
+
+/* ( x(i) ) := ( c(i) s(i) ) ( x(i) ) */
+/* ( y(i) ) ( -conjg(s(i)) c(i) ) ( y(i) ) */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of plane rotations to be applied. */
+
+/* X (input/output) COMPLEX array, dimension (1+(N-1)*INCX) */
+/* The vector x. */
+
+/* INCX (input) INTEGER */
+/* The increment between elements of X. INCX > 0. */
+
+/* Y (input/output) COMPLEX array, dimension (1+(N-1)*INCY) */
+/* The vector y. */
+
+/* INCY (input) INTEGER */
+/* The increment between elements of Y. INCY > 0. */
+
+/* C (input) REAL array, dimension (1+(N-1)*INCC) */
+/* The cosines of the plane rotations. */
+
+/* S (input) COMPLEX array, dimension (1+(N-1)*INCC) */
+/* The sines of the plane rotations. */
+
+/* INCC (input) INTEGER */
+/* The increment between elements of C and S. INCC > 0. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --s;
+ --c__;
+ --y;
+ --x;
+
+ /* Function Body */
+ ix = 1;
+ iy = 1;
+ ic = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ xi.r = x[i__2].r, xi.i = x[i__2].i;
+ i__2 = iy;
+ yi.r = y[i__2].r, yi.i = y[i__2].i;
+ i__2 = ix;
+ i__3 = ic;
+ q__2.r = c__[i__3] * xi.r, q__2.i = c__[i__3] * xi.i;
+ i__4 = ic;
+ q__3.r = s[i__4].r * yi.r - s[i__4].i * yi.i, q__3.i = s[i__4].r *
+ yi.i + s[i__4].i * yi.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ i__2 = iy;
+ i__3 = ic;
+ q__2.r = c__[i__3] * yi.r, q__2.i = c__[i__3] * yi.i;
+ r_cnjg(&q__4, &s[ic]);
+ q__3.r = q__4.r * xi.r - q__4.i * xi.i, q__3.i = q__4.r * xi.i +
+ q__4.i * xi.r;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ ix += *incx;
+ iy += *incy;
+ ic += *incc;
+/* L10: */
+ }
+ return 0;
+
+/* End of CLARTV */
+
+} /* clartv_ */
diff --git a/contrib/libs/clapack/clarz.c b/contrib/libs/clapack/clarz.c
new file mode 100644
index 0000000000..5050cf5e2b
--- /dev/null
+++ b/contrib/libs/clapack/clarz.c
@@ -0,0 +1,198 @@
+/* clarz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int clarz_(char *side, integer *m, integer *n, integer *l,
+ complex *v, integer *incv, complex *tau, complex *c__, integer *ldc,
+ complex *work)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset;
+ complex q__1;
+
+ /* Local variables */
+ extern /* Subroutine */ int cgerc_(integer *, integer *, complex *,
+ complex *, integer *, complex *, integer *, complex *, integer *),
+ cgemv_(char *, integer *, integer *, complex *, complex *,
+ integer *, complex *, integer *, complex *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cgeru_(integer *, integer *, complex *,
+ complex *, integer *, complex *, integer *, complex *, integer *),
+ ccopy_(integer *, complex *, integer *, complex *, integer *),
+ caxpy_(integer *, complex *, complex *, integer *, complex *,
+ integer *), clacgv_(integer *, complex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLARZ applies a complex elementary reflector H to a complex */
+/* M-by-N matrix C, from either the left or the right. H is represented */
+/* in the form */
+
+/* H = I - tau * v * v' */
+
+/* where tau is a complex scalar and v is a complex vector. */
+
+/* If tau = 0, then H is taken to be the unit matrix. */
+
+/* To apply H' (the conjugate transpose of H), supply conjg(tau) instead */
+/* tau. */
+
+/* H is a product of k elementary reflectors as returned by CTZRZF. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': form H * C */
+/* = 'R': form C * H */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* L (input) INTEGER */
+/* The number of entries of the vector V containing */
+/* the meaningful part of the Householder vectors. */
+/* If SIDE = 'L', M >= L >= 0, if SIDE = 'R', N >= L >= 0. */
+
+/* V (input) COMPLEX array, dimension (1+(L-1)*abs(INCV)) */
+/* The vector v in the representation of H as returned by */
+/* CTZRZF. V is not used if TAU = 0. */
+
+/* INCV (input) INTEGER */
+/* The increment between elements of v. INCV <> 0. */
+
+/* TAU (input) COMPLEX */
+/* The value tau in the representation of H. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by the matrix H * C if SIDE = 'L', */
+/* or C * H if SIDE = 'R'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX array, dimension */
+/* (N) if SIDE = 'L' */
+/* or (M) if SIDE = 'R' */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --v;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ if (lsame_(side, "L")) {
+
+/* Form H * C */
+
+ if (tau->r != 0.f || tau->i != 0.f) {
+
+/* w( 1:n ) = conjg( C( 1, 1:n ) ) */
+
+ ccopy_(n, &c__[c_offset], ldc, &work[1], &c__1);
+ clacgv_(n, &work[1], &c__1);
+
+/* w( 1:n ) = conjg( w( 1:n ) + C( m-l+1:m, 1:n )' * v( 1:l ) ) */
+
+ cgemv_("Conjugate transpose", l, n, &c_b1, &c__[*m - *l + 1 +
+ c_dim1], ldc, &v[1], incv, &c_b1, &work[1], &c__1);
+ clacgv_(n, &work[1], &c__1);
+
+/* C( 1, 1:n ) = C( 1, 1:n ) - tau * w( 1:n ) */
+
+ q__1.r = -tau->r, q__1.i = -tau->i;
+ caxpy_(n, &q__1, &work[1], &c__1, &c__[c_offset], ldc);
+
+/* C( m-l+1:m, 1:n ) = C( m-l+1:m, 1:n ) - ... */
+/* tau * v( 1:l ) * conjg( w( 1:n )' ) */
+
+ q__1.r = -tau->r, q__1.i = -tau->i;
+ cgeru_(l, n, &q__1, &v[1], incv, &work[1], &c__1, &c__[*m - *l +
+ 1 + c_dim1], ldc);
+ }
+
+ } else {
+
+/* Form C * H */
+
+ if (tau->r != 0.f || tau->i != 0.f) {
+
+/* w( 1:m ) = C( 1:m, 1 ) */
+
+ ccopy_(m, &c__[c_offset], &c__1, &work[1], &c__1);
+
+/* w( 1:m ) = w( 1:m ) + C( 1:m, n-l+1:n, 1:n ) * v( 1:l ) */
+
+ cgemv_("No transpose", m, l, &c_b1, &c__[(*n - *l + 1) * c_dim1 +
+ 1], ldc, &v[1], incv, &c_b1, &work[1], &c__1);
+
+/* C( 1:m, 1 ) = C( 1:m, 1 ) - tau * w( 1:m ) */
+
+ q__1.r = -tau->r, q__1.i = -tau->i;
+ caxpy_(m, &q__1, &work[1], &c__1, &c__[c_offset], &c__1);
+
+/* C( 1:m, n-l+1:n ) = C( 1:m, n-l+1:n ) - ... */
+/* tau * w( 1:m ) * v( 1:l )' */
+
+ q__1.r = -tau->r, q__1.i = -tau->i;
+ cgerc_(m, l, &q__1, &work[1], &c__1, &v[1], incv, &c__[(*n - *l +
+ 1) * c_dim1 + 1], ldc);
+
+ }
+
+ }
+
+ return 0;
+
+/* End of CLARZ */
+
+} /* clarz_ */
diff --git a/contrib/libs/clapack/clarzb.c b/contrib/libs/clapack/clarzb.c
new file mode 100644
index 0000000000..8861dc6952
--- /dev/null
+++ b/contrib/libs/clapack/clarzb.c
@@ -0,0 +1,323 @@
+/* clarzb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int clarzb_(char *side, char *trans, char *direct, char *
+ storev, integer *m, integer *n, integer *k, integer *l, complex *v,
+ integer *ldv, complex *t, integer *ldt, complex *c__, integer *ldc,
+ complex *work, integer *ldwork)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, t_dim1, t_offset, v_dim1, v_offset, work_dim1,
+ work_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, j, info;
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), ctrmm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *), clacgv_(integer *,
+ complex *, integer *), xerbla_(char *, integer *);
+ char transt[1];
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLARZB applies a complex block reflector H or its transpose H**H */
+/* to a complex distributed M-by-N C from the left or the right. */
+
+/* Currently, only STOREV = 'R' and DIRECT = 'B' are supported. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply H or H' from the Left */
+/* = 'R': apply H or H' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply H (No transpose) */
+/* = 'C': apply H' (Conjugate transpose) */
+
+/* DIRECT (input) CHARACTER*1 */
+/* Indicates how H is formed from a product of elementary */
+/* reflectors */
+/* = 'F': H = H(1) H(2) . . . H(k) (Forward, not supported yet) */
+/* = 'B': H = H(k) . . . H(2) H(1) (Backward) */
+
+/* STOREV (input) CHARACTER*1 */
+/* Indicates how the vectors which define the elementary */
+/* reflectors are stored: */
+/* = 'C': Columnwise (not supported yet) */
+/* = 'R': Rowwise */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* K (input) INTEGER */
+/* The order of the matrix T (= the number of elementary */
+/* reflectors whose product defines the block reflector). */
+
+/* L (input) INTEGER */
+/* The number of columns of the matrix V containing the */
+/* meaningful part of the Householder reflectors. */
+/* If SIDE = 'L', M >= L >= 0, if SIDE = 'R', N >= L >= 0. */
+
+/* V (input) COMPLEX array, dimension (LDV,NV). */
+/* If STOREV = 'C', NV = K; if STOREV = 'R', NV = L. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. */
+/* If STOREV = 'C', LDV >= L; if STOREV = 'R', LDV >= K. */
+
+/* T (input) COMPLEX array, dimension (LDT,K) */
+/* The triangular K-by-K matrix T in the representation of the */
+/* block reflector. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= K. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by H*C or H'*C or C*H or C*H'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX array, dimension (LDWORK,K) */
+
+/* LDWORK (input) INTEGER */
+/* The leading dimension of the array WORK. */
+/* If SIDE = 'L', LDWORK >= max(1,N); */
+/* if SIDE = 'R', LDWORK >= max(1,M). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ work_dim1 = *ldwork;
+ work_offset = 1 + work_dim1;
+ work -= work_offset;
+
+ /* Function Body */
+ if (*m <= 0 || *n <= 0) {
+ return 0;
+ }
+
+/* Check for currently supported options */
+
+ info = 0;
+ if (! lsame_(direct, "B")) {
+ info = -3;
+ } else if (! lsame_(storev, "R")) {
+ info = -4;
+ }
+ if (info != 0) {
+ i__1 = -info;
+ xerbla_("CLARZB", &i__1);
+ return 0;
+ }
+
+ if (lsame_(trans, "N")) {
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C */
+
+/* W( 1:n, 1:k ) = conjg( C( 1:k, 1:n )' ) */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ ccopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1], &c__1);
+/* L10: */
+ }
+
+/* W( 1:n, 1:k ) = W( 1:n, 1:k ) + ... */
+/* conjg( C( m-l+1:m, 1:n )' ) * V( 1:k, 1:l )' */
+
+ if (*l > 0) {
+ cgemm_("Transpose", "Conjugate transpose", n, k, l, &c_b1, &c__[*
+ m - *l + 1 + c_dim1], ldc, &v[v_offset], ldv, &c_b1, &
+ work[work_offset], ldwork);
+ }
+
+/* W( 1:n, 1:k ) = W( 1:n, 1:k ) * T' or W( 1:m, 1:k ) * T */
+
+ ctrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b1, &t[t_offset]
+, ldt, &work[work_offset], ldwork);
+
+/* C( 1:k, 1:n ) = C( 1:k, 1:n ) - conjg( W( 1:n, 1:k )' ) */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *k;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ i__5 = j + i__ * work_dim1;
+ q__1.r = c__[i__4].r - work[i__5].r, q__1.i = c__[i__4].i -
+ work[i__5].i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L20: */
+ }
+/* L30: */
+ }
+
+/* C( m-l+1:m, 1:n ) = C( m-l+1:m, 1:n ) - ... */
+/* conjg( V( 1:k, 1:l )' ) * conjg( W( 1:n, 1:k )' ) */
+
+ if (*l > 0) {
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("Transpose", "Transpose", l, n, k, &q__1, &v[v_offset],
+ ldv, &work[work_offset], ldwork, &c_b1, &c__[*m - *l + 1
+ + c_dim1], ldc);
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' */
+
+/* W( 1:m, 1:k ) = C( 1:m, 1:k ) */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ ccopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j * work_dim1 + 1], &
+ c__1);
+/* L40: */
+ }
+
+/* W( 1:m, 1:k ) = W( 1:m, 1:k ) + ... */
+/* C( 1:m, n-l+1:n ) * conjg( V( 1:k, 1:l )' ) */
+
+ if (*l > 0) {
+ cgemm_("No transpose", "Transpose", m, k, l, &c_b1, &c__[(*n - *l
+ + 1) * c_dim1 + 1], ldc, &v[v_offset], ldv, &c_b1, &work[
+ work_offset], ldwork);
+ }
+
+/* W( 1:m, 1:k ) = W( 1:m, 1:k ) * conjg( T ) or */
+/* W( 1:m, 1:k ) * conjg( T' ) */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *k - j + 1;
+ clacgv_(&i__2, &t[j + j * t_dim1], &c__1);
+/* L50: */
+ }
+ ctrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b1, &t[t_offset],
+ ldt, &work[work_offset], ldwork);
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *k - j + 1;
+ clacgv_(&i__2, &t[j + j * t_dim1], &c__1);
+/* L60: */
+ }
+
+/* C( 1:m, 1:k ) = C( 1:m, 1:k ) - W( 1:m, 1:k ) */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * work_dim1;
+ q__1.r = c__[i__4].r - work[i__5].r, q__1.i = c__[i__4].i -
+ work[i__5].i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+
+/* C( 1:m, n-l+1:n ) = C( 1:m, n-l+1:n ) - ... */
+/* W( 1:m, 1:k ) * conjg( V( 1:k, 1:l ) ) */
+
+ i__1 = *l;
+ for (j = 1; j <= i__1; ++j) {
+ clacgv_(k, &v[j * v_dim1 + 1], &c__1);
+/* L90: */
+ }
+ if (*l > 0) {
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "No transpose", m, l, k, &q__1, &work[
+ work_offset], ldwork, &v[v_offset], ldv, &c_b1, &c__[(*n
+ - *l + 1) * c_dim1 + 1], ldc);
+ }
+ i__1 = *l;
+ for (j = 1; j <= i__1; ++j) {
+ clacgv_(k, &v[j * v_dim1 + 1], &c__1);
+/* L100: */
+ }
+
+ }
+
+ return 0;
+
+/* End of CLARZB */
+
+} /* clarzb_ */
diff --git a/contrib/libs/clapack/clarzt.c b/contrib/libs/clapack/clarzt.c
new file mode 100644
index 0000000000..8287e70d0d
--- /dev/null
+++ b/contrib/libs/clapack/clarzt.c
@@ -0,0 +1,236 @@
+/* clarzt.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int clarzt_(char *direct, char *storev, integer *n, integer *
+ k, complex *v, integer *ldv, complex *tau, complex *t, integer *ldt)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, v_dim1, v_offset, i__1, i__2;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, j, info;
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ctrmv_(char *, char *, char *, integer *,
+ complex *, integer *, complex *, integer *), clacgv_(integer *, complex *, integer *), xerbla_(char *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLARZT forms the triangular factor T of a complex block reflector */
+/* H of order > n, which is defined as a product of k elementary */
+/* reflectors. */
+
+/* If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular; */
+
+/* If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. */
+
+/* If STOREV = 'C', the vector which defines the elementary reflector */
+/* H(i) is stored in the i-th column of the array V, and */
+
+/* H = I - V * T * V' */
+
+/* If STOREV = 'R', the vector which defines the elementary reflector */
+/* H(i) is stored in the i-th row of the array V, and */
+
+/* H = I - V' * T * V */
+
+/* Currently, only STOREV = 'R' and DIRECT = 'B' are supported. */
+
+/* Arguments */
+/* ========= */
+
+/* DIRECT (input) CHARACTER*1 */
+/* Specifies the order in which the elementary reflectors are */
+/* multiplied to form the block reflector: */
+/* = 'F': H = H(1) H(2) . . . H(k) (Forward, not supported yet) */
+/* = 'B': H = H(k) . . . H(2) H(1) (Backward) */
+
+/* STOREV (input) CHARACTER*1 */
+/* Specifies how the vectors which define the elementary */
+/* reflectors are stored (see also Further Details): */
+/* = 'C': columnwise (not supported yet) */
+/* = 'R': rowwise */
+
+/* N (input) INTEGER */
+/* The order of the block reflector H. N >= 0. */
+
+/* K (input) INTEGER */
+/* The order of the triangular factor T (= the number of */
+/* elementary reflectors). K >= 1. */
+
+/* V (input/output) COMPLEX array, dimension */
+/* (LDV,K) if STOREV = 'C' */
+/* (LDV,N) if STOREV = 'R' */
+/* The matrix V. See further details. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. */
+/* If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. */
+
+/* TAU (input) COMPLEX array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i). */
+
+/* T (output) COMPLEX array, dimension (LDT,K) */
+/* The k by k triangular factor T of the block reflector. */
+/* If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is */
+/* lower triangular. The rest of the array is not used. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= K. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* The shape of the matrix V and the storage of the vectors which define */
+/* the H(i) is best illustrated by the following example with n = 5 and */
+/* k = 3. The elements equal to 1 are not stored; the corresponding */
+/* array elements are modified but restored on exit. The rest of the */
+/* array is not used. */
+
+/* DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': */
+
+/* ______V_____ */
+/* ( v1 v2 v3 ) / \ */
+/* ( v1 v2 v3 ) ( v1 v1 v1 v1 v1 . . . . 1 ) */
+/* V = ( v1 v2 v3 ) ( v2 v2 v2 v2 v2 . . . 1 ) */
+/* ( v1 v2 v3 ) ( v3 v3 v3 v3 v3 . . 1 ) */
+/* ( v1 v2 v3 ) */
+/* . . . */
+/* . . . */
+/* 1 . . */
+/* 1 . */
+/* 1 */
+
+/* DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': */
+
+/* ______V_____ */
+/* 1 / \ */
+/* . 1 ( 1 . . . . v1 v1 v1 v1 v1 ) */
+/* . . 1 ( . 1 . . . v2 v2 v2 v2 v2 ) */
+/* . . . ( . . 1 . . v3 v3 v3 v3 v3 ) */
+/* . . . */
+/* ( v1 v2 v3 ) */
+/* ( v1 v2 v3 ) */
+/* V = ( v1 v2 v3 ) */
+/* ( v1 v2 v3 ) */
+/* ( v1 v2 v3 ) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Check for currently supported options */
+
+ /* Parameter adjustments */
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ --tau;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(direct, "B")) {
+ info = -1;
+ } else if (! lsame_(storev, "R")) {
+ info = -2;
+ }
+ if (info != 0) {
+ i__1 = -info;
+ xerbla_("CLARZT", &i__1);
+ return 0;
+ }
+
+ for (i__ = *k; i__ >= 1; --i__) {
+ i__1 = i__;
+ if (tau[i__1].r == 0.f && tau[i__1].i == 0.f) {
+
+/* H(i) = I */
+
+ i__1 = *k;
+ for (j = i__; j <= i__1; ++j) {
+ i__2 = j + i__ * t_dim1;
+ t[i__2].r = 0.f, t[i__2].i = 0.f;
+/* L10: */
+ }
+ } else {
+
+/* general case */
+
+ if (i__ < *k) {
+
+/* T(i+1:k,i) = - tau(i) * V(i+1:k,1:n) * V(i,1:n)' */
+
+ clacgv_(n, &v[i__ + v_dim1], ldv);
+ i__1 = *k - i__;
+ i__2 = i__;
+ q__1.r = -tau[i__2].r, q__1.i = -tau[i__2].i;
+ cgemv_("No transpose", &i__1, n, &q__1, &v[i__ + 1 + v_dim1],
+ ldv, &v[i__ + v_dim1], ldv, &c_b1, &t[i__ + 1 + i__ *
+ t_dim1], &c__1);
+ clacgv_(n, &v[i__ + v_dim1], ldv);
+
+/* T(i+1:k,i) = T(i+1:k,i+1:k) * T(i+1:k,i) */
+
+ i__1 = *k - i__;
+ ctrmv_("Lower", "No transpose", "Non-unit", &i__1, &t[i__ + 1
+ + (i__ + 1) * t_dim1], ldt, &t[i__ + 1 + i__ * t_dim1]
+, &c__1);
+ }
+ i__1 = i__ + i__ * t_dim1;
+ i__2 = i__;
+ t[i__1].r = tau[i__2].r, t[i__1].i = tau[i__2].i;
+ }
+/* L20: */
+ }
+ return 0;
+
+/* End of CLARZT */
+
+} /* clarzt_ */
diff --git a/contrib/libs/clapack/clascl.c b/contrib/libs/clapack/clascl.c
new file mode 100644
index 0000000000..ca01694ca5
--- /dev/null
+++ b/contrib/libs/clapack/clascl.c
@@ -0,0 +1,377 @@
+/* clascl.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int clascl_(char *type__, integer *kl, integer *ku, real *
+ cfrom, real *cto, integer *m, integer *n, complex *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, j, k1, k2, k3, k4;
+ real mul, cto1;
+ logical done;
+ real ctoc;
+ extern logical lsame_(char *, char *);
+ integer itype;
+ real cfrom1;
+ extern doublereal slamch_(char *);
+ real cfromc;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ extern logical sisnan_(real *);
+ real smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLASCL multiplies the M by N complex matrix A by the real scalar */
+/* CTO/CFROM. This is done without over/underflow as long as the final */
+/* result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that */
+/* A may be full, upper triangular, lower triangular, upper Hessenberg, */
+/* or banded. */
+
+/* Arguments */
+/* ========= */
+
+/* TYPE (input) CHARACTER*1 */
+/* TYPE indicates the storage type of the input matrix. */
+/* = 'G': A is a full matrix. */
+/* = 'L': A is a lower triangular matrix. */
+/* = 'U': A is an upper triangular matrix. */
+/* = 'H': A is an upper Hessenberg matrix. */
+/* = 'B': A is a symmetric band matrix with lower bandwidth KL */
+/* and upper bandwidth KU and with only the lower */
+/* half stored. */
+/* = 'Q': A is a symmetric band matrix with lower bandwidth KL */
+/* and upper bandwidth KU and with only the upper */
+/* half stored. */
+/* = 'Z': A is a band matrix with lower bandwidth KL and upper */
+/* bandwidth KU. */
+
+/* KL (input) INTEGER */
+/* The lower bandwidth of A. Referenced only if TYPE = 'B', */
+/* 'Q' or 'Z'. */
+
+/* KU (input) INTEGER */
+/* The upper bandwidth of A. Referenced only if TYPE = 'B', */
+/* 'Q' or 'Z'. */
+
+/* CFROM (input) REAL */
+/* CTO (input) REAL */
+/* The matrix A is multiplied by CTO/CFROM. A(I,J) is computed */
+/* without over/underflow if the final result CTO*A(I,J)/CFROM */
+/* can be represented without over/underflow. CFROM must be */
+/* nonzero. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* The matrix to be multiplied by CTO/CFROM. See TYPE for the */
+/* storage type. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* INFO (output) INTEGER */
+/* 0 - successful exit */
+/* <0 - if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+
+ if (lsame_(type__, "G")) {
+ itype = 0;
+ } else if (lsame_(type__, "L")) {
+ itype = 1;
+ } else if (lsame_(type__, "U")) {
+ itype = 2;
+ } else if (lsame_(type__, "H")) {
+ itype = 3;
+ } else if (lsame_(type__, "B")) {
+ itype = 4;
+ } else if (lsame_(type__, "Q")) {
+ itype = 5;
+ } else if (lsame_(type__, "Z")) {
+ itype = 6;
+ } else {
+ itype = -1;
+ }
+
+ if (itype == -1) {
+ *info = -1;
+ } else if (*cfrom == 0.f || sisnan_(cfrom)) {
+ *info = -4;
+ } else if (sisnan_(cto)) {
+ *info = -5;
+ } else if (*m < 0) {
+ *info = -6;
+ } else if (*n < 0 || itype == 4 && *n != *m || itype == 5 && *n != *m) {
+ *info = -7;
+ } else if (itype <= 3 && *lda < max(1,*m)) {
+ *info = -9;
+ } else if (itype >= 4) {
+/* Computing MAX */
+ i__1 = *m - 1;
+ if (*kl < 0 || *kl > max(i__1,0)) {
+ *info = -2;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = *n - 1;
+ if (*ku < 0 || *ku > max(i__1,0) || (itype == 4 || itype == 5) &&
+ *kl != *ku) {
+ *info = -3;
+ } else if (itype == 4 && *lda < *kl + 1 || itype == 5 && *lda < *
+ ku + 1 || itype == 6 && *lda < (*kl << 1) + *ku + 1) {
+ *info = -9;
+ }
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLASCL", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *m == 0) {
+ return 0;
+ }
+
+/* Get machine parameters */
+
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+
+ cfromc = *cfrom;
+ ctoc = *cto;
+
+L10:
+ cfrom1 = cfromc * smlnum;
+ if (cfrom1 == cfromc) {
+/* CFROMC is an inf. Multiply by a correctly signed zero for */
+/* finite CTOC, or a NaN if CTOC is infinite. */
+ mul = ctoc / cfromc;
+ done = TRUE_;
+ cto1 = ctoc;
+ } else {
+ cto1 = ctoc / bignum;
+ if (cto1 == ctoc) {
+/* CTOC is either 0 or an inf. In both cases, CTOC itself */
+/* serves as the correct multiplication factor. */
+ mul = ctoc;
+ done = TRUE_;
+ cfromc = 1.f;
+ } else if (dabs(cfrom1) > dabs(ctoc) && ctoc != 0.f) {
+ mul = smlnum;
+ done = FALSE_;
+ cfromc = cfrom1;
+ } else if (dabs(cto1) > dabs(cfromc)) {
+ mul = bignum;
+ done = FALSE_;
+ ctoc = cto1;
+ } else {
+ mul = ctoc / cfromc;
+ done = TRUE_;
+ }
+ }
+
+ if (itype == 0) {
+
+/* Full matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ q__1.r = mul * a[i__4].r, q__1.i = mul * a[i__4].i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L20: */
+ }
+/* L30: */
+ }
+
+ } else if (itype == 1) {
+
+/* Lower triangular matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ q__1.r = mul * a[i__4].r, q__1.i = mul * a[i__4].i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L40: */
+ }
+/* L50: */
+ }
+
+ } else if (itype == 2) {
+
+/* Upper triangular matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(j,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ q__1.r = mul * a[i__4].r, q__1.i = mul * a[i__4].i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L60: */
+ }
+/* L70: */
+ }
+
+ } else if (itype == 3) {
+
+/* Upper Hessenberg matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = j + 1;
+ i__2 = min(i__3,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ q__1.r = mul * a[i__4].r, q__1.i = mul * a[i__4].i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L80: */
+ }
+/* L90: */
+ }
+
+ } else if (itype == 4) {
+
+/* Lower half of a symmetric band matrix */
+
+ k3 = *kl + 1;
+ k4 = *n + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = k3, i__4 = k4 - j;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ q__1.r = mul * a[i__4].r, q__1.i = mul * a[i__4].i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L100: */
+ }
+/* L110: */
+ }
+
+ } else if (itype == 5) {
+
+/* Upper half of a symmetric band matrix */
+
+ k1 = *ku + 2;
+ k3 = *ku + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = k1 - j;
+ i__3 = k3;
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+ i__2 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ q__1.r = mul * a[i__4].r, q__1.i = mul * a[i__4].i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+/* L120: */
+ }
+/* L130: */
+ }
+
+ } else if (itype == 6) {
+
+/* Band matrix */
+
+ k1 = *kl + *ku + 2;
+ k2 = *kl + 1;
+ k3 = (*kl << 1) + *ku + 1;
+ k4 = *kl + *ku + 1 + *m;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__3 = k1 - j;
+/* Computing MIN */
+ i__4 = k3, i__5 = k4 - j;
+ i__2 = min(i__4,i__5);
+ for (i__ = max(i__3,k2); i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ q__1.r = mul * a[i__4].r, q__1.i = mul * a[i__4].i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L140: */
+ }
+/* L150: */
+ }
+
+ }
+
+ if (! done) {
+ goto L10;
+ }
+
+ return 0;
+
+/* End of CLASCL */
+
+} /* clascl_ */
diff --git a/contrib/libs/clapack/claset.c b/contrib/libs/clapack/claset.c
new file mode 100644
index 0000000000..ea011e493b
--- /dev/null
+++ b/contrib/libs/clapack/claset.c
@@ -0,0 +1,162 @@
+/* claset.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int claset_(char *uplo, integer *m, integer *n, complex *
+ alpha, complex *beta, complex *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLASET initializes a 2-D array A to BETA on the diagonal and */
+/* ALPHA on the offdiagonals. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies the part of the matrix A to be set. */
+/* = 'U': Upper triangular part is set. The lower triangle */
+/* is unchanged. */
+/* = 'L': Lower triangular part is set. The upper triangle */
+/* is unchanged. */
+/* Otherwise: All of the matrix A is set. */
+
+/* M (input) INTEGER */
+/* On entry, M specifies the number of rows of A. */
+
+/* N (input) INTEGER */
+/* On entry, N specifies the number of columns of A. */
+
+/* ALPHA (input) COMPLEX */
+/* All the offdiagonal array elements are set to ALPHA. */
+
+/* BETA (input) COMPLEX */
+/* All the diagonal array elements are set to BETA. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the m by n matrix A. */
+/* On exit, A(i,j) = ALPHA, 1 <= i <= m, 1 <= j <= n, i.ne.j; */
+/* A(i,i) = BETA , 1 <= i <= min(m,n) */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ if (lsame_(uplo, "U")) {
+
+/* Set the diagonal to BETA and the strictly upper triangular */
+/* part of the array to ALPHA. */
+
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = j - 1;
+ i__2 = min(i__3,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = alpha->r, a[i__3].i = alpha->i;
+/* L10: */
+ }
+/* L20: */
+ }
+ i__1 = min(*n,*m);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = beta->r, a[i__2].i = beta->i;
+/* L30: */
+ }
+
+ } else if (lsame_(uplo, "L")) {
+
+/* Set the diagonal to BETA and the strictly lower triangular */
+/* part of the array to ALPHA. */
+
+ i__1 = min(*m,*n);
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = alpha->r, a[i__3].i = alpha->i;
+/* L40: */
+ }
+/* L50: */
+ }
+ i__1 = min(*n,*m);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = beta->r, a[i__2].i = beta->i;
+/* L60: */
+ }
+
+ } else {
+
+/* Set the array to BETA on the diagonal and ALPHA on the */
+/* offdiagonal. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = alpha->r, a[i__3].i = alpha->i;
+/* L70: */
+ }
+/* L80: */
+ }
+ i__1 = min(*m,*n);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = beta->r, a[i__2].i = beta->i;
+/* L90: */
+ }
+ }
+
+ return 0;
+
+/* End of CLASET */
+
+} /* claset_ */
diff --git a/contrib/libs/clapack/clasr.c b/contrib/libs/clapack/clasr.c
new file mode 100644
index 0000000000..4f12a7f8c8
--- /dev/null
+++ b/contrib/libs/clapack/clasr.c
@@ -0,0 +1,609 @@
+/* clasr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int clasr_(char *side, char *pivot, char *direct, integer *m,
+ integer *n, real *c__, real *s, complex *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ complex q__1, q__2, q__3;
+
+ /* Local variables */
+ integer i__, j, info;
+ complex temp;
+ extern logical lsame_(char *, char *);
+ real ctemp, stemp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLASR applies a sequence of real plane rotations to a complex matrix */
+/* A, from either the left or the right. */
+
+/* When SIDE = 'L', the transformation takes the form */
+
+/* A := P*A */
+
+/* and when SIDE = 'R', the transformation takes the form */
+
+/* A := A*P**T */
+
+/* where P is an orthogonal matrix consisting of a sequence of z plane */
+/* rotations, with z = M when SIDE = 'L' and z = N when SIDE = 'R', */
+/* and P**T is the transpose of P. */
+
+/* When DIRECT = 'F' (Forward sequence), then */
+
+/* P = P(z-1) * ... * P(2) * P(1) */
+
+/* and when DIRECT = 'B' (Backward sequence), then */
+
+/* P = P(1) * P(2) * ... * P(z-1) */
+
+/* where P(k) is a plane rotation matrix defined by the 2-by-2 rotation */
+
+/* R(k) = ( c(k) s(k) ) */
+/* = ( -s(k) c(k) ). */
+
+/* When PIVOT = 'V' (Variable pivot), the rotation is performed */
+/* for the plane (k,k+1), i.e., P(k) has the form */
+
+/* P(k) = ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+/* ( c(k) s(k) ) */
+/* ( -s(k) c(k) ) */
+/* ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+
+/* where R(k) appears as a rank-2 modification to the identity matrix in */
+/* rows and columns k and k+1. */
+
+/* When PIVOT = 'T' (Top pivot), the rotation is performed for the */
+/* plane (1,k+1), so P(k) has the form */
+
+/* P(k) = ( c(k) s(k) ) */
+/* ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+/* ( -s(k) c(k) ) */
+/* ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+
+/* where R(k) appears in rows and columns 1 and k+1. */
+
+/* Similarly, when PIVOT = 'B' (Bottom pivot), the rotation is */
+/* performed for the plane (k,z), giving P(k) the form */
+
+/* P(k) = ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+/* ( c(k) s(k) ) */
+/* ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+/* ( -s(k) c(k) ) */
+
+/* where R(k) appears in rows and columns k and z. The rotations are */
+/* performed without ever forming P(k) explicitly. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* Specifies whether the plane rotation matrix P is applied to */
+/* A on the left or the right. */
+/* = 'L': Left, compute A := P*A */
+/* = 'R': Right, compute A:= A*P**T */
+
+/* PIVOT (input) CHARACTER*1 */
+/* Specifies the plane for which P(k) is a plane rotation */
+/* matrix. */
+/* = 'V': Variable pivot, the plane (k,k+1) */
+/* = 'T': Top pivot, the plane (1,k+1) */
+/* = 'B': Bottom pivot, the plane (k,z) */
+
+/* DIRECT (input) CHARACTER*1 */
+/* Specifies whether P is a forward or backward sequence of */
+/* plane rotations. */
+/* = 'F': Forward, P = P(z-1)*...*P(2)*P(1) */
+/* = 'B': Backward, P = P(1)*P(2)*...*P(z-1) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. If m <= 1, an immediate */
+/* return is effected. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. If n <= 1, an */
+/* immediate return is effected. */
+
+/* C (input) REAL array, dimension */
+/* (M-1) if SIDE = 'L' */
+/* (N-1) if SIDE = 'R' */
+/* The cosines c(k) of the plane rotations. */
+
+/* S (input) REAL array, dimension */
+/* (M-1) if SIDE = 'L' */
+/* (N-1) if SIDE = 'R' */
+/* The sines s(k) of the plane rotations. The 2-by-2 plane */
+/* rotation part of the matrix P(k), R(k), has the form */
+/* R(k) = ( c(k) s(k) ) */
+/* ( -s(k) c(k) ). */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* The M-by-N matrix A. On exit, A is overwritten by P*A if */
+/* SIDE = 'L' or by A*P**T if SIDE = 'R'. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ --c__;
+ --s;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! (lsame_(side, "L") || lsame_(side, "R"))) {
+ info = 1;
+ } else if (! (lsame_(pivot, "V") || lsame_(pivot,
+ "T") || lsame_(pivot, "B"))) {
+ info = 2;
+ } else if (! (lsame_(direct, "F") || lsame_(direct,
+ "B"))) {
+ info = 3;
+ } else if (*m < 0) {
+ info = 4;
+ } else if (*n < 0) {
+ info = 5;
+ } else if (*lda < max(1,*m)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("CLASR ", &info);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+ if (lsame_(side, "L")) {
+
+/* Form P * A */
+
+ if (lsame_(pivot, "V")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + 1 + i__ * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = j + 1 + i__ * a_dim1;
+ q__2.r = ctemp * temp.r, q__2.i = ctemp * temp.i;
+ i__4 = j + i__ * a_dim1;
+ q__3.r = stemp * a[i__4].r, q__3.i = stemp * a[
+ i__4].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ i__3 = j + i__ * a_dim1;
+ q__2.r = stemp * temp.r, q__2.i = stemp * temp.i;
+ i__4 = j + i__ * a_dim1;
+ q__3.r = ctemp * a[i__4].r, q__3.i = ctemp * a[
+ i__4].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = j + 1 + i__ * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = j + 1 + i__ * a_dim1;
+ q__2.r = ctemp * temp.r, q__2.i = ctemp * temp.i;
+ i__3 = j + i__ * a_dim1;
+ q__3.r = stemp * a[i__3].r, q__3.i = stemp * a[
+ i__3].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ i__2 = j + i__ * a_dim1;
+ q__2.r = stemp * temp.r, q__2.i = stemp * temp.i;
+ i__3 = j + i__ * a_dim1;
+ q__3.r = ctemp * a[i__3].r, q__3.i = ctemp * a[
+ i__3].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+/* L30: */
+ }
+ }
+/* L40: */
+ }
+ }
+ } else if (lsame_(pivot, "T")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m;
+ for (j = 2; j <= i__1; ++j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = j + i__ * a_dim1;
+ q__2.r = ctemp * temp.r, q__2.i = ctemp * temp.i;
+ i__4 = i__ * a_dim1 + 1;
+ q__3.r = stemp * a[i__4].r, q__3.i = stemp * a[
+ i__4].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ i__3 = i__ * a_dim1 + 1;
+ q__2.r = stemp * temp.r, q__2.i = stemp * temp.i;
+ i__4 = i__ * a_dim1 + 1;
+ q__3.r = ctemp * a[i__4].r, q__3.i = ctemp * a[
+ i__4].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m; j >= 2; --j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = j + i__ * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = j + i__ * a_dim1;
+ q__2.r = ctemp * temp.r, q__2.i = ctemp * temp.i;
+ i__3 = i__ * a_dim1 + 1;
+ q__3.r = stemp * a[i__3].r, q__3.i = stemp * a[
+ i__3].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ i__2 = i__ * a_dim1 + 1;
+ q__2.r = stemp * temp.r, q__2.i = stemp * temp.i;
+ i__3 = i__ * a_dim1 + 1;
+ q__3.r = ctemp * a[i__3].r, q__3.i = ctemp * a[
+ i__3].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+/* L70: */
+ }
+ }
+/* L80: */
+ }
+ }
+ } else if (lsame_(pivot, "B")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = j + i__ * a_dim1;
+ i__4 = *m + i__ * a_dim1;
+ q__2.r = stemp * a[i__4].r, q__2.i = stemp * a[
+ i__4].i;
+ q__3.r = ctemp * temp.r, q__3.i = ctemp * temp.i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ i__3 = *m + i__ * a_dim1;
+ i__4 = *m + i__ * a_dim1;
+ q__2.r = ctemp * a[i__4].r, q__2.i = ctemp * a[
+ i__4].i;
+ q__3.r = stemp * temp.r, q__3.i = stemp * temp.i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L90: */
+ }
+ }
+/* L100: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = j + i__ * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = j + i__ * a_dim1;
+ i__3 = *m + i__ * a_dim1;
+ q__2.r = stemp * a[i__3].r, q__2.i = stemp * a[
+ i__3].i;
+ q__3.r = ctemp * temp.r, q__3.i = ctemp * temp.i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ i__2 = *m + i__ * a_dim1;
+ i__3 = *m + i__ * a_dim1;
+ q__2.r = ctemp * a[i__3].r, q__2.i = ctemp * a[
+ i__3].i;
+ q__3.r = stemp * temp.r, q__3.i = stemp * temp.i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+ }
+ }
+ } else if (lsame_(side, "R")) {
+
+/* Form A * P' */
+
+ if (lsame_(pivot, "V")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + (j + 1) * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = i__ + (j + 1) * a_dim1;
+ q__2.r = ctemp * temp.r, q__2.i = ctemp * temp.i;
+ i__4 = i__ + j * a_dim1;
+ q__3.r = stemp * a[i__4].r, q__3.i = stemp * a[
+ i__4].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ i__3 = i__ + j * a_dim1;
+ q__2.r = stemp * temp.r, q__2.i = stemp * temp.i;
+ i__4 = i__ + j * a_dim1;
+ q__3.r = ctemp * a[i__4].r, q__3.i = ctemp * a[
+ i__4].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L130: */
+ }
+ }
+/* L140: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + (j + 1) * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = i__ + (j + 1) * a_dim1;
+ q__2.r = ctemp * temp.r, q__2.i = ctemp * temp.i;
+ i__3 = i__ + j * a_dim1;
+ q__3.r = stemp * a[i__3].r, q__3.i = stemp * a[
+ i__3].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ i__2 = i__ + j * a_dim1;
+ q__2.r = stemp * temp.r, q__2.i = stemp * temp.i;
+ i__3 = i__ + j * a_dim1;
+ q__3.r = ctemp * a[i__3].r, q__3.i = ctemp * a[
+ i__3].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+/* L150: */
+ }
+ }
+/* L160: */
+ }
+ }
+ } else if (lsame_(pivot, "T")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = i__ + j * a_dim1;
+ q__2.r = ctemp * temp.r, q__2.i = ctemp * temp.i;
+ i__4 = i__ + a_dim1;
+ q__3.r = stemp * a[i__4].r, q__3.i = stemp * a[
+ i__4].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ i__3 = i__ + a_dim1;
+ q__2.r = stemp * temp.r, q__2.i = stemp * temp.i;
+ i__4 = i__ + a_dim1;
+ q__3.r = ctemp * a[i__4].r, q__3.i = ctemp * a[
+ i__4].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L170: */
+ }
+ }
+/* L180: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n; j >= 2; --j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = i__ + j * a_dim1;
+ q__2.r = ctemp * temp.r, q__2.i = ctemp * temp.i;
+ i__3 = i__ + a_dim1;
+ q__3.r = stemp * a[i__3].r, q__3.i = stemp * a[
+ i__3].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ i__2 = i__ + a_dim1;
+ q__2.r = stemp * temp.r, q__2.i = stemp * temp.i;
+ i__3 = i__ + a_dim1;
+ q__3.r = ctemp * a[i__3].r, q__3.i = ctemp * a[
+ i__3].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+/* L190: */
+ }
+ }
+/* L200: */
+ }
+ }
+ } else if (lsame_(pivot, "B")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + *n * a_dim1;
+ q__2.r = stemp * a[i__4].r, q__2.i = stemp * a[
+ i__4].i;
+ q__3.r = ctemp * temp.r, q__3.i = ctemp * temp.i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ i__3 = i__ + *n * a_dim1;
+ i__4 = i__ + *n * a_dim1;
+ q__2.r = ctemp * a[i__4].r, q__2.i = ctemp * a[
+ i__4].i;
+ q__3.r = stemp * temp.r, q__3.i = stemp * temp.i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L210: */
+ }
+ }
+/* L220: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__ + *n * a_dim1;
+ q__2.r = stemp * a[i__3].r, q__2.i = stemp * a[
+ i__3].i;
+ q__3.r = ctemp * temp.r, q__3.i = ctemp * temp.i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ i__2 = i__ + *n * a_dim1;
+ i__3 = i__ + *n * a_dim1;
+ q__2.r = ctemp * a[i__3].r, q__2.i = ctemp * a[
+ i__3].i;
+ q__3.r = stemp * temp.r, q__3.i = stemp * temp.i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CLASR */
+
+} /* clasr_ */
diff --git a/contrib/libs/clapack/classq.c b/contrib/libs/clapack/classq.c
new file mode 100644
index 0000000000..22e9b8cb75
--- /dev/null
+++ b/contrib/libs/clapack/classq.c
@@ -0,0 +1,138 @@
+/* classq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int classq_(integer *n, complex *x, integer *incx, real *
+ scale, real *sumsq)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ real r__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer ix;
+ real temp1;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLASSQ returns the values scl and ssq such that */
+
+/* ( scl**2 )*ssq = x( 1 )**2 +...+ x( n )**2 + ( scale**2 )*sumsq, */
+
+/* where x( i ) = abs( X( 1 + ( i - 1 )*INCX ) ). The value of sumsq is */
+/* assumed to be at least unity and the value of ssq will then satisfy */
+
+/* 1.0 .le. ssq .le. ( sumsq + 2*n ). */
+
+/* scale is assumed to be non-negative and scl returns the value */
+
+/* scl = max( scale, abs( real( x( i ) ) ), abs( aimag( x( i ) ) ) ), */
+/* i */
+
+/* scale and sumsq must be supplied in SCALE and SUMSQ respectively. */
+/* SCALE and SUMSQ are overwritten by scl and ssq respectively. */
+
+/* The routine makes only one pass through the vector X. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of elements to be used from the vector X. */
+
+/* X (input) COMPLEX array, dimension (N) */
+/* The vector x as described above. */
+/* x( i ) = X( 1 + ( i - 1 )*INCX ), 1 <= i <= n. */
+
+/* INCX (input) INTEGER */
+/* The increment between successive values of the vector X. */
+/* INCX > 0. */
+
+/* SCALE (input/output) REAL */
+/* On entry, the value scale in the equation above. */
+/* On exit, SCALE is overwritten with the value scl . */
+
+/* SUMSQ (input/output) REAL */
+/* On entry, the value sumsq in the equation above. */
+/* On exit, SUMSQ is overwritten with the value ssq . */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*n > 0) {
+ i__1 = (*n - 1) * *incx + 1;
+ i__2 = *incx;
+ for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
+ i__3 = ix;
+ if (x[i__3].r != 0.f) {
+ i__3 = ix;
+ temp1 = (r__1 = x[i__3].r, dabs(r__1));
+ if (*scale < temp1) {
+/* Computing 2nd power */
+ r__1 = *scale / temp1;
+ *sumsq = *sumsq * (r__1 * r__1) + 1;
+ *scale = temp1;
+ } else {
+/* Computing 2nd power */
+ r__1 = temp1 / *scale;
+ *sumsq += r__1 * r__1;
+ }
+ }
+ if (r_imag(&x[ix]) != 0.f) {
+ temp1 = (r__1 = r_imag(&x[ix]), dabs(r__1));
+ if (*scale < temp1) {
+/* Computing 2nd power */
+ r__1 = *scale / temp1;
+ *sumsq = *sumsq * (r__1 * r__1) + 1;
+ *scale = temp1;
+ } else {
+/* Computing 2nd power */
+ r__1 = temp1 / *scale;
+ *sumsq += r__1 * r__1;
+ }
+ }
+/* L10: */
+ }
+ }
+
+ return 0;
+
+/* End of CLASSQ */
+
+} /* classq_ */
diff --git a/contrib/libs/clapack/claswp.c b/contrib/libs/clapack/claswp.c
new file mode 100644
index 0000000000..01566c752e
--- /dev/null
+++ b/contrib/libs/clapack/claswp.c
@@ -0,0 +1,166 @@
+/* claswp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int claswp_(integer *n, complex *a, integer *lda, integer *
+ k1, integer *k2, integer *ipiv, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+
+ /* Local variables */
+ integer i__, j, k, i1, i2, n32, ip, ix, ix0, inc;
+ complex temp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLASWP performs a series of row interchanges on the matrix A. */
+/* One row interchange is initiated for each of rows K1 through K2 of A. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the matrix of column dimension N to which the row */
+/* interchanges will be applied. */
+/* On exit, the permuted matrix. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+
+/* K1 (input) INTEGER */
+/* The first element of IPIV for which a row interchange will */
+/* be done. */
+
+/* K2 (input) INTEGER */
+/* The last element of IPIV for which a row interchange will */
+/* be done. */
+
+/* IPIV (input) INTEGER array, dimension (K2*abs(INCX)) */
+/* The vector of pivot indices. Only the elements in positions */
+/* K1 through K2 of IPIV are accessed. */
+/* IPIV(K) = L implies rows K and L are to be interchanged. */
+
+/* INCX (input) INTEGER */
+/* The increment between successive values of IPIV. If IPIV */
+/* is negative, the pivots are applied in reverse order. */
+
+/* Further Details */
+/* =============== */
+
+/* Modified by */
+/* R. C. Whaley, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Interchange row I with row IPIV(I) for each of rows K1 through K2. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ if (*incx > 0) {
+ ix0 = *k1;
+ i1 = *k1;
+ i2 = *k2;
+ inc = 1;
+ } else if (*incx < 0) {
+ ix0 = (1 - *k2) * *incx + 1;
+ i1 = *k2;
+ i2 = *k1;
+ inc = -1;
+ } else {
+ return 0;
+ }
+
+ n32 = *n / 32 << 5;
+ if (n32 != 0) {
+ i__1 = n32;
+ for (j = 1; j <= i__1; j += 32) {
+ ix = ix0;
+ i__2 = i2;
+ i__3 = inc;
+ for (i__ = i1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3)
+ {
+ ip = ipiv[ix];
+ if (ip != i__) {
+ i__4 = j + 31;
+ for (k = j; k <= i__4; ++k) {
+ i__5 = i__ + k * a_dim1;
+ temp.r = a[i__5].r, temp.i = a[i__5].i;
+ i__5 = i__ + k * a_dim1;
+ i__6 = ip + k * a_dim1;
+ a[i__5].r = a[i__6].r, a[i__5].i = a[i__6].i;
+ i__5 = ip + k * a_dim1;
+ a[i__5].r = temp.r, a[i__5].i = temp.i;
+/* L10: */
+ }
+ }
+ ix += *incx;
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+ if (n32 != *n) {
+ ++n32;
+ ix = ix0;
+ i__1 = i2;
+ i__3 = inc;
+ for (i__ = i1; i__3 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__3) {
+ ip = ipiv[ix];
+ if (ip != i__) {
+ i__2 = *n;
+ for (k = n32; k <= i__2; ++k) {
+ i__4 = i__ + k * a_dim1;
+ temp.r = a[i__4].r, temp.i = a[i__4].i;
+ i__4 = i__ + k * a_dim1;
+ i__5 = ip + k * a_dim1;
+ a[i__4].r = a[i__5].r, a[i__4].i = a[i__5].i;
+ i__4 = ip + k * a_dim1;
+ a[i__4].r = temp.r, a[i__4].i = temp.i;
+/* L40: */
+ }
+ }
+ ix += *incx;
+/* L50: */
+ }
+ }
+
+ return 0;
+
+/* End of CLASWP */
+
+} /* claswp_ */
diff --git a/contrib/libs/clapack/clasyf.c b/contrib/libs/clapack/clasyf.c
new file mode 100644
index 0000000000..3010bbaf98
--- /dev/null
+++ b/contrib/libs/clapack/clasyf.c
@@ -0,0 +1,829 @@
+/* clasyf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int clasyf_(char *uplo, integer *n, integer *nb, integer *kb,
+ complex *a, integer *lda, integer *ipiv, complex *w, integer *ldw,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_imag(complex *);
+ void c_div(complex *, complex *, complex *);
+
+ /* Local variables */
+ integer j, k;
+ complex t, r1, d11, d21, d22;
+ integer jb, jj, kk, jp, kp, kw, kkw, imax, jmax;
+ real alpha;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *), cgemm_(char *, char *, integer *, integer *, integer *
+, complex *, complex *, integer *, complex *, integer *, complex *
+, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *), ccopy_(integer *, complex *, integer *,
+ complex *, integer *), cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ integer kstep;
+ real absakk;
+ extern integer icamax_(integer *, complex *, integer *);
+ real colmax, rowmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLASYF computes a partial factorization of a complex symmetric matrix */
+/* A using the Bunch-Kaufman diagonal pivoting method. The partial */
+/* factorization has the form: */
+
+/* A = ( I U12 ) ( A11 0 ) ( I 0 ) if UPLO = 'U', or: */
+/* ( 0 U22 ) ( 0 D ) ( U12' U22' ) */
+
+/* A = ( L11 0 ) ( D 0 ) ( L11' L21' ) if UPLO = 'L' */
+/* ( L21 I ) ( 0 A22 ) ( 0 I ) */
+
+/* where the order of D is at most NB. The actual order is returned in */
+/* the argument KB, and is either NB or NB-1, or N if N <= NB. */
+/* Note that U' denotes the transpose of U. */
+
+/* CLASYF is an auxiliary routine called by CSYTRF. It uses blocked code */
+/* (calling Level 3 BLAS) to update the submatrix A11 (if UPLO = 'U') or */
+/* A22 (if UPLO = 'L'). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NB (input) INTEGER */
+/* The maximum number of columns of the matrix A that should be */
+/* factored. NB should be at least 2 to allow for 2-by-2 pivot */
+/* blocks. */
+
+/* KB (output) INTEGER */
+/* The number of columns of A that were actually factored. */
+/* KB is either NB-1 or NB, or N if N <= NB. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+/* On exit, A contains details of the partial factorization. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If UPLO = 'U', only the last KB elements of IPIV are set; */
+/* if UPLO = 'L', only the first KB elements are set. */
+
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* W (workspace) COMPLEX array, dimension (LDW,NB) */
+
+/* LDW (input) INTEGER */
+/* The leading dimension of the array W. LDW >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* > 0: if INFO = k, D(k,k) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ w_dim1 = *ldw;
+ w_offset = 1 + w_dim1;
+ w -= w_offset;
+
+ /* Function Body */
+ *info = 0;
+
+/* Initialize ALPHA for use in choosing pivot block size. */
+
+ alpha = (sqrt(17.f) + 1.f) / 8.f;
+
+ if (lsame_(uplo, "U")) {
+
+/* Factorize the trailing columns of A using the upper triangle */
+/* of A and working backwards, and compute the matrix W = U12*D */
+/* for use in updating A11 */
+
+/* K is the main loop index, decreasing from N in steps of 1 or 2 */
+
+/* KW is the column of W which corresponds to column K of A */
+
+ k = *n;
+L10:
+ kw = *nb + k - *n;
+
+/* Exit from loop */
+
+ if (k <= *n - *nb + 1 && *nb < *n || k < 1) {
+ goto L30;
+ }
+
+/* Copy column K of A to column KW of W and update it */
+
+ ccopy_(&k, &a[k * a_dim1 + 1], &c__1, &w[kw * w_dim1 + 1], &c__1);
+ if (k < *n) {
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &k, &i__1, &q__1, &a[(k + 1) * a_dim1 + 1],
+ lda, &w[k + (kw + 1) * w_dim1], ldw, &c_b1, &w[kw *
+ w_dim1 + 1], &c__1);
+ }
+
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ i__1 = k + kw * w_dim1;
+ absakk = (r__1 = w[i__1].r, dabs(r__1)) + (r__2 = r_imag(&w[k + kw *
+ w_dim1]), dabs(r__2));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ imax = icamax_(&i__1, &w[kw * w_dim1 + 1], &c__1);
+ i__1 = imax + kw * w_dim1;
+ colmax = (r__1 = w[i__1].r, dabs(r__1)) + (r__2 = r_imag(&w[imax
+ + kw * w_dim1]), dabs(r__2));
+ } else {
+ colmax = 0.f;
+ }
+
+ if (dmax(absakk,colmax) == 0.f) {
+
+/* Column K is zero: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* Copy column IMAX to column KW-1 of W and update it */
+
+ ccopy_(&imax, &a[imax * a_dim1 + 1], &c__1, &w[(kw - 1) *
+ w_dim1 + 1], &c__1);
+ i__1 = k - imax;
+ ccopy_(&i__1, &a[imax + (imax + 1) * a_dim1], lda, &w[imax +
+ 1 + (kw - 1) * w_dim1], &c__1);
+ if (k < *n) {
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &k, &i__1, &q__1, &a[(k + 1) *
+ a_dim1 + 1], lda, &w[imax + (kw + 1) * w_dim1],
+ ldw, &c_b1, &w[(kw - 1) * w_dim1 + 1], &c__1);
+ }
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = k - imax;
+ jmax = imax + icamax_(&i__1, &w[imax + 1 + (kw - 1) * w_dim1],
+ &c__1);
+ i__1 = jmax + (kw - 1) * w_dim1;
+ rowmax = (r__1 = w[i__1].r, dabs(r__1)) + (r__2 = r_imag(&w[
+ jmax + (kw - 1) * w_dim1]), dabs(r__2));
+ if (imax > 1) {
+ i__1 = imax - 1;
+ jmax = icamax_(&i__1, &w[(kw - 1) * w_dim1 + 1], &c__1);
+/* Computing MAX */
+ i__1 = jmax + (kw - 1) * w_dim1;
+ r__3 = rowmax, r__4 = (r__1 = w[i__1].r, dabs(r__1)) + (
+ r__2 = r_imag(&w[jmax + (kw - 1) * w_dim1]), dabs(
+ r__2));
+ rowmax = dmax(r__3,r__4);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else /* if(complicated condition) */ {
+ i__1 = imax + (kw - 1) * w_dim1;
+ if ((r__1 = w[i__1].r, dabs(r__1)) + (r__2 = r_imag(&w[
+ imax + (kw - 1) * w_dim1]), dabs(r__2)) >= alpha *
+ rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+
+/* copy column KW-1 of W to column KW */
+
+ ccopy_(&k, &w[(kw - 1) * w_dim1 + 1], &c__1, &w[kw *
+ w_dim1 + 1], &c__1);
+ } else {
+
+/* interchange rows and columns K-1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+ }
+
+ kk = k - kstep + 1;
+ kkw = *nb + kk - *n;
+
+/* Updated column KP is already stored in column KKW of W */
+
+ if (kp != kk) {
+
+/* Copy non-updated column KK to column KP */
+
+ i__1 = kp + k * a_dim1;
+ i__2 = kk + k * a_dim1;
+ a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+ i__1 = k - 1 - kp;
+ ccopy_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + (kp +
+ 1) * a_dim1], lda);
+ ccopy_(&kp, &a[kk * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1], &
+ c__1);
+
+/* Interchange rows KK and KP in last KK columns of A and W */
+
+ i__1 = *n - kk + 1;
+ cswap_(&i__1, &a[kk + kk * a_dim1], lda, &a[kp + kk * a_dim1],
+ lda);
+ i__1 = *n - kk + 1;
+ cswap_(&i__1, &w[kk + kkw * w_dim1], ldw, &w[kp + kkw *
+ w_dim1], ldw);
+ }
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column KW of W now holds */
+
+/* W(k) = U(k)*D(k) */
+
+/* where U(k) is the k-th column of U */
+
+/* Store U(k) in column k of A */
+
+ ccopy_(&k, &w[kw * w_dim1 + 1], &c__1, &a[k * a_dim1 + 1], &
+ c__1);
+ c_div(&q__1, &c_b1, &a[k + k * a_dim1]);
+ r1.r = q__1.r, r1.i = q__1.i;
+ i__1 = k - 1;
+ cscal_(&i__1, &r1, &a[k * a_dim1 + 1], &c__1);
+ } else {
+
+/* 2-by-2 pivot block D(k): columns KW and KW-1 of W now */
+/* hold */
+
+/* ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */
+
+/* where U(k) and U(k-1) are the k-th and (k-1)-th columns */
+/* of U */
+
+ if (k > 2) {
+
+/* Store U(k) and U(k-1) in columns k and k-1 of A */
+
+ i__1 = k - 1 + kw * w_dim1;
+ d21.r = w[i__1].r, d21.i = w[i__1].i;
+ c_div(&q__1, &w[k + kw * w_dim1], &d21);
+ d11.r = q__1.r, d11.i = q__1.i;
+ c_div(&q__1, &w[k - 1 + (kw - 1) * w_dim1], &d21);
+ d22.r = q__1.r, d22.i = q__1.i;
+ q__3.r = d11.r * d22.r - d11.i * d22.i, q__3.i = d11.r *
+ d22.i + d11.i * d22.r;
+ q__2.r = q__3.r - 1.f, q__2.i = q__3.i - 0.f;
+ c_div(&q__1, &c_b1, &q__2);
+ t.r = q__1.r, t.i = q__1.i;
+ c_div(&q__1, &t, &d21);
+ d21.r = q__1.r, d21.i = q__1.i;
+ i__1 = k - 2;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + (k - 1) * a_dim1;
+ i__3 = j + (kw - 1) * w_dim1;
+ q__3.r = d11.r * w[i__3].r - d11.i * w[i__3].i,
+ q__3.i = d11.r * w[i__3].i + d11.i * w[i__3]
+ .r;
+ i__4 = j + kw * w_dim1;
+ q__2.r = q__3.r - w[i__4].r, q__2.i = q__3.i - w[i__4]
+ .i;
+ q__1.r = d21.r * q__2.r - d21.i * q__2.i, q__1.i =
+ d21.r * q__2.i + d21.i * q__2.r;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ i__2 = j + k * a_dim1;
+ i__3 = j + kw * w_dim1;
+ q__3.r = d22.r * w[i__3].r - d22.i * w[i__3].i,
+ q__3.i = d22.r * w[i__3].i + d22.i * w[i__3]
+ .r;
+ i__4 = j + (kw - 1) * w_dim1;
+ q__2.r = q__3.r - w[i__4].r, q__2.i = q__3.i - w[i__4]
+ .i;
+ q__1.r = d21.r * q__2.r - d21.i * q__2.i, q__1.i =
+ d21.r * q__2.i + d21.i * q__2.r;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+/* L20: */
+ }
+ }
+
+/* Copy D(k) to A */
+
+ i__1 = k - 1 + (k - 1) * a_dim1;
+ i__2 = k - 1 + (kw - 1) * w_dim1;
+ a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+ i__1 = k - 1 + k * a_dim1;
+ i__2 = k - 1 + kw * w_dim1;
+ a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+ i__1 = k + k * a_dim1;
+ i__2 = k + kw * w_dim1;
+ a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k - 1] = -kp;
+ }
+
+/* Decrease K and return to the start of the main loop */
+
+ k -= kstep;
+ goto L10;
+
+L30:
+
+/* Update the upper triangle of A11 (= A(1:k,1:k)) as */
+
+/* A11 := A11 - U12*D*U12' = A11 - U12*W' */
+
+/* computing blocks of NB columns at a time */
+
+ i__1 = -(*nb);
+ for (j = (k - 1) / *nb * *nb + 1; i__1 < 0 ? j >= 1 : j <= 1; j +=
+ i__1) {
+/* Computing MIN */
+ i__2 = *nb, i__3 = k - j + 1;
+ jb = min(i__2,i__3);
+
+/* Update the upper triangle of the diagonal block */
+
+ i__2 = j + jb - 1;
+ for (jj = j; jj <= i__2; ++jj) {
+ i__3 = jj - j + 1;
+ i__4 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__3, &i__4, &q__1, &a[j + (k + 1) *
+ a_dim1], lda, &w[jj + (kw + 1) * w_dim1], ldw, &c_b1,
+ &a[j + jj * a_dim1], &c__1);
+/* L40: */
+ }
+
+/* Update the rectangular superdiagonal block */
+
+ i__2 = j - 1;
+ i__3 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "Transpose", &i__2, &jb, &i__3, &q__1, &a[(
+ k + 1) * a_dim1 + 1], lda, &w[j + (kw + 1) * w_dim1], ldw,
+ &c_b1, &a[j * a_dim1 + 1], lda);
+/* L50: */
+ }
+
+/* Put U12 in standard form by partially undoing the interchanges */
+/* in columns k+1:n */
+
+ j = k + 1;
+L60:
+ jj = j;
+ jp = ipiv[j];
+ if (jp < 0) {
+ jp = -jp;
+ ++j;
+ }
+ ++j;
+ if (jp != jj && j <= *n) {
+ i__1 = *n - j + 1;
+ cswap_(&i__1, &a[jp + j * a_dim1], lda, &a[jj + j * a_dim1], lda);
+ }
+ if (j <= *n) {
+ goto L60;
+ }
+
+/* Set KB to the number of columns factorized */
+
+ *kb = *n - k;
+
+ } else {
+
+/* Factorize the leading columns of A using the lower triangle */
+/* of A and working forwards, and compute the matrix W = L21*D */
+/* for use in updating A22 */
+
+/* K is the main loop index, increasing from 1 in steps of 1 or 2 */
+
+ k = 1;
+L70:
+
+/* Exit from loop */
+
+ if (k >= *nb && *nb < *n || k > *n) {
+ goto L90;
+ }
+
+/* Copy column K of A to column K of W and update it */
+
+ i__1 = *n - k + 1;
+ ccopy_(&i__1, &a[k + k * a_dim1], &c__1, &w[k + k * w_dim1], &c__1);
+ i__1 = *n - k + 1;
+ i__2 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__1, &i__2, &q__1, &a[k + a_dim1], lda, &w[k
+ + w_dim1], ldw, &c_b1, &w[k + k * w_dim1], &c__1);
+
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ i__1 = k + k * w_dim1;
+ absakk = (r__1 = w[i__1].r, dabs(r__1)) + (r__2 = r_imag(&w[k + k *
+ w_dim1]), dabs(r__2));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ imax = k + icamax_(&i__1, &w[k + 1 + k * w_dim1], &c__1);
+ i__1 = imax + k * w_dim1;
+ colmax = (r__1 = w[i__1].r, dabs(r__1)) + (r__2 = r_imag(&w[imax
+ + k * w_dim1]), dabs(r__2));
+ } else {
+ colmax = 0.f;
+ }
+
+ if (dmax(absakk,colmax) == 0.f) {
+
+/* Column K is zero: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* Copy column IMAX to column K+1 of W and update it */
+
+ i__1 = imax - k;
+ ccopy_(&i__1, &a[imax + k * a_dim1], lda, &w[k + (k + 1) *
+ w_dim1], &c__1);
+ i__1 = *n - imax + 1;
+ ccopy_(&i__1, &a[imax + imax * a_dim1], &c__1, &w[imax + (k +
+ 1) * w_dim1], &c__1);
+ i__1 = *n - k + 1;
+ i__2 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__1, &i__2, &q__1, &a[k + a_dim1],
+ lda, &w[imax + w_dim1], ldw, &c_b1, &w[k + (k + 1) *
+ w_dim1], &c__1);
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = imax - k;
+ jmax = k - 1 + icamax_(&i__1, &w[k + (k + 1) * w_dim1], &c__1)
+ ;
+ i__1 = jmax + (k + 1) * w_dim1;
+ rowmax = (r__1 = w[i__1].r, dabs(r__1)) + (r__2 = r_imag(&w[
+ jmax + (k + 1) * w_dim1]), dabs(r__2));
+ if (imax < *n) {
+ i__1 = *n - imax;
+ jmax = imax + icamax_(&i__1, &w[imax + 1 + (k + 1) *
+ w_dim1], &c__1);
+/* Computing MAX */
+ i__1 = jmax + (k + 1) * w_dim1;
+ r__3 = rowmax, r__4 = (r__1 = w[i__1].r, dabs(r__1)) + (
+ r__2 = r_imag(&w[jmax + (k + 1) * w_dim1]), dabs(
+ r__2));
+ rowmax = dmax(r__3,r__4);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else /* if(complicated condition) */ {
+ i__1 = imax + (k + 1) * w_dim1;
+ if ((r__1 = w[i__1].r, dabs(r__1)) + (r__2 = r_imag(&w[
+ imax + (k + 1) * w_dim1]), dabs(r__2)) >= alpha *
+ rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+
+/* copy column K+1 of W to column K */
+
+ i__1 = *n - k + 1;
+ ccopy_(&i__1, &w[k + (k + 1) * w_dim1], &c__1, &w[k +
+ k * w_dim1], &c__1);
+ } else {
+
+/* interchange rows and columns K+1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+ }
+
+ kk = k + kstep - 1;
+
+/* Updated column KP is already stored in column KK of W */
+
+ if (kp != kk) {
+
+/* Copy non-updated column KK to column KP */
+
+ i__1 = kp + k * a_dim1;
+ i__2 = kk + k * a_dim1;
+ a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+ i__1 = kp - k - 1;
+ ccopy_(&i__1, &a[k + 1 + kk * a_dim1], &c__1, &a[kp + (k + 1)
+ * a_dim1], lda);
+ i__1 = *n - kp + 1;
+ ccopy_(&i__1, &a[kp + kk * a_dim1], &c__1, &a[kp + kp *
+ a_dim1], &c__1);
+
+/* Interchange rows KK and KP in first KK columns of A and W */
+
+ cswap_(&kk, &a[kk + a_dim1], lda, &a[kp + a_dim1], lda);
+ cswap_(&kk, &w[kk + w_dim1], ldw, &w[kp + w_dim1], ldw);
+ }
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k of W now holds */
+
+/* W(k) = L(k)*D(k) */
+
+/* where L(k) is the k-th column of L */
+
+/* Store L(k) in column k of A */
+
+ i__1 = *n - k + 1;
+ ccopy_(&i__1, &w[k + k * w_dim1], &c__1, &a[k + k * a_dim1], &
+ c__1);
+ if (k < *n) {
+ c_div(&q__1, &c_b1, &a[k + k * a_dim1]);
+ r1.r = q__1.r, r1.i = q__1.i;
+ i__1 = *n - k;
+ cscal_(&i__1, &r1, &a[k + 1 + k * a_dim1], &c__1);
+ }
+ } else {
+
+/* 2-by-2 pivot block D(k): columns k and k+1 of W now hold */
+
+/* ( W(k) W(k+1) ) = ( L(k) L(k+1) )*D(k) */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th columns */
+/* of L */
+
+ if (k < *n - 1) {
+
+/* Store L(k) and L(k+1) in columns k and k+1 of A */
+
+ i__1 = k + 1 + k * w_dim1;
+ d21.r = w[i__1].r, d21.i = w[i__1].i;
+ c_div(&q__1, &w[k + 1 + (k + 1) * w_dim1], &d21);
+ d11.r = q__1.r, d11.i = q__1.i;
+ c_div(&q__1, &w[k + k * w_dim1], &d21);
+ d22.r = q__1.r, d22.i = q__1.i;
+ q__3.r = d11.r * d22.r - d11.i * d22.i, q__3.i = d11.r *
+ d22.i + d11.i * d22.r;
+ q__2.r = q__3.r - 1.f, q__2.i = q__3.i - 0.f;
+ c_div(&q__1, &c_b1, &q__2);
+ t.r = q__1.r, t.i = q__1.i;
+ c_div(&q__1, &t, &d21);
+ d21.r = q__1.r, d21.i = q__1.i;
+ i__1 = *n;
+ for (j = k + 2; j <= i__1; ++j) {
+ i__2 = j + k * a_dim1;
+ i__3 = j + k * w_dim1;
+ q__3.r = d11.r * w[i__3].r - d11.i * w[i__3].i,
+ q__3.i = d11.r * w[i__3].i + d11.i * w[i__3]
+ .r;
+ i__4 = j + (k + 1) * w_dim1;
+ q__2.r = q__3.r - w[i__4].r, q__2.i = q__3.i - w[i__4]
+ .i;
+ q__1.r = d21.r * q__2.r - d21.i * q__2.i, q__1.i =
+ d21.r * q__2.i + d21.i * q__2.r;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ i__2 = j + (k + 1) * a_dim1;
+ i__3 = j + (k + 1) * w_dim1;
+ q__3.r = d22.r * w[i__3].r - d22.i * w[i__3].i,
+ q__3.i = d22.r * w[i__3].i + d22.i * w[i__3]
+ .r;
+ i__4 = j + k * w_dim1;
+ q__2.r = q__3.r - w[i__4].r, q__2.i = q__3.i - w[i__4]
+ .i;
+ q__1.r = d21.r * q__2.r - d21.i * q__2.i, q__1.i =
+ d21.r * q__2.i + d21.i * q__2.r;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+/* L80: */
+ }
+ }
+
+/* Copy D(k) to A */
+
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * w_dim1;
+ a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+ i__1 = k + 1 + k * a_dim1;
+ i__2 = k + 1 + k * w_dim1;
+ a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+ i__1 = k + 1 + (k + 1) * a_dim1;
+ i__2 = k + 1 + (k + 1) * w_dim1;
+ a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k + 1] = -kp;
+ }
+
+/* Increase K and return to the start of the main loop */
+
+ k += kstep;
+ goto L70;
+
+L90:
+
+/* Update the lower triangle of A22 (= A(k:n,k:n)) as */
+
+/* A22 := A22 - L21*D*L21' = A22 - L21*W' */
+
+/* computing blocks of NB columns at a time */
+
+ i__1 = *n;
+ i__2 = *nb;
+ for (j = k; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__3 = *nb, i__4 = *n - j + 1;
+ jb = min(i__3,i__4);
+
+/* Update the lower triangle of the diagonal block */
+
+ i__3 = j + jb - 1;
+ for (jj = j; jj <= i__3; ++jj) {
+ i__4 = j + jb - jj;
+ i__5 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__4, &i__5, &q__1, &a[jj + a_dim1],
+ lda, &w[jj + w_dim1], ldw, &c_b1, &a[jj + jj * a_dim1]
+, &c__1);
+/* L100: */
+ }
+
+/* Update the rectangular subdiagonal block */
+
+ if (j + jb <= *n) {
+ i__3 = *n - j - jb + 1;
+ i__4 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "Transpose", &i__3, &jb, &i__4, &q__1,
+ &a[j + jb + a_dim1], lda, &w[j + w_dim1], ldw, &c_b1,
+ &a[j + jb + j * a_dim1], lda);
+ }
+/* L110: */
+ }
+
+/* Put L21 in standard form by partially undoing the interchanges */
+/* in columns 1:k-1 */
+
+ j = k - 1;
+L120:
+ jj = j;
+ jp = ipiv[j];
+ if (jp < 0) {
+ jp = -jp;
+ --j;
+ }
+ --j;
+ if (jp != jj && j >= 1) {
+ cswap_(&j, &a[jp + a_dim1], lda, &a[jj + a_dim1], lda);
+ }
+ if (j >= 1) {
+ goto L120;
+ }
+
+/* Set KB to the number of columns factorized */
+
+ *kb = k - 1;
+
+ }
+ return 0;
+
+/* End of CLASYF */
+
+} /* clasyf_ */
diff --git a/contrib/libs/clapack/clatbs.c b/contrib/libs/clapack/clatbs.c
new file mode 100644
index 0000000000..646e4f4c15
--- /dev/null
+++ b/contrib/libs/clapack/clatbs.c
@@ -0,0 +1,1193 @@
+/* clatbs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b36 = .5f;
+
+/* Subroutine */ int clatbs_(char *uplo, char *trans, char *diag, char *
+ normin, integer *n, integer *kd, complex *ab, integer *ldab, complex *
+ x, real *scale, real *cnorm, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j;
+ real xj, rec, tjj;
+ integer jinc, jlen;
+ real xbnd;
+ integer imax;
+ real tmax;
+ complex tjjs;
+ real xmax, grow;
+ integer maind;
+ extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ real tscal;
+ complex uscal;
+ integer jlast;
+ extern /* Complex */ VOID cdotu_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ complex csumj;
+ extern /* Subroutine */ int ctbsv_(char *, char *, char *, integer *,
+ integer *, complex *, integer *, complex *, integer *), caxpy_(integer *, complex *, complex *, integer *
+, complex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int slabad_(real *, real *);
+ extern integer icamax_(integer *, complex *, integer *);
+ extern /* Complex */ VOID cladiv_(complex *, complex *, complex *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *), xerbla_(char *, integer *);
+ real bignum;
+ extern integer isamax_(integer *, real *, integer *);
+ extern doublereal scasum_(integer *, complex *, integer *);
+ logical notran;
+ integer jfirst;
+ real smlnum;
+ logical nounit;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLATBS solves one of the triangular systems */
+
+/* A * x = s*b, A**T * x = s*b, or A**H * x = s*b, */
+
+/* with scaling to prevent overflow, where A is an upper or lower */
+/* triangular band matrix. Here A' denotes the transpose of A, x and b */
+/* are n-element vectors, and s is a scaling factor, usually less than */
+/* or equal to 1, chosen so that the components of x will be less than */
+/* the overflow threshold. If the unscaled problem will not cause */
+/* overflow, the Level 2 BLAS routine CTBSV is called. If the matrix A */
+/* is singular (A(j,j) = 0 for some j), then s is set to 0 and a */
+/* non-trivial solution to A*x = 0 is returned. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the operation applied to A. */
+/* = 'N': Solve A * x = s*b (No transpose) */
+/* = 'T': Solve A**T * x = s*b (Transpose) */
+/* = 'C': Solve A**H * x = s*b (Conjugate transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* NORMIN (input) CHARACTER*1 */
+/* Specifies whether CNORM has been set or not. */
+/* = 'Y': CNORM contains the column norms on entry */
+/* = 'N': CNORM is not set on entry. On exit, the norms will */
+/* be computed and stored in CNORM. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of subdiagonals or superdiagonals in the */
+/* triangular matrix A. KD >= 0. */
+
+/* AB (input) COMPLEX array, dimension (LDAB,N) */
+/* The upper or lower triangular band matrix A, stored in the */
+/* first KD+1 rows of the array. The j-th column of A is stored */
+/* in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* X (input/output) COMPLEX array, dimension (N) */
+/* On entry, the right hand side b of the triangular system. */
+/* On exit, X is overwritten by the solution vector x. */
+
+/* SCALE (output) REAL */
+/* The scaling factor s for the triangular system */
+/* A * x = s*b, A**T * x = s*b, or A**H * x = s*b. */
+/* If SCALE = 0, the matrix A is singular or badly scaled, and */
+/* the vector x is an exact or approximate solution to A*x = 0. */
+
+/* CNORM (input or output) REAL array, dimension (N) */
+
+/* If NORMIN = 'Y', CNORM is an input argument and CNORM(j) */
+/* contains the norm of the off-diagonal part of the j-th column */
+/* of A. If TRANS = 'N', CNORM(j) must be greater than or equal */
+/* to the infinity-norm, and if TRANS = 'T' or 'C', CNORM(j) */
+/* must be greater than or equal to the 1-norm. */
+
+/* If NORMIN = 'N', CNORM is an output argument and CNORM(j) */
+/* returns the 1-norm of the offdiagonal part of the j-th column */
+/* of A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* Further Details */
+/* ======= ======= */
+
+/* A rough bound on x is computed; if that is less than overflow, CTBSV */
+/* is called, otherwise, specific code is used which checks for possible */
+/* overflow or divide-by-zero at every operation. */
+
+/* A columnwise scheme is used for solving A*x = b. The basic algorithm */
+/* if A is lower triangular is */
+
+/* x[1:n] := b[1:n] */
+/* for j = 1, ..., n */
+/* x(j) := x(j) / A(j,j) */
+/* x[j+1:n] := x[j+1:n] - x(j) * A[j+1:n,j] */
+/* end */
+
+/* Define bounds on the components of x after j iterations of the loop: */
+/* M(j) = bound on x[1:j] */
+/* G(j) = bound on x[j+1:n] */
+/* Initially, let M(0) = 0 and G(0) = max{x(i), i=1,...,n}. */
+
+/* Then for iteration j+1 we have */
+/* M(j+1) <= G(j) / | A(j+1,j+1) | */
+/* G(j+1) <= G(j) + M(j+1) * | A[j+2:n,j+1] | */
+/* <= G(j) ( 1 + CNORM(j+1) / | A(j+1,j+1) | ) */
+
+/* where CNORM(j+1) is greater than or equal to the infinity-norm of */
+/* column j+1 of A, not counting the diagonal. Hence */
+
+/* G(j) <= G(0) product ( 1 + CNORM(i) / | A(i,i) | ) */
+/* 1<=i<=j */
+/* and */
+
+/* |x(j)| <= ( G(0) / |A(j,j)| ) product ( 1 + CNORM(i) / |A(i,i)| ) */
+/* 1<=i< j */
+
+/* Since |x(j)| <= M(j), we use the Level 2 BLAS routine CTBSV if the */
+/* reciprocal of the largest M(j), j=1,..,n, is larger than */
+/* max(underflow, 1/overflow). */
+
+/* The bound on x(j) is also used to determine when a step in the */
+/* columnwise method can be performed without fear of overflow. If */
+/* the computed bound is greater than a large constant, x is scaled to */
+/* prevent overflow, but if the bound overflows, x is set to 0, x(j) to */
+/* 1, and scale to 0, and a non-trivial solution to A*x = 0 is found. */
+
+/* Similarly, a row-wise scheme is used to solve A**T *x = b or */
+/* A**H *x = b. The basic algorithm for A upper triangular is */
+
+/* for j = 1, ..., n */
+/* x(j) := ( b(j) - A[1:j-1,j]' * x[1:j-1] ) / A(j,j) */
+/* end */
+
+/* We simultaneously compute two bounds */
+/* G(j) = bound on ( b(i) - A[1:i-1,i]' * x[1:i-1] ), 1<=i<=j */
+/* M(j) = bound on x(i), 1<=i<=j */
+
+/* The initial values are G(0) = 0, M(0) = max{b(i), i=1,..,n}, and we */
+/* add the constraint G(j) >= G(j-1) and M(j) >= M(j-1) for j >= 1. */
+/* Then the bound on x(j) is */
+
+/* M(j) <= M(j-1) * ( 1 + CNORM(j) ) / | A(j,j) | */
+
+/* <= M(0) * product ( ( 1 + CNORM(i) ) / |A(i,i)| ) */
+/* 1<=i<=j */
+
+/* and we can safely call CTBSV if 1/M(n) and 1/G(n) are both greater */
+/* than max(underflow, 1/overflow). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --x;
+ --cnorm;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+/* Test the input parameters. */
+
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (! lsame_(normin, "Y") && ! lsame_(normin,
+ "N")) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*kd < 0) {
+ *info = -6;
+ } else if (*ldab < *kd + 1) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLATBS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine machine dependent parameters to control overflow. */
+
+ smlnum = slamch_("Safe minimum");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum /= slamch_("Precision");
+ bignum = 1.f / smlnum;
+ *scale = 1.f;
+
+ if (lsame_(normin, "N")) {
+
+/* Compute the 1-norm of each column, not including the diagonal. */
+
+ if (upper) {
+
+/* A is upper triangular. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *kd, i__3 = j - 1;
+ jlen = min(i__2,i__3);
+ cnorm[j] = scasum_(&jlen, &ab[*kd + 1 - jlen + j * ab_dim1], &
+ c__1);
+/* L10: */
+ }
+ } else {
+
+/* A is lower triangular. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *kd, i__3 = *n - j;
+ jlen = min(i__2,i__3);
+ if (jlen > 0) {
+ cnorm[j] = scasum_(&jlen, &ab[j * ab_dim1 + 2], &c__1);
+ } else {
+ cnorm[j] = 0.f;
+ }
+/* L20: */
+ }
+ }
+ }
+
+/* Scale the column norms by TSCAL if the maximum element in CNORM is */
+/* greater than BIGNUM/2. */
+
+ imax = isamax_(n, &cnorm[1], &c__1);
+ tmax = cnorm[imax];
+ if (tmax <= bignum * .5f) {
+ tscal = 1.f;
+ } else {
+ tscal = .5f / (smlnum * tmax);
+ sscal_(n, &tscal, &cnorm[1], &c__1);
+ }
+
+/* Compute a bound on the computed solution vector to see if the */
+/* Level 2 BLAS routine CTBSV can be used. */
+
+ xmax = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j;
+ r__3 = xmax, r__4 = (r__1 = x[i__2].r / 2.f, dabs(r__1)) + (r__2 =
+ r_imag(&x[j]) / 2.f, dabs(r__2));
+ xmax = dmax(r__3,r__4);
+/* L30: */
+ }
+ xbnd = xmax;
+ if (notran) {
+
+/* Compute the growth in A * x = b. */
+
+ if (upper) {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ maind = *kd + 1;
+ } else {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ maind = 1;
+ }
+
+ if (tscal != 1.f) {
+ grow = 0.f;
+ goto L60;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, G(0) = max{x(i), i=1,...,n}. */
+
+ grow = .5f / dmax(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L60;
+ }
+
+ i__3 = maind + j * ab_dim1;
+ tjjs.r = ab[i__3].r, tjjs.i = ab[i__3].i;
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+
+ if (tjj >= smlnum) {
+
+/* M(j) = G(j-1) / abs(A(j,j)) */
+
+/* Computing MIN */
+ r__1 = xbnd, r__2 = dmin(1.f,tjj) * grow;
+ xbnd = dmin(r__1,r__2);
+ } else {
+
+/* M(j) could overflow, set XBND to 0. */
+
+ xbnd = 0.f;
+ }
+
+ if (tjj + cnorm[j] >= smlnum) {
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) / abs(A(j,j)) ) */
+
+ grow *= tjj / (tjj + cnorm[j]);
+ } else {
+
+/* G(j) could overflow, set GROW to 0. */
+
+ grow = 0.f;
+ }
+/* L40: */
+ }
+ grow = xbnd;
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = .5f / dmax(xbnd,smlnum);
+ grow = dmin(r__1,r__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L60;
+ }
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) ) */
+
+ grow *= 1.f / (cnorm[j] + 1.f);
+/* L50: */
+ }
+ }
+L60:
+
+ ;
+ } else {
+
+/* Compute the growth in A**T * x = b or A**H * x = b. */
+
+ if (upper) {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ maind = *kd + 1;
+ } else {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ maind = 1;
+ }
+
+ if (tscal != 1.f) {
+ grow = 0.f;
+ goto L90;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, M(0) = max{x(i), i=1,...,n}. */
+
+ grow = .5f / dmax(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L90;
+ }
+
+/* G(j) = max( G(j-1), M(j-1)*( 1 + CNORM(j) ) ) */
+
+ xj = cnorm[j] + 1.f;
+/* Computing MIN */
+ r__1 = grow, r__2 = xbnd / xj;
+ grow = dmin(r__1,r__2);
+
+ i__3 = maind + j * ab_dim1;
+ tjjs.r = ab[i__3].r, tjjs.i = ab[i__3].i;
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+
+ if (tjj >= smlnum) {
+
+/* M(j) = M(j-1)*( 1 + CNORM(j) ) / abs(A(j,j)) */
+
+ if (xj > tjj) {
+ xbnd *= tjj / xj;
+ }
+ } else {
+
+/* M(j) could overflow, set XBND to 0. */
+
+ xbnd = 0.f;
+ }
+/* L70: */
+ }
+ grow = dmin(grow,xbnd);
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = .5f / dmax(xbnd,smlnum);
+ grow = dmin(r__1,r__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L90;
+ }
+
+/* G(j) = ( 1 + CNORM(j) )*G(j-1) */
+
+ xj = cnorm[j] + 1.f;
+ grow /= xj;
+/* L80: */
+ }
+ }
+L90:
+ ;
+ }
+
+ if (grow * tscal > smlnum) {
+
+/* Use the Level 2 BLAS solve if the reciprocal of the bound on */
+/* elements of X is not too small. */
+
+ ctbsv_(uplo, trans, diag, n, kd, &ab[ab_offset], ldab, &x[1], &c__1);
+ } else {
+
+/* Use a Level 1 BLAS solve, scaling intermediate results. */
+
+ if (xmax > bignum * .5f) {
+
+/* Scale X so that its components are less than or equal to */
+/* BIGNUM in absolute value. */
+
+ *scale = bignum * .5f / xmax;
+ csscal_(n, scale, &x[1], &c__1);
+ xmax = bignum;
+ } else {
+ xmax *= 2.f;
+ }
+
+ if (notran) {
+
+/* Solve A * x = b */
+
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) / A(j,j), scaling x if necessary. */
+
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]),
+ dabs(r__2));
+ if (nounit) {
+ i__3 = maind + j * ab_dim1;
+ q__1.r = tscal * ab[i__3].r, q__1.i = tscal * ab[i__3].i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ if (tscal == 1.f) {
+ goto L105;
+ }
+ }
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.f) {
+ if (xj > tjj * bignum) {
+
+/* Scale x by 1/b(j). */
+
+ rec = 1.f / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]
+ ), dabs(r__2));
+ } else if (tjj > 0.f) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM */
+/* to avoid overflow when dividing by A(j,j). */
+
+ rec = tjj * bignum / xj;
+ if (cnorm[j] > 1.f) {
+
+/* Scale by 1/CNORM(j) to avoid overflow when */
+/* multiplying x(j) times column j. */
+
+ rec /= cnorm[j];
+ }
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]
+ ), dabs(r__2));
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0, and compute a solution to A*x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0.f, x[i__4].i = 0.f;
+/* L100: */
+ }
+ i__3 = j;
+ x[i__3].r = 1.f, x[i__3].i = 0.f;
+ xj = 1.f;
+ *scale = 0.f;
+ xmax = 0.f;
+ }
+L105:
+
+/* Scale x if necessary to avoid overflow when adding a */
+/* multiple of column j of A. */
+
+ if (xj > 1.f) {
+ rec = 1.f / xj;
+ if (cnorm[j] > (bignum - xmax) * rec) {
+
+/* Scale x by 1/(2*abs(x(j))). */
+
+ rec *= .5f;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ } else if (xj * cnorm[j] > bignum - xmax) {
+
+/* Scale x by 1/2. */
+
+ csscal_(n, &c_b36, &x[1], &c__1);
+ *scale *= .5f;
+ }
+
+ if (upper) {
+ if (j > 1) {
+
+/* Compute the update */
+/* x(max(1,j-kd):j-1) := x(max(1,j-kd):j-1) - */
+/* x(j)* A(max(1,j-kd):j-1,j) */
+
+/* Computing MIN */
+ i__3 = *kd, i__4 = j - 1;
+ jlen = min(i__3,i__4);
+ i__3 = j;
+ q__2.r = -x[i__3].r, q__2.i = -x[i__3].i;
+ q__1.r = tscal * q__2.r, q__1.i = tscal * q__2.i;
+ caxpy_(&jlen, &q__1, &ab[*kd + 1 - jlen + j * ab_dim1]
+, &c__1, &x[j - jlen], &c__1);
+ i__3 = j - 1;
+ i__ = icamax_(&i__3, &x[1], &c__1);
+ i__3 = i__;
+ xmax = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[i__]), dabs(r__2));
+ }
+ } else if (j < *n) {
+
+/* Compute the update */
+/* x(j+1:min(j+kd,n)) := x(j+1:min(j+kd,n)) - */
+/* x(j) * A(j+1:min(j+kd,n),j) */
+
+/* Computing MIN */
+ i__3 = *kd, i__4 = *n - j;
+ jlen = min(i__3,i__4);
+ if (jlen > 0) {
+ i__3 = j;
+ q__2.r = -x[i__3].r, q__2.i = -x[i__3].i;
+ q__1.r = tscal * q__2.r, q__1.i = tscal * q__2.i;
+ caxpy_(&jlen, &q__1, &ab[j * ab_dim1 + 2], &c__1, &x[
+ j + 1], &c__1);
+ }
+ i__3 = *n - j;
+ i__ = j + icamax_(&i__3, &x[j + 1], &c__1);
+ i__3 = i__;
+ xmax = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[
+ i__]), dabs(r__2));
+ }
+/* L110: */
+ }
+
+ } else if (lsame_(trans, "T")) {
+
+/* Solve A**T * x = b */
+
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Compute x(j) = b(j) - sum A(k,j)*x(k). */
+/* k<>j */
+
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]),
+ dabs(r__2));
+ uscal.r = tscal, uscal.i = 0.f;
+ rec = 1.f / dmax(xmax,1.f);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5f;
+ if (nounit) {
+ i__3 = maind + j * ab_dim1;
+ q__1.r = tscal * ab[i__3].r, q__1.i = tscal * ab[i__3]
+ .i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ }
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > 1.f) {
+
+/* Divide by A(j,j) when scaling x if A(j,j) > 1. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = rec * tjj;
+ rec = dmin(r__1,r__2);
+ cladiv_(&q__1, &uscal, &tjjs);
+ uscal.r = q__1.r, uscal.i = q__1.i;
+ }
+ if (rec < 1.f) {
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ csumj.r = 0.f, csumj.i = 0.f;
+ if (uscal.r == 1.f && uscal.i == 0.f) {
+
+/* If the scaling needed for A in the dot product is 1, */
+/* call CDOTU to perform the dot product. */
+
+ if (upper) {
+/* Computing MIN */
+ i__3 = *kd, i__4 = j - 1;
+ jlen = min(i__3,i__4);
+ cdotu_(&q__1, &jlen, &ab[*kd + 1 - jlen + j * ab_dim1]
+, &c__1, &x[j - jlen], &c__1);
+ csumj.r = q__1.r, csumj.i = q__1.i;
+ } else {
+/* Computing MIN */
+ i__3 = *kd, i__4 = *n - j;
+ jlen = min(i__3,i__4);
+ if (jlen > 1) {
+ cdotu_(&q__1, &jlen, &ab[j * ab_dim1 + 2], &c__1,
+ &x[j + 1], &c__1);
+ csumj.r = q__1.r, csumj.i = q__1.i;
+ }
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+/* Computing MIN */
+ i__3 = *kd, i__4 = j - 1;
+ jlen = min(i__3,i__4);
+ i__3 = jlen;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = *kd + i__ - jlen + j * ab_dim1;
+ q__3.r = ab[i__4].r * uscal.r - ab[i__4].i *
+ uscal.i, q__3.i = ab[i__4].r * uscal.i +
+ ab[i__4].i * uscal.r;
+ i__5 = j - jlen - 1 + i__;
+ q__2.r = q__3.r * x[i__5].r - q__3.i * x[i__5].i,
+ q__2.i = q__3.r * x[i__5].i + q__3.i * x[
+ i__5].r;
+ q__1.r = csumj.r + q__2.r, q__1.i = csumj.i +
+ q__2.i;
+ csumj.r = q__1.r, csumj.i = q__1.i;
+/* L120: */
+ }
+ } else {
+/* Computing MIN */
+ i__3 = *kd, i__4 = *n - j;
+ jlen = min(i__3,i__4);
+ i__3 = jlen;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + 1 + j * ab_dim1;
+ q__3.r = ab[i__4].r * uscal.r - ab[i__4].i *
+ uscal.i, q__3.i = ab[i__4].r * uscal.i +
+ ab[i__4].i * uscal.r;
+ i__5 = j + i__;
+ q__2.r = q__3.r * x[i__5].r - q__3.i * x[i__5].i,
+ q__2.i = q__3.r * x[i__5].i + q__3.i * x[
+ i__5].r;
+ q__1.r = csumj.r + q__2.r, q__1.i = csumj.i +
+ q__2.i;
+ csumj.r = q__1.r, csumj.i = q__1.i;
+/* L130: */
+ }
+ }
+ }
+
+ q__1.r = tscal, q__1.i = 0.f;
+ if (uscal.r == q__1.r && uscal.i == q__1.i) {
+
+/* Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j) */
+/* was not used to scale the dotproduct. */
+
+ i__3 = j;
+ i__4 = j;
+ q__1.r = x[i__4].r - csumj.r, q__1.i = x[i__4].i -
+ csumj.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]
+ ), dabs(r__2));
+ if (nounit) {
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ i__3 = maind + j * ab_dim1;
+ q__1.r = tscal * ab[i__3].r, q__1.i = tscal * ab[i__3]
+ .i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ if (tscal == 1.f) {
+ goto L145;
+ }
+ }
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.f) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1.f / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ } else if (tjj > 0.f) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0 and compute a solution to A**T *x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0.f, x[i__4].i = 0.f;
+/* L140: */
+ }
+ i__3 = j;
+ x[i__3].r = 1.f, x[i__3].i = 0.f;
+ *scale = 0.f;
+ xmax = 0.f;
+ }
+L145:
+ ;
+ } else {
+
+/* Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot */
+/* product has already been divided by 1/A(j,j). */
+
+ i__3 = j;
+ cladiv_(&q__2, &x[j], &tjjs);
+ q__1.r = q__2.r - csumj.r, q__1.i = q__2.i - csumj.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ }
+/* Computing MAX */
+ i__3 = j;
+ r__3 = xmax, r__4 = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[j]), dabs(r__2));
+ xmax = dmax(r__3,r__4);
+/* L150: */
+ }
+
+ } else {
+
+/* Solve A**H * x = b */
+
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) - sum A(k,j)*x(k). */
+/* k<>j */
+
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]),
+ dabs(r__2));
+ uscal.r = tscal, uscal.i = 0.f;
+ rec = 1.f / dmax(xmax,1.f);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5f;
+ if (nounit) {
+ r_cnjg(&q__2, &ab[maind + j * ab_dim1]);
+ q__1.r = tscal * q__2.r, q__1.i = tscal * q__2.i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ }
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > 1.f) {
+
+/* Divide by A(j,j) when scaling x if A(j,j) > 1. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = rec * tjj;
+ rec = dmin(r__1,r__2);
+ cladiv_(&q__1, &uscal, &tjjs);
+ uscal.r = q__1.r, uscal.i = q__1.i;
+ }
+ if (rec < 1.f) {
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ csumj.r = 0.f, csumj.i = 0.f;
+ if (uscal.r == 1.f && uscal.i == 0.f) {
+
+/* If the scaling needed for A in the dot product is 1, */
+/* call CDOTC to perform the dot product. */
+
+ if (upper) {
+/* Computing MIN */
+ i__3 = *kd, i__4 = j - 1;
+ jlen = min(i__3,i__4);
+ cdotc_(&q__1, &jlen, &ab[*kd + 1 - jlen + j * ab_dim1]
+, &c__1, &x[j - jlen], &c__1);
+ csumj.r = q__1.r, csumj.i = q__1.i;
+ } else {
+/* Computing MIN */
+ i__3 = *kd, i__4 = *n - j;
+ jlen = min(i__3,i__4);
+ if (jlen > 1) {
+ cdotc_(&q__1, &jlen, &ab[j * ab_dim1 + 2], &c__1,
+ &x[j + 1], &c__1);
+ csumj.r = q__1.r, csumj.i = q__1.i;
+ }
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+/* Computing MIN */
+ i__3 = *kd, i__4 = j - 1;
+ jlen = min(i__3,i__4);
+ i__3 = jlen;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ r_cnjg(&q__4, &ab[*kd + i__ - jlen + j * ab_dim1])
+ ;
+ q__3.r = q__4.r * uscal.r - q__4.i * uscal.i,
+ q__3.i = q__4.r * uscal.i + q__4.i *
+ uscal.r;
+ i__4 = j - jlen - 1 + i__;
+ q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i,
+ q__2.i = q__3.r * x[i__4].i + q__3.i * x[
+ i__4].r;
+ q__1.r = csumj.r + q__2.r, q__1.i = csumj.i +
+ q__2.i;
+ csumj.r = q__1.r, csumj.i = q__1.i;
+/* L160: */
+ }
+ } else {
+/* Computing MIN */
+ i__3 = *kd, i__4 = *n - j;
+ jlen = min(i__3,i__4);
+ i__3 = jlen;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ r_cnjg(&q__4, &ab[i__ + 1 + j * ab_dim1]);
+ q__3.r = q__4.r * uscal.r - q__4.i * uscal.i,
+ q__3.i = q__4.r * uscal.i + q__4.i *
+ uscal.r;
+ i__4 = j + i__;
+ q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i,
+ q__2.i = q__3.r * x[i__4].i + q__3.i * x[
+ i__4].r;
+ q__1.r = csumj.r + q__2.r, q__1.i = csumj.i +
+ q__2.i;
+ csumj.r = q__1.r, csumj.i = q__1.i;
+/* L170: */
+ }
+ }
+ }
+
+ q__1.r = tscal, q__1.i = 0.f;
+ if (uscal.r == q__1.r && uscal.i == q__1.i) {
+
+/* Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j) */
+/* was not used to scale the dotproduct. */
+
+ i__3 = j;
+ i__4 = j;
+ q__1.r = x[i__4].r - csumj.r, q__1.i = x[i__4].i -
+ csumj.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]
+ ), dabs(r__2));
+ if (nounit) {
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ r_cnjg(&q__2, &ab[maind + j * ab_dim1]);
+ q__1.r = tscal * q__2.r, q__1.i = tscal * q__2.i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ if (tscal == 1.f) {
+ goto L185;
+ }
+ }
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.f) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1.f / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ } else if (tjj > 0.f) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0 and compute a solution to A**H *x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0.f, x[i__4].i = 0.f;
+/* L180: */
+ }
+ i__3 = j;
+ x[i__3].r = 1.f, x[i__3].i = 0.f;
+ *scale = 0.f;
+ xmax = 0.f;
+ }
+L185:
+ ;
+ } else {
+
+/* Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot */
+/* product has already been divided by 1/A(j,j). */
+
+ i__3 = j;
+ cladiv_(&q__2, &x[j], &tjjs);
+ q__1.r = q__2.r - csumj.r, q__1.i = q__2.i - csumj.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ }
+/* Computing MAX */
+ i__3 = j;
+ r__3 = xmax, r__4 = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[j]), dabs(r__2));
+ xmax = dmax(r__3,r__4);
+/* L190: */
+ }
+ }
+ *scale /= tscal;
+ }
+
+/* Scale the column norms by 1/TSCAL for return. */
+
+ if (tscal != 1.f) {
+ r__1 = 1.f / tscal;
+ sscal_(n, &r__1, &cnorm[1], &c__1);
+ }
+
+ return 0;
+
+/* End of CLATBS */
+
+} /* clatbs_ */
diff --git a/contrib/libs/clapack/clatdf.c b/contrib/libs/clapack/clatdf.c
new file mode 100644
index 0000000000..89c38a297e
--- /dev/null
+++ b/contrib/libs/clapack/clatdf.c
@@ -0,0 +1,357 @@
+/* clatdf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b24 = 1.f;
+
+/* Subroutine */ int clatdf_(integer *ijob, integer *n, complex *z__, integer
+        *ldz, complex *rhs, real *rdsum, real *rdscal, integer *ipiv, integer
+        *jpiv)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5;
+    complex q__1, q__2, q__3;
+
+    /* Builtin functions */
+    void c_div(complex *, complex *, complex *);
+    double c_abs(complex *);
+    void c_sqrt(complex *, complex *);
+
+    /* Local variables */
+    integer i__, j, k;
+    complex bm, bp, xm[2], xp[2];
+    integer info;
+    complex temp, work[8];
+    extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+            integer *);
+    real scale;
+    extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+            *, complex *, integer *);
+    extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+            complex *, integer *);
+    complex pmone;
+    extern /* Subroutine */ int caxpy_(integer *, complex *, complex *,
+            integer *, complex *, integer *);
+    real rtemp, sminu, rwork[2], splus;
+    extern /* Subroutine */ int cgesc2_(integer *, complex *, integer *,
+            complex *, integer *, integer *, real *), cgecon_(char *, integer
+            *, complex *, integer *, real *, real *, complex *, real *,
+            integer *), classq_(integer *, complex *, integer *, real
+            *, real *), claswp_(integer *, complex *, integer *, integer *,
+            integer *, integer *, integer *);
+    extern doublereal scasum_(integer *, complex *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLATDF computes the contribution to the reciprocal Dif-estimate */
+/* by solving for x in Z * x = b, where b is chosen such that the norm */
+/* of x is as large as possible. It is assumed that LU decomposition */
+/* of Z has been computed by CGETC2. On entry RHS = f holds the */
+/* contribution from earlier solved sub-systems, and on return RHS = x. */
+
+/* The factorization of Z returned by CGETC2 has the form */
+/* Z = P * L * U * Q, where P and Q are permutation matrices. L is lower */
+/* triangular with unit diagonal elements and U is upper triangular. */
+
+/* Arguments */
+/* ========= */
+
+/* IJOB (input) INTEGER */
+/* IJOB = 2: First compute an approximative null-vector e */
+/* of Z using CGECON, e is normalized and solve for */
+/* Zx = +-e - f with the sign giving the greater value of */
+/* 2-norm(x). About 5 times as expensive as Default. */
+/* IJOB .ne. 2: Local look ahead strategy where */
+/* all entries of the r.h.s. b are chosen as either +1 or */
+/* -1. Default. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Z. */
+
+/* Z (input) COMPLEX array, dimension (LDZ, N) */
+/* On entry, the LU part of the factorization of the n-by-n */
+/* matrix Z computed by CGETC2: Z = P * L * U * Q */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDA >= max(1, N). */
+
+/* RHS (input/output) COMPLEX array, dimension (N). */
+/* On entry, RHS contains contributions from other subsystems. */
+/* On exit, RHS contains the solution of the subsystem with */
+/* entries according to the value of IJOB (see above). */
+
+/* RDSUM (input/output) REAL */
+/* On entry, the sum of squares of computed contributions to */
+/* the Dif-estimate under computation by CTGSYL, where the */
+/* scaling factor RDSCAL (see below) has been factored out. */
+/* On exit, the corresponding sum of squares updated with the */
+/* contributions from the current sub-system. */
+/* If TRANS = 'T' RDSUM is not touched. */
+/* NOTE: RDSUM only makes sense when CTGSY2 is called by CTGSYL. */
+
+/* RDSCAL (input/output) REAL */
+/* On entry, scaling factor used to prevent overflow in RDSUM. */
+/* On exit, RDSCAL is updated w.r.t. the current contributions */
+/* in RDSUM. */
+/* If TRANS = 'T', RDSCAL is not touched. */
+/* NOTE: RDSCAL only makes sense when CTGSY2 is called by */
+/* CTGSYL. */
+
+/* IPIV (input) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= i <= N, row i of the */
+/* matrix has been interchanged with row IPIV(i). */
+
+/* JPIV (input) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= j <= N, column j of the */
+/* matrix has been interchanged with column JPIV(j). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* This routine is a further developed implementation of algorithm */
+/* BSOLVE in [1] using complete pivoting in the LU factorization. */
+
+/* [1] Bo Kagstrom and Lars Westin, */
+/* Generalized Schur Methods with Condition Estimators for */
+/* Solving the Generalized Sylvester Equation, IEEE Transactions */
+/* on Automatic Control, Vol. 34, No. 7, July 1989, pp 745-751. */
+
+/* [2] Peter Poromaa, */
+/* On Efficient and Robust Estimators for the Separation */
+/* between two Regular Matrix Pairs with Applications in */
+/* Condition Estimation. Report UMINF-95.05, Department of */
+/* Computing Science, Umea University, S-901 87 Umea, Sweden, */
+/* 1995. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --rhs;
+    --ipiv;
+    --jpiv;
+
+    /* Function Body */
+    if (*ijob != 2) {
+
+/* Apply permutations IPIV to RHS */
+
+        i__1 = *n - 1;
+        claswp_(&c__1, &rhs[1], ldz, &c__1, &i__1, &ipiv[1], &c__1);
+
+/* Solve for L-part choosing RHS either to +1 or -1. */
+
+        q__1.r = -1.f, q__1.i = -0.f;
+        pmone.r = q__1.r, pmone.i = q__1.i;
+        i__1 = *n - 1;
+        for (j = 1; j <= i__1; ++j) {
+            i__2 = j;
+            q__1.r = rhs[i__2].r + 1.f, q__1.i = rhs[i__2].i + 0.f;
+            bp.r = q__1.r, bp.i = q__1.i;
+            i__2 = j;
+            q__1.r = rhs[i__2].r - 1.f, q__1.i = rhs[i__2].i - 0.f;
+            bm.r = q__1.r, bm.i = q__1.i;
+            splus = 1.f;
+
+/* Look-ahead for L- part RHS(1:N-1) = +-1 */
+/* SPLUS and SMIN computed more efficiently than in BSOLVE[1]. */
+
+            i__2 = *n - j;
+            cdotc_(&q__1, &i__2, &z__[j + 1 + j * z_dim1], &c__1, &z__[j + 1
+                    + j * z_dim1], &c__1);
+            splus += q__1.r;
+            i__2 = *n - j;
+            cdotc_(&q__1, &i__2, &z__[j + 1 + j * z_dim1], &c__1, &rhs[j + 1],
+                     &c__1);
+            sminu = q__1.r;
+            i__2 = j;
+            splus *= rhs[i__2].r;
+            if (splus > sminu) {
+                i__2 = j;
+                rhs[i__2].r = bp.r, rhs[i__2].i = bp.i;
+            } else if (sminu > splus) {
+                i__2 = j;
+                rhs[i__2].r = bm.r, rhs[i__2].i = bm.i;
+            } else {
+
+/* In this case the updating sums are equal and we can */
+/* choose RHS(J) +1 or -1. The first time this happens we */
+/* choose -1, thereafter +1. This is a simple way to get */
+/* good estimates of matrices like Byers well-known example */
+/* (see [1]). (Not done in BSOLVE.) */
+
+                i__2 = j;
+                i__3 = j;
+                q__1.r = rhs[i__3].r + pmone.r, q__1.i = rhs[i__3].i +
+                        pmone.i;
+                rhs[i__2].r = q__1.r, rhs[i__2].i = q__1.i;
+                pmone.r = 1.f, pmone.i = 0.f;
+            }
+
+/* Compute the remaining r.h.s. */
+
+            i__2 = j;
+            q__1.r = -rhs[i__2].r, q__1.i = -rhs[i__2].i;
+            temp.r = q__1.r, temp.i = q__1.i;
+            i__2 = *n - j;
+            caxpy_(&i__2, &temp, &z__[j + 1 + j * z_dim1], &c__1, &rhs[j + 1],
+                     &c__1);
+/* L10: */
+        }
+
+/* Solve for U- part, look-ahead for RHS(N) = +-1. This is not done */
+/* In BSOLVE and will hopefully give us a better estimate because */
+/* any ill-conditioning of the original matrix is transferred to U */
+/* and not to L. U(N, N) is an approximation to sigma_min(LU). */
+
+        i__1 = *n - 1;
+        ccopy_(&i__1, &rhs[1], &c__1, work, &c__1);
+        i__1 = *n - 1;
+        i__2 = *n;
+        q__1.r = rhs[i__2].r + 1.f, q__1.i = rhs[i__2].i + 0.f;
+        work[i__1].r = q__1.r, work[i__1].i = q__1.i;
+        i__1 = *n;
+        i__2 = *n;
+        q__1.r = rhs[i__2].r - 1.f, q__1.i = rhs[i__2].i - 0.f;
+        rhs[i__1].r = q__1.r, rhs[i__1].i = q__1.i;
+        splus = 0.f;
+        sminu = 0.f;
+        for (i__ = *n; i__ >= 1; --i__) {
+            c_div(&q__1, &c_b1, &z__[i__ + i__ * z_dim1]);
+            temp.r = q__1.r, temp.i = q__1.i;
+            i__1 = i__ - 1;
+            i__2 = i__ - 1;
+            q__1.r = work[i__2].r * temp.r - work[i__2].i * temp.i, q__1.i =
+                    work[i__2].r * temp.i + work[i__2].i * temp.r;
+            work[i__1].r = q__1.r, work[i__1].i = q__1.i;
+            i__1 = i__;
+            i__2 = i__;
+            q__1.r = rhs[i__2].r * temp.r - rhs[i__2].i * temp.i, q__1.i =
+                    rhs[i__2].r * temp.i + rhs[i__2].i * temp.r;
+            rhs[i__1].r = q__1.r, rhs[i__1].i = q__1.i;
+            i__1 = *n;
+            for (k = i__ + 1; k <= i__1; ++k) {
+                i__2 = i__ - 1;
+                i__3 = i__ - 1;
+                i__4 = k - 1;
+                i__5 = i__ + k * z_dim1;
+                q__3.r = z__[i__5].r * temp.r - z__[i__5].i * temp.i, q__3.i =
+                         z__[i__5].r * temp.i + z__[i__5].i * temp.r;
+                q__2.r = work[i__4].r * q__3.r - work[i__4].i * q__3.i,
+                        q__2.i = work[i__4].r * q__3.i + work[i__4].i *
+                        q__3.r;
+                q__1.r = work[i__3].r - q__2.r, q__1.i = work[i__3].i -
+                        q__2.i;
+                work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+                i__2 = i__;
+                i__3 = i__;
+                i__4 = k;
+                i__5 = i__ + k * z_dim1;
+                q__3.r = z__[i__5].r * temp.r - z__[i__5].i * temp.i, q__3.i =
+                         z__[i__5].r * temp.i + z__[i__5].i * temp.r;
+                q__2.r = rhs[i__4].r * q__3.r - rhs[i__4].i * q__3.i, q__2.i =
+                         rhs[i__4].r * q__3.i + rhs[i__4].i * q__3.r;
+                q__1.r = rhs[i__3].r - q__2.r, q__1.i = rhs[i__3].i - q__2.i;
+                rhs[i__2].r = q__1.r, rhs[i__2].i = q__1.i;
+/* L20: */
+            }
+            splus += c_abs(&work[i__ - 1]);
+            sminu += c_abs(&rhs[i__]);
+/* L30: */
+        }
+        if (splus > sminu) {
+            ccopy_(n, work, &c__1, &rhs[1], &c__1);
+        }
+
+/* Apply the permutations JPIV to the computed solution (RHS) */
+
+        i__1 = *n - 1;
+        claswp_(&c__1, &rhs[1], ldz, &c__1, &i__1, &jpiv[1], &c_n1);
+
+/* Compute the sum of squares */
+
+        classq_(n, &rhs[1], &c__1, rdscal, rdsum);
+        return 0;
+    }
+
+/* ENTRY IJOB = 2 */
+
+/* Compute approximate null vector XM of Z */
+
+    cgecon_("I", n, &z__[z_offset], ldz, &c_b24, &rtemp, work, rwork, &info);
+    ccopy_(n, &work[*n], &c__1, xm, &c__1);
+
+/* Compute RHS */
+
+    i__1 = *n - 1;
+    claswp_(&c__1, xm, ldz, &c__1, &i__1, &ipiv[1], &c_n1);
+    cdotc_(&q__3, n, xm, &c__1, xm, &c__1);
+    c_sqrt(&q__2, &q__3);
+    c_div(&q__1, &c_b1, &q__2);
+    temp.r = q__1.r, temp.i = q__1.i;
+    cscal_(n, &temp, xm, &c__1);
+    ccopy_(n, xm, &c__1, xp, &c__1);
+    caxpy_(n, &c_b1, &rhs[1], &c__1, xp, &c__1);
+    q__1.r = -1.f, q__1.i = -0.f;
+    caxpy_(n, &q__1, xm, &c__1, &rhs[1], &c__1);
+    cgesc2_(n, &z__[z_offset], ldz, &rhs[1], &ipiv[1], &jpiv[1], &scale);
+    cgesc2_(n, &z__[z_offset], ldz, xp, &ipiv[1], &jpiv[1], &scale);
+    if (scasum_(n, xp, &c__1) > scasum_(n, &rhs[1], &c__1)) {
+        ccopy_(n, xp, &c__1, &rhs[1], &c__1);
+    }
+
+/* Compute the sum of squares */
+
+    classq_(n, &rhs[1], &c__1, rdscal, rdsum);
+    return 0;
+
+/* End of CLATDF */
+
+} /* clatdf_ */
diff --git a/contrib/libs/clapack/clatps.c b/contrib/libs/clapack/clatps.c
new file mode 100644
index 0000000000..c30b6d4601
--- /dev/null
+++ b/contrib/libs/clapack/clatps.c
@@ -0,0 +1,1161 @@
+/* clatps.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b36 = .5f;
+
+/* Subroutine */ int clatps_(char *uplo, char *trans, char *diag, char *
+ normin, integer *n, complex *ap, complex *x, real *scale, real *cnorm,
+ integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, ip;
+ real xj, rec, tjj;
+ integer jinc, jlen;
+ real xbnd;
+ integer imax;
+ real tmax;
+ complex tjjs;
+ real xmax, grow;
+ extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ real tscal;
+ complex uscal;
+ integer jlast;
+ extern /* Complex */ VOID cdotu_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ complex csumj;
+ extern /* Subroutine */ int caxpy_(integer *, complex *, complex *,
+ integer *, complex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int ctpsv_(char *, char *, char *, integer *,
+ complex *, complex *, integer *), slabad_(
+ real *, real *);
+ extern integer icamax_(integer *, complex *, integer *);
+ extern /* Complex */ VOID cladiv_(complex *, complex *, complex *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *), xerbla_(char *, integer *);
+ real bignum;
+ extern integer isamax_(integer *, real *, integer *);
+ extern doublereal scasum_(integer *, complex *, integer *);
+ logical notran;
+ integer jfirst;
+ real smlnum;
+ logical nounit;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLATPS solves one of the triangular systems */
+
+/* A * x = s*b, A**T * x = s*b, or A**H * x = s*b, */
+
+/* with scaling to prevent overflow, where A is an upper or lower */
+/* triangular matrix stored in packed form. Here A**T denotes the */
+/* transpose of A, A**H denotes the conjugate transpose of A, x and b */
+/* are n-element vectors, and s is a scaling factor, usually less than */
+/* or equal to 1, chosen so that the components of x will be less than */
+/* the overflow threshold. If the unscaled problem will not cause */
+/* overflow, the Level 2 BLAS routine CTPSV is called. If the matrix A */
+/* is singular (A(j,j) = 0 for some j), then s is set to 0 and a */
+/* non-trivial solution to A*x = 0 is returned. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the operation applied to A. */
+/* = 'N': Solve A * x = s*b (No transpose) */
+/* = 'T': Solve A**T * x = s*b (Transpose) */
+/* = 'C': Solve A**H * x = s*b (Conjugate transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* NORMIN (input) CHARACTER*1 */
+/* Specifies whether CNORM has been set or not. */
+/* = 'Y': CNORM contains the column norms on entry */
+/* = 'N': CNORM is not set on entry. On exit, the norms will */
+/* be computed and stored in CNORM. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) COMPLEX array, dimension (N*(N+1)/2) */
+/* The upper or lower triangular matrix A, packed columnwise in */
+/* a linear array. The j-th column of A is stored in the array */
+/* AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* X (input/output) COMPLEX array, dimension (N) */
+/* On entry, the right hand side b of the triangular system. */
+/* On exit, X is overwritten by the solution vector x. */
+
+/* SCALE (output) REAL */
+/* The scaling factor s for the triangular system */
+/* A * x = s*b, A**T * x = s*b, or A**H * x = s*b. */
+/* If SCALE = 0, the matrix A is singular or badly scaled, and */
+/* the vector x is an exact or approximate solution to A*x = 0. */
+
+/* CNORM (input or output) REAL array, dimension (N) */
+
+/* If NORMIN = 'Y', CNORM is an input argument and CNORM(j) */
+/* contains the norm of the off-diagonal part of the j-th column */
+/* of A. If TRANS = 'N', CNORM(j) must be greater than or equal */
+/* to the infinity-norm, and if TRANS = 'T' or 'C', CNORM(j) */
+/* must be greater than or equal to the 1-norm. */
+
+/* If NORMIN = 'N', CNORM is an output argument and CNORM(j) */
+/* returns the 1-norm of the offdiagonal part of the j-th column */
+/* of A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* Further Details */
+/* ======= ======= */
+
+/* A rough bound on x is computed; if that is less than overflow, CTPSV */
+/* is called, otherwise, specific code is used which checks for possible */
+/* overflow or divide-by-zero at every operation. */
+
+/* A columnwise scheme is used for solving A*x = b. The basic algorithm */
+/* if A is lower triangular is */
+
+/* x[1:n] := b[1:n] */
+/* for j = 1, ..., n */
+/* x(j) := x(j) / A(j,j) */
+/* x[j+1:n] := x[j+1:n] - x(j) * A[j+1:n,j] */
+/* end */
+
+/* Define bounds on the components of x after j iterations of the loop: */
+/* M(j) = bound on x[1:j] */
+/* G(j) = bound on x[j+1:n] */
+/* Initially, let M(0) = 0 and G(0) = max{x(i), i=1,...,n}. */
+
+/* Then for iteration j+1 we have */
+/* M(j+1) <= G(j) / | A(j+1,j+1) | */
+/* G(j+1) <= G(j) + M(j+1) * | A[j+2:n,j+1] | */
+/* <= G(j) ( 1 + CNORM(j+1) / | A(j+1,j+1) | ) */
+
+/* where CNORM(j+1) is greater than or equal to the infinity-norm of */
+/* column j+1 of A, not counting the diagonal. Hence */
+
+/* G(j) <= G(0) product ( 1 + CNORM(i) / | A(i,i) | ) */
+/* 1<=i<=j */
+/* and */
+
+/* |x(j)| <= ( G(0) / |A(j,j)| ) product ( 1 + CNORM(i) / |A(i,i)| ) */
+/* 1<=i< j */
+
+/* Since |x(j)| <= M(j), we use the Level 2 BLAS routine CTPSV if the */
+/* reciprocal of the largest M(j), j=1,..,n, is larger than */
+/* max(underflow, 1/overflow). */
+
+/* The bound on x(j) is also used to determine when a step in the */
+/* columnwise method can be performed without fear of overflow. If */
+/* the computed bound is greater than a large constant, x is scaled to */
+/* prevent overflow, but if the bound overflows, x is set to 0, x(j) to */
+/* 1, and scale to 0, and a non-trivial solution to A*x = 0 is found. */
+
+/* Similarly, a row-wise scheme is used to solve A**T *x = b or */
+/* A**H *x = b. The basic algorithm for A upper triangular is */
+
+/* for j = 1, ..., n */
+/* x(j) := ( b(j) - A[1:j-1,j]' * x[1:j-1] ) / A(j,j) */
+/* end */
+
+/* We simultaneously compute two bounds */
+/* G(j) = bound on ( b(i) - A[1:i-1,i]' * x[1:i-1] ), 1<=i<=j */
+/* M(j) = bound on x(i), 1<=i<=j */
+
+/* The initial values are G(0) = 0, M(0) = max{b(i), i=1,..,n}, and we */
+/* add the constraint G(j) >= G(j-1) and M(j) >= M(j-1) for j >= 1. */
+/* Then the bound on x(j) is */
+
+/* M(j) <= M(j-1) * ( 1 + CNORM(j) ) / | A(j,j) | */
+
+/* <= M(0) * product ( ( 1 + CNORM(i) ) / |A(i,i)| ) */
+/* 1<=i<=j */
+
+/* and we can safely call CTPSV if 1/M(n) and 1/G(n) are both greater */
+/* than max(underflow, 1/overflow). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --cnorm;
+ --x;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+/* Test the input parameters. */
+
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (! lsame_(normin, "Y") && ! lsame_(normin,
+ "N")) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLATPS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine machine dependent parameters to control overflow. */
+
+ smlnum = slamch_("Safe minimum");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum /= slamch_("Precision");
+ bignum = 1.f / smlnum;
+ *scale = 1.f;
+
+ if (lsame_(normin, "N")) {
+
+/* Compute the 1-norm of each column, not including the diagonal. */
+
+ if (upper) {
+
+/* A is upper triangular. */
+
+ ip = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ cnorm[j] = scasum_(&i__2, &ap[ip], &c__1);
+ ip += j;
+/* L10: */
+ }
+ } else {
+
+/* A is lower triangular. */
+
+ ip = 1;
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ cnorm[j] = scasum_(&i__2, &ap[ip + 1], &c__1);
+ ip = ip + *n - j + 1;
+/* L20: */
+ }
+ cnorm[*n] = 0.f;
+ }
+ }
+
+/* Scale the column norms by TSCAL if the maximum element in CNORM is */
+/* greater than BIGNUM/2. */
+
+ imax = isamax_(n, &cnorm[1], &c__1);
+ tmax = cnorm[imax];
+ if (tmax <= bignum * .5f) {
+ tscal = 1.f;
+ } else {
+ tscal = .5f / (smlnum * tmax);
+ sscal_(n, &tscal, &cnorm[1], &c__1);
+ }
+
+/* Compute a bound on the computed solution vector to see if the */
+/* Level 2 BLAS routine CTPSV can be used. */
+
+ xmax = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j;
+ r__3 = xmax, r__4 = (r__1 = x[i__2].r / 2.f, dabs(r__1)) + (r__2 =
+ r_imag(&x[j]) / 2.f, dabs(r__2));
+ xmax = dmax(r__3,r__4);
+/* L30: */
+ }
+ xbnd = xmax;
+ if (notran) {
+
+/* Compute the growth in A * x = b. */
+
+ if (upper) {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ } else {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ }
+
+ if (tscal != 1.f) {
+ grow = 0.f;
+ goto L60;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, G(0) = max{x(i), i=1,...,n}. */
+
+ grow = .5f / dmax(xbnd,smlnum);
+ xbnd = grow;
+ ip = jfirst * (jfirst + 1) / 2;
+ jlen = *n;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L60;
+ }
+
+ i__3 = ip;
+ tjjs.r = ap[i__3].r, tjjs.i = ap[i__3].i;
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+
+ if (tjj >= smlnum) {
+
+/* M(j) = G(j-1) / abs(A(j,j)) */
+
+/* Computing MIN */
+ r__1 = xbnd, r__2 = dmin(1.f,tjj) * grow;
+ xbnd = dmin(r__1,r__2);
+ } else {
+
+/* M(j) could overflow, set XBND to 0. */
+
+ xbnd = 0.f;
+ }
+
+ if (tjj + cnorm[j] >= smlnum) {
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) / abs(A(j,j)) ) */
+
+ grow *= tjj / (tjj + cnorm[j]);
+ } else {
+
+/* G(j) could overflow, set GROW to 0. */
+
+ grow = 0.f;
+ }
+ ip += jinc * jlen;
+ --jlen;
+/* L40: */
+ }
+ grow = xbnd;
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = .5f / dmax(xbnd,smlnum);
+ grow = dmin(r__1,r__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L60;
+ }
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) ) */
+
+ grow *= 1.f / (cnorm[j] + 1.f);
+/* L50: */
+ }
+ }
+L60:
+
+ ;
+ } else {
+
+/* Compute the growth in A**T * x = b or A**H * x = b. */
+
+ if (upper) {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ } else {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ }
+
+ if (tscal != 1.f) {
+ grow = 0.f;
+ goto L90;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, M(0) = max{x(i), i=1,...,n}. */
+
+ grow = .5f / dmax(xbnd,smlnum);
+ xbnd = grow;
+ ip = jfirst * (jfirst + 1) / 2;
+ jlen = 1;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L90;
+ }
+
+/* G(j) = max( G(j-1), M(j-1)*( 1 + CNORM(j) ) ) */
+
+ xj = cnorm[j] + 1.f;
+/* Computing MIN */
+ r__1 = grow, r__2 = xbnd / xj;
+ grow = dmin(r__1,r__2);
+
+ i__3 = ip;
+ tjjs.r = ap[i__3].r, tjjs.i = ap[i__3].i;
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+
+ if (tjj >= smlnum) {
+
+/* M(j) = M(j-1)*( 1 + CNORM(j) ) / abs(A(j,j)) */
+
+ if (xj > tjj) {
+ xbnd *= tjj / xj;
+ }
+ } else {
+
+/* M(j) could overflow, set XBND to 0. */
+
+ xbnd = 0.f;
+ }
+ ++jlen;
+ ip += jinc * jlen;
+/* L70: */
+ }
+ grow = dmin(grow,xbnd);
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = .5f / dmax(xbnd,smlnum);
+ grow = dmin(r__1,r__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L90;
+ }
+
+/* G(j) = ( 1 + CNORM(j) )*G(j-1) */
+
+ xj = cnorm[j] + 1.f;
+ grow /= xj;
+/* L80: */
+ }
+ }
+L90:
+ ;
+ }
+
+ if (grow * tscal > smlnum) {
+
+/* Use the Level 2 BLAS solve if the reciprocal of the bound on */
+/* elements of X is not too small. */
+
+ ctpsv_(uplo, trans, diag, n, &ap[1], &x[1], &c__1);
+ } else {
+
+/* Use a Level 1 BLAS solve, scaling intermediate results. */
+
+ if (xmax > bignum * .5f) {
+
+/* Scale X so that its components are less than or equal to */
+/* BIGNUM in absolute value. */
+
+ *scale = bignum * .5f / xmax;
+ csscal_(n, scale, &x[1], &c__1);
+ xmax = bignum;
+ } else {
+ xmax *= 2.f;
+ }
+
+ if (notran) {
+
+/* Solve A * x = b */
+
+ ip = jfirst * (jfirst + 1) / 2;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) / A(j,j), scaling x if necessary. */
+
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]),
+ dabs(r__2));
+ if (nounit) {
+ i__3 = ip;
+ q__1.r = tscal * ap[i__3].r, q__1.i = tscal * ap[i__3].i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ if (tscal == 1.f) {
+ goto L105;
+ }
+ }
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.f) {
+ if (xj > tjj * bignum) {
+
+/* Scale x by 1/b(j). */
+
+ rec = 1.f / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]
+ ), dabs(r__2));
+ } else if (tjj > 0.f) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM */
+/* to avoid overflow when dividing by A(j,j). */
+
+ rec = tjj * bignum / xj;
+ if (cnorm[j] > 1.f) {
+
+/* Scale by 1/CNORM(j) to avoid overflow when */
+/* multiplying x(j) times column j. */
+
+ rec /= cnorm[j];
+ }
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]
+ ), dabs(r__2));
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0, and compute a solution to A*x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0.f, x[i__4].i = 0.f;
+/* L100: */
+ }
+ i__3 = j;
+ x[i__3].r = 1.f, x[i__3].i = 0.f;
+ xj = 1.f;
+ *scale = 0.f;
+ xmax = 0.f;
+ }
+L105:
+
+/* Scale x if necessary to avoid overflow when adding a */
+/* multiple of column j of A. */
+
+ if (xj > 1.f) {
+ rec = 1.f / xj;
+ if (cnorm[j] > (bignum - xmax) * rec) {
+
+/* Scale x by 1/(2*abs(x(j))). */
+
+ rec *= .5f;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ } else if (xj * cnorm[j] > bignum - xmax) {
+
+/* Scale x by 1/2. */
+
+ csscal_(n, &c_b36, &x[1], &c__1);
+ *scale *= .5f;
+ }
+
+ if (upper) {
+ if (j > 1) {
+
+/* Compute the update */
+/* x(1:j-1) := x(1:j-1) - x(j) * A(1:j-1,j) */
+
+ i__3 = j - 1;
+ i__4 = j;
+ q__2.r = -x[i__4].r, q__2.i = -x[i__4].i;
+ q__1.r = tscal * q__2.r, q__1.i = tscal * q__2.i;
+ caxpy_(&i__3, &q__1, &ap[ip - j + 1], &c__1, &x[1], &
+ c__1);
+ i__3 = j - 1;
+ i__ = icamax_(&i__3, &x[1], &c__1);
+ i__3 = i__;
+ xmax = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[i__]), dabs(r__2));
+ }
+ ip -= j;
+ } else {
+ if (j < *n) {
+
+/* Compute the update */
+/* x(j+1:n) := x(j+1:n) - x(j) * A(j+1:n,j) */
+
+ i__3 = *n - j;
+ i__4 = j;
+ q__2.r = -x[i__4].r, q__2.i = -x[i__4].i;
+ q__1.r = tscal * q__2.r, q__1.i = tscal * q__2.i;
+ caxpy_(&i__3, &q__1, &ap[ip + 1], &c__1, &x[j + 1], &
+ c__1);
+ i__3 = *n - j;
+ i__ = j + icamax_(&i__3, &x[j + 1], &c__1);
+ i__3 = i__;
+ xmax = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[i__]), dabs(r__2));
+ }
+ ip = ip + *n - j + 1;
+ }
+/* L110: */
+ }
+
+ } else if (lsame_(trans, "T")) {
+
+/* Solve A**T * x = b */
+
+ ip = jfirst * (jfirst + 1) / 2;
+ jlen = 1;
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Compute x(j) = b(j) - sum A(k,j)*x(k). */
+/* k<>j */
+
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]),
+ dabs(r__2));
+ uscal.r = tscal, uscal.i = 0.f;
+ rec = 1.f / dmax(xmax,1.f);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5f;
+ if (nounit) {
+ i__3 = ip;
+ q__1.r = tscal * ap[i__3].r, q__1.i = tscal * ap[i__3]
+ .i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ }
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > 1.f) {
+
+/* Divide by A(j,j) when scaling x if A(j,j) > 1. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = rec * tjj;
+ rec = dmin(r__1,r__2);
+ cladiv_(&q__1, &uscal, &tjjs);
+ uscal.r = q__1.r, uscal.i = q__1.i;
+ }
+ if (rec < 1.f) {
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ csumj.r = 0.f, csumj.i = 0.f;
+ if (uscal.r == 1.f && uscal.i == 0.f) {
+
+/* If the scaling needed for A in the dot product is 1, */
+/* call CDOTU to perform the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ cdotu_(&q__1, &i__3, &ap[ip - j + 1], &c__1, &x[1], &
+ c__1);
+ csumj.r = q__1.r, csumj.i = q__1.i;
+ } else if (j < *n) {
+ i__3 = *n - j;
+ cdotu_(&q__1, &i__3, &ap[ip + 1], &c__1, &x[j + 1], &
+ c__1);
+ csumj.r = q__1.r, csumj.i = q__1.i;
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = ip - j + i__;
+ q__3.r = ap[i__4].r * uscal.r - ap[i__4].i *
+ uscal.i, q__3.i = ap[i__4].r * uscal.i +
+ ap[i__4].i * uscal.r;
+ i__5 = i__;
+ q__2.r = q__3.r * x[i__5].r - q__3.i * x[i__5].i,
+ q__2.i = q__3.r * x[i__5].i + q__3.i * x[
+ i__5].r;
+ q__1.r = csumj.r + q__2.r, q__1.i = csumj.i +
+ q__2.i;
+ csumj.r = q__1.r, csumj.i = q__1.i;
+/* L120: */
+ }
+ } else if (j < *n) {
+ i__3 = *n - j;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = ip + i__;
+ q__3.r = ap[i__4].r * uscal.r - ap[i__4].i *
+ uscal.i, q__3.i = ap[i__4].r * uscal.i +
+ ap[i__4].i * uscal.r;
+ i__5 = j + i__;
+ q__2.r = q__3.r * x[i__5].r - q__3.i * x[i__5].i,
+ q__2.i = q__3.r * x[i__5].i + q__3.i * x[
+ i__5].r;
+ q__1.r = csumj.r + q__2.r, q__1.i = csumj.i +
+ q__2.i;
+ csumj.r = q__1.r, csumj.i = q__1.i;
+/* L130: */
+ }
+ }
+ }
+
+ q__1.r = tscal, q__1.i = 0.f;
+ if (uscal.r == q__1.r && uscal.i == q__1.i) {
+
+/* Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j) */
+/* was not used to scale the dotproduct. */
+
+ i__3 = j;
+ i__4 = j;
+ q__1.r = x[i__4].r - csumj.r, q__1.i = x[i__4].i -
+ csumj.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]
+ ), dabs(r__2));
+ if (nounit) {
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ i__3 = ip;
+ q__1.r = tscal * ap[i__3].r, q__1.i = tscal * ap[i__3]
+ .i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ if (tscal == 1.f) {
+ goto L145;
+ }
+ }
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.f) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1.f / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ } else if (tjj > 0.f) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0 and compute a solution to A**T *x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0.f, x[i__4].i = 0.f;
+/* L140: */
+ }
+ i__3 = j;
+ x[i__3].r = 1.f, x[i__3].i = 0.f;
+ *scale = 0.f;
+ xmax = 0.f;
+ }
+L145:
+ ;
+ } else {
+
+/* Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot */
+/* product has already been divided by 1/A(j,j). */
+
+ i__3 = j;
+ cladiv_(&q__2, &x[j], &tjjs);
+ q__1.r = q__2.r - csumj.r, q__1.i = q__2.i - csumj.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ }
+/* Computing MAX */
+ i__3 = j;
+ r__3 = xmax, r__4 = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[j]), dabs(r__2));
+ xmax = dmax(r__3,r__4);
+ ++jlen;
+ ip += jinc * jlen;
+/* L150: */
+ }
+
+ } else {
+
+/* Solve A**H * x = b */
+
+ ip = jfirst * (jfirst + 1) / 2;
+ jlen = 1;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) - sum A(k,j)*x(k). */
+/* k<>j */
+
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]),
+ dabs(r__2));
+ uscal.r = tscal, uscal.i = 0.f;
+ rec = 1.f / dmax(xmax,1.f);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5f;
+ if (nounit) {
+ r_cnjg(&q__2, &ap[ip]);
+ q__1.r = tscal * q__2.r, q__1.i = tscal * q__2.i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ }
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > 1.f) {
+
+/* Divide by A(j,j) when scaling x if A(j,j) > 1. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = rec * tjj;
+ rec = dmin(r__1,r__2);
+ cladiv_(&q__1, &uscal, &tjjs);
+ uscal.r = q__1.r, uscal.i = q__1.i;
+ }
+ if (rec < 1.f) {
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ csumj.r = 0.f, csumj.i = 0.f;
+ if (uscal.r == 1.f && uscal.i == 0.f) {
+
+/* If the scaling needed for A in the dot product is 1, */
+/* call CDOTC to perform the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ cdotc_(&q__1, &i__3, &ap[ip - j + 1], &c__1, &x[1], &
+ c__1);
+ csumj.r = q__1.r, csumj.i = q__1.i;
+ } else if (j < *n) {
+ i__3 = *n - j;
+ cdotc_(&q__1, &i__3, &ap[ip + 1], &c__1, &x[j + 1], &
+ c__1);
+ csumj.r = q__1.r, csumj.i = q__1.i;
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ r_cnjg(&q__4, &ap[ip - j + i__]);
+ q__3.r = q__4.r * uscal.r - q__4.i * uscal.i,
+ q__3.i = q__4.r * uscal.i + q__4.i *
+ uscal.r;
+ i__4 = i__;
+ q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i,
+ q__2.i = q__3.r * x[i__4].i + q__3.i * x[
+ i__4].r;
+ q__1.r = csumj.r + q__2.r, q__1.i = csumj.i +
+ q__2.i;
+ csumj.r = q__1.r, csumj.i = q__1.i;
+/* L160: */
+ }
+ } else if (j < *n) {
+ i__3 = *n - j;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ r_cnjg(&q__4, &ap[ip + i__]);
+ q__3.r = q__4.r * uscal.r - q__4.i * uscal.i,
+ q__3.i = q__4.r * uscal.i + q__4.i *
+ uscal.r;
+ i__4 = j + i__;
+ q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i,
+ q__2.i = q__3.r * x[i__4].i + q__3.i * x[
+ i__4].r;
+ q__1.r = csumj.r + q__2.r, q__1.i = csumj.i +
+ q__2.i;
+ csumj.r = q__1.r, csumj.i = q__1.i;
+/* L170: */
+ }
+ }
+ }
+
+ q__1.r = tscal, q__1.i = 0.f;
+ if (uscal.r == q__1.r && uscal.i == q__1.i) {
+
+/* Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j) */
+/* was not used to scale the dotproduct. */
+
+ i__3 = j;
+ i__4 = j;
+ q__1.r = x[i__4].r - csumj.r, q__1.i = x[i__4].i -
+ csumj.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]
+ ), dabs(r__2));
+ if (nounit) {
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ r_cnjg(&q__2, &ap[ip]);
+ q__1.r = tscal * q__2.r, q__1.i = tscal * q__2.i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ if (tscal == 1.f) {
+ goto L185;
+ }
+ }
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.f) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1.f / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ } else if (tjj > 0.f) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0 and compute a solution to A**H *x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0.f, x[i__4].i = 0.f;
+/* L180: */
+ }
+ i__3 = j;
+ x[i__3].r = 1.f, x[i__3].i = 0.f;
+ *scale = 0.f;
+ xmax = 0.f;
+ }
+L185:
+ ;
+ } else {
+
+/* Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot */
+/* product has already been divided by 1/A(j,j). */
+
+ i__3 = j;
+ cladiv_(&q__2, &x[j], &tjjs);
+ q__1.r = q__2.r - csumj.r, q__1.i = q__2.i - csumj.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ }
+/* Computing MAX */
+ i__3 = j;
+ r__3 = xmax, r__4 = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[j]), dabs(r__2));
+ xmax = dmax(r__3,r__4);
+ ++jlen;
+ ip += jinc * jlen;
+/* L190: */
+ }
+ }
+ *scale /= tscal;
+ }
+
+/* Scale the column norms by 1/TSCAL for return. */
+
+ if (tscal != 1.f) {
+ r__1 = 1.f / tscal;
+ sscal_(n, &r__1, &cnorm[1], &c__1);
+ }
+
+ return 0;
+
+/* End of CLATPS */
+
+} /* clatps_ */
diff --git a/contrib/libs/clapack/clatrd.c b/contrib/libs/clapack/clatrd.c
new file mode 100644
index 0000000000..4a696f17c9
--- /dev/null
+++ b/contrib/libs/clapack/clatrd.c
@@ -0,0 +1,418 @@
+/* clatrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int clatrd_(char *uplo, integer *n, integer *nb, complex *a,
+ integer *lda, real *e, complex *tau, complex *w, integer *ldw)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3;
+ real r__1;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Local variables */
+ integer i__, iw;
+ complex alpha;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *);
+ extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *), chemv_(char *, integer *, complex *,
+ complex *, integer *, complex *, integer *, complex *, complex *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int caxpy_(integer *, complex *, complex *,
+ integer *, complex *, integer *), clarfg_(integer *, complex *,
+ complex *, integer *, complex *), clacgv_(integer *, complex *,
+ integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLATRD reduces NB rows and columns of a complex Hermitian matrix A to */
+/* Hermitian tridiagonal form by a unitary similarity */
+/* transformation Q' * A * Q, and returns the matrices V and W which are */
+/* needed to apply the transformation to the unreduced part of A. */
+
+/* If UPLO = 'U', CLATRD reduces the last NB rows and columns of a */
+/* matrix, of which the upper triangle is supplied; */
+/* if UPLO = 'L', CLATRD reduces the first NB rows and columns of a */
+/* matrix, of which the lower triangle is supplied. */
+
+/* This is an auxiliary routine called by CHETRD. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* Hermitian matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* NB (input) INTEGER */
+/* The number of rows and columns to be reduced. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+/* On exit: */
+/* if UPLO = 'U', the last NB columns have been reduced to */
+/* tridiagonal form, with the diagonal elements overwriting */
+/* the diagonal elements of A; the elements above the diagonal */
+/* with the array TAU, represent the unitary matrix Q as a */
+/* product of elementary reflectors; */
+/* if UPLO = 'L', the first NB columns have been reduced to */
+/* tridiagonal form, with the diagonal elements overwriting */
+/* the diagonal elements of A; the elements below the diagonal */
+/* with the array TAU, represent the unitary matrix Q as a */
+/* product of elementary reflectors. */
+/* See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* E (output) REAL array, dimension (N-1) */
+/* If UPLO = 'U', E(n-nb:n-1) contains the superdiagonal */
+/* elements of the last NB columns of the reduced matrix; */
+/* if UPLO = 'L', E(1:nb) contains the subdiagonal elements of */
+/* the first NB columns of the reduced matrix. */
+
+/* TAU (output) COMPLEX array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors, stored in */
+/* TAU(n-nb:n-1) if UPLO = 'U', and in TAU(1:nb) if UPLO = 'L'. */
+/* See Further Details. */
+
+/* W (output) COMPLEX array, dimension (LDW,NB) */
+/* The n-by-nb matrix W required to update the unreduced part */
+/* of A. */
+
+/* LDW (input) INTEGER */
+/* The leading dimension of the array W. LDW >= max(1,N). */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(n) H(n-1) . . . H(n-nb+1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i), */
+/* and tau in TAU(i-1). */
+
+/* If UPLO = 'L', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(1) H(2) . . . H(nb). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i), */
+/* and tau in TAU(i). */
+
+/* The elements of the vectors v together form the n-by-nb matrix V */
+/* which is needed, with W, to apply the transformation to the unreduced */
+/* part of the matrix, using a Hermitian rank-2k update of the form: */
+/* A := A - V*W' - W*V'. */
+
+/* The contents of A on exit are illustrated by the following examples */
+/* with n = 5 and nb = 2: */
+
+/* if UPLO = 'U': if UPLO = 'L': */
+
+/* ( a a a v4 v5 ) ( d ) */
+/* ( a a v4 v5 ) ( 1 d ) */
+/* ( a 1 v5 ) ( v1 1 a ) */
+/* ( d 1 ) ( v1 v2 a a ) */
+/* ( d ) ( v1 v2 a a a ) */
+
+/* where d denotes a diagonal element of the reduced matrix, a denotes */
+/* an element of the original matrix that is unchanged, and vi denotes */
+/* an element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --e;
+ --tau;
+ w_dim1 = *ldw;
+ w_offset = 1 + w_dim1;
+ w -= w_offset;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+
+ if (lsame_(uplo, "U")) {
+
+/* Reduce last NB columns of upper triangle */
+
+ i__1 = *n - *nb + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ iw = i__ - *n + *nb;
+ if (i__ < *n) {
+
+/* Update A(1:i,i) */
+
+ i__2 = i__ + i__ * a_dim1;
+ i__3 = i__ + i__ * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ i__2 = *n - i__;
+ clacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw);
+ i__2 = *n - i__;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__, &i__2, &q__1, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &w[i__ + (iw + 1) * w_dim1], ldw, &
+ c_b2, &a[i__ * a_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ clacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw);
+ i__2 = *n - i__;
+ clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+ i__2 = *n - i__;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__, &i__2, &q__1, &w[(iw + 1) *
+ w_dim1 + 1], ldw, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ c_b2, &a[i__ * a_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+ i__2 = i__ + i__ * a_dim1;
+ i__3 = i__ + i__ * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ }
+ if (i__ > 1) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(1:i-2,i) */
+
+ i__2 = i__ - 1 + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = i__ - 1;
+ clarfg_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &tau[i__
+ - 1]);
+ i__2 = i__ - 1;
+ e[i__2] = alpha.r;
+ i__2 = i__ - 1 + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Compute W(1:i-1,i) */
+
+ i__2 = i__ - 1;
+ chemv_("Upper", &i__2, &c_b2, &a[a_offset], lda, &a[i__ *
+ a_dim1 + 1], &c__1, &c_b1, &w[iw * w_dim1 + 1], &c__1);
+ if (i__ < *n) {
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &w[(iw
+ + 1) * w_dim1 + 1], ldw, &a[i__ * a_dim1 + 1], &
+ c__1, &c_b1, &w[i__ + 1 + iw * w_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &w[i__ + 1 + iw * w_dim1], &
+ c__1, &c_b2, &w[iw * w_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[(
+ i__ + 1) * a_dim1 + 1], lda, &a[i__ * a_dim1 + 1],
+ &c__1, &c_b1, &w[i__ + 1 + iw * w_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &w[(iw + 1) *
+ w_dim1 + 1], ldw, &w[i__ + 1 + iw * w_dim1], &
+ c__1, &c_b2, &w[iw * w_dim1 + 1], &c__1);
+ }
+ i__2 = i__ - 1;
+ cscal_(&i__2, &tau[i__ - 1], &w[iw * w_dim1 + 1], &c__1);
+ q__3.r = -.5f, q__3.i = -0.f;
+ i__2 = i__ - 1;
+ q__2.r = q__3.r * tau[i__2].r - q__3.i * tau[i__2].i, q__2.i =
+ q__3.r * tau[i__2].i + q__3.i * tau[i__2].r;
+ i__3 = i__ - 1;
+ cdotc_(&q__4, &i__3, &w[iw * w_dim1 + 1], &c__1, &a[i__ *
+ a_dim1 + 1], &c__1);
+ q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r *
+ q__4.i + q__2.i * q__4.r;
+ alpha.r = q__1.r, alpha.i = q__1.i;
+ i__2 = i__ - 1;
+ caxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &w[iw *
+ w_dim1 + 1], &c__1);
+ }
+
+/* L10: */
+ }
+ } else {
+
+/* Reduce first NB columns of lower triangle */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i:n,i) */
+
+ i__2 = i__ + i__ * a_dim1;
+ i__3 = i__ + i__ * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &w[i__ + w_dim1], ldw);
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &a[i__ + a_dim1], lda,
+ &w[i__ + w_dim1], ldw, &c_b2, &a[i__ + i__ * a_dim1], &
+ c__1);
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &w[i__ + w_dim1], ldw);
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &a[i__ + a_dim1], lda);
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &w[i__ + w_dim1], ldw,
+ &a[i__ + a_dim1], lda, &c_b2, &a[i__ + i__ * a_dim1], &
+ c__1);
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &a[i__ + a_dim1], lda);
+ i__2 = i__ + i__ * a_dim1;
+ i__3 = i__ + i__ * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ if (i__ < *n) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(i+2:n,i) */
+
+ i__2 = i__ + 1 + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ clarfg_(&i__2, &alpha, &a[min(i__3, *n)+ i__ * a_dim1], &c__1,
+ &tau[i__]);
+ i__2 = i__;
+ e[i__2] = alpha.r;
+ i__2 = i__ + 1 + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Compute W(i+1:n,i) */
+
+ i__2 = *n - i__;
+ chemv_("Lower", &i__2, &c_b2, &a[i__ + 1 + (i__ + 1) * a_dim1]
+, lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b1, &w[
+ i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &w[i__ + 1
+ + w_dim1], ldw, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ c_b1, &w[i__ * w_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &a[i__ + 1 +
+ a_dim1], lda, &w[i__ * w_dim1 + 1], &c__1, &c_b2, &w[
+ i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + 1
+ + a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ c_b1, &w[i__ * w_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &w[i__ + 1 +
+ w_dim1], ldw, &w[i__ * w_dim1 + 1], &c__1, &c_b2, &w[
+ i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ cscal_(&i__2, &tau[i__], &w[i__ + 1 + i__ * w_dim1], &c__1);
+ q__3.r = -.5f, q__3.i = -0.f;
+ i__2 = i__;
+ q__2.r = q__3.r * tau[i__2].r - q__3.i * tau[i__2].i, q__2.i =
+ q__3.r * tau[i__2].i + q__3.i * tau[i__2].r;
+ i__3 = *n - i__;
+ cdotc_(&q__4, &i__3, &w[i__ + 1 + i__ * w_dim1], &c__1, &a[
+ i__ + 1 + i__ * a_dim1], &c__1);
+ q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r *
+ q__4.i + q__2.i * q__4.r;
+ alpha.r = q__1.r, alpha.i = q__1.i;
+ i__2 = *n - i__;
+ caxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &w[
+ i__ + 1 + i__ * w_dim1], &c__1);
+ }
+
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of CLATRD */
+
+} /* clatrd_ */
diff --git a/contrib/libs/clapack/clatrs.c b/contrib/libs/clapack/clatrs.c
new file mode 100644
index 0000000000..3bf8994483
--- /dev/null
+++ b/contrib/libs/clapack/clatrs.c
@@ -0,0 +1,1147 @@
+/* clatrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b36 = .5f;
+
+/* Subroutine */ int clatrs_(char *uplo, char *trans, char *diag, char *
+ normin, integer *n, complex *a, integer *lda, complex *x, real *scale,
+ real *cnorm, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j;
+ real xj, rec, tjj;
+ integer jinc;
+ real xbnd;
+ integer imax;
+ real tmax;
+ complex tjjs;
+ real xmax, grow;
+ extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ real tscal;
+ complex uscal;
+ integer jlast;
+ extern /* Complex */ VOID cdotu_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ complex csumj;
+ extern /* Subroutine */ int caxpy_(integer *, complex *, complex *,
+ integer *, complex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int ctrsv_(char *, char *, char *, integer *,
+ complex *, integer *, complex *, integer *), slabad_(real *, real *);
+ extern integer icamax_(integer *, complex *, integer *);
+ extern /* Complex */ VOID cladiv_(complex *, complex *, complex *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *), xerbla_(char *, integer *);
+ real bignum;
+ extern integer isamax_(integer *, real *, integer *);
+ extern doublereal scasum_(integer *, complex *, integer *);
+ logical notran;
+ integer jfirst;
+ real smlnum;
+ logical nounit;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLATRS solves one of the triangular systems */
+
+/* A * x = s*b, A**T * x = s*b, or A**H * x = s*b, */
+
+/* with scaling to prevent overflow. Here A is an upper or lower */
+/* triangular matrix, A**T denotes the transpose of A, A**H denotes the */
+/* conjugate transpose of A, x and b are n-element vectors, and s is a */
+/* scaling factor, usually less than or equal to 1, chosen so that the */
+/* components of x will be less than the overflow threshold. If the */
+/* unscaled problem will not cause overflow, the Level 2 BLAS routine */
+/* CTRSV is called. If the matrix A is singular (A(j,j) = 0 for some j), */
+/* then s is set to 0 and a non-trivial solution to A*x = 0 is returned. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the operation applied to A. */
+/* = 'N': Solve A * x = s*b (No transpose) */
+/* = 'T': Solve A**T * x = s*b (Transpose) */
+/* = 'C': Solve A**H * x = s*b (Conjugate transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* NORMIN (input) CHARACTER*1 */
+/* Specifies whether CNORM has been set or not. */
+/* = 'Y': CNORM contains the column norms on entry */
+/* = 'N': CNORM is not set on entry. On exit, the norms will */
+/* be computed and stored in CNORM. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The triangular matrix A. If UPLO = 'U', the leading n by n */
+/* upper triangular part of the array A contains the upper */
+/* triangular matrix, and the strictly lower triangular part of */
+/* A is not referenced. If UPLO = 'L', the leading n by n lower */
+/* triangular part of the array A contains the lower triangular */
+/* matrix, and the strictly upper triangular part of A is not */
+/* referenced. If DIAG = 'U', the diagonal elements of A are */
+/* also not referenced and are assumed to be 1. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max (1,N). */
+
+/* X (input/output) COMPLEX array, dimension (N) */
+/* On entry, the right hand side b of the triangular system. */
+/* On exit, X is overwritten by the solution vector x. */
+
+/* SCALE (output) REAL */
+/* The scaling factor s for the triangular system */
+/* A * x = s*b, A**T * x = s*b, or A**H * x = s*b. */
+/* If SCALE = 0, the matrix A is singular or badly scaled, and */
+/* the vector x is an exact or approximate solution to A*x = 0. */
+
+/* CNORM (input or output) REAL array, dimension (N) */
+
+/* If NORMIN = 'Y', CNORM is an input argument and CNORM(j) */
+/* contains the norm of the off-diagonal part of the j-th column */
+/* of A. If TRANS = 'N', CNORM(j) must be greater than or equal */
+/* to the infinity-norm, and if TRANS = 'T' or 'C', CNORM(j) */
+/* must be greater than or equal to the 1-norm. */
+
+/* If NORMIN = 'N', CNORM is an output argument and CNORM(j) */
+/* returns the 1-norm of the offdiagonal part of the j-th column */
+/* of A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* Further Details */
+/* ======= ======= */
+
+/* A rough bound on x is computed; if that is less than overflow, CTRSV */
+/* is called, otherwise, specific code is used which checks for possible */
+/* overflow or divide-by-zero at every operation. */
+
+/* A columnwise scheme is used for solving A*x = b. The basic algorithm */
+/* if A is lower triangular is */
+
+/* x[1:n] := b[1:n] */
+/* for j = 1, ..., n */
+/* x(j) := x(j) / A(j,j) */
+/* x[j+1:n] := x[j+1:n] - x(j) * A[j+1:n,j] */
+/* end */
+
+/* Define bounds on the components of x after j iterations of the loop: */
+/* M(j) = bound on x[1:j] */
+/* G(j) = bound on x[j+1:n] */
+/* Initially, let M(0) = 0 and G(0) = max{x(i), i=1,...,n}. */
+
+/* Then for iteration j+1 we have */
+/* M(j+1) <= G(j) / | A(j+1,j+1) | */
+/* G(j+1) <= G(j) + M(j+1) * | A[j+2:n,j+1] | */
+/* <= G(j) ( 1 + CNORM(j+1) / | A(j+1,j+1) | ) */
+
+/* where CNORM(j+1) is greater than or equal to the infinity-norm of */
+/* column j+1 of A, not counting the diagonal. Hence */
+
+/* G(j) <= G(0) product ( 1 + CNORM(i) / | A(i,i) | ) */
+/* 1<=i<=j */
+/* and */
+
+/* |x(j)| <= ( G(0) / |A(j,j)| ) product ( 1 + CNORM(i) / |A(i,i)| ) */
+/* 1<=i< j */
+
+/* Since |x(j)| <= M(j), we use the Level 2 BLAS routine CTRSV if the */
+/* reciprocal of the largest M(j), j=1,..,n, is larger than */
+/* max(underflow, 1/overflow). */
+
+/* The bound on x(j) is also used to determine when a step in the */
+/* columnwise method can be performed without fear of overflow. If */
+/* the computed bound is greater than a large constant, x is scaled to */
+/* prevent overflow, but if the bound overflows, x is set to 0, x(j) to */
+/* 1, and scale to 0, and a non-trivial solution to A*x = 0 is found. */
+
+/* Similarly, a row-wise scheme is used to solve A**T *x = b or */
+/* A**H *x = b. The basic algorithm for A upper triangular is */
+
+/* for j = 1, ..., n */
+/* x(j) := ( b(j) - A[1:j-1,j]' * x[1:j-1] ) / A(j,j) */
+/* end */
+
+/* We simultaneously compute two bounds */
+/* G(j) = bound on ( b(i) - A[1:i-1,i]' * x[1:i-1] ), 1<=i<=j */
+/* M(j) = bound on x(i), 1<=i<=j */
+
+/* The initial values are G(0) = 0, M(0) = max{b(i), i=1,..,n}, and we */
+/* add the constraint G(j) >= G(j-1) and M(j) >= M(j-1) for j >= 1. */
+/* Then the bound on x(j) is */
+
+/* M(j) <= M(j-1) * ( 1 + CNORM(j) ) / | A(j,j) | */
+
+/* <= M(0) * product ( ( 1 + CNORM(i) ) / |A(i,i)| ) */
+/* 1<=i<=j */
+
+/* and we can safely call CTRSV if 1/M(n) and 1/G(n) are both greater */
+/* than max(underflow, 1/overflow). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --cnorm;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+/* Test the input parameters. */
+
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (! lsame_(normin, "Y") && ! lsame_(normin,
+ "N")) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLATRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine machine dependent parameters to control overflow. */
+
+ smlnum = slamch_("Safe minimum");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum /= slamch_("Precision");
+ bignum = 1.f / smlnum;
+ *scale = 1.f;
+
+ if (lsame_(normin, "N")) {
+
+/* Compute the 1-norm of each column, not including the diagonal. */
+
+ if (upper) {
+
+/* A is upper triangular. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ cnorm[j] = scasum_(&i__2, &a[j * a_dim1 + 1], &c__1);
+/* L10: */
+ }
+ } else {
+
+/* A is lower triangular. */
+
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ cnorm[j] = scasum_(&i__2, &a[j + 1 + j * a_dim1], &c__1);
+/* L20: */
+ }
+ cnorm[*n] = 0.f;
+ }
+ }
+
+/* Scale the column norms by TSCAL if the maximum element in CNORM is */
+/* greater than BIGNUM/2. */
+
+ imax = isamax_(n, &cnorm[1], &c__1);
+ tmax = cnorm[imax];
+ if (tmax <= bignum * .5f) {
+ tscal = 1.f;
+ } else {
+ tscal = .5f / (smlnum * tmax);
+ sscal_(n, &tscal, &cnorm[1], &c__1);
+ }
+
+/* Compute a bound on the computed solution vector to see if the */
+/* Level 2 BLAS routine CTRSV can be used. */
+
+ xmax = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j;
+ r__3 = xmax, r__4 = (r__1 = x[i__2].r / 2.f, dabs(r__1)) + (r__2 =
+ r_imag(&x[j]) / 2.f, dabs(r__2));
+ xmax = dmax(r__3,r__4);
+/* L30: */
+ }
+ xbnd = xmax;
+
+ if (notran) {
+
+/* Compute the growth in A * x = b. */
+
+ if (upper) {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ } else {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ }
+
+ if (tscal != 1.f) {
+ grow = 0.f;
+ goto L60;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, G(0) = max{x(i), i=1,...,n}. */
+
+ grow = .5f / dmax(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L60;
+ }
+
+ i__3 = j + j * a_dim1;
+ tjjs.r = a[i__3].r, tjjs.i = a[i__3].i;
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+
+ if (tjj >= smlnum) {
+
+/* M(j) = G(j-1) / abs(A(j,j)) */
+
+/* Computing MIN */
+ r__1 = xbnd, r__2 = dmin(1.f,tjj) * grow;
+ xbnd = dmin(r__1,r__2);
+ } else {
+
+/* M(j) could overflow, set XBND to 0. */
+
+ xbnd = 0.f;
+ }
+
+ if (tjj + cnorm[j] >= smlnum) {
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) / abs(A(j,j)) ) */
+
+ grow *= tjj / (tjj + cnorm[j]);
+ } else {
+
+/* G(j) could overflow, set GROW to 0. */
+
+ grow = 0.f;
+ }
+/* L40: */
+ }
+ grow = xbnd;
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = .5f / dmax(xbnd,smlnum);
+ grow = dmin(r__1,r__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L60;
+ }
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) ) */
+
+ grow *= 1.f / (cnorm[j] + 1.f);
+/* L50: */
+ }
+ }
+L60:
+
+ ;
+ } else {
+
+/* Compute the growth in A**T * x = b or A**H * x = b. */
+
+ if (upper) {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ } else {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ }
+
+ if (tscal != 1.f) {
+ grow = 0.f;
+ goto L90;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, M(0) = max{x(i), i=1,...,n}. */
+
+ grow = .5f / dmax(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L90;
+ }
+
+/* G(j) = max( G(j-1), M(j-1)*( 1 + CNORM(j) ) ) */
+
+ xj = cnorm[j] + 1.f;
+/* Computing MIN */
+ r__1 = grow, r__2 = xbnd / xj;
+ grow = dmin(r__1,r__2);
+
+ i__3 = j + j * a_dim1;
+ tjjs.r = a[i__3].r, tjjs.i = a[i__3].i;
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+
+ if (tjj >= smlnum) {
+
+/* M(j) = M(j-1)*( 1 + CNORM(j) ) / abs(A(j,j)) */
+
+ if (xj > tjj) {
+ xbnd *= tjj / xj;
+ }
+ } else {
+
+/* M(j) could overflow, set XBND to 0. */
+
+ xbnd = 0.f;
+ }
+/* L70: */
+ }
+ grow = dmin(grow,xbnd);
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = .5f / dmax(xbnd,smlnum);
+ grow = dmin(r__1,r__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L90;
+ }
+
+/* G(j) = ( 1 + CNORM(j) )*G(j-1) */
+
+ xj = cnorm[j] + 1.f;
+ grow /= xj;
+/* L80: */
+ }
+ }
+L90:
+ ;
+ }
+
+ if (grow * tscal > smlnum) {
+
+/* Use the Level 2 BLAS solve if the reciprocal of the bound on */
+/* elements of X is not too small. */
+
+ ctrsv_(uplo, trans, diag, n, &a[a_offset], lda, &x[1], &c__1);
+ } else {
+
+/* Use a Level 1 BLAS solve, scaling intermediate results. */
+
+ if (xmax > bignum * .5f) {
+
+/* Scale X so that its components are less than or equal to */
+/* BIGNUM in absolute value. */
+
+ *scale = bignum * .5f / xmax;
+ csscal_(n, scale, &x[1], &c__1);
+ xmax = bignum;
+ } else {
+ xmax *= 2.f;
+ }
+
+ if (notran) {
+
+/* Solve A * x = b */
+
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) / A(j,j), scaling x if necessary. */
+
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]),
+ dabs(r__2));
+ if (nounit) {
+ i__3 = j + j * a_dim1;
+ q__1.r = tscal * a[i__3].r, q__1.i = tscal * a[i__3].i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ if (tscal == 1.f) {
+ goto L105;
+ }
+ }
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.f) {
+ if (xj > tjj * bignum) {
+
+/* Scale x by 1/b(j). */
+
+ rec = 1.f / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]
+ ), dabs(r__2));
+ } else if (tjj > 0.f) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM */
+/* to avoid overflow when dividing by A(j,j). */
+
+ rec = tjj * bignum / xj;
+ if (cnorm[j] > 1.f) {
+
+/* Scale by 1/CNORM(j) to avoid overflow when */
+/* multiplying x(j) times column j. */
+
+ rec /= cnorm[j];
+ }
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]
+ ), dabs(r__2));
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0, and compute a solution to A*x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0.f, x[i__4].i = 0.f;
+/* L100: */
+ }
+ i__3 = j;
+ x[i__3].r = 1.f, x[i__3].i = 0.f;
+ xj = 1.f;
+ *scale = 0.f;
+ xmax = 0.f;
+ }
+L105:
+
+/* Scale x if necessary to avoid overflow when adding a */
+/* multiple of column j of A. */
+
+ if (xj > 1.f) {
+ rec = 1.f / xj;
+ if (cnorm[j] > (bignum - xmax) * rec) {
+
+/* Scale x by 1/(2*abs(x(j))). */
+
+ rec *= .5f;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ } else if (xj * cnorm[j] > bignum - xmax) {
+
+/* Scale x by 1/2. */
+
+ csscal_(n, &c_b36, &x[1], &c__1);
+ *scale *= .5f;
+ }
+
+ if (upper) {
+ if (j > 1) {
+
+/* Compute the update */
+/* x(1:j-1) := x(1:j-1) - x(j) * A(1:j-1,j) */
+
+ i__3 = j - 1;
+ i__4 = j;
+ q__2.r = -x[i__4].r, q__2.i = -x[i__4].i;
+ q__1.r = tscal * q__2.r, q__1.i = tscal * q__2.i;
+ caxpy_(&i__3, &q__1, &a[j * a_dim1 + 1], &c__1, &x[1],
+ &c__1);
+ i__3 = j - 1;
+ i__ = icamax_(&i__3, &x[1], &c__1);
+ i__3 = i__;
+ xmax = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[i__]), dabs(r__2));
+ }
+ } else {
+ if (j < *n) {
+
+/* Compute the update */
+/* x(j+1:n) := x(j+1:n) - x(j) * A(j+1:n,j) */
+
+ i__3 = *n - j;
+ i__4 = j;
+ q__2.r = -x[i__4].r, q__2.i = -x[i__4].i;
+ q__1.r = tscal * q__2.r, q__1.i = tscal * q__2.i;
+ caxpy_(&i__3, &q__1, &a[j + 1 + j * a_dim1], &c__1, &
+ x[j + 1], &c__1);
+ i__3 = *n - j;
+ i__ = j + icamax_(&i__3, &x[j + 1], &c__1);
+ i__3 = i__;
+ xmax = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[i__]), dabs(r__2));
+ }
+ }
+/* L110: */
+ }
+
+ } else if (lsame_(trans, "T")) {
+
+/* Solve A**T * x = b */
+
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Compute x(j) = b(j) - sum A(k,j)*x(k). */
+/* k<>j */
+
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]),
+ dabs(r__2));
+ uscal.r = tscal, uscal.i = 0.f;
+ rec = 1.f / dmax(xmax,1.f);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5f;
+ if (nounit) {
+ i__3 = j + j * a_dim1;
+ q__1.r = tscal * a[i__3].r, q__1.i = tscal * a[i__3]
+ .i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ }
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > 1.f) {
+
+/* Divide by A(j,j) when scaling x if A(j,j) > 1. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = rec * tjj;
+ rec = dmin(r__1,r__2);
+ cladiv_(&q__1, &uscal, &tjjs);
+ uscal.r = q__1.r, uscal.i = q__1.i;
+ }
+ if (rec < 1.f) {
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ csumj.r = 0.f, csumj.i = 0.f;
+ if (uscal.r == 1.f && uscal.i == 0.f) {
+
+/* If the scaling needed for A in the dot product is 1, */
+/* call CDOTU to perform the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ cdotu_(&q__1, &i__3, &a[j * a_dim1 + 1], &c__1, &x[1],
+ &c__1);
+ csumj.r = q__1.r, csumj.i = q__1.i;
+ } else if (j < *n) {
+ i__3 = *n - j;
+ cdotu_(&q__1, &i__3, &a[j + 1 + j * a_dim1], &c__1, &
+ x[j + 1], &c__1);
+ csumj.r = q__1.r, csumj.i = q__1.i;
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * a_dim1;
+ q__3.r = a[i__4].r * uscal.r - a[i__4].i *
+ uscal.i, q__3.i = a[i__4].r * uscal.i + a[
+ i__4].i * uscal.r;
+ i__5 = i__;
+ q__2.r = q__3.r * x[i__5].r - q__3.i * x[i__5].i,
+ q__2.i = q__3.r * x[i__5].i + q__3.i * x[
+ i__5].r;
+ q__1.r = csumj.r + q__2.r, q__1.i = csumj.i +
+ q__2.i;
+ csumj.r = q__1.r, csumj.i = q__1.i;
+/* L120: */
+ }
+ } else if (j < *n) {
+ i__3 = *n;
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * a_dim1;
+ q__3.r = a[i__4].r * uscal.r - a[i__4].i *
+ uscal.i, q__3.i = a[i__4].r * uscal.i + a[
+ i__4].i * uscal.r;
+ i__5 = i__;
+ q__2.r = q__3.r * x[i__5].r - q__3.i * x[i__5].i,
+ q__2.i = q__3.r * x[i__5].i + q__3.i * x[
+ i__5].r;
+ q__1.r = csumj.r + q__2.r, q__1.i = csumj.i +
+ q__2.i;
+ csumj.r = q__1.r, csumj.i = q__1.i;
+/* L130: */
+ }
+ }
+ }
+
+ q__1.r = tscal, q__1.i = 0.f;
+ if (uscal.r == q__1.r && uscal.i == q__1.i) {
+
+/* Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j) */
+/* was not used to scale the dotproduct. */
+
+ i__3 = j;
+ i__4 = j;
+ q__1.r = x[i__4].r - csumj.r, q__1.i = x[i__4].i -
+ csumj.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]
+ ), dabs(r__2));
+ if (nounit) {
+ i__3 = j + j * a_dim1;
+ q__1.r = tscal * a[i__3].r, q__1.i = tscal * a[i__3]
+ .i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ if (tscal == 1.f) {
+ goto L145;
+ }
+ }
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.f) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1.f / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ } else if (tjj > 0.f) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0 and compute a solution to A**T *x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0.f, x[i__4].i = 0.f;
+/* L140: */
+ }
+ i__3 = j;
+ x[i__3].r = 1.f, x[i__3].i = 0.f;
+ *scale = 0.f;
+ xmax = 0.f;
+ }
+L145:
+ ;
+ } else {
+
+/* Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot */
+/* product has already been divided by 1/A(j,j). */
+
+ i__3 = j;
+ cladiv_(&q__2, &x[j], &tjjs);
+ q__1.r = q__2.r - csumj.r, q__1.i = q__2.i - csumj.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ }
+/* Computing MAX */
+ i__3 = j;
+ r__3 = xmax, r__4 = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[j]), dabs(r__2));
+ xmax = dmax(r__3,r__4);
+/* L150: */
+ }
+
+ } else {
+
+/* Solve A**H * x = b */
+
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) - sum A(k,j)*x(k). */
+/* k<>j */
+
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]),
+ dabs(r__2));
+ uscal.r = tscal, uscal.i = 0.f;
+ rec = 1.f / dmax(xmax,1.f);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5f;
+ if (nounit) {
+ r_cnjg(&q__2, &a[j + j * a_dim1]);
+ q__1.r = tscal * q__2.r, q__1.i = tscal * q__2.i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ }
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > 1.f) {
+
+/* Divide by A(j,j) when scaling x if A(j,j) > 1. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = rec * tjj;
+ rec = dmin(r__1,r__2);
+ cladiv_(&q__1, &uscal, &tjjs);
+ uscal.r = q__1.r, uscal.i = q__1.i;
+ }
+ if (rec < 1.f) {
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ csumj.r = 0.f, csumj.i = 0.f;
+ if (uscal.r == 1.f && uscal.i == 0.f) {
+
+/* If the scaling needed for A in the dot product is 1, */
+/* call CDOTC to perform the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ cdotc_(&q__1, &i__3, &a[j * a_dim1 + 1], &c__1, &x[1],
+ &c__1);
+ csumj.r = q__1.r, csumj.i = q__1.i;
+ } else if (j < *n) {
+ i__3 = *n - j;
+ cdotc_(&q__1, &i__3, &a[j + 1 + j * a_dim1], &c__1, &
+ x[j + 1], &c__1);
+ csumj.r = q__1.r, csumj.i = q__1.i;
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ r_cnjg(&q__4, &a[i__ + j * a_dim1]);
+ q__3.r = q__4.r * uscal.r - q__4.i * uscal.i,
+ q__3.i = q__4.r * uscal.i + q__4.i *
+ uscal.r;
+ i__4 = i__;
+ q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i,
+ q__2.i = q__3.r * x[i__4].i + q__3.i * x[
+ i__4].r;
+ q__1.r = csumj.r + q__2.r, q__1.i = csumj.i +
+ q__2.i;
+ csumj.r = q__1.r, csumj.i = q__1.i;
+/* L160: */
+ }
+ } else if (j < *n) {
+ i__3 = *n;
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ r_cnjg(&q__4, &a[i__ + j * a_dim1]);
+ q__3.r = q__4.r * uscal.r - q__4.i * uscal.i,
+ q__3.i = q__4.r * uscal.i + q__4.i *
+ uscal.r;
+ i__4 = i__;
+ q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i,
+ q__2.i = q__3.r * x[i__4].i + q__3.i * x[
+ i__4].r;
+ q__1.r = csumj.r + q__2.r, q__1.i = csumj.i +
+ q__2.i;
+ csumj.r = q__1.r, csumj.i = q__1.i;
+/* L170: */
+ }
+ }
+ }
+
+ q__1.r = tscal, q__1.i = 0.f;
+ if (uscal.r == q__1.r && uscal.i == q__1.i) {
+
+/* Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j) */
+/* was not used to scale the dotproduct. */
+
+ i__3 = j;
+ i__4 = j;
+ q__1.r = x[i__4].r - csumj.r, q__1.i = x[i__4].i -
+ csumj.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]
+ ), dabs(r__2));
+ if (nounit) {
+ r_cnjg(&q__2, &a[j + j * a_dim1]);
+ q__1.r = tscal * q__2.r, q__1.i = tscal * q__2.i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ if (tscal == 1.f) {
+ goto L185;
+ }
+ }
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.f) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1.f / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ } else if (tjj > 0.f) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0 and compute a solution to A**H *x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0.f, x[i__4].i = 0.f;
+/* L180: */
+ }
+ i__3 = j;
+ x[i__3].r = 1.f, x[i__3].i = 0.f;
+ *scale = 0.f;
+ xmax = 0.f;
+ }
+L185:
+ ;
+ } else {
+
+/* Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot */
+/* product has already been divided by 1/A(j,j). */
+
+ i__3 = j;
+ cladiv_(&q__2, &x[j], &tjjs);
+ q__1.r = q__2.r - csumj.r, q__1.i = q__2.i - csumj.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ }
+/* Computing MAX */
+ i__3 = j;
+ r__3 = xmax, r__4 = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[j]), dabs(r__2));
+ xmax = dmax(r__3,r__4);
+/* L190: */
+ }
+ }
+ *scale /= tscal;
+ }
+
+/* Scale the column norms by 1/TSCAL for return. */
+
+ if (tscal != 1.f) {
+ r__1 = 1.f / tscal;
+ sscal_(n, &r__1, &cnorm[1], &c__1);
+ }
+
+ return 0;
+
+/* End of CLATRS */
+
+} /* clatrs_ */
diff --git a/contrib/libs/clapack/clatrz.c b/contrib/libs/clapack/clatrz.c
new file mode 100644
index 0000000000..59faa33aa9
--- /dev/null
+++ b/contrib/libs/clapack/clatrz.c
@@ -0,0 +1,180 @@
+/* clatrz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int clatrz_(integer *m, integer *n, integer *l, complex *a, 
+	integer *lda, complex *tau, complex *work)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    complex q__1;
+
+    /* Builtin functions */
+    void r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    integer i__;
+    complex alpha;
+    extern /* Subroutine */ int clarz_(char *, integer *, integer *, integer *
+, complex *, integer *, complex *, complex *, integer *, complex *
+), clacgv_(integer *, complex *, integer *), clarfp_(
+	    integer *, complex *, complex *, integer *, complex *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLATRZ factors the M-by-(M+L) complex upper trapezoidal matrix */
+/* [ A1 A2 ] = [ A(1:M,1:M) A(1:M,N-L+1:N) ] as ( R 0 ) * Z by means */
+/* of unitary transformations, where Z is an (M+L)-by-(M+L) unitary */
+/* matrix and, R and A1 are M-by-M upper triangular matrices. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* L (input) INTEGER */
+/* The number of columns of the matrix A containing the */
+/* meaningful part of the Householder vectors. N-M >= L >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the leading M-by-N upper trapezoidal part of the */
+/* array A must contain the matrix to be factorized. */
+/* On exit, the leading M-by-M upper triangular part of A */
+/* contains the upper triangular matrix R, and elements N-L+1 to */
+/* N of the first M rows of A, with the array TAU, represent the */
+/* unitary matrix Z as a product of M elementary reflectors. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX array, dimension (M) */
+/* The scalar factors of the elementary reflectors. */
+
+/* WORK (workspace) COMPLEX array, dimension (M) */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* The factorization is obtained by Householder's method. The kth */
+/* transformation matrix, Z( k ), which is used to introduce zeros into */
+/* the ( m - k + 1 )th row of A, is given in the form */
+
+/* Z( k ) = ( I 0 ), */
+/* ( 0 T( k ) ) */
+
+/* where */
+
+/* T( k ) = I - tau*u( k )*u( k )', u( k ) = ( 1 ), */
+/* ( 0 ) */
+/* ( z( k ) ) */
+
+/* tau is a scalar and z( k ) is an l element vector. tau and z( k ) */
+/* are chosen to annihilate the elements of the kth row of A2. */
+
+/* The scalar tau is returned in the kth element of TAU and the vector */
+/* u( k ) in the kth row of A2, such that the elements of z( k ) are */
+/* in a( k, l + 1 ), ..., a( k, n ). The elements of R are returned in */
+/* the upper triangular part of A1. */
+
+/* Z is given by */
+
+/* Z = Z( 1 ) * Z( 2 ) * ... * Z( m ). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+    /* Parameter adjustments (f2c style: shift pointers so 1-based Fortran */
+    /* indexing a[i + j*a_dim1], tau[i], work[i] works directly in C). */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    if (*m == 0) {
+	return 0;
+    } else if (*m == *n) {
+
+/* M = N: A has no trapezoidal part A2, so Z = I and all TAU are zero. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__;
+	    tau[i__2].r = 0.f, tau[i__2].i = 0.f;
+/* L10: */
+	}
+	return 0;
+    }
+
+/* Reduce rows M, M-1, ..., 1 of the trapezoidal part in turn. */
+
+    for (i__ = *m; i__ >= 1; --i__) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* [ A(i,i) A(i,n-l+1:n) ] */
+
+/* NOTE(review): row i of A2 and the diagonal entry are conjugated */
+/* before CLARFP and the results (tau, alpha) conjugated back after- */
+/* wards, so the reflector is generated for the conjugated row -- */
+/* confirm against the reference clatrz.f. */
+
+	clacgv_(l, &a[i__ + (*n - *l + 1) * a_dim1], lda);
+	r_cnjg(&q__1, &a[i__ + i__ * a_dim1]);
+	alpha.r = q__1.r, alpha.i = q__1.i;
+	i__1 = *l + 1;
+	clarfp_(&i__1, &alpha, &a[i__ + (*n - *l + 1) * a_dim1], lda, &tau[
+		i__]);
+	i__1 = i__;
+	r_cnjg(&q__1, &tau[i__]);
+	tau[i__1].r = q__1.r, tau[i__1].i = q__1.i;
+
+/* Apply H(i) to A(1:i-1,i:n) from the right */
+/* (conjg(tau(i)) is passed to match the conjugated stored row). */
+
+	i__1 = i__ - 1;
+	i__2 = *n - i__ + 1;
+	r_cnjg(&q__1, &tau[i__]);
+	clarz_("Right", &i__1, &i__2, l, &a[i__ + (*n - *l + 1) * a_dim1], 
+		lda, &q__1, &a[i__ * a_dim1 + 1], lda, &work[1]);
+
+/* Store the (conjugated) scalar produced by CLARFP back on the */
+/* diagonal: A(i,i) becomes the entry of R. */
+
+	i__1 = i__ + i__ * a_dim1;
+	r_cnjg(&q__1, &alpha);
+	a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+
+/* L20: */
+    }
+
+    return 0;
+
+/* End of CLATRZ */
+
+} /* clatrz_ */
diff --git a/contrib/libs/clapack/clatzm.c b/contrib/libs/clapack/clatzm.c
new file mode 100644
index 0000000000..4daa3d44fd
--- /dev/null
+++ b/contrib/libs/clapack/clatzm.c
@@ -0,0 +1,196 @@
+/* clatzm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int clatzm_(char *side, integer *m, integer *n, complex *v, 
+	integer *incv, complex *tau, complex *c1, complex *c2, integer *ldc, 
+	complex *work)
+{
+    /* System generated locals */
+    integer c1_dim1, c1_offset, c2_dim1, c2_offset, i__1;
+    complex q__1;
+
+    /* Local variables */
+    extern /* Subroutine */ int cgerc_(integer *, integer *, complex *, 
+	    complex *, integer *, complex *, integer *, complex *, integer *), 
+	     cgemv_(char *, integer *, integer *, complex *, complex *, 
+	    integer *, complex *, integer *, complex *, complex *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int cgeru_(integer *, integer *, complex *, 
+	    complex *, integer *, complex *, integer *, complex *, integer *), 
+	     ccopy_(integer *, complex *, integer *, complex *, integer *), 
+	    caxpy_(integer *, complex *, complex *, integer *, complex *, 
+	    integer *), clacgv_(integer *, complex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine CUNMRZ. */
+
+/* CLATZM applies a Householder matrix generated by CTZRQF to a matrix. */
+
+/* Let P = I - tau*u*u', u = ( 1 ), */
+/* ( v ) */
+/* where v is an (m-1) vector if SIDE = 'L', or a (n-1) vector if */
+/* SIDE = 'R'. */
+
+/* If SIDE equals 'L', let */
+/* C = [ C1 ] 1 */
+/* [ C2 ] m-1 */
+/* n */
+/* Then C is overwritten by P*C. */
+
+/* If SIDE equals 'R', let */
+/* C = [ C1, C2 ] m */
+/* 1 n-1 */
+/* Then C is overwritten by C*P. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': form P * C */
+/* = 'R': form C * P */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* V (input) COMPLEX array, dimension */
+/* (1 + (M-1)*abs(INCV)) if SIDE = 'L' */
+/* (1 + (N-1)*abs(INCV)) if SIDE = 'R' */
+/* The vector v in the representation of P. V is not used */
+/* if TAU = 0. */
+
+/* INCV (input) INTEGER */
+/* The increment between elements of v. INCV <> 0 */
+
+/* TAU (input) COMPLEX */
+/* The value tau in the representation of P. */
+
+/* C1 (input/output) COMPLEX array, dimension */
+/* (LDC,N) if SIDE = 'L' */
+/* (M,1) if SIDE = 'R' */
+/* On entry, the n-vector C1 if SIDE = 'L', or the m-vector C1 */
+/* if SIDE = 'R'. */
+
+/* On exit, the first row of P*C if SIDE = 'L', or the first */
+/* column of C*P if SIDE = 'R'. */
+
+/* C2 (input/output) COMPLEX array, dimension */
+/* (LDC, N) if SIDE = 'L' */
+/* (LDC, N-1) if SIDE = 'R' */
+/* On entry, the (m - 1) x n matrix C2 if SIDE = 'L', or the */
+/* m x (n - 1) matrix C2 if SIDE = 'R'. */
+
+/* On exit, rows 2:m of P*C if SIDE = 'L', or columns 2:m of C*P */
+/* if SIDE = 'R'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the arrays C1 and C2. */
+/* LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX array, dimension */
+/* (N) if SIDE = 'L' */
+/* (M) if SIDE = 'R' */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments (f2c style: shift pointers so the 1-based */
+    /* Fortran indexing v[i], work[i], c1[i + j*c1_dim1], ... works in C). */
+    --v;
+    c2_dim1 = *ldc;
+    c2_offset = 1 + c2_dim1;
+    c2 -= c2_offset;
+    c1_dim1 = *ldc;
+    c1_offset = 1 + c1_dim1;
+    c1 -= c1_offset;
+    --work;
+
+    /* Function Body */
+
+/* Quick return: nothing to do when C is empty or P = I (tau = 0). */
+
+    if (min(*m,*n) == 0 || tau->r == 0.f && tau->i == 0.f) {
+	return 0;
+    }
+
+    if (lsame_(side, "L")) {
+
+/* w := conjg( C1 + v' * C2 ), accumulated in WORK(1:n). */
+
+	ccopy_(n, &c1[c1_offset], ldc, &work[1], &c__1);
+	clacgv_(n, &work[1], &c__1);
+	i__1 = *m - 1;
+	cgemv_("Conjugate transpose", &i__1, n, &c_b1, &c2[c2_offset], ldc, &
+		v[1], incv, &c_b1, &work[1], &c__1);
+
+/* [ C1 ] := [ C1 ] - tau* [ 1 ] * w' */
+/* [ C2 ]    [ C2 ]        [ v ] */
+
+	clacgv_(n, &work[1], &c__1);
+	q__1.r = -tau->r, q__1.i = -tau->i;
+	caxpy_(n, &q__1, &work[1], &c__1, &c1[c1_offset], ldc);
+	i__1 = *m - 1;
+	q__1.r = -tau->r, q__1.i = -tau->i;
+	cgeru_(&i__1, n, &q__1, &v[1], incv, &work[1], &c__1, &c2[c2_offset], 
+		ldc);
+
+    } else if (lsame_(side, "R")) {
+
+/* w := C1 + C2 * v, accumulated in WORK(1:m). */
+
+	ccopy_(m, &c1[c1_offset], &c__1, &work[1], &c__1);
+	i__1 = *n - 1;
+	cgemv_("No transpose", m, &i__1, &c_b1, &c2[c2_offset], ldc, &v[1], 
+		incv, &c_b1, &work[1], &c__1);
+
+/* [ C1, C2 ] := [ C1, C2 ] - tau* w * [ 1 , v'] */
+
+	q__1.r = -tau->r, q__1.i = -tau->i;
+	caxpy_(m, &q__1, &work[1], &c__1, &c1[c1_offset], &c__1);
+	i__1 = *n - 1;
+	q__1.r = -tau->r, q__1.i = -tau->i;
+	cgerc_(m, &i__1, &q__1, &work[1], &c__1, &v[1], incv, &c2[c2_offset], 
+		ldc);
+    }
+
+/* NOTE: a SIDE other than 'L'/'R' falls through with C unchanged; */
+/* this deprecated routine performs no argument checking (no XERBLA). */
+
+    return 0;
+
+/* End of CLATZM */
+
+} /* clatzm_ */
diff --git a/contrib/libs/clapack/clauu2.c b/contrib/libs/clapack/clauu2.c
new file mode 100644
index 0000000000..34f67f673e
--- /dev/null
+++ b/contrib/libs/clapack/clauu2.c
@@ -0,0 +1,203 @@
+/* clauu2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int clauu2_(char *uplo, integer *n, complex *a, integer *lda,
+	 integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    real r__1;
+    complex q__1;
+
+    /* Local variables */
+    integer i__;
+    real aii;
+    extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+	    *, complex *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *);
+    logical upper;
+    extern /* Subroutine */ int clacgv_(integer *, complex *, integer *),
+	    csscal_(integer *, real *, complex *, integer *), xerbla_(char *,
+	    integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAUU2 computes the product U * U' or L' * L, where the triangular */
+/* factor U or L is stored in the upper or lower triangular part of */
+/* the array A. */
+
+/* If UPLO = 'U' or 'u' then the upper triangle of the result is stored, */
+/* overwriting the factor U in A. */
+/* If UPLO = 'L' or 'l' then the lower triangle of the result is stored, */
+/* overwriting the factor L in A. */
+
+/* This is the unblocked form of the algorithm, calling Level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the triangular factor stored in the array A */
+/* is upper or lower triangular: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the triangular factor U or L. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the triangular factor U or L. */
+/* On exit, if UPLO = 'U', the upper triangle of A is */
+/* overwritten with the upper triangle of the product U * U'; */
+/* if UPLO = 'L', the lower triangle of A is overwritten with */
+/* the lower triangle of the product L' * L. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments: shift to 1-based Fortran indexing */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CLAUU2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/* Compute the product U * U'. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__ + i__ * a_dim1;
+	    aii = a[i__2].r; /* real part of diagonal entry A(i,i) */
+	    if (i__ < *n) {
+		i__2 = i__ + i__ * a_dim1;
+		i__3 = *n - i__;
+		cdotc_(&q__1, &i__3, &a[i__ + (i__ + 1) * a_dim1], lda, &a[
+			i__ + (i__ + 1) * a_dim1], lda); /* row i tail dotted with itself */
+		r__1 = aii * aii + q__1.r;
+		a[i__2].r = r__1, a[i__2].i = 0.f; /* new diagonal is real */
+		i__2 = *n - i__;
+		clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+		i__2 = i__ - 1;
+		i__3 = *n - i__;
+		q__1.r = aii, q__1.i = 0.f;
+		cgemv_("No transpose", &i__2, &i__3, &c_b1, &a[(i__ + 1) *
+			a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
+			q__1, &a[i__ * a_dim1 + 1], &c__1);
+		i__2 = *n - i__;
+		clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda); /* undo conjugation of row i tail */
+	    } else {
+		csscal_(&i__, &aii, &a[i__ * a_dim1 + 1], &c__1); /* i == n: just scale column n by A(n,n) */
+	    }
+/* L10: */
+	}
+
+    } else {
+
+/* Compute the product L' * L. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__ + i__ * a_dim1;
+	    aii = a[i__2].r; /* real part of diagonal entry A(i,i) */
+	    if (i__ < *n) {
+		i__2 = i__ + i__ * a_dim1;
+		i__3 = *n - i__;
+		cdotc_(&q__1, &i__3, &a[i__ + 1 + i__ * a_dim1], &c__1, &a[
+			i__ + 1 + i__ * a_dim1], &c__1); /* column i tail dotted with itself */
+		r__1 = aii * aii + q__1.r;
+		a[i__2].r = r__1, a[i__2].i = 0.f; /* new diagonal is real */
+		i__2 = i__ - 1;
+		clacgv_(&i__2, &a[i__ + a_dim1], lda);
+		i__2 = *n - i__;
+		i__3 = i__ - 1;
+		q__1.r = aii, q__1.i = 0.f;
+		cgemv_("Conjugate transpose", &i__2, &i__3, &c_b1, &a[i__ + 1
+		 	+ a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+			q__1, &a[i__ + a_dim1], lda);
+		i__2 = i__ - 1;
+		clacgv_(&i__2, &a[i__ + a_dim1], lda); /* undo conjugation of row i head */
+	    } else {
+		csscal_(&i__, &aii, &a[i__ + a_dim1], lda); /* i == n: just scale row n by A(n,n) */
+	    }
+/* L20: */
+	}
+    }
+
+    return 0;
+
+/* End of CLAUU2 */
+
+} /* clauu2_ */
diff --git a/contrib/libs/clapack/clauum.c b/contrib/libs/clapack/clauum.c
new file mode 100644
index 0000000000..c6816a7127
--- /dev/null
+++ b/contrib/libs/clapack/clauum.c
@@ -0,0 +1,217 @@
+/* clauum.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b21 = 1.f;
+
+/* Subroutine */ int clauum_(char *uplo, integer *n, complex *a, integer *lda,
+	 integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, ib, nb;
+    extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+	    integer *, complex *, complex *, integer *, complex *, integer *,
+	    complex *, complex *, integer *), cherk_(char *,
+	    char *, integer *, integer *, real *, complex *, integer *, real *
+, complex *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int ctrmm_(char *, char *, char *, char *,
+	    integer *, integer *, complex *, complex *, integer *, complex *,
+	    integer *);
+    logical upper;
+    extern /* Subroutine */ int clauu2_(char *, integer *, complex *, integer
+	    *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CLAUUM computes the product U * U' or L' * L, where the triangular */
+/* factor U or L is stored in the upper or lower triangular part of */
+/* the array A. */
+
+/* If UPLO = 'U' or 'u' then the upper triangle of the result is stored, */
+/* overwriting the factor U in A. */
+/* If UPLO = 'L' or 'l' then the lower triangle of the result is stored, */
+/* overwriting the factor L in A. */
+
+/* This is the blocked form of the algorithm, calling Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the triangular factor stored in the array A */
+/* is upper or lower triangular: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the triangular factor U or L. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the triangular factor U or L. */
+/* On exit, if UPLO = 'U', the upper triangle of A is */
+/* overwritten with the upper triangle of the product U * U'; */
+/* if UPLO = 'L', the lower triangle of A is overwritten with */
+/* the lower triangle of the product L' * L. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments: shift to 1-based Fortran indexing */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CLAUUM", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Determine the block size for this environment. */
+
+    nb = ilaenv_(&c__1, "CLAUUM", uplo, n, &c_n1, &c_n1, &c_n1); /* tuned block size from ILAENV */
+
+    if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code */
+
+	clauu2_(uplo, n, &a[a_offset], lda, info);
+    } else {
+
+/* Use blocked code */
+
+	if (upper) {
+
+/* Compute the product U * U'. */
+
+	    i__1 = *n;
+	    i__2 = nb;
+	    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+		i__3 = nb, i__4 = *n - i__ + 1;
+		ib = min(i__3,i__4); /* current block width (trailing block may be narrower) */
+		i__3 = i__ - 1;
+		ctrmm_("Right", "Upper", "Conjugate transpose", "Non-unit", &
+			i__3, &ib, &c_b1, &a[i__ + i__ * a_dim1], lda, &a[i__
+			* a_dim1 + 1], lda);
+		clauu2_("Upper", &ib, &a[i__ + i__ * a_dim1], lda, info); /* diagonal block via unblocked code */
+		if (i__ + ib <= *n) {
+		    i__3 = i__ - 1;
+		    i__4 = *n - i__ - ib + 1;
+		    cgemm_("No transpose", "Conjugate transpose", &i__3, &ib,
+			    &i__4, &c_b1, &a[(i__ + ib) * a_dim1 + 1], lda, &
+			    a[i__ + (i__ + ib) * a_dim1], lda, &c_b1, &a[i__ *
+			    a_dim1 + 1], lda);
+		    i__3 = *n - i__ - ib + 1;
+		    cherk_("Upper", "No transpose", &ib, &i__3, &c_b21, &a[
+			    i__ + (i__ + ib) * a_dim1], lda, &c_b21, &a[i__ +
+			    i__ * a_dim1], lda);
+		}
+/* L10: */
+	    }
+	} else {
+
+/* Compute the product L' * L. */
+
+	    i__2 = *n;
+	    i__1 = nb;
+	    for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+/* Computing MIN */
+		i__3 = nb, i__4 = *n - i__ + 1;
+		ib = min(i__3,i__4); /* current block width (trailing block may be narrower) */
+		i__3 = i__ - 1;
+		ctrmm_("Left", "Lower", "Conjugate transpose", "Non-unit", &
+			ib, &i__3, &c_b1, &a[i__ + i__ * a_dim1], lda, &a[i__
+			+ a_dim1], lda);
+		clauu2_("Lower", &ib, &a[i__ + i__ * a_dim1], lda, info); /* diagonal block via unblocked code */
+		if (i__ + ib <= *n) {
+		    i__3 = i__ - 1;
+		    i__4 = *n - i__ - ib + 1;
+		    cgemm_("Conjugate transpose", "No transpose", &ib, &i__3,
+			    &i__4, &c_b1, &a[i__ + ib + i__ * a_dim1], lda, &
+			    a[i__ + ib + a_dim1], lda, &c_b1, &a[i__ + a_dim1]
+, lda);
+		    i__3 = *n - i__ - ib + 1;
+		    cherk_("Lower", "Conjugate transpose", &ib, &i__3, &c_b21,
+			    &a[i__ + ib + i__ * a_dim1], lda, &c_b21, &a[i__
+			    + i__ * a_dim1], lda);
+		}
+/* L20: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of CLAUUM */
+
+} /* clauum_ */
diff --git a/contrib/libs/clapack/cpbcon.c b/contrib/libs/clapack/cpbcon.c
new file mode 100644
index 0000000000..6c32104eb7
--- /dev/null
+++ b/contrib/libs/clapack/cpbcon.c
@@ -0,0 +1,238 @@
+/* cpbcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cpbcon_(char *uplo, integer *n, integer *kd, complex *ab,
+	 integer *ldab, real *anorm, real *rcond, complex *work, real *rwork,
+	 integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1;
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double r_imag(complex *);
+
+    /* Local variables */
+    integer ix, kase;
+    real scale;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    logical upper;
+    extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+	    *, integer *, integer *);
+    extern integer icamax_(integer *, complex *, integer *);
+    real scalel;
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int clatbs_(char *, char *, char *, char *,
+	    integer *, integer *, complex *, integer *, complex *, real *,
+	    real *, integer *);
+    real scaleu;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real ainvnm;
+    extern /* Subroutine */ int csrscl_(integer *, real *, complex *, integer
+	    *);
+    char normin[1];
+    real smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPBCON estimates the reciprocal of the condition number (in the */
+/* 1-norm) of a complex Hermitian positive definite band matrix using */
+/* the Cholesky factorization A = U**H*U or A = L*L**H computed by */
+/* CPBTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangular factor stored in AB; */
+/* = 'L': Lower triangular factor stored in AB. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of sub-diagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input) COMPLEX array, dimension (LDAB,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**H*U or A = L*L**H of the band matrix A, stored in the */
+/* first KD+1 rows of the array. The j-th column of U or L is */
+/* stored in the j-th column of the array AB as follows: */
+/* if UPLO ='U', AB(kd+1+i-j,j) = U(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO ='L', AB(1+i-j,j) = L(i,j) for j<=i<=min(n,j+kd). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* ANORM (input) REAL */
+/* The 1-norm (or infinity-norm) of the Hermitian band matrix A. */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments: shift to 1-based Fortran indexing */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*ldab < *kd + 1) {
+	*info = -5;
+    } else if (*anorm < 0.f) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPBCON", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *rcond = 0.f;
+    if (*n == 0) {
+	*rcond = 1.f;
+	return 0;
+    } else if (*anorm == 0.f) {
+	return 0;
+    }
+
+    smlnum = slamch_("Safe minimum"); /* underflow threshold used in the overflow guard below */
+
+/* Estimate the 1-norm of the inverse. */
+
+    kase = 0;
+    *(unsigned char *)normin = 'N'; /* first CLATBS call has no scaling info yet */
+L10:
+    clacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave); /* reverse-communication norm estimator */
+    if (kase != 0) {
+	if (upper) {
+
+/* Multiply by inv(U'). */
+
+	    clatbs_("Upper", "Conjugate transpose", "Non-unit", normin, n, kd,
+		     &ab[ab_offset], ldab, &work[1], &scalel, &rwork[1], info);
+	    *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(U). */
+
+	    clatbs_("Upper", "No transpose", "Non-unit", normin, n, kd, &ab[
+		    ab_offset], ldab, &work[1], &scaleu, &rwork[1], info);
+	} else {
+
+/* Multiply by inv(L). */
+
+	    clatbs_("Lower", "No transpose", "Non-unit", normin, n, kd, &ab[
+		    ab_offset], ldab, &work[1], &scalel, &rwork[1], info);
+	    *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(L'). */
+
+	    clatbs_("Lower", "Conjugate transpose", "Non-unit", normin, n, kd,
+		     &ab[ab_offset], ldab, &work[1], &scaleu, &rwork[1], info);
+	}
+
+/* Multiply by 1/SCALE if doing so will not cause overflow. */
+
+	scale = scalel * scaleu;
+	if (scale != 1.f) {
+	    ix = icamax_(n, &work[1], &c__1); /* index of largest |work(i)| */
+	    i__1 = ix;
+	    if (scale < ((r__1 = work[i__1].r, dabs(r__1)) + (r__2 = r_imag(&
+		    work[ix]), dabs(r__2))) * smlnum || scale == 0.f) {
+		goto L20; /* rescaling would overflow: give up with RCOND = 0 */
+	    }
+	    csrscl_(n, &scale, &work[1], &c__1);
+	}
+	goto L10;
+    }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+    if (ainvnm != 0.f) {
+	*rcond = 1.f / ainvnm / *anorm;
+    }
+
+L20:
+
+    return 0;
+
+/* End of CPBCON */
+
+} /* cpbcon_ */
diff --git a/contrib/libs/clapack/cpbequ.c b/contrib/libs/clapack/cpbequ.c
new file mode 100644
index 0000000000..d83df718d3
--- /dev/null
+++ b/contrib/libs/clapack/cpbequ.c
@@ -0,0 +1,204 @@
+/* cpbequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cpbequ_(char *uplo, integer *n, integer *kd, complex *ab,
+	 integer *ldab, real *s, real *scond, real *amax, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2;
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j;
+    real smin;
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPBEQU computes row and column scalings intended to equilibrate a */
+/* Hermitian positive definite band matrix A and reduce its condition */
+/* number (with respect to the two-norm). S contains the scale factors, */
+/* S(i) = 1/sqrt(A(i,i)), chosen so that the scaled matrix B with */
+/* elements B(i,j) = S(i)*A(i,j)*S(j) has ones on the diagonal. This */
+/* choice of S puts the condition number of B within a factor N of the */
+/* smallest possible condition number over all possible diagonal */
+/* scalings. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangular of A is stored; */
+/* = 'L': Lower triangular of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input) COMPLEX array, dimension (LDAB,N) */
+/* The upper or lower triangle of the Hermitian band matrix A, */
+/* stored in the first KD+1 rows of the array. The j-th column */
+/* of A is stored in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array A. LDAB >= KD+1. */
+
+/* S (output) REAL array, dimension (N) */
+/* If INFO = 0, S contains the scale factors for A. */
+
+/* SCOND (output) REAL */
+/* If INFO = 0, S contains the ratio of the smallest S(i) to */
+/* the largest S(i). If SCOND >= 0.1 and AMAX is neither too */
+/* large nor too small, it is not worth scaling by S. */
+
+/* AMAX (output) REAL */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, the i-th diagonal element is nonpositive. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments: shift to 1-based Fortran indexing */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --s;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*ldab < *kd + 1) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPBEQU", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	*scond = 1.f;
+	*amax = 0.f;
+	return 0;
+    }
+
+    if (upper) {
+	j = *kd + 1; /* diagonal of A sits in row KD+1 of AB (upper storage) */
+    } else {
+	j = 1; /* diagonal of A sits in row 1 of AB (lower storage) */
+    }
+
+/* Initialize SMIN and AMAX. */
+
+    i__1 = j + ab_dim1;
+    s[1] = ab[i__1].r; /* diagonal of a Hermitian matrix is real */
+    smin = s[1];
+    *amax = s[1];
+
+/* Find the minimum and maximum diagonal elements. */
+
+    i__1 = *n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+	i__2 = j + i__ * ab_dim1;
+	s[i__] = ab[i__2].r;
+/* Computing MIN */
+	r__1 = smin, r__2 = s[i__];
+	smin = dmin(r__1,r__2);
+/* Computing MAX */
+	r__1 = *amax, r__2 = s[i__];
+	*amax = dmax(r__1,r__2);
+/* L10: */
+    }
+
+    if (smin <= 0.f) {
+
+/* Find the first non-positive diagonal element and return. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (s[i__] <= 0.f) {
+		*info = i__; /* report its index: matrix is not positive definite */
+		return 0;
+	    }
+/* L20: */
+	}
+    } else {
+
+/* Set the scale factors to the reciprocals */
+/* of the diagonal elements. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    s[i__] = 1.f / sqrt(s[i__]);
+/* L30: */
+	}
+
+/* Compute SCOND = min(S(I)) / max(S(I)) */
+
+	*scond = sqrt(smin) / sqrt(*amax);
+    }
+    return 0;
+
+/* End of CPBEQU */
+
+} /* cpbequ_ */
diff --git a/contrib/libs/clapack/cpbrfs.c b/contrib/libs/clapack/cpbrfs.c
new file mode 100644
index 0000000000..43625c6dac
--- /dev/null
+++ b/contrib/libs/clapack/cpbrfs.c
@@ -0,0 +1,482 @@
+/* cpbrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int cpbrfs_(char *uplo, integer *n, integer *kd, integer *
+ nrhs, complex *ab, integer *ldab, complex *afb, integer *ldafb,
+ complex *b, integer *ldb, complex *x, integer *ldx, real *ferr, real *
+ berr, complex *work, real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, afb_dim1, afb_offset, b_dim1, b_offset,
+ x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer i__, j, k, l;
+ real s, xk;
+ integer nz;
+ real eps;
+ integer kase;
+ real safe1, safe2;
+ extern /* Subroutine */ int chbmv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *);
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), caxpy_(integer *, complex *, complex *,
+ integer *, complex *, integer *);
+ integer count;
+ logical upper;
+ extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+ *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *), cpbtrs_(
+ char *, integer *, integer *, integer *, complex *, integer *,
+ complex *, integer *, integer *);
+ real lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPBRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is Hermitian positive definite */
+/* and banded, and provides error bounds and backward error estimates */
+/* for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AB (input) COMPLEX array, dimension (LDAB,N) */
+/* The upper or lower triangle of the Hermitian band matrix A, */
+/* stored in the first KD+1 rows of the array. The j-th column */
+/* of A is stored in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* AFB (input) COMPLEX array, dimension (LDAFB,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**H*U or A = L*L**H of the band matrix A as computed by */
+/* CPBTRF, in the same storage format as A (see AB). */
+
+/* LDAFB (input) INTEGER */
+/* The leading dimension of the array AFB. LDAFB >= KD+1. */
+
+/* B (input) COMPLEX array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by CPBTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ afb_dim1 = *ldafb;
+ afb_offset = 1 + afb_dim1;
+ afb -= afb_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kd < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*ldab < *kd + 1) {
+ *info = -6;
+ } else if (*ldafb < *kd + 1) {
+ *info = -8;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldx < max(1,*n)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CPBRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.f;
+ berr[j] = 0.f;
+/* L10: */
+ }
+ return 0;
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+/* Computing MIN */
+ i__1 = *n + 1, i__2 = (*kd << 1) + 2;
+ nz = min(i__1,i__2);
+ eps = slamch_("Epsilon");
+ safmin = slamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+ ccopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+ q__1.r = -1.f, q__1.i = -0.f;
+ chbmv_(uplo, n, kd, &q__1, &ab[ab_offset], ldab, &x[j * x_dim1 + 1], &
+ c__1, &c_b1, &work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[i__] = (r__1 = b[i__3].r, dabs(r__1)) + (r__2 = r_imag(&b[
+ i__ + j * b_dim1]), dabs(r__2));
+/* L30: */
+ }
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+ if (upper) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ i__3 = k + j * x_dim1;
+ xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[k + j
+ * x_dim1]), dabs(r__2));
+ l = *kd + 1 - k;
+/* Computing MAX */
+ i__3 = 1, i__4 = k - *kd;
+ i__5 = k - 1;
+ for (i__ = max(i__3,i__4); i__ <= i__5; ++i__) {
+ i__3 = l + i__ + k * ab_dim1;
+ rwork[i__] += ((r__1 = ab[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&ab[l + i__ + k * ab_dim1]), dabs(r__2))) *
+ xk;
+ i__3 = l + i__ + k * ab_dim1;
+ i__4 = i__ + j * x_dim1;
+ s += ((r__1 = ab[i__3].r, dabs(r__1)) + (r__2 = r_imag(&
+ ab[l + i__ + k * ab_dim1]), dabs(r__2))) * ((r__3
+ = x[i__4].r, dabs(r__3)) + (r__4 = r_imag(&x[i__
+ + j * x_dim1]), dabs(r__4)));
+/* L40: */
+ }
+ i__5 = *kd + 1 + k * ab_dim1;
+ rwork[k] = rwork[k] + (r__1 = ab[i__5].r, dabs(r__1)) * xk +
+ s;
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ i__5 = k + j * x_dim1;
+ xk = (r__1 = x[i__5].r, dabs(r__1)) + (r__2 = r_imag(&x[k + j
+ * x_dim1]), dabs(r__2));
+ i__5 = k * ab_dim1 + 1;
+ rwork[k] += (r__1 = ab[i__5].r, dabs(r__1)) * xk;
+ l = 1 - k;
+/* Computing MIN */
+ i__3 = *n, i__4 = k + *kd;
+ i__5 = min(i__3,i__4);
+ for (i__ = k + 1; i__ <= i__5; ++i__) {
+ i__3 = l + i__ + k * ab_dim1;
+ rwork[i__] += ((r__1 = ab[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&ab[l + i__ + k * ab_dim1]), dabs(r__2))) *
+ xk;
+ i__3 = l + i__ + k * ab_dim1;
+ i__4 = i__ + j * x_dim1;
+ s += ((r__1 = ab[i__3].r, dabs(r__1)) + (r__2 = r_imag(&
+ ab[l + i__ + k * ab_dim1]), dabs(r__2))) * ((r__3
+ = x[i__4].r, dabs(r__3)) + (r__4 = r_imag(&x[i__
+ + j * x_dim1]), dabs(r__4)));
+/* L60: */
+ }
+ rwork[k] += s;
+/* L70: */
+ }
+ }
+ s = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+/* Computing MAX */
+ i__5 = i__;
+ r__3 = s, r__4 = ((r__1 = work[i__5].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2))) / rwork[i__];
+ s = dmax(r__3,r__4);
+ } else {
+/* Computing MAX */
+ i__5 = i__;
+ r__3 = s, r__4 = ((r__1 = work[i__5].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + safe1) / (rwork[i__]
+ + safe1);
+ s = dmax(r__3,r__4);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ cpbtrs_(uplo, n, kd, &c__1, &afb[afb_offset], ldafb, &work[1], n,
+ info);
+ caxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use CLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+ i__5 = i__;
+ rwork[i__] = (r__1 = work[i__5].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+ i__];
+ } else {
+ i__5 = i__;
+ rwork[i__] = (r__1 = work[i__5].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+ i__] + safe1;
+ }
+/* L90: */
+ }
+
+ kase = 0;
+L100:
+ clacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+ cpbtrs_(uplo, n, kd, &c__1, &afb[afb_offset], ldafb, &work[1],
+ n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__5 = i__;
+ i__3 = i__;
+ i__4 = i__;
+ q__1.r = rwork[i__3] * work[i__4].r, q__1.i = rwork[i__3]
+ * work[i__4].i;
+ work[i__5].r = q__1.r, work[i__5].i = q__1.i;
+/* L110: */
+ }
+ } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__5 = i__;
+ i__3 = i__;
+ i__4 = i__;
+ q__1.r = rwork[i__3] * work[i__4].r, q__1.i = rwork[i__3]
+ * work[i__4].i;
+ work[i__5].r = q__1.r, work[i__5].i = q__1.i;
+/* L120: */
+ }
+ cpbtrs_(uplo, n, kd, &c__1, &afb[afb_offset], ldafb, &work[1],
+ n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__5 = i__ + j * x_dim1;
+ r__3 = lstres, r__4 = (r__1 = x[i__5].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[i__ + j * x_dim1]), dabs(r__2));
+ lstres = dmax(r__3,r__4);
+/* L130: */
+ }
+ if (lstres != 0.f) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of CPBRFS */
+
+} /* cpbrfs_ */
diff --git a/contrib/libs/clapack/cpbstf.c b/contrib/libs/clapack/cpbstf.c
new file mode 100644
index 0000000000..37703305b0
--- /dev/null
+++ b/contrib/libs/clapack/cpbstf.c
@@ -0,0 +1,334 @@
+/* cpbstf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b9 = -1.f;
+
+/* Subroutine */ int cpbstf_(char *uplo, integer *n, integer *kd, complex *ab,
+	integer *ldab, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3;
+    real r__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer j, m, km;
+    real ajj;
+    integer kld;
+    extern /* Subroutine */ int cher_(char *, integer *, real *, complex *,
+	    integer *, complex *, integer *);
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int clacgv_(integer *, complex *, integer *),
+	    csscal_(integer *, real *, complex *, integer *), xerbla_(char *,
+	    integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPBSTF computes a split Cholesky factorization of a complex */
+/* Hermitian positive definite band matrix A. */
+
+/* This routine is designed to be used in conjunction with CHBGST. */
+
+/* The factorization has the form A = S**H*S where S is a band matrix */
+/* of the same bandwidth as A and the following structure: */
+
+/* S = ( U ) */
+/* ( M L ) */
+
+/* where U is upper triangular of order m = (n+kd)/2, and L is lower */
+/* triangular of order n-m. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) COMPLEX array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first kd+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, if INFO = 0, the factor S from the split Cholesky */
+/* factorization A = S**H*S. See Further Details. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the factorization could not be completed, */
+/* because the updated element a(i,i) was negative; the */
+/* matrix A is not positive definite. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* N = 7, KD = 2: */
+
+/* S = ( s11 s12 s13 ) */
+/* ( s22 s23 s24 ) */
+/* ( s33 s34 ) */
+/* ( s44 ) */
+/* ( s53 s54 s55 ) */
+/* ( s64 s65 s66 ) */
+/* ( s75 s76 s77 ) */
+
+/* If UPLO = 'U', the array AB holds: */
+
+/* on entry: on exit: */
+
+/* * * a13 a24 a35 a46 a57 * * s13 s24 s53' s64' s75' */
+/* * a12 a23 a34 a45 a56 a67 * s12 s23 s34 s54' s65' s76' */
+/* a11 a22 a33 a44 a55 a66 a77 s11 s22 s33 s44 s55 s66 s77 */
+
+/* If UPLO = 'L', the array AB holds: */
+
+/* on entry: on exit: */
+
+/* a11 a22 a33 a44 a55 a66 a77 s11 s22 s33 s44 s55 s66 s77 */
+/* a21 a32 a43 a54 a65 a76 * s12' s23' s34' s54 s65 s76 * */
+/* a31 a42 a53 a64 a75 * * s13' s24' s53 s64 s75 * * */
+
+/* Array elements marked * are not used by the routine; s12' denotes */
+/* conjg(s12); the diagonal elements of S are real. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*ldab < *kd + 1) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPBSTF", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Computing MAX */
+    i__1 = 1, i__2 = *ldab - 1;
+    kld = max(i__1,i__2); /* LDAB-1 (floored at 1): increment used below to step along a row of the band array AB */
+
+/* Set the splitting point m. */
+
+    m = (*n + *kd) / 2;
+
+    if (upper) {
+
+/* Factorize A(m+1:n,m+1:n) as L**H*L, and update A(1:m,1:m). */
+
+	i__1 = m + 1;
+	for (j = *n; j >= i__1; --j) {
+
+/* Compute s(j,j) and test for non-positive-definiteness. */
+
+	    i__2 = *kd + 1 + j * ab_dim1;
+	    ajj = ab[i__2].r;
+	    if (ajj <= 0.f) {
+		i__2 = *kd + 1 + j * ab_dim1;
+		ab[i__2].r = ajj, ab[i__2].i = 0.f;
+		goto L50; /* updated a(j,j) not positive: report INFO = j */
+	    }
+	    ajj = sqrt(ajj);
+	    i__2 = *kd + 1 + j * ab_dim1;
+	    ab[i__2].r = ajj, ab[i__2].i = 0.f;
+/* Computing MIN */
+	    i__2 = j - 1;
+	    km = min(i__2,*kd);
+
+/* Compute elements j-km:j-1 of the j-th column and update the */
+/* leading submatrix within the band. */
+
+	    r__1 = 1.f / ajj;
+	    csscal_(&km, &r__1, &ab[*kd + 1 - km + j * ab_dim1], &c__1);
+	    cher_("Upper", &km, &c_b9, &ab[*kd + 1 - km + j * ab_dim1], &c__1,
+		    &ab[*kd + 1 + (j - km) * ab_dim1], &kld);
+/* L10: */
+	}
+
+/* Factorize the updated submatrix A(1:m,1:m) as U**H*U. */
+
+	i__1 = m;
+	for (j = 1; j <= i__1; ++j) {
+
+/* Compute s(j,j) and test for non-positive-definiteness. */
+
+	    i__2 = *kd + 1 + j * ab_dim1;
+	    ajj = ab[i__2].r;
+	    if (ajj <= 0.f) {
+		i__2 = *kd + 1 + j * ab_dim1;
+		ab[i__2].r = ajj, ab[i__2].i = 0.f;
+		goto L50; /* updated a(j,j) not positive: report INFO = j */
+	    }
+	    ajj = sqrt(ajj);
+	    i__2 = *kd + 1 + j * ab_dim1;
+	    ab[i__2].r = ajj, ab[i__2].i = 0.f;
+/* Computing MIN */
+	    i__2 = *kd, i__3 = m - j;
+	    km = min(i__2,i__3);
+
+/* Compute elements j+1:j+km of the j-th row and update the */
+/* trailing submatrix within the band. */
+
+	    if (km > 0) {
+		r__1 = 1.f / ajj;
+		csscal_(&km, &r__1, &ab[*kd + (j + 1) * ab_dim1], &kld);
+		clacgv_(&km, &ab[*kd + (j + 1) * ab_dim1], &kld);
+		cher_("Upper", &km, &c_b9, &ab[*kd + (j + 1) * ab_dim1], &kld,
+			&ab[*kd + 1 + (j + 1) * ab_dim1], &kld);
+		clacgv_(&km, &ab[*kd + (j + 1) * ab_dim1], &kld);
+	    }
+/* L20: */
+	}
+    } else {
+
+/* Factorize A(m+1:n,m+1:n) as L**H*L, and update A(1:m,1:m). */
+
+	i__1 = m + 1;
+	for (j = *n; j >= i__1; --j) {
+
+/* Compute s(j,j) and test for non-positive-definiteness. */
+
+	    i__2 = j * ab_dim1 + 1;
+	    ajj = ab[i__2].r;
+	    if (ajj <= 0.f) {
+		i__2 = j * ab_dim1 + 1;
+		ab[i__2].r = ajj, ab[i__2].i = 0.f;
+		goto L50; /* updated a(j,j) not positive: report INFO = j */
+	    }
+	    ajj = sqrt(ajj);
+	    i__2 = j * ab_dim1 + 1;
+	    ab[i__2].r = ajj, ab[i__2].i = 0.f;
+/* Computing MIN */
+	    i__2 = j - 1;
+	    km = min(i__2,*kd);
+
+/* Compute elements j-km:j-1 of the j-th row and update the */
+/* trailing submatrix within the band. */
+
+	    r__1 = 1.f / ajj;
+	    csscal_(&km, &r__1, &ab[km + 1 + (j - km) * ab_dim1], &kld);
+	    clacgv_(&km, &ab[km + 1 + (j - km) * ab_dim1], &kld);
+	    cher_("Lower", &km, &c_b9, &ab[km + 1 + (j - km) * ab_dim1], &kld,
+		    &ab[(j - km) * ab_dim1 + 1], &kld);
+	    clacgv_(&km, &ab[km + 1 + (j - km) * ab_dim1], &kld);
+/* L30: */
+	}
+
+/* Factorize the updated submatrix A(1:m,1:m) as U**H*U. */
+
+	i__1 = m;
+	for (j = 1; j <= i__1; ++j) {
+
+/* Compute s(j,j) and test for non-positive-definiteness. */
+
+	    i__2 = j * ab_dim1 + 1;
+	    ajj = ab[i__2].r;
+	    if (ajj <= 0.f) {
+		i__2 = j * ab_dim1 + 1;
+		ab[i__2].r = ajj, ab[i__2].i = 0.f;
+		goto L50; /* updated a(j,j) not positive: report INFO = j */
+	    }
+	    ajj = sqrt(ajj);
+	    i__2 = j * ab_dim1 + 1;
+	    ab[i__2].r = ajj, ab[i__2].i = 0.f;
+/* Computing MIN */
+	    i__2 = *kd, i__3 = m - j;
+	    km = min(i__2,i__3);
+
+/* Compute elements j+1:j+km of the j-th column and update the */
+/* trailing submatrix within the band. */
+
+	    if (km > 0) {
+		r__1 = 1.f / ajj;
+		csscal_(&km, &r__1, &ab[j * ab_dim1 + 2], &c__1);
+		cher_("Lower", &km, &c_b9, &ab[j * ab_dim1 + 2], &c__1, &ab[(
+			j + 1) * ab_dim1 + 1], &kld);
+	    }
+/* L40: */
+	}
+    }
+    return 0;
+
+L50:
+    *info = j; /* index of the first non-positive updated diagonal entry */
+    return 0;
+
+/* End of CPBSTF */
+
+} /* cpbstf_ */
diff --git a/contrib/libs/clapack/cpbsv.c b/contrib/libs/clapack/cpbsv.c
new file mode 100644
index 0000000000..4b59d799c4
--- /dev/null
+++ b/contrib/libs/clapack/cpbsv.c
@@ -0,0 +1,182 @@
+/* cpbsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cpbsv_(char *uplo, integer *n, integer *kd, integer *
+	nrhs, complex *ab, integer *ldab, complex *b, integer *ldb, integer *
+	info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *), cpbtrf_(
+	    char *, integer *, integer *, complex *, integer *, integer *), cpbtrs_(char *, integer *, integer *, integer *, complex
+	    *, integer *, complex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPBSV computes the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N Hermitian positive definite band matrix and X */
+/* and B are N-by-NRHS matrices. */
+
+/* The Cholesky decomposition is used to factor A as */
+/* A = U**H * U, if UPLO = 'U', or */
+/* A = L * L**H, if UPLO = 'L', */
+/* where U is an upper triangular band matrix, and L is a lower */
+/* triangular band matrix, with the same number of superdiagonals or */
+/* subdiagonals as A. The factored form of A is then used to solve the */
+/* system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AB (input/output) COMPLEX array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(KD+1+i-j,j) = A(i,j) for max(1,j-KD)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(N,j+KD). */
+/* See below for further details. */
+
+/* On exit, if INFO = 0, the triangular factor U or L from the */
+/* Cholesky factorization A = U**H*U or A = L*L**H of the band */
+/* matrix A, in the same storage format as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i of A is not */
+/* positive definite, so the factorization could not be */
+/* completed, and the solution has not been computed. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* N = 6, KD = 2, and UPLO = 'U': */
+
+/* On entry: On exit: */
+
+/* * * a13 a24 a35 a46 * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+
+/* Similarly, if UPLO = 'L' the format of A is as follows: */
+
+/* On entry: On exit: */
+
+/* a11 a22 a33 a44 a55 a66 l11 l22 l33 l44 l55 l66 */
+/* a21 a32 a43 a54 a65 * l21 l32 l43 l54 l65 * */
+/* a31 a42 a53 a64 * * l31 l42 l53 l64 * * */
+
+/* Array elements marked * are not used by the routine. */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (*ldab < *kd + 1) {
+	*info = -6;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPBSV ", &i__1); /* routine name padded to 6 chars for XERBLA */
+	return 0;
+    }
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+    cpbtrf_(uplo, n, kd, &ab[ab_offset], ldab, info);
+    if (*info == 0) { /* solve only if the factorization succeeded; INFO > 0 is passed through */
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+	cpbtrs_(uplo, n, kd, nrhs, &ab[ab_offset], ldab, &b[b_offset], ldb,
+		info);
+
+    }
+    return 0;
+
+/* End of CPBSV */
+
+} /* cpbsv_ */
diff --git a/contrib/libs/clapack/cpbsvx.c b/contrib/libs/clapack/cpbsvx.c
new file mode 100644
index 0000000000..f3ad88c03f
--- /dev/null
+++ b/contrib/libs/clapack/cpbsvx.c
@@ -0,0 +1,523 @@
+/* cpbsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cpbsvx_(char *fact, char *uplo, integer *n, integer *kd,
+ integer *nrhs, complex *ab, integer *ldab, complex *afb, integer *
+ ldafb, char *equed, real *s, complex *b, integer *ldb, complex *x,
+ integer *ldx, real *rcond, real *ferr, real *berr, complex *work,
+ real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, afb_dim1, afb_offset, b_dim1, b_offset,
+ x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, j, j1, j2;
+ real amax, smin, smax;
+ extern logical lsame_(char *, char *);
+ real scond, anorm;
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *);
+ logical equil, rcequ, upper;
+ extern doublereal clanhb_(char *, char *, integer *, integer *, complex *,
+ integer *, real *);
+ extern /* Subroutine */ int claqhb_(char *, integer *, integer *, complex
+ *, integer *, real *, real *, real *, char *),
+ cpbcon_(char *, integer *, integer *, complex *, integer *, real *
+, real *, complex *, real *, integer *);
+ extern doublereal slamch_(char *);
+ logical nofact;
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), xerbla_(char *,
+ integer *), cpbequ_(char *, integer *, integer *, complex
+ *, integer *, real *, real *, real *, integer *), cpbrfs_(
+ char *, integer *, integer *, integer *, complex *, integer *,
+ complex *, integer *, complex *, integer *, complex *, integer *,
+ real *, real *, complex *, real *, integer *);
+ real bignum;
+ extern /* Subroutine */ int cpbtrf_(char *, integer *, integer *, complex
+ *, integer *, integer *);
+ integer infequ;
+ extern /* Subroutine */ int cpbtrs_(char *, integer *, integer *, integer
+ *, complex *, integer *, complex *, integer *, integer *);
+ real smlnum;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPBSVX uses the Cholesky factorization A = U**H*U or A = L*L**H to */
+/* compute the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N Hermitian positive definite band matrix and X */
+/* and B are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/* the system: */
+/* diag(S) * A * diag(S) * inv(diag(S)) * X = diag(S) * B */
+/* Whether or not the system will be equilibrated depends on the */
+/* scaling of the matrix A, but if equilibration is used, A is */
+/* overwritten by diag(S)*A*diag(S) and B by diag(S)*B. */
+
+/* 2. If FACT = 'N' or 'E', the Cholesky decomposition is used to */
+/* factor the matrix A (after equilibration if FACT = 'E') as */
+/* A = U**H * U, if UPLO = 'U', or */
+/* A = L * L**H, if UPLO = 'L', */
+/* where U is an upper triangular band matrix, and L is a lower */
+/* triangular band matrix. */
+
+/* 3. If the leading i-by-i principal minor is not positive definite, */
+/* then the routine returns with INFO = i. Otherwise, the factored */
+/* form of A is used to estimate the condition number of the matrix */
+/* A. If the reciprocal of the condition number is less than machine */
+/* precision, INFO = N+1 is returned as a warning, but the routine */
+/* still goes on to solve for X and compute error bounds as */
+/* described below. */
+
+/* 4. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 5. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* 6. If equilibration was used, the matrix X is premultiplied by */
+/* diag(S) so that it solves the original system before */
+/* equilibration. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of the matrix A is */
+/* supplied on entry, and if not, whether the matrix A should be */
+/* equilibrated before it is factored. */
+/* = 'F': On entry, AFB contains the factored form of A. */
+/* If EQUED = 'Y', the matrix A has been equilibrated */
+/* with scaling factors given by S. AB and AFB will not */
+/* be modified. */
+/* = 'N': The matrix A will be copied to AFB and factored. */
+/* = 'E': The matrix A will be equilibrated if necessary, then */
+/* copied to AFB and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right-hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AB (input/output) COMPLEX array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first KD+1 rows of the array, except */
+/* if FACT = 'F' and EQUED = 'Y', then A must contain the */
+/* equilibrated matrix diag(S)*A*diag(S). The j-th column of A */
+/* is stored in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(KD+1+i-j,j) = A(i,j) for max(1,j-KD)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(N,j+KD). */
+/* See below for further details. */
+
+/* On exit, if FACT = 'E' and EQUED = 'Y', A is overwritten by */
+/* diag(S)*A*diag(S). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* AFB (input or output) COMPLEX array, dimension (LDAFB,N) */
+/* If FACT = 'F', then AFB is an input argument and on entry */
+/* contains the triangular factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H of the band matrix */
+/* A, in the same storage format as A (see AB). If EQUED = 'Y', */
+/* then AFB is the factored form of the equilibrated matrix A. */
+
+/* If FACT = 'N', then AFB is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H. */
+
+/* If FACT = 'E', then AFB is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H of the equilibrated */
+/* matrix A (see the description of A for the form of the */
+/* equilibrated matrix). */
+
+/* LDAFB (input) INTEGER */
+/* The leading dimension of the array AFB. LDAFB >= KD+1. */
+
+/* EQUED (input or output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration (always true if FACT = 'N'). */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+/* EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/* output argument. */
+
+/* S (input or output) REAL array, dimension (N) */
+/* The scale factors for A; not accessed if EQUED = 'N'. S is */
+/* an input argument if FACT = 'F'; otherwise, S is an output */
+/* argument. If FACT = 'F' and EQUED = 'Y', each element of S */
+/* must be positive. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if EQUED = 'N', B is not modified; if EQUED = 'Y', */
+/* B is overwritten by diag(S) * B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X to */
+/* the original system of equations. Note that if EQUED = 'Y', */
+/* A and B are modified on exit, and the solution to the */
+/* equilibrated system is inv(diag(S))*X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) REAL */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A after equilibration (if done). If RCOND is less than the */
+/* machine precision (in particular, if RCOND = 0), the matrix */
+/* is singular to working precision. This condition is */
+/* indicated by a return code of INFO > 0. */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: the leading minor of order i of A is */
+/* not positive definite, so the factorization */
+/* could not be completed, and the solution has not */
+/* been computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* N = 6, KD = 2, and UPLO = 'U': */
+
+/* Two-dimensional storage of the Hermitian matrix A: */
+
+/* a11 a12 a13 */
+/* a22 a23 a24 */
+/* a33 a34 a35 */
+/* a44 a45 a46 */
+/* a55 a56 */
+/* (aij=conjg(aji)) a66 */
+
+/* Band storage of the upper triangle of A: */
+
+/* * * a13 a24 a35 a46 */
+/* * a12 a23 a34 a45 a56 */
+/* a11 a22 a33 a44 a55 a66 */
+
+/* Similarly, if UPLO = 'L' the format of A is as follows: */
+
+/* a11 a22 a33 a44 a55 a66 */
+/* a21 a32 a43 a54 a65 * */
+/* a31 a42 a53 a64 * * */
+
+/* Array elements marked * are not used by the routine. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ afb_dim1 = *ldafb;
+ afb_offset = 1 + afb_dim1;
+ afb -= afb_offset;
+ --s;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ equil = lsame_(fact, "E");
+ upper = lsame_(uplo, "U");
+ if (nofact || equil) {
+ *(unsigned char *)equed = 'N';
+ rcequ = FALSE_;
+ } else {
+ rcequ = lsame_(equed, "Y");
+ smlnum = slamch_("Safe minimum");
+ bignum = 1.f / smlnum;
+ }
+
+/* Test the input parameters. */
+
+ if (! nofact && ! equil && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*kd < 0) {
+ *info = -4;
+ } else if (*nrhs < 0) {
+ *info = -5;
+ } else if (*ldab < *kd + 1) {
+ *info = -7;
+ } else if (*ldafb < *kd + 1) {
+ *info = -9;
+ } else if (lsame_(fact, "F") && ! (rcequ || lsame_(
+ equed, "N"))) {
+ *info = -10;
+ } else {
+ if (rcequ) {
+ smin = bignum;
+ smax = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ r__1 = smin, r__2 = s[j];
+ smin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = smax, r__2 = s[j];
+ smax = dmax(r__1,r__2);
+/* L10: */
+ }
+ if (smin <= 0.f) {
+ *info = -11;
+ } else if (*n > 0) {
+ scond = dmax(smin,smlnum) / dmin(smax,bignum);
+ } else {
+ scond = 1.f;
+ }
+ }
+ if (*info == 0) {
+ if (*ldb < max(1,*n)) {
+ *info = -13;
+ } else if (*ldx < max(1,*n)) {
+ *info = -15;
+ }
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CPBSVX", &i__1);
+ return 0;
+ }
+
+ if (equil) {
+
+/* Compute row and column scalings to equilibrate the matrix A. */
+
+ cpbequ_(uplo, n, kd, &ab[ab_offset], ldab, &s[1], &scond, &amax, &
+ infequ);
+ if (infequ == 0) {
+
+/* Equilibrate the matrix. */
+
+ claqhb_(uplo, n, kd, &ab[ab_offset], ldab, &s[1], &scond, &amax,
+ equed);
+ rcequ = lsame_(equed, "Y");
+ }
+ }
+
+/* Scale the right-hand side. */
+
+ if (rcequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__;
+ i__5 = i__ + j * b_dim1;
+ q__1.r = s[i__4] * b[i__5].r, q__1.i = s[i__4] * b[i__5].i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+
+ if (nofact || equil) {
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j - *kd;
+ j1 = max(i__2,1);
+ i__2 = j - j1 + 1;
+ ccopy_(&i__2, &ab[*kd + 1 - j + j1 + j * ab_dim1], &c__1, &
+ afb[*kd + 1 - j + j1 + j * afb_dim1], &c__1);
+/* L40: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = j + *kd;
+ j2 = min(i__2,*n);
+ i__2 = j2 - j + 1;
+ ccopy_(&i__2, &ab[j * ab_dim1 + 1], &c__1, &afb[j * afb_dim1
+ + 1], &c__1);
+/* L50: */
+ }
+ }
+
+ cpbtrf_(uplo, n, kd, &afb[afb_offset], ldafb, info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.f;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ anorm = clanhb_("1", uplo, n, kd, &ab[ab_offset], ldab, &rwork[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ cpbcon_(uplo, n, kd, &afb[afb_offset], ldafb, &anorm, rcond, &work[1], &
+ rwork[1], info);
+
+/* Compute the solution matrix X. */
+
+ clacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ cpbtrs_(uplo, n, kd, nrhs, &afb[afb_offset], ldafb, &x[x_offset], ldx,
+ info);
+
+/* Use iterative refinement to improve the computed solution and */
+/* compute error bounds and backward error estimates for it. */
+
+ cpbrfs_(uplo, n, kd, nrhs, &ab[ab_offset], ldab, &afb[afb_offset], ldafb,
+ &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[1]
+, &rwork[1], info);
+
+/* Transform the solution matrix X to a solution of the original */
+/* system. */
+
+ if (rcequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * x_dim1;
+ i__4 = i__;
+ i__5 = i__ + j * x_dim1;
+ q__1.r = s[i__4] * x[i__5].r, q__1.i = s[i__4] * x[i__5].i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+/* L60: */
+ }
+/* L70: */
+ }
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] /= scond;
+/* L80: */
+ }
+ }
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < slamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ return 0;
+
+/* End of CPBSVX */
+
+} /* cpbsvx_ */
diff --git a/contrib/libs/clapack/cpbtf2.c b/contrib/libs/clapack/cpbtf2.c
new file mode 100644
index 0000000000..eab52ff1aa
--- /dev/null
+++ b/contrib/libs/clapack/cpbtf2.c
@@ -0,0 +1,255 @@
+/* cpbtf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b8 = -1.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int cpbtf2_(char *uplo, integer *n, integer *kd, complex *ab,
+	 integer *ldab, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3;
+    real r__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer j, kn;
+    real ajj;
+    integer kld;
+    extern /* Subroutine */ int cher_(char *, integer *, real *, complex *,
+	    integer *, complex *, integer *);
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int clacgv_(integer *, complex *, integer *),
+	    csscal_(integer *, real *, complex *, integer *), xerbla_(char *,
+	    integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPBTF2 computes the Cholesky factorization of a complex Hermitian */
+/* positive definite band matrix A. */
+
+/* The factorization has the form */
+/* A = U' * U , if UPLO = 'U', or */
+/* A = L * L', if UPLO = 'L', */
+/* where U is an upper triangular matrix, U' is the conjugate transpose */
+/* of U, and L is lower triangular. */
+
+/* This is the unblocked version of the algorithm, calling Level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* Hermitian matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of super-diagonals of the matrix A if UPLO = 'U', */
+/* or the number of sub-diagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) COMPLEX array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, if INFO = 0, the triangular factor U or L from the */
+/* Cholesky factorization A = U'*U or A = L*L' of the band */
+/* matrix A, in the same storage format as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, the leading minor of order k is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* N = 6, KD = 2, and UPLO = 'U': */
+
+/* On entry: On exit: */
+
+/* * * a13 a24 a35 a46 * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+
+/* Similarly, if UPLO = 'L' the format of A is as follows: */
+
+/* On entry: On exit: */
+
+/* a11 a22 a33 a44 a55 a66 l11 l22 l33 l44 l55 l66 */
+/* a21 a32 a43 a54 a65 * l21 l32 l43 l54 l65 * */
+/* a31 a42 a53 a64 * * l31 l42 l53 l64 * * */
+
+/* Array elements marked * are not used by the routine. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*ldab < *kd + 1) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPBTF2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Computing MAX */
+    i__1 = 1, i__2 = *ldab - 1;
+    kld = max(i__1,i__2);
+
+/* KLD = MAX( 1, LDAB-1 ) is the increment used to step along a row of */
+/* the band matrix in band storage (each diagonal occupies a row of AB). */
+
+    if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+
+/* Compute U(J,J) and test for non-positive-definiteness. */
+
+	    i__2 = *kd + 1 + j * ab_dim1;
+	    ajj = ab[i__2].r;
+	    if (ajj <= 0.f) {
+		i__2 = *kd + 1 + j * ab_dim1;
+		ab[i__2].r = ajj, ab[i__2].i = 0.f;
+		goto L30;
+	    }
+	    ajj = sqrt(ajj);
+	    i__2 = *kd + 1 + j * ab_dim1;
+	    ab[i__2].r = ajj, ab[i__2].i = 0.f;
+
+/* Compute elements J+1:J+KN of row J and update the */
+/* trailing submatrix within the band. */
+
+/* Computing MIN */
+	    i__2 = *kd, i__3 = *n - j;
+	    kn = min(i__2,i__3);
+	    if (kn > 0) {
+		r__1 = 1.f / ajj;
+		csscal_(&kn, &r__1, &ab[*kd + (j + 1) * ab_dim1], &kld);
+		clacgv_(&kn, &ab[*kd + (j + 1) * ab_dim1], &kld);
+		cher_("Upper", &kn, &c_b8, &ab[*kd + (j + 1) * ab_dim1], &kld,
+			 &ab[*kd + 1 + (j + 1) * ab_dim1], &kld);
+		clacgv_(&kn, &ab[*kd + (j + 1) * ab_dim1], &kld);
+	    }
+/* L10: */
+	}
+    } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+
+/* Compute L(J,J) and test for non-positive-definiteness. */
+
+	    i__2 = j * ab_dim1 + 1;
+	    ajj = ab[i__2].r;
+	    if (ajj <= 0.f) {
+		i__2 = j * ab_dim1 + 1;
+		ab[i__2].r = ajj, ab[i__2].i = 0.f;
+		goto L30;
+	    }
+	    ajj = sqrt(ajj);
+	    i__2 = j * ab_dim1 + 1;
+	    ab[i__2].r = ajj, ab[i__2].i = 0.f;
+
+/* Compute elements J+1:J+KN of column J and update the */
+/* trailing submatrix within the band. */
+
+/* Computing MIN */
+	    i__2 = *kd, i__3 = *n - j;
+	    kn = min(i__2,i__3);
+	    if (kn > 0) {
+		r__1 = 1.f / ajj;
+		csscal_(&kn, &r__1, &ab[j * ab_dim1 + 2], &c__1);
+		cher_("Lower", &kn, &c_b8, &ab[j * ab_dim1 + 2], &c__1, &ab[(
+			j + 1) * ab_dim1 + 1], &kld);
+	    }
+/* L20: */
+	}
+    }
+    return 0;
+
+/* A non-positive diagonal element was found at column J: report the */
+/* index of the failing leading minor in INFO. */
+
+L30:
+    *info = j;
+    return 0;
+
+/* End of CPBTF2 */
+
+} /* cpbtf2_ */
diff --git a/contrib/libs/clapack/cpbtrf.c b/contrib/libs/clapack/cpbtrf.c
new file mode 100644
index 0000000000..7d08412e03
--- /dev/null
+++ b/contrib/libs/clapack/cpbtrf.c
@@ -0,0 +1,489 @@
+/* cpbtrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b21 = -1.f;
+static real c_b22 = 1.f;
+static integer c__33 = 33;
+
+/* Subroutine */ int cpbtrf_(char *uplo, integer *n, integer *kd, complex *ab,
+	 integer *ldab, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+    complex q__1;
+
+    /* Local variables */
+    integer i__, j, i2, i3, ib, nb, ii, jj;
+    complex work[1056]	/* was [33][32] */;
+    /* WORK is a 33-by-32 column-major scratch array (leading dimension 33, */
+    /* passed to the BLAS as c__33); it stages the triangle of the A13/A31 */
+    /* block that lies outside the band during the blocked update below. */
+    extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+	    integer *, complex *, complex *, integer *, complex *, integer *,
+	    complex *, complex *, integer *), cherk_(char *,
+	    char *, integer *, integer *, real *, complex *, integer *, real *
+, complex *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int ctrsm_(char *, char *, char *, char *,
+	    integer *, integer *, complex *, complex *, integer *, complex *,
+	    integer *), cpbtf2_(char *,
+	    integer *, integer *, complex *, integer *, integer *),
+	    cpotf2_(char *, integer *, complex *, integer *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPBTRF computes the Cholesky factorization of a complex Hermitian */
+/* positive definite band matrix A. */
+
+/* The factorization has the form */
+/* A = U**H * U, if UPLO = 'U', or */
+/* A = L * L**H, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) COMPLEX array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, if INFO = 0, the triangular factor U or L from the */
+/* Cholesky factorization A = U**H*U or A = L*L**H of the band */
+/* matrix A, in the same storage format as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* N = 6, KD = 2, and UPLO = 'U': */
+
+/* On entry: On exit: */
+
+/* * * a13 a24 a35 a46 * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+
+/* Similarly, if UPLO = 'L' the format of A is as follows: */
+
+/* On entry: On exit: */
+
+/* a11 a22 a33 a44 a55 a66 l11 l22 l33 l44 l55 l66 */
+/* a21 a32 a43 a54 a65 * l21 l32 l43 l54 l65 * */
+/* a31 a42 a53 a64 * * l31 l42 l53 l64 * * */
+
+/* Array elements marked * are not used by the routine. */
+
+/* Contributed by */
+/* Peter Mayes and Giuseppe Radicati, IBM ECSEC, Rome, March 23, 1989 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*ldab < *kd + 1) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPBTRF", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Determine the block size for this environment */
+
+    nb = ilaenv_(&c__1, "CPBTRF", uplo, n, kd, &c_n1, &c_n1);
+
+/* The block size must not exceed the semi-bandwidth KD, and must not */
+/* exceed the limit set by the size of the local array WORK. */
+
+    nb = min(nb,32);
+
+    if (nb <= 1 || nb > *kd) {
+
+/* Use unblocked code */
+
+	cpbtf2_(uplo, n, kd, &ab[ab_offset], ldab, info);
+    } else {
+
+/* Use blocked code */
+
+	if (lsame_(uplo, "U")) {
+
+/* Compute the Cholesky factorization of a Hermitian band */
+/* matrix, given the upper triangle of the matrix in band */
+/* storage. */
+
+/* Zero the upper triangle of the work array. */
+
+	    i__1 = nb;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * 33 - 34;
+		    work[i__3].r = 0.f, work[i__3].i = 0.f;
+/* L10: */
+		}
+/* L20: */
+	    }
+
+/* Process the band matrix one diagonal block at a time. */
+
+	    i__1 = *n;
+	    i__2 = nb;
+	    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+		i__3 = nb, i__4 = *n - i__ + 1;
+		ib = min(i__3,i__4);
+
+/* Factorize the diagonal block */
+
+		i__3 = *ldab - 1;
+		cpotf2_(uplo, &ib, &ab[*kd + 1 + i__ * ab_dim1], &i__3, &ii);
+		if (ii != 0) {
+		    *info = i__ + ii - 1;
+		    goto L150;
+		}
+		if (i__ + ib <= *n) {
+
+/* Update the relevant part of the trailing submatrix. */
+/* If A11 denotes the diagonal block which has just been */
+/* factorized, then we need to update the remaining */
+/* blocks in the diagram: */
+
+/* A11 A12 A13 */
+/* A22 A23 */
+/* A33 */
+
+/* The numbers of rows and columns in the partitioning */
+/* are IB, I2, I3 respectively. The blocks A12, A22 and */
+/* A23 are empty if IB = KD. The upper triangle of A13 */
+/* lies outside the band. */
+
+/* Computing MIN */
+		    i__3 = *kd - ib, i__4 = *n - i__ - ib + 1;
+		    i2 = min(i__3,i__4);
+/* Computing MIN */
+		    i__3 = ib, i__4 = *n - i__ - *kd + 1;
+		    i3 = min(i__3,i__4);
+
+		    if (i2 > 0) {
+
+/* Update A12 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			ctrsm_("Left", "Upper", "Conjugate transpose", "Non-"
+				"unit", &ib, &i2, &c_b1, &ab[*kd + 1 + i__ *
+				ab_dim1], &i__3, &ab[*kd + 1 - ib + (i__ + ib)
+				 * ab_dim1], &i__4);
+
+/* Update A22 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			cherk_("Upper", "Conjugate transpose", &i2, &ib, &
+				c_b21, &ab[*kd + 1 - ib + (i__ + ib) *
+				ab_dim1], &i__3, &c_b22, &ab[*kd + 1 + (i__ +
+				ib) * ab_dim1], &i__4);
+		    }
+
+		    if (i3 > 0) {
+
+/* Copy the lower triangle of A13 into the work array. */
+
+			i__3 = i3;
+			for (jj = 1; jj <= i__3; ++jj) {
+			    i__4 = ib;
+			    for (ii = jj; ii <= i__4; ++ii) {
+				i__5 = ii + jj * 33 - 34;
+				i__6 = ii - jj + 1 + (jj + i__ + *kd - 1) *
+					ab_dim1;
+				work[i__5].r = ab[i__6].r, work[i__5].i = ab[
+					i__6].i;
+/* L30: */
+			    }
+/* L40: */
+			}
+
+/* Update A13 (in the work array). */
+
+			i__3 = *ldab - 1;
+			ctrsm_("Left", "Upper", "Conjugate transpose", "Non-"
+				"unit", &ib, &i3, &c_b1, &ab[*kd + 1 + i__ *
+				ab_dim1], &i__3, work, &c__33);
+
+/* Update A23 */
+
+			if (i2 > 0) {
+			    q__1.r = -1.f, q__1.i = -0.f;
+			    i__3 = *ldab - 1;
+			    i__4 = *ldab - 1;
+			    cgemm_("Conjugate transpose", "No transpose", &i2,
+				     &i3, &ib, &q__1, &ab[*kd + 1 - ib + (i__
+				    + ib) * ab_dim1], &i__3, work, &c__33, &
+				    c_b1, &ab[ib + 1 + (i__ + *kd) * ab_dim1],
+				     &i__4);
+			}
+
+/* Update A33 */
+
+			i__3 = *ldab - 1;
+			cherk_("Upper", "Conjugate transpose", &i3, &ib, &
+				c_b21, work, &c__33, &c_b22, &ab[*kd + 1 + (
+				i__ + *kd) * ab_dim1], &i__3);
+
+/* Copy the lower triangle of A13 back into place. */
+
+			i__3 = i3;
+			for (jj = 1; jj <= i__3; ++jj) {
+			    i__4 = ib;
+			    for (ii = jj; ii <= i__4; ++ii) {
+				i__5 = ii - jj + 1 + (jj + i__ + *kd - 1) *
+					ab_dim1;
+				i__6 = ii + jj * 33 - 34;
+				ab[i__5].r = work[i__6].r, ab[i__5].i = work[
+					i__6].i;
+/* L50: */
+			    }
+/* L60: */
+			}
+		    }
+		}
+/* L70: */
+	    }
+	} else {
+
+/* Compute the Cholesky factorization of a Hermitian band */
+/* matrix, given the lower triangle of the matrix in band */
+/* storage. */
+
+/* Zero the lower triangle of the work array. */
+
+	    i__2 = nb;
+	    for (j = 1; j <= i__2; ++j) {
+		i__1 = nb;
+		for (i__ = j + 1; i__ <= i__1; ++i__) {
+		    i__3 = i__ + j * 33 - 34;
+		    work[i__3].r = 0.f, work[i__3].i = 0.f;
+/* L80: */
+		}
+/* L90: */
+	    }
+
+/* Process the band matrix one diagonal block at a time. */
+
+	    i__2 = *n;
+	    i__1 = nb;
+	    for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+/* Computing MIN */
+		i__3 = nb, i__4 = *n - i__ + 1;
+		ib = min(i__3,i__4);
+
+/* Factorize the diagonal block */
+
+		i__3 = *ldab - 1;
+		cpotf2_(uplo, &ib, &ab[i__ * ab_dim1 + 1], &i__3, &ii);
+		if (ii != 0) {
+		    *info = i__ + ii - 1;
+		    goto L150;
+		}
+		if (i__ + ib <= *n) {
+
+/* Update the relevant part of the trailing submatrix. */
+/* If A11 denotes the diagonal block which has just been */
+/* factorized, then we need to update the remaining */
+/* blocks in the diagram: */
+
+/* A11 */
+/* A21 A22 */
+/* A31 A32 A33 */
+
+/* The numbers of rows and columns in the partitioning */
+/* are IB, I2, I3 respectively. The blocks A21, A22 and */
+/* A32 are empty if IB = KD. The lower triangle of A31 */
+/* lies outside the band. */
+
+/* Computing MIN */
+		    i__3 = *kd - ib, i__4 = *n - i__ - ib + 1;
+		    i2 = min(i__3,i__4);
+/* Computing MIN */
+		    i__3 = ib, i__4 = *n - i__ - *kd + 1;
+		    i3 = min(i__3,i__4);
+
+		    if (i2 > 0) {
+
+/* Update A21 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			ctrsm_("Right", "Lower", "Conjugate transpose", "Non"
+				"-unit", &i2, &ib, &c_b1, &ab[i__ * ab_dim1 +
+				1], &i__3, &ab[ib + 1 + i__ * ab_dim1], &i__4);
+
+/* Update A22 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			cherk_("Lower", "No transpose", &i2, &ib, &c_b21, &ab[
+				ib + 1 + i__ * ab_dim1], &i__3, &c_b22, &ab[(
+				i__ + ib) * ab_dim1 + 1], &i__4);
+		    }
+
+		    if (i3 > 0) {
+
+/* Copy the upper triangle of A31 into the work array. */
+
+			i__3 = ib;
+			for (jj = 1; jj <= i__3; ++jj) {
+			    i__4 = min(jj,i3);
+			    for (ii = 1; ii <= i__4; ++ii) {
+				i__5 = ii + jj * 33 - 34;
+				i__6 = *kd + 1 - jj + ii + (jj + i__ - 1) *
+					ab_dim1;
+				work[i__5].r = ab[i__6].r, work[i__5].i = ab[
+					i__6].i;
+/* L100: */
+			    }
+/* L110: */
+			}
+
+/* Update A31 (in the work array). */
+
+			i__3 = *ldab - 1;
+			ctrsm_("Right", "Lower", "Conjugate transpose", "Non"
+				"-unit", &i3, &ib, &c_b1, &ab[i__ * ab_dim1 +
+				1], &i__3, work, &c__33);
+
+/* Update A32 */
+
+			if (i2 > 0) {
+			    q__1.r = -1.f, q__1.i = -0.f;
+			    i__3 = *ldab - 1;
+			    i__4 = *ldab - 1;
+			    cgemm_("No transpose", "Conjugate transpose", &i3,
+				     &i2, &ib, &q__1, work, &c__33, &ab[ib +
+				    1 + i__ * ab_dim1], &i__3, &c_b1, &ab[*kd
+				    + 1 - ib + (i__ + ib) * ab_dim1], &i__4);
+			}
+
+/* Update A33 */
+
+			i__3 = *ldab - 1;
+			cherk_("Lower", "No transpose", &i3, &ib, &c_b21,
+				work, &c__33, &c_b22, &ab[(i__ + *kd) *
+				ab_dim1 + 1], &i__3);
+
+/* Copy the upper triangle of A31 back into place. */
+
+			i__3 = ib;
+			for (jj = 1; jj <= i__3; ++jj) {
+			    i__4 = min(jj,i3);
+			    for (ii = 1; ii <= i__4; ++ii) {
+				i__5 = *kd + 1 - jj + ii + (jj + i__ - 1) *
+					ab_dim1;
+				i__6 = ii + jj * 33 - 34;
+				ab[i__5].r = work[i__6].r, ab[i__5].i = work[
+					i__6].i;
+/* L120: */
+			    }
+/* L130: */
+			}
+		    }
+		}
+/* L140: */
+	    }
+	}
+    }
+    return 0;
+
+/* Early exit taken when CPOTF2 reported a non-positive-definite */
+/* diagonal block; INFO was already set above to the failing minor. */
+
+L150:
+    return 0;
+
+/* End of CPBTRF */
+
+} /* cpbtrf_ */
diff --git a/contrib/libs/clapack/cpbtrs.c b/contrib/libs/clapack/cpbtrs.c
new file mode 100644
index 0000000000..d325213c90
--- /dev/null
+++ b/contrib/libs/clapack/cpbtrs.c
@@ -0,0 +1,184 @@
+/* cpbtrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cpbtrs_(char *uplo, integer *n, integer *kd, integer *
+	nrhs, complex *ab, integer *ldab, complex *b, integer *ldb, integer *
+	info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    integer j;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int ctbsv_(char *, char *, char *, integer *,
+	    integer *, complex *, integer *, complex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPBTRS solves a system of linear equations A*X = B with a Hermitian */
+/* positive definite band matrix A using the Cholesky factorization */
+/* A = U**H*U or A = L*L**H computed by CPBTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangular factor stored in AB; */
+/* = 'L': Lower triangular factor stored in AB. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AB (input) COMPLEX array, dimension (LDAB,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**H*U or A = L*L**H of the band matrix A, stored in the */
+/* first KD+1 rows of the array. The j-th column of U or L is */
+/* stored in the j-th column of the array AB as follows: */
+/* if UPLO ='U', AB(kd+1+i-j,j) = U(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO ='L', AB(1+i-j,j) = L(i,j) for j<=i<=min(n,j+kd). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (*ldab < *kd + 1) {
+	*info = -6;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPBTRS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+/* Each right-hand-side column of B is overwritten in place with the */
+/* solution by two triangular banded solves (CTBSV). */
+
+    if (upper) {
+
+/* Solve A*X = B where A = U'*U. */
+
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+
+/* Solve U'*X = B, overwriting B with X. */
+
+	    ctbsv_("Upper", "Conjugate transpose", "Non-unit", n, kd, &ab[
+		    ab_offset], ldab, &b[j * b_dim1 + 1], &c__1);
+
+/* Solve U*X = B, overwriting B with X. */
+
+	    ctbsv_("Upper", "No transpose", "Non-unit", n, kd, &ab[ab_offset],
+		     ldab, &b[j * b_dim1 + 1], &c__1);
+/* L10: */
+	}
+    } else {
+
+/* Solve A*X = B where A = L*L'. */
+
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+
+/* Solve L*X = B, overwriting B with X. */
+
+	    ctbsv_("Lower", "No transpose", "Non-unit", n, kd, &ab[ab_offset],
+		     ldab, &b[j * b_dim1 + 1], &c__1);
+
+/* Solve L'*X = B, overwriting B with X. */
+
+	    ctbsv_("Lower", "Conjugate transpose", "Non-unit", n, kd, &ab[
+		    ab_offset], ldab, &b[j * b_dim1 + 1], &c__1);
+/* L20: */
+	}
+    }
+
+    return 0;
+
+/* End of CPBTRS */
+
+} /* cpbtrs_ */
diff --git a/contrib/libs/clapack/cpftrf.c b/contrib/libs/clapack/cpftrf.c
new file mode 100644
index 0000000000..85bdab73e6
--- /dev/null
+++ b/contrib/libs/clapack/cpftrf.c
@@ -0,0 +1,475 @@
+/* cpftrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static real c_b15 = -1.f;
+static real c_b16 = 1.f;
+
+/* Subroutine */ int cpftrf_(char *transr, char *uplo, integer *n, complex *a, 
+	 integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer k, n1, n2;
+    logical normaltransr;
+    extern /* Subroutine */ int cherk_(char *, char *, integer *, integer *, 
+	    real *, complex *, integer *, real *, complex *, integer *);
+    extern logical lsame_(char *, char *);
+    logical lower;
+    extern /* Subroutine */ int ctrsm_(char *, char *, char *, char *, 
+	    integer *, integer *, complex *, complex *, integer *, complex *, 
+	    integer *), xerbla_(char *, 
+	    integer *);
+    logical nisodd;
+    extern /* Subroutine */ int cpotrf_(char *, integer *, complex *, integer 
+	    *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+
+/*  -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/*  -- November 2008 -- */
+
+/*  -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/*     .. */
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CPFTRF computes the Cholesky factorization of a complex Hermitian */
+/*  positive definite matrix A. */
+
+/*  The factorization has the form */
+/*     A = U**H * U,  if UPLO = 'U', or */
+/*     A = L  * L**H,  if UPLO = 'L', */
+/*  where U is an upper triangular matrix and L is lower triangular. */
+
+/*  This is the block version of the algorithm, calling Level 3 BLAS. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  TRANSR    (input) CHARACTER */
+/*          = 'N':  The Normal TRANSR of RFP A is stored; */
+/*          = 'C':  The Conjugate-transpose TRANSR of RFP A is stored. */
+
+/*  UPLO    (input) CHARACTER */
+/*          = 'U':  Upper triangle of RFP A is stored; */
+/*          = 'L':  Lower triangle of RFP A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  A       (input/output) COMPLEX array, dimension ( N*(N+1)/2 ); */
+/*          On entry, the Hermitian matrix A in RFP format. RFP format is */
+/*          described by TRANSR, UPLO, and N as follows: If TRANSR = 'N' */
+/*          then RFP A is (0:N,0:k-1) when N is even; k=N/2. RFP A is */
+/*          (0:N-1,0:k) when N is odd; k=N/2. IF TRANSR = 'C' then RFP is */
+/*          the Conjugate-transpose of RFP A as defined when */
+/*          TRANSR = 'N'. The contents of RFP A are defined by UPLO as */
+/*          follows: If UPLO = 'U' the RFP A contains the nt elements of */
+/*          upper packed A. If UPLO = 'L' the RFP A contains the elements */
+/*          of lower packed A. The LDA of RFP A is (N+1)/2 when TRANSR = */
+/*          'C'. When TRANSR is 'N' the LDA is N+1 when N is even and N */
+/*          is odd. See the Note below for more details. */
+
+/*          On exit, if INFO = 0, the factor U or L from the Cholesky */
+/*          factorization RFP A = U**H*U or RFP A = L*L**H. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, the leading minor of order i is not */
+/*                positive definite, and the factorization could not be */
+/*                completed. */
+
+/*  Further Notes on RFP Format: */
+/*  ============================ */
+
+
+/*  We first consider Standard Packed Format when N is even. */
+/*  We give an example where N = 6. */
+
+/*     AP is Upper             AP is Lower */
+
+/*   00 01 02 03 04 05       00 */
+/*      11 12 13 14 15       10 11 */
+/*         22 23 24 25       20 21 22 */
+/*            33 34 35       30 31 32 33 */
+/*               44 45       40 41 42 43 44 */
+/*                  55       50 51 52 53 54 55 */
+
+
+/*  Let TRANSR = 'N'. RFP holds AP as follows: */
+/*  For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/*  three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/*  conjugate-transpose of the first three columns of AP upper. */
+/*  For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/*  three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/*  conjugate-transpose of the last three columns of AP lower. */
+/*  To denote conjugate we place -- above the element. This covers the */
+/*  case N even and TRANSR = 'N'. */
+
+/*         RFP A                   RFP A */
+
+/*                                -- -- -- */
+/*        03 04 05                33 43 53 */
+/*                                   -- -- */
+/*        13 14 15                00 44 54 */
+/*                                      -- */
+/*        23 24 25                10 11 55 */
+
+/*        33 34 35                20 21 22 */
+/*        -- */
+/*        00 44 45                30 31 32 */
+/*        -- -- */
+/*        01 11 55                40 41 42 */
+/*        -- -- -- */
+/*        02 12 22                50 51 52 */
+
+/*  Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/*  transpose of RFP A above. One therefore gets: */
+
+
+/*           RFP A                   RFP A */
+
+/*     -- -- -- --                -- -- -- -- -- -- */
+/*     03 13 23 33 00 01 02    33 00 10 20 30 40 50 */
+/*     -- -- -- --                -- -- -- -- -- -- */
+/*     04 14 24 34 44 11 12    43 44 11 21 31 41 51 */
+/*     -- -- -- --                -- -- -- -- -- -- */
+/*     05 15 25 35 45 55 22    53 54 55 22 32 42 52 */
+
+
+/*  We next consider Standard Packed Format when N is odd. */
+/*  We give an example where N = 5. */
+
+/*     AP is Upper                 AP is Lower */
+
+/*   00 01 02 03 04              00 */
+/*      11 12 13 14              10 11 */
+/*         22 23 24              20 21 22 */
+/*            33 34              30 31 32 33 */
+/*               44              40 41 42 43 44 */
+
+
+/*  Let TRANSR = 'N'. RFP holds AP as follows: */
+/*  For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/*  three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/*  conjugate-transpose of the first two columns of AP upper. */
+/*  For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/*  three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/*  conjugate-transpose of the last two columns of AP lower. */
+/*  To denote conjugate we place -- above the element. This covers the */
+/*  case N odd and TRANSR = 'N'. */
+
+/*         RFP A                   RFP A */
+
+/*                                   -- -- */
+/*        02 03 04                00 33 43 */
+/*                                      -- */
+/*        12 13 14                10 11 44 */
+
+/*        22 23 24                20 21 22 */
+/*        -- */
+/*        00 33 34                30 31 32 */
+/*        -- -- */
+/*        01 11 44                40 41 42 */
+
+/*  Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/*  transpose of RFP A above. One therefore gets: */
+
+
+/*           RFP A                   RFP A */
+
+/*     -- -- --                   -- -- -- -- -- -- */
+/*     02 12 22 00 01             00 10 20 30 40 50 */
+/*     -- -- --                   -- -- -- -- -- -- */
+/*     03 13 23 33 11             33 11 21 31 41 51 */
+/*     -- -- --                   -- -- -- -- -- -- */
+/*     04 14 24 34 44             43 44 22 32 42 52 */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    *info = 0;
+    normaltransr = lsame_(transr, "N");
+    lower = lsame_(uplo, "L");
+    if (! normaltransr && ! lsame_(transr, "C")) {
+	*info = -1;
+    } else if (! lower && ! lsame_(uplo, "U")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPFTRF", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     If N is odd, set NISODD = .TRUE. */
+/*     If N is even, set K = N/2 and NISODD = .FALSE. */
+
+    if (*n % 2 == 0) {
+	k = *n / 2;
+	nisodd = FALSE_;
+    } else {
+	nisodd = TRUE_;
+    }
+
+/*     Set N1 and N2 depending on LOWER */
+
+    if (lower) {
+	n2 = *n / 2;
+	n1 = *n - n2;
+    } else {
+	n1 = *n / 2;
+	n2 = *n - n1;
+    }
+
+/*     start execution: there are eight cases */
+
+/*     Each of the eight cases follows the same recipe on the two */
+/*     triangular blocks T1, T2 and the rectangular block S of the RFP */
+/*     layout: factor T1 (cpotrf), triangular-solve for S (ctrsm), apply */
+/*     a Hermitian rank-k update to T2 (cherk), then factor T2 (cpotrf). */
+/*     When the second cpotrf fails, INFO is offset by the order of T1 */
+/*     so it indexes the leading minor of the full matrix A. */
+
+    if (nisodd) {
+
+/*        N is odd */
+
+	if (normaltransr) {
+
+/*           N is odd and TRANSR = 'N' */
+
+	    if (lower) {
+
+/*             SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:n1-1) ) */
+/*             T1 -> a(0,0), T2 -> a(0,1), S -> a(n1,0) */
+/*             T1 -> a(0), T2 -> a(n), S -> a(n1) */
+
+		cpotrf_("L", &n1, a, n, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		ctrsm_("R", "L", "C", "N", &n2, &n1, &c_b1, a, n, &a[n1], n);
+		cherk_("U", "N", &n2, &n1, &c_b15, &a[n1], n, &c_b16, &a[*n], 
+			n);
+		cpotrf_("U", &n2, &a[*n], n, info);
+		if (*info > 0) {
+		    *info += n1;
+		}
+
+	    } else {
+
+/*             SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:n2-1) */
+/*             T1 -> a(n1+1,0), T2 -> a(n1,0), S -> a(0,0) */
+/*             T1 -> a(n2), T2 -> a(n1), S -> a(0) */
+
+		cpotrf_("L", &n1, &a[n2], n, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		ctrsm_("L", "L", "N", "N", &n1, &n2, &c_b1, &a[n2], n, a, n);
+		cherk_("U", "C", &n2, &n1, &c_b15, a, n, &c_b16, &a[n1], n);
+		cpotrf_("U", &n2, &a[n1], n, info);
+		if (*info > 0) {
+		    *info += n1;
+		}
+
+	    }
+
+	} else {
+
+/*           N is odd and TRANSR = 'C' */
+
+	    if (lower) {
+
+/*              SRPA for LOWER, TRANSPOSE and N is odd */
+/*              T1 -> A(0,0) , T2 -> A(1,0) , S -> A(0,n1) */
+/*              T1 -> a(0+0) , T2 -> a(1+0) , S -> a(0+n1*n1); lda=n1 */
+
+		cpotrf_("U", &n1, a, &n1, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		ctrsm_("L", "U", "C", "N", &n1, &n2, &c_b1, a, &n1, &a[n1 * 
+			n1], &n1);
+		cherk_("L", "C", &n2, &n1, &c_b15, &a[n1 * n1], &n1, &c_b16, &
+			a[1], &n1);
+		cpotrf_("L", &n2, &a[1], &n1, info);
+		if (*info > 0) {
+		    *info += n1;
+		}
+
+	    } else {
+
+/*              SRPA for UPPER, TRANSPOSE and N is odd */
+/*              T1 -> A(0,n1+1), T2 -> A(0,n1), S -> A(0,0) */
+/*              T1 -> a(n2*n2), T2 -> a(n1*n2), S -> a(0); lda = n2 */
+
+		cpotrf_("U", &n1, &a[n2 * n2], &n2, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		ctrsm_("R", "U", "N", "N", &n2, &n1, &c_b1, &a[n2 * n2], &n2, 
+			a, &n2);
+		cherk_("L", "N", &n2, &n1, &c_b15, a, &n2, &c_b16, &a[n1 * n2]
+, &n2);
+		cpotrf_("L", &n2, &a[n1 * n2], &n2, info);
+		if (*info > 0) {
+		    *info += n1;
+		}
+
+	    }
+
+	}
+
+    } else {
+
+/*        N is even */
+
+	if (normaltransr) {
+
+/*           N is even and TRANSR = 'N' */
+
+	    if (lower) {
+
+/*              SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/*              T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/*              T1 -> a(1), T2 -> a(0), S -> a(k+1) */
+
+		i__1 = *n + 1;
+		cpotrf_("L", &k, &a[1], &i__1, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		ctrsm_("R", "L", "C", "N", &k, &k, &c_b1, &a[1], &i__1, &a[k 
+			+ 1], &i__2);
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		cherk_("U", "N", &k, &k, &c_b15, &a[k + 1], &i__1, &c_b16, a, 
+			&i__2);
+		i__1 = *n + 1;
+		cpotrf_("U", &k, a, &i__1, info);
+		if (*info > 0) {
+		    *info += k;
+		}
+
+	    } else {
+
+/*              SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/*              T1 -> a(k+1,0) ,  T2 -> a(k,0),   S -> a(0,0) */
+/*              T1 -> a(k+1), T2 -> a(k), S -> a(0) */
+
+		i__1 = *n + 1;
+		cpotrf_("L", &k, &a[k + 1], &i__1, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		ctrsm_("L", "L", "N", "N", &k, &k, &c_b1, &a[k + 1], &i__1, a, 
+			 &i__2);
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		cherk_("U", "C", &k, &k, &c_b15, a, &i__1, &c_b16, &a[k], &
+			i__2);
+		i__1 = *n + 1;
+		cpotrf_("U", &k, &a[k], &i__1, info);
+		if (*info > 0) {
+		    *info += k;
+		}
+
+	    }
+
+	} else {
+
+/*           N is even and TRANSR = 'C' */
+
+	    if (lower) {
+
+/*              SRPA for LOWER, TRANSPOSE and N is even (see paper) */
+/*              T1 -> B(0,1), T2 -> B(0,0), S -> B(0,k+1) */
+/*              T1 -> a(0+k), T2 -> a(0+0), S -> a(0+k*(k+1)); lda=k */
+
+		cpotrf_("U", &k, &a[k], &k, info);
+		if (*info > 0) {
+		    return 0;
+		}
+/*              NOTE(review): T1's lda is passed as &n1 here while the */
+/*              sibling even-N cases pass &k; benign because n1 == k when */
+/*              N is even (n2 = n/2 = k, n1 = n - n2 = k), but confirm */
+/*              against upstream cpftrf.f, which uses K. */
+		ctrsm_("L", "U", "C", "N", &k, &k, &c_b1, &a[k], &n1, &a[k * (
+			k + 1)], &k);
+		cherk_("L", "C", &k, &k, &c_b15, &a[k * (k + 1)], &k, &c_b16, 
+			a, &k);
+		cpotrf_("L", &k, a, &k, info);
+		if (*info > 0) {
+		    *info += k;
+		}
+
+	    } else {
+
+/*              SRPA for UPPER, TRANSPOSE and N is even (see paper) */
+/*              T1 -> B(0,k+1),     T2 -> B(0,k),   S -> B(0,0) */
+/*              T1 -> a(0+k*(k+1)), T2 -> a(0+k*k), S -> a(0+0)); lda=k */
+
+		cpotrf_("U", &k, &a[k * (k + 1)], &k, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		ctrsm_("R", "U", "N", "N", &k, &k, &c_b1, &a[k * (k + 1)], &k, 
+			 a, &k);
+		cherk_("L", "N", &k, &k, &c_b15, a, &k, &c_b16, &a[k * k], &k);
+		cpotrf_("L", &k, &a[k * k], &k, info);
+		if (*info > 0) {
+		    *info += k;
+		}
+
+	    }
+
+	}
+
+    }
+
+    return 0;
+
+/*     End of CPFTRF */
+
+} /* cpftrf_ */
diff --git a/contrib/libs/clapack/cpftri.c b/contrib/libs/clapack/cpftri.c
new file mode 100644
index 0000000000..da7e26d332
--- /dev/null
+++ b/contrib/libs/clapack/cpftri.c
@@ -0,0 +1,425 @@
+/* cpftri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static real c_b12 = 1.f;
+
+/* Subroutine */ int cpftri_(char *transr, char *uplo, integer *n, complex *a, 
+	 integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer k, n1, n2;
+    logical normaltransr;
+    extern /* Subroutine */ int cherk_(char *, char *, integer *, integer *, 
+	    real *, complex *, integer *, real *, complex *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int ctrmm_(char *, char *, char *, char *, 
+	    integer *, integer *, complex *, complex *, integer *, complex *, 
+	    integer *);
+    logical lower;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical nisodd;
+    extern /* Subroutine */ int clauum_(char *, integer *, complex *, integer 
+	    *, integer *), ctftri_(char *, char *, char *, integer *, 
+	    complex *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+
+/*  -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/*  -- November 2008 -- */
+
+/*  -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/*     .. Scalar Arguments .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CPFTRI computes the inverse of a complex Hermitian positive definite */
+/*  matrix A using the Cholesky factorization A = U**H*U or A = L*L**H */
+/*  computed by CPFTRF. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  TRANSR    (input) CHARACTER */
+/*          = 'N':  The Normal TRANSR of RFP A is stored; */
+/*          = 'C':  The Conjugate-transpose TRANSR of RFP A is stored. */
+
+/*  UPLO    (input) CHARACTER */
+/*          = 'U':  Upper triangle of A is stored; */
+/*          = 'L':  Lower triangle of A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  A       (input/output) COMPLEX array, dimension ( N*(N+1)/2 ); */
+/*          On entry, the Hermitian matrix A in RFP format. RFP format is */
+/*          described by TRANSR, UPLO, and N as follows: If TRANSR = 'N' */
+/*          then RFP A is (0:N,0:k-1) when N is even; k=N/2. RFP A is */
+/*          (0:N-1,0:k) when N is odd; k=N/2. IF TRANSR = 'C' then RFP is */
+/*          the Conjugate-transpose of RFP A as defined when */
+/*          TRANSR = 'N'. The contents of RFP A are defined by UPLO as */
+/*          follows: If UPLO = 'U' the RFP A contains the nt elements of */
+/*          upper packed A. If UPLO = 'L' the RFP A contains the elements */
+/*          of lower packed A. The LDA of RFP A is (N+1)/2 when TRANSR = */
+/*          'C'. When TRANSR is 'N' the LDA is N+1 when N is even and N */
+/*          is odd. See the Note below for more details. */
+
+/*          On exit, the Hermitian inverse of the original matrix, in the */
+/*          same storage format. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, the (i,i) element of the factor U or L is */
+/*                zero, and the inverse could not be computed. */
+
+/*  Note: */
+/*  ===== */
+
+/*  We first consider Standard Packed Format when N is even. */
+/*  We give an example where N = 6. */
+
+/*     AP is Upper             AP is Lower */
+
+/*   00 01 02 03 04 05       00 */
+/*      11 12 13 14 15       10 11 */
+/*         22 23 24 25       20 21 22 */
+/*            33 34 35       30 31 32 33 */
+/*               44 45       40 41 42 43 44 */
+/*                  55       50 51 52 53 54 55 */
+
+
+/*  Let TRANSR = 'N'. RFP holds AP as follows: */
+/*  For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/*  three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/*  conjugate-transpose of the first three columns of AP upper. */
+/*  For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/*  three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/*  conjugate-transpose of the last three columns of AP lower. */
+/*  To denote conjugate we place -- above the element. This covers the */
+/*  case N even and TRANSR = 'N'. */
+
+/*         RFP A                   RFP A */
+
+/*                                -- -- -- */
+/*        03 04 05                33 43 53 */
+/*                                   -- -- */
+/*        13 14 15                00 44 54 */
+/*                                      -- */
+/*        23 24 25                10 11 55 */
+
+/*        33 34 35                20 21 22 */
+/*        -- */
+/*        00 44 45                30 31 32 */
+/*        -- -- */
+/*        01 11 55                40 41 42 */
+/*        -- -- -- */
+/*        02 12 22                50 51 52 */
+
+/*  Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/*  transpose of RFP A above. One therefore gets: */
+
+
+/*           RFP A                   RFP A */
+
+/*     -- -- -- --                -- -- -- -- -- -- */
+/*     03 13 23 33 00 01 02    33 00 10 20 30 40 50 */
+/*     -- -- -- --                -- -- -- -- -- -- */
+/*     04 14 24 34 44 11 12    43 44 11 21 31 41 51 */
+/*     -- -- -- --                -- -- -- -- -- -- */
+/*     05 15 25 35 45 55 22    53 54 55 22 32 42 52 */
+
+
+/*  We next consider Standard Packed Format when N is odd. */
+/*  We give an example where N = 5. */
+
+/*     AP is Upper                 AP is Lower */
+
+/*   00 01 02 03 04              00 */
+/*      11 12 13 14              10 11 */
+/*         22 23 24              20 21 22 */
+/*            33 34              30 31 32 33 */
+/*               44              40 41 42 43 44 */
+
+
+/*  Let TRANSR = 'N'. RFP holds AP as follows: */
+/*  For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/*  three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/*  conjugate-transpose of the first two columns of AP upper. */
+/*  For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/*  three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/*  conjugate-transpose of the last two columns of AP lower. */
+/*  To denote conjugate we place -- above the element. This covers the */
+/*  case N odd and TRANSR = 'N'. */
+
+/*         RFP A                   RFP A */
+
+/*                                   -- -- */
+/*        02 03 04                00 33 43 */
+/*                                      -- */
+/*        12 13 14                10 11 44 */
+
+/*        22 23 24                20 21 22 */
+/*        -- */
+/*        00 33 34                30 31 32 */
+/*        -- -- */
+/*        01 11 44                40 41 42 */
+
+/*  Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/*  transpose of RFP A above. One therefore gets: */
+
+
+/*           RFP A                   RFP A */
+
+/*     -- -- --                   -- -- -- -- -- -- */
+/*     02 12 22 00 01             00 10 20 30 40 50 */
+/*     -- -- --                   -- -- -- -- -- -- */
+/*     03 13 23 33 11             33 11 21 31 41 51 */
+/*     -- -- --                   -- -- -- -- -- -- */
+/*     04 14 24 34 44             43 44 22 32 42 52 */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    *info = 0;
+    normaltransr = lsame_(transr, "N");
+    lower = lsame_(uplo, "L");
+    if (! normaltransr && ! lsame_(transr, "C")) {
+	*info = -1;
+    } else if (! lower && ! lsame_(uplo, "U")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPFTRI", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Invert the triangular Cholesky factor U or L. */
+
+    ctftri_(transr, uplo, "N", n, a, info);
+    if (*info > 0) {
+	return 0;
+    }
+
+/*     If N is odd, set NISODD = .TRUE. */
+/*     If N is even, set K = N/2 and NISODD = .FALSE. */
+
+    if (*n % 2 == 0) {
+	k = *n / 2;
+	nisodd = FALSE_;
+    } else {
+	nisodd = TRUE_;
+    }
+
+/*     Set N1 and N2 depending on LOWER */
+
+    if (lower) {
+	n2 = *n / 2;
+	n1 = *n - n2;
+    } else {
+	n1 = *n / 2;
+	n2 = *n - n1;
+    }
+
+/*     Start execution of triangular matrix multiply: inv(U)*inv(U)^C or */
+/*     inv(L)^C*inv(L). There are eight cases. */
+
+/*     Each case works on the two triangular blocks T1, T2 and the */
+/*     rectangular block S of the RFP layout: a clauum triangular product */
+/*     on T1 (see CLAUUM: U*U**H or L**H*L), a cherk rank-k update */
+/*     folding in S, a ctrmm multiply of S by the inverted triangle, and */
+/*     a closing clauum on T2. */
+
+    if (nisodd) {
+
+/*        N is odd */
+
+	if (normaltransr) {
+
+/*           N is odd and TRANSR = 'N' */
+
+	    if (lower) {
+
+/*              SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:N1-1) ) */
+/*              T1 -> a(0,0), T2 -> a(0,1), S -> a(N1,0) */
+/*              T1 -> a(0), T2 -> a(n), S -> a(N1) */
+
+		clauum_("L", &n1, a, n, info);
+		cherk_("L", "C", &n1, &n2, &c_b12, &a[n1], n, &c_b12, a, n);
+		ctrmm_("L", "U", "N", "N", &n2, &n1, &c_b1, &a[*n], n, &a[n1], 
+			 n);
+		clauum_("U", &n2, &a[*n], n, info);
+
+	    } else {
+
+/*              SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:N2-1) */
+/*              T1 -> a(N1+1,0), T2 -> a(N1,0), S -> a(0,0) */
+/*              T1 -> a(N2), T2 -> a(N1), S -> a(0) */
+
+		clauum_("L", &n1, &a[n2], n, info);
+		cherk_("L", "N", &n1, &n2, &c_b12, a, n, &c_b12, &a[n2], n);
+		ctrmm_("R", "U", "C", "N", &n1, &n2, &c_b1, &a[n1], n, a, n);
+		clauum_("U", &n2, &a[n1], n, info);
+
+	    }
+
+	} else {
+
+/*           N is odd and TRANSR = 'C' */
+
+	    if (lower) {
+
+/*              SRPA for LOWER, TRANSPOSE, and N is odd */
+/*              T1 -> a(0), T2 -> a(1), S -> a(0+N1*N1) */
+
+		clauum_("U", &n1, a, &n1, info);
+		cherk_("U", "N", &n1, &n2, &c_b12, &a[n1 * n1], &n1, &c_b12, 
+			a, &n1);
+		ctrmm_("R", "L", "N", "N", &n1, &n2, &c_b1, &a[1], &n1, &a[n1 
+			* n1], &n1);
+		clauum_("L", &n2, &a[1], &n1, info);
+
+	    } else {
+
+/*              SRPA for UPPER, TRANSPOSE, and N is odd */
+/*              T1 -> a(0+N2*N2), T2 -> a(0+N1*N2), S -> a(0) */
+
+		clauum_("U", &n1, &a[n2 * n2], &n2, info);
+		cherk_("U", "C", &n1, &n2, &c_b12, a, &n2, &c_b12, &a[n2 * n2]
+, &n2);
+		ctrmm_("L", "L", "C", "N", &n2, &n1, &c_b1, &a[n1 * n2], &n2, 
+			a, &n2);
+		clauum_("L", &n2, &a[n1 * n2], &n2, info);
+
+	    }
+
+	}
+
+    } else {
+
+/*        N is even */
+
+	if (normaltransr) {
+
+/*           N is even and TRANSR = 'N' */
+
+	    if (lower) {
+
+/*              SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/*              T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/*              T1 -> a(1), T2 -> a(0), S -> a(k+1) */
+
+		i__1 = *n + 1;
+		clauum_("L", &k, &a[1], &i__1, info);
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		cherk_("L", "C", &k, &k, &c_b12, &a[k + 1], &i__1, &c_b12, &a[
+			1], &i__2);
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		ctrmm_("L", "U", "N", "N", &k, &k, &c_b1, a, &i__1, &a[k + 1], 
+			 &i__2);
+		i__1 = *n + 1;
+		clauum_("U", &k, a, &i__1, info);
+
+	    } else {
+
+/*              SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/*              T1 -> a(k+1,0) ,  T2 -> a(k,0),   S -> a(0,0) */
+/*              T1 -> a(k+1), T2 -> a(k), S -> a(0) */
+
+		i__1 = *n + 1;
+		clauum_("L", &k, &a[k + 1], &i__1, info);
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		cherk_("L", "N", &k, &k, &c_b12, a, &i__1, &c_b12, &a[k + 1], 
+			&i__2);
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		ctrmm_("R", "U", "C", "N", &k, &k, &c_b1, &a[k], &i__1, a, &
+			i__2);
+		i__1 = *n + 1;
+		clauum_("U", &k, &a[k], &i__1, info);
+
+	    }
+
+	} else {
+
+/*           N is even and TRANSR = 'C' */
+
+	    if (lower) {
+
+/*              SRPA for LOWER, TRANSPOSE, and N is even (see paper) */
+/*              T1 -> B(0,1), T2 -> B(0,0), S -> B(0,k+1), */
+/*              T1 -> a(0+k), T2 -> a(0+0), S -> a(0+k*(k+1)); lda=k */
+
+		clauum_("U", &k, &a[k], &k, info);
+		cherk_("U", "N", &k, &k, &c_b12, &a[k * (k + 1)], &k, &c_b12, 
+			&a[k], &k);
+		ctrmm_("R", "L", "N", "N", &k, &k, &c_b1, a, &k, &a[k * (k + 
+			1)], &k);
+		clauum_("L", &k, a, &k, info);
+
+	    } else {
+
+/*              SRPA for UPPER, TRANSPOSE, and N is even (see paper) */
+/*              T1 -> B(0,k+1), T2 -> B(0,k), S -> B(0,0), */
+/*              T1 -> a(0+k*(k+1)), T2 -> a(0+k*k), S -> a(0+0)); lda=k */
+
+		clauum_("U", &k, &a[k * (k + 1)], &k, info);
+		cherk_("U", "C", &k, &k, &c_b12, a, &k, &c_b12, &a[k * (k + 1)
+			], &k);
+		ctrmm_("L", "L", "C", "N", &k, &k, &c_b1, &a[k * k], &k, a, &
+			k);
+		clauum_("L", &k, &a[k * k], &k, info);
+
+	    }
+
+	}
+
+    }
+
+    return 0;
+
+/*     End of CPFTRI */
+
+} /* cpftri_ */
diff --git a/contrib/libs/clapack/cpftrs.c b/contrib/libs/clapack/cpftrs.c
new file mode 100644
index 0000000000..7c42d2679e
--- /dev/null
+++ b/contrib/libs/clapack/cpftrs.c
@@ -0,0 +1,260 @@
+/* cpftrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+
+/* Subroutine */ int cpftrs_(char *transr, char *uplo, integer *n, integer *
+	nrhs, complex *a, complex *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    logical normaltransr;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int ctfsm_(char *, char *, char *, char *, char *, 
+	     integer *, integer *, complex *, complex *, complex *, integer *);
+    logical lower;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+
+/*  -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/*  -- November 2008 -- */
+
+/*  -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CPFTRS solves a system of linear equations A*X = B with a Hermitian */
+/*  positive definite matrix A using the Cholesky factorization */
+/*  A = U**H*U or A = L*L**H computed by CPFTRF. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  TRANSR    (input) CHARACTER */
+/*          = 'N':  The Normal TRANSR of RFP A is stored; */
+/*          = 'C':  The Conjugate-transpose TRANSR of RFP A is stored. */
+
+/*  UPLO    (input) CHARACTER */
+/*          = 'U':  Upper triangle of RFP A is stored; */
+/*          = 'L':  Lower triangle of RFP A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrix B.  NRHS >= 0. */
+
+/*  A       (input) COMPLEX array, dimension ( N*(N+1)/2 ); */
+/*          The triangular factor U or L from the Cholesky factorization */
+/*          of RFP A = U**H*U or RFP A = L*L**H, as computed by CPFTRF. */
+/*          See note below for more details about RFP A. */
+
+/*  B       (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/*          On entry, the right hand side matrix B. */
+/*          On exit, the solution matrix X. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  Note: */
+/*  ===== */
+
+/*  We first consider Standard Packed Format when N is even. */
+/*  We give an example where N = 6. */
+
+/*     AP is Upper             AP is Lower */
+
+/*   00 01 02 03 04 05       00 */
+/*      11 12 13 14 15       10 11 */
+/*         22 23 24 25       20 21 22 */
+/*            33 34 35       30 31 32 33 */
+/*               44 45       40 41 42 43 44 */
+/*                  55       50 51 52 53 54 55 */
+
+
+/*  Let TRANSR = 'N'. RFP holds AP as follows: */
+/*  For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/*  three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/*  conjugate-transpose of the first three columns of AP upper. */
+/*  For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/*  three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/*  conjugate-transpose of the last three columns of AP lower. */
+/*  To denote conjugate we place -- above the element. This covers the */
+/*  case N even and TRANSR = 'N'. */
+
+/*         RFP A                   RFP A */
+
+/*                                -- -- -- */
+/*        03 04 05                33 43 53 */
+/*                                   -- -- */
+/*        13 14 15                00 44 54 */
+/*                                      -- */
+/*        23 24 25                10 11 55 */
+
+/*        33 34 35                20 21 22 */
+/*        -- */
+/*        00 44 45                30 31 32 */
+/*        -- -- */
+/*        01 11 55                40 41 42 */
+/*        -- -- -- */
+/*        02 12 22                50 51 52 */
+
+/*  Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/*  transpose of RFP A above. One therefore gets: */
+
+
+/*           RFP A                   RFP A */
+
+/*     -- -- -- --                -- -- -- -- -- -- */
+/*     03 13 23 33 00 01 02    33 00 10 20 30 40 50 */
+/*     -- -- -- --                -- -- -- -- -- -- */
+/*     04 14 24 34 44 11 12    43 44 11 21 31 41 51 */
+/*     -- -- -- --                -- -- -- -- -- -- */
+/*     05 15 25 35 45 55 22    53 54 55 22 32 42 52 */
+
+
+/*  We next consider Standard Packed Format when N is odd. */
+/*  We give an example where N = 5. */
+
+/*     AP is Upper                 AP is Lower */
+
+/*   00 01 02 03 04              00 */
+/*      11 12 13 14              10 11 */
+/*         22 23 24              20 21 22 */
+/*            33 34              30 31 32 33 */
+/*               44              40 41 42 43 44 */
+
+
+/*  Let TRANSR = 'N'. RFP holds AP as follows: */
+/*  For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/*  three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/*  conjugate-transpose of the first two columns of AP upper. */
+/*  For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/*  three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/*  conjugate-transpose of the last two columns of AP lower. */
+/*  To denote conjugate we place -- above the element. This covers the */
+/*  case N odd and TRANSR = 'N'. */
+
+/*         RFP A                   RFP A */
+
+/*                                   -- -- */
+/*        02 03 04                00 33 43 */
+/*                                      -- */
+/*        12 13 14                10 11 44 */
+
+/*        22 23 24                20 21 22 */
+/*        -- */
+/*        00 33 34                30 31 32 */
+/*        -- -- */
+/*        01 11 44                40 41 42 */
+
+/*  Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/*  transpose of RFP A above. One therefore gets: */
+
+
+/*           RFP A                   RFP A */
+
+/*     -- -- --                   -- -- -- -- -- -- */
+/*     02 12 22 00 01             00 10 20 30 40 50 */
+/*     -- -- --                   -- -- -- -- -- -- */
+/*     03 13 23 33 11             33 11 21 31 41 51 */
+/*     -- -- --                   -- -- -- -- -- -- */
+/*     04 14 24 34 44             43 44 22 32 42 52 */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    normaltransr = lsame_(transr, "N");
+    lower = lsame_(uplo, "L");
+    if (! normaltransr && ! lsame_(transr, "C")) {
+	*info = -1;
+    } else if (! lower && ! lsame_(uplo, "U")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPFTRS", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+/*     start execution: there are two triangular solves */
+
+/*     A = L*L**H (lower) is applied as L then L**H; A = U**H*U (upper) */
+/*     as U**H then U. Both solves run on the RFP-packed factor via */
+/*     ctfsm, overwriting B with X in place. */
+
+    if (lower) {
+	ctfsm_(transr, "L", uplo, "N", "N", n, nrhs, &c_b1, a, &b[b_offset], 
+		ldb);
+	ctfsm_(transr, "L", uplo, "C", "N", n, nrhs, &c_b1, a, &b[b_offset], 
+		ldb);
+    } else {
+	ctfsm_(transr, "L", uplo, "C", "N", n, nrhs, &c_b1, a, &b[b_offset], 
+		ldb);
+	ctfsm_(transr, "L", uplo, "N", "N", n, nrhs, &c_b1, a, &b[b_offset], 
+		ldb);
+    }
+
+    return 0;
+
+/*     End of CPFTRS */
+
+} /* cpftrs_ */
diff --git a/contrib/libs/clapack/cpocon.c b/contrib/libs/clapack/cpocon.c
new file mode 100644
index 0000000000..b31555ea06
--- /dev/null
+++ b/contrib/libs/clapack/cpocon.c
@@ -0,0 +1,224 @@
+/* cpocon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cpocon_(char *uplo, integer *n, complex *a, integer *lda, 
+	real *anorm, real *rcond, complex *work, real *rwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1;
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double r_imag(complex *);
+
+    /* Local variables */
+    integer ix, kase;
+    real scale;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    logical upper;
+    extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real 
+	    *, integer *, integer *);
+    extern integer icamax_(integer *, complex *, integer *);
+    real scalel;
+    extern doublereal slamch_(char *);
+    real scaleu;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real ainvnm;
+    extern /* Subroutine */ int clatrs_(char *, char *, char *, char *, 
+	    integer *, complex *, integer *, complex *, real *, real *, 
+	    integer *), csrscl_(integer *, 
+	    real *, complex *, integer *);
+    char normin[1];
+    real smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPOCON estimates the reciprocal of the condition number (in the */
+/* 1-norm) of a complex Hermitian positive definite matrix using the */
+/* Cholesky factorization A = U**H*U or A = L*L**H computed by CPOTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO    (input) CHARACTER*1 */
+/* = 'U':  Upper triangle of A is stored; */
+/* = 'L':  Lower triangle of A is stored. */
+
+/* N       (input) INTEGER */
+/* The order of the matrix A.  N >= 0. */
+
+/* A       (input) COMPLEX array, dimension (LDA,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**H*U or A = L*L**H, as computed by CPOTRF. */
+
+/* LDA     (input) INTEGER */
+/* The leading dimension of the array A.  LDA >= max(1,N). */
+
+/* ANORM   (input) REAL */
+/* The 1-norm (or infinity-norm) of the Hermitian matrix A. */
+
+/* RCOND   (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK    (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK   (workspace) REAL array, dimension (N) */
+
+/* INFO    (output) INTEGER */
+/* = 0:  successful exit */
+/* < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*anorm < 0.f) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPOCON", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *rcond = 0.f;
+    if (*n == 0) {
+	*rcond = 1.f;
+	return 0;
+    } else if (*anorm == 0.f) {
+	return 0;
+    }
+
+    smlnum = slamch_("Safe minimum"); /* underflow threshold, used in the overflow guard below */
+
+/* Estimate the 1-norm of inv(A). */
+
+    kase = 0;
+    *(unsigned char *)normin = 'N';
+L10:
+    clacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+    if (kase != 0) {
+	if (upper) {
+
+/* Multiply by inv(U**H), the conjugate transpose of the Cholesky factor. */
+
+	    clatrs_("Upper", "Conjugate transpose", "Non-unit", normin, n, &a[
+		    a_offset], lda, &work[1], &scalel, &rwork[1], info);
+	    *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(U). */
+
+	    clatrs_("Upper", "No transpose", "Non-unit", normin, n, &a[
+		    a_offset], lda, &work[1], &scaleu, &rwork[1], info);
+	} else {
+
+/* Multiply by inv(L). */
+
+	    clatrs_("Lower", "No transpose", "Non-unit", normin, n, &a[
+		    a_offset], lda, &work[1], &scalel, &rwork[1], info);
+	    *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(L**H). */
+
+	    clatrs_("Lower", "Conjugate transpose", "Non-unit", normin, n, &a[
+		    a_offset], lda, &work[1], &scaleu, &rwork[1], info);
+	}
+
+/* Multiply by 1/SCALE if doing so will not cause overflow. */
+
+	scale = scalel * scaleu;
+	if (scale != 1.f) {
+	    ix = icamax_(n, &work[1], &c__1);
+	    i__1 = ix;
+	    if (scale < ((r__1 = work[i__1].r, dabs(r__1)) + (r__2 = r_imag(&
+		    work[ix]), dabs(r__2))) * smlnum || scale == 0.f) {
+		goto L20;
+	    }
+	    csrscl_(n, &scale, &work[1], &c__1);
+	}
+	goto L10;
+    }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+    if (ainvnm != 0.f) {
+	*rcond = 1.f / ainvnm / *anorm;
+    }
+
+L20:
+    return 0;
+
+/* End of CPOCON */
+
+} /* cpocon_ */
diff --git a/contrib/libs/clapack/cpoequ.c b/contrib/libs/clapack/cpoequ.c
new file mode 100644
index 0000000000..a6c8498b8e
--- /dev/null
+++ b/contrib/libs/clapack/cpoequ.c
@@ -0,0 +1,176 @@
+/* cpoequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cpoequ_(integer *n, complex *a, integer *lda, real *s, 
+	real *scond, real *amax, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__;
+    real smin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPOEQU computes row and column scalings intended to equilibrate a */
+/* Hermitian positive definite matrix A and reduce its condition number */
+/* (with respect to the two-norm).  S contains the scale factors, */
+/* S(i) = 1/sqrt(A(i,i)), chosen so that the scaled matrix B with */
+/* elements B(i,j) = S(i)*A(i,j)*S(j) has ones on the diagonal.  This */
+/* choice of S puts the condition number of B within a factor N of the */
+/* smallest possible condition number over all possible diagonal */
+/* scalings. */
+
+/* Arguments */
+/* ========= */
+
+/* N       (input) INTEGER */
+/* The order of the matrix A.  N >= 0. */
+
+/* A       (input) COMPLEX array, dimension (LDA,N) */
+/* The N-by-N Hermitian positive definite matrix whose scaling */
+/* factors are to be computed.  Only the diagonal elements of A */
+/* are referenced. */
+
+/* LDA     (input) INTEGER */
+/* The leading dimension of the array A.  LDA >= max(1,N). */
+
+/* S       (output) REAL array, dimension (N) */
+/* If INFO = 0, S contains the scale factors for A. */
+
+/* SCOND   (output) REAL */
+/* If INFO = 0, S contains the ratio of the smallest S(i) to */
+/* the largest S(i).  If SCOND >= 0.1 and AMAX is neither too */
+/* large nor too small, it is not worth scaling by S. */
+
+/* AMAX    (output) REAL */
+/* Absolute value of largest matrix element.  If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO    (output) INTEGER */
+/* = 0:  successful exit */
+/* < 0:  if INFO = -i, the i-th argument had an illegal value */
+/* > 0:  if INFO = i, the i-th diagonal element is nonpositive. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --s;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*lda < max(1,*n)) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPOEQU", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	*scond = 1.f;
+	*amax = 0.f;
+	return 0;
+    }
+
+/* Find the minimum and maximum diagonal elements. */
+
+    i__1 = a_dim1 + 1;
+    s[1] = a[i__1].r; /* diagonal of a Hermitian matrix is real: only .r is read */
+    smin = s[1];
+    *amax = s[1];
+    i__1 = *n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+	i__2 = i__ + i__ * a_dim1;
+	s[i__] = a[i__2].r;
+/* Computing MIN */
+	r__1 = smin, r__2 = s[i__];
+	smin = dmin(r__1,r__2);
+/* Computing MAX */
+	r__1 = *amax, r__2 = s[i__];
+	*amax = dmax(r__1,r__2);
+/* L10: */
+    }
+
+    if (smin <= 0.f) {
+
+/* Find the first non-positive diagonal element and return. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (s[i__] <= 0.f) {
+		*info = i__;
+		return 0;
+	    }
+/* L20: */
+	}
+    } else {
+
+/* Set the scale factors to the reciprocals */
+/* of the diagonal elements. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    s[i__] = 1.f / sqrt(s[i__]);
+/* L30: */
+	}
+
+/* Compute SCOND = min(S(I)) / max(S(I)) */
+
+	*scond = sqrt(smin) / sqrt(*amax); /* equals min(S)/max(S) since S(i) = 1/sqrt(A(i,i)) */
+    }
+    return 0;
+
+/* End of CPOEQU */
+
+} /* cpoequ_ */
diff --git a/contrib/libs/clapack/cpoequb.c b/contrib/libs/clapack/cpoequb.c
new file mode 100644
index 0000000000..1c2963227b
--- /dev/null
+++ b/contrib/libs/clapack/cpoequb.c
@@ -0,0 +1,195 @@
+/* cpoequb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cpoequb_(integer *n, complex *a, integer *lda, real *s, 
+	real *scond, real *amax, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double log(doublereal), pow_ri(real *, integer *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__;
+    real tmp, base, smin;
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- Contributed by James Demmel, Deaglan Halligan, Yozo Hida and -- */
+/* -- Jason Riedy of Univ. of California Berkeley. -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley and NAG Ltd. -- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPOEQUB computes row and column scalings intended to equilibrate a */
+/* symmetric positive definite matrix A and reduce its condition number */
+/* (with respect to the two-norm).  S contains the scale factors, */
+/* S(i) = 1/sqrt(A(i,i)), chosen so that the scaled matrix B with */
+/* elements B(i,j) = S(i)*A(i,j)*S(j) has ones on the diagonal.  This */
+/* choice of S puts the condition number of B within a factor N of the */
+/* smallest possible condition number over all possible diagonal */
+/* scalings. */
+
+/* Arguments */
+/* ========= */
+
+/* N       (input) INTEGER */
+/* The order of the matrix A.  N >= 0. */
+
+/* A       (input) COMPLEX array, dimension (LDA,N) */
+/* The N-by-N symmetric positive definite matrix whose scaling */
+/* factors are to be computed.  Only the diagonal elements of A */
+/* are referenced. */
+
+/* LDA     (input) INTEGER */
+/* The leading dimension of the array A.  LDA >= max(1,N). */
+
+/* S       (output) REAL array, dimension (N) */
+/* If INFO = 0, S contains the scale factors for A. */
+
+/* SCOND   (output) REAL */
+/* If INFO = 0, S contains the ratio of the smallest S(i) to */
+/* the largest S(i).  If SCOND >= 0.1 and AMAX is neither too */
+/* large nor too small, it is not worth scaling by S. */
+
+/* AMAX    (output) REAL */
+/* Absolute value of largest matrix element.  If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO    (output) INTEGER */
+/* = 0:  successful exit */
+/* < 0:  if INFO = -i, the i-th argument had an illegal value */
+/* > 0:  if INFO = i, the i-th diagonal element is nonpositive. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function Definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* Positive definite only performs 1 pass of equilibration. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --s;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*lda < max(1,*n)) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPOEQUB", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*n == 0) {
+	*scond = 1.f;
+	*amax = 0.f;
+	return 0;
+    }
+    base = slamch_("B"); /* floating-point radix */
+    tmp = -.5f / log(base); /* tmp*log(d) = log_base(1/sqrt(d)); used to round scales to a radix power */
+
+/* Find the minimum and maximum diagonal elements. */
+
+    i__1 = a_dim1 + 1;
+    s[1] = a[i__1].r;
+    smin = s[1];
+    *amax = s[1];
+    i__1 = *n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+	i__2 = i__;
+	i__3 = i__ + i__ * a_dim1;
+	s[i__2] = a[i__3].r;
+/* Computing MIN */
+	r__1 = smin, r__2 = s[i__];
+	smin = dmin(r__1,r__2);
+/* Computing MAX */
+	r__1 = *amax, r__2 = s[i__];
+	*amax = dmax(r__1,r__2);
+/* L10: */
+    }
+
+    if (smin <= 0.f) {
+
+/* Find the first non-positive diagonal element and return. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (s[i__] <= 0.f) {
+		*info = i__;
+		return 0;
+	    }
+/* L20: */
+	}
+    } else {
+
+/* Set the scale factors to the reciprocals */
+/* of the diagonal elements. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = (integer) (tmp * log(s[i__]));
+	    s[i__] = pow_ri(&base, &i__2); /* ~1/sqrt(A(i,i)) rounded to a power of the radix, so scaling is exact */
+/* L30: */
+	}
+
+/* Compute SCOND = min(S(I)) / max(S(I)). */
+
+	*scond = sqrt(smin) / sqrt(*amax);
+    }
+
+    return 0;
+
+/* End of CPOEQUB */
+
+} /* cpoequb_ */
diff --git a/contrib/libs/clapack/cporfs.c b/contrib/libs/clapack/cporfs.c
new file mode 100644
index 0000000000..401a8773d0
--- /dev/null
+++ b/contrib/libs/clapack/cporfs.c
@@ -0,0 +1,465 @@
+/* cporfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int cporfs_(char *uplo, integer *n, integer *nrhs, complex *
+	a, integer *lda, complex *af, integer *ldaf, complex *b, integer *ldb, 
+	 complex *x, integer *ldx, real *ferr, real *berr, complex *work, 
+	real *rwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1, 
+	    x_offset, i__1, i__2, i__3, i__4, i__5;
+    real r__1, r__2, r__3, r__4;
+    complex q__1;
+
+    /* Builtin functions */
+    double r_imag(complex *);
+
+    /* Local variables */
+    integer i__, j, k;
+    real s, xk;
+    integer nz;
+    real eps;
+    integer kase;
+    real safe1, safe2;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int chemv_(char *, integer *, complex *, complex *
+, integer *, complex *, integer *, complex *, complex *, integer *
+);
+    integer isave[3];
+    extern /* Subroutine */ int ccopy_(integer *, complex *, integer *, 
+	    complex *, integer *), caxpy_(integer *, complex *, complex *, 
+	    integer *, complex *, integer *);
+    integer count;
+    logical upper;
+    extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real 
+	    *, integer *, integer *);
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *), cpotrs_(
+	    char *, integer *, integer *, complex *, integer *, complex *, 
+	    integer *, integer *);
+    real lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPORFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is Hermitian positive definite, */
+/* and provides error bounds and backward error estimates for the */
+/* solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO    (input) CHARACTER*1 */
+/* = 'U':  Upper triangle of A is stored; */
+/* = 'L':  Lower triangle of A is stored. */
+
+/* N       (input) INTEGER */
+/* The order of the matrix A.  N >= 0. */
+
+/* NRHS    (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X.  NRHS >= 0. */
+
+/* A       (input) COMPLEX array, dimension (LDA,N) */
+/* The Hermitian matrix A.  If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced.  If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. */
+
+/* LDA     (input) INTEGER */
+/* The leading dimension of the array A.  LDA >= max(1,N). */
+
+/* AF      (input) COMPLEX array, dimension (LDAF,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**H*U or A = L*L**H, as computed by CPOTRF. */
+
+/* LDAF    (input) INTEGER */
+/* The leading dimension of the array AF.  LDAF >= max(1,N). */
+
+/* B       (input) COMPLEX array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB     (input) INTEGER */
+/* The leading dimension of the array B.  LDB >= max(1,N). */
+
+/* X       (input/output) COMPLEX array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by CPOTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX     (input) INTEGER */
+/* The leading dimension of the array X.  LDX >= max(1,N). */
+
+/* FERR    (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j).  The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR    (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK    (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK   (workspace) REAL array, dimension (N) */
+
+/* INFO    (output) INTEGER */
+/* = 0:  successful exit */
+/* < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ==================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    af_dim1 = *ldaf;
+    af_offset = 1 + af_dim1;
+    af -= af_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldaf < max(1,*n)) {
+	*info = -7;
+    } else if (*ldb < max(1,*n)) {
+	*info = -9;
+    } else if (*ldx < max(1,*n)) {
+	*info = -11;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPORFS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] = 0.f;
+	    berr[j] = 0.f;
+/* L10: */
+	}
+	return 0;
+    }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+    nz = *n + 1;
+    eps = slamch_("Epsilon");
+    safmin = slamch_("Safe minimum");
+    safe1 = nz * safmin;
+    safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+	count = 1;
+	lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+	ccopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+	q__1.r = -1.f, q__1.i = -0.f;
+	chemv_(uplo, n, &q__1, &a[a_offset], lda, &x[j * x_dim1 + 1], &c__1, &
+		c_b1, &work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z.  If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * b_dim1;
+	    rwork[i__] = (r__1 = b[i__3].r, dabs(r__1)) + (r__2 = r_imag(&b[
+		    i__ + j * b_dim1]), dabs(r__2));
+/* L30: */
+	}
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+	if (upper) {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		s = 0.f;
+		i__3 = k + j * x_dim1;
+		xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[k + j 
+			* x_dim1]), dabs(r__2));
+		i__3 = k - 1;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    i__4 = i__ + k * a_dim1;
+		    rwork[i__] += ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 = 
+			    r_imag(&a[i__ + k * a_dim1]), dabs(r__2))) * xk;
+		    i__4 = i__ + k * a_dim1;
+		    i__5 = i__ + j * x_dim1;
+		    s += ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 = r_imag(&a[
+			    i__ + k * a_dim1]), dabs(r__2))) * ((r__3 = x[
+			    i__5].r, dabs(r__3)) + (r__4 = r_imag(&x[i__ + j *
+			     x_dim1]), dabs(r__4)));
+/* L40: */
+		}
+		i__3 = k + k * a_dim1;
+		rwork[k] = rwork[k] + (r__1 = a[i__3].r, dabs(r__1)) * xk + s;
+/* L50: */
+	    }
+	} else {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		s = 0.f;
+		i__3 = k + j * x_dim1;
+		xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[k + j 
+			* x_dim1]), dabs(r__2));
+		i__3 = k + k * a_dim1;
+		rwork[k] += (r__1 = a[i__3].r, dabs(r__1)) * xk;
+		i__3 = *n;
+		for (i__ = k + 1; i__ <= i__3; ++i__) {
+		    i__4 = i__ + k * a_dim1;
+		    rwork[i__] += ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 = 
+			    r_imag(&a[i__ + k * a_dim1]), dabs(r__2))) * xk;
+		    i__4 = i__ + k * a_dim1;
+		    i__5 = i__ + j * x_dim1;
+		    s += ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 = r_imag(&a[
+			    i__ + k * a_dim1]), dabs(r__2))) * ((r__3 = x[
+			    i__5].r, dabs(r__3)) + (r__4 = r_imag(&x[i__ + j *
+			     x_dim1]), dabs(r__4)));
+/* L60: */
+		}
+		rwork[k] += s;
+/* L70: */
+	    }
+	}
+	s = 0.f;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (rwork[i__] > safe2) {
+/* Computing MAX */
+		i__3 = i__;
+		r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 = 
+			r_imag(&work[i__]), dabs(r__2))) / rwork[i__];
+		s = dmax(r__3,r__4);
+	    } else {
+/* Computing MAX */
+		i__3 = i__;
+		r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 = 
+			r_imag(&work[i__]), dabs(r__2)) + safe1) / (rwork[i__]
+			 + safe1);
+		s = dmax(r__3,r__4);
+	    }
+/* L80: */
+	}
+	berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+	if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) { /* 5 == ITMAX */
+
+/* Update solution and try again. */
+
+	    cpotrs_(uplo, n, &c__1, &af[af_offset], ldaf, &work[1], n, info);
+	    caxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+	    lstres = berr[j];
+	    ++count;
+	    goto L20;
+	}
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use CLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (rwork[i__] > safe2) {
+		i__3 = i__;
+		rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 = 
+			r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+			i__];
+	    } else {
+		i__3 = i__;
+		rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 = 
+			r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+			i__] + safe1;
+	    }
+/* L90: */
+	}
+
+	kase = 0;
+L100:
+	clacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+	if (kase != 0) {
+	    if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A**H) (A is Hermitian, so CPOTRS solves with A). */
+
+		cpotrs_(uplo, n, &c__1, &af[af_offset], ldaf, &work[1], n, 
+			info);
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__;
+		    q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4] 
+			    * work[i__5].i;
+		    work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L110: */
+		}
+	    } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__;
+		    q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4] 
+			    * work[i__5].i;
+		    work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L120: */
+		}
+		cpotrs_(uplo, n, &c__1, &af[af_offset], ldaf, &work[1], n, 
+			info);
+	    }
+	    goto L100;
+	}
+
+/* Normalize error. */
+
+	lstres = 0.f;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    i__3 = i__ + j * x_dim1;
+	    r__3 = lstres, r__4 = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = 
+		    r_imag(&x[i__ + j * x_dim1]), dabs(r__2));
+	    lstres = dmax(r__3,r__4);
+/* L130: */
+	}
+	if (lstres != 0.f) {
+	    ferr[j] /= lstres;
+	}
+
+/* L140: */
+    }
+
+    return 0;
+
+/* End of CPORFS */
+
+} /* cporfs_ */
diff --git a/contrib/libs/clapack/cposv.c b/contrib/libs/clapack/cposv.c
new file mode 100644
index 0000000000..8dbc051897
--- /dev/null
+++ b/contrib/libs/clapack/cposv.c
@@ -0,0 +1,151 @@
+/* cposv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cposv_(char *uplo, integer *n, integer *nrhs, complex *a, 
+	 integer *lda, complex *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *), cpotrf_(
+	    char *, integer *, complex *, integer *, integer *), 
+	    cpotrs_(char *, integer *, integer *, complex *, integer *, 
+	    complex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPOSV computes the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N Hermitian positive definite matrix and X and B */
+/* are N-by-NRHS matrices. */
+
+/* The Cholesky decomposition is used to factor A as */
+/* A = U**H* U,  if UPLO = 'U', or */
+/* A = L * L**H,  if UPLO = 'L', */
+/* where U is an upper triangular matrix and  L is a lower triangular */
+/* matrix.  The factored form of A is then used to solve the system of */
+/* equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO    (input) CHARACTER*1 */
+/* = 'U':  Upper triangle of A is stored; */
+/* = 'L':  Lower triangle of A is stored. */
+
+/* N       (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A.  N >= 0. */
+
+/* NRHS    (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B.  NRHS >= 0. */
+
+/* A       (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A.  If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced.  If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H. */
+
+/* LDA     (input) INTEGER */
+/* The leading dimension of the array A.  LDA >= max(1,N). */
+
+/* B       (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB     (input) INTEGER */
+/* The leading dimension of the array B.  LDB >= max(1,N). */
+
+/* INFO    (output) INTEGER */
+/* = 0:  successful exit */
+/* < 0:  if INFO = -i, the i-th argument had an illegal value */
+/* > 0:  if INFO = i, the leading minor of order i of A is not */
+/* positive definite, so the factorization could not be */
+/* completed, and the solution has not been computed. */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPOSV ", &i__1);
+	return 0;
+    }
+
+/* Compute the Cholesky factorization A = U**H*U or A = L*L**H. */
+
+    cpotrf_(uplo, n, &a[a_offset], lda, info);
+    if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+	cpotrs_(uplo, n, nrhs, &a[a_offset], lda, &b[b_offset], ldb, info);
+
+    }
+    return 0;
+
+/* End of CPOSV */
+
+} /* cposv_ */
diff --git a/contrib/libs/clapack/cposvx.c b/contrib/libs/clapack/cposvx.c
new file mode 100644
index 0000000000..bf2fc35275
--- /dev/null
+++ b/contrib/libs/clapack/cposvx.c
@@ -0,0 +1,458 @@
+/* cposvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cposvx_(char *fact, char *uplo, integer *n, integer *
+ nrhs, complex *a, integer *lda, complex *af, integer *ldaf, char *
+ equed, real *s, complex *b, integer *ldb, complex *x, integer *ldx,
+ real *rcond, real *ferr, real *berr, complex *work, real *rwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+ x_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, j;
+ real amax, smin, smax;
+ extern logical lsame_(char *, char *);
+ real scond, anorm;
+ logical equil, rcequ;
+ extern doublereal clanhe_(char *, char *, integer *, complex *, integer *,
+ real *);
+ extern /* Subroutine */ int claqhe_(char *, integer *, complex *, integer
+ *, real *, real *, real *, char *);
+ extern doublereal slamch_(char *);
+ logical nofact;
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), xerbla_(char *,
+ integer *);
+ real bignum;
+ extern /* Subroutine */ int cpocon_(char *, integer *, complex *, integer
+ *, real *, real *, complex *, real *, integer *);
+ integer infequ;
+ extern /* Subroutine */ int cpoequ_(integer *, complex *, integer *, real
+ *, real *, real *, integer *), cporfs_(char *, integer *, integer
+ *, complex *, integer *, complex *, integer *, complex *, integer
+ *, complex *, integer *, real *, real *, complex *, real *,
+ integer *), cpotrf_(char *, integer *, complex *, integer
+ *, integer *), cpotrs_(char *, integer *, integer *,
+ complex *, integer *, complex *, integer *, integer *);
+ real smlnum;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPOSVX uses the Cholesky factorization A = U**H*U or A = L*L**H to */
+/* compute the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N Hermitian positive definite matrix and X and B */
+/* are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/* the system: */
+/* diag(S) * A * diag(S) * inv(diag(S)) * X = diag(S) * B */
+/* Whether or not the system will be equilibrated depends on the */
+/* scaling of the matrix A, but if equilibration is used, A is */
+/* overwritten by diag(S)*A*diag(S) and B by diag(S)*B. */
+
+/* 2. If FACT = 'N' or 'E', the Cholesky decomposition is used to */
+/* factor the matrix A (after equilibration if FACT = 'E') as */
+/* A = U**H* U, if UPLO = 'U', or */
+/* A = L * L**H, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is a lower triangular */
+/* matrix. */
+
+/* 3. If the leading i-by-i principal minor is not positive definite, */
+/* then the routine returns with INFO = i. Otherwise, the factored */
+/* form of A is used to estimate the condition number of the matrix */
+/* A. If the reciprocal of the condition number is less than machine */
+/* precision, INFO = N+1 is returned as a warning, but the routine */
+/* still goes on to solve for X and compute error bounds as */
+/* described below. */
+
+/* 4. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 5. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* 6. If equilibration was used, the matrix X is premultiplied by */
+/* diag(S) so that it solves the original system before */
+/* equilibration. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of the matrix A is */
+/* supplied on entry, and if not, whether the matrix A should be */
+/* equilibrated before it is factored. */
+/* = 'F': On entry, AF contains the factored form of A. */
+/* If EQUED = 'Y', the matrix A has been equilibrated */
+/* with scaling factors given by S. A and AF will not */
+/* be modified. */
+/* = 'N': The matrix A will be copied to AF and factored. */
+/* = 'E': The matrix A will be equilibrated if necessary, then */
+/* copied to AF and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A, except if FACT = 'F' and */
+/* EQUED = 'Y', then A must contain the equilibrated matrix */
+/* diag(S)*A*diag(S). If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. A is not modified if */
+/* FACT = 'F' or 'N', or if FACT = 'E' and EQUED = 'N' on exit. */
+
+/* On exit, if FACT = 'E' and EQUED = 'Y', A is overwritten by */
+/* diag(S)*A*diag(S). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input or output) COMPLEX array, dimension (LDAF,N) */
+/* If FACT = 'F', then AF is an input argument and on entry */
+/* contains the triangular factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H, in the same storage */
+/* format as A. If EQUED .ne. 'N', then AF is the factored form */
+/* of the equilibrated matrix diag(S)*A*diag(S). */
+
+/* If FACT = 'N', then AF is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H of the original */
+/* matrix A. */
+
+/* If FACT = 'E', then AF is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H of the equilibrated */
+/* matrix A (see the description of A for the form of the */
+/* equilibrated matrix). */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* EQUED (input or output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration (always true if FACT = 'N'). */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+/* EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/* output argument. */
+
+/* S (input or output) REAL array, dimension (N) */
+/* The scale factors for A; not accessed if EQUED = 'N'. S is */
+/* an input argument if FACT = 'F'; otherwise, S is an output */
+/* argument. If FACT = 'F' and EQUED = 'Y', each element of S */
+/* must be positive. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS righthand side matrix B. */
+/* On exit, if EQUED = 'N', B is not modified; if EQUED = 'Y', */
+/* B is overwritten by diag(S) * B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X to */
+/* the original system of equations. Note that if EQUED = 'Y', */
+/* A and B are modified on exit, and the solution to the */
+/* equilibrated system is inv(diag(S))*X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) REAL */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A after equilibration (if done). If RCOND is less than the */
+/* machine precision (in particular, if RCOND = 0), the matrix */
+/* is singular to working precision. This condition is */
+/* indicated by a return code of INFO > 0. */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: the leading minor of order i of A is */
+/* not positive definite, so the factorization */
+/* could not be completed, and the solution has not */
+/* been computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1; /* shift to 1-based (i,j) indexing, f2c style */
+ a -= a_offset;
+ af_dim1 = *ldaf;
+ af_offset = 1 + af_dim1;
+ af -= af_offset;
+ --s;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0; /* assume success until an argument check fails */
+ nofact = lsame_(fact, "N");
+ equil = lsame_(fact, "E");
+ if (nofact || equil) {
+ *(unsigned char *)equed = 'N'; /* EQUED is an output when FACT != 'F' */
+ rcequ = FALSE_;
+ } else {
+ rcequ = lsame_(equed, "Y");
+ smlnum = slamch_("Safe minimum");
+ bignum = 1.f / smlnum; /* largest safe magnitude for scale checks */
+ }
+
+/* Test the input parameters. */
+
+ if (! nofact && ! equil && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! lsame_(uplo, "U") && ! lsame_(uplo,
+ "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else if (*ldaf < max(1,*n)) {
+ *info = -8;
+ } else if (lsame_(fact, "F") && ! (rcequ || lsame_(
+ equed, "N"))) {
+ *info = -9;
+ } else {
+ if (rcequ) {
+ smin = bignum; /* track extreme user-supplied scale factors */
+ smax = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ r__1 = smin, r__2 = s[j];
+ smin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = smax, r__2 = s[j];
+ smax = dmax(r__1,r__2);
+/* L10: */
+ }
+ if (smin <= 0.f) {
+ *info = -10; /* scale factors must be strictly positive */
+ } else if (*n > 0) {
+ scond = dmax(smin,smlnum) / dmin(smax,bignum); /* smallest/largest scale ratio */
+ } else {
+ scond = 1.f;
+ }
+ }
+ if (*info == 0) {
+ if (*ldb < max(1,*n)) {
+ *info = -12;
+ } else if (*ldx < max(1,*n)) {
+ *info = -14;
+ }
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info); /* XERBLA expects the positive argument index */
+ xerbla_("CPOSVX", &i__1);
+ return 0;
+ }
+
+ if (equil) {
+
+/* Compute row and column scalings to equilibrate the matrix A. */
+
+ cpoequ_(n, &a[a_offset], lda, &s[1], &scond, &amax, &infequ);
+ if (infequ == 0) {
+
+/* Equilibrate the matrix. */
+
+ claqhe_(uplo, n, &a[a_offset], lda, &s[1], &scond, &amax, equed); /* A := diag(S)*A*diag(S) if worthwhile */
+ rcequ = lsame_(equed, "Y");
+ }
+ }
+
+/* Scale the right hand side. */
+
+ if (rcequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__;
+ i__5 = i__ + j * b_dim1;
+ q__1.r = s[i__4] * b[i__5].r, q__1.i = s[i__4] * b[i__5].i; /* B := diag(S)*B */
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+
+ if (nofact || equil) {
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+ clacpy_(uplo, n, n, &a[a_offset], lda, &af[af_offset], ldaf); /* keep A intact; factor the copy */
+ cpotrf_(uplo, n, &af[af_offset], ldaf, info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.f; /* not positive definite: no condition estimate */
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ anorm = clanhe_("1", uplo, n, &a[a_offset], lda, &rwork[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ cpocon_(uplo, n, &af[af_offset], ldaf, &anorm, rcond, &work[1], &rwork[1],
+ info);
+
+/* Compute the solution matrix X. */
+
+ clacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ cpotrs_(uplo, n, nrhs, &af[af_offset], ldaf, &x[x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solution and */
+/* compute error bounds and backward error estimates for it. */
+
+ cporfs_(uplo, n, nrhs, &a[a_offset], lda, &af[af_offset], ldaf, &b[
+ b_offset], ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[1], &
+ rwork[1], info);
+
+/* Transform the solution matrix X to a solution of the original */
+/* system. */
+
+ if (rcequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * x_dim1;
+ i__4 = i__;
+ i__5 = i__ + j * x_dim1;
+ q__1.r = s[i__4] * x[i__5].r, q__1.i = s[i__4] * x[i__5].i; /* X := diag(S)*X (undo equilibration) */
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+/* L40: */
+ }
+/* L50: */
+ }
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] /= scond; /* rescale forward error bounds for the original system */
+/* L60: */
+ }
+ }
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < slamch_("Epsilon")) {
+ *info = *n + 1; /* warning only; X and bounds were still computed */
+ }
+
+ return 0;
+
+/* End of CPOSVX */
+
+} /* cposvx_ */
diff --git a/contrib/libs/clapack/cpotf2.c b/contrib/libs/clapack/cpotf2.c
new file mode 100644
index 0000000000..53ff86252a
--- /dev/null
+++ b/contrib/libs/clapack/cpotf2.c
@@ -0,0 +1,245 @@
+/* cpotf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int cpotf2_(char *uplo, integer *n, complex *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer j;
+ real ajj;
+ extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *);
+ logical upper;
+ extern /* Subroutine */ int clacgv_(integer *, complex *, integer *),
+ csscal_(integer *, real *, complex *, integer *), xerbla_(char *,
+ integer *);
+ extern logical sisnan_(real *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPOTF2 computes the Cholesky factorization of a complex Hermitian */
+/* positive definite matrix A. */
+
+/* The factorization has the form */
+/* A = U' * U , if UPLO = 'U', or */
+/* A = L * L', if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular. */
+
+/* This is the unblocked version of the algorithm, calling Level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* Hermitian matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization A = U'*U or A = L*L'. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, the leading minor of order k is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1; /* shift to 1-based (i,j) indexing, f2c style */
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info); /* XERBLA expects the positive argument index */
+ xerbla_("CPOTF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute U(J,J) and test for non-positive-definiteness. */
+
+ i__2 = j + j * a_dim1;
+ r__1 = a[i__2].r; /* diagonal of a Hermitian matrix is real */
+ i__3 = j - 1;
+ cdotc_(&q__2, &i__3, &a[j * a_dim1 + 1], &c__1, &a[j * a_dim1 + 1]
+, &c__1); /* conjugated dot product of the column above the diagonal */
+ q__1.r = r__1 - q__2.r, q__1.i = -q__2.i;
+ ajj = q__1.r;
+ if (ajj <= 0.f || sisnan_(&ajj)) { /* not positive definite (NaN check guards overflow) */
+ i__2 = j + j * a_dim1;
+ a[i__2].r = ajj, a[i__2].i = 0.f;
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ i__2 = j + j * a_dim1;
+ a[i__2].r = ajj, a[i__2].i = 0.f;
+
+/* Compute elements J+1:N of row J. */
+
+ if (j < *n) {
+ i__2 = j - 1;
+ clacgv_(&i__2, &a[j * a_dim1 + 1], &c__1); /* conjugate so GEMV applies U**H */
+ i__2 = j - 1;
+ i__3 = *n - j;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Transpose", &i__2, &i__3, &q__1, &a[(j + 1) * a_dim1
+ + 1], lda, &a[j * a_dim1 + 1], &c__1, &c_b1, &a[j + (
+ j + 1) * a_dim1], lda);
+ i__2 = j - 1;
+ clacgv_(&i__2, &a[j * a_dim1 + 1], &c__1); /* restore the conjugated column */
+ i__2 = *n - j;
+ r__1 = 1.f / ajj;
+ csscal_(&i__2, &r__1, &a[j + (j + 1) * a_dim1], lda); /* scale row J by 1/U(J,J) */
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute L(J,J) and test for non-positive-definiteness. */
+
+ i__2 = j + j * a_dim1;
+ r__1 = a[i__2].r;
+ i__3 = j - 1;
+ cdotc_(&q__2, &i__3, &a[j + a_dim1], lda, &a[j + a_dim1], lda); /* conjugated dot product of row J left of the diagonal */
+ q__1.r = r__1 - q__2.r, q__1.i = -q__2.i;
+ ajj = q__1.r;
+ if (ajj <= 0.f || sisnan_(&ajj)) {
+ i__2 = j + j * a_dim1;
+ a[i__2].r = ajj, a[i__2].i = 0.f;
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ i__2 = j + j * a_dim1;
+ a[i__2].r = ajj, a[i__2].i = 0.f;
+
+/* Compute elements J+1:N of column J. */
+
+ if (j < *n) {
+ i__2 = j - 1;
+ clacgv_(&i__2, &a[j + a_dim1], lda);
+ i__2 = *n - j;
+ i__3 = j - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &a[j + 1 + a_dim1]
+, lda, &a[j + a_dim1], lda, &c_b1, &a[j + 1 + j *
+ a_dim1], &c__1);
+ i__2 = j - 1;
+ clacgv_(&i__2, &a[j + a_dim1], lda);
+ i__2 = *n - j;
+ r__1 = 1.f / ajj;
+ csscal_(&i__2, &r__1, &a[j + 1 + j * a_dim1], &c__1); /* scale column J by 1/L(J,J) */
+ }
+/* L20: */
+ }
+ }
+ goto L40;
+
+L30:
+ *info = j; /* leading minor of order j is not positive definite */
+
+L40:
+ return 0;
+
+/* End of CPOTF2 */
+
+} /* cpotf2_ */
diff --git a/contrib/libs/clapack/cpotrf.c b/contrib/libs/clapack/cpotrf.c
new file mode 100644
index 0000000000..f71adeef5d
--- /dev/null
+++ b/contrib/libs/clapack/cpotrf.c
@@ -0,0 +1,248 @@
+/* cpotrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b14 = -1.f;
+static real c_b15 = 1.f;
+
+/* Subroutine */ int cpotrf_(char *uplo, integer *n, complex *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ complex q__1;
+
+ /* Local variables */
+ integer j, jb, nb;
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *), cherk_(char *,
+ char *, integer *, integer *, real *, complex *, integer *, real *
+, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ctrsm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *);
+ logical upper;
+ extern /* Subroutine */ int cpotf2_(char *, integer *, complex *, integer
+ *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPOTRF computes the Cholesky factorization of a complex Hermitian */
+/* positive definite matrix A. */
+
+/* The factorization has the form */
+/* A = U**H * U, if UPLO = 'U', or */
+/* A = L * L**H, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular. */
+
+/* This is the block version of the algorithm, calling Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1; /* shift to 1-based (i,j) indexing, f2c style */
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info); /* XERBLA expects the positive argument index */
+ xerbla_("CPOTRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "CPOTRF", uplo, n, &c_n1, &c_n1, &c_n1); /* tuning parameter from ILAENV */
+ if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code. */
+
+ cpotf2_(uplo, n, &a[a_offset], lda, info); /* blocking would not pay off */
+ } else {
+
+/* Use blocked code. */
+
+ if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Update and factorize the current diagonal block and test */
+/* for non-positive-definiteness. */
+
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - j + 1;
+ jb = min(i__3,i__4); /* last block may be smaller than NB */
+ i__3 = j - 1;
+ cherk_("Upper", "Conjugate transpose", &jb, &i__3, &c_b14, &a[
+ j * a_dim1 + 1], lda, &c_b15, &a[j + j * a_dim1], lda);
+ cpotf2_("Upper", &jb, &a[j + j * a_dim1], lda, info); /* factor the updated diagonal block */
+ if (*info != 0) {
+ goto L30;
+ }
+ if (j + jb <= *n) {
+
+/* Compute the current block row. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("Conjugate transpose", "No transpose", &jb, &i__3,
+ &i__4, &q__1, &a[j * a_dim1 + 1], lda, &a[(j + jb)
+ * a_dim1 + 1], lda, &c_b1, &a[j + (j + jb) *
+ a_dim1], lda);
+ i__3 = *n - j - jb + 1;
+ ctrsm_("Left", "Upper", "Conjugate transpose", "Non-unit",
+ &jb, &i__3, &c_b1, &a[j + j * a_dim1], lda, &a[j
+ + (j + jb) * a_dim1], lda);
+ }
+/* L10: */
+ }
+
+ } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (j = 1; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Update and factorize the current diagonal block and test */
+/* for non-positive-definiteness. */
+
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - j + 1;
+ jb = min(i__3,i__4);
+ i__3 = j - 1;
+ cherk_("Lower", "No transpose", &jb, &i__3, &c_b14, &a[j +
+ a_dim1], lda, &c_b15, &a[j + j * a_dim1], lda);
+ cpotf2_("Lower", &jb, &a[j + j * a_dim1], lda, info);
+ if (*info != 0) {
+ goto L30;
+ }
+ if (j + jb <= *n) {
+
+/* Compute the current block column. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "Conjugate transpose", &i__3, &jb,
+ &i__4, &q__1, &a[j + jb + a_dim1], lda, &a[j +
+ a_dim1], lda, &c_b1, &a[j + jb + j * a_dim1], lda);
+ i__3 = *n - j - jb + 1;
+ ctrsm_("Right", "Lower", "Conjugate transpose", "Non-unit"
+, &i__3, &jb, &c_b1, &a[j + j * a_dim1], lda, &a[
+ j + jb + j * a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+ }
+ goto L40;
+
+L30:
+ *info = *info + j - 1; /* convert block-local minor index to a global one */
+
+L40:
+ return 0;
+
+/* End of CPOTRF */
+
+} /* cpotrf_ */
diff --git a/contrib/libs/clapack/cpotri.c b/contrib/libs/clapack/cpotri.c
new file mode 100644
index 0000000000..27e88cef06
--- /dev/null
+++ b/contrib/libs/clapack/cpotri.c
@@ -0,0 +1,125 @@
+/* cpotri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cpotri_(char *uplo, integer *n, complex *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), clauum_(
+ char *, integer *, complex *, integer *, integer *),
+ ctrtri_(char *, char *, integer *, complex *, integer *, integer *
+);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPOTRI computes the inverse of a complex Hermitian positive definite */
+/* matrix A using the Cholesky factorization A = U**H*U or A = L*L**H */
+/* computed by CPOTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the triangular factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H, as computed by */
+/* CPOTRF. */
+/* On exit, the upper or lower triangle of the (Hermitian) */
+/* inverse of A, overwriting the input factor U or L. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the (i,i) element of the factor U or L is */
+/* zero, and the inverse could not be computed. */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1; /* shift to 1-based (i,j) indexing, f2c style */
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info); /* XERBLA expects the positive argument index */
+ xerbla_("CPOTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Invert the triangular Cholesky factor U or L. */
+
+ ctrtri_(uplo, "Non-unit", n, &a[a_offset], lda, info); /* phase 1: triangular inverse in place */
+ if (*info > 0) {
+ return 0; /* singular factor: INFO already set to the zero diagonal index */
+ }
+
+/* Form inv(U)*inv(U)' or inv(L)'*inv(L). */
+
+ clauum_(uplo, n, &a[a_offset], lda, info); /* phase 2: multiply the inverse factors */
+
+ return 0;
+
+/* End of CPOTRI */
+
+} /* cpotri_ */
diff --git a/contrib/libs/clapack/cpotrs.c b/contrib/libs/clapack/cpotrs.c
new file mode 100644
index 0000000000..d65ce4e320
--- /dev/null
+++ b/contrib/libs/clapack/cpotrs.c
@@ -0,0 +1,165 @@
+/* cpotrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+
+/* Subroutine */ int cpotrs_(char *uplo, integer *n, integer *nrhs, complex *
+ a, integer *lda, complex *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ctrsm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *);
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPOTRS solves a system of linear equations A*X = B with a Hermitian */
+/* positive definite matrix A using the Cholesky factorization */
+/* A = U**H*U or A = L*L**H computed by CPOTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**H*U or A = L*L**H, as computed by CPOTRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* Review note: this file is f2c-generated from cpotrs.f; the pointer */
+/* shifts below let the body keep Fortran-style 1-based subscripts */
+/* such as a[i + j*a_dim1] and b[i + j*b_dim1]. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CPOTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+/* Each branch performs two triangular solves with CTRSM, handling all */
+/* NRHS columns at once; U' and L' in the comments below denote the */
+/* conjugate transposes of the Cholesky factors. */
+
+ if (upper) {
+
+/* Solve A*X = B where A = U'*U. */
+
+/* Solve U'*X = B, overwriting B with X. */
+
+ ctrsm_("Left", "Upper", "Conjugate transpose", "Non-unit", n, nrhs, &
+ c_b1, &a[a_offset], lda, &b[b_offset], ldb);
+
+/* Solve U*X = B, overwriting B with X. */
+
+ ctrsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b1, &
+ a[a_offset], lda, &b[b_offset], ldb);
+ } else {
+
+/* Solve A*X = B where A = L*L'. */
+
+/* Solve L*X = B, overwriting B with X. */
+
+ ctrsm_("Left", "Lower", "No transpose", "Non-unit", n, nrhs, &c_b1, &
+ a[a_offset], lda, &b[b_offset], ldb);
+
+/* Solve L'*X = B, overwriting B with X. */
+
+ ctrsm_("Left", "Lower", "Conjugate transpose", "Non-unit", n, nrhs, &
+ c_b1, &a[a_offset], lda, &b[b_offset], ldb);
+ }
+
+ return 0;
+
+/* End of CPOTRS */
+
+} /* cpotrs_ */
diff --git a/contrib/libs/clapack/cppcon.c b/contrib/libs/clapack/cppcon.c
new file mode 100644
index 0000000000..656f100ef5
--- /dev/null
+++ b/contrib/libs/clapack/cppcon.c
@@ -0,0 +1,222 @@
+/* cppcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cppcon_(char *uplo, integer *n, complex *ap, real *anorm,
+ real *rcond, complex *work, real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer ix, kase;
+ real scale;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ logical upper;
+ extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+ *, integer *, integer *);
+ extern integer icamax_(integer *, complex *, integer *);
+ real scalel;
+ extern doublereal slamch_(char *);
+ real scaleu;
+ extern /* Subroutine */ int xerbla_(char *, integer *), clatps_(
+ char *, char *, char *, char *, integer *, complex *, complex *,
+ real *, real *, integer *);
+ real ainvnm;
+ extern /* Subroutine */ int csrscl_(integer *, real *, complex *, integer
+ *);
+ char normin[1];
+ real smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPPCON estimates the reciprocal of the condition number (in the */
+/* 1-norm) of a complex Hermitian positive definite packed matrix using */
+/* the Cholesky factorization A = U**H*U or A = L*L**H computed by */
+/* CPPTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) COMPLEX array, dimension (N*(N+1)/2) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**H*U or A = L*L**H, packed columnwise in a linear */
+/* array. The j-th column of U or L is stored in the array AP */
+/* as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = U(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = L(i,j) for j<=i<=n. */
+
+/* ANORM (input) REAL */
+/* The 1-norm (or infinity-norm) of the Hermitian matrix A. */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* f2c note: the decrements below shift the workspace and AP pointers */
+/* so Fortran-style 1-based subscripts can be used throughout. */
+
+ /* Parameter adjustments */
+ --rwork;
+ --work;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*anorm < 0.f) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CPPCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.f;
+ if (*n == 0) {
+ *rcond = 1.f;
+ return 0;
+ } else if (*anorm == 0.f) {
+ return 0;
+ }
+
+ smlnum = slamch_("Safe minimum");
+
+/* Estimate the 1-norm of the inverse. */
+
+/* CLACN2 is a reverse-communication norm estimator: each return with */
+/* KASE != 0 asks the caller to apply inv(A) to the vector in WORK. */
+/* Since A is Hermitian, the same pair of CLATPS triangular solves */
+/* (with the packed Cholesky factor) serves both KASE requests, so the */
+/* value of KASE is not inspected inside the loop. */
+
+ kase = 0;
+ *(unsigned char *)normin = 'N';
+L10:
+ clacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+ if (upper) {
+
+/* Multiply by inv(U'). */
+
+ clatps_("Upper", "Conjugate transpose", "Non-unit", normin, n, &
+ ap[1], &work[1], &scalel, &rwork[1], info);
+ *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(U). */
+
+ clatps_("Upper", "No transpose", "Non-unit", normin, n, &ap[1], &
+ work[1], &scaleu, &rwork[1], info);
+ } else {
+
+/* Multiply by inv(L). */
+
+ clatps_("Lower", "No transpose", "Non-unit", normin, n, &ap[1], &
+ work[1], &scalel, &rwork[1], info);
+ *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(L'). */
+
+ clatps_("Lower", "Conjugate transpose", "Non-unit", normin, n, &
+ ap[1], &work[1], &scaleu, &rwork[1], info);
+ }
+
+/* Multiply by 1/SCALE if doing so will not cause overflow. */
+
+ scale = scalel * scaleu;
+ if (scale != 1.f) {
+ ix = icamax_(n, &work[1], &c__1);
+ i__1 = ix;
+ if (scale < ((r__1 = work[i__1].r, dabs(r__1)) + (r__2 = r_imag(&
+ work[ix]), dabs(r__2))) * smlnum || scale == 0.f) {
+ goto L20;
+ }
+ csrscl_(n, &scale, &work[1], &c__1);
+ }
+ goto L10;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.f) {
+ *rcond = 1.f / ainvnm / *anorm;
+ }
+
+L20:
+ return 0;
+
+/* End of CPPCON */
+
+} /* cppcon_ */
diff --git a/contrib/libs/clapack/cppequ.c b/contrib/libs/clapack/cppequ.c
new file mode 100644
index 0000000000..31877c8511
--- /dev/null
+++ b/contrib/libs/clapack/cppequ.c
@@ -0,0 +1,210 @@
+/* cppequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cppequ_(char *uplo, integer *n, complex *ap, real *s,
+ real *scond, real *amax, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, jj;
+ real smin;
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPPEQU computes row and column scalings intended to equilibrate a */
+/* Hermitian positive definite matrix A in packed storage and reduce */
+/* its condition number (with respect to the two-norm). S contains the */
+/* scale factors, S(i)=1/sqrt(A(i,i)), chosen so that the scaled matrix */
+/* B with elements B(i,j)=S(i)*A(i,j)*S(j) has ones on the diagonal. */
+/* This choice of S puts the condition number of B within a factor N of */
+/* the smallest possible condition number over all possible diagonal */
+/* scalings. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) COMPLEX array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the Hermitian matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* S (output) REAL array, dimension (N) */
+/* If INFO = 0, S contains the scale factors for A. */
+
+/* SCOND (output) REAL */
+/* If INFO = 0, S contains the ratio of the smallest S(i) to */
+/* the largest S(i). If SCOND >= 0.1 and AMAX is neither too */
+/* large nor too small, it is not worth scaling by S. */
+
+/* AMAX (output) REAL */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element is nonpositive. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* f2c note: pointer decrements give 1-based Fortran-style indexing. */
+
+ /* Parameter adjustments */
+ --s;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CPPEQU", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *scond = 1.f;
+ *amax = 0.f;
+ return 0;
+ }
+
+/* Initialize SMIN and AMAX. */
+
+/* Only the real part of each packed diagonal entry is read (.r); the */
+/* imaginary part of a Hermitian matrix's diagonal is zero. */
+
+ s[1] = ap[1].r;
+ smin = s[1];
+ *amax = s[1];
+
+ if (upper) {
+
+/* UPLO = 'U': Upper triangle of A is stored. */
+/* Find the minimum and maximum diagonal elements. */
+
+/* In packed upper storage A(i,i) lives at index i*(i+1)/2, so the */
+/* running offset advances by i at step i: jj = 1, 3, 6, 10, ... */
+
+ jj = 1;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ jj += i__;
+ i__2 = jj;
+ s[i__] = ap[i__2].r;
+/* Computing MIN */
+ r__1 = smin, r__2 = s[i__];
+ smin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = *amax, r__2 = s[i__];
+ *amax = dmax(r__1,r__2);
+/* L10: */
+ }
+
+ } else {
+
+/* UPLO = 'L': Lower triangle of A is stored. */
+/* Find the minimum and maximum diagonal elements. */
+
+/* In packed lower storage the gap between consecutive diagonal */
+/* entries shrinks with the column: the offset advances by n-i+2. */
+
+ jj = 1;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ jj = jj + *n - i__ + 2;
+ i__2 = jj;
+ s[i__] = ap[i__2].r;
+/* Computing MIN */
+ r__1 = smin, r__2 = s[i__];
+ smin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = *amax, r__2 = s[i__];
+ *amax = dmax(r__1,r__2);
+/* L20: */
+ }
+ }
+
+ if (smin <= 0.f) {
+
+/* Find the first non-positive diagonal element and return. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (s[i__] <= 0.f) {
+ *info = i__;
+ return 0;
+ }
+/* L30: */
+ }
+ } else {
+
+/* Set the scale factors to the reciprocals */
+/* of the diagonal elements. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ s[i__] = 1.f / sqrt(s[i__]);
+/* L40: */
+ }
+
+/* Compute SCOND = min(S(I)) / max(S(I)) */
+
+ *scond = sqrt(smin) / sqrt(*amax);
+ }
+ return 0;
+
+/* End of CPPEQU */
+
+} /* cppequ_ */
diff --git a/contrib/libs/clapack/cpprfs.c b/contrib/libs/clapack/cpprfs.c
new file mode 100644
index 0000000000..2159485acb
--- /dev/null
+++ b/contrib/libs/clapack/cpprfs.c
@@ -0,0 +1,457 @@
+/* cpprfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int cpprfs_(char *uplo, integer *n, integer *nrhs, complex *
+ ap, complex *afp, complex *b, integer *ldb, complex *x, integer *ldx,
+ real *ferr, real *berr, complex *work, real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer i__, j, k;
+ real s;
+ integer ik, kk;
+ real xk;
+ integer nz;
+ real eps;
+ integer kase;
+ real safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), chpmv_(char *, integer *, complex *,
+ complex *, complex *, integer *, complex *, complex *, integer *), caxpy_(integer *, complex *, complex *, integer *,
+ complex *, integer *);
+ integer count;
+ logical upper;
+ extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+ *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *), cpptrs_(
+ char *, integer *, integer *, complex *, complex *, integer *,
+ integer *);
+ real lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPPRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is Hermitian positive definite */
+/* and packed, and provides error bounds and backward error estimates */
+/* for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AP (input) COMPLEX array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the Hermitian matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* AFP (input) COMPLEX array, dimension (N*(N+1)/2) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**H*U or A = L*L**H, as computed by SPPTRF/CPPTRF, */
+/* packed columnwise in a linear array in the same format as A */
+/* (see AP). */
+
+/* B (input) COMPLEX array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by CPPTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* (ITMAX = 5; it appears hard-coded as the "count <= 5" test in the */
+/* refinement loop below.) */
+
+/* ==================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* f2c note: pointer offsets below allow 1-based Fortran-style */
+/* subscripts (e.g. b[i + j*b_dim1]) throughout the body. */
+
+ /* Parameter adjustments */
+ --ap;
+ --afp;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ } else if (*ldx < max(1,*n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CPPRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.f;
+ berr[j] = 0.f;
+/* L10: */
+ }
+ return 0;
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = *n + 1;
+ eps = slamch_("Epsilon");
+ safmin = slamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+ ccopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+ q__1.r = -1.f, q__1.i = -0.f;
+ chpmv_(uplo, n, &q__1, &ap[1], &x[j * x_dim1 + 1], &c__1, &c_b1, &
+ work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[i__] = (r__1 = b[i__3].r, dabs(r__1)) + (r__2 = r_imag(&b[
+ i__ + j * b_dim1]), dabs(r__2));
+/* L30: */
+ }
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+/* KK tracks the packed index of the current column's first stored */
+/* element; IK walks the off-diagonal entries of that column. */
+
+ kk = 1;
+ if (upper) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ i__3 = k + j * x_dim1;
+ xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[k + j
+ * x_dim1]), dabs(r__2));
+ ik = kk;
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = ik;
+ rwork[i__] += ((r__1 = ap[i__4].r, dabs(r__1)) + (r__2 =
+ r_imag(&ap[ik]), dabs(r__2))) * xk;
+ i__4 = ik;
+ i__5 = i__ + j * x_dim1;
+ s += ((r__1 = ap[i__4].r, dabs(r__1)) + (r__2 = r_imag(&
+ ap[ik]), dabs(r__2))) * ((r__3 = x[i__5].r, dabs(
+ r__3)) + (r__4 = r_imag(&x[i__ + j * x_dim1]),
+ dabs(r__4)));
+ ++ik;
+/* L40: */
+ }
+ i__3 = kk + k - 1;
+ rwork[k] = rwork[k] + (r__1 = ap[i__3].r, dabs(r__1)) * xk +
+ s;
+ kk += k;
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ i__3 = k + j * x_dim1;
+ xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[k + j
+ * x_dim1]), dabs(r__2));
+ i__3 = kk;
+ rwork[k] += (r__1 = ap[i__3].r, dabs(r__1)) * xk;
+ ik = kk + 1;
+ i__3 = *n;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ i__4 = ik;
+ rwork[i__] += ((r__1 = ap[i__4].r, dabs(r__1)) + (r__2 =
+ r_imag(&ap[ik]), dabs(r__2))) * xk;
+ i__4 = ik;
+ i__5 = i__ + j * x_dim1;
+ s += ((r__1 = ap[i__4].r, dabs(r__1)) + (r__2 = r_imag(&
+ ap[ik]), dabs(r__2))) * ((r__3 = x[i__5].r, dabs(
+ r__3)) + (r__4 = r_imag(&x[i__ + j * x_dim1]),
+ dabs(r__4)));
+ ++ik;
+/* L60: */
+ }
+ rwork[k] += s;
+ kk += *n - k + 1;
+/* L70: */
+ }
+ }
+ s = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+/* Computing MAX */
+ i__3 = i__;
+ r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2))) / rwork[i__];
+ s = dmax(r__3,r__4);
+ } else {
+/* Computing MAX */
+ i__3 = i__;
+ r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + safe1) / (rwork[i__]
+ + safe1);
+ s = dmax(r__3,r__4);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ cpptrs_(uplo, n, &c__1, &afp[1], &work[1], n, info);
+ caxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use CLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+ i__3 = i__;
+ rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+ i__];
+ } else {
+ i__3 = i__;
+ rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+ i__] + safe1;
+ }
+/* L90: */
+ }
+
+/* Reverse-communication loop: CLACN2 sets KASE to request that the */
+/* caller apply inv(A)*diag(W) (or its adjoint) to WORK. */
+
+ kase = 0;
+L100:
+ clacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+ cpptrs_(uplo, n, &c__1, &afp[1], &work[1], n, info)
+ ;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L110: */
+ }
+ } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L120: */
+ }
+ cpptrs_(uplo, n, &c__1, &afp[1], &work[1], n, info)
+ ;
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * x_dim1;
+ r__3 = lstres, r__4 = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[i__ + j * x_dim1]), dabs(r__2));
+ lstres = dmax(r__3,r__4);
+/* L130: */
+ }
+ if (lstres != 0.f) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of CPPRFS */
+
+} /* cpprfs_ */
diff --git a/contrib/libs/clapack/cppsv.c b/contrib/libs/clapack/cppsv.c
new file mode 100644
index 0000000000..c13d7d3db6
--- /dev/null
+++ b/contrib/libs/clapack/cppsv.c
@@ -0,0 +1,160 @@
+/* cppsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cppsv_(char *uplo, integer *n, integer *nrhs, complex *
+ ap, complex *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), cpptrf_(
+ char *, integer *, complex *, integer *), cpptrs_(char *,
+ integer *, integer *, complex *, complex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPPSV computes the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N Hermitian positive definite matrix stored in */
+/* packed format and X and B are N-by-NRHS matrices. */
+
+/* The Cholesky decomposition is used to factor A as */
+/* A = U**H* U, if UPLO = 'U', or */
+/* A = L * L**H, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is a lower triangular */
+/* matrix. The factored form of A is then used to solve the system of */
+/* equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H, in the same storage */
+/* format as A. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i of A is not */
+/* positive definite, so the factorization could not be */
+/* completed, and the solution has not been computed. */
+
+/* Further Details */
+/* =============== */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the Hermitian matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = conjg(aji)) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* f2c note: pointer offsets give 1-based Fortran-style indexing. */
+
+ /* Parameter adjustments */
+ --ap;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+/* The routine name is blank-padded to six characters for XERBLA. */
+ xerbla_("CPPSV ", &i__1);
+ return 0;
+ }
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+ cpptrf_(uplo, n, &ap[1], info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+/* (Skipped when CPPTRF reports a non-positive-definite minor, */
+/* i.e. *info > 0; the INFO value is passed through unchanged.) */
+
+ cpptrs_(uplo, n, nrhs, &ap[1], &b[b_offset], ldb, info);
+
+ }
+ return 0;
+
+/* End of CPPSV */
+
+} /* cppsv_ */
diff --git a/contrib/libs/clapack/cppsvx.c b/contrib/libs/clapack/cppsvx.c
new file mode 100644
index 0000000000..3898b386b4
--- /dev/null
+++ b/contrib/libs/clapack/cppsvx.c
@@ -0,0 +1,461 @@
+/* cppsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cppsvx_(char *fact, char *uplo, integer *n, integer *
+	nrhs, complex *ap, complex *afp, char *equed, real *s, complex *b,
+	integer *ldb, complex *x, integer *ldx, real *rcond, real *ferr, real
+	*berr, complex *work, real *rwork, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+    real r__1, r__2;
+    complex q__1;
+
+    /* Local variables */
+    integer i__, j;
+    real amax, smin, smax;
+    extern logical lsame_(char *, char *);
+    real scond, anorm;
+    extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+	    complex *, integer *);
+    logical equil, rcequ;
+    extern doublereal clanhp_(char *, char *, integer *, complex *, real *), slamch_(char *);
+    extern /* Subroutine */ int claqhp_(char *, integer *, complex *, real *,
+	    real *, real *, char *);
+    logical nofact;
+    extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+	    *, integer *, complex *, integer *), xerbla_(char *,
+	    integer *);
+    real bignum;
+    extern /* Subroutine */ int cppcon_(char *, integer *, complex *, real *,
+	     real *, complex *, real *, integer *);
+    integer infequ;
+    extern /* Subroutine */ int cppequ_(char *, integer *, complex *, real *,
+	     real *, real *, integer *), cpprfs_(char *, integer *,
+	    integer *, complex *, complex *, complex *, integer *, complex *,
+	    integer *, real *, real *, complex *, real *, integer *),
+	    cpptrf_(char *, integer *, complex *, integer *);
+    real smlnum;
+    extern /* Subroutine */ int cpptrs_(char *, integer *, integer *, complex
+	    *, complex *, integer *, integer *);
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CPPSVX uses the Cholesky factorization A = U**H*U or A = L*L**H to */
+/*  compute the solution to a complex system of linear equations */
+/*     A * X = B, */
+/*  where A is an N-by-N Hermitian positive definite matrix stored in */
+/*  packed format and X and B are N-by-NRHS matrices. */
+
+/*  Error bounds on the solution and a condition estimate are also */
+/*  provided. */
+
+/*  Description */
+/*  =========== */
+
+/*  The following steps are performed: */
+
+/*  1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/*     the system: */
+/*        diag(S) * A * diag(S) * inv(diag(S)) * X = diag(S) * B */
+/*     Whether or not the system will be equilibrated depends on the */
+/*     scaling of the matrix A, but if equilibration is used, A is */
+/*     overwritten by diag(S)*A*diag(S) and B by diag(S)*B. */
+
+/*  2. If FACT = 'N' or 'E', the Cholesky decomposition is used to */
+/*     factor the matrix A (after equilibration if FACT = 'E') as */
+/*        A = U'* U ,  if UPLO = 'U', or */
+/*        A = L * L',  if UPLO = 'L', */
+/*     where U is an upper triangular matrix, L is a lower triangular */
+/*     matrix, and ' indicates conjugate transpose. */
+
+/*  3. If the leading i-by-i principal minor is not positive definite, */
+/*     then the routine returns with INFO = i. Otherwise, the factored */
+/*     form of A is used to estimate the condition number of the matrix */
+/*     A.  If the reciprocal of the condition number is less than machine */
+/*     precision, INFO = N+1 is returned as a warning, but the routine */
+/*     still goes on to solve for X and compute error bounds as */
+/*     described below. */
+
+/*  4. The system of equations is solved for X using the factored form */
+/*     of A. */
+
+/*  5. Iterative refinement is applied to improve the computed solution */
+/*     matrix and calculate error bounds and backward error estimates */
+/*     for it. */
+
+/*  6. If equilibration was used, the matrix X is premultiplied by */
+/*     diag(S) so that it solves the original system before */
+/*     equilibration. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  FACT    (input) CHARACTER*1 */
+/*          Specifies whether or not the factored form of the matrix A is */
+/*          supplied on entry, and if not, whether the matrix A should be */
+/*          equilibrated before it is factored. */
+/*          = 'F':  On entry, AFP contains the factored form of A. */
+/*                  If EQUED = 'Y', the matrix A has been equilibrated */
+/*                  with scaling factors given by S.  AP and AFP will not */
+/*                  be modified. */
+/*          = 'N':  The matrix A will be copied to AFP and factored. */
+/*          = 'E':  The matrix A will be equilibrated if necessary, then */
+/*                  copied to AFP and factored. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangle of A is stored; */
+/*          = 'L':  Lower triangle of A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The number of linear equations, i.e., the order of the */
+/*          matrix A.  N >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrices B and X.  NRHS >= 0. */
+
+/*  AP      (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/*          On entry, the upper or lower triangle of the Hermitian matrix */
+/*          A, packed columnwise in a linear array, except if FACT = 'F' */
+/*          and EQUED = 'Y', then A must contain the equilibrated matrix */
+/*          diag(S)*A*diag(S).  The j-th column of A is stored in the */
+/*          array AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/*          See below for further details.  A is not modified if */
+/*          FACT = 'F' or 'N', or if FACT = 'E' and EQUED = 'N' on exit. */
+
+/*          On exit, if FACT = 'E' and EQUED = 'Y', A is overwritten by */
+/*          diag(S)*A*diag(S). */
+
+/*  AFP     (input or output) COMPLEX array, dimension (N*(N+1)/2) */
+/*          If FACT = 'F', then AFP is an input argument and on entry */
+/*          contains the triangular factor U or L from the Cholesky */
+/*          factorization A = U**H*U or A = L*L**H, in the same storage */
+/*          format as A.  If EQUED .ne. 'N', then AFP is the factored */
+/*          form of the equilibrated matrix A. */
+
+/*          If FACT = 'N', then AFP is an output argument and on exit */
+/*          returns the triangular factor U or L from the Cholesky */
+/*          factorization A = U**H*U or A = L*L**H of the original */
+/*          matrix A. */
+
+/*          If FACT = 'E', then AFP is an output argument and on exit */
+/*          returns the triangular factor U or L from the Cholesky */
+/*          factorization A = U**H*U or A = L*L**H of the equilibrated */
+/*          matrix A (see the description of AP for the form of the */
+/*          equilibrated matrix). */
+
+/*  EQUED   (input or output) CHARACTER*1 */
+/*          Specifies the form of equilibration that was done. */
+/*          = 'N':  No equilibration (always true if FACT = 'N'). */
+/*          = 'Y':  Equilibration was done, i.e., A has been replaced by */
+/*                  diag(S) * A * diag(S). */
+/*          EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/*          output argument. */
+
+/*  S       (input or output) REAL array, dimension (N) */
+/*          The scale factors for A; not accessed if EQUED = 'N'.  S is */
+/*          an input argument if FACT = 'F'; otherwise, S is an output */
+/*          argument.  If FACT = 'F' and EQUED = 'Y', each element of S */
+/*          must be positive. */
+
+/*  B       (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/*          On entry, the N-by-NRHS right hand side matrix B. */
+/*          On exit, if EQUED = 'N', B is not modified; if EQUED = 'Y', */
+/*          B is overwritten by diag(S) * B. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  X       (output) COMPLEX array, dimension (LDX,NRHS) */
+/*          If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X to */
+/*          the original system of equations.  Note that if EQUED = 'Y', */
+/*          A and B are modified on exit, and the solution to the */
+/*          equilibrated system is inv(diag(S))*X. */
+
+/*  LDX     (input) INTEGER */
+/*          The leading dimension of the array X.  LDX >= max(1,N). */
+
+/*  RCOND   (output) REAL */
+/*          The estimate of the reciprocal condition number of the matrix */
+/*          A after equilibration (if done).  If RCOND is less than the */
+/*          machine precision (in particular, if RCOND = 0), the matrix */
+/*          is singular to working precision.  This condition is */
+/*          indicated by a return code of INFO > 0. */
+
+/*  FERR    (output) REAL array, dimension (NRHS) */
+/*          The estimated forward error bound for each solution vector */
+/*          X(j) (the j-th column of the solution matrix X). */
+/*          If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/*          is an estimated upper bound for the magnitude of the largest */
+/*          element in (X(j) - XTRUE) divided by the magnitude of the */
+/*          largest element in X(j).  The estimate is as reliable as */
+/*          the estimate for RCOND, and is almost always a slight */
+/*          overestimate of the true error. */
+
+/*  BERR    (output) REAL array, dimension (NRHS) */
+/*          The componentwise relative backward error of each solution */
+/*          vector X(j) (i.e., the smallest relative change in */
+/*          any element of A or B that makes X(j) an exact solution). */
+
+/*  WORK    (workspace) COMPLEX array, dimension (2*N) */
+
+/*  RWORK   (workspace) REAL array, dimension (N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, and i is */
+/*                <= N:  the leading minor of order i of A is */
+/*                       not positive definite, so the factorization */
+/*                       could not be completed, and the solution has not */
+/*                       been computed. RCOND = 0 is returned. */
+/*                = N+1: U is nonsingular, but RCOND is less than machine */
+/*                       precision, meaning that the matrix is singular */
+/*                       to working precision.  Nevertheless, the */
+/*                       solution and error bounds are computed because */
+/*                       there are a number of situations where the */
+/*                       computed solution can be more accurate than the */
+/*                       value of RCOND would suggest. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  The packed storage scheme is illustrated by the following example */
+/*  when N = 4, UPLO = 'U': */
+
+/*  Two-dimensional storage of the Hermitian matrix A: */
+
+/*     a11 a12 a13 a14 */
+/*         a22 a23 a24 */
+/*             a33 a34     (aij = conjg(aji)) */
+/*                 a44 */
+
+/*  Packed storage of the upper triangle of A: */
+
+/*  AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --ap;
+    --afp;
+    --s;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    nofact = lsame_(fact, "N");
+    equil = lsame_(fact, "E");
+    if (nofact || equil) {
+	*(unsigned char *)equed = 'N';
+	rcequ = FALSE_;
+    } else {
+/* FACT = 'F': the equilibration state is supplied by the caller in EQUED */
+/* and S; SMLNUM/BIGNUM are only needed to validate and clamp S below. */
+	rcequ = lsame_(equed, "Y");
+	smlnum = slamch_("Safe minimum");
+	bignum = 1.f / smlnum;
+    }
+
+/*     Test the input parameters. */
+
+    if (! nofact && ! equil && ! lsame_(fact, "F")) {
+	*info = -1;
+    } else if (! lsame_(uplo, "U") && ! lsame_(uplo,
+	    "L")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (lsame_(fact, "F") && ! (rcequ || lsame_(
+	    equed, "N"))) {
+	*info = -7;
+    } else {
+	if (rcequ) {
+/* A factored, equilibrated system was supplied: every scale factor in S */
+/* must be strictly positive, otherwise argument 8 is invalid. */
+	    smin = bignum;
+	    smax = 0.f;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		r__1 = smin, r__2 = s[j];
+		smin = dmin(r__1,r__2);
+/* Computing MAX */
+		r__1 = smax, r__2 = s[j];
+		smax = dmax(r__1,r__2);
+/* L10: */
+	    }
+	    if (smin <= 0.f) {
+		*info = -8;
+	    } else if (*n > 0) {
+/* SCOND = ratio of smallest to largest scale factor, with both operands */
+/* clamped to the representable range to avoid over/underflow. */
+		scond = dmax(smin,smlnum) / dmin(smax,bignum);
+	    } else {
+		scond = 1.f;
+	    }
+	}
+	if (*info == 0) {
+	    if (*ldb < max(1,*n)) {
+		*info = -10;
+	    } else if (*ldx < max(1,*n)) {
+		*info = -12;
+	    }
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPPSVX", &i__1);
+	return 0;
+    }
+
+    if (equil) {
+
+/*        Compute row and column scalings to equilibrate the matrix A. */
+
+	cppequ_(uplo, n, &ap[1], &s[1], &scond, &amax, &infequ);
+	if (infequ == 0) {
+
+/*           Equilibrate the matrix. */
+
+/* CLAQHP overwrites A with diag(S)*A*diag(S) and sets EQUED to 'Y' or */
+/* 'N' depending on whether scaling was actually applied. */
+	    claqhp_(uplo, n, &ap[1], &s[1], &scond, &amax, equed);
+	    rcequ = lsame_(equed, "Y");
+	}
+    }
+
+/*     Scale the right-hand side. */
+
+    if (rcequ) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *n;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * b_dim1;
+		i__4 = i__;
+		i__5 = i__ + j * b_dim1;
+		q__1.r = s[i__4] * b[i__5].r, q__1.i = s[i__4] * b[i__5].i;
+		b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L20: */
+	    }
+/* L30: */
+	}
+    }
+
+    if (nofact || equil) {
+
+/*        Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+/* Work on a copy in AFP so AP keeps the (possibly equilibrated) matrix */
+/* for the norm computation and iterative refinement below. */
+	i__1 = *n * (*n + 1) / 2;
+	ccopy_(&i__1, &ap[1], &c__1, &afp[1], &c__1);
+	cpptrf_(uplo, n, &afp[1], info);
+
+/*        Return if INFO is non-zero. */
+
+	if (*info > 0) {
+	    *rcond = 0.f;
+	    return 0;
+	}
+    }
+
+/*     Compute the norm of the matrix A. */
+
+    anorm = clanhp_("I", uplo, n, &ap[1], &rwork[1]);
+
+/*     Compute the reciprocal of the condition number of A. */
+
+    cppcon_(uplo, n, &afp[1], &anorm, rcond, &work[1], &rwork[1], info);
+
+/*     Compute the solution matrix X. */
+
+    clacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    cpptrs_(uplo, n, nrhs, &afp[1], &x[x_offset], ldx, info);
+
+/*     Use iterative refinement to improve the computed solution and */
+/*     compute error bounds and backward error estimates for it. */
+
+    cpprfs_(uplo, n, nrhs, &ap[1], &afp[1], &b[b_offset], ldb, &x[x_offset],
+	    ldx, &ferr[1], &berr[1], &work[1], &rwork[1], info);
+
+/*     Transform the solution matrix X to a solution of the original */
+/*     system. */
+
+    if (rcequ) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *n;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * x_dim1;
+		i__4 = i__;
+		i__5 = i__ + j * x_dim1;
+		q__1.r = s[i__4] * x[i__5].r, q__1.i = s[i__4] * x[i__5].i;
+		x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+/* L40: */
+	    }
+/* L50: */
+	}
+/* The forward error bounds apply to the equilibrated system; rescale */
+/* them by SCOND so they refer to the original system. */
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] /= scond;
+/* L60: */
+	}
+    }
+
+/*     Set INFO = N+1 if the matrix is singular to working precision. */
+
+    if (*rcond < slamch_("Epsilon")) {
+	*info = *n + 1;
+    }
+
+    return 0;
+
+/*     End of CPPSVX */
+
+} /* cppsvx_ */
diff --git a/contrib/libs/clapack/cpptrf.c b/contrib/libs/clapack/cpptrf.c
new file mode 100644
index 0000000000..39bf238262
--- /dev/null
+++ b/contrib/libs/clapack/cpptrf.c
@@ -0,0 +1,234 @@
+/* cpptrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b16 = -1.f;
+
+/* Subroutine */ int cpptrf_(char *uplo, integer *n, complex *ap, integer *
+	info)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3;
+    real r__1;
+    complex q__1, q__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer j, jc, jj;
+    real ajj;
+    extern /* Subroutine */ int chpr_(char *, integer *, real *, complex *,
+	    integer *, complex *);
+    extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+	    *, complex *, integer *);
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int ctpsv_(char *, char *, char *, integer *,
+	    complex *, complex *, integer *), csscal_(
+	    integer *, real *, complex *, integer *), xerbla_(char *, integer
+	    *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CPPTRF computes the Cholesky factorization of a complex Hermitian */
+/*  positive definite matrix A stored in packed format. */
+
+/*  The factorization has the form */
+/*     A = U**H * U,  if UPLO = 'U', or */
+/*     A = L  * L**H,  if UPLO = 'L', */
+/*  where U is an upper triangular matrix and L is lower triangular. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangle of A is stored; */
+/*          = 'L':  Lower triangle of A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  AP      (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/*          On entry, the upper or lower triangle of the Hermitian matrix */
+/*          A, packed columnwise in a linear array.  The j-th column of A */
+/*          is stored in the array AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/*          See below for further details. */
+
+/*          On exit, if INFO = 0, the triangular factor U or L from the */
+/*          Cholesky factorization A = U**H*U or A = L*L**H, in the same */
+/*          storage format as A. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, the leading minor of order i is not */
+/*                positive definite, and the factorization could not be */
+/*                completed. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  The packed storage scheme is illustrated by the following example */
+/*  when N = 4, UPLO = 'U': */
+
+/*  Two-dimensional storage of the Hermitian matrix A: */
+
+/*     a11 a12 a13 a14 */
+/*         a22 a23 a24 */
+/*             a33 a34     (aij = conjg(aji)) */
+/*                 a44 */
+
+/*  Packed storage of the upper triangle of A: */
+
+/*  AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPPTRF", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/*        Compute the Cholesky factorization A = U'*U. */
+
+/* JC indexes the start of column J in packed AP; JJ indexes the */
+/* diagonal element A(J,J). */
+	jj = 0;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    jc = jj + 1;
+	    jj += j;
+
+/*           Compute elements 1:J-1 of column J. */
+
+	    if (j > 1) {
+		i__2 = j - 1;
+		ctpsv_("Upper", "Conjugate transpose", "Non-unit", &i__2, &ap[
+			1], &ap[jc], &c__1);
+	    }
+
+/*           Compute U(J,J) and test for non-positive-definiteness. */
+
+	    i__2 = jj;
+	    r__1 = ap[i__2].r;
+	    i__3 = j - 1;
+	    cdotc_(&q__2, &i__3, &ap[jc], &c__1, &ap[jc], &c__1);
+	    q__1.r = r__1 - q__2.r, q__1.i = -q__2.i;
+	    ajj = q__1.r;
+	    if (ajj <= 0.f) {
+		i__2 = jj;
+		ap[i__2].r = ajj, ap[i__2].i = 0.f;
+		goto L30;
+	    }
+	    i__2 = jj;
+	    r__1 = sqrt(ajj);
+	    ap[i__2].r = r__1, ap[i__2].i = 0.f;
+/* L10: */
+	}
+    } else {
+
+/*        Compute the Cholesky factorization A = L*L'. */
+
+/* In lower packed storage JJ indexes the diagonal A(J,J); column J */
+/* occupies AP(JJ..JJ+N-J). */
+	jj = 1;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+
+/*           Compute L(J,J) and test for non-positive-definiteness. */
+
+	    i__2 = jj;
+	    ajj = ap[i__2].r;
+	    if (ajj <= 0.f) {
+		i__2 = jj;
+		ap[i__2].r = ajj, ap[i__2].i = 0.f;
+		goto L30;
+	    }
+	    ajj = sqrt(ajj);
+	    i__2 = jj;
+	    ap[i__2].r = ajj, ap[i__2].i = 0.f;
+
+/*           Compute elements J+1:N of column J and update the trailing */
+/*           submatrix. */
+
+	    if (j < *n) {
+		i__2 = *n - j;
+		r__1 = 1.f / ajj;
+		csscal_(&i__2, &r__1, &ap[jj + 1], &c__1);
+		i__2 = *n - j;
+		chpr_("Lower", &i__2, &c_b16, &ap[jj + 1], &c__1, &ap[jj + *n
+			- j + 1]);
+		jj = jj + *n - j + 1;
+	    }
+/* L20: */
+	}
+    }
+    goto L40;
+
+/* A non-positive diagonal was found in column J: report the failing */
+/* column index through INFO. */
+L30:
+    *info = j;
+
+L40:
+    return 0;
+
+/*     End of CPPTRF */
+
+} /* cpptrf_ */
diff --git a/contrib/libs/clapack/cpptri.c b/contrib/libs/clapack/cpptri.c
new file mode 100644
index 0000000000..b3dcfd4d4e
--- /dev/null
+++ b/contrib/libs/clapack/cpptri.c
@@ -0,0 +1,180 @@
+/* cpptri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b8 = 1.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int cpptri_(char *uplo, integer *n, complex *ap, integer *
+	info)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3;
+    real r__1;
+    complex q__1;
+
+    /* Local variables */
+    integer j, jc, jj;
+    real ajj;
+    integer jjn;
+    extern /* Subroutine */ int chpr_(char *, integer *, real *, complex *,
+	    integer *, complex *);
+    extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+	    *, complex *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int ctpmv_(char *, char *, char *, integer *,
+	    complex *, complex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+	    *), xerbla_(char *, integer *), ctptri_(char *, char *,
+	    integer *, complex *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CPPTRI computes the inverse of a complex Hermitian positive definite */
+/*  matrix A using the Cholesky factorization A = U**H*U or A = L*L**H */
+/*  computed by CPPTRF. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangular factor is stored in AP; */
+/*          = 'L':  Lower triangular factor is stored in AP. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  AP      (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/*          On entry, the triangular factor U or L from the Cholesky */
+/*          factorization A = U**H*U or A = L*L**H, packed columnwise as */
+/*          a linear array.  The j-th column of U or L is stored in the */
+/*          array AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = U(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = L(i,j) for j<=i<=n. */
+
+/*          On exit, the upper or lower triangle of the (Hermitian) */
+/*          inverse of A, overwriting the input factor U or L. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, the (i,i) element of the factor U or L is */
+/*                zero, and the inverse could not be computed. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPPTRI", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Invert the triangular Cholesky factor U or L. */
+
+/* CTPTRI sets INFO > 0 when a diagonal element of the factor is zero; */
+/* propagate that to the caller unchanged. */
+    ctptri_(uplo, "Non-unit", n, &ap[1], info);
+    if (*info > 0) {
+	return 0;
+    }
+    if (upper) {
+
+/*        Compute the product inv(U) * inv(U)'. */
+
+/* JC indexes the start of column J in packed AP; JJ indexes the */
+/* diagonal element (J,J). */
+	jj = 0;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    jc = jj + 1;
+	    jj += j;
+	    if (j > 1) {
+		i__2 = j - 1;
+		chpr_("Upper", &i__2, &c_b8, &ap[jc], &c__1, &ap[1]);
+	    }
+	    i__2 = jj;
+	    ajj = ap[i__2].r;
+	    csscal_(&j, &ajj, &ap[jc], &c__1);
+/* L10: */
+	}
+
+    } else {
+
+/*        Compute the product inv(L)' * inv(L). */
+
+/* JJ indexes the diagonal (J,J) in lower packed storage; JJN is the */
+/* start of column J+1. */
+	jj = 1;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    jjn = jj + *n - j + 1;
+	    i__2 = jj;
+	    i__3 = *n - j + 1;
+	    cdotc_(&q__1, &i__3, &ap[jj], &c__1, &ap[jj], &c__1);
+	    r__1 = q__1.r;
+	    ap[i__2].r = r__1, ap[i__2].i = 0.f;
+	    if (j < *n) {
+		i__2 = *n - j;
+		ctpmv_("Lower", "Conjugate transpose", "Non-unit", &i__2, &ap[
+			jjn], &ap[jj + 1], &c__1);
+	    }
+	    jj = jjn;
+/* L20: */
+	}
+    }
+
+    return 0;
+
+/*     End of CPPTRI */
+
+} /* cpptri_ */
diff --git a/contrib/libs/clapack/cpptrs.c b/contrib/libs/clapack/cpptrs.c
new file mode 100644
index 0000000000..631f8701c6
--- /dev/null
+++ b/contrib/libs/clapack/cpptrs.c
@@ -0,0 +1,170 @@
+/* cpptrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cpptrs_(char *uplo, integer *n, integer *nrhs, complex *
+	ap, complex *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    integer i__;
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int ctpsv_(char *, char *, char *, integer *,
+	    complex *, complex *, integer *), xerbla_(
+	    char *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CPPTRS solves a system of linear equations A*X = B with a Hermitian */
+/*  positive definite matrix A in packed storage using the Cholesky */
+/*  factorization A = U**H*U or A = L*L**H computed by CPPTRF. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangle of A is stored; */
+/*          = 'L':  Lower triangle of A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrix B.  NRHS >= 0. */
+
+/*  AP      (input) COMPLEX array, dimension (N*(N+1)/2) */
+/*          The triangular factor U or L from the Cholesky factorization */
+/*          A = U**H*U or A = L*L**H, packed columnwise in a linear */
+/*          array.  The j-th column of U or L is stored in the array AP */
+/*          as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = U(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = L(i,j) for j<=i<=n. */
+
+/*  B       (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/*          On entry, the right hand side matrix B. */
+/*          On exit, the solution matrix X. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*ldb < max(1,*n)) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPPTRS", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/*        Solve A*X = B where A = U'*U. */
+
+/* Each right-hand side column is solved independently with two */
+/* triangular solves against the packed factor. */
+	i__1 = *nrhs;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*           Solve U'*X = B, overwriting B with X. */
+
+	    ctpsv_("Upper", "Conjugate transpose", "Non-unit", n, &ap[1], &b[
+		    i__ * b_dim1 + 1], &c__1);
+
+/*           Solve U*X = B, overwriting B with X. */
+
+	    ctpsv_("Upper", "No transpose", "Non-unit", n, &ap[1], &b[i__ *
+		    b_dim1 + 1], &c__1);
+/* L10: */
+	}
+    } else {
+
+/*        Solve A*X = B where A = L*L'. */
+
+	i__1 = *nrhs;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*           Solve L*Y = B, overwriting B with X. */
+
+	    ctpsv_("Lower", "No transpose", "Non-unit", n, &ap[1], &b[i__ *
+		    b_dim1 + 1], &c__1);
+
+/*           Solve L'*X = Y, overwriting B with X. */
+
+	    ctpsv_("Lower", "Conjugate transpose", "Non-unit", n, &ap[1], &b[
+		    i__ * b_dim1 + 1], &c__1);
+/* L20: */
+	}
+    }
+
+    return 0;
+
+/*     End of CPPTRS */
+
+} /* cpptrs_ */
diff --git a/contrib/libs/clapack/cpstf2.c b/contrib/libs/clapack/cpstf2.c
new file mode 100644
index 0000000000..1811995dfb
--- /dev/null
+++ b/contrib/libs/clapack/cpstf2.c
@@ -0,0 +1,442 @@
+/* cpstf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int cpstf2_(char *uplo, integer *n, complex *a, integer *lda, 
+	 integer *piv, integer *rank, real *tol, real *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    real r__1;
+    complex q__1, q__2;
+
+    /* Builtin functions */
+    void r_cnjg(complex *, complex *);
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, maxlocval;
+    real ajj;
+    integer pvt;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *);
+    complex ctemp;
+    extern /* Subroutine */ int cswap_(integer *, complex *, integer *, 
+	    complex *, integer *);
+    integer itemp;
+    real stemp;
+    logical upper;
+    real sstop;
+    extern /* Subroutine */ int clacgv_(integer *, complex *, integer *);
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer 
+	    *), xerbla_(char *, integer *);
+    extern logical sisnan_(real *);
+    extern integer smaxloc_(real *, integer *);
+
+
+/* -- LAPACK PROTOTYPE routine (version 3.2) -- */
+/* Craig Lucas, University of Manchester / NAG Ltd. */
+/* October, 2008 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPSTF2 computes the Cholesky factorization with complete */
+/* pivoting of a complex Hermitian positive semidefinite matrix A. */
+
+/* The factorization has the form */
+/* P' * A * P = U' * U , if UPLO = 'U', */
+/* P' * A * P = L * L', if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular, and */
+/* P is stored as vector PIV. */
+
+/* This algorithm does not attempt to check that A is positive */
+/* semidefinite. This version of the algorithm calls level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization as above. */
+
+/* PIV (output) INTEGER array, dimension (N) */
+/* PIV is such that the nonzero entries are P( PIV(K), K ) = 1. */
+
+/* RANK (output) INTEGER */
+/* The rank of A given by the number of steps the algorithm */
+/* completed. */
+
+/* TOL (input) REAL */
+/* User defined tolerance. If TOL < 0, then N*U*MAX( A( K,K ) ) */
+/* will be used. The algorithm terminates at the (K-1)st step */
+/* if the pivot <= TOL. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+/* (Note: LDA is the 4th actual argument, before PIV/RANK/TOL.) */
+
+/* WORK REAL array, dimension (2*N) */
+/* Work space. */
+
+/* INFO (output) INTEGER */
+/* < 0: If INFO = -K, the K-th argument had an illegal value, */
+/* = 0: algorithm completed successfully, and */
+/* > 0: the matrix A is either rank deficient with computed rank */
+/* as returned in RANK, or is indefinite. See Section 7 of */
+/* LAPACK Working Note #161 for further information. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters (illegal arguments abort via XERBLA) */
+
+    /* Parameter adjustments */
+    --work;
+    --piv;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPSTF2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Initialize PIV to the identity permutation */
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	piv[i__] = i__;
+/* L100: */
+    }
+
+/* Scan the diagonal for its largest entry: the first pivot candidate */
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = i__ + i__ * a_dim1;
+	work[i__] = a[i__2].r;
+/* L110: */
+    }
+    pvt = smaxloc_(&work[1], n);
+    i__1 = pvt + pvt * a_dim1;
+    ajj = a[i__1].r;
+    if (ajj == 0.f || sisnan_(&ajj)) {
+	*rank = 0;
+	*info = 1;
+	goto L200;
+    }
+
+/* Default stopping tolerance is N * eps * max(diag) when TOL < 0 */
+
+    if (*tol < 0.f) {
+	sstop = *n * slamch_("Epsilon") * ajj;
+    } else {
+	sstop = *tol;
+    }
+
+/* Zero WORK(1:N); it accumulates the running column dot products */
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	work[i__] = 0.f;
+/* L120: */
+    }
+
+    if (upper) {
+
+/* Compute the Cholesky factorization P' * A * P = U' * U */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+
+/* Find pivot, test for exit, else swap rows and columns */
+/* Update dot products, compute possible pivots which are */
+/* stored in the second half of WORK */
+
+	    i__2 = *n;
+	    for (i__ = j; i__ <= i__2; ++i__) {
+
+		if (j > 1) {
+		    r_cnjg(&q__2, &a[j - 1 + i__ * a_dim1]);
+		    i__3 = j - 1 + i__ * a_dim1;
+		    q__1.r = q__2.r * a[i__3].r - q__2.i * a[i__3].i, q__1.i =
+			     q__2.r * a[i__3].i + q__2.i * a[i__3].r;
+		    work[i__] += q__1.r;
+		}
+		i__3 = i__ + i__ * a_dim1;
+		work[*n + i__] = a[i__3].r - work[i__];
+
+/* L130: */
+	    }
+
+	    if (j > 1) {
+		maxlocval = (*n << 1) - (*n + j) + 1;
+		itemp = smaxloc_(&work[*n + j], &maxlocval);
+		pvt = itemp + j - 1;
+		ajj = work[*n + pvt];
+		if (ajj <= sstop || sisnan_(&ajj)) {
+		    i__2 = j + j * a_dim1;
+		    a[i__2].r = ajj, a[i__2].i = 0.f;
+		    goto L190;
+		}
+	    }
+
+	    if (j != pvt) {
+
+/* Pivot OK: swap row/column J and PVT, conjugating to stay Hermitian */
+
+		i__2 = pvt + pvt * a_dim1;
+		i__3 = j + j * a_dim1;
+		a[i__2].r = a[i__3].r, a[i__2].i = a[i__3].i;
+		i__2 = j - 1;
+		cswap_(&i__2, &a[j * a_dim1 + 1], &c__1, &a[pvt * a_dim1 + 1], 
+			 &c__1);
+		if (pvt < *n) {
+		    i__2 = *n - pvt;
+		    cswap_(&i__2, &a[j + (pvt + 1) * a_dim1], lda, &a[pvt + (
+			    pvt + 1) * a_dim1], lda);
+		}
+		i__2 = pvt - 1;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    r_cnjg(&q__1, &a[j + i__ * a_dim1]);
+		    ctemp.r = q__1.r, ctemp.i = q__1.i;
+		    i__3 = j + i__ * a_dim1;
+		    r_cnjg(&q__1, &a[i__ + pvt * a_dim1]);
+		    a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+		    i__3 = i__ + pvt * a_dim1;
+		    a[i__3].r = ctemp.r, a[i__3].i = ctemp.i;
+/* L140: */
+		}
+		i__2 = j + pvt * a_dim1;
+		r_cnjg(&q__1, &a[j + pvt * a_dim1]);
+		a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+
+/* Swap dot products and PIV */
+
+		stemp = work[j];
+		work[j] = work[pvt];
+		work[pvt] = stemp;
+		itemp = piv[pvt];
+		piv[pvt] = piv[j];
+		piv[j] = itemp;
+	    }
+
+	    ajj = sqrt(ajj);
+	    i__2 = j + j * a_dim1;
+	    a[i__2].r = ajj, a[i__2].i = 0.f;
+
+/* Compute elements J+1:N of row J and scale them by 1/sqrt(pivot) */
+
+	    if (j < *n) {
+		i__2 = j - 1;
+		clacgv_(&i__2, &a[j * a_dim1 + 1], &c__1);
+		i__2 = j - 1;
+		i__3 = *n - j;
+		q__1.r = -1.f, q__1.i = -0.f;
+		cgemv_("Trans", &i__2, &i__3, &q__1, &a[(j + 1) * a_dim1 + 1], 
+			 lda, &a[j * a_dim1 + 1], &c__1, &c_b1, &a[j + (j + 1)
+			 * a_dim1], lda);
+		i__2 = j - 1;
+		clacgv_(&i__2, &a[j * a_dim1 + 1], &c__1);
+		i__2 = *n - j;
+		r__1 = 1.f / ajj;
+		csscal_(&i__2, &r__1, &a[j + (j + 1) * a_dim1], lda);
+	    }
+
+/* L150: */
+	}
+
+    } else {
+
+/* Compute the Cholesky factorization P' * A * P = L * L' */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+
+/* Find pivot, test for exit, else swap rows and columns */
+/* Update dot products, compute possible pivots which are */
+/* stored in the second half of WORK */
+
+	    i__2 = *n;
+	    for (i__ = j; i__ <= i__2; ++i__) {
+
+		if (j > 1) {
+		    r_cnjg(&q__2, &a[i__ + (j - 1) * a_dim1]);
+		    i__3 = i__ + (j - 1) * a_dim1;
+		    q__1.r = q__2.r * a[i__3].r - q__2.i * a[i__3].i, q__1.i =
+			     q__2.r * a[i__3].i + q__2.i * a[i__3].r;
+		    work[i__] += q__1.r;
+		}
+		i__3 = i__ + i__ * a_dim1;
+		work[*n + i__] = a[i__3].r - work[i__];
+
+/* L160: */
+	    }
+
+	    if (j > 1) {
+		maxlocval = (*n << 1) - (*n + j) + 1;
+		itemp = smaxloc_(&work[*n + j], &maxlocval);
+		pvt = itemp + j - 1;
+		ajj = work[*n + pvt];
+		if (ajj <= sstop || sisnan_(&ajj)) {
+		    i__2 = j + j * a_dim1;
+		    a[i__2].r = ajj, a[i__2].i = 0.f;
+		    goto L190;
+		}
+	    }
+
+	    if (j != pvt) {
+
+/* Pivot OK: swap row/column J and PVT, conjugating to stay Hermitian */
+
+		i__2 = pvt + pvt * a_dim1;
+		i__3 = j + j * a_dim1;
+		a[i__2].r = a[i__3].r, a[i__2].i = a[i__3].i;
+		i__2 = j - 1;
+		cswap_(&i__2, &a[j + a_dim1], lda, &a[pvt + a_dim1], lda);
+		if (pvt < *n) {
+		    i__2 = *n - pvt;
+		    cswap_(&i__2, &a[pvt + 1 + j * a_dim1], &c__1, &a[pvt + 1 
+			    + pvt * a_dim1], &c__1);
+		}
+		i__2 = pvt - 1;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    r_cnjg(&q__1, &a[i__ + j * a_dim1]);
+		    ctemp.r = q__1.r, ctemp.i = q__1.i;
+		    i__3 = i__ + j * a_dim1;
+		    r_cnjg(&q__1, &a[pvt + i__ * a_dim1]);
+		    a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+		    i__3 = pvt + i__ * a_dim1;
+		    a[i__3].r = ctemp.r, a[i__3].i = ctemp.i;
+/* L170: */
+		}
+		i__2 = pvt + j * a_dim1;
+		r_cnjg(&q__1, &a[pvt + j * a_dim1]);
+		a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+
+/* Swap dot products and PIV */
+
+		stemp = work[j];
+		work[j] = work[pvt];
+		work[pvt] = stemp;
+		itemp = piv[pvt];
+		piv[pvt] = piv[j];
+		piv[j] = itemp;
+	    }
+
+	    ajj = sqrt(ajj);
+	    i__2 = j + j * a_dim1;
+	    a[i__2].r = ajj, a[i__2].i = 0.f;
+
+/* Compute elements J+1:N of column J and scale them by 1/sqrt(pivot) */
+
+	    if (j < *n) {
+		i__2 = j - 1;
+		clacgv_(&i__2, &a[j + a_dim1], lda);
+		i__2 = *n - j;
+		i__3 = j - 1;
+		q__1.r = -1.f, q__1.i = -0.f;
+		cgemv_("No Trans", &i__2, &i__3, &q__1, &a[j + 1 + a_dim1], 
+			lda, &a[j + a_dim1], lda, &c_b1, &a[j + 1 + j * 
+			a_dim1], &c__1);
+		i__2 = j - 1;
+		clacgv_(&i__2, &a[j + a_dim1], lda);
+		i__2 = *n - j;
+		r__1 = 1.f / ajj;
+		csscal_(&i__2, &r__1, &a[j + 1 + j * a_dim1], &c__1);
+	    }
+
+/* L180: */
+	}
+
+    }
+
+/* Ran to completion, A has full rank */
+
+    *rank = *n;
+
+    goto L200;
+L190:
+
+/* Rank is number of steps completed. Set INFO = 1 to signal */
+/* that the factorization cannot be used to solve a system. */
+
+    *rank = j - 1;
+    *info = 1;
+
+L200:
+    return 0;
+
+/* End of CPSTF2 */
+
+} /* cpstf2_ */
diff --git a/contrib/libs/clapack/cpstrf.c b/contrib/libs/clapack/cpstrf.c
new file mode 100644
index 0000000000..aab54b91d5
--- /dev/null
+++ b/contrib/libs/clapack/cpstrf.c
@@ -0,0 +1,521 @@
+/* cpstrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b29 = -1.f;
+static real c_b30 = 1.f;
+
+/* Subroutine */ int cpstrf_(char *uplo, integer *n, complex *a, integer *lda, 
+	 integer *piv, integer *rank, real *tol, real *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    real r__1;
+    complex q__1, q__2;
+
+    /* Builtin functions */
+    void r_cnjg(complex *, complex *);
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, k, maxlocval, jb, nb;
+    real ajj;
+    integer pvt;
+    extern /* Subroutine */ int cherk_(char *, char *, integer *, integer *, 
+	    real *, complex *, integer *, real *, complex *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *);
+    complex ctemp;
+    extern /* Subroutine */ int cswap_(integer *, complex *, integer *, 
+	    complex *, integer *);
+    integer itemp;
+    real stemp;
+    logical upper;
+    real sstop;
+    extern /* Subroutine */ int cpstf2_(char *, integer *, complex *, integer 
+	    *, integer *, integer *, real *, real *, integer *), 
+	    clacgv_(integer *, complex *, integer *);
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer 
+	    *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    extern logical sisnan_(real *);
+    extern integer smaxloc_(real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Craig Lucas, University of Manchester / NAG Ltd. */
+/* October, 2008 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPSTRF computes the Cholesky factorization with complete */
+/* pivoting of a complex Hermitian positive semidefinite matrix A. */
+
+/* The factorization has the form */
+/* P' * A * P = U' * U , if UPLO = 'U', */
+/* P' * A * P = L * L', if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular, and */
+/* P is stored as vector PIV. */
+
+/* This algorithm does not attempt to check that A is positive */
+/* semidefinite. This version of the algorithm calls level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization as above. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* PIV (output) INTEGER array, dimension (N) */
+/* PIV is such that the nonzero entries are P( PIV(K), K ) = 1. */
+
+/* RANK (output) INTEGER */
+/* The rank of A given by the number of steps the algorithm */
+/* completed. */
+
+/* TOL (input) REAL */
+/* User defined tolerance. If TOL < 0, then N*U*MAX( A(K,K) ) */
+/* will be used. The algorithm terminates at the (K-1)st step */
+/* if the pivot <= TOL. */
+
+/* WORK REAL array, dimension (2*N) */
+/* Work space. */
+
+/* INFO (output) INTEGER */
+/* < 0: If INFO = -K, the K-th argument had an illegal value, */
+/* = 0: algorithm completed successfully, and */
+/* > 0: the matrix A is either rank deficient with computed rank */
+/* as returned in RANK, or is indefinite. See Section 7 of */
+/* LAPACK Working Note #161 for further information. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters (illegal arguments abort via XERBLA). */
+
+    /* Parameter adjustments */
+    --work;
+    --piv;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPSTRF", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Get block size from ILAENV (reuses CPOTRF's tuning parameters) */
+
+    nb = ilaenv_(&c__1, "CPOTRF", uplo, n, &c_n1, &c_n1, &c_n1);
+    if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code when blocking cannot pay off */
+
+	cpstf2_(uplo, n, &a[a_dim1 + 1], lda, &piv[1], rank, tol, &work[1], 
+		info);
+	goto L230;
+
+    } else {
+
+/* Initialize PIV to the identity permutation */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    piv[i__] = i__;
+/* L100: */
+	}
+
+/* Scan the diagonal for its largest entry: the first pivot candidate */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__ + i__ * a_dim1;
+	    work[i__] = a[i__2].r;
+/* L110: */
+	}
+	pvt = smaxloc_(&work[1], n);
+	i__1 = pvt + pvt * a_dim1;
+	ajj = a[i__1].r;
+	if (ajj == 0.f || sisnan_(&ajj)) {
+	    *rank = 0;
+	    *info = 1;
+	    goto L230;
+	}
+
+/* Default stopping tolerance is N * eps * max(diag) when TOL < 0 */
+
+	if (*tol < 0.f) {
+	    sstop = *n * slamch_("Epsilon") * ajj;
+	} else {
+	    sstop = *tol;
+	}
+
+
+	if (upper) {
+
+/* Compute the Cholesky factorization P' * A * P = U' * U */
+
+	    i__1 = *n;
+	    i__2 = nb;
+	    for (k = 1; i__2 < 0 ? k >= i__1 : k <= i__1; k += i__2) {
+
+/* Account for last block not being NB wide */
+
+/* Computing MIN */
+		i__3 = nb, i__4 = *n - k + 1;
+		jb = min(i__3,i__4);
+
+/* Set relevant part of first half of WORK to zero, */
+/* holds dot products */
+
+		i__3 = *n;
+		for (i__ = k; i__ <= i__3; ++i__) {
+		    work[i__] = 0.f;
+/* L120: */
+		}
+
+		i__3 = k + jb - 1;
+		for (j = k; j <= i__3; ++j) {
+
+/* Find pivot, test for exit, else swap rows and columns */
+/* Update dot products, compute possible pivots which are */
+/* stored in the second half of WORK */
+
+		    i__4 = *n;
+		    for (i__ = j; i__ <= i__4; ++i__) {
+
+			if (j > k) {
+			    r_cnjg(&q__2, &a[j - 1 + i__ * a_dim1]);
+			    i__5 = j - 1 + i__ * a_dim1;
+			    q__1.r = q__2.r * a[i__5].r - q__2.i * a[i__5].i, 
+				    q__1.i = q__2.r * a[i__5].i + q__2.i * a[
+				    i__5].r;
+			    work[i__] += q__1.r;
+			}
+			i__5 = i__ + i__ * a_dim1;
+			work[*n + i__] = a[i__5].r - work[i__];
+
+/* L130: */
+		    }
+
+		    if (j > 1) {
+			maxlocval = (*n << 1) - (*n + j) + 1;
+			itemp = smaxloc_(&work[*n + j], &maxlocval);
+			pvt = itemp + j - 1;
+			ajj = work[*n + pvt];
+			if (ajj <= sstop || sisnan_(&ajj)) {
+			    i__4 = j + j * a_dim1;
+			    a[i__4].r = ajj, a[i__4].i = 0.f;
+			    goto L220;
+			}
+		    }
+
+		    if (j != pvt) {
+
+/* Pivot OK: swap row/column J and PVT, conjugating to stay Hermitian */
+
+			i__4 = pvt + pvt * a_dim1;
+			i__5 = j + j * a_dim1;
+			a[i__4].r = a[i__5].r, a[i__4].i = a[i__5].i;
+			i__4 = j - 1;
+			cswap_(&i__4, &a[j * a_dim1 + 1], &c__1, &a[pvt * 
+				a_dim1 + 1], &c__1);
+			if (pvt < *n) {
+			    i__4 = *n - pvt;
+			    cswap_(&i__4, &a[j + (pvt + 1) * a_dim1], lda, &a[
+				    pvt + (pvt + 1) * a_dim1], lda);
+			}
+			i__4 = pvt - 1;
+			for (i__ = j + 1; i__ <= i__4; ++i__) {
+			    r_cnjg(&q__1, &a[j + i__ * a_dim1]);
+			    ctemp.r = q__1.r, ctemp.i = q__1.i;
+			    i__5 = j + i__ * a_dim1;
+			    r_cnjg(&q__1, &a[i__ + pvt * a_dim1]);
+			    a[i__5].r = q__1.r, a[i__5].i = q__1.i;
+			    i__5 = i__ + pvt * a_dim1;
+			    a[i__5].r = ctemp.r, a[i__5].i = ctemp.i;
+/* L140: */
+			}
+			i__4 = j + pvt * a_dim1;
+			r_cnjg(&q__1, &a[j + pvt * a_dim1]);
+			a[i__4].r = q__1.r, a[i__4].i = q__1.i;
+
+/* Swap dot products and PIV */
+
+			stemp = work[j];
+			work[j] = work[pvt];
+			work[pvt] = stemp;
+			itemp = piv[pvt];
+			piv[pvt] = piv[j];
+			piv[j] = itemp;
+		    }
+
+		    ajj = sqrt(ajj);
+		    i__4 = j + j * a_dim1;
+		    a[i__4].r = ajj, a[i__4].i = 0.f;
+
+/* Compute elements J+1:N of row J and scale them by 1/sqrt(pivot). */
+
+		    if (j < *n) {
+			i__4 = j - 1;
+			clacgv_(&i__4, &a[j * a_dim1 + 1], &c__1);
+			i__4 = j - k;
+			i__5 = *n - j;
+			q__1.r = -1.f, q__1.i = -0.f;
+			cgemv_("Trans", &i__4, &i__5, &q__1, &a[k + (j + 1) * 
+				a_dim1], lda, &a[k + j * a_dim1], &c__1, &
+				c_b1, &a[j + (j + 1) * a_dim1], lda);
+			i__4 = j - 1;
+			clacgv_(&i__4, &a[j * a_dim1 + 1], &c__1);
+			i__4 = *n - j;
+			r__1 = 1.f / ajj;
+			csscal_(&i__4, &r__1, &a[j + (j + 1) * a_dim1], lda);
+		    }
+
+/* L150: */
+		}
+
+/* Rank-JB CHERK update of the trailing matrix, J already incremented */
+
+		if (k + jb <= *n) {
+		    i__3 = *n - j + 1;
+		    cherk_("Upper", "Conj Trans", &i__3, &jb, &c_b29, &a[k + 
+			    j * a_dim1], lda, &c_b30, &a[j + j * a_dim1], lda);
+		}
+
+/* L160: */
+	    }
+
+	} else {
+
+/* Compute the Cholesky factorization P' * A * P = L * L' */
+
+	    i__2 = *n;
+	    i__1 = nb;
+	    for (k = 1; i__1 < 0 ? k >= i__2 : k <= i__2; k += i__1) {
+
+/* Account for last block not being NB wide */
+
+/* Computing MIN */
+		i__3 = nb, i__4 = *n - k + 1;
+		jb = min(i__3,i__4);
+
+/* Set relevant part of first half of WORK to zero, */
+/* holds dot products */
+
+		i__3 = *n;
+		for (i__ = k; i__ <= i__3; ++i__) {
+		    work[i__] = 0.f;
+/* L170: */
+		}
+
+		i__3 = k + jb - 1;
+		for (j = k; j <= i__3; ++j) {
+
+/* Find pivot, test for exit, else swap rows and columns */
+/* Update dot products, compute possible pivots which are */
+/* stored in the second half of WORK */
+
+		    i__4 = *n;
+		    for (i__ = j; i__ <= i__4; ++i__) {
+
+			if (j > k) {
+			    r_cnjg(&q__2, &a[i__ + (j - 1) * a_dim1]);
+			    i__5 = i__ + (j - 1) * a_dim1;
+			    q__1.r = q__2.r * a[i__5].r - q__2.i * a[i__5].i, 
+				    q__1.i = q__2.r * a[i__5].i + q__2.i * a[
+				    i__5].r;
+			    work[i__] += q__1.r;
+			}
+			i__5 = i__ + i__ * a_dim1;
+			work[*n + i__] = a[i__5].r - work[i__];
+
+/* L180: */
+		    }
+
+		    if (j > 1) {
+			maxlocval = (*n << 1) - (*n + j) + 1;
+			itemp = smaxloc_(&work[*n + j], &maxlocval);
+			pvt = itemp + j - 1;
+			ajj = work[*n + pvt];
+			if (ajj <= sstop || sisnan_(&ajj)) {
+			    i__4 = j + j * a_dim1;
+			    a[i__4].r = ajj, a[i__4].i = 0.f;
+			    goto L220;
+			}
+		    }
+
+		    if (j != pvt) {
+
+/* Pivot OK: swap row/column J and PVT, conjugating to stay Hermitian */
+
+			i__4 = pvt + pvt * a_dim1;
+			i__5 = j + j * a_dim1;
+			a[i__4].r = a[i__5].r, a[i__4].i = a[i__5].i;
+			i__4 = j - 1;
+			cswap_(&i__4, &a[j + a_dim1], lda, &a[pvt + a_dim1], 
+				lda);
+			if (pvt < *n) {
+			    i__4 = *n - pvt;
+			    cswap_(&i__4, &a[pvt + 1 + j * a_dim1], &c__1, &a[
+				    pvt + 1 + pvt * a_dim1], &c__1);
+			}
+			i__4 = pvt - 1;
+			for (i__ = j + 1; i__ <= i__4; ++i__) {
+			    r_cnjg(&q__1, &a[i__ + j * a_dim1]);
+			    ctemp.r = q__1.r, ctemp.i = q__1.i;
+			    i__5 = i__ + j * a_dim1;
+			    r_cnjg(&q__1, &a[pvt + i__ * a_dim1]);
+			    a[i__5].r = q__1.r, a[i__5].i = q__1.i;
+			    i__5 = pvt + i__ * a_dim1;
+			    a[i__5].r = ctemp.r, a[i__5].i = ctemp.i;
+/* L190: */
+			}
+			i__4 = pvt + j * a_dim1;
+			r_cnjg(&q__1, &a[pvt + j * a_dim1]);
+			a[i__4].r = q__1.r, a[i__4].i = q__1.i;
+
+/* Swap dot products and PIV */
+
+			stemp = work[j];
+			work[j] = work[pvt];
+			work[pvt] = stemp;
+			itemp = piv[pvt];
+			piv[pvt] = piv[j];
+			piv[j] = itemp;
+		    }
+
+		    ajj = sqrt(ajj);
+		    i__4 = j + j * a_dim1;
+		    a[i__4].r = ajj, a[i__4].i = 0.f;
+
+/* Compute elements J+1:N of column J and scale them by 1/sqrt(pivot). */
+
+		    if (j < *n) {
+			i__4 = j - 1;
+			clacgv_(&i__4, &a[j + a_dim1], lda);
+			i__4 = *n - j;
+			i__5 = j - k;
+			q__1.r = -1.f, q__1.i = -0.f;
+			cgemv_("No Trans", &i__4, &i__5, &q__1, &a[j + 1 + k *
+				 a_dim1], lda, &a[j + k * a_dim1], lda, &c_b1, 
+				 &a[j + 1 + j * a_dim1], &c__1);
+			i__4 = j - 1;
+			clacgv_(&i__4, &a[j + a_dim1], lda);
+			i__4 = *n - j;
+			r__1 = 1.f / ajj;
+			csscal_(&i__4, &r__1, &a[j + 1 + j * a_dim1], &c__1);
+		    }
+
+/* L200: */
+		}
+
+/* Rank-JB CHERK update of the trailing matrix, J already incremented */
+
+		if (k + jb <= *n) {
+		    i__3 = *n - j + 1;
+		    cherk_("Lower", "No Trans", &i__3, &jb, &c_b29, &a[j + k *
+			     a_dim1], lda, &c_b30, &a[j + j * a_dim1], lda);
+		}
+
+/* L210: */
+	    }
+
+	}
+    }
+
+/* Ran to completion, A has full rank */
+
+    *rank = *n;
+
+    goto L230;
+L220:
+
+/* Rank is the number of steps completed. Set INFO = 1 to signal */
+/* that the factorization cannot be used to solve a system. */
+
+    *rank = j - 1;
+    *info = 1;
+
+L230:
+    return 0;
+
+/* End of CPSTRF */
+
+} /* cpstrf_ */
diff --git a/contrib/libs/clapack/cptcon.c b/contrib/libs/clapack/cptcon.c
new file mode 100644
index 0000000000..a09b17de0b
--- /dev/null
+++ b/contrib/libs/clapack/cptcon.c
@@ -0,0 +1,186 @@
+/* cptcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cptcon_(integer *n, real *d__, complex *e, real *anorm, 
+	real *rcond, real *rwork, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    real r__1;
+
+    /* Builtin functions */
+    double c_abs(complex *);
+
+    /* Local variables */
+    integer i__, ix;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer isamax_(integer *, real *, integer *);
+    real ainvnm;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPTCON computes the reciprocal of the condition number (in the */
+/* 1-norm) of a complex Hermitian positive definite tridiagonal matrix */
+/* using the factorization A = L*D*L**H or A = U**H*D*U computed by */
+/* CPTTRF. */
+
+/* Norm(inv(A)) is computed by a direct method, and the reciprocal of */
+/* the condition number is computed as */
+/* RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* D (input) REAL array, dimension (N) */
+/* The n diagonal elements of the diagonal matrix D from the */
+/* factorization of A, as computed by CPTTRF. */
+
+/* E (input) COMPLEX array, dimension (N-1) */
+/* The (n-1) off-diagonal elements of the unit bidiagonal factor */
+/* U or L from the factorization of A, as computed by CPTTRF. */
+
+/* ANORM (input) REAL */
+/* The 1-norm of the original matrix A. */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is the */
+/* 1-norm of inv(A) computed in this routine. */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The method used is described in Nicholas J. Higham, "Efficient */
+/* Algorithms for Computing the Condition Number of a Tridiagonal */
+/* Matrix", SIAM J. Sci. Stat. Comput., Vol. 7, No. 1, January 1986. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments (illegal arguments abort via XERBLA). */
+
+    /* Parameter adjustments */
+    --rwork;
+    --e;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*anorm < 0.f) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPTCON", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *rcond = 0.f;
+    if (*n == 0) {
+	*rcond = 1.f;
+	return 0;
+    } else if (*anorm == 0.f) {
+	return 0;
+    }
+
+/* Check that D(1:N) is positive; otherwise A is not positive */
+/* definite and RCOND is left at zero. */
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if (d__[i__] <= 0.f) {
+	    return 0;
+	}
+/* L10: */
+    }
+
+/* Solve M(A) * x = e, where M(A) = (m(i,j)) is given by */
+
+/* m(i,j) = abs(A(i,j)), i = j, */
+/* m(i,j) = -abs(A(i,j)), i .ne. j, */
+
+/* and e = [ 1, 1, ..., 1 ]'. Note M(A) = M(L)*D*M(L)'. */
+
+/* Solve M(L) * x = e (forward substitution on the unit bidiagonal). */
+
+    rwork[1] = 1.f;
+    i__1 = *n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+	rwork[i__] = rwork[i__ - 1] * c_abs(&e[i__ - 1]) + 1.f;
+/* L20: */
+    }
+
+/* Solve D * M(L)' * x = b (backward substitution). */
+
+    rwork[*n] /= d__[*n];
+    for (i__ = *n - 1; i__ >= 1; --i__) {
+	rwork[i__] = rwork[i__] / d__[i__] + rwork[i__ + 1] * c_abs(&e[i__]);
+/* L30: */
+    }
+
+/* Compute AINVNM = max(x(i)), 1<=i<=n, via ISAMAX. */
+
+    ix = isamax_(n, &rwork[1], &c__1);
+    ainvnm = (r__1 = rwork[ix], dabs(r__1));
+
+/* Compute the reciprocal condition number. */
+
+    if (ainvnm != 0.f) {
+	*rcond = 1.f / ainvnm / *anorm;
+    }
+
+    return 0;
+
+/* End of CPTCON */
+
+} /* cptcon_ */
diff --git a/contrib/libs/clapack/cpteqr.c b/contrib/libs/clapack/cpteqr.c
new file mode 100644
index 0000000000..82c53c6f30
--- /dev/null
+++ b/contrib/libs/clapack/cpteqr.c
@@ -0,0 +1,241 @@
+/* cpteqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__0 = 0;
+static integer c__1 = 1;
+
+/* Subroutine */ int cpteqr_(char *compz, integer *n, real *d__, real *e, 
+	complex *z__, integer *ldz, real *work, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    complex c__[1]	/* was [1][1] */;
+    integer i__;
+    complex vt[1]	/* was [1][1] */;
+    integer nru;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int claset_(char *, integer *, integer *, complex 
+	    *, complex *, complex *, integer *), xerbla_(char *, 
+	    integer *), cbdsqr_(char *, integer *, integer *, integer 
+	    *, integer *, real *, real *, complex *, integer *, complex *, 
+	    integer *, complex *, integer *, real *, integer *);
+    integer icompz;
+    extern /* Subroutine */ int spttrf_(integer *, real *, real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPTEQR computes all eigenvalues and, optionally, eigenvectors of a */
+/* symmetric positive definite tridiagonal matrix by first factoring the */
+/* matrix using SPTTRF and then calling CBDSQR to compute the singular */
+/* values of the bidiagonal factor. */
+
+/* This routine computes the eigenvalues of the positive definite */
+/* tridiagonal matrix to high relative accuracy. This means that if the */
+/* eigenvalues range over many orders of magnitude in size, then the */
+/* small eigenvalues and corresponding eigenvectors will be computed */
+/* more accurately than, for example, with the standard QR method. */
+
+/* The eigenvectors of a full or band positive definite Hermitian matrix */
+/* can also be found if CHETRD, CHPTRD, or CHBTRD has been used to */
+/* reduce this matrix to tridiagonal form. (The reduction to */
+/* tridiagonal form, however, may preclude the possibility of obtaining */
+/* high relative accuracy in the small eigenvalues of the original */
+/* matrix, if these eigenvalues range over many orders of magnitude.) */
+
+/* Arguments */
+/* ========= */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only. */
+/* = 'V': Compute eigenvectors of original Hermitian */
+/* matrix also. Array Z contains the unitary matrix */
+/* used to reduce the original matrix to tridiagonal */
+/* form. */
+/* = 'I': Compute eigenvectors of tridiagonal matrix also. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the n diagonal elements of the tridiagonal matrix. */
+/* On normal exit, D contains the eigenvalues, in descending */
+/* order. */
+
+/* E (input/output) REAL array, dimension (N-1) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix. */
+/* On exit, E has been destroyed. */
+
+/* Z (input/output) COMPLEX array, dimension (LDZ, N) */
+/* On entry, if COMPZ = 'V', the unitary matrix used in the */
+/* reduction to tridiagonal form. */
+/* On exit, if COMPZ = 'V', the orthonormal eigenvectors of the */
+/* original Hermitian matrix; */
+/* if COMPZ = 'I', the orthonormal eigenvectors of the */
+/* tridiagonal matrix. */
+/* If INFO > 0 on exit, Z contains the eigenvectors associated */
+/* with only the stored eigenvalues. */
+/* If COMPZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* COMPZ = 'V' or 'I', LDZ >= max(1,N). */
+
+/* WORK (workspace) REAL array, dimension (4*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, and i is: */
+/* <= N the Cholesky factorization of the matrix could */
+/* not be performed because the i-th principal minor */
+/* was not positive definite. */
+/* > N the SVD algorithm failed to converge; */
+/* if INFO = N+i, i off-diagonal elements of the */
+/* bidiagonal factor did not converge to zero. */
+
+/* ==================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+
+/* Decode COMPZ into icompz: 0 = 'N', 1 = 'V', 2 = 'I'; -1 flags an */
+/* illegal value, reported through XERBLA below. */
+
+    if (lsame_(compz, "N")) {
+	icompz = 0;
+    } else if (lsame_(compz, "V")) {
+	icompz = 1;
+    } else if (lsame_(compz, "I")) {
+	icompz = 2;
+    } else {
+	icompz = -1;
+    }
+    if (icompz < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*ldz < 1 || icompz > 0 && *ldz < max(1,*n)) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPTEQR", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* N = 1: the single eigenvalue is D(1) as given; the eigenvector, */
+/* if requested, is the 1-by-1 identity. */
+
+    if (*n == 1) {
+	if (icompz > 0) {
+	    i__1 = z_dim1 + 1;
+	    z__[i__1].r = 1.f, z__[i__1].i = 0.f;
+	}
+	return 0;
+    }
+
+/* For COMPZ = 'I', start Z as the identity so CBDSQR accumulates the */
+/* eigenvectors of the tridiagonal matrix itself. */
+
+    if (icompz == 2) {
+	claset_("Full", n, n, &c_b1, &c_b2, &z__[z_offset], ldz);
+    }
+
+/* Call SPTTRF to factor the matrix. */
+
+    spttrf_(n, &d__[1], &e[1], info);
+    if (*info != 0) {
+	return 0;
+    }
+
+/* Scale D and E to form the bidiagonal factor whose singular values, */
+/* squared below, are the eigenvalues (see Purpose above). */
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	d__[i__] = sqrt(d__[i__]);
+/* L10: */
+    }
+    i__1 = *n - 1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	e[i__] *= d__[i__];
+/* L20: */
+    }
+
+/* Call CBDSQR to compute the singular values/vectors of the */
+/* bidiagonal factor. */
+
+/* NRU = number of rows of Z to rotate; 0 when eigenvectors are not */
+/* wanted, so Z is never referenced in that case. */
+
+    if (icompz > 0) {
+	nru = *n;
+    } else {
+	nru = 0;
+    }
+    cbdsqr_("Lower", n, &c__0, &nru, &c__0, &d__[1], &e[1], vt, &c__1, &z__[
+	    z_offset], ldz, c__, &c__1, &work[1], info);
+
+/* Square the singular values. */
+
+    if (*info == 0) {
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    d__[i__] *= d__[i__];
+/* L30: */
+	}
+    } else {
+
+/* CBDSQR failed: report INFO = N + (number of unconverged */
+/* off-diagonals), per the INFO contract documented above. */
+
+	*info = *n + *info;
+    }
+
+    return 0;
+
+/* End of CPTEQR */
+
+} /* cpteqr_ */
diff --git a/contrib/libs/clapack/cptrfs.c b/contrib/libs/clapack/cptrfs.c
new file mode 100644
index 0000000000..66e9b3ccb4
--- /dev/null
+++ b/contrib/libs/clapack/cptrfs.c
@@ -0,0 +1,574 @@
+/* cptrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static complex c_b16 = {1.f,0.f};
+
+/* Subroutine */ int cptrfs_(char *uplo, integer *n, integer *nrhs, real *d__, 
+	complex *e, real *df, complex *ef, complex *b, integer *ldb, complex 
+	*x, integer *ldx, real *ferr, real *berr, complex *work, real *rwork, 
+	integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5, 
+	    i__6;
+    real r__1, r__2, r__3, r__4, r__5, r__6, r__7, r__8, r__9, r__10, r__11, 
+	    r__12;
+    complex q__1, q__2, q__3;
+
+    /* Builtin functions */
+    double r_imag(complex *);
+    void r_cnjg(complex *, complex *);
+    double c_abs(complex *);
+
+    /* Local variables */
+    integer i__, j;
+    real s;
+    complex bi, cx, dx, ex;
+    integer ix, nz;
+    real eps, safe1, safe2;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int caxpy_(integer *, complex *, complex *, 
+	    integer *, complex *, integer *);
+    integer count;
+    logical upper;
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer isamax_(integer *, real *, integer *);
+    real lstres;
+    extern /* Subroutine */ int cpttrs_(char *, integer *, integer *, real *, 
+	    complex *, complex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPTRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is Hermitian positive definite */
+/* and tridiagonal, and provides error bounds and backward error */
+/* estimates for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the superdiagonal or the subdiagonal of the */
+/* tridiagonal matrix A is stored and the form of the */
+/* factorization: */
+/* = 'U': E is the superdiagonal of A, and A = U**H*D*U; */
+/* = 'L': E is the subdiagonal of A, and A = L*D*L**H. */
+/* (The two forms are equivalent if A is real.) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* D (input) REAL array, dimension (N) */
+/* The n real diagonal elements of the tridiagonal matrix A. */
+
+/* E (input) COMPLEX array, dimension (N-1) */
+/* The (n-1) off-diagonal elements of the tridiagonal matrix A */
+/* (see UPLO). */
+
+/* DF (input) REAL array, dimension (N) */
+/* The n diagonal elements of the diagonal matrix D from */
+/* the factorization computed by CPTTRF. */
+
+/* EF (input) COMPLEX array, dimension (N-1) */
+/* The (n-1) off-diagonal elements of the unit bidiagonal */
+/* factor U or L from the factorization computed by CPTTRF */
+/* (see UPLO). */
+
+/* B (input) COMPLEX array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by CPTTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX array, dimension (N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    --df;
+    --ef;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*ldb < max(1,*n)) {
+	*info = -9;
+    } else if (*ldx < max(1,*n)) {
+	*info = -11;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPTRFS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] = 0.f;
+	    berr[j] = 0.f;
+/* L10: */
+	}
+	return 0;
+    }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+    nz = 4;
+    eps = slamch_("Epsilon");
+    safmin = slamch_("Safe minimum");
+    safe1 = nz * safmin;
+    safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+/* COUNT counts refinement steps (capped at ITMAX = 5 below); */
+/* LSTRES starts large so the first convergence test is not */
+/* blocked by the "BERR halved since last step" condition. */
+
+	count = 1;
+	lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X. Also compute */
+/* abs(A)*abs(x) + abs(b) for use in the backward error bound. */
+
+/* The N == 1 / interior / last-row cases are unrolled because the */
+/* tridiagonal product touches at most three entries per row; */
+/* WORK receives the complex residual and RWORK the real bound. */
+
+	if (upper) {
+	    if (*n == 1) {
+		i__2 = j * b_dim1 + 1;
+		bi.r = b[i__2].r, bi.i = b[i__2].i;
+		i__2 = j * x_dim1 + 1;
+		q__1.r = d__[1] * x[i__2].r, q__1.i = d__[1] * x[i__2].i;
+		dx.r = q__1.r, dx.i = q__1.i;
+		q__1.r = bi.r - dx.r, q__1.i = bi.i - dx.i;
+		work[1].r = q__1.r, work[1].i = q__1.i;
+		rwork[1] = (r__1 = bi.r, dabs(r__1)) + (r__2 = r_imag(&bi), 
+			dabs(r__2)) + ((r__3 = dx.r, dabs(r__3)) + (r__4 = 
+			r_imag(&dx), dabs(r__4)));
+	    } else {
+		i__2 = j * b_dim1 + 1;
+		bi.r = b[i__2].r, bi.i = b[i__2].i;
+		i__2 = j * x_dim1 + 1;
+		q__1.r = d__[1] * x[i__2].r, q__1.i = d__[1] * x[i__2].i;
+		dx.r = q__1.r, dx.i = q__1.i;
+		i__2 = j * x_dim1 + 2;
+		q__1.r = e[1].r * x[i__2].r - e[1].i * x[i__2].i, q__1.i = e[
+			1].r * x[i__2].i + e[1].i * x[i__2].r;
+		ex.r = q__1.r, ex.i = q__1.i;
+		q__2.r = bi.r - dx.r, q__2.i = bi.i - dx.i;
+		q__1.r = q__2.r - ex.r, q__1.i = q__2.i - ex.i;
+		work[1].r = q__1.r, work[1].i = q__1.i;
+		i__2 = j * x_dim1 + 2;
+		rwork[1] = (r__1 = bi.r, dabs(r__1)) + (r__2 = r_imag(&bi), 
+			dabs(r__2)) + ((r__3 = dx.r, dabs(r__3)) + (r__4 = 
+			r_imag(&dx), dabs(r__4))) + ((r__5 = e[1].r, dabs(
+			r__5)) + (r__6 = r_imag(&e[1]), dabs(r__6))) * ((r__7 
+			= x[i__2].r, dabs(r__7)) + (r__8 = r_imag(&x[j * 
+			x_dim1 + 2]), dabs(r__8)));
+		i__2 = *n - 1;
+		for (i__ = 2; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * b_dim1;
+		    bi.r = b[i__3].r, bi.i = b[i__3].i;
+		    r_cnjg(&q__2, &e[i__ - 1]);
+		    i__3 = i__ - 1 + j * x_dim1;
+		    q__1.r = q__2.r * x[i__3].r - q__2.i * x[i__3].i, q__1.i =
+			     q__2.r * x[i__3].i + q__2.i * x[i__3].r;
+		    cx.r = q__1.r, cx.i = q__1.i;
+		    i__3 = i__;
+		    i__4 = i__ + j * x_dim1;
+		    q__1.r = d__[i__3] * x[i__4].r, q__1.i = d__[i__3] * x[
+			    i__4].i;
+		    dx.r = q__1.r, dx.i = q__1.i;
+		    i__3 = i__;
+		    i__4 = i__ + 1 + j * x_dim1;
+		    q__1.r = e[i__3].r * x[i__4].r - e[i__3].i * x[i__4].i, 
+			    q__1.i = e[i__3].r * x[i__4].i + e[i__3].i * x[
+			    i__4].r;
+		    ex.r = q__1.r, ex.i = q__1.i;
+		    i__3 = i__;
+		    q__3.r = bi.r - cx.r, q__3.i = bi.i - cx.i;
+		    q__2.r = q__3.r - dx.r, q__2.i = q__3.i - dx.i;
+		    q__1.r = q__2.r - ex.r, q__1.i = q__2.i - ex.i;
+		    work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+		    i__3 = i__ - 1;
+		    i__4 = i__ - 1 + j * x_dim1;
+		    i__5 = i__;
+		    i__6 = i__ + 1 + j * x_dim1;
+		    rwork[i__] = (r__1 = bi.r, dabs(r__1)) + (r__2 = r_imag(&
+			    bi), dabs(r__2)) + ((r__3 = e[i__3].r, dabs(r__3))
+			     + (r__4 = r_imag(&e[i__ - 1]), dabs(r__4))) * ((
+			    r__5 = x[i__4].r, dabs(r__5)) + (r__6 = r_imag(&x[
+			    i__ - 1 + j * x_dim1]), dabs(r__6))) + ((r__7 = 
+			    dx.r, dabs(r__7)) + (r__8 = r_imag(&dx), dabs(
+			    r__8))) + ((r__9 = e[i__5].r, dabs(r__9)) + (
+			    r__10 = r_imag(&e[i__]), dabs(r__10))) * ((r__11 =
+			     x[i__6].r, dabs(r__11)) + (r__12 = r_imag(&x[i__ 
+			    + 1 + j * x_dim1]), dabs(r__12)));
+/* L30: */
+		}
+		i__2 = *n + j * b_dim1;
+		bi.r = b[i__2].r, bi.i = b[i__2].i;
+		r_cnjg(&q__2, &e[*n - 1]);
+		i__2 = *n - 1 + j * x_dim1;
+		q__1.r = q__2.r * x[i__2].r - q__2.i * x[i__2].i, q__1.i = 
+			q__2.r * x[i__2].i + q__2.i * x[i__2].r;
+		cx.r = q__1.r, cx.i = q__1.i;
+		i__2 = *n;
+		i__3 = *n + j * x_dim1;
+		q__1.r = d__[i__2] * x[i__3].r, q__1.i = d__[i__2] * x[i__3]
+			.i;
+		dx.r = q__1.r, dx.i = q__1.i;
+		i__2 = *n;
+		q__2.r = bi.r - cx.r, q__2.i = bi.i - cx.i;
+		q__1.r = q__2.r - dx.r, q__1.i = q__2.i - dx.i;
+		work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+		i__2 = *n - 1;
+		i__3 = *n - 1 + j * x_dim1;
+		rwork[*n] = (r__1 = bi.r, dabs(r__1)) + (r__2 = r_imag(&bi), 
+			dabs(r__2)) + ((r__3 = e[i__2].r, dabs(r__3)) + (r__4 
+			= r_imag(&e[*n - 1]), dabs(r__4))) * ((r__5 = x[i__3]
+			.r, dabs(r__5)) + (r__6 = r_imag(&x[*n - 1 + j * 
+			x_dim1]), dabs(r__6))) + ((r__7 = dx.r, dabs(r__7)) + 
+			(r__8 = r_imag(&dx), dabs(r__8)));
+	    }
+	} else {
+
+/* UPLO = 'L': same residual, but E is the subdiagonal so the */
+/* conjugation is applied to the super-diagonal term instead. */
+
+	    if (*n == 1) {
+		i__2 = j * b_dim1 + 1;
+		bi.r = b[i__2].r, bi.i = b[i__2].i;
+		i__2 = j * x_dim1 + 1;
+		q__1.r = d__[1] * x[i__2].r, q__1.i = d__[1] * x[i__2].i;
+		dx.r = q__1.r, dx.i = q__1.i;
+		q__1.r = bi.r - dx.r, q__1.i = bi.i - dx.i;
+		work[1].r = q__1.r, work[1].i = q__1.i;
+		rwork[1] = (r__1 = bi.r, dabs(r__1)) + (r__2 = r_imag(&bi), 
+			dabs(r__2)) + ((r__3 = dx.r, dabs(r__3)) + (r__4 = 
+			r_imag(&dx), dabs(r__4)));
+	    } else {
+		i__2 = j * b_dim1 + 1;
+		bi.r = b[i__2].r, bi.i = b[i__2].i;
+		i__2 = j * x_dim1 + 1;
+		q__1.r = d__[1] * x[i__2].r, q__1.i = d__[1] * x[i__2].i;
+		dx.r = q__1.r, dx.i = q__1.i;
+		r_cnjg(&q__2, &e[1]);
+		i__2 = j * x_dim1 + 2;
+		q__1.r = q__2.r * x[i__2].r - q__2.i * x[i__2].i, q__1.i = 
+			q__2.r * x[i__2].i + q__2.i * x[i__2].r;
+		ex.r = q__1.r, ex.i = q__1.i;
+		q__2.r = bi.r - dx.r, q__2.i = bi.i - dx.i;
+		q__1.r = q__2.r - ex.r, q__1.i = q__2.i - ex.i;
+		work[1].r = q__1.r, work[1].i = q__1.i;
+		i__2 = j * x_dim1 + 2;
+		rwork[1] = (r__1 = bi.r, dabs(r__1)) + (r__2 = r_imag(&bi), 
+			dabs(r__2)) + ((r__3 = dx.r, dabs(r__3)) + (r__4 = 
+			r_imag(&dx), dabs(r__4))) + ((r__5 = e[1].r, dabs(
+			r__5)) + (r__6 = r_imag(&e[1]), dabs(r__6))) * ((r__7 
+			= x[i__2].r, dabs(r__7)) + (r__8 = r_imag(&x[j * 
+			x_dim1 + 2]), dabs(r__8)));
+		i__2 = *n - 1;
+		for (i__ = 2; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * b_dim1;
+		    bi.r = b[i__3].r, bi.i = b[i__3].i;
+		    i__3 = i__ - 1;
+		    i__4 = i__ - 1 + j * x_dim1;
+		    q__1.r = e[i__3].r * x[i__4].r - e[i__3].i * x[i__4].i, 
+			    q__1.i = e[i__3].r * x[i__4].i + e[i__3].i * x[
+			    i__4].r;
+		    cx.r = q__1.r, cx.i = q__1.i;
+		    i__3 = i__;
+		    i__4 = i__ + j * x_dim1;
+		    q__1.r = d__[i__3] * x[i__4].r, q__1.i = d__[i__3] * x[
+			    i__4].i;
+		    dx.r = q__1.r, dx.i = q__1.i;
+		    r_cnjg(&q__2, &e[i__]);
+		    i__3 = i__ + 1 + j * x_dim1;
+		    q__1.r = q__2.r * x[i__3].r - q__2.i * x[i__3].i, q__1.i =
+			     q__2.r * x[i__3].i + q__2.i * x[i__3].r;
+		    ex.r = q__1.r, ex.i = q__1.i;
+		    i__3 = i__;
+		    q__3.r = bi.r - cx.r, q__3.i = bi.i - cx.i;
+		    q__2.r = q__3.r - dx.r, q__2.i = q__3.i - dx.i;
+		    q__1.r = q__2.r - ex.r, q__1.i = q__2.i - ex.i;
+		    work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+		    i__3 = i__ - 1;
+		    i__4 = i__ - 1 + j * x_dim1;
+		    i__5 = i__;
+		    i__6 = i__ + 1 + j * x_dim1;
+		    rwork[i__] = (r__1 = bi.r, dabs(r__1)) + (r__2 = r_imag(&
+			    bi), dabs(r__2)) + ((r__3 = e[i__3].r, dabs(r__3))
+			     + (r__4 = r_imag(&e[i__ - 1]), dabs(r__4))) * ((
+			    r__5 = x[i__4].r, dabs(r__5)) + (r__6 = r_imag(&x[
+			    i__ - 1 + j * x_dim1]), dabs(r__6))) + ((r__7 = 
+			    dx.r, dabs(r__7)) + (r__8 = r_imag(&dx), dabs(
+			    r__8))) + ((r__9 = e[i__5].r, dabs(r__9)) + (
+			    r__10 = r_imag(&e[i__]), dabs(r__10))) * ((r__11 =
+			     x[i__6].r, dabs(r__11)) + (r__12 = r_imag(&x[i__ 
+			    + 1 + j * x_dim1]), dabs(r__12)));
+/* L40: */
+		}
+		i__2 = *n + j * b_dim1;
+		bi.r = b[i__2].r, bi.i = b[i__2].i;
+		i__2 = *n - 1;
+		i__3 = *n - 1 + j * x_dim1;
+		q__1.r = e[i__2].r * x[i__3].r - e[i__2].i * x[i__3].i, 
+			q__1.i = e[i__2].r * x[i__3].i + e[i__2].i * x[i__3]
+			.r;
+		cx.r = q__1.r, cx.i = q__1.i;
+		i__2 = *n;
+		i__3 = *n + j * x_dim1;
+		q__1.r = d__[i__2] * x[i__3].r, q__1.i = d__[i__2] * x[i__3]
+			.i;
+		dx.r = q__1.r, dx.i = q__1.i;
+		i__2 = *n;
+		q__2.r = bi.r - cx.r, q__2.i = bi.i - cx.i;
+		q__1.r = q__2.r - dx.r, q__1.i = q__2.i - dx.i;
+		work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+		i__2 = *n - 1;
+		i__3 = *n - 1 + j * x_dim1;
+		rwork[*n] = (r__1 = bi.r, dabs(r__1)) + (r__2 = r_imag(&bi), 
+			dabs(r__2)) + ((r__3 = e[i__2].r, dabs(r__3)) + (r__4 
+			= r_imag(&e[*n - 1]), dabs(r__4))) * ((r__5 = x[i__3]
+			.r, dabs(r__5)) + (r__6 = r_imag(&x[*n - 1 + j * 
+			x_dim1]), dabs(r__6))) + ((r__7 = dx.r, dabs(r__7)) + 
+			(r__8 = r_imag(&dx), dabs(r__8)));
+	    }
+	}
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+	s = 0.f;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (rwork[i__] > safe2) {
+/* Computing MAX */
+		i__3 = i__;
+		r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 = 
+			r_imag(&work[i__]), dabs(r__2))) / rwork[i__];
+		s = dmax(r__3,r__4);
+	    } else {
+/* Computing MAX */
+		i__3 = i__;
+		r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 = 
+			r_imag(&work[i__]), dabs(r__2)) + safe1) / (rwork[i__]
+			 + safe1);
+		s = dmax(r__3,r__4);
+	    }
+/* L50: */
+	}
+	berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+	if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+/* Solve A * dX = R using the CPTTRF factorization, then */
+/* X := X + dX; loop back to L20 for another residual pass. */
+
+	    cpttrs_(uplo, n, &c__1, &df[1], &ef[1], &work[1], n, info);
+	    caxpy_(n, &c_b16, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+	    lstres = berr[j];
+	    ++count;
+	    goto L20;
+	}
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (rwork[i__] > safe2) {
+		i__3 = i__;
+		rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 = 
+			r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+			i__];
+	    } else {
+		i__3 = i__;
+		rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 = 
+			r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+			i__] + safe1;
+	    }
+/* L60: */
+	}
+	ix = isamax_(n, &rwork[1], &c__1);
+	ferr[j] = rwork[ix];
+
+/* Estimate the norm of inv(A). */
+
+/* Solve M(A) * x = e, where M(A) = (m(i,j)) is given by */
+
+/* m(i,j) = abs(A(i,j)), i = j, */
+/* m(i,j) = -abs(A(i,j)), i .ne. j, */
+
+/* and e = [ 1, 1, ..., 1 ]'. Note M(A) = M(L)*D*M(L)'. */
+
+/* Solve M(L) * x = e. */
+
+	rwork[1] = 1.f;
+	i__2 = *n;
+	for (i__ = 2; i__ <= i__2; ++i__) {
+	    rwork[i__] = rwork[i__ - 1] * c_abs(&ef[i__ - 1]) + 1.f;
+/* L70: */
+	}
+
+/* Solve D * M(L)' * x = b. */
+
+	rwork[*n] /= df[*n];
+	for (i__ = *n - 1; i__ >= 1; --i__) {
+	    rwork[i__] = rwork[i__] / df[i__] + rwork[i__ + 1] * c_abs(&ef[
+		    i__]);
+/* L80: */
+	}
+
+/* Compute norm(inv(A)) = max(x(i)), 1<=i<=n. */
+
+	ix = isamax_(n, &rwork[1], &c__1);
+	ferr[j] *= (r__1 = rwork[ix], dabs(r__1));
+
+/* Normalize error. */
+
+	lstres = 0.f;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    r__1 = lstres, r__2 = c_abs(&x[i__ + j * x_dim1]);
+	    lstres = dmax(r__1,r__2);
+/* L90: */
+	}
+	if (lstres != 0.f) {
+	    ferr[j] /= lstres;
+	}
+
+/* L100: */
+    }
+
+    return 0;
+
+/* End of CPTRFS */
+
+} /* cptrfs_ */
diff --git a/contrib/libs/clapack/cptsv.c b/contrib/libs/clapack/cptsv.c
new file mode 100644
index 0000000000..5f9b31fdf3
--- /dev/null
+++ b/contrib/libs/clapack/cptsv.c
@@ -0,0 +1,129 @@
+/* cptsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cptsv_(integer *n, integer *nrhs, real *d__, complex *e, 
+	complex *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern /* Subroutine */ int xerbla_(char *, integer *), cpttrf_(
+	    integer *, real *, complex *, integer *), cpttrs_(char *, integer 
+	    *, integer *, real *, complex *, complex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPTSV computes the solution to a complex system of linear equations */
+/* A*X = B, where A is an N-by-N Hermitian positive definite tridiagonal */
+/* matrix, and X and B are N-by-NRHS matrices. */
+
+/* A is factored as A = L*D*L**H, and the factored form of A is then */
+/* used to solve the system of equations. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the n diagonal elements of the tridiagonal matrix */
+/* A. On exit, the n diagonal elements of the diagonal matrix */
+/* D from the factorization A = L*D*L**H. */
+
+/* E (input/output) COMPLEX array, dimension (N-1) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix A. On exit, the (n-1) subdiagonal elements of the */
+/* unit bidiagonal factor L from the L*D*L**H factorization of */
+/* A. E can also be regarded as the superdiagonal of the unit */
+/* bidiagonal factor U from the U**H*D*U factorization of A. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i is not */
+/* positive definite, and the solution has not been */
+/* computed. The factorization has not been completed */
+/* unless i = N. */
+
+/* ===================================================================== */
+
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*nrhs < 0) {
+	*info = -2;
+    } else if (*ldb < max(1,*n)) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+/* Routine name is blank-padded to 6 characters for XERBLA. */
+	xerbla_("CPTSV ", &i__1);
+	return 0;
+    }
+
+/* Compute the L*D*L' (or U'*D*U) factorization of A. */
+
+    cpttrf_(n, &d__[1], &e[1], info);
+    if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+/* "Lower": E holds the subdiagonal, matching the A = L*D*L**H */
+/* form documented above. On cpttrf_ failure (INFO > 0) the */
+/* solve is skipped and INFO is passed through to the caller. */
+
+	cpttrs_("Lower", n, nrhs, &d__[1], &e[1], &b[b_offset], ldb, info);
+    }
+    return 0;
+
+/* End of CPTSV */
+
+} /* cptsv_ */
diff --git a/contrib/libs/clapack/cptsvx.c b/contrib/libs/clapack/cptsvx.c
new file mode 100644
index 0000000000..2e31446ab9
--- /dev/null
+++ b/contrib/libs/clapack/cptsvx.c
@@ -0,0 +1,285 @@
+/* cptsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cptsvx_(char *fact, integer *n, integer *nrhs, real *d__, 
+	 complex *e, real *df, complex *ef, complex *b, integer *ldb, complex 
+	*x, integer *ldx, real *rcond, real *ferr, real *berr, complex *work, 
+	 real *rwork, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, x_dim1, x_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    real anorm;
+    extern /* Subroutine */ int ccopy_(integer *, complex *, integer *, 
+	    complex *, integer *), scopy_(integer *, real *, integer *, real *
+, integer *);
+    extern doublereal slamch_(char *), clanht_(char *, integer *, 
+	    real *, complex *);
+    logical nofact;
+    extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex 
+	    *, integer *, complex *, integer *), xerbla_(char *, 
+	    integer *), cptcon_(integer *, real *, complex *, real *, 
+	    real *, real *, integer *), cptrfs_(char *, integer *, integer *, 
+	    real *, complex *, real *, complex *, complex *, integer *, 
+	    complex *, integer *, real *, real *, complex *, real *, integer *
+), cpttrf_(integer *, real *, complex *, integer *), 
+	    cpttrs_(char *, integer *, integer *, real *, complex *, complex *
+, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPTSVX uses the factorization A = L*D*L**H to compute the solution */
+/* to a complex system of linear equations A*X = B, where A is an */
+/* N-by-N Hermitian positive definite tridiagonal matrix and X and B */
+/* are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the matrix A is factored as A = L*D*L**H, where L */
+/* is a unit lower bidiagonal matrix and D is diagonal. The */
+/* factorization can also be regarded as having the form */
+/* A = U**H*D*U. */
+
+/* 2. If the leading i-by-i principal minor is not positive definite, */
+/* then the routine returns with INFO = i. Otherwise, the factored */
+/* form of A is used to estimate the condition number of the matrix */
+/* A. If the reciprocal of the condition number is less than machine */
+/* precision, INFO = N+1 is returned as a warning, but the routine */
+/* still goes on to solve for X and compute error bounds as */
+/* described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of the matrix */
+/* A is supplied on entry. */
+/* = 'F': On entry, DF and EF contain the factored form of A. */
+/* D, E, DF, and EF will not be modified. */
+/* = 'N': The matrix A will be copied to DF and EF and */
+/* factored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* D (input) REAL array, dimension (N) */
+/* The n diagonal elements of the tridiagonal matrix A. */
+
+/* E (input) COMPLEX array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of the tridiagonal matrix A. */
+
+/* DF (input or output) REAL array, dimension (N) */
+/* If FACT = 'F', then DF is an input argument and on entry */
+/* contains the n diagonal elements of the diagonal matrix D */
+/* from the L*D*L**H factorization of A. */
+/* If FACT = 'N', then DF is an output argument and on exit */
+/* contains the n diagonal elements of the diagonal matrix D */
+/* from the L*D*L**H factorization of A. */
+
+/* EF (input or output) COMPLEX array, dimension (N-1) */
+/* If FACT = 'F', then EF is an input argument and on entry */
+/* contains the (n-1) subdiagonal elements of the unit */
+/* bidiagonal factor L from the L*D*L**H factorization of A. */
+/* If FACT = 'N', then EF is an output argument and on exit */
+/* contains the (n-1) subdiagonal elements of the unit */
+/* bidiagonal factor L from the L*D*L**H factorization of A. */
+
+/* B (input) COMPLEX array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) REAL */
+/* The reciprocal condition number of the matrix A. If RCOND */
+/* is less than the machine precision (in particular, if */
+/* RCOND = 0), the matrix is singular to working precision. */
+/* This condition is indicated by a return code of INFO > 0. */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in any */
+/* element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX array, dimension (N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: the leading minor of order i of A is */
+/* not positive definite, so the factorization */
+/* could not be completed, and the solution has not */
+/* been computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* The pointer decrements below shift each array so the 1-based */
+/* (Fortran-style) subscripts used throughout this f2c translation */
+/* are valid; B and X additionally get a column-major 2-D offset of */
+/* 1 + leading dimension. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    --df;
+    --ef;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    nofact = lsame_(fact, "N");
+    if (! nofact && ! lsame_(fact, "F")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*ldb < max(1,*n)) {
+	*info = -9;
+    } else if (*ldx < max(1,*n)) {
+	*info = -11;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPTSVX", &i__1);
+	return 0;
+    }
+
+    if (nofact) {
+
+/* Compute the L*D*L' (or U'*D*U) factorization of A. */
+
+/* D and E are copied into DF and EF first so the caller's input */
+/* matrix is preserved; the factorization overwrites the copies. */
+
+	scopy_(n, &d__[1], &c__1, &df[1], &c__1);
+	if (*n > 1) {
+	    i__1 = *n - 1;
+	    ccopy_(&i__1, &e[1], &c__1, &ef[1], &c__1);
+	}
+	cpttrf_(n, &df[1], &ef[1], info);
+
+/* Return if INFO is non-zero. */
+
+	if (*info > 0) {
+	    *rcond = 0.f;
+	    return 0;
+	}
+    }
+
+/* Compute the norm of the matrix A. */
+
+    anorm = clanht_("1", n, &d__[1], &e[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+    cptcon_(n, &df[1], &ef[1], &anorm, rcond, &rwork[1], info);
+
+/* Compute the solution vectors X. */
+
+    clacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    cpttrs_("Lower", n, nrhs, &df[1], &ef[1], &x[x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+    cptrfs_("Lower", n, nrhs, &d__[1], &e[1], &df[1], &ef[1], &b[b_offset], 
+	    ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[1], &rwork[1], 
+	    info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+    if (*rcond < slamch_("Epsilon")) {
+	*info = *n + 1;
+    }
+
+    return 0;
+
+/* End of CPTSVX */
+
+} /* cptsvx_ */
diff --git a/contrib/libs/clapack/cpttrf.c b/contrib/libs/clapack/cpttrf.c
new file mode 100644
index 0000000000..29a877c4dc
--- /dev/null
+++ b/contrib/libs/clapack/cpttrf.c
@@ -0,0 +1,215 @@
+/* cpttrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cpttrf_(integer *n, real *d__, complex *e, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    complex q__1;
+
+    /* Builtin functions */
+    double r_imag(complex *);
+
+    /* Local variables */
+    real f, g;
+    integer i__, i4;
+    real eii, eir;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPTTRF computes the L*D*L' factorization of a complex Hermitian */
+/* positive definite tridiagonal matrix A. The factorization may also */
+/* be regarded as having the form A = U'*D*U. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the n diagonal elements of the tridiagonal matrix */
+/* A. On exit, the n diagonal elements of the diagonal matrix */
+/* D from the L*D*L' factorization of A. */
+
+/* E (input/output) COMPLEX array, dimension (N-1) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix A. On exit, the (n-1) subdiagonal elements of the */
+/* unit bidiagonal factor L from the L*D*L' factorization of A. */
+/* E can also be regarded as the superdiagonal of the unit */
+/* bidiagonal factor U from the U'*D*U factorization of A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, the leading minor of order k is not */
+/* positive definite; if k < N, the factorization could not */
+/* be completed, while if k = N, the factorization was */
+/* completed, but D(N) <= 0. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* (Pointer shifts for 1-based Fortran-style indexing.) */
+
+    /* Parameter adjustments */
+    --e;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+	i__1 = -(*info);
+	xerbla_("CPTTRF", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Compute the L*D*L' (or U'*D*U) factorization of A. */
+
+/* The elimination loop is unrolled by four: the first loop handles */
+/* the (n-1) mod 4 leading steps, the second processes the rest in */
+/* groups of four. Each step stores l(i) = e(i)/d(i) (as separate */
+/* real and imaginary parts f and g) and updates */
+/* d(i+1) -= f*Re(e(i)) + g*Im(e(i)), i.e. d(i+1) -= |e(i)|^2/d(i). */
+
+    i4 = (*n - 1) % 4;
+    i__1 = i4;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if (d__[i__] <= 0.f) {
+	    *info = i__;
+	    goto L20;
+	}
+	i__2 = i__;
+	eir = e[i__2].r;
+	eii = r_imag(&e[i__]);
+	f = eir / d__[i__];
+	g = eii / d__[i__];
+	i__2 = i__;
+	q__1.r = f, q__1.i = g;
+	e[i__2].r = q__1.r, e[i__2].i = q__1.i;
+	d__[i__ + 1] = d__[i__ + 1] - f * eir - g * eii;
+/* L10: */
+    }
+
+    i__1 = *n - 4;
+    for (i__ = i4 + 1; i__ <= i__1; i__ += 4) {
+
+/* Drop out of the loop if d(i) <= 0: the matrix is not positive */
+/* definite. */
+
+	if (d__[i__] <= 0.f) {
+	    *info = i__;
+	    goto L20;
+	}
+
+/* Solve for e(i) and d(i+1). */
+
+	i__2 = i__;
+	eir = e[i__2].r;
+	eii = r_imag(&e[i__]);
+	f = eir / d__[i__];
+	g = eii / d__[i__];
+	i__2 = i__;
+	q__1.r = f, q__1.i = g;
+	e[i__2].r = q__1.r, e[i__2].i = q__1.i;
+	d__[i__ + 1] = d__[i__ + 1] - f * eir - g * eii;
+
+	if (d__[i__ + 1] <= 0.f) {
+	    *info = i__ + 1;
+	    goto L20;
+	}
+
+/* Solve for e(i+1) and d(i+2). */
+
+	i__2 = i__ + 1;
+	eir = e[i__2].r;
+	eii = r_imag(&e[i__ + 1]);
+	f = eir / d__[i__ + 1];
+	g = eii / d__[i__ + 1];
+	i__2 = i__ + 1;
+	q__1.r = f, q__1.i = g;
+	e[i__2].r = q__1.r, e[i__2].i = q__1.i;
+	d__[i__ + 2] = d__[i__ + 2] - f * eir - g * eii;
+
+	if (d__[i__ + 2] <= 0.f) {
+	    *info = i__ + 2;
+	    goto L20;
+	}
+
+/* Solve for e(i+2) and d(i+3). */
+
+	i__2 = i__ + 2;
+	eir = e[i__2].r;
+	eii = r_imag(&e[i__ + 2]);
+	f = eir / d__[i__ + 2];
+	g = eii / d__[i__ + 2];
+	i__2 = i__ + 2;
+	q__1.r = f, q__1.i = g;
+	e[i__2].r = q__1.r, e[i__2].i = q__1.i;
+	d__[i__ + 3] = d__[i__ + 3] - f * eir - g * eii;
+
+	if (d__[i__ + 3] <= 0.f) {
+	    *info = i__ + 3;
+	    goto L20;
+	}
+
+/* Solve for e(i+3) and d(i+4). */
+
+	i__2 = i__ + 3;
+	eir = e[i__2].r;
+	eii = r_imag(&e[i__ + 3]);
+	f = eir / d__[i__ + 3];
+	g = eii / d__[i__ + 3];
+	i__2 = i__ + 3;
+	q__1.r = f, q__1.i = g;
+	e[i__2].r = q__1.r, e[i__2].i = q__1.i;
+	d__[i__ + 4] = d__[i__ + 4] - f * eir - g * eii;
+/* L110: */
+    }
+
+/* Check d(n) for positive definiteness. */
+
+    if (d__[*n] <= 0.f) {
+	*info = *n;
+    }
+
+L20:
+    return 0;
+
+/* End of CPTTRF */
+
+} /* cpttrf_ */
diff --git a/contrib/libs/clapack/cpttrs.c b/contrib/libs/clapack/cpttrs.c
new file mode 100644
index 0000000000..fe730b7129
--- /dev/null
+++ b/contrib/libs/clapack/cpttrs.c
@@ -0,0 +1,176 @@
+/* cpttrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cpttrs_(char *uplo, integer *n, integer *nrhs, real *d__, 
+	 complex *e, complex *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer j, jb, nb, iuplo;
+    logical upper;
+    extern /* Subroutine */ int cptts2_(integer *, integer *, integer *, real 
+	    *, complex *, complex *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPTTRS solves a tridiagonal system of the form */
+/* A * X = B */
+/* using the factorization A = U'*D*U or A = L*D*L' computed by CPTTRF. */
+/* D is a diagonal matrix specified in the vector D, U (or L) is a unit */
+/* bidiagonal matrix whose superdiagonal (subdiagonal) is specified in */
+/* the vector E, and X and B are N by NRHS matrices. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies the form of the factorization and whether the */
+/* vector E is the superdiagonal of the upper bidiagonal factor */
+/* U or the subdiagonal of the lower bidiagonal factor L. */
+/* = 'U': A = U'*D*U, E is the superdiagonal of U */
+/* = 'L': A = L*D*L', E is the subdiagonal of L */
+
+/* N (input) INTEGER */
+/* The order of the tridiagonal matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* D (input) REAL array, dimension (N) */
+/* The n diagonal elements of the diagonal matrix D from the */
+/* factorization A = U'*D*U or A = L*D*L'. */
+
+/* E (input) COMPLEX array, dimension (N-1) */
+/* If UPLO = 'U', the (n-1) superdiagonal elements of the unit */
+/* bidiagonal factor U from the factorization A = U'*D*U. */
+/* If UPLO = 'L', the (n-1) subdiagonal elements of the unit */
+/* bidiagonal factor L from the factorization A = L*D*L'. */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the right hand side vectors B for the system of */
+/* linear equations. */
+/* On exit, the solution vectors, X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments. */
+
+/* (Pointer shifts for 1-based Fortran-style indexing; B gets a */
+/* column-major 2-D offset.) */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = *(unsigned char *)uplo == 'U' || *(unsigned char *)uplo == 'u';
+    if (! upper && ! (*(unsigned char *)uplo == 'L' || *(unsigned char *)uplo 
+	    == 'l')) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CPTTRS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+/* Determine the number of right-hand sides to solve at a time. */
+
+/* The block size NB comes from the ILAENV tuning query (at least 1). */
+/* If NB covers all of NRHS the whole solve is one CPTTS2 call; */
+/* otherwise B is processed NB columns at a time below. */
+
+    if (*nrhs == 1) {
+	nb = 1;
+    } else {
+/* Computing MAX */
+	i__1 = 1, i__2 = ilaenv_(&c__1, "CPTTRS", uplo, n, nrhs, &c_n1, &c_n1);
+	nb = max(i__1,i__2);
+    }
+
+/* Decode UPLO */
+
+    if (upper) {
+	iuplo = 1;
+    } else {
+	iuplo = 0;
+    }
+
+    if (nb >= *nrhs) {
+	cptts2_(&iuplo, n, nrhs, &d__[1], &e[1], &b[b_offset], ldb);
+    } else {
+	i__1 = *nrhs;
+	i__2 = nb;
+	for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+	    i__3 = *nrhs - j + 1;
+	    jb = min(i__3,nb);
+	    cptts2_(&iuplo, n, &jb, &d__[1], &e[1], &b[j * b_dim1 + 1], ldb);
+/* L10: */
+	}
+    }
+
+    return 0;
+
+/* End of CPTTRS */
+
+} /* cpttrs_ */
diff --git a/contrib/libs/clapack/cptts2.c b/contrib/libs/clapack/cptts2.c
new file mode 100644
index 0000000000..a871d79930
--- /dev/null
+++ b/contrib/libs/clapack/cptts2.c
@@ -0,0 +1,315 @@
+/* cptts2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cptts2_(integer *iuplo, integer *n, integer *nrhs, real *
+	d__, complex *e, complex *b, integer *ldb)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+    real r__1;
+    complex q__1, q__2, q__3, q__4;
+
+    /* Builtin functions */
+    void r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    integer i__, j;
+    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer 
+	    *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CPTTS2 solves a tridiagonal system of the form */
+/* A * X = B */
+/* using the factorization A = U'*D*U or A = L*D*L' computed by CPTTRF. */
+/* D is a diagonal matrix specified in the vector D, U (or L) is a unit */
+/* bidiagonal matrix whose superdiagonal (subdiagonal) is specified in */
+/* the vector E, and X and B are N by NRHS matrices. */
+
+/* Arguments */
+/* ========= */
+
+/* IUPLO (input) INTEGER */
+/* Specifies the form of the factorization and whether the */
+/* vector E is the superdiagonal of the upper bidiagonal factor */
+/* U or the subdiagonal of the lower bidiagonal factor L. */
+/* = 1: A = U'*D*U, E is the superdiagonal of U */
+/* = 0: A = L*D*L', E is the subdiagonal of L */
+
+/* N (input) INTEGER */
+/* The order of the tridiagonal matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* D (input) REAL array, dimension (N) */
+/* The n diagonal elements of the diagonal matrix D from the */
+/* factorization A = U'*D*U or A = L*D*L'. */
+
+/* E (input) COMPLEX array, dimension (N-1) */
+/* If IUPLO = 1, the (n-1) superdiagonal elements of the unit */
+/* bidiagonal factor U from the factorization A = U'*D*U. */
+/* If IUPLO = 0, the (n-1) subdiagonal elements of the unit */
+/* bidiagonal factor L from the factorization A = L*D*L'. */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the right hand side vectors B for the system of */
+/* linear equations. */
+/* On exit, the solution vectors, X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+/* (Pointer shifts for 1-based Fortran-style indexing; B gets a */
+/* column-major 2-D offset.) */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    if (*n <= 1) {
+	if (*n == 1) {
+	    r__1 = 1.f / d__[1];
+	    csscal_(nrhs, &r__1, &b[b_offset], ldb);
+	}
+	return 0;
+    }
+
+    if (*iuplo == 1) {
+
+/* Solve A * X = B using the factorization A = U'*D*U, */
+/* overwriting each right hand side vector with its solution. */
+
+/* For NRHS <= 2 each column is handled singly through the */
+/* goto-based loop over j (a direct translation of the Fortran */
+/* control flow); larger NRHS uses the plain nested loops below. */
+
+	if (*nrhs <= 2) {
+	    j = 1;
+L5:
+
+/* Solve U' * x = b. */
+
+	    i__1 = *n;
+	    for (i__ = 2; i__ <= i__1; ++i__) {
+		i__2 = i__ + j * b_dim1;
+		i__3 = i__ + j * b_dim1;
+		i__4 = i__ - 1 + j * b_dim1;
+		r_cnjg(&q__3, &e[i__ - 1]);
+		q__2.r = b[i__4].r * q__3.r - b[i__4].i * q__3.i, q__2.i = b[
+			i__4].r * q__3.i + b[i__4].i * q__3.r;
+		q__1.r = b[i__3].r - q__2.r, q__1.i = b[i__3].i - q__2.i;
+		b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L10: */
+	    }
+
+/* Solve D * U * x = b. */
+
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		i__2 = i__ + j * b_dim1;
+		i__3 = i__ + j * b_dim1;
+		i__4 = i__;
+		q__1.r = b[i__3].r / d__[i__4], q__1.i = b[i__3].i / d__[i__4]
+			;
+		b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L20: */
+	    }
+	    for (i__ = *n - 1; i__ >= 1; --i__) {
+		i__1 = i__ + j * b_dim1;
+		i__2 = i__ + j * b_dim1;
+		i__3 = i__ + 1 + j * b_dim1;
+		i__4 = i__;
+		q__2.r = b[i__3].r * e[i__4].r - b[i__3].i * e[i__4].i, 
+			q__2.i = b[i__3].r * e[i__4].i + b[i__3].i * e[i__4]
+			.r;
+		q__1.r = b[i__2].r - q__2.r, q__1.i = b[i__2].i - q__2.i;
+		b[i__1].r = q__1.r, b[i__1].i = q__1.i;
+/* L30: */
+	    }
+	    if (j < *nrhs) {
+		++j;
+		goto L5;
+	    }
+	} else {
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+
+/* Solve U' * x = b. */
+
+		i__2 = *n;
+		for (i__ = 2; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * b_dim1;
+		    i__4 = i__ + j * b_dim1;
+		    i__5 = i__ - 1 + j * b_dim1;
+		    r_cnjg(&q__3, &e[i__ - 1]);
+		    q__2.r = b[i__5].r * q__3.r - b[i__5].i * q__3.i, q__2.i =
+			     b[i__5].r * q__3.i + b[i__5].i * q__3.r;
+		    q__1.r = b[i__4].r - q__2.r, q__1.i = b[i__4].i - q__2.i;
+		    b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L40: */
+		}
+
+/* Solve D * U * x = b. */
+
+		i__2 = *n + j * b_dim1;
+		i__3 = *n + j * b_dim1;
+		i__4 = *n;
+		q__1.r = b[i__3].r / d__[i__4], q__1.i = b[i__3].i / d__[i__4]
+			;
+		b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+		for (i__ = *n - 1; i__ >= 1; --i__) {
+		    i__2 = i__ + j * b_dim1;
+		    i__3 = i__ + j * b_dim1;
+		    i__4 = i__;
+		    q__2.r = b[i__3].r / d__[i__4], q__2.i = b[i__3].i / d__[
+			    i__4];
+		    i__5 = i__ + 1 + j * b_dim1;
+		    i__6 = i__;
+		    q__3.r = b[i__5].r * e[i__6].r - b[i__5].i * e[i__6].i, 
+			    q__3.i = b[i__5].r * e[i__6].i + b[i__5].i * e[
+			    i__6].r;
+		    q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+		    b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L50: */
+		}
+/* L60: */
+	    }
+	}
+    } else {
+
+/* Solve A * X = B using the factorization A = L*D*L', */
+/* overwriting each right hand side vector with its solution. */
+
+	if (*nrhs <= 2) {
+	    j = 1;
+L65:
+
+/* Solve L * x = b. */
+
+	    i__1 = *n;
+	    for (i__ = 2; i__ <= i__1; ++i__) {
+		i__2 = i__ + j * b_dim1;
+		i__3 = i__ + j * b_dim1;
+		i__4 = i__ - 1 + j * b_dim1;
+		i__5 = i__ - 1;
+		q__2.r = b[i__4].r * e[i__5].r - b[i__4].i * e[i__5].i, 
+			q__2.i = b[i__4].r * e[i__5].i + b[i__4].i * e[i__5]
+			.r;
+		q__1.r = b[i__3].r - q__2.r, q__1.i = b[i__3].i - q__2.i;
+		b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L70: */
+	    }
+
+/* Solve D * L' * x = b. */
+
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		i__2 = i__ + j * b_dim1;
+		i__3 = i__ + j * b_dim1;
+		i__4 = i__;
+		q__1.r = b[i__3].r / d__[i__4], q__1.i = b[i__3].i / d__[i__4]
+			;
+		b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L80: */
+	    }
+	    for (i__ = *n - 1; i__ >= 1; --i__) {
+		i__1 = i__ + j * b_dim1;
+		i__2 = i__ + j * b_dim1;
+		i__3 = i__ + 1 + j * b_dim1;
+		r_cnjg(&q__3, &e[i__]);
+		q__2.r = b[i__3].r * q__3.r - b[i__3].i * q__3.i, q__2.i = b[
+			i__3].r * q__3.i + b[i__3].i * q__3.r;
+		q__1.r = b[i__2].r - q__2.r, q__1.i = b[i__2].i - q__2.i;
+		b[i__1].r = q__1.r, b[i__1].i = q__1.i;
+/* L90: */
+	    }
+	    if (j < *nrhs) {
+		++j;
+		goto L65;
+	    }
+	} else {
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+
+/* Solve L * x = b. */
+
+		i__2 = *n;
+		for (i__ = 2; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * b_dim1;
+		    i__4 = i__ + j * b_dim1;
+		    i__5 = i__ - 1 + j * b_dim1;
+		    i__6 = i__ - 1;
+		    q__2.r = b[i__5].r * e[i__6].r - b[i__5].i * e[i__6].i, 
+			    q__2.i = b[i__5].r * e[i__6].i + b[i__5].i * e[
+			    i__6].r;
+		    q__1.r = b[i__4].r - q__2.r, q__1.i = b[i__4].i - q__2.i;
+		    b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L100: */
+		}
+
+/* Solve D * L' * x = b. */
+
+		i__2 = *n + j * b_dim1;
+		i__3 = *n + j * b_dim1;
+		i__4 = *n;
+		q__1.r = b[i__3].r / d__[i__4], q__1.i = b[i__3].i / d__[i__4]
+			;
+		b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+		for (i__ = *n - 1; i__ >= 1; --i__) {
+		    i__2 = i__ + j * b_dim1;
+		    i__3 = i__ + j * b_dim1;
+		    i__4 = i__;
+		    q__2.r = b[i__3].r / d__[i__4], q__2.i = b[i__3].i / d__[
+			    i__4];
+		    i__5 = i__ + 1 + j * b_dim1;
+		    r_cnjg(&q__4, &e[i__]);
+		    q__3.r = b[i__5].r * q__4.r - b[i__5].i * q__4.i, q__3.i =
+			     b[i__5].r * q__4.i + b[i__5].i * q__4.r;
+		    q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+		    b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L110: */
+		}
+/* L120: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of CPTTS2 */
+
+} /* cptts2_ */
diff --git a/contrib/libs/clapack/crot.c b/contrib/libs/clapack/crot.c
new file mode 100644
index 0000000000..4ebd3f2847
--- /dev/null
+++ b/contrib/libs/clapack/crot.c
@@ -0,0 +1,155 @@
+/* crot.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int crot_(integer *n, complex *cx, integer *incx, complex *
+	cy, integer *incy, real *c__, complex *s)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3, i__4;
+    complex q__1, q__2, q__3, q__4;
+
+    /* Builtin functions */
+    void r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    integer i__, ix, iy;
+    complex stemp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CROT applies a plane rotation, where the cos (C) is real and the */
+/* sin (S) is complex, and the vectors CX and CY are complex. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of elements in the vectors CX and CY. */
+
+/* CX (input/output) COMPLEX array, dimension (N) */
+/* On input, the vector X. */
+/* On output, CX is overwritten with C*X + S*Y. */
+
+/* INCX (input) INTEGER */
+/* The increment between successive values of CY. INCX <> 0. */
+
+/* CY (input/output) COMPLEX array, dimension (N) */
+/* On input, the vector Y. */
+/* On output, CY is overwritten with -CONJG(S)*X + C*Y. */
+
+/* INCY (input) INTEGER */
+/* The increment between successive values of CY. INCX <> 0. */
+
+/* C (input) REAL */
+/* S (input) COMPLEX */
+/* C and S define a rotation */
+/* [ C S ] */
+/* [ -conjg(S) C ] */
+/* where C*C + S*CONJG(S) = 1.0. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* In both loops below, stemp buffers the new CX value */
+/* (C*X + S*Y) so that the update of CY can still read the */
+/* original CX element before it is overwritten. */
+
+    /* Parameter adjustments */
+    --cy;
+    --cx;
+
+    /* Function Body */
+    if (*n <= 0) {
+	return 0;
+    }
+    if (*incx == 1 && *incy == 1) {
+	goto L20;
+    }
+
+/* Code for unequal increments or equal increments not equal to 1 */
+
+/* Negative increments start from the far end of the vector, as in */
+/* the standard BLAS convention. */
+
+    ix = 1;
+    iy = 1;
+    if (*incx < 0) {
+	ix = (-(*n) + 1) * *incx + 1;
+    }
+    if (*incy < 0) {
+	iy = (-(*n) + 1) * *incy + 1;
+    }
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = ix;
+	q__2.r = *c__ * cx[i__2].r, q__2.i = *c__ * cx[i__2].i;
+	i__3 = iy;
+	q__3.r = s->r * cy[i__3].r - s->i * cy[i__3].i, q__3.i = s->r * cy[
+		i__3].i + s->i * cy[i__3].r;
+	q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+	stemp.r = q__1.r, stemp.i = q__1.i;
+	i__2 = iy;
+	i__3 = iy;
+	q__2.r = *c__ * cy[i__3].r, q__2.i = *c__ * cy[i__3].i;
+	r_cnjg(&q__4, s);
+	i__4 = ix;
+	q__3.r = q__4.r * cx[i__4].r - q__4.i * cx[i__4].i, q__3.i = q__4.r * 
+		cx[i__4].i + q__4.i * cx[i__4].r;
+	q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+	cy[i__2].r = q__1.r, cy[i__2].i = q__1.i;
+	i__2 = ix;
+	cx[i__2].r = stemp.r, cx[i__2].i = stemp.i;
+	ix += *incx;
+	iy += *incy;
+/* L10: */
+    }
+    return 0;
+
+/* Code for both increments equal to 1 */
+
+L20:
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = i__;
+	q__2.r = *c__ * cx[i__2].r, q__2.i = *c__ * cx[i__2].i;
+	i__3 = i__;
+	q__3.r = s->r * cy[i__3].r - s->i * cy[i__3].i, q__3.i = s->r * cy[
+		i__3].i + s->i * cy[i__3].r;
+	q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+	stemp.r = q__1.r, stemp.i = q__1.i;
+	i__2 = i__;
+	i__3 = i__;
+	q__2.r = *c__ * cy[i__3].r, q__2.i = *c__ * cy[i__3].i;
+	r_cnjg(&q__4, s);
+	i__4 = i__;
+	q__3.r = q__4.r * cx[i__4].r - q__4.i * cx[i__4].i, q__3.i = q__4.r * 
+		cx[i__4].i + q__4.i * cx[i__4].r;
+	q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+	cy[i__2].r = q__1.r, cy[i__2].i = q__1.i;
+	i__2 = i__;
+	cx[i__2].r = stemp.r, cx[i__2].i = stemp.i;
+/* L30: */
+    }
+    return 0;
+} /* crot_ */
diff --git a/contrib/libs/clapack/cspcon.c b/contrib/libs/clapack/cspcon.c
new file mode 100644
index 0000000000..fe65b6908c
--- /dev/null
+++ b/contrib/libs/clapack/cspcon.c
@@ -0,0 +1,195 @@
+/* cspcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cspcon_(char *uplo, integer *n, complex *ap, integer *
+	ipiv, real *anorm, real *rcond, complex *work, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer i__, ip, kase;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    logical upper;
+    extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+	    *, integer *, integer *), xerbla_(char *, integer *);
+    real ainvnm;
+    extern /* Subroutine */ int csptrs_(char *, integer *, integer *, complex
+	    *, integer *, complex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*  Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CSPCON estimates the reciprocal of the condition number (in the */
+/*  1-norm) of a complex symmetric packed matrix A using the */
+/*  factorization A = U*D*U**T or A = L*D*L**T computed by CSPTRF. */
+
+/*  An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/*  condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the details of the factorization are stored */
+/*          as an upper or lower triangular matrix. */
+/*          = 'U':  Upper triangular, form is A = U*D*U**T; */
+/*          = 'L':  Lower triangular, form is A = L*D*L**T. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  AP      (input) COMPLEX array, dimension (N*(N+1)/2) */
+/*          The block diagonal matrix D and the multipliers used to */
+/*          obtain the factor U or L as computed by CSPTRF, stored as a */
+/*          packed triangular matrix. */
+
+/*  IPIV    (input) INTEGER array, dimension (N) */
+/*          Details of the interchanges and the block structure of D */
+/*          as determined by CSPTRF. */
+
+/*  ANORM   (input) REAL */
+/*          The 1-norm of the original matrix A. */
+
+/*  RCOND   (output) REAL */
+/*          The reciprocal of the condition number of the matrix A, */
+/*          computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/*          estimate of the 1-norm of inv(A) computed in this routine. */
+
+/*  WORK    (workspace) COMPLEX array, dimension (2*N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --work;
+    --ipiv;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*anorm < 0.f) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CSPCON", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    *rcond = 0.f;
+    if (*n == 0) {
+	*rcond = 1.f;
+	return 0;
+    } else if (*anorm <= 0.f) {
+	return 0;
+    }
+
+/*     Check that D is nonsingular: an exactly-zero diagonal entry in a pivot block (ipiv(i) > 0 presumably marks a 1-by-1 block -- see CSPTRF) means A is singular, so RCOND is left at 0. */
+
+    if (upper) {
+
+/*        Upper triangular storage: examine D from bottom to top (ip walks the packed-storage index of the diagonal entry A(i,i), starting at n*(n+1)/2 = index of A(n,n)) */
+
+	ip = *n * (*n + 1) / 2;
+	for (i__ = *n; i__ >= 1; --i__) {
+	    i__1 = ip;
+	    if (ipiv[i__] > 0 && (ap[i__1].r == 0.f && ap[i__1].i == 0.f)) {
+		return 0;
+	    }
+	    ip -= i__;
+/* L10: */
+	}
+    } else {
+
+/*        Lower triangular storage: examine D from top to bottom (ip walks the packed-storage index of the diagonal entry A(i,i), starting at 1 = index of A(1,1)). */
+
+	ip = 1;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = ip;
+	    if (ipiv[i__] > 0 && (ap[i__2].r == 0.f && ap[i__2].i == 0.f)) {
+		return 0;
+	    }
+	    ip = ip + *n - i__ + 1;
+/* L20: */
+	}
+    }
+
+/*     Estimate the 1-norm of inv(A) via CLACN2's reverse-communication loop: each KASE != 0 request is answered by a CSPTRS solve before looping back. */
+
+    kase = 0;
+L30:
+    clacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+    if (kase != 0) {
+
+/*        Multiply by inv(L*D*L') or inv(U*D*U'). */
+
+	csptrs_(uplo, n, &c__1, &ap[1], &ipiv[1], &work[1], n, info);
+	goto L30;
+    }
+
+/*     Compute the estimate of the reciprocal condition number. */
+
+    if (ainvnm != 0.f) {
+	*rcond = 1.f / ainvnm / *anorm;
+    }
+
+    return 0;
+
+/*     End of CSPCON */
+
+} /* cspcon_ */
diff --git a/contrib/libs/clapack/cspmv.c b/contrib/libs/clapack/cspmv.c
new file mode 100644
index 0000000000..fc9a3d173b
--- /dev/null
+++ b/contrib/libs/clapack/cspmv.c
@@ -0,0 +1,428 @@
+/* cspmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cspmv_(char *uplo, integer *n, complex *alpha, complex *
+	ap, complex *x, integer *incx, complex *beta, complex *y, integer *
+	incy)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3, i__4, i__5;
+    complex q__1, q__2, q__3, q__4;
+
+    /* Local variables */
+    integer i__, j, k, kk, ix, iy, jx, jy, kx, ky, info;
+    complex temp1, temp2;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CSPMV  performs the matrix-vector operation */
+
+/*     y := alpha*A*x + beta*y, */
+
+/*  where alpha and beta are scalars, x and y are n element vectors and */
+/*  A is an n by n symmetric matrix, supplied in packed form. */
+
+/*  Arguments */
+/*  ========== */
+
+/*  UPLO     (input) CHARACTER*1 */
+/*           On entry, UPLO specifies whether the upper or lower */
+/*           triangular part of the matrix A is supplied in the packed */
+/*           array AP as follows: */
+
+/*              UPLO = 'U' or 'u'   The upper triangular part of A is */
+/*                                  supplied in AP. */
+
+/*              UPLO = 'L' or 'l'   The lower triangular part of A is */
+/*                                  supplied in AP. */
+
+/*           Unchanged on exit. */
+
+/*  N        (input) INTEGER */
+/*           On entry, N specifies the order of the matrix A. */
+/*           N must be at least zero. */
+/*           Unchanged on exit. */
+
+/*  ALPHA    (input) COMPLEX */
+/*           On entry, ALPHA specifies the scalar alpha. */
+/*           Unchanged on exit. */
+
+/*  AP       (input) COMPLEX array, dimension at least */
+/*           ( ( N*( N + 1 ) )/2 ). */
+/*           Before entry, with  UPLO = 'U' or 'u', the array AP must */
+/*           contain the upper triangular part of the symmetric matrix */
+/*           packed sequentially, column by column, so that AP( 1 ) */
+/*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */
+/*           and a( 2, 2 ) respectively, and so on. */
+/*           Before entry, with UPLO = 'L' or 'l', the array AP must */
+/*           contain the lower triangular part of the symmetric matrix */
+/*           packed sequentially, column by column, so that AP( 1 ) */
+/*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */
+/*           and a( 3, 1 ) respectively, and so on. */
+/*           Unchanged on exit. */
+
+/*  X        (input) COMPLEX array, dimension at least */
+/*           ( 1 + ( N - 1 )*abs( INCX ) ). */
+/*           Before entry, the incremented array X must contain the N- */
+/*           element vector x. */
+/*           Unchanged on exit. */
+
+/*  INCX     (input) INTEGER */
+/*           On entry, INCX specifies the increment for the elements of */
+/*           X. INCX must not be zero. */
+/*           Unchanged on exit. */
+
+/*  BETA     (input) COMPLEX */
+/*           On entry, BETA specifies the scalar beta. When BETA is */
+/*           supplied as zero then Y need not be set on input. */
+/*           Unchanged on exit. */
+
+/*  Y        (input/output) COMPLEX array, dimension at least */
+/*           ( 1 + ( N - 1 )*abs( INCY ) ). */
+/*           Before entry, the incremented array Y must contain the n */
+/*           element vector y. On exit, Y is overwritten by the updated */
+/*           vector y. */
+
+/*  INCY     (input) INTEGER */
+/*           On entry, INCY specifies the increment for the elements of */
+/*           Y. INCY must not be zero. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --y;
+    --x;
+    --ap;
+
+    /* Function Body */
+    info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*incx == 0) {
+	info = 6;
+    } else if (*incy == 0) {
+	info = 9;
+    }
+    if (info != 0) {
+	xerbla_("CSPMV ", &info);
+	return 0;
+    }
+
+/*     Quick return if possible (n == 0, or alpha == 0 with beta == 1 leaves y unchanged). */
+
+    if (*n == 0 || alpha->r == 0.f && alpha->i == 0.f && (beta->r == 1.f &&
+	    beta->i == 0.f)) {
+	return 0;
+    }
+
+/*     Set up the start points in X and Y (for negative increments the traversal starts from the far end of the array). */
+
+    if (*incx > 0) {
+	kx = 1;
+    } else {
+	kx = 1 - (*n - 1) * *incx;
+    }
+    if (*incy > 0) {
+	ky = 1;
+    } else {
+	ky = 1 - (*n - 1) * *incy;
+    }
+
+/*     Start the operations. In this version the elements of the array AP */
+/*     are accessed sequentially with one pass through AP. */
+
+/*     First form  y := beta*y  (skipped when beta == 1; when beta == 0, y is simply zeroed without being read). */
+
+    if (beta->r != 1.f || beta->i != 0.f) {
+	if (*incy == 1) {
+	    if (beta->r == 0.f && beta->i == 0.f) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = i__;
+		    y[i__2].r = 0.f, y[i__2].i = 0.f;
+/* L10: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = i__;
+		    i__3 = i__;
+		    q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+			    q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+			    .r;
+		    y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+/* L20: */
+		}
+	    }
+	} else {
+	    iy = ky;
+	    if (beta->r == 0.f && beta->i == 0.f) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = iy;
+		    y[i__2].r = 0.f, y[i__2].i = 0.f;
+		    iy += *incy;
+/* L30: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = iy;
+		    i__3 = iy;
+		    q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+			    q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+			    .r;
+		    y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+		    iy += *incy;
+/* L40: */
+		}
+	    }
+	}
+    }
+    if (alpha->r == 0.f && alpha->i == 0.f) {
+	return 0;
+    }
+    kk = 1;
+    if (lsame_(uplo, "U")) {
+
+/*        Form y when AP contains the upper triangle: for each column j, temp1 = alpha*x(j) is scattered along the column into y(1..j-1), while temp2 accumulates the dot product of column j with x for the symmetric contribution to y(j). */
+
+	if (*incx == 1 && *incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+			 alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+		temp1.r = q__1.r, temp1.i = q__1.i;
+		temp2.r = 0.f, temp2.i = 0.f;
+		k = kk;
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = k;
+		    q__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i,
+			    q__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5]
+			    .r;
+		    q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+		    y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+		    i__3 = k;
+		    i__4 = i__;
+		    q__2.r = ap[i__3].r * x[i__4].r - ap[i__3].i * x[i__4].i,
+			    q__2.i = ap[i__3].r * x[i__4].i + ap[i__3].i * x[
+			    i__4].r;
+		    q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+		    temp2.r = q__1.r, temp2.i = q__1.i;
+		    ++k;
+/* L50: */
+		}
+		i__2 = j;
+		i__3 = j;
+		i__4 = kk + j - 1;
+		q__3.r = temp1.r * ap[i__4].r - temp1.i * ap[i__4].i, q__3.i =
+			 temp1.r * ap[i__4].i + temp1.i * ap[i__4].r;
+		q__2.r = y[i__3].r + q__3.r, q__2.i = y[i__3].i + q__3.i;
+		q__4.r = alpha->r * temp2.r - alpha->i * temp2.i, q__4.i =
+			alpha->r * temp2.i + alpha->i * temp2.r;
+		q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+		y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+		kk += j;
+/* L60: */
+	    }
+	} else {
+	    jx = kx;
+	    jy = ky;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+			 alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+		temp1.r = q__1.r, temp1.i = q__1.i;
+		temp2.r = 0.f, temp2.i = 0.f;
+		ix = kx;
+		iy = ky;
+		i__2 = kk + j - 2;
+		for (k = kk; k <= i__2; ++k) {
+		    i__3 = iy;
+		    i__4 = iy;
+		    i__5 = k;
+		    q__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i,
+			    q__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5]
+			    .r;
+		    q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+		    y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+		    i__3 = k;
+		    i__4 = ix;
+		    q__2.r = ap[i__3].r * x[i__4].r - ap[i__3].i * x[i__4].i,
+			    q__2.i = ap[i__3].r * x[i__4].i + ap[i__3].i * x[
+			    i__4].r;
+		    q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+		    temp2.r = q__1.r, temp2.i = q__1.i;
+		    ix += *incx;
+		    iy += *incy;
+/* L70: */
+		}
+		i__2 = jy;
+		i__3 = jy;
+		i__4 = kk + j - 1;
+		q__3.r = temp1.r * ap[i__4].r - temp1.i * ap[i__4].i, q__3.i =
+			 temp1.r * ap[i__4].i + temp1.i * ap[i__4].r;
+		q__2.r = y[i__3].r + q__3.r, q__2.i = y[i__3].i + q__3.i;
+		q__4.r = alpha->r * temp2.r - alpha->i * temp2.i, q__4.i =
+			alpha->r * temp2.i + alpha->i * temp2.r;
+		q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+		y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+		jx += *incx;
+		jy += *incy;
+		kk += j;
+/* L80: */
+	    }
+	}
+    } else {
+
+/*        Form y when AP contains the lower triangle: the diagonal term is applied first, then temp1 = alpha*x(j) is scattered down the column into y(j+1..n) while temp2 accumulates the column-j dot product with x. */
+
+	if (*incx == 1 && *incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+			 alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+		temp1.r = q__1.r, temp1.i = q__1.i;
+		temp2.r = 0.f, temp2.i = 0.f;
+		i__2 = j;
+		i__3 = j;
+		i__4 = kk;
+		q__2.r = temp1.r * ap[i__4].r - temp1.i * ap[i__4].i, q__2.i =
+			 temp1.r * ap[i__4].i + temp1.i * ap[i__4].r;
+		q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+		y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+		k = kk + 1;
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = k;
+		    q__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i,
+			    q__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5]
+			    .r;
+		    q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+		    y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+		    i__3 = k;
+		    i__4 = i__;
+		    q__2.r = ap[i__3].r * x[i__4].r - ap[i__3].i * x[i__4].i,
+			    q__2.i = ap[i__3].r * x[i__4].i + ap[i__3].i * x[
+			    i__4].r;
+		    q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+		    temp2.r = q__1.r, temp2.i = q__1.i;
+		    ++k;
+/* L90: */
+		}
+		i__2 = j;
+		i__3 = j;
+		q__2.r = alpha->r * temp2.r - alpha->i * temp2.i, q__2.i =
+			alpha->r * temp2.i + alpha->i * temp2.r;
+		q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+		y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+		kk += *n - j + 1;
+/* L100: */
+	    }
+	} else {
+	    jx = kx;
+	    jy = ky;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+			 alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+		temp1.r = q__1.r, temp1.i = q__1.i;
+		temp2.r = 0.f, temp2.i = 0.f;
+		i__2 = jy;
+		i__3 = jy;
+		i__4 = kk;
+		q__2.r = temp1.r * ap[i__4].r - temp1.i * ap[i__4].i, q__2.i =
+			 temp1.r * ap[i__4].i + temp1.i * ap[i__4].r;
+		q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+		y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+		ix = jx;
+		iy = jy;
+		i__2 = kk + *n - j;
+		for (k = kk + 1; k <= i__2; ++k) {
+		    ix += *incx;
+		    iy += *incy;
+		    i__3 = iy;
+		    i__4 = iy;
+		    i__5 = k;
+		    q__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i,
+			    q__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5]
+			    .r;
+		    q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+		    y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+		    i__3 = k;
+		    i__4 = ix;
+		    q__2.r = ap[i__3].r * x[i__4].r - ap[i__3].i * x[i__4].i,
+			    q__2.i = ap[i__3].r * x[i__4].i + ap[i__3].i * x[
+			    i__4].r;
+		    q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+		    temp2.r = q__1.r, temp2.i = q__1.i;
+/* L110: */
+		}
+		i__2 = jy;
+		i__3 = jy;
+		q__2.r = alpha->r * temp2.r - alpha->i * temp2.i, q__2.i =
+			alpha->r * temp2.i + alpha->i * temp2.r;
+		q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+		y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+		jx += *incx;
+		jy += *incy;
+		kk += *n - j + 1;
+/* L120: */
+	    }
+	}
+    }
+
+    return 0;
+
+/*     End of CSPMV */
+
+} /* cspmv_ */
diff --git a/contrib/libs/clapack/cspr.c b/contrib/libs/clapack/cspr.c
new file mode 100644
index 0000000000..9b49f94b29
--- /dev/null
+++ b/contrib/libs/clapack/cspr.c
@@ -0,0 +1,339 @@
+/* cspr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cspr_(char *uplo, integer *n, complex *alpha, complex *x,
+	 integer *incx, complex *ap)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3, i__4, i__5;
+    complex q__1, q__2;
+
+    /* Local variables */
+    integer i__, j, k, kk, ix, jx, kx, info;
+    complex temp;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CSPR    performs the symmetric rank 1 operation */
+
+/*     A := alpha*x*x**T + A  (symmetric, not Hermitian: the code below applies no conjugation), */
+
+/*  where alpha is a complex scalar, x is an n element vector and A is an */
+/*  n by n symmetric matrix, supplied in packed form. */
+
+/*  Arguments */
+/*  ========== */
+
+/*  UPLO     (input) CHARACTER*1 */
+/*           On entry, UPLO specifies whether the upper or lower */
+/*           triangular part of the matrix A is supplied in the packed */
+/*           array AP as follows: */
+
+/*              UPLO = 'U' or 'u'   The upper triangular part of A is */
+/*                                  supplied in AP. */
+
+/*              UPLO = 'L' or 'l'   The lower triangular part of A is */
+/*                                  supplied in AP. */
+
+/*           Unchanged on exit. */
+
+/*  N        (input) INTEGER */
+/*           On entry, N specifies the order of the matrix A. */
+/*           N must be at least zero. */
+/*           Unchanged on exit. */
+
+/*  ALPHA    (input) COMPLEX */
+/*           On entry, ALPHA specifies the scalar alpha. */
+/*           Unchanged on exit. */
+
+/*  X        (input) COMPLEX array, dimension at least */
+/*           ( 1 + ( N - 1 )*abs( INCX ) ). */
+/*           Before entry, the incremented array X must contain the N- */
+/*           element vector x. */
+/*           Unchanged on exit. */
+
+/*  INCX     (input) INTEGER */
+/*           On entry, INCX specifies the increment for the elements of */
+/*           X. INCX must not be zero. */
+/*           Unchanged on exit. */
+
+/*  AP       (input/output) COMPLEX array, dimension at least */
+/*           ( ( N*( N + 1 ) )/2 ). */
+/*           Before entry, with  UPLO = 'U' or 'u', the array AP must */
+/*           contain the upper triangular part of the symmetric matrix */
+/*           packed sequentially, column by column, so that AP( 1 ) */
+/*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */
+/*           and a( 2, 2 ) respectively, and so on. On exit, the array */
+/*           AP is overwritten by the upper triangular part of the */
+/*           updated matrix. */
+/*           Before entry, with UPLO = 'L' or 'l', the array AP must */
+/*           contain the lower triangular part of the symmetric matrix */
+/*           packed sequentially, column by column, so that AP( 1 ) */
+/*           contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */
+/*           and a( 3, 1 ) respectively, and so on. On exit, the array */
+/*           AP is overwritten by the lower triangular part of the */
+/*           updated matrix. */
+/*           Note that the imaginary parts of the diagonal elements need */
+/*           not be set, they are assumed to be zero, and on exit they */
+/*           are set to zero. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    --x;
+
+    /* Function Body */
+    info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*incx == 0) {
+	info = 5;
+    }
+    if (info != 0) {
+	xerbla_("CSPR  ", &info);
+	return 0;
+    }
+
+/*     Quick return if possible. */
+
+    if (*n == 0 || alpha->r == 0.f && alpha->i == 0.f) {
+	return 0;
+    }
+
+/*     Set the start point in X if the increment is not unity (kx is deliberately left unset for INCX == 1, where the unit-stride code paths below index x by j directly). */
+
+    if (*incx <= 0) {
+	kx = 1 - (*n - 1) * *incx;
+    } else if (*incx != 1) {
+	kx = 1;
+    }
+
+/*     Start the operations. In this version the elements of the array AP */
+/*     are accessed sequentially with one pass through AP. */
+
+    kk = 1;
+    if (lsame_(uplo, "U")) {
+
+/*        Form A when upper triangle is stored in AP (kk is the packed index of the top of column j; columns with x(j) == 0 only rewrite the diagonal element in place). */
+
+	if (*incx == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+		    i__2 = j;
+		    q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+			    q__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+			    .r;
+		    temp.r = q__1.r, temp.i = q__1.i;
+		    k = kk;
+		    i__2 = j - 1;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = k;
+			i__4 = k;
+			i__5 = i__;
+			q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+				q__2.i = x[i__5].r * temp.i + x[i__5].i *
+				temp.r;
+			q__1.r = ap[i__4].r + q__2.r, q__1.i = ap[i__4].i +
+				q__2.i;
+			ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+			++k;
+/* L10: */
+		    }
+		    i__2 = kk + j - 1;
+		    i__3 = kk + j - 1;
+		    i__4 = j;
+		    q__2.r = x[i__4].r * temp.r - x[i__4].i * temp.i, q__2.i =
+			     x[i__4].r * temp.i + x[i__4].i * temp.r;
+		    q__1.r = ap[i__3].r + q__2.r, q__1.i = ap[i__3].i +
+			    q__2.i;
+		    ap[i__2].r = q__1.r, ap[i__2].i = q__1.i;
+		} else {
+		    i__2 = kk + j - 1;
+		    i__3 = kk + j - 1;
+		    ap[i__2].r = ap[i__3].r, ap[i__2].i = ap[i__3].i;
+		}
+		kk += j;
+/* L20: */
+	    }
+	} else {
+	    jx = kx;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+		    i__2 = jx;
+		    q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+			    q__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+			    .r;
+		    temp.r = q__1.r, temp.i = q__1.i;
+		    ix = kx;
+		    i__2 = kk + j - 2;
+		    for (k = kk; k <= i__2; ++k) {
+			i__3 = k;
+			i__4 = k;
+			i__5 = ix;
+			q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+				q__2.i = x[i__5].r * temp.i + x[i__5].i *
+				temp.r;
+			q__1.r = ap[i__4].r + q__2.r, q__1.i = ap[i__4].i +
+				q__2.i;
+			ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+			ix += *incx;
+/* L30: */
+		    }
+		    i__2 = kk + j - 1;
+		    i__3 = kk + j - 1;
+		    i__4 = jx;
+		    q__2.r = x[i__4].r * temp.r - x[i__4].i * temp.i, q__2.i =
+			     x[i__4].r * temp.i + x[i__4].i * temp.r;
+		    q__1.r = ap[i__3].r + q__2.r, q__1.i = ap[i__3].i +
+			    q__2.i;
+		    ap[i__2].r = q__1.r, ap[i__2].i = q__1.i;
+		} else {
+		    i__2 = kk + j - 1;
+		    i__3 = kk + j - 1;
+		    ap[i__2].r = ap[i__3].r, ap[i__2].i = ap[i__3].i;
+		}
+		jx += *incx;
+		kk += j;
+/* L40: */
+	    }
+	}
+    } else {
+
+/*        Form A when lower triangle is stored in AP (kk is the packed index of the diagonal element of column j; columns with x(j) == 0 only rewrite the diagonal element in place). */
+
+	if (*incx == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+		    i__2 = j;
+		    q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+			    q__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+			    .r;
+		    temp.r = q__1.r, temp.i = q__1.i;
+		    i__2 = kk;
+		    i__3 = kk;
+		    i__4 = j;
+		    q__2.r = temp.r * x[i__4].r - temp.i * x[i__4].i, q__2.i =
+			     temp.r * x[i__4].i + temp.i * x[i__4].r;
+		    q__1.r = ap[i__3].r + q__2.r, q__1.i = ap[i__3].i +
+			    q__2.i;
+		    ap[i__2].r = q__1.r, ap[i__2].i = q__1.i;
+		    k = kk + 1;
+		    i__2 = *n;
+		    for (i__ = j + 1; i__ <= i__2; ++i__) {
+			i__3 = k;
+			i__4 = k;
+			i__5 = i__;
+			q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+				q__2.i = x[i__5].r * temp.i + x[i__5].i *
+				temp.r;
+			q__1.r = ap[i__4].r + q__2.r, q__1.i = ap[i__4].i +
+				q__2.i;
+			ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+			++k;
+/* L50: */
+		    }
+		} else {
+		    i__2 = kk;
+		    i__3 = kk;
+		    ap[i__2].r = ap[i__3].r, ap[i__2].i = ap[i__3].i;
+		}
+		kk = kk + *n - j + 1;
+/* L60: */
+	    }
+	} else {
+	    jx = kx;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+		    i__2 = jx;
+		    q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+			    q__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+			    .r;
+		    temp.r = q__1.r, temp.i = q__1.i;
+		    i__2 = kk;
+		    i__3 = kk;
+		    i__4 = jx;
+		    q__2.r = temp.r * x[i__4].r - temp.i * x[i__4].i, q__2.i =
+			     temp.r * x[i__4].i + temp.i * x[i__4].r;
+		    q__1.r = ap[i__3].r + q__2.r, q__1.i = ap[i__3].i +
+			    q__2.i;
+		    ap[i__2].r = q__1.r, ap[i__2].i = q__1.i;
+		    ix = jx;
+		    i__2 = kk + *n - j;
+		    for (k = kk + 1; k <= i__2; ++k) {
+			ix += *incx;
+			i__3 = k;
+			i__4 = k;
+			i__5 = ix;
+			q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+				q__2.i = x[i__5].r * temp.i + x[i__5].i *
+				temp.r;
+			q__1.r = ap[i__4].r + q__2.r, q__1.i = ap[i__4].i +
+				q__2.i;
+			ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+/* L70: */
+		    }
+		} else {
+		    i__2 = kk;
+		    i__3 = kk;
+		    ap[i__2].r = ap[i__3].r, ap[i__2].i = ap[i__3].i;
+		}
+		jx += *incx;
+		kk = kk + *n - j + 1;
+/* L80: */
+	    }
+	}
+    }
+
+    return 0;
+
+/*     End of CSPR */
+
+} /* cspr_ */
diff --git a/contrib/libs/clapack/csprfs.c b/contrib/libs/clapack/csprfs.c
new file mode 100644
index 0000000000..2f385f6380
--- /dev/null
+++ b/contrib/libs/clapack/csprfs.c
@@ -0,0 +1,464 @@
+/* csprfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int csprfs_(char *uplo, integer *n, integer *nrhs, complex *
+ ap, complex *afp, integer *ipiv, complex *b, integer *ldb, complex *x,
+ integer *ldx, real *ferr, real *berr, complex *work, real *rwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer i__, j, k;
+ real s;
+ integer ik, kk;
+ real xk;
+ integer nz;
+ real eps;
+ integer kase;
+ real safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), caxpy_(integer *, complex *, complex *,
+ integer *, complex *, integer *);
+ integer count;
+ extern /* Subroutine */ int cspmv_(char *, integer *, complex *, complex *
+, complex *, integer *, complex *, complex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+ *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real lstres;
+ extern /* Subroutine */ int csptrs_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSPRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is symmetric indefinite */
+/* and packed, and provides error bounds and backward error estimates */
+/* for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AP (input) COMPLEX array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the symmetric matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* AFP (input) COMPLEX array, dimension (N*(N+1)/2) */
+/* The factored form of the matrix A. AFP contains the block */
+/* diagonal matrix D and the multipliers used to obtain the */
+/* factor U or L from the factorization A = U*D*U**T or */
+/* A = L*D*L**T as computed by CSPTRF, stored as a packed */
+/* triangular matrix. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by CSPTRF. */
+
+/* B (input) COMPLEX array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by CSPTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --afp;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ } else if (*ldx < max(1,*n)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CSPRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.f;
+ berr[j] = 0.f;
+/* L10: */
+ }
+ return 0;
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = *n + 1;
+ eps = slamch_("Epsilon");
+ safmin = slamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+ ccopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cspmv_(uplo, n, &q__1, &ap[1], &x[j * x_dim1 + 1], &c__1, &c_b1, &
+ work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[i__] = (r__1 = b[i__3].r, dabs(r__1)) + (r__2 = r_imag(&b[
+ i__ + j * b_dim1]), dabs(r__2));
+/* L30: */
+ }
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+ kk = 1;
+ if (upper) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ i__3 = k + j * x_dim1;
+ xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[k + j
+ * x_dim1]), dabs(r__2));
+ ik = kk;
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = ik;
+ rwork[i__] += ((r__1 = ap[i__4].r, dabs(r__1)) + (r__2 =
+ r_imag(&ap[ik]), dabs(r__2))) * xk;
+ i__4 = ik;
+ i__5 = i__ + j * x_dim1;
+ s += ((r__1 = ap[i__4].r, dabs(r__1)) + (r__2 = r_imag(&
+ ap[ik]), dabs(r__2))) * ((r__3 = x[i__5].r, dabs(
+ r__3)) + (r__4 = r_imag(&x[i__ + j * x_dim1]),
+ dabs(r__4)));
+ ++ik;
+/* L40: */
+ }
+ i__3 = kk + k - 1;
+ rwork[k] = rwork[k] + ((r__1 = ap[i__3].r, dabs(r__1)) + (
+ r__2 = r_imag(&ap[kk + k - 1]), dabs(r__2))) * xk + s;
+ kk += k;
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ i__3 = k + j * x_dim1;
+ xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[k + j
+ * x_dim1]), dabs(r__2));
+ i__3 = kk;
+ rwork[k] += ((r__1 = ap[i__3].r, dabs(r__1)) + (r__2 = r_imag(
+ &ap[kk]), dabs(r__2))) * xk;
+ ik = kk + 1;
+ i__3 = *n;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ i__4 = ik;
+ rwork[i__] += ((r__1 = ap[i__4].r, dabs(r__1)) + (r__2 =
+ r_imag(&ap[ik]), dabs(r__2))) * xk;
+ i__4 = ik;
+ i__5 = i__ + j * x_dim1;
+ s += ((r__1 = ap[i__4].r, dabs(r__1)) + (r__2 = r_imag(&
+ ap[ik]), dabs(r__2))) * ((r__3 = x[i__5].r, dabs(
+ r__3)) + (r__4 = r_imag(&x[i__ + j * x_dim1]),
+ dabs(r__4)));
+ ++ik;
+/* L60: */
+ }
+ rwork[k] += s;
+ kk += *n - k + 1;
+/* L70: */
+ }
+ }
+ s = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+/* Computing MAX */
+ i__3 = i__;
+ r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2))) / rwork[i__];
+ s = dmax(r__3,r__4);
+ } else {
+/* Computing MAX */
+ i__3 = i__;
+ r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + safe1) / (rwork[i__]
+ + safe1);
+ s = dmax(r__3,r__4);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ csptrs_(uplo, n, &c__1, &afp[1], &ipiv[1], &work[1], n, info);
+ caxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use CLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+ i__3 = i__;
+ rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+ i__];
+ } else {
+ i__3 = i__;
+ rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+ i__] + safe1;
+ }
+/* L90: */
+ }
+
+ kase = 0;
+L100:
+ clacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+ csptrs_(uplo, n, &c__1, &afp[1], &ipiv[1], &work[1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L110: */
+ }
+ } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L120: */
+ }
+ csptrs_(uplo, n, &c__1, &afp[1], &ipiv[1], &work[1], n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * x_dim1;
+ r__3 = lstres, r__4 = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[i__ + j * x_dim1]), dabs(r__2));
+ lstres = dmax(r__3,r__4);
+/* L130: */
+ }
+ if (lstres != 0.f) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of CSPRFS */
+
+} /* csprfs_ */
diff --git a/contrib/libs/clapack/cspsv.c b/contrib/libs/clapack/cspsv.c
new file mode 100644
index 0000000000..fd24509b34
--- /dev/null
+++ b/contrib/libs/clapack/cspsv.c
@@ -0,0 +1,176 @@
+/* cspsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cspsv_(char *uplo, integer *n, integer *nrhs, complex *
+ ap, integer *ipiv, complex *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), csptrf_(
+ char *, integer *, complex *, integer *, integer *),
+ csptrs_(char *, integer *, integer *, complex *, integer *,
+ complex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSPSV computes the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric matrix stored in packed format and X */
+/* and B are N-by-NRHS matrices. */
+
+/* The diagonal pivoting method is used to factor A as */
+/* A = U * D * U**T, if UPLO = 'U', or */
+/* A = L * D * L**T, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, D is symmetric and block diagonal with 1-by-1 */
+/* and 2-by-2 diagonal blocks. The factored form of A is then used to */
+/* solve the system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**T or A = L*D*L**T as computed by CSPTRF, stored as */
+/* a packed triangular matrix in the same storage format as A. */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D, as */
+/* determined by CSPTRF. If IPIV(k) > 0, then rows and columns */
+/* k and IPIV(k) were interchanged, and D(k,k) is a 1-by-1 */
+/* diagonal block. If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, */
+/* then rows and columns k-1 and -IPIV(k) were interchanged and */
+/* D(k-1:k,k-1:k) is a 2-by-2 diagonal block. If UPLO = 'L' and */
+/* IPIV(k) = IPIV(k+1) < 0, then rows and columns k+1 and */
+/* -IPIV(k) were interchanged and D(k:k+1,k:k+1) is a 2-by-2 */
+/* diagonal block. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, so the solution could not be */
+/* computed. */
+
+/* Further Details */
+/* =============== */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the symmetric matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = aji) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CSPSV ", &i__1);
+ return 0;
+ }
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+ csptrf_(uplo, n, &ap[1], &ipiv[1], info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ csptrs_(uplo, n, nrhs, &ap[1], &ipiv[1], &b[b_offset], ldb, info);
+
+ }
+ return 0;
+
+/* End of CSPSV */
+
+} /* cspsv_ */
diff --git a/contrib/libs/clapack/cspsvx.c b/contrib/libs/clapack/cspsvx.c
new file mode 100644
index 0000000000..5a2d13a617
--- /dev/null
+++ b/contrib/libs/clapack/cspsvx.c
@@ -0,0 +1,323 @@
+/* cspsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cspsvx_(char *fact, char *uplo, integer *n, integer *
+ nrhs, complex *ap, complex *afp, integer *ipiv, complex *b, integer *
+ ldb, complex *x, integer *ldx, real *rcond, real *ferr, real *berr,
+ complex *work, real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ real anorm;
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *);
+ extern doublereal slamch_(char *);
+ logical nofact;
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), xerbla_(char *,
+ integer *);
+ extern doublereal clansp_(char *, char *, integer *, complex *, real *);
+ extern /* Subroutine */ int cspcon_(char *, integer *, complex *, integer
+ *, real *, real *, complex *, integer *), csprfs_(char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *, complex *, integer *, real *, real *, complex *, real *
+, integer *), csptrf_(char *, integer *, complex *,
+ integer *, integer *), csptrs_(char *, integer *, integer
+ *, complex *, integer *, complex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSPSVX uses the diagonal pivoting factorization A = U*D*U**T or */
+/* A = L*D*L**T to compute the solution to a complex system of linear */
+/* equations A * X = B, where A is an N-by-N symmetric matrix stored */
+/* in packed format and X and B are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the diagonal pivoting method is used to factor A as */
+/* A = U * D * U**T, if UPLO = 'U', or */
+/* A = L * D * L**T, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices and D is symmetric and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* 2. If some D(i,i)=0, so that D is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of A has been */
+/* supplied on entry. */
+/* = 'F': On entry, AFP and IPIV contain the factored form */
+/* of A. AP, AFP and IPIV will not be modified. */
+/* = 'N': The matrix A will be copied to AFP and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AP (input) COMPLEX array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the symmetric matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. */
+
+/* AFP (input or output) COMPLEX array, dimension (N*(N+1)/2) */
+/* If FACT = 'F', then AFP is an input argument and on entry */
+/* contains the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**T or A = L*D*L**T as computed by CSPTRF, stored as */
+/* a packed triangular matrix in the same storage format as A. */
+
+/* If FACT = 'N', then AFP is an output argument and on exit */
+/* contains the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**T or A = L*D*L**T as computed by CSPTRF, stored as */
+/* a packed triangular matrix in the same storage format as A. */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by CSPTRF. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by CSPTRF. */
+
+/* B (input) COMPLEX array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) REAL */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A. If RCOND is less than the machine precision (in */
+/* particular, if RCOND = 0), the matrix is singular to working */
+/* precision. This condition is indicated by a return code of */
+/* INFO > 0. */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: D(i,i) is exactly zero. The factorization */
+/* has been completed but the factor D is exactly */
+/* singular, so the solution and error bounds could */
+/* not be computed. RCOND = 0 is returned. */
+/* = N+1: D is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* Further Details */
+/* =============== */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the symmetric matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = aji) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --afp;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ if (! nofact && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! lsame_(uplo, "U") && ! lsame_(uplo,
+ "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldx < max(1,*n)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CSPSVX", &i__1);
+ return 0;
+ }
+
+ if (nofact) {
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+ i__1 = *n * (*n + 1) / 2;
+ ccopy_(&i__1, &ap[1], &c__1, &afp[1], &c__1);
+ csptrf_(uplo, n, &afp[1], &ipiv[1], info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.f;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ anorm = clansp_("I", uplo, n, &ap[1], &rwork[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ cspcon_(uplo, n, &afp[1], &ipiv[1], &anorm, rcond, &work[1], info);
+
+/* Compute the solution vectors X. */
+
+ clacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ csptrs_(uplo, n, nrhs, &afp[1], &ipiv[1], &x[x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+ csprfs_(uplo, n, nrhs, &ap[1], &afp[1], &ipiv[1], &b[b_offset], ldb, &x[
+ x_offset], ldx, &ferr[1], &berr[1], &work[1], &rwork[1], info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < slamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ return 0;
+
+/* End of CSPSVX */
+
+} /* cspsvx_ */
diff --git a/contrib/libs/clapack/csptrf.c b/contrib/libs/clapack/csptrf.c
new file mode 100644
index 0000000000..753039b077
--- /dev/null
+++ b/contrib/libs/clapack/csptrf.c
@@ -0,0 +1,763 @@
+/* csptrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int csptrf_(char *uplo, integer *n, complex *ap, integer *
+ ipiv, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4, i__5, i__6;
+ real r__1, r__2, r__3, r__4;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_imag(complex *);
+ void c_div(complex *, complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, k;
+ complex t, r1, d11, d12, d21, d22;
+ integer kc, kk, kp;
+ complex wk;
+ integer kx, knc, kpc, npp;
+ complex wkm1, wkp1;
+ integer imax, jmax;
+ extern /* Subroutine */ int cspr_(char *, integer *, complex *, complex *,
+ integer *, complex *);
+ real alpha;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ integer kstep;
+ logical upper;
+ real absakk;
+ extern integer icamax_(integer *, complex *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real colmax, rowmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSPTRF computes the factorization of a complex symmetric matrix A */
+/* stored in packed format using the Bunch-Kaufman diagonal pivoting */
+/* method: */
+
+/* A = U*D*U**T or A = L*D*L**T */
+
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is symmetric and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L, stored as a packed triangular */
+/* matrix overwriting A (see below for further details). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, and division by zero will occur if it */
+/* is used to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* 5-96 - Based on modifications by J. Lewis, Boeing Computer Services */
+/* Company */
+
+/* If UPLO = 'U', then A = U*D*U', where */
+/* U = P(n)*U(n)* ... *P(k)U(k)* ..., */
+/* i.e., U is a product of terms P(k)*U(k), where k decreases from n to */
+/* 1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I v 0 ) k-s */
+/* U(k) = ( 0 I 0 ) s */
+/* ( 0 0 I ) n-k */
+/* k-s s n-k */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */
+/* If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */
+/* and A(k,k), and v overwrites A(1:k-2,k-1:k). */
+
+/* If UPLO = 'L', then A = L*D*L', where */
+/* L = P(1)*L(1)* ... *P(k)*L(k)* ..., */
+/* i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */
+/* n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I 0 0 ) k-1 */
+/* L(k) = ( 0 I 0 ) s */
+/* ( 0 v I ) n-k-s+1 */
+/* k-1 s n-k-s+1 */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */
+/* If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */
+/* and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ipiv;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CSPTRF", &i__1);
+ return 0;
+ }
+
+/* Initialize ALPHA for use in choosing pivot block size. */
+
+ alpha = (sqrt(17.f) + 1.f) / 8.f;
+
+ if (upper) {
+
+/* Factorize A as U*D*U' using the upper triangle of A */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2 */
+
+ k = *n;
+ kc = (*n - 1) * *n / 2 + 1;
+L10:
+ knc = kc;
+
+/* If K < 1, exit from loop */
+
+ if (k < 1) {
+ goto L110;
+ }
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ i__1 = kc + k - 1;
+ absakk = (r__1 = ap[i__1].r, dabs(r__1)) + (r__2 = r_imag(&ap[kc + k
+ - 1]), dabs(r__2));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ imax = icamax_(&i__1, &ap[kc], &c__1);
+ i__1 = kc + imax - 1;
+ colmax = (r__1 = ap[i__1].r, dabs(r__1)) + (r__2 = r_imag(&ap[kc
+ + imax - 1]), dabs(r__2));
+ } else {
+ colmax = 0.f;
+ }
+
+ if (dmax(absakk,colmax) == 0.f) {
+
+/* Column K is zero: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ rowmax = 0.f;
+ jmax = imax;
+ kx = imax * (imax + 1) / 2 + imax;
+ i__1 = k;
+ for (j = imax + 1; j <= i__1; ++j) {
+ i__2 = kx;
+ if ((r__1 = ap[i__2].r, dabs(r__1)) + (r__2 = r_imag(&ap[
+ kx]), dabs(r__2)) > rowmax) {
+ i__2 = kx;
+ rowmax = (r__1 = ap[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&ap[kx]), dabs(r__2));
+ jmax = j;
+ }
+ kx += j;
+/* L20: */
+ }
+ kpc = (imax - 1) * imax / 2 + 1;
+ if (imax > 1) {
+ i__1 = imax - 1;
+ jmax = icamax_(&i__1, &ap[kpc], &c__1);
+/* Computing MAX */
+ i__1 = kpc + jmax - 1;
+ r__3 = rowmax, r__4 = (r__1 = ap[i__1].r, dabs(r__1)) + (
+ r__2 = r_imag(&ap[kpc + jmax - 1]), dabs(r__2));
+ rowmax = dmax(r__3,r__4);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else /* if(complicated condition) */ {
+ i__1 = kpc + imax - 1;
+ if ((r__1 = ap[i__1].r, dabs(r__1)) + (r__2 = r_imag(&ap[
+ kpc + imax - 1]), dabs(r__2)) >= alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+ } else {
+
+/* interchange rows and columns K-1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+ }
+
+ kk = k - kstep + 1;
+ if (kstep == 2) {
+ knc = knc - k + 1;
+ }
+ if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the leading */
+/* submatrix A(1:k,1:k) */
+
+ i__1 = kp - 1;
+ cswap_(&i__1, &ap[knc], &c__1, &ap[kpc], &c__1);
+ kx = kpc + kp - 1;
+ i__1 = kk - 1;
+ for (j = kp + 1; j <= i__1; ++j) {
+ kx = kx + j - 1;
+ i__2 = knc + j - 1;
+ t.r = ap[i__2].r, t.i = ap[i__2].i;
+ i__2 = knc + j - 1;
+ i__3 = kx;
+ ap[i__2].r = ap[i__3].r, ap[i__2].i = ap[i__3].i;
+ i__2 = kx;
+ ap[i__2].r = t.r, ap[i__2].i = t.i;
+/* L30: */
+ }
+ i__1 = knc + kk - 1;
+ t.r = ap[i__1].r, t.i = ap[i__1].i;
+ i__1 = knc + kk - 1;
+ i__2 = kpc + kp - 1;
+ ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+ i__1 = kpc + kp - 1;
+ ap[i__1].r = t.r, ap[i__1].i = t.i;
+ if (kstep == 2) {
+ i__1 = kc + k - 2;
+ t.r = ap[i__1].r, t.i = ap[i__1].i;
+ i__1 = kc + k - 2;
+ i__2 = kc + kp - 1;
+ ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+ i__1 = kc + kp - 1;
+ ap[i__1].r = t.r, ap[i__1].i = t.i;
+ }
+ }
+
+/* Update the leading submatrix */
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = U(k)*D(k) */
+
+/* where U(k) is the k-th column of U */
+
+/* Perform a rank-1 update of A(1:k-1,1:k-1) as */
+
+/* A := A - U(k)*D(k)*U(k)' = A - W(k)*1/D(k)*W(k)' */
+
+ c_div(&q__1, &c_b1, &ap[kc + k - 1]);
+ r1.r = q__1.r, r1.i = q__1.i;
+ i__1 = k - 1;
+ q__1.r = -r1.r, q__1.i = -r1.i;
+ cspr_(uplo, &i__1, &q__1, &ap[kc], &c__1, &ap[1]);
+
+/* Store U(k) in column k */
+
+ i__1 = k - 1;
+ cscal_(&i__1, &r1, &ap[kc], &c__1);
+ } else {
+
+/* 2-by-2 pivot block D(k): columns k and k-1 now hold */
+
+/* ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */
+
+/* where U(k) and U(k-1) are the k-th and (k-1)-th columns */
+/* of U */
+
+/* Perform a rank-2 update of A(1:k-2,1:k-2) as */
+
+/* A := A - ( U(k-1) U(k) )*D(k)*( U(k-1) U(k) )' */
+/* = A - ( W(k-1) W(k) )*inv(D(k))*( W(k-1) W(k) )' */
+
+ if (k > 2) {
+
+ i__1 = k - 1 + (k - 1) * k / 2;
+ d12.r = ap[i__1].r, d12.i = ap[i__1].i;
+ c_div(&q__1, &ap[k - 1 + (k - 2) * (k - 1) / 2], &d12);
+ d22.r = q__1.r, d22.i = q__1.i;
+ c_div(&q__1, &ap[k + (k - 1) * k / 2], &d12);
+ d11.r = q__1.r, d11.i = q__1.i;
+ q__3.r = d11.r * d22.r - d11.i * d22.i, q__3.i = d11.r *
+ d22.i + d11.i * d22.r;
+ q__2.r = q__3.r - 1.f, q__2.i = q__3.i - 0.f;
+ c_div(&q__1, &c_b1, &q__2);
+ t.r = q__1.r, t.i = q__1.i;
+ c_div(&q__1, &t, &d12);
+ d12.r = q__1.r, d12.i = q__1.i;
+
+ for (j = k - 2; j >= 1; --j) {
+ i__1 = j + (k - 2) * (k - 1) / 2;
+ q__3.r = d11.r * ap[i__1].r - d11.i * ap[i__1].i,
+ q__3.i = d11.r * ap[i__1].i + d11.i * ap[i__1]
+ .r;
+ i__2 = j + (k - 1) * k / 2;
+ q__2.r = q__3.r - ap[i__2].r, q__2.i = q__3.i - ap[
+ i__2].i;
+ q__1.r = d12.r * q__2.r - d12.i * q__2.i, q__1.i =
+ d12.r * q__2.i + d12.i * q__2.r;
+ wkm1.r = q__1.r, wkm1.i = q__1.i;
+ i__1 = j + (k - 1) * k / 2;
+ q__3.r = d22.r * ap[i__1].r - d22.i * ap[i__1].i,
+ q__3.i = d22.r * ap[i__1].i + d22.i * ap[i__1]
+ .r;
+ i__2 = j + (k - 2) * (k - 1) / 2;
+ q__2.r = q__3.r - ap[i__2].r, q__2.i = q__3.i - ap[
+ i__2].i;
+ q__1.r = d12.r * q__2.r - d12.i * q__2.i, q__1.i =
+ d12.r * q__2.i + d12.i * q__2.r;
+ wk.r = q__1.r, wk.i = q__1.i;
+ for (i__ = j; i__ >= 1; --i__) {
+ i__1 = i__ + (j - 1) * j / 2;
+ i__2 = i__ + (j - 1) * j / 2;
+ i__3 = i__ + (k - 1) * k / 2;
+ q__3.r = ap[i__3].r * wk.r - ap[i__3].i * wk.i,
+ q__3.i = ap[i__3].r * wk.i + ap[i__3].i *
+ wk.r;
+ q__2.r = ap[i__2].r - q__3.r, q__2.i = ap[i__2].i
+ - q__3.i;
+ i__4 = i__ + (k - 2) * (k - 1) / 2;
+ q__4.r = ap[i__4].r * wkm1.r - ap[i__4].i *
+ wkm1.i, q__4.i = ap[i__4].r * wkm1.i + ap[
+ i__4].i * wkm1.r;
+ q__1.r = q__2.r - q__4.r, q__1.i = q__2.i -
+ q__4.i;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+/* L40: */
+ }
+ i__1 = j + (k - 1) * k / 2;
+ ap[i__1].r = wk.r, ap[i__1].i = wk.i;
+ i__1 = j + (k - 2) * (k - 1) / 2;
+ ap[i__1].r = wkm1.r, ap[i__1].i = wkm1.i;
+/* L50: */
+ }
+
+ }
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k - 1] = -kp;
+ }
+
+/* Decrease K and return to the start of the main loop */
+
+ k -= kstep;
+ kc = knc - k;
+ goto L10;
+
+ } else {
+
+/* Factorize A as L*D*L' using the lower triangle of A */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2 */
+
+ k = 1;
+ kc = 1;
+ npp = *n * (*n + 1) / 2;
+L60:
+ knc = kc;
+
+/* If K > N, exit from loop */
+
+ if (k > *n) {
+ goto L110;
+ }
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ i__1 = kc;
+ absakk = (r__1 = ap[i__1].r, dabs(r__1)) + (r__2 = r_imag(&ap[kc]),
+ dabs(r__2));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ imax = k + icamax_(&i__1, &ap[kc + 1], &c__1);
+ i__1 = kc + imax - k;
+ colmax = (r__1 = ap[i__1].r, dabs(r__1)) + (r__2 = r_imag(&ap[kc
+ + imax - k]), dabs(r__2));
+ } else {
+ colmax = 0.f;
+ }
+
+ if (dmax(absakk,colmax) == 0.f) {
+
+/* Column K is zero: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ rowmax = 0.f;
+ kx = kc + imax - k;
+ i__1 = imax - 1;
+ for (j = k; j <= i__1; ++j) {
+ i__2 = kx;
+ if ((r__1 = ap[i__2].r, dabs(r__1)) + (r__2 = r_imag(&ap[
+ kx]), dabs(r__2)) > rowmax) {
+ i__2 = kx;
+ rowmax = (r__1 = ap[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&ap[kx]), dabs(r__2));
+ jmax = j;
+ }
+ kx = kx + *n - j;
+/* L70: */
+ }
+ kpc = npp - (*n - imax + 1) * (*n - imax + 2) / 2 + 1;
+ if (imax < *n) {
+ i__1 = *n - imax;
+ jmax = imax + icamax_(&i__1, &ap[kpc + 1], &c__1);
+/* Computing MAX */
+ i__1 = kpc + jmax - imax;
+ r__3 = rowmax, r__4 = (r__1 = ap[i__1].r, dabs(r__1)) + (
+ r__2 = r_imag(&ap[kpc + jmax - imax]), dabs(r__2))
+ ;
+ rowmax = dmax(r__3,r__4);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else /* if(complicated condition) */ {
+ i__1 = kpc;
+ if ((r__1 = ap[i__1].r, dabs(r__1)) + (r__2 = r_imag(&ap[
+ kpc]), dabs(r__2)) >= alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+ } else {
+
+/* interchange rows and columns K+1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+ }
+
+ kk = k + kstep - 1;
+ if (kstep == 2) {
+ knc = knc + *n - k + 1;
+ }
+ if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the trailing */
+/* submatrix A(k:n,k:n) */
+
+ if (kp < *n) {
+ i__1 = *n - kp;
+ cswap_(&i__1, &ap[knc + kp - kk + 1], &c__1, &ap[kpc + 1],
+ &c__1);
+ }
+ kx = knc + kp - kk;
+ i__1 = kp - 1;
+ for (j = kk + 1; j <= i__1; ++j) {
+ kx = kx + *n - j + 1;
+ i__2 = knc + j - kk;
+ t.r = ap[i__2].r, t.i = ap[i__2].i;
+ i__2 = knc + j - kk;
+ i__3 = kx;
+ ap[i__2].r = ap[i__3].r, ap[i__2].i = ap[i__3].i;
+ i__2 = kx;
+ ap[i__2].r = t.r, ap[i__2].i = t.i;
+/* L80: */
+ }
+ i__1 = knc;
+ t.r = ap[i__1].r, t.i = ap[i__1].i;
+ i__1 = knc;
+ i__2 = kpc;
+ ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+ i__1 = kpc;
+ ap[i__1].r = t.r, ap[i__1].i = t.i;
+ if (kstep == 2) {
+ i__1 = kc + 1;
+ t.r = ap[i__1].r, t.i = ap[i__1].i;
+ i__1 = kc + 1;
+ i__2 = kc + kp - k;
+ ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+ i__1 = kc + kp - k;
+ ap[i__1].r = t.r, ap[i__1].i = t.i;
+ }
+ }
+
+/* Update the trailing submatrix */
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = L(k)*D(k) */
+
+/* where L(k) is the k-th column of L */
+
+ if (k < *n) {
+
+/* Perform a rank-1 update of A(k+1:n,k+1:n) as */
+
+/* A := A - L(k)*D(k)*L(k)' = A - W(k)*(1/D(k))*W(k)' */
+
+ c_div(&q__1, &c_b1, &ap[kc]);
+ r1.r = q__1.r, r1.i = q__1.i;
+ i__1 = *n - k;
+ q__1.r = -r1.r, q__1.i = -r1.i;
+ cspr_(uplo, &i__1, &q__1, &ap[kc + 1], &c__1, &ap[kc + *n
+ - k + 1]);
+
+/* Store L(k) in column K */
+
+ i__1 = *n - k;
+ cscal_(&i__1, &r1, &ap[kc + 1], &c__1);
+ }
+ } else {
+
+/* 2-by-2 pivot block D(k): columns K and K+1 now hold */
+
+/* ( W(k) W(k+1) ) = ( L(k) L(k+1) )*D(k) */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th columns */
+/* of L */
+
+ if (k < *n - 1) {
+
+/* Perform a rank-2 update of A(k+2:n,k+2:n) as */
+
+/* A := A - ( L(k) L(k+1) )*D(k)*( L(k) L(k+1) )' */
+/* = A - ( W(k) W(k+1) )*inv(D(k))*( W(k) W(k+1) )' */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th */
+/* columns of L */
+
+ i__1 = k + 1 + (k - 1) * ((*n << 1) - k) / 2;
+ d21.r = ap[i__1].r, d21.i = ap[i__1].i;
+ c_div(&q__1, &ap[k + 1 + k * ((*n << 1) - k - 1) / 2], &
+ d21);
+ d11.r = q__1.r, d11.i = q__1.i;
+ c_div(&q__1, &ap[k + (k - 1) * ((*n << 1) - k) / 2], &d21)
+ ;
+ d22.r = q__1.r, d22.i = q__1.i;
+ q__3.r = d11.r * d22.r - d11.i * d22.i, q__3.i = d11.r *
+ d22.i + d11.i * d22.r;
+ q__2.r = q__3.r - 1.f, q__2.i = q__3.i - 0.f;
+ c_div(&q__1, &c_b1, &q__2);
+ t.r = q__1.r, t.i = q__1.i;
+ c_div(&q__1, &t, &d21);
+ d21.r = q__1.r, d21.i = q__1.i;
+
+ i__1 = *n;
+ for (j = k + 2; j <= i__1; ++j) {
+ i__2 = j + (k - 1) * ((*n << 1) - k) / 2;
+ q__3.r = d11.r * ap[i__2].r - d11.i * ap[i__2].i,
+ q__3.i = d11.r * ap[i__2].i + d11.i * ap[i__2]
+ .r;
+ i__3 = j + k * ((*n << 1) - k - 1) / 2;
+ q__2.r = q__3.r - ap[i__3].r, q__2.i = q__3.i - ap[
+ i__3].i;
+ q__1.r = d21.r * q__2.r - d21.i * q__2.i, q__1.i =
+ d21.r * q__2.i + d21.i * q__2.r;
+ wk.r = q__1.r, wk.i = q__1.i;
+ i__2 = j + k * ((*n << 1) - k - 1) / 2;
+ q__3.r = d22.r * ap[i__2].r - d22.i * ap[i__2].i,
+ q__3.i = d22.r * ap[i__2].i + d22.i * ap[i__2]
+ .r;
+ i__3 = j + (k - 1) * ((*n << 1) - k) / 2;
+ q__2.r = q__3.r - ap[i__3].r, q__2.i = q__3.i - ap[
+ i__3].i;
+ q__1.r = d21.r * q__2.r - d21.i * q__2.i, q__1.i =
+ d21.r * q__2.i + d21.i * q__2.r;
+ wkp1.r = q__1.r, wkp1.i = q__1.i;
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + (j - 1) * ((*n << 1) - j) / 2;
+ i__4 = i__ + (j - 1) * ((*n << 1) - j) / 2;
+ i__5 = i__ + (k - 1) * ((*n << 1) - k) / 2;
+ q__3.r = ap[i__5].r * wk.r - ap[i__5].i * wk.i,
+ q__3.i = ap[i__5].r * wk.i + ap[i__5].i *
+ wk.r;
+ q__2.r = ap[i__4].r - q__3.r, q__2.i = ap[i__4].i
+ - q__3.i;
+ i__6 = i__ + k * ((*n << 1) - k - 1) / 2;
+ q__4.r = ap[i__6].r * wkp1.r - ap[i__6].i *
+ wkp1.i, q__4.i = ap[i__6].r * wkp1.i + ap[
+ i__6].i * wkp1.r;
+ q__1.r = q__2.r - q__4.r, q__1.i = q__2.i -
+ q__4.i;
+ ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+/* L90: */
+ }
+ i__2 = j + (k - 1) * ((*n << 1) - k) / 2;
+ ap[i__2].r = wk.r, ap[i__2].i = wk.i;
+ i__2 = j + k * ((*n << 1) - k - 1) / 2;
+ ap[i__2].r = wkp1.r, ap[i__2].i = wkp1.i;
+/* L100: */
+ }
+ }
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k + 1] = -kp;
+ }
+
+/* Increase K and return to the start of the main loop */
+
+ k += kstep;
+ kc = knc + *n - k + 2;
+ goto L60;
+
+ }
+
+L110:
+ return 0;
+
+/* End of CSPTRF */
+
+} /* csptrf_ */
diff --git a/contrib/libs/clapack/csptri.c b/contrib/libs/clapack/csptri.c
new file mode 100644
index 0000000000..67dc455363
--- /dev/null
+++ b/contrib/libs/clapack/csptri.c
@@ -0,0 +1,508 @@
+/* csptri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static complex c_b2 = {0.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int csptri_(char *uplo, integer *n, complex *ap, integer *
+ ipiv, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void c_div(complex *, complex *, complex *);
+
+ /* Local variables */
+ complex d__;
+ integer j, k;
+ complex t, ak;
+ integer kc, kp, kx, kpc, npp;
+ complex akp1, temp, akkp1;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *);
+ extern /* Complex */ VOID cdotu_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ integer kstep;
+ extern /* Subroutine */ int cspmv_(char *, integer *, complex *, complex *
+, complex *, integer *, complex *, complex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ integer kcnext;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSPTRI computes the inverse of a complex symmetric indefinite matrix */
+/* A in packed storage using the factorization A = U*D*U**T or */
+/* A = L*D*L**T computed by CSPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**T; */
+/* = 'L': Lower triangular, form is A = L*D*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/* On entry, the block diagonal matrix D and the multipliers */
+/* used to obtain the factor U or L as computed by CSPTRF, */
+/* stored as a packed triangular matrix. */
+
+/* On exit, if INFO = 0, the (symmetric) inverse of the original */
+/* matrix, stored as a packed triangular matrix. The j-th column */
+/* of inv(A) is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = inv(A)(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', */
+/* AP(i + (j-1)*(2n-j)/2) = inv(A)(i,j) for j<=i<=n. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by CSPTRF. */
+
+/* WORK (workspace) COMPLEX array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) = 0; the matrix is singular and its */
+/* inverse could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --work;
+ --ipiv;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CSPTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check that the diagonal matrix D is nonsingular. */
+
+ if (upper) {
+
+/* Upper triangular storage: examine D from bottom to top */
+
+ kp = *n * (*n + 1) / 2;
+ for (*info = *n; *info >= 1; --(*info)) {
+ i__1 = kp;
+ if (ipiv[*info] > 0 && (ap[i__1].r == 0.f && ap[i__1].i == 0.f)) {
+ return 0;
+ }
+ kp -= *info;
+/* L10: */
+ }
+ } else {
+
+/* Lower triangular storage: examine D from top to bottom. */
+
+ kp = 1;
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ i__2 = kp;
+ if (ipiv[*info] > 0 && (ap[i__2].r == 0.f && ap[i__2].i == 0.f)) {
+ return 0;
+ }
+ kp = kp + *n - *info + 1;
+/* L20: */
+ }
+ }
+ *info = 0;
+
+ if (upper) {
+
+/* Compute inv(A) from the factorization A = U*D*U'. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+ kc = 1;
+L30:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L50;
+ }
+
+ kcnext = kc + k;
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Invert the diagonal block. */
+
+ i__1 = kc + k - 1;
+ c_div(&q__1, &c_b1, &ap[kc + k - 1]);
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+
+/* Compute column K of the inverse. */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ ccopy_(&i__1, &ap[kc], &c__1, &work[1], &c__1);
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cspmv_(uplo, &i__1, &q__1, &ap[1], &work[1], &c__1, &c_b2, &
+ ap[kc], &c__1);
+ i__1 = kc + k - 1;
+ i__2 = kc + k - 1;
+ i__3 = k - 1;
+ cdotu_(&q__2, &i__3, &work[1], &c__1, &ap[kc], &c__1);
+ q__1.r = ap[i__2].r - q__2.r, q__1.i = ap[i__2].i - q__2.i;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ }
+ kstep = 1;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Invert the diagonal block. */
+
+ i__1 = kcnext + k - 1;
+ t.r = ap[i__1].r, t.i = ap[i__1].i;
+ c_div(&q__1, &ap[kc + k - 1], &t);
+ ak.r = q__1.r, ak.i = q__1.i;
+ c_div(&q__1, &ap[kcnext + k], &t);
+ akp1.r = q__1.r, akp1.i = q__1.i;
+ c_div(&q__1, &ap[kcnext + k - 1], &t);
+ akkp1.r = q__1.r, akkp1.i = q__1.i;
+ q__3.r = ak.r * akp1.r - ak.i * akp1.i, q__3.i = ak.r * akp1.i +
+ ak.i * akp1.r;
+ q__2.r = q__3.r - 1.f, q__2.i = q__3.i - 0.f;
+ q__1.r = t.r * q__2.r - t.i * q__2.i, q__1.i = t.r * q__2.i + t.i
+ * q__2.r;
+ d__.r = q__1.r, d__.i = q__1.i;
+ i__1 = kc + k - 1;
+ c_div(&q__1, &akp1, &d__);
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ i__1 = kcnext + k;
+ c_div(&q__1, &ak, &d__);
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ i__1 = kcnext + k - 1;
+ q__2.r = -akkp1.r, q__2.i = -akkp1.i;
+ c_div(&q__1, &q__2, &d__);
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+
+/* Compute columns K and K+1 of the inverse. */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ ccopy_(&i__1, &ap[kc], &c__1, &work[1], &c__1);
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cspmv_(uplo, &i__1, &q__1, &ap[1], &work[1], &c__1, &c_b2, &
+ ap[kc], &c__1);
+ i__1 = kc + k - 1;
+ i__2 = kc + k - 1;
+ i__3 = k - 1;
+ cdotu_(&q__2, &i__3, &work[1], &c__1, &ap[kc], &c__1);
+ q__1.r = ap[i__2].r - q__2.r, q__1.i = ap[i__2].i - q__2.i;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ i__1 = kcnext + k - 1;
+ i__2 = kcnext + k - 1;
+ i__3 = k - 1;
+ cdotu_(&q__2, &i__3, &ap[kc], &c__1, &ap[kcnext], &c__1);
+ q__1.r = ap[i__2].r - q__2.r, q__1.i = ap[i__2].i - q__2.i;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ i__1 = k - 1;
+ ccopy_(&i__1, &ap[kcnext], &c__1, &work[1], &c__1);
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cspmv_(uplo, &i__1, &q__1, &ap[1], &work[1], &c__1, &c_b2, &
+ ap[kcnext], &c__1);
+ i__1 = kcnext + k;
+ i__2 = kcnext + k;
+ i__3 = k - 1;
+ cdotu_(&q__2, &i__3, &work[1], &c__1, &ap[kcnext], &c__1);
+ q__1.r = ap[i__2].r - q__2.r, q__1.i = ap[i__2].i - q__2.i;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ }
+ kstep = 2;
+ kcnext = kcnext + k + 1;
+ }
+
+ kp = (i__1 = ipiv[k], abs(i__1));
+ if (kp != k) {
+
+/* Interchange rows and columns K and KP in the leading */
+/* submatrix A(1:k+1,1:k+1) */
+
+ kpc = (kp - 1) * kp / 2 + 1;
+ i__1 = kp - 1;
+ cswap_(&i__1, &ap[kc], &c__1, &ap[kpc], &c__1);
+ kx = kpc + kp - 1;
+ i__1 = k - 1;
+ for (j = kp + 1; j <= i__1; ++j) {
+ kx = kx + j - 1;
+ i__2 = kc + j - 1;
+ temp.r = ap[i__2].r, temp.i = ap[i__2].i;
+ i__2 = kc + j - 1;
+ i__3 = kx;
+ ap[i__2].r = ap[i__3].r, ap[i__2].i = ap[i__3].i;
+ i__2 = kx;
+ ap[i__2].r = temp.r, ap[i__2].i = temp.i;
+/* L40: */
+ }
+ i__1 = kc + k - 1;
+ temp.r = ap[i__1].r, temp.i = ap[i__1].i;
+ i__1 = kc + k - 1;
+ i__2 = kpc + kp - 1;
+ ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+ i__1 = kpc + kp - 1;
+ ap[i__1].r = temp.r, ap[i__1].i = temp.i;
+ if (kstep == 2) {
+ i__1 = kc + k + k - 1;
+ temp.r = ap[i__1].r, temp.i = ap[i__1].i;
+ i__1 = kc + k + k - 1;
+ i__2 = kc + k + kp - 1;
+ ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+ i__1 = kc + k + kp - 1;
+ ap[i__1].r = temp.r, ap[i__1].i = temp.i;
+ }
+ }
+
+ k += kstep;
+ kc = kcnext;
+ goto L30;
+L50:
+
+ ;
+ } else {
+
+/* Compute inv(A) from the factorization A = L*D*L'. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ npp = *n * (*n + 1) / 2;
+ k = *n;
+ kc = npp;
+L60:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L80;
+ }
+
+ kcnext = kc - (*n - k + 2);
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Invert the diagonal block. */
+
+ i__1 = kc;
+ c_div(&q__1, &c_b1, &ap[kc]);
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+
+/* Compute column K of the inverse. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ ccopy_(&i__1, &ap[kc + 1], &c__1, &work[1], &c__1);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cspmv_(uplo, &i__1, &q__1, &ap[kc + *n - k + 1], &work[1], &
+ c__1, &c_b2, &ap[kc + 1], &c__1);
+ i__1 = kc;
+ i__2 = kc;
+ i__3 = *n - k;
+ cdotu_(&q__2, &i__3, &work[1], &c__1, &ap[kc + 1], &c__1);
+ q__1.r = ap[i__2].r - q__2.r, q__1.i = ap[i__2].i - q__2.i;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ }
+ kstep = 1;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Invert the diagonal block. */
+
+ i__1 = kcnext + 1;
+ t.r = ap[i__1].r, t.i = ap[i__1].i;
+ c_div(&q__1, &ap[kcnext], &t);
+ ak.r = q__1.r, ak.i = q__1.i;
+ c_div(&q__1, &ap[kc], &t);
+ akp1.r = q__1.r, akp1.i = q__1.i;
+ c_div(&q__1, &ap[kcnext + 1], &t);
+ akkp1.r = q__1.r, akkp1.i = q__1.i;
+ q__3.r = ak.r * akp1.r - ak.i * akp1.i, q__3.i = ak.r * akp1.i +
+ ak.i * akp1.r;
+ q__2.r = q__3.r - 1.f, q__2.i = q__3.i - 0.f;
+ q__1.r = t.r * q__2.r - t.i * q__2.i, q__1.i = t.r * q__2.i + t.i
+ * q__2.r;
+ d__.r = q__1.r, d__.i = q__1.i;
+ i__1 = kcnext;
+ c_div(&q__1, &akp1, &d__);
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ i__1 = kc;
+ c_div(&q__1, &ak, &d__);
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ i__1 = kcnext + 1;
+ q__2.r = -akkp1.r, q__2.i = -akkp1.i;
+ c_div(&q__1, &q__2, &d__);
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+
+/* Compute columns K-1 and K of the inverse. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ ccopy_(&i__1, &ap[kc + 1], &c__1, &work[1], &c__1);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cspmv_(uplo, &i__1, &q__1, &ap[kc + (*n - k + 1)], &work[1], &
+ c__1, &c_b2, &ap[kc + 1], &c__1);
+ i__1 = kc;
+ i__2 = kc;
+ i__3 = *n - k;
+ cdotu_(&q__2, &i__3, &work[1], &c__1, &ap[kc + 1], &c__1);
+ q__1.r = ap[i__2].r - q__2.r, q__1.i = ap[i__2].i - q__2.i;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ i__1 = kcnext + 1;
+ i__2 = kcnext + 1;
+ i__3 = *n - k;
+ cdotu_(&q__2, &i__3, &ap[kc + 1], &c__1, &ap[kcnext + 2], &
+ c__1);
+ q__1.r = ap[i__2].r - q__2.r, q__1.i = ap[i__2].i - q__2.i;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ i__1 = *n - k;
+ ccopy_(&i__1, &ap[kcnext + 2], &c__1, &work[1], &c__1);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cspmv_(uplo, &i__1, &q__1, &ap[kc + (*n - k + 1)], &work[1], &
+ c__1, &c_b2, &ap[kcnext + 2], &c__1);
+ i__1 = kcnext;
+ i__2 = kcnext;
+ i__3 = *n - k;
+ cdotu_(&q__2, &i__3, &work[1], &c__1, &ap[kcnext + 2], &c__1);
+ q__1.r = ap[i__2].r - q__2.r, q__1.i = ap[i__2].i - q__2.i;
+ ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+ }
+ kstep = 2;
+ kcnext -= *n - k + 3;
+ }
+
+ kp = (i__1 = ipiv[k], abs(i__1));
+ if (kp != k) {
+
+/* Interchange rows and columns K and KP in the trailing */
+/* submatrix A(k-1:n,k-1:n) */
+
+ kpc = npp - (*n - kp + 1) * (*n - kp + 2) / 2 + 1;
+ if (kp < *n) {
+ i__1 = *n - kp;
+ cswap_(&i__1, &ap[kc + kp - k + 1], &c__1, &ap[kpc + 1], &
+ c__1);
+ }
+ kx = kc + kp - k;
+ i__1 = kp - 1;
+ for (j = k + 1; j <= i__1; ++j) {
+ kx = kx + *n - j + 1;
+ i__2 = kc + j - k;
+ temp.r = ap[i__2].r, temp.i = ap[i__2].i;
+ i__2 = kc + j - k;
+ i__3 = kx;
+ ap[i__2].r = ap[i__3].r, ap[i__2].i = ap[i__3].i;
+ i__2 = kx;
+ ap[i__2].r = temp.r, ap[i__2].i = temp.i;
+/* L70: */
+ }
+ i__1 = kc;
+ temp.r = ap[i__1].r, temp.i = ap[i__1].i;
+ i__1 = kc;
+ i__2 = kpc;
+ ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+ i__1 = kpc;
+ ap[i__1].r = temp.r, ap[i__1].i = temp.i;
+ if (kstep == 2) {
+ i__1 = kc - *n + k - 1;
+ temp.r = ap[i__1].r, temp.i = ap[i__1].i;
+ i__1 = kc - *n + k - 1;
+ i__2 = kc - *n + kp - 1;
+ ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+ i__1 = kc - *n + kp - 1;
+ ap[i__1].r = temp.r, ap[i__1].i = temp.i;
+ }
+ }
+
+ k -= kstep;
+ kc = kcnext;
+ goto L60;
+L80:
+ ;
+ }
+
+ return 0;
+
+/* End of CSPTRI */
+
+} /* csptri_ */
diff --git a/contrib/libs/clapack/csptrs.c b/contrib/libs/clapack/csptrs.c
new file mode 100644
index 0000000000..d976b8c0df
--- /dev/null
+++ b/contrib/libs/clapack/csptrs.c
@@ -0,0 +1,502 @@
+/* csptrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int csptrs_(char *uplo, integer *n, integer *nrhs, complex *
+ ap, integer *ipiv, complex *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void c_div(complex *, complex *, complex *);
+
+ /* Local variables */
+ integer j, k;
+ complex ak, bk;
+ integer kc, kp;
+ complex akm1, bkm1, akm1k;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ complex denom;
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *), cgeru_(integer *, integer *, complex *,
+ complex *, integer *, complex *, integer *, complex *, integer *),
+ cswap_(integer *, complex *, integer *, complex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSPTRS solves a system of linear equations A*X = B with a complex */
+/* symmetric matrix A stored in packed format using the factorization */
+/* A = U*D*U**T or A = L*D*L**T computed by CSPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**T; */
+/* = 'L': Lower triangular, form is A = L*D*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input) COMPLEX array, dimension (N*(N+1)/2) */
+/* The block diagonal matrix D and the multipliers used to */
+/* obtain the factor U or L as computed by CSPTRF, stored as a */
+/* packed triangular matrix. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by CSPTRF. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --ap;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CSPTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Solve A*X = B, where A = U*D*U'. */
+
+/* First solve U*D*X = B, overwriting B with X. */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = *n;
+ kc = *n * (*n + 1) / 2 + 1;
+
+/* KC is maintained as the index in AP of the start of column K */
+/* in the packed upper triangular storage (AP holds the N*(N+1)/2 */
+/* elements of the upper triangle, packed column by column). */
+
+L10:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L30;
+ }
+
+ kc -= k;
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(U(K)), where U(K) is the transformation */
+/* stored in column K of A. */
+
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &ap[kc], &c__1, &b[k + b_dim1], ldb, &
+ b[b_dim1 + 1], ldb);
+
+/* Multiply by the inverse of the diagonal block. */
+
+ c_div(&q__1, &c_b1, &ap[kc + k - 1]);
+ cscal_(nrhs, &q__1, &b[k + b_dim1], ldb);
+ --k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Interchange rows K-1 and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k - 1) {
+ cswap_(nrhs, &b[k - 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(U(K)), where U(K) is the transformation */
+/* stored in columns K-1 and K of A. */
+
+ i__1 = k - 2;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &ap[kc], &c__1, &b[k + b_dim1], ldb, &
+ b[b_dim1 + 1], ldb);
+ i__1 = k - 2;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &ap[kc - (k - 1)], &c__1, &b[k - 1 +
+ b_dim1], ldb, &b[b_dim1 + 1], ldb);
+
+/* Multiply by the inverse of the diagonal block. */
+/* The 2-by-2 system is solved in closed form; every entry of the */
+/* block and of the right hand side is first divided by the */
+/* off-diagonal element AKM1K of the block. */
+
+ i__1 = kc + k - 2;
+ akm1k.r = ap[i__1].r, akm1k.i = ap[i__1].i;
+ c_div(&q__1, &ap[kc - 1], &akm1k);
+ akm1.r = q__1.r, akm1.i = q__1.i;
+ c_div(&q__1, &ap[kc + k - 1], &akm1k);
+ ak.r = q__1.r, ak.i = q__1.i;
+ q__2.r = akm1.r * ak.r - akm1.i * ak.i, q__2.i = akm1.r * ak.i +
+ akm1.i * ak.r;
+ q__1.r = q__2.r - 1.f, q__1.i = q__2.i - 0.f;
+ denom.r = q__1.r, denom.i = q__1.i;
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ c_div(&q__1, &b[k - 1 + j * b_dim1], &akm1k);
+ bkm1.r = q__1.r, bkm1.i = q__1.i;
+ c_div(&q__1, &b[k + j * b_dim1], &akm1k);
+ bk.r = q__1.r, bk.i = q__1.i;
+ i__2 = k - 1 + j * b_dim1;
+ q__3.r = ak.r * bkm1.r - ak.i * bkm1.i, q__3.i = ak.r *
+ bkm1.i + ak.i * bkm1.r;
+ q__2.r = q__3.r - bk.r, q__2.i = q__3.i - bk.i;
+ c_div(&q__1, &q__2, &denom);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ i__2 = k + j * b_dim1;
+ q__3.r = akm1.r * bk.r - akm1.i * bk.i, q__3.i = akm1.r *
+ bk.i + akm1.i * bk.r;
+ q__2.r = q__3.r - bkm1.r, q__2.i = q__3.i - bkm1.i;
+ c_div(&q__1, &q__2, &denom);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L20: */
+ }
+ kc = kc - k + 1;
+ k += -2;
+ }
+
+ goto L10;
+L30:
+
+/* Next solve U'*X = B, overwriting B with X. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+ kc = 1;
+L40:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L50;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Multiply by inv(U'(K)), where U(K) is the transformation */
+/* stored in column K of A. */
+
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Transpose", &i__1, nrhs, &q__1, &b[b_offset], ldb, &ap[kc]
+, &c__1, &c_b1, &b[k + b_dim1], ldb);
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ kc += k;
+ ++k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Multiply by inv(U'(K+1)), where U(K+1) is the transformation */
+/* stored in columns K and K+1 of A. */
+
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Transpose", &i__1, nrhs, &q__1, &b[b_offset], ldb, &ap[kc]
+, &c__1, &c_b1, &b[k + b_dim1], ldb);
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Transpose", &i__1, nrhs, &q__1, &b[b_offset], ldb, &ap[kc
+ + k], &c__1, &c_b1, &b[k + 1 + b_dim1], ldb);
+
+/* Interchange rows K and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ kc = kc + (k << 1) + 1;
+ k += 2;
+ }
+
+ goto L40;
+L50:
+
+ ;
+ } else {
+
+/* Solve A*X = B, where A = L*D*L'. */
+
+/* First solve L*D*X = B, overwriting B with X. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+ kc = 1;
+
+/* In the lower-triangular case KC is maintained as the index in */
+/* AP of the diagonal element A(K,K) in packed lower storage. */
+
+L60:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L80;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(L(K)), where L(K) is the transformation */
+/* stored in column K of A. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &ap[kc + 1], &c__1, &b[k + b_dim1],
+ ldb, &b[k + 1 + b_dim1], ldb);
+ }
+
+/* Multiply by the inverse of the diagonal block. */
+
+ c_div(&q__1, &c_b1, &ap[kc]);
+ cscal_(nrhs, &q__1, &b[k + b_dim1], ldb);
+ kc = kc + *n - k + 1;
+ ++k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Interchange rows K+1 and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k + 1) {
+ cswap_(nrhs, &b[k + 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(L(K)), where L(K) is the transformation */
+/* stored in columns K and K+1 of A. */
+
+ if (k < *n - 1) {
+ i__1 = *n - k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &ap[kc + 2], &c__1, &b[k + b_dim1],
+ ldb, &b[k + 2 + b_dim1], ldb);
+ i__1 = *n - k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &ap[kc + *n - k + 2], &c__1, &b[k
+ + 1 + b_dim1], ldb, &b[k + 2 + b_dim1], ldb);
+ }
+
+/* Multiply by the inverse of the diagonal block. */
+/* As in the upper case, the 2-by-2 block is solved in closed */
+/* form with all entries pre-divided by the off-diagonal AKM1K. */
+
+ i__1 = kc + 1;
+ akm1k.r = ap[i__1].r, akm1k.i = ap[i__1].i;
+ c_div(&q__1, &ap[kc], &akm1k);
+ akm1.r = q__1.r, akm1.i = q__1.i;
+ c_div(&q__1, &ap[kc + *n - k + 1], &akm1k);
+ ak.r = q__1.r, ak.i = q__1.i;
+ q__2.r = akm1.r * ak.r - akm1.i * ak.i, q__2.i = akm1.r * ak.i +
+ akm1.i * ak.r;
+ q__1.r = q__2.r - 1.f, q__1.i = q__2.i - 0.f;
+ denom.r = q__1.r, denom.i = q__1.i;
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ c_div(&q__1, &b[k + j * b_dim1], &akm1k);
+ bkm1.r = q__1.r, bkm1.i = q__1.i;
+ c_div(&q__1, &b[k + 1 + j * b_dim1], &akm1k);
+ bk.r = q__1.r, bk.i = q__1.i;
+ i__2 = k + j * b_dim1;
+ q__3.r = ak.r * bkm1.r - ak.i * bkm1.i, q__3.i = ak.r *
+ bkm1.i + ak.i * bkm1.r;
+ q__2.r = q__3.r - bk.r, q__2.i = q__3.i - bk.i;
+ c_div(&q__1, &q__2, &denom);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ i__2 = k + 1 + j * b_dim1;
+ q__3.r = akm1.r * bk.r - akm1.i * bk.i, q__3.i = akm1.r *
+ bk.i + akm1.i * bk.r;
+ q__2.r = q__3.r - bkm1.r, q__2.i = q__3.i - bkm1.i;
+ c_div(&q__1, &q__2, &denom);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L70: */
+ }
+ kc = kc + (*n - k << 1) + 1;
+ k += 2;
+ }
+
+ goto L60;
+L80:
+
+/* Next solve L'*X = B, overwriting B with X. */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = *n;
+ kc = *n * (*n + 1) / 2 + 1;
+L90:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L100;
+ }
+
+ kc -= *n - k + 1;
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Multiply by inv(L'(K)), where L(K) is the transformation */
+/* stored in column K of A. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Transpose", &i__1, nrhs, &q__1, &b[k + 1 + b_dim1],
+ ldb, &ap[kc + 1], &c__1, &c_b1, &b[k + b_dim1], ldb);
+ }
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ --k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Multiply by inv(L'(K-1)), where L(K-1) is the transformation */
+/* stored in columns K-1 and K of A. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Transpose", &i__1, nrhs, &q__1, &b[k + 1 + b_dim1],
+ ldb, &ap[kc + 1], &c__1, &c_b1, &b[k + b_dim1], ldb);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Transpose", &i__1, nrhs, &q__1, &b[k + 1 + b_dim1],
+ ldb, &ap[kc - (*n - k)], &c__1, &c_b1, &b[k - 1 +
+ b_dim1], ldb);
+ }
+
+/* Interchange rows K and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ kc -= *n - k + 2;
+ k += -2;
+ }
+
+ goto L90;
+L100:
+ ;
+ }
+
+ return 0;
+
+/* End of CSPTRS */
+
+} /* csptrs_ */
diff --git a/contrib/libs/clapack/csrscl.c b/contrib/libs/clapack/csrscl.c
new file mode 100644
index 0000000000..c940703a26
--- /dev/null
+++ b/contrib/libs/clapack/csrscl.c
@@ -0,0 +1,134 @@
+/* csrscl.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int csrscl_(integer *n, real *sa, complex *sx, integer *incx)
+{
+ real mul, cden;
+ logical done;
+ real cnum, cden1, cnum1;
+ extern /* Subroutine */ int slabad_(real *, real *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *);
+ real bignum, smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSRSCL multiplies an n-element complex vector x by the real scalar */
+/* 1/a. This is done without overflow or underflow as long as */
+/* the final result x/a does not overflow or underflow. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of components of the vector x. */
+
+/* SA (input) REAL */
+/* The scalar a which is used to divide each component of x. */
+/* SA must be >= 0, or the subroutine will divide by zero. */
+
+/* SX (input/output) COMPLEX array, dimension */
+/* (1+(N-1)*abs(INCX)) */
+/* The n-element vector x. */
+
+/* INCX (input) INTEGER */
+/* The increment between successive values of the vector SX. */
+/* > 0: SX(1) = X(1) and SX(1+(i-1)*INCX) = x(i), 1< i<= n */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ --sx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+
+/* Get machine parameters */
+
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+
+/* Initialize the denominator to SA and the numerator to 1. */
+
+ cden = *sa;
+ cnum = 1.f;
+
+/* Each pass of the loop below scales X by one safely representable */
+/* factor (SMLNUM, BIGNUM, or finally CNUM/CDEN) so that no */
+/* intermediate product can overflow or underflow. */
+
+L10:
+ cden1 = cden * smlnum;
+ cnum1 = cnum / bignum;
+ if (dabs(cden1) > dabs(cnum) && cnum != 0.f) {
+
+/* Pre-multiply X by SMLNUM if CDEN is large compared to CNUM. */
+
+ mul = smlnum;
+ done = FALSE_;
+ cden = cden1;
+ } else if (dabs(cnum1) > dabs(cden)) {
+
+/* Pre-multiply X by BIGNUM if CDEN is small compared to CNUM. */
+
+ mul = bignum;
+ done = FALSE_;
+ cnum = cnum1;
+ } else {
+
+/* Multiply X by CNUM / CDEN and return. */
+
+ mul = cnum / cden;
+ done = TRUE_;
+ }
+
+/* Scale the vector X by MUL */
+
+ csscal_(n, &mul, &sx[1], incx);
+
+ if (! done) {
+ goto L10;
+ }
+
+ return 0;
+
+/* End of CSRSCL */
+
+} /* csrscl_ */
diff --git a/contrib/libs/clapack/cstedc.c b/contrib/libs/clapack/cstedc.c
new file mode 100644
index 0000000000..6c499a474d
--- /dev/null
+++ b/contrib/libs/clapack/cstedc.c
@@ -0,0 +1,496 @@
+/* cstedc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__9 = 9;
+static integer c__0 = 0;
+static integer c__2 = 2;
+static real c_b17 = 0.f;
+static real c_b18 = 1.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int cstedc_(char *compz, integer *n, real *d__, real *e,
+ complex *z__, integer *ldz, complex *work, integer *lwork, real *
+ rwork, integer *lrwork, integer *iwork, integer *liwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double log(doublereal);
+ integer pow_ii(integer *, integer *);
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k, m;
+ real p;
+ integer ii, ll, lgn;
+ real eps, tiny;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ integer lwmin;
+ extern /* Subroutine */ int claed0_(integer *, integer *, real *, real *,
+ complex *, integer *, complex *, integer *, real *, integer *,
+ integer *);
+ integer start;
+ extern /* Subroutine */ int clacrm_(integer *, integer *, complex *,
+ integer *, real *, integer *, complex *, integer *, real *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), xerbla_(char *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer finish;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), sstedc_(char *, integer *, real *, real *, real *,
+ integer *, real *, integer *, integer *, integer *, integer *), slaset_(char *, integer *, integer *, real *, real *,
+ real *, integer *);
+ integer liwmin, icompz;
+ extern /* Subroutine */ int csteqr_(char *, integer *, real *, real *,
+ complex *, integer *, real *, integer *);
+ real orgnrm;
+ extern doublereal slanst_(char *, integer *, real *, real *);
+ extern /* Subroutine */ int ssterf_(integer *, real *, real *, integer *);
+ integer lrwmin;
+ logical lquery;
+ integer smlsiz;
+ extern /* Subroutine */ int ssteqr_(char *, integer *, real *, real *,
+ real *, integer *, real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSTEDC computes all eigenvalues and, optionally, eigenvectors of a */
+/* symmetric tridiagonal matrix using the divide and conquer method. */
+/* The eigenvectors of a full or band complex Hermitian matrix can also */
+/* be found if CHETRD or CHPTRD or CHBTRD has been used to reduce this */
+/* matrix to tridiagonal form. */
+
+/* This code makes very mild assumptions about floating point */
+/* arithmetic. It will work on machines with a guard digit in */
+/* add/subtract, or on those binary machines without guard digits */
+/* which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. */
+/* It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. See SLAED3 for details. */
+
+/* Arguments */
+/* ========= */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only. */
+/* = 'I': Compute eigenvectors of tridiagonal matrix also. */
+/* = 'V': Compute eigenvectors of original Hermitian matrix */
+/* also. On entry, Z contains the unitary matrix used */
+/* to reduce the original matrix to tridiagonal form. */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the diagonal elements of the tridiagonal matrix. */
+/* On exit, if INFO = 0, the eigenvalues in ascending order. */
+
+/* E (input/output) REAL array, dimension (N-1) */
+/* On entry, the subdiagonal elements of the tridiagonal matrix. */
+/* On exit, E has been destroyed. */
+
+/* Z (input/output) COMPLEX array, dimension (LDZ,N) */
+/* On entry, if COMPZ = 'V', then Z contains the unitary */
+/* matrix used in the reduction to tridiagonal form. */
+/* On exit, if INFO = 0, then if COMPZ = 'V', Z contains the */
+/* orthonormal eigenvectors of the original Hermitian matrix, */
+/* and if COMPZ = 'I', Z contains the orthonormal eigenvectors */
+/* of the symmetric tridiagonal matrix. */
+/* If COMPZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1. */
+/* If eigenvectors are desired, then LDZ >= max(1,N). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If COMPZ = 'N' or 'I', or N <= 1, LWORK must be at least 1. */
+/* If COMPZ = 'V' and N > 1, LWORK must be at least N*N. */
+/* Note that for COMPZ = 'V', then if N is less than or */
+/* equal to the minimum divide size, usually 25, then LWORK need */
+/* only be 1. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK, RWORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* RWORK (workspace/output) REAL array, dimension (MAX(1,LRWORK)) */
+/* On exit, if INFO = 0, RWORK(1) returns the optimal LRWORK. */
+
+/* LRWORK (input) INTEGER */
+/* The dimension of the array RWORK. */
+/* If COMPZ = 'N' or N <= 1, LRWORK must be at least 1. */
+/* If COMPZ = 'V' and N > 1, LRWORK must be at least */
+/* 1 + 3*N + 2*N*lg N + 3*N**2 , */
+/* where lg( N ) = smallest integer k such */
+/* that 2**k >= N. */
+/* If COMPZ = 'I' and N > 1, LRWORK must be at least */
+/* 1 + 4*N + 2*N**2 . */
+/* Note that for COMPZ = 'I' or 'V', then if N is less than or */
+/* equal to the minimum divide size, usually 25, then LRWORK */
+/* need only be max(1,2*(N-1)). */
+
+/* If LRWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If COMPZ = 'N' or N <= 1, LIWORK must be at least 1. */
+/* If COMPZ = 'V' or N > 1, LIWORK must be at least */
+/* 6 + 6*N + 5*N*lg N. */
+/* If COMPZ = 'I' or N > 1, LIWORK must be at least */
+/* 3 + 5*N . */
+/* Note that for COMPZ = 'I' or 'V', then if N is less than or */
+/* equal to the minimum divide size, usually 25, then LIWORK */
+/* need only be 1. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: The algorithm failed to compute an eigenvalue while */
+/* working on the submatrix lying in rows and columns */
+/* INFO/(N+1) through mod(INFO,N+1). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1;
+
+ if (lsame_(compz, "N")) {
+ icompz = 0;
+ } else if (lsame_(compz, "V")) {
+ icompz = 1;
+ } else if (lsame_(compz, "I")) {
+ icompz = 2;
+ } else {
+ icompz = -1;
+ }
+ if (icompz < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldz < 1 || icompz > 0 && *ldz < max(1,*n)) {
+ *info = -6;
+ }
+
+ if (*info == 0) {
+
+/* Compute the workspace requirements */
+
+ smlsiz = ilaenv_(&c__9, "CSTEDC", " ", &c__0, &c__0, &c__0, &c__0);
+ if (*n <= 1 || icompz == 0) {
+ lwmin = 1;
+ liwmin = 1;
+ lrwmin = 1;
+ } else if (*n <= smlsiz) {
+ lwmin = 1;
+ liwmin = 1;
+ lrwmin = *n - 1 << 1;
+ } else if (icompz == 1) {
+
+/* LGN must satisfy 2**LGN >= N; the two conditional increments */
+/* below compensate for rounding in the floating-point LOG. */
+
+ lgn = (integer) (log((real) (*n)) / log(2.f));
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ lwmin = *n * *n;
+/* Computing 2nd power */
+ i__1 = *n;
+ lrwmin = *n * 3 + 1 + (*n << 1) * lgn + i__1 * i__1 * 3;
+ liwmin = *n * 6 + 6 + *n * 5 * lgn;
+ } else if (icompz == 2) {
+ lwmin = 1;
+/* Computing 2nd power */
+ i__1 = *n;
+ lrwmin = (*n << 2) + 1 + (i__1 * i__1 << 1);
+ liwmin = *n * 5 + 3;
+ }
+ work[1].r = (real) lwmin, work[1].i = 0.f;
+ rwork[1] = (real) lrwmin;
+ iwork[1] = liwmin;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -8;
+ } else if (*lrwork < lrwmin && ! lquery) {
+ *info = -10;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CSTEDC", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*n == 1) {
+ if (icompz != 0) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1.f, z__[i__1].i = 0.f;
+ }
+ return 0;
+ }
+
+/* If the following conditional clause is removed, then the routine */
+/* will use the Divide and Conquer routine to compute only the */
+/* eigenvalues, which requires (3N + 3N**2) real workspace and */
+/* (2 + 5N + 2N lg(N)) integer workspace. */
+/* Since on many architectures SSTERF is much faster than any other */
+/* algorithm for finding eigenvalues only, it is used here */
+/* as the default. If the conditional clause is removed, then */
+/* information on the size of workspace needs to be changed. */
+
+/* If COMPZ = 'N', use SSTERF to compute the eigenvalues. */
+
+ if (icompz == 0) {
+ ssterf_(n, &d__[1], &e[1], info);
+ goto L70;
+ }
+
+/* If N is smaller than the minimum divide size (SMLSIZ+1), then */
+/* solve the problem with another solver. */
+
+ if (*n <= smlsiz) {
+
+ csteqr_(compz, n, &d__[1], &e[1], &z__[z_offset], ldz, &rwork[1],
+ info);
+
+ } else {
+
+/* If COMPZ = 'I', we simply call SSTEDC instead. */
+
+ if (icompz == 2) {
+ slaset_("Full", n, n, &c_b17, &c_b18, &rwork[1], n);
+ ll = *n * *n + 1;
+ i__1 = *lrwork - ll + 1;
+ sstedc_("I", n, &d__[1], &e[1], &rwork[1], n, &rwork[ll], &i__1, &
+ iwork[1], liwork, info);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * z_dim1;
+ i__4 = (j - 1) * *n + i__;
+ z__[i__3].r = rwork[i__4], z__[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ goto L70;
+ }
+
+/* From now on, only option left to be handled is COMPZ = 'V', */
+/* i.e. ICOMPZ = 1. */
+
+/* Scale. */
+
+ orgnrm = slanst_("M", n, &d__[1], &e[1]);
+ if (orgnrm == 0.f) {
+ goto L70;
+ }
+
+ eps = slamch_("Epsilon");
+
+ start = 1;
+
+/* while ( START <= N ) */
+
+L30:
+ if (start <= *n) {
+
+/* Let FINISH be the position of the next subdiagonal entry */
+/* such that E( FINISH ) <= TINY or FINISH = N if no such */
+/* subdiagonal exists. The matrix identified by the elements */
+/* between START and FINISH constitutes an independent */
+/* sub-problem. */
+
+ finish = start;
+L40:
+ if (finish < *n) {
+ tiny = eps * sqrt((r__1 = d__[finish], dabs(r__1))) * sqrt((
+ r__2 = d__[finish + 1], dabs(r__2)));
+ if ((r__1 = e[finish], dabs(r__1)) > tiny) {
+ ++finish;
+ goto L40;
+ }
+ }
+
+/* (Sub) Problem determined. Compute its size and solve it. */
+
+ m = finish - start + 1;
+ if (m > smlsiz) {
+
+/* Scale. */
+
+ orgnrm = slanst_("M", &m, &d__[start], &e[start]);
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b18, &m, &c__1, &d__[
+ start], &m, info);
+ i__1 = m - 1;
+ i__2 = m - 1;
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b18, &i__1, &c__1, &e[
+ start], &i__2, info);
+
+ claed0_(n, &m, &d__[start], &e[start], &z__[start * z_dim1 +
+ 1], ldz, &work[1], n, &rwork[1], &iwork[1], info);
+ if (*info > 0) {
+ *info = (*info / (m + 1) + start - 1) * (*n + 1) + *info %
+ (m + 1) + start - 1;
+ goto L70;
+ }
+
+/* Scale back. */
+
+ slascl_("G", &c__0, &c__0, &c_b18, &orgnrm, &m, &c__1, &d__[
+ start], &m, info);
+
+ } else {
+ ssteqr_("I", &m, &d__[start], &e[start], &rwork[1], &m, &
+ rwork[m * m + 1], info);
+ clacrm_(n, &m, &z__[start * z_dim1 + 1], ldz, &rwork[1], &m, &
+ work[1], n, &rwork[m * m + 1]);
+ clacpy_("A", n, &m, &work[1], n, &z__[start * z_dim1 + 1],
+ ldz);
+ if (*info > 0) {
+ *info = start * (*n + 1) + finish;
+ goto L70;
+ }
+ }
+
+ start = finish + 1;
+ goto L30;
+ }
+
+/* endwhile */
+
+/* If the problem split any number of times, then the eigenvalues */
+/* will not be properly ordered. Here we permute the eigenvalues */
+/* (and the associated eigenvectors) into ascending order. */
+
+/* M still holds the size of the last sub-problem solved above; */
+/* it equals N exactly when the matrix never split. */
+
+ if (m != *n) {
+
+/* Use Selection Sort to minimize swaps of eigenvectors */
+
+ i__1 = *n;
+ for (ii = 2; ii <= i__1; ++ii) {
+ i__ = ii - 1;
+ k = i__;
+ p = d__[i__];
+ i__2 = *n;
+ for (j = ii; j <= i__2; ++j) {
+ if (d__[j] < p) {
+ k = j;
+ p = d__[j];
+ }
+/* L50: */
+ }
+ if (k != i__) {
+ d__[k] = d__[i__];
+ d__[i__] = p;
+ cswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1
+ + 1], &c__1);
+ }
+/* L60: */
+ }
+ }
+ }
+
+L70:
+ work[1].r = (real) lwmin, work[1].i = 0.f;
+ rwork[1] = (real) lrwmin;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of CSTEDC */
+
+} /* cstedc_ */
diff --git a/contrib/libs/clapack/cstegr.c b/contrib/libs/clapack/cstegr.c
new file mode 100644
index 0000000000..7ca137400a
--- /dev/null
+++ b/contrib/libs/clapack/cstegr.c
@@ -0,0 +1,210 @@
+/* cstegr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cstegr_(char *jobz, char *range, integer *n, real *d__,
+	real *e, real *vl, real *vu, integer *il, integer *iu, real *abstol,
+	integer *m, real *w, complex *z__, integer *ldz, integer *isuppz,
+	real *work, integer *lwork, integer *iwork, integer *liwork, integer *
+	info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset;
+
+    /* Local variables */
+    extern /* Subroutine */ int cstemr_(char *, char *, integer *, real *,
+	    real *, real *, real *, integer *, integer *, integer *, real *,
+	    complex *, integer *, integer *, integer *, logical *, real *,
+	    integer *, integer *, integer *, integer *);
+    logical tryrac;
+
+
+
+/*  -- LAPACK computational routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CSTEGR computes selected eigenvalues and, optionally, eigenvectors */
+/*  of a real symmetric tridiagonal matrix T. Any such unreduced matrix has */
+/*  a well defined set of pairwise different real eigenvalues, the corresponding */
+/*  real eigenvectors are pairwise orthogonal. */
+
+/*  The spectrum may be computed either completely or partially by specifying */
+/*  either an interval (VL,VU] or a range of indices IL:IU for the desired */
+/*  eigenvalues. */
+
+/*  CSTEGR is a compatibility wrapper around the improved CSTEMR routine. */
+/*  See SSTEMR for further details. */
+
+/*  One important change is that the ABSTOL parameter no longer provides any */
+/*  benefit and hence is no longer used. */
+
+/*  Note : CSTEGR and CSTEMR work only on machines which follow */
+/*  IEEE-754 floating-point standard in their handling of infinities and */
+/*  NaNs.  Normal execution may create these exceptional values and hence */
+/*  may abort due to a floating point exception in environments which */
+/*  do not conform to the IEEE-754 standard. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOBZ    (input) CHARACTER*1 */
+/*          = 'N':  Compute eigenvalues only; */
+/*          = 'V':  Compute eigenvalues and eigenvectors. */
+
+/*  RANGE   (input) CHARACTER*1 */
+/*          = 'A': all eigenvalues will be found. */
+/*          = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/*                 will be found. */
+/*          = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix.  N >= 0. */
+
+/*  D       (input/output) REAL array, dimension (N) */
+/*          On entry, the N diagonal elements of the tridiagonal matrix */
+/*          T. On exit, D is overwritten. */
+
+/*  E       (input/output) REAL array, dimension (N) */
+/*          On entry, the (N-1) subdiagonal elements of the tridiagonal */
+/*          matrix T in elements 1 to N-1 of E. E(N) need not be set on */
+/*          input, but is used internally as workspace. */
+/*          On exit, E is overwritten. */
+
+/*  VL      (input) REAL */
+/*  VU      (input) REAL */
+/*          If RANGE='V', the lower and upper bounds of the interval to */
+/*          be searched for eigenvalues. VL < VU. */
+/*          Not referenced if RANGE = 'A' or 'I'. */
+
+/*  IL      (input) INTEGER */
+/*  IU      (input) INTEGER */
+/*          If RANGE='I', the indices (in ascending order) of the */
+/*          smallest and largest eigenvalues to be returned. */
+/*          1 <= IL <= IU <= N, if N > 0. */
+/*          Not referenced if RANGE = 'A' or 'V'. */
+
+/*  ABSTOL  (input) REAL */
+/*          Unused.  Was the absolute error tolerance for the */
+/*          eigenvalues/eigenvectors in previous versions. */
+
+/*  M       (output) INTEGER */
+/*          The total number of eigenvalues found.  0 <= M <= N. */
+/*          If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/*  W       (output) REAL array, dimension (N) */
+/*          The first M elements contain the selected eigenvalues in */
+/*          ascending order. */
+
+/*  Z       (output) COMPLEX array, dimension (LDZ, max(1,M) ) */
+/*          If JOBZ = 'V', and if INFO = 0, then the first M columns of Z */
+/*          contain the orthonormal eigenvectors of the matrix T */
+/*          corresponding to the selected eigenvalues, with the i-th */
+/*          column of Z holding the eigenvector associated with W(i). */
+/*          If JOBZ = 'N', then Z is not referenced. */
+/*          Note: the user must ensure that at least max(1,M) columns are */
+/*          supplied in the array Z; if RANGE = 'V', the exact value of M */
+/*          is not known in advance and an upper bound must be used. */
+/*          Supplying N columns is always safe. */
+
+/*  LDZ     (input) INTEGER */
+/*          The leading dimension of the array Z.  LDZ >= 1, and if */
+/*          JOBZ = 'V', then LDZ >= max(1,N). */
+
+/*  ISUPPZ  (output) INTEGER ARRAY, dimension ( 2*max(1,M) ) */
+/*          The support of the eigenvectors in Z, i.e., the indices */
+/*          indicating the nonzero elements in Z. The i-th computed eigenvector */
+/*          is nonzero only in elements ISUPPZ( 2*i-1 ) through */
+/*          ISUPPZ( 2*i ). This is relevant in the case when the matrix */
+/*          is split. ISUPPZ is only accessed when JOBZ is 'V' and N > 0. */
+
+/*  WORK    (workspace/output) REAL array, dimension (LWORK) */
+/*          On exit, if INFO = 0, WORK(1) returns the optimal */
+/*          (and minimal) LWORK. */
+
+/*  LWORK   (input) INTEGER */
+/*          The dimension of the array WORK. LWORK >= max(1,18*N) */
+/*          if JOBZ = 'V', and LWORK >= max(1,12*N) if JOBZ = 'N'. */
+/*          If LWORK = -1, then a workspace query is assumed; the routine */
+/*          only calculates the optimal size of the WORK array, returns */
+/*          this value as the first entry of the WORK array, and no error */
+/*          message related to LWORK is issued by XERBLA. */
+
+/*  IWORK   (workspace/output) INTEGER array, dimension (LIWORK) */
+/*          On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/*  LIWORK  (input) INTEGER */
+/*          The dimension of the array IWORK.  LIWORK >= max(1,10*N) */
+/*          if the eigenvectors are desired, and LIWORK >= max(1,8*N) */
+/*          if only the eigenvalues are to be computed. */
+/*          If LIWORK = -1, then a workspace query is assumed; the */
+/*          routine only calculates the optimal size of the IWORK array, */
+/*          returns this value as the first entry of the IWORK array, and */
+/*          no error message related to LIWORK is issued by XERBLA. */
+
+/*  INFO    (output) INTEGER */
+/*          On exit, INFO */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = 1X, internal error in SLARRE, */
+/*                if INFO = 2X, internal error in CLARRV. */
+/*                Here, the digit X = ABS( IINFO ) < 10, where IINFO is */
+/*                the nonzero error code returned by SLARRE or */
+/*                CLARRV, respectively. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*     Inderjit Dhillon, IBM Almaden, USA */
+/*     Osni Marques, LBNL/NERSC, USA */
+/*     Christof Voemel, LBNL/NERSC, USA */
+
+/*  ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Executable Statements .. */
+    /* Parameter adjustments */
+/*     (f2c idiom: shift each array pointer down by one so the code can */
+/*     use 1-based, Fortran-style indexing below) */
+    --d__;
+    --e;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --isuppz;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+/*     TRYRAC = .FALSE.: do not ask CSTEMR to check whether the matrix */
+/*     defines its eigenvalues to high relative accuracy; this keeps the */
+/*     historical CSTEGR behaviour. */
+    tryrac = FALSE_;
+/*     All the real work is delegated to CSTEMR.  Note that ABSTOL is */
+/*     intentionally not passed on (it is unused, see its description */
+/*     above), and NZC is passed as N (always a safe number of columns). */
+    cstemr_(jobz, range, n, &d__[1], &e[1], vl, vu, il, iu, m, &w[1], &z__[
+	    z_offset], ldz, n, &isuppz[1], &tryrac, &work[1], lwork, &iwork[1]
+, liwork, info);
+
+/*     End of CSTEGR */
+
+    return 0;
+} /* cstegr_ */
diff --git a/contrib/libs/clapack/cstein.c b/contrib/libs/clapack/cstein.c
new file mode 100644
index 0000000000..ad5a3fcc14
--- /dev/null
+++ b/contrib/libs/clapack/cstein.c
@@ -0,0 +1,468 @@
+/* cstein.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__2 = 2;
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cstein_(integer *n, real *d__, real *e, integer *m, real
+	*w, integer *iblock, integer *isplit, complex *z__, integer *ldz,
+	real *work, integer *iwork, integer *ifail, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5;
+    real r__1, r__2, r__3, r__4, r__5;
+    complex q__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, b1, j1, bn, jr;
+    real xj, scl, eps, ctr, sep, nrm, tol;
+    integer its;
+    real xjm, eps1;
+    integer jblk, nblk, jmax;
+    extern doublereal snrm2_(integer *, real *, integer *);
+    integer iseed[4], gpind, iinfo;
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+    extern doublereal sasum_(integer *, real *, integer *);
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+	    integer *);
+    real ortol;
+    integer indrv1, indrv2, indrv3, indrv4, indrv5;
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *), slagtf_(
+	    integer *, real *, real *, real *, real *, real *, real *,
+	    integer *, integer *);
+    integer nrmchk;
+    extern integer isamax_(integer *, real *, integer *);
+    extern /* Subroutine */ int slagts_(integer *, integer *, real *, real *,
+	    real *, real *, integer *, real *, real *, integer *);
+    integer blksiz;
+    real onenrm, pertol;
+    extern /* Subroutine */ int slarnv_(integer *, integer *, integer *, real
+	    *);
+    real stpcrt;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CSTEIN computes the eigenvectors of a real symmetric tridiagonal */
+/*  matrix T corresponding to specified eigenvalues, using inverse */
+/*  iteration. */
+
+/*  The maximum number of iterations allowed for each eigenvector is */
+/*  specified by an internal parameter MAXITS (currently set to 5). */
+
+/*  Although the eigenvectors are real, they are stored in a complex */
+/*  array, which may be passed to CUNMTR or CUPMTR for back */
+/*  transformation to the eigenvectors of a complex Hermitian matrix */
+/*  which was reduced to tridiagonal form. */
+
+
+/*  Arguments */
+/*  ========= */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix.  N >= 0. */
+
+/*  D       (input) REAL array, dimension (N) */
+/*          The n diagonal elements of the tridiagonal matrix T. */
+
+/*  E       (input) REAL array, dimension (N-1) */
+/*          The (n-1) subdiagonal elements of the tridiagonal matrix */
+/*          T, stored in elements 1 to N-1. */
+
+/*  M       (input) INTEGER */
+/*          The number of eigenvectors to be found.  0 <= M <= N. */
+
+/*  W       (input) REAL array, dimension (N) */
+/*          The first M elements of W contain the eigenvalues for */
+/*          which eigenvectors are to be computed.  The eigenvalues */
+/*          should be grouped by split-off block and ordered from */
+/*          smallest to largest within the block.  ( The output array */
+/*          W from SSTEBZ with ORDER = 'B' is expected here. ) */
+
+/*  IBLOCK  (input) INTEGER array, dimension (N) */
+/*          The submatrix indices associated with the corresponding */
+/*          eigenvalues in W; IBLOCK(i)=1 if eigenvalue W(i) belongs to */
+/*          the first submatrix from the top, =2 if W(i) belongs to */
+/*          the second submatrix, etc.  ( The output array IBLOCK */
+/*          from SSTEBZ is expected here. ) */
+
+/*  ISPLIT  (input) INTEGER array, dimension (N) */
+/*          The splitting points, at which T breaks up into submatrices. */
+/*          The first submatrix consists of rows/columns 1 to */
+/*          ISPLIT( 1 ), the second of rows/columns ISPLIT( 1 )+1 */
+/*          through ISPLIT( 2 ), etc. */
+/*          ( The output array ISPLIT from SSTEBZ is expected here. ) */
+
+/*  Z       (output) COMPLEX array, dimension (LDZ, M) */
+/*          The computed eigenvectors.  The eigenvector associated */
+/*          with the eigenvalue W(i) is stored in the i-th column of */
+/*          Z.  Any vector which fails to converge is set to its current */
+/*          iterate after MAXITS iterations. */
+/*          The imaginary parts of the eigenvectors are set to zero. */
+
+/*  LDZ     (input) INTEGER */
+/*          The leading dimension of the array Z.  LDZ >= max(1,N). */
+
+/*  WORK    (workspace) REAL array, dimension (5*N) */
+
+/*  IWORK   (workspace) INTEGER array, dimension (N) */
+
+/*  IFAIL   (output) INTEGER array, dimension (M) */
+/*          On normal exit, all elements of IFAIL are zero. */
+/*          If one or more eigenvectors fail to converge after */
+/*          MAXITS iterations, then their indices are stored in */
+/*          array IFAIL. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0: successful exit */
+/*          < 0: if INFO = -i, the i-th argument had an illegal value */
+/*          > 0: if INFO = i, then i eigenvectors failed to converge */
+/*               in MAXITS iterations.  Their indices are stored in */
+/*               array IFAIL. */
+
+/*  Internal Parameters */
+/*  =================== */
+
+/*  MAXITS  INTEGER, default = 5 */
+/*          The maximum number of iterations performed. */
+
+/*  EXTRA   INTEGER, default = 2 */
+/*          The number of iterations performed after norm growth */
+/*          criterion is satisfied, should be at least 1. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+/*     (f2c idiom: shift each array pointer down by one so the code can */
+/*     use 1-based, Fortran-style indexing below) */
+    --d__;
+    --e;
+    --w;
+    --iblock;
+    --isplit;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --iwork;
+    --ifail;
+
+    /* Function Body */
+    *info = 0;
+    i__1 = *m;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	ifail[i__] = 0;
+/* L10: */
+    }
+
+    if (*n < 0) {
+	*info = -1;
+    } else if (*m < 0 || *m > *n) {
+	*info = -4;
+    } else if (*ldz < max(1,*n)) {
+	*info = -9;
+    } else {
+/*        Verify the required W ordering: eigenvalues must be grouped by */
+/*        block (nondecreasing IBLOCK) and ascending within each block. */
+	i__1 = *m;
+	for (j = 2; j <= i__1; ++j) {
+	    if (iblock[j] < iblock[j - 1]) {
+		*info = -6;
+		goto L30;
+	    }
+	    if (iblock[j] == iblock[j - 1] && w[j] < w[j - 1]) {
+		*info = -5;
+		goto L30;
+	    }
+/* L20: */
+	}
+L30:
+	;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CSTEIN", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0 || *m == 0) {
+	return 0;
+    } else if (*n == 1) {
+	i__1 = z_dim1 + 1;
+	z__[i__1].r = 1.f, z__[i__1].i = 0.f;
+	return 0;
+    }
+
+/*     Get machine constants. */
+
+    eps = slamch_("Precision");
+
+/*     Initialize seed for random number generator SLARNV. */
+/*     (fixed seed => deterministic results across calls) */
+
+    for (i__ = 1; i__ <= 4; ++i__) {
+	iseed[i__ - 1] = 1;
+/* L40: */
+    }
+
+/*     Initialize pointers. */
+/*     WORK is carved into five length-N vectors: INDRV1 holds the */
+/*     current iterate, INDRV2..INDRV4 the tridiagonal factors, INDRV5 */
+/*     extra factorization workspace. */
+
+    indrv1 = 0;
+    indrv2 = indrv1 + *n;
+    indrv3 = indrv2 + *n;
+    indrv4 = indrv3 + *n;
+    indrv5 = indrv4 + *n;
+
+/*     Compute eigenvectors of matrix blocks. */
+
+    j1 = 1;
+    i__1 = iblock[*m];
+    for (nblk = 1; nblk <= i__1; ++nblk) {
+
+/*        Find starting and ending indices of block nblk. */
+
+	if (nblk == 1) {
+	    b1 = 1;
+	} else {
+	    b1 = isplit[nblk - 1] + 1;
+	}
+	bn = isplit[nblk];
+	blksiz = bn - b1 + 1;
+	if (blksiz == 1) {
+	    goto L60;
+	}
+/*        gpind: index of the first eigenvalue in the current group of */
+/*        close eigenvalues; columns gpind..j-1 are the ones a new */
+/*        iterate is reorthogonalized against below. */
+	gpind = b1;
+
+/*        Compute reorthogonalization criterion and stopping criterion. */
+/*        ONENRM is the 1-norm of the block of T. */
+
+	onenrm = (r__1 = d__[b1], dabs(r__1)) + (r__2 = e[b1], dabs(r__2));
+/* Computing MAX */
+	r__3 = onenrm, r__4 = (r__1 = d__[bn], dabs(r__1)) + (r__2 = e[bn - 1]
+		, dabs(r__2));
+	onenrm = dmax(r__3,r__4);
+	i__2 = bn - 1;
+	for (i__ = b1 + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    r__4 = onenrm, r__5 = (r__1 = d__[i__], dabs(r__1)) + (r__2 = e[
+		    i__ - 1], dabs(r__2)) + (r__3 = e[i__], dabs(r__3));
+	    onenrm = dmax(r__4,r__5);
+/* L50: */
+	}
+/*        Eigenvalues separated by more than ORTOL start a new group. */
+	ortol = onenrm * .001f;
+
+	stpcrt = sqrt(.1f / blksiz);
+
+/*        Loop through eigenvalues of block nblk. */
+
+L60:
+	jblk = 0;
+	i__2 = *m;
+	for (j = j1; j <= i__2; ++j) {
+	    if (iblock[j] != nblk) {
+		j1 = j;
+		goto L180;
+	    }
+	    ++jblk;
+	    xj = w[j];
+
+/*           Skip all the work if the block size is one. */
+
+	    if (blksiz == 1) {
+		work[indrv1 + 1] = 1.f;
+		goto L140;
+	    }
+
+/*           If eigenvalues j and j-1 are too close, add a relatively */
+/*           small perturbation. */
+
+	    if (jblk > 1) {
+		eps1 = (r__1 = eps * xj, dabs(r__1));
+		pertol = eps1 * 10.f;
+		sep = xj - xjm;
+		if (sep < pertol) {
+		    xj = xjm + pertol;
+		}
+	    }
+
+	    its = 0;
+	    nrmchk = 0;
+
+/*           Get random starting vector ( IDIST = 2 for SLARNV ). */
+
+	    slarnv_(&c__2, iseed, &blksiz, &work[indrv1 + 1]);
+
+/*           Copy the matrix T so it won't be destroyed in factorization. */
+
+	    scopy_(&blksiz, &d__[b1], &c__1, &work[indrv4 + 1], &c__1);
+	    i__3 = blksiz - 1;
+	    scopy_(&i__3, &e[b1], &c__1, &work[indrv2 + 2], &c__1);
+	    i__3 = blksiz - 1;
+	    scopy_(&i__3, &e[b1], &c__1, &work[indrv3 + 1], &c__1);
+
+/*           Compute LU factors with partial pivoting  ( PT = LU ) */
+
+	    tol = 0.f;
+	    slagtf_(&blksiz, &work[indrv4 + 1], &xj, &work[indrv2 + 2], &work[
+		    indrv3 + 1], &tol, &work[indrv5 + 1], &iwork[1], &iinfo);
+
+/*           Update iteration count. */
+
+L70:
+	    ++its;
+/*           Give up after MAXITS = 5 iterations (see Internal */
+/*           Parameters); the current iterate is still stored. */
+	    if (its > 5) {
+		goto L120;
+	    }
+
+/*           Normalize and scale the righthand side vector Pb. */
+
+/* Computing MAX */
+	    r__2 = eps, r__3 = (r__1 = work[indrv4 + blksiz], dabs(r__1));
+	    scl = blksiz * onenrm * dmax(r__2,r__3) / sasum_(&blksiz, &work[
+		    indrv1 + 1], &c__1);
+	    sscal_(&blksiz, &scl, &work[indrv1 + 1], &c__1);
+
+/*           Solve the system LU = Pb. */
+
+	    slagts_(&c_n1, &blksiz, &work[indrv4 + 1], &work[indrv2 + 2], &
+		    work[indrv3 + 1], &work[indrv5 + 1], &iwork[1], &work[
+		    indrv1 + 1], &tol, &iinfo);
+
+/*           Reorthogonalize by modified Gram-Schmidt if eigenvalues are */
+/*           close enough. */
+
+	    if (jblk == 1) {
+		goto L110;
+	    }
+	    if ((r__1 = xj - xjm, dabs(r__1)) > ortol) {
+		gpind = j;
+	    }
+	    if (gpind != j) {
+		i__3 = j - 1;
+		for (i__ = gpind; i__ <= i__3; ++i__) {
+/*                 Subtract the projection onto each earlier vector of */
+/*                 the group; only real parts are involved since the */
+/*                 stored eigenvectors have zero imaginary part. */
+		    ctr = 0.f;
+		    i__4 = blksiz;
+		    for (jr = 1; jr <= i__4; ++jr) {
+			i__5 = b1 - 1 + jr + i__ * z_dim1;
+			ctr += work[indrv1 + jr] * z__[i__5].r;
+/* L80: */
+		    }
+		    i__4 = blksiz;
+		    for (jr = 1; jr <= i__4; ++jr) {
+			i__5 = b1 - 1 + jr + i__ * z_dim1;
+			work[indrv1 + jr] -= ctr * z__[i__5].r;
+/* L90: */
+		    }
+/* L100: */
+		}
+	    }
+
+/*           Check the infinity norm of the iterate. */
+
+L110:
+	    jmax = isamax_(&blksiz, &work[indrv1 + 1], &c__1);
+	    nrm = (r__1 = work[indrv1 + jmax], dabs(r__1));
+
+/*           Continue for additional iterations after norm reaches */
+/*           stopping criterion. */
+
+	    if (nrm < stpcrt) {
+		goto L70;
+	    }
+/*           Require 1 + EXTRA = 3 successful norm checks in total */
+/*           (EXTRA = 2, see Internal Parameters). */
+	    ++nrmchk;
+	    if (nrmchk < 3) {
+		goto L70;
+	    }
+
+	    goto L130;
+
+/*           If stopping criterion was not satisfied, update info and */
+/*           store eigenvector number in array ifail. */
+
+L120:
+	    ++(*info);
+	    ifail[*info] = j;
+
+/*           Accept iterate as jth eigenvector. */
+
+L130:
+	    scl = 1.f / snrm2_(&blksiz, &work[indrv1 + 1], &c__1);
+	    jmax = isamax_(&blksiz, &work[indrv1 + 1], &c__1);
+/*           Fix the sign so that the largest component is positive. */
+	    if (work[indrv1 + jmax] < 0.f) {
+		scl = -scl;
+	    }
+	    sscal_(&blksiz, &scl, &work[indrv1 + 1], &c__1);
+L140:
+/*           Store the (real) iterate into column j of the complex Z, */
+/*           zeroing rows outside the current block. */
+	    i__3 = *n;
+	    for (i__ = 1; i__ <= i__3; ++i__) {
+		i__4 = i__ + j * z_dim1;
+		z__[i__4].r = 0.f, z__[i__4].i = 0.f;
+/* L150: */
+	    }
+	    i__3 = blksiz;
+	    for (i__ = 1; i__ <= i__3; ++i__) {
+		i__4 = b1 + i__ - 1 + j * z_dim1;
+		i__5 = indrv1 + i__;
+		q__1.r = work[i__5], q__1.i = 0.f;
+		z__[i__4].r = q__1.r, z__[i__4].i = q__1.i;
+/* L160: */
+	    }
+
+/*           Save the shift to check eigenvalue spacing at next */
+/*           iteration. */
+
+	    xjm = xj;
+
+/* L170: */
+	}
+L180:
+	;
+    }
+
+    return 0;
+
+/*     End of CSTEIN */
+
+} /* cstein_ */
diff --git a/contrib/libs/clapack/cstemr.c b/contrib/libs/clapack/cstemr.c
new file mode 100644
index 0000000000..2726853682
--- /dev/null
+++ b/contrib/libs/clapack/cstemr.c
@@ -0,0 +1,749 @@
+/* cstemr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b18 = .003f;
+
+/* Subroutine */ int cstemr_(char *jobz, char *range, integer *n, real *d__,
+ real *e, real *vl, real *vu, integer *il, integer *iu, integer *m,
+ real *w, complex *z__, integer *ldz, integer *nzc, integer *isuppz,
+ logical *tryrac, real *work, integer *lwork, integer *iwork, integer *
+ liwork, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j;
+ real r1, r2;
+ integer jj;
+ real cs;
+ integer in;
+ real sn, wl, wu;
+ integer iil, iiu;
+ real eps, tmp;
+ integer indd, iend, jblk, wend;
+ real rmin, rmax;
+ integer itmp;
+ real tnrm;
+ integer inde2;
+ extern /* Subroutine */ int slae2_(real *, real *, real *, real *, real *)
+ ;
+ integer itmp2;
+ real rtol1, rtol2, scale;
+ integer indgp;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ integer iindw, ilast;
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ integer lwmin;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ logical wantz;
+ extern /* Subroutine */ int slaev2_(real *, real *, real *, real *, real *
+, real *, real *);
+ logical alleig;
+ integer ibegin;
+ logical indeig;
+ integer iindbl;
+ logical valeig;
+ extern doublereal slamch_(char *);
+ integer wbegin;
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ integer inderr, iindwk, indgrs, offset;
+ extern /* Subroutine */ int slarrc_(char *, integer *, real *, real *,
+ real *, real *, real *, integer *, integer *, integer *, integer *
+), clarrv_(integer *, real *, real *, real *, real *,
+ real *, integer *, integer *, integer *, integer *, real *, real *
+, real *, real *, real *, real *, integer *, integer *, real *,
+ complex *, integer *, integer *, real *, integer *, integer *),
+ slarre_(char *, integer *, real *, real *, integer *, integer *,
+ real *, real *, real *, real *, real *, real *, integer *,
+ integer *, integer *, real *, real *, real *, integer *, integer *
+, real *, real *, real *, integer *, integer *);
+ integer iinspl, indwrk, ifirst, liwmin, nzcmin;
+ real pivmin, thresh;
+ extern doublereal slanst_(char *, integer *, real *, real *);
+ extern /* Subroutine */ int slarrj_(integer *, real *, real *, integer *,
+ integer *, real *, integer *, real *, real *, real *, integer *,
+ real *, real *, integer *);
+ integer nsplit;
+ extern /* Subroutine */ int slarrr_(integer *, real *, real *, integer *);
+ real smlnum;
+ extern /* Subroutine */ int slasrt_(char *, integer *, real *, integer *);
+ logical lquery, zquery;
+
+
+/* -- LAPACK computational routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSTEMR computes selected eigenvalues and, optionally, eigenvectors */
+/* of a real symmetric tridiagonal matrix T. Any such unreduced matrix has */
+/* a well defined set of pairwise different real eigenvalues, the corresponding */
+/* real eigenvectors are pairwise orthogonal. */
+
+/* The spectrum may be computed either completely or partially by specifying */
+/* either an interval (VL,VU] or a range of indices IL:IU for the desired */
+/* eigenvalues. */
+
+/* Depending on the number of desired eigenvalues, these are computed either */
+/* by bisection or the dqds algorithm. Numerically orthogonal eigenvectors are */
+/* computed by the use of various suitable L D L^T factorizations near clusters */
+/* of close eigenvalues (referred to as RRRs, Relatively Robust */
+/* Representations). An informal sketch of the algorithm follows. */
+
+/* For each unreduced block (submatrix) of T, */
+/* (a) Compute T - sigma I = L D L^T, so that L and D */
+/* define all the wanted eigenvalues to high relative accuracy. */
+/* This means that small relative changes in the entries of D and L */
+/* cause only small relative changes in the eigenvalues and */
+/* eigenvectors. The standard (unfactored) representation of the */
+/* tridiagonal matrix T does not have this property in general. */
+/* (b) Compute the eigenvalues to suitable accuracy. */
+/* If the eigenvectors are desired, the algorithm attains full */
+/* accuracy of the computed eigenvalues only right before */
+/* the corresponding vectors have to be computed, see steps c) and d). */
+/* (c) For each cluster of close eigenvalues, select a new */
+/* shift close to the cluster, find a new factorization, and refine */
+/* the shifted eigenvalues to suitable accuracy. */
+/* (d) For each eigenvalue with a large enough relative separation compute */
+/* the corresponding eigenvector by forming a rank revealing twisted */
+/* factorization. Go back to (c) for any clusters that remain. */
+
+/* For more details, see: */
+/* - Inderjit S. Dhillon and Beresford N. Parlett: "Multiple representations */
+/* to compute orthogonal eigenvectors of symmetric tridiagonal matrices," */
+/* Linear Algebra and its Applications, 387(1), pp. 1-28, August 2004. */
+/* - Inderjit Dhillon and Beresford Parlett: "Orthogonal Eigenvectors and */
+/* Relative Gaps," SIAM Journal on Matrix Analysis and Applications, Vol. 25, */
+/* 2004. Also LAPACK Working Note 154. */
+/* - Inderjit Dhillon: "A new O(n^2) algorithm for the symmetric */
+/* tridiagonal eigenvalue/eigenvector problem", */
+/* Computer Science Division Technical Report No. UCB/CSD-97-971, */
+/* UC Berkeley, May 1997. */
+
+/* Notes: */
+/* 1.CSTEMR works only on machines which follow IEEE-754 */
+/* floating-point standard in their handling of infinities and NaNs. */
+/* This permits the use of efficient inner loops avoiding a check for */
+/* zero divisors. */
+
+/* 2. LAPACK routines can be used to reduce a complex Hermitean matrix to */
+/* real symmetric tridiagonal form. */
+
+/* (Any complex Hermitean tridiagonal matrix has real values on its diagonal */
+/* and potentially complex numbers on its off-diagonals. By applying a */
+/* similarity transform with an appropriate diagonal matrix */
+/* diag(1,e^{i \phy_1}, ... , e^{i \phy_{n-1}}), the complex Hermitean */
+/* matrix can be transformed into a real symmetric matrix and complex */
+/* arithmetic can be entirely avoided.) */
+
+/* While the eigenvectors of the real symmetric tridiagonal matrix are real, */
+/* the eigenvectors of original complex Hermitean matrix have complex entries */
+/* in general. */
+/* Since LAPACK drivers overwrite the matrix data with the eigenvectors, */
+/* CSTEMR accepts complex workspace to facilitate interoperability */
+/* with CUNMTR or CUPMTR. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the N diagonal elements of the tridiagonal matrix */
+/* T. On exit, D is overwritten. */
+
+/* E (input/output) REAL array, dimension (N) */
+/* On entry, the (N-1) subdiagonal elements of the tridiagonal */
+/* matrix T in elements 1 to N-1 of E. E(N) need not be set on */
+/* input, but is used internally as workspace. */
+/* On exit, E is overwritten. */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) REAL array, dimension (N) */
+/* The first M elements contain the selected eigenvalues in */
+/* ascending order. */
+
+/* Z (output) COMPLEX array, dimension (LDZ, max(1,M) ) */
+/* If JOBZ = 'V', and if INFO = 0, then the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix T */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and can be computed with a workspace */
+/* query by setting NZC = -1, see below. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', then LDZ >= max(1,N). */
+
+/* NZC (input) INTEGER */
+/* The number of eigenvectors to be held in the array Z. */
+/* If RANGE = 'A', then NZC >= max(1,N). */
+/* If RANGE = 'V', then NZC >= the number of eigenvalues in (VL,VU]. */
+/* If RANGE = 'I', then NZC >= IU-IL+1. */
+/* If NZC = -1, then a workspace query is assumed; the */
+/* routine calculates the number of columns of the array Z that */
+/* are needed to hold the eigenvectors. */
+/* This value is returned as the first entry of the Z array, and */
+/* no error message related to NZC is issued by XERBLA. */
+
+/* ISUPPZ (output) INTEGER ARRAY, dimension ( 2*max(1,M) ) */
+/* The support of the eigenvectors in Z, i.e., the indices */
+/* indicating the nonzero elements in Z. The i-th computed eigenvector */
+/* is nonzero only in elements ISUPPZ( 2*i-1 ) through */
+/* ISUPPZ( 2*i ). This is relevant in the case when the matrix */
+/* is split. ISUPPZ is only accessed when JOBZ is 'V' and N > 0. */
+
+/* TRYRAC (input/output) LOGICAL */
+/* If TRYRAC.EQ..TRUE., indicates that the code should check whether */
+/* the tridiagonal matrix defines its eigenvalues to high relative */
+/* accuracy. If so, the code uses relative-accuracy preserving */
+/* algorithms that might be (a bit) slower depending on the matrix. */
+/* If the matrix does not define its eigenvalues to high relative */
+/*          accuracy, the code can use possibly faster algorithms. */
+/* If TRYRAC.EQ..FALSE., the code is not required to guarantee */
+/* relatively accurate eigenvalues and can use the fastest possible */
+/* techniques. */
+/* On exit, a .TRUE. TRYRAC will be set to .FALSE. if the matrix */
+/* does not define its eigenvalues to high relative accuracy. */
+
+/* WORK (workspace/output) REAL array, dimension (LWORK) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal */
+/* (and minimal) LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,18*N) */
+/* if JOBZ = 'V', and LWORK >= max(1,12*N) if JOBZ = 'N'. */
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (LIWORK) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. LIWORK >= max(1,10*N) */
+/* if the eigenvectors are desired, and LIWORK >= max(1,8*N) */
+/* if only the eigenvalues are to be computed. */
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal size of the IWORK array, */
+/* returns this value as the first entry of the IWORK array, and */
+/* no error message related to LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* On exit, INFO */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = 1X, internal error in SLARRE, */
+/* if INFO = 2X, internal error in CLARRV. */
+/* Here, the digit X = ABS( IINFO ) < 10, where IINFO is */
+/* the nonzero error code returned by SLARRE or */
+/* CLARRV, respectively. */
+
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --isuppz;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+
+ lquery = *lwork == -1 || *liwork == -1;
+ zquery = *nzc == -1;
+/* SSTEMR needs WORK of size 6*N, IWORK of size 3*N. */
+/* In addition, SLARRE needs WORK of size 6*N, IWORK of size 5*N. */
+/* Furthermore, CLARRV needs WORK of size 12*N, IWORK of size 7*N. */
+ if (wantz) {
+ lwmin = *n * 18;
+ liwmin = *n * 10;
+ } else {
+/* need less workspace if only the eigenvalues are wanted */
+ lwmin = *n * 12;
+ liwmin = *n << 3;
+ }
+ wl = 0.f;
+ wu = 0.f;
+ iil = 0;
+ iiu = 0;
+ if (valeig) {
+/* We do not reference VL, VU in the cases RANGE = 'I','A' */
+/* The interval (WL, WU] contains all the wanted eigenvalues. */
+/* It is either given by the user or computed in SLARRE. */
+ wl = *vl;
+ wu = *vu;
+ } else if (indeig) {
+/* We do not reference IL, IU in the cases RANGE = 'V','A' */
+ iil = *il;
+ iiu = *iu;
+ }
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (valeig && *n > 0 && wu <= wl) {
+ *info = -7;
+ } else if (indeig && (iil < 1 || iil > *n)) {
+ *info = -8;
+ } else if (indeig && (iiu < iil || iiu > *n)) {
+ *info = -9;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -13;
+ } else if (*lwork < lwmin && ! lquery) {
+ *info = -17;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -19;
+ }
+
+/* Get machine constants. */
+
+ safmin = slamch_("Safe minimum");
+ eps = slamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1.f / smlnum;
+ rmin = sqrt(smlnum);
+/* Computing MIN */
+ r__1 = sqrt(bignum), r__2 = 1.f / sqrt(sqrt(safmin));
+ rmax = dmin(r__1,r__2);
+
+ if (*info == 0) {
+ work[1] = (real) lwmin;
+ iwork[1] = liwmin;
+
+ if (wantz && alleig) {
+ nzcmin = *n;
+ } else if (wantz && valeig) {
+ slarrc_("T", n, vl, vu, &d__[1], &e[1], &safmin, &nzcmin, &itmp, &
+ itmp2, info);
+ } else if (wantz && indeig) {
+ nzcmin = iiu - iil + 1;
+ } else {
+/* WANTZ .EQ. FALSE. */
+ nzcmin = 0;
+ }
+ if (zquery && *info == 0) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = (real) nzcmin, z__[i__1].i = 0.f;
+ } else if (*nzc < nzcmin && ! zquery) {
+ *info = -14;
+ }
+ }
+ if (*info != 0) {
+
+ i__1 = -(*info);
+ xerbla_("CSTEMR", &i__1);
+
+ return 0;
+ } else if (lquery || zquery) {
+ return 0;
+ }
+
+/* Handle N = 0, 1, and 2 cases immediately */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (alleig || indeig) {
+ *m = 1;
+ w[1] = d__[1];
+ } else {
+ if (wl < d__[1] && wu >= d__[1]) {
+ *m = 1;
+ w[1] = d__[1];
+ }
+ }
+ if (wantz && ! zquery) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1.f, z__[i__1].i = 0.f;
+ isuppz[1] = 1;
+ isuppz[2] = 1;
+ }
+ return 0;
+ }
+
+ if (*n == 2) {
+ if (! wantz) {
+ slae2_(&d__[1], &e[1], &d__[2], &r1, &r2);
+ } else if (wantz && ! zquery) {
+ slaev2_(&d__[1], &e[1], &d__[2], &r1, &r2, &cs, &sn);
+ }
+ if (alleig || valeig && r2 > wl && r2 <= wu || indeig && iil == 1) {
+ ++(*m);
+ w[*m] = r2;
+ if (wantz && ! zquery) {
+ i__1 = *m * z_dim1 + 1;
+ r__1 = -sn;
+ z__[i__1].r = r__1, z__[i__1].i = 0.f;
+ i__1 = *m * z_dim1 + 2;
+ z__[i__1].r = cs, z__[i__1].i = 0.f;
+/* Note: At most one of SN and CS can be zero. */
+ if (sn != 0.f) {
+ if (cs != 0.f) {
+ isuppz[(*m << 1) - 1] = 1;
+ isuppz[(*m << 1) - 1] = 2;
+ } else {
+ isuppz[(*m << 1) - 1] = 1;
+ isuppz[(*m << 1) - 1] = 1;
+ }
+ } else {
+ isuppz[(*m << 1) - 1] = 2;
+ isuppz[*m * 2] = 2;
+ }
+ }
+ }
+ if (alleig || valeig && r1 > wl && r1 <= wu || indeig && iiu == 2) {
+ ++(*m);
+ w[*m] = r1;
+ if (wantz && ! zquery) {
+ i__1 = *m * z_dim1 + 1;
+ z__[i__1].r = cs, z__[i__1].i = 0.f;
+ i__1 = *m * z_dim1 + 2;
+ z__[i__1].r = sn, z__[i__1].i = 0.f;
+/* Note: At most one of SN and CS can be zero. */
+ if (sn != 0.f) {
+ if (cs != 0.f) {
+ isuppz[(*m << 1) - 1] = 1;
+ isuppz[(*m << 1) - 1] = 2;
+ } else {
+ isuppz[(*m << 1) - 1] = 1;
+ isuppz[(*m << 1) - 1] = 1;
+ }
+ } else {
+ isuppz[(*m << 1) - 1] = 2;
+ isuppz[*m * 2] = 2;
+ }
+ }
+ }
+ return 0;
+ }
+/* Continue with general N */
+ indgrs = 1;
+ inderr = (*n << 1) + 1;
+ indgp = *n * 3 + 1;
+ indd = (*n << 2) + 1;
+ inde2 = *n * 5 + 1;
+ indwrk = *n * 6 + 1;
+
+ iinspl = 1;
+ iindbl = *n + 1;
+ iindw = (*n << 1) + 1;
+ iindwk = *n * 3 + 1;
+
+/* Scale matrix to allowable range, if necessary. */
+/* The allowable range is related to the PIVMIN parameter; see the */
+/* comments in SLARRD. The preference for scaling small values */
+/* up is heuristic; we expect users' matrices not to be close to the */
+/* RMAX threshold. */
+
+ scale = 1.f;
+ tnrm = slanst_("M", n, &d__[1], &e[1]);
+ if (tnrm > 0.f && tnrm < rmin) {
+ scale = rmin / tnrm;
+ } else if (tnrm > rmax) {
+ scale = rmax / tnrm;
+ }
+ if (scale != 1.f) {
+ sscal_(n, &scale, &d__[1], &c__1);
+ i__1 = *n - 1;
+ sscal_(&i__1, &scale, &e[1], &c__1);
+ tnrm *= scale;
+ if (valeig) {
+/* If eigenvalues in interval have to be found, */
+/* scale (WL, WU] accordingly */
+ wl *= scale;
+ wu *= scale;
+ }
+ }
+
+/* Compute the desired eigenvalues of the tridiagonal after splitting */
+/* into smaller subblocks if the corresponding off-diagonal elements */
+/* are small */
+/* THRESH is the splitting parameter for SLARRE */
+/* A negative THRESH forces the old splitting criterion based on the */
+/* size of the off-diagonal. A positive THRESH switches to splitting */
+/* which preserves relative accuracy. */
+
+ if (*tryrac) {
+/* Test whether the matrix warrants the more expensive relative approach. */
+ slarrr_(n, &d__[1], &e[1], &iinfo);
+ } else {
+/* The user does not care about relative accurately eigenvalues */
+ iinfo = -1;
+ }
+/* Set the splitting criterion */
+ if (iinfo == 0) {
+ thresh = eps;
+ } else {
+ thresh = -eps;
+/* relative accuracy is desired but T does not guarantee it */
+ *tryrac = FALSE_;
+ }
+
+ if (*tryrac) {
+/* Copy original diagonal, needed to guarantee relative accuracy */
+ scopy_(n, &d__[1], &c__1, &work[indd], &c__1);
+ }
+/* Store the squares of the offdiagonal values of T */
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing 2nd power */
+ r__1 = e[j];
+ work[inde2 + j - 1] = r__1 * r__1;
+/* L5: */
+ }
+/* Set the tolerance parameters for bisection */
+ if (! wantz) {
+/* SLARRE computes the eigenvalues to full precision. */
+ rtol1 = eps * 4.f;
+ rtol2 = eps * 4.f;
+ } else {
+/* SLARRE computes the eigenvalues to less than full precision. */
+/* CLARRV will refine the eigenvalue approximations, and we only */
+/* need less accurate initial bisection in SLARRE. */
+/* Note: these settings do only affect the subset case and SLARRE */
+/* Computing MAX */
+ r__1 = sqrt(eps) * .05f, r__2 = eps * 4.f;
+ rtol1 = dmax(r__1,r__2);
+/* Computing MAX */
+ r__1 = sqrt(eps) * .005f, r__2 = eps * 4.f;
+ rtol2 = dmax(r__1,r__2);
+ }
+ slarre_(range, n, &wl, &wu, &iil, &iiu, &d__[1], &e[1], &work[inde2], &
+ rtol1, &rtol2, &thresh, &nsplit, &iwork[iinspl], m, &w[1], &work[
+ inderr], &work[indgp], &iwork[iindbl], &iwork[iindw], &work[
+ indgrs], &pivmin, &work[indwrk], &iwork[iindwk], &iinfo);
+ if (iinfo != 0) {
+ *info = abs(iinfo) + 10;
+ return 0;
+ }
+/* Note that if RANGE .NE. 'V', SLARRE computes bounds on the desired */
+/* part of the spectrum. All desired eigenvalues are contained in */
+/* (WL,WU] */
+ if (wantz) {
+
+/* Compute the desired eigenvectors corresponding to the computed */
+/* eigenvalues */
+
+ clarrv_(n, &wl, &wu, &d__[1], &e[1], &pivmin, &iwork[iinspl], m, &
+ c__1, m, &c_b18, &rtol1, &rtol2, &w[1], &work[inderr], &work[
+ indgp], &iwork[iindbl], &iwork[iindw], &work[indgrs], &z__[
+ z_offset], ldz, &isuppz[1], &work[indwrk], &iwork[iindwk], &
+ iinfo);
+ if (iinfo != 0) {
+ *info = abs(iinfo) + 20;
+ return 0;
+ }
+ } else {
+/* SLARRE computes eigenvalues of the (shifted) root representation */
+/* CLARRV returns the eigenvalues of the unshifted matrix. */
+/* However, if the eigenvectors are not desired by the user, we need */
+/* to apply the corresponding shifts from SLARRE to obtain the */
+/* eigenvalues of the original matrix. */
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ itmp = iwork[iindbl + j - 1];
+ w[j] += e[iwork[iinspl + itmp - 1]];
+/* L20: */
+ }
+ }
+
+ if (*tryrac) {
+/* Refine computed eigenvalues so that they are relatively accurate */
+/* with respect to the original matrix T. */
+ ibegin = 1;
+ wbegin = 1;
+ i__1 = iwork[iindbl + *m - 1];
+ for (jblk = 1; jblk <= i__1; ++jblk) {
+ iend = iwork[iinspl + jblk - 1];
+ in = iend - ibegin + 1;
+ wend = wbegin - 1;
+/* check if any eigenvalues have to be refined in this block */
+L36:
+ if (wend < *m) {
+ if (iwork[iindbl + wend] == jblk) {
+ ++wend;
+ goto L36;
+ }
+ }
+ if (wend < wbegin) {
+ ibegin = iend + 1;
+ goto L39;
+ }
+ offset = iwork[iindw + wbegin - 1] - 1;
+ ifirst = iwork[iindw + wbegin - 1];
+ ilast = iwork[iindw + wend - 1];
+ rtol2 = eps * 4.f;
+ slarrj_(&in, &work[indd + ibegin - 1], &work[inde2 + ibegin - 1],
+ &ifirst, &ilast, &rtol2, &offset, &w[wbegin], &work[
+ inderr + wbegin - 1], &work[indwrk], &iwork[iindwk], &
+ pivmin, &tnrm, &iinfo);
+ ibegin = iend + 1;
+ wbegin = wend + 1;
+L39:
+ ;
+ }
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+ if (scale != 1.f) {
+ r__1 = 1.f / scale;
+ sscal_(m, &r__1, &w[1], &c__1);
+ }
+
+/* If eigenvalues are not in increasing order, then sort them, */
+/* possibly along with eigenvectors. */
+
+ if (nsplit > 1) {
+ if (! wantz) {
+ slasrt_("I", m, &w[1], &iinfo);
+ if (iinfo != 0) {
+ *info = 3;
+ return 0;
+ }
+ } else {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = 0;
+ tmp = w[j];
+ i__2 = *m;
+ for (jj = j + 1; jj <= i__2; ++jj) {
+ if (w[jj] < tmp) {
+ i__ = jj;
+ tmp = w[jj];
+ }
+/* L50: */
+ }
+ if (i__ != 0) {
+ w[i__] = w[j];
+ w[j] = tmp;
+ if (wantz) {
+ cswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j *
+ z_dim1 + 1], &c__1);
+ itmp = isuppz[(i__ << 1) - 1];
+ isuppz[(i__ << 1) - 1] = isuppz[(j << 1) - 1];
+ isuppz[(j << 1) - 1] = itmp;
+ itmp = isuppz[i__ * 2];
+ isuppz[i__ * 2] = isuppz[j * 2];
+ isuppz[j * 2] = itmp;
+ }
+ }
+/* L60: */
+ }
+ }
+ }
+
+
+ work[1] = (real) lwmin;
+ iwork[1] = liwmin;
+ return 0;
+
+/* End of CSTEMR */
+
+} /* cstemr_ */
diff --git a/contrib/libs/clapack/csteqr.c b/contrib/libs/clapack/csteqr.c
new file mode 100644
index 0000000000..724bf457a5
--- /dev/null
+++ b/contrib/libs/clapack/csteqr.c
@@ -0,0 +1,620 @@
+/* csteqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c translates Fortran's pass-by-reference convention literally, so every */
+/* scalar actual argument needs addressable storage; these file-static */
+/* constants hold the literal values passed to the BLAS/LAPACK calls below. */
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__0 = 0;
+static integer c__1 = 1;
+static integer c__2 = 2;
+static real c_b41 = 1.f;
+
+/* Subroutine */ int csteqr_(char *compz, integer *n, real *d__, real *e, 
+	complex *z__, integer *ldz, real *work, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1, i__2;
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal), r_sign(real *, real *);
+
+    /* Local variables */
+    real b, c__, f, g;
+    integer i__, j, k, l, m;
+    real p, r__, s;
+    integer l1, ii, mm, lm1, mm1, nm1;
+    real rt1, rt2, eps;
+    integer lsv;
+    real tst, eps2;
+    integer lend, jtot;
+    extern /* Subroutine */ int slae2_(real *, real *, real *, real *, real *)
+	    ;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int clasr_(char *, char *, char *, integer *, 
+	    integer *, real *, real *, complex *, integer *);
+    real anorm;
+    extern /* Subroutine */ int cswap_(integer *, complex *, integer *, 
+	    complex *, integer *);
+    integer lendm1, lendp1;
+    extern /* Subroutine */ int slaev2_(real *, real *, real *, real *, real *
+, real *, real *);
+    extern doublereal slapy2_(real *, real *);
+    integer iscale;
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int claset_(char *, integer *, integer *, complex 
+	    *, complex *, complex *, integer *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real safmax;
+    extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *, 
+	    real *, integer *, integer *, real *, integer *, integer *);
+    integer lendsv;
+    extern /* Subroutine */ int slartg_(real *, real *, real *, real *, real *
+);
+    real ssfmin;
+    integer nmaxit, icompz;
+    real ssfmax;
+    extern doublereal slanst_(char *, integer *, real *, real *);
+    extern /* Subroutine */ int slasrt_(char *, integer *, real *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CSTEQR computes all eigenvalues and, optionally, eigenvectors of a */
+/*  symmetric tridiagonal matrix using the implicit QL or QR method. */
+/*  The eigenvectors of a full or band complex Hermitian matrix can also */
+/*  be found if CHETRD or CHPTRD or CHBTRD has been used to reduce this */
+/*  matrix to tridiagonal form. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  COMPZ   (input) CHARACTER*1 */
+/*          = 'N':  Compute eigenvalues only. */
+/*          = 'V':  Compute eigenvalues and eigenvectors of the original */
+/*                  Hermitian matrix.  On entry, Z must contain the */
+/*                  unitary matrix used to reduce the original matrix */
+/*                  to tridiagonal form. */
+/*          = 'I':  Compute eigenvalues and eigenvectors of the */
+/*                  tridiagonal matrix.  Z is initialized to the identity */
+/*                  matrix. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix.  N >= 0. */
+
+/*  D       (input/output) REAL array, dimension (N) */
+/*          On entry, the diagonal elements of the tridiagonal matrix. */
+/*          On exit, if INFO = 0, the eigenvalues in ascending order. */
+
+/*  E       (input/output) REAL array, dimension (N-1) */
+/*          On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/*          matrix. */
+/*          On exit, E has been destroyed. */
+
+/*  Z       (input/output) COMPLEX array, dimension (LDZ, N) */
+/*          On entry, if  COMPZ = 'V', then Z contains the unitary */
+/*          matrix used in the reduction to tridiagonal form. */
+/*          On exit, if INFO = 0, then if COMPZ = 'V', Z contains the */
+/*          orthonormal eigenvectors of the original Hermitian matrix, */
+/*          and if COMPZ = 'I', Z contains the orthonormal eigenvectors */
+/*          of the symmetric tridiagonal matrix. */
+/*          If COMPZ = 'N', then Z is not referenced. */
+
+/*  LDZ     (input) INTEGER */
+/*          The leading dimension of the array Z.  LDZ >= 1, and if */
+/*          eigenvectors are desired, then  LDZ >= max(1,N). */
+
+/*  WORK    (workspace) REAL array, dimension (max(1,2*N-2)) */
+/*          If COMPZ = 'N', then WORK is not referenced. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  the algorithm has failed to find all the eigenvalues in */
+/*                a total of 30*N iterations; if INFO = i, then i */
+/*                elements of E have not converged to zero; on exit, D */
+/*                and E contain the elements of a symmetric tridiagonal */
+/*                matrix which is unitarily similar to the original */
+/*                matrix. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* Shift the array pointers so 1-based Fortran indexing can be kept. */
+    --d__;
+    --e;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+
+/*     Decode COMPZ: ICOMPZ = 0 ('N', eigenvalues only), 1 ('V', update */
+/*     the supplied Z), 2 ('I', accumulate into the identity); -1 = bad. */
+
+    if (lsame_(compz, "N")) {
+	icompz = 0;
+    } else if (lsame_(compz, "V")) {
+	icompz = 1;
+    } else if (lsame_(compz, "I")) {
+	icompz = 2;
+    } else {
+	icompz = -1;
+    }
+    if (icompz < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*ldz < 1 || icompz > 0 && *ldz < max(1,*n)) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CSTEQR", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (*n == 1) {
+	if (icompz == 2) {
+	    i__1 = z_dim1 + 1;
+	    z__[i__1].r = 1.f, z__[i__1].i = 0.f;
+	}
+	return 0;
+    }
+
+/*     Determine the unit roundoff and over/underflow thresholds. */
+
+    eps = slamch_("E");
+/* Computing 2nd power */
+    r__1 = eps;
+    eps2 = r__1 * r__1;
+    safmin = slamch_("S");
+    safmax = 1.f / safmin;
+    ssfmax = sqrt(safmax) / 3.f;
+    ssfmin = sqrt(safmin) / eps2;
+
+/*     Compute the eigenvalues and eigenvectors of the tridiagonal */
+/*     matrix. */
+
+/*     COMPZ = 'I': start the accumulated rotations from the identity. */
+    if (icompz == 2) {
+	claset_("Full", n, n, &c_b1, &c_b2, &z__[z_offset], ldz);
+    }
+
+/*     Total iteration budget: at most 30*N implicit QL/QR steps. */
+    nmaxit = *n * 30;
+    jtot = 0;
+
+/*     Determine where the matrix splits and choose QL or QR iteration */
+/*     for each block, according to whether top or bottom diagonal */
+/*     element is smaller. */
+
+    l1 = 1;
+    nm1 = *n - 1;
+
+L10:
+    if (l1 > *n) {
+	goto L160;
+    }
+    if (l1 > 1) {
+	e[l1 - 1] = 0.f;
+    }
+    if (l1 <= nm1) {
+	i__1 = nm1;
+	for (m = l1; m <= i__1; ++m) {
+	    tst = (r__1 = e[m], dabs(r__1));
+	    if (tst == 0.f) {
+		goto L30;
+	    }
+	    if (tst <= sqrt((r__1 = d__[m], dabs(r__1))) * sqrt((r__2 = d__[m 
+		    + 1], dabs(r__2))) * eps) {
+		e[m] = 0.f;
+		goto L30;
+	    }
+/* L20: */
+	}
+    }
+    m = *n;
+
+L30:
+    l = l1;
+    lsv = l;
+    lend = m;
+    lendsv = lend;
+    l1 = m + 1;
+    if (lend == l) {
+	goto L10;
+    }
+
+/*     Scale submatrix in rows and columns L to LEND */
+
+    i__1 = lend - l + 1;
+    anorm = slanst_("I", &i__1, &d__[l], &e[l]);
+    iscale = 0;
+    if (anorm == 0.f) {
+	goto L10;
+    }
+    if (anorm > ssfmax) {
+	iscale = 1;
+	i__1 = lend - l + 1;
+	slascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n, 
+		info);
+	i__1 = lend - l;
+	slascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n, 
+		info);
+    } else if (anorm < ssfmin) {
+	iscale = 2;
+	i__1 = lend - l + 1;
+	slascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n, 
+		info);
+	i__1 = lend - l;
+	slascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n, 
+		info);
+    }
+
+/*     Choose between QL and QR iteration */
+
+    if ((r__1 = d__[lend], dabs(r__1)) < (r__2 = d__[l], dabs(r__2))) {
+	lend = lsv;
+	l = lendsv;
+    }
+
+    if (lend > l) {
+
+/*        QL Iteration */
+
+/*        Look for small subdiagonal element. */
+
+L40:
+	if (l != lend) {
+	    lendm1 = lend - 1;
+	    i__1 = lendm1;
+	    for (m = l; m <= i__1; ++m) {
+/* Computing 2nd power */
+		r__2 = (r__1 = e[m], dabs(r__1));
+		tst = r__2 * r__2;
+		if (tst <= eps2 * (r__1 = d__[m], dabs(r__1)) * (r__2 = d__[m 
+			+ 1], dabs(r__2)) + safmin) {
+		    goto L60;
+		}
+/* L50: */
+	    }
+	}
+
+	m = lend;
+
+L60:
+	if (m < lend) {
+	    e[m] = 0.f;
+	}
+	p = d__[l];
+	if (m == l) {
+	    goto L80;
+	}
+
+/*        If remaining matrix is 2-by-2, use SLAE2 or SLAEV2 */
+/*        to compute its eigensystem. */
+
+	if (m == l + 1) {
+	    if (icompz > 0) {
+		slaev2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2, &c__, &s);
+		work[l] = c__;
+		work[*n - 1 + l] = s;
+		clasr_("R", "V", "B", n, &c__2, &work[l], &work[*n - 1 + l], &
+			z__[l * z_dim1 + 1], ldz);
+	    } else {
+		slae2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2);
+	    }
+	    d__[l] = rt1;
+	    d__[l + 1] = rt2;
+	    e[l] = 0.f;
+	    l += 2;
+	    if (l <= lend) {
+		goto L40;
+	    }
+	    goto L140;
+	}
+
+	if (jtot == nmaxit) {
+	    goto L140;
+	}
+	++jtot;
+
+/*        Form shift. */
+
+	g = (d__[l + 1] - p) / (e[l] * 2.f);
+	r__ = slapy2_(&g, &c_b41);
+	g = d__[m] - p + e[l] / (g + r_sign(&r__, &g));
+
+	s = 1.f;
+	c__ = 1.f;
+	p = 0.f;
+
+/*        Inner loop */
+
+	mm1 = m - 1;
+	i__1 = l;
+	for (i__ = mm1; i__ >= i__1; --i__) {
+	    f = s * e[i__];
+	    b = c__ * e[i__];
+	    slartg_(&g, &f, &c__, &s, &r__);
+	    if (i__ != m - 1) {
+		e[i__ + 1] = r__;
+	    }
+	    g = d__[i__ + 1] - p;
+	    r__ = (d__[i__] - g) * s + c__ * 2.f * b;
+	    p = s * r__;
+	    d__[i__ + 1] = g + p;
+	    g = c__ * r__ - b;
+
+/*           If eigenvectors are desired, then save rotations. */
+
+	    if (icompz > 0) {
+		work[i__] = c__;
+		work[*n - 1 + i__] = -s;
+	    }
+
+/* L70: */
+	}
+
+/*        If eigenvectors are desired, then apply saved rotations. */
+
+	if (icompz > 0) {
+	    mm = m - l + 1;
+	    clasr_("R", "V", "B", n, &mm, &work[l], &work[*n - 1 + l], &z__[l 
+		    * z_dim1 + 1], ldz);
+	}
+
+	d__[l] -= p;
+	e[l] = g;
+	goto L40;
+
+/*        Eigenvalue found. */
+
+L80:
+	d__[l] = p;
+
+	++l;
+	if (l <= lend) {
+	    goto L40;
+	}
+	goto L140;
+
+    } else {
+
+/*        QR Iteration */
+
+/*        Look for small superdiagonal element. */
+
+L90:
+	if (l != lend) {
+	    lendp1 = lend + 1;
+	    i__1 = lendp1;
+	    for (m = l; m >= i__1; --m) {
+/* Computing 2nd power */
+		r__2 = (r__1 = e[m - 1], dabs(r__1));
+		tst = r__2 * r__2;
+		if (tst <= eps2 * (r__1 = d__[m], dabs(r__1)) * (r__2 = d__[m 
+			- 1], dabs(r__2)) + safmin) {
+		    goto L110;
+		}
+/* L100: */
+	    }
+	}
+
+	m = lend;
+
+L110:
+	if (m > lend) {
+	    e[m - 1] = 0.f;
+	}
+	p = d__[l];
+	if (m == l) {
+	    goto L130;
+	}
+
+/*        If remaining matrix is 2-by-2, use SLAE2 or SLAEV2 */
+/*        to compute its eigensystem. */
+
+	if (m == l - 1) {
+	    if (icompz > 0) {
+		slaev2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2, &c__, &s)
+			;
+		work[m] = c__;
+		work[*n - 1 + m] = s;
+		clasr_("R", "V", "F", n, &c__2, &work[m], &work[*n - 1 + m], &
+			z__[(l - 1) * z_dim1 + 1], ldz);
+	    } else {
+		slae2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2);
+	    }
+	    d__[l - 1] = rt1;
+	    d__[l] = rt2;
+	    e[l - 1] = 0.f;
+	    l += -2;
+	    if (l >= lend) {
+		goto L90;
+	    }
+	    goto L140;
+	}
+
+	if (jtot == nmaxit) {
+	    goto L140;
+	}
+	++jtot;
+
+/*        Form shift. */
+
+	g = (d__[l - 1] - p) / (e[l - 1] * 2.f);
+	r__ = slapy2_(&g, &c_b41);
+	g = d__[m] - p + e[l - 1] / (g + r_sign(&r__, &g));
+
+	s = 1.f;
+	c__ = 1.f;
+	p = 0.f;
+
+/*        Inner loop */
+
+	lm1 = l - 1;
+	i__1 = lm1;
+	for (i__ = m; i__ <= i__1; ++i__) {
+	    f = s * e[i__];
+	    b = c__ * e[i__];
+	    slartg_(&g, &f, &c__, &s, &r__);
+	    if (i__ != m) {
+		e[i__ - 1] = r__;
+	    }
+	    g = d__[i__] - p;
+	    r__ = (d__[i__ + 1] - g) * s + c__ * 2.f * b;
+	    p = s * r__;
+	    d__[i__] = g + p;
+	    g = c__ * r__ - b;
+
+/*           If eigenvectors are desired, then save rotations. */
+
+	    if (icompz > 0) {
+		work[i__] = c__;
+		work[*n - 1 + i__] = s;
+	    }
+
+/* L120: */
+	}
+
+/*        If eigenvectors are desired, then apply saved rotations. */
+
+	if (icompz > 0) {
+	    mm = l - m + 1;
+	    clasr_("R", "V", "F", n, &mm, &work[m], &work[*n - 1 + m], &z__[m 
+		    * z_dim1 + 1], ldz);
+	}
+
+	d__[l] -= p;
+	e[lm1] = g;
+	goto L90;
+
+/*        Eigenvalue found. */
+
+L130:
+	d__[l] = p;
+
+	--l;
+	if (l >= lend) {
+	    goto L90;
+	}
+	goto L140;
+
+    }
+
+/*     Undo scaling if necessary */
+
+L140:
+    if (iscale == 1) {
+	i__1 = lendsv - lsv + 1;
+	slascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv], 
+		n, info);
+	i__1 = lendsv - lsv;
+	slascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &e[lsv], n, 
+		info);
+    } else if (iscale == 2) {
+	i__1 = lendsv - lsv + 1;
+	slascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv], 
+		n, info);
+	i__1 = lendsv - lsv;
+	slascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &e[lsv], n, 
+		info);
+    }
+
+/*     Check for no convergence to an eigenvalue after a total */
+/*     of N*MAXIT iterations. */
+
+    if (jtot == nmaxit) {
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (e[i__] != 0.f) {
+		++(*info);
+	    }
+/* L150: */
+	}
+	return 0;
+    }
+    goto L10;
+
+/*     Order eigenvalues and eigenvectors. */
+
+L160:
+    if (icompz == 0) {
+
+/*        Use Quick Sort */
+
+	slasrt_("I", n, &d__[1], info);
+
+    } else {
+
+/*        Use Selection Sort to minimize swaps of eigenvectors */
+
+	i__1 = *n;
+	for (ii = 2; ii <= i__1; ++ii) {
+	    i__ = ii - 1;
+	    k = i__;
+	    p = d__[i__];
+	    i__2 = *n;
+	    for (j = ii; j <= i__2; ++j) {
+		if (d__[j] < p) {
+		    k = j;
+		    p = d__[j];
+		}
+/* L170: */
+	    }
+	    if (k != i__) {
+		d__[k] = d__[i__];
+		d__[i__] = p;
+		cswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1],
+			 &c__1);
+	    }
+/* L180: */
+	}
+    }
+    return 0;
+
+/*     End of CSTEQR */
+
+} /* csteqr_ */
diff --git a/contrib/libs/clapack/csycon.c b/contrib/libs/clapack/csycon.c
new file mode 100644
index 0000000000..fe23ded676
--- /dev/null
+++ b/contrib/libs/clapack/csycon.c
@@ -0,0 +1,201 @@
+/* csycon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* Addressable storage for the literal 1 passed by reference to CSYTRS. */
+static integer c__1 = 1;
+
+/* Subroutine */ int csycon_(char *uplo, integer *n, complex *a, integer *lda, 
+	 integer *ipiv, real *anorm, real *rcond, complex *work, integer *
+	info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, kase;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    logical upper;
+    extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real 
+	    *, integer *, integer *), xerbla_(char *, integer *);
+    real ainvnm;
+    extern /* Subroutine */ int csytrs_(char *, integer *, integer *, complex 
+	    *, integer *, integer *, complex *, integer *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CSYCON estimates the reciprocal of the condition number (in the */
+/*  1-norm) of a complex symmetric matrix A using the factorization */
+/*  A = U*D*U**T or A = L*D*L**T computed by CSYTRF. */
+
+/*  An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/*  condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the details of the factorization are stored */
+/*          as an upper or lower triangular matrix. */
+/*          = 'U':  Upper triangular, form is A = U*D*U**T; */
+/*          = 'L':  Lower triangular, form is A = L*D*L**T. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  A       (input) COMPLEX array, dimension (LDA,N) */
+/*          The block diagonal matrix D and the multipliers used to */
+/*          obtain the factor U or L as computed by CSYTRF. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  IPIV    (input) INTEGER array, dimension (N) */
+/*          Details of the interchanges and the block structure of D */
+/*          as determined by CSYTRF. */
+
+/*  ANORM   (input) REAL */
+/*          The 1-norm of the original matrix A. */
+
+/*  RCOND   (output) REAL */
+/*          The reciprocal of the condition number of the matrix A, */
+/*          computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/*          estimate of the 1-norm of inv(A) computed in this routine. */
+
+/*  WORK    (workspace) COMPLEX array, dimension (2*N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* Shift the array pointers so 1-based Fortran indexing can be kept. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*anorm < 0.f) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CSYCON", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    *rcond = 0.f;
+    if (*n == 0) {
+	*rcond = 1.f;
+	return 0;
+    } else if (*anorm <= 0.f) {
+	return 0;
+    }
+
+/*     Check that the diagonal matrix D is nonsingular. */
+/*     A zero 1-by-1 diagonal pivot (IPIV(I) > 0) makes D, and hence A, */
+/*     exactly singular, so RCOND stays 0 and we return early. */
+
+    if (upper) {
+
+/*        Upper triangular storage: examine D from bottom to top */
+
+	for (i__ = *n; i__ >= 1; --i__) {
+	    i__1 = i__ + i__ * a_dim1;
+	    if (ipiv[i__] > 0 && (a[i__1].r == 0.f && a[i__1].i == 0.f)) {
+		return 0;
+	    }
+/* L10: */
+	}
+    } else {
+
+/*        Lower triangular storage: examine D from top to bottom. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__ + i__ * a_dim1;
+	    if (ipiv[i__] > 0 && (a[i__2].r == 0.f && a[i__2].i == 0.f)) {
+		return 0;
+	    }
+/* L20: */
+	}
+    }
+
+/*     Estimate the 1-norm of the inverse. */
+/*     CLACN2 works by reverse communication: while it returns KASE != 0 */
+/*     it asks for the product inv(A)*x, which CSYTRS supplies from the */
+/*     CSYTRF factorization; KASE = 0 means the estimate AINVNM is final. */
+
+    kase = 0;
+L30:
+    clacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+    if (kase != 0) {
+
+/*        Multiply by inv(L*D*L') or inv(U*D*U'). */
+
+	csytrs_(uplo, n, &c__1, &a[a_offset], lda, &ipiv[1], &work[1], n, 
+		info);
+	goto L30;
+    }
+
+/*     Compute the estimate of the reciprocal condition number. */
+
+    if (ainvnm != 0.f) {
+	*rcond = 1.f / ainvnm / *anorm;
+    }
+
+    return 0;
+
+/*     End of CSYCON */
+
+} /* csycon_ */
diff --git a/contrib/libs/clapack/csyequb.c b/contrib/libs/clapack/csyequb.c
new file mode 100644
index 0000000000..d2e20e34ca
--- /dev/null
+++ b/contrib/libs/clapack/csyequb.c
@@ -0,0 +1,451 @@
+/* csyequb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int csyequb_(char *uplo, integer *n, complex *a, integer *
+ lda, real *s, real *scond, real *amax, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4;
+ doublereal d__1;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Builtin functions */
+ double r_imag(complex *), sqrt(doublereal), log(doublereal), pow_ri(real *
+ , integer *);
+
+ /* Local variables */
+ real d__;
+ integer i__, j;
+ real t, u, c0, c1, c2, si;
+ logical up;
+ real avg, std, tol, base;
+ integer iter;
+ real smin, smax, scale;
+ extern logical lsame_(char *, char *);
+ real sumsq;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ extern /* Subroutine */ int classq_(integer *, complex *, integer *, real
+ *, real *);
+ real smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- Contributed by James Demmel, Deaglan Halligan, Yozo Hida and -- */
+/* -- Jason Riedy of Univ. of California Berkeley. -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley and NAG Ltd. -- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSYEQUB computes row and column scalings intended to equilibrate a */
+/* symmetric matrix A and reduce its condition number */
+/* (with respect to the two-norm). S contains the scale factors, */
+/* S(i) = 1/sqrt(A(i,i)), chosen so that the scaled matrix B with */
+/* elements B(i,j) = S(i)*A(i,j)*S(j) has ones on the diagonal. This */
+/* choice of S puts the condition number of B within a factor N of the */
+/* smallest possible condition number over all possible diagonal */
+/* scalings. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The N-by-N symmetric matrix whose scaling */
+/* factors are to be computed. Only the diagonal elements of A */
+/* are referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* S (output) REAL array, dimension (N) */
+/* If INFO = 0, S contains the scale factors for A. */
+
+/* SCOND (output) REAL */
+/* If INFO = 0, S contains the ratio of the smallest S(i) to */
+/* the largest S(i). If SCOND >= 0.1 and AMAX is neither too */
+/* large nor too small, it is not worth scaling by S. */
+
+/* AMAX (output) REAL */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element is nonpositive. */
+
+/* Further Details */
+/* ======= ======= */
+
+/* Reference: Livne, O.E. and Golub, G.H., "Scaling by Binormalization", */
+/* Numerical Algorithms, vol. 35, no. 1, pp. 97-120, January 2004. */
+/* DOI 10.1023/B:NUMA.0000016606.32820.69 */
+/* Tech report version: http://ruready.utah.edu/archive/papers/bin.pdf */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* Statement Function Definitions */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (! (lsame_(uplo, "U") || lsame_(uplo, "L"))) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CSYEQUB", &i__1);
+ return 0;
+ }
+ up = lsame_(uplo, "U");
+ *amax = 0.f;
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ *scond = 1.f;
+ return 0;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ s[i__] = 0.f;
+ }
+ *amax = 0.f;
+ if (up) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ r__3 = s[i__], r__4 = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[i__ + j * a_dim1]), dabs(r__2));
+ s[i__] = dmax(r__3,r__4);
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ r__3 = s[j], r__4 = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[i__ + j * a_dim1]), dabs(r__2));
+ s[j] = dmax(r__3,r__4);
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ r__3 = *amax, r__4 = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[i__ + j * a_dim1]), dabs(r__2));
+ *amax = dmax(r__3,r__4);
+ }
+/* Computing MAX */
+ i__2 = j + j * a_dim1;
+ r__3 = s[j], r__4 = (r__1 = a[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[j + j * a_dim1]), dabs(r__2));
+ s[j] = dmax(r__3,r__4);
+/* Computing MAX */
+ i__2 = j + j * a_dim1;
+ r__3 = *amax, r__4 = (r__1 = a[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[j + j * a_dim1]), dabs(r__2));
+ *amax = dmax(r__3,r__4);
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j + j * a_dim1;
+ r__3 = s[j], r__4 = (r__1 = a[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[j + j * a_dim1]), dabs(r__2));
+ s[j] = dmax(r__3,r__4);
+/* Computing MAX */
+ i__2 = j + j * a_dim1;
+ r__3 = *amax, r__4 = (r__1 = a[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[j + j * a_dim1]), dabs(r__2));
+ *amax = dmax(r__3,r__4);
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ r__3 = s[i__], r__4 = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[i__ + j * a_dim1]), dabs(r__2));
+ s[i__] = dmax(r__3,r__4);
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ r__3 = s[j], r__4 = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[i__ + j * a_dim1]), dabs(r__2));
+ s[j] = dmax(r__3,r__4);
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ r__3 = *amax, r__4 = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[i__ + j * a_dim1]), dabs(r__2));
+ *amax = dmax(r__3,r__4);
+ }
+ }
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ s[j] = 1.f / s[j];
+ }
+ tol = 1.f / sqrt(*n * 2.f);
+ for (iter = 1; iter <= 100; ++iter) {
+ scale = 0.f;
+ sumsq = 0.f;
+/* beta = |A|s */
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ work[i__2].r = 0.f, work[i__2].i = 0.f;
+ }
+ if (up) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ t = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 = r_imag(&a[
+ i__ + j * a_dim1]), dabs(r__2));
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ r__3 = ((r__1 = a[i__5].r, dabs(r__1)) + (r__2 = r_imag(&
+ a[i__ + j * a_dim1]), dabs(r__2))) * s[j];
+ q__1.r = work[i__4].r + r__3, q__1.i = work[i__4].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ i__3 = j;
+ i__4 = j;
+ i__5 = i__ + j * a_dim1;
+ r__3 = ((r__1 = a[i__5].r, dabs(r__1)) + (r__2 = r_imag(&
+ a[i__ + j * a_dim1]), dabs(r__2))) * s[i__];
+ q__1.r = work[i__4].r + r__3, q__1.i = work[i__4].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ }
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ r__3 = ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 = r_imag(&a[j
+ + j * a_dim1]), dabs(r__2))) * s[j];
+ q__1.r = work[i__3].r + r__3, q__1.i = work[i__3].i;
+ work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ r__3 = ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 = r_imag(&a[j
+ + j * a_dim1]), dabs(r__2))) * s[j];
+ q__1.r = work[i__3].r + r__3, q__1.i = work[i__3].i;
+ work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ t = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 = r_imag(&a[
+ i__ + j * a_dim1]), dabs(r__2));
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ r__3 = ((r__1 = a[i__5].r, dabs(r__1)) + (r__2 = r_imag(&
+ a[i__ + j * a_dim1]), dabs(r__2))) * s[j];
+ q__1.r = work[i__4].r + r__3, q__1.i = work[i__4].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ i__3 = j;
+ i__4 = j;
+ i__5 = i__ + j * a_dim1;
+ r__3 = ((r__1 = a[i__5].r, dabs(r__1)) + (r__2 = r_imag(&
+ a[i__ + j * a_dim1]), dabs(r__2))) * s[i__];
+ q__1.r = work[i__4].r + r__3, q__1.i = work[i__4].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ }
+ }
+ }
+/* avg = s^T beta / n */
+ avg = 0.f;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ q__2.r = s[i__2] * work[i__3].r, q__2.i = s[i__2] * work[i__3].i;
+ q__1.r = avg + q__2.r, q__1.i = q__2.i;
+ avg = q__1.r;
+ }
+ avg /= *n;
+ std = 0.f;
+ i__1 = *n << 1;
+ for (i__ = *n + 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__ - *n;
+ i__4 = i__ - *n;
+ q__2.r = s[i__3] * work[i__4].r, q__2.i = s[i__3] * work[i__4].i;
+ q__1.r = q__2.r - avg, q__1.i = q__2.i;
+ work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+ }
+ classq_(n, &work[*n + 1], &c__1, &scale, &sumsq);
+ std = scale * sqrt(sumsq / *n);
+ if (std < tol * avg) {
+ goto L999;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ t = (r__1 = a[i__2].r, dabs(r__1)) + (r__2 = r_imag(&a[i__ + i__ *
+ a_dim1]), dabs(r__2));
+ si = s[i__];
+ c2 = (*n - 1) * t;
+ i__2 = *n - 2;
+ i__3 = i__;
+ r__1 = t * si;
+ q__2.r = work[i__3].r - r__1, q__2.i = work[i__3].i;
+ d__1 = (doublereal) i__2;
+ q__1.r = d__1 * q__2.r, q__1.i = d__1 * q__2.i;
+ c1 = q__1.r;
+ r__1 = -(t * si) * si;
+ i__2 = i__;
+ d__1 = 2.;
+ q__4.r = d__1 * work[i__2].r, q__4.i = d__1 * work[i__2].i;
+ q__3.r = si * q__4.r, q__3.i = si * q__4.i;
+ q__2.r = r__1 + q__3.r, q__2.i = q__3.i;
+ r__2 = *n * avg;
+ q__1.r = q__2.r - r__2, q__1.i = q__2.i;
+ c0 = q__1.r;
+ d__ = c1 * c1 - c0 * 4 * c2;
+ if (d__ <= 0.f) {
+ *info = -1;
+ return 0;
+ }
+ si = c0 * -2 / (c1 + sqrt(d__));
+ d__ = si - s[i__];
+ u = 0.f;
+ if (up) {
+ i__2 = i__;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = j + i__ * a_dim1;
+ t = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 = r_imag(&a[j
+ + i__ * a_dim1]), dabs(r__2));
+ u += s[j] * t;
+ i__3 = j;
+ i__4 = j;
+ r__1 = d__ * t;
+ q__1.r = work[i__4].r + r__1, q__1.i = work[i__4].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ }
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ i__3 = i__ + j * a_dim1;
+ t = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 = r_imag(&a[
+ i__ + j * a_dim1]), dabs(r__2));
+ u += s[j] * t;
+ i__3 = j;
+ i__4 = j;
+ r__1 = d__ * t;
+ q__1.r = work[i__4].r + r__1, q__1.i = work[i__4].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ }
+ } else {
+ i__2 = i__;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = i__ + j * a_dim1;
+ t = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 = r_imag(&a[
+ i__ + j * a_dim1]), dabs(r__2));
+ u += s[j] * t;
+ i__3 = j;
+ i__4 = j;
+ r__1 = d__ * t;
+ q__1.r = work[i__4].r + r__1, q__1.i = work[i__4].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ }
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ i__3 = j + i__ * a_dim1;
+ t = (r__1 = a[i__3].r, dabs(r__1)) + (r__2 = r_imag(&a[j
+ + i__ * a_dim1]), dabs(r__2));
+ u += s[j] * t;
+ i__3 = j;
+ i__4 = j;
+ r__1 = d__ * t;
+ q__1.r = work[i__4].r + r__1, q__1.i = work[i__4].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+ }
+ }
+ i__2 = i__;
+ q__4.r = u + work[i__2].r, q__4.i = work[i__2].i;
+ q__3.r = d__ * q__4.r, q__3.i = d__ * q__4.i;
+ d__1 = (doublereal) (*n);
+ q__2.r = q__3.r / d__1, q__2.i = q__3.i / d__1;
+ q__1.r = avg + q__2.r, q__1.i = q__2.i;
+ avg = q__1.r;
+ s[i__] = si;
+ }
+ }
+L999:
+ smlnum = slamch_("SAFEMIN");
+ bignum = 1.f / smlnum;
+ smin = bignum;
+ smax = 0.f;
+ t = 1.f / sqrt(avg);
+ base = slamch_("B");
+ u = 1.f / log(base);
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = (integer) (u * log(s[i__] * t));
+ s[i__] = pow_ri(&base, &i__2);
+/* Computing MIN */
+ r__1 = smin, r__2 = s[i__];
+ smin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = smax, r__2 = s[i__];
+ smax = dmax(r__1,r__2);
+ }
+ *scond = dmax(smin,smlnum) / dmin(smax,bignum);
+
+ return 0;
+} /* csyequb_ */
diff --git a/contrib/libs/clapack/csymv.c b/contrib/libs/clapack/csymv.c
new file mode 100644
index 0000000000..d6851ee089
--- /dev/null
+++ b/contrib/libs/clapack/csymv.c
@@ -0,0 +1,429 @@
+/* csymv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int csymv_(char *uplo, integer *n, complex *alpha, complex *
+ a, integer *lda, complex *x, integer *incx, complex *beta, complex *y,
+ integer *incy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Local variables */
+ integer i__, j, ix, iy, jx, jy, kx, ky, info;
+ complex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSYMV performs the matrix-vector operation */
+
+/* y := alpha*A*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are n element vectors and */
+/* A is an n by n symmetric matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO (input) CHARACTER*1 */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array A is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of A */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of A */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* N (input) INTEGER */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA (input) COMPLEX */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A (input) COMPLEX array, dimension ( LDA, N ) */
+/* Before entry, with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular part of the symmetric matrix and the strictly */
+/* lower triangular part of A is not referenced. */
+/* Before entry, with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular part of the symmetric matrix and the strictly */
+/* upper triangular part of A is not referenced. */
+/* Unchanged on exit. */
+
+/* LDA (input) INTEGER */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, N ). */
+/* Unchanged on exit. */
+
+/* X (input) COMPLEX array, dimension at least */
+/* ( 1 + ( N - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the N- */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX (input) INTEGER */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA (input) COMPLEX */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then Y need not be set on input. */
+/* Unchanged on exit. */
+
+/* Y (input/output) COMPLEX array, dimension at least */
+/* ( 1 + ( N - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. On exit, Y is overwritten by the updated */
+/* vector y. */
+
+/* INCY (input) INTEGER */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --y;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*lda < max(1,*n)) {
+ info = 5;
+ } else if (*incx == 0) {
+ info = 7;
+ } else if (*incy == 0) {
+ info = 10;
+ }
+ if (info != 0) {
+ xerbla_("CSYMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || alpha->r == 0.f && alpha->i == 0.f && (beta->r == 1.f &&
+ beta->i == 0.f)) {
+ return 0;
+ }
+
+/* Set up the start points in X and Y. */
+
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through the triangular part */
+/* of A. */
+
+/* First form y := beta*y. */
+
+ if (beta->r != 1.f || beta->i != 0.f) {
+ if (*incy == 1) {
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ y[i__2].r = 0.f, y[i__2].i = 0.f;
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ y[i__2].r = 0.f, y[i__2].i = 0.f;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = iy;
+ q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (alpha->r == 0.f && alpha->i == 0.f) {
+ return 0;
+ }
+ if (lsame_(uplo, "U")) {
+
+/* Form y when A is stored in upper triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ temp2.r = 0.f, temp2.i = 0.f;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__;
+ q__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4].i,
+ q__2.i = a[i__3].r * x[i__4].i + a[i__3].i * x[
+ i__4].r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+/* L50: */
+ }
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ q__3.r = temp1.r * a[i__4].r - temp1.i * a[i__4].i, q__3.i =
+ temp1.r * a[i__4].i + temp1.i * a[i__4].r;
+ q__2.r = y[i__3].r + q__3.r, q__2.i = y[i__3].i + q__3.i;
+ q__4.r = alpha->r * temp2.r - alpha->i * temp2.i, q__4.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ temp2.r = 0.f, temp2.i = 0.f;
+ ix = kx;
+ iy = ky;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = iy;
+ i__4 = iy;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+ i__3 = i__ + j * a_dim1;
+ i__4 = ix;
+ q__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4].i,
+ q__2.i = a[i__3].r * x[i__4].i + a[i__3].i * x[
+ i__4].r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L70: */
+ }
+ i__2 = jy;
+ i__3 = jy;
+ i__4 = j + j * a_dim1;
+ q__3.r = temp1.r * a[i__4].r - temp1.i * a[i__4].i, q__3.i =
+ temp1.r * a[i__4].i + temp1.i * a[i__4].r;
+ q__2.r = y[i__3].r + q__3.r, q__2.i = y[i__3].i + q__3.i;
+ q__4.r = alpha->r * temp2.r - alpha->i * temp2.i, q__4.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ jx += *incx;
+ jy += *incy;
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y when A is stored in lower triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ temp2.r = 0.f, temp2.i = 0.f;
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ q__2.r = temp1.r * a[i__4].r - temp1.i * a[i__4].i, q__2.i =
+ temp1.r * a[i__4].i + temp1.i * a[i__4].r;
+ q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__;
+ q__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4].i,
+ q__2.i = a[i__3].r * x[i__4].i + a[i__3].i * x[
+ i__4].r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+/* L90: */
+ }
+ i__2 = j;
+ i__3 = j;
+ q__2.r = alpha->r * temp2.r - alpha->i * temp2.i, q__2.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+/* L100: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ temp2.r = 0.f, temp2.i = 0.f;
+ i__2 = jy;
+ i__3 = jy;
+ i__4 = j + j * a_dim1;
+ q__2.r = temp1.r * a[i__4].r - temp1.i * a[i__4].i, q__2.i =
+ temp1.r * a[i__4].i + temp1.i * a[i__4].r;
+ q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ ix = jx;
+ iy = jy;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ iy += *incy;
+ i__3 = iy;
+ i__4 = iy;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+ i__3 = i__ + j * a_dim1;
+ i__4 = ix;
+ q__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4].i,
+ q__2.i = a[i__3].r * x[i__4].i + a[i__3].i * x[
+ i__4].r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+/* L110: */
+ }
+ i__2 = jy;
+ i__3 = jy;
+ q__2.r = alpha->r * temp2.r - alpha->i * temp2.i, q__2.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ jx += *incx;
+ jy += *incy;
+/* L120: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CSYMV */
+
+} /* csymv_ */
diff --git a/contrib/libs/clapack/csyr.c b/contrib/libs/clapack/csyr.c
new file mode 100644
index 0000000000..d09dcea1bc
--- /dev/null
+++ b/contrib/libs/clapack/csyr.c
@@ -0,0 +1,289 @@
+/* csyr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int csyr_(char *uplo, integer *n, complex *alpha, complex *x,
+ integer *incx, complex *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1, q__2;
+
+ /* Local variables */
+ integer i__, j, ix, jx, kx, info;
+ complex temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSYR performs the symmetric rank 1 operation */
+
+/* A := alpha*x*( x' ) + A, */
+
+/* where alpha is a complex scalar, x is an n element vector and A is an */
+/* n by n symmetric matrix. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO (input) CHARACTER*1 */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array A is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of A */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of A */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* N (input) INTEGER */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA (input) COMPLEX */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X (input) COMPLEX array, dimension at least */
+/* ( 1 + ( N - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the N- */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX (input) INTEGER */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* A (input/output) COMPLEX array, dimension ( LDA, N ) */
+/* Before entry, with UPLO = 'U' or 'u', the leading n by n */
+/* upper triangular part of the array A must contain the upper */
+/* triangular part of the symmetric matrix and the strictly */
+/* lower triangular part of A is not referenced. On exit, the */
+/* upper triangular part of the array A is overwritten by the */
+/* upper triangular part of the updated matrix. */
+/* Before entry, with UPLO = 'L' or 'l', the leading n by n */
+/* lower triangular part of the array A must contain the lower */
+/* triangular part of the symmetric matrix and the strictly */
+/* upper triangular part of A is not referenced. On exit, the */
+/* lower triangular part of the array A is overwritten by the */
+/* lower triangular part of the updated matrix. */
+
+/* LDA (input) INTEGER */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. LDA must be at least */
+/* max( 1, N ). */
+/* Unchanged on exit. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --x;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*lda < max(1,*n)) {
+ info = 7;
+ }
+ if (info != 0) {
+ xerbla_("CSYR ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || alpha->r == 0.f && alpha->i == 0.f) {
+ return 0;
+ }
+
+/* Set the start point in X if the increment is not unity. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of A are */
+/* accessed sequentially with one pass through the triangular part */
+/* of A. */
+
+ if (lsame_(uplo, "U")) {
+
+/* Form A when A is stored in upper triangle. */
+
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+ i__2 = j;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ q__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ q__1.r = a[i__4].r + q__2.r, q__1.i = a[i__4].i +
+ q__2.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+ i__2 = jx;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix = kx;
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ q__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ q__1.r = a[i__4].r + q__2.r, q__1.i = a[i__4].i +
+ q__2.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ix += *incx;
+/* L30: */
+ }
+ }
+ jx += *incx;
+/* L40: */
+ }
+ }
+ } else {
+
+/* Form A when A is stored in lower triangle. */
+
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+ i__2 = j;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ q__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ q__1.r = a[i__4].r + q__2.r, q__1.i = a[i__4].i +
+ q__2.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0.f || x[i__2].i != 0.f) {
+ i__2 = jx;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix = jx;
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ q__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ q__1.r = a[i__4].r + q__2.r, q__1.i = a[i__4].i +
+ q__2.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ix += *incx;
+/* L70: */
+ }
+ }
+ jx += *incx;
+/* L80: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CSYR */
+
+} /* csyr_ */
diff --git a/contrib/libs/clapack/csyrfs.c b/contrib/libs/clapack/csyrfs.c
new file mode 100644
index 0000000000..730bd46609
--- /dev/null
+++ b/contrib/libs/clapack/csyrfs.c
@@ -0,0 +1,473 @@
+/* csyrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int csyrfs_(char *uplo, integer *n, integer *nrhs, complex *
+ a, integer *lda, complex *af, integer *ldaf, integer *ipiv, complex *
+ b, integer *ldb, complex *x, integer *ldx, real *ferr, real *berr,
+ complex *work, real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+ x_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer i__, j, k;
+ real s, xk;
+ integer nz;
+ real eps;
+ integer kase;
+ real safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), caxpy_(integer *, complex *, complex *,
+ integer *, complex *, integer *);
+ integer count;
+ logical upper;
+ extern /* Subroutine */ int csymv_(char *, integer *, complex *, complex *
+, integer *, complex *, integer *, complex *, complex *, integer *
+), clacn2_(integer *, complex *, complex *, real *,
+ integer *, integer *);
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real lstres;
+ extern /* Subroutine */ int csytrs_(char *, integer *, integer *, complex
+ *, integer *, integer *, complex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSYRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is symmetric indefinite, and */
+/* provides error bounds and backward error estimates for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The symmetric matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input) COMPLEX array, dimension (LDAF,N) */
+/* The factored form of the matrix A. AF contains the block */
+/* diagonal matrix D and the multipliers used to obtain the */
+/* factor U or L from the factorization A = U*D*U**T or */
+/* A = L*D*L**T as computed by CSYTRF. */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by CSYTRF. */
+
+/* B (input) COMPLEX array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by CSYTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ af_dim1 = *ldaf;
+ af_offset = 1 + af_dim1;
+ af -= af_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldaf < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldx < max(1,*n)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CSYRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.f;
+ berr[j] = 0.f;
+/* L10: */
+ }
+ return 0;
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = *n + 1;
+ eps = slamch_("Epsilon");
+ safmin = slamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+ ccopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+ q__1.r = -1.f, q__1.i = -0.f;
+ csymv_(uplo, n, &q__1, &a[a_offset], lda, &x[j * x_dim1 + 1], &c__1, &
+ c_b1, &work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[i__] = (r__1 = b[i__3].r, dabs(r__1)) + (r__2 = r_imag(&b[
+ i__ + j * b_dim1]), dabs(r__2));
+/* L30: */
+ }
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+ if (upper) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ i__3 = k + j * x_dim1;
+ xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[k + j
+ * x_dim1]), dabs(r__2));
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ rwork[i__] += ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[i__ + k * a_dim1]), dabs(r__2))) * xk;
+ i__4 = i__ + k * a_dim1;
+ i__5 = i__ + j * x_dim1;
+ s += ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 = r_imag(&a[
+ i__ + k * a_dim1]), dabs(r__2))) * ((r__3 = x[
+ i__5].r, dabs(r__3)) + (r__4 = r_imag(&x[i__ + j *
+ x_dim1]), dabs(r__4)));
+/* L40: */
+ }
+ i__3 = k + k * a_dim1;
+ rwork[k] = rwork[k] + ((r__1 = a[i__3].r, dabs(r__1)) + (r__2
+ = r_imag(&a[k + k * a_dim1]), dabs(r__2))) * xk + s;
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ i__3 = k + j * x_dim1;
+ xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[k + j
+ * x_dim1]), dabs(r__2));
+ i__3 = k + k * a_dim1;
+ rwork[k] += ((r__1 = a[i__3].r, dabs(r__1)) + (r__2 = r_imag(&
+ a[k + k * a_dim1]), dabs(r__2))) * xk;
+ i__3 = *n;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ rwork[i__] += ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 =
+ r_imag(&a[i__ + k * a_dim1]), dabs(r__2))) * xk;
+ i__4 = i__ + k * a_dim1;
+ i__5 = i__ + j * x_dim1;
+ s += ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 = r_imag(&a[
+ i__ + k * a_dim1]), dabs(r__2))) * ((r__3 = x[
+ i__5].r, dabs(r__3)) + (r__4 = r_imag(&x[i__ + j *
+ x_dim1]), dabs(r__4)));
+/* L60: */
+ }
+ rwork[k] += s;
+/* L70: */
+ }
+ }
+ s = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+/* Computing MAX */
+ i__3 = i__;
+ r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2))) / rwork[i__];
+ s = dmax(r__3,r__4);
+ } else {
+/* Computing MAX */
+ i__3 = i__;
+ r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + safe1) / (rwork[i__]
+ + safe1);
+ s = dmax(r__3,r__4);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ csytrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[1],
+ n, info);
+ caxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use CLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+ i__3 = i__;
+ rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+ i__];
+ } else {
+ i__3 = i__;
+ rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+ i__] + safe1;
+ }
+/* L90: */
+ }
+
+ kase = 0;
+L100:
+ clacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+ csytrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[
+ 1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L110: */
+ }
+ } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L120: */
+ }
+ csytrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[
+ 1], n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * x_dim1;
+ r__3 = lstres, r__4 = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[i__ + j * x_dim1]), dabs(r__2));
+ lstres = dmax(r__3,r__4);
+/* L130: */
+ }
+ if (lstres != 0.f) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of CSYRFS */
+
+} /* csyrfs_ */
diff --git a/contrib/libs/clapack/csysv.c b/contrib/libs/clapack/csysv.c
new file mode 100644
index 0000000000..c5e682ddad
--- /dev/null
+++ b/contrib/libs/clapack/csysv.c
@@ -0,0 +1,214 @@
+/* csysv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int csysv_(char *uplo, integer *n, integer *nrhs, complex *a,
+ integer *lda, integer *ipiv, complex *b, integer *ldb, complex *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ integer nb;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int csytrf_(char *, integer *, complex *, integer
+ *, integer *, complex *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int csytrs_(char *, integer *, integer *, complex
+ *, integer *, integer *, complex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSYSV computes the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric matrix and X and B are N-by-NRHS */
+/* matrices. */
+
+/* The diagonal pivoting method is used to factor A as */
+/* A = U * D * U**T, if UPLO = 'U', or */
+/* A = L * D * L**T, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is symmetric and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. The factored form of A is then */
+/* used to solve the system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the block diagonal matrix D and the */
+/* multipliers used to obtain the factor U or L from the */
+/* factorization A = U*D*U**T or A = L*D*L**T as computed by */
+/* CSYTRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D, as */
+/* determined by CSYTRF. If IPIV(k) > 0, then rows and columns */
+/* k and IPIV(k) were interchanged, and D(k,k) is a 1-by-1 */
+/* diagonal block. If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, */
+/* then rows and columns k-1 and -IPIV(k) were interchanged and */
+/* D(k-1:k,k-1:k) is a 2-by-2 diagonal block. If UPLO = 'L' and */
+/* IPIV(k) = IPIV(k+1) < 0, then rows and columns k+1 and */
+/* -IPIV(k) were interchanged and D(k:k+1,k:k+1) is a 2-by-2 */
+/* diagonal block. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of WORK. LWORK >= 1, and for best performance */
+/* LWORK >= max(1,N*NB), where NB is the optimal blocksize for */
+/* CSYTRF. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, so the solution could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ } else if (*lwork < 1 && ! lquery) {
+ *info = -10;
+ }
+
+ if (*info == 0) {
+ if (*n == 0) {
+ lwkopt = 1;
+ } else {
+ nb = ilaenv_(&c__1, "CSYTRF", uplo, n, &c_n1, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CSYSV ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+ csytrf_(uplo, n, &a[a_offset], lda, &ipiv[1], &work[1], lwork, info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ csytrs_(uplo, n, nrhs, &a[a_offset], lda, &ipiv[1], &b[b_offset], ldb,
+ info);
+
+ }
+
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CSYSV */
+
+} /* csysv_ */
diff --git a/contrib/libs/clapack/csysvx.c b/contrib/libs/clapack/csysvx.c
new file mode 100644
index 0000000000..233fa11e10
--- /dev/null
+++ b/contrib/libs/clapack/csysvx.c
@@ -0,0 +1,368 @@
+/* csysvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int csysvx_(char *fact, char *uplo, integer *n, integer *
+ nrhs, complex *a, integer *lda, complex *af, integer *ldaf, integer *
+ ipiv, complex *b, integer *ldb, complex *x, integer *ldx, real *rcond,
+ real *ferr, real *berr, complex *work, integer *lwork, real *rwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+ x_offset, i__1, i__2;
+
+ /* Local variables */
+ integer nb;
+ extern logical lsame_(char *, char *);
+ real anorm;
+ extern doublereal slamch_(char *);
+ logical nofact;
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), xerbla_(char *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern doublereal clansy_(char *, char *, integer *, complex *, integer *,
+ real *);
+ extern /* Subroutine */ int csycon_(char *, integer *, complex *, integer
+ *, integer *, real *, real *, complex *, integer *),
+ csyrfs_(char *, integer *, integer *, complex *, integer *,
+ complex *, integer *, integer *, complex *, integer *, complex *,
+ integer *, real *, real *, complex *, real *, integer *),
+ csytrf_(char *, integer *, complex *, integer *, integer *,
+ complex *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int csytrs_(char *, integer *, integer *, complex
+ *, integer *, integer *, complex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSYSVX uses the diagonal pivoting factorization to compute the */
+/* solution to a complex system of linear equations A * X = B, */
+/* where A is an N-by-N symmetric matrix and X and B are N-by-NRHS */
+/* matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the diagonal pivoting method is used to factor A. */
+/* The form of the factorization is */
+/* A = U * D * U**T, if UPLO = 'U', or */
+/* A = L * D * L**T, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is symmetric and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* 2. If some D(i,i)=0, so that D is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of A has been */
+/* supplied on entry. */
+/* = 'F': On entry, AF and IPIV contain the factored form */
+/* of A. A, AF and IPIV will not be modified. */
+/* = 'N': The matrix A will be copied to AF and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The symmetric matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input or output) COMPLEX array, dimension (LDAF,N) */
+/* If FACT = 'F', then AF is an input argument and on entry */
+/* contains the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**T or A = L*D*L**T as computed by CSYTRF. */
+
+/* If FACT = 'N', then AF is an output argument and on exit */
+/* returns the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**T or A = L*D*L**T. */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by CSYTRF. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by CSYTRF. */
+
+/* B (input) COMPLEX array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) REAL */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A. If RCOND is less than the machine precision (in */
+/* particular, if RCOND = 0), the matrix is singular to working */
+/* precision. This condition is indicated by a return code of */
+/* INFO > 0. */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of WORK. LWORK >= max(1,2*N), and for best */
+/* performance, when FACT = 'N', LWORK >= max(1,2*N,N*NB), where */
+/* NB is the optimal blocksize for CSYTRF. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: D(i,i) is exactly zero. The factorization */
+/* has been completed but the factor D is exactly */
+/* singular, so the solution and error bounds could */
+/* not be computed. RCOND = 0 is returned. */
+/* = N+1: D is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ af_dim1 = *ldaf;
+ af_offset = 1 + af_dim1;
+ af -= af_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ lquery = *lwork == -1;
+ if (! nofact && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! lsame_(uplo, "U") && ! lsame_(uplo,
+ "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else if (*ldaf < max(1,*n)) {
+ *info = -8;
+ } else if (*ldb < max(1,*n)) {
+ *info = -11;
+ } else if (*ldx < max(1,*n)) {
+ *info = -13;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n << 1;
+ if (*lwork < max(i__1,i__2) && ! lquery) {
+ *info = -18;
+ }
+ }
+
+ if (*info == 0) {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n << 1;
+ lwkopt = max(i__1,i__2);
+ if (nofact) {
+ nb = ilaenv_(&c__1, "CSYTRF", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = lwkopt, i__2 = *n * nb;
+ lwkopt = max(i__1,i__2);
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CSYSVX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+ if (nofact) {
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+ clacpy_(uplo, n, n, &a[a_offset], lda, &af[af_offset], ldaf);
+ csytrf_(uplo, n, &af[af_offset], ldaf, &ipiv[1], &work[1], lwork,
+ info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.f;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ anorm = clansy_("I", uplo, n, &a[a_offset], lda, &rwork[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ csycon_(uplo, n, &af[af_offset], ldaf, &ipiv[1], &anorm, rcond, &work[1],
+ info);
+
+/* Compute the solution vectors X. */
+
+ clacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ csytrs_(uplo, n, nrhs, &af[af_offset], ldaf, &ipiv[1], &x[x_offset], ldx,
+ info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+ csyrfs_(uplo, n, nrhs, &a[a_offset], lda, &af[af_offset], ldaf, &ipiv[1],
+ &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[1]
+, &rwork[1], info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < slamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CSYSVX */
+
+} /* csysvx_ */
diff --git a/contrib/libs/clapack/csytf2.c b/contrib/libs/clapack/csytf2.c
new file mode 100644
index 0000000000..5d00495bb0
--- /dev/null
+++ b/contrib/libs/clapack/csytf2.c
@@ -0,0 +1,727 @@
+/* csytf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int csytf2_(char *uplo, integer *n, complex *a, integer *lda,
+ integer *ipiv, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+    real r__1, r__2, r__3, r__4;
+    complex q__1, q__2, q__3, q__4;
+
+    /* Builtin functions */
+    double sqrt(doublereal), r_imag(complex *);
+    void c_div(complex *, complex *, complex *);
+
+    /* Local variables */
+    integer i__, j, k;
+    complex t, r1, d11, d12, d21, d22;
+    integer kk, kp;
+    complex wk, wkm1, wkp1;
+    integer imax, jmax;
+    extern /* Subroutine */ int csyr_(char *, integer *, complex *, complex *,
+ integer *, complex *, integer *);
+    real alpha;
+    extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+    integer kstep;
+    logical upper;
+    real absakk;
+    extern integer icamax_(integer *, complex *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real colmax;
+    extern logical sisnan_(real *);
+    real rowmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSYTF2 computes the factorization of a complex symmetric matrix A */
+/* using the Bunch-Kaufman diagonal pivoting method: */
+
+/* A = U*D*U' or A = L*D*L' */
+
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, U' is the transpose of U, and D is symmetric and */
+/* block diagonal with 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* This is the unblocked version of the algorithm, calling Level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L (see below for further details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, D(k,k) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, and division by zero will occur if it */
+/* is used to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* 09-29-06 - patch from */
+/* Bobby Cheng, MathWorks */
+
+/* Replace l.209 and l.377 */
+/* IF( MAX( ABSAKK, COLMAX ).EQ.ZERO ) THEN */
+/* by */
+/* IF( (MAX( ABSAKK, COLMAX ).EQ.ZERO) .OR. SISNAN(ABSAKK) ) THEN */
+
+/* 1-96 - Based on modifications by J. Lewis, Boeing Computer Services */
+/* Company */
+
+/* If UPLO = 'U', then A = U*D*U', where */
+/* U = P(n)*U(n)* ... *P(k)U(k)* ..., */
+/* i.e., U is a product of terms P(k)*U(k), where k decreases from n to */
+/* 1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I v 0 ) k-s */
+/* U(k) = ( 0 I 0 ) s */
+/* ( 0 0 I ) n-k */
+/* k-s s n-k */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */
+/* If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */
+/* and A(k,k), and v overwrites A(1:k-2,k-1:k). */
+
+/* If UPLO = 'L', then A = L*D*L', where */
+/* L = P(1)*L(1)* ... *P(k)*L(k)* ..., */
+/* i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */
+/* n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I 0 0 ) k-1 */
+/* L(k) = ( 0 I 0 ) s */
+/* ( 0 v I ) n-k-s+1 */
+/* k-1 s n-k-s+1 */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */
+/* If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */
+/* and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset; /* shift base so a[i + j*a_dim1] uses 1-based (Fortran) indices */
+    --ipiv;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+    } else if (*n < 0) {
+ *info = -2;
+    } else if (*lda < max(1,*n)) {
+ *info = -4;
+    }
+    if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CSYTF2", &i__1);
+ return 0;
+    }
+
+/* Initialize ALPHA for use in choosing pivot block size. */
+
+    alpha = (sqrt(17.f) + 1.f) / 8.f; /* Bunch-Kaufman pivot-selection threshold */
+
+    if (upper) {
+
+/* Factorize A as U*D*U' using the upper triangle of A */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2 */
+
+ k = *n;
+L10:
+
+/* If K < 1, exit from loop */
+
+ if (k < 1) {
+     goto L70;
+ }
+ kstep = 1; /* pivot block order: 1 or 2 */
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ i__1 = k + k * a_dim1;
+ absakk = (r__1 = a[i__1].r, dabs(r__1)) + (r__2 = r_imag(&a[k + k *
+ a_dim1]), dabs(r__2));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k > 1) {
+     i__1 = k - 1;
+     imax = icamax_(&i__1, &a[k * a_dim1 + 1], &c__1);
+     i__1 = imax + k * a_dim1;
+     colmax = (r__1 = a[i__1].r, dabs(r__1)) + (r__2 = r_imag(&a[imax
+ + k * a_dim1]), dabs(r__2));
+ } else {
+     colmax = 0.f;
+ }
+
+ if (dmax(absakk,colmax) == 0.f || sisnan_(&absakk)) {
+
+/* Column K is zero or NaN: set INFO and continue */
+
+     if (*info == 0) {
+ *info = k;
+     }
+     kp = k;
+ } else {
+     if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+     } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = k - imax;
+ jmax = imax + icamax_(&i__1, &a[imax + (imax + 1) * a_dim1],
+ lda);
+ i__1 = imax + jmax * a_dim1;
+ rowmax = (r__1 = a[i__1].r, dabs(r__1)) + (r__2 = r_imag(&a[
+ imax + jmax * a_dim1]), dabs(r__2));
+ if (imax > 1) {
+     i__1 = imax - 1;
+     jmax = icamax_(&i__1, &a[imax * a_dim1 + 1], &c__1);
+/* Computing MAX */
+     i__1 = jmax + imax * a_dim1;
+     r__3 = rowmax, r__4 = (r__1 = a[i__1].r, dabs(r__1)) + (
+ r__2 = r_imag(&a[jmax + imax * a_dim1]), dabs(
+ r__2));
+     rowmax = dmax(r__3,r__4);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+     kp = k;
+ } else /* if(complicated condition) */ {
+     i__1 = imax + imax * a_dim1;
+     if ((r__1 = a[i__1].r, dabs(r__1)) + (r__2 = r_imag(&a[
+ imax + imax * a_dim1]), dabs(r__2)) >= alpha *
+ rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+     } else {
+
+/* interchange rows and columns K-1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+     }
+ }
+     }
+
+     kk = k - kstep + 1; /* first column of the current pivot block */
+     if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the leading */
+/* submatrix A(1:k,1:k) */
+
+ i__1 = kp - 1;
+ cswap_(&i__1, &a[kk * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1],
+ &c__1);
+ i__1 = kk - kp - 1;
+ cswap_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + (kp +
+ 1) * a_dim1], lda);
+ i__1 = kk + kk * a_dim1;
+ t.r = a[i__1].r, t.i = a[i__1].i;
+ i__1 = kk + kk * a_dim1;
+ i__2 = kp + kp * a_dim1;
+ a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+ i__1 = kp + kp * a_dim1;
+ a[i__1].r = t.r, a[i__1].i = t.i;
+ if (kstep == 2) {
+     i__1 = k - 1 + k * a_dim1;
+     t.r = a[i__1].r, t.i = a[i__1].i;
+     i__1 = k - 1 + k * a_dim1;
+     i__2 = kp + k * a_dim1;
+     a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+     i__1 = kp + k * a_dim1;
+     a[i__1].r = t.r, a[i__1].i = t.i;
+ }
+     }
+
+/* Update the leading submatrix */
+
+     if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = U(k)*D(k) */
+
+/* where U(k) is the k-th column of U */
+
+/* Perform a rank-1 update of A(1:k-1,1:k-1) as */
+
+/* A := A - U(k)*D(k)*U(k)' = A - W(k)*1/D(k)*W(k)' */
+
+ c_div(&q__1, &c_b1, &a[k + k * a_dim1]);
+ r1.r = q__1.r, r1.i = q__1.i;
+ i__1 = k - 1;
+ q__1.r = -r1.r, q__1.i = -r1.i;
+ csyr_(uplo, &i__1, &q__1, &a[k * a_dim1 + 1], &c__1, &a[
+ a_offset], lda);
+
+/* Store U(k) in column k */
+
+ i__1 = k - 1;
+ cscal_(&i__1, &r1, &a[k * a_dim1 + 1], &c__1);
+     } else {
+
+/* 2-by-2 pivot block D(k): columns k and k-1 now hold */
+
+/* ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */
+
+/* where U(k) and U(k-1) are the k-th and (k-1)-th columns */
+/* of U */
+
+/* Perform a rank-2 update of A(1:k-2,1:k-2) as */
+
+/* A := A - ( U(k-1) U(k) )*D(k)*( U(k-1) U(k) )' */
+/* = A - ( W(k-1) W(k) )*inv(D(k))*( W(k-1) W(k) )' */
+
+ if (k > 2) {
+
+     i__1 = k - 1 + k * a_dim1;
+     d12.r = a[i__1].r, d12.i = a[i__1].i;
+     c_div(&q__1, &a[k - 1 + (k - 1) * a_dim1], &d12);
+     d22.r = q__1.r, d22.i = q__1.i;
+     c_div(&q__1, &a[k + k * a_dim1], &d12);
+     d11.r = q__1.r, d11.i = q__1.i;
+     q__3.r = d11.r * d22.r - d11.i * d22.i, q__3.i = d11.r *
+ d22.i + d11.i * d22.r;
+     q__2.r = q__3.r - 1.f, q__2.i = q__3.i - 0.f;
+     c_div(&q__1, &c_b1, &q__2);
+     t.r = q__1.r, t.i = q__1.i;
+     c_div(&q__1, &t, &d12);
+     d12.r = q__1.r, d12.i = q__1.i;
+
+     for (j = k - 2; j >= 1; --j) {
+ i__1 = j + (k - 1) * a_dim1;
+ q__3.r = d11.r * a[i__1].r - d11.i * a[i__1].i,
+ q__3.i = d11.r * a[i__1].i + d11.i * a[i__1]
+ .r;
+ i__2 = j + k * a_dim1;
+ q__2.r = q__3.r - a[i__2].r, q__2.i = q__3.i - a[i__2]
+ .i;
+ q__1.r = d12.r * q__2.r - d12.i * q__2.i, q__1.i =
+ d12.r * q__2.i + d12.i * q__2.r;
+ wkm1.r = q__1.r, wkm1.i = q__1.i;
+ i__1 = j + k * a_dim1;
+ q__3.r = d22.r * a[i__1].r - d22.i * a[i__1].i,
+ q__3.i = d22.r * a[i__1].i + d22.i * a[i__1]
+ .r;
+ i__2 = j + (k - 1) * a_dim1;
+ q__2.r = q__3.r - a[i__2].r, q__2.i = q__3.i - a[i__2]
+ .i;
+ q__1.r = d12.r * q__2.r - d12.i * q__2.i, q__1.i =
+ d12.r * q__2.i + d12.i * q__2.r;
+ wk.r = q__1.r, wk.i = q__1.i;
+ for (i__ = j; i__ >= 1; --i__) {
+     i__1 = i__ + j * a_dim1;
+     i__2 = i__ + j * a_dim1;
+     i__3 = i__ + k * a_dim1;
+     q__3.r = a[i__3].r * wk.r - a[i__3].i * wk.i,
+ q__3.i = a[i__3].r * wk.i + a[i__3].i *
+ wk.r;
+     q__2.r = a[i__2].r - q__3.r, q__2.i = a[i__2].i -
+ q__3.i;
+     i__4 = i__ + (k - 1) * a_dim1;
+     q__4.r = a[i__4].r * wkm1.r - a[i__4].i * wkm1.i,
+ q__4.i = a[i__4].r * wkm1.i + a[i__4].i *
+ wkm1.r;
+     q__1.r = q__2.r - q__4.r, q__1.i = q__2.i -
+ q__4.i;
+     a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+/* L20: */
+ }
+ i__1 = j + k * a_dim1;
+ a[i__1].r = wk.r, a[i__1].i = wk.i;
+ i__1 = j + (k - 1) * a_dim1;
+ a[i__1].r = wkm1.r, a[i__1].i = wkm1.i;
+/* L30: */
+     }
+
+ }
+
+     }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+     ipiv[k] = kp;
+ } else {
+     ipiv[k] = -kp;
+     ipiv[k - 1] = -kp;
+ }
+
+/* Decrease K and return to the start of the main loop */
+
+ k -= kstep;
+ goto L10;
+
+    } else {
+
+/* Factorize A as L*D*L' using the lower triangle of A */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2 */
+
+ k = 1;
+L40:
+
+/* If K > N, exit from loop */
+
+ if (k > *n) {
+     goto L70;
+ }
+ kstep = 1; /* pivot block order: 1 or 2 */
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ i__1 = k + k * a_dim1;
+ absakk = (r__1 = a[i__1].r, dabs(r__1)) + (r__2 = r_imag(&a[k + k *
+ a_dim1]), dabs(r__2));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k < *n) {
+     i__1 = *n - k;
+     imax = k + icamax_(&i__1, &a[k + 1 + k * a_dim1], &c__1);
+     i__1 = imax + k * a_dim1;
+     colmax = (r__1 = a[i__1].r, dabs(r__1)) + (r__2 = r_imag(&a[imax
+ + k * a_dim1]), dabs(r__2));
+ } else {
+     colmax = 0.f;
+ }
+
+ if (dmax(absakk,colmax) == 0.f || sisnan_(&absakk)) {
+
+/* Column K is zero or NaN: set INFO and continue */
+
+     if (*info == 0) {
+ *info = k;
+     }
+     kp = k;
+ } else {
+     if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+     } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = imax - k;
+ jmax = k - 1 + icamax_(&i__1, &a[imax + k * a_dim1], lda);
+ i__1 = imax + jmax * a_dim1;
+ rowmax = (r__1 = a[i__1].r, dabs(r__1)) + (r__2 = r_imag(&a[
+ imax + jmax * a_dim1]), dabs(r__2));
+ if (imax < *n) {
+     i__1 = *n - imax;
+     jmax = imax + icamax_(&i__1, &a[imax + 1 + imax * a_dim1],
+ &c__1);
+/* Computing MAX */
+     i__1 = jmax + imax * a_dim1;
+     r__3 = rowmax, r__4 = (r__1 = a[i__1].r, dabs(r__1)) + (
+ r__2 = r_imag(&a[jmax + imax * a_dim1]), dabs(
+ r__2));
+     rowmax = dmax(r__3,r__4);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+     kp = k;
+ } else /* if(complicated condition) */ {
+     i__1 = imax + imax * a_dim1;
+     if ((r__1 = a[i__1].r, dabs(r__1)) + (r__2 = r_imag(&a[
+ imax + imax * a_dim1]), dabs(r__2)) >= alpha *
+ rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+     } else {
+
+/* interchange rows and columns K+1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+     }
+ }
+     }
+
+     kk = k + kstep - 1; /* last column of the current pivot block */
+     if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the trailing */
+/* submatrix A(k:n,k:n) */
+
+ if (kp < *n) {
+     i__1 = *n - kp;
+     cswap_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + 1
+ + kp * a_dim1], &c__1);
+ }
+ i__1 = kp - kk - 1;
+ cswap_(&i__1, &a[kk + 1 + kk * a_dim1], &c__1, &a[kp + (kk +
+ 1) * a_dim1], lda);
+ i__1 = kk + kk * a_dim1;
+ t.r = a[i__1].r, t.i = a[i__1].i;
+ i__1 = kk + kk * a_dim1;
+ i__2 = kp + kp * a_dim1;
+ a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+ i__1 = kp + kp * a_dim1;
+ a[i__1].r = t.r, a[i__1].i = t.i;
+ if (kstep == 2) {
+     i__1 = k + 1 + k * a_dim1;
+     t.r = a[i__1].r, t.i = a[i__1].i;
+     i__1 = k + 1 + k * a_dim1;
+     i__2 = kp + k * a_dim1;
+     a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+     i__1 = kp + k * a_dim1;
+     a[i__1].r = t.r, a[i__1].i = t.i;
+ }
+     }
+
+/* Update the trailing submatrix */
+
+     if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = L(k)*D(k) */
+
+/* where L(k) is the k-th column of L */
+
+ if (k < *n) {
+
+/* Perform a rank-1 update of A(k+1:n,k+1:n) as */
+
+/* A := A - L(k)*D(k)*L(k)' = A - W(k)*(1/D(k))*W(k)' */
+
+     c_div(&q__1, &c_b1, &a[k + k * a_dim1]);
+     r1.r = q__1.r, r1.i = q__1.i;
+     i__1 = *n - k;
+     q__1.r = -r1.r, q__1.i = -r1.i;
+     csyr_(uplo, &i__1, &q__1, &a[k + 1 + k * a_dim1], &c__1, &
+ a[k + 1 + (k + 1) * a_dim1], lda);
+
+/* Store L(k) in column K */
+
+     i__1 = *n - k;
+     cscal_(&i__1, &r1, &a[k + 1 + k * a_dim1], &c__1);
+ }
+     } else {
+
+/* 2-by-2 pivot block D(k) */
+
+ if (k < *n - 1) {
+
+/* Perform a rank-2 update of A(k+2:n,k+2:n) as */
+
+/* A := A - ( L(k) L(k+1) )*D(k)*( L(k) L(k+1) )' */
+/* = A - ( W(k) W(k+1) )*inv(D(k))*( W(k) W(k+1) )' */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th */
+/* columns of L */
+
+     i__1 = k + 1 + k * a_dim1;
+     d21.r = a[i__1].r, d21.i = a[i__1].i;
+     c_div(&q__1, &a[k + 1 + (k + 1) * a_dim1], &d21);
+     d11.r = q__1.r, d11.i = q__1.i;
+     c_div(&q__1, &a[k + k * a_dim1], &d21);
+     d22.r = q__1.r, d22.i = q__1.i;
+     q__3.r = d11.r * d22.r - d11.i * d22.i, q__3.i = d11.r *
+ d22.i + d11.i * d22.r;
+     q__2.r = q__3.r - 1.f, q__2.i = q__3.i - 0.f;
+     c_div(&q__1, &c_b1, &q__2);
+     t.r = q__1.r, t.i = q__1.i;
+     c_div(&q__1, &t, &d21);
+     d21.r = q__1.r, d21.i = q__1.i;
+
+     i__1 = *n;
+     for (j = k + 2; j <= i__1; ++j) {
+ i__2 = j + k * a_dim1;
+ q__3.r = d11.r * a[i__2].r - d11.i * a[i__2].i,
+ q__3.i = d11.r * a[i__2].i + d11.i * a[i__2]
+ .r;
+ i__3 = j + (k + 1) * a_dim1;
+ q__2.r = q__3.r - a[i__3].r, q__2.i = q__3.i - a[i__3]
+ .i;
+ q__1.r = d21.r * q__2.r - d21.i * q__2.i, q__1.i =
+ d21.r * q__2.i + d21.i * q__2.r;
+ wk.r = q__1.r, wk.i = q__1.i;
+ i__2 = j + (k + 1) * a_dim1;
+ q__3.r = d22.r * a[i__2].r - d22.i * a[i__2].i,
+ q__3.i = d22.r * a[i__2].i + d22.i * a[i__2]
+ .r;
+ i__3 = j + k * a_dim1;
+ q__2.r = q__3.r - a[i__3].r, q__2.i = q__3.i - a[i__3]
+ .i;
+ q__1.r = d21.r * q__2.r - d21.i * q__2.i, q__1.i =
+ d21.r * q__2.i + d21.i * q__2.r;
+ wkp1.r = q__1.r, wkp1.i = q__1.i;
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+     i__3 = i__ + j * a_dim1;
+     i__4 = i__ + j * a_dim1;
+     i__5 = i__ + k * a_dim1;
+     q__3.r = a[i__5].r * wk.r - a[i__5].i * wk.i,
+ q__3.i = a[i__5].r * wk.i + a[i__5].i *
+ wk.r;
+     q__2.r = a[i__4].r - q__3.r, q__2.i = a[i__4].i -
+ q__3.i;
+     i__6 = i__ + (k + 1) * a_dim1;
+     q__4.r = a[i__6].r * wkp1.r - a[i__6].i * wkp1.i,
+ q__4.i = a[i__6].r * wkp1.i + a[i__6].i *
+ wkp1.r;
+     q__1.r = q__2.r - q__4.r, q__1.i = q__2.i -
+ q__4.i;
+     a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L50: */
+ }
+ i__2 = j + k * a_dim1;
+ a[i__2].r = wk.r, a[i__2].i = wk.i;
+ i__2 = j + (k + 1) * a_dim1;
+ a[i__2].r = wkp1.r, a[i__2].i = wkp1.i;
+/* L60: */
+     }
+ }
+     }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+     ipiv[k] = kp;
+ } else {
+     ipiv[k] = -kp;
+     ipiv[k + 1] = -kp;
+ }
+
+/* Increase K and return to the start of the main loop */
+
+ k += kstep;
+ goto L40;
+
+    }
+
+L70:
+    return 0;
+
+/* End of CSYTF2 */
+
+} /* csytf2_ */
diff --git a/contrib/libs/clapack/csytrf.c b/contrib/libs/clapack/csytrf.c
new file mode 100644
index 0000000000..c8d2b533b9
--- /dev/null
+++ b/contrib/libs/clapack/csytrf.c
@@ -0,0 +1,340 @@
+/* csytrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int csytrf_(char *uplo, integer *n, complex *a, integer *lda,
+ integer *ipiv, complex *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer j, k, kb, nb, iws;
+    extern logical lsame_(char *, char *);
+    integer nbmin, iinfo;
+    logical upper;
+    extern /* Subroutine */ int csytf2_(char *, integer *, complex *, integer
+ *, integer *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+    extern /* Subroutine */ int clasyf_(char *, integer *, integer *, integer
+ *, complex *, integer *, integer *, complex *, integer *, integer
+ *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSYTRF computes the factorization of a complex symmetric matrix A */
+/* using the Bunch-Kaufman diagonal pivoting method. The form of the */
+/* factorization is */
+
+/* A = U*D*U**T or A = L*D*L**T */
+
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is symmetric and block diagonal with */
+/* with 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* This is the blocked version of the algorithm, calling Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L (see below for further details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of WORK. LWORK >=1. For best performance */
+/* LWORK >= N*NB, where NB is the block size returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, and division by zero will occur if it */
+/* is used to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', then A = U*D*U', where */
+/* U = P(n)*U(n)* ... *P(k)U(k)* ..., */
+/* i.e., U is a product of terms P(k)*U(k), where k decreases from n to */
+/* 1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I v 0 ) k-s */
+/* U(k) = ( 0 I 0 ) s */
+/* ( 0 0 I ) n-k */
+/* k-s s n-k */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */
+/* If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */
+/* and A(k,k), and v overwrites A(1:k-2,k-1:k). */
+
+/* If UPLO = 'L', then A = L*D*L', where */
+/* L = P(1)*L(1)* ... *P(k)*L(k)* ..., */
+/* i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */
+/* n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I 0 0 ) k-1 */
+/* L(k) = ( 0 I 0 ) s */
+/* ( 0 v I ) n-k-s+1 */
+/* k-1 s n-k-s+1 */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */
+/* If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */
+/* and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset; /* shift base so a[i + j*a_dim1] uses 1-based (Fortran) indices */
+    --ipiv;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    lquery = *lwork == -1; /* LWORK = -1 requests a workspace-size query only */
+    if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+    } else if (*n < 0) {
+ *info = -2;
+    } else if (*lda < max(1,*n)) {
+ *info = -4;
+    } else if (*lwork < 1 && ! lquery) {
+ *info = -7;
+    }
+
+    if (*info == 0) {
+
+/* Determine the block size */
+
+ nb = ilaenv_(&c__1, "CSYTRF", uplo, n, &c_n1, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+    }
+
+    if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CSYTRF", &i__1);
+ return 0;
+    } else if (lquery) {
+ return 0;
+    }
+
+    nbmin = 2;
+    ldwork = *n;
+    if (nb > 1 && nb < *n) {
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+/* Computing MAX */
+     i__1 = *lwork / ldwork;
+     nb = max(i__1,1);
+/* Computing MAX */
+     i__1 = 2, i__2 = ilaenv_(&c__2, "CSYTRF", uplo, n, &c_n1, &c_n1, &
+ c_n1);
+     nbmin = max(i__1,i__2);
+ }
+    } else {
+ iws = 1;
+    }
+    if (nb < nbmin) {
+ nb = *n; /* block size too small to be useful: fall back to unblocked code */
+    }
+
+    if (upper) {
+
+/* Factorize A as U*D*U' using the upper triangle of A */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* KB, where KB is the number of columns factorized by CLASYF; */
+/* KB is either NB or NB-1, or K for the last block */
+
+ k = *n;
+L10:
+
+/* If K < 1, exit from loop */
+
+ if (k < 1) {
+     goto L40;
+ }
+
+ if (k > nb) {
+
+/* Factorize columns k-kb+1:k of A and use blocked code to */
+/* update columns 1:k-kb */
+
+     clasyf_(uplo, &k, &nb, &kb, &a[a_offset], lda, &ipiv[1], &work[1],
+ n, &iinfo);
+ } else {
+
+/* Use unblocked code to factorize columns 1:k of A */
+
+     csytf2_(uplo, &k, &a[a_offset], lda, &ipiv[1], &iinfo);
+     kb = k; /* the unblocked call factorized all remaining columns */
+ }
+
+/* Set INFO on the first occurrence of a zero pivot */
+
+ if (*info == 0 && iinfo > 0) {
+     *info = iinfo;
+ }
+
+/* Decrease K and return to the start of the main loop */
+
+ k -= kb;
+ goto L10;
+
+    } else {
+
+/* Factorize A as L*D*L' using the lower triangle of A */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* KB, where KB is the number of columns factorized by CLASYF; */
+/* KB is either NB or NB-1, or N-K+1 for the last block */
+
+ k = 1;
+L20:
+
+/* If K > N, exit from loop */
+
+ if (k > *n) {
+     goto L40;
+ }
+
+ if (k <= *n - nb) {
+
+/* Factorize columns k:k+kb-1 of A and use blocked code to */
+/* update columns k+kb:n */
+
+     i__1 = *n - k + 1;
+     clasyf_(uplo, &i__1, &nb, &kb, &a[k + k * a_dim1], lda, &ipiv[k],
+ &work[1], n, &iinfo);
+ } else {
+
+/* Use unblocked code to factorize columns k:n of A */
+
+     i__1 = *n - k + 1;
+     csytf2_(uplo, &i__1, &a[k + k * a_dim1], lda, &ipiv[k], &iinfo);
+     kb = *n - k + 1; /* the unblocked call factorized all remaining columns */
+ }
+
+/* Set INFO on the first occurrence of a zero pivot */
+
+ if (*info == 0 && iinfo > 0) {
+     *info = iinfo + k - 1;
+ }
+
+/* Adjust IPIV */
+
+ i__1 = k + kb - 1;
+ for (j = k; j <= i__1; ++j) {
+     if (ipiv[j] > 0) {
+ ipiv[j] = ipiv[j] + k - 1;
+     } else {
+ ipiv[j] = ipiv[j] - k + 1;
+     }
+/* L30: */
+ }
+
+/* Increase K and return to the start of the main loop */
+
+ k += kb;
+ goto L20;
+
+    }
+
+L40:
+    work[1].r = (real) lwkopt, work[1].i = 0.f;
+    return 0;
+
+/* End of CSYTRF */
+
+} /* csytrf_ */
diff --git a/contrib/libs/clapack/csytri.c b/contrib/libs/clapack/csytri.c
new file mode 100644
index 0000000000..a35945d853
--- /dev/null
+++ b/contrib/libs/clapack/csytri.c
@@ -0,0 +1,489 @@
+/* csytri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static complex c_b2 = {0.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int csytri_(char *uplo, integer *n, complex *a, integer *lda,
+ integer *ipiv, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void c_div(complex *, complex *, complex *);
+
+ /* Local variables */
+ complex d__;
+ integer k;
+ complex t, ak;
+ integer kp;
+ complex akp1, temp, akkp1;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *);
+ extern /* Complex */ VOID cdotu_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ integer kstep;
+ logical upper;
+ extern /* Subroutine */ int csymv_(char *, integer *, complex *, complex *
+, integer *, complex *, integer *, complex *, complex *, integer *
+), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSYTRI computes the inverse of a complex symmetric indefinite matrix */
+/* A using the factorization A = U*D*U**T or A = L*D*L**T computed by */
+/* CSYTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**T; */
+/* = 'L': Lower triangular, form is A = L*D*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the block diagonal matrix D and the multipliers */
+/* used to obtain the factor U or L as computed by CSYTRF. */
+
+/* On exit, if INFO = 0, the (symmetric) inverse of the original */
+/* matrix. If UPLO = 'U', the upper triangular part of the */
+/* inverse is formed and the part of A below the diagonal is not */
+/* referenced; if UPLO = 'L' the lower triangular part of the */
+/* inverse is formed and the part of A above the diagonal is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by CSYTRF. */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) = 0; the matrix is singular and its */
+/* inverse could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CSYTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check that the diagonal matrix D is nonsingular. */
+
+ if (upper) {
+
+/* Upper triangular storage: examine D from bottom to top */
+
+ for (*info = *n; *info >= 1; --(*info)) {
+ i__1 = *info + *info * a_dim1;
+ if (ipiv[*info] > 0 && (a[i__1].r == 0.f && a[i__1].i == 0.f)) {
+ return 0;
+ }
+/* L10: */
+ }
+ } else {
+
+/* Lower triangular storage: examine D from top to bottom. */
+
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ i__2 = *info + *info * a_dim1;
+ if (ipiv[*info] > 0 && (a[i__2].r == 0.f && a[i__2].i == 0.f)) {
+ return 0;
+ }
+/* L20: */
+ }
+ }
+ *info = 0;
+
+ if (upper) {
+
+/* Compute inv(A) from the factorization A = U*D*U'. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+L30:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L40;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Invert the diagonal block. */
+
+ i__1 = k + k * a_dim1;
+ c_div(&q__1, &c_b1, &a[k + k * a_dim1]);
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+
+/* Compute column K of the inverse. */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ ccopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &work[1], &c__1);
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ csymv_(uplo, &i__1, &q__1, &a[a_offset], lda, &work[1], &c__1,
+ &c_b2, &a[k * a_dim1 + 1], &c__1);
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ i__3 = k - 1;
+ cdotu_(&q__2, &i__3, &work[1], &c__1, &a[k * a_dim1 + 1], &
+ c__1);
+ q__1.r = a[i__2].r - q__2.r, q__1.i = a[i__2].i - q__2.i;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ }
+ kstep = 1;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Invert the diagonal block. */
+
+ i__1 = k + (k + 1) * a_dim1;
+ t.r = a[i__1].r, t.i = a[i__1].i;
+ c_div(&q__1, &a[k + k * a_dim1], &t);
+ ak.r = q__1.r, ak.i = q__1.i;
+ c_div(&q__1, &a[k + 1 + (k + 1) * a_dim1], &t);
+ akp1.r = q__1.r, akp1.i = q__1.i;
+ c_div(&q__1, &a[k + (k + 1) * a_dim1], &t);
+ akkp1.r = q__1.r, akkp1.i = q__1.i;
+ q__3.r = ak.r * akp1.r - ak.i * akp1.i, q__3.i = ak.r * akp1.i +
+ ak.i * akp1.r;
+ q__2.r = q__3.r - 1.f, q__2.i = q__3.i - 0.f;
+ q__1.r = t.r * q__2.r - t.i * q__2.i, q__1.i = t.r * q__2.i + t.i
+ * q__2.r;
+ d__.r = q__1.r, d__.i = q__1.i;
+ i__1 = k + k * a_dim1;
+ c_div(&q__1, &akp1, &d__);
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ i__1 = k + 1 + (k + 1) * a_dim1;
+ c_div(&q__1, &ak, &d__);
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ i__1 = k + (k + 1) * a_dim1;
+ q__2.r = -akkp1.r, q__2.i = -akkp1.i;
+ c_div(&q__1, &q__2, &d__);
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+
+/* Compute columns K and K+1 of the inverse. */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ ccopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &work[1], &c__1);
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ csymv_(uplo, &i__1, &q__1, &a[a_offset], lda, &work[1], &c__1,
+ &c_b2, &a[k * a_dim1 + 1], &c__1);
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ i__3 = k - 1;
+ cdotu_(&q__2, &i__3, &work[1], &c__1, &a[k * a_dim1 + 1], &
+ c__1);
+ q__1.r = a[i__2].r - q__2.r, q__1.i = a[i__2].i - q__2.i;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ i__1 = k + (k + 1) * a_dim1;
+ i__2 = k + (k + 1) * a_dim1;
+ i__3 = k - 1;
+ cdotu_(&q__2, &i__3, &a[k * a_dim1 + 1], &c__1, &a[(k + 1) *
+ a_dim1 + 1], &c__1);
+ q__1.r = a[i__2].r - q__2.r, q__1.i = a[i__2].i - q__2.i;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ i__1 = k - 1;
+ ccopy_(&i__1, &a[(k + 1) * a_dim1 + 1], &c__1, &work[1], &
+ c__1);
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ csymv_(uplo, &i__1, &q__1, &a[a_offset], lda, &work[1], &c__1,
+ &c_b2, &a[(k + 1) * a_dim1 + 1], &c__1);
+ i__1 = k + 1 + (k + 1) * a_dim1;
+ i__2 = k + 1 + (k + 1) * a_dim1;
+ i__3 = k - 1;
+ cdotu_(&q__2, &i__3, &work[1], &c__1, &a[(k + 1) * a_dim1 + 1]
+, &c__1);
+ q__1.r = a[i__2].r - q__2.r, q__1.i = a[i__2].i - q__2.i;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ }
+ kstep = 2;
+ }
+
+ kp = (i__1 = ipiv[k], abs(i__1));
+ if (kp != k) {
+
+/* Interchange rows and columns K and KP in the leading */
+/* submatrix A(1:k+1,1:k+1) */
+
+ i__1 = kp - 1;
+ cswap_(&i__1, &a[k * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1], &
+ c__1);
+ i__1 = k - kp - 1;
+ cswap_(&i__1, &a[kp + 1 + k * a_dim1], &c__1, &a[kp + (kp + 1) *
+ a_dim1], lda);
+ i__1 = k + k * a_dim1;
+ temp.r = a[i__1].r, temp.i = a[i__1].i;
+ i__1 = k + k * a_dim1;
+ i__2 = kp + kp * a_dim1;
+ a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+ i__1 = kp + kp * a_dim1;
+ a[i__1].r = temp.r, a[i__1].i = temp.i;
+ if (kstep == 2) {
+ i__1 = k + (k + 1) * a_dim1;
+ temp.r = a[i__1].r, temp.i = a[i__1].i;
+ i__1 = k + (k + 1) * a_dim1;
+ i__2 = kp + (k + 1) * a_dim1;
+ a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+ i__1 = kp + (k + 1) * a_dim1;
+ a[i__1].r = temp.r, a[i__1].i = temp.i;
+ }
+ }
+
+ k += kstep;
+ goto L30;
+L40:
+
+ ;
+ } else {
+
+/* Compute inv(A) from the factorization A = L*D*L'. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = *n;
+L50:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L60;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Invert the diagonal block. */
+
+ i__1 = k + k * a_dim1;
+ c_div(&q__1, &c_b1, &a[k + k * a_dim1]);
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+
+/* Compute column K of the inverse. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ ccopy_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &work[1], &c__1);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ csymv_(uplo, &i__1, &q__1, &a[k + 1 + (k + 1) * a_dim1], lda,
+ &work[1], &c__1, &c_b2, &a[k + 1 + k * a_dim1], &c__1);
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ i__3 = *n - k;
+ cdotu_(&q__2, &i__3, &work[1], &c__1, &a[k + 1 + k * a_dim1],
+ &c__1);
+ q__1.r = a[i__2].r - q__2.r, q__1.i = a[i__2].i - q__2.i;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ }
+ kstep = 1;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Invert the diagonal block. */
+
+ i__1 = k + (k - 1) * a_dim1;
+ t.r = a[i__1].r, t.i = a[i__1].i;
+ c_div(&q__1, &a[k - 1 + (k - 1) * a_dim1], &t);
+ ak.r = q__1.r, ak.i = q__1.i;
+ c_div(&q__1, &a[k + k * a_dim1], &t);
+ akp1.r = q__1.r, akp1.i = q__1.i;
+ c_div(&q__1, &a[k + (k - 1) * a_dim1], &t);
+ akkp1.r = q__1.r, akkp1.i = q__1.i;
+ q__3.r = ak.r * akp1.r - ak.i * akp1.i, q__3.i = ak.r * akp1.i +
+ ak.i * akp1.r;
+ q__2.r = q__3.r - 1.f, q__2.i = q__3.i - 0.f;
+ q__1.r = t.r * q__2.r - t.i * q__2.i, q__1.i = t.r * q__2.i + t.i
+ * q__2.r;
+ d__.r = q__1.r, d__.i = q__1.i;
+ i__1 = k - 1 + (k - 1) * a_dim1;
+ c_div(&q__1, &akp1, &d__);
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ i__1 = k + k * a_dim1;
+ c_div(&q__1, &ak, &d__);
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ i__1 = k + (k - 1) * a_dim1;
+ q__2.r = -akkp1.r, q__2.i = -akkp1.i;
+ c_div(&q__1, &q__2, &d__);
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+
+/* Compute columns K-1 and K of the inverse. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ ccopy_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &work[1], &c__1);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ csymv_(uplo, &i__1, &q__1, &a[k + 1 + (k + 1) * a_dim1], lda,
+ &work[1], &c__1, &c_b2, &a[k + 1 + k * a_dim1], &c__1);
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ i__3 = *n - k;
+ cdotu_(&q__2, &i__3, &work[1], &c__1, &a[k + 1 + k * a_dim1],
+ &c__1);
+ q__1.r = a[i__2].r - q__2.r, q__1.i = a[i__2].i - q__2.i;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ i__1 = k + (k - 1) * a_dim1;
+ i__2 = k + (k - 1) * a_dim1;
+ i__3 = *n - k;
+ cdotu_(&q__2, &i__3, &a[k + 1 + k * a_dim1], &c__1, &a[k + 1
+ + (k - 1) * a_dim1], &c__1);
+ q__1.r = a[i__2].r - q__2.r, q__1.i = a[i__2].i - q__2.i;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ i__1 = *n - k;
+ ccopy_(&i__1, &a[k + 1 + (k - 1) * a_dim1], &c__1, &work[1], &
+ c__1);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ csymv_(uplo, &i__1, &q__1, &a[k + 1 + (k + 1) * a_dim1], lda,
+ &work[1], &c__1, &c_b2, &a[k + 1 + (k - 1) * a_dim1],
+ &c__1);
+ i__1 = k - 1 + (k - 1) * a_dim1;
+ i__2 = k - 1 + (k - 1) * a_dim1;
+ i__3 = *n - k;
+ cdotu_(&q__2, &i__3, &work[1], &c__1, &a[k + 1 + (k - 1) *
+ a_dim1], &c__1);
+ q__1.r = a[i__2].r - q__2.r, q__1.i = a[i__2].i - q__2.i;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ }
+ kstep = 2;
+ }
+
+ kp = (i__1 = ipiv[k], abs(i__1));
+ if (kp != k) {
+
+/* Interchange rows and columns K and KP in the trailing */
+/* submatrix A(k-1:n,k-1:n) */
+
+ if (kp < *n) {
+ i__1 = *n - kp;
+ cswap_(&i__1, &a[kp + 1 + k * a_dim1], &c__1, &a[kp + 1 + kp *
+ a_dim1], &c__1);
+ }
+ i__1 = kp - k - 1;
+ cswap_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &a[kp + (k + 1) *
+ a_dim1], lda);
+ i__1 = k + k * a_dim1;
+ temp.r = a[i__1].r, temp.i = a[i__1].i;
+ i__1 = k + k * a_dim1;
+ i__2 = kp + kp * a_dim1;
+ a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+ i__1 = kp + kp * a_dim1;
+ a[i__1].r = temp.r, a[i__1].i = temp.i;
+ if (kstep == 2) {
+ i__1 = k + (k - 1) * a_dim1;
+ temp.r = a[i__1].r, temp.i = a[i__1].i;
+ i__1 = k + (k - 1) * a_dim1;
+ i__2 = kp + (k - 1) * a_dim1;
+ a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+ i__1 = kp + (k - 1) * a_dim1;
+ a[i__1].r = temp.r, a[i__1].i = temp.i;
+ }
+ }
+
+ k -= kstep;
+ goto L50;
+L60:
+ ;
+ }
+
+ return 0;
+
+/* End of CSYTRI */
+
+} /* csytri_ */
diff --git a/contrib/libs/clapack/csytrs.c b/contrib/libs/clapack/csytrs.c
new file mode 100644
index 0000000000..50a2488c32
--- /dev/null
+++ b/contrib/libs/clapack/csytrs.c
@@ -0,0 +1,502 @@
+/* csytrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int csytrs_(char *uplo, integer *n, integer *nrhs, complex *
+ a, integer *lda, integer *ipiv, complex *b, integer *ldb, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void c_div(complex *, complex *, complex *);
+
+ /* Local variables */
+ integer j, k;
+ complex ak, bk;
+ integer kp;
+ complex akm1, bkm1, akm1k;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ complex denom;
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *), cgeru_(integer *, integer *, complex *,
+ complex *, integer *, complex *, integer *, complex *, integer *),
+ cswap_(integer *, complex *, integer *, complex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CSYTRS solves a system of linear equations A*X = B with a complex */
+/* symmetric matrix A using the factorization A = U*D*U**T or */
+/* A = L*D*L**T computed by CSYTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**T; */
+/* = 'L': Lower triangular, form is A = L*D*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The block diagonal matrix D and the multipliers used to */
+/* obtain the factor U or L as computed by CSYTRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by CSYTRF. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CSYTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Solve A*X = B, where A = U*D*U'. */
+
+/* First solve U*D*X = B, overwriting B with X. */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = *n;
+L10:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L30;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(U(K)), where U(K) is the transformation */
+/* stored in column K of A. */
+
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &a[k * a_dim1 + 1], &c__1, &b[k +
+ b_dim1], ldb, &b[b_dim1 + 1], ldb);
+
+/* Multiply by the inverse of the diagonal block. */
+
+ c_div(&q__1, &c_b1, &a[k + k * a_dim1]);
+ cscal_(nrhs, &q__1, &b[k + b_dim1], ldb);
+ --k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Interchange rows K-1 and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k - 1) {
+ cswap_(nrhs, &b[k - 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(U(K)), where U(K) is the transformation */
+/* stored in columns K-1 and K of A. */
+
+ i__1 = k - 2;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &a[k * a_dim1 + 1], &c__1, &b[k +
+ b_dim1], ldb, &b[b_dim1 + 1], ldb);
+ i__1 = k - 2;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &a[(k - 1) * a_dim1 + 1], &c__1, &b[k
+ - 1 + b_dim1], ldb, &b[b_dim1 + 1], ldb);
+
+/* Multiply by the inverse of the diagonal block. */
+
+ i__1 = k - 1 + k * a_dim1;
+ akm1k.r = a[i__1].r, akm1k.i = a[i__1].i;
+ c_div(&q__1, &a[k - 1 + (k - 1) * a_dim1], &akm1k);
+ akm1.r = q__1.r, akm1.i = q__1.i;
+ c_div(&q__1, &a[k + k * a_dim1], &akm1k);
+ ak.r = q__1.r, ak.i = q__1.i;
+ q__2.r = akm1.r * ak.r - akm1.i * ak.i, q__2.i = akm1.r * ak.i +
+ akm1.i * ak.r;
+ q__1.r = q__2.r - 1.f, q__1.i = q__2.i - 0.f;
+ denom.r = q__1.r, denom.i = q__1.i;
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ c_div(&q__1, &b[k - 1 + j * b_dim1], &akm1k);
+ bkm1.r = q__1.r, bkm1.i = q__1.i;
+ c_div(&q__1, &b[k + j * b_dim1], &akm1k);
+ bk.r = q__1.r, bk.i = q__1.i;
+ i__2 = k - 1 + j * b_dim1;
+ q__3.r = ak.r * bkm1.r - ak.i * bkm1.i, q__3.i = ak.r *
+ bkm1.i + ak.i * bkm1.r;
+ q__2.r = q__3.r - bk.r, q__2.i = q__3.i - bk.i;
+ c_div(&q__1, &q__2, &denom);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ i__2 = k + j * b_dim1;
+ q__3.r = akm1.r * bk.r - akm1.i * bk.i, q__3.i = akm1.r *
+ bk.i + akm1.i * bk.r;
+ q__2.r = q__3.r - bkm1.r, q__2.i = q__3.i - bkm1.i;
+ c_div(&q__1, &q__2, &denom);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L20: */
+ }
+ k += -2;
+ }
+
+ goto L10;
+L30:
+
+/* Next solve U'*X = B, overwriting B with X. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+L40:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L50;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Multiply by inv(U'(K)), where U(K) is the transformation */
+/* stored in column K of A. */
+
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Transpose", &i__1, nrhs, &q__1, &b[b_offset], ldb, &a[k *
+ a_dim1 + 1], &c__1, &c_b1, &b[k + b_dim1], ldb)
+ ;
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ ++k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Multiply by inv(U'(K+1)), where U(K+1) is the transformation */
+/* stored in columns K and K+1 of A. */
+
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Transpose", &i__1, nrhs, &q__1, &b[b_offset], ldb, &a[k *
+ a_dim1 + 1], &c__1, &c_b1, &b[k + b_dim1], ldb)
+ ;
+ i__1 = k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Transpose", &i__1, nrhs, &q__1, &b[b_offset], ldb, &a[(k
+ + 1) * a_dim1 + 1], &c__1, &c_b1, &b[k + 1 + b_dim1], ldb);
+
+/* Interchange rows K and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ k += 2;
+ }
+
+ goto L40;
+L50:
+
+ ;
+ } else {
+
+/* Solve A*X = B, where A = L*D*L'. */
+
+/* First solve L*D*X = B, overwriting B with X. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+L60:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L80;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(L(K)), where L(K) is the transformation */
+/* stored in column K of A. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &a[k + 1 + k * a_dim1], &c__1, &b[
+ k + b_dim1], ldb, &b[k + 1 + b_dim1], ldb);
+ }
+
+/* Multiply by the inverse of the diagonal block. */
+
+ c_div(&q__1, &c_b1, &a[k + k * a_dim1]);
+ cscal_(nrhs, &q__1, &b[k + b_dim1], ldb);
+ ++k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Interchange rows K+1 and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k + 1) {
+ cswap_(nrhs, &b[k + 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(L(K)), where L(K) is the transformation */
+/* stored in columns K and K+1 of A. */
+
+ if (k < *n - 1) {
+ i__1 = *n - k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &a[k + 2 + k * a_dim1], &c__1, &b[
+ k + b_dim1], ldb, &b[k + 2 + b_dim1], ldb);
+ i__1 = *n - k - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__1, nrhs, &q__1, &a[k + 2 + (k + 1) * a_dim1], &
+ c__1, &b[k + 1 + b_dim1], ldb, &b[k + 2 + b_dim1],
+ ldb);
+ }
+
+/* Multiply by the inverse of the diagonal block. */
+
+ i__1 = k + 1 + k * a_dim1;
+ akm1k.r = a[i__1].r, akm1k.i = a[i__1].i;
+ c_div(&q__1, &a[k + k * a_dim1], &akm1k);
+ akm1.r = q__1.r, akm1.i = q__1.i;
+ c_div(&q__1, &a[k + 1 + (k + 1) * a_dim1], &akm1k);
+ ak.r = q__1.r, ak.i = q__1.i;
+ q__2.r = akm1.r * ak.r - akm1.i * ak.i, q__2.i = akm1.r * ak.i +
+ akm1.i * ak.r;
+ q__1.r = q__2.r - 1.f, q__1.i = q__2.i - 0.f;
+ denom.r = q__1.r, denom.i = q__1.i;
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ c_div(&q__1, &b[k + j * b_dim1], &akm1k);
+ bkm1.r = q__1.r, bkm1.i = q__1.i;
+ c_div(&q__1, &b[k + 1 + j * b_dim1], &akm1k);
+ bk.r = q__1.r, bk.i = q__1.i;
+ i__2 = k + j * b_dim1;
+ q__3.r = ak.r * bkm1.r - ak.i * bkm1.i, q__3.i = ak.r *
+ bkm1.i + ak.i * bkm1.r;
+ q__2.r = q__3.r - bk.r, q__2.i = q__3.i - bk.i;
+ c_div(&q__1, &q__2, &denom);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ i__2 = k + 1 + j * b_dim1;
+ q__3.r = akm1.r * bk.r - akm1.i * bk.i, q__3.i = akm1.r *
+ bk.i + akm1.i * bk.r;
+ q__2.r = q__3.r - bkm1.r, q__2.i = q__3.i - bkm1.i;
+ c_div(&q__1, &q__2, &denom);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L70: */
+ }
+ k += 2;
+ }
+
+ goto L60;
+L80:
+
+/* Next solve L'*X = B, overwriting B with X. */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = *n;
+L90:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L100;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Multiply by inv(L'(K)), where L(K) is the transformation */
+/* stored in column K of A. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Transpose", &i__1, nrhs, &q__1, &b[k + 1 + b_dim1],
+ ldb, &a[k + 1 + k * a_dim1], &c__1, &c_b1, &b[k +
+ b_dim1], ldb);
+ }
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ --k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Multiply by inv(L'(K-1)), where L(K-1) is the transformation */
+/* stored in columns K-1 and K of A. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Transpose", &i__1, nrhs, &q__1, &b[k + 1 + b_dim1],
+ ldb, &a[k + 1 + k * a_dim1], &c__1, &c_b1, &b[k +
+ b_dim1], ldb);
+ i__1 = *n - k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Transpose", &i__1, nrhs, &q__1, &b[k + 1 + b_dim1],
+ ldb, &a[k + 1 + (k - 1) * a_dim1], &c__1, &c_b1, &b[k
+ - 1 + b_dim1], ldb);
+ }
+
+/* Interchange rows K and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k) {
+ cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ k += -2;
+ }
+
+ goto L90;
+L100:
+ ;
+ }
+
+ return 0;
+
+/* End of CSYTRS */
+
+} /* csytrs_ */
diff --git a/contrib/libs/clapack/ctbcon.c b/contrib/libs/clapack/ctbcon.c
new file mode 100644
index 0000000000..014bac54ef
--- /dev/null
+++ b/contrib/libs/clapack/ctbcon.c
@@ -0,0 +1,255 @@
+/* ctbcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ctbcon_(char *norm, char *uplo, char *diag, integer *n,
+ integer *kd, complex *ab, integer *ldab, real *rcond, complex *work,
+ real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ integer ix, kase, kase1;
+ real scale;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ real anorm;
+ logical upper;
+ extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+ *, integer *, integer *);
+ real xnorm;
+ extern integer icamax_(integer *, complex *, integer *);
+ extern doublereal clantb_(char *, char *, char *, integer *, integer *,
+ complex *, integer *, real *), slamch_(
+ char *);
+ extern /* Subroutine */ int clatbs_(char *, char *, char *, char *,
+ integer *, integer *, complex *, integer *, complex *, real *,
+ real *, integer *), xerbla_(char *
+, integer *);
+ real ainvnm;
+ extern /* Subroutine */ int csrscl_(integer *, real *, complex *, integer
+ *);
+ logical onenrm;
+ char normin[1];
+ real smlnum;
+ logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTBCON estimates the reciprocal of the condition number of a */
+/* triangular band matrix A, in either the 1-norm or the infinity-norm. */
+
+/* The norm of A is computed and an estimate is obtained for */
+/* norm(inv(A)), then the reciprocal of the condition number is */
+/* computed as */
+/* RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals or subdiagonals of the */
+/* triangular band matrix A. KD >= 0. */
+
+/* AB (input) COMPLEX array, dimension (LDAB,N) */
+/* The upper or lower triangular band matrix A, stored in the */
+/* first kd+1 rows of the array. The j-th column of A is stored */
+/* in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+/* If DIAG = 'U', the diagonal elements of A are not referenced */
+/* and are assumed to be 1. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+ nounit = lsame_(diag, "N");
+
+ if (! onenrm && ! lsame_(norm, "I")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*kd < 0) {
+ *info = -5;
+ } else if (*ldab < *kd + 1) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CTBCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *rcond = 1.f;
+ return 0;
+ }
+
+ *rcond = 0.f;
+ smlnum = slamch_("Safe minimum") * (real) max(*n,1);
+
+/* Compute the 1-norm of the triangular matrix A or A'. */
+
+ anorm = clantb_(norm, uplo, diag, n, kd, &ab[ab_offset], ldab, &rwork[1]);
+
+/* Continue only if ANORM > 0. */
+
+ if (anorm > 0.f) {
+
+/* Estimate the 1-norm of the inverse of A. */
+
+ ainvnm = 0.f;
+ *(unsigned char *)normin = 'N';
+ if (onenrm) {
+ kase1 = 1;
+ } else {
+ kase1 = 2;
+ }
+ kase = 0;
+L10:
+ clacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+ if (kase == kase1) {
+
+/* Multiply by inv(A). */
+
+ clatbs_(uplo, "No transpose", diag, normin, n, kd, &ab[
+ ab_offset], ldab, &work[1], &scale, &rwork[1], info);
+ } else {
+
+/* Multiply by inv(A'). */
+
+ clatbs_(uplo, "Conjugate transpose", diag, normin, n, kd, &ab[
+ ab_offset], ldab, &work[1], &scale, &rwork[1], info);
+ }
+ *(unsigned char *)normin = 'Y';
+
+/* Multiply by 1/SCALE if doing so will not cause overflow. */
+
+ if (scale != 1.f) {
+ ix = icamax_(n, &work[1], &c__1);
+ i__1 = ix;
+ xnorm = (r__1 = work[i__1].r, dabs(r__1)) + (r__2 = r_imag(&
+ work[ix]), dabs(r__2));
+ if (scale < xnorm * smlnum || scale == 0.f) {
+ goto L20;
+ }
+ csrscl_(n, &scale, &work[1], &c__1);
+ }
+ goto L10;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.f) {
+ *rcond = 1.f / anorm / ainvnm;
+ }
+ }
+
+L20:
+ return 0;
+
+/* End of CTBCON */
+
+} /* ctbcon_ */
diff --git a/contrib/libs/clapack/ctbrfs.c b/contrib/libs/clapack/ctbrfs.c
new file mode 100644
index 0000000000..883e53277c
--- /dev/null
+++ b/contrib/libs/clapack/ctbrfs.c
@@ -0,0 +1,584 @@
+/* ctbrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ctbrfs_(char *uplo, char *trans, char *diag, integer *n, 
+	integer *kd, integer *nrhs, complex *ab, integer *ldab, complex *b, 
+	integer *ldb, complex *x, integer *ldx, real *ferr, real *berr, 
+	complex *work, real *rwork, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, b_dim1, b_offset, x_dim1, x_offset, i__1, 
+	    i__2, i__3, i__4, i__5;
+    real r__1, r__2, r__3, r__4;
+    complex q__1;
+
+    /* Builtin functions */
+    double r_imag(complex *);
+
+    /* Local variables */
+    integer i__, j, k;
+    real s, xk;
+    integer nz;
+    real eps;
+    integer kase;
+    real safe1, safe2;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    extern /* Subroutine */ int ctbmv_(char *, char *, char *, integer *, 
+	    integer *, complex *, integer *, complex *, integer *), ccopy_(integer *, complex *, integer *, complex *
+, integer *), ctbsv_(char *, char *, char *, integer *, integer *, 
+	    complex *, integer *, complex *, integer *), caxpy_(integer *, complex *, complex *, integer *, 
+	    complex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real 
+	    *, integer *, integer *);
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical notran;
+    char transn[1], transt[1];
+    logical nounit;
+    real lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTBRFS provides error bounds and backward error estimates for the */
+/* solution to a system of linear equations with a triangular band */
+/* coefficient matrix. */
+
+/* The solution matrix X must be computed by CTBTRS or some other */
+/* means before entering this routine. CTBRFS does not do iterative */
+/* refinement because doing so cannot improve the backward error. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals or subdiagonals of the */
+/* triangular band matrix A. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AB (input) COMPLEX array, dimension (LDAB,N) */
+/* The upper or lower triangular band matrix A, stored in the */
+/* first kd+1 rows of the array. The j-th column of A is stored */
+/* in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+/* If DIAG = 'U', the diagonal elements of A are not referenced */
+/* and are assumed to be 1. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* B (input) COMPLEX array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input) COMPLEX array, dimension (LDX,NRHS) */
+/* The solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* NOTE(review): the pointer shifts below are the standard f2c idiom -- */
+/* after "ab -= ab_offset" the code indexes arrays 1-based, Fortran */
+/* style, so ab[i + j*ab_dim1] corresponds to AB(i,j). */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    notran = lsame_(trans, "N");
+    nounit = lsame_(diag, "N");
+
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (! notran && ! lsame_(trans, "T") && ! 
+	    lsame_(trans, "C")) {
+	*info = -2;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*kd < 0) {
+	*info = -5;
+    } else if (*nrhs < 0) {
+	*info = -6;
+    } else if (*ldab < *kd + 1) {
+	*info = -8;
+    } else if (*ldb < max(1,*n)) {
+	*info = -10;
+    } else if (*ldx < max(1,*n)) {
+	*info = -12;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CTBRFS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] = 0.f;
+	    berr[j] = 0.f;
+/* L10: */
+	}
+	return 0;
+    }
+
+/* NOTE(review): TRANSN/TRANST select the op for the CTBSV solves in the */
+/* estimation loop below; the complex routine pairs 'N' with 'C' */
+/* (conjugate transpose) -- plain 'T' is never used there. */
+
+    if (notran) {
+	*(unsigned char *)transn = 'N';
+	*(unsigned char *)transt = 'C';
+    } else {
+	*(unsigned char *)transn = 'C';
+	*(unsigned char *)transt = 'N';
+    }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+    nz = *kd + 2;
+    eps = slamch_("Epsilon");
+    safmin = slamch_("Safe minimum");
+    safe1 = nz * safmin;
+    safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A, A**T, or A**H, depending on TRANS. */
+
+	ccopy_(n, &x[j * x_dim1 + 1], &c__1, &work[1], &c__1);
+	ctbmv_(uplo, trans, diag, n, kd, &ab[ab_offset], ldab, &work[1], &
+		c__1);
+	q__1.r = -1.f, q__1.i = -0.f;
+	caxpy_(n, &q__1, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * b_dim1;
+	    rwork[i__] = (r__1 = b[i__3].r, dabs(r__1)) + (r__2 = r_imag(&b[
+		    i__ + j * b_dim1]), dabs(r__2));
+/* L20: */
+	}
+
+	if (notran) {
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+	    if (upper) {
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&
+				x[k + j * x_dim1]), dabs(r__2));
+/* Computing MAX */
+			i__3 = 1, i__4 = k - *kd;
+			i__5 = k;
+			for (i__ = max(i__3,i__4); i__ <= i__5; ++i__) {
+			    i__3 = *kd + 1 + i__ - k + k * ab_dim1;
+			    rwork[i__] += ((r__1 = ab[i__3].r, dabs(r__1)) + (
+				    r__2 = r_imag(&ab[*kd + 1 + i__ - k + k * 
+				    ab_dim1]), dabs(r__2))) * xk;
+/* L30: */
+			}
+/* L40: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__5 = k + j * x_dim1;
+			xk = (r__1 = x[i__5].r, dabs(r__1)) + (r__2 = r_imag(&
+				x[k + j * x_dim1]), dabs(r__2));
+/* Computing MAX */
+			i__5 = 1, i__3 = k - *kd;
+			i__4 = k - 1;
+			for (i__ = max(i__5,i__3); i__ <= i__4; ++i__) {
+			    i__5 = *kd + 1 + i__ - k + k * ab_dim1;
+			    rwork[i__] += ((r__1 = ab[i__5].r, dabs(r__1)) + (
+				    r__2 = r_imag(&ab[*kd + 1 + i__ - k + k * 
+				    ab_dim1]), dabs(r__2))) * xk;
+/* L50: */
+			}
+			rwork[k] += xk;
+/* L60: */
+		    }
+		}
+	    } else {
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__4 = k + j * x_dim1;
+			xk = (r__1 = x[i__4].r, dabs(r__1)) + (r__2 = r_imag(&
+				x[k + j * x_dim1]), dabs(r__2));
+/* Computing MIN */
+			i__5 = *n, i__3 = k + *kd;
+			i__4 = min(i__5,i__3);
+			for (i__ = k; i__ <= i__4; ++i__) {
+			    i__5 = i__ + 1 - k + k * ab_dim1;
+			    rwork[i__] += ((r__1 = ab[i__5].r, dabs(r__1)) + (
+				    r__2 = r_imag(&ab[i__ + 1 - k + k * 
+				    ab_dim1]), dabs(r__2))) * xk;
+/* L70: */
+			}
+/* L80: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__4 = k + j * x_dim1;
+			xk = (r__1 = x[i__4].r, dabs(r__1)) + (r__2 = r_imag(&
+				x[k + j * x_dim1]), dabs(r__2));
+/* Computing MIN */
+			i__5 = *n, i__3 = k + *kd;
+			i__4 = min(i__5,i__3);
+			for (i__ = k + 1; i__ <= i__4; ++i__) {
+			    i__5 = i__ + 1 - k + k * ab_dim1;
+			    rwork[i__] += ((r__1 = ab[i__5].r, dabs(r__1)) + (
+				    r__2 = r_imag(&ab[i__ + 1 - k + k * 
+				    ab_dim1]), dabs(r__2))) * xk;
+/* L90: */
+			}
+			rwork[k] += xk;
+/* L100: */
+		    }
+		}
+	    }
+	} else {
+
+/* Compute abs(A**H)*abs(X) + abs(B). */
+
+	    if (upper) {
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			s = 0.f;
+/* Computing MAX */
+			i__4 = 1, i__5 = k - *kd;
+			i__3 = k;
+			for (i__ = max(i__4,i__5); i__ <= i__3; ++i__) {
+			    i__4 = *kd + 1 + i__ - k + k * ab_dim1;
+			    i__5 = i__ + j * x_dim1;
+			    s += ((r__1 = ab[i__4].r, dabs(r__1)) + (r__2 = 
+				    r_imag(&ab[*kd + 1 + i__ - k + k * 
+				    ab_dim1]), dabs(r__2))) * ((r__3 = x[i__5]
+				    .r, dabs(r__3)) + (r__4 = r_imag(&x[i__ + 
+				    j * x_dim1]), dabs(r__4)));
+/* L110: */
+			}
+			rwork[k] += s;
+/* L120: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			s = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&
+				x[k + j * x_dim1]), dabs(r__2));
+/* Computing MAX */
+			i__3 = 1, i__4 = k - *kd;
+			i__5 = k - 1;
+			for (i__ = max(i__3,i__4); i__ <= i__5; ++i__) {
+			    i__3 = *kd + 1 + i__ - k + k * ab_dim1;
+			    i__4 = i__ + j * x_dim1;
+			    s += ((r__1 = ab[i__3].r, dabs(r__1)) + (r__2 = 
+				    r_imag(&ab[*kd + 1 + i__ - k + k * 
+				    ab_dim1]), dabs(r__2))) * ((r__3 = x[i__4]
+				    .r, dabs(r__3)) + (r__4 = r_imag(&x[i__ + 
+				    j * x_dim1]), dabs(r__4)));
+/* L130: */
+			}
+			rwork[k] += s;
+/* L140: */
+		    }
+		}
+	    } else {
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			s = 0.f;
+/* Computing MIN */
+			i__3 = *n, i__4 = k + *kd;
+			i__5 = min(i__3,i__4);
+			for (i__ = k; i__ <= i__5; ++i__) {
+			    i__3 = i__ + 1 - k + k * ab_dim1;
+			    i__4 = i__ + j * x_dim1;
+			    s += ((r__1 = ab[i__3].r, dabs(r__1)) + (r__2 = 
+				    r_imag(&ab[i__ + 1 - k + k * ab_dim1]), 
+				    dabs(r__2))) * ((r__3 = x[i__4].r, dabs(
+				    r__3)) + (r__4 = r_imag(&x[i__ + j * 
+				    x_dim1]), dabs(r__4)));
+/* L150: */
+			}
+			rwork[k] += s;
+/* L160: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__5 = k + j * x_dim1;
+			s = (r__1 = x[i__5].r, dabs(r__1)) + (r__2 = r_imag(&
+				x[k + j * x_dim1]), dabs(r__2));
+/* Computing MIN */
+			i__3 = *n, i__4 = k + *kd;
+			i__5 = min(i__3,i__4);
+			for (i__ = k + 1; i__ <= i__5; ++i__) {
+			    i__3 = i__ + 1 - k + k * ab_dim1;
+			    i__4 = i__ + j * x_dim1;
+			    s += ((r__1 = ab[i__3].r, dabs(r__1)) + (r__2 = 
+				    r_imag(&ab[i__ + 1 - k + k * ab_dim1]), 
+				    dabs(r__2))) * ((r__3 = x[i__4].r, dabs(
+				    r__3)) + (r__4 = r_imag(&x[i__ + j * 
+				    x_dim1]), dabs(r__4)));
+/* L170: */
+			}
+			rwork[k] += s;
+/* L180: */
+		    }
+		}
+	    }
+	}
+	s = 0.f;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (rwork[i__] > safe2) {
+/* Computing MAX */
+		i__5 = i__;
+		r__3 = s, r__4 = ((r__1 = work[i__5].r, dabs(r__1)) + (r__2 = 
+			r_imag(&work[i__]), dabs(r__2))) / rwork[i__];
+		s = dmax(r__3,r__4);
+	    } else {
+/* Computing MAX */
+		i__5 = i__;
+		r__3 = s, r__4 = ((r__1 = work[i__5].r, dabs(r__1)) + (r__2 = 
+			r_imag(&work[i__]), dabs(r__2)) + safe1) / (rwork[i__]
+			+ safe1);
+		s = dmax(r__3,r__4);
+	    }
+/* L190: */
+	}
+	berr[j] = s;
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use CLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (rwork[i__] > safe2) {
+		i__5 = i__;
+		rwork[i__] = (r__1 = work[i__5].r, dabs(r__1)) + (r__2 = 
+			r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+			i__];
+	    } else {
+		i__5 = i__;
+		rwork[i__] = (r__1 = work[i__5].r, dabs(r__1)) + (r__2 = 
+			r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+			i__] + safe1;
+	    }
+/* L200: */
+	}
+
+/* NOTE(review): reverse-communication loop -- CLACN2 returns KASE to */
+/* request that WORK be multiplied by diag(W)*inv(op(A)**H) (KASE=1) or */
+/* by inv(op(A))*diag(W) (otherwise); KASE=0 ends the estimate. */
+
+	kase = 0;
+L210:
+	clacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+	if (kase != 0) {
+	    if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)**H). */
+
+		ctbsv_(uplo, transt, diag, n, kd, &ab[ab_offset], ldab, &work[
+			1], &c__1);
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__5 = i__;
+		    i__3 = i__;
+		    i__4 = i__;
+		    q__1.r = rwork[i__3] * work[i__4].r, q__1.i = rwork[i__3] 
+			    * work[i__4].i;
+		    work[i__5].r = q__1.r, work[i__5].i = q__1.i;
+/* L220: */
+		}
+	    } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__5 = i__;
+		    i__3 = i__;
+		    i__4 = i__;
+		    q__1.r = rwork[i__3] * work[i__4].r, q__1.i = rwork[i__3] 
+			    * work[i__4].i;
+		    work[i__5].r = q__1.r, work[i__5].i = q__1.i;
+/* L230: */
+		}
+		ctbsv_(uplo, transn, diag, n, kd, &ab[ab_offset], ldab, &work[
+			1], &c__1);
+	    }
+	    goto L210;
+	}
+
+/* Normalize error. */
+
+	lstres = 0.f;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    i__5 = i__ + j * x_dim1;
+	    r__3 = lstres, r__4 = (r__1 = x[i__5].r, dabs(r__1)) + (r__2 = 
+		    r_imag(&x[i__ + j * x_dim1]), dabs(r__2));
+	    lstres = dmax(r__3,r__4);
+/* L240: */
+	}
+	if (lstres != 0.f) {
+	    ferr[j] /= lstres;
+	}
+
+/* L250: */
+    }
+
+    return 0;
+
+/* End of CTBRFS */
+
+} /* ctbrfs_ */
diff --git a/contrib/libs/clapack/ctbtrs.c b/contrib/libs/clapack/ctbtrs.c
new file mode 100644
index 0000000000..d19e3ea76c
--- /dev/null
+++ b/contrib/libs/clapack/ctbtrs.c
@@ -0,0 +1,206 @@
+/* ctbtrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ctbtrs_(char *uplo, char *trans, char *diag, integer *n, 
+	integer *kd, integer *nrhs, complex *ab, integer *ldab, complex *b, 
+	integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, b_dim1, b_offset, i__1, i__2;
+
+    /* Local variables */
+    integer j;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int ctbsv_(char *, char *, char *, integer *, 
+	    integer *, complex *, integer *, complex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTBTRS solves a triangular system of the form */
+
+/* A * X = B, A**T * X = B, or A**H * X = B, */
+
+/* where A is a triangular band matrix of order N, and B is an */
+/* N-by-NRHS matrix. A check is made to verify that A is nonsingular. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals or subdiagonals of the */
+/* triangular band matrix A. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AB (input) COMPLEX array, dimension (LDAB,N) */
+/* The upper or lower triangular band matrix A, stored in the */
+/* first kd+1 rows of AB. The j-th column of A is stored */
+/* in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+/* If DIAG = 'U', the diagonal elements of A are not referenced */
+/* and are assumed to be 1. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, if INFO = 0, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element of A is zero, */
+/* indicating that the matrix is singular and the */
+/* solutions X have not been computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* NOTE(review): the pointer shifts below are the standard f2c idiom -- */
+/* they enable 1-based, Fortran-style indexing, so ab[i + j*ab_dim1] */
+/* corresponds to AB(i,j). */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    nounit = lsame_(diag, "N");
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (! lsame_(trans, "N") && ! lsame_(trans, 
+	    "T") && ! lsame_(trans, "C")) {
+	*info = -2;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*kd < 0) {
+	*info = -5;
+    } else if (*nrhs < 0) {
+	*info = -6;
+    } else if (*ldab < *kd + 1) {
+	*info = -8;
+    } else if (*ldb < max(1,*n)) {
+	*info = -10;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CTBTRS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Check for singularity. */
+
+/* NOTE(review): INFO doubles as the loop index here (faithful f2c */
+/* translation of Fortran "DO INFO = 1, N"), so when a zero diagonal */
+/* element is found, INFO already holds its index on return. */
+
+    if (nounit) {
+	if (upper) {
+	    i__1 = *n;
+	    for (*info = 1; *info <= i__1; ++(*info)) {
+		i__2 = *kd + 1 + *info * ab_dim1;
+		if (ab[i__2].r == 0.f && ab[i__2].i == 0.f) {
+		    return 0;
+		}
+/* L10: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (*info = 1; *info <= i__1; ++(*info)) {
+		i__2 = *info * ab_dim1 + 1;
+		if (ab[i__2].r == 0.f && ab[i__2].i == 0.f) {
+		    return 0;
+		}
+/* L20: */
+	    }
+	}
+    }
+    *info = 0;
+
+/* Solve A * X = B, A**T * X = B, or A**H * X = B. */
+
+/* NOTE(review): CTBSV solves one right-hand-side column per call. */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+	ctbsv_(uplo, trans, diag, n, kd, &ab[ab_offset], ldab, &b[j * b_dim1 
+		+ 1], &c__1);
+/* L30: */
+    }
+
+    return 0;
+
+/* End of CTBTRS */
+
+} /* ctbtrs_ */
diff --git a/contrib/libs/clapack/ctfsm.c b/contrib/libs/clapack/ctfsm.c
new file mode 100644
index 0000000000..2b07055672
--- /dev/null
+++ b/contrib/libs/clapack/ctfsm.c
@@ -0,0 +1,1024 @@
+/* ctfsm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+
+/* Subroutine */ int ctfsm_(char *transr, char *side, char *uplo, char *trans,
+ char *diag, integer *m, integer *n, complex *alpha, complex *a,
+ complex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Local variables */
+ integer i__, j, k, m1, m2, n1, n2, info;
+ logical normaltransr;
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *);
+ logical lside;
+ extern logical lsame_(char *, char *);
+ logical lower;
+ extern /* Subroutine */ int ctrsm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *), xerbla_(char *,
+ integer *);
+ logical misodd, nisodd, notrans;
+
+
+/* -- LAPACK routine (version 3.2.1) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- April 2009 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Level 3 BLAS like routine for A in RFP Format. */
+
+/* CTFSM solves the matrix equation */
+
+/* op( A )*X = alpha*B or X*op( A ) = alpha*B */
+
+/* where alpha is a scalar, X and B are m by n matrices, A is a unit, or */
+/* non-unit, upper or lower triangular matrix and op( A ) is one of */
+
+/* op( A ) = A or op( A ) = conjg( A' ). */
+
+/* A is in Rectangular Full Packed (RFP) Format. */
+
+/* The matrix X is overwritten on B. */
+
+/* Arguments */
+/* ========== */
+
+/* TRANSR - (input) CHARACTER */
+/* = 'N': The Normal Form of RFP A is stored; */
+/* = 'C': The Conjugate-transpose Form of RFP A is stored. */
+
+/* SIDE - (input) CHARACTER */
+/* On entry, SIDE specifies whether op( A ) appears on the left */
+/* or right of X as follows: */
+
+/* SIDE = 'L' or 'l' op( A )*X = alpha*B. */
+
+/* SIDE = 'R' or 'r' X*op( A ) = alpha*B. */
+
+/* Unchanged on exit. */
+
+/* UPLO - (input) CHARACTER */
+/* On entry, UPLO specifies whether the RFP matrix A came from */
+/* an upper or lower triangular matrix as follows: */
+/* UPLO = 'U' or 'u' RFP A came from an upper triangular matrix */
+/* UPLO = 'L' or 'l' RFP A came from a lower triangular matrix */
+
+/* Unchanged on exit. */
+
+/* TRANS - (input) CHARACTER */
+/* On entry, TRANS specifies the form of op( A ) to be used */
+/* in the matrix multiplication as follows: */
+
+/* TRANS = 'N' or 'n' op( A ) = A. */
+
+/* TRANS = 'C' or 'c' op( A ) = conjg( A' ). */
+
+/* Unchanged on exit. */
+
+/* DIAG - (input) CHARACTER */
+/* On entry, DIAG specifies whether or not RFP A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* M - (input) INTEGER. */
+/* On entry, M specifies the number of rows of B. M must be at */
+/* least zero. */
+/* Unchanged on exit. */
+
+/* N - (input) INTEGER. */
+/* On entry, N specifies the number of columns of B. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - (input) COMPLEX. */
+/* On entry, ALPHA specifies the scalar alpha. When alpha is */
+/* zero then A is not referenced and B need not be set before */
+/* entry. */
+/* Unchanged on exit. */
+
+/* A - (input) COMPLEX array, dimension ( N*(N+1)/2 ); */
+/* NT = N*(N+1)/2. On entry, the matrix A in RFP Format. */
+/* RFP Format is described by TRANSR, UPLO and N as follows: */
+/* If TRANSR='N' then RFP A is (0:N,0:K-1) when N is even; */
+/* K=N/2. RFP A is (0:N-1,0:K) when N is odd; K=N/2. If */
+/* TRANSR = 'C' then RFP is the Conjugate-transpose of RFP A as */
+/* defined when TRANSR = 'N'. The contents of RFP A are defined */
+/* by UPLO as follows: If UPLO = 'U' the RFP A contains the NT */
+/* elements of upper packed A either in normal or */
+/* conjugate-transpose Format. If UPLO = 'L' the RFP A contains */
+/* the NT elements of lower packed A either in normal or */
+/* conjugate-transpose Format. The LDA of RFP A is (N+1)/2 when */
+/* TRANSR = 'C'. When TRANSR is 'N' the LDA is N+1 when N is */
+/* even and is N when is odd. */
+/* See the Note below for more details. Unchanged on exit. */
+
+/* B - (input/ouptut) COMPLEX array, DIMENSION ( LDB, N ) */
+/* Before entry, the leading m by n part of the array B must */
+/* contain the right-hand side matrix B, and on exit is */
+/* overwritten by the solution matrix X. */
+
+/* LDB - (input) INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. LDB must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+/* Further Details */
+/* =============== */
+
+/* We first consider Standard Packed Format when N is even. */
+/* We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* conjugate-transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* conjugate-transpose of the last three columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- -- */
+/* 03 04 05 33 43 53 */
+/* -- -- */
+/* 13 14 15 00 44 54 */
+/* -- */
+/* 23 24 25 10 11 55 */
+
+/* 33 34 35 20 21 22 */
+/* -- */
+/* 00 44 45 30 31 32 */
+/* -- -- */
+/* 01 11 55 40 41 42 */
+/* -- -- -- */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We next consider Standard Packed Format when N is odd. */
+/* We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* conjugate-transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* conjugate-transpose of the last two columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- */
+/* 02 03 04 00 33 43 */
+/* -- */
+/* 12 13 14 10 11 44 */
+
+/* 22 23 24 20 21 22 */
+/* -- */
+/* 00 33 34 30 31 32 */
+/* -- -- */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- */
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* .. */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb - 1 - 0 + 1;
+ b_offset = 0 + b_dim1 * 0;
+ b -= b_offset;
+
+ /* Function Body */
+ info = 0;
+ normaltransr = lsame_(transr, "N");
+ lside = lsame_(side, "L");
+ lower = lsame_(uplo, "L");
+ notrans = lsame_(trans, "N");
+ if (! normaltransr && ! lsame_(transr, "C")) {
+ info = -1;
+ } else if (! lside && ! lsame_(side, "R")) {
+ info = -2;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ info = -3;
+ } else if (! notrans && ! lsame_(trans, "C")) {
+ info = -4;
+ } else if (! lsame_(diag, "N") && ! lsame_(diag,
+ "U")) {
+ info = -5;
+ } else if (*m < 0) {
+ info = -6;
+ } else if (*n < 0) {
+ info = -7;
+ } else if (*ldb < max(1,*m)) {
+ info = -11;
+ }
+ if (info != 0) {
+ i__1 = -info;
+ xerbla_("CTFSM ", &i__1);
+ return 0;
+ }
+
+/* Quick return when ( (N.EQ.0).OR.(M.EQ.0) ) */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Quick return when ALPHA.EQ.(0E+0,0E+0) */
+
+ if (alpha->r == 0.f && alpha->i == 0.f) {
+ i__1 = *n - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *m - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0.f, b[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+ if (lside) {
+
+/* SIDE = 'L' */
+
+/* A is M-by-M. */
+/* If M is odd, set NISODD = .TRUE., and M1 and M2. */
+/* If M is even, NISODD = .FALSE., and M. */
+
+ if (*m % 2 == 0) {
+ misodd = FALSE_;
+ k = *m / 2;
+ } else {
+ misodd = TRUE_;
+ if (lower) {
+ m2 = *m / 2;
+ m1 = *m - m2;
+ } else {
+ m1 = *m / 2;
+ m2 = *m - m1;
+ }
+ }
+
+ if (misodd) {
+
+/* SIDE = 'L' and N is odd */
+
+ if (normaltransr) {
+
+/* SIDE = 'L', N is odd, and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', UPLO = 'L', and */
+/* TRANS = 'N' */
+
+ if (*m == 1) {
+ ctrsm_("L", "L", "N", diag, &m1, n, alpha, a, m, &
+ b[b_offset], ldb);
+ } else {
+ ctrsm_("L", "L", "N", diag, &m1, n, alpha, a, m, &
+ b[b_offset], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("N", "N", &m2, n, &m1, &q__1, &a[m1], m, &
+ b[b_offset], ldb, alpha, &b[m1], ldb);
+ ctrsm_("L", "U", "C", diag, &m2, n, &c_b1, &a[*m],
+ m, &b[m1], ldb);
+ }
+
+ } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', UPLO = 'L', and */
+/* TRANS = 'C' */
+
+ if (*m == 1) {
+ ctrsm_("L", "L", "C", diag, &m1, n, alpha, a, m, &
+ b[b_offset], ldb);
+ } else {
+ ctrsm_("L", "U", "N", diag, &m2, n, alpha, &a[*m],
+ m, &b[m1], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("C", "N", &m1, n, &m2, &q__1, &a[m1], m, &
+ b[m1], ldb, alpha, &b[b_offset], ldb);
+ ctrsm_("L", "L", "C", diag, &m1, n, &c_b1, a, m, &
+ b[b_offset], ldb);
+ }
+
+ }
+
+ } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', and UPLO = 'U' */
+
+ if (! notrans) {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', UPLO = 'U', and */
+/* TRANS = 'N' */
+
+ ctrsm_("L", "L", "N", diag, &m1, n, alpha, &a[m2], m,
+ &b[b_offset], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("C", "N", &m2, n, &m1, &q__1, a, m, &b[
+ b_offset], ldb, alpha, &b[m1], ldb);
+ ctrsm_("L", "U", "C", diag, &m2, n, &c_b1, &a[m1], m,
+ &b[m1], ldb);
+
+ } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', UPLO = 'U', and */
+/* TRANS = 'C' */
+
+ ctrsm_("L", "U", "N", diag, &m2, n, alpha, &a[m1], m,
+ &b[m1], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("N", "N", &m1, n, &m2, &q__1, a, m, &b[m1],
+ ldb, alpha, &b[b_offset], ldb);
+ ctrsm_("L", "L", "C", diag, &m1, n, &c_b1, &a[m2], m,
+ &b[b_offset], ldb);
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'L', N is odd, and TRANSR = 'C' */
+
+ if (lower) {
+
+/* SIDE ='L', N is odd, TRANSR = 'C', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='L', N is odd, TRANSR = 'C', UPLO = 'L', and */
+/* TRANS = 'N' */
+
+ if (*m == 1) {
+ ctrsm_("L", "U", "C", diag, &m1, n, alpha, a, &m1,
+ &b[b_offset], ldb);
+ } else {
+ ctrsm_("L", "U", "C", diag, &m1, n, alpha, a, &m1,
+ &b[b_offset], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("C", "N", &m2, n, &m1, &q__1, &a[m1 * m1],
+ &m1, &b[b_offset], ldb, alpha, &b[m1],
+ ldb);
+ ctrsm_("L", "L", "N", diag, &m2, n, &c_b1, &a[1],
+ &m1, &b[m1], ldb);
+ }
+
+ } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'C', UPLO = 'L', and */
+/* TRANS = 'C' */
+
+ if (*m == 1) {
+ ctrsm_("L", "U", "N", diag, &m1, n, alpha, a, &m1,
+ &b[b_offset], ldb);
+ } else {
+ ctrsm_("L", "L", "C", diag, &m2, n, alpha, &a[1],
+ &m1, &b[m1], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("N", "N", &m1, n, &m2, &q__1, &a[m1 * m1],
+ &m1, &b[m1], ldb, alpha, &b[b_offset],
+ ldb);
+ ctrsm_("L", "U", "N", diag, &m1, n, &c_b1, a, &m1,
+ &b[b_offset], ldb);
+ }
+
+ }
+
+ } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'C', and UPLO = 'U' */
+
+ if (! notrans) {
+
+/* SIDE ='L', N is odd, TRANSR = 'C', UPLO = 'U', and */
+/* TRANS = 'N' */
+
+ ctrsm_("L", "U", "C", diag, &m1, n, alpha, &a[m2 * m2]
+, &m2, &b[b_offset], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("N", "N", &m2, n, &m1, &q__1, a, &m2, &b[
+ b_offset], ldb, alpha, &b[m1], ldb);
+ ctrsm_("L", "L", "N", diag, &m2, n, &c_b1, &a[m1 * m2]
+, &m2, &b[m1], ldb);
+
+ } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'C', UPLO = 'U', and */
+/* TRANS = 'C' */
+
+ ctrsm_("L", "L", "C", diag, &m2, n, alpha, &a[m1 * m2]
+, &m2, &b[m1], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("C", "N", &m1, n, &m2, &q__1, a, &m2, &b[m1],
+ ldb, alpha, &b[b_offset], ldb);
+ ctrsm_("L", "U", "N", diag, &m1, n, &c_b1, &a[m2 * m2]
+, &m2, &b[b_offset], ldb);
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'L' and N is even */
+
+ if (normaltransr) {
+
+/* SIDE = 'L', N is even, and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SIDE ='L', N is even, TRANSR = 'N', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'L', */
+/* and TRANS = 'N' */
+
+ i__1 = *m + 1;
+ ctrsm_("L", "L", "N", diag, &k, n, alpha, &a[1], &
+ i__1, &b[b_offset], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ i__1 = *m + 1;
+ cgemm_("N", "N", &k, n, &k, &q__1, &a[k + 1], &i__1, &
+ b[b_offset], ldb, alpha, &b[k], ldb);
+ i__1 = *m + 1;
+ ctrsm_("L", "U", "C", diag, &k, n, &c_b1, a, &i__1, &
+ b[k], ldb);
+
+ } else {
+
+/* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'L', */
+/* and TRANS = 'C' */
+
+ i__1 = *m + 1;
+ ctrsm_("L", "U", "N", diag, &k, n, alpha, a, &i__1, &
+ b[k], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ i__1 = *m + 1;
+ cgemm_("C", "N", &k, n, &k, &q__1, &a[k + 1], &i__1, &
+ b[k], ldb, alpha, &b[b_offset], ldb);
+ i__1 = *m + 1;
+ ctrsm_("L", "L", "C", diag, &k, n, &c_b1, &a[1], &
+ i__1, &b[b_offset], ldb);
+
+ }
+
+ } else {
+
+/* SIDE ='L', N is even, TRANSR = 'N', and UPLO = 'U' */
+
+ if (! notrans) {
+
+/* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'U', */
+/* and TRANS = 'N' */
+
+ i__1 = *m + 1;
+ ctrsm_("L", "L", "N", diag, &k, n, alpha, &a[k + 1], &
+ i__1, &b[b_offset], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ i__1 = *m + 1;
+ cgemm_("C", "N", &k, n, &k, &q__1, a, &i__1, &b[
+ b_offset], ldb, alpha, &b[k], ldb);
+ i__1 = *m + 1;
+ ctrsm_("L", "U", "C", diag, &k, n, &c_b1, &a[k], &
+ i__1, &b[k], ldb);
+
+ } else {
+
+/* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'U', */
+/* and TRANS = 'C' */
+ i__1 = *m + 1;
+ ctrsm_("L", "U", "N", diag, &k, n, alpha, &a[k], &
+ i__1, &b[k], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ i__1 = *m + 1;
+ cgemm_("N", "N", &k, n, &k, &q__1, a, &i__1, &b[k],
+ ldb, alpha, &b[b_offset], ldb);
+ i__1 = *m + 1;
+ ctrsm_("L", "L", "C", diag, &k, n, &c_b1, &a[k + 1], &
+ i__1, &b[b_offset], ldb);
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'L', N is even, and TRANSR = 'C' */
+
+ if (lower) {
+
+/* SIDE ='L', N is even, TRANSR = 'C', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='L', N is even, TRANSR = 'C', UPLO = 'L', */
+/* and TRANS = 'N' */
+
+ ctrsm_("L", "U", "C", diag, &k, n, alpha, &a[k], &k, &
+ b[b_offset], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("C", "N", &k, n, &k, &q__1, &a[k * (k + 1)], &
+ k, &b[b_offset], ldb, alpha, &b[k], ldb);
+ ctrsm_("L", "L", "N", diag, &k, n, &c_b1, a, &k, &b[k]
+, ldb);
+
+ } else {
+
+/* SIDE ='L', N is even, TRANSR = 'C', UPLO = 'L', */
+/* and TRANS = 'C' */
+
+ ctrsm_("L", "L", "C", diag, &k, n, alpha, a, &k, &b[k]
+, ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("N", "N", &k, n, &k, &q__1, &a[k * (k + 1)], &
+ k, &b[k], ldb, alpha, &b[b_offset], ldb);
+ ctrsm_("L", "U", "N", diag, &k, n, &c_b1, &a[k], &k, &
+ b[b_offset], ldb);
+
+ }
+
+ } else {
+
+/* SIDE ='L', N is even, TRANSR = 'C', and UPLO = 'U' */
+
+ if (! notrans) {
+
+/* SIDE ='L', N is even, TRANSR = 'C', UPLO = 'U', */
+/* and TRANS = 'N' */
+
+ ctrsm_("L", "U", "C", diag, &k, n, alpha, &a[k * (k +
+ 1)], &k, &b[b_offset], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("N", "N", &k, n, &k, &q__1, a, &k, &b[b_offset]
+, ldb, alpha, &b[k], ldb);
+ ctrsm_("L", "L", "N", diag, &k, n, &c_b1, &a[k * k], &
+ k, &b[k], ldb);
+
+ } else {
+
+/* SIDE ='L', N is even, TRANSR = 'C', UPLO = 'U', */
+/* and TRANS = 'C' */
+
+ ctrsm_("L", "L", "C", diag, &k, n, alpha, &a[k * k], &
+ k, &b[k], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("C", "N", &k, n, &k, &q__1, a, &k, &b[k], ldb,
+ alpha, &b[b_offset], ldb);
+ ctrsm_("L", "U", "N", diag, &k, n, &c_b1, &a[k * (k +
+ 1)], &k, &b[b_offset], ldb);
+
+ }
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'R' */
+
+/* A is N-by-N. */
+/* If N is odd, set NISODD = .TRUE., and N1 and N2. */
+/* If N is even, NISODD = .FALSE., and K. */
+
+ if (*n % 2 == 0) {
+ nisodd = FALSE_;
+ k = *n / 2;
+ } else {
+ nisodd = TRUE_;
+ if (lower) {
+ n2 = *n / 2;
+ n1 = *n - n2;
+ } else {
+ n1 = *n / 2;
+ n2 = *n - n1;
+ }
+ }
+
+ if (nisodd) {
+
+/* SIDE = 'R' and N is odd */
+
+ if (normaltransr) {
+
+/* SIDE = 'R', N is odd, and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', UPLO = 'L', and */
+/* TRANS = 'N' */
+
+ ctrsm_("R", "U", "C", diag, m, &n2, alpha, &a[*n], n,
+ &b[n1 * b_dim1], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("N", "N", m, &n1, &n2, &q__1, &b[n1 * b_dim1],
+ ldb, &a[n1], n, alpha, b, ldb);
+ ctrsm_("R", "L", "N", diag, m, &n1, &c_b1, a, n, b,
+ ldb);
+
+ } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', UPLO = 'L', and */
+/* TRANS = 'C' */
+
+ ctrsm_("R", "L", "C", diag, m, &n1, alpha, a, n, b,
+ ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("N", "C", m, &n2, &n1, &q__1, b, ldb, &a[n1],
+ n, alpha, &b[n1 * b_dim1], ldb);
+ ctrsm_("R", "U", "N", diag, m, &n2, &c_b1, &a[*n], n,
+ &b[n1 * b_dim1], ldb);
+
+ }
+
+ } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', UPLO = 'U', and */
+/* TRANS = 'N' */
+
+ ctrsm_("R", "L", "C", diag, m, &n1, alpha, &a[n2], n,
+ b, ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("N", "N", m, &n2, &n1, &q__1, b, ldb, a, n,
+ alpha, &b[n1 * b_dim1], ldb);
+ ctrsm_("R", "U", "N", diag, m, &n2, &c_b1, &a[n1], n,
+ &b[n1 * b_dim1], ldb);
+
+ } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', UPLO = 'U', and */
+/* TRANS = 'C' */
+
+ ctrsm_("R", "U", "C", diag, m, &n2, alpha, &a[n1], n,
+ &b[n1 * b_dim1], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("N", "C", m, &n1, &n2, &q__1, &b[n1 * b_dim1],
+ ldb, a, n, alpha, b, ldb);
+ ctrsm_("R", "L", "N", diag, m, &n1, &c_b1, &a[n2], n,
+ b, ldb);
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'R', N is odd, and TRANSR = 'C' */
+
+ if (lower) {
+
+/* SIDE ='R', N is odd, TRANSR = 'C', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is odd, TRANSR = 'C', UPLO = 'L', and */
+/* TRANS = 'N' */
+
+ ctrsm_("R", "L", "N", diag, m, &n2, alpha, &a[1], &n1,
+ &b[n1 * b_dim1], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("N", "C", m, &n1, &n2, &q__1, &b[n1 * b_dim1],
+ ldb, &a[n1 * n1], &n1, alpha, b, ldb);
+ ctrsm_("R", "U", "C", diag, m, &n1, &c_b1, a, &n1, b,
+ ldb);
+
+ } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'C', UPLO = 'L', and */
+/* TRANS = 'C' */
+
+ ctrsm_("R", "U", "N", diag, m, &n1, alpha, a, &n1, b,
+ ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("N", "N", m, &n2, &n1, &q__1, b, ldb, &a[n1 *
+ n1], &n1, alpha, &b[n1 * b_dim1], ldb);
+ ctrsm_("R", "L", "C", diag, m, &n2, &c_b1, &a[1], &n1,
+ &b[n1 * b_dim1], ldb);
+
+ }
+
+ } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'C', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is odd, TRANSR = 'C', UPLO = 'U', and */
+/* TRANS = 'N' */
+
+ ctrsm_("R", "U", "N", diag, m, &n1, alpha, &a[n2 * n2]
+, &n2, b, ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("N", "C", m, &n2, &n1, &q__1, b, ldb, a, &n2,
+ alpha, &b[n1 * b_dim1], ldb);
+ ctrsm_("R", "L", "C", diag, m, &n2, &c_b1, &a[n1 * n2]
+, &n2, &b[n1 * b_dim1], ldb);
+
+ } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'C', UPLO = 'U', and */
+/* TRANS = 'C' */
+
+ ctrsm_("R", "L", "N", diag, m, &n2, alpha, &a[n1 * n2]
+, &n2, &b[n1 * b_dim1], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("N", "N", m, &n1, &n2, &q__1, &b[n1 * b_dim1],
+ ldb, a, &n2, alpha, b, ldb);
+ ctrsm_("R", "U", "C", diag, m, &n1, &c_b1, &a[n2 * n2]
+, &n2, b, ldb);
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'R' and N is even */
+
+ if (normaltransr) {
+
+/* SIDE = 'R', N is even, and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SIDE ='R', N is even, TRANSR = 'N', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is even, TRANSR = 'N', UPLO = 'L', */
+/* and TRANS = 'N' */
+
+ i__1 = *n + 1;
+ ctrsm_("R", "U", "C", diag, m, &k, alpha, a, &i__1, &
+ b[k * b_dim1], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ i__1 = *n + 1;
+ cgemm_("N", "N", m, &k, &k, &q__1, &b[k * b_dim1],
+ ldb, &a[k + 1], &i__1, alpha, b, ldb);
+ i__1 = *n + 1;
+ ctrsm_("R", "L", "N", diag, m, &k, &c_b1, &a[1], &
+ i__1, b, ldb);
+
+ } else {
+
+/* SIDE ='R', N is even, TRANSR = 'N', UPLO = 'L', */
+/* and TRANS = 'C' */
+
+ i__1 = *n + 1;
+ ctrsm_("R", "L", "C", diag, m, &k, alpha, &a[1], &
+ i__1, b, ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ i__1 = *n + 1;
+ cgemm_("N", "C", m, &k, &k, &q__1, b, ldb, &a[k + 1],
+ &i__1, alpha, &b[k * b_dim1], ldb);
+ i__1 = *n + 1;
+ ctrsm_("R", "U", "N", diag, m, &k, &c_b1, a, &i__1, &
+ b[k * b_dim1], ldb);
+
+ }
+
+ } else {
+
+/* SIDE ='R', N is even, TRANSR = 'N', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is even, TRANSR = 'N', UPLO = 'U', */
+/* and TRANS = 'N' */
+
+ i__1 = *n + 1;
+ ctrsm_("R", "L", "C", diag, m, &k, alpha, &a[k + 1], &
+ i__1, b, ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ i__1 = *n + 1;
+ cgemm_("N", "N", m, &k, &k, &q__1, b, ldb, a, &i__1,
+ alpha, &b[k * b_dim1], ldb);
+ i__1 = *n + 1;
+ ctrsm_("R", "U", "N", diag, m, &k, &c_b1, &a[k], &
+ i__1, &b[k * b_dim1], ldb);
+
+ } else {
+
+/* SIDE ='R', N is even, TRANSR = 'N', UPLO = 'U', */
+/* and TRANS = 'C' */
+
+ i__1 = *n + 1;
+ ctrsm_("R", "U", "C", diag, m, &k, alpha, &a[k], &
+ i__1, &b[k * b_dim1], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ i__1 = *n + 1;
+ cgemm_("N", "C", m, &k, &k, &q__1, &b[k * b_dim1],
+ ldb, a, &i__1, alpha, b, ldb);
+ i__1 = *n + 1;
+ ctrsm_("R", "L", "N", diag, m, &k, &c_b1, &a[k + 1], &
+ i__1, b, ldb);
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'R', N is even, and TRANSR = 'C' */
+
+ if (lower) {
+
+/* SIDE ='R', N is even, TRANSR = 'C', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is even, TRANSR = 'C', UPLO = 'L', */
+/* and TRANS = 'N' */
+
+ ctrsm_("R", "L", "N", diag, m, &k, alpha, a, &k, &b[k
+ * b_dim1], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("N", "C", m, &k, &k, &q__1, &b[k * b_dim1],
+ ldb, &a[(k + 1) * k], &k, alpha, b, ldb);
+ ctrsm_("R", "U", "C", diag, m, &k, &c_b1, &a[k], &k,
+ b, ldb);
+
+ } else {
+
+/* SIDE ='R', N is even, TRANSR = 'C', UPLO = 'L', */
+/* and TRANS = 'C' */
+
+ ctrsm_("R", "U", "N", diag, m, &k, alpha, &a[k], &k,
+ b, ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("N", "N", m, &k, &k, &q__1, b, ldb, &a[(k + 1)
+ * k], &k, alpha, &b[k * b_dim1], ldb);
+ ctrsm_("R", "L", "C", diag, m, &k, &c_b1, a, &k, &b[k
+ * b_dim1], ldb);
+
+ }
+
+ } else {
+
+/* SIDE ='R', N is even, TRANSR = 'C', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is even, TRANSR = 'C', UPLO = 'U', */
+/* and TRANS = 'N' */
+
+ ctrsm_("R", "U", "N", diag, m, &k, alpha, &a[(k + 1) *
+ k], &k, b, ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("N", "C", m, &k, &k, &q__1, b, ldb, a, &k,
+ alpha, &b[k * b_dim1], ldb);
+ ctrsm_("R", "L", "C", diag, m, &k, &c_b1, &a[k * k], &
+ k, &b[k * b_dim1], ldb);
+
+ } else {
+
+/* SIDE ='R', N is even, TRANSR = 'C', UPLO = 'U', */
+/* and TRANS = 'C' */
+
+ ctrsm_("R", "L", "N", diag, m, &k, alpha, &a[k * k], &
+ k, &b[k * b_dim1], ldb);
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("N", "N", m, &k, &k, &q__1, &b[k * b_dim1],
+ ldb, a, &k, alpha, b, ldb);
+ ctrsm_("R", "U", "C", diag, m, &k, &c_b1, &a[(k + 1) *
+ k], &k, b, ldb);
+
+ }
+
+ }
+
+ }
+
+ }
+ }
+
+ return 0;
+
+/* End of CTFSM */
+
+} /* ctfsm_ */
diff --git a/contrib/libs/clapack/ctftri.c b/contrib/libs/clapack/ctftri.c
new file mode 100644
index 0000000000..1453e33cae
--- /dev/null
+++ b/contrib/libs/clapack/ctftri.c
@@ -0,0 +1,500 @@
+/* ctftri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+
+/* Subroutine */ int ctftri_(char *transr, char *uplo, char *diag, integer *n,
+	 complex *a, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    complex q__1;
+
+    /* Local variables */
+    integer k, n1, n2;
+    logical normaltransr;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int ctrmm_(char *, char *, char *, char *,
+	     integer *, integer *, complex *, complex *, integer *, complex *,
+	     integer *);
+    logical lower;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical nisodd;
+    extern /* Subroutine */ int ctrtri_(char *, char *, integer *, complex *,
+	     integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTFTRI computes the inverse of a triangular matrix A stored in RFP */
+/* format. */
+
+/* This is a Level 3 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': The Normal TRANSR of RFP A is stored; */
+/* = 'C': The Conjugate-transpose TRANSR of RFP A is stored. */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* DIAG (input) CHARACTER */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension ( N*(N+1)/2 ); */
+/* On entry, the triangular matrix A in RFP format. RFP format */
+/* is described by TRANSR, UPLO, and N as follows: If TRANSR = */
+/* 'N' then RFP A is (0:N,0:k-1) when N is even; k=N/2. RFP A is */
+/* (0:N-1,0:k) when N is odd; k=N/2. IF TRANSR = 'C' then RFP is */
+/* the Conjugate-transpose of RFP A as defined when */
+/* TRANSR = 'N'. The contents of RFP A are defined by UPLO as */
+/* follows: If UPLO = 'U' the RFP A contains the nt elements of */
+/* upper packed A; If UPLO = 'L' the RFP A contains the nt */
+/* elements of lower packed A. The LDA of RFP A is (N+1)/2 when */
+/* TRANSR = 'C'. When TRANSR is 'N' the LDA is N+1 when N is */
+/* even and N when N is odd. See the Note below for more details. */
+
+/* On exit, the (triangular) inverse of the original matrix, in */
+/* the same storage format. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, A(i,i) is exactly zero. The triangular */
+/* matrix is singular and its inverse can not be computed. */
+
+/* Notes: */
+/* ====== */
+
+/* We first consider Standard Packed Format when N is even. */
+/* We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* conjugate-transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* conjugate-transpose of the last three columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- -- */
+/* 03 04 05 33 43 53 */
+/* -- -- */
+/* 13 14 15 00 44 54 */
+/* -- */
+/* 23 24 25 10 11 55 */
+
+/* 33 34 35 20 21 22 */
+/* -- */
+/* 00 44 45 30 31 32 */
+/* -- -- */
+/* 01 11 55 40 41 42 */
+/* -- -- -- */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We next consider Standard Packed Format when N is odd. */
+/* We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* conjugate-transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* conjugate-transpose of the last two columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- */
+/* 02 03 04 00 33 43 */
+/* -- */
+/* 12 13 14 10 11 44 */
+
+/* 22 23 24 20 21 22 */
+/* -- */
+/* 00 33 34 30 31 32 */
+/* -- -- */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- */
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    *info = 0;
+    normaltransr = lsame_(transr, "N");
+    lower = lsame_(uplo, "L");
+    if (! normaltransr && ! lsame_(transr, "C")) {
+	*info = -1;
+    } else if (! lower && ! lsame_(uplo, "U")) {
+	*info = -2;
+    } else if (! lsame_(diag, "N") && ! lsame_(diag,
+	    "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CTFTRI", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* If N is odd, set NISODD = .TRUE. */
+/* If N is even, set K = N/2 and NISODD = .FALSE. */
+
+    if (*n % 2 == 0) {
+	k = *n / 2;
+	nisodd = FALSE_;
+    } else {
+	nisodd = TRUE_;
+    }
+
+/* Set N1 and N2 depending on LOWER */
+
+    if (lower) {
+	n2 = *n / 2;
+	n1 = *n - n2;
+    } else {
+	n1 = *n / 2;
+	n2 = *n - n1;
+    }
+
+
+/* start execution: there are eight cases */
+
+/* NOTE(doc): the eight cases are the cross product of TRANSR ('N'/'C'), */
+/* UPLO ('L'/'U') and the parity of N. Each case follows the same */
+/* blocked scheme on the RFP layout: invert triangular block T1 with */
+/* CTRTRI, update the rectangular block S with two CTRMM multiplies, */
+/* then invert triangular block T2. When the second CTRTRI reports a */
+/* zero diagonal entry, its block-local index is shifted by the size of */
+/* the first block (N1 or K) so INFO refers to the global diagonal */
+/* position, matching the INFO > 0 contract documented above. */
+
+    if (nisodd) {
+
+/* N is odd */
+
+	if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:n1-1) ) */
+/* T1 -> a(0,0), T2 -> a(0,1), S -> a(n1,0) */
+/* T1 -> a(0), T2 -> a(n), S -> a(n1) */
+
+		ctrtri_("L", diag, &n1, a, n, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		q__1.r = -1.f, q__1.i = -0.f;
+		ctrmm_("R", "L", "N", diag, &n2, &n1, &q__1, a, n, &a[n1], n);
+		ctrtri_("U", diag, &n2, &a[*n], n, info)
+			;
+		if (*info > 0) {
+		    *info += n1;
+		}
+		if (*info > 0) {
+		    return 0;
+		}
+		ctrmm_("L", "U", "C", diag, &n2, &n1, &c_b1, &a[*n], n, &a[n1]
+, n);
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:n2-1) */
+/* T1 -> a(n1+1,0), T2 -> a(n1,0), S -> a(0,0) */
+/* T1 -> a(n2), T2 -> a(n1), S -> a(0) */
+
+		ctrtri_("L", diag, &n1, &a[n2], n, info)
+			;
+		if (*info > 0) {
+		    return 0;
+		}
+		q__1.r = -1.f, q__1.i = -0.f;
+		ctrmm_("L", "L", "C", diag, &n1, &n2, &q__1, &a[n2], n, a, n);
+		ctrtri_("U", diag, &n2, &a[n1], n, info)
+			;
+		if (*info > 0) {
+		    *info += n1;
+		}
+		if (*info > 0) {
+		    return 0;
+		}
+		ctrmm_("R", "U", "N", diag, &n1, &n2, &c_b1, &a[n1], n, a, n);
+
+	    }
+
+	} else {
+
+/* N is odd and TRANSR = 'C' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is odd */
+/* T1 -> a(0), T2 -> a(1), S -> a(0+n1*n1) */
+
+		ctrtri_("U", diag, &n1, a, &n1, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		q__1.r = -1.f, q__1.i = -0.f;
+		ctrmm_("L", "U", "N", diag, &n1, &n2, &q__1, a, &n1, &a[n1 *
+			n1], &n1);
+		ctrtri_("L", diag, &n2, &a[1], &n1, info);
+		if (*info > 0) {
+		    *info += n1;
+		}
+		if (*info > 0) {
+		    return 0;
+		}
+		ctrmm_("R", "L", "C", diag, &n1, &n2, &c_b1, &a[1], &n1, &a[
+			n1 * n1], &n1);
+
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is odd */
+/* T1 -> a(0+n2*n2), T2 -> a(0+n1*n2), S -> a(0) */
+
+		ctrtri_("U", diag, &n1, &a[n2 * n2], &n2, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		q__1.r = -1.f, q__1.i = -0.f;
+		ctrmm_("R", "U", "C", diag, &n2, &n1, &q__1, &a[n2 * n2], &n2,
+			 a, &n2);
+		ctrtri_("L", diag, &n2, &a[n1 * n2], &n2, info);
+		if (*info > 0) {
+		    *info += n1;
+		}
+		if (*info > 0) {
+		    return 0;
+		}
+		ctrmm_("L", "L", "N", diag, &n2, &n1, &c_b1, &a[n1 * n2], &n2,
+			 a, &n2);
+	    }
+
+	}
+
+    } else {
+
+/* N is even */
+
+	if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/* T1 -> a(1), T2 -> a(0), S -> a(k+1) */
+
+		i__1 = *n + 1;
+		ctrtri_("L", diag, &k, &a[1], &i__1, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		q__1.r = -1.f, q__1.i = -0.f;
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		ctrmm_("R", "L", "N", diag, &k, &k, &q__1, &a[1], &i__1, &a[k
+			+ 1], &i__2);
+		i__1 = *n + 1;
+		ctrtri_("U", diag, &k, a, &i__1, info);
+		if (*info > 0) {
+		    *info += k;
+		}
+		if (*info > 0) {
+		    return 0;
+		}
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		ctrmm_("L", "U", "C", diag, &k, &k, &c_b1, a, &i__1, &a[k + 1]
+, &i__2);
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(k+1,0) , T2 -> a(k,0), S -> a(0,0) */
+/* T1 -> a(k+1), T2 -> a(k), S -> a(0) */
+
+		i__1 = *n + 1;
+		ctrtri_("L", diag, &k, &a[k + 1], &i__1, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		q__1.r = -1.f, q__1.i = -0.f;
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		ctrmm_("L", "L", "C", diag, &k, &k, &q__1, &a[k + 1], &i__1,
+			a, &i__2);
+		i__1 = *n + 1;
+		ctrtri_("U", diag, &k, &a[k], &i__1, info);
+		if (*info > 0) {
+		    *info += k;
+		}
+		if (*info > 0) {
+		    return 0;
+		}
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		ctrmm_("R", "U", "N", diag, &k, &k, &c_b1, &a[k], &i__1, a, &
+			i__2);
+	    }
+	} else {
+
+/* N is even and TRANSR = 'C' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,1), T2 -> B(0,0), S -> B(0,k+1) */
+/* T1 -> a(0+k), T2 -> a(0+0), S -> a(0+k*(k+1)); lda=k */
+
+		ctrtri_("U", diag, &k, &a[k], &k, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		q__1.r = -1.f, q__1.i = -0.f;
+		ctrmm_("L", "U", "N", diag, &k, &k, &q__1, &a[k], &k, &a[k * (
+			k + 1)], &k);
+		ctrtri_("L", diag, &k, a, &k, info);
+		if (*info > 0) {
+		    *info += k;
+		}
+		if (*info > 0) {
+		    return 0;
+		}
+		ctrmm_("R", "L", "C", diag, &k, &k, &c_b1, a, &k, &a[k * (k +
+			1)], &k);
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,k+1), T2 -> B(0,k), S -> B(0,0) */
+/* T1 -> a(0+k*(k+1)), T2 -> a(0+k*k), S -> a(0+0)); lda=k */
+
+		ctrtri_("U", diag, &k, &a[k * (k + 1)], &k, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		q__1.r = -1.f, q__1.i = -0.f;
+		ctrmm_("R", "U", "C", diag, &k, &k, &q__1, &a[k * (k + 1)], &
+			k, a, &k);
+		ctrtri_("L", diag, &k, &a[k * k], &k, info);
+		if (*info > 0) {
+		    *info += k;
+		}
+		if (*info > 0) {
+		    return 0;
+		}
+		ctrmm_("L", "L", "N", diag, &k, &k, &c_b1, &a[k * k], &k, a, &
+			k);
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of CTFTRI */
+
+} /* ctftri_ */
diff --git a/contrib/libs/clapack/ctfttp.c b/contrib/libs/clapack/ctfttp.c
new file mode 100644
index 0000000000..c581937158
--- /dev/null
+++ b/contrib/libs/clapack/ctfttp.c
@@ -0,0 +1,576 @@
+/* ctfttp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ctfttp_(char *transr, char *uplo, integer *n, complex *
+ arf, complex *ap, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, k, n1, n2, ij, jp, js, nt, lda, ijp;
+ logical normaltransr;
+ extern logical lsame_(char *, char *);
+ logical lower;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nisodd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTFTTP copies a triangular matrix A from rectangular full packed */
+/* format (TF) to standard packed format (TP). */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': ARF is in Normal format; */
+/* = 'C': ARF is in Conjugate-transpose format; */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* ARF (input) COMPLEX array, dimension ( N*(N+1)/2 ), */
+/* On entry, the upper or lower triangular matrix A stored in */
+/* RFP format. For a further discussion see Notes below. */
+
+/* AP (output) COMPLEX array, dimension ( N*(N+1)/2 ), */
+/* On exit, the upper or lower triangular matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Notes: */
+/* ====== */
+
+/* We first consider Standard Packed Format when N is even. */
+/* We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* conjugate-transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* conjugate-transpose of the last three columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- -- */
+/* 03 04 05 33 43 53 */
+/* -- -- */
+/* 13 14 15 00 44 54 */
+/* -- */
+/* 23 24 25 10 11 55 */
+
+/* 33 34 35 20 21 22 */
+/* -- */
+/* 00 44 45 30 31 32 */
+/* -- -- */
+/* 01 11 55 40 41 42 */
+/* -- -- -- */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We next consider Standard Packed Format when N is odd. */
+/* We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* conjugate-transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* conjugate-transpose of the last two columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- */
+/* 02 03 04 00 33 43 */
+/* -- */
+/* 12 13 14 10 11 44 */
+
+/* 22 23 24 20 21 22 */
+/* -- */
+/* 00 33 34 30 31 32 */
+/* -- -- */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- */
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ *info = 0;
+ normaltransr = lsame_(transr, "N");
+ lower = lsame_(uplo, "L");
+ if (! normaltransr && ! lsame_(transr, "C")) {
+ *info = -1;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CTFTTP", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (normaltransr) {
+ ap[0].r = arf[0].r, ap[0].i = arf[0].i;
+ } else {
+ r_cnjg(&q__1, arf);
+ ap[0].r = q__1.r, ap[0].i = q__1.i;
+ }
+ return 0;
+ }
+
+/* Size of array ARF(0:NT-1) */
+
+ nt = *n * (*n + 1) / 2;
+
+/* Set N1 and N2 depending on LOWER */
+
+ if (lower) {
+ n2 = *n / 2;
+ n1 = *n - n2;
+ } else {
+ n1 = *n / 2;
+ n2 = *n - n1;
+ }
+
+/* If N is odd, set NISODD = .TRUE. */
+/* If N is even, set K = N/2 and NISODD = .FALSE. */
+
+/* set lda of ARF^C; ARF^C is (0:(N+1)/2-1,0:N-noe) */
+/* where noe = 0 if n is even, noe = 1 if n is odd */
+
+ if (*n % 2 == 0) {
+ k = *n / 2;
+ nisodd = FALSE_;
+ lda = *n + 1;
+ } else {
+ nisodd = TRUE_;
+ lda = *n;
+ }
+
+/* ARF^C has lda rows and n+1-noe cols */
+
+ if (! normaltransr) {
+ lda = (*n + 1) / 2;
+ }
+
+/* start execution: there are eight cases */
+
+ if (nisodd) {
+
+/* N is odd */
+
+ if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:n1-1) ) */
+/* T1 -> a(0,0), T2 -> a(0,1), S -> a(n1,0) */
+/* T1 -> a(0), T2 -> a(n), S -> a(n1); lda = n */
+
+ ijp = 0;
+ jp = 0;
+ i__1 = n2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - 1;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ ij = i__ + jp;
+ i__3 = ijp;
+ i__4 = ij;
+ ap[i__3].r = arf[i__4].r, ap[i__3].i = arf[i__4].i;
+ ++ijp;
+ }
+ jp += lda;
+ }
+ i__1 = n2 - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = n2;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ ij = i__ + j * lda;
+ i__3 = ijp;
+ r_cnjg(&q__1, &arf[ij]);
+ ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+ ++ijp;
+ }
+ }
+
+ } else {
+
+/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:n2-1) */
+/* T1 -> a(n1+1,0), T2 -> a(n1,0), S -> a(0,0) */
+/* T1 -> a(n2), T2 -> a(n1), S -> a(0) */
+
+ ijp = 0;
+ i__1 = n1 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ ij = n2 + j;
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = ijp;
+ r_cnjg(&q__1, &arf[ij]);
+ ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+ ++ijp;
+ ij += lda;
+ }
+ }
+ js = 0;
+ i__1 = *n - 1;
+ for (j = n1; j <= i__1; ++j) {
+ ij = js;
+ i__2 = js + j;
+ for (ij = js; ij <= i__2; ++ij) {
+ i__3 = ijp;
+ i__4 = ij;
+ ap[i__3].r = arf[i__4].r, ap[i__3].i = arf[i__4].i;
+ ++ijp;
+ }
+ js += lda;
+ }
+
+ }
+
+ } else {
+
+/* N is odd and TRANSR = 'C' */
+
+ if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is odd */
+/* T1 -> A(0,0) , T2 -> A(1,0) , S -> A(0,n1) */
+/* T1 -> a(0+0) , T2 -> a(1+0) , S -> a(0+n1*n1); lda=n1 */
+
+ ijp = 0;
+ i__1 = n2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = *n * lda - 1;
+ i__3 = lda;
+ for (ij = i__ * (lda + 1); i__3 < 0 ? ij >= i__2 : ij <=
+ i__2; ij += i__3) {
+ i__4 = ijp;
+ r_cnjg(&q__1, &arf[ij]);
+ ap[i__4].r = q__1.r, ap[i__4].i = q__1.i;
+ ++ijp;
+ }
+ }
+ js = 1;
+ i__1 = n2 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__3 = js + n2 - j - 1;
+ for (ij = js; ij <= i__3; ++ij) {
+ i__2 = ijp;
+ i__4 = ij;
+ ap[i__2].r = arf[i__4].r, ap[i__2].i = arf[i__4].i;
+ ++ijp;
+ }
+ js = js + lda + 1;
+ }
+
+ } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is odd */
+/* T1 -> A(0,n1+1), T2 -> A(0,n1), S -> A(0,0) */
+/* T1 -> a(n2*n2), T2 -> a(n1*n2), S -> a(0); lda = n2 */
+
+ ijp = 0;
+ js = n2 * lda;
+ i__1 = n1 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__3 = js + j;
+ for (ij = js; ij <= i__3; ++ij) {
+ i__2 = ijp;
+ i__4 = ij;
+ ap[i__2].r = arf[i__4].r, ap[i__2].i = arf[i__4].i;
+ ++ijp;
+ }
+ js += lda;
+ }
+ i__1 = n1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__3 = i__ + (n1 + i__) * lda;
+ i__2 = lda;
+ for (ij = i__; i__2 < 0 ? ij >= i__3 : ij <= i__3; ij +=
+ i__2) {
+ i__4 = ijp;
+ r_cnjg(&q__1, &arf[ij]);
+ ap[i__4].r = q__1.r, ap[i__4].i = q__1.i;
+ ++ijp;
+ }
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* N is even */
+
+ if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/* T1 -> a(1), T2 -> a(0), S -> a(k+1) */
+
+ ijp = 0;
+ jp = 0;
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - 1;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ ij = i__ + 1 + jp;
+ i__3 = ijp;
+ i__4 = ij;
+ ap[i__3].r = arf[i__4].r, ap[i__3].i = arf[i__4].i;
+ ++ijp;
+ }
+ jp += lda;
+ }
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = k - 1;
+ for (j = i__; j <= i__2; ++j) {
+ ij = i__ + j * lda;
+ i__3 = ijp;
+ r_cnjg(&q__1, &arf[ij]);
+ ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+ ++ijp;
+ }
+ }
+
+ } else {
+
+/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(k+1,0) , T2 -> a(k,0), S -> a(0,0) */
+/* T1 -> a(k+1), T2 -> a(k), S -> a(0) */
+
+ ijp = 0;
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ ij = k + 1 + j;
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = ijp;
+ r_cnjg(&q__1, &arf[ij]);
+ ap[i__3].r = q__1.r, ap[i__3].i = q__1.i;
+ ++ijp;
+ ij += lda;
+ }
+ }
+ js = 0;
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+ ij = js;
+ i__2 = js + j;
+ for (ij = js; ij <= i__2; ++ij) {
+ i__3 = ijp;
+ i__4 = ij;
+ ap[i__3].r = arf[i__4].r, ap[i__3].i = arf[i__4].i;
+ ++ijp;
+ }
+ js += lda;
+ }
+
+ }
+
+ } else {
+
+/* N is even and TRANSR = 'C' */
+
+ if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,1), T2 -> B(0,0), S -> B(0,k+1) */
+/* T1 -> a(0+k), T2 -> a(0+0), S -> a(0+k*(k+1)); lda=k */
+
+ ijp = 0;
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = (*n + 1) * lda - 1;
+ i__3 = lda;
+ for (ij = i__ + (i__ + 1) * lda; i__3 < 0 ? ij >= i__2 :
+ ij <= i__2; ij += i__3) {
+ i__4 = ijp;
+ r_cnjg(&q__1, &arf[ij]);
+ ap[i__4].r = q__1.r, ap[i__4].i = q__1.i;
+ ++ijp;
+ }
+ }
+ js = 0;
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__3 = js + k - j - 1;
+ for (ij = js; ij <= i__3; ++ij) {
+ i__2 = ijp;
+ i__4 = ij;
+ ap[i__2].r = arf[i__4].r, ap[i__2].i = arf[i__4].i;
+ ++ijp;
+ }
+ js = js + lda + 1;
+ }
+
+ } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,k+1), T2 -> B(0,k), S -> B(0,0) */
+/* T1 -> a(0+k*(k+1)), T2 -> a(0+k*k), S -> a(0+0)); lda=k */
+
+ ijp = 0;
+ js = (k + 1) * lda;
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__3 = js + j;
+ for (ij = js; ij <= i__3; ++ij) {
+ i__2 = ijp;
+ i__4 = ij;
+ ap[i__2].r = arf[i__4].r, ap[i__2].i = arf[i__4].i;
+ ++ijp;
+ }
+ js += lda;
+ }
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__3 = i__ + (k + i__) * lda;
+ i__2 = lda;
+ for (ij = i__; i__2 < 0 ? ij >= i__3 : ij <= i__3; ij +=
+ i__2) {
+ i__4 = ijp;
+ r_cnjg(&q__1, &arf[ij]);
+ ap[i__4].r = q__1.r, ap[i__4].i = q__1.i;
+ ++ijp;
+ }
+ }
+
+ }
+
+ }
+
+ }
+
+ return 0;
+
+/* End of CTFTTP */
+
+} /* ctfttp_ */
diff --git a/contrib/libs/clapack/ctfttr.c b/contrib/libs/clapack/ctfttr.c
new file mode 100644
index 0000000000..a927b2d9b5
--- /dev/null
+++ b/contrib/libs/clapack/ctfttr.c
@@ -0,0 +1,580 @@
+/* ctfttr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ctfttr_(char *transr, char *uplo, integer *n, complex *
+ arf, complex *a, integer *lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, k, l, n1, n2, ij, nt, nx2, np1x2;
+ logical normaltransr;
+ extern logical lsame_(char *, char *);
+ logical lower;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nisodd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTFTTR copies a triangular matrix A from rectangular full packed */
+/* format (TF) to standard full format (TR). */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': ARF is in Normal format; */
+/* = 'C': ARF is in Conjugate-transpose format; */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* ARF (input) COMPLEX array, dimension ( N*(N+1)/2 ), */
+/* On entry, the upper or lower triangular matrix A stored in */
+/* RFP format. For a further discussion see Notes below. */
+
+/* A (output) COMPLEX array, dimension ( LDA, N ) */
+/* On exit, the triangular matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of the array A contains */
+/* the upper triangular matrix, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of the array A contains */
+/* the lower triangular matrix, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Notes: */
+/* ====== */
+
+/* We first consider Standard Packed Format when N is even. */
+/* We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* conjugate-transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* conjugate-transpose of the last three columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- -- */
+/* 03 04 05 33 43 53 */
+/* -- -- */
+/* 13 14 15 00 44 54 */
+/* -- */
+/* 23 24 25 10 11 55 */
+
+/* 33 34 35 20 21 22 */
+/* -- */
+/* 00 44 45 30 31 32 */
+/* -- -- */
+/* 01 11 55 40 41 42 */
+/* -- -- -- */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We next consider Standard Packed Format when N is odd. */
+/* We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* conjugate-transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* conjugate-transpose of the last two columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- */
+/* 02 03 04 00 33 43 */
+/* -- */
+/* 12 13 14 10 11 44 */
+
+/* 22 23 24 20 21 22 */
+/* -- */
+/* 00 33 34 30 31 32 */
+/* -- -- */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- */
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda - 1 - 0 + 1;
+ a_offset = 0 + a_dim1 * 0;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ normaltransr = lsame_(transr, "N");
+ lower = lsame_(uplo, "L");
+ if (! normaltransr && ! lsame_(transr, "C")) {
+ *info = -1;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CTFTTR", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 1) {
+ if (*n == 1) {
+ if (normaltransr) {
+ a[0].r = arf[0].r, a[0].i = arf[0].i;
+ } else {
+ r_cnjg(&q__1, arf);
+ a[0].r = q__1.r, a[0].i = q__1.i;
+ }
+ }
+ return 0;
+ }
+
+/* Size of array ARF(1:2,0:nt-1) */
+
+ nt = *n * (*n + 1) / 2;
+
+/* set N1 and N2 depending on LOWER: for N even N1=N2=K */
+
+ if (lower) {
+ n2 = *n / 2;
+ n1 = *n - n2;
+ } else {
+ n1 = *n / 2;
+ n2 = *n - n1;
+ }
+
+/* If N is odd, set NISODD = .TRUE., LDA=N+1 and A is (N+1)--by--K2. */
+/* If N is even, set K = N/2 and NISODD = .FALSE., LDA=N and A is */
+/* N--by--(N+1)/2. */
+
+ if (*n % 2 == 0) {
+ k = *n / 2;
+ nisodd = FALSE_;
+ if (! lower) {
+ np1x2 = *n + *n + 2;
+ }
+ } else {
+ nisodd = TRUE_;
+ if (! lower) {
+ nx2 = *n + *n;
+ }
+ }
+
+ if (nisodd) {
+
+/* N is odd */
+
+ if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:n1-1) ) */
+/* T1 -> a(0,0), T2 -> a(0,1), S -> a(n1,0) */
+/* T1 -> a(0), T2 -> a(n), S -> a(n1); lda=n */
+
+ ij = 0;
+ i__1 = n2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = n2 + j;
+ for (i__ = n1; i__ <= i__2; ++i__) {
+ i__3 = n2 + j + i__ * a_dim1;
+ r_cnjg(&q__1, &arf[ij]);
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = ij;
+ a[i__3].r = arf[i__4].r, a[i__3].i = arf[i__4].i;
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:n2-1) */
+/* T1 -> a(n1+1,0), T2 -> a(n1,0), S -> a(0,0) */
+/* T1 -> a(n2), T2 -> a(n1), S -> a(0); lda=n */
+
+ ij = nt - *n;
+ i__1 = n1;
+ for (j = *n - 1; j >= i__1; --j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = ij;
+ a[i__3].r = arf[i__4].r, a[i__3].i = arf[i__4].i;
+ ++ij;
+ }
+ i__2 = n1 - 1;
+ for (l = j - n1; l <= i__2; ++l) {
+ i__3 = j - n1 + l * a_dim1;
+ r_cnjg(&q__1, &arf[ij]);
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ++ij;
+ }
+ ij -= nx2;
+ }
+
+ }
+
+ } else {
+
+/* N is odd and TRANSR = 'C' */
+
+ if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is odd */
+/* T1 -> A(0,0) , T2 -> A(1,0) , S -> A(0,n1) */
+/* T1 -> A(0+0) , T2 -> A(1+0) , S -> A(0+n1*n1); lda=n1 */
+
+ ij = 0;
+ i__1 = n2 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * a_dim1;
+ r_cnjg(&q__1, &arf[ij]);
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = n1 + j; i__ <= i__2; ++i__) {
+ i__3 = i__ + (n1 + j) * a_dim1;
+ i__4 = ij;
+ a[i__3].r = arf[i__4].r, a[i__3].i = arf[i__4].i;
+ ++ij;
+ }
+ }
+ i__1 = *n - 1;
+ for (j = n2; j <= i__1; ++j) {
+ i__2 = n1 - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * a_dim1;
+ r_cnjg(&q__1, &arf[ij]);
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is odd */
+/* T1 -> A(0,n1+1), T2 -> A(0,n1), S -> A(0,0) */
+/* T1 -> A(n2*n2), T2 -> A(n1*n2), S -> A(0); lda = n2 */
+
+ ij = 0;
+ i__1 = n1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - 1;
+ for (i__ = n1; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * a_dim1;
+ r_cnjg(&q__1, &arf[ij]);
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ++ij;
+ }
+ }
+ i__1 = n1 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = ij;
+ a[i__3].r = arf[i__4].r, a[i__3].i = arf[i__4].i;
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (l = n2 + j; l <= i__2; ++l) {
+ i__3 = n2 + j + l * a_dim1;
+ r_cnjg(&q__1, &arf[ij]);
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ++ij;
+ }
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* N is even */
+
+ if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/* T1 -> a(1), T2 -> a(0), S -> a(k+1); lda=n+1 */
+
+ ij = 0;
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k + j;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ i__3 = k + j + i__ * a_dim1;
+ r_cnjg(&q__1, &arf[ij]);
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = ij;
+ a[i__3].r = arf[i__4].r, a[i__3].i = arf[i__4].i;
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(k+1,0) , T2 -> a(k,0), S -> a(0,0) */
+/* T1 -> a(k+1), T2 -> a(k), S -> a(0); lda=n+1 */
+
+ ij = nt - *n - 1;
+ i__1 = k;
+ for (j = *n - 1; j >= i__1; --j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = ij;
+ a[i__3].r = arf[i__4].r, a[i__3].i = arf[i__4].i;
+ ++ij;
+ }
+ i__2 = k - 1;
+ for (l = j - k; l <= i__2; ++l) {
+ i__3 = j - k + l * a_dim1;
+ r_cnjg(&q__1, &arf[ij]);
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ++ij;
+ }
+ ij -= np1x2;
+ }
+
+ }
+
+ } else {
+
+/* N is even and TRANSR = 'C' */
+
+ if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is even (see paper, A=B) */
+/* T1 -> A(0,1) , T2 -> A(0,0) , S -> A(0,k+1) : */
+/* T1 -> A(0+k) , T2 -> A(0+0) , S -> A(0+k*(k+1)); lda=k */
+
+ ij = 0;
+ j = k;
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * a_dim1;
+ i__3 = ij;
+ a[i__2].r = arf[i__3].r, a[i__2].i = arf[i__3].i;
+ ++ij;
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * a_dim1;
+ r_cnjg(&q__1, &arf[ij]);
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = k + 1 + j; i__ <= i__2; ++i__) {
+ i__3 = i__ + (k + 1 + j) * a_dim1;
+ i__4 = ij;
+ a[i__3].r = arf[i__4].r, a[i__3].i = arf[i__4].i;
+ ++ij;
+ }
+ }
+ i__1 = *n - 1;
+ for (j = k - 1; j <= i__1; ++j) {
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * a_dim1;
+ r_cnjg(&q__1, &arf[ij]);
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is even (see paper, A=B) */
+/* T1 -> A(0,k+1) , T2 -> A(0,k) , S -> A(0,0) */
+/* T1 -> A(0+k*(k+1)) , T2 -> A(0+k*k) , S -> A(0+0)); lda=k */
+
+ ij = 0;
+ i__1 = k;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - 1;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * a_dim1;
+ r_cnjg(&q__1, &arf[ij]);
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ++ij;
+ }
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = ij;
+ a[i__3].r = arf[i__4].r, a[i__3].i = arf[i__4].i;
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (l = k + 1 + j; l <= i__2; ++l) {
+ i__3 = k + 1 + j + l * a_dim1;
+ r_cnjg(&q__1, &arf[ij]);
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ++ij;
+ }
+ }
+
+/* Note that here J = K-1 */
+
+ i__1 = j;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * a_dim1;
+ i__3 = ij;
+ a[i__2].r = arf[i__3].r, a[i__2].i = arf[i__3].i;
+ ++ij;
+ }
+
+ }
+
+ }
+
+ }
+
+ return 0;
+
+/* End of CTFTTR */
+
+} /* ctfttr_ */
diff --git a/contrib/libs/clapack/ctgevc.c b/contrib/libs/clapack/ctgevc.c
new file mode 100644
index 0000000000..7aeae85ab8
--- /dev/null
+++ b/contrib/libs/clapack/ctgevc.c
@@ -0,0 +1,971 @@
+/* ctgevc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int ctgevc_(char *side, char *howmny, logical *select,
+	integer *n, complex *s, integer *lds, complex *p, integer *ldp,
+	complex *vl, integer *ldvl, complex *vr, integer *ldvr, integer *mm,
+	integer *m, complex *work, real *rwork, integer *info)
+{
+    /* System generated locals */
+    integer p_dim1, p_offset, s_dim1, s_offset, vl_dim1, vl_offset, vr_dim1,
+	    vr_offset, i__1, i__2, i__3, i__4, i__5;
+    real r__1, r__2, r__3, r__4, r__5, r__6;
+    complex q__1, q__2, q__3, q__4;
+
+    /* Builtin functions */
+    double r_imag(complex *);
+    void r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    complex d__;
+    integer i__, j;
+    complex ca, cb;
+    integer je, im, jr;
+    real big;
+    logical lsa, lsb;
+    real ulp;
+    complex sum;
+    integer ibeg, ieig, iend;
+    real dmin__;
+    integer isrc;
+    real temp;
+    complex suma, sumb;
+    real xmax, scale;
+    logical ilall;
+    integer iside;
+    real sbeta;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *);
+    real small;
+/* NOTE(review): "compl" is a valid identifier in C; it is reserved only in C++. */
+    logical compl;
+    real anorm, bnorm;
+    logical compr, ilbbad;
+    real acoefa, bcoefa, acoeff;
+    complex bcoeff;
+    logical ilback;
+    extern /* Subroutine */ int slabad_(real *, real *);
+    real ascale, bscale;
+    extern /* Complex */ VOID cladiv_(complex *, complex *, complex *);
+    extern doublereal slamch_(char *);
+    complex salpha;
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real bignum;
+    logical ilcomp;
+    integer ihwmny;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+
+/*  Purpose */
+/*  ======= */
+
+/*  CTGEVC computes some or all of the right and/or left eigenvectors of */
+/*  a pair of complex matrices (S,P), where S and P are upper triangular. */
+/*  Matrix pairs of this type are produced by the generalized Schur */
+/*  factorization of a complex matrix pair (A,B): */
+
+/*     A = Q*S*Z**H,  B = Q*P*Z**H */
+
+/*  as computed by CGGHRD + CHGEQZ. */
+
+/*  The right eigenvector x and the left eigenvector y of (S,P) */
+/*  corresponding to an eigenvalue w are defined by: */
+
+/*     S*x = w*P*x,  (y**H)*S = w*(y**H)*P, */
+
+/*  where y**H denotes the conjugate transpose of y. */
+/*  The eigenvalues are not input to this routine, but are computed */
+/*  directly from the diagonal elements of S and P. */
+
+/*  This routine returns the matrices X and/or Y of right and left */
+/*  eigenvectors of (S,P), or the products Z*X and/or Q*Y, */
+/*  where Z and Q are input matrices. */
+/*  If Q and Z are the unitary factors from the generalized Schur */
+/*  factorization of a matrix pair (A,B), then Z*X and Q*Y */
+/*  are the matrices of right and left eigenvectors of (A,B). */
+
+/*  Arguments */
+/*  ========= */
+
+/*  SIDE    (input) CHARACTER*1 */
+/*          = 'R': compute right eigenvectors only; */
+/*          = 'L': compute left eigenvectors only; */
+/*          = 'B': compute both right and left eigenvectors. */
+
+/*  HOWMNY  (input) CHARACTER*1 */
+/*          = 'A': compute all right and/or left eigenvectors; */
+/*          = 'B': compute all right and/or left eigenvectors, */
+/*                 backtransformed by the matrices in VR and/or VL; */
+/*          = 'S': compute selected right and/or left eigenvectors, */
+/*                 specified by the logical array SELECT. */
+
+/*  SELECT  (input) LOGICAL array, dimension (N) */
+/*          If HOWMNY='S', SELECT specifies the eigenvectors to be */
+/*          computed.  The eigenvector corresponding to the j-th */
+/*          eigenvalue is computed if SELECT(j) = .TRUE.. */
+/*          Not referenced if HOWMNY = 'A' or 'B'. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrices S and P.  N >= 0. */
+
+/*  S       (input) COMPLEX array, dimension (LDS,N) */
+/*          The upper triangular matrix S from a generalized Schur */
+/*          factorization, as computed by CHGEQZ. */
+
+/*  LDS     (input) INTEGER */
+/*          The leading dimension of array S.  LDS >= max(1,N). */
+
+/*  P       (input) COMPLEX array, dimension (LDP,N) */
+/*          The upper triangular matrix P from a generalized Schur */
+/*          factorization, as computed by CHGEQZ.  P must have real */
+/*          diagonal elements. */
+
+/*  LDP     (input) INTEGER */
+/*          The leading dimension of array P.  LDP >= max(1,N). */
+
+/*  VL      (input/output) COMPLEX array, dimension (LDVL,MM) */
+/*          On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must */
+/*          contain an N-by-N matrix Q (usually the unitary matrix Q */
+/*          of left Schur vectors returned by CHGEQZ). */
+/*          On exit, if SIDE = 'L' or 'B', VL contains: */
+/*          if HOWMNY = 'A', the matrix Y of left eigenvectors of (S,P); */
+/*          if HOWMNY = 'B', the matrix Q*Y; */
+/*          if HOWMNY = 'S', the left eigenvectors of (S,P) specified by */
+/*                      SELECT, stored consecutively in the columns of */
+/*                      VL, in the same order as their eigenvalues. */
+/*          Not referenced if SIDE = 'R'. */
+
+/*  LDVL    (input) INTEGER */
+/*          The leading dimension of array VL.  LDVL >= 1, and if */
+/*          SIDE = 'L' or 'l' or 'B' or 'b', LDVL >= N. */
+
+/*  VR      (input/output) COMPLEX array, dimension (LDVR,MM) */
+/*          On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must */
+/*          contain an N-by-N matrix Q (usually the unitary matrix Z */
+/*          of right Schur vectors returned by CHGEQZ). */
+/*          On exit, if SIDE = 'R' or 'B', VR contains: */
+/*          if HOWMNY = 'A', the matrix X of right eigenvectors of (S,P); */
+/*          if HOWMNY = 'B', the matrix Z*X; */
+/*          if HOWMNY = 'S', the right eigenvectors of (S,P) specified by */
+/*                      SELECT, stored consecutively in the columns of */
+/*                      VR, in the same order as their eigenvalues. */
+/*          Not referenced if SIDE = 'L'. */
+
+/*  LDVR    (input) INTEGER */
+/*          The leading dimension of the array VR.  LDVR >= 1, and if */
+/*          SIDE = 'R' or 'B', LDVR >= N. */
+
+/*  MM      (input) INTEGER */
+/*          The number of columns in the arrays VL and/or VR. MM >= M. */
+
+/*  M       (output) INTEGER */
+/*          The number of columns in the arrays VL and/or VR actually */
+/*          used to store the eigenvectors.  If HOWMNY = 'A' or 'B', M */
+/*          is set to N.  Each selected eigenvector occupies one column. */
+
+/*  WORK    (workspace) COMPLEX array, dimension (2*N) */
+
+/*  RWORK   (workspace) REAL array, dimension (2*N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit. */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Statement Functions .. */
+/*     .. */
+/*     .. Statement Function definitions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Decode and Test the input parameters */
+
+    /* Parameter adjustments */
+/* f2c convention: shift each array pointer so 1-based Fortran indexing works in C. */
+    --select;
+    s_dim1 = *lds;
+    s_offset = 1 + s_dim1;
+    s -= s_offset;
+    p_dim1 = *ldp;
+    p_offset = 1 + p_dim1;
+    p -= p_offset;
+    vl_dim1 = *ldvl;
+    vl_offset = 1 + vl_dim1;
+    vl -= vl_offset;
+    vr_dim1 = *ldvr;
+    vr_offset = 1 + vr_dim1;
+    vr -= vr_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    if (lsame_(howmny, "A")) {
+	ihwmny = 1;
+	ilall = TRUE_;
+	ilback = FALSE_;
+    } else if (lsame_(howmny, "S")) {
+	ihwmny = 2;
+	ilall = FALSE_;
+	ilback = FALSE_;
+    } else if (lsame_(howmny, "B")) {
+	ihwmny = 3;
+	ilall = TRUE_;
+	ilback = TRUE_;
+    } else {
+	ihwmny = -1;
+    }
+
+    if (lsame_(side, "R")) {
+	iside = 1;
+	compl = FALSE_;
+	compr = TRUE_;
+    } else if (lsame_(side, "L")) {
+	iside = 2;
+	compl = TRUE_;
+	compr = FALSE_;
+    } else if (lsame_(side, "B")) {
+	iside = 3;
+	compl = TRUE_;
+	compr = TRUE_;
+    } else {
+	iside = -1;
+    }
+
+    *info = 0;
+    if (iside < 0) {
+	*info = -1;
+    } else if (ihwmny < 0) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*lds < max(1,*n)) {
+	*info = -6;
+    } else if (*ldp < max(1,*n)) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CTGEVC", &i__1);
+	return 0;
+    }
+
+/*     Count the number of eigenvectors */
+
+    if (! ilall) {
+	im = 0;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    if (select[j]) {
+		++im;
+	    }
+/* L10: */
+	}
+    } else {
+	im = *n;
+    }
+
+/*     Check diagonal of B */
+
+    ilbbad = FALSE_;
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	if (r_imag(&p[j + j * p_dim1]) != 0.f) {
+	    ilbbad = TRUE_;
+	}
+/* L20: */
+    }
+
+    if (ilbbad) {
+	*info = -7;
+    } else if (compl && *ldvl < *n || *ldvl < 1) {
+	*info = -10;
+    } else if (compr && *ldvr < *n || *ldvr < 1) {
+	*info = -12;
+    } else if (*mm < im) {
+	*info = -13;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CTGEVC", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    *m = im;
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Machine Constants */
+
+    safmin = slamch_("Safe minimum");
+    big = 1.f / safmin;
+    slabad_(&safmin, &big);
+    ulp = slamch_("Epsilon") * slamch_("Base");
+/* SMALL/BIG bound the safe scaling range used by the triangular solves below. */
+    small = safmin * *n / ulp;
+    big = 1.f / small;
+    bignum = 1.f / (safmin * *n);
+
+/*     Compute the 1-norm of each column of the strictly upper triangular */
+/*     part of A and B to check for possible overflow in the triangular */
+/*     solver. */
+
+    i__1 = s_dim1 + 1;
+    anorm = (r__1 = s[i__1].r, dabs(r__1)) + (r__2 = r_imag(&s[s_dim1 + 1]),
+	    dabs(r__2));
+    i__1 = p_dim1 + 1;
+    bnorm = (r__1 = p[i__1].r, dabs(r__1)) + (r__2 = r_imag(&p[p_dim1 + 1]),
+	    dabs(r__2));
+/* RWORK(1:N) holds column norms of strict upper S; RWORK(N+1:2N) of strict upper P. */
+    rwork[1] = 0.f;
+    rwork[*n + 1] = 0.f;
+    i__1 = *n;
+    for (j = 2; j <= i__1; ++j) {
+	rwork[j] = 0.f;
+	rwork[*n + j] = 0.f;
+	i__2 = j - 1;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * s_dim1;
+	    rwork[j] += (r__1 = s[i__3].r, dabs(r__1)) + (r__2 = r_imag(&s[
+		    i__ + j * s_dim1]), dabs(r__2));
+	    i__3 = i__ + j * p_dim1;
+	    rwork[*n + j] += (r__1 = p[i__3].r, dabs(r__1)) + (r__2 = r_imag(&
+		    p[i__ + j * p_dim1]), dabs(r__2));
+/* L30: */
+	}
+/* Computing MAX */
+	i__2 = j + j * s_dim1;
+	r__3 = anorm, r__4 = rwork[j] + ((r__1 = s[i__2].r, dabs(r__1)) + (
+		r__2 = r_imag(&s[j + j * s_dim1]), dabs(r__2)));
+	anorm = dmax(r__3,r__4);
+/* Computing MAX */
+	i__2 = j + j * p_dim1;
+	r__3 = bnorm, r__4 = rwork[*n + j] + ((r__1 = p[i__2].r, dabs(r__1))
+		+ (r__2 = r_imag(&p[j + j * p_dim1]), dabs(r__2)));
+	bnorm = dmax(r__3,r__4);
+/* L40: */
+    }
+
+    ascale = 1.f / dmax(anorm,safmin);
+    bscale = 1.f / dmax(bnorm,safmin);
+
+/*     Left eigenvectors */
+
+    if (compl) {
+	ieig = 0;
+
+/*        Main loop over eigenvalues */
+
+	i__1 = *n;
+	for (je = 1; je <= i__1; ++je) {
+	    if (ilall) {
+		ilcomp = TRUE_;
+	    } else {
+		ilcomp = select[je];
+	    }
+	    if (ilcomp) {
+		++ieig;
+
+		i__2 = je + je * s_dim1;
+		i__3 = je + je * p_dim1;
+		if ((r__2 = s[i__2].r, dabs(r__2)) + (r__3 = r_imag(&s[je +
+			je * s_dim1]), dabs(r__3)) <= safmin && (r__1 = p[
+			i__3].r, dabs(r__1)) <= safmin) {
+
+/*                 Singular matrix pencil -- return unit eigenvector */
+
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+			i__3 = jr + ieig * vl_dim1;
+			vl[i__3].r = 0.f, vl[i__3].i = 0.f;
+/* L50: */
+		    }
+		    i__2 = ieig + ieig * vl_dim1;
+		    vl[i__2].r = 1.f, vl[i__2].i = 0.f;
+		    goto L140;
+		}
+
+/*              Non-singular eigenvalue: */
+/*              Compute coefficients  a  and  b  in */
+/*                   H */
+/*                 y  ( a A - b B ) = 0 */
+
+/* Computing MAX */
+		i__2 = je + je * s_dim1;
+		i__3 = je + je * p_dim1;
+		r__4 = ((r__2 = s[i__2].r, dabs(r__2)) + (r__3 = r_imag(&s[je
+			+ je * s_dim1]), dabs(r__3))) * ascale, r__5 = (r__1 =
+			 p[i__3].r, dabs(r__1)) * bscale, r__4 = max(r__4,
+			r__5);
+		temp = 1.f / dmax(r__4,safmin);
+		i__2 = je + je * s_dim1;
+		q__2.r = temp * s[i__2].r, q__2.i = temp * s[i__2].i;
+		q__1.r = ascale * q__2.r, q__1.i = ascale * q__2.i;
+		salpha.r = q__1.r, salpha.i = q__1.i;
+		i__2 = je + je * p_dim1;
+		sbeta = temp * p[i__2].r * bscale;
+		acoeff = sbeta * ascale;
+		q__1.r = bscale * salpha.r, q__1.i = bscale * salpha.i;
+		bcoeff.r = q__1.r, bcoeff.i = q__1.i;
+
+/*              Scale to avoid underflow */
+
+		lsa = dabs(sbeta) >= safmin && dabs(acoeff) < small;
+		lsb = (r__1 = salpha.r, dabs(r__1)) + (r__2 = r_imag(&salpha),
+			 dabs(r__2)) >= safmin && (r__3 = bcoeff.r, dabs(r__3)
+			) + (r__4 = r_imag(&bcoeff), dabs(r__4)) < small;
+
+		scale = 1.f;
+		if (lsa) {
+		    scale = small / dabs(sbeta) * dmin(anorm,big);
+		}
+		if (lsb) {
+/* Computing MAX */
+		    r__3 = scale, r__4 = small / ((r__1 = salpha.r, dabs(r__1)
+			    ) + (r__2 = r_imag(&salpha), dabs(r__2))) * dmin(
+			    bnorm,big);
+		    scale = dmax(r__3,r__4);
+		}
+		if (lsa || lsb) {
+/* Computing MIN */
+/* Computing MAX */
+		    r__5 = 1.f, r__6 = dabs(acoeff), r__5 = max(r__5,r__6),
+			    r__6 = (r__1 = bcoeff.r, dabs(r__1)) + (r__2 =
+			    r_imag(&bcoeff), dabs(r__2));
+		    r__3 = scale, r__4 = 1.f / (safmin * dmax(r__5,r__6));
+		    scale = dmin(r__3,r__4);
+		    if (lsa) {
+			acoeff = ascale * (scale * sbeta);
+		    } else {
+			acoeff = scale * acoeff;
+		    }
+		    if (lsb) {
+			q__2.r = scale * salpha.r, q__2.i = scale * salpha.i;
+			q__1.r = bscale * q__2.r, q__1.i = bscale * q__2.i;
+			bcoeff.r = q__1.r, bcoeff.i = q__1.i;
+		    } else {
+			q__1.r = scale * bcoeff.r, q__1.i = scale * bcoeff.i;
+			bcoeff.r = q__1.r, bcoeff.i = q__1.i;
+		    }
+		}
+
+		acoefa = dabs(acoeff);
+		bcoefa = (r__1 = bcoeff.r, dabs(r__1)) + (r__2 = r_imag(&
+			bcoeff), dabs(r__2));
+		xmax = 1.f;
+		i__2 = *n;
+		for (jr = 1; jr <= i__2; ++jr) {
+		    i__3 = jr;
+		    work[i__3].r = 0.f, work[i__3].i = 0.f;
+/* L60: */
+		}
+		i__2 = je;
+		work[i__2].r = 1.f, work[i__2].i = 0.f;
+/* Computing MAX */
+		r__1 = ulp * acoefa * anorm, r__2 = ulp * bcoefa * bnorm,
+			r__1 = max(r__1,r__2);
+		dmin__ = dmax(r__1,safmin);
+
+/*                                              H */
+/*              Triangular solve of  (a A - b B)  y = 0 */
+
+/*                                      H */
+/*              (rowwise in  (a A - b B) , or columnwise in a A - b B) */
+
+		i__2 = *n;
+		for (j = je + 1; j <= i__2; ++j) {
+
+/*                 Compute */
+/*                       j-1 */
+/*                 SUM = sum  conjg( a*S(k,j) - b*P(k,j) )*x(k) */
+/*                       k=je */
+/*                 (Scale if necessary) */
+
+		    temp = 1.f / xmax;
+		    if (acoefa * rwork[j] + bcoefa * rwork[*n + j] > bignum *
+			    temp) {
+			i__3 = j - 1;
+			for (jr = je; jr <= i__3; ++jr) {
+			    i__4 = jr;
+			    i__5 = jr;
+			    q__1.r = temp * work[i__5].r, q__1.i = temp *
+				    work[i__5].i;
+			    work[i__4].r = q__1.r, work[i__4].i = q__1.i;
+/* L70: */
+			}
+			xmax = 1.f;
+		    }
+		    suma.r = 0.f, suma.i = 0.f;
+		    sumb.r = 0.f, sumb.i = 0.f;
+
+		    i__3 = j - 1;
+		    for (jr = je; jr <= i__3; ++jr) {
+			r_cnjg(&q__3, &s[jr + j * s_dim1]);
+			i__4 = jr;
+			q__2.r = q__3.r * work[i__4].r - q__3.i * work[i__4]
+				.i, q__2.i = q__3.r * work[i__4].i + q__3.i *
+				 work[i__4].r;
+			q__1.r = suma.r + q__2.r, q__1.i = suma.i + q__2.i;
+			suma.r = q__1.r, suma.i = q__1.i;
+			r_cnjg(&q__3, &p[jr + j * p_dim1]);
+			i__4 = jr;
+			q__2.r = q__3.r * work[i__4].r - q__3.i * work[i__4]
+				.i, q__2.i = q__3.r * work[i__4].i + q__3.i *
+				 work[i__4].r;
+			q__1.r = sumb.r + q__2.r, q__1.i = sumb.i + q__2.i;
+			sumb.r = q__1.r, sumb.i = q__1.i;
+/* L80: */
+		    }
+		    q__2.r = acoeff * suma.r, q__2.i = acoeff * suma.i;
+		    r_cnjg(&q__4, &bcoeff);
+		    q__3.r = q__4.r * sumb.r - q__4.i * sumb.i, q__3.i =
+			    q__4.r * sumb.i + q__4.i * sumb.r;
+		    q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+		    sum.r = q__1.r, sum.i = q__1.i;
+
+/*                 Form x(j) = - SUM / conjg( a*S(j,j) - b*P(j,j) ) */
+
+/*                 with scaling and perturbation of the denominator */
+
+		    i__3 = j + j * s_dim1;
+		    q__3.r = acoeff * s[i__3].r, q__3.i = acoeff * s[i__3].i;
+		    i__4 = j + j * p_dim1;
+		    q__4.r = bcoeff.r * p[i__4].r - bcoeff.i * p[i__4].i,
+			    q__4.i = bcoeff.r * p[i__4].i + bcoeff.i * p[i__4]
+			    .r;
+		    q__2.r = q__3.r - q__4.r, q__2.i = q__3.i - q__4.i;
+		    r_cnjg(&q__1, &q__2);
+		    d__.r = q__1.r, d__.i = q__1.i;
+		    if ((r__1 = d__.r, dabs(r__1)) + (r__2 = r_imag(&d__),
+			    dabs(r__2)) <= dmin__) {
+			q__1.r = dmin__, q__1.i = 0.f;
+			d__.r = q__1.r, d__.i = q__1.i;
+		    }
+
+		    if ((r__1 = d__.r, dabs(r__1)) + (r__2 = r_imag(&d__),
+			    dabs(r__2)) < 1.f) {
+			if ((r__1 = sum.r, dabs(r__1)) + (r__2 = r_imag(&sum),
+				 dabs(r__2)) >= bignum * ((r__3 = d__.r, dabs(
+				r__3)) + (r__4 = r_imag(&d__), dabs(r__4)))) {
+			    temp = 1.f / ((r__1 = sum.r, dabs(r__1)) + (r__2 =
+				     r_imag(&sum), dabs(r__2)));
+			    i__3 = j - 1;
+			    for (jr = je; jr <= i__3; ++jr) {
+				i__4 = jr;
+				i__5 = jr;
+				q__1.r = temp * work[i__5].r, q__1.i = temp *
+					 work[i__5].i;
+				work[i__4].r = q__1.r, work[i__4].i = q__1.i;
+/* L90: */
+			    }
+			    xmax = temp * xmax;
+			    q__1.r = temp * sum.r, q__1.i = temp * sum.i;
+			    sum.r = q__1.r, sum.i = q__1.i;
+			}
+		    }
+		    i__3 = j;
+		    q__2.r = -sum.r, q__2.i = -sum.i;
+		    cladiv_(&q__1, &q__2, &d__);
+		    work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* Computing MAX */
+		    i__3 = j;
+		    r__3 = xmax, r__4 = (r__1 = work[i__3].r, dabs(r__1)) + (
+			    r__2 = r_imag(&work[j]), dabs(r__2));
+		    xmax = dmax(r__3,r__4);
+/* L100: */
+		}
+
+/*              Back transform eigenvector if HOWMNY='B'. */
+
+		if (ilback) {
+		    i__2 = *n + 1 - je;
+		    cgemv_("N", n, &i__2, &c_b2, &vl[je * vl_dim1 + 1], ldvl,
+			    &work[je], &c__1, &c_b1, &work[*n + 1], &c__1);
+		    isrc = 2;
+		    ibeg = 1;
+		} else {
+		    isrc = 1;
+		    ibeg = je;
+		}
+
+/*              Copy and scale eigenvector into column of VL */
+
+		xmax = 0.f;
+		i__2 = *n;
+		for (jr = ibeg; jr <= i__2; ++jr) {
+/* Computing MAX */
+		    i__3 = (isrc - 1) * *n + jr;
+		    r__3 = xmax, r__4 = (r__1 = work[i__3].r, dabs(r__1)) + (
+			    r__2 = r_imag(&work[(isrc - 1) * *n + jr]), dabs(
+			    r__2));
+		    xmax = dmax(r__3,r__4);
+/* L110: */
+		}
+
+		if (xmax > safmin) {
+		    temp = 1.f / xmax;
+		    i__2 = *n;
+		    for (jr = ibeg; jr <= i__2; ++jr) {
+			i__3 = jr + ieig * vl_dim1;
+			i__4 = (isrc - 1) * *n + jr;
+			q__1.r = temp * work[i__4].r, q__1.i = temp * work[
+				i__4].i;
+			vl[i__3].r = q__1.r, vl[i__3].i = q__1.i;
+/* L120: */
+		    }
+		} else {
+		    ibeg = *n + 1;
+		}
+
+		i__2 = ibeg - 1;
+		for (jr = 1; jr <= i__2; ++jr) {
+		    i__3 = jr + ieig * vl_dim1;
+		    vl[i__3].r = 0.f, vl[i__3].i = 0.f;
+/* L130: */
+		}
+
+	    }
+L140:
+	    ;
+	}
+    }
+
+/*     Right eigenvectors */
+
+    if (compr) {
+/* IEIG counts down: right eigenvectors are stored from the last column backwards. */
+	ieig = im + 1;
+
+/*        Main loop over eigenvalues */
+
+	for (je = *n; je >= 1; --je) {
+	    if (ilall) {
+		ilcomp = TRUE_;
+	    } else {
+		ilcomp = select[je];
+	    }
+	    if (ilcomp) {
+		--ieig;
+
+		i__1 = je + je * s_dim1;
+		i__2 = je + je * p_dim1;
+		if ((r__2 = s[i__1].r, dabs(r__2)) + (r__3 = r_imag(&s[je +
+			je * s_dim1]), dabs(r__3)) <= safmin && (r__1 = p[
+			i__2].r, dabs(r__1)) <= safmin) {
+
+/*                 Singular matrix pencil -- return unit eigenvector */
+
+		    i__1 = *n;
+		    for (jr = 1; jr <= i__1; ++jr) {
+			i__2 = jr + ieig * vr_dim1;
+			vr[i__2].r = 0.f, vr[i__2].i = 0.f;
+/* L150: */
+		    }
+		    i__1 = ieig + ieig * vr_dim1;
+		    vr[i__1].r = 1.f, vr[i__1].i = 0.f;
+		    goto L250;
+		}
+
+/*              Non-singular eigenvalue: */
+/*              Compute coefficients  a  and  b  in */
+
+/*              ( a A - b B ) x  = 0 */
+
+/* Computing MAX */
+		i__1 = je + je * s_dim1;
+		i__2 = je + je * p_dim1;
+		r__4 = ((r__2 = s[i__1].r, dabs(r__2)) + (r__3 = r_imag(&s[je
+			+ je * s_dim1]), dabs(r__3))) * ascale, r__5 = (r__1 =
+			 p[i__2].r, dabs(r__1)) * bscale, r__4 = max(r__4,
+			r__5);
+		temp = 1.f / dmax(r__4,safmin);
+		i__1 = je + je * s_dim1;
+		q__2.r = temp * s[i__1].r, q__2.i = temp * s[i__1].i;
+		q__1.r = ascale * q__2.r, q__1.i = ascale * q__2.i;
+		salpha.r = q__1.r, salpha.i = q__1.i;
+		i__1 = je + je * p_dim1;
+		sbeta = temp * p[i__1].r * bscale;
+		acoeff = sbeta * ascale;
+		q__1.r = bscale * salpha.r, q__1.i = bscale * salpha.i;
+		bcoeff.r = q__1.r, bcoeff.i = q__1.i;
+
+/*              Scale to avoid underflow */
+
+		lsa = dabs(sbeta) >= safmin && dabs(acoeff) < small;
+		lsb = (r__1 = salpha.r, dabs(r__1)) + (r__2 = r_imag(&salpha),
+			 dabs(r__2)) >= safmin && (r__3 = bcoeff.r, dabs(r__3)
+			) + (r__4 = r_imag(&bcoeff), dabs(r__4)) < small;
+
+		scale = 1.f;
+		if (lsa) {
+		    scale = small / dabs(sbeta) * dmin(anorm,big);
+		}
+		if (lsb) {
+/* Computing MAX */
+		    r__3 = scale, r__4 = small / ((r__1 = salpha.r, dabs(r__1)
+			    ) + (r__2 = r_imag(&salpha), dabs(r__2))) * dmin(
+			    bnorm,big);
+		    scale = dmax(r__3,r__4);
+		}
+		if (lsa || lsb) {
+/* Computing MIN */
+/* Computing MAX */
+		    r__5 = 1.f, r__6 = dabs(acoeff), r__5 = max(r__5,r__6),
+			    r__6 = (r__1 = bcoeff.r, dabs(r__1)) + (r__2 =
+			    r_imag(&bcoeff), dabs(r__2));
+		    r__3 = scale, r__4 = 1.f / (safmin * dmax(r__5,r__6));
+		    scale = dmin(r__3,r__4);
+		    if (lsa) {
+			acoeff = ascale * (scale * sbeta);
+		    } else {
+			acoeff = scale * acoeff;
+		    }
+		    if (lsb) {
+			q__2.r = scale * salpha.r, q__2.i = scale * salpha.i;
+			q__1.r = bscale * q__2.r, q__1.i = bscale * q__2.i;
+			bcoeff.r = q__1.r, bcoeff.i = q__1.i;
+		    } else {
+			q__1.r = scale * bcoeff.r, q__1.i = scale * bcoeff.i;
+			bcoeff.r = q__1.r, bcoeff.i = q__1.i;
+		    }
+		}
+
+		acoefa = dabs(acoeff);
+		bcoefa = (r__1 = bcoeff.r, dabs(r__1)) + (r__2 = r_imag(&
+			bcoeff), dabs(r__2));
+		xmax = 1.f;
+		i__1 = *n;
+		for (jr = 1; jr <= i__1; ++jr) {
+		    i__2 = jr;
+		    work[i__2].r = 0.f, work[i__2].i = 0.f;
+/* L160: */
+		}
+		i__1 = je;
+		work[i__1].r = 1.f, work[i__1].i = 0.f;
+/* Computing MAX */
+		r__1 = ulp * acoefa * anorm, r__2 = ulp * bcoefa * bnorm,
+			r__1 = max(r__1,r__2);
+		dmin__ = dmax(r__1,safmin);
+
+/*              Triangular solve of  (a A - b B) x = 0  (columnwise) */
+
+/*              WORK(1:j-1) contains sums w, */
+/*              WORK(j+1:JE) contains x */
+
+		i__1 = je - 1;
+		for (jr = 1; jr <= i__1; ++jr) {
+		    i__2 = jr;
+		    i__3 = jr + je * s_dim1;
+		    q__2.r = acoeff * s[i__3].r, q__2.i = acoeff * s[i__3].i;
+		    i__4 = jr + je * p_dim1;
+		    q__3.r = bcoeff.r * p[i__4].r - bcoeff.i * p[i__4].i,
+			    q__3.i = bcoeff.r * p[i__4].i + bcoeff.i * p[i__4]
+			    .r;
+		    q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+		    work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+/* L170: */
+		}
+		i__1 = je;
+		work[i__1].r = 1.f, work[i__1].i = 0.f;
+
+/* Back substitution: solve for x(j), then fold x(j) into the running sums w. */
+		for (j = je - 1; j >= 1; --j) {
+
+/*                 Form x(j) := - w(j) / d */
+/*                 with scaling and perturbation of the denominator */
+
+		    i__1 = j + j * s_dim1;
+		    q__2.r = acoeff * s[i__1].r, q__2.i = acoeff * s[i__1].i;
+		    i__2 = j + j * p_dim1;
+		    q__3.r = bcoeff.r * p[i__2].r - bcoeff.i * p[i__2].i,
+			    q__3.i = bcoeff.r * p[i__2].i + bcoeff.i * p[i__2]
+			    .r;
+		    q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+		    d__.r = q__1.r, d__.i = q__1.i;
+		    if ((r__1 = d__.r, dabs(r__1)) + (r__2 = r_imag(&d__),
+			    dabs(r__2)) <= dmin__) {
+			q__1.r = dmin__, q__1.i = 0.f;
+			d__.r = q__1.r, d__.i = q__1.i;
+		    }
+
+		    if ((r__1 = d__.r, dabs(r__1)) + (r__2 = r_imag(&d__),
+			    dabs(r__2)) < 1.f) {
+			i__1 = j;
+			if ((r__1 = work[i__1].r, dabs(r__1)) + (r__2 =
+				r_imag(&work[j]), dabs(r__2)) >= bignum * ((
+				r__3 = d__.r, dabs(r__3)) + (r__4 = r_imag(&
+				d__), dabs(r__4)))) {
+			    i__1 = j;
+			    temp = 1.f / ((r__1 = work[i__1].r, dabs(r__1)) +
+				    (r__2 = r_imag(&work[j]), dabs(r__2)));
+			    i__1 = je;
+			    for (jr = 1; jr <= i__1; ++jr) {
+				i__2 = jr;
+				i__3 = jr;
+				q__1.r = temp * work[i__3].r, q__1.i = temp *
+					 work[i__3].i;
+				work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+/* L180: */
+			    }
+			}
+		    }
+
+		    i__1 = j;
+		    i__2 = j;
+		    q__2.r = -work[i__2].r, q__2.i = -work[i__2].i;
+		    cladiv_(&q__1, &q__2, &d__);
+		    work[i__1].r = q__1.r, work[i__1].i = q__1.i;
+
+		    if (j > 1) {
+
+/*                    w = w + x(j)*(a S(*,j) - b P(*,j) ) with scaling */
+
+			i__1 = j;
+			if ((r__1 = work[i__1].r, dabs(r__1)) + (r__2 =
+				r_imag(&work[j]), dabs(r__2)) > 1.f) {
+			    i__1 = j;
+			    temp = 1.f / ((r__1 = work[i__1].r, dabs(r__1)) +
+				    (r__2 = r_imag(&work[j]), dabs(r__2)));
+			    if (acoefa * rwork[j] + bcoefa * rwork[*n + j] >=
+				    bignum * temp) {
+				i__1 = je;
+				for (jr = 1; jr <= i__1; ++jr) {
+				    i__2 = jr;
+				    i__3 = jr;
+				    q__1.r = temp * work[i__3].r, q__1.i =
+					    temp * work[i__3].i;
+				    work[i__2].r = q__1.r, work[i__2].i =
+					    q__1.i;
+/* L190: */
+				}
+			    }
+			}
+
+			i__1 = j;
+			q__1.r = acoeff * work[i__1].r, q__1.i = acoeff *
+				work[i__1].i;
+			ca.r = q__1.r, ca.i = q__1.i;
+			i__1 = j;
+			q__1.r = bcoeff.r * work[i__1].r - bcoeff.i * work[
+				i__1].i, q__1.i = bcoeff.r * work[i__1].i +
+				bcoeff.i * work[i__1].r;
+			cb.r = q__1.r, cb.i = q__1.i;
+			i__1 = j - 1;
+			for (jr = 1; jr <= i__1; ++jr) {
+			    i__2 = jr;
+			    i__3 = jr;
+			    i__4 = jr + j * s_dim1;
+			    q__3.r = ca.r * s[i__4].r - ca.i * s[i__4].i,
+				    q__3.i = ca.r * s[i__4].i + ca.i * s[i__4]
+				    .r;
+			    q__2.r = work[i__3].r + q__3.r, q__2.i = work[
+				    i__3].i + q__3.i;
+			    i__5 = jr + j * p_dim1;
+			    q__4.r = cb.r * p[i__5].r - cb.i * p[i__5].i,
+				    q__4.i = cb.r * p[i__5].i + cb.i * p[i__5]
+				    .r;
+			    q__1.r = q__2.r - q__4.r, q__1.i = q__2.i -
+				    q__4.i;
+			    work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+/* L200: */
+			}
+		    }
+/* L210: */
+		}
+
+/*              Back transform eigenvector if HOWMNY='B'. */
+
+		if (ilback) {
+		    cgemv_("N", n, &je, &c_b2, &vr[vr_offset], ldvr, &work[1],
+			     &c__1, &c_b1, &work[*n + 1], &c__1);
+		    isrc = 2;
+		    iend = *n;
+		} else {
+		    isrc = 1;
+		    iend = je;
+		}
+
+/*              Copy and scale eigenvector into column of VR */
+
+		xmax = 0.f;
+		i__1 = iend;
+		for (jr = 1; jr <= i__1; ++jr) {
+/* Computing MAX */
+		    i__2 = (isrc - 1) * *n + jr;
+		    r__3 = xmax, r__4 = (r__1 = work[i__2].r, dabs(r__1)) + (
+			    r__2 = r_imag(&work[(isrc - 1) * *n + jr]), dabs(
+			    r__2));
+		    xmax = dmax(r__3,r__4);
+/* L220: */
+		}
+
+		if (xmax > safmin) {
+		    temp = 1.f / xmax;
+		    i__1 = iend;
+		    for (jr = 1; jr <= i__1; ++jr) {
+			i__2 = jr + ieig * vr_dim1;
+			i__3 = (isrc - 1) * *n + jr;
+			q__1.r = temp * work[i__3].r, q__1.i = temp * work[
+				i__3].i;
+			vr[i__2].r = q__1.r, vr[i__2].i = q__1.i;
+/* L230: */
+		    }
+		} else {
+		    iend = 0;
+		}
+
+		i__1 = *n;
+		for (jr = iend + 1; jr <= i__1; ++jr) {
+		    i__2 = jr + ieig * vr_dim1;
+		    vr[i__2].r = 0.f, vr[i__2].i = 0.f;
+/* L240: */
+		}
+
+	    }
+L250:
+	    ;
+	}
+    }
+
+    return 0;
+
+/*     End of CTGEVC */
+
+} /* ctgevc_ */
diff --git a/contrib/libs/clapack/ctgex2.c b/contrib/libs/clapack/ctgex2.c
new file mode 100644
index 0000000000..59846f2836
--- /dev/null
+++ b/contrib/libs/clapack/ctgex2.c
@@ -0,0 +1,373 @@
+/* ctgex2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__2 = 2;
+static integer c__1 = 1;
+
+/* Subroutine */ int ctgex2_(logical *wantq, logical *wantz, integer *n,
+ complex *a, integer *lda, complex *b, integer *ldb, complex *q,
+ integer *ldq, complex *z__, integer *ldz, integer *j1, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1,
+ z_offset, i__1, i__2, i__3;
+ real r__1;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal), c_abs(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ complex f, g;
+ integer i__, m;
+ complex s[4] /* was [2][2] */, t[4] /* was [2][2] */;
+ real cq, sa, sb, cz;
+ complex sq;
+ real ss, ws;
+ complex sz;
+ real eps, sum;
+ logical weak;
+ complex cdum;
+ extern /* Subroutine */ int crot_(integer *, complex *, integer *,
+ complex *, integer *, real *, complex *);
+ complex work[8];
+ real scale;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), clartg_(complex *,
+ complex *, real *, complex *, complex *), classq_(integer *,
+ complex *, integer *, real *, real *);
+ real thresh, smlnum;
+ logical strong;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTGEX2 swaps adjacent diagonal 1 by 1 blocks (A11,B11) and (A22,B22) */
+/* in an upper triangular matrix pair (A, B) by an unitary equivalence */
+/* transformation. */
+
+/* (A, B) must be in generalized Schur canonical form, that is, A and */
+/* B are both upper triangular. */
+
+/* Optionally, the matrices Q and Z of generalized Schur vectors are */
+/* updated. */
+
+/* Q(in) * A(in) * Z(in)' = Q(out) * A(out) * Z(out)' */
+/* Q(in) * B(in) * Z(in)' = Q(out) * B(out) * Z(out)' */
+
+
+/* Arguments */
+/* ========= */
+
+/* WANTQ (input) LOGICAL */
+/* .TRUE. : update the left transformation matrix Q; */
+/* .FALSE.: do not update Q. */
+
+/* WANTZ (input) LOGICAL */
+/* .TRUE. : update the right transformation matrix Z; */
+/* .FALSE.: do not update Z. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) COMPLEX arrays, dimensions (LDA,N) */
+/* On entry, the matrix A in the pair (A, B). */
+/* On exit, the updated matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX arrays, dimensions (LDB,N) */
+/* On entry, the matrix B in the pair (A, B). */
+/* On exit, the updated matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* Q (input/output) COMPLEX array, dimension (LDZ,N) */
+/* If WANTQ = .TRUE, on entry, the unitary matrix Q. On exit, */
+/* the updated matrix Q. */
+/* Not referenced if WANTQ = .FALSE.. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= 1; */
+/* If WANTQ = .TRUE., LDQ >= N. */
+
+/* Z (input/output) COMPLEX array, dimension (LDZ,N) */
+/* If WANTZ = .TRUE, on entry, the unitary matrix Z. On exit, */
+/* the updated matrix Z. */
+/* Not referenced if WANTZ = .FALSE.. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1; */
+/* If WANTZ = .TRUE., LDZ >= N. */
+
+/* J1 (input) INTEGER */
+/* The index to the first block (A11, B11). */
+
+/* INFO (output) INTEGER */
+/* =0: Successful exit. */
+/* =1: The transformed matrix pair (A, B) would be too far */
+/* from generalized Schur form; the problem is ill- */
+/* conditioned. */
+
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* In the current code both weak and strong stability tests are */
+/* performed. The user can omit the strong stability test by changing */
+/* the internal logical parameter WANDS to .FALSE.. See ref. [2] for */
+/* details. */
+
+/* [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */
+/* Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */
+/* M.S. Moonen et al (eds), Linear Algebra for Large Scale and */
+/* Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */
+
+/* [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified */
+/* Eigenvalues of a Regular Matrix Pair (A, B) and Condition */
+/* Estimation: Theory, Algorithms and Software, Report UMINF-94.04, */
+/* Department of Computing Science, Umea University, S-901 87 Umea, */
+/* Sweden, 1994. Also as LAPACK Working Note 87. To appear in */
+/* Numerical Algorithms, 1996. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+
+ /* Function Body */
+ *info = 0;
+
+/* Quick return if possible */
+
+ if (*n <= 1) {
+ return 0;
+ }
+
+ m = 2;
+ weak = FALSE_;
+ strong = FALSE_;
+
+/* Make a local copy of selected block in (A, B) */
+
+ clacpy_("Full", &m, &m, &a[*j1 + *j1 * a_dim1], lda, s, &c__2);
+ clacpy_("Full", &m, &m, &b[*j1 + *j1 * b_dim1], ldb, t, &c__2);
+
+/* Compute the threshold for testing the acceptance of swapping. */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S") / eps;
+ scale = 0.f;
+ sum = 1.f;
+ clacpy_("Full", &m, &m, s, &c__2, work, &m);
+ clacpy_("Full", &m, &m, t, &c__2, &work[m * m], &m);
+ i__1 = (m << 1) * m;
+ classq_(&i__1, work, &c__1, &scale, &sum);
+ sa = scale * sqrt(sum);
+/* Computing MAX */
+ r__1 = eps * 10.f * sa;
+ thresh = dmax(r__1,smlnum);
+
+/* Compute unitary QL and RQ that swap 1-by-1 and 1-by-1 blocks */
+/* using Givens rotations and perform the swap tentatively. */
+
+ q__2.r = s[3].r * t[0].r - s[3].i * t[0].i, q__2.i = s[3].r * t[0].i + s[
+ 3].i * t[0].r;
+ q__3.r = t[3].r * s[0].r - t[3].i * s[0].i, q__3.i = t[3].r * s[0].i + t[
+ 3].i * s[0].r;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+ f.r = q__1.r, f.i = q__1.i;
+ q__2.r = s[3].r * t[2].r - s[3].i * t[2].i, q__2.i = s[3].r * t[2].i + s[
+ 3].i * t[2].r;
+ q__3.r = t[3].r * s[2].r - t[3].i * s[2].i, q__3.i = t[3].r * s[2].i + t[
+ 3].i * s[2].r;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+ g.r = q__1.r, g.i = q__1.i;
+ sa = c_abs(&s[3]);
+ sb = c_abs(&t[3]);
+ clartg_(&g, &f, &cz, &sz, &cdum);
+ q__1.r = -sz.r, q__1.i = -sz.i;
+ sz.r = q__1.r, sz.i = q__1.i;
+ r_cnjg(&q__1, &sz);
+ crot_(&c__2, s, &c__1, &s[2], &c__1, &cz, &q__1);
+ r_cnjg(&q__1, &sz);
+ crot_(&c__2, t, &c__1, &t[2], &c__1, &cz, &q__1);
+ if (sa >= sb) {
+ clartg_(s, &s[1], &cq, &sq, &cdum);
+ } else {
+ clartg_(t, &t[1], &cq, &sq, &cdum);
+ }
+ crot_(&c__2, s, &c__2, &s[1], &c__2, &cq, &sq);
+ crot_(&c__2, t, &c__2, &t[1], &c__2, &cq, &sq);
+
+/* Weak stability test: |S21| + |T21| <= O(EPS F-norm((S, T))) */
+
+ ws = c_abs(&s[1]) + c_abs(&t[1]);
+ weak = ws <= thresh;
+ if (! weak) {
+ goto L20;
+ }
+
+ if (TRUE_) {
+
+/* Strong stability test: */
+/* F-norm((A-QL'*S*QR, B-QL'*T*QR)) <= O(EPS*F-norm((A, B))) */
+
+ clacpy_("Full", &m, &m, s, &c__2, work, &m);
+ clacpy_("Full", &m, &m, t, &c__2, &work[m * m], &m);
+ r_cnjg(&q__2, &sz);
+ q__1.r = -q__2.r, q__1.i = -q__2.i;
+ crot_(&c__2, work, &c__1, &work[2], &c__1, &cz, &q__1);
+ r_cnjg(&q__2, &sz);
+ q__1.r = -q__2.r, q__1.i = -q__2.i;
+ crot_(&c__2, &work[4], &c__1, &work[6], &c__1, &cz, &q__1);
+ q__1.r = -sq.r, q__1.i = -sq.i;
+ crot_(&c__2, work, &c__2, &work[1], &c__2, &cq, &q__1);
+ q__1.r = -sq.r, q__1.i = -sq.i;
+ crot_(&c__2, &work[4], &c__2, &work[5], &c__2, &cq, &q__1);
+ for (i__ = 1; i__ <= 2; ++i__) {
+ i__1 = i__ - 1;
+ i__2 = i__ - 1;
+ i__3 = *j1 + i__ - 1 + *j1 * a_dim1;
+ q__1.r = work[i__2].r - a[i__3].r, q__1.i = work[i__2].i - a[i__3]
+ .i;
+ work[i__1].r = q__1.r, work[i__1].i = q__1.i;
+ i__1 = i__ + 1;
+ i__2 = i__ + 1;
+ i__3 = *j1 + i__ - 1 + (*j1 + 1) * a_dim1;
+ q__1.r = work[i__2].r - a[i__3].r, q__1.i = work[i__2].i - a[i__3]
+ .i;
+ work[i__1].r = q__1.r, work[i__1].i = q__1.i;
+ i__1 = i__ + 3;
+ i__2 = i__ + 3;
+ i__3 = *j1 + i__ - 1 + *j1 * b_dim1;
+ q__1.r = work[i__2].r - b[i__3].r, q__1.i = work[i__2].i - b[i__3]
+ .i;
+ work[i__1].r = q__1.r, work[i__1].i = q__1.i;
+ i__1 = i__ + 5;
+ i__2 = i__ + 5;
+ i__3 = *j1 + i__ - 1 + (*j1 + 1) * b_dim1;
+ q__1.r = work[i__2].r - b[i__3].r, q__1.i = work[i__2].i - b[i__3]
+ .i;
+ work[i__1].r = q__1.r, work[i__1].i = q__1.i;
+/* L10: */
+ }
+ scale = 0.f;
+ sum = 1.f;
+ i__1 = (m << 1) * m;
+ classq_(&i__1, work, &c__1, &scale, &sum);
+ ss = scale * sqrt(sum);
+ strong = ss <= thresh;
+ if (! strong) {
+ goto L20;
+ }
+ }
+
+/* If the swap is accepted ("weakly" and "strongly"), apply the */
+/* equivalence transformations to the original matrix pair (A,B) */
+
+ i__1 = *j1 + 1;
+ r_cnjg(&q__1, &sz);
+ crot_(&i__1, &a[*j1 * a_dim1 + 1], &c__1, &a[(*j1 + 1) * a_dim1 + 1], &
+ c__1, &cz, &q__1);
+ i__1 = *j1 + 1;
+ r_cnjg(&q__1, &sz);
+ crot_(&i__1, &b[*j1 * b_dim1 + 1], &c__1, &b[(*j1 + 1) * b_dim1 + 1], &
+ c__1, &cz, &q__1);
+ i__1 = *n - *j1 + 1;
+ crot_(&i__1, &a[*j1 + *j1 * a_dim1], lda, &a[*j1 + 1 + *j1 * a_dim1], lda,
+ &cq, &sq);
+ i__1 = *n - *j1 + 1;
+ crot_(&i__1, &b[*j1 + *j1 * b_dim1], ldb, &b[*j1 + 1 + *j1 * b_dim1], ldb,
+ &cq, &sq);
+
+/* Set N1 by N2 (2,1) blocks to 0 */
+
+ i__1 = *j1 + 1 + *j1 * a_dim1;
+ a[i__1].r = 0.f, a[i__1].i = 0.f;
+ i__1 = *j1 + 1 + *j1 * b_dim1;
+ b[i__1].r = 0.f, b[i__1].i = 0.f;
+
+/* Accumulate transformations into Q and Z if requested. */
+
+ if (*wantz) {
+ r_cnjg(&q__1, &sz);
+ crot_(n, &z__[*j1 * z_dim1 + 1], &c__1, &z__[(*j1 + 1) * z_dim1 + 1],
+ &c__1, &cz, &q__1);
+ }
+ if (*wantq) {
+ r_cnjg(&q__1, &sq);
+ crot_(n, &q[*j1 * q_dim1 + 1], &c__1, &q[(*j1 + 1) * q_dim1 + 1], &
+ c__1, &cq, &q__1);
+ }
+
+/* Exit with INFO = 0 if swap was successfully performed. */
+
+ return 0;
+
+/* Exit with INFO = 1 if swap was rejected. */
+
+L20:
+ *info = 1;
+ return 0;
+
+/* End of CTGEX2 */
+
+} /* ctgex2_ */
diff --git a/contrib/libs/clapack/ctgexc.c b/contrib/libs/clapack/ctgexc.c
new file mode 100644
index 0000000000..2ef6ebccfb
--- /dev/null
+++ b/contrib/libs/clapack/ctgexc.c
@@ -0,0 +1,248 @@
+/* ctgexc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ctgexc_(logical *wantq, logical *wantz, integer *n,
+ complex *a, integer *lda, complex *b, integer *ldb, complex *q,
+ integer *ldq, complex *z__, integer *ldz, integer *ifst, integer *
+ ilst, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1,
+ z_offset, i__1;
+
+ /* Local variables */
+ integer here;
+ extern /* Subroutine */ int ctgex2_(logical *, logical *, integer *,
+ complex *, integer *, complex *, integer *, complex *, integer *,
+ complex *, integer *, integer *, integer *), xerbla_(char *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTGEXC reorders the generalized Schur decomposition of a complex */
+/* matrix pair (A,B), using an unitary equivalence transformation */
+/* (A, B) := Q * (A, B) * Z', so that the diagonal block of (A, B) with */
+/* row index IFST is moved to row ILST. */
+
+/* (A, B) must be in generalized Schur canonical form, that is, A and */
+/* B are both upper triangular. */
+
+/* Optionally, the matrices Q and Z of generalized Schur vectors are */
+/* updated. */
+
+/* Q(in) * A(in) * Z(in)' = Q(out) * A(out) * Z(out)' */
+/* Q(in) * B(in) * Z(in)' = Q(out) * B(out) * Z(out)' */
+
+/* Arguments */
+/* ========= */
+
+/* WANTQ (input) LOGICAL */
+/* .TRUE. : update the left transformation matrix Q; */
+/* .FALSE.: do not update Q. */
+
+/* WANTZ (input) LOGICAL */
+/* .TRUE. : update the right transformation matrix Z; */
+/* .FALSE.: do not update Z. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the upper triangular matrix A in the pair (A, B). */
+/* On exit, the updated matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX array, dimension (LDB,N) */
+/* On entry, the upper triangular matrix B in the pair (A, B). */
+/* On exit, the updated matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* Q (input/output) COMPLEX array, dimension (LDZ,N) */
+/* On entry, if WANTQ = .TRUE., the unitary matrix Q. */
+/* On exit, the updated matrix Q. */
+/* If WANTQ = .FALSE., Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= 1; */
+/* If WANTQ = .TRUE., LDQ >= N. */
+
+/* Z (input/output) COMPLEX array, dimension (LDZ,N) */
+/* On entry, if WANTZ = .TRUE., the unitary matrix Z. */
+/* On exit, the updated matrix Z. */
+/* If WANTZ = .FALSE., Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1; */
+/* If WANTZ = .TRUE., LDZ >= N. */
+
+/* IFST (input) INTEGER */
+/* ILST (input/output) INTEGER */
+/* Specify the reordering of the diagonal blocks of (A, B). */
+/* The block with row index IFST is moved to row ILST, by a */
+/* sequence of swapping between adjacent blocks. */
+
+/* INFO (output) INTEGER */
+/* =0: Successful exit. */
+/* <0: if INFO = -i, the i-th argument had an illegal value. */
+/* =1: The transformed matrix pair (A, B) would be too far */
+/* from generalized Schur form; the problem is ill- */
+/* conditioned. (A, B) may have been partially reordered, */
+/* and ILST points to the first row of the current */
+/* position of the block being moved. */
+
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */
+/* Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */
+/* M.S. Moonen et al (eds), Linear Algebra for Large Scale and */
+/* Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */
+
+/* [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified */
+/* Eigenvalues of a Regular Matrix Pair (A, B) and Condition */
+/* Estimation: Theory, Algorithms and Software, Report */
+/* UMINF - 94.04, Department of Computing Science, Umea University, */
+/* S-901 87 Umea, Sweden, 1994. Also as LAPACK Working Note 87. */
+/* To appear in Numerical Algorithms, 1996. */
+
+/* [3] B. Kagstrom and P. Poromaa, LAPACK-Style Algorithms and Software */
+/* for Solving the Generalized Sylvester Equation and Estimating the */
+/* Separation between Regular Matrix Pairs, Report UMINF - 93.23, */
+/* Department of Computing Science, Umea University, S-901 87 Umea, */
+/* Sweden, December 1993, Revised April 1994, Also as LAPACK working */
+/* Note 75. To appear in ACM Trans. on Math. Software, Vol 22, No 1, */
+/* 1996. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test input arguments. */
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ } else if (*ldq < 1 || *wantq && *ldq < max(1,*n)) {
+ *info = -9;
+ } else if (*ldz < 1 || *wantz && *ldz < max(1,*n)) {
+ *info = -11;
+ } else if (*ifst < 1 || *ifst > *n) {
+ *info = -12;
+ } else if (*ilst < 1 || *ilst > *n) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CTGEXC", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 1) {
+ return 0;
+ }
+ if (*ifst == *ilst) {
+ return 0;
+ }
+
+ if (*ifst < *ilst) {
+
+ here = *ifst;
+
+L10:
+
+/* Swap with next one below */
+
+ ctgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset], ldb, &q[
+ q_offset], ldq, &z__[z_offset], ldz, &here, info);
+ if (*info != 0) {
+ *ilst = here;
+ return 0;
+ }
+ ++here;
+ if (here < *ilst) {
+ goto L10;
+ }
+ --here;
+ } else {
+ here = *ifst - 1;
+
+L20:
+
+/* Swap with next one above */
+
+ ctgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset], ldb, &q[
+ q_offset], ldq, &z__[z_offset], ldz, &here, info);
+ if (*info != 0) {
+ *ilst = here;
+ return 0;
+ }
+ --here;
+ if (here >= *ilst) {
+ goto L20;
+ }
+ ++here;
+ }
+ *ilst = here;
+ return 0;
+
+/* End of CTGEXC */
+
+} /* ctgexc_ */
diff --git a/contrib/libs/clapack/ctgsen.c b/contrib/libs/clapack/ctgsen.c
new file mode 100644
index 0000000000..c307f994a7
--- /dev/null
+++ b/contrib/libs/clapack/ctgsen.c
@@ -0,0 +1,762 @@
+/* ctgsen.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ctgsen_(integer *ijob, logical *wantq, logical *wantz,
+ logical *select, integer *n, complex *a, integer *lda, complex *b,
+ integer *ldb, complex *alpha, complex *beta, complex *q, integer *ldq,
+ complex *z__, integer *ldz, integer *m, real *pl, real *pr, real *
+ dif, complex *work, integer *lwork, integer *iwork, integer *liwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1,
+ z_offset, i__1, i__2, i__3;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), c_abs(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, k, n1, n2, ks, mn2, ijb, kase, ierr;
+ real dsum;
+ logical swap;
+ complex temp1, temp2;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *);
+ integer isave[3];
+ logical wantd;
+ integer lwmin;
+ logical wantp;
+ extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+ *, integer *, integer *);
+ logical wantd1, wantd2;
+ real dscale;
+ extern doublereal slamch_(char *);
+ real rdscal;
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *);
+ real safmin;
+ extern /* Subroutine */ int ctgexc_(logical *, logical *, integer *,
+ complex *, integer *, complex *, integer *, complex *, integer *,
+ complex *, integer *, integer *, integer *, integer *), xerbla_(
+ char *, integer *), classq_(integer *, complex *, integer
+ *, real *, real *);
+ integer liwmin;
+ extern /* Subroutine */ int ctgsyl_(char *, integer *, integer *, integer
+ *, complex *, integer *, complex *, integer *, complex *, integer
+ *, complex *, integer *, complex *, integer *, complex *, integer
+ *, real *, real *, complex *, integer *, integer *, integer *);
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* January 2007 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTGSEN reorders the generalized Schur decomposition of a complex */
+/* matrix pair (A, B) (in terms of an unitary equivalence trans- */
+/* formation Q' * (A, B) * Z), so that a selected cluster of eigenvalues */
+/* appears in the leading diagonal blocks of the pair (A,B). The leading */
+/* columns of Q and Z form unitary bases of the corresponding left and */
+/* right eigenspaces (deflating subspaces). (A, B) must be in */
+/* generalized Schur canonical form, that is, A and B are both upper */
+/* triangular. */
+
+/* CTGSEN also computes the generalized eigenvalues */
+
+/* w(j)= ALPHA(j) / BETA(j) */
+
+/* of the reordered matrix pair (A, B). */
+
+/* Optionally, the routine computes estimates of reciprocal condition */
+/* numbers for eigenvalues and eigenspaces. These are Difu[(A11,B11), */
+/* (A22,B22)] and Difl[(A11,B11), (A22,B22)], i.e. the separation(s) */
+/* between the matrix pairs (A11, B11) and (A22,B22) that correspond to */
+/* the selected cluster and the eigenvalues outside the cluster, resp., */
+/* and norms of "projections" onto left and right eigenspaces w.r.t. */
+/* the selected cluster in the (1,1)-block. */
+
+
+/* Arguments */
+/* ========= */
+
+/* IJOB (input) integer */
+/* Specifies whether condition numbers are required for the */
+/* cluster of eigenvalues (PL and PR) or the deflating subspaces */
+/* (Difu and Difl): */
+/* =0: Only reorder w.r.t. SELECT. No extras. */
+/* =1: Reciprocal of norms of "projections" onto left and right */
+/* eigenspaces w.r.t. the selected cluster (PL and PR). */
+/* =2: Upper bounds on Difu and Difl. F-norm-based estimate */
+/* (DIF(1:2)). */
+/* =3: Estimate of Difu and Difl. 1-norm-based estimate */
+/* (DIF(1:2)). */
+/* About 5 times as expensive as IJOB = 2. */
+/* =4: Compute PL, PR and DIF (i.e. 0, 1 and 2 above): Economic */
+/* version to get it all. */
+/* =5: Compute PL, PR and DIF (i.e. 0, 1 and 3 above) */
+
+/* WANTQ (input) LOGICAL */
+/* .TRUE. : update the left transformation matrix Q; */
+/* .FALSE.: do not update Q. */
+
+/* WANTZ (input) LOGICAL */
+/* .TRUE. : update the right transformation matrix Z; */
+/* .FALSE.: do not update Z. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* SELECT specifies the eigenvalues in the selected cluster. To */
+/* select an eigenvalue w(j), SELECT(j) must be set to */
+/* .TRUE.. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension(LDA,N) */
+/* On entry, the upper triangular matrix A, in generalized */
+/* Schur canonical form. */
+/* On exit, A is overwritten by the reordered matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX array, dimension(LDB,N) */
+/* On entry, the upper triangular matrix B, in generalized */
+/* Schur canonical form. */
+/* On exit, B is overwritten by the reordered matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* ALPHA (output) COMPLEX array, dimension (N) */
+/* BETA (output) COMPLEX array, dimension (N) */
+/* The diagonal elements of A and B, respectively, */
+/* when the pair (A,B) has been reduced to generalized Schur */
+/* form. ALPHA(i)/BETA(i) i=1,...,N are the generalized */
+/* eigenvalues. */
+
+/* Q (input/output) COMPLEX array, dimension (LDQ,N) */
+/* On entry, if WANTQ = .TRUE., Q is an N-by-N matrix. */
+/* On exit, Q has been postmultiplied by the left unitary */
+/* transformation matrix which reorder (A, B); The leading M */
+/* columns of Q form orthonormal bases for the specified pair of */
+/* left eigenspaces (deflating subspaces). */
+/* If WANTQ = .FALSE., Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= 1. */
+/* If WANTQ = .TRUE., LDQ >= N. */
+
+/* Z (input/output) COMPLEX array, dimension (LDZ,N) */
+/* On entry, if WANTZ = .TRUE., Z is an N-by-N matrix. */
+/* On exit, Z has been postmultiplied by the left unitary */
+/* transformation matrix which reorder (A, B); The leading M */
+/* columns of Z form orthonormal bases for the specified pair of */
+/* left eigenspaces (deflating subspaces). */
+/* If WANTZ = .FALSE., Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1. */
+/* If WANTZ = .TRUE., LDZ >= N. */
+
+/* M (output) INTEGER */
+/* The dimension of the specified pair of left and right */
+/* eigenspaces, (deflating subspaces) 0 <= M <= N. */
+
+/* PL (output) REAL */
+/* PR (output) REAL */
+/* If IJOB = 1, 4 or 5, PL, PR are lower bounds on the */
+/* reciprocal of the norm of "projections" onto left and right */
+/* eigenspace with respect to the selected cluster. */
+/* 0 < PL, PR <= 1. */
+/* If M = 0 or M = N, PL = PR = 1. */
+/* If IJOB = 0, 2 or 3 PL, PR are not referenced. */
+
+/* DIF (output) REAL array, dimension (2). */
+/* If IJOB >= 2, DIF(1:2) store the estimates of Difu and Difl. */
+/* If IJOB = 2 or 4, DIF(1:2) are F-norm-based upper bounds on */
+/* Difu and Difl. If IJOB = 3 or 5, DIF(1:2) are 1-norm-based */
+/* estimates of Difu and Difl, computed using reversed */
+/* communication with CLACN2. */
+/* If M = 0 or N, DIF(1:2) = F-norm([A, B]). */
+/* If IJOB = 0 or 1, DIF is not referenced. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* IF IJOB = 0, WORK is not referenced. Otherwise, */
+/* on exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= 1 */
+/* If IJOB = 1, 2 or 4, LWORK >= 2*M*(N-M) */
+/* If IJOB = 3 or 5, LWORK >= 4*M*(N-M) */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* IF IJOB = 0, IWORK is not referenced. Otherwise, */
+/* on exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. LIWORK >= 1. */
+/* If IJOB = 1, 2 or 4, LIWORK >= N+2; */
+/* If IJOB = 3 or 5, LIWORK >= MAX(N+2, 2*M*(N-M)); */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal size of the IWORK array, */
+/* returns this value as the first entry of the IWORK array, and */
+/* no error message related to LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* =0: Successful exit. */
+/* <0: If INFO = -i, the i-th argument had an illegal value. */
+/* =1: Reordering of (A, B) failed because the transformed */
+/* matrix pair (A, B) would be too far from generalized */
+/* Schur form; the problem is very ill-conditioned. */
+/* (A, B) may have been partially reordered. */
+/* If requested, 0 is returned in DIF(*), PL and PR. */
+
+
+/* Further Details */
+/* =============== */
+
+/* CTGSEN first collects the selected eigenvalues by computing unitary */
+/* U and W that move them to the top left corner of (A, B). In other */
+/* words, the selected eigenvalues are the eigenvalues of (A11, B11) in */
+
+/* U'*(A, B)*W = (A11 A12) (B11 B12) n1 */
+/* ( 0 A22),( 0 B22) n2 */
+/* n1 n2 n1 n2 */
+
+/* where N = n1+n2 and U' means the conjugate transpose of U. The first */
+/* n1 columns of U and W span the specified pair of left and right */
+/* eigenspaces (deflating subspaces) of (A, B). */
+
+/* If (A, B) has been obtained from the generalized real Schur */
+/* decomposition of a matrix pair (C, D) = Q*(A, B)*Z', then the */
+/* reordered generalized Schur form of (C, D) is given by */
+
+/* (C, D) = (Q*U)*(U'*(A, B)*W)*(Z*W)', */
+
+/* and the first n1 columns of Q*U and Z*W span the corresponding */
+/* deflating subspaces of (C, D) (Q and Z store Q*U and Z*W, resp.). */
+
+/* Note that if the selected eigenvalue is sufficiently ill-conditioned, */
+/* then its value may differ significantly from its value before */
+/* reordering. */
+
+/* The reciprocal condition numbers of the left and right eigenspaces */
+/* spanned by the first n1 columns of U and W (or Q*U and Z*W) may */
+/* be returned in DIF(1:2), corresponding to Difu and Difl, resp. */
+
+/* The Difu and Difl are defined as: */
+
+/* Difu[(A11, B11), (A22, B22)] = sigma-min( Zu ) */
+/* and */
+/* Difl[(A11, B11), (A22, B22)] = Difu[(A22, B22), (A11, B11)], */
+
+/* where sigma-min(Zu) is the smallest singular value of the */
+/* (2*n1*n2)-by-(2*n1*n2) matrix */
+
+/* Zu = [ kron(In2, A11) -kron(A22', In1) ] */
+/* [ kron(In2, B11) -kron(B22', In1) ]. */
+
+/* Here, Inx is the identity matrix of size nx and A22' is the */
+/* transpose of A22. kron(X, Y) is the Kronecker product between */
+/* the matrices X and Y. */
+
+/* When DIF(2) is small, small changes in (A, B) can cause large changes */
+/* in the deflating subspace. An approximate (asymptotic) bound on the */
+/* maximum angular error in the computed deflating subspaces is */
+
+/* EPS * norm((A, B)) / DIF(2), */
+
+/* where EPS is the machine precision. */
+
+/* The reciprocal norm of the projectors on the left and right */
+/* eigenspaces associated with (A11, B11) may be returned in PL and PR. */
+/* They are computed as follows. First we compute L and R so that */
+/* P*(A, B)*Q is block diagonal, where */
+
+/* P = ( I -L ) n1 Q = ( I R ) n1 */
+/* ( 0 I ) n2 and ( 0 I ) n2 */
+/* n1 n2 n1 n2 */
+
+/* and (L, R) is the solution to the generalized Sylvester equation */
+
+/* A11*R - L*A22 = -A12 */
+/* B11*R - L*B22 = -B12 */
+
+/* Then PL = (F-norm(L)**2+1)**(-1/2) and PR = (F-norm(R)**2+1)**(-1/2). */
+/* An approximate (asymptotic) bound on the average absolute error of */
+/* the selected eigenvalues is */
+
+/* EPS * norm((A, B)) / PL. */
+
+/* There are also global error bounds which valid for perturbations up */
+/* to a certain restriction: A lower bound (x) on the smallest */
+/* F-norm(E,F) for which an eigenvalue of (A11, B11) may move and */
+/* coalesce with an eigenvalue of (A22, B22) under perturbation (E,F), */
+/* (i.e. (A + E, B + F), is */
+
+/* x = min(Difu,Difl)/((1/(PL*PL)+1/(PR*PR))**(1/2)+2*max(1/PL,1/PR)). */
+
+/* An approximate bound on x can be computed from DIF(1:2), PL and PR. */
+
+/* If y = ( F-norm(E,F) / x) <= 1, the angles between the perturbed */
+/* (L', R') and unperturbed (L, R) left and right deflating subspaces */
+/* associated with the selected cluster in the (1,1)-blocks can be */
+/* bounded as */
+
+/* max-angle(L, L') <= arctan( y * PL / (1 - y * (1 - PL * PL)**(1/2)) */
+/* max-angle(R, R') <= arctan( y * PR / (1 - y * (1 - PR * PR)**(1/2)) */
+
+/* See LAPACK User's Guide section 4.11 or the following references */
+/* for more information. */
+
+/* Note that if the default method for computing the Frobenius-norm- */
+/* based estimate DIF is not wanted (see CLATDF), then the parameter */
+/* IDIFJB (see below) should be changed from 3 to 4 (routine CLATDF */
+/* (IJOB = 2 will be used)). See CTGSYL for more details. */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* References */
+/* ========== */
+
+/* [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */
+/* Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */
+/* M.S. Moonen et al (eds), Linear Algebra for Large Scale and */
+/* Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */
+
+/* [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified */
+/* Eigenvalues of a Regular Matrix Pair (A, B) and Condition */
+/* Estimation: Theory, Algorithms and Software, Report */
+/* UMINF - 94.04, Department of Computing Science, Umea University, */
+/* S-901 87 Umea, Sweden, 1994. Also as LAPACK Working Note 87. */
+/* To appear in Numerical Algorithms, 1996. */
+
+/* [3] B. Kagstrom and P. Poromaa, LAPACK-Style Algorithms and Software */
+/* for Solving the Generalized Sylvester Equation and Estimating the */
+/* Separation between Regular Matrix Pairs, Report UMINF - 93.23, */
+/* Department of Computing Science, Umea University, S-901 87 Umea, */
+/* Sweden, December 1993, Revised April 1994, Also as LAPACK working */
+/* Note 75. To appear in ACM Trans. on Math. Software, Vol 22, No 1, */
+/* 1996. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+ /* Parameter adjustments */
+ --select;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alpha;
+ --beta;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --dif;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1 || *liwork == -1;
+
+ if (*ijob < 0 || *ijob > 5) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldq < 1 || *wantq && *ldq < *n) {
+ *info = -13;
+ } else if (*ldz < 1 || *wantz && *ldz < *n) {
+ *info = -15;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CTGSEN", &i__1);
+ return 0;
+ }
+
+ ierr = 0;
+
+ wantp = *ijob == 1 || *ijob >= 4;
+ wantd1 = *ijob == 2 || *ijob == 4;
+ wantd2 = *ijob == 3 || *ijob == 5;
+ wantd = wantd1 || wantd2;
+
+/* Set M to the dimension of the specified pair of deflating */
+/* subspaces. */
+
+ *m = 0;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k;
+ i__3 = k + k * a_dim1;
+ alpha[i__2].r = a[i__3].r, alpha[i__2].i = a[i__3].i;
+ i__2 = k;
+ i__3 = k + k * b_dim1;
+ beta[i__2].r = b[i__3].r, beta[i__2].i = b[i__3].i;
+ if (k < *n) {
+ if (select[k]) {
+ ++(*m);
+ }
+ } else {
+ if (select[*n]) {
+ ++(*m);
+ }
+ }
+/* L10: */
+ }
+
+ if (*ijob == 1 || *ijob == 2 || *ijob == 4) {
+/* Computing MAX */
+ i__1 = 1, i__2 = (*m << 1) * (*n - *m);
+ lwmin = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = 1, i__2 = *n + 2;
+ liwmin = max(i__1,i__2);
+ } else if (*ijob == 3 || *ijob == 5) {
+/* Computing MAX */
+ i__1 = 1, i__2 = (*m << 2) * (*n - *m);
+ lwmin = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = 1, i__2 = (*m << 1) * (*n - *m), i__1 = max(i__1,i__2), i__2 =
+ *n + 2;
+ liwmin = max(i__1,i__2);
+ } else {
+ lwmin = 1;
+ liwmin = 1;
+ }
+
+ work[1].r = (real) lwmin, work[1].i = 0.f;
+ iwork[1] = liwmin;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -21;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -23;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CTGSEN", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == *n || *m == 0) {
+ if (wantp) {
+ *pl = 1.f;
+ *pr = 1.f;
+ }
+ if (wantd) {
+ dscale = 0.f;
+ dsum = 1.f;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ classq_(n, &a[i__ * a_dim1 + 1], &c__1, &dscale, &dsum);
+ classq_(n, &b[i__ * b_dim1 + 1], &c__1, &dscale, &dsum);
+/* L20: */
+ }
+ dif[1] = dscale * sqrt(dsum);
+ dif[2] = dif[1];
+ }
+ goto L70;
+ }
+
+/* Get machine constant */
+
+ safmin = slamch_("S");
+
+/* Collect the selected blocks at the top-left corner of (A, B). */
+
+ ks = 0;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ swap = select[k];
+ if (swap) {
+ ++ks;
+
+/* Swap the K-th block to position KS. Compute unitary Q */
+/* and Z that will swap adjacent diagonal blocks in (A, B). */
+
+ if (k != ks) {
+ ctgexc_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset], ldb,
+ &q[q_offset], ldq, &z__[z_offset], ldz, &k, &ks, &
+ ierr);
+ }
+
+ if (ierr > 0) {
+
+/* Swap is rejected: exit. */
+
+ *info = 1;
+ if (wantp) {
+ *pl = 0.f;
+ *pr = 0.f;
+ }
+ if (wantd) {
+ dif[1] = 0.f;
+ dif[2] = 0.f;
+ }
+ goto L70;
+ }
+ }
+/* L30: */
+ }
+ if (wantp) {
+
+/* Solve generalized Sylvester equation for R and L: */
+/* A11 * R - L * A22 = A12 */
+/* B11 * R - L * B22 = B12 */
+
+ n1 = *m;
+ n2 = *n - *m;
+ i__ = n1 + 1;
+ clacpy_("Full", &n1, &n2, &a[i__ * a_dim1 + 1], lda, &work[1], &n1);
+ clacpy_("Full", &n1, &n2, &b[i__ * b_dim1 + 1], ldb, &work[n1 * n2 +
+ 1], &n1);
+ ijb = 0;
+ i__1 = *lwork - (n1 << 1) * n2;
+ ctgsyl_("N", &ijb, &n1, &n2, &a[a_offset], lda, &a[i__ + i__ * a_dim1]
+, lda, &work[1], &n1, &b[b_offset], ldb, &b[i__ + i__ *
+ b_dim1], ldb, &work[n1 * n2 + 1], &n1, &dscale, &dif[1], &
+ work[(n1 * n2 << 1) + 1], &i__1, &iwork[1], &ierr);
+
+/* Estimate the reciprocal of norms of "projections" onto */
+/* left and right eigenspaces */
+
+ rdscal = 0.f;
+ dsum = 1.f;
+ i__1 = n1 * n2;
+ classq_(&i__1, &work[1], &c__1, &rdscal, &dsum);
+ *pl = rdscal * sqrt(dsum);
+ if (*pl == 0.f) {
+ *pl = 1.f;
+ } else {
+ *pl = dscale / (sqrt(dscale * dscale / *pl + *pl) * sqrt(*pl));
+ }
+ rdscal = 0.f;
+ dsum = 1.f;
+ i__1 = n1 * n2;
+ classq_(&i__1, &work[n1 * n2 + 1], &c__1, &rdscal, &dsum);
+ *pr = rdscal * sqrt(dsum);
+ if (*pr == 0.f) {
+ *pr = 1.f;
+ } else {
+ *pr = dscale / (sqrt(dscale * dscale / *pr + *pr) * sqrt(*pr));
+ }
+ }
+ if (wantd) {
+
+/* Compute estimates Difu and Difl. */
+
+ if (wantd1) {
+ n1 = *m;
+ n2 = *n - *m;
+ i__ = n1 + 1;
+ ijb = 3;
+
+/* Frobenius norm-based Difu estimate. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ ctgsyl_("N", &ijb, &n1, &n2, &a[a_offset], lda, &a[i__ + i__ *
+ a_dim1], lda, &work[1], &n1, &b[b_offset], ldb, &b[i__ +
+ i__ * b_dim1], ldb, &work[n1 * n2 + 1], &n1, &dscale, &
+ dif[1], &work[(n1 * n2 << 1) + 1], &i__1, &iwork[1], &
+ ierr);
+
+/* Frobenius norm-based Difl estimate. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ ctgsyl_("N", &ijb, &n2, &n1, &a[i__ + i__ * a_dim1], lda, &a[
+ a_offset], lda, &work[1], &n2, &b[i__ + i__ * b_dim1],
+ ldb, &b[b_offset], ldb, &work[n1 * n2 + 1], &n2, &dscale,
+ &dif[2], &work[(n1 * n2 << 1) + 1], &i__1, &iwork[1], &
+ ierr);
+ } else {
+
+/* Compute 1-norm-based estimates of Difu and Difl using */
+/* reversed communication with CLACN2. In each step a */
+/* generalized Sylvester equation or a transposed variant */
+/* is solved. */
+
+ kase = 0;
+ n1 = *m;
+ n2 = *n - *m;
+ i__ = n1 + 1;
+ ijb = 0;
+ mn2 = (n1 << 1) * n2;
+
+/* 1-norm-based estimate of Difu. */
+
+L40:
+ clacn2_(&mn2, &work[mn2 + 1], &work[1], &dif[1], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Solve generalized Sylvester equation */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ ctgsyl_("N", &ijb, &n1, &n2, &a[a_offset], lda, &a[i__ +
+ i__ * a_dim1], lda, &work[1], &n1, &b[b_offset],
+ ldb, &b[i__ + i__ * b_dim1], ldb, &work[n1 * n2 +
+ 1], &n1, &dscale, &dif[1], &work[(n1 * n2 << 1) +
+ 1], &i__1, &iwork[1], &ierr);
+ } else {
+
+/* Solve the transposed variant. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ ctgsyl_("C", &ijb, &n1, &n2, &a[a_offset], lda, &a[i__ +
+ i__ * a_dim1], lda, &work[1], &n1, &b[b_offset],
+ ldb, &b[i__ + i__ * b_dim1], ldb, &work[n1 * n2 +
+ 1], &n1, &dscale, &dif[1], &work[(n1 * n2 << 1) +
+ 1], &i__1, &iwork[1], &ierr);
+ }
+ goto L40;
+ }
+ dif[1] = dscale / dif[1];
+
+/* 1-norm-based estimate of Difl. */
+
+L50:
+ clacn2_(&mn2, &work[mn2 + 1], &work[1], &dif[2], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Solve generalized Sylvester equation */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ ctgsyl_("N", &ijb, &n2, &n1, &a[i__ + i__ * a_dim1], lda,
+ &a[a_offset], lda, &work[1], &n2, &b[i__ + i__ *
+ b_dim1], ldb, &b[b_offset], ldb, &work[n1 * n2 +
+ 1], &n2, &dscale, &dif[2], &work[(n1 * n2 << 1) +
+ 1], &i__1, &iwork[1], &ierr);
+ } else {
+
+/* Solve the transposed variant. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ ctgsyl_("C", &ijb, &n2, &n1, &a[i__ + i__ * a_dim1], lda,
+ &a[a_offset], lda, &work[1], &n2, &b[b_offset],
+ ldb, &b[i__ + i__ * b_dim1], ldb, &work[n1 * n2 +
+ 1], &n2, &dscale, &dif[2], &work[(n1 * n2 << 1) +
+ 1], &i__1, &iwork[1], &ierr);
+ }
+ goto L50;
+ }
+ dif[2] = dscale / dif[2];
+ }
+ }
+
+/* If B(K,K) is complex, make it real and positive (normalization */
+/* of the generalized Schur form) and Store the generalized */
+/* eigenvalues of reordered pair (A, B) */
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ dscale = c_abs(&b[k + k * b_dim1]);
+ if (dscale > safmin) {
+ i__2 = k + k * b_dim1;
+ q__2.r = b[i__2].r / dscale, q__2.i = b[i__2].i / dscale;
+ r_cnjg(&q__1, &q__2);
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ i__2 = k + k * b_dim1;
+ q__1.r = b[i__2].r / dscale, q__1.i = b[i__2].i / dscale;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ i__2 = k + k * b_dim1;
+ b[i__2].r = dscale, b[i__2].i = 0.f;
+ i__2 = *n - k;
+ cscal_(&i__2, &temp1, &b[k + (k + 1) * b_dim1], ldb);
+ i__2 = *n - k + 1;
+ cscal_(&i__2, &temp1, &a[k + k * a_dim1], lda);
+ if (*wantq) {
+ cscal_(n, &temp2, &q[k * q_dim1 + 1], &c__1);
+ }
+ } else {
+ i__2 = k + k * b_dim1;
+ b[i__2].r = 0.f, b[i__2].i = 0.f;
+ }
+
+ i__2 = k;
+ i__3 = k + k * a_dim1;
+ alpha[i__2].r = a[i__3].r, alpha[i__2].i = a[i__3].i;
+ i__2 = k;
+ i__3 = k + k * b_dim1;
+ beta[i__2].r = b[i__3].r, beta[i__2].i = b[i__3].i;
+
+/* L60: */
+ }
+
+L70:
+
+ work[1].r = (real) lwmin, work[1].i = 0.f;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of CTGSEN */
+
+} /* ctgsen_ */
diff --git a/contrib/libs/clapack/ctgsja.c b/contrib/libs/clapack/ctgsja.c
new file mode 100644
index 0000000000..04174e8862
--- /dev/null
+++ b/contrib/libs/clapack/ctgsja.c
@@ -0,0 +1,671 @@
+/* ctgsja.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+static real c_b39 = -1.f;
+static real c_b42 = 1.f;
+
+/* Subroutine */ int ctgsja_(char *jobu, char *jobv, char *jobq, integer *m,
+ integer *p, integer *n, integer *k, integer *l, complex *a, integer *
+ lda, complex *b, integer *ldb, real *tola, real *tolb, real *alpha,
+ real *beta, complex *u, integer *ldu, complex *v, integer *ldv,
+ complex *q, integer *ldq, complex *work, integer *ncycle, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, u_dim1,
+ u_offset, v_dim1, v_offset, i__1, i__2, i__3, i__4;
+ real r__1;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j;
+ real a1, b1, a3, b3;
+ complex a2, b2;
+ real csq, csu, csv;
+ complex snq;
+ real rwk;
+ complex snu, snv;
+ extern /* Subroutine */ int crot_(integer *, complex *, integer *,
+ complex *, integer *, real *, complex *);
+ real gamma;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *);
+ logical initq, initu, initv, wantq, upper;
+ real error, ssmin;
+ logical wantu, wantv;
+ extern /* Subroutine */ int clags2_(logical *, real *, complex *, real *,
+ real *, complex *, real *, real *, complex *, real *, complex *,
+ real *, complex *), clapll_(integer *, complex *, integer *,
+ complex *, integer *, real *), csscal_(integer *, real *, complex
+ *, integer *);
+ integer kcycle;
+ extern /* Subroutine */ int claset_(char *, integer *, integer *, complex
+ *, complex *, complex *, integer *), xerbla_(char *,
+ integer *), slartg_(real *, real *, real *, real *, real *
+);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTGSJA computes the generalized singular value decomposition (GSVD) */
+/* of two complex upper triangular (or trapezoidal) matrices A and B. */
+
+/* On entry, it is assumed that matrices A and B have the following */
+/* forms, which may be obtained by the preprocessing subroutine CGGSVP */
+/* from a general M-by-N matrix A and P-by-N matrix B: */
+
+/* N-K-L K L */
+/* A = K ( 0 A12 A13 ) if M-K-L >= 0; */
+/* L ( 0 0 A23 ) */
+/* M-K-L ( 0 0 0 ) */
+
+/* N-K-L K L */
+/* A = K ( 0 A12 A13 ) if M-K-L < 0; */
+/* M-K ( 0 0 A23 ) */
+
+/* N-K-L K L */
+/* B = L ( 0 0 B13 ) */
+/* P-L ( 0 0 0 ) */
+
+/* where the K-by-K matrix A12 and L-by-L matrix B13 are nonsingular */
+/* upper triangular; A23 is L-by-L upper triangular if M-K-L >= 0, */
+/* otherwise A23 is (M-K)-by-L upper trapezoidal. */
+
+/* On exit, */
+
+/* U'*A*Q = D1*( 0 R ), V'*B*Q = D2*( 0 R ), */
+
+/* where U, V and Q are unitary matrices, Z' denotes the conjugate */
+/* transpose of Z, R is a nonsingular upper triangular matrix, and D1 */
+/* and D2 are ``diagonal'' matrices, which are of the following */
+/* structures: */
+
+/* If M-K-L >= 0, */
+
+/* K L */
+/* D1 = K ( I 0 ) */
+/* L ( 0 C ) */
+/* M-K-L ( 0 0 ) */
+
+/* K L */
+/* D2 = L ( 0 S ) */
+/* P-L ( 0 0 ) */
+
+/* N-K-L K L */
+/* ( 0 R ) = K ( 0 R11 R12 ) K */
+/* L ( 0 0 R22 ) L */
+
+/* where */
+
+/* C = diag( ALPHA(K+1), ... , ALPHA(K+L) ), */
+/* S = diag( BETA(K+1), ... , BETA(K+L) ), */
+/* C**2 + S**2 = I. */
+
+/* R is stored in A(1:K+L,N-K-L+1:N) on exit. */
+
+/* If M-K-L < 0, */
+
+/* K M-K K+L-M */
+/* D1 = K ( I 0 0 ) */
+/* M-K ( 0 C 0 ) */
+
+/* K M-K K+L-M */
+/* D2 = M-K ( 0 S 0 ) */
+/* K+L-M ( 0 0 I ) */
+/* P-L ( 0 0 0 ) */
+
+/* N-K-L K M-K K+L-M */
+/* ( 0 R ) = K ( 0 R11 R12 R13 ) */
+/* M-K ( 0 0 R22 R23 ) */
+/* K+L-M ( 0 0 0 R33 ) */
+
+/* where */
+/* C = diag( ALPHA(K+1), ... , ALPHA(M) ), */
+/* S = diag( BETA(K+1), ... , BETA(M) ), */
+/* C**2 + S**2 = I. */
+
+/* R = ( R11 R12 R13 ) is stored in A(1:M, N-K-L+1:N) and R33 is stored */
+/* ( 0 R22 R23 ) */
+/* in B(M-K+1:L,N+M-K-L+1:N) on exit. */
+
+/* The computation of the unitary transformation matrices U, V or Q */
+/* is optional. These matrices may either be formed explicitly, or they */
+/* may be postmultiplied into input matrices U1, V1, or Q1. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBU (input) CHARACTER*1 */
+/* = 'U': U must contain a unitary matrix U1 on entry, and */
+/* the product U1*U is returned; */
+/* = 'I': U is initialized to the unit matrix, and the */
+/* unitary matrix U is returned; */
+/* = 'N': U is not computed. */
+
+/* JOBV (input) CHARACTER*1 */
+/* = 'V': V must contain a unitary matrix V1 on entry, and */
+/* the product V1*V is returned; */
+/* = 'I': V is initialized to the unit matrix, and the */
+/* unitary matrix V is returned; */
+/* = 'N': V is not computed. */
+
+/* JOBQ (input) CHARACTER*1 */
+/* = 'Q': Q must contain a unitary matrix Q1 on entry, and */
+/* the product Q1*Q is returned; */
+/* = 'I': Q is initialized to the unit matrix, and the */
+/* unitary matrix Q is returned; */
+/* = 'N': Q is not computed. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* P (input) INTEGER */
+/* The number of rows of the matrix B. P >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrices A and B. N >= 0. */
+
+/* K (input) INTEGER */
+/* L (input) INTEGER */
+/* K and L specify the subblocks in the input matrices A and B: */
+/* A23 = A(K+1:MIN(K+L,M),N-L+1:N) and B13 = B(1:L,,N-L+1:N) */
+/* of A and B, whose GSVD is going to be computed by CTGSJA. */
+/* See Further details. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A(N-K+1:N,1:MIN(K+L,M) ) contains the triangular */
+/* matrix R or part of R. See Purpose for details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) COMPLEX array, dimension (LDB,N) */
+/* On entry, the P-by-N matrix B. */
+/* On exit, if necessary, B(M-K+1:L,N+M-K-L+1:N) contains */
+/* a part of R. See Purpose for details. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,P). */
+
+/* TOLA (input) REAL */
+/* TOLB (input) REAL */
+/* TOLA and TOLB are the convergence criteria for the Jacobi- */
+/* Kogbetliantz iteration procedure. Generally, they are the */
+/* same as used in the preprocessing step, say */
+/* TOLA = MAX(M,N)*norm(A)*MACHEPS, */
+/* TOLB = MAX(P,N)*norm(B)*MACHEPS. */
+
+/* ALPHA (output) REAL array, dimension (N) */
+/* BETA (output) REAL array, dimension (N) */
+/* On exit, ALPHA and BETA contain the generalized singular */
+/* value pairs of A and B; */
+/* ALPHA(1:K) = 1, */
+/* BETA(1:K) = 0, */
+/* and if M-K-L >= 0, */
+/* ALPHA(K+1:K+L) = diag(C), */
+/* BETA(K+1:K+L) = diag(S), */
+/* or if M-K-L < 0, */
+/* ALPHA(K+1:M)= C, ALPHA(M+1:K+L)= 0 */
+/* BETA(K+1:M) = S, BETA(M+1:K+L) = 1. */
+/* Furthermore, if K+L < N, */
+/* ALPHA(K+L+1:N) = 0 */
+/* BETA(K+L+1:N) = 0. */
+
+/* U (input/output) COMPLEX array, dimension (LDU,M) */
+/* On entry, if JOBU = 'U', U must contain a matrix U1 (usually */
+/* the unitary matrix returned by CGGSVP). */
+/* On exit, */
+/* if JOBU = 'I', U contains the unitary matrix U; */
+/* if JOBU = 'U', U contains the product U1*U. */
+/* If JOBU = 'N', U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= max(1,M) if */
+/* JOBU = 'U'; LDU >= 1 otherwise. */
+
+/* V (input/output) COMPLEX array, dimension (LDV,P) */
+/* On entry, if JOBV = 'V', V must contain a matrix V1 (usually */
+/* the unitary matrix returned by CGGSVP). */
+/* On exit, */
+/* if JOBV = 'I', V contains the unitary matrix V; */
+/* if JOBV = 'V', V contains the product V1*V. */
+/* If JOBV = 'N', V is not referenced. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. LDV >= max(1,P) if */
+/* JOBV = 'V'; LDV >= 1 otherwise. */
+
+/* Q (input/output) COMPLEX array, dimension (LDQ,N) */
+/* On entry, if JOBQ = 'Q', Q must contain a matrix Q1 (usually */
+/* the unitary matrix returned by CGGSVP). */
+/* On exit, */
+/* if JOBQ = 'I', Q contains the unitary matrix Q; */
+/* if JOBQ = 'Q', Q contains the product Q1*Q. */
+/* If JOBQ = 'N', Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N) if */
+/* JOBQ = 'Q'; LDQ >= 1 otherwise. */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* NCYCLE (output) INTEGER */
+/* The number of cycles required for convergence. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1: the procedure does not converge after MAXIT cycles. */
+
+/* Internal Parameters */
+/* =================== */
+
+/* MAXIT INTEGER */
+/* MAXIT specifies the total loops that the iterative procedure */
+/* may take. If after MAXIT cycles, the routine fails to */
+/* converge, we return INFO = 1. */
+
+/* Further Details */
+/* =============== */
+
+/* CTGSJA essentially uses a variant of Kogbetliantz algorithm to reduce */
+/* min(L,M-K)-by-L triangular (or trapezoidal) matrix A23 and L-by-L */
+/* matrix B13 to the form: */
+
+/* U1'*A13*Q1 = C1*R1; V1'*B13*Q1 = S1*R1, */
+
+/* where U1, V1 and Q1 are unitary matrix, and Z' is the conjugate */
+/* transpose of Z. C1 and S1 are diagonal matrices satisfying */
+
+/* C1**2 + S1**2 = I, */
+
+/* and R1 is an L-by-L nonsingular upper triangular matrix. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alpha;
+ --beta;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --work;
+
+ /* Function Body */
+ initu = lsame_(jobu, "I");
+ wantu = initu || lsame_(jobu, "U");
+
+ initv = lsame_(jobv, "I");
+ wantv = initv || lsame_(jobv, "V");
+
+ initq = lsame_(jobq, "I");
+ wantq = initq || lsame_(jobq, "Q");
+
+ *info = 0;
+ if (! (initu || wantu || lsame_(jobu, "N"))) {
+ *info = -1;
+ } else if (! (initv || wantv || lsame_(jobv, "N")))
+ {
+ *info = -2;
+ } else if (! (initq || wantq || lsame_(jobq, "N")))
+ {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*p < 0) {
+ *info = -5;
+ } else if (*n < 0) {
+ *info = -6;
+ } else if (*lda < max(1,*m)) {
+ *info = -10;
+ } else if (*ldb < max(1,*p)) {
+ *info = -12;
+ } else if (*ldu < 1 || wantu && *ldu < *m) {
+ *info = -18;
+ } else if (*ldv < 1 || wantv && *ldv < *p) {
+ *info = -20;
+ } else if (*ldq < 1 || wantq && *ldq < *n) {
+ *info = -22;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CTGSJA", &i__1);
+ return 0;
+ }
+
+/* Initialize U, V and Q, if necessary */
+
+ if (initu) {
+ claset_("Full", m, m, &c_b1, &c_b2, &u[u_offset], ldu);
+ }
+ if (initv) {
+ claset_("Full", p, p, &c_b1, &c_b2, &v[v_offset], ldv);
+ }
+ if (initq) {
+ claset_("Full", n, n, &c_b1, &c_b2, &q[q_offset], ldq);
+ }
+
+/* Loop until convergence */
+
+ upper = FALSE_;
+ for (kcycle = 1; kcycle <= 40; ++kcycle) {
+
+ upper = ! upper;
+
+ i__1 = *l - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *l;
+ for (j = i__ + 1; j <= i__2; ++j) {
+
+ a1 = 0.f;
+ a2.r = 0.f, a2.i = 0.f;
+ a3 = 0.f;
+ if (*k + i__ <= *m) {
+ i__3 = *k + i__ + (*n - *l + i__) * a_dim1;
+ a1 = a[i__3].r;
+ }
+ if (*k + j <= *m) {
+ i__3 = *k + j + (*n - *l + j) * a_dim1;
+ a3 = a[i__3].r;
+ }
+
+ i__3 = i__ + (*n - *l + i__) * b_dim1;
+ b1 = b[i__3].r;
+ i__3 = j + (*n - *l + j) * b_dim1;
+ b3 = b[i__3].r;
+
+ if (upper) {
+ if (*k + i__ <= *m) {
+ i__3 = *k + i__ + (*n - *l + j) * a_dim1;
+ a2.r = a[i__3].r, a2.i = a[i__3].i;
+ }
+ i__3 = i__ + (*n - *l + j) * b_dim1;
+ b2.r = b[i__3].r, b2.i = b[i__3].i;
+ } else {
+ if (*k + j <= *m) {
+ i__3 = *k + j + (*n - *l + i__) * a_dim1;
+ a2.r = a[i__3].r, a2.i = a[i__3].i;
+ }
+ i__3 = j + (*n - *l + i__) * b_dim1;
+ b2.r = b[i__3].r, b2.i = b[i__3].i;
+ }
+
+ clags2_(&upper, &a1, &a2, &a3, &b1, &b2, &b3, &csu, &snu, &
+ csv, &snv, &csq, &snq);
+
+/* Update (K+I)-th and (K+J)-th rows of matrix A: U'*A */
+
+ if (*k + j <= *m) {
+ r_cnjg(&q__1, &snu);
+ crot_(l, &a[*k + j + (*n - *l + 1) * a_dim1], lda, &a[*k
+ + i__ + (*n - *l + 1) * a_dim1], lda, &csu, &q__1)
+ ;
+ }
+
+/* Update I-th and J-th rows of matrix B: V'*B */
+
+ r_cnjg(&q__1, &snv);
+ crot_(l, &b[j + (*n - *l + 1) * b_dim1], ldb, &b[i__ + (*n - *
+ l + 1) * b_dim1], ldb, &csv, &q__1);
+
+/* Update (N-L+I)-th and (N-L+J)-th columns of matrices */
+/* A and B: A*Q and B*Q */
+
+/* Computing MIN */
+ i__4 = *k + *l;
+ i__3 = min(i__4,*m);
+ crot_(&i__3, &a[(*n - *l + j) * a_dim1 + 1], &c__1, &a[(*n - *
+ l + i__) * a_dim1 + 1], &c__1, &csq, &snq);
+
+ crot_(l, &b[(*n - *l + j) * b_dim1 + 1], &c__1, &b[(*n - *l +
+ i__) * b_dim1 + 1], &c__1, &csq, &snq);
+
+ if (upper) {
+ if (*k + i__ <= *m) {
+ i__3 = *k + i__ + (*n - *l + j) * a_dim1;
+ a[i__3].r = 0.f, a[i__3].i = 0.f;
+ }
+ i__3 = i__ + (*n - *l + j) * b_dim1;
+ b[i__3].r = 0.f, b[i__3].i = 0.f;
+ } else {
+ if (*k + j <= *m) {
+ i__3 = *k + j + (*n - *l + i__) * a_dim1;
+ a[i__3].r = 0.f, a[i__3].i = 0.f;
+ }
+ i__3 = j + (*n - *l + i__) * b_dim1;
+ b[i__3].r = 0.f, b[i__3].i = 0.f;
+ }
+
+/* Ensure that the diagonal elements of A and B are real. */
+
+ if (*k + i__ <= *m) {
+ i__3 = *k + i__ + (*n - *l + i__) * a_dim1;
+ i__4 = *k + i__ + (*n - *l + i__) * a_dim1;
+ r__1 = a[i__4].r;
+ a[i__3].r = r__1, a[i__3].i = 0.f;
+ }
+ if (*k + j <= *m) {
+ i__3 = *k + j + (*n - *l + j) * a_dim1;
+ i__4 = *k + j + (*n - *l + j) * a_dim1;
+ r__1 = a[i__4].r;
+ a[i__3].r = r__1, a[i__3].i = 0.f;
+ }
+ i__3 = i__ + (*n - *l + i__) * b_dim1;
+ i__4 = i__ + (*n - *l + i__) * b_dim1;
+ r__1 = b[i__4].r;
+ b[i__3].r = r__1, b[i__3].i = 0.f;
+ i__3 = j + (*n - *l + j) * b_dim1;
+ i__4 = j + (*n - *l + j) * b_dim1;
+ r__1 = b[i__4].r;
+ b[i__3].r = r__1, b[i__3].i = 0.f;
+
+/* Update unitary matrices U, V, Q, if desired. */
+
+ if (wantu && *k + j <= *m) {
+ crot_(m, &u[(*k + j) * u_dim1 + 1], &c__1, &u[(*k + i__) *
+ u_dim1 + 1], &c__1, &csu, &snu);
+ }
+
+ if (wantv) {
+ crot_(p, &v[j * v_dim1 + 1], &c__1, &v[i__ * v_dim1 + 1],
+ &c__1, &csv, &snv);
+ }
+
+ if (wantq) {
+ crot_(n, &q[(*n - *l + j) * q_dim1 + 1], &c__1, &q[(*n - *
+ l + i__) * q_dim1 + 1], &c__1, &csq, &snq);
+ }
+
+/* L10: */
+ }
+/* L20: */
+ }
+
+ if (! upper) {
+
+/* The matrices A13 and B13 were lower triangular at the start */
+/* of the cycle, and are now upper triangular. */
+
+/* Convergence test: test the parallelism of the corresponding */
+/* rows of A and B. */
+
+ error = 0.f;
+/* Computing MIN */
+ i__2 = *l, i__3 = *m - *k;
+ i__1 = min(i__2,i__3);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *l - i__ + 1;
+ ccopy_(&i__2, &a[*k + i__ + (*n - *l + i__) * a_dim1], lda, &
+ work[1], &c__1);
+ i__2 = *l - i__ + 1;
+ ccopy_(&i__2, &b[i__ + (*n - *l + i__) * b_dim1], ldb, &work[*
+ l + 1], &c__1);
+ i__2 = *l - i__ + 1;
+ clapll_(&i__2, &work[1], &c__1, &work[*l + 1], &c__1, &ssmin);
+ error = dmax(error,ssmin);
+/* L30: */
+ }
+
+ if (dabs(error) <= dmin(*tola,*tolb)) {
+ goto L50;
+ }
+ }
+
+/* End of cycle loop */
+
+/* L40: */
+ }
+
+/* The algorithm has not converged after MAXIT cycles. */
+
+ *info = 1;
+ goto L100;
+
+L50:
+
+/* If ERROR <= MIN(TOLA,TOLB), then the algorithm has converged. */
+/* Compute the generalized singular value pairs (ALPHA, BETA), and */
+/* set the triangular matrix R to array A. */
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ alpha[i__] = 1.f;
+ beta[i__] = 0.f;
+/* L60: */
+ }
+
+/* Computing MIN */
+ i__2 = *l, i__3 = *m - *k;
+ i__1 = min(i__2,i__3);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+ i__2 = *k + i__ + (*n - *l + i__) * a_dim1;
+ a1 = a[i__2].r;
+ i__2 = i__ + (*n - *l + i__) * b_dim1;
+ b1 = b[i__2].r;
+
+ if (a1 != 0.f) {
+ gamma = b1 / a1;
+
+ if (gamma < 0.f) {
+ i__2 = *l - i__ + 1;
+ csscal_(&i__2, &c_b39, &b[i__ + (*n - *l + i__) * b_dim1],
+ ldb);
+ if (wantv) {
+ csscal_(p, &c_b39, &v[i__ * v_dim1 + 1], &c__1);
+ }
+ }
+
+ r__1 = dabs(gamma);
+ slartg_(&r__1, &c_b42, &beta[*k + i__], &alpha[*k + i__], &rwk);
+
+ if (alpha[*k + i__] >= beta[*k + i__]) {
+ i__2 = *l - i__ + 1;
+ r__1 = 1.f / alpha[*k + i__];
+ csscal_(&i__2, &r__1, &a[*k + i__ + (*n - *l + i__) * a_dim1],
+ lda);
+ } else {
+ i__2 = *l - i__ + 1;
+ r__1 = 1.f / beta[*k + i__];
+ csscal_(&i__2, &r__1, &b[i__ + (*n - *l + i__) * b_dim1], ldb)
+ ;
+ i__2 = *l - i__ + 1;
+ ccopy_(&i__2, &b[i__ + (*n - *l + i__) * b_dim1], ldb, &a[*k
+ + i__ + (*n - *l + i__) * a_dim1], lda);
+ }
+
+ } else {
+ alpha[*k + i__] = 0.f;
+ beta[*k + i__] = 1.f;
+ i__2 = *l - i__ + 1;
+ ccopy_(&i__2, &b[i__ + (*n - *l + i__) * b_dim1], ldb, &a[*k +
+ i__ + (*n - *l + i__) * a_dim1], lda);
+ }
+/* L70: */
+ }
+
+/* Post-assignment */
+
+ i__1 = *k + *l;
+ for (i__ = *m + 1; i__ <= i__1; ++i__) {
+ alpha[i__] = 0.f;
+ beta[i__] = 1.f;
+/* L80: */
+ }
+
+ if (*k + *l < *n) {
+ i__1 = *n;
+ for (i__ = *k + *l + 1; i__ <= i__1; ++i__) {
+ alpha[i__] = 0.f;
+ beta[i__] = 0.f;
+/* L90: */
+ }
+ }
+
+L100:
+ *ncycle = kcycle;
+
+ return 0;
+
+/* End of CTGSJA */
+
+} /* ctgsja_ */
diff --git a/contrib/libs/clapack/ctgsna.c b/contrib/libs/clapack/ctgsna.c
new file mode 100644
index 0000000000..d235cdb719
--- /dev/null
+++ b/contrib/libs/clapack/ctgsna.c
@@ -0,0 +1,484 @@
+/* ctgsna.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static complex c_b19 = {1.f,0.f};
+static complex c_b20 = {0.f,0.f};
+static logical c_false = FALSE_;
+static integer c__3 = 3;
+
+/* Subroutine */ int ctgsna_(char *job, char *howmny, logical *select, 
+	integer *n, complex *a, integer *lda, complex *b, integer *ldb, 
+	complex *vl, integer *ldvl, complex *vr, integer *ldvr, real *s, real 
+	*dif, integer *mm, integer *m, complex *work, integer *lwork, integer 
+	*iwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, vl_dim1, vl_offset, vr_dim1, 
+	    vr_offset, i__1;
+    real r__1, r__2;
+    complex q__1;
+
+    /* Builtin functions */
+    double c_abs(complex *);
+
+    /* Local variables */
+    integer i__, k, n1, n2, ks;
+    real eps, cond;
+    integer ierr, ifst;
+    real lnrm;
+    complex yhax, yhbx;
+    integer ilst;
+    real rnrm, scale;
+    extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer 
+	    *, complex *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *);
+    integer lwmin;
+    logical wants;
+    complex dummy[1];
+    extern doublereal scnrm2_(integer *, complex *, integer *), slapy2_(real *
+, real *);
+    complex dummy1[1];
+    extern /* Subroutine */ int slabad_(real *, real *);
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex 
+	    *, integer *, complex *, integer *), ctgexc_(logical *, 
+	    logical *, integer *, complex *, integer *, complex *, integer *, 
+	    complex *, integer *, complex *, integer *, integer *, integer *, 
+	    integer *), xerbla_(char *, integer *);
+    real bignum;
+    logical wantbh, wantdf, somcon;
+    extern /* Subroutine */ int ctgsyl_(char *, integer *, integer *, integer 
+	    *, complex *, integer *, complex *, integer *, complex *, integer 
+	    *, complex *, integer *, complex *, integer *, complex *, integer 
+	    *, real *, real *, complex *, integer *, integer *, integer *);
+    real smlnum;
+    logical lquery;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CTGSNA estimates reciprocal condition numbers for specified */
+/*  eigenvalues and/or eigenvectors of a matrix pair (A, B). */
+
+/*  (A, B) must be in generalized Schur canonical form, that is, A and */
+/*  B are both upper triangular. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOB     (input) CHARACTER*1 */
+/*          Specifies whether condition numbers are required for */
+/*          eigenvalues (S) or eigenvectors (DIF): */
+/*          = 'E': for eigenvalues only (S); */
+/*          = 'V': for eigenvectors only (DIF); */
+/*          = 'B': for both eigenvalues and eigenvectors (S and DIF). */
+
+/*  HOWMNY  (input) CHARACTER*1 */
+/*          = 'A': compute condition numbers for all eigenpairs; */
+/*          = 'S': compute condition numbers for selected eigenpairs */
+/*                 specified by the array SELECT. */
+
+/*  SELECT  (input) LOGICAL array, dimension (N) */
+/*          If HOWMNY = 'S', SELECT specifies the eigenpairs for which */
+/*          condition numbers are required. To select condition numbers */
+/*          for the corresponding j-th eigenvalue and/or eigenvector, */
+/*          SELECT(j) must be set to .TRUE.. */
+/*          If HOWMNY = 'A', SELECT is not referenced. */
+
+/*  N       (input) INTEGER */
+/*          The order of the square matrix pair (A, B). N >= 0. */
+
+/*  A       (input) COMPLEX array, dimension (LDA,N) */
+/*          The upper triangular matrix A in the pair (A,B). */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A. LDA >= max(1,N). */
+
+/*  B       (input) COMPLEX array, dimension (LDB,N) */
+/*          The upper triangular matrix B in the pair (A, B). */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B. LDB >= max(1,N). */
+
+/*  VL      (input) COMPLEX array, dimension (LDVL,M) */
+/*          IF JOB = 'E' or 'B', VL must contain left eigenvectors of */
+/*          (A, B), corresponding to the eigenpairs specified by HOWMNY */
+/*          and SELECT.  The eigenvectors must be stored in consecutive */
+/*          columns of VL, as returned by CTGEVC. */
+/*          If JOB = 'V', VL is not referenced. */
+
+/*  LDVL    (input) INTEGER */
+/*          The leading dimension of the array VL. LDVL >= 1; and */
+/*          If JOB = 'E' or 'B', LDVL >= N. */
+
+/*  VR      (input) COMPLEX array, dimension (LDVR,M) */
+/*          IF JOB = 'E' or 'B', VR must contain right eigenvectors of */
+/*          (A, B), corresponding to the eigenpairs specified by HOWMNY */
+/*          and SELECT.  The eigenvectors must be stored in consecutive */
+/*          columns of VR, as returned by CTGEVC. */
+/*          If JOB = 'V', VR is not referenced. */
+
+/*  LDVR    (input) INTEGER */
+/*          The leading dimension of the array VR. LDVR >= 1; */
+/*          If JOB = 'E' or 'B', LDVR >= N. */
+
+/*  S       (output) REAL array, dimension (MM) */
+/*          If JOB = 'E' or 'B', the reciprocal condition numbers of the */
+/*          selected eigenvalues, stored in consecutive elements of the */
+/*          array. */
+/*          If JOB = 'V', S is not referenced. */
+
+/*  DIF     (output) REAL array, dimension (MM) */
+/*          If JOB = 'V' or 'B', the estimated reciprocal condition */
+/*          numbers of the selected eigenvectors, stored in consecutive */
+/*          elements of the array. */
+/*          If the eigenvalues cannot be reordered to compute DIF(j), */
+/*          DIF(j) is set to 0; this can only occur when the true value */
+/*          would be very small anyway. */
+/*          For each eigenvalue/vector specified by SELECT, DIF stores */
+/*          a Frobenius norm-based estimate of Difl. */
+/*          If JOB = 'E', DIF is not referenced. */
+
+/*  MM      (input) INTEGER */
+/*          The number of elements in the arrays S and DIF. MM >= M. */
+
+/*  M       (output) INTEGER */
+/*          The number of elements of the arrays S and DIF used to store */
+/*          the specified condition numbers; for each selected eigenvalue */
+/*          one element is used. If HOWMNY = 'A', M is set to N. */
+
+/*  WORK    (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/*          On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/*  LWORK  (input) INTEGER */
+/*          The dimension of the array WORK. LWORK >= max(1,N). */
+/*          If JOB = 'V' or 'B', LWORK >= max(1,2*N*N). */
+
+/*  IWORK   (workspace) INTEGER array, dimension (N+2) */
+/*          If JOB = 'E', IWORK is not referenced. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0: Successful exit */
+/*          < 0: If INFO = -i, the i-th argument had an illegal value */
+
+/*  Further Details */
+/*  =============== */
+
+/*  The reciprocal of the condition number of the i-th generalized */
+/*  eigenvalue w = (a, b) is defined as */
+
+/*          S(I) = (|v'Au|**2 + |v'Bu|**2)**(1/2) / (norm(u)*norm(v)) */
+
+/*  where u and v are the right and left eigenvectors of (A, B) */
+/*  corresponding to w; |z| denotes the absolute value of the complex */
+/*  number, and norm(u) denotes the 2-norm of the vector u. The pair */
+/*  (a, b) corresponds to an eigenvalue w = a/b (= v'Au/v'Bu) of the */
+/*  matrix pair (A, B). If both a and b equal zero, then (A,B) is */
+/*  singular and S(I) = -1 is returned. */
+
+/*  An approximate error bound on the chordal distance between the i-th */
+/*  computed generalized eigenvalue w and the corresponding exact */
+/*  eigenvalue lambda is */
+
+/*          chord(w, lambda) <=   EPS * norm(A, B) / S(I), */
+
+/*  where EPS is the machine precision. */
+
+/*  The reciprocal of the condition number of the right eigenvector u */
+/*  and left eigenvector v corresponding to the generalized eigenvalue w */
+/*  is defined as follows. Suppose */
+
+/*                   (A, B) = ( a   *  ) ( b  *  )  1 */
+/*                            ( 0  A22 ),( 0 B22 )  n-1 */
+/*                              1  n-1     1 n-1 */
+
+/*  Then the reciprocal condition number DIF(I) is */
+
+/*          Difl[(a, b), (A22, B22)]  = sigma-min( Zl ) */
+
+/*  where sigma-min(Zl) denotes the smallest singular value of */
+
+/*         Zl = [ kron(a, In-1) -kron(1, A22) ] */
+/*              [ kron(b, In-1) -kron(1, B22) ]. */
+
+/*  Here In-1 is the identity matrix of size n-1 and X' is the conjugate */
+/*  transpose of X. kron(X, Y) is the Kronecker product between the */
+/*  matrices X and Y. */
+
+/*  We approximate the smallest singular value of Zl with an upper */
+/*  bound. This is done by CLATDF. */
+
+/*  An approximate error bound for a computed eigenvector VL(i) or */
+/*  VR(i) is given by */
+
+/*                      EPS * norm(A, B) / DIF(i). */
+
+/*  See ref. [2-3] for more details and further references. */
+
+/*  Based on contributions by */
+/*     Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/*     Umea University, S-901 87 Umea, Sweden. */
+
+/*  References */
+/*  ========== */
+
+/*  [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */
+/*      Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */
+/*      M.S. Moonen et al (eds), Linear Algebra for Large Scale and */
+/*      Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */
+
+/*  [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified */
+/*      Eigenvalues of a Regular Matrix Pair (A, B) and Condition */
+/*      Estimation: Theory, Algorithms and Software, Report */
+/*      UMINF - 94.04, Department of Computing Science, Umea University, */
+/*      S-901 87 Umea, Sweden, 1994. Also as LAPACK Working Note 87. */
+/*      To appear in Numerical Algorithms, 1996. */
+
+/*  [3] B. Kagstrom and P. Poromaa, LAPACK-Style Algorithms and Software */
+/*      for Solving the Generalized Sylvester Equation and Estimating the */
+/*      Separation between Regular Matrix Pairs, Report UMINF - 93.23, */
+/*      Department of Computing Science, Umea University, S-901 87 Umea, */
+/*      Sweden, December 1993, Revised April 1994, Also as LAPACK Working */
+/*      Note 75. */
+/*      To appear in ACM Trans. on Math. Software, Vol 22, No 1, 1996. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Decode and test the input parameters */
+
+    /* Parameter adjustments */
+    --select;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    vl_dim1 = *ldvl;
+    vl_offset = 1 + vl_dim1;
+    vl -= vl_offset;
+    vr_dim1 = *ldvr;
+    vr_offset = 1 + vr_dim1;
+    vr -= vr_offset;
+    --s;
+    --dif;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    wantbh = lsame_(job, "B"); /* JOB = 'B': both S and DIF requested */
+    wants = lsame_(job, "E") || wantbh;
+    wantdf = lsame_(job, "V") || wantbh;
+
+    somcon = lsame_(howmny, "S"); /* condition numbers only for SELECTed pairs */
+
+    *info = 0;
+    lquery = *lwork == -1; /* LWORK = -1: workspace-size query, no computation */
+
+    if (! wants && ! wantdf) {
+	*info = -1;
+    } else if (! lsame_(howmny, "A") && ! somcon) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*n)) {
+	*info = -6;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    } else if (wants && *ldvl < *n) {
+	*info = -10;
+    } else if (wants && *ldvr < *n) {
+	*info = -12;
+    } else {
+
+/*        Set M to the number of eigenpairs for which condition numbers */
+/*        are required, and test MM. */
+
+	if (somcon) {
+	    *m = 0;
+	    i__1 = *n;
+	    for (k = 1; k <= i__1; ++k) {
+		if (select[k]) {
+		    ++(*m);
+		}
+/* L10: */
+	    }
+	} else {
+	    *m = *n;
+	}
+
+	if (*n == 0) {
+	    lwmin = 1;
+	} else if (lsame_(job, "V") || lsame_(job, 
+		"B")) {
+	    lwmin = (*n << 1) * *n; /* DIF estimation needs two N-by-N copies */
+	} else {
+	    lwmin = *n;
+	}
+	work[1].r = (real) lwmin, work[1].i = 0.f;
+
+	if (*mm < *m) {
+	    *info = -15;
+	} else if (*lwork < lwmin && ! lquery) {
+	    *info = -18;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CTGSNA", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Get machine constants */
+
+    eps = slamch_("P");
+    smlnum = slamch_("S") / eps; /* safe minimum scaled by precision */
+    bignum = 1.f / smlnum;
+    slabad_(&smlnum, &bignum);
+    ks = 0; /* ks indexes the next slot written in S/DIF */
+    i__1 = *n;
+    for (k = 1; k <= i__1; ++k) {
+
+/*        Determine whether condition numbers are required for the k-th */
+/*        eigenpair. */
+
+	if (somcon) {
+	    if (! select[k]) {
+		goto L20;
+	    }
+	}
+
+	++ks;
+
+	if (wants) {
+
+/*           Compute the reciprocal condition number of the k-th */
+/*           eigenvalue. */
+
+	    rnrm = scnrm2_(n, &vr[ks * vr_dim1 + 1], &c__1);
+	    lnrm = scnrm2_(n, &vl[ks * vl_dim1 + 1], &c__1);
+	    cgemv_("N", n, n, &c_b19, &a[a_offset], lda, &vr[ks * vr_dim1 + 1]
+, &c__1, &c_b20, &work[1], &c__1);
+	    cdotc_(&q__1, n, &work[1], &c__1, &vl[ks * vl_dim1 + 1], &c__1);
+	    yhax.r = q__1.r, yhax.i = q__1.i; /* yhax = v' * A * u */
+	    cgemv_("N", n, n, &c_b19, &b[b_offset], ldb, &vr[ks * vr_dim1 + 1]
+, &c__1, &c_b20, &work[1], &c__1);
+	    cdotc_(&q__1, n, &work[1], &c__1, &vl[ks * vl_dim1 + 1], &c__1);
+	    yhbx.r = q__1.r, yhbx.i = q__1.i; /* yhbx = v' * B * u */
+	    r__1 = c_abs(&yhax);
+	    r__2 = c_abs(&yhbx);
+	    cond = slapy2_(&r__1, &r__2); /* sqrt(|v'Au|^2 + |v'Bu|^2), overflow-safe */
+	    if (cond == 0.f) {
+		s[ks] = -1.f; /* both projections vanish: (A,B) singular, per contract above */
+	    } else {
+		s[ks] = cond / (rnrm * lnrm);
+	    }
+	}
+
+	if (wantdf) {
+	    if (*n == 1) {
+		r__1 = c_abs(&a[a_dim1 + 1]);
+		r__2 = c_abs(&b[b_dim1 + 1]);
+		dif[ks] = slapy2_(&r__1, &r__2);
+	    } else {
+
+/*              Estimate the reciprocal condition number of the k-th */
+/*              eigenvectors. */
+
+/*              Copy the matrix (A, B) to the array WORK and move the */
+/*              (k,k)th pair to the (1,1) position. */
+
+		clacpy_("Full", n, n, &a[a_offset], lda, &work[1], n);
+		clacpy_("Full", n, n, &b[b_offset], ldb, &work[*n * *n + 1], 
+			n);
+		ifst = k;
+		ilst = 1; /* reorder so the k-th pair leads the pencil */
+
+		ctgexc_(&c_false, &c_false, n, &work[1], n, &work[*n * *n + 1]
+, n, dummy, &c__1, dummy1, &c__1, &ifst, &ilst, &ierr)
+			;
+
+		if (ierr > 0) {
+
+/*                 Ill-conditioned problem - swap rejected. */
+
+		    dif[ks] = 0.f;
+		} else {
+
+/*                 Reordering successful, solve generalized Sylvester */
+/*                 equation for R and L, */
+/*                            A22 * R - L * A11 = A12 */
+/*                            B22 * R - L * B11 = B12, */
+/*                 and compute estimate of Difl[(A11,B11), (A22, B22)]. */
+
+		    n1 = 1;
+		    n2 = *n - n1;
+		    i__ = *n * *n + 1; /* offset of the B-copy inside WORK */
+		    ctgsyl_("N", &c__3, &n2, &n1, &work[*n * n1 + n1 + 1], n, 
+			    &work[1], n, &work[n1 + 1], n, &work[*n * n1 + n1 
+			    + i__], n, &work[i__], n, &work[n1 + i__], n, &
+			    scale, &dif[ks], dummy, &c__1, &iwork[1], &ierr);
+		}
+	    }
+	}
+
+L20:
+	;
+    }
+    work[1].r = (real) lwmin, work[1].i = 0.f; /* report optimal LWORK */
+    return 0;
+
+/*     End of CTGSNA */
+
+} /* ctgsna_ */
diff --git a/contrib/libs/clapack/ctgsy2.c b/contrib/libs/clapack/ctgsy2.c
new file mode 100644
index 0000000000..055e2ed036
--- /dev/null
+++ b/contrib/libs/clapack/ctgsy2.c
@@ -0,0 +1,477 @@
+/* ctgsy2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__2 = 2;
+static integer c__1 = 1;
+
+/* Subroutine */ int ctgsy2_(char *trans, integer *ijob, integer *m, integer *
+	n, complex *a, integer *lda, complex *b, integer *ldb, complex *c__, 
+	integer *ldc, complex *d__, integer *ldd, complex *e, integer *lde, 
+	complex *f, integer *ldf, real *scale, real *rdsum, real *rdscal, 
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, d_dim1, 
+	    d_offset, e_dim1, e_offset, f_dim1, f_offset, i__1, i__2, i__3, 
+	    i__4;
+    complex q__1, q__2, q__3, q__4, q__5, q__6;
+
+    /* Builtin functions */
+    void r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    integer i__, j, k;
+    complex z__[4]	/* was [2][2] */, rhs[2];
+    integer ierr, ipiv[2], jpiv[2];
+    complex alpha;
+    extern /* Subroutine */ int cscal_(integer *, complex *, complex *, 
+	    integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int caxpy_(integer *, complex *, complex *, 
+	    integer *, complex *, integer *), cgesc2_(integer *, complex *, 
+	    integer *, complex *, integer *, integer *, real *), cgetc2_(
+	    integer *, complex *, integer *, integer *, integer *, integer *), 
+	     clatdf_(integer *, integer *, complex *, integer *, complex *, 
+	    real *, real *, integer *, integer *);
+    real scaloc;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical notran;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CTGSY2 solves the generalized Sylvester equation */
+
+/*              A * R - L * B = scale *  C               (1) */
+/*              D * R - L * E = scale * F */
+
+/*  using Level 1 and 2 BLAS, where R and L are unknown M-by-N matrices, */
+/*  (A, D), (B, E) and (C, F) are given matrix pairs of size M-by-M, */
+/*  N-by-N and M-by-N, respectively. A, B, D and E are upper triangular */
+/*  (i.e., (A,D) and (B,E) in generalized Schur form). */
+
+/*  The solution (R, L) overwrites (C, F). 0 <= SCALE <= 1 is an output */
+/*  scaling factor chosen to avoid overflow. */
+
+/*  In matrix notation solving equation (1) corresponds to solve */
+/*  Zx = scale * b, where Z is defined as */
+
+/*         Z = [ kron(In, A)  -kron(B', Im) ]             (2) */
+/*             [ kron(In, D)  -kron(E', Im) ], */
+
+/*  Ik is the identity matrix of size k and X' is the transpose of X. */
+/*  kron(X, Y) is the Kronecker product between the matrices X and Y. */
+
+/*  If TRANS = 'C', y in the conjugate transposed system Z'y = scale*b */
+/*  is solved for, which is equivalent to solve for R and L in */
+
+/*              A' * R  + D' * L   = scale *  C           (3) */
+/*              R  * B' + L  * E'  = scale * -F */
+
+/*  This case is used to compute an estimate of Dif[(A, D), (B, E)] = */
+/*  = sigma_min(Z) using reverse communicaton with CLACON. */
+
+/*  CTGSY2 also (IJOB >= 1) contributes to the computation in CTGSYL */
+/*  of an upper bound on the separation between to matrix pairs. Then */
+/*  the input (A, D), (B, E) are sub-pencils of two matrix pairs in */
+/*  CTGSYL. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  TRANS   (input) CHARACTER*1 */
+/*          = 'N', solve the generalized Sylvester equation (1). */
+/*          = 'T': solve the 'transposed' system (3). */
+
+/*  IJOB    (input) INTEGER */
+/*          Specifies what kind of functionality to be performed. */
+/*          =0: solve (1) only. */
+/*          =1: A contribution from this subsystem to a Frobenius */
+/*              norm-based estimate of the separation between two matrix */
+/*              pairs is computed. (look ahead strategy is used). */
+/*          =2: A contribution from this subsystem to a Frobenius */
+/*              norm-based estimate of the separation between two matrix */
+/*              pairs is computed. (SGECON on sub-systems is used.) */
+/*          Not referenced if TRANS = 'T'. */
+
+/*  M       (input) INTEGER */
+/*          On entry, M specifies the order of A and D, and the row */
+/*          dimension of C, F, R and L. */
+
+/*  N       (input) INTEGER */
+/*          On entry, N specifies the order of B and E, and the column */
+/*          dimension of C, F, R and L. */
+
+/*  A       (input) COMPLEX array, dimension (LDA, M) */
+/*          On entry, A contains an upper triangular matrix. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the matrix A. LDA >= max(1, M). */
+
+/*  B       (input) COMPLEX array, dimension (LDB, N) */
+/*          On entry, B contains an upper triangular matrix. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the matrix B. LDB >= max(1, N). */
+
+/*  C       (input/output) COMPLEX array, dimension (LDC, N) */
+/*          On entry, C contains the right-hand-side of the first matrix */
+/*          equation in (1). */
+/*          On exit, if IJOB = 0, C has been overwritten by the solution */
+/*          R. */
+
+/*  LDC     (input) INTEGER */
+/*          The leading dimension of the matrix C. LDC >= max(1, M). */
+
+/*  D       (input) COMPLEX array, dimension (LDD, M) */
+/*          On entry, D contains an upper triangular matrix. */
+
+/*  LDD     (input) INTEGER */
+/*          The leading dimension of the matrix D. LDD >= max(1, M). */
+
+/*  E       (input) COMPLEX array, dimension (LDE, N) */
+/*          On entry, E contains an upper triangular matrix. */
+
+/*  LDE     (input) INTEGER */
+/*          The leading dimension of the matrix E. LDE >= max(1, N). */
+
+/*  F       (input/output) COMPLEX array, dimension (LDF, N) */
+/*          On entry, F contains the right-hand-side of the second matrix */
+/*          equation in (1). */
+/*          On exit, if IJOB = 0, F has been overwritten by the solution */
+/*          L. */
+
+/*  LDF     (input) INTEGER */
+/*          The leading dimension of the matrix F. LDF >= max(1, M). */
+
+/*  SCALE   (output) REAL */
+/*          On exit, 0 <= SCALE <= 1. If 0 < SCALE < 1, the solutions */
+/*          R and L (C and F on entry) will hold the solutions to a */
+/*          slightly perturbed system but the input matrices A, B, D and */
+/*          E have not been changed. If SCALE = 0, R and L will hold the */
+/*          solutions to the homogeneous system with C = F = 0. */
+/*          Normally, SCALE = 1. */
+
+/*  RDSUM   (input/output) REAL */
+/*          On entry, the sum of squares of computed contributions to */
+/*          the Dif-estimate under computation by CTGSYL, where the */
+/*          scaling factor RDSCAL (see below) has been factored out. */
+/*          On exit, the corresponding sum of squares updated with the */
+/*          contributions from the current sub-system. */
+/*          If TRANS = 'T' RDSUM is not touched. */
+/*          NOTE: RDSUM only makes sense when CTGSY2 is called by */
+/*          CTGSYL. */
+
+/*  RDSCAL  (input/output) REAL */
+/*          On entry, scaling factor used to prevent overflow in RDSUM. */
+/*          On exit, RDSCAL is updated w.r.t. the current contributions */
+/*          in RDSUM. */
+/*          If TRANS = 'T', RDSCAL is not touched. */
+/*          NOTE: RDSCAL only makes sense when CTGSY2 is called by */
+/*          CTGSYL. */
+
+/*  INFO    (output) INTEGER */
+/*          On exit, if INFO is set to */
+/*          =0: Successful exit */
+/*          <0: If INFO = -i, input argument number i is illegal. */
+/*          >0: The matrix pairs (A, D) and (B, E) have common or very */
+/*              close eigenvalues. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*     Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/*     Umea University, S-901 87 Umea, Sweden. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Decode and test input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+    d_dim1 = *ldd;
+    d_offset = 1 + d_dim1;
+    d__ -= d_offset;
+    e_dim1 = *lde;
+    e_offset = 1 + e_dim1;
+    e -= e_offset;
+    f_dim1 = *ldf;
+    f_offset = 1 + f_dim1;
+    f -= f_offset;
+
+    /* Function Body */
+    *info = 0;
+    ierr = 0;
+    notran = lsame_(trans, "N"); /* 'N': solve (1); 'C': conjugate-transposed system (3) */
+    if (! notran && ! lsame_(trans, "C")) {
+	*info = -1;
+    } else if (notran) {
+	if (*ijob < 0 || *ijob > 2) {
+	    *info = -2;
+	}
+    }
+    if (*info == 0) {
+	if (*m <= 0) {
+	    *info = -3;
+	} else if (*n <= 0) {
+	    *info = -4;
+	} else if (*lda < max(1,*m)) {
+	    *info = -5;
+	} else if (*ldb < max(1,*n)) {
+	    *info = -8;
+	} else if (*ldc < max(1,*m)) {
+	    *info = -10;
+	} else if (*ldd < max(1,*m)) {
+	    *info = -12;
+	} else if (*lde < max(1,*n)) {
+	    *info = -14;
+	} else if (*ldf < max(1,*m)) {
+	    *info = -16;
+	}
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CTGSY2", &i__1);
+	return 0;
+    }
+
+    if (notran) {
+
+/*        Solve (I, J) - system */
+/*           A(I, I) * R(I, J) - L(I, J) * B(J, J) = C(I, J) */
+/*           D(I, I) * R(I, J) - L(I, J) * E(J, J) = F(I, J) */
+/*        for I = M, M - 1, ..., 1; J = 1, 2, ..., N */
+
+	*scale = 1.f;
+	scaloc = 1.f;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    for (i__ = *m; i__ >= 1; --i__) {
+
+/*              Build 2 by 2 system */
+
+		i__2 = i__ + i__ * a_dim1;
+		z__[0].r = a[i__2].r, z__[0].i = a[i__2].i; /* Z column-major: [a d; -b -e] */
+		i__2 = i__ + i__ * d_dim1;
+		z__[1].r = d__[i__2].r, z__[1].i = d__[i__2].i;
+		i__2 = j + j * b_dim1;
+		q__1.r = -b[i__2].r, q__1.i = -b[i__2].i;
+		z__[2].r = q__1.r, z__[2].i = q__1.i;
+		i__2 = j + j * e_dim1;
+		q__1.r = -e[i__2].r, q__1.i = -e[i__2].i;
+		z__[3].r = q__1.r, z__[3].i = q__1.i;
+
+/*              Set up right hand side(s) */
+
+		i__2 = i__ + j * c_dim1;
+		rhs[0].r = c__[i__2].r, rhs[0].i = c__[i__2].i;
+		i__2 = i__ + j * f_dim1;
+		rhs[1].r = f[i__2].r, rhs[1].i = f[i__2].i;
+
+/*              Solve Z * x = RHS */
+
+		cgetc2_(&c__2, z__, &c__2, ipiv, jpiv, &ierr); /* LU of 2x2 Z, complete pivoting */
+		if (ierr > 0) {
+		    *info = ierr; /* pencils have common or very close eigenvalues */
+		}
+		if (*ijob == 0) {
+		    cgesc2_(&c__2, z__, &c__2, rhs, ipiv, jpiv, &scaloc);
+		    if (scaloc != 1.f) {
+			i__2 = *n;
+			for (k = 1; k <= i__2; ++k) {
+			    q__1.r = scaloc, q__1.i = 0.f;
+			    cscal_(m, &q__1, &c__[k * c_dim1 + 1], &c__1);
+			    q__1.r = scaloc, q__1.i = 0.f;
+			    cscal_(m, &q__1, &f[k * f_dim1 + 1], &c__1);
+/* L10: */
+			}
+			*scale *= scaloc; /* fold per-step scaling into global SCALE */
+		    }
+		} else {
+		    clatdf_(ijob, &c__2, z__, &c__2, rhs, rdsum, rdscal, ipiv, 
+			     jpiv); /* accumulate Dif-estimate contribution */
+		}
+
+/*              Unpack solution vector(s) */
+
+		i__2 = i__ + j * c_dim1;
+		c__[i__2].r = rhs[0].r, c__[i__2].i = rhs[0].i;
+		i__2 = i__ + j * f_dim1;
+		f[i__2].r = rhs[1].r, f[i__2].i = rhs[1].i;
+
+/*              Substitute R(I, J) and L(I, J) into remaining equation. */
+
+		if (i__ > 1) {
+		    q__1.r = -rhs[0].r, q__1.i = -rhs[0].i;
+		    alpha.r = q__1.r, alpha.i = q__1.i;
+		    i__2 = i__ - 1;
+		    caxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &c__[j 
+			    * c_dim1 + 1], &c__1);
+		    i__2 = i__ - 1;
+		    caxpy_(&i__2, &alpha, &d__[i__ * d_dim1 + 1], &c__1, &f[j 
+			    * f_dim1 + 1], &c__1);
+		}
+		if (j < *n) {
+		    i__2 = *n - j;
+		    caxpy_(&i__2, &rhs[1], &b[j + (j + 1) * b_dim1], ldb, &
+			    c__[i__ + (j + 1) * c_dim1], ldc);
+		    i__2 = *n - j;
+		    caxpy_(&i__2, &rhs[1], &e[j + (j + 1) * e_dim1], lde, &f[
+			    i__ + (j + 1) * f_dim1], ldf);
+		}
+
+/* L20: */
+	    }
+/* L30: */
+	}
+    } else {
+
+/*        Solve transposed (I, J) - system: */
+/*           A(I, I)' * R(I, J) + D(I, I)' * L(J, J) = C(I, J) */
+/*           R(I, I) * B(J, J) + L(I, J) * E(J, J)   = -F(I, J) */
+/*        for I = 1, 2, ..., M, J = N, N - 1, ..., 1 */
+
+	*scale = 1.f;
+	scaloc = 1.f;
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    for (j = *n; j >= 1; --j) {
+
+/*              Build 2 by 2 system Z' */
+
+		r_cnjg(&q__1, &a[i__ + i__ * a_dim1]);
+		z__[0].r = q__1.r, z__[0].i = q__1.i; /* conjugate-transposed 2x2 system */
+		r_cnjg(&q__2, &b[j + j * b_dim1]);
+		q__1.r = -q__2.r, q__1.i = -q__2.i;
+		z__[1].r = q__1.r, z__[1].i = q__1.i;
+		r_cnjg(&q__1, &d__[i__ + i__ * d_dim1]);
+		z__[2].r = q__1.r, z__[2].i = q__1.i;
+		r_cnjg(&q__2, &e[j + j * e_dim1]);
+		q__1.r = -q__2.r, q__1.i = -q__2.i;
+		z__[3].r = q__1.r, z__[3].i = q__1.i;
+
+
+/*              Set up right hand side(s) */
+
+		i__2 = i__ + j * c_dim1;
+		rhs[0].r = c__[i__2].r, rhs[0].i = c__[i__2].i;
+		i__2 = i__ + j * f_dim1;
+		rhs[1].r = f[i__2].r, rhs[1].i = f[i__2].i;
+
+/*              Solve Z' * x = RHS */
+
+		cgetc2_(&c__2, z__, &c__2, ipiv, jpiv, &ierr); /* LU of 2x2 Z', complete pivoting */
+		if (ierr > 0) {
+		    *info = ierr;
+		}
+		cgesc2_(&c__2, z__, &c__2, rhs, ipiv, jpiv, &scaloc);
+		if (scaloc != 1.f) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			q__1.r = scaloc, q__1.i = 0.f;
+			cscal_(m, &q__1, &c__[k * c_dim1 + 1], &c__1);
+			q__1.r = scaloc, q__1.i = 0.f;
+			cscal_(m, &q__1, &f[k * f_dim1 + 1], &c__1);
+/* L40: */
+		    }
+		    *scale *= scaloc; /* fold per-step scaling into global SCALE */
+		}
+
+/*              Unpack solution vector(s) */
+
+		i__2 = i__ + j * c_dim1;
+		c__[i__2].r = rhs[0].r, c__[i__2].i = rhs[0].i;
+		i__2 = i__ + j * f_dim1;
+		f[i__2].r = rhs[1].r, f[i__2].i = rhs[1].i;
+
+/*              Substitute R(I, J) and L(I, J) into remaining equation. */
+
+		i__2 = j - 1;
+		for (k = 1; k <= i__2; ++k) {
+		    i__3 = i__ + k * f_dim1;
+		    i__4 = i__ + k * f_dim1;
+		    r_cnjg(&q__4, &b[k + j * b_dim1]);
+		    q__3.r = rhs[0].r * q__4.r - rhs[0].i * q__4.i, q__3.i = 
+			    rhs[0].r * q__4.i + rhs[0].i * q__4.r;
+		    q__2.r = f[i__4].r + q__3.r, q__2.i = f[i__4].i + q__3.i;
+		    r_cnjg(&q__6, &e[k + j * e_dim1]);
+		    q__5.r = rhs[1].r * q__6.r - rhs[1].i * q__6.i, q__5.i = 
+			    rhs[1].r * q__6.i + rhs[1].i * q__6.r;
+		    q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+		    f[i__3].r = q__1.r, f[i__3].i = q__1.i;
+/* L50: */
+		}
+		i__2 = *m;
+		for (k = i__ + 1; k <= i__2; ++k) {
+		    i__3 = k + j * c_dim1;
+		    i__4 = k + j * c_dim1;
+		    r_cnjg(&q__4, &a[i__ + k * a_dim1]);
+		    q__3.r = q__4.r * rhs[0].r - q__4.i * rhs[0].i, q__3.i = 
+			    q__4.r * rhs[0].i + q__4.i * rhs[0].r;
+		    q__2.r = c__[i__4].r - q__3.r, q__2.i = c__[i__4].i - 
+			    q__3.i;
+		    r_cnjg(&q__6, &d__[i__ + k * d_dim1]);
+		    q__5.r = q__6.r * rhs[1].r - q__6.i * rhs[1].i, q__5.i = 
+			    q__6.r * rhs[1].i + q__6.i * rhs[1].r;
+		    q__1.r = q__2.r - q__5.r, q__1.i = q__2.i - q__5.i;
+		    c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L60: */
+		}
+
+/* L70: */
+	    }
+/* L80: */
+	}
+    }
+    return 0;
+
+/*     End of CTGSY2 */
+
+} /* ctgsy2_ */
diff --git a/contrib/libs/clapack/ctgsyl.c b/contrib/libs/clapack/ctgsyl.c
new file mode 100644
index 0000000000..2e13f9bfd3
--- /dev/null
+++ b/contrib/libs/clapack/ctgsyl.c
@@ -0,0 +1,689 @@
+/* ctgsyl.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {0.f,0.f};
+static integer c__2 = 2;
+static integer c_n1 = -1;
+static integer c__5 = 5;
+static integer c__1 = 1;
+static complex c_b44 = {-1.f,0.f};
+static complex c_b45 = {1.f,0.f};
+
+/* Subroutine */ int ctgsyl_(char *trans, integer *ijob, integer *m, integer *
+	n, complex *a, integer *lda, complex *b, integer *ldb, complex *c__,
+	integer *ldc, complex *d__, integer *ldd, complex *e, integer *lde,
+	complex *f, integer *ldf, real *scale, real *dif, complex *work,
+	integer *lwork, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, d_dim1,
+	    d_offset, e_dim1, e_offset, f_dim1, f_offset, i__1, i__2, i__3,
+	    i__4;
+    complex q__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, k, p, q, ie, je, mb, nb, is, js, pq;
+    real dsum;
+    extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+	    integer *), cgemm_(char *, char *, integer *, integer *, integer *
+, complex *, complex *, integer *, complex *, integer *, complex *
+, complex *, integer *);
+    extern logical lsame_(char *, char *);
+    integer ifunc, linfo, lwmin;
+    real scale2;
+    extern /* Subroutine */ int ctgsy2_(char *, integer *, integer *, integer
+	    *, complex *, integer *, complex *, integer *, complex *, integer
+	    *, complex *, integer *, complex *, integer *, complex *, integer
+	    *, real *, real *, real *, integer *);
+    real dscale, scaloc;
+    extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+	    *, integer *, complex *, integer *), claset_(char *,
+	    integer *, integer *, complex *, complex *, complex *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer iround;
+    logical notran;
+    integer isolve;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* January 2007 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTGSYL solves the generalized Sylvester equation: */
+
+/* A * R - L * B = scale * C (1) */
+/* D * R - L * E = scale * F */
+
+/* where R and L are unknown m-by-n matrices, (A, D), (B, E) and */
+/* (C, F) are given matrix pairs of size m-by-m, n-by-n and m-by-n, */
+/* respectively, with complex entries. A, B, D and E are upper */
+/* triangular (i.e., (A,D) and (B,E) in generalized Schur form). */
+
+/* The solution (R, L) overwrites (C, F). 0 <= SCALE <= 1 */
+/* is an output scaling factor chosen to avoid overflow. */
+
+/* In matrix notation (1) is equivalent to solve Zx = scale*b, where Z */
+/* is defined as */
+
+/* Z = [ kron(In, A) -kron(B', Im) ] (2) */
+/* [ kron(In, D) -kron(E', Im) ], */
+
+/* Here Ix is the identity matrix of size x and X' is the conjugate */
+/* transpose of X. Kron(X, Y) is the Kronecker product between the */
+/* matrices X and Y. */
+
+/* If TRANS = 'C', y in the conjugate transposed system Z'*y = scale*b */
+/* is solved for, which is equivalent to solve for R and L in */
+
+/* A' * R + D' * L = scale * C (3) */
+/* R * B' + L * E' = scale * -F */
+
+/* This case (TRANS = 'C') is used to compute an one-norm-based estimate */
+/* of Dif[(A,D), (B,E)], the separation between the matrix pairs (A,D) */
+/* and (B,E), using CLACON. */
+
+/* If IJOB >= 1, CTGSYL computes a Frobenius norm-based estimate of */
+/* Dif[(A,D),(B,E)]. That is, the reciprocal of a lower bound on the */
+/* reciprocal of the smallest singular value of Z. */
+
+/* This is a level-3 BLAS algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': solve the generalized sylvester equation (1). */
+/* = 'C': solve the "conjugate transposed" system (3). */
+
+/* IJOB (input) INTEGER */
+/* Specifies what kind of functionality to be performed. */
+/* =0: solve (1) only. */
+/* =1: The functionality of 0 and 3. */
+/* =2: The functionality of 0 and 4. */
+/* =3: Only an estimate of Dif[(A,D), (B,E)] is computed. */
+/* (look ahead strategy is used). */
+/* =4: Only an estimate of Dif[(A,D), (B,E)] is computed. */
+/* (CGECON on sub-systems is used). */
+/* Not referenced if TRANS = 'C'. */
+
+/* M (input) INTEGER */
+/* The order of the matrices A and D, and the row dimension of */
+/* the matrices C, F, R and L. */
+
+/* N (input) INTEGER */
+/* The order of the matrices B and E, and the column dimension */
+/* of the matrices C, F, R and L. */
+
+/* A (input) COMPLEX array, dimension (LDA, M) */
+/* The upper triangular matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1, M). */
+
+/* B (input) COMPLEX array, dimension (LDB, N) */
+/* The upper triangular matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1, N). */
+
+/* C (input/output) COMPLEX array, dimension (LDC, N) */
+/* On entry, C contains the right-hand-side of the first matrix */
+/* equation in (1) or (3). */
+/* On exit, if IJOB = 0, 1 or 2, C has been overwritten by */
+/* the solution R. If IJOB = 3 or 4 and TRANS = 'N', C holds R, */
+/* the solution achieved during the computation of the */
+/* Dif-estimate. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1, M). */
+
+/* D (input) COMPLEX array, dimension (LDD, M) */
+/* The upper triangular matrix D. */
+
+/* LDD (input) INTEGER */
+/* The leading dimension of the array D. LDD >= max(1, M). */
+
+/* E (input) COMPLEX array, dimension (LDE, N) */
+/* The upper triangular matrix E. */
+
+/* LDE (input) INTEGER */
+/* The leading dimension of the array E. LDE >= max(1, N). */
+
+/* F (input/output) COMPLEX array, dimension (LDF, N) */
+/* On entry, F contains the right-hand-side of the second matrix */
+/* equation in (1) or (3). */
+/* On exit, if IJOB = 0, 1 or 2, F has been overwritten by */
+/* the solution L. If IJOB = 3 or 4 and TRANS = 'N', F holds L, */
+/* the solution achieved during the computation of the */
+/* Dif-estimate. */
+
+/* LDF (input) INTEGER */
+/* The leading dimension of the array F. LDF >= max(1, M). */
+
+/* DIF (output) REAL */
+/* On exit DIF is the reciprocal of a lower bound of the */
+/* reciprocal of the Dif-function, i.e. DIF is an upper bound of */
+/* Dif[(A,D), (B,E)] = sigma-min(Z), where Z as in (2). */
+/* IF IJOB = 0 or TRANS = 'C', DIF is not referenced. */
+
+/* SCALE (output) REAL */
+/* On exit SCALE is the scaling factor in (1) or (3). */
+/* If 0 < SCALE < 1, C and F hold the solutions R and L, resp., */
+/* to a slightly perturbed system but the input matrices A, B, */
+/* D and E have not been changed. If SCALE = 0, R and L will */
+/* hold the solutions to the homogenious system with C = F = 0. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK > = 1. */
+/* If IJOB = 1 or 2 and TRANS = 'N', LWORK >= max(1,2*M*N). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (M+N+2) */
+
+/* INFO (output) INTEGER */
+/* =0: successful exit */
+/* <0: If INFO = -i, the i-th argument had an illegal value. */
+/* >0: (A, D) and (B, E) have common or very close */
+/* eigenvalues. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* [1] B. Kagstrom and P. Poromaa, LAPACK-Style Algorithms and Software */
+/* for Solving the Generalized Sylvester Equation and Estimating the */
+/* Separation between Regular Matrix Pairs, Report UMINF - 93.23, */
+/* Department of Computing Science, Umea University, S-901 87 Umea, */
+/* Sweden, December 1993, Revised April 1994, Also as LAPACK Working */
+/* Note 75. To appear in ACM Trans. on Math. Software, Vol 22, */
+/* No 1, 1996. */
+
+/* [2] B. Kagstrom, A Perturbation Analysis of the Generalized Sylvester */
+/* Equation (AR - LB, DR - LE ) = (C, F), SIAM J. Matrix Anal. */
+/* Appl., 15(4):1045-1060, 1994. */
+
+/* [3] B. Kagstrom and L. Westin, Generalized Schur Methods with */
+/* Condition Estimators for Solving the Generalized Sylvester */
+/* Equation, IEEE Transactions on Automatic Control, Vol. 34, No. 7, */
+/* July 1989, pp 745-751. */
+
+/* ===================================================================== */
+/* Replaced various illegal calls to CCOPY by calls to CLASET. */
+/* Sven Hammarling, 1/5/02. */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+    d_dim1 = *ldd;
+    d_offset = 1 + d_dim1;
+    d__ -= d_offset;
+    e_dim1 = *lde;
+    e_offset = 1 + e_dim1;
+    e -= e_offset;
+    f_dim1 = *ldf;
+    f_offset = 1 + f_dim1;
+    f -= f_offset;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    notran = lsame_(trans, "N");
+    lquery = *lwork == -1;
+
+    if (! notran && ! lsame_(trans, "C")) {
+	*info = -1;
+    } else if (notran) {
+	if (*ijob < 0 || *ijob > 4) {
+	    *info = -2;
+	}
+    }
+    if (*info == 0) {
+	if (*m <= 0) {
+	    *info = -3;
+	} else if (*n <= 0) {
+	    *info = -4;
+	} else if (*lda < max(1,*m)) {
+	    *info = -6;
+	} else if (*ldb < max(1,*n)) {
+	    *info = -8;
+	} else if (*ldc < max(1,*m)) {
+	    *info = -10;
+	} else if (*ldd < max(1,*m)) {
+	    *info = -12;
+	} else if (*lde < max(1,*n)) {
+	    *info = -14;
+	} else if (*ldf < max(1,*m)) {
+	    *info = -16;
+	}
+    }
+
+    if (*info == 0) {
+	if (notran) {
+	    if (*ijob == 1 || *ijob == 2) {
+/* Computing MAX */
+		i__1 = 1, i__2 = (*m << 1) * *n;
+		lwmin = max(i__1,i__2);
+	    } else {
+		lwmin = 1;
+	    }
+	} else {
+	    lwmin = 1;
+	}
+	work[1].r = (real) lwmin, work[1].i = 0.f;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -20;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CTGSYL", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0) {
+	*scale = 1.f;
+	if (notran) {
+	    if (*ijob != 0) {
+		*dif = 0.f;
+	    }
+	}
+	return 0;
+    }
+
+/* Determine optimal block sizes MB and NB */
+
+    mb = ilaenv_(&c__2, "CTGSYL", trans, m, n, &c_n1, &c_n1);
+    nb = ilaenv_(&c__5, "CTGSYL", trans, m, n, &c_n1, &c_n1);
+
+    isolve = 1;
+    ifunc = 0;
+    if (notran) {
+	if (*ijob >= 3) {
+/* Dif-estimate only: zero the right-hand sides before calling the */
+/* level-2 solver with IFUNC > 0. */
+	    ifunc = *ijob - 2;
+	    claset_("F", m, n, &c_b1, &c_b1, &c__[c_offset], ldc);
+	    claset_("F", m, n, &c_b1, &c_b1, &f[f_offset], ldf);
+	} else if (*ijob >= 1 && notran) {
+	    isolve = 2;
+	}
+    }
+
+    if (mb <= 1 && nb <= 1 || mb >= *m && nb >= *n) {
+
+/* Use unblocked Level 2 solver */
+
+/* When ISOLVE = 2, two rounds are made: round 1 solves (1) and the */
+/* solution is saved in WORK while C and F are zeroed; round 2 reruns */
+/* the solver (with IFUNC = IJOB) to accumulate the Dif-estimate, and */
+/* the saved solution and SCALE are then restored. */
+
+	i__1 = isolve;
+	for (iround = 1; iround <= i__1; ++iround) {
+
+	    *scale = 1.f;
+	    dscale = 0.f;
+	    dsum = 1.f;
+	    pq = *m * *n;
+	    ctgsy2_(trans, &ifunc, m, n, &a[a_offset], lda, &b[b_offset], ldb,
+		     &c__[c_offset], ldc, &d__[d_offset], ldd, &e[e_offset],
+		    lde, &f[f_offset], ldf, scale, &dsum, &dscale, info);
+	    if (dscale != 0.f) {
+		if (*ijob == 1 || *ijob == 3) {
+		    *dif = sqrt((real) ((*m << 1) * *n)) / (dscale * sqrt(
+			    dsum));
+		} else {
+		    *dif = sqrt((real) pq) / (dscale * sqrt(dsum));
+		}
+	    }
+	    if (isolve == 2 && iround == 1) {
+		if (notran) {
+		    ifunc = *ijob;
+		}
+		scale2 = *scale;
+		clacpy_("F", m, n, &c__[c_offset], ldc, &work[1], m);
+		clacpy_("F", m, n, &f[f_offset], ldf, &work[*m * *n + 1], m);
+		claset_("F", m, n, &c_b1, &c_b1, &c__[c_offset], ldc);
+		claset_("F", m, n, &c_b1, &c_b1, &f[f_offset], ldf)
+			;
+	    } else if (isolve == 2 && iround == 2) {
+		clacpy_("F", m, n, &work[1], m, &c__[c_offset], ldc);
+		clacpy_("F", m, n, &work[*m * *n + 1], m, &f[f_offset], ldf);
+		*scale = scale2;
+	    }
+/* L30: */
+	}
+
+	return 0;
+
+    }
+
+/* Determine block structure of A */
+
+/* On exit from the two label loops below, IWORK(1:P+1) holds the first */
+/* row index of each row block of A (IWORK(P+1) = M+1 as sentinel) and */
+/* IWORK(P+2:Q+1) the first column index of each column block of B */
+/* (IWORK(Q+1) = N+1 as sentinel). */
+
+    p = 0;
+    i__ = 1;
+L40:
+    if (i__ > *m) {
+	goto L50;
+    }
+    ++p;
+    iwork[p] = i__;
+    i__ += mb;
+    if (i__ >= *m) {
+	goto L50;
+    }
+    goto L40;
+L50:
+    iwork[p + 1] = *m + 1;
+    if (iwork[p] == iwork[p + 1]) {
+	--p;
+    }
+
+/* Determine block structure of B */
+
+    q = p + 1;
+    j = 1;
+L60:
+    if (j > *n) {
+	goto L70;
+    }
+
+    ++q;
+    iwork[q] = j;
+    j += nb;
+    if (j >= *n) {
+	goto L70;
+    }
+    goto L60;
+
+L70:
+    iwork[q + 1] = *n + 1;
+    if (iwork[q] == iwork[q + 1]) {
+	--q;
+    }
+
+    if (notran) {
+	i__1 = isolve;
+	for (iround = 1; iround <= i__1; ++iround) {
+
+/* Solve (I, J) - subsystem */
+/* A(I, I) * R(I, J) - L(I, J) * B(J, J) = C(I, J) */
+/* D(I, I) * R(I, J) - L(I, J) * E(J, J) = F(I, J) */
+/* for I = P, P - 1, ..., 1; J = 1, 2, ..., Q */
+
+	    pq = 0;
+	    *scale = 1.f;
+	    dscale = 0.f;
+	    dsum = 1.f;
+	    i__2 = q;
+	    for (j = p + 2; j <= i__2; ++j) {
+		js = iwork[j];
+		je = iwork[j + 1] - 1;
+		nb = je - js + 1;
+		for (i__ = p; i__ >= 1; --i__) {
+		    is = iwork[i__];
+		    ie = iwork[i__ + 1] - 1;
+		    mb = ie - is + 1;
+		    ctgsy2_(trans, &ifunc, &mb, &nb, &a[is + is * a_dim1],
+			    lda, &b[js + js * b_dim1], ldb, &c__[is + js *
+			    c_dim1], ldc, &d__[is + is * d_dim1], ldd, &e[js
+			    + js * e_dim1], lde, &f[is + js * f_dim1], ldf, &
+			    scaloc, &dsum, &dscale, &linfo);
+		    if (linfo > 0) {
+			*info = linfo;
+		    }
+		    pq += mb * nb;
+/* If CTGSY2 rescaled the subsystem (SCALOC != 1), apply the same factor */
+/* to all previously computed parts of C and F and fold it into SCALE. */
+		    if (scaloc != 1.f) {
+			i__3 = js - 1;
+			for (k = 1; k <= i__3; ++k) {
+			    q__1.r = scaloc, q__1.i = 0.f;
+			    cscal_(m, &q__1, &c__[k * c_dim1 + 1], &c__1);
+			    q__1.r = scaloc, q__1.i = 0.f;
+			    cscal_(m, &q__1, &f[k * f_dim1 + 1], &c__1);
+/* L80: */
+			}
+			i__3 = je;
+			for (k = js; k <= i__3; ++k) {
+			    i__4 = is - 1;
+			    q__1.r = scaloc, q__1.i = 0.f;
+			    cscal_(&i__4, &q__1, &c__[k * c_dim1 + 1], &c__1);
+			    i__4 = is - 1;
+			    q__1.r = scaloc, q__1.i = 0.f;
+			    cscal_(&i__4, &q__1, &f[k * f_dim1 + 1], &c__1);
+/* L90: */
+			}
+			i__3 = je;
+			for (k = js; k <= i__3; ++k) {
+			    i__4 = *m - ie;
+			    q__1.r = scaloc, q__1.i = 0.f;
+			    cscal_(&i__4, &q__1, &c__[ie + 1 + k * c_dim1], &
+				    c__1);
+			    i__4 = *m - ie;
+			    q__1.r = scaloc, q__1.i = 0.f;
+			    cscal_(&i__4, &q__1, &f[ie + 1 + k * f_dim1], &
+				    c__1);
+/* L100: */
+			}
+			i__3 = *n;
+			for (k = je + 1; k <= i__3; ++k) {
+			    q__1.r = scaloc, q__1.i = 0.f;
+			    cscal_(m, &q__1, &c__[k * c_dim1 + 1], &c__1);
+			    q__1.r = scaloc, q__1.i = 0.f;
+			    cscal_(m, &q__1, &f[k * f_dim1 + 1], &c__1);
+/* L110: */
+			}
+			*scale *= scaloc;
+		    }
+
+/* Substitute R(I,J) and L(I,J) into remaining equation. */
+
+		    if (i__ > 1) {
+			i__3 = is - 1;
+			cgemm_("N", "N", &i__3, &nb, &mb, &c_b44, &a[is *
+				a_dim1 + 1], lda, &c__[is + js * c_dim1], ldc,
+				 &c_b45, &c__[js * c_dim1 + 1], ldc);
+			i__3 = is - 1;
+			cgemm_("N", "N", &i__3, &nb, &mb, &c_b44, &d__[is *
+				d_dim1 + 1], ldd, &c__[is + js * c_dim1], ldc,
+				 &c_b45, &f[js * f_dim1 + 1], ldf);
+		    }
+		    if (j < q) {
+			i__3 = *n - je;
+			cgemm_("N", "N", &mb, &i__3, &nb, &c_b45, &f[is + js *
+				 f_dim1], ldf, &b[js + (je + 1) * b_dim1],
+				ldb, &c_b45, &c__[is + (je + 1) * c_dim1],
+				ldc);
+			i__3 = *n - je;
+			cgemm_("N", "N", &mb, &i__3, &nb, &c_b45, &f[is + js *
+				 f_dim1], ldf, &e[js + (je + 1) * e_dim1],
+				lde, &c_b45, &f[is + (je + 1) * f_dim1], ldf);
+		    }
+/* L120: */
+		}
+/* L130: */
+	    }
+	    if (dscale != 0.f) {
+		if (*ijob == 1 || *ijob == 3) {
+		    *dif = sqrt((real) ((*m << 1) * *n)) / (dscale * sqrt(
+			    dsum));
+		} else {
+		    *dif = sqrt((real) pq) / (dscale * sqrt(dsum));
+		}
+	    }
+	    if (isolve == 2 && iround == 1) {
+		if (notran) {
+		    ifunc = *ijob;
+		}
+		scale2 = *scale;
+		clacpy_("F", m, n, &c__[c_offset], ldc, &work[1], m);
+		clacpy_("F", m, n, &f[f_offset], ldf, &work[*m * *n + 1], m);
+		claset_("F", m, n, &c_b1, &c_b1, &c__[c_offset], ldc);
+		claset_("F", m, n, &c_b1, &c_b1, &f[f_offset], ldf)
+			;
+	    } else if (isolve == 2 && iround == 2) {
+		clacpy_("F", m, n, &work[1], m, &c__[c_offset], ldc);
+		clacpy_("F", m, n, &work[*m * *n + 1], m, &f[f_offset], ldf);
+		*scale = scale2;
+	    }
+/* L150: */
+	}
+    } else {
+
+/* Solve transposed (I, J)-subsystem */
+/* A(I, I)' * R(I, J) + D(I, I)' * L(I, J) = C(I, J) */
+/* R(I, J) * B(J, J) + L(I, J) * E(J, J) = -F(I, J) */
+/* for I = 1,2,..., P; J = Q, Q-1,..., 1 */
+
+	*scale = 1.f;
+	i__1 = p;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    is = iwork[i__];
+	    ie = iwork[i__ + 1] - 1;
+	    mb = ie - is + 1;
+	    i__2 = p + 2;
+	    for (j = q; j >= i__2; --j) {
+		js = iwork[j];
+		je = iwork[j + 1] - 1;
+		nb = je - js + 1;
+		ctgsy2_(trans, &ifunc, &mb, &nb, &a[is + is * a_dim1], lda, &
+			b[js + js * b_dim1], ldb, &c__[is + js * c_dim1], ldc,
+			 &d__[is + is * d_dim1], ldd, &e[js + js * e_dim1],
+			lde, &f[is + js * f_dim1], ldf, &scaloc, &dsum, &
+			dscale, &linfo);
+		if (linfo > 0) {
+		    *info = linfo;
+		}
+/* If CTGSY2 rescaled the subsystem (SCALOC != 1), apply the same factor */
+/* to all previously computed parts of C and F and fold it into SCALE. */
+		if (scaloc != 1.f) {
+		    i__3 = js - 1;
+		    for (k = 1; k <= i__3; ++k) {
+			q__1.r = scaloc, q__1.i = 0.f;
+			cscal_(m, &q__1, &c__[k * c_dim1 + 1], &c__1);
+			q__1.r = scaloc, q__1.i = 0.f;
+			cscal_(m, &q__1, &f[k * f_dim1 + 1], &c__1);
+/* L160: */
+		    }
+		    i__3 = je;
+		    for (k = js; k <= i__3; ++k) {
+			i__4 = is - 1;
+			q__1.r = scaloc, q__1.i = 0.f;
+			cscal_(&i__4, &q__1, &c__[k * c_dim1 + 1], &c__1);
+			i__4 = is - 1;
+			q__1.r = scaloc, q__1.i = 0.f;
+			cscal_(&i__4, &q__1, &f[k * f_dim1 + 1], &c__1);
+/* L170: */
+		    }
+		    i__3 = je;
+		    for (k = js; k <= i__3; ++k) {
+			i__4 = *m - ie;
+			q__1.r = scaloc, q__1.i = 0.f;
+			cscal_(&i__4, &q__1, &c__[ie + 1 + k * c_dim1], &c__1)
+				;
+			i__4 = *m - ie;
+			q__1.r = scaloc, q__1.i = 0.f;
+			cscal_(&i__4, &q__1, &f[ie + 1 + k * f_dim1], &c__1);
+/* L180: */
+		    }
+		    i__3 = *n;
+		    for (k = je + 1; k <= i__3; ++k) {
+			q__1.r = scaloc, q__1.i = 0.f;
+			cscal_(m, &q__1, &c__[k * c_dim1 + 1], &c__1);
+			q__1.r = scaloc, q__1.i = 0.f;
+			cscal_(m, &q__1, &f[k * f_dim1 + 1], &c__1);
+/* L190: */
+		    }
+		    *scale *= scaloc;
+		}
+
+/* Substitute R(I,J) and L(I,J) into remaining equation. */
+
+		if (j > p + 2) {
+		    i__3 = js - 1;
+		    cgemm_("N", "C", &mb, &i__3, &nb, &c_b45, &c__[is + js *
+			    c_dim1], ldc, &b[js * b_dim1 + 1], ldb, &c_b45, &
+			    f[is + f_dim1], ldf);
+		    i__3 = js - 1;
+		    cgemm_("N", "C", &mb, &i__3, &nb, &c_b45, &f[is + js *
+			    f_dim1], ldf, &e[js * e_dim1 + 1], lde, &c_b45, &
+			    f[is + f_dim1], ldf);
+		}
+		if (i__ < p) {
+		    i__3 = *m - ie;
+		    cgemm_("C", "N", &i__3, &nb, &mb, &c_b44, &a[is + (ie + 1)
+			     * a_dim1], lda, &c__[is + js * c_dim1], ldc, &
+			    c_b45, &c__[ie + 1 + js * c_dim1], ldc);
+		    i__3 = *m - ie;
+		    cgemm_("C", "N", &i__3, &nb, &mb, &c_b44, &d__[is + (ie +
+			    1) * d_dim1], ldd, &f[is + js * f_dim1], ldf, &
+			    c_b45, &c__[ie + 1 + js * c_dim1], ldc);
+		}
+/* L200: */
+	    }
+/* L210: */
+	}
+    }
+
+    work[1].r = (real) lwmin, work[1].i = 0.f;
+
+    return 0;
+
+/* End of CTGSYL */
+
+} /* ctgsyl_ */
diff --git a/contrib/libs/clapack/ctpcon.c b/contrib/libs/clapack/ctpcon.c
new file mode 100644
index 0000000000..fafeb8ef30
--- /dev/null
+++ b/contrib/libs/clapack/ctpcon.c
@@ -0,0 +1,240 @@
+/* ctpcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ctpcon_(char *norm, char *uplo, char *diag, integer *n,
+	complex *ap, real *rcond, complex *work, real *rwork, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double r_imag(complex *);
+
+    /* Local variables */
+    integer ix, kase, kase1;
+    real scale;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    real anorm;
+    logical upper;
+    extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+	    *, integer *, integer *);
+    real xnorm;
+    extern integer icamax_(integer *, complex *, integer *);
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern doublereal clantp_(char *, char *, char *, integer *, complex *,
+	    real *);
+    extern /* Subroutine */ int clatps_(char *, char *, char *, char *,
+	    integer *, complex *, complex *, real *, real *, integer *);
+    real ainvnm;
+    extern /* Subroutine */ int csrscl_(integer *, real *, complex *, integer
+	    *);
+    logical onenrm;
+    char normin[1];
+    real smlnum;
+    logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTPCON estimates the reciprocal of the condition number of a packed */
+/* triangular matrix A, in either the 1-norm or the infinity-norm. */
+
+/* The norm of A is computed and an estimate is obtained for */
+/* norm(inv(A)), then the reciprocal of the condition number is */
+/* computed as */
+/* RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) COMPLEX array, dimension (N*(N+1)/2) */
+/* The upper or lower triangular matrix A, packed columnwise in */
+/* a linear array. The j-th column of A is stored in the array */
+/* AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* If DIAG = 'U', the diagonal elements of A are not referenced */
+/* and are assumed to be 1. */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --rwork;
+    --work;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+    nounit = lsame_(diag, "N");
+
+    if (! onenrm && ! lsame_(norm, "I")) {
+	*info = -1;
+    } else if (! upper && ! lsame_(uplo, "L")) {
+	*info = -2;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CTPCON", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	*rcond = 1.f;
+	return 0;
+    }
+
+    *rcond = 0.f;
+    smlnum = slamch_("Safe minimum") * (real) max(1,*n);
+
+/* Compute the norm of the triangular matrix A. */
+
+    anorm = clantp_(norm, uplo, diag, n, &ap[1], &rwork[1]);
+
+/* Continue only if ANORM > 0. */
+
+    if (anorm > 0.f) {
+
+/* Estimate the norm of the inverse of A. */
+
+/* Reverse-communication loop: CLACN2 builds a norm estimate of inv(A) */
+/* and requests, via KASE, products with inv(A) (KASE = KASE1) or with */
+/* inv(A') (otherwise), which CLATPS supplies with overflow-safe */
+/* scaling. KASE1 selects which product drives the 1-norm vs the */
+/* infinity-norm estimate. */
+
+	ainvnm = 0.f;
+	*(unsigned char *)normin = 'N';
+	if (onenrm) {
+	    kase1 = 1;
+	} else {
+	    kase1 = 2;
+	}
+	kase = 0;
+L10:
+	clacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+	if (kase != 0) {
+	    if (kase == kase1) {
+
+/* Multiply by inv(A). */
+
+		clatps_(uplo, "No transpose", diag, normin, n, &ap[1], &work[
+			1], &scale, &rwork[1], info);
+	    } else {
+
+/* Multiply by inv(A'). */
+
+		clatps_(uplo, "Conjugate transpose", diag, normin, n, &ap[1],
+			&work[1], &scale, &rwork[1], info);
+	    }
+	    *(unsigned char *)normin = 'Y';
+
+/* Multiply by 1/SCALE if doing so will not cause overflow. */
+
+	    if (scale != 1.f) {
+		ix = icamax_(n, &work[1], &c__1);
+		i__1 = ix;
+		xnorm = (r__1 = work[i__1].r, dabs(r__1)) + (r__2 = r_imag(&
+			work[ix]), dabs(r__2));
+		if (scale < xnorm * smlnum || scale == 0.f) {
+		    goto L20;
+		}
+		csrscl_(n, &scale, &work[1], &c__1);
+	    }
+	    goto L10;
+	}
+
+/* Compute the estimate of the reciprocal condition number. */
+
+	if (ainvnm != 0.f) {
+	    *rcond = 1.f / anorm / ainvnm;
+	}
+    }
+
+L20:
+    return 0;
+
+/* End of CTPCON */
+
+} /* ctpcon_ */
diff --git a/contrib/libs/clapack/ctprfs.c b/contrib/libs/clapack/ctprfs.c
new file mode 100644
index 0000000000..cf3bd0e229
--- /dev/null
+++ b/contrib/libs/clapack/ctprfs.c
@@ -0,0 +1,565 @@
+/* ctprfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ctprfs_(char *uplo, char *trans, char *diag, integer *n,
+	 integer *nrhs, complex *ap, complex *b, integer *ldb, complex *x,
+	integer *ldx, real *ferr, real *berr, complex *work, real *rwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+    real r__1, r__2, r__3, r__4;
+    complex q__1;
+
+    /* Builtin functions */
+    double r_imag(complex *);
+
+    /* Local variables */
+    integer i__, j, k;
+    real s;
+    integer kc;
+    real xk;
+    integer nz;
+    real eps;
+    integer kase;
+    real safe1, safe2;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+	    complex *, integer *), caxpy_(integer *, complex *, complex *,
+	    integer *, complex *, integer *), ctpmv_(char *, char *, char *,
+	    integer *, complex *, complex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int ctpsv_(char *, char *, char *, integer *,
+	    complex *, complex *, integer *), clacn2_(
+	    integer *, complex *, complex *, real *, integer *, integer *);
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical notran;
+    char transn[1], transt[1];
+    logical nounit;
+    real lstres;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CTPRFS provides error bounds and backward error estimates for the */
+/*  solution to a system of linear equations with a triangular packed */
+/*  coefficient matrix. */
+
+/*  The solution matrix X must be computed by CTPTRS or some other */
+/*  means before entering this routine.  CTPRFS does not do iterative */
+/*  refinement because doing so cannot improve the backward error. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  A is upper triangular; */
+/*          = 'L':  A is lower triangular. */
+
+/*  TRANS   (input) CHARACTER*1 */
+/*          Specifies the form of the system of equations: */
+/*          = 'N':  A * X = B     (No transpose) */
+/*          = 'T':  A**T * X = B  (Transpose) */
+/*          = 'C':  A**H * X = B  (Conjugate transpose) */
+
+/*  DIAG    (input) CHARACTER*1 */
+/*          = 'N':  A is non-unit triangular; */
+/*          = 'U':  A is unit triangular. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrices B and X.  NRHS >= 0. */
+
+/*  AP      (input) COMPLEX array, dimension (N*(N+1)/2) */
+/*          The upper or lower triangular matrix A, packed columnwise in */
+/*          a linear array.  The j-th column of A is stored in the array */
+/*          AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/*          If DIAG = 'U', the diagonal elements of A are not referenced */
+/*          and are assumed to be 1. */
+
+/*  B       (input) COMPLEX array, dimension (LDB,NRHS) */
+/*          The right hand side matrix B. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  X       (input) COMPLEX array, dimension (LDX,NRHS) */
+/*          The solution matrix X. */
+
+/*  LDX     (input) INTEGER */
+/*          The leading dimension of the array X.  LDX >= max(1,N). */
+
+/*  FERR    (output) REAL array, dimension (NRHS) */
+/*          The estimated forward error bound for each solution vector */
+/*          X(j) (the j-th column of the solution matrix X). */
+/*          If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/*          is an estimated upper bound for the magnitude of the largest */
+/*          element in (X(j) - XTRUE) divided by the magnitude of the */
+/*          largest element in X(j).  The estimate is as reliable as */
+/*          the estimate for RCOND, and is almost always a slight */
+/*          overestimate of the true error. */
+
+/*  BERR    (output) REAL array, dimension (NRHS) */
+/*          The componentwise relative backward error of each solution */
+/*          vector X(j) (i.e., the smallest relative change in */
+/*          any element of A or B that makes X(j) an exact solution). */
+
+/*  WORK    (workspace) COMPLEX array, dimension (2*N) */
+
+/*  RWORK   (workspace) REAL array, dimension (N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Statement Functions .. */
+/*     .. */
+/*     .. Statement Function definitions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* f2c convention: shift every array pointer so 1-based Fortran
+       indexing (ap[1], b[i + j*b_dim1], ...) works unchanged in C. */
+    --ap;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    notran = lsame_(trans, "N");
+    nounit = lsame_(diag, "N");
+
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (! notran && ! lsame_(trans, "T") && !
+	    lsame_(trans, "C")) {
+	*info = -2;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*nrhs < 0) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    } else if (*ldx < max(1,*n)) {
+	*info = -10;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CTPRFS", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] = 0.f;
+	    berr[j] = 0.f;
+/* L10: */
+	}
+	return 0;
+    }
+
+    /* TRANSN/TRANST are the op codes passed to CTPSV below.  The CLACN2
+       estimator alternates between op(A) and op(A)**H solves, so the
+       conjugate-transpose code 'C' is paired against 'N' either way. */
+    if (notran) {
+	*(unsigned char *)transn = 'N';
+	*(unsigned char *)transt = 'C';
+    } else {
+	*(unsigned char *)transn = 'C';
+	*(unsigned char *)transt = 'N';
+    }
+
+/*     NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+    nz = *n + 1;
+    eps = slamch_("Epsilon");
+    safmin = slamch_("Safe minimum");
+    safe1 = nz * safmin;
+    safe2 = safe1 / eps;
+
+/*     Do for each right hand side */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+/*        Compute residual R = B - op(A) * X, */
+/*        where op(A) = A, A**T, or A**H, depending on TRANS. */
+
+	ccopy_(n, &x[j * x_dim1 + 1], &c__1, &work[1], &c__1);
+	ctpmv_(uplo, trans, diag, n, &ap[1], &work[1], &c__1);
+	q__1.r = -1.f, q__1.i = -0.f;
+	caxpy_(n, &q__1, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+
+/*        Compute componentwise relative backward error from formula */
+
+/*        max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/*        where abs(Z) is the componentwise absolute value of the matrix */
+/*        or vector Z.  If the i-th component of the denominator is less */
+/*        than SAFE2, then SAFE1 is added to the i-th components of the */
+/*        numerator and denominator before dividing. */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * b_dim1;
+	    rwork[i__] = (r__1 = b[i__3].r, dabs(r__1)) + (r__2 = r_imag(&b[
+		    i__ + j * b_dim1]), dabs(r__2));
+/* L20: */
+	}
+
+	if (notran) {
+
+/*           Compute abs(A)*abs(X) + abs(B). */
+
+	    /* In every branch below, kc is the AP index of the first
+	       stored element of column k (packed columnwise storage). */
+	    if (upper) {
+		kc = 1;
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&
+				x[k + j * x_dim1]), dabs(r__2));
+			i__3 = k;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    i__4 = kc + i__ - 1;
+			    rwork[i__] += ((r__1 = ap[i__4].r, dabs(r__1)) + (
+				    r__2 = r_imag(&ap[kc + i__ - 1]), dabs(
+				    r__2))) * xk;
+/* L30: */
+			}
+			kc += k;
+/* L40: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&
+				x[k + j * x_dim1]), dabs(r__2));
+			i__3 = k - 1;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    i__4 = kc + i__ - 1;
+			    rwork[i__] += ((r__1 = ap[i__4].r, dabs(r__1)) + (
+				    r__2 = r_imag(&ap[kc + i__ - 1]), dabs(
+				    r__2))) * xk;
+/* L50: */
+			}
+			/* unit diagonal: |A(k,k)| = 1, added explicitly */
+			rwork[k] += xk;
+			kc += k;
+/* L60: */
+		    }
+		}
+	    } else {
+		kc = 1;
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&
+				x[k + j * x_dim1]), dabs(r__2));
+			i__3 = *n;
+			for (i__ = k; i__ <= i__3; ++i__) {
+			    i__4 = kc + i__ - k;
+			    rwork[i__] += ((r__1 = ap[i__4].r, dabs(r__1)) + (
+				    r__2 = r_imag(&ap[kc + i__ - k]), dabs(
+				    r__2))) * xk;
+/* L70: */
+			}
+			kc = kc + *n - k + 1;
+/* L80: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&
+				x[k + j * x_dim1]), dabs(r__2));
+			i__3 = *n;
+			for (i__ = k + 1; i__ <= i__3; ++i__) {
+			    i__4 = kc + i__ - k;
+			    rwork[i__] += ((r__1 = ap[i__4].r, dabs(r__1)) + (
+				    r__2 = r_imag(&ap[kc + i__ - k]), dabs(
+				    r__2))) * xk;
+/* L90: */
+			}
+			rwork[k] += xk;
+			kc = kc + *n - k + 1;
+/* L100: */
+		    }
+		}
+	    }
+	} else {
+
+/*           Compute abs(A**H)*abs(X) + abs(B). */
+
+	    if (upper) {
+		kc = 1;
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			s = 0.f;
+			i__3 = k;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    i__4 = kc + i__ - 1;
+			    i__5 = i__ + j * x_dim1;
+			    s += ((r__1 = ap[i__4].r, dabs(r__1)) + (r__2 =
+				    r_imag(&ap[kc + i__ - 1]), dabs(r__2))) *
+				    ((r__3 = x[i__5].r, dabs(r__3)) + (r__4 =
+				    r_imag(&x[i__ + j * x_dim1]), dabs(r__4)))
+				    ;
+/* L110: */
+			}
+			rwork[k] += s;
+			kc += k;
+/* L120: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			s = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&
+				x[k + j * x_dim1]), dabs(r__2));
+			i__3 = k - 1;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    i__4 = kc + i__ - 1;
+			    i__5 = i__ + j * x_dim1;
+			    s += ((r__1 = ap[i__4].r, dabs(r__1)) + (r__2 =
+				    r_imag(&ap[kc + i__ - 1]), dabs(r__2))) *
+				    ((r__3 = x[i__5].r, dabs(r__3)) + (r__4 =
+				    r_imag(&x[i__ + j * x_dim1]), dabs(r__4)))
+				    ;
+/* L130: */
+			}
+			rwork[k] += s;
+			kc += k;
+/* L140: */
+		    }
+		}
+	    } else {
+		kc = 1;
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			s = 0.f;
+			i__3 = *n;
+			for (i__ = k; i__ <= i__3; ++i__) {
+			    i__4 = kc + i__ - k;
+			    i__5 = i__ + j * x_dim1;
+			    s += ((r__1 = ap[i__4].r, dabs(r__1)) + (r__2 =
+				    r_imag(&ap[kc + i__ - k]), dabs(r__2))) *
+				    ((r__3 = x[i__5].r, dabs(r__3)) + (r__4 =
+				    r_imag(&x[i__ + j * x_dim1]), dabs(r__4)))
+				    ;
+/* L150: */
+			}
+			rwork[k] += s;
+			kc = kc + *n - k + 1;
+/* L160: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			s = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&
+				x[k + j * x_dim1]), dabs(r__2));
+			i__3 = *n;
+			for (i__ = k + 1; i__ <= i__3; ++i__) {
+			    i__4 = kc + i__ - k;
+			    i__5 = i__ + j * x_dim1;
+			    s += ((r__1 = ap[i__4].r, dabs(r__1)) + (r__2 =
+				    r_imag(&ap[kc + i__ - k]), dabs(r__2))) *
+				    ((r__3 = x[i__5].r, dabs(r__3)) + (r__4 =
+				    r_imag(&x[i__ + j * x_dim1]), dabs(r__4)))
+				    ;
+/* L170: */
+			}
+			rwork[k] += s;
+			kc = kc + *n - k + 1;
+/* L180: */
+		    }
+		}
+	    }
+	}
+	s = 0.f;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (rwork[i__] > safe2) {
+/* Computing MAX */
+		i__3 = i__;
+		r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+			r_imag(&work[i__]), dabs(r__2))) / rwork[i__];
+		s = dmax(r__3,r__4);
+	    } else {
+/* Computing MAX */
+		i__3 = i__;
+		r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+			r_imag(&work[i__]), dabs(r__2)) + safe1) / (rwork[i__]
+			 + safe1);
+		s = dmax(r__3,r__4);
+	    }
+/* L190: */
+	}
+	berr[j] = s;
+
+/*        Bound error from formula */
+
+/*        norm(X - XTRUE) / norm(X) .le. FERR = */
+/*        norm( abs(inv(op(A)))* */
+/*           ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/*        where */
+/*          norm(Z) is the magnitude of the largest component of Z */
+/*          inv(op(A)) is the inverse of op(A) */
+/*          abs(Z) is the componentwise absolute value of the matrix or */
+/*             vector Z */
+/*          NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/*          EPS is machine epsilon */
+
+/*        The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/*        is incremented by SAFE1 if the i-th component of */
+/*        abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/*        Use CLACN2 to estimate the infinity-norm of the matrix */
+/*           inv(op(A)) * diag(W), */
+/*        where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (rwork[i__] > safe2) {
+		i__3 = i__;
+		rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+			r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+			i__];
+	    } else {
+		i__3 = i__;
+		rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+			r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+			i__] + safe1;
+	    }
+/* L200: */
+	}
+
+	/* Reverse-communication loop: CLACN2 keeps returning with
+	   kase != 0 until its norm estimate has converged; isave
+	   carries its state between calls. */
+	kase = 0;
+L210:
+	clacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+	if (kase != 0) {
+	    if (kase == 1) {
+
+/*              Multiply by diag(W)*inv(op(A)**H). */
+
+		ctpsv_(uplo, transt, diag, n, &ap[1], &work[1], &c__1);
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__;
+		    q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4]
+			    * work[i__5].i;
+		    work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L220: */
+		}
+	    } else {
+
+/*              Multiply by inv(op(A))*diag(W). */
+
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__;
+		    q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4]
+			    * work[i__5].i;
+		    work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L230: */
+		}
+		ctpsv_(uplo, transn, diag, n, &ap[1], &work[1], &c__1);
+	    }
+	    goto L210;
+	}
+
+/*        Normalize error. */
+
+	lstres = 0.f;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    i__3 = i__ + j * x_dim1;
+	    r__3 = lstres, r__4 = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+		    r_imag(&x[i__ + j * x_dim1]), dabs(r__2));
+	    lstres = dmax(r__3,r__4);
+/* L240: */
+	}
+	if (lstres != 0.f) {
+	    ferr[j] /= lstres;
+	}
+
+/* L250: */
+    }
+
+    return 0;
+
+/*     End of CTPRFS */
+
+} /* ctprfs_ */
diff --git a/contrib/libs/clapack/ctptri.c b/contrib/libs/clapack/ctptri.c
new file mode 100644
index 0000000000..f8161f2a00
--- /dev/null
+++ b/contrib/libs/clapack/ctptri.c
@@ -0,0 +1,236 @@
+/* ctptri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int ctptri_(char *uplo, char *diag, integer *n, complex *ap,
+	 integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    complex q__1;
+
+    /* Builtin functions */
+    void c_div(complex *, complex *, complex *);
+
+    /* Local variables */
+    integer j, jc, jj;
+    complex ajj;
+    extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+	    integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int ctpmv_(char *, char *, char *, integer *,
+	    complex *, complex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    integer jclast;
+    logical nounit;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CTPTRI computes the inverse of a complex upper or lower triangular */
+/*  matrix A stored in packed format. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  A is upper triangular; */
+/*          = 'L':  A is lower triangular. */
+
+/*  DIAG    (input) CHARACTER*1 */
+/*          = 'N':  A is non-unit triangular; */
+/*          = 'U':  A is unit triangular. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  AP      (input/output) COMPLEX array, dimension (N*(N+1)/2) */
+/*          On entry, the upper or lower triangular matrix A, stored */
+/*          columnwise in a linear array.  The j-th column of A is stored */
+/*          in the array AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*((2*n-j)/2) = A(i,j) for j<=i<=n. */
+/*          See below for further details. */
+/*          On exit, the (triangular) inverse of the original matrix, in */
+/*          the same packed storage format. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, A(i,i) is exactly zero.  The triangular */
+/*                matrix is singular and its inverse can not be computed. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  A triangular matrix A can be transferred to packed storage using one */
+/*  of the following program segments: */
+
+/*  UPLO = 'U':                      UPLO = 'L': */
+
+/*        JC = 1                           JC = 1 */
+/*        DO 2 J = 1, N                    DO 2 J = 1, N */
+/*           DO 1 I = 1, J                    DO 1 I = J, N */
+/*              AP(JC+I-1) = A(I,J)              AP(JC+I-J) = A(I,J) */
+/*      1    CONTINUE                    1    CONTINUE */
+/*           JC = JC + J                      JC = JC + N - J + 1 */
+/*      2 CONTINUE                      2 CONTINUE */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* f2c convention: shift AP so 1-based Fortran indexing works in C. */
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    nounit = lsame_(diag, "N");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CTPTRI", &i__1);
+	return 0;
+    }
+
+/*     Check for singularity if non-unit. */
+
+    /* Note: *info itself is the loop index here (a Fortran-ism kept by
+       f2c), so that on an early return it already holds the 1-based
+       index of the zero diagonal element, as the contract requires. */
+    if (nounit) {
+	if (upper) {
+	    jj = 0;
+	    i__1 = *n;
+	    for (*info = 1; *info <= i__1; ++(*info)) {
+		/* jj walks the diagonal: diag of column k is k entries
+		   past the previous diagonal in packed upper storage. */
+		jj += *info;
+		i__2 = jj;
+		if (ap[i__2].r == 0.f && ap[i__2].i == 0.f) {
+		    return 0;
+		}
+/* L10: */
+	    }
+	} else {
+	    jj = 1;
+	    i__1 = *n;
+	    for (*info = 1; *info <= i__1; ++(*info)) {
+		i__2 = jj;
+		if (ap[i__2].r == 0.f && ap[i__2].i == 0.f) {
+		    return 0;
+		}
+		/* column k holds n-k+1 entries in packed lower storage */
+		jj = jj + *n - *info + 1;
+/* L20: */
+	    }
+	}
+	*info = 0;
+    }
+
+    if (upper) {
+
+/*        Compute inverse of upper triangular matrix. */
+
+	/* jc = AP index of the first element of column j. */
+	jc = 1;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    if (nounit) {
+		/* Invert the diagonal entry in place; ajj = -1/A(j,j)
+		   is the scale applied to the off-diagonal column. */
+		i__2 = jc + j - 1;
+		c_div(&q__1, &c_b1, &ap[jc + j - 1]);
+		ap[i__2].r = q__1.r, ap[i__2].i = q__1.i;
+		i__2 = jc + j - 1;
+		q__1.r = -ap[i__2].r, q__1.i = -ap[i__2].i;
+		ajj.r = q__1.r, ajj.i = q__1.i;
+	    } else {
+		q__1.r = -1.f, q__1.i = -0.f;
+		ajj.r = q__1.r, ajj.i = q__1.i;
+	    }
+
+/*           Compute elements 1:j-1 of j-th column. */
+
+	    i__2 = j - 1;
+	    ctpmv_("Upper", "No transpose", diag, &i__2, &ap[1], &ap[jc], &
+		    c__1);
+	    i__2 = j - 1;
+	    cscal_(&i__2, &ajj, &ap[jc], &c__1);
+	    jc += j;
+/* L30: */
+	}
+
+    } else {
+
+/*        Compute inverse of lower triangular matrix. */
+
+	/* Walk columns backwards; jc starts at the AP index of A(n,n). */
+	jc = *n * (*n + 1) / 2;
+	for (j = *n; j >= 1; --j) {
+	    if (nounit) {
+		i__1 = jc;
+		c_div(&q__1, &c_b1, &ap[jc]);
+		ap[i__1].r = q__1.r, ap[i__1].i = q__1.i;
+		i__1 = jc;
+		q__1.r = -ap[i__1].r, q__1.i = -ap[i__1].i;
+		ajj.r = q__1.r, ajj.i = q__1.i;
+	    } else {
+		q__1.r = -1.f, q__1.i = -0.f;
+		ajj.r = q__1.r, ajj.i = q__1.i;
+	    }
+	    if (j < *n) {
+
+/*              Compute elements j+1:n of j-th column. */
+
+		i__1 = *n - j;
+		ctpmv_("Lower", "No transpose", diag, &i__1, &ap[jclast], &ap[
+			jc + 1], &c__1);
+		i__1 = *n - j;
+		cscal_(&i__1, &ajj, &ap[jc + 1], &c__1);
+	    }
+	    jclast = jc;
+	    /* Step back to the diagonal of column j-1: consecutive
+	       diagonals are n-j+2 apart in packed lower storage. */
+	    jc = jc - *n + j - 2;
+/* L40: */
+	}
+    }
+
+    return 0;
+
+/*     End of CTPTRI */
+
+} /* ctptri_ */
diff --git a/contrib/libs/clapack/ctptrs.c b/contrib/libs/clapack/ctptrs.c
new file mode 100644
index 0000000000..ae5dbc7c3a
--- /dev/null
+++ b/contrib/libs/clapack/ctptrs.c
@@ -0,0 +1,194 @@
+/* ctptrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ctptrs_(char *uplo, char *trans, char *diag, integer *n,
+	 integer *nrhs, complex *ap, complex *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1, i__2;
+
+    /* Local variables */
+    integer j, jc;
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int ctpsv_(char *, char *, char *, integer *,
+	    complex *, complex *, integer *), xerbla_(
+	    char *, integer *);
+    logical nounit;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CTPTRS solves a triangular system of the form */
+
+/*     A * X = B,  A**T * X = B,  or  A**H * X = B, */
+
+/*  where A is a triangular matrix of order N stored in packed format, */
+/*  and B is an N-by-NRHS matrix.  A check is made to verify that A is */
+/*  nonsingular. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  A is upper triangular; */
+/*          = 'L':  A is lower triangular. */
+
+/*  TRANS   (input) CHARACTER*1 */
+/*          Specifies the form of the system of equations: */
+/*          = 'N':  A * X = B     (No transpose) */
+/*          = 'T':  A**T * X = B  (Transpose) */
+/*          = 'C':  A**H * X = B  (Conjugate transpose) */
+
+/*  DIAG    (input) CHARACTER*1 */
+/*          = 'N':  A is non-unit triangular; */
+/*          = 'U':  A is unit triangular. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrix B.  NRHS >= 0. */
+
+/*  AP      (input) COMPLEX array, dimension (N*(N+1)/2) */
+/*          The upper or lower triangular matrix A, packed columnwise in */
+/*          a linear array.  The j-th column of A is stored in the array */
+/*          AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/*  B       (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/*          On entry, the right hand side matrix B. */
+/*          On exit, if INFO = 0, the solution matrix X. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, the i-th diagonal element of A is zero, */
+/*                indicating that the matrix is singular and the */
+/*                solutions X have not been computed. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* f2c convention: shift pointers for 1-based Fortran indexing. */
+    --ap;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    nounit = lsame_(diag, "N");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (! lsame_(trans, "N") && ! lsame_(trans,
+	    "T") && ! lsame_(trans, "C")) {
+	*info = -2;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*nrhs < 0) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CTPTRS", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Check for singularity. */
+
+    /* Note: *info doubles as the loop index (f2c keeps this Fortran
+       idiom) so that on an early return it already holds the 1-based
+       index of the zero diagonal element.  jc tracks the AP index of
+       the diagonal entry of the current column in packed storage. */
+    if (nounit) {
+	if (upper) {
+	    jc = 1;
+	    i__1 = *n;
+	    for (*info = 1; *info <= i__1; ++(*info)) {
+		i__2 = jc + *info - 1;
+		if (ap[i__2].r == 0.f && ap[i__2].i == 0.f) {
+		    return 0;
+		}
+		jc += *info;
+/* L10: */
+	    }
+	} else {
+	    jc = 1;
+	    i__1 = *n;
+	    for (*info = 1; *info <= i__1; ++(*info)) {
+		i__2 = jc;
+		if (ap[i__2].r == 0.f && ap[i__2].i == 0.f) {
+		    return 0;
+		}
+		jc = jc + *n - *info + 1;
+/* L20: */
+	    }
+	}
+    }
+    *info = 0;
+
+/*     Solve  A * x = b,  A**T * x = b,  or  A**H * x = b. */
+
+    /* One triangular solve (Level-2 BLAS) per right-hand-side column. */
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+	ctpsv_(uplo, trans, diag, n, &ap[1], &b[j * b_dim1 + 1], &c__1);
+/* L30: */
+    }
+
+    return 0;
+
+/*     End of CTPTRS */
+
+} /* ctptrs_ */
diff --git a/contrib/libs/clapack/ctpttf.c b/contrib/libs/clapack/ctpttf.c
new file mode 100644
index 0000000000..ffd424c90a
--- /dev/null
+++ b/contrib/libs/clapack/ctpttf.c
@@ -0,0 +1,573 @@
+/* ctpttf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ctpttf_(char *transr, char *uplo, integer *n, complex *
+	ap, complex *arf, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3, i__4;
+    complex q__1;
+
+    /* Builtin functions */
+    void r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    integer i__, j, k, n1, n2, ij, jp, js, nt, lda, ijp;
+    logical normaltransr;
+    extern logical lsame_(char *, char *);
+    logical lower;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical nisodd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTPTTF copies a triangular matrix A from standard packed format (TP) */
+/* to rectangular full packed format (TF). */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': ARF in Normal format is wanted; */
+/* = 'C': ARF in Conjugate-transpose format is wanted. */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) COMPLEX array, dimension ( N*(N+1)/2 ), */
+/* On entry, the upper or lower triangular matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* ARF (output) COMPLEX array, dimension ( N*(N+1)/2 ), */
+/* On exit, the upper or lower triangular matrix A stored in */
+/* RFP format. For a further discussion see Notes below. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Notes: */
+/* ====== */
+
+/* We first consider Standard Packed Format when N is even. */
+/* We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* conjugate-transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* conjugate-transpose of the last three columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- -- */
+/* 03 04 05 33 43 53 */
+/* -- -- */
+/* 13 14 15 00 44 54 */
+/* -- */
+/* 23 24 25 10 11 55 */
+
+/* 33 34 35 20 21 22 */
+/* -- */
+/* 00 44 45 30 31 32 */
+/* -- -- */
+/* 01 11 55 40 41 42 */
+/* -- -- -- */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We next consider Standard Packed Format when N is odd. */
+/* We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* conjugate-transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* conjugate-transpose of the last two columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- */
+/* 02 03 04 00 33 43 */
+/* -- */
+/* 12 13 14 10 11 44 */
+
+/* 22 23 24 20 21 22 */
+/* -- */
+/* 00 33 34 30 31 32 */
+/* -- -- */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- */
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    *info = 0;
+    normaltransr = lsame_(transr, "N");  /* TRUE iff ARF wanted in Normal format */
+    lower = lsame_(uplo, "L");  /* TRUE iff A is stored lower triangular */
+    if (! normaltransr && ! lsame_(transr, "C")) {
+	*info = -1;
+    } else if (! lower && ! lsame_(uplo, "U")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CTPTTF", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (*n == 1) {  /* 1x1 case: a single element, conjugated iff TRANSR = 'C' */
+	if (normaltransr) {
+	    arf[0].r = ap[0].r, arf[0].i = ap[0].i;
+	} else {
+	    r_cnjg(&q__1, ap);
+	    arf[0].r = q__1.r, arf[0].i = q__1.i;
+	}
+	return 0;
+    }
+
+/* Size of array ARF(0:NT-1) */
+
+    nt = *n * (*n + 1) / 2;  /* length of the packed arrays AP and ARF */
+
+/* Set N1 and N2 depending on LOWER */
+
+    if (lower) {
+	n2 = *n / 2;  /* N2 = floor(N/2), N1 = ceil(N/2) when LOWER */
+	n1 = *n - n2;
+    } else {
+	n1 = *n / 2;  /* N1 = floor(N/2), N2 = ceil(N/2) when UPPER */
+	n2 = *n - n1;
+    }
+
+/* If N is odd, set NISODD = .TRUE. */
+/* If N is even, set K = N/2 and NISODD = .FALSE. */
+
+/* set lda of ARF^C; ARF^C is (0:(N+1)/2-1,0:N-noe) */
+/* where noe = 0 if n is even, noe = 1 if n is odd */
+
+    if (*n % 2 == 0) {
+	k = *n / 2;
+	nisodd = FALSE_;
+	lda = *n + 1;
+    } else {
+	nisodd = TRUE_;
+	lda = *n;
+    }
+
+/* ARF^C has lda rows and n+1-noe cols */
+
+    if (! normaltransr) {
+	lda = (*n + 1) / 2;
+    }
+
+/* start execution: there are eight cases */
+
+    if (nisodd) {
+
+/* N is odd */
+
+	if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:n1-1) ) */
+/* T1 -> a(0,0), T2 -> a(0,1), S -> a(n1,0) */
+/* T1 -> a(0), T2 -> a(n), S -> a(n1); lda = n */
+
+		ijp = 0;  /* IJP walks AP linearly; IJ indexes into ARF */
+		jp = 0;
+		i__1 = n2;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = *n - 1;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			ij = i__ + jp;
+			i__3 = ij;
+			i__4 = ijp;
+			arf[i__3].r = ap[i__4].r, arf[i__3].i = ap[i__4].i;
+			++ijp;
+		    }
+		    jp += lda;
+		}
+		i__1 = n2 - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = n2;
+		    for (j = i__ + 1; j <= i__2; ++j) {
+			ij = i__ + j * lda;
+			i__3 = ij;
+			r_cnjg(&q__1, &ap[ijp]);
+			arf[i__3].r = q__1.r, arf[i__3].i = q__1.i;
+			++ijp;
+		    }
+		}
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:n2-1) */
+/* T1 -> a(n1+1,0), T2 -> a(n1,0), S -> a(0,0) */
+/* T1 -> a(n2), T2 -> a(n1), S -> a(0) */
+
+		ijp = 0;
+		i__1 = n1 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    ij = n2 + j;
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			r_cnjg(&q__1, &ap[ijp]);
+			arf[i__3].r = q__1.r, arf[i__3].i = q__1.i;
+			++ijp;
+			ij += lda;
+		    }
+		}
+		js = 0;  /* JS marks the start of each destination column */
+		i__1 = *n - 1;
+		for (j = n1; j <= i__1; ++j) {
+		    ij = js;
+		    i__2 = js + j;
+		    for (ij = js; ij <= i__2; ++ij) {
+			i__3 = ij;
+			i__4 = ijp;
+			arf[i__3].r = ap[i__4].r, arf[i__3].i = ap[i__4].i;
+			++ijp;
+		    }
+		    js += lda;
+		}
+
+	    }
+
+	} else {
+
+/* N is odd and TRANSR = 'C' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is odd */
+/* T1 -> A(0,0) , T2 -> A(1,0) , S -> A(0,n1) */
+/* T1 -> a(0+0) , T2 -> a(1+0) , S -> a(0+n1*n1); lda=n1 */
+
+		ijp = 0;
+		i__1 = n2;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = *n * lda - 1;
+		    i__3 = lda;
+		    for (ij = i__ * (lda + 1); i__3 < 0 ? ij >= i__2 : ij <=
+			    i__2; ij += i__3) {
+			i__4 = ij;
+			r_cnjg(&q__1, &ap[ijp]);
+			arf[i__4].r = q__1.r, arf[i__4].i = q__1.i;
+			++ijp;
+		    }
+		}
+		js = 1;
+		i__1 = n2 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + n2 - j - 1;
+		    for (ij = js; ij <= i__3; ++ij) {
+			i__2 = ij;
+			i__4 = ijp;
+			arf[i__2].r = ap[i__4].r, arf[i__2].i = ap[i__4].i;
+			++ijp;
+		    }
+		    js = js + lda + 1;
+		}
+
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is odd */
+/* T1 -> A(0,n1+1), T2 -> A(0,n1), S -> A(0,0) */
+/* T1 -> a(n2*n2), T2 -> a(n1*n2), S -> a(0); lda = n2 */
+
+		ijp = 0;
+		js = n2 * lda;
+		i__1 = n1 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + j;
+		    for (ij = js; ij <= i__3; ++ij) {
+			i__2 = ij;
+			i__4 = ijp;
+			arf[i__2].r = ap[i__4].r, arf[i__2].i = ap[i__4].i;
+			++ijp;
+		    }
+		    js += lda;
+		}
+		i__1 = n1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__3 = i__ + (n1 + i__) * lda;
+		    i__2 = lda;
+		    for (ij = i__; i__2 < 0 ? ij >= i__3 : ij <= i__3; ij +=
+			    i__2) {
+			i__4 = ij;
+			r_cnjg(&q__1, &ap[ijp]);
+			arf[i__4].r = q__1.r, arf[i__4].i = q__1.i;
+			++ijp;
+		    }
+		}
+
+	    }
+
+	}
+
+    } else {
+
+/* N is even */
+
+	if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/* T1 -> a(1), T2 -> a(0), S -> a(k+1) */
+
+		ijp = 0;
+		jp = 0;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = *n - 1;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			ij = i__ + 1 + jp;
+			i__3 = ij;
+			i__4 = ijp;
+			arf[i__3].r = ap[i__4].r, arf[i__3].i = ap[i__4].i;
+			++ijp;
+		    }
+		    jp += lda;
+		}
+		i__1 = k - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = k - 1;
+		    for (j = i__; j <= i__2; ++j) {
+			ij = i__ + j * lda;
+			i__3 = ij;
+			r_cnjg(&q__1, &ap[ijp]);
+			arf[i__3].r = q__1.r, arf[i__3].i = q__1.i;
+			++ijp;
+		    }
+		}
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(k+1,0) , T2 -> a(k,0), S -> a(0,0) */
+/* T1 -> a(k+1), T2 -> a(k), S -> a(0) */
+
+		ijp = 0;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    ij = k + 1 + j;
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			r_cnjg(&q__1, &ap[ijp]);
+			arf[i__3].r = q__1.r, arf[i__3].i = q__1.i;
+			++ijp;
+			ij += lda;
+		    }
+		}
+		js = 0;
+		i__1 = *n - 1;
+		for (j = k; j <= i__1; ++j) {
+		    ij = js;
+		    i__2 = js + j;
+		    for (ij = js; ij <= i__2; ++ij) {
+			i__3 = ij;
+			i__4 = ijp;
+			arf[i__3].r = ap[i__4].r, arf[i__3].i = ap[i__4].i;
+			++ijp;
+		    }
+		    js += lda;
+		}
+
+	    }
+
+	} else {
+
+/* N is even and TRANSR = 'C' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,1), T2 -> B(0,0), S -> B(0,k+1) */
+/* T1 -> a(0+k), T2 -> a(0+0), S -> a(0+k*(k+1)); lda=k */
+
+		ijp = 0;
+		i__1 = k - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = (*n + 1) * lda - 1;
+		    i__3 = lda;
+		    for (ij = i__ + (i__ + 1) * lda; i__3 < 0 ? ij >= i__2 :
+			    ij <= i__2; ij += i__3) {
+			i__4 = ij;
+			r_cnjg(&q__1, &ap[ijp]);
+			arf[i__4].r = q__1.r, arf[i__4].i = q__1.i;
+			++ijp;
+		    }
+		}
+		js = 0;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + k - j - 1;
+		    for (ij = js; ij <= i__3; ++ij) {
+			i__2 = ij;
+			i__4 = ijp;
+			arf[i__2].r = ap[i__4].r, arf[i__2].i = ap[i__4].i;
+			++ijp;
+		    }
+		    js = js + lda + 1;
+		}
+
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,k+1), T2 -> B(0,k), S -> B(0,0) */
+/* T1 -> a(0+k*(k+1)), T2 -> a(0+k*k), S -> a(0+0)); lda=k */
+
+		ijp = 0;
+		js = (k + 1) * lda;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + j;
+		    for (ij = js; ij <= i__3; ++ij) {
+			i__2 = ij;
+			i__4 = ijp;
+			arf[i__2].r = ap[i__4].r, arf[i__2].i = ap[i__4].i;
+			++ijp;
+		    }
+		    js += lda;
+		}
+		i__1 = k - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__3 = i__ + (k + i__) * lda;
+		    i__2 = lda;
+		    for (ij = i__; i__2 < 0 ? ij >= i__3 : ij <= i__3; ij +=
+			    i__2) {
+			i__4 = ij;
+			r_cnjg(&q__1, &ap[ijp]);
+			arf[i__4].r = q__1.r, arf[i__4].i = q__1.i;
+			++ijp;
+		    }
+		}
+
+	    }
+
+	}
+
+    }
+
+    return 0;
+
+/* End of CTPTTF */
+
+} /* ctpttf_ */
diff --git a/contrib/libs/clapack/ctpttr.c b/contrib/libs/clapack/ctpttr.c
new file mode 100644
index 0000000000..552c88ec18
--- /dev/null
+++ b/contrib/libs/clapack/ctpttr.c
@@ -0,0 +1,148 @@
+/* ctpttr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ctpttr_(char *uplo, integer *n, complex *ap, complex *a,
+	integer *lda, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, j, k;
+    extern logical lsame_(char *, char *);
+    logical lower;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Julien Langou of the Univ. of Colorado Denver -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTPTTR copies a triangular matrix A from standard packed format (TP) */
+/* to standard full format (TR). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular. */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) COMPLEX array, dimension ( N*(N+1)/2 ), */
+/* On entry, the upper or lower triangular matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* A (output) COMPLEX array, dimension ( LDA, N ) */
+/* On exit, the triangular matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    lower = lsame_(uplo, "L");  /* TRUE iff A is stored lower triangular */
+    if (! lower && ! lsame_(uplo, "U")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CTPTTR", &i__1);
+	return 0;
+    }
+
+    if (lower) {
+	k = 0;  /* K is the running linear index into the packed array AP */
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {  /* column j holds rows j..n when lower */
+	    i__2 = *n;
+	    for (i__ = j; i__ <= i__2; ++i__) {
+		++k;
+		i__3 = i__ + j * a_dim1;
+		i__4 = k;
+		a[i__3].r = ap[i__4].r, a[i__3].i = ap[i__4].i;
+	    }
+	}
+    } else {
+	k = 0;  /* K is the running linear index into the packed array AP */
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {  /* column j holds rows 1..j when upper */
+	    i__2 = j;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		++k;
+		i__3 = i__ + j * a_dim1;
+		i__4 = k;
+		a[i__3].r = ap[i__4].r, a[i__3].i = ap[i__4].i;
+	    }
+	}
+    }
+
+
+    return 0;
+
+/* End of CTPTTR */
+
+} /* ctpttr_ */
diff --git a/contrib/libs/clapack/ctrcon.c b/contrib/libs/clapack/ctrcon.c
new file mode 100644
index 0000000000..05831a70f1
--- /dev/null
+++ b/contrib/libs/clapack/ctrcon.c
@@ -0,0 +1,249 @@
+/* ctrcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ctrcon_(char *norm, char *uplo, char *diag, integer *n,
+	complex *a, integer *lda, real *rcond, complex *work, real *rwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1;
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double r_imag(complex *);
+
+    /* Local variables */
+    integer ix, kase, kase1;
+    real scale;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    real anorm;
+    logical upper;
+    extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+	    *, integer *, integer *);
+    real xnorm;
+    extern integer icamax_(integer *, complex *, integer *);
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern doublereal clantr_(char *, char *, char *, integer *, integer *,
+	    complex *, integer *, real *);
+    real ainvnm;
+    extern /* Subroutine */ int clatrs_(char *, char *, char *, char *,
+	    integer *, complex *, integer *, complex *, real *, real *,
+	    integer *), csrscl_(integer *,
+	    real *, complex *, integer *);
+    logical onenrm;
+    char normin[1];
+    real smlnum;
+    logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTRCON estimates the reciprocal of the condition number of a */
+/* triangular matrix A, in either the 1-norm or the infinity-norm. */
+
+/* The norm of A is computed and an estimate is obtained for */
+/* norm(inv(A)), then the reciprocal of the condition number is */
+/* computed as */
+/* RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The triangular matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of the array A contains the upper */
+/* triangular matrix, and the strictly lower triangular part of */
+/* A is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of the array A contains the lower triangular */
+/* matrix, and the strictly upper triangular part of A is not */
+/* referenced. If DIAG = 'U', the diagonal elements of A are */
+/* also not referenced and are assumed to be 1. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");  /* 1-norm requested */
+    nounit = lsame_(diag, "N");
+
+    if (! onenrm && ! lsame_(norm, "I")) {
+	*info = -1;
+    } else if (! upper && ! lsame_(uplo, "L")) {
+	*info = -2;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*n)) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CTRCON", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	*rcond = 1.f;
+	return 0;
+    }
+
+    *rcond = 0.f;
+    smlnum = slamch_("Safe minimum") * (real) max(1,*n);  /* scaled underflow threshold */
+
+/* Compute the norm of the triangular matrix A. */
+
+    anorm = clantr_(norm, uplo, diag, n, n, &a[a_offset], lda, &rwork[1]);
+
+/* Continue only if ANORM > 0. */
+
+    if (anorm > 0.f) {
+
+/* Estimate the norm of the inverse of A. */
+
+	ainvnm = 0.f;
+	*(unsigned char *)normin = 'N';
+	if (onenrm) {
+	    kase1 = 1;
+	} else {
+	    kase1 = 2;
+	}
+	kase = 0;
+L10:  /* reverse-communication loop: CLACN2 drives the norm estimate */
+	clacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+	if (kase != 0) {
+	    if (kase == kase1) {
+
+/* Multiply by inv(A). */
+
+		clatrs_(uplo, "No transpose", diag, normin, n, &a[a_offset],
+			lda, &work[1], &scale, &rwork[1], info);
+	    } else {
+
+/* Multiply by inv(A'). */
+
+		clatrs_(uplo, "Conjugate transpose", diag, normin, n, &a[
+			a_offset], lda, &work[1], &scale, &rwork[1], info);
+	    }
+	    *(unsigned char *)normin = 'Y';
+
+/* Multiply by 1/SCALE if doing so will not cause overflow. */
+
+	    if (scale != 1.f) {
+		ix = icamax_(n, &work[1], &c__1);
+		i__1 = ix;
+		xnorm = (r__1 = work[i__1].r, dabs(r__1)) + (r__2 = r_imag(&
+			work[ix]), dabs(r__2));
+		if (scale < xnorm * smlnum || scale == 0.f) {  /* rescale would overflow */
+		    goto L20;
+		}
+		csrscl_(n, &scale, &work[1], &c__1);
+	    }
+	    goto L10;
+	}
+
+/* Compute the estimate of the reciprocal condition number. */
+
+	if (ainvnm != 0.f) {
+	    *rcond = 1.f / anorm / ainvnm;
+	}
+    }
+
+L20:
+    return 0;
+
+/* End of CTRCON */
+
+} /* ctrcon_ */
diff --git a/contrib/libs/clapack/ctrevc.c b/contrib/libs/clapack/ctrevc.c
new file mode 100644
index 0000000000..05c60f4aad
--- /dev/null
+++ b/contrib/libs/clapack/ctrevc.c
@@ -0,0 +1,532 @@
+/* ctrevc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b2 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int ctrevc_(char *side, char *howmny, logical *select,
+ integer *n, complex *t, integer *ldt, complex *vl, integer *ldvl,
+ complex *vr, integer *ldvr, integer *mm, integer *m, complex *work,
+ real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+ i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, k, ii, ki, is;
+ real ulp;
+ logical allv;
+ real unfl, ovfl, smin;
+ logical over;
+ real scale;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *);
+ real remax;
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *);
+ logical leftv, bothv, somev;
+ extern /* Subroutine */ int slabad_(real *, real *);
+ extern integer icamax_(integer *, complex *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *), xerbla_(char *, integer *), clatrs_(char *, char *,
+ char *, char *, integer *, complex *, integer *, complex *, real *
+, real *, integer *);
+ extern doublereal scasum_(integer *, complex *, integer *);
+ logical rightv;
+ real smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTREVC computes some or all of the right and/or left eigenvectors of */
+/* a complex upper triangular matrix T. */
+/* Matrices of this type are produced by the Schur factorization of */
+/* a complex general matrix: A = Q*T*Q**H, as computed by CHSEQR. */
+
+/* The right eigenvector x and the left eigenvector y of T corresponding */
+/* to an eigenvalue w are defined by: */
+
+/* T*x = w*x, (y**H)*T = w*(y**H) */
+
+/* where y**H denotes the conjugate transpose of the vector y. */
+/* The eigenvalues are not input to this routine, but are read directly */
+/* from the diagonal of T. */
+
+/* This routine returns the matrices X and/or Y of right and left */
+/* eigenvectors of T, or the products Q*X and/or Q*Y, where Q is an */
+/* input matrix. If Q is the unitary factor that reduces a matrix A to */
+/* Schur form T, then Q*X and Q*Y are the matrices of right and left */
+/* eigenvectors of A. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'R': compute right eigenvectors only; */
+/* = 'L': compute left eigenvectors only; */
+/* = 'B': compute both right and left eigenvectors. */
+
+/* HOWMNY (input) CHARACTER*1 */
+/* = 'A': compute all right and/or left eigenvectors; */
+/* = 'B': compute all right and/or left eigenvectors, */
+/* backtransformed using the matrices supplied in */
+/* VR and/or VL; */
+/* = 'S': compute selected right and/or left eigenvectors, */
+/* as indicated by the logical array SELECT. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* If HOWMNY = 'S', SELECT specifies the eigenvectors to be */
+/* computed. */
+/* The eigenvector corresponding to the j-th eigenvalue is */
+/* computed if SELECT(j) = .TRUE.. */
+/* Not referenced if HOWMNY = 'A' or 'B'. */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. N >= 0. */
+
+/* T (input/output) COMPLEX array, dimension (LDT,N) */
+/* The upper triangular matrix T. T is modified, but restored */
+/* on exit. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max(1,N). */
+
+/* VL (input/output) COMPLEX array, dimension (LDVL,MM) */
+/* On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must */
+/* contain an N-by-N matrix Q (usually the unitary matrix Q of */
+/* Schur vectors returned by CHSEQR). */
+/* On exit, if SIDE = 'L' or 'B', VL contains: */
+/* if HOWMNY = 'A', the matrix Y of left eigenvectors of T; */
+/* if HOWMNY = 'B', the matrix Q*Y; */
+/* if HOWMNY = 'S', the left eigenvectors of T specified by */
+/* SELECT, stored consecutively in the columns */
+/* of VL, in the same order as their */
+/* eigenvalues. */
+/* Not referenced if SIDE = 'R'. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the array VL. LDVL >= 1, and if */
+/* SIDE = 'L' or 'B', LDVL >= N. */
+
+/* VR (input/output) COMPLEX array, dimension (LDVR,MM) */
+/* On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must */
+/* contain an N-by-N matrix Q (usually the unitary matrix Q of */
+/* Schur vectors returned by CHSEQR). */
+/* On exit, if SIDE = 'R' or 'B', VR contains: */
+/* if HOWMNY = 'A', the matrix X of right eigenvectors of T; */
+/* if HOWMNY = 'B', the matrix Q*X; */
+/* if HOWMNY = 'S', the right eigenvectors of T specified by */
+/* SELECT, stored consecutively in the columns */
+/* of VR, in the same order as their */
+/* eigenvalues. */
+/* Not referenced if SIDE = 'L'. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. LDVR >= 1, and if */
+/* SIDE = 'R' or 'B'; LDVR >= N. */
+
+/* MM (input) INTEGER */
+/* The number of columns in the arrays VL and/or VR. MM >= M. */
+
+/* M (output) INTEGER */
+/* The number of columns in the arrays VL and/or VR actually */
+/* used to store the eigenvectors. If HOWMNY = 'A' or 'B', M */
+/* is set to N. Each selected eigenvector occupies one */
+/* column. */
+
+/* WORK (workspace) COMPLEX array, dimension (2*N) */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The algorithm used in this program is basically backward (forward) */
+/* substitution, with scaling to make the code robust against */
+/* possible overflow. */
+
+/* Each eigenvector is normalized so that the element of largest */
+/* magnitude has magnitude 1; here the magnitude of a complex number */
+/* (x,y) is taken to be |x| + |y|. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+ /* Parameter adjustments */
+ --select;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ bothv = lsame_(side, "B");
+ rightv = lsame_(side, "R") || bothv;
+ leftv = lsame_(side, "L") || bothv;
+
+ allv = lsame_(howmny, "A");
+ over = lsame_(howmny, "B");
+ somev = lsame_(howmny, "S");
+
+/* Set M to the number of columns required to store the selected */
+/* eigenvectors. */
+
+ if (somev) {
+ *m = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (select[j]) {
+ ++(*m);
+ }
+/* L10: */
+ }
+ } else {
+ *m = *n;
+ }
+
+ *info = 0;
+ if (! rightv && ! leftv) {
+ *info = -1;
+ } else if (! allv && ! over && ! somev) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldt < max(1,*n)) {
+ *info = -6;
+ } else if (*ldvl < 1 || leftv && *ldvl < *n) {
+ *info = -8;
+ } else if (*ldvr < 1 || rightv && *ldvr < *n) {
+ *info = -10;
+ } else if (*mm < *m) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CTREVC", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Set the constants to control overflow. */
+
+ unfl = slamch_("Safe minimum");
+ ovfl = 1.f / unfl;
+ slabad_(&unfl, &ovfl);
+ ulp = slamch_("Precision");
+ smlnum = unfl * (*n / ulp);
+
+/* Store the diagonal elements of T in working array WORK. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + *n;
+ i__3 = i__ + i__ * t_dim1;
+ work[i__2].r = t[i__3].r, work[i__2].i = t[i__3].i;
+/* L20: */
+ }
+
+/* Compute 1-norm of each column of strictly upper triangular */
+/* part of T to control overflow in triangular solver. */
+
+ rwork[1] = 0.f;
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j - 1;
+ rwork[j] = scasum_(&i__2, &t[j * t_dim1 + 1], &c__1);
+/* L30: */
+ }
+
+ if (rightv) {
+
+/* Compute right eigenvectors. */
+
+ is = *m;
+ for (ki = *n; ki >= 1; --ki) {
+
+ if (somev) {
+ if (! select[ki]) {
+ goto L80;
+ }
+ }
+/* Computing MAX */
+ i__1 = ki + ki * t_dim1;
+ r__3 = ulp * ((r__1 = t[i__1].r, dabs(r__1)) + (r__2 = r_imag(&t[
+ ki + ki * t_dim1]), dabs(r__2)));
+ smin = dmax(r__3,smlnum);
+
+ work[1].r = 1.f, work[1].i = 0.f;
+
+/* Form right-hand side. */
+
+ i__1 = ki - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k;
+ i__3 = k + ki * t_dim1;
+ q__1.r = -t[i__3].r, q__1.i = -t[i__3].i;
+ work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+/* L40: */
+ }
+
+/* Solve the triangular system: */
+/* (T(1:KI-1,1:KI-1) - T(KI,KI))*X = SCALE*WORK. */
+
+ i__1 = ki - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k + k * t_dim1;
+ i__3 = k + k * t_dim1;
+ i__4 = ki + ki * t_dim1;
+ q__1.r = t[i__3].r - t[i__4].r, q__1.i = t[i__3].i - t[i__4]
+ .i;
+ t[i__2].r = q__1.r, t[i__2].i = q__1.i;
+ i__2 = k + k * t_dim1;
+ if ((r__1 = t[i__2].r, dabs(r__1)) + (r__2 = r_imag(&t[k + k *
+ t_dim1]), dabs(r__2)) < smin) {
+ i__3 = k + k * t_dim1;
+ t[i__3].r = smin, t[i__3].i = 0.f;
+ }
+/* L50: */
+ }
+
+ if (ki > 1) {
+ i__1 = ki - 1;
+ clatrs_("Upper", "No transpose", "Non-unit", "Y", &i__1, &t[
+ t_offset], ldt, &work[1], &scale, &rwork[1], info);
+ i__1 = ki;
+ work[i__1].r = scale, work[i__1].i = 0.f;
+ }
+
+/* Copy the vector x or Q*x to VR and normalize. */
+
+ if (! over) {
+ ccopy_(&ki, &work[1], &c__1, &vr[is * vr_dim1 + 1], &c__1);
+
+ ii = icamax_(&ki, &vr[is * vr_dim1 + 1], &c__1);
+ i__1 = ii + is * vr_dim1;
+ remax = 1.f / ((r__1 = vr[i__1].r, dabs(r__1)) + (r__2 =
+ r_imag(&vr[ii + is * vr_dim1]), dabs(r__2)));
+ csscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1);
+
+ i__1 = *n;
+ for (k = ki + 1; k <= i__1; ++k) {
+ i__2 = k + is * vr_dim1;
+ vr[i__2].r = 0.f, vr[i__2].i = 0.f;
+/* L60: */
+ }
+ } else {
+ if (ki > 1) {
+ i__1 = ki - 1;
+ q__1.r = scale, q__1.i = 0.f;
+ cgemv_("N", n, &i__1, &c_b2, &vr[vr_offset], ldvr, &work[
+ 1], &c__1, &q__1, &vr[ki * vr_dim1 + 1], &c__1);
+ }
+
+ ii = icamax_(n, &vr[ki * vr_dim1 + 1], &c__1);
+ i__1 = ii + ki * vr_dim1;
+ remax = 1.f / ((r__1 = vr[i__1].r, dabs(r__1)) + (r__2 =
+ r_imag(&vr[ii + ki * vr_dim1]), dabs(r__2)));
+ csscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1);
+ }
+
+/* Set back the original diagonal elements of T. */
+
+ i__1 = ki - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k + k * t_dim1;
+ i__3 = k + *n;
+ t[i__2].r = work[i__3].r, t[i__2].i = work[i__3].i;
+/* L70: */
+ }
+
+ --is;
+L80:
+ ;
+ }
+ }
+
+ if (leftv) {
+
+/* Compute left eigenvectors. */
+
+ is = 1;
+ i__1 = *n;
+ for (ki = 1; ki <= i__1; ++ki) {
+
+ if (somev) {
+ if (! select[ki]) {
+ goto L130;
+ }
+ }
+/* Computing MAX */
+ i__2 = ki + ki * t_dim1;
+ r__3 = ulp * ((r__1 = t[i__2].r, dabs(r__1)) + (r__2 = r_imag(&t[
+ ki + ki * t_dim1]), dabs(r__2)));
+ smin = dmax(r__3,smlnum);
+
+ i__2 = *n;
+ work[i__2].r = 1.f, work[i__2].i = 0.f;
+
+/* Form right-hand side. */
+
+ i__2 = *n;
+ for (k = ki + 1; k <= i__2; ++k) {
+ i__3 = k;
+ r_cnjg(&q__2, &t[ki + k * t_dim1]);
+ q__1.r = -q__2.r, q__1.i = -q__2.i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L90: */
+ }
+
+/* Solve the triangular system: */
+/* (T(KI+1:N,KI+1:N) - T(KI,KI))'*X = SCALE*WORK. */
+
+ i__2 = *n;
+ for (k = ki + 1; k <= i__2; ++k) {
+ i__3 = k + k * t_dim1;
+ i__4 = k + k * t_dim1;
+ i__5 = ki + ki * t_dim1;
+ q__1.r = t[i__4].r - t[i__5].r, q__1.i = t[i__4].i - t[i__5]
+ .i;
+ t[i__3].r = q__1.r, t[i__3].i = q__1.i;
+ i__3 = k + k * t_dim1;
+ if ((r__1 = t[i__3].r, dabs(r__1)) + (r__2 = r_imag(&t[k + k *
+ t_dim1]), dabs(r__2)) < smin) {
+ i__4 = k + k * t_dim1;
+ t[i__4].r = smin, t[i__4].i = 0.f;
+ }
+/* L100: */
+ }
+
+ if (ki < *n) {
+ i__2 = *n - ki;
+ clatrs_("Upper", "Conjugate transpose", "Non-unit", "Y", &
+ i__2, &t[ki + 1 + (ki + 1) * t_dim1], ldt, &work[ki +
+ 1], &scale, &rwork[1], info);
+ i__2 = ki;
+ work[i__2].r = scale, work[i__2].i = 0.f;
+ }
+
+/* Copy the vector x or Q*x to VL and normalize. */
+
+ if (! over) {
+ i__2 = *n - ki + 1;
+ ccopy_(&i__2, &work[ki], &c__1, &vl[ki + is * vl_dim1], &c__1)
+ ;
+
+ i__2 = *n - ki + 1;
+ ii = icamax_(&i__2, &vl[ki + is * vl_dim1], &c__1) + ki - 1;
+ i__2 = ii + is * vl_dim1;
+ remax = 1.f / ((r__1 = vl[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&vl[ii + is * vl_dim1]), dabs(r__2)));
+ i__2 = *n - ki + 1;
+ csscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1);
+
+ i__2 = ki - 1;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + is * vl_dim1;
+ vl[i__3].r = 0.f, vl[i__3].i = 0.f;
+/* L110: */
+ }
+ } else {
+ if (ki < *n) {
+ i__2 = *n - ki;
+ q__1.r = scale, q__1.i = 0.f;
+ cgemv_("N", n, &i__2, &c_b2, &vl[(ki + 1) * vl_dim1 + 1],
+ ldvl, &work[ki + 1], &c__1, &q__1, &vl[ki *
+ vl_dim1 + 1], &c__1);
+ }
+
+ ii = icamax_(n, &vl[ki * vl_dim1 + 1], &c__1);
+ i__2 = ii + ki * vl_dim1;
+ remax = 1.f / ((r__1 = vl[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&vl[ii + ki * vl_dim1]), dabs(r__2)));
+ csscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1);
+ }
+
+/* Set back the original diagonal elements of T. */
+
+ i__2 = *n;
+ for (k = ki + 1; k <= i__2; ++k) {
+ i__3 = k + k * t_dim1;
+ i__4 = k + *n;
+ t[i__3].r = work[i__4].r, t[i__3].i = work[i__4].i;
+/* L120: */
+ }
+
+ ++is;
+L130:
+ ;
+ }
+ }
+
+ return 0;
+
+/* End of CTREVC */
+
+} /* ctrevc_ */
diff --git a/contrib/libs/clapack/ctrexc.c b/contrib/libs/clapack/ctrexc.c
new file mode 100644
index 0000000000..7f292e42c1
--- /dev/null
+++ b/contrib/libs/clapack/ctrexc.c
@@ -0,0 +1,215 @@
+/* ctrexc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ctrexc_(char *compq, integer *n, complex *t, integer *
+	ldt, complex *q, integer *ldq, integer *ifst, integer *ilst, integer *
+	info)
+{
+    /* System generated locals */
+    integer q_dim1, q_offset, t_dim1, t_offset, i__1, i__2, i__3;
+    complex q__1;
+
+    /* Builtin functions */
+    void r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    integer k, m1, m2, m3;
+    real cs;
+    complex t11, t22, sn, temp;
+    extern /* Subroutine */ int crot_(integer *, complex *, integer *,
+	    complex *, integer *, real *, complex *);
+    extern logical lsame_(char *, char *);
+    logical wantq;
+    extern /* Subroutine */ int clartg_(complex *, complex *, real *, complex
+	    *, complex *), xerbla_(char *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CTREXC reorders the Schur factorization of a complex matrix */
+/*  A = Q*T*Q**H, so that the diagonal element of T with row index IFST */
+/*  is moved to row ILST. */
+
+/*  The Schur form T is reordered by a unitary similarity transformation */
+/*  Z**H*T*Z, and optionally the matrix Q of Schur vectors is updated by */
+/*  postmultiplying it with Z. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  COMPQ   (input) CHARACTER*1 */
+/*          = 'V':  update the matrix Q of Schur vectors; */
+/*          = 'N':  do not update Q. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix T. N >= 0. */
+
+/*  T       (input/output) COMPLEX array, dimension (LDT,N) */
+/*          On entry, the upper triangular matrix T. */
+/*          On exit, the reordered upper triangular matrix. */
+
+/*  LDT     (input) INTEGER */
+/*          The leading dimension of the array T. LDT >= max(1,N). */
+
+/*  Q       (input/output) COMPLEX array, dimension (LDQ,N) */
+/*          On entry, if COMPQ = 'V', the matrix Q of Schur vectors. */
+/*          On exit, if COMPQ = 'V', Q has been postmultiplied by the */
+/*          unitary transformation matrix Z which reorders T. */
+/*          If COMPQ = 'N', Q is not referenced. */
+
+/*  LDQ     (input) INTEGER */
+/*          The leading dimension of the array Q.  LDQ >= max(1,N). */
+
+/*  IFST    (input) INTEGER */
+/*  ILST    (input) INTEGER */
+/*          Specify the reordering of the diagonal elements of T: */
+/*          The element with row index IFST is moved to row ILST by a */
+/*          sequence of transpositions between adjacent elements. */
+/*          1 <= IFST <= N; 1 <= ILST <= N. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Decode and test the input parameters. */
+
+    /* Parameter adjustments: f2c shifts each array pointer so 1-based */
+    /* Fortran-style indexing t[i + j*t_dim1] works directly. */
+    t_dim1 = *ldt;
+    t_offset = 1 + t_dim1;
+    t -= t_offset;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+
+    /* Function Body */
+    *info = 0;
+    wantq = lsame_(compq, "V");
+    if (! lsame_(compq, "N") && ! wantq) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*ldt < max(1,*n)) {
+	*info = -4;
+    } else if (*ldq < 1 || wantq && *ldq < max(1,*n)) {
+	*info = -6;
+    } else if (*ifst < 1 || *ifst > *n) {
+	*info = -7;
+    } else if (*ilst < 1 || *ilst > *n) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CTREXC", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible: a 1-by-1 T, or IFST = ILST, needs no move. */
+
+    if (*n == 1 || *ifst == *ilst) {
+	return 0;
+    }
+
+    if (*ifst < *ilst) {
+
+/*        Move the IFST-th diagonal element forward down the diagonal */
+/*        (loop below runs with increasing k). */
+
+	m1 = 0;
+	m2 = -1;
+	m3 = 1;
+    } else {
+
+/*        Move the IFST-th diagonal element backward up the diagonal */
+/*        (loop below runs with decreasing k). */
+
+	m1 = -1;
+	m2 = 0;
+	m3 = -1;
+    }
+
+    i__1 = *ilst + m2;
+    i__2 = m3;
+    for (k = *ifst + m1; i__2 < 0 ? k >= i__1 : k <= i__1; k += i__2) {
+
+/*        Interchange the k-th and (k+1)-th diagonal elements. */
+
+	i__3 = k + k * t_dim1;
+	t11.r = t[i__3].r, t11.i = t[i__3].i;
+	i__3 = k + 1 + (k + 1) * t_dim1;
+	t22.r = t[i__3].r, t22.i = t[i__3].i;
+
+/*        Determine the plane rotation (CS,SN) from CLARTG that */
+/*        performs the interchange of the adjacent 1-by-1 blocks. */
+
+	q__1.r = t22.r - t11.r, q__1.i = t22.i - t11.i;
+	clartg_(&t[k + (k + 1) * t_dim1], &q__1, &cs, &sn, &temp);
+
+/*        Apply the rotation to T: to rows k,k+1 right of the 2-by-2 */
+/*        block, then (conjugated) to columns k,k+1 above it. */
+
+	if (k + 2 <= *n) {
+	    i__3 = *n - k - 1;
+	    crot_(&i__3, &t[k + (k + 2) * t_dim1], ldt, &t[k + 1 + (k + 2) *
+		    t_dim1], ldt, &cs, &sn);
+	}
+	i__3 = k - 1;
+	r_cnjg(&q__1, &sn);
+	crot_(&i__3, &t[k * t_dim1 + 1], &c__1, &t[(k + 1) * t_dim1 + 1], &
+		c__1, &cs, &q__1);
+
+	i__3 = k + k * t_dim1;
+	t[i__3].r = t22.r, t[i__3].i = t22.i;
+	i__3 = k + 1 + (k + 1) * t_dim1;
+	t[i__3].r = t11.r, t[i__3].i = t11.i;
+
+	if (wantq) {
+
+/*           Accumulate the transformation in the matrix Q of Schur vectors. */
+
+	    r_cnjg(&q__1, &sn);
+	    crot_(n, &q[k * q_dim1 + 1], &c__1, &q[(k + 1) * q_dim1 + 1], &
+		    c__1, &cs, &q__1);
+	}
+
+/* L10: */
+    }
+
+    return 0;
+
+/*     End of CTREXC */
+
+} /* ctrexc_ */
diff --git a/contrib/libs/clapack/ctrrfs.c b/contrib/libs/clapack/ctrrfs.c
new file mode 100644
index 0000000000..c7748a679f
--- /dev/null
+++ b/contrib/libs/clapack/ctrrfs.c
@@ -0,0 +1,562 @@
+/* ctrrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ctrrfs_(char *uplo, char *trans, char *diag, integer *n,
+	integer *nrhs, complex *a, integer *lda, complex *b, integer *ldb,
+	complex *x, integer *ldx, real *ferr, real *berr, complex *work, real
+	*rwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, x_dim1, x_offset, i__1, i__2,
+	    i__3, i__4, i__5;
+    real r__1, r__2, r__3, r__4;
+    complex q__1;
+
+    /* Builtin functions */
+    double r_imag(complex *);
+
+    /* Local variables */
+    integer i__, j, k;
+    real s, xk;
+    integer nz;
+    real eps;
+    integer kase;
+    real safe1, safe2;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+	    complex *, integer *), caxpy_(integer *, complex *, complex *,
+	    integer *, complex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int ctrmv_(char *, char *, char *, integer *,
+	    complex *, integer *, complex *, integer *), ctrsv_(char *, char *, char *, integer *, complex *,
+	    integer *, complex *, integer *), clacn2_(
+	    integer *, complex *, complex *, real *, integer *, integer *);
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical notran;
+    char transn[1], transt[1];
+    logical nounit;
+    real lstres;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  CTRRFS provides error bounds and backward error estimates for the */
+/*  solution to a system of linear equations with a triangular */
+/*  coefficient matrix. */
+
+/*  The solution matrix X must be computed by CTRTRS or some other */
+/*  means before entering this routine.  CTRRFS does not do iterative */
+/*  refinement because doing so cannot improve the backward error. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  A is upper triangular; */
+/*          = 'L':  A is lower triangular. */
+
+/*  TRANS   (input) CHARACTER*1 */
+/*          Specifies the form of the system of equations: */
+/*          = 'N':  A * X = B     (No transpose) */
+/*          = 'T':  A**T * X = B  (Transpose) */
+/*          = 'C':  A**H * X = B  (Conjugate transpose) */
+
+/*  DIAG    (input) CHARACTER*1 */
+/*          = 'N':  A is non-unit triangular; */
+/*          = 'U':  A is unit triangular. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrices B and X.  NRHS >= 0. */
+
+/*  A       (input) COMPLEX array, dimension (LDA,N) */
+/*          The triangular matrix A.  If UPLO = 'U', the leading N-by-N */
+/*          upper triangular part of the array A contains the upper */
+/*          triangular matrix, and the strictly lower triangular part of */
+/*          A is not referenced.  If UPLO = 'L', the leading N-by-N lower */
+/*          triangular part of the array A contains the lower triangular */
+/*          matrix, and the strictly upper triangular part of A is not */
+/*          referenced.  If DIAG = 'U', the diagonal elements of A are */
+/*          also not referenced and are assumed to be 1. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  B       (input) COMPLEX array, dimension (LDB,NRHS) */
+/*          The right hand side matrix B. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  X       (input) COMPLEX array, dimension (LDX,NRHS) */
+/*          The solution matrix X. */
+
+/*  LDX     (input) INTEGER */
+/*          The leading dimension of the array X.  LDX >= max(1,N). */
+
+/*  FERR    (output) REAL array, dimension (NRHS) */
+/*          The estimated forward error bound for each solution vector */
+/*          X(j) (the j-th column of the solution matrix X). */
+/*          If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/*          is an estimated upper bound for the magnitude of the largest */
+/*          element in (X(j) - XTRUE) divided by the magnitude of the */
+/*          largest element in X(j).  The estimate is as reliable as */
+/*          the estimate for RCOND, and is almost always a slight */
+/*          overestimate of the true error. */
+
+/*  BERR    (output) REAL array, dimension (NRHS) */
+/*          The componentwise relative backward error of each solution */
+/*          vector X(j) (i.e., the smallest relative change in */
+/*          any element of A or B that makes X(j) an exact solution). */
+
+/*  WORK    (workspace) COMPLEX array, dimension (2*N) */
+
+/*  RWORK   (workspace) REAL array, dimension (N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Statement Functions .. */
+/*     .. */
+/*     .. Statement Function definitions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments: f2c shifts each array pointer so 1-based */
+    /* Fortran-style indexing a[i + j*a_dim1] works directly. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    notran = lsame_(trans, "N");
+    nounit = lsame_(diag, "N");
+
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (! notran && ! lsame_(trans, "T") && !
+	    lsame_(trans, "C")) {
+	*info = -2;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*nrhs < 0) {
+	*info = -5;
+    } else if (*lda < max(1,*n)) {
+	*info = -7;
+    } else if (*ldb < max(1,*n)) {
+	*info = -9;
+    } else if (*ldx < max(1,*n)) {
+	*info = -11;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CTRRFS", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible: with no rows or no right hand sides the */
+/*     error bounds are exactly zero. */
+
+    if (*n == 0 || *nrhs == 0) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] = 0.f;
+	    berr[j] = 0.f;
+/* L10: */
+	}
+	return 0;
+    }
+
+    if (notran) {
+	*(unsigned char *)transn = 'N';
+	*(unsigned char *)transt = 'C';
+    } else {
+	*(unsigned char *)transn = 'C';
+	*(unsigned char *)transt = 'N';
+    }
+
+/*     NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+    nz = *n + 1;
+    eps = slamch_("Epsilon");
+    safmin = slamch_("Safe minimum");
+    safe1 = nz * safmin;
+    safe2 = safe1 / eps;
+
+/*     Do for each right hand side: residual, backward error BERR(j), */
+/*     then forward error bound FERR(j). */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+/*        Compute residual R = B - op(A) * X, */
+/*        where op(A) = A, A**T, or A**H, depending on TRANS. */
+
+	ccopy_(n, &x[j * x_dim1 + 1], &c__1, &work[1], &c__1);
+	ctrmv_(uplo, trans, diag, n, &a[a_offset], lda, &work[1], &c__1);
+	q__1.r = -1.f, q__1.i = -0.f;
+	caxpy_(n, &q__1, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+
+/*        Compute componentwise relative backward error from formula */
+
+/*        max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/*        where abs(Z) is the componentwise absolute value of the matrix */
+/*        or vector Z.  If the i-th component of the denominator is less */
+/*        than SAFE2, then SAFE1 is added to the i-th components of the */
+/*        numerator and denominator before dividing. */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * b_dim1;
+	    rwork[i__] = (r__1 = b[i__3].r, dabs(r__1)) + (r__2 = r_imag(&b[
+		    i__ + j * b_dim1]), dabs(r__2));
+/* L20: */
+	}
+
+	if (notran) {
+
+/*           Compute abs(A)*abs(X) + abs(B). */
+
+	    if (upper) {
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&
+				x[k + j * x_dim1]), dabs(r__2));
+			i__3 = k;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    i__4 = i__ + k * a_dim1;
+			    rwork[i__] += ((r__1 = a[i__4].r, dabs(r__1)) + (
+				    r__2 = r_imag(&a[i__ + k * a_dim1]), dabs(
+				    r__2))) * xk;
+/* L30: */
+			}
+/* L40: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&
+				x[k + j * x_dim1]), dabs(r__2));
+			i__3 = k - 1;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    i__4 = i__ + k * a_dim1;
+			    rwork[i__] += ((r__1 = a[i__4].r, dabs(r__1)) + (
+				    r__2 = r_imag(&a[i__ + k * a_dim1]), dabs(
+				    r__2))) * xk;
+/* L50: */
+			}
+			rwork[k] += xk;
+/* L60: */
+		    }
+		}
+	    } else {
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&
+				x[k + j * x_dim1]), dabs(r__2));
+			i__3 = *n;
+			for (i__ = k; i__ <= i__3; ++i__) {
+			    i__4 = i__ + k * a_dim1;
+			    rwork[i__] += ((r__1 = a[i__4].r, dabs(r__1)) + (
+				    r__2 = r_imag(&a[i__ + k * a_dim1]), dabs(
+				    r__2))) * xk;
+/* L70: */
+			}
+/* L80: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			xk = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&
+				x[k + j * x_dim1]), dabs(r__2));
+			i__3 = *n;
+			for (i__ = k + 1; i__ <= i__3; ++i__) {
+			    i__4 = i__ + k * a_dim1;
+			    rwork[i__] += ((r__1 = a[i__4].r, dabs(r__1)) + (
+				    r__2 = r_imag(&a[i__ + k * a_dim1]), dabs(
+				    r__2))) * xk;
+/* L90: */
+			}
+			rwork[k] += xk;
+/* L100: */
+		    }
+		}
+	    }
+	} else {
+
+/*           Compute abs(A**H)*abs(X) + abs(B). */
+
+	    if (upper) {
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			s = 0.f;
+			i__3 = k;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    i__4 = i__ + k * a_dim1;
+			    i__5 = i__ + j * x_dim1;
+			    s += ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 =
+				    r_imag(&a[i__ + k * a_dim1]), dabs(r__2)))
+				     * ((r__3 = x[i__5].r, dabs(r__3)) + (
+				    r__4 = r_imag(&x[i__ + j * x_dim1]), dabs(
+				    r__4)));
+/* L110: */
+			}
+			rwork[k] += s;
+/* L120: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			s = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&
+				x[k + j * x_dim1]), dabs(r__2));
+			i__3 = k - 1;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    i__4 = i__ + k * a_dim1;
+			    i__5 = i__ + j * x_dim1;
+			    s += ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 =
+				    r_imag(&a[i__ + k * a_dim1]), dabs(r__2)))
+				     * ((r__3 = x[i__5].r, dabs(r__3)) + (
+				    r__4 = r_imag(&x[i__ + j * x_dim1]), dabs(
+				    r__4)));
+/* L130: */
+			}
+			rwork[k] += s;
+/* L140: */
+		    }
+		}
+	    } else {
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			s = 0.f;
+			i__3 = *n;
+			for (i__ = k; i__ <= i__3; ++i__) {
+			    i__4 = i__ + k * a_dim1;
+			    i__5 = i__ + j * x_dim1;
+			    s += ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 =
+				    r_imag(&a[i__ + k * a_dim1]), dabs(r__2)))
+				     * ((r__3 = x[i__5].r, dabs(r__3)) + (
+				    r__4 = r_imag(&x[i__ + j * x_dim1]), dabs(
+				    r__4)));
+/* L150: */
+			}
+			rwork[k] += s;
+/* L160: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			s = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&
+				x[k + j * x_dim1]), dabs(r__2));
+			i__3 = *n;
+			for (i__ = k + 1; i__ <= i__3; ++i__) {
+			    i__4 = i__ + k * a_dim1;
+			    i__5 = i__ + j * x_dim1;
+			    s += ((r__1 = a[i__4].r, dabs(r__1)) + (r__2 =
+				    r_imag(&a[i__ + k * a_dim1]), dabs(r__2)))
+				     * ((r__3 = x[i__5].r, dabs(r__3)) + (
+				    r__4 = r_imag(&x[i__ + j * x_dim1]), dabs(
+				    r__4)));
+/* L170: */
+			}
+			rwork[k] += s;
+/* L180: */
+		    }
+		}
+	    }
+	}
+	s = 0.f;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (rwork[i__] > safe2) {
+/* Computing MAX */
+		i__3 = i__;
+		r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+			r_imag(&work[i__]), dabs(r__2))) / rwork[i__];
+		s = dmax(r__3,r__4);
+	    } else {
+/* Computing MAX */
+		i__3 = i__;
+		r__3 = s, r__4 = ((r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+			r_imag(&work[i__]), dabs(r__2)) + safe1) / (rwork[i__]
+			 + safe1);
+		s = dmax(r__3,r__4);
+	    }
+/* L190: */
+	}
+	berr[j] = s;
+
+/*        Bound error from formula */
+
+/*        norm(X - XTRUE) / norm(X) .le. FERR = */
+/*        norm( abs(inv(op(A)))* */
+/*           ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/*        where */
+/*          norm(Z) is the magnitude of the largest component of Z */
+/*          inv(op(A)) is the inverse of op(A) */
+/*          abs(Z) is the componentwise absolute value of the matrix or */
+/*             vector Z */
+/*          NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/*          EPS is machine epsilon */
+
+/*        The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/*        is incremented by SAFE1 if the i-th component of */
+/*        abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/*        Use CLACN2 to estimate the infinity-norm of the matrix */
+/*           inv(op(A)) * diag(W), */
+/*        where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (rwork[i__] > safe2) {
+		i__3 = i__;
+		rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+			r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+			i__];
+	    } else {
+		i__3 = i__;
+		rwork[i__] = (r__1 = work[i__3].r, dabs(r__1)) + (r__2 =
+			r_imag(&work[i__]), dabs(r__2)) + nz * eps * rwork[
+			i__] + safe1;
+	    }
+/* L200: */
+	}
+
+/*        Reverse-communication loop: CLACN2 is re-entered via goto L210 */
+/*        until it returns kase == 0 with the estimate in FERR(j). */
+	kase = 0;
+L210:
+	clacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+	if (kase != 0) {
+	    if (kase == 1) {
+
+/*              Multiply by diag(W)*inv(op(A)**H). */
+
+		ctrsv_(uplo, transt, diag, n, &a[a_offset], lda, &work[1], &
+			c__1);
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__;
+		    q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4]
+			    * work[i__5].i;
+		    work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L220: */
+		}
+	    } else {
+
+/*              Multiply by inv(op(A))*diag(W). */
+
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__;
+		    q__1.r = rwork[i__4] * work[i__5].r, q__1.i = rwork[i__4]
+			    * work[i__5].i;
+		    work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L230: */
+		}
+		ctrsv_(uplo, transn, diag, n, &a[a_offset], lda, &work[1], &
+			c__1);
+	    }
+	    goto L210;
+	}
+
+/*        Normalize error: divide FERR(j) by the max-norm of column X(j). */
+
+	lstres = 0.f;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    i__3 = i__ + j * x_dim1;
+	    r__3 = lstres, r__4 = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+		    r_imag(&x[i__ + j * x_dim1]), dabs(r__2));
+	    lstres = dmax(r__3,r__4);
+/* L240: */
+	}
+	if (lstres != 0.f) {
+	    ferr[j] /= lstres;
+	}
+
+/* L250: */
+    }
+
+    return 0;
+
+/*     End of CTRRFS */
+
+} /* ctrrfs_ */
diff --git a/contrib/libs/clapack/ctrsen.c b/contrib/libs/clapack/ctrsen.c
new file mode 100644
index 0000000000..7e455871e5
--- /dev/null
+++ b/contrib/libs/clapack/ctrsen.c
@@ -0,0 +1,422 @@
+/* ctrsen.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c_n1 = -1;
+
+/* Subroutine */ int ctrsen_(char *job, char *compq, logical *select, integer
+ *n, complex *t, integer *ldt, complex *q, integer *ldq, complex *w,
+ integer *m, real *s, real *sep, complex *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, t_dim1, t_offset, i__1, i__2, i__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer k, n1, n2, nn, ks;
+ real est;
+ integer kase, ierr;
+ real scale;
+ extern logical lsame_(char *, char *);
+ integer isave[3], lwmin;
+ logical wantq, wants;
+ real rnorm;
+ extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+ *, integer *, integer *);
+ real rwork[1];
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), xerbla_(char *,
+ integer *);
+ logical wantbh;
+ extern /* Subroutine */ int ctrexc_(char *, integer *, complex *, integer
+ *, complex *, integer *, integer *, integer *, integer *);
+ logical wantsp;
+ extern /* Subroutine */ int ctrsyl_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *, real *, integer *);
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTRSEN reorders the Schur factorization of a complex matrix */
+/* A = Q*T*Q**H, so that a selected cluster of eigenvalues appears in */
+/* the leading positions on the diagonal of the upper triangular matrix */
+/* T, and the leading columns of Q form an orthonormal basis of the */
+/* corresponding right invariant subspace. */
+
+/* Optionally the routine computes the reciprocal condition numbers of */
+/* the cluster of eigenvalues and/or the invariant subspace. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies whether condition numbers are required for the */
+/* cluster of eigenvalues (S) or the invariant subspace (SEP): */
+/* = 'N': none; */
+/* = 'E': for eigenvalues only (S); */
+/* = 'V': for invariant subspace only (SEP); */
+/* = 'B': for both eigenvalues and invariant subspace (S and */
+/* SEP). */
+
+/* COMPQ (input) CHARACTER*1 */
+/* = 'V': update the matrix Q of Schur vectors; */
+/* = 'N': do not update Q. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* SELECT specifies the eigenvalues in the selected cluster. To */
+/* select the j-th eigenvalue, SELECT(j) must be set to .TRUE.. */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. N >= 0. */
+
+/* T (input/output) COMPLEX array, dimension (LDT,N) */
+/* On entry, the upper triangular matrix T. */
+/* On exit, T is overwritten by the reordered matrix T, with the */
+/* selected eigenvalues as the leading diagonal elements. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max(1,N). */
+
+/* Q (input/output) COMPLEX array, dimension (LDQ,N) */
+/* On entry, if COMPQ = 'V', the matrix Q of Schur vectors. */
+/* On exit, if COMPQ = 'V', Q has been postmultiplied by the */
+/* unitary transformation matrix which reorders T; the leading M */
+/* columns of Q form an orthonormal basis for the specified */
+/* invariant subspace. */
+/* If COMPQ = 'N', Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. */
+/* LDQ >= 1; and if COMPQ = 'V', LDQ >= N. */
+
+/* W (output) COMPLEX array, dimension (N) */
+/* The reordered eigenvalues of T, in the same order as they */
+/* appear on the diagonal of T. */
+
+/* M (output) INTEGER */
+/* The dimension of the specified invariant subspace. */
+/* 0 <= M <= N. */
+
+/* S (output) REAL */
+/* If JOB = 'E' or 'B', S is a lower bound on the reciprocal */
+/* condition number for the selected cluster of eigenvalues. */
+/* S cannot underestimate the true reciprocal condition number */
+/* by more than a factor of sqrt(N). If M = 0 or N, S = 1. */
+/* If JOB = 'N' or 'V', S is not referenced. */
+
+/* SEP (output) REAL */
+/* If JOB = 'V' or 'B', SEP is the estimated reciprocal */
+/* condition number of the specified invariant subspace. If */
+/* M = 0 or N, SEP = norm(T). */
+/* If JOB = 'N' or 'E', SEP is not referenced. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If JOB = 'N', LWORK >= 1; */
+/* if JOB = 'E', LWORK = max(1,M*(N-M)); */
+/* if JOB = 'V' or 'B', LWORK >= max(1,2*M*(N-M)). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* CTRSEN first collects the selected eigenvalues by computing a unitary */
+/* transformation Z to move them to the top left corner of T. In other */
+/* words, the selected eigenvalues are the eigenvalues of T11 in: */
+
+/* Z'*T*Z = ( T11 T12 ) n1 */
+/* ( 0 T22 ) n2 */
+/* n1 n2 */
+
+/* where N = n1+n2 and Z' means the conjugate transpose of Z. The first */
+/* n1 columns of Z span the specified invariant subspace of T. */
+
+/* If T has been obtained from the Schur factorization of a matrix */
+/* A = Q*T*Q', then the reordered Schur factorization of A is given by */
+/* A = (Q*Z)*(Z'*T*Z)*(Q*Z)', and the first n1 columns of Q*Z span the */
+/* corresponding invariant subspace of A. */
+
+/* The reciprocal condition number of the average of the eigenvalues of */
+/* T11 may be returned in S. S lies between 0 (very badly conditioned) */
+/* and 1 (very well conditioned). It is computed as follows. First we */
+/* compute R so that */
+
+/* P = ( I R ) n1 */
+/* ( 0 0 ) n2 */
+/* n1 n2 */
+
+/* is the projector on the invariant subspace associated with T11. */
+/* R is the solution of the Sylvester equation: */
+
+/* T11*R - R*T22 = T12. */
+
+/* Let F-norm(M) denote the Frobenius-norm of M and 2-norm(M) denote */
+/* the two-norm of M. Then S is computed as the lower bound */
+
+/* (1 + F-norm(R)**2)**(-1/2) */
+
+/* on the reciprocal of 2-norm(P), the true reciprocal condition number. */
+/* S cannot underestimate 1 / 2-norm(P) by more than a factor of */
+/* sqrt(N). */
+
+/* An approximate error bound for the computed average of the */
+/* eigenvalues of T11 is */
+
+/* EPS * norm(T) / S */
+
+/* where EPS is the machine precision. */
+
+/* The reciprocal condition number of the right invariant subspace */
+/* spanned by the first n1 columns of Z (or of Q*Z) is returned in SEP. */
+/* SEP is defined as the separation of T11 and T22: */
+
+/* sep( T11, T22 ) = sigma-min( C ) */
+
+/* where sigma-min(C) is the smallest singular value of the */
+/* n1*n2-by-n1*n2 matrix */
+
+/* C = kprod( I(n2), T11 ) - kprod( transpose(T22), I(n1) ) */
+
+/* I(m) is an m by m identity matrix, and kprod denotes the Kronecker */
+/* product. We estimate sigma-min(C) by the reciprocal of an estimate of */
+/* the 1-norm of inverse(C). The true reciprocal 1-norm of inverse(C) */
+/* cannot differ from sigma-min(C) by more than a factor of sqrt(n1*n2). */
+
+/* When SEP is small, small changes in T can cause large changes in */
+/* the invariant subspace. An approximate bound on the maximum angular */
+/* error in the computed right invariant subspace is */
+
+/* EPS * norm(T) / SEP */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters. */
+
+ /* Parameter adjustments */
+ --select;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --w;
+ --work;
+
+ /* Function Body */
+ wantbh = lsame_(job, "B");
+ wants = lsame_(job, "E") || wantbh;
+ wantsp = lsame_(job, "V") || wantbh;
+ wantq = lsame_(compq, "V");
+
+/* Set M to the number of selected eigenvalues. */
+
+ *m = 0;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (select[k]) {
+ ++(*m);
+ }
+/* L10: */
+ }
+
+ n1 = *m;
+ n2 = *n - *m;
+ nn = n1 * n2;
+
+ *info = 0;
+ lquery = *lwork == -1;
+
+ if (wantsp) {
+/* Computing MAX */
+ i__1 = 1, i__2 = nn << 1;
+ lwmin = max(i__1,i__2);
+ } else if (lsame_(job, "N")) {
+ lwmin = 1;
+ } else if (lsame_(job, "E")) {
+ lwmin = max(1,nn);
+ }
+
+ if (! lsame_(job, "N") && ! wants && ! wantsp) {
+ *info = -1;
+ } else if (! lsame_(compq, "N") && ! wantq) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldt < max(1,*n)) {
+ *info = -6;
+ } else if (*ldq < 1 || wantq && *ldq < *n) {
+ *info = -8;
+ } else if (*lwork < lwmin && ! lquery) {
+ *info = -14;
+ }
+
+ if (*info == 0) {
+ work[1].r = (real) lwmin, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CTRSEN", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == *n || *m == 0) {
+ if (wants) {
+ *s = 1.f;
+ }
+ if (wantsp) {
+ *sep = clange_("1", n, n, &t[t_offset], ldt, rwork);
+ }
+ goto L40;
+ }
+
+/* Collect the selected eigenvalues at the top left corner of T. */
+
+ ks = 0;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (select[k]) {
+ ++ks;
+
+/* Swap the K-th eigenvalue to position KS. */
+
+ if (k != ks) {
+ ctrexc_(compq, n, &t[t_offset], ldt, &q[q_offset], ldq, &k, &
+ ks, &ierr);
+ }
+ }
+/* L20: */
+ }
+
+ if (wants) {
+
+/* Solve the Sylvester equation for R: */
+
+/* T11*R - R*T22 = scale*T12 */
+
+ clacpy_("F", &n1, &n2, &t[(n1 + 1) * t_dim1 + 1], ldt, &work[1], &n1);
+ ctrsyl_("N", "N", &c_n1, &n1, &n2, &t[t_offset], ldt, &t[n1 + 1 + (n1
+ + 1) * t_dim1], ldt, &work[1], &n1, &scale, &ierr);
+
+/* Estimate the reciprocal of the condition number of the cluster */
+/* of eigenvalues. */
+
+ rnorm = clange_("F", &n1, &n2, &work[1], &n1, rwork);
+ if (rnorm == 0.f) {
+ *s = 1.f;
+ } else {
+ *s = scale / (sqrt(scale * scale / rnorm + rnorm) * sqrt(rnorm));
+ }
+ }
+
+ if (wantsp) {
+
+/* Estimate sep(T11,T22). */
+
+ est = 0.f;
+ kase = 0;
+L30:
+ clacn2_(&nn, &work[nn + 1], &work[1], &est, &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Solve T11*R - R*T22 = scale*X. */
+
+ ctrsyl_("N", "N", &c_n1, &n1, &n2, &t[t_offset], ldt, &t[n1 +
+ 1 + (n1 + 1) * t_dim1], ldt, &work[1], &n1, &scale, &
+ ierr);
+ } else {
+
+/* Solve T11'*R - R*T22' = scale*X. */
+
+ ctrsyl_("C", "C", &c_n1, &n1, &n2, &t[t_offset], ldt, &t[n1 +
+ 1 + (n1 + 1) * t_dim1], ldt, &work[1], &n1, &scale, &
+ ierr);
+ }
+ goto L30;
+ }
+
+ *sep = scale / est;
+ }
+
+L40:
+
+/* Copy reordered eigenvalues to W. */
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k;
+ i__3 = k + k * t_dim1;
+ w[i__2].r = t[i__3].r, w[i__2].i = t[i__3].i;
+/* L50: */
+ }
+
+ work[1].r = (real) lwmin, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CTRSEN */
+
+} /* ctrsen_ */
diff --git a/contrib/libs/clapack/ctrsna.c b/contrib/libs/clapack/ctrsna.c
new file mode 100644
index 0000000000..0ca945733c
--- /dev/null
+++ b/contrib/libs/clapack/ctrsna.c
@@ -0,0 +1,445 @@
+/* ctrsna.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ctrsna_(char *job, char *howmny, logical *select,
+ integer *n, complex *t, integer *ldt, complex *vl, integer *ldvl,
+ complex *vr, integer *ldvr, real *s, real *sep, integer *mm, integer *
+ m, complex *work, integer *ldwork, real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset,
+ work_dim1, work_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2;
+ complex q__1;
+
+ /* Builtin functions */
+ double c_abs(complex *), r_imag(complex *);
+
+ /* Local variables */
+ integer i__, j, k, ks, ix;
+ real eps, est;
+ integer kase, ierr;
+ complex prod;
+ real lnrm, rnrm, scale;
+ extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ complex dummy[1];
+ logical wants;
+ extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real
+ *, integer *, integer *);
+ real xnorm;
+ extern doublereal scnrm2_(integer *, complex *, integer *);
+ extern /* Subroutine */ int slabad_(real *, real *);
+ extern integer icamax_(integer *, complex *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), xerbla_(char *,
+ integer *);
+ real bignum;
+ logical wantbh;
+ extern /* Subroutine */ int clatrs_(char *, char *, char *, char *,
+ integer *, complex *, integer *, complex *, real *, real *,
+ integer *), csrscl_(integer *,
+ real *, complex *, integer *), ctrexc_(char *, integer *, complex
+ *, integer *, complex *, integer *, integer *, integer *, integer
+ *);
+ logical somcon;
+ char normin[1];
+ real smlnum;
+ logical wantsp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTRSNA estimates reciprocal condition numbers for specified */
+/* eigenvalues and/or right eigenvectors of a complex upper triangular */
+/* matrix T (or of any matrix Q*T*Q**H with Q unitary). */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies whether condition numbers are required for */
+/* eigenvalues (S) or eigenvectors (SEP): */
+/* = 'E': for eigenvalues only (S); */
+/* = 'V': for eigenvectors only (SEP); */
+/* = 'B': for both eigenvalues and eigenvectors (S and SEP). */
+
+/* HOWMNY (input) CHARACTER*1 */
+/* = 'A': compute condition numbers for all eigenpairs; */
+/* = 'S': compute condition numbers for selected eigenpairs */
+/* specified by the array SELECT. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* If HOWMNY = 'S', SELECT specifies the eigenpairs for which */
+/* condition numbers are required. To select condition numbers */
+/* for the j-th eigenpair, SELECT(j) must be set to .TRUE.. */
+/* If HOWMNY = 'A', SELECT is not referenced. */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. N >= 0. */
+
+/* T (input) COMPLEX array, dimension (LDT,N) */
+/* The upper triangular matrix T. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max(1,N). */
+
+/* VL (input) COMPLEX array, dimension (LDVL,M) */
+/* If JOB = 'E' or 'B', VL must contain left eigenvectors of T */
+/* (or of any Q*T*Q**H with Q unitary), corresponding to the */
+/* eigenpairs specified by HOWMNY and SELECT. The eigenvectors */
+/* must be stored in consecutive columns of VL, as returned by */
+/* CHSEIN or CTREVC. */
+/* If JOB = 'V', VL is not referenced. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the array VL. */
+/* LDVL >= 1; and if JOB = 'E' or 'B', LDVL >= N. */
+
+/* VR (input) COMPLEX array, dimension (LDVR,M) */
+/* If JOB = 'E' or 'B', VR must contain right eigenvectors of T */
+/* (or of any Q*T*Q**H with Q unitary), corresponding to the */
+/* eigenpairs specified by HOWMNY and SELECT. The eigenvectors */
+/* must be stored in consecutive columns of VR, as returned by */
+/* CHSEIN or CTREVC. */
+/* If JOB = 'V', VR is not referenced. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. */
+/* LDVR >= 1; and if JOB = 'E' or 'B', LDVR >= N. */
+
+/* S (output) REAL array, dimension (MM) */
+/* If JOB = 'E' or 'B', the reciprocal condition numbers of the */
+/* selected eigenvalues, stored in consecutive elements of the */
+/* array. Thus S(j), SEP(j), and the j-th columns of VL and VR */
+/* all correspond to the same eigenpair (but not in general the */
+/* j-th eigenpair, unless all eigenpairs are selected). */
+/* If JOB = 'V', S is not referenced. */
+
+/* SEP (output) REAL array, dimension (MM) */
+/* If JOB = 'V' or 'B', the estimated reciprocal condition */
+/* numbers of the selected eigenvectors, stored in consecutive */
+/* elements of the array. */
+/* If JOB = 'E', SEP is not referenced. */
+
+/* MM (input) INTEGER */
+/* The number of elements in the arrays S (if JOB = 'E' or 'B') */
+/* and/or SEP (if JOB = 'V' or 'B'). MM >= M. */
+
+/* M (output) INTEGER */
+/* The number of elements of the arrays S and/or SEP actually */
+/* used to store the estimated condition numbers. */
+/* If HOWMNY = 'A', M is set to N. */
+
+/* WORK (workspace) COMPLEX array, dimension (LDWORK,N+6) */
+/* If JOB = 'E', WORK is not referenced. */
+
+/* LDWORK (input) INTEGER */
+/* The leading dimension of the array WORK. */
+/* LDWORK >= 1; and if JOB = 'V' or 'B', LDWORK >= N. */
+
+/* RWORK (workspace) REAL array, dimension (N) */
+/* If JOB = 'E', RWORK is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The reciprocal of the condition number of an eigenvalue lambda is */
+/* defined as */
+
+/* S(lambda) = |v'*u| / (norm(u)*norm(v)) */
+
+/* where u and v are the right and left eigenvectors of T corresponding */
+/* to lambda; v' denotes the conjugate transpose of v, and norm(u) */
+/* denotes the Euclidean norm. These reciprocal condition numbers always */
+/* lie between zero (very badly conditioned) and one (very well */
+/* conditioned). If n = 1, S(lambda) is defined to be 1. */
+
+/* An approximate error bound for a computed eigenvalue W(i) is given by */
+
+/* EPS * norm(T) / S(i) */
+
+/* where EPS is the machine precision. */
+
+/* The reciprocal of the condition number of the right eigenvector u */
+/* corresponding to lambda is defined as follows. Suppose */
+
+/* T = ( lambda c ) */
+/* ( 0 T22 ) */
+
+/* Then the reciprocal condition number is */
+
+/* SEP( lambda, T22 ) = sigma-min( T22 - lambda*I ) */
+
+/* where sigma-min denotes the smallest singular value. We approximate */
+/* the smallest singular value by the reciprocal of an estimate of the */
+/* one-norm of the inverse of T22 - lambda*I. If n = 1, SEP(1) is */
+/* defined to be abs(T(1,1)). */
+
+/* An approximate error bound for a computed right eigenvector VR(i) */
+/* is given by */
+
+/* EPS * norm(T) / SEP(i) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+ /* Parameter adjustments */
+ --select;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --s;
+ --sep;
+ work_dim1 = *ldwork;
+ work_offset = 1 + work_dim1;
+ work -= work_offset;
+ --rwork;
+
+ /* Function Body */
+ wantbh = lsame_(job, "B");
+ wants = lsame_(job, "E") || wantbh;
+ wantsp = lsame_(job, "V") || wantbh;
+
+ somcon = lsame_(howmny, "S");
+
+/* Set M to the number of eigenpairs for which condition numbers are */
+/* to be computed. */
+
+ if (somcon) {
+ *m = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (select[j]) {
+ ++(*m);
+ }
+/* L10: */
+ }
+ } else {
+ *m = *n;
+ }
+
+ *info = 0;
+ if (! wants && ! wantsp) {
+ *info = -1;
+ } else if (! lsame_(howmny, "A") && ! somcon) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldt < max(1,*n)) {
+ *info = -6;
+ } else if (*ldvl < 1 || wants && *ldvl < *n) {
+ *info = -8;
+ } else if (*ldvr < 1 || wants && *ldvr < *n) {
+ *info = -10;
+ } else if (*mm < *m) {
+ *info = -13;
+ } else if (*ldwork < 1 || wantsp && *ldwork < *n) {
+ *info = -16;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CTRSNA", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (somcon) {
+ if (! select[1]) {
+ return 0;
+ }
+ }
+ if (wants) {
+ s[1] = 1.f;
+ }
+ if (wantsp) {
+ sep[1] = c_abs(&t[t_dim1 + 1]);
+ }
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S") / eps;
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+
+ ks = 1;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+
+ if (somcon) {
+ if (! select[k]) {
+ goto L50;
+ }
+ }
+
+ if (wants) {
+
+/* Compute the reciprocal condition number of the k-th */
+/* eigenvalue. */
+
+ cdotc_(&q__1, n, &vr[ks * vr_dim1 + 1], &c__1, &vl[ks * vl_dim1 +
+ 1], &c__1);
+ prod.r = q__1.r, prod.i = q__1.i;
+ rnrm = scnrm2_(n, &vr[ks * vr_dim1 + 1], &c__1);
+ lnrm = scnrm2_(n, &vl[ks * vl_dim1 + 1], &c__1);
+ s[ks] = c_abs(&prod) / (rnrm * lnrm);
+
+ }
+
+ if (wantsp) {
+
+/* Estimate the reciprocal condition number of the k-th */
+/* eigenvector. */
+
+/* Copy the matrix T to the array WORK and swap the k-th */
+/* diagonal element to the (1,1) position. */
+
+ clacpy_("Full", n, n, &t[t_offset], ldt, &work[work_offset],
+ ldwork);
+ ctrexc_("No Q", n, &work[work_offset], ldwork, dummy, &c__1, &k, &
+ c__1, &ierr);
+
+/* Form C = T22 - lambda*I in WORK(2:N,2:N). */
+
+ i__2 = *n;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + i__ * work_dim1;
+ i__4 = i__ + i__ * work_dim1;
+ i__5 = work_dim1 + 1;
+ q__1.r = work[i__4].r - work[i__5].r, q__1.i = work[i__4].i -
+ work[i__5].i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L20: */
+ }
+
+/* Estimate a lower bound for the 1-norm of inv(C'). The 1st */
+/* and (N+1)th columns of WORK are used to store work vectors. */
+
+ sep[ks] = 0.f;
+ est = 0.f;
+ kase = 0;
+ *(unsigned char *)normin = 'N';
+L30:
+ i__2 = *n - 1;
+ clacn2_(&i__2, &work[(*n + 1) * work_dim1 + 1], &work[work_offset]
+, &est, &kase, isave);
+
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Solve C'*x = scale*b */
+
+ i__2 = *n - 1;
+ clatrs_("Upper", "Conjugate transpose", "Nonunit", normin,
+ &i__2, &work[(work_dim1 << 1) + 2], ldwork, &
+ work[work_offset], &scale, &rwork[1], &ierr);
+ } else {
+
+/* Solve C*x = scale*b */
+
+ i__2 = *n - 1;
+ clatrs_("Upper", "No transpose", "Nonunit", normin, &i__2,
+ &work[(work_dim1 << 1) + 2], ldwork, &work[
+ work_offset], &scale, &rwork[1], &ierr);
+ }
+ *(unsigned char *)normin = 'Y';
+ if (scale != 1.f) {
+
+/* Multiply by 1/SCALE if doing so will not cause */
+/* overflow. */
+
+ i__2 = *n - 1;
+ ix = icamax_(&i__2, &work[work_offset], &c__1);
+ i__2 = ix + work_dim1;
+ xnorm = (r__1 = work[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&work[ix + work_dim1]), dabs(r__2));
+ if (scale < xnorm * smlnum || scale == 0.f) {
+ goto L40;
+ }
+ csrscl_(n, &scale, &work[work_offset], &c__1);
+ }
+ goto L30;
+ }
+
+ sep[ks] = 1.f / dmax(est,smlnum);
+ }
+
+L40:
+ ++ks;
+L50:
+ ;
+ }
+ return 0;
+
+/* End of CTRSNA */
+
+} /* ctrsna_ */
diff --git a/contrib/libs/clapack/ctrsyl.c b/contrib/libs/clapack/ctrsyl.c
new file mode 100644
index 0000000000..1d525ae933
--- /dev/null
+++ b/contrib/libs/clapack/ctrsyl.c
@@ -0,0 +1,544 @@
+/* ctrsyl.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ctrsyl_(char *trana, char *tranb, integer *isgn, integer
+ *m, integer *n, complex *a, integer *lda, complex *b, integer *ldb,
+ complex *c__, integer *ldc, real *scale, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4;
+ real r__1, r__2;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer j, k, l;
+ complex a11;
+ real db;
+ complex x11;
+ real da11;
+ complex vec;
+ real dum[1], eps, sgn, smin;
+ complex suml, sumr;
+ extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Complex */ VOID cdotu_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern /* Subroutine */ int slabad_(real *, real *);
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ extern /* Complex */ VOID cladiv_(complex *, complex *, complex *);
+ real scaloc;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *), xerbla_(char *, integer *);
+ real bignum;
+ logical notrna, notrnb;
+ real smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTRSYL solves the complex Sylvester matrix equation: */
+
+/* op(A)*X + X*op(B) = scale*C or */
+/* op(A)*X - X*op(B) = scale*C, */
+
+/* where op(A) = A or A**H, and A and B are both upper triangular. A is */
+/* M-by-M and B is N-by-N; the right hand side C and the solution X are */
+/* M-by-N; and scale is an output scale factor, set <= 1 to avoid */
+/* overflow in X. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANA (input) CHARACTER*1 */
+/* Specifies the option op(A): */
+/* = 'N': op(A) = A (No transpose) */
+/* = 'C': op(A) = A**H (Conjugate transpose) */
+
+/* TRANB (input) CHARACTER*1 */
+/* Specifies the option op(B): */
+/* = 'N': op(B) = B (No transpose) */
+/* = 'C': op(B) = B**H (Conjugate transpose) */
+
+/* ISGN (input) INTEGER */
+/* Specifies the sign in the equation: */
+/* = +1: solve op(A)*X + X*op(B) = scale*C */
+/* = -1: solve op(A)*X - X*op(B) = scale*C */
+
+/* M (input) INTEGER */
+/* The order of the matrix A, and the number of rows in the */
+/* matrices X and C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The order of the matrix B, and the number of columns in the */
+/* matrices X and C. N >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,M) */
+/* The upper triangular matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input) COMPLEX array, dimension (LDB,N) */
+/* The upper triangular matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the M-by-N right hand side matrix C. */
+/* On exit, C is overwritten by the solution matrix X. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M) */
+
+/* SCALE (output) REAL */
+/* The scale factor, scale, set <= 1 to avoid overflow in X. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* = 1: A and B have common or very close eigenvalues; perturbed */
+/* values were used to solve the equation (but the matrices */
+/* A and B are unchanged). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and Test input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ notrna = lsame_(trana, "N");
+ notrnb = lsame_(tranb, "N");
+
+ *info = 0;
+ if (! notrna && ! lsame_(trana, "C")) {
+ *info = -1;
+ } else if (! notrnb && ! lsame_(tranb, "C")) {
+ *info = -2;
+ } else if (*isgn != 1 && *isgn != -1) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*m)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CTRSYL", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *scale = 1.f;
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Set constants to control overflow */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum = smlnum * (real) (*m * *n) / eps;
+ bignum = 1.f / smlnum;
+/* Computing MAX */
+ r__1 = smlnum, r__2 = eps * clange_("M", m, m, &a[a_offset], lda, dum), r__1 = max(r__1,r__2), r__2 = eps * clange_("M", n, n,
+ &b[b_offset], ldb, dum);
+ smin = dmax(r__1,r__2);
+ sgn = (real) (*isgn);
+
+ if (notrna && notrnb) {
+
+/* Solve A*X + ISGN*X*B = scale*C. */
+
+/* The (K,L)th block of X is determined starting from */
+/* bottom-left corner column by column by */
+
+/* A(K,K)*X(K,L) + ISGN*X(K,L)*B(L,L) = C(K,L) - R(K,L) */
+
+/* Where */
+/* M L-1 */
+/* R(K,L) = SUM [A(K,I)*X(I,L)] +ISGN*SUM [X(K,J)*B(J,L)]. */
+/* I=K+1 J=1 */
+
+ i__1 = *n;
+ for (l = 1; l <= i__1; ++l) {
+ for (k = *m; k >= 1; --k) {
+
+ i__2 = *m - k;
+/* Computing MIN */
+ i__3 = k + 1;
+/* Computing MIN */
+ i__4 = k + 1;
+ cdotu_(&q__1, &i__2, &a[k + min(i__3, *m)* a_dim1], lda, &c__[
+ min(i__4, *m)+ l * c_dim1], &c__1);
+ suml.r = q__1.r, suml.i = q__1.i;
+ i__2 = l - 1;
+ cdotu_(&q__1, &i__2, &c__[k + c_dim1], ldc, &b[l * b_dim1 + 1]
+, &c__1);
+ sumr.r = q__1.r, sumr.i = q__1.i;
+ i__2 = k + l * c_dim1;
+ q__3.r = sgn * sumr.r, q__3.i = sgn * sumr.i;
+ q__2.r = suml.r + q__3.r, q__2.i = suml.i + q__3.i;
+ q__1.r = c__[i__2].r - q__2.r, q__1.i = c__[i__2].i - q__2.i;
+ vec.r = q__1.r, vec.i = q__1.i;
+
+ scaloc = 1.f;
+ i__2 = k + k * a_dim1;
+ i__3 = l + l * b_dim1;
+ q__2.r = sgn * b[i__3].r, q__2.i = sgn * b[i__3].i;
+ q__1.r = a[i__2].r + q__2.r, q__1.i = a[i__2].i + q__2.i;
+ a11.r = q__1.r, a11.i = q__1.i;
+ da11 = (r__1 = a11.r, dabs(r__1)) + (r__2 = r_imag(&a11),
+ dabs(r__2));
+ if (da11 <= smin) {
+ a11.r = smin, a11.i = 0.f;
+ da11 = smin;
+ *info = 1;
+ }
+ db = (r__1 = vec.r, dabs(r__1)) + (r__2 = r_imag(&vec), dabs(
+ r__2));
+ if (da11 < 1.f && db > 1.f) {
+ if (db > bignum * da11) {
+ scaloc = 1.f / db;
+ }
+ }
+ q__3.r = scaloc, q__3.i = 0.f;
+ q__2.r = vec.r * q__3.r - vec.i * q__3.i, q__2.i = vec.r *
+ q__3.i + vec.i * q__3.r;
+ cladiv_(&q__1, &q__2, &a11);
+ x11.r = q__1.r, x11.i = q__1.i;
+
+ if (scaloc != 1.f) {
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ csscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L10: */
+ }
+ *scale *= scaloc;
+ }
+ i__2 = k + l * c_dim1;
+ c__[i__2].r = x11.r, c__[i__2].i = x11.i;
+
+/* L20: */
+ }
+/* L30: */
+ }
+
+ } else if (! notrna && notrnb) {
+
+/* Solve A' *X + ISGN*X*B = scale*C. */
+
+/* The (K,L)th block of X is determined starting from */
+/* upper-left corner column by column by */
+
+/* A'(K,K)*X(K,L) + ISGN*X(K,L)*B(L,L) = C(K,L) - R(K,L) */
+
+/* Where */
+/* K-1 L-1 */
+/* R(K,L) = SUM [A'(I,K)*X(I,L)] + ISGN*SUM [X(K,J)*B(J,L)] */
+/* I=1 J=1 */
+
+ i__1 = *n;
+ for (l = 1; l <= i__1; ++l) {
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+
+ i__3 = k - 1;
+ cdotc_(&q__1, &i__3, &a[k * a_dim1 + 1], &c__1, &c__[l *
+ c_dim1 + 1], &c__1);
+ suml.r = q__1.r, suml.i = q__1.i;
+ i__3 = l - 1;
+ cdotu_(&q__1, &i__3, &c__[k + c_dim1], ldc, &b[l * b_dim1 + 1]
+, &c__1);
+ sumr.r = q__1.r, sumr.i = q__1.i;
+ i__3 = k + l * c_dim1;
+ q__3.r = sgn * sumr.r, q__3.i = sgn * sumr.i;
+ q__2.r = suml.r + q__3.r, q__2.i = suml.i + q__3.i;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ vec.r = q__1.r, vec.i = q__1.i;
+
+ scaloc = 1.f;
+ r_cnjg(&q__2, &a[k + k * a_dim1]);
+ i__3 = l + l * b_dim1;
+ q__3.r = sgn * b[i__3].r, q__3.i = sgn * b[i__3].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ a11.r = q__1.r, a11.i = q__1.i;
+ da11 = (r__1 = a11.r, dabs(r__1)) + (r__2 = r_imag(&a11),
+ dabs(r__2));
+ if (da11 <= smin) {
+ a11.r = smin, a11.i = 0.f;
+ da11 = smin;
+ *info = 1;
+ }
+ db = (r__1 = vec.r, dabs(r__1)) + (r__2 = r_imag(&vec), dabs(
+ r__2));
+ if (da11 < 1.f && db > 1.f) {
+ if (db > bignum * da11) {
+ scaloc = 1.f / db;
+ }
+ }
+
+ q__3.r = scaloc, q__3.i = 0.f;
+ q__2.r = vec.r * q__3.r - vec.i * q__3.i, q__2.i = vec.r *
+ q__3.i + vec.i * q__3.r;
+ cladiv_(&q__1, &q__2, &a11);
+ x11.r = q__1.r, x11.i = q__1.i;
+
+ if (scaloc != 1.f) {
+ i__3 = *n;
+ for (j = 1; j <= i__3; ++j) {
+ csscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L40: */
+ }
+ *scale *= scaloc;
+ }
+ i__3 = k + l * c_dim1;
+ c__[i__3].r = x11.r, c__[i__3].i = x11.i;
+
+/* L50: */
+ }
+/* L60: */
+ }
+
+ } else if (! notrna && ! notrnb) {
+
+/* Solve A'*X + ISGN*X*B' = C. */
+
+/* The (K,L)th block of X is determined starting from */
+/* upper-right corner column by column by */
+
+/* A'(K,K)*X(K,L) + ISGN*X(K,L)*B'(L,L) = C(K,L) - R(K,L) */
+
+/* Where */
+/* K-1 */
+/* R(K,L) = SUM [A'(I,K)*X(I,L)] + */
+/* I=1 */
+/* N */
+/* ISGN*SUM [X(K,J)*B'(L,J)]. */
+/* J=L+1 */
+
+ for (l = *n; l >= 1; --l) {
+ i__1 = *m;
+ for (k = 1; k <= i__1; ++k) {
+
+ i__2 = k - 1;
+ cdotc_(&q__1, &i__2, &a[k * a_dim1 + 1], &c__1, &c__[l *
+ c_dim1 + 1], &c__1);
+ suml.r = q__1.r, suml.i = q__1.i;
+ i__2 = *n - l;
+/* Computing MIN */
+ i__3 = l + 1;
+/* Computing MIN */
+ i__4 = l + 1;
+ cdotc_(&q__1, &i__2, &c__[k + min(i__3, *n)* c_dim1], ldc, &b[
+ l + min(i__4, *n)* b_dim1], ldb);
+ sumr.r = q__1.r, sumr.i = q__1.i;
+ i__2 = k + l * c_dim1;
+ r_cnjg(&q__4, &sumr);
+ q__3.r = sgn * q__4.r, q__3.i = sgn * q__4.i;
+ q__2.r = suml.r + q__3.r, q__2.i = suml.i + q__3.i;
+ q__1.r = c__[i__2].r - q__2.r, q__1.i = c__[i__2].i - q__2.i;
+ vec.r = q__1.r, vec.i = q__1.i;
+
+ scaloc = 1.f;
+ i__2 = k + k * a_dim1;
+ i__3 = l + l * b_dim1;
+ q__3.r = sgn * b[i__3].r, q__3.i = sgn * b[i__3].i;
+ q__2.r = a[i__2].r + q__3.r, q__2.i = a[i__2].i + q__3.i;
+ r_cnjg(&q__1, &q__2);
+ a11.r = q__1.r, a11.i = q__1.i;
+ da11 = (r__1 = a11.r, dabs(r__1)) + (r__2 = r_imag(&a11),
+ dabs(r__2));
+ if (da11 <= smin) {
+ a11.r = smin, a11.i = 0.f;
+ da11 = smin;
+ *info = 1;
+ }
+ db = (r__1 = vec.r, dabs(r__1)) + (r__2 = r_imag(&vec), dabs(
+ r__2));
+ if (da11 < 1.f && db > 1.f) {
+ if (db > bignum * da11) {
+ scaloc = 1.f / db;
+ }
+ }
+
+ q__3.r = scaloc, q__3.i = 0.f;
+ q__2.r = vec.r * q__3.r - vec.i * q__3.i, q__2.i = vec.r *
+ q__3.i + vec.i * q__3.r;
+ cladiv_(&q__1, &q__2, &a11);
+ x11.r = q__1.r, x11.i = q__1.i;
+
+ if (scaloc != 1.f) {
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ csscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L70: */
+ }
+ *scale *= scaloc;
+ }
+ i__2 = k + l * c_dim1;
+ c__[i__2].r = x11.r, c__[i__2].i = x11.i;
+
+/* L80: */
+ }
+/* L90: */
+ }
+
+ } else if (notrna && ! notrnb) {
+
+/* Solve A*X + ISGN*X*B' = C. */
+
+/* The (K,L)th block of X is determined starting from */
+/* bottom-left corner column by column by */
+
+/* A(K,K)*X(K,L) + ISGN*X(K,L)*B'(L,L) = C(K,L) - R(K,L) */
+
+/* Where */
+/* M N */
+/* R(K,L) = SUM [A(K,I)*X(I,L)] + ISGN*SUM [X(K,J)*B'(L,J)] */
+/* I=K+1 J=L+1 */
+
+ for (l = *n; l >= 1; --l) {
+ for (k = *m; k >= 1; --k) {
+
+ i__1 = *m - k;
+/* Computing MIN */
+ i__2 = k + 1;
+/* Computing MIN */
+ i__3 = k + 1;
+ cdotu_(&q__1, &i__1, &a[k + min(i__2, *m)* a_dim1], lda, &c__[
+ min(i__3, *m)+ l * c_dim1], &c__1);
+ suml.r = q__1.r, suml.i = q__1.i;
+ i__1 = *n - l;
+/* Computing MIN */
+ i__2 = l + 1;
+/* Computing MIN */
+ i__3 = l + 1;
+ cdotc_(&q__1, &i__1, &c__[k + min(i__2, *n)* c_dim1], ldc, &b[
+ l + min(i__3, *n)* b_dim1], ldb);
+ sumr.r = q__1.r, sumr.i = q__1.i;
+ i__1 = k + l * c_dim1;
+ r_cnjg(&q__4, &sumr);
+ q__3.r = sgn * q__4.r, q__3.i = sgn * q__4.i;
+ q__2.r = suml.r + q__3.r, q__2.i = suml.i + q__3.i;
+ q__1.r = c__[i__1].r - q__2.r, q__1.i = c__[i__1].i - q__2.i;
+ vec.r = q__1.r, vec.i = q__1.i;
+
+ scaloc = 1.f;
+ i__1 = k + k * a_dim1;
+ r_cnjg(&q__3, &b[l + l * b_dim1]);
+ q__2.r = sgn * q__3.r, q__2.i = sgn * q__3.i;
+ q__1.r = a[i__1].r + q__2.r, q__1.i = a[i__1].i + q__2.i;
+ a11.r = q__1.r, a11.i = q__1.i;
+ da11 = (r__1 = a11.r, dabs(r__1)) + (r__2 = r_imag(&a11),
+ dabs(r__2));
+ if (da11 <= smin) {
+ a11.r = smin, a11.i = 0.f;
+ da11 = smin;
+ *info = 1;
+ }
+ db = (r__1 = vec.r, dabs(r__1)) + (r__2 = r_imag(&vec), dabs(
+ r__2));
+ if (da11 < 1.f && db > 1.f) {
+ if (db > bignum * da11) {
+ scaloc = 1.f / db;
+ }
+ }
+
+ q__3.r = scaloc, q__3.i = 0.f;
+ q__2.r = vec.r * q__3.r - vec.i * q__3.i, q__2.i = vec.r *
+ q__3.i + vec.i * q__3.r;
+ cladiv_(&q__1, &q__2, &a11);
+ x11.r = q__1.r, x11.i = q__1.i;
+
+ if (scaloc != 1.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ csscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L100: */
+ }
+ *scale *= scaloc;
+ }
+ i__1 = k + l * c_dim1;
+ c__[i__1].r = x11.r, c__[i__1].i = x11.i;
+
+/* L110: */
+ }
+/* L120: */
+ }
+
+ }
+
+ return 0;
+
+/* End of CTRSYL */
+
+} /* ctrsyl_ */
diff --git a/contrib/libs/clapack/ctrti2.c b/contrib/libs/clapack/ctrti2.c
new file mode 100644
index 0000000000..36dee65ccc
--- /dev/null
+++ b/contrib/libs/clapack/ctrti2.c
@@ -0,0 +1,198 @@
+/* ctrti2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int ctrti2_(char *uplo, char *diag, integer *n, complex *a,
+	integer *lda, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    complex q__1;
+
+    /* Builtin functions */
+    void c_div(complex *, complex *, complex *);
+
+    /* Local variables */
+    integer j;
+    complex ajj;
+    extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+	    integer *);
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int ctrmv_(char *, char *, char *, integer *,
+	    complex *, integer *, complex *, integer *), xerbla_(char *, integer *);
+    logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTRTI2 computes the inverse of a complex upper or lower triangular */
+/* matrix. */
+
+/* This is the Level 2 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the triangular matrix A. If UPLO = 'U', the */
+/* leading n by n upper triangular part of the array A contains */
+/* the upper triangular matrix, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of the array A contains */
+/* the lower triangular matrix, and the strictly upper */
+/* triangular part of A is not referenced. If DIAG = 'U', the */
+/* diagonal elements of A are also not referenced and are */
+/* assumed to be 1. */
+
+/* On exit, the (triangular) inverse of the original matrix, in */
+/* the same storage format. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    nounit = lsame_(diag, "N");
+    if (! upper && ! lsame_(uplo, "L")) {
+        *info = -1;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+        *info = -2;
+    } else if (*n < 0) {
+        *info = -3;
+    } else if (*lda < max(1,*n)) {
+        *info = -5;
+    }
+    if (*info != 0) {
+        i__1 = -(*info);
+        xerbla_("CTRTI2", &i__1);
+        return 0;
+    }
+
+    if (upper) {
+
+/* Compute inverse of upper triangular matrix. */
+
+        i__1 = *n;
+        for (j = 1; j <= i__1; ++j) {
+            if (nounit) {
+/* A(j,j) := 1/A(j,j) in place; then ajj := -A(j,j) (i.e. -1/original */
+/* diagonal) is the factor used below to scale the rest of column j. */
+                i__2 = j + j * a_dim1;
+                c_div(&q__1, &c_b1, &a[j + j * a_dim1]);
+                a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+                i__2 = j + j * a_dim1;
+                q__1.r = -a[i__2].r, q__1.i = -a[i__2].i;
+                ajj.r = q__1.r, ajj.i = q__1.i;
+            } else {
+/* Unit diagonal: the reciprocal is 1, so the scale factor is just -1. */
+                q__1.r = -1.f, q__1.i = -0.f;
+                ajj.r = q__1.r, ajj.i = q__1.i;
+            }
+
+/* Compute elements 1:j-1 of j-th column. */
+
+/* ctrmv overwrites A(1:j-1,j) with A(1:j-1,1:j-1)*A(1:j-1,j), then cscal */
+/* multiplies that column segment by ajj. */
+            i__2 = j - 1;
+            ctrmv_("Upper", "No transpose", diag, &i__2, &a[a_offset], lda, &
+                    a[j * a_dim1 + 1], &c__1);
+            i__2 = j - 1;
+            cscal_(&i__2, &ajj, &a[j * a_dim1 + 1], &c__1);
+/* L10: */
+        }
+    } else {
+
+/* Compute inverse of lower triangular matrix. */
+
+        for (j = *n; j >= 1; --j) {
+            if (nounit) {
+/* Same in-place reciprocal / negation as in the upper case. */
+                i__1 = j + j * a_dim1;
+                c_div(&q__1, &c_b1, &a[j + j * a_dim1]);
+                a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+                i__1 = j + j * a_dim1;
+                q__1.r = -a[i__1].r, q__1.i = -a[i__1].i;
+                ajj.r = q__1.r, ajj.i = q__1.i;
+            } else {
+                q__1.r = -1.f, q__1.i = -0.f;
+                ajj.r = q__1.r, ajj.i = q__1.i;
+            }
+            if (j < *n) {
+
+/* Compute elements j+1:n of j-th column. */
+
+                i__1 = *n - j;
+                ctrmv_("Lower", "No transpose", diag, &i__1, &a[j + 1 + (j +
+                        1) * a_dim1], lda, &a[j + 1 + j * a_dim1], &c__1);
+                i__1 = *n - j;
+                cscal_(&i__1, &ajj, &a[j + 1 + j * a_dim1], &c__1);
+            }
+/* L20: */
+        }
+    }
+
+    return 0;
+
+/* End of CTRTI2 */
+
+} /* ctrti2_ */
diff --git a/contrib/libs/clapack/ctrtri.c b/contrib/libs/clapack/ctrtri.c
new file mode 100644
index 0000000000..4647bbf1cd
--- /dev/null
+++ b/contrib/libs/clapack/ctrtri.c
@@ -0,0 +1,244 @@
+/* ctrtri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int ctrtri_(char *uplo, char *diag, integer *n, complex *a,
+	integer *lda, integer *info)
+{
+    /* System generated locals */
+    address a__1[2];
+    integer a_dim1, a_offset, i__1, i__2, i__3[2], i__4, i__5;
+    complex q__1;
+    char ch__1[2];
+
+    /* Builtin functions */
+    /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+    /* Local variables */
+    integer j, jb, nb, nn;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int ctrmm_(char *, char *, char *, char *,
+	    integer *, integer *, complex *, complex *, integer *, complex *,
+	    integer *), ctrsm_(char *, char *,
+	    char *, char *, integer *, integer *, complex *, complex *,
+	    integer *, complex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int ctrti2_(char *, char *, integer *, complex *,
+	    integer *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTRTRI computes the inverse of a complex upper or lower triangular */
+/* matrix A. */
+
+/* This is the Level 3 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the triangular matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of the array A contains */
+/* the upper triangular matrix, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of the array A contains */
+/* the lower triangular matrix, and the strictly upper */
+/* triangular part of A is not referenced. If DIAG = 'U', the */
+/* diagonal elements of A are also not referenced and are */
+/* assumed to be 1. */
+/* On exit, the (triangular) inverse of the original matrix, in */
+/* the same storage format. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, A(i,i) is exactly zero. The triangular */
+/* matrix is singular and its inverse can not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    nounit = lsame_(diag, "N");
+    if (! upper && ! lsame_(uplo, "L")) {
+        *info = -1;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+        *info = -2;
+    } else if (*n < 0) {
+        *info = -3;
+    } else if (*lda < max(1,*n)) {
+        *info = -5;
+    }
+    if (*info != 0) {
+        i__1 = -(*info);
+        xerbla_("CTRTRI", &i__1);
+        return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+        return 0;
+    }
+
+/* Check for singularity if non-unit. */
+
+/* NOTE: the loop counter is *info itself, so an early return here */
+/* leaves INFO set to the (1-based) index of the first zero diagonal */
+/* element, matching the INFO > 0 contract above. */
+    if (nounit) {
+        i__1 = *n;
+        for (*info = 1; *info <= i__1; ++(*info)) {
+            i__2 = *info + *info * a_dim1;
+            if (a[i__2].r == 0.f && a[i__2].i == 0.f) {
+                return 0;
+            }
+/* L10: */
+        }
+        *info = 0;
+    }
+
+/* Determine the block size for this environment. */
+
+/* Writing concatenation */
+    i__3[0] = 1, a__1[0] = uplo;
+    i__3[1] = 1, a__1[1] = diag;
+    s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+    nb = ilaenv_(&c__1, "CTRTRI", ch__1, n, &c_n1, &c_n1, &c_n1);
+    if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code */
+
+        ctrti2_(uplo, diag, n, &a[a_offset], lda, info);
+    } else {
+
+/* Use blocked code */
+
+        if (upper) {
+
+/* Compute inverse of upper triangular matrix */
+
+            i__1 = *n;
+            i__2 = nb;
+            for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+                i__4 = nb, i__5 = *n - j + 1;
+                jb = min(i__4,i__5);
+
+/* Compute rows 1:j-1 of current block column */
+
+                i__4 = j - 1;
+                ctrmm_("Left", "Upper", "No transpose", diag, &i__4, &jb, &
+                        c_b1, &a[a_offset], lda, &a[j * a_dim1 + 1], lda);
+                i__4 = j - 1;
+                q__1.r = -1.f, q__1.i = -0.f;
+                ctrsm_("Right", "Upper", "No transpose", diag, &i__4, &jb, &
+                        q__1, &a[j + j * a_dim1], lda, &a[j * a_dim1 + 1],
+                        lda);
+
+/* Compute inverse of current diagonal block */
+
+                ctrti2_("Upper", diag, &jb, &a[j + j * a_dim1], lda, info);
+/* L20: */
+            }
+        } else {
+
+/* Compute inverse of lower triangular matrix */
+
+/* nn is the starting column of the bottom-most diagonal block; block */
+/* columns are then processed from the bottom-right corner upwards in */
+/* steps of -nb. */
+            nn = (*n - 1) / nb * nb + 1;
+            i__2 = -nb;
+            for (j = nn; i__2 < 0 ? j >= 1 : j <= 1; j += i__2) {
+/* Computing MIN */
+                i__1 = nb, i__4 = *n - j + 1;
+                jb = min(i__1,i__4);
+                if (j + jb <= *n) {
+
+/* Compute rows j+jb:n of current block column */
+
+                    i__1 = *n - j - jb + 1;
+                    ctrmm_("Left", "Lower", "No transpose", diag, &i__1, &jb,
+                            &c_b1, &a[j + jb + (j + jb) * a_dim1], lda, &a[j
+                            + jb + j * a_dim1], lda);
+                    i__1 = *n - j - jb + 1;
+                    q__1.r = -1.f, q__1.i = -0.f;
+                    ctrsm_("Right", "Lower", "No transpose", diag, &i__1, &jb,
+                            &q__1, &a[j + j * a_dim1], lda, &a[j + jb + j *
+                            a_dim1], lda);
+                }
+
+/* Compute inverse of current diagonal block */
+
+                ctrti2_("Lower", diag, &jb, &a[j + j * a_dim1], lda, info);
+/* L30: */
+            }
+        }
+    }
+
+    return 0;
+
+/* End of CTRTRI */
+
+} /* ctrtri_ */
diff --git a/contrib/libs/clapack/ctrtrs.c b/contrib/libs/clapack/ctrtrs.c
new file mode 100644
index 0000000000..71692d1383
--- /dev/null
+++ b/contrib/libs/clapack/ctrtrs.c
@@ -0,0 +1,184 @@
+/* ctrtrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b2 = {1.f,0.f};
+
+/* Subroutine */ int ctrtrs_(char *uplo, char *trans, char *diag, integer *n,
+	integer *nrhs, complex *a, integer *lda, complex *b, integer *ldb,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int ctrsm_(char *, char *, char *, char *,
+	    integer *, integer *, complex *, complex *, integer *, complex *,
+	    integer *), xerbla_(char *,
+	    integer *);
+    logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTRTRS solves a triangular system of the form */
+
+/* A * X = B, A**T * X = B, or A**H * X = B, */
+
+/* where A is a triangular matrix of order N, and B is an N-by-NRHS */
+/* matrix. A check is made to verify that A is nonsingular. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The triangular matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of the array A contains the upper */
+/* triangular matrix, and the strictly lower triangular part of */
+/* A is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of the array A contains the lower triangular */
+/* matrix, and the strictly upper triangular part of A is not */
+/* referenced. If DIAG = 'U', the diagonal elements of A are */
+/* also not referenced and are assumed to be 1. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, if INFO = 0, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element of A is zero, */
+/* indicating that the matrix is singular and the solutions */
+/* X have not been computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    nounit = lsame_(diag, "N");
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+        *info = -1;
+    } else if (! lsame_(trans, "N") && ! lsame_(trans,
+            "T") && ! lsame_(trans, "C")) {
+        *info = -2;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+        *info = -3;
+    } else if (*n < 0) {
+        *info = -4;
+    } else if (*nrhs < 0) {
+        *info = -5;
+    } else if (*lda < max(1,*n)) {
+        *info = -7;
+    } else if (*ldb < max(1,*n)) {
+        *info = -9;
+    }
+    if (*info != 0) {
+        i__1 = -(*info);
+        xerbla_("CTRTRS", &i__1);
+        return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+        return 0;
+    }
+
+/* Check for singularity. */
+
+/* NOTE: the loop counter is *info itself, so an early return here */
+/* leaves INFO set to the (1-based) index of the first zero diagonal */
+/* element, matching the INFO > 0 contract above. */
+    if (nounit) {
+        i__1 = *n;
+        for (*info = 1; *info <= i__1; ++(*info)) {
+            i__2 = *info + *info * a_dim1;
+            if (a[i__2].r == 0.f && a[i__2].i == 0.f) {
+                return 0;
+            }
+/* L10: */
+        }
+    }
+    *info = 0;
+
+/* Solve A * x = b, A**T * x = b, or A**H * x = b. */
+
+/* All NRHS right-hand sides are solved in a single call to the level-3 */
+/* BLAS triangular solver; B is overwritten with the solution X. */
+    ctrsm_("Left", uplo, trans, diag, n, nrhs, &c_b2, &a[a_offset], lda, &b[
+            b_offset], ldb);
+
+    return 0;
+
+/* End of CTRTRS */
+
+} /* ctrtrs_ */
diff --git a/contrib/libs/clapack/ctrttf.c b/contrib/libs/clapack/ctrttf.c
new file mode 100644
index 0000000000..406aaed03f
--- /dev/null
+++ b/contrib/libs/clapack/ctrttf.c
@@ -0,0 +1,580 @@
+/* ctrttf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ctrttf_(char *transr, char *uplo, integer *n, complex *a,
+ integer *lda, complex *arf, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, j, k, l, n1, n2, ij, nt, nx2, np1x2;
+ logical normaltransr;
+ extern logical lsame_(char *, char *);
+ logical lower;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nisodd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTRTTF copies a triangular matrix A from standard full format (TR) */
+/* to rectangular full packed format (TF) . */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': ARF in Normal mode is wanted; */
+/* = 'C': ARF in Conjugate Transpose mode is wanted; */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX array, dimension ( LDA, N ) */
+/* On entry, the triangular matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of the array A contains */
+/* the upper triangular matrix, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of the array A contains */
+/* the lower triangular matrix, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the matrix A. LDA >= max(1,N). */
+
+/* ARF (output) COMPLEX*16 array, dimension ( N*(N+1)/2 ), */
+/* On exit, the upper or lower triangular matrix A stored in */
+/* RFP format. For a further discussion see Notes below. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Notes */
+/* ===== */
+
+/* We first consider Standard Packed Format when N is even. */
+/* We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = `N'. RFP holds AP as follows: */
+/* For UPLO = `U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* conjugate-transpose of the first three columns of AP upper. */
+/* For UPLO = `L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* conjugate-transpose of the last three columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N even and TRANSR = `N'. */
+
+/* RFP A RFP A */
+
+/* -- -- -- */
+/* 03 04 05 33 43 53 */
+/* -- -- */
+/* 13 14 15 00 44 54 */
+/* -- */
+/* 23 24 25 10 11 55 */
+
+/* 33 34 35 20 21 22 */
+/* -- */
+/* 00 44 45 30 31 32 */
+/* -- -- */
+/* 01 11 55 40 41 42 */
+/* -- -- -- */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = `C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We next consider Standard Packed Format when N is odd. */
+/* We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = `N'. RFP holds AP as follows: */
+/* For UPLO = `U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* conjugate-transpose of the first two columns of AP upper. */
+/* For UPLO = `L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* conjugate-transpose of the last two columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N odd and TRANSR = `N'. */
+
+/* RFP A RFP A */
+
+/* -- -- */
+/* 02 03 04 00 33 43 */
+/* -- */
+/* 12 13 14 10 11 44 */
+
+/* 22 23 24 20 21 22 */
+/* -- */
+/* 00 33 34 30 31 32 */
+/* -- -- */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = `C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- */
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda - 1 - 0 + 1;
+ a_offset = 0 + a_dim1 * 0;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ normaltransr = lsame_(transr, "N");
+ lower = lsame_(uplo, "L");
+ if (! normaltransr && ! lsame_(transr, "C")) {
+ *info = -1;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CTRTTF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 1) {
+ if (*n == 1) {
+ if (normaltransr) {
+ arf[0].r = a[0].r, arf[0].i = a[0].i;
+ } else {
+ r_cnjg(&q__1, a);
+ arf[0].r = q__1.r, arf[0].i = q__1.i;
+ }
+ }
+ return 0;
+ }
+
+/* Size of array ARF(1:2,0:nt-1) */
+
+ nt = *n * (*n + 1) / 2;
+
+/* set N1 and N2 depending on LOWER: for N even N1=N2=K */
+
+ if (lower) {
+ n2 = *n / 2;
+ n1 = *n - n2;
+ } else {
+ n1 = *n / 2;
+ n2 = *n - n1;
+ }
+
+/* If N is odd, set NISODD = .TRUE., LDA=N+1 and A is (N+1)--by--K2. */
+/* If N is even, set K = N/2 and NISODD = .FALSE., LDA=N and A is */
+/* N--by--(N+1)/2. */
+
+ if (*n % 2 == 0) {
+ k = *n / 2;
+ nisodd = FALSE_;
+ if (! lower) {
+ np1x2 = *n + *n + 2;
+ }
+ } else {
+ nisodd = TRUE_;
+ if (! lower) {
+ nx2 = *n + *n;
+ }
+ }
+
+ if (nisodd) {
+
+/* N is odd */
+
+ if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:n1-1) ) */
+/* T1 -> a(0,0), T2 -> a(0,1), S -> a(n1,0) */
+/* T1 -> a(0), T2 -> a(n), S -> a(n1); lda=n */
+
+ ij = 0;
+ i__1 = n2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = n2 + j;
+ for (i__ = n1; i__ <= i__2; ++i__) {
+ i__3 = ij;
+ r_cnjg(&q__1, &a[n2 + j + i__ * a_dim1]);
+ arf[i__3].r = q__1.r, arf[i__3].i = q__1.i;
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = ij;
+ i__4 = i__ + j * a_dim1;
+ arf[i__3].r = a[i__4].r, arf[i__3].i = a[i__4].i;
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:n2-1) */
+/* T1 -> a(n1+1,0), T2 -> a(n1,0), S -> a(0,0) */
+/* T1 -> a(n2), T2 -> a(n1), S -> a(0); lda=n */
+
+ ij = nt - *n;
+ i__1 = n1;
+ for (j = *n - 1; j >= i__1; --j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = ij;
+ i__4 = i__ + j * a_dim1;
+ arf[i__3].r = a[i__4].r, arf[i__3].i = a[i__4].i;
+ ++ij;
+ }
+ i__2 = n1 - 1;
+ for (l = j - n1; l <= i__2; ++l) {
+ i__3 = ij;
+ r_cnjg(&q__1, &a[j - n1 + l * a_dim1]);
+ arf[i__3].r = q__1.r, arf[i__3].i = q__1.i;
+ ++ij;
+ }
+ ij -= nx2;
+ }
+
+ }
+
+ } else {
+
+/* N is odd and TRANSR = 'C' */
+
+ if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is odd */
+/* T1 -> A(0,0) , T2 -> A(1,0) , S -> A(0,n1) */
+/* T1 -> A(0+0) , T2 -> A(1+0) , S -> A(0+n1*n1); lda=n1 */
+
+ ij = 0;
+ i__1 = n2 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = ij;
+ r_cnjg(&q__1, &a[j + i__ * a_dim1]);
+ arf[i__3].r = q__1.r, arf[i__3].i = q__1.i;
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = n1 + j; i__ <= i__2; ++i__) {
+ i__3 = ij;
+ i__4 = i__ + (n1 + j) * a_dim1;
+ arf[i__3].r = a[i__4].r, arf[i__3].i = a[i__4].i;
+ ++ij;
+ }
+ }
+ i__1 = *n - 1;
+ for (j = n2; j <= i__1; ++j) {
+ i__2 = n1 - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = ij;
+ r_cnjg(&q__1, &a[j + i__ * a_dim1]);
+ arf[i__3].r = q__1.r, arf[i__3].i = q__1.i;
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is odd */
+/* T1 -> A(0,n1+1), T2 -> A(0,n1), S -> A(0,0) */
+/* T1 -> A(n2*n2), T2 -> A(n1*n2), S -> A(0); lda=n2 */
+
+ ij = 0;
+ i__1 = n1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - 1;
+ for (i__ = n1; i__ <= i__2; ++i__) {
+ i__3 = ij;
+ r_cnjg(&q__1, &a[j + i__ * a_dim1]);
+ arf[i__3].r = q__1.r, arf[i__3].i = q__1.i;
+ ++ij;
+ }
+ }
+ i__1 = n1 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = ij;
+ i__4 = i__ + j * a_dim1;
+ arf[i__3].r = a[i__4].r, arf[i__3].i = a[i__4].i;
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (l = n2 + j; l <= i__2; ++l) {
+ i__3 = ij;
+ r_cnjg(&q__1, &a[n2 + j + l * a_dim1]);
+ arf[i__3].r = q__1.r, arf[i__3].i = q__1.i;
+ ++ij;
+ }
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* N is even */
+
+ if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/* T1 -> a(1), T2 -> a(0), S -> a(k+1); lda=n+1 */
+
+ ij = 0;
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k + j;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ i__3 = ij;
+ r_cnjg(&q__1, &a[k + j + i__ * a_dim1]);
+ arf[i__3].r = q__1.r, arf[i__3].i = q__1.i;
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = ij;
+ i__4 = i__ + j * a_dim1;
+ arf[i__3].r = a[i__4].r, arf[i__3].i = a[i__4].i;
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(k+1,0) , T2 -> a(k,0), S -> a(0,0) */
+/* T1 -> a(k+1), T2 -> a(k), S -> a(0); lda=n+1 */
+
+ ij = nt - *n - 1;
+ i__1 = k;
+ for (j = *n - 1; j >= i__1; --j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = ij;
+ i__4 = i__ + j * a_dim1;
+ arf[i__3].r = a[i__4].r, arf[i__3].i = a[i__4].i;
+ ++ij;
+ }
+ i__2 = k - 1;
+ for (l = j - k; l <= i__2; ++l) {
+ i__3 = ij;
+ r_cnjg(&q__1, &a[j - k + l * a_dim1]);
+ arf[i__3].r = q__1.r, arf[i__3].i = q__1.i;
+ ++ij;
+ }
+ ij -= np1x2;
+ }
+
+ }
+
+ } else {
+
+/* N is even and TRANSR = 'C' */
+
+ if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is even (see paper, A=B) */
+/* T1 -> A(0,1) , T2 -> A(0,0) , S -> A(0,k+1) : */
+/* T1 -> A(0+k) , T2 -> A(0+0) , S -> A(0+k*(k+1)); lda=k */
+
+ ij = 0;
+ j = k;
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ i__2 = ij;
+ i__3 = i__ + j * a_dim1;
+ arf[i__2].r = a[i__3].r, arf[i__2].i = a[i__3].i;
+ ++ij;
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = ij;
+ r_cnjg(&q__1, &a[j + i__ * a_dim1]);
+ arf[i__3].r = q__1.r, arf[i__3].i = q__1.i;
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = k + 1 + j; i__ <= i__2; ++i__) {
+ i__3 = ij;
+ i__4 = i__ + (k + 1 + j) * a_dim1;
+ arf[i__3].r = a[i__4].r, arf[i__3].i = a[i__4].i;
+ ++ij;
+ }
+ }
+ i__1 = *n - 1;
+ for (j = k - 1; j <= i__1; ++j) {
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = ij;
+ r_cnjg(&q__1, &a[j + i__ * a_dim1]);
+ arf[i__3].r = q__1.r, arf[i__3].i = q__1.i;
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is even (see paper, A=B) */
+/* T1 -> A(0,k+1) , T2 -> A(0,k) , S -> A(0,0) */
+/* T1 -> A(0+k*(k+1)) , T2 -> A(0+k*k) , S -> A(0+0)); lda=k */
+
+ ij = 0;
+ i__1 = k;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - 1;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ i__3 = ij;
+ r_cnjg(&q__1, &a[j + i__ * a_dim1]);
+ arf[i__3].r = q__1.r, arf[i__3].i = q__1.i;
+ ++ij;
+ }
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = ij;
+ i__4 = i__ + j * a_dim1;
+ arf[i__3].r = a[i__4].r, arf[i__3].i = a[i__4].i;
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (l = k + 1 + j; l <= i__2; ++l) {
+ i__3 = ij;
+ r_cnjg(&q__1, &a[k + 1 + j + l * a_dim1]);
+ arf[i__3].r = q__1.r, arf[i__3].i = q__1.i;
+ ++ij;
+ }
+ }
+
+/* Note that here J = K-1 */
+
+ i__1 = j;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = ij;
+ i__3 = i__ + j * a_dim1;
+ arf[i__2].r = a[i__3].r, arf[i__2].i = a[i__3].i;
+ ++ij;
+ }
+
+ }
+
+ }
+
+ }
+
+ return 0;
+
+/* End of CTRTTF */
+
+} /* ctrttf_ */
diff --git a/contrib/libs/clapack/ctrttp.c b/contrib/libs/clapack/ctrttp.c
new file mode 100644
index 0000000000..bdf7acbd70
--- /dev/null
+++ b/contrib/libs/clapack/ctrttp.c
@@ -0,0 +1,148 @@
+/* ctrttp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ctrttp_(char *uplo, integer *n, complex *a, integer *lda,
+ complex *ap, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, j, k;
+    extern logical lsame_(char *, char *);
+    logical lower;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- and Julien Langou of the Univ. of Colorado Denver -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTRTTP copies a triangular matrix A from full format (TR) to standard */
+/* packed format (TP). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrices AP and A. N >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* On entry, the triangular matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AP (output) COMPLEX array, dimension ( N*(N+1)/2 ), */
+/* On exit, the upper or lower triangular matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* NOTE(review): f2c pointer shift — after this, A is addressed as */
+    /* a[i + j*a_dim1] with 1-based (i,j), and AP as ap[k] with 1-based k. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    lower = lsame_(uplo, "L");
+    if (! lower && ! lsame_(uplo, "U")) {
+        *info = -1;
+    } else if (*n < 0) {
+        *info = -2;
+    } else if (*lda < max(1,*n)) {
+        *info = -4;
+    }
+    if (*info != 0) {
+        i__1 = -(*info);
+        xerbla_("CTRTTP", &i__1);
+        return 0;
+    }
+
+    if (lower) {
+
+/* UPLO = 'L': for each column j copy A(j:n,j) into AP consecutively; */
+/* k walks AP in column-major packed order. */
+
+        k = 0;
+        i__1 = *n;
+        for (j = 1; j <= i__1; ++j) {
+            i__2 = *n;
+            for (i__ = j; i__ <= i__2; ++i__) {
+                ++k;
+                i__3 = k;
+                i__4 = i__ + j * a_dim1;
+                ap[i__3].r = a[i__4].r, ap[i__3].i = a[i__4].i;
+            }
+        }
+    } else {
+
+/* UPLO = 'U': for each column j copy A(1:j,j) into AP consecutively. */
+
+        k = 0;
+        i__1 = *n;
+        for (j = 1; j <= i__1; ++j) {
+            i__2 = j;
+            for (i__ = 1; i__ <= i__2; ++i__) {
+                ++k;
+                i__3 = k;
+                i__4 = i__ + j * a_dim1;
+                ap[i__3].r = a[i__4].r, ap[i__3].i = a[i__4].i;
+            }
+        }
+    }
+
+
+    return 0;
+
+/* End of CTRTTP */
+
+} /* ctrttp_ */
diff --git a/contrib/libs/clapack/ctzrqf.c b/contrib/libs/clapack/ctzrqf.c
new file mode 100644
index 0000000000..80987b925a
--- /dev/null
+++ b/contrib/libs/clapack/ctzrqf.c
@@ -0,0 +1,241 @@
+/* ctzrqf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static complex c_b1 = {1.f,0.f};
+static integer c__1 = 1;
+
+/* Subroutine */ int ctzrqf_(integer *m, integer *n, complex *a, integer *lda,
+ complex *tau, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    complex q__1, q__2;
+
+    /* Builtin functions */
+    void r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    integer i__, k, m1;
+    extern /* Subroutine */ int cgerc_(integer *, integer *, complex *,
+        complex *, integer *, complex *, integer *, complex *, integer *);
+    complex alpha;
+    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+, complex *, integer *, complex *, integer *, complex *, complex *
+, integer *), ccopy_(integer *, complex *, integer *,
+        complex *, integer *), caxpy_(integer *, complex *, complex *,
+        integer *, complex *, integer *), clacgv_(integer *, complex *,
+        integer *), clarfp_(integer *, complex *, complex *, integer *,
+        complex *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine CTZRZF. */
+
+/* CTZRQF reduces the M-by-N ( M<=N ) complex upper trapezoidal matrix A */
+/* to upper triangular form by means of unitary transformations. */
+
+/* The upper trapezoidal matrix A is factored as */
+
+/* A = ( R 0 ) * Z, */
+
+/* where Z is an N-by-N unitary matrix and R is an M-by-M upper */
+/* triangular matrix. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= M. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the leading M-by-N upper trapezoidal part of the */
+/* array A must contain the matrix to be factorized. */
+/* On exit, the leading M-by-M upper triangular part of A */
+/* contains the upper triangular matrix R, and elements M+1 to */
+/* N of the first M rows of A, with the array TAU, represent the */
+/* unitary matrix Z as a product of M elementary reflectors. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX array, dimension (M) */
+/* The scalar factors of the elementary reflectors. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The factorization is obtained by Householder's method. The kth */
+/* transformation matrix, Z( k ), whose conjugate transpose is used to */
+/* introduce zeros into the (m - k + 1)th row of A, is given in the form */
+
+/* Z( k ) = ( I 0 ), */
+/* ( 0 T( k ) ) */
+
+/* where */
+
+/* T( k ) = I - tau*u( k )*u( k )', u( k ) = ( 1 ), */
+/* ( 0 ) */
+/* ( z( k ) ) */
+
+/* tau is a scalar and z( k ) is an ( n - m ) element vector. */
+/* tau and z( k ) are chosen to annihilate the elements of the kth row */
+/* of X. */
+
+/* The scalar tau is returned in the kth element of TAU and the vector */
+/* u( k ) in the kth row of A, such that the elements of z( k ) are */
+/* in a( k, m + 1 ), ..., a( k, n ). The elements of R are returned in */
+/* the upper triangular part of A. */
+
+/* Z is given by */
+
+/* Z = Z( 1 ) * Z( 2 ) * ... * Z( m ). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* NOTE(review): f2c pointer shift — A is addressed a[i + j*a_dim1] and */
+    /* TAU is addressed tau[k], both with 1-based indices, from here on. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+        *info = -1;
+    } else if (*n < *m) {
+        *info = -2;
+    } else if (*lda < max(1,*m)) {
+        *info = -4;
+    }
+    if (*info != 0) {
+        i__1 = -(*info);
+        xerbla_("CTZRQF", &i__1);
+        return 0;
+    }
+
+/* Perform the factorization. */
+
+    if (*m == 0) {
+        return 0;
+    }
+    if (*m == *n) {
+
+/* Square case: z( k ) has n - m = 0 elements, so every reflector is */
+/* trivial; just zero out TAU. */
+
+        i__1 = *n;
+        for (i__ = 1; i__ <= i__1; ++i__) {
+            i__2 = i__;
+            tau[i__2].r = 0.f, tau[i__2].i = 0.f;
+/* L10: */
+        }
+    } else {
+/* Computing MIN */
+        i__1 = *m + 1;
+        m1 = min(i__1,*n);
+        for (k = *m; k >= 1; --k) {
+
+/* Use a Householder reflection to zero the kth row of A. */
+/* First set up the reflection. */
+
+/* Conjugate a(k,k) and row z(k) = a(k,m1:n) before calling CLARFP */
+/* (f2c rendering of the CONJG calls in the Fortran source). */
+
+            i__1 = k + k * a_dim1;
+            r_cnjg(&q__1, &a[k + k * a_dim1]);
+            a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+            i__1 = *n - *m;
+            clacgv_(&i__1, &a[k + m1 * a_dim1], lda);
+            i__1 = k + k * a_dim1;
+            alpha.r = a[i__1].r, alpha.i = a[i__1].i;
+            i__1 = *n - *m + 1;
+            clarfp_(&i__1, &alpha, &a[k + m1 * a_dim1], lda, &tau[k]);
+            i__1 = k + k * a_dim1;
+            a[i__1].r = alpha.r, a[i__1].i = alpha.i;
+            i__1 = k;
+/* TAU(k) := conjg(TAU(k)) */
+            r_cnjg(&q__1, &tau[k]);
+            tau[i__1].r = q__1.r, tau[i__1].i = q__1.i;
+
+            i__1 = k;
+            if ((tau[i__1].r != 0.f || tau[i__1].i != 0.f) && k > 1) {
+
+/* We now perform the operation A := A*P( k )'. */
+
+/* Use the first ( k - 1 ) elements of TAU to store a( k ), */
+/* where a( k ) consists of the first ( k - 1 ) elements of */
+/* the kth column of A. Also let B denote the first */
+/* ( k - 1 ) rows of the last ( n - m ) columns of A. */
+
+                i__1 = k - 1;
+                ccopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &tau[1], &c__1);
+
+/* Form w = a( k ) + B*z( k ) in TAU. */
+
+                i__1 = k - 1;
+                i__2 = *n - *m;
+                cgemv_("No transpose", &i__1, &i__2, &c_b1, &a[m1 * a_dim1 +
+                        1], lda, &a[k + m1 * a_dim1], lda, &c_b1, &tau[1], &
+                        c__1);
+
+/* Now form a( k ) := a( k ) - conjg(tau)*w */
+/* and B := B - conjg(tau)*w*z( k )'. */
+
+                i__1 = k - 1;
+                r_cnjg(&q__2, &tau[k]);
+                q__1.r = -q__2.r, q__1.i = -q__2.i;
+                caxpy_(&i__1, &q__1, &tau[1], &c__1, &a[k * a_dim1 + 1], &
+                        c__1);
+                i__1 = k - 1;
+                i__2 = *n - *m;
+                r_cnjg(&q__2, &tau[k]);
+                q__1.r = -q__2.r, q__1.i = -q__2.i;
+                cgerc_(&i__1, &i__2, &q__1, &tau[1], &c__1, &a[k + m1 *
+                        a_dim1], lda, &a[m1 * a_dim1 + 1], lda);
+            }
+/* L20: */
+        }
+    }
+
+    return 0;
+
+/* End of CTZRQF */
+
+} /* ctzrqf_ */
diff --git a/contrib/libs/clapack/ctzrzf.c b/contrib/libs/clapack/ctzrzf.c
new file mode 100644
index 0000000000..759ccba1a7
--- /dev/null
+++ b/contrib/libs/clapack/ctzrzf.c
@@ -0,0 +1,310 @@
+/* ctzrzf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int ctzrzf_(integer *m, integer *n, complex *a, integer *lda,
+ complex *tau, complex *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+
+    /* Local variables */
+    integer i__, m1, ib, nb, ki, kk, mu, nx, iws, nbmin;
+    extern /* Subroutine */ int clarzb_(char *, char *, char *, char *,
+        integer *, integer *, integer *, integer *, complex *, integer *,
+        complex *, integer *, complex *, integer *, complex *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+        integer *, integer *);
+    extern /* Subroutine */ int clarzt_(char *, char *, integer *, integer *,
+        complex *, integer *, complex *, complex *, integer *), clatrz_(integer *, integer *, integer *, complex *,
+        integer *, complex *, complex *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CTZRZF reduces the M-by-N ( M<=N ) complex upper trapezoidal matrix A */
+/* to upper triangular form by means of unitary transformations. */
+
+/* The upper trapezoidal matrix A is factored as */
+
+/* A = ( R 0 ) * Z, */
+
+/* where Z is an N-by-N unitary matrix and R is an M-by-M upper */
+/* triangular matrix. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= M. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the leading M-by-N upper trapezoidal part of the */
+/* array A must contain the matrix to be factorized. */
+/* On exit, the leading M-by-M upper triangular part of A */
+/* contains the upper triangular matrix R, and elements M+1 to */
+/* N of the first M rows of A, with the array TAU, represent the */
+/* unitary matrix Z as a product of M elementary reflectors. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX array, dimension (M) */
+/* The scalar factors of the elementary reflectors. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* The factorization is obtained by Householder's method. The kth */
+/* transformation matrix, Z( k ), which is used to introduce zeros into */
+/* the ( m - k + 1 )th row of A, is given in the form */
+
+/* Z( k ) = ( I 0 ), */
+/* ( 0 T( k ) ) */
+
+/* where */
+
+/* T( k ) = I - tau*u( k )*u( k )', u( k ) = ( 1 ), */
+/* ( 0 ) */
+/* ( z( k ) ) */
+
+/* tau is a scalar and z( k ) is an ( n - m ) element vector. */
+/* tau and z( k ) are chosen to annihilate the elements of the kth row */
+/* of X. */
+
+/* The scalar tau is returned in the kth element of TAU and the vector */
+/* u( k ) in the kth row of A, such that the elements of z( k ) are */
+/* in a( k, m + 1 ), ..., a( k, n ). The elements of R are returned in */
+/* the upper triangular part of A. */
+
+/* Z is given by */
+
+/* Z = Z( 1 ) * Z( 2 ) * ... * Z( m ). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+    /* NOTE(review): f2c pointer shift — A, TAU and WORK are addressed with */
+    /* 1-based Fortran-style indices from here on. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+        *info = -1;
+    } else if (*n < *m) {
+        *info = -2;
+    } else if (*lda < max(1,*m)) {
+        *info = -4;
+    } else if (*lwork < max(1,*m) && ! lquery) {
+        *info = -7;
+    }
+
+    if (*info == 0) {
+        if (*m == 0 || *m == *n) {
+            lwkopt = 1;
+        } else {
+
+/* Determine the block size. */
+
+            nb = ilaenv_(&c__1, "CGERQF", " ", m, n, &c_n1, &c_n1);
+            lwkopt = *m * nb;
+        }
+        work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+/* NOTE(review): the LWORK check below repeats the one above; harmless */
+/* duplication carried over from the Fortran source. */
+
+        if (*lwork < max(1,*m) && ! lquery) {
+            *info = -7;
+        }
+    }
+
+    if (*info != 0) {
+        i__1 = -(*info);
+        xerbla_("CTZRZF", &i__1);
+        return 0;
+    } else if (lquery) {
+        return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0) {
+        return 0;
+    } else if (*m == *n) {
+
+/* Square case: nothing to annihilate; set all TAU to zero. */
+
+        i__1 = *n;
+        for (i__ = 1; i__ <= i__1; ++i__) {
+            i__2 = i__;
+            tau[i__2].r = 0.f, tau[i__2].i = 0.f;
+/* L10: */
+        }
+        return 0;
+    }
+
+    nbmin = 2;
+    nx = 1;
+    iws = *m;
+    if (nb > 1 && nb < *m) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+        i__1 = 0, i__2 = ilaenv_(&c__3, "CGERQF", " ", m, n, &c_n1, &c_n1);
+        nx = max(i__1,i__2);
+        if (nx < *m) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+            ldwork = *m;
+            iws = ldwork * nb;
+            if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+                nb = *lwork / ldwork;
+/* Computing MAX */
+                i__1 = 2, i__2 = ilaenv_(&c__2, "CGERQF", " ", m, n, &c_n1, &
+                        c_n1);
+                nbmin = max(i__1,i__2);
+            }
+        }
+    }
+
+    if (nb >= nbmin && nb < *m && nx < *m) {
+
+/* Use blocked code initially. */
+/* The last kk rows are handled by the block method. */
+
+/* Computing MIN */
+        i__1 = *m + 1;
+        m1 = min(i__1,*n);
+        ki = (*m - nx - 1) / nb * nb;
+/* Computing MIN */
+        i__1 = *m, i__2 = ki + nb;
+        kk = min(i__1,i__2);
+
+        i__1 = *m - kk + 1;
+        i__2 = -nb;
+        for (i__ = *m - kk + ki + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1;
+                i__ += i__2) {
+/* Computing MIN */
+            i__3 = *m - i__ + 1;
+            ib = min(i__3,nb);
+
+/* Compute the TZ factorization of the current block */
+/* A(i:i+ib-1,i:n) */
+
+            i__3 = *n - i__ + 1;
+            i__4 = *n - *m;
+            clatrz_(&ib, &i__3, &i__4, &a[i__ + i__ * a_dim1], lda, &tau[i__],
+                     &work[1]);
+            if (i__ > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+                i__3 = *n - *m;
+                clarzt_("Backward", "Rowwise", &i__3, &ib, &a[i__ + m1 *
+                        a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(1:i-1,i:n) from the right */
+
+                i__3 = i__ - 1;
+                i__4 = *n - i__ + 1;
+                i__5 = *n - *m;
+                clarzb_("Right", "No transpose", "Backward", "Rowwise", &i__3,
+                         &i__4, &ib, &i__5, &a[i__ + m1 * a_dim1], lda, &work[
+                        1], &ldwork, &a[i__ * a_dim1 + 1], lda, &work[ib + 1],
+                         &ldwork)
+                        ;
+            }
+/* L20: */
+        }
+
+/* NOTE(review): mu is the number of leading rows handed to the final */
+/* unblocked clatrz_ call below. */
+
+        mu = i__ + nb - 1;
+    } else {
+        mu = *m;
+    }
+
+/* Use unblocked code to factor the last or only block */
+
+    if (mu > 0) {
+        i__2 = *n - *m;
+        clatrz_(&mu, n, &i__2, &a[a_offset], lda, &tau[1], &work[1]);
+    }
+
+    work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+    return 0;
+
+/* End of CTZRZF */
+
+} /* ctzrzf_ */
diff --git a/contrib/libs/clapack/cung2l.c b/contrib/libs/clapack/cung2l.c
new file mode 100644
index 0000000000..5af1652abb
--- /dev/null
+++ b/contrib/libs/clapack/cung2l.c
@@ -0,0 +1,182 @@
+/* cung2l.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cung2l_(integer *m, integer *n, integer *k, complex *a,
+ integer *lda, complex *tau, complex *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    complex q__1;
+
+    /* Local variables */
+    integer i__, j, l, ii;
+    extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+        integer *), clarf_(char *, integer *, integer *, complex *,
+        integer *, complex *, complex *, integer *, complex *),
+        xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNG2L generates an m by n complex matrix Q with orthonormal columns, */
+/* which is defined as the last n columns of a product of k elementary */
+/* reflectors of order m */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by CGEQLF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. M >= N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. N >= K >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the (n-k+i)-th column must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by CGEQLF in the last k columns of its array */
+/* argument A. */
+/* On exit, the m-by-n matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) COMPLEX array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CGEQLF. */
+
+/* WORK (workspace) COMPLEX array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+    /* NOTE(review): f2c pointer shift — A, TAU and WORK are addressed with */
+    /* 1-based Fortran-style indices from here on. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+        *info = -1;
+    } else if (*n < 0 || *n > *m) {
+        *info = -2;
+    } else if (*k < 0 || *k > *n) {
+        *info = -3;
+    } else if (*lda < max(1,*m)) {
+        *info = -5;
+    }
+    if (*info != 0) {
+        i__1 = -(*info);
+        xerbla_("CUNG2L", &i__1);
+        return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n <= 0) {
+        return 0;
+    }
+
+/* Initialise columns 1:n-k to columns of the unit matrix */
+
+    i__1 = *n - *k;
+    for (j = 1; j <= i__1; ++j) {
+        i__2 = *m;
+        for (l = 1; l <= i__2; ++l) {
+            i__3 = l + j * a_dim1;
+            a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L10: */
+        }
+/* Unit entry sits at row m-n+j (bottom-aligned identity columns). */
+        i__2 = *m - *n + j + j * a_dim1;
+        a[i__2].r = 1.f, a[i__2].i = 0.f;
+/* L20: */
+    }
+
+    i__1 = *k;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+/* ii is the (1-based) column of A holding reflector H(i). */
+        ii = *n - *k + i__;
+
+/* Apply H(i) to A(1:m-k+i,1:n-k+i) from the left */
+
+        i__2 = *m - *n + ii + ii * a_dim1;
+        a[i__2].r = 1.f, a[i__2].i = 0.f;
+        i__2 = *m - *n + ii;
+        i__3 = ii - 1;
+        clarf_("Left", &i__2, &i__3, &a[ii * a_dim1 + 1], &c__1, &tau[i__], &
+                a[a_offset], lda, &work[1]);
+        i__2 = *m - *n + ii - 1;
+        i__3 = i__;
+        q__1.r = -tau[i__3].r, q__1.i = -tau[i__3].i;
+        cscal_(&i__2, &q__1, &a[ii * a_dim1 + 1], &c__1);
+        i__2 = *m - *n + ii + ii * a_dim1;
+        i__3 = i__;
+        q__1.r = 1.f - tau[i__3].r, q__1.i = 0.f - tau[i__3].i;
+        a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+
+/* Set A(m-k+i+1:m,n-k+i) to zero */
+
+        i__2 = *m;
+        for (l = *m - *n + ii + 1; l <= i__2; ++l) {
+            i__3 = l + ii * a_dim1;
+            a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L30: */
+        }
+/* L40: */
+    }
+    return 0;
+
+/* End of CUNG2L */
+
+} /* cung2l_ */
diff --git a/contrib/libs/clapack/cung2r.c b/contrib/libs/clapack/cung2r.c
new file mode 100644
index 0000000000..d02600f936
--- /dev/null
+++ b/contrib/libs/clapack/cung2r.c
@@ -0,0 +1,184 @@
+/* cung2r.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cung2r_(integer *m, integer *n, integer *k, complex *a,
+	integer *lda, complex *tau, complex *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    complex q__1;
+
+    /* Local variables */
+    integer i__, j, l;
+    extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+	    integer *), clarf_(char *, integer *, integer *, complex *,
+	    integer *, complex *, complex *, integer *, complex *),
+	    xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNG2R generates an m by n complex matrix Q with orthonormal columns, */
+/* which is defined as the first n columns of a product of k elementary */
+/* reflectors of order m */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by CGEQRF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. M >= N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. N >= K >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the i-th column must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by CGEQRF in the first k columns of its array */
+/* argument A. */
+/* On exit, the m by n matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) COMPLEX array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CGEQRF. */
+
+/* WORK (workspace) COMPLEX array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments: shift the C pointers so the 1-based */
+    /* (Fortran-style) indexing used below addresses the arrays correctly */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0 || *n > *m) {
+	*info = -2;
+    } else if (*k < 0 || *k > *n) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CUNG2R", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n <= 0) {
+	return 0;
+    }
+
+/* Initialise columns k+1:n to columns of the unit matrix */
+
+    i__1 = *n;
+    for (j = *k + 1; j <= i__1; ++j) {
+	i__2 = *m;
+	for (l = 1; l <= i__2; ++l) {
+	    i__3 = l + j * a_dim1;
+	    a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L10: */
+	}
+	i__2 = j + j * a_dim1;
+	a[i__2].r = 1.f, a[i__2].i = 0.f;
+/* L20: */
+    }
+
+/* Accumulate Q by applying the reflectors in reverse order, */
+/* i = k, k-1, ..., 1 */
+
+    for (i__ = *k; i__ >= 1; --i__) {
+
+/* Apply H(i) to A(i:m,i:n) from the left */
+
+	if (i__ < *n) {
+	    i__1 = i__ + i__ * a_dim1;
+	    a[i__1].r = 1.f, a[i__1].i = 0.f;
+	    i__1 = *m - i__ + 1;
+	    i__2 = *n - i__;
+	    clarf_("Left", &i__1, &i__2, &a[i__ + i__ * a_dim1], &c__1, &tau[
+		    i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+	}
+	if (i__ < *m) {
+	    i__1 = *m - i__;
+	    i__2 = i__;
+	    q__1.r = -tau[i__2].r, q__1.i = -tau[i__2].i;
+	    cscal_(&i__1, &q__1, &a[i__ + 1 + i__ * a_dim1], &c__1);
+	}
+	i__1 = i__ + i__ * a_dim1;
+	i__2 = i__;
+	q__1.r = 1.f - tau[i__2].r, q__1.i = 0.f - tau[i__2].i;
+	a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+
+/* Set A(1:i-1,i) to zero */
+
+	i__1 = i__ - 1;
+	for (l = 1; l <= i__1; ++l) {
+	    i__2 = l + i__ * a_dim1;
+	    a[i__2].r = 0.f, a[i__2].i = 0.f;
+/* L30: */
+	}
+/* L40: */
+    }
+    return 0;
+
+/* End of CUNG2R */
+
+} /* cung2r_ */
diff --git a/contrib/libs/clapack/cungbr.c b/contrib/libs/clapack/cungbr.c
new file mode 100644
index 0000000000..c8369b06f8
--- /dev/null
+++ b/contrib/libs/clapack/cungbr.c
@@ -0,0 +1,309 @@
+/* cungbr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cungbr_(char *vect, integer *m, integer *n, integer *k,
+	complex *a, integer *lda, complex *tau, complex *work, integer *lwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__, j, nb, mn;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    logical wantq;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int cunglq_(integer *, integer *, integer *,
+	    complex *, integer *, complex *, complex *, integer *, integer *),
+	     cungqr_(integer *, integer *, integer *, complex *, integer *,
+	    complex *, complex *, integer *, integer *);
+    integer lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNGBR generates one of the complex unitary matrices Q or P**H */
+/* determined by CGEBRD when reducing a complex matrix A to bidiagonal */
+/* form: A = Q * B * P**H. Q and P**H are defined as products of */
+/* elementary reflectors H(i) or G(i) respectively. */
+
+/* If VECT = 'Q', A is assumed to have been an M-by-K matrix, and Q */
+/* is of order M: */
+/* if m >= k, Q = H(1) H(2) . . . H(k) and CUNGBR returns the first n */
+/* columns of Q, where m >= n >= k; */
+/* if m < k, Q = H(1) H(2) . . . H(m-1) and CUNGBR returns Q as an */
+/* M-by-M matrix. */
+
+/* If VECT = 'P', A is assumed to have been a K-by-N matrix, and P**H */
+/* is of order N: */
+/* if k < n, P**H = G(k) . . . G(2) G(1) and CUNGBR returns the first m */
+/* rows of P**H, where n >= m >= k; */
+/* if k >= n, P**H = G(n-1) . . . G(2) G(1) and CUNGBR returns P**H as */
+/* an N-by-N matrix. */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* Specifies whether the matrix Q or the matrix P**H is */
+/* required, as defined in the transformation applied by CGEBRD: */
+/* = 'Q': generate Q; */
+/* = 'P': generate P**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q or P**H to be returned. */
+/* M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q or P**H to be returned. */
+/* N >= 0. */
+/* If VECT = 'Q', M >= N >= min(M,K); */
+/* if VECT = 'P', N >= M >= min(N,K). */
+
+/* K (input) INTEGER */
+/* If VECT = 'Q', the number of columns in the original M-by-K */
+/* matrix reduced by CGEBRD. */
+/* If VECT = 'P', the number of rows in the original K-by-N */
+/* matrix reduced by CGEBRD. */
+/* K >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the vectors which define the elementary reflectors, */
+/* as returned by CGEBRD. */
+/* On exit, the M-by-N matrix Q or P**H. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= M. */
+
+/* TAU (input) COMPLEX array, dimension */
+/* (min(M,K)) if VECT = 'Q' */
+/* (min(N,K)) if VECT = 'P' */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i) or G(i), which determines Q or P**H, as */
+/* returned by CGEBRD in its array argument TAUQ or TAUP. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,min(M,N)). */
+/* For optimum performance LWORK >= min(M,N)*NB, where NB */
+/* is the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments: shift the C pointers so the 1-based */
+    /* (Fortran-style) indexing used below addresses the arrays correctly */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    wantq = lsame_(vect, "Q");
+    mn = min(*m,*n);
+    lquery = *lwork == -1;
+    if (! wantq && ! lsame_(vect, "P")) {
+	*info = -1;
+    } else if (*m < 0) {
+	*info = -2;
+    } else if (*n < 0 || wantq && (*n > *m || *n < min(*m,*k)) || ! wantq && (
+	    *m > *n || *m < min(*n,*k))) {
+	*info = -3;
+    } else if (*k < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*m)) {
+	*info = -6;
+    } else if (*lwork < max(1,mn) && ! lquery) {
+	*info = -9;
+    }
+
+    if (*info == 0) {
+	if (wantq) {
+	    nb = ilaenv_(&c__1, "CUNGQR", " ", m, n, k, &c_n1);
+	} else {
+	    nb = ilaenv_(&c__1, "CUNGLQ", " ", m, n, k, &c_n1);
+	}
+	lwkopt = max(1,mn) * nb;
+	work[1].r = (real) lwkopt, work[1].i = 0.f;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CUNGBR", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0) {
+	work[1].r = 1.f, work[1].i = 0.f;
+	return 0;
+    }
+
+    if (wantq) {
+
+/* Form Q, determined by a call to CGEBRD to reduce an m-by-k */
+/* matrix */
+
+	if (*m >= *k) {
+
+/* If m >= k, assume m >= n >= k */
+
+	    cungqr_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
+		    iinfo);
+
+	} else {
+
+/* If m < k, assume m = n */
+
+/* Shift the vectors which define the elementary reflectors one */
+/* column to the right, and set the first row and column of Q */
+/* to those of the unit matrix */
+
+	    for (j = *m; j >= 2; --j) {
+		i__1 = j * a_dim1 + 1;
+		a[i__1].r = 0.f, a[i__1].i = 0.f;
+		i__1 = *m;
+		for (i__ = j + 1; i__ <= i__1; ++i__) {
+		    i__2 = i__ + j * a_dim1;
+		    i__3 = i__ + (j - 1) * a_dim1;
+		    a[i__2].r = a[i__3].r, a[i__2].i = a[i__3].i;
+/* L10: */
+		}
+/* L20: */
+	    }
+	    i__1 = a_dim1 + 1;
+	    a[i__1].r = 1.f, a[i__1].i = 0.f;
+	    i__1 = *m;
+	    for (i__ = 2; i__ <= i__1; ++i__) {
+		i__2 = i__ + a_dim1;
+		a[i__2].r = 0.f, a[i__2].i = 0.f;
+/* L30: */
+	    }
+	    if (*m > 1) {
+
+/* Form Q(2:m,2:m) */
+
+		i__1 = *m - 1;
+		i__2 = *m - 1;
+		i__3 = *m - 1;
+		cungqr_(&i__1, &i__2, &i__3, &a[(a_dim1 << 1) + 2], lda, &tau[
+			1], &work[1], lwork, &iinfo);
+	    }
+	}
+    } else {
+
+/* Form P**H, determined by a call to CGEBRD to reduce a k-by-n */
+/* matrix */
+
+	if (*k < *n) {
+
+/* If k < n, assume k <= m <= n */
+
+	    cunglq_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
+		    iinfo);
+
+	} else {
+
+/* If k >= n, assume m = n */
+
+/* Shift the vectors which define the elementary reflectors one */
+/* row downward, and set the first row and column of P**H to */
+/* those of the unit matrix */
+
+	    i__1 = a_dim1 + 1;
+	    a[i__1].r = 1.f, a[i__1].i = 0.f;
+	    i__1 = *n;
+	    for (i__ = 2; i__ <= i__1; ++i__) {
+		i__2 = i__ + a_dim1;
+		a[i__2].r = 0.f, a[i__2].i = 0.f;
+/* L40: */
+	    }
+	    i__1 = *n;
+	    for (j = 2; j <= i__1; ++j) {
+		for (i__ = j - 1; i__ >= 2; --i__) {
+		    i__2 = i__ + j * a_dim1;
+		    i__3 = i__ - 1 + j * a_dim1;
+		    a[i__2].r = a[i__3].r, a[i__2].i = a[i__3].i;
+/* L50: */
+		}
+		i__2 = j * a_dim1 + 1;
+		a[i__2].r = 0.f, a[i__2].i = 0.f;
+/* L60: */
+	    }
+	    if (*n > 1) {
+
+/* Form P**H(2:n,2:n) */
+
+		i__1 = *n - 1;
+		i__2 = *n - 1;
+		i__3 = *n - 1;
+		cunglq_(&i__1, &i__2, &i__3, &a[(a_dim1 << 1) + 2], lda, &tau[
+			1], &work[1], lwork, &iinfo);
+	    }
+	}
+    }
+    work[1].r = (real) lwkopt, work[1].i = 0.f;
+    return 0;
+
+/* End of CUNGBR */
+
+} /* cungbr_ */
diff --git a/contrib/libs/clapack/cunghr.c b/contrib/libs/clapack/cunghr.c
new file mode 100644
index 0000000000..c52e4a4341
--- /dev/null
+++ b/contrib/libs/clapack/cunghr.c
@@ -0,0 +1,223 @@
+/* cunghr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cunghr_(integer *n, integer *ilo, integer *ihi, complex *
+	a, integer *lda, complex *tau, complex *work, integer *lwork, integer
+	*info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, j, nb, nh, iinfo;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int cungqr_(integer *, integer *, integer *,
+	    complex *, integer *, complex *, complex *, integer *, integer *);
+    integer lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNGHR generates a complex unitary matrix Q which is defined as the */
+/* product of IHI-ILO elementary reflectors of order N, as returned by */
+/* CGEHRD: */
+
+/* Q = H(ilo) H(ilo+1) . . . H(ihi-1). */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix Q. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* ILO and IHI must have the same values as in the previous call */
+/* of CGEHRD. Q is equal to the unit matrix except in the */
+/* submatrix Q(ilo+1:ihi,ilo+1:ihi). */
+/* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the vectors which define the elementary reflectors, */
+/* as returned by CGEHRD. */
+/* On exit, the N-by-N unitary matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (input) COMPLEX array, dimension (N-1) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CGEHRD. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= IHI-ILO. */
+/* For optimum performance LWORK >= (IHI-ILO)*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments: shift the C pointers so the 1-based */
+    /* (Fortran-style) indexing used below addresses the arrays correctly */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    nh = *ihi - *ilo;
+    lquery = *lwork == -1;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*ilo < 1 || *ilo > max(1,*n)) {
+	*info = -2;
+    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*lwork < max(1,nh) && ! lquery) {
+	*info = -8;
+    }
+
+    if (*info == 0) {
+	nb = ilaenv_(&c__1, "CUNGQR", " ", &nh, &nh, &nh, &c_n1);
+	lwkopt = max(1,nh) * nb;
+	work[1].r = (real) lwkopt, work[1].i = 0.f;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CUNGHR", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	work[1].r = 1.f, work[1].i = 0.f;
+	return 0;
+    }
+
+/* Shift the vectors which define the elementary reflectors one */
+/* column to the right, and set the first ilo and the last n-ihi */
+/* rows and columns to those of the unit matrix */
+
+    i__1 = *ilo + 1;
+    for (j = *ihi; j >= i__1; --j) {
+	i__2 = j - 1;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * a_dim1;
+	    a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L10: */
+	}
+	i__2 = *ihi;
+	for (i__ = j + 1; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * a_dim1;
+	    i__4 = i__ + (j - 1) * a_dim1;
+	    a[i__3].r = a[i__4].r, a[i__3].i = a[i__4].i;
+/* L20: */
+	}
+	i__2 = *n;
+	for (i__ = *ihi + 1; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * a_dim1;
+	    a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L30: */
+	}
+/* L40: */
+    }
+    i__1 = *ilo;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * a_dim1;
+	    a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L50: */
+	}
+	i__2 = j + j * a_dim1;
+	a[i__2].r = 1.f, a[i__2].i = 0.f;
+/* L60: */
+    }
+    i__1 = *n;
+    for (j = *ihi + 1; j <= i__1; ++j) {
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * a_dim1;
+	    a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L70: */
+	}
+	i__2 = j + j * a_dim1;
+	a[i__2].r = 1.f, a[i__2].i = 0.f;
+/* L80: */
+    }
+
+    if (nh > 0) {
+
+/* Generate Q(ilo+1:ihi,ilo+1:ihi) */
+
+	cungqr_(&nh, &nh, &nh, &a[*ilo + 1 + (*ilo + 1) * a_dim1], lda, &tau[*
+		ilo], &work[1], lwork, &iinfo);
+    }
+
+/* Return the optimal workspace size (computed above) in WORK(1) */
+
+    work[1].r = (real) lwkopt, work[1].i = 0.f;
+    return 0;
+
+/* End of CUNGHR */
+
+} /* cunghr_ */
diff --git a/contrib/libs/clapack/cungl2.c b/contrib/libs/clapack/cungl2.c
new file mode 100644
index 0000000000..6a74939d16
--- /dev/null
+++ b/contrib/libs/clapack/cungl2.c
@@ -0,0 +1,193 @@
+/* cungl2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cungl2_(integer *m, integer *n, integer *k, complex *a,
+	integer *lda, complex *tau, complex *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    complex q__1, q__2;
+
+    /* Builtin functions */
+    void r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    integer i__, j, l;
+    extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+	    integer *), clarf_(char *, integer *, integer *, complex *,
+	    integer *, complex *, complex *, integer *, complex *),
+	    clacgv_(integer *, complex *, integer *), xerbla_(char *, integer
+	    *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNGL2 generates an m-by-n complex matrix Q with orthonormal rows, */
+/* which is defined as the first m rows of a product of k elementary */
+/* reflectors of order n */
+
+/* Q = H(k)' . . . H(2)' H(1)' */
+
+/* as returned by CGELQF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. N >= M. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. M >= K >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the i-th row must contain the vector which defines */
+/* the elementary reflector H(i), for i = 1,2,...,k, as returned */
+/* by CGELQF in the first k rows of its array argument A. */
+/* On exit, the m by n matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) COMPLEX array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CGELQF. */
+
+/* WORK (workspace) COMPLEX array, dimension (M) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments: shift the C pointers so the 1-based */
+    /* (Fortran-style) indexing used below addresses the arrays correctly */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < *m) {
+	*info = -2;
+    } else if (*k < 0 || *k > *m) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CUNGL2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m <= 0) {
+	return 0;
+    }
+
+    if (*k < *m) {
+
+/* Initialise rows k+1:m to rows of the unit matrix */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (l = *k + 1; l <= i__2; ++l) {
+		i__3 = l + j * a_dim1;
+		a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L10: */
+	    }
+	    if (j > *k && j <= *m) {
+		i__2 = j + j * a_dim1;
+		a[i__2].r = 1.f, a[i__2].i = 0.f;
+	    }
+/* L20: */
+	}
+    }
+
+    for (i__ = *k; i__ >= 1; --i__) {
+
+/* Apply H(i)' to A(i:m,i:n) from the right */
+
+	if (i__ < *n) {
+	    i__1 = *n - i__;
+	    clacgv_(&i__1, &a[i__ + (i__ + 1) * a_dim1], lda);
+	    if (i__ < *m) {
+		i__1 = i__ + i__ * a_dim1;
+		a[i__1].r = 1.f, a[i__1].i = 0.f;
+		i__1 = *m - i__;
+		i__2 = *n - i__ + 1;
+		r_cnjg(&q__1, &tau[i__]);
+		clarf_("Right", &i__1, &i__2, &a[i__ + i__ * a_dim1], lda, &
+			q__1, &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+	    }
+	    i__1 = *n - i__;
+	    i__2 = i__;
+	    q__1.r = -tau[i__2].r, q__1.i = -tau[i__2].i;
+	    cscal_(&i__1, &q__1, &a[i__ + (i__ + 1) * a_dim1], lda);
+	    i__1 = *n - i__;
+	    clacgv_(&i__1, &a[i__ + (i__ + 1) * a_dim1], lda);
+	}
+	i__1 = i__ + i__ * a_dim1;
+	r_cnjg(&q__2, &tau[i__]);
+	q__1.r = 1.f - q__2.r, q__1.i = 0.f - q__2.i;
+	a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+
+/* Set A(i,1:i-1) to zero */
+
+	i__1 = i__ - 1;
+	for (l = 1; l <= i__1; ++l) {
+	    i__2 = i__ + l * a_dim1;
+	    a[i__2].r = 0.f, a[i__2].i = 0.f;
+/* L30: */
+	}
+/* L40: */
+    }
+    return 0;
+
+/* End of CUNGL2 */
+
+} /* cungl2_ */
diff --git a/contrib/libs/clapack/cunglq.c b/contrib/libs/clapack/cunglq.c
new file mode 100644
index 0000000000..9165a4702c
--- /dev/null
+++ b/contrib/libs/clapack/cunglq.c
@@ -0,0 +1,284 @@
+/* cunglq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int cunglq_(integer *m, integer *n, integer *k, complex *a,
+	integer *lda, complex *tau, complex *work, integer *lwork, integer *
+	info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int cungl2_(integer *, integer *, integer *,
+	    complex *, integer *, complex *, complex *, integer *), clarfb_(
+	    char *, char *, char *, char *, integer *, integer *, integer *,
+	    complex *, integer *, complex *, integer *, complex *, integer *,
+	    complex *, integer *), clarft_(
+	    char *, char *, integer *, integer *, complex *, integer *,
+	    complex *, complex *, integer *), xerbla_(char *,
+	    integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNGLQ generates an M-by-N complex matrix Q with orthonormal rows, */
+/* which is defined as the first M rows of a product of K elementary */
+/* reflectors of order N */
+
+/* Q = H(k)' . . . H(2)' H(1)' */
+
+/* as returned by CGELQF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. N >= M. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. M >= K >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the i-th row must contain the vector which defines */
+/* the elementary reflector H(i), for i = 1,2,...,k, as returned */
+/* by CGELQF in the first k rows of its array argument A. */
+/* On exit, the M-by-N matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) COMPLEX array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CGELQF. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit; */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments: shift the C pointers so the 1-based */
+    /* (Fortran-style) indexing used below addresses the arrays correctly */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    nb = ilaenv_(&c__1, "CUNGLQ", " ", m, n, k, &c_n1);
+    lwkopt = max(1,*m) * nb;
+    work[1].r = (real) lwkopt, work[1].i = 0.f;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < *m) {
+	*info = -2;
+    } else if (*k < 0 || *k > *m) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    } else if (*lwork < max(1,*m) && ! lquery) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CUNGLQ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m <= 0) {
+	work[1].r = 1.f, work[1].i = 0.f;
+	return 0;
+    }
+
+    nbmin = 2;
+    nx = 0;
+    iws = *m;
+    if (nb > 1 && nb < *k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+	i__1 = 0, i__2 = ilaenv_(&c__3, "CUNGLQ", " ", m, n, k, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *m;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "CUNGLQ", " ", m, n, k, &c_n1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    if (nb >= nbmin && nb < *k && nx < *k) {
+
+/* Use blocked code after the last block. */
+/* The first kk rows are handled by the block method. */
+
+	ki = (*k - nx - 1) / nb * nb;
+/* Computing MIN */
+	i__1 = *k, i__2 = ki + nb;
+	kk = min(i__1,i__2);
+
+/* Set A(kk+1:m,1:kk) to zero. */
+
+	i__1 = kk;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = kk + 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L10: */
+	    }
+/* L20: */
+	}
+    } else {
+	kk = 0;
+    }
+
+/* Use unblocked code for the last or only block. */
+
+    if (kk < *m) {
+	i__1 = *m - kk;
+	i__2 = *n - kk;
+	i__3 = *k - kk;
+	cungl2_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
+		tau[kk + 1], &work[1], &iinfo);
+    }
+
+    if (kk > 0) {
+
+/* Use blocked code */
+
+	i__1 = -nb;
+	for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
+/* Computing MIN */
+	    i__2 = nb, i__3 = *k - i__ + 1;
+	    ib = min(i__2,i__3);
+	    if (i__ + ib <= *m) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+		i__2 = *n - i__ + 1;
+		clarft_("Forward", "Rowwise", &i__2, &ib, &a[i__ + i__ *
+			a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(i+ib:m,i:n) from the right */
+
+		i__2 = *m - i__ - ib + 1;
+		i__3 = *n - i__ + 1;
+		clarfb_("Right", "Conjugate transpose", "Forward", "Rowwise",
+			&i__2, &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[
+			1], &ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[
+			ib + 1], &ldwork);
+	    }
+
+/* Apply H' to columns i:n of current block */
+
+	    i__2 = *n - i__ + 1;
+	    cungl2_(&ib, &i__2, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
+		    work[1], &iinfo);
+
+/* Set columns 1:i-1 of current block to zero */
+
+	    i__2 = i__ - 1;
+	    for (j = 1; j <= i__2; ++j) {
+		i__3 = i__ + ib - 1;
+		for (l = i__; l <= i__3; ++l) {
+		    i__4 = l + j * a_dim1;
+		    a[i__4].r = 0.f, a[i__4].i = 0.f;
+/* L30: */
+		}
+/* L40: */
+	    }
+/* L50: */
+	}
+    }
+
+/* Set WORK(1) to the amount of workspace used by the blocked code */
+
+    work[1].r = (real) iws, work[1].i = 0.f;
+    return 0;
+
+/* End of CUNGLQ */
+
+} /* cunglq_ */
diff --git a/contrib/libs/clapack/cungql.c b/contrib/libs/clapack/cungql.c
new file mode 100644
index 0000000000..fc7f77d465
--- /dev/null
+++ b/contrib/libs/clapack/cungql.c
@@ -0,0 +1,293 @@
+/* cungql.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int cungql_(integer *m, integer *n, integer *k, complex *a,
+	integer *lda, complex *tau, complex *work, integer *lwork, integer *
+	info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+
+    /* Local variables */
+    integer i__, j, l, ib, nb, kk, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int cung2l_(integer *, integer *, integer *,
+	    complex *, integer *, complex *, complex *, integer *), clarfb_(
+	    char *, char *, char *, char *, integer *, integer *, integer *,
+	    complex *, integer *, complex *, integer *, complex *, integer *,
+	    complex *, integer *), clarft_(
+	    char *, char *, integer *, integer *, complex *, integer *,
+	    complex *, complex *, integer *), xerbla_(char *,
+	    integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNGQL generates an M-by-N complex matrix Q with orthonormal columns, */
+/* which is defined as the last N columns of a product of K elementary */
+/* reflectors of order M */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by CGEQLF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. M >= N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. N >= K >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the (n-k+i)-th column must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by CGEQLF in the last k columns of its array */
+/* argument A. */
+/* On exit, the M-by-N matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) COMPLEX array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CGEQLF. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+/*     (f2c note: the pointer shifts below let the 1-based Fortran */
+/*     reference A(i,j) be written as a[i + j * a_dim1] in C.) */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0 || *n > *m) {
+	*info = -2;
+    } else if (*k < 0 || *k > *n) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    }
+
+    if (*info == 0) {
+	if (*n == 0) {
+	    lwkopt = 1;
+	} else {
+	    nb = ilaenv_(&c__1, "CUNGQL", " ", m, n, k, &c_n1);
+	    lwkopt = *n * nb;
+	}
+	work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+	if (*lwork < max(1,*n) && ! lquery) {
+	    *info = -8;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CUNGQL", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n <= 0) {
+	return 0;
+    }
+
+    nbmin = 2;
+    nx = 0;
+    iws = *n;
+    if (nb > 1 && nb < *k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+	i__1 = 0, i__2 = ilaenv_(&c__3, "CUNGQL", " ", m, n, k, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *n;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "CUNGQL", " ", m, n, k, &c_n1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    if (nb >= nbmin && nb < *k && nx < *k) {
+
+/* Use blocked code after the first block. */
+/* The last kk columns are handled by the block method. */
+
+/* Computing MIN */
+	i__1 = *k, i__2 = (*k - nx + nb - 1) / nb * nb;
+	kk = min(i__1,i__2);
+
+/* Set A(m-kk+1:m,1:n-kk) to zero. */
+
+	i__1 = *n - kk;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = *m - kk + 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L10: */
+	    }
+/* L20: */
+	}
+    } else {
+	kk = 0;
+    }
+
+/* Use unblocked code for the first or only block. */
+
+    i__1 = *m - kk;
+    i__2 = *n - kk;
+    i__3 = *k - kk;
+    cung2l_(&i__1, &i__2, &i__3, &a[a_offset], lda, &tau[1], &work[1], &iinfo)
+	    ;
+
+    if (kk > 0) {
+
+/* Use blocked code */
+
+/*        (f2c rendering of "DO I = K-KK+1, K, NB": the ternary selects */
+/*        the loop test that matches the sign of the step i__2 = nb.) */
+
+	i__1 = *k;
+	i__2 = nb;
+	for (i__ = *k - kk + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+		i__2) {
+/* Computing MIN */
+	    i__3 = nb, i__4 = *k - i__ + 1;
+	    ib = min(i__3,i__4);
+	    if (*n - *k + i__ > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+		i__3 = *m - *k + i__ + ib - 1;
+		clarft_("Backward", "Columnwise", &i__3, &ib, &a[(*n - *k +
+			i__) * a_dim1 + 1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(1:m-k+i+ib-1,1:n-k+i-1) from the left */
+
+		i__3 = *m - *k + i__ + ib - 1;
+		i__4 = *n - *k + i__ - 1;
+		clarfb_("Left", "No transpose", "Backward", "Columnwise", &
+			i__3, &i__4, &ib, &a[(*n - *k + i__) * a_dim1 + 1],
+			lda, &work[1], &ldwork, &a[a_offset], lda, &work[ib +
+			1], &ldwork);
+	    }
+
+/* Apply H to rows 1:m-k+i+ib-1 of current block */
+
+	    i__3 = *m - *k + i__ + ib - 1;
+	    cung2l_(&i__3, &ib, &ib, &a[(*n - *k + i__) * a_dim1 + 1], lda, &
+		    tau[i__], &work[1], &iinfo);
+
+/* Set rows m-k+i+ib:m of current block to zero */
+
+	    i__3 = *n - *k + i__ + ib - 1;
+	    for (j = *n - *k + i__; j <= i__3; ++j) {
+		i__4 = *m;
+		for (l = *m - *k + i__ + ib; l <= i__4; ++l) {
+		    i__5 = l + j * a_dim1;
+		    a[i__5].r = 0.f, a[i__5].i = 0.f;
+/* L30: */
+		}
+/* L40: */
+	    }
+/* L50: */
+	}
+    }
+
+/*     Return in WORK(1) the workspace size actually used (IWS). */
+
+    work[1].r = (real) iws, work[1].i = 0.f;
+    return 0;
+
+/* End of CUNGQL */
+
+} /* cungql_ */
diff --git a/contrib/libs/clapack/cungqr.c b/contrib/libs/clapack/cungqr.c
new file mode 100644
index 0000000000..beb92b7e84
--- /dev/null
+++ b/contrib/libs/clapack/cungqr.c
@@ -0,0 +1,285 @@
+/* cungqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int cungqr_(integer *m, integer *n, integer *k, complex *a,
+	integer *lda, complex *tau, complex *work, integer *lwork, integer *
+	info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int cung2r_(integer *, integer *, integer *,
+	    complex *, integer *, complex *, complex *, integer *), clarfb_(
+	    char *, char *, char *, char *, integer *, integer *, integer *,
+	    complex *, integer *, complex *, integer *, complex *, integer *,
+	    complex *, integer *), clarft_(
+	    char *, char *, integer *, integer *, complex *, integer *,
+	    complex *, complex *, integer *), xerbla_(char *,
+	    integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNGQR generates an M-by-N complex matrix Q with orthonormal columns, */
+/* which is defined as the first N columns of a product of K elementary */
+/* reflectors of order M */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by CGEQRF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. M >= N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. N >= K >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the i-th column must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by CGEQRF in the first k columns of its array */
+/* argument A. */
+/* On exit, the M-by-N matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) COMPLEX array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CGEQRF. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+/*     (f2c note: the pointer shifts below let the 1-based Fortran */
+/*     reference A(i,j) be written as a[i + j * a_dim1] in C.) */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    nb = ilaenv_(&c__1, "CUNGQR", " ", m, n, k, &c_n1);
+    lwkopt = max(1,*n) * nb;
+    work[1].r = (real) lwkopt, work[1].i = 0.f;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0 || *n > *m) {
+	*info = -2;
+    } else if (*k < 0 || *k > *n) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    } else if (*lwork < max(1,*n) && ! lquery) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CUNGQR", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n <= 0) {
+	work[1].r = 1.f, work[1].i = 0.f;
+	return 0;
+    }
+
+    nbmin = 2;
+    nx = 0;
+    iws = *n;
+    if (nb > 1 && nb < *k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+	i__1 = 0, i__2 = ilaenv_(&c__3, "CUNGQR", " ", m, n, k, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *n;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "CUNGQR", " ", m, n, k, &c_n1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    if (nb >= nbmin && nb < *k && nx < *k) {
+
+/* Use blocked code after the last block. */
+/* The first kk columns are handled by the block method. */
+
+	ki = (*k - nx - 1) / nb * nb;
+/* Computing MIN */
+	i__1 = *k, i__2 = ki + nb;
+	kk = min(i__1,i__2);
+
+/* Set A(1:kk,kk+1:n) to zero. */
+
+	i__1 = *n;
+	for (j = kk + 1; j <= i__1; ++j) {
+	    i__2 = kk;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L10: */
+	    }
+/* L20: */
+	}
+    } else {
+	kk = 0;
+    }
+
+/* Use unblocked code for the last or only block. */
+
+    if (kk < *n) {
+	i__1 = *m - kk;
+	i__2 = *n - kk;
+	i__3 = *k - kk;
+	cung2r_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
+		tau[kk + 1], &work[1], &iinfo);
+    }
+
+    if (kk > 0) {
+
+/* Use blocked code */
+
+/*        (f2c rendering of "DO I = KI+1, 1, -NB": the step i__1 = -nb is */
+/*        negative, so the ternary keeps looping while i__ >= 1.) */
+
+	i__1 = -nb;
+	for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
+/* Computing MIN */
+	    i__2 = nb, i__3 = *k - i__ + 1;
+	    ib = min(i__2,i__3);
+	    if (i__ + ib <= *n) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+		i__2 = *m - i__ + 1;
+		clarft_("Forward", "Columnwise", &i__2, &ib, &a[i__ + i__ *
+			a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(i:m,i+ib:n) from the left */
+
+		i__2 = *m - i__ + 1;
+		i__3 = *n - i__ - ib + 1;
+		clarfb_("Left", "No transpose", "Forward", "Columnwise", &
+			i__2, &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[
+			1], &ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, &
+			work[ib + 1], &ldwork);
+	    }
+
+/* Apply H to rows i:m of current block */
+
+	    i__2 = *m - i__ + 1;
+	    cung2r_(&i__2, &ib, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
+		    work[1], &iinfo);
+
+/* Set rows 1:i-1 of current block to zero */
+
+	    i__2 = i__ + ib - 1;
+	    for (j = i__; j <= i__2; ++j) {
+		i__3 = i__ - 1;
+		for (l = 1; l <= i__3; ++l) {
+		    i__4 = l + j * a_dim1;
+		    a[i__4].r = 0.f, a[i__4].i = 0.f;
+/* L30: */
+		}
+/* L40: */
+	    }
+/* L50: */
+	}
+    }
+
+/*     Return in WORK(1) the workspace size actually used (IWS). */
+
+    work[1].r = (real) iws, work[1].i = 0.f;
+    return 0;
+
+/* End of CUNGQR */
+
+} /* cungqr_ */
diff --git a/contrib/libs/clapack/cungr2.c b/contrib/libs/clapack/cungr2.c
new file mode 100644
index 0000000000..5a9c5f3187
--- /dev/null
+++ b/contrib/libs/clapack/cungr2.c
@@ -0,0 +1,192 @@
+/* cungr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cungr2_(integer *m, integer *n, integer *k, complex *a,
+	integer *lda, complex *tau, complex *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    complex q__1, q__2;
+
+    /* Builtin functions */
+    void r_cnjg(complex *, complex *);
+
+    /* Local variables */
+    integer i__, j, l, ii;
+    extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+	    integer *), clarf_(char *, integer *, integer *, complex *,
+	    integer *, complex *, complex *, integer *, complex *),
+	    clacgv_(integer *, complex *, integer *), xerbla_(char *, integer
+	    *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNGR2 generates an m by n complex matrix Q with orthonormal rows, */
+/* which is defined as the last m rows of a product of k elementary */
+/* reflectors of order n */
+
+/* Q = H(1)' H(2)' . . . H(k)' */
+
+/* as returned by CGERQF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. N >= M. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. M >= K >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the (m-k+i)-th row must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by CGERQF in the last k rows of its array argument */
+/* A. */
+/* On exit, the m-by-n matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) COMPLEX array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CGERQF. */
+
+/* WORK (workspace) COMPLEX array, dimension (M) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+/*     (f2c note: the pointer shifts below let the 1-based Fortran */
+/*     reference A(i,j) be written as a[i + j * a_dim1] in C.) */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < *m) {
+	*info = -2;
+    } else if (*k < 0 || *k > *m) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CUNGR2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m <= 0) {
+	return 0;
+    }
+
+    if (*k < *m) {
+
+/* Initialise rows 1:m-k to rows of the unit matrix */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m - *k;
+	    for (l = 1; l <= i__2; ++l) {
+		i__3 = l + j * a_dim1;
+		a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L10: */
+	    }
+	    if (j > *n - *m && j <= *n - *k) {
+		i__2 = *m - *n + j + j * a_dim1;
+		a[i__2].r = 1.f, a[i__2].i = 0.f;
+	    }
+/* L20: */
+	}
+    }
+
+    i__1 = *k;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	ii = *m - *k + i__;
+
+/* Apply H(i)' to A(1:m-k+i,1:n-k+i) from the right */
+
+/*        (Row ii is conjugated with clacgv_ before clarf_, the reflector */
+/*        is applied with conj(tau), then the row is scaled by -tau and */
+/*        conjugated back by the second clacgv_ call below.) */
+
+	i__2 = *n - *m + ii - 1;
+	clacgv_(&i__2, &a[ii + a_dim1], lda);
+	i__2 = ii + (*n - *m + ii) * a_dim1;
+	a[i__2].r = 1.f, a[i__2].i = 0.f;
+	i__2 = ii - 1;
+	i__3 = *n - *m + ii;
+	r_cnjg(&q__1, &tau[i__]);
+	clarf_("Right", &i__2, &i__3, &a[ii + a_dim1], lda, &q__1, &a[
+		a_offset], lda, &work[1]);
+	i__2 = *n - *m + ii - 1;
+	i__3 = i__;
+	q__1.r = -tau[i__3].r, q__1.i = -tau[i__3].i;
+	cscal_(&i__2, &q__1, &a[ii + a_dim1], lda);
+	i__2 = *n - *m + ii - 1;
+	clacgv_(&i__2, &a[ii + a_dim1], lda);
+	i__2 = ii + (*n - *m + ii) * a_dim1;
+	r_cnjg(&q__2, &tau[i__]);
+	q__1.r = 1.f - q__2.r, q__1.i = 0.f - q__2.i;
+	a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+
+/* Set A(m-k+i,n-k+i+1:n) to zero */
+
+	i__2 = *n;
+	for (l = *n - *m + ii + 1; l <= i__2; ++l) {
+	    i__3 = ii + l * a_dim1;
+	    a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L30: */
+	}
+/* L40: */
+    }
+    return 0;
+
+/* End of CUNGR2 */
+
+} /* cungr2_ */
diff --git a/contrib/libs/clapack/cungrq.c b/contrib/libs/clapack/cungrq.c
new file mode 100644
index 0000000000..5eeb910e1e
--- /dev/null
+++ b/contrib/libs/clapack/cungrq.c
@@ -0,0 +1,293 @@
+/* cungrq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int cungrq_(integer *m, integer *n, integer *k, complex *a,
+	integer *lda, complex *tau, complex *work, integer *lwork, integer *
+	info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+
+    /* Local variables */
+    integer i__, j, l, ib, nb, ii, kk, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int cungr2_(integer *, integer *, integer *,
+	    complex *, integer *, complex *, complex *, integer *), clarfb_(
+	    char *, char *, char *, char *, integer *, integer *, integer *,
+	    complex *, integer *, complex *, integer *, complex *, integer *,
+	    complex *, integer *), clarft_(
+	    char *, char *, integer *, integer *, complex *, integer *,
+	    complex *, complex *, integer *), xerbla_(char *,
+	    integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNGRQ generates an M-by-N complex matrix Q with orthonormal rows, */
+/* which is defined as the last M rows of a product of K elementary */
+/* reflectors of order N */
+
+/* Q = H(1)' H(2)' . . . H(k)' */
+
+/* as returned by CGERQF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. N >= M. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. M >= K >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the (m-k+i)-th row must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by CGERQF in the last k rows of its array argument */
+/* A. */
+/* On exit, the M-by-N matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) COMPLEX array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CGERQF. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+/*     (f2c note: the pointer shifts below let the 1-based Fortran */
+/*     reference A(i,j) be written as a[i + j * a_dim1] in C.) */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < *m) {
+	*info = -2;
+    } else if (*k < 0 || *k > *m) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    }
+
+    if (*info == 0) {
+	if (*m <= 0) {
+	    lwkopt = 1;
+	} else {
+	    nb = ilaenv_(&c__1, "CUNGRQ", " ", m, n, k, &c_n1);
+	    lwkopt = *m * nb;
+	}
+	work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+	if (*lwork < max(1,*m) && ! lquery) {
+	    *info = -8;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("CUNGRQ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m <= 0) {
+	return 0;
+    }
+
+    nbmin = 2;
+    nx = 0;
+    iws = *m;
+    if (nb > 1 && nb < *k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+	i__1 = 0, i__2 = ilaenv_(&c__3, "CUNGRQ", " ", m, n, k, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *m;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "CUNGRQ", " ", m, n, k, &c_n1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    if (nb >= nbmin && nb < *k && nx < *k) {
+
+/* Use blocked code after the first block. */
+/* The last kk rows are handled by the block method. */
+
+/* Computing MIN */
+	i__1 = *k, i__2 = (*k - nx + nb - 1) / nb * nb;
+	kk = min(i__1,i__2);
+
+/* Set A(1:m-kk,n-kk+1:n) to zero. */
+
+	i__1 = *n;
+	for (j = *n - kk + 1; j <= i__1; ++j) {
+	    i__2 = *m - kk;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L10: */
+	    }
+/* L20: */
+	}
+    } else {
+	kk = 0;
+    }
+
+/* Use unblocked code for the first or only block. */
+
+    i__1 = *m - kk;
+    i__2 = *n - kk;
+    i__3 = *k - kk;
+    cungr2_(&i__1, &i__2, &i__3, &a[a_offset], lda, &tau[1], &work[1], &iinfo)
+	    ;
+
+    if (kk > 0) {
+
+/* Use blocked code */
+
+/*        (f2c rendering of "DO I = K-KK+1, K, NB": the ternary selects */
+/*        the loop test that matches the sign of the step i__2 = nb.) */
+
+	i__1 = *k;
+	i__2 = nb;
+	for (i__ = *k - kk + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+		i__2) {
+/* Computing MIN */
+	    i__3 = nb, i__4 = *k - i__ + 1;
+	    ib = min(i__3,i__4);
+	    ii = *m - *k + i__;
+	    if (ii > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+		i__3 = *n - *k + i__ + ib - 1;
+		clarft_("Backward", "Rowwise", &i__3, &ib, &a[ii + a_dim1],
+			lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(1:m-k+i-1,1:n-k+i+ib-1) from the right */
+
+		i__3 = ii - 1;
+		i__4 = *n - *k + i__ + ib - 1;
+		clarfb_("Right", "Conjugate transpose", "Backward", "Rowwise",
+			 &i__3, &i__4, &ib, &a[ii + a_dim1], lda, &work[1], &
+			ldwork, &a[a_offset], lda, &work[ib + 1], &ldwork);
+	    }
+
+/* Apply H' to columns 1:n-k+i+ib-1 of current block */
+
+	    i__3 = *n - *k + i__ + ib - 1;
+	    cungr2_(&ib, &i__3, &ib, &a[ii + a_dim1], lda, &tau[i__], &work[1]
+, &iinfo);
+
+/* Set columns n-k+i+ib:n of current block to zero */
+
+	    i__3 = *n;
+	    for (l = *n - *k + i__ + ib; l <= i__3; ++l) {
+		i__4 = ii + ib - 1;
+		for (j = ii; j <= i__4; ++j) {
+		    i__5 = j + l * a_dim1;
+		    a[i__5].r = 0.f, a[i__5].i = 0.f;
+/* L30: */
+		}
+/* L40: */
+	    }
+/* L50: */
+	}
+    }
+
+/*     Return in WORK(1) the workspace size actually used (IWS). */
+
+    work[1].r = (real) iws, work[1].i = 0.f;
+    return 0;
+
+/* End of CUNGRQ */
+
+} /* cungrq_ */
diff --git a/contrib/libs/clapack/cungtr.c b/contrib/libs/clapack/cungtr.c
new file mode 100644
index 0000000000..58997f709e
--- /dev/null
+++ b/contrib/libs/clapack/cungtr.c
@@ -0,0 +1,260 @@
+/* cungtr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int cungtr_(char *uplo, integer *n, complex *a, integer *lda,
+ complex *tau, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j, nb;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int cungql_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, integer *),
+ cungqr_(integer *, integer *, integer *, complex *, integer *,
+ complex *, complex *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNGTR generates a complex unitary matrix Q which is defined as the */
+/* product of n-1 elementary reflectors of order N, as returned by */
+/* CHETRD: */
+
+/* if UPLO = 'U', Q = H(n-1) . . . H(2) H(1), */
+
+/* if UPLO = 'L', Q = H(1) H(2) . . . H(n-1). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A contains elementary reflectors */
+/* from CHETRD; */
+/* = 'L': Lower triangle of A contains elementary reflectors */
+/* from CHETRD. */
+
+/* N (input) INTEGER */
+/* The order of the matrix Q. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension (LDA,N) */
+/* On entry, the vectors which define the elementary reflectors, */
+/* as returned by CHETRD. */
+/* On exit, the N-by-N unitary matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= N. */
+
+/* TAU (input) COMPLEX array, dimension (N-1) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CHETRD. */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= N-1. */
+/* For optimum performance LWORK >= (N-1)*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n - 1;
+ if (*lwork < max(i__1,i__2) && ! lquery) {
+ *info = -7;
+ }
+ }
+
+ if (*info == 0) {
+ if (upper) {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "CUNGQL", " ", &i__1, &i__2, &i__3, &c_n1);
+ } else {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "CUNGQR", " ", &i__1, &i__2, &i__3, &c_n1);
+ }
+/* Computing MAX */
+ i__1 = 1, i__2 = *n - 1;
+ lwkopt = max(i__1,i__2) * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNGTR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ if (upper) {
+
+/* Q was determined by a call to CHETRD with UPLO = 'U' */
+
+/* Shift the vectors which define the elementary reflectors one */
+/* column to the left, and set the last row and column of Q to */
+/* those of the unit matrix */
+
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + (j + 1) * a_dim1;
+ a[i__3].r = a[i__4].r, a[i__3].i = a[i__4].i;
+/* L10: */
+ }
+ i__2 = *n + j * a_dim1;
+ a[i__2].r = 0.f, a[i__2].i = 0.f;
+/* L20: */
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + *n * a_dim1;
+ a[i__2].r = 0.f, a[i__2].i = 0.f;
+/* L30: */
+ }
+ i__1 = *n + *n * a_dim1;
+ a[i__1].r = 1.f, a[i__1].i = 0.f;
+
+/* Generate Q(1:n-1,1:n-1) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ cungql_(&i__1, &i__2, &i__3, &a[a_offset], lda, &tau[1], &work[1],
+ lwork, &iinfo);
+
+ } else {
+
+/* Q was determined by a call to CHETRD with UPLO = 'L'. */
+
+/* Shift the vectors which define the elementary reflectors one */
+/* column to the right, and set the first row and column of Q to */
+/* those of the unit matrix */
+
+ for (j = *n; j >= 2; --j) {
+ i__1 = j * a_dim1 + 1;
+ a[i__1].r = 0.f, a[i__1].i = 0.f;
+ i__1 = *n;
+ for (i__ = j + 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__ + (j - 1) * a_dim1;
+ a[i__2].r = a[i__3].r, a[i__2].i = a[i__3].i;
+/* L40: */
+ }
+/* L50: */
+ }
+ i__1 = a_dim1 + 1;
+ a[i__1].r = 1.f, a[i__1].i = 0.f;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ i__2 = i__ + a_dim1;
+ a[i__2].r = 0.f, a[i__2].i = 0.f;
+/* L60: */
+ }
+ if (*n > 1) {
+
+/* Generate Q(2:n,2:n) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ cungqr_(&i__1, &i__2, &i__3, &a[(a_dim1 << 1) + 2], lda, &tau[1],
+ &work[1], lwork, &iinfo);
+ }
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ return 0;
+
+/* End of CUNGTR */
+
+} /* cungtr_ */
diff --git a/contrib/libs/clapack/cunm2l.c b/contrib/libs/clapack/cunm2l.c
new file mode 100644
index 0000000000..c553f8f06a
--- /dev/null
+++ b/contrib/libs/clapack/cunm2l.c
@@ -0,0 +1,245 @@
+/* cunm2l.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cunm2l_(char *side, char *trans, integer *m, integer *n,
+ integer *k, complex *a, integer *lda, complex *tau, complex *c__,
+ integer *ldc, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, i1, i2, i3, mi, ni, nq;
+ complex aii;
+ logical left;
+ complex taui;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+, integer *, complex *, complex *, integer *, complex *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNM2L overwrites the general complex m-by-n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'C', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'C', */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by CGEQLF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'C': apply Q' (Conjugate transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,K) */
+/* The i-th column must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* CGEQLF in the last k columns of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If SIDE = 'L', LDA >= max(1,M); */
+/* if SIDE = 'R', LDA >= max(1,N). */
+
+/* TAU (input) COMPLEX array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CGEQLF. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the m-by-n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNM2L", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if (left && notran || ! left && ! notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) or H(i)' is applied to C(1:m-k+i,1:n) */
+
+ mi = *m - *k + i__;
+ } else {
+
+/* H(i) or H(i)' is applied to C(1:m,1:n-k+i) */
+
+ ni = *n - *k + i__;
+ }
+
+/* Apply H(i) or H(i)' */
+
+ if (notran) {
+ i__3 = i__;
+ taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+ } else {
+ r_cnjg(&q__1, &tau[i__]);
+ taui.r = q__1.r, taui.i = q__1.i;
+ }
+ i__3 = nq - *k + i__ + i__ * a_dim1;
+ aii.r = a[i__3].r, aii.i = a[i__3].i;
+ i__3 = nq - *k + i__ + i__ * a_dim1;
+ a[i__3].r = 1.f, a[i__3].i = 0.f;
+ clarf_(side, &mi, &ni, &a[i__ * a_dim1 + 1], &c__1, &taui, &c__[
+ c_offset], ldc, &work[1]);
+ i__3 = nq - *k + i__ + i__ * a_dim1;
+ a[i__3].r = aii.r, a[i__3].i = aii.i;
+/* L10: */
+ }
+ return 0;
+
+/* End of CUNM2L */
+
+} /* cunm2l_ */
diff --git a/contrib/libs/clapack/cunm2r.c b/contrib/libs/clapack/cunm2r.c
new file mode 100644
index 0000000000..58c69adeb0
--- /dev/null
+++ b/contrib/libs/clapack/cunm2r.c
@@ -0,0 +1,249 @@
+/* cunm2r.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cunm2r_(char *side, char *trans, integer *m, integer *n,
+ integer *k, complex *a, integer *lda, complex *tau, complex *c__,
+ integer *ldc, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
+ complex aii;
+ logical left;
+ complex taui;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+, integer *, complex *, complex *, integer *, complex *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNM2R overwrites the general complex m-by-n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'C', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'C', */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by CGEQRF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'C': apply Q' (Conjugate transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,K) */
+/* The i-th column must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* CGEQRF in the first k columns of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If SIDE = 'L', LDA >= max(1,M); */
+/* if SIDE = 'R', LDA >= max(1,N). */
+
+/* TAU (input) COMPLEX array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CGEQRF. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the m-by-n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNM2R", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) or H(i)' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) or H(i)' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) or H(i)' */
+
+ if (notran) {
+ i__3 = i__;
+ taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+ } else {
+ r_cnjg(&q__1, &tau[i__]);
+ taui.r = q__1.r, taui.i = q__1.i;
+ }
+ i__3 = i__ + i__ * a_dim1;
+ aii.r = a[i__3].r, aii.i = a[i__3].i;
+ i__3 = i__ + i__ * a_dim1;
+ a[i__3].r = 1.f, a[i__3].i = 0.f;
+ clarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], &c__1, &taui, &c__[ic
+ + jc * c_dim1], ldc, &work[1]);
+ i__3 = i__ + i__ * a_dim1;
+ a[i__3].r = aii.r, a[i__3].i = aii.i;
+/* L10: */
+ }
+ return 0;
+
+/* End of CUNM2R */
+
+} /* cunm2r_ */
diff --git a/contrib/libs/clapack/cunmbr.c b/contrib/libs/clapack/cunmbr.c
new file mode 100644
index 0000000000..ccf73e4b89
--- /dev/null
+++ b/contrib/libs/clapack/cunmbr.c
@@ -0,0 +1,373 @@
+/* cunmbr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int cunmbr_(char *vect, char *side, char *trans, integer *m,
+ integer *n, integer *k, complex *a, integer *lda, complex *tau,
+ complex *c__, integer *ldc, complex *work, integer *lwork, integer *
+ info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2];
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i1, i2, nb, mi, ni, nq, nw;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int cunmlq_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *);
+ logical notran;
+ extern /* Subroutine */ int cunmqr_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *);
+ logical applyq;
+ char transt[1];
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* If VECT = 'Q', CUNMBR overwrites the general complex M-by-N matrix C */
+/* with */
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'C': Q**H * C C * Q**H */
+
+/* If VECT = 'P', CUNMBR overwrites the general complex M-by-N matrix C */
+/* with */
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': P * C C * P */
+/* TRANS = 'C': P**H * C C * P**H */
+
+/* Here Q and P**H are the unitary matrices determined by CGEBRD when */
+/* reducing a complex matrix A to bidiagonal form: A = Q * B * P**H. Q */
+/* and P**H are defined as products of elementary reflectors H(i) and */
+/* G(i) respectively. */
+
+/* Let nq = m if SIDE = 'L' and nq = n if SIDE = 'R'. Thus nq is the */
+/* order of the unitary matrix Q or P**H that is applied. */
+
+/* If VECT = 'Q', A is assumed to have been an NQ-by-K matrix: */
+/* if nq >= k, Q = H(1) H(2) . . . H(k); */
+/* if nq < k, Q = H(1) H(2) . . . H(nq-1). */
+
+/* If VECT = 'P', A is assumed to have been a K-by-NQ matrix: */
+/* if k < nq, P = G(1) G(2) . . . G(k); */
+/* if k >= nq, P = G(1) G(2) . . . G(nq-1). */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* = 'Q': apply Q or Q**H; */
+/* = 'P': apply P or P**H. */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q, Q**H, P or P**H from the Left; */
+/* = 'R': apply Q, Q**H, P or P**H from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q or P; */
+/* = 'C': Conjugate transpose, apply Q**H or P**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* If VECT = 'Q', the number of columns in the original */
+/* matrix reduced by CGEBRD. */
+/* If VECT = 'P', the number of rows in the original */
+/* matrix reduced by CGEBRD. */
+/* K >= 0. */
+
+/* A (input) COMPLEX array, dimension */
+/* (LDA,min(nq,K)) if VECT = 'Q' */
+/* (LDA,nq) if VECT = 'P' */
+/* The vectors which define the elementary reflectors H(i) and */
+/* G(i), whose products determine the matrices Q and P, as */
+/* returned by CGEBRD. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If VECT = 'Q', LDA >= max(1,nq); */
+/* if VECT = 'P', LDA >= max(1,min(nq,K)). */
+
+/* TAU (input) COMPLEX array, dimension (min(nq,K)) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i) or G(i) which determines Q or P, as returned */
+/* by CGEBRD in the array argument TAUQ or TAUP. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q */
+/* or P*C or P**H*C or C*P or C*P**H. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M); */
+/* if N = 0 or M = 0, LWORK >= 1. */
+/* For optimum performance LWORK >= max(1,N*NB) if SIDE = 'L', */
+/* and LWORK >= max(1,M*NB) if SIDE = 'R', where NB is the */
+/* optimal blocksize. (NB = 0 if M = 0 or N = 0.) */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ applyq = lsame_(vect, "Q");
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q or P and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (*m == 0 || *n == 0) {
+ nw = 0;
+ }
+ if (! applyq && ! lsame_(vect, "P")) {
+ *info = -1;
+ } else if (! left && ! lsame_(side, "R")) {
+ *info = -2;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*k < 0) {
+ *info = -6;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = 1, i__2 = min(nq,*k);
+ if (applyq && *lda < max(1,nq) || ! applyq && *lda < max(i__1,i__2)) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -13;
+ }
+ }
+
+ if (*info == 0) {
+ if (nw > 0) {
+ if (applyq) {
+ if (left) {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ nb = ilaenv_(&c__1, "CUNMQR", ch__1, &i__1, n, &i__2, &
+ c_n1);
+ } else {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ nb = ilaenv_(&c__1, "CUNMQR", ch__1, m, &i__1, &i__2, &
+ c_n1);
+ }
+ } else {
+ if (left) {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ nb = ilaenv_(&c__1, "CUNMLQ", ch__1, &i__1, n, &i__2, &
+ c_n1);
+ } else {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ nb = ilaenv_(&c__1, "CUNMLQ", ch__1, m, &i__1, &i__2, &
+ c_n1);
+ }
+ }
+/* Computing MAX */
+ i__1 = 1, i__2 = nw * nb;
+ lwkopt = max(i__1,i__2);
+ } else {
+ lwkopt = 1;
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNMBR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ if (applyq) {
+
+/* Apply Q */
+
+ if (nq >= *k) {
+
+/* Q was determined by a call to CGEBRD with nq >= k */
+
+ cunmqr_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], lwork, &iinfo);
+ } else if (nq > 1) {
+
+/* Q was determined by a call to CGEBRD with nq < k */
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ i1 = 2;
+ i2 = 1;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ i1 = 1;
+ i2 = 2;
+ }
+ i__1 = nq - 1;
+ cunmqr_(side, trans, &mi, &ni, &i__1, &a[a_dim1 + 2], lda, &tau[1]
+, &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+ }
+ } else {
+
+/* Apply P */
+
+ if (notran) {
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+ if (nq > *k) {
+
+/* P was determined by a call to CGEBRD with nq > k */
+
+ cunmlq_(side, transt, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], lwork, &iinfo);
+ } else if (nq > 1) {
+
+/* P was determined by a call to CGEBRD with nq <= k */
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ i1 = 2;
+ i2 = 1;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ i1 = 1;
+ i2 = 2;
+ }
+ i__1 = nq - 1;
+ cunmlq_(side, transt, &mi, &ni, &i__1, &a[(a_dim1 << 1) + 1], lda,
+ &tau[1], &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &
+ iinfo);
+ }
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ return 0;
+
+/* End of CUNMBR */
+
+} /* cunmbr_ */
diff --git a/contrib/libs/clapack/cunmhr.c b/contrib/libs/clapack/cunmhr.c
new file mode 100644
index 0000000000..687085adbb
--- /dev/null
+++ b/contrib/libs/clapack/cunmhr.c
@@ -0,0 +1,257 @@
+/* cunmhr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int cunmhr_(char *side, char *trans, integer *m, integer *n,
+ integer *ilo, integer *ihi, complex *a, integer *lda, complex *tau,
+ complex *c__, integer *ldc, complex *work, integer *lwork, integer *
+ info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i1, i2, nb, mi, nh, ni, nq, nw;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int cunmqr_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNMHR overwrites the general complex M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'C': Q**H * C C * Q**H */
+
+/* where Q is a complex unitary matrix of order nq, with nq = m if */
+/* SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of */
+/* IHI-ILO elementary reflectors, as returned by CGEHRD: */
+
+/* Q = H(ilo) H(ilo+1) . . . H(ihi-1). */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**H from the Left; */
+/* = 'R': apply Q or Q**H from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'C': apply Q**H (Conjugate transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* ILO and IHI must have the same values as in the previous call */
+/* of CGEHRD. Q is equal to the unit matrix except in the */
+/* submatrix Q(ilo+1:ihi,ilo+1:ihi). */
+/* If SIDE = 'L', then 1 <= ILO <= IHI <= M, if M > 0, and */
+/* ILO = 1 and IHI = 0, if M = 0; */
+/* if SIDE = 'R', then 1 <= ILO <= IHI <= N, if N > 0, and */
+/* ILO = 1 and IHI = 0, if N = 0. */
+
+/* A (input) COMPLEX array, dimension */
+/* (LDA,M) if SIDE = 'L' */
+/* (LDA,N) if SIDE = 'R' */
+/* The vectors which define the elementary reflectors, as */
+/* returned by CGEHRD. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* LDA >= max(1,M) if SIDE = 'L'; LDA >= max(1,N) if SIDE = 'R'. */
+
+/* TAU (input) COMPLEX array, dimension */
+/* (M-1) if SIDE = 'L' */
+/* (N-1) if SIDE = 'R' */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CGEHRD. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nh = *ihi - *ilo;
+ left = lsame_(side, "L");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ilo < 1 || *ilo > max(1,nq)) {
+ *info = -5;
+ } else if (*ihi < min(*ilo,nq) || *ihi > nq) {
+ *info = -6;
+ } else if (*lda < max(1,nq)) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -13;
+ }
+
+ if (*info == 0) {
+ if (left) {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ nb = ilaenv_(&c__1, "CUNMQR", ch__1, &nh, n, &nh, &c_n1);
+ } else {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ nb = ilaenv_(&c__1, "CUNMQR", ch__1, m, &nh, &nh, &c_n1);
+ }
+ lwkopt = max(1,nw) * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__2 = -(*info);
+ xerbla_("CUNMHR", &i__2);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || nh == 0) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ if (left) {
+ mi = nh;
+ ni = *n;
+ i1 = *ilo + 1;
+ i2 = 1;
+ } else {
+ mi = *m;
+ ni = nh;
+ i1 = 1;
+ i2 = *ilo + 1;
+ }
+
+ cunmqr_(side, trans, &mi, &ni, &nh, &a[*ilo + 1 + *ilo * a_dim1], lda, &
+ tau[*ilo], &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ return 0;
+
+/* End of CUNMHR */
+
+} /* cunmhr_ */
diff --git a/contrib/libs/clapack/cunml2.c b/contrib/libs/clapack/cunml2.c
new file mode 100644
index 0000000000..21b050eec1
--- /dev/null
+++ b/contrib/libs/clapack/cunml2.c
@@ -0,0 +1,254 @@
+/* cunml2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cunml2_(char *side, char *trans, integer *m, integer *n,
+ integer *k, complex *a, integer *lda, complex *tau, complex *c__,
+ integer *ldc, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
+ complex aii;
+ logical left;
+ complex taui;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+, integer *, complex *, complex *, integer *, complex *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int clacgv_(integer *, complex *, integer *),
+ xerbla_(char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNML2 overwrites the general complex m-by-n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'C', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'C', */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(k)' . . . H(2)' H(1)' */
+
+/* as returned by CGELQF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'C': apply Q' (Conjugate transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) COMPLEX array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* CGELQF in the first k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) COMPLEX array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CGELQF. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the m-by-n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNML2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if (left && notran || ! left && ! notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) or H(i)' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) or H(i)' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) or H(i)' */
+
+ if (notran) {
+ r_cnjg(&q__1, &tau[i__]);
+ taui.r = q__1.r, taui.i = q__1.i;
+ } else {
+ i__3 = i__;
+ taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+ }
+ if (i__ < nq) {
+ i__3 = nq - i__;
+ clacgv_(&i__3, &a[i__ + (i__ + 1) * a_dim1], lda);
+ }
+ i__3 = i__ + i__ * a_dim1;
+ aii.r = a[i__3].r, aii.i = a[i__3].i;
+ i__3 = i__ + i__ * a_dim1;
+ a[i__3].r = 1.f, a[i__3].i = 0.f;
+ clarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], lda, &taui, &c__[ic +
+ jc * c_dim1], ldc, &work[1]);
+ i__3 = i__ + i__ * a_dim1;
+ a[i__3].r = aii.r, a[i__3].i = aii.i;
+ if (i__ < nq) {
+ i__3 = nq - i__;
+ clacgv_(&i__3, &a[i__ + (i__ + 1) * a_dim1], lda);
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of CUNML2 */
+
+} /* cunml2_ */
diff --git a/contrib/libs/clapack/cunmlq.c b/contrib/libs/clapack/cunmlq.c
new file mode 100644
index 0000000000..d7aa7a425c
--- /dev/null
+++ b/contrib/libs/clapack/cunmlq.c
@@ -0,0 +1,335 @@
+/* cunmlq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int cunmlq_(char *side, char *trans, integer *m, integer *n,
+ integer *k, complex *a, integer *lda, complex *tau, complex *c__,
+ integer *ldc, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ complex t[4160] /* was [65][64] */;
+ integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ extern /* Subroutine */ int cunml2_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *), clarfb_(char *, char *,
+ char *, char *, integer *, integer *, integer *, complex *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *), clarft_(char *, char *
+, integer *, integer *, complex *, integer *, complex *, complex *
+, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ logical notran;
+ integer ldwork;
+ char transt[1];
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNMLQ overwrites the general complex M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'C': Q**H * C C * Q**H */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(k)' . . . H(2)' H(1)' */
+
+/* as returned by CGELQF. Q is of order M if SIDE = 'L' and of order N */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**H from the Left; */
+/* = 'R': apply Q or Q**H from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'C': Conjugate transpose, apply Q**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) COMPLEX array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* CGELQF in the first k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) COMPLEX array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CGELQF. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/* Determine the block size. NB may be at most NBMAX, where NBMAX */
+/* is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "CUNMLQ", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNMLQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "CUNMLQ", ch__1, m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ cunml2_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if (left && notran || ! left && ! notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+ i__4 = nq - i__ + 1;
+ clarft_("Forward", "Rowwise", &i__4, &ib, &a[i__ + i__ * a_dim1],
+ lda, &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H or H' */
+
+ clarfb_(side, transt, "Forward", "Rowwise", &mi, &ni, &ib, &a[i__
+ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1],
+ ldc, &work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ return 0;
+
+/* End of CUNMLQ */
+
+} /* cunmlq_ */
diff --git a/contrib/libs/clapack/cunmql.c b/contrib/libs/clapack/cunmql.c
new file mode 100644
index 0000000000..cdd02a34c3
--- /dev/null
+++ b/contrib/libs/clapack/cunmql.c
@@ -0,0 +1,328 @@
+/* cunmql.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int cunmql_(char *side, char *trans, integer *m, integer *n,
+ integer *k, complex *a, integer *lda, complex *tau, complex *c__,
+ integer *ldc, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ complex t[4160] /* was [65][64] */;
+ integer i1, i2, i3, ib, nb, mi, ni, nq, nw, iws;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ extern /* Subroutine */ int cunm2l_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *), clarfb_(char *, char *,
+ char *, char *, integer *, integer *, integer *, complex *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *), clarft_(char *, char *
+, integer *, integer *, complex *, integer *, complex *, complex *
+, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ logical notran;
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNMQL overwrites the general complex M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'C': Q**H * C C * Q**H */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by CGEQLF. Q is of order M if SIDE = 'L' and of order N */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**H from the Left; */
+/* = 'R': apply Q or Q**H from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'C': Transpose, apply Q**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,K) */
+/* The i-th column must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* CGEQLF in the last k columns of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If SIDE = 'L', LDA >= max(1,M); */
+/* if SIDE = 'R', LDA >= max(1,N). */
+
+/* TAU (input) COMPLEX array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CGEQLF. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = max(1,*n);
+ } else {
+ nq = *n;
+ nw = max(1,*m);
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+
+ if (*info == 0) {
+ if (*m == 0 || *n == 0) {
+ lwkopt = 1;
+ } else {
+
+/* Determine the block size. NB may be at most NBMAX, where */
+/* NBMAX is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "CUNMQL", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ lwkopt = nw * nb;
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ if (*lwork < nw && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNMQL", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "CUNMQL", ch__1, m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ cunm2l_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if (left && notran || ! left && ! notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ i__4 = nq - *k + i__ + ib - 1;
+ clarft_("Backward", "Columnwise", &i__4, &ib, &a[i__ * a_dim1 + 1]
+, lda, &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(1:m-k+i+ib-1,1:n) */
+
+ mi = *m - *k + i__ + ib - 1;
+ } else {
+
+/* H or H' is applied to C(1:m,1:n-k+i+ib-1) */
+
+ ni = *n - *k + i__ + ib - 1;
+ }
+
+/* Apply H or H' */
+
+ clarfb_(side, trans, "Backward", "Columnwise", &mi, &ni, &ib, &a[
+ i__ * a_dim1 + 1], lda, t, &c__65, &c__[c_offset], ldc, &
+ work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ return 0;
+
+/* End of CUNMQL */
+
+} /* cunmql_ */
diff --git a/contrib/libs/clapack/cunmqr.c b/contrib/libs/clapack/cunmqr.c
new file mode 100644
index 0000000000..846c9a53d6
--- /dev/null
+++ b/contrib/libs/clapack/cunmqr.c
@@ -0,0 +1,328 @@
+/* cunmqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int cunmqr_(char *side, char *trans, integer *m, integer *n,
+ integer *k, complex *a, integer *lda, complex *tau, complex *c__,
+ integer *ldc, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ complex t[4160] /* was [65][64] */;
+ integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ extern /* Subroutine */ int cunm2r_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *), clarfb_(char *, char *,
+ char *, char *, integer *, integer *, integer *, complex *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *), clarft_(char *, char *
+, integer *, integer *, complex *, integer *, complex *, complex *
+, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ logical notran;
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNMQR overwrites the general complex M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'C': Q**H * C C * Q**H */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by CGEQRF. Q is of order M if SIDE = 'L' and of order N */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**H from the Left; */
+/* = 'R': apply Q or Q**H from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'C': Conjugate transpose, apply Q**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) COMPLEX array, dimension (LDA,K) */
+/* The i-th column must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* CGEQRF in the first k columns of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If SIDE = 'L', LDA >= max(1,M); */
+/* if SIDE = 'R', LDA >= max(1,N). */
+
+/* TAU (input) COMPLEX array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CGEQRF. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/* Determine the block size. NB may be at most NBMAX, where NBMAX */
+/* is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "CUNMQR", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNMQR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "CUNMQR", ch__1, m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ cunm2r_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+ i__4 = nq - i__ + 1;
+ clarft_("Forward", "Columnwise", &i__4, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], t, &c__65)
+ ;
+ if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H or H' */
+
+ clarfb_(side, trans, "Forward", "Columnwise", &mi, &ni, &ib, &a[
+ i__ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc *
+ c_dim1], ldc, &work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ return 0;
+
+/* End of CUNMQR */
+
+} /* cunmqr_ */
diff --git a/contrib/libs/clapack/cunmr2.c b/contrib/libs/clapack/cunmr2.c
new file mode 100644
index 0000000000..2cfe39ec5c
--- /dev/null
+++ b/contrib/libs/clapack/cunmr2.c
@@ -0,0 +1,246 @@
+/* cunmr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cunmr2_(char *side, char *trans, integer *m, integer *n,
+ integer *k, complex *a, integer *lda, complex *tau, complex *c__,
+ integer *ldc, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, i1, i2, i3, mi, ni, nq;
+ complex aii;
+ logical left;
+ complex taui;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+, integer *, complex *, complex *, integer *, complex *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int clacgv_(integer *, complex *, integer *),
+ xerbla_(char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNMR2 overwrites the general complex m-by-n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'C', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'C', */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1)' H(2)' . . . H(k)' */
+
+/* as returned by CGERQF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'C': apply Q' (Conjugate transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) COMPLEX array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* CGERQF in the last k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) COMPLEX array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CGERQF. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the m-by-n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNMR2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) or H(i)' is applied to C(1:m-k+i,1:n) */
+
+ mi = *m - *k + i__;
+ } else {
+
+/* H(i) or H(i)' is applied to C(1:m,1:n-k+i) */
+
+ ni = *n - *k + i__;
+ }
+
+/* Apply H(i) or H(i)' */
+
+ if (notran) {
+ r_cnjg(&q__1, &tau[i__]);
+ taui.r = q__1.r, taui.i = q__1.i;
+ } else {
+ i__3 = i__;
+ taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+ }
+ i__3 = nq - *k + i__ - 1;
+ clacgv_(&i__3, &a[i__ + a_dim1], lda);
+ i__3 = i__ + (nq - *k + i__) * a_dim1;
+ aii.r = a[i__3].r, aii.i = a[i__3].i;
+ i__3 = i__ + (nq - *k + i__) * a_dim1;
+ a[i__3].r = 1.f, a[i__3].i = 0.f;
+ clarf_(side, &mi, &ni, &a[i__ + a_dim1], lda, &taui, &c__[c_offset],
+ ldc, &work[1]);
+ i__3 = i__ + (nq - *k + i__) * a_dim1;
+ a[i__3].r = aii.r, a[i__3].i = aii.i;
+ i__3 = nq - *k + i__ - 1;
+ clacgv_(&i__3, &a[i__ + a_dim1], lda);
+/* L10: */
+ }
+ return 0;
+
+/* End of CUNMR2 */
+
+} /* cunmr2_ */
diff --git a/contrib/libs/clapack/cunmr3.c b/contrib/libs/clapack/cunmr3.c
new file mode 100644
index 0000000000..ba5e2bd62f
--- /dev/null
+++ b/contrib/libs/clapack/cunmr3.c
@@ -0,0 +1,253 @@
+/* cunmr3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cunmr3_(char *side, char *trans, integer *m, integer *n,
+ integer *k, integer *l, complex *a, integer *lda, complex *tau,
+ complex *c__, integer *ldc, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, i1, i2, i3, ja, ic, jc, mi, ni, nq;
+ logical left;
+ complex taui;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int clarz_(char *, integer *, integer *, integer *
+, complex *, integer *, complex *, complex *, integer *, complex *
+), xerbla_(char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNMR3 overwrites the general complex m by n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'C', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'C', */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by CTZRZF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'C': apply Q' (Conjugate transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* L (input) INTEGER */
+/* The number of columns of the matrix A containing */
+/* the meaningful part of the Householder reflectors. */
+/* If SIDE = 'L', M >= L >= 0, if SIDE = 'R', N >= L >= 0. */
+
+/* A (input) COMPLEX array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* CTZRZF in the last k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) COMPLEX array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CTZRZF. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the m-by-n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*l < 0 || left && *l > *m || ! left && *l > *n) {
+ *info = -6;
+ } else if (*lda < max(1,*k)) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNMR3", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ ja = *m - *l + 1;
+ jc = 1;
+ } else {
+ mi = *m;
+ ja = *n - *l + 1;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) or H(i)' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) or H(i)' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) or H(i)' */
+
+ if (notran) {
+ i__3 = i__;
+ taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+ } else {
+ r_cnjg(&q__1, &tau[i__]);
+ taui.r = q__1.r, taui.i = q__1.i;
+ }
+ clarz_(side, &mi, &ni, l, &a[i__ + ja * a_dim1], lda, &taui, &c__[ic
+ + jc * c_dim1], ldc, &work[1]);
+
+/* L10: */
+ }
+
+ return 0;
+
+/* End of CUNMR3 */
+
+} /* cunmr3_ */
diff --git a/contrib/libs/clapack/cunmrq.c b/contrib/libs/clapack/cunmrq.c
new file mode 100644
index 0000000000..020d684138
--- /dev/null
+++ b/contrib/libs/clapack/cunmrq.c
@@ -0,0 +1,336 @@
+/* cunmrq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int cunmrq_(char *side, char *trans, integer *m, integer *n,
+ integer *k, complex *a, integer *lda, complex *tau, complex *c__,
+ integer *ldc, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ complex t[4160] /* was [65][64] */;
+ integer i1, i2, i3, ib, nb, mi, ni, nq, nw, iws;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ extern /* Subroutine */ int cunmr2_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *), clarfb_(char *, char *,
+ char *, char *, integer *, integer *, integer *, complex *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *), clarft_(char *, char *
+, integer *, integer *, complex *, integer *, complex *, complex *
+, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ logical notran;
+ integer ldwork;
+ char transt[1];
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNMRQ overwrites the general complex M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'C': Q**H * C C * Q**H */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1)' H(2)' . . . H(k)' */
+
+/* as returned by CGERQF. Q is of order M if SIDE = 'L' and of order N */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**H from the Left; */
+/* = 'R': apply Q or Q**H from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'C': Transpose, apply Q**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) COMPLEX array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* CGERQF in the last k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) COMPLEX array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CGERQF. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = max(1,*n);
+ } else {
+ nq = *n;
+ nw = max(1,*m);
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+
+ if (*info == 0) {
+ if (*m == 0 || *n == 0) {
+ lwkopt = 1;
+ } else {
+
+/* Determine the block size. NB may be at most NBMAX, where */
+/* NBMAX is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "CUNMRQ", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ lwkopt = nw * nb;
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ if (*lwork < nw && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNMRQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "CUNMRQ", ch__1, m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ cunmr2_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ i__4 = nq - *k + i__ + ib - 1;
+ clarft_("Backward", "Rowwise", &i__4, &ib, &a[i__ + a_dim1], lda,
+ &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(1:m-k+i+ib-1,1:n) */
+
+ mi = *m - *k + i__ + ib - 1;
+ } else {
+
+/* H or H' is applied to C(1:m,1:n-k+i+ib-1) */
+
+ ni = *n - *k + i__ + ib - 1;
+ }
+
+/* Apply H or H' */
+
+ clarfb_(side, transt, "Backward", "Rowwise", &mi, &ni, &ib, &a[
+ i__ + a_dim1], lda, t, &c__65, &c__[c_offset], ldc, &work[
+ 1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ return 0;
+
+/* End of CUNMRQ */
+
+} /* cunmrq_ */
diff --git a/contrib/libs/clapack/cunmrz.c b/contrib/libs/clapack/cunmrz.c
new file mode 100644
index 0000000000..e9d4d63a19
--- /dev/null
+++ b/contrib/libs/clapack/cunmrz.c
@@ -0,0 +1,370 @@
+/* cunmrz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int cunmrz_(char *side, char *trans, integer *m, integer *n,
+ integer *k, integer *l, complex *a, integer *lda, complex *tau,
+ complex *c__, integer *ldc, complex *work, integer *lwork, integer *
+ info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ complex t[4160] /* was [65][64] */;
+ integer i1, i2, i3, ib, ic, ja, jc, nb, mi, ni, nq, nw, iws;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ extern /* Subroutine */ int cunmr3_(char *, char *, integer *, integer *,
+ integer *, integer *, complex *, integer *, complex *, complex *,
+ integer *, complex *, integer *), clarzb_(char *,
+ char *, char *, char *, integer *, integer *, integer *, integer *
+, complex *, integer *, complex *, integer *, complex *, integer *
+, complex *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), clarzt_(
+ char *, char *, integer *, integer *, complex *, integer *,
+ complex *, complex *, integer *);
+ logical notran;
+ integer ldwork;
+ char transt[1];
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* January 2007 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNMRZ overwrites the general complex M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'C': Q**H * C C * Q**H */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by CTZRZF. Q is of order M if SIDE = 'L' and of order N */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**H from the Left; */
+/* = 'R': apply Q or Q**H from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'C': Conjugate transpose, apply Q**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* L (input) INTEGER */
+/* The number of columns of the matrix A containing */
+/* the meaningful part of the Householder reflectors. */
+/* If SIDE = 'L', M >= L >= 0, if SIDE = 'R', N >= L >= 0. */
+
+/* A (input) COMPLEX array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* CTZRZF in the last k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) COMPLEX array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CTZRZF. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = max(1,*n);
+ } else {
+ nq = *n;
+ nw = max(1,*m);
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*l < 0 || left && *l > *m || ! left && *l > *n) {
+ *info = -6;
+ } else if (*lda < max(1,*k)) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ }
+
+ if (*info == 0) {
+ if (*m == 0 || *n == 0) {
+ lwkopt = 1;
+ } else {
+
+/* Determine the block size. NB may be at most NBMAX, where */
+/* NBMAX is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "CUNMRQ", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ lwkopt = nw * nb;
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ if (*lwork < max(1,nw) && ! lquery) {
+ *info = -13;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNMRZ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Determine the block size. NB may be at most NBMAX, where NBMAX */
+/* is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "CUNMRQ", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "CUNMRQ", ch__1, m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ cunmr3_(side, trans, m, n, k, l, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ ja = *m - *l + 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ ja = *n - *l + 1;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ clarzt_("Backward", "Rowwise", l, &ib, &a[i__ + ja * a_dim1], lda,
+ &tau[i__], t, &c__65);
+
+ if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H or H' */
+
+ clarzb_(side, transt, "Backward", "Rowwise", &mi, &ni, &ib, l, &a[
+ i__ + ja * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1]
+, ldc, &work[1], &ldwork);
+/* L10: */
+ }
+
+ }
+
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CUNMRZ */
+
+} /* cunmrz_ */
diff --git a/contrib/libs/clapack/cunmtr.c b/contrib/libs/clapack/cunmtr.c
new file mode 100644
index 0000000000..c1d63f48d3
--- /dev/null
+++ b/contrib/libs/clapack/cunmtr.c
@@ -0,0 +1,294 @@
+/* cunmtr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int cunmtr_(char *side, char *uplo, char *trans, integer *m,
+ integer *n, complex *a, integer *lda, complex *tau, complex *c__,
+ integer *ldc, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2, i__3;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i1, i2, nb, mi, ni, nq, nw;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int cunmql_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *), cunmqr_(char *,
+ char *, integer *, integer *, integer *, complex *, integer *,
+ complex *, complex *, integer *, complex *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUNMTR overwrites the general complex M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'C': Q**H * C C * Q**H */
+
+/* where Q is a complex unitary matrix of order nq, with nq = m if */
+/* SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of */
+/* nq-1 elementary reflectors, as returned by CHETRD: */
+
+/* if UPLO = 'U', Q = H(nq-1) . . . H(2) H(1); */
+
+/* if UPLO = 'L', Q = H(1) H(2) . . . H(nq-1). */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**H from the Left; */
+/* = 'R': apply Q or Q**H from the Right. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A contains elementary reflectors */
+/* from CHETRD; */
+/* = 'L': Lower triangle of A contains elementary reflectors */
+/* from CHETRD. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'C': Conjugate transpose, apply Q**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* A (input) COMPLEX array, dimension */
+/* (LDA,M) if SIDE = 'L' */
+/* (LDA,N) if SIDE = 'R' */
+/* The vectors which define the elementary reflectors, as */
+/* returned by CHETRD. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* LDA >= max(1,M) if SIDE = 'L'; LDA >= max(1,N) if SIDE = 'R'. */
+
+/* TAU (input) COMPLEX array, dimension */
+/* (M-1) if SIDE = 'L' */
+/* (N-1) if SIDE = 'R' */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CHETRD. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >=M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "C")) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+ if (upper) {
+ if (left) {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ nb = ilaenv_(&c__1, "CUNMQL", ch__1, &i__2, n, &i__3, &c_n1);
+ } else {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "CUNMQL", ch__1, m, &i__2, &i__3, &c_n1);
+ }
+ } else {
+ if (left) {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ nb = ilaenv_(&c__1, "CUNMQR", ch__1, &i__2, n, &i__3, &c_n1);
+ } else {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "CUNMQR", ch__1, m, &i__2, &i__3, &c_n1);
+ }
+ }
+ lwkopt = max(1,nw) * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__2 = -(*info);
+ xerbla_("CUNMTR", &i__2);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || nq == 1) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ }
+
+ if (upper) {
+
+/* Q was determined by a call to CHETRD with UPLO = 'U' */
+
+ i__2 = nq - 1;
+ cunmql_(side, trans, &mi, &ni, &i__2, &a[(a_dim1 << 1) + 1], lda, &
+ tau[1], &c__[c_offset], ldc, &work[1], lwork, &iinfo);
+ } else {
+
+/* Q was determined by a call to CHETRD with UPLO = 'L' */
+
+ if (left) {
+ i1 = 2;
+ i2 = 1;
+ } else {
+ i1 = 1;
+ i2 = 2;
+ }
+ i__2 = nq - 1;
+ cunmqr_(side, trans, &mi, &ni, &i__2, &a[a_dim1 + 2], lda, &tau[1], &
+ c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ return 0;
+
+/* End of CUNMTR */
+
+} /* cunmtr_ */
diff --git a/contrib/libs/clapack/cupgtr.c b/contrib/libs/clapack/cupgtr.c
new file mode 100644
index 0000000000..135728bc5c
--- /dev/null
+++ b/contrib/libs/clapack/cupgtr.c
@@ -0,0 +1,219 @@
+/* cupgtr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int cupgtr_(char *uplo, integer *n, complex *ap, complex *
+ tau, complex *q, integer *ldq, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j, ij;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical upper;
+ extern /* Subroutine */ int cung2l_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *), cung2r_(
+ integer *, integer *, integer *, complex *, integer *, complex *,
+ complex *, integer *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUPGTR generates a complex unitary matrix Q which is defined as the */
+/* product of n-1 elementary reflectors H(i) of order n, as returned by */
+/* CHPTRD using packed storage: */
+
+/* if UPLO = 'U', Q = H(n-1) . . . H(2) H(1), */
+
+/* if UPLO = 'L', Q = H(1) H(2) . . . H(n-1). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangular packed storage used in previous */
+/* call to CHPTRD; */
+/* = 'L': Lower triangular packed storage used in previous */
+/* call to CHPTRD. */
+
+/* N (input) INTEGER */
+/* The order of the matrix Q. N >= 0. */
+
+/* AP (input) COMPLEX array, dimension (N*(N+1)/2) */
+/* The vectors which define the elementary reflectors, as */
+/* returned by CHPTRD. */
+
+/* TAU (input) COMPLEX array, dimension (N-1) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CHPTRD. */
+
+/* Q (output) COMPLEX array, dimension (LDQ,N) */
+/* The N-by-N unitary matrix Q. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* WORK (workspace) COMPLEX array, dimension (N-1) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ --ap;
+ --tau;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldq < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUPGTR", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Q was determined by a call to CHPTRD with UPLO = 'U' */
+
+/* Unpack the vectors which define the elementary reflectors and */
+/* set the last row and column of Q equal to those of the unit */
+/* matrix */
+
+ ij = 2;
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * q_dim1;
+ i__4 = ij;
+ q[i__3].r = ap[i__4].r, q[i__3].i = ap[i__4].i;
+ ++ij;
+/* L10: */
+ }
+ ij += 2;
+ i__2 = *n + j * q_dim1;
+ q[i__2].r = 0.f, q[i__2].i = 0.f;
+/* L20: */
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + *n * q_dim1;
+ q[i__2].r = 0.f, q[i__2].i = 0.f;
+/* L30: */
+ }
+ i__1 = *n + *n * q_dim1;
+ q[i__1].r = 1.f, q[i__1].i = 0.f;
+
+/* Generate Q(1:n-1,1:n-1) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ cung2l_(&i__1, &i__2, &i__3, &q[q_offset], ldq, &tau[1], &work[1], &
+ iinfo);
+
+ } else {
+
+/* Q was determined by a call to CHPTRD with UPLO = 'L'. */
+
+/* Unpack the vectors which define the elementary reflectors and */
+/* set the first row and column of Q equal to those of the unit */
+/* matrix */
+
+ i__1 = q_dim1 + 1;
+ q[i__1].r = 1.f, q[i__1].i = 0.f;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ i__2 = i__ + q_dim1;
+ q[i__2].r = 0.f, q[i__2].i = 0.f;
+/* L40: */
+ }
+ ij = 3;
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j * q_dim1 + 1;
+ q[i__2].r = 0.f, q[i__2].i = 0.f;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * q_dim1;
+ i__4 = ij;
+ q[i__3].r = ap[i__4].r, q[i__3].i = ap[i__4].i;
+ ++ij;
+/* L50: */
+ }
+ ij += 2;
+/* L60: */
+ }
+ if (*n > 1) {
+
+/* Generate Q(2:n,2:n) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ cung2r_(&i__1, &i__2, &i__3, &q[(q_dim1 << 1) + 2], ldq, &tau[1],
+ &work[1], &iinfo);
+ }
+ }
+ return 0;
+
+/* End of CUPGTR */
+
+} /* cupgtr_ */
diff --git a/contrib/libs/clapack/cupmtr.c b/contrib/libs/clapack/cupmtr.c
new file mode 100644
index 0000000000..993d479a37
--- /dev/null
+++ b/contrib/libs/clapack/cupmtr.c
@@ -0,0 +1,320 @@
+/* cupmtr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int cupmtr_(char *side, char *uplo, char *trans, integer *m,
+ integer *n, complex *ap, complex *tau, complex *c__, integer *ldc,
+ complex *work, integer *info)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ integer i__, i1, i2, i3, ic, jc, ii, mi, ni, nq;
+ complex aii;
+ logical left;
+ complex taui;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+, integer *, complex *, complex *, integer *, complex *);
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran, forwrd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* CUPMTR overwrites the general complex M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'C': Q**H * C C * Q**H */
+
+/* where Q is a complex unitary matrix of order nq, with nq = m if */
+/* SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of */
+/* nq-1 elementary reflectors, as returned by CHPTRD using packed */
+/* storage: */
+
+/* if UPLO = 'U', Q = H(nq-1) . . . H(2) H(1); */
+
+/* if UPLO = 'L', Q = H(1) H(2) . . . H(nq-1). */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**H from the Left; */
+/* = 'R': apply Q or Q**H from the Right. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangular packed storage used in previous */
+/* call to CHPTRD; */
+/* = 'L': Lower triangular packed storage used in previous */
+/* call to CHPTRD. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'C': Conjugate transpose, apply Q**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* AP (input) COMPLEX array, dimension */
+/* (M*(M+1)/2) if SIDE = 'L' */
+/* (N*(N+1)/2) if SIDE = 'R' */
+/* The vectors which define the elementary reflectors, as */
+/* returned by CHPTRD. AP is modified by the routine but */
+/* restored on exit. */
+
+/* TAU (input) COMPLEX array, dimension (M-1) if SIDE = 'L' */
+/* or (N-1) if SIDE = 'R' */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by CHPTRD. */
+
+/* C (input/output) COMPLEX array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX array, dimension */
+/* (N) if SIDE = 'L' */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ --ap;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ upper = lsame_(uplo, "U");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*ldc < max(1,*m)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUPMTR", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Q was determined by a call to CHPTRD with UPLO = 'U' */
+
+ forwrd = left && notran || ! left && ! notran;
+
+ if (forwrd) {
+ i1 = 1;
+ i2 = nq - 1;
+ i3 = 1;
+ ii = 2;
+ } else {
+ i1 = nq - 1;
+ i2 = 1;
+ i3 = -1;
+ ii = nq * (nq + 1) / 2 - 1;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) or H(i)' is applied to C(1:i,1:n) */
+
+ mi = i__;
+ } else {
+
+/* H(i) or H(i)' is applied to C(1:m,1:i) */
+
+ ni = i__;
+ }
+
+/* Apply H(i) or H(i)' */
+
+ if (notran) {
+ i__3 = i__;
+ taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+ } else {
+ r_cnjg(&q__1, &tau[i__]);
+ taui.r = q__1.r, taui.i = q__1.i;
+ }
+ i__3 = ii;
+ aii.r = ap[i__3].r, aii.i = ap[i__3].i;
+ i__3 = ii;
+ ap[i__3].r = 1.f, ap[i__3].i = 0.f;
+ clarf_(side, &mi, &ni, &ap[ii - i__ + 1], &c__1, &taui, &c__[
+ c_offset], ldc, &work[1]);
+ i__3 = ii;
+ ap[i__3].r = aii.r, ap[i__3].i = aii.i;
+
+ if (forwrd) {
+ ii = ii + i__ + 2;
+ } else {
+ ii = ii - i__ - 1;
+ }
+/* L10: */
+ }
+ } else {
+
+/* Q was determined by a call to CHPTRD with UPLO = 'L'. */
+
+ forwrd = left && ! notran || ! left && notran;
+
+ if (forwrd) {
+ i1 = 1;
+ i2 = nq - 1;
+ i3 = 1;
+ ii = 2;
+ } else {
+ i1 = nq - 1;
+ i2 = 1;
+ i3 = -1;
+ ii = nq * (nq + 1) / 2 - 1;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__2 = i2;
+ i__1 = i3;
+ for (i__ = i1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+ i__3 = ii;
+ aii.r = ap[i__3].r, aii.i = ap[i__3].i;
+ i__3 = ii;
+ ap[i__3].r = 1.f, ap[i__3].i = 0.f;
+ if (left) {
+
+/* H(i) or H(i)' is applied to C(i+1:m,1:n) */
+
+ mi = *m - i__;
+ ic = i__ + 1;
+ } else {
+
+/* H(i) or H(i)' is applied to C(1:m,i+1:n) */
+
+ ni = *n - i__;
+ jc = i__ + 1;
+ }
+
+/* Apply H(i) or H(i)' */
+
+ if (notran) {
+ i__3 = i__;
+ taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+ } else {
+ r_cnjg(&q__1, &tau[i__]);
+ taui.r = q__1.r, taui.i = q__1.i;
+ }
+ clarf_(side, &mi, &ni, &ap[ii], &c__1, &taui, &c__[ic + jc *
+ c_dim1], ldc, &work[1]);
+ i__3 = ii;
+ ap[i__3].r = aii.r, ap[i__3].i = aii.i;
+
+ if (forwrd) {
+ ii = ii + nq - i__ + 1;
+ } else {
+ ii = ii - nq + i__ - 2;
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of CUPMTR */
+
+} /* cupmtr_ */
diff --git a/contrib/libs/clapack/dbdsdc.c b/contrib/libs/clapack/dbdsdc.c
new file mode 100644
index 0000000000..6096e4faae
--- /dev/null
+++ b/contrib/libs/clapack/dbdsdc.c
@@ -0,0 +1,514 @@
+/* dbdsdc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__9 = 9;
+static integer c__0 = 0;
+static doublereal c_b15 = 1.;
+static integer c__1 = 1;
+static doublereal c_b29 = 0.;
+
+/* Subroutine */ int dbdsdc_(char *uplo, char *compq, integer *n, doublereal *
+	d__, doublereal *e, doublereal *u, integer *ldu, doublereal *vt, 
+	integer *ldvt, doublereal *q, integer *iq, doublereal *work, integer *
+	iwork, integer *info)
+{
+    /* System generated locals */
+    integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double d_sign(doublereal *, doublereal *), log(doublereal);
+
+    /* Local variables */
+    integer i__, j, k;
+    doublereal p, r__;
+    integer z__, ic, ii, kk;
+    doublereal cs;
+    integer is, iu;
+    doublereal sn;
+    integer nm1;
+    doublereal eps;
+    integer ivt, difl, difr, ierr, perm, mlvl, sqre;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *, 
+	    integer *, doublereal *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer *
+, doublereal *, integer *), dswap_(integer *, doublereal *, 
+	    integer *, doublereal *, integer *);
+    integer poles, iuplo, nsize, start;
+    extern /* Subroutine */ int dlasd0_(integer *, integer *, doublereal *, 
+	    doublereal *, doublereal *, integer *, doublereal *, integer *, 
+	    integer *, integer *, doublereal *, integer *);
+    extern doublereal dlamch_(char *);
+    extern /* Subroutine */ int dlasda_(integer *, integer *, integer *, 
+	    integer *, doublereal *, doublereal *, doublereal *, integer *, 
+	    doublereal *, integer *, doublereal *, doublereal *, doublereal *, 
+	    doublereal *, integer *, integer *, integer *, integer *, 
+	    doublereal *, doublereal *, doublereal *, doublereal *, integer *, 
+	    integer *), dlascl_(char *, integer *, integer *, doublereal *, 
+	    doublereal *, integer *, integer *, doublereal *, integer *, 
+	    integer *), dlasdq_(char *, integer *, integer *, integer 
+	    *, integer *, integer *, doublereal *, doublereal *, doublereal *, 
+	    integer *, doublereal *, integer *, doublereal *, integer *, 
+	    doublereal *, integer *), dlaset_(char *, integer *, 
+	    integer *, doublereal *, doublereal *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *, 
+	    doublereal *, doublereal *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    integer givcol;
+    extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+    integer icompq;
+    doublereal orgnrm;
+    integer givnum, givptr, qstart, smlsiz, wstart, smlszp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DBDSDC computes the singular value decomposition (SVD) of a real */
+/* N-by-N (upper or lower) bidiagonal matrix B: B = U * S * VT, */
+/* using a divide and conquer method, where S is a diagonal matrix */
+/* with non-negative diagonal elements (the singular values of B), and */
+/* U and VT are orthogonal matrices of left and right singular vectors, */
+/* respectively. DBDSDC can be used to compute all singular values, */
+/* and optionally, singular vectors or singular vectors in compact form. */
+
+/* This code makes very mild assumptions about floating point */
+/* arithmetic. It will work on machines with a guard digit in */
+/* add/subtract, or on those binary machines without guard digits */
+/* which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. */
+/* It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. See DLASD3 for details. */
+
+/* The code currently calls DLASDQ if singular values only are desired. */
+/* However, it can be slightly modified to compute singular values */
+/* using the divide and conquer method. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': B is upper bidiagonal. */
+/* = 'L': B is lower bidiagonal. */
+
+/* COMPQ (input) CHARACTER*1 */
+/* Specifies whether singular vectors are to be computed */
+/* as follows: */
+/* = 'N': Compute singular values only; */
+/* = 'P': Compute singular values and compute singular */
+/* vectors in compact form; */
+/* = 'I': Compute singular values and singular vectors. */
+
+/* N (input) INTEGER */
+/* The order of the matrix B. N >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the n diagonal elements of the bidiagonal matrix B. */
+/* On exit, if INFO=0, the singular values of B. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, the elements of E contain the offdiagonal */
+/* elements of the bidiagonal matrix whose SVD is desired. */
+/* On exit, E has been destroyed. */
+
+/* U (output) DOUBLE PRECISION array, dimension (LDU,N) */
+/* If COMPQ = 'I', then: */
+/* On exit, if INFO = 0, U contains the left singular vectors */
+/* of the bidiagonal matrix. */
+/* For other values of COMPQ, U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= 1. */
+/* If singular vectors are desired, then LDU >= max( 1, N ). */
+
+/* VT (output) DOUBLE PRECISION array, dimension (LDVT,N) */
+/* If COMPQ = 'I', then: */
+/* On exit, if INFO = 0, VT' contains the right singular */
+/* vectors of the bidiagonal matrix. */
+/* For other values of COMPQ, VT is not referenced. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. LDVT >= 1. */
+/* If singular vectors are desired, then LDVT >= max( 1, N ). */
+
+/* Q (output) DOUBLE PRECISION array, dimension (LDQ) */
+/* If COMPQ = 'P', then: */
+/* On exit, if INFO = 0, Q and IQ contain the left */
+/* and right singular vectors in a compact form, */
+/* requiring O(N log N) space instead of 2*N**2. */
+/* In particular, Q contains all the DOUBLE PRECISION data in */
+/* LDQ >= N*(11 + 2*SMLSIZ + 8*INT(LOG_2(N/(SMLSIZ+1)))) */
+/* words of memory, where SMLSIZ is returned by ILAENV and */
+/* is equal to the maximum size of the subproblems at the */
+/* bottom of the computation tree (usually about 25). */
+/* For other values of COMPQ, Q is not referenced. */
+
+/* IQ (output) INTEGER array, dimension (LDIQ) */
+/* If COMPQ = 'P', then: */
+/* On exit, if INFO = 0, Q and IQ contain the left */
+/* and right singular vectors in a compact form, */
+/* requiring O(N log N) space instead of 2*N**2. */
+/* In particular, IQ contains all INTEGER data in */
+/* LDIQ >= N*(3 + 3*INT(LOG_2(N/(SMLSIZ+1)))) */
+/* words of memory, where SMLSIZ is returned by ILAENV and */
+/* is equal to the maximum size of the subproblems at the */
+/* bottom of the computation tree (usually about 25). */
+/* For other values of COMPQ, IQ is not referenced. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* If COMPQ = 'N' then LWORK >= (4 * N). */
+/* If COMPQ = 'P' then LWORK >= (6 * N). */
+/* If COMPQ = 'I' then LWORK >= (3 * N**2 + 4 * N). */
+
+/* IWORK (workspace) INTEGER array, dimension (8*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: The algorithm failed to compute an singular value. */
+/* The update process of divide and conquer failed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+/* Changed dimension statement in comment describing E from (N) to */
+/* (N-1). Sven, 17 Feb 05. */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* (f2c convention: shift each array pointer down by one (or by the */
+    /* leading-dimension offset) so the Fortran 1-based indexing below */
+    /* can be kept verbatim.) */
+    --d__;
+    --e;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1;
+    u -= u_offset;
+    vt_dim1 = *ldvt;
+    vt_offset = 1 + vt_dim1;
+    vt -= vt_offset;
+    --q;
+    --iq;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+
+    iuplo = 0;
+    if (lsame_(uplo, "U")) {
+	iuplo = 1;
+    }
+    if (lsame_(uplo, "L")) {
+	iuplo = 2;
+    }
+    if (lsame_(compq, "N")) {
+	icompq = 0;
+    } else if (lsame_(compq, "P")) {
+	icompq = 1;
+    } else if (lsame_(compq, "I")) {
+	icompq = 2;
+    } else {
+	icompq = -1;
+    }
+    if (iuplo == 0) {
+	*info = -1;
+    } else if (icompq < 0) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ldu < 1 || icompq == 2 && *ldu < *n) {
+	*info = -7;
+    } else if (*ldvt < 1 || icompq == 2 && *ldvt < *n) {
+	*info = -9;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DBDSDC", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+    smlsiz = ilaenv_(&c__9, "DBDSDC", " ", &c__0, &c__0, &c__0, &c__0);
+    if (*n == 1) {
+	if (icompq == 1) {
+	    q[1] = d_sign(&c_b15, &d__[1]);
+	    q[smlsiz * *n + 1] = 1.;
+	} else if (icompq == 2) {
+	    u[u_dim1 + 1] = d_sign(&c_b15, &d__[1]);
+	    vt[vt_dim1 + 1] = 1.;
+	}
+	d__[1] = abs(d__[1]);
+	return 0;
+    }
+    nm1 = *n - 1;
+
+/* If matrix lower bidiagonal, rotate to be upper bidiagonal */
+/* by applying Givens rotations on the left */
+
+/* WSTART and QSTART mark the first free slots of WORK and Q; QSTART is */
+/* bumped to 5 below when Q must also hold the Givens cosines/sines */
+/* generated for the lower-bidiagonal case. */
+
+    wstart = 1;
+    qstart = 3;
+    if (icompq == 1) {
+	dcopy_(n, &d__[1], &c__1, &q[1], &c__1);
+	i__1 = *n - 1;
+	dcopy_(&i__1, &e[1], &c__1, &q[*n + 1], &c__1);
+    }
+    if (iuplo == 2) {
+	qstart = 5;
+	wstart = (*n << 1) - 1;
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+	    d__[i__] = r__;
+	    e[i__] = sn * d__[i__ + 1];
+	    d__[i__ + 1] = cs * d__[i__ + 1];
+	    if (icompq == 1) {
+		q[i__ + (*n << 1)] = cs;
+		q[i__ + *n * 3] = sn;
+	    } else if (icompq == 2) {
+		work[i__] = cs;
+		work[nm1 + i__] = -sn;
+	    }
+/* L10: */
+	}
+    }
+
+/* If ICOMPQ = 0, use DLASDQ to compute the singular values. */
+
+    if (icompq == 0) {
+	dlasdq_("U", &c__0, n, &c__0, &c__0, &c__0, &d__[1], &e[1], &vt[
+		vt_offset], ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[
+		wstart], info);
+	goto L40;
+    }
+
+/* If N is smaller than the minimum divide size SMLSIZ, then solve */
+/* the problem with another solver. */
+
+    if (*n <= smlsiz) {
+	if (icompq == 2) {
+	    dlaset_("A", n, n, &c_b29, &c_b15, &u[u_offset], ldu);
+	    dlaset_("A", n, n, &c_b29, &c_b15, &vt[vt_offset], ldvt);
+	    dlasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &vt[vt_offset]
+, ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[
+		    wstart], info);
+	} else if (icompq == 1) {
+	    iu = 1;
+	    ivt = iu + *n;
+	    dlaset_("A", n, n, &c_b29, &c_b15, &q[iu + (qstart - 1) * *n], n);
+	    dlaset_("A", n, n, &c_b29, &c_b15, &q[ivt + (qstart - 1) * *n], n);
+	    dlasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &q[ivt + (
+		    qstart - 1) * *n], n, &q[iu + (qstart - 1) * *n], n, &q[
+		    iu + (qstart - 1) * *n], n, &work[wstart], info);
+	}
+	goto L40;
+    }
+
+    if (icompq == 2) {
+	dlaset_("A", n, n, &c_b29, &c_b15, &u[u_offset], ldu);
+	dlaset_("A", n, n, &c_b29, &c_b15, &vt[vt_offset], ldvt);
+    }
+
+/* Scale. */
+
+    orgnrm = dlanst_("M", n, &d__[1], &e[1]);
+    if (orgnrm == 0.) {
+	return 0;
+    }
+    dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, &c__1, &d__[1], n, &ierr);
+    dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &nm1, &c__1, &e[1], &nm1, &
+	    ierr);
+
+    eps = dlamch_("Epsilon");
+
+    mlvl = (integer) (log((doublereal) (*n) / (doublereal) (smlsiz + 1)) / 
+	    log(2.)) + 1;
+    smlszp = smlsiz + 1;
+
+    if (icompq == 1) {
+	iu = 1;
+	ivt = smlsiz + 1;
+	difl = ivt + smlszp;
+	difr = difl + mlvl;
+	z__ = difr + (mlvl << 1);
+	ic = z__ + mlvl;
+	is = ic + 1;
+	poles = is + 1;
+	givnum = poles + (mlvl << 1);
+
+	k = 1;
+	givptr = 2;
+	perm = 3;
+	givcol = perm + mlvl;
+    }
+
+/* Replace any diagonal entry smaller than EPS in magnitude by +-EPS */
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((d__1 = d__[i__], abs(d__1)) < eps) {
+	    d__[i__] = d_sign(&eps, &d__[i__]);
+	}
+/* L20: */
+    }
+
+    start = 1;
+    sqre = 0;
+
+    i__1 = nm1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((d__1 = e[i__], abs(d__1)) < eps || i__ == nm1) {
+
+/* Subproblem found. First determine its size and then */
+/* apply divide and conquer on it. */
+
+	    if (i__ < nm1) {
+
+/* A subproblem with E(I) small for I < NM1. */
+
+		nsize = i__ - start + 1;
+	    } else if ((d__1 = e[i__], abs(d__1)) >= eps) {
+
+/* A subproblem with E(NM1) not too small but I = NM1. */
+
+		nsize = *n - start + 1;
+	    } else {
+
+/* A subproblem with E(NM1) small. This implies an */
+/* 1-by-1 subproblem at D(N). Solve this 1-by-1 problem */
+/* first. */
+
+		nsize = i__ - start + 1;
+		if (icompq == 2) {
+		    u[*n + *n * u_dim1] = d_sign(&c_b15, &d__[*n]);
+		    vt[*n + *n * vt_dim1] = 1.;
+		} else if (icompq == 1) {
+		    q[*n + (qstart - 1) * *n] = d_sign(&c_b15, &d__[*n]);
+		    q[*n + (smlsiz + qstart - 1) * *n] = 1.;
+		}
+		d__[*n] = (d__1 = d__[*n], abs(d__1));
+	    }
+	    if (icompq == 2) {
+		dlasd0_(&nsize, &sqre, &d__[start], &e[start], &u[start + 
+			start * u_dim1], ldu, &vt[start + start * vt_dim1], 
+			ldvt, &smlsiz, &iwork[1], &work[wstart], info);
+	    } else {
+		dlasda_(&icompq, &smlsiz, &nsize, &sqre, &d__[start], &e[
+			start], &q[start + (iu + qstart - 2) * *n], n, &q[
+			start + (ivt + qstart - 2) * *n], &iq[start + k * *n], 
+			&q[start + (difl + qstart - 2) * *n], &q[start + (
+			difr + qstart - 2) * *n], &q[start + (z__ + qstart - 
+			2) * *n], &q[start + (poles + qstart - 2) * *n], &iq[
+			start + givptr * *n], &iq[start + givcol * *n], n, &
+			iq[start + perm * *n], &q[start + (givnum + qstart - 
+			2) * *n], &q[start + (ic + qstart - 2) * *n], &q[
+			start + (is + qstart - 2) * *n], &work[wstart], &
+			iwork[1], info);
+		if (*info != 0) {
+		    return 0;
+		}
+	    }
+	    start = i__ + 1;
+	}
+/* L30: */
+    }
+
+/* Unscale */
+
+    dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, n, &c__1, &d__[1], n, &ierr);
+L40:
+
+/* Use Selection Sort to minimize swaps of singular vectors */
+/* (the singular values are placed in decreasing order) */
+
+    i__1 = *n;
+    for (ii = 2; ii <= i__1; ++ii) {
+	i__ = ii - 1;
+	kk = i__;
+	p = d__[i__];
+	i__2 = *n;
+	for (j = ii; j <= i__2; ++j) {
+	    if (d__[j] > p) {
+		kk = j;
+		p = d__[j];
+	    }
+/* L50: */
+	}
+	if (kk != i__) {
+	    d__[kk] = d__[i__];
+	    d__[i__] = p;
+	    if (icompq == 1) {
+		iq[i__] = kk;
+	    } else if (icompq == 2) {
+		dswap_(n, &u[i__ * u_dim1 + 1], &c__1, &u[kk * u_dim1 + 1], &
+			c__1);
+		dswap_(n, &vt[i__ + vt_dim1], ldvt, &vt[kk + vt_dim1], ldvt);
+	    }
+	} else if (icompq == 1) {
+	    iq[i__] = i__;
+	}
+/* L60: */
+    }
+
+/* If ICOMPQ = 1, use IQ(N,1) as the indicator for UPLO */
+
+    if (icompq == 1) {
+	if (iuplo == 1) {
+	    iq[*n] = 1;
+	} else {
+	    iq[*n] = 0;
+	}
+    }
+
+/* If B is lower bidiagonal, update U by those Givens rotations */
+/* which rotated B to be upper bidiagonal */
+
+    if (iuplo == 2 && icompq == 2) {
+	dlasr_("L", "V", "B", n, n, &work[1], &work[*n], &u[u_offset], ldu);
+    }
+
+    return 0;
+
+/* End of DBDSDC */
+
+} /* dbdsdc_ */
diff --git a/contrib/libs/clapack/dbdsqr.c b/contrib/libs/clapack/dbdsqr.c
new file mode 100644
index 0000000000..08b04fc9da
--- /dev/null
+++ b/contrib/libs/clapack/dbdsqr.c
@@ -0,0 +1,918 @@
+/* dbdsqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b15 = -.125;
+static integer c__1 = 1;
+static doublereal c_b49 = 1.;
+static doublereal c_b72 = -1.;
+
+/* Subroutine */ int dbdsqr_(char *uplo, integer *n, integer *ncvt, integer *
+ nru, integer *ncc, doublereal *d__, doublereal *e, doublereal *vt,
+ integer *ldvt, doublereal *u, integer *ldu, doublereal *c__, integer *
+ ldc, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+ i__2;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double pow_dd(doublereal *, doublereal *), sqrt(doublereal), d_sign(
+ doublereal *, doublereal *);
+
+ /* Local variables */
+ doublereal f, g, h__;
+ integer i__, j, m;
+ doublereal r__, cs;
+ integer ll;
+ doublereal sn, mu;
+ integer nm1, nm12, nm13, lll;
+ doublereal eps, sll, tol, abse;
+ integer idir;
+ doublereal abss;
+ integer oldm;
+ doublereal cosl;
+ integer isub, iter;
+ doublereal unfl, sinl, cosr, smin, smax, sinr;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *), dlas2_(
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *), dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ doublereal oldcs;
+ extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *);
+ integer oldll;
+ doublereal shift, sigmn, oldsn;
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ integer maxit;
+ doublereal sminl, sigmx;
+ logical lower;
+ extern /* Subroutine */ int dlasq1_(integer *, doublereal *, doublereal *,
+ doublereal *, integer *), dlasv2_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *), xerbla_(char *,
+ integer *);
+ doublereal sminoa, thresh;
+ logical rotate;
+ doublereal tolmul;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* January 2007 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DBDSQR computes the singular values and, optionally, the right and/or */
+/* left singular vectors from the singular value decomposition (SVD) of */
+/* a real N-by-N (upper or lower) bidiagonal matrix B using the implicit */
+/* zero-shift QR algorithm. The SVD of B has the form */
+
+/* B = Q * S * P**T */
+
+/* where S is the diagonal matrix of singular values, Q is an orthogonal */
+/* matrix of left singular vectors, and P is an orthogonal matrix of */
+/* right singular vectors. If left singular vectors are requested, this */
+/* subroutine actually returns U*Q instead of Q, and, if right singular */
+/* vectors are requested, this subroutine returns P**T*VT instead of */
+/* P**T, for given real input matrices U and VT. When U and VT are the */
+/* orthogonal matrices that reduce a general matrix A to bidiagonal */
+/* form: A = U*B*VT, as computed by DGEBRD, then */
+
+/* A = (U*Q) * S * (P**T*VT) */
+
+/* is the SVD of A. Optionally, the subroutine may also compute Q**T*C */
+/* for a given real input matrix C. */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices With */
+/* Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, */
+/* LAPACK Working Note #3 (or SIAM J. Sci. Statist. Comput. vol. 11, */
+/* no. 5, pp. 873-912, Sept 1990) and */
+/* "Accurate singular values and differential qd algorithms," by */
+/* B. Parlett and V. Fernando, Technical Report CPAM-554, Mathematics */
+/* Department, University of California at Berkeley, July 1992 */
+/* for a detailed description of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': B is upper bidiagonal; */
+/* = 'L': B is lower bidiagonal. */
+
+/* N (input) INTEGER */
+/* The order of the matrix B. N >= 0. */
+
+/* NCVT (input) INTEGER */
+/* The number of columns of the matrix VT. NCVT >= 0. */
+
+/* NRU (input) INTEGER */
+/* The number of rows of the matrix U. NRU >= 0. */
+
+/* NCC (input) INTEGER */
+/* The number of columns of the matrix C. NCC >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the n diagonal elements of the bidiagonal matrix B. */
+/* On exit, if INFO=0, the singular values of B in decreasing */
+/* order. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, the N-1 offdiagonal elements of the bidiagonal */
+/* matrix B. */
+/* On exit, if INFO = 0, E is destroyed; if INFO > 0, D and E */
+/* will contain the diagonal and superdiagonal elements of a */
+/* bidiagonal matrix orthogonally equivalent to the one given */
+/* as input. */
+
+/* VT (input/output) DOUBLE PRECISION array, dimension (LDVT, NCVT) */
+/* On entry, an N-by-NCVT matrix VT. */
+/* On exit, VT is overwritten by P**T * VT. */
+/* Not referenced if NCVT = 0. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. */
+/* LDVT >= max(1,N) if NCVT > 0; LDVT >= 1 if NCVT = 0. */
+
+/* U (input/output) DOUBLE PRECISION array, dimension (LDU, N) */
+/* On entry, an NRU-by-N matrix U. */
+/* On exit, U is overwritten by U * Q. */
+/* Not referenced if NRU = 0. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= max(1,NRU). */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC, NCC) */
+/* On entry, an N-by-NCC matrix C. */
+/* On exit, C is overwritten by Q**T * C. */
+/* Not referenced if NCC = 0. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. */
+/* LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (4*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: If INFO = -i, the i-th argument had an illegal value */
+/* > 0: */
+/* if NCVT = NRU = NCC = 0, */
+/* = 1, a split was marked by a positive value in E */
+/* = 2, current block of Z not diagonalized after 30*N */
+/* iterations (in inner while loop) */
+/* = 3, termination criterion of outer while loop not met */
+/* (program created more than N unreduced blocks) */
+/* else NCVT = NRU = NCC = 0, */
+/* the algorithm did not converge; D and E contain the */
+/* elements of a bidiagonal matrix which is orthogonally */
+/* similar to the input matrix B; if INFO = i, i */
+/* elements of E have not converged to zero. */
+
+/* Internal Parameters */
+/* =================== */
+
+/* TOLMUL DOUBLE PRECISION, default = max(10,min(100,EPS**(-1/8))) */
+/* TOLMUL controls the convergence criterion of the QR loop. */
+/* If it is positive, TOLMUL*EPS is the desired relative */
+/* precision in the computed singular values. */
+/* If it is negative, abs(TOLMUL*EPS*sigma_max) is the */
+/* desired absolute accuracy in the computed singular */
+/* values (corresponds to relative accuracy */
+/* abs(TOLMUL*EPS) in the largest singular value. */
+/* abs(TOLMUL) should be between 1 and 1/EPS, and preferably */
+/* between 10 (for fast convergence) and .1/EPS */
+/* (for there to be some accuracy in the results). */
+/* Default is to lose at either one eighth or 2 of the */
+/* available decimal digits in each computed singular value */
+/* (whichever is smaller). */
+
+/* MAXITR INTEGER, default = 6 */
+/* MAXITR controls the maximum number of passes of the */
+/* algorithm through its inner loop. The algorithms stops */
+/* (and so fails to converge) if the number of passes */
+/* through the inner loop exceeds MAXITR*N**2. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lower = lsame_(uplo, "L");
+ if (! lsame_(uplo, "U") && ! lower) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ncvt < 0) {
+ *info = -3;
+ } else if (*nru < 0) {
+ *info = -4;
+ } else if (*ncc < 0) {
+ *info = -5;
+ } else if (*ncvt == 0 && *ldvt < 1 || *ncvt > 0 && *ldvt < max(1,*n)) {
+ *info = -9;
+ } else if (*ldu < max(1,*nru)) {
+ *info = -11;
+ } else if (*ncc == 0 && *ldc < 1 || *ncc > 0 && *ldc < max(1,*n)) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DBDSQR", &i__1);
+ return 0;
+ }
+ if (*n == 0) {
+ return 0;
+ }
+ if (*n == 1) {
+ goto L160;
+ }
+
+/* ROTATE is true if any singular vectors desired, false otherwise */
+
+ rotate = *ncvt > 0 || *nru > 0 || *ncc > 0;
+
+/* If no singular vectors desired, use qd algorithm */
+
+ if (! rotate) {
+ dlasq1_(n, &d__[1], &e[1], &work[1], info);
+ return 0;
+ }
+
+ nm1 = *n - 1;
+ nm12 = nm1 + nm1;
+ nm13 = nm12 + nm1;
+ idir = 0;
+
+/* Get machine constants */
+
+ eps = dlamch_("Epsilon");
+ unfl = dlamch_("Safe minimum");
+
+/* If matrix lower bidiagonal, rotate to be upper bidiagonal */
+/* by applying Givens rotations on the left */
+
+ if (lower) {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ work[i__] = cs;
+ work[nm1 + i__] = sn;
+/* L10: */
+ }
+
+/* Update singular vectors if desired */
+
+ if (*nru > 0) {
+ dlasr_("R", "V", "F", nru, n, &work[1], &work[*n], &u[u_offset],
+ ldu);
+ }
+ if (*ncc > 0) {
+ dlasr_("L", "V", "F", n, ncc, &work[1], &work[*n], &c__[c_offset],
+ ldc);
+ }
+ }
+
+/* Compute singular values to relative accuracy TOL */
+/* (By setting TOL to be negative, algorithm will compute */
+/* singular values to absolute accuracy ABS(TOL)*norm(input matrix)) */
+
+/* Computing MAX */
+/* Computing MIN */
+ d__3 = 100., d__4 = pow_dd(&eps, &c_b15);
+ d__1 = 10., d__2 = min(d__3,d__4);
+ tolmul = max(d__1,d__2);
+ tol = tolmul * eps;
+
+/* Compute approximate maximum, minimum singular values */
+
+ smax = 0.;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__2 = smax, d__3 = (d__1 = d__[i__], abs(d__1));
+ smax = max(d__2,d__3);
+/* L20: */
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__2 = smax, d__3 = (d__1 = e[i__], abs(d__1));
+ smax = max(d__2,d__3);
+/* L30: */
+ }
+ sminl = 0.;
+ if (tol >= 0.) {
+
+/* Relative accuracy desired */
+
+ sminoa = abs(d__[1]);
+ if (sminoa == 0.) {
+ goto L50;
+ }
+ mu = sminoa;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ mu = (d__2 = d__[i__], abs(d__2)) * (mu / (mu + (d__1 = e[i__ - 1]
+ , abs(d__1))));
+ sminoa = min(sminoa,mu);
+ if (sminoa == 0.) {
+ goto L50;
+ }
+/* L40: */
+ }
+L50:
+ sminoa /= sqrt((doublereal) (*n));
+/* Computing MAX */
+ d__1 = tol * sminoa, d__2 = *n * 6 * *n * unfl;
+ thresh = max(d__1,d__2);
+ } else {
+
+/* Absolute accuracy desired */
+
+/* Computing MAX */
+ d__1 = abs(tol) * smax, d__2 = *n * 6 * *n * unfl;
+ thresh = max(d__1,d__2);
+ }
+
+/* Prepare for main iteration loop for the singular values */
+/* (MAXIT is the maximum number of passes through the inner */
+/* loop permitted before nonconvergence signalled.) */
+
+ maxit = *n * 6 * *n;
+ iter = 0;
+ oldll = -1;
+ oldm = -1;
+
+/* M points to last element of unconverged part of matrix */
+
+ m = *n;
+
+/* Begin main iteration loop */
+
+L60:
+
+/* Check for convergence or exceeding iteration count */
+
+ if (m <= 1) {
+ goto L160;
+ }
+ if (iter > maxit) {
+ goto L200;
+ }
+
+/* Find diagonal block of matrix to work on */
+
+ if (tol < 0. && (d__1 = d__[m], abs(d__1)) <= thresh) {
+ d__[m] = 0.;
+ }
+ smax = (d__1 = d__[m], abs(d__1));
+ smin = smax;
+ i__1 = m - 1;
+ for (lll = 1; lll <= i__1; ++lll) {
+ ll = m - lll;
+ abss = (d__1 = d__[ll], abs(d__1));
+ abse = (d__1 = e[ll], abs(d__1));
+ if (tol < 0. && abss <= thresh) {
+ d__[ll] = 0.;
+ }
+ if (abse <= thresh) {
+ goto L80;
+ }
+ smin = min(smin,abss);
+/* Computing MAX */
+ d__1 = max(smax,abss);
+ smax = max(d__1,abse);
+/* L70: */
+ }
+ ll = 0;
+ goto L90;
+L80:
+ e[ll] = 0.;
+
+/* Matrix splits since E(LL) = 0 */
+
+ if (ll == m - 1) {
+
+/* Convergence of bottom singular value, return to top of loop */
+
+ --m;
+ goto L60;
+ }
+L90:
+ ++ll;
+
+/* E(LL) through E(M-1) are nonzero, E(LL-1) is zero */
+
+ if (ll == m - 1) {
+
+/* 2 by 2 block, handle separately */
+
+ dlasv2_(&d__[m - 1], &e[m - 1], &d__[m], &sigmn, &sigmx, &sinr, &cosr,
+ &sinl, &cosl);
+ d__[m - 1] = sigmx;
+ e[m - 1] = 0.;
+ d__[m] = sigmn;
+
+/* Compute singular vectors, if desired */
+
+ if (*ncvt > 0) {
+ drot_(ncvt, &vt[m - 1 + vt_dim1], ldvt, &vt[m + vt_dim1], ldvt, &
+ cosr, &sinr);
+ }
+ if (*nru > 0) {
+ drot_(nru, &u[(m - 1) * u_dim1 + 1], &c__1, &u[m * u_dim1 + 1], &
+ c__1, &cosl, &sinl);
+ }
+ if (*ncc > 0) {
+ drot_(ncc, &c__[m - 1 + c_dim1], ldc, &c__[m + c_dim1], ldc, &
+ cosl, &sinl);
+ }
+ m += -2;
+ goto L60;
+ }
+
+/* If working on new submatrix, choose shift direction */
+/* (from larger end diagonal element towards smaller) */
+
+ if (ll > oldm || m < oldll) {
+ if ((d__1 = d__[ll], abs(d__1)) >= (d__2 = d__[m], abs(d__2))) {
+
+/* Chase bulge from top (big end) to bottom (small end) */
+
+ idir = 1;
+ } else {
+
+/* Chase bulge from bottom (big end) to top (small end) */
+
+ idir = 2;
+ }
+ }
+
+/* Apply convergence tests */
+
+ if (idir == 1) {
+
+/* Run convergence test in forward direction */
+/* First apply standard test to bottom of matrix */
+
+ if ((d__2 = e[m - 1], abs(d__2)) <= abs(tol) * (d__1 = d__[m], abs(
+ d__1)) || tol < 0. && (d__3 = e[m - 1], abs(d__3)) <= thresh)
+ {
+ e[m - 1] = 0.;
+ goto L60;
+ }
+
+ if (tol >= 0.) {
+
+/* If relative accuracy desired, */
+/* apply convergence criterion forward */
+
+ mu = (d__1 = d__[ll], abs(d__1));
+ sminl = mu;
+ i__1 = m - 1;
+ for (lll = ll; lll <= i__1; ++lll) {
+ if ((d__1 = e[lll], abs(d__1)) <= tol * mu) {
+ e[lll] = 0.;
+ goto L60;
+ }
+ mu = (d__2 = d__[lll + 1], abs(d__2)) * (mu / (mu + (d__1 = e[
+ lll], abs(d__1))));
+ sminl = min(sminl,mu);
+/* L100: */
+ }
+ }
+
+ } else {
+
+/* Run convergence test in backward direction */
+/* First apply standard test to top of matrix */
+
+ if ((d__2 = e[ll], abs(d__2)) <= abs(tol) * (d__1 = d__[ll], abs(d__1)
+ ) || tol < 0. && (d__3 = e[ll], abs(d__3)) <= thresh) {
+ e[ll] = 0.;
+ goto L60;
+ }
+
+ if (tol >= 0.) {
+
+/* If relative accuracy desired, */
+/* apply convergence criterion backward */
+
+ mu = (d__1 = d__[m], abs(d__1));
+ sminl = mu;
+ i__1 = ll;
+ for (lll = m - 1; lll >= i__1; --lll) {
+ if ((d__1 = e[lll], abs(d__1)) <= tol * mu) {
+ e[lll] = 0.;
+ goto L60;
+ }
+ mu = (d__2 = d__[lll], abs(d__2)) * (mu / (mu + (d__1 = e[lll]
+ , abs(d__1))));
+ sminl = min(sminl,mu);
+/* L110: */
+ }
+ }
+ }
+ oldll = ll;
+ oldm = m;
+
+/* Compute shift. First, test if shifting would ruin relative */
+/* accuracy, and if so set the shift to zero. */
+
+/* Computing MAX */
+ d__1 = eps, d__2 = tol * .01;
+ if (tol >= 0. && *n * tol * (sminl / smax) <= max(d__1,d__2)) {
+
+/* Use a zero shift to avoid loss of relative accuracy */
+
+ shift = 0.;
+ } else {
+
+/* Compute the shift from 2-by-2 block at end of matrix */
+
+ if (idir == 1) {
+ sll = (d__1 = d__[ll], abs(d__1));
+ dlas2_(&d__[m - 1], &e[m - 1], &d__[m], &shift, &r__);
+ } else {
+ sll = (d__1 = d__[m], abs(d__1));
+ dlas2_(&d__[ll], &e[ll], &d__[ll + 1], &shift, &r__);
+ }
+
+/* Test if shift negligible, and if so set to zero */
+
+ if (sll > 0.) {
+/* Computing 2nd power */
+ d__1 = shift / sll;
+ if (d__1 * d__1 < eps) {
+ shift = 0.;
+ }
+ }
+ }
+
+/* Increment iteration count */
+
+ iter = iter + m - ll;
+
+/* If SHIFT = 0, do simplified QR iteration */
+
+ if (shift == 0.) {
+ if (idir == 1) {
+
+/* Chase bulge from top to bottom */
+/* Save cosines and sines for later singular vector updates */
+
+ cs = 1.;
+ oldcs = 1.;
+ i__1 = m - 1;
+ for (i__ = ll; i__ <= i__1; ++i__) {
+ d__1 = d__[i__] * cs;
+ dlartg_(&d__1, &e[i__], &cs, &sn, &r__);
+ if (i__ > ll) {
+ e[i__ - 1] = oldsn * r__;
+ }
+ d__1 = oldcs * r__;
+ d__2 = d__[i__ + 1] * sn;
+ dlartg_(&d__1, &d__2, &oldcs, &oldsn, &d__[i__]);
+ work[i__ - ll + 1] = cs;
+ work[i__ - ll + 1 + nm1] = sn;
+ work[i__ - ll + 1 + nm12] = oldcs;
+ work[i__ - ll + 1 + nm13] = oldsn;
+/* L120: */
+ }
+ h__ = d__[m] * cs;
+ d__[m] = h__ * oldcs;
+ e[m - 1] = h__ * oldsn;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[
+ ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13
+ + 1], &u[ll * u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13
+ + 1], &c__[ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((d__1 = e[m - 1], abs(d__1)) <= thresh) {
+ e[m - 1] = 0.;
+ }
+
+ } else {
+
+/* Chase bulge from bottom to top */
+/* Save cosines and sines for later singular vector updates */
+
+ cs = 1.;
+ oldcs = 1.;
+ i__1 = ll + 1;
+ for (i__ = m; i__ >= i__1; --i__) {
+ d__1 = d__[i__] * cs;
+ dlartg_(&d__1, &e[i__ - 1], &cs, &sn, &r__);
+ if (i__ < m) {
+ e[i__] = oldsn * r__;
+ }
+ d__1 = oldcs * r__;
+ d__2 = d__[i__ - 1] * sn;
+ dlartg_(&d__1, &d__2, &oldcs, &oldsn, &d__[i__]);
+ work[i__ - ll] = cs;
+ work[i__ - ll + nm1] = -sn;
+ work[i__ - ll + nm12] = oldcs;
+ work[i__ - ll + nm13] = -oldsn;
+/* L130: */
+ }
+ h__ = d__[ll] * cs;
+ d__[ll] = h__ * oldcs;
+ e[ll] = h__ * oldsn;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[
+ nm13 + 1], &vt[ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll *
+ u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[
+ ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((d__1 = e[ll], abs(d__1)) <= thresh) {
+ e[ll] = 0.;
+ }
+ }
+ } else {
+
+/* Use nonzero shift */
+
+ if (idir == 1) {
+
+/* Chase bulge from top to bottom */
+/* Save cosines and sines for later singular vector updates */
+
+ f = ((d__1 = d__[ll], abs(d__1)) - shift) * (d_sign(&c_b49, &d__[
+ ll]) + shift / d__[ll]);
+ g = e[ll];
+ i__1 = m - 1;
+ for (i__ = ll; i__ <= i__1; ++i__) {
+ dlartg_(&f, &g, &cosr, &sinr, &r__);
+ if (i__ > ll) {
+ e[i__ - 1] = r__;
+ }
+ f = cosr * d__[i__] + sinr * e[i__];
+ e[i__] = cosr * e[i__] - sinr * d__[i__];
+ g = sinr * d__[i__ + 1];
+ d__[i__ + 1] = cosr * d__[i__ + 1];
+ dlartg_(&f, &g, &cosl, &sinl, &r__);
+ d__[i__] = r__;
+ f = cosl * e[i__] + sinl * d__[i__ + 1];
+ d__[i__ + 1] = cosl * d__[i__ + 1] - sinl * e[i__];
+ if (i__ < m - 1) {
+ g = sinl * e[i__ + 1];
+ e[i__ + 1] = cosl * e[i__ + 1];
+ }
+ work[i__ - ll + 1] = cosr;
+ work[i__ - ll + 1 + nm1] = sinr;
+ work[i__ - ll + 1 + nm12] = cosl;
+ work[i__ - ll + 1 + nm13] = sinl;
+/* L140: */
+ }
+ e[m - 1] = f;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[
+ ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13
+ + 1], &u[ll * u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13
+ + 1], &c__[ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((d__1 = e[m - 1], abs(d__1)) <= thresh) {
+ e[m - 1] = 0.;
+ }
+
+ } else {
+
+/* Chase bulge from bottom to top */
+/* Save cosines and sines for later singular vector updates */
+
+ f = ((d__1 = d__[m], abs(d__1)) - shift) * (d_sign(&c_b49, &d__[m]
+ ) + shift / d__[m]);
+ g = e[m - 1];
+ i__1 = ll + 1;
+ for (i__ = m; i__ >= i__1; --i__) {
+ dlartg_(&f, &g, &cosr, &sinr, &r__);
+ if (i__ < m) {
+ e[i__] = r__;
+ }
+ f = cosr * d__[i__] + sinr * e[i__ - 1];
+ e[i__ - 1] = cosr * e[i__ - 1] - sinr * d__[i__];
+ g = sinr * d__[i__ - 1];
+ d__[i__ - 1] = cosr * d__[i__ - 1];
+ dlartg_(&f, &g, &cosl, &sinl, &r__);
+ d__[i__] = r__;
+ f = cosl * e[i__ - 1] + sinl * d__[i__ - 1];
+ d__[i__ - 1] = cosl * d__[i__ - 1] - sinl * e[i__ - 1];
+ if (i__ > ll + 1) {
+ g = sinl * e[i__ - 2];
+ e[i__ - 2] = cosl * e[i__ - 2];
+ }
+ work[i__ - ll] = cosr;
+ work[i__ - ll + nm1] = -sinr;
+ work[i__ - ll + nm12] = cosl;
+ work[i__ - ll + nm13] = -sinl;
+/* L150: */
+ }
+ e[ll] = f;
+
+/* Test convergence */
+
+ if ((d__1 = e[ll], abs(d__1)) <= thresh) {
+ e[ll] = 0.;
+ }
+
+/* Update singular vectors if desired */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[
+ nm13 + 1], &vt[ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll *
+ u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[
+ ll + c_dim1], ldc);
+ }
+ }
+ }
+
+/* QR iteration finished, go back and check convergence */
+
+ goto L60;
+
+/* All singular values converged, so make them positive */
+
+L160:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] < 0.) {
+ d__[i__] = -d__[i__];
+
+/* Change sign of singular vectors, if desired */
+
+ if (*ncvt > 0) {
+ dscal_(ncvt, &c_b72, &vt[i__ + vt_dim1], ldvt);
+ }
+ }
+/* L170: */
+ }
+
+/* Sort the singular values into decreasing order (insertion sort on */
+/* singular values, but only one transposition per singular vector) */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Scan for smallest D(I) */
+
+ isub = 1;
+ smin = d__[1];
+ i__2 = *n + 1 - i__;
+ for (j = 2; j <= i__2; ++j) {
+ if (d__[j] <= smin) {
+ isub = j;
+ smin = d__[j];
+ }
+/* L180: */
+ }
+ if (isub != *n + 1 - i__) {
+
+/* Swap singular values and vectors */
+
+ d__[isub] = d__[*n + 1 - i__];
+ d__[*n + 1 - i__] = smin;
+ if (*ncvt > 0) {
+ dswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[*n + 1 - i__ +
+ vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ dswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[(*n + 1 - i__) *
+ u_dim1 + 1], &c__1);
+ }
+ if (*ncc > 0) {
+ dswap_(ncc, &c__[isub + c_dim1], ldc, &c__[*n + 1 - i__ +
+ c_dim1], ldc);
+ }
+ }
+/* L190: */
+ }
+ goto L220;
+
+/* Maximum number of iterations exceeded, failure to converge */
+
+L200:
+ *info = 0;
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (e[i__] != 0.) {
+ ++(*info);
+ }
+/* L210: */
+ }
+L220:
+ return 0;
+
+/* End of DBDSQR */
+
+} /* dbdsqr_ */
diff --git a/contrib/libs/clapack/ddisna.c b/contrib/libs/clapack/ddisna.c
new file mode 100644
index 0000000000..ad7c3677ab
--- /dev/null
+++ b/contrib/libs/clapack/ddisna.c
@@ -0,0 +1,227 @@
+/* ddisna.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ddisna_(char *job, integer *m, integer *n, doublereal *
+ d__, doublereal *sep, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2, d__3;
+
+ /* Local variables */
+ integer i__, k;
+ doublereal eps;
+ logical decr, left, incr, sing, eigen;
+ extern logical lsame_(char *, char *);
+ doublereal anorm;
+ logical right;
+ extern doublereal dlamch_(char *);
+ doublereal oldgap, safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal newgap, thresh;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DDISNA computes the reciprocal condition numbers for the eigenvectors */
+/* of a real symmetric or complex Hermitian matrix or for the left or */
+/* right singular vectors of a general m-by-n matrix. The reciprocal */
+/* condition number is the 'gap' between the corresponding eigenvalue or */
+/* singular value and the nearest other one. */
+
+/* The bound on the error, measured by angle in radians, in the I-th */
+/* computed vector is given by */
+
+/* DLAMCH( 'E' ) * ( ANORM / SEP( I ) ) */
+
+/* where ANORM = 2-norm(A) = max( abs( D(j) ) ). SEP(I) is not allowed */
+/* to be smaller than DLAMCH( 'E' )*ANORM in order to limit the size of */
+/* the error bound. */
+
+/* DDISNA may also be used to compute error bounds for eigenvectors of */
+/* the generalized symmetric definite eigenproblem. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies for which problem the reciprocal condition numbers */
+/* should be computed: */
+/* = 'E': the eigenvectors of a symmetric/Hermitian matrix; */
+/* = 'L': the left singular vectors of a general matrix; */
+/* = 'R': the right singular vectors of a general matrix. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix. M >= 0. */
+
+/* N (input) INTEGER */
+/* If JOB = 'L' or 'R', the number of columns of the matrix, */
+/* in which case N >= 0. Ignored if JOB = 'E'. */
+
+/* D (input) DOUBLE PRECISION array, dimension (M) if JOB = 'E' */
+/* dimension (min(M,N)) if JOB = 'L' or 'R' */
+/* The eigenvalues (if JOB = 'E') or singular values (if JOB = */
+/* 'L' or 'R') of the matrix, in either increasing or decreasing */
+/* order. If singular values, they must be non-negative. */
+
+/* SEP (output) DOUBLE PRECISION array, dimension (M) if JOB = 'E' */
+/* dimension (min(M,N)) if JOB = 'L' or 'R' */
+/* The reciprocal condition numbers of the vectors. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ --sep;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+ eigen = lsame_(job, "E");
+ left = lsame_(job, "L");
+ right = lsame_(job, "R");
+ sing = left || right;
+ if (eigen) {
+ k = *m;
+ } else if (sing) {
+ k = min(*m,*n);
+ }
+ if (! eigen && ! sing) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (k < 0) {
+ *info = -3;
+ } else {
+ incr = TRUE_;
+ decr = TRUE_;
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (incr) {
+ incr = incr && d__[i__] <= d__[i__ + 1];
+ }
+ if (decr) {
+ decr = decr && d__[i__] >= d__[i__ + 1];
+ }
+/* L10: */
+ }
+ if (sing && k > 0) {
+ if (incr) {
+ incr = incr && 0. <= d__[1];
+ }
+ if (decr) {
+ decr = decr && d__[k] >= 0.;
+ }
+ }
+ if (! (incr || decr)) {
+ *info = -4;
+ }
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DDISNA", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (k == 0) {
+ return 0;
+ }
+
+/* Compute reciprocal condition numbers */
+
+ if (k == 1) {
+ sep[1] = dlamch_("O");
+ } else {
+ oldgap = (d__1 = d__[2] - d__[1], abs(d__1));
+ sep[1] = oldgap;
+ i__1 = k - 1;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ newgap = (d__1 = d__[i__ + 1] - d__[i__], abs(d__1));
+ sep[i__] = min(oldgap,newgap);
+ oldgap = newgap;
+/* L20: */
+ }
+ sep[k] = oldgap;
+ }
+ if (sing) {
+ if (left && *m > *n || right && *m < *n) {
+ if (incr) {
+ sep[1] = min(sep[1],d__[1]);
+ }
+ if (decr) {
+/* Computing MIN */
+ d__1 = sep[k], d__2 = d__[k];
+ sep[k] = min(d__1,d__2);
+ }
+ }
+ }
+
+/* Ensure that reciprocal condition numbers are not less than */
+/* threshold, in order to limit the size of the error bound */
+
+ eps = dlamch_("E");
+ safmin = dlamch_("S");
+/* Computing MAX */
+ d__2 = abs(d__[1]), d__3 = (d__1 = d__[k], abs(d__1));
+ anorm = max(d__2,d__3);
+ if (anorm == 0.) {
+ thresh = eps;
+ } else {
+/* Computing MAX */
+ d__1 = eps * anorm;
+ thresh = max(d__1,safmin);
+ }
+ i__1 = k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = sep[i__];
+ sep[i__] = max(d__1,thresh);
+/* L30: */
+ }
+
+ return 0;
+
+/* End of DDISNA */
+
+} /* ddisna_ */
diff --git a/contrib/libs/clapack/dgbbrd.c b/contrib/libs/clapack/dgbbrd.c
new file mode 100644
index 0000000000..03e09a7597
--- /dev/null
+++ b/contrib/libs/clapack/dgbbrd.c
@@ -0,0 +1,566 @@
+/* dgbbrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b8 = 0.;
+static doublereal c_b9 = 1.;
+static integer c__1 = 1;
+
+/* Subroutine */ int dgbbrd_(char *vect, integer *m, integer *n, integer *ncc,
+ integer *kl, integer *ku, doublereal *ab, integer *ldab, doublereal *
+ d__, doublereal *e, doublereal *q, integer *ldq, doublereal *pt,
+ integer *ldpt, doublereal *c__, integer *ldc, doublereal *work,
+ integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, c_dim1, c_offset, pt_dim1, pt_offset, q_dim1,
+ q_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7;
+
+ /* Local variables */
+ integer i__, j, l, j1, j2, kb;
+ doublereal ra, rb, rc;
+ integer kk, ml, mn, nr, mu;
+ doublereal rs;
+ integer kb1, ml0, mu0, klm, kun, nrt, klu1, inca;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ extern logical lsame_(char *, char *);
+ logical wantb, wantc;
+ integer minmn;
+ logical wantq;
+ extern /* Subroutine */ int dlaset_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *),
+ dlartg_(doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *), xerbla_(char *, integer *), dlargv_(
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *), dlartv_(integer *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *);
+ logical wantpt;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGBBRD reduces a real general m-by-n band matrix A to upper */
+/* bidiagonal form B by an orthogonal transformation: Q' * A * P = B. */
+
+/* The routine computes B, and optionally forms Q or P', or computes */
+/* Q'*C for a given matrix C. */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* Specifies whether or not the matrices Q and P' are to be */
+/* formed. */
+/* = 'N': do not form Q or P'; */
+/* = 'Q': form Q only; */
+/* = 'P': form P' only; */
+/* = 'B': form both. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NCC (input) INTEGER */
+/* The number of columns of the matrix C. NCC >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals of the matrix A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals of the matrix A. KU >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* On entry, the m-by-n band matrix A, stored in rows 1 to */
+/* KL+KU+1. The j-th column of A is stored in the j-th column of */
+/* the array AB as follows: */
+/* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl). */
+/* On exit, A is overwritten by values generated during the */
+/* reduction. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array A. LDAB >= KL+KU+1. */
+
+/* D (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The diagonal elements of the bidiagonal matrix B. */
+
+/* E (output) DOUBLE PRECISION array, dimension (min(M,N)-1) */
+/* The superdiagonal elements of the bidiagonal matrix B. */
+
+/* Q (output) DOUBLE PRECISION array, dimension (LDQ,M) */
+/* If VECT = 'Q' or 'B', the m-by-m orthogonal matrix Q. */
+/* If VECT = 'N' or 'P', the array Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. */
+/* LDQ >= max(1,M) if VECT = 'Q' or 'B'; LDQ >= 1 otherwise. */
+
+/* PT (output) DOUBLE PRECISION array, dimension (LDPT,N) */
+/* If VECT = 'P' or 'B', the n-by-n orthogonal matrix P'. */
+/* If VECT = 'N' or 'Q', the array PT is not referenced. */
+
+/* LDPT (input) INTEGER */
+/* The leading dimension of the array PT. */
+/* LDPT >= max(1,N) if VECT = 'P' or 'B'; LDPT >= 1 otherwise. */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC,NCC) */
+/* On entry, an m-by-ncc matrix C. */
+/* On exit, C is overwritten by Q'*C. */
+/* C is not referenced if NCC = 0. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. */
+/* LDC >= max(1,M) if NCC > 0; LDC >= 1 if NCC = 0. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (2*max(M,N)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --d__;
+ --e;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ pt_dim1 = *ldpt;
+ pt_offset = 1 + pt_dim1;
+ pt -= pt_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ wantb = lsame_(vect, "B");
+ wantq = lsame_(vect, "Q") || wantb;
+ wantpt = lsame_(vect, "P") || wantb;
+ wantc = *ncc > 0;
+ klu1 = *kl + *ku + 1;
+ *info = 0;
+ if (! wantq && ! wantpt && ! lsame_(vect, "N")) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ncc < 0) {
+ *info = -4;
+ } else if (*kl < 0) {
+ *info = -5;
+ } else if (*ku < 0) {
+ *info = -6;
+ } else if (*ldab < klu1) {
+ *info = -8;
+ } else if (*ldq < 1 || wantq && *ldq < max(1,*m)) {
+ *info = -12;
+ } else if (*ldpt < 1 || wantpt && *ldpt < max(1,*n)) {
+ *info = -14;
+ } else if (*ldc < 1 || wantc && *ldc < max(1,*m)) {
+ *info = -16;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGBBRD", &i__1);
+ return 0;
+ }
+
+/* Initialize Q and P' to the unit matrix, if needed */
+
+ if (wantq) {
+ dlaset_("Full", m, m, &c_b8, &c_b9, &q[q_offset], ldq);
+ }
+ if (wantpt) {
+ dlaset_("Full", n, n, &c_b8, &c_b9, &pt[pt_offset], ldpt);
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ minmn = min(*m,*n);
+
+ if (*kl + *ku > 1) {
+
+/* Reduce to upper bidiagonal form if KU > 0; if KU = 0, reduce */
+/* first to lower bidiagonal form and then transform to upper */
+/* bidiagonal */
+
+ if (*ku > 0) {
+ ml0 = 1;
+ mu0 = 2;
+ } else {
+ ml0 = 2;
+ mu0 = 1;
+ }
+
+/* Wherever possible, plane rotations are generated and applied in */
+/* vector operations of length NR over the index set J1:J2:KLU1. */
+
+/* The sines of the plane rotations are stored in WORK(1:max(m,n)) */
+/* and the cosines in WORK(max(m,n)+1:2*max(m,n)). */
+
+ mn = max(*m,*n);
+/* Computing MIN */
+ i__1 = *m - 1;
+ klm = min(i__1,*kl);
+/* Computing MIN */
+ i__1 = *n - 1;
+ kun = min(i__1,*ku);
+ kb = klm + kun;
+ kb1 = kb + 1;
+ inca = kb1 * *ldab;
+ nr = 0;
+ j1 = klm + 2;
+ j2 = 1 - kun;
+
+ i__1 = minmn;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Reduce i-th column and i-th row of matrix to bidiagonal form */
+
+ ml = klm + 1;
+ mu = kun + 1;
+ i__2 = kb;
+ for (kk = 1; kk <= i__2; ++kk) {
+ j1 += kb;
+ j2 += kb;
+
+/* generate plane rotations to annihilate nonzero elements */
+/* which have been created below the band */
+
+ if (nr > 0) {
+ dlargv_(&nr, &ab[klu1 + (j1 - klm - 1) * ab_dim1], &inca,
+ &work[j1], &kb1, &work[mn + j1], &kb1);
+ }
+
+/* apply plane rotations from the left */
+
+ i__3 = kb;
+ for (l = 1; l <= i__3; ++l) {
+ if (j2 - klm + l - 1 > *n) {
+ nrt = nr - 1;
+ } else {
+ nrt = nr;
+ }
+ if (nrt > 0) {
+ dlartv_(&nrt, &ab[klu1 - l + (j1 - klm + l - 1) *
+ ab_dim1], &inca, &ab[klu1 - l + 1 + (j1 - klm
+ + l - 1) * ab_dim1], &inca, &work[mn + j1], &
+ work[j1], &kb1);
+ }
+/* L10: */
+ }
+
+ if (ml > ml0) {
+ if (ml <= *m - i__ + 1) {
+
+/* generate plane rotation to annihilate a(i+ml-1,i) */
+/* within the band, and apply rotation from the left */
+
+ dlartg_(&ab[*ku + ml - 1 + i__ * ab_dim1], &ab[*ku +
+ ml + i__ * ab_dim1], &work[mn + i__ + ml - 1],
+ &work[i__ + ml - 1], &ra);
+ ab[*ku + ml - 1 + i__ * ab_dim1] = ra;
+ if (i__ < *n) {
+/* Computing MIN */
+ i__4 = *ku + ml - 2, i__5 = *n - i__;
+ i__3 = min(i__4,i__5);
+ i__6 = *ldab - 1;
+ i__7 = *ldab - 1;
+ drot_(&i__3, &ab[*ku + ml - 2 + (i__ + 1) *
+ ab_dim1], &i__6, &ab[*ku + ml - 1 + (i__
+ + 1) * ab_dim1], &i__7, &work[mn + i__ +
+ ml - 1], &work[i__ + ml - 1]);
+ }
+ }
+ ++nr;
+ j1 -= kb1;
+ }
+
+ if (wantq) {
+
+/* accumulate product of plane rotations in Q */
+
+ i__3 = j2;
+ i__4 = kb1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4)
+ {
+ drot_(m, &q[(j - 1) * q_dim1 + 1], &c__1, &q[j *
+ q_dim1 + 1], &c__1, &work[mn + j], &work[j]);
+/* L20: */
+ }
+ }
+
+ if (wantc) {
+
+/* apply plane rotations to C */
+
+ i__4 = j2;
+ i__3 = kb1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3)
+ {
+ drot_(ncc, &c__[j - 1 + c_dim1], ldc, &c__[j + c_dim1]
+, ldc, &work[mn + j], &work[j]);
+/* L30: */
+ }
+ }
+
+ if (j2 + kun > *n) {
+
+/* adjust J2 to keep within the bounds of the matrix */
+
+ --nr;
+ j2 -= kb1;
+ }
+
+ i__3 = j2;
+ i__4 = kb1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+
+/* create nonzero element a(j-1,j+ku) above the band */
+/* and store it in WORK(n+1:2*n) */
+
+ work[j + kun] = work[j] * ab[(j + kun) * ab_dim1 + 1];
+ ab[(j + kun) * ab_dim1 + 1] = work[mn + j] * ab[(j + kun)
+ * ab_dim1 + 1];
+/* L40: */
+ }
+
+/* generate plane rotations to annihilate nonzero elements */
+/* which have been generated above the band */
+
+ if (nr > 0) {
+ dlargv_(&nr, &ab[(j1 + kun - 1) * ab_dim1 + 1], &inca, &
+ work[j1 + kun], &kb1, &work[mn + j1 + kun], &kb1);
+ }
+
+/* apply plane rotations from the right */
+
+ i__4 = kb;
+ for (l = 1; l <= i__4; ++l) {
+ if (j2 + l - 1 > *m) {
+ nrt = nr - 1;
+ } else {
+ nrt = nr;
+ }
+ if (nrt > 0) {
+ dlartv_(&nrt, &ab[l + 1 + (j1 + kun - 1) * ab_dim1], &
+ inca, &ab[l + (j1 + kun) * ab_dim1], &inca, &
+ work[mn + j1 + kun], &work[j1 + kun], &kb1);
+ }
+/* L50: */
+ }
+
+ if (ml == ml0 && mu > mu0) {
+ if (mu <= *n - i__ + 1) {
+
+/* generate plane rotation to annihilate a(i,i+mu-1) */
+/* within the band, and apply rotation from the right */
+
+ dlartg_(&ab[*ku - mu + 3 + (i__ + mu - 2) * ab_dim1],
+ &ab[*ku - mu + 2 + (i__ + mu - 1) * ab_dim1],
+ &work[mn + i__ + mu - 1], &work[i__ + mu - 1],
+ &ra);
+ ab[*ku - mu + 3 + (i__ + mu - 2) * ab_dim1] = ra;
+/* Computing MIN */
+ i__3 = *kl + mu - 2, i__5 = *m - i__;
+ i__4 = min(i__3,i__5);
+ drot_(&i__4, &ab[*ku - mu + 4 + (i__ + mu - 2) *
+ ab_dim1], &c__1, &ab[*ku - mu + 3 + (i__ + mu
+ - 1) * ab_dim1], &c__1, &work[mn + i__ + mu -
+ 1], &work[i__ + mu - 1]);
+ }
+ ++nr;
+ j1 -= kb1;
+ }
+
+ if (wantpt) {
+
+/* accumulate product of plane rotations in P' */
+
+ i__4 = j2;
+ i__3 = kb1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3)
+ {
+ drot_(n, &pt[j + kun - 1 + pt_dim1], ldpt, &pt[j +
+ kun + pt_dim1], ldpt, &work[mn + j + kun], &
+ work[j + kun]);
+/* L60: */
+ }
+ }
+
+ if (j2 + kb > *m) {
+
+/* adjust J2 to keep within the bounds of the matrix */
+
+ --nr;
+ j2 -= kb1;
+ }
+
+ i__3 = j2;
+ i__4 = kb1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+
+/* create nonzero element a(j+kl+ku,j+ku-1) below the */
+/* band and store it in WORK(1:n) */
+
+ work[j + kb] = work[j + kun] * ab[klu1 + (j + kun) *
+ ab_dim1];
+ ab[klu1 + (j + kun) * ab_dim1] = work[mn + j + kun] * ab[
+ klu1 + (j + kun) * ab_dim1];
+/* L70: */
+ }
+
+ if (ml > ml0) {
+ --ml;
+ } else {
+ --mu;
+ }
+/* L80: */
+ }
+/* L90: */
+ }
+ }
+
+ if (*ku == 0 && *kl > 0) {
+
+/* A has been reduced to lower bidiagonal form */
+
+/* Transform lower bidiagonal form to upper bidiagonal by applying */
+/* plane rotations from the left, storing diagonal elements in D */
+/* and off-diagonal elements in E */
+
+/* Computing MIN */
+ i__2 = *m - 1;
+ i__1 = min(i__2,*n);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlartg_(&ab[i__ * ab_dim1 + 1], &ab[i__ * ab_dim1 + 2], &rc, &rs,
+ &ra);
+ d__[i__] = ra;
+ if (i__ < *n) {
+ e[i__] = rs * ab[(i__ + 1) * ab_dim1 + 1];
+ ab[(i__ + 1) * ab_dim1 + 1] = rc * ab[(i__ + 1) * ab_dim1 + 1]
+ ;
+ }
+ if (wantq) {
+ drot_(m, &q[i__ * q_dim1 + 1], &c__1, &q[(i__ + 1) * q_dim1 +
+ 1], &c__1, &rc, &rs);
+ }
+ if (wantc) {
+ drot_(ncc, &c__[i__ + c_dim1], ldc, &c__[i__ + 1 + c_dim1],
+ ldc, &rc, &rs);
+ }
+/* L100: */
+ }
+ if (*m <= *n) {
+ d__[*m] = ab[*m * ab_dim1 + 1];
+ }
+ } else if (*ku > 0) {
+
+/* A has been reduced to upper bidiagonal form */
+
+ if (*m < *n) {
+
+/* Annihilate a(m,m+1) by applying plane rotations from the */
+/* right, storing diagonal elements in D and off-diagonal */
+/* elements in E */
+
+ rb = ab[*ku + (*m + 1) * ab_dim1];
+ for (i__ = *m; i__ >= 1; --i__) {
+ dlartg_(&ab[*ku + 1 + i__ * ab_dim1], &rb, &rc, &rs, &ra);
+ d__[i__] = ra;
+ if (i__ > 1) {
+ rb = -rs * ab[*ku + i__ * ab_dim1];
+ e[i__ - 1] = rc * ab[*ku + i__ * ab_dim1];
+ }
+ if (wantpt) {
+ drot_(n, &pt[i__ + pt_dim1], ldpt, &pt[*m + 1 + pt_dim1],
+ ldpt, &rc, &rs);
+ }
+/* L110: */
+ }
+ } else {
+
+/* Copy off-diagonal elements to E and diagonal elements to D */
+
+ i__1 = minmn - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ e[i__] = ab[*ku + (i__ + 1) * ab_dim1];
+/* L120: */
+ }
+ i__1 = minmn;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = ab[*ku + 1 + i__ * ab_dim1];
+/* L130: */
+ }
+ }
+ } else {
+
+/* A is diagonal. Set elements of E to zero and copy diagonal */
+/* elements to D. */
+
+ i__1 = minmn - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ e[i__] = 0.;
+/* L140: */
+ }
+ i__1 = minmn;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = ab[i__ * ab_dim1 + 1];
+/* L150: */
+ }
+ }
+ return 0;
+
+/* End of DGBBRD */
+
+} /* dgbbrd_ */
diff --git a/contrib/libs/clapack/dgbcon.c b/contrib/libs/clapack/dgbcon.c
new file mode 100644
index 0000000000..8b6144cf63
--- /dev/null
+++ b/contrib/libs/clapack/dgbcon.c
@@ -0,0 +1,284 @@
+/* dgbcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dgbcon_(char *norm, integer *n, integer *kl, integer *ku,
+ doublereal *ab, integer *ldab, integer *ipiv, doublereal *anorm,
+ doublereal *rcond, doublereal *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3;
+ doublereal d__1;
+
+ /* Local variables */
+ integer j;
+ doublereal t;
+ integer kd, lm, jp, ix, kase;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ integer kase1;
+ doublereal scale;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int drscl_(integer *, doublereal *, doublereal *,
+ integer *);
+ logical lnoti;
+ extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *), dlacn2_(integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ integer *);
+ extern doublereal dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dlatbs_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+ doublereal ainvnm;
+ logical onenrm;
+ char normin[1];
+ doublereal smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGBCON estimates the reciprocal of the condition number of a real */
+/* general band matrix A, in either the 1-norm or the infinity-norm, */
+/* using the LU factorization computed by DGBTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as */
+/* RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* Details of the LU factorization of the band matrix A, as */
+/* computed by DGBTRF. U is stored as an upper triangular band */
+/* matrix with KL+KU superdiagonals in rows 1 to KL+KU+1, and */
+/* the multipliers used during the factorization are stored in */
+/* rows KL+KU+2 to 2*KL+KU+1. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= 2*KL+KU+1. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= N, row i of the matrix was */
+/* interchanged with row IPIV(i). */
+
+/* ANORM (input) DOUBLE PRECISION */
+/* If NORM = '1' or 'O', the 1-norm of the original matrix A. */
+/* If NORM = 'I', the infinity-norm of the original matrix A. */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --ipiv;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+ if (! onenrm && ! lsame_(norm, "I")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*ldab < (*kl << 1) + *ku + 1) {
+ *info = -6;
+ } else if (*anorm < 0.) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGBCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.;
+ if (*n == 0) {
+ *rcond = 1.;
+ return 0;
+ } else if (*anorm == 0.) {
+ return 0;
+ }
+
+ smlnum = dlamch_("Safe minimum");
+
+/* Estimate the norm of inv(A). */
+
+ ainvnm = 0.;
+ *(unsigned char *)normin = 'N';
+ if (onenrm) {
+ kase1 = 1;
+ } else {
+ kase1 = 2;
+ }
+ kd = *kl + *ku + 1;
+ lnoti = *kl > 0;
+ kase = 0;
+L10:
+ dlacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+ if (kase == kase1) {
+
+/* Multiply by inv(L). */
+
+ if (lnoti) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *kl, i__3 = *n - j;
+ lm = min(i__2,i__3);
+ jp = ipiv[j];
+ t = work[jp];
+ if (jp != j) {
+ work[jp] = work[j];
+ work[j] = t;
+ }
+ d__1 = -t;
+ daxpy_(&lm, &d__1, &ab[kd + 1 + j * ab_dim1], &c__1, &
+ work[j + 1], &c__1);
+/* L20: */
+ }
+ }
+
+/* Multiply by inv(U). */
+
+ i__1 = *kl + *ku;
+ dlatbs_("Upper", "No transpose", "Non-unit", normin, n, &i__1, &
+ ab[ab_offset], ldab, &work[1], &scale, &work[(*n << 1) +
+ 1], info);
+ } else {
+
+/* Multiply by inv(U'). */
+
+ i__1 = *kl + *ku;
+ dlatbs_("Upper", "Transpose", "Non-unit", normin, n, &i__1, &ab[
+ ab_offset], ldab, &work[1], &scale, &work[(*n << 1) + 1],
+ info);
+
+/* Multiply by inv(L'). */
+
+ if (lnoti) {
+ for (j = *n - 1; j >= 1; --j) {
+/* Computing MIN */
+ i__1 = *kl, i__2 = *n - j;
+ lm = min(i__1,i__2);
+ work[j] -= ddot_(&lm, &ab[kd + 1 + j * ab_dim1], &c__1, &
+ work[j + 1], &c__1);
+ jp = ipiv[j];
+ if (jp != j) {
+ t = work[jp];
+ work[jp] = work[j];
+ work[j] = t;
+ }
+/* L30: */
+ }
+ }
+ }
+
+/* Divide X by 1/SCALE if doing so will not cause overflow. */
+
+ *(unsigned char *)normin = 'Y';
+ if (scale != 1.) {
+ ix = idamax_(n, &work[1], &c__1);
+ if (scale < (d__1 = work[ix], abs(d__1)) * smlnum || scale == 0.)
+ {
+ goto L40;
+ }
+ drscl_(n, &scale, &work[1], &c__1);
+ }
+ goto L10;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.) {
+ *rcond = 1. / ainvnm / *anorm;
+ }
+
+L40:
+ return 0;
+
+/* End of DGBCON */
+
+} /* dgbcon_ */
diff --git a/contrib/libs/clapack/dgbequ.c b/contrib/libs/clapack/dgbequ.c
new file mode 100644
index 0000000000..d1045379b4
--- /dev/null
+++ b/contrib/libs/clapack/dgbequ.c
@@ -0,0 +1,320 @@
+/* dgbequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dgbequ_(integer *m, integer *n, integer *kl, integer *ku,
+ doublereal *ab, integer *ldab, doublereal *r__, doublereal *c__,
+ doublereal *rowcnd, doublereal *colcnd, doublereal *amax, integer *
+ info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1, d__2, d__3;
+
+ /* Local variables */
+ integer i__, j, kd;
+ doublereal rcmin, rcmax;
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum, smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGBEQU computes row and column scalings intended to equilibrate an */
+/* M-by-N band matrix A and reduce its condition number. R returns the */
+/* row scale factors and C the column scale factors, chosen to try to */
+/* make the largest element in each row and column of the matrix B with */
+/* elements B(i,j)=R(i)*A(i,j)*C(j) have absolute value 1. */
+
+/* R(i) and C(j) are restricted to be between SMLNUM = smallest safe */
+/* number and BIGNUM = largest safe number. Use of these scaling */
+/* factors is not guaranteed to reduce the condition number of A but */
+/* works well in practice. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* The band matrix A, stored in rows 1 to KL+KU+1. The j-th */
+/* column of A is stored in the j-th column of the array AB as */
+/* follows: */
+/* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KL+KU+1. */
+
+/* R (output) DOUBLE PRECISION array, dimension (M) */
+/* If INFO = 0, or INFO > M, R contains the row scale factors */
+/* for A. */
+
+/* C (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, C contains the column scale factors for A. */
+
+/* ROWCND (output) DOUBLE PRECISION */
+/* If INFO = 0 or INFO > M, ROWCND contains the ratio of the */
+/* smallest R(i) to the largest R(i). If ROWCND >= 0.1 and */
+/* AMAX is neither too large nor too small, it is not worth */
+/* scaling by R. */
+
+/* COLCND (output) DOUBLE PRECISION */
+/* If INFO = 0, COLCND contains the ratio of the smallest */
+/* C(i) to the largest C(i). If COLCND >= 0.1, it is not */
+/* worth scaling by C. */
+
+/* AMAX (output) DOUBLE PRECISION */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= M: the i-th row of A is exactly zero */
+/* > M: the (i-M)-th column of A is exactly zero */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --r__;
+ --c__;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*ldab < *kl + *ku + 1) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGBEQU", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ *rowcnd = 1.;
+ *colcnd = 1.;
+ *amax = 0.;
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ smlnum = dlamch_("S");
+ bignum = 1. / smlnum;
+
+/* Compute row scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__[i__] = 0.;
+/* L10: */
+ }
+
+/* Find the maximum element in each row. */
+
+ kd = *ku + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j - *ku;
+/* Computing MIN */
+ i__4 = j + *kl;
+ i__3 = min(i__4,*m);
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+ d__2 = r__[i__], d__3 = (d__1 = ab[kd + i__ - j + j * ab_dim1],
+ abs(d__1));
+ r__[i__] = max(d__2,d__3);
+/* L20: */
+ }
+/* L30: */
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = rcmax, d__2 = r__[i__];
+ rcmax = max(d__1,d__2);
+/* Computing MIN */
+ d__1 = rcmin, d__2 = r__[i__];
+ rcmin = min(d__1,d__2);
+/* L40: */
+ }
+ *amax = rcmax;
+
+ if (rcmin == 0.) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (r__[i__] == 0.) {
+ *info = i__;
+ return 0;
+ }
+/* L50: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MIN */
+/* Computing MAX */
+ d__2 = r__[i__];
+ d__1 = max(d__2,smlnum);
+ r__[i__] = 1. / min(d__1,bignum);
+/* L60: */
+ }
+
+/* Compute ROWCND = min(R(I)) / max(R(I)) */
+
+ *rowcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ }
+
+/* Compute column scale factors */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j] = 0.;
+/* L70: */
+ }
+
+/* Find the maximum element in each column, */
+/* assuming the row scaling computed above. */
+
+ kd = *ku + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__3 = j - *ku;
+/* Computing MIN */
+ i__4 = j + *kl;
+ i__2 = min(i__4,*m);
+ for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = c__[j], d__3 = (d__1 = ab[kd + i__ - j + j * ab_dim1], abs(
+ d__1)) * r__[i__];
+ c__[j] = max(d__2,d__3);
+/* L80: */
+ }
+/* L90: */
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ d__1 = rcmin, d__2 = c__[j];
+ rcmin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = rcmax, d__2 = c__[j];
+ rcmax = max(d__1,d__2);
+/* L100: */
+ }
+
+ if (rcmin == 0.) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (c__[j] == 0.) {
+ *info = *m + j;
+ return 0;
+ }
+/* L110: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+/* Computing MAX */
+ d__2 = c__[j];
+ d__1 = max(d__2,smlnum);
+ c__[j] = 1. / min(d__1,bignum);
+/* L120: */
+ }
+
+/* Compute COLCND = min(C(J)) / max(C(J)) */
+
+ *colcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ }
+
+ return 0;
+
+/* End of DGBEQU */
+
+} /* dgbequ_ */
diff --git a/contrib/libs/clapack/dgbequb.c b/contrib/libs/clapack/dgbequb.c
new file mode 100644
index 0000000000..4a0f34b6ad
--- /dev/null
+++ b/contrib/libs/clapack/dgbequb.c
@@ -0,0 +1,347 @@
+/* dgbequb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dgbequb_(integer *m, integer *n, integer *kl, integer *
+ ku, doublereal *ab, integer *ldab, doublereal *r__, doublereal *c__,
+ doublereal *rowcnd, doublereal *colcnd, doublereal *amax, integer *
+ info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double log(doublereal), pow_di(doublereal *, integer *);
+
+ /* Local variables */
+ integer i__, j, kd;
+ doublereal radix, rcmin, rcmax;
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum, logrdx, smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- Contributed by James Demmel, Deaglan Halligan, Yozo Hida and -- */
+/* -- Jason Riedy of Univ. of California Berkeley. -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley and NAG Ltd. -- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGBEQUB computes row and column scalings intended to equilibrate an */
+/* M-by-N matrix A and reduce its condition number. R returns the row */
+/* scale factors and C the column scale factors, chosen to try to make */
+/* the largest element in each row and column of the matrix B with */
+/* elements B(i,j)=R(i)*A(i,j)*C(j) have an absolute value of at most */
+/* the radix. */
+
+/* R(i) and C(j) are restricted to be a power of the radix between */
+/* SMLNUM = smallest safe number and BIGNUM = largest safe number. Use */
+/* of these scaling factors is not guaranteed to reduce the condition */
+/* number of A but works well in practice. */
+
+/* This routine differs from DGEEQU by restricting the scaling factors */
+/* to a power of the radix. Baring over- and underflow, scaling by */
+/* these factors introduces no additional rounding errors. However, the */
+/* scaled entries' magnitured are no longer approximately 1 but lie */
+/* between sqrt(radix) and 1/sqrt(radix). */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* On entry, the matrix A in band storage, in rows 1 to KL+KU+1. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(KU+1+i-j,j) = A(i,j) for max(1,j-KU)<=i<=min(N,j+kl) */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array A. LDAB >= max(1,M). */
+
+/* R (output) DOUBLE PRECISION array, dimension (M) */
+/* If INFO = 0 or INFO > M, R contains the row scale factors */
+/* for A. */
+
+/* C (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, C contains the column scale factors for A. */
+
+/* ROWCND (output) DOUBLE PRECISION */
+/* If INFO = 0 or INFO > M, ROWCND contains the ratio of the */
+/* smallest R(i) to the largest R(i). If ROWCND >= 0.1 and */
+/* AMAX is neither too large nor too small, it is not worth */
+/* scaling by R. */
+
+/* COLCND (output) DOUBLE PRECISION */
+/* If INFO = 0, COLCND contains the ratio of the smallest */
+/* C(i) to the largest C(i). If COLCND >= 0.1, it is not */
+/* worth scaling by C. */
+
+/* AMAX (output) DOUBLE PRECISION */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= M: the i-th row of A is exactly zero */
+/* > M: the (i-M)-th column of A is exactly zero */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --r__;
+ --c__;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*ldab < *kl + *ku + 1) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGBEQUB", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ *rowcnd = 1.;
+ *colcnd = 1.;
+ *amax = 0.;
+ return 0;
+ }
+
+/* Get machine constants. Assume SMLNUM is a power of the radix. */
+
+ smlnum = dlamch_("S");
+ bignum = 1. / smlnum;
+ radix = dlamch_("B");
+ logrdx = log(radix);
+
+/* Compute row scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__[i__] = 0.;
+/* L10: */
+ }
+
+/* Find the maximum element in each row. */
+
+ kd = *ku + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j - *ku;
+/* Computing MIN */
+ i__4 = j + *kl;
+ i__3 = min(i__4,*m);
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+ d__2 = r__[i__], d__3 = (d__1 = ab[kd + i__ - j + j * ab_dim1],
+ abs(d__1));
+ r__[i__] = max(d__2,d__3);
+/* L20: */
+ }
+/* L30: */
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (r__[i__] > 0.) {
+ i__3 = (integer) (log(r__[i__]) / logrdx);
+ r__[i__] = pow_di(&radix, &i__3);
+ }
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = rcmax, d__2 = r__[i__];
+ rcmax = max(d__1,d__2);
+/* Computing MIN */
+ d__1 = rcmin, d__2 = r__[i__];
+ rcmin = min(d__1,d__2);
+/* L40: */
+ }
+ *amax = rcmax;
+
+ if (rcmin == 0.) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (r__[i__] == 0.) {
+ *info = i__;
+ return 0;
+ }
+/* L50: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MIN */
+/* Computing MAX */
+ d__2 = r__[i__];
+ d__1 = max(d__2,smlnum);
+ r__[i__] = 1. / min(d__1,bignum);
+/* L60: */
+ }
+
+/* Compute ROWCND = min(R(I)) / max(R(I)). */
+
+ *rowcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ }
+
+/* Compute column scale factors. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j] = 0.;
+/* L70: */
+ }
+
+/* Find the maximum element in each column, */
+/* assuming the row scaling computed above. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__3 = j - *ku;
+/* Computing MIN */
+ i__4 = j + *kl;
+ i__2 = min(i__4,*m);
+ for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = c__[j], d__3 = (d__1 = ab[kd + i__ - j + j * ab_dim1], abs(
+ d__1)) * r__[i__];
+ c__[j] = max(d__2,d__3);
+/* L80: */
+ }
+ if (c__[j] > 0.) {
+ i__2 = (integer) (log(c__[j]) / logrdx);
+ c__[j] = pow_di(&radix, &i__2);
+ }
+/* L90: */
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ d__1 = rcmin, d__2 = c__[j];
+ rcmin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = rcmax, d__2 = c__[j];
+ rcmax = max(d__1,d__2);
+/* L100: */
+ }
+
+ if (rcmin == 0.) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (c__[j] == 0.) {
+ *info = *m + j;
+ return 0;
+ }
+/* L110: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+/* Computing MAX */
+ d__2 = c__[j];
+ d__1 = max(d__2,smlnum);
+ c__[j] = 1. / min(d__1,bignum);
+/* L120: */
+ }
+
+/* Compute COLCND = min(C(J)) / max(C(J)). */
+
+ *colcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ }
+
+ return 0;
+
+/* End of DGBEQUB */
+
+} /* dgbequb_ */
diff --git a/contrib/libs/clapack/dgbrfs.c b/contrib/libs/clapack/dgbrfs.c
new file mode 100644
index 0000000000..e95b2dcb2d
--- /dev/null
+++ b/contrib/libs/clapack/dgbrfs.c
@@ -0,0 +1,455 @@
+/* dgbrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b15 = -1.;
+static doublereal c_b17 = 1.;
+
+/* Subroutine */ int dgbrfs_(char *trans, integer *n, integer *kl, integer *
+ ku, integer *nrhs, doublereal *ab, integer *ldab, doublereal *afb,
+ integer *ldafb, integer *ipiv, doublereal *b, integer *ldb,
+ doublereal *x, integer *ldx, doublereal *ferr, doublereal *berr,
+ doublereal *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, afb_dim1, afb_offset, b_dim1, b_offset,
+ x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7;
+ doublereal d__1, d__2, d__3;
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal s;
+ integer kk;
+ doublereal xk;
+ integer nz;
+ doublereal eps;
+ integer kase;
+ doublereal safe1, safe2;
+ extern /* Subroutine */ int dgbmv_(char *, integer *, integer *, integer *
+, integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), daxpy_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *);
+ integer count;
+ extern /* Subroutine */ int dlacn2_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ extern doublereal dlamch_(char *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *), dgbtrs_(
+ char *, integer *, integer *, integer *, integer *, doublereal *,
+ integer *, integer *, doublereal *, integer *, integer *);
+ logical notran;
+ char transt[1];
+ doublereal lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGBRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is banded, and provides */
+/* error bounds and backward error estimates for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose = Transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AB (input) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* The original band matrix A, stored in rows 1 to KL+KU+1. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(n,j+kl). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KL+KU+1. */
+
+/* AFB (input) DOUBLE PRECISION array, dimension (LDAFB,N) */
+/* Details of the LU factorization of the band matrix A, as */
+/* computed by DGBTRF. U is stored as an upper triangular band */
+/* matrix with KL+KU superdiagonals in rows 1 to KL+KU+1, and */
+/* the multipliers used during the factorization are stored in */
+/* rows KL+KU+2 to 2*KL+KU+1. */
+
+/* LDAFB (input) INTEGER */
+/* The leading dimension of the array AFB. LDAFB >= 2*KL*KU+1. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices from DGBTRF; for 1<=i<=N, row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by DGBTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ afb_dim1 = *ldafb;
+ afb_offset = 1 + afb_dim1;
+ afb -= afb_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*nrhs < 0) {
+ *info = -5;
+ } else if (*ldab < *kl + *ku + 1) {
+ *info = -7;
+ } else if (*ldafb < (*kl << 1) + *ku + 1) {
+ *info = -9;
+ } else if (*ldb < max(1,*n)) {
+ *info = -12;
+ } else if (*ldx < max(1,*n)) {
+ *info = -14;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGBRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.;
+ berr[j] = 0.;
+/* L10: */
+ }
+ return 0;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+/* Computing MIN */
+ i__1 = *kl + *ku + 2, i__2 = *n + 1;
+ nz = min(i__1,i__2);
+ eps = dlamch_("Epsilon");
+ safmin = dlamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A, A**T, or A**H, depending on TRANS. */
+
+ dcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+ dgbmv_(trans, n, n, kl, ku, &c_b15, &ab[ab_offset], ldab, &x[j *
+ x_dim1 + 1], &c__1, &c_b17, &work[*n + 1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] = (d__1 = b[i__ + j * b_dim1], abs(d__1));
+/* L30: */
+ }
+
+/* Compute abs(op(A))*abs(X) + abs(B). */
+
+ if (notran) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ kk = *ku + 1 - k;
+ xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+/* Computing MAX */
+ i__3 = 1, i__4 = k - *ku;
+/* Computing MIN */
+ i__6 = *n, i__7 = k + *kl;
+ i__5 = min(i__6,i__7);
+ for (i__ = max(i__3,i__4); i__ <= i__5; ++i__) {
+ work[i__] += (d__1 = ab[kk + i__ + k * ab_dim1], abs(d__1)
+ ) * xk;
+/* L40: */
+ }
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ kk = *ku + 1 - k;
+/* Computing MAX */
+ i__5 = 1, i__3 = k - *ku;
+/* Computing MIN */
+ i__6 = *n, i__7 = k + *kl;
+ i__4 = min(i__6,i__7);
+ for (i__ = max(i__5,i__3); i__ <= i__4; ++i__) {
+ s += (d__1 = ab[kk + i__ + k * ab_dim1], abs(d__1)) * (
+ d__2 = x[i__ + j * x_dim1], abs(d__2));
+/* L60: */
+ }
+ work[k] += s;
+/* L70: */
+ }
+ }
+ s = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+/* Computing MAX */
+ d__2 = s, d__3 = (d__1 = work[*n + i__], abs(d__1)) / work[
+ i__];
+ s = max(d__2,d__3);
+ } else {
+/* Computing MAX */
+ d__2 = s, d__3 = ((d__1 = work[*n + i__], abs(d__1)) + safe1)
+ / (work[i__] + safe1);
+ s = max(d__2,d__3);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ dgbtrs_(trans, n, kl, ku, &c__1, &afb[afb_offset], ldafb, &ipiv[1]
+, &work[*n + 1], n, info);
+ daxpy_(n, &c_b17, &work[*n + 1], &c__1, &x[j * x_dim1 + 1], &c__1)
+ ;
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use DLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+ work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+ work[i__];
+ } else {
+ work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+ work[i__] + safe1;
+ }
+/* L90: */
+ }
+
+ kase = 0;
+L100:
+ dlacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+ kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)**T). */
+
+ dgbtrs_(transt, n, kl, ku, &c__1, &afb[afb_offset], ldafb, &
+ ipiv[1], &work[*n + 1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] *= work[i__];
+/* L110: */
+ }
+ } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] *= work[i__];
+/* L120: */
+ }
+ dgbtrs_(trans, n, kl, ku, &c__1, &afb[afb_offset], ldafb, &
+ ipiv[1], &work[*n + 1], n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = lstres, d__3 = (d__1 = x[i__ + j * x_dim1], abs(d__1));
+ lstres = max(d__2,d__3);
+/* L130: */
+ }
+ if (lstres != 0.) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of DGBRFS */
+
+} /* dgbrfs_ */
diff --git a/contrib/libs/clapack/dgbsv.c b/contrib/libs/clapack/dgbsv.c
new file mode 100644
index 0000000000..97c05380eb
--- /dev/null
+++ b/contrib/libs/clapack/dgbsv.c
@@ -0,0 +1,176 @@
+/* dgbsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dgbsv_(integer *n, integer *kl, integer *ku, integer *
+	nrhs, doublereal *ab, integer *ldab, integer *ipiv, doublereal *b,
+	integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern /* Subroutine */ int dgbtrf_(integer *, integer *, integer *,
+	    integer *, doublereal *, integer *, integer *, integer *),
+	    xerbla_(char *, integer *), dgbtrs_(char *, integer *,
+	    integer *, integer *, integer *, doublereal *, integer *, integer
+	    *, doublereal *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGBSV computes the solution to a real system of linear equations */
+/* A * X = B, where A is a band matrix of order N with KL subdiagonals */
+/* and KU superdiagonals, and X and B are N-by-NRHS matrices. */
+
+/* The LU decomposition with partial pivoting and row interchanges is */
+/* used to factor A as A = L * U, where L is a product of permutation */
+/* and unit lower triangular matrices with KL subdiagonals, and U is */
+/* upper triangular with KL+KU superdiagonals. The factored form of A */
+/* is then used to solve the system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* On entry, the matrix A in band storage, in rows KL+1 to */
+/* 2*KL+KU+1; rows 1 to KL of the array need not be set. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(KL+KU+1+i-j,j) = A(i,j) for max(1,j-KU)<=i<=min(N,j+KL) */
+/* On exit, details of the factorization: U is stored as an */
+/* upper triangular band matrix with KL+KU superdiagonals in */
+/* rows 1 to KL+KU+1, and the multipliers used during the */
+/* factorization are stored in rows KL+KU+2 to 2*KL+KU+1. */
+/* See below for further details. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= 2*KL+KU+1. */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* The pivot indices that define the permutation matrix P; */
+/* row i of the matrix was interchanged with row IPIV(i). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and the solution has not been computed. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* M = N = 6, KL = 2, KU = 1: */
+
+/* On entry: On exit: */
+
+/* * * * + + + * * * u14 u25 u36 */
+/* * * + + + + * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+/* a21 a32 a43 a54 a65 * m21 m32 m43 m54 m65 * */
+/* a31 a42 a53 a64 * * m31 m42 m53 m64 * * */
+
+/* Array elements marked * are not used by the routine; elements marked */
+/* + need not be set on entry, but are required by the routine to store */
+/* elements of U because of fill-in resulting from the row interchanges. */
+
+/* ===================================================================== */
+
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset; /* f2c keeps Fortran 1-based indexing: AB(i,j) is ab[i + j*ab_dim1] */
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*kl < 0) {
+	*info = -2;
+    } else if (*ku < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (*ldab < (*kl << 1) + *ku + 1) { /* LDAB >= 2*KL+KU+1: the extra KL rows hold pivoting fill-in */
+	*info = -6;
+    } else if (*ldb < max(*n,1)) {
+	*info = -9;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGBSV ", &i__1); /* i__1 is the 1-based index of the invalid argument */
+	return 0;
+    }
+
+/* Compute the LU factorization of the band matrix A. */
+
+    dgbtrf_(n, n, kl, ku, &ab[ab_offset], ldab, &ipiv[1], info);
+    if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+	dgbtrs_("No transpose", n, kl, ku, nrhs, &ab[ab_offset], ldab, &ipiv[
+		1], &b[b_offset], ldb, info);
+    }
+    return 0;
+
+/* End of DGBSV */
+
+} /* dgbsv_ */
diff --git a/contrib/libs/clapack/dgbsvx.c b/contrib/libs/clapack/dgbsvx.c
new file mode 100644
index 0000000000..7e414cc961
--- /dev/null
+++ b/contrib/libs/clapack/dgbsvx.c
@@ -0,0 +1,650 @@
+/* dgbsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dgbsvx_(char *fact, char *trans, integer *n, integer *kl,
+	 integer *ku, integer *nrhs, doublereal *ab, integer *ldab,
+	doublereal *afb, integer *ldafb, integer *ipiv, char *equed,
+	doublereal *r__, doublereal *c__, doublereal *b, integer *ldb,
+	doublereal *x, integer *ldx, doublereal *rcond, doublereal *ferr,
+	doublereal *berr, doublereal *work, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, afb_dim1, afb_offset, b_dim1, b_offset,
+	    x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+    doublereal d__1, d__2, d__3;
+
+    /* Local variables */
+    integer i__, j, j1, j2;
+    doublereal amax;
+    char norm[1];
+    extern logical lsame_(char *, char *);
+    doublereal rcmin, rcmax, anorm;
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    logical equil;
+    extern doublereal dlangb_(char *, integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *), dlamch_(char *);
+    extern /* Subroutine */ int dlaqgb_(integer *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, char *),
+	    dgbcon_(char *, integer *, integer *, integer *, doublereal *,
+	    integer *, integer *, doublereal *, doublereal *, doublereal *,
+	    integer *, integer *);
+    doublereal colcnd;
+    extern doublereal dlantb_(char *, char *, char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *);
+    extern /* Subroutine */ int dgbequ_(integer *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, integer *), dgbrfs_(
+	    char *, integer *, integer *, integer *, integer *, doublereal *,
+	    integer *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    doublereal *, integer *, integer *), dgbtrf_(integer *,
+	    integer *, integer *, integer *, doublereal *, integer *, integer
+	    *, integer *);
+    logical nofact;
+    extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *),
+	    xerbla_(char *, integer *);
+    doublereal bignum;
+    extern /* Subroutine */ int dgbtrs_(char *, integer *, integer *, integer
+	    *, integer *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, integer *);
+    integer infequ;
+    logical colequ;
+    doublereal rowcnd;
+    logical notran;
+    doublereal smlnum;
+    logical rowequ;
+    doublereal rpvgrw;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGBSVX uses the LU factorization to compute the solution to a real */
+/* system of linear equations A * X = B, A**T * X = B, or A**H * X = B, */
+/* where A is a band matrix of order N with KL subdiagonals and KU */
+/* superdiagonals, and X and B are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed by this subroutine: */
+
+/* 1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/* the system: */
+/* TRANS = 'N': diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B */
+/* TRANS = 'T': (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B */
+/* TRANS = 'C': (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B */
+/* Whether or not the system will be equilibrated depends on the */
+/* scaling of the matrix A, but if equilibration is used, A is */
+/* overwritten by diag(R)*A*diag(C) and B by diag(R)*B (if TRANS='N') */
+/* or diag(C)*B (if TRANS = 'T' or 'C'). */
+
+/* 2. If FACT = 'N' or 'E', the LU decomposition is used to factor the */
+/* matrix A (after equilibration if FACT = 'E') as */
+/* A = L * U, */
+/* where L is a product of permutation and unit lower triangular */
+/* matrices with KL subdiagonals, and U is upper triangular with */
+/* KL+KU superdiagonals. */
+
+/* 3. If some U(i,i)=0, so that U is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 4. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 5. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* 6. If equilibration was used, the matrix X is premultiplied by */
+/* diag(C) (if TRANS = 'N') or diag(R) (if TRANS = 'T' or 'C') so */
+/* that it solves the original system before equilibration. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of the matrix A is */
+/* supplied on entry, and if not, whether the matrix A should be */
+/* equilibrated before it is factored. */
+/* = 'F': On entry, AFB and IPIV contain the factored form of */
+/* A. If EQUED is not 'N', the matrix A has been */
+/* equilibrated with scaling factors given by R and C. */
+/* AB, AFB, and IPIV are not modified. */
+/* = 'N': The matrix A will be copied to AFB and factored. */
+/* = 'E': The matrix A will be equilibrated if necessary, then */
+/* copied to AFB and factored. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations. */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Transpose) */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* On entry, the matrix A in band storage, in rows 1 to KL+KU+1. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(KU+1+i-j,j) = A(i,j) for max(1,j-KU)<=i<=min(N,j+kl) */
+
+/* If FACT = 'F' and EQUED is not 'N', then A must have been */
+/* equilibrated by the scaling factors in R and/or C. AB is not */
+/* modified if FACT = 'F' or 'N', or if FACT = 'E' and */
+/* EQUED = 'N' on exit. */
+
+/* On exit, if EQUED .ne. 'N', A is scaled as follows: */
+/* EQUED = 'R': A := diag(R) * A */
+/* EQUED = 'C': A := A * diag(C) */
+/* EQUED = 'B': A := diag(R) * A * diag(C). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KL+KU+1. */
+
+/* AFB (input or output) DOUBLE PRECISION array, dimension (LDAFB,N) */
+/* If FACT = 'F', then AFB is an input argument and on entry */
+/* contains details of the LU factorization of the band matrix */
+/* A, as computed by DGBTRF. U is stored as an upper triangular */
+/* band matrix with KL+KU superdiagonals in rows 1 to KL+KU+1, */
+/* and the multipliers used during the factorization are stored */
+/* in rows KL+KU+2 to 2*KL+KU+1. If EQUED .ne. 'N', then AFB is */
+/* the factored form of the equilibrated matrix A. */
+
+/* If FACT = 'N', then AFB is an output argument and on exit */
+/* returns details of the LU factorization of A. */
+
+/* If FACT = 'E', then AFB is an output argument and on exit */
+/* returns details of the LU factorization of the equilibrated */
+/* matrix A (see the description of AB for the form of the */
+/* equilibrated matrix). */
+
+/* LDAFB (input) INTEGER */
+/* The leading dimension of the array AFB. LDAFB >= 2*KL+KU+1. */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains the pivot indices from the factorization A = L*U */
+/* as computed by DGBTRF; row i of the matrix was interchanged */
+/* with row IPIV(i). */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains the pivot indices from the factorization A = L*U */
+/* of the original matrix A. */
+
+/* If FACT = 'E', then IPIV is an output argument and on exit */
+/* contains the pivot indices from the factorization A = L*U */
+/* of the equilibrated matrix A. */
+
+/* EQUED (input or output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration (always true if FACT = 'N'). */
+/* = 'R': Row equilibration, i.e., A has been premultiplied by */
+/* diag(R). */
+/* = 'C': Column equilibration, i.e., A has been postmultiplied */
+/* by diag(C). */
+/* = 'B': Both row and column equilibration, i.e., A has been */
+/* replaced by diag(R) * A * diag(C). */
+/* EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/* output argument. */
+
+/* R (input or output) DOUBLE PRECISION array, dimension (N) */
+/* The row scale factors for A. If EQUED = 'R' or 'B', A is */
+/* multiplied on the left by diag(R); if EQUED = 'N' or 'C', R */
+/* is not accessed. R is an input argument if FACT = 'F'; */
+/* otherwise, R is an output argument. If FACT = 'F' and */
+/* EQUED = 'R' or 'B', each element of R must be positive. */
+
+/* C (input or output) DOUBLE PRECISION array, dimension (N) */
+/* The column scale factors for A. If EQUED = 'C' or 'B', A is */
+/* multiplied on the right by diag(C); if EQUED = 'N' or 'R', C */
+/* is not accessed. C is an input argument if FACT = 'F'; */
+/* otherwise, C is an output argument. If FACT = 'F' and */
+/* EQUED = 'C' or 'B', each element of C must be positive. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, */
+/* if EQUED = 'N', B is not modified; */
+/* if TRANS = 'N' and EQUED = 'R' or 'B', B is overwritten by */
+/* diag(R)*B; */
+/* if TRANS = 'T' or 'C' and EQUED = 'C' or 'B', B is */
+/* overwritten by diag(C)*B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X */
+/* to the original system of equations. Note that A and B are */
+/* modified on exit if EQUED .ne. 'N', and the solution to the */
+/* equilibrated system is inv(diag(C))*X if TRANS = 'N' and */
+/* EQUED = 'C' or 'B', or inv(diag(R))*X if TRANS = 'T' or 'C' */
+/* and EQUED = 'R' or 'B'. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A after equilibration (if done). If RCOND is less than the */
+/* machine precision (in particular, if RCOND = 0), the matrix */
+/* is singular to working precision. This condition is */
+/* indicated by a return code of INFO > 0. */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (3*N) */
+/* On exit, WORK(1) contains the reciprocal pivot growth */
+/* factor norm(A)/norm(U). The "max absolute element" norm is */
+/* used. If WORK(1) is much less than 1, then the stability */
+/* of the LU factorization of the (equilibrated) matrix A */
+/* could be poor. This also means that the solution X, condition */
+/* estimator RCOND, and forward error bound FERR could be */
+/* unreliable. If factorization fails with 0<INFO<=N, then */
+/* WORK(1) contains the reciprocal pivot growth factor for the */
+/* leading INFO columns of A. */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: U(i,i) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, so the solution and error bounds */
+/* could not be computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset; /* f2c keeps Fortran 1-based indexing for all array arguments */
+    afb_dim1 = *ldafb;
+    afb_offset = 1 + afb_dim1;
+    afb -= afb_offset;
+    --ipiv;
+    --r__;
+    --c__;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    nofact = lsame_(fact, "N");
+    equil = lsame_(fact, "E");
+    notran = lsame_(trans, "N");
+    if (nofact || equil) {
+	*(unsigned char *)equed = 'N';
+	rowequ = FALSE_;
+	colequ = FALSE_;
+    } else {
+	rowequ = lsame_(equed, "R") || lsame_(equed,
+		"B");
+	colequ = lsame_(equed, "C") || lsame_(equed,
+		"B");
+	smlnum = dlamch_("Safe minimum");
+	bignum = 1. / smlnum; /* smlnum/bignum bound the user-supplied scale factors R and C below */
+    }
+
+/* Test the input parameters. */
+
+    if (! nofact && ! equil && ! lsame_(fact, "F")) {
+	*info = -1;
+    } else if (! notran && ! lsame_(trans, "T") && !
+	    lsame_(trans, "C")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*kl < 0) {
+	*info = -4;
+    } else if (*ku < 0) {
+	*info = -5;
+    } else if (*nrhs < 0) {
+	*info = -6;
+    } else if (*ldab < *kl + *ku + 1) {
+	*info = -8;
+    } else if (*ldafb < (*kl << 1) + *ku + 1) {
+	*info = -10;
+    } else if (lsame_(fact, "F") && ! (rowequ || colequ
+	    || lsame_(equed, "N"))) {
+	*info = -12;
+    } else {
+	if (rowequ) {
+	    rcmin = bignum;
+	    rcmax = 0.;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		d__1 = rcmin, d__2 = r__[j];
+		rcmin = min(d__1,d__2);
+/* Computing MAX */
+		d__1 = rcmax, d__2 = r__[j];
+		rcmax = max(d__1,d__2);
+/* L10: */
+	    }
+	    if (rcmin <= 0.) {
+		*info = -13;
+	    } else if (*n > 0) {
+		rowcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+	    } else {
+		rowcnd = 1.;
+	    }
+	}
+	if (colequ && *info == 0) {
+	    rcmin = bignum;
+	    rcmax = 0.;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		d__1 = rcmin, d__2 = c__[j];
+		rcmin = min(d__1,d__2);
+/* Computing MAX */
+		d__1 = rcmax, d__2 = c__[j];
+		rcmax = max(d__1,d__2);
+/* L20: */
+	    }
+	    if (rcmin <= 0.) {
+		*info = -14;
+	    } else if (*n > 0) {
+		colcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+	    } else {
+		colcnd = 1.;
+	    }
+	}
+	if (*info == 0) {
+	    if (*ldb < max(1,*n)) {
+		*info = -16;
+	    } else if (*ldx < max(1,*n)) {
+		*info = -18;
+	    }
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGBSVX", &i__1);
+	return 0;
+    }
+
+    if (equil) {
+
+/* Compute row and column scalings to equilibrate the matrix A. */
+
+	dgbequ_(n, n, kl, ku, &ab[ab_offset], ldab, &r__[1], &c__[1], &rowcnd,
+		 &colcnd, &amax, &infequ);
+	if (infequ == 0) {
+
+/* Equilibrate the matrix. */
+
+	    dlaqgb_(n, n, kl, ku, &ab[ab_offset], ldab, &r__[1], &c__[1], &
+		    rowcnd, &colcnd, &amax, equed);
+	    rowequ = lsame_(equed, "R") || lsame_(equed,
+		     "B");
+	    colequ = lsame_(equed, "C") || lsame_(equed,
+		     "B");
+	}
+    }
+
+/* Scale the right hand side. */
+
+    if (notran) {
+	if (rowequ) {
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    b[i__ + j * b_dim1] = r__[i__] * b[i__ + j * b_dim1];
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+    } else if (colequ) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *n;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		b[i__ + j * b_dim1] = c__[i__] * b[i__ + j * b_dim1];
+/* L50: */
+	    }
+/* L60: */
+	}
+    }
+
+    if (nofact || equil) {
+
+/* Compute the LU factorization of the band matrix A. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+	    i__2 = j - *ku;
+	    j1 = max(i__2,1);
+/* Computing MIN */
+	    i__2 = j + *kl;
+	    j2 = min(i__2,*n);
+	    i__2 = j2 - j1 + 1;
+	    dcopy_(&i__2, &ab[*ku + 1 - j + j1 + j * ab_dim1], &c__1, &afb[*
+		    kl + *ku + 1 - j + j1 + j * afb_dim1], &c__1); /* copy band column j into AFB, leaving KL spare rows for fill-in */
+/* L70: */
+	}
+
+	dgbtrf_(n, n, kl, ku, &afb[afb_offset], ldafb, &ipiv[1], info);
+
+/* Return if INFO is non-zero. */
+
+	if (*info > 0) {
+
+/* Compute the reciprocal pivot growth factor of the */
+/* leading rank-deficient INFO columns of A. */
+
+	    anorm = 0.;
+	    i__1 = *info;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+		i__2 = *ku + 2 - j;
+/* Computing MIN */
+		i__4 = *n + *ku + 1 - j, i__5 = *kl + *ku + 1;
+		i__3 = min(i__4,i__5);
+		for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+		    d__2 = anorm, d__3 = (d__1 = ab[i__ + j * ab_dim1], abs(
+			    d__1));
+		    anorm = max(d__2,d__3);
+/* L80: */
+		}
+/* L90: */
+	    }
+/* Computing MIN */
+	    i__3 = *info - 1, i__2 = *kl + *ku;
+	    i__1 = min(i__3,i__2);
+/* Computing MAX */
+	    i__4 = 1, i__5 = *kl + *ku + 2 - *info;
+	    rpvgrw = dlantb_("M", "U", "N", info, &i__1, &afb[max(i__4, i__5)
+		    + afb_dim1], ldafb, &work[1]);
+	    if (rpvgrw == 0.) {
+		rpvgrw = 1.;
+	    } else {
+		rpvgrw = anorm / rpvgrw;
+	    }
+	    work[1] = rpvgrw;
+	    *rcond = 0.;
+	    return 0;
+	}
+    }
+
+/* Compute the norm of the matrix A and the */
+/* reciprocal pivot growth factor RPVGRW. */
+
+    if (notran) {
+	*(unsigned char *)norm = '1';
+    } else {
+	*(unsigned char *)norm = 'I';
+    }
+    anorm = dlangb_(norm, n, kl, ku, &ab[ab_offset], ldab, &work[1]);
+    i__1 = *kl + *ku;
+    rpvgrw = dlantb_("M", "U", "N", n, &i__1, &afb[afb_offset], ldafb, &work[
+	    1]);
+    if (rpvgrw == 0.) {
+	rpvgrw = 1.;
+    } else {
+	rpvgrw = dlangb_("M", n, kl, ku, &ab[ab_offset], ldab, &work[1]) / rpvgrw;
+    }
+
+/* Compute the reciprocal of the condition number of A. */
+
+    dgbcon_(norm, n, kl, ku, &afb[afb_offset], ldafb, &ipiv[1], &anorm, rcond,
+	     &work[1], &iwork[1], info);
+
+/* Compute the solution matrix X. */
+
+    dlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    dgbtrs_(trans, n, kl, ku, nrhs, &afb[afb_offset], ldafb, &ipiv[1], &x[
+	    x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solution and */
+/* compute error bounds and backward error estimates for it. */
+
+    dgbrfs_(trans, n, kl, ku, nrhs, &ab[ab_offset], ldab, &afb[afb_offset],
+	    ldafb, &ipiv[1], &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1], &
+	    berr[1], &work[1], &iwork[1], info);
+
+/* Transform the solution matrix X to a solution of the original */
+/* system. */
+
+    if (notran) {
+	if (colequ) {
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		i__3 = *n;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    x[i__ + j * x_dim1] = c__[i__] * x[i__ + j * x_dim1];
+/* L100: */
+		}
+/* L110: */
+	    }
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		ferr[j] /= colcnd;
+/* L120: */
+	    }
+	}
+    } else if (rowequ) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    i__3 = *n;
+	    for (i__ = 1; i__ <= i__3; ++i__) {
+		x[i__ + j * x_dim1] = r__[i__] * x[i__ + j * x_dim1];
+/* L130: */
+	    }
+/* L140: */
+	}
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] /= rowcnd;
+/* L150: */
+	}
+    }
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+    if (*rcond < dlamch_("Epsilon")) {
+	*info = *n + 1;
+    }
+
+    work[1] = rpvgrw; /* return reciprocal pivot growth factor in WORK(1), per the contract above */
+    return 0;
+
+/* End of DGBSVX */
+
+} /* dgbsvx_ */
diff --git a/contrib/libs/clapack/dgbtf2.c b/contrib/libs/clapack/dgbtf2.c
new file mode 100644
index 0000000000..400eafb098
--- /dev/null
+++ b/contrib/libs/clapack/dgbtf2.c
@@ -0,0 +1,262 @@
+/* dgbtf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b9 = -1.;
+
+/* Subroutine */ int dgbtf2_(integer *m, integer *n, integer *kl, integer *ku,
+	 doublereal *ab, integer *ldab, integer *ipiv, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+    doublereal d__1;
+
+    /* Local variables */
+    integer i__, j, km, jp, ju, kv;
+    extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *), dscal_(integer *, doublereal *, doublereal *, integer
+	    *), dswap_(integer *, doublereal *, integer *, doublereal *,
+	    integer *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGBTF2 computes an LU factorization of a real m-by-n band matrix A */
+/* using partial pivoting with row interchanges. */
+
+/* This is the unblocked version of the algorithm, calling Level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* On entry, the matrix A in band storage, in rows KL+1 to */
+/* 2*KL+KU+1; rows 1 to KL of the array need not be set. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(kl+ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl) */
+
+/* On exit, details of the factorization: U is stored as an */
+/* upper triangular band matrix with KL+KU superdiagonals in */
+/* rows 1 to KL+KU+1, and the multipliers used during the */
+/* factorization are stored in rows KL+KU+2 to 2*KL+KU+1. */
+/* See below for further details. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= 2*KL+KU+1. */
+
+/* IPIV (output) INTEGER array, dimension (min(M,N)) */
+/* The pivot indices; for 1 <= i <= min(M,N), row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = +i, U(i,i) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and division by zero will occur if it is used */
+/* to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* M = N = 6, KL = 2, KU = 1: */
+
+/* On entry: On exit: */
+
+/* * * * + + + * * * u14 u25 u36 */
+/* * * + + + + * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+/* a21 a32 a43 a54 a65 * m21 m32 m43 m54 m65 * */
+/* a31 a42 a53 a64 * * m31 m42 m53 m64 * * */
+
+/* Array elements marked * are not used by the routine; elements marked */
+/* + need not be set on entry, but are required by the routine to store */
+/* elements of U, because of fill-in resulting from the row */
+/* interchanges. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* KV is the number of superdiagonals in the factor U, allowing for */
+/* fill-in. */
+
+    /* Parameter adjustments */
+    /* f2c keeps Fortran's 1-based indexing by shifting the base pointers, */
+    /* so AB(i,j) is ab[i__ + j * ab_dim1] and IPIV(j) is ipiv[j]. */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --ipiv;
+
+    /* Function Body */
+    kv = *ku + *kl;
+
+/* Test the input parameters. */
+
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kl < 0) {
+	*info = -3;
+    } else if (*ku < 0) {
+	*info = -4;
+    } else if (*ldab < *kl + kv + 1) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGBTF2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0) {
+	return 0;
+    }
+
+/* Gaussian elimination with partial pivoting */
+
+/* Set fill-in elements in columns KU+2 to KV to zero. */
+
+    i__1 = min(kv,*n);
+    for (j = *ku + 2; j <= i__1; ++j) {
+	i__2 = *kl;
+	for (i__ = kv - j + 2; i__ <= i__2; ++i__) {
+	    ab[i__ + j * ab_dim1] = 0.;
+/* L10: */
+	}
+/* L20: */
+    }
+
+/* JU is the index of the last column affected by the current stage */
+/* of the factorization. */
+
+    ju = 1;
+
+    i__1 = min(*m,*n);
+    for (j = 1; j <= i__1; ++j) {
+
+/* Set fill-in elements in column J+KV to zero. */
+
+	if (j + kv <= *n) {
+	    i__2 = *kl;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		ab[i__ + (j + kv) * ab_dim1] = 0.;
+/* L30: */
+	    }
+	}
+
+/* Find pivot and test for singularity. KM is the number of */
+/* subdiagonal elements in the current column. */
+
+/* Computing MIN */
+	i__2 = *kl, i__3 = *m - j;
+	km = min(i__2,i__3);
+	i__2 = km + 1;
+	jp = idamax_(&i__2, &ab[kv + 1 + j * ab_dim1], &c__1);
+	ipiv[j] = jp + j - 1;
+/*        JP from IDAMAX is an offset within the column segment that */
+/*        starts at the diagonal AB(KV+1,J); JP+J-1 is the global row. */
+	if (ab[kv + jp + j * ab_dim1] != 0.) {
+/* Computing MAX */
+/* Computing MIN */
+	    i__4 = j + *ku + jp - 1;
+	    i__2 = ju, i__3 = min(i__4,*n);
+	    ju = max(i__2,i__3);
+
+/* Apply interchange to columns J to JU. */
+
+/*           A row of the band matrix lies along stride LDAB-1 in AB, */
+/*           hence the increments passed to DSWAP and DGER below. */
+	    if (jp != 1) {
+		i__2 = ju - j + 1;
+		i__3 = *ldab - 1;
+		i__4 = *ldab - 1;
+		dswap_(&i__2, &ab[kv + jp + j * ab_dim1], &i__3, &ab[kv + 1 +
+			j * ab_dim1], &i__4);
+	    }
+
+	    if (km > 0) {
+
+/* Compute multipliers. */
+
+		d__1 = 1. / ab[kv + 1 + j * ab_dim1];
+		dscal_(&km, &d__1, &ab[kv + 2 + j * ab_dim1], &c__1);
+
+/* Update trailing submatrix within the band. */
+
+		if (ju > j) {
+		    i__2 = ju - j;
+		    i__3 = *ldab - 1;
+		    i__4 = *ldab - 1;
+		    dger_(&km, &i__2, &c_b9, &ab[kv + 2 + j * ab_dim1], &c__1,
+			     &ab[kv + (j + 1) * ab_dim1], &i__3, &ab[kv + 1 +
+			    (j + 1) * ab_dim1], &i__4);
+		}
+	    }
+	} else {
+
+/* If pivot is zero, set INFO to the index of the pivot */
+/* unless a zero pivot has already been found. */
+
+	    if (*info == 0) {
+		*info = j;
+	    }
+	}
+/* L40: */
+    }
+    return 0;
+
+/* End of DGBTF2 */
+
+} /* dgbtf2_ */
diff --git a/contrib/libs/clapack/dgbtrf.c b/contrib/libs/clapack/dgbtrf.c
new file mode 100644
index 0000000000..7b90c9bc8e
--- /dev/null
+++ b/contrib/libs/clapack/dgbtrf.c
@@ -0,0 +1,588 @@
+/* dgbtrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__65 = 65;
+static doublereal c_b18 = -1.;
+static doublereal c_b31 = 1.;
+
+/* Subroutine */ int dgbtrf_(integer *m, integer *n, integer *kl, integer *ku,
+	 doublereal *ab, integer *ldab, integer *ipiv, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+    doublereal d__1;
+
+    /* Local variables */
+    integer i__, j, i2, i3, j2, j3, k2, jb, nb, ii, jj, jm, ip, jp, km, ju,
+	    kv, nw;
+    extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *);
+    doublereal temp;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *), dgemm_(char *, char *, integer *, integer *, integer *
+, doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, integer *), dcopy_(
+	    integer *, doublereal *, integer *, doublereal *, integer *),
+	    dswap_(integer *, doublereal *, integer *, doublereal *, integer *
+);
+    doublereal work13[4160]	/* was [65][64] */, work31[4160]	/*
+	    was [65][64] */;
+    extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+	    integer *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *), dgbtf2_(
+	    integer *, integer *, integer *, integer *, doublereal *, integer
+	    *, integer *, integer *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int dlaswp_(integer *, doublereal *, integer *,
+	    integer *, integer *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGBTRF computes an LU factorization of a real m-by-n band matrix A */
+/* using partial pivoting with row interchanges. */
+
+/* This is the blocked version of the algorithm, calling Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* On entry, the matrix A in band storage, in rows KL+1 to */
+/* 2*KL+KU+1; rows 1 to KL of the array need not be set. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(kl+ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl) */
+
+/* On exit, details of the factorization: U is stored as an */
+/* upper triangular band matrix with KL+KU superdiagonals in */
+/* rows 1 to KL+KU+1, and the multipliers used during the */
+/* factorization are stored in rows KL+KU+2 to 2*KL+KU+1. */
+/* See below for further details. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= 2*KL+KU+1. */
+
+/* IPIV (output) INTEGER array, dimension (min(M,N)) */
+/* The pivot indices; for 1 <= i <= min(M,N), row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = +i, U(i,i) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and division by zero will occur if it is used */
+/* to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* M = N = 6, KL = 2, KU = 1: */
+
+/* On entry: On exit: */
+
+/* * * * + + + * * * u14 u25 u36 */
+/* * * + + + + * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+/* a21 a32 a43 a54 a65 * m21 m32 m43 m54 m65 * */
+/* a31 a42 a53 a64 * * m31 m42 m53 m64 * * */
+
+/* Array elements marked * are not used by the routine; elements marked */
+/* + need not be set on entry, but are required by the routine to store */
+/* elements of U because of fill-in resulting from the row interchanges. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* KV is the number of superdiagonals in the factor U, allowing for */
+/* fill-in */
+
+    /* Parameter adjustments */
+    /* f2c keeps Fortran's 1-based indexing by shifting the base pointers, */
+    /* so AB(i,j) is ab[i__ + j * ab_dim1] and IPIV(j) is ipiv[j]. */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --ipiv;
+
+    /* Function Body */
+    kv = *ku + *kl;
+
+/* Test the input parameters. */
+
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kl < 0) {
+	*info = -3;
+    } else if (*ku < 0) {
+	*info = -4;
+    } else if (*ldab < *kl + kv + 1) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGBTRF", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0) {
+	return 0;
+    }
+
+/* Determine the block size for this environment */
+
+    nb = ilaenv_(&c__1, "DGBTRF", " ", m, n, kl, ku);
+
+/* The block size must not exceed the limit set by the size of the */
+/* local arrays WORK13 and WORK31. */
+
+    nb = min(nb,64);
+
+    if (nb <= 1 || nb > *kl) {
+
+/* Use unblocked code */
+
+	dgbtf2_(m, n, kl, ku, &ab[ab_offset], ldab, &ipiv[1], info);
+    } else {
+
+/* Use blocked code */
+
+/* Zero the superdiagonal elements of the work array WORK13 */
+
+/*       WORK13/WORK31 are 65x64 Fortran work arrays; the C index */
+/*       i + j*65 - 66 maps the 1-based pair (i,j) to 0-based storage. */
+	i__1 = nb;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = j - 1;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		work13[i__ + j * 65 - 66] = 0.;
+/* L10: */
+	    }
+/* L20: */
+	}
+
+/* Zero the subdiagonal elements of the work array WORK31 */
+
+	i__1 = nb;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = nb;
+	    for (i__ = j + 1; i__ <= i__2; ++i__) {
+		work31[i__ + j * 65 - 66] = 0.;
+/* L30: */
+	    }
+/* L40: */
+	}
+
+/* Gaussian elimination with partial pivoting */
+
+/* Set fill-in elements in columns KU+2 to KV to zero */
+
+	i__1 = min(kv,*n);
+	for (j = *ku + 2; j <= i__1; ++j) {
+	    i__2 = *kl;
+	    for (i__ = kv - j + 2; i__ <= i__2; ++i__) {
+		ab[i__ + j * ab_dim1] = 0.;
+/* L50: */
+	    }
+/* L60: */
+	}
+
+/* JU is the index of the last column affected by the current */
+/* stage of the factorization */
+
+	ju = 1;
+
+/*       Blocked loop over column blocks: J = 1, MIN(M,N), step NB */
+/*       (the i__2 < 0 test is f2c's generic translation of a DO step). */
+	i__1 = min(*m,*n);
+	i__2 = nb;
+	for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+	    i__3 = nb, i__4 = min(*m,*n) - j + 1;
+	    jb = min(i__3,i__4);
+
+/* The active part of the matrix is partitioned */
+
+/* A11 A12 A13 */
+/* A21 A22 A23 */
+/* A31 A32 A33 */
+
+/* Here A11, A21 and A31 denote the current block of JB columns */
+/* which is about to be factorized. The number of rows in the */
+/* partitioning are JB, I2, I3 respectively, and the numbers */
+/* of columns are JB, J2, J3. The superdiagonal elements of A13 */
+/* and the subdiagonal elements of A31 lie outside the band. */
+
+/* Computing MIN */
+	    i__3 = *kl - jb, i__4 = *m - j - jb + 1;
+	    i2 = min(i__3,i__4);
+/* Computing MIN */
+	    i__3 = jb, i__4 = *m - j - *kl + 1;
+	    i3 = min(i__3,i__4);
+
+/* J2 and J3 are computed after JU has been updated. */
+
+/* Factorize the current block of JB columns */
+
+	    i__3 = j + jb - 1;
+	    for (jj = j; jj <= i__3; ++jj) {
+
+/* Set fill-in elements in column JJ+KV to zero */
+
+		if (jj + kv <= *n) {
+		    i__4 = *kl;
+		    for (i__ = 1; i__ <= i__4; ++i__) {
+			ab[i__ + (jj + kv) * ab_dim1] = 0.;
+/* L70: */
+		    }
+		}
+
+/* Find pivot and test for singularity. KM is the number of */
+/* subdiagonal elements in the current column. */
+
+/* Computing MIN */
+		i__4 = *kl, i__5 = *m - jj;
+		km = min(i__4,i__5);
+		i__4 = km + 1;
+		jp = idamax_(&i__4, &ab[kv + 1 + jj * ab_dim1], &c__1);
+		ipiv[jj] = jp + jj - j;
+/*              The pivot index is stored relative to the start of the */
+/*              block here and adjusted to a global index further below. */
+		if (ab[kv + jp + jj * ab_dim1] != 0.) {
+/* Computing MAX */
+/* Computing MIN */
+		    i__6 = jj + *ku + jp - 1;
+		    i__4 = ju, i__5 = min(i__6,*n);
+		    ju = max(i__4,i__5);
+		    if (jp != 1) {
+
+/* Apply interchange to columns J to J+JB-1 */
+
+			if (jp + jj - 1 < j + *kl) {
+
+			    i__4 = *ldab - 1;
+			    i__5 = *ldab - 1;
+			    dswap_(&jb, &ab[kv + 1 + jj - j + j * ab_dim1], &
+				    i__4, &ab[kv + jp + jj - j + j * ab_dim1],
+				     &i__5);
+			} else {
+
+/* The interchange affects columns J to JJ-1 of A31 */
+/* which are stored in the work array WORK31 */
+
+			    i__4 = jj - j;
+			    i__5 = *ldab - 1;
+			    dswap_(&i__4, &ab[kv + 1 + jj - j + j * ab_dim1],
+				    &i__5, &work31[jp + jj - j - *kl - 1], &
+				    c__65);
+			    i__4 = j + jb - jj;
+			    i__5 = *ldab - 1;
+			    i__6 = *ldab - 1;
+			    dswap_(&i__4, &ab[kv + 1 + jj * ab_dim1], &i__5, &
+				    ab[kv + jp + jj * ab_dim1], &i__6);
+			}
+		    }
+
+/* Compute multipliers */
+
+		    d__1 = 1. / ab[kv + 1 + jj * ab_dim1];
+		    dscal_(&km, &d__1, &ab[kv + 2 + jj * ab_dim1], &c__1);
+
+/* Update trailing submatrix within the band and within */
+/* the current block. JM is the index of the last column */
+/* which needs to be updated. */
+
+/* Computing MIN */
+		    i__4 = ju, i__5 = j + jb - 1;
+		    jm = min(i__4,i__5);
+		    if (jm > jj) {
+			i__4 = jm - jj;
+			i__5 = *ldab - 1;
+			i__6 = *ldab - 1;
+			dger_(&km, &i__4, &c_b18, &ab[kv + 2 + jj * ab_dim1],
+				&c__1, &ab[kv + (jj + 1) * ab_dim1], &i__5, &
+				ab[kv + 1 + (jj + 1) * ab_dim1], &i__6);
+		    }
+		} else {
+
+/* If pivot is zero, set INFO to the index of the pivot */
+/* unless a zero pivot has already been found. */
+
+		    if (*info == 0) {
+			*info = jj;
+		    }
+		}
+
+/* Copy current column of A31 into the work array WORK31 */
+
+/* Computing MIN */
+		i__4 = jj - j + 1;
+		nw = min(i__4,i3);
+		if (nw > 0) {
+		    dcopy_(&nw, &ab[kv + *kl + 1 - jj + j + jj * ab_dim1], &
+			    c__1, &work31[(jj - j + 1) * 65 - 65], &c__1);
+		}
+/* L80: */
+	    }
+	    if (j + jb <= *n) {
+
+/* Apply the row interchanges to the other blocks. */
+
+/* Computing MIN */
+		i__3 = ju - j + 1;
+		j2 = min(i__3,kv) - jb;
+/* Computing MAX */
+		i__3 = 0, i__4 = ju - j - kv + 1;
+		j3 = max(i__3,i__4);
+
+/* Use DLASWP to apply the row interchanges to A12, A22, and */
+/* A32. */
+
+		i__3 = *ldab - 1;
+		dlaswp_(&j2, &ab[kv + 1 - jb + (j + jb) * ab_dim1], &i__3, &
+			c__1, &jb, &ipiv[j], &c__1);
+
+/* Adjust the pivot indices. */
+
+		i__3 = j + jb - 1;
+		for (i__ = j; i__ <= i__3; ++i__) {
+		    ipiv[i__] = ipiv[i__] + j - 1;
+/* L90: */
+		}
+
+/* Apply the row interchanges to A13, A23, and A33 */
+/* columnwise. */
+
+		k2 = j - 1 + jb + j2;
+		i__3 = j3;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    jj = k2 + i__;
+		    i__4 = j + jb - 1;
+		    for (ii = j + i__ - 1; ii <= i__4; ++ii) {
+			ip = ipiv[ii];
+			if (ip != ii) {
+			    temp = ab[kv + 1 + ii - jj + jj * ab_dim1];
+			    ab[kv + 1 + ii - jj + jj * ab_dim1] = ab[kv + 1 +
+				    ip - jj + jj * ab_dim1];
+			    ab[kv + 1 + ip - jj + jj * ab_dim1] = temp;
+			}
+/* L100: */
+		    }
+/* L110: */
+		}
+
+/* Update the relevant part of the trailing submatrix */
+
+		if (j2 > 0) {
+
+/* Update A12 */
+
+		    i__3 = *ldab - 1;
+		    i__4 = *ldab - 1;
+		    dtrsm_("Left", "Lower", "No transpose", "Unit", &jb, &j2,
+			    &c_b31, &ab[kv + 1 + j * ab_dim1], &i__3, &ab[kv
+			    + 1 - jb + (j + jb) * ab_dim1], &i__4);
+
+		    if (i2 > 0) {
+
+/* Update A22 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			i__5 = *ldab - 1;
+			dgemm_("No transpose", "No transpose", &i2, &j2, &jb,
+				&c_b18, &ab[kv + 1 + jb + j * ab_dim1], &i__3,
+				&ab[kv + 1 - jb + (j + jb) * ab_dim1], &i__4,
+				&c_b31, &ab[kv + 1 + (j + jb) * ab_dim1], &
+				i__5);
+		    }
+
+		    if (i3 > 0) {
+
+/* Update A32 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			dgemm_("No transpose", "No transpose", &i3, &j2, &jb,
+				&c_b18, work31, &c__65, &ab[kv + 1 - jb + (j
+				+ jb) * ab_dim1], &i__3, &c_b31, &ab[kv + *kl
+				+ 1 - jb + (j + jb) * ab_dim1], &i__4);
+		    }
+		}
+
+		if (j3 > 0) {
+
+/* Copy the lower triangle of A13 into the work array */
+/* WORK13 */
+
+		    i__3 = j3;
+		    for (jj = 1; jj <= i__3; ++jj) {
+			i__4 = jb;
+			for (ii = jj; ii <= i__4; ++ii) {
+			    work13[ii + jj * 65 - 66] = ab[ii - jj + 1 + (jj
+				    + j + kv - 1) * ab_dim1];
+/* L120: */
+			}
+/* L130: */
+		    }
+
+/* Update A13 in the work array */
+
+		    i__3 = *ldab - 1;
+		    dtrsm_("Left", "Lower", "No transpose", "Unit", &jb, &j3,
+			    &c_b31, &ab[kv + 1 + j * ab_dim1], &i__3, work13,
+			    &c__65);
+
+		    if (i2 > 0) {
+
+/* Update A23 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			dgemm_("No transpose", "No transpose", &i2, &j3, &jb,
+				&c_b18, &ab[kv + 1 + jb + j * ab_dim1], &i__3,
+				work13, &c__65, &c_b31, &ab[jb + 1 + (j + kv)
+				* ab_dim1], &i__4);
+		    }
+
+		    if (i3 > 0) {
+
+/* Update A33 */
+
+			i__3 = *ldab - 1;
+			dgemm_("No transpose", "No transpose", &i3, &j3, &jb,
+				&c_b18, work31, &c__65, work13, &c__65, &
+				c_b31, &ab[*kl + 1 + (j + kv) * ab_dim1], &
+				i__3);
+		    }
+
+/* Copy the lower triangle of A13 back into place */
+
+		    i__3 = j3;
+		    for (jj = 1; jj <= i__3; ++jj) {
+			i__4 = jb;
+			for (ii = jj; ii <= i__4; ++ii) {
+			    ab[ii - jj + 1 + (jj + j + kv - 1) * ab_dim1] =
+				    work13[ii + jj * 65 - 66];
+/* L140: */
+			}
+/* L150: */
+		    }
+		}
+	    } else {
+
+/* Adjust the pivot indices. */
+
+		i__3 = j + jb - 1;
+		for (i__ = j; i__ <= i__3; ++i__) {
+		    ipiv[i__] = ipiv[i__] + j - 1;
+/* L160: */
+		}
+	    }
+
+/* Partially undo the interchanges in the current block to */
+/* restore the upper triangular form of A31 and copy the upper */
+/* triangle of A31 back into place */
+
+	    i__3 = j;
+	    for (jj = j + jb - 1; jj >= i__3; --jj) {
+		jp = ipiv[jj] - jj + 1;
+		if (jp != 1) {
+
+/* Apply interchange to columns J to JJ-1 */
+
+		    if (jp + jj - 1 < j + *kl) {
+
+/* The interchange does not affect A31 */
+
+			i__4 = jj - j;
+			i__5 = *ldab - 1;
+			i__6 = *ldab - 1;
+			dswap_(&i__4, &ab[kv + 1 + jj - j + j * ab_dim1], &
+				i__5, &ab[kv + jp + jj - j + j * ab_dim1], &
+				i__6);
+		    } else {
+
+/* The interchange does affect A31 */
+
+			i__4 = jj - j;
+			i__5 = *ldab - 1;
+			dswap_(&i__4, &ab[kv + 1 + jj - j + j * ab_dim1], &
+				i__5, &work31[jp + jj - j - *kl - 1], &c__65);
+		    }
+		}
+
+/* Copy the current column of A31 back into place */
+
+/* Computing MIN */
+		i__4 = i3, i__5 = jj - j + 1;
+		nw = min(i__4,i__5);
+		if (nw > 0) {
+		    dcopy_(&nw, &work31[(jj - j + 1) * 65 - 65], &c__1, &ab[
+			    kv + *kl + 1 - jj + j + jj * ab_dim1], &c__1);
+		}
+/* L170: */
+	    }
+/* L180: */
+	}
+    }
+
+    return 0;
+
+/* End of DGBTRF */
+
+} /* dgbtrf_ */
diff --git a/contrib/libs/clapack/dgbtrs.c b/contrib/libs/clapack/dgbtrs.c
new file mode 100644
index 0000000000..83d8460df1
--- /dev/null
+++ b/contrib/libs/clapack/dgbtrs.c
@@ -0,0 +1,244 @@
+/* dgbtrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b7 = -1.;
+static integer c__1 = 1;
+static doublereal c_b23 = 1.;
+
+/* Subroutine */ int dgbtrs_(char *trans, integer *n, integer *kl, integer *
+	ku, integer *nrhs, doublereal *ab, integer *ldab, integer *ipiv,
+	doublereal *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__, j, l, kd, lm;
+    extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, integer *), dswap_(integer *,
+	    doublereal *, integer *, doublereal *, integer *), dtbsv_(char *,
+	    char *, char *, integer *, integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    logical lnoti;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGBTRS solves a system of linear equations */
+/* A * X = B or A' * X = B */
+/* with a general band matrix A using the LU factorization computed */
+/* by DGBTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations. */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A'* X = B (Transpose) */
+/* = 'C': A'* X = B (Conjugate transpose = Transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AB (input) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* Details of the LU factorization of the band matrix A, as */
+/* computed by DGBTRF. U is stored as an upper triangular band */
+/* matrix with KL+KU superdiagonals in rows 1 to KL+KU+1, and */
+/* the multipliers used during the factorization are stored in */
+/* rows KL+KU+2 to 2*KL+KU+1. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= 2*KL+KU+1. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= N, row i of the matrix was */
+/* interchanged with row IPIV(i). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* f2c keeps Fortran's 1-based indexing by shifting the base pointers */
+    /* of AB, IPIV and B. */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    notran = lsame_(trans, "N");
+    if (! notran && ! lsame_(trans, "T") && ! lsame_(
+	    trans, "C")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kl < 0) {
+	*info = -3;
+    } else if (*ku < 0) {
+	*info = -4;
+    } else if (*nrhs < 0) {
+	*info = -5;
+    } else if (*ldab < (*kl << 1) + *ku + 1) {
+	*info = -7;
+    } else if (*ldb < max(1,*n)) {
+	*info = -10;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGBTRS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+/*     KD is the row of AB holding the diagonal of U; the KL multiplier */
+/*     rows of L start at row KD+1 (see the AB description above). */
+    kd = *ku + *kl + 1;
+/*     If KL = 0 there are no multipliers, L is the identity, and the */
+/*     L-solve / row-interchange steps below are skipped. */
+    lnoti = *kl > 0;
+
+    if (notran) {
+
+/* Solve A*X = B. */
+
+/* Solve L*X = B, overwriting B with X. */
+
+/* L is represented as a product of permutations and unit lower */
+/* triangular matrices L = P(1) * L(1) * ... * P(n-1) * L(n-1), */
+/* where each transformation L(i) is a rank-one modification of */
+/* the identity matrix. */
+
+	if (lnoti) {
+	    i__1 = *n - 1;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		i__2 = *kl, i__3 = *n - j;
+		lm = min(i__2,i__3);
+		l = ipiv[j];
+		if (l != j) {
+		    dswap_(nrhs, &b[l + b_dim1], ldb, &b[j + b_dim1], ldb);
+		}
+		dger_(&lm, nrhs, &c_b7, &ab[kd + 1 + j * ab_dim1], &c__1, &b[
+			j + b_dim1], ldb, &b[j + 1 + b_dim1], ldb);
+/* L10: */
+	    }
+	}
+
+	i__1 = *nrhs;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Solve U*X = B, overwriting B with X. */
+
+	    i__2 = *kl + *ku;
+	    dtbsv_("Upper", "No transpose", "Non-unit", n, &i__2, &ab[
+		    ab_offset], ldab, &b[i__ * b_dim1 + 1], &c__1);
+/* L20: */
+	}
+
+    } else {
+
+/* Solve A'*X = B. */
+
+	i__1 = *nrhs;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Solve U'*X = B, overwriting B with X. */
+
+	    i__2 = *kl + *ku;
+	    dtbsv_("Upper", "Transpose", "Non-unit", n, &i__2, &ab[ab_offset],
+		    ldab, &b[i__ * b_dim1 + 1], &c__1);
+/* L30: */
+	}
+
+/* Solve L'*X = B, overwriting B with X. */
+
+	if (lnoti) {
+	    for (j = *n - 1; j >= 1; --j) {
+/* Computing MIN */
+		i__1 = *kl, i__2 = *n - j;
+		lm = min(i__1,i__2);
+		dgemv_("Transpose", &lm, nrhs, &c_b7, &b[j + 1 + b_dim1], ldb,
+			&ab[kd + 1 + j * ab_dim1], &c__1, &c_b23, &b[j +
+			b_dim1], ldb);
+		l = ipiv[j];
+		if (l != j) {
+		    dswap_(nrhs, &b[l + b_dim1], ldb, &b[j + b_dim1], ldb);
+		}
+/* L40: */
+	    }
+	}
+    }
+    return 0;
+
+/* End of DGBTRS */
+
+} /* dgbtrs_ */
diff --git a/contrib/libs/clapack/dgebak.c b/contrib/libs/clapack/dgebak.c
new file mode 100644
index 0000000000..db1af91420
--- /dev/null
+++ b/contrib/libs/clapack/dgebak.c
@@ -0,0 +1,237 @@
+/* dgebak.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dgebak_(char *job, char *side, integer *n, integer *ilo,
+ integer *ihi, doublereal *scale, integer *m, doublereal *v, integer *
+ ldv, integer *info)
+{
+ /* System generated locals */
+ integer v_dim1, v_offset, i__1;
+
+ /* Local variables */
+ integer i__, k;
+ doublereal s;
+ integer ii;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ logical leftv;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical rightv;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGEBAK forms the right or left eigenvectors of a real general matrix */
+/* by backward transformation on the computed eigenvectors of the */
+/* balanced matrix output by DGEBAL. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies the type of backward transformation required: */
+/* = 'N', do nothing, return immediately; */
+/* = 'P', do backward transformation for permutation only; */
+/* = 'S', do backward transformation for scaling only; */
+/* = 'B', do backward transformations for both permutation and */
+/* scaling. */
+/* JOB must be the same as the argument JOB supplied to DGEBAL. */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'R': V contains right eigenvectors; */
+/* = 'L': V contains left eigenvectors. */
+
+/* N (input) INTEGER */
+/* The number of rows of the matrix V. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* The integers ILO and IHI determined by DGEBAL. */
+/* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/* SCALE (input) DOUBLE PRECISION array, dimension (N) */
+/* Details of the permutation and scaling factors, as returned */
+/* by DGEBAL. */
+
+/* M (input) INTEGER */
+/* The number of columns of the matrix V. M >= 0. */
+
+/* V (input/output) DOUBLE PRECISION array, dimension (LDV,M) */
+/* On entry, the matrix of right or left eigenvectors to be */
+/* transformed, as returned by DHSEIN or DTREVC. */
+/* On exit, V is overwritten by the transformed eigenvectors. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. LDV >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and Test the input parameters */
+
+ /* Parameter adjustments */
+ --scale;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+
+ /* Function Body */
+ rightv = lsame_(side, "R");
+ leftv = lsame_(side, "L");
+
+ *info = 0;
+ if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+ && ! lsame_(job, "B")) {
+ *info = -1;
+ } else if (! rightv && ! leftv) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ilo < 1 || *ilo > max(1,*n)) {
+ *info = -4;
+ } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+ *info = -5;
+ } else if (*m < 0) {
+ *info = -7;
+ } else if (*ldv < max(1,*n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGEBAK", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*m == 0) {
+ return 0;
+ }
+ if (lsame_(job, "N")) {
+ return 0;
+ }
+
+ if (*ilo == *ihi) {
+ goto L30;
+ }
+
+/* Backward balance */
+
+ if (lsame_(job, "S") || lsame_(job, "B")) {
+
+ if (rightv) {
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ s = scale[i__];
+ dscal_(m, &s, &v[i__ + v_dim1], ldv);
+/* L10: */
+ }
+ }
+
+ if (leftv) {
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ s = 1. / scale[i__];
+ dscal_(m, &s, &v[i__ + v_dim1], ldv);
+/* L20: */
+ }
+ }
+
+ }
+
+/* Backward permutation */
+
+/* For I = ILO-1 step -1 until 1, */
+/* IHI+1 step 1 until N do -- */
+
+L30:
+ if (lsame_(job, "P") || lsame_(job, "B")) {
+ if (rightv) {
+ i__1 = *n;
+ for (ii = 1; ii <= i__1; ++ii) {
+ i__ = ii;
+ if (i__ >= *ilo && i__ <= *ihi) {
+ goto L40;
+ }
+ if (i__ < *ilo) {
+ i__ = *ilo - ii;
+ }
+ k = (integer) scale[i__];
+ if (k == i__) {
+ goto L40;
+ }
+ dswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L40:
+ ;
+ }
+ }
+
+ if (leftv) {
+ i__1 = *n;
+ for (ii = 1; ii <= i__1; ++ii) {
+ i__ = ii;
+ if (i__ >= *ilo && i__ <= *ihi) {
+ goto L50;
+ }
+ if (i__ < *ilo) {
+ i__ = *ilo - ii;
+ }
+ k = (integer) scale[i__];
+ if (k == i__) {
+ goto L50;
+ }
+ dswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L50:
+ ;
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DGEBAK */
+
+} /* dgebak_ */
diff --git a/contrib/libs/clapack/dgebal.c b/contrib/libs/clapack/dgebal.c
new file mode 100644
index 0000000000..aef5ab825f
--- /dev/null
+++ b/contrib/libs/clapack/dgebal.c
@@ -0,0 +1,402 @@
+/* dgebal.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dgebal_(char *job, integer *n, doublereal *a, integer *
+ lda, integer *ilo, integer *ihi, doublereal *scale, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ doublereal c__, f, g;
+ integer i__, j, k, l, m;
+ doublereal r__, s, ca, ra;
+ integer ica, ira, iexc;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ doublereal sfmin1, sfmin2, sfmax1, sfmax2;
+ extern doublereal dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noconv;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGEBAL balances a general real matrix A. This involves, first, */
+/* permuting A by a similarity transformation to isolate eigenvalues */
+/* in the first 1 to ILO-1 and last IHI+1 to N elements on the */
+/* diagonal; and second, applying a diagonal similarity transformation */
+/* to rows and columns ILO to IHI to make the rows and columns as */
+/* close in norm as possible. Both steps are optional. */
+
+/* Balancing may reduce the 1-norm of the matrix, and improve the */
+/* accuracy of the computed eigenvalues and/or eigenvectors. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies the operations to be performed on A: */
+/* = 'N': none: simply set ILO = 1, IHI = N, SCALE(I) = 1.0 */
+/* for i = 1,...,N; */
+/* = 'P': permute only; */
+/* = 'S': scale only; */
+/* = 'B': both permute and scale. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the input matrix A. */
+/* On exit, A is overwritten by the balanced matrix. */
+/* If JOB = 'N', A is not referenced. */
+/* See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* ILO (output) INTEGER */
+/* IHI (output) INTEGER */
+/* ILO and IHI are set to integers such that on exit */
+/* A(i,j) = 0 if i > j and j = 1,...,ILO-1 or I = IHI+1,...,N. */
+/* If JOB = 'N' or 'S', ILO = 1 and IHI = N. */
+
+/* SCALE (output) DOUBLE PRECISION array, dimension (N) */
+/* Details of the permutations and scaling factors applied to */
+/* A. If P(j) is the index of the row and column interchanged */
+/* with row and column j and D(j) is the scaling factor */
+/* applied to row and column j, then */
+/* SCALE(j) = P(j) for j = 1,...,ILO-1 */
+/* = D(j) for j = ILO,...,IHI */
+/* = P(j) for j = IHI+1,...,N. */
+/* The order in which the interchanges are made is N to IHI+1, */
+/* then 1 to ILO-1. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The permutations consist of row and column interchanges which put */
+/* the matrix in the form */
+
+/* ( T1 X Y ) */
+/* P A P = ( 0 B Z ) */
+/* ( 0 0 T2 ) */
+
+/* where T1 and T2 are upper triangular matrices whose eigenvalues lie */
+/* along the diagonal. The column indices ILO and IHI mark the starting */
+/* and ending columns of the submatrix B. Balancing consists of applying */
+/* a diagonal similarity transformation inv(D) * B * D to make the */
+/* 1-norms of each row of B and its corresponding column nearly equal. */
+/* The output matrix is */
+
+/* ( T1 X*D Y ) */
+/* ( 0 inv(D)*B*D inv(D)*Z ). */
+/* ( 0 0 T2 ) */
+
+/* Information about the permutations P and the diagonal matrix D is */
+/* returned in the vector SCALE. */
+
+/* This subroutine is based on the EISPACK routine BALANC. */
+
+/* Modified by Tzu-Yi Chen, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --scale;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+ && ! lsame_(job, "B")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGEBAL", &i__1);
+ return 0;
+ }
+
+ k = 1;
+ l = *n;
+
+ if (*n == 0) {
+ goto L210;
+ }
+
+ if (lsame_(job, "N")) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ scale[i__] = 1.;
+/* L10: */
+ }
+ goto L210;
+ }
+
+ if (lsame_(job, "S")) {
+ goto L120;
+ }
+
+/* Permutation to isolate eigenvalues if possible */
+
+ goto L50;
+
+/* Row and column exchange. */
+
+L20:
+ scale[m] = (doublereal) j;
+ if (j == m) {
+ goto L30;
+ }
+
+ dswap_(&l, &a[j * a_dim1 + 1], &c__1, &a[m * a_dim1 + 1], &c__1);
+ i__1 = *n - k + 1;
+ dswap_(&i__1, &a[j + k * a_dim1], lda, &a[m + k * a_dim1], lda);
+
+L30:
+ switch (iexc) {
+ case 1: goto L40;
+ case 2: goto L80;
+ }
+
+/* Search for rows isolating an eigenvalue and push them down. */
+
+L40:
+ if (l == 1) {
+ goto L210;
+ }
+ --l;
+
+L50:
+ for (j = l; j >= 1; --j) {
+
+ i__1 = l;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (i__ == j) {
+ goto L60;
+ }
+ if (a[j + i__ * a_dim1] != 0.) {
+ goto L70;
+ }
+L60:
+ ;
+ }
+
+ m = l;
+ iexc = 1;
+ goto L20;
+L70:
+ ;
+ }
+
+ goto L90;
+
+/* Search for columns isolating an eigenvalue and push them left. */
+
+L80:
+ ++k;
+
+L90:
+ i__1 = l;
+ for (j = k; j <= i__1; ++j) {
+
+ i__2 = l;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ if (i__ == j) {
+ goto L100;
+ }
+ if (a[i__ + j * a_dim1] != 0.) {
+ goto L110;
+ }
+L100:
+ ;
+ }
+
+ m = k;
+ iexc = 2;
+ goto L20;
+L110:
+ ;
+ }
+
+L120:
+ i__1 = l;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ scale[i__] = 1.;
+/* L130: */
+ }
+
+ if (lsame_(job, "P")) {
+ goto L210;
+ }
+
+/* Balance the submatrix in rows K to L. */
+
+/* Iterative loop for norm reduction */
+
+ sfmin1 = dlamch_("S") / dlamch_("P");
+ sfmax1 = 1. / sfmin1;
+ sfmin2 = sfmin1 * 2.;
+ sfmax2 = 1. / sfmin2;
+L140:
+ noconv = FALSE_;
+
+ i__1 = l;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ c__ = 0.;
+ r__ = 0.;
+
+ i__2 = l;
+ for (j = k; j <= i__2; ++j) {
+ if (j == i__) {
+ goto L150;
+ }
+ c__ += (d__1 = a[j + i__ * a_dim1], abs(d__1));
+ r__ += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+L150:
+ ;
+ }
+ ica = idamax_(&l, &a[i__ * a_dim1 + 1], &c__1);
+ ca = (d__1 = a[ica + i__ * a_dim1], abs(d__1));
+ i__2 = *n - k + 1;
+ ira = idamax_(&i__2, &a[i__ + k * a_dim1], lda);
+ ra = (d__1 = a[i__ + (ira + k - 1) * a_dim1], abs(d__1));
+
+/* Guard against zero C or R due to underflow. */
+
+ if (c__ == 0. || r__ == 0.) {
+ goto L200;
+ }
+ g = r__ / 2.;
+ f = 1.;
+ s = c__ + r__;
+L160:
+/* Computing MAX */
+ d__1 = max(f,c__);
+/* Computing MIN */
+ d__2 = min(r__,g);
+ if (c__ >= g || max(d__1,ca) >= sfmax2 || min(d__2,ra) <= sfmin2) {
+ goto L170;
+ }
+ f *= 2.;
+ c__ *= 2.;
+ ca *= 2.;
+ r__ /= 2.;
+ g /= 2.;
+ ra /= 2.;
+ goto L160;
+
+L170:
+ g = c__ / 2.;
+L180:
+/* Computing MIN */
+ d__1 = min(f,c__), d__1 = min(d__1,g);
+ if (g < r__ || max(r__,ra) >= sfmax2 || min(d__1,ca) <= sfmin2) {
+ goto L190;
+ }
+ f /= 2.;
+ c__ /= 2.;
+ g /= 2.;
+ ca /= 2.;
+ r__ *= 2.;
+ ra *= 2.;
+ goto L180;
+
+/* Now balance. */
+
+L190:
+ if (c__ + r__ >= s * .95) {
+ goto L200;
+ }
+ if (f < 1. && scale[i__] < 1.) {
+ if (f * scale[i__] <= sfmin1) {
+ goto L200;
+ }
+ }
+ if (f > 1. && scale[i__] > 1.) {
+ if (scale[i__] >= sfmax1 / f) {
+ goto L200;
+ }
+ }
+ g = 1. / f;
+ scale[i__] *= f;
+ noconv = TRUE_;
+
+ i__2 = *n - k + 1;
+ dscal_(&i__2, &g, &a[i__ + k * a_dim1], lda);
+ dscal_(&l, &f, &a[i__ * a_dim1 + 1], &c__1);
+
+L200:
+ ;
+ }
+
+ if (noconv) {
+ goto L140;
+ }
+
+L210:
+ *ilo = k;
+ *ihi = l;
+
+ return 0;
+
+/* End of DGEBAL */
+
+} /* dgebal_ */
diff --git a/contrib/libs/clapack/dgebd2.c b/contrib/libs/clapack/dgebd2.c
new file mode 100644
index 0000000000..e2e5472edb
--- /dev/null
+++ b/contrib/libs/clapack/dgebd2.c
@@ -0,0 +1,304 @@
+/* dgebd2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dgebd2_(integer *m, integer *n, doublereal *a, integer *
+ lda, doublereal *d__, doublereal *e, doublereal *tauq, doublereal *
+ taup, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *), dlarfg_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGEBD2 reduces a real general m by n matrix A to upper or lower */
+/* bidiagonal form B by an orthogonal transformation: Q' * A * P = B. */
+
+/* If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows in the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns in the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the m by n general matrix to be reduced. */
+/* On exit, */
+/* if m >= n, the diagonal and the first superdiagonal are */
+/* overwritten with the upper bidiagonal matrix B; the */
+/* elements below the diagonal, with the array TAUQ, represent */
+/* the orthogonal matrix Q as a product of elementary */
+/* reflectors, and the elements above the first superdiagonal, */
+/* with the array TAUP, represent the orthogonal matrix P as */
+/* a product of elementary reflectors; */
+/* if m < n, the diagonal and the first subdiagonal are */
+/* overwritten with the lower bidiagonal matrix B; the */
+/* elements below the first subdiagonal, with the array TAUQ, */
+/* represent the orthogonal matrix Q as a product of */
+/* elementary reflectors, and the elements above the diagonal, */
+/* with the array TAUP, represent the orthogonal matrix P as */
+/* a product of elementary reflectors. */
+/* See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* D (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The diagonal elements of the bidiagonal matrix B: */
+/* D(i) = A(i,i). */
+
+/* E (output) DOUBLE PRECISION array, dimension (min(M,N)-1) */
+/* The off-diagonal elements of the bidiagonal matrix B: */
+/* if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1; */
+/* if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1. */
+
+/* TAUQ (output) DOUBLE PRECISION array dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix Q. See Further Details. */
+
+/* TAUP (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix P. See Further Details. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (max(M,N)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrices Q and P are represented as products of elementary */
+/* reflectors: */
+
+/* If m >= n, */
+
+/* Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1) */
+
+/* Each H(i) and G(i) has the form: */
+
+/* H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' */
+
+/* where tauq and taup are real scalars, and v and u are real vectors; */
+/* v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in A(i+1:m,i); */
+/* u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in A(i,i+2:n); */
+/* tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* If m < n, */
+
+/* Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m) */
+
+/* Each H(i) and G(i) has the form: */
+
+/* H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' */
+
+/* where tauq and taup are real scalars, and v and u are real vectors; */
+/* v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i); */
+/* u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n); */
+/* tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* The contents of A on exit are illustrated by the following examples: */
+
+/* m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): */
+
+/* ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 ) */
+/* ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 ) */
+/* ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 ) */
+/* ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 ) */
+/* ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 ) */
+/* ( v1 v2 v3 v4 v5 ) */
+
+/* where d and e denote diagonal and off-diagonal elements of B, vi */
+/* denotes an element of the vector defining H(i), and ui an element of */
+/* the vector defining G(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tauq;
+ --taup;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info < 0) {
+ i__1 = -(*info);
+ xerbla_("DGEBD2", &i__1);
+ return 0;
+ }
+
+ if (*m >= *n) {
+
+/* Reduce to upper bidiagonal form */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3, *m)+ i__ *
+ a_dim1], &c__1, &tauq[i__]);
+ d__[i__] = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.;
+
+/* Apply H(i) to A(i:m,i+1:n) from the left */
+
+ if (i__ < *n) {
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ dlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &
+ tauq[i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]
+);
+ }
+ a[i__ + i__ * a_dim1] = d__[i__];
+
+ if (i__ < *n) {
+
+/* Generate elementary reflector G(i) to annihilate */
+/* A(i,i+2:n) */
+
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ dlarfg_(&i__2, &a[i__ + (i__ + 1) * a_dim1], &a[i__ + min(
+ i__3, *n)* a_dim1], lda, &taup[i__]);
+ e[i__] = a[i__ + (i__ + 1) * a_dim1];
+ a[i__ + (i__ + 1) * a_dim1] = 1.;
+
+/* Apply G(i) to A(i+1:m,i+1:n) from the right */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ dlarf_("Right", &i__2, &i__3, &a[i__ + (i__ + 1) * a_dim1],
+ lda, &taup[i__], &a[i__ + 1 + (i__ + 1) * a_dim1],
+ lda, &work[1]);
+ a[i__ + (i__ + 1) * a_dim1] = e[i__];
+ } else {
+ taup[i__] = 0.;
+ }
+/* L10: */
+ }
+ } else {
+
+/* Reduce to lower bidiagonal form */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector G(i) to annihilate A(i,i+1:n) */
+
+ i__2 = *n - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3, *n)*
+ a_dim1], lda, &taup[i__]);
+ d__[i__] = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.;
+
+/* Apply G(i) to A(i+1:m,i:n) from the right */
+
+ if (i__ < *m) {
+ i__2 = *m - i__;
+ i__3 = *n - i__ + 1;
+ dlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &
+ taup[i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+ }
+ a[i__ + i__ * a_dim1] = d__[i__];
+
+ if (i__ < *m) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(i+2:m,i) */
+
+ i__2 = *m - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3, *m)+
+ i__ * a_dim1], &c__1, &tauq[i__]);
+ e[i__] = a[i__ + 1 + i__ * a_dim1];
+ a[i__ + 1 + i__ * a_dim1] = 1.;
+
+/* Apply H(i) to A(i+1:m,i+1:n) from the left */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ dlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &
+ c__1, &tauq[i__], &a[i__ + 1 + (i__ + 1) * a_dim1],
+ lda, &work[1]);
+ a[i__ + 1 + i__ * a_dim1] = e[i__];
+ } else {
+ tauq[i__] = 0.;
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of DGEBD2 */
+
+} /* dgebd2_ */
diff --git a/contrib/libs/clapack/dgebrd.c b/contrib/libs/clapack/dgebrd.c
new file mode 100644
index 0000000000..d5202c83d6
--- /dev/null
+++ b/contrib/libs/clapack/dgebrd.c
@@ -0,0 +1,336 @@
+/* dgebrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+static doublereal c_b21 = -1.;
+static doublereal c_b22 = 1.;
+
+/* Subroutine */ int dgebrd_(integer *m, integer *n, doublereal *a, integer *
+ lda, doublereal *d__, doublereal *e, doublereal *tauq, doublereal *
+ taup, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j, nb, nx;
+ doublereal ws;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ integer nbmin, iinfo, minmn;
+ extern /* Subroutine */ int dgebd2_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *), dlabrd_(integer *, integer *, integer *
+, doublereal *, integer *, doublereal *, doublereal *, doublereal
+ *, doublereal *, doublereal *, integer *, doublereal *, integer *)
+ , xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ldwrkx, ldwrky, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGEBRD reduces a general real M-by-N matrix A to upper or lower */
+/* bidiagonal form B by an orthogonal transformation: Q**T * A * P = B. */
+
+/* If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows in the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns in the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N general matrix to be reduced. */
+/* On exit, */
+/* if m >= n, the diagonal and the first superdiagonal are */
+/* overwritten with the upper bidiagonal matrix B; the */
+/* elements below the diagonal, with the array TAUQ, represent */
+/* the orthogonal matrix Q as a product of elementary */
+/* reflectors, and the elements above the first superdiagonal, */
+/* with the array TAUP, represent the orthogonal matrix P as */
+/* a product of elementary reflectors; */
+/* if m < n, the diagonal and the first subdiagonal are */
+/* overwritten with the lower bidiagonal matrix B; the */
+/* elements below the first subdiagonal, with the array TAUQ, */
+/* represent the orthogonal matrix Q as a product of */
+/* elementary reflectors, and the elements above the diagonal, */
+/* with the array TAUP, represent the orthogonal matrix P as */
+/* a product of elementary reflectors. */
+/* See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* D (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The diagonal elements of the bidiagonal matrix B: */
+/* D(i) = A(i,i). */
+
+/* E (output) DOUBLE PRECISION array, dimension (min(M,N)-1) */
+/* The off-diagonal elements of the bidiagonal matrix B: */
+/* if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1; */
+/* if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1. */
+
+/* TAUQ (output) DOUBLE PRECISION array dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix Q. See Further Details. */
+
+/* TAUP (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix P. See Further Details. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,M,N). */
+/* For optimum performance LWORK >= (M+N)*NB, where NB */
+/* is the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrices Q and P are represented as products of elementary */
+/* reflectors: */
+
+/* If m >= n, */
+
+/* Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1) */
+
+/* Each H(i) and G(i) has the form: */
+
+/* H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' */
+
+/* where tauq and taup are real scalars, and v and u are real vectors; */
+/* v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in A(i+1:m,i); */
+/* u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in A(i,i+2:n); */
+/* tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* If m < n, */
+
+/* Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m) */
+
+/* Each H(i) and G(i) has the form: */
+
+/* H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' */
+
+/* where tauq and taup are real scalars, and v and u are real vectors; */
+/* v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i); */
+/* u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n); */
+/* tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* The contents of A on exit are illustrated by the following examples: */
+
+/* m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): */
+
+/* ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 ) */
+/* ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 ) */
+/* ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 ) */
+/* ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 ) */
+/* ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 ) */
+/* ( v1 v2 v3 v4 v5 ) */
+
+/* where d and e denote diagonal and off-diagonal elements of B, vi */
+/* denotes an element of the vector defining H(i), and ui an element of */
+/* the vector defining G(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tauq;
+ --taup;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+/* Computing MAX */
+ i__1 = 1, i__2 = ilaenv_(&c__1, "DGEBRD", " ", m, n, &c_n1, &c_n1);
+ nb = max(i__1,i__2);
+ lwkopt = (*m + *n) * nb;
+ work[1] = (doublereal) lwkopt;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = max(1,*m);
+ if (*lwork < max(i__1,*n) && ! lquery) {
+ *info = -10;
+ }
+ }
+ if (*info < 0) {
+ i__1 = -(*info);
+ xerbla_("DGEBRD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ minmn = min(*m,*n);
+ if (minmn == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ ws = (doublereal) max(*m,*n);
+ ldwrkx = *m;
+ ldwrky = *n;
+
+ if (nb > 1 && nb < minmn) {
+
+/* Set the crossover point NX. */
+
+/* Computing MAX */
+ i__1 = nb, i__2 = ilaenv_(&c__3, "DGEBRD", " ", m, n, &c_n1, &c_n1);
+ nx = max(i__1,i__2);
+
+/* Determine when to switch from blocked to unblocked code. */
+
+ if (nx < minmn) {
+ ws = (doublereal) ((*m + *n) * nb);
+ if ((doublereal) (*lwork) < ws) {
+
+/* Not enough work space for the optimal NB, consider using */
+/* a smaller block size. */
+
+ nbmin = ilaenv_(&c__2, "DGEBRD", " ", m, n, &c_n1, &c_n1);
+ if (*lwork >= (*m + *n) * nbmin) {
+ nb = *lwork / (*m + *n);
+ } else {
+ nb = 1;
+ nx = minmn;
+ }
+ }
+ }
+ } else {
+ nx = minmn;
+ }
+
+ i__1 = minmn - nx;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+
+/* Reduce rows and columns i:i+nb-1 to bidiagonal form and return */
+/* the matrices X and Y which are needed to update the unreduced */
+/* part of the matrix */
+
+ i__3 = *m - i__ + 1;
+ i__4 = *n - i__ + 1;
+ dlabrd_(&i__3, &i__4, &nb, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[
+ i__], &tauq[i__], &taup[i__], &work[1], &ldwrkx, &work[ldwrkx
+ * nb + 1], &ldwrky);
+
+/* Update the trailing submatrix A(i+nb:m,i+nb:n), using an update */
+/* of the form A := A - V*Y' - X*U' */
+
+ i__3 = *m - i__ - nb + 1;
+ i__4 = *n - i__ - nb + 1;
+ dgemm_("No transpose", "Transpose", &i__3, &i__4, &nb, &c_b21, &a[i__
+ + nb + i__ * a_dim1], lda, &work[ldwrkx * nb + nb + 1], &
+ ldwrky, &c_b22, &a[i__ + nb + (i__ + nb) * a_dim1], lda);
+ i__3 = *m - i__ - nb + 1;
+ i__4 = *n - i__ - nb + 1;
+ dgemm_("No transpose", "No transpose", &i__3, &i__4, &nb, &c_b21, &
+ work[nb + 1], &ldwrkx, &a[i__ + (i__ + nb) * a_dim1], lda, &
+ c_b22, &a[i__ + nb + (i__ + nb) * a_dim1], lda);
+
+/* Copy diagonal and off-diagonal elements of B back into A */
+
+ if (*m >= *n) {
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ a[j + j * a_dim1] = d__[j];
+ a[j + (j + 1) * a_dim1] = e[j];
+/* L10: */
+ }
+ } else {
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ a[j + j * a_dim1] = d__[j];
+ a[j + 1 + j * a_dim1] = e[j];
+/* L20: */
+ }
+ }
+/* L30: */
+ }
+
+/* Use unblocked code to reduce the remainder of the matrix */
+
+ i__2 = *m - i__ + 1;
+ i__1 = *n - i__ + 1;
+ dgebd2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__], &
+ tauq[i__], &taup[i__], &work[1], &iinfo);
+ work[1] = ws;
+ return 0;
+
+/* End of DGEBRD */
+
+} /* dgebrd_ */
diff --git a/contrib/libs/clapack/dgecon.c b/contrib/libs/clapack/dgecon.c
new file mode 100644
index 0000000000..ba86cf68e6
--- /dev/null
+++ b/contrib/libs/clapack/dgecon.c
@@ -0,0 +1,226 @@
+/* dgecon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dgecon_(char *norm, integer *n, doublereal *a, integer *
+ lda, doublereal *anorm, doublereal *rcond, doublereal *work, integer *
+ iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1;
+ doublereal d__1;
+
+ /* Local variables */
+ doublereal sl;
+ integer ix;
+ doublereal su;
+ integer kase, kase1;
+ doublereal scale;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int drscl_(integer *, doublereal *, doublereal *,
+ integer *), dlacn2_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ extern doublereal dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal ainvnm;
+ extern /* Subroutine */ int dlatrs_(char *, char *, char *, char *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *);
+ logical onenrm;
+ char normin[1];
+ doublereal smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGECON estimates the reciprocal of the condition number of a general */
+/* real matrix A, in either the 1-norm or the infinity-norm, using */
+/* the LU factorization computed by DGETRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as */
+/* RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The factors L and U from the factorization A = P*L*U */
+/* as computed by DGETRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* ANORM (input) DOUBLE PRECISION */
+/* If NORM = '1' or 'O', the 1-norm of the original matrix A. */
+/* If NORM = 'I', the infinity-norm of the original matrix A. */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (4*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+ if (! onenrm && ! lsame_(norm, "I")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else if (*anorm < 0.) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGECON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.;
+ if (*n == 0) {
+ *rcond = 1.;
+ return 0;
+ } else if (*anorm == 0.) {
+ return 0;
+ }
+
+ smlnum = dlamch_("Safe minimum");
+
+/* Estimate the norm of inv(A). */
+
+ ainvnm = 0.;
+ *(unsigned char *)normin = 'N';
+ if (onenrm) {
+ kase1 = 1;
+ } else {
+ kase1 = 2;
+ }
+ kase = 0;
+L10:
+ dlacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+ if (kase == kase1) {
+
+/* Multiply by inv(L). */
+
+ dlatrs_("Lower", "No transpose", "Unit", normin, n, &a[a_offset],
+ lda, &work[1], &sl, &work[(*n << 1) + 1], info);
+
+/* Multiply by inv(U). */
+
+ dlatrs_("Upper", "No transpose", "Non-unit", normin, n, &a[
+ a_offset], lda, &work[1], &su, &work[*n * 3 + 1], info);
+ } else {
+
+/* Multiply by inv(U'). */
+
+ dlatrs_("Upper", "Transpose", "Non-unit", normin, n, &a[a_offset],
+ lda, &work[1], &su, &work[*n * 3 + 1], info);
+
+/* Multiply by inv(L'). */
+
+ dlatrs_("Lower", "Transpose", "Unit", normin, n, &a[a_offset],
+ lda, &work[1], &sl, &work[(*n << 1) + 1], info);
+ }
+
+/* Divide X by 1/(SL*SU) if doing so will not cause overflow. */
+
+ scale = sl * su;
+ *(unsigned char *)normin = 'Y';
+ if (scale != 1.) {
+ ix = idamax_(n, &work[1], &c__1);
+ if (scale < (d__1 = work[ix], abs(d__1)) * smlnum || scale == 0.)
+ {
+ goto L20;
+ }
+ drscl_(n, &scale, &work[1], &c__1);
+ }
+ goto L10;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.) {
+ *rcond = 1. / ainvnm / *anorm;
+ }
+
+L20:
+ return 0;
+
+/* End of DGECON */
+
+} /* dgecon_ */
diff --git a/contrib/libs/clapack/dgeequ.c b/contrib/libs/clapack/dgeequ.c
new file mode 100644
index 0000000000..b0b1462151
--- /dev/null
+++ b/contrib/libs/clapack/dgeequ.c
@@ -0,0 +1,296 @@
+/* dgeequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dgeequ_(integer *m, integer *n, doublereal *a, integer *
+ lda, doublereal *r__, doublereal *c__, doublereal *rowcnd, doublereal
+ *colcnd, doublereal *amax, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublereal d__1, d__2, d__3;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal rcmin, rcmax;
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum, smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGEEQU computes row and column scalings intended to equilibrate an */
+/* M-by-N matrix A and reduce its condition number. R returns the row */
+/* scale factors and C the column scale factors, chosen to try to make */
+/* the largest element in each row and column of the matrix B with */
+/* elements B(i,j)=R(i)*A(i,j)*C(j) have absolute value 1. */
+
+/* R(i) and C(j) are restricted to be between SMLNUM = smallest safe */
+/* number and BIGNUM = largest safe number. Use of these scaling */
+/* factors is not guaranteed to reduce the condition number of A but */
+/* works well in practice. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The M-by-N matrix whose equilibration factors are */
+/* to be computed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* R (output) DOUBLE PRECISION array, dimension (M) */
+/* If INFO = 0 or INFO > M, R contains the row scale factors */
+/* for A. */
+
+/* C (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, C contains the column scale factors for A. */
+
+/* ROWCND (output) DOUBLE PRECISION */
+/* If INFO = 0 or INFO > M, ROWCND contains the ratio of the */
+/* smallest R(i) to the largest R(i). If ROWCND >= 0.1 and */
+/* AMAX is neither too large nor too small, it is not worth */
+/* scaling by R. */
+
+/* COLCND (output) DOUBLE PRECISION */
+/* If INFO = 0, COLCND contains the ratio of the smallest */
+/* C(i) to the largest C(i). If COLCND >= 0.1, it is not */
+/* worth scaling by C. */
+
+/* AMAX (output) DOUBLE PRECISION */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= M: the i-th row of A is exactly zero */
+/* > M: the (i-M)-th column of A is exactly zero */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --r__;
+ --c__;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGEEQU", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ *rowcnd = 1.;
+ *colcnd = 1.;
+ *amax = 0.;
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ smlnum = dlamch_("S");
+ bignum = 1. / smlnum;
+
+/* Compute row scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__[i__] = 0.;
+/* L10: */
+ }
+
+/* Find the maximum element in each row. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = r__[i__], d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+ r__[i__] = max(d__2,d__3);
+/* L20: */
+ }
+/* L30: */
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = rcmax, d__2 = r__[i__];
+ rcmax = max(d__1,d__2);
+/* Computing MIN */
+ d__1 = rcmin, d__2 = r__[i__];
+ rcmin = min(d__1,d__2);
+/* L40: */
+ }
+ *amax = rcmax;
+
+ if (rcmin == 0.) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (r__[i__] == 0.) {
+ *info = i__;
+ return 0;
+ }
+/* L50: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MIN */
+/* Computing MAX */
+ d__2 = r__[i__];
+ d__1 = max(d__2,smlnum);
+ r__[i__] = 1. / min(d__1,bignum);
+/* L60: */
+ }
+
+/* Compute ROWCND = min(R(I)) / max(R(I)) */
+
+ *rowcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ }
+
+/* Compute column scale factors */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j] = 0.;
+/* L70: */
+ }
+
+/* Find the maximum element in each column, */
+/* assuming the row scaling computed above. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = c__[j], d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1)) *
+ r__[i__];
+ c__[j] = max(d__2,d__3);
+/* L80: */
+ }
+/* L90: */
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ d__1 = rcmin, d__2 = c__[j];
+ rcmin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = rcmax, d__2 = c__[j];
+ rcmax = max(d__1,d__2);
+/* L100: */
+ }
+
+ if (rcmin == 0.) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (c__[j] == 0.) {
+ *info = *m + j;
+ return 0;
+ }
+/* L110: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+/* Computing MAX */
+ d__2 = c__[j];
+ d__1 = max(d__2,smlnum);
+ c__[j] = 1. / min(d__1,bignum);
+/* L120: */
+ }
+
+/* Compute COLCND = min(C(J)) / max(C(J)) */
+
+ *colcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ }
+
+ return 0;
+
+/* End of DGEEQU */
+
+} /* dgeequ_ */
diff --git a/contrib/libs/clapack/dgeequb.c b/contrib/libs/clapack/dgeequb.c
new file mode 100644
index 0000000000..fbaeb2f609
--- /dev/null
+++ b/contrib/libs/clapack/dgeequb.c
@@ -0,0 +1,324 @@
+/* dgeequb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dgeequb_(integer *m, integer *n, doublereal *a, integer *
+ lda, doublereal *r__, doublereal *c__, doublereal *rowcnd, doublereal
+ *colcnd, doublereal *amax, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublereal d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double log(doublereal), pow_di(doublereal *, integer *);
+
+ /* Local variables */
+ integer i__, j;
+ doublereal radix, rcmin, rcmax;
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum, logrdx, smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- Contributed by James Demmel, Deaglan Halligan, Yozo Hida and -- */
+/* -- Jason Riedy of Univ. of California Berkeley. -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley and NAG Ltd. -- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGEEQUB computes row and column scalings intended to equilibrate an */
+/* M-by-N matrix A and reduce its condition number. R returns the row */
+/* scale factors and C the column scale factors, chosen to try to make */
+/* the largest element in each row and column of the matrix B with */
+/* elements B(i,j)=R(i)*A(i,j)*C(j) have an absolute value of at most */
+/* the radix. */
+
+/* R(i) and C(j) are restricted to be a power of the radix between */
+/* SMLNUM = smallest safe number and BIGNUM = largest safe number. Use */
+/* of these scaling factors is not guaranteed to reduce the condition */
+/* number of A but works well in practice. */
+
+/* This routine differs from DGEEQU by restricting the scaling factors */
+/* to a power of the radix.  Barring over- and underflow, scaling by */
+/* these factors introduces no additional rounding errors. However, the */
+/* scaled entries' magnitudes are no longer approximately 1 but lie */
+/* between sqrt(radix) and 1/sqrt(radix). */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The M-by-N matrix whose equilibration factors are */
+/* to be computed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* R (output) DOUBLE PRECISION array, dimension (M) */
+/* If INFO = 0 or INFO > M, R contains the row scale factors */
+/* for A. */
+
+/* C (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, C contains the column scale factors for A. */
+
+/* ROWCND (output) DOUBLE PRECISION */
+/* If INFO = 0 or INFO > M, ROWCND contains the ratio of the */
+/* smallest R(i) to the largest R(i). If ROWCND >= 0.1 and */
+/* AMAX is neither too large nor too small, it is not worth */
+/* scaling by R. */
+
+/* COLCND (output) DOUBLE PRECISION */
+/* If INFO = 0, COLCND contains the ratio of the smallest */
+/* C(i) to the largest C(i). If COLCND >= 0.1, it is not */
+/* worth scaling by C. */
+
+/* AMAX (output) DOUBLE PRECISION */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= M: the i-th row of A is exactly zero */
+/* > M: the (i-M)-th column of A is exactly zero */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --r__;
+ --c__;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGEEQUB", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ *rowcnd = 1.;
+ *colcnd = 1.;
+ *amax = 0.;
+ return 0;
+ }
+
+/* Get machine constants. Assume SMLNUM is a power of the radix. */
+
+ smlnum = dlamch_("S");
+ bignum = 1. / smlnum;
+ radix = dlamch_("B");
+ logrdx = log(radix);
+
+/* Compute row scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__[i__] = 0.;
+/* L10: */
+ }
+
+/* Find the maximum element in each row. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = r__[i__], d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+ r__[i__] = max(d__2,d__3);
+/* L20: */
+ }
+/* L30: */
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (r__[i__] > 0.) {
+ i__2 = (integer) (log(r__[i__]) / logrdx);
+ r__[i__] = pow_di(&radix, &i__2);
+ }
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = rcmax, d__2 = r__[i__];
+ rcmax = max(d__1,d__2);
+/* Computing MIN */
+ d__1 = rcmin, d__2 = r__[i__];
+ rcmin = min(d__1,d__2);
+/* L40: */
+ }
+ *amax = rcmax;
+
+ if (rcmin == 0.) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (r__[i__] == 0.) {
+ *info = i__;
+ return 0;
+ }
+/* L50: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MIN */
+/* Computing MAX */
+ d__2 = r__[i__];
+ d__1 = max(d__2,smlnum);
+ r__[i__] = 1. / min(d__1,bignum);
+/* L60: */
+ }
+
+/* Compute ROWCND = min(R(I)) / max(R(I)). */
+
+ *rowcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ }
+
+/* Compute column scale factors */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j] = 0.;
+/* L70: */
+ }
+
+/* Find the maximum element in each column, */
+/* assuming the row scaling computed above. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = c__[j], d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1)) *
+ r__[i__];
+ c__[j] = max(d__2,d__3);
+/* L80: */
+ }
+ if (c__[j] > 0.) {
+ i__2 = (integer) (log(c__[j]) / logrdx);
+ c__[j] = pow_di(&radix, &i__2);
+ }
+/* L90: */
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ d__1 = rcmin, d__2 = c__[j];
+ rcmin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = rcmax, d__2 = c__[j];
+ rcmax = max(d__1,d__2);
+/* L100: */
+ }
+
+ if (rcmin == 0.) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (c__[j] == 0.) {
+ *info = *m + j;
+ return 0;
+ }
+/* L110: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+/* Computing MAX */
+ d__2 = c__[j];
+ d__1 = max(d__2,smlnum);
+ c__[j] = 1. / min(d__1,bignum);
+/* L120: */
+ }
+
+/* Compute COLCND = min(C(J)) / max(C(J)). */
+
+ *colcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ }
+
+ return 0;
+
+/* End of DGEEQUB */
+
+} /* dgeequb_ */
diff --git a/contrib/libs/clapack/dgees.c b/contrib/libs/clapack/dgees.c
new file mode 100644
index 0000000000..9de8ab0b6f
--- /dev/null
+++ b/contrib/libs/clapack/dgees.c
@@ -0,0 +1,549 @@
+/* dgees.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dgees_(char *jobvs, char *sort, L_fp select, integer *n,
+ doublereal *a, integer *lda, integer *sdim, doublereal *wr,
+ doublereal *wi, doublereal *vs, integer *ldvs, doublereal *work,
+ integer *lwork, logical *bwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, vs_dim1, vs_offset, i__1, i__2, i__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ doublereal s;
+ integer i1, i2, ip, ihi, ilo;
+ doublereal dum[1], eps, sep;
+ integer ibal;
+ doublereal anrm;
+ integer idum[1], ierr, itau, iwrk, inxt, icond, ieval;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dswap_(integer *, doublereal *, integer
+ *, doublereal *, integer *);
+ logical cursl;
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dgebak_(
+ char *, char *, integer *, integer *, integer *, doublereal *,
+ integer *, doublereal *, integer *, integer *),
+ dgebal_(char *, integer *, doublereal *, integer *, integer *,
+ integer *, doublereal *, integer *);
+ logical lst2sl, scalea;
+ extern doublereal dlamch_(char *);
+ doublereal cscale;
+ extern doublereal dlange_(char *, integer *, integer *, doublereal *,
+ integer *, doublereal *);
+ extern /* Subroutine */ int dgehrd_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *), dlascl_(char *, integer *, integer *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *), dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ doublereal bignum;
+ extern /* Subroutine */ int dorghr_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *), dhseqr_(char *, char *, integer *, integer *, integer
+ *, doublereal *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, integer *), dtrsen_(char *, char *, logical *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, integer *, integer *, integer *);
+ logical lastsl;
+ integer minwrk, maxwrk;
+ doublereal smlnum;
+ integer hswork;
+ logical wantst, lquery, wantvs;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+/* .. Function Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGEES computes for an N-by-N real nonsymmetric matrix A, the */
+/* eigenvalues, the real Schur form T, and, optionally, the matrix of */
+/* Schur vectors Z. This gives the Schur factorization A = Z*T*(Z**T). */
+
+/* Optionally, it also orders the eigenvalues on the diagonal of the */
+/* real Schur form so that selected eigenvalues are at the top left. */
+/* The leading columns of Z then form an orthonormal basis for the */
+/* invariant subspace corresponding to the selected eigenvalues. */
+
+/* A matrix is in real Schur form if it is upper quasi-triangular with */
+/* 1-by-1 and 2-by-2 blocks. 2-by-2 blocks will be standardized in the */
+/* form */
+/* [ a b ] */
+/* [ c a ] */
+
+/* where b*c < 0. The eigenvalues of such a block are a +- sqrt(bc). */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVS (input) CHARACTER*1 */
+/* = 'N': Schur vectors are not computed; */
+/* = 'V': Schur vectors are computed. */
+
+/* SORT (input) CHARACTER*1 */
+/* Specifies whether or not to order the eigenvalues on the */
+/* diagonal of the Schur form. */
+/* = 'N': Eigenvalues are not ordered; */
+/* = 'S': Eigenvalues are ordered (see SELECT). */
+
+/* SELECT (external procedure) LOGICAL FUNCTION of two DOUBLE PRECISION arguments */
+/* SELECT must be declared EXTERNAL in the calling subroutine. */
+/* If SORT = 'S', SELECT is used to select eigenvalues to sort */
+/* to the top left of the Schur form. */
+/* If SORT = 'N', SELECT is not referenced. */
+/* An eigenvalue WR(j)+sqrt(-1)*WI(j) is selected if */
+/* SELECT(WR(j),WI(j)) is true; i.e., if either one of a complex */
+/* conjugate pair of eigenvalues is selected, then both complex */
+/* eigenvalues are selected. */
+/* Note that a selected complex eigenvalue may no longer */
+/* satisfy SELECT(WR(j),WI(j)) = .TRUE. after ordering, since */
+/* ordering may change the value of complex eigenvalues */
+/* (especially if the eigenvalue is ill-conditioned); in this */
+/* case INFO is set to N+2 (see INFO below). */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the N-by-N matrix A. */
+/* On exit, A has been overwritten by its real Schur form T. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* SDIM (output) INTEGER */
+/* If SORT = 'N', SDIM = 0. */
+/* If SORT = 'S', SDIM = number of eigenvalues (after sorting) */
+/* for which SELECT is true. (Complex conjugate */
+/* pairs for which SELECT is true for either */
+/* eigenvalue count as 2.) */
+
+/* WR (output) DOUBLE PRECISION array, dimension (N) */
+/* WI (output) DOUBLE PRECISION array, dimension (N) */
+/* WR and WI contain the real and imaginary parts, */
+/* respectively, of the computed eigenvalues in the same order */
+/* that they appear on the diagonal of the output Schur form T. */
+/* Complex conjugate pairs of eigenvalues will appear */
+/* consecutively with the eigenvalue having the positive */
+/* imaginary part first. */
+
+/* VS (output) DOUBLE PRECISION array, dimension (LDVS,N) */
+/* If JOBVS = 'V', VS contains the orthogonal matrix Z of Schur */
+/* vectors. */
+/* If JOBVS = 'N', VS is not referenced. */
+
+/* LDVS (input) INTEGER */
+/* The leading dimension of the array VS. LDVS >= 1; if */
+/* JOBVS = 'V', LDVS >= N. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) contains the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,3*N). */
+/* For good performance, LWORK must generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* BWORK (workspace) LOGICAL array, dimension (N) */
+/* Not referenced if SORT = 'N'. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, and i is */
+/* <= N: the QR algorithm failed to compute all the */
+/* eigenvalues; elements 1:ILO-1 and i+1:N of WR and WI */
+/* contain those eigenvalues which have converged; if */
+/* JOBVS = 'V', VS contains the matrix which reduces A */
+/* to its partially converged Schur form. */
+/* = N+1: the eigenvalues could not be reordered because some */
+/* eigenvalues were too close to separate (the problem */
+/* is very ill-conditioned); */
+/* = N+2: after reordering, roundoff changed values of some */
+/* complex eigenvalues so that leading eigenvalues in */
+/* the Schur form no longer satisfy SELECT=.TRUE. This */
+/* could also be caused by underflow due to scaling. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --wr;
+ --wi;
+ vs_dim1 = *ldvs;
+ vs_offset = 1 + vs_dim1;
+ vs -= vs_offset;
+ --work;
+ --bwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ wantvs = lsame_(jobvs, "V");
+ wantst = lsame_(sort, "S");
+ if (! wantvs && ! lsame_(jobvs, "N")) {
+ *info = -1;
+ } else if (! wantst && ! lsame_(sort, "N")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else if (*ldvs < 1 || wantvs && *ldvs < *n) {
+ *info = -11;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV. */
+/* HSWORK refers to the workspace preferred by DHSEQR, as */
+/* calculated below. HSWORK is computed assuming ILO=1 and IHI=N, */
+/* the worst case.) */
+
+ if (*info == 0) {
+ if (*n == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ } else {
+ maxwrk = (*n << 1) + *n * ilaenv_(&c__1, "DGEHRD", " ", n, &c__1,
+ n, &c__0);
+ minwrk = *n * 3;
+
+ dhseqr_("S", jobvs, n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[1]
+, &vs[vs_offset], ldvs, &work[1], &c_n1, &ieval);
+ hswork = (integer) work[1];
+
+ if (! wantvs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + hswork;
+ maxwrk = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "DORGHR", " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + hswork;
+ maxwrk = max(i__1,i__2);
+ }
+ }
+ work[1] = (doublereal) maxwrk;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -13;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGEES ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *sdim = 0;
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = dlamch_("P");
+ smlnum = dlamch_("S");
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = dlange_("M", n, n, &a[a_offset], lda, dum);
+ scalea = FALSE_;
+ if (anrm > 0. && anrm < smlnum) {
+ scalea = TRUE_;
+ cscale = smlnum;
+ } else if (anrm > bignum) {
+ scalea = TRUE_;
+ cscale = bignum;
+ }
+ if (scalea) {
+ dlascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Permute the matrix to make it more nearly triangular */
+/* (Workspace: need N) */
+
+ ibal = 1;
+ dgebal_("P", n, &a[a_offset], lda, &ilo, &ihi, &work[ibal], &ierr);
+
+/* Reduce to upper Hessenberg form */
+/* (Workspace: need 3*N, prefer 2*N+N*NB) */
+
+ itau = *n + ibal;
+ iwrk = *n + itau;
+ i__1 = *lwork - iwrk + 1;
+ dgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1,
+ &ierr);
+
+ if (wantvs) {
+
+/* Copy Householder vectors to VS */
+
+ dlacpy_("L", n, n, &a[a_offset], lda, &vs[vs_offset], ldvs)
+ ;
+
+/* Generate orthogonal matrix in VS */
+/* (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+
+ i__1 = *lwork - iwrk + 1;
+ dorghr_(n, &ilo, &ihi, &vs[vs_offset], ldvs, &work[itau], &work[iwrk],
+ &i__1, &ierr);
+ }
+
+ *sdim = 0;
+
+/* Perform QR iteration, accumulating Schur vectors in VS if desired */
+/* (Workspace: need N+1, prefer N+HSWORK (see comments) ) */
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ dhseqr_("S", jobvs, n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &vs[
+ vs_offset], ldvs, &work[iwrk], &i__1, &ieval);
+ if (ieval > 0) {
+ *info = ieval;
+ }
+
+/* Sort eigenvalues if desired */
+
+ if (wantst && *info == 0) {
+ if (scalea) {
+ dlascl_("G", &c__0, &c__0, &cscale, &anrm, n, &c__1, &wr[1], n, &
+ ierr);
+ dlascl_("G", &c__0, &c__0, &cscale, &anrm, n, &c__1, &wi[1], n, &
+ ierr);
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ bwork[i__] = (*select)(&wr[i__], &wi[i__]);
+/* L10: */
+ }
+
+/* Reorder eigenvalues and transform Schur vectors */
+/* (Workspace: none needed) */
+
+ i__1 = *lwork - iwrk + 1;
+ dtrsen_("N", jobvs, &bwork[1], n, &a[a_offset], lda, &vs[vs_offset],
+ ldvs, &wr[1], &wi[1], sdim, &s, &sep, &work[iwrk], &i__1,
+ idum, &c__1, &icond);
+ if (icond > 0) {
+ *info = *n + icond;
+ }
+ }
+
+ if (wantvs) {
+
+/* Undo balancing */
+/* (Workspace: need N) */
+
+ dgebak_("P", "R", n, &ilo, &ihi, &work[ibal], n, &vs[vs_offset], ldvs,
+ &ierr);
+ }
+
+ if (scalea) {
+
+/* Undo scaling for the Schur form of A */
+
+ dlascl_("H", &c__0, &c__0, &cscale, &anrm, n, n, &a[a_offset], lda, &
+ ierr);
+ i__1 = *lda + 1;
+ dcopy_(n, &a[a_offset], &i__1, &wr[1], &c__1);
+ if (cscale == smlnum) {
+
+/* If scaling back towards underflow, adjust WI if an */
+/* offdiagonal element of a 2-by-2 block in the Schur form */
+/* underflows. */
+
+ if (ieval > 0) {
+ i1 = ieval + 1;
+ i2 = ihi - 1;
+ i__1 = ilo - 1;
+/* Computing MAX */
+ i__3 = ilo - 1;
+ i__2 = max(i__3,1);
+ dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[
+ 1], &i__2, &ierr);
+ } else if (wantst) {
+ i1 = 1;
+ i2 = *n - 1;
+ } else {
+ i1 = ilo;
+ i2 = ihi - 1;
+ }
+ inxt = i1 - 1;
+ i__1 = i2;
+ for (i__ = i1; i__ <= i__1; ++i__) {
+ if (i__ < inxt) {
+ goto L20;
+ }
+ if (wi[i__] == 0.) {
+ inxt = i__ + 1;
+ } else {
+ if (a[i__ + 1 + i__ * a_dim1] == 0.) {
+ wi[i__] = 0.;
+ wi[i__ + 1] = 0.;
+ } else if (a[i__ + 1 + i__ * a_dim1] != 0. && a[i__ + (
+ i__ + 1) * a_dim1] == 0.) {
+ wi[i__] = 0.;
+ wi[i__ + 1] = 0.;
+ if (i__ > 1) {
+ i__2 = i__ - 1;
+ dswap_(&i__2, &a[i__ * a_dim1 + 1], &c__1, &a[(
+ i__ + 1) * a_dim1 + 1], &c__1);
+ }
+ if (*n > i__ + 1) {
+ i__2 = *n - i__ - 1;
+ dswap_(&i__2, &a[i__ + (i__ + 2) * a_dim1], lda, &
+ a[i__ + 1 + (i__ + 2) * a_dim1], lda);
+ }
+ if (wantvs) {
+ dswap_(n, &vs[i__ * vs_dim1 + 1], &c__1, &vs[(i__
+ + 1) * vs_dim1 + 1], &c__1);
+ }
+ a[i__ + (i__ + 1) * a_dim1] = a[i__ + 1 + i__ *
+ a_dim1];
+ a[i__ + 1 + i__ * a_dim1] = 0.;
+ }
+ inxt = i__ + 2;
+ }
+L20:
+ ;
+ }
+ }
+
+/* Undo scaling for the imaginary part of the eigenvalues */
+
+ i__1 = *n - ieval;
+/* Computing MAX */
+ i__3 = *n - ieval;
+ i__2 = max(i__3,1);
+ dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[ieval +
+ 1], &i__2, &ierr);
+ }
+
+ if (wantst && *info == 0) {
+
+/* Check if reordering successful */
+
+ lastsl = TRUE_;
+ lst2sl = TRUE_;
+ *sdim = 0;
+ ip = 0;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ cursl = (*select)(&wr[i__], &wi[i__]);
+ if (wi[i__] == 0.) {
+ if (cursl) {
+ ++(*sdim);
+ }
+ ip = 0;
+ if (cursl && ! lastsl) {
+ *info = *n + 2;
+ }
+ } else {
+ if (ip == 1) {
+
+/* Last eigenvalue of conjugate pair */
+
+ cursl = cursl || lastsl;
+ lastsl = cursl;
+ if (cursl) {
+ *sdim += 2;
+ }
+ ip = -1;
+ if (cursl && ! lst2sl) {
+ *info = *n + 2;
+ }
+ } else {
+
+/* First eigenvalue of conjugate pair */
+
+ ip = 1;
+ }
+ }
+ lst2sl = lastsl;
+ lastsl = cursl;
+/* L30: */
+ }
+ }
+
+ work[1] = (doublereal) maxwrk;
+ return 0;
+
+/* End of DGEES */
+
+} /* dgees_ */
diff --git a/contrib/libs/clapack/dgeesx.c b/contrib/libs/clapack/dgeesx.c
new file mode 100644
index 0000000000..d404f4aa78
--- /dev/null
+++ b/contrib/libs/clapack/dgeesx.c
@@ -0,0 +1,649 @@
+/* dgeesx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dgeesx_(char *jobvs, char *sort, L_fp select, char *
+ sense, integer *n, doublereal *a, integer *lda, integer *sdim,
+ doublereal *wr, doublereal *wi, doublereal *vs, integer *ldvs,
+ doublereal *rconde, doublereal *rcondv, doublereal *work, integer *
+ lwork, integer *iwork, integer *liwork, logical *bwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, vs_dim1, vs_offset, i__1, i__2, i__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, i1, i2, ip, ihi, ilo;
+ doublereal dum[1], eps;
+ integer ibal;
+ doublereal anrm;
+ integer ierr, itau, iwrk, lwrk, inxt, icond, ieval;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dswap_(integer *, doublereal *, integer
+ *, doublereal *, integer *);
+ logical cursl;
+ integer liwrk;
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dgebak_(
+ char *, char *, integer *, integer *, integer *, doublereal *,
+ integer *, doublereal *, integer *, integer *),
+ dgebal_(char *, integer *, doublereal *, integer *, integer *,
+ integer *, doublereal *, integer *);
+ logical lst2sl, scalea;
+ extern doublereal dlamch_(char *);
+ doublereal cscale;
+ extern doublereal dlange_(char *, integer *, integer *, doublereal *,
+ integer *, doublereal *);
+ extern /* Subroutine */ int dgehrd_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *), dlascl_(char *, integer *, integer *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *), dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ doublereal bignum;
+ extern /* Subroutine */ int dorghr_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *), dhseqr_(char *, char *, integer *, integer *, integer
+ *, doublereal *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, integer *);
+ logical wantsb;
+ extern /* Subroutine */ int dtrsen_(char *, char *, logical *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, integer *, integer *, integer *);
+ logical wantse, lastsl;
+ integer minwrk, maxwrk;
+ logical wantsn;
+ doublereal smlnum;
+ integer hswork;
+ logical wantst, lquery, wantsv, wantvs;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+/* .. Function Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGEESX computes for an N-by-N real nonsymmetric matrix A, the */
+/* eigenvalues, the real Schur form T, and, optionally, the matrix of */
+/* Schur vectors Z. This gives the Schur factorization A = Z*T*(Z**T). */
+
+/* Optionally, it also orders the eigenvalues on the diagonal of the */
+/* real Schur form so that selected eigenvalues are at the top left; */
+/* computes a reciprocal condition number for the average of the */
+/* selected eigenvalues (RCONDE); and computes a reciprocal condition */
+/* number for the right invariant subspace corresponding to the */
+/* selected eigenvalues (RCONDV). The leading columns of Z form an */
+/* orthonormal basis for this invariant subspace. */
+
+/* For further explanation of the reciprocal condition numbers RCONDE */
+/* and RCONDV, see Section 4.10 of the LAPACK Users' Guide (where */
+/* these quantities are called s and sep respectively). */
+
+/* A real matrix is in real Schur form if it is upper quasi-triangular */
+/* with 1-by-1 and 2-by-2 blocks. 2-by-2 blocks will be standardized in */
+/* the form */
+/* [ a b ] */
+/* [ c a ] */
+
+/* where b*c < 0. The eigenvalues of such a block are a +- sqrt(bc). */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVS (input) CHARACTER*1 */
+/* = 'N': Schur vectors are not computed; */
+/* = 'V': Schur vectors are computed. */
+
+/* SORT (input) CHARACTER*1 */
+/* Specifies whether or not to order the eigenvalues on the */
+/* diagonal of the Schur form. */
+/* = 'N': Eigenvalues are not ordered; */
+/* = 'S': Eigenvalues are ordered (see SELECT). */
+
+/* SELECT (external procedure) LOGICAL FUNCTION of two DOUBLE PRECISION arguments */
+/* SELECT must be declared EXTERNAL in the calling subroutine. */
+/* If SORT = 'S', SELECT is used to select eigenvalues to sort */
+/* to the top left of the Schur form. */
+/* If SORT = 'N', SELECT is not referenced. */
+/* An eigenvalue WR(j)+sqrt(-1)*WI(j) is selected if */
+/* SELECT(WR(j),WI(j)) is true; i.e., if either one of a */
+/* complex conjugate pair of eigenvalues is selected, then both */
+/* are. Note that a selected complex eigenvalue may no longer */
+/* satisfy SELECT(WR(j),WI(j)) = .TRUE. after ordering, since */
+/* ordering may change the value of complex eigenvalues */
+/* (especially if the eigenvalue is ill-conditioned); in this */
+/* case INFO may be set to N+3 (see INFO below). */
+
+/* SENSE (input) CHARACTER*1 */
+/* Determines which reciprocal condition numbers are computed. */
+/* = 'N': None are computed; */
+/* = 'E': Computed for average of selected eigenvalues only; */
+/* = 'V': Computed for selected right invariant subspace only; */
+/* = 'B': Computed for both. */
+/* If SENSE = 'E', 'V' or 'B', SORT must equal 'S'. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA, N) */
+/* On entry, the N-by-N matrix A. */
+/* On exit, A is overwritten by its real Schur form T. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* SDIM (output) INTEGER */
+/* If SORT = 'N', SDIM = 0. */
+/* If SORT = 'S', SDIM = number of eigenvalues (after sorting) */
+/* for which SELECT is true. (Complex conjugate */
+/* pairs for which SELECT is true for either */
+/* eigenvalue count as 2.) */
+
+/* WR (output) DOUBLE PRECISION array, dimension (N) */
+/* WI (output) DOUBLE PRECISION array, dimension (N) */
+/* WR and WI contain the real and imaginary parts, respectively, */
+/* of the computed eigenvalues, in the same order that they */
+/* appear on the diagonal of the output Schur form T. Complex */
+/* conjugate pairs of eigenvalues appear consecutively with the */
+/* eigenvalue having the positive imaginary part first. */
+
+/* VS (output) DOUBLE PRECISION array, dimension (LDVS,N) */
+/* If JOBVS = 'V', VS contains the orthogonal matrix Z of Schur */
+/* vectors. */
+/* If JOBVS = 'N', VS is not referenced. */
+
+/* LDVS (input) INTEGER */
+/* The leading dimension of the array VS. LDVS >= 1, and if */
+/* JOBVS = 'V', LDVS >= N. */
+
+/* RCONDE (output) DOUBLE PRECISION */
+/* If SENSE = 'E' or 'B', RCONDE contains the reciprocal */
+/* condition number for the average of the selected eigenvalues. */
+/* Not referenced if SENSE = 'N' or 'V'. */
+
+/* RCONDV (output) DOUBLE PRECISION */
+/* If SENSE = 'V' or 'B', RCONDV contains the reciprocal */
+/* condition number for the selected right invariant subspace. */
+/* Not referenced if SENSE = 'N' or 'E'. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,3*N). */
+/* Also, if SENSE = 'E' or 'V' or 'B', */
+/* LWORK >= N+2*SDIM*(N-SDIM), where SDIM is the number of */
+/* selected eigenvalues computed by this routine. Note that */
+/* N+2*SDIM*(N-SDIM) <= N+N*N/2. Note also that an error is only */
+/* returned if LWORK < max(1,3*N), but if SENSE = 'E' or 'V' or */
+/* 'B' this may not be large enough. */
+/* For good performance, LWORK must generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates upper bounds on the optimal sizes of the */
+/* arrays WORK and IWORK, returns these values as the first */
+/* entries of the WORK and IWORK arrays, and no error messages */
+/* related to LWORK or LIWORK are issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* LIWORK >= 1; if SENSE = 'V' or 'B', LIWORK >= SDIM*(N-SDIM). */
+/* Note that SDIM*(N-SDIM) <= N*N/4. Note also that an error is */
+/* only returned if LIWORK < 1, but if SENSE = 'V' or 'B' this */
+/* may not be large enough. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates upper bounds on the optimal sizes of */
+/* the arrays WORK and IWORK, returns these values as the first */
+/* entries of the WORK and IWORK arrays, and no error messages */
+/* related to LWORK or LIWORK are issued by XERBLA. */
+
+/* BWORK (workspace) LOGICAL array, dimension (N) */
+/* Not referenced if SORT = 'N'. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, and i is */
+/* <= N: the QR algorithm failed to compute all the */
+/* eigenvalues; elements 1:ILO-1 and i+1:N of WR and WI */
+/* contain those eigenvalues which have converged; if */
+/* JOBVS = 'V', VS contains the transformation which */
+/* reduces A to its partially converged Schur form. */
+/* = N+1: the eigenvalues could not be reordered because some */
+/* eigenvalues were too close to separate (the problem */
+/* is very ill-conditioned); */
+/* = N+2: after reordering, roundoff changed values of some */
+/* complex eigenvalues so that leading eigenvalues in */
+/* the Schur form no longer satisfy SELECT=.TRUE. This */
+/* could also be caused by underflow due to scaling. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ /* (f2c convention: shift each array pointer down by one element/column */
+ /* so the 1-based Fortran subscripts used below index the C arrays) */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --wr;
+ --wi;
+ vs_dim1 = *ldvs;
+ vs_offset = 1 + vs_dim1;
+ vs -= vs_offset;
+ --work;
+ --iwork;
+ --bwork;
+
+ /* Function Body */
+ *info = 0;
+ wantvs = lsame_(jobvs, "V");
+ wantst = lsame_(sort, "S");
+ wantsn = lsame_(sense, "N");
+ wantse = lsame_(sense, "E");
+ wantsv = lsame_(sense, "V");
+ wantsb = lsame_(sense, "B");
+ lquery = *lwork == -1 || *liwork == -1;
+ if (! wantvs && ! lsame_(jobvs, "N")) {
+ *info = -1;
+ } else if (! wantst && ! lsame_(sort, "N")) {
+ *info = -2;
+ } else if (! (wantsn || wantse || wantsv || wantsb) || ! wantst && !
+ wantsn) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldvs < 1 || wantvs && *ldvs < *n) {
+ *info = -12;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "RWorkspace:" describe the */
+/* minimal amount of real workspace needed at that point in the */
+/* code, as well as the preferred amount for good performance. */
+/* IWorkspace refers to integer workspace. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV. */
+/* HSWORK refers to the workspace preferred by DHSEQR, as */
+/* calculated below. HSWORK is computed assuming ILO=1 and IHI=N, */
+/* the worst case. */
+/* If SENSE = 'E', 'V' or 'B', then the amount of workspace needed */
+/* depends on SDIM, which is computed by the routine DTRSEN later */
+/* in the code.) */
+
+ if (*info == 0) {
+ liwrk = 1;
+ if (*n == 0) {
+ minwrk = 1;
+ lwrk = 1;
+ } else {
+ maxwrk = (*n << 1) + *n * ilaenv_(&c__1, "DGEHRD", " ", n, &c__1,
+ n, &c__0);
+ minwrk = *n * 3;
+
+ dhseqr_("S", jobvs, n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[1]
+, &vs[vs_offset], ldvs, &work[1], &c_n1, &ieval);
+ hswork = (integer) work[1];
+
+ if (! wantvs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + hswork;
+ maxwrk = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "DORGHR", " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + hswork;
+ maxwrk = max(i__1,i__2);
+ }
+ lwrk = maxwrk;
+ if (! wantsn) {
+/* Computing MAX */
+ i__1 = lwrk, i__2 = *n + *n * *n / 2;
+ lwrk = max(i__1,i__2);
+ }
+ if (wantsv || wantsb) {
+ liwrk = *n * *n / 4;
+ }
+ }
+ iwork[1] = liwrk;
+ work[1] = (doublereal) lwrk;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -16;
+ } else if (*liwork < 1 && ! lquery) {
+ *info = -18;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGEESX", &i__1);
+ return 0;
+ } else if (lquery) {
+
+/* Workspace query only (LWORK = -1 or LIWORK = -1): the upper */
+/* bounds have been stored in WORK(1) and IWORK(1) above, and the */
+/* LWORK/LIWORK documentation promises no further computation */
+/* (same early return as in DGEES). */
+
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *sdim = 0;
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = dlamch_("P");
+ smlnum = dlamch_("S");
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = dlange_("M", n, n, &a[a_offset], lda, dum);
+ scalea = FALSE_;
+ if (anrm > 0. && anrm < smlnum) {
+ scalea = TRUE_;
+ cscale = smlnum;
+ } else if (anrm > bignum) {
+ scalea = TRUE_;
+ cscale = bignum;
+ }
+ if (scalea) {
+ dlascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Permute the matrix to make it more nearly triangular */
+/* (RWorkspace: need N) */
+
+ ibal = 1;
+ dgebal_("P", n, &a[a_offset], lda, &ilo, &ihi, &work[ibal], &ierr);
+
+/* Reduce to upper Hessenberg form */
+/* (RWorkspace: need 3*N, prefer 2*N+N*NB) */
+
+ itau = *n + ibal;
+ iwrk = *n + itau;
+ i__1 = *lwork - iwrk + 1;
+ dgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1,
+ &ierr);
+
+ if (wantvs) {
+
+/* Copy Householder vectors to VS */
+
+ dlacpy_("L", n, n, &a[a_offset], lda, &vs[vs_offset], ldvs)
+ ;
+
+/* Generate orthogonal matrix in VS */
+/* (RWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+
+ i__1 = *lwork - iwrk + 1;
+ dorghr_(n, &ilo, &ihi, &vs[vs_offset], ldvs, &work[itau], &work[iwrk],
+ &i__1, &ierr);
+ }
+
+ *sdim = 0;
+
+/* Perform QR iteration, accumulating Schur vectors in VS if desired */
+/* (RWorkspace: need N+1, prefer N+HSWORK (see comments) ) */
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ dhseqr_("S", jobvs, n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &vs[
+ vs_offset], ldvs, &work[iwrk], &i__1, &ieval);
+ if (ieval > 0) {
+ *info = ieval;
+ }
+
+/* Sort eigenvalues if desired */
+
+ if (wantst && *info == 0) {
+ if (scalea) {
+ dlascl_("G", &c__0, &c__0, &cscale, &anrm, n, &c__1, &wr[1], n, &
+ ierr);
+ dlascl_("G", &c__0, &c__0, &cscale, &anrm, n, &c__1, &wi[1], n, &
+ ierr);
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ bwork[i__] = (*select)(&wr[i__], &wi[i__]);
+/* L10: */
+ }
+
+/* Reorder eigenvalues, transform Schur vectors, and compute */
+/* reciprocal condition numbers */
+/* (RWorkspace: if SENSE is not 'N', need N+2*SDIM*(N-SDIM) */
+/* otherwise, need N ) */
+/* (IWorkspace: if SENSE is 'V' or 'B', need SDIM*(N-SDIM) */
+/* otherwise, need 0 ) */
+
+ i__1 = *lwork - iwrk + 1;
+ dtrsen_(sense, jobvs, &bwork[1], n, &a[a_offset], lda, &vs[vs_offset],
+ ldvs, &wr[1], &wi[1], sdim, rconde, rcondv, &work[iwrk], &
+ i__1, &iwork[1], liwork, &icond);
+ if (! wantsn) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + (*sdim << 1) * (*n - *sdim);
+ maxwrk = max(i__1,i__2);
+ }
+ if (icond == -15) {
+
+/* Not enough real workspace */
+
+ *info = -16;
+ } else if (icond == -17) {
+
+/* Not enough integer workspace */
+
+ *info = -18;
+ } else if (icond > 0) {
+
+/* DTRSEN failed to reorder or to restore standard Schur form */
+
+ *info = icond + *n;
+ }
+ }
+
+ if (wantvs) {
+
+/* Undo balancing */
+/* (RWorkspace: need N) */
+
+ dgebak_("P", "R", n, &ilo, &ihi, &work[ibal], n, &vs[vs_offset], ldvs,
+ &ierr);
+ }
+
+ if (scalea) {
+
+/* Undo scaling for the Schur form of A */
+
+ dlascl_("H", &c__0, &c__0, &cscale, &anrm, n, n, &a[a_offset], lda, &
+ ierr);
+ i__1 = *lda + 1;
+ dcopy_(n, &a[a_offset], &i__1, &wr[1], &c__1);
+ if ((wantsv || wantsb) && *info == 0) {
+ dum[0] = *rcondv;
+ dlascl_("G", &c__0, &c__0, &cscale, &anrm, &c__1, &c__1, dum, &
+ c__1, &ierr);
+ *rcondv = dum[0];
+ }
+ if (cscale == smlnum) {
+
+/* If scaling back towards underflow, adjust WI if an */
+/* offdiagonal element of a 2-by-2 block in the Schur form */
+/* underflows. */
+
+ if (ieval > 0) {
+ i1 = ieval + 1;
+ i2 = ihi - 1;
+ i__1 = ilo - 1;
+ dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[
+ 1], n, &ierr);
+ } else if (wantst) {
+ i1 = 1;
+ i2 = *n - 1;
+ } else {
+ i1 = ilo;
+ i2 = ihi - 1;
+ }
+ inxt = i1 - 1;
+ i__1 = i2;
+ for (i__ = i1; i__ <= i__1; ++i__) {
+ if (i__ < inxt) {
+ goto L20;
+ }
+ if (wi[i__] == 0.) {
+ inxt = i__ + 1;
+ } else {
+ if (a[i__ + 1 + i__ * a_dim1] == 0.) {
+ wi[i__] = 0.;
+ wi[i__ + 1] = 0.;
+ } else if (a[i__ + 1 + i__ * a_dim1] != 0. && a[i__ + (
+ i__ + 1) * a_dim1] == 0.) {
+ wi[i__] = 0.;
+ wi[i__ + 1] = 0.;
+ if (i__ > 1) {
+ i__2 = i__ - 1;
+ dswap_(&i__2, &a[i__ * a_dim1 + 1], &c__1, &a[(
+ i__ + 1) * a_dim1 + 1], &c__1);
+ }
+ if (*n > i__ + 1) {
+ i__2 = *n - i__ - 1;
+ dswap_(&i__2, &a[i__ + (i__ + 2) * a_dim1], lda, &
+ a[i__ + 1 + (i__ + 2) * a_dim1], lda);
+ }
+/* NOTE(review): DGEES guards this VS column swap with */
+/* IF (WANTVS); here VS is touched even when JOBVS='N'. */
+/* Confirm against upstream dgeesx.f before changing. */
+ dswap_(n, &vs[i__ * vs_dim1 + 1], &c__1, &vs[(i__ + 1)
+ * vs_dim1 + 1], &c__1);
+ a[i__ + (i__ + 1) * a_dim1] = a[i__ + 1 + i__ *
+ a_dim1];
+ a[i__ + 1 + i__ * a_dim1] = 0.;
+ }
+ inxt = i__ + 2;
+ }
+L20:
+ ;
+ }
+ }
+
+/* Undo scaling for the imaginary part of the eigenvalues */
+
+ i__1 = *n - ieval;
+/* Computing MAX */
+ i__3 = *n - ieval;
+ i__2 = max(i__3,1);
+ dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[ieval +
+ 1], &i__2, &ierr);
+ }
+
+ if (wantst && *info == 0) {
+
+/* Check if reordering successful */
+
+ lastsl = TRUE_;
+ lst2sl = TRUE_;
+ *sdim = 0;
+ ip = 0;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ cursl = (*select)(&wr[i__], &wi[i__]);
+ if (wi[i__] == 0.) {
+ if (cursl) {
+ ++(*sdim);
+ }
+ ip = 0;
+ if (cursl && ! lastsl) {
+ *info = *n + 2;
+ }
+ } else {
+ if (ip == 1) {
+
+/* Last eigenvalue of conjugate pair */
+
+ cursl = cursl || lastsl;
+ lastsl = cursl;
+ if (cursl) {
+ *sdim += 2;
+ }
+ ip = -1;
+ if (cursl && ! lst2sl) {
+ *info = *n + 2;
+ }
+ } else {
+
+/* First eigenvalue of conjugate pair */
+
+ ip = 1;
+ }
+ }
+ lst2sl = lastsl;
+ lastsl = cursl;
+/* L30: */
+ }
+ }
+
+ work[1] = (doublereal) maxwrk;
+ if (wantsv || wantsb) {
+/* Computing MAX */
+ i__1 = 1, i__2 = *sdim * (*n - *sdim);
+ iwork[1] = max(i__1,i__2);
+ } else {
+ iwork[1] = 1;
+ }
+
+ return 0;
+
+/* End of DGEESX */
+
+} /* dgeesx_ */
diff --git a/contrib/libs/clapack/dgeev.c b/contrib/libs/clapack/dgeev.c
new file mode 100644
index 0000000000..d523306aa6
--- /dev/null
+++ b/contrib/libs/clapack/dgeev.c
@@ -0,0 +1,566 @@
+/* dgeev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dgeev_(char *jobvl, char *jobvr, integer *n, doublereal *
+	a, integer *lda, doublereal *wr, doublereal *wi, doublereal *vl, 
+	integer *ldvl, doublereal *vr, integer *ldvr, doublereal *work, 
+	integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1, 
+	    i__2, i__3;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, k;
+    doublereal r__, cs, sn;
+    integer ihi;
+    doublereal scl;
+    integer ilo;
+    doublereal dum[1], eps;
+    integer ibal;
+    char side[1];
+    doublereal anrm;
+    integer ierr, itau;
+    extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, 
+	    doublereal *, integer *, doublereal *, doublereal *);
+    integer iwrk, nout;
+    extern doublereal dnrm2_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, 
+	    integer *);
+    extern logical lsame_(char *, char *);
+    extern doublereal dlapy2_(doublereal *, doublereal *);
+    extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dgebak_(
+	    char *, char *, integer *, integer *, integer *, doublereal *, 
+	    integer *, doublereal *, integer *, integer *), 
+	    dgebal_(char *, integer *, doublereal *, integer *, integer *, 
+	    integer *, doublereal *, integer *);
+    logical scalea;
+    extern doublereal dlamch_(char *);
+    doublereal cscale;
+    extern doublereal dlange_(char *, integer *, integer *, doublereal *, 
+	    integer *, doublereal *);
+    extern /* Subroutine */ int dgehrd_(integer *, integer *, integer *, 
+	    doublereal *, integer *, doublereal *, doublereal *, integer *, 
+	    integer *), dlascl_(char *, integer *, integer *, doublereal *, 
+	    doublereal *, integer *, integer *, doublereal *, integer *, 
+	    integer *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, 
+	    doublereal *, integer *, doublereal *, integer *), 
+	    dlartg_(doublereal *, doublereal *, doublereal *, doublereal *, 
+	    doublereal *), xerbla_(char *, integer *);
+    logical select[1];
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    doublereal bignum;
+    extern /* Subroutine */ int dorghr_(integer *, integer *, integer *, 
+	    doublereal *, integer *, doublereal *, doublereal *, integer *, 
+	    integer *), dhseqr_(char *, char *, integer *, integer *, integer 
+	    *, doublereal *, integer *, doublereal *, doublereal *, 
+	    doublereal *, integer *, doublereal *, integer *, integer *), dtrevc_(char *, char *, logical *, integer *, 
+	    doublereal *, integer *, doublereal *, integer *, doublereal *, 
+	    integer *, integer *, integer *, doublereal *, integer *);
+    integer minwrk, maxwrk;
+    logical wantvl;
+    doublereal smlnum;
+    integer hswork;
+    logical lquery, wantvr;
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DGEEV computes for an N-by-N real nonsymmetric matrix A, the */
+/*  eigenvalues and, optionally, the left and/or right eigenvectors. */
+
+/*  The right eigenvector v(j) of A satisfies */
+/*                   A * v(j) = lambda(j) * v(j) */
+/*  where lambda(j) is its eigenvalue. */
+/*  The left eigenvector u(j) of A satisfies */
+/*                u(j)**H * A = lambda(j) * u(j)**H */
+/*  where u(j)**H denotes the conjugate transpose of u(j). */
+
+/*  The computed eigenvectors are normalized to have Euclidean norm */
+/*  equal to 1 and largest component real. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOBVL   (input) CHARACTER*1 */
+/*          = 'N': left eigenvectors of A are not computed; */
+/*          = 'V': left eigenvectors of A are computed. */
+
+/*  JOBVR   (input) CHARACTER*1 */
+/*          = 'N': right eigenvectors of A are not computed; */
+/*          = 'V': right eigenvectors of A are computed. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A. N >= 0. */
+
+/*  A       (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/*          On entry, the N-by-N matrix A. */
+/*          On exit, A has been overwritten. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  WR      (output) DOUBLE PRECISION array, dimension (N) */
+/*  WI      (output) DOUBLE PRECISION array, dimension (N) */
+/*          WR and WI contain the real and imaginary parts, */
+/*          respectively, of the computed eigenvalues.  Complex */
+/*          conjugate pairs of eigenvalues appear consecutively */
+/*          with the eigenvalue having the positive imaginary part */
+/*          first. */
+
+/*  VL      (output) DOUBLE PRECISION array, dimension (LDVL,N) */
+/*          If JOBVL = 'V', the left eigenvectors u(j) are stored one */
+/*          after another in the columns of VL, in the same order */
+/*          as their eigenvalues. */
+/*          If JOBVL = 'N', VL is not referenced. */
+/*          If the j-th eigenvalue is real, then u(j) = VL(:,j), */
+/*          the j-th column of VL. */
+/*          If the j-th and (j+1)-st eigenvalues form a complex */
+/*          conjugate pair, then u(j) = VL(:,j) + i*VL(:,j+1) and */
+/*          u(j+1) = VL(:,j) - i*VL(:,j+1). */
+
+/*  LDVL    (input) INTEGER */
+/*          The leading dimension of the array VL.  LDVL >= 1; if */
+/*          JOBVL = 'V', LDVL >= N. */
+
+/*  VR      (output) DOUBLE PRECISION array, dimension (LDVR,N) */
+/*          If JOBVR = 'V', the right eigenvectors v(j) are stored one */
+/*          after another in the columns of VR, in the same order */
+/*          as their eigenvalues. */
+/*          If JOBVR = 'N', VR is not referenced. */
+/*          If the j-th eigenvalue is real, then v(j) = VR(:,j), */
+/*          the j-th column of VR. */
+/*          If the j-th and (j+1)-st eigenvalues form a complex */
+/*          conjugate pair, then v(j) = VR(:,j) + i*VR(:,j+1) and */
+/*          v(j+1) = VR(:,j) - i*VR(:,j+1). */
+
+/*  LDVR    (input) INTEGER */
+/*          The leading dimension of the array VR.  LDVR >= 1, and if */
+/*          JOBVR = 'V', LDVR >= N. */
+
+/*  WORK    (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/*          On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/*  LWORK   (input) INTEGER */
+/*          The dimension of the array WORK.  LWORK >= max(1,3*N), and */
+/*          if JOBVL = 'V' or JOBVR = 'V', LWORK >= 4*N.  For good */
+/*          performance, LWORK must generally be larger. */
+
+/*          If LWORK = -1, then a workspace query is assumed; the routine */
+/*          only calculates the optimal size of the WORK array, returns */
+/*          this value as the first entry of the WORK array, and no error */
+/*          message related to LWORK is issued by XERBLA. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */
+/*          > 0:  if INFO = i, the QR algorithm failed to compute all the */
+/*                eigenvalues, and no eigenvectors have been computed; */
+/*                elements i+1:N of WR and WI contain eigenvalues which */
+/*                have converged. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input arguments */
+
+    /* Parameter adjustments (f2c shifts pointers so 1-based Fortran indexing works) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --wr;
+    --wi;
+    vl_dim1 = *ldvl;
+    vl_offset = 1 + vl_dim1;
+    vl -= vl_offset;
+    vr_dim1 = *ldvr;
+    vr_offset = 1 + vr_dim1;
+    vr -= vr_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1;
+    wantvl = lsame_(jobvl, "V");
+    wantvr = lsame_(jobvr, "V");
+    if (! wantvl && ! lsame_(jobvl, "N")) {
+	*info = -1;
+    } else if (! wantvr && ! lsame_(jobvr, "N")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldvl < 1 || wantvl && *ldvl < *n) {
+	*info = -9;
+    } else if (*ldvr < 1 || wantvr && *ldvr < *n) {
+	*info = -11;
+    }
+
+/*     Compute workspace */
+/*      (Note: Comments in the code beginning "Workspace:" describe the */
+/*       minimal amount of workspace needed at that point in the code, */
+/*       as well as the preferred amount for good performance. */
+/*       NB refers to the optimal block size for the immediately */
+/*       following subroutine, as returned by ILAENV. */
+/*       HSWORK refers to the workspace preferred by DHSEQR, as */
+/*       calculated below. HSWORK is computed assuming ILO=1 and IHI=N, */
+/*       the worst case.) */
+
+    if (*info == 0) {
+	if (*n == 0) {
+	    minwrk = 1;
+	    maxwrk = 1;
+	} else {
+	    maxwrk = (*n << 1) + *n * ilaenv_(&c__1, "DGEHRD", " ", n, &c__1, 
+		    n, &c__0);
+	    if (wantvl) {
+		minwrk = *n << 2;
+/*              Computing MAX */
+		i__1 = maxwrk, i__2 = (*n << 1) + (*n - 1) * ilaenv_(&c__1, 
+			"DORGHR", " ", n, &c__1, n, &c_n1);
+		maxwrk = max(i__1,i__2);
+		dhseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[
+			1], &vl[vl_offset], ldvl, &work[1], &c_n1, info);
+		hswork = (integer) work[1];
+/*              Computing MAX */
+		i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = *
+			n + hswork;
+		maxwrk = max(i__1,i__2);
+/*              Computing MAX */
+		i__1 = maxwrk, i__2 = *n << 2;
+		maxwrk = max(i__1,i__2);
+	    } else if (wantvr) {
+		minwrk = *n << 2;
+/*              Computing MAX */
+		i__1 = maxwrk, i__2 = (*n << 1) + (*n - 1) * ilaenv_(&c__1, 
+			"DORGHR", " ", n, &c__1, n, &c_n1);
+		maxwrk = max(i__1,i__2);
+		dhseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[
+			1], &vr[vr_offset], ldvr, &work[1], &c_n1, info);
+		hswork = (integer) work[1];
+/*              Computing MAX */
+		i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = *
+			n + hswork;
+		maxwrk = max(i__1,i__2);
+/*              Computing MAX */
+		i__1 = maxwrk, i__2 = *n << 2;
+		maxwrk = max(i__1,i__2);
+	    } else {
+		minwrk = *n * 3;
+		dhseqr_("E", "N", n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[
+			1], &vr[vr_offset], ldvr, &work[1], &c_n1, info);
+		hswork = (integer) work[1];
+/*              Computing MAX */
+		i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = *
+			n + hswork;
+		maxwrk = max(i__1,i__2);
+	    }
+	    maxwrk = max(maxwrk,minwrk);
+	}
+	work[1] = (doublereal) maxwrk;
+
+	if (*lwork < minwrk && ! lquery) {
+	    *info = -13;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGEEV ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/*     Quick return if possible (an order-0 matrix needs no computation) */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Get machine constants defining the safe scaling range [SMLNUM,BIGNUM] */
+
+    eps = dlamch_("P");
+    smlnum = dlamch_("S");
+    bignum = 1. / smlnum;
+    dlabad_(&smlnum, &bignum);
+    smlnum = sqrt(smlnum) / eps;
+    bignum = 1. / smlnum;
+
+/*     Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+    anrm = dlange_("M", n, n, &a[a_offset], lda, dum);
+    scalea = FALSE_;
+    if (anrm > 0. && anrm < smlnum) {
+	scalea = TRUE_;
+	cscale = smlnum;
+    } else if (anrm > bignum) {
+	scalea = TRUE_;
+	cscale = bignum;
+    }
+    if (scalea) {
+	dlascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+		ierr);
+    }
+
+/*     Balance the matrix via DGEBAL to improve eigenvalue accuracy */
+/*     (Workspace: need N) */
+
+    ibal = 1;
+    dgebal_("B", n, &a[a_offset], lda, &ilo, &ihi, &work[ibal], &ierr);
+
+/*     Reduce to upper Hessenberg form */
+/*     (Workspace: need 3*N, prefer 2*N+N*NB) */
+
+    itau = ibal + *n;
+    iwrk = itau + *n;
+    i__1 = *lwork - iwrk + 1;
+    dgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1, 
+	     &ierr);
+
+    if (wantvl) {
+
+/*        Want left eigenvectors */
+/*        Copy Householder vectors to VL */
+
+	*(unsigned char *)side = 'L';
+	dlacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl)
+		;
+
+/*        Generate orthogonal matrix in VL */
+/*        (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+
+	i__1 = *lwork - iwrk + 1;
+	dorghr_(n, &ilo, &ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk], 
+		 &i__1, &ierr);
+
+/*        Perform QR iteration, accumulating Schur vectors in VL */
+/*        (Workspace: need N+1, prefer N+HSWORK (see comments) ) */
+
+	iwrk = itau;
+	i__1 = *lwork - iwrk + 1;
+	dhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &
+		vl[vl_offset], ldvl, &work[iwrk], &i__1, info);
+
+	if (wantvr) {
+
+/*           Want left and right eigenvectors */
+/*           Copy Schur vectors to VR */
+
+	    *(unsigned char *)side = 'B';
+	    dlacpy_("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr);
+	}
+
+    } else if (wantvr) {
+
+/*        Want right eigenvectors */
+/*        Copy Householder vectors to VR */
+
+	*(unsigned char *)side = 'R';
+	dlacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr)
+		;
+
+/*        Generate orthogonal matrix in VR */
+/*        (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+
+	i__1 = *lwork - iwrk + 1;
+	dorghr_(n, &ilo, &ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk], 
+		 &i__1, &ierr);
+
+/*        Perform QR iteration, accumulating Schur vectors in VR */
+/*        (Workspace: need N+1, prefer N+HSWORK (see comments) ) */
+
+	iwrk = itau;
+	i__1 = *lwork - iwrk + 1;
+	dhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &
+		vr[vr_offset], ldvr, &work[iwrk], &i__1, info);
+
+    } else {
+
+/*        Compute eigenvalues only */
+/*        (Workspace: need N+1, prefer N+HSWORK (see comments) ) */
+
+	iwrk = itau;
+	i__1 = *lwork - iwrk + 1;
+	dhseqr_("E", "N", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &
+		vr[vr_offset], ldvr, &work[iwrk], &i__1, info);
+    }
+
+/*     If INFO > 0 from DHSEQR (QR iteration failed), skip eigenvectors */
+
+    if (*info > 0) {
+	goto L50;
+    }
+
+    if (wantvl || wantvr) {
+
+/*        Compute left and/or right eigenvectors */
+/*        (Workspace: need 4*N) */
+
+	dtrevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl, 
+		 &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &ierr);
+    }
+
+    if (wantvl) {
+
+/*        Undo balancing of left eigenvectors */
+/*        (Workspace: need N) */
+
+	dgebak_("B", "L", n, &ilo, &ihi, &work[ibal], n, &vl[vl_offset], ldvl, 
+		 &ierr);
+
+/*        Normalize left eigenvectors and make largest component real */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (wi[i__] == 0.) {
+		scl = 1. / dnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+		dscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+	    } else if (wi[i__] > 0.) {
+		d__1 = dnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+		d__2 = dnrm2_(n, &vl[(i__ + 1) * vl_dim1 + 1], &c__1);
+		scl = 1. / dlapy2_(&d__1, &d__2);
+		dscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+		dscal_(n, &scl, &vl[(i__ + 1) * vl_dim1 + 1], &c__1);
+		i__2 = *n;
+		for (k = 1; k <= i__2; ++k) {
+/*                 Computing 2nd power */
+		    d__1 = vl[k + i__ * vl_dim1];
+/*                 Computing 2nd power */
+		    d__2 = vl[k + (i__ + 1) * vl_dim1];
+		    work[iwrk + k - 1] = d__1 * d__1 + d__2 * d__2;
+/* L10: */
+		}
+		k = idamax_(n, &work[iwrk], &c__1);
+		dlartg_(&vl[k + i__ * vl_dim1], &vl[k + (i__ + 1) * vl_dim1], 
+			&cs, &sn, &r__);
+		drot_(n, &vl[i__ * vl_dim1 + 1], &c__1, &vl[(i__ + 1) * 
+			vl_dim1 + 1], &c__1, &cs, &sn);
+		vl[k + (i__ + 1) * vl_dim1] = 0.;
+	    }
+/* L20: */
+	}
+    }
+
+    if (wantvr) {
+
+/*        Undo balancing of right eigenvectors */
+/*        (Workspace: need N) */
+
+	dgebak_("B", "R", n, &ilo, &ihi, &work[ibal], n, &vr[vr_offset], ldvr, 
+		 &ierr);
+
+/*        Normalize right eigenvectors and make largest component real */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (wi[i__] == 0.) {
+		scl = 1. / dnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+		dscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+	    } else if (wi[i__] > 0.) {
+		d__1 = dnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+		d__2 = dnrm2_(n, &vr[(i__ + 1) * vr_dim1 + 1], &c__1);
+		scl = 1. / dlapy2_(&d__1, &d__2);
+		dscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+		dscal_(n, &scl, &vr[(i__ + 1) * vr_dim1 + 1], &c__1);
+		i__2 = *n;
+		for (k = 1; k <= i__2; ++k) {
+/*                 Computing 2nd power */
+		    d__1 = vr[k + i__ * vr_dim1];
+/*                 Computing 2nd power */
+		    d__2 = vr[k + (i__ + 1) * vr_dim1];
+		    work[iwrk + k - 1] = d__1 * d__1 + d__2 * d__2;
+/* L30: */
+		}
+		k = idamax_(n, &work[iwrk], &c__1);
+		dlartg_(&vr[k + i__ * vr_dim1], &vr[k + (i__ + 1) * vr_dim1], 
+			&cs, &sn, &r__);
+		drot_(n, &vr[i__ * vr_dim1 + 1], &c__1, &vr[(i__ + 1) * 
+			vr_dim1 + 1], &c__1, &cs, &sn);
+		vr[k + (i__ + 1) * vr_dim1] = 0.;
+	    }
+/* L40: */
+	}
+    }
+
+/*     Undo scaling if necessary (rescale converged eigenvalues in WR/WI) */
+
+L50:
+    if (scalea) {
+	i__1 = *n - *info;
+/*        Computing MAX */
+	i__3 = *n - *info;
+	i__2 = max(i__3,1);
+	dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[*info + 
+		1], &i__2, &ierr);
+	i__1 = *n - *info;
+/*        Computing MAX */
+	i__3 = *n - *info;
+	i__2 = max(i__3,1);
+	dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[*info + 
+		1], &i__2, &ierr);
+	if (*info > 0) {
+	    i__1 = ilo - 1;
+	    dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[1], 
+		    n, &ierr);
+	    i__1 = ilo - 1;
+	    dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[1], 
+		    n, &ierr);
+	}
+    }
+
+    work[1] = (doublereal) maxwrk;
+    return 0;
+
+/*     End of DGEEV */
+
+} /* dgeev_ */
diff --git a/contrib/libs/clapack/dgeevx.c b/contrib/libs/clapack/dgeevx.c
new file mode 100644
index 0000000000..290776799c
--- /dev/null
+++ b/contrib/libs/clapack/dgeevx.c
@@ -0,0 +1,703 @@
+/* dgeevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dgeevx_(char *balanc, char *jobvl, char *jobvr, char *
+ sense, integer *n, doublereal *a, integer *lda, doublereal *wr,
+ doublereal *wi, doublereal *vl, integer *ldvl, doublereal *vr,
+ integer *ldvr, integer *ilo, integer *ihi, doublereal *scale,
+ doublereal *abnrm, doublereal *rconde, doublereal *rcondv, doublereal
+ *work, integer *lwork, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+ i__2, i__3;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, k;
+ doublereal r__, cs, sn;
+ char job[1];
+ doublereal scl, dum[1], eps;
+ char side[1];
+ doublereal anrm;
+ integer ierr, itau;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ integer iwrk, nout;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ integer icond;
+ extern logical lsame_(char *, char *);
+ extern doublereal dlapy2_(doublereal *, doublereal *);
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dgebak_(
+ char *, char *, integer *, integer *, integer *, doublereal *,
+ integer *, doublereal *, integer *, integer *),
+ dgebal_(char *, integer *, doublereal *, integer *, integer *,
+ integer *, doublereal *, integer *);
+ logical scalea;
+ extern doublereal dlamch_(char *);
+ doublereal cscale;
+ extern doublereal dlange_(char *, integer *, integer *, doublereal *,
+ integer *, doublereal *);
+ extern /* Subroutine */ int dgehrd_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *), dlascl_(char *, integer *, integer *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *),
+ dlartg_(doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *), xerbla_(char *, integer *);
+ logical select[1];
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ doublereal bignum;
+ extern /* Subroutine */ int dorghr_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *), dhseqr_(char *, char *, integer *, integer *, integer
+ *, doublereal *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, integer *), dtrevc_(char *, char *, logical *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, integer *, integer *, doublereal *, integer *), dtrsna_(char *, char *, logical *, integer *, doublereal
+ *, integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *, integer *);
+ integer minwrk, maxwrk;
+ logical wantvl, wntsnb;
+ integer hswork;
+ logical wntsne;
+ doublereal smlnum;
+ logical lquery, wantvr, wntsnn, wntsnv;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGEEVX computes for an N-by-N real nonsymmetric matrix A, the */
+/* eigenvalues and, optionally, the left and/or right eigenvectors. */
+
+/* Optionally also, it computes a balancing transformation to improve */
+/* the conditioning of the eigenvalues and eigenvectors (ILO, IHI, */
+/* SCALE, and ABNRM), reciprocal condition numbers for the eigenvalues */
+/* (RCONDE), and reciprocal condition numbers for the right */
+/* eigenvectors (RCONDV). */
+
+/* The right eigenvector v(j) of A satisfies */
+/* A * v(j) = lambda(j) * v(j) */
+/* where lambda(j) is its eigenvalue. */
+/* The left eigenvector u(j) of A satisfies */
+/* u(j)**H * A = lambda(j) * u(j)**H */
+/* where u(j)**H denotes the conjugate transpose of u(j). */
+
+/* The computed eigenvectors are normalized to have Euclidean norm */
+/* equal to 1 and largest component real. */
+
+/* Balancing a matrix means permuting the rows and columns to make it */
+/* more nearly upper triangular, and applying a diagonal similarity */
+/* transformation D * A * D**(-1), where D is a diagonal matrix, to */
+/* make its rows and columns closer in norm and the condition numbers */
+/* of its eigenvalues and eigenvectors smaller. The computed */
+/* reciprocal condition numbers correspond to the balanced matrix. */
+/* Permuting rows and columns will not change the condition numbers */
+/* (in exact arithmetic) but diagonal scaling will. For further */
+/* explanation of balancing, see section 4.10.2 of the LAPACK */
+/* Users' Guide. */
+
+/* Arguments */
+/* ========= */
+
+/* BALANC (input) CHARACTER*1 */
+/* Indicates how the input matrix should be diagonally scaled */
+/* and/or permuted to improve the conditioning of its */
+/* eigenvalues. */
+/* = 'N': Do not diagonally scale or permute; */
+/* = 'P': Perform permutations to make the matrix more nearly */
+/* upper triangular. Do not diagonally scale; */
+/* = 'S': Diagonally scale the matrix, i.e. replace A by */
+/* D*A*D**(-1), where D is a diagonal matrix chosen */
+/* to make the rows and columns of A more equal in */
+/* norm. Do not permute; */
+/* = 'B': Both diagonally scale and permute A. */
+
+/* Computed reciprocal condition numbers will be for the matrix */
+/* after balancing and/or permuting. Permuting does not change */
+/* condition numbers (in exact arithmetic), but balancing does. */
+
+/* JOBVL (input) CHARACTER*1 */
+/* = 'N': left eigenvectors of A are not computed; */
+/* = 'V': left eigenvectors of A are computed. */
+/* If SENSE = 'E' or 'B', JOBVL must = 'V'. */
+
+/* JOBVR (input) CHARACTER*1 */
+/* = 'N': right eigenvectors of A are not computed; */
+/* = 'V': right eigenvectors of A are computed. */
+/* If SENSE = 'E' or 'B', JOBVR must = 'V'. */
+
+/* SENSE (input) CHARACTER*1 */
+/* Determines which reciprocal condition numbers are computed. */
+/* = 'N': None are computed; */
+/* = 'E': Computed for eigenvalues only; */
+/* = 'V': Computed for right eigenvectors only; */
+/* = 'B': Computed for eigenvalues and right eigenvectors. */
+
+/* If SENSE = 'E' or 'B', both left and right eigenvectors */
+/* must also be computed (JOBVL = 'V' and JOBVR = 'V'). */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the N-by-N matrix A. */
+/* On exit, A has been overwritten. If JOBVL = 'V' or */
+/* JOBVR = 'V', A contains the real Schur form of the balanced */
+/* version of the input matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* WR (output) DOUBLE PRECISION array, dimension (N) */
+/* WI (output) DOUBLE PRECISION array, dimension (N) */
+/* WR and WI contain the real and imaginary parts, */
+/* respectively, of the computed eigenvalues. Complex */
+/* conjugate pairs of eigenvalues will appear consecutively */
+/* with the eigenvalue having the positive imaginary part */
+/* first. */
+
+/* VL (output) DOUBLE PRECISION array, dimension (LDVL,N) */
+/* If JOBVL = 'V', the left eigenvectors u(j) are stored one */
+/* after another in the columns of VL, in the same order */
+/* as their eigenvalues. */
+/* If JOBVL = 'N', VL is not referenced. */
+/* If the j-th eigenvalue is real, then u(j) = VL(:,j), */
+/* the j-th column of VL. */
+/* If the j-th and (j+1)-st eigenvalues form a complex */
+/* conjugate pair, then u(j) = VL(:,j) + i*VL(:,j+1) and */
+/* u(j+1) = VL(:,j) - i*VL(:,j+1). */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the array VL. LDVL >= 1; if */
+/* JOBVL = 'V', LDVL >= N. */
+
+/* VR (output) DOUBLE PRECISION array, dimension (LDVR,N) */
+/* If JOBVR = 'V', the right eigenvectors v(j) are stored one */
+/* after another in the columns of VR, in the same order */
+/* as their eigenvalues. */
+/* If JOBVR = 'N', VR is not referenced. */
+/* If the j-th eigenvalue is real, then v(j) = VR(:,j), */
+/* the j-th column of VR. */
+/* If the j-th and (j+1)-st eigenvalues form a complex */
+/* conjugate pair, then v(j) = VR(:,j) + i*VR(:,j+1) and */
+/* v(j+1) = VR(:,j) - i*VR(:,j+1). */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. LDVR >= 1, and if */
+/* JOBVR = 'V', LDVR >= N. */
+
+/* ILO (output) INTEGER */
+/* IHI (output) INTEGER */
+/* ILO and IHI are integer values determined when A was */
+/* balanced. The balanced A(i,j) = 0 if I > J and */
+/* J = 1,...,ILO-1 or I = IHI+1,...,N. */
+
+/* SCALE (output) DOUBLE PRECISION array, dimension (N) */
+/* Details of the permutations and scaling factors applied */
+/* when balancing A. If P(j) is the index of the row and column */
+/* interchanged with row and column j, and D(j) is the scaling */
+/* factor applied to row and column j, then */
+/* SCALE(J) = P(J), for J = 1,...,ILO-1 */
+/* = D(J), for J = ILO,...,IHI */
+/* = P(J) for J = IHI+1,...,N. */
+/* The order in which the interchanges are made is N to IHI+1, */
+/* then 1 to ILO-1. */
+
+/* ABNRM (output) DOUBLE PRECISION */
+/* The one-norm of the balanced matrix (the maximum */
+/* of the sum of absolute values of elements of any column). */
+
+/* RCONDE (output) DOUBLE PRECISION array, dimension (N) */
+/* RCONDE(j) is the reciprocal condition number of the j-th */
+/* eigenvalue. */
+
+/* RCONDV (output) DOUBLE PRECISION array, dimension (N) */
+/* RCONDV(j) is the reciprocal condition number of the j-th */
+/* right eigenvector. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. If SENSE = 'N' or 'E', */
+/* LWORK >= max(1,2*N), and if JOBVL = 'V' or JOBVR = 'V', */
+/* LWORK >= 3*N. If SENSE = 'V' or 'B', LWORK >= N*(N+6). */
+/* For good performance, LWORK must generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (2*N-2) */
+/* If SENSE = 'N' or 'E', not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, the QR algorithm failed to compute all the */
+/* eigenvalues, and no eigenvectors or condition numbers */
+/* have been computed; elements 1:ILO-1 and i+1:N of WR */
+/* and WI contain eigenvalues which have converged. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --wr;
+ --wi;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --scale;
+ --rconde;
+ --rcondv;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ wantvl = lsame_(jobvl, "V");
+ wantvr = lsame_(jobvr, "V");
+ wntsnn = lsame_(sense, "N");
+ wntsne = lsame_(sense, "E");
+ wntsnv = lsame_(sense, "V");
+ wntsnb = lsame_(sense, "B");
+ if (! (lsame_(balanc, "N") || lsame_(balanc, "S") || lsame_(balanc, "P")
+ || lsame_(balanc, "B"))) {
+ *info = -1;
+ } else if (! wantvl && ! lsame_(jobvl, "N")) {
+ *info = -2;
+ } else if (! wantvr && ! lsame_(jobvr, "N")) {
+ *info = -3;
+ } else if (! (wntsnn || wntsne || wntsnb || wntsnv) || (wntsne || wntsnb)
+ && ! (wantvl && wantvr)) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldvl < 1 || wantvl && *ldvl < *n) {
+ *info = -11;
+ } else if (*ldvr < 1 || wantvr && *ldvr < *n) {
+ *info = -13;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV. */
+/* HSWORK refers to the workspace preferred by DHSEQR, as */
+/* calculated below. HSWORK is computed assuming ILO=1 and IHI=N, */
+/* the worst case.) */
+
+ if (*info == 0) {
+ if (*n == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ } else {
+ maxwrk = *n + *n * ilaenv_(&c__1, "DGEHRD", " ", n, &c__1, n, &
+ c__0);
+
+ if (wantvl) {
+ dhseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[
+ 1], &vl[vl_offset], ldvl, &work[1], &c_n1, info);
+ } else if (wantvr) {
+ dhseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[
+ 1], &vr[vr_offset], ldvr, &work[1], &c_n1, info);
+ } else {
+ if (wntsnn) {
+ dhseqr_("E", "N", n, &c__1, n, &a[a_offset], lda, &wr[1],
+ &wi[1], &vr[vr_offset], ldvr, &work[1], &c_n1,
+ info);
+ } else {
+ dhseqr_("S", "N", n, &c__1, n, &a[a_offset], lda, &wr[1],
+ &wi[1], &vr[vr_offset], ldvr, &work[1], &c_n1,
+ info);
+ }
+ }
+ hswork = (integer) work[1];
+
+ if (! wantvl && ! wantvr) {
+ minwrk = *n << 1;
+ if (! wntsnn) {
+/* Computing MAX */
+ i__1 = minwrk, i__2 = *n * *n + *n * 6;
+ minwrk = max(i__1,i__2);
+ }
+ maxwrk = max(maxwrk,hswork);
+ if (! wntsnn) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * *n + *n * 6;
+ maxwrk = max(i__1,i__2);
+ }
+ } else {
+ minwrk = *n * 3;
+ if (! wntsnn && ! wntsne) {
+/* Computing MAX */
+ i__1 = minwrk, i__2 = *n * *n + *n * 6;
+ minwrk = max(i__1,i__2);
+ }
+ maxwrk = max(maxwrk,hswork);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + (*n - 1) * ilaenv_(&c__1, "DORGHR",
+ " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ if (! wntsnn && ! wntsne) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * *n + *n * 6;
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * 3;
+ maxwrk = max(i__1,i__2);
+ }
+ maxwrk = max(maxwrk,minwrk);
+ }
+ work[1] = (doublereal) maxwrk;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -21;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGEEVX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = dlamch_("P");
+ smlnum = dlamch_("S");
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ icond = 0;
+ anrm = dlange_("M", n, n, &a[a_offset], lda, dum);
+ scalea = FALSE_;
+ if (anrm > 0. && anrm < smlnum) {
+ scalea = TRUE_;
+ cscale = smlnum;
+ } else if (anrm > bignum) {
+ scalea = TRUE_;
+ cscale = bignum;
+ }
+ if (scalea) {
+ dlascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Balance the matrix and compute ABNRM */
+
+ dgebal_(balanc, n, &a[a_offset], lda, ilo, ihi, &scale[1], &ierr);
+ *abnrm = dlange_("1", n, n, &a[a_offset], lda, dum);
+ if (scalea) {
+ dum[0] = *abnrm;
+ dlascl_("G", &c__0, &c__0, &cscale, &anrm, &c__1, &c__1, dum, &c__1, &
+ ierr);
+ *abnrm = dum[0];
+ }
+
+/* Reduce to upper Hessenberg form */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ itau = 1;
+ iwrk = itau + *n;
+ i__1 = *lwork - iwrk + 1;
+ dgehrd_(n, ilo, ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1, &
+ ierr);
+
+ if (wantvl) {
+
+/* Want left eigenvectors */
+/* Copy Householder vectors to VL */
+
+ *(unsigned char *)side = 'L';
+ dlacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl)
+ ;
+
+/* Generate orthogonal matrix in VL */
+/* (Workspace: need 2*N-1, prefer N+(N-1)*NB) */
+
+ i__1 = *lwork - iwrk + 1;
+ dorghr_(n, ilo, ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk], &
+ i__1, &ierr);
+
+/* Perform QR iteration, accumulating Schur vectors in VL */
+/* (Workspace: need 1, prefer HSWORK (see comments) ) */
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ dhseqr_("S", "V", n, ilo, ihi, &a[a_offset], lda, &wr[1], &wi[1], &vl[
+ vl_offset], ldvl, &work[iwrk], &i__1, info);
+
+ if (wantvr) {
+
+/* Want left and right eigenvectors */
+/* Copy Schur vectors to VR */
+
+ *(unsigned char *)side = 'B';
+ dlacpy_("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr);
+ }
+
+ } else if (wantvr) {
+
+/* Want right eigenvectors */
+/* Copy Householder vectors to VR */
+
+ *(unsigned char *)side = 'R';
+ dlacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr)
+ ;
+
+/* Generate orthogonal matrix in VR */
+/* (Workspace: need 2*N-1, prefer N+(N-1)*NB) */
+
+ i__1 = *lwork - iwrk + 1;
+ dorghr_(n, ilo, ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk], &
+ i__1, &ierr);
+
+/* Perform QR iteration, accumulating Schur vectors in VR */
+/* (Workspace: need 1, prefer HSWORK (see comments) ) */
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ dhseqr_("S", "V", n, ilo, ihi, &a[a_offset], lda, &wr[1], &wi[1], &vr[
+ vr_offset], ldvr, &work[iwrk], &i__1, info);
+
+ } else {
+
+/* Compute eigenvalues only */
+/* If condition numbers desired, compute Schur form */
+
+ if (wntsnn) {
+ *(unsigned char *)job = 'E';
+ } else {
+ *(unsigned char *)job = 'S';
+ }
+
+/* (Workspace: need 1, prefer HSWORK (see comments) ) */
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ dhseqr_(job, "N", n, ilo, ihi, &a[a_offset], lda, &wr[1], &wi[1], &vr[
+ vr_offset], ldvr, &work[iwrk], &i__1, info);
+ }
+
+/* If INFO > 0 from DHSEQR, then quit */
+
+ if (*info > 0) {
+ goto L50;
+ }
+
+ if (wantvl || wantvr) {
+
+/* Compute left and/or right eigenvectors */
+/* (Workspace: need 3*N) */
+
+ dtrevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl,
+ &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &ierr);
+ }
+
+/* Compute condition numbers if desired */
+/* (Workspace: need N*N+6*N unless SENSE = 'E') */
+
+ if (! wntsnn) {
+ dtrsna_(sense, "A", select, n, &a[a_offset], lda, &vl[vl_offset],
+ ldvl, &vr[vr_offset], ldvr, &rconde[1], &rcondv[1], n, &nout,
+ &work[iwrk], n, &iwork[1], &icond);
+ }
+
+ if (wantvl) {
+
+/* Undo balancing of left eigenvectors */
+
+ dgebak_(balanc, "L", n, ilo, ihi, &scale[1], n, &vl[vl_offset], ldvl,
+ &ierr);
+
+/* Normalize left eigenvectors and make largest component real */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (wi[i__] == 0.) {
+ scl = 1. / dnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+ dscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+ } else if (wi[i__] > 0.) {
+ d__1 = dnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+ d__2 = dnrm2_(n, &vl[(i__ + 1) * vl_dim1 + 1], &c__1);
+ scl = 1. / dlapy2_(&d__1, &d__2);
+ dscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+ dscal_(n, &scl, &vl[(i__ + 1) * vl_dim1 + 1], &c__1);
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+/* Computing 2nd power */
+ d__1 = vl[k + i__ * vl_dim1];
+/* Computing 2nd power */
+ d__2 = vl[k + (i__ + 1) * vl_dim1];
+ work[k] = d__1 * d__1 + d__2 * d__2;
+/* L10: */
+ }
+ k = idamax_(n, &work[1], &c__1);
+ dlartg_(&vl[k + i__ * vl_dim1], &vl[k + (i__ + 1) * vl_dim1],
+ &cs, &sn, &r__);
+ drot_(n, &vl[i__ * vl_dim1 + 1], &c__1, &vl[(i__ + 1) *
+ vl_dim1 + 1], &c__1, &cs, &sn);
+ vl[k + (i__ + 1) * vl_dim1] = 0.;
+ }
+/* L20: */
+ }
+ }
+
+ if (wantvr) {
+
+/* Undo balancing of right eigenvectors */
+
+ dgebak_(balanc, "R", n, ilo, ihi, &scale[1], n, &vr[vr_offset], ldvr,
+ &ierr);
+
+/* Normalize right eigenvectors and make largest component real */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (wi[i__] == 0.) {
+ scl = 1. / dnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+ dscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+ } else if (wi[i__] > 0.) {
+ d__1 = dnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+ d__2 = dnrm2_(n, &vr[(i__ + 1) * vr_dim1 + 1], &c__1);
+ scl = 1. / dlapy2_(&d__1, &d__2);
+ dscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+ dscal_(n, &scl, &vr[(i__ + 1) * vr_dim1 + 1], &c__1);
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+/* Computing 2nd power */
+ d__1 = vr[k + i__ * vr_dim1];
+/* Computing 2nd power */
+ d__2 = vr[k + (i__ + 1) * vr_dim1];
+ work[k] = d__1 * d__1 + d__2 * d__2;
+/* L30: */
+ }
+ k = idamax_(n, &work[1], &c__1);
+ dlartg_(&vr[k + i__ * vr_dim1], &vr[k + (i__ + 1) * vr_dim1],
+ &cs, &sn, &r__);
+ drot_(n, &vr[i__ * vr_dim1 + 1], &c__1, &vr[(i__ + 1) *
+ vr_dim1 + 1], &c__1, &cs, &sn);
+ vr[k + (i__ + 1) * vr_dim1] = 0.;
+ }
+/* L40: */
+ }
+ }
+
+/* Undo scaling if necessary */
+
+L50:
+ if (scalea) {
+ i__1 = *n - *info;
+/* Computing MAX */
+ i__3 = *n - *info;
+ i__2 = max(i__3,1);
+ dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[*info +
+ 1], &i__2, &ierr);
+ i__1 = *n - *info;
+/* Computing MAX */
+ i__3 = *n - *info;
+ i__2 = max(i__3,1);
+ dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[*info +
+ 1], &i__2, &ierr);
+ if (*info == 0) {
+ if ((wntsnv || wntsnb) && icond == 0) {
+ dlascl_("G", &c__0, &c__0, &cscale, &anrm, n, &c__1, &rcondv[
+ 1], n, &ierr);
+ }
+ } else {
+ i__1 = *ilo - 1;
+ dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[1],
+ n, &ierr);
+ i__1 = *ilo - 1;
+ dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[1],
+ n, &ierr);
+ }
+ }
+
+ work[1] = (doublereal) maxwrk;
+ return 0;
+
+/* End of DGEEVX */
+
+} /* dgeevx_ */
diff --git a/contrib/libs/clapack/dgegs.c b/contrib/libs/clapack/dgegs.c
new file mode 100644
index 0000000000..cb409fbe18
--- /dev/null
+++ b/contrib/libs/clapack/dgegs.c
@@ -0,0 +1,548 @@
+/* dgegs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b36 = 0.;
+static doublereal c_b37 = 1.;
+
+/* Subroutine */ int dgegs_(char *jobvsl, char *jobvsr, integer *n,
+	doublereal *a, integer *lda, doublereal *b, integer *ldb, doublereal *
+	alphar, doublereal *alphai, doublereal *beta, doublereal *vsl,
+	integer *ldvsl, doublereal *vsr, integer *ldvsr, doublereal *work,
+	integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, vsl_dim1, vsl_offset,
+	    vsr_dim1, vsr_offset, i__1, i__2;
+
+    /* Local variables */
+    integer nb, nb1, nb2, nb3, ihi, ilo;
+    doublereal eps, anrm, bnrm;
+    integer itau, lopt;
+    extern logical lsame_(char *, char *);
+    integer ileft, iinfo, icols;
+    logical ilvsl;
+    integer iwork;
+    logical ilvsr;
+    integer irows;
+    extern /* Subroutine */ int dggbak_(char *, char *, integer *, integer *,
+	     integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, integer *), dggbal_(char *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, doublereal *, integer *);
+    extern doublereal dlamch_(char *), dlange_(char *, integer *,
+	    integer *, doublereal *, integer *, doublereal *);
+    extern /* Subroutine */ int dgghrd_(char *, char *, integer *, integer *,
+	     integer *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, integer *), dlascl_(char *, integer *, integer *, doublereal
+	    *, doublereal *, integer *, integer *, doublereal *, integer *,
+	    integer *);
+    logical ilascl, ilbscl;
+    extern /* Subroutine */ int dgeqrf_(integer *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *, integer *),
+	    dlacpy_(char *, integer *, integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    doublereal safmin;
+    extern /* Subroutine */ int dlaset_(char *, integer *, integer *,
+	    doublereal *, doublereal *, doublereal *, integer *),
+	    xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    doublereal bignum;
+    extern /* Subroutine */ int dhgeqz_(char *, char *, char *, integer *,
+	    integer *, integer *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+	     integer *, doublereal *, integer *, doublereal *, integer *,
+	    integer *);
+    integer ijobvl, iright, ijobvr;
+    extern /* Subroutine */ int dorgqr_(integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    integer *);
+    doublereal anrmto;
+    integer lwkmin;
+    doublereal bnrmto;
+    extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, integer *);
+    doublereal smlnum;
+    integer lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine DGGES. */
+
+/* DGEGS computes the eigenvalues, real Schur form, and, optionally, */
+/* left and or/right Schur vectors of a real matrix pair (A,B). */
+/* Given two square matrices A and B, the generalized real Schur */
+/* factorization has the form */
+
+/* A = Q*S*Z**T, B = Q*T*Z**T */
+
+/* where Q and Z are orthogonal matrices, T is upper triangular, and S */
+/* is an upper quasi-triangular matrix with 1-by-1 and 2-by-2 diagonal */
+/* blocks, the 2-by-2 blocks corresponding to complex conjugate pairs */
+/* of eigenvalues of (A,B). The columns of Q are the left Schur vectors */
+/* and the columns of Z are the right Schur vectors. */
+
+/* If only the eigenvalues of (A,B) are needed, the driver routine */
+/* DGEGV should be used instead. See DGEGV for a description of the */
+/* eigenvalues of the generalized nonsymmetric eigenvalue problem */
+/* (GNEP). */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVSL (input) CHARACTER*1 */
+/* = 'N': do not compute the left Schur vectors; */
+/* = 'V': compute the left Schur vectors (returned in VSL). */
+
+/* JOBVSR (input) CHARACTER*1 */
+/* = 'N': do not compute the right Schur vectors; */
+/* = 'V': compute the right Schur vectors (returned in VSR). */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VSL, and VSR. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA, N) */
+/* On entry, the matrix A. */
+/* On exit, the upper quasi-triangular matrix S from the */
+/* generalized real Schur factorization. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB, N) */
+/* On entry, the matrix B. */
+/* On exit, the upper triangular matrix T from the generalized */
+/* real Schur factorization. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* ALPHAR (output) DOUBLE PRECISION array, dimension (N) */
+/* The real parts of each scalar alpha defining an eigenvalue */
+/* of GNEP. */
+
+/* ALPHAI (output) DOUBLE PRECISION array, dimension (N) */
+/* The imaginary parts of each scalar alpha defining an */
+/* eigenvalue of GNEP. If ALPHAI(j) is zero, then the j-th */
+/* eigenvalue is real; if positive, then the j-th and (j+1)-st */
+/* eigenvalues are a complex conjugate pair, with */
+/* ALPHAI(j+1) = -ALPHAI(j). */
+
+/* BETA (output) DOUBLE PRECISION array, dimension (N) */
+/* The scalars beta that define the eigenvalues of GNEP. */
+/* Together, the quantities alpha = (ALPHAR(j),ALPHAI(j)) and */
+/* beta = BETA(j) represent the j-th eigenvalue of the matrix */
+/* pair (A,B), in one of the forms lambda = alpha/beta or */
+/* mu = beta/alpha. Since either lambda or mu may overflow, */
+/* they should not, in general, be computed. */
+
+/* VSL (output) DOUBLE PRECISION array, dimension (LDVSL,N) */
+/* If JOBVSL = 'V', the matrix of left Schur vectors Q. */
+/* Not referenced if JOBVSL = 'N'. */
+
+/* LDVSL (input) INTEGER */
+/* The leading dimension of the matrix VSL. LDVSL >=1, and */
+/* if JOBVSL = 'V', LDVSL >= N. */
+
+/* VSR (output) DOUBLE PRECISION array, dimension (LDVSR,N) */
+/* If JOBVSR = 'V', the matrix of right Schur vectors Z. */
+/* Not referenced if JOBVSR = 'N'. */
+
+/* LDVSR (input) INTEGER */
+/* The leading dimension of the matrix VSR. LDVSR >= 1, and */
+/* if JOBVSR = 'V', LDVSR >= N. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,4*N). */
+/* For good performance, LWORK must generally be larger. */
+/* To compute the optimal value of LWORK, call ILAENV to get */
+/* blocksizes (for DGEQRF, DORMQR, and DORGQR.) Then compute: */
+/* NB -- MAX of the blocksizes for DGEQRF, DORMQR, and DORGQR */
+/* The optimal LWORK is 2*N + N*(NB+1). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1,...,N: */
+/* The QZ iteration failed. (A,B) are not in Schur */
+/* form, but ALPHAR(j), ALPHAI(j), and BETA(j) should */
+/* be correct for j=INFO+1,...,N. */
+/* > N: errors that usually indicate LAPACK problems: */
+/* =N+1: error return from DGGBAL */
+/* =N+2: error return from DGEQRF */
+/* =N+3: error return from DORMQR */
+/* =N+4: error return from DORGQR */
+/* =N+5: error return from DGGHRD */
+/* =N+6: error return from DHGEQZ (other than failed */
+/* iteration) */
+/* =N+7: error return from DGGBAK (computing VSL) */
+/* =N+8: error return from DGGBAK (computing VSR) */
+/* =N+9: error return from DLASCL (various places) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+    /* Parameter adjustments */
+/* (f2c convention: shift each array pointer so 1-based Fortran */
+/* indexing, e.g. a[i + j*a_dim1], works unchanged in C) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --alphar;
+    --alphai;
+    --beta;
+    vsl_dim1 = *ldvsl;
+    vsl_offset = 1 + vsl_dim1;
+    vsl -= vsl_offset;
+    vsr_dim1 = *ldvsr;
+    vsr_offset = 1 + vsr_dim1;
+    vsr -= vsr_offset;
+    --work;
+
+    /* Function Body */
+/* Decode JOBVSL: ijobvl <= 0 flags an illegal value for the tests below */
+    if (lsame_(jobvsl, "N")) {
+	ijobvl = 1;
+	ilvsl = FALSE_;
+    } else if (lsame_(jobvsl, "V")) {
+	ijobvl = 2;
+	ilvsl = TRUE_;
+    } else {
+	ijobvl = -1;
+	ilvsl = FALSE_;
+    }
+
+/* Decode JOBVSR likewise */
+    if (lsame_(jobvsr, "N")) {
+	ijobvr = 1;
+	ilvsr = FALSE_;
+    } else if (lsame_(jobvsr, "V")) {
+	ijobvr = 2;
+	ilvsr = TRUE_;
+    } else {
+	ijobvr = -1;
+	ilvsr = FALSE_;
+    }
+
+/* Test the input arguments */
+
+/* Minimum workspace is the documented LWORK >= max(1,4*N) */
+/* Computing MAX */
+    i__1 = *n << 2;
+    lwkmin = max(i__1,1);
+    lwkopt = lwkmin;
+    work[1] = (doublereal) lwkopt;
+    lquery = *lwork == -1;
+    *info = 0;
+    if (ijobvl <= 0) {
+	*info = -1;
+    } else if (ijobvr <= 0) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    } else if (*ldvsl < 1 || ilvsl && *ldvsl < *n) {
+	*info = -12;
+    } else if (*ldvsr < 1 || ilvsr && *ldvsr < *n) {
+	*info = -14;
+    } else if (*lwork < lwkmin && ! lquery) {
+	*info = -16;
+    }
+
+/* Estimate the optimal workspace from the block sizes of DGEQRF, */
+/* DORMQR and DORGQR: LOPT = 2*N + N*(NB+1) (see LWORK docs above) */
+    if (*info == 0) {
+	nb1 = ilaenv_(&c__1, "DGEQRF", " ", n, n, &c_n1, &c_n1);
+	nb2 = ilaenv_(&c__1, "DORMQR", " ", n, n, n, &c_n1);
+	nb3 = ilaenv_(&c__1, "DORGQR", " ", n, n, n, &c_n1);
+/* Computing MAX */
+	i__1 = max(nb1,nb2);
+	nb = max(i__1,nb3);
+	lopt = (*n << 1) + *n * (nb + 1);
+	work[1] = (doublereal) lopt;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGEGS ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Get machine constants */
+
+    eps = dlamch_("E") * dlamch_("B");
+    safmin = dlamch_("S");
+    smlnum = *n * safmin / eps;
+    bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+    anrm = dlange_("M", n, n, &a[a_offset], lda, &work[1]);
+    ilascl = FALSE_;
+    if (anrm > 0. && anrm < smlnum) {
+	anrmto = smlnum;
+	ilascl = TRUE_;
+    } else if (anrm > bignum) {
+	anrmto = bignum;
+	ilascl = TRUE_;
+    }
+
+    if (ilascl) {
+	dlascl_("G", &c_n1, &c_n1, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+    }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+    bnrm = dlange_("M", n, n, &b[b_offset], ldb, &work[1]);
+    ilbscl = FALSE_;
+    if (bnrm > 0. && bnrm < smlnum) {
+	bnrmto = smlnum;
+	ilbscl = TRUE_;
+    } else if (bnrm > bignum) {
+	bnrmto = bignum;
+	ilbscl = TRUE_;
+    }
+
+    if (ilbscl) {
+	dlascl_("G", &c_n1, &c_n1, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+    }
+
+/* Permute the matrix to make it more nearly triangular */
+/* Workspace layout: (2*N words -- "work..." not actually used) */
+/* left_permutation, right_permutation, work... */
+
+    ileft = 1;
+    iright = *n + 1;
+    iwork = iright + *n;
+    dggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &work[
+	    ileft], &work[iright], &work[iwork], &iinfo);
+    if (iinfo != 0) {
+	*info = *n + 1;
+	goto L10;
+    }
+
+/* Reduce B to triangular form, and initialize VSL and/or VSR */
+/* Workspace layout: ("work..." must have at least N words) */
+/* left_permutation, right_permutation, tau, work... */
+
+    irows = ihi + 1 - ilo;
+    icols = *n + 1 - ilo;
+    itau = iwork;
+    iwork = itau + irows;
+    i__1 = *lwork + 1 - iwork;
+    dgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+	    iwork], &i__1, &iinfo);
+/* On success each subroutine reports its optimal workspace in */
+/* work[iwork]; fold it into LWKOPT before checking for failure */
+    if (iinfo >= 0) {
+/* Computing MAX */
+	i__1 = lwkopt, i__2 = (integer) work[iwork] + iwork - 1;
+	lwkopt = max(i__1,i__2);
+    }
+    if (iinfo != 0) {
+	*info = *n + 2;
+	goto L10;
+    }
+
+/* Apply the orthogonal factor of B's QR to A: A := Q**T * A */
+    i__1 = *lwork + 1 - iwork;
+    dormqr_("L", "T", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+	    work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwork], &i__1, &
+	    iinfo);
+    if (iinfo >= 0) {
+/* Computing MAX */
+	i__1 = lwkopt, i__2 = (integer) work[iwork] + iwork - 1;
+	lwkopt = max(i__1,i__2);
+    }
+    if (iinfo != 0) {
+	*info = *n + 3;
+	goto L10;
+    }
+
+/* VSL starts as the identity, then the Householder vectors stored */
+/* below B's diagonal are expanded into the explicit Q factor */
+    if (ilvsl) {
+	dlaset_("Full", n, n, &c_b36, &c_b37, &vsl[vsl_offset], ldvsl);
+	i__1 = irows - 1;
+	i__2 = irows - 1;
+	dlacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vsl[ilo
+		+ 1 + ilo * vsl_dim1], ldvsl);
+	i__1 = *lwork + 1 - iwork;
+	dorgqr_(&irows, &irows, &irows, &vsl[ilo + ilo * vsl_dim1], ldvsl, &
+		work[itau], &work[iwork], &i__1, &iinfo);
+	if (iinfo >= 0) {
+/* Computing MAX */
+	    i__1 = lwkopt, i__2 = (integer) work[iwork] + iwork - 1;
+	    lwkopt = max(i__1,i__2);
+	}
+	if (iinfo != 0) {
+	    *info = *n + 4;
+	    goto L10;
+	}
+    }
+
+    if (ilvsr) {
+	dlaset_("Full", n, n, &c_b36, &c_b37, &vsr[vsr_offset], ldvsr);
+    }
+
+/* Reduce to generalized Hessenberg form */
+
+    dgghrd_(jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset],
+	    ldb, &vsl[vsl_offset], ldvsl, &vsr[vsr_offset], ldvsr, &iinfo);
+    if (iinfo != 0) {
+	*info = *n + 5;
+	goto L10;
+    }
+
+/* Perform QZ algorithm, computing Schur vectors if desired */
+/* Workspace layout: ("work..." must have at least 1 word) */
+/* left_permutation, right_permutation, work... */
+
+    iwork = itau;
+    i__1 = *lwork + 1 - iwork;
+    dhgeqz_("S", jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+	    b_offset], ldb, &alphar[1], &alphai[1], &beta[1], &vsl[vsl_offset]
+, ldvsl, &vsr[vsr_offset], ldvsr, &work[iwork], &i__1, &iinfo);
+    if (iinfo >= 0) {
+/* Computing MAX */
+	i__1 = lwkopt, i__2 = (integer) work[iwork] + iwork - 1;
+	lwkopt = max(i__1,i__2);
+    }
+/* Map DHGEQZ's status: 1..N is a failed QZ iteration (pass through), */
+/* N+1..2*N is re-based to 1..N, anything else becomes INFO = N+6 */
+    if (iinfo != 0) {
+	if (iinfo > 0 && iinfo <= *n) {
+	    *info = iinfo;
+	} else if (iinfo > *n && iinfo <= *n << 1) {
+	    *info = iinfo - *n;
+	} else {
+	    *info = *n + 6;
+	}
+	goto L10;
+    }
+
+/* Apply permutation to VSL and VSR */
+
+    if (ilvsl) {
+	dggbak_("P", "L", n, &ilo, &ihi, &work[ileft], &work[iright], n, &vsl[
+		vsl_offset], ldvsl, &iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 7;
+	    goto L10;
+	}
+    }
+    if (ilvsr) {
+	dggbak_("P", "R", n, &ilo, &ihi, &work[ileft], &work[iright], n, &vsr[
+		vsr_offset], ldvsr, &iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 8;
+	    goto L10;
+	}
+    }
+
+/* Undo scaling */
+
+    if (ilascl) {
+	dlascl_("H", &c_n1, &c_n1, &anrmto, &anrm, n, n, &a[a_offset], lda, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+	dlascl_("G", &c_n1, &c_n1, &anrmto, &anrm, n, &c__1, &alphar[1], n, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+	dlascl_("G", &c_n1, &c_n1, &anrmto, &anrm, n, &c__1, &alphai[1], n, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+    }
+
+    if (ilbscl) {
+	dlascl_("U", &c_n1, &c_n1, &bnrmto, &bnrm, n, n, &b[b_offset], ldb, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+	dlascl_("G", &c_n1, &c_n1, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+    }
+
+/* Common exit: report the best optimal-workspace estimate in WORK(1) */
+L10:
+    work[1] = (doublereal) lwkopt;
+
+    return 0;
+
+/* End of DGEGS */
+
+} /* dgegs_ */
diff --git a/contrib/libs/clapack/dgegv.c b/contrib/libs/clapack/dgegv.c
new file mode 100644
index 0000000000..8551895e4c
--- /dev/null
+++ b/contrib/libs/clapack/dgegv.c
@@ -0,0 +1,842 @@
+/* dgegv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c passes every Fortran argument by reference, so each literal used
+   as an actual argument needs addressable storage.  c__1/c_n1 are the
+   integers 1 and -1; c_b27/c_b38 are the doubles 1.0 and 0.0. */
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b27 = 1.;
+static doublereal c_b38 = 0.;
+
+/* Subroutine */ int dgegv_(char *jobvl, char *jobvr, integer *n, doublereal *
+	a, integer *lda, doublereal *b, integer *ldb, doublereal *alphar,
+	doublereal *alphai, doublereal *beta, doublereal *vl, integer *ldvl,
+	doublereal *vr, integer *ldvr, doublereal *work, integer *lwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, vl_dim1, vl_offset, vr_dim1,
+	    vr_offset, i__1, i__2;
+    doublereal d__1, d__2, d__3, d__4;
+
+    /* Local variables */
+    integer jc, nb, in, jr, nb1, nb2, nb3, ihi, ilo;
+    doublereal eps;
+    logical ilv;
+    doublereal absb, anrm, bnrm;
+    integer itau;
+    doublereal temp;
+    logical ilvl, ilvr;
+    integer lopt;
+    doublereal anrm1, anrm2, bnrm1, bnrm2, absai, scale, absar, sbeta;
+    extern logical lsame_(char *, char *);
+    integer ileft, iinfo, icols, iwork, irows;
+    extern /* Subroutine */ int dggbak_(char *, char *, integer *, integer *,
+	     integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, integer *), dggbal_(char *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, doublereal *, integer *);
+    extern doublereal dlamch_(char *), dlange_(char *, integer *,
+	    integer *, doublereal *, integer *, doublereal *);
+    doublereal salfai;
+    extern /* Subroutine */ int dgghrd_(char *, char *, integer *, integer *,
+	     integer *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, integer *), dlascl_(char *, integer *, integer *, doublereal
+	    *, doublereal *, integer *, integer *, doublereal *, integer *,
+	    integer *);
+    doublereal salfar;
+    extern /* Subroutine */ int dgeqrf_(integer *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *, integer *),
+	    dlacpy_(char *, integer *, integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    doublereal safmin;
+    extern /* Subroutine */ int dlaset_(char *, integer *, integer *,
+	    doublereal *, doublereal *, doublereal *, integer *);
+    doublereal safmax;
+    char chtemp[1];
+    logical ldumma[1];
+    extern /* Subroutine */ int dhgeqz_(char *, char *, char *, integer *,
+	    integer *, integer *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+	     integer *, doublereal *, integer *, doublereal *, integer *,
+	    integer *), dtgevc_(char *, char *,
+	    logical *, integer *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, integer *, doublereal *, integer *,
+	    integer *, integer *, doublereal *, integer *),
+	    xerbla_(char *, integer *);
+    integer ijobvl, iright;
+    logical ilimit;
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer ijobvr;
+    extern /* Subroutine */ int dorgqr_(integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    integer *);
+    doublereal onepls;
+    integer lwkmin;
+    extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, integer *);
+    integer lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine DGGEV. */
+
+/* DGEGV computes the eigenvalues and, optionally, the left and/or right */
+/* eigenvectors of a real matrix pair (A,B). */
+/* Given two square matrices A and B, */
+/* the generalized nonsymmetric eigenvalue problem (GNEP) is to find the */
+/* eigenvalues lambda and corresponding (non-zero) eigenvectors x such */
+/* that */
+
+/* A*x = lambda*B*x. */
+
+/* An alternate form is to find the eigenvalues mu and corresponding */
+/* eigenvectors y such that */
+
+/* mu*A*y = B*y. */
+
+/* These two forms are equivalent with mu = 1/lambda and x = y if */
+/* neither lambda nor mu is zero. In order to deal with the case that */
+/* lambda or mu is zero or small, two values alpha and beta are returned */
+/* for each eigenvalue, such that lambda = alpha/beta and */
+/* mu = beta/alpha. */
+
+/* The vectors x and y in the above equations are right eigenvectors of */
+/* the matrix pair (A,B). Vectors u and v satisfying */
+
+/* u**H*A = lambda*u**H*B or mu*v**H*A = v**H*B */
+
+/* are left eigenvectors of (A,B). */
+
+/* Note: this routine performs "full balancing" on A and B -- see */
+/* "Further Details", below. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVL (input) CHARACTER*1 */
+/* = 'N': do not compute the left generalized eigenvectors; */
+/* = 'V': compute the left generalized eigenvectors (returned */
+/* in VL). */
+
+/* JOBVR (input) CHARACTER*1 */
+/* = 'N': do not compute the right generalized eigenvectors; */
+/* = 'V': compute the right generalized eigenvectors (returned */
+/* in VR). */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VL, and VR. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA, N) */
+/* On entry, the matrix A. */
+/* If JOBVL = 'V' or JOBVR = 'V', then on exit A */
+/* contains the real Schur form of A from the generalized Schur */
+/* factorization of the pair (A,B) after balancing. */
+/* If no eigenvectors were computed, then only the diagonal */
+/* blocks from the Schur form will be correct. See DGGHRD and */
+/* DHGEQZ for details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB, N) */
+/* On entry, the matrix B. */
+/* If JOBVL = 'V' or JOBVR = 'V', then on exit B contains the */
+/* upper triangular matrix obtained from B in the generalized */
+/* Schur factorization of the pair (A,B) after balancing. */
+/* If no eigenvectors were computed, then only those elements of */
+/* B corresponding to the diagonal blocks from the Schur form of */
+/* A will be correct. See DGGHRD and DHGEQZ for details. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* ALPHAR (output) DOUBLE PRECISION array, dimension (N) */
+/* The real parts of each scalar alpha defining an eigenvalue of */
+/* GNEP. */
+
+/* ALPHAI (output) DOUBLE PRECISION array, dimension (N) */
+/* The imaginary parts of each scalar alpha defining an */
+/* eigenvalue of GNEP. If ALPHAI(j) is zero, then the j-th */
+/* eigenvalue is real; if positive, then the j-th and */
+/* (j+1)-st eigenvalues are a complex conjugate pair, with */
+/* ALPHAI(j+1) = -ALPHAI(j). */
+
+/* BETA (output) DOUBLE PRECISION array, dimension (N) */
+/* The scalars beta that define the eigenvalues of GNEP. */
+
+/* Together, the quantities alpha = (ALPHAR(j),ALPHAI(j)) and */
+/* beta = BETA(j) represent the j-th eigenvalue of the matrix */
+/* pair (A,B), in one of the forms lambda = alpha/beta or */
+/* mu = beta/alpha. Since either lambda or mu may overflow, */
+/* they should not, in general, be computed. */
+
+/* VL (output) DOUBLE PRECISION array, dimension (LDVL,N) */
+/* If JOBVL = 'V', the left eigenvectors u(j) are stored */
+/* in the columns of VL, in the same order as their eigenvalues. */
+/* If the j-th eigenvalue is real, then u(j) = VL(:,j). */
+/* If the j-th and (j+1)-st eigenvalues form a complex conjugate */
+/* pair, then */
+/* u(j) = VL(:,j) + i*VL(:,j+1) */
+/* and */
+/* u(j+1) = VL(:,j) - i*VL(:,j+1). */
+
+/* Each eigenvector is scaled so that its largest component has */
+/* abs(real part) + abs(imag. part) = 1, except for eigenvectors */
+/* corresponding to an eigenvalue with alpha = beta = 0, which */
+/* are set to zero. */
+/* Not referenced if JOBVL = 'N'. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the matrix VL. LDVL >= 1, and */
+/* if JOBVL = 'V', LDVL >= N. */
+
+/* VR (output) DOUBLE PRECISION array, dimension (LDVR,N) */
+/* If JOBVR = 'V', the right eigenvectors x(j) are stored */
+/* in the columns of VR, in the same order as their eigenvalues. */
+/* If the j-th eigenvalue is real, then x(j) = VR(:,j). */
+/* If the j-th and (j+1)-st eigenvalues form a complex conjugate */
+/* pair, then */
+/* x(j) = VR(:,j) + i*VR(:,j+1) */
+/* and */
+/* x(j+1) = VR(:,j) - i*VR(:,j+1). */
+
+/* Each eigenvector is scaled so that its largest component has */
+/* abs(real part) + abs(imag. part) = 1, except for eigenvalues */
+/* corresponding to an eigenvalue with alpha = beta = 0, which */
+/* are set to zero. */
+/* Not referenced if JOBVR = 'N'. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the matrix VR. LDVR >= 1, and */
+/* if JOBVR = 'V', LDVR >= N. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,8*N). */
+/* For good performance, LWORK must generally be larger. */
+/* To compute the optimal value of LWORK, call ILAENV to get */
+/* blocksizes (for DGEQRF, DORMQR, and DORGQR.) Then compute: */
+/* NB -- MAX of the blocksizes for DGEQRF, DORMQR, and DORGQR; */
+/* The optimal LWORK is: */
+/* 2*N + MAX( 6*N, N*(NB+1) ). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1,...,N: */
+/* The QZ iteration failed. No eigenvectors have been */
+/* calculated, but ALPHAR(j), ALPHAI(j), and BETA(j) */
+/* should be correct for j=INFO+1,...,N. */
+/* > N: errors that usually indicate LAPACK problems: */
+/* =N+1: error return from DGGBAL */
+/* =N+2: error return from DGEQRF */
+/* =N+3: error return from DORMQR */
+/* =N+4: error return from DORGQR */
+/* =N+5: error return from DGGHRD */
+/* =N+6: error return from DHGEQZ (other than failed */
+/* iteration) */
+/* =N+7: error return from DTGEVC */
+/* =N+8: error return from DGGBAK (computing VL) */
+/* =N+9: error return from DGGBAK (computing VR) */
+/* =N+10: error return from DLASCL (various calls) */
+
+/* Further Details */
+/* =============== */
+
+/* Balancing */
+/* --------- */
+
+/* This driver calls DGGBAL to both permute and scale rows and columns */
+/* of A and B. The permutations PL and PR are chosen so that PL*A*PR */
+/* and PL*B*R will be upper triangular except for the diagonal blocks */
+/* A(i:j,i:j) and B(i:j,i:j), with i and j as close together as */
+/* possible. The diagonal scaling matrices DL and DR are chosen so */
+/* that the pair DL*PL*A*PR*DR, DL*PL*B*PR*DR have elements close to */
+/* one (except for the elements that start out zero.) */
+
+/* After the eigenvalues and eigenvectors of the balanced matrices */
+/* have been computed, DGGBAK transforms the eigenvectors back to what */
+/* they would have been (in perfect arithmetic) if they had not been */
+/* balanced. */
+
+/* Contents of A and B on Exit */
+/* -------- -- - --- - -- ---- */
+
+/* If any eigenvectors are computed (either JOBVL='V' or JOBVR='V' or */
+/* both), then on exit the arrays A and B will contain the real Schur */
+/* form[*] of the "balanced" versions of A and B. If no eigenvectors */
+/* are computed, then only the diagonal blocks will be correct. */
+
+/* [*] See DHGEQZ, DGEGS, or read the book "Matrix Computations", */
+/* by Golub & van Loan, pub. by Johns Hopkins U. Press. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+    /* Parameter adjustments */
+    /* (f2c idiom: each array pointer is shifted down by one element/
+       column so that the original 1-based Fortran subscripts can be
+       used unchanged in the C translation) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --alphar;
+    --alphai;
+    --beta;
+    vl_dim1 = *ldvl;
+    vl_offset = 1 + vl_dim1;
+    vl -= vl_offset;
+    vr_dim1 = *ldvr;
+    vr_offset = 1 + vr_dim1;
+    vr -= vr_offset;
+    --work;
+
+    /* Function Body */
+    if (lsame_(jobvl, "N")) {
+	ijobvl = 1;
+	ilvl = FALSE_;
+    } else if (lsame_(jobvl, "V")) {
+	ijobvl = 2;
+	ilvl = TRUE_;
+    } else {
+	ijobvl = -1;
+	ilvl = FALSE_;
+    }
+
+    if (lsame_(jobvr, "N")) {
+	ijobvr = 1;
+	ilvr = FALSE_;
+    } else if (lsame_(jobvr, "V")) {
+	ijobvr = 2;
+	ilvr = TRUE_;
+    } else {
+	ijobvr = -1;
+	ilvr = FALSE_;
+    }
+    ilv = ilvl || ilvr;
+
+/* Test the input arguments */
+
+/* Computing MAX */
+    i__1 = *n << 3;
+    lwkmin = max(i__1,1);
+    lwkopt = lwkmin;
+    work[1] = (doublereal) lwkopt;
+    lquery = *lwork == -1;
+    *info = 0;
+    if (ijobvl <= 0) {
+	*info = -1;
+    } else if (ijobvr <= 0) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    } else if (*ldvl < 1 || ilvl && *ldvl < *n) {
+	*info = -12;
+    } else if (*ldvr < 1 || ilvr && *ldvr < *n) {
+	*info = -14;
+    } else if (*lwork < lwkmin && ! lquery) {
+	*info = -16;
+    }
+
+/* Compute the optimal workspace (2*N + MAX(6*N, N*(NB+1)), see the */
+/* LWORK documentation above) and report it in WORK(1). */
+
+    if (*info == 0) {
+	nb1 = ilaenv_(&c__1, "DGEQRF", " ", n, n, &c_n1, &c_n1);
+	nb2 = ilaenv_(&c__1, "DORMQR", " ", n, n, n, &c_n1);
+	nb3 = ilaenv_(&c__1, "DORGQR", " ", n, n, n, &c_n1);
+/* Computing MAX */
+	i__1 = max(nb1,nb2);
+	nb = max(i__1,nb3);
+/* Computing MAX */
+	i__1 = *n * 6, i__2 = *n * (nb + 1);
+	lopt = (*n << 1) + max(i__1,i__2);
+	work[1] = (doublereal) lopt;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGEGV ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Get machine constants */
+
+    eps = dlamch_("E") * dlamch_("B");
+    safmin = dlamch_("S");
+/* safmin is doubled (as in the Fortran source) so that later */
+/* comparisons sit safely above the exact underflow threshold */
+    safmin += safmin;
+    safmax = 1. / safmin;
+    onepls = eps * 4 + 1.;
+
+/* Scale A */
+
+    anrm = dlange_("M", n, n, &a[a_offset], lda, &work[1]);
+/* anrm1/anrm2 record the factored scaling applied to A; they are */
+/* reused in the final un-scaling loop (ending at L110) to rebuild */
+/* alpha without intermediate underflow */
+    anrm1 = anrm;
+    anrm2 = 1.;
+    if (anrm < 1.) {
+	if (safmax * anrm < 1.) {
+	    anrm1 = safmin;
+	    anrm2 = safmax * anrm;
+	}
+    }
+
+    if (anrm > 0.) {
+	dlascl_("G", &c_n1, &c_n1, &anrm, &c_b27, n, n, &a[a_offset], lda, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 10;
+	    return 0;
+	}
+    }
+
+/* Scale B */
+
+    bnrm = dlange_("M", n, n, &b[b_offset], ldb, &work[1]);
+/* bnrm1/bnrm2 play the same role for B as anrm1/anrm2 do for A */
+    bnrm1 = bnrm;
+    bnrm2 = 1.;
+    if (bnrm < 1.) {
+	if (safmax * bnrm < 1.) {
+	    bnrm1 = safmin;
+	    bnrm2 = safmax * bnrm;
+	}
+    }
+
+    if (bnrm > 0.) {
+	dlascl_("G", &c_n1, &c_n1, &bnrm, &c_b27, n, n, &b[b_offset], ldb, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 10;
+	    return 0;
+	}
+    }
+
+/* Permute the matrix to make it more nearly triangular */
+/* Workspace layout: (8*N words -- "work" requires 6*N words) */
+/* left_permutation, right_permutation, work... */
+
+    ileft = 1;
+    iright = *n + 1;
+    iwork = iright + *n;
+    dggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &work[
+	    ileft], &work[iright], &work[iwork], &iinfo);
+    if (iinfo != 0) {
+	*info = *n + 1;
+	goto L120;
+    }
+
+/* Reduce B to triangular form, and initialize VL and/or VR */
+/* Workspace layout: ("work..." must have at least N words) */
+/* left_permutation, right_permutation, tau, work... */
+
+    irows = ihi + 1 - ilo;
+    if (ilv) {
+	icols = *n + 1 - ilo;
+    } else {
+	icols = irows;
+    }
+    itau = iwork;
+    iwork = itau + irows;
+    i__1 = *lwork + 1 - iwork;
+    dgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+	    iwork], &i__1, &iinfo);
+    if (iinfo >= 0) {
+/* Computing MAX */
+	i__1 = lwkopt, i__2 = (integer) work[iwork] + iwork - 1;
+	lwkopt = max(i__1,i__2);
+    }
+    if (iinfo != 0) {
+	*info = *n + 2;
+	goto L120;
+    }
+
+    i__1 = *lwork + 1 - iwork;
+    dormqr_("L", "T", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+	    work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwork], &i__1, &
+	    iinfo);
+    if (iinfo >= 0) {
+/* Computing MAX */
+	i__1 = lwkopt, i__2 = (integer) work[iwork] + iwork - 1;
+	lwkopt = max(i__1,i__2);
+    }
+    if (iinfo != 0) {
+	*info = *n + 3;
+	goto L120;
+    }
+
+    if (ilvl) {
+	dlaset_("Full", n, n, &c_b38, &c_b27, &vl[vl_offset], ldvl)
+		;
+	i__1 = irows - 1;
+	i__2 = irows - 1;
+	dlacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vl[ilo +
+		1 + ilo * vl_dim1], ldvl);
+	i__1 = *lwork + 1 - iwork;
+	dorgqr_(&irows, &irows, &irows, &vl[ilo + ilo * vl_dim1], ldvl, &work[
+		itau], &work[iwork], &i__1, &iinfo);
+	if (iinfo >= 0) {
+/* Computing MAX */
+	    i__1 = lwkopt, i__2 = (integer) work[iwork] + iwork - 1;
+	    lwkopt = max(i__1,i__2);
+	}
+	if (iinfo != 0) {
+	    *info = *n + 4;
+	    goto L120;
+	}
+    }
+
+    if (ilvr) {
+	dlaset_("Full", n, n, &c_b38, &c_b27, &vr[vr_offset], ldvr)
+		;
+    }
+
+/* Reduce to generalized Hessenberg form */
+
+    if (ilv) {
+
+/* Eigenvectors requested -- work on whole matrix. */
+
+	dgghrd_(jobvl, jobvr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset],
+		ldb, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, &iinfo);
+    } else {
+	dgghrd_("N", "N", &irows, &c__1, &irows, &a[ilo + ilo * a_dim1], lda,
+		 &b[ilo + ilo * b_dim1], ldb, &vl[vl_offset], ldvl, &vr[
+		vr_offset], ldvr, &iinfo);
+    }
+    if (iinfo != 0) {
+	*info = *n + 5;
+	goto L120;
+    }
+
+/* Perform QZ algorithm */
+/* Workspace layout: ("work..." must have at least 1 word) */
+/* left_permutation, right_permutation, work... */
+
+    iwork = itau;
+/* 'S' requests the full Schur form from DHGEQZ (needed below when */
+/* eigenvectors are computed); 'E' requests eigenvalues only. */
+/* chtemp is a one-character buffer written through unsigned char*, */
+/* another f2c translation idiom for CHARACTER*1 variables. */
+    if (ilv) {
+	*(unsigned char *)chtemp = 'S';
+    } else {
+	*(unsigned char *)chtemp = 'E';
+    }
+    i__1 = *lwork + 1 - iwork;
+    dhgeqz_(chtemp, jobvl, jobvr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+	    b_offset], ldb, &alphar[1], &alphai[1], &beta[1], &vl[vl_offset],
+	    ldvl, &vr[vr_offset], ldvr, &work[iwork], &i__1, &iinfo);
+    if (iinfo >= 0) {
+/* Computing MAX */
+	i__1 = lwkopt, i__2 = (integer) work[iwork] + iwork - 1;
+	lwkopt = max(i__1,i__2);
+    }
+    if (iinfo != 0) {
+	if (iinfo > 0 && iinfo <= *n) {
+	    *info = iinfo;
+	} else if (iinfo > *n && iinfo <= *n << 1) {
+	    *info = iinfo - *n;
+	} else {
+	    *info = *n + 6;
+	}
+	goto L120;
+    }
+
+    if (ilv) {
+
+/* Compute Eigenvectors (DTGEVC requires 6*N words of workspace) */
+
+	if (ilvl) {
+	    if (ilvr) {
+		*(unsigned char *)chtemp = 'B';
+	    } else {
+		*(unsigned char *)chtemp = 'L';
+	    }
+	} else {
+	    *(unsigned char *)chtemp = 'R';
+	}
+
+/* ldumma is a placeholder SELECT array; per the DTGEVC interface it */
+/* is not referenced when HOWMNY = "B" (compute all eigenvectors) */
+	dtgevc_(chtemp, "B", ldumma, n, &a[a_offset], lda, &b[b_offset], ldb,
+		&vl[vl_offset], ldvl, &vr[vr_offset], ldvr, n, &in, &work[
+		iwork], &iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 7;
+	    goto L120;
+	}
+
+/* Undo balancing on VL and VR, rescale */
+
+	if (ilvl) {
+	    dggbak_("P", "L", n, &ilo, &ihi, &work[ileft], &work[iright], n, &
+		    vl[vl_offset], ldvl, &iinfo);
+	    if (iinfo != 0) {
+		*info = *n + 8;
+		goto L120;
+	    }
+/* Normalize each left eigenvector so its largest component has */
+/* |real| + |imag| = 1 (complex pairs occupy two columns; columns */
+/* with ALPHAI(jc) < 0 were already handled with their conjugate) */
+	    i__1 = *n;
+	    for (jc = 1; jc <= i__1; ++jc) {
+		if (alphai[jc] < 0.) {
+		    goto L50;
+		}
+		temp = 0.;
+		if (alphai[jc] == 0.) {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+			d__2 = temp, d__3 = (d__1 = vl[jr + jc * vl_dim1],
+				abs(d__1));
+			temp = max(d__2,d__3);
+/* L10: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+			d__3 = temp, d__4 = (d__1 = vl[jr + jc * vl_dim1],
+				abs(d__1)) + (d__2 = vl[jr + (jc + 1) *
+				vl_dim1], abs(d__2));
+			temp = max(d__3,d__4);
+/* L20: */
+		    }
+		}
+		if (temp < safmin) {
+		    goto L50;
+		}
+		temp = 1. / temp;
+		if (alphai[jc] == 0.) {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+			vl[jr + jc * vl_dim1] *= temp;
+/* L30: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+			vl[jr + jc * vl_dim1] *= temp;
+			vl[jr + (jc + 1) * vl_dim1] *= temp;
+/* L40: */
+		    }
+		}
+L50:
+		;
+	    }
+	}
+	if (ilvr) {
+	    dggbak_("P", "R", n, &ilo, &ihi, &work[ileft], &work[iright], n, &
+		    vr[vr_offset], ldvr, &iinfo);
+	    if (iinfo != 0) {
+		*info = *n + 9;
+		goto L120;
+	    }
+/* Same normalization for the right eigenvectors */
+	    i__1 = *n;
+	    for (jc = 1; jc <= i__1; ++jc) {
+		if (alphai[jc] < 0.) {
+		    goto L100;
+		}
+		temp = 0.;
+		if (alphai[jc] == 0.) {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+			d__2 = temp, d__3 = (d__1 = vr[jr + jc * vr_dim1],
+				abs(d__1));
+			temp = max(d__2,d__3);
+/* L60: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+			d__3 = temp, d__4 = (d__1 = vr[jr + jc * vr_dim1],
+				abs(d__1)) + (d__2 = vr[jr + (jc + 1) *
+				vr_dim1], abs(d__2));
+			temp = max(d__3,d__4);
+/* L70: */
+		    }
+		}
+		if (temp < safmin) {
+		    goto L100;
+		}
+		temp = 1. / temp;
+		if (alphai[jc] == 0.) {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+			vr[jr + jc * vr_dim1] *= temp;
+/* L80: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+			vr[jr + jc * vr_dim1] *= temp;
+			vr[jr + (jc + 1) * vr_dim1] *= temp;
+/* L90: */
+		    }
+		}
+L100:
+		;
+	    }
+	}
+
+/* End of eigenvector calculation */
+
+    }
+
+/* Undo scaling in alpha, beta */
+
+/* Note: this does not give the alpha and beta for the unscaled */
+/* problem. */
+
+/* Un-scaling is limited to avoid underflow in alpha and beta */
+/* if they are significant. */
+
+    i__1 = *n;
+    for (jc = 1; jc <= i__1; ++jc) {
+	absar = (d__1 = alphar[jc], abs(d__1));
+	absai = (d__1 = alphai[jc], abs(d__1));
+	absb = (d__1 = beta[jc], abs(d__1));
+	salfar = anrm * alphar[jc];
+	salfai = anrm * alphai[jc];
+	sbeta = bnrm * beta[jc];
+	ilimit = FALSE_;
+	scale = 1.;
+
+/* Check for significant underflow in ALPHAI */
+
+/* Computing MAX */
+	d__1 = safmin, d__2 = eps * absar, d__1 = max(d__1,d__2), d__2 = eps *
+		 absb;
+	if (abs(salfai) < safmin && absai >= max(d__1,d__2)) {
+	    ilimit = TRUE_;
+/* Computing MAX */
+	    d__1 = onepls * safmin, d__2 = anrm2 * absai;
+	    scale = onepls * safmin / anrm1 / max(d__1,d__2);
+
+	} else if (salfai == 0.) {
+
+/* If insignificant underflow in ALPHAI, then make the */
+/* conjugate eigenvalue real. */
+
+	    if (alphai[jc] < 0. && jc > 1) {
+		alphai[jc - 1] = 0.;
+	    } else if (alphai[jc] > 0. && jc < *n) {
+		alphai[jc + 1] = 0.;
+	    }
+	}
+
+/* Check for significant underflow in ALPHAR */
+
+/* Computing MAX */
+	d__1 = safmin, d__2 = eps * absai, d__1 = max(d__1,d__2), d__2 = eps *
+		 absb;
+	if (abs(salfar) < safmin && absar >= max(d__1,d__2)) {
+	    ilimit = TRUE_;
+/* Computing MAX */
+/* Computing MAX */
+	    d__3 = onepls * safmin, d__4 = anrm2 * absar;
+	    d__1 = scale, d__2 = onepls * safmin / anrm1 / max(d__3,d__4);
+	    scale = max(d__1,d__2);
+	}
+
+/* Check for significant underflow in BETA */
+
+/* Computing MAX */
+	d__1 = safmin, d__2 = eps * absar, d__1 = max(d__1,d__2), d__2 = eps *
+		 absai;
+	if (abs(sbeta) < safmin && absb >= max(d__1,d__2)) {
+	    ilimit = TRUE_;
+/* Computing MAX */
+/* Computing MAX */
+	    d__3 = onepls * safmin, d__4 = bnrm2 * absb;
+	    d__1 = scale, d__2 = onepls * safmin / bnrm1 / max(d__3,d__4);
+	    scale = max(d__1,d__2);
+	}
+
+/* Check for possible overflow when limiting scaling */
+
+	if (ilimit) {
+/* Computing MAX */
+	    d__1 = abs(salfar), d__2 = abs(salfai), d__1 = max(d__1,d__2),
+		    d__2 = abs(sbeta);
+	    temp = scale * safmin * max(d__1,d__2);
+	    if (temp > 1.) {
+		scale /= temp;
+	    }
+	    if (scale < 1.) {
+		ilimit = FALSE_;
+	    }
+	}
+
+/* Recompute un-scaled ALPHAR, ALPHAI, BETA if necessary. */
+
+	if (ilimit) {
+	    salfar = scale * alphar[jc] * anrm;
+	    salfai = scale * alphai[jc] * anrm;
+	    sbeta = scale * beta[jc] * bnrm;
+	}
+	alphar[jc] = salfar;
+	alphai[jc] = salfai;
+	beta[jc] = sbeta;
+/* L110: */
+    }
+
+L120:
+    work[1] = (doublereal) lwkopt;
+
+    return 0;
+
+/* End of DGEGV */
+
+} /* dgegv_ */
diff --git a/contrib/libs/clapack/dgehd2.c b/contrib/libs/clapack/dgehd2.c
new file mode 100644
index 0000000000..b9a4c75327
--- /dev/null
+++ b/contrib/libs/clapack/dgehd2.c
@@ -0,0 +1,191 @@
+/* dgehd2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c passes arguments by reference; this addressable 1 is used as the
+   vector increment (INCV/INCX) in the dlarfg_/dlarf_ calls below. */
+static integer c__1 = 1;
+
+/* Subroutine */ int dgehd2_(integer *n, integer *ilo, integer *ihi,
+	doublereal *a, integer *lda, doublereal *tau, doublereal *work,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__;
+    doublereal aii;
+    extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *), dlarfg_(integer *, doublereal *,
+	    doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGEHD2 reduces a real general matrix A to upper Hessenberg form H by */
+/* an orthogonal similarity transformation: Q' * A * Q = H . */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that A is already upper triangular in rows */
+/* and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally */
+/* set by a previous call to DGEBAL; otherwise they should be */
+/* set to 1 and N respectively. See Further Details. */
+/* 1 <= ILO <= IHI <= max(1,N). */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the n by n general matrix to be reduced. */
+/* On exit, the upper triangle and the first subdiagonal of A */
+/* are overwritten with the upper Hessenberg matrix H, and the */
+/* elements below the first subdiagonal, with the array TAU, */
+/* represent the orthogonal matrix Q as a product of elementary */
+/* reflectors. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of (ihi-ilo) elementary */
+/* reflectors */
+
+/* Q = H(ilo) H(ilo+1) . . . H(ihi-1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on */
+/* exit in A(i+2:ihi,i), and tau in TAU(i). */
+
+/* The contents of A are illustrated by the following example, with */
+/* n = 7, ilo = 2 and ihi = 6: */
+
+/* on entry, on exit, */
+
+/* ( a a a a a a a ) ( a a h h h h a ) */
+/* ( a a a a a a ) ( a h h h h a ) */
+/* ( a a a a a a ) ( h h h h h h ) */
+/* ( a a a a a a ) ( v2 h h h h h ) */
+/* ( a a a a a a ) ( v2 v3 h h h h ) */
+/* ( a a a a a a ) ( v2 v3 v4 h h h ) */
+/* ( a ) ( a ) */
+
+/* where a denotes an element of the original matrix A, h denotes a */
+/* modified element of the upper Hessenberg matrix H, and vi denotes an */
+/* element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+    /* (f2c idiom: shift the pointers so the original 1-based Fortran
+       subscripts can be used unchanged in C) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*ilo < 1 || *ilo > max(1,*n)) {
+	*info = -2;
+    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGEHD2", &i__1);
+	return 0;
+    }
+
+    i__1 = *ihi - 1;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+
+/* Compute elementary reflector H(i) to annihilate A(i+2:ihi,i) */
+
+	i__2 = *ihi - i__;
+/* Computing MIN */
+	i__3 = i__ + 2;
+	dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3, *n)+ i__ *
+		a_dim1], &c__1, &tau[i__]);
+/* Save A(i+1,i) and overwrite it with 1 so that column i of A can be */
+/* passed to DLARF as the reflector vector v (whose leading entry is */
+/* implicitly 1, see Further Details); the entry is restored below. */
+	aii = a[i__ + 1 + i__ * a_dim1];
+	a[i__ + 1 + i__ * a_dim1] = 1.;
+
+/* Apply H(i) to A(1:ihi,i+1:ihi) from the right */
+
+	i__2 = *ihi - i__;
+	dlarf_("Right", ihi, &i__2, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+		i__], &a[(i__ + 1) * a_dim1 + 1], lda, &work[1]);
+
+/* Apply H(i) to A(i+1:ihi,i+1:n) from the left */
+
+	i__2 = *ihi - i__;
+	i__3 = *n - i__;
+	dlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+		i__], &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &work[1]);
+
+	a[i__ + 1 + i__ * a_dim1] = aii;
+/* L10: */
+    }
+
+    return 0;
+
+/* End of DGEHD2 */
+
+} /* dgehd2_ */
diff --git a/contrib/libs/clapack/dgehrd.c b/contrib/libs/clapack/dgehrd.c
new file mode 100644
index 0000000000..6e88e9d98f
--- /dev/null
+++ b/contrib/libs/clapack/dgehrd.c
@@ -0,0 +1,342 @@
+/* dgehrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+static integer c__65 = 65;
+static doublereal c_b25 = -1.;
+static doublereal c_b26 = 1.;
+
+/* Subroutine */ int dgehrd_(integer *n, integer *ilo, integer *ihi,
+	doublereal *a, integer *lda, doublereal *tau, doublereal *work,
+	integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, j;
+    /* t holds the triangular factor T of each block reflector, stored as a
+       65x64 matrix (leading dimension 65); NB is capped at 64 below so that
+       every block fits in this fixed local array. */
+    doublereal t[4160]	/* was [65][64] */;
+    integer ib;
+    doublereal ei;
+    integer nb, nh, nx, iws;
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *);
+    integer nbmin, iinfo;
+    extern /* Subroutine */ int dtrmm_(char *, char *, char *, char *,
+	    integer *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *), daxpy_(
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *), dgehd2_(integer *, integer *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *), dlahr2_(
+	    integer *, integer *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *),
+	    dlarfb_(char *, char *, char *, char *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGEHRD reduces a real general matrix A to upper Hessenberg form H by */
+/* an orthogonal similarity transformation: Q' * A * Q = H . */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that A is already upper triangular in rows */
+/* and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally */
+/* set by a previous call to DGEBAL; otherwise they should be */
+/* set to 1 and N respectively. See Further Details. */
+/* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the N-by-N general matrix to be reduced. */
+/* On exit, the upper triangle and the first subdiagonal of A */
+/* are overwritten with the upper Hessenberg matrix H, and the */
+/* elements below the first subdiagonal, with the array TAU, */
+/* represent the orthogonal matrix Q as a product of elementary */
+/* reflectors. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). Elements 1:ILO-1 and IHI:N-1 of TAU are set to */
+/* zero. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of (ihi-ilo) elementary */
+/* reflectors */
+
+/* Q = H(ilo) H(ilo+1) . . . H(ihi-1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on */
+/* exit in A(i+2:ihi,i), and tau in TAU(i). */
+
+/* The contents of A are illustrated by the following example, with */
+/* n = 7, ilo = 2 and ihi = 6: */
+
+/* on entry, on exit, */
+
+/* ( a a a a a a a ) ( a a h h h h a ) */
+/* ( a a a a a a ) ( a h h h h a ) */
+/* ( a a a a a a ) ( h h h h h h ) */
+/* ( a a a a a a ) ( v2 h h h h h ) */
+/* ( a a a a a a ) ( v2 v3 h h h h ) */
+/* ( a a a a a a ) ( v2 v3 v4 h h h ) */
+/* ( a ) ( a ) */
+
+/* where a denotes an element of the original matrix A, h denotes a */
+/* modified element of the upper Hessenberg matrix H, and vi denotes an */
+/* element of the vector defining H(i). */
+
+/* This file is a slight modification of LAPACK-3.0's DGEHRD */
+/* subroutine incorporating improvements proposed by Quintana-Orti and */
+/* Van de Geijn (2005). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+    /* Shift pointers so 1-based Fortran-style indexing a[i + j*a_dim1],
+       tau[i], work[i] works directly in C. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+/* NB is limited to 64 so the T factor fits in the local array t (65x64) */
+/* Computing MIN */
+    i__1 = 64, i__2 = ilaenv_(&c__1, "DGEHRD", " ", n, ilo, ihi, &c_n1);
+    nb = min(i__1,i__2);
+    lwkopt = *n * nb;
+    work[1] = (doublereal) lwkopt;
+    /* LWORK = -1 requests a workspace-size query only (see LWORK above) */
+    lquery = *lwork == -1;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*ilo < 1 || *ilo > max(1,*n)) {
+	*info = -2;
+    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*lwork < max(1,*n) && ! lquery) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGEHRD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Set elements 1:ILO-1 and IHI:N-1 of TAU to zero */
+
+    i__1 = *ilo - 1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	tau[i__] = 0.;
+/* L10: */
+    }
+    i__1 = *n - 1;
+    for (i__ = max(1,*ihi); i__ <= i__1; ++i__) {
+	tau[i__] = 0.;
+/* L20: */
+    }
+
+/* Quick return if possible */
+
+    nh = *ihi - *ilo + 1;
+    if (nh <= 1) {
+	work[1] = 1.;
+	return 0;
+    }
+
+/* Determine the block size */
+
+/* Computing MIN */
+    i__1 = 64, i__2 = ilaenv_(&c__1, "DGEHRD", " ", n, ilo, ihi, &c_n1);
+    nb = min(i__1,i__2);
+    nbmin = 2;
+    iws = 1;
+    if (nb > 1 && nb < nh) {
+
+/* Determine when to cross over from blocked to unblocked code */
+/* (last block is always handled by unblocked code) */
+
+/* Computing MAX */
+	i__1 = nb, i__2 = ilaenv_(&c__3, "DGEHRD", " ", n, ilo, ihi, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < nh) {
+
+/* Determine if workspace is large enough for blocked code */
+
+	    iws = *n * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: determine the */
+/* minimum value of NB, and reduce NB or force use of */
+/* unblocked code */
+
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "DGEHRD", " ", n, ilo, ihi, &
+			c_n1);
+		nbmin = max(i__1,i__2);
+		if (*lwork >= *n * nbmin) {
+		    nb = *lwork / *n;
+		} else {
+		    nb = 1;
+		}
+	    }
+	}
+    }
+    /* work is used below as an n-by-nb matrix (the Y = A*V*T factor from */
+    /* DLAHR2) with leading dimension n */
+    ldwork = *n;
+
+    if (nb < nbmin || nb >= nh) {
+
+/* Use unblocked code below */
+
+	i__ = *ilo;
+
+    } else {
+
+/* Use blocked code */
+
+	i__1 = *ihi - 1 - nx;
+	i__2 = nb;
+	for (i__ = *ilo; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+	    i__3 = nb, i__4 = *ihi - i__;
+	    ib = min(i__3,i__4);
+
+/* Reduce columns i:i+ib-1 to Hessenberg form, returning the */
+/* matrices V and T of the block reflector H = I - V*T*V' */
+/* which performs the reduction, and also the matrix Y = A*V*T */
+
+	    dlahr2_(ihi, &i__, &ib, &a[i__ * a_dim1 + 1], lda, &tau[i__], t, &
+		    c__65, &work[1], &ldwork);
+
+/* Apply the block reflector H to A(1:ihi,i+ib:ihi) from the */
+/* right, computing A := A - Y * V'. V(i+ib,ib-1) must be set */
+/* to 1 */
+
+/* save the subdiagonal entry; it is overwritten with the reflector's */
+/* implicit leading 1 for the GEMM update and restored afterwards */
+	    ei = a[i__ + ib + (i__ + ib - 1) * a_dim1];
+	    a[i__ + ib + (i__ + ib - 1) * a_dim1] = 1.;
+	    i__3 = *ihi - i__ - ib + 1;
+	    dgemm_("No transpose", "Transpose", ihi, &i__3, &ib, &c_b25, &
+		    work[1], &ldwork, &a[i__ + ib + i__ * a_dim1], lda, &
+		    c_b26, &a[(i__ + ib) * a_dim1 + 1], lda);
+	    a[i__ + ib + (i__ + ib - 1) * a_dim1] = ei;
+
+/* Apply the block reflector H to A(1:i,i+1:i+ib-1) from the */
+/* right */
+
+	    i__3 = ib - 1;
+	    dtrmm_("Right", "Lower", "Transpose", "Unit", &i__, &i__3, &c_b26,
+		     &a[i__ + 1 + i__ * a_dim1], lda, &work[1], &ldwork);
+	    i__3 = ib - 2;
+	    for (j = 0; j <= i__3; ++j) {
+		daxpy_(&i__, &c_b25, &work[ldwork * j + 1], &c__1, &a[(i__ +
+			j + 1) * a_dim1 + 1], &c__1);
+/* L30: */
+	    }
+
+/* Apply the block reflector H to A(i+1:ihi,i+ib:n) from the */
+/* left */
+
+	    i__3 = *ihi - i__;
+	    i__4 = *n - i__ - ib + 1;
+	    dlarfb_("Left", "Transpose", "Forward", "Columnwise", &i__3, &
+		    i__4, &ib, &a[i__ + 1 + i__ * a_dim1], lda, t, &c__65, &a[
+		    i__ + 1 + (i__ + ib) * a_dim1], lda, &work[1], &ldwork);
+/* L40: */
+	}
+    }
+
+/* Use unblocked code to reduce the rest of the matrix */
+
+/* (i__ is ILO when the blocked path was skipped, otherwise the first */
+/* column the blocked loop did not reach) */
+    dgehd2_(n, &i__, ihi, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
+    work[1] = (doublereal) iws;
+
+    return 0;
+
+/* End of DGEHRD */
+
+} /* dgehrd_ */
diff --git a/contrib/libs/clapack/dgejsv.c b/contrib/libs/clapack/dgejsv.c
new file mode 100644
index 0000000000..0b0376b8a5
--- /dev/null
+++ b/contrib/libs/clapack/dgejsv.c
@@ -0,0 +1,2218 @@
+/* dgejsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b34 = 0.;
+static doublereal c_b35 = 1.;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dgejsv_(char *joba, char *jobu, char *jobv, char *jobr,
+ char *jobt, char *jobp, integer *m, integer *n, doublereal *a,
+ integer *lda, doublereal *sva, doublereal *u, integer *ldu,
+ doublereal *v, integer *ldv, doublereal *work, integer *lwork,
+ integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, u_dim1, u_offset, v_dim1, v_offset, i__1, i__2,
+ i__3, i__4, i__5, i__6, i__7, i__8, i__9, i__10;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal), log(doublereal), d_sign(doublereal *, doublereal
+ *);
+ integer i_dnnt(doublereal *);
+
+ /* Local variables */
+ integer p, q, n1, nr;
+ doublereal big, xsc, big1;
+ logical defr;
+ doublereal aapp, aaqq;
+ logical kill;
+ integer ierr;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ doublereal temp1;
+ logical jracc;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ doublereal small, entra, sfmin;
+ logical lsvec;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dswap_(integer *, doublereal *, integer
+ *, doublereal *, integer *);
+ doublereal epsln;
+ logical rsvec;
+ extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *);
+ logical l2aber;
+ extern /* Subroutine */ int dgeqp3_(integer *, integer *, doublereal *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ integer *);
+ doublereal condr1, condr2, uscal1, uscal2;
+ logical l2kill, l2rank, l2tran, l2pert;
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int dgelqf_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, integer *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ doublereal scalem;
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *);
+ doublereal sconda;
+ logical goscal;
+ doublereal aatmin;
+ extern /* Subroutine */ int dgeqrf_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, integer *);
+ doublereal aatmax;
+ extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *),
+ dlaset_(char *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *), xerbla_(char *, integer *);
+ logical noscal;
+ extern /* Subroutine */ int dpocon_(char *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *,
+ integer *), dgesvj_(char *, char *, char *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *, integer *), dlassq_(integer *, doublereal *, integer
+ *, doublereal *, doublereal *), dlaswp_(integer *, doublereal *,
+ integer *, integer *, integer *, integer *, integer *);
+ doublereal entrat;
+ logical almort;
+ extern /* Subroutine */ int dorgqr_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *), dormlq_(char *, char *, integer *, integer *, integer
+ *, doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, integer *);
+ doublereal maxprj;
+ logical errest;
+ extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ logical transp, rowpiv;
+ doublereal cond_ok__;
+ integer warning, numrank;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Zlatko Drmac of the University of Zagreb and -- */
+/* -- Kresimir Veselic of the Fernuniversitaet Hagen -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* This routine is also part of SIGMA (version 1.23, October 23. 2008.) */
+/* SIGMA is a library of algorithms for highly accurate algorithms for */
+/* computation of SVD, PSVD, QSVD, (H,K)-SVD, and for solution of the */
+/* eigenvalue problems Hx = lambda M x, H M x = lambda x with H, M > 0. */
+
+/* -#- Scalar Arguments -#- */
+
+
+/* -#- Array Arguments -#- */
+
+/* .. */
+
+/* Purpose */
+/* ~~~~~~~ */
+/* DGEJSV computes the singular value decomposition (SVD) of a real M-by-N */
+/* matrix [A], where M >= N. The SVD of [A] is written as */
+
+/* [A] = [U] * [SIGMA] * [V]^t, */
+
+/* where [SIGMA] is an N-by-N (M-by-N) matrix which is zero except for its N */
+/* diagonal elements, [U] is an M-by-N (or M-by-M) orthonormal matrix, and */
+/* [V] is an N-by-N orthogonal matrix. The diagonal elements of [SIGMA] are */
+/* the singular values of [A]. The columns of [U] and [V] are the left and */
+/* the right singular vectors of [A], respectively. The matrices [U] and [V] */
+/* are computed and stored in the arrays U and V, respectively. The diagonal */
+/* of [SIGMA] is computed and stored in the array SVA. */
+
+/* Further details */
+/* ~~~~~~~~~~~~~~~ */
+/* DGEJSV implements a preconditioned Jacobi SVD algorithm. It uses SGEQP3, */
+/* SGEQRF, and SGELQF as preprocessors and preconditioners. Optionally, an */
+/* additional row pivoting can be used as a preprocessor, which in some */
+/* cases results in much higher accuracy. An example is matrix A with the */
+/* structure A = D1 * C * D2, where D1, D2 are arbitrarily ill-conditioned */
+/* diagonal matrices and C is well-conditioned matrix. In that case, complete */
+/* pivoting in the first QR factorizations provides accuracy dependent on the */
+/* condition number of C, and independent of D1, D2. Such higher accuracy is */
+/* not completely understood theoretically, but it works well in practice. */
+/* Further, if A can be written as A = B*D, with well-conditioned B and some */
+/* diagonal D, then the high accuracy is guaranteed, both theoretically and */
+/* in software, independent of D. For more details see [1], [2]. */
+/* The computational range for the singular values can be the full range */
+/* ( UNDERFLOW,OVERFLOW ), provided that the machine arithmetic and the BLAS */
+/* & LAPACK routines called by DGEJSV are implemented to work in that range. */
+/* If that is not the case, then the restriction for safe computation with */
+/* the singular values in the range of normalized IEEE numbers is that the */
+/* spectral condition number kappa(A)=sigma_max(A)/sigma_min(A) does not */
+/* overflow. This code (DGEJSV) is best used in this restricted range, */
+/* meaning that singular values of magnitude below ||A||_2 / SLAMCH('O') are */
+/* returned as zeros. See JOBR for details on this. */
+/* Further, this implementation is somewhat slower than the one described */
+/* in [1,2] due to replacement of some non-LAPACK components, and because */
+/* the choice of some tuning parameters in the iterative part (DGESVJ) is */
+/* left to the implementer on a particular machine. */
+/* The rank revealing QR factorization (in this code: SGEQP3) should be */
+/* implemented as in [3]. We have a new version of SGEQP3 under development */
+/* that is more robust than the current one in LAPACK, with a cleaner cut in */
+/* rank defficient cases. It will be available in the SIGMA library [4]. */
+/* If M is much larger than N, it is obvious that the inital QRF with */
+/* column pivoting can be preprocessed by the QRF without pivoting. That */
+/* well known trick is not used in DGEJSV because in some cases heavy row */
+/* weighting can be treated with complete pivoting. The overhead in cases */
+/* M much larger than N is then only due to pivoting, but the benefits in */
+/* terms of accuracy have prevailed. The implementer/user can incorporate */
+/* this extra QRF step easily. The implementer can also improve data movement */
+/* (matrix transpose, matrix copy, matrix transposed copy) - this */
+/* implementation of DGEJSV uses only the simplest, naive data movement. */
+
+/* Contributors */
+/* ~~~~~~~~~~~~ */
+/* Zlatko Drmac (Zagreb, Croatia) and Kresimir Veselic (Hagen, Germany) */
+
+/* References */
+/* ~~~~~~~~~~ */
+/* [1] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm I. */
+/* SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1322-1342. */
+/* LAPACK Working note 169. */
+/* [2] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm II. */
+/* SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1343-1362. */
+/* LAPACK Working note 170. */
+/* [3] Z. Drmac and Z. Bujanovic: On the failure of rank-revealing QR */
+/* factorization software - a case study. */
+/* ACM Trans. Math. Softw. Vol. 35, No 2 (2008), pp. 1-28. */
+/* LAPACK Working note 176. */
+/* [4] Z. Drmac: SIGMA - mathematical software library for accurate SVD, PSV, */
+/* QSVD, (H,K)-SVD computations. */
+/* Department of Mathematics, University of Zagreb, 2008. */
+
+/* Bugs, examples and comments */
+/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
+/* Please report all bugs and send interesting examples and/or comments to */
+/* drmac@math.hr. Thank you. */
+
+/* Arguments */
+/* ~~~~~~~~~ */
+/* ............................................................................ */
+/* . JOBA (input) CHARACTER*1 */
+/* . Specifies the level of accuracy: */
+/* . = 'C': This option works well (high relative accuracy) if A = B * D, */
+/* . with well-conditioned B and arbitrary diagonal matrix D. */
+/* . The accuracy cannot be spoiled by COLUMN scaling. The */
+/* . accuracy of the computed output depends on the condition of */
+/* . B, and the procedure aims at the best theoretical accuracy. */
+/* . The relative error max_{i=1:N}|d sigma_i| / sigma_i is */
+/* . bounded by f(M,N)*epsilon* cond(B), independent of D. */
+/* . The input matrix is preprocessed with the QRF with column */
+/* . pivoting. This initial preprocessing and preconditioning by */
+/* . a rank revealing QR factorization is common for all values of */
+/* . JOBA. Additional actions are specified as follows: */
+/* . = 'E': Computation as with 'C' with an additional estimate of the */
+/* . condition number of B. It provides a realistic error bound. */
+/* . = 'F': If A = D1 * C * D2 with ill-conditioned diagonal scalings */
+/* . D1, D2, and well-conditioned matrix C, this option gives */
+/* . higher accuracy than the 'C' option. If the structure of the */
+/* . input matrix is not known, and relative accuracy is */
+/* . desirable, then this option is advisable. The input matrix A */
+/* . is preprocessed with QR factorization with FULL (row and */
+/* . column) pivoting. */
+/* . = 'G' Computation as with 'F' with an additional estimate of the */
+/* . condition number of B, where A=D*B. If A has heavily weighted */
+/* . rows, then using this condition number gives too pessimistic */
+/* . error bound. */
+/* . = 'A': Small singular values are the noise and the matrix is treated */
+/* . as numerically rank defficient. The error in the computed */
+/* . singular values is bounded by f(m,n)*epsilon*||A||. */
+/* . The computed SVD A = U * S * V^t restores A up to */
+/* . f(m,n)*epsilon*||A||. */
+/* . This gives the procedure the licence to discard (set to zero) */
+/* . all singular values below N*epsilon*||A||. */
+/* . = 'R': Similar as in 'A'. Rank revealing property of the initial */
+/* . QR factorization is used do reveal (using triangular factor) */
+/* . a gap sigma_{r+1} < epsilon * sigma_r in which case the */
+/* . numerical RANK is declared to be r. The SVD is computed with */
+/* . absolute error bounds, but more accurately than with 'A'. */
+/* . */
+/* . JOBU (input) CHARACTER*1 */
+/* . Specifies whether to compute the columns of U: */
+/* . = 'U': N columns of U are returned in the array U. */
+/* . = 'F': full set of M left sing. vectors is returned in the array U. */
+/* . = 'W': U may be used as workspace of length M*N. See the description */
+/* . of U. */
+/* . = 'N': U is not computed. */
+/* . */
+/* . JOBV (input) CHARACTER*1 */
+/* . Specifies whether to compute the matrix V: */
+/* . = 'V': N columns of V are returned in the array V; Jacobi rotations */
+/* . are not explicitly accumulated. */
+/* . = 'J': N columns of V are returned in the array V, but they are */
+/* . computed as the product of Jacobi rotations. This option is */
+/* . allowed only if JOBU .NE. 'N', i.e. in computing the full SVD. */
+/* . = 'W': V may be used as workspace of length N*N. See the description */
+/* . of V. */
+/* . = 'N': V is not computed. */
+/* . */
+/* . JOBR (input) CHARACTER*1 */
+/* . Specifies the RANGE for the singular values. Issues the licence to */
+/* . set to zero small positive singular values if they are outside */
+/* . specified range. If A .NE. 0 is scaled so that the largest singular */
+/* . value of c*A is around DSQRT(BIG), BIG=SLAMCH('O'), then JOBR issues */
+/* . the licence to kill columns of A whose norm in c*A is less than */
+/* . DSQRT(SFMIN) (for JOBR.EQ.'R'), or less than SMALL=SFMIN/EPSLN, */
+/* . where SFMIN=SLAMCH('S'), EPSLN=SLAMCH('E'). */
+/* . = 'N': Do not kill small columns of c*A. This option assumes that */
+/* . BLAS and QR factorizations and triangular solvers are */
+/* . implemented to work in that range. If the condition of A */
+/* . is greater than BIG, use DGESVJ. */
+/* . = 'R': RESTRICTED range for sigma(c*A) is [DSQRT(SFMIN), DSQRT(BIG)] */
+/* . (roughly, as described above). This option is recommended. */
+/* . ~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
+/* . For computing the singular values in the FULL range [SFMIN,BIG] */
+/* . use DGESVJ. */
+/* . */
+/* . JOBT (input) CHARACTER*1 */
+/* . If the matrix is square then the procedure may determine to use */
+/* . transposed A if A^t seems to be better with respect to convergence. */
+/* . If the matrix is not square, JOBT is ignored. This is subject to */
+/* . changes in the future. */
+/* . The decision is based on two values of entropy over the adjoint */
+/* . orbit of A^t * A. See the descriptions of WORK(6) and WORK(7). */
+/* . = 'T': transpose if entropy test indicates possibly faster */
+/* . convergence of Jacobi process if A^t is taken as input. If A is */
+/* . replaced with A^t, then the row pivoting is included automatically. */
+/* . = 'N': do not speculate. */
+/* . This option can be used to compute only the singular values, or the */
+/* . full SVD (U, SIGMA and V). For only one set of singular vectors */
+/* . (U or V), the caller should provide both U and V, as one of the */
+/* . matrices is used as workspace if the matrix A is transposed. */
+/* . The implementer can easily remove this constraint and make the */
+/* . code more complicated. See the descriptions of U and V. */
+/* . */
+/* . JOBP (input) CHARACTER*1 */
+/* . Issues the licence to introduce structured perturbations to drown */
+/* . denormalized numbers. This licence should be active if the */
+/* . denormals are poorly implemented, causing slow computation, */
+/* . especially in cases of fast convergence (!). For details see [1,2]. */
+/* . For the sake of simplicity, this perturbations are included only */
+/* . when the full SVD or only the singular values are requested. The */
+/* . implementer/user can easily add the perturbation for the cases of */
+/* . computing one set of singular vectors. */
+/* . = 'P': introduce perturbation */
+/* . = 'N': do not perturb */
+/* ............................................................................ */
+
+/* M (input) INTEGER */
+/* The number of rows of the input matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the input matrix A. M >= N >= 0. */
+
+/* A (input/workspace) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* SVA (workspace/output) REAL array, dimension (N) */
+/* On exit, */
+/* - For WORK(1)/WORK(2) = ONE: The singular values of A. During the */
+/* computation SVA contains Euclidean column norms of the */
+/* iterated matrices in the array A. */
+/* - For WORK(1) .NE. WORK(2): The singular values of A are */
+/* (WORK(1)/WORK(2)) * SVA(1:N). This factored form is used if */
+/* sigma_max(A) overflows or if small singular values have been */
+/* saved from underflow by scaling the input matrix A. */
+/* - If JOBR='R' then some of the singular values may be returned */
+/* as exact zeros obtained by "set to zero" because they are */
+/* below the numerical rank threshold or are denormalized numbers. */
+
+/* U (workspace/output) REAL array, dimension ( LDU, N ) */
+/* If JOBU = 'U', then U contains on exit the M-by-N matrix of */
+/* the left singular vectors. */
+/* If JOBU = 'F', then U contains on exit the M-by-M matrix of */
+/* the left singular vectors, including an ONB */
+/* of the orthogonal complement of the Range(A). */
+/* If JOBU = 'W' .AND. (JOBV.EQ.'V' .AND. JOBT.EQ.'T' .AND. M.EQ.N), */
+/* then U is used as workspace if the procedure */
+/* replaces A with A^t. In that case, [V] is computed */
+/* in U as left singular vectors of A^t and then */
+/* copied back to the V array. This 'W' option is just */
+/* a reminder to the caller that in this case U is */
+/* reserved as workspace of length N*N. */
+/* If JOBU = 'N' U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U, LDU >= 1. */
+/* IF JOBU = 'U' or 'F' or 'W', then LDU >= M. */
+
+/* V (workspace/output) REAL array, dimension ( LDV, N ) */
+/* If JOBV = 'V', 'J' then V contains on exit the N-by-N matrix of */
+/* the right singular vectors; */
+/* If JOBV = 'W', AND (JOBU.EQ.'U' AND JOBT.EQ.'T' AND M.EQ.N), */
+/* then V is used as workspace if the pprocedure */
+/* replaces A with A^t. In that case, [U] is computed */
+/* in V as right singular vectors of A^t and then */
+/* copied back to the U array. This 'W' option is just */
+/* a reminder to the caller that in this case V is */
+/* reserved as workspace of length N*N. */
+/* If JOBV = 'N' V is not referenced. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V, LDV >= 1. */
+/* If JOBV = 'V' or 'J' or 'W', then LDV >= N. */
+
+/* WORK (workspace/output) REAL array, dimension at least LWORK. */
+/* On exit, */
+/* WORK(1) = SCALE = WORK(2) / WORK(1) is the scaling factor such */
+/* that SCALE*SVA(1:N) are the computed singular values */
+/* of A. (See the description of SVA().) */
+/* WORK(2) = See the description of WORK(1). */
+/* WORK(3) = SCONDA is an estimate for the condition number of */
+/* column equilibrated A. (If JOBA .EQ. 'E' or 'G') */
+/* SCONDA is an estimate of DSQRT(||(R^t * R)^(-1)||_1). */
+/* It is computed using DPOCON. It holds */
+/* N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA */
+/* where R is the triangular factor from the QRF of A. */
+/* However, if R is truncated and the numerical rank is */
+/* determined to be strictly smaller than N, SCONDA is */
+/* returned as -1, thus indicating that the smallest */
+/* singular values might be lost. */
+
+/* If full SVD is needed, the following two condition numbers are */
+/* useful for the analysis of the algorithm. They are provied for */
+/* a developer/implementer who is familiar with the details of */
+/* the method. */
+
+/* WORK(4) = an estimate of the scaled condition number of the */
+/* triangular factor in the first QR factorization. */
+/* WORK(5) = an estimate of the scaled condition number of the */
+/* triangular factor in the second QR factorization. */
+/* The following two parameters are computed if JOBT .EQ. 'T'. */
+/* They are provided for a developer/implementer who is familiar */
+/* with the details of the method. */
+
+/* WORK(6) = the entropy of A^t*A :: this is the Shannon entropy */
+/* of diag(A^t*A) / Trace(A^t*A) taken as point in the */
+/* probability simplex. */
+/* WORK(7) = the entropy of A*A^t. */
+
+/* LWORK (input) INTEGER */
+/* Length of WORK to confirm proper allocation of work space. */
+/* LWORK depends on the job: */
+
+/* If only SIGMA is needed ( JOBU.EQ.'N', JOBV.EQ.'N' ) and */
+/* -> .. no scaled condition estimate required ( JOBE.EQ.'N'): */
+/* LWORK >= max(2*M+N,4*N+1,7). This is the minimal requirement. */
+/* For optimal performance (blocked code) the optimal value */
+/* is LWORK >= max(2*M+N,3*N+(N+1)*NB,7). Here NB is the optimal */
+/* block size for xGEQP3/xGEQRF. */
+/* -> .. an estimate of the scaled condition number of A is */
+/* required (JOBA='E', 'G'). In this case, LWORK is the maximum */
+/* of the above and N*N+4*N, i.e. LWORK >= max(2*M+N,N*N+4N,7). */
+
+/* If SIGMA and the right singular vectors are needed (JOBV.EQ.'V'), */
+/* -> the minimal requirement is LWORK >= max(2*N+M,7). */
+/* -> For optimal performance, LWORK >= max(2*N+M,2*N+N*NB,7), */
+/* where NB is the optimal block size. */
+
+/* If SIGMA and the left singular vectors are needed */
+/* -> the minimal requirement is LWORK >= max(2*N+M,7). */
+/* -> For optimal performance, LWORK >= max(2*N+M,2*N+N*NB,7), */
+/* where NB is the optimal block size. */
+
+/* If full SVD is needed ( JOBU.EQ.'U' or 'F', JOBV.EQ.'V' ) and */
+/* -> .. the singular vectors are computed without explicit */
+/* accumulation of the Jacobi rotations, LWORK >= 6*N+2*N*N */
+/* -> .. in the iterative part, the Jacobi rotations are */
+/* explicitly accumulated (option, see the description of JOBV), */
+/* then the minimal requirement is LWORK >= max(M+3*N+N*N,7). */
+/* For better performance, if NB is the optimal block size, */
+/* LWORK >= max(3*N+N*N+M,3*N+N*N+N*NB,7). */
+
+/* IWORK (workspace/output) INTEGER array, dimension M+3*N. */
+/* On exit, */
+/* IWORK(1) = the numerical rank determined after the initial */
+/* QR factorization with pivoting. See the descriptions */
+/* of JOBA and JOBR. */
+/* IWORK(2) = the number of the computed nonzero singular values */
+/* IWORK(3) = if nonzero, a warning message: */
+/* If IWORK(3).EQ.1 then some of the column norms of A */
+/* were denormalized floats. The requested high accuracy */
+/* is not warranted by the data. */
+
+/* INFO (output) INTEGER */
+/* < 0 : if INFO = -i, then the i-th argument had an illegal value. */
+/* = 0 : successful exit; */
+/* > 0 : DGEJSV did not converge in the maximal allowed number */
+/* of sweeps. The computed values may be inaccurate. */
+
+/* ............................................................................ */
+
+/* Local Parameters: */
+
+
+/* Local Scalars: */
+
+
+/* Intrinsic Functions: */
+
+
+/* External Functions: */
+
+
+/* External Subroutines ( BLAS, LAPACK ): */
+
+
+
+/* ............................................................................ */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ --sva;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ lsvec = lsame_(jobu, "U") || lsame_(jobu, "F");
+ jracc = lsame_(jobv, "J");
+ rsvec = lsame_(jobv, "V") || jracc;
+ rowpiv = lsame_(joba, "F") || lsame_(joba, "G");
+ l2rank = lsame_(joba, "R");
+ l2aber = lsame_(joba, "A");
+ errest = lsame_(joba, "E") || lsame_(joba, "G");
+ l2tran = lsame_(jobt, "T");
+ l2kill = lsame_(jobr, "R");
+ defr = lsame_(jobr, "N");
+ l2pert = lsame_(jobp, "P");
+
+ if (! (rowpiv || l2rank || l2aber || errest || lsame_(joba, "C"))) {
+ *info = -1;
+ } else if (! (lsvec || lsame_(jobu, "N") || lsame_(
+ jobu, "W"))) {
+ *info = -2;
+ } else if (! (rsvec || lsame_(jobv, "N") || lsame_(
+ jobv, "W")) || jracc && ! lsvec) {
+ *info = -3;
+ } else if (! (l2kill || defr)) {
+ *info = -4;
+ } else if (! (l2tran || lsame_(jobt, "N"))) {
+ *info = -5;
+ } else if (! (l2pert || lsame_(jobp, "N"))) {
+ *info = -6;
+ } else if (*m < 0) {
+ *info = -7;
+ } else if (*n < 0 || *n > *m) {
+ *info = -8;
+ } else if (*lda < *m) {
+ *info = -10;
+ } else if (lsvec && *ldu < *m) {
+ *info = -13;
+ } else if (rsvec && *ldv < *n) {
+ *info = -14;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = 7, i__2 = (*n << 2) + 1, i__1 = max(i__1,i__2), i__2 = (*m <<
+ 1) + *n;
+/* Computing MAX */
+ i__3 = 7, i__4 = (*n << 2) + *n * *n, i__3 = max(i__3,i__4), i__4 = (*
+ m << 1) + *n;
+/* Computing MAX */
+ i__5 = 7, i__6 = (*n << 1) + *m;
+/* Computing MAX */
+ i__7 = 7, i__8 = (*n << 1) + *m;
+/* Computing MAX */
+ i__9 = 7, i__10 = *m + *n * 3 + *n * *n;
+ if (! (lsvec || rsvec || errest) && *lwork < max(i__1,i__2) || ! (
+ lsvec || lsvec) && errest && *lwork < max(i__3,i__4) || lsvec
+ && ! rsvec && *lwork < max(i__5,i__6) || rsvec && ! lsvec && *
+ lwork < max(i__7,i__8) || lsvec && rsvec && ! jracc && *lwork
+ < *n * 6 + (*n << 1) * *n || lsvec && rsvec && jracc && *
+ lwork < max(i__9,i__10)) {
+ *info = -17;
+ } else {
+/* #:) */
+ *info = 0;
+ }
+ }
+
+ if (*info != 0) {
+/* #:( */
+ i__1 = -(*info);
+ xerbla_("DGEJSV", &i__1);
+ }
+
+/* Quick return for void matrix (Y3K safe) */
+/* #:) */
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Determine whether the matrix U should be M x N or M x M */
+
+ if (lsvec) {
+ n1 = *n;
+ if (lsame_(jobu, "F")) {
+ n1 = *m;
+ }
+ }
+
+/* Set numerical parameters */
+
+/* ! NOTE: Make sure DLAMCH() does not fail on the target architecture. */
+
+ epsln = dlamch_("Epsilon");
+ sfmin = dlamch_("SafeMinimum");
+ small = sfmin / epsln;
+ big = dlamch_("O");
+/* BIG = ONE / SFMIN */
+
+/* Initialize SVA(1:N) = diag( ||A e_i||_2 )_1^N */
+
+/* (!) If necessary, scale SVA() to protect the largest norm from */
+/* overflow. It is possible that this scaling pushes the smallest */
+/* column norm left from the underflow threshold (extreme case). */
+
+ scalem = 1. / sqrt((doublereal) (*m) * (doublereal) (*n));
+ noscal = TRUE_;
+ goscal = TRUE_;
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ aapp = 0.;
+ aaqq = 0.;
+ dlassq_(m, &a[p * a_dim1 + 1], &c__1, &aapp, &aaqq);
+ if (aapp > big) {
+ *info = -9;
+ i__2 = -(*info);
+ xerbla_("DGEJSV", &i__2);
+ return 0;
+ }
+ aaqq = sqrt(aaqq);
+ if (aapp < big / aaqq && noscal) {
+ sva[p] = aapp * aaqq;
+ } else {
+ noscal = FALSE_;
+ sva[p] = aapp * (aaqq * scalem);
+ if (goscal) {
+ goscal = FALSE_;
+ i__2 = p - 1;
+ dscal_(&i__2, &scalem, &sva[1], &c__1);
+ }
+ }
+/* L1874: */
+ }
+
+ if (noscal) {
+ scalem = 1.;
+ }
+
+ aapp = 0.;
+ aaqq = big;
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+/* Computing MAX */
+ d__1 = aapp, d__2 = sva[p];
+ aapp = max(d__1,d__2);
+ if (sva[p] != 0.) {
+/* Computing MIN */
+ d__1 = aaqq, d__2 = sva[p];
+ aaqq = min(d__1,d__2);
+ }
+/* L4781: */
+ }
+
+/* Quick return for zero M x N matrix */
+/* #:) */
+ if (aapp == 0.) {
+ if (lsvec) {
+ dlaset_("G", m, &n1, &c_b34, &c_b35, &u[u_offset], ldu)
+ ;
+ }
+ if (rsvec) {
+ dlaset_("G", n, n, &c_b34, &c_b35, &v[v_offset], ldv);
+ }
+ work[1] = 1.;
+ work[2] = 1.;
+ if (errest) {
+ work[3] = 1.;
+ }
+ if (lsvec && rsvec) {
+ work[4] = 1.;
+ work[5] = 1.;
+ }
+ if (l2tran) {
+ work[6] = 0.;
+ work[7] = 0.;
+ }
+ iwork[1] = 0;
+ iwork[2] = 0;
+ return 0;
+ }
+
+/* Issue warning if denormalized column norms detected. Override the */
+/* high relative accuracy request. Issue licence to kill columns */
+/* (set them to zero) whose norm is less than sigma_max / BIG (roughly). */
+/* #:( */
+ warning = 0;
+ if (aaqq <= sfmin) {
+ l2rank = TRUE_;
+ l2kill = TRUE_;
+ warning = 1;
+ }
+
+/* Quick return for one-column matrix */
+/* #:) */
+ if (*n == 1) {
+
+ if (lsvec) {
+ dlascl_("G", &c__0, &c__0, &sva[1], &scalem, m, &c__1, &a[a_dim1
+ + 1], lda, &ierr);
+ dlacpy_("A", m, &c__1, &a[a_offset], lda, &u[u_offset], ldu);
+/* computing all M left singular vectors of the M x 1 matrix */
+ if (n1 != *n) {
+ i__1 = *lwork - *n;
+ dgeqrf_(m, n, &u[u_offset], ldu, &work[1], &work[*n + 1], &
+ i__1, &ierr);
+ i__1 = *lwork - *n;
+ dorgqr_(m, &n1, &c__1, &u[u_offset], ldu, &work[1], &work[*n
+ + 1], &i__1, &ierr);
+ dcopy_(m, &a[a_dim1 + 1], &c__1, &u[u_dim1 + 1], &c__1);
+ }
+ }
+ if (rsvec) {
+ v[v_dim1 + 1] = 1.;
+ }
+ if (sva[1] < big * scalem) {
+ sva[1] /= scalem;
+ scalem = 1.;
+ }
+ work[1] = 1. / scalem;
+ work[2] = 1.;
+ if (sva[1] != 0.) {
+ iwork[1] = 1;
+ if (sva[1] / scalem >= sfmin) {
+ iwork[2] = 1;
+ } else {
+ iwork[2] = 0;
+ }
+ } else {
+ iwork[1] = 0;
+ iwork[2] = 0;
+ }
+ if (errest) {
+ work[3] = 1.;
+ }
+ if (lsvec && rsvec) {
+ work[4] = 1.;
+ work[5] = 1.;
+ }
+ if (l2tran) {
+ work[6] = 0.;
+ work[7] = 0.;
+ }
+ return 0;
+
+ }
+
+ transp = FALSE_;
+ l2tran = l2tran && *m == *n;
+
+ aatmax = -1.;
+ aatmin = big;
+ if (rowpiv || l2tran) {
+
+/* Compute the row norms, needed to determine row pivoting sequence */
+/* (in the case of heavily row weighted A, row pivoting is strongly */
+/* advised) and to collect information needed to compare the */
+/* structures of A * A^t and A^t * A (in the case L2TRAN.EQ..TRUE.). */
+
+ if (l2tran) {
+ i__1 = *m;
+ for (p = 1; p <= i__1; ++p) {
+ xsc = 0.;
+ temp1 = 0.;
+ dlassq_(n, &a[p + a_dim1], lda, &xsc, &temp1);
+/* DLASSQ gets both the ell_2 and the ell_infinity norm */
+/* in one pass through the vector */
+ work[*m + *n + p] = xsc * scalem;
+ work[*n + p] = xsc * (scalem * sqrt(temp1));
+/* Computing MAX */
+ d__1 = aatmax, d__2 = work[*n + p];
+ aatmax = max(d__1,d__2);
+ if (work[*n + p] != 0.) {
+/* Computing MIN */
+ d__1 = aatmin, d__2 = work[*n + p];
+ aatmin = min(d__1,d__2);
+ }
+/* L1950: */
+ }
+ } else {
+ i__1 = *m;
+ for (p = 1; p <= i__1; ++p) {
+ work[*m + *n + p] = scalem * (d__1 = a[p + idamax_(n, &a[p +
+ a_dim1], lda) * a_dim1], abs(d__1));
+/* Computing MAX */
+ d__1 = aatmax, d__2 = work[*m + *n + p];
+ aatmax = max(d__1,d__2);
+/* Computing MIN */
+ d__1 = aatmin, d__2 = work[*m + *n + p];
+ aatmin = min(d__1,d__2);
+/* L1904: */
+ }
+ }
+
+ }
+
+/* For square matrix A try to determine whether A^t would be better */
+/* input for the preconditioned Jacobi SVD, with faster convergence. */
+/* The decision is based on an O(N) function of the vector of column */
+/* and row norms of A, based on the Shannon entropy. This should give */
+/* the right choice in most cases when the difference actually matters. */
+/* It may fail and pick the slower converging side. */
+
+ entra = 0.;
+ entrat = 0.;
+ if (l2tran) {
+
+ xsc = 0.;
+ temp1 = 0.;
+ dlassq_(n, &sva[1], &c__1, &xsc, &temp1);
+ temp1 = 1. / temp1;
+
+ entra = 0.;
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+/* Computing 2nd power */
+ d__1 = sva[p] / xsc;
+ big1 = d__1 * d__1 * temp1;
+ if (big1 != 0.) {
+ entra += big1 * log(big1);
+ }
+/* L1113: */
+ }
+ entra = -entra / log((doublereal) (*n));
+
+/* Now, SVA().^2/Trace(A^t * A) is a point in the probability simplex. */
+/* It is derived from the diagonal of A^t * A. Do the same with the */
+/* diagonal of A * A^t, compute the entropy of the corresponding */
+/* probability distribution. Note that A * A^t and A^t * A have the */
+/* same trace. */
+
+ entrat = 0.;
+ i__1 = *n + *m;
+ for (p = *n + 1; p <= i__1; ++p) {
+/* Computing 2nd power */
+ d__1 = work[p] / xsc;
+ big1 = d__1 * d__1 * temp1;
+ if (big1 != 0.) {
+ entrat += big1 * log(big1);
+ }
+/* L1114: */
+ }
+ entrat = -entrat / log((doublereal) (*m));
+
+/* Analyze the entropies and decide A or A^t. Smaller entropy */
+/* usually means better input for the algorithm. */
+
+ transp = entrat < entra;
+
+/* If A^t is better than A, transpose A. */
+
+ if (transp) {
+/* In an optimal implementation, this trivial transpose */
+/* should be replaced with faster transpose. */
+ i__1 = *n - 1;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *n;
+ for (q = p + 1; q <= i__2; ++q) {
+ temp1 = a[q + p * a_dim1];
+ a[q + p * a_dim1] = a[p + q * a_dim1];
+ a[p + q * a_dim1] = temp1;
+/* L1116: */
+ }
+/* L1115: */
+ }
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ work[*m + *n + p] = sva[p];
+ sva[p] = work[*n + p];
+/* L1117: */
+ }
+ temp1 = aapp;
+ aapp = aatmax;
+ aatmax = temp1;
+ temp1 = aaqq;
+ aaqq = aatmin;
+ aatmin = temp1;
+ kill = lsvec;
+ lsvec = rsvec;
+ rsvec = kill;
+
+ rowpiv = TRUE_;
+ }
+
+ }
+/* END IF L2TRAN */
+
+/* Scale the matrix so that its maximal singular value remains less */
+/* than DSQRT(BIG) -- the matrix is scaled so that its maximal column */
+/* has Euclidean norm equal to DSQRT(BIG/N). The only reason to keep */
+/* DSQRT(BIG) instead of BIG is the fact that DGEJSV uses LAPACK and */
+/* BLAS routines that, in some implementations, are not capable of */
+/* working in the full interval [SFMIN,BIG] and that they may provoke */
+/* overflows in the intermediate results. If the singular values spread */
+/* from SFMIN to BIG, then DGESVJ will compute them. So, in that case, */
+/* one should use DGESVJ instead of DGEJSV. */
+
+ big1 = sqrt(big);
+ temp1 = sqrt(big / (doublereal) (*n));
+
+ dlascl_("G", &c__0, &c__0, &aapp, &temp1, n, &c__1, &sva[1], n, &ierr);
+ if (aaqq > aapp * sfmin) {
+ aaqq = aaqq / aapp * temp1;
+ } else {
+ aaqq = aaqq * temp1 / aapp;
+ }
+ temp1 *= scalem;
+ dlascl_("G", &c__0, &c__0, &aapp, &temp1, m, n, &a[a_offset], lda, &ierr);
+
+/* To undo scaling at the end of this procedure, multiply the */
+/* computed singular values with USCAL2 / USCAL1. */
+
+ uscal1 = temp1;
+ uscal2 = aapp;
+
+ if (l2kill) {
+/* L2KILL enforces computation of nonzero singular values in */
+/* the restricted range of condition number of the initial A, */
+/* sigma_max(A) / sigma_min(A) approx. DSQRT(BIG)/DSQRT(SFMIN). */
+ xsc = sqrt(sfmin);
+ } else {
+ xsc = small;
+
+/* Now, if the condition number of A is too big, */
+/* sigma_max(A) / sigma_min(A) .GT. DSQRT(BIG/N) * EPSLN / SFMIN, */
+/* as a precaution measure, the full SVD is computed using DGESVJ */
+/* with accumulated Jacobi rotations. This provides numerically */
+/* more robust computation, at the cost of slightly increased run */
+/* time. Depending on the concrete implementation of BLAS and LAPACK */
+/* (i.e. how they behave in presence of extreme ill-conditioning) the */
+/* implementor may decide to remove this switch. */
+ if (aaqq < sqrt(sfmin) && lsvec && rsvec) {
+ jracc = TRUE_;
+ }
+
+ }
+ if (aaqq < xsc) {
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ if (sva[p] < xsc) {
+ dlaset_("A", m, &c__1, &c_b34, &c_b34, &a[p * a_dim1 + 1],
+ lda);
+ sva[p] = 0.;
+ }
+/* L700: */
+ }
+ }
+
+/* Preconditioning using QR factorization with pivoting */
+
+ if (rowpiv) {
+/* Optional row permutation (Bjoerck row pivoting): */
+/* A result by Cox and Higham shows that the Bjoerck's */
+/* row pivoting combined with standard column pivoting */
+/* has similar effect as Powell-Reid complete pivoting. */
+/* The ell-infinity norms of A are made nonincreasing. */
+ i__1 = *m - 1;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *m - p + 1;
+ q = idamax_(&i__2, &work[*m + *n + p], &c__1) + p - 1;
+ iwork[(*n << 1) + p] = q;
+ if (p != q) {
+ temp1 = work[*m + *n + p];
+ work[*m + *n + p] = work[*m + *n + q];
+ work[*m + *n + q] = temp1;
+ }
+/* L1952: */
+ }
+ i__1 = *m - 1;
+ dlaswp_(n, &a[a_offset], lda, &c__1, &i__1, &iwork[(*n << 1) + 1], &
+ c__1);
+ }
+
+/* End of the preparation phase (scaling, optional sorting and */
+/* transposing, optional flushing of small columns). */
+
+/* Preconditioning */
+
+/* If the full SVD is needed, the right singular vectors are computed */
+/* from a matrix equation, and for that we need theoretical analysis */
+/* of the Businger-Golub pivoting. So we use DGEQP3 as the first RR QRF. */
+/* In all other cases the first RR QRF can be chosen by other criteria */
+/* (eg speed by replacing global with restricted window pivoting, such */
+/* as in SGEQPX from TOMS # 782). Good results will be obtained using */
+/* SGEQPX with properly (!) chosen numerical parameters. */
+/* Any improvement of DGEQP3 improves overall performance of DGEJSV. */
+
+/* A * P1 = Q1 * [ R1^t 0]^t: */
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+/* .. all columns are free columns */
+ iwork[p] = 0;
+/* L1963: */
+ }
+ i__1 = *lwork - *n;
+ dgeqp3_(m, n, &a[a_offset], lda, &iwork[1], &work[1], &work[*n + 1], &
+ i__1, &ierr);
+
+/* The upper triangular matrix R1 from the first QRF is inspected for */
+/* rank deficiency and possibilities for deflation, or possible */
+/* ill-conditioning. Depending on the user specified flag L2RANK, */
+/* the procedure explores possibilities to reduce the numerical */
+/* rank by inspecting the computed upper triangular factor. If */
+/* L2RANK or L2ABER are up, then DGEJSV will compute the SVD of */
+/* A + dA, where ||dA|| <= f(M,N)*EPSLN. */
+
+ nr = 1;
+ if (l2aber) {
+/* Standard absolute error bound suffices. All sigma_i with */
+/* sigma_i < N*EPSLN*||A|| are flushed to zero. This is an */
+/* aggressive enforcement of lower numerical rank by introducing a */
+/* backward error of the order of N*EPSLN*||A||. */
+ temp1 = sqrt((doublereal) (*n)) * epsln;
+ i__1 = *n;
+ for (p = 2; p <= i__1; ++p) {
+ if ((d__2 = a[p + p * a_dim1], abs(d__2)) >= temp1 * (d__1 = a[
+ a_dim1 + 1], abs(d__1))) {
+ ++nr;
+ } else {
+ goto L3002;
+ }
+/* L3001: */
+ }
+L3002:
+ ;
+ } else if (l2rank) {
+/* .. similarly as above, only slightly more gentle (less aggressive). */
+/* Sudden drop on the diagonal of R1 is used as the criterion for */
+/* close-to-rank-deficient. */
+ temp1 = sqrt(sfmin);
+ i__1 = *n;
+ for (p = 2; p <= i__1; ++p) {
+ if ((d__2 = a[p + p * a_dim1], abs(d__2)) < epsln * (d__1 = a[p -
+ 1 + (p - 1) * a_dim1], abs(d__1)) || (d__3 = a[p + p *
+ a_dim1], abs(d__3)) < small || l2kill && (d__4 = a[p + p *
+ a_dim1], abs(d__4)) < temp1) {
+ goto L3402;
+ }
+ ++nr;
+/* L3401: */
+ }
+L3402:
+
+ ;
+ } else {
+/* The goal is high relative accuracy. However, if the matrix */
+/* has high scaled condition number the relative accuracy is in */
+/* general not feasible. Later on, a condition number estimator */
+/* will be deployed to estimate the scaled condition number. */
+/* Here we just remove the underflowed part of the triangular */
+/* factor. This prevents the situation in which the code is */
+/* working hard to get the accuracy not warranted by the data. */
+ temp1 = sqrt(sfmin);
+ i__1 = *n;
+ for (p = 2; p <= i__1; ++p) {
+ if ((d__1 = a[p + p * a_dim1], abs(d__1)) < small || l2kill && (
+ d__2 = a[p + p * a_dim1], abs(d__2)) < temp1) {
+ goto L3302;
+ }
+ ++nr;
+/* L3301: */
+ }
+L3302:
+
+ ;
+ }
+
+ almort = FALSE_;
+ if (nr == *n) {
+ maxprj = 1.;
+ i__1 = *n;
+ for (p = 2; p <= i__1; ++p) {
+ temp1 = (d__1 = a[p + p * a_dim1], abs(d__1)) / sva[iwork[p]];
+ maxprj = min(maxprj,temp1);
+/* L3051: */
+ }
+/* Computing 2nd power */
+ d__1 = maxprj;
+ if (d__1 * d__1 >= 1. - (doublereal) (*n) * epsln) {
+ almort = TRUE_;
+ }
+ }
+
+
+ sconda = -1.;
+ condr1 = -1.;
+ condr2 = -1.;
+
+ if (errest) {
+ if (*n == nr) {
+ if (rsvec) {
+/* .. V is available as workspace */
+ dlacpy_("U", n, n, &a[a_offset], lda, &v[v_offset], ldv);
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ temp1 = sva[iwork[p]];
+ d__1 = 1. / temp1;
+ dscal_(&p, &d__1, &v[p * v_dim1 + 1], &c__1);
+/* L3053: */
+ }
+ dpocon_("U", n, &v[v_offset], ldv, &c_b35, &temp1, &work[*n +
+ 1], &iwork[(*n << 1) + *m + 1], &ierr);
+ } else if (lsvec) {
+/* .. U is available as workspace */
+ dlacpy_("U", n, n, &a[a_offset], lda, &u[u_offset], ldu);
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ temp1 = sva[iwork[p]];
+ d__1 = 1. / temp1;
+ dscal_(&p, &d__1, &u[p * u_dim1 + 1], &c__1);
+/* L3054: */
+ }
+ dpocon_("U", n, &u[u_offset], ldu, &c_b35, &temp1, &work[*n +
+ 1], &iwork[(*n << 1) + *m + 1], &ierr);
+ } else {
+ dlacpy_("U", n, n, &a[a_offset], lda, &work[*n + 1], n);
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ temp1 = sva[iwork[p]];
+ d__1 = 1. / temp1;
+ dscal_(&p, &d__1, &work[*n + (p - 1) * *n + 1], &c__1);
+/* L3052: */
+ }
+/* .. the columns of R are scaled to have unit Euclidean lengths. */
+ dpocon_("U", n, &work[*n + 1], n, &c_b35, &temp1, &work[*n + *
+ n * *n + 1], &iwork[(*n << 1) + *m + 1], &ierr);
+ }
+ sconda = 1. / sqrt(temp1);
+/* SCONDA is an estimate of DSQRT(||(R^t * R)^(-1)||_1). */
+/* N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA */
+ } else {
+ sconda = -1.;
+ }
+ }
+
+ l2pert = l2pert && (d__1 = a[a_dim1 + 1] / a[nr + nr * a_dim1], abs(d__1))
+ > sqrt(big1);
+/* If there is no violent scaling, artificial perturbation is not needed. */
+
+/* Phase 3: */
+
+ if (! (rsvec || lsvec)) {
+
+/* Singular Values only */
+
+/* .. transpose A(1:NR,1:N) */
+/* Computing MIN */
+ i__2 = *n - 1;
+ i__1 = min(i__2,nr);
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *n - p;
+ dcopy_(&i__2, &a[p + (p + 1) * a_dim1], lda, &a[p + 1 + p *
+ a_dim1], &c__1);
+/* L1946: */
+ }
+
+/* The following two DO-loops introduce small relative perturbation */
+/* into the strict upper triangle of the lower triangular matrix. */
+/* Small entries below the main diagonal are also changed. */
+/* This modification is useful if the computing environment does not */
+/* provide/allow FLUSH TO ZERO underflow, for it prevents many */
+/* annoying denormalized numbers in case of strongly scaled matrices. */
+/* The perturbation is structured so that it does not introduce any */
+/* new perturbation of the singular values, and it does not destroy */
+/* the job done by the preconditioner. */
+/* The licence for this perturbation is in the variable L2PERT, which */
+/* should be .FALSE. if FLUSH TO ZERO underflow is active. */
+
+ if (! almort) {
+
+ if (l2pert) {
+/* XSC = DSQRT(SMALL) */
+ xsc = epsln / (doublereal) (*n);
+ i__1 = nr;
+ for (q = 1; q <= i__1; ++q) {
+ temp1 = xsc * (d__1 = a[q + q * a_dim1], abs(d__1));
+ i__2 = *n;
+ for (p = 1; p <= i__2; ++p) {
+ if (p > q && (d__1 = a[p + q * a_dim1], abs(d__1)) <=
+ temp1 || p < q) {
+ a[p + q * a_dim1] = d_sign(&temp1, &a[p + q *
+ a_dim1]);
+ }
+/* L4949: */
+ }
+/* L4947: */
+ }
+ } else {
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &a[(a_dim1 << 1) +
+ 1], lda);
+ }
+
+/* .. second preconditioning using the QR factorization */
+
+ i__1 = *lwork - *n;
+ dgeqrf_(n, &nr, &a[a_offset], lda, &work[1], &work[*n + 1], &i__1,
+ &ierr);
+
+/* .. and transpose upper to lower triangular */
+ i__1 = nr - 1;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = nr - p;
+ dcopy_(&i__2, &a[p + (p + 1) * a_dim1], lda, &a[p + 1 + p *
+ a_dim1], &c__1);
+/* L1948: */
+ }
+
+ }
+
+/* Row-cyclic Jacobi SVD algorithm with column pivoting */
+
+/* .. again some perturbation (a "background noise") is added */
+/* to drown denormals */
+ if (l2pert) {
+/* XSC = DSQRT(SMALL) */
+ xsc = epsln / (doublereal) (*n);
+ i__1 = nr;
+ for (q = 1; q <= i__1; ++q) {
+ temp1 = xsc * (d__1 = a[q + q * a_dim1], abs(d__1));
+ i__2 = nr;
+ for (p = 1; p <= i__2; ++p) {
+ if (p > q && (d__1 = a[p + q * a_dim1], abs(d__1)) <=
+ temp1 || p < q) {
+ a[p + q * a_dim1] = d_sign(&temp1, &a[p + q * a_dim1])
+ ;
+ }
+/* L1949: */
+ }
+/* L1947: */
+ }
+ } else {
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &a[(a_dim1 << 1) + 1],
+ lda);
+ }
+
+/* .. and one-sided Jacobi rotations are started on a lower */
+/* triangular matrix (plus perturbation which is ignored in */
+/* the part which destroys triangular form (confusing?!)) */
+
+ dgesvj_("L", "NoU", "NoV", &nr, &nr, &a[a_offset], lda, &sva[1], n, &
+ v[v_offset], ldv, &work[1], lwork, info);
+
+ scalem = work[1];
+ numrank = i_dnnt(&work[2]);
+
+
+ } else if (rsvec && ! lsvec) {
+
+/* -> Singular Values and Right Singular Vectors <- */
+
+ if (almort) {
+
+/* .. in this case NR equals N */
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *n - p + 1;
+ dcopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1], &
+ c__1);
+/* L1998: */
+ }
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
+ 1], ldv);
+
+ dgesvj_("L", "U", "N", n, &nr, &v[v_offset], ldv, &sva[1], &nr, &
+ a[a_offset], lda, &work[1], lwork, info);
+ scalem = work[1];
+ numrank = i_dnnt(&work[2]);
+ } else {
+
+/* .. two more QR factorizations ( one QRF is not enough, two require */
+/* accumulated product of Jacobi rotations, three are perfect ) */
+
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ dlaset_("Lower", &i__1, &i__2, &c_b34, &c_b34, &a[a_dim1 + 2],
+ lda);
+ i__1 = *lwork - *n;
+ dgelqf_(&nr, n, &a[a_offset], lda, &work[1], &work[*n + 1], &i__1,
+ &ierr);
+ dlacpy_("Lower", &nr, &nr, &a[a_offset], lda, &v[v_offset], ldv);
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
+ 1], ldv);
+ i__1 = *lwork - (*n << 1);
+ dgeqrf_(&nr, &nr, &v[v_offset], ldv, &work[*n + 1], &work[(*n <<
+ 1) + 1], &i__1, &ierr);
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = nr - p + 1;
+ dcopy_(&i__2, &v[p + p * v_dim1], ldv, &v[p + p * v_dim1], &
+ c__1);
+/* L8998: */
+ }
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
+ 1], ldv);
+
+ dgesvj_("Lower", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[1], &
+ nr, &u[u_offset], ldu, &work[*n + 1], lwork, info);
+ scalem = work[*n + 1];
+ numrank = i_dnnt(&work[*n + 2]);
+ if (nr < *n) {
+ i__1 = *n - nr;
+ dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 + v_dim1],
+ ldv);
+ i__1 = *n - nr;
+ dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) * v_dim1
+ + 1], ldv);
+ i__1 = *n - nr;
+ i__2 = *n - nr;
+ dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1 + (nr +
+ 1) * v_dim1], ldv);
+ }
+
+ i__1 = *lwork - *n;
+ dormlq_("Left", "Transpose", n, n, &nr, &a[a_offset], lda, &work[
+ 1], &v[v_offset], ldv, &work[*n + 1], &i__1, &ierr);
+
+ }
+
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ dcopy_(n, &v[p + v_dim1], ldv, &a[iwork[p] + a_dim1], lda);
+/* L8991: */
+ }
+ dlacpy_("All", n, n, &a[a_offset], lda, &v[v_offset], ldv);
+
+ if (transp) {
+ dlacpy_("All", n, n, &v[v_offset], ldv, &u[u_offset], ldu);
+ }
+
+ } else if (lsvec && ! rsvec) {
+
+/* -#- Singular Values and Left Singular Vectors -#- */
+
+/* .. second preconditioning step to avoid need to accumulate */
+/* Jacobi rotations in the Jacobi iterations. */
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *n - p + 1;
+ dcopy_(&i__2, &a[p + p * a_dim1], lda, &u[p + p * u_dim1], &c__1);
+/* L1965: */
+ }
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &u[(u_dim1 << 1) + 1],
+ ldu);
+
+ i__1 = *lwork - (*n << 1);
+ dgeqrf_(n, &nr, &u[u_offset], ldu, &work[*n + 1], &work[(*n << 1) + 1]
+, &i__1, &ierr);
+
+ i__1 = nr - 1;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = nr - p;
+ dcopy_(&i__2, &u[p + (p + 1) * u_dim1], ldu, &u[p + 1 + p *
+ u_dim1], &c__1);
+/* L1967: */
+ }
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &u[(u_dim1 << 1) + 1],
+ ldu);
+
+ i__1 = *lwork - *n;
+ dgesvj_("Lower", "U", "N", &nr, &nr, &u[u_offset], ldu, &sva[1], &nr,
+ &a[a_offset], lda, &work[*n + 1], &i__1, info);
+ scalem = work[*n + 1];
+ numrank = i_dnnt(&work[*n + 2]);
+
+ if (nr < *m) {
+ i__1 = *m - nr;
+ dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &u[nr + 1 + u_dim1], ldu);
+ if (nr < n1) {
+ i__1 = n1 - nr;
+ dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &u[(nr + 1) * u_dim1
+ + 1], ldu);
+ i__1 = *m - nr;
+ i__2 = n1 - nr;
+ dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[nr + 1 + (nr +
+ 1) * u_dim1], ldu);
+ }
+ }
+
+ i__1 = *lwork - *n;
+ dormqr_("Left", "No Tr", m, &n1, n, &a[a_offset], lda, &work[1], &u[
+ u_offset], ldu, &work[*n + 1], &i__1, &ierr);
+
+ if (rowpiv) {
+ i__1 = *m - 1;
+ dlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n << 1) +
+ 1], &c_n1);
+ }
+
+ i__1 = n1;
+ for (p = 1; p <= i__1; ++p) {
+ xsc = 1. / dnrm2_(m, &u[p * u_dim1 + 1], &c__1);
+ dscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
+/* L1974: */
+ }
+
+ if (transp) {
+ dlacpy_("All", n, n, &u[u_offset], ldu, &v[v_offset], ldv);
+ }
+
+ } else {
+
+/* -#- Full SVD -#- */
+
+ if (! jracc) {
+
+ if (! almort) {
+
+/* Second Preconditioning Step (QRF [with pivoting]) */
+/* Note that the composition of TRANSPOSE, QRF and TRANSPOSE is */
+/* equivalent to an LQF CALL. Since in many libraries the QRF */
+/* seems to be better optimized than the LQF, we do explicit */
+/* transpose and use the QRF. This is subject to changes in an */
+/* optimized implementation of DGEJSV. */
+
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *n - p + 1;
+ dcopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1],
+ &c__1);
+/* L1968: */
+ }
+
+/* .. the following two loops perturb small entries to avoid */
+/* denormals in the second QR factorization, where they are */
+/* as good as zeros. This is done to avoid painfully slow */
+/* computation with denormals. The relative size of the perturbation */
+/* is a parameter that can be changed by the implementer. */
+/* This perturbation device will be obsolete on machines with */
+/* properly implemented arithmetic. */
+/* To switch it off, set L2PERT=.FALSE. To remove it from the */
+/* code, remove the action under L2PERT=.TRUE., leave the ELSE part. */
+/* The following two loops should be blocked and fused with the */
+/* transposed copy above. */
+
+ if (l2pert) {
+ xsc = sqrt(small);
+ i__1 = nr;
+ for (q = 1; q <= i__1; ++q) {
+ temp1 = xsc * (d__1 = v[q + q * v_dim1], abs(d__1));
+ i__2 = *n;
+ for (p = 1; p <= i__2; ++p) {
+ if (p > q && (d__1 = v[p + q * v_dim1], abs(d__1))
+ <= temp1 || p < q) {
+ v[p + q * v_dim1] = d_sign(&temp1, &v[p + q *
+ v_dim1]);
+ }
+ if (p < q) {
+ v[p + q * v_dim1] = -v[p + q * v_dim1];
+ }
+/* L2968: */
+ }
+/* L2969: */
+ }
+ } else {
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 <<
+ 1) + 1], ldv);
+ }
+
+/* Estimate the row scaled condition number of R1 */
+/* (If R1 is rectangular, N > NR, then the condition number */
+/* of the leading NR x NR submatrix is estimated.) */
+
+ dlacpy_("L", &nr, &nr, &v[v_offset], ldv, &work[(*n << 1) + 1]
+, &nr);
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = nr - p + 1;
+ temp1 = dnrm2_(&i__2, &work[(*n << 1) + (p - 1) * nr + p],
+ &c__1);
+ i__2 = nr - p + 1;
+ d__1 = 1. / temp1;
+ dscal_(&i__2, &d__1, &work[(*n << 1) + (p - 1) * nr + p],
+ &c__1);
+/* L3950: */
+ }
+ dpocon_("Lower", &nr, &work[(*n << 1) + 1], &nr, &c_b35, &
+ temp1, &work[(*n << 1) + nr * nr + 1], &iwork[*m + (*
+ n << 1) + 1], &ierr);
+ condr1 = 1. / sqrt(temp1);
+/* .. here need a second opinion on the condition number */
+/* .. then assume worst case scenario */
+/* R1 is OK for inverse <=> CONDR1 .LT. DBLE(N) */
+/* more conservative <=> CONDR1 .LT. DSQRT(DBLE(N)) */
+
+ cond_ok__ = sqrt((doublereal) nr);
+/* [TP] COND_OK is a tuning parameter. */
+ if (condr1 < cond_ok__) {
+/* .. the second QRF without pivoting. Note: in an optimized */
+/* implementation, this QRF should be implemented as the QRF */
+/* of a lower triangular matrix. */
+/* R1^t = Q2 * R2 */
+ i__1 = *lwork - (*n << 1);
+ dgeqrf_(n, &nr, &v[v_offset], ldv, &work[*n + 1], &work[(*
+ n << 1) + 1], &i__1, &ierr);
+
+ if (l2pert) {
+ xsc = sqrt(small) / epsln;
+ i__1 = nr;
+ for (p = 2; p <= i__1; ++p) {
+ i__2 = p - 1;
+ for (q = 1; q <= i__2; ++q) {
+/* Computing MIN */
+ d__3 = (d__1 = v[p + p * v_dim1], abs(d__1)),
+ d__4 = (d__2 = v[q + q * v_dim1], abs(
+ d__2));
+ temp1 = xsc * min(d__3,d__4);
+ if ((d__1 = v[q + p * v_dim1], abs(d__1)) <=
+ temp1) {
+ v[q + p * v_dim1] = d_sign(&temp1, &v[q +
+ p * v_dim1]);
+ }
+/* L3958: */
+ }
+/* L3959: */
+ }
+ }
+
+ if (nr != *n) {
+ dlacpy_("A", n, &nr, &v[v_offset], ldv, &work[(*n <<
+ 1) + 1], n);
+ }
+/* .. save ... */
+
+/* .. this transposed copy should be better than naive */
+ i__1 = nr - 1;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = nr - p;
+ dcopy_(&i__2, &v[p + (p + 1) * v_dim1], ldv, &v[p + 1
+ + p * v_dim1], &c__1);
+/* L1969: */
+ }
+
+ condr2 = condr1;
+
+ } else {
+
+/* .. ill-conditioned case: second QRF with pivoting */
+/* Note that windowed pivoting would be equally good */
+/* numerically, and more run-time efficient. So, in */
+/* an optimal implementation, the next call to DGEQP3 */
+/* should be replaced with eg. CALL SGEQPX (ACM TOMS #782) */
+/* with properly (carefully) chosen parameters. */
+
+/* R1^t * P2 = Q2 * R2 */
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ iwork[*n + p] = 0;
+/* L3003: */
+ }
+ i__1 = *lwork - (*n << 1);
+ dgeqp3_(n, &nr, &v[v_offset], ldv, &iwork[*n + 1], &work[*
+ n + 1], &work[(*n << 1) + 1], &i__1, &ierr);
+/* * CALL DGEQRF( N, NR, V, LDV, WORK(N+1), WORK(2*N+1), */
+/* * & LWORK-2*N, IERR ) */
+ if (l2pert) {
+ xsc = sqrt(small);
+ i__1 = nr;
+ for (p = 2; p <= i__1; ++p) {
+ i__2 = p - 1;
+ for (q = 1; q <= i__2; ++q) {
+/* Computing MIN */
+ d__3 = (d__1 = v[p + p * v_dim1], abs(d__1)),
+ d__4 = (d__2 = v[q + q * v_dim1], abs(
+ d__2));
+ temp1 = xsc * min(d__3,d__4);
+ if ((d__1 = v[q + p * v_dim1], abs(d__1)) <=
+ temp1) {
+ v[q + p * v_dim1] = d_sign(&temp1, &v[q +
+ p * v_dim1]);
+ }
+/* L3968: */
+ }
+/* L3969: */
+ }
+ }
+
+ dlacpy_("A", n, &nr, &v[v_offset], ldv, &work[(*n << 1) +
+ 1], n);
+
+ if (l2pert) {
+ xsc = sqrt(small);
+ i__1 = nr;
+ for (p = 2; p <= i__1; ++p) {
+ i__2 = p - 1;
+ for (q = 1; q <= i__2; ++q) {
+/* Computing MIN */
+ d__3 = (d__1 = v[p + p * v_dim1], abs(d__1)),
+ d__4 = (d__2 = v[q + q * v_dim1], abs(
+ d__2));
+ temp1 = xsc * min(d__3,d__4);
+ v[p + q * v_dim1] = -d_sign(&temp1, &v[q + p *
+ v_dim1]);
+/* L8971: */
+ }
+/* L8970: */
+ }
+ } else {
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ dlaset_("L", &i__1, &i__2, &c_b34, &c_b34, &v[v_dim1
+ + 2], ldv);
+ }
+/* Now, compute R2 = L3 * Q3, the LQ factorization. */
+ i__1 = *lwork - (*n << 1) - *n * nr - nr;
+ dgelqf_(&nr, &nr, &v[v_offset], ldv, &work[(*n << 1) + *n
+ * nr + 1], &work[(*n << 1) + *n * nr + nr + 1], &
+ i__1, &ierr);
+/* .. and estimate the condition number */
+ dlacpy_("L", &nr, &nr, &v[v_offset], ldv, &work[(*n << 1)
+ + *n * nr + nr + 1], &nr);
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ temp1 = dnrm2_(&p, &work[(*n << 1) + *n * nr + nr + p]
+, &nr);
+ d__1 = 1. / temp1;
+ dscal_(&p, &d__1, &work[(*n << 1) + *n * nr + nr + p],
+ &nr);
+/* L4950: */
+ }
+ dpocon_("L", &nr, &work[(*n << 1) + *n * nr + nr + 1], &
+ nr, &c_b35, &temp1, &work[(*n << 1) + *n * nr +
+ nr + nr * nr + 1], &iwork[*m + (*n << 1) + 1], &
+ ierr);
+ condr2 = 1. / sqrt(temp1);
+
+ if (condr2 >= cond_ok__) {
+/* .. save the Householder vectors used for Q3 */
+/* (this overwrittes the copy of R2, as it will not be */
+/* needed in this branch, but it does not overwritte the */
+/* Huseholder vectors of Q2.). */
+ dlacpy_("U", &nr, &nr, &v[v_offset], ldv, &work[(*n <<
+ 1) + 1], n);
+/* .. and the rest of the information on Q3 is in */
+/* WORK(2*N+N*NR+1:2*N+N*NR+N) */
+ }
+
+ }
+
+ if (l2pert) {
+ xsc = sqrt(small);
+ i__1 = nr;
+ for (q = 2; q <= i__1; ++q) {
+ temp1 = xsc * v[q + q * v_dim1];
+ i__2 = q - 1;
+ for (p = 1; p <= i__2; ++p) {
+/* V(p,q) = - DSIGN( TEMP1, V(q,p) ) */
+ v[p + q * v_dim1] = -d_sign(&temp1, &v[p + q *
+ v_dim1]);
+/* L4969: */
+ }
+/* L4968: */
+ }
+ } else {
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 <<
+ 1) + 1], ldv);
+ }
+
+/* Second preconditioning finished; continue with Jacobi SVD */
+/* The input matrix is lower trinagular. */
+
+/* Recover the right singular vectors as solution of a well */
+/* conditioned triangular matrix equation. */
+
+ if (condr1 < cond_ok__) {
+
+ i__1 = *lwork - (*n << 1) - *n * nr - nr;
+ dgesvj_("L", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[
+ 1], &nr, &u[u_offset], ldu, &work[(*n << 1) + *n *
+ nr + nr + 1], &i__1, info);
+ scalem = work[(*n << 1) + *n * nr + nr + 1];
+ numrank = i_dnnt(&work[(*n << 1) + *n * nr + nr + 2]);
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ dcopy_(&nr, &v[p * v_dim1 + 1], &c__1, &u[p * u_dim1
+ + 1], &c__1);
+ dscal_(&nr, &sva[p], &v[p * v_dim1 + 1], &c__1);
+/* L3970: */
+ }
+/* .. pick the right matrix equation and solve it */
+
+ if (nr == *n) {
+/* :)) .. best case, R1 is inverted. The solution of this matrix */
+/* equation is Q2*V2 = the product of the Jacobi rotations */
+/* used in DGESVJ, premultiplied with the orthogonal matrix */
+/* from the second QR factorization. */
+ dtrsm_("L", "U", "N", "N", &nr, &nr, &c_b35, &a[
+ a_offset], lda, &v[v_offset], ldv);
+ } else {
+/* .. R1 is well conditioned, but non-square. Transpose(R2) */
+/* is inverted to get the product of the Jacobi rotations */
+/* used in DGESVJ. The Q-factor from the second QR */
+/* factorization is then built in explicitly. */
+ dtrsm_("L", "U", "T", "N", &nr, &nr, &c_b35, &work[(*
+ n << 1) + 1], n, &v[v_offset], ldv);
+ if (nr < *n) {
+ i__1 = *n - nr;
+ dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr +
+ 1 + v_dim1], ldv);
+ i__1 = *n - nr;
+ dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr +
+ 1) * v_dim1 + 1], ldv);
+ i__1 = *n - nr;
+ i__2 = *n - nr;
+ dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr
+ + 1 + (nr + 1) * v_dim1], ldv);
+ }
+ i__1 = *lwork - (*n << 1) - *n * nr - nr;
+ dormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n,
+ &work[*n + 1], &v[v_offset], ldv, &work[(*n <<
+ 1) + *n * nr + nr + 1], &i__1, &ierr);
+ }
+
+ } else if (condr2 < cond_ok__) {
+
+/* :) .. the input matrix A is very likely a relative of */
+/* the Kahan matrix :) */
+/* The matrix R2 is inverted. The solution of the matrix equation */
+/* is Q3^T*V3 = the product of the Jacobi rotations (appplied to */
+/* the lower triangular L3 from the LQ factorization of */
+/* R2=L3*Q3), pre-multiplied with the transposed Q3. */
+ i__1 = *lwork - (*n << 1) - *n * nr - nr;
+ dgesvj_("L", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[
+ 1], &nr, &u[u_offset], ldu, &work[(*n << 1) + *n *
+ nr + nr + 1], &i__1, info);
+ scalem = work[(*n << 1) + *n * nr + nr + 1];
+ numrank = i_dnnt(&work[(*n << 1) + *n * nr + nr + 2]);
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ dcopy_(&nr, &v[p * v_dim1 + 1], &c__1, &u[p * u_dim1
+ + 1], &c__1);
+ dscal_(&nr, &sva[p], &u[p * u_dim1 + 1], &c__1);
+/* L3870: */
+ }
+ dtrsm_("L", "U", "N", "N", &nr, &nr, &c_b35, &work[(*n <<
+ 1) + 1], n, &u[u_offset], ldu);
+/* .. apply the permutation from the second QR factorization */
+ i__1 = nr;
+ for (q = 1; q <= i__1; ++q) {
+ i__2 = nr;
+ for (p = 1; p <= i__2; ++p) {
+ work[(*n << 1) + *n * nr + nr + iwork[*n + p]] =
+ u[p + q * u_dim1];
+/* L872: */
+ }
+ i__2 = nr;
+ for (p = 1; p <= i__2; ++p) {
+ u[p + q * u_dim1] = work[(*n << 1) + *n * nr + nr
+ + p];
+/* L874: */
+ }
+/* L873: */
+ }
+ if (nr < *n) {
+ i__1 = *n - nr;
+ dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 +
+ v_dim1], ldv);
+ i__1 = *n - nr;
+ dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) *
+ v_dim1 + 1], ldv);
+ i__1 = *n - nr;
+ i__2 = *n - nr;
+ dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1
+ + (nr + 1) * v_dim1], ldv);
+ }
+ i__1 = *lwork - (*n << 1) - *n * nr - nr;
+ dormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n, &
+ work[*n + 1], &v[v_offset], ldv, &work[(*n << 1)
+ + *n * nr + nr + 1], &i__1, &ierr);
+ } else {
+/* Last line of defense. */
+/* #:( This is a rather pathological case: no scaled condition */
+/* improvement after two pivoted QR factorizations. Other */
+/* possibility is that the rank revealing QR factorization */
+/* or the condition estimator has failed, or the COND_OK */
+/* is set very close to ONE (which is unnecessary). Normally, */
+/* this branch should never be executed, but in rare cases of */
+/* failure of the RRQR or condition estimator, the last line of */
+/* defense ensures that DGEJSV completes the task. */
+/* Compute the full SVD of L3 using DGESVJ with explicit */
+/* accumulation of Jacobi rotations. */
+ i__1 = *lwork - (*n << 1) - *n * nr - nr;
+ dgesvj_("L", "U", "V", &nr, &nr, &v[v_offset], ldv, &sva[
+ 1], &nr, &u[u_offset], ldu, &work[(*n << 1) + *n *
+ nr + nr + 1], &i__1, info);
+ scalem = work[(*n << 1) + *n * nr + nr + 1];
+ numrank = i_dnnt(&work[(*n << 1) + *n * nr + nr + 2]);
+ if (nr < *n) {
+ i__1 = *n - nr;
+ dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 +
+ v_dim1], ldv);
+ i__1 = *n - nr;
+ dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) *
+ v_dim1 + 1], ldv);
+ i__1 = *n - nr;
+ i__2 = *n - nr;
+ dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1
+ + (nr + 1) * v_dim1], ldv);
+ }
+ i__1 = *lwork - (*n << 1) - *n * nr - nr;
+ dormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n, &
+ work[*n + 1], &v[v_offset], ldv, &work[(*n << 1)
+ + *n * nr + nr + 1], &i__1, &ierr);
+
+ i__1 = *lwork - (*n << 1) - *n * nr - nr;
+ dormlq_("L", "T", &nr, &nr, &nr, &work[(*n << 1) + 1], n,
+ &work[(*n << 1) + *n * nr + 1], &u[u_offset], ldu,
+ &work[(*n << 1) + *n * nr + nr + 1], &i__1, &
+ ierr);
+ i__1 = nr;
+ for (q = 1; q <= i__1; ++q) {
+ i__2 = nr;
+ for (p = 1; p <= i__2; ++p) {
+ work[(*n << 1) + *n * nr + nr + iwork[*n + p]] =
+ u[p + q * u_dim1];
+/* L772: */
+ }
+ i__2 = nr;
+ for (p = 1; p <= i__2; ++p) {
+ u[p + q * u_dim1] = work[(*n << 1) + *n * nr + nr
+ + p];
+/* L774: */
+ }
+/* L773: */
+ }
+
+ }
+
+/* Permute the rows of V using the (column) permutation from the */
+/* first QRF. Also, scale the columns to make them unit in */
+/* Euclidean norm. This applies to all cases. */
+
+ temp1 = sqrt((doublereal) (*n)) * epsln;
+ i__1 = *n;
+ for (q = 1; q <= i__1; ++q) {
+ i__2 = *n;
+ for (p = 1; p <= i__2; ++p) {
+ work[(*n << 1) + *n * nr + nr + iwork[p]] = v[p + q *
+ v_dim1];
+/* L972: */
+ }
+ i__2 = *n;
+ for (p = 1; p <= i__2; ++p) {
+ v[p + q * v_dim1] = work[(*n << 1) + *n * nr + nr + p]
+ ;
+/* L973: */
+ }
+ xsc = 1. / dnrm2_(n, &v[q * v_dim1 + 1], &c__1);
+ if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
+ dscal_(n, &xsc, &v[q * v_dim1 + 1], &c__1);
+ }
+/* L1972: */
+ }
+/* At this moment, V contains the right singular vectors of A. */
+/* Next, assemble the left singular vector matrix U (M x N). */
+ if (nr < *m) {
+ i__1 = *m - nr;
+ dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &u[nr + 1 +
+ u_dim1], ldu);
+ if (nr < n1) {
+ i__1 = n1 - nr;
+ dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &u[(nr + 1) *
+ u_dim1 + 1], ldu);
+ i__1 = *m - nr;
+ i__2 = n1 - nr;
+ dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[nr + 1
+ + (nr + 1) * u_dim1], ldu);
+ }
+ }
+
+/* The Q matrix from the first QRF is built into the left singular */
+/* matrix U. This applies to all cases. */
+
+ i__1 = *lwork - *n;
+ dormqr_("Left", "No_Tr", m, &n1, n, &a[a_offset], lda, &work[
+ 1], &u[u_offset], ldu, &work[*n + 1], &i__1, &ierr);
+/* The columns of U are normalized. The cost is O(M*N) flops. */
+ temp1 = sqrt((doublereal) (*m)) * epsln;
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ xsc = 1. / dnrm2_(m, &u[p * u_dim1 + 1], &c__1);
+ if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
+ dscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
+ }
+/* L1973: */
+ }
+
+/* If the initial QRF is computed with row pivoting, the left */
+/* singular vectors must be adjusted. */
+
+ if (rowpiv) {
+ i__1 = *m - 1;
+ dlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n
+ << 1) + 1], &c_n1);
+ }
+
+ } else {
+
+/* .. the initial matrix A has almost orthogonal columns and */
+/* the second QRF is not needed */
+
+ dlacpy_("Upper", n, n, &a[a_offset], lda, &work[*n + 1], n);
+ if (l2pert) {
+ xsc = sqrt(small);
+ i__1 = *n;
+ for (p = 2; p <= i__1; ++p) {
+ temp1 = xsc * work[*n + (p - 1) * *n + p];
+ i__2 = p - 1;
+ for (q = 1; q <= i__2; ++q) {
+ work[*n + (q - 1) * *n + p] = -d_sign(&temp1, &
+ work[*n + (p - 1) * *n + q]);
+/* L5971: */
+ }
+/* L5970: */
+ }
+ } else {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ dlaset_("Lower", &i__1, &i__2, &c_b34, &c_b34, &work[*n +
+ 2], n);
+ }
+
+ i__1 = *lwork - *n - *n * *n;
+ dgesvj_("Upper", "U", "N", n, n, &work[*n + 1], n, &sva[1], n,
+ &u[u_offset], ldu, &work[*n + *n * *n + 1], &i__1,
+ info);
+
+ scalem = work[*n + *n * *n + 1];
+ numrank = i_dnnt(&work[*n + *n * *n + 2]);
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ dcopy_(n, &work[*n + (p - 1) * *n + 1], &c__1, &u[p *
+ u_dim1 + 1], &c__1);
+ dscal_(n, &sva[p], &work[*n + (p - 1) * *n + 1], &c__1);
+/* L6970: */
+ }
+
+ dtrsm_("Left", "Upper", "NoTrans", "No UD", n, n, &c_b35, &a[
+ a_offset], lda, &work[*n + 1], n);
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ dcopy_(n, &work[*n + p], n, &v[iwork[p] + v_dim1], ldv);
+/* L6972: */
+ }
+ temp1 = sqrt((doublereal) (*n)) * epsln;
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ xsc = 1. / dnrm2_(n, &v[p * v_dim1 + 1], &c__1);
+ if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
+ dscal_(n, &xsc, &v[p * v_dim1 + 1], &c__1);
+ }
+/* L6971: */
+ }
+
+/* Assemble the left singular vector matrix U (M x N). */
+
+ if (*n < *m) {
+ i__1 = *m - *n;
+ dlaset_("A", &i__1, n, &c_b34, &c_b34, &u[nr + 1 + u_dim1]
+, ldu);
+ if (*n < n1) {
+ i__1 = n1 - *n;
+ dlaset_("A", n, &i__1, &c_b34, &c_b34, &u[(*n + 1) *
+ u_dim1 + 1], ldu);
+ i__1 = *m - *n;
+ i__2 = n1 - *n;
+ dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[nr + 1
+ + (*n + 1) * u_dim1], ldu);
+ }
+ }
+ i__1 = *lwork - *n;
+ dormqr_("Left", "No Tr", m, &n1, n, &a[a_offset], lda, &work[
+ 1], &u[u_offset], ldu, &work[*n + 1], &i__1, &ierr);
+ temp1 = sqrt((doublereal) (*m)) * epsln;
+ i__1 = n1;
+ for (p = 1; p <= i__1; ++p) {
+ xsc = 1. / dnrm2_(m, &u[p * u_dim1 + 1], &c__1);
+ if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
+ dscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
+ }
+/* L6973: */
+ }
+
+ if (rowpiv) {
+ i__1 = *m - 1;
+ dlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n
+ << 1) + 1], &c_n1);
+ }
+
+ }
+
+/* end of the >> almost orthogonal case << in the full SVD */
+
+ } else {
+
+/* This branch deploys a preconditioned Jacobi SVD with explicitly */
+/* accumulated rotations. It is included as optional, mainly for */
+/* experimental purposes. It does perfom well, and can also be used. */
+/* In this implementation, this branch will be automatically activated */
+/* if the condition number sigma_max(A) / sigma_min(A) is predicted */
+/* to be greater than the overflow threshold. This is because the */
+/* a posteriori computation of the singular vectors assumes robust */
+/* implementation of BLAS and some LAPACK procedures, capable of working */
+/* in presence of extreme values. Since that is not always the case, ... */
+
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *n - p + 1;
+ dcopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1], &
+ c__1);
+/* L7968: */
+ }
+
+ if (l2pert) {
+ xsc = sqrt(small / epsln);
+ i__1 = nr;
+ for (q = 1; q <= i__1; ++q) {
+ temp1 = xsc * (d__1 = v[q + q * v_dim1], abs(d__1));
+ i__2 = *n;
+ for (p = 1; p <= i__2; ++p) {
+ if (p > q && (d__1 = v[p + q * v_dim1], abs(d__1)) <=
+ temp1 || p < q) {
+ v[p + q * v_dim1] = d_sign(&temp1, &v[p + q *
+ v_dim1]);
+ }
+ if (p < q) {
+ v[p + q * v_dim1] = -v[p + q * v_dim1];
+ }
+/* L5968: */
+ }
+/* L5969: */
+ }
+ } else {
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
+ 1], ldv);
+ }
+ i__1 = *lwork - (*n << 1);
+ dgeqrf_(n, &nr, &v[v_offset], ldv, &work[*n + 1], &work[(*n << 1)
+ + 1], &i__1, &ierr);
+ dlacpy_("L", n, &nr, &v[v_offset], ldv, &work[(*n << 1) + 1], n);
+
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = nr - p + 1;
+ dcopy_(&i__2, &v[p + p * v_dim1], ldv, &u[p + p * u_dim1], &
+ c__1);
+/* L7969: */
+ }
+ if (l2pert) {
+ xsc = sqrt(small / epsln);
+ i__1 = nr;
+ for (q = 2; q <= i__1; ++q) {
+ i__2 = q - 1;
+ for (p = 1; p <= i__2; ++p) {
+/* Computing MIN */
+ d__3 = (d__1 = u[p + p * u_dim1], abs(d__1)), d__4 = (
+ d__2 = u[q + q * u_dim1], abs(d__2));
+ temp1 = xsc * min(d__3,d__4);
+ u[p + q * u_dim1] = -d_sign(&temp1, &u[q + p * u_dim1]
+ );
+/* L9971: */
+ }
+/* L9970: */
+ }
+ } else {
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &u[(u_dim1 << 1) +
+ 1], ldu);
+ }
+ i__1 = *lwork - (*n << 1) - *n * nr;
+ dgesvj_("G", "U", "V", &nr, &nr, &u[u_offset], ldu, &sva[1], n, &
+ v[v_offset], ldv, &work[(*n << 1) + *n * nr + 1], &i__1,
+ info);
+ scalem = work[(*n << 1) + *n * nr + 1];
+ numrank = i_dnnt(&work[(*n << 1) + *n * nr + 2]);
+ if (nr < *n) {
+ i__1 = *n - nr;
+ dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 + v_dim1],
+ ldv);
+ i__1 = *n - nr;
+ dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) * v_dim1
+ + 1], ldv);
+ i__1 = *n - nr;
+ i__2 = *n - nr;
+ dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1 + (nr +
+ 1) * v_dim1], ldv);
+ }
+ i__1 = *lwork - (*n << 1) - *n * nr - nr;
+ dormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n, &work[*n +
+ 1], &v[v_offset], ldv, &work[(*n << 1) + *n * nr + nr + 1]
+, &i__1, &ierr);
+
+/* Permute the rows of V using the (column) permutation from the */
+/* first QRF. Also, scale the columns to make them unit in */
+/* Euclidean norm. This applies to all cases. */
+
+ temp1 = sqrt((doublereal) (*n)) * epsln;
+ i__1 = *n;
+ for (q = 1; q <= i__1; ++q) {
+ i__2 = *n;
+ for (p = 1; p <= i__2; ++p) {
+ work[(*n << 1) + *n * nr + nr + iwork[p]] = v[p + q *
+ v_dim1];
+/* L8972: */
+ }
+ i__2 = *n;
+ for (p = 1; p <= i__2; ++p) {
+ v[p + q * v_dim1] = work[(*n << 1) + *n * nr + nr + p];
+/* L8973: */
+ }
+ xsc = 1. / dnrm2_(n, &v[q * v_dim1 + 1], &c__1);
+ if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
+ dscal_(n, &xsc, &v[q * v_dim1 + 1], &c__1);
+ }
+/* L7972: */
+ }
+
+/* At this moment, V contains the right singular vectors of A. */
+/* Next, assemble the left singular vector matrix U (M x N). */
+
+ if (*n < *m) {
+ i__1 = *m - *n;
+ dlaset_("A", &i__1, n, &c_b34, &c_b34, &u[nr + 1 + u_dim1],
+ ldu);
+ if (*n < n1) {
+ i__1 = n1 - *n;
+ dlaset_("A", n, &i__1, &c_b34, &c_b34, &u[(*n + 1) *
+ u_dim1 + 1], ldu);
+ i__1 = *m - *n;
+ i__2 = n1 - *n;
+ dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[nr + 1 + (*
+ n + 1) * u_dim1], ldu);
+ }
+ }
+
+ i__1 = *lwork - *n;
+ dormqr_("Left", "No Tr", m, &n1, n, &a[a_offset], lda, &work[1], &
+ u[u_offset], ldu, &work[*n + 1], &i__1, &ierr);
+
+ if (rowpiv) {
+ i__1 = *m - 1;
+ dlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n << 1)
+ + 1], &c_n1);
+ }
+
+
+ }
+ if (transp) {
+/* .. swap U and V because the procedure worked on A^t */
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ dswap_(n, &u[p * u_dim1 + 1], &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+/* L6974: */
+ }
+ }
+
+ }
+/* end of the full SVD */
+
+/* Undo scaling, if necessary (and possible) */
+
+ if (uscal2 <= big / sva[1] * uscal1) {
+ dlascl_("G", &c__0, &c__0, &uscal1, &uscal2, &nr, &c__1, &sva[1], n, &
+ ierr);
+ uscal1 = 1.;
+ uscal2 = 1.;
+ }
+
+ if (nr < *n) {
+ i__1 = *n;
+ for (p = nr + 1; p <= i__1; ++p) {
+ sva[p] = 0.;
+/* L3004: */
+ }
+ }
+
+ work[1] = uscal2 * scalem;
+ work[2] = uscal1;
+ if (errest) {
+ work[3] = sconda;
+ }
+ if (lsvec && rsvec) {
+ work[4] = condr1;
+ work[5] = condr2;
+ }
+ if (l2tran) {
+ work[6] = entra;
+ work[7] = entrat;
+ }
+
+ iwork[1] = nr;
+ iwork[2] = numrank;
+ iwork[3] = warning;
+
+ return 0;
+/* .. */
+/* .. END OF DGEJSV */
+/* .. */
+} /* dgejsv_ */
diff --git a/contrib/libs/clapack/dgelq2.c b/contrib/libs/clapack/dgelq2.c
new file mode 100644
index 0000000000..c77e5a857b
--- /dev/null
+++ b/contrib/libs/clapack/dgelq2.c
@@ -0,0 +1,157 @@
+/* dgelq2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dgelq2_(integer *m, integer *n, doublereal *a, integer *
+	lda, doublereal *tau, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__, k;
+    doublereal aii;
+    extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *), dlarfp_(integer *, doublereal *,
+	    doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGELQ2 computes an LQ factorization of a real m by n matrix A: */
+/* A = L * Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the m by n matrix A. */
+/* On exit, the elements on and below the diagonal of the array */
+/* contain the m by min(m,n) lower trapezoidal matrix L (L is */
+/* lower triangular if m <= n); the elements above the diagonal, */
+/* with the array TAU, represent the orthogonal matrix Q as a */
+/* product of elementary reflectors (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (M) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i,i+1:n), */
+/* and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGELQ2", &i__1);
+	return 0;
+    }
+
+    k = min(*m,*n);
+
+    i__1 = k;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i,i+1:n) */
+
+/* DLARFP operates on the row A(i,i:n), accessed with stride LDA; */
+/* min(i+1,n) keeps the column index in bounds when i = n. */
+	i__2 = *n - i__ + 1;
+/* Computing MIN */
+	i__3 = i__ + 1;
+	dlarfp_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3, *n)* a_dim1]
+, lda, &tau[i__]);
+	if (i__ < *m) {
+
+/* Apply H(i) to A(i+1:m,i:n) from the right */
+
+/* A(i,i) is temporarily set to 1 so that the stored row holds the */
+/* complete reflector vector v (unit leading entry); the original */
+/* diagonal value is restored after the update. */
+	    aii = a[i__ + i__ * a_dim1];
+	    a[i__ + i__ * a_dim1] = 1.;
+	    i__2 = *m - i__;
+	    i__3 = *n - i__ + 1;
+	    dlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[
+		    i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+	    a[i__ + i__ * a_dim1] = aii;
+	}
+/* L10: */
+    }
+    return 0;
+
+/* End of DGELQ2 */
+
+} /* dgelq2_ */
diff --git a/contrib/libs/clapack/dgelqf.c b/contrib/libs/clapack/dgelqf.c
new file mode 100644
index 0000000000..08bc8187d3
--- /dev/null
+++ b/contrib/libs/clapack/dgelqf.c
@@ -0,0 +1,251 @@
+/* dgelqf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c passes every argument by reference, so the integer constants */
+/* handed to ILAENV below are materialized as file-scope statics */
+/* whose addresses can be taken. */
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int dgelqf_(integer *m, integer *n, doublereal *a, integer *
+	lda, doublereal *tau, doublereal *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int dgelq2_(integer *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *), dlarfb_(char *,
+	    char *, char *, char *, integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal
+	    *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGELQF computes an LQ factorization of a real M-by-N matrix A: */
+/* A = L * Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the elements on and below the diagonal of the array */
+/* contain the m-by-min(m,n) lower trapezoidal matrix L (L is */
+/* lower triangular if m <= n); the elements above the diagonal, */
+/* with the array TAU, represent the orthogonal matrix Q as a */
+/* product of elementary reflectors (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i,i+1:n), */
+/* and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    nb = ilaenv_(&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1);
+    lwkopt = *m * nb;
+    work[1] = (doublereal) lwkopt;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    } else if (*lwork < max(1,*m) && ! lquery) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGELQF", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    k = min(*m,*n);
+    if (k == 0) {
+	work[1] = 1.;
+	return 0;
+    }
+
+/* NBMIN is the smallest block size worth using and NX the crossover */
+/* dimension below which the unblocked code is used; both may be */
+/* revised below from ILAENV and the available workspace. */
+    nbmin = 2;
+    nx = 0;
+    iws = *m;
+    if (nb > 1 && nb < k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+	i__1 = 0, i__2 = ilaenv_(&c__3, "DGELQF", " ", m, n, &c_n1, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *m;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "DGELQF", " ", m, n, &c_n1, &
+			c_n1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially */
+
+	i__1 = k - nx;
+	i__2 = nb;
+	for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+	    i__3 = k - i__ + 1;
+	    ib = min(i__3,nb);
+
+/* Compute the LQ factorization of the current block */
+/* A(i:i+ib-1,i:n) */
+
+	    i__3 = *n - i__ + 1;
+	    dgelq2_(&ib, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
+		    1], &iinfo);
+	    if (i__ + ib <= *m) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+		i__3 = *n - i__ + 1;
+		dlarft_("Forward", "Rowwise", &i__3, &ib, &a[i__ + i__ *
+			a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(i+ib:m,i:n) from the right */
+
+		i__3 = *m - i__ - ib + 1;
+		i__4 = *n - i__ + 1;
+		dlarfb_("Right", "No transpose", "Forward", "Rowwise", &i__3,
+			&i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
+			ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib +
+			1], &ldwork);
+	    }
+/* L10: */
+	}
+    } else {
+/* The blocked path was not taken; the unblocked call below */
+/* factors the whole matrix starting at row/column 1. */
+	i__ = 1;
+    }
+
+/* Use unblocked code to factor the last or only block. */
+
+    if (i__ <= k) {
+	i__2 = *m - i__ + 1;
+	i__1 = *n - i__ + 1;
+	dgelq2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
+, &iinfo);
+    }
+
+/* Report the workspace size used by the chosen code path. */
+    work[1] = (doublereal) iws;
+    return 0;
+
+/* End of DGELQF */
+
+} /* dgelqf_ */
diff --git a/contrib/libs/clapack/dgels.c b/contrib/libs/clapack/dgels.c
new file mode 100644
index 0000000000..55b28c7398
--- /dev/null
+++ b/contrib/libs/clapack/dgels.c
@@ -0,0 +1,515 @@
+/* dgels.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c translates Fortran call-by-reference: every literal constant that */
+/* is passed to a subroutine below must have an address, so the literals */
+/* are materialized here as file-scope statics (e.g. &c__1, &c_n1). */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b33 = 0.;
+static integer c__0 = 0;
+
+/* Subroutine */ int dgels_(char *trans, integer *m, integer *n, integer *
+	nrhs, doublereal *a, integer *lda, doublereal *b, integer *ldb,
+	doublereal *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, nb, mn;
+    doublereal anrm, bnrm;
+    integer brow;
+    logical tpsd;
+    integer iascl, ibscl;
+    extern logical lsame_(char *, char *);
+    integer wsize;
+    doublereal rwork[1];
+    extern /* Subroutine */ int dlabad_(doublereal *, doublereal *);
+    extern doublereal dlamch_(char *), dlange_(char *, integer *,
+	    integer *, doublereal *, integer *, doublereal *);
+    extern /* Subroutine */ int dgelqf_(integer *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *, integer *),
+	    dlascl_(char *, integer *, integer *, doublereal *, doublereal *,
+	    integer *, integer *, doublereal *, integer *, integer *),
+	    dgeqrf_(integer *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, integer *, integer *), dlaset_(char *,
+	    integer *, integer *, doublereal *, doublereal *, doublereal *,
+	    integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer scllen;
+    doublereal bignum;
+    extern /* Subroutine */ int dormlq_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, integer *),
+	    dormqr_(char *, char *, integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *, integer *);
+    doublereal smlnum;
+    logical lquery;
+    extern /* Subroutine */ int dtrtrs_(char *, char *, char *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, integer *,
+	    integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGELS solves overdetermined or underdetermined real linear systems */
+/* involving an M-by-N matrix A, or its transpose, using a QR or LQ */
+/* factorization of A. It is assumed that A has full rank. */
+
+/* The following options are provided: */
+
+/* 1. If TRANS = 'N' and m >= n: find the least squares solution of */
+/* an overdetermined system, i.e., solve the least squares problem */
+/* minimize || B - A*X ||. */
+
+/* 2. If TRANS = 'N' and m < n: find the minimum norm solution of */
+/* an underdetermined system A * X = B. */
+
+/* 3. If TRANS = 'T' and m >= n: find the minimum norm solution of */
+/* an undetermined system A**T * X = B. */
+
+/* 4. If TRANS = 'T' and m < n: find the least squares solution of */
+/* an overdetermined system, i.e., solve the least squares problem */
+/* minimize || B - A**T * X ||. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution */
+/* matrix X. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': the linear system involves A; */
+/* = 'T': the linear system involves A**T. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of */
+/* columns of the matrices B and X. NRHS >=0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* if M >= N, A is overwritten by details of its QR */
+/* factorization as returned by DGEQRF; */
+/* if M < N, A is overwritten by details of its LQ */
+/* factorization as returned by DGELQF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the matrix B of right hand side vectors, stored */
+/* columnwise; B is M-by-NRHS if TRANS = 'N', or N-by-NRHS */
+/* if TRANS = 'T'. */
+/* On exit, if INFO = 0, B is overwritten by the solution */
+/* vectors, stored columnwise: */
+/* if TRANS = 'N' and m >= n, rows 1 to n of B contain the least */
+/* squares solution vectors; the residual sum of squares for the */
+/* solution in each column is given by the sum of squares of */
+/* elements N+1 to M in that column; */
+/* if TRANS = 'N' and m < n, rows 1 to N of B contain the */
+/* minimum norm solution vectors; */
+/* if TRANS = 'T' and m >= n, rows 1 to M of B contain the */
+/* minimum norm solution vectors; */
+/* if TRANS = 'T' and m < n, rows 1 to M of B contain the */
+/* least squares solution vectors; the residual sum of squares */
+/* for the solution in each column is given by the sum of */
+/* squares of elements M+1 to N in that column. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= MAX(1,M,N). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* LWORK >= max( 1, MN + max( MN, NRHS ) ). */
+/* For optimal performance, */
+/* LWORK >= max( 1, MN + max( MN, NRHS )*NB ). */
+/* where MN = min(M,N) and NB is the optimum block size. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element of the */
+/* triangular factor of A is zero, so that A does not have */
+/* full rank; the least squares solution could not be */
+/* computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    mn = min(*m,*n);
+    lquery = *lwork == -1;
+    if (! (lsame_(trans, "N") || lsame_(trans, "T"))) {
+	*info = -1;
+    } else if (*m < 0) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*m)) {
+	*info = -6;
+    } else /* if(complicated condition) */ {
+/* Computing MAX */
+	i__1 = max(1,*m);
+	if (*ldb < max(i__1,*n)) {
+	    *info = -8;
+	} else /* if(complicated condition) */ {
+/* Computing MAX */
+	    i__1 = 1, i__2 = mn + max(mn,*nrhs);
+	    if (*lwork < max(i__1,i__2) && ! lquery) {
+		*info = -10;
+	    }
+	}
+    }
+
+/* Figure out optimal block size */
+
+/* NOTE: this block also runs when INFO = -10 (LWORK too small), so that */
+/* WORK(1) reports the optimal workspace size before XERBLA is called. */
+
+    if (*info == 0 || *info == -10) {
+
+/* TPSD is true iff the system involves A**T (TRANS = 'T'). */
+
+	tpsd = TRUE_;
+	if (lsame_(trans, "N")) {
+	    tpsd = FALSE_;
+	}
+
+	if (*m >= *n) {
+	    nb = ilaenv_(&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1);
+	    if (tpsd) {
+/* Computing MAX */
+		i__1 = nb, i__2 = ilaenv_(&c__1, "DORMQR", "LN", m, nrhs, n, &
+			c_n1);
+		nb = max(i__1,i__2);
+	    } else {
+/* Computing MAX */
+		i__1 = nb, i__2 = ilaenv_(&c__1, "DORMQR", "LT", m, nrhs, n, &
+			c_n1);
+		nb = max(i__1,i__2);
+	    }
+	} else {
+	    nb = ilaenv_(&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1);
+	    if (tpsd) {
+/* Computing MAX */
+		i__1 = nb, i__2 = ilaenv_(&c__1, "DORMLQ", "LT", n, nrhs, m, &
+			c_n1);
+		nb = max(i__1,i__2);
+	    } else {
+/* Computing MAX */
+		i__1 = nb, i__2 = ilaenv_(&c__1, "DORMLQ", "LN", n, nrhs, m, &
+			c_n1);
+		nb = max(i__1,i__2);
+	    }
+	}
+
+/* Computing MAX */
+	i__1 = 1, i__2 = mn + max(mn,*nrhs) * nb;
+	wsize = max(i__1,i__2);
+	work[1] = (doublereal) wsize;
+
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGELS ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+/* Computing MIN */
+    i__1 = min(*m,*n);
+    if (min(i__1,*nrhs) == 0) {
+	i__1 = max(*m,*n);
+	dlaset_("Full", &i__1, nrhs, &c_b33, &c_b33, &b[b_offset], ldb);
+	return 0;
+    }
+
+/* Get machine parameters */
+
+    smlnum = dlamch_("S") / dlamch_("P");
+    bignum = 1. / smlnum;
+    dlabad_(&smlnum, &bignum);
+
+/* Scale A, B if max element outside range [SMLNUM,BIGNUM] */
+
+    anrm = dlange_("M", m, n, &a[a_offset], lda, rwork);
+    iascl = 0;
+    if (anrm > 0. && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+	dlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+		info);
+	iascl = 1;
+    } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+	dlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+		info);
+	iascl = 2;
+    } else if (anrm == 0.) {
+
+/* Matrix all zero. Return zero solution. */
+
+	i__1 = max(*m,*n);
+	dlaset_("F", &i__1, nrhs, &c_b33, &c_b33, &b[b_offset], ldb);
+	goto L50;
+    }
+
+    brow = *m;
+    if (tpsd) {
+	brow = *n;
+    }
+    bnrm = dlange_("M", &brow, nrhs, &b[b_offset], ldb, rwork);
+    ibscl = 0;
+    if (bnrm > 0. && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+	dlascl_("G", &c__0, &c__0, &bnrm, &smlnum, &brow, nrhs, &b[b_offset],
+		ldb, info);
+	ibscl = 1;
+    } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+	dlascl_("G", &c__0, &c__0, &bnrm, &bignum, &brow, nrhs, &b[b_offset],
+		ldb, info);
+	ibscl = 2;
+    }
+
+    if (*m >= *n) {
+
+/* compute QR factorization of A */
+
+	i__1 = *lwork - mn;
+	dgeqrf_(m, n, &a[a_offset], lda, &work[1], &work[mn + 1], &i__1, info)
+		;
+
+/* workspace at least N, optimally N*NB */
+
+	if (! tpsd) {
+
+/* Least-Squares Problem min || A * X - B || */
+
+/* B(1:M,1:NRHS) := Q' * B(1:M,1:NRHS) */
+
+	    i__1 = *lwork - mn;
+	    dormqr_("Left", "Transpose", m, nrhs, n, &a[a_offset], lda, &work[
+		    1], &b[b_offset], ldb, &work[mn + 1], &i__1, info);
+
+/* workspace at least NRHS, optimally NRHS*NB */
+
+/* B(1:N,1:NRHS) := inv(R) * B(1:N,1:NRHS) */
+
+	    dtrtrs_("Upper", "No transpose", "Non-unit", n, nrhs, &a[a_offset]
+, lda, &b[b_offset], ldb, info);
+
+	    if (*info > 0) {
+		return 0;
+	    }
+
+	    scllen = *n;
+
+	} else {
+
+/* Overdetermined system of equations A' * X = B */
+
+/* B(1:N,1:NRHS) := inv(R') * B(1:N,1:NRHS) */
+
+	    dtrtrs_("Upper", "Transpose", "Non-unit", n, nrhs, &a[a_offset],
+		    lda, &b[b_offset], ldb, info);
+
+	    if (*info > 0) {
+		return 0;
+	    }
+
+/* B(N+1:M,1:NRHS) = ZERO */
+
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *m;
+		for (i__ = *n + 1; i__ <= i__2; ++i__) {
+		    b[i__ + j * b_dim1] = 0.;
+/* L10: */
+		}
+/* L20: */
+	    }
+
+/* B(1:M,1:NRHS) := Q(1:N,:) * B(1:N,1:NRHS) */
+
+	    i__1 = *lwork - mn;
+	    dormqr_("Left", "No transpose", m, nrhs, n, &a[a_offset], lda, &
+		    work[1], &b[b_offset], ldb, &work[mn + 1], &i__1, info);
+
+/* workspace at least NRHS, optimally NRHS*NB */
+
+	    scllen = *m;
+
+	}
+
+    } else {
+
+/* Compute LQ factorization of A */
+
+	i__1 = *lwork - mn;
+	dgelqf_(m, n, &a[a_offset], lda, &work[1], &work[mn + 1], &i__1, info)
+		;
+
+/* workspace at least M, optimally M*NB. */
+
+	if (! tpsd) {
+
+/* underdetermined system of equations A * X = B */
+
+/* B(1:M,1:NRHS) := inv(L) * B(1:M,1:NRHS) */
+
+	    dtrtrs_("Lower", "No transpose", "Non-unit", m, nrhs, &a[a_offset]
+, lda, &b[b_offset], ldb, info);
+
+	    if (*info > 0) {
+		return 0;
+	    }
+
+/* B(M+1:N,1:NRHS) = 0 */
+
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n;
+		for (i__ = *m + 1; i__ <= i__2; ++i__) {
+		    b[i__ + j * b_dim1] = 0.;
+/* L30: */
+		}
+/* L40: */
+	    }
+
+/* B(1:N,1:NRHS) := Q(1:N,:)' * B(1:M,1:NRHS) */
+
+	    i__1 = *lwork - mn;
+	    dormlq_("Left", "Transpose", n, nrhs, m, &a[a_offset], lda, &work[
+		    1], &b[b_offset], ldb, &work[mn + 1], &i__1, info);
+
+/* workspace at least NRHS, optimally NRHS*NB */
+
+	    scllen = *n;
+
+	} else {
+
+/* overdetermined system min || A' * X - B || */
+
+/* B(1:N,1:NRHS) := Q * B(1:N,1:NRHS) */
+
+	    i__1 = *lwork - mn;
+	    dormlq_("Left", "No transpose", n, nrhs, m, &a[a_offset], lda, &
+		    work[1], &b[b_offset], ldb, &work[mn + 1], &i__1, info);
+
+/* workspace at least NRHS, optimally NRHS*NB */
+
+/* B(1:M,1:NRHS) := inv(L') * B(1:M,1:NRHS) */
+
+	    dtrtrs_("Lower", "Transpose", "Non-unit", m, nrhs, &a[a_offset],
+		    lda, &b[b_offset], ldb, info);
+
+	    if (*info > 0) {
+		return 0;
+	    }
+
+	    scllen = *m;
+
+	}
+
+    }
+
+/* Undo scaling */
+
+/* SCLLEN is the number of rows of the solution in B that the scaling */
+/* applies to (N or M depending on the path taken above). */
+
+    if (iascl == 1) {
+	dlascl_("G", &c__0, &c__0, &anrm, &smlnum, &scllen, nrhs, &b[b_offset]
+, ldb, info);
+    } else if (iascl == 2) {
+	dlascl_("G", &c__0, &c__0, &anrm, &bignum, &scllen, nrhs, &b[b_offset]
+, ldb, info);
+    }
+    if (ibscl == 1) {
+	dlascl_("G", &c__0, &c__0, &smlnum, &bnrm, &scllen, nrhs, &b[b_offset]
+, ldb, info);
+    } else if (ibscl == 2) {
+	dlascl_("G", &c__0, &c__0, &bignum, &bnrm, &scllen, nrhs, &b[b_offset]
+, ldb, info);
+    }
+
+L50:
+    work[1] = (doublereal) wsize;
+
+    return 0;
+
+/* End of DGELS */
+
+} /* dgels_ */
diff --git a/contrib/libs/clapack/dgelsd.c b/contrib/libs/clapack/dgelsd.c
new file mode 100644
index 0000000000..01a638d7e5
--- /dev/null
+++ b/contrib/libs/clapack/dgelsd.c
@@ -0,0 +1,693 @@
+/* dgelsd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c translates Fortran call-by-reference: literal constants passed to */
+/* subroutines below need addresses, hence these file-scope statics. */
+
+static integer c__6 = 6;
+static integer c_n1 = -1;
+static integer c__9 = 9;
+static integer c__0 = 0;
+static integer c__1 = 1;
+static doublereal c_b82 = 0.;
+
+/* Subroutine */ int dgelsd_(integer *m, integer *n, integer *nrhs,
+	doublereal *a, integer *lda, doublereal *b, integer *ldb, doublereal *
+	s, doublereal *rcond, integer *rank, doublereal *work, integer *lwork,
+	 integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+
+    /* Builtin functions */
+    double log(doublereal);
+
+    /* Local variables */
+    integer ie, il, mm;
+    doublereal eps, anrm, bnrm;
+    integer itau, nlvl, iascl, ibscl;
+    doublereal sfmin;
+    integer minmn, maxmn, itaup, itauq, mnthr, nwork;
+    extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dgebrd_(
+	    integer *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+	     integer *);
+    extern doublereal dlamch_(char *), dlange_(char *, integer *,
+	    integer *, doublereal *, integer *, doublereal *);
+    extern /* Subroutine */ int dgelqf_(integer *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *, integer *),
+	    dlalsd_(char *, integer *, integer *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *, integer *), dlascl_(char *,
+	    integer *, integer *, doublereal *, doublereal *, integer *,
+	    integer *, doublereal *, integer *, integer *), dgeqrf_(
+	    integer *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, integer *, integer *), dlacpy_(char *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *), xerbla_(char *,
+	    integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    doublereal bignum;
+    extern /* Subroutine */ int dormbr_(char *, char *, char *, integer *,
+	    integer *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, integer *);
+    integer wlalsd;
+    extern /* Subroutine */ int dormlq_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, integer *);
+    integer ldwork;
+    extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, integer *);
+    integer minwrk, maxwrk;
+    doublereal smlnum;
+    logical lquery;
+    integer smlsiz;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGELSD computes the minimum-norm solution to a real linear least */
+/* squares problem: */
+/* minimize 2-norm(| b - A*x |) */
+/* using the singular value decomposition (SVD) of A. A is an M-by-N */
+/* matrix which may be rank-deficient. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution */
+/* matrix X. */
+
+/* The problem is solved in three steps: */
+/* (1) Reduce the coefficient matrix A to bidiagonal form with */
+/* Householder transformations, reducing the original problem */
+/* into a "bidiagonal least squares problem" (BLS) */
+/* (2) Solve the BLS using a divide and conquer approach. */
+/* (3) Apply back all the Householder tranformations to solve */
+/* the original least squares problem. */
+
+/* The effective rank of A is determined by treating as zero those */
+/* singular values which are less than RCOND times the largest singular */
+/* value. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A has been destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the M-by-NRHS right hand side matrix B. */
+/* On exit, B is overwritten by the N-by-NRHS solution */
+/* matrix X. If m >= n and RANK = n, the residual */
+/* sum-of-squares for the solution in the i-th column is given */
+/* by the sum of squares of elements n+1:m in that column. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,max(M,N)). */
+
+/* S (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The singular values of A in decreasing order. */
+/* The condition number of A in the 2-norm = S(1)/S(min(m,n)). */
+
+/* RCOND (input) DOUBLE PRECISION */
+/* RCOND is used to determine the effective rank of A. */
+/* Singular values S(i) <= RCOND*S(1) are treated as zero. */
+/* If RCOND < 0, machine precision is used instead. */
+
+/* RANK (output) INTEGER */
+/* The effective rank of A, i.e., the number of singular values */
+/* which are greater than RCOND*S(1). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK must be at least 1. */
+/* The exact minimum amount of workspace needed depends on M, */
+/* N and NRHS. As long as LWORK is at least */
+/* 12*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2, */
+/* if M is greater than or equal to N or */
+/* 12*M + 2*M*SMLSIZ + 8*M*NLVL + M*NRHS + (SMLSIZ+1)**2, */
+/* if M is less than N, the code will execute correctly. */
+/* SMLSIZ is returned by ILAENV and is equal to the maximum */
+/* size of the subproblems at the bottom of the computation */
+/* tree (usually about 25), and */
+/* NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 ) */
+/* For good performance, LWORK should generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* LIWORK >= 3 * MINMN * NLVL + 11 * MINMN, */
+/* where MINMN = MIN( M,N ). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: the algorithm for computing the SVD failed to converge; */
+/* if INFO = i, i off-diagonal elements of an intermediate */
+/* bidiagonal form did not converge to zero. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Ren-Cang Li, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --s;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    minmn = min(*m,*n);
+    maxmn = max(*m,*n);
+/* MNTHR is the ILAENV(6) threshold used below to select the special */
+/* Paths 1a/2a for strongly over/underdetermined shapes. */
+    mnthr = ilaenv_(&c__6, "DGELSD", " ", m, n, nrhs, &c_n1);
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    } else if (*ldb < max(1,maxmn)) {
+	*info = -7;
+    }
+
+    smlsiz = ilaenv_(&c__9, "DGELSD", " ", &c__0, &c__0, &c__0, &c__0);
+
+/* Compute workspace. */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV.) */
+
+    minwrk = 1;
+    minmn = max(1,minmn);
+/* NLVL = MAX( 0, INT( LOG_2( MINMN/(SMLSIZ+1) ) ) + 1 ): the number of */
+/* levels of the divide and conquer tree (see the LWORK description). */
+/* Computing MAX */
+    i__1 = (integer) (log((doublereal) minmn / (doublereal) (smlsiz + 1)) /
+	    log(2.)) + 1;
+    nlvl = max(i__1,0);
+
+    if (*info == 0) {
+	maxwrk = 0;
+	mm = *m;
+	if (*m >= *n && *m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than columns. */
+
+	    mm = *n;
+/* Computing MAX */
+	    i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m,
+		    n, &c_n1, &c_n1);
+	    maxwrk = max(i__1,i__2);
+/* Computing MAX */
+	    i__1 = maxwrk, i__2 = *n + *nrhs * ilaenv_(&c__1, "DORMQR", "LT",
+		    m, nrhs, n, &c_n1);
+	    maxwrk = max(i__1,i__2);
+	}
+	if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined. */
+
+/* Computing MAX */
+	    i__1 = maxwrk, i__2 = *n * 3 + (mm + *n) * ilaenv_(&c__1, "DGEBRD"
+, " ", &mm, n, &c_n1, &c_n1);
+	    maxwrk = max(i__1,i__2);
+/* Computing MAX */
+	    i__1 = maxwrk, i__2 = *n * 3 + *nrhs * ilaenv_(&c__1, "DORMBR",
+		    "QLT", &mm, nrhs, n, &c_n1);
+	    maxwrk = max(i__1,i__2);
+/* Computing MAX */
+	    i__1 = maxwrk, i__2 = *n * 3 + (*n - 1) * ilaenv_(&c__1, "DORMBR",
+		     "PLN", n, nrhs, n, &c_n1);
+	    maxwrk = max(i__1,i__2);
+/* Computing 2nd power */
+	    i__1 = smlsiz + 1;
+	    wlalsd = *n * 9 + (*n << 1) * smlsiz + (*n << 3) * nlvl + *n * *
+		    nrhs + i__1 * i__1;
+/* Computing MAX */
+	    i__1 = maxwrk, i__2 = *n * 3 + wlalsd;
+	    maxwrk = max(i__1,i__2);
+/* Computing MAX */
+	    i__1 = *n * 3 + mm, i__2 = *n * 3 + *nrhs, i__1 = max(i__1,i__2),
+		    i__2 = *n * 3 + wlalsd;
+	    minwrk = max(i__1,i__2);
+	}
+	if (*n > *m) {
+/* Computing 2nd power */
+	    i__1 = smlsiz + 1;
+	    wlalsd = *m * 9 + (*m << 1) * smlsiz + (*m << 3) * nlvl + *m * *
+		    nrhs + i__1 * i__1;
+	    if (*n >= mnthr) {
+
+/* Path 2a - underdetermined, with many more columns */
+/* than rows. */
+
+		maxwrk = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &c_n1,
+			&c_n1);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m << 1) *
+			ilaenv_(&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1);
+		maxwrk = max(i__1,i__2);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + *nrhs * ilaenv_(&
+			c__1, "DORMBR", "QLT", m, nrhs, m, &c_n1);
+		maxwrk = max(i__1,i__2);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m - 1) *
+			ilaenv_(&c__1, "DORMBR", "PLN", m, nrhs, m, &c_n1);
+		maxwrk = max(i__1,i__2);
+		if (*nrhs > 1) {
+/* Computing MAX */
+		    i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs;
+		    maxwrk = max(i__1,i__2);
+		} else {
+/* Computing MAX */
+		    i__1 = maxwrk, i__2 = *m * *m + (*m << 1);
+		    maxwrk = max(i__1,i__2);
+		}
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *m + *nrhs * ilaenv_(&c__1, "DORMLQ",
+			"LT", n, nrhs, m, &c_n1);
+		maxwrk = max(i__1,i__2);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + wlalsd;
+		maxwrk = max(i__1,i__2);
+/* XXX: Ensure the Path 2a case below is triggered. The workspace */
+/* calculation should use queries for all routines eventually. */
+/* Computing MAX */
+/* Computing MAX */
+		i__3 = *m, i__4 = (*m << 1) - 4, i__3 = max(i__3,i__4), i__3 =
+			 max(i__3,*nrhs), i__4 = *n - *m * 3;
+		i__1 = maxwrk, i__2 = (*m << 2) + *m * *m + max(i__3,i__4);
+		maxwrk = max(i__1,i__2);
+	    } else {
+
+/* Path 2 - remaining underdetermined cases. */
+
+		maxwrk = *m * 3 + (*n + *m) * ilaenv_(&c__1, "DGEBRD", " ", m,
+			 n, &c_n1, &c_n1);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *m * 3 + *nrhs * ilaenv_(&c__1, "DORMBR"
+, "QLT", m, nrhs, n, &c_n1);
+		maxwrk = max(i__1,i__2);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR",
+			"PLN", n, nrhs, m, &c_n1);
+		maxwrk = max(i__1,i__2);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *m * 3 + wlalsd;
+		maxwrk = max(i__1,i__2);
+	    }
+/* Computing MAX */
+	    i__1 = *m * 3 + *nrhs, i__2 = *m * 3 + *m, i__1 = max(i__1,i__2),
+		    i__2 = *m * 3 + wlalsd;
+	    minwrk = max(i__1,i__2);
+	}
+	minwrk = min(minwrk,maxwrk);
+	work[1] = (doublereal) maxwrk;
+	if (*lwork < minwrk && ! lquery) {
+	    *info = -12;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGELSD", &i__1);
+	return 0;
+    } else if (lquery) {
+/* Workspace query: jump to L10, which sets WORK(1) = MAXWRK. */
+	goto L10;
+    }
+
+/* Quick return if possible. */
+
+    if (*m == 0 || *n == 0) {
+	*rank = 0;
+	return 0;
+    }
+
+/* Get machine parameters. */
+
+    eps = dlamch_("P");
+    sfmin = dlamch_("S");
+    smlnum = sfmin / eps;
+    bignum = 1. / smlnum;
+    dlabad_(&smlnum, &bignum);
+
+/* Scale A if max entry outside range [SMLNUM,BIGNUM]. */
+
+    anrm = dlange_("M", m, n, &a[a_offset], lda, &work[1]);
+    iascl = 0;
+    if (anrm > 0. && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM. */
+
+	dlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+		info);
+	iascl = 1;
+    } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+	dlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+		info);
+	iascl = 2;
+    } else if (anrm == 0.) {
+
+/* Matrix all zero. Return zero solution. */
+
+	i__1 = max(*m,*n);
+	dlaset_("F", &i__1, nrhs, &c_b82, &c_b82, &b[b_offset], ldb);
+	dlaset_("F", &minmn, &c__1, &c_b82, &c_b82, &s[1], &c__1);
+	*rank = 0;
+	goto L10;
+    }
+
+/* Scale B if max entry outside range [SMLNUM,BIGNUM]. */
+
+    bnrm = dlange_("M", m, nrhs, &b[b_offset], ldb, &work[1]);
+    ibscl = 0;
+    if (bnrm > 0. && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM. */
+
+	dlascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+		 info);
+	ibscl = 1;
+    } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+	dlascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+		 info);
+	ibscl = 2;
+    }
+
+/* If M < N make sure certain entries of B are zero. */
+
+    if (*m < *n) {
+	i__1 = *n - *m;
+	dlaset_("F", &i__1, nrhs, &c_b82, &c_b82, &b[*m + 1 + b_dim1], ldb);
+    }
+
+/* Overdetermined case. */
+
+    if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined. */
+
+	mm = *m;
+	if (*m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than columns. */
+
+	    mm = *n;
+	    itau = 1;
+	    nwork = itau + *n;
+
+/* Compute A=Q*R. */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+	    i__1 = *lwork - nwork + 1;
+	    dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+		     info);
+
+/* Multiply B by transpose(Q). */
+/* (Workspace: need N+NRHS, prefer N+NRHS*NB) */
+
+	    i__1 = *lwork - nwork + 1;
+	    dormqr_("L", "T", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[
+		    b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below R. */
+
+	    if (*n > 1) {
+		i__1 = *n - 1;
+		i__2 = *n - 1;
+		dlaset_("L", &i__1, &i__2, &c_b82, &c_b82, &a[a_dim1 + 2],
+			lda);
+	    }
+	}
+
+	ie = 1;
+	itauq = ie + *n;
+	itaup = itauq + *n;
+	nwork = itaup + *n;
+
+/* Bidiagonalize R in A. */
+/* (Workspace: need 3*N+MM, prefer 3*N+(MM+N)*NB) */
+
+	i__1 = *lwork - nwork + 1;
+	dgebrd_(&mm, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+		work[itaup], &work[nwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors of R. */
+/* (Workspace: need 3*N+NRHS, prefer 3*N+NRHS*NB) */
+
+	i__1 = *lwork - nwork + 1;
+	dormbr_("Q", "L", "T", &mm, nrhs, n, &a[a_offset], lda, &work[itauq],
+		&b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+	dlalsd_("U", &smlsiz, n, nrhs, &s[1], &work[ie], &b[b_offset], ldb,
+		rcond, rank, &work[nwork], &iwork[1], info);
+	if (*info != 0) {
+	    goto L10;
+	}
+
+/* Multiply B by right bidiagonalizing vectors of R. */
+
+	i__1 = *lwork - nwork + 1;
+	dormbr_("P", "L", "N", n, nrhs, n, &a[a_offset], lda, &work[itaup], &
+		b[b_offset], ldb, &work[nwork], &i__1, info);
+
+    } else /* if(complicated condition) */ {
+/* Computing MAX */
+	i__1 = *m, i__2 = (*m << 1) - 4, i__1 = max(i__1,i__2), i__1 = max(
+		i__1,*nrhs), i__2 = *n - *m * 3, i__1 = max(i__1,i__2);
+	if (*n >= mnthr && *lwork >= (*m << 2) + *m * *m + max(i__1,wlalsd)) {
+
+/* Path 2a - underdetermined, with many more columns than rows */
+/* and sufficient workspace for an efficient algorithm. */
+
+	    ldwork = *m;
+/* Computing MAX */
+/* Computing MAX */
+	    i__3 = *m, i__4 = (*m << 1) - 4, i__3 = max(i__3,i__4), i__3 =
+		    max(i__3,*nrhs), i__4 = *n - *m * 3;
+	    i__1 = (*m << 2) + *m * *lda + max(i__3,i__4), i__2 = *m * *lda +
+		    *m + *m * *nrhs, i__1 = max(i__1,i__2), i__2 = (*m << 2)
+		    + *m * *lda + wlalsd;
+	    if (*lwork >= max(i__1,i__2)) {
+		ldwork = *lda;
+	    }
+	    itau = 1;
+	    nwork = *m + 1;
+
+/* Compute A=L*Q. */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+	    i__1 = *lwork - nwork + 1;
+	    dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+		     info);
+	    il = nwork;
+
+/* Copy L to WORK(IL), zeroing out above its diagonal. */
+
+	    dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork);
+	    i__1 = *m - 1;
+	    i__2 = *m - 1;
+	    dlaset_("U", &i__1, &i__2, &c_b82, &c_b82, &work[il + ldwork], &
+		    ldwork);
+	    ie = il + ldwork * *m;
+	    itauq = ie + *m;
+	    itaup = itauq + *m;
+	    nwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IL). */
+/* (Workspace: need M*M+5*M, prefer M*M+4*M+2*M*NB) */
+
+	    i__1 = *lwork - nwork + 1;
+	    dgebrd_(m, m, &work[il], &ldwork, &s[1], &work[ie], &work[itauq],
+		    &work[itaup], &work[nwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors of L. */
+/* (Workspace: need M*M+4*M+NRHS, prefer M*M+4*M+NRHS*NB) */
+
+	    i__1 = *lwork - nwork + 1;
+	    dormbr_("Q", "L", "T", m, nrhs, m, &work[il], &ldwork, &work[
+		    itauq], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+	    dlalsd_("U", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset],
+		    ldb, rcond, rank, &work[nwork], &iwork[1], info);
+	    if (*info != 0) {
+		goto L10;
+	    }
+
+/* Multiply B by right bidiagonalizing vectors of L. */
+
+	    i__1 = *lwork - nwork + 1;
+	    dormbr_("P", "L", "N", m, nrhs, m, &work[il], &ldwork, &work[
+		    itaup], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below first M rows of B. */
+
+	    i__1 = *n - *m;
+	    dlaset_("F", &i__1, nrhs, &c_b82, &c_b82, &b[*m + 1 + b_dim1],
+		    ldb);
+	    nwork = itau + *m;
+
+/* Multiply transpose(Q) by B. */
+/* (Workspace: need M+NRHS, prefer M+NRHS*NB) */
+
+	    i__1 = *lwork - nwork + 1;
+	    dormlq_("L", "T", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[
+		    b_offset], ldb, &work[nwork], &i__1, info);
+
+	} else {
+
+/* Path 2 - remaining underdetermined cases. */
+
+	    ie = 1;
+	    itauq = ie + *m;
+	    itaup = itauq + *m;
+	    nwork = itaup + *m;
+
+/* Bidiagonalize A. */
+/* (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB) */
+
+	    i__1 = *lwork - nwork + 1;
+	    dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+		    work[itaup], &work[nwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors. */
+/* (Workspace: need 3*M+NRHS, prefer 3*M+NRHS*NB) */
+
+	    i__1 = *lwork - nwork + 1;
+	    dormbr_("Q", "L", "T", m, nrhs, n, &a[a_offset], lda, &work[itauq]
+, &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+	    dlalsd_("L", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset],
+		    ldb, rcond, rank, &work[nwork], &iwork[1], info);
+	    if (*info != 0) {
+		goto L10;
+	    }
+
+/* Multiply B by right bidiagonalizing vectors of A. */
+
+	    i__1 = *lwork - nwork + 1;
+	    dormbr_("P", "L", "N", n, nrhs, m, &a[a_offset], lda, &work[itaup]
+, &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+	}
+    }
+
+/* Undo scaling. */
+
+    if (iascl == 1) {
+	dlascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+		 info);
+	dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+		minmn, info);
+    } else if (iascl == 2) {
+	dlascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+		 info);
+	dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+		minmn, info);
+    }
+    if (ibscl == 1) {
+	dlascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+		 info);
+    } else if (ibscl == 2) {
+	dlascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+		 info);
+    }
+
+L10:
+    work[1] = (doublereal) maxwrk;
+    return 0;
+
+/* End of DGELSD */
+
+} /* dgelsd_ */
diff --git a/contrib/libs/clapack/dgelss.c b/contrib/libs/clapack/dgelss.c
new file mode 100644
index 0000000000..82cf9a405e
--- /dev/null
+++ b/contrib/libs/clapack/dgelss.c
@@ -0,0 +1,828 @@
+/* dgelss.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__6 = 6;
+static integer c_n1 = -1;
+static integer c__1 = 1;
+static integer c__0 = 0;
+static doublereal c_b74 = 0.;
+static doublereal c_b108 = 1.;
+
+/* Subroutine */ int dgelss_(integer *m, integer *n, integer *nrhs, 
+	doublereal *a, integer *lda, doublereal *b, integer *ldb, doublereal *
+	s, doublereal *rcond, integer *rank, doublereal *work, integer *lwork, 
+	 integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+    doublereal d__1;
+
+    /* Local variables */
+    integer i__, bl, ie, il, mm;
+    doublereal eps, thr, anrm, bnrm;
+    integer itau;
+    doublereal vdum[1];
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, 
+	    integer *, doublereal *, doublereal *, integer *, doublereal *, 
+	    integer *, doublereal *, doublereal *, integer *);
+    integer iascl, ibscl;
+    extern /* Subroutine */ int dgemv_(char *, integer *, integer *, 
+	    doublereal *, doublereal *, integer *, doublereal *, integer *, 
+	    doublereal *, doublereal *, integer *), drscl_(integer *, 
+	    doublereal *, doublereal *, integer *);
+    integer chunk;
+    doublereal sfmin;
+    integer minmn;
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, 
+	    doublereal *, integer *);
+    integer maxmn, itaup, itauq, mnthr, iwork;
+    extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dgebrd_(
+	    integer *, integer *, doublereal *, integer *, doublereal *, 
+	    doublereal *, doublereal *, doublereal *, doublereal *, integer *, 
+	     integer *);
+    extern doublereal dlamch_(char *), dlange_(char *, integer *, 
+	    integer *, doublereal *, integer *, doublereal *);
+    integer bdspac;
+    extern /* Subroutine */ int dgelqf_(integer *, integer *, doublereal *, 
+	    integer *, doublereal *, doublereal *, integer *, integer *), 
+	    dlascl_(char *, integer *, integer *, doublereal *, doublereal *, 
+	    integer *, integer *, doublereal *, integer *, integer *), 
+	    dgeqrf_(integer *, integer *, doublereal *, integer *, 
+	    doublereal *, doublereal *, integer *, integer *), dlacpy_(char *, 
+	    integer *, integer *, doublereal *, integer *, doublereal *, 
+	    integer *), dlaset_(char *, integer *, integer *, 
+	    doublereal *, doublereal *, doublereal *, integer *), 
+	    xerbla_(char *, integer *), dbdsqr_(char *, integer *, 
+	    integer *, integer *, integer *, doublereal *, doublereal *, 
+	    doublereal *, integer *, doublereal *, integer *, doublereal *, 
+	    integer *, doublereal *, integer *), dorgbr_(char *, 
+	    integer *, integer *, integer *, doublereal *, integer *, 
+	    doublereal *, doublereal *, integer *, integer *);
+    doublereal bignum;
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    extern /* Subroutine */ int dormbr_(char *, char *, char *, integer *, 
+	    integer *, integer *, doublereal *, integer *, doublereal *, 
+	    doublereal *, integer *, doublereal *, integer *, integer *), dormlq_(char *, char *, integer *, 
+	    integer *, integer *, doublereal *, integer *, doublereal *, 
+	    doublereal *, integer *, doublereal *, integer *, integer *);
+    integer ldwork;
+    extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *, 
+	    integer *, doublereal *, integer *, doublereal *, doublereal *, 
+	    integer *, doublereal *, integer *, integer *);
+    integer minwrk, maxwrk;
+    doublereal smlnum;
+    logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGELSS computes the minimum norm solution to a real linear least */
+/* squares problem: */
+
+/* Minimize 2-norm(| b - A*x |). */
+
+/* using the singular value decomposition (SVD) of A. A is an M-by-N */
+/* matrix which may be rank-deficient. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution matrix */
+/* X. */
+
+/* The effective rank of A is determined by treating as zero those */
+/* singular values which are less than RCOND times the largest singular */
+/* value. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the first min(m,n) rows of A are overwritten with */
+/* its right singular vectors, stored rowwise. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the M-by-NRHS right hand side matrix B. */
+/* On exit, B is overwritten by the N-by-NRHS solution */
+/* matrix X. If m >= n and RANK = n, the residual */
+/* sum-of-squares for the solution in the i-th column is given */
+/* by the sum of squares of elements n+1:m in that column. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,max(M,N)). */
+
+/* S (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The singular values of A in decreasing order. */
+/* The condition number of A in the 2-norm = S(1)/S(min(m,n)). */
+
+/* RCOND (input) DOUBLE PRECISION */
+/* RCOND is used to determine the effective rank of A. */
+/* Singular values S(i) <= RCOND*S(1) are treated as zero. */
+/* If RCOND < 0, machine precision is used instead. */
+
+/* RANK (output) INTEGER */
+/* The effective rank of A, i.e., the number of singular values */
+/* which are greater than RCOND*S(1). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= 1, and also: */
+/* LWORK >= 3*min(M,N) + max( 2*min(M,N), max(M,N), NRHS ) */
+/* For good performance, LWORK should generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: the algorithm for computing the SVD failed to converge; */
+/* if INFO = i, i off-diagonal elements of an intermediate */
+/* bidiagonal form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --s;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    minmn = min(*m,*n);
+    maxmn = max(*m,*n);
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    } else if (*ldb < max(1,maxmn)) {
+	*info = -7;
+    }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV.) */
+
+    if (*info == 0) {
+	minwrk = 1;
+	maxwrk = 1;
+	if (minmn > 0) {
+	    mm = *m;
+/* MNTHR from ILAENV is the crossover dimension: when the matrix is */
+/* sufficiently tall (or wide) a preliminary QR (or LQ) factorization */
+/* pays off, selecting Paths 1a / 2a below. */
+	    mnthr = ilaenv_(&c__6, "DGELSS", " ", m, n, nrhs, &c_n1);
+	    if (*m >= *n && *m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than */
+/* columns */
+
+		mm = *n;
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "DGEQRF",
+			" ", m, n, &c_n1, &c_n1);
+		maxwrk = max(i__1,i__2);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *n + *nrhs * ilaenv_(&c__1, "DORMQR",
+			"LT", m, nrhs, n, &c_n1);
+		maxwrk = max(i__1,i__2);
+	    }
+	    if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined */
+
+/* Compute workspace needed for DBDSQR */
+
+/* Computing MAX */
+		i__1 = 1, i__2 = *n * 5;
+		bdspac = max(i__1,i__2);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *n * 3 + (mm + *n) * ilaenv_(&c__1,
+			"DGEBRD", " ", &mm, n, &c_n1, &c_n1);
+		maxwrk = max(i__1,i__2);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *n * 3 + *nrhs * ilaenv_(&c__1, "DORMBR"
+, "QLT", &mm, nrhs, n, &c_n1);
+		maxwrk = max(i__1,i__2);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *n * 3 + (*n - 1) * ilaenv_(&c__1,
+			"DORGBR", "P", n, n, n, &c_n1);
+		maxwrk = max(i__1,i__2);
+		maxwrk = max(maxwrk,bdspac);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *n * *nrhs;
+		maxwrk = max(i__1,i__2);
+/* Computing MAX */
+		i__1 = *n * 3 + mm, i__2 = *n * 3 + *nrhs, i__1 = max(i__1,
+			i__2);
+		minwrk = max(i__1,bdspac);
+		maxwrk = max(minwrk,maxwrk);
+	    }
+	    if (*n > *m) {
+
+/* Compute workspace needed for DBDSQR */
+
+/* Computing MAX */
+		i__1 = 1, i__2 = *m * 5;
+		bdspac = max(i__1,i__2);
+/* Computing MAX */
+		i__1 = *m * 3 + *nrhs, i__2 = *m * 3 + *n, i__1 = max(i__1,
+			i__2);
+		minwrk = max(i__1,bdspac);
+		if (*n >= mnthr) {
+
+/* Path 2a - underdetermined, with many more columns */
+/* than rows */
+
+		    maxwrk = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+			    c_n1, &c_n1);
+/* Computing MAX */
+		    i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m << 1) *
+			    ilaenv_(&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1);
+		    maxwrk = max(i__1,i__2);
+/* Computing MAX */
+		    i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + *nrhs *
+			    ilaenv_(&c__1, "DORMBR", "QLT", m, nrhs, m, &c_n1);
+		    maxwrk = max(i__1,i__2);
+/* Computing MAX */
+		    i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m - 1) *
+			    ilaenv_(&c__1, "DORGBR", "P", m, m, m, &c_n1);
+		    maxwrk = max(i__1,i__2);
+/* Computing MAX */
+		    i__1 = maxwrk, i__2 = *m * *m + *m + bdspac;
+		    maxwrk = max(i__1,i__2);
+		    if (*nrhs > 1) {
+/* Computing MAX */
+			i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs;
+			maxwrk = max(i__1,i__2);
+		    } else {
+/* Computing MAX */
+			i__1 = maxwrk, i__2 = *m * *m + (*m << 1);
+			maxwrk = max(i__1,i__2);
+		    }
+/* Computing MAX */
+		    i__1 = maxwrk, i__2 = *m + *nrhs * ilaenv_(&c__1, "DORMLQ"
+, "LT", n, nrhs, m, &c_n1);
+		    maxwrk = max(i__1,i__2);
+		} else {
+
+/* Path 2 - underdetermined */
+
+		    maxwrk = *m * 3 + (*n + *m) * ilaenv_(&c__1, "DGEBRD",
+			    " ", m, n, &c_n1, &c_n1);
+/* Computing MAX */
+		    i__1 = maxwrk, i__2 = *m * 3 + *nrhs * ilaenv_(&c__1,
+			    "DORMBR", "QLT", m, nrhs, m, &c_n1);
+		    maxwrk = max(i__1,i__2);
+/* Computing MAX */
+		    i__1 = maxwrk, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORG"
+			    "BR", "P", m, n, m, &c_n1);
+		    maxwrk = max(i__1,i__2);
+		    maxwrk = max(maxwrk,bdspac);
+/* Computing MAX */
+		    i__1 = maxwrk, i__2 = *n * *nrhs;
+		    maxwrk = max(i__1,i__2);
+		}
+	    }
+	    maxwrk = max(minwrk,maxwrk);
+	}
+	work[1] = (doublereal) maxwrk;
+
+	if (*lwork < minwrk && ! lquery) {
+	    *info = -12;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGELSS", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0) {
+	*rank = 0;
+	return 0;
+    }
+
+/* Get machine parameters */
+
+    eps = dlamch_("P");
+    sfmin = dlamch_("S");
+    smlnum = sfmin / eps;
+    bignum = 1. / smlnum;
+    dlabad_(&smlnum, &bignum);
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+    anrm = dlange_("M", m, n, &a[a_offset], lda, &work[1]);
+/* IASCL records how A was scaled (0 = not scaled, 1 = scaled up from a */
+/* tiny norm, 2 = scaled down from a huge one) so that the singular */
+/* values and solution can be unscaled before returning. */
+    iascl = 0;
+    if (anrm > 0. && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+	dlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+		info);
+	iascl = 1;
+    } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+	dlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+		info);
+	iascl = 2;
+    } else if (anrm == 0.) {
+
+/* Matrix all zero. Return zero solution. */
+
+	i__1 = max(*m,*n);
+	dlaset_("F", &i__1, nrhs, &c_b74, &c_b74, &b[b_offset], ldb);
+	dlaset_("F", &minmn, &c__1, &c_b74, &c_b74, &s[1], &c__1);
+	*rank = 0;
+	goto L70;
+    }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+    bnrm = dlange_("M", m, nrhs, &b[b_offset], ldb, &work[1]);
+/* IBSCL plays the same role as IASCL for the scaling applied to B. */
+    ibscl = 0;
+    if (bnrm > 0. && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+	dlascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+		 info);
+	ibscl = 1;
+    } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+	dlascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+		 info);
+	ibscl = 2;
+    }
+
+/* Overdetermined case */
+
+    if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined */
+
+	mm = *m;
+	if (*m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than columns */
+
+	    mm = *n;
+	    itau = 1;
+	    iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+	    i__1 = *lwork - iwork + 1;
+	    dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__1,
+		     info);
+
+/* Multiply B by transpose(Q) */
+/* (Workspace: need N+NRHS, prefer N+NRHS*NB) */
+
+	    i__1 = *lwork - iwork + 1;
+	    dormqr_("L", "T", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[
+		    b_offset], ldb, &work[iwork], &i__1, info);
+
+/* Zero out below R */
+
+	    if (*n > 1) {
+		i__1 = *n - 1;
+		i__2 = *n - 1;
+		dlaset_("L", &i__1, &i__2, &c_b74, &c_b74, &a[a_dim1 + 2],
+			lda);
+	    }
+	}
+
+	ie = 1;
+	itauq = ie + *n;
+	itaup = itauq + *n;
+	iwork = itaup + *n;
+
+/* Bidiagonalize R in A */
+/* (Workspace: need 3*N+MM, prefer 3*N+(MM+N)*NB) */
+
+	i__1 = *lwork - iwork + 1;
+	dgebrd_(&mm, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+		work[itaup], &work[iwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors of R */
+/* (Workspace: need 3*N+NRHS, prefer 3*N+NRHS*NB) */
+
+	i__1 = *lwork - iwork + 1;
+	dormbr_("Q", "L", "T", &mm, nrhs, n, &a[a_offset], lda, &work[itauq],
+		&b[b_offset], ldb, &work[iwork], &i__1, info);
+
+/* Generate right bidiagonalizing vectors of R in A */
+/* (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
+
+	i__1 = *lwork - iwork + 1;
+	dorgbr_("P", n, n, n, &a[a_offset], lda, &work[itaup], &work[iwork], &
+		i__1, info);
+	iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration */
+/* multiply B by transpose of left singular vectors */
+/* compute right singular vectors in A */
+/* (Workspace: need BDSPAC) */
+
+	dbdsqr_("U", n, n, &c__0, nrhs, &s[1], &work[ie], &a[a_offset], lda,
+		vdum, &c__1, &b[b_offset], ldb, &work[iwork], info)
+		;
+	if (*info != 0) {
+	    goto L70;
+	}
+
+/* Multiply B by reciprocals of singular values */
+
+/* THR is the rank cutoff: singular values <= max(RCOND*S(1), SFMIN) */
+/* are treated as zero; when RCOND < 0, EPS is used in place of RCOND. */
+/* The SFMIN floor avoids overflow inside DRSCL. */
+/* Computing MAX */
+	d__1 = *rcond * s[1];
+	thr = max(d__1,sfmin);
+	if (*rcond < 0.) {
+/* Computing MAX */
+	    d__1 = eps * s[1];
+	    thr = max(d__1,sfmin);
+	}
+	*rank = 0;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (s[i__] > thr) {
+		drscl_(nrhs, &s[i__], &b[i__ + b_dim1], ldb);
+		++(*rank);
+	    } else {
+		dlaset_("F", &c__1, nrhs, &c_b74, &c_b74, &b[i__ + b_dim1],
+			ldb);
+	    }
+/* L10: */
+	}
+
+/* Multiply B by right singular vectors */
+/* (Workspace: need N, prefer N*NRHS) */
+
+	if (*lwork >= *ldb * *nrhs && *nrhs > 1) {
+	    dgemm_("T", "N", n, nrhs, n, &c_b108, &a[a_offset], lda, &b[
+		    b_offset], ldb, &c_b74, &work[1], ldb);
+	    dlacpy_("G", n, nrhs, &work[1], ldb, &b[b_offset], ldb)
+		    ;
+	} else if (*nrhs > 1) {
+	    chunk = *lwork / *n;
+	    i__1 = *nrhs;
+	    i__2 = chunk;
+	    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+		i__3 = *nrhs - i__ + 1;
+		bl = min(i__3,chunk);
+		dgemm_("T", "N", n, &bl, n, &c_b108, &a[a_offset], lda, &b[
+			i__ * b_dim1 + 1], ldb, &c_b74, &work[1], n);
+		dlacpy_("G", n, &bl, &work[1], n, &b[i__ * b_dim1 + 1], ldb);
+/* L20: */
+	    }
+	} else {
+	    dgemv_("T", n, n, &c_b108, &a[a_offset], lda, &b[b_offset], &c__1,
+		     &c_b74, &work[1], &c__1);
+	    dcopy_(n, &work[1], &c__1, &b[b_offset], &c__1);
+	}
+
+    } else /* if(complicated condition) */ {
+/* Computing MAX */
+	i__2 = *m, i__1 = (*m << 1) - 4, i__2 = max(i__2,i__1), i__2 = max(
+		i__2,*nrhs), i__1 = *n - *m * 3;
+	if (*n >= mnthr && *lwork >= (*m << 2) + *m * *m + max(i__2,i__1)) {
+
+/* Path 2a - underdetermined, with many more columns than rows */
+/* and sufficient workspace for an efficient algorithm */
+
+	    ldwork = *m;
+/* Computing MAX */
+/* Computing MAX */
+	    i__3 = *m, i__4 = (*m << 1) - 4, i__3 = max(i__3,i__4), i__3 =
+		    max(i__3,*nrhs), i__4 = *n - *m * 3;
+	    i__2 = (*m << 2) + *m * *lda + max(i__3,i__4), i__1 = *m * *lda +
+		    *m + *m * *nrhs;
+	    if (*lwork >= max(i__2,i__1)) {
+		ldwork = *lda;
+	    }
+	    itau = 1;
+	    iwork = *m + 1;
+
+/* Compute A=L*Q */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+	    i__2 = *lwork - iwork + 1;
+	    dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2,
+		     info);
+	    il = iwork;
+
+/* Copy L to WORK(IL), zeroing out above it */
+
+	    dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork);
+	    i__2 = *m - 1;
+	    i__1 = *m - 1;
+	    dlaset_("U", &i__2, &i__1, &c_b74, &c_b74, &work[il + ldwork], &
+		    ldwork);
+	    ie = il + ldwork * *m;
+	    itauq = ie + *m;
+	    itaup = itauq + *m;
+	    iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IL) */
+/* (Workspace: need M*M+5*M, prefer M*M+4*M+2*M*NB) */
+
+	    i__2 = *lwork - iwork + 1;
+	    dgebrd_(m, m, &work[il], &ldwork, &s[1], &work[ie], &work[itauq],
+		    &work[itaup], &work[iwork], &i__2, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors of L */
+/* (Workspace: need M*M+4*M+NRHS, prefer M*M+4*M+NRHS*NB) */
+
+	    i__2 = *lwork - iwork + 1;
+	    dormbr_("Q", "L", "T", m, nrhs, m, &work[il], &ldwork, &work[
+		    itauq], &b[b_offset], ldb, &work[iwork], &i__2, info);
+
+/* Generate right bidiagonalizing vectors of R in WORK(IL) */
+/* (Workspace: need M*M+5*M-1, prefer M*M+4*M+(M-1)*NB) */
+
+	    i__2 = *lwork - iwork + 1;
+	    dorgbr_("P", m, m, m, &work[il], &ldwork, &work[itaup], &work[
+		    iwork], &i__2, info);
+	    iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, */
+/* computing right singular vectors of L in WORK(IL) and */
+/* multiplying B by transpose of left singular vectors */
+/* (Workspace: need M*M+M+BDSPAC) */
+
+	    dbdsqr_("U", m, m, &c__0, nrhs, &s[1], &work[ie], &work[il], &
+		    ldwork, &a[a_offset], lda, &b[b_offset], ldb, &work[iwork]
+, info);
+	    if (*info != 0) {
+		goto L70;
+	    }
+
+/* Multiply B by reciprocals of singular values */
+
+/* Computing MAX */
+	    d__1 = *rcond * s[1];
+	    thr = max(d__1,sfmin);
+	    if (*rcond < 0.) {
+/* Computing MAX */
+		d__1 = eps * s[1];
+		thr = max(d__1,sfmin);
+	    }
+	    *rank = 0;
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		if (s[i__] > thr) {
+		    drscl_(nrhs, &s[i__], &b[i__ + b_dim1], ldb);
+		    ++(*rank);
+		} else {
+		    dlaset_("F", &c__1, nrhs, &c_b74, &c_b74, &b[i__ + b_dim1]
+, ldb);
+		}
+/* L30: */
+	    }
+	    iwork = ie;
+
+/* Multiply B by right singular vectors of L in WORK(IL) */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NRHS) */
+
+	    if (*lwork >= *ldb * *nrhs + iwork - 1 && *nrhs > 1) {
+		dgemm_("T", "N", m, nrhs, m, &c_b108, &work[il], &ldwork, &b[
+			b_offset], ldb, &c_b74, &work[iwork], ldb);
+		dlacpy_("G", m, nrhs, &work[iwork], ldb, &b[b_offset], ldb);
+	    } else if (*nrhs > 1) {
+		chunk = (*lwork - iwork + 1) / *m;
+		i__2 = *nrhs;
+		i__1 = chunk;
+		for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+			i__1) {
+/* Computing MIN */
+		    i__3 = *nrhs - i__ + 1;
+		    bl = min(i__3,chunk);
+		    dgemm_("T", "N", m, &bl, m, &c_b108, &work[il], &ldwork, &
+			    b[i__ * b_dim1 + 1], ldb, &c_b74, &work[iwork], m);
+		    dlacpy_("G", m, &bl, &work[iwork], m, &b[i__ * b_dim1 + 1]
+, ldb);
+/* L40: */
+		}
+	    } else {
+		dgemv_("T", m, m, &c_b108, &work[il], &ldwork, &b[b_dim1 + 1],
+			 &c__1, &c_b74, &work[iwork], &c__1);
+		dcopy_(m, &work[iwork], &c__1, &b[b_dim1 + 1], &c__1);
+	    }
+
+/* Zero out below first M rows of B */
+
+	    i__1 = *n - *m;
+	    dlaset_("F", &i__1, nrhs, &c_b74, &c_b74, &b[*m + 1 + b_dim1],
+		    ldb);
+	    iwork = itau + *m;
+
+/* Multiply transpose(Q) by B */
+/* (Workspace: need M+NRHS, prefer M+NRHS*NB) */
+
+	    i__1 = *lwork - iwork + 1;
+	    dormlq_("L", "T", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[
+		    b_offset], ldb, &work[iwork], &i__1, info);
+
+	} else {
+
+/* Path 2 - remaining underdetermined cases */
+
+	    ie = 1;
+	    itauq = ie + *m;
+	    itaup = itauq + *m;
+	    iwork = itaup + *m;
+
+/* Bidiagonalize A */
+/* (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB) */
+
+	    i__1 = *lwork - iwork + 1;
+	    dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+		    work[itaup], &work[iwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors */
+/* (Workspace: need 3*M+NRHS, prefer 3*M+NRHS*NB) */
+
+	    i__1 = *lwork - iwork + 1;
+	    dormbr_("Q", "L", "T", m, nrhs, n, &a[a_offset], lda, &work[itauq]
+, &b[b_offset], ldb, &work[iwork], &i__1, info);
+
+/* Generate right bidiagonalizing vectors in A */
+/* (Workspace: need 4*M, prefer 3*M+M*NB) */
+
+	    i__1 = *lwork - iwork + 1;
+	    dorgbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &work[
+		    iwork], &i__1, info);
+	    iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, */
+/* computing right singular vectors of A in A and */
+/* multiplying B by transpose of left singular vectors */
+/* (Workspace: need BDSPAC) */
+
+	    dbdsqr_("L", m, n, &c__0, nrhs, &s[1], &work[ie], &a[a_offset],
+		    lda, vdum, &c__1, &b[b_offset], ldb, &work[iwork], info);
+	    if (*info != 0) {
+		goto L70;
+	    }
+
+/* Multiply B by reciprocals of singular values */
+
+/* Computing MAX */
+	    d__1 = *rcond * s[1];
+	    thr = max(d__1,sfmin);
+	    if (*rcond < 0.) {
+/* Computing MAX */
+		d__1 = eps * s[1];
+		thr = max(d__1,sfmin);
+	    }
+	    *rank = 0;
+	    i__1 = *m;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		if (s[i__] > thr) {
+		    drscl_(nrhs, &s[i__], &b[i__ + b_dim1], ldb);
+		    ++(*rank);
+		} else {
+		    dlaset_("F", &c__1, nrhs, &c_b74, &c_b74, &b[i__ + b_dim1]
+, ldb);
+		}
+/* L50: */
+	    }
+
+/* Multiply B by right singular vectors of A */
+/* (Workspace: need N, prefer N*NRHS) */
+
+	    if (*lwork >= *ldb * *nrhs && *nrhs > 1) {
+		dgemm_("T", "N", n, nrhs, m, &c_b108, &a[a_offset], lda, &b[
+			b_offset], ldb, &c_b74, &work[1], ldb);
+		dlacpy_("F", n, nrhs, &work[1], ldb, &b[b_offset], ldb);
+	    } else if (*nrhs > 1) {
+		chunk = *lwork / *n;
+		i__1 = *nrhs;
+		i__2 = chunk;
+		for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+			i__2) {
+/* Computing MIN */
+		    i__3 = *nrhs - i__ + 1;
+		    bl = min(i__3,chunk);
+		    dgemm_("T", "N", n, &bl, m, &c_b108, &a[a_offset], lda, &
+			    b[i__ * b_dim1 + 1], ldb, &c_b74, &work[1], n);
+		    dlacpy_("F", n, &bl, &work[1], n, &b[i__ * b_dim1 + 1],
+			    ldb);
+/* L60: */
+		}
+	    } else {
+		dgemv_("T", m, n, &c_b108, &a[a_offset], lda, &b[b_offset], &
+			c__1, &c_b74, &work[1], &c__1);
+		dcopy_(n, &work[1], &c__1, &b[b_offset], &c__1);
+	    }
+	}
+    }
+
+/* Undo scaling */
+
+    if (iascl == 1) {
+	dlascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+		 info);
+	dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+		minmn, info);
+    } else if (iascl == 2) {
+	dlascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+		 info);
+	dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+		minmn, info);
+    }
+    if (ibscl == 1) {
+	dlascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+		 info);
+    } else if (ibscl == 2) {
+	dlascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+		 info);
+    }
+
+/* Common exit for both the success and SVD-failure paths; WORK(1) */
+/* always reports the optimal LWORK computed above. */
+L70:
+    work[1] = (doublereal) maxwrk;
+    return 0;
+
+/* End of DGELSS */
+
+} /* dgelss_ */
diff --git a/contrib/libs/clapack/dgelsx.c b/contrib/libs/clapack/dgelsx.c
new file mode 100644
index 0000000000..75217ae0d4
--- /dev/null
+++ b/contrib/libs/clapack/dgelsx.c
@@ -0,0 +1,438 @@
+/* dgelsx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__0 = 0;
+static doublereal c_b13 = 0.;
+static integer c__2 = 2;
+static integer c__1 = 1;
+static doublereal c_b36 = 1.;
+
+/* Subroutine */ int dgelsx_(integer *m, integer *n, integer *nrhs,
+ doublereal *a, integer *lda, doublereal *b, integer *ldb, integer *
+ jpvt, doublereal *rcond, integer *rank, doublereal *work, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal c1, c2, s1, s2, t1, t2;
+ integer mn;
+ doublereal anrm, bnrm, smin, smax;
+ integer iascl, ibscl, ismin, ismax;
+ extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *), dlaic1_(
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *), dorm2r_(
+ char *, char *, integer *, integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *), dlabad_(doublereal *, doublereal *);
+ extern doublereal dlamch_(char *), dlange_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *);
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dgeqpf_(integer *, integer *,
+ doublereal *, integer *, integer *, doublereal *, doublereal *,
+ integer *), dlaset_(char *, integer *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *), xerbla_(char *,
+ integer *);
+ doublereal bignum;
+ extern /* Subroutine */ int dlatzm_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, doublereal *);
+ doublereal sminpr, smaxpr, smlnum;
+ extern /* Subroutine */ int dtzrqf_(integer *, integer *, doublereal *,
+ integer *, doublereal *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine DGELSY. */
+
+/* DGELSX computes the minimum-norm solution to a real linear least */
+/* squares problem: */
+/* minimize || A * X - B || */
+/* using a complete orthogonal factorization of A. A is an M-by-N */
+/* matrix which may be rank-deficient. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution */
+/* matrix X. */
+
+/* The routine first computes a QR factorization with column pivoting: */
+/* A * P = Q * [ R11 R12 ] */
+/* [ 0 R22 ] */
+/* with R11 defined as the largest leading submatrix whose estimated */
+/* condition number is less than 1/RCOND. The order of R11, RANK, */
+/* is the effective rank of A. */
+
+/* Then, R22 is considered to be negligible, and R12 is annihilated */
+/* by orthogonal transformations from the right, arriving at the */
+/* complete orthogonal factorization: */
+/* A * P = Q * [ T11 0 ] * Z */
+/* [ 0 0 ] */
+/* The minimum-norm solution is then */
+/* X = P * Z' [ inv(T11)*Q1'*B ] */
+/* [ 0 ] */
+/* where Q1 consists of the first RANK columns of Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of */
+/* columns of matrices B and X. NRHS >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A has been overwritten by details of its */
+/* complete orthogonal factorization. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the M-by-NRHS right hand side matrix B. */
+/* On exit, the N-by-NRHS solution matrix X. */
+/* If m >= n and RANK = n, the residual sum-of-squares for */
+/* the solution in the i-th column is given by the sum of */
+/* squares of elements N+1:M in that column. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,M,N). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(i) .ne. 0, the i-th column of A is an */
+/* initial column, otherwise it is a free column. Before */
+/* the QR factorization of A, all initial columns are */
+/* permuted to the leading positions; only the remaining */
+/* free columns are moved as a result of column pivoting */
+/* during the factorization. */
+/* On exit, if JPVT(i) = k, then the i-th column of A*P */
+/* was the k-th column of A. */
+
+/* RCOND (input) DOUBLE PRECISION */
+/* RCOND is used to determine the effective rank of A, which */
+/* is defined as the order of the largest leading triangular */
+/* submatrix R11 in the QR factorization with pivoting of A, */
+/* whose estimated condition number < 1/RCOND. */
+
+/* RANK (output) INTEGER */
+/* The effective rank of A, i.e., the order of the submatrix */
+/* R11. This is the same as the order of the submatrix T11 */
+/* in the complete orthogonal factorization of A. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension */
+/* (max( min(M,N)+3*N, 2*min(M,N)+NRHS )), */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --jpvt;
+ --work;
+
+ /* Function Body */
+ mn = min(*m,*n);
+ ismin = mn + 1;
+ ismax = (mn << 1) + 1;
+
+/* Test the input arguments. */
+
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = max(1,*m);
+ if (*ldb < max(i__1,*n)) {
+ *info = -7;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGELSX", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+/* Computing MIN */
+ i__1 = min(*m,*n);
+ if (min(i__1,*nrhs) == 0) {
+ *rank = 0;
+ return 0;
+ }
+
+/* Get machine parameters */
+
+ smlnum = dlamch_("S") / dlamch_("P");
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+
+/* Scale A, B if max elements outside range [SMLNUM,BIGNUM] */
+
+ anrm = dlange_("M", m, n, &a[a_offset], lda, &work[1]);
+ iascl = 0;
+ if (anrm > 0. && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ dlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ dlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ dlaset_("F", &i__1, nrhs, &c_b13, &c_b13, &b[b_offset], ldb);
+ *rank = 0;
+ goto L100;
+ }
+
+ bnrm = dlange_("M", m, nrhs, &b[b_offset], ldb, &work[1]);
+ ibscl = 0;
+ if (bnrm > 0. && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ dlascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ dlascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 2;
+ }
+
+/* Compute QR factorization with column pivoting of A: */
+/* A * P = Q * R */
+
+ dgeqpf_(m, n, &a[a_offset], lda, &jpvt[1], &work[1], &work[mn + 1], info);
+
+/* workspace 3*N. Details of Householder rotations stored */
+/* in WORK(1:MN). */
+
+/* Determine RANK using incremental condition estimation */
+
+ work[ismin] = 1.;
+ work[ismax] = 1.;
+ smax = (d__1 = a[a_dim1 + 1], abs(d__1));
+ smin = smax;
+ if ((d__1 = a[a_dim1 + 1], abs(d__1)) == 0.) {
+ *rank = 0;
+ i__1 = max(*m,*n);
+ dlaset_("F", &i__1, nrhs, &c_b13, &c_b13, &b[b_offset], ldb);
+ goto L100;
+ } else {
+ *rank = 1;
+ }
+
+L10:
+ if (*rank < mn) {
+ i__ = *rank + 1;
+ dlaic1_(&c__2, rank, &work[ismin], &smin, &a[i__ * a_dim1 + 1], &a[
+ i__ + i__ * a_dim1], &sminpr, &s1, &c1);
+ dlaic1_(&c__1, rank, &work[ismax], &smax, &a[i__ * a_dim1 + 1], &a[
+ i__ + i__ * a_dim1], &smaxpr, &s2, &c2);
+
+ if (smaxpr * *rcond <= sminpr) {
+ i__1 = *rank;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[ismin + i__ - 1] = s1 * work[ismin + i__ - 1];
+ work[ismax + i__ - 1] = s2 * work[ismax + i__ - 1];
+/* L20: */
+ }
+ work[ismin + *rank] = c1;
+ work[ismax + *rank] = c2;
+ smin = sminpr;
+ smax = smaxpr;
+ ++(*rank);
+ goto L10;
+ }
+ }
+
+/* Logically partition R = [ R11 R12 ] */
+/* [ 0 R22 ] */
+/* where R11 = R(1:RANK,1:RANK) */
+
+/* [R11,R12] = [ T11, 0 ] * Y */
+
+ if (*rank < *n) {
+ dtzrqf_(rank, n, &a[a_offset], lda, &work[mn + 1], info);
+ }
+
+/* Details of Householder rotations stored in WORK(MN+1:2*MN) */
+
+/* B(1:M,1:NRHS) := Q' * B(1:M,1:NRHS) */
+
+ dorm2r_("Left", "Transpose", m, nrhs, &mn, &a[a_offset], lda, &work[1], &
+ b[b_offset], ldb, &work[(mn << 1) + 1], info);
+
+/* workspace NRHS */
+
+/* B(1:RANK,1:NRHS) := inv(T11) * B(1:RANK,1:NRHS) */
+
+ dtrsm_("Left", "Upper", "No transpose", "Non-unit", rank, nrhs, &c_b36, &
+ a[a_offset], lda, &b[b_offset], ldb);
+
+ i__1 = *n;
+ for (i__ = *rank + 1; i__ <= i__1; ++i__) {
+ i__2 = *nrhs;
+ for (j = 1; j <= i__2; ++j) {
+ b[i__ + j * b_dim1] = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* B(1:N,1:NRHS) := Y' * B(1:N,1:NRHS) */
+
+ if (*rank < *n) {
+ i__1 = *rank;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *n - *rank + 1;
+ dlatzm_("Left", &i__2, nrhs, &a[i__ + (*rank + 1) * a_dim1], lda,
+ &work[mn + i__], &b[i__ + b_dim1], &b[*rank + 1 + b_dim1],
+ ldb, &work[(mn << 1) + 1]);
+/* L50: */
+ }
+ }
+
+/* workspace NRHS */
+
+/* B(1:N,1:NRHS) := P * B(1:N,1:NRHS) */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[(mn << 1) + i__] = 1.;
+/* L60: */
+ }
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[(mn << 1) + i__] == 1.) {
+ if (jpvt[i__] != i__) {
+ k = i__;
+ t1 = b[k + j * b_dim1];
+ t2 = b[jpvt[k] + j * b_dim1];
+L70:
+ b[jpvt[k] + j * b_dim1] = t1;
+ work[(mn << 1) + k] = 0.;
+ t1 = t2;
+ k = jpvt[k];
+ t2 = b[jpvt[k] + j * b_dim1];
+ if (jpvt[k] != i__) {
+ goto L70;
+ }
+ b[i__ + j * b_dim1] = t1;
+ work[(mn << 1) + k] = 0.;
+ }
+ }
+/* L80: */
+ }
+/* L90: */
+ }
+
+/* Undo scaling */
+
+ if (iascl == 1) {
+ dlascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+ info);
+ dlascl_("U", &c__0, &c__0, &smlnum, &anrm, rank, rank, &a[a_offset],
+ lda, info);
+ } else if (iascl == 2) {
+ dlascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+ info);
+ dlascl_("U", &c__0, &c__0, &bignum, &anrm, rank, rank, &a[a_offset],
+ lda, info);
+ }
+ if (ibscl == 1) {
+ dlascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ } else if (ibscl == 2) {
+ dlascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ }
+
+L100:
+
+ return 0;
+
+/* End of DGELSX */
+
+} /* dgelsx_ */
diff --git a/contrib/libs/clapack/dgelsy.c b/contrib/libs/clapack/dgelsy.c
new file mode 100644
index 0000000000..0ae3452e70
--- /dev/null
+++ b/contrib/libs/clapack/dgelsy.c
@@ -0,0 +1,495 @@
+/* dgelsy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__0 = 0;
+static doublereal c_b31 = 0.;
+static integer c__2 = 2;
+static doublereal c_b54 = 1.;
+
+/* Subroutine */ int dgelsy_(integer *m, integer *n, integer *nrhs,
+	doublereal *a, integer *lda, doublereal *b, integer *ldb, integer *
+	jpvt, doublereal *rcond, integer *rank, doublereal *work, integer *
+	lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+    doublereal d__1, d__2;
+
+    /* Local variables */
+    integer i__, j;
+    doublereal c1, c2, s1, s2;
+    integer nb, mn, nb1, nb2, nb3, nb4;
+    doublereal anrm, bnrm, smin, smax;
+    integer iascl, ibscl;
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    integer ismin, ismax;
+    extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+	    integer *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *), dlaic1_(
+	    integer *, integer *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, doublereal *);
+    doublereal wsize;
+    extern /* Subroutine */ int dgeqp3_(integer *, integer *, doublereal *,
+	    integer *, integer *, doublereal *, doublereal *, integer *,
+	    integer *), dlabad_(doublereal *, doublereal *);
+    extern doublereal dlamch_(char *), dlange_(char *, integer *,
+	    integer *, doublereal *, integer *, doublereal *);
+    extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, integer *), dlaset_(char *, integer *, integer
+	    *, doublereal *, doublereal *, doublereal *, integer *),
+	    xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    doublereal bignum;
+    integer lwkmin;
+    extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, integer *);
+    doublereal sminpr, smaxpr, smlnum;
+    extern /* Subroutine */ int dormrz_(char *, char *, integer *, integer *,
+	    integer *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, integer *);
+    integer lwkopt;
+    logical lquery;
+    extern /* Subroutine */ int dtzrzf_(integer *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGELSY computes the minimum-norm solution to a real linear least */
+/* squares problem: */
+/* minimize || A * X - B || */
+/* using a complete orthogonal factorization of A. A is an M-by-N */
+/* matrix which may be rank-deficient. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution */
+/* matrix X. */
+
+/* The routine first computes a QR factorization with column pivoting: */
+/* A * P = Q * [ R11 R12 ] */
+/* [ 0 R22 ] */
+/* with R11 defined as the largest leading submatrix whose estimated */
+/* condition number is less than 1/RCOND. The order of R11, RANK, */
+/* is the effective rank of A. */
+
+/* Then, R22 is considered to be negligible, and R12 is annihilated */
+/* by orthogonal transformations from the right, arriving at the */
+/* complete orthogonal factorization: */
+/* A * P = Q * [ T11 0 ] * Z */
+/* [ 0 0 ] */
+/* The minimum-norm solution is then */
+/* X = P * Z' [ inv(T11)*Q1'*B ] */
+/* [ 0 ] */
+/* where Q1 consists of the first RANK columns of Q. */
+
+/* This routine is basically identical to the original xGELSX except */
+/* three differences: */
+/* o The call to the subroutine xGEQPF has been substituted by the */
+/* the call to the subroutine xGEQP3. This subroutine is a Blas-3 */
+/* version of the QR factorization with column pivoting. */
+/* o Matrix B (the right hand side) is updated with Blas-3. */
+/* o The permutation of matrix B (the right hand side) is faster and */
+/* more simple. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of */
+/* columns of matrices B and X. NRHS >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A has been overwritten by details of its */
+/* complete orthogonal factorization. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the M-by-NRHS right hand side matrix B. */
+/* On exit, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,M,N). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(i) .ne. 0, the i-th column of A is permuted */
+/* to the front of AP, otherwise column i is a free column. */
+/* On exit, if JPVT(i) = k, then the i-th column of AP */
+/* was the k-th column of A. */
+
+/* RCOND (input) DOUBLE PRECISION */
+/* RCOND is used to determine the effective rank of A, which */
+/* is defined as the order of the largest leading triangular */
+/* submatrix R11 in the QR factorization with pivoting of A, */
+/* whose estimated condition number < 1/RCOND. */
+
+/* RANK (output) INTEGER */
+/* The effective rank of A, i.e., the order of the submatrix */
+/* R11. This is the same as the order of the submatrix T11 */
+/* in the complete orthogonal factorization of A. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* The unblocked strategy requires that: */
+/* LWORK >= MAX( MN+3*N+1, 2*MN+NRHS ), */
+/* where MN = min( M, N ). */
+/* The block algorithm requires that: */
+/* LWORK >= MAX( MN+2*N+NB*(N+1), 2*MN+NB*NRHS ), */
+/* where NB is an upper bound on the blocksize returned */
+/* by ILAENV for the routines DGEQP3, DTZRZF, STZRQF, DORMQR, */
+/* and DORMRZ. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: If INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+/* E. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+/* G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+/* Shift the array pointers back by one row/column so that the 1-based, */
+/* Fortran-style subscripts used below (e.g. a[i + j*a_dim1], work[1]) */
+/* address the intended elements of the caller's 0-based C arrays. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --jpvt;
+    --work;
+
+    /* Function Body */
+    mn = min(*m,*n);
+/* ISMIN/ISMAX are the WORK offsets of the two length-MN vectors kept */
+/* up to date by the incremental condition estimator (DLAIC1) below. */
+    ismin = mn + 1;
+    ismax = (mn << 1) + 1;
+
+/* Test the input arguments. */
+
+    *info = 0;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    } else /* if(complicated condition) */ {
+/* Computing MAX */
+	i__1 = max(1,*m);
+	if (*ldb < max(i__1,*n)) {
+	    *info = -7;
+	}
+    }
+
+/* Figure out optimal block size */
+
+    if (*info == 0) {
+	if (mn == 0 || *nrhs == 0) {
+	    lwkmin = 1;
+	    lwkopt = 1;
+	} else {
+/* Query the tuned block sizes of the four kernels used below; */
+/* NB is their maximum and sizes the blocked workspace estimate. */
+	    nb1 = ilaenv_(&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1);
+	    nb2 = ilaenv_(&c__1, "DGERQF", " ", m, n, &c_n1, &c_n1);
+	    nb3 = ilaenv_(&c__1, "DORMQR", " ", m, n, nrhs, &c_n1);
+	    nb4 = ilaenv_(&c__1, "DORMRQ", " ", m, n, nrhs, &c_n1);
+/* Computing MAX */
+	    i__1 = max(nb1,nb2), i__1 = max(i__1,nb3);
+	    nb = max(i__1,nb4);
+/* Computing MAX */
+	    i__1 = mn << 1, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = mn +
+		     *nrhs;
+	    lwkmin = mn + max(i__1,i__2);
+/* Computing MAX */
+	    i__1 = lwkmin, i__2 = mn + (*n << 1) + nb * (*n + 1), i__1 = max(
+		    i__1,i__2), i__2 = (mn << 1) + nb * *nrhs;
+	    lwkopt = max(i__1,i__2);
+	}
+	work[1] = (doublereal) lwkopt;
+
+	if (*lwork < lwkmin && ! lquery) {
+	    *info = -12;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGELSY", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (mn == 0 || *nrhs == 0) {
+	*rank = 0;
+	return 0;
+    }
+
+/* Get machine parameters */
+
+    smlnum = dlamch_("S") / dlamch_("P");
+    bignum = 1. / smlnum;
+    dlabad_(&smlnum, &bignum);
+
+/* Scale A, B if max entries outside range [SMLNUM,BIGNUM] */
+
+    anrm = dlange_("M", m, n, &a[a_offset], lda, &work[1]);
+    iascl = 0;
+    if (anrm > 0. && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+	dlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+		info);
+	iascl = 1;
+    } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+	dlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+		info);
+	iascl = 2;
+    } else if (anrm == 0.) {
+
+/* Matrix all zero. Return zero solution. */
+
+	i__1 = max(*m,*n);
+	dlaset_("F", &i__1, nrhs, &c_b31, &c_b31, &b[b_offset], ldb);
+	*rank = 0;
+	goto L70;
+    }
+
+    bnrm = dlange_("M", m, nrhs, &b[b_offset], ldb, &work[1]);
+    ibscl = 0;
+    if (bnrm > 0. && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+	dlascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+		 info);
+	ibscl = 1;
+    } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+	dlascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+		 info);
+	ibscl = 2;
+    }
+
+/* Compute QR factorization with column pivoting of A: */
+/* A * P = Q * R */
+
+    i__1 = *lwork - mn;
+    dgeqp3_(m, n, &a[a_offset], lda, &jpvt[1], &work[1], &work[mn + 1], &i__1,
+	     info);
+/* WORK(MN+1) now holds DGEQP3's optimal-lwork report; fold it into the */
+/* running workspace-usage estimate WSIZE returned via WORK(1). */
+    wsize = mn + work[mn + 1];
+
+/* workspace: MN+2*N+NB*(N+1). */
+/* Details of Householder rotations stored in WORK(1:MN). */
+
+/* Determine RANK using incremental condition estimation */
+
+    work[ismin] = 1.;
+    work[ismax] = 1.;
+    smax = (d__1 = a[a_dim1 + 1], abs(d__1));
+    smin = smax;
+    if ((d__1 = a[a_dim1 + 1], abs(d__1)) == 0.) {
+	*rank = 0;
+	i__1 = max(*m,*n);
+	dlaset_("F", &i__1, nrhs, &c_b31, &c_b31, &b[b_offset], ldb);
+	goto L70;
+    } else {
+	*rank = 1;
+    }
+
+/* Grow RANK one column at a time while the estimated smallest/largest */
+/* singular values of R(1:RANK,1:RANK) keep its condition number below */
+/* 1/RCOND; DLAIC1 updates the estimates incrementally. */
+L10:
+    if (*rank < mn) {
+	i__ = *rank + 1;
+	dlaic1_(&c__2, rank, &work[ismin], &smin, &a[i__ * a_dim1 + 1], &a[
+		i__ + i__ * a_dim1], &sminpr, &s1, &c1);
+	dlaic1_(&c__1, rank, &work[ismax], &smax, &a[i__ * a_dim1 + 1], &a[
+		i__ + i__ * a_dim1], &smaxpr, &s2, &c2);
+
+	if (smaxpr * *rcond <= sminpr) {
+	    i__1 = *rank;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		work[ismin + i__ - 1] = s1 * work[ismin + i__ - 1];
+		work[ismax + i__ - 1] = s2 * work[ismax + i__ - 1];
+/* L20: */
+	    }
+	    work[ismin + *rank] = c1;
+	    work[ismax + *rank] = c2;
+	    smin = sminpr;
+	    smax = smaxpr;
+	    ++(*rank);
+	    goto L10;
+	}
+    }
+
+/* workspace: 3*MN. */
+
+/* Logically partition R = [ R11 R12 ] */
+/* [ 0 R22 ] */
+/* where R11 = R(1:RANK,1:RANK) */
+
+/* [R11,R12] = [ T11, 0 ] * Y */
+
+    if (*rank < *n) {
+	i__1 = *lwork - (mn << 1);
+	dtzrzf_(rank, n, &a[a_offset], lda, &work[mn + 1], &work[(mn << 1) +
+		1], &i__1, info);
+    }
+
+/* workspace: 2*MN. */
+/* Details of Householder rotations stored in WORK(MN+1:2*MN) */
+
+/* B(1:M,1:NRHS) := Q' * B(1:M,1:NRHS) */
+
+    i__1 = *lwork - (mn << 1);
+    dormqr_("Left", "Transpose", m, nrhs, &mn, &a[a_offset], lda, &work[1], &
+	    b[b_offset], ldb, &work[(mn << 1) + 1], &i__1, info);
+/* Computing MAX */
+    d__1 = wsize, d__2 = (mn << 1) + work[(mn << 1) + 1];
+    wsize = max(d__1,d__2);
+
+/* workspace: 2*MN+NB*NRHS. */
+
+/* B(1:RANK,1:NRHS) := inv(T11) * B(1:RANK,1:NRHS) */
+
+    dtrsm_("Left", "Upper", "No transpose", "Non-unit", rank, nrhs, &c_b54, &
+	    a[a_offset], lda, &b[b_offset], ldb);
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *n;
+	for (i__ = *rank + 1; i__ <= i__2; ++i__) {
+	    b[i__ + j * b_dim1] = 0.;
+/* L30: */
+	}
+/* L40: */
+    }
+
+/* B(1:N,1:NRHS) := Y' * B(1:N,1:NRHS) */
+
+    if (*rank < *n) {
+	i__1 = *n - *rank;
+	i__2 = *lwork - (mn << 1);
+	dormrz_("Left", "Transpose", n, nrhs, rank, &i__1, &a[a_offset], lda,
+		&work[mn + 1], &b[b_offset], ldb, &work[(mn << 1) + 1], &i__2,
+		 info);
+    }
+
+/* workspace: 2*MN+NRHS. */
+
+/* B(1:N,1:NRHS) := P * B(1:N,1:NRHS) */
+
+/* Apply the column permutation P one right-hand side at a time: */
+/* scatter column j of B into WORK at pre-pivot positions JPVT(i), */
+/* then copy the permuted column back with DCOPY. */
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    work[jpvt[i__]] = b[i__ + j * b_dim1];
+/* L50: */
+	}
+	dcopy_(n, &work[1], &c__1, &b[j * b_dim1 + 1], &c__1);
+/* L60: */
+    }
+
+/* workspace: N. */
+
+/* Undo scaling */
+
+    if (iascl == 1) {
+	dlascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+		 info);
+	dlascl_("U", &c__0, &c__0, &smlnum, &anrm, rank, rank, &a[a_offset],
+		lda, info);
+    } else if (iascl == 2) {
+	dlascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+		 info);
+	dlascl_("U", &c__0, &c__0, &bignum, &anrm, rank, rank, &a[a_offset],
+		lda, info);
+    }
+    if (ibscl == 1) {
+	dlascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+		 info);
+    } else if (ibscl == 2) {
+	dlascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+		 info);
+    }
+
+L70:
+    work[1] = (doublereal) lwkopt;
+
+    return 0;
+
+/* End of DGELSY */
+
+} /* dgelsy_ */
diff --git a/contrib/libs/clapack/dgeql2.c b/contrib/libs/clapack/dgeql2.c
new file mode 100644
index 0000000000..07cd9663ae
--- /dev/null
+++ b/contrib/libs/clapack/dgeql2.c
@@ -0,0 +1,159 @@
+/* dgeql2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dgeql2_(integer *m, integer *n, doublereal *a, integer *
+	lda, doublereal *tau, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, k;
+    doublereal aii;
+    extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *), dlarfp_(integer *, doublereal *,
+	    doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGEQL2 computes a QL factorization of a real m by n matrix A: */
+/* A = Q * L. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the m by n matrix A. */
+/* On exit, if m >= n, the lower triangle of the subarray */
+/* A(m-n+1:m,1:n) contains the n by n lower triangular matrix L; */
+/* if m <= n, the elements on and below the (n-m)-th */
+/* superdiagonal contain the m by n lower trapezoidal matrix L; */
+/* the remaining elements, with the array TAU, represent the */
+/* orthogonal matrix Q as a product of elementary reflectors */
+/* (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(m-k+i+1:m) = 0 and v(m-k+i) = 1; v(1:m-k+i-1) is stored on exit in */
+/* A(1:m-k+i-1,n-k+i), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+/* Shift pointers back by one so the 1-based Fortran-style subscripts */
+/* below (a[row + col*a_dim1], tau[i], work[1]) address the right data. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGEQL2", &i__1);
+	return 0;
+    }
+
+    k = min(*m,*n);
+
+/* QL proceeds over the LAST k columns from right to left: step i */
+/* builds the reflector for column n-k+i, pivoting on A(m-k+i,n-k+i). */
+    for (i__ = k; i__ >= 1; --i__) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(1:m-k+i-1,n-k+i) */
+
+	i__1 = *m - k + i__;
+	dlarfp_(&i__1, &a[*m - k + i__ + (*n - k + i__) * a_dim1], &a[(*n - k
+		+ i__) * a_dim1 + 1], &c__1, &tau[i__]);
+
+/* Apply H(i) to A(1:m-k+i,1:n-k+i-1) from the left */
+
+/* Temporarily overwrite the pivot element with 1 so DLARF sees the */
+/* implicit unit entry of the reflector vector v; restore it after. */
+	aii = a[*m - k + i__ + (*n - k + i__) * a_dim1];
+	a[*m - k + i__ + (*n - k + i__) * a_dim1] = 1.;
+	i__1 = *m - k + i__;
+	i__2 = *n - k + i__ - 1;
+	dlarf_("Left", &i__1, &i__2, &a[(*n - k + i__) * a_dim1 + 1], &c__1, &
+		tau[i__], &a[a_offset], lda, &work[1]);
+	a[*m - k + i__ + (*n - k + i__) * a_dim1] = aii;
+/* L10: */
+    }
+    return 0;
+
+/* End of DGEQL2 */
+
+} /* dgeql2_ */
diff --git a/contrib/libs/clapack/dgeqlf.c b/contrib/libs/clapack/dgeqlf.c
new file mode 100644
index 0000000000..0b9779554d
--- /dev/null
+++ b/contrib/libs/clapack/dgeqlf.c
@@ -0,0 +1,270 @@
+/* dgeqlf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int dgeqlf_(integer *m, integer *n, doublereal *a, integer *
+	lda, doublereal *tau, doublereal *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, k, ib, nb, ki, kk, mu, nu, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int dgeql2_(integer *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *), dlarfb_(char *,
+	     char *, char *, char *, integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal
+	    *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGEQLF computes a QL factorization of a real M-by-N matrix A: */
+/* A = Q * L. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* if m >= n, the lower triangle of the subarray */
+/* A(m-n+1:m,1:n) contains the N-by-N lower triangular matrix L; */
+/* if m <= n, the elements on and below the (n-m)-th */
+/* superdiagonal contain the M-by-N lower trapezoidal matrix L; */
+/* the remaining elements, with the array TAU, represent the */
+/* orthogonal matrix Q as a product of elementary reflectors */
+/* (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(m-k+i+1:m) = 0 and v(m-k+i) = 1; v(1:m-k+i-1) is stored on exit in */
+/* A(1:m-k+i-1,n-k+i), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+/* Shift pointers back by one so the 1-based Fortran-style subscripts */
+/* below address the caller's 0-based C arrays correctly. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+
+    if (*info == 0) {
+	k = min(*m,*n);
+	if (k == 0) {
+	    lwkopt = 1;
+	} else {
+	    nb = ilaenv_(&c__1, "DGEQLF", " ", m, n, &c_n1, &c_n1);
+	    lwkopt = *n * nb;
+	}
+	work[1] = (doublereal) lwkopt;
+
+	if (*lwork < max(1,*n) && ! lquery) {
+	    *info = -7;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGEQLF", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (k == 0) {
+	return 0;
+    }
+
+/* Choose between blocked and unblocked code: NX is the crossover */
+/* order below which blocking stops paying off, NBMIN the smallest */
+/* block size worth using when workspace is tight. */
+    nbmin = 2;
+    nx = 1;
+    iws = *n;
+    if (nb > 1 && nb < k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+	i__1 = 0, i__2 = ilaenv_(&c__3, "DGEQLF", " ", m, n, &c_n1, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *n;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "DGEQLF", " ", m, n, &c_n1, &
+			c_n1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially. */
+/* The last kk columns are handled by the block method. */
+
+/* KK trailing reflectors are produced by this blocked loop (working */
+/* right to left in steps of NB); the leading MU-by-NU corner left */
+/* over is finished by the unblocked DGEQL2 call below. */
+	ki = (k - nx - 1) / nb * nb;
+/* Computing MIN */
+	i__1 = k, i__2 = ki + nb;
+	kk = min(i__1,i__2);
+
+	i__1 = k - kk + 1;
+	i__2 = -nb;
+	for (i__ = k - kk + ki + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__
+		+= i__2) {
+/* Computing MIN */
+	    i__3 = k - i__ + 1;
+	    ib = min(i__3,nb);
+
+/* Compute the QL factorization of the current block */
+/* A(1:m-k+i+ib-1,n-k+i:n-k+i+ib-1) */
+
+	    i__3 = *m - k + i__ + ib - 1;
+	    dgeql2_(&i__3, &ib, &a[(*n - k + i__) * a_dim1 + 1], lda, &tau[
+		    i__], &work[1], &iinfo);
+	    if (*n - k + i__ > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+		i__3 = *m - k + i__ + ib - 1;
+		dlarft_("Backward", "Columnwise", &i__3, &ib, &a[(*n - k +
+			i__) * a_dim1 + 1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(1:m-k+i+ib-1,1:n-k+i-1) from the left */
+
+		i__3 = *m - k + i__ + ib - 1;
+		i__4 = *n - k + i__ - 1;
+		dlarfb_("Left", "Transpose", "Backward", "Columnwise", &i__3,
+			&i__4, &ib, &a[(*n - k + i__) * a_dim1 + 1], lda, &
+			work[1], &ldwork, &a[a_offset], lda, &work[ib + 1], &
+			ldwork);
+	    }
+/* L10: */
+	}
+	mu = *m - k + i__ + nb - 1;
+	nu = *n - k + i__ + nb - 1;
+    } else {
+	mu = *m;
+	nu = *n;
+    }
+
+/* Use unblocked code to factor the last or only block */
+
+    if (mu > 0 && nu > 0) {
+	dgeql2_(&mu, &nu, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
+    }
+
+/* WORK(1) reports the workspace actually used (IWS) on this path. */
+    work[1] = (doublereal) iws;
+    return 0;
+
+/* End of DGEQLF */
+
+} /* dgeqlf_ */
diff --git a/contrib/libs/clapack/dgeqp3.c b/contrib/libs/clapack/dgeqp3.c
new file mode 100644
index 0000000000..e3fda572c2
--- /dev/null
+++ b/contrib/libs/clapack/dgeqp3.c
@@ -0,0 +1,358 @@
+/* dgeqp3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int dgeqp3_(integer *m, integer *n, doublereal *a, integer *
+ lda, integer *jpvt, doublereal *tau, doublereal *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer j, jb, na, nb, sm, sn, nx, fjb, iws, nfxd;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ integer nbmin, minmn;
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ integer minws;
+ extern /* Subroutine */ int dlaqp2_(integer *, integer *, integer *,
+ doublereal *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, doublereal *), dgeqrf_(integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int dlaqps_(integer *, integer *, integer *,
+ integer *, integer *, doublereal *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *);
+ integer topbmn, sminmn;
+ extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGEQP3 computes a QR factorization with column pivoting of a */
+/* matrix A: A*P = Q*R using Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the upper triangle of the array contains the */
+/* min(M,N)-by-N upper trapezoidal matrix R; the elements below */
+/* the diagonal, together with the array TAU, represent the */
+/* orthogonal matrix Q as a product of min(M,N) elementary */
+/* reflectors. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(J).ne.0, the J-th column of A is permuted */
+/* to the front of A*P (a leading column); if JPVT(J)=0, */
+/* the J-th column of A is a free column. */
+/* On exit, if JPVT(J)=K, then the J-th column of A*P was the */
+/* the K-th column of A. */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO=0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= 3*N+1. */
+/* For optimal performance LWORK >= 2*N+( N+1 )*NB, where NB */
+/* is the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real/complex scalar, and v is a real/complex vector */
+/* with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in */
+/* A(i+1:m,i), and tau in TAU(i). */
+
+/* Based on contributions by */
+/* G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+/* X. Sun, Computer Science Dept., Duke University, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test input arguments */
+/* ==================== */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --jpvt;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+
+ if (*info == 0) {
+ minmn = min(*m,*n);
+ if (minmn == 0) {
+ iws = 1;
+ lwkopt = 1;
+ } else {
+ iws = *n * 3 + 1;
+ nb = ilaenv_(&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1);
+ lwkopt = (*n << 1) + (*n + 1) * nb;
+ }
+ work[1] = (doublereal) lwkopt;
+
+ if (*lwork < iws && ! lquery) {
+ *info = -8;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGEQP3", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (minmn == 0) {
+ return 0;
+ }
+
+/* Move initial columns up front. */
+
+ nfxd = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (jpvt[j] != 0) {
+ if (j != nfxd) {
+ dswap_(m, &a[j * a_dim1 + 1], &c__1, &a[nfxd * a_dim1 + 1], &
+ c__1);
+ jpvt[j] = jpvt[nfxd];
+ jpvt[nfxd] = j;
+ } else {
+ jpvt[j] = j;
+ }
+ ++nfxd;
+ } else {
+ jpvt[j] = j;
+ }
+/* L10: */
+ }
+ --nfxd;
+
+/* Factorize fixed columns */
+/* ======================= */
+
+/* Compute the QR factorization of fixed columns and update */
+/* remaining columns. */
+
+ if (nfxd > 0) {
+ na = min(*m,nfxd);
+/* CC CALL DGEQR2( M, NA, A, LDA, TAU, WORK, INFO ) */
+ dgeqrf_(m, &na, &a[a_offset], lda, &tau[1], &work[1], lwork, info);
+/* Computing MAX */
+ i__1 = iws, i__2 = (integer) work[1];
+ iws = max(i__1,i__2);
+ if (na < *n) {
+/* CC CALL DORM2R( 'Left', 'Transpose', M, N-NA, NA, A, LDA, */
+/* CC $ TAU, A( 1, NA+1 ), LDA, WORK, INFO ) */
+ i__1 = *n - na;
+ dormqr_("Left", "Transpose", m, &i__1, &na, &a[a_offset], lda, &
+ tau[1], &a[(na + 1) * a_dim1 + 1], lda, &work[1], lwork,
+ info);
+/* Computing MAX */
+ i__1 = iws, i__2 = (integer) work[1];
+ iws = max(i__1,i__2);
+ }
+ }
+
+/* Factorize free columns */
+/* ====================== */
+
+ if (nfxd < minmn) {
+
+ sm = *m - nfxd;
+ sn = *n - nfxd;
+ sminmn = minmn - nfxd;
+
+/* Determine the block size. */
+
+ nb = ilaenv_(&c__1, "DGEQRF", " ", &sm, &sn, &c_n1, &c_n1);
+ nbmin = 2;
+ nx = 0;
+
+ if (nb > 1 && nb < sminmn) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "DGEQRF", " ", &sm, &sn, &c_n1, &
+ c_n1);
+ nx = max(i__1,i__2);
+
+
+ if (nx < sminmn) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ minws = (sn << 1) + (sn + 1) * nb;
+ iws = max(iws,minws);
+ if (*lwork < minws) {
+
+/* Not enough workspace to use optimal NB: Reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = (*lwork - (sn << 1)) / (sn + 1);
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DGEQRF", " ", &sm, &sn, &
+ c_n1, &c_n1);
+ nbmin = max(i__1,i__2);
+
+
+ }
+ }
+ }
+
+/* Initialize partial column norms. The first N elements of work */
+/* store the exact column norms. */
+
+ i__1 = *n;
+ for (j = nfxd + 1; j <= i__1; ++j) {
+ work[j] = dnrm2_(&sm, &a[nfxd + 1 + j * a_dim1], &c__1);
+ work[*n + j] = work[j];
+/* L20: */
+ }
+
+ if (nb >= nbmin && nb < sminmn && nx < sminmn) {
+
+/* Use blocked code initially. */
+
+ j = nfxd + 1;
+
+/* Compute factorization: while loop. */
+
+
+ topbmn = minmn - nx;
+L30:
+ if (j <= topbmn) {
+/* Computing MIN */
+ i__1 = nb, i__2 = topbmn - j + 1;
+ jb = min(i__1,i__2);
+
+/* Factorize JB columns among columns J:N. */
+
+ i__1 = *n - j + 1;
+ i__2 = j - 1;
+ i__3 = *n - j + 1;
+ dlaqps_(m, &i__1, &i__2, &jb, &fjb, &a[j * a_dim1 + 1], lda, &
+ jpvt[j], &tau[j], &work[j], &work[*n + j], &work[(*n
+ << 1) + 1], &work[(*n << 1) + jb + 1], &i__3);
+
+ j += fjb;
+ goto L30;
+ }
+ } else {
+ j = nfxd + 1;
+ }
+
+/* Use unblocked code to factor the last or only block. */
+
+
+ if (j <= minmn) {
+ i__1 = *n - j + 1;
+ i__2 = j - 1;
+ dlaqp2_(m, &i__1, &i__2, &a[j * a_dim1 + 1], lda, &jpvt[j], &tau[
+ j], &work[j], &work[*n + j], &work[(*n << 1) + 1]);
+ }
+
+ }
+
+ work[1] = (doublereal) iws;
+ return 0;
+
+/* End of DGEQP3 */
+
+} /* dgeqp3_ */
diff --git a/contrib/libs/clapack/dgeqpf.c b/contrib/libs/clapack/dgeqpf.c
new file mode 100644
index 0000000000..10743f5f5f
--- /dev/null
+++ b/contrib/libs/clapack/dgeqpf.c
@@ -0,0 +1,304 @@
+/* dgeqpf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dgeqpf_(integer *m, integer *n, doublereal *a, integer *
+ lda, integer *jpvt, doublereal *tau, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, ma, mn;
+ doublereal aii;
+ integer pvt;
+ doublereal temp;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ doublereal temp2, tol3z;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *);
+ integer itemp;
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dgeqr2_(integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *),
+ dorm2r_(char *, char *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *);
+ extern doublereal dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dlarfp_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK deprecated driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine DGEQP3. */
+
+/* DGEQPF computes a QR factorization with column pivoting of a */
+/* real M-by-N matrix A: A*P = Q*R. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0 */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the upper triangle of the array contains the */
+/* min(M,N)-by-N upper triangular matrix R; the elements */
+/* below the diagonal, together with the array TAU, */
+/* represent the orthogonal matrix Q as a product of */
+/* min(m,n) elementary reflectors. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(i) .ne. 0, the i-th column of A is permuted */
+/* to the front of A*P (a leading column); if JPVT(i) = 0, */
+/* the i-th column of A is a free column. */
+/* On exit, if JPVT(i) = k, then the i-th column of A*P */
+/* was the k-th column of A. */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(n) */
+
+/* Each H(i) has the form */
+
+/* H = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i). */
+
+/* The matrix P is represented in jpvt as follows: If */
+/* jpvt(j) = i */
+/* then the jth column of P is the ith canonical unit vector. */
+
+/* Partial column norm updating strategy modified by */
+/* Z. Drmac and Z. Bujanovic, Dept. of Mathematics, */
+/* University of Zagreb, Croatia. */
+/* June 2006. */
+/* For more details see LAPACK Working Note 176. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --jpvt;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGEQPF", &i__1);
+ return 0;
+ }
+
+ mn = min(*m,*n);
+ tol3z = sqrt(dlamch_("Epsilon"));
+
+/* Move initial columns up front */
+
+ itemp = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (jpvt[i__] != 0) {
+ if (i__ != itemp) {
+ dswap_(m, &a[i__ * a_dim1 + 1], &c__1, &a[itemp * a_dim1 + 1],
+ &c__1);
+ jpvt[i__] = jpvt[itemp];
+ jpvt[itemp] = i__;
+ } else {
+ jpvt[i__] = i__;
+ }
+ ++itemp;
+ } else {
+ jpvt[i__] = i__;
+ }
+/* L10: */
+ }
+ --itemp;
+
+/* Compute the QR factorization and update remaining columns */
+
+ if (itemp > 0) {
+ ma = min(itemp,*m);
+ dgeqr2_(m, &ma, &a[a_offset], lda, &tau[1], &work[1], info);
+ if (ma < *n) {
+ i__1 = *n - ma;
+ dorm2r_("Left", "Transpose", m, &i__1, &ma, &a[a_offset], lda, &
+ tau[1], &a[(ma + 1) * a_dim1 + 1], lda, &work[1], info);
+ }
+ }
+
+ if (itemp < mn) {
+
+/* Initialize partial column norms. The first n elements of */
+/* work store the exact column norms. */
+
+ i__1 = *n;
+ for (i__ = itemp + 1; i__ <= i__1; ++i__) {
+ i__2 = *m - itemp;
+ work[i__] = dnrm2_(&i__2, &a[itemp + 1 + i__ * a_dim1], &c__1);
+ work[*n + i__] = work[i__];
+/* L20: */
+ }
+
+/* Compute factorization */
+
+ i__1 = mn;
+ for (i__ = itemp + 1; i__ <= i__1; ++i__) {
+
+/* Determine ith pivot column and swap if necessary */
+
+ i__2 = *n - i__ + 1;
+ pvt = i__ - 1 + idamax_(&i__2, &work[i__], &c__1);
+
+ if (pvt != i__) {
+ dswap_(m, &a[pvt * a_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &
+ c__1);
+ itemp = jpvt[pvt];
+ jpvt[pvt] = jpvt[i__];
+ jpvt[i__] = itemp;
+ work[pvt] = work[i__];
+ work[*n + pvt] = work[*n + i__];
+ }
+
+/* Generate elementary reflector H(i) */
+
+ if (i__ < *m) {
+ i__2 = *m - i__ + 1;
+ dlarfp_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + 1 + i__ *
+ a_dim1], &c__1, &tau[i__]);
+ } else {
+ dlarfp_(&c__1, &a[*m + *m * a_dim1], &a[*m + *m * a_dim1], &
+ c__1, &tau[*m]);
+ }
+
+ if (i__ < *n) {
+
+/* Apply H(i) to A(i:m,i+1:n) from the left */
+
+ aii = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.;
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ dlarf_("LEFT", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &
+ tau[i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[(*
+ n << 1) + 1]);
+ a[i__ + i__ * a_dim1] = aii;
+ }
+
+/* Update partial column norms */
+
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ if (work[j] != 0.) {
+
+/* NOTE: The following 4 lines follow from the analysis in */
+/* Lapack Working Note 176. */
+
+ temp = (d__1 = a[i__ + j * a_dim1], abs(d__1)) / work[j];
+/* Computing MAX */
+ d__1 = 0., d__2 = (temp + 1.) * (1. - temp);
+ temp = max(d__1,d__2);
+/* Computing 2nd power */
+ d__1 = work[j] / work[*n + j];
+ temp2 = temp * (d__1 * d__1);
+ if (temp2 <= tol3z) {
+ if (*m - i__ > 0) {
+ i__3 = *m - i__;
+ work[j] = dnrm2_(&i__3, &a[i__ + 1 + j * a_dim1],
+ &c__1);
+ work[*n + j] = work[j];
+ } else {
+ work[j] = 0.;
+ work[*n + j] = 0.;
+ }
+ } else {
+ work[j] *= sqrt(temp);
+ }
+ }
+/* L30: */
+ }
+
+/* L40: */
+ }
+ }
+ return 0;
+
+/* End of DGEQPF */
+
+} /* dgeqpf_ */
diff --git a/contrib/libs/clapack/dgeqr2.c b/contrib/libs/clapack/dgeqr2.c
new file mode 100644
index 0000000000..663388fdf7
--- /dev/null
+++ b/contrib/libs/clapack/dgeqr2.c
@@ -0,0 +1,161 @@
+/* dgeqr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dgeqr2_(integer *m, integer *n, doublereal *a, integer *
+ lda, doublereal *tau, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, k;
+ doublereal aii;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *), dlarfp_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGEQR2 computes a QR factorization of a real m by n matrix A: */
+/* A = Q * R. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the m by n matrix A. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(m,n) by n upper trapezoidal matrix R (R is */
+/* upper triangular if m >= n); the elements below the diagonal, */
+/* with the array TAU, represent the orthogonal matrix Q as a */
+/* product of elementary reflectors (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), */
+/* and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGEQR2", &i__1);
+ return 0;
+ }
+
+ k = min(*m,*n);
+
+ i__1 = k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ dlarfp_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3, *m)+ i__ * a_dim1]
+, &c__1, &tau[i__]);
+ if (i__ < *n) {
+
+/* Apply H(i) to A(i:m,i+1:n) from the left */
+
+ aii = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.;
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ dlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &tau[
+ i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ a[i__ + i__ * a_dim1] = aii;
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of DGEQR2 */
+
+} /* dgeqr2_ */
diff --git a/contrib/libs/clapack/dgeqrf.c b/contrib/libs/clapack/dgeqrf.c
new file mode 100644
index 0000000000..1062e27d98
--- /dev/null
+++ b/contrib/libs/clapack/dgeqrf.c
@@ -0,0 +1,252 @@
+/* dgeqrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int dgeqrf_(integer *m, integer *n, doublereal *a, integer *
+ lda, doublereal *tau, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int dgeqr2_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *), dlarfb_(char *,
+ char *, char *, char *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal
+ *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGEQRF computes a QR factorization of a real M-by-N matrix A: */
+/* A = Q * R. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(M,N)-by-N upper trapezoidal matrix R (R is */
+/* upper triangular if m >= n); the elements below the diagonal, */
+/* with the array TAU, represent the orthogonal matrix Q as a */
+/* product of min(m,n) elementary reflectors (see Further */
+/* Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), */
+/* and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ work[1] = (doublereal) lwkopt;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGEQRF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ k = min(*m,*n);
+ if (k == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *n;
+ if (nb > 1 && nb < k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "DGEQRF", " ", m, n, &c_n1, &c_n1);
+ nx = max(i__1,i__2);
+ if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DGEQRF", " ", m, n, &c_n1, &
+ c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially */
+
+ i__1 = k - nx;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = k - i__ + 1;
+ ib = min(i__3,nb);
+
+/* Compute the QR factorization of the current block */
+/* A(i:m,i:i+ib-1) */
+
+ i__3 = *m - i__ + 1;
+ dgeqr2_(&i__3, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
+ 1], &iinfo);
+ if (i__ + ib <= *n) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+ i__3 = *m - i__ + 1;
+ dlarft_("Forward", "Columnwise", &i__3, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(i:m,i+ib:n) from the left */
+
+ i__3 = *m - i__ + 1;
+ i__4 = *n - i__ - ib + 1;
+ dlarfb_("Left", "Transpose", "Forward", "Columnwise", &i__3, &
+ i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
+ ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, &work[ib
+ + 1], &ldwork);
+ }
+/* L10: */
+ }
+ } else {
+ i__ = 1;
+ }
+
+/* Use unblocked code to factor the last or only block. */
+
+ if (i__ <= k) {
+ i__2 = *m - i__ + 1;
+ i__1 = *n - i__ + 1;
+ dgeqr2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
+, &iinfo);
+ }
+
+ work[1] = (doublereal) iws;
+ return 0;
+
+/* End of DGEQRF */
+
+} /* dgeqrf_ */
diff --git a/contrib/libs/clapack/dgerfs.c b/contrib/libs/clapack/dgerfs.c
new file mode 100644
index 0000000000..d8f941de2b
--- /dev/null
+++ b/contrib/libs/clapack/dgerfs.c
@@ -0,0 +1,424 @@
+/* dgerfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b15 = -1.;
+static doublereal c_b17 = 1.;
+
+/* Subroutine */ int dgerfs_(char *trans, integer *n, integer *nrhs,
+ doublereal *a, integer *lda, doublereal *af, integer *ldaf, integer *
+ ipiv, doublereal *b, integer *ldb, doublereal *x, integer *ldx,
+ doublereal *ferr, doublereal *berr, doublereal *work, integer *iwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+ x_offset, i__1, i__2, i__3;
+ doublereal d__1, d__2, d__3;
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal s, xk;
+ integer nz;
+ doublereal eps;
+ integer kase;
+ doublereal safe1, safe2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+ integer isave[3];
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), daxpy_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *);
+ integer count;
+ extern /* Subroutine */ int dlacn2_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ extern doublereal dlamch_(char *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *), dgetrs_(
+ char *, integer *, integer *, doublereal *, integer *, integer *,
+ doublereal *, integer *, integer *);
+ logical notran;
+ char transt[1];
+ doublereal lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGERFS improves the computed solution to a system of linear */
+/* equations and provides error bounds and backward error estimates for */
+/* the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose = Transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The original N-by-N matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input) DOUBLE PRECISION array, dimension (LDAF,N) */
+/* The factors L and U from the factorization A = P*L*U */
+/* as computed by DGETRF. */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices from DGETRF; for 1<=i<=N, row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by DGETRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ af_dim1 = *ldaf;
+ af_offset = 1 + af_dim1;
+ af -= af_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldaf < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldx < max(1,*n)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGERFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.;
+ berr[j] = 0.;
+/* L10: */
+ }
+ return 0;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = *n + 1;
+ eps = dlamch_("Epsilon");
+ safmin = dlamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A, A**T, or A**H, depending on TRANS. */
+
+ dcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+ dgemv_(trans, n, n, &c_b15, &a[a_offset], lda, &x[j * x_dim1 + 1], &
+ c__1, &c_b17, &work[*n + 1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] = (d__1 = b[i__ + j * b_dim1], abs(d__1));
+/* L30: */
+ }
+
+/* Compute abs(op(A))*abs(X) + abs(B). */
+
+ if (notran) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ work[i__] += (d__1 = a[i__ + k * a_dim1], abs(d__1)) * xk;
+/* L40: */
+ }
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ s += (d__1 = a[i__ + k * a_dim1], abs(d__1)) * (d__2 = x[
+ i__ + j * x_dim1], abs(d__2));
+/* L60: */
+ }
+ work[k] += s;
+/* L70: */
+ }
+ }
+ s = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+/* Computing MAX */
+ d__2 = s, d__3 = (d__1 = work[*n + i__], abs(d__1)) / work[
+ i__];
+ s = max(d__2,d__3);
+ } else {
+/* Computing MAX */
+ d__2 = s, d__3 = ((d__1 = work[*n + i__], abs(d__1)) + safe1)
+ / (work[i__] + safe1);
+ s = max(d__2,d__3);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ dgetrs_(trans, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[*n
+ + 1], n, info);
+ daxpy_(n, &c_b17, &work[*n + 1], &c__1, &x[j * x_dim1 + 1], &c__1)
+ ;
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use DLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+ work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+ work[i__];
+ } else {
+ work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+ work[i__] + safe1;
+ }
+/* L90: */
+ }
+
+ kase = 0;
+L100:
+ dlacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+ kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)**T). */
+
+ dgetrs_(transt, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &
+ work[*n + 1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] = work[i__] * work[*n + i__];
+/* L110: */
+ }
+ } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] = work[i__] * work[*n + i__];
+/* L120: */
+ }
+ dgetrs_(trans, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &
+ work[*n + 1], n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = lstres, d__3 = (d__1 = x[i__ + j * x_dim1], abs(d__1));
+ lstres = max(d__2,d__3);
+/* L130: */
+ }
+ if (lstres != 0.) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of DGERFS */
+
+} /* dgerfs_ */
diff --git a/contrib/libs/clapack/dgerq2.c b/contrib/libs/clapack/dgerq2.c
new file mode 100644
index 0000000000..13ccabfa37
--- /dev/null
+++ b/contrib/libs/clapack/dgerq2.c
@@ -0,0 +1,155 @@
+/* dgerq2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dgerq2_(integer *m, integer *n, doublereal *a, integer *
+ lda, doublereal *tau, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, k;
+ doublereal aii;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *), dlarfp_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGERQ2 computes an RQ factorization of a real m by n matrix A: */
+/* A = R * Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the m by n matrix A. */
+/* On exit, if m <= n, the upper triangle of the subarray */
+/* A(1:m,n-m+1:n) contains the m by m upper triangular matrix R; */
+/* if m >= n, the elements on and above the (m-n)-th subdiagonal */
+/* contain the m by n upper trapezoidal matrix R; the remaining */
+/* elements, with the array TAU, represent the orthogonal matrix */
+/* Q as a product of elementary reflectors (see Further */
+/* Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (M) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(n-k+i+1:n) = 0 and v(n-k+i) = 1; v(1:n-k+i-1) is stored on exit in */
+/* A(m-k+i,1:n-k+i-1), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGERQ2", &i__1);
+ return 0;
+ }
+
+ k = min(*m,*n);
+
+ for (i__ = k; i__ >= 1; --i__) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(m-k+i,1:n-k+i-1) */
+
+ i__1 = *n - k + i__;
+ dlarfp_(&i__1, &a[*m - k + i__ + (*n - k + i__) * a_dim1], &a[*m - k
+ + i__ + a_dim1], lda, &tau[i__]);
+
+/* Apply H(i) to A(1:m-k+i-1,1:n-k+i) from the right */
+
+ aii = a[*m - k + i__ + (*n - k + i__) * a_dim1];
+ a[*m - k + i__ + (*n - k + i__) * a_dim1] = 1.;
+ i__1 = *m - k + i__ - 1;
+ i__2 = *n - k + i__;
+ dlarf_("Right", &i__1, &i__2, &a[*m - k + i__ + a_dim1], lda, &tau[
+ i__], &a[a_offset], lda, &work[1]);
+ a[*m - k + i__ + (*n - k + i__) * a_dim1] = aii;
+/* L10: */
+ }
+ return 0;
+
+/* End of DGERQ2 */
+
+} /* dgerq2_ */
diff --git a/contrib/libs/clapack/dgerqf.c b/contrib/libs/clapack/dgerqf.c
new file mode 100644
index 0000000000..1d78250978
--- /dev/null
+++ b/contrib/libs/clapack/dgerqf.c
@@ -0,0 +1,269 @@
+/* dgerqf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int dgerqf_(integer *m, integer *n, doublereal *a, integer *
+ lda, doublereal *tau, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, k, ib, nb, ki, kk, mu, nu, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int dgerq2_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *), dlarfb_(char *,
+ char *, char *, char *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal
+ *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGERQF computes an RQ factorization of a real M-by-N matrix A: */
+/* A = R * Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* if m <= n, the upper triangle of the subarray */
+/* A(1:m,n-m+1:n) contains the M-by-M upper triangular matrix R; */
+/* if m >= n, the elements on and above the (m-n)-th subdiagonal */
+/* contain the M-by-N upper trapezoidal matrix R; */
+/* the remaining elements, with the array TAU, represent the */
+/* orthogonal matrix Q as a product of min(m,n) elementary */
+/* reflectors (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(n-k+i+1:n) = 0 and v(n-k+i) = 1; v(1:n-k+i-1) is stored on exit in */
+/* A(m-k+i,1:n-k+i-1), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+
+ if (*info == 0) {
+ k = min(*m,*n);
+ if (k == 0) {
+ lwkopt = 1;
+ } else {
+ nb = ilaenv_(&c__1, "DGERQF", " ", m, n, &c_n1, &c_n1);
+ lwkopt = *m * nb;
+ }
+ work[1] = (doublereal) lwkopt;
+
+ if (*lwork < max(1,*m) && ! lquery) {
+ *info = -7;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGERQF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (k == 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 1;
+ iws = *m;
+ if (nb > 1 && nb < k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "DGERQF", " ", m, n, &c_n1, &c_n1);
+ nx = max(i__1,i__2);
+ if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *m;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DGERQF", " ", m, n, &c_n1, &
+ c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially. */
+/* The last kk rows are handled by the block method. */
+
+ ki = (k - nx - 1) / nb * nb;
+/* Computing MIN */
+ i__1 = k, i__2 = ki + nb;
+ kk = min(i__1,i__2);
+
+ i__1 = k - kk + 1;
+ i__2 = -nb;
+ for (i__ = k - kk + ki + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__
+ += i__2) {
+/* Computing MIN */
+ i__3 = k - i__ + 1;
+ ib = min(i__3,nb);
+
+/* Compute the RQ factorization of the current block */
+/* A(m-k+i:m-k+i+ib-1,1:n-k+i+ib-1) */
+
+ i__3 = *n - k + i__ + ib - 1;
+ dgerq2_(&ib, &i__3, &a[*m - k + i__ + a_dim1], lda, &tau[i__], &
+ work[1], &iinfo);
+ if (*m - k + i__ > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ i__3 = *n - k + i__ + ib - 1;
+ dlarft_("Backward", "Rowwise", &i__3, &ib, &a[*m - k + i__ +
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(1:m-k+i-1,1:n-k+i+ib-1) from the right */
+
+ i__3 = *m - k + i__ - 1;
+ i__4 = *n - k + i__ + ib - 1;
+ dlarfb_("Right", "No transpose", "Backward", "Rowwise", &i__3,
+ &i__4, &ib, &a[*m - k + i__ + a_dim1], lda, &work[1],
+ &ldwork, &a[a_offset], lda, &work[ib + 1], &ldwork);
+ }
+/* L10: */
+ }
+ mu = *m - k + i__ + nb - 1;
+ nu = *n - k + i__ + nb - 1;
+ } else {
+ mu = *m;
+ nu = *n;
+ }
+
+/* Use unblocked code to factor the last or only block */
+
+ if (mu > 0 && nu > 0) {
+ dgerq2_(&mu, &nu, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
+ }
+
+ work[1] = (doublereal) iws;
+ return 0;
+
+/* End of DGERQF */
+
+} /* dgerqf_ */
diff --git a/contrib/libs/clapack/dgesc2.c b/contrib/libs/clapack/dgesc2.c
new file mode 100644
index 0000000000..1f7163116b
--- /dev/null
+++ b/contrib/libs/clapack/dgesc2.c
@@ -0,0 +1,176 @@
+/* dgesc2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dgesc2_(integer *n, doublereal *a, integer *lda,
+ doublereal *rhs, integer *ipiv, integer *jpiv, doublereal *scale)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal eps, temp;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *), dlabad_(doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ doublereal bignum;
+ extern /* Subroutine */ int dlaswp_(integer *, doublereal *, integer *,
+ integer *, integer *, integer *, integer *);
+ doublereal smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGESC2 solves a system of linear equations */
+
+/* A * X = scale* RHS */
+
+/* with a general N-by-N matrix A using the LU factorization with */
+/* complete pivoting computed by DGETC2. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the LU part of the factorization of the n-by-n */
+/* matrix A computed by DGETC2: A = P * L * U * Q */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1, N). */
+
+/* RHS (input/output) DOUBLE PRECISION array, dimension (N). */
+/* On entry, the right hand side vector b. */
+/* On exit, the solution vector X. */
+
+/* IPIV (input) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= i <= N, row i of the */
+/* matrix has been interchanged with row IPIV(i). */
+
+/* JPIV (input) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= j <= N, column j of the */
+/* matrix has been interchanged with column JPIV(j). */
+
+/* SCALE (output) DOUBLE PRECISION */
+/* On exit, SCALE contains the scale factor. SCALE is chosen */
+/*          0 <= SCALE <= 1 to prevent overflow in the solution. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/*     Set constant to control overflow */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --rhs;
+ --ipiv;
+ --jpiv;
+
+ /* Function Body */
+ eps = dlamch_("P");
+ smlnum = dlamch_("S") / eps;
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+
+/* Apply permutations IPIV to RHS */
+
+ i__1 = *n - 1;
+ dlaswp_(&c__1, &rhs[1], lda, &c__1, &i__1, &ipiv[1], &c__1);
+
+/* Solve for L part */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ rhs[j] -= a[j + i__ * a_dim1] * rhs[i__];
+/* L10: */
+ }
+/* L20: */
+ }
+
+/* Solve for U part */
+
+ *scale = 1.;
+
+/* Check for scaling */
+
+ i__ = idamax_(n, &rhs[1], &c__1);
+ if (smlnum * 2. * (d__1 = rhs[i__], abs(d__1)) > (d__2 = a[*n + *n *
+ a_dim1], abs(d__2))) {
+ temp = .5 / (d__1 = rhs[i__], abs(d__1));
+ dscal_(n, &temp, &rhs[1], &c__1);
+ *scale *= temp;
+ }
+
+ for (i__ = *n; i__ >= 1; --i__) {
+ temp = 1. / a[i__ + i__ * a_dim1];
+ rhs[i__] *= temp;
+ i__1 = *n;
+ for (j = i__ + 1; j <= i__1; ++j) {
+ rhs[i__] -= rhs[j] * (a[i__ + j * a_dim1] * temp);
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Apply permutations JPIV to the solution (RHS) */
+
+ i__1 = *n - 1;
+ dlaswp_(&c__1, &rhs[1], lda, &c__1, &i__1, &jpiv[1], &c_n1);
+ return 0;
+
+/* End of DGESC2 */
+
+} /* dgesc2_ */
diff --git a/contrib/libs/clapack/dgesdd.c b/contrib/libs/clapack/dgesdd.c
new file mode 100644
index 0000000000..9a1283018a
--- /dev/null
+++ b/contrib/libs/clapack/dgesdd.c
@@ -0,0 +1,1609 @@
+/* dgesdd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__0 = 0;
+static doublereal c_b227 = 0.;
+static doublereal c_b248 = 1.;
+
+/* Subroutine */ int dgesdd_(char *jobz, integer *m, integer *n, doublereal *
+ a, integer *lda, doublereal *s, doublereal *u, integer *ldu,
+ doublereal *vt, integer *ldvt, doublereal *work, integer *lwork,
+ integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+ i__2, i__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, ie, il, ir, iu, blk;
+ doublereal dum[1], eps;
+ integer ivt, iscl;
+ doublereal anrm;
+ integer idum[1], ierr, itau;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ extern logical lsame_(char *, char *);
+ integer chunk, minmn, wrkbl, itaup, itauq, mnthr;
+ logical wntqa;
+ integer nwork;
+ logical wntqn, wntqo, wntqs;
+ extern /* Subroutine */ int dbdsdc_(char *, char *, integer *, doublereal
+ *, doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *, integer *), dgebrd_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *);
+ extern doublereal dlamch_(char *), dlange_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *);
+ integer bdspac;
+ extern /* Subroutine */ int dgelqf_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, integer *),
+ dlascl_(char *, integer *, integer *, doublereal *, doublereal *,
+ integer *, integer *, doublereal *, integer *, integer *),
+ dgeqrf_(integer *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *, integer *), dlacpy_(char *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ integer *), dlaset_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *),
+ xerbla_(char *, integer *), dorgbr_(char *, integer *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ doublereal bignum;
+ extern /* Subroutine */ int dormbr_(char *, char *, char *, integer *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, integer *), dorglq_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *), dorgqr_(integer *, integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, integer *);
+ integer ldwrkl, ldwrkr, minwrk, ldwrku, maxwrk, ldwkvt;
+ doublereal smlnum;
+ logical wntqas, lquery;
+
+
+/* -- LAPACK driver routine (version 3.2.1) -- */
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/* March 2009 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGESDD computes the singular value decomposition (SVD) of a real */
+/* M-by-N matrix A, optionally computing the left and right singular */
+/* vectors. If singular vectors are desired, it uses a */
+/* divide-and-conquer algorithm. */
+
+/* The SVD is written */
+
+/* A = U * SIGMA * transpose(V) */
+
+/* where SIGMA is an M-by-N matrix which is zero except for its */
+/* min(m,n) diagonal elements, U is an M-by-M orthogonal matrix, and */
+/* V is an N-by-N orthogonal matrix. The diagonal elements of SIGMA */
+/* are the singular values of A; they are real and non-negative, and */
+/* are returned in descending order. The first min(m,n) columns of */
+/* U and V are the left and right singular vectors of A. */
+
+/* Note that the routine returns VT = V**T, not V. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* Specifies options for computing all or part of the matrix U: */
+/* = 'A': all M columns of U and all N rows of V**T are */
+/* returned in the arrays U and VT; */
+/* = 'S': the first min(M,N) columns of U and the first */
+/* min(M,N) rows of V**T are returned in the arrays U */
+/* and VT; */
+/* = 'O': If M >= N, the first N columns of U are overwritten */
+/* on the array A and all rows of V**T are returned in */
+/* the array VT; */
+/* otherwise, all columns of U are returned in the */
+/* array U and the first M rows of V**T are overwritten */
+/* in the array A; */
+/* = 'N': no columns of U or rows of V**T are computed. */
+
+/* M (input) INTEGER */
+/* The number of rows of the input matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the input matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* if JOBZ = 'O', A is overwritten with the first N columns */
+/* of U (the left singular vectors, stored */
+/* columnwise) if M >= N; */
+/* A is overwritten with the first M rows */
+/* of V**T (the right singular vectors, stored */
+/* rowwise) otherwise. */
+/* if JOBZ .ne. 'O', the contents of A are destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* S (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The singular values of A, sorted so that S(i) >= S(i+1). */
+
+/* U (output) DOUBLE PRECISION array, dimension (LDU,UCOL) */
+/* UCOL = M if JOBZ = 'A' or JOBZ = 'O' and M < N; */
+/* UCOL = min(M,N) if JOBZ = 'S'. */
+/* If JOBZ = 'A' or JOBZ = 'O' and M < N, U contains the M-by-M */
+/* orthogonal matrix U; */
+/* if JOBZ = 'S', U contains the first min(M,N) columns of U */
+/* (the left singular vectors, stored columnwise); */
+/* if JOBZ = 'O' and M >= N, or JOBZ = 'N', U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= 1; if */
+/* JOBZ = 'S' or 'A' or JOBZ = 'O' and M < N, LDU >= M. */
+
+/* VT (output) DOUBLE PRECISION array, dimension (LDVT,N) */
+/* If JOBZ = 'A' or JOBZ = 'O' and M >= N, VT contains the */
+/* N-by-N orthogonal matrix V**T; */
+/* if JOBZ = 'S', VT contains the first min(M,N) rows of */
+/* V**T (the right singular vectors, stored rowwise); */
+/* if JOBZ = 'O' and M < N, or JOBZ = 'N', VT is not referenced. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. LDVT >= 1; if */
+/* JOBZ = 'A' or JOBZ = 'O' and M >= N, LDVT >= N; */
+/* if JOBZ = 'S', LDVT >= min(M,N). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK; */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= 1. */
+/* If JOBZ = 'N', */
+/* LWORK >= 3*min(M,N) + max(max(M,N),7*min(M,N)). */
+/* If JOBZ = 'O', */
+/* LWORK >= 3*min(M,N) + */
+/* max(max(M,N),5*min(M,N)*min(M,N)+4*min(M,N)). */
+/* If JOBZ = 'S' or 'A' */
+/* LWORK >= 3*min(M,N) + */
+/* max(max(M,N),4*min(M,N)*min(M,N)+4*min(M,N)). */
+/* For good performance, LWORK should generally be larger. */
+/* If LWORK = -1 but other input arguments are legal, WORK(1) */
+/* returns the optimal LWORK. */
+
+/* IWORK (workspace) INTEGER array, dimension (8*min(M,N)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: DBDSDC did not converge, updating process failed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ wntqa = lsame_(jobz, "A");
+ wntqs = lsame_(jobz, "S");
+ wntqas = wntqa || wntqs;
+ wntqo = lsame_(jobz, "O");
+ wntqn = lsame_(jobz, "N");
+ lquery = *lwork == -1;
+
+ if (! (wntqa || wntqs || wntqo || wntqn)) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldu < 1 || wntqas && *ldu < *m || wntqo && *m < *n && *ldu < *
+ m) {
+ *info = -8;
+ } else if (*ldvt < 1 || wntqa && *ldvt < *n || wntqs && *ldvt < minmn ||
+ wntqo && *m >= *n && *ldvt < *n) {
+ *info = -10;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ if (*m >= *n && minmn > 0) {
+
+/* Compute space needed for DBDSDC */
+
+ mnthr = (integer) (minmn * 11. / 6.);
+ if (wntqn) {
+ bdspac = *n * 7;
+ } else {
+ bdspac = *n * 3 * *n + (*n << 2);
+ }
+ if (*m >= mnthr) {
+ if (wntqn) {
+
+/* Path 1 (M much larger than N, JOBZ='N') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n;
+ maxwrk = max(i__1,i__2);
+ minwrk = bdspac + *n;
+ } else if (wntqo) {
+
+/* Path 2 (M much larger than N, JOBZ='O') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "DORGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+, "QLN", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+, "PRT", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + (*n << 1) * *n;
+ minwrk = bdspac + (*n << 1) * *n + *n * 3;
+ } else if (wntqs) {
+
+/* Path 3 (M much larger than N, JOBZ='S') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "DORGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+, "QLN", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+, "PRT", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *n * *n;
+ minwrk = bdspac + *n * *n + *n * 3;
+ } else if (wntqa) {
+
+/* Path 4 (M much larger than N, JOBZ='A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *m * ilaenv_(&c__1, "DORGQR",
+ " ", m, m, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+, "QLN", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+, "PRT", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *n * *n;
+ minwrk = bdspac + *n * *n + *n * 3;
+ }
+ } else {
+
+/* Path 5 (M at least N, but not much larger) */
+
+ wrkbl = *n * 3 + (*m + *n) * ilaenv_(&c__1, "DGEBRD", " ", m,
+ n, &c_n1, &c_n1);
+ if (wntqn) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *n * 3 + max(*m,bdspac);
+ } else if (wntqo) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+, "QLN", m, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+, "PRT", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *n;
+/* Computing MAX */
+ i__1 = *m, i__2 = *n * *n + bdspac;
+ minwrk = *n * 3 + max(i__1,i__2);
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+, "QLN", m, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+, "PRT", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *n * 3 + max(*m,bdspac);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *m * ilaenv_(&c__1, "DORMBR"
+, "QLN", m, m, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+, "PRT", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = bdspac + *n * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *n * 3 + max(*m,bdspac);
+ }
+ }
+ } else if (minmn > 0) {
+
+/* Compute space needed for DBDSDC */
+
+ mnthr = (integer) (minmn * 11. / 6.);
+ if (wntqn) {
+ bdspac = *m * 7;
+ } else {
+ bdspac = *m * 3 * *m + (*m << 2);
+ }
+ if (*n >= mnthr) {
+ if (wntqn) {
+
+/* Path 1t (N much larger than M, JOBZ='N') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m;
+ maxwrk = max(i__1,i__2);
+ minwrk = bdspac + *m;
+ } else if (wntqo) {
+
+/* Path 2t (N much larger than M, JOBZ='O') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "DORGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+, "QLN", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+, "PRT", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + (*m << 1) * *m;
+ minwrk = bdspac + (*m << 1) * *m + *m * 3;
+ } else if (wntqs) {
+
+/* Path 3t (N much larger than M, JOBZ='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "DORGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+, "QLN", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+, "PRT", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *m;
+ minwrk = bdspac + *m * *m + *m * 3;
+ } else if (wntqa) {
+
+/* Path 4t (N much larger than M, JOBZ='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *n * ilaenv_(&c__1, "DORGLQ",
+ " ", n, n, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+, "QLN", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+, "PRT", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *m;
+ minwrk = bdspac + *m * *m + *m * 3;
+ }
+ } else {
+
+/* Path 5t (N greater than M, but not much larger) */
+
+ wrkbl = *m * 3 + (*m + *n) * ilaenv_(&c__1, "DGEBRD", " ", m,
+ n, &c_n1, &c_n1);
+ if (wntqn) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *m * 3 + max(*n,bdspac);
+ } else if (wntqo) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+, "QLN", m, m, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+, "PRT", m, n, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *n;
+/* Computing MAX */
+ i__1 = *n, i__2 = *m * *m + bdspac;
+ minwrk = *m * 3 + max(i__1,i__2);
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+, "QLN", m, m, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+, "PRT", m, n, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *m * 3 + max(*n,bdspac);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+, "QLN", m, m, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+, "PRT", n, n, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *m * 3 + max(*n,bdspac);
+ }
+ }
+ }
+ maxwrk = max(maxwrk,minwrk);
+ work[1] = (doublereal) maxwrk;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGESDD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = dlamch_("P");
+ smlnum = sqrt(dlamch_("S")) / eps;
+ bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = dlange_("M", m, n, &a[a_offset], lda, dum);
+ iscl = 0;
+ if (anrm > 0. && anrm < smlnum) {
+ iscl = 1;
+ dlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, &
+ ierr);
+ } else if (anrm > bignum) {
+ iscl = 1;
+ dlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+ if (*m >= *n) {
+
+/* A has at least as many rows as columns. If A has sufficiently */
+/* more rows than columns, first reduce using the QR */
+/* decomposition (if sufficient workspace available) */
+
+ if (*m >= mnthr) {
+
+ if (wntqn) {
+
+/* Path 1 (M much larger than N, JOBZ='N') */
+/* No singular vectors to be computed */
+
+ itau = 1;
+ nwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Zero out below R */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ dlaset_("L", &i__1, &i__2, &c_b227, &c_b227, &a[a_dim1 + 2],
+ lda);
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize R in A */
+/* (Workspace: need 4*N, prefer 3*N+2*N*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ dgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+ nwork = ie + *n;
+
+/* Perform bidiagonal SVD, computing singular values only */
+/* (Workspace: need N+BDSPAC) */
+
+ dbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+
+ } else if (wntqo) {
+
+/* Path 2 (M much larger than N, JOBZ = 'O') */
+/* N left singular vectors to be overwritten on A and */
+/* N right singular vectors to be computed in VT */
+
+ ir = 1;
+
+/* WORK(IR) is LDWRKR by N */
+
+ if (*lwork >= *lda * *n + *n * *n + *n * 3 + bdspac) {
+ ldwrkr = *lda;
+ } else {
+ ldwrkr = (*lwork - *n * *n - *n * 3 - bdspac) / *n;
+ }
+ itau = ir + ldwrkr * *n;
+ nwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ dlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ dlaset_("L", &i__1, &i__2, &c_b227, &c_b227, &work[ir + 1], &
+ ldwrkr);
+
+/* Generate Q in A */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__1, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize R in VT, copying result to WORK(IR) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ dgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/* WORK(IU) is N by N */
+
+ iu = nwork;
+ nwork = iu + *n * *n;
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in WORK(IU) and computing right */
+/* singular vectors of bidiagonal matrix in VT */
+/* (Workspace: need N+N*N+BDSPAC) */
+
+ dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Overwrite WORK(IU) by left singular vectors of R */
+/* and VT by right singular vectors of R */
+/* (Workspace: need 2*N*N+3*N, prefer 2*N*N+2*N+N*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
+ itauq], &work[iu], n, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IU), storing result in WORK(IR) and copying to A */
+/* (Workspace: need 2*N*N, prefer N*N+M*N) */
+
+ i__1 = *m;
+ i__2 = ldwrkr;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrkr);
+ dgemm_("N", "N", &chunk, n, n, &c_b248, &a[i__ + a_dim1],
+ lda, &work[iu], n, &c_b227, &work[ir], &ldwrkr);
+ dlacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ +
+ a_dim1], lda);
+/* L10: */
+ }
+
+ } else if (wntqs) {
+
+/* Path 3 (M much larger than N, JOBZ='S') */
+/* N left singular vectors to be computed in U and */
+/* N right singular vectors to be computed in VT */
+
+ ir = 1;
+
+/* WORK(IR) is N by N */
+
+ ldwrkr = *n;
+ itau = ir + ldwrkr * *n;
+ nwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ dlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__2 = *n - 1;
+ i__1 = *n - 1;
+ dlaset_("L", &i__2, &i__1, &c_b227, &c_b227, &work[ir + 1], &
+ ldwrkr);
+
+/* Generate Q in A */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IR) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in U and computing right singular */
+/* vectors of bidiagonal matrix in VT */
+/* (Workspace: need N+BDSPAC) */
+
+ dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Overwrite U by left singular vectors of R and VT */
+/* by right singular vectors of R */
+/* (Workspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IR), storing result in U */
+/* (Workspace: need N*N) */
+
+ dlacpy_("F", n, n, &u[u_offset], ldu, &work[ir], &ldwrkr);
+ dgemm_("N", "N", m, n, n, &c_b248, &a[a_offset], lda, &work[
+ ir], &ldwrkr, &c_b227, &u[u_offset], ldu);
+
+ } else if (wntqa) {
+
+/* Path 4 (M much larger than N, JOBZ='A') */
+/* M left singular vectors to be computed in U and */
+/* N right singular vectors to be computed in VT */
+
+ iu = 1;
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ itau = iu + ldwrku * *n;
+ nwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+ dlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+
+/* Generate Q in U */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+ i__2 = *lwork - nwork + 1;
+ dorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &work[nwork],
+ &i__2, &ierr);
+
+/* Produce R in A, zeroing out other entries */
+
+ i__2 = *n - 1;
+ i__1 = *n - 1;
+ dlaset_("L", &i__2, &i__1, &c_b227, &c_b227, &a[a_dim1 + 2],
+ lda);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize R in A */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in WORK(IU) and computing right */
+/* singular vectors of bidiagonal matrix in VT */
+/* (Workspace: need N+N*N+BDSPAC) */
+
+ dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Overwrite WORK(IU) by left singular vectors of R and VT */
+/* by right singular vectors of R */
+/* (Workspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", n, n, n, &a[a_offset], lda, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__2, &
+ ierr);
+ i__2 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/* Multiply Q in U by left singular vectors of R in */
+/* WORK(IU), storing result in A */
+/* (Workspace: need N*N) */
+
+ dgemm_("N", "N", m, n, n, &c_b248, &u[u_offset], ldu, &work[
+ iu], &ldwrku, &c_b227, &a[a_offset], lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ dlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+
+ }
+
+ } else {
+
+/* M .LT. MNTHR */
+
+/* Path 5 (M at least N, but not much larger) */
+/* Reduce to bidiagonal form without QR decomposition */
+
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize A */
+/* (Workspace: need 3*N+M, prefer 3*N+(M+N)*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__2, &ierr);
+ if (wntqn) {
+
+/* Perform bidiagonal SVD, only computing singular values */
+/* (Workspace: need N+BDSPAC) */
+
+ dbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+ } else if (wntqo) {
+ iu = nwork;
+ if (*lwork >= *m * *n + *n * 3 + bdspac) {
+
+/* WORK( IU ) is M by N */
+
+ ldwrku = *m;
+ nwork = iu + ldwrku * *n;
+ dlaset_("F", m, n, &c_b227, &c_b227, &work[iu], &ldwrku);
+ } else {
+
+/* WORK( IU ) is N by N */
+
+ ldwrku = *n;
+ nwork = iu + ldwrku * *n;
+
+/* WORK(IR) is LDWRKR by N */
+
+ ir = nwork;
+ ldwrkr = (*lwork - *n * *n - *n * 3) / *n;
+ }
+ nwork = iu + ldwrku * *n;
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in WORK(IU) and computing right */
+/* singular vectors of bidiagonal matrix in VT */
+/* (Workspace: need N+N*N+BDSPAC) */
+
+ dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], &ldwrku, &
+ vt[vt_offset], ldvt, dum, idum, &work[nwork], &iwork[
+ 1], info);
+
+/* Overwrite VT by right singular vectors of A */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+ if (*lwork >= *m * *n + *n * 3 + bdspac) {
+
+/* Overwrite WORK(IU) by left singular vectors of A */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__2, &
+ ierr);
+
+/* Copy left singular vectors of A from WORK(IU) to A */
+
+ dlacpy_("F", m, n, &work[iu], &ldwrku, &a[a_offset], lda);
+ } else {
+
+/* Generate Q in A */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dorgbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &
+ work[nwork], &i__2, &ierr);
+
+/* Multiply Q in A by left singular vectors of */
+/* bidiagonal matrix in WORK(IU), storing result in */
+/* WORK(IR) and copying to A */
+/* (Workspace: need 2*N*N, prefer N*N+M*N) */
+
+ i__2 = *m;
+ i__1 = ldwrkr;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrkr);
+ dgemm_("N", "N", &chunk, n, n, &c_b248, &a[i__ +
+ a_dim1], lda, &work[iu], &ldwrku, &c_b227, &
+ work[ir], &ldwrkr);
+ dlacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ +
+ a_dim1], lda);
+/* L20: */
+ }
+ }
+
+ } else if (wntqs) {
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in U and computing right singular */
+/* vectors of bidiagonal matrix in VT */
+/* (Workspace: need N+BDSPAC) */
+
+ dlaset_("F", m, n, &c_b227, &c_b227, &u[u_offset], ldu);
+ dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Overwrite U by left singular vectors of A and VT */
+/* by right singular vectors of A */
+/* (Workspace: need 3*N, prefer 2*N+N*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ } else if (wntqa) {
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in U and computing right singular */
+/* vectors of bidiagonal matrix in VT */
+/* (Workspace: need N+BDSPAC) */
+
+ dlaset_("F", m, m, &c_b227, &c_b227, &u[u_offset], ldu);
+ dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Set the right corner of U to identity matrix */
+
+ if (*m > *n) {
+ i__1 = *m - *n;
+ i__2 = *m - *n;
+ dlaset_("F", &i__1, &i__2, &c_b227, &c_b248, &u[*n + 1 + (
+ *n + 1) * u_dim1], ldu);
+ }
+
+/* Overwrite U by left singular vectors of A and VT */
+/* by right singular vectors of A */
+/* (Workspace: need N*N+2*N+M, prefer N*N+2*N+M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ }
+
+ }
+
+ } else {
+
+/* A has more columns than rows. If A has sufficiently more */
+/* columns than rows, first reduce using the LQ decomposition (if */
+/* sufficient workspace available) */
+
+ if (*n >= mnthr) {
+
+ if (wntqn) {
+
+/* Path 1t (N much larger than M, JOBZ='N') */
+/* No singular vectors to be computed */
+
+ itau = 1;
+ nwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Zero out above L */
+
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ dlaset_("U", &i__1, &i__2, &c_b227, &c_b227, &a[(a_dim1 << 1)
+ + 1], lda);
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize L in A */
+/* (Workspace: need 4*M, prefer 3*M+2*M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ dgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+ nwork = ie + *m;
+
+/* Perform bidiagonal SVD, computing singular values only */
+/* (Workspace: need M+BDSPAC) */
+
+ dbdsdc_("U", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+
+ } else if (wntqo) {
+
+/* Path 2t (N much larger than M, JOBZ='O') */
+/* M right singular vectors to be overwritten on A and */
+/* M left singular vectors to be computed in U */
+
+ ivt = 1;
+
+/* IVT is M by M */
+
+ il = ivt + *m * *m;
+ if (*lwork >= *m * *n + *m * *m + *m * 3 + bdspac) {
+
+/* WORK(IL) is M by N */
+
+ ldwrkl = *m;
+ chunk = *n;
+ } else {
+ ldwrkl = *m;
+ chunk = (*lwork - *m * *m) / *m;
+ }
+ itau = il + ldwrkl * *m;
+ nwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Copy L to WORK(IL), zeroing out above it */
+
+ dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ dlaset_("U", &i__1, &i__2, &c_b227, &c_b227, &work[il +
+ ldwrkl], &ldwrkl);
+
+/* Generate Q in A */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__1, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IL) */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ dgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in U, and computing right singular */
+/* vectors of bidiagonal matrix in WORK(IVT) */
+/* (Workspace: need M+M*M+BDSPAC) */
+
+ dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
+ work[ivt], m, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Overwrite U by left singular vectors of L and WORK(IVT) */
+/* by right singular vectors of L */
+/* (Workspace: need 2*M*M+3*M, prefer 2*M*M+2*M+M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", m, m, m, &work[il], &ldwrkl, &work[
+ itaup], &work[ivt], m, &work[nwork], &i__1, &ierr);
+
+/* Multiply right singular vectors of L in WORK(IVT) by Q */
+/* in A, storing result in WORK(IL) and copying to A */
+/* (Workspace: need 2*M*M, prefer M*M+M*N) */
+
+ i__1 = *n;
+ i__2 = chunk;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ dgemm_("N", "N", m, &blk, m, &c_b248, &work[ivt], m, &a[
+ i__ * a_dim1 + 1], lda, &c_b227, &work[il], &
+ ldwrkl);
+ dlacpy_("F", m, &blk, &work[il], &ldwrkl, &a[i__ * a_dim1
+ + 1], lda);
+/* L30: */
+ }
+
+ } else if (wntqs) {
+
+/* Path 3t (N much larger than M, JOBZ='S') */
+/* M right singular vectors to be computed in VT and */
+/* M left singular vectors to be computed in U */
+
+ il = 1;
+
+/* WORK(IL) is M by M */
+
+ ldwrkl = *m;
+ itau = il + ldwrkl * *m;
+ nwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Copy L to WORK(IL), zeroing out above it */
+
+ dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ dlaset_("U", &i__2, &i__1, &c_b227, &c_b227, &work[il +
+ ldwrkl], &ldwrkl);
+
+/* Generate Q in A */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IL) */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in U and computing right singular */
+/* vectors of bidiagonal matrix in VT */
+/* (Workspace: need M+BDSPAC) */
+
+ dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Overwrite U by left singular vectors of L and VT */
+/* by right singular vectors of L */
+/* (Workspace: need M*M+3*M, prefer M*M+2*M+M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+ i__2 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", m, m, m, &work[il], &ldwrkl, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/* Multiply right singular vectors of L in WORK(IL) by */
+/* Q in A, storing result in VT */
+/* (Workspace: need M*M) */
+
+ dlacpy_("F", m, m, &vt[vt_offset], ldvt, &work[il], &ldwrkl);
+ dgemm_("N", "N", m, n, m, &c_b248, &work[il], &ldwrkl, &a[
+ a_offset], lda, &c_b227, &vt[vt_offset], ldvt);
+
+ } else if (wntqa) {
+
+/* Path 4t (N much larger than M, JOBZ='A') */
+/* N right singular vectors to be computed in VT and */
+/* M left singular vectors to be computed in U */
+
+ ivt = 1;
+
+/* WORK(IVT) is M by M */
+
+ ldwkvt = *m;
+ itau = ivt + ldwkvt * *m;
+ nwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+ dlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+/* Generate Q in VT */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[
+ nwork], &i__2, &ierr);
+
+/* Produce L in A, zeroing out other entries */
+
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ dlaset_("U", &i__2, &i__1, &c_b227, &c_b227, &a[(a_dim1 << 1)
+ + 1], lda);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize L in A */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in U and computing right singular */
+/* vectors of bidiagonal matrix in WORK(IVT) */
+/* (Workspace: need M+M*M+BDSPAC) */
+
+ dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
+ work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1]
+, info);
+
+/* Overwrite U by left singular vectors of L and WORK(IVT) */
+/* by right singular vectors of L */
+/* (Workspace: need M*M+3*M, prefer M*M+2*M+M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, m, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+ i__2 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", m, m, m, &a[a_offset], lda, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2, &
+ ierr);
+
+/* Multiply right singular vectors of L in WORK(IVT) by */
+/* Q in VT, storing result in A */
+/* (Workspace: need M*M) */
+
+ dgemm_("N", "N", m, n, m, &c_b248, &work[ivt], &ldwkvt, &vt[
+ vt_offset], ldvt, &c_b227, &a[a_offset], lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ dlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+ }
+
+ } else {
+
+/* N .LT. MNTHR */
+
+/* Path 5t (N greater than M, but not much larger) */
+/* Reduce to bidiagonal form without LQ decomposition */
+
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize A */
+/* (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__2, &ierr);
+ if (wntqn) {
+
+/* Perform bidiagonal SVD, only computing singular values */
+/* (Workspace: need M+BDSPAC) */
+
+ dbdsdc_("L", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+ } else if (wntqo) {
+ ldwkvt = *m;
+ ivt = nwork;
+ if (*lwork >= *m * *n + *m * 3 + bdspac) {
+
+/* WORK( IVT ) is M by N */
+
+ dlaset_("F", m, n, &c_b227, &c_b227, &work[ivt], &ldwkvt);
+ nwork = ivt + ldwkvt * *n;
+ } else {
+
+/* WORK( IVT ) is M by M */
+
+ nwork = ivt + ldwkvt * *m;
+ il = nwork;
+
+/* WORK(IL) is M by CHUNK */
+
+ chunk = (*lwork - *m * *m - *m * 3) / *m;
+ }
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in U and computing right singular */
+/* vectors of bidiagonal matrix in WORK(IVT) */
+/* (Workspace: need M*M+BDSPAC) */
+
+ dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
+ work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1]
+, info);
+
+/* Overwrite U by left singular vectors of A */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+ if (*lwork >= *m * *n + *m * 3 + bdspac) {
+
+/* Overwrite WORK(IVT) by right singular vectors of A */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2,
+ &ierr);
+
+/* Copy right singular vectors of A from WORK(IVT) to A */
+
+ dlacpy_("F", m, n, &work[ivt], &ldwkvt, &a[a_offset], lda);
+ } else {
+
+/* Generate P**T in A */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ dorgbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/* Multiply Q in A by right singular vectors of */
+/* bidiagonal matrix in WORK(IVT), storing result in */
+/* WORK(IL) and copying to A */
+/* (Workspace: need 2*M*M, prefer M*M+M*N) */
+
+ i__2 = *n;
+ i__1 = chunk;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ dgemm_("N", "N", m, &blk, m, &c_b248, &work[ivt], &
+ ldwkvt, &a[i__ * a_dim1 + 1], lda, &c_b227, &
+ work[il], m);
+ dlacpy_("F", m, &blk, &work[il], m, &a[i__ * a_dim1 +
+ 1], lda);
+/* L40: */
+ }
+ }
+ } else if (wntqs) {
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in U and computing right singular */
+/* vectors of bidiagonal matrix in VT */
+/* (Workspace: need M+BDSPAC) */
+
+ dlaset_("F", m, n, &c_b227, &c_b227, &vt[vt_offset], ldvt);
+ dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Overwrite U by left singular vectors of A and VT */
+/* by right singular vectors of A */
+/* (Workspace: need 3*M, prefer 2*M+M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ } else if (wntqa) {
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in U and computing right singular */
+/* vectors of bidiagonal matrix in VT */
+/* (Workspace: need M+BDSPAC) */
+
+ dlaset_("F", n, n, &c_b227, &c_b227, &vt[vt_offset], ldvt);
+ dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Set the right corner of VT to identity matrix */
+
+ if (*n > *m) {
+ i__1 = *n - *m;
+ i__2 = *n - *m;
+ dlaset_("F", &i__1, &i__2, &c_b227, &c_b248, &vt[*m + 1 +
+ (*m + 1) * vt_dim1], ldvt);
+ }
+
+/* Overwrite U by left singular vectors of A and VT */
+/* by right singular vectors of A */
+/* (Workspace: need 2*M+N, prefer 2*M+N*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ }
+
+ }
+
+ }
+
+/* Undo scaling if necessary */
+
+ if (iscl == 1) {
+ if (anrm > bignum) {
+ dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ if (anrm < smlnum) {
+ dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ }
+
+/* Return optimal workspace in WORK(1) */
+
+ work[1] = (doublereal) maxwrk;
+
+ return 0;
+
+/* End of DGESDD */
+
+} /* dgesdd_ */
diff --git a/contrib/libs/clapack/dgesv.c b/contrib/libs/clapack/dgesv.c
new file mode 100644
index 0000000000..c44d06ff0d
--- /dev/null
+++ b/contrib/libs/clapack/dgesv.c
@@ -0,0 +1,138 @@
+/* dgesv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dgesv_(integer *n, integer *nrhs, doublereal *a, integer
+	*lda, integer *ipiv, doublereal *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern /* Subroutine */ int dgetrf_(integer *, integer *, doublereal *,
+	    integer *, integer *, integer *), xerbla_(char *, integer *), dgetrs_(char *, integer *, integer *, doublereal *,
+	    integer *, integer *, doublereal *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGESV computes the solution to a real system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N matrix and X and B are N-by-NRHS matrices. */
+
+/* The LU decomposition with partial pivoting and row interchanges is */
+/* used to factor A as */
+/* A = P * L * U, */
+/* where P is a permutation matrix, L is unit lower triangular, and U is */
+/* upper triangular. The factored form of A is then used to solve the */
+/* system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the N-by-N coefficient matrix A. */
+/* On exit, the factors L and U from the factorization */
+/* A = P*L*U; the unit diagonal elements of L are not stored. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* The pivot indices that define the permutation matrix P; */
+/* row i of the matrix was interchanged with row IPIV(i). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS matrix of right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, so the solution could not be computed. */
+
+/* ===================================================================== */
+
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset; /* shift pointers so indexing is 1-based, column-major (Fortran style) */
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*nrhs < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4; /* LDA is argument 4 */
+    } else if (*ldb < max(1,*n)) {
+	*info = -7; /* LDB is argument 7 */
+    }
+    if (*info != 0) {
+	i__1 = -(*info); /* xerbla_ reports the 1-based index of the offending argument */
+	xerbla_("DGESV ", &i__1);
+	return 0;
+    }
+
+/* Compute the LU factorization of A. */
+
+    dgetrf_(n, n, &a[a_offset], lda, &ipiv[1], info);
+    if (*info == 0) { /* skip the solve if U is exactly singular (INFO > 0) */
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+	dgetrs_("No transpose", n, nrhs, &a[a_offset], lda, &ipiv[1], &b[
+		b_offset], ldb, info);
+    }
+    return 0;
+
+/* End of DGESV */
+
+} /* dgesv_ */
diff --git a/contrib/libs/clapack/dgesvd.c b/contrib/libs/clapack/dgesvd.c
new file mode 100644
index 0000000000..a667033d2d
--- /dev/null
+++ b/contrib/libs/clapack/dgesvd.c
@@ -0,0 +1,4050 @@
+/* dgesvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__6 = 6;
+static integer c__0 = 0;
+static integer c__2 = 2;
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b421 = 0.;
+static doublereal c_b443 = 1.;
+
+/* Subroutine */ int dgesvd_(char *jobu, char *jobvt, integer *m, integer *n,
+ doublereal *a, integer *lda, doublereal *s, doublereal *u, integer *
+ ldu, doublereal *vt, integer *ldvt, doublereal *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1[2],
+ i__2, i__3, i__4;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, ie, ir, iu, blk, ncu;
+ doublereal dum[1], eps;
+ integer nru, iscl;
+ doublereal anrm;
+ integer ierr, itau, ncvt, nrvt;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ extern logical lsame_(char *, char *);
+ integer chunk, minmn, wrkbl, itaup, itauq, mnthr, iwork;
+ logical wntua, wntva, wntun, wntuo, wntvn, wntvo, wntus, wntvs;
+ extern /* Subroutine */ int dgebrd_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *);
+ extern doublereal dlamch_(char *), dlange_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *);
+ integer bdspac;
+ extern /* Subroutine */ int dgelqf_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, integer *),
+ dlascl_(char *, integer *, integer *, doublereal *, doublereal *,
+ integer *, integer *, doublereal *, integer *, integer *),
+ dgeqrf_(integer *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *, integer *), dlacpy_(char *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ integer *), dlaset_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *),
+ dbdsqr_(char *, integer *, integer *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, integer *), dorgbr_(char *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *);
+ doublereal bignum;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int dormbr_(char *, char *, char *, integer *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, integer *), dorglq_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *), dorgqr_(integer *, integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, integer *);
+ integer ldwrkr, minwrk, ldwrku, maxwrk;
+ doublereal smlnum;
+ logical lquery, wntuas, wntvas;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGESVD computes the singular value decomposition (SVD) of a real */
+/* M-by-N matrix A, optionally computing the left and/or right singular */
+/* vectors. The SVD is written */
+
+/* A = U * SIGMA * transpose(V) */
+
+/* where SIGMA is an M-by-N matrix which is zero except for its */
+/* min(m,n) diagonal elements, U is an M-by-M orthogonal matrix, and */
+/* V is an N-by-N orthogonal matrix. The diagonal elements of SIGMA */
+/* are the singular values of A; they are real and non-negative, and */
+/* are returned in descending order. The first min(m,n) columns of */
+/* U and V are the left and right singular vectors of A. */
+
+/* Note that the routine returns V**T, not V. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBU (input) CHARACTER*1 */
+/* Specifies options for computing all or part of the matrix U: */
+/* = 'A': all M columns of U are returned in array U: */
+/* = 'S': the first min(m,n) columns of U (the left singular */
+/* vectors) are returned in the array U; */
+/* = 'O': the first min(m,n) columns of U (the left singular */
+/* vectors) are overwritten on the array A; */
+/* = 'N': no columns of U (no left singular vectors) are */
+/* computed. */
+
+/* JOBVT (input) CHARACTER*1 */
+/* Specifies options for computing all or part of the matrix */
+/* V**T: */
+/* = 'A': all N rows of V**T are returned in the array VT; */
+/* = 'S': the first min(m,n) rows of V**T (the right singular */
+/* vectors) are returned in the array VT; */
+/* = 'O': the first min(m,n) rows of V**T (the right singular */
+/* vectors) are overwritten on the array A; */
+/* = 'N': no rows of V**T (no right singular vectors) are */
+/* computed. */
+
+/* JOBVT and JOBU cannot both be 'O'. */
+
+/* M (input) INTEGER */
+/* The number of rows of the input matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the input matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* if JOBU = 'O', A is overwritten with the first min(m,n) */
+/* columns of U (the left singular vectors, */
+/* stored columnwise); */
+/* if JOBVT = 'O', A is overwritten with the first min(m,n) */
+/* rows of V**T (the right singular vectors, */
+/* stored rowwise); */
+/* if JOBU .ne. 'O' and JOBVT .ne. 'O', the contents of A */
+/* are destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* S (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The singular values of A, sorted so that S(i) >= S(i+1). */
+
+/* U (output) DOUBLE PRECISION array, dimension (LDU,UCOL) */
+/* (LDU,M) if JOBU = 'A' or (LDU,min(M,N)) if JOBU = 'S'. */
+/* If JOBU = 'A', U contains the M-by-M orthogonal matrix U; */
+/* if JOBU = 'S', U contains the first min(m,n) columns of U */
+/* (the left singular vectors, stored columnwise); */
+/* if JOBU = 'N' or 'O', U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= 1; if */
+/* JOBU = 'S' or 'A', LDU >= M. */
+
+/* VT (output) DOUBLE PRECISION array, dimension (LDVT,N) */
+/* If JOBVT = 'A', VT contains the N-by-N orthogonal matrix */
+/* V**T; */
+/* if JOBVT = 'S', VT contains the first min(m,n) rows of */
+/* V**T (the right singular vectors, stored rowwise); */
+/* if JOBVT = 'N' or 'O', VT is not referenced. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. LDVT >= 1; if */
+/* JOBVT = 'A', LDVT >= N; if JOBVT = 'S', LDVT >= min(M,N). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK; */
+/* if INFO > 0, WORK(2:MIN(M,N)) contains the unconverged */
+/* superdiagonal elements of an upper bidiagonal matrix B */
+/* whose diagonal is in S (not necessarily sorted). B */
+/* satisfies A = U * B * VT, so it has the same singular values */
+/* as A, and singular vectors related by U and VT. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* LWORK >= MAX(1,3*MIN(M,N)+MAX(M,N),5*MIN(M,N)). */
+/* For good performance, LWORK should generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if DBDSQR did not converge, INFO specifies how many */
+/* superdiagonals of an intermediate bidiagonal form B */
+/* did not converge to zero. See the description of WORK */
+/* above for details. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ wntua = lsame_(jobu, "A");
+ wntus = lsame_(jobu, "S");
+ wntuas = wntua || wntus;
+ wntuo = lsame_(jobu, "O");
+ wntun = lsame_(jobu, "N");
+ wntva = lsame_(jobvt, "A");
+ wntvs = lsame_(jobvt, "S");
+ wntvas = wntva || wntvs;
+ wntvo = lsame_(jobvt, "O");
+ wntvn = lsame_(jobvt, "N");
+ lquery = *lwork == -1;
+
+ if (! (wntua || wntus || wntuo || wntun)) {
+ *info = -1;
+ } else if (! (wntva || wntvs || wntvo || wntvn) || wntvo && wntuo) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*m)) {
+ *info = -6;
+ } else if (*ldu < 1 || wntuas && *ldu < *m) {
+ *info = -9;
+ } else if (*ldvt < 1 || wntva && *ldvt < *n || wntvs && *ldvt < minmn) {
+ *info = -11;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ if (*m >= *n && minmn > 0) {
+
+/* Compute space needed for DBDSQR */
+
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = jobu;
+ i__1[1] = 1, a__1[1] = jobvt;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ mnthr = ilaenv_(&c__6, "DGESVD", ch__1, m, n, &c__0, &c__0);
+ bdspac = *n * 5;
+ if (*m >= mnthr) {
+ if (wntun) {
+
+/* Path 1 (M much larger than N, JOBU='N') */
+
+ maxwrk = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", n, n, &c_n1, &c_n1);
+ maxwrk = max(i__2,i__3);
+ if (wntvo || wntvas) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *n * 3 + (*n - 1) * ilaenv_(&
+ c__1, "DORGBR", "P", n, n, n, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ maxwrk = max(maxwrk,bdspac);
+/* Computing MAX */
+ i__2 = *n << 2;
+ minwrk = max(i__2,bdspac);
+ } else if (wntuo && wntvn) {
+
+/* Path 2 (M much larger than N, JOBU='O', JOBVT='N') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "DORGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + *n * ilaenv_(&c__1, "DORGBR"
+, "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+/* Computing MAX */
+ i__2 = *n * *n + wrkbl, i__3 = *n * *n + *m * *n + *n;
+ maxwrk = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = *n * 3 + *m;
+ minwrk = max(i__2,bdspac);
+ } else if (wntuo && wntvas) {
+
+/* Path 3 (M much larger than N, JOBU='O', JOBVT='S' or */
+/* 'A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "DORGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + *n * ilaenv_(&c__1, "DORGBR"
+, "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n - 1) * ilaenv_(&c__1,
+ "DORGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+/* Computing MAX */
+ i__2 = *n * *n + wrkbl, i__3 = *n * *n + *m * *n + *n;
+ maxwrk = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = *n * 3 + *m;
+ minwrk = max(i__2,bdspac);
+ } else if (wntus && wntvn) {
+
+/* Path 4 (M much larger than N, JOBU='S', JOBVT='N') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "DORGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + *n * ilaenv_(&c__1, "DORGBR"
+, "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = *n * *n + wrkbl;
+/* Computing MAX */
+ i__2 = *n * 3 + *m;
+ minwrk = max(i__2,bdspac);
+ } else if (wntus && wntvo) {
+
+/* Path 5 (M much larger than N, JOBU='S', JOBVT='O') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "DORGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + *n * ilaenv_(&c__1, "DORGBR"
+, "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n - 1) * ilaenv_(&c__1,
+ "DORGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = (*n << 1) * *n + wrkbl;
+/* Computing MAX */
+ i__2 = *n * 3 + *m;
+ minwrk = max(i__2,bdspac);
+ } else if (wntus && wntvas) {
+
+/* Path 6 (M much larger than N, JOBU='S', JOBVT='S' or */
+/* 'A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "DORGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + *n * ilaenv_(&c__1, "DORGBR"
+, "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n - 1) * ilaenv_(&c__1,
+ "DORGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = *n * *n + wrkbl;
+/* Computing MAX */
+ i__2 = *n * 3 + *m;
+ minwrk = max(i__2,bdspac);
+ } else if (wntua && wntvn) {
+
+/* Path 7 (M much larger than N, JOBU='A', JOBVT='N') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *m * ilaenv_(&c__1, "DORGQR",
+ " ", m, m, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + *n * ilaenv_(&c__1, "DORGBR"
+, "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = *n * *n + wrkbl;
+/* Computing MAX */
+ i__2 = *n * 3 + *m;
+ minwrk = max(i__2,bdspac);
+ } else if (wntua && wntvo) {
+
+/* Path 8 (M much larger than N, JOBU='A', JOBVT='O') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *m * ilaenv_(&c__1, "DORGQR",
+ " ", m, m, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + *n * ilaenv_(&c__1, "DORGBR"
+, "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n - 1) * ilaenv_(&c__1,
+ "DORGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = (*n << 1) * *n + wrkbl;
+/* Computing MAX */
+ i__2 = *n * 3 + *m;
+ minwrk = max(i__2,bdspac);
+ } else if (wntua && wntvas) {
+
+/* Path 9 (M much larger than N, JOBU='A', JOBVT='S' or */
+/* 'A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *m * ilaenv_(&c__1, "DORGQR",
+ " ", m, m, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + *n * ilaenv_(&c__1, "DORGBR"
+, "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n - 1) * ilaenv_(&c__1,
+ "DORGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = *n * *n + wrkbl;
+/* Computing MAX */
+ i__2 = *n * 3 + *m;
+ minwrk = max(i__2,bdspac);
+ }
+ } else {
+
+/* Path 10 (M at least N, but not much larger) */
+
+ maxwrk = *n * 3 + (*m + *n) * ilaenv_(&c__1, "DGEBRD", " ", m,
+ n, &c_n1, &c_n1);
+ if (wntus || wntuo) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *n * 3 + *n * ilaenv_(&c__1, "DORG"
+ "BR", "Q", m, n, n, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ if (wntua) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *n * 3 + *m * ilaenv_(&c__1, "DORG"
+ "BR", "Q", m, m, n, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ if (! wntvn) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *n * 3 + (*n - 1) * ilaenv_(&c__1,
+ "DORGBR", "P", n, n, n, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ maxwrk = max(maxwrk,bdspac);
+/* Computing MAX */
+ i__2 = *n * 3 + *m;
+ minwrk = max(i__2,bdspac);
+ }
+ } else if (minmn > 0) {
+
+/* Compute space needed for DBDSQR */
+
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = jobu;
+ i__1[1] = 1, a__1[1] = jobvt;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ mnthr = ilaenv_(&c__6, "DGESVD", ch__1, m, n, &c__0, &c__0);
+ bdspac = *m * 5;
+ if (*n >= mnthr) {
+ if (wntvn) {
+
+/* Path 1t(N much larger than M, JOBVT='N') */
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", m, m, &c_n1, &c_n1);
+ maxwrk = max(i__2,i__3);
+ if (wntuo || wntuas) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *m * 3 + *m * ilaenv_(&c__1,
+ "DORGBR", "Q", m, m, m, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ maxwrk = max(maxwrk,bdspac);
+/* Computing MAX */
+ i__2 = *m << 2;
+ minwrk = max(i__2,bdspac);
+ } else if (wntvo && wntun) {
+
+/* Path 2t(N much larger than M, JOBU='N', JOBVT='O') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "DORGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m - 1) * ilaenv_(&c__1,
+ "DORGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+/* Computing MAX */
+ i__2 = *m * *m + wrkbl, i__3 = *m * *m + *m * *n + *m;
+ maxwrk = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = *m * 3 + *n;
+ minwrk = max(i__2,bdspac);
+ } else if (wntvo && wntuas) {
+
+/* Path 3t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='O') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "DORGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m - 1) * ilaenv_(&c__1,
+ "DORGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + *m * ilaenv_(&c__1, "DORGBR"
+, "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+/* Computing MAX */
+ i__2 = *m * *m + wrkbl, i__3 = *m * *m + *m * *n + *m;
+ maxwrk = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = *m * 3 + *n;
+ minwrk = max(i__2,bdspac);
+ } else if (wntvs && wntun) {
+
+/* Path 4t(N much larger than M, JOBU='N', JOBVT='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "DORGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m - 1) * ilaenv_(&c__1,
+ "DORGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = *m * *m + wrkbl;
+/* Computing MAX */
+ i__2 = *m * 3 + *n;
+ minwrk = max(i__2,bdspac);
+ } else if (wntvs && wntuo) {
+
+/* Path 5t(N much larger than M, JOBU='O', JOBVT='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "DORGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m - 1) * ilaenv_(&c__1,
+ "DORGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + *m * ilaenv_(&c__1, "DORGBR"
+, "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = (*m << 1) * *m + wrkbl;
+/* Computing MAX */
+ i__2 = *m * 3 + *n;
+ minwrk = max(i__2,bdspac);
+ } else if (wntvs && wntuas) {
+
+/* Path 6t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "DORGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m - 1) * ilaenv_(&c__1,
+ "DORGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + *m * ilaenv_(&c__1, "DORGBR"
+, "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = *m * *m + wrkbl;
+/* Computing MAX */
+ i__2 = *m * 3 + *n;
+ minwrk = max(i__2,bdspac);
+ } else if (wntva && wntun) {
+
+/* Path 7t(N much larger than M, JOBU='N', JOBVT='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *n * ilaenv_(&c__1, "DORGLQ",
+ " ", n, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m - 1) * ilaenv_(&c__1,
+ "DORGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = *m * *m + wrkbl;
+/* Computing MAX */
+ i__2 = *m * 3 + *n;
+ minwrk = max(i__2,bdspac);
+ } else if (wntva && wntuo) {
+
+/* Path 8t(N much larger than M, JOBU='O', JOBVT='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *n * ilaenv_(&c__1, "DORGLQ",
+ " ", n, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m - 1) * ilaenv_(&c__1,
+ "DORGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + *m * ilaenv_(&c__1, "DORGBR"
+, "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = (*m << 1) * *m + wrkbl;
+/* Computing MAX */
+ i__2 = *m * 3 + *n;
+ minwrk = max(i__2,bdspac);
+ } else if (wntva && wntuas) {
+
+/* Path 9t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *n * ilaenv_(&c__1, "DORGLQ",
+ " ", n, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "DGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m - 1) * ilaenv_(&c__1,
+ "DORGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + *m * ilaenv_(&c__1, "DORGBR"
+, "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = *m * *m + wrkbl;
+/* Computing MAX */
+ i__2 = *m * 3 + *n;
+ minwrk = max(i__2,bdspac);
+ }
+ } else {
+
+/* Path 10t(N greater than M, but not much larger) */
+
+ maxwrk = *m * 3 + (*m + *n) * ilaenv_(&c__1, "DGEBRD", " ", m,
+ n, &c_n1, &c_n1);
+ if (wntvs || wntvo) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *m * 3 + *m * ilaenv_(&c__1, "DORG"
+ "BR", "P", m, n, m, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ if (wntva) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *m * 3 + *n * ilaenv_(&c__1, "DORG"
+ "BR", "P", n, n, m, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ if (! wntun) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *m * 3 + (*m - 1) * ilaenv_(&c__1,
+ "DORGBR", "Q", m, m, m, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ maxwrk = max(maxwrk,bdspac);
+/* Computing MAX */
+ i__2 = *m * 3 + *n;
+ minwrk = max(i__2,bdspac);
+ }
+ }
+ maxwrk = max(maxwrk,minwrk);
+ work[1] = (doublereal) maxwrk;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -13;
+ }
+ }
+
+ if (*info != 0) {
+ i__2 = -(*info);
+ xerbla_("DGESVD", &i__2);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = dlamch_("P");
+ smlnum = sqrt(dlamch_("S")) / eps;
+ bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = dlange_("M", m, n, &a[a_offset], lda, dum);
+ iscl = 0;
+ if (anrm > 0. && anrm < smlnum) {
+ iscl = 1;
+ dlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, &
+ ierr);
+ } else if (anrm > bignum) {
+ iscl = 1;
+ dlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+ if (*m >= *n) {
+
+/* A has at least as many rows as columns. If A has sufficiently */
+/* more rows than columns, first reduce using the QR */
+/* decomposition (if sufficient workspace available) */
+
+ if (*m >= mnthr) {
+
+ if (wntun) {
+
+/* Path 1 (M much larger than N, JOBU='N') */
+/* No left singular vectors to be computed */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork], &
+ i__2, &ierr);
+
+/* Zero out below R */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ dlaset_("L", &i__2, &i__3, &c_b421, &c_b421, &a[a_dim1 + 2],
+ lda);
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in A */
+/* (Workspace: need 4*N, prefer 3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__2, &ierr);
+ ncvt = 0;
+ if (wntvo || wntvas) {
+
+/* If right singular vectors desired, generate P'. */
+/* (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", n, n, n, &a[a_offset], lda, &work[itaup], &
+ work[iwork], &i__2, &ierr);
+ ncvt = *n;
+ }
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of A in A if desired */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", n, &ncvt, &c__0, &c__0, &s[1], &work[ie], &a[
+ a_offset], lda, dum, &c__1, dum, &c__1, &work[iwork],
+ info);
+
+/* If right singular vectors desired in VT, copy them there */
+
+ if (wntvas) {
+ dlacpy_("F", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ }
+
+ } else if (wntuo && wntvn) {
+
+/* Path 2 (M much larger than N, JOBU='O', JOBVT='N') */
+/* N left singular vectors to be overwritten on A and */
+/* no right singular vectors to be computed */
+
+/* Computing MAX */
+ i__2 = *n << 2;
+ if (*lwork >= *n * *n + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *lda * *n + *n;
+ if (*lwork >= max(i__2,i__3) + *lda * *n) {
+
+/* WORK(IU) is LDA by N, WORK(IR) is LDA by N */
+
+ ldwrku = *lda;
+ ldwrkr = *lda;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *lda * *n + *n;
+ if (*lwork >= max(i__2,i__3) + *n * *n) {
+
+/* WORK(IU) is LDA by N, WORK(IR) is N by N */
+
+ ldwrku = *lda;
+ ldwrkr = *n;
+ } else {
+
+/* WORK(IU) is LDWRKU by N, WORK(IR) is N by N */
+
+ ldwrku = (*lwork - *n * *n - *n) / *n;
+ ldwrkr = *n;
+ }
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__2, &ierr);
+
+/* Copy R to WORK(IR) and zero out below it */
+
+ dlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ dlaset_("L", &i__2, &i__3, &c_b421, &c_b421, &work[ir + 1]
+, &ldwrkr);
+
+/* Generate Q in A */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IR) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__2, &ierr);
+
+/* Generate left vectors bidiagonalizing R */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq], &
+ work[iwork], &i__2, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IR) */
+/* (Workspace: need N*N+BDSPAC) */
+
+ dbdsqr_("U", n, &c__0, n, &c__0, &s[1], &work[ie], dum, &
+ c__1, &work[ir], &ldwrkr, dum, &c__1, &work[iwork]
+, info);
+ iu = ie + *n;
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IR), storing result in WORK(IU) and copying to A */
+/* (Workspace: need N*N+2*N, prefer N*N+M*N+N) */
+
+ i__2 = *m;
+ i__3 = ldwrku;
+ for (i__ = 1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__3) {
+/* Computing MIN */
+ i__4 = *m - i__ + 1;
+ chunk = min(i__4,ldwrku);
+ dgemm_("N", "N", &chunk, n, n, &c_b443, &a[i__ +
+ a_dim1], lda, &work[ir], &ldwrkr, &c_b421, &
+ work[iu], &ldwrku);
+ dlacpy_("F", &chunk, n, &work[iu], &ldwrku, &a[i__ +
+ a_dim1], lda);
+/* L10: */
+ }
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize A */
+/* (Workspace: need 3*N+M, prefer 3*N+(M+N)*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__3, &ierr);
+
+/* Generate left vectors bidiagonalizing A */
+/* (Workspace: need 4*N, prefer 3*N+N*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ dorgbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &
+ work[iwork], &i__3, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in A */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", n, &c__0, m, &c__0, &s[1], &work[ie], dum, &
+ c__1, &a[a_offset], lda, dum, &c__1, &work[iwork],
+ info);
+
+ }
+
+ } else if (wntuo && wntvas) {
+
+/* Path 3 (M much larger than N, JOBU='O', JOBVT='S' or 'A') */
+/* N left singular vectors to be overwritten on A and */
+/* N right singular vectors to be computed in VT */
+
+/* Computing MAX */
+ i__3 = *n << 2;
+ if (*lwork >= *n * *n + max(i__3,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+/* Computing MAX */
+ i__3 = wrkbl, i__2 = *lda * *n + *n;
+ if (*lwork >= max(i__3,i__2) + *lda * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by N */
+
+ ldwrku = *lda;
+ ldwrkr = *lda;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__3 = wrkbl, i__2 = *lda * *n + *n;
+ if (*lwork >= max(i__3,i__2) + *n * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is N by N */
+
+ ldwrku = *lda;
+ ldwrkr = *n;
+ } else {
+
+/* WORK(IU) is LDWRKU by N and WORK(IR) is N by N */
+
+ ldwrku = (*lwork - *n * *n - *n) / *n;
+ ldwrkr = *n;
+ }
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__3, &ierr);
+
+/* Copy R to VT, zeroing out below it */
+
+ dlacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ if (*n > 1) {
+ i__3 = *n - 1;
+ i__2 = *n - 1;
+ dlaset_("L", &i__3, &i__2, &c_b421, &c_b421, &vt[
+ vt_dim1 + 2], ldvt);
+ }
+
+/* Generate Q in A */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__3, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in VT, copying result to WORK(IR) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ dgebrd_(n, n, &vt[vt_offset], ldvt, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &i__3, &
+ ierr);
+ dlacpy_("L", n, n, &vt[vt_offset], ldvt, &work[ir], &
+ ldwrkr);
+
+/* Generate left vectors bidiagonalizing R in WORK(IR) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+N*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ dorgbr_("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq], &
+ work[iwork], &i__3, &ierr);
+
+/* Generate right vectors bidiagonalizing R in VT */
+/* (Workspace: need N*N+4*N-1, prefer N*N+3*N+(N-1)*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ dorgbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup],
+ &work[iwork], &i__3, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IR) and computing right */
+/* singular vectors of R in VT */
+/* (Workspace: need N*N+BDSPAC) */
+
+ dbdsqr_("U", n, n, n, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &work[ir], &ldwrkr, dum, &c__1,
+ &work[iwork], info);
+ iu = ie + *n;
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IR), storing result in WORK(IU) and copying to A */
+/* (Workspace: need N*N+2*N, prefer N*N+M*N+N) */
+
+ i__3 = *m;
+ i__2 = ldwrku;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__3 : i__ <= i__3; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__4 = *m - i__ + 1;
+ chunk = min(i__4,ldwrku);
+ dgemm_("N", "N", &chunk, n, n, &c_b443, &a[i__ +
+ a_dim1], lda, &work[ir], &ldwrkr, &c_b421, &
+ work[iu], &ldwrku);
+ dlacpy_("F", &chunk, n, &work[iu], &ldwrku, &a[i__ +
+ a_dim1], lda);
+/* L20: */
+ }
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__2, &ierr);
+
+/* Copy R to VT, zeroing out below it */
+
+ dlacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ if (*n > 1) {
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ dlaset_("L", &i__2, &i__3, &c_b421, &c_b421, &vt[
+ vt_dim1 + 2], ldvt);
+ }
+
+/* Generate Q in A */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in VT */
+/* (Workspace: need 4*N, prefer 3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(n, n, &vt[vt_offset], ldvt, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &i__2, &
+ ierr);
+
+/* Multiply Q in A by left vectors bidiagonalizing R */
+/* (Workspace: need 3*N+M, prefer 3*N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dormbr_("Q", "R", "N", m, n, n, &vt[vt_offset], ldvt, &
+ work[itauq], &a[a_offset], lda, &work[iwork], &
+ i__2, &ierr);
+
+/* Generate right vectors bidiagonalizing R in VT */
+/* (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup],
+ &work[iwork], &i__2, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in A and computing right */
+/* singular vectors of A in VT */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", n, n, m, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, dum, &c__1, &
+ work[iwork], info);
+
+ }
+
+ } else if (wntus) {
+
+ if (wntvn) {
+
+/* Path 4 (M much larger than N, JOBU='S', JOBVT='N') */
+/* N left singular vectors to be computed in U and */
+/* no right singular vectors to be computed */
+
+/* Computing MAX */
+ i__2 = *n << 2;
+ if (*lwork >= *n * *n + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+ if (*lwork >= wrkbl + *lda * *n) {
+
+/* WORK(IR) is LDA by N */
+
+ ldwrkr = *lda;
+ } else {
+
+/* WORK(IR) is N by N */
+
+ ldwrkr = *n;
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ dlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &
+ ldwrkr);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ dlaset_("L", &i__2, &i__3, &c_b421, &c_b421, &work[ir
+ + 1], &ldwrkr);
+
+/* Generate Q in A */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IR) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Generate left vectors bidiagonalizing R in WORK(IR) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IR) */
+/* (Workspace: need N*N+BDSPAC) */
+
+ dbdsqr_("U", n, &c__0, n, &c__0, &s[1], &work[ie],
+ dum, &c__1, &work[ir], &ldwrkr, dum, &c__1, &
+ work[iwork], info);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IR), storing result in U */
+/* (Workspace: need N*N) */
+
+ dgemm_("N", "N", m, n, n, &c_b443, &a[a_offset], lda,
+ &work[ir], &ldwrkr, &c_b421, &u[u_offset],
+ ldu);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ dlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgqr_(m, n, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Zero out below R in A */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ dlaset_("L", &i__2, &i__3, &c_b421, &c_b421, &a[
+ a_dim1 + 2], lda);
+
+/* Bidiagonalize R in A */
+/* (Workspace: need 4*N, prefer 3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left vectors bidiagonalizing R */
+/* (Workspace: need 3*N+M, prefer 3*N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dormbr_("Q", "R", "N", m, n, n, &a[a_offset], lda, &
+ work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr)
+ ;
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", n, &c__0, m, &c__0, &s[1], &work[ie],
+ dum, &c__1, &u[u_offset], ldu, dum, &c__1, &
+ work[iwork], info);
+
+ }
+
+ } else if (wntvo) {
+
+/* Path 5 (M much larger than N, JOBU='S', JOBVT='O') */
+/* N left singular vectors to be computed in U and */
+/* N right singular vectors to be overwritten on A */
+
+/* Computing MAX */
+ i__2 = *n << 2;
+ if (*lwork >= (*n << 1) * *n + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + (*lda << 1) * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by N */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *lda;
+ } else if (*lwork >= wrkbl + (*lda + *n) * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is N by N */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *n;
+ } else {
+
+/* WORK(IU) is N by N and WORK(IR) is N by N */
+
+ ldwrku = *n;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *n;
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need 2*N*N+2*N, prefer 2*N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IU), zeroing out below it */
+
+ dlacpy_("U", n, n, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ dlaset_("L", &i__2, &i__3, &c_b421, &c_b421, &work[iu
+ + 1], &ldwrku);
+
+/* Generate Q in A */
+/* (Workspace: need 2*N*N+2*N, prefer 2*N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IU), copying result to */
+/* WORK(IR) */
+/* (Workspace: need 2*N*N+4*N, */
+/* prefer 2*N*N+3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(n, n, &work[iu], &ldwrku, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ dlacpy_("U", n, n, &work[iu], &ldwrku, &work[ir], &
+ ldwrkr);
+
+/* Generate left bidiagonalizing vectors in WORK(IU) */
+/* (Workspace: need 2*N*N+4*N, prefer 2*N*N+3*N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", n, n, n, &work[iu], &ldwrku, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in WORK(IR) */
+/* (Workspace: need 2*N*N+4*N-1, */
+/* prefer 2*N*N+3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", n, n, n, &work[ir], &ldwrkr, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IU) and computing */
+/* right singular vectors of R in WORK(IR) */
+/* (Workspace: need 2*N*N+BDSPAC) */
+
+ dbdsqr_("U", n, n, n, &c__0, &s[1], &work[ie], &work[
+ ir], &ldwrkr, &work[iu], &ldwrku, dum, &c__1,
+ &work[iwork], info);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IU), storing result in U */
+/* (Workspace: need N*N) */
+
+ dgemm_("N", "N", m, n, n, &c_b443, &a[a_offset], lda,
+ &work[iu], &ldwrku, &c_b421, &u[u_offset],
+ ldu);
+
+/* Copy right singular vectors of R to A */
+/* (Workspace: need N*N) */
+
+ dlacpy_("F", n, n, &work[ir], &ldwrkr, &a[a_offset],
+ lda);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ dlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgqr_(m, n, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Zero out below R in A */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ dlaset_("L", &i__2, &i__3, &c_b421, &c_b421, &a[
+ a_dim1 + 2], lda);
+
+/* Bidiagonalize R in A */
+/* (Workspace: need 4*N, prefer 3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left vectors bidiagonalizing R */
+/* (Workspace: need 3*N+M, prefer 3*N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dormbr_("Q", "R", "N", m, n, n, &a[a_offset], lda, &
+ work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr)
+ ;
+
+/* Generate right vectors bidiagonalizing R in A */
+/* (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", n, n, n, &a[a_offset], lda, &work[itaup],
+ &work[iwork], &i__2, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in A */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", n, n, m, &c__0, &s[1], &work[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, dum, &c__1,
+ &work[iwork], info);
+
+ }
+
+ } else if (wntvas) {
+
+/* Path 6 (M much larger than N, JOBU='S', JOBVT='S' */
+/* or 'A') */
+/* N left singular vectors to be computed in U and */
+/* N right singular vectors to be computed in VT */
+
+/* Computing MAX */
+ i__2 = *n << 2;
+ if (*lwork >= *n * *n + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + *lda * *n) {
+
+/* WORK(IU) is LDA by N */
+
+ ldwrku = *lda;
+ } else {
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ }
+ itau = iu + ldwrku * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IU), zeroing out below it */
+
+ dlacpy_("U", n, n, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ dlaset_("L", &i__2, &i__3, &c_b421, &c_b421, &work[iu
+ + 1], &ldwrku);
+
+/* Generate Q in A */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IU), copying result to VT */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(n, n, &work[iu], &ldwrku, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ dlacpy_("U", n, n, &work[iu], &ldwrku, &vt[vt_offset],
+ ldvt);
+
+/* Generate left bidiagonalizing vectors in WORK(IU) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", n, n, n, &work[iu], &ldwrku, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in VT */
+/* (Workspace: need N*N+4*N-1, */
+/* prefer N*N+3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[
+ itaup], &work[iwork], &i__2, &ierr)
+ ;
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IU) and computing */
+/* right singular vectors of R in VT */
+/* (Workspace: need N*N+BDSPAC) */
+
+ dbdsqr_("U", n, n, n, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &work[iu], &ldwrku, dum, &
+ c__1, &work[iwork], info);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IU), storing result in U */
+/* (Workspace: need N*N) */
+
+ dgemm_("N", "N", m, n, n, &c_b443, &a[a_offset], lda,
+ &work[iu], &ldwrku, &c_b421, &u[u_offset],
+ ldu);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ dlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgqr_(m, n, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy R to VT, zeroing out below it */
+
+ dlacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ if (*n > 1) {
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ dlaset_("L", &i__2, &i__3, &c_b421, &c_b421, &vt[
+ vt_dim1 + 2], ldvt);
+ }
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in VT */
+/* (Workspace: need 4*N, prefer 3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(n, n, &vt[vt_offset], ldvt, &s[1], &work[ie],
+ &work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left bidiagonalizing vectors */
+/* in VT */
+/* (Workspace: need 3*N+M, prefer 3*N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dormbr_("Q", "R", "N", m, n, n, &vt[vt_offset], ldvt,
+ &work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in VT */
+/* (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[
+ itaup], &work[iwork], &i__2, &ierr)
+ ;
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in VT */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", n, n, m, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, dum, &
+ c__1, &work[iwork], info);
+
+ }
+
+ }
+
+ } else if (wntua) {
+
+ if (wntvn) {
+
+/* Path 7 (M much larger than N, JOBU='A', JOBVT='N') */
+/* M left singular vectors to be computed in U and */
+/* no right singular vectors to be computed */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *n << 2, i__2 = max(i__2,i__3);
+ if (*lwork >= *n * *n + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+ if (*lwork >= wrkbl + *lda * *n) {
+
+/* WORK(IR) is LDA by N */
+
+ ldwrkr = *lda;
+ } else {
+
+/* WORK(IR) is N by N */
+
+ ldwrkr = *n;
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ dlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ dlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &
+ ldwrkr);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ dlaset_("L", &i__2, &i__3, &c_b421, &c_b421, &work[ir
+ + 1], &ldwrkr);
+
+/* Generate Q in U */
+/* (Workspace: need N*N+N+M, prefer N*N+N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IR) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in WORK(IR) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IR) */
+/* (Workspace: need N*N+BDSPAC) */
+
+ dbdsqr_("U", n, &c__0, n, &c__0, &s[1], &work[ie],
+ dum, &c__1, &work[ir], &ldwrkr, dum, &c__1, &
+ work[iwork], info);
+
+/* Multiply Q in U by left singular vectors of R in */
+/* WORK(IR), storing result in A */
+/* (Workspace: need N*N) */
+
+ dgemm_("N", "N", m, n, n, &c_b443, &u[u_offset], ldu,
+ &work[ir], &ldwrkr, &c_b421, &a[a_offset],
+ lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ dlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ dlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (Workspace: need N+M, prefer N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Zero out below R in A */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ dlaset_("L", &i__2, &i__3, &c_b421, &c_b421, &a[
+ a_dim1 + 2], lda);
+
+/* Bidiagonalize R in A */
+/* (Workspace: need 4*N, prefer 3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left bidiagonalizing vectors */
+/* in A */
+/* (Workspace: need 3*N+M, prefer 3*N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dormbr_("Q", "R", "N", m, n, n, &a[a_offset], lda, &
+ work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr)
+ ;
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", n, &c__0, m, &c__0, &s[1], &work[ie],
+ dum, &c__1, &u[u_offset], ldu, dum, &c__1, &
+ work[iwork], info);
+
+ }
+
+ } else if (wntvo) {
+
+/* Path 8 (M much larger than N, JOBU='A', JOBVT='O') */
+/* M left singular vectors to be computed in U and */
+/* N right singular vectors to be overwritten on A */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *n << 2, i__2 = max(i__2,i__3);
+ if (*lwork >= (*n << 1) * *n + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + (*lda << 1) * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by N */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *lda;
+ } else if (*lwork >= wrkbl + (*lda + *n) * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is N by N */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *n;
+ } else {
+
+/* WORK(IU) is N by N and WORK(IR) is N by N */
+
+ ldwrku = *n;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *n;
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need 2*N*N+2*N, prefer 2*N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ dlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (Workspace: need 2*N*N+N+M, prefer 2*N*N+N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IU), zeroing out below it */
+
+ dlacpy_("U", n, n, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ dlaset_("L", &i__2, &i__3, &c_b421, &c_b421, &work[iu
+ + 1], &ldwrku);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IU), copying result to */
+/* WORK(IR) */
+/* (Workspace: need 2*N*N+4*N, */
+/* prefer 2*N*N+3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(n, n, &work[iu], &ldwrku, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ dlacpy_("U", n, n, &work[iu], &ldwrku, &work[ir], &
+ ldwrkr);
+
+/* Generate left bidiagonalizing vectors in WORK(IU) */
+/* (Workspace: need 2*N*N+4*N, prefer 2*N*N+3*N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", n, n, n, &work[iu], &ldwrku, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in WORK(IR) */
+/* (Workspace: need 2*N*N+4*N-1, */
+/* prefer 2*N*N+3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", n, n, n, &work[ir], &ldwrkr, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IU) and computing */
+/* right singular vectors of R in WORK(IR) */
+/* (Workspace: need 2*N*N+BDSPAC) */
+
+ dbdsqr_("U", n, n, n, &c__0, &s[1], &work[ie], &work[
+ ir], &ldwrkr, &work[iu], &ldwrku, dum, &c__1,
+ &work[iwork], info);
+
+/* Multiply Q in U by left singular vectors of R in */
+/* WORK(IU), storing result in A */
+/* (Workspace: need N*N) */
+
+ dgemm_("N", "N", m, n, n, &c_b443, &u[u_offset], ldu,
+ &work[iu], &ldwrku, &c_b421, &a[a_offset],
+ lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ dlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Copy right singular vectors of R from WORK(IR) to A */
+
+ dlacpy_("F", n, n, &work[ir], &ldwrkr, &a[a_offset],
+ lda);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ dlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (Workspace: need N+M, prefer N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Zero out below R in A */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ dlaset_("L", &i__2, &i__3, &c_b421, &c_b421, &a[
+ a_dim1 + 2], lda);
+
+/* Bidiagonalize R in A */
+/* (Workspace: need 4*N, prefer 3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left bidiagonalizing vectors */
+/* in A */
+/* (Workspace: need 3*N+M, prefer 3*N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dormbr_("Q", "R", "N", m, n, n, &a[a_offset], lda, &
+ work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr)
+ ;
+
+/* Generate right bidiagonalizing vectors in A */
+/* (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", n, n, n, &a[a_offset], lda, &work[itaup],
+ &work[iwork], &i__2, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in A */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", n, n, m, &c__0, &s[1], &work[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, dum, &c__1,
+ &work[iwork], info);
+
+ }
+
+ } else if (wntvas) {
+
+/* Path 9 (M much larger than N, JOBU='A', JOBVT='S' */
+/* or 'A') */
+/* M left singular vectors to be computed in U and */
+/* N right singular vectors to be computed in VT */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *n << 2, i__2 = max(i__2,i__3);
+ if (*lwork >= *n * *n + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + *lda * *n) {
+
+/* WORK(IU) is LDA by N */
+
+ ldwrku = *lda;
+ } else {
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ }
+ itau = iu + ldwrku * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ dlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (Workspace: need N*N+N+M, prefer N*N+N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IU), zeroing out below it */
+
+ dlacpy_("U", n, n, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ dlaset_("L", &i__2, &i__3, &c_b421, &c_b421, &work[iu
+ + 1], &ldwrku);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IU), copying result to VT */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(n, n, &work[iu], &ldwrku, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ dlacpy_("U", n, n, &work[iu], &ldwrku, &vt[vt_offset],
+ ldvt);
+
+/* Generate left bidiagonalizing vectors in WORK(IU) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", n, n, n, &work[iu], &ldwrku, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in VT */
+/* (Workspace: need N*N+4*N-1, */
+/* prefer N*N+3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[
+ itaup], &work[iwork], &i__2, &ierr)
+ ;
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IU) and computing */
+/* right singular vectors of R in VT */
+/* (Workspace: need N*N+BDSPAC) */
+
+ dbdsqr_("U", n, n, n, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &work[iu], &ldwrku, dum, &
+ c__1, &work[iwork], info);
+
+/* Multiply Q in U by left singular vectors of R in */
+/* WORK(IU), storing result in A */
+/* (Workspace: need N*N) */
+
+ dgemm_("N", "N", m, n, n, &c_b443, &u[u_offset], ldu,
+ &work[iu], &ldwrku, &c_b421, &a[a_offset],
+ lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ dlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ dlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (Workspace: need N+M, prefer N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy R from A to VT, zeroing out below it */
+
+ dlacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ if (*n > 1) {
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ dlaset_("L", &i__2, &i__3, &c_b421, &c_b421, &vt[
+ vt_dim1 + 2], ldvt);
+ }
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in VT */
+/* (Workspace: need 4*N, prefer 3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(n, n, &vt[vt_offset], ldvt, &s[1], &work[ie],
+ &work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left bidiagonalizing vectors */
+/* in VT */
+/* (Workspace: need 3*N+M, prefer 3*N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dormbr_("Q", "R", "N", m, n, n, &vt[vt_offset], ldvt,
+ &work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in VT */
+/* (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[
+ itaup], &work[iwork], &i__2, &ierr)
+ ;
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in VT */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", n, n, m, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, dum, &
+ c__1, &work[iwork], info);
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* M .LT. MNTHR */
+
+/* Path 10 (M at least N, but not much larger) */
+/* Reduce to bidiagonal form without QR decomposition */
+
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize A */
+/* (Workspace: need 3*N+M, prefer 3*N+(M+N)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[iwork], &i__2, &ierr);
+ if (wntuas) {
+
+/* If left singular vectors desired in U, copy result to U */
+/* and generate left bidiagonalizing vectors in U */
+/* (Workspace: need 3*N+NCU, prefer 3*N+NCU*NB) */
+
+ dlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ if (wntus) {
+ ncu = *n;
+ }
+ if (wntua) {
+ ncu = *m;
+ }
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", m, &ncu, n, &u[u_offset], ldu, &work[itauq], &
+ work[iwork], &i__2, &ierr);
+ }
+ if (wntvas) {
+
+/* If right singular vectors desired in VT, copy result to */
+/* VT and generate right bidiagonalizing vectors in VT */
+/* (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
+
+ dlacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &
+ work[iwork], &i__2, &ierr);
+ }
+ if (wntuo) {
+
+/* If left singular vectors desired in A, generate left */
+/* bidiagonalizing vectors in A */
+/* (Workspace: need 4*N, prefer 3*N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &work[
+ iwork], &i__2, &ierr);
+ }
+ if (wntvo) {
+
+/* If right singular vectors desired in A, generate right */
+/* bidiagonalizing vectors in A */
+/* (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", n, n, n, &a[a_offset], lda, &work[itaup], &work[
+ iwork], &i__2, &ierr);
+ }
+ iwork = ie + *n;
+ if (wntuas || wntuo) {
+ nru = *m;
+ }
+ if (wntun) {
+ nru = 0;
+ }
+ if (wntvas || wntvo) {
+ ncvt = *n;
+ }
+ if (wntvn) {
+ ncvt = 0;
+ }
+ if (! wntuo && ! wntvo) {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in U and computing right singular */
+/* vectors in VT */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", n, &ncvt, &nru, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, dum, &c__1, &
+ work[iwork], info);
+ } else if (! wntuo && wntvo) {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in U and computing right singular */
+/* vectors in A */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", n, &ncvt, &nru, &c__0, &s[1], &work[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, dum, &c__1, &work[
+ iwork], info);
+ } else {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in A and computing right singular */
+/* vectors in VT */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", n, &ncvt, &nru, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, dum, &c__1, &
+ work[iwork], info);
+ }
+
+ }
+
+ } else {
+
+/* A has more columns than rows. If A has sufficiently more */
+/* columns than rows, first reduce using the LQ decomposition (if */
+/* sufficient workspace available) */
+
+ if (*n >= mnthr) {
+
+ if (wntvn) {
+
+/* Path 1t(N much larger than M, JOBVT='N') */
+/* No right singular vectors to be computed */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork], &
+ i__2, &ierr);
+
+/* Zero out above L */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ dlaset_("U", &i__2, &i__3, &c_b421, &c_b421, &a[(a_dim1 << 1)
+ + 1], lda);
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in A */
+/* (Workspace: need 4*M, prefer 3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__2, &ierr);
+ if (wntuo || wntuas) {
+
+/* If left singular vectors desired, generate Q */
+/* (Workspace: need 4*M, prefer 3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", m, m, m, &a[a_offset], lda, &work[itauq], &
+ work[iwork], &i__2, &ierr);
+ }
+ iwork = ie + *m;
+ nru = 0;
+ if (wntuo || wntuas) {
+ nru = *m;
+ }
+
+/* Perform bidiagonal QR iteration, computing left singular */
+/* vectors of A in A if desired */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", m, &c__0, &nru, &c__0, &s[1], &work[ie], dum, &
+ c__1, &a[a_offset], lda, dum, &c__1, &work[iwork],
+ info);
+
+/* If left singular vectors desired in U, copy them there */
+
+ if (wntuas) {
+ dlacpy_("F", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ }
+
+ } else if (wntvo && wntun) {
+
+/* Path 2t(N much larger than M, JOBU='N', JOBVT='O') */
+/* M right singular vectors to be overwritten on A and */
+/* no left singular vectors to be computed */
+
+/* Computing MAX */
+ i__2 = *m << 2;
+ if (*lwork >= *m * *m + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *lda * *n + *m;
+ if (*lwork >= max(i__2,i__3) + *lda * *m) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by M */
+
+ ldwrku = *lda;
+ chunk = *n;
+ ldwrkr = *lda;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *lda * *n + *m;
+ if (*lwork >= max(i__2,i__3) + *m * *m) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is M by M */
+
+ ldwrku = *lda;
+ chunk = *n;
+ ldwrkr = *m;
+ } else {
+
+/* WORK(IU) is M by CHUNK and WORK(IR) is M by M */
+
+ ldwrku = *m;
+ chunk = (*lwork - *m * *m - *m) / *m;
+ ldwrkr = *m;
+ }
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__2, &ierr);
+
+/* Copy L to WORK(IR) and zero out above it */
+
+ dlacpy_("L", m, m, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ dlaset_("U", &i__2, &i__3, &c_b421, &c_b421, &work[ir +
+ ldwrkr], &ldwrkr);
+
+/* Generate Q in A */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IR) */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(m, m, &work[ir], &ldwrkr, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__2, &ierr);
+
+/* Generate right vectors bidiagonalizing L */
+/* (Workspace: need M*M+4*M-1, prefer M*M+3*M+(M-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", m, m, m, &work[ir], &ldwrkr, &work[itaup], &
+ work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of L in WORK(IR) */
+/* (Workspace: need M*M+BDSPAC) */
+
+ dbdsqr_("U", m, m, &c__0, &c__0, &s[1], &work[ie], &work[
+ ir], &ldwrkr, dum, &c__1, dum, &c__1, &work[iwork]
+, info);
+ iu = ie + *m;
+
+/* Multiply right singular vectors of L in WORK(IR) by Q */
+/* in A, storing result in WORK(IU) and copying to A */
+/* (Workspace: need M*M+2*M, prefer M*M+M*N+M) */
+
+ i__2 = *n;
+ i__3 = chunk;
+ for (i__ = 1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__3) {
+/* Computing MIN */
+ i__4 = *n - i__ + 1;
+ blk = min(i__4,chunk);
+ dgemm_("N", "N", m, &blk, m, &c_b443, &work[ir], &
+ ldwrkr, &a[i__ * a_dim1 + 1], lda, &c_b421, &
+ work[iu], &ldwrku);
+ dlacpy_("F", m, &blk, &work[iu], &ldwrku, &a[i__ *
+ a_dim1 + 1], lda);
+/* L30: */
+ }
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize A */
+/* (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__3, &ierr);
+
+/* Generate right vectors bidiagonalizing A */
+/* (Workspace: need 4*M, prefer 3*M+M*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ dorgbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &
+ work[iwork], &i__3, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of A in A */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("L", m, n, &c__0, &c__0, &s[1], &work[ie], &a[
+ a_offset], lda, dum, &c__1, dum, &c__1, &work[
+ iwork], info);
+
+ }
+
+ } else if (wntvo && wntuas) {
+
+/* Path 3t(N much larger than M, JOBU='S' or 'A', JOBVT='O') */
+/* M right singular vectors to be overwritten on A and */
+/* M left singular vectors to be computed in U */
+
+/* Computing MAX */
+ i__3 = *m << 2;
+ if (*lwork >= *m * *m + max(i__3,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+/* Computing MAX */
+ i__3 = wrkbl, i__2 = *lda * *n + *m;
+ if (*lwork >= max(i__3,i__2) + *lda * *m) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by M */
+
+ ldwrku = *lda;
+ chunk = *n;
+ ldwrkr = *lda;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__3 = wrkbl, i__2 = *lda * *n + *m;
+ if (*lwork >= max(i__3,i__2) + *m * *m) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is M by M */
+
+ ldwrku = *lda;
+ chunk = *n;
+ ldwrkr = *m;
+ } else {
+
+/* WORK(IU) is M by CHUNK and WORK(IR) is M by M */
+
+ ldwrku = *m;
+ chunk = (*lwork - *m * *m - *m) / *m;
+ ldwrkr = *m;
+ }
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__3, &ierr);
+
+/* Copy L to U, zeroing about above it */
+
+ dlacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__3 = *m - 1;
+ i__2 = *m - 1;
+ dlaset_("U", &i__3, &i__2, &c_b421, &c_b421, &u[(u_dim1 <<
+ 1) + 1], ldu);
+
+/* Generate Q in A */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__3, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in U, copying result to WORK(IR) */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ dgebrd_(m, m, &u[u_offset], ldu, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__3, &ierr);
+ dlacpy_("U", m, m, &u[u_offset], ldu, &work[ir], &ldwrkr);
+
+/* Generate right vectors bidiagonalizing L in WORK(IR) */
+/* (Workspace: need M*M+4*M-1, prefer M*M+3*M+(M-1)*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ dorgbr_("P", m, m, m, &work[ir], &ldwrkr, &work[itaup], &
+ work[iwork], &i__3, &ierr);
+
+/* Generate left vectors bidiagonalizing L in U */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+M*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ dorgbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq], &
+ work[iwork], &i__3, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in U, and computing right */
+/* singular vectors of L in WORK(IR) */
+/* (Workspace: need M*M+BDSPAC) */
+
+ dbdsqr_("U", m, m, m, &c__0, &s[1], &work[ie], &work[ir],
+ &ldwrkr, &u[u_offset], ldu, dum, &c__1, &work[
+ iwork], info);
+ iu = ie + *m;
+
+/* Multiply right singular vectors of L in WORK(IR) by Q */
+/* in A, storing result in WORK(IU) and copying to A */
+/* (Workspace: need M*M+2*M, prefer M*M+M*N+M)) */
+
+ i__3 = *n;
+ i__2 = chunk;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__3 : i__ <= i__3; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__4 = *n - i__ + 1;
+ blk = min(i__4,chunk);
+ dgemm_("N", "N", m, &blk, m, &c_b443, &work[ir], &
+ ldwrkr, &a[i__ * a_dim1 + 1], lda, &c_b421, &
+ work[iu], &ldwrku);
+ dlacpy_("F", m, &blk, &work[iu], &ldwrku, &a[i__ *
+ a_dim1 + 1], lda);
+/* L40: */
+ }
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__2, &ierr);
+
+/* Copy L to U, zeroing out above it */
+
+ dlacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ dlaset_("U", &i__2, &i__3, &c_b421, &c_b421, &u[(u_dim1 <<
+ 1) + 1], ldu);
+
+/* Generate Q in A */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in U */
+/* (Workspace: need 4*M, prefer 3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(m, m, &u[u_offset], ldu, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__2, &ierr);
+
+/* Multiply right vectors bidiagonalizing L by Q in A */
+/* (Workspace: need 3*M+N, prefer 3*M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dormbr_("P", "L", "T", m, n, m, &u[u_offset], ldu, &work[
+ itaup], &a[a_offset], lda, &work[iwork], &i__2, &
+ ierr);
+
+/* Generate left vectors bidiagonalizing L in U */
+/* (Workspace: need 4*M, prefer 3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq], &
+ work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in A */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", m, n, m, &c__0, &s[1], &work[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, dum, &c__1, &
+ work[iwork], info);
+
+ }
+
+ } else if (wntvs) {
+
+ if (wntun) {
+
+/* Path 4t(N much larger than M, JOBU='N', JOBVT='S') */
+/* M right singular vectors to be computed in VT and */
+/* no left singular vectors to be computed */
+
+/* Computing MAX */
+ i__2 = *m << 2;
+ if (*lwork >= *m * *m + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+ if (*lwork >= wrkbl + *lda * *m) {
+
+/* WORK(IR) is LDA by M */
+
+ ldwrkr = *lda;
+ } else {
+
+/* WORK(IR) is M by M */
+
+ ldwrkr = *m;
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IR), zeroing out above it */
+
+ dlacpy_("L", m, m, &a[a_offset], lda, &work[ir], &
+ ldwrkr);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ dlaset_("U", &i__2, &i__3, &c_b421, &c_b421, &work[ir
+ + ldwrkr], &ldwrkr);
+
+/* Generate Q in A */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IR) */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(m, m, &work[ir], &ldwrkr, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Generate right vectors bidiagonalizing L in */
+/* WORK(IR) */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+(M-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", m, m, m, &work[ir], &ldwrkr, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of L in WORK(IR) */
+/* (Workspace: need M*M+BDSPAC) */
+
+ dbdsqr_("U", m, m, &c__0, &c__0, &s[1], &work[ie], &
+ work[ir], &ldwrkr, dum, &c__1, dum, &c__1, &
+ work[iwork], info);
+
+/* Multiply right singular vectors of L in WORK(IR) by */
+/* Q in A, storing result in VT */
+/* (Workspace: need M*M) */
+
+ dgemm_("N", "N", m, n, m, &c_b443, &work[ir], &ldwrkr,
+ &a[a_offset], lda, &c_b421, &vt[vt_offset],
+ ldvt);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy result to VT */
+
+ dlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorglq_(m, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Zero out above L in A */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ dlaset_("U", &i__2, &i__3, &c_b421, &c_b421, &a[(
+ a_dim1 << 1) + 1], lda);
+
+/* Bidiagonalize L in A */
+/* (Workspace: need 4*M, prefer 3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right vectors bidiagonalizing L by Q in VT */
+/* (Workspace: need 3*M+N, prefer 3*M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dormbr_("P", "L", "T", m, n, m, &a[a_offset], lda, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of A in VT */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", m, n, &c__0, &c__0, &s[1], &work[ie], &
+ vt[vt_offset], ldvt, dum, &c__1, dum, &c__1, &
+ work[iwork], info);
+
+ }
+
+ } else if (wntuo) {
+
+/* Path 5t(N much larger than M, JOBU='O', JOBVT='S') */
+/* M right singular vectors to be computed in VT and */
+/* M left singular vectors to be overwritten on A */
+
+/* Computing MAX */
+ i__2 = *m << 2;
+ if (*lwork >= (*m << 1) * *m + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + (*lda << 1) * *m) {
+
+/* WORK(IU) is LDA by M and WORK(IR) is LDA by M */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *lda;
+ } else if (*lwork >= wrkbl + (*lda + *m) * *m) {
+
+/* WORK(IU) is LDA by M and WORK(IR) is M by M */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *m;
+ } else {
+
+/* WORK(IU) is M by M and WORK(IR) is M by M */
+
+ ldwrku = *m;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *m;
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need 2*M*M+2*M, prefer 2*M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IU), zeroing out below it */
+
+ dlacpy_("L", m, m, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ dlaset_("U", &i__2, &i__3, &c_b421, &c_b421, &work[iu
+ + ldwrku], &ldwrku);
+
+/* Generate Q in A */
+/* (Workspace: need 2*M*M+2*M, prefer 2*M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IU), copying result to */
+/* WORK(IR) */
+/* (Workspace: need 2*M*M+4*M, */
+/* prefer 2*M*M+3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(m, m, &work[iu], &ldwrku, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ dlacpy_("L", m, m, &work[iu], &ldwrku, &work[ir], &
+ ldwrkr);
+
+/* Generate right bidiagonalizing vectors in WORK(IU) */
+/* (Workspace: need 2*M*M+4*M-1, */
+/* prefer 2*M*M+3*M+(M-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", m, m, m, &work[iu], &ldwrku, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in WORK(IR) */
+/* (Workspace: need 2*M*M+4*M, prefer 2*M*M+3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", m, m, m, &work[ir], &ldwrkr, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in WORK(IR) and computing */
+/* right singular vectors of L in WORK(IU) */
+/* (Workspace: need 2*M*M+BDSPAC) */
+
+ dbdsqr_("U", m, m, m, &c__0, &s[1], &work[ie], &work[
+ iu], &ldwrku, &work[ir], &ldwrkr, dum, &c__1,
+ &work[iwork], info);
+
+/* Multiply right singular vectors of L in WORK(IU) by */
+/* Q in A, storing result in VT */
+/* (Workspace: need M*M) */
+
+ dgemm_("N", "N", m, n, m, &c_b443, &work[iu], &ldwrku,
+ &a[a_offset], lda, &c_b421, &vt[vt_offset],
+ ldvt);
+
+/* Copy left singular vectors of L to A */
+/* (Workspace: need M*M) */
+
+ dlacpy_("F", m, m, &work[ir], &ldwrkr, &a[a_offset],
+ lda);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ dlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorglq_(m, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Zero out above L in A */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ dlaset_("U", &i__2, &i__3, &c_b421, &c_b421, &a[(
+ a_dim1 << 1) + 1], lda);
+
+/* Bidiagonalize L in A */
+/* (Workspace: need 4*M, prefer 3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right vectors bidiagonalizing L by Q in VT */
+/* (Workspace: need 3*M+N, prefer 3*M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dormbr_("P", "L", "T", m, n, m, &a[a_offset], lda, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors of L in A */
+/* (Workspace: need 4*M, prefer 3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", m, m, m, &a[a_offset], lda, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, compute left */
+/* singular vectors of A in A and compute right */
+/* singular vectors of A in VT */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", m, n, m, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, dum, &
+ c__1, &work[iwork], info);
+
+ }
+
+ } else if (wntuas) {
+
+/* Path 6t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='S') */
+/* M right singular vectors to be computed in VT and */
+/* M left singular vectors to be computed in U */
+
+/* Computing MAX */
+ i__2 = *m << 2;
+ if (*lwork >= *m * *m + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + *lda * *m) {
+
+/* WORK(IU) is LDA by N */
+
+ ldwrku = *lda;
+ } else {
+
+/* WORK(IU) is LDA by M */
+
+ ldwrku = *m;
+ }
+ itau = iu + ldwrku * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IU), zeroing out above it */
+
+ dlacpy_("L", m, m, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ dlaset_("U", &i__2, &i__3, &c_b421, &c_b421, &work[iu
+ + ldwrku], &ldwrku);
+
+/* Generate Q in A */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IU), copying result to U */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(m, m, &work[iu], &ldwrku, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ dlacpy_("L", m, m, &work[iu], &ldwrku, &u[u_offset],
+ ldu);
+
+/* Generate right bidiagonalizing vectors in WORK(IU) */
+/* (Workspace: need M*M+4*M-1, */
+/* prefer M*M+3*M+(M-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", m, m, m, &work[iu], &ldwrku, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in U */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in U and computing right */
+/* singular vectors of L in WORK(IU) */
+/* (Workspace: need M*M+BDSPAC) */
+
+ dbdsqr_("U", m, m, m, &c__0, &s[1], &work[ie], &work[
+ iu], &ldwrku, &u[u_offset], ldu, dum, &c__1, &
+ work[iwork], info);
+
+/* Multiply right singular vectors of L in WORK(IU) by */
+/* Q in A, storing result in VT */
+/* (Workspace: need M*M) */
+
+ dgemm_("N", "N", m, n, m, &c_b443, &work[iu], &ldwrku,
+ &a[a_offset], lda, &c_b421, &vt[vt_offset],
+ ldvt);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ dlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorglq_(m, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy L to U, zeroing out above it */
+
+ dlacpy_("L", m, m, &a[a_offset], lda, &u[u_offset],
+ ldu);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ dlaset_("U", &i__2, &i__3, &c_b421, &c_b421, &u[(
+ u_dim1 << 1) + 1], ldu);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in U */
+/* (Workspace: need 4*M, prefer 3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(m, m, &u[u_offset], ldu, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right bidiagonalizing vectors in U by Q */
+/* in VT */
+/* (Workspace: need 3*M+N, prefer 3*M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dormbr_("P", "L", "T", m, n, m, &u[u_offset], ldu, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in U */
+/* (Workspace: need 4*M, prefer 3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in VT */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", m, n, m, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, dum, &
+ c__1, &work[iwork], info);
+
+ }
+
+ }
+
+ } else if (wntva) {
+
+ if (wntun) {
+
+/* Path 7t(N much larger than M, JOBU='N', JOBVT='A') */
+/* N right singular vectors to be computed in VT and */
+/* no left singular vectors to be computed */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *m << 2, i__2 = max(i__2,i__3);
+ if (*lwork >= *m * *m + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+ if (*lwork >= wrkbl + *lda * *m) {
+
+/* WORK(IR) is LDA by M */
+
+ ldwrkr = *lda;
+ } else {
+
+/* WORK(IR) is M by M */
+
+ ldwrkr = *m;
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ dlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Copy L to WORK(IR), zeroing out above it */
+
+ dlacpy_("L", m, m, &a[a_offset], lda, &work[ir], &
+ ldwrkr);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ dlaset_("U", &i__2, &i__3, &c_b421, &c_b421, &work[ir
+ + ldwrkr], &ldwrkr);
+
+/* Generate Q in VT */
+/* (Workspace: need M*M+M+N, prefer M*M+M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IR) */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(m, m, &work[ir], &ldwrkr, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in WORK(IR) */
+/* (Workspace: need M*M+4*M-1, */
+/* prefer M*M+3*M+(M-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", m, m, m, &work[ir], &ldwrkr, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of L in WORK(IR) */
+/* (Workspace: need M*M+BDSPAC) */
+
+ dbdsqr_("U", m, m, &c__0, &c__0, &s[1], &work[ie], &
+ work[ir], &ldwrkr, dum, &c__1, dum, &c__1, &
+ work[iwork], info);
+
+/* Multiply right singular vectors of L in WORK(IR) by */
+/* Q in VT, storing result in A */
+/* (Workspace: need M*M) */
+
+ dgemm_("N", "N", m, n, m, &c_b443, &work[ir], &ldwrkr,
+ &vt[vt_offset], ldvt, &c_b421, &a[a_offset],
+ lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ dlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ dlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (Workspace: need M+N, prefer M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Zero out above L in A */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ dlaset_("U", &i__2, &i__3, &c_b421, &c_b421, &a[(
+ a_dim1 << 1) + 1], lda);
+
+/* Bidiagonalize L in A */
+/* (Workspace: need 4*M, prefer 3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right bidiagonalizing vectors in A by Q */
+/* in VT */
+/* (Workspace: need 3*M+N, prefer 3*M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dormbr_("P", "L", "T", m, n, m, &a[a_offset], lda, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of A in VT */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", m, n, &c__0, &c__0, &s[1], &work[ie], &
+ vt[vt_offset], ldvt, dum, &c__1, dum, &c__1, &
+ work[iwork], info);
+
+ }
+
+ } else if (wntuo) {
+
+/* Path 8t(N much larger than M, JOBU='O', JOBVT='A') */
+/* N right singular vectors to be computed in VT and */
+/* M left singular vectors to be overwritten on A */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *m << 2, i__2 = max(i__2,i__3);
+ if (*lwork >= (*m << 1) * *m + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + (*lda << 1) * *m) {
+
+/* WORK(IU) is LDA by M and WORK(IR) is LDA by M */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *lda;
+ } else if (*lwork >= wrkbl + (*lda + *m) * *m) {
+
+/* WORK(IU) is LDA by M and WORK(IR) is M by M */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *m;
+ } else {
+
+/* WORK(IU) is M by M and WORK(IR) is M by M */
+
+ ldwrku = *m;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *m;
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (Workspace: need 2*M*M+2*M, prefer 2*M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ dlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (Workspace: need 2*M*M+M+N, prefer 2*M*M+M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IU), zeroing out above it */
+
+ dlacpy_("L", m, m, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ dlaset_("U", &i__2, &i__3, &c_b421, &c_b421, &work[iu
+ + ldwrku], &ldwrku);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IU), copying result to */
+/* WORK(IR) */
+/* (Workspace: need 2*M*M+4*M, */
+/* prefer 2*M*M+3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(m, m, &work[iu], &ldwrku, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ dlacpy_("L", m, m, &work[iu], &ldwrku, &work[ir], &
+ ldwrkr);
+
+/* Generate right bidiagonalizing vectors in WORK(IU) */
+/* (Workspace: need 2*M*M+4*M-1, */
+/* prefer 2*M*M+3*M+(M-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", m, m, m, &work[iu], &ldwrku, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in WORK(IR) */
+/* (Workspace: need 2*M*M+4*M, prefer 2*M*M+3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", m, m, m, &work[ir], &ldwrkr, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in WORK(IR) and computing */
+/* right singular vectors of L in WORK(IU) */
+/* (Workspace: need 2*M*M+BDSPAC) */
+
+ dbdsqr_("U", m, m, m, &c__0, &s[1], &work[ie], &work[
+ iu], &ldwrku, &work[ir], &ldwrkr, dum, &c__1,
+ &work[iwork], info);
+
+/* Multiply right singular vectors of L in WORK(IU) by */
+/* Q in VT, storing result in A */
+/* (Workspace: need M*M) */
+
+ dgemm_("N", "N", m, n, m, &c_b443, &work[iu], &ldwrku,
+ &vt[vt_offset], ldvt, &c_b421, &a[a_offset],
+ lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ dlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Copy left singular vectors of A from WORK(IR) to A */
+
+ dlacpy_("F", m, m, &work[ir], &ldwrkr, &a[a_offset],
+ lda);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ dlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (Workspace: need M+N, prefer M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Zero out above L in A */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ dlaset_("U", &i__2, &i__3, &c_b421, &c_b421, &a[(
+ a_dim1 << 1) + 1], lda);
+
+/* Bidiagonalize L in A */
+/* (Workspace: need 4*M, prefer 3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right bidiagonalizing vectors in A by Q */
+/* in VT */
+/* (Workspace: need 3*M+N, prefer 3*M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dormbr_("P", "L", "T", m, n, m, &a[a_offset], lda, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in A */
+/* (Workspace: need 4*M, prefer 3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", m, m, m, &a[a_offset], lda, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in A and computing right */
+/* singular vectors of A in VT */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", m, n, m, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, dum, &
+ c__1, &work[iwork], info);
+
+ }
+
+ } else if (wntuas) {
+
+/* Path 9t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='A') */
+/* N right singular vectors to be computed in VT and */
+/* M left singular vectors to be computed in U */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *m << 2, i__2 = max(i__2,i__3);
+ if (*lwork >= *m * *m + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + *lda * *m) {
+
+/* WORK(IU) is LDA by M */
+
+ ldwrku = *lda;
+ } else {
+
+/* WORK(IU) is M by M */
+
+ ldwrku = *m;
+ }
+ itau = iu + ldwrku * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ dlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (Workspace: need M*M+M+N, prefer M*M+M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IU), zeroing out above it */
+
+ dlacpy_("L", m, m, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ dlaset_("U", &i__2, &i__3, &c_b421, &c_b421, &work[iu
+ + ldwrku], &ldwrku);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IU), copying result to U */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(m, m, &work[iu], &ldwrku, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ dlacpy_("L", m, m, &work[iu], &ldwrku, &u[u_offset],
+ ldu);
+
+/* Generate right bidiagonalizing vectors in WORK(IU) */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+(M-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", m, m, m, &work[iu], &ldwrku, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in U */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in U and computing right */
+/* singular vectors of L in WORK(IU) */
+/* (Workspace: need M*M+BDSPAC) */
+
+ dbdsqr_("U", m, m, m, &c__0, &s[1], &work[ie], &work[
+ iu], &ldwrku, &u[u_offset], ldu, dum, &c__1, &
+ work[iwork], info);
+
+/* Multiply right singular vectors of L in WORK(IU) by */
+/* Q in VT, storing result in A */
+/* (Workspace: need M*M) */
+
+ dgemm_("N", "N", m, n, m, &c_b443, &work[iu], &ldwrku,
+ &vt[vt_offset], ldvt, &c_b421, &a[a_offset],
+ lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ dlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ dlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (Workspace: need M+N, prefer M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy L to U, zeroing out above it */
+
+ dlacpy_("L", m, m, &a[a_offset], lda, &u[u_offset],
+ ldu);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ dlaset_("U", &i__2, &i__3, &c_b421, &c_b421, &u[(
+ u_dim1 << 1) + 1], ldu);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in U */
+/* (Workspace: need 4*M, prefer 3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(m, m, &u[u_offset], ldu, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right bidiagonalizing vectors in U by Q */
+/* in VT */
+/* (Workspace: need 3*M+N, prefer 3*M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dormbr_("P", "L", "T", m, n, m, &u[u_offset], ldu, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in U */
+/* (Workspace: need 4*M, prefer 3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in VT */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("U", m, n, m, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, dum, &
+ c__1, &work[iwork], info);
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* N .LT. MNTHR */
+
+/* Path 10t(N greater than M, but not much larger) */
+/* Reduce to bidiagonal form without LQ decomposition */
+
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize A */
+/* (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[iwork], &i__2, &ierr);
+ if (wntuas) {
+
+/* If left singular vectors desired in U, copy result to U */
+/* and generate left bidiagonalizing vectors in U */
+/* (Workspace: need 4*M-1, prefer 3*M+(M-1)*NB) */
+
+ dlacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ iwork], &i__2, &ierr);
+ }
+ if (wntvas) {
+
+/* If right singular vectors desired in VT, copy result to */
+/* VT and generate right bidiagonalizing vectors in VT */
+/* (Workspace: need 3*M+NRVT, prefer 3*M+NRVT*NB) */
+
+ dlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ if (wntva) {
+ nrvt = *n;
+ }
+ if (wntvs) {
+ nrvt = *m;
+ }
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", &nrvt, n, m, &vt[vt_offset], ldvt, &work[itaup],
+ &work[iwork], &i__2, &ierr);
+ }
+ if (wntuo) {
+
+/* If left singular vectors desired in A, generate left */
+/* bidiagonalizing vectors in A */
+/* (Workspace: need 4*M-1, prefer 3*M+(M-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("Q", m, m, n, &a[a_offset], lda, &work[itauq], &work[
+ iwork], &i__2, &ierr);
+ }
+ if (wntvo) {
+
+/* If right singular vectors desired in A, generate right */
+/* bidiagonalizing vectors in A */
+/* (Workspace: need 4*M, prefer 3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ dorgbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &work[
+ iwork], &i__2, &ierr);
+ }
+ iwork = ie + *m;
+ if (wntuas || wntuo) {
+ nru = *m;
+ }
+ if (wntun) {
+ nru = 0;
+ }
+ if (wntvas || wntvo) {
+ ncvt = *n;
+ }
+ if (wntvn) {
+ ncvt = 0;
+ }
+ if (! wntuo && ! wntvo) {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in U and computing right singular */
+/* vectors in VT */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("L", m, &ncvt, &nru, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, dum, &c__1, &
+ work[iwork], info);
+ } else if (! wntuo && wntvo) {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in U and computing right singular */
+/* vectors in A */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("L", m, &ncvt, &nru, &c__0, &s[1], &work[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, dum, &c__1, &work[
+ iwork], info);
+ } else {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in A and computing right singular */
+/* vectors in VT */
+/* (Workspace: need BDSPAC) */
+
+ dbdsqr_("L", m, &ncvt, &nru, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, dum, &c__1, &
+ work[iwork], info);
+ }
+
+ }
+
+ }
+
+/* If DBDSQR failed to converge, copy unconverged superdiagonals */
+/* to WORK( 2:MINMN ) */
+
+ if (*info != 0) {
+ if (ie > 2) {
+ i__2 = minmn - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__ + 1] = work[i__ + ie - 1];
+/* L50: */
+ }
+ }
+ if (ie < 2) {
+ for (i__ = minmn - 1; i__ >= 1; --i__) {
+ work[i__ + 1] = work[i__ + ie - 1];
+/* L60: */
+ }
+ }
+ }
+
+/* Undo scaling if necessary */
+
+ if (iscl == 1) {
+ if (anrm > bignum) {
+ dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ if (*info != 0 && anrm > bignum) {
+ i__2 = minmn - 1;
+ dlascl_("G", &c__0, &c__0, &bignum, &anrm, &i__2, &c__1, &work[2],
+ &minmn, &ierr);
+ }
+ if (anrm < smlnum) {
+ dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ if (*info != 0 && anrm < smlnum) {
+ i__2 = minmn - 1;
+ dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &i__2, &c__1, &work[2],
+ &minmn, &ierr);
+ }
+ }
+
+/* Return optimal workspace in WORK(1) */
+
+ work[1] = (doublereal) maxwrk;
+
+ return 0;
+
+/* End of DGESVD */
+
+} /* dgesvd_ */
diff --git a/contrib/libs/clapack/dgesvj.c b/contrib/libs/clapack/dgesvj.c
new file mode 100644
index 0000000000..2d36977c0e
--- /dev/null
+++ b/contrib/libs/clapack/dgesvj.c
@@ -0,0 +1,1796 @@
+/* dgesvj.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b17 = 0.;
+static doublereal c_b18 = 1.;
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c__2 = 2;
+
+/* Subroutine */ int dgesvj_(char *joba, char *jobu, char *jobv, integer *m,
+ integer *n, doublereal *a, integer *lda, doublereal *sva, integer *mv,
+ doublereal *v, integer *ldv, doublereal *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, v_dim1, v_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ doublereal bigtheta;
+ integer pskipped, i__, p, q;
+ doublereal t;
+ integer n2, n4;
+ doublereal rootsfmin;
+ integer n34;
+ doublereal cs, sn;
+ integer ir1, jbc;
+ doublereal big;
+ integer kbl, igl, ibr, jgl, nbl;
+ doublereal tol;
+ integer mvl;
+ doublereal aapp, aapq, aaqq;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ doublereal ctol;
+ integer ierr;
+ doublereal aapp0;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ doublereal temp1;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ doublereal scale, large, apoaq, aqoap;
+ extern logical lsame_(char *, char *);
+ doublereal theta, small, sfmin;
+ logical lsvec;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ doublereal fastr[5];
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ logical applv, rsvec;
+ extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *);
+ logical uctol;
+ extern /* Subroutine */ int drotm_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *);
+ logical lower, upper, rotok;
+ extern /* Subroutine */ int dgsvj0_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *), dgsvj1_(
+ char *, integer *, integer *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, integer *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dlaset_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+ integer ijblsk, swband, blskip;
+ doublereal mxaapq;
+ extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+ doublereal *, doublereal *);
+ doublereal thsign, mxsinj;
+ integer emptsw, notrot, iswrot, lkahead;
+ logical goscale, noscale;
+ doublereal rootbig, epsilon, rooteps;
+ integer rowskip;
+ doublereal roottol;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Zlatko Drmac of the University of Zagreb and -- */
+/* -- Kresimir Veselic of the Fernuniversitaet Hagen -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* This routine is also part of SIGMA (version 1.23, October 23. 2008.) */
+/* SIGMA is a library of algorithms for highly accurate algorithms for */
+/* computation of SVD, PSVD, QSVD, (H,K)-SVD, and for solution of the */
+/* eigenvalue problems Hx = lambda M x, H M x = lambda x with H, M > 0. */
+
+/* -#- Scalar Arguments -#- */
+
+
+/* -#- Array Arguments -#- */
+
+/* .. */
+
+/* Purpose */
+/* ~~~~~~~ */
+/* DGESVJ computes the singular value decomposition (SVD) of a real */
+/* M-by-N matrix A, where M >= N. The SVD of A is written as */
+/* [++] [xx] [x0] [xx] */
+/* A = U * SIGMA * V^t, [++] = [xx] * [ox] * [xx] */
+/* [++] [xx] */
+/* where SIGMA is an N-by-N diagonal matrix, U is an M-by-N orthonormal */
+/* matrix, and V is an N-by-N orthogonal matrix. The diagonal elements */
+/* of SIGMA are the singular values of A. The columns of U and V are the */
+/* left and the right singular vectors of A, respectively. */
+
+/* Further Details */
+/* ~~~~~~~~~~~~~~~ */
+/* The orthogonal N-by-N matrix V is obtained as a product of Jacobi plane */
+/* rotations. The rotations are implemented as fast scaled rotations of */
+/* Anda and Park [1]. In the case of underflow of the Jacobi angle, a */
+/* modified Jacobi transformation of Drmac [4] is used. Pivot strategy uses */
+/* column interchanges of de Rijk [2]. The relative accuracy of the computed */
+/* singular values and the accuracy of the computed singular vectors (in */
+/* angle metric) is as guaranteed by the theory of Demmel and Veselic [3]. */
+/* The condition number that determines the accuracy in the full rank case */
+/* is essentially min_{D=diag} kappa(A*D), where kappa(.) is the */
+/* spectral condition number. The best performance of this Jacobi SVD */
+/* procedure is achieved if used in an accelerated version of Drmac and */
+/* Veselic [5,6], and it is the kernel routine in the SIGMA library [7]. */
+/* Some tuning parameters (marked with [TP]) are available for the */
+/* implementer. */
+/* The computational range for the nonzero singular values is the machine */
+/* number interval ( UNDERFLOW , OVERFLOW ). In extreme cases, even */
+/* denormalized singular values can be computed with the corresponding */
+/* gradual loss of accurate digits. */
+
+/* Contributors */
+/* ~~~~~~~~~~~~ */
+/* Zlatko Drmac (Zagreb, Croatia) and Kresimir Veselic (Hagen, Germany) */
+
+/* References */
+/* ~~~~~~~~~~ */
+/* [1] A. A. Anda and H. Park: Fast plane rotations with dynamic scaling. */
+/* SIAM J. matrix Anal. Appl., Vol. 15 (1994), pp. 162-174. */
+/* [2] P. P. M. De Rijk: A one-sided Jacobi algorithm for computing the */
+/* singular value decomposition on a vector computer. */
+/* SIAM J. Sci. Stat. Comp., Vol. 10 (1998), pp. 359-371. */
+/* [3] J. Demmel and K. Veselic: Jacobi method is more accurate than QR. */
+/* [4] Z. Drmac: Implementation of Jacobi rotations for accurate singular */
+/* value computation in floating point arithmetic. */
+/* SIAM J. Sci. Comp., Vol. 18 (1997), pp. 1200-1222. */
+/* [5] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm I. */
+/* SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1322-1342. */
+/* LAPACK Working note 169. */
+/* [6] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm II. */
+/* SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1343-1362. */
+/* LAPACK Working note 170. */
+/* [7] Z. Drmac: SIGMA - mathematical software library for accurate SVD, PSV, */
+/* QSVD, (H,K)-SVD computations. */
+/* Department of Mathematics, University of Zagreb, 2008. */
+
+/* Bugs, Examples and Comments */
+/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
+/* Please report all bugs and send interesting test examples and comments to */
+/* drmac@math.hr. Thank you. */
+
+/* Arguments */
+/* ~~~~~~~~~ */
+
+/* JOBA (input) CHARACTER* 1 */
+/* Specifies the structure of A. */
+/* = 'L': The input matrix A is lower triangular; */
+/* = 'U': The input matrix A is upper triangular; */
+/* = 'G': The input matrix A is general M-by-N matrix, M >= N. */
+
+/* JOBU (input) CHARACTER*1 */
+/* Specifies whether to compute the left singular vectors */
+/* (columns of U): */
+
+/* = 'U': The left singular vectors corresponding to the nonzero */
+/* singular values are computed and returned in the leading */
+/* columns of A. See more details in the description of A. */
+/* The default numerical orthogonality threshold is set to */
+/* approximately TOL=CTOL*EPS, CTOL=DSQRT(M), EPS=DLAMCH('E'). */
+/* = 'C': Analogous to JOBU='U', except that user can control the */
+/* level of numerical orthogonality of the computed left */
+/* singular vectors. TOL can be set to TOL = CTOL*EPS, where */
+/* CTOL is given on input in the array WORK. */
+/* No CTOL smaller than ONE is allowed. CTOL greater */
+/* than 1 / EPS is meaningless. The option 'C' */
+/* can be used if M*EPS is satisfactory orthogonality */
+/* of the computed left singular vectors, so CTOL=M could */
+/* save few sweeps of Jacobi rotations. */
+/* See the descriptions of A and WORK(1). */
+/* = 'N': The matrix U is not computed. However, see the */
+/* description of A. */
+
+/* JOBV (input) CHARACTER*1 */
+/* Specifies whether to compute the right singular vectors, that */
+/* is, the matrix V: */
+/* = 'V' : the matrix V is computed and returned in the array V */
+/* = 'A' : the Jacobi rotations are applied to the MV-by-N */
+/* array V. In other words, the right singular vector */
+/* matrix V is not computed explicitly, instead it is */
+/* applied to an MV-by-N matrix initially stored in the */
+/* first MV rows of V. */
+/* = 'N' : the matrix V is not computed and the array V is not */
+/* referenced */
+
+/* M (input) INTEGER */
+/* The number of rows of the input matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the input matrix A. */
+/* M >= N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* If JOBU .EQ. 'U' .OR. JOBU .EQ. 'C': */
+/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
+/* If INFO .EQ. 0, */
+/* ~~~~~~~~~~~~~~~ */
+/* RANKA orthonormal columns of U are returned in the */
+/* leading RANKA columns of the array A. Here RANKA <= N */
+/* is the number of computed singular values of A that are */
+/* above the underflow threshold DLAMCH('S'). The singular */
+/* vectors corresponding to underflowed or zero singular */
+/* values are not computed. The value of RANKA is returned */
+/* in the array WORK as RANKA=NINT(WORK(2)). Also see the */
+/* descriptions of SVA and WORK. The computed columns of U */
+/* are mutually numerically orthogonal up to approximately */
+/* TOL=DSQRT(M)*EPS (default); or TOL=CTOL*EPS (JOBU.EQ.'C'), */
+/* see the description of JOBU. */
+/* If INFO .GT. 0, */
+/* ~~~~~~~~~~~~~~~ */
+/* the procedure DGESVJ did not converge in the given number */
+/* of iterations (sweeps). In that case, the computed */
+/* columns of U may not be orthogonal up to TOL. The output */
+/* U (stored in A), SIGMA (given by the computed singular */
+/* values in SVA(1:N)) and V is still a decomposition of the */
+/* input matrix A in the sense that the residual */
+/* ||A-SCALE*U*SIGMA*V^T||_2 / ||A||_2 is small. */
+
+/* If JOBU .EQ. 'N': */
+/* ~~~~~~~~~~~~~~~~~ */
+/* If INFO .EQ. 0 */
+/* ~~~~~~~~~~~~~~ */
+/* Note that the left singular vectors are 'for free' in the */
+/* one-sided Jacobi SVD algorithm. However, if only the */
+/* singular values are needed, the level of numerical */
+/* orthogonality of U is not an issue and iterations are */
+/* stopped when the columns of the iterated matrix are */
+/* numerically orthogonal up to approximately M*EPS. Thus, */
+/* on exit, A contains the columns of U scaled with the */
+/* corresponding singular values. */
+/* If INFO .GT. 0, */
+/* ~~~~~~~~~~~~~~~ */
+/* the procedure DGESVJ did not converge in the given number */
+/* of iterations (sweeps). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* SVA (workspace/output) REAL array, dimension (N) */
+/* On exit, */
+/* If INFO .EQ. 0, */
+/* ~~~~~~~~~~~~~~~ */
+/* depending on the value SCALE = WORK(1), we have: */
+/* If SCALE .EQ. ONE: */
+/* ~~~~~~~~~~~~~~~~~~ */
+/* SVA(1:N) contains the computed singular values of A. */
+/* During the computation SVA contains the Euclidean column */
+/* norms of the iterated matrices in the array A. */
+/* If SCALE .NE. ONE: */
+/* ~~~~~~~~~~~~~~~~~~ */
+/* The singular values of A are SCALE*SVA(1:N), and this */
+/* factored representation is due to the fact that some of the */
+/* singular values of A might underflow or overflow. */
+
+/* If INFO .GT. 0, */
+/* ~~~~~~~~~~~~~~~ */
+/* the procedure DGESVJ did not converge in the given number of */
+/* iterations (sweeps) and SCALE*SVA(1:N) may not be accurate. */
+
+/* MV (input) INTEGER */
+/* If JOBV .EQ. 'A', then the product of Jacobi rotations in DGESVJ */
+/* is applied to the first MV rows of V. See the description of JOBV. */
+
+/* V (input/output) REAL array, dimension (LDV,N) */
+/* If JOBV = 'V', then V contains on exit the N-by-N matrix of */
+/* the right singular vectors; */
+/* If JOBV = 'A', then V contains the product of the computed right */
+/* singular vector matrix and the initial matrix in */
+/* the array V. */
+/* If JOBV = 'N', then V is not referenced. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V, LDV .GE. 1. */
+/* If JOBV .EQ. 'V', then LDV .GE. max(1,N). */
+/* If JOBV .EQ. 'A', then LDV .GE. max(1,MV) . */
+
+/* WORK (input/workspace/output) REAL array, dimension max(4,M+N). */
+/* On entry, */
+/* If JOBU .EQ. 'C', */
+/* ~~~~~~~~~~~~~~~~~ */
+/* WORK(1) = CTOL, where CTOL defines the threshold for convergence. */
+/* The process stops if all columns of A are mutually */
+/* orthogonal up to CTOL*EPS, EPS=DLAMCH('E'). */
+/* It is required that CTOL >= ONE, i.e. it is not */
+/* allowed to force the routine to obtain orthogonality */
+/* below EPSILON. */
+/* On exit, */
+/* WORK(1) = SCALE is the scaling factor such that SCALE*SVA(1:N) */
+/* are the computed singular values of A. */
+/* (See description of SVA().) */
+/* WORK(2) = NINT(WORK(2)) is the number of the computed nonzero */
+/* singular values. */
+/* WORK(3) = NINT(WORK(3)) is the number of the computed singular */
+/* values that are larger than the underflow threshold. */
+/* WORK(4) = NINT(WORK(4)) is the number of sweeps of Jacobi */
+/* rotations needed for numerical convergence. */
+/* WORK(5) = max_{i.NE.j} |COS(A(:,i),A(:,j))| in the last sweep. */
+/* This is useful information in cases when DGESVJ did */
+/* not converge, as it can be used to estimate whether */
+/* the output is still useful and for post festum analysis. */
+/* WORK(6) = the largest absolute value over all sines of the */
+/* Jacobi rotation angles in the last sweep. It can be */
+/* useful for a post festum analysis. */
+
+/* LWORK length of WORK, WORK >= MAX(6,M+N) */
+
+/* INFO (output) INTEGER */
+/* = 0 : successful exit. */
+/* < 0 : if INFO = -i, then the i-th argument had an illegal value */
+/* > 0 : DGESVJ did not converge in the maximal allowed number (30) */
+/* of sweeps. The output may still be useful. See the */
+/* description of WORK. */
+
+/* Local Parameters */
+
+
+/* Local Scalars */
+
+
+/* Local Arrays */
+
+
+/* Intrinsic Functions */
+
+
+/* External Functions */
+/* .. from BLAS */
+/* .. from LAPACK */
+
+/* External Subroutines */
+/* .. from BLAS */
+/* .. from LAPACK */
+
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ --sva;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ --work;
+
+ /* Function Body */
+ lsvec = lsame_(jobu, "U");
+ uctol = lsame_(jobu, "C");
+ rsvec = lsame_(jobv, "V");
+ applv = lsame_(jobv, "A");
+ upper = lsame_(joba, "U");
+ lower = lsame_(joba, "L");
+
+ if (! (upper || lower || lsame_(joba, "G"))) {
+ *info = -1;
+ } else if (! (lsvec || uctol || lsame_(jobu, "N")))
+ {
+ *info = -2;
+ } else if (! (rsvec || applv || lsame_(jobv, "N")))
+ {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0 || *n > *m) {
+ *info = -5;
+ } else if (*lda < *m) {
+ *info = -7;
+ } else if (*mv < 0) {
+ *info = -9;
+ } else if (rsvec && *ldv < *n || applv && *ldv < *mv) {
+ *info = -11;
+ } else if (uctol && work[1] <= 1.) {
+ *info = -12;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = *m + *n;
+ if (*lwork < max(i__1,6)) {
+ *info = -13;
+ } else {
+ *info = 0;
+ }
+ }
+
+/* #:( */
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGESVJ", &i__1);
+ return 0;
+ }
+
+/* #:) Quick return for void matrix */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Set numerical parameters */
+/* The stopping criterion for Jacobi rotations is */
+
+/* max_{i<>j}|A(:,i)^T * A(:,j)|/(||A(:,i)||*||A(:,j)||) < CTOL*EPS */
+
+/* where EPS is the round-off and CTOL is defined as follows: */
+
+ if (uctol) {
+/* ... user controlled */
+ ctol = work[1];
+ } else {
+/* ... default */
+ if (lsvec || rsvec || applv) {
+ ctol = sqrt((doublereal) (*m));
+ } else {
+ ctol = (doublereal) (*m);
+ }
+ }
+/* ... and the machine dependent parameters are */
+/* [!] (Make sure that DLAMCH() works properly on the target machine.) */
+
+ epsilon = dlamch_("Epsilon");
+ rooteps = sqrt(epsilon);
+ sfmin = dlamch_("SafeMinimum");
+ rootsfmin = sqrt(sfmin);
+ small = sfmin / epsilon;
+ big = dlamch_("Overflow");
+/* BIG = ONE / SFMIN */
+ rootbig = 1. / rootsfmin;
+ large = big / sqrt((doublereal) (*m * *n));
+ bigtheta = 1. / rooteps;
+
+ tol = ctol * epsilon;
+ roottol = sqrt(tol);
+
+ if ((doublereal) (*m) * epsilon >= 1.) {
+ *info = -5;
+ i__1 = -(*info);
+ xerbla_("DGESVJ", &i__1);
+ return 0;
+ }
+
+/* Initialize the right singular vector matrix. */
+
+ if (rsvec) {
+ mvl = *n;
+ dlaset_("A", &mvl, n, &c_b17, &c_b18, &v[v_offset], ldv);
+ } else if (applv) {
+ mvl = *mv;
+ }
+ rsvec = rsvec || applv;
+
+/* Initialize SVA( 1:N ) = ( ||A e_i||_2, i = 1:N ) */
+/* (!) If necessary, scale A to protect the largest singular value */
+/* from overflow. It is possible that saving the largest singular */
+/* value destroys the information about the small ones. */
+/* This initial scaling is almost minimal in the sense that the */
+/* goal is to make sure that no column norm overflows, and that */
+/* DSQRT(N)*max_i SVA(i) does not overflow. If INFinite entries */
+/* in A are detected, the procedure returns with INFO=-6. */
+
+ scale = 1. / sqrt((doublereal) (*m) * (doublereal) (*n));
+ noscale = TRUE_;
+ goscale = TRUE_;
+
+ if (lower) {
+/* the input matrix is M-by-N lower triangular (trapezoidal) */
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ aapp = 0.;
+ aaqq = 0.;
+ i__2 = *m - p + 1;
+ dlassq_(&i__2, &a[p + p * a_dim1], &c__1, &aapp, &aaqq);
+ if (aapp > big) {
+ *info = -6;
+ i__2 = -(*info);
+ xerbla_("DGESVJ", &i__2);
+ return 0;
+ }
+ aaqq = sqrt(aaqq);
+ if (aapp < big / aaqq && noscale) {
+ sva[p] = aapp * aaqq;
+ } else {
+ noscale = FALSE_;
+ sva[p] = aapp * (aaqq * scale);
+ if (goscale) {
+ goscale = FALSE_;
+ i__2 = p - 1;
+ for (q = 1; q <= i__2; ++q) {
+ sva[q] *= scale;
+/* L1873: */
+ }
+ }
+ }
+/* L1874: */
+ }
+ } else if (upper) {
+/* the input matrix is M-by-N upper triangular (trapezoidal) */
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ aapp = 0.;
+ aaqq = 0.;
+ dlassq_(&p, &a[p * a_dim1 + 1], &c__1, &aapp, &aaqq);
+ if (aapp > big) {
+ *info = -6;
+ i__2 = -(*info);
+ xerbla_("DGESVJ", &i__2);
+ return 0;
+ }
+ aaqq = sqrt(aaqq);
+ if (aapp < big / aaqq && noscale) {
+ sva[p] = aapp * aaqq;
+ } else {
+ noscale = FALSE_;
+ sva[p] = aapp * (aaqq * scale);
+ if (goscale) {
+ goscale = FALSE_;
+ i__2 = p - 1;
+ for (q = 1; q <= i__2; ++q) {
+ sva[q] *= scale;
+/* L2873: */
+ }
+ }
+ }
+/* L2874: */
+ }
+ } else {
+/* the input matrix is M-by-N general dense */
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ aapp = 0.;
+ aaqq = 0.;
+ dlassq_(m, &a[p * a_dim1 + 1], &c__1, &aapp, &aaqq);
+ if (aapp > big) {
+ *info = -6;
+ i__2 = -(*info);
+ xerbla_("DGESVJ", &i__2);
+ return 0;
+ }
+ aaqq = sqrt(aaqq);
+ if (aapp < big / aaqq && noscale) {
+ sva[p] = aapp * aaqq;
+ } else {
+ noscale = FALSE_;
+ sva[p] = aapp * (aaqq * scale);
+ if (goscale) {
+ goscale = FALSE_;
+ i__2 = p - 1;
+ for (q = 1; q <= i__2; ++q) {
+ sva[q] *= scale;
+/* L3873: */
+ }
+ }
+ }
+/* L3874: */
+ }
+ }
+
+ if (noscale) {
+ scale = 1.;
+ }
+
+/* Move the smaller part of the spectrum from the underflow threshold */
+/* (!) Start by determining the position of the nonzero entries of the */
+/* array SVA() relative to ( SFMIN, BIG ). */
+
+ aapp = 0.;
+ aaqq = big;
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ if (sva[p] != 0.) {
+/* Computing MIN */
+ d__1 = aaqq, d__2 = sva[p];
+ aaqq = min(d__1,d__2);
+ }
+/* Computing MAX */
+ d__1 = aapp, d__2 = sva[p];
+ aapp = max(d__1,d__2);
+/* L4781: */
+ }
+
+/* #:) Quick return for zero matrix */
+
+ if (aapp == 0.) {
+ if (lsvec) {
+ dlaset_("G", m, n, &c_b17, &c_b18, &a[a_offset], lda);
+ }
+ work[1] = 1.;
+ work[2] = 0.;
+ work[3] = 0.;
+ work[4] = 0.;
+ work[5] = 0.;
+ work[6] = 0.;
+ return 0;
+ }
+
+/* #:) Quick return for one-column matrix */
+
+ if (*n == 1) {
+ if (lsvec) {
+ dlascl_("G", &c__0, &c__0, &sva[1], &scale, m, &c__1, &a[a_dim1 +
+ 1], lda, &ierr);
+ }
+ work[1] = 1. / scale;
+ if (sva[1] >= sfmin) {
+ work[2] = 1.;
+ } else {
+ work[2] = 0.;
+ }
+ work[3] = 0.;
+ work[4] = 0.;
+ work[5] = 0.;
+ work[6] = 0.;
+ return 0;
+ }
+
+/* Protect small singular values from underflow, and try to */
+/* avoid underflows/overflows in computing Jacobi rotations. */
+
+ sn = sqrt(sfmin / epsilon);
+ temp1 = sqrt(big / (doublereal) (*n));
+ if (aapp <= sn || aaqq >= temp1 || sn <= aaqq && aapp <= temp1) {
+/* Computing MIN */
+ d__1 = big, d__2 = temp1 / aapp;
+ temp1 = min(d__1,d__2);
+/* AAQQ = AAQQ*TEMP1 */
+/* AAPP = AAPP*TEMP1 */
+ } else if (aaqq <= sn && aapp <= temp1) {
+/* Computing MIN */
+ d__1 = sn / aaqq, d__2 = big / (aapp * sqrt((doublereal) (*n)));
+ temp1 = min(d__1,d__2);
+/* AAQQ = AAQQ*TEMP1 */
+/* AAPP = AAPP*TEMP1 */
+ } else if (aaqq >= sn && aapp >= temp1) {
+/* Computing MAX */
+ d__1 = sn / aaqq, d__2 = temp1 / aapp;
+ temp1 = max(d__1,d__2);
+/* AAQQ = AAQQ*TEMP1 */
+/* AAPP = AAPP*TEMP1 */
+ } else if (aaqq <= sn && aapp >= temp1) {
+/* Computing MIN */
+ d__1 = sn / aaqq, d__2 = big / (sqrt((doublereal) (*n)) * aapp);
+ temp1 = min(d__1,d__2);
+/* AAQQ = AAQQ*TEMP1 */
+/* AAPP = AAPP*TEMP1 */
+ } else {
+ temp1 = 1.;
+ }
+
+/* Scale, if necessary */
+
+ if (temp1 != 1.) {
+ dlascl_("G", &c__0, &c__0, &c_b18, &temp1, n, &c__1, &sva[1], n, &
+ ierr);
+ }
+ scale = temp1 * scale;
+ if (scale != 1.) {
+ dlascl_(joba, &c__0, &c__0, &c_b18, &scale, m, n, &a[a_offset], lda, &
+ ierr);
+ scale = 1. / scale;
+ }
+
+/* Row-cyclic Jacobi SVD algorithm with column pivoting */
+
+ emptsw = *n * (*n - 1) / 2;
+ notrot = 0;
+ fastr[0] = 0.;
+
+/* A is represented in factored form A = A * diag(WORK), where diag(WORK) */
+/* is initialized to identity. WORK is updated during fast scaled */
+/* rotations. */
+
+ i__1 = *n;
+ for (q = 1; q <= i__1; ++q) {
+ work[q] = 1.;
+/* L1868: */
+ }
+
+
+ swband = 3;
+/* [TP] SWBAND is a tuning parameter [TP]. It is meaningful and effective */
+/* if DGESVJ is used as a computational routine in the preconditioned */
+/* Jacobi SVD algorithm DGESVJ. For sweeps i=1:SWBAND the procedure */
+/* works on pivots inside a band-like region around the diagonal. */
+/* The boundaries are determined dynamically, based on the number of */
+/* pivots above a threshold. */
+
+ kbl = min(8,*n);
+/* [TP] KBL is a tuning parameter that defines the tile size in the */
+/* tiling of the p-q loops of pivot pairs. In general, an optimal */
+/* value of KBL depends on the matrix dimensions and on the */
+/* parameters of the computer's memory. */
+
+ nbl = *n / kbl;
+ if (nbl * kbl != *n) {
+ ++nbl;
+ }
+
+/* Computing 2nd power */
+ i__1 = kbl;
+ blskip = i__1 * i__1;
+/* [TP] BLKSKIP is a tuning parameter that depends on SWBAND and KBL. */
+
+ rowskip = min(5,kbl);
+/* [TP] ROWSKIP is a tuning parameter. */
+
+ lkahead = 1;
+/* [TP] LKAHEAD is a tuning parameter. */
+
+/* Quasi block transformations, using the lower (upper) triangular */
+/* structure of the input matrix. The quasi-block-cycling usually */
+/* invokes cubic convergence. Big part of this cycle is done inside */
+/* canonical subspaces of dimensions less than M. */
+
+/* Computing MAX */
+ i__1 = 64, i__2 = kbl << 2;
+ if ((lower || upper) && *n > max(i__1,i__2)) {
+/* [TP] The number of partition levels and the actual partition are */
+/* tuning parameters. */
+ n4 = *n / 4;
+ n2 = *n / 2;
+ n34 = n4 * 3;
+ if (applv) {
+ q = 0;
+ } else {
+ q = 1;
+ }
+
+ if (lower) {
+
+/* This works very well on lower triangular matrices, in particular */
+/* in the framework of the preconditioned Jacobi SVD (xGEJSV). */
+/* The idea is simple: */
+/* [+ 0 0 0] Note that Jacobi transformations of [0 0] */
+/* [+ + 0 0] [0 0] */
+/* [+ + x 0] actually work on [x 0] [x 0] */
+/* [+ + x x] [x x]. [x x] */
+
+ i__1 = *m - n34;
+ i__2 = *n - n34;
+ i__3 = *lwork - *n;
+ dgsvj0_(jobv, &i__1, &i__2, &a[n34 + 1 + (n34 + 1) * a_dim1], lda,
+ &work[n34 + 1], &sva[n34 + 1], &mvl, &v[n34 * q + 1 + (
+ n34 + 1) * v_dim1], ldv, &epsilon, &sfmin, &tol, &c__2, &
+ work[*n + 1], &i__3, &ierr);
+
+ i__1 = *m - n2;
+ i__2 = n34 - n2;
+ i__3 = *lwork - *n;
+ dgsvj0_(jobv, &i__1, &i__2, &a[n2 + 1 + (n2 + 1) * a_dim1], lda, &
+ work[n2 + 1], &sva[n2 + 1], &mvl, &v[n2 * q + 1 + (n2 + 1)
+ * v_dim1], ldv, &epsilon, &sfmin, &tol, &c__2, &work[*n
+ + 1], &i__3, &ierr);
+
+ i__1 = *m - n2;
+ i__2 = *n - n2;
+ i__3 = *lwork - *n;
+ dgsvj1_(jobv, &i__1, &i__2, &n4, &a[n2 + 1 + (n2 + 1) * a_dim1],
+ lda, &work[n2 + 1], &sva[n2 + 1], &mvl, &v[n2 * q + 1 + (
+ n2 + 1) * v_dim1], ldv, &epsilon, &sfmin, &tol, &c__1, &
+ work[*n + 1], &i__3, &ierr);
+
+ i__1 = *m - n4;
+ i__2 = n2 - n4;
+ i__3 = *lwork - *n;
+ dgsvj0_(jobv, &i__1, &i__2, &a[n4 + 1 + (n4 + 1) * a_dim1], lda, &
+ work[n4 + 1], &sva[n4 + 1], &mvl, &v[n4 * q + 1 + (n4 + 1)
+ * v_dim1], ldv, &epsilon, &sfmin, &tol, &c__1, &work[*n
+ + 1], &i__3, &ierr);
+
+ i__1 = *lwork - *n;
+ dgsvj0_(jobv, m, &n4, &a[a_offset], lda, &work[1], &sva[1], &mvl,
+ &v[v_offset], ldv, &epsilon, &sfmin, &tol, &c__1, &work[*
+ n + 1], &i__1, &ierr);
+
+ i__1 = *lwork - *n;
+ dgsvj1_(jobv, m, &n2, &n4, &a[a_offset], lda, &work[1], &sva[1], &
+ mvl, &v[v_offset], ldv, &epsilon, &sfmin, &tol, &c__1, &
+ work[*n + 1], &i__1, &ierr);
+
+
+ } else if (upper) {
+
+
+ i__1 = *lwork - *n;
+ dgsvj0_(jobv, &n4, &n4, &a[a_offset], lda, &work[1], &sva[1], &
+ mvl, &v[v_offset], ldv, &epsilon, &sfmin, &tol, &c__2, &
+ work[*n + 1], &i__1, &ierr);
+
+ i__1 = *lwork - *n;
+ dgsvj0_(jobv, &n2, &n4, &a[(n4 + 1) * a_dim1 + 1], lda, &work[n4
+ + 1], &sva[n4 + 1], &mvl, &v[n4 * q + 1 + (n4 + 1) *
+ v_dim1], ldv, &epsilon, &sfmin, &tol, &c__1, &work[*n + 1]
+, &i__1, &ierr);
+
+ i__1 = *lwork - *n;
+ dgsvj1_(jobv, &n2, &n2, &n4, &a[a_offset], lda, &work[1], &sva[1],
+ &mvl, &v[v_offset], ldv, &epsilon, &sfmin, &tol, &c__1, &
+ work[*n + 1], &i__1, &ierr);
+
+ i__1 = n2 + n4;
+ i__2 = *lwork - *n;
+ dgsvj0_(jobv, &i__1, &n4, &a[(n2 + 1) * a_dim1 + 1], lda, &work[
+ n2 + 1], &sva[n2 + 1], &mvl, &v[n2 * q + 1 + (n2 + 1) *
+ v_dim1], ldv, &epsilon, &sfmin, &tol, &c__1, &work[*n + 1]
+, &i__2, &ierr);
+ }
+
+ }
+
+/* -#- Row-cyclic pivot strategy with de Rijk's pivoting -#- */
+
+ for (i__ = 1; i__ <= 30; ++i__) {
+
+/* .. go go go ... */
+
+ mxaapq = 0.;
+ mxsinj = 0.;
+ iswrot = 0;
+
+ notrot = 0;
+ pskipped = 0;
+
+/* Each sweep is unrolled using KBL-by-KBL tiles over the pivot pairs */
+/* 1 <= p < q <= N. This is the first step toward a blocked implementation */
+/* of the rotations. New implementation, based on block transformations, */
+/* is under development. */
+
+ i__1 = nbl;
+ for (ibr = 1; ibr <= i__1; ++ibr) {
+
+ igl = (ibr - 1) * kbl + 1;
+
+/* Computing MIN */
+ i__3 = lkahead, i__4 = nbl - ibr;
+ i__2 = min(i__3,i__4);
+ for (ir1 = 0; ir1 <= i__2; ++ir1) {
+
+ igl += ir1 * kbl;
+
+/* Computing MIN */
+ i__4 = igl + kbl - 1, i__5 = *n - 1;
+ i__3 = min(i__4,i__5);
+ for (p = igl; p <= i__3; ++p) {
+
+/* .. de Rijk's pivoting */
+
+ i__4 = *n - p + 1;
+ q = idamax_(&i__4, &sva[p], &c__1) + p - 1;
+ if (p != q) {
+ dswap_(m, &a[p * a_dim1 + 1], &c__1, &a[q * a_dim1 +
+ 1], &c__1);
+ if (rsvec) {
+ dswap_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q *
+ v_dim1 + 1], &c__1);
+ }
+ temp1 = sva[p];
+ sva[p] = sva[q];
+ sva[q] = temp1;
+ temp1 = work[p];
+ work[p] = work[q];
+ work[q] = temp1;
+ }
+
+ if (ir1 == 0) {
+
+/* Column norms are periodically updated by explicit */
+/* norm computation. */
+/* Caveat: */
+/* Unfortunately, some BLAS implementations compute DNRM2(M,A(1,p),1) */
+/* as DSQRT(DDOT(M,A(1,p),1,A(1,p),1)), which may cause the result to */
+/* overflow for ||A(:,p)||_2 > DSQRT(overflow_threshold), and to */
+/* underflow for ||A(:,p)||_2 < DSQRT(underflow_threshold). */
+/* Hence, DNRM2 cannot be trusted, not even in the case when */
+/* the true norm is far from the under(over)flow boundaries. */
+/* If properly implemented DNRM2 is available, the IF-THEN-ELSE */
+/* below should read "AAPP = DNRM2( M, A(1,p), 1 ) * WORK(p)". */
+
+ if (sva[p] < rootbig && sva[p] > rootsfmin) {
+ sva[p] = dnrm2_(m, &a[p * a_dim1 + 1], &c__1) *
+ work[p];
+ } else {
+ temp1 = 0.;
+ aapp = 0.;
+ dlassq_(m, &a[p * a_dim1 + 1], &c__1, &temp1, &
+ aapp);
+ sva[p] = temp1 * sqrt(aapp) * work[p];
+ }
+ aapp = sva[p];
+ } else {
+ aapp = sva[p];
+ }
+
+ if (aapp > 0.) {
+
+ pskipped = 0;
+
+/* Computing MIN */
+ i__5 = igl + kbl - 1;
+ i__4 = min(i__5,*n);
+ for (q = p + 1; q <= i__4; ++q) {
+
+ aaqq = sva[q];
+
+ if (aaqq > 0.) {
+
+ aapp0 = aapp;
+ if (aaqq >= 1.) {
+ rotok = small * aapp <= aaqq;
+ if (aapp < big / aaqq) {
+ aapq = ddot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * work[p] * work[q] /
+ aaqq / aapp;
+ } else {
+ dcopy_(m, &a[p * a_dim1 + 1], &c__1, &
+ work[*n + 1], &c__1);
+ dlascl_("G", &c__0, &c__0, &aapp, &
+ work[p], m, &c__1, &work[*n +
+ 1], lda, &ierr);
+ aapq = ddot_(m, &work[*n + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1) *
+ work[q] / aaqq;
+ }
+ } else {
+ rotok = aapp <= aaqq / small;
+ if (aapp > small / aaqq) {
+ aapq = ddot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * work[p] * work[q] /
+ aaqq / aapp;
+ } else {
+ dcopy_(m, &a[q * a_dim1 + 1], &c__1, &
+ work[*n + 1], &c__1);
+ dlascl_("G", &c__0, &c__0, &aaqq, &
+ work[q], m, &c__1, &work[*n +
+ 1], lda, &ierr);
+ aapq = ddot_(m, &work[*n + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1) *
+ work[p] / aapp;
+ }
+ }
+
+/* Computing MAX */
+ d__1 = mxaapq, d__2 = abs(aapq);
+ mxaapq = max(d__1,d__2);
+
+/* TO rotate or NOT to rotate, THAT is the question ... */
+
+ if (abs(aapq) > tol) {
+
+/* .. rotate */
+/* [RTD] ROTATED = ROTATED + ONE */
+
+ if (ir1 == 0) {
+ notrot = 0;
+ pskipped = 0;
+ ++iswrot;
+ }
+
+ if (rotok) {
+
+ aqoap = aaqq / aapp;
+ apoaq = aapp / aaqq;
+ theta = (d__1 = aqoap - apoaq, abs(
+ d__1)) * -.5 / aapq;
+
+ if (abs(theta) > bigtheta) {
+
+ t = .5 / theta;
+ fastr[2] = t * work[p] / work[q];
+ fastr[3] = -t * work[q] / work[p];
+ drotm_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1],
+ &c__1, fastr);
+ if (rsvec) {
+ drotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q *
+ v_dim1 + 1], &c__1, fastr);
+ }
+/* Computing MAX */
+ d__1 = 0., d__2 = t * apoaq *
+ aapq + 1.;
+ sva[q] = aaqq * sqrt((max(d__1,
+ d__2)));
+ aapp *= sqrt(1. - t * aqoap *
+ aapq);
+/* Computing MAX */
+ d__1 = mxsinj, d__2 = abs(t);
+ mxsinj = max(d__1,d__2);
+
+ } else {
+
+/* .. choose correct signum for THETA and rotate */
+
+ thsign = -d_sign(&c_b18, &aapq);
+ t = 1. / (theta + thsign * sqrt(
+ theta * theta + 1.));
+ cs = sqrt(1. / (t * t + 1.));
+ sn = t * cs;
+
+/* Computing MAX */
+ d__1 = mxsinj, d__2 = abs(sn);
+ mxsinj = max(d__1,d__2);
+/* Computing MAX */
+ d__1 = 0., d__2 = t * apoaq *
+ aapq + 1.;
+ sva[q] = aaqq * sqrt((max(d__1,
+ d__2)));
+/* Computing MAX */
+ d__1 = 0., d__2 = 1. - t * aqoap *
+ aapq;
+ aapp *= sqrt((max(d__1,d__2)));
+
+ apoaq = work[p] / work[q];
+ aqoap = work[q] / work[p];
+ if (work[p] >= 1.) {
+ if (work[q] >= 1.) {
+ fastr[2] = t * apoaq;
+ fastr[3] = -t * aqoap;
+ work[p] *= cs;
+ work[q] *= cs;
+ drotm_(m, &a[p * a_dim1 + 1], &c__1, &a[q *
+ a_dim1 + 1], &c__1, fastr);
+ if (rsvec) {
+ drotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[
+ q * v_dim1 + 1], &c__1, fastr);
+ }
+ } else {
+ d__1 = -t * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ work[p] *= cs;
+ work[q] /= cs;
+ if (rsvec) {
+ d__1 = -t * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ }
+ }
+ } else {
+ if (work[q] >= 1.) {
+ d__1 = t * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ work[p] /= cs;
+ work[q] *= cs;
+ if (rsvec) {
+ d__1 = t * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ }
+ } else {
+ if (work[p] >= work[q]) {
+ d__1 = -t * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ work[p] *= cs;
+ work[q] /= cs;
+ if (rsvec) {
+ d__1 = -t * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ }
+ } else {
+ d__1 = t * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ work[p] /= cs;
+ work[q] *= cs;
+ if (rsvec) {
+ d__1 = t * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ }
+ }
+ }
+ }
+ }
+
+ } else {
+/* .. have to use modified Gram-Schmidt like transformation */
+ dcopy_(m, &a[p * a_dim1 + 1], &c__1, &
+ work[*n + 1], &c__1);
+ dlascl_("G", &c__0, &c__0, &aapp, &
+ c_b18, m, &c__1, &work[*n + 1]
+, lda, &ierr);
+ dlascl_("G", &c__0, &c__0, &aaqq, &
+ c_b18, m, &c__1, &a[q *
+ a_dim1 + 1], lda, &ierr);
+ temp1 = -aapq * work[p] / work[q];
+ daxpy_(m, &temp1, &work[*n + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1);
+ dlascl_("G", &c__0, &c__0, &c_b18, &
+ aaqq, m, &c__1, &a[q * a_dim1
+ + 1], lda, &ierr);
+/* Computing MAX */
+ d__1 = 0., d__2 = 1. - aapq * aapq;
+ sva[q] = aaqq * sqrt((max(d__1,d__2)))
+ ;
+ mxsinj = max(mxsinj,sfmin);
+ }
+/* END IF ROTOK THEN ... ELSE */
+
+/* In the case of cancellation in updating SVA(q), SVA(p) */
+/* recompute SVA(q), SVA(p). */
+
+/* Computing 2nd power */
+ d__1 = sva[q] / aaqq;
+ if (d__1 * d__1 <= rooteps) {
+ if (aaqq < rootbig && aaqq >
+ rootsfmin) {
+ sva[q] = dnrm2_(m, &a[q * a_dim1
+ + 1], &c__1) * work[q];
+ } else {
+ t = 0.;
+ aaqq = 0.;
+ dlassq_(m, &a[q * a_dim1 + 1], &
+ c__1, &t, &aaqq);
+ sva[q] = t * sqrt(aaqq) * work[q];
+ }
+ }
+ if (aapp / aapp0 <= rooteps) {
+ if (aapp < rootbig && aapp >
+ rootsfmin) {
+ aapp = dnrm2_(m, &a[p * a_dim1 +
+ 1], &c__1) * work[p];
+ } else {
+ t = 0.;
+ aapp = 0.;
+ dlassq_(m, &a[p * a_dim1 + 1], &
+ c__1, &t, &aapp);
+ aapp = t * sqrt(aapp) * work[p];
+ }
+ sva[p] = aapp;
+ }
+
+ } else {
+/* A(:,p) and A(:,q) already numerically orthogonal */
+ if (ir1 == 0) {
+ ++notrot;
+ }
+/* [RTD] SKIPPED = SKIPPED + 1 */
+ ++pskipped;
+ }
+ } else {
+/* A(:,q) is zero column */
+ if (ir1 == 0) {
+ ++notrot;
+ }
+ ++pskipped;
+ }
+
+ if (i__ <= swband && pskipped > rowskip) {
+ if (ir1 == 0) {
+ aapp = -aapp;
+ }
+ notrot = 0;
+ goto L2103;
+ }
+
+/* L2002: */
+ }
+/* END q-LOOP */
+
+L2103:
+/* bailed out of q-loop */
+
+ sva[p] = aapp;
+
+ } else {
+ sva[p] = aapp;
+ if (ir1 == 0 && aapp == 0.) {
+/* Computing MIN */
+ i__4 = igl + kbl - 1;
+ notrot = notrot + min(i__4,*n) - p;
+ }
+ }
+
+/* L2001: */
+ }
+/* end of the p-loop */
+/* end of doing the block ( ibr, ibr ) */
+/* L1002: */
+ }
+/* end of ir1-loop */
+
+/* ... go to the off diagonal blocks */
+
+ igl = (ibr - 1) * kbl + 1;
+
+ i__2 = nbl;
+ for (jbc = ibr + 1; jbc <= i__2; ++jbc) {
+
+ jgl = (jbc - 1) * kbl + 1;
+
+/* doing the block at ( ibr, jbc ) */
+
+ ijblsk = 0;
+/* Computing MIN */
+ i__4 = igl + kbl - 1;
+ i__3 = min(i__4,*n);
+ for (p = igl; p <= i__3; ++p) {
+
+ aapp = sva[p];
+ if (aapp > 0.) {
+
+ pskipped = 0;
+
+/* Computing MIN */
+ i__5 = jgl + kbl - 1;
+ i__4 = min(i__5,*n);
+ for (q = jgl; q <= i__4; ++q) {
+
+ aaqq = sva[q];
+ if (aaqq > 0.) {
+ aapp0 = aapp;
+
+/* -#- M x 2 Jacobi SVD -#- */
+
+/* Safe Gram matrix computation */
+
+ if (aaqq >= 1.) {
+ if (aapp >= aaqq) {
+ rotok = small * aapp <= aaqq;
+ } else {
+ rotok = small * aaqq <= aapp;
+ }
+ if (aapp < big / aaqq) {
+ aapq = ddot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * work[p] * work[q] /
+ aaqq / aapp;
+ } else {
+ dcopy_(m, &a[p * a_dim1 + 1], &c__1, &
+ work[*n + 1], &c__1);
+ dlascl_("G", &c__0, &c__0, &aapp, &
+ work[p], m, &c__1, &work[*n +
+ 1], lda, &ierr);
+ aapq = ddot_(m, &work[*n + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1) *
+ work[q] / aaqq;
+ }
+ } else {
+ if (aapp >= aaqq) {
+ rotok = aapp <= aaqq / small;
+ } else {
+ rotok = aaqq <= aapp / small;
+ }
+ if (aapp > small / aaqq) {
+ aapq = ddot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * work[p] * work[q] /
+ aaqq / aapp;
+ } else {
+ dcopy_(m, &a[q * a_dim1 + 1], &c__1, &
+ work[*n + 1], &c__1);
+ dlascl_("G", &c__0, &c__0, &aaqq, &
+ work[q], m, &c__1, &work[*n +
+ 1], lda, &ierr);
+ aapq = ddot_(m, &work[*n + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1) *
+ work[p] / aapp;
+ }
+ }
+
+/* Computing MAX */
+ d__1 = mxaapq, d__2 = abs(aapq);
+ mxaapq = max(d__1,d__2);
+
+/* TO rotate or NOT to rotate, THAT is the question ... */
+
+ if (abs(aapq) > tol) {
+ notrot = 0;
+/* [RTD] ROTATED = ROTATED + 1 */
+ pskipped = 0;
+ ++iswrot;
+
+ if (rotok) {
+
+ aqoap = aaqq / aapp;
+ apoaq = aapp / aaqq;
+ theta = (d__1 = aqoap - apoaq, abs(
+ d__1)) * -.5 / aapq;
+ if (aaqq > aapp0) {
+ theta = -theta;
+ }
+
+ if (abs(theta) > bigtheta) {
+ t = .5 / theta;
+ fastr[2] = t * work[p] / work[q];
+ fastr[3] = -t * work[q] / work[p];
+ drotm_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1],
+ &c__1, fastr);
+ if (rsvec) {
+ drotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q *
+ v_dim1 + 1], &c__1, fastr);
+ }
+/* Computing MAX */
+ d__1 = 0., d__2 = t * apoaq *
+ aapq + 1.;
+ sva[q] = aaqq * sqrt((max(d__1,
+ d__2)));
+/* Computing MAX */
+ d__1 = 0., d__2 = 1. - t * aqoap *
+ aapq;
+ aapp *= sqrt((max(d__1,d__2)));
+/* Computing MAX */
+ d__1 = mxsinj, d__2 = abs(t);
+ mxsinj = max(d__1,d__2);
+ } else {
+
+/* .. choose correct signum for THETA and rotate */
+
+ thsign = -d_sign(&c_b18, &aapq);
+ if (aaqq > aapp0) {
+ thsign = -thsign;
+ }
+ t = 1. / (theta + thsign * sqrt(
+ theta * theta + 1.));
+ cs = sqrt(1. / (t * t + 1.));
+ sn = t * cs;
+/* Computing MAX */
+ d__1 = mxsinj, d__2 = abs(sn);
+ mxsinj = max(d__1,d__2);
+/* Computing MAX */
+ d__1 = 0., d__2 = t * apoaq *
+ aapq + 1.;
+ sva[q] = aaqq * sqrt((max(d__1,
+ d__2)));
+ aapp *= sqrt(1. - t * aqoap *
+ aapq);
+
+ apoaq = work[p] / work[q];
+ aqoap = work[q] / work[p];
+ if (work[p] >= 1.) {
+
+ if (work[q] >= 1.) {
+ fastr[2] = t * apoaq;
+ fastr[3] = -t * aqoap;
+ work[p] *= cs;
+ work[q] *= cs;
+ drotm_(m, &a[p * a_dim1 + 1], &c__1, &a[q *
+ a_dim1 + 1], &c__1, fastr);
+ if (rsvec) {
+ drotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[
+ q * v_dim1 + 1], &c__1, fastr);
+ }
+ } else {
+ d__1 = -t * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ if (rsvec) {
+ d__1 = -t * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ }
+ work[p] *= cs;
+ work[q] /= cs;
+ }
+ } else {
+ if (work[q] >= 1.) {
+ d__1 = t * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ if (rsvec) {
+ d__1 = t * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ }
+ work[p] /= cs;
+ work[q] *= cs;
+ } else {
+ if (work[p] >= work[q]) {
+ d__1 = -t * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ work[p] *= cs;
+ work[q] /= cs;
+ if (rsvec) {
+ d__1 = -t * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ }
+ } else {
+ d__1 = t * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ work[p] /= cs;
+ work[q] *= cs;
+ if (rsvec) {
+ d__1 = t * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ }
+ }
+ }
+ }
+ }
+
+ } else {
+ if (aapp > aaqq) {
+ dcopy_(m, &a[p * a_dim1 + 1], &
+ c__1, &work[*n + 1], &
+ c__1);
+ dlascl_("G", &c__0, &c__0, &aapp,
+ &c_b18, m, &c__1, &work[*
+ n + 1], lda, &ierr);
+ dlascl_("G", &c__0, &c__0, &aaqq,
+ &c_b18, m, &c__1, &a[q *
+ a_dim1 + 1], lda, &ierr);
+ temp1 = -aapq * work[p] / work[q];
+ daxpy_(m, &temp1, &work[*n + 1], &
+ c__1, &a[q * a_dim1 + 1],
+ &c__1);
+ dlascl_("G", &c__0, &c__0, &c_b18,
+ &aaqq, m, &c__1, &a[q *
+ a_dim1 + 1], lda, &ierr);
+/* Computing MAX */
+ d__1 = 0., d__2 = 1. - aapq *
+ aapq;
+ sva[q] = aaqq * sqrt((max(d__1,
+ d__2)));
+ mxsinj = max(mxsinj,sfmin);
+ } else {
+ dcopy_(m, &a[q * a_dim1 + 1], &
+ c__1, &work[*n + 1], &
+ c__1);
+ dlascl_("G", &c__0, &c__0, &aaqq,
+ &c_b18, m, &c__1, &work[*
+ n + 1], lda, &ierr);
+ dlascl_("G", &c__0, &c__0, &aapp,
+ &c_b18, m, &c__1, &a[p *
+ a_dim1 + 1], lda, &ierr);
+ temp1 = -aapq * work[q] / work[p];
+ daxpy_(m, &temp1, &work[*n + 1], &
+ c__1, &a[p * a_dim1 + 1],
+ &c__1);
+ dlascl_("G", &c__0, &c__0, &c_b18,
+ &aapp, m, &c__1, &a[p *
+ a_dim1 + 1], lda, &ierr);
+/* Computing MAX */
+ d__1 = 0., d__2 = 1. - aapq *
+ aapq;
+ sva[p] = aapp * sqrt((max(d__1,
+ d__2)));
+ mxsinj = max(mxsinj,sfmin);
+ }
+ }
+/* END IF ROTOK THEN ... ELSE */
+
+/* In the case of cancellation in updating SVA(q) */
+/* .. recompute SVA(q) */
+/* Computing 2nd power */
+ d__1 = sva[q] / aaqq;
+ if (d__1 * d__1 <= rooteps) {
+ if (aaqq < rootbig && aaqq >
+ rootsfmin) {
+ sva[q] = dnrm2_(m, &a[q * a_dim1
+ + 1], &c__1) * work[q];
+ } else {
+ t = 0.;
+ aaqq = 0.;
+ dlassq_(m, &a[q * a_dim1 + 1], &
+ c__1, &t, &aaqq);
+ sva[q] = t * sqrt(aaqq) * work[q];
+ }
+ }
+/* Computing 2nd power */
+ d__1 = aapp / aapp0;
+ if (d__1 * d__1 <= rooteps) {
+ if (aapp < rootbig && aapp >
+ rootsfmin) {
+ aapp = dnrm2_(m, &a[p * a_dim1 +
+ 1], &c__1) * work[p];
+ } else {
+ t = 0.;
+ aapp = 0.;
+ dlassq_(m, &a[p * a_dim1 + 1], &
+ c__1, &t, &aapp);
+ aapp = t * sqrt(aapp) * work[p];
+ }
+ sva[p] = aapp;
+ }
+/* end of OK rotation */
+ } else {
+ ++notrot;
+/* [RTD] SKIPPED = SKIPPED + 1 */
+ ++pskipped;
+ ++ijblsk;
+ }
+ } else {
+ ++notrot;
+ ++pskipped;
+ ++ijblsk;
+ }
+
+ if (i__ <= swband && ijblsk >= blskip) {
+ sva[p] = aapp;
+ notrot = 0;
+ goto L2011;
+ }
+ if (i__ <= swband && pskipped > rowskip) {
+ aapp = -aapp;
+ notrot = 0;
+ goto L2203;
+ }
+
+/* L2200: */
+ }
+/* end of the q-loop */
+L2203:
+
+ sva[p] = aapp;
+
+ } else {
+
+ if (aapp == 0.) {
+/* Computing MIN */
+ i__4 = jgl + kbl - 1;
+ notrot = notrot + min(i__4,*n) - jgl + 1;
+ }
+ if (aapp < 0.) {
+ notrot = 0;
+ }
+
+ }
+
+/* L2100: */
+ }
+/* end of the p-loop */
+/* L2010: */
+ }
+/* end of the jbc-loop */
+L2011:
+/* 2011 bailed out of the jbc-loop */
+/* Computing MIN */
+ i__3 = igl + kbl - 1;
+ i__2 = min(i__3,*n);
+ for (p = igl; p <= i__2; ++p) {
+ sva[p] = (d__1 = sva[p], abs(d__1));
+/* L2012: */
+ }
+/* ** */
+/* L2000: */
+ }
+/* 2000 :: end of the ibr-loop */
+
+/* .. update SVA(N) */
+ if (sva[*n] < rootbig && sva[*n] > rootsfmin) {
+ sva[*n] = dnrm2_(m, &a[*n * a_dim1 + 1], &c__1) * work[*n];
+ } else {
+ t = 0.;
+ aapp = 0.;
+ dlassq_(m, &a[*n * a_dim1 + 1], &c__1, &t, &aapp);
+ sva[*n] = t * sqrt(aapp) * work[*n];
+ }
+
+/* Additional steering devices */
+
+ if (i__ < swband && (mxaapq <= roottol || iswrot <= *n)) {
+ swband = i__;
+ }
+
+ if (i__ > swband + 1 && mxaapq < sqrt((doublereal) (*n)) * tol && (
+ doublereal) (*n) * mxaapq * mxsinj < tol) {
+ goto L1994;
+ }
+
+ if (notrot >= emptsw) {
+ goto L1994;
+ }
+
+/* L1993: */
+ }
+/* end i=1:NSWEEP loop */
+
+/* #:( Reaching this point means that the procedure has not converged. */
+ *info = 29;
+ goto L1995;
+
+L1994:
+/* #:) Reaching this point means numerical convergence after the i-th */
+/* sweep. */
+
+ *info = 0;
+/* #:) INFO = 0 confirms successful iterations. */
+L1995:
+
+/* Sort the singular values and find how many are above */
+/* the underflow threshold. */
+
+ n2 = 0;
+ n4 = 0;
+ i__1 = *n - 1;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *n - p + 1;
+ q = idamax_(&i__2, &sva[p], &c__1) + p - 1;
+ if (p != q) {
+ temp1 = sva[p];
+ sva[p] = sva[q];
+ sva[q] = temp1;
+ temp1 = work[p];
+ work[p] = work[q];
+ work[q] = temp1;
+ dswap_(m, &a[p * a_dim1 + 1], &c__1, &a[q * a_dim1 + 1], &c__1);
+ if (rsvec) {
+ dswap_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ }
+ }
+ if (sva[p] != 0.) {
+ ++n4;
+ if (sva[p] * scale > sfmin) {
+ ++n2;
+ }
+ }
+/* L5991: */
+ }
+ if (sva[*n] != 0.) {
+ ++n4;
+ if (sva[*n] * scale > sfmin) {
+ ++n2;
+ }
+ }
+
+/* Normalize the left singular vectors. */
+
+ if (lsvec || uctol) {
+ i__1 = n2;
+ for (p = 1; p <= i__1; ++p) {
+ d__1 = work[p] / sva[p];
+ dscal_(m, &d__1, &a[p * a_dim1 + 1], &c__1);
+/* L1998: */
+ }
+ }
+
+/* Scale the product of Jacobi rotations (assemble the fast rotations). */
+
+ if (rsvec) {
+ if (applv) {
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ dscal_(&mvl, &work[p], &v[p * v_dim1 + 1], &c__1);
+/* L2398: */
+ }
+ } else {
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ temp1 = 1. / dnrm2_(&mvl, &v[p * v_dim1 + 1], &c__1);
+ dscal_(&mvl, &temp1, &v[p * v_dim1 + 1], &c__1);
+/* L2399: */
+ }
+ }
+ }
+
+/* Undo scaling, if necessary (and possible). */
+ if (scale > 1. && sva[1] < big / scale || scale < 1. && sva[n2] > sfmin /
+ scale) {
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ sva[p] = scale * sva[p];
+/* L2400: */
+ }
+ scale = 1.;
+ }
+
+ work[1] = scale;
+/* The singular values of A are SCALE*SVA(1:N). If SCALE.NE.ONE */
+/* then some of the singular values may overflow or underflow and */
+/* the spectrum is given in this factored representation. */
+
+ work[2] = (doublereal) n4;
+/* N4 is the number of computed nonzero singular values of A. */
+
+ work[3] = (doublereal) n2;
+/* N2 is the number of singular values of A greater than SFMIN. */
+/* If N2<N, SVA(N2:N) contains ZEROS and/or denormalized numbers */
+/* that may carry some information. */
+
+ work[4] = (doublereal) i__;
+/* i is the index of the last sweep before declaring convergence. */
+
+ work[5] = mxaapq;
+/* MXAAPQ is the largest absolute value of scaled pivots in the */
+/* last sweep */
+
+ work[6] = mxsinj;
+/* MXSINJ is the largest absolute value of the sines of Jacobi angles */
+/* in the last sweep */
+
+ return 0;
+/* .. */
+/* .. END OF DGESVJ */
+/* .. */
+} /* dgesvj_ */
diff --git a/contrib/libs/clapack/dgesvx.c b/contrib/libs/clapack/dgesvx.c
new file mode 100644
index 0000000000..ab372d0edf
--- /dev/null
+++ b/contrib/libs/clapack/dgesvx.c
@@ -0,0 +1,587 @@
+/* dgesvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dgesvx_(char *fact, char *trans, integer *n, integer *
+ nrhs, doublereal *a, integer *lda, doublereal *af, integer *ldaf,
+ integer *ipiv, char *equed, doublereal *r__, doublereal *c__,
+ doublereal *b, integer *ldb, doublereal *x, integer *ldx, doublereal *
+ rcond, doublereal *ferr, doublereal *berr, doublereal *work, integer *
+ iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+ x_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal amax;
+ char norm[1];
+ extern logical lsame_(char *, char *);
+ doublereal rcmin, rcmax, anorm;
+ logical equil;
+ extern doublereal dlamch_(char *), dlange_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *);
+ extern /* Subroutine */ int dlaqge_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, char *), dgecon_(char *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, integer *);
+ doublereal colcnd;
+ logical nofact;
+ extern /* Subroutine */ int dgeequ_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *), dgerfs_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *, integer *),
+ dgetrf_(integer *, integer *, doublereal *, integer *, integer *,
+ integer *), dlacpy_(char *, integer *, integer *, doublereal *,
+ integer *, doublereal *, integer *), xerbla_(char *,
+ integer *);
+ doublereal bignum;
+ extern doublereal dlantr_(char *, char *, char *, integer *, integer *,
+ doublereal *, integer *, doublereal *);
+ integer infequ;
+ logical colequ;
+ extern /* Subroutine */ int dgetrs_(char *, integer *, integer *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *);
+ doublereal rowcnd;
+ logical notran;
+ doublereal smlnum;
+ logical rowequ;
+ doublereal rpvgrw;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGESVX uses the LU factorization to compute the solution to a real */
+/* system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N matrix and X and B are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/* the system: */
+/* TRANS = 'N': diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B */
+/* TRANS = 'T': (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B */
+/* TRANS = 'C': (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B */
+/* Whether or not the system will be equilibrated depends on the */
+/* scaling of the matrix A, but if equilibration is used, A is */
+/* overwritten by diag(R)*A*diag(C) and B by diag(R)*B (if TRANS='N') */
+/* or diag(C)*B (if TRANS = 'T' or 'C'). */
+
+/* 2. If FACT = 'N' or 'E', the LU decomposition is used to factor the */
+/* matrix A (after equilibration if FACT = 'E') as */
+/* A = P * L * U, */
+/* where P is a permutation matrix, L is a unit lower triangular */
+/* matrix, and U is upper triangular. */
+
+/* 3. If some U(i,i)=0, so that U is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 4. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 5. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* 6. If equilibration was used, the matrix X is premultiplied by */
+/* diag(C) (if TRANS = 'N') or diag(R) (if TRANS = 'T' or 'C') so */
+/* that it solves the original system before equilibration. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of the matrix A is */
+/* supplied on entry, and if not, whether the matrix A should be */
+/* equilibrated before it is factored. */
+/* = 'F': On entry, AF and IPIV contain the factored form of A. */
+/* If EQUED is not 'N', the matrix A has been */
+/* equilibrated with scaling factors given by R and C. */
+/* A, AF, and IPIV are not modified. */
+/* = 'N': The matrix A will be copied to AF and factored. */
+/* = 'E': The matrix A will be equilibrated if necessary, then */
+/* copied to AF and factored. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Transpose) */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the N-by-N matrix A. If FACT = 'F' and EQUED is */
+/* not 'N', then A must have been equilibrated by the scaling */
+/* factors in R and/or C. A is not modified if FACT = 'F' or */
+/* 'N', or if FACT = 'E' and EQUED = 'N' on exit. */
+
+/* On exit, if EQUED .ne. 'N', A is scaled as follows: */
+/* EQUED = 'R': A := diag(R) * A */
+/* EQUED = 'C': A := A * diag(C) */
+/* EQUED = 'B': A := diag(R) * A * diag(C). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input or output) DOUBLE PRECISION array, dimension (LDAF,N) */
+/* If FACT = 'F', then AF is an input argument and on entry */
+/* contains the factors L and U from the factorization */
+/* A = P*L*U as computed by DGETRF. If EQUED .ne. 'N', then */
+/* AF is the factored form of the equilibrated matrix A. */
+
+/* If FACT = 'N', then AF is an output argument and on exit */
+/* returns the factors L and U from the factorization A = P*L*U */
+/* of the original matrix A. */
+
+/* If FACT = 'E', then AF is an output argument and on exit */
+/* returns the factors L and U from the factorization A = P*L*U */
+/* of the equilibrated matrix A (see the description of A for */
+/* the form of the equilibrated matrix). */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains the pivot indices from the factorization A = P*L*U */
+/* as computed by DGETRF; row i of the matrix was interchanged */
+/* with row IPIV(i). */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains the pivot indices from the factorization A = P*L*U */
+/* of the original matrix A. */
+
+/* If FACT = 'E', then IPIV is an output argument and on exit */
+/* contains the pivot indices from the factorization A = P*L*U */
+/* of the equilibrated matrix A. */
+
+/* EQUED (input or output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration (always true if FACT = 'N'). */
+/* = 'R': Row equilibration, i.e., A has been premultiplied by */
+/* diag(R). */
+/* = 'C': Column equilibration, i.e., A has been postmultiplied */
+/* by diag(C). */
+/* = 'B': Both row and column equilibration, i.e., A has been */
+/* replaced by diag(R) * A * diag(C). */
+/* EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/* output argument. */
+
+/* R (input or output) DOUBLE PRECISION array, dimension (N) */
+/* The row scale factors for A. If EQUED = 'R' or 'B', A is */
+/* multiplied on the left by diag(R); if EQUED = 'N' or 'C', R */
+/* is not accessed. R is an input argument if FACT = 'F'; */
+/* otherwise, R is an output argument. If FACT = 'F' and */
+/* EQUED = 'R' or 'B', each element of R must be positive. */
+
+/* C (input or output) DOUBLE PRECISION array, dimension (N) */
+/* The column scale factors for A. If EQUED = 'C' or 'B', A is */
+/* multiplied on the right by diag(C); if EQUED = 'N' or 'R', C */
+/* is not accessed. C is an input argument if FACT = 'F'; */
+/* otherwise, C is an output argument. If FACT = 'F' and */
+/* EQUED = 'C' or 'B', each element of C must be positive. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, */
+/* if EQUED = 'N', B is not modified; */
+/* if TRANS = 'N' and EQUED = 'R' or 'B', B is overwritten by */
+/* diag(R)*B; */
+/* if TRANS = 'T' or 'C' and EQUED = 'C' or 'B', B is */
+/* overwritten by diag(C)*B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X */
+/* to the original system of equations. Note that A and B are */
+/* modified on exit if EQUED .ne. 'N', and the solution to the */
+/* equilibrated system is inv(diag(C))*X if TRANS = 'N' and */
+/* EQUED = 'C' or 'B', or inv(diag(R))*X if TRANS = 'T' or 'C' */
+/* and EQUED = 'R' or 'B'. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A after equilibration (if done). If RCOND is less than the */
+/* machine precision (in particular, if RCOND = 0), the matrix */
+/* is singular to working precision. This condition is */
+/* indicated by a return code of INFO > 0. */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (4*N) */
+/* On exit, WORK(1) contains the reciprocal pivot growth */
+/* factor norm(A)/norm(U). The "max absolute element" norm is */
+/* used. If WORK(1) is much less than 1, then the stability */
+/* of the LU factorization of the (equilibrated) matrix A */
+/* could be poor. This also means that the solution X, condition */
+/* estimator RCOND, and forward error bound FERR could be */
+/* unreliable. If factorization fails with 0<INFO<=N, then */
+/* WORK(1) contains the reciprocal pivot growth factor for the */
+/* leading INFO columns of A. */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: U(i,i) is exactly zero. The factorization has */
+/* been completed, but the factor U is exactly */
+/* singular, so the solution and error bounds */
+/* could not be computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ af_dim1 = *ldaf;
+ af_offset = 1 + af_dim1;
+ af -= af_offset;
+ --ipiv;
+ --r__;
+ --c__;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ equil = lsame_(fact, "E");
+ notran = lsame_(trans, "N");
+ if (nofact || equil) {
+ *(unsigned char *)equed = 'N';
+ rowequ = FALSE_;
+ colequ = FALSE_;
+ } else {
+ rowequ = lsame_(equed, "R") || lsame_(equed,
+ "B");
+ colequ = lsame_(equed, "C") || lsame_(equed,
+ "B");
+ smlnum = dlamch_("Safe minimum");
+ bignum = 1. / smlnum;
+ }
+
+/* Test the input parameters. */
+
+ if (! nofact && ! equil && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else if (*ldaf < max(1,*n)) {
+ *info = -8;
+ } else if (lsame_(fact, "F") && ! (rowequ || colequ
+ || lsame_(equed, "N"))) {
+ *info = -10;
+ } else {
+ if (rowequ) {
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ d__1 = rcmin, d__2 = r__[j];
+ rcmin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = rcmax, d__2 = r__[j];
+ rcmax = max(d__1,d__2);
+/* L10: */
+ }
+ if (rcmin <= 0.) {
+ *info = -11;
+ } else if (*n > 0) {
+ rowcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ } else {
+ rowcnd = 1.;
+ }
+ }
+ if (colequ && *info == 0) {
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ d__1 = rcmin, d__2 = c__[j];
+ rcmin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = rcmax, d__2 = c__[j];
+ rcmax = max(d__1,d__2);
+/* L20: */
+ }
+ if (rcmin <= 0.) {
+ *info = -12;
+ } else if (*n > 0) {
+ colcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ } else {
+ colcnd = 1.;
+ }
+ }
+ if (*info == 0) {
+ if (*ldb < max(1,*n)) {
+ *info = -14;
+ } else if (*ldx < max(1,*n)) {
+ *info = -16;
+ }
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGESVX", &i__1);
+ return 0;
+ }
+
+ if (equil) {
+
+/* Compute row and column scalings to equilibrate the matrix A. */
+
+ dgeequ_(n, n, &a[a_offset], lda, &r__[1], &c__[1], &rowcnd, &colcnd, &
+ amax, &infequ);
+ if (infequ == 0) {
+
+/* Equilibrate the matrix. */
+
+ dlaqge_(n, n, &a[a_offset], lda, &r__[1], &c__[1], &rowcnd, &
+ colcnd, &amax, equed);
+ rowequ = lsame_(equed, "R") || lsame_(equed,
+ "B");
+ colequ = lsame_(equed, "C") || lsame_(equed,
+ "B");
+ }
+ }
+
+/* Scale the right hand side. */
+
+ if (notran) {
+ if (rowequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = r__[i__] * b[i__ + j * b_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else if (colequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = c__[i__] * b[i__ + j * b_dim1];
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+ if (nofact || equil) {
+
+/* Compute the LU factorization of A. */
+
+ dlacpy_("Full", n, n, &a[a_offset], lda, &af[af_offset], ldaf);
+ dgetrf_(n, n, &af[af_offset], ldaf, &ipiv[1], info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+
+/* Compute the reciprocal pivot growth factor of the */
+/* leading rank-deficient INFO columns of A. */
+
+ rpvgrw = dlantr_("M", "U", "N", info, info, &af[af_offset], ldaf,
+ &work[1]);
+ if (rpvgrw == 0.) {
+ rpvgrw = 1.;
+ } else {
+ rpvgrw = dlange_("M", n, info, &a[a_offset], lda, &work[1]) / rpvgrw;
+ }
+ work[1] = rpvgrw;
+ *rcond = 0.;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A and the */
+/* reciprocal pivot growth factor RPVGRW. */
+
+ if (notran) {
+ *(unsigned char *)norm = '1';
+ } else {
+ *(unsigned char *)norm = 'I';
+ }
+ anorm = dlange_(norm, n, n, &a[a_offset], lda, &work[1]);
+ rpvgrw = dlantr_("M", "U", "N", n, n, &af[af_offset], ldaf, &work[1]);
+ if (rpvgrw == 0.) {
+ rpvgrw = 1.;
+ } else {
+ rpvgrw = dlange_("M", n, n, &a[a_offset], lda, &work[1]) /
+ rpvgrw;
+ }
+
+/* Compute the reciprocal of the condition number of A. */
+
+ dgecon_(norm, n, &af[af_offset], ldaf, &anorm, rcond, &work[1], &iwork[1],
+ info);
+
+/* Compute the solution matrix X. */
+
+ dlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ dgetrs_(trans, n, nrhs, &af[af_offset], ldaf, &ipiv[1], &x[x_offset], ldx,
+ info);
+
+/* Use iterative refinement to improve the computed solution and */
+/* compute error bounds and backward error estimates for it. */
+
+ dgerfs_(trans, n, nrhs, &a[a_offset], lda, &af[af_offset], ldaf, &ipiv[1],
+ &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[
+ 1], &iwork[1], info);
+
+/* Transform the solution matrix X to a solution of the original */
+/* system. */
+
+ if (notran) {
+ if (colequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[i__ + j * x_dim1] = c__[i__] * x[i__ + j * x_dim1];
+/* L70: */
+ }
+/* L80: */
+ }
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] /= colcnd;
+/* L90: */
+ }
+ }
+ } else if (rowequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[i__ + j * x_dim1] = r__[i__] * x[i__ + j * x_dim1];
+/* L100: */
+ }
+/* L110: */
+ }
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] /= rowcnd;
+/* L120: */
+ }
+ }
+
+ work[1] = rpvgrw;
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < dlamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+ return 0;
+
+/* End of DGESVX */
+
+} /* dgesvx_ */
diff --git a/contrib/libs/clapack/dgetc2.c b/contrib/libs/clapack/dgetc2.c
new file mode 100644
index 0000000000..4c6e8c5544
--- /dev/null
+++ b/contrib/libs/clapack/dgetc2.c
@@ -0,0 +1,199 @@
+/* dgetc2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b10 = -1.;
+
+/* Subroutine */ int dgetc2_(integer *n, doublereal *a, integer *lda, integer
+ *ipiv, integer *jpiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublereal d__1;
+
+ /* Local variables */
+ integer i__, j, ip, jp;
+ doublereal eps;
+ integer ipv, jpv;
+ extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ doublereal smin, xmax;
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dlabad_(doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ doublereal bignum, smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGETC2 computes an LU factorization with complete pivoting of the */
+/* n-by-n matrix A. The factorization has the form A = P * L * U * Q, */
+/* where P and Q are permutation matrices, L is lower triangular with */
+/* unit diagonal elements and U is upper triangular. */
+
+/* This is the Level 2 BLAS algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA, N) */
+/* On entry, the n-by-n matrix A to be factored. */
+/* On exit, the factors L and U from the factorization */
+/* A = P*L*U*Q; the unit diagonal elements of L are not stored. */
+/* If U(k, k) appears to be less than SMIN, U(k, k) is given the */
+/* value of SMIN, i.e., giving a nonsingular perturbed system. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension(N). */
+/* The pivot indices; for 1 <= i <= N, row i of the */
+/* matrix has been interchanged with row IPIV(i). */
+
+/* JPIV (output) INTEGER array, dimension(N). */
+/* The pivot indices; for 1 <= j <= N, column j of the */
+/* matrix has been interchanged with column JPIV(j). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* > 0: if INFO = k, U(k, k) is likely to produce owerflow if */
+/* we try to solve for x in Ax = b. So U is perturbed to */
+/* avoid the overflow. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Set constants to control overflow */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ --jpiv;
+
+ /* Function Body */
+ *info = 0;
+ eps = dlamch_("P");
+ smlnum = dlamch_("S") / eps;
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+
+/* Factorize A using complete pivoting. */
+/* Set pivots less than SMIN to SMIN. */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Find max element in matrix A */
+
+ xmax = 0.;
+ i__2 = *n;
+ for (ip = i__; ip <= i__2; ++ip) {
+ i__3 = *n;
+ for (jp = i__; jp <= i__3; ++jp) {
+ if ((d__1 = a[ip + jp * a_dim1], abs(d__1)) >= xmax) {
+ xmax = (d__1 = a[ip + jp * a_dim1], abs(d__1));
+ ipv = ip;
+ jpv = jp;
+ }
+/* L10: */
+ }
+/* L20: */
+ }
+ if (i__ == 1) {
+/* Computing MAX */
+ d__1 = eps * xmax;
+ smin = max(d__1,smlnum);
+ }
+
+/* Swap rows */
+
+ if (ipv != i__) {
+ dswap_(n, &a[ipv + a_dim1], lda, &a[i__ + a_dim1], lda);
+ }
+ ipiv[i__] = ipv;
+
+/* Swap columns */
+
+ if (jpv != i__) {
+ dswap_(n, &a[jpv * a_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &
+ c__1);
+ }
+ jpiv[i__] = jpv;
+
+/* Check for singularity */
+
+ if ((d__1 = a[i__ + i__ * a_dim1], abs(d__1)) < smin) {
+ *info = i__;
+ a[i__ + i__ * a_dim1] = smin;
+ }
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ a[j + i__ * a_dim1] /= a[i__ + i__ * a_dim1];
+/* L30: */
+ }
+ i__2 = *n - i__;
+ i__3 = *n - i__;
+ dger_(&i__2, &i__3, &c_b10, &a[i__ + 1 + i__ * a_dim1], &c__1, &a[i__
+ + (i__ + 1) * a_dim1], lda, &a[i__ + 1 + (i__ + 1) * a_dim1],
+ lda);
+/* L40: */
+ }
+
+ if ((d__1 = a[*n + *n * a_dim1], abs(d__1)) < smin) {
+ *info = *n;
+ a[*n + *n * a_dim1] = smin;
+ }
+
+ return 0;
+
+/* End of DGETC2 */
+
+} /* dgetc2_ */
diff --git a/contrib/libs/clapack/dgetf2.c b/contrib/libs/clapack/dgetf2.c
new file mode 100644
index 0000000000..be6639aff7
--- /dev/null
+++ b/contrib/libs/clapack/dgetf2.c
@@ -0,0 +1,193 @@
+/* dgetf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b8 = -1.;
+
+/* Subroutine */ int dgetf2_(integer *m, integer *n, doublereal *a, integer *
+ lda, integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublereal d__1;
+
+ /* Local variables */
+ integer i__, j, jp;
+ extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *), dscal_(integer *, doublereal *, doublereal *, integer
+ *);
+ doublereal sfmin;
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ extern doublereal dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGETF2 computes an LU factorization of a general m-by-n matrix A */
+/* using partial pivoting with row interchanges. */
+
+/* The factorization has the form */
+/* A = P * L * U */
+/* where P is a permutation matrix, L is lower triangular with unit */
+/* diagonal elements (lower trapezoidal if m > n), and U is upper */
+/* triangular (upper trapezoidal if m < n). */
+
+/* This is the right-looking Level 2 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the m by n matrix to be factored. */
+/* On exit, the factors L and U from the factorization */
+/* A = P*L*U; the unit diagonal elements of L are not stored. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* IPIV (output) INTEGER array, dimension (min(M,N)) */
+/* The pivot indices; for 1 <= i <= min(M,N), row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, U(k,k) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and division by zero will occur if it is used */
+/* to solve a system of equations. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGETF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Compute machine safe minimum */
+
+ sfmin = dlamch_("S");
+
+ i__1 = min(*m,*n);
+ for (j = 1; j <= i__1; ++j) {
+
+/* Find pivot and test for singularity. */
+
+ i__2 = *m - j + 1;
+ jp = j - 1 + idamax_(&i__2, &a[j + j * a_dim1], &c__1);
+ ipiv[j] = jp;
+ if (a[jp + j * a_dim1] != 0.) {
+
+/* Apply the interchange to columns 1:N. */
+
+ if (jp != j) {
+ dswap_(n, &a[j + a_dim1], lda, &a[jp + a_dim1], lda);
+ }
+
+/* Compute elements J+1:M of J-th column. */
+
+ if (j < *m) {
+ if ((d__1 = a[j + j * a_dim1], abs(d__1)) >= sfmin) {
+ i__2 = *m - j;
+ d__1 = 1. / a[j + j * a_dim1];
+ dscal_(&i__2, &d__1, &a[j + 1 + j * a_dim1], &c__1);
+ } else {
+ i__2 = *m - j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[j + i__ + j * a_dim1] /= a[j + j * a_dim1];
+/* L20: */
+ }
+ }
+ }
+
+ } else if (*info == 0) {
+
+ *info = j;
+ }
+
+ if (j < min(*m,*n)) {
+
+/* Update trailing submatrix. */
+
+ i__2 = *m - j;
+ i__3 = *n - j;
+ dger_(&i__2, &i__3, &c_b8, &a[j + 1 + j * a_dim1], &c__1, &a[j + (
+ j + 1) * a_dim1], lda, &a[j + 1 + (j + 1) * a_dim1], lda);
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of DGETF2 */
+
+} /* dgetf2_ */
diff --git a/contrib/libs/clapack/dgetrf.c b/contrib/libs/clapack/dgetrf.c
new file mode 100644
index 0000000000..8a945af253
--- /dev/null
+++ b/contrib/libs/clapack/dgetrf.c
@@ -0,0 +1,219 @@
+/* dgetrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b16 = 1.;
+static doublereal c_b19 = -1.;
+
+/* Subroutine */ int dgetrf_(integer *m, integer *n, doublereal *a, integer *
+ lda, integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+
+ /* Local variables */
+ integer i__, j, jb, nb;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ integer iinfo;
+ extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *), dgetf2_(
+ integer *, integer *, doublereal *, integer *, integer *, integer
+ *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int dlaswp_(integer *, doublereal *, integer *,
+ integer *, integer *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGETRF computes an LU factorization of a general M-by-N matrix A */
+/* using partial pivoting with row interchanges. */
+
+/* The factorization has the form */
+/* A = P * L * U */
+/* where P is a permutation matrix, L is lower triangular with unit */
+/* diagonal elements (lower trapezoidal if m > n), and U is upper */
+/* triangular (upper trapezoidal if m < n). */
+
+/* This is the right-looking Level 3 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix to be factored. */
+/* On exit, the factors L and U from the factorization */
+/* A = P*L*U; the unit diagonal elements of L are not stored. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* IPIV (output) INTEGER array, dimension (min(M,N)) */
+/* The pivot indices; for 1 <= i <= min(M,N), row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and division by zero will occur if it is used */
+/* to solve a system of equations. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGETRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "DGETRF", " ", m, n, &c_n1, &c_n1);
+ if (nb <= 1 || nb >= min(*m,*n)) {
+
+/* Use unblocked code. */
+
+ dgetf2_(m, n, &a[a_offset], lda, &ipiv[1], info);
+ } else {
+
+/* Use blocked code. */
+
+ i__1 = min(*m,*n);
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__3 = min(*m,*n) - j + 1;
+ jb = min(i__3,nb);
+
+/* Factor diagonal and subdiagonal blocks and test for exact */
+/* singularity. */
+
+ i__3 = *m - j + 1;
+ dgetf2_(&i__3, &jb, &a[j + j * a_dim1], lda, &ipiv[j], &iinfo);
+
+/* Adjust INFO and the pivot indices. */
+
+ if (*info == 0 && iinfo > 0) {
+ *info = iinfo + j - 1;
+ }
+/* Computing MIN */
+ i__4 = *m, i__5 = j + jb - 1;
+ i__3 = min(i__4,i__5);
+ for (i__ = j; i__ <= i__3; ++i__) {
+ ipiv[i__] = j - 1 + ipiv[i__];
+/* L10: */
+ }
+
+/* Apply interchanges to columns 1:J-1. */
+
+ i__3 = j - 1;
+ i__4 = j + jb - 1;
+ dlaswp_(&i__3, &a[a_offset], lda, &j, &i__4, &ipiv[1], &c__1);
+
+ if (j + jb <= *n) {
+
+/* Apply interchanges to columns J+JB:N. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j + jb - 1;
+ dlaswp_(&i__3, &a[(j + jb) * a_dim1 + 1], lda, &j, &i__4, &
+ ipiv[1], &c__1);
+
+/* Compute block row of U. */
+
+ i__3 = *n - j - jb + 1;
+ dtrsm_("Left", "Lower", "No transpose", "Unit", &jb, &i__3, &
+ c_b16, &a[j + j * a_dim1], lda, &a[j + (j + jb) *
+ a_dim1], lda);
+ if (j + jb <= *m) {
+
+/* Update trailing submatrix. */
+
+ i__3 = *m - j - jb + 1;
+ i__4 = *n - j - jb + 1;
+ dgemm_("No transpose", "No transpose", &i__3, &i__4, &jb,
+ &c_b19, &a[j + jb + j * a_dim1], lda, &a[j + (j +
+ jb) * a_dim1], lda, &c_b16, &a[j + jb + (j + jb) *
+ a_dim1], lda);
+ }
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of DGETRF */
+
+} /* dgetrf_ */
diff --git a/contrib/libs/clapack/dgetri.c b/contrib/libs/clapack/dgetri.c
new file mode 100644
index 0000000000..d075f0c7d6
--- /dev/null
+++ b/contrib/libs/clapack/dgetri.c
@@ -0,0 +1,264 @@
+/* dgetri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static doublereal c_b20 = -1.;
+static doublereal c_b22 = 1.;
+
+/* Subroutine */ int dgetri_(integer *n, doublereal *a, integer *lda, integer
+ *ipiv, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, jb, nb, jj, jp, nn, iws;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *),
+ dgemv_(char *, integer *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *);
+ integer nbmin;
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dtrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *), xerbla_(
+ char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ldwork;
+ extern /* Subroutine */ int dtrtri_(char *, char *, integer *, doublereal
+ *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGETRI computes the inverse of a matrix using the LU factorization */
+/* computed by DGETRF. */
+
+/* This method inverts U and then computes inv(A) by solving the system */
+/* inv(A)*L = inv(U) for inv(A). */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the factors L and U from the factorization */
+/* A = P*L*U as computed by DGETRF. */
+/* On exit, if INFO = 0, the inverse of the original matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices from DGETRF; for 1<=i<=N, row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO=0, then WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimal performance LWORK >= N*NB, where NB is */
+/* the optimal blocksize returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) is exactly zero; the matrix is */
+/* singular and its inverse could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "DGETRI", " ", n, &c_n1, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ work[1] = (doublereal) lwkopt;
+ lquery = *lwork == -1;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*lda < max(1,*n)) {
+ *info = -3;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGETRI", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form inv(U). If INFO > 0 from DTRTRI, then U is singular, */
+/* and the inverse is not computed. */
+
+ dtrtri_("Upper", "Non-unit", n, &a[a_offset], lda, info);
+ if (*info > 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = *n;
+ if (nb > 1 && nb < *n) {
+/* Computing MAX */
+ i__1 = ldwork * nb;
+ iws = max(i__1,1);
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DGETRI", " ", n, &c_n1, &c_n1, &
+ c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = *n;
+ }
+
+/* Solve the equation inv(A)*L = inv(U) for inv(A). */
+
+ if (nb < nbmin || nb >= *n) {
+
+/* Use unblocked code. */
+
+ for (j = *n; j >= 1; --j) {
+
+/* Copy current column of L to WORK and replace with zeros. */
+
+ i__1 = *n;
+ for (i__ = j + 1; i__ <= i__1; ++i__) {
+ work[i__] = a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = 0.;
+/* L10: */
+ }
+
+/* Compute current column of inv(A). */
+
+ if (j < *n) {
+ i__1 = *n - j;
+ dgemv_("No transpose", n, &i__1, &c_b20, &a[(j + 1) * a_dim1
+ + 1], lda, &work[j + 1], &c__1, &c_b22, &a[j * a_dim1
+ + 1], &c__1);
+ }
+/* L20: */
+ }
+ } else {
+
+/* Use blocked code. */
+
+ nn = (*n - 1) / nb * nb + 1;
+ i__1 = -nb;
+ for (j = nn; i__1 < 0 ? j >= 1 : j <= 1; j += i__1) {
+/* Computing MIN */
+ i__2 = nb, i__3 = *n - j + 1;
+ jb = min(i__2,i__3);
+
+/* Copy current block column of L to WORK and replace with */
+/* zeros. */
+
+ i__2 = j + jb - 1;
+ for (jj = j; jj <= i__2; ++jj) {
+ i__3 = *n;
+ for (i__ = jj + 1; i__ <= i__3; ++i__) {
+ work[i__ + (jj - j) * ldwork] = a[i__ + jj * a_dim1];
+ a[i__ + jj * a_dim1] = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Compute current block column of inv(A). */
+
+ if (j + jb <= *n) {
+ i__2 = *n - j - jb + 1;
+ dgemm_("No transpose", "No transpose", n, &jb, &i__2, &c_b20,
+ &a[(j + jb) * a_dim1 + 1], lda, &work[j + jb], &
+ ldwork, &c_b22, &a[j * a_dim1 + 1], lda);
+ }
+ dtrsm_("Right", "Lower", "No transpose", "Unit", n, &jb, &c_b22, &
+ work[j], &ldwork, &a[j * a_dim1 + 1], lda);
+/* L50: */
+ }
+ }
+
+/* Apply column interchanges. */
+
+ for (j = *n - 1; j >= 1; --j) {
+ jp = ipiv[j];
+ if (jp != j) {
+ dswap_(n, &a[j * a_dim1 + 1], &c__1, &a[jp * a_dim1 + 1], &c__1);
+ }
+/* L60: */
+ }
+
+ work[1] = (doublereal) iws;
+ return 0;
+
+/* End of DGETRI */
+
+} /* dgetri_ */
diff --git a/contrib/libs/clapack/dgetrs.c b/contrib/libs/clapack/dgetrs.c
new file mode 100644
index 0000000000..943e22fbd9
--- /dev/null
+++ b/contrib/libs/clapack/dgetrs.c
@@ -0,0 +1,186 @@
+/* dgetrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b12 = 1.;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dgetrs_(char *trans, integer *n, integer *nrhs,
+ doublereal *a, integer *lda, integer *ipiv, doublereal *b, integer *
+ ldb, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *), xerbla_(
+ char *, integer *), dlaswp_(integer *, doublereal *,
+ integer *, integer *, integer *, integer *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGETRS solves a system of linear equations */
+/* A * X = B or A' * X = B */
+/* with a general N-by-N matrix A using the LU factorization computed */
+/* by DGETRF. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A'* X = B (Transpose) */
+/* = 'C': A'* X = B (Conjugate transpose = Transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The factors L and U from the factorization A = P*L*U */
+/* as computed by DGETRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices from DGETRF; for 1<=i<=N, row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGETRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+ if (notran) {
+
+/* Solve A * X = B. */
+
+/* Apply row interchanges to the right hand sides. */
+
+ dlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c__1);
+
+/* Solve L*X = B, overwriting B with X. */
+
+ dtrsm_("Left", "Lower", "No transpose", "Unit", n, nrhs, &c_b12, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+/* Solve U*X = B, overwriting B with X. */
+
+ dtrsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b12, &
+ a[a_offset], lda, &b[b_offset], ldb);
+ } else {
+
+/* Solve A' * X = B. */
+
+/* Solve U'*X = B, overwriting B with X. */
+
+ dtrsm_("Left", "Upper", "Transpose", "Non-unit", n, nrhs, &c_b12, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+/* Solve L'*X = B, overwriting B with X. */
+
+ dtrsm_("Left", "Lower", "Transpose", "Unit", n, nrhs, &c_b12, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+/* Apply row interchanges to the solution vectors. */
+
+ dlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c_n1);
+ }
+
+ return 0;
+
+/* End of DGETRS */
+
+} /* dgetrs_ */
diff --git a/contrib/libs/clapack/dggbak.c b/contrib/libs/clapack/dggbak.c
new file mode 100644
index 0000000000..a581392039
--- /dev/null
+++ b/contrib/libs/clapack/dggbak.c
@@ -0,0 +1,276 @@
+/* dggbak.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dggbak_(char *job, char *side, integer *n, integer *ilo,
+	 integer *ihi, doublereal *lscale, doublereal *rscale, integer *m,
+	doublereal *v, integer *ldv, integer *info)
+{
+    /* System generated locals */
+    integer v_dim1, v_offset, i__1;
+
+    /* Local variables */
+    integer i__, k;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    logical leftv;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical rightv;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DGGBAK forms the right or left eigenvectors of a real generalized */
+/*  eigenvalue problem A*x = lambda*B*x, by backward transformation on */
+/*  the computed eigenvectors of the balanced pair of matrices output by */
+/*  DGGBAL. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOB     (input) CHARACTER*1 */
+/*          Specifies the type of backward transformation required: */
+/*          = 'N':  do nothing, return immediately; */
+/*          = 'P':  do backward transformation for permutation only; */
+/*          = 'S':  do backward transformation for scaling only; */
+/*          = 'B':  do backward transformations for both permutation and */
+/*                  scaling. */
+/*          JOB must be the same as the argument JOB supplied to DGGBAL. */
+
+/*  SIDE    (input) CHARACTER*1 */
+/*          = 'R':  V contains right eigenvectors; */
+/*          = 'L':  V contains left eigenvectors. */
+
+/*  N       (input) INTEGER */
+/*          The number of rows of the matrix V.  N >= 0. */
+
+/*  ILO     (input) INTEGER */
+/*  IHI     (input) INTEGER */
+/*          The integers ILO and IHI determined by DGGBAL. */
+/*          1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/*  LSCALE  (input) DOUBLE PRECISION array, dimension (N) */
+/*          Details of the permutations and/or scaling factors applied */
+/*          to the left side of A and B, as returned by DGGBAL. */
+
+/*  RSCALE  (input) DOUBLE PRECISION array, dimension (N) */
+/*          Details of the permutations and/or scaling factors applied */
+/*          to the right side of A and B, as returned by DGGBAL. */
+
+/*  M       (input) INTEGER */
+/*          The number of columns of the matrix V.  M >= 0. */
+
+/*  V       (input/output) DOUBLE PRECISION array, dimension (LDV,M) */
+/*          On entry, the matrix of right or left eigenvectors to be */
+/*          transformed, as returned by DTGEVC. */
+/*          On exit, V is overwritten by the transformed eigenvectors. */
+
+/*  LDV     (input) INTEGER */
+/*          The leading dimension of the matrix V. LDV >= max(1,N). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit. */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  See R.C. Ward, Balancing the generalized eigenvalue problem, */
+/*                 SIAM J. Sci. Stat. Comp. 2 (1981), 141-152. */
+
+/*  ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters; any illegal argument is reported via XERBLA */
+
+    /* Parameter adjustments: shift pointers so 1-based Fortran indexing works */
+    --lscale;
+    --rscale;
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+
+    /* Function Body */
+    rightv = lsame_(side, "R");
+    leftv = lsame_(side, "L");
+
+    *info = 0;
+    if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+	    && ! lsame_(job, "B")) {
+	*info = -1;
+    } else if (! rightv && ! leftv) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ilo < 1) {
+	*info = -4;
+    } else if (*n == 0 && *ihi == 0 && *ilo != 1) {
+	*info = -4;
+    } else if (*n > 0 && (*ihi < *ilo || *ihi > max(1,*n))) {
+	*info = -5;
+    } else if (*n == 0 && *ilo == 1 && *ihi != 0) {
+	*info = -5;
+    } else if (*m < 0) {
+	*info = -8;
+    } else if (*ldv < max(1,*n)) {
+	*info = -10;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGGBAK", &i__1);
+	return 0;
+    }
+
+/*     Quick return: nothing to do if V is empty (N=0 or M=0) or JOB='N' */
+
+    if (*n == 0) {
+	return 0;
+    }
+    if (*m == 0) {
+	return 0;
+    }
+    if (lsame_(job, "N")) {
+	return 0;
+    }
+
+    if (*ilo == *ihi) {
+	goto L30;
+    }
+
+/*     Backward balance: undo DGGBAL's diagonal scaling (JOB = 'S' or 'B') */
+
+    if (lsame_(job, "S") || lsame_(job, "B")) {
+
+/*        Backward transformation on right eigenvectors */
+
+	if (rightv) {
+	    i__1 = *ihi;
+	    for (i__ = *ilo; i__ <= i__1; ++i__) {
+		dscal_(m, &rscale[i__], &v[i__ + v_dim1], ldv);
+/* L10: */
+	    }
+	}
+
+/*        Backward transformation on left eigenvectors */
+
+	if (leftv) {
+	    i__1 = *ihi;
+	    for (i__ = *ilo; i__ <= i__1; ++i__) {
+		dscal_(m, &lscale[i__], &v[i__ + v_dim1], ldv);
+/* L20: */
+	    }
+	}
+    }
+
+/*     Backward permutation: undo DGGBAL's row interchanges in reverse order */
+
+L30:
+    if (lsame_(job, "P") || lsame_(job, "B")) {
+
+/*        Backward permutation on right eigenvectors */
+
+	if (rightv) {
+	    if (*ilo == 1) {
+		goto L50;
+	    }
+
+	    for (i__ = *ilo - 1; i__ >= 1; --i__) {
+		k = (integer) rscale[i__];
+		if (k == i__) {
+		    goto L40;
+		}
+		dswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L40:
+		;
+	    }
+
+L50:
+	    if (*ihi == *n) {
+		goto L70;
+	    }
+	    i__1 = *n;
+	    for (i__ = *ihi + 1; i__ <= i__1; ++i__) {
+		k = (integer) rscale[i__];
+		if (k == i__) {
+		    goto L60;
+		}
+		dswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L60:
+		;
+	    }
+	}
+
+/*        Backward permutation on left eigenvectors */
+
+L70:
+	if (leftv) {
+	    if (*ilo == 1) {
+		goto L90;
+	    }
+	    for (i__ = *ilo - 1; i__ >= 1; --i__) {
+		k = (integer) lscale[i__];
+		if (k == i__) {
+		    goto L80;
+		}
+		dswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L80:
+		;
+	    }
+
+L90:
+	    if (*ihi == *n) {
+		goto L110;
+	    }
+	    i__1 = *n;
+	    for (i__ = *ihi + 1; i__ <= i__1; ++i__) {
+		k = (integer) lscale[i__];
+		if (k == i__) {
+		    goto L100;
+		}
+		dswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L100:
+		;
+	    }
+	}
+    }
+
+L110:
+
+    return 0;
+
+/*     End of DGGBAK */
+
+} /* dggbak_ */
diff --git a/contrib/libs/clapack/dggbal.c b/contrib/libs/clapack/dggbal.c
new file mode 100644
index 0000000000..27a4b340ce
--- /dev/null
+++ b/contrib/libs/clapack/dggbal.c
@@ -0,0 +1,627 @@
+/* dggbal.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b35 = 10.;
+static doublereal c_b71 = .5;
+
+/* Subroutine */ int dggbal_(char *job, integer *n, doublereal *a, integer *
+	lda, doublereal *b, integer *ldb, integer *ilo, integer *ihi,
+	doublereal *lscale, doublereal *rscale, doublereal *work, integer *
+	info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+    doublereal d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double d_lg10(doublereal *), d_sign(doublereal *, doublereal *), pow_di(
+	    doublereal *, integer *);
+
+    /* Local variables */
+    integer i__, j, k, l, m;
+    doublereal t;
+    integer jc;
+    doublereal ta, tb, tc;
+    integer ir;
+    doublereal ew;
+    integer it, nr, ip1, jp1, lm1;
+    doublereal cab, rab, ewc, cor, sum;
+    integer nrp2, icab, lcab;
+    doublereal beta, coef;
+    integer irab, lrab;
+    doublereal basl, cmax;
+    extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+	    integer *);
+    doublereal coef2, coef5, gamma, alpha;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    extern logical lsame_(char *, char *);
+    doublereal sfmin, sfmax;
+    extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    integer iflow;
+    extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *);
+    integer kount;
+    extern doublereal dlamch_(char *);
+    doublereal pgamma;
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    integer lsfmin, lsfmax;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DGGBAL balances a pair of general real matrices (A,B).  This */
+/*  involves, first, permuting A and B by similarity transformations to */
+/*  isolate eigenvalues in the first 1 to ILO$-$1 and last IHI+1 to N */
+/*  elements on the diagonal; and second, applying a diagonal similarity */
+/*  transformation to rows and columns ILO to IHI to make the rows */
+/*  and columns as close in norm as possible. Both steps are optional. */
+
+/*  Balancing may reduce the 1-norm of the matrices, and improve the */
+/*  accuracy of the computed eigenvalues and/or eigenvectors in the */
+/*  generalized eigenvalue problem A*x = lambda*B*x. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOB     (input) CHARACTER*1 */
+/*          Specifies the operations to be performed on A and B: */
+/*          = 'N':  none:  simply set ILO = 1, IHI = N, LSCALE(I) = 1.0 */
+/*                  and RSCALE(I) = 1.0 for i = 1,...,N. */
+/*          = 'P':  permute only; */
+/*          = 'S':  scale only; */
+/*          = 'B':  both permute and scale. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrices A and B.  N >= 0. */
+
+/*  A       (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/*          On entry, the input matrix A. */
+/*          On exit,  A is overwritten by the balanced matrix. */
+/*          If JOB = 'N', A is not referenced. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A. LDA >= max(1,N). */
+
+/*  B       (input/output) DOUBLE PRECISION array, dimension (LDB,N) */
+/*          On entry, the input matrix B. */
+/*          On exit,  B is overwritten by the balanced matrix. */
+/*          If JOB = 'N', B is not referenced. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B. LDB >= max(1,N). */
+
+/*  ILO     (output) INTEGER */
+/*  IHI     (output) INTEGER */
+/*          ILO and IHI are set to integers such that on exit */
+/*          A(i,j) = 0 and B(i,j) = 0 if i > j and */
+/*          j = 1,...,ILO-1 or i = IHI+1,...,N. */
+/*          If JOB = 'N' or 'S', ILO = 1 and IHI = N. */
+
+/*  LSCALE  (output) DOUBLE PRECISION array, dimension (N) */
+/*          Details of the permutations and scaling factors applied */
+/*          to the left side of A and B.  If P(j) is the index of the */
+/*          row interchanged with row j, and D(j) */
+/*          is the scaling factor applied to row j, then */
+/*            LSCALE(j) = P(j)    for J = 1,...,ILO-1 */
+/*                      = D(j)    for J = ILO,...,IHI */
+/*                      = P(j)    for J = IHI+1,...,N. */
+/*          The order in which the interchanges are made is N to IHI+1, */
+/*          then 1 to ILO-1. */
+
+/*  RSCALE  (output) DOUBLE PRECISION array, dimension (N) */
+/*          Details of the permutations and scaling factors applied */
+/*          to the right side of A and B.  If P(j) is the index of the */
+/*          column interchanged with column j, and D(j) */
+/*          is the scaling factor applied to column j, then */
+/*            LSCALE(j) = P(j)    for J = 1,...,ILO-1 */
+/*                      = D(j)    for J = ILO,...,IHI */
+/*                      = P(j)    for J = IHI+1,...,N. */
+/*          The order in which the interchanges are made is N to IHI+1, */
+/*          then 1 to ILO-1. */
+
+/*  WORK    (workspace) REAL array, dimension (lwork) */
+/*          lwork must be at least max(1,6*N) when JOB = 'S' or 'B', and */
+/*          at least 1 when JOB = 'N' or 'P'. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  See R.C. WARD, Balancing the generalized eigenvalue problem, */
+/*                 SIAM J. Sci. Stat. Comp. 2 (1981), 141-152. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters; any illegal argument is reported via XERBLA */
+
+    /* Parameter adjustments: shift pointers so 1-based Fortran indexing works */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --lscale;
+    --rscale;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+	    && ! lsame_(job, "B")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*ldb < max(1,*n)) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGGBAL", &i__1);
+	return 0;
+    }
+
+/*     Quick return for the trivial orders N = 0 and N = 1 */
+
+    if (*n == 0) {
+	*ilo = 1;
+	*ihi = *n;
+	return 0;
+    }
+
+    if (*n == 1) {
+	*ilo = 1;
+	*ihi = *n;
+	lscale[1] = 1.;
+	rscale[1] = 1.;
+	return 0;
+    }
+
+    if (lsame_(job, "N")) {
+	*ilo = 1;
+	*ihi = *n;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    lscale[i__] = 1.;
+	    rscale[i__] = 1.;
+/* L10: */
+	}
+	return 0;
+    }
+
+    k = 1;
+    l = *n;
+    if (lsame_(job, "S")) {
+	goto L190;
+    }
+
+    goto L30;
+
+/*     Permute the matrices A and B to isolate the eigenvalues. */
+
+/*     Find row with one nonzero in columns 1 through L */
+
+L20:
+    l = lm1;
+    if (l != 1) {
+	goto L30;
+    }
+
+    rscale[1] = 1.;
+    lscale[1] = 1.;
+    goto L190;
+
+L30:
+    lm1 = l - 1;
+    for (i__ = l; i__ >= 1; --i__) {
+	i__1 = lm1;
+	for (j = 1; j <= i__1; ++j) {
+	    jp1 = j + 1;
+	    if (a[i__ + j * a_dim1] != 0. || b[i__ + j * b_dim1] != 0.) {
+		goto L50;
+	    }
+/* L40: */
+	}
+	j = l;
+	goto L70;
+
+L50:
+	i__1 = l;
+	for (j = jp1; j <= i__1; ++j) {
+	    if (a[i__ + j * a_dim1] != 0. || b[i__ + j * b_dim1] != 0.) {
+		goto L80;
+	    }
+/* L60: */
+	}
+	j = jp1 - 1;
+
+L70:
+	m = l;
+	iflow = 1;
+	goto L160;
+L80:
+	;
+    }
+    goto L100;
+
+/*     Find column with one nonzero in rows K through N */
+
+L90:
+    ++k;
+
+L100:
+    i__1 = l;
+    for (j = k; j <= i__1; ++j) {
+	i__2 = lm1;
+	for (i__ = k; i__ <= i__2; ++i__) {
+	    ip1 = i__ + 1;
+	    if (a[i__ + j * a_dim1] != 0. || b[i__ + j * b_dim1] != 0.) {
+		goto L120;
+	    }
+/* L110: */
+	}
+	i__ = l;
+	goto L140;
+L120:
+	i__2 = l;
+	for (i__ = ip1; i__ <= i__2; ++i__) {
+	    if (a[i__ + j * a_dim1] != 0. || b[i__ + j * b_dim1] != 0.) {
+		goto L150;
+	    }
+/* L130: */
+	}
+	i__ = ip1 - 1;
+L140:
+	m = k;
+	iflow = 2;
+	goto L160;
+L150:
+	;
+    }
+    goto L190;
+
+/*     Permute rows M and I */
+
+L160:
+    lscale[m] = (doublereal) i__;
+    if (i__ == m) {
+	goto L170;
+    }
+    i__1 = *n - k + 1;
+    dswap_(&i__1, &a[i__ + k * a_dim1], lda, &a[m + k * a_dim1], lda);
+    i__1 = *n - k + 1;
+    dswap_(&i__1, &b[i__ + k * b_dim1], ldb, &b[m + k * b_dim1], ldb);
+
+/*     Permute columns M and J */
+
+L170:
+    rscale[m] = (doublereal) j;
+    if (j == m) {
+	goto L180;
+    }
+    dswap_(&l, &a[j * a_dim1 + 1], &c__1, &a[m * a_dim1 + 1], &c__1);
+    dswap_(&l, &b[j * b_dim1 + 1], &c__1, &b[m * b_dim1 + 1], &c__1);
+
+L180:
+    switch (iflow) {
+	case 1:  goto L20;
+	case 2:  goto L90;
+    }
+
+L190:
+    *ilo = k;
+    *ihi = l;
+
+    if (lsame_(job, "P")) {
+	i__1 = *ihi;
+	for (i__ = *ilo; i__ <= i__1; ++i__) {
+	    lscale[i__] = 1.;
+	    rscale[i__] = 1.;
+/* L195: */
+	}
+	return 0;
+    }
+
+    if (*ilo == *ihi) {
+	return 0;
+    }
+
+/*     Balance the submatrix in rows ILO to IHI. */
+
+    nr = *ihi - *ilo + 1;
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	rscale[i__] = 0.;
+	lscale[i__] = 0.;
+
+	work[i__] = 0.;
+	work[i__ + *n] = 0.;
+	work[i__ + (*n << 1)] = 0.;
+	work[i__ + *n * 3] = 0.;
+	work[i__ + (*n << 2)] = 0.;
+	work[i__ + *n * 5] = 0.;
+/* L200: */
+    }
+
+/*     Compute right side vector of the linear system for the scale factors */
+
+    basl = d_lg10(&c_b35);
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	i__2 = *ihi;
+	for (j = *ilo; j <= i__2; ++j) {
+	    tb = b[i__ + j * b_dim1];
+	    ta = a[i__ + j * a_dim1];
+	    if (ta == 0.) {
+		goto L210;
+	    }
+	    d__1 = abs(ta);
+	    ta = d_lg10(&d__1) / basl;
+L210:
+	    if (tb == 0.) {
+		goto L220;
+	    }
+	    d__1 = abs(tb);
+	    tb = d_lg10(&d__1) / basl;
+L220:
+	    work[i__ + (*n << 2)] = work[i__ + (*n << 2)] - ta - tb;
+	    work[j + *n * 5] = work[j + *n * 5] - ta - tb;
+/* L230: */
+	}
+/* L240: */
+    }
+
+    coef = 1. / (doublereal) (nr << 1);
+    coef2 = coef * coef;
+    coef5 = coef2 * .5;
+    nrp2 = nr + 2;
+    beta = 0.;
+    it = 1;
+
+/*     Start generalized conjugate gradient iteration (Ward's method) */
+
+L250:
+
+    gamma = ddot_(&nr, &work[*ilo + (*n << 2)], &c__1, &work[*ilo + (*n << 2)]
+, &c__1) + ddot_(&nr, &work[*ilo + *n * 5], &c__1, &work[*ilo + *
+	    n * 5], &c__1);
+
+    ew = 0.;
+    ewc = 0.;
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	ew += work[i__ + (*n << 2)];
+	ewc += work[i__ + *n * 5];
+/* L260: */
+    }
+
+/* Computing 2nd power */
+    d__1 = ew;
+/* Computing 2nd power */
+    d__2 = ewc;
+/* Computing 2nd power */
+    d__3 = ew - ewc;
+    gamma = coef * gamma - coef2 * (d__1 * d__1 + d__2 * d__2) - coef5 * (
+	    d__3 * d__3);
+    if (gamma == 0.) {
+	goto L350;
+    }
+    if (it != 1) {
+	beta = gamma / pgamma;
+    }
+    t = coef5 * (ewc - ew * 3.);
+    tc = coef5 * (ew - ewc * 3.);
+
+    dscal_(&nr, &beta, &work[*ilo], &c__1);
+    dscal_(&nr, &beta, &work[*ilo + *n], &c__1);
+
+    daxpy_(&nr, &coef, &work[*ilo + (*n << 2)], &c__1, &work[*ilo + *n], &
+	    c__1);
+    daxpy_(&nr, &coef, &work[*ilo + *n * 5], &c__1, &work[*ilo], &c__1);
+
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	work[i__] += tc;
+	work[i__ + *n] += t;
+/* L270: */
+    }
+
+/*     Apply matrix to vector */
+
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	kount = 0;
+	sum = 0.;
+	i__2 = *ihi;
+	for (j = *ilo; j <= i__2; ++j) {
+	    if (a[i__ + j * a_dim1] == 0.) {
+		goto L280;
+	    }
+	    ++kount;
+	    sum += work[j];
+L280:
+	    if (b[i__ + j * b_dim1] == 0.) {
+		goto L290;
+	    }
+	    ++kount;
+	    sum += work[j];
+L290:
+	    ;
+	}
+	work[i__ + (*n << 1)] = (doublereal) kount * work[i__ + *n] + sum;
+/* L300: */
+    }
+
+    i__1 = *ihi;
+    for (j = *ilo; j <= i__1; ++j) {
+	kount = 0;
+	sum = 0.;
+	i__2 = *ihi;
+	for (i__ = *ilo; i__ <= i__2; ++i__) {
+	    if (a[i__ + j * a_dim1] == 0.) {
+		goto L310;
+	    }
+	    ++kount;
+	    sum += work[i__ + *n];
+L310:
+	    if (b[i__ + j * b_dim1] == 0.) {
+		goto L320;
+	    }
+	    ++kount;
+	    sum += work[i__ + *n];
+L320:
+	    ;
+	}
+	work[j + *n * 3] = (doublereal) kount * work[j] + sum;
+/* L330: */
+    }
+
+    sum = ddot_(&nr, &work[*ilo + *n], &c__1, &work[*ilo + (*n << 1)], &c__1)
+	    + ddot_(&nr, &work[*ilo], &c__1, &work[*ilo + *n * 3], &c__1);
+    alpha = gamma / sum;
+
+/*     Determine correction to current iteration */
+
+    cmax = 0.;
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	cor = alpha * work[i__ + *n];
+	if (abs(cor) > cmax) {
+	    cmax = abs(cor);
+	}
+	lscale[i__] += cor;
+	cor = alpha * work[i__];
+	if (abs(cor) > cmax) {
+	    cmax = abs(cor);
+	}
+	rscale[i__] += cor;
+/* L340: */
+    }
+    if (cmax < .5) {
+	goto L350;
+    }
+
+    d__1 = -alpha;
+    daxpy_(&nr, &d__1, &work[*ilo + (*n << 1)], &c__1, &work[*ilo + (*n << 2)]
+, &c__1);
+    d__1 = -alpha;
+    daxpy_(&nr, &d__1, &work[*ilo + *n * 3], &c__1, &work[*ilo + *n * 5], &
+	    c__1);
+
+    pgamma = gamma;
+    ++it;
+    if (it <= nrp2) {
+	goto L250;
+    }
+
+/*     End generalized conjugate gradient iteration */
+
+L350:
+    sfmin = dlamch_("S");
+    sfmax = 1. / sfmin;
+    lsfmin = (integer) (d_lg10(&sfmin) / basl + 1.);
+    lsfmax = (integer) (d_lg10(&sfmax) / basl);
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	i__2 = *n - *ilo + 1;
+	irab = idamax_(&i__2, &a[i__ + *ilo * a_dim1], lda);
+	rab = (d__1 = a[i__ + (irab + *ilo - 1) * a_dim1], abs(d__1));
+	i__2 = *n - *ilo + 1;
+	irab = idamax_(&i__2, &b[i__ + *ilo * b_dim1], ldb);
+/* Computing MAX */
+	d__2 = rab, d__3 = (d__1 = b[i__ + (irab + *ilo - 1) * b_dim1], abs(
+		d__1));
+	rab = max(d__2,d__3);
+	d__1 = rab + sfmin;
+	lrab = (integer) (d_lg10(&d__1) / basl + 1.);
+	ir = (integer) (lscale[i__] + d_sign(&c_b71, &lscale[i__]));
+/* Computing MIN */
+	i__2 = max(ir,lsfmin), i__2 = min(i__2,lsfmax), i__3 = lsfmax - lrab;
+	ir = min(i__2,i__3);
+	lscale[i__] = pow_di(&c_b35, &ir);
+	icab = idamax_(ihi, &a[i__ * a_dim1 + 1], &c__1);
+	cab = (d__1 = a[icab + i__ * a_dim1], abs(d__1));
+	icab = idamax_(ihi, &b[i__ * b_dim1 + 1], &c__1);
+/* Computing MAX */
+	d__2 = cab, d__3 = (d__1 = b[icab + i__ * b_dim1], abs(d__1));
+	cab = max(d__2,d__3);
+	d__1 = cab + sfmin;
+	lcab = (integer) (d_lg10(&d__1) / basl + 1.);
+	jc = (integer) (rscale[i__] + d_sign(&c_b71, &rscale[i__]));
+/* Computing MIN */
+	i__2 = max(jc,lsfmin), i__2 = min(i__2,lsfmax), i__3 = lsfmax - lcab;
+	jc = min(i__2,i__3);
+	rscale[i__] = pow_di(&c_b35, &jc);
+/* L360: */
+    }
+
+/*     Row scaling of matrices A and B */
+
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	i__2 = *n - *ilo + 1;
+	dscal_(&i__2, &lscale[i__], &a[i__ + *ilo * a_dim1], lda);
+	i__2 = *n - *ilo + 1;
+	dscal_(&i__2, &lscale[i__], &b[i__ + *ilo * b_dim1], ldb);
+/* L370: */
+    }
+
+/*     Column scaling of matrices A and B */
+
+    i__1 = *ihi;
+    for (j = *ilo; j <= i__1; ++j) {
+	dscal_(ihi, &rscale[j], &a[j * a_dim1 + 1], &c__1);
+	dscal_(ihi, &rscale[j], &b[j * b_dim1 + 1], &c__1);
+/* L380: */
+    }
+
+    return 0;
+
+/*     End of DGGBAL */
+
+} /* dggbal_ */
diff --git a/contrib/libs/clapack/dgges.c b/contrib/libs/clapack/dgges.c
new file mode 100644
index 0000000000..57a40f7bee
--- /dev/null
+++ b/contrib/libs/clapack/dgges.c
@@ -0,0 +1,692 @@
+/* dgges.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+static doublereal c_b38 = 0.;
+static doublereal c_b39 = 1.;
+
+/* Subroutine */ int dgges_(char *jobvsl, char *jobvsr, char *sort, L_fp
+ selctg, integer *n, doublereal *a, integer *lda, doublereal *b,
+ integer *ldb, integer *sdim, doublereal *alphar, doublereal *alphai,
+ doublereal *beta, doublereal *vsl, integer *ldvsl, doublereal *vsr,
+ integer *ldvsr, doublereal *work, integer *lwork, logical *bwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, vsl_dim1, vsl_offset,
+ vsr_dim1, vsr_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, ip;
+ doublereal dif[2];
+ integer ihi, ilo;
+ doublereal eps, anrm, bnrm;
+ integer idum[1], ierr, itau, iwrk;
+ doublereal pvsl, pvsr;
+ extern logical lsame_(char *, char *);
+ integer ileft, icols;
+ logical cursl, ilvsl, ilvsr;
+ integer irows;
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dggbak_(
+ char *, char *, integer *, integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, integer *), dggbal_(char *, integer *, doublereal *, integer
+ *, doublereal *, integer *, integer *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *);
+ logical lst2sl;
+ extern doublereal dlamch_(char *), dlange_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *);
+ extern /* Subroutine */ int dgghrd_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *, integer *), dlascl_(char *, integer *, integer *, doublereal
+ *, doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *);
+ logical ilascl, ilbscl;
+ extern /* Subroutine */ int dgeqrf_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, integer *),
+ dlacpy_(char *, integer *, integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ doublereal safmin;
+ extern /* Subroutine */ int dlaset_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *);
+ doublereal safmax;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum;
+ extern /* Subroutine */ int dhgeqz_(char *, char *, char *, integer *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ integer *), dtgsen_(integer *, logical *,
+ logical *, logical *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+ integer *, integer *, integer *);
+ integer ijobvl, iright;
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ijobvr;
+ extern /* Subroutine */ int dorgqr_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *);
+ doublereal anrmto, bnrmto;
+ logical lastsl;
+ extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ integer minwrk, maxwrk;
+ doublereal smlnum;
+ logical wantst, lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+/* .. Function Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGGES computes for a pair of N-by-N real nonsymmetric matrices (A,B), */
+/* the generalized eigenvalues, the generalized real Schur form (S,T), */
+/* optionally, the left and/or right matrices of Schur vectors (VSL and */
+/* VSR). This gives the generalized Schur factorization */
+
+/* (A,B) = ( (VSL)*S*(VSR)**T, (VSL)*T*(VSR)**T ) */
+
+/* Optionally, it also orders the eigenvalues so that a selected cluster */
+/* of eigenvalues appears in the leading diagonal blocks of the upper */
+/* quasi-triangular matrix S and the upper triangular matrix T.The */
+/* leading columns of VSL and VSR then form an orthonormal basis for the */
+/* corresponding left and right eigenspaces (deflating subspaces). */
+
+/* (If only the generalized eigenvalues are needed, use the driver */
+/* DGGEV instead, which is faster.) */
+
+/* A generalized eigenvalue for a pair of matrices (A,B) is a scalar w */
+/* or a ratio alpha/beta = w, such that A - w*B is singular. It is */
+/* usually represented as the pair (alpha,beta), as there is a */
+/* reasonable interpretation for beta=0 or both being zero. */
+
+/* A pair of matrices (S,T) is in generalized real Schur form if T is */
+/* upper triangular with non-negative diagonal and S is block upper */
+/* triangular with 1-by-1 and 2-by-2 blocks. 1-by-1 blocks correspond */
+/* to real generalized eigenvalues, while 2-by-2 blocks of S will be */
+/* "standardized" by making the corresponding elements of T have the */
+/* form: */
+/* [ a 0 ] */
+/* [ 0 b ] */
+
+/* and the pair of corresponding 2-by-2 blocks in S and T will have a */
+/* complex conjugate pair of generalized eigenvalues. */
+
+
+/* Arguments */
+/* ========= */
+
+/* JOBVSL (input) CHARACTER*1 */
+/* = 'N': do not compute the left Schur vectors; */
+/* = 'V': compute the left Schur vectors. */
+
+/* JOBVSR (input) CHARACTER*1 */
+/* = 'N': do not compute the right Schur vectors; */
+/* = 'V': compute the right Schur vectors. */
+
+/* SORT (input) CHARACTER*1 */
+/* Specifies whether or not to order the eigenvalues on the */
+/* diagonal of the generalized Schur form. */
+/* = 'N': Eigenvalues are not ordered; */
+/* = 'S': Eigenvalues are ordered (see SELCTG); */
+
+/* SELCTG (external procedure) LOGICAL FUNCTION of three DOUBLE PRECISION arguments */
+/* SELCTG must be declared EXTERNAL in the calling subroutine. */
+/* If SORT = 'N', SELCTG is not referenced. */
+/* If SORT = 'S', SELCTG is used to select eigenvalues to sort */
+/* to the top left of the Schur form. */
+/* An eigenvalue (ALPHAR(j)+ALPHAI(j))/BETA(j) is selected if */
+/* SELCTG(ALPHAR(j),ALPHAI(j),BETA(j)) is true; i.e. if either */
+/* one of a complex conjugate pair of eigenvalues is selected, */
+/* then both complex eigenvalues are selected. */
+
+/* Note that in the ill-conditioned case, a selected complex */
+/* eigenvalue may no longer satisfy SELCTG(ALPHAR(j),ALPHAI(j), */
+/* BETA(j)) = .TRUE. after ordering. INFO is to be set to N+2 */
+/* in this case. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VSL, and VSR. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA, N) */
+/* On entry, the first of the pair of matrices. */
+/* On exit, A has been overwritten by its generalized Schur */
+/* form S. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB, N) */
+/* On entry, the second of the pair of matrices. */
+/* On exit, B has been overwritten by its generalized Schur */
+/* form T. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* SDIM (output) INTEGER */
+/* If SORT = 'N', SDIM = 0. */
+/* If SORT = 'S', SDIM = number of eigenvalues (after sorting) */
+/* for which SELCTG is true. (Complex conjugate pairs for which */
+/* SELCTG is true for either eigenvalue count as 2.) */
+
+/* ALPHAR (output) DOUBLE PRECISION array, dimension (N) */
+/* ALPHAI (output) DOUBLE PRECISION array, dimension (N) */
+/* BETA (output) DOUBLE PRECISION array, dimension (N) */
+/* On exit, (ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N, will */
+/* be the generalized eigenvalues. ALPHAR(j) + ALPHAI(j)*i, */
+/* and BETA(j),j=1,...,N are the diagonals of the complex Schur */
+/* form (S,T) that would result if the 2-by-2 diagonal blocks of */
+/* the real Schur form of (A,B) were further reduced to */
+/* triangular form using 2-by-2 complex unitary transformations. */
+/* If ALPHAI(j) is zero, then the j-th eigenvalue is real; if */
+/* positive, then the j-th and (j+1)-st eigenvalues are a */
+/* complex conjugate pair, with ALPHAI(j+1) negative. */
+
+/* Note: the quotients ALPHAR(j)/BETA(j) and ALPHAI(j)/BETA(j) */
+/* may easily over- or underflow, and BETA(j) may even be zero. */
+/* Thus, the user should avoid naively computing the ratio. */
+/* However, ALPHAR and ALPHAI will be always less than and */
+/* usually comparable with norm(A) in magnitude, and BETA always */
+/* less than and usually comparable with norm(B). */
+
+/* VSL (output) DOUBLE PRECISION array, dimension (LDVSL,N) */
+/* If JOBVSL = 'V', VSL will contain the left Schur vectors. */
+/* Not referenced if JOBVSL = 'N'. */
+
+/* LDVSL (input) INTEGER */
+/* The leading dimension of the matrix VSL. LDVSL >=1, and */
+/* if JOBVSL = 'V', LDVSL >= N. */
+
+/* VSR (output) DOUBLE PRECISION array, dimension (LDVSR,N) */
+/* If JOBVSR = 'V', VSR will contain the right Schur vectors. */
+/* Not referenced if JOBVSR = 'N'. */
+
+/* LDVSR (input) INTEGER */
+/* The leading dimension of the matrix VSR. LDVSR >= 1, and */
+/* if JOBVSR = 'V', LDVSR >= N. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If N = 0, LWORK >= 1, else LWORK >= 8*N+16. */
+/* For good performance , LWORK must generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* BWORK (workspace) LOGICAL array, dimension (N) */
+/* Not referenced if SORT = 'N'. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1,...,N: */
+/* The QZ iteration failed. (A,B) are not in Schur */
+/* form, but ALPHAR(j), ALPHAI(j), and BETA(j) should */
+/* be correct for j=INFO+1,...,N. */
+/* > N: =N+1: other than QZ iteration failed in DHGEQZ. */
+/* =N+2: after reordering, roundoff changed values of */
+/* some complex eigenvalues so that leading */
+/* eigenvalues in the Generalized Schur form no */
+/* longer satisfy SELCTG=.TRUE. This could also */
+/* be caused due to scaling. */
+/* =N+3: reordering failed in DTGSEN. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alphar;
+ --alphai;
+ --beta;
+ vsl_dim1 = *ldvsl;
+ vsl_offset = 1 + vsl_dim1;
+ vsl -= vsl_offset;
+ vsr_dim1 = *ldvsr;
+ vsr_offset = 1 + vsr_dim1;
+ vsr -= vsr_offset;
+ --work;
+ --bwork;
+
+ /* Function Body */
+ if (lsame_(jobvsl, "N")) {
+ ijobvl = 1;
+ ilvsl = FALSE_;
+ } else if (lsame_(jobvsl, "V")) {
+ ijobvl = 2;
+ ilvsl = TRUE_;
+ } else {
+ ijobvl = -1;
+ ilvsl = FALSE_;
+ }
+
+ if (lsame_(jobvsr, "N")) {
+ ijobvr = 1;
+ ilvsr = FALSE_;
+ } else if (lsame_(jobvsr, "V")) {
+ ijobvr = 2;
+ ilvsr = TRUE_;
+ } else {
+ ijobvr = -1;
+ ilvsr = FALSE_;
+ }
+
+ wantst = lsame_(sort, "S");
+
+/* Test the input arguments */
+
+ *info = 0;
+ lquery = *lwork == -1;
+ if (ijobvl <= 0) {
+ *info = -1;
+ } else if (ijobvr <= 0) {
+ *info = -2;
+ } else if (! wantst && ! lsame_(sort, "N")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldvsl < 1 || ilvsl && *ldvsl < *n) {
+ *info = -15;
+ } else if (*ldvsr < 1 || ilvsr && *ldvsr < *n) {
+ *info = -17;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+ if (*n > 0) {
+/* Computing MAX */
+ i__1 = *n << 3, i__2 = *n * 6 + 16;
+ minwrk = max(i__1,i__2);
+ maxwrk = minwrk - *n + *n * ilaenv_(&c__1, "DGEQRF", " ", n, &
+ c__1, n, &c__0);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = minwrk - *n + *n * ilaenv_(&c__1, "DORMQR",
+ " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ if (ilvsl) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = minwrk - *n + *n * ilaenv_(&c__1, "DOR"
+ "GQR", " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ }
+ } else {
+ minwrk = 1;
+ maxwrk = 1;
+ }
+ work[1] = (doublereal) maxwrk;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -19;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGGES ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *sdim = 0;
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = dlamch_("P");
+ safmin = dlamch_("S");
+ safmax = 1. / safmin;
+ dlabad_(&safmin, &safmax);
+ smlnum = sqrt(safmin) / eps;
+ bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = dlange_("M", n, n, &a[a_offset], lda, &work[1]);
+ ilascl = FALSE_;
+ if (anrm > 0. && anrm < smlnum) {
+ anrmto = smlnum;
+ ilascl = TRUE_;
+ } else if (anrm > bignum) {
+ anrmto = bignum;
+ ilascl = TRUE_;
+ }
+ if (ilascl) {
+ dlascl_("G", &c__0, &c__0, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+ bnrm = dlange_("M", n, n, &b[b_offset], ldb, &work[1]);
+ ilbscl = FALSE_;
+ if (bnrm > 0. && bnrm < smlnum) {
+ bnrmto = smlnum;
+ ilbscl = TRUE_;
+ } else if (bnrm > bignum) {
+ bnrmto = bignum;
+ ilbscl = TRUE_;
+ }
+ if (ilbscl) {
+ dlascl_("G", &c__0, &c__0, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+ ierr);
+ }
+
+/* Permute the matrix to make it more nearly triangular */
+/* (Workspace: need 6*N + 2*N space for storing balancing factors) */
+
+ ileft = 1;
+ iright = *n + 1;
+ iwrk = iright + *n;
+ dggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &work[
+ ileft], &work[iright], &work[iwrk], &ierr);
+
+/* Reduce B to triangular form (QR decomposition of B) */
+/* (Workspace: need N, prefer N*NB) */
+
+ irows = ihi + 1 - ilo;
+ icols = *n + 1 - ilo;
+ itau = iwrk;
+ iwrk = itau + irows;
+ i__1 = *lwork + 1 - iwrk;
+ dgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+ iwrk], &i__1, &ierr);
+
+/* Apply the orthogonal transformation to matrix A */
+/* (Workspace: need N, prefer N*NB) */
+
+ i__1 = *lwork + 1 - iwrk;
+ dormqr_("L", "T", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+ work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwrk], &i__1, &
+ ierr);
+
+/* Initialize VSL */
+/* (Workspace: need N, prefer N*NB) */
+
+ if (ilvsl) {
+ dlaset_("Full", n, n, &c_b38, &c_b39, &vsl[vsl_offset], ldvsl);
+ if (irows > 1) {
+ i__1 = irows - 1;
+ i__2 = irows - 1;
+ dlacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vsl[
+ ilo + 1 + ilo * vsl_dim1], ldvsl);
+ }
+ i__1 = *lwork + 1 - iwrk;
+ dorgqr_(&irows, &irows, &irows, &vsl[ilo + ilo * vsl_dim1], ldvsl, &
+ work[itau], &work[iwrk], &i__1, &ierr);
+ }
+
+/* Initialize VSR */
+
+ if (ilvsr) {
+ dlaset_("Full", n, n, &c_b38, &c_b39, &vsr[vsr_offset], ldvsr);
+ }
+
+/* Reduce to generalized Hessenberg form */
+/* (Workspace: none needed) */
+
+ dgghrd_(jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset],
+ ldb, &vsl[vsl_offset], ldvsl, &vsr[vsr_offset], ldvsr, &ierr);
+
+/* Perform QZ algorithm, computing Schur vectors if desired */
+/* (Workspace: need N) */
+
+ iwrk = itau;
+ i__1 = *lwork + 1 - iwrk;
+ dhgeqz_("S", jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+ b_offset], ldb, &alphar[1], &alphai[1], &beta[1], &vsl[vsl_offset]
+, ldvsl, &vsr[vsr_offset], ldvsr, &work[iwrk], &i__1, &ierr);
+ if (ierr != 0) {
+ if (ierr > 0 && ierr <= *n) {
+ *info = ierr;
+ } else if (ierr > *n && ierr <= *n << 1) {
+ *info = ierr - *n;
+ } else {
+ *info = *n + 1;
+ }
+ goto L50;
+ }
+
+/* Sort eigenvalues ALPHA/BETA if desired */
+/* (Workspace: need 4*N+16 ) */
+
+ *sdim = 0;
+ if (wantst) {
+
+/* Undo scaling on eigenvalues before SELCTGing */
+
+ if (ilascl) {
+ dlascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphar[1],
+ n, &ierr);
+ dlascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphai[1],
+ n, &ierr);
+ }
+ if (ilbscl) {
+ dlascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n,
+ &ierr);
+ }
+
+/* Select eigenvalues */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ bwork[i__] = (*selctg)(&alphar[i__], &alphai[i__], &beta[i__]);
+/* L10: */
+ }
+
+ i__1 = *lwork - iwrk + 1;
+ dtgsen_(&c__0, &ilvsl, &ilvsr, &bwork[1], n, &a[a_offset], lda, &b[
+ b_offset], ldb, &alphar[1], &alphai[1], &beta[1], &vsl[
+ vsl_offset], ldvsl, &vsr[vsr_offset], ldvsr, sdim, &pvsl, &
+ pvsr, dif, &work[iwrk], &i__1, idum, &c__1, &ierr);
+ if (ierr == 1) {
+ *info = *n + 3;
+ }
+
+ }
+
+/* Apply back-permutation to VSL and VSR */
+/* (Workspace: none needed) */
+
+ if (ilvsl) {
+ dggbak_("P", "L", n, &ilo, &ihi, &work[ileft], &work[iright], n, &vsl[
+ vsl_offset], ldvsl, &ierr);
+ }
+
+ if (ilvsr) {
+ dggbak_("P", "R", n, &ilo, &ihi, &work[ileft], &work[iright], n, &vsr[
+ vsr_offset], ldvsr, &ierr);
+ }
+
+/* Check if unscaling would cause over/underflow, if so, rescale */
+/* (ALPHAR(I),ALPHAI(I),BETA(I)) so BETA(I) is on the order of */
+/* B(I,I) and ALPHAR(I) and ALPHAI(I) are on the order of A(I,I) */
+
+ if (ilascl) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (alphai[i__] != 0.) {
+ if (alphar[i__] / safmax > anrmto / anrm || safmin / alphar[
+ i__] > anrm / anrmto) {
+ work[1] = (d__1 = a[i__ + i__ * a_dim1] / alphar[i__],
+ abs(d__1));
+ beta[i__] *= work[1];
+ alphar[i__] *= work[1];
+ alphai[i__] *= work[1];
+ } else if (alphai[i__] / safmax > anrmto / anrm || safmin /
+ alphai[i__] > anrm / anrmto) {
+ work[1] = (d__1 = a[i__ + (i__ + 1) * a_dim1] / alphai[
+ i__], abs(d__1));
+ beta[i__] *= work[1];
+ alphar[i__] *= work[1];
+ alphai[i__] *= work[1];
+ }
+ }
+/* L20: */
+ }
+ }
+
+ if (ilbscl) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (alphai[i__] != 0.) {
+ if (beta[i__] / safmax > bnrmto / bnrm || safmin / beta[i__]
+ > bnrm / bnrmto) {
+ work[1] = (d__1 = b[i__ + i__ * b_dim1] / beta[i__], abs(
+ d__1));
+ beta[i__] *= work[1];
+ alphar[i__] *= work[1];
+ alphai[i__] *= work[1];
+ }
+ }
+/* L30: */
+ }
+ }
+
+/* Undo scaling */
+
+ if (ilascl) {
+ dlascl_("H", &c__0, &c__0, &anrmto, &anrm, n, n, &a[a_offset], lda, &
+ ierr);
+ dlascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphar[1], n, &
+ ierr);
+ dlascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphai[1], n, &
+ ierr);
+ }
+
+ if (ilbscl) {
+ dlascl_("U", &c__0, &c__0, &bnrmto, &bnrm, n, n, &b[b_offset], ldb, &
+ ierr);
+ dlascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+ ierr);
+ }
+
+ if (wantst) {
+
+/* Check if reordering is correct */
+
+ lastsl = TRUE_;
+ lst2sl = TRUE_;
+ *sdim = 0;
+ ip = 0;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ cursl = (*selctg)(&alphar[i__], &alphai[i__], &beta[i__]);
+ if (alphai[i__] == 0.) {
+ if (cursl) {
+ ++(*sdim);
+ }
+ ip = 0;
+ if (cursl && ! lastsl) {
+ *info = *n + 2;
+ }
+ } else {
+ if (ip == 1) {
+
+/* Last eigenvalue of conjugate pair */
+
+ cursl = cursl || lastsl;
+ lastsl = cursl;
+ if (cursl) {
+ *sdim += 2;
+ }
+ ip = -1;
+ if (cursl && ! lst2sl) {
+ *info = *n + 2;
+ }
+ } else {
+
+/* First eigenvalue of conjugate pair */
+
+ ip = 1;
+ }
+ }
+ lst2sl = lastsl;
+ lastsl = cursl;
+/* L40: */
+ }
+
+ }
+
+L50:
+
+ work[1] = (doublereal) maxwrk;
+
+ return 0;
+
+/* End of DGGES */
+
+} /* dgges_ */
diff --git a/contrib/libs/clapack/dggesx.c b/contrib/libs/clapack/dggesx.c
new file mode 100644
index 0000000000..c0bd11fe2d
--- /dev/null
+++ b/contrib/libs/clapack/dggesx.c
@@ -0,0 +1,818 @@
+/* dggesx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+static doublereal c_b42 = 0.;
+static doublereal c_b43 = 1.;
+
+/* Subroutine */ int dggesx_(char *jobvsl, char *jobvsr, char *sort, L_fp
+ selctg, char *sense, integer *n, doublereal *a, integer *lda,
+ doublereal *b, integer *ldb, integer *sdim, doublereal *alphar,
+ doublereal *alphai, doublereal *beta, doublereal *vsl, integer *ldvsl,
+ doublereal *vsr, integer *ldvsr, doublereal *rconde, doublereal *
+ rcondv, doublereal *work, integer *lwork, integer *iwork, integer *
+ liwork, logical *bwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, vsl_dim1, vsl_offset,
+ vsr_dim1, vsr_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, ip;
+ doublereal pl, pr, dif[2];
+ integer ihi, ilo;
+ doublereal eps;
+ integer ijob;
+ doublereal anrm, bnrm;
+ integer ierr, itau, iwrk, lwrk;
+ extern logical lsame_(char *, char *);
+ integer ileft, icols;
+ logical cursl, ilvsl, ilvsr;
+ integer irows;
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dggbak_(
+ char *, char *, integer *, integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, integer *), dggbal_(char *, integer *, doublereal *, integer
+ *, doublereal *, integer *, integer *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *);
+ logical lst2sl;
+ extern doublereal dlamch_(char *), dlange_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *);
+ extern /* Subroutine */ int dgghrd_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *, integer *), dlascl_(char *, integer *, integer *, doublereal
+ *, doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *);
+ logical ilascl, ilbscl;
+ extern /* Subroutine */ int dgeqrf_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, integer *),
+ dlacpy_(char *, integer *, integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ doublereal safmin;
+ extern /* Subroutine */ int dlaset_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *);
+ doublereal safmax;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum;
+ extern /* Subroutine */ int dhgeqz_(char *, char *, char *, integer *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ integer *);
+ integer ijobvl, iright;
+ extern /* Subroutine */ int dtgsen_(integer *, logical *, logical *,
+ logical *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *, doublereal *,
+ doublereal *, doublereal *, doublereal *, integer *, integer *,
+ integer *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ijobvr;
+ logical wantsb;
+ integer liwmin;
+ logical wantse, lastsl;
+ doublereal anrmto, bnrmto;
+ extern /* Subroutine */ int dorgqr_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *);
+ integer minwrk, maxwrk;
+ logical wantsn;
+ doublereal smlnum;
+ extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ logical wantst, lquery, wantsv;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+/* .. Function Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGGESX computes for a pair of N-by-N real nonsymmetric matrices */
+/* (A,B), the generalized eigenvalues, the real Schur form (S,T), and, */
+/* optionally, the left and/or right matrices of Schur vectors (VSL and */
+/* VSR). This gives the generalized Schur factorization */
+
+/* (A,B) = ( (VSL) S (VSR)**T, (VSL) T (VSR)**T ) */
+
+/* Optionally, it also orders the eigenvalues so that a selected cluster */
+/* of eigenvalues appears in the leading diagonal blocks of the upper */
+/* quasi-triangular matrix S and the upper triangular matrix T; computes */
+/* a reciprocal condition number for the average of the selected */
+/* eigenvalues (RCONDE); and computes a reciprocal condition number for */
+/* the right and left deflating subspaces corresponding to the selected */
+/* eigenvalues (RCONDV). The leading columns of VSL and VSR then form */
+/* an orthonormal basis for the corresponding left and right eigenspaces */
+/* (deflating subspaces). */
+
+/* A generalized eigenvalue for a pair of matrices (A,B) is a scalar w */
+/* or a ratio alpha/beta = w, such that A - w*B is singular. It is */
+/* usually represented as the pair (alpha,beta), as there is a */
+/* reasonable interpretation for beta=0 or for both being zero. */
+
+/* A pair of matrices (S,T) is in generalized real Schur form if T is */
+/* upper triangular with non-negative diagonal and S is block upper */
+/* triangular with 1-by-1 and 2-by-2 blocks. 1-by-1 blocks correspond */
+/* to real generalized eigenvalues, while 2-by-2 blocks of S will be */
+/* "standardized" by making the corresponding elements of T have the */
+/* form: */
+/* [ a 0 ] */
+/* [ 0 b ] */
+
+/* and the pair of corresponding 2-by-2 blocks in S and T will have a */
+/* complex conjugate pair of generalized eigenvalues. */
+
+
+/* Arguments */
+/* ========= */
+
+/* JOBVSL (input) CHARACTER*1 */
+/* = 'N': do not compute the left Schur vectors; */
+/* = 'V': compute the left Schur vectors. */
+
+/* JOBVSR (input) CHARACTER*1 */
+/* = 'N': do not compute the right Schur vectors; */
+/* = 'V': compute the right Schur vectors. */
+
+/* SORT (input) CHARACTER*1 */
+/* Specifies whether or not to order the eigenvalues on the */
+/* diagonal of the generalized Schur form. */
+/* = 'N': Eigenvalues are not ordered; */
+/* = 'S': Eigenvalues are ordered (see SELCTG). */
+
+/* SELCTG (external procedure) LOGICAL FUNCTION of three DOUBLE PRECISION arguments */
+/* SELCTG must be declared EXTERNAL in the calling subroutine. */
+/* If SORT = 'N', SELCTG is not referenced. */
+/* If SORT = 'S', SELCTG is used to select eigenvalues to sort */
+/* to the top left of the Schur form. */
+/* An eigenvalue (ALPHAR(j)+ALPHAI(j))/BETA(j) is selected if */
+/* SELCTG(ALPHAR(j),ALPHAI(j),BETA(j)) is true; i.e. if either */
+/* one of a complex conjugate pair of eigenvalues is selected, */
+/* then both complex eigenvalues are selected. */
+/* Note that a selected complex eigenvalue may no longer satisfy */
+/* SELCTG(ALPHAR(j),ALPHAI(j),BETA(j)) = .TRUE. after ordering, */
+/* since ordering may change the value of complex eigenvalues */
+/* (especially if the eigenvalue is ill-conditioned), in this */
+/* case INFO is set to N+3. */
+
+/* SENSE (input) CHARACTER*1 */
+/* Determines which reciprocal condition numbers are computed. */
+/* = 'N' : None are computed; */
+/* = 'E' : Computed for average of selected eigenvalues only; */
+/* = 'V' : Computed for selected deflating subspaces only; */
+/* = 'B' : Computed for both. */
+/* If SENSE = 'E', 'V', or 'B', SORT must equal 'S'. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VSL, and VSR. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA, N) */
+/* On entry, the first of the pair of matrices. */
+/* On exit, A has been overwritten by its generalized Schur */
+/* form S. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB, N) */
+/* On entry, the second of the pair of matrices. */
+/* On exit, B has been overwritten by its generalized Schur */
+/* form T. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* SDIM (output) INTEGER */
+/* If SORT = 'N', SDIM = 0. */
+/* If SORT = 'S', SDIM = number of eigenvalues (after sorting) */
+/* for which SELCTG is true. (Complex conjugate pairs for which */
+/* SELCTG is true for either eigenvalue count as 2.) */
+
+/* ALPHAR (output) DOUBLE PRECISION array, dimension (N) */
+/* ALPHAI (output) DOUBLE PRECISION array, dimension (N) */
+/* BETA (output) DOUBLE PRECISION array, dimension (N) */
+/* On exit, (ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N, will */
+/* be the generalized eigenvalues. ALPHAR(j) + ALPHAI(j)*i */
+/* and BETA(j),j=1,...,N are the diagonals of the complex Schur */
+/* form (S,T) that would result if the 2-by-2 diagonal blocks of */
+/* the real Schur form of (A,B) were further reduced to */
+/* triangular form using 2-by-2 complex unitary transformations. */
+/* If ALPHAI(j) is zero, then the j-th eigenvalue is real; if */
+/* positive, then the j-th and (j+1)-st eigenvalues are a */
+/* complex conjugate pair, with ALPHAI(j+1) negative. */
+
+/* Note: the quotients ALPHAR(j)/BETA(j) and ALPHAI(j)/BETA(j) */
+/* may easily over- or underflow, and BETA(j) may even be zero. */
+/* Thus, the user should avoid naively computing the ratio. */
+/* However, ALPHAR and ALPHAI will be always less than and */
+/* usually comparable with norm(A) in magnitude, and BETA always */
+/* less than and usually comparable with norm(B). */
+
+/* VSL (output) DOUBLE PRECISION array, dimension (LDVSL,N) */
+/* If JOBVSL = 'V', VSL will contain the left Schur vectors. */
+/* Not referenced if JOBVSL = 'N'. */
+
+/* LDVSL (input) INTEGER */
+/* The leading dimension of the matrix VSL. LDVSL >=1, and */
+/* if JOBVSL = 'V', LDVSL >= N. */
+
+/* VSR (output) DOUBLE PRECISION array, dimension (LDVSR,N) */
+/* If JOBVSR = 'V', VSR will contain the right Schur vectors. */
+/* Not referenced if JOBVSR = 'N'. */
+
+/* LDVSR (input) INTEGER */
+/* The leading dimension of the matrix VSR. LDVSR >= 1, and */
+/* if JOBVSR = 'V', LDVSR >= N. */
+
+/* RCONDE (output) DOUBLE PRECISION array, dimension ( 2 ) */
+/* If SENSE = 'E' or 'B', RCONDE(1) and RCONDE(2) contain the */
+/* reciprocal condition numbers for the average of the selected */
+/* eigenvalues. */
+/* Not referenced if SENSE = 'N' or 'V'. */
+
+/* RCONDV (output) DOUBLE PRECISION array, dimension ( 2 ) */
+/* If SENSE = 'V' or 'B', RCONDV(1) and RCONDV(2) contain the */
+/* reciprocal condition numbers for the selected deflating */
+/* subspaces. */
+/* Not referenced if SENSE = 'N' or 'E'. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If N = 0, LWORK >= 1, else if SENSE = 'E', 'V', or 'B', */
+/* LWORK >= max( 8*N, 6*N+16, 2*SDIM*(N-SDIM) ), else */
+/* LWORK >= max( 8*N, 6*N+16 ). */
+/* Note that 2*SDIM*(N-SDIM) <= N*N/2. */
+/* Note also that an error is only returned if */
+/* LWORK < max( 8*N, 6*N+16), but if SENSE = 'E' or 'V' or 'B' */
+/* this may not be large enough. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the bound on the optimal size of the WORK */
+/* array and the minimum size of the IWORK array, returns these */
+/* values as the first entries of the WORK and IWORK arrays, and */
+/* no error message related to LWORK or LIWORK is issued by */
+/* XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the minimum LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If SENSE = 'N' or N = 0, LIWORK >= 1, otherwise */
+/* LIWORK >= N+6. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the bound on the optimal size of the */
+/* WORK array and the minimum size of the IWORK array, returns */
+/* these values as the first entries of the WORK and IWORK */
+/* arrays, and no error message related to LWORK or LIWORK is */
+/* issued by XERBLA. */
+
+/* BWORK (workspace) LOGICAL array, dimension (N) */
+/* Not referenced if SORT = 'N'. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1,...,N: */
+/* The QZ iteration failed. (A,B) are not in Schur */
+/* form, but ALPHAR(j), ALPHAI(j), and BETA(j) should */
+/* be correct for j=INFO+1,...,N. */
+/* > N: =N+1: other than QZ iteration failed in DHGEQZ */
+/* =N+2: after reordering, roundoff changed values of */
+/* some complex eigenvalues so that leading */
+/* eigenvalues in the Generalized Schur form no */
+/* longer satisfy SELCTG=.TRUE. This could also */
+/* be caused due to scaling. */
+/* =N+3: reordering failed in DTGSEN. */
+
+/* Further details */
+/* =============== */
+
+/* An approximate (asymptotic) bound on the average absolute error of */
+/* the selected eigenvalues is */
+
+/* EPS * norm((A, B)) / RCONDE( 1 ). */
+
+/* An approximate (asymptotic) bound on the maximum angular error in */
+/* the computed deflating subspaces is */
+
+/* EPS * norm((A, B)) / RCONDV( 2 ). */
+
+/* See LAPACK User's Guide, section 4.11 for more information. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alphar;
+ --alphai;
+ --beta;
+ vsl_dim1 = *ldvsl;
+ vsl_offset = 1 + vsl_dim1;
+ vsl -= vsl_offset;
+ vsr_dim1 = *ldvsr;
+ vsr_offset = 1 + vsr_dim1;
+ vsr -= vsr_offset;
+ --rconde;
+ --rcondv;
+ --work;
+ --iwork;
+ --bwork;
+
+ /* Function Body */
+ if (lsame_(jobvsl, "N")) {
+ ijobvl = 1;
+ ilvsl = FALSE_;
+ } else if (lsame_(jobvsl, "V")) {
+ ijobvl = 2;
+ ilvsl = TRUE_;
+ } else {
+ ijobvl = -1;
+ ilvsl = FALSE_;
+ }
+
+ if (lsame_(jobvsr, "N")) {
+ ijobvr = 1;
+ ilvsr = FALSE_;
+ } else if (lsame_(jobvsr, "V")) {
+ ijobvr = 2;
+ ilvsr = TRUE_;
+ } else {
+ ijobvr = -1;
+ ilvsr = FALSE_;
+ }
+
+ wantst = lsame_(sort, "S");
+ wantsn = lsame_(sense, "N");
+ wantse = lsame_(sense, "E");
+ wantsv = lsame_(sense, "V");
+ wantsb = lsame_(sense, "B");
+ lquery = *lwork == -1 || *liwork == -1;
+ if (wantsn) {
+ ijob = 0;
+ } else if (wantse) {
+ ijob = 1;
+ } else if (wantsv) {
+ ijob = 2;
+ } else if (wantsb) {
+ ijob = 4;
+ }
+
+/* Test the input arguments */
+
+ *info = 0;
+ if (ijobvl <= 0) {
+ *info = -1;
+ } else if (ijobvr <= 0) {
+ *info = -2;
+ } else if (! wantst && ! lsame_(sort, "N")) {
+ *info = -3;
+ } else if (! (wantsn || wantse || wantsv || wantsb) || ! wantst && !
+ wantsn) {
+ *info = -5;
+ } else if (*n < 0) {
+ *info = -6;
+ } else if (*lda < max(1,*n)) {
+ *info = -8;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldvsl < 1 || ilvsl && *ldvsl < *n) {
+ *info = -16;
+ } else if (*ldvsr < 1 || ilvsr && *ldvsr < *n) {
+ *info = -18;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+ if (*n > 0) {
+/* Computing MAX */
+ i__1 = *n << 3, i__2 = *n * 6 + 16;
+ minwrk = max(i__1,i__2);
+ maxwrk = minwrk - *n + *n * ilaenv_(&c__1, "DGEQRF", " ", n, &
+ c__1, n, &c__0);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = minwrk - *n + *n * ilaenv_(&c__1, "DORMQR",
+ " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ if (ilvsl) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = minwrk - *n + *n * ilaenv_(&c__1, "DOR"
+ "GQR", " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ }
+ lwrk = maxwrk;
+ if (ijob >= 1) {
+/* Computing MAX */
+ i__1 = lwrk, i__2 = *n * *n / 2;
+ lwrk = max(i__1,i__2);
+ }
+ } else {
+ minwrk = 1;
+ maxwrk = 1;
+ lwrk = 1;
+ }
+ work[1] = (doublereal) lwrk;
+ if (wantsn || *n == 0) {
+ liwmin = 1;
+ } else {
+ liwmin = *n + 6;
+ }
+ iwork[1] = liwmin;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -22;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -24;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGGESX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *sdim = 0;
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = dlamch_("P");
+ safmin = dlamch_("S");
+ safmax = 1. / safmin;
+ dlabad_(&safmin, &safmax);
+ smlnum = sqrt(safmin) / eps;
+ bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = dlange_("M", n, n, &a[a_offset], lda, &work[1]);
+ ilascl = FALSE_;
+ if (anrm > 0. && anrm < smlnum) {
+ anrmto = smlnum;
+ ilascl = TRUE_;
+ } else if (anrm > bignum) {
+ anrmto = bignum;
+ ilascl = TRUE_;
+ }
+ if (ilascl) {
+ dlascl_("G", &c__0, &c__0, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+ bnrm = dlange_("M", n, n, &b[b_offset], ldb, &work[1]);
+ ilbscl = FALSE_;
+ if (bnrm > 0. && bnrm < smlnum) {
+ bnrmto = smlnum;
+ ilbscl = TRUE_;
+ } else if (bnrm > bignum) {
+ bnrmto = bignum;
+ ilbscl = TRUE_;
+ }
+ if (ilbscl) {
+ dlascl_("G", &c__0, &c__0, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+ ierr);
+ }
+
+/* Permute the matrix to make it more nearly triangular */
+/* (Workspace: need 6*N + 2*N for permutation parameters) */
+
+ ileft = 1;
+ iright = *n + 1;
+ iwrk = iright + *n;
+ dggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &work[
+ ileft], &work[iright], &work[iwrk], &ierr);
+
+/* Reduce B to triangular form (QR decomposition of B) */
+/* (Workspace: need N, prefer N*NB) */
+
+ irows = ihi + 1 - ilo;
+ icols = *n + 1 - ilo;
+ itau = iwrk;
+ iwrk = itau + irows;
+ i__1 = *lwork + 1 - iwrk;
+ dgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+ iwrk], &i__1, &ierr);
+
+/* Apply the orthogonal transformation to matrix A */
+/* (Workspace: need N, prefer N*NB) */
+
+ i__1 = *lwork + 1 - iwrk;
+ dormqr_("L", "T", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+ work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwrk], &i__1, &
+ ierr);
+
+/* Initialize VSL */
+/* (Workspace: need N, prefer N*NB) */
+
+ if (ilvsl) {
+ dlaset_("Full", n, n, &c_b42, &c_b43, &vsl[vsl_offset], ldvsl);
+ if (irows > 1) {
+ i__1 = irows - 1;
+ i__2 = irows - 1;
+ dlacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vsl[
+ ilo + 1 + ilo * vsl_dim1], ldvsl);
+ }
+ i__1 = *lwork + 1 - iwrk;
+ dorgqr_(&irows, &irows, &irows, &vsl[ilo + ilo * vsl_dim1], ldvsl, &
+ work[itau], &work[iwrk], &i__1, &ierr);
+ }
+
+/* Initialize VSR */
+
+ if (ilvsr) {
+ dlaset_("Full", n, n, &c_b42, &c_b43, &vsr[vsr_offset], ldvsr);
+ }
+
+/* Reduce to generalized Hessenberg form */
+/* (Workspace: none needed) */
+
+ dgghrd_(jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset],
+ ldb, &vsl[vsl_offset], ldvsl, &vsr[vsr_offset], ldvsr, &ierr);
+
+ *sdim = 0;
+
+/* Perform QZ algorithm, computing Schur vectors if desired */
+/* (Workspace: need N) */
+
+ iwrk = itau;
+ i__1 = *lwork + 1 - iwrk;
+ dhgeqz_("S", jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+ b_offset], ldb, &alphar[1], &alphai[1], &beta[1], &vsl[vsl_offset]
+, ldvsl, &vsr[vsr_offset], ldvsr, &work[iwrk], &i__1, &ierr);
+ if (ierr != 0) {
+ if (ierr > 0 && ierr <= *n) {
+ *info = ierr;
+ } else if (ierr > *n && ierr <= *n << 1) {
+ *info = ierr - *n;
+ } else {
+ *info = *n + 1;
+ }
+ goto L60;
+ }
+
+/* Sort eigenvalues ALPHA/BETA and compute the reciprocal of */
+/* condition number(s) */
+/* (Workspace: If IJOB >= 1, need MAX( 8*(N+1), 2*SDIM*(N-SDIM) ) */
+/* otherwise, need 8*(N+1) ) */
+
+ if (wantst) {
+
+/* Undo scaling on eigenvalues before SELCTGing */
+
+ if (ilascl) {
+ dlascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphar[1],
+ n, &ierr);
+ dlascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphai[1],
+ n, &ierr);
+ }
+ if (ilbscl) {
+ dlascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n,
+ &ierr);
+ }
+
+/* Select eigenvalues */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ bwork[i__] = (*selctg)(&alphar[i__], &alphai[i__], &beta[i__]);
+/* L10: */
+ }
+
+/* Reorder eigenvalues, transform Generalized Schur vectors, and */
+/* compute reciprocal condition numbers */
+
+ i__1 = *lwork - iwrk + 1;
+ dtgsen_(&ijob, &ilvsl, &ilvsr, &bwork[1], n, &a[a_offset], lda, &b[
+ b_offset], ldb, &alphar[1], &alphai[1], &beta[1], &vsl[
+ vsl_offset], ldvsl, &vsr[vsr_offset], ldvsr, sdim, &pl, &pr,
+ dif, &work[iwrk], &i__1, &iwork[1], liwork, &ierr);
+
+ if (ijob >= 1) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*sdim << 1) * (*n - *sdim);
+ maxwrk = max(i__1,i__2);
+ }
+ if (ierr == -22) {
+
+/* not enough real workspace */
+
+ *info = -22;
+ } else {
+ if (ijob == 1 || ijob == 4) {
+ rconde[1] = pl;
+ rconde[2] = pr;
+ }
+ if (ijob == 2 || ijob == 4) {
+ rcondv[1] = dif[0];
+ rcondv[2] = dif[1];
+ }
+ if (ierr == 1) {
+ *info = *n + 3;
+ }
+ }
+
+ }
+
+/* Apply permutation to VSL and VSR */
+/* (Workspace: none needed) */
+
+ if (ilvsl) {
+ dggbak_("P", "L", n, &ilo, &ihi, &work[ileft], &work[iright], n, &vsl[
+ vsl_offset], ldvsl, &ierr);
+ }
+
+ if (ilvsr) {
+ dggbak_("P", "R", n, &ilo, &ihi, &work[ileft], &work[iright], n, &vsr[
+ vsr_offset], ldvsr, &ierr);
+ }
+
+/* Check if unscaling would cause over/underflow, if so, rescale */
+/* (ALPHAR(I),ALPHAI(I),BETA(I)) so BETA(I) is on the order of */
+/* B(I,I) and ALPHAR(I) and ALPHAI(I) are on the order of A(I,I) */
+
+ if (ilascl) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (alphai[i__] != 0.) {
+ if (alphar[i__] / safmax > anrmto / anrm || safmin / alphar[
+ i__] > anrm / anrmto) {
+ work[1] = (d__1 = a[i__ + i__ * a_dim1] / alphar[i__],
+ abs(d__1));
+ beta[i__] *= work[1];
+ alphar[i__] *= work[1];
+ alphai[i__] *= work[1];
+ } else if (alphai[i__] / safmax > anrmto / anrm || safmin /
+ alphai[i__] > anrm / anrmto) {
+ work[1] = (d__1 = a[i__ + (i__ + 1) * a_dim1] / alphai[
+ i__], abs(d__1));
+ beta[i__] *= work[1];
+ alphar[i__] *= work[1];
+ alphai[i__] *= work[1];
+ }
+ }
+/* L20: */
+ }
+ }
+
+ if (ilbscl) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (alphai[i__] != 0.) {
+ if (beta[i__] / safmax > bnrmto / bnrm || safmin / beta[i__]
+ > bnrm / bnrmto) {
+ work[1] = (d__1 = b[i__ + i__ * b_dim1] / beta[i__], abs(
+ d__1));
+ beta[i__] *= work[1];
+ alphar[i__] *= work[1];
+ alphai[i__] *= work[1];
+ }
+ }
+/* L30: */
+ }
+ }
+
+/* Undo scaling */
+
+ if (ilascl) {
+ dlascl_("H", &c__0, &c__0, &anrmto, &anrm, n, n, &a[a_offset], lda, &
+ ierr);
+ dlascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphar[1], n, &
+ ierr);
+ dlascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphai[1], n, &
+ ierr);
+ }
+
+ if (ilbscl) {
+ dlascl_("U", &c__0, &c__0, &bnrmto, &bnrm, n, n, &b[b_offset], ldb, &
+ ierr);
+ dlascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+ ierr);
+ }
+
+ if (wantst) {
+
+/* Check if reordering is correct */
+
+ lastsl = TRUE_;
+ lst2sl = TRUE_;
+ *sdim = 0;
+ ip = 0;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ cursl = (*selctg)(&alphar[i__], &alphai[i__], &beta[i__]);
+ if (alphai[i__] == 0.) {
+ if (cursl) {
+ ++(*sdim);
+ }
+ ip = 0;
+ if (cursl && ! lastsl) {
+ *info = *n + 2;
+ }
+ } else {
+ if (ip == 1) {
+
+/* Last eigenvalue of conjugate pair */
+
+ cursl = cursl || lastsl;
+ lastsl = cursl;
+ if (cursl) {
+ *sdim += 2;
+ }
+ ip = -1;
+ if (cursl && ! lst2sl) {
+ *info = *n + 2;
+ }
+ } else {
+
+/* First eigenvalue of conjugate pair */
+
+ ip = 1;
+ }
+ }
+ lst2sl = lastsl;
+ lastsl = cursl;
+/* L50: */
+ }
+
+ }
+
+L60:
+
+ work[1] = (doublereal) maxwrk;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of DGGESX */
+
+} /* dggesx_ */
diff --git a/contrib/libs/clapack/dggev.c b/contrib/libs/clapack/dggev.c
new file mode 100644
index 0000000000..4b47f8b03f
--- /dev/null
+++ b/contrib/libs/clapack/dggev.c
@@ -0,0 +1,641 @@
+/* dggev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values.  f2c passes every actual argument by address,
+   so integer/real literals used as arguments need addressable storage. */
+static integer c__1 = 1;	/* addressable literal 1 */
+static integer c__0 = 0;	/* addressable literal 0 */
+static integer c_n1 = -1;	/* addressable literal -1 (block-size query for ILAENV) */
+static doublereal c_b36 = 0.;	/* 0.0 — off-diagonal fill for DLASET on VL/VR */
+static doublereal c_b37 = 1.;	/* 1.0 — diagonal fill for DLASET on VL/VR */
+
+/* Subroutine */ int dggev_(char *jobvl, char *jobvr, integer *n, doublereal *
+	a, integer *lda, doublereal *b, integer *ldb, doublereal *alphar, 
+	doublereal *alphai, doublereal *beta, doublereal *vl, integer *ldvl, 
+	doublereal *vr, integer *ldvr, doublereal *work, integer *lwork, 
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, vl_dim1, vl_offset, vr_dim1, 
+	    vr_offset, i__1, i__2;
+    doublereal d__1, d__2, d__3, d__4;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer jc, in, jr, ihi, ilo;
+    doublereal eps;
+    logical ilv;
+    doublereal anrm, bnrm;
+    integer ierr, itau;
+    doublereal temp;
+    logical ilvl, ilvr;
+    integer iwrk;
+    extern logical lsame_(char *, char *);
+    integer ileft, icols, irows;
+    extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dggbak_(
+	    char *, char *, integer *, integer *, integer *, doublereal *, 
+	    doublereal *, integer *, doublereal *, integer *, integer *), dggbal_(char *, integer *, doublereal *, integer 
+	    *, doublereal *, integer *, integer *, integer *, doublereal *, 
+	    doublereal *, doublereal *, integer *);
+    extern doublereal dlamch_(char *), dlange_(char *, integer *, 
+	    integer *, doublereal *, integer *, doublereal *);
+    extern /* Subroutine */ int dgghrd_(char *, char *, integer *, integer *, 
+	    integer *, doublereal *, integer *, doublereal *, integer *, 
+	    doublereal *, integer *, doublereal *, integer *, integer *), dlascl_(char *, integer *, integer *, doublereal 
+	    *, doublereal *, integer *, integer *, doublereal *, integer *, 
+	    integer *);
+    logical ilascl, ilbscl;
+    extern /* Subroutine */ int dgeqrf_(integer *, integer *, doublereal *, 
+	    integer *, doublereal *, doublereal *, integer *, integer *), 
+	    dlacpy_(char *, integer *, integer *, doublereal *, integer *, 
+	    doublereal *, integer *), dlaset_(char *, integer *, 
+	    integer *, doublereal *, doublereal *, doublereal *, integer *), dtgevc_(char *, char *, logical *, integer *, doublereal 
+	    *, integer *, doublereal *, integer *, doublereal *, integer *, 
+	    doublereal *, integer *, integer *, integer *, doublereal *, 
+	    integer *);
+    logical ldumma[1];
+    char chtemp[1];
+    doublereal bignum;
+    extern /* Subroutine */ int dhgeqz_(char *, char *, char *, integer *, 
+	    integer *, integer *, doublereal *, integer *, doublereal *, 
+	    integer *, doublereal *, doublereal *, doublereal *, doublereal *, 
+	    integer *, doublereal *, integer *, doublereal *, integer *, 
+	    integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    integer ijobvl, iright, ijobvr;
+    extern /* Subroutine */ int dorgqr_(integer *, integer *, integer *, 
+	    doublereal *, integer *, doublereal *, doublereal *, integer *, 
+	    integer *);
+    doublereal anrmto, bnrmto;
+    extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *, 
+	    integer *, doublereal *, integer *, doublereal *, doublereal *, 
+	    integer *, doublereal *, integer *, integer *);
+    integer minwrk, maxwrk;
+    doublereal smlnum;
+    logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGGEV computes for a pair of N-by-N real nonsymmetric matrices (A,B) */
+/* the generalized eigenvalues, and optionally, the left and/or right */
+/* generalized eigenvectors. */
+
+/* A generalized eigenvalue for a pair of matrices (A,B) is a scalar */
+/* lambda or a ratio alpha/beta = lambda, such that A - lambda*B is */
+/* singular. It is usually represented as the pair (alpha,beta), as */
+/* there is a reasonable interpretation for beta=0, and even for both */
+/* being zero. */
+
+/* The right eigenvector v(j) corresponding to the eigenvalue lambda(j) */
+/* of (A,B) satisfies */
+
+/* A * v(j) = lambda(j) * B * v(j). */
+
+/* The left eigenvector u(j) corresponding to the eigenvalue lambda(j) */
+/* of (A,B) satisfies */
+
+/* u(j)**H * A = lambda(j) * u(j)**H * B . */
+
+/* where u(j)**H is the conjugate-transpose of u(j). */
+
+
+/* Arguments */
+/* ========= */
+
+/* JOBVL (input) CHARACTER*1 */
+/* = 'N': do not compute the left generalized eigenvectors; */
+/* = 'V': compute the left generalized eigenvectors. */
+
+/* JOBVR (input) CHARACTER*1 */
+/* = 'N': do not compute the right generalized eigenvectors; */
+/* = 'V': compute the right generalized eigenvectors. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VL, and VR. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA, N) */
+/* On entry, the matrix A in the pair (A,B). */
+/* On exit, A has been overwritten. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB, N) */
+/* On entry, the matrix B in the pair (A,B). */
+/* On exit, B has been overwritten. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* ALPHAR (output) DOUBLE PRECISION array, dimension (N) */
+/* ALPHAI (output) DOUBLE PRECISION array, dimension (N) */
+/* BETA (output) DOUBLE PRECISION array, dimension (N) */
+/* On exit, (ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N, will */
+/* be the generalized eigenvalues. If ALPHAI(j) is zero, then */
+/* the j-th eigenvalue is real; if positive, then the j-th and */
+/* (j+1)-st eigenvalues are a complex conjugate pair, with */
+/* ALPHAI(j+1) negative. */
+
+/* Note: the quotients ALPHAR(j)/BETA(j) and ALPHAI(j)/BETA(j) */
+/* may easily over- or underflow, and BETA(j) may even be zero. */
+/* Thus, the user should avoid naively computing the ratio */
+/* alpha/beta. However, ALPHAR and ALPHAI will be always less */
+/* than and usually comparable with norm(A) in magnitude, and */
+/* BETA always less than and usually comparable with norm(B). */
+
+/* VL (output) DOUBLE PRECISION array, dimension (LDVL,N) */
+/* If JOBVL = 'V', the left eigenvectors u(j) are stored one */
+/* after another in the columns of VL, in the same order as */
+/* their eigenvalues. If the j-th eigenvalue is real, then */
+/* u(j) = VL(:,j), the j-th column of VL. If the j-th and */
+/* (j+1)-th eigenvalues form a complex conjugate pair, then */
+/* u(j) = VL(:,j)+i*VL(:,j+1) and u(j+1) = VL(:,j)-i*VL(:,j+1). */
+/* Each eigenvector is scaled so the largest component has */
+/* abs(real part)+abs(imag. part)=1. */
+/* Not referenced if JOBVL = 'N'. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the matrix VL. LDVL >= 1, and */
+/* if JOBVL = 'V', LDVL >= N. */
+
+/* VR (output) DOUBLE PRECISION array, dimension (LDVR,N) */
+/* If JOBVR = 'V', the right eigenvectors v(j) are stored one */
+/* after another in the columns of VR, in the same order as */
+/* their eigenvalues. If the j-th eigenvalue is real, then */
+/* v(j) = VR(:,j), the j-th column of VR. If the j-th and */
+/* (j+1)-th eigenvalues form a complex conjugate pair, then */
+/* v(j) = VR(:,j)+i*VR(:,j+1) and v(j+1) = VR(:,j)-i*VR(:,j+1). */
+/* Each eigenvector is scaled so the largest component has */
+/* abs(real part)+abs(imag. part)=1. */
+/* Not referenced if JOBVR = 'N'. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the matrix VR. LDVR >= 1, and */
+/* if JOBVR = 'V', LDVR >= N. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,8*N). */
+/* For good performance, LWORK must generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1,...,N: */
+/* The QZ iteration failed. No eigenvectors have been */
+/* calculated, but ALPHAR(j), ALPHAI(j), and BETA(j) */
+/* should be correct for j=INFO+1,...,N. */
+/* > N: =N+1: other than QZ iteration failed in DHGEQZ. */
+/* =N+2: error return from DTGEVC. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments: map JOBVL/JOBVR onto integer codes */
+/* (ijobvl/ijobvr) and logical flags (ilvl/ilvr) */
+
+    /* Parameter adjustments (shift pointers so 1-based Fortran indexing works) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --alphar;
+    --alphai;
+    --beta;
+    vl_dim1 = *ldvl;
+    vl_offset = 1 + vl_dim1;
+    vl -= vl_offset;
+    vr_dim1 = *ldvr;
+    vr_offset = 1 + vr_dim1;
+    vr -= vr_offset;
+    --work;
+
+    /* Function Body */
+    if (lsame_(jobvl, "N")) {
+	ijobvl = 1;
+	ilvl = FALSE_;
+    } else if (lsame_(jobvl, "V")) {
+	ijobvl = 2;
+	ilvl = TRUE_;
+    } else {
+	ijobvl = -1;	/* invalid JOBVL; reported as *info = -1 below */
+	ilvl = FALSE_;
+    }
+
+    if (lsame_(jobvr, "N")) {
+	ijobvr = 1;
+	ilvr = FALSE_;
+    } else if (lsame_(jobvr, "V")) {
+	ijobvr = 2;
+	ilvr = TRUE_;
+    } else {
+	ijobvr = -1;	/* invalid JOBVR; reported as *info = -2 below */
+	ilvr = FALSE_;
+    }
+    ilv = ilvl || ilvr;	/* TRUE if left or right eigenvectors are wanted */
+
+/* Test the input arguments */
+
+    *info = 0;
+    lquery = *lwork == -1;	/* LWORK = -1 means workspace query only */
+    if (ijobvl <= 0) {
+	*info = -1;
+    } else if (ijobvr <= 0) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    } else if (*ldvl < 1 || ilvl && *ldvl < *n) {
+	*info = -12;
+    } else if (*ldvr < 1 || ilvr && *ldvr < *n) {
+	*info = -14;
+    }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV. The workspace is */
+/* computed assuming ILO = 1 and IHI = N, the worst case.) */
+
+    if (*info == 0) {
+/* Computing MAX */
+	i__1 = 1, i__2 = *n << 3;	/* minimum workspace: 8*N */
+	minwrk = max(i__1,i__2);
+/* Computing MAX */
+	i__1 = 1, i__2 = *n * (ilaenv_(&c__1, "DGEQRF", " ", n, &c__1, n, &
+		c__0) + 7);
+	maxwrk = max(i__1,i__2);
+/* Computing MAX */
+	i__1 = maxwrk, i__2 = *n * (ilaenv_(&c__1, "DORMQR", " ", n, &c__1, n, 
+		&c__0) + 7);
+	maxwrk = max(i__1,i__2);
+	if (ilvl) {
+/* Computing MAX */
+	    i__1 = maxwrk, i__2 = *n * (ilaenv_(&c__1, "DORGQR", " ", n, &
+		    c__1, n, &c_n1) + 7);
+	    maxwrk = max(i__1,i__2);
+	}
+	work[1] = (doublereal) maxwrk;	/* report optimal LWORK in WORK(1) */
+
+	if (*lwork < minwrk && ! lquery) {
+	    *info = -16;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGGEV ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;	/* workspace query: optimal size already stored in WORK(1) */
+    }
+
+/* Quick return if possible (nothing to compute when N = 0) */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Get machine constants */
+
+    eps = dlamch_("P");
+    smlnum = dlamch_("S");
+    bignum = 1. / smlnum;
+    dlabad_(&smlnum, &bignum);
+    smlnum = sqrt(smlnum) / eps;
+    bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+    anrm = dlange_("M", n, n, &a[a_offset], lda, &work[1]);
+    ilascl = FALSE_;
+    if (anrm > 0. && anrm < smlnum) {
+	anrmto = smlnum;
+	ilascl = TRUE_;
+    } else if (anrm > bignum) {
+	anrmto = bignum;
+	ilascl = TRUE_;
+    }
+    if (ilascl) {
+	dlascl_("G", &c__0, &c__0, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+		ierr);
+    }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+    bnrm = dlange_("M", n, n, &b[b_offset], ldb, &work[1]);
+    ilbscl = FALSE_;
+    if (bnrm > 0. && bnrm < smlnum) {
+	bnrmto = smlnum;
+	ilbscl = TRUE_;
+    } else if (bnrm > bignum) {
+	bnrmto = bignum;
+	ilbscl = TRUE_;
+    }
+    if (ilbscl) {
+	dlascl_("G", &c__0, &c__0, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+		ierr);
+    }
+
+/* Permute the matrices A, B to isolate eigenvalues if possible */
+/* (Workspace: need 6*N) */
+
+    ileft = 1;	/* WORK(ileft..) : left permutation/scaling from DGGBAL */
+    iright = *n + 1;	/* WORK(iright..): right permutation/scaling from DGGBAL */
+    iwrk = iright + *n;	/* WORK(iwrk..)  : general scratch area */
+    dggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &work[
+	    ileft], &work[iright], &work[iwrk], &ierr);
+
+/* Reduce B to triangular form (QR decomposition of B) */
+/* (Workspace: need N, prefer N*NB) */
+
+    irows = ihi + 1 - ilo;
+    if (ilv) {
+	icols = *n + 1 - ilo;
+    } else {
+	icols = irows;	/* eigenvalues only: work on the active block alone */
+    }
+    itau = iwrk;
+    iwrk = itau + irows;
+    i__1 = *lwork + 1 - iwrk;
+    dgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+	    iwrk], &i__1, &ierr);
+
+/* Apply the orthogonal transformation to matrix A */
+/* (Workspace: need N, prefer N*NB) */
+
+    i__1 = *lwork + 1 - iwrk;
+    dormqr_("L", "T", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+	    work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwrk], &i__1, &
+	    ierr);
+
+/* Initialize VL */
+/* (Workspace: need N, prefer N*NB) */
+
+    if (ilvl) {
+	dlaset_("Full", n, n, &c_b36, &c_b37, &vl[vl_offset], ldvl)
+		;
+	if (irows > 1) {
+	    i__1 = irows - 1;
+	    i__2 = irows - 1;
+	    dlacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vl[
+		    ilo + 1 + ilo * vl_dim1], ldvl);
+	}
+	i__1 = *lwork + 1 - iwrk;
+	dorgqr_(&irows, &irows, &irows, &vl[ilo + ilo * vl_dim1], ldvl, &work[
+		itau], &work[iwrk], &i__1, &ierr);
+    }
+
+/* Initialize VR */
+
+    if (ilvr) {
+	dlaset_("Full", n, n, &c_b36, &c_b37, &vr[vr_offset], ldvr)
+		;
+    }
+
+/* Reduce to generalized Hessenberg form */
+/* (Workspace: none needed) */
+
+    if (ilv) {
+
+/* Eigenvectors requested -- work on whole matrix. */
+
+	dgghrd_(jobvl, jobvr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset], 
+		ldb, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, &ierr);
+    } else {
+	dgghrd_("N", "N", &irows, &c__1, &irows, &a[ilo + ilo * a_dim1], lda, 
+		&b[ilo + ilo * b_dim1], ldb, &vl[vl_offset], ldvl, &vr[
+		vr_offset], ldvr, &ierr);
+    }
+
+/* Perform QZ algorithm (Compute eigenvalues, and optionally, the */
+/* Schur forms and Schur vectors) */
+/* (Workspace: need N) */
+
+    iwrk = itau;	/* Householder taus no longer needed; reclaim their workspace */
+    if (ilv) {
+	*(unsigned char *)chtemp = 'S';	/* 'S': also form Schur factors (needed for eigenvectors) */
+    } else {
+	*(unsigned char *)chtemp = 'E';	/* 'E': eigenvalues only */
+    }
+    i__1 = *lwork + 1 - iwrk;
+    dhgeqz_(chtemp, jobvl, jobvr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+	    b_offset], ldb, &alphar[1], &alphai[1], &beta[1], &vl[vl_offset], 
+	    ldvl, &vr[vr_offset], ldvr, &work[iwrk], &i__1, &ierr);
+    if (ierr != 0) {
+	if (ierr > 0 && ierr <= *n) {
+	    *info = ierr;
+	} else if (ierr > *n && ierr <= *n << 1) {
+	    *info = ierr - *n;
+	} else {
+	    *info = *n + 1;	/* unexpected failure inside DHGEQZ */
+	}
+	goto L110;
+    }
+
+/* Compute Eigenvectors */
+/* (Workspace: need 6*N) */
+
+    if (ilv) {
+	if (ilvl) {
+	    if (ilvr) {
+		*(unsigned char *)chtemp = 'B';	/* both left and right */
+	    } else {
+		*(unsigned char *)chtemp = 'L';	/* left only */
+	    }
+	} else {
+	    *(unsigned char *)chtemp = 'R';	/* right only */
+	}
+	dtgevc_(chtemp, "B", ldumma, n, &a[a_offset], lda, &b[b_offset], ldb, 
+		&vl[vl_offset], ldvl, &vr[vr_offset], ldvr, n, &in, &work[
+		iwrk], &ierr);
+	if (ierr != 0) {
+	    *info = *n + 2;	/* error return from DTGEVC (see INFO docs above) */
+	    goto L110;
+	}
+
+/* Undo balancing on VL and VR and normalization */
+/* (Workspace: none needed) */
+
+	if (ilvl) {
+	    dggbak_("P", "L", n, &ilo, &ihi, &work[ileft], &work[iright], n, &
+		    vl[vl_offset], ldvl, &ierr);
+	    i__1 = *n;
+	    for (jc = 1; jc <= i__1; ++jc) {
+		if (alphai[jc] < 0.) {	/* 2nd of a conjugate pair: scaled with the 1st */
+		    goto L50;
+		}
+		temp = 0.;
+		if (alphai[jc] == 0.) {	/* real eigenvalue: single column */
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+			d__2 = temp, d__3 = (d__1 = vl[jr + jc * vl_dim1], 
+				abs(d__1));
+			temp = max(d__2,d__3);
+/* L10: */
+		    }
+		} else {	/* complex pair: columns jc (real) and jc+1 (imag) */
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+			d__3 = temp, d__4 = (d__1 = vl[jr + jc * vl_dim1], 
+				abs(d__1)) + (d__2 = vl[jr + (jc + 1) * 
+				vl_dim1], abs(d__2));
+			temp = max(d__3,d__4);
+/* L20: */
+		    }
+		}
+		if (temp < smlnum) {	/* negligible vector: skip, avoid overflow in 1/temp */
+		    goto L50;
+		}
+		temp = 1. / temp;
+		if (alphai[jc] == 0.) {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+			vl[jr + jc * vl_dim1] *= temp;
+/* L30: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+			vl[jr + jc * vl_dim1] *= temp;
+			vl[jr + (jc + 1) * vl_dim1] *= temp;
+/* L40: */
+		    }
+		}
+L50:
+		;
+	    }
+	}
+	if (ilvr) {
+	    dggbak_("P", "R", n, &ilo, &ihi, &work[ileft], &work[iright], n, &
+		    vr[vr_offset], ldvr, &ierr);
+	    i__1 = *n;
+	    for (jc = 1; jc <= i__1; ++jc) {
+		if (alphai[jc] < 0.) {	/* 2nd of a conjugate pair: scaled with the 1st */
+		    goto L100;
+		}
+		temp = 0.;
+		if (alphai[jc] == 0.) {	/* real eigenvalue: single column */
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+			d__2 = temp, d__3 = (d__1 = vr[jr + jc * vr_dim1], 
+				abs(d__1));
+			temp = max(d__2,d__3);
+/* L60: */
+		    }
+		} else {	/* complex pair: columns jc (real) and jc+1 (imag) */
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+			d__3 = temp, d__4 = (d__1 = vr[jr + jc * vr_dim1], 
+				abs(d__1)) + (d__2 = vr[jr + (jc + 1) * 
+				vr_dim1], abs(d__2));
+			temp = max(d__3,d__4);
+/* L70: */
+		    }
+		}
+		if (temp < smlnum) {	/* negligible vector: skip, avoid overflow in 1/temp */
+		    goto L100;
+		}
+		temp = 1. / temp;
+		if (alphai[jc] == 0.) {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+			vr[jr + jc * vr_dim1] *= temp;
+/* L80: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+			vr[jr + jc * vr_dim1] *= temp;
+			vr[jr + (jc + 1) * vr_dim1] *= temp;
+/* L90: */
+		    }
+		}
+L100:
+		;
+	    }
+	}
+
+/* End of eigenvector calculation */
+
+    }
+
+/* Undo scaling if necessary */
+
+    if (ilascl) {
+	dlascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphar[1], n, &
+		ierr);
+	dlascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphai[1], n, &
+		ierr);
+    }
+
+    if (ilbscl) {
+	dlascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+		ierr);
+    }
+
+L110:
+
+    work[1] = (doublereal) maxwrk;	/* return optimal LWORK in WORK(1) */
+
+    return 0;
+
+/* End of DGGEV */
+
+} /* dggev_ */
diff --git a/contrib/libs/clapack/dggevx.c b/contrib/libs/clapack/dggevx.c
new file mode 100644
index 0000000000..50bb67025d
--- /dev/null
+++ b/contrib/libs/clapack/dggevx.c
@@ -0,0 +1,885 @@
+/* dggevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static doublereal c_b59 = 0.;
+static doublereal c_b60 = 1.;
+
+/* Subroutine */ int dggevx_(char *balanc, char *jobvl, char *jobvr, char *
+ sense, integer *n, doublereal *a, integer *lda, doublereal *b,
+ integer *ldb, doublereal *alphar, doublereal *alphai, doublereal *
+ beta, doublereal *vl, integer *ldvl, doublereal *vr, integer *ldvr,
+ integer *ilo, integer *ihi, doublereal *lscale, doublereal *rscale,
+ doublereal *abnrm, doublereal *bbnrm, doublereal *rconde, doublereal *
+ rcondv, doublereal *work, integer *lwork, integer *iwork, logical *
+ bwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, vl_dim1, vl_offset, vr_dim1,
+ vr_offset, i__1, i__2;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, m, jc, in, mm, jr;
+ doublereal eps;
+ logical ilv, pair;
+ doublereal anrm, bnrm;
+ integer ierr, itau;
+ doublereal temp;
+ logical ilvl, ilvr;
+ integer iwrk, iwrk1;
+ extern logical lsame_(char *, char *);
+ integer icols;
+ logical noscl;
+ integer irows;
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dggbak_(
+ char *, char *, integer *, integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, integer *), dggbal_(char *, integer *, doublereal *, integer
+ *, doublereal *, integer *, integer *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *);
+ extern doublereal dlamch_(char *), dlange_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *);
+ extern /* Subroutine */ int dgghrd_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *, integer *), dlascl_(char *, integer *, integer *, doublereal
+ *, doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *);
+ logical ilascl, ilbscl;
+ extern /* Subroutine */ int dgeqrf_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, integer *),
+ dlacpy_(char *, integer *, integer *, doublereal *, integer *,
+ doublereal *, integer *), dlaset_(char *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *);
+ logical ldumma[1];
+ char chtemp[1];
+ doublereal bignum;
+ extern /* Subroutine */ int dhgeqz_(char *, char *, char *, integer *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ integer *), dtgevc_(char *, char *,
+ logical *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ integer *, integer *, doublereal *, integer *);
+ integer ijobvl;
+ extern /* Subroutine */ int dtgsna_(char *, char *, logical *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, integer *, doublereal *, integer *, integer *, integer
+ *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ijobvr;
+ logical wantsb;
+ extern /* Subroutine */ int dorgqr_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *);
+ doublereal anrmto;
+ logical wantse;
+ doublereal bnrmto;
+ extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ integer minwrk, maxwrk;
+ logical wantsn;
+ doublereal smlnum;
+ logical lquery, wantsv;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGGEVX computes for a pair of N-by-N real nonsymmetric matrices (A,B) */
+/* the generalized eigenvalues, and optionally, the left and/or right */
+/* generalized eigenvectors. */
+
+/* Optionally also, it computes a balancing transformation to improve */
+/* the conditioning of the eigenvalues and eigenvectors (ILO, IHI, */
+/* LSCALE, RSCALE, ABNRM, and BBNRM), reciprocal condition numbers for */
+/* the eigenvalues (RCONDE), and reciprocal condition numbers for the */
+/* right eigenvectors (RCONDV). */
+
+/* A generalized eigenvalue for a pair of matrices (A,B) is a scalar */
+/* lambda or a ratio alpha/beta = lambda, such that A - lambda*B is */
+/* singular. It is usually represented as the pair (alpha,beta), as */
+/* there is a reasonable interpretation for beta=0, and even for both */
+/* being zero. */
+
+/* The right eigenvector v(j) corresponding to the eigenvalue lambda(j) */
+/* of (A,B) satisfies */
+
+/* A * v(j) = lambda(j) * B * v(j) . */
+
+/* The left eigenvector u(j) corresponding to the eigenvalue lambda(j) */
+/* of (A,B) satisfies */
+
+/* u(j)**H * A = lambda(j) * u(j)**H * B. */
+
+/* where u(j)**H is the conjugate-transpose of u(j). */
+
+
+/* Arguments */
+/* ========= */
+
+/* BALANC (input) CHARACTER*1 */
+/* Specifies the balance option to be performed. */
+/* = 'N': do not diagonally scale or permute; */
+/* = 'P': permute only; */
+/* = 'S': scale only; */
+/* = 'B': both permute and scale. */
+/* Computed reciprocal condition numbers will be for the */
+/* matrices after permuting and/or balancing. Permuting does */
+/* not change condition numbers (in exact arithmetic), but */
+/* balancing does. */
+
+/* JOBVL (input) CHARACTER*1 */
+/* = 'N': do not compute the left generalized eigenvectors; */
+/* = 'V': compute the left generalized eigenvectors. */
+
+/* JOBVR (input) CHARACTER*1 */
+/* = 'N': do not compute the right generalized eigenvectors; */
+/* = 'V': compute the right generalized eigenvectors. */
+
+/* SENSE (input) CHARACTER*1 */
+/* Determines which reciprocal condition numbers are computed. */
+/* = 'N': none are computed; */
+/* = 'E': computed for eigenvalues only; */
+/* = 'V': computed for eigenvectors only; */
+/* = 'B': computed for eigenvalues and eigenvectors. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VL, and VR. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA, N) */
+/* On entry, the matrix A in the pair (A,B). */
+/* On exit, A has been overwritten. If JOBVL='V' or JOBVR='V' */
+/* or both, then A contains the first part of the real Schur */
+/* form of the "balanced" versions of the input A and B. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB, N) */
+/* On entry, the matrix B in the pair (A,B). */
+/* On exit, B has been overwritten. If JOBVL='V' or JOBVR='V' */
+/* or both, then B contains the second part of the real Schur */
+/* form of the "balanced" versions of the input A and B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* ALPHAR (output) DOUBLE PRECISION array, dimension (N) */
+/* ALPHAI (output) DOUBLE PRECISION array, dimension (N) */
+/* BETA (output) DOUBLE PRECISION array, dimension (N) */
+/* On exit, (ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N, will */
+/* be the generalized eigenvalues. If ALPHAI(j) is zero, then */
+/* the j-th eigenvalue is real; if positive, then the j-th and */
+/* (j+1)-st eigenvalues are a complex conjugate pair, with */
+/* ALPHAI(j+1) negative. */
+
+/* Note: the quotients ALPHAR(j)/BETA(j) and ALPHAI(j)/BETA(j) */
+/* may easily over- or underflow, and BETA(j) may even be zero. */
+/* Thus, the user should avoid naively computing the ratio */
+/* ALPHA/BETA. However, ALPHAR and ALPHAI will be always less */
+/* than and usually comparable with norm(A) in magnitude, and */
+/* BETA always less than and usually comparable with norm(B). */
+
+/* VL (output) DOUBLE PRECISION array, dimension (LDVL,N) */
+/* If JOBVL = 'V', the left eigenvectors u(j) are stored one */
+/* after another in the columns of VL, in the same order as */
+/* their eigenvalues. If the j-th eigenvalue is real, then */
+/* u(j) = VL(:,j), the j-th column of VL. If the j-th and */
+/* (j+1)-th eigenvalues form a complex conjugate pair, then */
+/* u(j) = VL(:,j)+i*VL(:,j+1) and u(j+1) = VL(:,j)-i*VL(:,j+1). */
+/* Each eigenvector will be scaled so the largest component has */
+/* abs(real part) + abs(imag. part) = 1. */
+/* Not referenced if JOBVL = 'N'. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the matrix VL. LDVL >= 1, and */
+/* if JOBVL = 'V', LDVL >= N. */
+
+/* VR (output) DOUBLE PRECISION array, dimension (LDVR,N) */
+/* If JOBVR = 'V', the right eigenvectors v(j) are stored one */
+/* after another in the columns of VR, in the same order as */
+/* their eigenvalues. If the j-th eigenvalue is real, then */
+/* v(j) = VR(:,j), the j-th column of VR. If the j-th and */
+/* (j+1)-th eigenvalues form a complex conjugate pair, then */
+/* v(j) = VR(:,j)+i*VR(:,j+1) and v(j+1) = VR(:,j)-i*VR(:,j+1). */
+/* Each eigenvector will be scaled so the largest component has */
+/* abs(real part) + abs(imag. part) = 1. */
+/* Not referenced if JOBVR = 'N'. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the matrix VR. LDVR >= 1, and */
+/* if JOBVR = 'V', LDVR >= N. */
+
+/* ILO (output) INTEGER */
+/* IHI (output) INTEGER */
+/* ILO and IHI are integer values such that on exit */
+/* A(i,j) = 0 and B(i,j) = 0 if i > j and */
+/* j = 1,...,ILO-1 or i = IHI+1,...,N. */
+/* If BALANC = 'N' or 'S', ILO = 1 and IHI = N. */
+
+/* LSCALE (output) DOUBLE PRECISION array, dimension (N) */
+/* Details of the permutations and scaling factors applied */
+/* to the left side of A and B. If PL(j) is the index of the */
+/* row interchanged with row j, and DL(j) is the scaling */
+/* factor applied to row j, then */
+/* LSCALE(j) = PL(j) for j = 1,...,ILO-1 */
+/* = DL(j) for j = ILO,...,IHI */
+/* = PL(j) for j = IHI+1,...,N. */
+/* The order in which the interchanges are made is N to IHI+1, */
+/* then 1 to ILO-1. */
+
+/* RSCALE (output) DOUBLE PRECISION array, dimension (N) */
+/* Details of the permutations and scaling factors applied */
+/* to the right side of A and B. If PR(j) is the index of the */
+/* column interchanged with column j, and DR(j) is the scaling */
+/* factor applied to column j, then */
+/* RSCALE(j) = PR(j) for j = 1,...,ILO-1 */
+/* = DR(j) for j = ILO,...,IHI */
+/* = PR(j) for j = IHI+1,...,N */
+/* The order in which the interchanges are made is N to IHI+1, */
+/* then 1 to ILO-1. */
+
+/* ABNRM (output) DOUBLE PRECISION */
+/* The one-norm of the balanced matrix A. */
+
+/* BBNRM (output) DOUBLE PRECISION */
+/* The one-norm of the balanced matrix B. */
+
+/* RCONDE (output) DOUBLE PRECISION array, dimension (N) */
+/* If SENSE = 'E' or 'B', the reciprocal condition numbers of */
+/* the eigenvalues, stored in consecutive elements of the array. */
+/* For a complex conjugate pair of eigenvalues two consecutive */
+/* elements of RCONDE are set to the same value. Thus RCONDE(j), */
+/* RCONDV(j), and the j-th columns of VL and VR all correspond */
+/* to the j-th eigenpair. */
+/* If SENSE = 'N' or 'V', RCONDE is not referenced. */
+
+/* RCONDV (output) DOUBLE PRECISION array, dimension (N) */
+/* If SENSE = 'V' or 'B', the estimated reciprocal condition */
+/* numbers of the eigenvectors, stored in consecutive elements */
+/* of the array. For a complex eigenvector two consecutive */
+/* elements of RCONDV are set to the same value. If the */
+/* eigenvalues cannot be reordered to compute RCONDV(j), */
+/* RCONDV(j) is set to 0; this can only occur when the true */
+/* value would be very small anyway. */
+/* If SENSE = 'N' or 'E', RCONDV is not referenced. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,2*N). */
+/* If BALANC = 'S' or 'B', or JOBVL = 'V', or JOBVR = 'V', */
+/* LWORK >= max(1,6*N). */
+/* If SENSE = 'E' or 'B', LWORK >= max(1,10*N). */
+/* If SENSE = 'V' or 'B', LWORK >= 2*N*N+8*N+16. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (N+6) */
+/* If SENSE = 'E', IWORK is not referenced. */
+
+/* BWORK (workspace) LOGICAL array, dimension (N) */
+/* If SENSE = 'N', BWORK is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1,...,N: */
+/* The QZ iteration failed. No eigenvectors have been */
+/* calculated, but ALPHAR(j), ALPHAI(j), and BETA(j) */
+/* should be correct for j=INFO+1,...,N. */
+/* > N: =N+1: other than QZ iteration failed in DHGEQZ. */
+/* =N+2: error return from DTGEVC. */
+
+/* Further Details */
+/* =============== */
+
+/* Balancing a matrix pair (A,B) includes, first, permuting rows and */
+/* columns to isolate eigenvalues, second, applying diagonal similarity */
+/* transformation to the rows and columns to make the rows and columns */
+/* as close in norm as possible. The computed reciprocal condition */
+/* numbers correspond to the balanced matrix. Permuting rows and columns */
+/* will not change the condition numbers (in exact arithmetic) but */
+/* diagonal scaling will. For further explanation of balancing, see */
+/* section 4.11.1.2 of LAPACK Users' Guide. */
+
+/* An approximate error bound on the chordal distance between the i-th */
+/* computed generalized eigenvalue w and the corresponding exact */
+/* eigenvalue lambda is */
+
+/* chord(w, lambda) <= EPS * norm(ABNRM, BBNRM) / RCONDE(I) */
+
+/* An approximate error bound for the angle between the i-th computed */
+/* eigenvector VL(i) or VR(i) is given by */
+
+/* EPS * norm(ABNRM, BBNRM) / DIF(i). */
+
+/* For further explanation of the reciprocal condition numbers RCONDE */
+/* and RCONDV, see section 4.11 of LAPACK Users' Guide. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alphar;
+ --alphai;
+ --beta;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --lscale;
+ --rscale;
+ --rconde;
+ --rcondv;
+ --work;
+ --iwork;
+ --bwork;
+
+ /* Function Body */
+ if (lsame_(jobvl, "N")) {
+ ijobvl = 1;
+ ilvl = FALSE_;
+ } else if (lsame_(jobvl, "V")) {
+ ijobvl = 2;
+ ilvl = TRUE_;
+ } else {
+ ijobvl = -1;
+ ilvl = FALSE_;
+ }
+
+ if (lsame_(jobvr, "N")) {
+ ijobvr = 1;
+ ilvr = FALSE_;
+ } else if (lsame_(jobvr, "V")) {
+ ijobvr = 2;
+ ilvr = TRUE_;
+ } else {
+ ijobvr = -1;
+ ilvr = FALSE_;
+ }
+ ilv = ilvl || ilvr;
+
+ noscl = lsame_(balanc, "N") || lsame_(balanc, "P");
+ wantsn = lsame_(sense, "N");
+ wantse = lsame_(sense, "E");
+ wantsv = lsame_(sense, "V");
+ wantsb = lsame_(sense, "B");
+
+/* Test the input arguments */
+
+ *info = 0;
+ lquery = *lwork == -1;
+ if (! (lsame_(balanc, "N") || lsame_(balanc, "S") || lsame_(balanc, "P")
+ || lsame_(balanc, "B"))) {
+ *info = -1;
+ } else if (ijobvl <= 0) {
+ *info = -2;
+ } else if (ijobvr <= 0) {
+ *info = -3;
+ } else if (! (wantsn || wantse || wantsb || wantsv)) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldvl < 1 || ilvl && *ldvl < *n) {
+ *info = -14;
+ } else if (*ldvr < 1 || ilvr && *ldvr < *n) {
+ *info = -16;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV. The workspace is */
+/* computed assuming ILO = 1 and IHI = N, the worst case.) */
+
+ if (*info == 0) {
+ if (*n == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ } else {
+ if (noscl && ! ilv) {
+ minwrk = *n << 1;
+ } else {
+ minwrk = *n * 6;
+ }
+ if (wantse || wantsb) {
+ minwrk = *n * 10;
+ }
+ if (wantsv || wantsb) {
+/* Computing MAX */
+ i__1 = minwrk, i__2 = (*n << 1) * (*n + 4) + 16;
+ minwrk = max(i__1,i__2);
+ }
+ maxwrk = minwrk;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", n, &
+ c__1, n, &c__0);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "DORMQR", " ", n, &
+ c__1, n, &c__0);
+ maxwrk = max(i__1,i__2);
+ if (ilvl) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "DORGQR",
+ " ", n, &c__1, n, &c__0);
+ maxwrk = max(i__1,i__2);
+ }
+ }
+ work[1] = (doublereal) maxwrk;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -26;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGGEVX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+
+/* Get machine constants */
+
+ eps = dlamch_("P");
+ smlnum = dlamch_("S");
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = dlange_("M", n, n, &a[a_offset], lda, &work[1]);
+ ilascl = FALSE_;
+ if (anrm > 0. && anrm < smlnum) {
+ anrmto = smlnum;
+ ilascl = TRUE_;
+ } else if (anrm > bignum) {
+ anrmto = bignum;
+ ilascl = TRUE_;
+ }
+ if (ilascl) {
+ dlascl_("G", &c__0, &c__0, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+ bnrm = dlange_("M", n, n, &b[b_offset], ldb, &work[1]);
+ ilbscl = FALSE_;
+ if (bnrm > 0. && bnrm < smlnum) {
+ bnrmto = smlnum;
+ ilbscl = TRUE_;
+ } else if (bnrm > bignum) {
+ bnrmto = bignum;
+ ilbscl = TRUE_;
+ }
+ if (ilbscl) {
+ dlascl_("G", &c__0, &c__0, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+ ierr);
+ }
+
+/* Permute and/or balance the matrix pair (A,B) */
+/* (Workspace: need 6*N if BALANC = 'S' or 'B', 1 otherwise) */
+
+ dggbal_(balanc, n, &a[a_offset], lda, &b[b_offset], ldb, ilo, ihi, &
+ lscale[1], &rscale[1], &work[1], &ierr);
+
+/* Compute ABNRM and BBNRM */
+
+ *abnrm = dlange_("1", n, n, &a[a_offset], lda, &work[1]);
+ if (ilascl) {
+ work[1] = *abnrm;
+ dlascl_("G", &c__0, &c__0, &anrmto, &anrm, &c__1, &c__1, &work[1], &
+ c__1, &ierr);
+ *abnrm = work[1];
+ }
+
+ *bbnrm = dlange_("1", n, n, &b[b_offset], ldb, &work[1]);
+ if (ilbscl) {
+ work[1] = *bbnrm;
+ dlascl_("G", &c__0, &c__0, &bnrmto, &bnrm, &c__1, &c__1, &work[1], &
+ c__1, &ierr);
+ *bbnrm = work[1];
+ }
+
+/* Reduce B to triangular form (QR decomposition of B) */
+/* (Workspace: need N, prefer N*NB ) */
+
+ irows = *ihi + 1 - *ilo;
+ if (ilv || ! wantsn) {
+ icols = *n + 1 - *ilo;
+ } else {
+ icols = irows;
+ }
+ itau = 1;
+ iwrk = itau + irows;
+ i__1 = *lwork + 1 - iwrk;
+ dgeqrf_(&irows, &icols, &b[*ilo + *ilo * b_dim1], ldb, &work[itau], &work[
+ iwrk], &i__1, &ierr);
+
+/* Apply the orthogonal transformation to A */
+/* (Workspace: need N, prefer N*NB) */
+
+ i__1 = *lwork + 1 - iwrk;
+ dormqr_("L", "T", &irows, &icols, &irows, &b[*ilo + *ilo * b_dim1], ldb, &
+ work[itau], &a[*ilo + *ilo * a_dim1], lda, &work[iwrk], &i__1, &
+ ierr);
+
+/* Initialize VL and/or VR */
+/* (Workspace: need N, prefer N*NB) */
+
+ if (ilvl) {
+ dlaset_("Full", n, n, &c_b59, &c_b60, &vl[vl_offset], ldvl)
+ ;
+ if (irows > 1) {
+ i__1 = irows - 1;
+ i__2 = irows - 1;
+ dlacpy_("L", &i__1, &i__2, &b[*ilo + 1 + *ilo * b_dim1], ldb, &vl[
+ *ilo + 1 + *ilo * vl_dim1], ldvl);
+ }
+ i__1 = *lwork + 1 - iwrk;
+ dorgqr_(&irows, &irows, &irows, &vl[*ilo + *ilo * vl_dim1], ldvl, &
+ work[itau], &work[iwrk], &i__1, &ierr);
+ }
+
+ if (ilvr) {
+ dlaset_("Full", n, n, &c_b59, &c_b60, &vr[vr_offset], ldvr)
+ ;
+ }
+
+/* Reduce to generalized Hessenberg form */
+/* (Workspace: none needed) */
+
+ if (ilv || ! wantsn) {
+
+/* Eigenvectors requested -- work on whole matrix. */
+
+ dgghrd_(jobvl, jobvr, n, ilo, ihi, &a[a_offset], lda, &b[b_offset],
+ ldb, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, &ierr);
+ } else {
+ dgghrd_("N", "N", &irows, &c__1, &irows, &a[*ilo + *ilo * a_dim1],
+ lda, &b[*ilo + *ilo * b_dim1], ldb, &vl[vl_offset], ldvl, &vr[
+ vr_offset], ldvr, &ierr);
+ }
+
+/* Perform QZ algorithm (Compute eigenvalues, and optionally, the */
+/* Schur forms and Schur vectors) */
+/* (Workspace: need N) */
+
+ if (ilv || ! wantsn) {
+ *(unsigned char *)chtemp = 'S';
+ } else {
+ *(unsigned char *)chtemp = 'E';
+ }
+
+ dhgeqz_(chtemp, jobvl, jobvr, n, ilo, ihi, &a[a_offset], lda, &b[b_offset]
+, ldb, &alphar[1], &alphai[1], &beta[1], &vl[vl_offset], ldvl, &
+ vr[vr_offset], ldvr, &work[1], lwork, &ierr);
+ if (ierr != 0) {
+ if (ierr > 0 && ierr <= *n) {
+ *info = ierr;
+ } else if (ierr > *n && ierr <= *n << 1) {
+ *info = ierr - *n;
+ } else {
+ *info = *n + 1;
+ }
+ goto L130;
+ }
+
+/* Compute Eigenvectors and estimate condition numbers if desired */
+/* (Workspace: DTGEVC: need 6*N */
+/* DTGSNA: need 2*N*(N+2)+16 if SENSE = 'V' or 'B', */
+/* need N otherwise ) */
+
+ if (ilv || ! wantsn) {
+ if (ilv) {
+ if (ilvl) {
+ if (ilvr) {
+ *(unsigned char *)chtemp = 'B';
+ } else {
+ *(unsigned char *)chtemp = 'L';
+ }
+ } else {
+ *(unsigned char *)chtemp = 'R';
+ }
+
+ dtgevc_(chtemp, "B", ldumma, n, &a[a_offset], lda, &b[b_offset],
+ ldb, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, n, &in, &
+ work[1], &ierr);
+ if (ierr != 0) {
+ *info = *n + 2;
+ goto L130;
+ }
+ }
+
+ if (! wantsn) {
+
+/* compute eigenvectors (DTGEVC) and estimate condition */
+/* numbers (DTGSNA). Note that the definition of the condition */
+/* number is not invariant under transformation (u,v) to */
+/* (Q*u, Z*v), where (u,v) are eigenvectors of the generalized */
+/* Schur form (S,T), Q and Z are orthogonal matrices. In order */
+/* to avoid using extra 2*N*N workspace, we have to recalculate */
+/* eigenvectors and estimate one condition numbers at a time. */
+
+ pair = FALSE_;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+ if (pair) {
+ pair = FALSE_;
+ goto L20;
+ }
+ mm = 1;
+ if (i__ < *n) {
+ if (a[i__ + 1 + i__ * a_dim1] != 0.) {
+ pair = TRUE_;
+ mm = 2;
+ }
+ }
+
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ bwork[j] = FALSE_;
+/* L10: */
+ }
+ if (mm == 1) {
+ bwork[i__] = TRUE_;
+ } else if (mm == 2) {
+ bwork[i__] = TRUE_;
+ bwork[i__ + 1] = TRUE_;
+ }
+
+ iwrk = mm * *n + 1;
+ iwrk1 = iwrk + mm * *n;
+
+/* Compute a pair of left and right eigenvectors. */
+/* (compute workspace: need up to 4*N + 6*N) */
+
+ if (wantse || wantsb) {
+ dtgevc_("B", "S", &bwork[1], n, &a[a_offset], lda, &b[
+ b_offset], ldb, &work[1], n, &work[iwrk], n, &mm,
+ &m, &work[iwrk1], &ierr);
+ if (ierr != 0) {
+ *info = *n + 2;
+ goto L130;
+ }
+ }
+
+ i__2 = *lwork - iwrk1 + 1;
+ dtgsna_(sense, "S", &bwork[1], n, &a[a_offset], lda, &b[
+ b_offset], ldb, &work[1], n, &work[iwrk], n, &rconde[
+ i__], &rcondv[i__], &mm, &m, &work[iwrk1], &i__2, &
+ iwork[1], &ierr);
+
+L20:
+ ;
+ }
+ }
+ }
+
+/* Undo balancing on VL and VR and normalization */
+/* (Workspace: none needed) */
+
+ if (ilvl) {
+ dggbak_(balanc, "L", n, ilo, ihi, &lscale[1], &rscale[1], n, &vl[
+ vl_offset], ldvl, &ierr);
+
+ i__1 = *n;
+ for (jc = 1; jc <= i__1; ++jc) {
+ if (alphai[jc] < 0.) {
+ goto L70;
+ }
+ temp = 0.;
+ if (alphai[jc] == 0.) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ d__2 = temp, d__3 = (d__1 = vl[jr + jc * vl_dim1], abs(
+ d__1));
+ temp = max(d__2,d__3);
+/* L30: */
+ }
+ } else {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ d__3 = temp, d__4 = (d__1 = vl[jr + jc * vl_dim1], abs(
+ d__1)) + (d__2 = vl[jr + (jc + 1) * vl_dim1], abs(
+ d__2));
+ temp = max(d__3,d__4);
+/* L40: */
+ }
+ }
+ if (temp < smlnum) {
+ goto L70;
+ }
+ temp = 1. / temp;
+ if (alphai[jc] == 0.) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vl[jr + jc * vl_dim1] *= temp;
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vl[jr + jc * vl_dim1] *= temp;
+ vl[jr + (jc + 1) * vl_dim1] *= temp;
+/* L60: */
+ }
+ }
+L70:
+ ;
+ }
+ }
+ if (ilvr) {
+ dggbak_(balanc, "R", n, ilo, ihi, &lscale[1], &rscale[1], n, &vr[
+ vr_offset], ldvr, &ierr);
+ i__1 = *n;
+ for (jc = 1; jc <= i__1; ++jc) {
+ if (alphai[jc] < 0.) {
+ goto L120;
+ }
+ temp = 0.;
+ if (alphai[jc] == 0.) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ d__2 = temp, d__3 = (d__1 = vr[jr + jc * vr_dim1], abs(
+ d__1));
+ temp = max(d__2,d__3);
+/* L80: */
+ }
+ } else {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ d__3 = temp, d__4 = (d__1 = vr[jr + jc * vr_dim1], abs(
+ d__1)) + (d__2 = vr[jr + (jc + 1) * vr_dim1], abs(
+ d__2));
+ temp = max(d__3,d__4);
+/* L90: */
+ }
+ }
+ if (temp < smlnum) {
+ goto L120;
+ }
+ temp = 1. / temp;
+ if (alphai[jc] == 0.) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vr[jr + jc * vr_dim1] *= temp;
+/* L100: */
+ }
+ } else {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vr[jr + jc * vr_dim1] *= temp;
+ vr[jr + (jc + 1) * vr_dim1] *= temp;
+/* L110: */
+ }
+ }
+L120:
+ ;
+ }
+ }
+
+/* Undo scaling if necessary */
+
+ if (ilascl) {
+ dlascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphar[1], n, &
+ ierr);
+ dlascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphai[1], n, &
+ ierr);
+ }
+
+ if (ilbscl) {
+ dlascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+ ierr);
+ }
+
+L130:
+ work[1] = (doublereal) maxwrk;
+
+ return 0;
+
+/* End of DGGEVX */
+
+} /* dggevx_ */
diff --git a/contrib/libs/clapack/dggglm.c b/contrib/libs/clapack/dggglm.c
new file mode 100644
index 0000000000..378885e2a1
--- /dev/null
+++ b/contrib/libs/clapack/dggglm.c
@@ -0,0 +1,331 @@
+/* dggglm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b32 = -1.;
+static doublereal c_b34 = 1.;
+
+/* Subroutine */ int dggglm_(integer *n, integer *m, integer *p, doublereal *
+ a, integer *lda, doublereal *b, integer *ldb, doublereal *d__,
+ doublereal *x, doublereal *y, doublereal *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, nb, np, nb1, nb2, nb3, nb4, lopt;
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *), dcopy_(integer *,
+ doublereal *, integer *, doublereal *, integer *), dggqrf_(
+ integer *, integer *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer lwkmin;
+ extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *),
+ dormrq_(char *, char *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int dtrtrs_(char *, char *, char *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGGGLM solves a general Gauss-Markov linear model (GLM) problem: */
+
+/* minimize || y ||_2 subject to d = A*x + B*y */
+/* x */
+
+/* where A is an N-by-M matrix, B is an N-by-P matrix, and d is a */
+/* given N-vector. It is assumed that M <= N <= M+P, and */
+
+/* rank(A) = M and rank( A B ) = N. */
+
+/* Under these assumptions, the constrained equation is always */
+/* consistent, and there is a unique solution x and a minimal 2-norm */
+/* solution y, which is obtained using a generalized QR factorization */
+/* of the matrices (A, B) given by */
+
+/* A = Q*(R), B = Q*T*Z. */
+/* (0) */
+
+/* In particular, if matrix B is square nonsingular, then the problem */
+/* GLM is equivalent to the following weighted linear least squares */
+/* problem */
+
+/* minimize || inv(B)*(d-A*x) ||_2 */
+/* x */
+
+/* where inv(B) denotes the inverse of B. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of rows of the matrices A and B. N >= 0. */
+
+/* M (input) INTEGER */
+/* The number of columns of the matrix A. 0 <= M <= N. */
+
+/* P (input) INTEGER */
+/* The number of columns of the matrix B. P >= N-M. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,M) */
+/* On entry, the N-by-M matrix A. */
+/* On exit, the upper triangular part of the array A contains */
+/* the M-by-M upper triangular matrix R. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,P) */
+/* On entry, the N-by-P matrix B. */
+/* On exit, if N <= P, the upper triangle of the subarray */
+/* B(1:N,P-N+1:P) contains the N-by-N upper triangular matrix T; */
+/* if N > P, the elements on and above the (N-P)th subdiagonal */
+/* contain the N-by-P upper trapezoidal matrix T. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, D is the left hand side of the GLM equation. */
+/* On exit, D is destroyed. */
+
+/* X (output) DOUBLE PRECISION array, dimension (M) */
+/* Y (output) DOUBLE PRECISION array, dimension (P) */
+/* On exit, X and Y are the solutions of the GLM problem. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N+M+P). */
+/* For optimum performance, LWORK >= M+min(N,P)+max(N,P)*NB, */
+/* where NB is an upper bound for the optimal blocksizes for */
+/* DGEQRF, SGERQF, DORMQR and SORMRQ. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1: the upper triangular factor R associated with A in the */
+/* generalized QR factorization of the pair (A, B) is */
+/* singular, so that rank(A) < M; the least squares */
+/* solution could not be computed. */
+/* = 2: the bottom (N-M) by (N-M) part of the upper trapezoidal */
+/* factor T associated with B in the generalized QR */
+/* factorization of the pair (A, B) is singular, so that */
+/* rank( A B ) < N; the least squares solution could not */
+/* be computed. */
+
+/* =================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --d__;
+ --x;
+ --y;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ np = min(*n,*p);
+ lquery = *lwork == -1;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*m < 0 || *m > *n) {
+ *info = -2;
+ } else if (*p < 0 || *p < *n - *m) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+
+/* Calculate workspace */
+
+ if (*info == 0) {
+ if (*n == 0) {
+ lwkmin = 1;
+ lwkopt = 1;
+ } else {
+ nb1 = ilaenv_(&c__1, "DGEQRF", " ", n, m, &c_n1, &c_n1);
+ nb2 = ilaenv_(&c__1, "DGERQF", " ", n, m, &c_n1, &c_n1);
+ nb3 = ilaenv_(&c__1, "DORMQR", " ", n, m, p, &c_n1);
+ nb4 = ilaenv_(&c__1, "DORMRQ", " ", n, m, p, &c_n1);
+/* Computing MAX */
+ i__1 = max(nb1,nb2), i__1 = max(i__1,nb3);
+ nb = max(i__1,nb4);
+ lwkmin = *m + *n + *p;
+ lwkopt = *m + np + max(*n,*p) * nb;
+ }
+ work[1] = (doublereal) lwkopt;
+
+ if (*lwork < lwkmin && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGGGLM", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Compute the GQR factorization of matrices A and B: */
+
+/* Q'*A = ( R11 ) M, Q'*B*Z' = ( T11 T12 ) M */
+/* ( 0 ) N-M ( 0 T22 ) N-M */
+/* M M+P-N N-M */
+
+/* where R11 and T22 are upper triangular, and Q and Z are */
+/* orthogonal. */
+
+ i__1 = *lwork - *m - np;
+ dggqrf_(n, m, p, &a[a_offset], lda, &work[1], &b[b_offset], ldb, &work[*m
+ + 1], &work[*m + np + 1], &i__1, info);
+ lopt = (integer) work[*m + np + 1];
+
+/* Update left-hand-side vector d = Q'*d = ( d1 ) M */
+/* ( d2 ) N-M */
+
+ i__1 = max(1,*n);
+ i__2 = *lwork - *m - np;
+ dormqr_("Left", "Transpose", n, &c__1, m, &a[a_offset], lda, &work[1], &
+ d__[1], &i__1, &work[*m + np + 1], &i__2, info);
+/* Computing MAX */
+ i__1 = lopt, i__2 = (integer) work[*m + np + 1];
+ lopt = max(i__1,i__2);
+
+/* Solve T22*y2 = d2 for y2 */
+
+ if (*n > *m) {
+ i__1 = *n - *m;
+ i__2 = *n - *m;
+ dtrtrs_("Upper", "No transpose", "Non unit", &i__1, &c__1, &b[*m + 1
+ + (*m + *p - *n + 1) * b_dim1], ldb, &d__[*m + 1], &i__2,
+ info);
+
+ if (*info > 0) {
+ *info = 1;
+ return 0;
+ }
+
+ i__1 = *n - *m;
+ dcopy_(&i__1, &d__[*m + 1], &c__1, &y[*m + *p - *n + 1], &c__1);
+ }
+
+/* Set y1 = 0 */
+
+ i__1 = *m + *p - *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[i__] = 0.;
+/* L10: */
+ }
+
+/* Update d1 = d1 - T12*y2 */
+
+ i__1 = *n - *m;
+ dgemv_("No transpose", m, &i__1, &c_b32, &b[(*m + *p - *n + 1) * b_dim1 +
+ 1], ldb, &y[*m + *p - *n + 1], &c__1, &c_b34, &d__[1], &c__1);
+
+/* Solve triangular system: R11*x = d1 */
+
+ if (*m > 0) {
+ dtrtrs_("Upper", "No Transpose", "Non unit", m, &c__1, &a[a_offset],
+ lda, &d__[1], m, info);
+
+ if (*info > 0) {
+ *info = 2;
+ return 0;
+ }
+
+/* Copy D to X */
+
+ dcopy_(m, &d__[1], &c__1, &x[1], &c__1);
+ }
+
+/* Backward transformation y = Z'*y */
+
+/* Computing MAX */
+ i__1 = 1, i__2 = *n - *p + 1;
+ i__3 = max(1,*p);
+ i__4 = *lwork - *m - np;
+ dormrq_("Left", "Transpose", p, &c__1, &np, &b[max(i__1, i__2)+ b_dim1],
+ ldb, &work[*m + 1], &y[1], &i__3, &work[*m + np + 1], &i__4, info);
+/* Computing MAX */
+ i__1 = lopt, i__2 = (integer) work[*m + np + 1];
+ work[1] = (doublereal) (*m + np + max(i__1,i__2));
+
+ return 0;
+
+/* End of DGGGLM */
+
+} /* dggglm_ */
diff --git a/contrib/libs/clapack/dgghrd.c b/contrib/libs/clapack/dgghrd.c
new file mode 100644
index 0000000000..2d73bfadd9
--- /dev/null
+++ b/contrib/libs/clapack/dgghrd.c
@@ -0,0 +1,329 @@
+/* dgghrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b10 = 0.;
+static doublereal c_b11 = 1.;
+static integer c__1 = 1;
+
+/* Subroutine */ int dgghrd_(char *compq, char *compz, integer *n, integer *
+ ilo, integer *ihi, doublereal *a, integer *lda, doublereal *b,
+ integer *ldb, doublereal *q, integer *ldq, doublereal *z__, integer *
+ ldz, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1,
+ z_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ doublereal c__, s;
+ logical ilq, ilz;
+ integer jcol;
+ doublereal temp;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ integer jrow;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dlaset_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *),
+ dlartg_(doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *), xerbla_(char *, integer *);
+ integer icompq, icompz;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGGHRD reduces a pair of real matrices (A,B) to generalized upper */
+/* Hessenberg form using orthogonal transformations, where A is a */
+/* general matrix and B is upper triangular. The form of the */
+/* generalized eigenvalue problem is */
+/* A*x = lambda*B*x, */
+/* and B is typically made upper triangular by computing its QR */
+/* factorization and moving the orthogonal matrix Q to the left side */
+/* of the equation. */
+
+/* This subroutine simultaneously reduces A to a Hessenberg matrix H: */
+/* Q**T*A*Z = H */
+/* and transforms B to another upper triangular matrix T: */
+/* Q**T*B*Z = T */
+/* in order to reduce the problem to its standard form */
+/* H*y = lambda*T*y */
+/* where y = Z**T*x. */
+
+/* The orthogonal matrices Q and Z are determined as products of Givens */
+/* rotations. They may either be formed explicitly, or they may be */
+/* postmultiplied into input matrices Q1 and Z1, so that */
+
+/* Q1 * A * Z1**T = (Q1*Q) * H * (Z1*Z)**T */
+
+/* Q1 * B * Z1**T = (Q1*Q) * T * (Z1*Z)**T */
+
+/* If Q1 is the orthogonal matrix from the QR factorization of B in the */
+/* original equation A*x = lambda*B*x, then DGGHRD reduces the original */
+/* problem to generalized Hessenberg form. */
+
+/* Arguments */
+/* ========= */
+
+/* COMPQ (input) CHARACTER*1 */
+/* = 'N': do not compute Q; */
+/* = 'I': Q is initialized to the unit matrix, and the */
+/* orthogonal matrix Q is returned; */
+/* = 'V': Q must contain an orthogonal matrix Q1 on entry, */
+/* and the product Q1*Q is returned. */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': do not compute Z; */
+/* = 'I': Z is initialized to the unit matrix, and the */
+/* orthogonal matrix Z is returned; */
+/* = 'V': Z must contain an orthogonal matrix Z1 on entry, */
+/* and the product Z1*Z is returned. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* ILO and IHI mark the rows and columns of A which are to be */
+/* reduced. It is assumed that A is already upper triangular */
+/* in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are */
+/* normally set by a previous call to SGGBAL; otherwise they */
+/* should be set to 1 and N respectively. */
+/* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA, N) */
+/* On entry, the N-by-N general matrix to be reduced. */
+/* On exit, the upper triangle and the first subdiagonal of A */
+/* are overwritten with the upper Hessenberg matrix H, and the */
+/* rest is set to zero. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB, N) */
+/* On entry, the N-by-N upper triangular matrix B. */
+/* On exit, the upper triangular matrix T = Q**T B Z. The */
+/* elements below the diagonal are set to zero. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N) */
+/* On entry, if COMPQ = 'V', the orthogonal matrix Q1, */
+/* typically from the QR factorization of B. */
+/* On exit, if COMPQ='I', the orthogonal matrix Q, and if */
+/* COMPQ = 'V', the product Q1*Q. */
+/* Not referenced if COMPQ='N'. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. */
+/* LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise. */
+
+/* Z (input/output) DOUBLE PRECISION array, dimension (LDZ, N) */
+/* On entry, if COMPZ = 'V', the orthogonal matrix Z1. */
+/* On exit, if COMPZ='I', the orthogonal matrix Z, and if */
+/* COMPZ = 'V', the product Z1*Z. */
+/* Not referenced if COMPZ='N'. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. */
+/* LDZ >= N if COMPZ='V' or 'I'; LDZ >= 1 otherwise. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* This routine reduces A to Hessenberg and B to triangular form by */
+/* an unblocked reduction, as described in _Matrix_Computations_, */
+/* by Golub and Van Loan (Johns Hopkins Press.) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode COMPQ */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+
+ /* Function Body */
+ if (lsame_(compq, "N")) {
+ ilq = FALSE_;
+ icompq = 1;
+ } else if (lsame_(compq, "V")) {
+ ilq = TRUE_;
+ icompq = 2;
+ } else if (lsame_(compq, "I")) {
+ ilq = TRUE_;
+ icompq = 3;
+ } else {
+ icompq = 0;
+ }
+
+/* Decode COMPZ */
+
+ if (lsame_(compz, "N")) {
+ ilz = FALSE_;
+ icompz = 1;
+ } else if (lsame_(compz, "V")) {
+ ilz = TRUE_;
+ icompz = 2;
+ } else if (lsame_(compz, "I")) {
+ ilz = TRUE_;
+ icompz = 3;
+ } else {
+ icompz = 0;
+ }
+
+/* Test the input parameters. */
+
+ *info = 0;
+ if (icompq <= 0) {
+ *info = -1;
+ } else if (icompz <= 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ilo < 1) {
+ *info = -4;
+ } else if (*ihi > *n || *ihi < *ilo - 1) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (ilq && *ldq < *n || *ldq < 1) {
+ *info = -11;
+ } else if (ilz && *ldz < *n || *ldz < 1) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGGHRD", &i__1);
+ return 0;
+ }
+
+/* Initialize Q and Z if desired. */
+
+ if (icompq == 3) {
+ dlaset_("Full", n, n, &c_b10, &c_b11, &q[q_offset], ldq);
+ }
+ if (icompz == 3) {
+ dlaset_("Full", n, n, &c_b10, &c_b11, &z__[z_offset], ldz);
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 1) {
+ return 0;
+ }
+
+/* Zero out lower triangle of B */
+
+ i__1 = *n - 1;
+ for (jcol = 1; jcol <= i__1; ++jcol) {
+ i__2 = *n;
+ for (jrow = jcol + 1; jrow <= i__2; ++jrow) {
+ b[jrow + jcol * b_dim1] = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+
+/* Reduce A and B */
+
+ i__1 = *ihi - 2;
+ for (jcol = *ilo; jcol <= i__1; ++jcol) {
+
+ i__2 = jcol + 2;
+ for (jrow = *ihi; jrow >= i__2; --jrow) {
+
+/* Step 1: rotate rows JROW-1, JROW to kill A(JROW,JCOL) */
+
+ temp = a[jrow - 1 + jcol * a_dim1];
+ dlartg_(&temp, &a[jrow + jcol * a_dim1], &c__, &s, &a[jrow - 1 +
+ jcol * a_dim1]);
+ a[jrow + jcol * a_dim1] = 0.;
+ i__3 = *n - jcol;
+ drot_(&i__3, &a[jrow - 1 + (jcol + 1) * a_dim1], lda, &a[jrow + (
+ jcol + 1) * a_dim1], lda, &c__, &s);
+ i__3 = *n + 2 - jrow;
+ drot_(&i__3, &b[jrow - 1 + (jrow - 1) * b_dim1], ldb, &b[jrow + (
+ jrow - 1) * b_dim1], ldb, &c__, &s);
+ if (ilq) {
+ drot_(n, &q[(jrow - 1) * q_dim1 + 1], &c__1, &q[jrow * q_dim1
+ + 1], &c__1, &c__, &s);
+ }
+
+/* Step 2: rotate columns JROW, JROW-1 to kill B(JROW,JROW-1) */
+
+ temp = b[jrow + jrow * b_dim1];
+ dlartg_(&temp, &b[jrow + (jrow - 1) * b_dim1], &c__, &s, &b[jrow
+ + jrow * b_dim1]);
+ b[jrow + (jrow - 1) * b_dim1] = 0.;
+ drot_(ihi, &a[jrow * a_dim1 + 1], &c__1, &a[(jrow - 1) * a_dim1 +
+ 1], &c__1, &c__, &s);
+ i__3 = jrow - 1;
+ drot_(&i__3, &b[jrow * b_dim1 + 1], &c__1, &b[(jrow - 1) * b_dim1
+ + 1], &c__1, &c__, &s);
+ if (ilz) {
+ drot_(n, &z__[jrow * z_dim1 + 1], &c__1, &z__[(jrow - 1) *
+ z_dim1 + 1], &c__1, &c__, &s);
+ }
+/* L30: */
+ }
+/* L40: */
+ }
+
+ return 0;
+
+/* End of DGGHRD */
+
+} /* dgghrd_ */
diff --git a/contrib/libs/clapack/dgglse.c b/contrib/libs/clapack/dgglse.c
new file mode 100644
index 0000000000..4a021830cc
--- /dev/null
+++ b/contrib/libs/clapack/dgglse.c
@@ -0,0 +1,340 @@
+/* dgglse.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b31 = -1.;
+static doublereal c_b33 = 1.;
+
+/* Subroutine */ int dgglse_(integer *m, integer *n, integer *p, doublereal *
+ a, integer *lda, doublereal *b, integer *ldb, doublereal *c__,
+ doublereal *d__, doublereal *x, doublereal *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+
+ /* Local variables */
+ integer nb, mn, nr, nb1, nb2, nb3, nb4, lopt;
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *), dcopy_(integer *,
+ doublereal *, integer *, doublereal *, integer *), daxpy_(integer
+ *, doublereal *, doublereal *, integer *, doublereal *, integer *)
+ , dtrmv_(char *, char *, char *, integer *, doublereal *, integer
+ *, doublereal *, integer *), dggrqf_(
+ integer *, integer *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer lwkmin;
+ extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *),
+ dormrq_(char *, char *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int dtrtrs_(char *, char *, char *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGGLSE solves the linear equality-constrained least squares (LSE) */
+/* problem: */
+
+/* minimize || c - A*x ||_2 subject to B*x = d */
+
+/* where A is an M-by-N matrix, B is a P-by-N matrix, c is a given */
+/* M-vector, and d is a given P-vector. It is assumed that */
+/* P <= N <= M+P, and */
+
+/* rank(B) = P and rank( (A) ) = N. */
+/* ( (B) ) */
+
+/* These conditions ensure that the LSE problem has a unique solution, */
+/* which is obtained using a generalized RQ factorization of the */
+/* matrices (B, A) given by */
+
+/* B = (0 R)*Q, A = Z*T*Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrices A and B. N >= 0. */
+
+/* P (input) INTEGER */
+/* The number of rows of the matrix B. 0 <= P <= N <= M+P. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(M,N)-by-N upper trapezoidal matrix T. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,N) */
+/* On entry, the P-by-N matrix B. */
+/* On exit, the upper triangle of the subarray B(1:P,N-P+1:N) */
+/* contains the P-by-P upper triangular matrix R. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,P). */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (M) */
+/* On entry, C contains the right hand side vector for the */
+/* least squares part of the LSE problem. */
+/* On exit, the residual sum of squares for the solution */
+/* is given by the sum of squares of elements N-P+1 to M of */
+/* vector C. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (P) */
+/* On entry, D contains the right hand side vector for the */
+/* constrained equation. */
+/* On exit, D is destroyed. */
+
+/* X (output) DOUBLE PRECISION array, dimension (N) */
+/* On exit, X is the solution of the LSE problem. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M+N+P). */
+/* For optimum performance LWORK >= P+min(M,N)+max(M,N)*NB, */
+/* where NB is an upper bound for the optimal blocksizes for */
+/* DGEQRF, SGERQF, DORMQR and SORMRQ. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1: the upper triangular factor R associated with B in the */
+/* generalized RQ factorization of the pair (B, A) is */
+/* singular, so that rank(B) < P; the least squares */
+/* solution could not be computed. */
+/* = 2: the (N-P) by (N-P) part of the upper trapezoidal factor */
+/* T associated with A in the generalized RQ factorization */
+/* of the pair (B, A) is singular, so that */
+/* rank( (A) ) < N; the least squares solution could not */
+/* ( (B) ) */
+/* be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --c__;
+ --d__;
+ --x;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ mn = min(*m,*n);
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*p < 0 || *p > *n || *p < *n - *m) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldb < max(1,*p)) {
+ *info = -7;
+ }
+
+/* Calculate workspace */
+
+ if (*info == 0) {
+ if (*n == 0) {
+ lwkmin = 1;
+ lwkopt = 1;
+ } else {
+ nb1 = ilaenv_(&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1);
+ nb2 = ilaenv_(&c__1, "DGERQF", " ", m, n, &c_n1, &c_n1);
+ nb3 = ilaenv_(&c__1, "DORMQR", " ", m, n, p, &c_n1);
+ nb4 = ilaenv_(&c__1, "DORMRQ", " ", m, n, p, &c_n1);
+/* Computing MAX */
+ i__1 = max(nb1,nb2), i__1 = max(i__1,nb3);
+ nb = max(i__1,nb4);
+ lwkmin = *m + *n + *p;
+ lwkopt = *p + mn + max(*m,*n) * nb;
+ }
+ work[1] = (doublereal) lwkopt;
+
+ if (*lwork < lwkmin && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGGLSE", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Compute the GRQ factorization of matrices B and A: */
+
+/* B*Q' = ( 0 T12 ) P Z'*A*Q' = ( R11 R12 ) N-P */
+/* N-P P ( 0 R22 ) M+P-N */
+/* N-P P */
+
+/* where T12 and R11 are upper triangular, and Q and Z are */
+/* orthogonal. */
+
+ i__1 = *lwork - *p - mn;
+ dggrqf_(p, m, n, &b[b_offset], ldb, &work[1], &a[a_offset], lda, &work[*p
+ + 1], &work[*p + mn + 1], &i__1, info);
+ lopt = (integer) work[*p + mn + 1];
+
+/* Update c = Z'*c = ( c1 ) N-P */
+/* ( c2 ) M+P-N */
+
+ i__1 = max(1,*m);
+ i__2 = *lwork - *p - mn;
+ dormqr_("Left", "Transpose", m, &c__1, &mn, &a[a_offset], lda, &work[*p +
+ 1], &c__[1], &i__1, &work[*p + mn + 1], &i__2, info);
+/* Computing MAX */
+ i__1 = lopt, i__2 = (integer) work[*p + mn + 1];
+ lopt = max(i__1,i__2);
+
+/* Solve T12*x2 = d for x2 */
+
+ if (*p > 0) {
+ dtrtrs_("Upper", "No transpose", "Non-unit", p, &c__1, &b[(*n - *p +
+ 1) * b_dim1 + 1], ldb, &d__[1], p, info);
+
+ if (*info > 0) {
+ *info = 1;
+ return 0;
+ }
+
+/* Put the solution in X */
+
+ dcopy_(p, &d__[1], &c__1, &x[*n - *p + 1], &c__1);
+
+/* Update c1 */
+
+ i__1 = *n - *p;
+ dgemv_("No transpose", &i__1, p, &c_b31, &a[(*n - *p + 1) * a_dim1 +
+ 1], lda, &d__[1], &c__1, &c_b33, &c__[1], &c__1);
+ }
+
+/* Solve R11*x1 = c1 for x1 */
+
+ if (*n > *p) {
+ i__1 = *n - *p;
+ i__2 = *n - *p;
+ dtrtrs_("Upper", "No transpose", "Non-unit", &i__1, &c__1, &a[
+ a_offset], lda, &c__[1], &i__2, info);
+
+ if (*info > 0) {
+ *info = 2;
+ return 0;
+ }
+
+/* Put the solutions in X */
+
+ i__1 = *n - *p;
+ dcopy_(&i__1, &c__[1], &c__1, &x[1], &c__1);
+ }
+
+/* Compute the residual vector: */
+
+ if (*m < *n) {
+ nr = *m + *p - *n;
+ if (nr > 0) {
+ i__1 = *n - *m;
+ dgemv_("No transpose", &nr, &i__1, &c_b31, &a[*n - *p + 1 + (*m +
+ 1) * a_dim1], lda, &d__[nr + 1], &c__1, &c_b33, &c__[*n -
+ *p + 1], &c__1);
+ }
+ } else {
+ nr = *p;
+ }
+ if (nr > 0) {
+ dtrmv_("Upper", "No transpose", "Non unit", &nr, &a[*n - *p + 1 + (*n
+ - *p + 1) * a_dim1], lda, &d__[1], &c__1);
+ daxpy_(&nr, &c_b31, &d__[1], &c__1, &c__[*n - *p + 1], &c__1);
+ }
+
+/* Backward transformation x = Q'*x */
+
+ i__1 = *lwork - *p - mn;
+ dormrq_("Left", "Transpose", n, &c__1, p, &b[b_offset], ldb, &work[1], &x[
+ 1], n, &work[*p + mn + 1], &i__1, info);
+/* Computing MAX */
+ i__1 = lopt, i__2 = (integer) work[*p + mn + 1];
+ work[1] = (doublereal) (*p + mn + max(i__1,i__2));
+
+ return 0;
+
+/* End of DGGLSE */
+
+} /* dgglse_ */
diff --git a/contrib/libs/clapack/dggqrf.c b/contrib/libs/clapack/dggqrf.c
new file mode 100644
index 0000000000..b75d005d06
--- /dev/null
+++ b/contrib/libs/clapack/dggqrf.c
@@ -0,0 +1,267 @@
+/* dggqrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dggqrf_(integer *n, integer *m, integer *p, doublereal *
+	a, integer *lda, doublereal *taua, doublereal *b, integer *ldb,
+	doublereal *taub, doublereal *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+
+    /* Local variables */
+    integer nb, nb1, nb2, nb3, lopt;
+    extern /* Subroutine */ int dgeqrf_(integer *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *, integer *),
+	    dgerqf_(integer *, integer *, doublereal *, integer *, doublereal
+	    *, doublereal *, integer *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, integer *);
+    integer lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGGQRF computes a generalized QR factorization of an N-by-M matrix A */
+/* and an N-by-P matrix B: */
+
+/* A = Q*R, B = Q*T*Z, */
+
+/* where Q is an N-by-N orthogonal matrix, Z is a P-by-P orthogonal */
+/* matrix, and R and T assume one of the forms: */
+
+/* if N >= M, R = ( R11 ) M , or if N < M, R = ( R11 R12 ) N, */
+/* ( 0 ) N-M N M-N */
+/* M */
+
+/* where R11 is upper triangular, and */
+
+/* if N <= P, T = ( 0 T12 ) N, or if N > P, T = ( T11 ) N-P, */
+/* P-N N ( T21 ) P */
+/* P */
+
+/* where T12 or T21 is upper triangular. */
+
+/* In particular, if B is square and nonsingular, the GQR factorization */
+/* of A and B implicitly gives the QR factorization of inv(B)*A: */
+
+/* inv(B)*A = Z'*(inv(T)*R) */
+
+/* where inv(B) denotes the inverse of the matrix B, and Z' denotes the */
+/* transpose of the matrix Z. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of rows of the matrices A and B. N >= 0. */
+
+/* M (input) INTEGER */
+/* The number of columns of the matrix A. M >= 0. */
+
+/* P (input) INTEGER */
+/* The number of columns of the matrix B. P >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,M) */
+/* On entry, the N-by-M matrix A. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(N,M)-by-M upper trapezoidal matrix R (R is */
+/* upper triangular if N >= M); the elements below the diagonal, */
+/* with the array TAUA, represent the orthogonal matrix Q as a */
+/* product of min(N,M) elementary reflectors (see Further */
+/* Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAUA (output) DOUBLE PRECISION array, dimension (min(N,M)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix Q (see Further Details). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,P) */
+/* On entry, the N-by-P matrix B. */
+/* On exit, if N <= P, the upper triangle of the subarray */
+/* B(1:N,P-N+1:P) contains the N-by-N upper triangular matrix T; */
+/* if N > P, the elements on and above the (N-P)-th subdiagonal */
+/* contain the N-by-P upper trapezoidal matrix T; the remaining */
+/* elements, with the array TAUB, represent the orthogonal */
+/* matrix Z as a product of elementary reflectors (see Further */
+/* Details). */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* TAUB (output) DOUBLE PRECISION array, dimension (min(N,P)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix Z (see Further Details). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N,M,P). */
+/* For optimum performance LWORK >= max(N,M,P)*max(NB1,NB2,NB3), */
+/* where NB1 is the optimal blocksize for the QR factorization */
+/* of an N-by-M matrix, NB2 is the optimal blocksize for the */
+/* RQ factorization of an N-by-P matrix, and NB3 is the optimal */
+/* blocksize for a call of DORMQR. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(n,m). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - taua * v * v' */
+
+/* where taua is a real scalar, and v is a real vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i+1:n,i), */
+/* and taua in TAUA(i). */
+/* To form Q explicitly, use LAPACK subroutine DORGQR. */
+/* To use Q to update another matrix, use LAPACK subroutine DORMQR. */
+
+/* The matrix Z is represented as a product of elementary reflectors */
+
+/* Z = H(1) H(2) . . . H(k), where k = min(n,p). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - taub * v * v' */
+
+/* where taub is a real scalar, and v is a real vector with */
+/* v(p-k+i+1:p) = 0 and v(p-k+i) = 1; v(1:p-k+i-1) is stored on exit in */
+/* B(n-k+i,1:p-k+i-1), and taub in TAUB(i). */
+/* To form Z explicitly, use LAPACK subroutine DORGRQ. */
+/* To use Z to update another matrix, use LAPACK subroutine DORMRQ. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments (shift pointers so 1-based Fortran indexing works in C) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --taua;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --taub;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    nb1 = ilaenv_(&c__1, "DGEQRF", " ", n, m, &c_n1, &c_n1);
+    nb2 = ilaenv_(&c__1, "DGERQF", " ", n, p, &c_n1, &c_n1);
+    nb3 = ilaenv_(&c__1, "DORMQR", " ", n, m, p, &c_n1);
+/* Computing MAX: NB = largest of the three optimal blocksizes */
+    i__1 = max(nb1,nb2);
+    nb = max(i__1,nb3);
+/* Computing MAX: optimal LWORK = max(N,M,P) * NB */
+    i__1 = max(*n,*m);
+    lwkopt = max(i__1,*p) * nb;
+    work[1] = (doublereal) lwkopt;
+    lquery = *lwork == -1;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*m < 0) {
+	*info = -2;
+    } else if (*p < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    } else /* if(complicated condition) */ {
+/* Computing MAX: check LWORK >= max(1,N,M,P) unless this is a workspace query */
+	i__1 = max(1,*n), i__1 = max(i__1,*m);
+	if (*lwork < max(i__1,*p) && ! lquery) {
+	    *info = -11;
+	}
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGGQRF", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* QR factorization of N-by-M matrix A: A = Q*R (reflectors stored in A and TAUA) */
+
+    dgeqrf_(n, m, &a[a_offset], lda, &taua[1], &work[1], lwork, info);
+    lopt = (integer) work[1];
+
+/* Update B := Q'*B, applying the min(N,M) reflectors of Q from the left. */
+
+    i__1 = min(*n,*m);
+    dormqr_("Left", "Transpose", n, p, &i__1, &a[a_offset], lda, &taua[1], &b[
+	    b_offset], ldb, &work[1], lwork, info);
+/* Computing MAX: track the largest optimal workspace reported so far */
+    i__1 = lopt, i__2 = (integer) work[1];
+    lopt = max(i__1,i__2);
+
+/* RQ factorization of N-by-P matrix B: B = T*Z (reflectors stored in B and TAUB). */
+
+    dgerqf_(n, p, &b[b_offset], ldb, &taub[1], &work[1], lwork, info);
+/* Computing MAX: WORK(1) returns the overall optimal LWORK */
+    i__1 = lopt, i__2 = (integer) work[1];
+    work[1] = (doublereal) max(i__1,i__2);
+
+    return 0;
+
+/* End of DGGQRF */
+
+} /* dggqrf_ */
diff --git a/contrib/libs/clapack/dggrqf.c b/contrib/libs/clapack/dggrqf.c
new file mode 100644
index 0000000000..1a49ec91e0
--- /dev/null
+++ b/contrib/libs/clapack/dggrqf.c
@@ -0,0 +1,268 @@
+/* dggrqf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dggrqf_(integer *m, integer *p, integer *n, doublereal *
+	a, integer *lda, doublereal *taua, doublereal *b, integer *ldb,
+	doublereal *taub, doublereal *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer nb, nb1, nb2, nb3, lopt;
+    extern /* Subroutine */ int dgeqrf_(integer *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *, integer *),
+	    dgerqf_(integer *, integer *, doublereal *, integer *, doublereal
+	    *, doublereal *, integer *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int dormrq_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, integer *);
+    integer lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGGRQF computes a generalized RQ factorization of an M-by-N matrix A */
+/* and a P-by-N matrix B: */
+
+/* A = R*Q, B = Z*T*Q, */
+
+/* where Q is an N-by-N orthogonal matrix, Z is a P-by-P orthogonal */
+/* matrix, and R and T assume one of the forms: */
+
+/* if M <= N, R = ( 0 R12 ) M, or if M > N, R = ( R11 ) M-N, */
+/* N-M M ( R21 ) N */
+/* N */
+
+/* where R12 or R21 is upper triangular, and */
+
+/* if P >= N, T = ( T11 ) N , or if P < N, T = ( T11 T12 ) P, */
+/* ( 0 ) P-N P N-P */
+/* N */
+
+/* where T11 is upper triangular. */
+
+/* In particular, if B is square and nonsingular, the GRQ factorization */
+/* of A and B implicitly gives the RQ factorization of A*inv(B): */
+
+/* A*inv(B) = (R*inv(T))*Z' */
+
+/* where inv(B) denotes the inverse of the matrix B, and Z' denotes the */
+/* transpose of the matrix Z. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* P (input) INTEGER */
+/* The number of rows of the matrix B. P >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrices A and B. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, if M <= N, the upper triangle of the subarray */
+/* A(1:M,N-M+1:N) contains the M-by-M upper triangular matrix R; */
+/* if M > N, the elements on and above the (M-N)-th subdiagonal */
+/* contain the M-by-N upper trapezoidal matrix R; the remaining */
+/* elements, with the array TAUA, represent the orthogonal */
+/* matrix Q as a product of elementary reflectors (see Further */
+/* Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAUA (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix Q (see Further Details). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,N) */
+/* On entry, the P-by-N matrix B. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(P,N)-by-N upper trapezoidal matrix T (T is */
+/* upper triangular if P >= N); the elements below the diagonal, */
+/* with the array TAUB, represent the orthogonal matrix Z as a */
+/* product of elementary reflectors (see Further Details). */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,P). */
+
+/* TAUB (output) DOUBLE PRECISION array, dimension (min(P,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix Z (see Further Details). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N,M,P). */
+/* For optimum performance LWORK >= max(N,M,P)*max(NB1,NB2,NB3), */
+/* where NB1 is the optimal blocksize for the RQ factorization */
+/* of an M-by-N matrix, NB2 is the optimal blocksize for the */
+/* QR factorization of a P-by-N matrix, and NB3 is the optimal */
+/* blocksize for a call of DORMRQ. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - taua * v * v' */
+
+/* where taua is a real scalar, and v is a real vector with */
+/* v(n-k+i+1:n) = 0 and v(n-k+i) = 1; v(1:n-k+i-1) is stored on exit in */
+/* A(m-k+i,1:n-k+i-1), and taua in TAUA(i). */
+/* To form Q explicitly, use LAPACK subroutine DORGRQ. */
+/* To use Q to update another matrix, use LAPACK subroutine DORMRQ. */
+
+/* The matrix Z is represented as a product of elementary reflectors */
+
+/* Z = H(1) H(2) . . . H(k), where k = min(p,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - taub * v * v' */
+
+/* where taub is a real scalar, and v is a real vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:p) is stored on exit in B(i+1:p,i), */
+/* and taub in TAUB(i). */
+/* To form Z explicitly, use LAPACK subroutine DORGQR. */
+/* To use Z to update another matrix, use LAPACK subroutine DORMQR. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments (shift pointers so 1-based Fortran indexing works in C) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --taua;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --taub;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    nb1 = ilaenv_(&c__1, "DGERQF", " ", m, n, &c_n1, &c_n1);
+    nb2 = ilaenv_(&c__1, "DGEQRF", " ", p, n, &c_n1, &c_n1);
+    nb3 = ilaenv_(&c__1, "DORMRQ", " ", m, n, p, &c_n1);
+/* Computing MAX: NB = largest of the three optimal blocksizes */
+    i__1 = max(nb1,nb2);
+    nb = max(i__1,nb3);
+/* Computing MAX: optimal LWORK = max(N,M,P) * NB */
+    i__1 = max(*n,*m);
+    lwkopt = max(i__1,*p) * nb;
+    work[1] = (doublereal) lwkopt;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*p < 0) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    } else if (*ldb < max(1,*p)) {
+	*info = -8;
+    } else /* if(complicated condition) */ {
+/* Computing MAX: check LWORK >= max(1,M,P,N) unless this is a workspace query */
+	i__1 = max(1,*m), i__1 = max(i__1,*p);
+	if (*lwork < max(i__1,*n) && ! lquery) {
+	    *info = -11;
+	}
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGGRQF", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* RQ factorization of M-by-N matrix A: A = R*Q (reflectors stored in A and TAUA) */
+
+    dgerqf_(m, n, &a[a_offset], lda, &taua[1], &work[1], lwork, info);
+    lopt = (integer) work[1];
+
+/* Update B := B*Q', applying the min(M,N) reflectors of Q from the right */
+
+    i__1 = min(*m,*n);
+/* Computing MAX: reflectors start at row max(1, M-N+1) of A */
+    i__2 = 1, i__3 = *m - *n + 1;
+    dormrq_("Right", "Transpose", p, n, &i__1, &a[max(i__2, i__3)+ a_dim1],
+	    lda, &taua[1], &b[b_offset], ldb, &work[1], lwork, info);
+/* Computing MAX: track the largest optimal workspace reported so far */
+    i__1 = lopt, i__2 = (integer) work[1];
+    lopt = max(i__1,i__2);
+
+/* QR factorization of P-by-N matrix B: B = Z*T (reflectors stored in B and TAUB) */
+
+    dgeqrf_(p, n, &b[b_offset], ldb, &taub[1], &work[1], lwork, info);
+/* Computing MAX: WORK(1) returns the overall optimal LWORK */
+    i__1 = lopt, i__2 = (integer) work[1];
+    work[1] = (doublereal) max(i__1,i__2);
+
+    return 0;
+
+/* End of DGGRQF */
+
+} /* dggrqf_ */
diff --git a/contrib/libs/clapack/dggsvd.c b/contrib/libs/clapack/dggsvd.c
new file mode 100644
index 0000000000..b9b567ef2d
--- /dev/null
+++ b/contrib/libs/clapack/dggsvd.c
@@ -0,0 +1,405 @@
+/* dggsvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dggsvd_(char *jobu, char *jobv, char *jobq, integer *m,
+	integer *n, integer *p, integer *k, integer *l, doublereal *a,
+	integer *lda, doublereal *b, integer *ldb, doublereal *alpha,
+	doublereal *beta, doublereal *u, integer *ldu, doublereal *v, integer
+	*ldv, doublereal *q, integer *ldq, doublereal *work, integer *iwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, u_dim1,
+	    u_offset, v_dim1, v_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j;
+    doublereal ulp;
+    integer ibnd;
+    doublereal tola;
+    integer isub;
+    doublereal tolb, unfl, temp, smax;
+    extern logical lsame_(char *, char *);
+    doublereal anorm, bnorm;
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    logical wantq, wantu, wantv;
+    extern doublereal dlamch_(char *), dlange_(char *, integer *,
+	    integer *, doublereal *, integer *, doublereal *);
+    extern /* Subroutine */ int dtgsja_(char *, char *, char *, integer *,
+	    integer *, integer *, integer *, integer *, doublereal *, integer
+	    *, doublereal *, integer *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, integer *, doublereal *, integer *,
+	    integer *);
+    integer ncycle;
+    extern /* Subroutine */ int xerbla_(char *, integer *), dggsvp_(
+	    char *, char *, char *, integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, integer *, integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, integer *,
+	    doublereal *, doublereal *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGGSVD computes the generalized singular value decomposition (GSVD) */
+/* of an M-by-N real matrix A and P-by-N real matrix B: */
+
+/* U'*A*Q = D1*( 0 R ), V'*B*Q = D2*( 0 R ) */
+
+/* where U, V and Q are orthogonal matrices, and Z' is the transpose */
+/* of Z. Let K+L = the effective numerical rank of the matrix (A',B')', */
+/* then R is a K+L-by-K+L nonsingular upper triangular matrix, D1 and */
+/* D2 are M-by-(K+L) and P-by-(K+L) "diagonal" matrices and of the */
+/* following structures, respectively: */
+
+/* If M-K-L >= 0, */
+
+/* K L */
+/* D1 = K ( I 0 ) */
+/* L ( 0 C ) */
+/* M-K-L ( 0 0 ) */
+
+/* K L */
+/* D2 = L ( 0 S ) */
+/* P-L ( 0 0 ) */
+
+/* N-K-L K L */
+/* ( 0 R ) = K ( 0 R11 R12 ) */
+/* L ( 0 0 R22 ) */
+
+/* where */
+
+/* C = diag( ALPHA(K+1), ... , ALPHA(K+L) ), */
+/* S = diag( BETA(K+1), ... , BETA(K+L) ), */
+/* C**2 + S**2 = I. */
+
+/* R is stored in A(1:K+L,N-K-L+1:N) on exit. */
+
+/* If M-K-L < 0, */
+
+/* K M-K K+L-M */
+/* D1 = K ( I 0 0 ) */
+/* M-K ( 0 C 0 ) */
+
+/* K M-K K+L-M */
+/* D2 = M-K ( 0 S 0 ) */
+/* K+L-M ( 0 0 I ) */
+/* P-L ( 0 0 0 ) */
+
+/* N-K-L K M-K K+L-M */
+/* ( 0 R ) = K ( 0 R11 R12 R13 ) */
+/* M-K ( 0 0 R22 R23 ) */
+/* K+L-M ( 0 0 0 R33 ) */
+
+/* where */
+
+/* C = diag( ALPHA(K+1), ... , ALPHA(M) ), */
+/* S = diag( BETA(K+1), ... , BETA(M) ), */
+/* C**2 + S**2 = I. */
+
+/* (R11 R12 R13 ) is stored in A(1:M, N-K-L+1:N), and R33 is stored */
+/* ( 0 R22 R23 ) */
+/* in B(M-K+1:L,N+M-K-L+1:N) on exit. */
+
+/* The routine computes C, S, R, and optionally the orthogonal */
+/* transformation matrices U, V and Q. */
+
+/* In particular, if B is an N-by-N nonsingular matrix, then the GSVD of */
+/* A and B implicitly gives the SVD of A*inv(B): */
+/* A*inv(B) = U*(D1*inv(D2))*V'. */
+/* If ( A',B')' has orthonormal columns, then the GSVD of A and B is */
+/* also equal to the CS decomposition of A and B. Furthermore, the GSVD */
+/* can be used to derive the solution of the eigenvalue problem: */
+/* A'*A x = lambda* B'*B x. */
+/* In some literature, the GSVD of A and B is presented in the form */
+/* U'*A*X = ( 0 D1 ), V'*B*X = ( 0 D2 ) */
+/* where U and V are orthogonal and X is nonsingular, D1 and D2 are */
+/* ``diagonal''. The former GSVD form can be converted to the latter */
+/* form by taking the nonsingular matrix X as */
+
+/* X = Q*( I 0 ) */
+/* ( 0 inv(R) ). */
+
+/* Arguments */
+/* ========= */
+
+/* JOBU (input) CHARACTER*1 */
+/* = 'U': Orthogonal matrix U is computed; */
+/* = 'N': U is not computed. */
+
+/* JOBV (input) CHARACTER*1 */
+/* = 'V': Orthogonal matrix V is computed; */
+/* = 'N': V is not computed. */
+
+/* JOBQ (input) CHARACTER*1 */
+/* = 'Q': Orthogonal matrix Q is computed; */
+/* = 'N': Q is not computed. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrices A and B. N >= 0. */
+
+/* P (input) INTEGER */
+/* The number of rows of the matrix B. P >= 0. */
+
+/* K (output) INTEGER */
+/* L (output) INTEGER */
+/* On exit, K and L specify the dimension of the subblocks */
+/* described in the Purpose section. */
+/* K + L = effective numerical rank of (A',B')'. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A contains the triangular matrix R, or part of R. */
+/* See Purpose for details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,N) */
+/* On entry, the P-by-N matrix B. */
+/* On exit, B contains the triangular matrix R if M-K-L < 0. */
+/* See Purpose for details. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,P). */
+
+/* ALPHA (output) DOUBLE PRECISION array, dimension (N) */
+/* BETA (output) DOUBLE PRECISION array, dimension (N) */
+/* On exit, ALPHA and BETA contain the generalized singular */
+/* value pairs of A and B; */
+/* ALPHA(1:K) = 1, */
+/* BETA(1:K) = 0, */
+/* and if M-K-L >= 0, */
+/* ALPHA(K+1:K+L) = C, */
+/* BETA(K+1:K+L) = S, */
+/* or if M-K-L < 0, */
+/* ALPHA(K+1:M)=C, ALPHA(M+1:K+L)=0 */
+/* BETA(K+1:M) =S, BETA(M+1:K+L) =1 */
+/* and */
+/* ALPHA(K+L+1:N) = 0 */
+/* BETA(K+L+1:N) = 0 */
+
+/* U (output) DOUBLE PRECISION array, dimension (LDU,M) */
+/* If JOBU = 'U', U contains the M-by-M orthogonal matrix U. */
+/* If JOBU = 'N', U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= max(1,M) if */
+/* JOBU = 'U'; LDU >= 1 otherwise. */
+
+/* V (output) DOUBLE PRECISION array, dimension (LDV,P) */
+/* If JOBV = 'V', V contains the P-by-P orthogonal matrix V. */
+/* If JOBV = 'N', V is not referenced. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. LDV >= max(1,P) if */
+/* JOBV = 'V'; LDV >= 1 otherwise. */
+
+/* Q (output) DOUBLE PRECISION array, dimension (LDQ,N) */
+/* If JOBQ = 'Q', Q contains the N-by-N orthogonal matrix Q. */
+/* If JOBQ = 'N', Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N) if */
+/* JOBQ = 'Q'; LDQ >= 1 otherwise. */
+
+/* WORK (workspace) DOUBLE PRECISION array, */
+/* dimension (max(3*N,M,P)+N) */
+
+/* IWORK (workspace/output) INTEGER array, dimension (N) */
+/* On exit, IWORK stores the sorting information. More */
+/* precisely, the following loop will sort ALPHA */
+/* for I = K+1, min(M,K+L) */
+/* swap ALPHA(I) and ALPHA(IWORK(I)) */
+/* endfor */
+/* such that ALPHA(1) >= ALPHA(2) >= ... >= ALPHA(N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, the Jacobi-type procedure failed to */
+/* converge. For further details, see subroutine DTGSJA. */
+
+/* Internal Parameters */
+/* =================== */
+
+/* TOLA DOUBLE PRECISION */
+/* TOLB DOUBLE PRECISION */
+/* TOLA and TOLB are the thresholds to determine the effective */
+/* rank of (A',B')'. Generally, they are set to */
+/* TOLA = MAX(M,N)*norm(A)*MACHEPS, (MACHEPS = machine epsilon) */
+/* TOLB = MAX(P,N)*norm(B)*MACHEPS. */
+/* The size of TOLA and TOLB may affect the size of backward */
+/* errors of the decomposition. */
+
+/* Further Details */
+/* =============== */
+
+/* 2-96 Based on modifications by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments (shift pointers so 1-based Fortran indexing works in C) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --alpha;
+    --beta;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1;
+    u -= u_offset;
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    wantu = lsame_(jobu, "U");
+    wantv = lsame_(jobv, "V");
+    wantq = lsame_(jobq, "Q");
+
+    *info = 0;
+    if (! (wantu || lsame_(jobu, "N"))) {
+	*info = -1;
+    } else if (! (wantv || lsame_(jobv, "N"))) {
+	*info = -2;
+    } else if (! (wantq || lsame_(jobq, "N"))) {
+	*info = -3;
+    } else if (*m < 0) {
+	*info = -4;
+    } else if (*n < 0) {
+	*info = -5;
+    } else if (*p < 0) {
+	*info = -6;
+    } else if (*lda < max(1,*m)) {
+	*info = -10;
+    } else if (*ldb < max(1,*p)) {
+	*info = -12;
+    } else if (*ldu < 1 || wantu && *ldu < *m) {
+	*info = -16;
+    } else if (*ldv < 1 || wantv && *ldv < *p) {
+	*info = -18;
+    } else if (*ldq < 1 || wantq && *ldq < *n) {
+	*info = -20;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGGSVD", &i__1);
+	return 0;
+    }
+
+/* Compute the 1-norm (max column sum; DLANGE is called with '1') of A and B */
+
+    anorm = dlange_("1", m, n, &a[a_offset], lda, &work[1]);
+    bnorm = dlange_("1", p, n, &b[b_offset], ldb, &work[1]);
+
+/* Get machine precision and set up threshold for determining */
+/* the effective numerical rank of the matrices A and B. */
+
+    ulp = dlamch_("Precision");
+    unfl = dlamch_("Safe Minimum");
+    tola = max(*m,*n) * max(anorm,unfl) * ulp;
+    tolb = max(*p,*n) * max(bnorm,unfl) * ulp;
+
+/* Preprocessing: reduce A and B to upper triangular form (DGGSVP), */
+/* determining K and L */
+
+    dggsvp_(jobu, jobv, jobq, m, p, n, &a[a_offset], lda, &b[b_offset], ldb, &
+	    tola, &tolb, k, l, &u[u_offset], ldu, &v[v_offset], ldv, &q[
+	    q_offset], ldq, &iwork[1], &work[1], &work[*n + 1], info);
+
+/* Compute the GSVD of two upper "triangular" matrices */
+
+    dtgsja_(jobu, jobv, jobq, m, p, n, k, l, &a[a_offset], lda, &b[b_offset],
+	    ldb, &tola, &tolb, &alpha[1], &beta[1], &u[u_offset], ldu, &v[
+	    v_offset], ldv, &q[q_offset], ldq, &work[1], &ncycle, info);
+
+/* Sort the singular values and store the pivot indices in IWORK */
+/* Copy ALPHA to WORK, then sort ALPHA in WORK */
+
+    dcopy_(n, &alpha[1], &c__1, &work[1], &c__1);
+/* Computing MIN: only entries K+1 .. K+min(L,M-K) participate in the sort */
+    i__1 = *l, i__2 = *m - *k;
+    ibnd = min(i__1,i__2);
+    i__1 = ibnd;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Scan for largest ALPHA(K+I) (selection sort on the WORK copy) */
+
+	isub = i__;
+	smax = work[*k + i__];
+	i__2 = ibnd;
+	for (j = i__ + 1; j <= i__2; ++j) {
+	    temp = work[*k + j];
+	    if (temp > smax) {
+		isub = j;
+		smax = temp;
+	    }
+/* L10: */
+	}
+	if (isub != i__) {
+	    work[*k + isub] = work[*k + i__];
+	    work[*k + i__] = smax;
+	    iwork[*k + i__] = *k + isub;
+	} else {
+	    iwork[*k + i__] = *k + i__;
+	}
+/* L20: */
+    }
+
+    return 0;
+
+/* End of DGGSVD */
+
+} /* dggsvd_ */
diff --git a/contrib/libs/clapack/dggsvp.c b/contrib/libs/clapack/dggsvp.c
new file mode 100644
index 0000000000..7cf51c29c2
--- /dev/null
+++ b/contrib/libs/clapack/dggsvp.c
@@ -0,0 +1,512 @@
+/* dggsvp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b12 = 0.;
+static doublereal c_b22 = 1.;
+
+/* Subroutine */ int dggsvp_(char *jobu, char *jobv, char *jobq, integer *m,
+	integer *p, integer *n, doublereal *a, integer *lda, doublereal *b,
+	integer *ldb, doublereal *tola, doublereal *tolb, integer *k, integer
+	*l, doublereal *u, integer *ldu, doublereal *v, integer *ldv,
+	doublereal *q, integer *ldq, integer *iwork, doublereal *tau,
+	doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, u_dim1,
+	u_offset, v_dim1, v_offset, i__1, i__2, i__3;
+    doublereal d__1;
+
+    /* Local variables */
+    integer i__, j;
+    extern logical lsame_(char *, char *);
+    logical wantq, wantu, wantv;
+    extern /* Subroutine */ int dgeqr2_(integer *, integer *, doublereal *,
+	integer *, doublereal *, doublereal *, integer *), dgerq2_(
+	integer *, integer *, doublereal *, integer *, doublereal *,
+	doublereal *, integer *), dorg2r_(integer *, integer *, integer *,
+	doublereal *, integer *, doublereal *, doublereal *, integer *),
+	dorm2r_(char *, char *, integer *, integer *, integer *,
+	doublereal *, integer *, doublereal *, doublereal *, integer *,
+	doublereal *, integer *), dormr2_(char *, char *,
+	integer *, integer *, integer *, doublereal *, integer *,
+	doublereal *, doublereal *, integer *, doublereal *, integer *), dgeqpf_(integer *, integer *, doublereal *,
+	integer *, integer *, doublereal *, doublereal *, integer *),
+	dlacpy_(char *, integer *, integer *, doublereal *, integer *,
+	doublereal *, integer *), dlaset_(char *, integer *,
+	integer *, doublereal *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *), dlapmt_(logical *,
+	integer *, integer *, doublereal *, integer *, integer *);
+    logical forwrd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGGSVP computes orthogonal matrices U, V and Q such that */
+
+/* N-K-L K L */
+/* U'*A*Q = K ( 0 A12 A13 ) if M-K-L >= 0; */
+/* L ( 0 0 A23 ) */
+/* M-K-L ( 0 0 0 ) */
+
+/* N-K-L K L */
+/* = K ( 0 A12 A13 ) if M-K-L < 0; */
+/* M-K ( 0 0 A23 ) */
+
+/* N-K-L K L */
+/* V'*B*Q = L ( 0 0 B13 ) */
+/* P-L ( 0 0 0 ) */
+
+/* where the K-by-K matrix A12 and L-by-L matrix B13 are nonsingular */
+/* upper triangular; A23 is L-by-L upper triangular if M-K-L >= 0, */
+/* otherwise A23 is (M-K)-by-L upper trapezoidal. K+L = the effective */
+/* numerical rank of the (M+P)-by-N matrix (A',B')'. Z' denotes the */
+/* transpose of Z. */
+
+/* This decomposition is the preprocessing step for computing the */
+/* Generalized Singular Value Decomposition (GSVD), see subroutine */
+/* DGGSVD. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBU (input) CHARACTER*1 */
+/* = 'U': Orthogonal matrix U is computed; */
+/* = 'N': U is not computed. */
+
+/* JOBV (input) CHARACTER*1 */
+/* = 'V': Orthogonal matrix V is computed; */
+/* = 'N': V is not computed. */
+
+/* JOBQ (input) CHARACTER*1 */
+/* = 'Q': Orthogonal matrix Q is computed; */
+/* = 'N': Q is not computed. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* P (input) INTEGER */
+/* The number of rows of the matrix B. P >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrices A and B. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A contains the triangular (or trapezoidal) matrix */
+/* described in the Purpose section. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,N) */
+/* On entry, the P-by-N matrix B. */
+/* On exit, B contains the triangular matrix described in */
+/* the Purpose section. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,P). */
+
+/* TOLA (input) DOUBLE PRECISION */
+/* TOLB (input) DOUBLE PRECISION */
+/* TOLA and TOLB are the thresholds to determine the effective */
+/* numerical rank of matrix B and a subblock of A. Generally, */
+/* they are set to */
+/* TOLA = MAX(M,N)*norm(A)*MACHEPS, */
+/* TOLB = MAX(P,N)*norm(B)*MACHEPS. */
+/* The size of TOLA and TOLB may affect the size of backward */
+/* errors of the decomposition. */
+
+/* K (output) INTEGER */
+/* L (output) INTEGER */
+/* On exit, K and L specify the dimension of the subblocks */
+/* described in Purpose. */
+/* K + L = effective numerical rank of (A',B')'. */
+
+/* U (output) DOUBLE PRECISION array, dimension (LDU,M) */
+/* If JOBU = 'U', U contains the orthogonal matrix U. */
+/* If JOBU = 'N', U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= max(1,M) if */
+/* JOBU = 'U'; LDU >= 1 otherwise. */
+
+/* V (output) DOUBLE PRECISION array, dimension (LDV,P) */
+/* If JOBV = 'V', V contains the orthogonal matrix V. */
+/* If JOBV = 'N', V is not referenced. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. LDV >= max(1,P) if */
+/* JOBV = 'V'; LDV >= 1 otherwise. */
+
+/* Q (output) DOUBLE PRECISION array, dimension (LDQ,N) */
+/* If JOBQ = 'Q', Q contains the orthogonal matrix Q. */
+/* If JOBQ = 'N', Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N) if */
+/* JOBQ = 'Q'; LDQ >= 1 otherwise. */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* TAU (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (max(3*N,M,P)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+
+/* Further Details */
+/* =============== */
+
+/* The subroutine uses LAPACK subroutine DGEQPF for the QR factorization */
+/* with column pivoting to detect the effective numerical rank of the */
+/* a matrix. It may be replaced by a better rank determination strategy. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset; /* f2c idiom: shift to 1-based, column-major indexing */
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1;
+    u -= u_offset;
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    --iwork;
+    --tau;
+    --work;
+
+    /* Function Body */
+    wantu = lsame_(jobu, "U");
+    wantv = lsame_(jobv, "V");
+    wantq = lsame_(jobq, "Q");
+    forwrd = TRUE_; /* dlapmt_ applies the column permutation in forward order */
+
+    *info = 0;
+    if (! (wantu || lsame_(jobu, "N"))) {
+	*info = -1; /* JOBU */
+    } else if (! (wantv || lsame_(jobv, "N"))) {
+	*info = -2; /* JOBV */
+    } else if (! (wantq || lsame_(jobq, "N"))) {
+	*info = -3; /* JOBQ */
+    } else if (*m < 0) {
+	*info = -4; /* M */
+    } else if (*p < 0) {
+	*info = -5; /* P */
+    } else if (*n < 0) {
+	*info = -6; /* N */
+    } else if (*lda < max(1,*m)) {
+	*info = -8; /* LDA */
+    } else if (*ldb < max(1,*p)) {
+	*info = -10; /* LDB */
+    } else if (*ldu < 1 || wantu && *ldu < *m) {
+	*info = -16; /* LDU */
+    } else if (*ldv < 1 || wantv && *ldv < *p) {
+	*info = -18; /* LDV */
+    } else if (*ldq < 1 || wantq && *ldq < *n) {
+	*info = -20; /* LDQ */
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGGSVP", &i__1);
+	return 0;
+    }
+
+/* QR with column pivoting of B: B*P = V*( S11 S12 ) */
+/* ( 0 0 ) */
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	iwork[i__] = 0; /* 0 marks every column of B as free to pivot */
+/* L10: */
+    }
+    dgeqpf_(p, n, &b[b_offset], ldb, &iwork[1], &tau[1], &work[1], info);
+
+/* Update A := A*P */
+
+    dlapmt_(&forwrd, m, n, &a[a_offset], lda, &iwork[1]);
+
+/* Determine the effective rank of matrix B. */
+
+    *l = 0;
+    i__1 = min(*p,*n);
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((d__1 = b[i__ + i__ * b_dim1], abs(d__1)) > *tolb) { /* count diagonal entries of R above TOLB */
+	    ++(*l);
+	}
+/* L20: */
+    }
+
+    if (wantv) {
+
+/* Copy the details of V, and form V. */
+
+	dlaset_("Full", p, p, &c_b12, &c_b12, &v[v_offset], ldv);
+	if (*p > 1) {
+	    i__1 = *p - 1;
+	    dlacpy_("Lower", &i__1, n, &b[b_dim1 + 2], ldb, &v[v_dim1 + 2],
+		    ldv);
+	}
+	i__1 = min(*p,*n);
+	dorg2r_(p, p, &i__1, &v[v_offset], ldv, &tau[1], &work[1], info);
+    }
+
+/* Clean up B */
+
+    i__1 = *l - 1;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *l;
+	for (i__ = j + 1; i__ <= i__2; ++i__) {
+	    b[i__ + j * b_dim1] = 0.;
+/* L30: */
+	}
+/* L40: */
+    }
+    if (*p > *l) {
+	i__1 = *p - *l;
+	dlaset_("Full", &i__1, n, &c_b12, &c_b12, &b[*l + 1 + b_dim1], ldb);
+    }
+
+    if (wantq) {
+
+/* Set Q = I and Update Q := Q*P */
+
+	dlaset_("Full", n, n, &c_b12, &c_b22, &q[q_offset], ldq);
+	dlapmt_(&forwrd, n, n, &q[q_offset], ldq, &iwork[1]);
+    }
+
+    if (*p >= *l && *n != *l) { /* nothing to compress when N == L */
+
+/* RQ factorization of (S11 S12): ( S11 S12 ) = ( 0 S12 )*Z */
+
+	dgerq2_(l, n, &b[b_offset], ldb, &tau[1], &work[1], info);
+
+/* Update A := A*Z' */
+
+	dormr2_("Right", "Transpose", m, n, l, &b[b_offset], ldb, &tau[1], &a[
+		a_offset], lda, &work[1], info);
+
+	if (wantq) {
+
+/* Update Q := Q*Z' */
+
+	    dormr2_("Right", "Transpose", n, n, l, &b[b_offset], ldb, &tau[1],
+		     &q[q_offset], ldq, &work[1], info);
+	}
+
+/* Clean up B */
+
+	i__1 = *n - *l;
+	dlaset_("Full", l, &i__1, &c_b12, &c_b12, &b[b_offset], ldb);
+	i__1 = *n;
+	for (j = *n - *l + 1; j <= i__1; ++j) { /* zero strict lower triangle of trailing L-by-L block */
+	    i__2 = *l;
+	    for (i__ = j - *n + *l + 1; i__ <= i__2; ++i__) {
+		b[i__ + j * b_dim1] = 0.;
+/* L50: */
+	    }
+/* L60: */
+	}
+
+    }
+
+/* Let N-L L */
+/* A = ( A11 A12 ) M, */
+
+/* then the following does the complete QR decomposition of A11: */
+
+/* A11 = U*( 0 T12 )*P1' */
+/* ( 0 0 ) */
+
+    i__1 = *n - *l;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	iwork[i__] = 0; /* reset pivot flags for the A11 factorization */
+/* L70: */
+    }
+    i__1 = *n - *l;
+    dgeqpf_(m, &i__1, &a[a_offset], lda, &iwork[1], &tau[1], &work[1], info);
+
+/* Determine the effective rank of A11 */
+
+    *k = 0;
+/* Computing MIN */
+    i__2 = *m, i__3 = *n - *l;
+    i__1 = min(i__2,i__3);
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((d__1 = a[i__ + i__ * a_dim1], abs(d__1)) > *tola) { /* count diagonal entries above TOLA */
+	    ++(*k);
+	}
+/* L80: */
+    }
+
+/* Update A12 := U'*A12, where A12 = A( 1:M, N-L+1:N ) */
+
+/* Computing MIN */
+    i__2 = *m, i__3 = *n - *l;
+    i__1 = min(i__2,i__3);
+    dorm2r_("Left", "Transpose", m, l, &i__1, &a[a_offset], lda, &tau[1], &a[(
+	    *n - *l + 1) * a_dim1 + 1], lda, &work[1], info);
+
+    if (wantu) {
+
+/* Copy the details of U, and form U */
+
+	dlaset_("Full", m, m, &c_b12, &c_b12, &u[u_offset], ldu);
+	if (*m > 1) {
+	    i__1 = *m - 1;
+	    i__2 = *n - *l;
+	    dlacpy_("Lower", &i__1, &i__2, &a[a_dim1 + 2], lda, &u[u_dim1 + 2]
+, ldu);
+	}
+/* Computing MIN */
+	i__2 = *m, i__3 = *n - *l;
+	i__1 = min(i__2,i__3);
+	dorg2r_(m, m, &i__1, &u[u_offset], ldu, &tau[1], &work[1], info);
+    }
+
+    if (wantq) {
+
+/* Update Q( 1:N, 1:N-L ) = Q( 1:N, 1:N-L )*P1 */
+
+	i__1 = *n - *l;
+	dlapmt_(&forwrd, n, &i__1, &q[q_offset], ldq, &iwork[1]);
+    }
+
+/* Clean up A: set the strictly lower triangular part of */
+/* A(1:K, 1:K) = 0, and A( K+1:M, 1:N-L ) = 0. */
+
+    i__1 = *k - 1;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *k;
+	for (i__ = j + 1; i__ <= i__2; ++i__) {
+	    a[i__ + j * a_dim1] = 0.;
+/* L90: */
+	}
+/* L100: */
+    }
+    if (*m > *k) {
+	i__1 = *m - *k;
+	i__2 = *n - *l;
+	dlaset_("Full", &i__1, &i__2, &c_b12, &c_b12, &a[*k + 1 + a_dim1],
+		lda);
+    }
+
+    if (*n - *l > *k) { /* only when T11 has nonzero width */
+
+/* RQ factorization of ( T11 T12 ) = ( 0 T12 )*Z1 */
+
+	i__1 = *n - *l;
+	dgerq2_(k, &i__1, &a[a_offset], lda, &tau[1], &work[1], info);
+
+	if (wantq) {
+
+/* Update Q( 1:N,1:N-L ) = Q( 1:N,1:N-L )*Z1' */
+
+	    i__1 = *n - *l;
+	    dormr2_("Right", "Transpose", n, &i__1, k, &a[a_offset], lda, &
+		    tau[1], &q[q_offset], ldq, &work[1], info);
+	}
+
+/* Clean up A */
+
+	i__1 = *n - *l - *k;
+	dlaset_("Full", k, &i__1, &c_b12, &c_b12, &a[a_offset], lda);
+	i__1 = *n - *l;
+	for (j = *n - *l - *k + 1; j <= i__1; ++j) { /* zero strict lower triangle of trailing K-by-K block */
+	    i__2 = *k;
+	    for (i__ = j - *n + *l + *k + 1; i__ <= i__2; ++i__) {
+		a[i__ + j * a_dim1] = 0.;
+/* L110: */
+	    }
+/* L120: */
+	}
+
+    }
+
+    if (*m > *k) {
+
+/* QR factorization of A( K+1:M,N-L+1:N ) */
+
+	i__1 = *m - *k;
+	dgeqr2_(&i__1, l, &a[*k + 1 + (*n - *l + 1) * a_dim1], lda, &tau[1], &
+		work[1], info);
+
+	if (wantu) {
+
+/* Update U(:,K+1:M) := U(:,K+1:M)*U1 */
+
+	    i__1 = *m - *k;
+/* Computing MIN */
+	    i__3 = *m - *k;
+	    i__2 = min(i__3,*l);
+	    dorm2r_("Right", "No transpose", m, &i__1, &i__2, &a[*k + 1 + (*n
+		    - *l + 1) * a_dim1], lda, &tau[1], &u[(*k + 1) * u_dim1 +
+		    1], ldu, &work[1], info);
+	}
+
+/* Clean up */
+
+	i__1 = *n;
+	for (j = *n - *l + 1; j <= i__1; ++j) { /* zero entries below the QR-factored block's diagonal */
+	    i__2 = *m;
+	    for (i__ = j - *n + *k + *l + 1; i__ <= i__2; ++i__) {
+		a[i__ + j * a_dim1] = 0.;
+/* L130: */
+	    }
+/* L140: */
+	}
+
+    }
+
+    return 0;
+
+/* End of DGGSVP */
+
+} /* dggsvp_ */
diff --git a/contrib/libs/clapack/dgsvj0.c b/contrib/libs/clapack/dgsvj0.c
new file mode 100644
index 0000000000..c10304ba54
--- /dev/null
+++ b/contrib/libs/clapack/dgsvj0.c
@@ -0,0 +1,1159 @@
+/* dgsvj0.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static doublereal c_b42 = 1.;
+
+/* Subroutine */ int dgsvj0_(char *jobv, integer *m, integer *n, doublereal *
+ a, integer *lda, doublereal *d__, doublereal *sva, integer *mv,
+ doublereal *v, integer *ldv, doublereal *eps, doublereal *sfmin,
+ doublereal *tol, integer *nsweep, doublereal *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, v_dim1, v_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ doublereal bigtheta;
+ integer pskipped, i__, p, q;
+ doublereal t, rootsfmin, cs, sn;
+ integer ir1, jbc;
+ doublereal big;
+ integer kbl, igl, ibr, jgl, nbl, mvl;
+ doublereal aapp, aapq, aaqq;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ integer ierr;
+ doublereal aapp0;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ doublereal temp1, apoaq, aqoap;
+ extern logical lsame_(char *, char *);
+ doublereal theta, small;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ doublereal fastr[5];
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ logical applv, rsvec;
+ extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *), drotm_(integer *, doublereal
+ *, integer *, doublereal *, integer *, doublereal *);
+ logical rotok;
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ integer ijblsk, swband, blskip;
+ doublereal mxaapq;
+ extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+ doublereal *, doublereal *);
+ doublereal thsign, mxsinj;
+ integer emptsw, notrot, iswrot, lkahead;
+ doublereal rootbig, rooteps;
+ integer rowskip;
+ doublereal roottol;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Zlatko Drmac of the University of Zagreb and -- */
+/* -- Kresimir Veselic of the Fernuniversitaet Hagen -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* This routine is also part of SIGMA (version 1.23, October 23. 2008.) */
+/* SIGMA is a library of algorithms for highly accurate algorithms for */
+/* computation of SVD, PSVD, QSVD, (H,K)-SVD, and for solution of the */
+/* eigenvalue problems Hx = lambda M x, H M x = lambda x with H, M > 0. */
+
+/* Scalar Arguments */
+
+
+/* Array Arguments */
+
+/* .. */
+
+/* Purpose */
+/* ~~~~~~~ */
+/* DGSVJ0 is called from DGESVJ as a pre-processor and that is its main */
+/* purpose. It applies Jacobi rotations in the same way as DGESVJ does, but */
+/* it does not check convergence (stopping criterion). Few tuning */
+/* parameters (marked by [TP]) are available for the implementer. */
+
+/* Further details */
+/* ~~~~~~~~~~~~~~~ */
+/* DGSVJ0 is used just to enable SGESVJ to call a simplified version of */
+/* itself to work on a submatrix of the original matrix. */
+
+/* Contributors */
+/* ~~~~~~~~~~~~ */
+/* Zlatko Drmac (Zagreb, Croatia) and Kresimir Veselic (Hagen, Germany) */
+
+/* Bugs, Examples and Comments */
+/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
+/* Please report all bugs and send interesting test examples and comments to */
+/* drmac@math.hr. Thank you. */
+
+/* Arguments */
+/* ~~~~~~~~~ */
+
+/* JOBV (input) CHARACTER*1 */
+/* Specifies whether the output from this procedure is used */
+/* to compute the matrix V: */
+/* = 'V': the product of the Jacobi rotations is accumulated */
+/* by postmultiplying the N-by-N array V. */
+/* (See the description of V.) */
+/* = 'A': the product of the Jacobi rotations is accumulated */
+/* by postmultiplying the MV-by-N array V. */
+/* (See the descriptions of MV and V.) */
+/* = 'N': the Jacobi rotations are not accumulated. */
+
+/* M (input) INTEGER */
+/* The number of rows of the input matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the input matrix A. */
+/* M >= N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, M-by-N matrix A, such that A*diag(D) represents */
+/* the input matrix. */
+/* On exit, */
+/* A_onexit * D_onexit represents the input matrix A*diag(D) */
+/* post-multiplied by a sequence of Jacobi rotations, where the */
+/* rotation threshold and the total number of sweeps are given in */
+/* TOL and NSWEEP, respectively. */
+/* (See the descriptions of D, TOL and NSWEEP.) */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* D (input/workspace/output) REAL array, dimension (N) */
+/* The array D accumulates the scaling factors from the fast scaled */
+/* Jacobi rotations. */
+/* On entry, A*diag(D) represents the input matrix. */
+/* On exit, A_onexit*diag(D_onexit) represents the input matrix */
+/* post-multiplied by a sequence of Jacobi rotations, where the */
+/* rotation threshold and the total number of sweeps are given in */
+/* TOL and NSWEEP, respectively. */
+/* (See the descriptions of A, TOL and NSWEEP.) */
+
+/* SVA (input/workspace/output) REAL array, dimension (N) */
+/* On entry, SVA contains the Euclidean norms of the columns of */
+/* the matrix A*diag(D). */
+/* On exit, SVA contains the Euclidean norms of the columns of */
+/* the matrix A_onexit*diag(D_onexit). */
+
+/* MV (input) INTEGER */
+/* If JOBV .EQ. 'A', then MV rows of V are post-multiplied by a */
+/* sequence of Jacobi rotations. */
+/* If JOBV = 'N', then MV is not referenced. */
+
+/* V (input/output) REAL array, dimension (LDV,N) */
+/* If JOBV .EQ. 'V' then N rows of V are post-multiplied by a */
+/* sequence of Jacobi rotations. */
+/* If JOBV .EQ. 'A' then MV rows of V are post-multiplied by a */
+/* sequence of Jacobi rotations. */
+/* If JOBV = 'N', then V is not referenced. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V, LDV >= 1. */
+/* If JOBV = 'V', LDV .GE. N. */
+/* If JOBV = 'A', LDV .GE. MV. */
+
+/* EPS (input) INTEGER */
+/* EPS = SLAMCH('Epsilon') */
+
+/* SFMIN (input) INTEGER */
+/* SFMIN = SLAMCH('Safe Minimum') */
+
+/* TOL (input) REAL */
+/* TOL is the threshold for Jacobi rotations. For a pair */
+/* A(:,p), A(:,q) of pivot columns, the Jacobi rotation is */
+/* applied only if DABS(COS(angle(A(:,p),A(:,q)))) .GT. TOL. */
+
+/* NSWEEP (input) INTEGER */
+/* NSWEEP is the number of sweeps of Jacobi rotations to be */
+/* performed. */
+
+/* WORK (workspace) REAL array, dimension LWORK. */
+
+/* LWORK (input) INTEGER */
+/* LWORK is the dimension of WORK. LWORK .GE. M. */
+
+/* INFO (output) INTEGER */
+/* = 0 : successful exit. */
+/* < 0 : if INFO = -i, then the i-th argument had an illegal value */
+
+/* Local Parameters */
+/* Local Scalars */
+/* Local Arrays */
+
+
+/* Intrinsic Functions */
+
+
+/* External Functions */
+
+
+/* External Subroutines */
+
+
+/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~| */
+
+ /* Parameter adjustments */
+ --sva;
+ --d__;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ --work;
+
+ /* Function Body */
+ applv = lsame_(jobv, "A");
+ rsvec = lsame_(jobv, "V");
+ if (! (rsvec || applv || lsame_(jobv, "N"))) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0 || *n > *m) {
+ *info = -3;
+ } else if (*lda < *m) {
+ *info = -5;
+ } else if (*mv < 0) {
+ *info = -8;
+ } else if (*ldv < *m) {
+ *info = -10;
+ } else if (*tol <= *eps) {
+ *info = -13;
+ } else if (*nsweep < 0) {
+ *info = -14;
+ } else if (*lwork < *m) {
+ *info = -16;
+ } else {
+ *info = 0;
+ }
+
+/* #:( */
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGSVJ0", &i__1);
+ return 0;
+ }
+
+ if (rsvec) {
+ mvl = *n;
+ } else if (applv) {
+ mvl = *mv;
+ }
+ rsvec = rsvec || applv;
+ rooteps = sqrt(*eps);
+ rootsfmin = sqrt(*sfmin);
+ small = *sfmin / *eps;
+ big = 1. / *sfmin;
+ rootbig = 1. / rootsfmin;
+ bigtheta = 1. / rooteps;
+ roottol = sqrt(*tol);
+
+
+/* -#- Row-cyclic Jacobi SVD algorithm with column pivoting -#- */
+
+ emptsw = *n * (*n - 1) / 2;
+ notrot = 0;
+ fastr[0] = 0.;
+
+/* -#- Row-cyclic pivot strategy with de Rijk's pivoting -#- */
+
+ swband = 0;
+/* [TP] SWBAND is a tuning parameter. It is meaningful and effective */
+/* if SGESVJ is used as a computational routine in the preconditioned */
+/* Jacobi SVD algorithm SGESVJ. For sweeps i=1:SWBAND the procedure */
+/* ...... */
+ kbl = min(8,*n);
+/* [TP] KBL is a tuning parameter that defines the tile size in the */
+/* tiling of the p-q loops of pivot pairs. In general, an optimal */
+/* value of KBL depends on the matrix dimensions and on the */
+/* parameters of the computer's memory. */
+
+ nbl = *n / kbl;
+ if (nbl * kbl != *n) {
+ ++nbl;
+ }
+/* Computing 2nd power */
+ i__1 = kbl;
+ blskip = i__1 * i__1 + 1;
+/* [TP] BLKSKIP is a tuning parameter that depends on SWBAND and KBL. */
+ rowskip = min(5,kbl);
+/* [TP] ROWSKIP is a tuning parameter. */
+ lkahead = 1;
+/* [TP] LKAHEAD is a tuning parameter. */
+ swband = 0;
+ pskipped = 0;
+
+ i__1 = *nsweep;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* .. go go go ... */
+
+ mxaapq = 0.;
+ mxsinj = 0.;
+ iswrot = 0;
+
+ notrot = 0;
+ pskipped = 0;
+
+ i__2 = nbl;
+ for (ibr = 1; ibr <= i__2; ++ibr) {
+ igl = (ibr - 1) * kbl + 1;
+
+/* Computing MIN */
+ i__4 = lkahead, i__5 = nbl - ibr;
+ i__3 = min(i__4,i__5);
+ for (ir1 = 0; ir1 <= i__3; ++ir1) {
+
+ igl += ir1 * kbl;
+
+/* Computing MIN */
+ i__5 = igl + kbl - 1, i__6 = *n - 1;
+ i__4 = min(i__5,i__6);
+ for (p = igl; p <= i__4; ++p) {
+/* .. de Rijk's pivoting */
+ i__5 = *n - p + 1;
+ q = idamax_(&i__5, &sva[p], &c__1) + p - 1;
+ if (p != q) {
+ dswap_(m, &a[p * a_dim1 + 1], &c__1, &a[q * a_dim1 +
+ 1], &c__1);
+ if (rsvec) {
+ dswap_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q *
+ v_dim1 + 1], &c__1);
+ }
+ temp1 = sva[p];
+ sva[p] = sva[q];
+ sva[q] = temp1;
+ temp1 = d__[p];
+ d__[p] = d__[q];
+ d__[q] = temp1;
+ }
+
+ if (ir1 == 0) {
+
+/* Column norms are periodically updated by explicit */
+/* norm computation. */
+/* Caveat: */
+/* Some BLAS implementations compute DNRM2(M,A(1,p),1) */
+/* as DSQRT(DDOT(M,A(1,p),1,A(1,p),1)), which may result in */
+/* overflow for ||A(:,p)||_2 > DSQRT(overflow_threshold), and */
+/* underflow for ||A(:,p)||_2 < DSQRT(underflow_threshold). */
+/* Hence, DNRM2 cannot be trusted, not even in the case when */
+/* the true norm is far from the under(over)flow boundaries. */
+/* If properly implemented DNRM2 is available, the IF-THEN-ELSE */
+/* below should read "AAPP = DNRM2( M, A(1,p), 1 ) * D(p)". */
+
+ if (sva[p] < rootbig && sva[p] > rootsfmin) {
+ sva[p] = dnrm2_(m, &a[p * a_dim1 + 1], &c__1) *
+ d__[p];
+ } else {
+ temp1 = 0.;
+ aapp = 0.;
+ dlassq_(m, &a[p * a_dim1 + 1], &c__1, &temp1, &
+ aapp);
+ sva[p] = temp1 * sqrt(aapp) * d__[p];
+ }
+ aapp = sva[p];
+ } else {
+ aapp = sva[p];
+ }
+
+ if (aapp > 0.) {
+
+ pskipped = 0;
+
+/* Computing MIN */
+ i__6 = igl + kbl - 1;
+ i__5 = min(i__6,*n);
+ for (q = p + 1; q <= i__5; ++q) {
+
+ aaqq = sva[q];
+ if (aaqq > 0.) {
+
+ aapp0 = aapp;
+ if (aaqq >= 1.) {
+ rotok = small * aapp <= aaqq;
+ if (aapp < big / aaqq) {
+ aapq = ddot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * d__[p] * d__[q] /
+ aaqq / aapp;
+ } else {
+ dcopy_(m, &a[p * a_dim1 + 1], &c__1, &
+ work[1], &c__1);
+ dlascl_("G", &c__0, &c__0, &aapp, &
+ d__[p], m, &c__1, &work[1],
+ lda, &ierr);
+ aapq = ddot_(m, &work[1], &c__1, &a[q
+ * a_dim1 + 1], &c__1) * d__[q]
+ / aaqq;
+ }
+ } else {
+ rotok = aapp <= aaqq / small;
+ if (aapp > small / aaqq) {
+ aapq = ddot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * d__[p] * d__[q] /
+ aaqq / aapp;
+ } else {
+ dcopy_(m, &a[q * a_dim1 + 1], &c__1, &
+ work[1], &c__1);
+ dlascl_("G", &c__0, &c__0, &aaqq, &
+ d__[q], m, &c__1, &work[1],
+ lda, &ierr);
+ aapq = ddot_(m, &work[1], &c__1, &a[p
+ * a_dim1 + 1], &c__1) * d__[p]
+ / aapp;
+ }
+ }
+
+/* Computing MAX */
+ d__1 = mxaapq, d__2 = abs(aapq);
+ mxaapq = max(d__1,d__2);
+
+/* TO rotate or NOT to rotate, THAT is the question ... */
+
+ if (abs(aapq) > *tol) {
+
+/* .. rotate */
+/* ROTATED = ROTATED + ONE */
+
+ if (ir1 == 0) {
+ notrot = 0;
+ pskipped = 0;
+ ++iswrot;
+ }
+
+ if (rotok) {
+
+ aqoap = aaqq / aapp;
+ apoaq = aapp / aaqq;
+ theta = (d__1 = aqoap - apoaq, abs(
+ d__1)) * -.5 / aapq;
+
+ if (abs(theta) > bigtheta) {
+
+ t = .5 / theta;
+ fastr[2] = t * d__[p] / d__[q];
+ fastr[3] = -t * d__[q] / d__[p];
+ drotm_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1],
+ &c__1, fastr);
+ if (rsvec) {
+ drotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q *
+ v_dim1 + 1], &c__1, fastr);
+ }
+/* Computing MAX */
+ d__1 = 0., d__2 = t * apoaq *
+ aapq + 1.;
+ sva[q] = aaqq * sqrt((max(d__1,
+ d__2)));
+ aapp *= sqrt(1. - t * aqoap *
+ aapq);
+/* Computing MAX */
+ d__1 = mxsinj, d__2 = abs(t);
+ mxsinj = max(d__1,d__2);
+
+ } else {
+
+/* .. choose correct signum for THETA and rotate */
+
+ thsign = -d_sign(&c_b42, &aapq);
+ t = 1. / (theta + thsign * sqrt(
+ theta * theta + 1.));
+ cs = sqrt(1. / (t * t + 1.));
+ sn = t * cs;
+
+/* Computing MAX */
+ d__1 = mxsinj, d__2 = abs(sn);
+ mxsinj = max(d__1,d__2);
+/* Computing MAX */
+ d__1 = 0., d__2 = t * apoaq *
+ aapq + 1.;
+ sva[q] = aaqq * sqrt((max(d__1,
+ d__2)));
+/* Computing MAX */
+ d__1 = 0., d__2 = 1. - t * aqoap *
+ aapq;
+ aapp *= sqrt((max(d__1,d__2)));
+
+ apoaq = d__[p] / d__[q];
+ aqoap = d__[q] / d__[p];
+ if (d__[p] >= 1.) {
+ if (d__[q] >= 1.) {
+ fastr[2] = t * apoaq;
+ fastr[3] = -t * aqoap;
+ d__[p] *= cs;
+ d__[q] *= cs;
+ drotm_(m, &a[p * a_dim1 + 1], &c__1, &a[q *
+ a_dim1 + 1], &c__1, fastr);
+ if (rsvec) {
+ drotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[
+ q * v_dim1 + 1], &c__1, fastr);
+ }
+ } else {
+ d__1 = -t * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ d__[p] *= cs;
+ d__[q] /= cs;
+ if (rsvec) {
+ d__1 = -t * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ }
+ }
+ } else {
+ if (d__[q] >= 1.) {
+ d__1 = t * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ d__[p] /= cs;
+ d__[q] *= cs;
+ if (rsvec) {
+ d__1 = t * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ }
+ } else {
+ if (d__[p] >= d__[q]) {
+ d__1 = -t * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ d__[p] *= cs;
+ d__[q] /= cs;
+ if (rsvec) {
+ d__1 = -t * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ }
+ } else {
+ d__1 = t * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ d__[p] /= cs;
+ d__[q] *= cs;
+ if (rsvec) {
+ d__1 = t * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ }
+ }
+ }
+ }
+ }
+
+ } else {
+/* .. have to use modified Gram-Schmidt like transformation */
+ dcopy_(m, &a[p * a_dim1 + 1], &c__1, &
+ work[1], &c__1);
+ dlascl_("G", &c__0, &c__0, &aapp, &
+ c_b42, m, &c__1, &work[1],
+ lda, &ierr);
+ dlascl_("G", &c__0, &c__0, &aaqq, &
+ c_b42, m, &c__1, &a[q *
+ a_dim1 + 1], lda, &ierr);
+ temp1 = -aapq * d__[p] / d__[q];
+ daxpy_(m, &temp1, &work[1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ dlascl_("G", &c__0, &c__0, &c_b42, &
+ aaqq, m, &c__1, &a[q * a_dim1
+ + 1], lda, &ierr);
+/* Computing MAX */
+ d__1 = 0., d__2 = 1. - aapq * aapq;
+ sva[q] = aaqq * sqrt((max(d__1,d__2)))
+ ;
+ mxsinj = max(mxsinj,*sfmin);
+ }
+/* END IF ROTOK THEN ... ELSE */
+
+/* In the case of cancellation in updating SVA(q), SVA(p) */
+/* recompute SVA(q), SVA(p). */
+/* Computing 2nd power */
+ d__1 = sva[q] / aaqq;
+ if (d__1 * d__1 <= rooteps) {
+ if (aaqq < rootbig && aaqq >
+ rootsfmin) {
+ sva[q] = dnrm2_(m, &a[q * a_dim1
+ + 1], &c__1) * d__[q];
+ } else {
+ t = 0.;
+ aaqq = 0.;
+ dlassq_(m, &a[q * a_dim1 + 1], &
+ c__1, &t, &aaqq);
+ sva[q] = t * sqrt(aaqq) * d__[q];
+ }
+ }
+ if (aapp / aapp0 <= rooteps) {
+ if (aapp < rootbig && aapp >
+ rootsfmin) {
+ aapp = dnrm2_(m, &a[p * a_dim1 +
+ 1], &c__1) * d__[p];
+ } else {
+ t = 0.;
+ aapp = 0.;
+ dlassq_(m, &a[p * a_dim1 + 1], &
+ c__1, &t, &aapp);
+ aapp = t * sqrt(aapp) * d__[p];
+ }
+ sva[p] = aapp;
+ }
+
+ } else {
+/* A(:,p) and A(:,q) already numerically orthogonal */
+ if (ir1 == 0) {
+ ++notrot;
+ }
+ ++pskipped;
+ }
+ } else {
+/* A(:,q) is zero column */
+ if (ir1 == 0) {
+ ++notrot;
+ }
+ ++pskipped;
+ }
+
+ if (i__ <= swband && pskipped > rowskip) {
+ if (ir1 == 0) {
+ aapp = -aapp;
+ }
+ notrot = 0;
+ goto L2103;
+ }
+
+/* L2002: */
+ }
+/* END q-LOOP */
+
+L2103:
+/* bailed out of q-loop */
+ sva[p] = aapp;
+ } else {
+ sva[p] = aapp;
+ if (ir1 == 0 && aapp == 0.) {
+/* Computing MIN */
+ i__5 = igl + kbl - 1;
+ notrot = notrot + min(i__5,*n) - p;
+ }
+ }
+
+/* L2001: */
+ }
+/* end of the p-loop */
+/* end of doing the block ( ibr, ibr ) */
+/* L1002: */
+ }
+/* end of ir1-loop */
+
+/* ........................................................ */
+/* ... go to the off diagonal blocks */
+
+ igl = (ibr - 1) * kbl + 1;
+
+ i__3 = nbl;
+ for (jbc = ibr + 1; jbc <= i__3; ++jbc) {
+
+ jgl = (jbc - 1) * kbl + 1;
+
+/* doing the block at ( ibr, jbc ) */
+
+ ijblsk = 0;
+/* Computing MIN */
+ i__5 = igl + kbl - 1;
+ i__4 = min(i__5,*n);
+ for (p = igl; p <= i__4; ++p) {
+
+ aapp = sva[p];
+
+ if (aapp > 0.) {
+
+ pskipped = 0;
+
+/* Computing MIN */
+ i__6 = jgl + kbl - 1;
+ i__5 = min(i__6,*n);
+ for (q = jgl; q <= i__5; ++q) {
+
+ aaqq = sva[q];
+
+ if (aaqq > 0.) {
+ aapp0 = aapp;
+
+/* -#- M x 2 Jacobi SVD -#- */
+
+/* -#- Safe Gram matrix computation -#- */
+
+ if (aaqq >= 1.) {
+ if (aapp >= aaqq) {
+ rotok = small * aapp <= aaqq;
+ } else {
+ rotok = small * aaqq <= aapp;
+ }
+ if (aapp < big / aaqq) {
+ aapq = ddot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * d__[p] * d__[q] /
+ aaqq / aapp;
+ } else {
+ dcopy_(m, &a[p * a_dim1 + 1], &c__1, &
+ work[1], &c__1);
+ dlascl_("G", &c__0, &c__0, &aapp, &
+ d__[p], m, &c__1, &work[1],
+ lda, &ierr);
+ aapq = ddot_(m, &work[1], &c__1, &a[q
+ * a_dim1 + 1], &c__1) * d__[q]
+ / aaqq;
+ }
+ } else {
+ if (aapp >= aaqq) {
+ rotok = aapp <= aaqq / small;
+ } else {
+ rotok = aaqq <= aapp / small;
+ }
+ if (aapp > small / aaqq) {
+ aapq = ddot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * d__[p] * d__[q] /
+ aaqq / aapp;
+ } else {
+ dcopy_(m, &a[q * a_dim1 + 1], &c__1, &
+ work[1], &c__1);
+ dlascl_("G", &c__0, &c__0, &aaqq, &
+ d__[q], m, &c__1, &work[1],
+ lda, &ierr);
+ aapq = ddot_(m, &work[1], &c__1, &a[p
+ * a_dim1 + 1], &c__1) * d__[p]
+ / aapp;
+ }
+ }
+
+/* Computing MAX */
+ d__1 = mxaapq, d__2 = abs(aapq);
+ mxaapq = max(d__1,d__2);
+
+/* TO rotate or NOT to rotate, THAT is the question ... */
+
+ if (abs(aapq) > *tol) {
+ notrot = 0;
+/* ROTATED = ROTATED + 1 */
+ pskipped = 0;
+ ++iswrot;
+
+ if (rotok) {
+
+ aqoap = aaqq / aapp;
+ apoaq = aapp / aaqq;
+ theta = (d__1 = aqoap - apoaq, abs(
+ d__1)) * -.5 / aapq;
+ if (aaqq > aapp0) {
+ theta = -theta;
+ }
+
+ if (abs(theta) > bigtheta) {
+ t = .5 / theta;
+ fastr[2] = t * d__[p] / d__[q];
+ fastr[3] = -t * d__[q] / d__[p];
+ drotm_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1],
+ &c__1, fastr);
+ if (rsvec) {
+ drotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q *
+ v_dim1 + 1], &c__1, fastr);
+ }
+/* Computing MAX */
+ d__1 = 0., d__2 = t * apoaq *
+ aapq + 1.;
+ sva[q] = aaqq * sqrt((max(d__1,
+ d__2)));
+/* Computing MAX */
+ d__1 = 0., d__2 = 1. - t * aqoap *
+ aapq;
+ aapp *= sqrt((max(d__1,d__2)));
+/* Computing MAX */
+ d__1 = mxsinj, d__2 = abs(t);
+ mxsinj = max(d__1,d__2);
+ } else {
+
+/* .. choose correct signum for THETA and rotate */
+
+ thsign = -d_sign(&c_b42, &aapq);
+ if (aaqq > aapp0) {
+ thsign = -thsign;
+ }
+ t = 1. / (theta + thsign * sqrt(
+ theta * theta + 1.));
+ cs = sqrt(1. / (t * t + 1.));
+ sn = t * cs;
+/* Computing MAX */
+ d__1 = mxsinj, d__2 = abs(sn);
+ mxsinj = max(d__1,d__2);
+/* Computing MAX */
+ d__1 = 0., d__2 = t * apoaq *
+ aapq + 1.;
+ sva[q] = aaqq * sqrt((max(d__1,
+ d__2)));
+ aapp *= sqrt(1. - t * aqoap *
+ aapq);
+
+ apoaq = d__[p] / d__[q];
+ aqoap = d__[q] / d__[p];
+ if (d__[p] >= 1.) {
+
+ if (d__[q] >= 1.) {
+ fastr[2] = t * apoaq;
+ fastr[3] = -t * aqoap;
+ d__[p] *= cs;
+ d__[q] *= cs;
+ drotm_(m, &a[p * a_dim1 + 1], &c__1, &a[q *
+ a_dim1 + 1], &c__1, fastr);
+ if (rsvec) {
+ drotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[
+ q * v_dim1 + 1], &c__1, fastr);
+ }
+ } else {
+ d__1 = -t * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ if (rsvec) {
+ d__1 = -t * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ }
+ d__[p] *= cs;
+ d__[q] /= cs;
+ }
+ } else {
+ if (d__[q] >= 1.) {
+ d__1 = t * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ if (rsvec) {
+ d__1 = t * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ }
+ d__[p] /= cs;
+ d__[q] *= cs;
+ } else {
+ if (d__[p] >= d__[q]) {
+ d__1 = -t * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ d__[p] *= cs;
+ d__[q] /= cs;
+ if (rsvec) {
+ d__1 = -t * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ }
+ } else {
+ d__1 = t * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ d__[p] /= cs;
+ d__[q] *= cs;
+ if (rsvec) {
+ d__1 = t * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ }
+ }
+ }
+ }
+ }
+
+ } else {
+ if (aapp > aaqq) {
+ dcopy_(m, &a[p * a_dim1 + 1], &
+ c__1, &work[1], &c__1);
+ dlascl_("G", &c__0, &c__0, &aapp,
+ &c_b42, m, &c__1, &work[1]
+, lda, &ierr);
+ dlascl_("G", &c__0, &c__0, &aaqq,
+ &c_b42, m, &c__1, &a[q *
+ a_dim1 + 1], lda, &ierr);
+ temp1 = -aapq * d__[p] / d__[q];
+ daxpy_(m, &temp1, &work[1], &c__1,
+ &a[q * a_dim1 + 1], &
+ c__1);
+ dlascl_("G", &c__0, &c__0, &c_b42,
+ &aaqq, m, &c__1, &a[q *
+ a_dim1 + 1], lda, &ierr);
+/* Computing MAX */
+ d__1 = 0., d__2 = 1. - aapq *
+ aapq;
+ sva[q] = aaqq * sqrt((max(d__1,
+ d__2)));
+ mxsinj = max(mxsinj,*sfmin);
+ } else {
+ dcopy_(m, &a[q * a_dim1 + 1], &
+ c__1, &work[1], &c__1);
+ dlascl_("G", &c__0, &c__0, &aaqq,
+ &c_b42, m, &c__1, &work[1]
+, lda, &ierr);
+ dlascl_("G", &c__0, &c__0, &aapp,
+ &c_b42, m, &c__1, &a[p *
+ a_dim1 + 1], lda, &ierr);
+ temp1 = -aapq * d__[q] / d__[p];
+ daxpy_(m, &temp1, &work[1], &c__1,
+ &a[p * a_dim1 + 1], &
+ c__1);
+ dlascl_("G", &c__0, &c__0, &c_b42,
+ &aapp, m, &c__1, &a[p *
+ a_dim1 + 1], lda, &ierr);
+/* Computing MAX */
+ d__1 = 0., d__2 = 1. - aapq *
+ aapq;
+ sva[p] = aapp * sqrt((max(d__1,
+ d__2)));
+ mxsinj = max(mxsinj,*sfmin);
+ }
+ }
+/* END IF ROTOK THEN ... ELSE */
+
+/* In the case of cancellation in updating SVA(q) */
+/* .. recompute SVA(q) */
+/* Computing 2nd power */
+ d__1 = sva[q] / aaqq;
+ if (d__1 * d__1 <= rooteps) {
+ if (aaqq < rootbig && aaqq >
+ rootsfmin) {
+ sva[q] = dnrm2_(m, &a[q * a_dim1
+ + 1], &c__1) * d__[q];
+ } else {
+ t = 0.;
+ aaqq = 0.;
+ dlassq_(m, &a[q * a_dim1 + 1], &
+ c__1, &t, &aaqq);
+ sva[q] = t * sqrt(aaqq) * d__[q];
+ }
+ }
+/* Computing 2nd power */
+ d__1 = aapp / aapp0;
+ if (d__1 * d__1 <= rooteps) {
+ if (aapp < rootbig && aapp >
+ rootsfmin) {
+ aapp = dnrm2_(m, &a[p * a_dim1 +
+ 1], &c__1) * d__[p];
+ } else {
+ t = 0.;
+ aapp = 0.;
+ dlassq_(m, &a[p * a_dim1 + 1], &
+ c__1, &t, &aapp);
+ aapp = t * sqrt(aapp) * d__[p];
+ }
+ sva[p] = aapp;
+ }
+/* end of OK rotation */
+ } else {
+ ++notrot;
+ ++pskipped;
+ ++ijblsk;
+ }
+ } else {
+ ++notrot;
+ ++pskipped;
+ ++ijblsk;
+ }
+
+ if (i__ <= swband && ijblsk >= blskip) {
+ sva[p] = aapp;
+ notrot = 0;
+ goto L2011;
+ }
+ if (i__ <= swband && pskipped > rowskip) {
+ aapp = -aapp;
+ notrot = 0;
+ goto L2203;
+ }
+
+/* L2200: */
+ }
+/* end of the q-loop */
+L2203:
+
+ sva[p] = aapp;
+
+ } else {
+ if (aapp == 0.) {
+/* Computing MIN */
+ i__5 = jgl + kbl - 1;
+ notrot = notrot + min(i__5,*n) - jgl + 1;
+ }
+ if (aapp < 0.) {
+ notrot = 0;
+ }
+ }
+/* L2100: */
+ }
+/* end of the p-loop */
+/* L2010: */
+ }
+/* end of the jbc-loop */
+L2011:
+/* 2011 bailed out of the jbc-loop */
+/* Computing MIN */
+ i__4 = igl + kbl - 1;
+ i__3 = min(i__4,*n);
+ for (p = igl; p <= i__3; ++p) {
+ sva[p] = (d__1 = sva[p], abs(d__1));
+/* L2012: */
+ }
+
+/* L2000: */
+ }
+/* 2000 :: end of the ibr-loop */
+
+/* .. update SVA(N) */
+ if (sva[*n] < rootbig && sva[*n] > rootsfmin) {
+ sva[*n] = dnrm2_(m, &a[*n * a_dim1 + 1], &c__1) * d__[*n];
+ } else {
+ t = 0.;
+ aapp = 0.;
+ dlassq_(m, &a[*n * a_dim1 + 1], &c__1, &t, &aapp);
+ sva[*n] = t * sqrt(aapp) * d__[*n];
+ }
+
+/* Additional steering devices */
+
+ if (i__ < swband && (mxaapq <= roottol || iswrot <= *n)) {
+ swband = i__;
+ }
+
+ if (i__ > swband + 1 && mxaapq < (doublereal) (*n) * *tol && (
+ doublereal) (*n) * mxaapq * mxsinj < *tol) {
+ goto L1994;
+ }
+
+ if (notrot >= emptsw) {
+ goto L1994;
+ }
+/* L1993: */
+ }
+/* end i=1:NSWEEP loop */
+/* #:) Reaching this point means that the procedure has completed the given */
+/* number of iterations. */
+ *info = *nsweep - 1;
+ goto L1995;
+L1994:
+/* #:) Reaching this point means that during the i-th sweep all pivots were */
+/* below the given tolerance, causing early exit. */
+
+ *info = 0;
+/* #:) INFO = 0 confirms successful iterations. */
+L1995:
+
+/* Sort the vector D. */
+ i__1 = *n - 1;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *n - p + 1;
+ q = idamax_(&i__2, &sva[p], &c__1) + p - 1;
+ if (p != q) {
+ temp1 = sva[p];
+ sva[p] = sva[q];
+ sva[q] = temp1;
+ temp1 = d__[p];
+ d__[p] = d__[q];
+ d__[q] = temp1;
+ dswap_(m, &a[p * a_dim1 + 1], &c__1, &a[q * a_dim1 + 1], &c__1);
+ if (rsvec) {
+ dswap_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ }
+ }
+/* L5991: */
+ }
+
+ return 0;
+/* .. */
+/* .. END OF DGSVJ0 */
+/* .. */
+} /* dgsvj0_ */
diff --git a/contrib/libs/clapack/dgsvj1.c b/contrib/libs/clapack/dgsvj1.c
new file mode 100644
index 0000000000..eb6a967482
--- /dev/null
+++ b/contrib/libs/clapack/dgsvj1.c
@@ -0,0 +1,798 @@
+/* dgsvj1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static doublereal c_b35 = 1.;
+
+/* Subroutine */ int dgsvj1_(char *jobv, integer *m, integer *n, integer *n1,
+ doublereal *a, integer *lda, doublereal *d__, doublereal *sva,
+ integer *mv, doublereal *v, integer *ldv, doublereal *eps, doublereal
+ *sfmin, doublereal *tol, integer *nsweep, doublereal *work, integer *
+ lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, v_dim1, v_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ doublereal bigtheta;
+ integer pskipped, i__, p, q;
+ doublereal t, rootsfmin, cs, sn;
+ integer jbc;
+ doublereal big;
+ integer kbl, igl, ibr, jgl, mvl, nblc;
+ doublereal aapp, aapq, aaqq;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ integer nblr, ierr;
+ doublereal aapp0;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ doublereal temp1, large, apoaq, aqoap;
+ extern logical lsame_(char *, char *);
+ doublereal theta, small;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ doublereal fastr[5];
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ logical applv, rsvec;
+ extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *), drotm_(integer *, doublereal
+ *, integer *, doublereal *, integer *, doublereal *);
+ logical rotok;
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ integer ijblsk, swband, blskip;
+ doublereal mxaapq;
+ extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+ doublereal *, doublereal *);
+ doublereal thsign, mxsinj;
+ integer emptsw, notrot, iswrot;
+ doublereal rootbig, rooteps;
+ integer rowskip;
+ doublereal roottol;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Zlatko Drmac of the University of Zagreb and -- */
+/* -- Kresimir Veselic of the Fernuniversitaet Hagen -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* This routine is also part of SIGMA (version 1.23, October 23. 2008.) */
+/* SIGMA is a library of algorithms for highly accurate algorithms for */
+/* computation of SVD, PSVD, QSVD, (H,K)-SVD, and for solution of the */
+/* eigenvalue problems Hx = lambda M x, H M x = lambda x with H, M > 0. */
+
+/* -#- Scalar Arguments -#- */
+
+
+/* -#- Array Arguments -#- */
+
+/* .. */
+
+/* Purpose */
+/* ~~~~~~~ */
+/* DGSVJ1 is called from SGESVJ as a pre-processor and that is its main */
+/* purpose. It applies Jacobi rotations in the same way as SGESVJ does, but */
+/* it targets only particular pivots and it does not check convergence */
+/* (stopping criterion). Few tuning parameters (marked by [TP]) are */
+/* available for the implementer. */
+
+/* Further details */
+/* ~~~~~~~~~~~~~~~ */
+/* DGSVJ1 applies few sweeps of Jacobi rotations in the column space of */
+/* the input M-by-N matrix A. The pivot pairs are taken from the (1,2) */
+/* off-diagonal block in the corresponding N-by-N Gram matrix A^T * A. The */
+/* block-entries (tiles) of the (1,2) off-diagonal block are marked by the */
+/* [x]'s in the following scheme: */
+
+/* | * * * [x] [x] [x]| */
+/* | * * * [x] [x] [x]| Row-cycling in the nblr-by-nblc [x] blocks. */
+/* | * * * [x] [x] [x]| Row-cyclic pivoting inside each [x] block. */
+/* |[x] [x] [x] * * * | */
+/* |[x] [x] [x] * * * | */
+/* |[x] [x] [x] * * * | */
+
+/* In terms of the columns of A, the first N1 columns are rotated 'against' */
+/* the remaining N-N1 columns, trying to increase the angle between the */
+/* corresponding subspaces. The off-diagonal block is N1-by(N-N1) and it is */
+/* tiled using quadratic tiles of side KBL. Here, KBL is a tuning parameter. */
+/* The number of sweeps is given in NSWEEP and the orthogonality threshold */
+/* is given in TOL. */
+
+/* Contributors */
+/* ~~~~~~~~~~~~ */
+/* Zlatko Drmac (Zagreb, Croatia) and Kresimir Veselic (Hagen, Germany) */
+
+/* Arguments */
+/* ~~~~~~~~~ */
+
+/* JOBV (input) CHARACTER*1 */
+/* Specifies whether the output from this procedure is used */
+/* to compute the matrix V: */
+/* = 'V': the product of the Jacobi rotations is accumulated */
+/* by postmultiplying the N-by-N array V. */
+/* (See the description of V.) */
+/* = 'A': the product of the Jacobi rotations is accumulated */
+/* by postmultiplying the MV-by-N array V. */
+/* (See the descriptions of MV and V.) */
+/* = 'N': the Jacobi rotations are not accumulated. */
+
+/* M (input) INTEGER */
+/* The number of rows of the input matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the input matrix A. */
+/* M >= N >= 0. */
+
+/* N1 (input) INTEGER */
+/* N1 specifies the 2 x 2 block partition, the first N1 columns are */
+/* rotated 'against' the remaining N-N1 columns of A. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, M-by-N matrix A, such that A*diag(D) represents */
+/* the input matrix. */
+/* On exit, */
+/* A_onexit * D_onexit represents the input matrix A*diag(D) */
+/* post-multiplied by a sequence of Jacobi rotations, where the */
+/* rotation threshold and the total number of sweeps are given in */
+/* TOL and NSWEEP, respectively. */
+/* (See the descriptions of N1, D, TOL and NSWEEP.) */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* D (input/workspace/output) REAL array, dimension (N) */
+/* The array D accumulates the scaling factors from the fast scaled */
+/* Jacobi rotations. */
+/* On entry, A*diag(D) represents the input matrix. */
+/* On exit, A_onexit*diag(D_onexit) represents the input matrix */
+/* post-multiplied by a sequence of Jacobi rotations, where the */
+/* rotation threshold and the total number of sweeps are given in */
+/* TOL and NSWEEP, respectively. */
+/* (See the descriptions of N1, A, TOL and NSWEEP.) */
+
+/* SVA (input/workspace/output) REAL array, dimension (N) */
+/* On entry, SVA contains the Euclidean norms of the columns of */
+/* the matrix A*diag(D). */
+/* On exit, SVA contains the Euclidean norms of the columns of */
+/* the matrix A_onexit*diag(D_onexit). */
+
+/* MV (input) INTEGER */
+/* If JOBV .EQ. 'A', then MV rows of V are post-multiplied by a */
+/* sequence of Jacobi rotations. */
+/* If JOBV = 'N', then MV is not referenced. */
+
+/* V (input/output) REAL array, dimension (LDV,N) */
+/* If JOBV .EQ. 'V' then N rows of V are post-multiplied by a */
+/* sequence of Jacobi rotations. */
+/* If JOBV .EQ. 'A' then MV rows of V are post-multiplied by a */
+/* sequence of Jacobi rotations. */
+/* If JOBV = 'N', then V is not referenced. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V, LDV >= 1. */
+/* If JOBV = 'V', LDV .GE. N. */
+/* If JOBV = 'A', LDV .GE. MV. */
+
+/* EPS (input) INTEGER */
+/* EPS = SLAMCH('Epsilon') */
+
+/* SFMIN (input) INTEGER */
+/* SFMIN = SLAMCH('Safe Minimum') */
+
+/* TOL (input) REAL */
+/* TOL is the threshold for Jacobi rotations. For a pair */
+/* A(:,p), A(:,q) of pivot columns, the Jacobi rotation is */
+/* applied only if DABS(COS(angle(A(:,p),A(:,q)))) .GT. TOL. */
+
+/* NSWEEP (input) INTEGER */
+/* NSWEEP is the number of sweeps of Jacobi rotations to be */
+/* performed. */
+
+/* WORK (workspace) REAL array, dimension LWORK. */
+
+/* LWORK (input) INTEGER */
+/* LWORK is the dimension of WORK. LWORK .GE. M. */
+
+/* INFO (output) INTEGER */
+/* = 0 : successful exit. */
+/* < 0 : if INFO = -i, then the i-th argument had an illegal value */
+
+/* -#- Local Parameters -#- */
+
+/* -#- Local Scalars -#- */
+
+
+/* Local Arrays */
+
+
+/* Intrinsic Functions */
+
+
+/* External Functions */
+
+
+/* External Subroutines */
+
+
+
+ /* Parameter adjustments */
+ --sva;
+ --d__;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ --work;
+
+ /* Function Body */
+ applv = lsame_(jobv, "A");
+ rsvec = lsame_(jobv, "V");
+ if (! (rsvec || applv || lsame_(jobv, "N"))) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0 || *n > *m) {
+ *info = -3;
+ } else if (*n1 < 0) {
+ *info = -4;
+ } else if (*lda < *m) {
+ *info = -6;
+ } else if (*mv < 0) {
+ *info = -9;
+ } else if (*ldv < *m) {
+ *info = -11;
+ } else if (*tol <= *eps) {
+ *info = -14;
+ } else if (*nsweep < 0) {
+ *info = -15;
+ } else if (*lwork < *m) {
+ *info = -17;
+ } else {
+ *info = 0;
+ }
+
+/* #:( */
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGSVJ1", &i__1);
+ return 0;
+ }
+
+ if (rsvec) {
+ mvl = *n;
+ } else if (applv) {
+ mvl = *mv;
+ }
+ rsvec = rsvec || applv;
+ rooteps = sqrt(*eps);
+ rootsfmin = sqrt(*sfmin);
+ small = *sfmin / *eps;
+ big = 1. / *sfmin;
+ rootbig = 1. / rootsfmin;
+ large = big / sqrt((doublereal) (*m * *n));
+ bigtheta = 1. / rooteps;
+ roottol = sqrt(*tol);
+
+/* -#- Initialize the right singular vector matrix -#- */
+
+/* RSVEC = LSAME( JOBV, 'Y' ) */
+
+ emptsw = *n1 * (*n - *n1);
+ notrot = 0;
+ fastr[0] = 0.;
+
+/* -#- Row-cyclic pivot strategy with de Rijk's pivoting -#- */
+
+ kbl = min(8,*n);
+ nblr = *n1 / kbl;
+ if (nblr * kbl != *n1) {
+ ++nblr;
+ }
+/* .. the tiling is nblr-by-nblc [tiles] */
+ nblc = (*n - *n1) / kbl;
+ if (nblc * kbl != *n - *n1) {
+ ++nblc;
+ }
+/* Computing 2nd power */
+ i__1 = kbl;
+ blskip = i__1 * i__1 + 1;
+/* [TP] BLSKIP is a tuning parameter that depends on SWBAND and KBL. */
+ rowskip = min(5,kbl);
+/* [TP] ROWSKIP is a tuning parameter. */
+ swband = 0;
+/* [TP] SWBAND is a tuning parameter. It is meaningful and effective */
+/* if SGESVJ is used as a computational routine in the preconditioned */
+/* Jacobi SVD algorithm SGESVJ. */
+
+
+/* | * * * [x] [x] [x]| */
+/* | * * * [x] [x] [x]| Row-cycling in the nblr-by-nblc [x] blocks. */
+/* | * * * [x] [x] [x]| Row-cyclic pivoting inside each [x] block. */
+/* |[x] [x] [x] * * * | */
+/* |[x] [x] [x] * * * | */
+/* |[x] [x] [x] * * * | */
+
+
+ i__1 = *nsweep;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* .. go go go ... */
+
+ mxaapq = 0.;
+ mxsinj = 0.;
+ iswrot = 0;
+
+ notrot = 0;
+ pskipped = 0;
+
+ i__2 = nblr;
+ for (ibr = 1; ibr <= i__2; ++ibr) {
+ igl = (ibr - 1) * kbl + 1;
+
+
+/* ........................................................ */
+/* ... go to the off diagonal blocks */
+ igl = (ibr - 1) * kbl + 1;
+ i__3 = nblc;
+ for (jbc = 1; jbc <= i__3; ++jbc) {
+ jgl = *n1 + (jbc - 1) * kbl + 1;
+/* doing the block at ( ibr, jbc ) */
+ ijblsk = 0;
+/* Computing MIN */
+ i__5 = igl + kbl - 1;
+ i__4 = min(i__5,*n1);
+ for (p = igl; p <= i__4; ++p) {
+ aapp = sva[p];
+ if (aapp > 0.) {
+ pskipped = 0;
+/* Computing MIN */
+ i__6 = jgl + kbl - 1;
+ i__5 = min(i__6,*n);
+ for (q = jgl; q <= i__5; ++q) {
+
+ aaqq = sva[q];
+ if (aaqq > 0.) {
+ aapp0 = aapp;
+
+/* -#- M x 2 Jacobi SVD -#- */
+
+/* -#- Safe Gram matrix computation -#- */
+
+ if (aaqq >= 1.) {
+ if (aapp >= aaqq) {
+ rotok = small * aapp <= aaqq;
+ } else {
+ rotok = small * aaqq <= aapp;
+ }
+ if (aapp < big / aaqq) {
+ aapq = ddot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * d__[p] * d__[q] /
+ aaqq / aapp;
+ } else {
+ dcopy_(m, &a[p * a_dim1 + 1], &c__1, &
+ work[1], &c__1);
+ dlascl_("G", &c__0, &c__0, &aapp, &
+ d__[p], m, &c__1, &work[1],
+ lda, &ierr);
+ aapq = ddot_(m, &work[1], &c__1, &a[q
+ * a_dim1 + 1], &c__1) * d__[q]
+ / aaqq;
+ }
+ } else {
+ if (aapp >= aaqq) {
+ rotok = aapp <= aaqq / small;
+ } else {
+ rotok = aaqq <= aapp / small;
+ }
+ if (aapp > small / aaqq) {
+ aapq = ddot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * d__[p] * d__[q] /
+ aaqq / aapp;
+ } else {
+ dcopy_(m, &a[q * a_dim1 + 1], &c__1, &
+ work[1], &c__1);
+ dlascl_("G", &c__0, &c__0, &aaqq, &
+ d__[q], m, &c__1, &work[1],
+ lda, &ierr);
+ aapq = ddot_(m, &work[1], &c__1, &a[p
+ * a_dim1 + 1], &c__1) * d__[p]
+ / aapp;
+ }
+ }
+/* Computing MAX */
+ d__1 = mxaapq, d__2 = abs(aapq);
+ mxaapq = max(d__1,d__2);
+/* TO rotate or NOT to rotate, THAT is the question ... */
+
+ if (abs(aapq) > *tol) {
+ notrot = 0;
+/* ROTATED = ROTATED + 1 */
+ pskipped = 0;
+ ++iswrot;
+
+ if (rotok) {
+
+ aqoap = aaqq / aapp;
+ apoaq = aapp / aaqq;
+ theta = (d__1 = aqoap - apoaq, abs(
+ d__1)) * -.5 / aapq;
+ if (aaqq > aapp0) {
+ theta = -theta;
+ }
+ if (abs(theta) > bigtheta) {
+ t = .5 / theta;
+ fastr[2] = t * d__[p] / d__[q];
+ fastr[3] = -t * d__[q] / d__[p];
+ drotm_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1],
+ &c__1, fastr);
+ if (rsvec) {
+ drotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q *
+ v_dim1 + 1], &c__1, fastr);
+ }
+/* Computing MAX */
+ d__1 = 0., d__2 = t * apoaq *
+ aapq + 1.;
+ sva[q] = aaqq * sqrt((max(d__1,
+ d__2)));
+/* Computing MAX */
+ d__1 = 0., d__2 = 1. - t * aqoap *
+ aapq;
+ aapp *= sqrt((max(d__1,d__2)));
+/* Computing MAX */
+ d__1 = mxsinj, d__2 = abs(t);
+ mxsinj = max(d__1,d__2);
+ } else {
+
+/* .. choose correct signum for THETA and rotate */
+
+ thsign = -d_sign(&c_b35, &aapq);
+ if (aaqq > aapp0) {
+ thsign = -thsign;
+ }
+ t = 1. / (theta + thsign * sqrt(
+ theta * theta + 1.));
+ cs = sqrt(1. / (t * t + 1.));
+ sn = t * cs;
+/* Computing MAX */
+ d__1 = mxsinj, d__2 = abs(sn);
+ mxsinj = max(d__1,d__2);
+/* Computing MAX */
+ d__1 = 0., d__2 = t * apoaq *
+ aapq + 1.;
+ sva[q] = aaqq * sqrt((max(d__1,
+ d__2)));
+ aapp *= sqrt(1. - t * aqoap *
+ aapq);
+ apoaq = d__[p] / d__[q];
+ aqoap = d__[q] / d__[p];
+ if (d__[p] >= 1.) {
+
+ if (d__[q] >= 1.) {
+ fastr[2] = t * apoaq;
+ fastr[3] = -t * aqoap;
+ d__[p] *= cs;
+ d__[q] *= cs;
+ drotm_(m, &a[p * a_dim1 + 1], &c__1, &a[q *
+ a_dim1 + 1], &c__1, fastr);
+ if (rsvec) {
+ drotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[
+ q * v_dim1 + 1], &c__1, fastr);
+ }
+ } else {
+ d__1 = -t * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ if (rsvec) {
+ d__1 = -t * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ }
+ d__[p] *= cs;
+ d__[q] /= cs;
+ }
+ } else {
+ if (d__[q] >= 1.) {
+ d__1 = t * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ if (rsvec) {
+ d__1 = t * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ }
+ d__[p] /= cs;
+ d__[q] *= cs;
+ } else {
+ if (d__[p] >= d__[q]) {
+ d__1 = -t * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ d__[p] *= cs;
+ d__[q] /= cs;
+ if (rsvec) {
+ d__1 = -t * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ d__1 = cs * sn * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ }
+ } else {
+ d__1 = t * apoaq;
+ daxpy_(m, &d__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(m, &d__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ d__[p] /= cs;
+ d__[q] *= cs;
+ if (rsvec) {
+ d__1 = t * apoaq;
+ daxpy_(&mvl, &d__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ d__1 = -cs * sn * aqoap;
+ daxpy_(&mvl, &d__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ }
+ }
+ }
+ }
+ }
+ } else {
+ if (aapp > aaqq) {
+ dcopy_(m, &a[p * a_dim1 + 1], &
+ c__1, &work[1], &c__1);
+ dlascl_("G", &c__0, &c__0, &aapp,
+ &c_b35, m, &c__1, &work[1]
+, lda, &ierr);
+ dlascl_("G", &c__0, &c__0, &aaqq,
+ &c_b35, m, &c__1, &a[q *
+ a_dim1 + 1], lda, &ierr);
+ temp1 = -aapq * d__[p] / d__[q];
+ daxpy_(m, &temp1, &work[1], &c__1,
+ &a[q * a_dim1 + 1], &
+ c__1);
+ dlascl_("G", &c__0, &c__0, &c_b35,
+ &aaqq, m, &c__1, &a[q *
+ a_dim1 + 1], lda, &ierr);
+/* Computing MAX */
+ d__1 = 0., d__2 = 1. - aapq *
+ aapq;
+ sva[q] = aaqq * sqrt((max(d__1,
+ d__2)));
+ mxsinj = max(mxsinj,*sfmin);
+ } else {
+ dcopy_(m, &a[q * a_dim1 + 1], &
+ c__1, &work[1], &c__1);
+ dlascl_("G", &c__0, &c__0, &aaqq,
+ &c_b35, m, &c__1, &work[1]
+, lda, &ierr);
+ dlascl_("G", &c__0, &c__0, &aapp,
+ &c_b35, m, &c__1, &a[p *
+ a_dim1 + 1], lda, &ierr);
+ temp1 = -aapq * d__[q] / d__[p];
+ daxpy_(m, &temp1, &work[1], &c__1,
+ &a[p * a_dim1 + 1], &
+ c__1);
+ dlascl_("G", &c__0, &c__0, &c_b35,
+ &aapp, m, &c__1, &a[p *
+ a_dim1 + 1], lda, &ierr);
+/* Computing MAX */
+ d__1 = 0., d__2 = 1. - aapq *
+ aapq;
+ sva[p] = aapp * sqrt((max(d__1,
+ d__2)));
+ mxsinj = max(mxsinj,*sfmin);
+ }
+ }
+/* END IF ROTOK THEN ... ELSE */
+
+/* In the case of cancellation in updating SVA(q) */
+/* .. recompute SVA(q) */
+/* Computing 2nd power */
+ d__1 = sva[q] / aaqq;
+ if (d__1 * d__1 <= rooteps) {
+ if (aaqq < rootbig && aaqq >
+ rootsfmin) {
+ sva[q] = dnrm2_(m, &a[q * a_dim1
+ + 1], &c__1) * d__[q];
+ } else {
+ t = 0.;
+ aaqq = 0.;
+ dlassq_(m, &a[q * a_dim1 + 1], &
+ c__1, &t, &aaqq);
+ sva[q] = t * sqrt(aaqq) * d__[q];
+ }
+ }
+/* Computing 2nd power */
+ d__1 = aapp / aapp0;
+ if (d__1 * d__1 <= rooteps) {
+ if (aapp < rootbig && aapp >
+ rootsfmin) {
+ aapp = dnrm2_(m, &a[p * a_dim1 +
+ 1], &c__1) * d__[p];
+ } else {
+ t = 0.;
+ aapp = 0.;
+ dlassq_(m, &a[p * a_dim1 + 1], &
+ c__1, &t, &aapp);
+ aapp = t * sqrt(aapp) * d__[p];
+ }
+ sva[p] = aapp;
+ }
+/* end of OK rotation */
+ } else {
+ ++notrot;
+/* SKIPPED = SKIPPED + 1 */
+ ++pskipped;
+ ++ijblsk;
+ }
+ } else {
+ ++notrot;
+ ++pskipped;
+ ++ijblsk;
+ }
+/* IF ( NOTROT .GE. EMPTSW ) GO TO 2011 */
+ if (i__ <= swband && ijblsk >= blskip) {
+ sva[p] = aapp;
+ notrot = 0;
+ goto L2011;
+ }
+ if (i__ <= swband && pskipped > rowskip) {
+ aapp = -aapp;
+ notrot = 0;
+ goto L2203;
+ }
+
+/* L2200: */
+ }
+/* end of the q-loop */
+L2203:
+ sva[p] = aapp;
+
+ } else {
+ if (aapp == 0.) {
+/* Computing MIN */
+ i__5 = jgl + kbl - 1;
+ notrot = notrot + min(i__5,*n) - jgl + 1;
+ }
+ if (aapp < 0.) {
+ notrot = 0;
+ }
+/* ** IF ( NOTROT .GE. EMPTSW ) GO TO 2011 */
+ }
+/* L2100: */
+ }
+/* end of the p-loop */
+/* L2010: */
+ }
+/* end of the jbc-loop */
+L2011:
+/* 2011 bailed out of the jbc-loop */
+/* Computing MIN */
+ i__4 = igl + kbl - 1;
+ i__3 = min(i__4,*n);
+ for (p = igl; p <= i__3; ++p) {
+ sva[p] = (d__1 = sva[p], abs(d__1));
+/* L2012: */
+ }
+/* ** IF ( NOTROT .GE. EMPTSW ) GO TO 1994 */
+/* L2000: */
+ }
+/* 2000 :: end of the ibr-loop */
+
+/* .. update SVA(N) */
+ if (sva[*n] < rootbig && sva[*n] > rootsfmin) {
+ sva[*n] = dnrm2_(m, &a[*n * a_dim1 + 1], &c__1) * d__[*n];
+ } else {
+ t = 0.;
+ aapp = 0.;
+ dlassq_(m, &a[*n * a_dim1 + 1], &c__1, &t, &aapp);
+ sva[*n] = t * sqrt(aapp) * d__[*n];
+ }
+
+/* Additional steering devices */
+
+ if (i__ < swband && (mxaapq <= roottol || iswrot <= *n)) {
+ swband = i__;
+ }
+ if (i__ > swband + 1 && mxaapq < (doublereal) (*n) * *tol && (
+ doublereal) (*n) * mxaapq * mxsinj < *tol) {
+ goto L1994;
+ }
+
+ if (notrot >= emptsw) {
+ goto L1994;
+ }
+/* L1993: */
+ }
+/* end i=1:NSWEEP loop */
+/* #:) Reaching this point means that the procedure has completed the given */
+/* number of sweeps. */
+ *info = *nsweep - 1;
+ goto L1995;
+L1994:
+/* #:) Reaching this point means that during the i-th sweep all pivots were */
+/* below the given threshold, causing early exit. */
+ *info = 0;
+/* #:) INFO = 0 confirms successful iterations. */
+L1995:
+
+/* Sort the vector D */
+
+ i__1 = *n - 1;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *n - p + 1;
+ q = idamax_(&i__2, &sva[p], &c__1) + p - 1;
+ if (p != q) {
+ temp1 = sva[p];
+ sva[p] = sva[q];
+ sva[q] = temp1;
+ temp1 = d__[p];
+ d__[p] = d__[q];
+ d__[q] = temp1;
+ dswap_(m, &a[p * a_dim1 + 1], &c__1, &a[q * a_dim1 + 1], &c__1);
+ if (rsvec) {
+ dswap_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ }
+ }
+/* L5991: */
+ }
+
+ return 0;
+/* .. */
+/* .. END OF DGSVJ1 */
+/* .. */
+} /* dgsvj1_ */
diff --git a/contrib/libs/clapack/dgtcon.c b/contrib/libs/clapack/dgtcon.c
new file mode 100644
index 0000000000..ef4721d1f6
--- /dev/null
+++ b/contrib/libs/clapack/dgtcon.c
@@ -0,0 +1,209 @@
+/* dgtcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dgtcon_(char *norm, integer *n, doublereal *dl,
+ doublereal *d__, doublereal *du, doublereal *du2, integer *ipiv,
+ doublereal *anorm, doublereal *rcond, doublereal *work, integer *
+ iwork, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ integer i__, kase, kase1;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int dlacn2_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *), xerbla_(char *,
+ integer *);
+ doublereal ainvnm;
+ logical onenrm;
+ extern /* Subroutine */ int dgttrs_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGTCON estimates the reciprocal of the condition number of a real */
+/* tridiagonal matrix A using the LU factorization as computed by */
+/* DGTTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* DL (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) multipliers that define the matrix L from the */
+/* LU factorization of A as computed by DGTTRF. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the upper triangular matrix U from */
+/* the LU factorization of A. */
+
+/* DU (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) elements of the first superdiagonal of U. */
+
+/* DU2 (input) DOUBLE PRECISION array, dimension (N-2) */
+/* The (n-2) elements of the second superdiagonal of U. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/* interchanged with row IPIV(i). IPIV(i) will always be either */
+/* i or i+1; IPIV(i) = i indicates a row interchange was not */
+/* required. */
+
+/* ANORM (input) DOUBLE PRECISION */
+/* If NORM = '1' or 'O', the 1-norm of the original matrix A. */
+/* If NORM = 'I', the infinity-norm of the original matrix A. */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (2*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments. */
+
+ /* Parameter adjustments */
+ --iwork;
+ --work;
+ --ipiv;
+ --du2;
+ --du;
+ --d__;
+ --dl;
+
+ /* Function Body */
+ *info = 0;
+ onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+ if (! onenrm && ! lsame_(norm, "I")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*anorm < 0.) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGTCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.;
+ if (*n == 0) {
+ *rcond = 1.;
+ return 0;
+ } else if (*anorm == 0.) {
+ return 0;
+ }
+
+/* Check that D(1:N) is non-zero. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] == 0.) {
+ return 0;
+ }
+/* L10: */
+ }
+
+ ainvnm = 0.;
+ if (onenrm) {
+ kase1 = 1;
+ } else {
+ kase1 = 2;
+ }
+ kase = 0;
+L20:
+ dlacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+ if (kase == kase1) {
+
+/* Multiply by inv(U)*inv(L). */
+
+ dgttrs_("No transpose", n, &c__1, &dl[1], &d__[1], &du[1], &du2[1]
+, &ipiv[1], &work[1], n, info);
+ } else {
+
+/* Multiply by inv(L')*inv(U'). */
+
+ dgttrs_("Transpose", n, &c__1, &dl[1], &d__[1], &du[1], &du2[1], &
+ ipiv[1], &work[1], n, info);
+ }
+ goto L20;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.) {
+ *rcond = 1. / ainvnm / *anorm;
+ }
+
+ return 0;
+
+/* End of DGTCON */
+
+} /* dgtcon_ */
diff --git a/contrib/libs/clapack/dgtrfs.c b/contrib/libs/clapack/dgtrfs.c
new file mode 100644
index 0000000000..fa9a4c632d
--- /dev/null
+++ b/contrib/libs/clapack/dgtrfs.c
@@ -0,0 +1,451 @@
+/* dgtrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b18 = -1.;
+static doublereal c_b19 = 1.;
+
+/* Subroutine */ int dgtrfs_(char *trans, integer *n, integer *nrhs,
+ doublereal *dl, doublereal *d__, doublereal *du, doublereal *dlf,
+ doublereal *df, doublereal *duf, doublereal *du2, integer *ipiv,
+ doublereal *b, integer *ldb, doublereal *x, integer *ldx, doublereal *
+ ferr, doublereal *berr, doublereal *work, integer *iwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal s;
+ integer nz;
+ doublereal eps;
+ integer kase;
+ doublereal safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), daxpy_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *);
+ integer count;
+ extern /* Subroutine */ int dlacn2_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int dlagtm_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran;
+ char transn[1];
+ extern /* Subroutine */ int dgttrs_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, integer *);
+ char transt[1];
+ doublereal lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGTRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is tridiagonal, and provides */
+/* error bounds and backward error estimates for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose = Transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of A. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The diagonal elements of A. */
+
+/* DU (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) superdiagonal elements of A. */
+
+/* DLF (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) multipliers that define the matrix L from the */
+/* LU factorization of A as computed by DGTTRF. */
+
+/* DF (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the upper triangular matrix U from */
+/* the LU factorization of A. */
+
+/* DUF (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) elements of the first superdiagonal of U. */
+
+/* DU2 (input) DOUBLE PRECISION array, dimension (N-2) */
+/* The (n-2) elements of the second superdiagonal of U. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/* interchanged with row IPIV(i). IPIV(i) will always be either */
+/* i or i+1; IPIV(i) = i indicates a row interchange was not */
+/* required. */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by DGTTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --dl;
+ --d__;
+ --du;
+ --dlf;
+ --df;
+ --duf;
+ --du2;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -13;
+ } else if (*ldx < max(1,*n)) {
+ *info = -15;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGTRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.;
+ berr[j] = 0.;
+/* L10: */
+ }
+ return 0;
+ }
+
+ if (notran) {
+ *(unsigned char *)transn = 'N';
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transn = 'T';
+ *(unsigned char *)transt = 'N';
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = 4;
+ eps = dlamch_("Epsilon");
+ safmin = dlamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A, A**T, or A**H, depending on TRANS. */
+
+ dcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+ dlagtm_(trans, n, &c__1, &c_b18, &dl[1], &d__[1], &du[1], &x[j *
+ x_dim1 + 1], ldx, &c_b19, &work[*n + 1], n);
+
+/* Compute abs(op(A))*abs(x) + abs(b) for use in the backward */
+/* error bound. */
+
+ if (notran) {
+ if (*n == 1) {
+ work[1] = (d__1 = b[j * b_dim1 + 1], abs(d__1)) + (d__2 = d__[
+ 1] * x[j * x_dim1 + 1], abs(d__2));
+ } else {
+ work[1] = (d__1 = b[j * b_dim1 + 1], abs(d__1)) + (d__2 = d__[
+ 1] * x[j * x_dim1 + 1], abs(d__2)) + (d__3 = du[1] *
+ x[j * x_dim1 + 2], abs(d__3));
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ work[i__] = (d__1 = b[i__ + j * b_dim1], abs(d__1)) + (
+ d__2 = dl[i__ - 1] * x[i__ - 1 + j * x_dim1], abs(
+ d__2)) + (d__3 = d__[i__] * x[i__ + j * x_dim1],
+ abs(d__3)) + (d__4 = du[i__] * x[i__ + 1 + j *
+ x_dim1], abs(d__4));
+/* L30: */
+ }
+ work[*n] = (d__1 = b[*n + j * b_dim1], abs(d__1)) + (d__2 =
+ dl[*n - 1] * x[*n - 1 + j * x_dim1], abs(d__2)) + (
+ d__3 = d__[*n] * x[*n + j * x_dim1], abs(d__3));
+ }
+ } else {
+ if (*n == 1) {
+ work[1] = (d__1 = b[j * b_dim1 + 1], abs(d__1)) + (d__2 = d__[
+ 1] * x[j * x_dim1 + 1], abs(d__2));
+ } else {
+ work[1] = (d__1 = b[j * b_dim1 + 1], abs(d__1)) + (d__2 = d__[
+ 1] * x[j * x_dim1 + 1], abs(d__2)) + (d__3 = dl[1] *
+ x[j * x_dim1 + 2], abs(d__3));
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ work[i__] = (d__1 = b[i__ + j * b_dim1], abs(d__1)) + (
+ d__2 = du[i__ - 1] * x[i__ - 1 + j * x_dim1], abs(
+ d__2)) + (d__3 = d__[i__] * x[i__ + j * x_dim1],
+ abs(d__3)) + (d__4 = dl[i__] * x[i__ + 1 + j *
+ x_dim1], abs(d__4));
+/* L40: */
+ }
+ work[*n] = (d__1 = b[*n + j * b_dim1], abs(d__1)) + (d__2 =
+ du[*n - 1] * x[*n - 1 + j * x_dim1], abs(d__2)) + (
+ d__3 = d__[*n] * x[*n + j * x_dim1], abs(d__3));
+ }
+ }
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ s = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+/* Computing MAX */
+ d__2 = s, d__3 = (d__1 = work[*n + i__], abs(d__1)) / work[
+ i__];
+ s = max(d__2,d__3);
+ } else {
+/* Computing MAX */
+ d__2 = s, d__3 = ((d__1 = work[*n + i__], abs(d__1)) + safe1)
+ / (work[i__] + safe1);
+ s = max(d__2,d__3);
+ }
+/* L50: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ dgttrs_(trans, n, &c__1, &dlf[1], &df[1], &duf[1], &du2[1], &ipiv[
+ 1], &work[*n + 1], n, info);
+ daxpy_(n, &c_b19, &work[*n + 1], &c__1, &x[j * x_dim1 + 1], &c__1)
+ ;
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use DLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+ work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+ work[i__];
+ } else {
+ work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+ work[i__] + safe1;
+ }
+/* L60: */
+ }
+
+ kase = 0;
+L70:
+ dlacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+ kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)**T). */
+
+ dgttrs_(transt, n, &c__1, &dlf[1], &df[1], &duf[1], &du2[1], &
+ ipiv[1], &work[*n + 1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] = work[i__] * work[*n + i__];
+/* L80: */
+ }
+ } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] = work[i__] * work[*n + i__];
+/* L90: */
+ }
+ dgttrs_(transn, n, &c__1, &dlf[1], &df[1], &duf[1], &du2[1], &
+ ipiv[1], &work[*n + 1], n, info);
+ }
+ goto L70;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = lstres, d__3 = (d__1 = x[i__ + j * x_dim1], abs(d__1));
+ lstres = max(d__2,d__3);
+/* L100: */
+ }
+ if (lstres != 0.) {
+ ferr[j] /= lstres;
+ }
+
+/* L110: */
+ }
+
+ return 0;
+
+/* End of DGTRFS */
+
+} /* dgtrfs_ */
diff --git a/contrib/libs/clapack/dgtsv.c b/contrib/libs/clapack/dgtsv.c
new file mode 100644
index 0000000000..3d5995a64a
--- /dev/null
+++ b/contrib/libs/clapack/dgtsv.c
@@ -0,0 +1,315 @@
+/* dgtsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dgtsv_(integer *n, integer *nrhs, doublereal *dl,
+ doublereal *d__, doublereal *du, doublereal *b, integer *ldb, integer
+ *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal fact, temp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGTSV solves the equation */
+
+/* A*X = B, */
+
+/* where A is an n by n tridiagonal matrix, by Gaussian elimination with */
+/* partial pivoting. */
+
+/* Note that the equation A'*X = B may be solved by interchanging the */
+/* order of the arguments DU and DL. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, DL must contain the (n-1) sub-diagonal elements of */
+/* A. */
+
+/* On exit, DL is overwritten by the (n-2) elements of the */
+/* second super-diagonal of the upper triangular matrix U from */
+/* the LU factorization of A, in DL(1), ..., DL(n-2). */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, D must contain the diagonal elements of A. */
+
+/* On exit, D is overwritten by the n diagonal elements of U. */
+
+/* DU (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, DU must contain the (n-1) super-diagonal elements */
+/* of A. */
+
+/* On exit, DU is overwritten by the (n-1) elements of the first */
+/* super-diagonal of U. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the N by NRHS matrix of right hand side matrix B. */
+/* On exit, if INFO = 0, the N by NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) is exactly zero, and the solution */
+/* has not been computed. The factorization has not been */
+/* completed unless i = N. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --dl;
+ --d__;
+ --du;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*nrhs < 0) {
+ *info = -2;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGTSV ", &i__1);
+ return 0;
+ }
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*nrhs == 1) {
+ i__1 = *n - 2;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((d__1 = d__[i__], abs(d__1)) >= (d__2 = dl[i__], abs(d__2))) {
+
+/* No row interchange required */
+
+ if (d__[i__] != 0.) {
+ fact = dl[i__] / d__[i__];
+ d__[i__ + 1] -= fact * du[i__];
+ b[i__ + 1 + b_dim1] -= fact * b[i__ + b_dim1];
+ } else {
+ *info = i__;
+ return 0;
+ }
+ dl[i__] = 0.;
+ } else {
+
+/* Interchange rows I and I+1 */
+
+ fact = d__[i__] / dl[i__];
+ d__[i__] = dl[i__];
+ temp = d__[i__ + 1];
+ d__[i__ + 1] = du[i__] - fact * temp;
+ dl[i__] = du[i__ + 1];
+ du[i__ + 1] = -fact * dl[i__];
+ du[i__] = temp;
+ temp = b[i__ + b_dim1];
+ b[i__ + b_dim1] = b[i__ + 1 + b_dim1];
+ b[i__ + 1 + b_dim1] = temp - fact * b[i__ + 1 + b_dim1];
+ }
+/* L10: */
+ }
+ if (*n > 1) {
+ i__ = *n - 1;
+ if ((d__1 = d__[i__], abs(d__1)) >= (d__2 = dl[i__], abs(d__2))) {
+ if (d__[i__] != 0.) {
+ fact = dl[i__] / d__[i__];
+ d__[i__ + 1] -= fact * du[i__];
+ b[i__ + 1 + b_dim1] -= fact * b[i__ + b_dim1];
+ } else {
+ *info = i__;
+ return 0;
+ }
+ } else {
+ fact = d__[i__] / dl[i__];
+ d__[i__] = dl[i__];
+ temp = d__[i__ + 1];
+ d__[i__ + 1] = du[i__] - fact * temp;
+ du[i__] = temp;
+ temp = b[i__ + b_dim1];
+ b[i__ + b_dim1] = b[i__ + 1 + b_dim1];
+ b[i__ + 1 + b_dim1] = temp - fact * b[i__ + 1 + b_dim1];
+ }
+ }
+ if (d__[*n] == 0.) {
+ *info = *n;
+ return 0;
+ }
+ } else {
+ i__1 = *n - 2;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((d__1 = d__[i__], abs(d__1)) >= (d__2 = dl[i__], abs(d__2))) {
+
+/* No row interchange required */
+
+ if (d__[i__] != 0.) {
+ fact = dl[i__] / d__[i__];
+ d__[i__ + 1] -= fact * du[i__];
+ i__2 = *nrhs;
+ for (j = 1; j <= i__2; ++j) {
+ b[i__ + 1 + j * b_dim1] -= fact * b[i__ + j * b_dim1];
+/* L20: */
+ }
+ } else {
+ *info = i__;
+ return 0;
+ }
+ dl[i__] = 0.;
+ } else {
+
+/* Interchange rows I and I+1 */
+
+ fact = d__[i__] / dl[i__];
+ d__[i__] = dl[i__];
+ temp = d__[i__ + 1];
+ d__[i__ + 1] = du[i__] - fact * temp;
+ dl[i__] = du[i__ + 1];
+ du[i__ + 1] = -fact * dl[i__];
+ du[i__] = temp;
+ i__2 = *nrhs;
+ for (j = 1; j <= i__2; ++j) {
+ temp = b[i__ + j * b_dim1];
+ b[i__ + j * b_dim1] = b[i__ + 1 + j * b_dim1];
+ b[i__ + 1 + j * b_dim1] = temp - fact * b[i__ + 1 + j *
+ b_dim1];
+/* L30: */
+ }
+ }
+/* L40: */
+ }
+ if (*n > 1) {
+ i__ = *n - 1;
+ if ((d__1 = d__[i__], abs(d__1)) >= (d__2 = dl[i__], abs(d__2))) {
+ if (d__[i__] != 0.) {
+ fact = dl[i__] / d__[i__];
+ d__[i__ + 1] -= fact * du[i__];
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ b[i__ + 1 + j * b_dim1] -= fact * b[i__ + j * b_dim1];
+/* L50: */
+ }
+ } else {
+ *info = i__;
+ return 0;
+ }
+ } else {
+ fact = d__[i__] / dl[i__];
+ d__[i__] = dl[i__];
+ temp = d__[i__ + 1];
+ d__[i__ + 1] = du[i__] - fact * temp;
+ du[i__] = temp;
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ temp = b[i__ + j * b_dim1];
+ b[i__ + j * b_dim1] = b[i__ + 1 + j * b_dim1];
+ b[i__ + 1 + j * b_dim1] = temp - fact * b[i__ + 1 + j *
+ b_dim1];
+/* L60: */
+ }
+ }
+ }
+ if (d__[*n] == 0.) {
+ *info = *n;
+ return 0;
+ }
+ }
+
+/* Back solve with the matrix U from the factorization. */
+
+ if (*nrhs <= 2) {
+ j = 1;
+L70:
+ b[*n + j * b_dim1] /= d__[*n];
+ if (*n > 1) {
+ b[*n - 1 + j * b_dim1] = (b[*n - 1 + j * b_dim1] - du[*n - 1] * b[
+ *n + j * b_dim1]) / d__[*n - 1];
+ }
+ for (i__ = *n - 2; i__ >= 1; --i__) {
+ b[i__ + j * b_dim1] = (b[i__ + j * b_dim1] - du[i__] * b[i__ + 1
+ + j * b_dim1] - dl[i__] * b[i__ + 2 + j * b_dim1]) / d__[
+ i__];
+/* L80: */
+ }
+ if (j < *nrhs) {
+ ++j;
+ goto L70;
+ }
+ } else {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ b[*n + j * b_dim1] /= d__[*n];
+ if (*n > 1) {
+ b[*n - 1 + j * b_dim1] = (b[*n - 1 + j * b_dim1] - du[*n - 1]
+ * b[*n + j * b_dim1]) / d__[*n - 1];
+ }
+ for (i__ = *n - 2; i__ >= 1; --i__) {
+ b[i__ + j * b_dim1] = (b[i__ + j * b_dim1] - du[i__] * b[i__
+ + 1 + j * b_dim1] - dl[i__] * b[i__ + 2 + j * b_dim1])
+ / d__[i__];
+/* L90: */
+ }
+/* L100: */
+ }
+ }
+
+ return 0;
+
+/* End of DGTSV */
+
+} /* dgtsv_ */
diff --git a/contrib/libs/clapack/dgtsvx.c b/contrib/libs/clapack/dgtsvx.c
new file mode 100644
index 0000000000..807c6e8308
--- /dev/null
+++ b/contrib/libs/clapack/dgtsvx.c
@@ -0,0 +1,349 @@
+/* dgtsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dgtsvx_(char *fact, char *trans, integer *n, integer *
+	nrhs, doublereal *dl, doublereal *d__, doublereal *du, doublereal *
+	dlf, doublereal *df, doublereal *duf, doublereal *du2, integer *ipiv,
+	doublereal *b, integer *ldb, doublereal *x, integer *ldx, doublereal *
+	rcond, doublereal *ferr, doublereal *berr, doublereal *work, integer *
+	iwork, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, x_dim1, x_offset, i__1;
+
+    /* Local variables */
+    char norm[1];
+    extern logical lsame_(char *, char *);
+    doublereal anorm;
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    extern doublereal dlamch_(char *), dlangt_(char *, integer *,
+	    doublereal *, doublereal *, doublereal *);
+    logical nofact;
+    extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *),
+	    xerbla_(char *, integer *), dgtcon_(char *, integer *,
+	    doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+	    doublereal *, doublereal *, doublereal *, integer *, integer *), dgtrfs_(char *, integer *, integer *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+	    integer *, integer *), dgttrf_(integer *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, integer *, integer *);
+    logical notran;
+    extern /* Subroutine */ int dgttrs_(char *, integer *, integer *,
+	    doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGTSVX uses the LU factorization to compute the solution to a real */
+/* system of linear equations A * X = B or A**T * X = B, */
+/* where A is a tridiagonal matrix of order N and X and B are N-by-NRHS */
+/* matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the LU decomposition is used to factor the matrix A */
+/* as A = L * U, where L is a product of permutation and unit lower */
+/* bidiagonal matrices and U is upper triangular with nonzeros in */
+/* only the main diagonal and first two superdiagonals. */
+
+/* 2. If some U(i,i)=0, so that U is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of A has been */
+/* supplied on entry. */
+/* = 'F': DLF, DF, DUF, DU2, and IPIV contain the factored */
+/* form of A; DL, D, DU, DLF, DF, DUF, DU2 and IPIV */
+/* will not be modified. */
+/* = 'N': The matrix will be copied to DLF, DF, and DUF */
+/* and factored. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose = Transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of A. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of A. */
+
+/* DU (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) superdiagonal elements of A. */
+
+/* DLF (input or output) DOUBLE PRECISION array, dimension (N-1) */
+/* If FACT = 'F', then DLF is an input argument and on entry */
+/* contains the (n-1) multipliers that define the matrix L from */
+/* the LU factorization of A as computed by DGTTRF. */
+
+/* If FACT = 'N', then DLF is an output argument and on exit */
+/* contains the (n-1) multipliers that define the matrix L from */
+/* the LU factorization of A. */
+
+/* DF (input or output) DOUBLE PRECISION array, dimension (N) */
+/* If FACT = 'F', then DF is an input argument and on entry */
+/* contains the n diagonal elements of the upper triangular */
+/* matrix U from the LU factorization of A. */
+
+/* If FACT = 'N', then DF is an output argument and on exit */
+/* contains the n diagonal elements of the upper triangular */
+/* matrix U from the LU factorization of A. */
+
+/* DUF (input or output) DOUBLE PRECISION array, dimension (N-1) */
+/* If FACT = 'F', then DUF is an input argument and on entry */
+/* contains the (n-1) elements of the first superdiagonal of U. */
+
+/* If FACT = 'N', then DUF is an output argument and on exit */
+/* contains the (n-1) elements of the first superdiagonal of U. */
+
+/* DU2 (input or output) DOUBLE PRECISION array, dimension (N-2) */
+/* If FACT = 'F', then DU2 is an input argument and on entry */
+/* contains the (n-2) elements of the second superdiagonal of */
+/* U. */
+
+/* If FACT = 'N', then DU2 is an output argument and on exit */
+/* contains the (n-2) elements of the second superdiagonal of */
+/* U. */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains the pivot indices from the LU factorization of A as */
+/* computed by DGTTRF. */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains the pivot indices from the LU factorization of A; */
+/* row i of the matrix was interchanged with row IPIV(i). */
+/* IPIV(i) will always be either i or i+1; IPIV(i) = i indicates */
+/* a row interchange was not required. */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A. If RCOND is less than the machine precision (in */
+/* particular, if RCOND = 0), the matrix is singular to working */
+/* precision. This condition is indicated by a return code of */
+/* INFO > 0. */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: U(i,i) is exactly zero. The factorization */
+/* has not been completed unless i = N, but the */
+/* factor U is exactly singular, so the solution */
+/* and error bounds could not be computed. */
+/* RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    /* f2c idiom: decrement each array pointer by one element so the */
+    /* translated code can keep Fortran's 1-based indexing unchanged. */
+    --dl;
+    --d__;
+    --du;
+    --dlf;
+    --df;
+    --duf;
+    --du2;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    nofact = lsame_(fact, "N");
+    notran = lsame_(trans, "N");
+    if (! nofact && ! lsame_(fact, "F")) {
+	*info = -1;
+    } else if (! notran && ! lsame_(trans, "T") && !
+	    lsame_(trans, "C")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (*ldb < max(1,*n)) {
+	*info = -14;
+    } else if (*ldx < max(1,*n)) {
+	*info = -16;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGTSVX", &i__1);
+	return 0;
+    }
+
+    if (nofact) {
+
+/* Compute the LU factorization of A. */
+
+	dcopy_(n, &d__[1], &c__1, &df[1], &c__1);
+	if (*n > 1) {
+	    i__1 = *n - 1;
+	    dcopy_(&i__1, &dl[1], &c__1, &dlf[1], &c__1);
+	    i__1 = *n - 1;
+	    dcopy_(&i__1, &du[1], &c__1, &duf[1], &c__1);
+	}
+	dgttrf_(n, &dlf[1], &df[1], &duf[1], &du2[1], &ipiv[1], info);
+
+/* Return if INFO is non-zero. */
+
+	if (*info > 0) {
+	    *rcond = 0.;
+	    return 0;
+	}
+    }
+
+/* Compute the norm of the matrix A. */
+/* (1-norm for the untransposed system; infinity-norm for A**T / A**H.) */
+
+    if (notran) {
+	*(unsigned char *)norm = '1';
+    } else {
+	*(unsigned char *)norm = 'I';
+    }
+    anorm = dlangt_(norm, n, &dl[1], &d__[1], &du[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+    dgtcon_(norm, n, &dlf[1], &df[1], &duf[1], &du2[1], &ipiv[1], &anorm,
+	    rcond, &work[1], &iwork[1], info);
+
+/* Compute the solution vectors X. */
+
+    dlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    dgttrs_(trans, n, nrhs, &dlf[1], &df[1], &duf[1], &du2[1], &ipiv[1], &x[
+	    x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+    dgtrfs_(trans, n, nrhs, &dl[1], &d__[1], &du[1], &dlf[1], &df[1], &duf[1],
+	     &du2[1], &ipiv[1], &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1]
+, &berr[1], &work[1], &iwork[1], info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+    if (*rcond < dlamch_("Epsilon")) {
+	*info = *n + 1;
+    }
+
+    return 0;
+
+/* End of DGTSVX */
+
+} /* dgtsvx_ */
diff --git a/contrib/libs/clapack/dgttrf.c b/contrib/libs/clapack/dgttrf.c
new file mode 100644
index 0000000000..510bc02d85
--- /dev/null
+++ b/contrib/libs/clapack/dgttrf.c
@@ -0,0 +1,203 @@
+/* dgttrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dgttrf_(integer *n, doublereal *dl, doublereal *d__,
+	doublereal *du, doublereal *du2, integer *ipiv, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1, d__2;
+
+    /* Local variables */
+    integer i__;
+    doublereal fact, temp;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGTTRF computes an LU factorization of a real tridiagonal matrix A */
+/* using elimination with partial pivoting and row interchanges. */
+
+/* The factorization has the form */
+/* A = L * U */
+/* where L is a product of permutation and unit lower bidiagonal */
+/* matrices and U is upper triangular with nonzeros in only the main */
+/* diagonal and first two superdiagonals. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* DL (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, DL must contain the (n-1) sub-diagonal elements of */
+/* A. */
+
+/* On exit, DL is overwritten by the (n-1) multipliers that */
+/* define the matrix L from the LU factorization of A. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, D must contain the diagonal elements of A. */
+
+/* On exit, D is overwritten by the n diagonal elements of the */
+/* upper triangular matrix U from the LU factorization of A. */
+
+/* DU (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, DU must contain the (n-1) super-diagonal elements */
+/* of A. */
+
+/* On exit, DU is overwritten by the (n-1) elements of the first */
+/* super-diagonal of U. */
+
+/* DU2 (output) DOUBLE PRECISION array, dimension (N-2) */
+/* On exit, DU2 is overwritten by the (n-2) elements of the */
+/* second super-diagonal of U. */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/* interchanged with row IPIV(i). IPIV(i) will always be either */
+/* i or i+1; IPIV(i) = i indicates a row interchange was not */
+/* required. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, U(k,k) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and division by zero will occur if it is used */
+/* to solve a system of equations. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    /* f2c idiom: shift pointers down by one so 1-based Fortran indexing */
+    /* carries over directly to the C translation. */
+    --ipiv;
+    --du2;
+    --du;
+    --d__;
+    --dl;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+	i__1 = -(*info);
+	xerbla_("DGTTRF", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Initialize IPIV(i) = i and DU2(I) = 0 */
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	ipiv[i__] = i__;
+/* L10: */
+    }
+    i__1 = *n - 2;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	du2[i__] = 0.;
+/* L20: */
+    }
+
+    i__1 = *n - 2;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((d__1 = d__[i__], abs(d__1)) >= (d__2 = dl[i__], abs(d__2))) {
+
+/* No row interchange required, eliminate DL(I) */
+
+	    if (d__[i__] != 0.) {
+		fact = dl[i__] / d__[i__];
+		dl[i__] = fact;
+		d__[i__ + 1] -= fact * du[i__];
+	    }
+	} else {
+
+/* Interchange rows I and I+1, eliminate DL(I) */
+
+	    fact = d__[i__] / dl[i__];
+	    d__[i__] = dl[i__];
+	    dl[i__] = fact;
+	    temp = du[i__];
+	    du[i__] = d__[i__ + 1];
+	    d__[i__ + 1] = temp - fact * d__[i__ + 1];
+/* The row swap introduces fill-in in the second super-diagonal of U. */
+	    du2[i__] = du[i__ + 1];
+	    du[i__ + 1] = -fact * du[i__ + 1];
+	    ipiv[i__] = i__ + 1;
+	}
+/* L30: */
+    }
+    if (*n > 1) {
+	i__ = *n - 1;
+	if ((d__1 = d__[i__], abs(d__1)) >= (d__2 = dl[i__], abs(d__2))) {
+	    if (d__[i__] != 0.) {
+		fact = dl[i__] / d__[i__];
+		dl[i__] = fact;
+		d__[i__ + 1] -= fact * du[i__];
+	    }
+	} else {
+	    fact = d__[i__] / dl[i__];
+	    d__[i__] = dl[i__];
+	    dl[i__] = fact;
+	    temp = du[i__];
+	    du[i__] = d__[i__ + 1];
+	    d__[i__ + 1] = temp - fact * d__[i__ + 1];
+	    ipiv[i__] = i__ + 1;
+	}
+    }
+
+/* Check for a zero on the diagonal of U. */
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if (d__[i__] == 0.) {
+	    *info = i__;
+	    goto L50;
+	}
+/* L40: */
+    }
+L50:
+
+    return 0;
+
+/* End of DGTTRF */
+
+} /* dgttrf_ */
diff --git a/contrib/libs/clapack/dgttrs.c b/contrib/libs/clapack/dgttrs.c
new file mode 100644
index 0000000000..7b4c5b3e2a
--- /dev/null
+++ b/contrib/libs/clapack/dgttrs.c
@@ -0,0 +1,189 @@
+/* dgttrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dgttrs_(char *trans, integer *n, integer *nrhs,
+	doublereal *dl, doublereal *d__, doublereal *du, doublereal *du2,
+	integer *ipiv, doublereal *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer j, jb, nb;
+    extern /* Subroutine */ int dgtts2_(integer *, integer *, integer *,
+	    doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+	     doublereal *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer itrans;
+    logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGTTRS solves one of the systems of equations */
+/* A*X = B or A'*X = B, */
+/* with a tridiagonal matrix A using the LU factorization computed */
+/* by DGTTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations. */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A'* X = B (Transpose) */
+/* = 'C': A'* X = B (Conjugate transpose = Transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) multipliers that define the matrix L from the */
+/* LU factorization of A. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the upper triangular matrix U from */
+/* the LU factorization of A. */
+
+/* DU (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) elements of the first super-diagonal of U. */
+
+/* DU2 (input) DOUBLE PRECISION array, dimension (N-2) */
+/* The (n-2) elements of the second super-diagonal of U. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/* interchanged with row IPIV(i). IPIV(i) will always be either */
+/* i or i+1; IPIV(i) = i indicates a row interchange was not */
+/* required. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the matrix of right hand side vectors B. */
+/* On exit, B is overwritten by the solution vectors X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    /* f2c idiom: shift pointers so 1-based Fortran indexing is preserved. */
+    --dl;
+    --d__;
+    --du;
+    --du2;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    notran = *(unsigned char *)trans == 'N' || *(unsigned char *)trans == 'n';
+    if (! notran && ! (*(unsigned char *)trans == 'T' || *(unsigned char *)
+	    trans == 't') && ! (*(unsigned char *)trans == 'C' || *(unsigned
+	    char *)trans == 'c')) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*ldb < max(*n,1)) {
+	*info = -10;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DGTTRS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+/* Decode TRANS */
+
+    if (notran) {
+	itrans = 0;
+    } else {
+	itrans = 1;
+    }
+
+/* Determine the number of right-hand sides to solve at a time. */
+
+    if (*nrhs == 1) {
+	nb = 1;
+    } else {
+/* ILAENV supplies a tuned block size; the max() guards against values < 1. */
+/* Computing MAX */
+	i__1 = 1, i__2 = ilaenv_(&c__1, "DGTTRS", trans, n, nrhs, &c_n1, &
+		c_n1);
+	nb = max(i__1,i__2);
+    }
+
+    if (nb >= *nrhs) {
+	dgtts2_(&itrans, n, nrhs, &dl[1], &d__[1], &du[1], &du2[1], &ipiv[1],
+		&b[b_offset], ldb);
+    } else {
+	i__1 = *nrhs;
+	i__2 = nb;
+	for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+	    i__3 = *nrhs - j + 1;
+	    jb = min(i__3,nb);
+	    dgtts2_(&itrans, n, &jb, &dl[1], &d__[1], &du[1], &du2[1], &ipiv[
+		    1], &b[j * b_dim1 + 1], ldb);
+/* L10: */
+	}
+    }
+
+/* End of DGTTRS */
+
+    return 0;
+} /* dgttrs_ */
diff --git a/contrib/libs/clapack/dgtts2.c b/contrib/libs/clapack/dgtts2.c
new file mode 100644
index 0000000000..fce9cc8e8c
--- /dev/null
+++ b/contrib/libs/clapack/dgtts2.c
@@ -0,0 +1,261 @@
+/* dgtts2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dgtts2_(integer *itrans, integer *n, integer *nrhs,
+	doublereal *dl, doublereal *d__, doublereal *du, doublereal *du2,
+	integer *ipiv, doublereal *b, integer *ldb)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, ip;
+    doublereal temp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DGTTS2 solves one of the systems of equations */
+/* A*X = B or A'*X = B, */
+/* with a tridiagonal matrix A using the LU factorization computed */
+/* by DGTTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* ITRANS (input) INTEGER */
+/* Specifies the form of the system of equations. */
+/* = 0: A * X = B (No transpose) */
+/* = 1: A'* X = B (Transpose) */
+/* = 2: A'* X = B (Conjugate transpose = Transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) multipliers that define the matrix L from the */
+/* LU factorization of A. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the upper triangular matrix U from */
+/* the LU factorization of A. */
+
+/* DU (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) elements of the first super-diagonal of U. */
+
+/* DU2 (input) DOUBLE PRECISION array, dimension (N-2) */
+/* The (n-2) elements of the second super-diagonal of U. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/* interchanged with row IPIV(i). IPIV(i) will always be either */
+/* i or i+1; IPIV(i) = i indicates a row interchange was not */
+/* required. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the matrix of right hand side vectors B. */
+/* On exit, B is overwritten by the solution vectors X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+    /* Parameter adjustments */
+    /* f2c idiom: shift pointers so 1-based Fortran indexing is preserved. */
+    --dl;
+    --d__;
+    --du;
+    --du2;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+    if (*itrans == 0) {
+
+/* Solve A*X = B using the LU factorization of A, */
+/* overwriting each right hand side vector with its solution. */
+
+	if (*nrhs <= 1) {
+	    j = 1;
+L10:
+
+/* Solve L*x = b. */
+
+	    i__1 = *n - 1;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		ip = ipiv[i__];
+/* ip is either i or i+1; the index i+1-ip+i selects whichever of the */
+/* two rows was NOT pivoted into position i, avoiding a branch. */
+		temp = b[i__ + 1 - ip + i__ + j * b_dim1] - dl[i__] * b[ip +
+			j * b_dim1];
+		b[i__ + j * b_dim1] = b[ip + j * b_dim1];
+		b[i__ + 1 + j * b_dim1] = temp;
+/* L20: */
+	    }
+
+/* Solve U*x = b. */
+
+	    b[*n + j * b_dim1] /= d__[*n];
+	    if (*n > 1) {
+		b[*n - 1 + j * b_dim1] = (b[*n - 1 + j * b_dim1] - du[*n - 1]
+			* b[*n + j * b_dim1]) / d__[*n - 1];
+	    }
+	    for (i__ = *n - 2; i__ >= 1; --i__) {
+		b[i__ + j * b_dim1] = (b[i__ + j * b_dim1] - du[i__] * b[i__
+			+ 1 + j * b_dim1] - du2[i__] * b[i__ + 2 + j * b_dim1]
+			) / d__[i__];
+/* L30: */
+	    }
+	    if (j < *nrhs) {
+		++j;
+		goto L10;
+	    }
+	} else {
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+
+/* Solve L*x = b. */
+
+		i__2 = *n - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    if (ipiv[i__] == i__) {
+			b[i__ + 1 + j * b_dim1] -= dl[i__] * b[i__ + j *
+				b_dim1];
+		    } else {
+			temp = b[i__ + j * b_dim1];
+			b[i__ + j * b_dim1] = b[i__ + 1 + j * b_dim1];
+			b[i__ + 1 + j * b_dim1] = temp - dl[i__] * b[i__ + j *
+				 b_dim1];
+		    }
+/* L40: */
+		}
+
+/* Solve U*x = b. */
+
+		b[*n + j * b_dim1] /= d__[*n];
+		if (*n > 1) {
+		    b[*n - 1 + j * b_dim1] = (b[*n - 1 + j * b_dim1] - du[*n
+			    - 1] * b[*n + j * b_dim1]) / d__[*n - 1];
+		}
+		for (i__ = *n - 2; i__ >= 1; --i__) {
+		    b[i__ + j * b_dim1] = (b[i__ + j * b_dim1] - du[i__] * b[
+			    i__ + 1 + j * b_dim1] - du2[i__] * b[i__ + 2 + j *
+			     b_dim1]) / d__[i__];
+/* L50: */
+		}
+/* L60: */
+	    }
+	}
+    } else {
+
+/* Solve A' * X = B. */
+
+	if (*nrhs <= 1) {
+
+/* Solve U'*x = b. */
+
+	    j = 1;
+L70:
+	    b[j * b_dim1 + 1] /= d__[1];
+	    if (*n > 1) {
+		b[j * b_dim1 + 2] = (b[j * b_dim1 + 2] - du[1] * b[j * b_dim1
+			+ 1]) / d__[2];
+	    }
+	    i__1 = *n;
+	    for (i__ = 3; i__ <= i__1; ++i__) {
+		b[i__ + j * b_dim1] = (b[i__ + j * b_dim1] - du[i__ - 1] * b[
+			i__ - 1 + j * b_dim1] - du2[i__ - 2] * b[i__ - 2 + j *
+			 b_dim1]) / d__[i__];
+/* L80: */
+	    }
+
+/* Solve L'*x = b. */
+
+	    for (i__ = *n - 1; i__ >= 1; --i__) {
+		ip = ipiv[i__];
+		temp = b[i__ + j * b_dim1] - dl[i__] * b[i__ + 1 + j * b_dim1]
+			;
+		b[i__ + j * b_dim1] = b[ip + j * b_dim1];
+		b[ip + j * b_dim1] = temp;
+/* L90: */
+	    }
+	    if (j < *nrhs) {
+		++j;
+		goto L70;
+	    }
+
+	} else {
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+
+/* Solve U'*x = b. */
+
+		b[j * b_dim1 + 1] /= d__[1];
+		if (*n > 1) {
+		    b[j * b_dim1 + 2] = (b[j * b_dim1 + 2] - du[1] * b[j *
+			    b_dim1 + 1]) / d__[2];
+		}
+		i__2 = *n;
+		for (i__ = 3; i__ <= i__2; ++i__) {
+		    b[i__ + j * b_dim1] = (b[i__ + j * b_dim1] - du[i__ - 1] *
+			     b[i__ - 1 + j * b_dim1] - du2[i__ - 2] * b[i__ -
+			    2 + j * b_dim1]) / d__[i__];
+/* L100: */
+		}
+		for (i__ = *n - 1; i__ >= 1; --i__) {
+		    if (ipiv[i__] == i__) {
+			b[i__ + j * b_dim1] -= dl[i__] * b[i__ + 1 + j *
+				b_dim1];
+		    } else {
+			temp = b[i__ + 1 + j * b_dim1];
+			b[i__ + 1 + j * b_dim1] = b[i__ + j * b_dim1] - dl[
+				i__] * temp;
+			b[i__ + j * b_dim1] = temp;
+		    }
+/* L110: */
+		}
+/* L120: */
+	    }
+	}
+    }
+
+/* End of DGTTS2 */
+
+    return 0;
+} /* dgtts2_ */
diff --git a/contrib/libs/clapack/dhgeqz.c b/contrib/libs/clapack/dhgeqz.c
new file mode 100644
index 0000000000..b594c954b8
--- /dev/null
+++ b/contrib/libs/clapack/dhgeqz.c
@@ -0,0 +1,1498 @@
+/* dhgeqz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c passes every scalar argument by reference, so compile-time
+   constants are materialized as file-static variables whose addresses
+   are handed to the BLAS/LAPACK callees below. */
+
+static doublereal c_b12 = 0.;  /* 0.0 -- off-diagonal fill (alpha) for dlaset_ when initializing Q/Z */
+static doublereal c_b13 = 1.;  /* 1.0 -- diagonal fill (beta) for dlaset_, making Q/Z the identity */
+static integer c__1 = 1;       /* unit increment/stride for drot_ and dlarfg_ vector arguments */
+static integer c__3 = 3;       /* vector length 3 for dlarfg_ (3-element Householder reflector v) */
+
+/* Subroutine */ int dhgeqz_(char *job, char *compq, char *compz, integer *n,
+ integer *ilo, integer *ihi, doublereal *h__, integer *ldh, doublereal
+ *t, integer *ldt, doublereal *alphar, doublereal *alphai, doublereal *
+ beta, doublereal *q, integer *ldq, doublereal *z__, integer *ldz,
+ doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, q_dim1, q_offset, t_dim1, t_offset, z_dim1,
+ z_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ doublereal c__;
+ integer j;
+ doublereal s, v[3], s1, s2, t1, u1, u2, a11, a12, a21, a22, b11, b22, c12,
+ c21;
+ integer jc;
+ doublereal an, bn, cl, cq, cr;
+ integer in;
+ doublereal u12, w11, w12, w21;
+ integer jr;
+ doublereal cz, w22, sl, wi, sr, vs, wr, b1a, b2a, a1i, a2i, b1i, b2i, a1r,
+ a2r, b1r, b2r, wr2, ad11, ad12, ad21, ad22, c11i, c22i;
+ integer jch;
+ doublereal c11r, c22r;
+ logical ilq;
+ doublereal u12l, tau, sqi;
+ logical ilz;
+ doublereal ulp, sqr, szi, szr, ad11l, ad12l, ad21l, ad22l, ad32l, wabs,
+ atol, btol, temp;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *), dlag2_(
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *);
+ doublereal temp2, s1inv, scale;
+ extern logical lsame_(char *, char *);
+ integer iiter, ilast, jiter;
+ doublereal anorm, bnorm;
+ integer maxit;
+ doublereal tempi, tempr;
+ extern doublereal dlapy2_(doublereal *, doublereal *), dlapy3_(doublereal
+ *, doublereal *, doublereal *);
+ extern /* Subroutine */ int dlasv2_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *);
+ logical ilazr2;
+ doublereal ascale, bscale;
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *);
+ extern doublereal dlanhs_(char *, integer *, doublereal *, integer *,
+ doublereal *);
+ extern /* Subroutine */ int dlaset_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *);
+ doublereal safmin;
+ extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *);
+ doublereal safmax;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal eshift;
+ logical ilschr;
+ integer icompq, ilastm, ischur;
+ logical ilazro;
+ integer icompz, ifirst, ifrstm, istart;
+ logical ilpivt, lquery;
+
+
+/* -- LAPACK routine (version 3.2.1) -- */
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/* -- April 2009 -- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DHGEQZ computes the eigenvalues of a real matrix pair (H,T), */
+/* where H is an upper Hessenberg matrix and T is upper triangular, */
+/* using the double-shift QZ method. */
+/* Matrix pairs of this type are produced by the reduction to */
+/* generalized upper Hessenberg form of a real matrix pair (A,B): */
+
+/* A = Q1*H*Z1**T, B = Q1*T*Z1**T, */
+
+/* as computed by DGGHRD. */
+
+/* If JOB='S', then the Hessenberg-triangular pair (H,T) is */
+/* also reduced to generalized Schur form, */
+
+/* H = Q*S*Z**T, T = Q*P*Z**T, */
+
+/* where Q and Z are orthogonal matrices, P is an upper triangular */
+/* matrix, and S is a quasi-triangular matrix with 1-by-1 and 2-by-2 */
+/* diagonal blocks. */
+
+/* The 1-by-1 blocks correspond to real eigenvalues of the matrix pair */
+/* (H,T) and the 2-by-2 blocks correspond to complex conjugate pairs of */
+/* eigenvalues. */
+
+/* Additionally, the 2-by-2 upper triangular diagonal blocks of P */
+/* corresponding to 2-by-2 blocks of S are reduced to positive diagonal */
+/* form, i.e., if S(j+1,j) is non-zero, then P(j+1,j) = P(j,j+1) = 0, */
+/* P(j,j) > 0, and P(j+1,j+1) > 0. */
+
+/* Optionally, the orthogonal matrix Q from the generalized Schur */
+/* factorization may be postmultiplied into an input matrix Q1, and the */
+/* orthogonal matrix Z may be postmultiplied into an input matrix Z1. */
+/* If Q1 and Z1 are the orthogonal matrices from DGGHRD that reduced */
+/* the matrix pair (A,B) to generalized upper Hessenberg form, then the */
+/* output matrices Q1*Q and Z1*Z are the orthogonal factors from the */
+/* generalized Schur factorization of (A,B): */
+
+/* A = (Q1*Q)*S*(Z1*Z)**T, B = (Q1*Q)*P*(Z1*Z)**T. */
+
+/* To avoid overflow, eigenvalues of the matrix pair (H,T) (equivalently, */
+/* of (A,B)) are computed as a pair of values (alpha,beta), where alpha is */
+/* complex and beta real. */
+/* If beta is nonzero, lambda = alpha / beta is an eigenvalue of the */
+/* generalized nonsymmetric eigenvalue problem (GNEP) */
+/* A*x = lambda*B*x */
+/* and if alpha is nonzero, mu = beta / alpha is an eigenvalue of the */
+/* alternate form of the GNEP */
+/* mu*A*y = B*y. */
+/* Real eigenvalues can be read directly from the generalized Schur */
+/* form: */
+/* alpha = S(i,i), beta = P(i,i). */
+
+/* Ref: C.B. Moler & G.W. Stewart, "An Algorithm for Generalized Matrix */
+/* Eigenvalue Problems", SIAM J. Numer. Anal., 10(1973), */
+/* pp. 241--256. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* = 'E': Compute eigenvalues only; */
+/* = 'S': Compute eigenvalues and the Schur form. */
+
+/* COMPQ (input) CHARACTER*1 */
+/* = 'N': Left Schur vectors (Q) are not computed; */
+/* = 'I': Q is initialized to the unit matrix and the matrix Q */
+/* of left Schur vectors of (H,T) is returned; */
+/* = 'V': Q must contain an orthogonal matrix Q1 on entry and */
+/* the product Q1*Q is returned. */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': Right Schur vectors (Z) are not computed; */
+/* = 'I': Z is initialized to the unit matrix and the matrix Z */
+/* of right Schur vectors of (H,T) is returned; */
+/* = 'V': Z must contain an orthogonal matrix Z1 on entry and */
+/* the product Z1*Z is returned. */
+
+/* N (input) INTEGER */
+/* The order of the matrices H, T, Q, and Z. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* ILO and IHI mark the rows and columns of H which are in */
+/* Hessenberg form. It is assumed that A is already upper */
+/* triangular in rows and columns 1:ILO-1 and IHI+1:N. */
+/* If N > 0, 1 <= ILO <= IHI <= N; if N = 0, ILO=1 and IHI=0. */
+
+/* H (input/output) DOUBLE PRECISION array, dimension (LDH, N) */
+/* On entry, the N-by-N upper Hessenberg matrix H. */
+/* On exit, if JOB = 'S', H contains the upper quasi-triangular */
+/* matrix S from the generalized Schur factorization; */
+/* 2-by-2 diagonal blocks (corresponding to complex conjugate */
+/* pairs of eigenvalues) are returned in standard form, with */
+/* H(i,i) = H(i+1,i+1) and H(i+1,i)*H(i,i+1) < 0. */
+/* If JOB = 'E', the diagonal blocks of H match those of S, but */
+/* the rest of H is unspecified. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH >= max( 1, N ). */
+
+/* T (input/output) DOUBLE PRECISION array, dimension (LDT, N) */
+/* On entry, the N-by-N upper triangular matrix T. */
+/* On exit, if JOB = 'S', T contains the upper triangular */
+/* matrix P from the generalized Schur factorization; */
+/* 2-by-2 diagonal blocks of P corresponding to 2-by-2 blocks of S */
+/* are reduced to positive diagonal form, i.e., if H(j+1,j) is */
+/* non-zero, then T(j+1,j) = T(j,j+1) = 0, T(j,j) > 0, and */
+/* T(j+1,j+1) > 0. */
+/* If JOB = 'E', the diagonal blocks of T match those of P, but */
+/* the rest of T is unspecified. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max( 1, N ). */
+
+/* ALPHAR (output) DOUBLE PRECISION array, dimension (N) */
+/* The real parts of each scalar alpha defining an eigenvalue */
+/* of GNEP. */
+
+/* ALPHAI (output) DOUBLE PRECISION array, dimension (N) */
+/* The imaginary parts of each scalar alpha defining an */
+/* eigenvalue of GNEP. */
+/* If ALPHAI(j) is zero, then the j-th eigenvalue is real; if */
+/* positive, then the j-th and (j+1)-st eigenvalues are a */
+/* complex conjugate pair, with ALPHAI(j+1) = -ALPHAI(j). */
+
+/* BETA (output) DOUBLE PRECISION array, dimension (N) */
+/* The scalars beta that define the eigenvalues of GNEP. */
+/* Together, the quantities alpha = (ALPHAR(j),ALPHAI(j)) and */
+/* beta = BETA(j) represent the j-th eigenvalue of the matrix */
+/* pair (A,B), in one of the forms lambda = alpha/beta or */
+/* mu = beta/alpha. Since either lambda or mu may overflow, */
+/* they should not, in general, be computed. */
+
+/* Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N) */
+/* On entry, if COMPZ = 'V', the orthogonal matrix Q1 used in */
+/* the reduction of (A,B) to generalized Hessenberg form. */
+/* On exit, if COMPZ = 'I', the orthogonal matrix of left Schur */
+/* vectors of (H,T), and if COMPZ = 'V', the orthogonal matrix */
+/* of left Schur vectors of (A,B). */
+/* Not referenced if COMPZ = 'N'. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= 1. */
+/* If COMPQ='V' or 'I', then LDQ >= N. */
+
+/* Z (input/output) DOUBLE PRECISION array, dimension (LDZ, N) */
+/* On entry, if COMPZ = 'V', the orthogonal matrix Z1 used in */
+/* the reduction of (A,B) to generalized Hessenberg form. */
+/* On exit, if COMPZ = 'I', the orthogonal matrix of */
+/* right Schur vectors of (H,T), and if COMPZ = 'V', the */
+/* orthogonal matrix of right Schur vectors of (A,B). */
+/* Not referenced if COMPZ = 'N'. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1. */
+/* If COMPZ='V' or 'I', then LDZ >= N. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO >= 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* = 1,...,N: the QZ iteration did not converge. (H,T) is not */
+/* in Schur form, but ALPHAR(i), ALPHAI(i), and */
+/* BETA(i), i=INFO+1,...,N should be correct. */
+/* = N+1,...,2*N: the shift calculation failed. (H,T) is not */
+/* in Schur form, but ALPHAR(i), ALPHAI(i), and */
+/* BETA(i), i=INFO-N+1,...,N should be correct. */
+
+/* Further Details */
+/* =============== */
+
+/* Iteration counters: */
+
+/* JITER -- counts iterations. */
+/* IITER -- counts iterations run since ILAST was last */
+/* changed. This is therefore reset only when a 1-by-1 or */
+/* 2-by-2 block deflates off the bottom. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* $ SAFETY = 1.0E+0 ) */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode JOB, COMPQ, COMPZ */
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ --alphar;
+ --alphai;
+ --beta;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ if (lsame_(job, "E")) {
+ ilschr = FALSE_;
+ ischur = 1;
+ } else if (lsame_(job, "S")) {
+ ilschr = TRUE_;
+ ischur = 2;
+ } else {
+ ischur = 0;
+ }
+
+ if (lsame_(compq, "N")) {
+ ilq = FALSE_;
+ icompq = 1;
+ } else if (lsame_(compq, "V")) {
+ ilq = TRUE_;
+ icompq = 2;
+ } else if (lsame_(compq, "I")) {
+ ilq = TRUE_;
+ icompq = 3;
+ } else {
+ icompq = 0;
+ }
+
+ if (lsame_(compz, "N")) {
+ ilz = FALSE_;
+ icompz = 1;
+ } else if (lsame_(compz, "V")) {
+ ilz = TRUE_;
+ icompz = 2;
+ } else if (lsame_(compz, "I")) {
+ ilz = TRUE_;
+ icompz = 3;
+ } else {
+ icompz = 0;
+ }
+
+/* Check Argument Values */
+
+ *info = 0;
+ work[1] = (doublereal) max(1,*n);
+ lquery = *lwork == -1;
+ if (ischur == 0) {
+ *info = -1;
+ } else if (icompq == 0) {
+ *info = -2;
+ } else if (icompz == 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ilo < 1) {
+ *info = -5;
+ } else if (*ihi > *n || *ihi < *ilo - 1) {
+ *info = -6;
+ } else if (*ldh < *n) {
+ *info = -8;
+ } else if (*ldt < *n) {
+ *info = -10;
+ } else if (*ldq < 1 || ilq && *ldq < *n) {
+ *info = -15;
+ } else if (*ldz < 1 || ilz && *ldz < *n) {
+ *info = -17;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -19;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DHGEQZ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+/* Initialize Q and Z */
+
+ if (icompq == 3) {
+ dlaset_("Full", n, n, &c_b12, &c_b13, &q[q_offset], ldq);
+ }
+ if (icompz == 3) {
+ dlaset_("Full", n, n, &c_b12, &c_b13, &z__[z_offset], ldz);
+ }
+
+/* Machine Constants */
+
+ in = *ihi + 1 - *ilo;
+ safmin = dlamch_("S");
+ safmax = 1. / safmin;
+ ulp = dlamch_("E") * dlamch_("B");
+ anorm = dlanhs_("F", &in, &h__[*ilo + *ilo * h_dim1], ldh, &work[1]);
+ bnorm = dlanhs_("F", &in, &t[*ilo + *ilo * t_dim1], ldt, &work[1]);
+/* Computing MAX */
+ d__1 = safmin, d__2 = ulp * anorm;
+ atol = max(d__1,d__2);
+/* Computing MAX */
+ d__1 = safmin, d__2 = ulp * bnorm;
+ btol = max(d__1,d__2);
+ ascale = 1. / max(safmin,anorm);
+ bscale = 1. / max(safmin,bnorm);
+
+/* Set Eigenvalues IHI+1:N */
+
+ i__1 = *n;
+ for (j = *ihi + 1; j <= i__1; ++j) {
+ if (t[j + j * t_dim1] < 0.) {
+ if (ilschr) {
+ i__2 = j;
+ for (jr = 1; jr <= i__2; ++jr) {
+ h__[jr + j * h_dim1] = -h__[jr + j * h_dim1];
+ t[jr + j * t_dim1] = -t[jr + j * t_dim1];
+/* L10: */
+ }
+ } else {
+ h__[j + j * h_dim1] = -h__[j + j * h_dim1];
+ t[j + j * t_dim1] = -t[j + j * t_dim1];
+ }
+ if (ilz) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ z__[jr + j * z_dim1] = -z__[jr + j * z_dim1];
+/* L20: */
+ }
+ }
+ }
+ alphar[j] = h__[j + j * h_dim1];
+ alphai[j] = 0.;
+ beta[j] = t[j + j * t_dim1];
+/* L30: */
+ }
+
+/* If IHI < ILO, skip QZ steps */
+
+ if (*ihi < *ilo) {
+ goto L380;
+ }
+
+/* MAIN QZ ITERATION LOOP */
+
+/* Initialize dynamic indices */
+
+/* Eigenvalues ILAST+1:N have been found. */
+/* Column operations modify rows IFRSTM:whatever. */
+/* Row operations modify columns whatever:ILASTM. */
+
+/* If only eigenvalues are being computed, then */
+/* IFRSTM is the row of the last splitting row above row ILAST; */
+/* this is always at least ILO. */
+/* IITER counts iterations since the last eigenvalue was found, */
+/* to tell when to use an extraordinary shift. */
+/* MAXIT is the maximum number of QZ sweeps allowed. */
+
+ ilast = *ihi;
+ if (ilschr) {
+ ifrstm = 1;
+ ilastm = *n;
+ } else {
+ ifrstm = *ilo;
+ ilastm = *ihi;
+ }
+ iiter = 0;
+ eshift = 0.;
+ maxit = (*ihi - *ilo + 1) * 30;
+
+ i__1 = maxit;
+ for (jiter = 1; jiter <= i__1; ++jiter) {
+
+/* Split the matrix if possible. */
+
+/* Two tests: */
+/* 1: H(j,j-1)=0 or j=ILO */
+/* 2: T(j,j)=0 */
+
+ if (ilast == *ilo) {
+
+/* Special case: j=ILAST */
+
+ goto L80;
+ } else {
+ if ((d__1 = h__[ilast + (ilast - 1) * h_dim1], abs(d__1)) <= atol)
+ {
+ h__[ilast + (ilast - 1) * h_dim1] = 0.;
+ goto L80;
+ }
+ }
+
+ if ((d__1 = t[ilast + ilast * t_dim1], abs(d__1)) <= btol) {
+ t[ilast + ilast * t_dim1] = 0.;
+ goto L70;
+ }
+
+/* General case: j<ILAST */
+
+ i__2 = *ilo;
+ for (j = ilast - 1; j >= i__2; --j) {
+
+/* Test 1: for H(j,j-1)=0 or j=ILO */
+
+ if (j == *ilo) {
+ ilazro = TRUE_;
+ } else {
+ if ((d__1 = h__[j + (j - 1) * h_dim1], abs(d__1)) <= atol) {
+ h__[j + (j - 1) * h_dim1] = 0.;
+ ilazro = TRUE_;
+ } else {
+ ilazro = FALSE_;
+ }
+ }
+
+/* Test 2: for T(j,j)=0 */
+
+ if ((d__1 = t[j + j * t_dim1], abs(d__1)) < btol) {
+ t[j + j * t_dim1] = 0.;
+
+/* Test 1a: Check for 2 consecutive small subdiagonals in A */
+
+ ilazr2 = FALSE_;
+ if (! ilazro) {
+ temp = (d__1 = h__[j + (j - 1) * h_dim1], abs(d__1));
+ temp2 = (d__1 = h__[j + j * h_dim1], abs(d__1));
+ tempr = max(temp,temp2);
+ if (tempr < 1. && tempr != 0.) {
+ temp /= tempr;
+ temp2 /= tempr;
+ }
+ if (temp * (ascale * (d__1 = h__[j + 1 + j * h_dim1], abs(
+ d__1))) <= temp2 * (ascale * atol)) {
+ ilazr2 = TRUE_;
+ }
+ }
+
+/* If both tests pass (1 & 2), i.e., the leading diagonal */
+/* element of B in the block is zero, split a 1x1 block off */
+/* at the top. (I.e., at the J-th row/column) The leading */
+/* diagonal element of the remainder can also be zero, so */
+/* this may have to be done repeatedly. */
+
+ if (ilazro || ilazr2) {
+ i__3 = ilast - 1;
+ for (jch = j; jch <= i__3; ++jch) {
+ temp = h__[jch + jch * h_dim1];
+ dlartg_(&temp, &h__[jch + 1 + jch * h_dim1], &c__, &s,
+ &h__[jch + jch * h_dim1]);
+ h__[jch + 1 + jch * h_dim1] = 0.;
+ i__4 = ilastm - jch;
+ drot_(&i__4, &h__[jch + (jch + 1) * h_dim1], ldh, &
+ h__[jch + 1 + (jch + 1) * h_dim1], ldh, &c__,
+ &s);
+ i__4 = ilastm - jch;
+ drot_(&i__4, &t[jch + (jch + 1) * t_dim1], ldt, &t[
+ jch + 1 + (jch + 1) * t_dim1], ldt, &c__, &s);
+ if (ilq) {
+ drot_(n, &q[jch * q_dim1 + 1], &c__1, &q[(jch + 1)
+ * q_dim1 + 1], &c__1, &c__, &s);
+ }
+ if (ilazr2) {
+ h__[jch + (jch - 1) * h_dim1] *= c__;
+ }
+ ilazr2 = FALSE_;
+ if ((d__1 = t[jch + 1 + (jch + 1) * t_dim1], abs(d__1)
+ ) >= btol) {
+ if (jch + 1 >= ilast) {
+ goto L80;
+ } else {
+ ifirst = jch + 1;
+ goto L110;
+ }
+ }
+ t[jch + 1 + (jch + 1) * t_dim1] = 0.;
+/* L40: */
+ }
+ goto L70;
+ } else {
+
+/* Only test 2 passed -- chase the zero to T(ILAST,ILAST) */
+/* Then process as in the case T(ILAST,ILAST)=0 */
+
+ i__3 = ilast - 1;
+ for (jch = j; jch <= i__3; ++jch) {
+ temp = t[jch + (jch + 1) * t_dim1];
+ dlartg_(&temp, &t[jch + 1 + (jch + 1) * t_dim1], &c__,
+ &s, &t[jch + (jch + 1) * t_dim1]);
+ t[jch + 1 + (jch + 1) * t_dim1] = 0.;
+ if (jch < ilastm - 1) {
+ i__4 = ilastm - jch - 1;
+ drot_(&i__4, &t[jch + (jch + 2) * t_dim1], ldt, &
+ t[jch + 1 + (jch + 2) * t_dim1], ldt, &
+ c__, &s);
+ }
+ i__4 = ilastm - jch + 2;
+ drot_(&i__4, &h__[jch + (jch - 1) * h_dim1], ldh, &
+ h__[jch + 1 + (jch - 1) * h_dim1], ldh, &c__,
+ &s);
+ if (ilq) {
+ drot_(n, &q[jch * q_dim1 + 1], &c__1, &q[(jch + 1)
+ * q_dim1 + 1], &c__1, &c__, &s);
+ }
+ temp = h__[jch + 1 + jch * h_dim1];
+ dlartg_(&temp, &h__[jch + 1 + (jch - 1) * h_dim1], &
+ c__, &s, &h__[jch + 1 + jch * h_dim1]);
+ h__[jch + 1 + (jch - 1) * h_dim1] = 0.;
+ i__4 = jch + 1 - ifrstm;
+ drot_(&i__4, &h__[ifrstm + jch * h_dim1], &c__1, &h__[
+ ifrstm + (jch - 1) * h_dim1], &c__1, &c__, &s)
+ ;
+ i__4 = jch - ifrstm;
+ drot_(&i__4, &t[ifrstm + jch * t_dim1], &c__1, &t[
+ ifrstm + (jch - 1) * t_dim1], &c__1, &c__, &s)
+ ;
+ if (ilz) {
+ drot_(n, &z__[jch * z_dim1 + 1], &c__1, &z__[(jch
+ - 1) * z_dim1 + 1], &c__1, &c__, &s);
+ }
+/* L50: */
+ }
+ goto L70;
+ }
+ } else if (ilazro) {
+
+/* Only test 1 passed -- work on J:ILAST */
+
+ ifirst = j;
+ goto L110;
+ }
+
+/* Neither test passed -- try next J */
+
+/* L60: */
+ }
+
+/* (Drop-through is "impossible") */
+
+ *info = *n + 1;
+ goto L420;
+
+/* T(ILAST,ILAST)=0 -- clear H(ILAST,ILAST-1) to split off a */
+/* 1x1 block. */
+
+L70:
+ temp = h__[ilast + ilast * h_dim1];
+ dlartg_(&temp, &h__[ilast + (ilast - 1) * h_dim1], &c__, &s, &h__[
+ ilast + ilast * h_dim1]);
+ h__[ilast + (ilast - 1) * h_dim1] = 0.;
+ i__2 = ilast - ifrstm;
+ drot_(&i__2, &h__[ifrstm + ilast * h_dim1], &c__1, &h__[ifrstm + (
+ ilast - 1) * h_dim1], &c__1, &c__, &s);
+ i__2 = ilast - ifrstm;
+ drot_(&i__2, &t[ifrstm + ilast * t_dim1], &c__1, &t[ifrstm + (ilast -
+ 1) * t_dim1], &c__1, &c__, &s);
+ if (ilz) {
+ drot_(n, &z__[ilast * z_dim1 + 1], &c__1, &z__[(ilast - 1) *
+ z_dim1 + 1], &c__1, &c__, &s);
+ }
+
+/* H(ILAST,ILAST-1)=0 -- Standardize B, set ALPHAR, ALPHAI, */
+/* and BETA */
+
+L80:
+ if (t[ilast + ilast * t_dim1] < 0.) {
+ if (ilschr) {
+ i__2 = ilast;
+ for (j = ifrstm; j <= i__2; ++j) {
+ h__[j + ilast * h_dim1] = -h__[j + ilast * h_dim1];
+ t[j + ilast * t_dim1] = -t[j + ilast * t_dim1];
+/* L90: */
+ }
+ } else {
+ h__[ilast + ilast * h_dim1] = -h__[ilast + ilast * h_dim1];
+ t[ilast + ilast * t_dim1] = -t[ilast + ilast * t_dim1];
+ }
+ if (ilz) {
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ z__[j + ilast * z_dim1] = -z__[j + ilast * z_dim1];
+/* L100: */
+ }
+ }
+ }
+ alphar[ilast] = h__[ilast + ilast * h_dim1];
+ alphai[ilast] = 0.;
+ beta[ilast] = t[ilast + ilast * t_dim1];
+
+/* Go to next block -- exit if finished. */
+
+ --ilast;
+ if (ilast < *ilo) {
+ goto L380;
+ }
+
+/* Reset counters */
+
+ iiter = 0;
+ eshift = 0.;
+ if (! ilschr) {
+ ilastm = ilast;
+ if (ifrstm > ilast) {
+ ifrstm = *ilo;
+ }
+ }
+ goto L350;
+
+/* QZ step */
+
+/* This iteration only involves rows/columns IFIRST:ILAST. We */
+/* assume IFIRST < ILAST, and that the diagonal of B is non-zero. */
+
+L110:
+ ++iiter;
+ if (! ilschr) {
+ ifrstm = ifirst;
+ }
+
+/* Compute single shifts. */
+
+/* At this point, IFIRST < ILAST, and the diagonal elements of */
+/* T(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in */
+/* magnitude) */
+
+ if (iiter / 10 * 10 == iiter) {
+
+/* Exceptional shift. Chosen for no particularly good reason. */
+/* (Single shift only.) */
+
+ if ((doublereal) maxit * safmin * (d__1 = h__[ilast - 1 + ilast *
+ h_dim1], abs(d__1)) < (d__2 = t[ilast - 1 + (ilast - 1) *
+ t_dim1], abs(d__2))) {
+ eshift += h__[ilast - 1 + ilast * h_dim1] / t[ilast - 1 + (
+ ilast - 1) * t_dim1];
+ } else {
+ eshift += 1. / (safmin * (doublereal) maxit);
+ }
+ s1 = 1.;
+ wr = eshift;
+
+ } else {
+
+/* Shifts based on the generalized eigenvalues of the */
+/* bottom-right 2x2 block of A and B. The first eigenvalue */
+/* returned by DLAG2 is the Wilkinson shift (AEP p.512), */
+
+ d__1 = safmin * 100.;
+ dlag2_(&h__[ilast - 1 + (ilast - 1) * h_dim1], ldh, &t[ilast - 1
+ + (ilast - 1) * t_dim1], ldt, &d__1, &s1, &s2, &wr, &wr2,
+ &wi);
+
+/* Computing MAX */
+/* Computing MAX */
+ d__3 = 1., d__4 = abs(wr), d__3 = max(d__3,d__4), d__4 = abs(wi);
+ d__1 = s1, d__2 = safmin * max(d__3,d__4);
+ temp = max(d__1,d__2);
+ if (wi != 0.) {
+ goto L200;
+ }
+ }
+
+/* Fiddle with shift to avoid overflow */
+
+ temp = min(ascale,1.) * (safmax * .5);
+ if (s1 > temp) {
+ scale = temp / s1;
+ } else {
+ scale = 1.;
+ }
+
+ temp = min(bscale,1.) * (safmax * .5);
+ if (abs(wr) > temp) {
+/* Computing MIN */
+ d__1 = scale, d__2 = temp / abs(wr);
+ scale = min(d__1,d__2);
+ }
+ s1 = scale * s1;
+ wr = scale * wr;
+
+/* Now check for two consecutive small subdiagonals. */
+
+ i__2 = ifirst + 1;
+ for (j = ilast - 1; j >= i__2; --j) {
+ istart = j;
+ temp = (d__1 = s1 * h__[j + (j - 1) * h_dim1], abs(d__1));
+ temp2 = (d__1 = s1 * h__[j + j * h_dim1] - wr * t[j + j * t_dim1],
+ abs(d__1));
+ tempr = max(temp,temp2);
+ if (tempr < 1. && tempr != 0.) {
+ temp /= tempr;
+ temp2 /= tempr;
+ }
+ if ((d__1 = ascale * h__[j + 1 + j * h_dim1] * temp, abs(d__1)) <=
+ ascale * atol * temp2) {
+ goto L130;
+ }
+/* L120: */
+ }
+
+ istart = ifirst;
+L130:
+
+/* Do an implicit single-shift QZ sweep. */
+
+/* Initial Q */
+
+ temp = s1 * h__[istart + istart * h_dim1] - wr * t[istart + istart *
+ t_dim1];
+ temp2 = s1 * h__[istart + 1 + istart * h_dim1];
+ dlartg_(&temp, &temp2, &c__, &s, &tempr);
+
+/* Sweep */
+
+ i__2 = ilast - 1;
+ for (j = istart; j <= i__2; ++j) {
+ if (j > istart) {
+ temp = h__[j + (j - 1) * h_dim1];
+ dlartg_(&temp, &h__[j + 1 + (j - 1) * h_dim1], &c__, &s, &h__[
+ j + (j - 1) * h_dim1]);
+ h__[j + 1 + (j - 1) * h_dim1] = 0.;
+ }
+
+ i__3 = ilastm;
+ for (jc = j; jc <= i__3; ++jc) {
+ temp = c__ * h__[j + jc * h_dim1] + s * h__[j + 1 + jc *
+ h_dim1];
+ h__[j + 1 + jc * h_dim1] = -s * h__[j + jc * h_dim1] + c__ *
+ h__[j + 1 + jc * h_dim1];
+ h__[j + jc * h_dim1] = temp;
+ temp2 = c__ * t[j + jc * t_dim1] + s * t[j + 1 + jc * t_dim1];
+ t[j + 1 + jc * t_dim1] = -s * t[j + jc * t_dim1] + c__ * t[j
+ + 1 + jc * t_dim1];
+ t[j + jc * t_dim1] = temp2;
+/* L140: */
+ }
+ if (ilq) {
+ i__3 = *n;
+ for (jr = 1; jr <= i__3; ++jr) {
+ temp = c__ * q[jr + j * q_dim1] + s * q[jr + (j + 1) *
+ q_dim1];
+ q[jr + (j + 1) * q_dim1] = -s * q[jr + j * q_dim1] + c__ *
+ q[jr + (j + 1) * q_dim1];
+ q[jr + j * q_dim1] = temp;
+/* L150: */
+ }
+ }
+
+ temp = t[j + 1 + (j + 1) * t_dim1];
+ dlartg_(&temp, &t[j + 1 + j * t_dim1], &c__, &s, &t[j + 1 + (j +
+ 1) * t_dim1]);
+ t[j + 1 + j * t_dim1] = 0.;
+
+/* Computing MIN */
+ i__4 = j + 2;
+ i__3 = min(i__4,ilast);
+ for (jr = ifrstm; jr <= i__3; ++jr) {
+ temp = c__ * h__[jr + (j + 1) * h_dim1] + s * h__[jr + j *
+ h_dim1];
+ h__[jr + j * h_dim1] = -s * h__[jr + (j + 1) * h_dim1] + c__ *
+ h__[jr + j * h_dim1];
+ h__[jr + (j + 1) * h_dim1] = temp;
+/* L160: */
+ }
+ i__3 = j;
+ for (jr = ifrstm; jr <= i__3; ++jr) {
+ temp = c__ * t[jr + (j + 1) * t_dim1] + s * t[jr + j * t_dim1]
+ ;
+ t[jr + j * t_dim1] = -s * t[jr + (j + 1) * t_dim1] + c__ * t[
+ jr + j * t_dim1];
+ t[jr + (j + 1) * t_dim1] = temp;
+/* L170: */
+ }
+ if (ilz) {
+ i__3 = *n;
+ for (jr = 1; jr <= i__3; ++jr) {
+ temp = c__ * z__[jr + (j + 1) * z_dim1] + s * z__[jr + j *
+ z_dim1];
+ z__[jr + j * z_dim1] = -s * z__[jr + (j + 1) * z_dim1] +
+ c__ * z__[jr + j * z_dim1];
+ z__[jr + (j + 1) * z_dim1] = temp;
+/* L180: */
+ }
+ }
+/* L190: */
+ }
+
+ goto L350;
+
+/* Use Francis double-shift */
+
+/* Note: the Francis double-shift should work with real shifts, */
+/* but only if the block is at least 3x3. */
+/* This code may break if this point is reached with */
+/* a 2x2 block with real eigenvalues. */
+
+L200:
+ if (ifirst + 1 == ilast) {
+
+/* Special case -- 2x2 block with complex eigenvectors */
+
+/* Step 1: Standardize, that is, rotate so that */
+
+/* ( B11 0 ) */
+/* B = ( ) with B11 non-negative. */
+/* ( 0 B22 ) */
+
+ dlasv2_(&t[ilast - 1 + (ilast - 1) * t_dim1], &t[ilast - 1 +
+ ilast * t_dim1], &t[ilast + ilast * t_dim1], &b22, &b11, &
+ sr, &cr, &sl, &cl);
+
+ if (b11 < 0.) {
+ cr = -cr;
+ sr = -sr;
+ b11 = -b11;
+ b22 = -b22;
+ }
+
+ i__2 = ilastm + 1 - ifirst;
+ drot_(&i__2, &h__[ilast - 1 + (ilast - 1) * h_dim1], ldh, &h__[
+ ilast + (ilast - 1) * h_dim1], ldh, &cl, &sl);
+ i__2 = ilast + 1 - ifrstm;
+ drot_(&i__2, &h__[ifrstm + (ilast - 1) * h_dim1], &c__1, &h__[
+ ifrstm + ilast * h_dim1], &c__1, &cr, &sr);
+
+ if (ilast < ilastm) {
+ i__2 = ilastm - ilast;
+ drot_(&i__2, &t[ilast - 1 + (ilast + 1) * t_dim1], ldt, &t[
+ ilast + (ilast + 1) * t_dim1], ldt, &cl, &sl);
+ }
+ if (ifrstm < ilast - 1) {
+ i__2 = ifirst - ifrstm;
+ drot_(&i__2, &t[ifrstm + (ilast - 1) * t_dim1], &c__1, &t[
+ ifrstm + ilast * t_dim1], &c__1, &cr, &sr);
+ }
+
+ if (ilq) {
+ drot_(n, &q[(ilast - 1) * q_dim1 + 1], &c__1, &q[ilast *
+ q_dim1 + 1], &c__1, &cl, &sl);
+ }
+ if (ilz) {
+ drot_(n, &z__[(ilast - 1) * z_dim1 + 1], &c__1, &z__[ilast *
+ z_dim1 + 1], &c__1, &cr, &sr);
+ }
+
+ t[ilast - 1 + (ilast - 1) * t_dim1] = b11;
+ t[ilast - 1 + ilast * t_dim1] = 0.;
+ t[ilast + (ilast - 1) * t_dim1] = 0.;
+ t[ilast + ilast * t_dim1] = b22;
+
+/* If B22 is negative, negate column ILAST */
+
+ if (b22 < 0.) {
+ i__2 = ilast;
+ for (j = ifrstm; j <= i__2; ++j) {
+ h__[j + ilast * h_dim1] = -h__[j + ilast * h_dim1];
+ t[j + ilast * t_dim1] = -t[j + ilast * t_dim1];
+/* L210: */
+ }
+
+ if (ilz) {
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ z__[j + ilast * z_dim1] = -z__[j + ilast * z_dim1];
+/* L220: */
+ }
+ }
+ }
+
+/* Step 2: Compute ALPHAR, ALPHAI, and BETA (see refs.) */
+
+/* Recompute shift */
+
+ d__1 = safmin * 100.;
+ dlag2_(&h__[ilast - 1 + (ilast - 1) * h_dim1], ldh, &t[ilast - 1
+ + (ilast - 1) * t_dim1], ldt, &d__1, &s1, &temp, &wr, &
+ temp2, &wi);
+
+/* If standardization has perturbed the shift onto real line, */
+/* do another (real single-shift) QR step. */
+
+ if (wi == 0.) {
+ goto L350;
+ }
+ s1inv = 1. / s1;
+
+/* Do EISPACK (QZVAL) computation of alpha and beta */
+
+ a11 = h__[ilast - 1 + (ilast - 1) * h_dim1];
+ a21 = h__[ilast + (ilast - 1) * h_dim1];
+ a12 = h__[ilast - 1 + ilast * h_dim1];
+ a22 = h__[ilast + ilast * h_dim1];
+
+/* Compute complex Givens rotation on right */
+/* (Assume some element of C = (sA - wB) > unfl ) */
+/* __ */
+/* (sA - wB) ( CZ -SZ ) */
+/* ( SZ CZ ) */
+
+ c11r = s1 * a11 - wr * b11;
+ c11i = -wi * b11;
+ c12 = s1 * a12;
+ c21 = s1 * a21;
+ c22r = s1 * a22 - wr * b22;
+ c22i = -wi * b22;
+
+ if (abs(c11r) + abs(c11i) + abs(c12) > abs(c21) + abs(c22r) + abs(
+ c22i)) {
+ t1 = dlapy3_(&c12, &c11r, &c11i);
+ cz = c12 / t1;
+ szr = -c11r / t1;
+ szi = -c11i / t1;
+ } else {
+ cz = dlapy2_(&c22r, &c22i);
+ if (cz <= safmin) {
+ cz = 0.;
+ szr = 1.;
+ szi = 0.;
+ } else {
+ tempr = c22r / cz;
+ tempi = c22i / cz;
+ t1 = dlapy2_(&cz, &c21);
+ cz /= t1;
+ szr = -c21 * tempr / t1;
+ szi = c21 * tempi / t1;
+ }
+ }
+
+/* Compute Givens rotation on left */
+
+/* ( CQ SQ ) */
+/* ( __ ) A or B */
+/* ( -SQ CQ ) */
+
+ an = abs(a11) + abs(a12) + abs(a21) + abs(a22);
+ bn = abs(b11) + abs(b22);
+ wabs = abs(wr) + abs(wi);
+ if (s1 * an > wabs * bn) {
+ cq = cz * b11;
+ sqr = szr * b22;
+ sqi = -szi * b22;
+ } else {
+ a1r = cz * a11 + szr * a12;
+ a1i = szi * a12;
+ a2r = cz * a21 + szr * a22;
+ a2i = szi * a22;
+ cq = dlapy2_(&a1r, &a1i);
+ if (cq <= safmin) {
+ cq = 0.;
+ sqr = 1.;
+ sqi = 0.;
+ } else {
+ tempr = a1r / cq;
+ tempi = a1i / cq;
+ sqr = tempr * a2r + tempi * a2i;
+ sqi = tempi * a2r - tempr * a2i;
+ }
+ }
+ t1 = dlapy3_(&cq, &sqr, &sqi);
+ cq /= t1;
+ sqr /= t1;
+ sqi /= t1;
+
+/* Compute diagonal elements of QBZ */
+
+ tempr = sqr * szr - sqi * szi;
+ tempi = sqr * szi + sqi * szr;
+ b1r = cq * cz * b11 + tempr * b22;
+ b1i = tempi * b22;
+ b1a = dlapy2_(&b1r, &b1i);
+ b2r = cq * cz * b22 + tempr * b11;
+ b2i = -tempi * b11;
+ b2a = dlapy2_(&b2r, &b2i);
+
+/* Normalize so beta > 0, and Im( alpha1 ) > 0 */
+
+ beta[ilast - 1] = b1a;
+ beta[ilast] = b2a;
+ alphar[ilast - 1] = wr * b1a * s1inv;
+ alphai[ilast - 1] = wi * b1a * s1inv;
+ alphar[ilast] = wr * b2a * s1inv;
+ alphai[ilast] = -(wi * b2a) * s1inv;
+
+/* Step 3: Go to next block -- exit if finished. */
+
+ ilast = ifirst - 1;
+ if (ilast < *ilo) {
+ goto L380;
+ }
+
+/* Reset counters */
+
+ iiter = 0;
+ eshift = 0.;
+ if (! ilschr) {
+ ilastm = ilast;
+ if (ifrstm > ilast) {
+ ifrstm = *ilo;
+ }
+ }
+ goto L350;
+ } else {
+
+/* Usual case: 3x3 or larger block, using Francis implicit */
+/* double-shift */
+
+/* 2 */
+/* Eigenvalue equation is w - c w + d = 0, */
+
+/* -1 2 -1 */
+/* so compute 1st column of (A B ) - c A B + d */
+/* using the formula in QZIT (from EISPACK) */
+
+/* We assume that the block is at least 3x3 */
+
+ ad11 = ascale * h__[ilast - 1 + (ilast - 1) * h_dim1] / (bscale *
+ t[ilast - 1 + (ilast - 1) * t_dim1]);
+ ad21 = ascale * h__[ilast + (ilast - 1) * h_dim1] / (bscale * t[
+ ilast - 1 + (ilast - 1) * t_dim1]);
+ ad12 = ascale * h__[ilast - 1 + ilast * h_dim1] / (bscale * t[
+ ilast + ilast * t_dim1]);
+ ad22 = ascale * h__[ilast + ilast * h_dim1] / (bscale * t[ilast +
+ ilast * t_dim1]);
+ u12 = t[ilast - 1 + ilast * t_dim1] / t[ilast + ilast * t_dim1];
+ ad11l = ascale * h__[ifirst + ifirst * h_dim1] / (bscale * t[
+ ifirst + ifirst * t_dim1]);
+ ad21l = ascale * h__[ifirst + 1 + ifirst * h_dim1] / (bscale * t[
+ ifirst + ifirst * t_dim1]);
+ ad12l = ascale * h__[ifirst + (ifirst + 1) * h_dim1] / (bscale *
+ t[ifirst + 1 + (ifirst + 1) * t_dim1]);
+ ad22l = ascale * h__[ifirst + 1 + (ifirst + 1) * h_dim1] / (
+ bscale * t[ifirst + 1 + (ifirst + 1) * t_dim1]);
+ ad32l = ascale * h__[ifirst + 2 + (ifirst + 1) * h_dim1] / (
+ bscale * t[ifirst + 1 + (ifirst + 1) * t_dim1]);
+ u12l = t[ifirst + (ifirst + 1) * t_dim1] / t[ifirst + 1 + (ifirst
+ + 1) * t_dim1];
+
+ v[0] = (ad11 - ad11l) * (ad22 - ad11l) - ad12 * ad21 + ad21 * u12
+ * ad11l + (ad12l - ad11l * u12l) * ad21l;
+ v[1] = (ad22l - ad11l - ad21l * u12l - (ad11 - ad11l) - (ad22 -
+ ad11l) + ad21 * u12) * ad21l;
+ v[2] = ad32l * ad21l;
+
+ istart = ifirst;
+
+ dlarfg_(&c__3, v, &v[1], &c__1, &tau);
+ v[0] = 1.;
+
+/* Sweep */
+
+ i__2 = ilast - 2;
+ for (j = istart; j <= i__2; ++j) {
+
+/* All but last elements: use 3x3 Householder transforms. */
+
+/* Zero (j-1)st column of A */
+
+ if (j > istart) {
+ v[0] = h__[j + (j - 1) * h_dim1];
+ v[1] = h__[j + 1 + (j - 1) * h_dim1];
+ v[2] = h__[j + 2 + (j - 1) * h_dim1];
+
+ dlarfg_(&c__3, &h__[j + (j - 1) * h_dim1], &v[1], &c__1, &
+ tau);
+ v[0] = 1.;
+ h__[j + 1 + (j - 1) * h_dim1] = 0.;
+ h__[j + 2 + (j - 1) * h_dim1] = 0.;
+ }
+
+ i__3 = ilastm;
+ for (jc = j; jc <= i__3; ++jc) {
+ temp = tau * (h__[j + jc * h_dim1] + v[1] * h__[j + 1 +
+ jc * h_dim1] + v[2] * h__[j + 2 + jc * h_dim1]);
+ h__[j + jc * h_dim1] -= temp;
+ h__[j + 1 + jc * h_dim1] -= temp * v[1];
+ h__[j + 2 + jc * h_dim1] -= temp * v[2];
+ temp2 = tau * (t[j + jc * t_dim1] + v[1] * t[j + 1 + jc *
+ t_dim1] + v[2] * t[j + 2 + jc * t_dim1]);
+ t[j + jc * t_dim1] -= temp2;
+ t[j + 1 + jc * t_dim1] -= temp2 * v[1];
+ t[j + 2 + jc * t_dim1] -= temp2 * v[2];
+/* L230: */
+ }
+ if (ilq) {
+ i__3 = *n;
+ for (jr = 1; jr <= i__3; ++jr) {
+ temp = tau * (q[jr + j * q_dim1] + v[1] * q[jr + (j +
+ 1) * q_dim1] + v[2] * q[jr + (j + 2) * q_dim1]
+ );
+ q[jr + j * q_dim1] -= temp;
+ q[jr + (j + 1) * q_dim1] -= temp * v[1];
+ q[jr + (j + 2) * q_dim1] -= temp * v[2];
+/* L240: */
+ }
+ }
+
+/* Zero j-th column of B (see DLAGBC for details) */
+
+/* Swap rows to pivot */
+
+ ilpivt = FALSE_;
+/* Computing MAX */
+ d__3 = (d__1 = t[j + 1 + (j + 1) * t_dim1], abs(d__1)), d__4 =
+ (d__2 = t[j + 1 + (j + 2) * t_dim1], abs(d__2));
+ temp = max(d__3,d__4);
+/* Computing MAX */
+ d__3 = (d__1 = t[j + 2 + (j + 1) * t_dim1], abs(d__1)), d__4 =
+ (d__2 = t[j + 2 + (j + 2) * t_dim1], abs(d__2));
+ temp2 = max(d__3,d__4);
+ if (max(temp,temp2) < safmin) {
+ scale = 0.;
+ u1 = 1.;
+ u2 = 0.;
+ goto L250;
+ } else if (temp >= temp2) {
+ w11 = t[j + 1 + (j + 1) * t_dim1];
+ w21 = t[j + 2 + (j + 1) * t_dim1];
+ w12 = t[j + 1 + (j + 2) * t_dim1];
+ w22 = t[j + 2 + (j + 2) * t_dim1];
+ u1 = t[j + 1 + j * t_dim1];
+ u2 = t[j + 2 + j * t_dim1];
+ } else {
+ w21 = t[j + 1 + (j + 1) * t_dim1];
+ w11 = t[j + 2 + (j + 1) * t_dim1];
+ w22 = t[j + 1 + (j + 2) * t_dim1];
+ w12 = t[j + 2 + (j + 2) * t_dim1];
+ u2 = t[j + 1 + j * t_dim1];
+ u1 = t[j + 2 + j * t_dim1];
+ }
+
+/* Swap columns if necessary. */
+
+ if (abs(w12) > abs(w11)) {
+ ilpivt = TRUE_;
+ temp = w12;
+ temp2 = w22;
+ w12 = w11;
+ w22 = w21;
+ w11 = temp;
+ w21 = temp2;
+ }
+
+/* LU-factor */
+
+ temp = w21 / w11;
+ u2 -= temp * u1;
+ w22 -= temp * w12;
+ w21 = 0.;
+
+/* Compute SCALE */
+
+ scale = 1.;
+ if (abs(w22) < safmin) {
+ scale = 0.;
+ u2 = 1.;
+ u1 = -w12 / w11;
+ goto L250;
+ }
+ if (abs(w22) < abs(u2)) {
+ scale = (d__1 = w22 / u2, abs(d__1));
+ }
+ if (abs(w11) < abs(u1)) {
+/* Computing MIN */
+ d__2 = scale, d__3 = (d__1 = w11 / u1, abs(d__1));
+ scale = min(d__2,d__3);
+ }
+
+/* Solve */
+
+ u2 = scale * u2 / w22;
+ u1 = (scale * u1 - w12 * u2) / w11;
+
+L250:
+ if (ilpivt) {
+ temp = u2;
+ u2 = u1;
+ u1 = temp;
+ }
+
+/* Compute Householder Vector */
+
+/* Computing 2nd power */
+ d__1 = scale;
+/* Computing 2nd power */
+ d__2 = u1;
+/* Computing 2nd power */
+ d__3 = u2;
+ t1 = sqrt(d__1 * d__1 + d__2 * d__2 + d__3 * d__3);
+ tau = scale / t1 + 1.;
+ vs = -1. / (scale + t1);
+ v[0] = 1.;
+ v[1] = vs * u1;
+ v[2] = vs * u2;
+
+/* Apply transformations from the right. */
+
+/* Computing MIN */
+ i__4 = j + 3;
+ i__3 = min(i__4,ilast);
+ for (jr = ifrstm; jr <= i__3; ++jr) {
+ temp = tau * (h__[jr + j * h_dim1] + v[1] * h__[jr + (j +
+ 1) * h_dim1] + v[2] * h__[jr + (j + 2) * h_dim1]);
+ h__[jr + j * h_dim1] -= temp;
+ h__[jr + (j + 1) * h_dim1] -= temp * v[1];
+ h__[jr + (j + 2) * h_dim1] -= temp * v[2];
+/* L260: */
+ }
+ i__3 = j + 2;
+ for (jr = ifrstm; jr <= i__3; ++jr) {
+ temp = tau * (t[jr + j * t_dim1] + v[1] * t[jr + (j + 1) *
+ t_dim1] + v[2] * t[jr + (j + 2) * t_dim1]);
+ t[jr + j * t_dim1] -= temp;
+ t[jr + (j + 1) * t_dim1] -= temp * v[1];
+ t[jr + (j + 2) * t_dim1] -= temp * v[2];
+/* L270: */
+ }
+ if (ilz) {
+ i__3 = *n;
+ for (jr = 1; jr <= i__3; ++jr) {
+ temp = tau * (z__[jr + j * z_dim1] + v[1] * z__[jr + (
+ j + 1) * z_dim1] + v[2] * z__[jr + (j + 2) *
+ z_dim1]);
+ z__[jr + j * z_dim1] -= temp;
+ z__[jr + (j + 1) * z_dim1] -= temp * v[1];
+ z__[jr + (j + 2) * z_dim1] -= temp * v[2];
+/* L280: */
+ }
+ }
+ t[j + 1 + j * t_dim1] = 0.;
+ t[j + 2 + j * t_dim1] = 0.;
+/* L290: */
+ }
+
+/* Last elements: Use Givens rotations */
+
+/* Rotations from the left */
+
+ j = ilast - 1;
+ temp = h__[j + (j - 1) * h_dim1];
+ dlartg_(&temp, &h__[j + 1 + (j - 1) * h_dim1], &c__, &s, &h__[j +
+ (j - 1) * h_dim1]);
+ h__[j + 1 + (j - 1) * h_dim1] = 0.;
+
+ i__2 = ilastm;
+ for (jc = j; jc <= i__2; ++jc) {
+ temp = c__ * h__[j + jc * h_dim1] + s * h__[j + 1 + jc *
+ h_dim1];
+ h__[j + 1 + jc * h_dim1] = -s * h__[j + jc * h_dim1] + c__ *
+ h__[j + 1 + jc * h_dim1];
+ h__[j + jc * h_dim1] = temp;
+ temp2 = c__ * t[j + jc * t_dim1] + s * t[j + 1 + jc * t_dim1];
+ t[j + 1 + jc * t_dim1] = -s * t[j + jc * t_dim1] + c__ * t[j
+ + 1 + jc * t_dim1];
+ t[j + jc * t_dim1] = temp2;
+/* L300: */
+ }
+ if (ilq) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ temp = c__ * q[jr + j * q_dim1] + s * q[jr + (j + 1) *
+ q_dim1];
+ q[jr + (j + 1) * q_dim1] = -s * q[jr + j * q_dim1] + c__ *
+ q[jr + (j + 1) * q_dim1];
+ q[jr + j * q_dim1] = temp;
+/* L310: */
+ }
+ }
+
+/* Rotations from the right. */
+
+ temp = t[j + 1 + (j + 1) * t_dim1];
+ dlartg_(&temp, &t[j + 1 + j * t_dim1], &c__, &s, &t[j + 1 + (j +
+ 1) * t_dim1]);
+ t[j + 1 + j * t_dim1] = 0.;
+
+ i__2 = ilast;
+ for (jr = ifrstm; jr <= i__2; ++jr) {
+ temp = c__ * h__[jr + (j + 1) * h_dim1] + s * h__[jr + j *
+ h_dim1];
+ h__[jr + j * h_dim1] = -s * h__[jr + (j + 1) * h_dim1] + c__ *
+ h__[jr + j * h_dim1];
+ h__[jr + (j + 1) * h_dim1] = temp;
+/* L320: */
+ }
+ i__2 = ilast - 1;
+ for (jr = ifrstm; jr <= i__2; ++jr) {
+ temp = c__ * t[jr + (j + 1) * t_dim1] + s * t[jr + j * t_dim1]
+ ;
+ t[jr + j * t_dim1] = -s * t[jr + (j + 1) * t_dim1] + c__ * t[
+ jr + j * t_dim1];
+ t[jr + (j + 1) * t_dim1] = temp;
+/* L330: */
+ }
+ if (ilz) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ temp = c__ * z__[jr + (j + 1) * z_dim1] + s * z__[jr + j *
+ z_dim1];
+ z__[jr + j * z_dim1] = -s * z__[jr + (j + 1) * z_dim1] +
+ c__ * z__[jr + j * z_dim1];
+ z__[jr + (j + 1) * z_dim1] = temp;
+/* L340: */
+ }
+ }
+
+/* End of Double-Shift code */
+
+ }
+
+ goto L350;
+
+/* End of iteration loop */
+
+L350:
+/* L360: */
+ ;
+ }
+
+/* Drop-through = non-convergence */
+
+ *info = ilast;
+ goto L420;
+
+/* Successful completion of all QZ steps */
+
+L380:
+
+/* Set Eigenvalues 1:ILO-1 */
+
+ i__1 = *ilo - 1;
+ for (j = 1; j <= i__1; ++j) {
+ if (t[j + j * t_dim1] < 0.) {
+ if (ilschr) {
+ i__2 = j;
+ for (jr = 1; jr <= i__2; ++jr) {
+ h__[jr + j * h_dim1] = -h__[jr + j * h_dim1];
+ t[jr + j * t_dim1] = -t[jr + j * t_dim1];
+/* L390: */
+ }
+ } else {
+ h__[j + j * h_dim1] = -h__[j + j * h_dim1];
+ t[j + j * t_dim1] = -t[j + j * t_dim1];
+ }
+ if (ilz) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ z__[jr + j * z_dim1] = -z__[jr + j * z_dim1];
+/* L400: */
+ }
+ }
+ }
+ alphar[j] = h__[j + j * h_dim1];
+ alphai[j] = 0.;
+ beta[j] = t[j + j * t_dim1];
+/* L410: */
+ }
+
+/* Normal Termination */
+
+ *info = 0;
+
+/* Exit (other than argument error) -- return optimal workspace size */
+
+L420:
+ work[1] = (doublereal) (*n);
+ return 0;
+
+/* End of DHGEQZ */
+
+} /* dhgeqz_ */
diff --git a/contrib/libs/clapack/dhsein.c b/contrib/libs/clapack/dhsein.c
new file mode 100644
index 0000000000..7220f147a0
--- /dev/null
+++ b/contrib/libs/clapack/dhsein.c
@@ -0,0 +1,491 @@
+/* dhsein.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static logical c_false = FALSE_;
+static logical c_true = TRUE_;
+
+/* Subroutine */ int dhsein_(char *side, char *eigsrc, char *initv, logical *
+ select, integer *n, doublereal *h__, integer *ldh, doublereal *wr,
+ doublereal *wi, doublereal *vl, integer *ldvl, doublereal *vr,
+ integer *ldvr, integer *mm, integer *m, doublereal *work, integer *
+ ifaill, integer *ifailr, integer *info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+ i__2;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ integer i__, k, kl, kr, kln, ksi;
+ doublereal wki;
+ integer ksr;
+ doublereal ulp, wkr, eps3;
+ logical pair;
+ doublereal unfl;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical leftv, bothv;
+ doublereal hnorm;
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int dlaein_(logical *, logical *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, integer *, doublereal *, doublereal *
+, doublereal *, doublereal *, integer *);
+ extern doublereal dlanhs_(char *, integer *, doublereal *, integer *,
+ doublereal *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum;
+ logical noinit;
+ integer ldwork;
+ logical rightv, fromqr;
+ doublereal smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DHSEIN uses inverse iteration to find specified right and/or left */
+/* eigenvectors of a real upper Hessenberg matrix H. */
+
+/* The right eigenvector x and the left eigenvector y of the matrix H */
+/* corresponding to an eigenvalue w are defined by: */
+
+/* H * x = w * x, y**h * H = w * y**h */
+
+/* where y**h denotes the conjugate transpose of the vector y. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'R': compute right eigenvectors only; */
+/* = 'L': compute left eigenvectors only; */
+/* = 'B': compute both right and left eigenvectors. */
+
+/* EIGSRC (input) CHARACTER*1 */
+/* Specifies the source of eigenvalues supplied in (WR,WI): */
+/* = 'Q': the eigenvalues were found using DHSEQR; thus, if */
+/* H has zero subdiagonal elements, and so is */
+/* block-triangular, then the j-th eigenvalue can be */
+/* assumed to be an eigenvalue of the block containing */
+/* the j-th row/column. This property allows DHSEIN to */
+/* perform inverse iteration on just one diagonal block. */
+/* = 'N': no assumptions are made on the correspondence */
+/* between eigenvalues and diagonal blocks. In this */
+/* case, DHSEIN must always perform inverse iteration */
+/* using the whole matrix H. */
+
+/* INITV (input) CHARACTER*1 */
+/* = 'N': no initial vectors are supplied; */
+/* = 'U': user-supplied initial vectors are stored in the arrays */
+/* VL and/or VR. */
+
+/* SELECT (input/output) LOGICAL array, dimension (N) */
+/* Specifies the eigenvectors to be computed. To select the */
+/* real eigenvector corresponding to a real eigenvalue WR(j), */
+/* SELECT(j) must be set to .TRUE.. To select the complex */
+/* eigenvector corresponding to a complex eigenvalue */
+/* (WR(j),WI(j)), with complex conjugate (WR(j+1),WI(j+1)), */
+/* either SELECT(j) or SELECT(j+1) or both must be set to */
+/* .TRUE.; then on exit SELECT(j) is .TRUE. and SELECT(j+1) is */
+/* .FALSE.. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N >= 0. */
+
+/* H (input) DOUBLE PRECISION array, dimension (LDH,N) */
+/* The upper Hessenberg matrix H. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH >= max(1,N). */
+
+/* WR (input/output) DOUBLE PRECISION array, dimension (N) */
+/* WI (input) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the real and imaginary parts of the eigenvalues of */
+/* H; a complex conjugate pair of eigenvalues must be stored in */
+/* consecutive elements of WR and WI. */
+/* On exit, WR may have been altered since close eigenvalues */
+/* are perturbed slightly in searching for independent */
+/* eigenvectors. */
+
+/* VL (input/output) DOUBLE PRECISION array, dimension (LDVL,MM) */
+/* On entry, if INITV = 'U' and SIDE = 'L' or 'B', VL must */
+/* contain starting vectors for the inverse iteration for the */
+/* left eigenvectors; the starting vector for each eigenvector */
+/* must be in the same column(s) in which the eigenvector will */
+/* be stored. */
+/* On exit, if SIDE = 'L' or 'B', the left eigenvectors */
+/* specified by SELECT will be stored consecutively in the */
+/* columns of VL, in the same order as their eigenvalues. A */
+/* complex eigenvector corresponding to a complex eigenvalue is */
+/* stored in two consecutive columns, the first holding the real */
+/* part and the second the imaginary part. */
+/* If SIDE = 'R', VL is not referenced. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the array VL. */
+/* LDVL >= max(1,N) if SIDE = 'L' or 'B'; LDVL >= 1 otherwise. */
+
+/* VR (input/output) DOUBLE PRECISION array, dimension (LDVR,MM) */
+/* On entry, if INITV = 'U' and SIDE = 'R' or 'B', VR must */
+/* contain starting vectors for the inverse iteration for the */
+/* right eigenvectors; the starting vector for each eigenvector */
+/* must be in the same column(s) in which the eigenvector will */
+/* be stored. */
+/* On exit, if SIDE = 'R' or 'B', the right eigenvectors */
+/* specified by SELECT will be stored consecutively in the */
+/* columns of VR, in the same order as their eigenvalues. A */
+/* complex eigenvector corresponding to a complex eigenvalue is */
+/* stored in two consecutive columns, the first holding the real */
+/* part and the second the imaginary part. */
+/* If SIDE = 'L', VR is not referenced. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. */
+/* LDVR >= max(1,N) if SIDE = 'R' or 'B'; LDVR >= 1 otherwise. */
+
+/* MM (input) INTEGER */
+/* The number of columns in the arrays VL and/or VR. MM >= M. */
+
+/* M (output) INTEGER */
+/* The number of columns in the arrays VL and/or VR required to */
+/* store the eigenvectors; each selected real eigenvector */
+/* occupies one column and each selected complex eigenvector */
+/* occupies two columns. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension ((N+2)*N) */
+
+/* IFAILL (output) INTEGER array, dimension (MM) */
+/* If SIDE = 'L' or 'B', IFAILL(i) = j > 0 if the left */
+/* eigenvector in the i-th column of VL (corresponding to the */
+/* eigenvalue w(j)) failed to converge; IFAILL(i) = 0 if the */
+/* eigenvector converged satisfactorily. If the i-th and (i+1)th */
+/* columns of VL hold a complex eigenvector, then IFAILL(i) and */
+/* IFAILL(i+1) are set to the same value. */
+/* If SIDE = 'R', IFAILL is not referenced. */
+
+/* IFAILR (output) INTEGER array, dimension (MM) */
+/* If SIDE = 'R' or 'B', IFAILR(i) = j > 0 if the right */
+/* eigenvector in the i-th column of VR (corresponding to the */
+/* eigenvalue w(j)) failed to converge; IFAILR(i) = 0 if the */
+/* eigenvector converged satisfactorily. If the i-th and (i+1)th */
+/* columns of VR hold a complex eigenvector, then IFAILR(i) and */
+/* IFAILR(i+1) are set to the same value. */
+/* If SIDE = 'L', IFAILR is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, i is the number of eigenvectors which */
+/* failed to converge; see IFAILL and IFAILR for further */
+/* details. */
+
+/* Further Details */
+/* =============== */
+
+/* Each eigenvector is normalized so that the element of largest */
+/* magnitude has magnitude 1; here the magnitude of a complex number */
+/* (x,y) is taken to be |x|+|y|. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters. */
+
+ /* Parameter adjustments */
+ --select;
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --wr;
+ --wi;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+ --ifaill;
+ --ifailr;
+
+ /* Function Body */
+ bothv = lsame_(side, "B");
+ rightv = lsame_(side, "R") || bothv;
+ leftv = lsame_(side, "L") || bothv;
+
+ fromqr = lsame_(eigsrc, "Q");
+
+ noinit = lsame_(initv, "N");
+
+/* Set M to the number of columns required to store the selected */
+/* eigenvectors, and standardize the array SELECT. */
+
+ *m = 0;
+ pair = FALSE_;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (pair) {
+ pair = FALSE_;
+ select[k] = FALSE_;
+ } else {
+ if (wi[k] == 0.) {
+ if (select[k]) {
+ ++(*m);
+ }
+ } else {
+ pair = TRUE_;
+ if (select[k] || select[k + 1]) {
+ select[k] = TRUE_;
+ *m += 2;
+ }
+ }
+ }
+/* L10: */
+ }
+
+ *info = 0;
+ if (! rightv && ! leftv) {
+ *info = -1;
+ } else if (! fromqr && ! lsame_(eigsrc, "N")) {
+ *info = -2;
+ } else if (! noinit && ! lsame_(initv, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*ldh < max(1,*n)) {
+ *info = -7;
+ } else if (*ldvl < 1 || leftv && *ldvl < *n) {
+ *info = -11;
+ } else if (*ldvr < 1 || rightv && *ldvr < *n) {
+ *info = -13;
+ } else if (*mm < *m) {
+ *info = -14;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DHSEIN", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Set machine-dependent constants. */
+
+ unfl = dlamch_("Safe minimum");
+ ulp = dlamch_("Precision");
+ smlnum = unfl * (*n / ulp);
+ bignum = (1. - ulp) / smlnum;
+
+ ldwork = *n + 1;
+
+ kl = 1;
+ kln = 0;
+ if (fromqr) {
+ kr = 0;
+ } else {
+ kr = *n;
+ }
+ ksr = 1;
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (select[k]) {
+
+/* Compute eigenvector(s) corresponding to W(K). */
+
+ if (fromqr) {
+
+/* If affiliation of eigenvalues is known, check whether */
+/* the matrix splits. */
+
+/* Determine KL and KR such that 1 <= KL <= K <= KR <= N */
+/* and H(KL,KL-1) and H(KR+1,KR) are zero (or KL = 1 or */
+/* KR = N). */
+
+/* Then inverse iteration can be performed with the */
+/* submatrix H(KL:N,KL:N) for a left eigenvector, and with */
+/* the submatrix H(1:KR,1:KR) for a right eigenvector. */
+
+ i__2 = kl + 1;
+ for (i__ = k; i__ >= i__2; --i__) {
+ if (h__[i__ + (i__ - 1) * h_dim1] == 0.) {
+ goto L30;
+ }
+/* L20: */
+ }
+L30:
+ kl = i__;
+ if (k > kr) {
+ i__2 = *n - 1;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ if (h__[i__ + 1 + i__ * h_dim1] == 0.) {
+ goto L50;
+ }
+/* L40: */
+ }
+L50:
+ kr = i__;
+ }
+ }
+
+ if (kl != kln) {
+ kln = kl;
+
+/* Compute infinity-norm of submatrix H(KL:KR,KL:KR) if it */
+/* has not been computed before. */
+
+ i__2 = kr - kl + 1;
+ hnorm = dlanhs_("I", &i__2, &h__[kl + kl * h_dim1], ldh, &
+ work[1]);
+ if (hnorm > 0.) {
+ eps3 = hnorm * ulp;
+ } else {
+ eps3 = smlnum;
+ }
+ }
+
+/* Perturb eigenvalue if it is close to any previous */
+/* selected eigenvalues affiliated to the submatrix */
+/* H(KL:KR,KL:KR). Close roots are modified by EPS3. */
+
+ wkr = wr[k];
+ wki = wi[k];
+L60:
+ i__2 = kl;
+ for (i__ = k - 1; i__ >= i__2; --i__) {
+ if (select[i__] && (d__1 = wr[i__] - wkr, abs(d__1)) + (d__2 =
+ wi[i__] - wki, abs(d__2)) < eps3) {
+ wkr += eps3;
+ goto L60;
+ }
+/* L70: */
+ }
+ wr[k] = wkr;
+
+ pair = wki != 0.;
+ if (pair) {
+ ksi = ksr + 1;
+ } else {
+ ksi = ksr;
+ }
+ if (leftv) {
+
+/* Compute left eigenvector. */
+
+ i__2 = *n - kl + 1;
+ dlaein_(&c_false, &noinit, &i__2, &h__[kl + kl * h_dim1], ldh,
+ &wkr, &wki, &vl[kl + ksr * vl_dim1], &vl[kl + ksi *
+ vl_dim1], &work[1], &ldwork, &work[*n * *n + *n + 1],
+ &eps3, &smlnum, &bignum, &iinfo);
+ if (iinfo > 0) {
+ if (pair) {
+ *info += 2;
+ } else {
+ ++(*info);
+ }
+ ifaill[ksr] = k;
+ ifaill[ksi] = k;
+ } else {
+ ifaill[ksr] = 0;
+ ifaill[ksi] = 0;
+ }
+ i__2 = kl - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ vl[i__ + ksr * vl_dim1] = 0.;
+/* L80: */
+ }
+ if (pair) {
+ i__2 = kl - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ vl[i__ + ksi * vl_dim1] = 0.;
+/* L90: */
+ }
+ }
+ }
+ if (rightv) {
+
+/* Compute right eigenvector. */
+
+ dlaein_(&c_true, &noinit, &kr, &h__[h_offset], ldh, &wkr, &
+ wki, &vr[ksr * vr_dim1 + 1], &vr[ksi * vr_dim1 + 1], &
+ work[1], &ldwork, &work[*n * *n + *n + 1], &eps3, &
+ smlnum, &bignum, &iinfo);
+ if (iinfo > 0) {
+ if (pair) {
+ *info += 2;
+ } else {
+ ++(*info);
+ }
+ ifailr[ksr] = k;
+ ifailr[ksi] = k;
+ } else {
+ ifailr[ksr] = 0;
+ ifailr[ksi] = 0;
+ }
+ i__2 = *n;
+ for (i__ = kr + 1; i__ <= i__2; ++i__) {
+ vr[i__ + ksr * vr_dim1] = 0.;
+/* L100: */
+ }
+ if (pair) {
+ i__2 = *n;
+ for (i__ = kr + 1; i__ <= i__2; ++i__) {
+ vr[i__ + ksi * vr_dim1] = 0.;
+/* L110: */
+ }
+ }
+ }
+
+ if (pair) {
+ ksr += 2;
+ } else {
+ ++ksr;
+ }
+ }
+/* L120: */
+ }
+
+ return 0;
+
+/* End of DHSEIN */
+
+} /* dhsein_ */
diff --git a/contrib/libs/clapack/dhseqr.c b/contrib/libs/clapack/dhseqr.c
new file mode 100644
index 0000000000..48ad560d27
--- /dev/null
+++ b/contrib/libs/clapack/dhseqr.c
@@ -0,0 +1,487 @@
+/* dhseqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b11 = 0.;
+static doublereal c_b12 = 1.;
+static integer c__12 = 12;
+static integer c__2 = 2;
+static integer c__49 = 49;
+
+/* Subroutine */ int dhseqr_(char *job, char *compz, integer *n, integer *ilo,
+ integer *ihi, doublereal *h__, integer *ldh, doublereal *wr,
+ doublereal *wi, doublereal *z__, integer *ldz, doublereal *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2[2], i__3;
+ doublereal d__1;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ doublereal hl[2401] /* was [49][49] */;
+ integer kbot, nmin;
+ extern logical lsame_(char *, char *);
+ logical initz;
+ doublereal workl[49];
+ logical wantt, wantz;
+ extern /* Subroutine */ int dlaqr0_(logical *, logical *, integer *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ doublereal *, integer *, integer *), dlahqr_(logical *, logical *,
+ integer *, integer *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *),
+ dlaset_(char *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+/* Purpose */
+/* ======= */
+
+/* DHSEQR computes the eigenvalues of a Hessenberg matrix H */
+/* and, optionally, the matrices T and Z from the Schur decomposition */
+/* H = Z T Z**T, where T is an upper quasi-triangular matrix (the */
+/* Schur form), and Z is the orthogonal matrix of Schur vectors. */
+
+/* Optionally Z may be postmultiplied into an input orthogonal */
+/* matrix Q so that this routine can give the Schur factorization */
+/* of a matrix A which has been reduced to the Hessenberg form H */
+/* by the orthogonal matrix Q: A = Q*H*Q**T = (QZ)*T*(QZ)**T. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* = 'E': compute eigenvalues only; */
+/* = 'S': compute eigenvalues and the Schur form T. */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': no Schur vectors are computed; */
+/* = 'I': Z is initialized to the unit matrix and the matrix Z */
+/* of Schur vectors of H is returned; */
+/* = 'V': Z must contain an orthogonal matrix Q on entry, and */
+/* the product Q*Z is returned. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N .GE. 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that H is already upper triangular in rows */
+/* and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally */
+/* set by a previous call to DGEBAL, and then passed to DGEHRD */
+/* when the matrix output by DGEBAL is reduced to Hessenberg */
+/* form. Otherwise ILO and IHI should be set to 1 and N */
+/* respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. */
+/* If N = 0, then ILO = 1 and IHI = 0. */
+
+/* H (input/output) DOUBLE PRECISION array, dimension (LDH,N) */
+/* On entry, the upper Hessenberg matrix H. */
+/* On exit, if INFO = 0 and JOB = 'S', then H contains the */
+/* upper quasi-triangular matrix T from the Schur decomposition */
+/* (the Schur form); 2-by-2 diagonal blocks (corresponding to */
+/* complex conjugate pairs of eigenvalues) are returned in */
+/* standard form, with H(i,i) = H(i+1,i+1) and */
+/* H(i+1,i)*H(i,i+1).LT.0. If INFO = 0 and JOB = 'E', the */
+/* contents of H are unspecified on exit. (The output value of */
+/* H when INFO.GT.0 is given under the description of INFO */
+/* below.) */
+
+/* Unlike earlier versions of DHSEQR, this subroutine may */
+/*           explicitly set H(i,j) = 0 for i.GT.j and j = 1, 2, ... ILO-1 */
+/* or j = IHI+1, IHI+2, ... N. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH .GE. max(1,N). */
+
+/* WR (output) DOUBLE PRECISION array, dimension (N) */
+/* WI (output) DOUBLE PRECISION array, dimension (N) */
+/* The real and imaginary parts, respectively, of the computed */
+/* eigenvalues. If two eigenvalues are computed as a complex */
+/* conjugate pair, they are stored in consecutive elements of */
+/* WR and WI, say the i-th and (i+1)th, with WI(i) .GT. 0 and */
+/* WI(i+1) .LT. 0. If JOB = 'S', the eigenvalues are stored in */
+/* the same order as on the diagonal of the Schur form returned */
+/* in H, with WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2 */
+/* diagonal block, WI(i) = sqrt(-H(i+1,i)*H(i,i+1)) and */
+/* WI(i+1) = -WI(i). */
+
+/* Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N) */
+/* If COMPZ = 'N', Z is not referenced. */
+/* If COMPZ = 'I', on entry Z need not be set and on exit, */
+/* if INFO = 0, Z contains the orthogonal matrix Z of the Schur */
+/* vectors of H. If COMPZ = 'V', on entry Z must contain an */
+/* N-by-N matrix Q, which is assumed to be equal to the unit */
+/* matrix except for the submatrix Z(ILO:IHI,ILO:IHI). On exit, */
+/* if INFO = 0, Z contains Q*Z. */
+/* Normally Q is the orthogonal matrix generated by DORGHR */
+/* after the call to DGEHRD which formed the Hessenberg matrix */
+/* H. (The output value of Z when INFO.GT.0 is given under */
+/* the description of INFO below.) */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. if COMPZ = 'I' or */
+/*           COMPZ = 'V', then LDZ.GE.MAX(1,N).  Otherwise, LDZ.GE.1. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) */
+/* On exit, if INFO = 0, WORK(1) returns an estimate of */
+/* the optimal value for LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK .GE. max(1,N) */
+/* is sufficient and delivers very good and sometimes */
+/* optimal performance. However, LWORK as large as 11*N */
+/* may be required for optimal performance. A workspace */
+/* query is recommended to determine the optimal workspace */
+/* size. */
+
+/* If LWORK = -1, then DHSEQR does a workspace query. */
+/* In this case, DHSEQR checks the input parameters and */
+/* estimates the optimal workspace size for the given */
+/* values of N, ILO and IHI. The estimate is returned */
+/* in WORK(1). No error message related to LWORK is */
+/* issued by XERBLA. Neither H nor Z are accessed. */
+
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* .LT. 0: if INFO = -i, the i-th argument had an illegal */
+/* value */
+/* .GT. 0: if INFO = i, DHSEQR failed to compute all of */
+/* the eigenvalues. Elements 1:ilo-1 and i+1:n of WR */
+/* and WI contain those eigenvalues which have been */
+/* successfully computed. (Failures are rare.) */
+
+/* If INFO .GT. 0 and JOB = 'E', then on exit, the */
+/* remaining unconverged eigenvalues are the eigen- */
+/* values of the upper Hessenberg matrix rows and */
+/* columns ILO through INFO of the final, output */
+/* value of H. */
+
+/* If INFO .GT. 0 and JOB = 'S', then on exit */
+
+/* (*) (initial value of H)*U = U*(final value of H) */
+
+/* where U is an orthogonal matrix. The final */
+/* value of H is upper Hessenberg and quasi-triangular */
+/* in rows and columns INFO+1 through IHI. */
+
+/* If INFO .GT. 0 and COMPZ = 'V', then on exit */
+
+/* (final value of Z) = (initial value of Z)*U */
+
+/* where U is the orthogonal matrix in (*) (regard- */
+/* less of the value of JOB.) */
+
+/* If INFO .GT. 0 and COMPZ = 'I', then on exit */
+/* (final value of Z) = U */
+/* where U is the orthogonal matrix in (*) (regard- */
+/* less of the value of JOB.) */
+
+/* If INFO .GT. 0 and COMPZ = 'N', then Z is not */
+/* accessed. */
+
+/* ================================================================ */
+/* Default values supplied by */
+/* ILAENV(ISPEC,'DHSEQR',JOB(:1)//COMPZ(:1),N,ILO,IHI,LWORK). */
+/* It is suggested that these defaults be adjusted in order */
+/* to attain best performance in each particular */
+/* computational environment. */
+
+/* ISPEC=12: The DLAHQR vs DLAQR0 crossover point. */
+/* Default: 75. (Must be at least 11.) */
+
+/* ISPEC=13: Recommended deflation window size. */
+/* This depends on ILO, IHI and NS. NS is the */
+/* number of simultaneous shifts returned */
+/* by ILAENV(ISPEC=15). (See ISPEC=15 below.) */
+/* The default for (IHI-ILO+1).LE.500 is NS. */
+/* The default for (IHI-ILO+1).GT.500 is 3*NS/2. */
+
+/* ISPEC=14: Nibble crossover point. (See IPARMQ for */
+/* details.) Default: 14% of deflation window */
+/* size. */
+
+/* ISPEC=15: Number of simultaneous shifts in a multishift */
+/* QR iteration. */
+
+/* If IHI-ILO+1 is ... */
+
+/* greater than ...but less ... the */
+/* or equal to ... than default is */
+
+/* 1 30 NS = 2(+) */
+/* 30 60 NS = 4(+) */
+/* 60 150 NS = 10(+) */
+/* 150 590 NS = ** */
+/* 590 3000 NS = 64 */
+/* 3000 6000 NS = 128 */
+/* 6000 infinity NS = 256 */
+
+/* (+) By default some or all matrices of this order */
+/* are passed to the implicit double shift routine */
+/* DLAHQR and this parameter is ignored. See */
+/* ISPEC=12 above and comments in IPARMQ for */
+/* details. */
+
+/* (**) The asterisks (**) indicate an ad-hoc */
+/* function of N increasing from 10 to 64. */
+
+/* ISPEC=16: Select structured matrix multiply. */
+/* If the number of simultaneous shifts (specified */
+/* by ISPEC=15) is less than 14, then the default */
+/* for ISPEC=16 is 0. Otherwise the default for */
+/* ISPEC=16 is 2. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* References: */
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 */
+/* Performance, SIAM Journal of Matrix Analysis, volume 23, pages */
+/* 929--947, 2002. */
+
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part II: Aggressive Early Deflation, SIAM Journal */
+/* of Matrix Analysis, volume 23, pages 948--973, 2002. */
+
+/* ================================================================ */
+/* .. Parameters .. */
+
+/* ==== Matrices of order NTINY or smaller must be processed by */
+/* . DLAHQR because of insufficient subdiagonal scratch space. */
+/* . (This is a hard limit.) ==== */
+
+/* ==== NL allocates some local workspace to help small matrices */
+/* . through a rare DLAHQR failure. NL .GT. NTINY = 11 is */
+/* . required and NL .LE. NMIN = ILAENV(ISPEC=12,...) is recom- */
+/* . mended. (The default value of NMIN is 75.) Using NL = 49 */
+/* . allows up to six simultaneous shifts and a 16-by-16 */
+/* . deflation window. ==== */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* ==== Decode and check the input parameters. ==== */
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --wr;
+ --wi;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ wantt = lsame_(job, "S");
+ initz = lsame_(compz, "I");
+ wantz = initz || lsame_(compz, "V");
+ work[1] = (doublereal) max(1,*n);
+ lquery = *lwork == -1;
+
+ *info = 0;
+ if (! lsame_(job, "E") && ! wantt) {
+ *info = -1;
+ } else if (! lsame_(compz, "N") && ! wantz) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ilo < 1 || *ilo > max(1,*n)) {
+ *info = -4;
+ } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+ *info = -5;
+ } else if (*ldh < max(1,*n)) {
+ *info = -7;
+ } else if (*ldz < 1 || wantz && *ldz < max(1,*n)) {
+ *info = -11;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -13;
+ }
+
+ if (*info != 0) {
+
+/* ==== Quick return in case of invalid argument. ==== */
+
+ i__1 = -(*info);
+ xerbla_("DHSEQR", &i__1);
+ return 0;
+
+ } else if (*n == 0) {
+
+/* ==== Quick return in case N = 0; nothing to do. ==== */
+
+ return 0;
+
+ } else if (lquery) {
+
+/* ==== Quick return in case of a workspace query ==== */
+
+ dlaqr0_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], &wi[
+ 1], ilo, ihi, &z__[z_offset], ldz, &work[1], lwork, info);
+/* ==== Ensure reported workspace size is backward-compatible with */
+/* . previous LAPACK versions. ==== */
+/* Computing MAX */
+ d__1 = (doublereal) max(1,*n);
+ work[1] = max(d__1,work[1]);
+ return 0;
+
+ } else {
+
+/* ==== copy eigenvalues isolated by DGEBAL ==== */
+
+ i__1 = *ilo - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ wr[i__] = h__[i__ + i__ * h_dim1];
+ wi[i__] = 0.;
+/* L10: */
+ }
+ i__1 = *n;
+ for (i__ = *ihi + 1; i__ <= i__1; ++i__) {
+ wr[i__] = h__[i__ + i__ * h_dim1];
+ wi[i__] = 0.;
+/* L20: */
+ }
+
+/* ==== Initialize Z, if requested ==== */
+
+ if (initz) {
+ dlaset_("A", n, n, &c_b11, &c_b12, &z__[z_offset], ldz)
+ ;
+ }
+
+/* ==== Quick return if possible ==== */
+
+ if (*ilo == *ihi) {
+ wr[*ilo] = h__[*ilo + *ilo * h_dim1];
+ wi[*ilo] = 0.;
+ return 0;
+ }
+
+/* ==== DLAHQR/DLAQR0 crossover point ==== */
+
+/* Writing concatenation */
+ i__2[0] = 1, a__1[0] = job;
+ i__2[1] = 1, a__1[1] = compz;
+ s_cat(ch__1, a__1, i__2, &c__2, (ftnlen)2);
+ nmin = ilaenv_(&c__12, "DHSEQR", ch__1, n, ilo, ihi, lwork);
+ nmin = max(11,nmin);
+
+/* ==== DLAQR0 for big matrices; DLAHQR for small ones ==== */
+
+ if (*n > nmin) {
+ dlaqr0_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1],
+ &wi[1], ilo, ihi, &z__[z_offset], ldz, &work[1], lwork,
+ info);
+ } else {
+
+/* ==== Small matrix ==== */
+
+ dlahqr_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1],
+ &wi[1], ilo, ihi, &z__[z_offset], ldz, info);
+
+ if (*info > 0) {
+
+/* ==== A rare DLAHQR failure! DLAQR0 sometimes succeeds */
+/* . when DLAHQR fails. ==== */
+
+ kbot = *info;
+
+ if (*n >= 49) {
+
+/* ==== Larger matrices have enough subdiagonal scratch */
+/* . space to call DLAQR0 directly. ==== */
+
+ dlaqr0_(&wantt, &wantz, n, ilo, &kbot, &h__[h_offset],
+ ldh, &wr[1], &wi[1], ilo, ihi, &z__[z_offset],
+ ldz, &work[1], lwork, info);
+
+ } else {
+
+/* ==== Tiny matrices don't have enough subdiagonal */
+/* . scratch space to benefit from DLAQR0. Hence, */
+/* . tiny matrices must be copied into a larger */
+/* . array before calling DLAQR0. ==== */
+
+ dlacpy_("A", n, n, &h__[h_offset], ldh, hl, &c__49);
+ hl[*n + 1 + *n * 49 - 50] = 0.;
+ i__1 = 49 - *n;
+ dlaset_("A", &c__49, &i__1, &c_b11, &c_b11, &hl[(*n + 1) *
+ 49 - 49], &c__49);
+ dlaqr0_(&wantt, &wantz, &c__49, ilo, &kbot, hl, &c__49, &
+ wr[1], &wi[1], ilo, ihi, &z__[z_offset], ldz,
+ workl, &c__49, info);
+ if (wantt || *info != 0) {
+ dlacpy_("A", n, n, hl, &c__49, &h__[h_offset], ldh);
+ }
+ }
+ }
+ }
+
+/* ==== Clear out the trash, if necessary. ==== */
+
+ if ((wantt || *info != 0) && *n > 2) {
+ i__1 = *n - 2;
+ i__3 = *n - 2;
+ dlaset_("L", &i__1, &i__3, &c_b11, &c_b11, &h__[h_dim1 + 3], ldh);
+ }
+
+/* ==== Ensure reported workspace size is backward-compatible with */
+/* . previous LAPACK versions. ==== */
+
+/* Computing MAX */
+ d__1 = (doublereal) max(1,*n);
+ work[1] = max(d__1,work[1]);
+ }
+
+/* ==== End of DHSEQR ==== */
+
+ return 0;
+} /* dhseqr_ */
diff --git a/contrib/libs/clapack/disnan.c b/contrib/libs/clapack/disnan.c
new file mode 100644
index 0000000000..564ca2c574
--- /dev/null
+++ b/contrib/libs/clapack/disnan.c
@@ -0,0 +1,52 @@
+/* disnan.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+logical disnan_(doublereal *din)
+{
+ /* System generated locals */
+ logical ret_val;
+
+ /* Local variables */
+ extern logical dlaisnan_(doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DISNAN returns .TRUE. if its argument is NaN, and .FALSE. */
+/* otherwise. To be replaced by the Fortran 2003 intrinsic in the */
+/* future. */
+
+/* Arguments */
+/* ========= */
+
+/* DIN (input) DOUBLE PRECISION */
+/* Input to test for NaN. */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+ ret_val = dlaisnan_(din, din);
+ return ret_val;
+} /* disnan_ */
diff --git a/contrib/libs/clapack/dlabad.c b/contrib/libs/clapack/dlabad.c
new file mode 100644
index 0000000000..01e6a9ca2a
--- /dev/null
+++ b/contrib/libs/clapack/dlabad.c
@@ -0,0 +1,72 @@
+/* dlabad.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlabad_(doublereal *small, doublereal *large)
+{
+ /* Builtin functions */
+ double d_lg10(doublereal *), sqrt(doublereal);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLABAD takes as input the values computed by DLAMCH for underflow and */
+/* overflow, and returns the square root of each of these values if the */
+/* log of LARGE is sufficiently large. This subroutine is intended to */
+/* identify machines with a large exponent range, such as the Crays, and */
+/* redefine the underflow and overflow limits to be the square roots of */
+/* the values computed by DLAMCH. This subroutine is needed because */
+/* DLAMCH does not compensate for poor arithmetic in the upper half of */
+/* the exponent range, as is found on a Cray. */
+
+/* Arguments */
+/* ========= */
+
+/* SMALL (input/output) DOUBLE PRECISION */
+/* On entry, the underflow threshold as computed by DLAMCH. */
+/* On exit, if LOG10(LARGE) is sufficiently large, the square */
+/* root of SMALL, otherwise unchanged. */
+
+/* LARGE (input/output) DOUBLE PRECISION */
+/* On entry, the overflow threshold as computed by DLAMCH. */
+/* On exit, if LOG10(LARGE) is sufficiently large, the square */
+/* root of LARGE, otherwise unchanged. */
+
+/* ===================================================================== */
+
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* If it looks like we're on a Cray, take the square root of */
+/* SMALL and LARGE to avoid overflow and underflow problems. */
+
+ if (d_lg10(large) > 2e3) {
+ *small = sqrt(*small);
+ *large = sqrt(*large);
+ }
+
+ return 0;
+
+/* End of DLABAD */
+
+} /* dlabad_ */
diff --git a/contrib/libs/clapack/dlabrd.c b/contrib/libs/clapack/dlabrd.c
new file mode 100644
index 0000000000..2048ef7f2b
--- /dev/null
+++ b/contrib/libs/clapack/dlabrd.c
@@ -0,0 +1,434 @@
+/* dlabrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b4 = -1.;
+static doublereal c_b5 = 1.;
+static integer c__1 = 1;
+static doublereal c_b16 = 0.;
+
+/* Subroutine */ int dlabrd_(integer *m, integer *n, integer *nb, doublereal *
+ a, integer *lda, doublereal *d__, doublereal *e, doublereal *tauq,
+ doublereal *taup, doublereal *x, integer *ldx, doublereal *y, integer
+ *ldy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, x_dim1, x_offset, y_dim1, y_offset, i__1, i__2,
+ i__3;
+
+ /* Local variables */
+ integer i__;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *), dgemv_(char *, integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *), dlarfg_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLABRD reduces the first NB rows and columns of a real general */
+/* m by n matrix A to upper or lower bidiagonal form by an orthogonal */
+/* transformation Q' * A * P, and returns the matrices X and Y which */
+/* are needed to apply the transformation to the unreduced part of A. */
+
+/* If m >= n, A is reduced to upper bidiagonal form; if m < n, to lower */
+/* bidiagonal form. */
+
+/* This is an auxiliary routine called by DGEBRD */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows in the matrix A. */
+
+/* N (input) INTEGER */
+/* The number of columns in the matrix A. */
+
+/* NB (input) INTEGER */
+/* The number of leading rows and columns of A to be reduced. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the m by n general matrix to be reduced. */
+/* On exit, the first NB rows and columns of the matrix are */
+/* overwritten; the rest of the array is unchanged. */
+/* If m >= n, elements on and below the diagonal in the first NB */
+/* columns, with the array TAUQ, represent the orthogonal */
+/* matrix Q as a product of elementary reflectors; and */
+/* elements above the diagonal in the first NB rows, with the */
+/* array TAUP, represent the orthogonal matrix P as a product */
+/* of elementary reflectors. */
+/* If m < n, elements below the diagonal in the first NB */
+/* columns, with the array TAUQ, represent the orthogonal */
+/* matrix Q as a product of elementary reflectors, and */
+/* elements on and above the diagonal in the first NB rows, */
+/* with the array TAUP, represent the orthogonal matrix P as */
+/* a product of elementary reflectors. */
+/* See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* D (output) DOUBLE PRECISION array, dimension (NB) */
+/* The diagonal elements of the first NB rows and columns of */
+/* the reduced matrix. D(i) = A(i,i). */
+
+/* E (output) DOUBLE PRECISION array, dimension (NB) */
+/* The off-diagonal elements of the first NB rows and columns of */
+/* the reduced matrix. */
+
+/* TAUQ (output) DOUBLE PRECISION array dimension (NB) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix Q. See Further Details. */
+
+/* TAUP (output) DOUBLE PRECISION array, dimension (NB) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix P. See Further Details. */
+
+/* X (output) DOUBLE PRECISION array, dimension (LDX,NB) */
+/* The m-by-nb matrix X required to update the unreduced part */
+/* of A. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= M. */
+
+/* Y (output) DOUBLE PRECISION array, dimension (LDY,NB) */
+/* The n-by-nb matrix Y required to update the unreduced part */
+/* of A. */
+
+/* LDY (input) INTEGER */
+/* The leading dimension of the array Y. LDY >= N. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrices Q and P are represented as products of elementary */
+/* reflectors: */
+
+/* Q = H(1) H(2) . . . H(nb) and P = G(1) G(2) . . . G(nb) */
+
+/* Each H(i) and G(i) has the form: */
+
+/* H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' */
+
+/* where tauq and taup are real scalars, and v and u are real vectors. */
+
+/* If m >= n, v(1:i-1) = 0, v(i) = 1, and v(i:m) is stored on exit in */
+/* A(i:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+1:n) is stored on exit in */
+/* A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* If m < n, v(1:i) = 0, v(i+1) = 1, and v(i+1:m) is stored on exit in */
+/* A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i:n) is stored on exit in */
+/* A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* The elements of the vectors v and u together form the m-by-nb matrix */
+/* V and the nb-by-n matrix U' which are needed, with X and Y, to apply */
+/* the transformation to the unreduced part of the matrix, using a block */
+/* update of the form: A := A - V*Y' - X*U'. */
+
+/* The contents of A on exit are illustrated by the following examples */
+/* with nb = 2: */
+
+/* m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): */
+
+/* ( 1 1 u1 u1 u1 ) ( 1 u1 u1 u1 u1 u1 ) */
+/* ( v1 1 1 u2 u2 ) ( 1 1 u2 u2 u2 u2 ) */
+/* ( v1 v2 a a a ) ( v1 1 a a a a ) */
+/* ( v1 v2 a a a ) ( v1 v2 a a a a ) */
+/* ( v1 v2 a a a ) ( v1 v2 a a a a ) */
+/* ( v1 v2 a a a ) */
+
+/* where a denotes an element of the original matrix which is unchanged, */
+/* vi denotes an element of the vector defining H(i), and ui an element */
+/* of the vector defining G(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tauq;
+ --taup;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ y_dim1 = *ldy;
+ y_offset = 1 + y_dim1;
+ y -= y_offset;
+
+ /* Function Body */
+ if (*m <= 0 || *n <= 0) {
+ return 0;
+ }
+
+ if (*m >= *n) {
+
+/* Reduce to upper bidiagonal form */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i:m,i) */
+
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b4, &a[i__ + a_dim1], lda,
+ &y[i__ + y_dim1], ldy, &c_b5, &a[i__ + i__ * a_dim1], &
+ c__1);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b4, &x[i__ + x_dim1], ldx,
+ &a[i__ * a_dim1 + 1], &c__1, &c_b5, &a[i__ + i__ *
+ a_dim1], &c__1);
+
+/* Generate reflection Q(i) to annihilate A(i+1:m,i) */
+
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3, *m)+ i__ *
+ a_dim1], &c__1, &tauq[i__]);
+ d__[i__] = a[i__ + i__ * a_dim1];
+ if (i__ < *n) {
+ a[i__ + i__ * a_dim1] = 1.;
+
+/* Compute Y(i+1:n,i) */
+
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ dgemv_("Transpose", &i__2, &i__3, &c_b5, &a[i__ + (i__ + 1) *
+ a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &c_b16, &
+ y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b5, &a[i__ + a_dim1],
+ lda, &a[i__ + i__ * a_dim1], &c__1, &c_b16, &y[i__ *
+ y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b4, &y[i__ + 1 +
+ y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b5, &y[
+ i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b5, &x[i__ + x_dim1],
+ ldx, &a[i__ + i__ * a_dim1], &c__1, &c_b16, &y[i__ *
+ y_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ dgemv_("Transpose", &i__2, &i__3, &c_b4, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &c_b5,
+ &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *n - i__;
+ dscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
+
+/* Update A(i,i+1:n) */
+
+ i__2 = *n - i__;
+ dgemv_("No transpose", &i__2, &i__, &c_b4, &y[i__ + 1 +
+ y_dim1], ldy, &a[i__ + a_dim1], lda, &c_b5, &a[i__ + (
+ i__ + 1) * a_dim1], lda);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ dgemv_("Transpose", &i__2, &i__3, &c_b4, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b5, &a[
+ i__ + (i__ + 1) * a_dim1], lda);
+
+/* Generate reflection P(i) to annihilate A(i,i+2:n) */
+
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ dlarfg_(&i__2, &a[i__ + (i__ + 1) * a_dim1], &a[i__ + min(
+ i__3, *n)* a_dim1], lda, &taup[i__]);
+ e[i__] = a[i__ + (i__ + 1) * a_dim1];
+ a[i__ + (i__ + 1) * a_dim1] = 1.;
+
+/* Compute X(i+1:m,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ dgemv_("No transpose", &i__2, &i__3, &c_b5, &a[i__ + 1 + (i__
+ + 1) * a_dim1], lda, &a[i__ + (i__ + 1) * a_dim1],
+ lda, &c_b16, &x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *n - i__;
+ dgemv_("Transpose", &i__2, &i__, &c_b5, &y[i__ + 1 + y_dim1],
+ ldy, &a[i__ + (i__ + 1) * a_dim1], lda, &c_b16, &x[
+ i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ dgemv_("No transpose", &i__2, &i__, &c_b4, &a[i__ + 1 +
+ a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b5, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ dgemv_("No transpose", &i__2, &i__3, &c_b5, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ c_b16, &x[i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b4, &x[i__ + 1 +
+ x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b5, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *m - i__;
+ dscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Reduce to lower bidiagonal form */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i,i:n) */
+
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b4, &y[i__ + y_dim1], ldy,
+ &a[i__ + a_dim1], lda, &c_b5, &a[i__ + i__ * a_dim1],
+ lda);
+ i__2 = i__ - 1;
+ i__3 = *n - i__ + 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b4, &a[i__ * a_dim1 + 1],
+ lda, &x[i__ + x_dim1], ldx, &c_b5, &a[i__ + i__ * a_dim1],
+ lda);
+
+/* Generate reflection P(i) to annihilate A(i,i+1:n) */
+
+ i__2 = *n - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3, *n)*
+ a_dim1], lda, &taup[i__]);
+ d__[i__] = a[i__ + i__ * a_dim1];
+ if (i__ < *m) {
+ a[i__ + i__ * a_dim1] = 1.;
+
+/* Compute X(i+1:m,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__ + 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b5, &a[i__ + 1 + i__ *
+ a_dim1], lda, &a[i__ + i__ * a_dim1], lda, &c_b16, &
+ x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b5, &y[i__ + y_dim1],
+ ldy, &a[i__ + i__ * a_dim1], lda, &c_b16, &x[i__ *
+ x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b4, &a[i__ + 1 +
+ a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b5, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__ + 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b5, &a[i__ * a_dim1 +
+ 1], lda, &a[i__ + i__ * a_dim1], lda, &c_b16, &x[i__ *
+ x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b4, &x[i__ + 1 +
+ x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b5, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *m - i__;
+ dscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
+
+/* Update A(i+1:m,i) */
+
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b4, &a[i__ + 1 +
+ a_dim1], lda, &y[i__ + y_dim1], ldy, &c_b5, &a[i__ +
+ 1 + i__ * a_dim1], &c__1);
+ i__2 = *m - i__;
+ dgemv_("No transpose", &i__2, &i__, &c_b4, &x[i__ + 1 +
+ x_dim1], ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b5, &a[
+ i__ + 1 + i__ * a_dim1], &c__1);
+
+/* Generate reflection Q(i) to annihilate A(i+2:m,i) */
+
+ i__2 = *m - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3, *m)+
+ i__ * a_dim1], &c__1, &tauq[i__]);
+ e[i__] = a[i__ + 1 + i__ * a_dim1];
+ a[i__ + 1 + i__ * a_dim1] = 1.;
+
+/* Compute Y(i+1:n,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ dgemv_("Transpose", &i__2, &i__3, &c_b5, &a[i__ + 1 + (i__ +
+ 1) * a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1,
+ &c_b16, &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b5, &a[i__ + 1 + a_dim1],
+ lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b16, &y[
+ i__ * y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b4, &y[i__ + 1 +
+ y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b5, &y[
+ i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__;
+ dgemv_("Transpose", &i__2, &i__, &c_b5, &x[i__ + 1 + x_dim1],
+ ldx, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b16, &y[
+ i__ * y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ dgemv_("Transpose", &i__, &i__2, &c_b4, &a[(i__ + 1) * a_dim1
+ + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &c_b5, &y[i__
+ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *n - i__;
+ dscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of DLABRD */
+
+} /* dlabrd_ */
diff --git a/contrib/libs/clapack/dlacn2.c b/contrib/libs/clapack/dlacn2.c
new file mode 100644
index 0000000000..958f294de0
--- /dev/null
+++ b/contrib/libs/clapack/dlacn2.c
@@ -0,0 +1,267 @@
+/* dlacn2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b11 = 1.;
+
+/* Subroutine */ int dlacn2_(integer *n, doublereal *v, doublereal *x,
+ integer *isgn, doublereal *est, integer *kase, integer *isave)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double d_sign(doublereal *, doublereal *);
+ integer i_dnnt(doublereal *);
+
+ /* Local variables */
+ integer i__;
+ doublereal temp;
+ extern doublereal dasum_(integer *, doublereal *, integer *);
+ integer jlast;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ doublereal altsgn, estold;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLACN2 estimates the 1-norm of a square, real matrix A. */
+/* Reverse communication is used for evaluating matrix-vector products. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 1. */
+
+/* V (workspace) DOUBLE PRECISION array, dimension (N) */
+/* On the final return, V = A*W, where EST = norm(V)/norm(W) */
+/* (W is not returned). */
+
+/* X (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On an intermediate return, X should be overwritten by */
+/* A * X, if KASE=1, */
+/* A' * X, if KASE=2, */
+/* and DLACN2 must be re-called with all the other parameters */
+/* unchanged. */
+
+/* ISGN (workspace) INTEGER array, dimension (N) */
+
+/* EST (input/output) DOUBLE PRECISION */
+/* On entry with KASE = 1 or 2 and ISAVE(1) = 3, EST should be */
+/* unchanged from the previous call to DLACN2. */
+/* On exit, EST is an estimate (a lower bound) for norm(A). */
+
+/* KASE (input/output) INTEGER */
+/* On the initial call to DLACN2, KASE should be 0. */
+/* On an intermediate return, KASE will be 1 or 2, indicating */
+/* whether X should be overwritten by A * X or A' * X. */
+/* On the final return from DLACN2, KASE will again be 0. */
+
+/* ISAVE (input/output) INTEGER array, dimension (3) */
+/* ISAVE is used to save variables between calls to DLACN2 */
+
+/* Further Details */
+/* ======= ======= */
+
+/* Contributed by Nick Higham, University of Manchester. */
+/* Originally named SONEST, dated March 16, 1988. */
+
+/* Reference: N.J. Higham, "FORTRAN codes for estimating the one-norm of */
+/* a real or complex matrix, with applications to condition estimation", */
+/* ACM Trans. Math. Soft., vol. 14, no. 4, pp. 381-396, December 1988. */
+
+/* This is a thread safe version of DLACON, which uses the array ISAVE */
+/* in place of a SAVE statement, as follows: */
+
+/* DLACON DLACN2 */
+/* JUMP ISAVE(1) */
+/* J ISAVE(2) */
+/* ITER ISAVE(3) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --isave;
+ --isgn;
+ --x;
+ --v;
+
+ /* Function Body */
+/* KASE = 0: initial call. Seed x with the uniform vector e/n and */
+/* hand it back to the caller with a request for A*x (KASE = 1). */
+ if (*kase == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = 1. / (doublereal) (*n);
+/* L10: */
+ }
+ *kase = 1;
+ isave[1] = 1;
+ return 0;
+ }
+
+/* Intermediate call: resume the state machine at the point recorded */
+/* in ISAVE(1) by the previous return. */
+ switch (isave[1]) {
+ case 1: goto L20;
+ case 2: goto L40;
+ case 3: goto L70;
+ case 4: goto L110;
+ case 5: goto L140;
+ }
+
+/* ................ ENTRY (ISAVE( 1 ) = 1) */
+/* FIRST ITERATION. X HAS BEEN OVERWRITTEN BY A*X. */
+
+L20:
+ if (*n == 1) {
+ v[1] = x[1];
+ *est = abs(v[1]);
+/* ... QUIT */
+ goto L150;
+ }
+ *est = dasum_(n, &x[1], &c__1);
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = d_sign(&c_b11, &x[i__]);
+ isgn[i__] = i_dnnt(&x[i__]);
+/* L30: */
+ }
+ *kase = 2;
+ isave[1] = 2;
+ return 0;
+
+/* ................ ENTRY (ISAVE( 1 ) = 2) */
+/* FIRST ITERATION. X HAS BEEN OVERWRITTEN BY TRANSPOSE(A)*X. */
+
+L40:
+ isave[2] = idamax_(n, &x[1], &c__1);
+ isave[3] = 2;
+
+/* MAIN LOOP - ITERATIONS 2,3,...,ITMAX. */
+
+L50:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = 0.;
+/* L60: */
+ }
+ x[isave[2]] = 1.;
+ *kase = 1;
+ isave[1] = 3;
+ return 0;
+
+/* ................ ENTRY (ISAVE( 1 ) = 3) */
+/* X HAS BEEN OVERWRITTEN BY A*X. */
+
+L70:
+ dcopy_(n, &x[1], &c__1, &v[1], &c__1);
+ estold = *est;
+ *est = dasum_(n, &v[1], &c__1);
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__1 = d_sign(&c_b11, &x[i__]);
+ if (i_dnnt(&d__1) != isgn[i__]) {
+ goto L90;
+ }
+/* L80: */
+ }
+/* REPEATED SIGN VECTOR DETECTED, HENCE ALGORITHM HAS CONVERGED. */
+ goto L120;
+
+L90:
+/* TEST FOR CYCLING. */
+ if (*est <= estold) {
+ goto L120;
+ }
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = d_sign(&c_b11, &x[i__]);
+ isgn[i__] = i_dnnt(&x[i__]);
+/* L100: */
+ }
+ *kase = 2;
+ isave[1] = 4;
+ return 0;
+
+/* ................ ENTRY (ISAVE( 1 ) = 4) */
+/* X HAS BEEN OVERWRITTEN BY TRANSPOSE(A)*X. */
+
+L110:
+ jlast = isave[2];
+ isave[2] = idamax_(n, &x[1], &c__1);
+/* Iterate again only if the maximal component moved to a new index */
+/* and fewer than ITMAX = 5 power iterations have been taken. */
+ if (x[jlast] != (d__1 = x[isave[2]], abs(d__1)) && isave[3] < 5) {
+ ++isave[3];
+ goto L50;
+ }
+
+/* ITERATION COMPLETE. FINAL STAGE. */
+
+L120:
+/* Extra safeguard: probe with the alternating vector */
+/* x(i) = (-1)**(i+1) * (1 + (i-1)/(n-1)). */
+ altsgn = 1.;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = altsgn * ((doublereal) (i__ - 1) / (doublereal) (*n - 1) +
+ 1.);
+ altsgn = -altsgn;
+/* L130: */
+ }
+ *kase = 1;
+ isave[1] = 5;
+ return 0;
+
+/* ................ ENTRY (ISAVE( 1 ) = 5) */
+/* X HAS BEEN OVERWRITTEN BY A*X. */
+
+L140:
+/* norm1 of the alternating vector is 3n/2, so the candidate estimate */
+/* is 2*norm1(A*x)/(3n); keep it only if it beats the current EST. */
+ temp = dasum_(n, &x[1], &c__1) / (doublereal) (*n * 3) * 2.;
+ if (temp > *est) {
+ dcopy_(n, &x[1], &c__1, &v[1], &c__1);
+ *est = temp;
+ }
+
+L150:
+ *kase = 0;
+ return 0;
+
+/* End of DLACN2 */
+
+} /* dlacn2_ */
diff --git a/contrib/libs/clapack/dlacon.c b/contrib/libs/clapack/dlacon.c
new file mode 100644
index 0000000000..b99e00fd6d
--- /dev/null
+++ b/contrib/libs/clapack/dlacon.c
@@ -0,0 +1,258 @@
+/* dlacon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b11 = 1.;
+
+/* Subroutine */ int dlacon_(integer *n, doublereal *v, doublereal *x,
+ integer *isgn, doublereal *est, integer *kase)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double d_sign(doublereal *, doublereal *);
+ integer i_dnnt(doublereal *);
+
+ /* Local variables */
+/* NOTE: these statics emulate the Fortran SAVE statement, so state */
+/* persists across calls between the reverse-communication returns; */
+/* this makes DLACON non-reentrant. DLACN2 is the thread-safe variant */
+/* (it carries the same state in the caller-supplied ISAVE array). */
+ static integer i__, j, iter;
+ static doublereal temp;
+ static integer jump;
+ extern doublereal dasum_(integer *, doublereal *, integer *);
+ static integer jlast;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ static doublereal altsgn, estold;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLACON estimates the 1-norm of a square, real matrix A. */
+/* Reverse communication is used for evaluating matrix-vector products. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 1. */
+
+/* V (workspace) DOUBLE PRECISION array, dimension (N) */
+/* On the final return, V = A*W, where EST = norm(V)/norm(W) */
+/* (W is not returned). */
+
+/* X (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On an intermediate return, X should be overwritten by */
+/* A * X, if KASE=1, */
+/* A' * X, if KASE=2, */
+/* and DLACON must be re-called with all the other parameters */
+/* unchanged. */
+
+/* ISGN (workspace) INTEGER array, dimension (N) */
+
+/* EST (input/output) DOUBLE PRECISION */
+/* On entry with KASE = 1 or 2 and JUMP = 3, EST should be */
+/* unchanged from the previous call to DLACON. */
+/* On exit, EST is an estimate (a lower bound) for norm(A). */
+
+/* KASE (input/output) INTEGER */
+/* On the initial call to DLACON, KASE should be 0. */
+/* On an intermediate return, KASE will be 1 or 2, indicating */
+/* whether X should be overwritten by A * X or A' * X. */
+/* On the final return from DLACON, KASE will again be 0. */
+
+/* Further Details */
+/* ======= ======= */
+
+/* Contributed by Nick Higham, University of Manchester. */
+/* Originally named SONEST, dated March 16, 1988. */
+
+/* Reference: N.J. Higham, "FORTRAN codes for estimating the one-norm of */
+/* a real or complex matrix, with applications to condition estimation", */
+/* ACM Trans. Math. Soft., vol. 14, no. 4, pp. 381-396, December 1988. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Save statement .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --isgn;
+ --x;
+ --v;
+
+ /* Function Body */
+/* KASE = 0: initial call. Seed x with the uniform vector e/n and */
+/* hand it back to the caller with a request for A*x (KASE = 1). */
+ if (*kase == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = 1. / (doublereal) (*n);
+/* L10: */
+ }
+ *kase = 1;
+ jump = 1;
+ return 0;
+ }
+
+/* Intermediate call: resume the state machine at the point recorded */
+/* in the saved JUMP variable by the previous return. */
+ switch (jump) {
+ case 1: goto L20;
+ case 2: goto L40;
+ case 3: goto L70;
+ case 4: goto L110;
+ case 5: goto L140;
+ }
+
+/* ................ ENTRY (JUMP = 1) */
+/* FIRST ITERATION. X HAS BEEN OVERWRITTEN BY A*X. */
+
+L20:
+ if (*n == 1) {
+ v[1] = x[1];
+ *est = abs(v[1]);
+/* ... QUIT */
+ goto L150;
+ }
+ *est = dasum_(n, &x[1], &c__1);
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = d_sign(&c_b11, &x[i__]);
+ isgn[i__] = i_dnnt(&x[i__]);
+/* L30: */
+ }
+ *kase = 2;
+ jump = 2;
+ return 0;
+
+/* ................ ENTRY (JUMP = 2) */
+/* FIRST ITERATION. X HAS BEEN OVERWRITTEN BY TRANSPOSE(A)*X. */
+
+L40:
+ j = idamax_(n, &x[1], &c__1);
+ iter = 2;
+
+/* MAIN LOOP - ITERATIONS 2,3,...,ITMAX. */
+
+L50:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = 0.;
+/* L60: */
+ }
+ x[j] = 1.;
+ *kase = 1;
+ jump = 3;
+ return 0;
+
+/* ................ ENTRY (JUMP = 3) */
+/* X HAS BEEN OVERWRITTEN BY A*X. */
+
+L70:
+ dcopy_(n, &x[1], &c__1, &v[1], &c__1);
+ estold = *est;
+ *est = dasum_(n, &v[1], &c__1);
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__1 = d_sign(&c_b11, &x[i__]);
+ if (i_dnnt(&d__1) != isgn[i__]) {
+ goto L90;
+ }
+/* L80: */
+ }
+/* REPEATED SIGN VECTOR DETECTED, HENCE ALGORITHM HAS CONVERGED. */
+ goto L120;
+
+L90:
+/* TEST FOR CYCLING. */
+ if (*est <= estold) {
+ goto L120;
+ }
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = d_sign(&c_b11, &x[i__]);
+ isgn[i__] = i_dnnt(&x[i__]);
+/* L100: */
+ }
+ *kase = 2;
+ jump = 4;
+ return 0;
+
+/* ................ ENTRY (JUMP = 4) */
+/* X HAS BEEN OVERWRITTEN BY TRANSPOSE(A)*X. */
+
+L110:
+ jlast = j;
+ j = idamax_(n, &x[1], &c__1);
+/* Iterate again only if the maximal component moved to a new index */
+/* and fewer than ITMAX = 5 power iterations have been taken. */
+ if (x[jlast] != (d__1 = x[j], abs(d__1)) && iter < 5) {
+ ++iter;
+ goto L50;
+ }
+
+/* ITERATION COMPLETE. FINAL STAGE. */
+
+L120:
+/* Extra safeguard: probe with the alternating vector */
+/* x(i) = (-1)**(i+1) * (1 + (i-1)/(n-1)). */
+ altsgn = 1.;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = altsgn * ((doublereal) (i__ - 1) / (doublereal) (*n - 1) +
+ 1.);
+ altsgn = -altsgn;
+/* L130: */
+ }
+ *kase = 1;
+ jump = 5;
+ return 0;
+
+/* ................ ENTRY (JUMP = 5) */
+/* X HAS BEEN OVERWRITTEN BY A*X. */
+
+L140:
+/* norm1 of the alternating vector is 3n/2, so the candidate estimate */
+/* is 2*norm1(A*x)/(3n); keep it only if it beats the current EST. */
+ temp = dasum_(n, &x[1], &c__1) / (doublereal) (*n * 3) * 2.;
+ if (temp > *est) {
+ dcopy_(n, &x[1], &c__1, &v[1], &c__1);
+ *est = temp;
+ }
+
+L150:
+ *kase = 0;
+ return 0;
+
+/* End of DLACON */
+
+} /* dlacon_ */
diff --git a/contrib/libs/clapack/dlacpy.c b/contrib/libs/clapack/dlacpy.c
new file mode 100644
index 0000000000..9fff49569c
--- /dev/null
+++ b/contrib/libs/clapack/dlacpy.c
@@ -0,0 +1,125 @@
+/* dlacpy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlacpy_(char *uplo, integer *m, integer *n, doublereal *
+ a, integer *lda, doublereal *b, integer *ldb)
+{
+ /* Local variables */
+ integer a_dim1, a_offset, b_dim1, b_offset;
+ integer row, col, lo, hi;
+ extern logical lsame_(char *, char *);
+ logical upper, lower;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Purpose */
+/* ======= */
+
+/* DLACPY copies all or part of the M-by-N matrix A into the matrix B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Selects the part of A copied to B: */
+/* = 'U': rows 1..min(j,M) of each column j (upper trapezoid); */
+/* = 'L': rows j..M of each column j (lower trapezoid); */
+/* otherwise: the whole matrix. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* Only the part selected by UPLO is accessed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (output) DOUBLE PRECISION array, dimension (LDB,N) */
+/* On exit, B = A in the locations selected by UPLO. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,M). */
+
+/* ===================================================================== */
+
+ /* Shift both pointers so that a[row + col * a_dim1] addresses the */
+ /* Fortran-style 1-based element A(row,col), and likewise for b. */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ upper = lsame_(uplo, "U");
+ lower = lsame_(uplo, "L");
+
+ /* One pass over the columns; the row window of each column is */
+ /* selected by UPLO ('U' and 'L' are mutually exclusive). */
+ for (col = 1; col <= *n; ++col) {
+ lo = lower ? col : 1;
+ hi = upper ? min(col,*m) : *m;
+ for (row = lo; row <= hi; ++row) {
+ b[row + col * b_dim1] = a[row + col * a_dim1];
+ }
+ }
+ return 0;
+
+/* End of DLACPY */
+
+} /* dlacpy_ */
diff --git a/contrib/libs/clapack/dladiv.c b/contrib/libs/clapack/dladiv.c
new file mode 100644
index 0000000000..20fbe6be93
--- /dev/null
+++ b/contrib/libs/clapack/dladiv.c
@@ -0,0 +1,78 @@
+/* dladiv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dladiv_(doublereal *a, doublereal *b, doublereal *c__,
+ doublereal *d__, doublereal *p, doublereal *q)
+{
+ doublereal ratio, denom;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Purpose */
+/* ======= */
+
+/* DLADIV computes the complex quotient in real arithmetic: */
+
+/* a + i*b */
+/* p + i*q = --------- */
+/* c + i*d */
+
+/* using Smith's algorithm: the division is scaled by whichever of */
+/* c, d has the larger magnitude, which avoids needless overflow in */
+/* the intermediate products. (Robert L. Smith; see D. Knuth, The Art */
+/* of Computer Programming, Vol. 2, p. 195.) */
+
+/* Arguments */
+/* ========= */
+
+/* A (input) DOUBLE PRECISION */
+/* B (input) DOUBLE PRECISION */
+/* C (input) DOUBLE PRECISION */
+/* D (input) DOUBLE PRECISION */
+/* The scalars a, b, c, and d in the above expression. */
+
+/* P (output) DOUBLE PRECISION */
+/* Q (output) DOUBLE PRECISION */
+/* The scalars p and q in the above expression. */
+
+/* ===================================================================== */
+
+ if (abs(*d__) >= abs(*c__)) {
+ /* |d| >= |c|: scale numerator and denominator by c/d. */
+ ratio = *c__ / *d__;
+ denom = *d__ + *c__ * ratio;
+ *p = (*b + *a * ratio) / denom;
+ *q = (-(*a) + *b * ratio) / denom;
+ } else {
+ /* |c| > |d|: scale numerator and denominator by d/c. */
+ ratio = *d__ / *c__;
+ denom = *c__ + *d__ * ratio;
+ *p = (*a + *b * ratio) / denom;
+ *q = (*b - *a * ratio) / denom;
+ }
+
+ return 0;
+
+/* End of DLADIV */
+
+} /* dladiv_ */
diff --git a/contrib/libs/clapack/dlae2.c b/contrib/libs/clapack/dlae2.c
new file mode 100644
index 0000000000..c119034ecf
--- /dev/null
+++ b/contrib/libs/clapack/dlae2.c
@@ -0,0 +1,142 @@
+/* dlae2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlae2_(doublereal *a, doublereal *b, doublereal *c__,
+ doublereal *rt1, doublereal *rt2)
+{
+ /* System generated locals */
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ doublereal ab, df, tb, sm, rt, adf, acmn, acmx;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAE2 computes the eigenvalues of a 2-by-2 symmetric matrix */
+/* [ A B ] */
+/* [ B C ]. */
+/* On return, RT1 is the eigenvalue of larger absolute value, and RT2 */
+/* is the eigenvalue of smaller absolute value. */
+
+/* Arguments */
+/* ========= */
+
+/* A (input) DOUBLE PRECISION */
+/* The (1,1) element of the 2-by-2 matrix. */
+
+/* B (input) DOUBLE PRECISION */
+/* The (1,2) and (2,1) elements of the 2-by-2 matrix. */
+
+/* C (input) DOUBLE PRECISION */
+/* The (2,2) element of the 2-by-2 matrix. */
+
+/* RT1 (output) DOUBLE PRECISION */
+/* The eigenvalue of larger absolute value. */
+
+/* RT2 (output) DOUBLE PRECISION */
+/* The eigenvalue of smaller absolute value. */
+
+/* Further Details */
+/* =============== */
+
+/* RT1 is accurate to a few ulps barring over/underflow. */
+
+/* RT2 may be inaccurate if there is massive cancellation in the */
+/* determinant A*C-B*B; higher precision or correctly rounded or */
+/* correctly truncated arithmetic would be needed to compute RT2 */
+/* accurately in all cases. */
+
+/* Overflow is possible only if RT1 is within a factor of 5 of overflow. */
+/* Underflow is harmless if the input data is 0 or exceeds */
+/* underflow_threshold / macheps. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Compute the eigenvalues */
+
+/* sm = trace A+C, df = A-C, tb = 2*B. rt computed below equals */
+/* sqrt(df**2 + tb**2), evaluated with the larger of |df|, |tb| */
+/* factored out to avoid overflow in the squares. */
+ sm = *a + *c__;
+ df = *a - *c__;
+ adf = abs(df);
+ tb = *b + *b;
+ ab = abs(tb);
+ if (abs(*a) > abs(*c__)) {
+ acmx = *a;
+ acmn = *c__;
+ } else {
+ acmx = *c__;
+ acmn = *a;
+ }
+ if (adf > ab) {
+/* Computing 2nd power */
+ d__1 = ab / adf;
+ rt = adf * sqrt(d__1 * d__1 + 1.);
+ } else if (adf < ab) {
+/* Computing 2nd power */
+ d__1 = adf / ab;
+ rt = ab * sqrt(d__1 * d__1 + 1.);
+ } else {
+
+/* Includes case AB=ADF=0 */
+
+ rt = ab * sqrt(2.);
+ }
+/* RT1 = (sm -/+ rt)/2 takes the larger-magnitude root without */
+/* cancellation; RT2 is then recovered as det/RT1 = */
+/* (acmx*acmn - b**2)/RT1 rather than by the subtraction (sm +/- rt)/2. */
+ if (sm < 0.) {
+ *rt1 = (sm - rt) * .5;
+
+/* Order of execution important. */
+/* To get fully accurate smaller eigenvalue, */
+/* next line needs to be executed in higher precision. */
+
+ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
+ } else if (sm > 0.) {
+ *rt1 = (sm + rt) * .5;
+
+/* Order of execution important. */
+/* To get fully accurate smaller eigenvalue, */
+/* next line needs to be executed in higher precision. */
+
+ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
+ } else {
+
+/* Includes case RT1 = RT2 = 0 */
+
+ *rt1 = rt * .5;
+ *rt2 = rt * -.5;
+ }
+ return 0;
+
+/* End of DLAE2 */
+
+} /* dlae2_ */
diff --git a/contrib/libs/clapack/dlaebz.c b/contrib/libs/clapack/dlaebz.c
new file mode 100644
index 0000000000..a628943937
--- /dev/null
+++ b/contrib/libs/clapack/dlaebz.c
@@ -0,0 +1,640 @@
+/* dlaebz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlaebz_(integer *ijob, integer *nitmax, integer *n, 
+	integer *mmax, integer *minp, integer *nbmin, doublereal *abstol, 
+	doublereal *reltol, doublereal *pivmin, doublereal *d__, doublereal *
+	e, doublereal *e2, integer *nval, doublereal *ab, doublereal *c__, 
+	integer *mout, integer *nab, doublereal *work, integer *iwork, 
+	integer *info)
+{
+    /* System generated locals */
+    integer nab_dim1, nab_offset, ab_dim1, ab_offset, i__1, i__2, i__3, i__4, 
+	    i__5, i__6;
+    doublereal d__1, d__2, d__3, d__4;
+
+    /* Local variables */
+    integer j, kf, ji, kl, jp, jit;
+    doublereal tmp1, tmp2;
+    integer itmp1, itmp2, kfnew, klnew;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAEBZ contains the iteration loops which compute and use the */
+/* function N(w), which is the count of eigenvalues of a symmetric */
+/* tridiagonal matrix T less than or equal to its argument w. It */
+/* performs a choice of two types of loops: */
+
+/* IJOB=1, followed by */
+/* IJOB=2: It takes as input a list of intervals and returns a list of */
+/* sufficiently small intervals whose union contains the same */
+/* eigenvalues as the union of the original intervals. */
+/* The input intervals are (AB(j,1),AB(j,2)], j=1,...,MINP. */
+/* The output interval (AB(j,1),AB(j,2)] will contain */
+/* eigenvalues NAB(j,1)+1,...,NAB(j,2), where 1 <= j <= MOUT. */
+
+/* IJOB=3: It performs a binary search in each input interval */
+/* (AB(j,1),AB(j,2)] for a point w(j) such that */
+/* N(w(j))=NVAL(j), and uses C(j) as the starting point of */
+/* the search. If such a w(j) is found, then on output */
+/* AB(j,1)=AB(j,2)=w. If no such w(j) is found, then on output */
+/* (AB(j,1),AB(j,2)] will be a small interval containing the */
+/* point where N(w) jumps through NVAL(j), unless that point */
+/* lies outside the initial interval. */
+
+/* Note that the intervals are in all cases half-open intervals, */
+/* i.e., of the form (a,b] , which includes b but not a . */
+
+/* To avoid underflow, the matrix should be scaled so that its largest */
+/* element is no greater than overflow**(1/2) * underflow**(1/4) */
+/* in absolute value. To assure the most accurate computation */
+/* of small eigenvalues, the matrix should be scaled to be */
+/* not much smaller than that, either. */
+
+/* See W. Kahan "Accurate Eigenvalues of a Symmetric Tridiagonal */
+/* Matrix", Report CS41, Computer Science Dept., Stanford */
+/* University, July 21, 1966 */
+
+/* Note: the arguments are, in general, *not* checked for unreasonable */
+/* values. */
+
+/* Arguments */
+/* ========= */
+
+/* IJOB (input) INTEGER */
+/* Specifies what is to be done: */
+/* = 1: Compute NAB for the initial intervals. */
+/* = 2: Perform bisection iteration to find eigenvalues of T. */
+/* = 3: Perform bisection iteration to invert N(w), i.e., */
+/* to find a point which has a specified number of */
+/* eigenvalues of T to its left. */
+/* Other values will cause DLAEBZ to return with INFO=-1. */
+
+/* NITMAX (input) INTEGER */
+/* The maximum number of "levels" of bisection to be */
+/* performed, i.e., an interval of width W will not be made */
+/* smaller than 2^(-NITMAX) * W. If not all intervals */
+/* have converged after NITMAX iterations, then INFO is set */
+/* to the number of non-converged intervals. */
+
+/* N (input) INTEGER */
+/* The dimension n of the tridiagonal matrix T. It must be at */
+/* least 1. */
+
+/* MMAX (input) INTEGER */
+/* The maximum number of intervals. If more than MMAX intervals */
+/* are generated, then DLAEBZ will quit with INFO=MMAX+1. */
+
+/* MINP (input) INTEGER */
+/* The initial number of intervals. It may not be greater than */
+/* MMAX. */
+
+/* NBMIN (input) INTEGER */
+/* The smallest number of intervals that should be processed */
+/* using a vector loop. If zero, then only the scalar loop */
+/* will be used. */
+
+/* ABSTOL (input) DOUBLE PRECISION */
+/* The minimum (absolute) width of an interval. When an */
+/* interval is narrower than ABSTOL, or than RELTOL times the */
+/* larger (in magnitude) endpoint, then it is considered to be */
+/* sufficiently small, i.e., converged. This must be at least */
+/* zero. */
+
+/* RELTOL (input) DOUBLE PRECISION */
+/* The minimum relative width of an interval. When an interval */
+/* is narrower than ABSTOL, or than RELTOL times the larger (in */
+/* magnitude) endpoint, then it is considered to be */
+/* sufficiently small, i.e., converged. Note: this should */
+/* always be at least radix*machine epsilon. */
+
+/* PIVMIN (input) DOUBLE PRECISION */
+/* The minimum absolute value of a "pivot" in the Sturm */
+/* sequence loop. This *must* be at least max |e(j)**2| * */
+/* safe_min and at least safe_min, where safe_min is at least */
+/* the smallest number that can divide one without overflow. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The diagonal elements of the tridiagonal matrix T. */
+
+/* E (input) DOUBLE PRECISION array, dimension (N) */
+/* The offdiagonal elements of the tridiagonal matrix T in */
+/* positions 1 through N-1. E(N) is arbitrary. */
+
+/* E2 (input) DOUBLE PRECISION array, dimension (N) */
+/* The squares of the offdiagonal elements of the tridiagonal */
+/* matrix T. E2(N) is ignored. */
+
+/* NVAL (input/output) INTEGER array, dimension (MINP) */
+/* If IJOB=1 or 2, not referenced. */
+/* If IJOB=3, the desired values of N(w). The elements of NVAL */
+/* will be reordered to correspond with the intervals in AB. */
+/* Thus, NVAL(j) on output will not, in general be the same as */
+/* NVAL(j) on input, but it will correspond with the interval */
+/* (AB(j,1),AB(j,2)] on output. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (MMAX,2) */
+/* The endpoints of the intervals. AB(j,1) is a(j), the left */
+/* endpoint of the j-th interval, and AB(j,2) is b(j), the */
+/* right endpoint of the j-th interval. The input intervals */
+/* will, in general, be modified, split, and reordered by the */
+/* calculation. */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (MMAX) */
+/* If IJOB=1, ignored. */
+/* If IJOB=2, workspace. */
+/* If IJOB=3, then on input C(j) should be initialized to the */
+/* first search point in the binary search. */
+
+/* MOUT (output) INTEGER */
+/* If IJOB=1, the number of eigenvalues in the intervals. */
+/* If IJOB=2 or 3, the number of intervals output. */
+/* If IJOB=3, MOUT will equal MINP. */
+
+/* NAB (input/output) INTEGER array, dimension (MMAX,2) */
+/* If IJOB=1, then on output NAB(i,j) will be set to N(AB(i,j)). */
+/* If IJOB=2, then on input, NAB(i,j) should be set. It must */
+/* satisfy the condition: */
+/* N(AB(i,1)) <= NAB(i,1) <= NAB(i,2) <= N(AB(i,2)), */
+/* which means that in interval i only eigenvalues */
+/* NAB(i,1)+1,...,NAB(i,2) will be considered. Usually, */
+/* NAB(i,j)=N(AB(i,j)), from a previous call to DLAEBZ with */
+/* IJOB=1. */
+/* On output, NAB(i,j) will contain */
+/* max(na(k),min(nb(k),N(AB(i,j)))), where k is the index of */
+/* the input interval that the output interval */
+/* (AB(j,1),AB(j,2)] came from, and na(k) and nb(k) are the */
+/* the input values of NAB(k,1) and NAB(k,2). */
+/* If IJOB=3, then on output, NAB(i,j) contains N(AB(i,j)), */
+/* unless N(w) > NVAL(i) for all search points w , in which */
+/* case NAB(i,1) will not be modified, i.e., the output */
+/* value will be the same as the input value (modulo */
+/* reorderings -- see NVAL and AB), or unless N(w) < NVAL(i) */
+/* for all search points w , in which case NAB(i,2) will */
+/* not be modified. Normally, NAB should be set to some */
+/* distinctive value(s) before DLAEBZ is called. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MMAX) */
+/* Workspace. */
+
+/* IWORK (workspace) INTEGER array, dimension (MMAX) */
+/* Workspace. */
+
+/* INFO (output) INTEGER */
+/* = 0: All intervals converged. */
+/* = 1--MMAX: The last INFO intervals did not converge. */
+/* = MMAX+1: More than MMAX intervals were generated. */
+
+/* Further Details */
+/* =============== */
+
+/* This routine is intended to be called only by other LAPACK */
+/* routines, thus the interface is less user-friendly. It is intended */
+/* for two purposes: */
+
+/* (a) finding eigenvalues. In this case, DLAEBZ should have one or */
+/* more initial intervals set up in AB, and DLAEBZ should be called */
+/* with IJOB=1. This sets up NAB, and also counts the eigenvalues. */
+/* Intervals with no eigenvalues would usually be thrown out at */
+/* this point. Also, if not all the eigenvalues in an interval i */
+/* are desired, NAB(i,1) can be increased or NAB(i,2) decreased. */
+/* For example, set NAB(i,1)=NAB(i,2)-1 to get the largest */
+/* eigenvalue. DLAEBZ is then called with IJOB=2 and MMAX */
+/* no smaller than the value of MOUT returned by the call with */
+/* IJOB=1. After this (IJOB=2) call, eigenvalues NAB(i,1)+1 */
+/* through NAB(i,2) are approximately AB(i,1) (or AB(i,2)) to the */
+/* tolerance specified by ABSTOL and RELTOL. */
+
+/* (b) finding an interval (a',b'] containing eigenvalues w(f),...,w(l). */
+/* In this case, start with a Gershgorin interval (a,b). Set up */
+/* AB to contain 2 search intervals, both initially (a,b). One */
+/* NVAL element should contain f-1 and the other should contain l */
+/* , while C should contain a and b, resp. NAB(i,1) should be -1 */
+/* and NAB(i,2) should be N+1, to flag an error if the desired */
+/* interval does not lie in (a,b). DLAEBZ is then called with */
+/* IJOB=3. On exit, if w(f-1) < w(f), then one of the intervals -- */
+/* j -- will have AB(j,1)=AB(j,2) and NAB(j,1)=NAB(j,2)=f-1, while */
+/* if, to the specified tolerance, w(f-k)=...=w(f+r), k > 0 and r */
+/* >= 0, then the interval will have N(AB(j,1))=NAB(j,1)=f-k and */
+/* N(AB(j,2))=NAB(j,2)=f+r. The cases w(l) < w(l+1) and */
+/* w(l-r)=...=w(l+k) are handled similarly. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Check for Errors */
+
+    /* Parameter adjustments */
+    nab_dim1 = *mmax;
+    nab_offset = 1 + nab_dim1;
+    nab -= nab_offset;
+    ab_dim1 = *mmax;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --d__;
+    --e;
+    --e2;
+    --nval;
+    --c__;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    if (*ijob < 1 || *ijob > 3) {
+	*info = -1;
+	return 0;
+    }
+
+/* Initialize NAB */
+
+    if (*ijob == 1) {
+
+/* Compute the number of eigenvalues in the initial intervals. */
+
+	*mout = 0;
+/* DIR$ NOVECTOR */
+	i__1 = *minp;
+	for (ji = 1; ji <= i__1; ++ji) {
+	    for (jp = 1; jp <= 2; ++jp) {
+
+/*              Sturm sequence count: each nonpositive pivot of the */
+/*              LDL' factorization of (T - w*I), with w = AB(ji,jp), */
+/*              contributes one eigenvalue .LE. w. Pivots smaller in */
+/*              magnitude than PIVMIN are clamped to -PIVMIN so the */
+/*              division in the recurrence cannot overflow. */
+
+		tmp1 = d__[1] - ab[ji + jp * ab_dim1];
+		if (abs(tmp1) < *pivmin) {
+		    tmp1 = -(*pivmin);
+		}
+		nab[ji + jp * nab_dim1] = 0;
+		if (tmp1 <= 0.) {
+		    nab[ji + jp * nab_dim1] = 1;
+		}
+
+		i__2 = *n;
+		for (j = 2; j <= i__2; ++j) {
+		    tmp1 = d__[j] - e2[j - 1] / tmp1 - ab[ji + jp * ab_dim1];
+		    if (abs(tmp1) < *pivmin) {
+			tmp1 = -(*pivmin);
+		    }
+		    if (tmp1 <= 0.) {
+			++nab[ji + jp * nab_dim1];
+		    }
+/* L10: */
+		}
+/* L20: */
+	    }
+	    *mout = *mout + nab[ji + (nab_dim1 << 1)] - nab[ji + nab_dim1];
+/* L30: */
+	}
+	return 0;
+    }
+
+/* Initialize for loop */
+
+/* KF and KL have the following meaning: */
+/* Intervals 1,...,KF-1 have converged. */
+/* Intervals KF,...,KL still need to be refined. */
+
+    kf = 1;
+    kl = *minp;
+
+/* If IJOB=2, initialize C. */
+/* If IJOB=3, use the user-supplied starting point. */
+
+    if (*ijob == 2) {
+	i__1 = *minp;
+	for (ji = 1; ji <= i__1; ++ji) {
+	    c__[ji] = (ab[ji + ab_dim1] + ab[ji + (ab_dim1 << 1)]) * .5;
+/* L40: */
+	}
+    }
+
+/* Iteration loop */
+
+    i__1 = *nitmax;
+    for (jit = 1; jit <= i__1; ++jit) {
+
+/* Loop over intervals */
+
+	if (kl - kf + 1 >= *nbmin && *nbmin > 0) {
+
+/* Begin of Parallel Version of the loop */
+
+	    i__2 = kl;
+	    for (ji = kf; ji <= i__2; ++ji) {
+
+/* Compute N(c), the number of eigenvalues less than c */
+
+		work[ji] = d__[1] - c__[ji];
+		iwork[ji] = 0;
+		if (work[ji] <= *pivmin) {
+		    iwork[ji] = 1;
+/* Computing MIN */
+		    d__1 = work[ji], d__2 = -(*pivmin);
+		    work[ji] = min(d__1,d__2);
+		}
+
+		i__3 = *n;
+		for (j = 2; j <= i__3; ++j) {
+		    work[ji] = d__[j] - e2[j - 1] / work[ji] - c__[ji];
+		    if (work[ji] <= *pivmin) {
+			++iwork[ji];
+/* Computing MIN */
+			d__1 = work[ji], d__2 = -(*pivmin);
+			work[ji] = min(d__1,d__2);
+		    }
+/* L50: */
+		}
+/* L60: */
+	    }
+
+	    if (*ijob <= 2) {
+
+/* IJOB=2: Choose all intervals containing eigenvalues. */
+
+		klnew = kl;
+		i__2 = kl;
+		for (ji = kf; ji <= i__2; ++ji) {
+
+/* Insure that N(w) is monotone */
+
+/* Computing MIN */
+/* Computing MAX */
+		    i__5 = nab[ji + nab_dim1], i__6 = iwork[ji];
+		    i__3 = nab[ji + (nab_dim1 << 1)], i__4 = max(i__5,i__6);
+		    iwork[ji] = min(i__3,i__4);
+
+/* Update the Queue -- add intervals if both halves */
+/* contain eigenvalues. */
+
+		    if (iwork[ji] == nab[ji + (nab_dim1 << 1)]) {
+
+/* No eigenvalue in the upper interval: */
+/* just use the lower interval. */
+
+			ab[ji + (ab_dim1 << 1)] = c__[ji];
+
+		    } else if (iwork[ji] == nab[ji + nab_dim1]) {
+
+/* No eigenvalue in the lower interval: */
+/* just use the upper interval. */
+
+			ab[ji + ab_dim1] = c__[ji];
+		    } else {
+			++klnew;
+			if (klnew <= *mmax) {
+
+/* Eigenvalue in both intervals -- add upper to */
+/* queue. */
+
+			    ab[klnew + (ab_dim1 << 1)] = ab[ji + (ab_dim1 <<
+				    1)];
+			    nab[klnew + (nab_dim1 << 1)] = nab[ji + (nab_dim1
+				    << 1)];
+			    ab[klnew + ab_dim1] = c__[ji];
+			    nab[klnew + nab_dim1] = iwork[ji];
+			    ab[ji + (ab_dim1 << 1)] = c__[ji];
+			    nab[ji + (nab_dim1 << 1)] = iwork[ji];
+			} else {
+			    *info = *mmax + 1;
+			}
+		    }
+/* L70: */
+		}
+		if (*info != 0) {
+		    return 0;
+		}
+		kl = klnew;
+	    } else {
+
+/* IJOB=3: Binary search. Keep only the interval containing */
+/* w s.t. N(w) = NVAL */
+
+		i__2 = kl;
+		for (ji = kf; ji <= i__2; ++ji) {
+		    if (iwork[ji] <= nval[ji]) {
+			ab[ji + ab_dim1] = c__[ji];
+			nab[ji + nab_dim1] = iwork[ji];
+		    }
+		    if (iwork[ji] >= nval[ji]) {
+			ab[ji + (ab_dim1 << 1)] = c__[ji];
+			nab[ji + (nab_dim1 << 1)] = iwork[ji];
+		    }
+/* L80: */
+		}
+	    }
+
+	} else {
+
+/* End of Parallel Version of the loop */
+
+/* Begin of Serial Version of the loop */
+
+	    klnew = kl;
+	    i__2 = kl;
+	    for (ji = kf; ji <= i__2; ++ji) {
+
+/* Compute N(w), the number of eigenvalues less than w */
+
+		tmp1 = c__[ji];
+		tmp2 = d__[1] - tmp1;
+		itmp1 = 0;
+		if (tmp2 <= *pivmin) {
+		    itmp1 = 1;
+/* Computing MIN */
+		    d__1 = tmp2, d__2 = -(*pivmin);
+		    tmp2 = min(d__1,d__2);
+		}
+
+/* A series of compiler directives to defeat vectorization */
+/* for the next loop */
+
+/* $PL$ CMCHAR=' ' */
+/* DIR$ NEXTSCALAR */
+/* $DIR SCALAR */
+/* DIR$ NEXT SCALAR */
+/* VD$L NOVECTOR */
+/* DEC$ NOVECTOR */
+/* VD$ NOVECTOR */
+/* VDIR NOVECTOR */
+/* VOCL LOOP,SCALAR */
+/* IBM PREFER SCALAR */
+/* $PL$ CMCHAR='*' */
+
+		i__3 = *n;
+		for (j = 2; j <= i__3; ++j) {
+		    tmp2 = d__[j] - e2[j - 1] / tmp2 - tmp1;
+		    if (tmp2 <= *pivmin) {
+			++itmp1;
+/* Computing MIN */
+			d__1 = tmp2, d__2 = -(*pivmin);
+			tmp2 = min(d__1,d__2);
+		    }
+/* L90: */
+		}
+
+		if (*ijob <= 2) {
+
+/* IJOB=2: Choose all intervals containing eigenvalues. */
+
+/* Insure that N(w) is monotone */
+
+/* Computing MIN */
+/* Computing MAX */
+		    i__5 = nab[ji + nab_dim1];
+		    i__3 = nab[ji + (nab_dim1 << 1)], i__4 = max(i__5,itmp1);
+		    itmp1 = min(i__3,i__4);
+
+/* Update the Queue -- add intervals if both halves */
+/* contain eigenvalues. */
+
+		    if (itmp1 == nab[ji + (nab_dim1 << 1)]) {
+
+/* No eigenvalue in the upper interval: */
+/* just use the lower interval. */
+
+			ab[ji + (ab_dim1 << 1)] = tmp1;
+
+		    } else if (itmp1 == nab[ji + nab_dim1]) {
+
+/* No eigenvalue in the lower interval: */
+/* just use the upper interval. */
+
+			ab[ji + ab_dim1] = tmp1;
+		    } else if (klnew < *mmax) {
+
+/* Eigenvalue in both intervals -- add upper to queue. */
+
+			++klnew;
+			ab[klnew + (ab_dim1 << 1)] = ab[ji + (ab_dim1 << 1)];
+			nab[klnew + (nab_dim1 << 1)] = nab[ji + (nab_dim1 <<
+				1)];
+			ab[klnew + ab_dim1] = tmp1;
+			nab[klnew + nab_dim1] = itmp1;
+			ab[ji + (ab_dim1 << 1)] = tmp1;
+			nab[ji + (nab_dim1 << 1)] = itmp1;
+		    } else {
+			*info = *mmax + 1;
+			return 0;
+		    }
+		} else {
+
+/* IJOB=3: Binary search. Keep only the interval */
+/* containing w s.t. N(w) = NVAL */
+
+		    if (itmp1 <= nval[ji]) {
+			ab[ji + ab_dim1] = tmp1;
+			nab[ji + nab_dim1] = itmp1;
+		    }
+		    if (itmp1 >= nval[ji]) {
+			ab[ji + (ab_dim1 << 1)] = tmp1;
+			nab[ji + (nab_dim1 << 1)] = itmp1;
+		    }
+		}
+/* L100: */
+	    }
+	    kl = klnew;
+
+/* End of Serial Version of the loop */
+
+	}
+
+/* Check for convergence */
+
+/*        An interval is converged when its width falls below */
+/*        MAX( ABSTOL, PIVMIN, RELTOL * |larger endpoint| ) or when */
+/*        it contains no uncounted eigenvalues, i.e. */
+/*        NAB(ji,1) .GE. NAB(ji,2). Converged intervals are swapped */
+/*        to the front of the KF..KL range. */
+
+	kfnew = kf;
+	i__2 = kl;
+	for (ji = kf; ji <= i__2; ++ji) {
+	    tmp1 = (d__1 = ab[ji + (ab_dim1 << 1)] - ab[ji + ab_dim1], abs(
+		    d__1));
+/* Computing MAX */
+	    d__3 = (d__1 = ab[ji + (ab_dim1 << 1)], abs(d__1)), d__4 = (d__2 =
+		     ab[ji + ab_dim1], abs(d__2));
+	    tmp2 = max(d__3,d__4);
+/* Computing MAX */
+	    d__1 = max(*abstol,*pivmin), d__2 = *reltol * tmp2;
+	    if (tmp1 < max(d__1,d__2) || nab[ji + nab_dim1] >= nab[ji + (
+		    nab_dim1 << 1)]) {
+
+/* Converged -- Swap with position KFNEW, */
+/* then increment KFNEW */
+
+		if (ji > kfnew) {
+		    tmp1 = ab[ji + ab_dim1];
+		    tmp2 = ab[ji + (ab_dim1 << 1)];
+		    itmp1 = nab[ji + nab_dim1];
+		    itmp2 = nab[ji + (nab_dim1 << 1)];
+		    ab[ji + ab_dim1] = ab[kfnew + ab_dim1];
+		    ab[ji + (ab_dim1 << 1)] = ab[kfnew + (ab_dim1 << 1)];
+		    nab[ji + nab_dim1] = nab[kfnew + nab_dim1];
+		    nab[ji + (nab_dim1 << 1)] = nab[kfnew + (nab_dim1 << 1)];
+		    ab[kfnew + ab_dim1] = tmp1;
+		    ab[kfnew + (ab_dim1 << 1)] = tmp2;
+		    nab[kfnew + nab_dim1] = itmp1;
+		    nab[kfnew + (nab_dim1 << 1)] = itmp2;
+		    if (*ijob == 3) {
+			itmp1 = nval[ji];
+			nval[ji] = nval[kfnew];
+			nval[kfnew] = itmp1;
+		    }
+		}
+		++kfnew;
+	    }
+/* L110: */
+	}
+	kf = kfnew;
+
+/* Choose Midpoints */
+
+	i__2 = kl;
+	for (ji = kf; ji <= i__2; ++ji) {
+	    c__[ji] = (ab[ji + ab_dim1] + ab[ji + (ab_dim1 << 1)]) * .5;
+/* L120: */
+	}
+
+/* If no more intervals to refine, quit. */
+
+	if (kf > kl) {
+	    goto L140;
+	}
+/* L130: */
+    }
+
+/* Converged */
+
+/* INFO returns KL+1-KF, the number of intervals that have not */
+/* converged after NITMAX bisection steps (zero on full convergence). */
+
+L140:
+/* Computing MAX */
+    i__1 = kl + 1 - kf;
+    *info = max(i__1,0);
+    *mout = kl;
+
+    return 0;
+
+/* End of DLAEBZ */
+
+} /* dlaebz_ */
diff --git a/contrib/libs/clapack/dlaed0.c b/contrib/libs/clapack/dlaed0.c
new file mode 100644
index 0000000000..e9cb08002d
--- /dev/null
+++ b/contrib/libs/clapack/dlaed0.c
@@ -0,0 +1,440 @@
+/* dlaed0.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__9 = 9;
+static integer c__0 = 0;
+static integer c__2 = 2;
+static doublereal c_b23 = 1.;
+static doublereal c_b24 = 0.;
+static integer c__1 = 1;
+
+/* Subroutine */ int dlaed0_(integer *icompq, integer *qsiz, integer *n, 
+	doublereal *d__, doublereal *e, doublereal *q, integer *ldq, 
+	doublereal *qstore, integer *ldqs, doublereal *work, integer *iwork, 
+	integer *info)
+{
+    /* System generated locals */
+    integer q_dim1, q_offset, qstore_dim1, qstore_offset, i__1, i__2;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double log(doublereal);
+    integer pow_ii(integer *, integer *);
+
+    /* Local variables */
+    integer i__, j, k, iq, lgn, msd2, smm1, spm1, spm2;
+    doublereal temp;
+    integer curr;
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, 
+	    integer *, doublereal *, doublereal *, integer *, doublereal *, 
+	    integer *, doublereal *, doublereal *, integer *);
+    integer iperm;
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, 
+	    doublereal *, integer *);
+    integer indxq, iwrem;
+    extern /* Subroutine */ int dlaed1_(integer *, doublereal *, doublereal *,
+	     integer *, integer *, doublereal *, integer *, doublereal *, 
+	    integer *, integer *);
+    integer iqptr;
+    extern /* Subroutine */ int dlaed7_(integer *, integer *, integer *, 
+	    integer *, integer *, integer *, doublereal *, doublereal *, 
+	    integer *, integer *, doublereal *, integer *, doublereal *, 
+	    integer *, integer *, integer *, integer *, integer *, doublereal 
+	    *, doublereal *, integer *, integer *);
+    integer tlvls;
+    extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, 
+	    doublereal *, integer *, doublereal *, integer *);
+    integer igivcl;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    integer igivnm, submat, curprb, subpbs, igivpt;
+    extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *, 
+	    doublereal *, doublereal *, integer *, doublereal *, integer *);
+    integer curlvl, matsiz, iprmpt, smlsiz;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAED0 computes all eigenvalues and corresponding eigenvectors of a */
+/* symmetric tridiagonal matrix using the divide and conquer method. */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* = 0: Compute eigenvalues only. */
+/* = 1: Compute eigenvectors of original dense symmetric matrix */
+/* also. On entry, Q contains the orthogonal matrix used */
+/* to reduce the original matrix to tridiagonal form. */
+/* = 2: Compute eigenvalues and eigenvectors of tridiagonal */
+/* matrix. */
+
+/* QSIZ (input) INTEGER */
+/* The dimension of the orthogonal matrix used to reduce */
+/* the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1. */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the main diagonal of the tridiagonal matrix. */
+/* On exit, its eigenvalues. */
+
+/* E (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The off-diagonal elements of the tridiagonal matrix. */
+/* On exit, E has been destroyed. */
+
+/* Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N) */
+/* On entry, Q must contain an N-by-N orthogonal matrix. */
+/* If ICOMPQ = 0 Q is not referenced. */
+/* If ICOMPQ = 1 On entry, Q is a subset of the columns of the */
+/* orthogonal matrix used to reduce the full */
+/* matrix to tridiagonal form corresponding to */
+/* the subset of the full matrix which is being */
+/* decomposed at this time. */
+/* If ICOMPQ = 2 On entry, Q will be the identity matrix. */
+/* On exit, Q contains the eigenvectors of the */
+/* tridiagonal matrix. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. If eigenvectors are */
+/* desired, then LDQ >= max(1,N). In any case, LDQ >= 1. */
+
+/* QSTORE (workspace) DOUBLE PRECISION array, dimension (LDQS, N) */
+/* Referenced only when ICOMPQ = 1. Used to store parts of */
+/* the eigenvector matrix when the updating matrix multiplies */
+/* take place. */
+
+/* LDQS (input) INTEGER */
+/* The leading dimension of the array QSTORE. If ICOMPQ = 1, */
+/* then LDQS >= max(1,N). In any case, LDQS >= 1. */
+
+/* WORK (workspace) DOUBLE PRECISION array, */
+/* If ICOMPQ = 0 or 1, the dimension of WORK must be at least */
+/* 1 + 3*N + 2*N*lg N + 2*N**2 */
+/* ( lg( N ) = smallest integer k */
+/* such that 2^k >= N ) */
+/* If ICOMPQ = 2, the dimension of WORK must be at least */
+/* 4*N + N**2. */
+
+/* IWORK (workspace) INTEGER array, */
+/* If ICOMPQ = 0 or 1, the dimension of IWORK must be at least */
+/* 6 + 6*N + 5*N*lg N. */
+/* ( lg( N ) = smallest integer k */
+/* such that 2^k >= N ) */
+/* If ICOMPQ = 2, the dimension of IWORK must be at least */
+/* 3 + 5*N. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: The algorithm failed to compute an eigenvalue while */
+/* working on the submatrix lying in rows and columns */
+/* INFO/(N+1) through mod(INFO,N+1). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    qstore_dim1 = *ldqs;
+    qstore_offset = 1 + qstore_dim1;
+    qstore -= qstore_offset;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*icompq < 0 || *icompq > 2) {
+	*info = -1;
+    } else if (*icompq == 1 && *qsiz < max(0,*n)) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ldq < max(1,*n)) {
+	*info = -7;
+    } else if (*ldqs < max(1,*n)) {
+	*info = -9;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLAED0", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    smlsiz = ilaenv_(&c__9, "DLAED0", " ", &c__0, &c__0, &c__0, &c__0);
+
+/* Determine the size and placement of the submatrices, and save in */
+/* the leading elements of IWORK. */
+
+/* Repeatedly split each subproblem in half until every leaf */
+/* subproblem has size at most SMLSIZ; TLVLS counts the number of */
+/* levels in the resulting divide-and-conquer tree. */
+
+    iwork[1] = *n;
+    subpbs = 1;
+    tlvls = 0;
+L10:
+    if (iwork[subpbs] > smlsiz) {
+	for (j = subpbs; j >= 1; --j) {
+	    iwork[j * 2] = (iwork[j] + 1) / 2;
+	    iwork[(j << 1) - 1] = iwork[j] / 2;
+/* L20: */
+	}
+	++tlvls;
+	subpbs <<= 1;
+	goto L10;
+    }
+
+/* Turn the subproblem sizes into cumulative indices, so that */
+/* IWORK(j) is the last row/column of the j-th submatrix. */
+
+    i__1 = subpbs;
+    for (j = 2; j <= i__1; ++j) {
+	iwork[j] += iwork[j - 1];
+/* L30: */
+    }
+
+/* Divide the matrix into SUBPBS submatrices of size at most SMLSIZ+1 */
+/* using rank-1 modifications (cuts). */
+
+    spm1 = subpbs - 1;
+    i__1 = spm1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	submat = iwork[i__] + 1;
+	smm1 = submat - 1;
+	d__[smm1] -= (d__1 = e[smm1], abs(d__1));
+	d__[submat] -= (d__1 = e[smm1], abs(d__1));
+/* L40: */
+    }
+
+    indxq = (*n << 2) + 3;
+    if (*icompq != 2) {
+
+/* Set up workspaces for eigenvalues only/accumulate new vectors */
+/* routine */
+
+	temp = log((doublereal) (*n)) / log(2.);
+	lgn = (integer) temp;
+	if (pow_ii(&c__2, &lgn) < *n) {
+	    ++lgn;
+	}
+	if (pow_ii(&c__2, &lgn) < *n) {
+	    ++lgn;
+	}
+	iprmpt = indxq + *n + 1;
+	iperm = iprmpt + *n * lgn;
+	iqptr = iperm + *n * lgn;
+	igivpt = iqptr + *n + 2;
+	igivcl = igivpt + *n * lgn;
+
+	igivnm = 1;
+	iq = igivnm + (*n << 1) * lgn;
+/* Computing 2nd power */
+	i__1 = *n;
+	iwrem = iq + i__1 * i__1 + 1;
+
+/* Initialize pointers */
+
+	i__1 = subpbs;
+	for (i__ = 0; i__ <= i__1; ++i__) {
+	    iwork[iprmpt + i__] = 1;
+	    iwork[igivpt + i__] = 1;
+/* L50: */
+	}
+	iwork[iqptr] = 1;
+    }
+
+/* Solve each submatrix eigenproblem at the bottom of the divide and */
+/* conquer tree. */
+
+    curr = 0;
+    i__1 = spm1;
+    for (i__ = 0; i__ <= i__1; ++i__) {
+	if (i__ == 0) {
+	    submat = 1;
+	    matsiz = iwork[1];
+	} else {
+	    submat = iwork[i__] + 1;
+	    matsiz = iwork[i__ + 1] - iwork[i__];
+	}
+	if (*icompq == 2) {
+	    dsteqr_("I", &matsiz, &d__[submat], &e[submat], &q[submat + 
+		    submat * q_dim1], ldq, &work[1], info);
+	    if (*info != 0) {
+		goto L130;
+	    }
+	} else {
+	    dsteqr_("I", &matsiz, &d__[submat], &e[submat], &work[iq - 1 + 
+		    iwork[iqptr + curr]], &matsiz, &work[1], info);
+	    if (*info != 0) {
+		goto L130;
+	    }
+	    if (*icompq == 1) {
+		dgemm_("N", "N", qsiz, &matsiz, &matsiz, &c_b23, &q[submat * 
+			q_dim1 + 1], ldq, &work[iq - 1 + iwork[iqptr + curr]], 
+			&matsiz, &c_b24, &qstore[submat * qstore_dim1 + 1], 
+			ldqs);
+	    }
+/* Computing 2nd power */
+	    i__2 = matsiz;
+	    iwork[iqptr + curr + 1] = iwork[iqptr + curr] + i__2 * i__2;
+	    ++curr;
+	}
+
+/* Set up the identity permutation for this submatrix in INDXQ. */
+
+	k = 1;
+	i__2 = iwork[i__ + 1];
+	for (j = submat; j <= i__2; ++j) {
+	    iwork[indxq + j] = k;
+	    ++k;
+/* L60: */
+	}
+/* L70: */
+    }
+
+/* Successively merge eigensystems of adjacent submatrices */
+/* into eigensystem for the corresponding larger matrix. */
+
+/* while ( SUBPBS > 1 ) */
+
+    curlvl = 1;
+L80:
+    if (subpbs > 1) {
+	spm2 = subpbs - 2;
+	i__1 = spm2;
+	for (i__ = 0; i__ <= i__1; i__ += 2) {
+	    if (i__ == 0) {
+		submat = 1;
+		matsiz = iwork[2];
+		msd2 = iwork[1];
+		curprb = 0;
+	    } else {
+		submat = iwork[i__] + 1;
+		matsiz = iwork[i__ + 2] - iwork[i__];
+		msd2 = matsiz / 2;
+		++curprb;
+	    }
+
+/* Merge lower order eigensystems (of size MSD2 and MATSIZ - MSD2) */
+/* into an eigensystem of size MATSIZ. */
+/* DLAED1 is used only for the full eigensystem of a tridiagonal */
+/* matrix. */
+/* DLAED7 handles the cases in which eigenvalues only or eigenvalues */
+/* and eigenvectors of a full symmetric matrix (which was reduced to */
+/* tridiagonal form) are desired. */
+
+	    if (*icompq == 2) {
+		dlaed1_(&matsiz, &d__[submat], &q[submat + submat * q_dim1], 
+			ldq, &iwork[indxq + submat], &e[submat + msd2 - 1], &
+			msd2, &work[1], &iwork[subpbs + 1], info);
+	    } else {
+		dlaed7_(icompq, &matsiz, qsiz, &tlvls, &curlvl, &curprb, &d__[
+			submat], &qstore[submat * qstore_dim1 + 1], ldqs, &
+			iwork[indxq + submat], &e[submat + msd2 - 1], &msd2, &
+			work[iq], &iwork[iqptr], &iwork[iprmpt], &iwork[iperm]
+, &iwork[igivpt], &iwork[igivcl], &work[igivnm], &
+			work[iwrem], &iwork[subpbs + 1], info);
+	    }
+	    if (*info != 0) {
+		goto L130;
+	    }
+	    iwork[i__ / 2 + 1] = iwork[i__ + 2];
+/* L90: */
+	}
+	subpbs /= 2;
+	++curlvl;
+	goto L80;
+    }
+
+/* end while */
+
+/* Re-merge the eigenvalues/vectors which were deflated at the final */
+/* merge step. */
+
+    if (*icompq == 1) {
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    j = iwork[indxq + i__];
+	    work[i__] = d__[j];
+	    dcopy_(qsiz, &qstore[j * qstore_dim1 + 1], &c__1, &q[i__ * q_dim1 
+		    + 1], &c__1);
+/* L100: */
+	}
+	dcopy_(n, &work[1], &c__1, &d__[1], &c__1);
+    } else if (*icompq == 2) {
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    j = iwork[indxq + i__];
+	    work[i__] = d__[j];
+	    dcopy_(n, &q[j * q_dim1 + 1], &c__1, &work[*n * i__ + 1], &c__1);
+/* L110: */
+	}
+	dcopy_(n, &work[1], &c__1, &d__[1], &c__1);
+	dlacpy_("A", n, n, &work[*n + 1], n, &q[q_offset], ldq);
+    } else {
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    j = iwork[indxq + i__];
+	    work[i__] = d__[j];
+/* L120: */
+	}
+	dcopy_(n, &work[1], &c__1, &d__[1], &c__1);
+    }
+    goto L140;
+
+L130:
+    *info = submat * (*n + 1) + submat + matsiz - 1;
+
+L140:
+    return 0;
+
+/* End of DLAED0 */
+
+} /* dlaed0_ */
diff --git a/contrib/libs/clapack/dlaed1.c b/contrib/libs/clapack/dlaed1.c
new file mode 100644
index 0000000000..170f2a0eaf
--- /dev/null
+++ b/contrib/libs/clapack/dlaed1.c
@@ -0,0 +1,249 @@
+/* dlaed1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dlaed1_(integer *n, doublereal *d__, doublereal *q,
+ integer *ldq, integer *indxq, doublereal *rho, integer *cutpnt,
+ doublereal *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, k, n1, n2, is, iw, iz, iq2, zpp1, indx, indxc;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ integer indxp;
+ extern /* Subroutine */ int dlaed2_(integer *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+ integer *, integer *, integer *, integer *), dlaed3_(integer *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *, integer *,
+ doublereal *, doublereal *, integer *);
+ integer idlmda;
+ extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+ integer *, integer *, integer *), xerbla_(char *, integer *);
+ integer coltyp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAED1 computes the updated eigensystem of a diagonal */
+/* matrix after modification by a rank-one symmetric matrix. This */
+/* routine is used only for the eigenproblem which requires all */
+/* eigenvalues and eigenvectors of a tridiagonal matrix. DLAED7 handles */
+/* the case in which eigenvalues only or eigenvalues and eigenvectors */
+/* of a full symmetric matrix (which was reduced to tridiagonal form) */
+/* are desired. */
+
+/* T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out) */
+
+/* where Z = Q'u, u is a vector of length N with ones in the */
+/* CUTPNT and CUTPNT + 1 th elements and zeros elsewhere. */
+
+/* The eigenvectors of the original matrix are stored in Q, and the */
+/* eigenvalues are in D. The algorithm consists of three stages: */
+
+/* The first stage consists of deflating the size of the problem */
+/* when there are multiple eigenvalues or if there is a zero in */
+/* the Z vector. For each such occurence the dimension of the */
+/* secular equation problem is reduced by one. This stage is */
+/* performed by the routine DLAED2. */
+
+/* The second stage consists of calculating the updated */
+/* eigenvalues. This is done by finding the roots of the secular */
+/* equation via the routine DLAED4 (as called by DLAED3). */
+/* This routine also calculates the eigenvectors of the current */
+/* problem. */
+
+/* The final stage consists of computing the updated eigenvectors */
+/* directly using the updated eigenvalues. The eigenvectors for */
+/* the current problem are multiplied with the eigenvectors from */
+/* the overall problem. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the eigenvalues of the rank-1-perturbed matrix. */
+/* On exit, the eigenvalues of the repaired matrix. */
+
+/* Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N) */
+/* On entry, the eigenvectors of the rank-1-perturbed matrix. */
+/* On exit, the eigenvectors of the repaired tridiagonal matrix. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* INDXQ (input/output) INTEGER array, dimension (N) */
+/* On entry, the permutation which separately sorts the two */
+/* subproblems in D into ascending order. */
+/* On exit, the permutation which will reintegrate the */
+/* subproblems back into sorted order, */
+/* i.e. D( INDXQ( I = 1, N ) ) will be in ascending order. */
+
+/* RHO (input) DOUBLE PRECISION */
+/* The subdiagonal entry used to create the rank-1 modification. */
+
+/* CUTPNT (input) INTEGER */
+/* The location of the last eigenvalue in the leading sub-matrix. */
+/* min(1,N) <= CUTPNT <= N/2. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (4*N + N**2) */
+
+/* IWORK (workspace) INTEGER array, dimension (4*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, an eigenvalue did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+/* Modified by Francoise Tisseur, University of Tennessee. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --indxq;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -1;
+ } else if (*ldq < max(1,*n)) {
+ *info = -4;
+ } else /* if(complicated condition) */ {
+/* Computing MIN */
+ i__1 = 1, i__2 = *n / 2;
+ if (min(i__1,i__2) > *cutpnt || *n / 2 < *cutpnt) {
+ *info = -7;
+ }
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLAED1", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* The following values are integer pointers which indicate */
+/* the portion of the workspace */
+/* used by a particular array in DLAED2 and DLAED3. */
+
+ iz = 1;
+ idlmda = iz + *n;
+ iw = idlmda + *n;
+ iq2 = iw + *n;
+
+ indx = 1;
+ indxc = indx + *n;
+ coltyp = indxc + *n;
+ indxp = coltyp + *n;
+
+
+/* Form the z-vector which consists of the last row of Q_1 and the */
+/* first row of Q_2. */
+
+ dcopy_(cutpnt, &q[*cutpnt + q_dim1], ldq, &work[iz], &c__1);
+ zpp1 = *cutpnt + 1;
+ i__1 = *n - *cutpnt;
+ dcopy_(&i__1, &q[zpp1 + zpp1 * q_dim1], ldq, &work[iz + *cutpnt], &c__1);
+
+/* Deflate eigenvalues. */
+
+ dlaed2_(&k, n, cutpnt, &d__[1], &q[q_offset], ldq, &indxq[1], rho, &work[
+ iz], &work[idlmda], &work[iw], &work[iq2], &iwork[indx], &iwork[
+ indxc], &iwork[indxp], &iwork[coltyp], info);
+
+ if (*info != 0) {
+ goto L20;
+ }
+
+/* Solve Secular Equation. */
+
+ if (k != 0) {
+ is = (iwork[coltyp] + iwork[coltyp + 1]) * *cutpnt + (iwork[coltyp +
+ 1] + iwork[coltyp + 2]) * (*n - *cutpnt) + iq2;
+ dlaed3_(&k, n, cutpnt, &d__[1], &q[q_offset], ldq, rho, &work[idlmda],
+ &work[iq2], &iwork[indxc], &iwork[coltyp], &work[iw], &work[
+ is], info);
+ if (*info != 0) {
+ goto L20;
+ }
+
+/* Prepare the INDXQ sorting permutation. */
+
+ n1 = k;
+ n2 = *n - k;
+ dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]);
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ indxq[i__] = i__;
+/* L10: */
+ }
+ }
+
+L20:
+ return 0;
+
+/* End of DLAED1 */
+
+} /* dlaed1_ */
diff --git a/contrib/libs/clapack/dlaed2.c b/contrib/libs/clapack/dlaed2.c
new file mode 100644
index 0000000000..20d93a099d
--- /dev/null
+++ b/contrib/libs/clapack/dlaed2.c
@@ -0,0 +1,532 @@
+/* dlaed2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b3 = -1.;
+static integer c__1 = 1;
+
+/* Subroutine */ int dlaed2_(integer *k, integer *n, integer *n1, doublereal *
+ d__, doublereal *q, integer *ldq, integer *indxq, doublereal *rho,
+ doublereal *z__, doublereal *dlamda, doublereal *w, doublereal *q2,
+ integer *indx, integer *indxc, integer *indxp, integer *coltyp,
+ integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ doublereal c__;
+ integer i__, j;
+ doublereal s, t;
+ integer k2, n2, ct, nj, pj, js, iq1, iq2, n1p1;
+ doublereal eps, tau, tol;
+ integer psm[4], imax, jmax;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ integer ctot[4];
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *), dcopy_(integer *, doublereal *, integer *, doublereal
+ *, integer *);
+ extern doublereal dlapy2_(doublereal *, doublereal *), dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+ integer *, integer *, integer *), dlacpy_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAED2 merges the two sets of eigenvalues together into a single */
+/* sorted set. Then it tries to deflate the size of the problem. */
+/* There are two ways in which deflation can occur: when two or more */
+/* eigenvalues are close together or if there is a tiny entry in the */
+/* Z vector. For each such occurrence the order of the related secular */
+/* equation problem is reduced by one. */
+
+/* Arguments */
+/* ========= */
+
+/* K (output) INTEGER */
+/* The number of non-deflated eigenvalues, and the order of the */
+/* related secular equation. 0 <= K <=N. */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* N1 (input) INTEGER */
+/* The location of the last eigenvalue in the leading sub-matrix. */
+/* min(1,N) <= N1 <= N/2. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, D contains the eigenvalues of the two submatrices to */
+/* be combined. */
+/* On exit, D contains the trailing (N-K) updated eigenvalues */
+/* (those which were deflated) sorted into increasing order. */
+
+/* Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N) */
+/* On entry, Q contains the eigenvectors of two submatrices in */
+/* the two square blocks with corners at (1,1), (N1,N1) */
+/* and (N1+1, N1+1), (N,N). */
+/* On exit, Q contains the trailing (N-K) updated eigenvectors */
+/* (those which were deflated) in its last N-K columns. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* INDXQ (input/output) INTEGER array, dimension (N) */
+/* The permutation which separately sorts the two sub-problems */
+/* in D into ascending order. Note that elements in the second */
+/* half of this permutation must first have N1 added to their */
+/* values. Destroyed on exit. */
+
+/* RHO (input/output) DOUBLE PRECISION */
+/* On entry, the off-diagonal element associated with the rank-1 */
+/* cut which originally split the two submatrices which are now */
+/* being recombined. */
+/* On exit, RHO has been modified to the value required by */
+/* DLAED3. */
+
+/* Z (input) DOUBLE PRECISION array, dimension (N) */
+/* On entry, Z contains the updating vector (the last */
+/* row of the first sub-eigenvector matrix and the first row of */
+/* the second sub-eigenvector matrix). */
+/* On exit, the contents of Z have been destroyed by the updating */
+/* process. */
+
+/* DLAMDA (output) DOUBLE PRECISION array, dimension (N) */
+/* A copy of the first K eigenvalues which will be used by */
+/* DLAED3 to form the secular equation. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* The first k values of the final deflation-altered z-vector */
+/* which will be passed to DLAED3. */
+
+/* Q2 (output) DOUBLE PRECISION array, dimension (N1**2+(N-N1)**2) */
+/* A copy of the first K eigenvectors which will be used by */
+/* DLAED3 in a matrix multiply (DGEMM) to solve for the new */
+/* eigenvectors. */
+
+/* INDX (workspace) INTEGER array, dimension (N) */
+/* The permutation used to sort the contents of DLAMDA into */
+/* ascending order. */
+
+/* INDXC (output) INTEGER array, dimension (N) */
+/* The permutation used to arrange the columns of the deflated */
+/* Q matrix into three groups: the first group contains non-zero */
+/* elements only at and above N1, the second contains */
+/* non-zero elements only below N1, and the third is dense. */
+
+/* INDXP (workspace) INTEGER array, dimension (N) */
+/* The permutation used to place deflated values of D at the end */
+/* of the array. INDXP(1:K) points to the nondeflated D-values */
+/* and INDXP(K+1:N) points to the deflated eigenvalues. */
+
+/* COLTYP (workspace/output) INTEGER array, dimension (N) */
+/* During execution, a label which will indicate which of the */
+/* following types a column in the Q2 matrix is: */
+/* 1 : non-zero in the upper half only; */
+/* 2 : dense; */
+/* 3 : non-zero in the lower half only; */
+/* 4 : deflated. */
+/* On exit, COLTYP(i) is the number of columns of type i, */
+/* for i=1 to 4 only. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+/* Modified by Francoise Tisseur, University of Tennessee. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --indxq;
+ --z__;
+ --dlamda;
+ --w;
+ --q2;
+ --indx;
+ --indxc;
+ --indxp;
+ --coltyp;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -2;
+ } else if (*ldq < max(1,*n)) {
+ *info = -6;
+ } else /* if(complicated condition) */ {
+/* Computing MIN */
+ i__1 = 1, i__2 = *n / 2;
+ if (min(i__1,i__2) > *n1 || *n / 2 < *n1) {
+ *info = -3;
+ }
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLAED2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ n2 = *n - *n1;
+ n1p1 = *n1 + 1;
+
+ if (*rho < 0.) {
+ dscal_(&n2, &c_b3, &z__[n1p1], &c__1);
+ }
+
+/* Normalize z so that norm(z) = 1. Since z is the concatenation of */
+/* two normalized vectors, norm2(z) = sqrt(2). */
+
+ t = 1. / sqrt(2.);
+ dscal_(n, &t, &z__[1], &c__1);
+
+/* RHO = ABS( norm(z)**2 * RHO ) */
+
+ *rho = (d__1 = *rho * 2., abs(d__1));
+
+/* Sort the eigenvalues into increasing order */
+
+ i__1 = *n;
+ for (i__ = n1p1; i__ <= i__1; ++i__) {
+ indxq[i__] += *n1;
+/* L10: */
+ }
+
+/* re-integrate the deflated parts from the last pass */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlamda[i__] = d__[indxq[i__]];
+/* L20: */
+ }
+ dlamrg_(n1, &n2, &dlamda[1], &c__1, &c__1, &indxc[1]);
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ indx[i__] = indxq[indxc[i__]];
+/* L30: */
+ }
+
+/* Calculate the allowable deflation tolerance */
+
+ imax = idamax_(n, &z__[1], &c__1);
+ jmax = idamax_(n, &d__[1], &c__1);
+ eps = dlamch_("Epsilon");
+/* Computing MAX */
+ d__3 = (d__1 = d__[jmax], abs(d__1)), d__4 = (d__2 = z__[imax], abs(d__2))
+ ;
+ tol = eps * 8. * max(d__3,d__4);
+
+/* If the rank-1 modifier is small enough, no more needs to be done */
+/* except to reorganize Q so that its columns correspond with the */
+/* elements in D. */
+
+ if (*rho * (d__1 = z__[imax], abs(d__1)) <= tol) {
+ *k = 0;
+ iq2 = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = indx[j];
+ dcopy_(n, &q[i__ * q_dim1 + 1], &c__1, &q2[iq2], &c__1);
+ dlamda[j] = d__[i__];
+ iq2 += *n;
+/* L40: */
+ }
+ dlacpy_("A", n, n, &q2[1], n, &q[q_offset], ldq);
+ dcopy_(n, &dlamda[1], &c__1, &d__[1], &c__1);
+ goto L190;
+ }
+
+/* If there are multiple eigenvalues then the problem deflates. Here */
+/* the number of equal eigenvalues are found. As each equal */
+/* eigenvalue is found, an elementary reflector is computed to rotate */
+/* the corresponding eigensubspace so that the corresponding */
+/* components of Z are zero in this new basis. */
+
+ i__1 = *n1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ coltyp[i__] = 1;
+/* L50: */
+ }
+ i__1 = *n;
+ for (i__ = n1p1; i__ <= i__1; ++i__) {
+ coltyp[i__] = 3;
+/* L60: */
+ }
+
+
+ *k = 0;
+ k2 = *n + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ nj = indx[j];
+ if (*rho * (d__1 = z__[nj], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ coltyp[nj] = 4;
+ indxp[k2] = nj;
+ if (j == *n) {
+ goto L100;
+ }
+ } else {
+ pj = nj;
+ goto L80;
+ }
+/* L70: */
+ }
+L80:
+ ++j;
+ nj = indx[j];
+ if (j > *n) {
+ goto L100;
+ }
+ if (*rho * (d__1 = z__[nj], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ coltyp[nj] = 4;
+ indxp[k2] = nj;
+ } else {
+
+/* Check if eigenvalues are close enough to allow deflation. */
+
+ s = z__[pj];
+ c__ = z__[nj];
+
+/* Find sqrt(a**2+b**2) without overflow or */
+/* destructive underflow. */
+
+ tau = dlapy2_(&c__, &s);
+ t = d__[nj] - d__[pj];
+ c__ /= tau;
+ s = -s / tau;
+ if ((d__1 = t * c__ * s, abs(d__1)) <= tol) {
+
+/* Deflation is possible. */
+
+ z__[nj] = tau;
+ z__[pj] = 0.;
+ if (coltyp[nj] != coltyp[pj]) {
+ coltyp[nj] = 2;
+ }
+ coltyp[pj] = 4;
+ drot_(n, &q[pj * q_dim1 + 1], &c__1, &q[nj * q_dim1 + 1], &c__1, &
+ c__, &s);
+/* Computing 2nd power */
+ d__1 = c__;
+/* Computing 2nd power */
+ d__2 = s;
+ t = d__[pj] * (d__1 * d__1) + d__[nj] * (d__2 * d__2);
+/* Computing 2nd power */
+ d__1 = s;
+/* Computing 2nd power */
+ d__2 = c__;
+ d__[nj] = d__[pj] * (d__1 * d__1) + d__[nj] * (d__2 * d__2);
+ d__[pj] = t;
+ --k2;
+ i__ = 1;
+L90:
+ if (k2 + i__ <= *n) {
+ if (d__[pj] < d__[indxp[k2 + i__]]) {
+ indxp[k2 + i__ - 1] = indxp[k2 + i__];
+ indxp[k2 + i__] = pj;
+ ++i__;
+ goto L90;
+ } else {
+ indxp[k2 + i__ - 1] = pj;
+ }
+ } else {
+ indxp[k2 + i__ - 1] = pj;
+ }
+ pj = nj;
+ } else {
+ ++(*k);
+ dlamda[*k] = d__[pj];
+ w[*k] = z__[pj];
+ indxp[*k] = pj;
+ pj = nj;
+ }
+ }
+ goto L80;
+L100:
+
+/* Record the last eigenvalue. */
+
+ ++(*k);
+ dlamda[*k] = d__[pj];
+ w[*k] = z__[pj];
+ indxp[*k] = pj;
+
+/* Count up the total number of the various types of columns, then */
+/* form a permutation which positions the four column types into */
+/* four uniform groups (although one or more of these groups may be */
+/* empty). */
+
+ for (j = 1; j <= 4; ++j) {
+ ctot[j - 1] = 0;
+/* L110: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ ct = coltyp[j];
+ ++ctot[ct - 1];
+/* L120: */
+ }
+
+/* PSM(*) = Position in SubMatrix (of types 1 through 4) */
+
+ psm[0] = 1;
+ psm[1] = ctot[0] + 1;
+ psm[2] = psm[1] + ctot[1];
+ psm[3] = psm[2] + ctot[2];
+ *k = *n - ctot[3];
+
+/* Fill out the INDXC array so that the permutation which it induces */
+/* will place all type-1 columns first, all type-2 columns next, */
+/* then all type-3's, and finally all type-4's. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ js = indxp[j];
+ ct = coltyp[js];
+ indx[psm[ct - 1]] = js;
+ indxc[psm[ct - 1]] = j;
+ ++psm[ct - 1];
+/* L130: */
+ }
+
+/* Sort the eigenvalues and corresponding eigenvectors into DLAMDA */
+/* and Q2 respectively. The eigenvalues/vectors which were not */
+/* deflated go into the first K slots of DLAMDA and Q2 respectively, */
+/* while those which were deflated go into the last N - K slots. */
+
+ i__ = 1;
+ iq1 = 1;
+ iq2 = (ctot[0] + ctot[1]) * *n1 + 1;
+ i__1 = ctot[0];
+ for (j = 1; j <= i__1; ++j) {
+ js = indx[i__];
+ dcopy_(n1, &q[js * q_dim1 + 1], &c__1, &q2[iq1], &c__1);
+ z__[i__] = d__[js];
+ ++i__;
+ iq1 += *n1;
+/* L140: */
+ }
+
+ i__1 = ctot[1];
+ for (j = 1; j <= i__1; ++j) {
+ js = indx[i__];
+ dcopy_(n1, &q[js * q_dim1 + 1], &c__1, &q2[iq1], &c__1);
+ dcopy_(&n2, &q[*n1 + 1 + js * q_dim1], &c__1, &q2[iq2], &c__1);
+ z__[i__] = d__[js];
+ ++i__;
+ iq1 += *n1;
+ iq2 += n2;
+/* L150: */
+ }
+
+ i__1 = ctot[2];
+ for (j = 1; j <= i__1; ++j) {
+ js = indx[i__];
+ dcopy_(&n2, &q[*n1 + 1 + js * q_dim1], &c__1, &q2[iq2], &c__1);
+ z__[i__] = d__[js];
+ ++i__;
+ iq2 += n2;
+/* L160: */
+ }
+
+ iq1 = iq2;
+ i__1 = ctot[3];
+ for (j = 1; j <= i__1; ++j) {
+ js = indx[i__];
+ dcopy_(n, &q[js * q_dim1 + 1], &c__1, &q2[iq2], &c__1);
+ iq2 += *n;
+ z__[i__] = d__[js];
+ ++i__;
+/* L170: */
+ }
+
+/* The deflated eigenvalues and their corresponding vectors go back */
+/* into the last N - K slots of D and Q respectively. */
+
+ dlacpy_("A", n, &ctot[3], &q2[iq1], n, &q[(*k + 1) * q_dim1 + 1], ldq);
+ i__1 = *n - *k;
+ dcopy_(&i__1, &z__[*k + 1], &c__1, &d__[*k + 1], &c__1);
+
+/* Copy CTOT into COLTYP for referencing in DLAED3. */
+
+ for (j = 1; j <= 4; ++j) {
+ coltyp[j] = ctot[j - 1];
+/* L180: */
+ }
+
+L190:
+ return 0;
+
+/* End of DLAED2 */
+
+} /* dlaed2_ */
diff --git a/contrib/libs/clapack/dlaed3.c b/contrib/libs/clapack/dlaed3.c
new file mode 100644
index 0000000000..ce4f772672
--- /dev/null
+++ b/contrib/libs/clapack/dlaed3.c
@@ -0,0 +1,338 @@
+/* dlaed3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b22 = 1.;
+static doublereal c_b23 = 0.;
+
+/* Subroutine */ int dlaed3_(integer *k, integer *n, integer *n1, doublereal *
+ d__, doublereal *q, integer *ldq, doublereal *rho, doublereal *dlamda,
+ doublereal *q2, integer *indx, integer *ctot, doublereal *w,
+ doublereal *s, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ integer i__, j, n2, n12, ii, n23, iq2;
+ doublereal temp;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *),
+ dcopy_(integer *, doublereal *, integer *, doublereal *, integer
+ *), dlaed4_(integer *, integer *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, integer *);
+ extern doublereal dlamc3_(doublereal *, doublereal *);
+ extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *),
+ dlaset_(char *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAED3 finds the roots of the secular equation, as defined by the */
+/* values in D, W, and RHO, between 1 and K. It makes the */
+/* appropriate calls to DLAED4 and then updates the eigenvectors by */
+/* multiplying the matrix of eigenvectors of the pair of eigensystems */
+/* being combined by the matrix of eigenvectors of the K-by-K system */
+/* which is solved here. */
+
+/* This code makes very mild assumptions about floating point */
+/* arithmetic. It will work on machines with a guard digit in */
+/* add/subtract, or on those binary machines without guard digits */
+/* which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. */
+/* It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* K (input) INTEGER */
+/* The number of terms in the rational function to be solved by */
+/* DLAED4. K >= 0. */
+
+/* N (input) INTEGER */
+/* The number of rows and columns in the Q matrix. */
+/* N >= K (deflation may result in N>K). */
+
+/* N1 (input) INTEGER */
+/* The location of the last eigenvalue in the leading submatrix. */
+/* min(1,N) <= N1 <= N/2. */
+
+/* D (output) DOUBLE PRECISION array, dimension (N) */
+/* D(I) contains the updated eigenvalues for */
+/* 1 <= I <= K. */
+
+/* Q (output) DOUBLE PRECISION array, dimension (LDQ,N) */
+/* Initially the first K columns are used as workspace. */
+/* On output the columns 1 to K contain */
+/* the updated eigenvectors. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* RHO (input) DOUBLE PRECISION */
+/* The value of the parameter in the rank one update equation. */
+/* RHO >= 0 required. */
+
+/* DLAMDA (input/output) DOUBLE PRECISION array, dimension (K) */
+/* The first K elements of this array contain the old roots */
+/* of the deflated updating problem. These are the poles */
+/* of the secular equation. May be changed on output by */
+/* having lowest order bit set to zero on Cray X-MP, Cray Y-MP, */
+/* Cray-2, or Cray C-90, as described above. */
+
+/* Q2 (input) DOUBLE PRECISION array, dimension (LDQ2, N) */
+/* The first K columns of this matrix contain the non-deflated */
+/* eigenvectors for the split problem. */
+
+/* INDX (input) INTEGER array, dimension (N) */
+/* The permutation used to arrange the columns of the deflated */
+/* Q matrix into three groups (see DLAED2). */
+/* The rows of the eigenvectors found by DLAED4 must be likewise */
+/* permuted before the matrix multiply can take place. */
+
+/* CTOT (input) INTEGER array, dimension (4) */
+/* A count of the total number of the various types of columns */
+/* in Q, as described in INDX. The fourth column type is any */
+/* column which has been deflated. */
+
+/* W (input/output) DOUBLE PRECISION array, dimension (K) */
+/* The first K elements of this array contain the components */
+/* of the deflation-adjusted updating vector. Destroyed on */
+/* output. */
+
+/* S (workspace) DOUBLE PRECISION array, dimension (N1 + 1)*K */
+/* Will contain the eigenvectors of the repaired matrix which */
+/* will be multiplied by the previously accumulated eigenvectors */
+/* to update the system. */
+
+/* LDS (input) INTEGER */
+/* The leading dimension of S. LDS >= max(1,K). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, an eigenvalue did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+/* Modified by Francoise Tisseur, University of Tennessee. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --dlamda;
+ --q2;
+ --indx;
+ --ctot;
+ --w;
+ --s;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*k < 0) {
+ *info = -1;
+ } else if (*n < *k) {
+ *info = -2;
+ } else if (*ldq < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLAED3", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*k == 0) {
+ return 0;
+ }
+
+/* Modify values DLAMDA(i) to make sure all DLAMDA(i)-DLAMDA(j) can */
+/* be computed with high relative accuracy (barring over/underflow). */
+/* This is a problem on machines without a guard digit in */
+/* add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2). */
+/* The following code replaces DLAMDA(I) by 2*DLAMDA(I)-DLAMDA(I), */
+/* which on any of these machines zeros out the bottommost */
+/* bit of DLAMDA(I) if it is 1; this makes the subsequent */
+/* subtractions DLAMDA(I)-DLAMDA(J) unproblematic when cancellation */
+/* occurs. On binary machines with a guard digit (almost all */
+/* machines) it does not change DLAMDA(I) at all. On hexadecimal */
+/* and decimal machines with a guard digit, it slightly */
+/* changes the bottommost bits of DLAMDA(I). It does not account */
+/* for hexadecimal or decimal machines without guard digits */
+/* (we know of none). We use a subroutine call to compute */
+/* 2*DLAMBDA(I) to prevent optimizing compilers from eliminating */
+/* this code. */
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlamda[i__] = dlamc3_(&dlamda[i__], &dlamda[i__]) - dlamda[i__];
+/* L10: */
+ }
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dlaed4_(k, &j, &dlamda[1], &w[1], &q[j * q_dim1 + 1], rho, &d__[j],
+ info);
+
+/* If the zero finder fails, the computation is terminated. */
+
+ if (*info != 0) {
+ goto L120;
+ }
+/* L20: */
+ }
+
+ if (*k == 1) {
+ goto L110;
+ }
+ if (*k == 2) {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ w[1] = q[j * q_dim1 + 1];
+ w[2] = q[j * q_dim1 + 2];
+ ii = indx[1];
+ q[j * q_dim1 + 1] = w[ii];
+ ii = indx[2];
+ q[j * q_dim1 + 2] = w[ii];
+/* L30: */
+ }
+ goto L110;
+ }
+
+/* Compute updated W. */
+
+ dcopy_(k, &w[1], &c__1, &s[1], &c__1);
+
+/* Initialize W(I) = Q(I,I) */
+
+ i__1 = *ldq + 1;
+ dcopy_(k, &q[q_offset], &i__1, &w[1], &c__1);
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L40: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L50: */
+ }
+/* L60: */
+ }
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__1 = sqrt(-w[i__]);
+ w[i__] = d_sign(&d__1, &s[i__]);
+/* L70: */
+ }
+
+/* Compute eigenvectors of the modified rank-1 modification. */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *k;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ s[i__] = w[i__] / q[i__ + j * q_dim1];
+/* L80: */
+ }
+ temp = dnrm2_(k, &s[1], &c__1);
+ i__2 = *k;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ ii = indx[i__];
+ q[i__ + j * q_dim1] = s[ii] / temp;
+/* L90: */
+ }
+/* L100: */
+ }
+
+/* Compute the updated eigenvectors. */
+
+L110:
+
+ n2 = *n - *n1;
+ n12 = ctot[1] + ctot[2];
+ n23 = ctot[2] + ctot[3];
+
+ dlacpy_("A", &n23, k, &q[ctot[1] + 1 + q_dim1], ldq, &s[1], &n23);
+ iq2 = *n1 * n12 + 1;
+ if (n23 != 0) {
+ dgemm_("N", "N", &n2, k, &n23, &c_b22, &q2[iq2], &n2, &s[1], &n23, &
+ c_b23, &q[*n1 + 1 + q_dim1], ldq);
+ } else {
+ dlaset_("A", &n2, k, &c_b23, &c_b23, &q[*n1 + 1 + q_dim1], ldq);
+ }
+
+ dlacpy_("A", &n12, k, &q[q_offset], ldq, &s[1], &n12);
+ if (n12 != 0) {
+ dgemm_("N", "N", n1, k, &n12, &c_b22, &q2[1], n1, &s[1], &n12, &c_b23,
+ &q[q_offset], ldq);
+ } else {
+ dlaset_("A", n1, k, &c_b23, &c_b23, &q[q_dim1 + 1], ldq);
+ }
+
+
+L120:
+ return 0;
+
+/* End of DLAED3 */
+
+} /* dlaed3_ */
diff --git a/contrib/libs/clapack/dlaed4.c b/contrib/libs/clapack/dlaed4.c
new file mode 100644
index 0000000000..414c2cf356
--- /dev/null
+++ b/contrib/libs/clapack/dlaed4.c
@@ -0,0 +1,954 @@
+/* dlaed4.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlaed4_(integer *n, integer *i__, doublereal *d__,
+	doublereal *z__, doublereal *delta, doublereal *rho, doublereal *dlam,
+	 integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    doublereal a, b, c__;
+    integer j;
+    doublereal w;
+    integer ii;
+    doublereal dw, zz[3];
+    integer ip1;
+    doublereal del, eta, phi, eps, tau, psi;
+    integer iim1, iip1;
+    doublereal dphi, dpsi;
+    integer iter;
+    doublereal temp, prew, temp1, dltlb, dltub, midpt;
+    integer niter;
+    logical swtch;
+    extern /* Subroutine */ int dlaed5_(integer *, doublereal *, doublereal *,
+	     doublereal *, doublereal *, doublereal *), dlaed6_(integer *,
+	    logical *, doublereal *, doublereal *, doublereal *, doublereal *,
+	     doublereal *, integer *);
+    logical swtch3;
+    extern doublereal dlamch_(char *);
+    logical orgati;
+    doublereal erretm, rhoinv;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  This subroutine computes the I-th updated eigenvalue of a symmetric */
+/*  rank-one modification to a diagonal matrix whose diagonal elements are */
+/*  given in the array D, and that */
+
+/*             D(i) < D(j)  for  i < j */
+
+/*  and that RHO > 0.  This is arranged by the calling routine, and is */
+/*  no loss in generality.  The rank-one modified system is thus */
+
+/*             diag( D )  +  RHO *  Z * Z_transpose. */
+
+/*  where we assume the Euclidean norm of Z is 1. */
+
+/*  The method consists of approximating the rational functions in the */
+/*  secular equation by simpler interpolating rational functions. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  N      (input) INTEGER */
+/*         The length of all arrays. */
+
+/*  I      (input) INTEGER */
+/*         The index of the eigenvalue to be computed.  1 <= I <= N. */
+
+/*  D      (input) DOUBLE PRECISION array, dimension (N) */
+/*         The original eigenvalues.  It is assumed that they are in */
+/*         order, D(I) < D(J)  for I < J. */
+
+/*  Z      (input) DOUBLE PRECISION array, dimension (N) */
+/*         The components of the updating vector. */
+
+/*  DELTA  (output) DOUBLE PRECISION array, dimension (N) */
+/*         If N .GT. 2, DELTA contains (D(j) - lambda_I) in its  j-th */
+/*         component.  If N = 1, then DELTA(1) = 1. If N = 2, see DLAED5 */
+/*         for detail. The vector DELTA contains the information necessary */
+/*         to construct the eigenvectors by DLAED3 and DLAED9. */
+
+/*  RHO    (input) DOUBLE PRECISION */
+/*         The scalar in the symmetric updating formula. */
+
+/*  DLAM   (output) DOUBLE PRECISION */
+/*         The computed lambda_I, the I-th updated eigenvalue. */
+
+/*  INFO   (output) INTEGER */
+/*         = 0:  successful exit */
+/*         > 0:  if INFO = 1, the updating process failed. */
+
+/*  Internal Parameters */
+/*  =================== */
+
+/*  Logical variable ORGATI (origin-at-i?) is used for distinguishing */
+/*  whether D(i) or D(i+1) is treated as the origin. */
+
+/*            ORGATI = .true.    origin at i */
+/*            ORGATI = .false.   origin at i+1 */
+
+/*  Logical variable SWTCH3 (switch-for-3-poles?) is for noting */
+/*  if we are working with THREE poles! */
+
+/*  MAXIT is the maximum number of iterations allowed for each */
+/*  eigenvalue. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*     Ren-Cang Li, Computer Science Division, University of California */
+/*     at Berkeley, USA */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Since this routine is called in an inner loop, we do no argument */
+/*     checking. */
+
+/*     Quick return for N=1 and 2. */
+
+    /* Parameter adjustments */
+    --delta;
+    --z__;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+    if (*n == 1) {
+
+/*         Presumably, I=1 upon entry */
+
+	*dlam = d__[1] + *rho * z__[1] * z__[1];
+	delta[1] = 1.;
+	return 0;
+    }
+    if (*n == 2) {
+	dlaed5_(i__, &d__[1], &z__[1], &delta[1], rho, dlam);
+	return 0;
+    }
+
+/*     Compute machine epsilon, used as the relative convergence tolerance */
+
+    eps = dlamch_("Epsilon");
+    rhoinv = 1. / *rho;
+
+/*     The case I = N */
+
+    if (*i__ == *n) {
+
+/*        Initialize some basic variables */
+
+	ii = *n - 1;
+	niter = 1;
+
+/*        Calculate initial guess */
+
+	midpt = *rho / 2.;
+
+/*        If ||Z||_2 is not one, then TEMP should be set to */
+/*        RHO * ||Z||_2^2 / TWO */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    delta[j] = d__[j] - d__[*i__] - midpt;
+/* L10: */
+	}
+
+	psi = 0.;
+	i__1 = *n - 2;
+	for (j = 1; j <= i__1; ++j) {
+	    psi += z__[j] * z__[j] / delta[j];
+/* L20: */
+	}
+
+	c__ = rhoinv + psi;
+	w = c__ + z__[ii] * z__[ii] / delta[ii] + z__[*n] * z__[*n] / delta[*
+		n];
+
+	if (w <= 0.) {
+	    temp = z__[*n - 1] * z__[*n - 1] / (d__[*n] - d__[*n - 1] + *rho)
+		    + z__[*n] * z__[*n] / *rho;
+	    if (c__ <= temp) {
+		tau = *rho;
+	    } else {
+		del = d__[*n] - d__[*n - 1];
+		a = -c__ * del + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n]
+			;
+		b = z__[*n] * z__[*n] * del;
+		if (a < 0.) {
+		    tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a);
+		} else {
+		    tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.);
+		}
+	    }
+
+/*           It can be proved that */
+/*               D(N)+RHO/2 <= LAMBDA(N) < D(N)+TAU <= D(N)+RHO */
+
+	    dltlb = midpt;
+	    dltub = *rho;
+	} else {
+	    del = d__[*n] - d__[*n - 1];
+	    a = -c__ * del + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n];
+	    b = z__[*n] * z__[*n] * del;
+	    if (a < 0.) {
+		tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a);
+	    } else {
+		tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.);
+	    }
+
+/*           It can be proved that */
+/*               D(N) < D(N)+TAU < LAMBDA(N) < D(N)+RHO/2 */
+
+	    dltlb = 0.;
+	    dltub = midpt;
+	}
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    delta[j] = d__[j] - d__[*i__] - tau;
+/* L30: */
+	}
+
+/*        Evaluate PSI and the derivative DPSI */
+
+	dpsi = 0.;
+	psi = 0.;
+	erretm = 0.;
+	i__1 = ii;
+	for (j = 1; j <= i__1; ++j) {
+	    temp = z__[j] / delta[j];
+	    psi += z__[j] * temp;
+	    dpsi += temp * temp;
+	    erretm += psi;
+/* L40: */
+	}
+	erretm = abs(erretm);
+
+/*        Evaluate PHI and the derivative DPHI */
+
+	temp = z__[*n] / delta[*n];
+	phi = z__[*n] * temp;
+	dphi = temp * temp;
+	erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi
+		+ dphi);
+
+	w = rhoinv + phi + psi;
+
+/*        Test for convergence */
+
+	if (abs(w) <= eps * erretm) {
+	    *dlam = d__[*i__] + tau;
+	    goto L250;
+	}
+
+	if (w <= 0.) {
+	    dltlb = max(dltlb,tau);
+	} else {
+	    dltub = min(dltub,tau);
+	}
+
+/*        Calculate the new step */
+
+	++niter;
+	c__ = w - delta[*n - 1] * dpsi - delta[*n] * dphi;
+	a = (delta[*n - 1] + delta[*n]) * w - delta[*n - 1] * delta[*n] * (
+		dpsi + dphi);
+	b = delta[*n - 1] * delta[*n] * w;
+	if (c__ < 0.) {
+	    c__ = abs(c__);
+	}
+	if (c__ == 0.) {
+/*          ETA = B/A */
+/*           ETA = RHO - TAU */
+	    eta = dltub - tau;
+	} else if (a >= 0.) {
+	    eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__
+		    * 2.);
+	} else {
+	    eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))
+		    );
+	}
+
+/*        Note, eta should be positive if w is negative, and */
+/*        eta should be negative otherwise. However, */
+/*        if for some reason caused by roundoff, eta*w > 0, */
+/*        we simply use one Newton step instead. This way */
+/*        will guarantee eta*w < 0. */
+
+	if (w * eta > 0.) {
+	    eta = -w / (dpsi + dphi);
+	}
+	temp = tau + eta;
+	if (temp > dltub || temp < dltlb) {
+	    if (w < 0.) {
+		eta = (dltub - tau) / 2.;
+	    } else {
+		eta = (dltlb - tau) / 2.;
+	    }
+	}
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    delta[j] -= eta;
+/* L50: */
+	}
+
+	tau += eta;
+
+/*        Evaluate PSI and the derivative DPSI */
+
+	dpsi = 0.;
+	psi = 0.;
+	erretm = 0.;
+	i__1 = ii;
+	for (j = 1; j <= i__1; ++j) {
+	    temp = z__[j] / delta[j];
+	    psi += z__[j] * temp;
+	    dpsi += temp * temp;
+	    erretm += psi;
+/* L60: */
+	}
+	erretm = abs(erretm);
+
+/*        Evaluate PHI and the derivative DPHI */
+
+	temp = z__[*n] / delta[*n];
+	phi = z__[*n] * temp;
+	dphi = temp * temp;
+	erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi
+		+ dphi);
+
+	w = rhoinv + phi + psi;
+
+/*        Main loop to update the values of the array   DELTA */
+
+	iter = niter + 1;
+
+	for (niter = iter; niter <= 30; ++niter) {
+
+/*           Test for convergence */
+
+	    if (abs(w) <= eps * erretm) {
+		*dlam = d__[*i__] + tau;
+		goto L250;
+	    }
+
+	    if (w <= 0.) {
+		dltlb = max(dltlb,tau);
+	    } else {
+		dltub = min(dltub,tau);
+	    }
+
+/*           Calculate the new step */
+
+	    c__ = w - delta[*n - 1] * dpsi - delta[*n] * dphi;
+	    a = (delta[*n - 1] + delta[*n]) * w - delta[*n - 1] * delta[*n] *
+		    (dpsi + dphi);
+	    b = delta[*n - 1] * delta[*n] * w;
+	    if (a >= 0.) {
+		eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+			c__ * 2.);
+	    } else {
+		eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(
+			d__1))));
+	    }
+
+/*           Note, eta should be positive if w is negative, and */
+/*           eta should be negative otherwise. However, */
+/*           if for some reason caused by roundoff, eta*w > 0, */
+/*           we simply use one Newton step instead. This way */
+/*           will guarantee eta*w < 0. */
+
+	    if (w * eta > 0.) {
+		eta = -w / (dpsi + dphi);
+	    }
+	    temp = tau + eta;
+	    if (temp > dltub || temp < dltlb) {
+		if (w < 0.) {
+		    eta = (dltub - tau) / 2.;
+		} else {
+		    eta = (dltlb - tau) / 2.;
+		}
+	    }
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		delta[j] -= eta;
+/* L70: */
+	    }
+
+	    tau += eta;
+
+/*           Evaluate PSI and the derivative DPSI */
+
+	    dpsi = 0.;
+	    psi = 0.;
+	    erretm = 0.;
+	    i__1 = ii;
+	    for (j = 1; j <= i__1; ++j) {
+		temp = z__[j] / delta[j];
+		psi += z__[j] * temp;
+		dpsi += temp * temp;
+		erretm += psi;
+/* L80: */
+	    }
+	    erretm = abs(erretm);
+
+/*           Evaluate PHI and the derivative DPHI */
+
+	    temp = z__[*n] / delta[*n];
+	    phi = z__[*n] * temp;
+	    dphi = temp * temp;
+	    erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (
+		    dpsi + dphi);
+
+	    w = rhoinv + phi + psi;
+/* L90: */
+	}
+
+/*        Return with INFO = 1, NITER = MAXIT and not converged */
+
+	*info = 1;
+	*dlam = d__[*i__] + tau;
+	goto L250;
+
+/*        End for the case  I = N */
+
+    } else {
+
+/*        The case for  I < N */
+
+	niter = 1;
+	ip1 = *i__ + 1;
+
+/*        Calculate initial guess */
+
+	del = d__[ip1] - d__[*i__];
+	midpt = del / 2.;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    delta[j] = d__[j] - d__[*i__] - midpt;
+/* L100: */
+	}
+
+	psi = 0.;
+	i__1 = *i__ - 1;
+	for (j = 1; j <= i__1; ++j) {
+	    psi += z__[j] * z__[j] / delta[j];
+/* L110: */
+	}
+
+	phi = 0.;
+	i__1 = *i__ + 2;
+	for (j = *n; j >= i__1; --j) {
+	    phi += z__[j] * z__[j] / delta[j];
+/* L120: */
+	}
+	c__ = rhoinv + psi + phi;
+	w = c__ + z__[*i__] * z__[*i__] / delta[*i__] + z__[ip1] * z__[ip1] /
+		delta[ip1];
+
+	if (w > 0.) {
+
+/*           d(i) < the ith eigenvalue < (d(i)+d(i+1))/2 */
+
+/*           We choose d(i) as origin. */
+
+	    orgati = TRUE_;
+	    a = c__ * del + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1];
+	    b = z__[*i__] * z__[*i__] * del;
+	    if (a > 0.) {
+		tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(
+			d__1))));
+	    } else {
+		tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+			c__ * 2.);
+	    }
+	    dltlb = 0.;
+	    dltub = midpt;
+	} else {
+
+/*           (d(i)+d(i+1))/2 <= the ith eigenvalue < d(i+1) */
+
+/*           We choose d(i+1) as origin. */
+
+	    orgati = FALSE_;
+	    a = c__ * del - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1];
+	    b = z__[ip1] * z__[ip1] * del;
+	    if (a < 0.) {
+		tau = b * 2. / (a - sqrt((d__1 = a * a + b * 4. * c__, abs(
+			d__1))));
+	    } else {
+		tau = -(a + sqrt((d__1 = a * a + b * 4. * c__, abs(d__1)))) /
+			(c__ * 2.);
+	    }
+	    dltlb = -midpt;
+	    dltub = 0.;
+	}
+
+	if (orgati) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		delta[j] = d__[j] - d__[*i__] - tau;
+/* L130: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		delta[j] = d__[j] - d__[ip1] - tau;
+/* L140: */
+	    }
+	}
+	if (orgati) {
+	    ii = *i__;
+	} else {
+	    ii = *i__ + 1;
+	}
+	iim1 = ii - 1;
+	iip1 = ii + 1;
+
+/*        Evaluate PSI and the derivative DPSI */
+
+	dpsi = 0.;
+	psi = 0.;
+	erretm = 0.;
+	i__1 = iim1;
+	for (j = 1; j <= i__1; ++j) {
+	    temp = z__[j] / delta[j];
+	    psi += z__[j] * temp;
+	    dpsi += temp * temp;
+	    erretm += psi;
+/* L150: */
+	}
+	erretm = abs(erretm);
+
+/*        Evaluate PHI and the derivative DPHI */
+
+	dphi = 0.;
+	phi = 0.;
+	i__1 = iip1;
+	for (j = *n; j >= i__1; --j) {
+	    temp = z__[j] / delta[j];
+	    phi += z__[j] * temp;
+	    dphi += temp * temp;
+	    erretm += phi;
+/* L160: */
+	}
+
+	w = rhoinv + phi + psi;
+
+/*        W is the value of the secular function with */
+/*        its ii-th element removed. */
+
+	swtch3 = FALSE_;
+	if (orgati) {
+	    if (w < 0.) {
+		swtch3 = TRUE_;
+	    }
+	} else {
+	    if (w > 0.) {
+		swtch3 = TRUE_;
+	    }
+	}
+	if (ii == 1 || ii == *n) {
+	    swtch3 = FALSE_;
+	}
+
+	temp = z__[ii] / delta[ii];
+	dw = dpsi + dphi + temp * temp;
+	temp = z__[ii] * temp;
+	w += temp;
+	erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. +
+		abs(tau) * dw;
+
+/*        Test for convergence */
+
+	if (abs(w) <= eps * erretm) {
+	    if (orgati) {
+		*dlam = d__[*i__] + tau;
+	    } else {
+		*dlam = d__[ip1] + tau;
+	    }
+	    goto L250;
+	}
+
+	if (w <= 0.) {
+	    dltlb = max(dltlb,tau);
+	} else {
+	    dltub = min(dltub,tau);
+	}
+
+/*        Calculate the new step */
+
+	++niter;
+	if (! swtch3) {
+	    if (orgati) {
+/* Computing 2nd power */
+		d__1 = z__[*i__] / delta[*i__];
+		c__ = w - delta[ip1] * dw - (d__[*i__] - d__[ip1]) * (d__1 *
+			d__1);
+	    } else {
+/* Computing 2nd power */
+		d__1 = z__[ip1] / delta[ip1];
+		c__ = w - delta[*i__] * dw - (d__[ip1] - d__[*i__]) * (d__1 *
+			d__1);
+	    }
+	    a = (delta[*i__] + delta[ip1]) * w - delta[*i__] * delta[ip1] *
+		    dw;
+	    b = delta[*i__] * delta[ip1] * w;
+	    if (c__ == 0.) {
+		if (a == 0.) {
+		    if (orgati) {
+			a = z__[*i__] * z__[*i__] + delta[ip1] * delta[ip1] *
+				(dpsi + dphi);
+		    } else {
+			a = z__[ip1] * z__[ip1] + delta[*i__] * delta[*i__] *
+				(dpsi + dphi);
+		    }
+		}
+		eta = b / a;
+	    } else if (a <= 0.) {
+		eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+			c__ * 2.);
+	    } else {
+		eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(
+			d__1))));
+	    }
+	} else {
+
+/*           Interpolation using THREE most relevant poles */
+
+	    temp = rhoinv + psi + phi;
+	    if (orgati) {
+		temp1 = z__[iim1] / delta[iim1];
+		temp1 *= temp1;
+		c__ = temp - delta[iip1] * (dpsi + dphi) - (d__[iim1] - d__[
+			iip1]) * temp1;
+		zz[0] = z__[iim1] * z__[iim1];
+		zz[2] = delta[iip1] * delta[iip1] * (dpsi - temp1 + dphi);
+	    } else {
+		temp1 = z__[iip1] / delta[iip1];
+		temp1 *= temp1;
+		c__ = temp - delta[iim1] * (dpsi + dphi) - (d__[iip1] - d__[
+			iim1]) * temp1;
+		zz[0] = delta[iim1] * delta[iim1] * (dpsi + (dphi - temp1));
+		zz[2] = z__[iip1] * z__[iip1];
+	    }
+	    zz[1] = z__[ii] * z__[ii];
+	    dlaed6_(&niter, &orgati, &c__, &delta[iim1], zz, &w, &eta, info);
+	    if (*info != 0) {
+		goto L250;
+	    }
+	}
+
+/*        Note, eta should be positive if w is negative, and */
+/*        eta should be negative otherwise. However, */
+/*        if for some reason caused by roundoff, eta*w > 0, */
+/*        we simply use one Newton step instead. This way */
+/*        will guarantee eta*w < 0. */
+
+	if (w * eta >= 0.) {
+	    eta = -w / dw;
+	}
+	temp = tau + eta;
+	if (temp > dltub || temp < dltlb) {
+	    if (w < 0.) {
+		eta = (dltub - tau) / 2.;
+	    } else {
+		eta = (dltlb - tau) / 2.;
+	    }
+	}
+
+	prew = w;
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    delta[j] -= eta;
+/* L180: */
+	}
+
+/*        Evaluate PSI and the derivative DPSI */
+
+	dpsi = 0.;
+	psi = 0.;
+	erretm = 0.;
+	i__1 = iim1;
+	for (j = 1; j <= i__1; ++j) {
+	    temp = z__[j] / delta[j];
+	    psi += z__[j] * temp;
+	    dpsi += temp * temp;
+	    erretm += psi;
+/* L190: */
+	}
+	erretm = abs(erretm);
+
+/*        Evaluate PHI and the derivative DPHI */
+
+	dphi = 0.;
+	phi = 0.;
+	i__1 = iip1;
+	for (j = *n; j >= i__1; --j) {
+	    temp = z__[j] / delta[j];
+	    phi += z__[j] * temp;
+	    dphi += temp * temp;
+	    erretm += phi;
+/* L200: */
+	}
+
+	temp = z__[ii] / delta[ii];
+	dw = dpsi + dphi + temp * temp;
+	temp = z__[ii] * temp;
+	w = rhoinv + phi + psi + temp;
+	erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + (
+		d__1 = tau + eta, abs(d__1)) * dw;
+
+	swtch = FALSE_;
+	if (orgati) {
+	    if (-w > abs(prew) / 10.) {
+		swtch = TRUE_;
+	    }
+	} else {
+	    if (w > abs(prew) / 10.) {
+		swtch = TRUE_;
+	    }
+	}
+
+	tau += eta;
+
+/*        Main loop to update the values of the array   DELTA */
+
+	iter = niter + 1;
+
+	for (niter = iter; niter <= 30; ++niter) {
+
+/*           Test for convergence */
+
+	    if (abs(w) <= eps * erretm) {
+		if (orgati) {
+		    *dlam = d__[*i__] + tau;
+		} else {
+		    *dlam = d__[ip1] + tau;
+		}
+		goto L250;
+	    }
+
+	    if (w <= 0.) {
+		dltlb = max(dltlb,tau);
+	    } else {
+		dltub = min(dltub,tau);
+	    }
+
+/*           Calculate the new step */
+
+	    if (! swtch3) {
+		if (! swtch) {
+		    if (orgati) {
+/* Computing 2nd power */
+			d__1 = z__[*i__] / delta[*i__];
+			c__ = w - delta[ip1] * dw - (d__[*i__] - d__[ip1]) * (
+				d__1 * d__1);
+		    } else {
+/* Computing 2nd power */
+			d__1 = z__[ip1] / delta[ip1];
+			c__ = w - delta[*i__] * dw - (d__[ip1] - d__[*i__]) *
+				(d__1 * d__1);
+		    }
+		} else {
+		    temp = z__[ii] / delta[ii];
+		    if (orgati) {
+			dpsi += temp * temp;
+		    } else {
+			dphi += temp * temp;
+		    }
+		    c__ = w - delta[*i__] * dpsi - delta[ip1] * dphi;
+		}
+		a = (delta[*i__] + delta[ip1]) * w - delta[*i__] * delta[ip1]
+			* dw;
+		b = delta[*i__] * delta[ip1] * w;
+		if (c__ == 0.) {
+		    if (a == 0.) {
+			if (! swtch) {
+			    if (orgati) {
+				a = z__[*i__] * z__[*i__] + delta[ip1] *
+					delta[ip1] * (dpsi + dphi);
+			    } else {
+				a = z__[ip1] * z__[ip1] + delta[*i__] * delta[
+					*i__] * (dpsi + dphi);
+			    }
+			} else {
+			    a = delta[*i__] * delta[*i__] * dpsi + delta[ip1]
+				    * delta[ip1] * dphi;
+			}
+		    }
+		    eta = b / a;
+		} else if (a <= 0.) {
+		    eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))))
+			     / (c__ * 2.);
+		} else {
+		    eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__,
+			    abs(d__1))));
+		}
+	    } else {
+
+/*              Interpolation using THREE most relevant poles */
+
+		temp = rhoinv + psi + phi;
+		if (swtch) {
+		    c__ = temp - delta[iim1] * dpsi - delta[iip1] * dphi;
+		    zz[0] = delta[iim1] * delta[iim1] * dpsi;
+		    zz[2] = delta[iip1] * delta[iip1] * dphi;
+		} else {
+		    if (orgati) {
+			temp1 = z__[iim1] / delta[iim1];
+			temp1 *= temp1;
+			c__ = temp - delta[iip1] * (dpsi + dphi) - (d__[iim1]
+				- d__[iip1]) * temp1;
+			zz[0] = z__[iim1] * z__[iim1];
+			zz[2] = delta[iip1] * delta[iip1] * (dpsi - temp1 +
+				dphi);
+		    } else {
+			temp1 = z__[iip1] / delta[iip1];
+			temp1 *= temp1;
+			c__ = temp - delta[iim1] * (dpsi + dphi) - (d__[iip1]
+				- d__[iim1]) * temp1;
+			zz[0] = delta[iim1] * delta[iim1] * (dpsi + (dphi -
+				temp1));
+			zz[2] = z__[iip1] * z__[iip1];
+		    }
+		}
+		dlaed6_(&niter, &orgati, &c__, &delta[iim1], zz, &w, &eta,
+			info);
+		if (*info != 0) {
+		    goto L250;
+		}
+	    }
+
+/*           Note, eta should be positive if w is negative, and */
+/*           eta should be negative otherwise. However, */
+/*           if for some reason caused by roundoff, eta*w > 0, */
+/*           we simply use one Newton step instead. This way */
+/*           will guarantee eta*w < 0. */
+
+	    if (w * eta >= 0.) {
+		eta = -w / dw;
+	    }
+	    temp = tau + eta;
+	    if (temp > dltub || temp < dltlb) {
+		if (w < 0.) {
+		    eta = (dltub - tau) / 2.;
+		} else {
+		    eta = (dltlb - tau) / 2.;
+		}
+	    }
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		delta[j] -= eta;
+/* L210: */
+	    }
+
+	    tau += eta;
+	    prew = w;
+
+/*           Evaluate PSI and the derivative DPSI */
+
+	    dpsi = 0.;
+	    psi = 0.;
+	    erretm = 0.;
+	    i__1 = iim1;
+	    for (j = 1; j <= i__1; ++j) {
+		temp = z__[j] / delta[j];
+		psi += z__[j] * temp;
+		dpsi += temp * temp;
+		erretm += psi;
+/* L220: */
+	    }
+	    erretm = abs(erretm);
+
+/*           Evaluate PHI and the derivative DPHI */
+
+	    dphi = 0.;
+	    phi = 0.;
+	    i__1 = iip1;
+	    for (j = *n; j >= i__1; --j) {
+		temp = z__[j] / delta[j];
+		phi += z__[j] * temp;
+		dphi += temp * temp;
+		erretm += phi;
+/* L230: */
+	    }
+
+	    temp = z__[ii] / delta[ii];
+	    dw = dpsi + dphi + temp * temp;
+	    temp = z__[ii] * temp;
+	    w = rhoinv + phi + psi + temp;
+	    erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3.
+		     + abs(tau) * dw;
+	    if (w * prew > 0. && abs(w) > abs(prew) / 10.) {
+		swtch = ! swtch;
+	    }
+
+/* L240: */
+	}
+
+/*        Return with INFO = 1, NITER = MAXIT and not converged */
+
+	*info = 1;
+	if (orgati) {
+	    *dlam = d__[*i__] + tau;
+	} else {
+	    *dlam = d__[ip1] + tau;
+	}
+
+    }
+
+L250:
+
+    return 0;
+
+/*     End of DLAED4 */
+
+} /* dlaed4_ */
diff --git a/contrib/libs/clapack/dlaed5.c b/contrib/libs/clapack/dlaed5.c
new file mode 100644
index 0000000000..fdec19f3c0
--- /dev/null
+++ b/contrib/libs/clapack/dlaed5.c
@@ -0,0 +1,148 @@
+/* dlaed5.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlaed5_(integer *i__, doublereal *d__, doublereal *z__,
+	doublereal *delta, doublereal *rho, doublereal *dlam)
+{
+    /* System generated locals */
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    doublereal b, c__, w, del, tau, temp;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  This subroutine computes the I-th eigenvalue of a symmetric rank-one */
+/*  modification of a 2-by-2 diagonal matrix */
+
+/*             diag( D )  +  RHO *  Z * transpose(Z) . */
+
+/*  The diagonal elements in the array D are assumed to satisfy */
+
+/*             D(i) < D(j)  for  i < j . */
+
+/*  We also assume RHO > 0 and that the Euclidean norm of the vector */
+/*  Z is one. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  I      (input) INTEGER */
+/*         The index of the eigenvalue to be computed.  I = 1 or I = 2. */
+
+/*  D      (input) DOUBLE PRECISION array, dimension (2) */
+/*         The original eigenvalues.  We assume D(1) < D(2). */
+
+/*  Z      (input) DOUBLE PRECISION array, dimension (2) */
+/*         The components of the updating vector. */
+
+/*  DELTA  (output) DOUBLE PRECISION array, dimension (2) */
+/*         The vector DELTA contains the information necessary */
+/*         to construct the eigenvectors. */
+
+/*  RHO    (input) DOUBLE PRECISION */
+/*         The scalar in the symmetric updating formula. */
+
+/*  DLAM   (output) DOUBLE PRECISION */
+/*         The computed lambda_I, the I-th updated eigenvalue. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*     Ren-Cang Li, Computer Science Division, University of California */
+/*     at Berkeley, USA */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --delta;
+    --z__;
+    --d__;
+
+    /* Function Body */
+    del = d__[2] - d__[1];
+    if (*i__ == 1) {
+	w = *rho * 2. * (z__[2] * z__[2] - z__[1] * z__[1]) / del + 1.;
+	if (w > 0.) {
+	    b = del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+	    c__ = *rho * z__[1] * z__[1] * del;
+
+/*           B > ZERO always, since DEL > 0 and RHO > 0 */
+
+	    tau = c__ * 2. / (b + sqrt((d__1 = b * b - c__ * 4., abs(d__1))));
+	    *dlam = d__[1] + tau;
+	    delta[1] = -z__[1] / tau;
+	    delta[2] = z__[2] / (del - tau);
+	} else {
+	    b = -del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+	    c__ = *rho * z__[2] * z__[2] * del;
+	    if (b > 0.) {
+		tau = c__ * -2. / (b + sqrt(b * b + c__ * 4.));
+	    } else {
+		tau = (b - sqrt(b * b + c__ * 4.)) / 2.;
+	    }
+	    *dlam = d__[2] + tau;
+	    delta[1] = -z__[1] / (del + tau);
+	    delta[2] = -z__[2] / tau;
+	}
+	temp = sqrt(delta[1] * delta[1] + delta[2] * delta[2]);
+	delta[1] /= temp;
+	delta[2] /= temp;
+    } else {
+
+/*     Now I=2 */
+
+	b = -del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+	c__ = *rho * z__[2] * z__[2] * del;
+	if (b > 0.) {
+	    tau = (b + sqrt(b * b + c__ * 4.)) / 2.;
+	} else {
+	    tau = c__ * 2. / (-b + sqrt(b * b + c__ * 4.));
+	}
+	*dlam = d__[2] + tau;
+	delta[1] = -z__[1] / (del + tau);
+	delta[2] = -z__[2] / tau;
+	temp = sqrt(delta[1] * delta[1] + delta[2] * delta[2]);
+	delta[1] /= temp;
+	delta[2] /= temp;
+    }
+    return 0;
+
+/*     End of DLAED5 */
+
+} /* dlaed5_ */
diff --git a/contrib/libs/clapack/dlaed6.c b/contrib/libs/clapack/dlaed6.c
new file mode 100644
index 0000000000..eff20c2f7c
--- /dev/null
+++ b/contrib/libs/clapack/dlaed6.c
@@ -0,0 +1,374 @@
+/* dlaed6.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlaed6_(integer *kniter, logical *orgati, doublereal *
+	rho, doublereal *d__, doublereal *z__, doublereal *finit, doublereal *
+	tau, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1, d__2, d__3, d__4;
+
+    /* Builtin functions */
+    double sqrt(doublereal), log(doublereal), pow_di(doublereal *, integer *);
+
+    /* Local variables */
+    doublereal a, b, c__, f;
+    integer i__;
+    doublereal fc, df, ddf, lbd, eta, ubd, eps, base;
+    integer iter;
+    doublereal temp, temp1, temp2, temp3, temp4;
+    logical scale;
+    integer niter;
+    doublereal small1, small2, sminv1, sminv2;
+    extern doublereal dlamch_(char *);
+    doublereal dscale[3], sclfac, zscale[3], erretm, sclinv;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     February 2007 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DLAED6 computes the positive or negative root (closest to the origin) */
+/*  of */
+/*                   z(1)        z(2)        z(3) */
+/*  f(x) =   rho  + --------- + ---------- + --------- */
+/*                  d(1)-x      d(2)-x      d(3)-x */
+
+/*  It is assumed that */
+
+/*        if ORGATI = .true. the root is between d(2) and d(3); */
+/*        otherwise it is between d(1) and d(2) */
+
+/*  This routine will be called by DLAED4 when necessary. In most cases, */
+/*  the root sought is the smallest in magnitude, though it might not be */
+/*  in some extremely rare situations. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  KNITER       (input) INTEGER */
+/*               Refer to DLAED4 for its significance. */
+
+/*  ORGATI       (input) LOGICAL */
+/*               If ORGATI is true, the needed root is between d(2) and */
+/*               d(3); otherwise it is between d(1) and d(2).  See */
+/*               DLAED4 for further details. */
+
+/*  RHO          (input) DOUBLE PRECISION */
+/*               Refer to the equation f(x) above. */
+
+/*  D            (input) DOUBLE PRECISION array, dimension (3) */
+/*               D satisfies d(1) < d(2) < d(3). */
+
+/*  Z            (input) DOUBLE PRECISION array, dimension (3) */
+/*               Each of the elements in z must be positive. */
+
+/*  FINIT        (input) DOUBLE PRECISION */
+/*               The value of f at 0. It is more accurate than the one */
+/*               evaluated inside this routine (if someone wants to do */
+/*               so). */
+
+/*  TAU          (output) DOUBLE PRECISION */
+/*               The root of the equation f(x). */
+
+/*  INFO         (output) INTEGER */
+/*               = 0: successful exit */
+/*               > 0: if INFO = 1, failure to converge */
+
+/*  Further Details */
+/*  =============== */
+
+/*  30/06/99: Based on contributions by */
+/*     Ren-Cang Li, Computer Science Division, University of California */
+/*     at Berkeley, USA */
+
+/*  10/02/03: This version has a few statements commented out for thread */
+/*  safety (machine parameters are computed on each entry). SJH. */
+
+/*  05/10/06: Modified from a new version of Ren-Cang Li, use */
+/*     Gragg-Thornton-Warner cubic convergent scheme for better stability. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --z__;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*orgati) {
+	lbd = d__[2];
+	ubd = d__[3];
+    } else {
+	lbd = d__[1];
+	ubd = d__[2];
+    }
+    if (*finit < 0.) {
+	lbd = 0.;
+    } else {
+	ubd = 0.;
+    }
+
+    niter = 1;
+    *tau = 0.;
+    if (*kniter == 2) {
+	if (*orgati) {
+	    temp = (d__[3] - d__[2]) / 2.;
+	    c__ = *rho + z__[1] / (d__[1] - d__[2] - temp);
+	    a = c__ * (d__[2] + d__[3]) + z__[2] + z__[3];
+	    b = c__ * d__[2] * d__[3] + z__[2] * d__[3] + z__[3] * d__[2];
+	} else {
+	    temp = (d__[1] - d__[2]) / 2.;
+	    c__ = *rho + z__[3] / (d__[3] - d__[2] - temp);
+	    a = c__ * (d__[1] + d__[2]) + z__[1] + z__[2];
+	    b = c__ * d__[1] * d__[2] + z__[1] * d__[2] + z__[2] * d__[1];
+	}
+/* Computing MAX */
+	d__1 = abs(a), d__2 = abs(b), d__1 = max(d__1,d__2), d__2 = abs(c__);
+	temp = max(d__1,d__2);
+	a /= temp;
+	b /= temp;
+	c__ /= temp;
+	if (c__ == 0.) {
+	    *tau = b / a;
+	} else if (a <= 0.) {
+	    *tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+		    c__ * 2.);
+	} else {
+	    *tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))
+		    ));
+	}
+	if (*tau < lbd || *tau > ubd) {
+	    *tau = (lbd + ubd) / 2.;
+	}
+	if (d__[1] == *tau || d__[2] == *tau || d__[3] == *tau) {
+	    *tau = 0.;
+	} else {
+	    temp = *finit + *tau * z__[1] / (d__[1] * (d__[1] - *tau)) + *tau
+		    * z__[2] / (d__[2] * (d__[2] - *tau)) + *tau * z__[3] / (
+		    d__[3] * (d__[3] - *tau));
+	    if (temp <= 0.) {
+		lbd = *tau;
+	    } else {
+		ubd = *tau;
+	    }
+	    if (abs(*finit) <= abs(temp)) {
+		*tau = 0.;
+	    }
+	}
+    }
+
+/*     Get machine parameters for possible scaling to avoid overflow */
+
+/*     modified by Sven: parameters SMALL1, SMINV1, SMALL2, */
+/*     SMINV2, EPS are not SAVEd anymore between one call to the */
+/*     others but recomputed at each call */
+
+    eps = dlamch_("Epsilon");
+    base = dlamch_("Base");
+    i__1 = (integer) (log(dlamch_("SafMin")) / log(base) / 3.);
+    small1 = pow_di(&base, &i__1);
+    sminv1 = 1. / small1;
+    small2 = small1 * small1;
+    sminv2 = sminv1 * sminv1;
+
+/*     Determine if scaling of inputs necessary to avoid overflow */
+/*     when computing 1/TEMP**3 */
+
+    if (*orgati) {
+/* Computing MIN */
+	d__3 = (d__1 = d__[2] - *tau, abs(d__1)), d__4 = (d__2 = d__[3] - *
+		tau, abs(d__2));
+	temp = min(d__3,d__4);
+    } else {
+/* Computing MIN */
+	d__3 = (d__1 = d__[1] - *tau, abs(d__1)), d__4 = (d__2 = d__[2] - *
+		tau, abs(d__2));
+	temp = min(d__3,d__4);
+    }
+    scale = FALSE_;
+    if (temp <= small1) {
+	scale = TRUE_;
+	if (temp <= small2) {
+
+/*        Scale up by power of radix nearest 1/SAFMIN**(2/3) */
+
+	    sclfac = sminv2;
+	    sclinv = small2;
+	} else {
+
+/*        Scale up by power of radix nearest 1/SAFMIN**(1/3) */
+
+	    sclfac = sminv1;
+	    sclinv = small1;
+	}
+
+/*        Scaling up safe because D, Z, TAU scaled elsewhere to be O(1) */
+
+	for (i__ = 1; i__ <= 3; ++i__) {
+	    dscale[i__ - 1] = d__[i__] * sclfac;
+	    zscale[i__ - 1] = z__[i__] * sclfac;
+/* L10: */
+	}
+	*tau *= sclfac;
+	lbd *= sclfac;
+	ubd *= sclfac;
+    } else {
+
+/*        Copy D and Z to DSCALE and ZSCALE */
+
+	for (i__ = 1; i__ <= 3; ++i__) {
+	    dscale[i__ - 1] = d__[i__];
+	    zscale[i__ - 1] = z__[i__];
+/* L20: */
+	}
+    }
+
+    fc = 0.;
+    df = 0.;
+    ddf = 0.;
+    for (i__ = 1; i__ <= 3; ++i__) {
+	temp = 1. / (dscale[i__ - 1] - *tau);
+	temp1 = zscale[i__ - 1] * temp;
+	temp2 = temp1 * temp;
+	temp3 = temp2 * temp;
+	fc += temp1 / dscale[i__ - 1];
+	df += temp2;
+	ddf += temp3;
+/* L30: */
+    }
+    f = *finit + *tau * fc;
+
+    if (abs(f) <= 0.) {
+	goto L60;
+    }
+    if (f <= 0.) {
+	lbd = *tau;
+    } else {
+	ubd = *tau;
+    }
+
+/*        Iteration begins -- Use Gragg-Thornton-Warner cubic convergent */
+/*                            scheme */
+
+/*     It is not hard to see that */
+
+/*           1) Iterations will go up monotonically */
+/*              if FINIT < 0; */
+
+/*           2) Iterations will go down monotonically */
+/*              if FINIT > 0. */
+
+    iter = niter + 1;
+
+    for (niter = iter; niter <= 40; ++niter) {
+
+	if (*orgati) {
+	    temp1 = dscale[1] - *tau;
+	    temp2 = dscale[2] - *tau;
+	} else {
+	    temp1 = dscale[0] - *tau;
+	    temp2 = dscale[1] - *tau;
+	}
+	a = (temp1 + temp2) * f - temp1 * temp2 * df;
+	b = temp1 * temp2 * f;
+	c__ = f - (temp1 + temp2) * df + temp1 * temp2 * ddf;
+/* Computing MAX */
+	d__1 = abs(a), d__2 = abs(b), d__1 = max(d__1,d__2), d__2 = abs(c__);
+	temp = max(d__1,d__2);
+	a /= temp;
+	b /= temp;
+	c__ /= temp;
+	if (c__ == 0.) {
+	    eta = b / a;
+	} else if (a <= 0.) {
+	    eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__
+		    * 2.);
+	} else {
+	    eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))
+		    );
+	}
+	if (f * eta >= 0.) {
+	    eta = -f / df;
+	}
+
+	*tau += eta;
+	if (*tau < lbd || *tau > ubd) {
+	    *tau = (lbd + ubd) / 2.;
+	}
+
+	fc = 0.;
+	erretm = 0.;
+	df = 0.;
+	ddf = 0.;
+	for (i__ = 1; i__ <= 3; ++i__) {
+	    temp = 1. / (dscale[i__ - 1] - *tau);
+	    temp1 = zscale[i__ - 1] * temp;
+	    temp2 = temp1 * temp;
+	    temp3 = temp2 * temp;
+	    temp4 = temp1 / dscale[i__ - 1];
+	    fc += temp4;
+	    erretm += abs(temp4);
+	    df += temp2;
+	    ddf += temp3;
+/* L40: */
+	}
+	f = *finit + *tau * fc;
+	erretm = (abs(*finit) + abs(*tau) * erretm) * 8. + abs(*tau) * df;
+	if (abs(f) <= eps * erretm) {
+	    goto L60;
+	}
+	if (f <= 0.) {
+	    lbd = *tau;
+	} else {
+	    ubd = *tau;
+	}
+/* L50: */
+    }
+    *info = 1;
+L60:
+
+/*     Undo scaling */
+
+    if (scale) {
+	*tau *= sclinv;
+    }
+    return 0;
+
+/*     End of DLAED6 */
+
+} /* dlaed6_ */
diff --git a/contrib/libs/clapack/dlaed7.c b/contrib/libs/clapack/dlaed7.c
new file mode 100644
index 0000000000..93a1848934
--- /dev/null
+++ b/contrib/libs/clapack/dlaed7.c
@@ -0,0 +1,354 @@
+/* dlaed7.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__2 = 2;
+static integer c__1 = 1;
+static doublereal c_b10 = 1.;
+static doublereal c_b11 = 0.;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dlaed7_(integer *icompq, integer *n, integer *qsiz,
+ integer *tlvls, integer *curlvl, integer *curpbm, doublereal *d__,
+ doublereal *q, integer *ldq, integer *indxq, doublereal *rho, integer
+ *cutpnt, doublereal *qstore, integer *qptr, integer *prmptr, integer *
+ perm, integer *givptr, integer *givcol, doublereal *givnum,
+ doublereal *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ integer i__, k, n1, n2, is, iw, iz, iq2, ptr, ldq2, indx, curr;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ integer indxc, indxp;
+ extern /* Subroutine */ int dlaed8_(integer *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *, integer *,
+ doublereal *, integer *, integer *, integer *), dlaed9_(integer *,
+ integer *, integer *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ integer *, integer *), dlaeda_(integer *, integer *, integer *,
+ integer *, integer *, integer *, integer *, integer *, doublereal
+ *, doublereal *, integer *, doublereal *, doublereal *, integer *)
+ ;
+ integer idlmda;
+ extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+ integer *, integer *, integer *), xerbla_(char *, integer *);
+ integer coltyp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAED7 computes the updated eigensystem of a diagonal */
+/* matrix after modification by a rank-one symmetric matrix. This */
+/* routine is used only for the eigenproblem which requires all */
+/* eigenvalues and optionally eigenvectors of a dense symmetric matrix */
+/* that has been reduced to tridiagonal form. DLAED1 handles */
+/* the case in which all eigenvalues and eigenvectors of a symmetric */
+/* tridiagonal matrix are desired. */
+
+/* T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out) */
+
+/* where Z = Q'u, u is a vector of length N with ones in the */
+/* CUTPNT and CUTPNT + 1 th elements and zeros elsewhere. */
+
+/* The eigenvectors of the original matrix are stored in Q, and the */
+/* eigenvalues are in D. The algorithm consists of three stages: */
+
+/* The first stage consists of deflating the size of the problem */
+/* when there are multiple eigenvalues or if there is a zero in */
+/* the Z vector. For each such occurence the dimension of the */
+/* secular equation problem is reduced by one. This stage is */
+/* performed by the routine DLAED8. */
+
+/* The second stage consists of calculating the updated */
+/* eigenvalues. This is done by finding the roots of the secular */
+/* equation via the routine DLAED4 (as called by DLAED9). */
+/* This routine also calculates the eigenvectors of the current */
+/* problem. */
+
+/* The final stage consists of computing the updated eigenvectors */
+/* directly using the updated eigenvalues. The eigenvectors for */
+/* the current problem are multiplied with the eigenvectors from */
+/* the overall problem. */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* = 0: Compute eigenvalues only. */
+/* = 1: Compute eigenvectors of original dense symmetric matrix */
+/* also. On entry, Q contains the orthogonal matrix used */
+/* to reduce the original matrix to tridiagonal form. */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* QSIZ (input) INTEGER */
+/* The dimension of the orthogonal matrix used to reduce */
+/* the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1. */
+
+/* TLVLS (input) INTEGER */
+/* The total number of merging levels in the overall divide and */
+/* conquer tree. */
+
+/* CURLVL (input) INTEGER */
+/* The current level in the overall merge routine, */
+/* 0 <= CURLVL <= TLVLS. */
+
+/* CURPBM (input) INTEGER */
+/* The current problem in the current level in the overall */
+/* merge routine (counting from upper left to lower right). */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the eigenvalues of the rank-1-perturbed matrix. */
+/* On exit, the eigenvalues of the repaired matrix. */
+
+/* Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N) */
+/* On entry, the eigenvectors of the rank-1-perturbed matrix. */
+/* On exit, the eigenvectors of the repaired tridiagonal matrix. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* INDXQ (output) INTEGER array, dimension (N) */
+/* The permutation which will reintegrate the subproblem just */
+/* solved back into sorted order, i.e., D( INDXQ( I = 1, N ) ) */
+/* will be in ascending order. */
+
+/* RHO (input) DOUBLE PRECISION */
+/* The subdiagonal element used to create the rank-1 */
+/* modification. */
+
+/* CUTPNT (input) INTEGER */
+/* Contains the location of the last eigenvalue in the leading */
+/* sub-matrix. min(1,N) <= CUTPNT <= N. */
+
+/* QSTORE (input/output) DOUBLE PRECISION array, dimension (N**2+1) */
+/* Stores eigenvectors of submatrices encountered during */
+/* divide and conquer, packed together. QPTR points to */
+/* beginning of the submatrices. */
+
+/* QPTR (input/output) INTEGER array, dimension (N+2) */
+/* List of indices pointing to beginning of submatrices stored */
+/* in QSTORE. The submatrices are numbered starting at the */
+/* bottom left of the divide and conquer tree, from left to */
+/* right and bottom to top. */
+
+/* PRMPTR (input) INTEGER array, dimension (N lg N) */
+/* Contains a list of pointers which indicate where in PERM a */
+/* level's permutation is stored. PRMPTR(i+1) - PRMPTR(i) */
+/* indicates the size of the permutation and also the size of */
+/* the full, non-deflated problem. */
+
+/* PERM (input) INTEGER array, dimension (N lg N) */
+/* Contains the permutations (from deflation and sorting) to be */
+/* applied to each eigenblock. */
+
+/* GIVPTR (input) INTEGER array, dimension (N lg N) */
+/* Contains a list of pointers which indicate where in GIVCOL a */
+/* level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i) */
+/* indicates the number of Givens rotations. */
+
+/* GIVCOL (input) INTEGER array, dimension (2, N lg N) */
+/* Each pair of numbers indicates a pair of columns to take place */
+/* in a Givens rotation. */
+
+/* GIVNUM (input) DOUBLE PRECISION array, dimension (2, N lg N) */
+/* Each number indicates the S value to be used in the */
+/* corresponding Givens rotation. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N+QSIZ*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (4*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, an eigenvalue did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --indxq;
+ --qstore;
+ --qptr;
+ --prmptr;
+ --perm;
+ --givptr;
+ givcol -= 3;
+ givnum -= 3;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*icompq == 1 && *qsiz < *n) {
+ *info = -4;
+ } else if (*ldq < max(1,*n)) {
+ *info = -9;
+ } else if (min(1,*n) > *cutpnt || *n < *cutpnt) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLAED7", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* The following values are for bookkeeping purposes only. They are */
+/* integer pointers which indicate the portion of the workspace */
+/* used by a particular array in DLAED8 and DLAED9. */
+
+ if (*icompq == 1) {
+ ldq2 = *qsiz;
+ } else {
+ ldq2 = *n;
+ }
+
+ iz = 1;
+ idlmda = iz + *n;
+ iw = idlmda + *n;
+ iq2 = iw + *n;
+ is = iq2 + *n * ldq2;
+
+ indx = 1;
+ indxc = indx + *n;
+ coltyp = indxc + *n;
+ indxp = coltyp + *n;
+
+/* Form the z-vector which consists of the last row of Q_1 and the */
+/* first row of Q_2. */
+
+ ptr = pow_ii(&c__2, tlvls) + 1;
+ i__1 = *curlvl - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *tlvls - i__;
+ ptr += pow_ii(&c__2, &i__2);
+/* L10: */
+ }
+ curr = ptr + *curpbm;
+ dlaeda_(n, tlvls, curlvl, curpbm, &prmptr[1], &perm[1], &givptr[1], &
+ givcol[3], &givnum[3], &qstore[1], &qptr[1], &work[iz], &work[iz
+ + *n], info);
+
+/* When solving the final problem, we no longer need the stored data, */
+/* so we will overwrite the data from this level onto the previously */
+/* used storage space. */
+
+ if (*curlvl == *tlvls) {
+ qptr[curr] = 1;
+ prmptr[curr] = 1;
+ givptr[curr] = 1;
+ }
+
+/* Sort and Deflate eigenvalues. */
+
+ dlaed8_(icompq, &k, n, qsiz, &d__[1], &q[q_offset], ldq, &indxq[1], rho,
+ cutpnt, &work[iz], &work[idlmda], &work[iq2], &ldq2, &work[iw], &
+ perm[prmptr[curr]], &givptr[curr + 1], &givcol[(givptr[curr] << 1)
+ + 1], &givnum[(givptr[curr] << 1) + 1], &iwork[indxp], &iwork[
+ indx], info);
+ prmptr[curr + 1] = prmptr[curr] + *n;
+ givptr[curr + 1] += givptr[curr];
+
+/* Solve Secular Equation. */
+
+ if (k != 0) {
+ dlaed9_(&k, &c__1, &k, n, &d__[1], &work[is], &k, rho, &work[idlmda],
+ &work[iw], &qstore[qptr[curr]], &k, info);
+ if (*info != 0) {
+ goto L30;
+ }
+ if (*icompq == 1) {
+ dgemm_("N", "N", qsiz, &k, &k, &c_b10, &work[iq2], &ldq2, &qstore[
+ qptr[curr]], &k, &c_b11, &q[q_offset], ldq);
+ }
+/* Computing 2nd power */
+ i__1 = k;
+ qptr[curr + 1] = qptr[curr] + i__1 * i__1;
+
+/* Prepare the INDXQ sorting permutation. */
+
+ n1 = k;
+ n2 = *n - k;
+ dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]);
+ } else {
+ qptr[curr + 1] = qptr[curr];
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ indxq[i__] = i__;
+/* L20: */
+ }
+ }
+
+L30:
+ return 0;
+
+/* End of DLAED7 */
+
+} /* dlaed7_ */
diff --git a/contrib/libs/clapack/dlaed8.c b/contrib/libs/clapack/dlaed8.c
new file mode 100644
index 0000000000..6c1656b300
--- /dev/null
+++ b/contrib/libs/clapack/dlaed8.c
@@ -0,0 +1,475 @@
+/* dlaed8.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b3 = -1.;
+static integer c__1 = 1;
+
+/* Subroutine */ int dlaed8_(integer *icompq, integer *k, integer *n, integer
+ *qsiz, doublereal *d__, doublereal *q, integer *ldq, integer *indxq,
+ doublereal *rho, integer *cutpnt, doublereal *z__, doublereal *dlamda,
+ doublereal *q2, integer *ldq2, doublereal *w, integer *perm, integer
+ *givptr, integer *givcol, doublereal *givnum, integer *indxp, integer
+ *indx, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, q2_dim1, q2_offset, i__1;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ doublereal c__;
+ integer i__, j;
+ doublereal s, t;
+ integer k2, n1, n2, jp, n1p1;
+ doublereal eps, tau, tol;
+ integer jlam, imax, jmax;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *), dscal_(
+ integer *, doublereal *, doublereal *, integer *), dcopy_(integer
+ *, doublereal *, integer *, doublereal *, integer *);
+ extern doublereal dlapy2_(doublereal *, doublereal *), dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+ integer *, integer *, integer *), dlacpy_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAED8 merges the two sets of eigenvalues together into a single */
+/* sorted set. Then it tries to deflate the size of the problem. */
+/* There are two ways in which deflation can occur: when two or more */
+/* eigenvalues are close together or if there is a tiny element in the */
+/* Z vector. For each such occurrence the order of the related secular */
+/* equation problem is reduced by one. */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* = 0: Compute eigenvalues only. */
+/* = 1: Compute eigenvectors of original dense symmetric matrix */
+/* also. On entry, Q contains the orthogonal matrix used */
+/* to reduce the original matrix to tridiagonal form. */
+
+/* K (output) INTEGER */
+/* The number of non-deflated eigenvalues, and the order of the */
+/* related secular equation. */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* QSIZ (input) INTEGER */
+/* The dimension of the orthogonal matrix used to reduce */
+/* the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the eigenvalues of the two submatrices to be */
+/* combined. On exit, the trailing (N-K) updated eigenvalues */
+/* (those which were deflated) sorted into increasing order. */
+
+/* Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N) */
+/* If ICOMPQ = 0, Q is not referenced. Otherwise, */
+/* on entry, Q contains the eigenvectors of the partially solved */
+/* system which has been previously updated in matrix */
+/* multiplies with other partially solved eigensystems. */
+/* On exit, Q contains the trailing (N-K) updated eigenvectors */
+/* (those which were deflated) in its last N-K columns. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* INDXQ (input) INTEGER array, dimension (N) */
+/* The permutation which separately sorts the two sub-problems */
+/* in D into ascending order. Note that elements in the second */
+/* half of this permutation must first have CUTPNT added to */
+/* their values in order to be accurate. */
+
+/* RHO (input/output) DOUBLE PRECISION */
+/* On entry, the off-diagonal element associated with the rank-1 */
+/* cut which originally split the two submatrices which are now */
+/* being recombined. */
+/* On exit, RHO has been modified to the value required by */
+/* DLAED3. */
+
+/* CUTPNT (input) INTEGER */
+/* The location of the last eigenvalue in the leading */
+/* sub-matrix. min(1,N) <= CUTPNT <= N. */
+
+/* Z (input) DOUBLE PRECISION array, dimension (N) */
+/* On entry, Z contains the updating vector (the last row of */
+/* the first sub-eigenvector matrix and the first row of the */
+/* second sub-eigenvector matrix). */
+/* On exit, the contents of Z are destroyed by the updating */
+/* process. */
+
+/* DLAMDA (output) DOUBLE PRECISION array, dimension (N) */
+/* A copy of the first K eigenvalues which will be used by */
+/* DLAED3 to form the secular equation. */
+
+/* Q2 (output) DOUBLE PRECISION array, dimension (LDQ2,N) */
+/* If ICOMPQ = 0, Q2 is not referenced. Otherwise, */
+/* a copy of the first K eigenvectors which will be used by */
+/* DLAED7 in a matrix multiply (DGEMM) to update the new */
+/* eigenvectors. */
+
+/* LDQ2 (input) INTEGER */
+/* The leading dimension of the array Q2. LDQ2 >= max(1,N). */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* The first k values of the final deflation-altered z-vector and */
+/* will be passed to DLAED3. */
+
+/* PERM (output) INTEGER array, dimension (N) */
+/* The permutations (from deflation and sorting) to be applied */
+/* to each eigenblock. */
+
+/* GIVPTR (output) INTEGER */
+/* The number of Givens rotations which took place in this */
+/* subproblem. */
+
+/* GIVCOL (output) INTEGER array, dimension (2, N) */
+/* Each pair of numbers indicates a pair of columns to take place */
+/* in a Givens rotation. */
+
+/* GIVNUM (output) DOUBLE PRECISION array, dimension (2, N) */
+/* Each number indicates the S value to be used in the */
+/* corresponding Givens rotation. */
+
+/* INDXP (workspace) INTEGER array, dimension (N) */
+/* The permutation used to place deflated values of D at the end */
+/* of the array. INDXP(1:K) points to the nondeflated D-values */
+/* and INDXP(K+1:N) points to the deflated eigenvalues. */
+
+/* INDX (workspace) INTEGER array, dimension (N) */
+/* The permutation used to sort the contents of D into ascending */
+/* order. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --indxq;
+ --z__;
+ --dlamda;
+ q2_dim1 = *ldq2;
+ q2_offset = 1 + q2_dim1;
+ q2 -= q2_offset;
+ --w;
+ --perm;
+ givcol -= 3;
+ givnum -= 3;
+ --indxp;
+ --indx;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*icompq == 1 && *qsiz < *n) {
+ *info = -4;
+ } else if (*ldq < max(1,*n)) {
+ *info = -7;
+ } else if (*cutpnt < min(1,*n) || *cutpnt > *n) {
+ *info = -10;
+ } else if (*ldq2 < max(1,*n)) {
+ *info = -14;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLAED8", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ n1 = *cutpnt;
+ n2 = *n - n1;
+ n1p1 = n1 + 1;
+
+ if (*rho < 0.) {
+ dscal_(&n2, &c_b3, &z__[n1p1], &c__1);
+ }
+
+/* Normalize z so that norm(z) = 1 */
+
+ t = 1. / sqrt(2.);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ indx[j] = j;
+/* L10: */
+ }
+ dscal_(n, &t, &z__[1], &c__1);
+ *rho = (d__1 = *rho * 2., abs(d__1));
+
+/* Sort the eigenvalues into increasing order */
+
+ i__1 = *n;
+ for (i__ = *cutpnt + 1; i__ <= i__1; ++i__) {
+ indxq[i__] += *cutpnt;
+/* L20: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlamda[i__] = d__[indxq[i__]];
+ w[i__] = z__[indxq[i__]];
+/* L30: */
+ }
+ i__ = 1;
+ j = *cutpnt + 1;
+ dlamrg_(&n1, &n2, &dlamda[1], &c__1, &c__1, &indx[1]);
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = dlamda[indx[i__]];
+ z__[i__] = w[indx[i__]];
+/* L40: */
+ }
+
+/* Calculate the allowable deflation tolerence */
+
+ imax = idamax_(n, &z__[1], &c__1);
+ jmax = idamax_(n, &d__[1], &c__1);
+ eps = dlamch_("Epsilon");
+ tol = eps * 8. * (d__1 = d__[jmax], abs(d__1));
+
+/* If the rank-1 modifier is small enough, no more needs to be done */
+/* except to reorganize Q so that its columns correspond with the */
+/* elements in D. */
+
+ if (*rho * (d__1 = z__[imax], abs(d__1)) <= tol) {
+ *k = 0;
+ if (*icompq == 0) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ perm[j] = indxq[indx[j]];
+/* L50: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ perm[j] = indxq[indx[j]];
+ dcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1
+ + 1], &c__1);
+/* L60: */
+ }
+ dlacpy_("A", qsiz, n, &q2[q2_dim1 + 1], ldq2, &q[q_dim1 + 1], ldq);
+ }
+ return 0;
+ }
+
+/* If there are multiple eigenvalues then the problem deflates. Here */
+/* the number of equal eigenvalues are found. As each equal */
+/* eigenvalue is found, an elementary reflector is computed to rotate */
+/* the corresponding eigensubspace so that the corresponding */
+/* components of Z are zero in this new basis. */
+
+ *k = 0;
+ *givptr = 0;
+ k2 = *n + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ indxp[k2] = j;
+ if (j == *n) {
+ goto L110;
+ }
+ } else {
+ jlam = j;
+ goto L80;
+ }
+/* L70: */
+ }
+L80:
+ ++j;
+ if (j > *n) {
+ goto L100;
+ }
+ if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ indxp[k2] = j;
+ } else {
+
+/* Check if eigenvalues are close enough to allow deflation. */
+
+ s = z__[jlam];
+ c__ = z__[j];
+
+/* Find sqrt(a**2+b**2) without overflow or */
+/* destructive underflow. */
+
+ tau = dlapy2_(&c__, &s);
+ t = d__[j] - d__[jlam];
+ c__ /= tau;
+ s = -s / tau;
+ if ((d__1 = t * c__ * s, abs(d__1)) <= tol) {
+
+/* Deflation is possible. */
+
+ z__[j] = tau;
+ z__[jlam] = 0.;
+
+/* Record the appropriate Givens rotation */
+
+ ++(*givptr);
+ givcol[(*givptr << 1) + 1] = indxq[indx[jlam]];
+ givcol[(*givptr << 1) + 2] = indxq[indx[j]];
+ givnum[(*givptr << 1) + 1] = c__;
+ givnum[(*givptr << 1) + 2] = s;
+ if (*icompq == 1) {
+ drot_(qsiz, &q[indxq[indx[jlam]] * q_dim1 + 1], &c__1, &q[
+ indxq[indx[j]] * q_dim1 + 1], &c__1, &c__, &s);
+ }
+ t = d__[jlam] * c__ * c__ + d__[j] * s * s;
+ d__[j] = d__[jlam] * s * s + d__[j] * c__ * c__;
+ d__[jlam] = t;
+ --k2;
+ i__ = 1;
+L90:
+ if (k2 + i__ <= *n) {
+ if (d__[jlam] < d__[indxp[k2 + i__]]) {
+ indxp[k2 + i__ - 1] = indxp[k2 + i__];
+ indxp[k2 + i__] = jlam;
+ ++i__;
+ goto L90;
+ } else {
+ indxp[k2 + i__ - 1] = jlam;
+ }
+ } else {
+ indxp[k2 + i__ - 1] = jlam;
+ }
+ jlam = j;
+ } else {
+ ++(*k);
+ w[*k] = z__[jlam];
+ dlamda[*k] = d__[jlam];
+ indxp[*k] = jlam;
+ jlam = j;
+ }
+ }
+ goto L80;
+L100:
+
+/* Record the last eigenvalue. */
+
+ ++(*k);
+ w[*k] = z__[jlam];
+ dlamda[*k] = d__[jlam];
+ indxp[*k] = jlam;
+
+L110:
+
+/* Sort the eigenvalues and corresponding eigenvectors into DLAMDA */
+/* and Q2 respectively. The eigenvalues/vectors which were not */
+/* deflated go into the first K slots of DLAMDA and Q2 respectively, */
+/* while those which were deflated go into the last N - K slots. */
+
+ if (*icompq == 0) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ jp = indxp[j];
+ dlamda[j] = d__[jp];
+ perm[j] = indxq[indx[jp]];
+/* L120: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ jp = indxp[j];
+ dlamda[j] = d__[jp];
+ perm[j] = indxq[indx[jp]];
+ dcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1]
+, &c__1);
+/* L130: */
+ }
+ }
+
+/* The deflated eigenvalues and their corresponding vectors go back */
+/* into the last N - K slots of D and Q respectively. */
+
+ if (*k < *n) {
+ if (*icompq == 0) {
+ i__1 = *n - *k;
+ dcopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1);
+ } else {
+ i__1 = *n - *k;
+ dcopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1);
+ i__1 = *n - *k;
+ dlacpy_("A", qsiz, &i__1, &q2[(*k + 1) * q2_dim1 + 1], ldq2, &q[(*
+ k + 1) * q_dim1 + 1], ldq);
+ }
+ }
+
+ return 0;
+
+/* End of DLAED8 */
+
+} /* dlaed8_ */
diff --git a/contrib/libs/clapack/dlaed9.c b/contrib/libs/clapack/dlaed9.c
new file mode 100644
index 0000000000..25b3466e8f
--- /dev/null
+++ b/contrib/libs/clapack/dlaed9.c
@@ -0,0 +1,274 @@
+/* dlaed9.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dlaed9_(integer *k, integer *kstart, integer *kstop,
+ integer *n, doublereal *d__, doublereal *q, integer *ldq, doublereal *
+ rho, doublereal *dlamda, doublereal *w, doublereal *s, integer *lds,
+ integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, s_dim1, s_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ integer i__, j;
+ doublereal temp;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dlaed4_(integer *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *);
+ extern doublereal dlamc3_(doublereal *, doublereal *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAED9 finds the roots of the secular equation, as defined by the */
+/* values in D, Z, and RHO, between KSTART and KSTOP. It makes the */
+/* appropriate calls to DLAED4 and then stores the new matrix of */
+/* eigenvectors for use in calculating the next level of Z vectors. */
+
+/* Arguments */
+/* ========= */
+
+/* K (input) INTEGER */
+/* The number of terms in the rational function to be solved by */
+/* DLAED4. K >= 0. */
+
+/* KSTART (input) INTEGER */
+/* KSTOP (input) INTEGER */
+/* The updated eigenvalues Lambda(I), KSTART <= I <= KSTOP */
+/* are to be computed. 1 <= KSTART <= KSTOP <= K. */
+
+/* N (input) INTEGER */
+/* The number of rows and columns in the Q matrix. */
+/* N >= K (delation may result in N > K). */
+
+/* D (output) DOUBLE PRECISION array, dimension (N) */
+/* D(I) contains the updated eigenvalues */
+/* for KSTART <= I <= KSTOP. */
+
+/* Q (workspace) DOUBLE PRECISION array, dimension (LDQ,N) */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max( 1, N ). */
+
+/* RHO (input) DOUBLE PRECISION */
+/* The value of the parameter in the rank one update equation. */
+/* RHO >= 0 required. */
+
+/* DLAMDA (input) DOUBLE PRECISION array, dimension (K) */
+/* The first K elements of this array contain the old roots */
+/* of the deflated updating problem. These are the poles */
+/* of the secular equation. */
+
+/* W (input) DOUBLE PRECISION array, dimension (K) */
+/* The first K elements of this array contain the components */
+/* of the deflation-adjusted updating vector. */
+
+/* S (output) DOUBLE PRECISION array, dimension (LDS, K) */
+/* Will contain the eigenvectors of the repaired matrix which */
+/* will be stored for subsequent Z vector calculation and */
+/* multiplied by the previously accumulated eigenvectors */
+/* to update the system. */
+
+/* LDS (input) INTEGER */
+/* The leading dimension of S. LDS >= max( 1, K ). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, an eigenvalue did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --dlamda;
+ --w;
+ s_dim1 = *lds;
+ s_offset = 1 + s_dim1;
+ s -= s_offset;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*k < 0) {
+ *info = -1;
+ } else if (*kstart < 1 || *kstart > max(1,*k)) {
+ *info = -2;
+ } else if (max(1,*kstop) < *kstart || *kstop > max(1,*k)) {
+ *info = -3;
+ } else if (*n < *k) {
+ *info = -4;
+ } else if (*ldq < max(1,*k)) {
+ *info = -7;
+ } else if (*lds < max(1,*k)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLAED9", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*k == 0) {
+ return 0;
+ }
+
+/* Modify values DLAMDA(i) to make sure all DLAMDA(i)-DLAMDA(j) can */
+/* be computed with high relative accuracy (barring over/underflow). */
+/* This is a problem on machines without a guard digit in */
+/* add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2). */
+/* The following code replaces DLAMDA(I) by 2*DLAMDA(I)-DLAMDA(I), */
+/* which on any of these machines zeros out the bottommost */
+/* bit of DLAMDA(I) if it is 1; this makes the subsequent */
+/* subtractions DLAMDA(I)-DLAMDA(J) unproblematic when cancellation */
+/* occurs. On binary machines with a guard digit (almost all */
+/* machines) it does not change DLAMDA(I) at all. On hexadecimal */
+/* and decimal machines with a guard digit, it slightly */
+/* changes the bottommost bits of DLAMDA(I). It does not account */
+/* for hexadecimal or decimal machines without guard digits */
+/* (we know of none). We use a subroutine call to compute */
+/* 2*DLAMBDA(I) to prevent optimizing compilers from eliminating */
+/* this code. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlamda[i__] = dlamc3_(&dlamda[i__], &dlamda[i__]) - dlamda[i__];
+/* L10: */
+ }
+
+ i__1 = *kstop;
+ for (j = *kstart; j <= i__1; ++j) {
+ dlaed4_(k, &j, &dlamda[1], &w[1], &q[j * q_dim1 + 1], rho, &d__[j],
+ info);
+
+/* If the zero finder fails, the computation is terminated. */
+
+ if (*info != 0) {
+ goto L120;
+ }
+/* L20: */
+ }
+
+ if (*k == 1 || *k == 2) {
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *k;
+ for (j = 1; j <= i__2; ++j) {
+ s[j + i__ * s_dim1] = q[j + i__ * q_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ goto L120;
+ }
+
+/* Compute updated W. */
+
+ dcopy_(k, &w[1], &c__1, &s[s_offset], &c__1);
+
+/* Initialize W(I) = Q(I,I) */
+
+ i__1 = *ldq + 1;
+ dcopy_(k, &q[q_offset], &i__1, &w[1], &c__1);
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L50: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L60: */
+ }
+/* L70: */
+ }
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__1 = sqrt(-w[i__]);
+ w[i__] = d_sign(&d__1, &s[i__ + s_dim1]);
+/* L80: */
+ }
+
+/* Compute eigenvectors of the modified rank-1 modification. */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *k;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ q[i__ + j * q_dim1] = w[i__] / q[i__ + j * q_dim1];
+/* L90: */
+ }
+ temp = dnrm2_(k, &q[j * q_dim1 + 1], &c__1);
+ i__2 = *k;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ s[i__ + j * s_dim1] = q[i__ + j * q_dim1] / temp;
+/* L100: */
+ }
+/* L110: */
+ }
+
+L120:
+ return 0;
+
+/* End of DLAED9 */
+
+} /* dlaed9_ */
diff --git a/contrib/libs/clapack/dlaeda.c b/contrib/libs/clapack/dlaeda.c
new file mode 100644
index 0000000000..f9d8536bea
--- /dev/null
+++ b/contrib/libs/clapack/dlaeda.c
@@ -0,0 +1,287 @@
+/* dlaeda.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__2 = 2;
+static integer c__1 = 1;
+static doublereal c_b24 = 1.;
+static doublereal c_b26 = 0.;
+
+/* Subroutine */ int dlaeda_(integer *n, integer *tlvls, integer *curlvl,
+ integer *curpbm, integer *prmptr, integer *perm, integer *givptr,
+ integer *givcol, doublereal *givnum, doublereal *q, integer *qptr,
+ doublereal *z__, doublereal *ztemp, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, k, mid, ptr;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ integer curr, bsiz1, bsiz2, psiz1, psiz2, zptr1;
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *), dcopy_(integer *,
+ doublereal *, integer *, doublereal *, integer *), xerbla_(char *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAEDA computes the Z vector corresponding to the merge step in the */
+/* CURLVLth step of the merge process with TLVLS steps for the CURPBMth */
+/* problem. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* TLVLS (input) INTEGER */
+/* The total number of merging levels in the overall divide and */
+/* conquer tree. */
+
+/* CURLVL (input) INTEGER */
+/* The current level in the overall merge routine, */
+/* 0 <= curlvl <= tlvls. */
+
+/* CURPBM (input) INTEGER */
+/* The current problem in the current level in the overall */
+/* merge routine (counting from upper left to lower right). */
+
+/* PRMPTR (input) INTEGER array, dimension (N lg N) */
+/* Contains a list of pointers which indicate where in PERM a */
+/* level's permutation is stored. PRMPTR(i+1) - PRMPTR(i) */
+/* indicates the size of the permutation and incidentally the */
+/* size of the full, non-deflated problem. */
+
+/* PERM (input) INTEGER array, dimension (N lg N) */
+/* Contains the permutations (from deflation and sorting) to be */
+/* applied to each eigenblock. */
+
+/* GIVPTR (input) INTEGER array, dimension (N lg N) */
+/* Contains a list of pointers which indicate where in GIVCOL a */
+/* level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i) */
+/* indicates the number of Givens rotations. */
+
+/* GIVCOL (input) INTEGER array, dimension (2, N lg N) */
+/* Each pair of numbers indicates a pair of columns to take place */
+/* in a Givens rotation. */
+
+/* GIVNUM (input) DOUBLE PRECISION array, dimension (2, N lg N) */
+/* Each number indicates the S value to be used in the */
+/* corresponding Givens rotation. */
+
+/* Q (input) DOUBLE PRECISION array, dimension (N**2) */
+/* Contains the square eigenblocks from previous levels, the */
+/* starting positions for blocks are given by QPTR. */
+
+/* QPTR (input) INTEGER array, dimension (N+2) */
+/* Contains a list of pointers which indicate where in Q an */
+/* eigenblock is stored. SQRT( QPTR(i+1) - QPTR(i) ) indicates */
+/* the size of the block. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (N) */
+/* On output this vector contains the updating vector (the last */
+/* row of the first sub-eigenvector matrix and the first row of */
+/* the second sub-eigenvector matrix). */
+
+/* ZTEMP (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ztemp;
+ --z__;
+ --qptr;
+ --q;
+ givnum -= 3;
+ givcol -= 3;
+ --givptr;
+ --perm;
+ --prmptr;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -1;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLAEDA", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine location of first number in second half. */
+
+ mid = *n / 2 + 1;
+
+/* Gather last/first rows of appropriate eigenblocks into center of Z */
+
+ ptr = 1;
+
+/* Determine location of lowest level subproblem in the full storage */
+/* scheme */
+
+ i__1 = *curlvl - 1;
+ curr = ptr + *curpbm * pow_ii(&c__2, curlvl) + pow_ii(&c__2, &i__1) - 1;
+
+/* Determine size of these matrices. We add HALF to the value of */
+/* the SQRT in case the machine underestimates one of these square */
+/* roots. */
+
+ bsiz1 = (integer) (sqrt((doublereal) (qptr[curr + 1] - qptr[curr])) + .5);
+ bsiz2 = (integer) (sqrt((doublereal) (qptr[curr + 2] - qptr[curr + 1])) +
+ .5);
+ i__1 = mid - bsiz1 - 1;
+ for (k = 1; k <= i__1; ++k) {
+ z__[k] = 0.;
+/* L10: */
+ }
+ dcopy_(&bsiz1, &q[qptr[curr] + bsiz1 - 1], &bsiz1, &z__[mid - bsiz1], &
+ c__1);
+ dcopy_(&bsiz2, &q[qptr[curr + 1]], &bsiz2, &z__[mid], &c__1);
+ i__1 = *n;
+ for (k = mid + bsiz2; k <= i__1; ++k) {
+ z__[k] = 0.;
+/* L20: */
+ }
+
+/* Loop thru remaining levels 1 -> CURLVL applying the Givens */
+/* rotations and permutation and then multiplying the center matrices */
+/* against the current Z. */
+
+ ptr = pow_ii(&c__2, tlvls) + 1;
+ i__1 = *curlvl - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = *curlvl - k;
+ i__3 = *curlvl - k - 1;
+ curr = ptr + *curpbm * pow_ii(&c__2, &i__2) + pow_ii(&c__2, &i__3) -
+ 1;
+ psiz1 = prmptr[curr + 1] - prmptr[curr];
+ psiz2 = prmptr[curr + 2] - prmptr[curr + 1];
+ zptr1 = mid - psiz1;
+
+/* Apply Givens at CURR and CURR+1 */
+
+ i__2 = givptr[curr + 1] - 1;
+ for (i__ = givptr[curr]; i__ <= i__2; ++i__) {
+ drot_(&c__1, &z__[zptr1 + givcol[(i__ << 1) + 1] - 1], &c__1, &
+ z__[zptr1 + givcol[(i__ << 1) + 2] - 1], &c__1, &givnum[(
+ i__ << 1) + 1], &givnum[(i__ << 1) + 2]);
+/* L30: */
+ }
+ i__2 = givptr[curr + 2] - 1;
+ for (i__ = givptr[curr + 1]; i__ <= i__2; ++i__) {
+ drot_(&c__1, &z__[mid - 1 + givcol[(i__ << 1) + 1]], &c__1, &z__[
+ mid - 1 + givcol[(i__ << 1) + 2]], &c__1, &givnum[(i__ <<
+ 1) + 1], &givnum[(i__ << 1) + 2]);
+/* L40: */
+ }
+ psiz1 = prmptr[curr + 1] - prmptr[curr];
+ psiz2 = prmptr[curr + 2] - prmptr[curr + 1];
+ i__2 = psiz1 - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ ztemp[i__ + 1] = z__[zptr1 + perm[prmptr[curr] + i__] - 1];
+/* L50: */
+ }
+ i__2 = psiz2 - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ ztemp[psiz1 + i__ + 1] = z__[mid + perm[prmptr[curr + 1] + i__] -
+ 1];
+/* L60: */
+ }
+
+/* Multiply Blocks at CURR and CURR+1 */
+
+/* Determine size of these matrices. We add HALF to the value of */
+/* the SQRT in case the machine underestimates one of these */
+/* square roots. */
+
+ bsiz1 = (integer) (sqrt((doublereal) (qptr[curr + 1] - qptr[curr])) +
+ .5);
+ bsiz2 = (integer) (sqrt((doublereal) (qptr[curr + 2] - qptr[curr + 1])
+ ) + .5);
+ if (bsiz1 > 0) {
+ dgemv_("T", &bsiz1, &bsiz1, &c_b24, &q[qptr[curr]], &bsiz1, &
+ ztemp[1], &c__1, &c_b26, &z__[zptr1], &c__1);
+ }
+ i__2 = psiz1 - bsiz1;
+ dcopy_(&i__2, &ztemp[bsiz1 + 1], &c__1, &z__[zptr1 + bsiz1], &c__1);
+ if (bsiz2 > 0) {
+ dgemv_("T", &bsiz2, &bsiz2, &c_b24, &q[qptr[curr + 1]], &bsiz2, &
+ ztemp[psiz1 + 1], &c__1, &c_b26, &z__[mid], &c__1);
+ }
+ i__2 = psiz2 - bsiz2;
+ dcopy_(&i__2, &ztemp[psiz1 + bsiz2 + 1], &c__1, &z__[mid + bsiz2], &
+ c__1);
+
+ i__2 = *tlvls - k;
+ ptr += pow_ii(&c__2, &i__2);
+/* L70: */
+ }
+
+ return 0;
+
+/* End of DLAEDA */
+
+} /* dlaeda_ */
diff --git a/contrib/libs/clapack/dlaein.c b/contrib/libs/clapack/dlaein.c
new file mode 100644
index 0000000000..133171ef4e
--- /dev/null
+++ b/contrib/libs/clapack/dlaein.c
@@ -0,0 +1,677 @@
+/* dlaein.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dlaein_(logical *rightv, logical *noinit, integer *n,
+ doublereal *h__, integer *ldh, doublereal *wr, doublereal *wi,
+ doublereal *vr, doublereal *vi, doublereal *b, integer *ldb,
+ doublereal *work, doublereal *eps3, doublereal *smlnum, doublereal *
+ bignum, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, h_dim1, h_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j;
+ doublereal w, x, y;
+ integer i1, i2, i3;
+ doublereal w1, ei, ej, xi, xr, rec;
+ integer its, ierr;
+ doublereal temp, norm, vmax;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ doublereal scale;
+ extern doublereal dasum_(integer *, doublereal *, integer *);
+ char trans[1];
+ doublereal vcrit, rootn, vnorm;
+ extern doublereal dlapy2_(doublereal *, doublereal *);
+ doublereal absbii, absbjj;
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dladiv_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *), dlatrs_(
+ char *, char *, char *, char *, integer *, doublereal *, integer *
+, doublereal *, doublereal *, doublereal *, integer *);
+ char normin[1];
+ doublereal nrmsml, growto;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAEIN uses inverse iteration to find a right or left eigenvector */
+/* corresponding to the eigenvalue (WR,WI) of a real upper Hessenberg */
+/* matrix H. */
+
+/* Arguments */
+/* ========= */
+
+/* RIGHTV (input) LOGICAL */
+/* = .TRUE. : compute right eigenvector; */
+/* = .FALSE.: compute left eigenvector. */
+
+/* NOINIT (input) LOGICAL */
+/* = .TRUE. : no initial vector supplied in (VR,VI). */
+/* = .FALSE.: initial vector supplied in (VR,VI). */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N >= 0. */
+
+/* H (input) DOUBLE PRECISION array, dimension (LDH,N) */
+/* The upper Hessenberg matrix H. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH >= max(1,N). */
+
+/* WR (input) DOUBLE PRECISION */
+/* WI (input) DOUBLE PRECISION */
+/* The real and imaginary parts of the eigenvalue of H whose */
+/* corresponding right or left eigenvector is to be computed. */
+
+/* VR (input/output) DOUBLE PRECISION array, dimension (N) */
+/* VI (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, if NOINIT = .FALSE. and WI = 0.0, VR must contain */
+/* a real starting vector for inverse iteration using the real */
+/* eigenvalue WR; if NOINIT = .FALSE. and WI.ne.0.0, VR and VI */
+/* must contain the real and imaginary parts of a complex */
+/* starting vector for inverse iteration using the complex */
+/* eigenvalue (WR,WI); otherwise VR and VI need not be set. */
+/* On exit, if WI = 0.0 (real eigenvalue), VR contains the */
+/* computed real eigenvector; if WI.ne.0.0 (complex eigenvalue), */
+/* VR and VI contain the real and imaginary parts of the */
+/* computed complex eigenvector. The eigenvector is normalized */
+/* so that the component of largest magnitude has magnitude 1; */
+/* here the magnitude of a complex number (x,y) is taken to be */
+/* |x| + |y|. */
+/* VI is not referenced if WI = 0.0. */
+
+/* B (workspace) DOUBLE PRECISION array, dimension (LDB,N) */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= N+1. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* EPS3 (input) DOUBLE PRECISION */
+/* A small machine-dependent value which is used to perturb */
+/* close eigenvalues, and to replace zero pivots. */
+
+/* SMLNUM (input) DOUBLE PRECISION */
+/* A machine-dependent value close to the underflow threshold. */
+
+/* BIGNUM (input) DOUBLE PRECISION */
+/* A machine-dependent value close to the overflow threshold. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* = 1: inverse iteration did not converge; VR is set to the */
+/* last iterate, and so is VI if WI.ne.0.0. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --vr;
+ --vi;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+/* GROWTO is the threshold used in the acceptance test for an */
+/* eigenvector. */
+
+ rootn = sqrt((doublereal) (*n));
+ growto = .1 / rootn;
+/* Computing MAX */
+ d__1 = 1., d__2 = *eps3 * rootn;
+ nrmsml = max(d__1,d__2) * *smlnum;
+
+/* Form B = H - (WR,WI)*I (except that the subdiagonal elements and */
+/* the imaginary parts of the diagonal elements are not stored). */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = h__[i__ + j * h_dim1];
+/* L10: */
+ }
+ b[j + j * b_dim1] = h__[j + j * h_dim1] - *wr;
+/* L20: */
+ }
+
+ if (*wi == 0.) {
+
+/* Real eigenvalue. */
+
+ if (*noinit) {
+
+/* Set initial vector. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ vr[i__] = *eps3;
+/* L30: */
+ }
+ } else {
+
+/* Scale supplied initial vector. */
+
+ vnorm = dnrm2_(n, &vr[1], &c__1);
+ d__1 = *eps3 * rootn / max(vnorm,nrmsml);
+ dscal_(n, &d__1, &vr[1], &c__1);
+ }
+
+ if (*rightv) {
+
+/* LU decomposition with partial pivoting of B, replacing zero */
+/* pivots by EPS3. */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ei = h__[i__ + 1 + i__ * h_dim1];
+ if ((d__1 = b[i__ + i__ * b_dim1], abs(d__1)) < abs(ei)) {
+
+/* Interchange rows and eliminate. */
+
+ x = b[i__ + i__ * b_dim1] / ei;
+ b[i__ + i__ * b_dim1] = ei;
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ temp = b[i__ + 1 + j * b_dim1];
+ b[i__ + 1 + j * b_dim1] = b[i__ + j * b_dim1] - x *
+ temp;
+ b[i__ + j * b_dim1] = temp;
+/* L40: */
+ }
+ } else {
+
+/* Eliminate without interchange. */
+
+ if (b[i__ + i__ * b_dim1] == 0.) {
+ b[i__ + i__ * b_dim1] = *eps3;
+ }
+ x = ei / b[i__ + i__ * b_dim1];
+ if (x != 0.) {
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ b[i__ + 1 + j * b_dim1] -= x * b[i__ + j * b_dim1]
+ ;
+/* L50: */
+ }
+ }
+ }
+/* L60: */
+ }
+ if (b[*n + *n * b_dim1] == 0.) {
+ b[*n + *n * b_dim1] = *eps3;
+ }
+
+ *(unsigned char *)trans = 'N';
+
+ } else {
+
+/* UL decomposition with partial pivoting of B, replacing zero */
+/* pivots by EPS3. */
+
+ for (j = *n; j >= 2; --j) {
+ ej = h__[j + (j - 1) * h_dim1];
+ if ((d__1 = b[j + j * b_dim1], abs(d__1)) < abs(ej)) {
+
+/* Interchange columns and eliminate. */
+
+ x = b[j + j * b_dim1] / ej;
+ b[j + j * b_dim1] = ej;
+ i__1 = j - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = b[i__ + (j - 1) * b_dim1];
+ b[i__ + (j - 1) * b_dim1] = b[i__ + j * b_dim1] - x *
+ temp;
+ b[i__ + j * b_dim1] = temp;
+/* L70: */
+ }
+ } else {
+
+/* Eliminate without interchange. */
+
+ if (b[j + j * b_dim1] == 0.) {
+ b[j + j * b_dim1] = *eps3;
+ }
+ x = ej / b[j + j * b_dim1];
+ if (x != 0.) {
+ i__1 = j - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + (j - 1) * b_dim1] -= x * b[i__ + j *
+ b_dim1];
+/* L80: */
+ }
+ }
+ }
+/* L90: */
+ }
+ if (b[b_dim1 + 1] == 0.) {
+ b[b_dim1 + 1] = *eps3;
+ }
+
+ *(unsigned char *)trans = 'T';
+
+ }
+
+ *(unsigned char *)normin = 'N';
+ i__1 = *n;
+ for (its = 1; its <= i__1; ++its) {
+
+/* Solve U*x = scale*v for a right eigenvector */
+/* or U'*x = scale*v for a left eigenvector, */
+/* overwriting x on v. */
+
+ dlatrs_("Upper", trans, "Nonunit", normin, n, &b[b_offset], ldb, &
+ vr[1], &scale, &work[1], &ierr);
+ *(unsigned char *)normin = 'Y';
+
+/* Test for sufficient growth in the norm of v. */
+
+ vnorm = dasum_(n, &vr[1], &c__1);
+ if (vnorm >= growto * scale) {
+ goto L120;
+ }
+
+/* Choose new orthogonal starting vector and try again. */
+
+ temp = *eps3 / (rootn + 1.);
+ vr[1] = *eps3;
+ i__2 = *n;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ vr[i__] = temp;
+/* L100: */
+ }
+ vr[*n - its + 1] -= *eps3 * rootn;
+/* L110: */
+ }
+
+/* Failure to find eigenvector in N iterations. */
+
+ *info = 1;
+
+L120:
+
+/* Normalize eigenvector. */
+
+ i__ = idamax_(n, &vr[1], &c__1);
+ d__2 = 1. / (d__1 = vr[i__], abs(d__1));
+ dscal_(n, &d__2, &vr[1], &c__1);
+ } else {
+
+/* Complex eigenvalue. */
+
+ if (*noinit) {
+
+/* Set initial vector. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ vr[i__] = *eps3;
+ vi[i__] = 0.;
+/* L130: */
+ }
+ } else {
+
+/* Scale supplied initial vector. */
+
+ d__1 = dnrm2_(n, &vr[1], &c__1);
+ d__2 = dnrm2_(n, &vi[1], &c__1);
+ norm = dlapy2_(&d__1, &d__2);
+ rec = *eps3 * rootn / max(norm,nrmsml);
+ dscal_(n, &rec, &vr[1], &c__1);
+ dscal_(n, &rec, &vi[1], &c__1);
+ }
+
+ if (*rightv) {
+
+/* LU decomposition with partial pivoting of B, replacing zero */
+/* pivots by EPS3. */
+
+/* The imaginary part of the (i,j)-th element of U is stored in */
+/* B(j+1,i). */
+
+ b[b_dim1 + 2] = -(*wi);
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ b[i__ + 1 + b_dim1] = 0.;
+/* L140: */
+ }
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ absbii = dlapy2_(&b[i__ + i__ * b_dim1], &b[i__ + 1 + i__ *
+ b_dim1]);
+ ei = h__[i__ + 1 + i__ * h_dim1];
+ if (absbii < abs(ei)) {
+
+/* Interchange rows and eliminate. */
+
+ xr = b[i__ + i__ * b_dim1] / ei;
+ xi = b[i__ + 1 + i__ * b_dim1] / ei;
+ b[i__ + i__ * b_dim1] = ei;
+ b[i__ + 1 + i__ * b_dim1] = 0.;
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ temp = b[i__ + 1 + j * b_dim1];
+ b[i__ + 1 + j * b_dim1] = b[i__ + j * b_dim1] - xr *
+ temp;
+ b[j + 1 + (i__ + 1) * b_dim1] = b[j + 1 + i__ *
+ b_dim1] - xi * temp;
+ b[i__ + j * b_dim1] = temp;
+ b[j + 1 + i__ * b_dim1] = 0.;
+/* L150: */
+ }
+ b[i__ + 2 + i__ * b_dim1] = -(*wi);
+ b[i__ + 1 + (i__ + 1) * b_dim1] -= xi * *wi;
+ b[i__ + 2 + (i__ + 1) * b_dim1] += xr * *wi;
+ } else {
+
+/* Eliminate without interchanging rows. */
+
+ if (absbii == 0.) {
+ b[i__ + i__ * b_dim1] = *eps3;
+ b[i__ + 1 + i__ * b_dim1] = 0.;
+ absbii = *eps3;
+ }
+ ei = ei / absbii / absbii;
+ xr = b[i__ + i__ * b_dim1] * ei;
+ xi = -b[i__ + 1 + i__ * b_dim1] * ei;
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ b[i__ + 1 + j * b_dim1] = b[i__ + 1 + j * b_dim1] -
+ xr * b[i__ + j * b_dim1] + xi * b[j + 1 + i__
+ * b_dim1];
+ b[j + 1 + (i__ + 1) * b_dim1] = -xr * b[j + 1 + i__ *
+ b_dim1] - xi * b[i__ + j * b_dim1];
+/* L160: */
+ }
+ b[i__ + 2 + (i__ + 1) * b_dim1] -= *wi;
+ }
+
+/* Compute 1-norm of offdiagonal elements of i-th row. */
+
+ i__2 = *n - i__;
+ i__3 = *n - i__;
+ work[i__] = dasum_(&i__2, &b[i__ + (i__ + 1) * b_dim1], ldb)
+ + dasum_(&i__3, &b[i__ + 2 + i__ * b_dim1], &c__1);
+/* L170: */
+ }
+ if (b[*n + *n * b_dim1] == 0. && b[*n + 1 + *n * b_dim1] == 0.) {
+ b[*n + *n * b_dim1] = *eps3;
+ }
+ work[*n] = 0.;
+
+ i1 = *n;
+ i2 = 1;
+ i3 = -1;
+ } else {
+
+/* UL decomposition with partial pivoting of conjg(B), */
+/* replacing zero pivots by EPS3. */
+
+/* The imaginary part of the (i,j)-th element of U is stored in */
+/* B(j+1,i). */
+
+ b[*n + 1 + *n * b_dim1] = *wi;
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ b[*n + 1 + j * b_dim1] = 0.;
+/* L180: */
+ }
+
+ for (j = *n; j >= 2; --j) {
+ ej = h__[j + (j - 1) * h_dim1];
+ absbjj = dlapy2_(&b[j + j * b_dim1], &b[j + 1 + j * b_dim1]);
+ if (absbjj < abs(ej)) {
+
+/* Interchange columns and eliminate */
+
+ xr = b[j + j * b_dim1] / ej;
+ xi = b[j + 1 + j * b_dim1] / ej;
+ b[j + j * b_dim1] = ej;
+ b[j + 1 + j * b_dim1] = 0.;
+ i__1 = j - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = b[i__ + (j - 1) * b_dim1];
+ b[i__ + (j - 1) * b_dim1] = b[i__ + j * b_dim1] - xr *
+ temp;
+ b[j + i__ * b_dim1] = b[j + 1 + i__ * b_dim1] - xi *
+ temp;
+ b[i__ + j * b_dim1] = temp;
+ b[j + 1 + i__ * b_dim1] = 0.;
+/* L190: */
+ }
+ b[j + 1 + (j - 1) * b_dim1] = *wi;
+ b[j - 1 + (j - 1) * b_dim1] += xi * *wi;
+ b[j + (j - 1) * b_dim1] -= xr * *wi;
+ } else {
+
+/* Eliminate without interchange. */
+
+ if (absbjj == 0.) {
+ b[j + j * b_dim1] = *eps3;
+ b[j + 1 + j * b_dim1] = 0.;
+ absbjj = *eps3;
+ }
+ ej = ej / absbjj / absbjj;
+ xr = b[j + j * b_dim1] * ej;
+ xi = -b[j + 1 + j * b_dim1] * ej;
+ i__1 = j - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + (j - 1) * b_dim1] = b[i__ + (j - 1) * b_dim1]
+ - xr * b[i__ + j * b_dim1] + xi * b[j + 1 +
+ i__ * b_dim1];
+ b[j + i__ * b_dim1] = -xr * b[j + 1 + i__ * b_dim1] -
+ xi * b[i__ + j * b_dim1];
+/* L200: */
+ }
+ b[j + (j - 1) * b_dim1] += *wi;
+ }
+
+/* Compute 1-norm of offdiagonal elements of j-th column. */
+
+ i__1 = j - 1;
+ i__2 = j - 1;
+ work[j] = dasum_(&i__1, &b[j * b_dim1 + 1], &c__1) + dasum_(&
+ i__2, &b[j + 1 + b_dim1], ldb);
+/* L210: */
+ }
+ if (b[b_dim1 + 1] == 0. && b[b_dim1 + 2] == 0.) {
+ b[b_dim1 + 1] = *eps3;
+ }
+ work[1] = 0.;
+
+ i1 = 1;
+ i2 = *n;
+ i3 = 1;
+ }
+
+ i__1 = *n;
+ for (its = 1; its <= i__1; ++its) {
+ scale = 1.;
+ vmax = 1.;
+ vcrit = *bignum;
+
+/* Solve U*(xr,xi) = scale*(vr,vi) for a right eigenvector, */
+/* or U'*(xr,xi) = scale*(vr,vi) for a left eigenvector, */
+/* overwriting (xr,xi) on (vr,vi). */
+
+ i__2 = i2;
+ i__3 = i3;
+ for (i__ = i1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3)
+ {
+
+ if (work[i__] > vcrit) {
+ rec = 1. / vmax;
+ dscal_(n, &rec, &vr[1], &c__1);
+ dscal_(n, &rec, &vi[1], &c__1);
+ scale *= rec;
+ vmax = 1.;
+ vcrit = *bignum;
+ }
+
+ xr = vr[i__];
+ xi = vi[i__];
+ if (*rightv) {
+ i__4 = *n;
+ for (j = i__ + 1; j <= i__4; ++j) {
+ xr = xr - b[i__ + j * b_dim1] * vr[j] + b[j + 1 + i__
+ * b_dim1] * vi[j];
+ xi = xi - b[i__ + j * b_dim1] * vi[j] - b[j + 1 + i__
+ * b_dim1] * vr[j];
+/* L220: */
+ }
+ } else {
+ i__4 = i__ - 1;
+ for (j = 1; j <= i__4; ++j) {
+ xr = xr - b[j + i__ * b_dim1] * vr[j] + b[i__ + 1 + j
+ * b_dim1] * vi[j];
+ xi = xi - b[j + i__ * b_dim1] * vi[j] - b[i__ + 1 + j
+ * b_dim1] * vr[j];
+/* L230: */
+ }
+ }
+
+ w = (d__1 = b[i__ + i__ * b_dim1], abs(d__1)) + (d__2 = b[i__
+ + 1 + i__ * b_dim1], abs(d__2));
+ if (w > *smlnum) {
+ if (w < 1.) {
+ w1 = abs(xr) + abs(xi);
+ if (w1 > w * *bignum) {
+ rec = 1. / w1;
+ dscal_(n, &rec, &vr[1], &c__1);
+ dscal_(n, &rec, &vi[1], &c__1);
+ xr = vr[i__];
+ xi = vi[i__];
+ scale *= rec;
+ vmax *= rec;
+ }
+ }
+
+/* Divide by diagonal element of B. */
+
+ dladiv_(&xr, &xi, &b[i__ + i__ * b_dim1], &b[i__ + 1 +
+ i__ * b_dim1], &vr[i__], &vi[i__]);
+/* Computing MAX */
+ d__3 = (d__1 = vr[i__], abs(d__1)) + (d__2 = vi[i__], abs(
+ d__2));
+ vmax = max(d__3,vmax);
+ vcrit = *bignum / vmax;
+ } else {
+ i__4 = *n;
+ for (j = 1; j <= i__4; ++j) {
+ vr[j] = 0.;
+ vi[j] = 0.;
+/* L240: */
+ }
+ vr[i__] = 1.;
+ vi[i__] = 1.;
+ scale = 0.;
+ vmax = 1.;
+ vcrit = *bignum;
+ }
+/* L250: */
+ }
+
+/* Test for sufficient growth in the norm of (VR,VI). */
+
+ vnorm = dasum_(n, &vr[1], &c__1) + dasum_(n, &vi[1], &c__1);
+ if (vnorm >= growto * scale) {
+ goto L280;
+ }
+
+/* Choose a new orthogonal starting vector and try again. */
+
+ y = *eps3 / (rootn + 1.);
+ vr[1] = *eps3;
+ vi[1] = 0.;
+
+ i__3 = *n;
+ for (i__ = 2; i__ <= i__3; ++i__) {
+ vr[i__] = y;
+ vi[i__] = 0.;
+/* L260: */
+ }
+ vr[*n - its + 1] -= *eps3 * rootn;
+/* L270: */
+ }
+
+/* Failure to find eigenvector in N iterations */
+
+ *info = 1;
+
+L280:
+
+/* Normalize eigenvector. */
+
+ vnorm = 0.;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__3 = vnorm, d__4 = (d__1 = vr[i__], abs(d__1)) + (d__2 = vi[i__]
+ , abs(d__2));
+ vnorm = max(d__3,d__4);
+/* L290: */
+ }
+ d__1 = 1. / vnorm;
+ dscal_(n, &d__1, &vr[1], &c__1);
+ d__1 = 1. / vnorm;
+ dscal_(n, &d__1, &vi[1], &c__1);
+
+ }
+
+ return 0;
+
+/* End of DLAEIN */
+
+} /* dlaein_ */
diff --git a/contrib/libs/clapack/dlaev2.c b/contrib/libs/clapack/dlaev2.c
new file mode 100644
index 0000000000..6cd4c93fa6
--- /dev/null
+++ b/contrib/libs/clapack/dlaev2.c
@@ -0,0 +1,188 @@
+/* dlaev2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlaev2_(doublereal *a, doublereal *b, doublereal *c__,
+ doublereal *rt1, doublereal *rt2, doublereal *cs1, doublereal *sn1)
+{
+ /* System generated locals */
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ doublereal ab, df, cs, ct, tb, sm, tn, rt, adf, acs;
+ integer sgn1, sgn2;
+ doublereal acmn, acmx;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAEV2 computes the eigendecomposition of a 2-by-2 symmetric matrix */
+/* [ A B ] */
+/* [ B C ]. */
+/* On return, RT1 is the eigenvalue of larger absolute value, RT2 is the */
+/* eigenvalue of smaller absolute value, and (CS1,SN1) is the unit right */
+/* eigenvector for RT1, giving the decomposition */
+
+/* [ CS1 SN1 ] [ A B ] [ CS1 -SN1 ] = [ RT1 0 ] */
+/* [-SN1 CS1 ] [ B C ] [ SN1 CS1 ] [ 0 RT2 ]. */
+
+/* Arguments */
+/* ========= */
+
+/* A (input) DOUBLE PRECISION */
+/* The (1,1) element of the 2-by-2 matrix. */
+
+/* B (input) DOUBLE PRECISION */
+/* The (1,2) element and the conjugate of the (2,1) element of */
+/* the 2-by-2 matrix. */
+
+/* C (input) DOUBLE PRECISION */
+/* The (2,2) element of the 2-by-2 matrix. */
+
+/* RT1 (output) DOUBLE PRECISION */
+/* The eigenvalue of larger absolute value. */
+
+/* RT2 (output) DOUBLE PRECISION */
+/* The eigenvalue of smaller absolute value. */
+
+/* CS1 (output) DOUBLE PRECISION */
+/* SN1 (output) DOUBLE PRECISION */
+/* The vector (CS1, SN1) is a unit right eigenvector for RT1. */
+
+/* Further Details */
+/* =============== */
+
+/* RT1 is accurate to a few ulps barring over/underflow. */
+
+/* RT2 may be inaccurate if there is massive cancellation in the */
+/* determinant A*C-B*B; higher precision or correctly rounded or */
+/* correctly truncated arithmetic would be needed to compute RT2 */
+/* accurately in all cases. */
+
+/* CS1 and SN1 are accurate to a few ulps barring over/underflow. */
+
+/* Overflow is possible only if RT1 is within a factor of 5 of overflow. */
+/* Underflow is harmless if the input data is 0 or exceeds */
+/* underflow_threshold / macheps. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Compute the eigenvalues */
+
+ sm = *a + *c__;
+ df = *a - *c__;
+ adf = abs(df);
+ tb = *b + *b;
+ ab = abs(tb);
+ if (abs(*a) > abs(*c__)) {
+ acmx = *a;
+ acmn = *c__;
+ } else {
+ acmx = *c__;
+ acmn = *a;
+ }
+ if (adf > ab) {
+/* Computing 2nd power */
+ d__1 = ab / adf;
+ rt = adf * sqrt(d__1 * d__1 + 1.);
+ } else if (adf < ab) {
+/* Computing 2nd power */
+ d__1 = adf / ab;
+ rt = ab * sqrt(d__1 * d__1 + 1.);
+ } else {
+
+/* Includes case AB=ADF=0 */
+
+ rt = ab * sqrt(2.);
+ }
+ if (sm < 0.) {
+ *rt1 = (sm - rt) * .5;
+ sgn1 = -1;
+
+/* Order of execution important. */
+/* To get fully accurate smaller eigenvalue, */
+/* next line needs to be executed in higher precision. */
+
+ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
+ } else if (sm > 0.) {
+ *rt1 = (sm + rt) * .5;
+ sgn1 = 1;
+
+/* Order of execution important. */
+/* To get fully accurate smaller eigenvalue, */
+/* next line needs to be executed in higher precision. */
+
+ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
+ } else {
+
+/* Includes case RT1 = RT2 = 0 */
+
+ *rt1 = rt * .5;
+ *rt2 = rt * -.5;
+ sgn1 = 1;
+ }
+
+/* Compute the eigenvector */
+
+ if (df >= 0.) {
+ cs = df + rt;
+ sgn2 = 1;
+ } else {
+ cs = df - rt;
+ sgn2 = -1;
+ }
+ acs = abs(cs);
+ if (acs > ab) {
+ ct = -tb / cs;
+ *sn1 = 1. / sqrt(ct * ct + 1.);
+ *cs1 = ct * *sn1;
+ } else {
+ if (ab == 0.) {
+ *cs1 = 1.;
+ *sn1 = 0.;
+ } else {
+ tn = -cs / tb;
+ *cs1 = 1. / sqrt(tn * tn + 1.);
+ *sn1 = tn * *cs1;
+ }
+ }
+ if (sgn1 == sgn2) {
+ tn = *cs1;
+ *cs1 = -(*sn1);
+ *sn1 = tn;
+ }
+ return 0;
+
+/* End of DLAEV2 */
+
+} /* dlaev2_ */
diff --git a/contrib/libs/clapack/dlaexc.c b/contrib/libs/clapack/dlaexc.c
new file mode 100644
index 0000000000..03d6679702
--- /dev/null
+++ b/contrib/libs/clapack/dlaexc.c
@@ -0,0 +1,459 @@
+/* dlaexc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__4 = 4;
+static logical c_false = FALSE_;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__3 = 3;
+
+/* Subroutine */ int dlaexc_(logical *wantq, integer *n, doublereal *t,
+	integer *ldt, doublereal *q, integer *ldq, integer *j1, integer *n1,
+	integer *n2, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer q_dim1, q_offset, t_dim1, t_offset, i__1;
+    doublereal d__1, d__2, d__3;
+
+    /* Local variables */
+    doublereal d__[16]	/* was [4][4] */;
+    integer k;
+    doublereal u[3], x[4]	/* was [2][2] */;
+    integer j2, j3, j4;
+    doublereal u1[3], u2[3];
+    integer nd;
+    doublereal cs, t11, t22, t33, sn, wi1, wi2, wr1, wr2, eps, tau, tau1,
+	    tau2;
+    integer ierr;
+    doublereal temp;
+    extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *);
+    doublereal scale, dnorm, xnorm;
+    extern /* Subroutine */ int dlanv2_(doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, doublereal *), dlasy2_(
+	    logical *, logical *, integer *, integer *, integer *, doublereal
+	    *, integer *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *);
+    extern doublereal dlamch_(char *), dlange_(char *, integer *,
+	    integer *, doublereal *, integer *, doublereal *);
+    extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *,
+	     integer *, doublereal *), dlacpy_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *),
+	    dlartg_(doublereal *, doublereal *, doublereal *, doublereal *,
+	    doublereal *), dlarfx_(char *, integer *, integer *, doublereal *,
+	     doublereal *, doublereal *, integer *, doublereal *);
+    doublereal thresh, smlnum;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DLAEXC swaps adjacent diagonal blocks T11 and T22 of order 1 or 2 in */
+/*  an upper quasi-triangular matrix T by an orthogonal similarity */
+/*  transformation. */
+
+/*  T must be in Schur canonical form, that is, block upper triangular */
+/*  with 1-by-1 and 2-by-2 diagonal blocks; each 2-by-2 diagonal block */
+/*  has its diagonal elements equal and its off-diagonal elements of */
+/*  opposite sign. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  WANTQ   (input) LOGICAL */
+/*          = .TRUE. : accumulate the transformation in the matrix Q; */
+/*          = .FALSE.: do not accumulate the transformation. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix T. N >= 0. */
+
+/*  T       (input/output) DOUBLE PRECISION array, dimension (LDT,N) */
+/*          On entry, the upper quasi-triangular matrix T, in Schur */
+/*          canonical form. */
+/*          On exit, the updated matrix T, again in Schur canonical form. */
+
+/*  LDT     (input) INTEGER */
+/*          The leading dimension of the array T. LDT >= max(1,N). */
+
+/*  Q       (input/output) DOUBLE PRECISION array, dimension (LDQ,N) */
+/*          On entry, if WANTQ is .TRUE., the orthogonal matrix Q. */
+/*          On exit, if WANTQ is .TRUE., the updated matrix Q. */
+/*          If WANTQ is .FALSE., Q is not referenced. */
+
+/*  LDQ     (input) INTEGER */
+/*          The leading dimension of the array Q. */
+/*          LDQ >= 1; and if WANTQ is .TRUE., LDQ >= N. */
+
+/*  J1      (input) INTEGER */
+/*          The index of the first row of the first block T11. */
+
+/*  N1      (input) INTEGER */
+/*          The order of the first block T11. N1 = 0, 1 or 2. */
+
+/*  N2      (input) INTEGER */
+/*          The order of the second block T22. N2 = 0, 1 or 2. */
+
+/*  WORK    (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0: successful exit */
+/*          = 1: the transformed matrix T would be too far from Schur */
+/*               form; the blocks are not swapped and T and Q are */
+/*               unchanged. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    t_dim1 = *ldt;
+    t_offset = 1 + t_dim1;
+    t -= t_offset;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+
+/*     Quick return if possible */
+
+    if (*n == 0 || *n1 == 0 || *n2 == 0) {
+	return 0;
+    }
+    if (*j1 + *n1 > *n) {
+	return 0;
+    }
+
+    j2 = *j1 + 1;
+    j3 = *j1 + 2;
+    j4 = *j1 + 3;
+
+    if (*n1 == 1 && *n2 == 1) {
+
+/*        Swap two 1-by-1 blocks. */
+
+	t11 = t[*j1 + *j1 * t_dim1];
+	t22 = t[j2 + j2 * t_dim1];
+
+/*        Determine the transformation to perform the interchange. */
+
+	d__1 = t22 - t11;
+	dlartg_(&t[*j1 + j2 * t_dim1], &d__1, &cs, &sn, &temp);
+
+/*        Apply transformation to the matrix T. */
+
+	if (j3 <= *n) {
+	    i__1 = *n - *j1 - 1;
+	    drot_(&i__1, &t[*j1 + j3 * t_dim1], ldt, &t[j2 + j3 * t_dim1],
+		    ldt, &cs, &sn);
+	}
+	i__1 = *j1 - 1;
+	drot_(&i__1, &t[*j1 * t_dim1 + 1], &c__1, &t[j2 * t_dim1 + 1], &c__1,
+		 &cs, &sn);
+
+	t[*j1 + *j1 * t_dim1] = t22;
+	t[j2 + j2 * t_dim1] = t11;
+
+	if (*wantq) {
+
+/*           Accumulate transformation in the matrix Q. */
+
+	    drot_(n, &q[*j1 * q_dim1 + 1], &c__1, &q[j2 * q_dim1 + 1], &c__1,
+		     &cs, &sn);
+	}
+
+    } else {
+
+/*        Swapping involves at least one 2-by-2 block. */
+
+/*        Copy the diagonal block of order N1+N2 to the local array D */
+/*        and compute its norm. */
+
+	nd = *n1 + *n2;
+	dlacpy_("Full", &nd, &nd, &t[*j1 + *j1 * t_dim1], ldt, d__, &c__4);
+	dnorm = dlange_("Max", &nd, &nd, d__, &c__4, &work[1]);
+
+/*        Compute machine-dependent threshold for test for accepting */
+/*        swap. */
+
+	eps = dlamch_("P");
+	smlnum = dlamch_("S") / eps;
+/* Computing MAX */
+	d__1 = eps * 10. * dnorm;
+	thresh = max(d__1,smlnum);
+
+/*        Solve T11*X - X*T22 = scale*T12 for X. */
+
+	dlasy2_(&c_false, &c_false, &c_n1, n1, n2, d__, &c__4, &d__[*n1 + 1 +
+		(*n1 + 1 << 2) - 5], &c__4, &d__[(*n1 + 1 << 2) - 4], &c__4, &
+		scale, x, &c__2, &xnorm, &ierr);
+
+/*        Swap the adjacent diagonal blocks. */
+
+	k = *n1 + *n1 + *n2 - 3;
+	switch (k) {
+	    case 1:  goto L10;
+	    case 2:  goto L20;
+	    case 3:  goto L30;
+	}
+
+L10:
+
+/*        N1 = 1, N2 = 2: generate elementary reflector H so that: */
+
+/*        ( scale, X11, X12 ) H = ( 0, 0, * ) */
+
+	u[0] = scale;
+	u[1] = x[0];
+	u[2] = x[2];
+	dlarfg_(&c__3, &u[2], u, &c__1, &tau);
+	u[2] = 1.;
+	t11 = t[*j1 + *j1 * t_dim1];
+
+/*        Perform swap provisionally on diagonal block in D. */
+
+	dlarfx_("L", &c__3, &c__3, u, &tau, d__, &c__4, &work[1]);
+	dlarfx_("R", &c__3, &c__3, u, &tau, d__, &c__4, &work[1]);
+
+/*        Test whether to reject swap. */
+
+/* Computing MAX */
+	d__2 = abs(d__[2]), d__3 = abs(d__[6]), d__2 = max(d__2,d__3), d__3 =
+		(d__1 = d__[10] - t11, abs(d__1));
+	if (max(d__2,d__3) > thresh) {
+	    goto L50;
+	}
+
+/*        Accept swap: apply transformation to the entire matrix T. */
+
+	i__1 = *n - *j1 + 1;
+	dlarfx_("L", &c__3, &i__1, u, &tau, &t[*j1 + *j1 * t_dim1], ldt, &
+		work[1]);
+	dlarfx_("R", &j2, &c__3, u, &tau, &t[*j1 * t_dim1 + 1], ldt, &work[1]);
+
+	t[j3 + *j1 * t_dim1] = 0.;
+	t[j3 + j2 * t_dim1] = 0.;
+	t[j3 + j3 * t_dim1] = t11;
+
+	if (*wantq) {
+
+/*           Accumulate transformation in the matrix Q. */
+
+	    dlarfx_("R", n, &c__3, u, &tau, &q[*j1 * q_dim1 + 1], ldq, &work[
+		    1]);
+	}
+	goto L40;
+
+L20:
+
+/*        N1 = 2, N2 = 1: generate elementary reflector H so that: */
+
+/*        H (  -X11 ) = ( * ) */
+/*          (  -X21 ) = ( 0 ) */
+/*          ( scale ) = ( 0 ) */
+
+	u[0] = -x[0];
+	u[1] = -x[1];
+	u[2] = scale;
+	dlarfg_(&c__3, u, &u[1], &c__1, &tau);
+	u[0] = 1.;
+	t33 = t[j3 + j3 * t_dim1];
+
+/*        Perform swap provisionally on diagonal block in D. */
+
+	dlarfx_("L", &c__3, &c__3, u, &tau, d__, &c__4, &work[1]);
+	dlarfx_("R", &c__3, &c__3, u, &tau, d__, &c__4, &work[1]);
+
+/*        Test whether to reject swap. */
+
+/* Computing MAX */
+	d__2 = abs(d__[1]), d__3 = abs(d__[2]), d__2 = max(d__2,d__3), d__3 =
+		(d__1 = d__[0] - t33, abs(d__1));
+	if (max(d__2,d__3) > thresh) {
+	    goto L50;
+	}
+
+/*        Accept swap: apply transformation to the entire matrix T. */
+
+	dlarfx_("R", &j3, &c__3, u, &tau, &t[*j1 * t_dim1 + 1], ldt, &work[1]);
+	i__1 = *n - *j1;
+	dlarfx_("L", &c__3, &i__1, u, &tau, &t[*j1 + j2 * t_dim1], ldt, &work[
+		1]);
+
+	t[*j1 + *j1 * t_dim1] = t33;
+	t[j2 + *j1 * t_dim1] = 0.;
+	t[j3 + *j1 * t_dim1] = 0.;
+
+	if (*wantq) {
+
+/*           Accumulate transformation in the matrix Q. */
+
+	    dlarfx_("R", n, &c__3, u, &tau, &q[*j1 * q_dim1 + 1], ldq, &work[
+		    1]);
+	}
+	goto L40;
+
+L30:
+
+/*        N1 = 2, N2 = 2: generate elementary reflectors H(1) and H(2) so */
+/*        that: */
+
+/*        H(2) H(1) (  -X11  -X12 ) = (  *  * ) */
+/*                  (  -X21  -X22 )   (  0  * ) */
+/*                  ( scale    0  )   (  0  0 ) */
+/*                  (    0  scale )   (  0  0 ) */
+
+	u1[0] = -x[0];
+	u1[1] = -x[1];
+	u1[2] = scale;
+	dlarfg_(&c__3, u1, &u1[1], &c__1, &tau1);
+	u1[0] = 1.;
+
+	temp = -tau1 * (x[2] + u1[1] * x[3]);
+	u2[0] = -temp * u1[1] - x[3];
+	u2[1] = -temp * u1[2];
+	u2[2] = scale;
+	dlarfg_(&c__3, u2, &u2[1], &c__1, &tau2);
+	u2[0] = 1.;
+
+/*        Perform swap provisionally on diagonal block in D. */
+
+	dlarfx_("L", &c__3, &c__4, u1, &tau1, d__, &c__4, &work[1])
+		;
+	dlarfx_("R", &c__4, &c__3, u1, &tau1, d__, &c__4, &work[1])
+		;
+	dlarfx_("L", &c__3, &c__4, u2, &tau2, &d__[1], &c__4, &work[1]);
+	dlarfx_("R", &c__4, &c__3, u2, &tau2, &d__[4], &c__4, &work[1]);
+
+/*        Test whether to reject swap. */
+
+/* Computing MAX */
+	d__1 = abs(d__[2]), d__2 = abs(d__[6]), d__1 = max(d__1,d__2), d__2 =
+		abs(d__[3]), d__1 = max(d__1,d__2), d__2 = abs(d__[7]);
+	if (max(d__1,d__2) > thresh) {
+	    goto L50;
+	}
+
+/*        Accept swap: apply transformation to the entire matrix T. */
+
+	i__1 = *n - *j1 + 1;
+	dlarfx_("L", &c__3, &i__1, u1, &tau1, &t[*j1 + *j1 * t_dim1], ldt, &
+		work[1]);
+	dlarfx_("R", &j4, &c__3, u1, &tau1, &t[*j1 * t_dim1 + 1], ldt, &work[
+		1]);
+	i__1 = *n - *j1 + 1;
+	dlarfx_("L", &c__3, &i__1, u2, &tau2, &t[j2 + *j1 * t_dim1], ldt, &
+		work[1]);
+	dlarfx_("R", &j4, &c__3, u2, &tau2, &t[j2 * t_dim1 + 1], ldt, &work[1]
+);
+
+	t[j3 + *j1 * t_dim1] = 0.;
+	t[j3 + j2 * t_dim1] = 0.;
+	t[j4 + *j1 * t_dim1] = 0.;
+	t[j4 + j2 * t_dim1] = 0.;
+
+	if (*wantq) {
+
+/*           Accumulate transformation in the matrix Q. */
+
+	    dlarfx_("R", n, &c__3, u1, &tau1, &q[*j1 * q_dim1 + 1], ldq, &
+		    work[1]);
+	    dlarfx_("R", n, &c__3, u2, &tau2, &q[j2 * q_dim1 + 1], ldq, &work[
+		    1]);
+	}
+
+L40:
+
+	if (*n2 == 2) {
+
+/*           Standardize new 2-by-2 block T11 */
+
+	    dlanv2_(&t[*j1 + *j1 * t_dim1], &t[*j1 + j2 * t_dim1], &t[j2 + *
+		    j1 * t_dim1], &t[j2 + j2 * t_dim1], &wr1, &wi1, &wr2, &
+		    wi2, &cs, &sn);
+	    i__1 = *n - *j1 - 1;
+	    drot_(&i__1, &t[*j1 + (*j1 + 2) * t_dim1], ldt, &t[j2 + (*j1 + 2)
+		    * t_dim1], ldt, &cs, &sn);
+	    i__1 = *j1 - 1;
+	    drot_(&i__1, &t[*j1 * t_dim1 + 1], &c__1, &t[j2 * t_dim1 + 1], &
+		    c__1, &cs, &sn);
+	    if (*wantq) {
+		drot_(n, &q[*j1 * q_dim1 + 1], &c__1, &q[j2 * q_dim1 + 1], &
+			c__1, &cs, &sn);
+	    }
+	}
+
+	if (*n1 == 2) {
+
+/*           Standardize new 2-by-2 block T22 */
+
+	    j3 = *j1 + *n2;
+	    j4 = j3 + 1;
+	    dlanv2_(&t[j3 + j3 * t_dim1], &t[j3 + j4 * t_dim1], &t[j4 + j3 *
+		    t_dim1], &t[j4 + j4 * t_dim1], &wr1, &wi1, &wr2, &wi2, &
+		    cs, &sn);
+	    if (j3 + 2 <= *n) {
+		i__1 = *n - j3 - 1;
+		drot_(&i__1, &t[j3 + (j3 + 2) * t_dim1], ldt, &t[j4 + (j3 + 2)
+			 * t_dim1], ldt, &cs, &sn);
+	    }
+	    i__1 = j3 - 1;
+	    drot_(&i__1, &t[j3 * t_dim1 + 1], &c__1, &t[j4 * t_dim1 + 1], &
+		    c__1, &cs, &sn);
+	    if (*wantq) {
+		drot_(n, &q[j3 * q_dim1 + 1], &c__1, &q[j4 * q_dim1 + 1], &
+			c__1, &cs, &sn);
+	    }
+	}
+
+    }
+    return 0;
+
+/*     Exit with INFO = 1 if swap was rejected. */
+
+L50:
+    *info = 1;
+    return 0;
+
+/*     End of DLAEXC */
+
+} /* dlaexc_ */
diff --git a/contrib/libs/clapack/dlag2.c b/contrib/libs/clapack/dlag2.c
new file mode 100644
index 0000000000..ecbc7490f4
--- /dev/null
+++ b/contrib/libs/clapack/dlag2.c
@@ -0,0 +1,356 @@
+/* dlag2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlag2_(doublereal *a, integer *lda, doublereal *b,
+	integer *ldb, doublereal *safmin, doublereal *scale1, doublereal *
+	scale2, doublereal *wr1, doublereal *wr2, doublereal *wi)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset;
+    doublereal d__1, d__2, d__3, d__4, d__5, d__6;
+
+    /* Builtin functions */
+    double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+    /* Local variables */
+    doublereal r__, c1, c2, c3, c4, c5, s1, s2, a11, a12, a21, a22, b11, b12,
+	    b22, pp, qq, ss, as11, as12, as22, sum, abi22, diff, bmin, wbig,
+	    wabs, wdet, binv11, binv22, discr, anorm, bnorm, bsize, shift,
+	    rtmin, rtmax, wsize, ascale, bscale, wscale, safmax, wsmall;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DLAG2 computes the eigenvalues of a 2 x 2 generalized eigenvalue */
+/*  problem A - w B, with scaling as necessary to avoid over-/underflow. */
+
+/*  The scaling factor "s" results in a modified eigenvalue equation */
+
+/*      s A - w B */
+
+/*  where s is a non-negative scaling factor chosen so that w, w B, */
+/*  and s A do not overflow and, if possible, do not underflow, either. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  A       (input) DOUBLE PRECISION array, dimension (LDA, 2) */
+/*          On entry, the 2 x 2 matrix A.  It is assumed that its 1-norm */
+/*          is less than 1/SAFMIN.  Entries less than */
+/*          sqrt(SAFMIN)*norm(A) are subject to being treated as zero. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= 2. */
+
+/*  B       (input) DOUBLE PRECISION array, dimension (LDB, 2) */
+/*          On entry, the 2 x 2 upper triangular matrix B.  It is */
+/*          assumed that the one-norm of B is less than 1/SAFMIN.  The */
+/*          diagonals should be at least sqrt(SAFMIN) times the largest */
+/*          element of B (in absolute value); if a diagonal is smaller */
+/*          than that, then  +/- sqrt(SAFMIN) will be used instead of */
+/*          that diagonal. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= 2. */
+
+/*  SAFMIN  (input) DOUBLE PRECISION */
+/*          The smallest positive number s.t. 1/SAFMIN does not */
+/*          overflow.  (This should always be DLAMCH('S') -- it is an */
+/*          argument in order to avoid having to call DLAMCH frequently.) */
+
+/*  SCALE1  (output) DOUBLE PRECISION */
+/*          A scaling factor used to avoid over-/underflow in the */
+/*          eigenvalue equation which defines the first eigenvalue.  If */
+/*          the eigenvalues are complex, then the eigenvalues are */
+/*          ( WR1  +/-  WI i ) / SCALE1  (which may lie outside the */
+/*          exponent range of the machine), SCALE1=SCALE2, and SCALE1 */
+/*          will always be positive.  If the eigenvalues are real, then */
+/*          the first (real) eigenvalue is  WR1 / SCALE1 , but this may */
+/*          overflow or underflow, and in fact, SCALE1 may be zero or */
+/*          less than the underflow threshold if the exact eigenvalue */
+/*          is sufficiently large. */
+
+/*  SCALE2  (output) DOUBLE PRECISION */
+/*          A scaling factor used to avoid over-/underflow in the */
+/*          eigenvalue equation which defines the second eigenvalue.  If */
+/*          the eigenvalues are complex, then SCALE2=SCALE1.  If the */
+/*          eigenvalues are real, then the second (real) eigenvalue is */
+/*          WR2 / SCALE2 , but this may overflow or underflow, and in */
+/*          fact, SCALE2 may be zero or less than the underflow */
+/*          threshold if the exact eigenvalue is sufficiently large. */
+
+/*  WR1     (output) DOUBLE PRECISION */
+/*          If the eigenvalue is real, then WR1 is SCALE1 times the */
+/*          eigenvalue closest to the (2,2) element of A B**(-1).  If the */
+/*          eigenvalue is complex, then WR1=WR2 is SCALE1 times the real */
+/*          part of the eigenvalues. */
+
+/*  WR2     (output) DOUBLE PRECISION */
+/*          If the eigenvalue is real, then WR2 is SCALE2 times the */
+/*          other eigenvalue.  If the eigenvalue is complex, then */
+/*          WR1=WR2 is SCALE1 times the real part of the eigenvalues. */
+
+/*  WI      (output) DOUBLE PRECISION */
+/*          If the eigenvalue is real, then WI is zero.  If the */
+/*          eigenvalue is complex, then WI is SCALE1 times the imaginary */
+/*          part of the eigenvalues.  WI will always be non-negative. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    rtmin = sqrt(*safmin);
+    rtmax = 1. / rtmin;
+    safmax = 1. / *safmin;
+
+/*     Scale A */
+
+/* Computing MAX */
+    d__5 = (d__1 = a[a_dim1 + 1], abs(d__1)) + (d__2 = a[a_dim1 + 2], abs(
+	    d__2)), d__6 = (d__3 = a[(a_dim1 << 1) + 1], abs(d__3)) + (d__4 =
+	    a[(a_dim1 << 1) + 2], abs(d__4)), d__5 = max(d__5,d__6);
+    anorm = max(d__5,*safmin);
+    ascale = 1. / anorm;
+    a11 = ascale * a[a_dim1 + 1];
+    a21 = ascale * a[a_dim1 + 2];
+    a12 = ascale * a[(a_dim1 << 1) + 1];
+    a22 = ascale * a[(a_dim1 << 1) + 2];
+
+/*     Perturb B if necessary to ensure non-singularity */
+
+    b11 = b[b_dim1 + 1];
+    b12 = b[(b_dim1 << 1) + 1];
+    b22 = b[(b_dim1 << 1) + 2];
+/* Computing MAX */
+    d__1 = abs(b11), d__2 = abs(b12), d__1 = max(d__1,d__2), d__2 = abs(b22),
+	     d__1 = max(d__1,d__2);
+    bmin = rtmin * max(d__1,rtmin);
+    if (abs(b11) < bmin) {
+	b11 = d_sign(&bmin, &b11);
+    }
+    if (abs(b22) < bmin) {
+	b22 = d_sign(&bmin, &b22);
+    }
+
+/*     Scale B */
+
+/* Computing MAX */
+    d__1 = abs(b11), d__2 = abs(b12) + abs(b22), d__1 = max(d__1,d__2);
+    bnorm = max(d__1,*safmin);
+/* Computing MAX */
+    d__1 = abs(b11), d__2 = abs(b22);
+    bsize = max(d__1,d__2);
+    bscale = 1. / bsize;
+    b11 *= bscale;
+    b12 *= bscale;
+    b22 *= bscale;
+
+/*     Compute larger eigenvalue by method described by C. van Loan */
+
+/*     ( AS is A shifted by -SHIFT*B ) */
+
+    binv11 = 1. / b11;
+    binv22 = 1. / b22;
+    s1 = a11 * binv11;
+    s2 = a22 * binv22;
+    if (abs(s1) <= abs(s2)) {
+	as12 = a12 - s1 * b12;
+	as22 = a22 - s1 * b22;
+	ss = a21 * (binv11 * binv22);
+	abi22 = as22 * binv22 - ss * b12;
+	pp = abi22 * .5;
+	shift = s1;
+    } else {
+	as12 = a12 - s2 * b12;
+	as11 = a11 - s2 * b11;
+	ss = a21 * (binv11 * binv22);
+	abi22 = -ss * b12;
+	pp = (as11 * binv11 + abi22) * .5;
+	shift = s2;
+    }
+    qq = ss * as12;
+    if ((d__1 = pp * rtmin, abs(d__1)) >= 1.) {
+/* Computing 2nd power */
+	d__1 = rtmin * pp;
+	discr = d__1 * d__1 + qq * *safmin;
+	r__ = sqrt((abs(discr))) * rtmax;
+    } else {
+/* Computing 2nd power */
+	d__1 = pp;
+	if (d__1 * d__1 + abs(qq) <= *safmin) {
+/* Computing 2nd power */
+	    d__1 = rtmax * pp;
+	    discr = d__1 * d__1 + qq * safmax;
+	    r__ = sqrt((abs(discr))) * rtmin;
+	} else {
+/* Computing 2nd power */
+	    d__1 = pp;
+	    discr = d__1 * d__1 + qq;
+	    r__ = sqrt((abs(discr)));
+	}
+    }
+
+/*     Note: the test of R in the following IF is to cover the case when */
+/*           DISCR is small and negative and is flushed to zero during */
+/*           the calculation of R.  On machines which have a consistent */
+/*           flush-to-zero threshold and handle numbers above that */
+/*           threshold correctly, it would not be necessary. */
+
+    if (discr >= 0. || r__ == 0.) {
+	sum = pp + d_sign(&r__, &pp);
+	diff = pp - d_sign(&r__, &pp);
+	wbig = shift + sum;
+
+/*        Compute smaller eigenvalue */
+
+	wsmall = shift + diff;
+/* Computing MAX */
+	d__1 = abs(wsmall);
+	if (abs(wbig) * .5 > max(d__1,*safmin)) {
+	    wdet = (a11 * a22 - a12 * a21) * (binv11 * binv22);
+	    wsmall = wdet / wbig;
+	}
+
+/*        Choose (real) eigenvalue closest to 2,2 element of A*B**(-1) */
+/*        for WR1. */
+
+	if (pp > abi22) {
+	    *wr1 = min(wbig,wsmall);
+	    *wr2 = max(wbig,wsmall);
+	} else {
+	    *wr1 = max(wbig,wsmall);
+	    *wr2 = min(wbig,wsmall);
+	}
+	*wi = 0.;
+    } else {
+
+/*        Complex eigenvalues */
+
+	*wr1 = shift + pp;
+	*wr2 = *wr1;
+	*wi = r__;
+    }
+
+/*     Further scaling to avoid underflow and overflow in computing */
+/*     SCALE1 and overflow in computing w*B. */
+
+/*     This scale factor (WSCALE) is bounded from above using C1 and C2, */
+/*     and from below using C3 and C4. */
+/*        C1 implements the condition  s A  must never overflow. */
+/*        C2 implements the condition  w B  must never overflow. */
+/*        C3, with C2, */
+/*           implement the condition that s A - w B must never overflow. */
+/*        C4 implements the condition  s    should not underflow. */
+/*        C5 implements the condition  max(s,|w|) should be at least 2. */
+
+    c1 = bsize * (*safmin * max(1.,ascale));
+    c2 = *safmin * max(1.,bnorm);
+    c3 = bsize * *safmin;
+    if (ascale <= 1. && bsize <= 1.) {
+/* Computing MIN */
+	d__1 = 1., d__2 = ascale / *safmin * bsize;
+	c4 = min(d__1,d__2);
+    } else {
+	c4 = 1.;
+    }
+    if (ascale <= 1. || bsize <= 1.) {
+/* Computing MIN */
+	d__1 = 1., d__2 = ascale * bsize;
+	c5 = min(d__1,d__2);
+    } else {
+	c5 = 1.;
+    }
+
+/*     Scale first eigenvalue */
+
+    wabs = abs(*wr1) + abs(*wi);
+/* Computing MAX */
+/* Computing MIN */
+    d__3 = c4, d__4 = max(wabs,c5) * .5;
+    d__1 = max(*safmin,c1), d__2 = (wabs * c2 + c3) * 1.0000100000000001,
+	    d__1 = max(d__1,d__2), d__2 = min(d__3,d__4);
+    wsize = max(d__1,d__2);
+    if (wsize != 1.) {
+	wscale = 1. / wsize;
+	if (wsize > 1.) {
+	    *scale1 = max(ascale,bsize) * wscale * min(ascale,bsize);
+	} else {
+	    *scale1 = min(ascale,bsize) * wscale * max(ascale,bsize);
+	}
+	*wr1 *= wscale;
+	if (*wi != 0.) {
+	    *wi *= wscale;
+	    *wr2 = *wr1;
+	    *scale2 = *scale1;
+	}
+    } else {
+	*scale1 = ascale * bsize;
+	*scale2 = *scale1;
+    }
+
+/*     Scale second eigenvalue (if real) */
+
+    if (*wi == 0.) {
+/* Computing MAX */
+/* Computing MIN */
+/* Computing MAX */
+	d__5 = abs(*wr2);
+	d__3 = c4, d__4 = max(d__5,c5) * .5;
+	d__1 = max(*safmin,c1), d__2 = (abs(*wr2) * c2 + c3) *
+		1.0000100000000001, d__1 = max(d__1,d__2), d__2 = min(d__3,
+		d__4);
+	wsize = max(d__1,d__2);
+	if (wsize != 1.) {
+	    wscale = 1. / wsize;
+	    if (wsize > 1.) {
+		*scale2 = max(ascale,bsize) * wscale * min(ascale,bsize);
+	    } else {
+		*scale2 = min(ascale,bsize) * wscale * max(ascale,bsize);
+	    }
+	    *wr2 *= wscale;
+	} else {
+	    *scale2 = ascale * bsize;
+	}
+    }
+
+/*     End of DLAG2 */
+
+    return 0;
+} /* dlag2_ */
diff --git a/contrib/libs/clapack/dlag2s.c b/contrib/libs/clapack/dlag2s.c
new file mode 100644
index 0000000000..bdc68fc8e7
--- /dev/null
+++ b/contrib/libs/clapack/dlag2s.c
@@ -0,0 +1,115 @@
+/* dlag2s.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlag2s_(integer *m, integer *n, doublereal *a, integer *
+	lda, real *sa, integer *ldsa, integer *info)
+{
+    /* System generated locals */
+    integer sa_dim1, sa_offset, a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j;
+    doublereal rmax;
+    extern doublereal slamch_(char *);
+
+
+/*  -- LAPACK PROTOTYPE auxiliary routine (version 3.1.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     August 2007 */
+
+/*     .. */
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DLAG2S converts a DOUBLE PRECISION matrix, A, to a SINGLE */
+/*  PRECISION matrix, SA. */
+
+/*  RMAX is the overflow for the SINGLE PRECISION arithmetic */
+/*  DLAG2S checks that all the entries of A are between -RMAX and */
+/*  RMAX. If not the conversion is aborted and a flag is raised. */
+
+/*  This is an auxiliary routine so there is no argument checking. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix A.  M >= 0. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix A.  N >= 0. */
+
+/*  A       (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/*          On entry, the M-by-N coefficient matrix A. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,M). */
+
+/*  SA      (output) REAL array, dimension (LDSA,N) */
+/*          On exit, if INFO=0, the M-by-N coefficient matrix SA; if */
+/*          INFO>0, the content of SA is unspecified. */
+
+/*  LDSA    (input) INTEGER */
+/*          The leading dimension of the array SA.  LDSA >= max(1,M). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit. */
+/*          = 1:  an entry of the matrix A is greater than the SINGLE */
+/*                PRECISION overflow threshold, in this case, the content */
+/*                of SA in exit is unspecified. */
+
+/*  ========= */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    sa_dim1 = *ldsa;
+    sa_offset = 1 + sa_dim1;
+    sa -= sa_offset;
+
+    /* Function Body */
+    rmax = slamch_("O");
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *m;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (a[i__ + j * a_dim1] < -rmax || a[i__ + j * a_dim1] > rmax) {
+		*info = 1;
+		goto L30;
+	    }
+	    sa[i__ + j * sa_dim1] = a[i__ + j * a_dim1];
+/* L10: */
+	}
+/* L20: */
+    }
+    *info = 0;
+L30:
+    return 0;
+
+/*     End of DLAG2S */
+
+} /* dlag2s_ */
diff --git a/contrib/libs/clapack/dlags2.c b/contrib/libs/clapack/dlags2.c
new file mode 100644
index 0000000000..4fb02ff6a2
--- /dev/null
+++ b/contrib/libs/clapack/dlags2.c
@@ -0,0 +1,292 @@
+/* dlags2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlags2_(logical *upper, doublereal *a1, doublereal *a2,
+	doublereal *a3, doublereal *b1, doublereal *b2, doublereal *b3,
+	doublereal *csu, doublereal *snu, doublereal *csv, doublereal *snv,
+	doublereal *csq, doublereal *snq)
+{
+    /* System generated locals */
+    doublereal d__1;
+
+    /* Local variables */
+    doublereal a, b, c__, d__, r__, s1, s2, ua11, ua12, ua21, ua22, vb11,
+	    vb12, vb21, vb22, csl, csr, snl, snr, aua11, aua12, aua21, aua22,
+	    avb11, avb12, avb21, avb22, ua11r, ua22r, vb11r, vb22r;
+    extern /* Subroutine */ int dlasv2_(doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *), dlartg_(doublereal *,
+	    doublereal *, doublereal *, doublereal *, doublereal *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DLAGS2 computes 2-by-2 orthogonal matrices U, V and Q, such */
+/*  that if ( UPPER ) then */
+
+/*            U'*A*Q = U'*( A1 A2 )*Q = ( x  0  ) */
+/*                        ( 0  A3 )     ( x  x  ) */
+/*  and */
+/*            V'*B*Q = V'*( B1 B2 )*Q = ( x  0  ) */
+/*                        ( 0  B3 )     ( x  x  ) */
+
+/*  or if ( .NOT.UPPER ) then */
+
+/*            U'*A*Q = U'*( A1 0  )*Q = ( x  x  ) */
+/*                        ( A2 A3 )     ( 0  x  ) */
+/*  and */
+/*            V'*B*Q = V'*( B1 0  )*Q = ( x  x  ) */
+/*                        ( B2 B3 )     ( 0  x  ) */
+
+/*  The rows of the transformed A and B are parallel, where */
+
+/*    U = (  CSU  SNU ), V = (  CSV SNV ), Q = (  CSQ   SNQ ) */
+/*        ( -SNU  CSU )      ( -SNV CSV )      ( -SNQ   CSQ ) */
+
+/*  Z' denotes the transpose of Z. */
+
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPPER   (input) LOGICAL */
+/*          = .TRUE.: the input matrices A and B are upper triangular. */
+/*          = .FALSE.: the input matrices A and B are lower triangular. */
+
+/*  A1      (input) DOUBLE PRECISION */
+/*  A2      (input) DOUBLE PRECISION */
+/*  A3      (input) DOUBLE PRECISION */
+/*          On entry, A1, A2 and A3 are elements of the input 2-by-2 */
+/*          upper (lower) triangular matrix A. */
+
+/*  B1      (input) DOUBLE PRECISION */
+/*  B2      (input) DOUBLE PRECISION */
+/*  B3      (input) DOUBLE PRECISION */
+/*          On entry, B1, B2 and B3 are elements of the input 2-by-2 */
+/*          upper (lower) triangular matrix B. */
+
+/*  CSU     (output) DOUBLE PRECISION */
+/*  SNU     (output) DOUBLE PRECISION */
+/*          The desired orthogonal matrix U. */
+
+/*  CSV     (output) DOUBLE PRECISION */
+/*  SNV     (output) DOUBLE PRECISION */
+/*          The desired orthogonal matrix V. */
+
+/*  CSQ     (output) DOUBLE PRECISION */
+/*  SNQ     (output) DOUBLE PRECISION */
+/*          The desired orthogonal matrix Q. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    if (*upper) {
+
+/*        Input matrices A and B are upper triangular matrices */
+
+/*        Form matrix C = A*adj(B) = ( a b ) */
+/*                                   ( 0 d ) */
+
+	a = *a1 * *b3;
+	d__ = *a3 * *b1;
+	b = *a2 * *b1 - *a1 * *b2;
+
+/*        The SVD of real 2-by-2 triangular C */
+
+/*         ( CSL -SNL )*( A B )*(  CSR  SNR ) = ( R 0 ) */
+/*         ( SNL  CSL ) ( 0 D ) ( -SNR  CSR )   ( 0 T ) */
+
+	dlasv2_(&a, &b, &d__, &s1, &s2, &snr, &csr, &snl, &csl);
+
+	if (abs(csl) >= abs(snl) || abs(csr) >= abs(snr)) {
+
+/*           Compute the (1,1) and (1,2) elements of U'*A and V'*B, */
+/*           and (1,2) element of |U|'*|A| and |V|'*|B|. */
+
+	    ua11r = csl * *a1;
+	    ua12 = csl * *a2 + snl * *a3;
+
+	    vb11r = csr * *b1;
+	    vb12 = csr * *b2 + snr * *b3;
+
+	    aua12 = abs(csl) * abs(*a2) + abs(snl) * abs(*a3);
+	    avb12 = abs(csr) * abs(*b2) + abs(snr) * abs(*b3);
+
+/*           Choose Q to zero the (1,2) elements of U'*A and V'*B */
+
+	    if (abs(ua11r) + abs(ua12) != 0.) {
+		if (aua12 / (abs(ua11r) + abs(ua12)) <= avb12 / (abs(vb11r) +
+			abs(vb12))) {
+		    d__1 = -ua11r;
+		    dlartg_(&d__1, &ua12, csq, snq, &r__);
+		} else {
+		    d__1 = -vb11r;
+		    dlartg_(&d__1, &vb12, csq, snq, &r__);
+		}
+	    } else {
+		d__1 = -vb11r;
+		dlartg_(&d__1, &vb12, csq, snq, &r__);
+	    }
+
+	    *csu = csl;
+	    *snu = -snl;
+	    *csv = csr;
+	    *snv = -snr;
+
+	} else {
+
+/*           Compute the (2,1) and (2,2) elements of U'*A and V'*B, */
+/*           and (2,2) element of |U|'*|A| and |V|'*|B|. */
+
+	    ua21 = -snl * *a1;
+	    ua22 = -snl * *a2 + csl * *a3;
+
+	    vb21 = -snr * *b1;
+	    vb22 = -snr * *b2 + csr * *b3;
+
+	    aua22 = abs(snl) * abs(*a2) + abs(csl) * abs(*a3);
+	    avb22 = abs(snr) * abs(*b2) + abs(csr) * abs(*b3);
+
+/*           Choose Q to zero the (2,2) elements of U'*A and V'*B, then swap. */
+
+	    if (abs(ua21) + abs(ua22) != 0.) {
+		if (aua22 / (abs(ua21) + abs(ua22)) <= avb22 / (abs(vb21) +
+			abs(vb22))) {
+		    d__1 = -ua21;
+		    dlartg_(&d__1, &ua22, csq, snq, &r__);
+		} else {
+		    d__1 = -vb21;
+		    dlartg_(&d__1, &vb22, csq, snq, &r__);
+		}
+	    } else {
+		d__1 = -vb21;
+		dlartg_(&d__1, &vb22, csq, snq, &r__);
+	    }
+
+	    *csu = snl;
+	    *snu = csl;
+	    *csv = snr;
+	    *snv = csr;
+
+	}
+
+    } else {
+
+/*        Input matrices A and B are lower triangular matrices */
+
+/*        Form matrix C = A*adj(B) = ( a 0 ) */
+/*                                   ( c d ) */
+
+	a = *a1 * *b3;
+	d__ = *a3 * *b1;
+	c__ = *a2 * *b3 - *a3 * *b2;
+
+/*        The SVD of real 2-by-2 triangular C */
+
+/*         ( CSL -SNL )*( A 0 )*(  CSR  SNR ) = ( R 0 ) */
+/*         ( SNL  CSL ) ( C D ) ( -SNR  CSR )   ( 0 T ) */
+
+	dlasv2_(&a, &c__, &d__, &s1, &s2, &snr, &csr, &snl, &csl);
+
+	if (abs(csr) >= abs(snr) || abs(csl) >= abs(snl)) {
+
+/*           Compute the (2,1) and (2,2) elements of U'*A and V'*B, */
+/*           and (2,1) element of |U|'*|A| and |V|'*|B|. */
+
+	    ua21 = -snr * *a1 + csr * *a2;
+	    ua22r = csr * *a3;
+
+	    vb21 = -snl * *b1 + csl * *b2;
+	    vb22r = csl * *b3;
+
+	    aua21 = abs(snr) * abs(*a1) + abs(csr) * abs(*a2);
+	    avb21 = abs(snl) * abs(*b1) + abs(csl) * abs(*b2);
+
+/*           Choose Q to zero the (2,1) elements of U'*A and V'*B. */
+
+	    if (abs(ua21) + abs(ua22r) != 0.) {
+		if (aua21 / (abs(ua21) + abs(ua22r)) <= avb21 / (abs(vb21) +
+			abs(vb22r))) {
+		    dlartg_(&ua22r, &ua21, csq, snq, &r__);
+		} else {
+		    dlartg_(&vb22r, &vb21, csq, snq, &r__);
+		}
+	    } else {
+		dlartg_(&vb22r, &vb21, csq, snq, &r__);
+	    }
+
+	    *csu = csr;
+	    *snu = -snr;
+	    *csv = csl;
+	    *snv = -snl;
+
+	} else {
+
+/*           Compute the (1,1) and (1,2) elements of U'*A and V'*B, */
+/*           and (1,1) element of |U|'*|A| and |V|'*|B|. */
+
+	    ua11 = csr * *a1 + snr * *a2;
+	    ua12 = snr * *a3;
+
+	    vb11 = csl * *b1 + snl * *b2;
+	    vb12 = snl * *b3;
+
+	    aua11 = abs(csr) * abs(*a1) + abs(snr) * abs(*a2);
+	    avb11 = abs(csl) * abs(*b1) + abs(snl) * abs(*b2);
+
+/*           Choose Q to zero the (1,1) elements of U'*A and V'*B, then swap. */
+
+	    if (abs(ua11) + abs(ua12) != 0.) {
+		if (aua11 / (abs(ua11) + abs(ua12)) <= avb11 / (abs(vb11) +
+			abs(vb12))) {
+		    dlartg_(&ua12, &ua11, csq, snq, &r__);
+		} else {
+		    dlartg_(&vb12, &vb11, csq, snq, &r__);
+		}
+	    } else {
+		dlartg_(&vb12, &vb11, csq, snq, &r__);
+	    }
+
+	    *csu = snr;
+	    *snu = csr;
+	    *csv = snl;
+	    *snv = csl;
+
+	}
+
+    }
+
+    return 0;
+
+/*     End of DLAGS2 */
+
+} /* dlags2_ */
diff --git a/contrib/libs/clapack/dlagtf.c b/contrib/libs/clapack/dlagtf.c
new file mode 100644
index 0000000000..533f53d436
--- /dev/null
+++ b/contrib/libs/clapack/dlagtf.c
@@ -0,0 +1,224 @@
+/* dlagtf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlagtf_(integer *n, doublereal *a, doublereal *lambda,
+ doublereal *b, doublereal *c__, doublereal *tol, doublereal *d__,
+ integer *in, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ integer k;
+ doublereal tl, eps, piv1, piv2, temp, mult, scale1, scale2;
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAGTF factorizes the matrix (T - lambda*I), where T is an n by n */
+/* tridiagonal matrix and lambda is a scalar, as */
+
+/* T - lambda*I = PLU, */
+
+/* where P is a permutation matrix, L is a unit lower tridiagonal matrix */
+/* with at most one non-zero sub-diagonal elements per column and U is */
+/* an upper triangular matrix with at most two non-zero super-diagonal */
+/* elements per column. */
+
+/* The factorization is obtained by Gaussian elimination with partial */
+/* pivoting and implicit row scaling. */
+
+/* The parameter LAMBDA is included in the routine so that DLAGTF may */
+/* be used, in conjunction with DLAGTS, to obtain eigenvectors of T by */
+/* inverse iteration. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, A must contain the diagonal elements of T. */
+
+/* On exit, A is overwritten by the n diagonal elements of the */
+/* upper triangular matrix U of the factorization of T. */
+
+/* LAMBDA (input) DOUBLE PRECISION */
+/* On entry, the scalar lambda. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, B must contain the (n-1) super-diagonal elements of */
+/* T. */
+
+/* On exit, B is overwritten by the (n-1) super-diagonal */
+/* elements of the matrix U of the factorization of T. */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, C must contain the (n-1) sub-diagonal elements of */
+/* T. */
+
+/* On exit, C is overwritten by the (n-1) sub-diagonal elements */
+/* of the matrix L of the factorization of T. */
+
+/* TOL (input) DOUBLE PRECISION */
+/* On entry, a relative tolerance used to indicate whether or */
+/* not the matrix (T - lambda*I) is nearly singular. TOL should */
+/* normally be chose as approximately the largest relative error */
+/* in the elements of T. For example, if the elements of T are */
+/* correct to about 4 significant figures, then TOL should be */
+/* set to about 5*10**(-4). If TOL is supplied as less than eps, */
+/* where eps is the relative machine precision, then the value */
+/* eps is used in place of TOL. */
+
+/* D (output) DOUBLE PRECISION array, dimension (N-2) */
+/* On exit, D is overwritten by the (n-2) second super-diagonal */
+/* elements of the matrix U of the factorization of T. */
+
+/* IN (output) INTEGER array, dimension (N) */
+/* On exit, IN contains details of the permutation matrix P. If */
+/* an interchange occurred at the kth step of the elimination, */
+/* then IN(k) = 1, otherwise IN(k) = 0. The element IN(n) */
+/* returns the smallest positive integer j such that */
+
+/* abs( u(j,j) ).le. norm( (T - lambda*I)(j) )*TOL, */
+
+/* where norm( A(j) ) denotes the sum of the absolute values of */
+/* the jth row of the matrix A. If no such j exists then IN(n) */
+/* is returned as zero. If IN(n) is returned as positive, then a */
+/* diagonal element of U is small, indicating that */
+/* (T - lambda*I) is singular or nearly singular, */
+
+/* INFO (output) INTEGER */
+/* = 0 : successful exit */
+/* .lt. 0: if INFO = -k, the kth argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --in;
+ --d__;
+ --c__;
+ --b;
+ --a;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ i__1 = -(*info);
+ xerbla_("DLAGTF", &i__1);
+ return 0;
+ }
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ a[1] -= *lambda;
+ in[*n] = 0;
+ if (*n == 1) {
+ if (a[1] == 0.) {
+ in[1] = 1;
+ }
+ return 0;
+ }
+
+ eps = dlamch_("Epsilon");
+
+ tl = max(*tol,eps);
+ scale1 = abs(a[1]) + abs(b[1]);
+ i__1 = *n - 1;
+ for (k = 1; k <= i__1; ++k) {
+ a[k + 1] -= *lambda;
+ scale2 = (d__1 = c__[k], abs(d__1)) + (d__2 = a[k + 1], abs(d__2));
+ if (k < *n - 1) {
+ scale2 += (d__1 = b[k + 1], abs(d__1));
+ }
+ if (a[k] == 0.) {
+ piv1 = 0.;
+ } else {
+ piv1 = (d__1 = a[k], abs(d__1)) / scale1;
+ }
+ if (c__[k] == 0.) {
+ in[k] = 0;
+ piv2 = 0.;
+ scale1 = scale2;
+ if (k < *n - 1) {
+ d__[k] = 0.;
+ }
+ } else {
+ piv2 = (d__1 = c__[k], abs(d__1)) / scale2;
+ if (piv2 <= piv1) {
+ in[k] = 0;
+ scale1 = scale2;
+ c__[k] /= a[k];
+ a[k + 1] -= c__[k] * b[k];
+ if (k < *n - 1) {
+ d__[k] = 0.;
+ }
+ } else {
+ in[k] = 1;
+ mult = a[k] / c__[k];
+ a[k] = c__[k];
+ temp = a[k + 1];
+ a[k + 1] = b[k] - mult * temp;
+ if (k < *n - 1) {
+ d__[k] = b[k + 1];
+ b[k + 1] = -mult * d__[k];
+ }
+ b[k] = temp;
+ c__[k] = mult;
+ }
+ }
+ if (max(piv1,piv2) <= tl && in[*n] == 0) {
+ in[*n] = k;
+ }
+/* L10: */
+ }
+ if ((d__1 = a[*n], abs(d__1)) <= scale1 * tl && in[*n] == 0) {
+ in[*n] = *n;
+ }
+
+ return 0;
+
+/* End of DLAGTF */
+
+} /* dlagtf_ */
diff --git a/contrib/libs/clapack/dlagtm.c b/contrib/libs/clapack/dlagtm.c
new file mode 100644
index 0000000000..4cd922674b
--- /dev/null
+++ b/contrib/libs/clapack/dlagtm.c
@@ -0,0 +1,254 @@
+/* dlagtm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlagtm_(char *trans, integer *n, integer *nrhs,
+ doublereal *alpha, doublereal *dl, doublereal *d__, doublereal *du,
+ doublereal *x, integer *ldx, doublereal *beta, doublereal *b, integer
+ *ldb)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAGTM performs a matrix-vector product of the form */
+
+/* B := alpha * A * X + beta * B */
+
+/* where A is a tridiagonal matrix of order N, B and X are N by NRHS */
+/* matrices, and alpha and beta are real scalars, each of which may be */
+/* 0., 1., or -1. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the operation applied to A. */
+/* = 'N': No transpose, B := alpha * A * X + beta * B */
+/* = 'T': Transpose, B := alpha * A'* X + beta * B */
+/* = 'C': Conjugate transpose = Transpose */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices X and B. */
+
+/* ALPHA (input) DOUBLE PRECISION */
+/* The scalar alpha. ALPHA must be 0., 1., or -1.; otherwise, */
+/* it is assumed to be 0. */
+
+/* DL (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) sub-diagonal elements of T. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The diagonal elements of T. */
+
+/* DU (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) super-diagonal elements of T. */
+
+/* X (input) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* The N by NRHS matrix X. */
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(N,1). */
+
+/* BETA (input) DOUBLE PRECISION */
+/* The scalar beta. BETA must be 0., 1., or -1.; otherwise, */
+/* it is assumed to be 1. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the N by NRHS matrix B. */
+/* On exit, B is overwritten by the matrix expression */
+/* B := alpha * A * X + beta * B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(N,1). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --dl;
+ --d__;
+ --du;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Multiply B by BETA if BETA.NE.1. */
+
+ if (*beta == 0.) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if (*beta == -1.) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = -b[i__ + j * b_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+
+ if (*alpha == 1.) {
+ if (lsame_(trans, "N")) {
+
+/* Compute B := B + A*X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ b[j * b_dim1 + 1] += d__[1] * x[j * x_dim1 + 1];
+ } else {
+ b[j * b_dim1 + 1] = b[j * b_dim1 + 1] + d__[1] * x[j *
+ x_dim1 + 1] + du[1] * x[j * x_dim1 + 2];
+ b[*n + j * b_dim1] = b[*n + j * b_dim1] + dl[*n - 1] * x[*
+ n - 1 + j * x_dim1] + d__[*n] * x[*n + j * x_dim1]
+ ;
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = b[i__ + j * b_dim1] + dl[i__ -
+ 1] * x[i__ - 1 + j * x_dim1] + d__[i__] * x[
+ i__ + j * x_dim1] + du[i__] * x[i__ + 1 + j *
+ x_dim1];
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else {
+
+/* Compute B := B + A'*X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ b[j * b_dim1 + 1] += d__[1] * x[j * x_dim1 + 1];
+ } else {
+ b[j * b_dim1 + 1] = b[j * b_dim1 + 1] + d__[1] * x[j *
+ x_dim1 + 1] + dl[1] * x[j * x_dim1 + 2];
+ b[*n + j * b_dim1] = b[*n + j * b_dim1] + du[*n - 1] * x[*
+ n - 1 + j * x_dim1] + d__[*n] * x[*n + j * x_dim1]
+ ;
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = b[i__ + j * b_dim1] + du[i__ -
+ 1] * x[i__ - 1 + j * x_dim1] + d__[i__] * x[
+ i__ + j * x_dim1] + dl[i__] * x[i__ + 1 + j *
+ x_dim1];
+/* L70: */
+ }
+ }
+/* L80: */
+ }
+ }
+ } else if (*alpha == -1.) {
+ if (lsame_(trans, "N")) {
+
+/* Compute B := B - A*X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ b[j * b_dim1 + 1] -= d__[1] * x[j * x_dim1 + 1];
+ } else {
+ b[j * b_dim1 + 1] = b[j * b_dim1 + 1] - d__[1] * x[j *
+ x_dim1 + 1] - du[1] * x[j * x_dim1 + 2];
+ b[*n + j * b_dim1] = b[*n + j * b_dim1] - dl[*n - 1] * x[*
+ n - 1 + j * x_dim1] - d__[*n] * x[*n + j * x_dim1]
+ ;
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = b[i__ + j * b_dim1] - dl[i__ -
+ 1] * x[i__ - 1 + j * x_dim1] - d__[i__] * x[
+ i__ + j * x_dim1] - du[i__] * x[i__ + 1 + j *
+ x_dim1];
+/* L90: */
+ }
+ }
+/* L100: */
+ }
+ } else {
+
+/* Compute B := B - A'*X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ b[j * b_dim1 + 1] -= d__[1] * x[j * x_dim1 + 1];
+ } else {
+ b[j * b_dim1 + 1] = b[j * b_dim1 + 1] - d__[1] * x[j *
+ x_dim1 + 1] - dl[1] * x[j * x_dim1 + 2];
+ b[*n + j * b_dim1] = b[*n + j * b_dim1] - du[*n - 1] * x[*
+ n - 1 + j * x_dim1] - d__[*n] * x[*n + j * x_dim1]
+ ;
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = b[i__ + j * b_dim1] - du[i__ -
+ 1] * x[i__ - 1 + j * x_dim1] - d__[i__] * x[
+ i__ + j * x_dim1] - dl[i__] * x[i__ + 1 + j *
+ x_dim1];
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+ }
+ }
+ return 0;
+
+/* End of DLAGTM */
+
+} /* dlagtm_ */
diff --git a/contrib/libs/clapack/dlagts.c b/contrib/libs/clapack/dlagts.c
new file mode 100644
index 0000000000..91e1ba10a9
--- /dev/null
+++ b/contrib/libs/clapack/dlagts.c
@@ -0,0 +1,351 @@
+/* dlagts.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlagts_(integer *job, integer *n, doublereal *a,
+ doublereal *b, doublereal *c__, doublereal *d__, integer *in,
+ doublereal *y, doublereal *tol, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2, d__3, d__4, d__5;
+
+ /* Builtin functions */
+ double d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ integer k;
+ doublereal ak, eps, temp, pert, absak, sfmin;
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAGTS may be used to solve one of the systems of equations */
+
+/* (T - lambda*I)*x = y or (T - lambda*I)'*x = y, */
+
+/* where T is an n by n tridiagonal matrix, for x, following the */
+/* factorization of (T - lambda*I) as */
+
+/* (T - lambda*I) = P*L*U , */
+
+/* by routine DLAGTF. The choice of equation to be solved is */
+/* controlled by the argument JOB, and in each case there is an option */
+/* to perturb zero or very small diagonal elements of U, this option */
+/* being intended for use in applications such as inverse iteration. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) INTEGER */
+/* Specifies the job to be performed by DLAGTS as follows: */
+/* = 1: The equations (T - lambda*I)x = y are to be solved, */
+/* but diagonal elements of U are not to be perturbed. */
+/* = -1: The equations (T - lambda*I)x = y are to be solved */
+/* and, if overflow would otherwise occur, the diagonal */
+/* elements of U are to be perturbed. See argument TOL */
+/* below. */
+/* = 2: The equations (T - lambda*I)'x = y are to be solved, */
+/* but diagonal elements of U are not to be perturbed. */
+/* = -2: The equations (T - lambda*I)'x = y are to be solved */
+/* and, if overflow would otherwise occur, the diagonal */
+/* elements of U are to be perturbed. See argument TOL */
+/* below. */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. */
+
+/* A (input) DOUBLE PRECISION array, dimension (N) */
+/* On entry, A must contain the diagonal elements of U as */
+/* returned from DLAGTF. */
+
+/* B (input) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, B must contain the first super-diagonal elements of */
+/* U as returned from DLAGTF. */
+
+/* C (input) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, C must contain the sub-diagonal elements of L as */
+/* returned from DLAGTF. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N-2) */
+/* On entry, D must contain the second super-diagonal elements */
+/* of U as returned from DLAGTF. */
+
+/* IN (input) INTEGER array, dimension (N) */
+/* On entry, IN must contain details of the matrix P as returned */
+/* from DLAGTF. */
+
+/* Y (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the right hand side vector y. */
+/* On exit, Y is overwritten by the solution vector x. */
+
+/* TOL (input/output) DOUBLE PRECISION */
+/* On entry, with JOB .lt. 0, TOL should be the minimum */
+/* perturbation to be made to very small diagonal elements of U. */
+/* TOL should normally be chosen as about eps*norm(U), where eps */
+/* is the relative machine precision, but if TOL is supplied as */
+/* non-positive, then it is reset to eps*max( abs( u(i,j) ) ). */
+/* If JOB .gt. 0 then TOL is not referenced. */
+
+/* On exit, TOL is changed as described above, only if TOL is */
+/* non-positive on entry. Otherwise TOL is unchanged. */
+
+/* INFO (output) INTEGER */
+/* = 0 : successful exit */
+/* .lt. 0: if INFO = -i, the i-th argument had an illegal value */
+/* .gt. 0: overflow would occur when computing the INFO(th) */
+/* element of the solution vector x. This can only occur */
+/* when JOB is supplied as positive and either means */
+/* that a diagonal element of U is very small, or that */
+/* the elements of the right-hand side vector y are very */
+/* large. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --y;
+ --in;
+ --d__;
+ --c__;
+ --b;
+ --a;
+
+ /* Function Body */
+ *info = 0;
+ if (abs(*job) > 2 || *job == 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLAGTS", &i__1);
+ return 0;
+ }
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ eps = dlamch_("Epsilon");
+ sfmin = dlamch_("Safe minimum");
+ bignum = 1. / sfmin;
+
+ if (*job < 0) {
+ if (*tol <= 0.) {
+ *tol = abs(a[1]);
+ if (*n > 1) {
+/* Computing MAX */
+ d__1 = *tol, d__2 = abs(a[2]), d__1 = max(d__1,d__2), d__2 =
+ abs(b[1]);
+ *tol = max(d__1,d__2);
+ }
+ i__1 = *n;
+ for (k = 3; k <= i__1; ++k) {
+/* Computing MAX */
+ d__4 = *tol, d__5 = (d__1 = a[k], abs(d__1)), d__4 = max(d__4,
+ d__5), d__5 = (d__2 = b[k - 1], abs(d__2)), d__4 =
+ max(d__4,d__5), d__5 = (d__3 = d__[k - 2], abs(d__3));
+ *tol = max(d__4,d__5);
+/* L10: */
+ }
+ *tol *= eps;
+ if (*tol == 0.) {
+ *tol = eps;
+ }
+ }
+ }
+
+ if (abs(*job) == 1) {
+ i__1 = *n;
+ for (k = 2; k <= i__1; ++k) {
+ if (in[k - 1] == 0) {
+ y[k] -= c__[k - 1] * y[k - 1];
+ } else {
+ temp = y[k - 1];
+ y[k - 1] = y[k];
+ y[k] = temp - c__[k - 1] * y[k];
+ }
+/* L20: */
+ }
+ if (*job == 1) {
+ for (k = *n; k >= 1; --k) {
+ if (k <= *n - 2) {
+ temp = y[k] - b[k] * y[k + 1] - d__[k] * y[k + 2];
+ } else if (k == *n - 1) {
+ temp = y[k] - b[k] * y[k + 1];
+ } else {
+ temp = y[k];
+ }
+ ak = a[k];
+ absak = abs(ak);
+ if (absak < 1.) {
+ if (absak < sfmin) {
+ if (absak == 0. || abs(temp) * sfmin > absak) {
+ *info = k;
+ return 0;
+ } else {
+ temp *= bignum;
+ ak *= bignum;
+ }
+ } else if (abs(temp) > absak * bignum) {
+ *info = k;
+ return 0;
+ }
+ }
+ y[k] = temp / ak;
+/* L30: */
+ }
+ } else {
+ for (k = *n; k >= 1; --k) {
+ if (k <= *n - 2) {
+ temp = y[k] - b[k] * y[k + 1] - d__[k] * y[k + 2];
+ } else if (k == *n - 1) {
+ temp = y[k] - b[k] * y[k + 1];
+ } else {
+ temp = y[k];
+ }
+ ak = a[k];
+ pert = d_sign(tol, &ak);
+L40:
+ absak = abs(ak);
+ if (absak < 1.) {
+ if (absak < sfmin) {
+ if (absak == 0. || abs(temp) * sfmin > absak) {
+ ak += pert;
+ pert *= 2;
+ goto L40;
+ } else {
+ temp *= bignum;
+ ak *= bignum;
+ }
+ } else if (abs(temp) > absak * bignum) {
+ ak += pert;
+ pert *= 2;
+ goto L40;
+ }
+ }
+ y[k] = temp / ak;
+/* L50: */
+ }
+ }
+ } else {
+
+/* Come to here if JOB = 2 or -2 */
+
+ if (*job == 2) {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (k >= 3) {
+ temp = y[k] - b[k - 1] * y[k - 1] - d__[k - 2] * y[k - 2];
+ } else if (k == 2) {
+ temp = y[k] - b[k - 1] * y[k - 1];
+ } else {
+ temp = y[k];
+ }
+ ak = a[k];
+ absak = abs(ak);
+ if (absak < 1.) {
+ if (absak < sfmin) {
+ if (absak == 0. || abs(temp) * sfmin > absak) {
+ *info = k;
+ return 0;
+ } else {
+ temp *= bignum;
+ ak *= bignum;
+ }
+ } else if (abs(temp) > absak * bignum) {
+ *info = k;
+ return 0;
+ }
+ }
+ y[k] = temp / ak;
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (k >= 3) {
+ temp = y[k] - b[k - 1] * y[k - 1] - d__[k - 2] * y[k - 2];
+ } else if (k == 2) {
+ temp = y[k] - b[k - 1] * y[k - 1];
+ } else {
+ temp = y[k];
+ }
+ ak = a[k];
+ pert = d_sign(tol, &ak);
+L70:
+ absak = abs(ak);
+ if (absak < 1.) {
+ if (absak < sfmin) {
+ if (absak == 0. || abs(temp) * sfmin > absak) {
+ ak += pert;
+ pert *= 2;
+ goto L70;
+ } else {
+ temp *= bignum;
+ ak *= bignum;
+ }
+ } else if (abs(temp) > absak * bignum) {
+ ak += pert;
+ pert *= 2;
+ goto L70;
+ }
+ }
+ y[k] = temp / ak;
+/* L80: */
+ }
+ }
+
+ for (k = *n; k >= 2; --k) {
+ if (in[k - 1] == 0) {
+ y[k - 1] -= c__[k - 1] * y[k];
+ } else {
+ temp = y[k - 1];
+ y[k - 1] = y[k];
+ y[k] = temp - c__[k - 1] * y[k];
+ }
+/* L90: */
+ }
+ }
+
+/* End of DLAGTS */
+
+ return 0;
+} /* dlagts_ */
diff --git a/contrib/libs/clapack/dlagv2.c b/contrib/libs/clapack/dlagv2.c
new file mode 100644
index 0000000000..634a8887a2
--- /dev/null
+++ b/contrib/libs/clapack/dlagv2.c
@@ -0,0 +1,351 @@
+/* dlagv2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__2 = 2;
+static integer c__1 = 1;
+
+/* Subroutine */ int dlagv2_(doublereal *a, integer *lda, doublereal *b,
+ integer *ldb, doublereal *alphar, doublereal *alphai, doublereal *
+ beta, doublereal *csl, doublereal *snl, doublereal *csr, doublereal *
+ snr)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset;
+ doublereal d__1, d__2, d__3, d__4, d__5, d__6;
+
+ /* Local variables */
+ doublereal r__, t, h1, h2, h3, wi, qq, rr, wr1, wr2, ulp;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *), dlag2_(
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *);
+ doublereal anorm, bnorm, scale1, scale2;
+ extern /* Subroutine */ int dlasv2_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *);
+ extern doublereal dlapy2_(doublereal *, doublereal *);
+ doublereal ascale, bscale;
+ extern doublereal dlamch_(char *);
+ doublereal safmin;
+ extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAGV2 computes the Generalized Schur factorization of a real 2-by-2 */
+/* matrix pencil (A,B) where B is upper triangular. This routine */
+/* computes orthogonal (rotation) matrices given by CSL, SNL and CSR, */
+/* SNR such that */
+
+/* 1) if the pencil (A,B) has two real eigenvalues (include 0/0 or 1/0 */
+/* types), then */
+
+/* [ a11 a12 ] := [ CSL SNL ] [ a11 a12 ] [ CSR -SNR ] */
+/* [ 0 a22 ] [ -SNL CSL ] [ a21 a22 ] [ SNR CSR ] */
+
+/* [ b11 b12 ] := [ CSL SNL ] [ b11 b12 ] [ CSR -SNR ] */
+/* [ 0 b22 ] [ -SNL CSL ] [ 0 b22 ] [ SNR CSR ], */
+
+/* 2) if the pencil (A,B) has a pair of complex conjugate eigenvalues, */
+/* then */
+
+/* [ a11 a12 ] := [ CSL SNL ] [ a11 a12 ] [ CSR -SNR ] */
+/* [ a21 a22 ] [ -SNL CSL ] [ a21 a22 ] [ SNR CSR ] */
+
+/* [ b11 0 ] := [ CSL SNL ] [ b11 b12 ] [ CSR -SNR ] */
+/* [ 0 b22 ] [ -SNL CSL ] [ 0 b22 ] [ SNR CSR ] */
+
+/* where b11 >= b22 > 0. */
+
+
+/* Arguments */
+/* ========= */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA, 2) */
+/* On entry, the 2 x 2 matrix A. */
+/* On exit, A is overwritten by the ``A-part'' of the */
+/* generalized Schur form. */
+
+/* LDA (input) INTEGER */
+/* THe leading dimension of the array A. LDA >= 2. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB, 2) */
+/* On entry, the upper triangular 2 x 2 matrix B. */
+/* On exit, B is overwritten by the ``B-part'' of the */
+/* generalized Schur form. */
+
+/* LDB (input) INTEGER */
+/* THe leading dimension of the array B. LDB >= 2. */
+
+/* ALPHAR (output) DOUBLE PRECISION array, dimension (2) */
+/* ALPHAI (output) DOUBLE PRECISION array, dimension (2) */
+/* BETA (output) DOUBLE PRECISION array, dimension (2) */
+/* (ALPHAR(k)+i*ALPHAI(k))/BETA(k) are the eigenvalues of the */
+/* pencil (A,B), k=1,2, i = sqrt(-1). Note that BETA(k) may */
+/* be zero. */
+
+/* CSL (output) DOUBLE PRECISION */
+/* The cosine of the left rotation matrix. */
+
+/* SNL (output) DOUBLE PRECISION */
+/* The sine of the left rotation matrix. */
+
+/* CSR (output) DOUBLE PRECISION */
+/* The cosine of the right rotation matrix. */
+
+/* SNR (output) DOUBLE PRECISION */
+/* The sine of the right rotation matrix. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alphar;
+ --alphai;
+ --beta;
+
+ /* Function Body */
+ safmin = dlamch_("S");
+ ulp = dlamch_("P");
+
+/* Scale A */
+
+/* Computing MAX */
+ d__5 = (d__1 = a[a_dim1 + 1], abs(d__1)) + (d__2 = a[a_dim1 + 2], abs(
+ d__2)), d__6 = (d__3 = a[(a_dim1 << 1) + 1], abs(d__3)) + (d__4 =
+ a[(a_dim1 << 1) + 2], abs(d__4)), d__5 = max(d__5,d__6);
+ anorm = max(d__5,safmin);
+ ascale = 1. / anorm;
+ a[a_dim1 + 1] = ascale * a[a_dim1 + 1];
+ a[(a_dim1 << 1) + 1] = ascale * a[(a_dim1 << 1) + 1];
+ a[a_dim1 + 2] = ascale * a[a_dim1 + 2];
+ a[(a_dim1 << 1) + 2] = ascale * a[(a_dim1 << 1) + 2];
+
+/* Scale B */
+
+/* Computing MAX */
+ d__4 = (d__3 = b[b_dim1 + 1], abs(d__3)), d__5 = (d__1 = b[(b_dim1 << 1)
+ + 1], abs(d__1)) + (d__2 = b[(b_dim1 << 1) + 2], abs(d__2)), d__4
+ = max(d__4,d__5);
+ bnorm = max(d__4,safmin);
+ bscale = 1. / bnorm;
+ b[b_dim1 + 1] = bscale * b[b_dim1 + 1];
+ b[(b_dim1 << 1) + 1] = bscale * b[(b_dim1 << 1) + 1];
+ b[(b_dim1 << 1) + 2] = bscale * b[(b_dim1 << 1) + 2];
+
+/* Check if A can be deflated */
+
+ if ((d__1 = a[a_dim1 + 2], abs(d__1)) <= ulp) {
+ *csl = 1.;
+ *snl = 0.;
+ *csr = 1.;
+ *snr = 0.;
+ a[a_dim1 + 2] = 0.;
+ b[b_dim1 + 2] = 0.;
+
+/* Check if B is singular */
+
+ } else if ((d__1 = b[b_dim1 + 1], abs(d__1)) <= ulp) {
+ dlartg_(&a[a_dim1 + 1], &a[a_dim1 + 2], csl, snl, &r__);
+ *csr = 1.;
+ *snr = 0.;
+ drot_(&c__2, &a[a_dim1 + 1], lda, &a[a_dim1 + 2], lda, csl, snl);
+ drot_(&c__2, &b[b_dim1 + 1], ldb, &b[b_dim1 + 2], ldb, csl, snl);
+ a[a_dim1 + 2] = 0.;
+ b[b_dim1 + 1] = 0.;
+ b[b_dim1 + 2] = 0.;
+
+ } else if ((d__1 = b[(b_dim1 << 1) + 2], abs(d__1)) <= ulp) {
+ dlartg_(&a[(a_dim1 << 1) + 2], &a[a_dim1 + 2], csr, snr, &t);
+ *snr = -(*snr);
+ drot_(&c__2, &a[a_dim1 + 1], &c__1, &a[(a_dim1 << 1) + 1], &c__1, csr,
+ snr);
+ drot_(&c__2, &b[b_dim1 + 1], &c__1, &b[(b_dim1 << 1) + 1], &c__1, csr,
+ snr);
+ *csl = 1.;
+ *snl = 0.;
+ a[a_dim1 + 2] = 0.;
+ b[b_dim1 + 2] = 0.;
+ b[(b_dim1 << 1) + 2] = 0.;
+
+ } else {
+
+/* B is nonsingular, first compute the eigenvalues of (A,B) */
+
+ dlag2_(&a[a_offset], lda, &b[b_offset], ldb, &safmin, &scale1, &
+ scale2, &wr1, &wr2, &wi);
+
+ if (wi == 0.) {
+
+/* two real eigenvalues, compute s*A-w*B */
+
+ h1 = scale1 * a[a_dim1 + 1] - wr1 * b[b_dim1 + 1];
+ h2 = scale1 * a[(a_dim1 << 1) + 1] - wr1 * b[(b_dim1 << 1) + 1];
+ h3 = scale1 * a[(a_dim1 << 1) + 2] - wr1 * b[(b_dim1 << 1) + 2];
+
+ rr = dlapy2_(&h1, &h2);
+ d__1 = scale1 * a[a_dim1 + 2];
+ qq = dlapy2_(&d__1, &h3);
+
+ if (rr > qq) {
+
+/* find right rotation matrix to zero 1,1 element of */
+/* (sA - wB) */
+
+ dlartg_(&h2, &h1, csr, snr, &t);
+
+ } else {
+
+/* find right rotation matrix to zero 2,1 element of */
+/* (sA - wB) */
+
+ d__1 = scale1 * a[a_dim1 + 2];
+ dlartg_(&h3, &d__1, csr, snr, &t);
+
+ }
+
+ *snr = -(*snr);
+ drot_(&c__2, &a[a_dim1 + 1], &c__1, &a[(a_dim1 << 1) + 1], &c__1,
+ csr, snr);
+ drot_(&c__2, &b[b_dim1 + 1], &c__1, &b[(b_dim1 << 1) + 1], &c__1,
+ csr, snr);
+
+/* compute inf norms of A and B */
+
+/* Computing MAX */
+ d__5 = (d__1 = a[a_dim1 + 1], abs(d__1)) + (d__2 = a[(a_dim1 << 1)
+ + 1], abs(d__2)), d__6 = (d__3 = a[a_dim1 + 2], abs(d__3)
+ ) + (d__4 = a[(a_dim1 << 1) + 2], abs(d__4));
+ h1 = max(d__5,d__6);
+/* Computing MAX */
+ d__5 = (d__1 = b[b_dim1 + 1], abs(d__1)) + (d__2 = b[(b_dim1 << 1)
+ + 1], abs(d__2)), d__6 = (d__3 = b[b_dim1 + 2], abs(d__3)
+ ) + (d__4 = b[(b_dim1 << 1) + 2], abs(d__4));
+ h2 = max(d__5,d__6);
+
+ if (scale1 * h1 >= abs(wr1) * h2) {
+
+/* find left rotation matrix Q to zero out B(2,1) */
+
+ dlartg_(&b[b_dim1 + 1], &b[b_dim1 + 2], csl, snl, &r__);
+
+ } else {
+
+/* find left rotation matrix Q to zero out A(2,1) */
+
+ dlartg_(&a[a_dim1 + 1], &a[a_dim1 + 2], csl, snl, &r__);
+
+ }
+
+ drot_(&c__2, &a[a_dim1 + 1], lda, &a[a_dim1 + 2], lda, csl, snl);
+ drot_(&c__2, &b[b_dim1 + 1], ldb, &b[b_dim1 + 2], ldb, csl, snl);
+
+ a[a_dim1 + 2] = 0.;
+ b[b_dim1 + 2] = 0.;
+
+ } else {
+
+/* a pair of complex conjugate eigenvalues */
+/* first compute the SVD of the matrix B */
+
+ dlasv2_(&b[b_dim1 + 1], &b[(b_dim1 << 1) + 1], &b[(b_dim1 << 1) +
+ 2], &r__, &t, snr, csr, snl, csl);
+
+/* Form (A,B) := Q(A,B)Z' where Q is left rotation matrix and */
+/* Z is right rotation matrix computed from DLASV2 */
+
+ drot_(&c__2, &a[a_dim1 + 1], lda, &a[a_dim1 + 2], lda, csl, snl);
+ drot_(&c__2, &b[b_dim1 + 1], ldb, &b[b_dim1 + 2], ldb, csl, snl);
+ drot_(&c__2, &a[a_dim1 + 1], &c__1, &a[(a_dim1 << 1) + 1], &c__1,
+ csr, snr);
+ drot_(&c__2, &b[b_dim1 + 1], &c__1, &b[(b_dim1 << 1) + 1], &c__1,
+ csr, snr);
+
+ b[b_dim1 + 2] = 0.;
+ b[(b_dim1 << 1) + 1] = 0.;
+
+ }
+
+ }
+
+/* Unscaling */
+
+ a[a_dim1 + 1] = anorm * a[a_dim1 + 1];
+ a[a_dim1 + 2] = anorm * a[a_dim1 + 2];
+ a[(a_dim1 << 1) + 1] = anorm * a[(a_dim1 << 1) + 1];
+ a[(a_dim1 << 1) + 2] = anorm * a[(a_dim1 << 1) + 2];
+ b[b_dim1 + 1] = bnorm * b[b_dim1 + 1];
+ b[b_dim1 + 2] = bnorm * b[b_dim1 + 2];
+ b[(b_dim1 << 1) + 1] = bnorm * b[(b_dim1 << 1) + 1];
+ b[(b_dim1 << 1) + 2] = bnorm * b[(b_dim1 << 1) + 2];
+
+ if (wi == 0.) {
+ alphar[1] = a[a_dim1 + 1];
+ alphar[2] = a[(a_dim1 << 1) + 2];
+ alphai[1] = 0.;
+ alphai[2] = 0.;
+ beta[1] = b[b_dim1 + 1];
+ beta[2] = b[(b_dim1 << 1) + 2];
+ } else {
+ alphar[1] = anorm * wr1 / scale1 / bnorm;
+ alphai[1] = anorm * wi / scale1 / bnorm;
+ alphar[2] = alphar[1];
+ alphai[2] = -alphai[1];
+ beta[1] = 1.;
+ beta[2] = 1.;
+ }
+
+ return 0;
+
+/* End of DLAGV2 */
+
+} /* dlagv2_ */
diff --git a/contrib/libs/clapack/dlahqr.c b/contrib/libs/clapack/dlahqr.c
new file mode 100644
index 0000000000..555f5841d8
--- /dev/null
+++ b/contrib/libs/clapack/dlahqr.c
@@ -0,0 +1,631 @@
+/* dlahqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dlahqr_(logical *wantt, logical *wantz, integer *n,
+ integer *ilo, integer *ihi, doublereal *h__, integer *ldh, doublereal
+ *wr, doublereal *wi, integer *iloz, integer *ihiz, doublereal *z__,
+ integer *ldz, integer *info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k, l, m;
+ doublereal s, v[3];
+ integer i1, i2;
+ doublereal t1, t2, t3, v2, v3, aa, ab, ba, bb, h11, h12, h21, h22, cs;
+ integer nh;
+ doublereal sn;
+ integer nr;
+ doublereal tr;
+ integer nz;
+ doublereal det, h21s;
+ integer its;
+ doublereal ulp, sum, tst, rt1i, rt2i, rt1r, rt2r;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *), dcopy_(
+ integer *, doublereal *, integer *, doublereal *, integer *),
+ dlanv2_(doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *), dlabad_(doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *);
+ doublereal safmin, safmax, rtdisc, smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAHQR is an auxiliary routine called by DHSEQR to update the */
+/* eigenvalues and Schur decomposition already computed by DHSEQR, by */
+/* dealing with the Hessenberg submatrix in rows and columns ILO to */
+/* IHI. */
+
+/* Arguments */
+/* ========= */
+
+/* WANTT (input) LOGICAL */
+/* = .TRUE. : the full Schur form T is required; */
+/* = .FALSE.: only eigenvalues are required. */
+
+/* WANTZ (input) LOGICAL */
+/* = .TRUE. : the matrix of Schur vectors Z is required; */
+/* = .FALSE.: Schur vectors are not required. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that H is already upper quasi-triangular in */
+/* rows and columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless */
+/* ILO = 1). DLAHQR works primarily with the Hessenberg */
+/* submatrix in rows and columns ILO to IHI, but applies */
+/* transformations to all of H if WANTT is .TRUE.. */
+/* 1 <= ILO <= max(1,IHI); IHI <= N. */
+
+/* H (input/output) DOUBLE PRECISION array, dimension (LDH,N) */
+/* On entry, the upper Hessenberg matrix H. */
+/* On exit, if INFO is zero and if WANTT is .TRUE., H is upper */
+/* quasi-triangular in rows and columns ILO:IHI, with any */
+/* 2-by-2 diagonal blocks in standard form. If INFO is zero */
+/* and WANTT is .FALSE., the contents of H are unspecified on */
+/* exit. The output state of H if INFO is nonzero is given */
+/* below under the description of INFO. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH >= max(1,N). */
+
+/* WR (output) DOUBLE PRECISION array, dimension (N) */
+/* WI (output) DOUBLE PRECISION array, dimension (N) */
+/* The real and imaginary parts, respectively, of the computed */
+/* eigenvalues ILO to IHI are stored in the corresponding */
+/* elements of WR and WI. If two eigenvalues are computed as a */
+/* complex conjugate pair, they are stored in consecutive */
+/* elements of WR and WI, say the i-th and (i+1)th, with */
+/* WI(i) > 0 and WI(i+1) < 0. If WANTT is .TRUE., the */
+/* eigenvalues are stored in the same order as on the diagonal */
+/* of the Schur form returned in H, with WR(i) = H(i,i), and, if */
+/* H(i:i+1,i:i+1) is a 2-by-2 diagonal block, */
+/* WI(i) = sqrt(H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i). */
+
+/* ILOZ (input) INTEGER */
+/* IHIZ (input) INTEGER */
+/* Specify the rows of Z to which transformations must be */
+/* applied if WANTZ is .TRUE.. */
+/* 1 <= ILOZ <= ILO; IHI <= IHIZ <= N. */
+
+/* Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N) */
+/* If WANTZ is .TRUE., on entry Z must contain the current */
+/* matrix Z of transformations accumulated by DHSEQR, and on */
+/* exit Z has been updated; transformations are applied only to */
+/* the submatrix Z(ILOZ:IHIZ,ILO:IHI). */
+/* If WANTZ is .FALSE., Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* .GT. 0: If INFO = i, DLAHQR failed to compute all the */
+/* eigenvalues ILO to IHI in a total of 30 iterations */
+/* per eigenvalue; elements i+1:ihi of WR and WI */
+/* contain those eigenvalues which have been */
+/* successfully computed. */
+
+/* If INFO .GT. 0 and WANTT is .FALSE., then on exit, */
+/* the remaining unconverged eigenvalues are the */
+/* eigenvalues of the upper Hessenberg matrix rows */
+/*          and columns ILO through INFO of the final, output */
+/* value of H. */
+
+/* If INFO .GT. 0 and WANTT is .TRUE., then on exit */
+/* (*) (initial value of H)*U = U*(final value of H) */
+/*          where U is an orthogonal matrix.    The final */
+/* value of H is upper Hessenberg and triangular in */
+/* rows and columns INFO+1 through IHI. */
+
+/* If INFO .GT. 0 and WANTZ is .TRUE., then on exit */
+/* (final value of Z) = (initial value of Z)*U */
+/* where U is the orthogonal matrix in (*) */
+/* (regardless of the value of WANTT.) */
+
+/* Further Details */
+/* =============== */
+
+/* 02-96 Based on modifications by */
+/* David Day, Sandia National Laboratory, USA */
+
+/* 12-04 Further modifications by */
+/* Ralph Byers, University of Kansas, USA */
+/* This is a modified version of DLAHQR from LAPACK version 3.0. */
+/* It is (1) more robust against overflow and underflow and */
+/* (2) adopts the more conservative Ahues & Tisseur stopping */
+/* criterion (LAWN 122, 1997). */
+
+/* ========================================================= */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --wr;
+ --wi;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+
+ /* Function Body */
+ *info = 0;
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*ilo == *ihi) {
+ wr[*ilo] = h__[*ilo + *ilo * h_dim1];
+ wi[*ilo] = 0.;
+ return 0;
+ }
+
+/* ==== clear out the trash ==== */
+ i__1 = *ihi - 3;
+ for (j = *ilo; j <= i__1; ++j) {
+ h__[j + 2 + j * h_dim1] = 0.;
+ h__[j + 3 + j * h_dim1] = 0.;
+/* L10: */
+ }
+ if (*ilo <= *ihi - 2) {
+ h__[*ihi + (*ihi - 2) * h_dim1] = 0.;
+ }
+
+ nh = *ihi - *ilo + 1;
+ nz = *ihiz - *iloz + 1;
+
+/* Set machine-dependent constants for the stopping criterion. */
+
+ safmin = dlamch_("SAFE MINIMUM");
+ safmax = 1. / safmin;
+ dlabad_(&safmin, &safmax);
+ ulp = dlamch_("PRECISION");
+ smlnum = safmin * ((doublereal) nh / ulp);
+
+/* I1 and I2 are the indices of the first row and last column of H */
+/* to which transformations must be applied. If eigenvalues only are */
+/* being computed, I1 and I2 are set inside the main loop. */
+
+ if (*wantt) {
+ i1 = 1;
+ i2 = *n;
+ }
+
+/* The main loop begins here. I is the loop index and decreases from */
+/* IHI to ILO in steps of 1 or 2. Each iteration of the loop works */
+/* with the active submatrix in rows and columns L to I. */
+/* Eigenvalues I+1 to IHI have already converged. Either L = ILO or */
+/* H(L,L-1) is negligible so that the matrix splits. */
+
+ i__ = *ihi;
+L20:
+ l = *ilo;
+ if (i__ < *ilo) {
+ goto L160;
+ }
+
+/* Perform QR iterations on rows and columns ILO to I until a */
+/* submatrix of order 1 or 2 splits off at the bottom because a */
+/* subdiagonal element has become negligible. */
+
+ for (its = 0; its <= 30; ++its) {
+
+/* Look for a single small subdiagonal element. */
+
+ i__1 = l + 1;
+ for (k = i__; k >= i__1; --k) {
+ if ((d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)) <= smlnum) {
+ goto L40;
+ }
+ tst = (d__1 = h__[k - 1 + (k - 1) * h_dim1], abs(d__1)) + (d__2 =
+ h__[k + k * h_dim1], abs(d__2));
+ if (tst == 0.) {
+ if (k - 2 >= *ilo) {
+ tst += (d__1 = h__[k - 1 + (k - 2) * h_dim1], abs(d__1));
+ }
+ if (k + 1 <= *ihi) {
+ tst += (d__1 = h__[k + 1 + k * h_dim1], abs(d__1));
+ }
+ }
+/* ==== The following is a conservative small subdiagonal */
+/* . deflation criterion due to Ahues & Tisseur (LAWN 122, */
+/* . 1997). It has better mathematical foundation and */
+/* . improves accuracy in some cases. ==== */
+ if ((d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)) <= ulp * tst) {
+/* Computing MAX */
+ d__3 = (d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)), d__4 = (
+ d__2 = h__[k - 1 + k * h_dim1], abs(d__2));
+ ab = max(d__3,d__4);
+/* Computing MIN */
+ d__3 = (d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)), d__4 = (
+ d__2 = h__[k - 1 + k * h_dim1], abs(d__2));
+ ba = min(d__3,d__4);
+/* Computing MAX */
+ d__3 = (d__1 = h__[k + k * h_dim1], abs(d__1)), d__4 = (d__2 =
+ h__[k - 1 + (k - 1) * h_dim1] - h__[k + k * h_dim1],
+ abs(d__2));
+ aa = max(d__3,d__4);
+/* Computing MIN */
+ d__3 = (d__1 = h__[k + k * h_dim1], abs(d__1)), d__4 = (d__2 =
+ h__[k - 1 + (k - 1) * h_dim1] - h__[k + k * h_dim1],
+ abs(d__2));
+ bb = min(d__3,d__4);
+ s = aa + ab;
+/* Computing MAX */
+ d__1 = smlnum, d__2 = ulp * (bb * (aa / s));
+ if (ba * (ab / s) <= max(d__1,d__2)) {
+ goto L40;
+ }
+ }
+/* L30: */
+ }
+L40:
+ l = k;
+ if (l > *ilo) {
+
+/* H(L,L-1) is negligible */
+
+ h__[l + (l - 1) * h_dim1] = 0.;
+ }
+
+/* Exit from loop if a submatrix of order 1 or 2 has split off. */
+
+ if (l >= i__ - 1) {
+ goto L150;
+ }
+
+/* Now the active submatrix is in rows and columns L to I. If */
+/* eigenvalues only are being computed, only the active submatrix */
+/* need be transformed. */
+
+ if (! (*wantt)) {
+ i1 = l;
+ i2 = i__;
+ }
+
+ if (its == 10) {
+
+/* Exceptional shift. */
+
+ s = (d__1 = h__[l + 1 + l * h_dim1], abs(d__1)) + (d__2 = h__[l +
+ 2 + (l + 1) * h_dim1], abs(d__2));
+ h11 = s * .75 + h__[l + l * h_dim1];
+ h12 = s * -.4375;
+ h21 = s;
+ h22 = h11;
+ } else if (its == 20) {
+
+/* Exceptional shift. */
+
+ s = (d__1 = h__[i__ + (i__ - 1) * h_dim1], abs(d__1)) + (d__2 =
+ h__[i__ - 1 + (i__ - 2) * h_dim1], abs(d__2));
+ h11 = s * .75 + h__[i__ + i__ * h_dim1];
+ h12 = s * -.4375;
+ h21 = s;
+ h22 = h11;
+ } else {
+
+/* Prepare to use Francis' double shift */
+/* (i.e. 2nd degree generalized Rayleigh quotient) */
+
+ h11 = h__[i__ - 1 + (i__ - 1) * h_dim1];
+ h21 = h__[i__ + (i__ - 1) * h_dim1];
+ h12 = h__[i__ - 1 + i__ * h_dim1];
+ h22 = h__[i__ + i__ * h_dim1];
+ }
+ s = abs(h11) + abs(h12) + abs(h21) + abs(h22);
+ if (s == 0.) {
+ rt1r = 0.;
+ rt1i = 0.;
+ rt2r = 0.;
+ rt2i = 0.;
+ } else {
+ h11 /= s;
+ h21 /= s;
+ h12 /= s;
+ h22 /= s;
+ tr = (h11 + h22) / 2.;
+ det = (h11 - tr) * (h22 - tr) - h12 * h21;
+ rtdisc = sqrt((abs(det)));
+ if (det >= 0.) {
+
+/* ==== complex conjugate shifts ==== */
+
+ rt1r = tr * s;
+ rt2r = rt1r;
+ rt1i = rtdisc * s;
+ rt2i = -rt1i;
+ } else {
+
+/* ==== real shifts (use only one of them) ==== */
+
+ rt1r = tr + rtdisc;
+ rt2r = tr - rtdisc;
+ if ((d__1 = rt1r - h22, abs(d__1)) <= (d__2 = rt2r - h22, abs(
+ d__2))) {
+ rt1r *= s;
+ rt2r = rt1r;
+ } else {
+ rt2r *= s;
+ rt1r = rt2r;
+ }
+ rt1i = 0.;
+ rt2i = 0.;
+ }
+ }
+
+/* Look for two consecutive small subdiagonal elements. */
+
+ i__1 = l;
+ for (m = i__ - 2; m >= i__1; --m) {
+/* Determine the effect of starting the double-shift QR */
+/* iteration at row M, and see if this would make H(M,M-1) */
+/* negligible. (The following uses scaling to avoid */
+/* overflows and most underflows.) */
+
+ h21s = h__[m + 1 + m * h_dim1];
+ s = (d__1 = h__[m + m * h_dim1] - rt2r, abs(d__1)) + abs(rt2i) +
+ abs(h21s);
+ h21s = h__[m + 1 + m * h_dim1] / s;
+ v[0] = h21s * h__[m + (m + 1) * h_dim1] + (h__[m + m * h_dim1] -
+ rt1r) * ((h__[m + m * h_dim1] - rt2r) / s) - rt1i * (rt2i
+ / s);
+ v[1] = h21s * (h__[m + m * h_dim1] + h__[m + 1 + (m + 1) * h_dim1]
+ - rt1r - rt2r);
+ v[2] = h21s * h__[m + 2 + (m + 1) * h_dim1];
+ s = abs(v[0]) + abs(v[1]) + abs(v[2]);
+ v[0] /= s;
+ v[1] /= s;
+ v[2] /= s;
+ if (m == l) {
+ goto L60;
+ }
+ if ((d__1 = h__[m + (m - 1) * h_dim1], abs(d__1)) * (abs(v[1]) +
+ abs(v[2])) <= ulp * abs(v[0]) * ((d__2 = h__[m - 1 + (m -
+ 1) * h_dim1], abs(d__2)) + (d__3 = h__[m + m * h_dim1],
+ abs(d__3)) + (d__4 = h__[m + 1 + (m + 1) * h_dim1], abs(
+ d__4)))) {
+ goto L60;
+ }
+/* L50: */
+ }
+L60:
+
+/* Double-shift QR step */
+
+ i__1 = i__ - 1;
+ for (k = m; k <= i__1; ++k) {
+
+/* The first iteration of this loop determines a reflection G */
+/* from the vector V and applies it from left and right to H, */
+/* thus creating a nonzero bulge below the subdiagonal. */
+
+/* Each subsequent iteration determines a reflection G to */
+/* restore the Hessenberg form in the (K-1)th column, and thus */
+/* chases the bulge one step toward the bottom of the active */
+/* submatrix. NR is the order of G. */
+
+/* Computing MIN */
+ i__2 = 3, i__3 = i__ - k + 1;
+ nr = min(i__2,i__3);
+ if (k > m) {
+ dcopy_(&nr, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1);
+ }
+ dlarfg_(&nr, v, &v[1], &c__1, &t1);
+ if (k > m) {
+ h__[k + (k - 1) * h_dim1] = v[0];
+ h__[k + 1 + (k - 1) * h_dim1] = 0.;
+ if (k < i__ - 1) {
+ h__[k + 2 + (k - 1) * h_dim1] = 0.;
+ }
+ } else if (m > l) {
+/* ==== Use the following instead of */
+/* . H( K, K-1 ) = -H( K, K-1 ) to */
+/* . avoid a bug when v(2) and v(3) */
+/* . underflow. ==== */
+ h__[k + (k - 1) * h_dim1] *= 1. - t1;
+ }
+ v2 = v[1];
+ t2 = t1 * v2;
+ if (nr == 3) {
+ v3 = v[2];
+ t3 = t1 * v3;
+
+/* Apply G from the left to transform the rows of the matrix */
+/* in columns K to I2. */
+
+ i__2 = i2;
+ for (j = k; j <= i__2; ++j) {
+ sum = h__[k + j * h_dim1] + v2 * h__[k + 1 + j * h_dim1]
+ + v3 * h__[k + 2 + j * h_dim1];
+ h__[k + j * h_dim1] -= sum * t1;
+ h__[k + 1 + j * h_dim1] -= sum * t2;
+ h__[k + 2 + j * h_dim1] -= sum * t3;
+/* L70: */
+ }
+
+/* Apply G from the right to transform the columns of the */
+/* matrix in rows I1 to min(K+3,I). */
+
+/* Computing MIN */
+ i__3 = k + 3;
+ i__2 = min(i__3,i__);
+ for (j = i1; j <= i__2; ++j) {
+ sum = h__[j + k * h_dim1] + v2 * h__[j + (k + 1) * h_dim1]
+ + v3 * h__[j + (k + 2) * h_dim1];
+ h__[j + k * h_dim1] -= sum * t1;
+ h__[j + (k + 1) * h_dim1] -= sum * t2;
+ h__[j + (k + 2) * h_dim1] -= sum * t3;
+/* L80: */
+ }
+
+ if (*wantz) {
+
+/* Accumulate transformations in the matrix Z */
+
+ i__2 = *ihiz;
+ for (j = *iloz; j <= i__2; ++j) {
+ sum = z__[j + k * z_dim1] + v2 * z__[j + (k + 1) *
+ z_dim1] + v3 * z__[j + (k + 2) * z_dim1];
+ z__[j + k * z_dim1] -= sum * t1;
+ z__[j + (k + 1) * z_dim1] -= sum * t2;
+ z__[j + (k + 2) * z_dim1] -= sum * t3;
+/* L90: */
+ }
+ }
+ } else if (nr == 2) {
+
+/* Apply G from the left to transform the rows of the matrix */
+/* in columns K to I2. */
+
+ i__2 = i2;
+ for (j = k; j <= i__2; ++j) {
+ sum = h__[k + j * h_dim1] + v2 * h__[k + 1 + j * h_dim1];
+ h__[k + j * h_dim1] -= sum * t1;
+ h__[k + 1 + j * h_dim1] -= sum * t2;
+/* L100: */
+ }
+
+/* Apply G from the right to transform the columns of the */
+/* matrix in rows I1 to min(K+3,I). */
+
+ i__2 = i__;
+ for (j = i1; j <= i__2; ++j) {
+ sum = h__[j + k * h_dim1] + v2 * h__[j + (k + 1) * h_dim1]
+ ;
+ h__[j + k * h_dim1] -= sum * t1;
+ h__[j + (k + 1) * h_dim1] -= sum * t2;
+/* L110: */
+ }
+
+ if (*wantz) {
+
+/* Accumulate transformations in the matrix Z */
+
+ i__2 = *ihiz;
+ for (j = *iloz; j <= i__2; ++j) {
+ sum = z__[j + k * z_dim1] + v2 * z__[j + (k + 1) *
+ z_dim1];
+ z__[j + k * z_dim1] -= sum * t1;
+ z__[j + (k + 1) * z_dim1] -= sum * t2;
+/* L120: */
+ }
+ }
+ }
+/* L130: */
+ }
+
+/* L140: */
+ }
+
+/* Failure to converge in remaining number of iterations */
+
+ *info = i__;
+ return 0;
+
+L150:
+
+ if (l == i__) {
+
+/* H(I,I-1) is negligible: one eigenvalue has converged. */
+
+ wr[i__] = h__[i__ + i__ * h_dim1];
+ wi[i__] = 0.;
+ } else if (l == i__ - 1) {
+
+/* H(I-1,I-2) is negligible: a pair of eigenvalues have converged. */
+
+/* Transform the 2-by-2 submatrix to standard Schur form, */
+/* and compute and store the eigenvalues. */
+
+ dlanv2_(&h__[i__ - 1 + (i__ - 1) * h_dim1], &h__[i__ - 1 + i__ *
+ h_dim1], &h__[i__ + (i__ - 1) * h_dim1], &h__[i__ + i__ *
+ h_dim1], &wr[i__ - 1], &wi[i__ - 1], &wr[i__], &wi[i__], &cs,
+ &sn);
+
+ if (*wantt) {
+
+/* Apply the transformation to the rest of H. */
+
+ if (i2 > i__) {
+ i__1 = i2 - i__;
+ drot_(&i__1, &h__[i__ - 1 + (i__ + 1) * h_dim1], ldh, &h__[
+ i__ + (i__ + 1) * h_dim1], ldh, &cs, &sn);
+ }
+ i__1 = i__ - i1 - 1;
+ drot_(&i__1, &h__[i1 + (i__ - 1) * h_dim1], &c__1, &h__[i1 + i__ *
+ h_dim1], &c__1, &cs, &sn);
+ }
+ if (*wantz) {
+
+/* Apply the transformation to Z. */
+
+ drot_(&nz, &z__[*iloz + (i__ - 1) * z_dim1], &c__1, &z__[*iloz +
+ i__ * z_dim1], &c__1, &cs, &sn);
+ }
+ }
+
+/* return to start of the main loop with new value of I. */
+
+ i__ = l - 1;
+ goto L20;
+
+L160:
+ return 0;
+
+/* End of DLAHQR */
+
+} /* dlahqr_ */
diff --git a/contrib/libs/clapack/dlahr2.c b/contrib/libs/clapack/dlahr2.c
new file mode 100644
index 0000000000..9a17c87f8b
--- /dev/null
+++ b/contrib/libs/clapack/dlahr2.c
@@ -0,0 +1,315 @@
+/* dlahr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b4 = -1.;
+static doublereal c_b5 = 1.;
+static integer c__1 = 1;
+static doublereal c_b38 = 0.;
+
+/* Subroutine */ int dlahr2_(integer *n, integer *k, integer *nb, doublereal *
+ a, integer *lda, doublereal *tau, doublereal *t, integer *ldt,
+ doublereal *y, integer *ldy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2,
+ i__3;
+ doublereal d__1;
+
+ /* Local variables */
+ integer i__;
+ doublereal ei;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *), dgemm_(char *, char *, integer *, integer *, integer *
+, doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *), dgemv_(
+ char *, integer *, integer *, doublereal *, doublereal *, integer
+ *, doublereal *, integer *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer *, doublereal *,
+ integer *), dtrmm_(char *, char *, char *, char *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *), daxpy_(integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *),
+ dtrmv_(char *, char *, char *, integer *, doublereal *, integer *,
+ doublereal *, integer *), dlarfg_(
+ integer *, doublereal *, doublereal *, integer *, doublereal *),
+ dlacpy_(char *, integer *, integer *, doublereal *, integer *,
+ doublereal *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/*  DLAHR2 reduces the first NB columns of a real general n-BY-(n-k+1) */
+/* matrix A so that elements below the k-th subdiagonal are zero. The */
+/* reduction is performed by an orthogonal similarity transformation */
+/* Q' * A * Q. The routine returns the matrices V and T which determine */
+/* Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T. */
+
+/* This is an auxiliary routine called by DGEHRD. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* K (input) INTEGER */
+/* The offset for the reduction. Elements below the k-th */
+/* subdiagonal in the first NB columns are reduced to zero. */
+/* K < N. */
+
+/* NB (input) INTEGER */
+/* The number of columns to be reduced. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N-K+1) */
+/* On entry, the n-by-(n-k+1) general matrix A. */
+/* On exit, the elements on and above the k-th subdiagonal in */
+/* the first NB columns are overwritten with the corresponding */
+/* elements of the reduced matrix; the elements below the k-th */
+/* subdiagonal, with the array TAU, represent the matrix Q as a */
+/* product of elementary reflectors. The other columns of A are */
+/* unchanged. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (NB) */
+/* The scalar factors of the elementary reflectors. See Further */
+/* Details. */
+
+/* T (output) DOUBLE PRECISION array, dimension (LDT,NB) */
+/* The upper triangular matrix T. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= NB. */
+
+/* Y (output) DOUBLE PRECISION array, dimension (LDY,NB) */
+/* The n-by-nb matrix Y. */
+
+/* LDY (input) INTEGER */
+/* The leading dimension of the array Y. LDY >= N. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of nb elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(nb). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in */
+/* A(i+k+1:n,i), and tau in TAU(i). */
+
+/* The elements of the vectors v together form the (n-k+1)-by-nb matrix */
+/* V which is needed, with T and Y, to apply the transformation to the */
+/* unreduced part of the matrix, using an update of the form: */
+/* A := (I - V*T*V') * (A - Y*V'). */
+
+/* The contents of A on exit are illustrated by the following example */
+/* with n = 7, k = 3 and nb = 2: */
+
+/* ( a a a a a ) */
+/* ( a a a a a ) */
+/* ( a a a a a ) */
+/* ( h h a a a ) */
+/* ( v1 h a a a ) */
+/* ( v1 v2 a a a ) */
+/* ( v1 v2 a a a ) */
+
+/* where a denotes an element of the original matrix A, h denotes a */
+/* modified element of the upper Hessenberg matrix H, and vi denotes an */
+/* element of the vector defining H(i). */
+
+/* This file is a slight modification of LAPACK-3.0's DLAHRD */
+/* incorporating improvements proposed by Quintana-Orti and Van de */
+/*  Geijn. Note that the entries of A(1:K,2:NB) differ from those */
+/* returned by the original LAPACK routine. This function is */
+/* not backward compatible with LAPACK3.0. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ --tau;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ y_dim1 = *ldy;
+ y_offset = 1 + y_dim1;
+ y -= y_offset;
+
+ /* Function Body */
+ if (*n <= 1) {
+ return 0;
+ }
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (i__ > 1) {
+
+/* Update A(K+1:N,I) */
+
+/* Update I-th column of A - Y * V' */
+
+ i__2 = *n - *k;
+ i__3 = i__ - 1;
+ dgemv_("NO TRANSPOSE", &i__2, &i__3, &c_b4, &y[*k + 1 + y_dim1],
+ ldy, &a[*k + i__ - 1 + a_dim1], lda, &c_b5, &a[*k + 1 +
+ i__ * a_dim1], &c__1);
+
+/* Apply I - V * T' * V' to this column (call it b) from the */
+/* left, using the last column of T as workspace */
+
+/* Let V = ( V1 ) and b = ( b1 ) (first I-1 rows) */
+/* ( V2 ) ( b2 ) */
+
+/* where V1 is unit lower triangular */
+
+/* w := V1' * b1 */
+
+ i__2 = i__ - 1;
+ dcopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 +
+ 1], &c__1);
+ i__2 = i__ - 1;
+ dtrmv_("Lower", "Transpose", "UNIT", &i__2, &a[*k + 1 + a_dim1],
+ lda, &t[*nb * t_dim1 + 1], &c__1);
+
+/* w := w + V2'*b2 */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b5, &a[*k + i__ + a_dim1],
+ lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b5, &t[*nb *
+ t_dim1 + 1], &c__1);
+
+/* w := T'*w */
+
+ i__2 = i__ - 1;
+ dtrmv_("Upper", "Transpose", "NON-UNIT", &i__2, &t[t_offset], ldt,
+ &t[*nb * t_dim1 + 1], &c__1);
+
+/* b2 := b2 - V2*w */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("NO TRANSPOSE", &i__2, &i__3, &c_b4, &a[*k + i__ + a_dim1],
+ lda, &t[*nb * t_dim1 + 1], &c__1, &c_b5, &a[*k + i__ +
+ i__ * a_dim1], &c__1);
+
+/* b1 := b1 - V1*w */
+
+ i__2 = i__ - 1;
+ dtrmv_("Lower", "NO TRANSPOSE", "UNIT", &i__2, &a[*k + 1 + a_dim1]
+, lda, &t[*nb * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ daxpy_(&i__2, &c_b4, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 + i__
+ * a_dim1], &c__1);
+
+ a[*k + i__ - 1 + (i__ - 1) * a_dim1] = ei;
+ }
+
+/* Generate the elementary reflector H(I) to annihilate */
+/* A(K+I+1:N,I) */
+
+ i__2 = *n - *k - i__ + 1;
+/* Computing MIN */
+ i__3 = *k + i__ + 1;
+ dlarfg_(&i__2, &a[*k + i__ + i__ * a_dim1], &a[min(i__3, *n)+ i__ *
+ a_dim1], &c__1, &tau[i__]);
+ ei = a[*k + i__ + i__ * a_dim1];
+ a[*k + i__ + i__ * a_dim1] = 1.;
+
+/* Compute Y(K+1:N,I) */
+
+ i__2 = *n - *k;
+ i__3 = *n - *k - i__ + 1;
+ dgemv_("NO TRANSPOSE", &i__2, &i__3, &c_b5, &a[*k + 1 + (i__ + 1) *
+ a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b38, &y[*
+ k + 1 + i__ * y_dim1], &c__1);
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b5, &a[*k + i__ + a_dim1], lda, &
+ a[*k + i__ + i__ * a_dim1], &c__1, &c_b38, &t[i__ * t_dim1 +
+ 1], &c__1);
+ i__2 = *n - *k;
+ i__3 = i__ - 1;
+ dgemv_("NO TRANSPOSE", &i__2, &i__3, &c_b4, &y[*k + 1 + y_dim1], ldy,
+ &t[i__ * t_dim1 + 1], &c__1, &c_b5, &y[*k + 1 + i__ * y_dim1],
+ &c__1);
+ i__2 = *n - *k;
+ dscal_(&i__2, &tau[i__], &y[*k + 1 + i__ * y_dim1], &c__1);
+
+/* Compute T(1:I,I) */
+
+ i__2 = i__ - 1;
+ d__1 = -tau[i__];
+ dscal_(&i__2, &d__1, &t[i__ * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ dtrmv_("Upper", "No Transpose", "NON-UNIT", &i__2, &t[t_offset], ldt,
+ &t[i__ * t_dim1 + 1], &c__1)
+ ;
+ t[i__ + i__ * t_dim1] = tau[i__];
+
+/* L10: */
+ }
+ a[*k + *nb + *nb * a_dim1] = ei;
+
+/* Compute Y(1:K,1:NB) */
+
+ dlacpy_("ALL", k, nb, &a[(a_dim1 << 1) + 1], lda, &y[y_offset], ldy);
+ dtrmm_("RIGHT", "Lower", "NO TRANSPOSE", "UNIT", k, nb, &c_b5, &a[*k + 1
+ + a_dim1], lda, &y[y_offset], ldy);
+ if (*n > *k + *nb) {
+ i__1 = *n - *k - *nb;
+ dgemm_("NO TRANSPOSE", "NO TRANSPOSE", k, nb, &i__1, &c_b5, &a[(*nb +
+ 2) * a_dim1 + 1], lda, &a[*k + 1 + *nb + a_dim1], lda, &c_b5,
+ &y[y_offset], ldy);
+ }
+ dtrmm_("RIGHT", "Upper", "NO TRANSPOSE", "NON-UNIT", k, nb, &c_b5, &t[
+ t_offset], ldt, &y[y_offset], ldy);
+
+ return 0;
+
+/* End of DLAHR2 */
+
+} /* dlahr2_ */
diff --git a/contrib/libs/clapack/dlahrd.c b/contrib/libs/clapack/dlahrd.c
new file mode 100644
index 0000000000..4516fbad61
--- /dev/null
+++ b/contrib/libs/clapack/dlahrd.c
@@ -0,0 +1,285 @@
+/* dlahrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b4 = -1.;
+static doublereal c_b5 = 1.;
+static integer c__1 = 1;
+static doublereal c_b38 = 0.;
+
+/* Subroutine */ int dlahrd_(integer *n, integer *k, integer *nb, doublereal *
+ a, integer *lda, doublereal *tau, doublereal *t, integer *ldt,
+ doublereal *y, integer *ldy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2,
+ i__3;
+ doublereal d__1;
+
+ /* Local variables */
+ integer i__;
+ doublereal ei;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *), dgemv_(char *, integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *), dcopy_(integer *, doublereal *,
+ integer *, doublereal *, integer *), daxpy_(integer *, doublereal
+ *, doublereal *, integer *, doublereal *, integer *), dtrmv_(char
+ *, char *, char *, integer *, doublereal *, integer *, doublereal
+ *, integer *), dlarfg_(integer *,
+ doublereal *, doublereal *, integer *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAHRD reduces the first NB columns of a real general n-by-(n-k+1) */
+/* matrix A so that elements below the k-th subdiagonal are zero. The */
+/* reduction is performed by an orthogonal similarity transformation */
+/* Q' * A * Q. The routine returns the matrices V and T which determine */
+/* Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T. */
+
+/* This is an OBSOLETE auxiliary routine. */
+/* This routine will be 'deprecated' in a future release. */
+/* Please use the new routine DLAHR2 instead. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* K (input) INTEGER */
+/* The offset for the reduction. Elements below the k-th */
+/* subdiagonal in the first NB columns are reduced to zero. */
+
+/* NB (input) INTEGER */
+/* The number of columns to be reduced. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N-K+1) */
+/* On entry, the n-by-(n-k+1) general matrix A. */
+/* On exit, the elements on and above the k-th subdiagonal in */
+/* the first NB columns are overwritten with the corresponding */
+/* elements of the reduced matrix; the elements below the k-th */
+/* subdiagonal, with the array TAU, represent the matrix Q as a */
+/* product of elementary reflectors. The other columns of A are */
+/* unchanged. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (NB) */
+/* The scalar factors of the elementary reflectors. See Further */
+/* Details. */
+
+/* T (output) DOUBLE PRECISION array, dimension (LDT,NB) */
+/* The upper triangular matrix T. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= NB. */
+
+/* Y (output) DOUBLE PRECISION array, dimension (LDY,NB) */
+/* The n-by-nb matrix Y. */
+
+/* LDY (input) INTEGER */
+/* The leading dimension of the array Y. LDY >= N. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of nb elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(nb). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in */
+/* A(i+k+1:n,i), and tau in TAU(i). */
+
+/* The elements of the vectors v together form the (n-k+1)-by-nb matrix */
+/* V which is needed, with T and Y, to apply the transformation to the */
+/* unreduced part of the matrix, using an update of the form: */
+/* A := (I - V*T*V') * (A - Y*V'). */
+
+/* The contents of A on exit are illustrated by the following example */
+/* with n = 7, k = 3 and nb = 2: */
+
+/* ( a h a a a ) */
+/* ( a h a a a ) */
+/* ( a h a a a ) */
+/* ( h h a a a ) */
+/* ( v1 h a a a ) */
+/* ( v1 v2 a a a ) */
+/* ( v1 v2 a a a ) */
+
+/* where a denotes an element of the original matrix A, h denotes a */
+/* modified element of the upper Hessenberg matrix H, and vi denotes an */
+/* element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ --tau;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ y_dim1 = *ldy;
+ y_offset = 1 + y_dim1;
+ y -= y_offset;
+
+ /* Function Body */
+ if (*n <= 1) {
+ return 0;
+ }
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (i__ > 1) {
+
+/* Update A(1:n,i) */
+
+/* Compute i-th column of A - Y * V' */
+
+ i__2 = i__ - 1;
+ dgemv_("No transpose", n, &i__2, &c_b4, &y[y_offset], ldy, &a[*k
+ + i__ - 1 + a_dim1], lda, &c_b5, &a[i__ * a_dim1 + 1], &
+ c__1);
+
+/* Apply I - V * T' * V' to this column (call it b) from the */
+/* left, using the last column of T as workspace */
+
+/* Let V = ( V1 ) and b = ( b1 ) (first I-1 rows) */
+/* ( V2 ) ( b2 ) */
+
+/* where V1 is unit lower triangular */
+
+/* w := V1' * b1 */
+
+ i__2 = i__ - 1;
+ dcopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 +
+ 1], &c__1);
+ i__2 = i__ - 1;
+ dtrmv_("Lower", "Transpose", "Unit", &i__2, &a[*k + 1 + a_dim1],
+ lda, &t[*nb * t_dim1 + 1], &c__1);
+
+/* w := w + V2'*b2 */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b5, &a[*k + i__ + a_dim1],
+ lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b5, &t[*nb *
+ t_dim1 + 1], &c__1);
+
+/* w := T'*w */
+
+ i__2 = i__ - 1;
+ dtrmv_("Upper", "Transpose", "Non-unit", &i__2, &t[t_offset], ldt,
+ &t[*nb * t_dim1 + 1], &c__1);
+
+/* b2 := b2 - V2*w */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b4, &a[*k + i__ + a_dim1],
+ lda, &t[*nb * t_dim1 + 1], &c__1, &c_b5, &a[*k + i__ +
+ i__ * a_dim1], &c__1);
+
+/* b1 := b1 - V1*w */
+
+ i__2 = i__ - 1;
+ dtrmv_("Lower", "No transpose", "Unit", &i__2, &a[*k + 1 + a_dim1]
+, lda, &t[*nb * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ daxpy_(&i__2, &c_b4, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 + i__
+ * a_dim1], &c__1);
+
+ a[*k + i__ - 1 + (i__ - 1) * a_dim1] = ei;
+ }
+
+/* Generate the elementary reflector H(i) to annihilate */
+/* A(k+i+1:n,i) */
+
+ i__2 = *n - *k - i__ + 1;
+/* Computing MIN */
+ i__3 = *k + i__ + 1;
+ dlarfg_(&i__2, &a[*k + i__ + i__ * a_dim1], &a[min(i__3, *n)+ i__ *
+ a_dim1], &c__1, &tau[i__]);
+ ei = a[*k + i__ + i__ * a_dim1];
+ a[*k + i__ + i__ * a_dim1] = 1.;
+
+/* Compute Y(1:n,i) */
+
+ i__2 = *n - *k - i__ + 1;
+ dgemv_("No transpose", n, &i__2, &c_b5, &a[(i__ + 1) * a_dim1 + 1],
+ lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b38, &y[i__ *
+ y_dim1 + 1], &c__1);
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b5, &a[*k + i__ + a_dim1], lda, &
+ a[*k + i__ + i__ * a_dim1], &c__1, &c_b38, &t[i__ * t_dim1 +
+ 1], &c__1);
+ i__2 = i__ - 1;
+ dgemv_("No transpose", n, &i__2, &c_b4, &y[y_offset], ldy, &t[i__ *
+ t_dim1 + 1], &c__1, &c_b5, &y[i__ * y_dim1 + 1], &c__1);
+ dscal_(n, &tau[i__], &y[i__ * y_dim1 + 1], &c__1);
+
+/* Compute T(1:i,i) */
+
+ i__2 = i__ - 1;
+ d__1 = -tau[i__];
+ dscal_(&i__2, &d__1, &t[i__ * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ dtrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[t_offset], ldt,
+ &t[i__ * t_dim1 + 1], &c__1)
+ ;
+ t[i__ + i__ * t_dim1] = tau[i__];
+
+/* L10: */
+ }
+ a[*k + *nb + *nb * a_dim1] = ei;
+
+ return 0;
+
+/* End of DLAHRD */
+
+} /* dlahrd_ */
diff --git a/contrib/libs/clapack/dlaic1.c b/contrib/libs/clapack/dlaic1.c
new file mode 100644
index 0000000000..b1a54fc12c
--- /dev/null
+++ b/contrib/libs/clapack/dlaic1.c
@@ -0,0 +1,326 @@
+/* dlaic1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b5 = 1.;
+
+/* Subroutine */ int dlaic1_(integer *job, integer *j, doublereal *x,
+ doublereal *sest, doublereal *w, doublereal *gamma, doublereal *
+ sestpr, doublereal *s, doublereal *c__)
+{
+ /* System generated locals */
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ doublereal b, t, s1, s2, eps, tmp;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ doublereal sine, test, zeta1, zeta2, alpha, norma;
+ extern doublereal dlamch_(char *);
+ doublereal absgam, absalp, cosine, absest;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAIC1 applies one step of incremental condition estimation in */
+/* its simplest version: */
+
+/* Let x, twonorm(x) = 1, be an approximate singular vector of an j-by-j */
+/* lower triangular matrix L, such that */
+/* twonorm(L*x) = sest */
+/* Then DLAIC1 computes sestpr, s, c such that */
+/* the vector */
+/* [ s*x ] */
+/* xhat = [ c ] */
+/* is an approximate singular vector of */
+/* [ L 0 ] */
+/* Lhat = [ w' gamma ] */
+/* in the sense that */
+/* twonorm(Lhat*xhat) = sestpr. */
+
+/* Depending on JOB, an estimate for the largest or smallest singular */
+/* value is computed. */
+
+/* Note that [s c]' and sestpr**2 is an eigenpair of the system */
+
+/* diag(sest*sest, 0) + [alpha gamma] * [ alpha ] */
+/* [ gamma ] */
+
+/* where alpha = x'*w. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) INTEGER */
+/* = 1: an estimate for the largest singular value is computed. */
+/* = 2: an estimate for the smallest singular value is computed. */
+
+/* J (input) INTEGER */
+/* Length of X and W */
+
+/* X (input) DOUBLE PRECISION array, dimension (J) */
+/* The j-vector x. */
+
+/* SEST (input) DOUBLE PRECISION */
+/* Estimated singular value of j by j matrix L */
+
+/* W (input) DOUBLE PRECISION array, dimension (J) */
+/* The j-vector w. */
+
+/* GAMMA (input) DOUBLE PRECISION */
+/* The diagonal element gamma. */
+
+/* SESTPR (output) DOUBLE PRECISION */
+/* Estimated singular value of (j+1) by (j+1) matrix Lhat. */
+
+/* S (output) DOUBLE PRECISION */
+/* Sine needed in forming xhat. */
+
+/* C (output) DOUBLE PRECISION */
+/* Cosine needed in forming xhat. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --w;
+ --x;
+
+ /* Function Body */
+ eps = dlamch_("Epsilon");
+ alpha = ddot_(j, &x[1], &c__1, &w[1], &c__1);
+
+ absalp = abs(alpha);
+ absgam = abs(*gamma);
+ absest = abs(*sest);
+
+ if (*job == 1) {
+
+/* Estimating largest singular value */
+
+/* special cases */
+
+ if (*sest == 0.) {
+ s1 = max(absgam,absalp);
+ if (s1 == 0.) {
+ *s = 0.;
+ *c__ = 1.;
+ *sestpr = 0.;
+ } else {
+ *s = alpha / s1;
+ *c__ = *gamma / s1;
+ tmp = sqrt(*s * *s + *c__ * *c__);
+ *s /= tmp;
+ *c__ /= tmp;
+ *sestpr = s1 * tmp;
+ }
+ return 0;
+ } else if (absgam <= eps * absest) {
+ *s = 1.;
+ *c__ = 0.;
+ tmp = max(absest,absalp);
+ s1 = absest / tmp;
+ s2 = absalp / tmp;
+ *sestpr = tmp * sqrt(s1 * s1 + s2 * s2);
+ return 0;
+ } else if (absalp <= eps * absest) {
+ s1 = absgam;
+ s2 = absest;
+ if (s1 <= s2) {
+ *s = 1.;
+ *c__ = 0.;
+ *sestpr = s2;
+ } else {
+ *s = 0.;
+ *c__ = 1.;
+ *sestpr = s1;
+ }
+ return 0;
+ } else if (absest <= eps * absalp || absest <= eps * absgam) {
+ s1 = absgam;
+ s2 = absalp;
+ if (s1 <= s2) {
+ tmp = s1 / s2;
+ *s = sqrt(tmp * tmp + 1.);
+ *sestpr = s2 * *s;
+ *c__ = *gamma / s2 / *s;
+ *s = d_sign(&c_b5, &alpha) / *s;
+ } else {
+ tmp = s2 / s1;
+ *c__ = sqrt(tmp * tmp + 1.);
+ *sestpr = s1 * *c__;
+ *s = alpha / s1 / *c__;
+ *c__ = d_sign(&c_b5, gamma) / *c__;
+ }
+ return 0;
+ } else {
+
+/* normal case */
+
+ zeta1 = alpha / absest;
+ zeta2 = *gamma / absest;
+
+ b = (1. - zeta1 * zeta1 - zeta2 * zeta2) * .5;
+ *c__ = zeta1 * zeta1;
+ if (b > 0.) {
+ t = *c__ / (b + sqrt(b * b + *c__));
+ } else {
+ t = sqrt(b * b + *c__) - b;
+ }
+
+ sine = -zeta1 / t;
+ cosine = -zeta2 / (t + 1.);
+ tmp = sqrt(sine * sine + cosine * cosine);
+ *s = sine / tmp;
+ *c__ = cosine / tmp;
+ *sestpr = sqrt(t + 1.) * absest;
+ return 0;
+ }
+
+ } else if (*job == 2) {
+
+/* Estimating smallest singular value */
+
+/* special cases */
+
+ if (*sest == 0.) {
+ *sestpr = 0.;
+ if (max(absgam,absalp) == 0.) {
+ sine = 1.;
+ cosine = 0.;
+ } else {
+ sine = -(*gamma);
+ cosine = alpha;
+ }
+/* Computing MAX */
+ d__1 = abs(sine), d__2 = abs(cosine);
+ s1 = max(d__1,d__2);
+ *s = sine / s1;
+ *c__ = cosine / s1;
+ tmp = sqrt(*s * *s + *c__ * *c__);
+ *s /= tmp;
+ *c__ /= tmp;
+ return 0;
+ } else if (absgam <= eps * absest) {
+ *s = 0.;
+ *c__ = 1.;
+ *sestpr = absgam;
+ return 0;
+ } else if (absalp <= eps * absest) {
+ s1 = absgam;
+ s2 = absest;
+ if (s1 <= s2) {
+ *s = 0.;
+ *c__ = 1.;
+ *sestpr = s1;
+ } else {
+ *s = 1.;
+ *c__ = 0.;
+ *sestpr = s2;
+ }
+ return 0;
+ } else if (absest <= eps * absalp || absest <= eps * absgam) {
+ s1 = absgam;
+ s2 = absalp;
+ if (s1 <= s2) {
+ tmp = s1 / s2;
+ *c__ = sqrt(tmp * tmp + 1.);
+ *sestpr = absest * (tmp / *c__);
+ *s = -(*gamma / s2) / *c__;
+ *c__ = d_sign(&c_b5, &alpha) / *c__;
+ } else {
+ tmp = s2 / s1;
+ *s = sqrt(tmp * tmp + 1.);
+ *sestpr = absest / *s;
+ *c__ = alpha / s1 / *s;
+ *s = -d_sign(&c_b5, gamma) / *s;
+ }
+ return 0;
+ } else {
+
+/* normal case */
+
+ zeta1 = alpha / absest;
+ zeta2 = *gamma / absest;
+
+/* Computing MAX */
+ d__3 = zeta1 * zeta1 + 1. + (d__1 = zeta1 * zeta2, abs(d__1)),
+ d__4 = (d__2 = zeta1 * zeta2, abs(d__2)) + zeta2 * zeta2;
+ norma = max(d__3,d__4);
+
+/* See if root is closer to zero or to ONE */
+
+ test = (zeta1 - zeta2) * 2. * (zeta1 + zeta2) + 1.;
+ if (test >= 0.) {
+
+/* root is close to zero, compute directly */
+
+ b = (zeta1 * zeta1 + zeta2 * zeta2 + 1.) * .5;
+ *c__ = zeta2 * zeta2;
+ t = *c__ / (b + sqrt((d__1 = b * b - *c__, abs(d__1))));
+ sine = zeta1 / (1. - t);
+ cosine = -zeta2 / t;
+ *sestpr = sqrt(t + eps * 4. * eps * norma) * absest;
+ } else {
+
+/* root is closer to ONE, shift by that amount */
+
+ b = (zeta2 * zeta2 + zeta1 * zeta1 - 1.) * .5;
+ *c__ = zeta1 * zeta1;
+ if (b >= 0.) {
+ t = -(*c__) / (b + sqrt(b * b + *c__));
+ } else {
+ t = b - sqrt(b * b + *c__);
+ }
+ sine = -zeta1 / t;
+ cosine = -zeta2 / (t + 1.);
+ *sestpr = sqrt(t + 1. + eps * 4. * eps * norma) * absest;
+ }
+ tmp = sqrt(sine * sine + cosine * cosine);
+ *s = sine / tmp;
+ *c__ = cosine / tmp;
+ return 0;
+
+ }
+ }
+ return 0;
+
+/* End of DLAIC1 */
+
+} /* dlaic1_ */
diff --git a/contrib/libs/clapack/dlaisnan.c b/contrib/libs/clapack/dlaisnan.c
new file mode 100644
index 0000000000..ea4703c5a6
--- /dev/null
+++ b/contrib/libs/clapack/dlaisnan.c
@@ -0,0 +1,58 @@
+/* dlaisnan.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+logical dlaisnan_(doublereal *din1, doublereal *din2)
+{
+ /* System generated locals */
+ logical ret_val;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is not for general use. It exists solely to avoid */
+/* over-optimization in DISNAN. */
+
+/* DLAISNAN checks for NaNs by comparing its two arguments for */
+/* inequality. NaN is the only floating-point value where NaN != NaN */
+/* returns .TRUE. To check for NaNs, pass the same variable as both */
+/* arguments. */
+
+/* A compiler must assume that the two arguments are */
+/* not the same variable, and the test will not be optimized away. */
+/* Interprocedural or whole-program optimization may delete this */
+/* test. The ISNAN functions will be replaced by the correct */
+/* Fortran 03 intrinsic once the intrinsic is widely available. */
+
+/* Arguments */
+/* ========= */
+
+/* DIN1 (input) DOUBLE PRECISION */
+/* DIN2 (input) DOUBLE PRECISION */
+/* Two numbers to compare for inequality. */
+
+/* ===================================================================== */
+
+/* .. Executable Statements .. */
+ ret_val = *din1 != *din2;
+ return ret_val;
+} /* dlaisnan_ */
diff --git a/contrib/libs/clapack/dlaln2.c b/contrib/libs/clapack/dlaln2.c
new file mode 100644
index 0000000000..9eaa3edff6
--- /dev/null
+++ b/contrib/libs/clapack/dlaln2.c
@@ -0,0 +1,575 @@
+/* dlaln2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlaln2_(logical *ltrans, integer *na, integer *nw,
+ doublereal *smin, doublereal *ca, doublereal *a, integer *lda,
+ doublereal *d1, doublereal *d2, doublereal *b, integer *ldb,
+ doublereal *wr, doublereal *wi, doublereal *x, integer *ldx,
+ doublereal *scale, doublereal *xnorm, integer *info)
+{
+ /* Initialized data */
+
+ static logical zswap[4] = { FALSE_,FALSE_,TRUE_,TRUE_ };
+ static logical rswap[4] = { FALSE_,TRUE_,FALSE_,TRUE_ };
+ static integer ipivot[16] /* was [4][4] */ = { 1,2,3,4,2,1,4,3,3,4,1,2,
+ 4,3,2,1 };
+
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, x_dim1, x_offset;
+ doublereal d__1, d__2, d__3, d__4, d__5, d__6;
+ static doublereal equiv_0[4], equiv_1[4];
+
+ /* Local variables */
+ integer j;
+#define ci (equiv_0)
+#define cr (equiv_1)
+ doublereal bi1, bi2, br1, br2, xi1, xi2, xr1, xr2, ci21, ci22, cr21, cr22,
+ li21, csi, ui11, lr21, ui12, ui22;
+#define civ (equiv_0)
+ doublereal csr, ur11, ur12, ur22;
+#define crv (equiv_1)
+ doublereal bbnd, cmax, ui11r, ui12s, temp, ur11r, ur12s, u22abs;
+ integer icmax;
+ doublereal bnorm, cnorm, smini;
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int dladiv_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *);
+ doublereal bignum, smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLALN2 solves a system of the form (ca A - w D ) X = s B */
+/* or (ca A' - w D) X = s B with possible scaling ("s") and */
+/* perturbation of A. (A' means A-transpose.) */
+
+/* A is an NA x NA real matrix, ca is a real scalar, D is an NA x NA */
+/* real diagonal matrix, w is a real or complex value, and X and B are */
+/* NA x 1 matrices -- real if w is real, complex if w is complex. NA */
+/* may be 1 or 2. */
+
+/* If w is complex, X and B are represented as NA x 2 matrices, */
+/* the first column of each being the real part and the second */
+/* being the imaginary part. */
+
+/* "s" is a scaling factor (.LE. 1), computed by DLALN2, which is */
+/* so chosen that X can be computed without overflow. X is further */
+/* scaled if necessary to assure that norm(ca A - w D)*norm(X) is less */
+/* than overflow. */
+
+/* If both singular values of (ca A - w D) are less than SMIN, */
+/* SMIN*identity will be used instead of (ca A - w D). If only one */
+/* singular value is less than SMIN, one element of (ca A - w D) will be */
+/* perturbed enough to make the smallest singular value roughly SMIN. */
+/* If both singular values are at least SMIN, (ca A - w D) will not be */
+/* perturbed. In any case, the perturbation will be at most some small */
+/* multiple of max( SMIN, ulp*norm(ca A - w D) ). The singular values */
+/* are computed by infinity-norm approximations, and thus will only be */
+/* correct to a factor of 2 or so. */
+
+/* Note: all input quantities are assumed to be smaller than overflow */
+/* by a reasonable factor. (See BIGNUM.) */
+
+/* Arguments */
+/* ========== */
+
+/* LTRANS (input) LOGICAL */
+/* =.TRUE.: A-transpose will be used. */
+/* =.FALSE.: A will be used (not transposed.) */
+
+/* NA (input) INTEGER */
+/* The size of the matrix A. It may (only) be 1 or 2. */
+
+/* NW (input) INTEGER */
+/* 1 if "w" is real, 2 if "w" is complex. It may only be 1 */
+/* or 2. */
+
+/* SMIN (input) DOUBLE PRECISION */
+/* The desired lower bound on the singular values of A. This */
+/* should be a safe distance away from underflow or overflow, */
+/* say, between (underflow/machine precision) and (machine */
+/* precision * overflow ). (See BIGNUM and ULP.) */
+
+/* CA (input) DOUBLE PRECISION */
+/* The coefficient c, which A is multiplied by. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,NA) */
+/* The NA x NA matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. It must be at least NA. */
+
+/* D1 (input) DOUBLE PRECISION */
+/* The 1,1 element in the diagonal matrix D. */
+
+/* D2 (input) DOUBLE PRECISION */
+/* The 2,2 element in the diagonal matrix D. Not used if NW=1. */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,NW) */
+/* The NA x NW matrix B (right-hand side). If NW=2 ("w" is */
+/* complex), column 1 contains the real part of B and column 2 */
+/* contains the imaginary part. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. It must be at least NA. */
+
+/* WR (input) DOUBLE PRECISION */
+/* The real part of the scalar "w". */
+
+/* WI (input) DOUBLE PRECISION */
+/* The imaginary part of the scalar "w". Not used if NW=1. */
+
+/* X (output) DOUBLE PRECISION array, dimension (LDX,NW) */
+/* The NA x NW matrix X (unknowns), as computed by DLALN2. */
+/* If NW=2 ("w" is complex), on exit, column 1 will contain */
+/* the real part of X and column 2 will contain the imaginary */
+/* part. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of X. It must be at least NA. */
+
+/* SCALE (output) DOUBLE PRECISION */
+/* The scale factor that B must be multiplied by to insure */
+/* that overflow does not occur when computing X. Thus, */
+/* (ca A - w D) X will be SCALE*B, not B (ignoring */
+/* perturbations of A.) It will be at most 1. */
+
+/* XNORM (output) DOUBLE PRECISION */
+/* The infinity-norm of X, when X is regarded as an NA x NW */
+/* real matrix. */
+
+/* INFO (output) INTEGER */
+/* An error flag. It will be set to zero if no error occurs, */
+/* a negative number if an argument is in error, or a positive */
+/* number if ca A - w D had to be perturbed. */
+/* The possible values are: */
+/* = 0: No error occurred, and (ca A - w D) did not have to be */
+/* perturbed. */
+/* = 1: (ca A - w D) had to be perturbed to make its smallest */
+/* (or only) singular value greater than SMIN. */
+/* NOTE: In the interests of speed, this routine does not */
+/* check the inputs for errors. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Equivalences .. */
+/* .. */
+/* .. Data statements .. */
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+
+ /* Function Body */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Compute BIGNUM */
+
+ smlnum = 2. * dlamch_("Safe minimum");
+ bignum = 1. / smlnum;
+ smini = max(*smin,smlnum);
+
+/* Don't check for input errors */
+
+ *info = 0;
+
+/* Standard Initializations */
+
+ *scale = 1.;
+
+ if (*na == 1) {
+
+/* 1 x 1 (i.e., scalar) system C X = B */
+
+ if (*nw == 1) {
+
+/* Real 1x1 system. */
+
+/* C = ca A - w D */
+
+ csr = *ca * a[a_dim1 + 1] - *wr * *d1;
+ cnorm = abs(csr);
+
+/* If | C | < SMINI, use C = SMINI */
+
+ if (cnorm < smini) {
+ csr = smini;
+ cnorm = smini;
+ *info = 1;
+ }
+
+/* Check scaling for X = B / C */
+
+ bnorm = (d__1 = b[b_dim1 + 1], abs(d__1));
+ if (cnorm < 1. && bnorm > 1.) {
+ if (bnorm > bignum * cnorm) {
+ *scale = 1. / bnorm;
+ }
+ }
+
+/* Compute X */
+
+ x[x_dim1 + 1] = b[b_dim1 + 1] * *scale / csr;
+ *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1));
+ } else {
+
+/* Complex 1x1 system (w is complex) */
+
+/* C = ca A - w D */
+
+ csr = *ca * a[a_dim1 + 1] - *wr * *d1;
+ csi = -(*wi) * *d1;
+ cnorm = abs(csr) + abs(csi);
+
+/* If | C | < SMINI, use C = SMINI */
+
+ if (cnorm < smini) {
+ csr = smini;
+ csi = 0.;
+ cnorm = smini;
+ *info = 1;
+ }
+
+/* Check scaling for X = B / C */
+
+ bnorm = (d__1 = b[b_dim1 + 1], abs(d__1)) + (d__2 = b[(b_dim1 <<
+ 1) + 1], abs(d__2));
+ if (cnorm < 1. && bnorm > 1.) {
+ if (bnorm > bignum * cnorm) {
+ *scale = 1. / bnorm;
+ }
+ }
+
+/* Compute X */
+
+ d__1 = *scale * b[b_dim1 + 1];
+ d__2 = *scale * b[(b_dim1 << 1) + 1];
+ dladiv_(&d__1, &d__2, &csr, &csi, &x[x_dim1 + 1], &x[(x_dim1 << 1)
+ + 1]);
+ *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1)) + (d__2 = x[(x_dim1 <<
+ 1) + 1], abs(d__2));
+ }
+
+ } else {
+
+/* 2x2 System */
+
+/* Compute the real part of C = ca A - w D (or ca A' - w D ) */
+
+ cr[0] = *ca * a[a_dim1 + 1] - *wr * *d1;
+ cr[3] = *ca * a[(a_dim1 << 1) + 2] - *wr * *d2;
+ if (*ltrans) {
+ cr[2] = *ca * a[a_dim1 + 2];
+ cr[1] = *ca * a[(a_dim1 << 1) + 1];
+ } else {
+ cr[1] = *ca * a[a_dim1 + 2];
+ cr[2] = *ca * a[(a_dim1 << 1) + 1];
+ }
+
+ if (*nw == 1) {
+
+/* Real 2x2 system (w is real) */
+
+/* Find the largest element in C */
+
+ cmax = 0.;
+ icmax = 0;
+
+ for (j = 1; j <= 4; ++j) {
+ if ((d__1 = crv[j - 1], abs(d__1)) > cmax) {
+ cmax = (d__1 = crv[j - 1], abs(d__1));
+ icmax = j;
+ }
+/* L10: */
+ }
+
+/* If norm(C) < SMINI, use SMINI*identity. */
+
+ if (cmax < smini) {
+/* Computing MAX */
+ d__3 = (d__1 = b[b_dim1 + 1], abs(d__1)), d__4 = (d__2 = b[
+ b_dim1 + 2], abs(d__2));
+ bnorm = max(d__3,d__4);
+ if (smini < 1. && bnorm > 1.) {
+ if (bnorm > bignum * smini) {
+ *scale = 1. / bnorm;
+ }
+ }
+ temp = *scale / smini;
+ x[x_dim1 + 1] = temp * b[b_dim1 + 1];
+ x[x_dim1 + 2] = temp * b[b_dim1 + 2];
+ *xnorm = temp * bnorm;
+ *info = 1;
+ return 0;
+ }
+
+/* Gaussian elimination with complete pivoting. */
+
+ ur11 = crv[icmax - 1];
+ cr21 = crv[ipivot[(icmax << 2) - 3] - 1];
+ ur12 = crv[ipivot[(icmax << 2) - 2] - 1];
+ cr22 = crv[ipivot[(icmax << 2) - 1] - 1];
+ ur11r = 1. / ur11;
+ lr21 = ur11r * cr21;
+ ur22 = cr22 - ur12 * lr21;
+
+/* If smaller pivot < SMINI, use SMINI */
+
+ if (abs(ur22) < smini) {
+ ur22 = smini;
+ *info = 1;
+ }
+ if (rswap[icmax - 1]) {
+ br1 = b[b_dim1 + 2];
+ br2 = b[b_dim1 + 1];
+ } else {
+ br1 = b[b_dim1 + 1];
+ br2 = b[b_dim1 + 2];
+ }
+ br2 -= lr21 * br1;
+/* Computing MAX */
+ d__2 = (d__1 = br1 * (ur22 * ur11r), abs(d__1)), d__3 = abs(br2);
+ bbnd = max(d__2,d__3);
+ if (bbnd > 1. && abs(ur22) < 1.) {
+ if (bbnd >= bignum * abs(ur22)) {
+ *scale = 1. / bbnd;
+ }
+ }
+
+ xr2 = br2 * *scale / ur22;
+ xr1 = *scale * br1 * ur11r - xr2 * (ur11r * ur12);
+ if (zswap[icmax - 1]) {
+ x[x_dim1 + 1] = xr2;
+ x[x_dim1 + 2] = xr1;
+ } else {
+ x[x_dim1 + 1] = xr1;
+ x[x_dim1 + 2] = xr2;
+ }
+/* Computing MAX */
+ d__1 = abs(xr1), d__2 = abs(xr2);
+ *xnorm = max(d__1,d__2);
+
+/* Further scaling if norm(A) norm(X) > overflow */
+
+ if (*xnorm > 1. && cmax > 1.) {
+ if (*xnorm > bignum / cmax) {
+ temp = cmax / bignum;
+ x[x_dim1 + 1] = temp * x[x_dim1 + 1];
+ x[x_dim1 + 2] = temp * x[x_dim1 + 2];
+ *xnorm = temp * *xnorm;
+ *scale = temp * *scale;
+ }
+ }
+ } else {
+
+/* Complex 2x2 system (w is complex) */
+
+/* Find the largest element in C */
+
+ ci[0] = -(*wi) * *d1;
+ ci[1] = 0.;
+ ci[2] = 0.;
+ ci[3] = -(*wi) * *d2;
+ cmax = 0.;
+ icmax = 0;
+
+ for (j = 1; j <= 4; ++j) {
+ if ((d__1 = crv[j - 1], abs(d__1)) + (d__2 = civ[j - 1], abs(
+ d__2)) > cmax) {
+ cmax = (d__1 = crv[j - 1], abs(d__1)) + (d__2 = civ[j - 1]
+ , abs(d__2));
+ icmax = j;
+ }
+/* L20: */
+ }
+
+/* If norm(C) < SMINI, use SMINI*identity. */
+
+ if (cmax < smini) {
+/* Computing MAX */
+ d__5 = (d__1 = b[b_dim1 + 1], abs(d__1)) + (d__2 = b[(b_dim1
+ << 1) + 1], abs(d__2)), d__6 = (d__3 = b[b_dim1 + 2],
+ abs(d__3)) + (d__4 = b[(b_dim1 << 1) + 2], abs(d__4));
+ bnorm = max(d__5,d__6);
+ if (smini < 1. && bnorm > 1.) {
+ if (bnorm > bignum * smini) {
+ *scale = 1. / bnorm;
+ }
+ }
+ temp = *scale / smini;
+ x[x_dim1 + 1] = temp * b[b_dim1 + 1];
+ x[x_dim1 + 2] = temp * b[b_dim1 + 2];
+ x[(x_dim1 << 1) + 1] = temp * b[(b_dim1 << 1) + 1];
+ x[(x_dim1 << 1) + 2] = temp * b[(b_dim1 << 1) + 2];
+ *xnorm = temp * bnorm;
+ *info = 1;
+ return 0;
+ }
+
+/* Gaussian elimination with complete pivoting. */
+
+ ur11 = crv[icmax - 1];
+ ui11 = civ[icmax - 1];
+ cr21 = crv[ipivot[(icmax << 2) - 3] - 1];
+ ci21 = civ[ipivot[(icmax << 2) - 3] - 1];
+ ur12 = crv[ipivot[(icmax << 2) - 2] - 1];
+ ui12 = civ[ipivot[(icmax << 2) - 2] - 1];
+ cr22 = crv[ipivot[(icmax << 2) - 1] - 1];
+ ci22 = civ[ipivot[(icmax << 2) - 1] - 1];
+ if (icmax == 1 || icmax == 4) {
+
+/* Code when off-diagonals of pivoted C are real */
+
+ if (abs(ur11) > abs(ui11)) {
+ temp = ui11 / ur11;
+/* Computing 2nd power */
+ d__1 = temp;
+ ur11r = 1. / (ur11 * (d__1 * d__1 + 1.));
+ ui11r = -temp * ur11r;
+ } else {
+ temp = ur11 / ui11;
+/* Computing 2nd power */
+ d__1 = temp;
+ ui11r = -1. / (ui11 * (d__1 * d__1 + 1.));
+ ur11r = -temp * ui11r;
+ }
+ lr21 = cr21 * ur11r;
+ li21 = cr21 * ui11r;
+ ur12s = ur12 * ur11r;
+ ui12s = ur12 * ui11r;
+ ur22 = cr22 - ur12 * lr21;
+ ui22 = ci22 - ur12 * li21;
+ } else {
+
+/* Code when diagonals of pivoted C are real */
+
+ ur11r = 1. / ur11;
+ ui11r = 0.;
+ lr21 = cr21 * ur11r;
+ li21 = ci21 * ur11r;
+ ur12s = ur12 * ur11r;
+ ui12s = ui12 * ur11r;
+ ur22 = cr22 - ur12 * lr21 + ui12 * li21;
+ ui22 = -ur12 * li21 - ui12 * lr21;
+ }
+ u22abs = abs(ur22) + abs(ui22);
+
+/* If smaller pivot < SMINI, use SMINI */
+
+ if (u22abs < smini) {
+ ur22 = smini;
+ ui22 = 0.;
+ *info = 1;
+ }
+ if (rswap[icmax - 1]) {
+ br2 = b[b_dim1 + 1];
+ br1 = b[b_dim1 + 2];
+ bi2 = b[(b_dim1 << 1) + 1];
+ bi1 = b[(b_dim1 << 1) + 2];
+ } else {
+ br1 = b[b_dim1 + 1];
+ br2 = b[b_dim1 + 2];
+ bi1 = b[(b_dim1 << 1) + 1];
+ bi2 = b[(b_dim1 << 1) + 2];
+ }
+ br2 = br2 - lr21 * br1 + li21 * bi1;
+ bi2 = bi2 - li21 * br1 - lr21 * bi1;
+/* Computing MAX */
+ d__1 = (abs(br1) + abs(bi1)) * (u22abs * (abs(ur11r) + abs(ui11r))
+ ), d__2 = abs(br2) + abs(bi2);
+ bbnd = max(d__1,d__2);
+ if (bbnd > 1. && u22abs < 1.) {
+ if (bbnd >= bignum * u22abs) {
+ *scale = 1. / bbnd;
+ br1 = *scale * br1;
+ bi1 = *scale * bi1;
+ br2 = *scale * br2;
+ bi2 = *scale * bi2;
+ }
+ }
+
+ dladiv_(&br2, &bi2, &ur22, &ui22, &xr2, &xi2);
+ xr1 = ur11r * br1 - ui11r * bi1 - ur12s * xr2 + ui12s * xi2;
+ xi1 = ui11r * br1 + ur11r * bi1 - ui12s * xr2 - ur12s * xi2;
+ if (zswap[icmax - 1]) {
+ x[x_dim1 + 1] = xr2;
+ x[x_dim1 + 2] = xr1;
+ x[(x_dim1 << 1) + 1] = xi2;
+ x[(x_dim1 << 1) + 2] = xi1;
+ } else {
+ x[x_dim1 + 1] = xr1;
+ x[x_dim1 + 2] = xr2;
+ x[(x_dim1 << 1) + 1] = xi1;
+ x[(x_dim1 << 1) + 2] = xi2;
+ }
+/* Computing MAX */
+ d__1 = abs(xr1) + abs(xi1), d__2 = abs(xr2) + abs(xi2);
+ *xnorm = max(d__1,d__2);
+
+/* Further scaling if norm(A) norm(X) > overflow */
+
+ if (*xnorm > 1. && cmax > 1.) {
+ if (*xnorm > bignum / cmax) {
+ temp = cmax / bignum;
+ x[x_dim1 + 1] = temp * x[x_dim1 + 1];
+ x[x_dim1 + 2] = temp * x[x_dim1 + 2];
+ x[(x_dim1 << 1) + 1] = temp * x[(x_dim1 << 1) + 1];
+ x[(x_dim1 << 1) + 2] = temp * x[(x_dim1 << 1) + 2];
+ *xnorm = temp * *xnorm;
+ *scale = temp * *scale;
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DLALN2 */
+
+} /* dlaln2_ */
+
+#undef crv
+#undef civ
+#undef cr
+#undef ci
diff --git a/contrib/libs/clapack/dlals0.c b/contrib/libs/clapack/dlals0.c
new file mode 100644
index 0000000000..9f55866ce3
--- /dev/null
+++ b/contrib/libs/clapack/dlals0.c
@@ -0,0 +1,473 @@
+/* dlals0.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b5 = -1.;
+static integer c__1 = 1;
+static doublereal c_b11 = 1.;
+static doublereal c_b13 = 0.;
+static integer c__0 = 0;
+
+/* Subroutine */ int dlals0_(integer *icompq, integer *nl, integer *nr,
+ integer *sqre, integer *nrhs, doublereal *b, integer *ldb, doublereal
+ *bx, integer *ldbx, integer *perm, integer *givptr, integer *givcol,
+ integer *ldgcol, doublereal *givnum, integer *ldgnum, doublereal *
+ poles, doublereal *difl, doublereal *difr, doublereal *z__, integer *
+ k, doublereal *c__, doublereal *s, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, b_dim1, b_offset, bx_dim1, bx_offset,
+ difr_dim1, difr_offset, givnum_dim1, givnum_offset, poles_dim1,
+ poles_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Local variables */
+ integer i__, j, m, n;
+ doublereal dj;
+ integer nlp1;
+ doublereal temp;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ doublereal diflj, difrj, dsigj;
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *), dcopy_(integer *,
+ doublereal *, integer *, doublereal *, integer *);
+ extern doublereal dlamc3_(doublereal *, doublereal *);
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dlacpy_(char *, integer *, integer
+ *, doublereal *, integer *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+ doublereal dsigjp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLALS0 applies back the multiplying factors of either the left or the */
+/* right singular vector matrix of a diagonal matrix appended by a row */
+/* to the right hand side matrix B in solving the least squares problem */
+/* using the divide-and-conquer SVD approach. */
+
+/* For the left singular vector matrix, three types of orthogonal */
+/* matrices are involved: */
+
+/* (1L) Givens rotations: the number of such rotations is GIVPTR; the */
+/* pairs of columns/rows they were applied to are stored in GIVCOL; */
+/* and the C- and S-values of these rotations are stored in GIVNUM. */
+
+/* (2L) Permutation. The (NL+1)-st row of B is to be moved to the first */
+/* row, and for J=2:N, PERM(J)-th row of B is to be moved to the */
+/* J-th row. */
+
+/* (3L) The left singular vector matrix of the remaining matrix. */
+
+/* For the right singular vector matrix, four types of orthogonal */
+/* matrices are involved: */
+
+/* (1R) The right singular vector matrix of the remaining matrix. */
+
+/* (2R) If SQRE = 1, one extra Givens rotation to generate the right */
+/* null space. */
+
+/* (3R) The inverse transformation of (2L). */
+
+/* (4R) The inverse transformation of (1L). */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* Specifies whether singular vectors are to be computed in */
+/* factored form: */
+/* = 0: Left singular vector matrix. */
+/* = 1: Right singular vector matrix. */
+
+/* NL (input) INTEGER */
+/* The row dimension of the upper block. NL >= 1. */
+
+/* NR (input) INTEGER */
+/* The row dimension of the lower block. NR >= 1. */
+
+/* SQRE (input) INTEGER */
+/* = 0: the lower block is an NR-by-NR square matrix. */
+/* = 1: the lower block is an NR-by-(NR+1) rectangular matrix. */
+
+/* The bidiagonal matrix has row dimension N = NL + NR + 1, */
+/* and column dimension M = N + SQRE. */
+
+/* NRHS (input) INTEGER */
+/* The number of columns of B and BX. NRHS must be at least 1. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension ( LDB, NRHS ) */
+/* On input, B contains the right hand sides of the least */
+/* squares problem in rows 1 through M. On output, B contains */
+/* the solution X in rows 1 through N. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB must be at least */
+/* max(1,MAX( M, N ) ). */
+
+/* BX (workspace) DOUBLE PRECISION array, dimension ( LDBX, NRHS ) */
+
+/* LDBX (input) INTEGER */
+/* The leading dimension of BX. */
+
+/* PERM (input) INTEGER array, dimension ( N ) */
+/* The permutations (from deflation and sorting) applied */
+/* to the two blocks. */
+
+/* GIVPTR (input) INTEGER */
+/* The number of Givens rotations which took place in this */
+/* subproblem. */
+
+/* GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 ) */
+/* Each pair of numbers indicates a pair of rows/columns */
+/* involved in a Givens rotation. */
+
+/* LDGCOL (input) INTEGER */
+/* The leading dimension of GIVCOL, must be at least N. */
+
+/* GIVNUM (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) */
+/* Each number indicates the C or S value used in the */
+/* corresponding Givens rotation. */
+
+/* LDGNUM (input) INTEGER */
+/* The leading dimension of arrays DIFR, POLES and */
+/* GIVNUM, must be at least K. */
+
+/* POLES (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) */
+/* On entry, POLES(1:K, 1) contains the new singular */
+/* values obtained from solving the secular equation, and */
+/* POLES(1:K, 2) is an array containing the poles in the secular */
+/* equation. */
+
+/* DIFL (input) DOUBLE PRECISION array, dimension ( K ). */
+/* On entry, DIFL(I) is the distance between I-th updated */
+/* (undeflated) singular value and the I-th (undeflated) old */
+/* singular value. */
+
+/* DIFR (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ). */
+/* On entry, DIFR(I, 1) contains the distances between I-th */
+/* updated (undeflated) singular value and the I+1-th */
+/* (undeflated) old singular value. And DIFR(I, 2) is the */
+/* normalizing factor for the I-th right singular vector. */
+
+/* Z (input) DOUBLE PRECISION array, dimension ( K ) */
+/* Contain the components of the deflation-adjusted updating row */
+/* vector. */
+
+/* K (input) INTEGER */
+/* Contains the dimension of the non-deflated matrix, */
+/* This is the order of the related secular equation. 1 <= K <=N. */
+
+/* C (input) DOUBLE PRECISION */
+/* C contains garbage if SQRE =0 and the C-value of a Givens */
+/* rotation related to the right null space if SQRE = 1. */
+
+/* S (input) DOUBLE PRECISION */
+/* S contains garbage if SQRE =0 and the S-value of a Givens */
+/* rotation related to the right null space if SQRE = 1. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension ( K ) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Ren-Cang Li, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ bx_dim1 = *ldbx;
+ bx_offset = 1 + bx_dim1;
+ bx -= bx_offset;
+ --perm;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ difr_dim1 = *ldgnum;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ poles_dim1 = *ldgnum;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ givnum_dim1 = *ldgnum;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ --difl;
+ --z__;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*nl < 1) {
+ *info = -2;
+ } else if (*nr < 1) {
+ *info = -3;
+ } else if (*sqre < 0 || *sqre > 1) {
+ *info = -4;
+ }
+
+ n = *nl + *nr + 1;
+
+ if (*nrhs < 1) {
+ *info = -5;
+ } else if (*ldb < n) {
+ *info = -7;
+ } else if (*ldbx < n) {
+ *info = -9;
+ } else if (*givptr < 0) {
+ *info = -11;
+ } else if (*ldgcol < n) {
+ *info = -13;
+ } else if (*ldgnum < n) {
+ *info = -15;
+ } else if (*k < 1) {
+ *info = -20;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLALS0", &i__1);
+ return 0;
+ }
+
+ m = n + *sqre; /* M = N + SQRE: column dimension of the bidiagonal matrix (see SQRE doc above) */
+ nlp1 = *nl + 1; /* index of the (NL+1)-st row, i.e. the appended row moved in step (2L) */
+
+ if (*icompq == 0) {
+
+/* Apply back orthogonal transformations from the left. */
+
+/* Step (1L): apply back the Givens rotations performed. */
+
+ i__1 = *givptr;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ drot_(nrhs, &b[givcol[i__ + (givcol_dim1 << 1)] + b_dim1], ldb, &
+ b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[i__ +
+ (givnum_dim1 << 1)], &givnum[i__ + givnum_dim1]);
+/* L10: */
+ }
+
+/* Step (2L): permute rows of B. */
+
+ dcopy_(nrhs, &b[nlp1 + b_dim1], ldb, &bx[bx_dim1 + 1], ldbx);
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ dcopy_(nrhs, &b[perm[i__] + b_dim1], ldb, &bx[i__ + bx_dim1],
+ ldbx);
+/* L20: */
+ }
+
+/* Step (3L): apply the inverse of the left singular vector */
+/* matrix to BX. */
+
+ if (*k == 1) {
+ dcopy_(nrhs, &bx[bx_offset], ldbx, &b[b_offset], ldb);
+ if (z__[1] < 0.) {
+ dscal_(nrhs, &c_b5, &b[b_offset], ldb);
+ }
+ } else {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ diflj = difl[j];
+ dj = poles[j + poles_dim1];
+ dsigj = -poles[j + (poles_dim1 << 1)];
+ if (j < *k) {
+ difrj = -difr[j + difr_dim1];
+ dsigjp = -poles[j + 1 + (poles_dim1 << 1)];
+ }
+ if (z__[j] == 0. || poles[j + (poles_dim1 << 1)] == 0.) {
+ work[j] = 0.;
+ } else {
+ work[j] = -poles[j + (poles_dim1 << 1)] * z__[j] / diflj /
+ (poles[j + (poles_dim1 << 1)] + dj);
+ }
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (z__[i__] == 0. || poles[i__ + (poles_dim1 << 1)] ==
+ 0.) {
+ work[i__] = 0.;
+ } else {
+ work[i__] = poles[i__ + (poles_dim1 << 1)] * z__[i__]
+ / (dlamc3_(&poles[i__ + (poles_dim1 << 1)], &
+ dsigj) - diflj) / (poles[i__ + (poles_dim1 <<
+ 1)] + dj);
+ }
+/* L30: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ if (z__[i__] == 0. || poles[i__ + (poles_dim1 << 1)] ==
+ 0.) {
+ work[i__] = 0.;
+ } else {
+ work[i__] = poles[i__ + (poles_dim1 << 1)] * z__[i__]
+ / (dlamc3_(&poles[i__ + (poles_dim1 << 1)], &
+ dsigjp) + difrj) / (poles[i__ + (poles_dim1 <<
+ 1)] + dj);
+ }
+/* L40: */
+ }
+ work[1] = -1.;
+ temp = dnrm2_(k, &work[1], &c__1);
+ dgemv_("T", k, nrhs, &c_b11, &bx[bx_offset], ldbx, &work[1], &
+ c__1, &c_b13, &b[j + b_dim1], ldb);
+ dlascl_("G", &c__0, &c__0, &temp, &c_b11, &c__1, nrhs, &b[j +
+ b_dim1], ldb, info);
+/* L50: */
+ }
+ }
+
+/* Move the deflated rows of BX to B also. */
+
+ if (*k < max(m,n)) {
+ i__1 = n - *k;
+ dlacpy_("A", &i__1, nrhs, &bx[*k + 1 + bx_dim1], ldbx, &b[*k + 1
+ + b_dim1], ldb);
+ }
+ } else {
+
+/* Apply back the right orthogonal transformations. */
+
+/* Step (1R): apply back the new right singular vector matrix */
+/* to B. */
+
+ if (*k == 1) {
+ dcopy_(nrhs, &b[b_offset], ldb, &bx[bx_offset], ldbx);
+ } else {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dsigj = poles[j + (poles_dim1 << 1)];
+ if (z__[j] == 0.) {
+ work[j] = 0.;
+ } else {
+ work[j] = -z__[j] / difl[j] / (dsigj + poles[j +
+ poles_dim1]) / difr[j + (difr_dim1 << 1)];
+ }
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (z__[j] == 0.) {
+ work[i__] = 0.;
+ } else {
+ d__1 = -poles[i__ + 1 + (poles_dim1 << 1)];
+ work[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difr[
+ i__ + difr_dim1]) / (dsigj + poles[i__ +
+ poles_dim1]) / difr[i__ + (difr_dim1 << 1)];
+ }
+/* L60: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ if (z__[j] == 0.) {
+ work[i__] = 0.;
+ } else {
+ d__1 = -poles[i__ + (poles_dim1 << 1)];
+ work[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difl[
+ i__]) / (dsigj + poles[i__ + poles_dim1]) /
+ difr[i__ + (difr_dim1 << 1)];
+ }
+/* L70: */
+ }
+ dgemv_("T", k, nrhs, &c_b11, &b[b_offset], ldb, &work[1], &
+ c__1, &c_b13, &bx[j + bx_dim1], ldbx);
+/* L80: */
+ }
+ }
+
+/* Step (2R): if SQRE = 1, apply back the rotation that is */
+/* related to the right null space of the subproblem. */
+
+ if (*sqre == 1) {
+ dcopy_(nrhs, &b[m + b_dim1], ldb, &bx[m + bx_dim1], ldbx);
+ drot_(nrhs, &bx[bx_dim1 + 1], ldbx, &bx[m + bx_dim1], ldbx, c__,
+ s);
+ }
+ if (*k < max(m,n)) {
+ i__1 = n - *k;
+ dlacpy_("A", &i__1, nrhs, &b[*k + 1 + b_dim1], ldb, &bx[*k + 1 +
+ bx_dim1], ldbx);
+ }
+
+/* Step (3R): permute rows of B. */
+
+ dcopy_(nrhs, &bx[bx_dim1 + 1], ldbx, &b[nlp1 + b_dim1], ldb);
+ if (*sqre == 1) {
+ dcopy_(nrhs, &bx[m + bx_dim1], ldbx, &b[m + b_dim1], ldb);
+ }
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ dcopy_(nrhs, &bx[i__ + bx_dim1], ldbx, &b[perm[i__] + b_dim1],
+ ldb);
+/* L90: */
+ }
+
+/* Step (4R): apply back the Givens rotations performed. */
+
+ for (i__ = *givptr; i__ >= 1; --i__) {
+ d__1 = -givnum[i__ + givnum_dim1]; /* negated S-value: undoes the forward rotation of step (1L) */
+ drot_(nrhs, &b[givcol[i__ + (givcol_dim1 << 1)] + b_dim1], ldb, &
+ b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[i__ +
+ (givnum_dim1 << 1)], &d__1);
+/* L100: */
+ }
+ }
+
+ return 0;
+
+/* End of DLALS0 */
+
+} /* dlals0_ */
diff --git a/contrib/libs/clapack/dlalsa.c b/contrib/libs/clapack/dlalsa.c
new file mode 100644
index 0000000000..7c01dee151
--- /dev/null
+++ b/contrib/libs/clapack/dlalsa.c
@@ -0,0 +1,456 @@
+/* dlalsa.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b7 = 1.;
+static doublereal c_b8 = 0.;
+static integer c__2 = 2;
+
+/* Subroutine */ int dlalsa_(integer *icompq, integer *smlsiz, integer *n,
+ integer *nrhs, doublereal *b, integer *ldb, doublereal *bx, integer *
+ ldbx, doublereal *u, integer *ldu, doublereal *vt, integer *k,
+ doublereal *difl, doublereal *difr, doublereal *z__, doublereal *
+ poles, integer *givptr, integer *givcol, integer *ldgcol, integer *
+ perm, doublereal *givnum, doublereal *c__, doublereal *s, doublereal *
+ work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, b_dim1,
+ b_offset, bx_dim1, bx_offset, difl_dim1, difl_offset, difr_dim1,
+ difr_offset, givnum_dim1, givnum_offset, poles_dim1, poles_offset,
+ u_dim1, u_offset, vt_dim1, vt_offset, z_dim1, z_offset, i__1,
+ i__2;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ integer i__, j, i1, ic, lf, nd, ll, nl, nr, im1, nlf, nrf, lvl, ndb1,
+ nlp1, lvl2, nrp1, nlvl, sqre;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ integer inode, ndiml, ndimr;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dlals0_(integer *, integer *, integer *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ integer *, integer *, integer *, integer *, integer *, doublereal
+ *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *), dlasdt_(integer *, integer *, integer *, integer *,
+ integer *, integer *, integer *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLALSA is an intermediate step in solving the least squares problem */
+/* by computing the SVD of the coefficient matrix in compact form (The */
+/* singular vectors are computed as products of simple orthogonal */
+/* matrices.). */
+
+/* If ICOMPQ = 0, DLALSA applies the inverse of the left singular vector */
+/* matrix of an upper bidiagonal matrix to the right hand side; and if */
+/* ICOMPQ = 1, DLALSA applies the right singular vector matrix to the */
+/* right hand side. The singular vector matrices were generated in */
+/* compact form by DLASDA. */
+
+/* Arguments */
+/* ========= */
+
+
+/* ICOMPQ (input) INTEGER */
+/* Specifies whether the left or the right singular vector */
+/* matrix is involved. */
+/* = 0: Left singular vector matrix */
+/* = 1: Right singular vector matrix */
+
+/* SMLSIZ (input) INTEGER */
+/* The maximum size of the subproblems at the bottom of the */
+/* computation tree. */
+
+/* N (input) INTEGER */
+/* The row and column dimensions of the upper bidiagonal matrix. */
+
+/* NRHS (input) INTEGER */
+/* The number of columns of B and BX. NRHS must be at least 1. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension ( LDB, NRHS ) */
+/* On input, B contains the right hand sides of the least */
+/* squares problem in rows 1 through M. */
+/* On output, B contains the solution X in rows 1 through N. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B in the calling subprogram. */
+/* LDB must be at least max(1,MAX( M, N ) ). */
+
+/* BX (output) DOUBLE PRECISION array, dimension ( LDBX, NRHS ) */
+/* On exit, the result of applying the left or right singular */
+/* vector matrix to B. */
+
+/* LDBX (input) INTEGER */
+/* The leading dimension of BX. */
+
+/* U (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ ). */
+/* On entry, U contains the left singular vector matrices of all */
+/* subproblems at the bottom level. */
+
+/* LDU (input) INTEGER, LDU = > N. */
+/* The leading dimension of arrays U, VT, DIFL, DIFR, */
+/* POLES, GIVNUM, and Z. */
+
+/* VT (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ+1 ). */
+/* On entry, VT' contains the right singular vector matrices of */
+/* all subproblems at the bottom level. */
+
+/* K (input) INTEGER array, dimension ( N ). */
+
+/* DIFL (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ). */
+/* where NLVL = INT(log_2 (N/(SMLSIZ+1))) + 1. */
+
+/* DIFR (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ). */
+/* On entry, DIFL(*, I) and DIFR(*, 2 * I -1) record */
+/* distances between singular values on the I-th level and */
+/* singular values on the (I -1)-th level, and DIFR(*, 2 * I) */
+/* record the normalizing factors of the right singular vectors */
+/* matrices of subproblems on I-th level. */
+
+/* Z (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ). */
+/* On entry, Z(1, I) contains the components of the deflation- */
+/* adjusted updating row vector for subproblems on the I-th */
+/* level. */
+
+/* POLES (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ). */
+/* On entry, POLES(*, 2 * I -1: 2 * I) contains the new and old */
+/* singular values involved in the secular equations on the I-th */
+/* level. */
+
+/* GIVPTR (input) INTEGER array, dimension ( N ). */
+/* On entry, GIVPTR( I ) records the number of Givens */
+/* rotations performed on the I-th problem on the computation */
+/* tree. */
+
+/* GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 * NLVL ). */
+/* On entry, for each I, GIVCOL(*, 2 * I - 1: 2 * I) records the */
+/* locations of Givens rotations performed on the I-th level on */
+/* the computation tree. */
+
+/* LDGCOL (input) INTEGER, LDGCOL = > N. */
+/* The leading dimension of arrays GIVCOL and PERM. */
+
+/* PERM (input) INTEGER array, dimension ( LDGCOL, NLVL ). */
+/* On entry, PERM(*, I) records permutations done on the I-th */
+/* level of the computation tree. */
+
+/* GIVNUM (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ). */
+/* On entry, GIVNUM(*, 2 *I -1 : 2 * I) records the C- and S- */
+/* values of Givens rotations performed on the I-th level on the */
+/* computation tree. */
+
+/* C (input) DOUBLE PRECISION array, dimension ( N ). */
+/* On entry, if the I-th subproblem is not square, */
+/* C( I ) contains the C-value of a Givens rotation related to */
+/* the right null space of the I-th subproblem. */
+
+/* S (input) DOUBLE PRECISION array, dimension ( N ). */
+/* On entry, if the I-th subproblem is not square, */
+/* S( I ) contains the S-value of a Givens rotation related to */
+/* the right null space of the I-th subproblem. */
+
+/* WORK (workspace) DOUBLE PRECISION array. */
+/* The dimension must be at least N. */
+
+/* IWORK (workspace) INTEGER array. */
+/* The dimension must be at least 3 * N */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Ren-Cang Li, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ bx_dim1 = *ldbx;
+ bx_offset = 1 + bx_dim1;
+ bx -= bx_offset;
+ givnum_dim1 = *ldu;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ poles_dim1 = *ldu;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ z_dim1 = *ldu;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ difr_dim1 = *ldu;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ difl_dim1 = *ldu;
+ difl_offset = 1 + difl_dim1;
+ difl -= difl_offset;
+ vt_dim1 = *ldu;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ --k;
+ --givptr;
+ perm_dim1 = *ldgcol;
+ perm_offset = 1 + perm_dim1;
+ perm -= perm_offset;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ --c__;
+ --s;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*smlsiz < 3) {
+ *info = -2;
+ } else if (*n < *smlsiz) {
+ *info = -3;
+ } else if (*nrhs < 1) {
+ *info = -4;
+ } else if (*ldb < *n) {
+ *info = -6;
+ } else if (*ldbx < *n) {
+ *info = -8;
+ } else if (*ldu < *n) {
+ *info = -10;
+ } else if (*ldgcol < *n) {
+ *info = -19;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLALSA", &i__1);
+ return 0;
+ }
+
+/* Book-keeping and setting up the computation tree. */
+
+ inode = 1;
+ ndiml = inode + *n;
+ ndimr = ndiml + *n;
+
+ dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+ smlsiz);
+
+/* The following code applies back the left singular vector factors. */
+/* For applying back the right singular vector factors, go to 50. */
+
+ if (*icompq == 1) {
+ goto L50;
+ }
+
+/* The nodes on the bottom level of the tree were solved */
+/* by DLASDQ. The corresponding left and right singular vector */
+/* matrices are in explicit form. First apply back the left */
+/* singular vector matrices. */
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/* IC : center row of each node */
+/* NL : number of rows of left subproblem */
+/* NR : number of rows of right subproblem */
+/* NLF: starting row of the left subproblem */
+/* NRF: starting row of the right subproblem */
+
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nr = iwork[ndimr + i1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ dgemm_("T", "N", &nl, nrhs, &nl, &c_b7, &u[nlf + u_dim1], ldu, &b[nlf
+ + b_dim1], ldb, &c_b8, &bx[nlf + bx_dim1], ldbx);
+ dgemm_("T", "N", &nr, nrhs, &nr, &c_b7, &u[nrf + u_dim1], ldu, &b[nrf
+ + b_dim1], ldb, &c_b8, &bx[nrf + bx_dim1], ldbx);
+/* L10: */
+ }
+
+/* Next copy the rows of B that correspond to unchanged rows */
+/* in the bidiagonal matrix to BX. */
+
+ i__1 = nd;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ic = iwork[inode + i__ - 1];
+ dcopy_(nrhs, &b[ic + b_dim1], ldb, &bx[ic + bx_dim1], ldbx);
+/* L20: */
+ }
+
+/* Finally go through the left singular vector matrices of all */
+/* the other subproblems bottom-up on the tree. */
+
+ j = pow_ii(&c__2, &nlvl); /* j = 2**NLVL; decremented once per subproblem as the tree is walked bottom-up */
+ sqre = 0;
+
+ for (lvl = nlvl; lvl >= 1; --lvl) {
+ lvl2 = (lvl << 1) - 1;
+
+/* find the first node LF and last node LL on */
+/* the current level LVL */
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__1 = lvl - 1;
+ lf = pow_ii(&c__2, &i__1);
+ ll = (lf << 1) - 1;
+ }
+ i__1 = ll;
+ for (i__ = lf; i__ <= i__1; ++i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ --j;
+ dlals0_(icompq, &nl, &nr, &sqre, nrhs, &bx[nlf + bx_dim1], ldbx, &
+ b[nlf + b_dim1], ldb, &perm[nlf + lvl * perm_dim1], &
+ givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+ givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+ poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+ lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+ j], &s[j], &work[1], info);
+/* L30: */
+ }
+/* L40: */
+ }
+ goto L90;
+
+/* ICOMPQ = 1: applying back the right singular vector factors. */
+
+L50:
+
+/* First now go through the right singular vector matrices of all */
+/* the tree nodes top-down. */
+
+ j = 0;
+ i__1 = nlvl;
+ for (lvl = 1; lvl <= i__1; ++lvl) {
+ lvl2 = (lvl << 1) - 1;
+
+/* Find the first node LF and last node LL on */
+/* the current level LVL. */
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__2 = lvl - 1;
+ lf = pow_ii(&c__2, &i__2);
+ ll = (lf << 1) - 1;
+ }
+ i__2 = lf;
+ for (i__ = ll; i__ >= i__2; --i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ if (i__ == ll) {
+ sqre = 0;
+ } else {
+ sqre = 1;
+ }
+ ++j;
+ dlals0_(icompq, &nl, &nr, &sqre, nrhs, &b[nlf + b_dim1], ldb, &bx[
+ nlf + bx_dim1], ldbx, &perm[nlf + lvl * perm_dim1], &
+ givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+ givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+ poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+ lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+ j], &s[j], &work[1], info);
+/* L60: */
+ }
+/* L70: */
+ }
+
+/* The nodes on the bottom level of the tree were solved */
+/* by DLASDQ. The corresponding right singular vector */
+/* matrices are in explicit form. Apply them back. */
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nr = iwork[ndimr + i1];
+ nlp1 = nl + 1;
+ if (i__ == nd) {
+ nrp1 = nr;
+ } else {
+ nrp1 = nr + 1;
+ }
+ nlf = ic - nl;
+ nrf = ic + 1;
+ dgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b7, &vt[nlf + vt_dim1], ldu, &
+ b[nlf + b_dim1], ldb, &c_b8, &bx[nlf + bx_dim1], ldbx);
+ dgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b7, &vt[nrf + vt_dim1], ldu, &
+ b[nrf + b_dim1], ldb, &c_b8, &bx[nrf + bx_dim1], ldbx);
+/* L80: */
+ }
+
+L90:
+
+ return 0;
+
+/* End of DLALSA */
+
+} /* dlalsa_ */
diff --git a/contrib/libs/clapack/dlalsd.c b/contrib/libs/clapack/dlalsd.c
new file mode 100644
index 0000000000..2e52d50075
--- /dev/null
+++ b/contrib/libs/clapack/dlalsd.c
@@ -0,0 +1,529 @@
+/* dlalsd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c emits Fortran-style call-by-reference code, so these literal */
+/* constants are materialized as addressable statics (passed as &c__1 */
+/* etc. throughout dlalsd_ below). */
+
+static integer c__1 = 1;
+static doublereal c_b6 = 0.;
+static integer c__0 = 0;
+static doublereal c_b11 = 1.;
+
+/* Subroutine */ int dlalsd_(char *uplo, integer *smlsiz, integer *n, integer
+ *nrhs, doublereal *d__, doublereal *e, doublereal *b, integer *ldb,
+ doublereal *rcond, integer *rank, doublereal *work, integer *iwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double log(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ integer c__, i__, j, k;
+ doublereal r__;
+ integer s, u, z__;
+ doublereal cs;
+ integer bx;
+ doublereal sn;
+ integer st, vt, nm1, st1;
+ doublereal eps;
+ integer iwk;
+ doublereal tol;
+ integer difl, difr;
+ doublereal rcnd;
+ integer perm, nsub;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ integer nlvl, sqre, bxst;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *),
+ dcopy_(integer *, doublereal *, integer *, doublereal *, integer
+ *);
+ integer poles, sizei, nsize, nwork, icmpq1, icmpq2;
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int dlasda_(integer *, integer *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+ integer *), dlalsa_(integer *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ doublereal *, doublereal *, integer *, integer *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ integer *, integer *), dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dlasdq_(char *, integer *, integer *, integer
+ *, integer *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *), dlacpy_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *), dlaset_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+ integer givcol;
+ extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+ extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *,
+ integer *);
+ doublereal orgnrm;
+ integer givnum, givptr, smlszp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLALSD uses the singular value decomposition of A to solve the least */
+/* squares problem of finding X to minimize the Euclidean norm of each */
+/* column of A*X-B, where A is N-by-N upper bidiagonal, and X and B */
+/* are N-by-NRHS. The solution X overwrites B. */
+
+/* The singular values of A smaller than RCOND times the largest */
+/* singular value are treated as zero in solving the least squares */
+/* problem; in this case a minimum norm solution is returned. */
+/* The actual singular values are returned in D in ascending order. */
+
+/* This code makes very mild assumptions about floating point */
+/* arithmetic. It will work on machines with a guard digit in */
+/* add/subtract, or on those binary machines without guard digits */
+/* which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2. */
+/* It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': D and E define an upper bidiagonal matrix. */
+/* = 'L': D and E define a lower bidiagonal matrix. */
+
+/* SMLSIZ (input) INTEGER */
+/* The maximum size of the subproblems at the bottom of the */
+/* computation tree. */
+
+/* N (input) INTEGER */
+/* The dimension of the bidiagonal matrix. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of columns of B. NRHS must be at least 1. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry D contains the main diagonal of the bidiagonal */
+/* matrix. On exit, if INFO = 0, D contains its singular values. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* Contains the super-diagonal entries of the bidiagonal matrix. */
+/* On exit, E has been destroyed. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On input, B contains the right hand sides of the least */
+/* squares problem. On output, B contains the solution X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B in the calling subprogram. */
+/* LDB must be at least max(1,N). */
+
+/* RCOND (input) DOUBLE PRECISION */
+/* The singular values of A less than or equal to RCOND times */
+/* the largest singular value are treated as zero in solving */
+/* the least squares problem. If RCOND is negative, */
+/* machine precision is used instead. */
+/* For example, if diag(S)*X=B were the least squares problem, */
+/* where diag(S) is a diagonal matrix of singular values, the */
+/* solution would be X(i) = B(i) / S(i) if S(i) is greater than */
+/* RCOND*max(S), and X(i) = 0 if S(i) is less than or equal to */
+/* RCOND*max(S). */
+
+/* RANK (output) INTEGER */
+/* The number of singular values of A greater than RCOND times */
+/* the largest singular value. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension at least */
+/* (9*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2), */
+/* where NLVL = max(0, INT(log_2 (N/(SMLSIZ+1))) + 1). */
+
+/* IWORK (workspace) INTEGER array, dimension at least */
+/* (3*N*NLVL + 11*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: The algorithm failed to compute an singular value while */
+/* working on the submatrix lying in rows and columns */
+/* INFO/(N+1) through MOD(INFO,N+1). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Ren-Cang Li, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+/* NOTE(review): only N, NRHS and LDB are validated here; UPLO and */
+/* SMLSIZ are used unchecked, matching the reference Fortran DLALSD. */
+
+ if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 1) {
+ *info = -4;
+ } else if (*ldb < 1 || *ldb < *n) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLALSD", &i__1);
+ return 0;
+ }
+
+ eps = dlamch_("Epsilon");
+
+/* Set up the tolerance. */
+
+ if (*rcond <= 0. || *rcond >= 1.) {
+ rcnd = eps;
+ } else {
+ rcnd = *rcond;
+ }
+
+ *rank = 0;
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ } else if (*n == 1) {
+ if (d__[1] == 0.) {
+ dlaset_("A", &c__1, nrhs, &c_b6, &c_b6, &b[b_offset], ldb);
+ } else {
+ *rank = 1;
+ dlascl_("G", &c__0, &c__0, &d__[1], &c_b11, &c__1, nrhs, &b[
+ b_offset], ldb, info);
+ d__[1] = abs(d__[1]);
+ }
+ return 0;
+ }
+
+/* Rotate the matrix if it is lower bidiagonal. */
+
+ if (*(unsigned char *)uplo == 'L') {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ if (*nrhs == 1) {
+ drot_(&c__1, &b[i__ + b_dim1], &c__1, &b[i__ + 1 + b_dim1], &
+ c__1, &cs, &sn);
+ } else {
+/* Save the rotation (cs, sn) pairs in WORK so they can be */
+/* replayed on every right-hand side column below. */
+ work[(i__ << 1) - 1] = cs;
+ work[i__ * 2] = sn;
+ }
+/* L10: */
+ }
+ if (*nrhs > 1) {
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *n - 1;
+ for (j = 1; j <= i__2; ++j) {
+ cs = work[(j << 1) - 1];
+ sn = work[j * 2];
+ drot_(&c__1, &b[j + i__ * b_dim1], &c__1, &b[j + 1 + i__ *
+ b_dim1], &c__1, &cs, &sn);
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+ }
+
+/* Scale. */
+
+ nm1 = *n - 1;
+ orgnrm = dlanst_("M", n, &d__[1], &e[1]);
+ if (orgnrm == 0.) {
+ dlaset_("A", n, nrhs, &c_b6, &c_b6, &b[b_offset], ldb);
+ return 0;
+ }
+
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b11, n, &c__1, &d__[1], n, info);
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b11, &nm1, &c__1, &e[1], &nm1,
+ info);
+
+/* If N is smaller than the minimum divide size SMLSIZ, then solve */
+/* the problem with another solver. */
+
+ if (*n <= *smlsiz) {
+ nwork = *n * *n + 1;
+ dlaset_("A", n, n, &c_b6, &c_b11, &work[1], n);
+ dlasdq_("U", &c__0, n, n, &c__0, nrhs, &d__[1], &e[1], &work[1], n, &
+ work[1], n, &b[b_offset], ldb, &work[nwork], info);
+ if (*info != 0) {
+ return 0;
+ }
+ tol = rcnd * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1));
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] <= tol) {
+ dlaset_("A", &c__1, nrhs, &c_b6, &c_b6, &b[i__ + b_dim1], ldb);
+ } else {
+ dlascl_("G", &c__0, &c__0, &d__[i__], &c_b11, &c__1, nrhs, &b[
+ i__ + b_dim1], ldb, info);
+ ++(*rank);
+ }
+/* L40: */
+ }
+ dgemm_("T", "N", n, nrhs, n, &c_b11, &work[1], n, &b[b_offset], ldb, &
+ c_b6, &work[nwork], n);
+ dlacpy_("A", n, nrhs, &work[nwork], n, &b[b_offset], ldb);
+
+/* Unscale. */
+
+ dlascl_("G", &c__0, &c__0, &c_b11, &orgnrm, n, &c__1, &d__[1], n,
+ info);
+ dlasrt_("D", n, &d__[1], info);
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b11, n, nrhs, &b[b_offset],
+ ldb, info);
+
+ return 0;
+ }
+
+/* Book-keeping and setting up some constants. */
+
+ nlvl = (integer) (log((doublereal) (*n) / (doublereal) (*smlsiz + 1)) /
+ log(2.)) + 1;
+
+ smlszp = *smlsiz + 1;
+
+/* Carve the WORK array into consecutive panels (U, VT, DIFL, DIFR, */
+/* Z, C, S, POLES, GIVNUM, BX, scratch) and IWORK likewise into */
+/* integer panels; each name below is a 1-based starting offset. */
+
+ u = 1;
+ vt = *smlsiz * *n + 1;
+ difl = vt + smlszp * *n;
+ difr = difl + nlvl * *n;
+ z__ = difr + (nlvl * *n << 1);
+ c__ = z__ + nlvl * *n;
+ s = c__ + *n;
+ poles = s + *n;
+ givnum = poles + (nlvl << 1) * *n;
+ bx = givnum + (nlvl << 1) * *n;
+ nwork = bx + *n * *nrhs;
+
+ sizei = *n + 1;
+ k = sizei + *n;
+ givptr = k + *n;
+ perm = givptr + *n;
+ givcol = perm + nlvl * *n;
+ iwk = givcol + (nlvl * *n << 1);
+
+ st = 1;
+ sqre = 0;
+ icmpq1 = 1;
+ icmpq2 = 0;
+ nsub = 0;
+
+/* Replace diagonal entries smaller than EPS in magnitude by */
+/* +/-EPS so no (near-)exact zeros remain on the diagonal. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((d__1 = d__[i__], abs(d__1)) < eps) {
+ d__[i__] = d_sign(&eps, &d__[i__]);
+ }
+/* L50: */
+ }
+
+ i__1 = nm1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((d__1 = e[i__], abs(d__1)) < eps || i__ == nm1) {
+ ++nsub;
+ iwork[nsub] = st;
+
+/* Subproblem found. First determine its size and then */
+/* apply divide and conquer on it. */
+
+ if (i__ < nm1) {
+
+/* A subproblem with E(I) small for I < NM1. */
+
+ nsize = i__ - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ } else if ((d__1 = e[i__], abs(d__1)) >= eps) {
+
+/* A subproblem with E(NM1) not too small but I = NM1. */
+
+ nsize = *n - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ } else {
+
+/* A subproblem with E(NM1) small. This implies an */
+/* 1-by-1 subproblem at D(N), which is not solved */
+/* explicitly. */
+
+ nsize = i__ - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ ++nsub;
+ iwork[nsub] = *n;
+ iwork[sizei + nsub - 1] = 1;
+ dcopy_(nrhs, &b[*n + b_dim1], ldb, &work[bx + nm1], n);
+ }
+ st1 = st - 1;
+ if (nsize == 1) {
+
+/* This is a 1-by-1 subproblem and is not solved */
+/* explicitly. */
+
+ dcopy_(nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n);
+ } else if (nsize <= *smlsiz) {
+
+/* This is a small subproblem and is solved by DLASDQ. */
+
+ dlaset_("A", &nsize, &nsize, &c_b6, &c_b11, &work[vt + st1],
+ n);
+ dlasdq_("U", &c__0, &nsize, &nsize, &c__0, nrhs, &d__[st], &e[
+ st], &work[vt + st1], n, &work[nwork], n, &b[st +
+ b_dim1], ldb, &work[nwork], info);
+ if (*info != 0) {
+ return 0;
+ }
+ dlacpy_("A", &nsize, nrhs, &b[st + b_dim1], ldb, &work[bx +
+ st1], n);
+ } else {
+
+/* A large problem. Solve it using divide and conquer. */
+
+ dlasda_(&icmpq1, smlsiz, &nsize, &sqre, &d__[st], &e[st], &
+ work[u + st1], n, &work[vt + st1], &iwork[k + st1], &
+ work[difl + st1], &work[difr + st1], &work[z__ + st1],
+ &work[poles + st1], &iwork[givptr + st1], &iwork[
+ givcol + st1], n, &iwork[perm + st1], &work[givnum +
+ st1], &work[c__ + st1], &work[s + st1], &work[nwork],
+ &iwork[iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ bxst = bx + st1;
+ dlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &b[st + b_dim1], ldb, &
+ work[bxst], n, &work[u + st1], n, &work[vt + st1], &
+ iwork[k + st1], &work[difl + st1], &work[difr + st1],
+ &work[z__ + st1], &work[poles + st1], &iwork[givptr +
+ st1], &iwork[givcol + st1], n, &iwork[perm + st1], &
+ work[givnum + st1], &work[c__ + st1], &work[s + st1],
+ &work[nwork], &iwork[iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ }
+ st = i__ + 1;
+ }
+/* L60: */
+ }
+
+/* Apply the singular values and treat the tiny ones as zero. */
+
+ tol = rcnd * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1));
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Some of the elements in D can be negative because 1-by-1 */
+/* subproblems were not solved explicitly. */
+
+ if ((d__1 = d__[i__], abs(d__1)) <= tol) {
+ dlaset_("A", &c__1, nrhs, &c_b6, &c_b6, &work[bx + i__ - 1], n);
+ } else {
+ ++(*rank);
+ dlascl_("G", &c__0, &c__0, &d__[i__], &c_b11, &c__1, nrhs, &work[
+ bx + i__ - 1], n, info);
+ }
+ d__[i__] = (d__1 = d__[i__], abs(d__1));
+/* L70: */
+ }
+
+/* Now apply back the right singular vectors. */
+
+ icmpq2 = 1;
+ i__1 = nsub;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ st = iwork[i__];
+ st1 = st - 1;
+ nsize = iwork[sizei + i__ - 1];
+ bxst = bx + st1;
+ if (nsize == 1) {
+ dcopy_(nrhs, &work[bxst], n, &b[st + b_dim1], ldb);
+ } else if (nsize <= *smlsiz) {
+ dgemm_("T", "N", &nsize, nrhs, &nsize, &c_b11, &work[vt + st1], n,
+ &work[bxst], n, &c_b6, &b[st + b_dim1], ldb);
+ } else {
+ dlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &work[bxst], n, &b[st +
+ b_dim1], ldb, &work[u + st1], n, &work[vt + st1], &iwork[
+ k + st1], &work[difl + st1], &work[difr + st1], &work[z__
+ + st1], &work[poles + st1], &iwork[givptr + st1], &iwork[
+ givcol + st1], n, &iwork[perm + st1], &work[givnum + st1],
+ &work[c__ + st1], &work[s + st1], &work[nwork], &iwork[
+ iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ }
+/* L80: */
+ }
+
+/* Unscale and sort the singular values. */
+
+ dlascl_("G", &c__0, &c__0, &c_b11, &orgnrm, n, &c__1, &d__[1], n, info);
+ dlasrt_("D", n, &d__[1], info);
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b11, n, nrhs, &b[b_offset], ldb,
+ info);
+
+ return 0;
+
+/* End of DLALSD */
+
+} /* dlalsd_ */
diff --git a/contrib/libs/clapack/dlamch.c b/contrib/libs/clapack/dlamch.c
new file mode 100644
index 0000000000..1243e82642
--- /dev/null
+++ b/contrib/libs/clapack/dlamch.c
@@ -0,0 +1,1001 @@
+/* dlamch.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c passes every argument by address, so these literal constants */
+/* are materialized as addressable statics. */
+static integer c__1 = 1;
+static doublereal c_b32 = 0.;
+
+doublereal dlamch_(char *cmach)
+{
+ /* Initialized data */
+
+ static logical first = TRUE_;
+
+ /* System generated locals */
+ integer i__1;
+ doublereal ret_val;
+
+ /* Builtin functions */
+ double pow_di(doublereal *, integer *);
+
+ /* Local variables */
+ static doublereal t;
+ integer it;
+ static doublereal rnd, eps, base;
+ integer beta;
+ static doublereal emin, prec, emax;
+ integer imin, imax;
+ logical lrnd;
+ static doublereal rmin, rmax;
+ doublereal rmach;
+ extern logical lsame_(char *, char *);
+ doublereal small;
+ static doublereal sfmin;
+ extern /* Subroutine */ int dlamc2_(integer *, integer *, logical *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAMCH determines double precision machine parameters. */
+
+/* Arguments */
+/* ========= */
+
+/* CMACH (input) CHARACTER*1 */
+/* Specifies the value to be returned by DLAMCH: */
+/* = 'E' or 'e', DLAMCH := eps */
+/* = 'S' or 's , DLAMCH := sfmin */
+/* = 'B' or 'b', DLAMCH := base */
+/* = 'P' or 'p', DLAMCH := eps*base */
+/* = 'N' or 'n', DLAMCH := t */
+/* = 'R' or 'r', DLAMCH := rnd */
+/* = 'M' or 'm', DLAMCH := emin */
+/* = 'U' or 'u', DLAMCH := rmin */
+/* = 'L' or 'l', DLAMCH := emax */
+/* = 'O' or 'o', DLAMCH := rmax */
+
+/* where */
+
+/* eps = relative machine precision */
+/* sfmin = safe minimum, such that 1/sfmin does not overflow */
+/* base = base of the machine */
+/* prec = eps*base */
+/* t = number of (base) digits in the mantissa */
+/* rnd = 1.0 when rounding occurs in addition, 0.0 otherwise */
+/* emin = minimum exponent before (gradual) underflow */
+/* rmin = underflow threshold - base**(emin-1) */
+/* emax = largest exponent before overflow */
+/* rmax = overflow threshold - (base**emax)*(1-eps) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Save statement .. */
+/* .. */
+/* .. Data statements .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* All machine parameters are probed on the first call (via DLAMC2) */
+/* and cached in static locals for subsequent calls. */
+/* NOTE(review): the first-call initialization writes shared static */
+/* state with no synchronization -- concurrent first calls would race. */
+
+ if (first) {
+ dlamc2_(&beta, &it, &lrnd, &eps, &imin, &rmin, &imax, &rmax);
+ base = (doublereal) beta;
+ t = (doublereal) it;
+ if (lrnd) {
+ rnd = 1.;
+ i__1 = 1 - it;
+ eps = pow_di(&base, &i__1) / 2;
+ } else {
+ rnd = 0.;
+ i__1 = 1 - it;
+ eps = pow_di(&base, &i__1);
+ }
+ prec = eps * base;
+ emin = (doublereal) imin;
+ emax = (doublereal) imax;
+ sfmin = rmin;
+ small = 1. / rmax;
+ if (small >= sfmin) {
+
+/* Use SMALL plus a bit, to avoid the possibility of rounding */
+/* causing overflow when computing 1/sfmin. */
+
+ sfmin = small * (eps + 1.);
+ }
+ }
+
+ if (lsame_(cmach, "E")) {
+ rmach = eps;
+ } else if (lsame_(cmach, "S")) {
+ rmach = sfmin;
+ } else if (lsame_(cmach, "B")) {
+ rmach = base;
+ } else if (lsame_(cmach, "P")) {
+ rmach = prec;
+ } else if (lsame_(cmach, "N")) {
+ rmach = t;
+ } else if (lsame_(cmach, "R")) {
+ rmach = rnd;
+ } else if (lsame_(cmach, "M")) {
+ rmach = emin;
+ } else if (lsame_(cmach, "U")) {
+ rmach = rmin;
+ } else if (lsame_(cmach, "L")) {
+ rmach = emax;
+ } else if (lsame_(cmach, "O")) {
+ rmach = rmax;
+ }
+
+ ret_val = rmach;
+ first = FALSE_;
+ return ret_val;
+
+/* End of DLAMCH */
+
+} /* dlamch_ */
+
+
+/* *********************************************************************** */
+
+/* Subroutine */ int dlamc1_(integer *beta, integer *t, logical *rnd, logical
+ *ieee1)
+{
+ /* Initialized data */
+
+ static logical first = TRUE_;
+
+ /* System generated locals */
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ doublereal a, b, c__, f, t1, t2;
+ static integer lt;
+ doublereal one, qtr;
+ static logical lrnd;
+ static integer lbeta;
+ doublereal savec;
+ extern doublereal dlamc3_(doublereal *, doublereal *);
+ static logical lieee1;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAMC1 determines the machine parameters given by BETA, T, RND, and */
+/* IEEE1. */
+
+/* Arguments */
+/* ========= */
+
+/* BETA (output) INTEGER */
+/* The base of the machine. */
+
+/* T (output) INTEGER */
+/* The number of ( BETA ) digits in the mantissa. */
+
+/* RND (output) LOGICAL */
+/* Specifies whether proper rounding ( RND = .TRUE. ) or */
+/* chopping ( RND = .FALSE. ) occurs in addition. This may not */
+/* be a reliable guide to the way in which the machine performs */
+/* its arithmetic. */
+
+/* IEEE1 (output) LOGICAL */
+/* Specifies whether rounding appears to be done in the IEEE */
+/* 'round to nearest' style. */
+
+/* Further Details */
+/* =============== */
+
+/* The routine is based on the routine ENVRON by Malcolm and */
+/* incorporates suggestions by Gentleman and Marovich. See */
+
+/* Malcolm M. A. (1972) Algorithms to reveal properties of */
+/* floating-point arithmetic. Comms. of the ACM, 15, 949-951. */
+
+/* Gentleman W. M. and Marovich S. B. (1974) More on algorithms */
+/* that reveal properties of floating point arithmetic units. */
+/* Comms. of the ACM, 17, 276-277. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Save statement .. */
+/* .. */
+/* .. Data statements .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* The detected parameters are cached in static locals (LBETA, LT, */
+/* LRND, LIEEE1) on the first call; later calls only copy them to the */
+/* output arguments. The gotos below are f2c translations of the */
+/* original Fortran WHILE loops. */
+
+ if (first) {
+ one = 1.;
+
+/* LBETA, LIEEE1, LT and LRND are the local values of BETA, */
+/* IEEE1, T and RND. */
+
+/* Throughout this routine we use the function DLAMC3 to ensure */
+/* that relevant values are stored and not held in registers, or */
+/* are not affected by optimizers. */
+
+/* Compute a = 2.0**m with the smallest positive integer m such */
+/* that */
+
+/* fl( a + 1.0 ) = a. */
+
+ a = 1.;
+ c__ = 1.;
+
+/* + WHILE( C.EQ.ONE )LOOP */
+L10:
+ if (c__ == one) {
+ a *= 2;
+ c__ = dlamc3_(&a, &one);
+ d__1 = -a;
+ c__ = dlamc3_(&c__, &d__1);
+ goto L10;
+ }
+/* + END WHILE */
+
+/* Now compute b = 2.0**m with the smallest positive integer m */
+/* such that */
+
+/* fl( a + b ) .gt. a. */
+
+ b = 1.;
+ c__ = dlamc3_(&a, &b);
+
+/* + WHILE( C.EQ.A )LOOP */
+L20:
+ if (c__ == a) {
+ b *= 2;
+ c__ = dlamc3_(&a, &b);
+ goto L20;
+ }
+/* + END WHILE */
+
+/* Now compute the base. a and c are neighbouring floating point */
+/* numbers in the interval ( beta**t, beta**( t + 1 ) ) and so */
+/* their difference is beta. Adding 0.25 to c is to ensure that it */
+/* is truncated to beta and not ( beta - 1 ). */
+
+ qtr = one / 4;
+ savec = c__;
+ d__1 = -a;
+ c__ = dlamc3_(&c__, &d__1);
+ lbeta = (integer) (c__ + qtr);
+
+/* Now determine whether rounding or chopping occurs, by adding a */
+/* bit less than beta/2 and a bit more than beta/2 to a. */
+
+ b = (doublereal) lbeta;
+ d__1 = b / 2;
+ d__2 = -b / 100;
+ f = dlamc3_(&d__1, &d__2);
+ c__ = dlamc3_(&f, &a);
+ if (c__ == a) {
+ lrnd = TRUE_;
+ } else {
+ lrnd = FALSE_;
+ }
+ d__1 = b / 2;
+ d__2 = b / 100;
+ f = dlamc3_(&d__1, &d__2);
+ c__ = dlamc3_(&f, &a);
+ if (lrnd && c__ == a) {
+ lrnd = FALSE_;
+ }
+
+/* Try and decide whether rounding is done in the IEEE 'round to */
+/* nearest' style. B/2 is half a unit in the last place of the two */
+/* numbers A and SAVEC. Furthermore, A is even, i.e. has last bit */
+/* zero, and SAVEC is odd. Thus adding B/2 to A should not change */
+/* A, but adding B/2 to SAVEC should change SAVEC. */
+
+ d__1 = b / 2;
+ t1 = dlamc3_(&d__1, &a);
+ d__1 = b / 2;
+ t2 = dlamc3_(&d__1, &savec);
+ lieee1 = t1 == a && t2 > savec && lrnd;
+
+/* Now find the mantissa, t. It should be the integer part of */
+/* log to the base beta of a, however it is safer to determine t */
+/* by powering. So we find t as the smallest positive integer for */
+/* which */
+
+/* fl( beta**t + 1.0 ) = 1.0. */
+
+ lt = 0;
+ a = 1.;
+ c__ = 1.;
+
+/* + WHILE( C.EQ.ONE )LOOP */
+L30:
+ if (c__ == one) {
+ ++lt;
+ a *= lbeta;
+ c__ = dlamc3_(&a, &one);
+ d__1 = -a;
+ c__ = dlamc3_(&c__, &d__1);
+ goto L30;
+ }
+/* + END WHILE */
+
+ }
+
+ *beta = lbeta;
+ *t = lt;
+ *rnd = lrnd;
+ *ieee1 = lieee1;
+ first = FALSE_;
+ return 0;
+
+/* End of DLAMC1 */
+
+} /* dlamc1_ */
+
+
+/* *********************************************************************** */
+
+/* Subroutine */ int dlamc2_(integer *beta, integer *t, logical *rnd,
+ doublereal *eps, integer *emin, doublereal *rmin, integer *emax,
+ doublereal *rmax)
+{
+ /* Initialized data */
+
+ static logical first = TRUE_;
+ static logical iwarn = FALSE_;
+
+ /* Format strings */
+ static char fmt_9999[] = "(//\002 WARNING. The value EMIN may be incorre"
+ "ct:-\002,\002 EMIN = \002,i8,/\002 If, after inspection, the va"
+ "lue EMIN looks\002,\002 acceptable please comment out \002,/\002"
+ " the IF block as marked within the code of routine\002,\002 DLAM"
+ "C2,\002,/\002 otherwise supply EMIN explicitly.\002,/)";
+
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2, d__3, d__4, d__5;
+
+ /* Builtin functions */
+ double pow_di(doublereal *, integer *);
+ integer s_wsfe(cilist *), do_fio(integer *, char *, ftnlen), e_wsfe(void);
+
+ /* Local variables */
+ doublereal a, b, c__;
+ integer i__;
+ static integer lt;
+ doublereal one, two;
+ logical ieee;
+ doublereal half;
+ logical lrnd;
+ static doublereal leps;
+ doublereal zero;
+ static integer lbeta;
+ doublereal rbase;
+ static integer lemin, lemax;
+ integer gnmin;
+ doublereal small;
+ integer gpmin;
+ doublereal third;
+ static doublereal lrmin, lrmax;
+ doublereal sixth;
+ extern /* Subroutine */ int dlamc1_(integer *, integer *, logical *,
+ logical *);
+ extern doublereal dlamc3_(doublereal *, doublereal *);
+ logical lieee1;
+ extern /* Subroutine */ int dlamc4_(integer *, doublereal *, integer *),
+ dlamc5_(integer *, integer *, integer *, logical *, integer *,
+ doublereal *);
+ integer ngnmin, ngpmin;
+
+ /* Fortran I/O blocks */
+ static cilist io___58 = { 0, 6, 0, fmt_9999, 0 };
+
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAMC2 determines the machine parameters specified in its argument */
+/* list. */
+
+/* Arguments */
+/* ========= */
+
+/* BETA (output) INTEGER */
+/* The base of the machine. */
+
+/* T (output) INTEGER */
+/* The number of ( BETA ) digits in the mantissa. */
+
+/* RND (output) LOGICAL */
+/* Specifies whether proper rounding ( RND = .TRUE. ) or */
+/* chopping ( RND = .FALSE. ) occurs in addition. This may not */
+/* be a reliable guide to the way in which the machine performs */
+/* its arithmetic. */
+
+/* EPS (output) DOUBLE PRECISION */
+/* The smallest positive number such that */
+
+/* fl( 1.0 - EPS ) .LT. 1.0, */
+
+/* where fl denotes the computed value. */
+
+/* EMIN (output) INTEGER */
+/* The minimum exponent before (gradual) underflow occurs. */
+
+/* RMIN (output) DOUBLE PRECISION */
+/* The smallest normalized number for the machine, given by */
+/* BASE**( EMIN - 1 ), where BASE is the floating point value */
+/* of BETA. */
+
+/* EMAX (output) INTEGER */
+/* The maximum exponent before overflow occurs. */
+
+/* RMAX (output) DOUBLE PRECISION */
+/* The largest positive number for the machine, given by */
+/* BASE**EMAX * ( 1 - EPS ), where BASE is the floating point */
+/* value of BETA. */
+
+/* Further Details */
+/* =============== */
+
+/* The computation of EPS is based on a routine PARANOIA by */
+/* W. Kahan of the University of California at Berkeley. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Save statement .. */
+/* .. */
+/* .. Data statements .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Parameters are computed once and cached in static locals; later */
+/* calls just copy the cached values to the output arguments. */
+
+ if (first) {
+ zero = 0.;
+ one = 1.;
+ two = 2.;
+
+/* LBETA, LT, LRND, LEPS, LEMIN and LRMIN are the local values of */
+/* BETA, T, RND, EPS, EMIN and RMIN. */
+
+/* Throughout this routine we use the function DLAMC3 to ensure */
+/* that relevant values are stored and not held in registers, or */
+/* are not affected by optimizers. */
+
+/* DLAMC1 returns the parameters LBETA, LT, LRND and LIEEE1. */
+
+ dlamc1_(&lbeta, &lt, &lrnd, &lieee1);
+
+/* Start to find EPS. */
+
+ b = (doublereal) lbeta;
+ i__1 = -lt;
+ a = pow_di(&b, &i__1);
+ leps = a;
+
+/* Try some tricks to see whether or not this is the correct EPS. */
+
+ b = two / 3;
+ half = one / 2;
+ d__1 = -half;
+ sixth = dlamc3_(&b, &d__1);
+ third = dlamc3_(&sixth, &sixth);
+ d__1 = -half;
+ b = dlamc3_(&third, &d__1);
+ b = dlamc3_(&b, &sixth);
+ b = abs(b);
+ if (b < leps) {
+ b = leps;
+ }
+
+ leps = 1.;
+
+/* + WHILE( ( LEPS.GT.B ).AND.( B.GT.ZERO ) )LOOP */
+L10:
+ if (leps > b && b > zero) {
+ leps = b;
+ d__1 = half * leps;
+/* Computing 5th power */
+ d__3 = two, d__4 = d__3, d__3 *= d__3;
+/* Computing 2nd power */
+ d__5 = leps;
+ d__2 = d__4 * (d__3 * d__3) * (d__5 * d__5);
+ c__ = dlamc3_(&d__1, &d__2);
+ d__1 = -c__;
+ c__ = dlamc3_(&half, &d__1);
+ b = dlamc3_(&half, &c__);
+ d__1 = -b;
+ c__ = dlamc3_(&half, &d__1);
+ b = dlamc3_(&half, &c__);
+ goto L10;
+ }
+/* + END WHILE */
+
+ if (a < leps) {
+ leps = a;
+ }
+
+/* Computation of EPS complete. */
+
+/* Now find EMIN. Let A = + or - 1, and + or - (1 + BASE**(-3)). */
+/* Keep dividing A by BETA until (gradual) underflow occurs. This */
+/* is detected when we cannot recover the previous A. */
+
+ rbase = one / lbeta;
+ small = one;
+ for (i__ = 1; i__ <= 3; ++i__) {
+ d__1 = small * rbase;
+ small = dlamc3_(&d__1, &zero);
+/* L20: */
+ }
+ a = dlamc3_(&one, &small);
+ dlamc4_(&ngpmin, &one, &lbeta);
+ d__1 = -one;
+ dlamc4_(&ngnmin, &d__1, &lbeta);
+ dlamc4_(&gpmin, &a, &lbeta);
+ d__1 = -a;
+ dlamc4_(&gnmin, &d__1, &lbeta);
+ ieee = FALSE_;
+
+ if (ngpmin == ngnmin && gpmin == gnmin) {
+ if (ngpmin == gpmin) {
+ lemin = ngpmin;
+/* ( Non twos-complement machines, no gradual underflow; */
+/* e.g., VAX ) */
+ } else if (gpmin - ngpmin == 3) {
+ lemin = ngpmin - 1 + lt;
+ ieee = TRUE_;
+/* ( Non twos-complement machines, with gradual underflow; */
+/* e.g., IEEE standard followers ) */
+ } else {
+ lemin = min(ngpmin,gpmin);
+/* ( A guess; no known machine ) */
+ iwarn = TRUE_;
+ }
+
+ } else if (ngpmin == gpmin && ngnmin == gnmin) {
+ if ((i__1 = ngpmin - ngnmin, abs(i__1)) == 1) {
+ lemin = max(ngpmin,ngnmin);
+/* ( Twos-complement machines, no gradual underflow; */
+/* e.g., CYBER 205 ) */
+ } else {
+ lemin = min(ngpmin,ngnmin);
+/* ( A guess; no known machine ) */
+ iwarn = TRUE_;
+ }
+
+ } else if ((i__1 = ngpmin - ngnmin, abs(i__1)) == 1 && gpmin == gnmin)
+ {
+ if (gpmin - min(ngpmin,ngnmin) == 3) {
+ lemin = max(ngpmin,ngnmin) - 1 + lt;
+/* ( Twos-complement machines with gradual underflow; */
+/* no known machine ) */
+ } else {
+ lemin = min(ngpmin,ngnmin);
+/* ( A guess; no known machine ) */
+ iwarn = TRUE_;
+ }
+
+ } else {
+/* Computing MIN */
+ i__1 = min(ngpmin,ngnmin), i__1 = min(i__1,gpmin);
+ lemin = min(i__1,gnmin);
+/* ( A guess; no known machine ) */
+ iwarn = TRUE_;
+ }
+ first = FALSE_;
+/* ** */
+/* Comment out this if block if EMIN is ok */
+/* NOTE: when EMIN was only guessed (IWARN), FIRST is reset to TRUE */
+/* so the detection -- and this warning -- repeat on the next call. */
+ if (iwarn) {
+ first = TRUE_;
+ s_wsfe(&io___58);
+ do_fio(&c__1, (char *)&lemin, (ftnlen)sizeof(integer));
+ e_wsfe();
+ }
+/* ** */
+
+/* Assume IEEE arithmetic if we found denormalised numbers above, */
+/* or if arithmetic seems to round in the IEEE style, determined */
+/* in routine DLAMC1. A true IEEE machine should have both things */
+/* true; however, faulty machines may have one or the other. */
+
+ ieee = ieee || lieee1;
+
+/* Compute RMIN by successive division by BETA. We could compute */
+/* RMIN as BASE**( EMIN - 1 ), but some machines underflow during */
+/* this computation. */
+
+ lrmin = 1.;
+ i__1 = 1 - lemin;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__1 = lrmin * rbase;
+ lrmin = dlamc3_(&d__1, &zero);
+/* L30: */
+ }
+
+/* Finally, call DLAMC5 to compute EMAX and RMAX. */
+
+ dlamc5_(&lbeta, &lt, &lemin, &ieee, &lemax, &lrmax);
+ }
+
+ *beta = lbeta;
+ *t = lt;
+ *rnd = lrnd;
+ *eps = leps;
+ *emin = lemin;
+ *rmin = lrmin;
+ *emax = lemax;
+ *rmax = lrmax;
+
+ return 0;
+
+
+/* End of DLAMC2 */
+
+} /* dlamc2_ */
+
+
+/* *********************************************************************** */
+
+doublereal dlamc3_(doublereal *a, doublereal *b)
+{
+    /* System generated locals */
+    doublereal ret_val;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAMC3 is intended to force A and B to be stored prior to doing */
+/* the addition of A and B , for use in situations where optimizers */
+/* might hold one of these in a register. */
+
+/* Arguments */
+/* ========= */
+
+/* A (input) DOUBLE PRECISION */
+/* B (input) DOUBLE PRECISION */
+/* The values A and B. */
+
+/* ===================================================================== */
+
+/* .. Executable Statements .. */
+
+/* NOTE(review): the out-of-line call/return is what forces A and B to */
+/* memory; presumably correctness here depends on this routine not being */
+/* inlined or link-time-optimized away -- confirm build flags. */
+
+    ret_val = *a + *b;
+
+    return ret_val;
+
+/* End of DLAMC3 */
+
+} /* dlamc3_ */
+
+
+/* *********************************************************************** */
+
+/* Subroutine */ int dlamc4_(integer *emin, doublereal *start, integer *base)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1;
+
+    /* Local variables */
+    doublereal a;
+    integer i__;
+    doublereal b1, b2, c1, c2, d1, d2, one, zero, rbase;
+    extern doublereal dlamc3_(doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAMC4 is a service routine for DLAMC2. */
+
+/* Arguments */
+/* ========= */
+
+/* EMIN (output) INTEGER */
+/* The minimum exponent before (gradual) underflow, computed by */
+/* setting A = START and dividing by BASE until the previous A */
+/* can not be recovered. */
+
+/* START (input) DOUBLE PRECISION */
+/* The starting point for determining EMIN. */
+
+/* BASE (input) INTEGER */
+/* The base of the machine. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    a = *start;
+    one = 1.;
+    rbase = one / *base;
+    zero = 0.;
+    *emin = 1;
+    d__1 = a * rbase;
+    b1 = dlamc3_(&d__1, &zero);
+    c1 = a;
+    c2 = a;
+    d1 = a;
+    d2 = a;
+/* Repeatedly scale A down by BASE; each iteration tries to recover the */
+/* previous A four different ways (C1: multiply back by BASE; D1: BASE */
+/* repeated additions of B1; C2/D2: the same via division by RBASE). */
+/* The loop exits -- i.e. underflow is detected -- as soon as any of the */
+/* four reconstructions no longer equals A. All arithmetic is routed */
+/* through DLAMC3 to keep intermediates out of registers. */
+/* + WHILE( ( C1.EQ.A ).AND.( C2.EQ.A ).AND. */
+/* $ ( D1.EQ.A ).AND.( D2.EQ.A ) )LOOP */
+L10:
+    if (c1 == a && c2 == a && d1 == a && d2 == a) {
+        --(*emin);
+        a = b1;
+        d__1 = a / *base;
+        b1 = dlamc3_(&d__1, &zero);
+        d__1 = b1 * *base;
+        c1 = dlamc3_(&d__1, &zero);
+        d1 = zero;
+        i__1 = *base;
+        for (i__ = 1; i__ <= i__1; ++i__) {
+            d1 += b1;
+/* L20: */
+        }
+        d__1 = a * rbase;
+        b2 = dlamc3_(&d__1, &zero);
+        d__1 = b2 / rbase;
+        c2 = dlamc3_(&d__1, &zero);
+        d2 = zero;
+        i__1 = *base;
+        for (i__ = 1; i__ <= i__1; ++i__) {
+            d2 += b2;
+/* L30: */
+        }
+        goto L10;
+    }
+/* + END WHILE */
+
+    return 0;
+
+/* End of DLAMC4 */
+
+} /* dlamc4_ */
+
+
+/* *********************************************************************** */
+
+/* Subroutine */ int dlamc5_(integer *beta, integer *p, integer *emin,
+        logical *ieee, integer *emax, doublereal *rmax)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1;
+
+    /* Local variables */
+    integer i__;
+    doublereal y, z__;
+    integer try__, lexp;
+    doublereal oldy;
+    integer uexp, nbits;
+    extern doublereal dlamc3_(doublereal *, doublereal *);
+    doublereal recbas;
+    integer exbits, expsum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAMC5 attempts to compute RMAX, the largest machine floating-point */
+/* number, without overflow. It assumes that EMAX + abs(EMIN) sum */
+/* approximately to a power of 2. It will fail on machines where this */
+/* assumption does not hold, for example, the Cyber 205 (EMIN = -28625, */
+/* EMAX = 28718). It will also fail if the value supplied for EMIN is */
+/* too large (i.e. too close to zero), probably with overflow. */
+
+/* Arguments */
+/* ========= */
+
+/* BETA (input) INTEGER */
+/* The base of floating-point arithmetic. */
+
+/* P (input) INTEGER */
+/* The number of base BETA digits in the mantissa of a */
+/* floating-point value. */
+
+/* EMIN (input) INTEGER */
+/* The minimum exponent before (gradual) underflow. */
+
+/* IEEE (input) LOGICAL */
+/* A logical flag specifying whether or not the arithmetic */
+/* system is thought to comply with the IEEE standard. */
+
+/* EMAX (output) INTEGER */
+/* The largest exponent before overflow */
+
+/* RMAX (output) DOUBLE PRECISION */
+/* The largest machine floating-point number. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* First compute LEXP and UEXP, two powers of 2 that bound */
+/* abs(EMIN). We then assume that EMAX + abs(EMIN) will sum */
+/* approximately to the bound that is closest to abs(EMIN). */
+/* (EMAX is the exponent of the required number RMAX). */
+
+    lexp = 1;
+    exbits = 1;
+/* Double LEXP until it exceeds abs(EMIN); EXBITS counts the doublings, */
+/* i.e. the exponent-field width in bits. */
+L10:
+    try__ = lexp << 1;
+    if (try__ <= -(*emin)) {
+        lexp = try__;
+        ++exbits;
+        goto L10;
+    }
+    if (lexp == -(*emin)) {
+        uexp = lexp;
+    } else {
+        uexp = try__;
+        ++exbits;
+    }
+
+/* Now -LEXP is less than or equal to EMIN, and -UEXP is greater */
+/* than or equal to EMIN. EXBITS is the number of bits needed to */
+/* store the exponent. */
+
+    if (uexp + *emin > -lexp - *emin) {
+        expsum = lexp << 1;
+    } else {
+        expsum = uexp << 1;
+    }
+
+/* EXPSUM is the exponent range, approximately equal to */
+/* EMAX - EMIN + 1 . */
+
+    *emax = expsum + *emin - 1;
+    nbits = exbits + 1 + *p;
+
+/* NBITS is the total number of bits needed to store a */
+/* floating-point number. */
+
+    if (nbits % 2 == 1 && *beta == 2) {
+
+/* Either there are an odd number of bits used to store a */
+/* floating-point number, which is unlikely, or some bits are */
+/* not used in the representation of numbers, which is possible, */
+/* (e.g. Cray machines) or the mantissa has an implicit bit, */
+/* (e.g. IEEE machines, Dec Vax machines), which is perhaps the */
+/* most likely. We have to assume the last alternative. */
+/* If this is true, then we need to reduce EMAX by one because */
+/* there must be some way of representing zero in an implicit-bit */
+/* system. On machines like Cray, we are reducing EMAX by one */
+/* unnecessarily. */
+
+        --(*emax);
+    }
+
+    if (*ieee) {
+
+/* Assume we are on an IEEE machine which reserves one exponent */
+/* for infinity and NaN. */
+
+        --(*emax);
+    }
+
+/* Now create RMAX, the largest machine number, which should */
+/* be equal to (1.0 - BETA**(-P)) * BETA**EMAX . */
+
+/* First compute 1.0 - BETA**(-P), being careful that the */
+/* result is less than 1.0 . */
+
+    recbas = 1. / *beta;
+    z__ = *beta - 1.;
+    y = 0.;
+    i__1 = *p;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+        z__ *= recbas;
+        if (y < 1.) {
+            oldy = y;
+        }
+        y = dlamc3_(&y, &z__);
+/* L20: */
+    }
+/* If accumulation rounded Y up to 1.0, fall back to the last partial */
+/* sum that was still strictly below 1.0. */
+    if (y >= 1.) {
+        y = oldy;
+    }
+
+/* Now multiply by BETA**EMAX to get RMAX. */
+
+/* NOTE(review): c_b32 is a file-static constant defined earlier in this */
+/* file (outside this routine); presumably 0.0 -- the DLAMC3 call then */
+/* just forces Y*BETA through memory each iteration. */
+    i__1 = *emax;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+        d__1 = y * *beta;
+        y = dlamc3_(&d__1, &c_b32);
+/* L30: */
+    }
+
+    *rmax = y;
+    return 0;
+
+/* End of DLAMC5 */
+
+} /* dlamc5_ */
diff --git a/contrib/libs/clapack/dlamrg.c b/contrib/libs/clapack/dlamrg.c
new file mode 100644
index 0000000000..ce814be803
--- /dev/null
+++ b/contrib/libs/clapack/dlamrg.c
@@ -0,0 +1,131 @@
+/* dlamrg.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlamrg_(integer *n1, integer *n2, doublereal *a, integer
+        *dtrd1, integer *dtrd2, integer *index)
+{
+    /* System generated locals */
+    integer i__1;
+
+    /* Local variables */
+    integer i__, ind1, ind2, n1sv, n2sv;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAMRG will create a permutation list which will merge the elements */
+/* of A (which is composed of two independently sorted sets) into a */
+/* single set which is sorted in ascending order. */
+
+/* Arguments */
+/* ========= */
+
+/* N1 (input) INTEGER */
+/* N2 (input) INTEGER */
+/* These arguements contain the respective lengths of the two */
+/* sorted lists to be merged. */
+
+/* A (input) DOUBLE PRECISION array, dimension (N1+N2) */
+/* The first N1 elements of A contain a list of numbers which */
+/* are sorted in either ascending or descending order. Likewise */
+/* for the final N2 elements. */
+
+/* DTRD1 (input) INTEGER */
+/* DTRD2 (input) INTEGER */
+/* These are the strides to be taken through the array A. */
+/* Allowable strides are 1 and -1. They indicate whether a */
+/* subset of A is sorted in ascending (DTRDx = 1) or descending */
+/* (DTRDx = -1) order. */
+
+/* INDEX (output) INTEGER array, dimension (N1+N2) */
+/* On exit this array will contain a permutation such that */
+/* if B( I ) = A( INDEX( I ) ) for I=1,N1+N2, then B will be */
+/* sorted in ascending order. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    /* f2c idiom: shift the pointers so the C code can use the original
+       Fortran 1-based subscripts. */
+    --index;
+    --a;
+
+    /* Function Body */
+    n1sv = *n1;
+    n2sv = *n2;
+/* Start each cursor at the smallest element of its sublist: at the */
+/* front for an ascending (stride +1) list, at the back for a */
+/* descending (stride -1) list. */
+    if (*dtrd1 > 0) {
+        ind1 = 1;
+    } else {
+        ind1 = *n1;
+    }
+    if (*dtrd2 > 0) {
+        ind2 = *n1 + 1;
+    } else {
+        ind2 = *n1 + *n2;
+    }
+    i__ = 1;
+/* Standard two-cursor merge: emit the index of the smaller head, */
+/* advance that cursor by its stride. Ties go to the first list. */
+/* while ( (N1SV > 0) & (N2SV > 0) ) */
+L10:
+    if (n1sv > 0 && n2sv > 0) {
+        if (a[ind1] <= a[ind2]) {
+            index[i__] = ind1;
+            ++i__;
+            ind1 += *dtrd1;
+            --n1sv;
+        } else {
+            index[i__] = ind2;
+            ++i__;
+            ind2 += *dtrd2;
+            --n2sv;
+        }
+        goto L10;
+    }
+/* end while */
+/* One list is exhausted; copy out the remainder of the other. */
+    if (n1sv == 0) {
+        i__1 = n2sv;
+        for (n1sv = 1; n1sv <= i__1; ++n1sv) {
+            index[i__] = ind2;
+            ++i__;
+            ind2 += *dtrd2;
+/* L20: */
+        }
+    } else {
+/* N2SV .EQ. 0 */
+        i__1 = n1sv;
+        for (n2sv = 1; n2sv <= i__1; ++n2sv) {
+            index[i__] = ind1;
+            ++i__;
+            ind1 += *dtrd1;
+/* L30: */
+        }
+    }
+
+    return 0;
+
+/* End of DLAMRG */
+
+} /* dlamrg_ */
diff --git a/contrib/libs/clapack/dlaneg.c b/contrib/libs/clapack/dlaneg.c
new file mode 100644
index 0000000000..ee37d6545c
--- /dev/null
+++ b/contrib/libs/clapack/dlaneg.c
@@ -0,0 +1,218 @@
+/* dlaneg.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+integer dlaneg_(integer *n, doublereal *d__, doublereal *lld, doublereal *
+        sigma, doublereal *pivmin, integer *r__)
+{
+    /* System generated locals */
+    integer ret_val, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer j;
+    doublereal p, t;
+    integer bj;
+    doublereal tmp;
+    integer neg1, neg2;
+    doublereal bsav, gamma, dplus;
+    extern logical disnan_(doublereal *);
+    integer negcnt;
+    logical sawnan;
+    doublereal dminus;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLANEG computes the Sturm count, the number of negative pivots */
+/* encountered while factoring tridiagonal T - sigma I = L D L^T. */
+/* This implementation works directly on the factors without forming */
+/* the tridiagonal matrix T. The Sturm count is also the number of */
+/* eigenvalues of T less than sigma. */
+
+/* This routine is called from DLARRB. */
+
+/* The current routine does not use the PIVMIN parameter but rather */
+/* requires IEEE-754 propagation of Infinities and NaNs. This */
+/* routine also has no input range restrictions but does require */
+/* default exception handling such that x/0 produces Inf when x is */
+/* non-zero, and Inf/Inf produces NaN. For more information, see: */
+
+/* Marques, Riedy, and Voemel, "Benefits of IEEE-754 Features in */
+/* Modern Symmetric Tridiagonal Eigensolvers," SIAM Journal on */
+/* Scientific Computing, v28, n5, 2006. DOI 10.1137/050641624 */
+/* (Tech report version in LAWN 172 with the same title.) */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The N diagonal elements of the diagonal matrix D. */
+
+/* LLD (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (N-1) elements L(i)*L(i)*D(i). */
+
+/* SIGMA (input) DOUBLE PRECISION */
+/* Shift amount in T - sigma I = L D L^T. */
+
+/* PIVMIN (input) DOUBLE PRECISION */
+/* The minimum pivot in the Sturm sequence. May be used */
+/* when zero pivots are encountered on non-IEEE-754 */
+/* architectures. */
+
+/* R (input) INTEGER */
+/* The twist index for the twisted factorization that is used */
+/* for the negcount. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+/* Jason Riedy, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* Some architectures propagate Infinities and NaNs very slowly, so */
+/* the code computes counts in BLKLEN chunks. Then a NaN can */
+/* propagate at most BLKLEN columns before being detected. This is */
+/* not a general tuning parameter; it needs only to be just large */
+/* enough that the overhead is tiny in common cases. */
+/* (In this translation BLKLEN appears as the literal 128 / 127 in the */
+/* chunked loops below.) */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+    /* Parameter adjustments (f2c: enable 1-based indexing) */
+    --lld;
+    --d__;
+
+    /* Function Body */
+    negcnt = 0;
+/* I) upper part: L D L^T - SIGMA I = L+ D+ L+^T */
+/* Process columns 1..R-1 in chunks of 128; BSAV saves the recurrence */
+/* value at the chunk start so the chunk can be replayed if a NaN is */
+/* detected at its end. */
+    t = -(*sigma);
+    i__1 = *r__ - 1;
+    for (bj = 1; bj <= i__1; bj += 128) {
+        neg1 = 0;
+        bsav = t;
+/* Computing MIN */
+        i__3 = bj + 127, i__4 = *r__ - 1;
+        i__2 = min(i__3,i__4);
+        for (j = bj; j <= i__2; ++j) {
+            dplus = d__[j] + t;
+            if (dplus < 0.) {
+                ++neg1;
+            }
+            tmp = t / dplus;
+            t = tmp * lld[j] - *sigma;
+/* L21: */
+        }
+        sawnan = disnan_(&t);
+/* Run a slower version of the above loop if a NaN is detected. */
+/* A NaN should occur only with a zero pivot after an infinite */
+/* pivot. In that case, substituting 1 for T/DPLUS is the */
+/* correct limit. */
+        if (sawnan) {
+            neg1 = 0;
+            t = bsav;
+/* Computing MIN */
+            i__3 = bj + 127, i__4 = *r__ - 1;
+            i__2 = min(i__3,i__4);
+            for (j = bj; j <= i__2; ++j) {
+                dplus = d__[j] + t;
+                if (dplus < 0.) {
+                    ++neg1;
+                }
+                tmp = t / dplus;
+                if (disnan_(&tmp)) {
+                    tmp = 1.;
+                }
+                t = tmp * lld[j] - *sigma;
+/* L22: */
+            }
+        }
+        negcnt += neg1;
+/* L210: */
+    }
+
+/* II) lower part: L D L^T - SIGMA I = U- D- U-^T */
+/* Same chunked scheme, walking downward from N-1 to R. */
+    p = d__[*n] - *sigma;
+    i__1 = *r__;
+    for (bj = *n - 1; bj >= i__1; bj += -128) {
+        neg2 = 0;
+        bsav = p;
+/* Computing MAX */
+        i__3 = bj - 127;
+        i__2 = max(i__3,*r__);
+        for (j = bj; j >= i__2; --j) {
+            dminus = lld[j] + p;
+            if (dminus < 0.) {
+                ++neg2;
+            }
+            tmp = p / dminus;
+            p = tmp * d__[j] - *sigma;
+/* L23: */
+        }
+        sawnan = disnan_(&p);
+/* As above, run a slower version that substitutes 1 for Inf/Inf. */
+
+        if (sawnan) {
+            neg2 = 0;
+            p = bsav;
+/* Computing MAX */
+            i__3 = bj - 127;
+            i__2 = max(i__3,*r__);
+            for (j = bj; j >= i__2; --j) {
+                dminus = lld[j] + p;
+                if (dminus < 0.) {
+                    ++neg2;
+                }
+                tmp = p / dminus;
+                if (disnan_(&tmp)) {
+                    tmp = 1.;
+                }
+                p = tmp * d__[j] - *sigma;
+/* L24: */
+            }
+        }
+        negcnt += neg2;
+/* L230: */
+    }
+
+/* III) Twist index */
+/* T was shifted by SIGMA initially. */
+    gamma = t + *sigma + p;
+    if (gamma < 0.) {
+        ++negcnt;
+    }
+    ret_val = negcnt;
+    return ret_val;
+} /* dlaneg_ */
diff --git a/contrib/libs/clapack/dlangb.c b/contrib/libs/clapack/dlangb.c
new file mode 100644
index 0000000000..cb125461e4
--- /dev/null
+++ b/contrib/libs/clapack/dlangb.c
@@ -0,0 +1,226 @@
+/* dlangb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal dlangb_(char *norm, integer *n, integer *kl, integer *ku,
+        doublereal *ab, integer *ldab, doublereal *work)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+    doublereal ret_val, d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, k, l;
+    doublereal sum, scale;
+    extern logical lsame_(char *, char *);
+    doublereal value;
+    extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+            doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLANGB returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of an */
+/* n by n band matrix A, with kl sub-diagonals and ku super-diagonals. */
+
+/* Description */
+/* =========== */
+
+/* DLANGB returns the value */
+
+/* DLANGB = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in DLANGB as described */
+/* above. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, DLANGB is */
+/* set to zero. */
+
+/* KL (input) INTEGER */
+/* The number of sub-diagonals of the matrix A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of super-diagonals of the matrix A. KU >= 0. */
+
+/* AB (input) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* The band matrix A, stored in rows 1 to KL+KU+1. The j-th */
+/* column of A is stored in the j-th column of the array AB as */
+/* follows: */
+/* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(n,j+kl). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KL+KU+1. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments (f2c: column-major 1-based indexing into AB) */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --work;
+
+    /* Function Body */
+    if (*n == 0) {
+        value = 0.;
+    } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+/* Row range max(ku+2-j,1)..min(n+ku+1-j,kl+ku+1) restricts the scan */
+/* to the stored band of column j. */
+
+        value = 0.;
+        i__1 = *n;
+        for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+            i__2 = *ku + 2 - j;
+/* Computing MIN */
+            i__4 = *n + *ku + 1 - j, i__5 = *kl + *ku + 1;
+            i__3 = min(i__4,i__5);
+            for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+                d__2 = value, d__3 = (d__1 = ab[i__ + j * ab_dim1], abs(d__1))
+                        ;
+                value = max(d__2,d__3);
+/* L10: */
+            }
+/* L20: */
+        }
+    } else if (lsame_(norm, "O") || *(unsigned char *)
+            norm == '1') {
+
+/* Find norm1(A). */
+
+        value = 0.;
+        i__1 = *n;
+        for (j = 1; j <= i__1; ++j) {
+            sum = 0.;
+/* Computing MAX */
+            i__3 = *ku + 2 - j;
+/* Computing MIN */
+            i__4 = *n + *ku + 1 - j, i__5 = *kl + *ku + 1;
+            i__2 = min(i__4,i__5);
+            for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+                sum += (d__1 = ab[i__ + j * ab_dim1], abs(d__1));
+/* L30: */
+            }
+            value = max(value,sum);
+/* L40: */
+        }
+    } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+/* Accumulate row sums in WORK(1..N), then take their maximum. */
+
+        i__1 = *n;
+        for (i__ = 1; i__ <= i__1; ++i__) {
+            work[i__] = 0.;
+/* L50: */
+        }
+        i__1 = *n;
+        for (j = 1; j <= i__1; ++j) {
+            k = *ku + 1 - j;
+/* Computing MAX */
+            i__2 = 1, i__3 = j - *ku;
+/* Computing MIN */
+            i__5 = *n, i__6 = j + *kl;
+            i__4 = min(i__5,i__6);
+            for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+                work[i__] += (d__1 = ab[k + i__ + j * ab_dim1], abs(d__1));
+/* L60: */
+            }
+/* L70: */
+        }
+        value = 0.;
+        i__1 = *n;
+        for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+            d__1 = value, d__2 = work[i__];
+            value = max(d__1,d__2);
+/* L80: */
+        }
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+/* DLASSQ maintains (SCALE, SUM) so that SCALE**2*SUM accumulates the */
+/* sum of squares without overflow/underflow. */
+
+        scale = 0.;
+        sum = 1.;
+        i__1 = *n;
+        for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+            i__4 = 1, i__2 = j - *ku;
+            l = max(i__4,i__2);
+            k = *ku + 1 - j + l;
+/* Computing MIN */
+            i__2 = *n, i__3 = j + *kl;
+            i__4 = min(i__2,i__3) - l + 1;
+            dlassq_(&i__4, &ab[k + j * ab_dim1], &c__1, &scale, &sum);
+/* L90: */
+        }
+        value = scale * sqrt(sum);
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/* End of DLANGB */
+
+} /* dlangb_ */
diff --git a/contrib/libs/clapack/dlange.c b/contrib/libs/clapack/dlange.c
new file mode 100644
index 0000000000..34c3039c1a
--- /dev/null
+++ b/contrib/libs/clapack/dlange.c
@@ -0,0 +1,199 @@
+/* dlange.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal dlange_(char *norm, integer *m, integer *n, doublereal *a, integer
+        *lda, doublereal *work)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    doublereal ret_val, d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j;
+    doublereal sum, scale;
+    extern logical lsame_(char *, char *);
+    doublereal value;
+    extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+            doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLANGE returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* real matrix A. */
+
+/* Description */
+/* =========== */
+
+/* DLANGE returns the value */
+
+/* DLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in DLANGE as described */
+/* above. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. When M = 0, */
+/* DLANGE is set to zero. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. When N = 0, */
+/* DLANGE is set to zero. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The m by n matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(M,1). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= M when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments (f2c: column-major 1-based indexing into A) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --work;
+
+    /* Function Body */
+    if (min(*m,*n) == 0) {
+        value = 0.;
+    } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+        value = 0.;
+        i__1 = *n;
+        for (j = 1; j <= i__1; ++j) {
+            i__2 = *m;
+            for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+                d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+                value = max(d__2,d__3);
+/* L10: */
+            }
+/* L20: */
+        }
+    } else if (lsame_(norm, "O") || *(unsigned char *)
+            norm == '1') {
+
+/* Find norm1(A). */
+
+        value = 0.;
+        i__1 = *n;
+        for (j = 1; j <= i__1; ++j) {
+            sum = 0.;
+            i__2 = *m;
+            for (i__ = 1; i__ <= i__2; ++i__) {
+                sum += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L30: */
+            }
+            value = max(value,sum);
+/* L40: */
+        }
+    } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+/* Accumulate row sums in WORK(1..M), then take their maximum. */
+
+        i__1 = *m;
+        for (i__ = 1; i__ <= i__1; ++i__) {
+            work[i__] = 0.;
+/* L50: */
+        }
+        i__1 = *n;
+        for (j = 1; j <= i__1; ++j) {
+            i__2 = *m;
+            for (i__ = 1; i__ <= i__2; ++i__) {
+                work[i__] += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L60: */
+            }
+/* L70: */
+        }
+        value = 0.;
+        i__1 = *m;
+        for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+            d__1 = value, d__2 = work[i__];
+            value = max(d__1,d__2);
+/* L80: */
+        }
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+/* DLASSQ maintains (SCALE, SUM) so that SCALE**2*SUM accumulates the */
+/* sum of squares column by column without overflow/underflow. */
+
+        scale = 0.;
+        sum = 1.;
+        i__1 = *n;
+        for (j = 1; j <= i__1; ++j) {
+            dlassq_(m, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L90: */
+        }
+        value = scale * sqrt(sum);
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/* End of DLANGE */
+
+} /* dlange_ */
diff --git a/contrib/libs/clapack/dlangt.c b/contrib/libs/clapack/dlangt.c
new file mode 100644
index 0000000000..806f39caa7
--- /dev/null
+++ b/contrib/libs/clapack/dlangt.c
@@ -0,0 +1,195 @@
+/* dlangt.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal dlangt_(char *norm, integer *n, doublereal *dl, doublereal *d__,
+ doublereal *du)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal ret_val, d__1, d__2, d__3, d__4, d__5;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ doublereal sum, scale;
+ extern logical lsame_(char *, char *);
+ doublereal anorm;
+ extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+ doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLANGT returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* real tridiagonal matrix A. */
+
+/* Description */
+/* =========== */
+
+/* DLANGT returns the value */
+
+/* DLANGT = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in DLANGT as described */
+/* above. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, DLANGT is */
+/* set to zero. */
+
+/* DL (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) sub-diagonal elements of A. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The diagonal elements of A. */
+
+/* DU (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) super-diagonal elements of A. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --du;
+ --d__;
+ --dl;
+
+ /* Function Body */
+ if (*n <= 0) {
+ anorm = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ anorm = (d__1 = d__[*n], abs(d__1));
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__2 = anorm, d__3 = (d__1 = dl[i__], abs(d__1));
+ anorm = max(d__2,d__3);
+/* Computing MAX */
+ d__2 = anorm, d__3 = (d__1 = d__[i__], abs(d__1));
+ anorm = max(d__2,d__3);
+/* Computing MAX */
+ d__2 = anorm, d__3 = (d__1 = du[i__], abs(d__1));
+ anorm = max(d__2,d__3);
+/* L10: */
+ }
+ } else if (lsame_(norm, "O") || *(unsigned char *)
+ norm == '1') {
+
+/* Find norm1(A). */
+
+ if (*n == 1) {
+ anorm = abs(d__[1]);
+ } else {
+/* Computing MAX */
+ d__3 = abs(d__[1]) + abs(dl[1]), d__4 = (d__1 = d__[*n], abs(d__1)
+ ) + (d__2 = du[*n - 1], abs(d__2));
+ anorm = max(d__3,d__4);
+ i__1 = *n - 1;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__4 = anorm, d__5 = (d__1 = d__[i__], abs(d__1)) + (d__2 =
+ dl[i__], abs(d__2)) + (d__3 = du[i__ - 1], abs(d__3));
+ anorm = max(d__4,d__5);
+/* L20: */
+ }
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ if (*n == 1) {
+ anorm = abs(d__[1]);
+ } else {
+/* Computing MAX */
+ d__3 = abs(d__[1]) + abs(du[1]), d__4 = (d__1 = d__[*n], abs(d__1)
+ ) + (d__2 = dl[*n - 1], abs(d__2));
+ anorm = max(d__3,d__4);
+ i__1 = *n - 1;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__4 = anorm, d__5 = (d__1 = d__[i__], abs(d__1)) + (d__2 =
+ du[i__], abs(d__2)) + (d__3 = dl[i__ - 1], abs(d__3));
+ anorm = max(d__4,d__5);
+/* L30: */
+ }
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.;
+ sum = 1.;
+ dlassq_(n, &d__[1], &c__1, &scale, &sum);
+ if (*n > 1) {
+ i__1 = *n - 1;
+ dlassq_(&i__1, &dl[1], &c__1, &scale, &sum);
+ i__1 = *n - 1;
+ dlassq_(&i__1, &du[1], &c__1, &scale, &sum);
+ }
+ anorm = scale * sqrt(sum);
+ }
+
+ ret_val = anorm;
+ return ret_val;
+
+/* End of DLANGT */
+
+} /* dlangt_ */
diff --git a/contrib/libs/clapack/dlanhs.c b/contrib/libs/clapack/dlanhs.c
new file mode 100644
index 0000000000..35711c0ce2
--- /dev/null
+++ b/contrib/libs/clapack/dlanhs.c
@@ -0,0 +1,205 @@
+/* dlanhs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal dlanhs_(char *norm, integer *n, doublereal *a, integer *lda,
+ doublereal *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ doublereal ret_val, d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j;
+ doublereal sum, scale;
+ extern logical lsame_(char *, char *);
+ doublereal value;
+ extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+ doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLANHS returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* Hessenberg matrix A. */
+
+/* Description */
+/* =========== */
+
+/* DLANHS returns the value */
+
+/* DLANHS = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in DLANHS as described */
+/* above. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, DLANHS is */
+/* set to zero. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The n by n upper Hessenberg matrix A; the part of A below the */
+/* first sub-diagonal is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(N,1). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+ value = max(d__2,d__3);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if (lsame_(norm, "O") || *(unsigned char *)
+ norm == '1') {
+
+/* Find norm1(A). */
+
+ value = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.;
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L30: */
+ }
+ value = max(value,sum);
+/* L40: */
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L50: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L60: */
+ }
+/* L70: */
+ }
+ value = 0.;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = work[i__];
+ value = max(d__1,d__2);
+/* L80: */
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.;
+ sum = 1.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ dlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L90: */
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of DLANHS */
+
+} /* dlanhs_ */
diff --git a/contrib/libs/clapack/dlansb.c b/contrib/libs/clapack/dlansb.c
new file mode 100644
index 0000000000..d6b9175275
--- /dev/null
+++ b/contrib/libs/clapack/dlansb.c
@@ -0,0 +1,263 @@
+/* dlansb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal dlansb_(char *norm, char *uplo, integer *n, integer *k, doublereal
+ *ab, integer *ldab, doublereal *work)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+ doublereal ret_val, d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, l;
+ doublereal sum, absa, scale;
+ extern logical lsame_(char *, char *);
+ doublereal value;
+ extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+ doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLANSB returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of an */
+/* n by n symmetric band matrix A, with k super-diagonals. */
+
+/* Description */
+/* =========== */
+
+/* DLANSB returns the value */
+
+/* DLANSB = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in DLANSB as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* band matrix A is supplied. */
+/* = 'U': Upper triangular part is supplied */
+/* = 'L': Lower triangular part is supplied */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, DLANSB is */
+/* set to zero. */
+
+/* K (input) INTEGER */
+/* The number of super-diagonals or sub-diagonals of the */
+/* band matrix A. K >= 0. */
+
+/* AB (input) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* The upper or lower triangle of the symmetric band matrix A, */
+/* stored in the first K+1 rows of AB. The j-th column of A is */
+/* stored in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(k+1+i-j,j) = A(i,j) for max(1,j-k)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+k). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= K+1. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/* WORK is not referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = *k + 2 - j;
+ i__3 = *k + 1;
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+ d__2 = value, d__3 = (d__1 = ab[i__ + j * ab_dim1], abs(
+ d__1));
+ value = max(d__2,d__3);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *n + 1 - j, i__4 = *k + 1;
+ i__3 = min(i__2,i__4);
+ for (i__ = 1; i__ <= i__3; ++i__) {
+/* Computing MAX */
+ d__2 = value, d__3 = (d__1 = ab[i__ + j * ab_dim1], abs(
+ d__1));
+ value = max(d__2,d__3);
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/* Find normI(A) ( = norm1(A), since A is symmetric). */
+
+ value = 0.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.;
+ l = *k + 1 - j;
+/* Computing MAX */
+ i__3 = 1, i__2 = j - *k;
+ i__4 = j - 1;
+ for (i__ = max(i__3,i__2); i__ <= i__4; ++i__) {
+ absa = (d__1 = ab[l + i__ + j * ab_dim1], abs(d__1));
+ sum += absa;
+ work[i__] += absa;
+/* L50: */
+ }
+ work[j] = sum + (d__1 = ab[*k + 1 + j * ab_dim1], abs(d__1));
+/* L60: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = work[i__];
+ value = max(d__1,d__2);
+/* L70: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L80: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = work[j] + (d__1 = ab[j * ab_dim1 + 1], abs(d__1));
+ l = 1 - j;
+/* Computing MIN */
+ i__3 = *n, i__2 = j + *k;
+ i__4 = min(i__3,i__2);
+ for (i__ = j + 1; i__ <= i__4; ++i__) {
+ absa = (d__1 = ab[l + i__ + j * ab_dim1], abs(d__1));
+ sum += absa;
+ work[i__] += absa;
+/* L90: */
+ }
+ value = max(value,sum);
+/* L100: */
+ }
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.;
+ sum = 1.;
+ if (*k > 0) {
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = j - 1;
+ i__4 = min(i__3,*k);
+/* Computing MAX */
+ i__2 = *k + 2 - j;
+ dlassq_(&i__4, &ab[max(i__2, 1)+ j * ab_dim1], &c__1, &
+ scale, &sum);
+/* L110: */
+ }
+ l = *k + 1;
+ } else {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n - j;
+ i__4 = min(i__3,*k);
+ dlassq_(&i__4, &ab[j * ab_dim1 + 2], &c__1, &scale, &sum);
+/* L120: */
+ }
+ l = 1;
+ }
+ sum *= 2;
+ } else {
+ l = 1;
+ }
+ dlassq_(n, &ab[l + ab_dim1], ldab, &scale, &sum);
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of DLANSB */
+
+} /* dlansb_ */
diff --git a/contrib/libs/clapack/dlansf.c b/contrib/libs/clapack/dlansf.c
new file mode 100644
index 0000000000..19ce810770
--- /dev/null
+++ b/contrib/libs/clapack/dlansf.c
@@ -0,0 +1,1012 @@
+/* dlansf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal dlansf_(char *norm, char *transr, char *uplo, integer *n,
+ doublereal *a, doublereal *work)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ doublereal ret_val, d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k, l;
+ doublereal s;
+ integer n1;
+ doublereal aa;
+ integer lda, ifm, noe, ilu;
+ doublereal scale;
+ extern logical lsame_(char *, char *);
+ doublereal value;
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+ doublereal *, doublereal *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLANSF returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* real symmetric matrix A in RFP format. */
+
+/* Description */
+/* =========== */
+
+/* DLANSF returns the value */
+
+/* DLANSF = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER */
+/* Specifies the value to be returned in DLANSF as described */
+/* above. */
+
+/* TRANSR (input) CHARACTER */
+/* Specifies whether the RFP format of A is normal or */
+/* transposed format. */
+/* = 'N': RFP format is Normal; */
+/* = 'T': RFP format is Transpose. */
+
+/* UPLO (input) CHARACTER */
+/* On entry, UPLO specifies whether the RFP matrix A came from */
+/* an upper or lower triangular matrix as follows: */
+/* = 'U': RFP A came from an upper triangular matrix; */
+/* = 'L': RFP A came from a lower triangular matrix. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, DLANSF is */
+/* set to zero. */
+
+/* A (input) DOUBLE PRECISION array, dimension ( N*(N+1)/2 ); */
+/* On entry, the upper (if UPLO = 'U') or lower (if UPLO = 'L') */
+/* part of the symmetric matrix A stored in RFP format. See the */
+/* "Notes" below for more details. */
+/* Unchanged on exit. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/* WORK is not referenced. */
+
+/* Notes */
+/* ===== */
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* even. We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* the transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* the transpose of the last three columns of AP lower. */
+/* This covers the case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 03 04 05 33 43 53 */
+/* 13 14 15 00 44 54 */
+/* 23 24 25 10 11 55 */
+/* 33 34 35 20 21 22 */
+/* 00 44 45 30 31 32 */
+/* 01 11 55 40 41 42 */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* odd. We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* the transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* the transpose of the last two columns of AP lower. */
+/* This covers the case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 02 03 04 00 33 43 */
+/* 12 13 14 10 11 44 */
+/* 22 23 24 20 21 22 */
+/* 00 33 34 30 31 32 */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+/* RFP A RFP A */
+
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* Reference */
+/* ========= */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ if (*n == 0) {
+ ret_val = 0.;
+ return ret_val;
+ }
+
+/* set noe = 1 if n is odd. if n is even set noe=0 */
+
+ noe = 1;
+ if (*n % 2 == 0) {
+ noe = 0;
+ }
+
+/* set ifm = 0 when form='T or 't' and 1 otherwise */
+
+ ifm = 1;
+ if (lsame_(transr, "T")) {
+ ifm = 0;
+ }
+
+/* set ilu = 0 when uplo='U or 'u' and 1 otherwise */
+
+ ilu = 1;
+ if (lsame_(uplo, "U")) {
+ ilu = 0;
+ }
+
+/* set lda = (n+1)/2 when ifm = 0 */
+/* set lda = n when ifm = 1 and noe = 1 */
+/* set lda = n+1 when ifm = 1 and noe = 0 */
+
+ if (ifm == 1) {
+ if (noe == 1) {
+ lda = *n;
+ } else {
+/* noe=0 */
+ lda = *n + 1;
+ }
+ } else {
+/* ifm=0 */
+ lda = (*n + 1) / 2;
+ }
+
+ if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ k = (*n + 1) / 2;
+ value = 0.;
+ if (noe == 1) {
+/* n is odd */
+ if (ifm == 1) {
+/* A is n by k */
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = value, d__3 = (d__1 = a[i__ + j * lda], abs(
+ d__1));
+ value = max(d__2,d__3);
+ }
+ }
+ } else {
+/* xpose case; A is k by n */
+ i__1 = *n - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = value, d__3 = (d__1 = a[i__ + j * lda], abs(
+ d__1));
+ value = max(d__2,d__3);
+ }
+ }
+ }
+ } else {
+/* n is even */
+ if (ifm == 1) {
+/* A is n+1 by k */
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = value, d__3 = (d__1 = a[i__ + j * lda], abs(
+ d__1));
+ value = max(d__2,d__3);
+ }
+ }
+ } else {
+/* xpose case; A is k by n+1 */
+ i__1 = *n;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = value, d__3 = (d__1 = a[i__ + j * lda], abs(
+ d__1));
+ value = max(d__2,d__3);
+ }
+ }
+ }
+ }
+ } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/* Find normI(A) ( = norm1(A), since A is symmetric). */
+
+ if (ifm == 1) {
+ k = *n / 2;
+ if (noe == 1) {
+/* n is odd */
+ if (ilu == 0) {
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+ }
+ i__1 = k;
+ for (j = 0; j <= i__1; ++j) {
+ s = 0.;
+ i__2 = k + j - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* -> A(i,j+k) */
+ s += aa;
+ work[i__] += aa;
+ }
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* -> A(j+k,j+k) */
+ work[j + k] = s + aa;
+ if (i__ == k + k) {
+ goto L10;
+ }
+ ++i__;
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* -> A(j,j) */
+ work[j] += aa;
+ s = 0.;
+ i__2 = k - 1;
+ for (l = j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* -> A(l,j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[j] += s;
+ }
+L10:
+ i__ = idamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ } else {
+/* ilu = 1 */
+ ++k;
+/* k=(n+1)/2 for n odd and ilu=1 */
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+ }
+ for (j = k - 1; j >= 0; --j) {
+ s = 0.;
+ i__1 = j - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* -> A(j+k,i+k) */
+ s += aa;
+ work[i__ + k] += aa;
+ }
+ if (j > 0) {
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* -> A(j+k,j+k) */
+ s += aa;
+ work[i__ + k] += s;
+/* i=j */
+ ++i__;
+ }
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* -> A(j,j) */
+ work[j] = aa;
+ s = 0.;
+ i__1 = *n - 1;
+ for (l = j + 1; l <= i__1; ++l) {
+ ++i__;
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* -> A(l,j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[j] += s;
+ }
+ i__ = idamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ }
+ } else {
+/* n is even */
+ if (ilu == 0) {
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ s = 0.;
+ i__2 = k + j - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* -> A(i,j+k) */
+ s += aa;
+ work[i__] += aa;
+ }
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* -> A(j+k,j+k) */
+ work[j + k] = s + aa;
+ ++i__;
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* -> A(j,j) */
+ work[j] += aa;
+ s = 0.;
+ i__2 = k - 1;
+ for (l = j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* -> A(l,j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[j] += s;
+ }
+ i__ = idamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ } else {
+/* ilu = 1 */
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+ }
+ for (j = k - 1; j >= 0; --j) {
+ s = 0.;
+ i__1 = j - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* -> A(j+k,i+k) */
+ s += aa;
+ work[i__ + k] += aa;
+ }
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* -> A(j+k,j+k) */
+ s += aa;
+ work[i__ + k] += s;
+/* i=j */
+ ++i__;
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* -> A(j,j) */
+ work[j] = aa;
+ s = 0.;
+ i__1 = *n - 1;
+ for (l = j + 1; l <= i__1; ++l) {
+ ++i__;
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* -> A(l,j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[j] += s;
+ }
+ i__ = idamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ }
+ }
+ } else {
+/* ifm=0 */
+ k = *n / 2;
+ if (noe == 1) {
+/* n is odd */
+ if (ilu == 0) {
+ n1 = k;
+/* n/2 */
+ ++k;
+/* k is the row size and lda */
+ i__1 = *n - 1;
+ for (i__ = n1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+ }
+ i__1 = n1 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ s = 0.;
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(j,n1+i) */
+ work[i__ + n1] += aa;
+ s += aa;
+ }
+ work[j] = s;
+ }
+/* j=n1=k-1 is special */
+ s = (d__1 = a[j * lda], abs(d__1));
+/* A(k-1,k-1) */
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(k-1,i+n1) */
+ work[i__ + n1] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+ s = 0.;
+ i__2 = j - k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(i,j-k) */
+ work[i__] += aa;
+ s += aa;
+ }
+/* i=j-k */
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(j-k,j-k) */
+ s += aa;
+ work[j - k] += s;
+ ++i__;
+ s = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(j,j) */
+ i__2 = *n - 1;
+ for (l = j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(j,l) */
+ work[l] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ }
+ i__ = idamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ } else {
+/* ilu=1 */
+ ++k;
+/* k=(n+1)/2 for n odd and ilu=1 */
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+/* process */
+ s = 0.;
+ i__2 = j - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(j,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* i=j so process of A(j,j) */
+ s += aa;
+ work[j] = s;
+/* is initialised here */
+ ++i__;
+/* i=j process A(j+k,j+k) */
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+ s = aa;
+ i__2 = *n - 1;
+ for (l = k + j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(l,k+j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[k + j] += s;
+ }
+/* j=k-1 is special :process col A(k-1,0:k-1) */
+ s = 0.;
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(k,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+/* i=k-1 */
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(k-1,k-1) */
+ s += aa;
+ work[i__] = s;
+/* done with col j=k+1 */
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+/* process col j of A = A(j,0:k-1) */
+ s = 0.;
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(j,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ }
+ i__ = idamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ }
+ } else {
+/* n is even */
+ if (ilu == 0) {
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ s = 0.;
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(j,i+k) */
+ work[i__ + k] += aa;
+ s += aa;
+ }
+ work[j] = s;
+ }
+/* j=k */
+ aa = (d__1 = a[j * lda], abs(d__1));
+/* A(k,k) */
+ s = aa;
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(k,k+i) */
+ work[i__ + k] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ i__1 = *n - 1;
+ for (j = k + 1; j <= i__1; ++j) {
+ s = 0.;
+ i__2 = j - 2 - k;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(i,j-k-1) */
+ work[i__] += aa;
+ s += aa;
+ }
+/* i=j-1-k */
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(j-k-1,j-k-1) */
+ s += aa;
+ work[j - k - 1] += s;
+ ++i__;
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(j,j) */
+ s = aa;
+ i__2 = *n - 1;
+ for (l = j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(j,l) */
+ work[l] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ }
+/* j=n */
+ s = 0.;
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(i,k-1) */
+ work[i__] += aa;
+ s += aa;
+ }
+/* i=k-1 */
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(k-1,k-1) */
+ s += aa;
+ work[i__] += s;
+ i__ = idamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ } else {
+/* ilu=1 */
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+ }
+/* j=0 is special :process col A(k:n-1,k) */
+ s = abs(a[0]);
+/* A(k,k) */
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ aa = (d__1 = a[i__], abs(d__1));
+/* A(k+i,k) */
+ work[i__ + k] += aa;
+ s += aa;
+ }
+ work[k] += s;
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+/* process */
+ s = 0.;
+ i__2 = j - 2;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(j-1,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* i=j-1 so process of A(j-1,j-1) */
+ s += aa;
+ work[j - 1] = s;
+/* is initialised here */
+ ++i__;
+/* i=j process A(j+k,j+k) */
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+ s = aa;
+ i__2 = *n - 1;
+ for (l = k + j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(l,k+j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[k + j] += s;
+ }
+/* j=k is special :process col A(k,0:k-1) */
+ s = 0.;
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(k,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+/* i=k-1 */
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(k-1,k-1) */
+ s += aa;
+ work[i__] = s;
+/* done with col j=k+1 */
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+/* process col j-1 of A = A(j-1,0:k-1) */
+ s = 0.;
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (d__1 = a[i__ + j * lda], abs(d__1));
+/* A(j-1,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+ work[j - 1] += s;
+ }
+ i__ = idamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ }
+ }
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ k = (*n + 1) / 2;
+ scale = 0.;
+ s = 1.;
+ if (noe == 1) {
+/* n is odd */
+ if (ifm == 1) {
+/* A is normal */
+ if (ilu == 0) {
+/* A is upper */
+ i__1 = k - 3;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 2;
+ dlassq_(&i__2, &a[k + j + 1 + j * lda], &c__1, &scale,
+ &s);
+/* L at A(k,0) */
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k + j - 1;
+ dlassq_(&i__2, &a[j * lda], &c__1, &scale, &s);
+/* trap U at A(0,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ i__1 = k - 1;
+ i__2 = lda + 1;
+ dlassq_(&i__1, &a[k], &i__2, &scale, &s);
+/* tri L at A(k,0) */
+ i__1 = lda + 1;
+ dlassq_(&k, &a[k - 1], &i__1, &scale, &s);
+/* tri U at A(k-1,0) */
+ } else {
+/* ilu=1 & A is lower */
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - j - 1;
+ dlassq_(&i__2, &a[j + 1 + j * lda], &c__1, &scale, &s)
+ ;
+/* trap L at A(0,0) */
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ dlassq_(&j, &a[(j + 1) * lda], &c__1, &scale, &s);
+/* U at A(0,1) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ i__1 = lda + 1;
+ dlassq_(&k, a, &i__1, &scale, &s);
+/* tri L at A(0,0) */
+ i__1 = k - 1;
+ i__2 = lda + 1;
+ dlassq_(&i__1, &a[lda], &i__2, &scale, &s);
+/* tri U at A(0,1) */
+ }
+ } else {
+/* A is xpose */
+ if (ilu == 0) {
+/* A' is upper */
+ i__1 = k - 2;
+ for (j = 1; j <= i__1; ++j) {
+ dlassq_(&j, &a[(k + j) * lda], &c__1, &scale, &s);
+/* U at A(0,k) */
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ dlassq_(&k, &a[j * lda], &c__1, &scale, &s);
+/* k by k-1 rect. at A(0,0) */
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 1;
+ dlassq_(&i__2, &a[j + 1 + (j + k - 1) * lda], &c__1, &
+ scale, &s);
+/* L at A(0,k-1) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ i__1 = k - 1;
+ i__2 = lda + 1;
+ dlassq_(&i__1, &a[k * lda], &i__2, &scale, &s);
+/* tri U at A(0,k) */
+ i__1 = lda + 1;
+ dlassq_(&k, &a[(k - 1) * lda], &i__1, &scale, &s);
+/* tri L at A(0,k-1) */
+ } else {
+/* A' is lower */
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ dlassq_(&j, &a[j * lda], &c__1, &scale, &s);
+/* U at A(0,0) */
+ }
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+ dlassq_(&k, &a[j * lda], &c__1, &scale, &s);
+/* k by k-1 rect. at A(0,k) */
+ }
+ i__1 = k - 3;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 2;
+ dlassq_(&i__2, &a[j + 2 + j * lda], &c__1, &scale, &s)
+ ;
+/* L at A(1,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ i__1 = lda + 1;
+ dlassq_(&k, a, &i__1, &scale, &s);
+/* tri U at A(0,0) */
+ i__1 = k - 1;
+ i__2 = lda + 1;
+ dlassq_(&i__1, &a[1], &i__2, &scale, &s);
+/* tri L at A(1,0) */
+ }
+ }
+ } else {
+/* n is even */
+ if (ifm == 1) {
+/* A is normal */
+ if (ilu == 0) {
+/* A is upper */
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 1;
+ dlassq_(&i__2, &a[k + j + 2 + j * lda], &c__1, &scale,
+ &s);
+/* L at A(k+1,0) */
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k + j;
+ dlassq_(&i__2, &a[j * lda], &c__1, &scale, &s);
+/* trap U at A(0,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ i__1 = lda + 1;
+ dlassq_(&k, &a[k + 1], &i__1, &scale, &s);
+/* tri L at A(k+1,0) */
+ i__1 = lda + 1;
+ dlassq_(&k, &a[k], &i__1, &scale, &s);
+/* tri U at A(k,0) */
+ } else {
+/* ilu=1 & A is lower */
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - j - 1;
+ dlassq_(&i__2, &a[j + 2 + j * lda], &c__1, &scale, &s)
+ ;
+/* trap L at A(1,0) */
+ }
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ dlassq_(&j, &a[j * lda], &c__1, &scale, &s);
+/* U at A(0,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ i__1 = lda + 1;
+ dlassq_(&k, &a[1], &i__1, &scale, &s);
+/* tri L at A(1,0) */
+ i__1 = lda + 1;
+ dlassq_(&k, a, &i__1, &scale, &s);
+/* tri U at A(0,0) */
+ }
+ } else {
+/* A is xpose */
+ if (ilu == 0) {
+/* A' is upper */
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ dlassq_(&j, &a[(k + 1 + j) * lda], &c__1, &scale, &s);
+/* U at A(0,k+1) */
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ dlassq_(&k, &a[j * lda], &c__1, &scale, &s);
+/* k by k rect. at A(0,0) */
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 1;
+ dlassq_(&i__2, &a[j + 1 + (j + k) * lda], &c__1, &
+ scale, &s);
+/* L at A(0,k) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ i__1 = lda + 1;
+ dlassq_(&k, &a[(k + 1) * lda], &i__1, &scale, &s);
+/* tri U at A(0,k+1) */
+ i__1 = lda + 1;
+ dlassq_(&k, &a[k * lda], &i__1, &scale, &s);
+/* tri L at A(0,k) */
+ } else {
+/* A' is lower */
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ dlassq_(&j, &a[(j + 1) * lda], &c__1, &scale, &s);
+/* U at A(0,1) */
+ }
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+ dlassq_(&k, &a[j * lda], &c__1, &scale, &s);
+/* k by k rect. at A(0,k+1) */
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 1;
+ dlassq_(&i__2, &a[j + 1 + j * lda], &c__1, &scale, &s)
+ ;
+/* L at A(0,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ i__1 = lda + 1;
+ dlassq_(&k, &a[lda], &i__1, &scale, &s);
+/* tri L at A(0,1) */
+ i__1 = lda + 1;
+ dlassq_(&k, a, &i__1, &scale, &s);
+/* tri U at A(0,0) */
+ }
+ }
+ }
+ value = scale * sqrt(s);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of DLANSF */
+
+} /* dlansf_ */
diff --git a/contrib/libs/clapack/dlansp.c b/contrib/libs/clapack/dlansp.c
new file mode 100644
index 0000000000..de8f50cbb6
--- /dev/null
+++ b/contrib/libs/clapack/dlansp.c
@@ -0,0 +1,263 @@
+/* dlansp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal dlansp_(char *norm, char *uplo, integer *n, doublereal *ap,
+ doublereal *work)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ doublereal ret_val, d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal sum, absa, scale;
+ extern logical lsame_(char *, char *);
+ doublereal value;
+ extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+ doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLANSP returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* real symmetric matrix A, supplied in packed form. */
+
+/* Description */
+/* =========== */
+
+/* DLANSP returns the value */
+
+/* DLANSP = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in DLANSP as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is supplied. */
+/* = 'U': Upper triangular part of A is supplied */
+/* = 'L': Lower triangular part of A is supplied */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, DLANSP is */
+/* set to zero. */
+
+/* AP (input) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the symmetric matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/* WORK is not referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --work;
+ --ap;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.;
+ if (lsame_(uplo, "U")) {
+ k = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = k + j - 1;
+ for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = value, d__3 = (d__1 = ap[i__], abs(d__1));
+ value = max(d__2,d__3);
+/* L10: */
+ }
+ k += j;
+/* L20: */
+ }
+ } else {
+ k = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = k + *n - j;
+ for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = value, d__3 = (d__1 = ap[i__], abs(d__1));
+ value = max(d__2,d__3);
+/* L30: */
+ }
+ k = k + *n - j + 1;
+/* L40: */
+ }
+ }
+ } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/* Find normI(A) ( = norm1(A), since A is symmetric). */
+
+ value = 0.;
+ k = 1;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ absa = (d__1 = ap[k], abs(d__1));
+ sum += absa;
+ work[i__] += absa;
+ ++k;
+/* L50: */
+ }
+ work[j] = sum + (d__1 = ap[k], abs(d__1));
+ ++k;
+/* L60: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = work[i__];
+ value = max(d__1,d__2);
+/* L70: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L80: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = work[j] + (d__1 = ap[k], abs(d__1));
+ ++k;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ absa = (d__1 = ap[k], abs(d__1));
+ sum += absa;
+ work[i__] += absa;
+ ++k;
+/* L90: */
+ }
+ value = max(value,sum);
+/* L100: */
+ }
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.;
+ sum = 1.;
+ k = 2;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j - 1;
+ dlassq_(&i__2, &ap[k], &c__1, &scale, &sum);
+ k += j;
+/* L110: */
+ }
+ } else {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ dlassq_(&i__2, &ap[k], &c__1, &scale, &sum);
+ k = k + *n - j + 1;
+/* L120: */
+ }
+ }
+ sum *= 2;
+ k = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (ap[k] != 0.) {
+ absa = (d__1 = ap[k], abs(d__1));
+ if (scale < absa) {
+/* Computing 2nd power */
+ d__1 = scale / absa;
+ sum = sum * (d__1 * d__1) + 1.;
+ scale = absa;
+ } else {
+/* Computing 2nd power */
+ d__1 = absa / scale;
+ sum += d__1 * d__1;
+ }
+ }
+ if (lsame_(uplo, "U")) {
+ k = k + i__ + 1;
+ } else {
+ k = k + *n - i__ + 1;
+ }
+/* L130: */
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of DLANSP */
+
+} /* dlansp_ */
diff --git a/contrib/libs/clapack/dlanst.c b/contrib/libs/clapack/dlanst.c
new file mode 100644
index 0000000000..323713d510
--- /dev/null
+++ b/contrib/libs/clapack/dlanst.c
@@ -0,0 +1,166 @@
+/* dlanst.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal dlanst_(char *norm, integer *n, doublereal *d__, doublereal *e)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal ret_val, d__1, d__2, d__3, d__4, d__5;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ doublereal sum, scale;
+ extern logical lsame_(char *, char *);
+ doublereal anorm;
+ extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+ doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLANST returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* real symmetric tridiagonal matrix A. */
+
+/* Description */
+/* =========== */
+
+/* DLANST returns the value */
+
+/* DLANST = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in DLANST as described */
+/* above. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, DLANST is */
+/* set to zero. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The diagonal elements of A. */
+
+/* E (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) sub-diagonal or super-diagonal elements of A. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --e;
+ --d__;
+
+ /* Function Body */
+ if (*n <= 0) {
+ anorm = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ anorm = (d__1 = d__[*n], abs(d__1));
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__2 = anorm, d__3 = (d__1 = d__[i__], abs(d__1));
+ anorm = max(d__2,d__3);
+/* Computing MAX */
+ d__2 = anorm, d__3 = (d__1 = e[i__], abs(d__1));
+ anorm = max(d__2,d__3);
+/* L10: */
+ }
+ } else if (lsame_(norm, "O") || *(unsigned char *)
+ norm == '1' || lsame_(norm, "I")) {
+
+/* Find norm1(A). */
+
+ if (*n == 1) {
+ anorm = abs(d__[1]);
+ } else {
+/* Computing MAX */
+ d__3 = abs(d__[1]) + abs(e[1]), d__4 = (d__1 = e[*n - 1], abs(
+ d__1)) + (d__2 = d__[*n], abs(d__2));
+ anorm = max(d__3,d__4);
+ i__1 = *n - 1;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__4 = anorm, d__5 = (d__1 = d__[i__], abs(d__1)) + (d__2 = e[
+ i__], abs(d__2)) + (d__3 = e[i__ - 1], abs(d__3));
+ anorm = max(d__4,d__5);
+/* L20: */
+ }
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.;
+ sum = 1.;
+ if (*n > 1) {
+ i__1 = *n - 1;
+ dlassq_(&i__1, &e[1], &c__1, &scale, &sum);
+ sum *= 2;
+ }
+ dlassq_(n, &d__[1], &c__1, &scale, &sum);
+ anorm = scale * sqrt(sum);
+ }
+
+ ret_val = anorm;
+ return ret_val;
+
+/* End of DLANST */
+
+} /* dlanst_ */
diff --git a/contrib/libs/clapack/dlansy.c b/contrib/libs/clapack/dlansy.c
new file mode 100644
index 0000000000..58d5c30e22
--- /dev/null
+++ b/contrib/libs/clapack/dlansy.c
@@ -0,0 +1,239 @@
+/* dlansy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal dlansy_(char *norm, char *uplo, integer *n, doublereal *a, integer
+ *lda, doublereal *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublereal ret_val, d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j;
+ doublereal sum, absa, scale;
+ extern logical lsame_(char *, char *);
+ doublereal value;
+ extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+ doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLANSY returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* real symmetric matrix A. */
+
+/* Description */
+/* =========== */
+
+/* DLANSY returns the value */
+
+/* DLANSY = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in DLANSY as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is to be referenced. */
+/* = 'U': Upper triangular part of A is referenced */
+/* = 'L': Lower triangular part of A is referenced */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, DLANSY is */
+/* set to zero. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The symmetric matrix A. If UPLO = 'U', the leading n by n */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced. If UPLO = 'L', the leading n by n lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(N,1). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/* WORK is not referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(
+ d__1));
+ value = max(d__2,d__3);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(
+ d__1));
+ value = max(d__2,d__3);
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/* Find normI(A) ( = norm1(A), since A is symmetric). */
+
+ value = 0.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ absa = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+ sum += absa;
+ work[i__] += absa;
+/* L50: */
+ }
+ work[j] = sum + (d__1 = a[j + j * a_dim1], abs(d__1));
+/* L60: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = work[i__];
+ value = max(d__1,d__2);
+/* L70: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L80: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = work[j] + (d__1 = a[j + j * a_dim1], abs(d__1));
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ absa = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+ sum += absa;
+ work[i__] += absa;
+/* L90: */
+ }
+ value = max(value,sum);
+/* L100: */
+ }
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.;
+ sum = 1.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j - 1;
+ dlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L110: */
+ }
+ } else {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ dlassq_(&i__2, &a[j + 1 + j * a_dim1], &c__1, &scale, &sum);
+/* L120: */
+ }
+ }
+ sum *= 2;
+ i__1 = *lda + 1;
+ dlassq_(n, &a[a_offset], &i__1, &scale, &sum);
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of DLANSY */
+
+} /* dlansy_ */
diff --git a/contrib/libs/clapack/dlantb.c b/contrib/libs/clapack/dlantb.c
new file mode 100644
index 0000000000..7aa82d8e84
--- /dev/null
+++ b/contrib/libs/clapack/dlantb.c
@@ -0,0 +1,434 @@
+/* dlantb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal dlantb_(char *norm, char *uplo, char *diag, integer *n, integer *k,
+ doublereal *ab, integer *ldab, doublereal *work)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal ret_val, d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, l;
+ doublereal sum, scale;
+ logical udiag;
+ extern logical lsame_(char *, char *);
+ doublereal value;
+ extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+ doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLANTB returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of an */
+/* n by n triangular band matrix A, with ( k + 1 ) diagonals. */
+
+/* Description */
+/* =========== */
+
+/* DLANTB returns the value */
+
+/* DLANTB = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in DLANTB as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, DLANTB is */
+/* set to zero. */
+
+/* K (input) INTEGER */
+/* The number of super-diagonals of the matrix A if UPLO = 'U', */
+/* or the number of sub-diagonals of the matrix A if UPLO = 'L'. */
+/* K >= 0. */
+
+/* AB (input) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* The upper or lower triangular band matrix A, stored in the */
+/* first k+1 rows of AB. The j-th column of A is stored */
+/* in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(k+1+i-j,j) = A(i,j) for max(1,j-k)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+k). */
+/* Note that when DIAG = 'U', the elements of the array AB */
+/* corresponding to the diagonal elements of the matrix A are */
+/* not referenced, but are assumed to be one. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= K+1. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ if (lsame_(diag, "U")) {
+ value = 1.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = *k + 2 - j;
+ i__3 = *k;
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+ d__2 = value, d__3 = (d__1 = ab[i__ + j * ab_dim1],
+ abs(d__1));
+ value = max(d__2,d__3);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *n + 1 - j, i__4 = *k + 1;
+ i__3 = min(i__2,i__4);
+ for (i__ = 2; i__ <= i__3; ++i__) {
+/* Computing MAX */
+ d__2 = value, d__3 = (d__1 = ab[i__ + j * ab_dim1],
+ abs(d__1));
+ value = max(d__2,d__3);
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else {
+ value = 0.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__3 = *k + 2 - j;
+ i__2 = *k + 1;
+ for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = value, d__3 = (d__1 = ab[i__ + j * ab_dim1],
+ abs(d__1));
+ value = max(d__2,d__3);
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n + 1 - j, i__4 = *k + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = value, d__3 = (d__1 = ab[i__ + j * ab_dim1],
+ abs(d__1));
+ value = max(d__2,d__3);
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ } else if (lsame_(norm, "O") || *(unsigned char *)
+ norm == '1') {
+
+/* Find norm1(A). */
+
+ value = 0.;
+ udiag = lsame_(diag, "U");
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (udiag) {
+ sum = 1.;
+/* Computing MAX */
+ i__2 = *k + 2 - j;
+ i__3 = *k;
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+ sum += (d__1 = ab[i__ + j * ab_dim1], abs(d__1));
+/* L90: */
+ }
+ } else {
+ sum = 0.;
+/* Computing MAX */
+ i__3 = *k + 2 - j;
+ i__2 = *k + 1;
+ for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+ sum += (d__1 = ab[i__ + j * ab_dim1], abs(d__1));
+/* L100: */
+ }
+ }
+ value = max(value,sum);
+/* L110: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (udiag) {
+ sum = 1.;
+/* Computing MIN */
+ i__3 = *n + 1 - j, i__4 = *k + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ sum += (d__1 = ab[i__ + j * ab_dim1], abs(d__1));
+/* L120: */
+ }
+ } else {
+ sum = 0.;
+/* Computing MIN */
+ i__3 = *n + 1 - j, i__4 = *k + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum += (d__1 = ab[i__ + j * ab_dim1], abs(d__1));
+/* L130: */
+ }
+ }
+ value = max(value,sum);
+/* L140: */
+ }
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ value = 0.;
+ if (lsame_(uplo, "U")) {
+ if (lsame_(diag, "U")) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 1.;
+/* L150: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ l = *k + 1 - j;
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__4 = j - 1;
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ work[i__] += (d__1 = ab[l + i__ + j * ab_dim1], abs(
+ d__1));
+/* L160: */
+ }
+/* L170: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L180: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ l = *k + 1 - j;
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *k;
+ i__3 = j;
+ for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+ work[i__] += (d__1 = ab[l + i__ + j * ab_dim1], abs(
+ d__1));
+/* L190: */
+ }
+/* L200: */
+ }
+ }
+ } else {
+ if (lsame_(diag, "U")) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 1.;
+/* L210: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ l = 1 - j;
+/* Computing MIN */
+ i__4 = *n, i__2 = j + *k;
+ i__3 = min(i__4,i__2);
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ work[i__] += (d__1 = ab[l + i__ + j * ab_dim1], abs(
+ d__1));
+/* L220: */
+ }
+/* L230: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L240: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ l = 1 - j;
+/* Computing MIN */
+ i__4 = *n, i__2 = j + *k;
+ i__3 = min(i__4,i__2);
+ for (i__ = j; i__ <= i__3; ++i__) {
+ work[i__] += (d__1 = ab[l + i__ + j * ab_dim1], abs(
+ d__1));
+/* L250: */
+ }
+/* L260: */
+ }
+ }
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = work[i__];
+ value = max(d__1,d__2);
+/* L270: */
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ if (lsame_(uplo, "U")) {
+ if (lsame_(diag, "U")) {
+ scale = 1.;
+ sum = (doublereal) (*n);
+ if (*k > 0) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+ i__4 = j - 1;
+ i__3 = min(i__4,*k);
+/* Computing MAX */
+ i__2 = *k + 2 - j;
+ dlassq_(&i__3, &ab[max(i__2, 1)+ j * ab_dim1], &c__1,
+ &scale, &sum);
+/* L280: */
+ }
+ }
+ } else {
+ scale = 0.;
+ sum = 1.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__4 = j, i__2 = *k + 1;
+ i__3 = min(i__4,i__2);
+/* Computing MAX */
+ i__5 = *k + 2 - j;
+ dlassq_(&i__3, &ab[max(i__5, 1)+ j * ab_dim1], &c__1, &
+ scale, &sum);
+/* L290: */
+ }
+ }
+ } else {
+ if (lsame_(diag, "U")) {
+ scale = 1.;
+ sum = (doublereal) (*n);
+ if (*k > 0) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__4 = *n - j;
+ i__3 = min(i__4,*k);
+ dlassq_(&i__3, &ab[j * ab_dim1 + 2], &c__1, &scale, &
+ sum);
+/* L300: */
+ }
+ }
+ } else {
+ scale = 0.;
+ sum = 1.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__4 = *n - j + 1, i__2 = *k + 1;
+ i__3 = min(i__4,i__2);
+ dlassq_(&i__3, &ab[j * ab_dim1 + 1], &c__1, &scale, &sum);
+/* L310: */
+ }
+ }
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of DLANTB */
+
+} /* dlantb_ */
diff --git a/contrib/libs/clapack/dlantp.c b/contrib/libs/clapack/dlantp.c
new file mode 100644
index 0000000000..dd110985ff
--- /dev/null
+++ b/contrib/libs/clapack/dlantp.c
@@ -0,0 +1,391 @@
+/* dlantp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal dlantp_(char *norm, char *uplo, char *diag, integer *n, doublereal
+	*ap, doublereal *work)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    doublereal ret_val, d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, k;
+    doublereal sum, scale;
+    logical udiag;
+    extern logical lsame_(char *, char *);
+    doublereal value;
+    extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+	    doublereal *, doublereal *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DLANTP returns the value of the one norm, or the Frobenius norm, or */
+/*  the infinity norm, or the element of largest absolute value of a */
+/*  triangular matrix A, supplied in packed form. */
+
+/*  Description */
+/*  =========== */
+
+/*  DLANTP returns the value */
+
+/*     DLANTP = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/*              ( */
+/*              ( norm1(A),         NORM = '1', 'O' or 'o' */
+/*              ( */
+/*              ( normI(A),         NORM = 'I' or 'i' */
+/*              ( */
+/*              ( normF(A),         NORM = 'F', 'f', 'E' or 'e' */
+
+/*  where norm1 denotes the one norm of a matrix (maximum column sum), */
+/*  normI denotes the infinity norm of a matrix (maximum row sum) and */
+/*  normF denotes the Frobenius norm of a matrix (square root of sum of */
+/*  squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  NORM    (input) CHARACTER*1 */
+/*          Specifies the value to be returned in DLANTP as described */
+/*          above. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the matrix A is upper or lower triangular. */
+/*          = 'U':  Upper triangular */
+/*          = 'L':  Lower triangular */
+
+/*  DIAG    (input) CHARACTER*1 */
+/*          Specifies whether or not the matrix A is unit triangular. */
+/*          = 'N':  Non-unit triangular */
+/*          = 'U':  Unit triangular */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0.  When N = 0, DLANTP is */
+/*          set to zero. */
+
+/*  AP      (input) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/*          The upper or lower triangular matrix A, packed columnwise in */
+/*          a linear array.  The j-th column of A is stored in the array */
+/*          AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/*          Note that when DIAG = 'U', the elements of the array AP */
+/*          corresponding to the diagonal elements of the matrix A are */
+/*          not referenced, but are assumed to be one. */
+
+/*  WORK    (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/*          where LWORK >= N when NORM = 'I'; otherwise, WORK is not */
+/*          referenced. */
+
+/* ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*  NOTE(review): machine-generated f2c translation of dlantp.f.  The */
+/*  "--ap; --work;" adjustments below shift the array bases so the */
+/*  1-based Fortran indexing carries over unchanged; "k" is the running */
+/*  offset of the current column (or row segment) inside the packed */
+/*  array AP, and comments such as "L10:" are the original Fortran */
+/*  statement labels kept for traceability against the Fortran source. */
+
+    /* Parameter adjustments */
+    --work;
+    --ap;
+
+    /* Function Body */
+    if (*n == 0) {
+	value = 0.;
+    } else if (lsame_(norm, "M")) {
+
+/*        Find max(abs(A(i,j))). */
+
+	k = 1;
+	if (lsame_(diag, "U")) {
+	    value = 1.;
+	    if (lsame_(uplo, "U")) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = k + j - 2;
+		    for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+			d__2 = value, d__3 = (d__1 = ap[i__], abs(d__1));
+			value = max(d__2,d__3);
+/* L10: */
+		    }
+		    k += j;
+/* L20: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = k + *n - j;
+		    for (i__ = k + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+			d__2 = value, d__3 = (d__1 = ap[i__], abs(d__1));
+			value = max(d__2,d__3);
+/* L30: */
+		    }
+		    k = k + *n - j + 1;
+/* L40: */
+		}
+	    }
+	} else {
+	    value = 0.;
+	    if (lsame_(uplo, "U")) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = k + j - 1;
+		    for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+			d__2 = value, d__3 = (d__1 = ap[i__], abs(d__1));
+			value = max(d__2,d__3);
+/* L50: */
+		    }
+		    k += j;
+/* L60: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = k + *n - j;
+		    for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+			d__2 = value, d__3 = (d__1 = ap[i__], abs(d__1));
+			value = max(d__2,d__3);
+/* L70: */
+		    }
+		    k = k + *n - j + 1;
+/* L80: */
+		}
+	    }
+	}
+    } else if (lsame_(norm, "O") || *(unsigned char *)
+	    norm == '1') {
+
+/*        Find norm1(A). */
+
+	value = 0.;
+	k = 1;
+	udiag = lsame_(diag, "U");
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (udiag) {
+		    sum = 1.;
+		    i__2 = k + j - 2;
+		    for (i__ = k; i__ <= i__2; ++i__) {
+			sum += (d__1 = ap[i__], abs(d__1));
+/* L90: */
+		    }
+		} else {
+		    sum = 0.;
+		    i__2 = k + j - 1;
+		    for (i__ = k; i__ <= i__2; ++i__) {
+			sum += (d__1 = ap[i__], abs(d__1));
+/* L100: */
+		    }
+		}
+		k += j;
+		value = max(value,sum);
+/* L110: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (udiag) {
+		    sum = 1.;
+		    i__2 = k + *n - j;
+		    for (i__ = k + 1; i__ <= i__2; ++i__) {
+			sum += (d__1 = ap[i__], abs(d__1));
+/* L120: */
+		    }
+		} else {
+		    sum = 0.;
+		    i__2 = k + *n - j;
+		    for (i__ = k; i__ <= i__2; ++i__) {
+			sum += (d__1 = ap[i__], abs(d__1));
+/* L130: */
+		    }
+		}
+		k = k + *n - j + 1;
+		value = max(value,sum);
+/* L140: */
+	    }
+	}
+    } else if (lsame_(norm, "I")) {
+
+/*        Find normI(A). */
+
+/*        WORK(1..N) accumulates row sums of abs(A); k walks AP linearly. */
+
+	k = 1;
+	if (lsame_(uplo, "U")) {
+	    if (lsame_(diag, "U")) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 1.;
+/* L150: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = j - 1;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			work[i__] += (d__1 = ap[k], abs(d__1));
+			++k;
+/* L160: */
+		    }
+		    ++k;
+/* L170: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 0.;
+/* L180: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			work[i__] += (d__1 = ap[k], abs(d__1));
+			++k;
+/* L190: */
+		    }
+/* L200: */
+		}
+	    }
+	} else {
+	    if (lsame_(diag, "U")) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 1.;
+/* L210: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    ++k;
+		    i__2 = *n;
+		    for (i__ = j + 1; i__ <= i__2; ++i__) {
+			work[i__] += (d__1 = ap[k], abs(d__1));
+			++k;
+/* L220: */
+		    }
+/* L230: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 0.;
+/* L240: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			work[i__] += (d__1 = ap[k], abs(d__1));
+			++k;
+/* L250: */
+		    }
+/* L260: */
+		}
+	    }
+	}
+	value = 0.;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	    d__1 = value, d__2 = work[i__];
+	    value = max(d__1,d__2);
+/* L270: */
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/*        Find normF(A). */
+
+/*        DLASSQ maintains (scale, sum) with norm = scale*sqrt(sum) to */
+/*        avoid overflow/underflow while summing squares. */
+
+	if (lsame_(uplo, "U")) {
+	    if (lsame_(diag, "U")) {
+		scale = 1.;
+		sum = (doublereal) (*n);
+		k = 2;
+		i__1 = *n;
+		for (j = 2; j <= i__1; ++j) {
+		    i__2 = j - 1;
+		    dlassq_(&i__2, &ap[k], &c__1, &scale, &sum);
+		    k += j;
+/* L280: */
+		}
+	    } else {
+		scale = 0.;
+		sum = 1.;
+		k = 1;
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    dlassq_(&j, &ap[k], &c__1, &scale, &sum);
+		    k += j;
+/* L290: */
+		}
+	    }
+	} else {
+	    if (lsame_(diag, "U")) {
+		scale = 1.;
+		sum = (doublereal) (*n);
+		k = 2;
+		i__1 = *n - 1;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *n - j;
+		    dlassq_(&i__2, &ap[k], &c__1, &scale, &sum);
+		    k = k + *n - j + 1;
+/* L300: */
+		}
+	    } else {
+		scale = 0.;
+		sum = 1.;
+		k = 1;
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *n - j + 1;
+		    dlassq_(&i__2, &ap[k], &c__1, &scale, &sum);
+		    k = k + *n - j + 1;
+/* L310: */
+		}
+	    }
+	}
+	value = scale * sqrt(sum);
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/*     End of DLANTP */
+
+} /* dlantp_ */
diff --git a/contrib/libs/clapack/dlantr.c b/contrib/libs/clapack/dlantr.c
new file mode 100644
index 0000000000..cc28cde570
--- /dev/null
+++ b/contrib/libs/clapack/dlantr.c
@@ -0,0 +1,398 @@
+/* dlantr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal dlantr_(char *norm, char *uplo, char *diag, integer *m, integer *n,
+	 doublereal *a, integer *lda, doublereal *work)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+    doublereal ret_val, d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j;
+    doublereal sum, scale;
+    logical udiag;
+    extern logical lsame_(char *, char *);
+    doublereal value;
+    extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+	    doublereal *, doublereal *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DLANTR returns the value of the one norm, or the Frobenius norm, or */
+/*  the infinity norm, or the element of largest absolute value of a */
+/*  trapezoidal or triangular matrix A. */
+
+/*  Description */
+/*  =========== */
+
+/*  DLANTR returns the value */
+
+/*     DLANTR = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/*              ( */
+/*              ( norm1(A),         NORM = '1', 'O' or 'o' */
+/*              ( */
+/*              ( normI(A),         NORM = 'I' or 'i' */
+/*              ( */
+/*              ( normF(A),         NORM = 'F', 'f', 'E' or 'e' */
+
+/*  where norm1 denotes the one norm of a matrix (maximum column sum), */
+/*  normI denotes the infinity norm of a matrix (maximum row sum) and */
+/*  normF denotes the Frobenius norm of a matrix (square root of sum of */
+/*  squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  NORM    (input) CHARACTER*1 */
+/*          Specifies the value to be returned in DLANTR as described */
+/*          above. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the matrix A is upper or lower trapezoidal. */
+/*          = 'U':  Upper trapezoidal */
+/*          = 'L':  Lower trapezoidal */
+/*          Note that A is triangular instead of trapezoidal if M = N. */
+
+/*  DIAG    (input) CHARACTER*1 */
+/*          Specifies whether or not the matrix A has unit diagonal. */
+/*          = 'N':  Non-unit diagonal */
+/*          = 'U':  Unit diagonal */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix A.  M >= 0, and if */
+/*          UPLO = 'U', M <= N.  When M = 0, DLANTR is set to zero. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix A.  N >= 0, and if */
+/*          UPLO = 'L', N <= M.  When N = 0, DLANTR is set to zero. */
+
+/*  A       (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/*          The trapezoidal matrix A (A is triangular if M = N). */
+/*          If UPLO = 'U', the leading m by n upper trapezoidal part of */
+/*          the array A contains the upper trapezoidal matrix, and the */
+/*          strictly lower triangular part of A is not referenced. */
+/*          If UPLO = 'L', the leading m by n lower trapezoidal part of */
+/*          the array A contains the lower trapezoidal matrix, and the */
+/*          strictly upper triangular part of A is not referenced.  Note */
+/*          that when DIAG = 'U', the diagonal elements of A are not */
+/*          referenced and are assumed to be one. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(M,1). */
+
+/*  WORK    (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/*          where LWORK >= M when NORM = 'I'; otherwise, WORK is not */
+/*          referenced. */
+
+/* ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*  NOTE(review): machine-generated f2c translation of dlantr.f.  The */
+/*  a_offset adjustment below re-bases A so that the Fortran element */
+/*  A(i,j) is a[i__ + j * a_dim1] with 1-based i,j; "LNN:" comments are */
+/*  the original Fortran statement labels. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --work;
+
+    /* Function Body */
+    if (min(*m,*n) == 0) {
+	value = 0.;
+    } else if (lsame_(norm, "M")) {
+
+/*        Find max(abs(A(i,j))). */
+
+	if (lsame_(diag, "U")) {
+	    value = 1.;
+	    if (lsame_(uplo, "U")) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		    i__3 = *m, i__4 = j - 1;
+		    i__2 = min(i__3,i__4);
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+			d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(
+				d__1));
+			value = max(d__2,d__3);
+/* L10: */
+		    }
+/* L20: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *m;
+		    for (i__ = j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+			d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(
+				d__1));
+			value = max(d__2,d__3);
+/* L30: */
+		    }
+/* L40: */
+		}
+	    }
+	} else {
+	    value = 0.;
+	    if (lsame_(uplo, "U")) {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = min(*m,j);
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+			d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(
+				d__1));
+			value = max(d__2,d__3);
+/* L50: */
+		    }
+/* L60: */
+		}
+	    } else {
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *m;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+/* Computing MAX */
+			d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(
+				d__1));
+			value = max(d__2,d__3);
+/* L70: */
+		    }
+/* L80: */
+		}
+	    }
+	}
+    } else if (lsame_(norm, "O") || *(unsigned char *)
+	    norm == '1') {
+
+/*        Find norm1(A). */
+
+	value = 0.;
+	udiag = lsame_(diag, "U");
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (udiag && j <= *m) {
+		    sum = 1.;
+		    i__2 = j - 1;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			sum += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L90: */
+		    }
+		} else {
+		    sum = 0.;
+		    i__2 = min(*m,j);
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			sum += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L100: */
+		    }
+		}
+		value = max(value,sum);
+/* L110: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		if (udiag) {
+		    sum = 1.;
+		    i__2 = *m;
+		    for (i__ = j + 1; i__ <= i__2; ++i__) {
+			sum += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L120: */
+		    }
+		} else {
+		    sum = 0.;
+		    i__2 = *m;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			sum += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L130: */
+		    }
+		}
+		value = max(value,sum);
+/* L140: */
+	    }
+	}
+    } else if (lsame_(norm, "I")) {
+
+/*        Find normI(A). */
+
+/*        WORK(1..M) accumulates row sums of abs(A). */
+
+	if (lsame_(uplo, "U")) {
+	    if (lsame_(diag, "U")) {
+		i__1 = *m;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 1.;
+/* L150: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		    i__3 = *m, i__4 = j - 1;
+		    i__2 = min(i__3,i__4);
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			work[i__] += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L160: */
+		    }
+/* L170: */
+		}
+	    } else {
+		i__1 = *m;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 0.;
+/* L180: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = min(*m,j);
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			work[i__] += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L190: */
+		    }
+/* L200: */
+		}
+	    }
+	} else {
+	    if (lsame_(diag, "U")) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 1.;
+/* L210: */
+		}
+		i__1 = *m;
+		for (i__ = *n + 1; i__ <= i__1; ++i__) {
+		    work[i__] = 0.;
+/* L220: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *m;
+		    for (i__ = j + 1; i__ <= i__2; ++i__) {
+			work[i__] += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L230: */
+		    }
+/* L240: */
+		}
+	    } else {
+		i__1 = *m;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    work[i__] = 0.;
+/* L250: */
+		}
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *m;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			work[i__] += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L260: */
+		    }
+/* L270: */
+		}
+	    }
+	}
+	value = 0.;
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	    d__1 = value, d__2 = work[i__];
+	    value = max(d__1,d__2);
+/* L280: */
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/*        Find normF(A). */
+
+/*        DLASSQ maintains (scale, sum) with norm = scale*sqrt(sum) to */
+/*        avoid overflow/underflow while summing squares. */
+
+	if (lsame_(uplo, "U")) {
+	    if (lsame_(diag, "U")) {
+		scale = 1.;
+		sum = (doublereal) min(*m,*n);
+		i__1 = *n;
+		for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+		    i__3 = *m, i__4 = j - 1;
+		    i__2 = min(i__3,i__4);
+		    dlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L290: */
+		}
+	    } else {
+		scale = 0.;
+		sum = 1.;
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = min(*m,j);
+		    dlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L300: */
+		}
+	    }
+	} else {
+	    if (lsame_(diag, "U")) {
+		scale = 1.;
+		sum = (doublereal) min(*m,*n);
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *m - j;
+/* Computing MIN */
+		    i__3 = *m, i__4 = j + 1;
+		    dlassq_(&i__2, &a[min(i__3, i__4)+ j * a_dim1], &c__1, &
+			    scale, &sum);
+/* L310: */
+		}
+	    } else {
+		scale = 0.;
+		sum = 1.;
+		i__1 = *n;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = *m - j + 1;
+		    dlassq_(&i__2, &a[j + j * a_dim1], &c__1, &scale, &sum);
+/* L320: */
+		}
+	    }
+	}
+	value = scale * sqrt(sum);
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/*     End of DLANTR */
+
+} /* dlantr_ */
diff --git a/contrib/libs/clapack/dlanv2.c b/contrib/libs/clapack/dlanv2.c
new file mode 100644
index 0000000000..14aa951fe6
--- /dev/null
+++ b/contrib/libs/clapack/dlanv2.c
@@ -0,0 +1,235 @@
+/* dlanv2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b4 = 1.;
+
+/* Subroutine */ int dlanv2_(doublereal *a, doublereal *b, doublereal *c__,
+	doublereal *d__, doublereal *rt1r, doublereal *rt1i, doublereal *rt2r,
+	doublereal *rt2i, doublereal *cs, doublereal *sn)
+{
+    /* System generated locals */
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double d_sign(doublereal *, doublereal *), sqrt(doublereal);
+
+    /* Local variables */
+    doublereal p, z__, aa, bb, cc, dd, cs1, sn1, sab, sac, eps, tau, temp,
+	    scale, bcmax, bcmis, sigma;
+    extern doublereal dlapy2_(doublereal *, doublereal *), dlamch_(char *);
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DLANV2 computes the Schur factorization of a real 2-by-2 nonsymmetric */
+/*  matrix in standard form: */
+
+/*       [ A  B ] = [ CS -SN ] [ AA  BB ] [ CS  SN ] */
+/*       [ C  D ]   [ SN  CS ] [ CC  DD ] [-SN  CS ] */
+
+/*  where either */
+/*  1) CC = 0 so that AA and DD are real eigenvalues of the matrix, or */
+/*  2) AA = DD and BB*CC < 0, so that AA + or - sqrt(BB*CC) are complex */
+/*  conjugate eigenvalues. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  A       (input/output) DOUBLE PRECISION */
+/*  B       (input/output) DOUBLE PRECISION */
+/*  C       (input/output) DOUBLE PRECISION */
+/*  D       (input/output) DOUBLE PRECISION */
+/*          On entry, the elements of the input matrix. */
+/*          On exit, they are overwritten by the elements of the */
+/*          standardised Schur form. */
+
+/*  RT1R    (output) DOUBLE PRECISION */
+/*  RT1I    (output) DOUBLE PRECISION */
+/*  RT2R    (output) DOUBLE PRECISION */
+/*  RT2I    (output) DOUBLE PRECISION */
+/*          The real and imaginary parts of the eigenvalues. If the */
+/*          eigenvalues are a complex conjugate pair, RT1I > 0. */
+
+/*  CS      (output) DOUBLE PRECISION */
+/*  SN      (output) DOUBLE PRECISION */
+/*          Parameters of the rotation matrix. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Modified by V. Sima, Research Institute for Informatics, Bucharest, */
+/*  Romania, to reduce the risk of cancellation errors, */
+/*  when computing real eigenvalues, and to ensure, if possible, that */
+/*  abs(RT1R) >= abs(RT2R). */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*  NOTE(review): f2c translation of dlanv2.f.  The first three branches */
+/*  below are the trivial cases (already upper triangular, lower */
+/*  triangular needing only a row/column swap, or equal diagonal with */
+/*  opposite-signed off-diagonals); each exits via "goto L10" where the */
+/*  eigenvalues are read off.  Only the final else block does the */
+/*  general scaled 2-by-2 standardization. */
+
+    eps = dlamch_("P");
+    if (*c__ == 0.) {
+	*cs = 1.;
+	*sn = 0.;
+	goto L10;
+
+    } else if (*b == 0.) {
+
+/*        Swap rows and columns */
+
+	*cs = 0.;
+	*sn = 1.;
+	temp = *d__;
+	*d__ = *a;
+	*a = temp;
+	*b = -(*c__);
+	*c__ = 0.;
+	goto L10;
+    } else if (*a - *d__ == 0. && d_sign(&c_b4, b) != d_sign(&c_b4, c__)) {
+	*cs = 1.;
+	*sn = 0.;
+	goto L10;
+    } else {
+
+	temp = *a - *d__;
+	p = temp * .5;
+/* Computing MAX */
+	d__1 = abs(*b), d__2 = abs(*c__);
+	bcmax = max(d__1,d__2);
+/* Computing MIN */
+	d__1 = abs(*b), d__2 = abs(*c__);
+	bcmis = min(d__1,d__2) * d_sign(&c_b4, b) * d_sign(&c_b4, c__);
+/* Computing MAX */
+	d__1 = abs(p);
+	scale = max(d__1,bcmax);
+	z__ = p / scale * p + bcmax / scale * bcmis;
+
+/*        If Z is of the order of the machine accuracy, postpone the */
+/*        decision on the nature of eigenvalues */
+
+	if (z__ >= eps * 4.) {
+
+/*           Real eigenvalues. Compute A and D. */
+
+	    d__1 = sqrt(scale) * sqrt(z__);
+	    z__ = p + d_sign(&d__1, &p);
+	    *a = *d__ + z__;
+	    *d__ -= bcmax / z__ * bcmis;
+
+/*           Compute B and the rotation matrix */
+
+	    tau = dlapy2_(c__, &z__);
+	    *cs = z__ / tau;
+	    *sn = *c__ / tau;
+	    *b -= *c__;
+	    *c__ = 0.;
+	} else {
+
+/*           Complex eigenvalues, or real (almost) equal eigenvalues. */
+/*           Make diagonal elements equal. */
+
+	    sigma = *b + *c__;
+	    tau = dlapy2_(&sigma, &temp);
+	    *cs = sqrt((abs(sigma) / tau + 1.) * .5);
+	    *sn = -(p / (tau * *cs)) * d_sign(&c_b4, &sigma);
+
+/*           Compute [ AA  BB ] = [ A  B ] [ CS -SN ] */
+/*                   [ CC  DD ]   [ C  D ] [ SN  CS ] */
+
+	    aa = *a * *cs + *b * *sn;
+	    bb = -(*a) * *sn + *b * *cs;
+	    cc = *c__ * *cs + *d__ * *sn;
+	    dd = -(*c__) * *sn + *d__ * *cs;
+
+/*           Compute [ A  B ] = [ CS  SN ] [ AA  BB ] */
+/*                   [ C  D ]   [-SN  CS ] [ CC  DD ] */
+
+	    *a = aa * *cs + cc * *sn;
+	    *b = bb * *cs + dd * *sn;
+	    *c__ = -aa * *sn + cc * *cs;
+	    *d__ = -bb * *sn + dd * *cs;
+
+	    temp = (*a + *d__) * .5;
+	    *a = temp;
+	    *d__ = temp;
+
+	    if (*c__ != 0.) {
+		if (*b != 0.) {
+		    if (d_sign(&c_b4, b) == d_sign(&c_b4, c__)) {
+
+/*                    Real eigenvalues: reduce to upper triangular form */
+
+			sab = sqrt((abs(*b)));
+			sac = sqrt((abs(*c__)));
+			d__1 = sab * sac;
+			p = d_sign(&d__1, c__);
+			tau = 1. / sqrt((d__1 = *b + *c__, abs(d__1)));
+			*a = temp + p;
+			*d__ = temp - p;
+			*b -= *c__;
+			*c__ = 0.;
+			cs1 = sab * tau;
+			sn1 = sac * tau;
+			temp = *cs * cs1 - *sn * sn1;
+			*sn = *cs * sn1 + *sn * cs1;
+			*cs = temp;
+		    }
+		} else {
+		    *b = -(*c__);
+		    *c__ = 0.;
+		    temp = *cs;
+		    *cs = -(*sn);
+		    *sn = temp;
+		}
+	    }
+	}
+
+    }
+
+L10:
+
+/*     Store eigenvalues in (RT1R,RT1I) and (RT2R,RT2I). */
+
+    *rt1r = *a;
+    *rt2r = *d__;
+    if (*c__ == 0.) {
+	*rt1i = 0.;
+	*rt2i = 0.;
+    } else {
+	*rt1i = sqrt((abs(*b))) * sqrt((abs(*c__)));
+	*rt2i = -(*rt1i);
+    }
+    return 0;
+
+/*     End of DLANV2 */
+
+} /* dlanv2_ */
diff --git a/contrib/libs/clapack/dlapll.c b/contrib/libs/clapack/dlapll.c
new file mode 100644
index 0000000000..90ef53ff0d
--- /dev/null
+++ b/contrib/libs/clapack/dlapll.c
@@ -0,0 +1,127 @@
+/* dlapll.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlapll_(integer *n, doublereal *x, integer *incx,
+	doublereal *y, integer *incy, doublereal *ssmin)
+{
+    /* System generated locals */
+    integer i__1;
+
+    /* Local variables */
+    doublereal c__, a11, a12, a22, tau;
+    extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+	    integer *);
+    extern /* Subroutine */ int dlas2_(doublereal *, doublereal *, doublereal
+	    *, doublereal *, doublereal *), daxpy_(integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *);
+    doublereal ssmax;
+    extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *,
+	     integer *, doublereal *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  Given two column vectors X and Y, let */
+
+/*                       A = ( X Y ). */
+
+/*  The subroutine first computes the QR factorization of A = Q*R, */
+/*  and then computes the SVD of the 2-by-2 upper triangular matrix R. */
+/*  The smaller singular value of R is returned in SSMIN, which is used */
+/*  as the measurement of the linear dependency of the vectors X and Y. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  N       (input) INTEGER */
+/*          The length of the vectors X and Y. */
+
+/*  X       (input/output) DOUBLE PRECISION array, */
+/*                         dimension (1+(N-1)*INCX) */
+/*          On entry, X contains the N-vector X. */
+/*          On exit, X is overwritten. */
+
+/*  INCX    (input) INTEGER */
+/*          The increment between successive elements of X. INCX > 0. */
+
+/*  Y       (input/output) DOUBLE PRECISION array, */
+/*                         dimension (1+(N-1)*INCY) */
+/*          On entry, Y contains the N-vector Y. */
+/*          On exit, Y is overwritten. */
+
+/*  INCY    (input) INTEGER */
+/*          The increment between successive elements of Y. INCY > 0. */
+
+/*  SSMIN   (output) DOUBLE PRECISION */
+/*          The smallest singular value of the N-by-2 matrix A = ( X Y ). */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Quick return if possible */
+
+    /* Parameter adjustments */
+    --y;
+    --x;
+
+    /* Function Body */
+    if (*n <= 1) {
+	*ssmin = 0.;
+	return 0;
+    }
+
+/*     Compute the QR factorization of the N-by-2 matrix ( X Y ) */
+
+/*     NOTE(review): X is overwritten by its Householder reflector; */
+/*     x[1] holds r11 (saved in a11) and is temporarily set to 1. so the */
+/*     reflector can be applied to Y via ddot/daxpy below. */
+
+    dlarfg_(n, &x[1], &x[*incx + 1], incx, &tau);
+    a11 = x[1];
+    x[1] = 1.;
+
+    c__ = -tau * ddot_(n, &x[1], incx, &y[1], incy);
+    daxpy_(n, &c__, &x[1], incx, &y[1], incy);
+
+    i__1 = *n - 1;
+    dlarfg_(&i__1, &y[*incy + 1], &y[(*incy << 1) + 1], incy, &tau);
+
+    a12 = y[1];
+    a22 = y[*incy + 1];
+
+/*     Compute the SVD of 2-by-2 Upper triangular matrix. */
+
+    dlas2_(&a11, &a12, &a22, ssmin, &ssmax);
+
+    return 0;
+
+/*     End of DLAPLL */
+
+} /* dlapll_ */
diff --git a/contrib/libs/clapack/dlapmt.c b/contrib/libs/clapack/dlapmt.c
new file mode 100644
index 0000000000..8a93785a5d
--- /dev/null
+++ b/contrib/libs/clapack/dlapmt.c
@@ -0,0 +1,178 @@
+/* dlapmt.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlapmt_(logical *forwrd, integer *m, integer *n,
+	doublereal *x, integer *ldx, integer *k)
+{
+    /* System generated locals */
+    integer x_dim1, x_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, ii, in;
+    doublereal temp;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DLAPMT rearranges the columns of the M by N matrix X as specified */
+/*  by the permutation K(1),K(2),...,K(N) of the integers 1,...,N. */
+/*  If FORWRD = .TRUE.,  forward permutation: */
+
+/*       X(*,K(J)) is moved X(*,J) for J = 1,2,...,N. */
+
+/*  If FORWRD = .FALSE., backward permutation: */
+
+/*       X(*,J) is moved to X(*,K(J)) for J = 1,2,...,N. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  FORWRD  (input) LOGICAL */
+/*          = .TRUE., forward permutation */
+/*          = .FALSE., backward permutation */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix X. M >= 0. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix X. N >= 0. */
+
+/*  X       (input/output) DOUBLE PRECISION array, dimension (LDX,N) */
+/*          On entry, the M by N matrix X. */
+/*          On exit, X contains the permuted matrix X. */
+
+/*  LDX     (input) INTEGER */
+/*          The leading dimension of the array X, LDX >= MAX(1,M). */
+
+/*  K       (input/output) INTEGER array, dimension (N) */
+/*          On entry, K contains the permutation vector. K is used as */
+/*          internal workspace, but reset to its original value on */
+/*          output. */
+
+/*  ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*  NOTE(review): f2c translation of dlapmt.f.  The permutation is */
+/*  applied in place, cycle by cycle: every K(i) is first negated, and */
+/*  each entry is flipped back to positive as its column is placed, so */
+/*  a positive K(i) marks "already visited".  K is therefore restored */
+/*  to its original (all-positive) values on exit, as documented above. */
+
+    /* Parameter adjustments */
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --k;
+
+    /* Function Body */
+    if (*n <= 1) {
+	return 0;
+    }
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	k[i__] = -k[i__];
+/* L10: */
+    }
+
+    if (*forwrd) {
+
+/*        Forward permutation */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+	    if (k[i__] > 0) {
+		goto L40;
+	    }
+
+	    j = i__;
+	    k[j] = -k[j];
+	    in = k[j];
+
+L20:
+	    if (k[in] > 0) {
+		goto L40;
+	    }
+
+	    i__2 = *m;
+	    for (ii = 1; ii <= i__2; ++ii) {
+		temp = x[ii + j * x_dim1];
+		x[ii + j * x_dim1] = x[ii + in * x_dim1];
+		x[ii + in * x_dim1] = temp;
+/* L30: */
+	    }
+
+	    k[in] = -k[in];
+	    j = in;
+	    in = k[in];
+	    goto L20;
+
+L40:
+
+/* L50: */
+	    ;
+	}
+
+    } else {
+
+/*        Backward permutation */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+	    if (k[i__] > 0) {
+		goto L80;
+	    }
+
+	    k[i__] = -k[i__];
+	    j = k[i__];
+L60:
+	    if (j == i__) {
+		goto L80;
+	    }
+
+	    i__2 = *m;
+	    for (ii = 1; ii <= i__2; ++ii) {
+		temp = x[ii + i__ * x_dim1];
+		x[ii + i__ * x_dim1] = x[ii + j * x_dim1];
+		x[ii + j * x_dim1] = temp;
+/* L70: */
+	    }
+
+	    k[j] = -k[j];
+	    j = k[j];
+	    goto L60;
+
+L80:
+
+/* L90: */
+	    ;
+	}
+
+    }
+
+    return 0;
+
+/*     End of DLAPMT */
+
+} /* dlapmt_ */
diff --git a/contrib/libs/clapack/dlapy2.c b/contrib/libs/clapack/dlapy2.c
new file mode 100644
index 0000000000..6e88cd19b1
--- /dev/null
+++ b/contrib/libs/clapack/dlapy2.c
@@ -0,0 +1,73 @@
+/* dlapy2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+doublereal dlapy2_(doublereal *x, doublereal *y)
+{
+    /* System generated locals */
+    doublereal ret_val, d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    doublereal w, z__, xabs, yabs;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAPY2 returns sqrt(x**2+y**2), taking care not to cause unnecessary */
+/* overflow: the result is formed as w*sqrt(1+(z/w)**2) with w = max(|x|,|y|). */
+
+/* Arguments */
+/* ========= */
+
+/* X (input) DOUBLE PRECISION */
+/* Y (input) DOUBLE PRECISION */
+/* X and Y specify the values x and y. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    xabs = abs(*x);
+    yabs = abs(*y);
+    w = max(xabs,yabs); /* larger of the two magnitudes */
+    z__ = min(xabs,yabs); /* smaller of the two magnitudes */
+    if (z__ == 0.) {
+	ret_val = w; /* also covers x = y = 0; avoids dividing by w = 0 */
+    } else {
+/* Computing 2nd power */
+	d__1 = z__ / w; /* |z/w| <= 1, so squaring it cannot overflow */
+	ret_val = w * sqrt(d__1 * d__1 + 1.);
+    }
+    return ret_val;
+
+/* End of DLAPY2 */
+
+} /* dlapy2_ */
diff --git a/contrib/libs/clapack/dlapy3.c b/contrib/libs/clapack/dlapy3.c
new file mode 100644
index 0000000000..6aec3d0198
--- /dev/null
+++ b/contrib/libs/clapack/dlapy3.c
@@ -0,0 +1,83 @@
+/* dlapy3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+doublereal dlapy3_(doublereal *x, doublereal *y, doublereal *z__)
+{
+    /* System generated locals */
+    doublereal ret_val, d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    doublereal w, xabs, yabs, zabs;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAPY3 returns sqrt(x**2+y**2+z**2), taking care not to cause */
+/* unnecessary overflow: all three entries are scaled by the largest */
+/* magnitude before being squared. */
+
+/* Arguments */
+/* ========= */
+
+/* X (input) DOUBLE PRECISION */
+/* Y (input) DOUBLE PRECISION */
+/* Z (input) DOUBLE PRECISION */
+/* X, Y and Z specify the values x, y and z. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    xabs = abs(*x);
+    yabs = abs(*y);
+    zabs = abs(*z__);
+/* Computing MAX */
+    d__1 = max(xabs,yabs);
+    w = max(d__1,zabs); /* w = largest of the three magnitudes */
+    if (w == 0.) {
+/* W can be zero for max(0,nan,0) */
+/* adding all three entries together will make sure */
+/* NaN will not disappear. */
+	ret_val = xabs + yabs + zabs;
+    } else {
+/* Computing 2nd power */
+	d__1 = xabs / w; /* each ratio has magnitude <= 1, so the squares below cannot overflow */
+/* Computing 2nd power */
+	d__2 = yabs / w;
+/* Computing 2nd power */
+	d__3 = zabs / w;
+	ret_val = w * sqrt(d__1 * d__1 + d__2 * d__2 + d__3 * d__3);
+    }
+    return ret_val;
+
+/* End of DLAPY3 */
+
+} /* dlapy3_ */
diff --git a/contrib/libs/clapack/dlaqgb.c b/contrib/libs/clapack/dlaqgb.c
new file mode 100644
index 0000000000..6d80b13b13
--- /dev/null
+++ b/contrib/libs/clapack/dlaqgb.c
@@ -0,0 +1,216 @@
+/* dlaqgb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlaqgb_(integer *m, integer *n, integer *kl, integer *ku,
+	 doublereal *ab, integer *ldab, doublereal *r__, doublereal *c__,
+	doublereal *rowcnd, doublereal *colcnd, doublereal *amax, char *equed)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+
+    /* Local variables */
+    integer i__, j;
+    doublereal cj, large, small;
+    extern doublereal dlamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAQGB equilibrates a general M by N band matrix A with KL */
+/* subdiagonals and KU superdiagonals using the row and scaling factors */
+/* in the vectors R and C. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* On entry, the matrix A in band storage, in rows 1 to KL+KU+1. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl) */
+
+/* On exit, the equilibrated matrix, in the same storage format */
+/* as A. See EQUED for the form of the equilibrated matrix. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KL+KU+1. */
+
+/* R (input) DOUBLE PRECISION array, dimension (M) */
+/* The row scale factors for A. */
+
+/* C (input) DOUBLE PRECISION array, dimension (N) */
+/* The column scale factors for A. */
+
+/* ROWCND (input) DOUBLE PRECISION */
+/* Ratio of the smallest R(i) to the largest R(i). */
+
+/* COLCND (input) DOUBLE PRECISION */
+/* Ratio of the smallest C(i) to the largest C(i). */
+
+/* AMAX (input) DOUBLE PRECISION */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration */
+/* = 'R': Row equilibration, i.e., A has been premultiplied by */
+/* diag(R). */
+/* = 'C': Column equilibration, i.e., A has been postmultiplied */
+/* by diag(C). */
+/* = 'B': Both row and column equilibration, i.e., A has been */
+/* replaced by diag(R) * A * diag(C). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if row or column scaling */
+/* should be done based on the ratio of the row or column scaling */
+/* factors. If ROWCND < THRESH, row scaling is done, and if */
+/* COLCND < THRESH, column scaling is done. (THRESH = 0.1, inlined */
+/* below as the literal .1.) */
+
+/* LARGE and SMALL are threshold values used to decide if row scaling */
+/* should be done based on the absolute size of the largest matrix */
+/* element. If AMAX > LARGE or AMAX < SMALL, row scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1; /* shift AB so 1-based Fortran indexing AB(i,j) maps to ab[i + j*ab_dim1] */
+    ab -= ab_offset;
+    --r__;
+    --c__;
+
+    /* Function Body */
+    if (*m <= 0 || *n <= 0) {
+	*(unsigned char *)equed = 'N'; /* f2c idiom: store into the CHARACTER*1 output */
+	return 0;
+    }
+
+/* Initialize LARGE and SMALL. */
+
+    small = dlamch_("Safe minimum") / dlamch_("Precision"); /* smallest magnitude safe to scale against */
+    large = 1. / small;
+
+    if (*rowcnd >= .1 && *amax >= small && *amax <= large) { /* .1 is THRESH */
+
+/* No row scaling */
+
+	if (*colcnd >= .1) {
+
+/* No column scaling */
+
+	    *(unsigned char *)equed = 'N';
+	} else {
+
+/* Column scaling */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = c__[j];
+/* Computing MAX */
+		i__2 = 1, i__3 = j - *ku; /* first in-band row of column j */
+/* Computing MIN */
+		i__5 = *m, i__6 = j + *kl; /* last in-band row of column j */
+		i__4 = min(i__5,i__6);
+		for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+		    ab[*ku + 1 + i__ - j + j * ab_dim1] = cj * ab[*ku + 1 +
+			    i__ - j + j * ab_dim1];
+/* L10: */
+		}
+/* L20: */
+	    }
+	    *(unsigned char *)equed = 'C';
+	}
+    } else if (*colcnd >= .1) {
+
+/* Row scaling, no column scaling */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+	    i__4 = 1, i__2 = j - *ku;
+/* Computing MIN */
+	    i__5 = *m, i__6 = j + *kl;
+	    i__3 = min(i__5,i__6);
+	    for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+		ab[*ku + 1 + i__ - j + j * ab_dim1] = r__[i__] * ab[*ku + 1 +
+			i__ - j + j * ab_dim1];
+/* L30: */
+	    }
+/* L40: */
+	}
+	*(unsigned char *)equed = 'R';
+    } else {
+
+/* Row and column scaling */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    cj = c__[j];
+/* Computing MAX */
+	    i__3 = 1, i__4 = j - *ku;
+/* Computing MIN */
+	    i__5 = *m, i__6 = j + *kl;
+	    i__2 = min(i__5,i__6);
+	    for (i__ = max(i__3,i__4); i__ <= i__2; ++i__) {
+		ab[*ku + 1 + i__ - j + j * ab_dim1] = cj * r__[i__] * ab[*ku
+			+ 1 + i__ - j + j * ab_dim1];
+/* L50: */
+	    }
+/* L60: */
+	}
+	*(unsigned char *)equed = 'B';
+    }
+
+    return 0;
+
+/* End of DLAQGB */
+
+} /* dlaqgb_ */
diff --git a/contrib/libs/clapack/dlaqge.c b/contrib/libs/clapack/dlaqge.c
new file mode 100644
index 0000000000..1a43bf3d9b
--- /dev/null
+++ b/contrib/libs/clapack/dlaqge.c
@@ -0,0 +1,188 @@
+/* dlaqge.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlaqge_(integer *m, integer *n, doublereal *a, integer *
+	lda, doublereal *r__, doublereal *c__, doublereal *rowcnd, doublereal
+	*colcnd, doublereal *amax, char *equed)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j;
+    doublereal cj, large, small;
+    extern doublereal dlamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAQGE equilibrates a general M by N matrix A using the row and */
+/* column scaling factors in the vectors R and C. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M by N matrix A. */
+/* On exit, the equilibrated matrix. See EQUED for the form of */
+/* the equilibrated matrix. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(M,1). */
+
+/* R (input) DOUBLE PRECISION array, dimension (M) */
+/* The row scale factors for A. */
+
+/* C (input) DOUBLE PRECISION array, dimension (N) */
+/* The column scale factors for A. */
+
+/* ROWCND (input) DOUBLE PRECISION */
+/* Ratio of the smallest R(i) to the largest R(i). */
+
+/* COLCND (input) DOUBLE PRECISION */
+/* Ratio of the smallest C(i) to the largest C(i). */
+
+/* AMAX (input) DOUBLE PRECISION */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration */
+/* = 'R': Row equilibration, i.e., A has been premultiplied by */
+/* diag(R). */
+/* = 'C': Column equilibration, i.e., A has been postmultiplied */
+/* by diag(C). */
+/* = 'B': Both row and column equilibration, i.e., A has been */
+/* replaced by diag(R) * A * diag(C). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if row or column scaling */
+/* should be done based on the ratio of the row or column scaling */
+/* factors. If ROWCND < THRESH, row scaling is done, and if */
+/* COLCND < THRESH, column scaling is done. (THRESH = 0.1, inlined */
+/* below as the literal .1.) */
+
+/* LARGE and SMALL are threshold values used to decide if row scaling */
+/* should be done based on the absolute size of the largest matrix */
+/* element. If AMAX > LARGE or AMAX < SMALL, row scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1; /* shift A so 1-based Fortran indexing A(i,j) maps to a[i + j*a_dim1] */
+    a -= a_offset;
+    --r__;
+    --c__;
+
+    /* Function Body */
+    if (*m <= 0 || *n <= 0) {
+	*(unsigned char *)equed = 'N'; /* f2c idiom: store into the CHARACTER*1 output */
+	return 0;
+    }
+
+/* Initialize LARGE and SMALL. */
+
+    small = dlamch_("Safe minimum") / dlamch_("Precision"); /* smallest magnitude safe to scale against */
+    large = 1. / small;
+
+    if (*rowcnd >= .1 && *amax >= small && *amax <= large) { /* .1 is THRESH */
+
+/* No row scaling */
+
+	if (*colcnd >= .1) {
+
+/* No column scaling */
+
+	    *(unsigned char *)equed = 'N';
+	} else {
+
+/* Column scaling */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = c__[j];
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    a[i__ + j * a_dim1] = cj * a[i__ + j * a_dim1];
+/* L10: */
+		}
+/* L20: */
+	    }
+	    *(unsigned char *)equed = 'C';
+	}
+    } else if (*colcnd >= .1) {
+
+/* Row scaling, no column scaling */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		a[i__ + j * a_dim1] = r__[i__] * a[i__ + j * a_dim1];
+/* L30: */
+	    }
+/* L40: */
+	}
+	*(unsigned char *)equed = 'R';
+    } else {
+
+/* Row and column scaling */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    cj = c__[j];
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		a[i__ + j * a_dim1] = cj * r__[i__] * a[i__ + j * a_dim1];
+/* L50: */
+	    }
+/* L60: */
+	}
+	*(unsigned char *)equed = 'B';
+    }
+
+    return 0;
+
+/* End of DLAQGE */
+
+} /* dlaqge_ */
diff --git a/contrib/libs/clapack/dlaqp2.c b/contrib/libs/clapack/dlaqp2.c
new file mode 100644
index 0000000000..a8fdf272f0
--- /dev/null
+++ b/contrib/libs/clapack/dlaqp2.c
@@ -0,0 +1,237 @@
+/* dlaqp2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1; /* unit stride/count argument passed by address to BLAS calls */
+
+/* Subroutine */ int dlaqp2_(integer *m, integer *n, integer *offset,
+	doublereal *a, integer *lda, integer *jpvt, doublereal *tau,
+	doublereal *vn1, doublereal *vn2, doublereal *work)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, mn;
+    doublereal aii;
+    integer pvt;
+    doublereal temp;
+    extern doublereal dnrm2_(integer *, doublereal *, integer *);
+    doublereal temp2, tol3z;
+    extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *);
+    integer offpi, itemp;
+    extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    extern doublereal dlamch_(char *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dlarfp_(integer *, doublereal *, doublereal *,
+	    integer *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAQP2 computes a QR factorization with column pivoting of */
+/* the block A(OFFSET+1:M,1:N). */
+/* The block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* OFFSET (input) INTEGER */
+/* The number of rows of the matrix A that must be pivoted */
+/* but not factorized. OFFSET >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the upper triangle of block A(OFFSET+1:M,1:N) is */
+/* the triangular factor obtained; the elements in block */
+/* A(OFFSET+1:M,1:N) below the diagonal, together with the */
+/* array TAU, represent the orthogonal matrix Q as a product of */
+/* elementary reflectors. Block A(1:OFFSET,1:N) has been */
+/* accordingly pivoted, but not factorized. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(i) .ne. 0, the i-th column of A is permuted */
+/* to the front of A*P (a leading column); if JPVT(i) = 0, */
+/* the i-th column of A is a free column. */
+/* On exit, if JPVT(i) = k, then the i-th column of A*P */
+/* was the k-th column of A. */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors. */
+
+/* VN1 (input/output) DOUBLE PRECISION array, dimension (N) */
+/* The vector with the partial column norms. */
+
+/* VN2 (input/output) DOUBLE PRECISION array, dimension (N) */
+/* The vector with the exact column norms. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+/* X. Sun, Computer Science Dept., Duke University, USA */
+
+/* Partial column norm updating strategy modified by */
+/* Z. Drmac and Z. Bujanovic, Dept. of Mathematics, */
+/* University of Zagreb, Croatia. */
+/* June 2006. */
+/* For more details see LAPACK Working Note 176. */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1; /* shift A so 1-based Fortran indexing A(i,j) maps to a[i + j*a_dim1] */
+    a -= a_offset;
+    --jpvt;
+    --tau;
+    --vn1;
+    --vn2;
+    --work;
+
+    /* Function Body */
+/* Computing MIN */
+    i__1 = *m - *offset;
+    mn = min(i__1,*n); /* number of columns that will actually be factorized */
+    tol3z = sqrt(dlamch_("Epsilon")); /* LAWN 176 threshold for recomputing a partial norm */
+
+/* Compute factorization. */
+
+    i__1 = mn;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+	offpi = *offset + i__; /* row of the current diagonal element */
+
+/* Determine ith pivot column and swap if necessary. */
+
+	i__2 = *n - i__ + 1;
+	pvt = i__ - 1 + idamax_(&i__2, &vn1[i__], &c__1); /* column of largest partial norm among i..n */
+
+	if (pvt != i__) {
+	    dswap_(m, &a[pvt * a_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &
+		    c__1);
+	    itemp = jpvt[pvt];
+	    jpvt[pvt] = jpvt[i__];
+	    jpvt[i__] = itemp;
+	    vn1[pvt] = vn1[i__]; /* the pivot column's norms move with it */
+	    vn2[pvt] = vn2[i__];
+	}
+
+/* Generate elementary reflector H(i). */
+
+	if (offpi < *m) {
+	    i__2 = *m - offpi + 1;
+	    dlarfp_(&i__2, &a[offpi + i__ * a_dim1], &a[offpi + 1 + i__ *
+		    a_dim1], &c__1, &tau[i__]);
+	} else {
+	    dlarfp_(&c__1, &a[*m + i__ * a_dim1], &a[*m + i__ * a_dim1], &
+		    c__1, &tau[i__]); /* last row: reflector of length 1 */
+	}
+
+	if (i__ <= *n) {
+
+/* Apply H(i)' to A(offset+i:m,i+1:n) from the left. */
+
+	    aii = a[offpi + i__ * a_dim1];
+	    a[offpi + i__ * a_dim1] = 1.; /* temporarily make the column hold the full reflector v */
+	    i__2 = *m - offpi + 1;
+	    i__3 = *n - i__;
+	    dlarf_("Left", &i__2, &i__3, &a[offpi + i__ * a_dim1], &c__1, &
+		    tau[i__], &a[offpi + (i__ + 1) * a_dim1], lda, &work[1]);
+	    a[offpi + i__ * a_dim1] = aii; /* restore the diagonal element */
+	}
+
+/* Update partial column norms. */
+
+	i__2 = *n;
+	for (j = i__ + 1; j <= i__2; ++j) {
+	    if (vn1[j] != 0.) {
+
+/* NOTE: The following 4 lines follow from the analysis in */
+/* Lapack Working Note 176. */
+
+/* Computing 2nd power */
+		d__2 = (d__1 = a[offpi + j * a_dim1], abs(d__1)) / vn1[j];
+		temp = 1. - d__2 * d__2;
+		temp = max(temp,0.); /* guard against a small negative value from rounding */
+/* Computing 2nd power */
+		d__1 = vn1[j] / vn2[j];
+		temp2 = temp * (d__1 * d__1);
+		if (temp2 <= tol3z) { /* cancellation too severe: recompute the norm exactly */
+		    if (offpi < *m) {
+			i__3 = *m - offpi;
+			vn1[j] = dnrm2_(&i__3, &a[offpi + 1 + j * a_dim1], &
+				c__1);
+			vn2[j] = vn1[j];
+		    } else {
+			vn1[j] = 0.;
+			vn2[j] = 0.;
+		    }
+		} else {
+		    vn1[j] *= sqrt(temp); /* cheap downdate of the partial norm */
+		}
+	    }
+/* L10: */
+	}
+
+/* L20: */
+    }
+
+    return 0;
+
+/* End of DLAQP2 */
+
+} /* dlaqp2_ */
diff --git a/contrib/libs/clapack/dlaqps.c b/contrib/libs/clapack/dlaqps.c
new file mode 100644
index 0000000000..ad6d3a0fea
--- /dev/null
+++ b/contrib/libs/clapack/dlaqps.c
@@ -0,0 +1,345 @@
+/* dlaqps.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1; /* unit stride/count for BLAS calls */
+static doublereal c_b8 = -1.; /* alpha = -1 for rank-update GEMV/GEMM calls */
+static doublereal c_b9 = 1.; /* beta/alpha = 1 */
+static doublereal c_b16 = 0.; /* beta = 0 (overwrite destination) */
+
+/* Subroutine */ int dlaqps_(integer *m, integer *n, integer *offset, integer
+	*nb, integer *kb, doublereal *a, integer *lda, integer *jpvt,
+	doublereal *tau, doublereal *vn1, doublereal *vn2, doublereal *auxv,
+	doublereal *f, integer *ldf)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, f_dim1, f_offset, i__1, i__2;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+    integer i_dnnt(doublereal *);
+
+    /* Local variables */
+    integer j, k, rk;
+    doublereal akk;
+    integer pvt;
+    doublereal temp;
+    extern doublereal dnrm2_(integer *, doublereal *, integer *);
+    doublereal temp2, tol3z;
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *),
+	    dgemv_(char *, integer *, integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *);
+    integer itemp;
+    extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    extern doublereal dlamch_(char *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dlarfp_(integer *, doublereal *, doublereal *,
+	    integer *, doublereal *);
+    integer lsticc, lastrk;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAQPS computes a step of QR factorization with column pivoting */
+/* of a real M-by-N matrix A by using Blas-3. It tries to factorize */
+/* NB columns from A starting from the row OFFSET+1, and updates all */
+/* of the matrix with Blas-3 xGEMM. */
+
+/* In some cases, due to catastrophic cancellations, it cannot */
+/* factorize NB columns. Hence, the actual number of factorized */
+/* columns is returned in KB. */
+
+/* Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* OFFSET (input) INTEGER */
+/* The number of rows of A that have been factorized in */
+/* previous steps. */
+
+/* NB (input) INTEGER */
+/* The number of columns to factorize. */
+
+/* KB (output) INTEGER */
+/* The number of columns actually factorized. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, block A(OFFSET+1:M,1:KB) is the triangular */
+/* factor obtained and block A(1:OFFSET,1:N) has been */
+/* accordingly pivoted, but not factorized. */
+/* The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has */
+/* been updated. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* JPVT(I) = K <==> Column K of the full matrix A has been */
+/* permuted into position I in AP. */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (KB) */
+/* The scalar factors of the elementary reflectors. */
+
+/* VN1 (input/output) DOUBLE PRECISION array, dimension (N) */
+/* The vector with the partial column norms. */
+
+/* VN2 (input/output) DOUBLE PRECISION array, dimension (N) */
+/* The vector with the exact column norms. */
+
+/* AUXV (input/output) DOUBLE PRECISION array, dimension (NB) */
+/* Auxiliary vector. */
+
+/* F (input/output) DOUBLE PRECISION array, dimension (LDF,NB) */
+/* Matrix F' = L*Y'*A. */
+
+/* LDF (input) INTEGER */
+/* The leading dimension of the array F. LDF >= max(1,N). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+/* X. Sun, Computer Science Dept., Duke University, USA */
+
+/* Partial column norm updating strategy modified by */
+/* Z. Drmac and Z. Bujanovic, Dept. of Mathematics, */
+/* University of Zagreb, Croatia. */
+/* June 2006. */
+/* For more details see LAPACK Working Note 176. */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1; /* shift A so 1-based Fortran indexing A(i,j) maps to a[i + j*a_dim1] */
+    a -= a_offset;
+    --jpvt;
+    --tau;
+    --vn1;
+    --vn2;
+    --auxv;
+    f_dim1 = *ldf;
+    f_offset = 1 + f_dim1;
+    f -= f_offset;
+
+    /* Function Body */
+/* Computing MIN */
+    i__1 = *m, i__2 = *n + *offset;
+    lastrk = min(i__1,i__2); /* last row index a reflector can start at */
+    lsticc = 0; /* head of a linked list (threaded through VN2) of columns needing norm recomputation */
+    k = 0;
+    tol3z = sqrt(dlamch_("Epsilon")); /* LAWN 176 threshold for recomputing a partial norm */
+
+/* Beginning of while loop. */
+
+L10:
+    if (k < *nb && lsticc == 0) {
+	++k;
+	rk = *offset + k; /* row of the current diagonal element */
+
+/* Determine ith pivot column and swap if necessary */
+
+	i__1 = *n - k + 1;
+	pvt = k - 1 + idamax_(&i__1, &vn1[k], &c__1); /* column of largest partial norm among k..n */
+	if (pvt != k) {
+	    dswap_(m, &a[pvt * a_dim1 + 1], &c__1, &a[k * a_dim1 + 1], &c__1);
+	    i__1 = k - 1;
+	    dswap_(&i__1, &f[pvt + f_dim1], ldf, &f[k + f_dim1], ldf); /* keep rows of F consistent with the pivot */
+	    itemp = jpvt[pvt];
+	    jpvt[pvt] = jpvt[k];
+	    jpvt[k] = itemp;
+	    vn1[pvt] = vn1[k];
+	    vn2[pvt] = vn2[k];
+	}
+
+/* Apply previous Householder reflectors to column K: */
+/* A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'. */
+
+	if (k > 1) {
+	    i__1 = *m - rk + 1;
+	    i__2 = k - 1;
+	    dgemv_("No transpose", &i__1, &i__2, &c_b8, &a[rk + a_dim1], lda,
+		    &f[k + f_dim1], ldf, &c_b9, &a[rk + k * a_dim1], &c__1);
+	}
+
+/* Generate elementary reflector H(k). */
+
+	if (rk < *m) {
+	    i__1 = *m - rk + 1;
+	    dlarfp_(&i__1, &a[rk + k * a_dim1], &a[rk + 1 + k * a_dim1], &
+		    c__1, &tau[k]);
+	} else {
+	    dlarfp_(&c__1, &a[rk + k * a_dim1], &a[rk + k * a_dim1], &c__1, &
+		    tau[k]); /* last row: reflector of length 1 */
+	}
+
+	akk = a[rk + k * a_dim1];
+	a[rk + k * a_dim1] = 1.; /* temporarily make the column hold the full reflector v */
+
+/* Compute Kth column of F: */
+
+/* Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K). */
+
+	if (k < *n) {
+	    i__1 = *m - rk + 1;
+	    i__2 = *n - k;
+	    dgemv_("Transpose", &i__1, &i__2, &tau[k], &a[rk + (k + 1) *
+		    a_dim1], lda, &a[rk + k * a_dim1], &c__1, &c_b16, &f[k +
+		    1 + k * f_dim1], &c__1);
+	}
+
+/* Padding F(1:K,K) with zeros. */
+
+	i__1 = k;
+	for (j = 1; j <= i__1; ++j) {
+	    f[j + k * f_dim1] = 0.;
+/* L20: */
+	}
+
+/* Incremental updating of F: */
+/* F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)' */
+/* *A(RK:M,K). */
+
+	if (k > 1) {
+	    i__1 = *m - rk + 1;
+	    i__2 = k - 1;
+	    d__1 = -tau[k];
+	    dgemv_("Transpose", &i__1, &i__2, &d__1, &a[rk + a_dim1], lda, &a[
+		    rk + k * a_dim1], &c__1, &c_b16, &auxv[1], &c__1);
+
+	    i__1 = k - 1;
+	    dgemv_("No transpose", n, &i__1, &c_b9, &f[f_dim1 + 1], ldf, &
+		    auxv[1], &c__1, &c_b9, &f[k * f_dim1 + 1], &c__1);
+	}
+
+/* Update the current row of A: */
+/* A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */
+
+	if (k < *n) {
+	    i__1 = *n - k;
+	    dgemv_("No transpose", &i__1, &k, &c_b8, &f[k + 1 + f_dim1], ldf,
+		    &a[rk + a_dim1], lda, &c_b9, &a[rk + (k + 1) * a_dim1],
+		    lda);
+	}
+
+/* Update partial column norms. */
+
+	if (rk < lastrk) {
+	    i__1 = *n;
+	    for (j = k + 1; j <= i__1; ++j) {
+		if (vn1[j] != 0.) {
+
+/* NOTE: The following 4 lines follow from the analysis in */
+/* Lapack Working Note 176. */
+
+		    temp = (d__1 = a[rk + j * a_dim1], abs(d__1)) / vn1[j];
+/* Computing MAX */
+		    d__1 = 0., d__2 = (temp + 1.) * (1. - temp); /* max with 0 guards against rounding */
+		    temp = max(d__1,d__2);
+/* Computing 2nd power */
+		    d__1 = vn1[j] / vn2[j];
+		    temp2 = temp * (d__1 * d__1);
+		    if (temp2 <= tol3z) {
+			vn2[j] = (doublereal) lsticc; /* push j onto the recompute list (old head stored in VN2) */
+			lsticc = j;
+		    } else {
+			vn1[j] *= sqrt(temp); /* cheap downdate of the partial norm */
+		    }
+		}
+/* L30: */
+	    }
+	}
+
+	a[rk + k * a_dim1] = akk; /* restore the diagonal element */
+
+/* End of while loop. */
+
+	goto L10;
+    }
+    *kb = k; /* number of columns actually factorized in this block step */
+    rk = *offset + *kb;
+
+/* Apply the block reflector to the rest of the matrix: */
+/* A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) - */
+/* A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)'. */
+
+/* Computing MIN */
+    i__1 = *n, i__2 = *m - *offset;
+    if (*kb < min(i__1,i__2)) {
+	i__1 = *m - rk;
+	i__2 = *n - *kb;
+	dgemm_("No transpose", "Transpose", &i__1, &i__2, kb, &c_b8, &a[rk +
+		1 + a_dim1], lda, &f[*kb + 1 + f_dim1], ldf, &c_b9, &a[rk + 1
+		+ (*kb + 1) * a_dim1], lda);
+    }
+
+/* Recomputation of difficult columns. */
+
+L40:
+    if (lsticc > 0) {
+	itemp = i_dnnt(&vn2[lsticc]); /* next column in the recompute list */
+	i__1 = *m - rk;
+	vn1[lsticc] = dnrm2_(&i__1, &a[rk + 1 + lsticc * a_dim1], &c__1);
+
+/* NOTE: The computation of VN1( LSTICC ) relies on the fact that */
+/* SNRM2 does not fail on vectors with norm below the value of */
+/* SQRT(DLAMCH('S')) */
+
+	vn2[lsticc] = vn1[lsticc];
+	lsticc = itemp;
+	goto L40;
+    }
+
+    return 0;
+
+/* End of DLAQPS */
+
+} /* dlaqps_ */
diff --git a/contrib/libs/clapack/dlaqr0.c b/contrib/libs/clapack/dlaqr0.c
new file mode 100644
index 0000000000..174608700b
--- /dev/null
+++ b/contrib/libs/clapack/dlaqr0.c
@@ -0,0 +1,758 @@
+/* dlaqr0.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__13 = 13;
+static integer c__15 = 15;
+static integer c_n1 = -1;
+static integer c__12 = 12;
+static integer c__14 = 14;
+static integer c__16 = 16;
+static logical c_false = FALSE_;
+static integer c__1 = 1;
+static integer c__3 = 3;
+
+/* Subroutine */ int dlaqr0_(logical *wantt, logical *wantz, integer *n,
+ integer *ilo, integer *ihi, doublereal *h__, integer *ldh, doublereal
+ *wr, doublereal *wi, integer *iloz, integer *ihiz, doublereal *z__,
+ integer *ldz, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Local variables */
+ integer i__, k;
+ doublereal aa, bb, cc, dd;
+ integer ld;
+ doublereal cs;
+ integer nh, it, ks, kt;
+ doublereal sn;
+ integer ku, kv, ls, ns;
+ doublereal ss;
+ integer nw, inf, kdu, nho, nve, kwh, nsr, nwr, kwv, ndec, ndfl, kbot,
+ nmin;
+ doublereal swap;
+ integer ktop;
+ doublereal zdum[1] /* was [1][1] */;
+ integer kacc22, itmax, nsmax, nwmax, kwtop;
+ extern /* Subroutine */ int dlanv2_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *), dlaqr3_(
+ logical *, logical *, integer *, integer *, integer *, integer *,
+ doublereal *, integer *, integer *, integer *, doublereal *,
+ integer *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *),
+ dlaqr4_(logical *, logical *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ integer *), dlaqr5_(logical *, logical *, integer *, integer *,
+ integer *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *, integer *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ integer *, doublereal *, integer *, integer *, doublereal *,
+ integer *);
+ integer nibble;
+ extern /* Subroutine */ int dlahqr_(logical *, logical *, integer *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *), dlacpy_(char *, integer *, integer *, doublereal *,
+ integer *, doublereal *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ char jbcmpz[2]; /* two ILAENV job-flag chars; was [1], but jbcmpz[1] is written below */
+ integer nwupbd;
+ logical sorted;
+ integer lwkopt;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAQR0 computes the eigenvalues of a Hessenberg matrix H */
+/* and, optionally, the matrices T and Z from the Schur decomposition */
+/* H = Z T Z**T, where T is an upper quasi-triangular matrix (the */
+/* Schur form), and Z is the orthogonal matrix of Schur vectors. */
+
+/* Optionally Z may be postmultiplied into an input orthogonal */
+/* matrix Q so that this routine can give the Schur factorization */
+/* of a matrix A which has been reduced to the Hessenberg form H */
+/* by the orthogonal matrix Q: A = Q*H*Q**T = (QZ)*T*(QZ)**T. */
+
+/* Arguments */
+/* ========= */
+
+/* WANTT (input) LOGICAL */
+/* = .TRUE. : the full Schur form T is required; */
+/* = .FALSE.: only eigenvalues are required. */
+
+/* WANTZ (input) LOGICAL */
+/* = .TRUE. : the matrix of Schur vectors Z is required; */
+/* = .FALSE.: Schur vectors are not required. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N .GE. 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that H is already upper triangular in rows */
+/* and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, */
+/* H(ILO,ILO-1) is zero. ILO and IHI are normally set by a */
+/* previous call to DGEBAL, and then passed to DGEHRD when the */
+/* matrix output by DGEBAL is reduced to Hessenberg form. */
+/* Otherwise, ILO and IHI should be set to 1 and N, */
+/* respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. */
+/* If N = 0, then ILO = 1 and IHI = 0. */
+
+/* H (input/output) DOUBLE PRECISION array, dimension (LDH,N) */
+/* On entry, the upper Hessenberg matrix H. */
+/* On exit, if INFO = 0 and WANTT is .TRUE., then H contains */
+/* the upper quasi-triangular matrix T from the Schur */
+/* decomposition (the Schur form); 2-by-2 diagonal blocks */
+/* (corresponding to complex conjugate pairs of eigenvalues) */
+/* are returned in standard form, with H(i,i) = H(i+1,i+1) */
+/* and H(i+1,i)*H(i,i+1).LT.0. If INFO = 0 and WANTT is */
+/* .FALSE., then the contents of H are unspecified on exit. */
+/* (The output value of H when INFO.GT.0 is given under the */
+/* description of INFO below.) */
+
+/* This subroutine may explicitly set H(i,j) = 0 for i.GT.j and */
+/* j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH .GE. max(1,N). */
+
+/* WR (output) DOUBLE PRECISION array, dimension (IHI) */
+/* WI (output) DOUBLE PRECISION array, dimension (IHI) */
+/* The real and imaginary parts, respectively, of the computed */
+/* eigenvalues of H(ILO:IHI,ILO:IHI) are stored in WR(ILO:IHI) */
+/* and WI(ILO:IHI). If two eigenvalues are computed as a */
+/* complex conjugate pair, they are stored in consecutive */
+/* elements of WR and WI, say the i-th and (i+1)th, with */
+/* WI(i) .GT. 0 and WI(i+1) .LT. 0. If WANTT is .TRUE., then */
+/* the eigenvalues are stored in the same order as on the */
+/* diagonal of the Schur form returned in H, with */
+/* WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2 diagonal */
+/* block, WI(i) = sqrt(-H(i+1,i)*H(i,i+1)) and */
+/* WI(i+1) = -WI(i). */
+
+/* ILOZ (input) INTEGER */
+/* IHIZ (input) INTEGER */
+/* Specify the rows of Z to which transformations must be */
+/* applied if WANTZ is .TRUE.. */
+/* 1 .LE. ILOZ .LE. ILO; IHI .LE. IHIZ .LE. N. */
+
+/* Z (input/output) DOUBLE PRECISION array, dimension (LDZ,IHI) */
+/* If WANTZ is .FALSE., then Z is not referenced. */
+/* If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is */
+/* replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the */
+/* orthogonal Schur factor of H(ILO:IHI,ILO:IHI). */
+/* (The output value of Z when INFO.GT.0 is given under */
+/* the description of INFO below.) */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. if WANTZ is .TRUE. */
+/* then LDZ.GE.MAX(1,IHIZ). Otherwise, LDZ.GE.1. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension LWORK */
+/* On exit, if LWORK = -1, WORK(1) returns an estimate of */
+/* the optimal value for LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK .GE. max(1,N) */
+/* is sufficient, but LWORK typically as large as 6*N may */
+/* be required for optimal performance. A workspace query */
+/* to determine the optimal workspace size is recommended. */
+
+/* If LWORK = -1, then DLAQR0 does a workspace query. */
+/* In this case, DLAQR0 checks the input parameters and */
+/* estimates the optimal workspace size for the given */
+/* values of N, ILO and IHI. The estimate is returned */
+/* in WORK(1). No error message related to LWORK is */
+/* issued by XERBLA. Neither H nor Z are accessed. */
+
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* .GT. 0: if INFO = i, DLAQR0 failed to compute all of */
+/* the eigenvalues. Elements 1:ilo-1 and i+1:n of WR */
+/* and WI contain those eigenvalues which have been */
+/* successfully computed. (Failures are rare.) */
+
+/* If INFO .GT. 0 and WANTT is .FALSE., then on exit, */
+/* the remaining unconverged eigenvalues are the eigen- */
+/* values of the upper Hessenberg matrix rows and */
+/* columns ILO through INFO of the final, output */
+/* value of H. */
+
+/* If INFO .GT. 0 and WANTT is .TRUE., then on exit */
+
+/* (*) (initial value of H)*U = U*(final value of H) */
+
+/* where U is an orthogonal matrix. The final */
+/* value of H is upper Hessenberg and quasi-triangular */
+/* in rows and columns INFO+1 through IHI. */
+
+/* If INFO .GT. 0 and WANTZ is .TRUE., then on exit */
+
+/* (final value of Z(ILO:IHI,ILOZ:IHIZ) */
+/* = (initial value of Z(ILO:IHI,ILOZ:IHIZ)*U */
+
+/* where U is the orthogonal matrix in (*) (regard- */
+/* less of the value of WANTT.) */
+
+/* If INFO .GT. 0 and WANTZ is .FALSE., then Z is not */
+/* accessed. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* References: */
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 */
+/* Performance, SIAM Journal of Matrix Analysis, volume 23, pages */
+/* 929--947, 2002. */
+
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part II: Aggressive Early Deflation, SIAM Journal */
+/* of Matrix Analysis, volume 23, pages 948--973, 2002. */
+
+/* ================================================================ */
+/* .. Parameters .. */
+
+/* ==== Matrices of order NTINY or smaller must be processed by */
+/* . DLAHQR because of insufficient subdiagonal scratch space. */
+/* . (This is a hard limit.) ==== */
+
+/* ==== Exceptional deflation windows: try to cure rare */
+/* . slow convergence by varying the size of the */
+/* . deflation window after KEXNW iterations. ==== */
+
+/* ==== Exceptional shifts: try to cure rare slow convergence */
+/* . with ad-hoc exceptional shifts every KEXSH iterations. */
+/* . ==== */
+
+/* ==== The constants WILK1 and WILK2 are used to form the */
+/* . exceptional shifts. ==== */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --wr;
+ --wi;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+/* ==== Quick return for N = 0: nothing to do. ==== */
+
+ if (*n == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ if (*n <= 11) {
+
+/* ==== Tiny matrices must use DLAHQR. ==== */
+
+ lwkopt = 1;
+ if (*lwork != -1) {
+ dlahqr_(wantt, wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], &
+ wi[1], iloz, ihiz, &z__[z_offset], ldz, info);
+ }
+ } else {
+
+/* ==== Use small bulge multi-shift QR with aggressive early */
+/* . deflation on larger-than-tiny matrices. ==== */
+
+/* ==== Hope for the best. ==== */
+
+ *info = 0;
+
+/* ==== Set up job flags for ILAENV. ==== */
+
+ if (*wantt) {
+ *(unsigned char *)jbcmpz = 'S';
+ } else {
+ *(unsigned char *)jbcmpz = 'E';
+ }
+ if (*wantz) {
+ *(unsigned char *)&jbcmpz[1] = 'V';
+ } else {
+ *(unsigned char *)&jbcmpz[1] = 'N';
+ }
+
+/* ==== NWR = recommended deflation window size. At this */
+/* . point, N .GT. NTINY = 11, so there is enough */
+/* . subdiagonal workspace for NWR.GE.2 as required. */
+/* . (In fact, there is enough subdiagonal space for */
+/* . NWR.GE.3.) ==== */
+
+ nwr = ilaenv_(&c__13, "DLAQR0", jbcmpz, n, ilo, ihi, lwork);
+ nwr = max(2,nwr);
+/* Computing MIN */
+ i__1 = *ihi - *ilo + 1, i__2 = (*n - 1) / 3, i__1 = min(i__1,i__2);
+ nwr = min(i__1,nwr);
+
+/* ==== NSR = recommended number of simultaneous shifts. */
+/* . At this point N .GT. NTINY = 11, so there is */
+/* . enough subdiagonal workspace for NSR to be even */
+/* . and greater than or equal to two as required. ==== */
+
+ nsr = ilaenv_(&c__15, "DLAQR0", jbcmpz, n, ilo, ihi, lwork);
+/* Computing MIN */
+ i__1 = nsr, i__2 = (*n + 6) / 9, i__1 = min(i__1,i__2), i__2 = *ihi -
+ *ilo;
+ nsr = min(i__1,i__2);
+/* Computing MAX */
+ i__1 = 2, i__2 = nsr - nsr % 2;
+ nsr = max(i__1,i__2);
+
+/* ==== Estimate optimal workspace ==== */
+
+/* ==== Workspace query call to DLAQR3 ==== */
+
+ i__1 = nwr + 1;
+ dlaqr3_(wantt, wantz, n, ilo, ihi, &i__1, &h__[h_offset], ldh, iloz,
+ ihiz, &z__[z_offset], ldz, &ls, &ld, &wr[1], &wi[1], &h__[
+ h_offset], ldh, n, &h__[h_offset], ldh, n, &h__[h_offset],
+ ldh, &work[1], &c_n1);
+
+/* ==== Optimal workspace = MAX(DLAQR5, DLAQR3) ==== */
+
+/* Computing MAX */
+ i__1 = nsr * 3 / 2, i__2 = (integer) work[1];
+ lwkopt = max(i__1,i__2);
+
+/* ==== Quick return in case of workspace query. ==== */
+
+ if (*lwork == -1) {
+ work[1] = (doublereal) lwkopt;
+ return 0;
+ }
+
+/* ==== DLAHQR/DLAQR0 crossover point ==== */
+
+ nmin = ilaenv_(&c__12, "DLAQR0", jbcmpz, n, ilo, ihi, lwork);
+ nmin = max(11,nmin);
+
+/* ==== Nibble crossover point ==== */
+
+ nibble = ilaenv_(&c__14, "DLAQR0", jbcmpz, n, ilo, ihi, lwork);
+ nibble = max(0,nibble);
+
+/* ==== Accumulate reflections during the QR sweep? Use block */
+/* . 2-by-2 structure during matrix-matrix multiply? ==== */
+
+ kacc22 = ilaenv_(&c__16, "DLAQR0", jbcmpz, n, ilo, ihi, lwork);
+ kacc22 = max(0,kacc22);
+ kacc22 = min(2,kacc22);
+
+/* ==== NWMAX = the largest possible deflation window for */
+/* . which there is sufficient workspace. ==== */
+
+/* Computing MIN */
+ i__1 = (*n - 1) / 3, i__2 = *lwork / 2;
+ nwmax = min(i__1,i__2);
+ nw = nwmax;
+
+/* ==== NSMAX = the Largest number of simultaneous shifts */
+/* . for which there is sufficient workspace. ==== */
+
+/* Computing MIN */
+ i__1 = (*n + 6) / 9, i__2 = (*lwork << 1) / 3;
+ nsmax = min(i__1,i__2);
+ nsmax -= nsmax % 2;
+
+/* ==== NDFL: an iteration count restarted at deflation. ==== */
+
+ ndfl = 1;
+
+/* ==== ITMAX = iteration limit ==== */
+
+/* Computing MAX */
+ i__1 = 10, i__2 = *ihi - *ilo + 1;
+ itmax = max(i__1,i__2) * 30;
+
+/* ==== Last row and column in the active block ==== */
+
+ kbot = *ihi;
+
+/* ==== Main Loop ==== */
+
+ i__1 = itmax;
+ for (it = 1; it <= i__1; ++it) {
+
+/* ==== Done when KBOT falls below ILO ==== */
+
+ if (kbot < *ilo) {
+ goto L90;
+ }
+
+/* ==== Locate active block ==== */
+
+ i__2 = *ilo + 1;
+ for (k = kbot; k >= i__2; --k) {
+ if (h__[k + (k - 1) * h_dim1] == 0.) {
+ goto L20;
+ }
+/* L10: */
+ }
+ k = *ilo;
+L20:
+ ktop = k;
+
+/* ==== Select deflation window size: */
+/* . Typical Case: */
+/* . If possible and advisable, nibble the entire */
+/* . active block. If not, use size MIN(NWR,NWMAX) */
+/* . or MIN(NWR+1,NWMAX) depending upon which has */
+/* . the smaller corresponding subdiagonal entry */
+/* . (a heuristic). */
+/* . */
+/* . Exceptional Case: */
+/* . If there have been no deflations in KEXNW or */
+/* . more iterations, then vary the deflation window */
+/* . size. At first, because larger windows are, */
+/* . in general, more powerful than smaller ones, */
+/* . rapidly increase the window to the maximum possible. */
+/* . Then, gradually reduce the window size. ==== */
+
+ nh = kbot - ktop + 1;
+ nwupbd = min(nh,nwmax);
+ if (ndfl < 5) {
+ nw = min(nwupbd,nwr);
+ } else {
+/* Computing MIN */
+ i__2 = nwupbd, i__3 = nw << 1;
+ nw = min(i__2,i__3);
+ }
+ if (nw < nwmax) {
+ if (nw >= nh - 1) {
+ nw = nh;
+ } else {
+ kwtop = kbot - nw + 1;
+ if ((d__1 = h__[kwtop + (kwtop - 1) * h_dim1], abs(d__1))
+ > (d__2 = h__[kwtop - 1 + (kwtop - 2) * h_dim1],
+ abs(d__2))) {
+ ++nw;
+ }
+ }
+ }
+ if (ndfl < 5) {
+ ndec = -1;
+ } else if (ndec >= 0 || nw >= nwupbd) {
+ ++ndec;
+ if (nw - ndec < 2) {
+ ndec = 0;
+ }
+ nw -= ndec;
+ }
+
+/* ==== Aggressive early deflation: */
+/* . split workspace under the subdiagonal into */
+/* . - an nw-by-nw work array V in the lower */
+/* . left-hand-corner, */
+/* . - an NW-by-at-least-NW-but-more-is-better */
+/* . (NW-by-NHO) horizontal work array along */
+/* . the bottom edge, */
+/* . - an at-least-NW-but-more-is-better (NHV-by-NW) */
+/* . vertical work array along the left-hand-edge. */
+/* . ==== */
+
+ kv = *n - nw + 1;
+ kt = nw + 1;
+ nho = *n - nw - 1 - kt + 1;
+ kwv = nw + 2;
+ nve = *n - nw - kwv + 1;
+
+/* ==== Aggressive early deflation ==== */
+
+ dlaqr3_(wantt, wantz, n, &ktop, &kbot, &nw, &h__[h_offset], ldh,
+ iloz, ihiz, &z__[z_offset], ldz, &ls, &ld, &wr[1], &wi[1],
+ &h__[kv + h_dim1], ldh, &nho, &h__[kv + kt * h_dim1],
+ ldh, &nve, &h__[kwv + h_dim1], ldh, &work[1], lwork);
+
+/* ==== Adjust KBOT accounting for new deflations. ==== */
+
+ kbot -= ld;
+
+/* ==== KS points to the shifts. ==== */
+
+ ks = kbot - ls + 1;
+
+/* ==== Skip an expensive QR sweep if there is a (partly */
+/* . heuristic) reason to expect that many eigenvalues */
+/* . will deflate without it. Here, the QR sweep is */
+/* . skipped if many eigenvalues have just been deflated */
+/* . or if the remaining active block is small. */
+
+ if (ld == 0 || ld * 100 <= nw * nibble && kbot - ktop + 1 > min(
+ nmin,nwmax)) { /* note: && binds tighter than || here */
+
+/* ==== NS = nominal number of simultaneous shifts. */
+/* . This may be lowered (slightly) if DLAQR3 */
+/* . did not provide that many shifts. ==== */
+
+/* Computing MIN */
+/* Computing MAX */
+ i__4 = 2, i__5 = kbot - ktop;
+ i__2 = min(nsmax,nsr), i__3 = max(i__4,i__5);
+ ns = min(i__2,i__3);
+ ns -= ns % 2;
+
+/* ==== If there have been no deflations */
+/* . in a multiple of KEXSH iterations, */
+/* . then try exceptional shifts. */
+/* . Otherwise use shifts provided by */
+/* . DLAQR3 above or from the eigenvalues */
+/* . of a trailing principal submatrix. ==== */
+
+ if (ndfl % 6 == 0) {
+ ks = kbot - ns + 1;
+/* Computing MAX */
+ i__3 = ks + 1, i__4 = ktop + 2;
+ i__2 = max(i__3,i__4);
+ for (i__ = kbot; i__ >= i__2; i__ += -2) {
+ ss = (d__1 = h__[i__ + (i__ - 1) * h_dim1], abs(d__1))
+ + (d__2 = h__[i__ - 1 + (i__ - 2) * h_dim1],
+ abs(d__2));
+ aa = ss * .75 + h__[i__ + i__ * h_dim1];
+ bb = ss;
+ cc = ss * -.4375;
+ dd = aa;
+ dlanv2_(&aa, &bb, &cc, &dd, &wr[i__ - 1], &wi[i__ - 1]
+, &wr[i__], &wi[i__], &cs, &sn);
+/* L30: */
+ }
+ if (ks == ktop) {
+ wr[ks + 1] = h__[ks + 1 + (ks + 1) * h_dim1];
+ wi[ks + 1] = 0.;
+ wr[ks] = wr[ks + 1];
+ wi[ks] = wi[ks + 1];
+ }
+ } else {
+
+/* ==== Got NS/2 or fewer shifts? Use DLAQR4 or */
+/* . DLAHQR on a trailing principal submatrix to */
+/* . get more. (Since NS.LE.NSMAX.LE.(N+6)/9, */
+/* . there is enough space below the subdiagonal */
+/* . to fit an NS-by-NS scratch array.) ==== */
+
+ if (kbot - ks + 1 <= ns / 2) {
+ ks = kbot - ns + 1;
+ kt = *n - ns + 1;
+ dlacpy_("A", &ns, &ns, &h__[ks + ks * h_dim1], ldh, &
+ h__[kt + h_dim1], ldh);
+ if (ns > nmin) {
+ dlaqr4_(&c_false, &c_false, &ns, &c__1, &ns, &h__[
+ kt + h_dim1], ldh, &wr[ks], &wi[ks], &
+ c__1, &c__1, zdum, &c__1, &work[1], lwork,
+ &inf);
+ } else {
+ dlahqr_(&c_false, &c_false, &ns, &c__1, &ns, &h__[
+ kt + h_dim1], ldh, &wr[ks], &wi[ks], &
+ c__1, &c__1, zdum, &c__1, &inf);
+ }
+ ks += inf;
+
+/* ==== In case of a rare QR failure use */
+/* . eigenvalues of the trailing 2-by-2 */
+/* . principal submatrix. ==== */
+
+ if (ks >= kbot) {
+ aa = h__[kbot - 1 + (kbot - 1) * h_dim1];
+ cc = h__[kbot + (kbot - 1) * h_dim1];
+ bb = h__[kbot - 1 + kbot * h_dim1];
+ dd = h__[kbot + kbot * h_dim1];
+ dlanv2_(&aa, &bb, &cc, &dd, &wr[kbot - 1], &wi[
+ kbot - 1], &wr[kbot], &wi[kbot], &cs, &sn)
+ ;
+ ks = kbot - 1;
+ }
+ }
+
+ if (kbot - ks + 1 > ns) {
+
+/* ==== Sort the shifts (Helps a little) */
+/* . Bubble sort keeps complex conjugate */
+/* . pairs together. ==== */
+
+ sorted = FALSE_;
+ i__2 = ks + 1;
+ for (k = kbot; k >= i__2; --k) {
+ if (sorted) {
+ goto L60;
+ }
+ sorted = TRUE_;
+ i__3 = k - 1;
+ for (i__ = ks; i__ <= i__3; ++i__) {
+ if ((d__1 = wr[i__], abs(d__1)) + (d__2 = wi[
+ i__], abs(d__2)) < (d__3 = wr[i__ + 1]
+, abs(d__3)) + (d__4 = wi[i__ + 1],
+ abs(d__4))) {
+ sorted = FALSE_;
+
+ swap = wr[i__];
+ wr[i__] = wr[i__ + 1];
+ wr[i__ + 1] = swap;
+
+ swap = wi[i__];
+ wi[i__] = wi[i__ + 1];
+ wi[i__ + 1] = swap;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+L60:
+ ;
+ }
+
+/* ==== Shuffle shifts into pairs of real shifts */
+/* . and pairs of complex conjugate shifts */
+/* . assuming complex conjugate shifts are */
+/* . already adjacent to one another. (Yes, */
+/* . they are.) ==== */
+
+ i__2 = ks + 2;
+ for (i__ = kbot; i__ >= i__2; i__ += -2) {
+ if (wi[i__] != -wi[i__ - 1]) {
+
+ swap = wr[i__];
+ wr[i__] = wr[i__ - 1];
+ wr[i__ - 1] = wr[i__ - 2];
+ wr[i__ - 2] = swap;
+
+ swap = wi[i__];
+ wi[i__] = wi[i__ - 1];
+ wi[i__ - 1] = wi[i__ - 2];
+ wi[i__ - 2] = swap;
+ }
+/* L70: */
+ }
+ }
+
+/* ==== If there are only two shifts and both are */
+/* . real, then use only one. ==== */
+
+ if (kbot - ks + 1 == 2) {
+ if (wi[kbot] == 0.) {
+ if ((d__1 = wr[kbot] - h__[kbot + kbot * h_dim1], abs(
+ d__1)) < (d__2 = wr[kbot - 1] - h__[kbot +
+ kbot * h_dim1], abs(d__2))) {
+ wr[kbot - 1] = wr[kbot];
+ } else {
+ wr[kbot] = wr[kbot - 1];
+ }
+ }
+ }
+
+/* ==== Use up to NS of the smallest magnitude */
+/* . shifts. If there aren't NS shifts available, */
+/* . then use them all, possibly dropping one to */
+/* . make the number of shifts even. ==== */
+
+/* Computing MIN */
+ i__2 = ns, i__3 = kbot - ks + 1;
+ ns = min(i__2,i__3);
+ ns -= ns % 2;
+ ks = kbot - ns + 1;
+
+/* ==== Small-bulge multi-shift QR sweep: */
+/* . split workspace under the subdiagonal into */
+/* . - a KDU-by-KDU work array U in the lower */
+/* . left-hand-corner, */
+/* . - a KDU-by-at-least-KDU-but-more-is-better */
+/* . (KDU-by-NHo) horizontal work array WH along */
+/* . the bottom edge, */
+/* . - and an at-least-KDU-but-more-is-better-by-KDU */
+/* . (NVE-by-KDU) vertical work array WV along */
+/* . the left-hand-edge. ==== */
+
+ kdu = ns * 3 - 3;
+ ku = *n - kdu + 1;
+ kwh = kdu + 1;
+ nho = *n - kdu - 3 - (kdu + 1) + 1;
+ kwv = kdu + 4;
+ nve = *n - kdu - kwv + 1;
+
+/* ==== Small-bulge multi-shift QR sweep ==== */
+
+ dlaqr5_(wantt, wantz, &kacc22, n, &ktop, &kbot, &ns, &wr[ks],
+ &wi[ks], &h__[h_offset], ldh, iloz, ihiz, &z__[
+ z_offset], ldz, &work[1], &c__3, &h__[ku + h_dim1],
+ ldh, &nve, &h__[kwv + h_dim1], ldh, &nho, &h__[ku +
+ kwh * h_dim1], ldh);
+ }
+
+/* ==== Note progress (or the lack of it). ==== */
+
+ if (ld > 0) {
+ ndfl = 1;
+ } else {
+ ++ndfl;
+ }
+
+/* ==== End of main loop ==== */
+/* L80: */
+ }
+
+/* ==== Iteration limit exceeded. Set INFO to show where */
+/* . the problem occurred and exit. ==== */
+
+ *info = kbot;
+L90:
+ ;
+ }
+
+/* ==== Return the optimal value of LWORK. ==== */
+
+ work[1] = (doublereal) lwkopt;
+
+/* ==== End of DLAQR0 ==== */
+
+ return 0;
+} /* dlaqr0_ */
diff --git a/contrib/libs/clapack/dlaqr1.c b/contrib/libs/clapack/dlaqr1.c
new file mode 100644
index 0000000000..5870b41631
--- /dev/null
+++ b/contrib/libs/clapack/dlaqr1.c
@@ -0,0 +1,127 @@
+/* dlaqr1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlaqr1_(integer *n, doublereal *h__, integer *ldh,
+ doublereal *sr1, doublereal *si1, doublereal *sr2, doublereal *si2,
+ doublereal *v)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset;
+ doublereal d__1, d__2, d__3;
+
+ /* Local variables */
+ doublereal s, h21s, h31s;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Given a 2-by-2 or 3-by-3 matrix H, DLAQR1 sets v to a */
+/* scalar multiple of the first column of the product */
+
+/* (*) K = (H - (sr1 + i*si1)*I)*(H - (sr2 + i*si2)*I) */
+
+/* scaling to avoid overflows and most underflows. It */
+/* is assumed that either */
+
+/* 1) sr1 = sr2 and si1 = -si2 */
+/* or */
+/* 2) si1 = si2 = 0. */
+
+/* This is useful for starting double implicit shift bulges */
+/* in the QR algorithm. */
+
+
+/* N (input) integer */
+/* Order of the matrix H. N must be either 2 or 3. */
+
+/* H (input) DOUBLE PRECISION array of dimension (LDH,N) */
+/* The 2-by-2 or 3-by-3 matrix H in (*). */
+
+/* LDH (input) integer */
+/* The leading dimension of H as declared in */
+/* the calling procedure. LDH.GE.N */
+
+/* SR1 (input) DOUBLE PRECISION */
+/* SI1 The shifts in (*). */
+/* SR2 */
+/* SI2 */
+
+/* V (output) DOUBLE PRECISION array of dimension N */
+/* A scalar multiple of the first column of the */
+/* matrix K in (*). */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --v;
+
+ /* Function Body */
+ if (*n == 2) {
+ s = (d__1 = h__[h_dim1 + 1] - *sr2, abs(d__1)) + abs(*si2) + (d__2 =
+ h__[h_dim1 + 2], abs(d__2)); /* scale factor to avoid over/underflow */
+ if (s == 0.) { /* scale vanished: first column of K is exactly zero */
+ v[1] = 0.;
+ v[2] = 0.;
+ } else {
+ h21s = h__[h_dim1 + 2] / s; /* H(2,1)/s */
+ v[1] = h21s * h__[(h_dim1 << 1) + 1] + (h__[h_dim1 + 1] - *sr1) *
+ ((h__[h_dim1 + 1] - *sr2) / s) - *si1 * (*si2 / s);
+ v[2] = h21s * (h__[h_dim1 + 1] + h__[(h_dim1 << 1) + 2] - *sr1 - *
+ sr2);
+ }
+ } else {
+ s = (d__1 = h__[h_dim1 + 1] - *sr2, abs(d__1)) + abs(*si2) + (d__2 =
+ h__[h_dim1 + 2], abs(d__2)) + (d__3 = h__[h_dim1 + 3], abs(
+ d__3)); /* scale factor to avoid over/underflow */
+ if (s == 0.) { /* scale vanished: first column of K is exactly zero */
+ v[1] = 0.;
+ v[2] = 0.;
+ v[3] = 0.;
+ } else {
+ h21s = h__[h_dim1 + 2] / s; /* H(2,1)/s */
+ h31s = h__[h_dim1 + 3] / s; /* H(3,1)/s */
+ v[1] = (h__[h_dim1 + 1] - *sr1) * ((h__[h_dim1 + 1] - *sr2) / s)
+ - *si1 * (*si2 / s) + h__[(h_dim1 << 1) + 1] * h21s + h__[
+ h_dim1 * 3 + 1] * h31s;
+ v[2] = h21s * (h__[h_dim1 + 1] + h__[(h_dim1 << 1) + 2] - *sr1 - *
+ sr2) + h__[h_dim1 * 3 + 2] * h31s;
+ v[3] = h31s * (h__[h_dim1 + 1] + h__[h_dim1 * 3 + 3] - *sr1 - *
+ sr2) + h21s * h__[(h_dim1 << 1) + 3];
+ }
+ }
+ return 0;
+} /* dlaqr1_ */
diff --git a/contrib/libs/clapack/dlaqr2.c b/contrib/libs/clapack/dlaqr2.c
new file mode 100644
index 0000000000..9990445654
--- /dev/null
+++ b/contrib/libs/clapack/dlaqr2.c
@@ -0,0 +1,698 @@
+/* dlaqr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b12 = 0.;
+static doublereal c_b13 = 1.;
+static logical c_true = TRUE_;
+
+/* Subroutine */ int dlaqr2_(logical *wantt, logical *wantz, integer *n,
+	integer *ktop, integer *kbot, integer *nw, doublereal *h__, integer *
+	ldh, integer *iloz, integer *ihiz, doublereal *z__, integer *ldz,
+	integer *ns, integer *nd, doublereal *sr, doublereal *si, doublereal *
+	v, integer *ldv, integer *nh, doublereal *t, integer *ldt, integer *
+	nv, doublereal *wv, integer *ldwv, doublereal *work, integer *lwork)
+{
+    /* System generated locals */
+    integer h_dim1, h_offset, t_dim1, t_offset, v_dim1, v_offset, wv_dim1,
+	    wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4;
+    doublereal d__1, d__2, d__3, d__4, d__5, d__6;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, k;
+    doublereal s, aa, bb, cc, dd, cs, sn;
+    integer jw;
+    doublereal evi, evk, foo;
+    integer kln;
+    doublereal tau, ulp;
+    integer lwk1, lwk2;
+    doublereal beta;
+    integer kend, kcol, info, ifst, ilst, ltop, krow;
+    extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *), dgemm_(char *, char *, integer *, integer *
+, integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *);
+    logical bulge;
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    integer infqr, kwtop;
+    extern /* Subroutine */ int dlanv2_(doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, doublereal *), dlabad_(
+	    doublereal *, doublereal *);
+    extern doublereal dlamch_(char *);
+    extern /* Subroutine */ int dgehrd_(integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    integer *), dlarfg_(integer *, doublereal *, doublereal *,
+	    integer *, doublereal *), dlahqr_(logical *, logical *, integer *,
+	    integer *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, integer *, integer *, doublereal *, integer *,
+	    integer *), dlacpy_(char *, integer *, integer *, doublereal *,
+	    integer *, doublereal *, integer *);
+    doublereal safmin;
+    extern /* Subroutine */ int dlaset_(char *, integer *, integer *,
+	    doublereal *, doublereal *, doublereal *, integer *);
+    doublereal safmax;
+    extern /* Subroutine */ int dtrexc_(char *, integer *, doublereal *,
+	    integer *, doublereal *, integer *, integer *, integer *,
+	    doublereal *, integer *), dormhr_(char *, char *, integer
+	    *, integer *, integer *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    integer *);
+    logical sorted;
+    doublereal smlnum;
+    integer lwkopt;
+
+
+/* -- LAPACK auxiliary routine (version 3.2.1) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* -- April 2009 -- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* This subroutine is identical to DLAQR3 except that it avoids */
+/* recursion by calling DLAHQR instead of DLAQR4. */
+
+
+/* ****************************************************************** */
+/* Aggressive early deflation: */
+
+/* This subroutine accepts as input an upper Hessenberg matrix */
+/* H and performs an orthogonal similarity transformation */
+/* designed to detect and deflate fully converged eigenvalues from */
+/* a trailing principal submatrix. On output H has been over- */
+/* written by a new Hessenberg matrix that is a perturbation of */
+/* an orthogonal similarity transformation of H. It is to be */
+/* hoped that the final version of H has many zero subdiagonal */
+/* entries. */
+
+/* ****************************************************************** */
+/* WANTT (input) LOGICAL */
+/* If .TRUE., then the Hessenberg matrix H is fully updated */
+/* so that the quasi-triangular Schur factor may be */
+/* computed (in cooperation with the calling subroutine). */
+/* If .FALSE., then only enough of H is updated to preserve */
+/* the eigenvalues. */
+
+/* WANTZ (input) LOGICAL */
+/* If .TRUE., then the orthogonal matrix Z is updated so */
+/* so that the orthogonal Schur factor may be computed */
+/* (in cooperation with the calling subroutine). */
+/* If .FALSE., then Z is not referenced. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H and (if WANTZ is .TRUE.) the */
+/* order of the orthogonal matrix Z. */
+
+/* KTOP (input) INTEGER */
+/* It is assumed that either KTOP = 1 or H(KTOP,KTOP-1)=0. */
+/* KBOT and KTOP together determine an isolated block */
+/* along the diagonal of the Hessenberg matrix. */
+
+/* KBOT (input) INTEGER */
+/* It is assumed without a check that either */
+/* KBOT = N or H(KBOT+1,KBOT)=0. KBOT and KTOP together */
+/* determine an isolated block along the diagonal of the */
+/* Hessenberg matrix. */
+
+/* NW (input) INTEGER */
+/* Deflation window size. 1 .LE. NW .LE. (KBOT-KTOP+1). */
+
+/* H (input/output) DOUBLE PRECISION array, dimension (LDH,N) */
+/* On input the initial N-by-N section of H stores the */
+/* Hessenberg matrix undergoing aggressive early deflation. */
+/* On output H has been transformed by an orthogonal */
+/* similarity transformation, perturbed, and then returned */
+/* to Hessenberg form that (it is to be hoped) has some */
+/* zero subdiagonal entries. */
+
+/* LDH (input) integer */
+/* Leading dimension of H just as declared in the calling */
+/* subroutine. N .LE. LDH */
+
+/* ILOZ (input) INTEGER */
+/* IHIZ (input) INTEGER */
+/* Specify the rows of Z to which transformations must be */
+/* applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. */
+
+/* Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N) */
+/* IF WANTZ is .TRUE., then on output, the orthogonal */
+/* similarity transformation mentioned above has been */
+/* accumulated into Z(ILOZ:IHIZ,ILO:IHI) from the right. */
+/* If WANTZ is .FALSE., then Z is unreferenced. */
+
+/* LDZ (input) integer */
+/* The leading dimension of Z just as declared in the */
+/* calling subroutine. 1 .LE. LDZ. */
+
+/* NS (output) integer */
+/* The number of unconverged (ie approximate) eigenvalues */
+/* returned in SR and SI that may be used as shifts by the */
+/* calling subroutine. */
+
+/* ND (output) integer */
+/* The number of converged eigenvalues uncovered by this */
+/* subroutine. */
+
+/* SR (output) DOUBLE PRECISION array, dimension KBOT */
+/* SI (output) DOUBLE PRECISION array, dimension KBOT */
+/* On output, the real and imaginary parts of approximate */
+/* eigenvalues that may be used for shifts are stored in */
+/* SR(KBOT-ND-NS+1) through SR(KBOT-ND) and */
+/* SI(KBOT-ND-NS+1) through SI(KBOT-ND), respectively. */
+/* The real and imaginary parts of converged eigenvalues */
+/* are stored in SR(KBOT-ND+1) through SR(KBOT) and */
+/* SI(KBOT-ND+1) through SI(KBOT), respectively. */
+
+/* V (workspace) DOUBLE PRECISION array, dimension (LDV,NW) */
+/* An NW-by-NW work array. */
+
+/* LDV (input) integer scalar */
+/* The leading dimension of V just as declared in the */
+/* calling subroutine. NW .LE. LDV */
+
+/* NH (input) integer scalar */
+/* The number of columns of T. NH.GE.NW. */
+
+/* T (workspace) DOUBLE PRECISION array, dimension (LDT,NW) */
+
+/* LDT (input) integer */
+/* The leading dimension of T just as declared in the */
+/* calling subroutine. NW .LE. LDT */
+
+/* NV (input) integer */
+/* The number of rows of work array WV available for */
+/* workspace. NV.GE.NW. */
+
+/* WV (workspace) DOUBLE PRECISION array, dimension (LDWV,NW) */
+
+/* LDWV (input) integer */
+/* The leading dimension of W just as declared in the */
+/* calling subroutine. NW .LE. LDV */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension LWORK. */
+/* On exit, WORK(1) is set to an estimate of the optimal value */
+/* of LWORK for the given values of N, NW, KTOP and KBOT. */
+
+/* LWORK (input) integer */
+/* The dimension of the work array WORK. LWORK = 2*NW */
+/* suffices, but greater efficiency may result from larger */
+/* values of LWORK. */
+
+/* If LWORK = -1, then a workspace query is assumed; DLAQR2 */
+/* only estimates the optimal workspace size for the given */
+/* values of N, NW, KTOP and KBOT. The estimate is returned */
+/* in WORK(1). No error message related to LWORK is issued */
+/* by XERBLA. Neither H nor Z are accessed. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* ==== Estimate optimal workspace. ==== */
+
+/* (f2c idiom: the array pointers are shifted below so that 1-based, */
+/* column-major Fortran-style indexing such as h__[row + col * h_dim1] */
+/* works directly in C.) */
+    /* Parameter adjustments */
+    h_dim1 = *ldh;
+    h_offset = 1 + h_dim1;
+    h__ -= h_offset;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --sr;
+    --si;
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+    t_dim1 = *ldt;
+    t_offset = 1 + t_dim1;
+    t -= t_offset;
+    wv_dim1 = *ldwv;
+    wv_offset = 1 + wv_dim1;
+    wv -= wv_offset;
+    --work;
+
+    /* Function Body */
+/* Computing MIN */
+    i__1 = *nw, i__2 = *kbot - *ktop + 1;
+    jw = min(i__1,i__2);
+    if (jw <= 2) {
+	lwkopt = 1;
+    } else {
+
+/* ==== Workspace query call to DGEHRD ==== */
+
+	i__1 = jw - 1;
+	dgehrd_(&jw, &c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], &
+		c_n1, &info);
+	lwk1 = (integer) work[1];
+
+/* ==== Workspace query call to DORMHR ==== */
+
+	i__1 = jw - 1;
+	dormhr_("R", "N", &jw, &jw, &c__1, &i__1, &t[t_offset], ldt, &work[1],
+		 &v[v_offset], ldv, &work[1], &c_n1, &info);
+	lwk2 = (integer) work[1];
+
+/* ==== Optimal workspace ==== */
+
+	lwkopt = jw + max(lwk1,lwk2);
+    }
+
+/* ==== Quick return in case of workspace query. ==== */
+
+    if (*lwork == -1) {
+	work[1] = (doublereal) lwkopt;
+	return 0;
+    }
+
+/* ==== Nothing to do ... */
+/* ... for an empty active block ... ==== */
+    *ns = 0;
+    *nd = 0;
+    work[1] = 1.;
+    if (*ktop > *kbot) {
+	return 0;
+    }
+/* ... nor for an empty deflation window. ==== */
+    if (*nw < 1) {
+	return 0;
+    }
+
+/* ==== Machine constants ==== */
+
+    safmin = dlamch_("SAFE MINIMUM");
+    safmax = 1. / safmin;
+    dlabad_(&safmin, &safmax);
+    ulp = dlamch_("PRECISION");
+    smlnum = safmin * ((doublereal) (*n) / ulp);
+
+/* ==== Setup deflation window ==== */
+
+/* Computing MIN */
+    i__1 = *nw, i__2 = *kbot - *ktop + 1;
+    jw = min(i__1,i__2);
+    kwtop = *kbot - jw + 1;
+    if (kwtop == *ktop) {
+	s = 0.;
+    } else {
+	s = h__[kwtop + (kwtop - 1) * h_dim1];
+    }
+
+    if (*kbot == kwtop) {
+
+/* ==== 1-by-1 deflation window: not much to do ==== */
+
+	sr[kwtop] = h__[kwtop + kwtop * h_dim1];
+	si[kwtop] = 0.;
+	*ns = 1;
+	*nd = 0;
+/* Computing MAX */
+	d__2 = smlnum, d__3 = ulp * (d__1 = h__[kwtop + kwtop * h_dim1], abs(
+		d__1));
+	if (abs(s) <= max(d__2,d__3)) {
+	    *ns = 0;
+	    *nd = 1;
+	    if (kwtop > *ktop) {
+		h__[kwtop + (kwtop - 1) * h_dim1] = 0.;
+	    }
+	}
+	work[1] = 1.;
+	return 0;
+    }
+
+/* ==== Convert to spike-triangular form. (In case of a */
+/* . rare QR failure, this routine continues to do */
+/* . aggressive early deflation using that part of */
+/* . the deflation window that converged using INFQR */
+/* . here and there to keep track.) ==== */
+
+    dlacpy_("U", &jw, &jw, &h__[kwtop + kwtop * h_dim1], ldh, &t[t_offset],
+	    ldt);
+    i__1 = jw - 1;
+    i__2 = *ldh + 1;
+    i__3 = *ldt + 1;
+    dcopy_(&i__1, &h__[kwtop + 1 + kwtop * h_dim1], &i__2, &t[t_dim1 + 2], &
+	    i__3);
+
+    dlaset_("A", &jw, &jw, &c_b12, &c_b13, &v[v_offset], ldv);
+    dlahqr_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sr[kwtop],
+	    &si[kwtop], &c__1, &jw, &v[v_offset], ldv, &infqr);
+
+/* ==== DTREXC needs a clean margin near the diagonal ==== */
+
+    i__1 = jw - 3;
+    for (j = 1; j <= i__1; ++j) {
+	t[j + 2 + j * t_dim1] = 0.;
+	t[j + 3 + j * t_dim1] = 0.;
+/* L10: */
+    }
+    if (jw > 2) {
+	t[jw + (jw - 2) * t_dim1] = 0.;
+    }
+
+/* ==== Deflation detection loop ==== */
+
+/* (Label/goto structure below is the f2c rendering of a Fortran */
+/* DO WHILE loop: control returns to L20 via the goto at the end of */
+/* the loop body, and *NS shrinks as eigenvalues are deflated.) */
+    *ns = jw;
+    ilst = infqr + 1;
+L20:
+    if (ilst <= *ns) {
+	if (*ns == 1) {
+	    bulge = FALSE_;
+	} else {
+	    bulge = t[*ns + (*ns - 1) * t_dim1] != 0.;
+	}
+
+/* ==== Small spike tip test for deflation ==== */
+
+	if (! bulge) {
+
+/* ==== Real eigenvalue ==== */
+
+	    foo = (d__1 = t[*ns + *ns * t_dim1], abs(d__1));
+	    if (foo == 0.) {
+		foo = abs(s);
+	    }
+/* Computing MAX */
+	    d__2 = smlnum, d__3 = ulp * foo;
+	    if ((d__1 = s * v[*ns * v_dim1 + 1], abs(d__1)) <= max(d__2,d__3))
+		     {
+
+/* ==== Deflatable ==== */
+
+		--(*ns);
+	    } else {
+
+/* ==== Undeflatable. Move it up out of the way. */
+/* . (DTREXC can not fail in this case.) ==== */
+
+		ifst = *ns;
+		dtrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst,
+			 &ilst, &work[1], &info);
+		++ilst;
+	    }
+	} else {
+
+/* ==== Complex conjugate pair ==== */
+
+	    foo = (d__3 = t[*ns + *ns * t_dim1], abs(d__3)) + sqrt((d__1 = t[*
+		    ns + (*ns - 1) * t_dim1], abs(d__1))) * sqrt((d__2 = t[*
+		    ns - 1 + *ns * t_dim1], abs(d__2)));
+	    if (foo == 0.) {
+		foo = abs(s);
+	    }
+/* Computing MAX */
+	    d__3 = (d__1 = s * v[*ns * v_dim1 + 1], abs(d__1)), d__4 = (d__2 =
+		     s * v[(*ns - 1) * v_dim1 + 1], abs(d__2));
+/* Computing MAX */
+	    d__5 = smlnum, d__6 = ulp * foo;
+	    if (max(d__3,d__4) <= max(d__5,d__6)) {
+
+/* ==== Deflatable ==== */
+
+		*ns += -2;
+	    } else {
+
+/* ==== Undeflatable. Move them up out of the way. */
+/* . Fortunately, DTREXC does the right thing with */
+/* . ILST in case of a rare exchange failure. ==== */
+
+		ifst = *ns;
+		dtrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst,
+			 &ilst, &work[1], &info);
+		ilst += 2;
+	    }
+	}
+
+/* ==== End deflation detection loop ==== */
+
+	goto L20;
+    }
+
+/* ==== Return to Hessenberg form ==== */
+
+    if (*ns == 0) {
+	s = 0.;
+    }
+
+    if (*ns < jw) {
+
+/* ==== sorting diagonal blocks of T improves accuracy for */
+/* . graded matrices. Bubble sort deals well with */
+/* . exchange failures. ==== */
+
+	sorted = FALSE_;
+	i__ = *ns + 1;
+L30:
+	if (sorted) {
+	    goto L50;
+	}
+	sorted = TRUE_;
+
+	kend = i__ - 1;
+	i__ = infqr + 1;
+	if (i__ == *ns) {
+	    k = i__ + 1;
+	} else if (t[i__ + 1 + i__ * t_dim1] == 0.) {
+	    k = i__ + 1;
+	} else {
+	    k = i__ + 2;
+	}
+L40:
+	if (k <= kend) {
+	    if (k == i__ + 1) {
+		evi = (d__1 = t[i__ + i__ * t_dim1], abs(d__1));
+	    } else {
+		evi = (d__3 = t[i__ + i__ * t_dim1], abs(d__3)) + sqrt((d__1 =
+			 t[i__ + 1 + i__ * t_dim1], abs(d__1))) * sqrt((d__2 =
+			 t[i__ + (i__ + 1) * t_dim1], abs(d__2)));
+	    }
+
+	    if (k == kend) {
+		evk = (d__1 = t[k + k * t_dim1], abs(d__1));
+	    } else if (t[k + 1 + k * t_dim1] == 0.) {
+		evk = (d__1 = t[k + k * t_dim1], abs(d__1));
+	    } else {
+		evk = (d__3 = t[k + k * t_dim1], abs(d__3)) + sqrt((d__1 = t[
+			k + 1 + k * t_dim1], abs(d__1))) * sqrt((d__2 = t[k +
+			(k + 1) * t_dim1], abs(d__2)));
+	    }
+
+	    if (evi >= evk) {
+		i__ = k;
+	    } else {
+		sorted = FALSE_;
+		ifst = i__;
+		ilst = k;
+		dtrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst,
+			 &ilst, &work[1], &info);
+		if (info == 0) {
+		    i__ = ilst;
+		} else {
+		    i__ = k;
+		}
+	    }
+	    if (i__ == kend) {
+		k = i__ + 1;
+	    } else if (t[i__ + 1 + i__ * t_dim1] == 0.) {
+		k = i__ + 1;
+	    } else {
+		k = i__ + 2;
+	    }
+	    goto L40;
+	}
+	goto L30;
+L50:
+	;
+    }
+
+/* ==== Restore shift/eigenvalue array from T ==== */
+
+    i__ = jw;
+L60:
+    if (i__ >= infqr + 1) {
+	if (i__ == infqr + 1) {
+	    sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1];
+	    si[kwtop + i__ - 1] = 0.;
+	    --i__;
+	} else if (t[i__ + (i__ - 1) * t_dim1] == 0.) {
+	    sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1];
+	    si[kwtop + i__ - 1] = 0.;
+	    --i__;
+	} else {
+	    aa = t[i__ - 1 + (i__ - 1) * t_dim1];
+	    cc = t[i__ + (i__ - 1) * t_dim1];
+	    bb = t[i__ - 1 + i__ * t_dim1];
+	    dd = t[i__ + i__ * t_dim1];
+	    dlanv2_(&aa, &bb, &cc, &dd, &sr[kwtop + i__ - 2], &si[kwtop + i__
+		    - 2], &sr[kwtop + i__ - 1], &si[kwtop + i__ - 1], &cs, &
+		    sn);
+	    i__ += -2;
+	}
+	goto L60;
+    }
+
+/* (Skip the update phase entirely unless at least one eigenvalue */
+/* deflated (NS < JW) or the spike S is already zero.) */
+    if (*ns < jw || s == 0.) {
+	if (*ns > 1 && s != 0.) {
+
+/* ==== Reflect spike back into lower triangle ==== */
+
+	    dcopy_(ns, &v[v_offset], ldv, &work[1], &c__1);
+	    beta = work[1];
+	    dlarfg_(ns, &beta, &work[2], &c__1, &tau);
+	    work[1] = 1.;
+
+	    i__1 = jw - 2;
+	    i__2 = jw - 2;
+	    dlaset_("L", &i__1, &i__2, &c_b12, &c_b12, &t[t_dim1 + 3], ldt);
+
+	    dlarf_("L", ns, &jw, &work[1], &c__1, &tau, &t[t_offset], ldt, &
+		    work[jw + 1]);
+	    dlarf_("R", ns, ns, &work[1], &c__1, &tau, &t[t_offset], ldt, &
+		    work[jw + 1]);
+	    dlarf_("R", &jw, ns, &work[1], &c__1, &tau, &v[v_offset], ldv, &
+		    work[jw + 1]);
+
+	    i__1 = *lwork - jw;
+	    dgehrd_(&jw, &c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1]
+, &i__1, &info);
+	}
+
+/* ==== Copy updated reduced window into place ==== */
+
+	if (kwtop > 1) {
+	    h__[kwtop + (kwtop - 1) * h_dim1] = s * v[v_dim1 + 1];
+	}
+	dlacpy_("U", &jw, &jw, &t[t_offset], ldt, &h__[kwtop + kwtop * h_dim1]
+, ldh);
+	i__1 = jw - 1;
+	i__2 = *ldt + 1;
+	i__3 = *ldh + 1;
+	dcopy_(&i__1, &t[t_dim1 + 2], &i__2, &h__[kwtop + 1 + kwtop * h_dim1],
+		 &i__3);
+
+/* ==== Accumulate orthogonal matrix in order to update */
+/* . H and Z, if requested. ==== */
+
+	if (*ns > 1 && s != 0.) {
+	    i__1 = *lwork - jw;
+	    dormhr_("R", "N", &jw, ns, &c__1, ns, &t[t_offset], ldt, &work[1],
+		     &v[v_offset], ldv, &work[jw + 1], &i__1, &info);
+	}
+
+/* ==== Update vertical slab in H ==== */
+
+	if (*wantt) {
+	    ltop = 1;
+	} else {
+	    ltop = *ktop;
+	}
+	i__1 = kwtop - 1;
+	i__2 = *nv;
+	for (krow = ltop; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow +=
+		i__2) {
+/* Computing MIN */
+	    i__3 = *nv, i__4 = kwtop - krow;
+	    kln = min(i__3,i__4);
+	    dgemm_("N", "N", &kln, &jw, &jw, &c_b13, &h__[krow + kwtop *
+		    h_dim1], ldh, &v[v_offset], ldv, &c_b12, &wv[wv_offset],
+		    ldwv);
+	    dlacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &h__[krow + kwtop *
+		    h_dim1], ldh);
+/* L70: */
+	}
+
+/* ==== Update horizontal slab in H ==== */
+
+	if (*wantt) {
+	    i__2 = *n;
+	    i__1 = *nh;
+	    for (kcol = *kbot + 1; i__1 < 0 ? kcol >= i__2 : kcol <= i__2;
+		    kcol += i__1) {
+/* Computing MIN */
+		i__3 = *nh, i__4 = *n - kcol + 1;
+		kln = min(i__3,i__4);
+		dgemm_("C", "N", &jw, &kln, &jw, &c_b13, &v[v_offset], ldv, &
+			h__[kwtop + kcol * h_dim1], ldh, &c_b12, &t[t_offset],
+			ldt);
+		dlacpy_("A", &jw, &kln, &t[t_offset], ldt, &h__[kwtop + kcol *
+			h_dim1], ldh);
+/* L80: */
+	    }
+	}
+
+/* ==== Update vertical slab in Z ==== */
+
+	if (*wantz) {
+	    i__1 = *ihiz;
+	    i__2 = *nv;
+	    for (krow = *iloz; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow +=
+		    i__2) {
+/* Computing MIN */
+		i__3 = *nv, i__4 = *ihiz - krow + 1;
+		kln = min(i__3,i__4);
+		dgemm_("N", "N", &kln, &jw, &jw, &c_b13, &z__[krow + kwtop *
+			z_dim1], ldz, &v[v_offset], ldv, &c_b12, &wv[
+			wv_offset], ldwv);
+		dlacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &z__[krow +
+			kwtop * z_dim1], ldz);
+/* L90: */
+	    }
+	}
+    }
+
+/* ==== Return the number of deflations ... ==== */
+
+    *nd = jw - *ns;
+
+/* ==== ... and the number of shifts. (Subtracting */
+/* . INFQR from the spike length takes care */
+/* . of the case of a rare QR failure while */
+/* . calculating eigenvalues of the deflation */
+/* . window.) ==== */
+
+    *ns -= infqr;
+
+/* ==== Return optimal workspace. ==== */
+
+    work[1] = (doublereal) lwkopt;
+
+/* ==== End of DLAQR2 ==== */
+
+    return 0;
+} /* dlaqr2_ */
diff --git a/contrib/libs/clapack/dlaqr3.c b/contrib/libs/clapack/dlaqr3.c
new file mode 100644
index 0000000000..a8bae817cb
--- /dev/null
+++ b/contrib/libs/clapack/dlaqr3.c
@@ -0,0 +1,715 @@
+/* dlaqr3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static logical c_true = TRUE_;
+static doublereal c_b17 = 0.;
+static doublereal c_b18 = 1.;
+static integer c__12 = 12;
+
+/* Subroutine */ int dlaqr3_(logical *wantt, logical *wantz, integer *n,
+ integer *ktop, integer *kbot, integer *nw, doublereal *h__, integer *
+ ldh, integer *iloz, integer *ihiz, doublereal *z__, integer *ldz,
+ integer *ns, integer *nd, doublereal *sr, doublereal *si, doublereal *
+ v, integer *ldv, integer *nh, doublereal *t, integer *ldt, integer *
+ nv, doublereal *wv, integer *ldwv, doublereal *work, integer *lwork)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, t_dim1, t_offset, v_dim1, v_offset, wv_dim1,
+ wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1, d__2, d__3, d__4, d__5, d__6;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal s, aa, bb, cc, dd, cs, sn;
+ integer jw;
+ doublereal evi, evk, foo;
+ integer kln;
+ doublereal tau, ulp;
+ integer lwk1, lwk2, lwk3;
+ doublereal beta;
+ integer kend, kcol, info, nmin, ifst, ilst, ltop, krow;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *), dgemm_(char *, char *, integer *, integer *
+, integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ logical bulge;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ integer infqr, kwtop;
+ extern /* Subroutine */ int dlanv2_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *), dlaqr4_(
+ logical *, logical *, integer *, integer *, integer *, doublereal
+ *, integer *, doublereal *, doublereal *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, integer *),
+ dlabad_(doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int dgehrd_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *), dlarfg_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *), dlahqr_(logical *, logical *, integer *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *), dlacpy_(char *, integer *, integer *, doublereal *,
+ integer *, doublereal *, integer *);
+ doublereal safmin;
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ doublereal safmax;
+ extern /* Subroutine */ int dlaset_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *),
+ dtrexc_(char *, integer *, doublereal *, integer *, doublereal *,
+ integer *, integer *, integer *, doublereal *, integer *),
+ dormhr_(char *, char *, integer *, integer *, integer *, integer
+ *, doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, integer *);
+ logical sorted;
+ doublereal smlnum;
+ integer lwkopt;
+
+
+/* -- LAPACK auxiliary routine (version 3.2.1) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* -- April 2009 -- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* ****************************************************************** */
+/* Aggressive early deflation: */
+
+/* This subroutine accepts as input an upper Hessenberg matrix */
+/* H and performs an orthogonal similarity transformation */
+/* designed to detect and deflate fully converged eigenvalues from */
+/* a trailing principal submatrix. On output H has been over- */
+/* written by a new Hessenberg matrix that is a perturbation of */
+/* an orthogonal similarity transformation of H. It is to be */
+/* hoped that the final version of H has many zero subdiagonal */
+/* entries. */
+
+/* ****************************************************************** */
+/* WANTT (input) LOGICAL */
+/* If .TRUE., then the Hessenberg matrix H is fully updated */
+/* so that the quasi-triangular Schur factor may be */
+/* computed (in cooperation with the calling subroutine). */
+/* If .FALSE., then only enough of H is updated to preserve */
+/* the eigenvalues. */
+
+/* WANTZ (input) LOGICAL */
+/* If .TRUE., then the orthogonal matrix Z is updated so */
+/* so that the orthogonal Schur factor may be computed */
+/* (in cooperation with the calling subroutine). */
+/* If .FALSE., then Z is not referenced. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H and (if WANTZ is .TRUE.) the */
+/* order of the orthogonal matrix Z. */
+
+/* KTOP (input) INTEGER */
+/* It is assumed that either KTOP = 1 or H(KTOP,KTOP-1)=0. */
+/* KBOT and KTOP together determine an isolated block */
+/* along the diagonal of the Hessenberg matrix. */
+
+/* KBOT (input) INTEGER */
+/* It is assumed without a check that either */
+/* KBOT = N or H(KBOT+1,KBOT)=0. KBOT and KTOP together */
+/* determine an isolated block along the diagonal of the */
+/* Hessenberg matrix. */
+
+/* NW (input) INTEGER */
+/* Deflation window size. 1 .LE. NW .LE. (KBOT-KTOP+1). */
+
+/* H (input/output) DOUBLE PRECISION array, dimension (LDH,N) */
+/* On input the initial N-by-N section of H stores the */
+/* Hessenberg matrix undergoing aggressive early deflation. */
+/* On output H has been transformed by an orthogonal */
+/* similarity transformation, perturbed, and the returned */
+/* to Hessenberg form that (it is to be hoped) has some */
+/* zero subdiagonal entries. */
+
+/* LDH (input) integer */
+/* Leading dimension of H just as declared in the calling */
+/* subroutine. N .LE. LDH */
+
+/* ILOZ (input) INTEGER */
+/* IHIZ (input) INTEGER */
+/* Specify the rows of Z to which transformations must be */
+/* applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. */
+
+/* Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N) */
+/* IF WANTZ is .TRUE., then on output, the orthogonal */
+/* similarity transformation mentioned above has been */
+/* accumulated into Z(ILOZ:IHIZ,ILO:IHI) from the right. */
+/* If WANTZ is .FALSE., then Z is unreferenced. */
+
+/* LDZ (input) integer */
+/* The leading dimension of Z just as declared in the */
+/* calling subroutine. 1 .LE. LDZ. */
+
+/* NS (output) integer */
+/* The number of unconverged (ie approximate) eigenvalues */
+/* returned in SR and SI that may be used as shifts by the */
+/* calling subroutine. */
+
+/* ND (output) integer */
+/* The number of converged eigenvalues uncovered by this */
+/* subroutine. */
+
+/* SR (output) DOUBLE PRECISION array, dimension KBOT */
+/* SI (output) DOUBLE PRECISION array, dimension KBOT */
+/* On output, the real and imaginary parts of approximate */
+/* eigenvalues that may be used for shifts are stored in */
+/* SR(KBOT-ND-NS+1) through SR(KBOT-ND) and */
+/* SI(KBOT-ND-NS+1) through SI(KBOT-ND), respectively. */
+/* The real and imaginary parts of converged eigenvalues */
+/* are stored in SR(KBOT-ND+1) through SR(KBOT) and */
+/* SI(KBOT-ND+1) through SI(KBOT), respectively. */
+
+/* V (workspace) DOUBLE PRECISION array, dimension (LDV,NW) */
+/* An NW-by-NW work array. */
+
+/* LDV (input) integer scalar */
+/* The leading dimension of V just as declared in the */
+/* calling subroutine. NW .LE. LDV */
+
+/* NH (input) integer scalar */
+/* The number of columns of T. NH.GE.NW. */
+
+/* T (workspace) DOUBLE PRECISION array, dimension (LDT,NW) */
+
+/* LDT (input) integer */
+/* The leading dimension of T just as declared in the */
+/* calling subroutine. NW .LE. LDT */
+
+/* NV (input) integer */
+/* The number of rows of work array WV available for */
+/* workspace. NV.GE.NW. */
+
+/* WV (workspace) DOUBLE PRECISION array, dimension (LDWV,NW) */
+
+/* LDWV (input) integer */
+/* The leading dimension of W just as declared in the */
+/* calling subroutine. NW .LE. LDV */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension LWORK. */
+/* On exit, WORK(1) is set to an estimate of the optimal value */
+/* of LWORK for the given values of N, NW, KTOP and KBOT. */
+
+/* LWORK (input) integer */
+/* The dimension of the work array WORK. LWORK = 2*NW */
+/* suffices, but greater efficiency may result from larger */
+/* values of LWORK. */
+
+/* If LWORK = -1, then a workspace query is assumed; DLAQR3 */
+/* only estimates the optimal workspace size for the given */
+/* values of N, NW, KTOP and KBOT. The estimate is returned */
+/* in WORK(1). No error message related to LWORK is issued */
+/* by XERBLA. Neither H nor Z are accessed. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* ==== Estimate optimal workspace. ==== */
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --sr;
+ --si;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ wv_dim1 = *ldwv;
+ wv_offset = 1 + wv_dim1;
+ wv -= wv_offset;
+ --work;
+
+ /* Function Body */
+/* Computing MIN */
+ i__1 = *nw, i__2 = *kbot - *ktop + 1;
+ jw = min(i__1,i__2);
+ if (jw <= 2) {
+ lwkopt = 1;
+ } else {
+
+/* ==== Workspace query call to DGEHRD ==== */
+
+ i__1 = jw - 1;
+ dgehrd_(&jw, &c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], &
+ c_n1, &info);
+ lwk1 = (integer) work[1];
+
+/* ==== Workspace query call to DORMHR ==== */
+
+ i__1 = jw - 1;
+ dormhr_("R", "N", &jw, &jw, &c__1, &i__1, &t[t_offset], ldt, &work[1],
+ &v[v_offset], ldv, &work[1], &c_n1, &info);
+ lwk2 = (integer) work[1];
+
+/* ==== Workspace query call to DLAQR4 ==== */
+
+ dlaqr4_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sr[1],
+ &si[1], &c__1, &jw, &v[v_offset], ldv, &work[1], &c_n1, &
+ infqr);
+ lwk3 = (integer) work[1];
+
+/* ==== Optimal workspace ==== */
+
+/* Computing MAX */
+ i__1 = jw + max(lwk1,lwk2);
+ lwkopt = max(i__1,lwk3);
+ }
+
+/* ==== Quick return in case of workspace query. ==== */
+
+ if (*lwork == -1) {
+ work[1] = (doublereal) lwkopt;
+ return 0;
+ }
+
+/* ==== Nothing to do ... */
+/* ... for an empty active block ... ==== */
+ *ns = 0;
+ *nd = 0;
+ work[1] = 1.;
+ if (*ktop > *kbot) {
+ return 0;
+ }
+/* ... nor for an empty deflation window. ==== */
+ if (*nw < 1) {
+ return 0;
+ }
+
+/* ==== Machine constants ==== */
+
+ safmin = dlamch_("SAFE MINIMUM");
+ safmax = 1. / safmin;
+ dlabad_(&safmin, &safmax);
+ ulp = dlamch_("PRECISION");
+ smlnum = safmin * ((doublereal) (*n) / ulp);
+
+/* ==== Setup deflation window ==== */
+
+/* Computing MIN */
+ i__1 = *nw, i__2 = *kbot - *ktop + 1;
+ jw = min(i__1,i__2);
+ kwtop = *kbot - jw + 1;
+ if (kwtop == *ktop) {
+ s = 0.;
+ } else {
+ s = h__[kwtop + (kwtop - 1) * h_dim1];
+ }
+
+ if (*kbot == kwtop) {
+
+/* ==== 1-by-1 deflation window: not much to do ==== */
+
+ sr[kwtop] = h__[kwtop + kwtop * h_dim1];
+ si[kwtop] = 0.;
+ *ns = 1;
+ *nd = 0;
+/* Computing MAX */
+ d__2 = smlnum, d__3 = ulp * (d__1 = h__[kwtop + kwtop * h_dim1], abs(
+ d__1));
+ if (abs(s) <= max(d__2,d__3)) {
+ *ns = 0;
+ *nd = 1;
+ if (kwtop > *ktop) {
+ h__[kwtop + (kwtop - 1) * h_dim1] = 0.;
+ }
+ }
+ work[1] = 1.;
+ return 0;
+ }
+
+/* ==== Convert to spike-triangular form. (In case of a */
+/* . rare QR failure, this routine continues to do */
+/* . aggressive early deflation using that part of */
+/* . the deflation window that converged using INFQR */
+/* . here and there to keep track.) ==== */
+
+ dlacpy_("U", &jw, &jw, &h__[kwtop + kwtop * h_dim1], ldh, &t[t_offset],
+ ldt);
+ i__1 = jw - 1;
+ i__2 = *ldh + 1;
+ i__3 = *ldt + 1;
+ dcopy_(&i__1, &h__[kwtop + 1 + kwtop * h_dim1], &i__2, &t[t_dim1 + 2], &
+ i__3);
+
+ dlaset_("A", &jw, &jw, &c_b17, &c_b18, &v[v_offset], ldv);
+ nmin = ilaenv_(&c__12, "DLAQR3", "SV", &jw, &c__1, &jw, lwork);
+ if (jw > nmin) {
+ dlaqr4_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sr[
+ kwtop], &si[kwtop], &c__1, &jw, &v[v_offset], ldv, &work[1],
+ lwork, &infqr);
+ } else {
+ dlahqr_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sr[
+ kwtop], &si[kwtop], &c__1, &jw, &v[v_offset], ldv, &infqr);
+ }
+
+/* ==== DTREXC needs a clean margin near the diagonal ==== */
+
+ i__1 = jw - 3;
+ for (j = 1; j <= i__1; ++j) {
+ t[j + 2 + j * t_dim1] = 0.;
+ t[j + 3 + j * t_dim1] = 0.;
+/* L10: */
+ }
+ if (jw > 2) {
+ t[jw + (jw - 2) * t_dim1] = 0.;
+ }
+
+/* ==== Deflation detection loop ==== */
+
+ *ns = jw;
+ ilst = infqr + 1;
+L20:
+ if (ilst <= *ns) {
+ if (*ns == 1) {
+ bulge = FALSE_;
+ } else {
+ bulge = t[*ns + (*ns - 1) * t_dim1] != 0.;
+ }
+
+/* ==== Small spike tip test for deflation ==== */
+
+ if (! bulge) {
+
+/* ==== Real eigenvalue ==== */
+
+ foo = (d__1 = t[*ns + *ns * t_dim1], abs(d__1));
+ if (foo == 0.) {
+ foo = abs(s);
+ }
+/* Computing MAX */
+ d__2 = smlnum, d__3 = ulp * foo;
+ if ((d__1 = s * v[*ns * v_dim1 + 1], abs(d__1)) <= max(d__2,d__3))
+ {
+
+/* ==== Deflatable ==== */
+
+ --(*ns);
+ } else {
+
+/* ==== Undeflatable. Move it up out of the way. */
+/* . (DTREXC can not fail in this case.) ==== */
+
+ ifst = *ns;
+ dtrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst,
+ &ilst, &work[1], &info);
+ ++ilst;
+ }
+ } else {
+
+/* ==== Complex conjugate pair ==== */
+
+ foo = (d__3 = t[*ns + *ns * t_dim1], abs(d__3)) + sqrt((d__1 = t[*
+ ns + (*ns - 1) * t_dim1], abs(d__1))) * sqrt((d__2 = t[*
+ ns - 1 + *ns * t_dim1], abs(d__2)));
+ if (foo == 0.) {
+ foo = abs(s);
+ }
+/* Computing MAX */
+ d__3 = (d__1 = s * v[*ns * v_dim1 + 1], abs(d__1)), d__4 = (d__2 =
+ s * v[(*ns - 1) * v_dim1 + 1], abs(d__2));
+/* Computing MAX */
+ d__5 = smlnum, d__6 = ulp * foo;
+ if (max(d__3,d__4) <= max(d__5,d__6)) {
+
+/* ==== Deflatable ==== */
+
+ *ns += -2;
+ } else {
+
+/* ==== Undeflatable. Move them up out of the way. */
+/* . Fortunately, DTREXC does the right thing with */
+/* . ILST in case of a rare exchange failure. ==== */
+
+ ifst = *ns;
+ dtrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst,
+ &ilst, &work[1], &info);
+ ilst += 2;
+ }
+ }
+
+/* ==== End deflation detection loop ==== */
+
+ goto L20;
+ }
+
+/* ==== Return to Hessenberg form ==== */
+
+ if (*ns == 0) {
+ s = 0.;
+ }
+
+ if (*ns < jw) {
+
+/* ==== sorting diagonal blocks of T improves accuracy for */
+/* . graded matrices. Bubble sort deals well with */
+/* . exchange failures. ==== */
+
+ sorted = FALSE_;
+ i__ = *ns + 1;
+L30:
+ if (sorted) {
+ goto L50;
+ }
+ sorted = TRUE_;
+
+ kend = i__ - 1;
+ i__ = infqr + 1;
+ if (i__ == *ns) {
+ k = i__ + 1;
+ } else if (t[i__ + 1 + i__ * t_dim1] == 0.) {
+ k = i__ + 1;
+ } else {
+ k = i__ + 2;
+ }
+L40:
+ if (k <= kend) {
+ if (k == i__ + 1) {
+ evi = (d__1 = t[i__ + i__ * t_dim1], abs(d__1));
+ } else {
+ evi = (d__3 = t[i__ + i__ * t_dim1], abs(d__3)) + sqrt((d__1 =
+ t[i__ + 1 + i__ * t_dim1], abs(d__1))) * sqrt((d__2 =
+ t[i__ + (i__ + 1) * t_dim1], abs(d__2)));
+ }
+
+ if (k == kend) {
+ evk = (d__1 = t[k + k * t_dim1], abs(d__1));
+ } else if (t[k + 1 + k * t_dim1] == 0.) {
+ evk = (d__1 = t[k + k * t_dim1], abs(d__1));
+ } else {
+ evk = (d__3 = t[k + k * t_dim1], abs(d__3)) + sqrt((d__1 = t[
+ k + 1 + k * t_dim1], abs(d__1))) * sqrt((d__2 = t[k +
+ (k + 1) * t_dim1], abs(d__2)));
+ }
+
+ if (evi >= evk) {
+ i__ = k;
+ } else {
+ sorted = FALSE_;
+ ifst = i__;
+ ilst = k;
+ dtrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst,
+ &ilst, &work[1], &info);
+ if (info == 0) {
+ i__ = ilst;
+ } else {
+ i__ = k;
+ }
+ }
+ if (i__ == kend) {
+ k = i__ + 1;
+ } else if (t[i__ + 1 + i__ * t_dim1] == 0.) {
+ k = i__ + 1;
+ } else {
+ k = i__ + 2;
+ }
+ goto L40;
+ }
+ goto L30;
+L50:
+ ;
+ }
+
+/* ==== Restore shift/eigenvalue array from T ==== */
+
+ i__ = jw;
+L60:
+ if (i__ >= infqr + 1) {
+ if (i__ == infqr + 1) {
+ sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1];
+ si[kwtop + i__ - 1] = 0.;
+ --i__;
+ } else if (t[i__ + (i__ - 1) * t_dim1] == 0.) {
+ sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1];
+ si[kwtop + i__ - 1] = 0.;
+ --i__;
+ } else {
+ aa = t[i__ - 1 + (i__ - 1) * t_dim1];
+ cc = t[i__ + (i__ - 1) * t_dim1];
+ bb = t[i__ - 1 + i__ * t_dim1];
+ dd = t[i__ + i__ * t_dim1];
+ dlanv2_(&aa, &bb, &cc, &dd, &sr[kwtop + i__ - 2], &si[kwtop + i__
+ - 2], &sr[kwtop + i__ - 1], &si[kwtop + i__ - 1], &cs, &
+ sn);
+ i__ += -2;
+ }
+ goto L60;
+ }
+
+ if (*ns < jw || s == 0.) {
+ if (*ns > 1 && s != 0.) {
+
+/* ==== Reflect spike back into lower triangle ==== */
+
+ dcopy_(ns, &v[v_offset], ldv, &work[1], &c__1);
+ beta = work[1];
+ dlarfg_(ns, &beta, &work[2], &c__1, &tau);
+ work[1] = 1.;
+
+ i__1 = jw - 2;
+ i__2 = jw - 2;
+ dlaset_("L", &i__1, &i__2, &c_b17, &c_b17, &t[t_dim1 + 3], ldt);
+
+ dlarf_("L", ns, &jw, &work[1], &c__1, &tau, &t[t_offset], ldt, &
+ work[jw + 1]);
+ dlarf_("R", ns, ns, &work[1], &c__1, &tau, &t[t_offset], ldt, &
+ work[jw + 1]);
+ dlarf_("R", &jw, ns, &work[1], &c__1, &tau, &v[v_offset], ldv, &
+ work[jw + 1]);
+
+ i__1 = *lwork - jw;
+ dgehrd_(&jw, &c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1]
+, &i__1, &info);
+ }
+
+/* ==== Copy updated reduced window into place ==== */
+
+ if (kwtop > 1) {
+ h__[kwtop + (kwtop - 1) * h_dim1] = s * v[v_dim1 + 1];
+ }
+ dlacpy_("U", &jw, &jw, &t[t_offset], ldt, &h__[kwtop + kwtop * h_dim1]
+, ldh);
+ i__1 = jw - 1;
+ i__2 = *ldt + 1;
+ i__3 = *ldh + 1;
+ dcopy_(&i__1, &t[t_dim1 + 2], &i__2, &h__[kwtop + 1 + kwtop * h_dim1],
+ &i__3);
+
+/* ==== Accumulate orthogonal matrix in order update */
+/* . H and Z, if requested. ==== */
+
+ if (*ns > 1 && s != 0.) {
+ i__1 = *lwork - jw;
+ dormhr_("R", "N", &jw, ns, &c__1, ns, &t[t_offset], ldt, &work[1],
+ &v[v_offset], ldv, &work[jw + 1], &i__1, &info);
+ }
+
+/* ==== Update vertical slab in H ==== */
+
+ if (*wantt) {
+ ltop = 1;
+ } else {
+ ltop = *ktop;
+ }
+ i__1 = kwtop - 1;
+ i__2 = *nv;
+ for (krow = ltop; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *nv, i__4 = kwtop - krow;
+ kln = min(i__3,i__4);
+ dgemm_("N", "N", &kln, &jw, &jw, &c_b18, &h__[krow + kwtop *
+ h_dim1], ldh, &v[v_offset], ldv, &c_b17, &wv[wv_offset],
+ ldwv);
+ dlacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &h__[krow + kwtop *
+ h_dim1], ldh);
+/* L70: */
+ }
+
+/* ==== Update horizontal slab in H ==== */
+
+ if (*wantt) {
+ i__2 = *n;
+ i__1 = *nh;
+ for (kcol = *kbot + 1; i__1 < 0 ? kcol >= i__2 : kcol <= i__2;
+ kcol += i__1) {
+/* Computing MIN */
+ i__3 = *nh, i__4 = *n - kcol + 1;
+ kln = min(i__3,i__4);
+ dgemm_("C", "N", &jw, &kln, &jw, &c_b18, &v[v_offset], ldv, &
+ h__[kwtop + kcol * h_dim1], ldh, &c_b17, &t[t_offset],
+ ldt);
+ dlacpy_("A", &jw, &kln, &t[t_offset], ldt, &h__[kwtop + kcol *
+ h_dim1], ldh);
+/* L80: */
+ }
+ }
+
+/* ==== Update vertical slab in Z ==== */
+
+ if (*wantz) {
+ i__1 = *ihiz;
+ i__2 = *nv;
+ for (krow = *iloz; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *nv, i__4 = *ihiz - krow + 1;
+ kln = min(i__3,i__4);
+ dgemm_("N", "N", &kln, &jw, &jw, &c_b18, &z__[krow + kwtop *
+ z_dim1], ldz, &v[v_offset], ldv, &c_b17, &wv[
+ wv_offset], ldwv);
+ dlacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &z__[krow +
+ kwtop * z_dim1], ldz);
+/* L90: */
+ }
+ }
+ }
+
+/* ==== Return the number of deflations ... ==== */
+
+ *nd = jw - *ns;
+
+/* ==== ... and the number of shifts. (Subtracting */
+/* . INFQR from the spike length takes care */
+/* . of the case of a rare QR failure while */
+/* . calculating eigenvalues of the deflation */
+/* . window.) ==== */
+
+ *ns -= infqr;
+
+/* ==== Return optimal workspace. ==== */
+
+ work[1] = (doublereal) lwkopt;
+
+/* ==== End of DLAQR3 ==== */
+
+ return 0;
+} /* dlaqr3_ */
diff --git a/contrib/libs/clapack/dlaqr4.c b/contrib/libs/clapack/dlaqr4.c
new file mode 100644
index 0000000000..165924b868
--- /dev/null
+++ b/contrib/libs/clapack/dlaqr4.c
@@ -0,0 +1,754 @@
+/* dlaqr4.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__13 = 13;
+static integer c__15 = 15;
+static integer c_n1 = -1;
+static integer c__12 = 12;
+static integer c__14 = 14;
+static integer c__16 = 16;
+static logical c_false = FALSE_;
+static integer c__1 = 1;
+static integer c__3 = 3;
+
+/* Subroutine */ int dlaqr4_(logical *wantt, logical *wantz, integer *n,
+	integer *ilo, integer *ihi, doublereal *h__, integer *ldh, doublereal
+	*wr, doublereal *wi, integer *iloz, integer *ihiz, doublereal *z__,
+	integer *ldz, doublereal *work, integer *lwork, integer *info)
+{
+/* NOTE(review): f2c-generated translation of dlaqr4.f.  Prefer fixing the */
+/* Fortran source and regenerating rather than hand-editing this file. */
+
+    /* System generated locals */
+    integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5;
+    doublereal d__1, d__2, d__3, d__4;
+
+    /* Local variables */
+    integer i__, k;
+    doublereal aa, bb, cc, dd;
+    integer ld;
+    doublereal cs;
+    integer nh, it, ks, kt;
+    doublereal sn;
+    integer ku, kv, ls, ns;
+    doublereal ss;
+    integer nw, inf, kdu, nho, nve, kwh, nsr, nwr, kwv, ndec, ndfl, kbot,
+	    nmin;
+    doublereal swap;
+    integer ktop;
+    doublereal zdum[1]	/* was [1][1] */;
+    integer kacc22, itmax, nsmax, nwmax, kwtop;
+    extern /* Subroutine */ int dlaqr2_(logical *, logical *, integer *,
+	    integer *, integer *, integer *, doublereal *, integer *, integer
+	    *, integer *, doublereal *, integer *, integer *, integer *,
+	    doublereal *, doublereal *, doublereal *, integer *, integer *,
+	    doublereal *, integer *, integer *, doublereal *, integer *,
+	    doublereal *, integer *), dlanv2_(doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, doublereal *), dlaqr5_(
+	    logical *, logical *, integer *, integer *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, doublereal *, integer *,
+	    integer *, integer *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, integer *, doublereal *, integer *);
+    integer nibble;
+    extern /* Subroutine */ int dlahqr_(logical *, logical *, integer *,
+	    integer *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, integer *, integer *, doublereal *, integer *,
+	    integer *), dlacpy_(char *, integer *, integer *, doublereal *,
+	    integer *, doublereal *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+/* NOTE(review): jbcmpz is used below as a 2-character ILAENV job string and */
+/* jbcmpz[1] is written ('V'/'N'), which is out of bounds for a 1-element */
+/* array.  Reference CLAPACK declares this local as jbcmpz[2] -- confirm the */
+/* declared size against the upstream dlaqr4.c / dlaqr4.f. */
+    char jbcmpz[1];
+    integer nwupbd;
+    logical sorted;
+    integer lwkopt;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*     This subroutine implements one level of recursion for DLAQR0. */
+/*     It is a complete implementation of the small bulge multi-shift */
+/*     QR algorithm.  It may be called by DLAQR0 and, for large enough */
+/*     deflation window size, it may be called by DLAQR3.  This */
+/*     subroutine is identical to DLAQR0 except that it calls DLAQR2 */
+/*     instead of DLAQR3. */
+
+/*     Purpose */
+/*     ======= */
+
+/*     DLAQR4 computes the eigenvalues of a Hessenberg matrix H */
+/*     and, optionally, the matrices T and Z from the Schur decomposition */
+/*     H = Z T Z**T, where T is an upper quasi-triangular matrix (the */
+/*     Schur form), and Z is the orthogonal matrix of Schur vectors. */
+
+/*     Optionally Z may be postmultiplied into an input orthogonal */
+/*     matrix Q so that this routine can give the Schur factorization */
+/*     of a matrix A which has been reduced to the Hessenberg form H */
+/*     by the orthogonal matrix Q:  A = Q*H*Q**T = (QZ)*T*(QZ)**T. */
+
+/*     Arguments */
+/*     ========= */
+
+/*     WANTT   (input) LOGICAL */
+/*          = .TRUE. : the full Schur form T is required; */
+/*          = .FALSE.: only eigenvalues are required. */
+
+/*     WANTZ   (input) LOGICAL */
+/*          = .TRUE. : the matrix of Schur vectors Z is required; */
+/*          = .FALSE.: Schur vectors are not required. */
+
+/*     N     (input) INTEGER */
+/*           The order of the matrix H.  N .GE. 0. */
+
+/*     ILO   (input) INTEGER */
+/*     IHI   (input) INTEGER */
+/*           It is assumed that H is already upper triangular in rows */
+/*           and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, */
+/*           H(ILO,ILO-1) is zero.  ILO and IHI are normally set by a */
+/*           previous call to DGEBAL, and then passed to DGEHRD when the */
+/*           matrix output by DGEBAL is reduced to Hessenberg form. */
+/*           Otherwise, ILO and IHI should be set to 1 and N, */
+/*           respectively.  If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. */
+/*           If N = 0, then ILO = 1 and IHI = 0. */
+
+/*     H     (input/output) DOUBLE PRECISION array, dimension (LDH,N) */
+/*           On entry, the upper Hessenberg matrix H. */
+/*           On exit, if INFO = 0 and WANTT is .TRUE., then H contains */
+/*           the upper quasi-triangular matrix T from the Schur */
+/*           decomposition (the Schur form); 2-by-2 diagonal blocks */
+/*           (corresponding to complex conjugate pairs of eigenvalues) */
+/*           are returned in standard form, with H(i,i) = H(i+1,i+1) */
+/*           and H(i+1,i)*H(i,i+1).LT.0.  If INFO = 0 and WANTT is */
+/*           .FALSE., then the contents of H are unspecified on exit. */
+/*           (The output value of H when INFO.GT.0 is given under the */
+/*           description of INFO below.) */
+
+/*           This subroutine may explicitly set H(i,j) = 0 for i.GT.j and */
+/*           j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. */
+
+/*     LDH   (input) INTEGER */
+/*           The leading dimension of the array H. LDH .GE. max(1,N). */
+
+/*     WR    (output) DOUBLE PRECISION array, dimension (IHI) */
+/*     WI    (output) DOUBLE PRECISION array, dimension (IHI) */
+/*           The real and imaginary parts, respectively, of the computed */
+/*           eigenvalues of H(ILO:IHI,ILO:IHI) are stored in WR(ILO:IHI) */
+/*           and WI(ILO:IHI). If two eigenvalues are computed as a */
+/*           complex conjugate pair, they are stored in consecutive */
+/*           elements of WR and WI, say the i-th and (i+1)th, with */
+/*           WI(i) .GT. 0 and WI(i+1) .LT. 0. If WANTT is .TRUE., then */
+/*           the eigenvalues are stored in the same order as on the */
+/*           diagonal of the Schur form returned in H, with */
+/*           WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2 diagonal */
+/*           block, WI(i) = sqrt(-H(i+1,i)*H(i,i+1)) and */
+/*           WI(i+1) = -WI(i). */
+
+/*     ILOZ     (input) INTEGER */
+/*     IHIZ     (input) INTEGER */
+/*           Specify the rows of Z to which transformations must be */
+/*           applied if WANTZ is .TRUE.. */
+/*           1 .LE. ILOZ .LE. ILO; IHI .LE. IHIZ .LE. N. */
+
+/*     Z     (input/output) DOUBLE PRECISION array, dimension (LDZ,IHI) */
+/*           If WANTZ is .FALSE., then Z is not referenced. */
+/*           If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is */
+/*           replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the */
+/*           orthogonal Schur factor of H(ILO:IHI,ILO:IHI). */
+/*           (The output value of Z when INFO.GT.0 is given under */
+/*           the description of INFO below.) */
+
+/*     LDZ   (input) INTEGER */
+/*           The leading dimension of the array Z.  if WANTZ is .TRUE. */
+/*           then LDZ.GE.MAX(1,IHIZ).  Otherwise, LDZ.GE.1. */
+
+/*     WORK  (workspace/output) DOUBLE PRECISION array, dimension LWORK */
+/*           On exit, if LWORK = -1, WORK(1) returns an estimate of */
+/*           the optimal value for LWORK. */
+
+/*     LWORK (input) INTEGER */
+/*           The dimension of the array WORK.  LWORK .GE. max(1,N) */
+/*           is sufficient, but LWORK typically as large as 6*N may */
+/*           be required for optimal performance.  A workspace query */
+/*           to determine the optimal workspace size is recommended. */
+
+/*           If LWORK = -1, then DLAQR4 does a workspace query. */
+/*           In this case, DLAQR4 checks the input parameters and */
+/*           estimates the optimal workspace size for the given */
+/*           values of N, ILO and IHI.  The estimate is returned */
+/*           in WORK(1).  No error message related to LWORK is */
+/*           issued by XERBLA.  Neither H nor Z are accessed. */
+
+
+/*     INFO  (output) INTEGER */
+/*             =  0:  successful exit */
+/*           .GT. 0:  if INFO = i, DLAQR4 failed to compute all of */
+/*                the eigenvalues.  Elements 1:ilo-1 and i+1:n of WR */
+/*                and WI contain those eigenvalues which have been */
+/*                successfully computed.  (Failures are rare.) */
+
+/*                If INFO .GT. 0 and WANTT is .FALSE., then on exit, */
+/*                the remaining unconverged eigenvalues are the eigen- */
+/*                values of the upper Hessenberg matrix rows and */
+/*                columns ILO through INFO of the final, output */
+/*                value of H. */
+
+/*                If INFO .GT. 0 and WANTT is .TRUE., then on exit */
+
+/*           (*)  (initial value of H)*U  = U*(final value of H) */
+
+/*                where U is an orthogonal matrix.  The final */
+/*                value of H is upper Hessenberg and quasi-triangular */
+/*                in rows and columns INFO+1 through IHI. */
+
+/*                If INFO .GT. 0 and WANTZ is .TRUE., then on exit */
+
+/*                  (final value of Z(ILO:IHI,ILOZ:IHIZ) */
+/*                   =  (initial value of Z(ILO:IHI,ILOZ:IHIZ)*U */
+
+/*                where U is the orthogonal matrix in (*) (regard- */
+/*                less of the value of WANTT.) */
+
+/*                If INFO .GT. 0 and WANTZ is .FALSE., then Z is not */
+/*                accessed. */
+
+/*     ================================================================ */
+/*     Based on contributions by */
+/*        Karen Braman and Ralph Byers, Department of Mathematics, */
+/*        University of Kansas, USA */
+
+/*     ================================================================ */
+/*     References: */
+/*       K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/*       Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 */
+/*       Performance, SIAM Journal of Matrix Analysis, volume 23, pages */
+/*       929--947, 2002. */
+
+/*       K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/*       Algorithm Part II: Aggressive Early Deflation, SIAM Journal */
+/*       of Matrix Analysis, volume 23, pages 948--973, 2002. */
+
+/*     ================================================================ */
+/*     .. Parameters .. */
+
+/*     ==== Matrices of order NTINY or smaller must be processed by */
+/*     .    DLAHQR because of insufficient subdiagonal scratch space. */
+/*     .    (This is a hard limit.) ==== */
+
+/*     ==== Exceptional deflation windows:  try to cure rare */
+/*     .    slow convergence by varying the size of the */
+/*     .    deflation window after KEXNW iterations. ==== */
+
+/*     ==== Exceptional shifts: try to cure rare slow convergence */
+/*     .    with ad-hoc exceptional shifts every KEXSH iterations. */
+/*     .    ==== */
+
+/*     ==== The constants WILK1 and WILK2 are used to form the */
+/*     .    exceptional shifts. ==== */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+    /* Parameter adjustments */
+    h_dim1 = *ldh;
+    h_offset = 1 + h_dim1;
+    h__ -= h_offset;
+    --wr;
+    --wi;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+
+/*     ==== Quick return for N = 0: nothing to do. ==== */
+
+    if (*n == 0) {
+	work[1] = 1.;
+	return 0;
+    }
+
+    if (*n <= 11) {
+
+/*        ==== Tiny matrices must use DLAHQR. ==== */
+
+	lwkopt = 1;
+	if (*lwork != -1) {
+	    dlahqr_(wantt, wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], &
+		    wi[1], iloz, ihiz, &z__[z_offset], ldz, info);
+	}
+    } else {
+
+/*        ==== Use small bulge multi-shift QR with aggressive early */
+/*        .    deflation on larger-than-tiny matrices. ==== */
+
+/*        ==== Hope for the best. ==== */
+
+	*info = 0;
+
+/*        ==== Set up job flags for ILAENV. ==== */
+
+	if (*wantt) {
+	    *(unsigned char *)jbcmpz = 'S';
+	} else {
+	    *(unsigned char *)jbcmpz = 'E';
+	}
+	if (*wantz) {
+	    *(unsigned char *)&jbcmpz[1] = 'V';
+	} else {
+	    *(unsigned char *)&jbcmpz[1] = 'N';
+	}
+
+/*        ==== NWR = recommended deflation window size.  At this */
+/*        .    point,  N .GT. NTINY = 11, so there is enough */
+/*        .    subdiagonal workspace for NWR.GE.2 as required. */
+/*        .    (In fact, there is enough subdiagonal space for */
+/*        .    NWR.GE.3.) ==== */
+
+	nwr = ilaenv_(&c__13, "DLAQR4", jbcmpz, n, ilo, ihi, lwork);
+	nwr = max(2,nwr);
+/* Computing MIN */
+	i__1 = *ihi - *ilo + 1, i__2 = (*n - 1) / 3, i__1 = min(i__1,i__2);
+	nwr = min(i__1,nwr);
+
+/*        ==== NSR = recommended number of simultaneous shifts. */
+/*        .    At this point N .GT. NTINY = 11, so there is */
+/*        .    enough subdiagonal workspace for NSR to be even */
+/*        .    and greater than or equal to two as required. ==== */
+
+	nsr = ilaenv_(&c__15, "DLAQR4", jbcmpz, n, ilo, ihi, lwork);
+/* Computing MIN */
+	i__1 = nsr, i__2 = (*n + 6) / 9, i__1 = min(i__1,i__2), i__2 = *ihi -
+		*ilo;
+	nsr = min(i__1,i__2);
+/* Computing MAX */
+	i__1 = 2, i__2 = nsr - nsr % 2;
+	nsr = max(i__1,i__2);
+
+/*        ==== Estimate optimal workspace ==== */
+
+/*        ==== Workspace query call to DLAQR2 ==== */
+
+	i__1 = nwr + 1;
+	dlaqr2_(wantt, wantz, n, ilo, ihi, &i__1, &h__[h_offset], ldh, iloz,
+		ihiz, &z__[z_offset], ldz, &ls, &ld, &wr[1], &wi[1], &h__[
+		h_offset], ldh, n, &h__[h_offset], ldh, n, &h__[h_offset],
+		ldh, &work[1], &c_n1);
+
+/*        ==== Optimal workspace = MAX(DLAQR5, DLAQR2) ==== */
+
+/* Computing MAX */
+	i__1 = nsr * 3 / 2, i__2 = (integer) work[1];
+	lwkopt = max(i__1,i__2);
+
+/*        ==== Quick return in case of workspace query. ==== */
+
+	if (*lwork == -1) {
+	    work[1] = (doublereal) lwkopt;
+	    return 0;
+	}
+
+/*        ==== DLAHQR/DLAQR0 crossover point ==== */
+
+	nmin = ilaenv_(&c__12, "DLAQR4", jbcmpz, n, ilo, ihi, lwork);
+	nmin = max(11,nmin);
+
+/*        ==== Nibble crossover point ==== */
+
+	nibble = ilaenv_(&c__14, "DLAQR4", jbcmpz, n, ilo, ihi, lwork);
+	nibble = max(0,nibble);
+
+/*        ==== Accumulate reflections during ttswp?  Use block */
+/*        .    2-by-2 structure during matrix-matrix multiply? ==== */
+
+	kacc22 = ilaenv_(&c__16, "DLAQR4", jbcmpz, n, ilo, ihi, lwork);
+	kacc22 = max(0,kacc22);
+	kacc22 = min(2,kacc22);
+
+/*        ==== NWMAX = the largest possible deflation window for */
+/*        .    which there is sufficient workspace. ==== */
+
+/* Computing MIN */
+	i__1 = (*n - 1) / 3, i__2 = *lwork / 2;
+	nwmax = min(i__1,i__2);
+	nw = nwmax;
+
+/*        ==== NSMAX = the Largest number of simultaneous shifts */
+/*        .    for which there is sufficient workspace. ==== */
+
+/* Computing MIN */
+	i__1 = (*n + 6) / 9, i__2 = (*lwork << 1) / 3;
+	nsmax = min(i__1,i__2);
+	nsmax -= nsmax % 2;
+
+/*        ==== NDFL: an iteration count restarted at deflation. ==== */
+
+	ndfl = 1;
+
+/*        ==== ITMAX = iteration limit ==== */
+
+/* Computing MAX */
+	i__1 = 10, i__2 = *ihi - *ilo + 1;
+	itmax = max(i__1,i__2) * 30;
+
+/*        ==== Last row and column in the active block ==== */
+
+	kbot = *ihi;
+
+/*        ==== Main Loop ==== */
+
+	i__1 = itmax;
+	for (it = 1; it <= i__1; ++it) {
+
+/*           ==== Done when KBOT falls below ILO ==== */
+
+	    if (kbot < *ilo) {
+		goto L90;
+	    }
+
+/*           ==== Locate active block ==== */
+
+	    i__2 = *ilo + 1;
+	    for (k = kbot; k >= i__2; --k) {
+		if (h__[k + (k - 1) * h_dim1] == 0.) {
+		    goto L20;
+		}
+/* L10: */
+	    }
+	    k = *ilo;
+L20:
+	    ktop = k;
+
+/*           ==== Select deflation window size: */
+/*           .    Typical Case: */
+/*           .      If possible and advisable, nibble the entire */
+/*           .      active block.  If not, use size MIN(NWR,NWMAX) */
+/*           .      or MIN(NWR+1,NWMAX) depending upon which has */
+/*           .      the smaller corresponding subdiagonal entry */
+/*           .      (a heuristic). */
+/*           . */
+/*           .    Exceptional Case: */
+/*           .      If there have been no deflations in KEXNW or */
+/*           .      more iterations, then vary the deflation window */
+/*           .      size.   At first, because larger windows are, */
+/*           .      in general, more powerful than smaller ones, */
+/*           .      rapidly increase the window to the maximum possible. */
+/*           .      Then, gradually reduce the window size. ==== */
+
+	    nh = kbot - ktop + 1;
+	    nwupbd = min(nh,nwmax);
+	    if (ndfl < 5) {
+		nw = min(nwupbd,nwr);
+	    } else {
+/* Computing MIN */
+		i__2 = nwupbd, i__3 = nw << 1;
+		nw = min(i__2,i__3);
+	    }
+	    if (nw < nwmax) {
+		if (nw >= nh - 1) {
+		    nw = nh;
+		} else {
+		    kwtop = kbot - nw + 1;
+		    if ((d__1 = h__[kwtop + (kwtop - 1) * h_dim1], abs(d__1))
+			    > (d__2 = h__[kwtop - 1 + (kwtop - 2) * h_dim1],
+			    abs(d__2))) {
+			++nw;
+		    }
+		}
+	    }
+	    if (ndfl < 5) {
+		ndec = -1;
+	    } else if (ndec >= 0 || nw >= nwupbd) {
+		++ndec;
+		if (nw - ndec < 2) {
+		    ndec = 0;
+		}
+		nw -= ndec;
+	    }
+
+/*           ==== Aggressive early deflation: */
+/*           .    split workspace under the subdiagonal into */
+/*           .      - an nw-by-nw work array V in the lower */
+/*           .        left-hand-corner, */
+/*           .      - an NW-by-at-least-NW-but-more-is-better */
+/*           .        (NW-by-NHO) horizontal work array along */
+/*           .        the bottom edge, */
+/*           .      - an at-least-NW-but-more-is-better (NHV-by-NW) */
+/*           .        vertical work array along the left-hand-edge. */
+/*           .        ==== */
+
+	    kv = *n - nw + 1;
+	    kt = nw + 1;
+	    nho = *n - nw - 1 - kt + 1;
+	    kwv = nw + 2;
+	    nve = *n - nw - kwv + 1;
+
+/*           ==== Aggressive early deflation ==== */
+
+	    dlaqr2_(wantt, wantz, n, &ktop, &kbot, &nw, &h__[h_offset], ldh,
+		    iloz, ihiz, &z__[z_offset], ldz, &ls, &ld, &wr[1], &wi[1],
+		     &h__[kv + h_dim1], ldh, &nho, &h__[kv + kt * h_dim1],
+		    ldh, &nve, &h__[kwv + h_dim1], ldh, &work[1], lwork);
+
+/*           ==== Adjust KBOT accounting for new deflations. ==== */
+
+	    kbot -= ld;
+
+/*           ==== KS points to the shifts. ==== */
+
+	    ks = kbot - ls + 1;
+
+/*           ==== Skip an expensive QR sweep if there is a (partly */
+/*           .    heuristic) reason to expect that many eigenvalues */
+/*           .    will deflate without it.  Here, the QR sweep is */
+/*           .    skipped if many eigenvalues have just been deflated */
+/*           .    or if the remaining active block is small. */
+
+/*           NOTE: && binds tighter than ||, i.e. the sweep runs unless */
+/*           LD == 0 or (100*LD <= NW*NIBBLE and the active block is */
+/*           still larger than min(NMIN,NWMAX)). */
+
+	    if (ld == 0 || ld * 100 <= nw * nibble && kbot - ktop + 1 > min(
+		    nmin,nwmax)) {
+
+/*              ==== NS = nominal number of simultaneous shifts. */
+/*              .    This may be lowered (slightly) if DLAQR2 */
+/*              .    did not provide that many shifts. ==== */
+
+/* Computing MIN */
+/* Computing MAX */
+		i__4 = 2, i__5 = kbot - ktop;
+		i__2 = min(nsmax,nsr), i__3 = max(i__4,i__5);
+		ns = min(i__2,i__3);
+		ns -= ns % 2;
+
+/*              ==== If there have been no deflations */
+/*              .    in a multiple of KEXSH iterations, */
+/*              .    then try exceptional shifts. */
+/*              .    Otherwise use shifts provided by */
+/*              .    DLAQR2 above or from the eigenvalues */
+/*              .    of a trailing principal submatrix. ==== */
+
+		if (ndfl % 6 == 0) {
+		    ks = kbot - ns + 1;
+/* Computing MAX */
+		    i__3 = ks + 1, i__4 = ktop + 2;
+		    i__2 = max(i__3,i__4);
+		    for (i__ = kbot; i__ >= i__2; i__ += -2) {
+			ss = (d__1 = h__[i__ + (i__ - 1) * h_dim1], abs(d__1))
+				 + (d__2 = h__[i__ - 1 + (i__ - 2) * h_dim1],
+				abs(d__2));
+			aa = ss * .75 + h__[i__ + i__ * h_dim1];
+			bb = ss;
+			cc = ss * -.4375;
+			dd = aa;
+			dlanv2_(&aa, &bb, &cc, &dd, &wr[i__ - 1], &wi[i__ - 1]
+				, &wr[i__], &wi[i__], &cs, &sn);
+/* L30: */
+		    }
+		    if (ks == ktop) {
+			wr[ks + 1] = h__[ks + 1 + (ks + 1) * h_dim1];
+			wi[ks + 1] = 0.;
+			wr[ks] = wr[ks + 1];
+			wi[ks] = wi[ks + 1];
+		    }
+		} else {
+
+/*                 ==== Got NS/2 or fewer shifts? Use DLAHQR */
+/*                 .    on a trailing principal submatrix to */
+/*                 .    get more. (Since NS.LE.NSMAX.LE.(N+6)/9, */
+/*                 .    there is enough space below the subdiagonal */
+/*                 .    to fit an NS-by-NS scratch array.) ==== */
+
+		    if (kbot - ks + 1 <= ns / 2) {
+			ks = kbot - ns + 1;
+			kt = *n - ns + 1;
+			dlacpy_("A", &ns, &ns, &h__[ks + ks * h_dim1], ldh, &
+				h__[kt + h_dim1], ldh);
+			dlahqr_(&c_false, &c_false, &ns, &c__1, &ns, &h__[kt
+				+ h_dim1], ldh, &wr[ks], &wi[ks], &c__1, &
+				c__1, zdum, &c__1, &inf);
+			ks += inf;
+
+/*                    ==== In case of a rare QR failure use */
+/*                    .    eigenvalues of the trailing 2-by-2 */
+/*                    .    principal submatrix. ==== */
+
+			if (ks >= kbot) {
+			    aa = h__[kbot - 1 + (kbot - 1) * h_dim1];
+			    cc = h__[kbot + (kbot - 1) * h_dim1];
+			    bb = h__[kbot - 1 + kbot * h_dim1];
+			    dd = h__[kbot + kbot * h_dim1];
+			    dlanv2_(&aa, &bb, &cc, &dd, &wr[kbot - 1], &wi[
+				    kbot - 1], &wr[kbot], &wi[kbot], &cs, &sn)
+				    ;
+			    ks = kbot - 1;
+			}
+		    }
+
+		    if (kbot - ks + 1 > ns) {
+
+/*                    ==== Sort the shifts (Helps a little) */
+/*                    .    Bubble sort keeps complex conjugate */
+/*                    .    pairs together. ==== */
+
+			sorted = FALSE_;
+			i__2 = ks + 1;
+			for (k = kbot; k >= i__2; --k) {
+			    if (sorted) {
+				goto L60;
+			    }
+			    sorted = TRUE_;
+			    i__3 = k - 1;
+			    for (i__ = ks; i__ <= i__3; ++i__) {
+				if ((d__1 = wr[i__], abs(d__1)) + (d__2 = wi[
+					i__], abs(d__2)) < (d__3 = wr[i__ + 1]
+					, abs(d__3)) + (d__4 = wi[i__ + 1],
+					abs(d__4))) {
+				    sorted = FALSE_;
+
+				    swap = wr[i__];
+				    wr[i__] = wr[i__ + 1];
+				    wr[i__ + 1] = swap;
+
+				    swap = wi[i__];
+				    wi[i__] = wi[i__ + 1];
+				    wi[i__ + 1] = swap;
+				}
+/* L40: */
+			    }
+/* L50: */
+			}
+L60:
+			;
+		    }
+
+/*                 ==== Shuffle shifts into pairs of real shifts */
+/*                 .    and pairs of complex conjugate shifts */
+/*                 .    assuming complex conjugate shifts are */
+/*                 .    already adjacent to one another. (Yes, */
+/*                 .    they are.) ==== */
+
+		    i__2 = ks + 2;
+		    for (i__ = kbot; i__ >= i__2; i__ += -2) {
+			if (wi[i__] != -wi[i__ - 1]) {
+
+			    swap = wr[i__];
+			    wr[i__] = wr[i__ - 1];
+			    wr[i__ - 1] = wr[i__ - 2];
+			    wr[i__ - 2] = swap;
+
+			    swap = wi[i__];
+			    wi[i__] = wi[i__ - 1];
+			    wi[i__ - 1] = wi[i__ - 2];
+			    wi[i__ - 2] = swap;
+			}
+/* L70: */
+		    }
+		}
+
+/*              ==== If there are only two shifts and both are */
+/*              .    real, then use only one.  ==== */
+
+		if (kbot - ks + 1 == 2) {
+		    if (wi[kbot] == 0.) {
+			if ((d__1 = wr[kbot] - h__[kbot + kbot * h_dim1], abs(
+				d__1)) < (d__2 = wr[kbot - 1] - h__[kbot +
+				kbot * h_dim1], abs(d__2))) {
+			    wr[kbot - 1] = wr[kbot];
+			} else {
+			    wr[kbot] = wr[kbot - 1];
+			}
+		    }
+		}
+
+/*              ==== Use up to NS of the smallest magnitude */
+/*              .    shifts.  If there aren't NS shifts available, */
+/*              .    then use them all, possibly dropping one to */
+/*              .    make the number of shifts even. ==== */
+
+/* Computing MIN */
+		i__2 = ns, i__3 = kbot - ks + 1;
+		ns = min(i__2,i__3);
+		ns -= ns % 2;
+		ks = kbot - ns + 1;
+
+/*              ==== Small-bulge multi-shift QR sweep: */
+/*              .    split workspace under the subdiagonal into */
+/*              .    - a KDU-by-KDU work array U in the lower */
+/*              .      left-hand-corner, */
+/*              .    - a KDU-by-at-least-KDU-but-more-is-better */
+/*              .      (KDU-by-NHo) horizontal work array WH along */
+/*              .      the bottom edge, */
+/*              .    - and an at-least-KDU-but-more-is-better-by-KDU */
+/*              .      (NVE-by-KDU) vertical work WV arrow along */
+/*              .      the left-hand-edge. ==== */
+
+		kdu = ns * 3 - 3;
+		ku = *n - kdu + 1;
+		kwh = kdu + 1;
+		nho = *n - kdu - 3 - (kdu + 1) + 1;
+		kwv = kdu + 4;
+		nve = *n - kdu - kwv + 1;
+
+/*              ==== Small-bulge multi-shift QR sweep ==== */
+
+		dlaqr5_(wantt, wantz, &kacc22, n, &ktop, &kbot, &ns, &wr[ks],
+			&wi[ks], &h__[h_offset], ldh, iloz, ihiz, &z__[
+			z_offset], ldz, &work[1], &c__3, &h__[ku + h_dim1],
+			ldh, &nve, &h__[kwv + h_dim1], ldh, &nho, &h__[ku +
+			kwh * h_dim1], ldh);
+	    }
+
+/*           ==== Note progress (or the lack of it). ==== */
+
+	    if (ld > 0) {
+		ndfl = 1;
+	    } else {
+		++ndfl;
+	    }
+
+/*           ==== End of main loop ==== */
+/* L80: */
+	}
+
+/*        ==== Iteration limit exceeded.  Set INFO to show where */
+/*        .    the problem occurred and exit. ==== */
+
+	*info = kbot;
+L90:
+	;
+    }
+
+/*     ==== Return the optimal value of LWORK. ==== */
+
+    work[1] = (doublereal) lwkopt;
+
+/*     ==== End of DLAQR4 ==== */
+
+    return 0;
+} /* dlaqr4_ */
diff --git a/contrib/libs/clapack/dlaqr5.c b/contrib/libs/clapack/dlaqr5.c
new file mode 100644
index 0000000000..ee5f7a1bbb
--- /dev/null
+++ b/contrib/libs/clapack/dlaqr5.c
@@ -0,0 +1,1025 @@
+/* dlaqr5.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b7 = 0.;
+static doublereal c_b8 = 1.;
+static integer c__3 = 3;
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int dlaqr5_(logical *wantt, logical *wantz, integer *kacc22,
+ integer *n, integer *ktop, integer *kbot, integer *nshfts, doublereal
+ *sr, doublereal *si, doublereal *h__, integer *ldh, integer *iloz,
+ integer *ihiz, doublereal *z__, integer *ldz, doublereal *v, integer *
+ ldv, doublereal *u, integer *ldu, integer *nv, doublereal *wv,
+ integer *ldwv, integer *nh, doublereal *wh, integer *ldwh)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, u_dim1, u_offset, v_dim1, v_offset, wh_dim1,
+ wh_offset, wv_dim1, wv_offset, z_dim1, z_offset, i__1, i__2, i__3,
+ i__4, i__5, i__6, i__7;
+ doublereal d__1, d__2, d__3, d__4, d__5;
+
+ /* Local variables */
+ integer i__, j, k, m, i2, j2, i4, j4, k1;
+ doublereal h11, h12, h21, h22;
+ integer m22, ns, nu;
+ doublereal vt[3], scl;
+ integer kdu, kms;
+ doublereal ulp;
+ integer knz, kzs;
+ doublereal tst1, tst2, beta;
+ logical blk22, bmp22;
+ integer mend, jcol, jlen, jbot, mbot;
+ doublereal swap;
+ integer jtop, jrow, mtop;
+ doublereal alpha;
+ logical accum;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ integer ndcol, incol, krcol, nbmps;
+ extern /* Subroutine */ int dtrmm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *), dlaqr1_(
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *), dlabad_(doublereal *,
+ doublereal *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *), dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *);
+ doublereal safmin;
+ extern /* Subroutine */ int dlaset_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *);
+ doublereal safmax, refsum;
+ integer mstart;
+ doublereal smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* This auxiliary subroutine called by DLAQR0 performs a */
+/* single small-bulge multi-shift QR sweep. */
+
+/* WANTT (input) logical scalar */
+/* WANTT = .true. if the quasi-triangular Schur factor */
+/* is being computed. WANTT is set to .false. otherwise. */
+
+/* WANTZ (input) logical scalar */
+/* WANTZ = .true. if the orthogonal Schur factor is being */
+/* computed. WANTZ is set to .false. otherwise. */
+
+/* KACC22 (input) integer with value 0, 1, or 2. */
+/* Specifies the computation mode of far-from-diagonal */
+/* orthogonal updates. */
+/* = 0: DLAQR5 does not accumulate reflections and does not */
+/* use matrix-matrix multiply to update far-from-diagonal */
+/* matrix entries. */
+/* = 1: DLAQR5 accumulates reflections and uses matrix-matrix */
+/* multiply to update the far-from-diagonal matrix entries. */
+/* = 2: DLAQR5 accumulates reflections, uses matrix-matrix */
+/* multiply to update the far-from-diagonal matrix entries, */
+/* and takes advantage of 2-by-2 block structure during */
+/* matrix multiplies. */
+
+/* N (input) integer scalar */
+/* N is the order of the Hessenberg matrix H upon which this */
+/* subroutine operates. */
+
+/* KTOP (input) integer scalar */
+/* KBOT (input) integer scalar */
+/* These are the first and last rows and columns of an */
+/* isolated diagonal block upon which the QR sweep is to be */
+/* applied. It is assumed without a check that */
+/* either KTOP = 1 or H(KTOP,KTOP-1) = 0 */
+/* and */
+/* either KBOT = N or H(KBOT+1,KBOT) = 0. */
+
+/* NSHFTS (input) integer scalar */
+/* NSHFTS gives the number of simultaneous shifts. NSHFTS */
+/* must be positive and even. */
+
+/* SR (input/output) DOUBLE PRECISION array of size (NSHFTS) */
+/* SI (input/output) DOUBLE PRECISION array of size (NSHFTS) */
+/* SR contains the real parts and SI contains the imaginary */
+/* parts of the NSHFTS shifts of origin that define the */
+/* multi-shift QR sweep. On output SR and SI may be */
+/* reordered. */
+
+/* H (input/output) DOUBLE PRECISION array of size (LDH,N) */
+/* On input H contains a Hessenberg matrix. On output a */
+/* multi-shift QR sweep with shifts SR(J)+i*SI(J) is applied */
+/* to the isolated diagonal block in rows and columns KTOP */
+/* through KBOT. */
+
+/* LDH (input) integer scalar */
+/* LDH is the leading dimension of H just as declared in the */
+/* calling procedure. LDH.GE.MAX(1,N). */
+
+/* ILOZ (input) INTEGER */
+/* IHIZ (input) INTEGER */
+/* Specify the rows of Z to which transformations must be */
+/* applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N */
+
+/* Z (input/output) DOUBLE PRECISION array of size (LDZ,IHI) */
+/* If WANTZ = .TRUE., then the QR Sweep orthogonal */
+/* similarity transformation is accumulated into */
+/* Z(ILOZ:IHIZ,ILO:IHI) from the right. */
+/* If WANTZ = .FALSE., then Z is unreferenced. */
+
+/* LDZ (input) integer scalar */
+/* LDA is the leading dimension of Z just as declared in */
+/* the calling procedure. LDZ.GE.N. */
+
+/* V (workspace) DOUBLE PRECISION array of size (LDV,NSHFTS/2) */
+
+/* LDV (input) integer scalar */
+/* LDV is the leading dimension of V as declared in the */
+/* calling procedure. LDV.GE.3. */
+
+/* U (workspace) DOUBLE PRECISION array of size */
+/* (LDU,3*NSHFTS-3) */
+
+/* LDU (input) integer scalar */
+/* LDU is the leading dimension of U just as declared in the */
+/* in the calling subroutine. LDU.GE.3*NSHFTS-3. */
+
+/* NH (input) integer scalar */
+/* NH is the number of columns in array WH available for */
+/* workspace. NH.GE.1. */
+
+/* WH (workspace) DOUBLE PRECISION array of size (LDWH,NH) */
+
+/* LDWH (input) integer scalar */
+/* Leading dimension of WH just as declared in the */
+/* calling procedure. LDWH.GE.3*NSHFTS-3. */
+
+/* NV (input) integer scalar */
+/* NV is the number of rows in WV agailable for workspace. */
+/* NV.GE.1. */
+
+/* WV (workspace) DOUBLE PRECISION array of size */
+/* (LDWV,3*NSHFTS-3) */
+
+/* LDWV (input) integer scalar */
+/* LDWV is the leading dimension of WV as declared in the */
+/* in the calling subroutine. LDWV.GE.NV. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* Reference: */
+
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part I: Maintaining Well Focused Shifts, and */
+/* Level 3 Performance, SIAM Journal of Matrix Analysis, */
+/* volume 23, pages 929--947, 2002. */
+
+/* ================================================================ */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* ==== If there are no shifts, then there is nothing to do. ==== */
+
+ /* Parameter adjustments */
+ --sr;
+ --si;
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ wv_dim1 = *ldwv;
+ wv_offset = 1 + wv_dim1;
+ wv -= wv_offset;
+ wh_dim1 = *ldwh;
+ wh_offset = 1 + wh_dim1;
+ wh -= wh_offset;
+
+ /* Function Body */
+ if (*nshfts < 2) {
+ return 0;
+ }
+
+/* ==== If the active block is empty or 1-by-1, then there */
+/* . is nothing to do. ==== */
+
+ if (*ktop >= *kbot) {
+ return 0;
+ }
+
+/* ==== Shuffle shifts into pairs of real shifts and pairs */
+/* . of complex conjugate shifts assuming complex */
+/* . conjugate shifts are already adjacent to one */
+/* . another. ==== */
+
+ i__1 = *nshfts - 2;
+ for (i__ = 1; i__ <= i__1; i__ += 2) {
+ if (si[i__] != -si[i__ + 1]) {
+
+ swap = sr[i__];
+ sr[i__] = sr[i__ + 1];
+ sr[i__ + 1] = sr[i__ + 2];
+ sr[i__ + 2] = swap;
+
+ swap = si[i__];
+ si[i__] = si[i__ + 1];
+ si[i__ + 1] = si[i__ + 2];
+ si[i__ + 2] = swap;
+ }
+/* L10: */
+ }
+
+/* ==== NSHFTS is supposed to be even, but if it is odd, */
+/* . then simply reduce it by one. The shuffle above */
+/* . ensures that the dropped shift is real and that */
+/* . the remaining shifts are paired. ==== */
+
+ ns = *nshfts - *nshfts % 2;
+
+/* ==== Machine constants for deflation ==== */
+
+ safmin = dlamch_("SAFE MINIMUM");
+ safmax = 1. / safmin;
+ dlabad_(&safmin, &safmax);
+ ulp = dlamch_("PRECISION");
+ smlnum = safmin * ((doublereal) (*n) / ulp);
+
+/* ==== Use accumulated reflections to update far-from-diagonal */
+/* . entries ? ==== */
+
+ accum = *kacc22 == 1 || *kacc22 == 2;
+
+/* ==== If so, exploit the 2-by-2 block structure? ==== */
+
+ blk22 = ns > 2 && *kacc22 == 2;
+
+/* ==== clear trash ==== */
+
+ if (*ktop + 2 <= *kbot) {
+ h__[*ktop + 2 + *ktop * h_dim1] = 0.;
+ }
+
+/* ==== NBMPS = number of 2-shift bulges in the chain ==== */
+
+ nbmps = ns / 2;
+
+/* ==== KDU = width of slab ==== */
+
+ kdu = nbmps * 6 - 3;
+
+/* ==== Create and chase chains of NBMPS bulges ==== */
+
+ i__1 = *kbot - 2;
+ i__2 = nbmps * 3 - 2;
+ for (incol = (1 - nbmps) * 3 + *ktop - 1; i__2 < 0 ? incol >= i__1 :
+ incol <= i__1; incol += i__2) {
+ ndcol = incol + kdu;
+ if (accum) {
+ dlaset_("ALL", &kdu, &kdu, &c_b7, &c_b8, &u[u_offset], ldu);
+ }
+
+/* ==== Near-the-diagonal bulge chase. The following loop */
+/* . performs the near-the-diagonal part of a small bulge */
+/* . multi-shift QR sweep. Each 6*NBMPS-2 column diagonal */
+/* . chunk extends from column INCOL to column NDCOL */
+/* . (including both column INCOL and column NDCOL). The */
+/* . following loop chases a 3*NBMPS column long chain of */
+/* . NBMPS bulges 3*NBMPS-2 columns to the right. (INCOL */
+/* . may be less than KTOP and and NDCOL may be greater than */
+/* . KBOT indicating phantom columns from which to chase */
+/* . bulges before they are actually introduced or to which */
+/* . to chase bulges beyond column KBOT.) ==== */
+
+/* Computing MIN */
+ i__4 = incol + nbmps * 3 - 3, i__5 = *kbot - 2;
+ i__3 = min(i__4,i__5);
+ for (krcol = incol; krcol <= i__3; ++krcol) {
+
+/* ==== Bulges number MTOP to MBOT are active double implicit */
+/* . shift bulges. There may or may not also be small */
+/* . 2-by-2 bulge, if there is room. The inactive bulges */
+/* . (if any) must wait until the active bulges have moved */
+/* . down the diagonal to make room. The phantom matrix */
+/* . paradigm described above helps keep track. ==== */
+
+/* Computing MAX */
+ i__4 = 1, i__5 = (*ktop - 1 - krcol + 2) / 3 + 1;
+ mtop = max(i__4,i__5);
+/* Computing MIN */
+ i__4 = nbmps, i__5 = (*kbot - krcol) / 3;
+ mbot = min(i__4,i__5);
+ m22 = mbot + 1;
+ bmp22 = mbot < nbmps && krcol + (m22 - 1) * 3 == *kbot - 2;
+
+/* ==== Generate reflections to chase the chain right */
+/* . one column. (The minimum value of K is KTOP-1.) ==== */
+
+ i__4 = mbot;
+ for (m = mtop; m <= i__4; ++m) {
+ k = krcol + (m - 1) * 3;
+ if (k == *ktop - 1) {
+ dlaqr1_(&c__3, &h__[*ktop + *ktop * h_dim1], ldh, &sr[(m
+ << 1) - 1], &si[(m << 1) - 1], &sr[m * 2], &si[m *
+ 2], &v[m * v_dim1 + 1]);
+ alpha = v[m * v_dim1 + 1];
+ dlarfg_(&c__3, &alpha, &v[m * v_dim1 + 2], &c__1, &v[m *
+ v_dim1 + 1]);
+ } else {
+ beta = h__[k + 1 + k * h_dim1];
+ v[m * v_dim1 + 2] = h__[k + 2 + k * h_dim1];
+ v[m * v_dim1 + 3] = h__[k + 3 + k * h_dim1];
+ dlarfg_(&c__3, &beta, &v[m * v_dim1 + 2], &c__1, &v[m *
+ v_dim1 + 1]);
+
+/* ==== A Bulge may collapse because of vigilant */
+/* . deflation or destructive underflow. In the */
+/* . underflow case, try the two-small-subdiagonals */
+/* . trick to try to reinflate the bulge. ==== */
+
+ if (h__[k + 3 + k * h_dim1] != 0. || h__[k + 3 + (k + 1) *
+ h_dim1] != 0. || h__[k + 3 + (k + 2) * h_dim1] ==
+ 0.) {
+
+/* ==== Typical case: not collapsed (yet). ==== */
+
+ h__[k + 1 + k * h_dim1] = beta;
+ h__[k + 2 + k * h_dim1] = 0.;
+ h__[k + 3 + k * h_dim1] = 0.;
+ } else {
+
+/* ==== Atypical case: collapsed. Attempt to */
+/* . reintroduce ignoring H(K+1,K) and H(K+2,K). */
+/* . If the fill resulting from the new */
+/* . reflector is too large, then abandon it. */
+/* . Otherwise, use the new one. ==== */
+
+ dlaqr1_(&c__3, &h__[k + 1 + (k + 1) * h_dim1], ldh, &
+ sr[(m << 1) - 1], &si[(m << 1) - 1], &sr[m *
+ 2], &si[m * 2], vt);
+ alpha = vt[0];
+ dlarfg_(&c__3, &alpha, &vt[1], &c__1, vt);
+ refsum = vt[0] * (h__[k + 1 + k * h_dim1] + vt[1] *
+ h__[k + 2 + k * h_dim1]);
+
+ if ((d__1 = h__[k + 2 + k * h_dim1] - refsum * vt[1],
+ abs(d__1)) + (d__2 = refsum * vt[2], abs(d__2)
+ ) > ulp * ((d__3 = h__[k + k * h_dim1], abs(
+ d__3)) + (d__4 = h__[k + 1 + (k + 1) * h_dim1]
+ , abs(d__4)) + (d__5 = h__[k + 2 + (k + 2) *
+ h_dim1], abs(d__5)))) {
+
+/* ==== Starting a new bulge here would */
+/* . create non-negligible fill. Use */
+/* . the old one with trepidation. ==== */
+
+ h__[k + 1 + k * h_dim1] = beta;
+ h__[k + 2 + k * h_dim1] = 0.;
+ h__[k + 3 + k * h_dim1] = 0.;
+ } else {
+
+/* ==== Stating a new bulge here would */
+/* . create only negligible fill. */
+/* . Replace the old reflector with */
+/* . the new one. ==== */
+
+ h__[k + 1 + k * h_dim1] -= refsum;
+ h__[k + 2 + k * h_dim1] = 0.;
+ h__[k + 3 + k * h_dim1] = 0.;
+ v[m * v_dim1 + 1] = vt[0];
+ v[m * v_dim1 + 2] = vt[1];
+ v[m * v_dim1 + 3] = vt[2];
+ }
+ }
+ }
+/* L20: */
+ }
+
+/* ==== Generate a 2-by-2 reflection, if needed. ==== */
+
+ k = krcol + (m22 - 1) * 3;
+ if (bmp22) {
+ if (k == *ktop - 1) {
+ dlaqr1_(&c__2, &h__[k + 1 + (k + 1) * h_dim1], ldh, &sr[(
+ m22 << 1) - 1], &si[(m22 << 1) - 1], &sr[m22 * 2],
+ &si[m22 * 2], &v[m22 * v_dim1 + 1]);
+ beta = v[m22 * v_dim1 + 1];
+ dlarfg_(&c__2, &beta, &v[m22 * v_dim1 + 2], &c__1, &v[m22
+ * v_dim1 + 1]);
+ } else {
+ beta = h__[k + 1 + k * h_dim1];
+ v[m22 * v_dim1 + 2] = h__[k + 2 + k * h_dim1];
+ dlarfg_(&c__2, &beta, &v[m22 * v_dim1 + 2], &c__1, &v[m22
+ * v_dim1 + 1]);
+ h__[k + 1 + k * h_dim1] = beta;
+ h__[k + 2 + k * h_dim1] = 0.;
+ }
+ }
+
+/* ==== Multiply H by reflections from the left ==== */
+
+ if (accum) {
+ jbot = min(ndcol,*kbot);
+ } else if (*wantt) {
+ jbot = *n;
+ } else {
+ jbot = *kbot;
+ }
+ i__4 = jbot;
+ for (j = max(*ktop,krcol); j <= i__4; ++j) {
+/* Computing MIN */
+ i__5 = mbot, i__6 = (j - krcol + 2) / 3;
+ mend = min(i__5,i__6);
+ i__5 = mend;
+ for (m = mtop; m <= i__5; ++m) {
+ k = krcol + (m - 1) * 3;
+ refsum = v[m * v_dim1 + 1] * (h__[k + 1 + j * h_dim1] + v[
+ m * v_dim1 + 2] * h__[k + 2 + j * h_dim1] + v[m *
+ v_dim1 + 3] * h__[k + 3 + j * h_dim1]);
+ h__[k + 1 + j * h_dim1] -= refsum;
+ h__[k + 2 + j * h_dim1] -= refsum * v[m * v_dim1 + 2];
+ h__[k + 3 + j * h_dim1] -= refsum * v[m * v_dim1 + 3];
+/* L30: */
+ }
+/* L40: */
+ }
+ if (bmp22) {
+ k = krcol + (m22 - 1) * 3;
+/* Computing MAX */
+ i__4 = k + 1;
+ i__5 = jbot;
+ for (j = max(i__4,*ktop); j <= i__5; ++j) {
+ refsum = v[m22 * v_dim1 + 1] * (h__[k + 1 + j * h_dim1] +
+ v[m22 * v_dim1 + 2] * h__[k + 2 + j * h_dim1]);
+ h__[k + 1 + j * h_dim1] -= refsum;
+ h__[k + 2 + j * h_dim1] -= refsum * v[m22 * v_dim1 + 2];
+/* L50: */
+ }
+ }
+
+/* ==== Multiply H by reflections from the right. */
+/* . Delay filling in the last row until the */
+/* . vigilant deflation check is complete. ==== */
+
+ if (accum) {
+ jtop = max(*ktop,incol);
+ } else if (*wantt) {
+ jtop = 1;
+ } else {
+ jtop = *ktop;
+ }
+ i__5 = mbot;
+ for (m = mtop; m <= i__5; ++m) {
+ if (v[m * v_dim1 + 1] != 0.) {
+ k = krcol + (m - 1) * 3;
+/* Computing MIN */
+ i__6 = *kbot, i__7 = k + 3;
+ i__4 = min(i__6,i__7);
+ for (j = jtop; j <= i__4; ++j) {
+ refsum = v[m * v_dim1 + 1] * (h__[j + (k + 1) *
+ h_dim1] + v[m * v_dim1 + 2] * h__[j + (k + 2)
+ * h_dim1] + v[m * v_dim1 + 3] * h__[j + (k +
+ 3) * h_dim1]);
+ h__[j + (k + 1) * h_dim1] -= refsum;
+ h__[j + (k + 2) * h_dim1] -= refsum * v[m * v_dim1 +
+ 2];
+ h__[j + (k + 3) * h_dim1] -= refsum * v[m * v_dim1 +
+ 3];
+/* L60: */
+ }
+
+ if (accum) {
+
+/* ==== Accumulate U. (If necessary, update Z later */
+/* . with with an efficient matrix-matrix */
+/* . multiply.) ==== */
+
+ kms = k - incol;
+/* Computing MAX */
+ i__4 = 1, i__6 = *ktop - incol;
+ i__7 = kdu;
+ for (j = max(i__4,i__6); j <= i__7; ++j) {
+ refsum = v[m * v_dim1 + 1] * (u[j + (kms + 1) *
+ u_dim1] + v[m * v_dim1 + 2] * u[j + (kms
+ + 2) * u_dim1] + v[m * v_dim1 + 3] * u[j
+ + (kms + 3) * u_dim1]);
+ u[j + (kms + 1) * u_dim1] -= refsum;
+ u[j + (kms + 2) * u_dim1] -= refsum * v[m *
+ v_dim1 + 2];
+ u[j + (kms + 3) * u_dim1] -= refsum * v[m *
+ v_dim1 + 3];
+/* L70: */
+ }
+ } else if (*wantz) {
+
+/* ==== U is not accumulated, so update Z */
+/* . now by multiplying by reflections */
+/* . from the right. ==== */
+
+ i__7 = *ihiz;
+ for (j = *iloz; j <= i__7; ++j) {
+ refsum = v[m * v_dim1 + 1] * (z__[j + (k + 1) *
+ z_dim1] + v[m * v_dim1 + 2] * z__[j + (k
+ + 2) * z_dim1] + v[m * v_dim1 + 3] * z__[
+ j + (k + 3) * z_dim1]);
+ z__[j + (k + 1) * z_dim1] -= refsum;
+ z__[j + (k + 2) * z_dim1] -= refsum * v[m *
+ v_dim1 + 2];
+ z__[j + (k + 3) * z_dim1] -= refsum * v[m *
+ v_dim1 + 3];
+/* L80: */
+ }
+ }
+ }
+/* L90: */
+ }
+
+/* ==== Special case: 2-by-2 reflection (if needed) ==== */
+
+ k = krcol + (m22 - 1) * 3;
+ if (bmp22 && v[m22 * v_dim1 + 1] != 0.) {
+/* Computing MIN */
+ i__7 = *kbot, i__4 = k + 3;
+ i__5 = min(i__7,i__4);
+ for (j = jtop; j <= i__5; ++j) {
+ refsum = v[m22 * v_dim1 + 1] * (h__[j + (k + 1) * h_dim1]
+ + v[m22 * v_dim1 + 2] * h__[j + (k + 2) * h_dim1])
+ ;
+ h__[j + (k + 1) * h_dim1] -= refsum;
+ h__[j + (k + 2) * h_dim1] -= refsum * v[m22 * v_dim1 + 2];
+/* L100: */
+ }
+
+ if (accum) {
+ kms = k - incol;
+/* Computing MAX */
+ i__5 = 1, i__7 = *ktop - incol;
+ i__4 = kdu;
+ for (j = max(i__5,i__7); j <= i__4; ++j) {
+ refsum = v[m22 * v_dim1 + 1] * (u[j + (kms + 1) *
+ u_dim1] + v[m22 * v_dim1 + 2] * u[j + (kms +
+ 2) * u_dim1]);
+ u[j + (kms + 1) * u_dim1] -= refsum;
+ u[j + (kms + 2) * u_dim1] -= refsum * v[m22 * v_dim1
+ + 2];
+/* L110: */
+ }
+ } else if (*wantz) {
+ i__4 = *ihiz;
+ for (j = *iloz; j <= i__4; ++j) {
+ refsum = v[m22 * v_dim1 + 1] * (z__[j + (k + 1) *
+ z_dim1] + v[m22 * v_dim1 + 2] * z__[j + (k +
+ 2) * z_dim1]);
+ z__[j + (k + 1) * z_dim1] -= refsum;
+ z__[j + (k + 2) * z_dim1] -= refsum * v[m22 * v_dim1
+ + 2];
+/* L120: */
+ }
+ }
+ }
+
+/* ==== Vigilant deflation check ==== */
+
+ mstart = mtop;
+ if (krcol + (mstart - 1) * 3 < *ktop) {
+ ++mstart;
+ }
+ mend = mbot;
+ if (bmp22) {
+ ++mend;
+ }
+ if (krcol == *kbot - 2) {
+ ++mend;
+ }
+ i__4 = mend;
+ for (m = mstart; m <= i__4; ++m) {
+/* Computing MIN */
+ i__5 = *kbot - 1, i__7 = krcol + (m - 1) * 3;
+ k = min(i__5,i__7);
+
+/* ==== The following convergence test requires that */
+/* . the tradition small-compared-to-nearby-diagonals */
+/* . criterion and the Ahues & Tisseur (LAWN 122, 1997) */
+/* . criteria both be satisfied. The latter improves */
+/* . accuracy in some examples. Falling back on an */
+/* . alternate convergence criterion when TST1 or TST2 */
+/* . is zero (as done here) is traditional but probably */
+/* . unnecessary. ==== */
+
+ if (h__[k + 1 + k * h_dim1] != 0.) {
+ tst1 = (d__1 = h__[k + k * h_dim1], abs(d__1)) + (d__2 =
+ h__[k + 1 + (k + 1) * h_dim1], abs(d__2));
+ if (tst1 == 0.) {
+ if (k >= *ktop + 1) {
+ tst1 += (d__1 = h__[k + (k - 1) * h_dim1], abs(
+ d__1));
+ }
+ if (k >= *ktop + 2) {
+ tst1 += (d__1 = h__[k + (k - 2) * h_dim1], abs(
+ d__1));
+ }
+ if (k >= *ktop + 3) {
+ tst1 += (d__1 = h__[k + (k - 3) * h_dim1], abs(
+ d__1));
+ }
+ if (k <= *kbot - 2) {
+ tst1 += (d__1 = h__[k + 2 + (k + 1) * h_dim1],
+ abs(d__1));
+ }
+ if (k <= *kbot - 3) {
+ tst1 += (d__1 = h__[k + 3 + (k + 1) * h_dim1],
+ abs(d__1));
+ }
+ if (k <= *kbot - 4) {
+ tst1 += (d__1 = h__[k + 4 + (k + 1) * h_dim1],
+ abs(d__1));
+ }
+ }
+/* Computing MAX */
+ d__2 = smlnum, d__3 = ulp * tst1;
+ if ((d__1 = h__[k + 1 + k * h_dim1], abs(d__1)) <= max(
+ d__2,d__3)) {
+/* Computing MAX */
+ d__3 = (d__1 = h__[k + 1 + k * h_dim1], abs(d__1)),
+ d__4 = (d__2 = h__[k + (k + 1) * h_dim1], abs(
+ d__2));
+ h12 = max(d__3,d__4);
+/* Computing MIN */
+ d__3 = (d__1 = h__[k + 1 + k * h_dim1], abs(d__1)),
+ d__4 = (d__2 = h__[k + (k + 1) * h_dim1], abs(
+ d__2));
+ h21 = min(d__3,d__4);
+/* Computing MAX */
+ d__3 = (d__1 = h__[k + 1 + (k + 1) * h_dim1], abs(
+ d__1)), d__4 = (d__2 = h__[k + k * h_dim1] -
+ h__[k + 1 + (k + 1) * h_dim1], abs(d__2));
+ h11 = max(d__3,d__4);
+/* Computing MIN */
+ d__3 = (d__1 = h__[k + 1 + (k + 1) * h_dim1], abs(
+ d__1)), d__4 = (d__2 = h__[k + k * h_dim1] -
+ h__[k + 1 + (k + 1) * h_dim1], abs(d__2));
+ h22 = min(d__3,d__4);
+ scl = h11 + h12;
+ tst2 = h22 * (h11 / scl);
+
+/* Computing MAX */
+ d__1 = smlnum, d__2 = ulp * tst2;
+ if (tst2 == 0. || h21 * (h12 / scl) <= max(d__1,d__2))
+ {
+ h__[k + 1 + k * h_dim1] = 0.;
+ }
+ }
+ }
+/* L130: */
+ }
+
+/* ==== Fill in the last row of each bulge. ==== */
+
+/* Computing MIN */
+ i__4 = nbmps, i__5 = (*kbot - krcol - 1) / 3;
+ mend = min(i__4,i__5);
+ i__4 = mend;
+ for (m = mtop; m <= i__4; ++m) {
+ k = krcol + (m - 1) * 3;
+ refsum = v[m * v_dim1 + 1] * v[m * v_dim1 + 3] * h__[k + 4 + (
+ k + 3) * h_dim1];
+ h__[k + 4 + (k + 1) * h_dim1] = -refsum;
+ h__[k + 4 + (k + 2) * h_dim1] = -refsum * v[m * v_dim1 + 2];
+ h__[k + 4 + (k + 3) * h_dim1] -= refsum * v[m * v_dim1 + 3];
+/* L140: */
+ }
+
+/* ==== End of near-the-diagonal bulge chase. ==== */
+
+/* L150: */
+ }
+
+/* ==== Use U (if accumulated) to update far-from-diagonal */
+/* . entries in H. If required, use U to update Z as */
+/* . well. ==== */
+
+ if (accum) {
+ if (*wantt) {
+ jtop = 1;
+ jbot = *n;
+ } else {
+ jtop = *ktop;
+ jbot = *kbot;
+ }
+ if (! blk22 || incol < *ktop || ndcol > *kbot || ns <= 2) {
+
+/* ==== Updates not exploiting the 2-by-2 block */
+/* . structure of U. K1 and NU keep track of */
+/* . the location and size of U in the special */
+/* . cases of introducing bulges and chasing */
+/* . bulges off the bottom. In these special */
+/* . cases and in case the number of shifts */
+/* . is NS = 2, there is no 2-by-2 block */
+/* . structure to exploit. ==== */
+
+/* Computing MAX */
+ i__3 = 1, i__4 = *ktop - incol;
+ k1 = max(i__3,i__4);
+/* Computing MAX */
+ i__3 = 0, i__4 = ndcol - *kbot;
+ nu = kdu - max(i__3,i__4) - k1 + 1;
+
+/* ==== Horizontal Multiply ==== */
+
+ i__3 = jbot;
+ i__4 = *nh;
+ for (jcol = min(ndcol,*kbot) + 1; i__4 < 0 ? jcol >= i__3 :
+ jcol <= i__3; jcol += i__4) {
+/* Computing MIN */
+ i__5 = *nh, i__7 = jbot - jcol + 1;
+ jlen = min(i__5,i__7);
+ dgemm_("C", "N", &nu, &jlen, &nu, &c_b8, &u[k1 + k1 *
+ u_dim1], ldu, &h__[incol + k1 + jcol * h_dim1],
+ ldh, &c_b7, &wh[wh_offset], ldwh);
+ dlacpy_("ALL", &nu, &jlen, &wh[wh_offset], ldwh, &h__[
+ incol + k1 + jcol * h_dim1], ldh);
+/* L160: */
+ }
+
+/* ==== Vertical multiply ==== */
+
+ i__4 = max(*ktop,incol) - 1;
+ i__3 = *nv;
+ for (jrow = jtop; i__3 < 0 ? jrow >= i__4 : jrow <= i__4;
+ jrow += i__3) {
+/* Computing MIN */
+ i__5 = *nv, i__7 = max(*ktop,incol) - jrow;
+ jlen = min(i__5,i__7);
+ dgemm_("N", "N", &jlen, &nu, &nu, &c_b8, &h__[jrow + (
+ incol + k1) * h_dim1], ldh, &u[k1 + k1 * u_dim1],
+ ldu, &c_b7, &wv[wv_offset], ldwv);
+ dlacpy_("ALL", &jlen, &nu, &wv[wv_offset], ldwv, &h__[
+ jrow + (incol + k1) * h_dim1], ldh);
+/* L170: */
+ }
+
+/* ==== Z multiply (also vertical) ==== */
+
+ if (*wantz) {
+ i__3 = *ihiz;
+ i__4 = *nv;
+ for (jrow = *iloz; i__4 < 0 ? jrow >= i__3 : jrow <= i__3;
+ jrow += i__4) {
+/* Computing MIN */
+ i__5 = *nv, i__7 = *ihiz - jrow + 1;
+ jlen = min(i__5,i__7);
+ dgemm_("N", "N", &jlen, &nu, &nu, &c_b8, &z__[jrow + (
+ incol + k1) * z_dim1], ldz, &u[k1 + k1 *
+ u_dim1], ldu, &c_b7, &wv[wv_offset], ldwv);
+ dlacpy_("ALL", &jlen, &nu, &wv[wv_offset], ldwv, &z__[
+ jrow + (incol + k1) * z_dim1], ldz)
+ ;
+/* L180: */
+ }
+ }
+ } else {
+
+/* ==== Updates exploiting U's 2-by-2 block structure. */
+/* . (I2, I4, J2, J4 are the last rows and columns */
+/* . of the blocks.) ==== */
+
+ i2 = (kdu + 1) / 2;
+ i4 = kdu;
+ j2 = i4 - i2;
+ j4 = kdu;
+
+/* ==== KZS and KNZ deal with the band of zeros */
+/* . along the diagonal of one of the triangular */
+/* . blocks. ==== */
+
+ kzs = j4 - j2 - (ns + 1);
+ knz = ns + 1;
+
+/* ==== Horizontal multiply ==== */
+
+ i__4 = jbot;
+ i__3 = *nh;
+ for (jcol = min(ndcol,*kbot) + 1; i__3 < 0 ? jcol >= i__4 :
+ jcol <= i__4; jcol += i__3) {
+/* Computing MIN */
+ i__5 = *nh, i__7 = jbot - jcol + 1;
+ jlen = min(i__5,i__7);
+
+/* ==== Copy bottom of H to top+KZS of scratch ==== */
+/* (The first KZS rows get multiplied by zero.) ==== */
+
+ dlacpy_("ALL", &knz, &jlen, &h__[incol + 1 + j2 + jcol *
+ h_dim1], ldh, &wh[kzs + 1 + wh_dim1], ldwh);
+
+/* ==== Multiply by U21' ==== */
+
+ dlaset_("ALL", &kzs, &jlen, &c_b7, &c_b7, &wh[wh_offset],
+ ldwh);
+ dtrmm_("L", "U", "C", "N", &knz, &jlen, &c_b8, &u[j2 + 1
+ + (kzs + 1) * u_dim1], ldu, &wh[kzs + 1 + wh_dim1]
+, ldwh);
+
+/* ==== Multiply top of H by U11' ==== */
+
+ dgemm_("C", "N", &i2, &jlen, &j2, &c_b8, &u[u_offset],
+ ldu, &h__[incol + 1 + jcol * h_dim1], ldh, &c_b8,
+ &wh[wh_offset], ldwh);
+
+/* ==== Copy top of H to bottom of WH ==== */
+
+ dlacpy_("ALL", &j2, &jlen, &h__[incol + 1 + jcol * h_dim1]
+, ldh, &wh[i2 + 1 + wh_dim1], ldwh);
+
+/* ==== Multiply by U21' ==== */
+
+ dtrmm_("L", "L", "C", "N", &j2, &jlen, &c_b8, &u[(i2 + 1)
+ * u_dim1 + 1], ldu, &wh[i2 + 1 + wh_dim1], ldwh);
+
+/* ==== Multiply by U22 ==== */
+
+ i__5 = i4 - i2;
+ i__7 = j4 - j2;
+ dgemm_("C", "N", &i__5, &jlen, &i__7, &c_b8, &u[j2 + 1 + (
+ i2 + 1) * u_dim1], ldu, &h__[incol + 1 + j2 +
+ jcol * h_dim1], ldh, &c_b8, &wh[i2 + 1 + wh_dim1],
+ ldwh);
+
+/* ==== Copy it back ==== */
+
+ dlacpy_("ALL", &kdu, &jlen, &wh[wh_offset], ldwh, &h__[
+ incol + 1 + jcol * h_dim1], ldh);
+/* L190: */
+ }
+
+/* ==== Vertical multiply ==== */
+
+ i__3 = max(incol,*ktop) - 1;
+ i__4 = *nv;
+ for (jrow = jtop; i__4 < 0 ? jrow >= i__3 : jrow <= i__3;
+ jrow += i__4) {
+/* Computing MIN */
+ i__5 = *nv, i__7 = max(incol,*ktop) - jrow;
+ jlen = min(i__5,i__7);
+
+/* ==== Copy right of H to scratch (the first KZS */
+/* . columns get multiplied by zero) ==== */
+
+ dlacpy_("ALL", &jlen, &knz, &h__[jrow + (incol + 1 + j2) *
+ h_dim1], ldh, &wv[(kzs + 1) * wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U21 ==== */
+
+ dlaset_("ALL", &jlen, &kzs, &c_b7, &c_b7, &wv[wv_offset],
+ ldwv);
+ dtrmm_("R", "U", "N", "N", &jlen, &knz, &c_b8, &u[j2 + 1
+ + (kzs + 1) * u_dim1], ldu, &wv[(kzs + 1) *
+ wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U11 ==== */
+
+ dgemm_("N", "N", &jlen, &i2, &j2, &c_b8, &h__[jrow + (
+ incol + 1) * h_dim1], ldh, &u[u_offset], ldu, &
+ c_b8, &wv[wv_offset], ldwv);
+
+/* ==== Copy left of H to right of scratch ==== */
+
+ dlacpy_("ALL", &jlen, &j2, &h__[jrow + (incol + 1) *
+ h_dim1], ldh, &wv[(i2 + 1) * wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U21 ==== */
+
+ i__5 = i4 - i2;
+ dtrmm_("R", "L", "N", "N", &jlen, &i__5, &c_b8, &u[(i2 +
+ 1) * u_dim1 + 1], ldu, &wv[(i2 + 1) * wv_dim1 + 1]
+, ldwv);
+
+/* ==== Multiply by U22 ==== */
+
+ i__5 = i4 - i2;
+ i__7 = j4 - j2;
+ dgemm_("N", "N", &jlen, &i__5, &i__7, &c_b8, &h__[jrow + (
+ incol + 1 + j2) * h_dim1], ldh, &u[j2 + 1 + (i2 +
+ 1) * u_dim1], ldu, &c_b8, &wv[(i2 + 1) * wv_dim1
+ + 1], ldwv);
+
+/* ==== Copy it back ==== */
+
+ dlacpy_("ALL", &jlen, &kdu, &wv[wv_offset], ldwv, &h__[
+ jrow + (incol + 1) * h_dim1], ldh);
+/* L200: */
+ }
+
+/* ==== Multiply Z (also vertical) ==== */
+
+ if (*wantz) {
+ i__4 = *ihiz;
+ i__3 = *nv;
+ for (jrow = *iloz; i__3 < 0 ? jrow >= i__4 : jrow <= i__4;
+ jrow += i__3) {
+/* Computing MIN */
+ i__5 = *nv, i__7 = *ihiz - jrow + 1;
+ jlen = min(i__5,i__7);
+
+/* ==== Copy right of Z to left of scratch (first */
+/* . KZS columns get multiplied by zero) ==== */
+
+ dlacpy_("ALL", &jlen, &knz, &z__[jrow + (incol + 1 +
+ j2) * z_dim1], ldz, &wv[(kzs + 1) * wv_dim1 +
+ 1], ldwv);
+
+/* ==== Multiply by U12 ==== */
+
+ dlaset_("ALL", &jlen, &kzs, &c_b7, &c_b7, &wv[
+ wv_offset], ldwv);
+ dtrmm_("R", "U", "N", "N", &jlen, &knz, &c_b8, &u[j2
+ + 1 + (kzs + 1) * u_dim1], ldu, &wv[(kzs + 1)
+ * wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U11 ==== */
+
+ dgemm_("N", "N", &jlen, &i2, &j2, &c_b8, &z__[jrow + (
+ incol + 1) * z_dim1], ldz, &u[u_offset], ldu,
+ &c_b8, &wv[wv_offset], ldwv);
+
+/* ==== Copy left of Z to right of scratch ==== */
+
+ dlacpy_("ALL", &jlen, &j2, &z__[jrow + (incol + 1) *
+ z_dim1], ldz, &wv[(i2 + 1) * wv_dim1 + 1],
+ ldwv);
+
+/* ==== Multiply by U21 ==== */
+
+ i__5 = i4 - i2;
+ dtrmm_("R", "L", "N", "N", &jlen, &i__5, &c_b8, &u[(
+ i2 + 1) * u_dim1 + 1], ldu, &wv[(i2 + 1) *
+ wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U22 ==== */
+
+ i__5 = i4 - i2;
+ i__7 = j4 - j2;
+ dgemm_("N", "N", &jlen, &i__5, &i__7, &c_b8, &z__[
+ jrow + (incol + 1 + j2) * z_dim1], ldz, &u[j2
+ + 1 + (i2 + 1) * u_dim1], ldu, &c_b8, &wv[(i2
+ + 1) * wv_dim1 + 1], ldwv);
+
+/* ==== Copy the result back to Z ==== */
+
+ dlacpy_("ALL", &jlen, &kdu, &wv[wv_offset], ldwv, &
+ z__[jrow + (incol + 1) * z_dim1], ldz);
+/* L210: */
+ }
+ }
+ }
+ }
+/* L220: */
+ }
+
+/* ==== End of DLAQR5 ==== */
+
+ return 0;
+} /* dlaqr5_ */
diff --git a/contrib/libs/clapack/dlaqsb.c b/contrib/libs/clapack/dlaqsb.c
new file mode 100644
index 0000000000..c7f457e307
--- /dev/null
+++ b/contrib/libs/clapack/dlaqsb.c
@@ -0,0 +1,185 @@
+/* dlaqsb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlaqsb_(char *uplo, integer *n, integer *kd, doublereal *
+ ab, integer *ldab, doublereal *s, doublereal *scond, doublereal *amax,
+ char *equed)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal cj, large;
+ extern logical lsame_(char *, char *);
+ doublereal small;
+ extern doublereal dlamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAQSB equilibrates a symmetric band matrix A using the scaling */
+/* factors in the vector S. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of super-diagonals of the matrix A if UPLO = 'U', */
+/* or the number of sub-diagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, if INFO = 0, the triangular factor U or L from the */
+/* Cholesky factorization A = U'*U or A = L*L' of the band */
+/* matrix A, in the same storage format as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* S (input) DOUBLE PRECISION array, dimension (N) */
+/* The scale factors for A. */
+
+/* SCOND (input) DOUBLE PRECISION */
+/* Ratio of the smallest S(i) to the largest S(i). */
+
+/* AMAX (input) DOUBLE PRECISION */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies whether or not equilibration was done. */
+/* = 'N': No equilibration. */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if scaling should be done */
+/* based on the ratio of the scaling factors. If SCOND < THRESH, */
+/* scaling is done. */
+
+/* LARGE and SMALL are threshold values used to decide if scaling should */
+/* be done based on the absolute size of the largest matrix element. */
+/* If AMAX > LARGE or AMAX < SMALL, scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --s;
+
+ /* Function Body */
+ if (*n <= 0) {
+ *(unsigned char *)equed = 'N';
+ return 0;
+ }
+
+/* Initialize LARGE and SMALL. */
+
+ small = dlamch_("Safe minimum") / dlamch_("Precision");
+ large = 1. / small;
+
+ if (*scond >= .1 && *amax >= small && *amax <= large) {
+
+/* No equilibration */
+
+ *(unsigned char *)equed = 'N';
+ } else {
+
+/* Replace A by diag(S) * A * diag(S). */
+
+ if (lsame_(uplo, "U")) {
+
+/* Upper triangle of A is stored in band format. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *kd;
+ i__4 = j;
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ ab[*kd + 1 + i__ - j + j * ab_dim1] = cj * s[i__] * ab[*
+ kd + 1 + i__ - j + j * ab_dim1];
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+
+/* Lower triangle of A is stored. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+/* Computing MIN */
+ i__2 = *n, i__3 = j + *kd;
+ i__4 = min(i__2,i__3);
+ for (i__ = j; i__ <= i__4; ++i__) {
+ ab[i__ + 1 - j + j * ab_dim1] = cj * s[i__] * ab[i__ + 1
+ - j + j * ab_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ *(unsigned char *)equed = 'Y';
+ }
+
+ return 0;
+
+/* End of DLAQSB */
+
+} /* dlaqsb_ */
diff --git a/contrib/libs/clapack/dlaqsp.c b/contrib/libs/clapack/dlaqsp.c
new file mode 100644
index 0000000000..dd22ceae5b
--- /dev/null
+++ b/contrib/libs/clapack/dlaqsp.c
@@ -0,0 +1,169 @@
+/* dlaqsp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlaqsp_(char *uplo, integer *n, doublereal *ap,
+ doublereal *s, doublereal *scond, doublereal *amax, char *equed)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, jc;
+ doublereal cj, large;
+ extern logical lsame_(char *, char *);
+ doublereal small;
+ extern doublereal dlamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAQSP equilibrates a symmetric matrix A using the scaling factors */
+/* in the vector S. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the equilibrated matrix: diag(S) * A * diag(S), in */
+/* the same storage format as A. */
+
+/* S (input) DOUBLE PRECISION array, dimension (N) */
+/* The scale factors for A. */
+
+/* SCOND (input) DOUBLE PRECISION */
+/* Ratio of the smallest S(i) to the largest S(i). */
+
+/* AMAX (input) DOUBLE PRECISION */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies whether or not equilibration was done. */
+/* = 'N': No equilibration. */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if scaling should be done */
+/* based on the ratio of the scaling factors. If SCOND < THRESH, */
+/* scaling is done. */
+
+/* LARGE and SMALL are threshold values used to decide if scaling should */
+/* be done based on the absolute size of the largest matrix element. */
+/* If AMAX > LARGE or AMAX < SMALL, scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ --s;
+ --ap;
+
+ /* Function Body */
+ if (*n <= 0) {
+ *(unsigned char *)equed = 'N';
+ return 0;
+ }
+
+/* Initialize LARGE and SMALL. */
+
+ small = dlamch_("Safe minimum") / dlamch_("Precision");
+ large = 1. / small;
+
+ if (*scond >= .1 && *amax >= small && *amax <= large) {
+
+/* No equilibration */
+
+ *(unsigned char *)equed = 'N';
+ } else {
+
+/* Replace A by diag(S) * A * diag(S). */
+
+ if (lsame_(uplo, "U")) {
+
+/* Upper triangle of A is stored. */
+
+ jc = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ ap[jc + i__ - 1] = cj * s[i__] * ap[jc + i__ - 1];
+/* L10: */
+ }
+ jc += j;
+/* L20: */
+ }
+ } else {
+
+/* Lower triangle of A is stored. */
+
+ jc = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ ap[jc + i__ - j] = cj * s[i__] * ap[jc + i__ - j];
+/* L30: */
+ }
+ jc = jc + *n - j + 1;
+/* L40: */
+ }
+ }
+ *(unsigned char *)equed = 'Y';
+ }
+
+ return 0;
+
+/* End of DLAQSP */
+
+} /* dlaqsp_ */
diff --git a/contrib/libs/clapack/dlaqsy.c b/contrib/libs/clapack/dlaqsy.c
new file mode 100644
index 0000000000..698f9a3342
--- /dev/null
+++ b/contrib/libs/clapack/dlaqsy.c
@@ -0,0 +1,172 @@
+/* dlaqsy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlaqsy_(char *uplo, integer *n, doublereal *a, integer *
+ lda, doublereal *s, doublereal *scond, doublereal *amax, char *equed)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal cj, large;
+ extern logical lsame_(char *, char *);
+ doublereal small;
+ extern doublereal dlamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAQSY equilibrates a symmetric matrix A using the scaling factors */
+/* in the vector S. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if EQUED = 'Y', the equilibrated matrix: */
+/* diag(S) * A * diag(S). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(N,1). */
+
+/* S (input) DOUBLE PRECISION array, dimension (N) */
+/* The scale factors for A. */
+
+/* SCOND (input) DOUBLE PRECISION */
+/* Ratio of the smallest S(i) to the largest S(i). */
+
+/* AMAX (input) DOUBLE PRECISION */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies whether or not equilibration was done. */
+/* = 'N': No equilibration. */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if scaling should be done */
+/* based on the ratio of the scaling factors. If SCOND < THRESH, */
+/* scaling is done. */
+
+/* LARGE and SMALL are threshold values used to decide if scaling should */
+/* be done based on the absolute size of the largest matrix element. */
+/* If AMAX > LARGE or AMAX < SMALL, scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+
+ /* Function Body */
+ if (*n <= 0) {
+ *(unsigned char *)equed = 'N';
+ return 0;
+ }
+
+/* Initialize LARGE and SMALL. */
+
+ small = dlamch_("Safe minimum") / dlamch_("Precision");
+ large = 1. / small;
+
+ if (*scond >= .1 && *amax >= small && *amax <= large) {
+
+/* No equilibration */
+
+ *(unsigned char *)equed = 'N';
+ } else {
+
+/* Replace A by diag(S) * A * diag(S). */
+
+ if (lsame_(uplo, "U")) {
+
+/* Upper triangle of A is stored. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = cj * s[i__] * a[i__ + j * a_dim1];
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+
+/* Lower triangle of A is stored. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = cj * s[i__] * a[i__ + j * a_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ *(unsigned char *)equed = 'Y';
+ }
+
+ return 0;
+
+/* End of DLAQSY */
+
+} /* dlaqsy_ */
diff --git a/contrib/libs/clapack/dlaqtr.c b/contrib/libs/clapack/dlaqtr.c
new file mode 100644
index 0000000000..e8c7e93fcb
--- /dev/null
+++ b/contrib/libs/clapack/dlaqtr.c
@@ -0,0 +1,832 @@
+/* dlaqtr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static logical c_false = FALSE_;
+static integer c__2 = 2;
+static doublereal c_b21 = 1.;
+static doublereal c_b25 = 0.;
+static logical c_true = TRUE_;
+
+/* Subroutine */ int dlaqtr_(logical *ltran, logical *lreal, integer *n,
+ doublereal *t, integer *ldt, doublereal *b, doublereal *w, doublereal
+ *scale, doublereal *x, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, i__1, i__2;
+ doublereal d__1, d__2, d__3, d__4, d__5, d__6;
+
+ /* Local variables */
+ doublereal d__[4] /* was [2][2] */;
+ integer i__, j, k;
+ doublereal v[4] /* was [2][2] */, z__;
+ integer j1, j2, n1, n2;
+ doublereal si, xj, sr, rec, eps, tjj, tmp;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ integer ierr;
+ doublereal smin, xmax;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern doublereal dasum_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *);
+ integer jnext;
+ doublereal sminw, xnorm;
+ extern /* Subroutine */ int dlaln2_(logical *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *, doublereal *, doublereal *
+, doublereal *, integer *, doublereal *, doublereal *, integer *);
+ extern doublereal dlamch_(char *), dlange_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ doublereal scaloc;
+ extern /* Subroutine */ int dladiv_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *);
+ doublereal bignum;
+ logical notran;
+ doublereal smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAQTR solves the real quasi-triangular system */
+
+/* op(T)*p = scale*c, if LREAL = .TRUE. */
+
+/* or the complex quasi-triangular systems */
+
+/* op(T + iB)*(p+iq) = scale*(c+id), if LREAL = .FALSE. */
+
+/* in real arithmetic, where T is upper quasi-triangular. */
+/* If LREAL = .FALSE., then the first diagonal block of T must be */
+/* 1 by 1, B is the specially structured matrix */
+
+/* B = [ b(1) b(2) ... b(n) ] */
+/* [ w ] */
+/* [ w ] */
+/* [ . ] */
+/* [ w ] */
+
+/* op(A) = A or A', A' denotes the conjugate transpose of */
+/* matrix A. */
+
+/* On input, X = [ c ]. On output, X = [ p ]. */
+/* [ d ] [ q ] */
+
+/* This subroutine is designed for the condition number estimation */
+/* in routine DTRSNA. */
+
+/* Arguments */
+/* ========= */
+
+/* LTRAN (input) LOGICAL */
+/* On entry, LTRAN specifies the option of conjugate transpose: */
+/* = .FALSE., op(T+i*B) = T+i*B, */
+/* = .TRUE., op(T+i*B) = (T+i*B)'. */
+
+/* LREAL (input) LOGICAL */
+/* On entry, LREAL specifies the input matrix structure: */
+/* = .FALSE., the input is complex */
+/* = .TRUE., the input is real */
+
+/* N (input) INTEGER */
+/* On entry, N specifies the order of T+i*B. N >= 0. */
+
+/* T (input) DOUBLE PRECISION array, dimension (LDT,N) */
+/* On entry, T contains a matrix in Schur canonical form. */
+/* If LREAL = .FALSE., then the first diagonal block of T mu */
+/* be 1 by 1. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the matrix T. LDT >= max(1,N). */
+
+/* B (input) DOUBLE PRECISION array, dimension (N) */
+/* On entry, B contains the elements to form the matrix */
+/* B as described above. */
+/* If LREAL = .TRUE., B is not referenced. */
+
+/* W (input) DOUBLE PRECISION */
+/* On entry, W is the diagonal element of the matrix B. */
+/* If LREAL = .TRUE., W is not referenced. */
+
+/* SCALE (output) DOUBLE PRECISION */
+/* On exit, SCALE is the scale factor. */
+
+/* X (input/output) DOUBLE PRECISION array, dimension (2*N) */
+/* On entry, X contains the right hand side of the system. */
+/* On exit, X is overwritten by the solution. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* On exit, INFO is set to */
+/* 0: successful exit. */
+/* 1: the some diagonal 1 by 1 block has been perturbed by */
+/* a small number SMIN to keep nonsingularity. */
+/* 2: the some diagonal 2 by 2 block has been perturbed by */
+/* a small number in DLALN2 to keep nonsingularity. */
+/* NOTE: In the interests of speed, this routine does not */
+/* check the inputs for errors. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Do not test the input parameters for errors */
+
+ /* Parameter adjustments */
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ --b;
+ --x;
+ --work;
+
+ /* Function Body */
+ notran = ! (*ltran);
+ *info = 0;
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Set constants to control overflow */
+
+ eps = dlamch_("P");
+ smlnum = dlamch_("S") / eps;
+ bignum = 1. / smlnum;
+
+ xnorm = dlange_("M", n, n, &t[t_offset], ldt, d__);
+ if (! (*lreal)) {
+/* Computing MAX */
+ d__1 = xnorm, d__2 = abs(*w), d__1 = max(d__1,d__2), d__2 = dlange_(
+ "M", n, &c__1, &b[1], n, d__);
+ xnorm = max(d__1,d__2);
+ }
+/* Computing MAX */
+ d__1 = smlnum, d__2 = eps * xnorm;
+ smin = max(d__1,d__2);
+
+/* Compute 1-norm of each column of strictly upper triangular */
+/* part of T to control overflow in triangular solver. */
+
+ work[1] = 0.;
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j - 1;
+ work[j] = dasum_(&i__2, &t[j * t_dim1 + 1], &c__1);
+/* L10: */
+ }
+
+ if (! (*lreal)) {
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ work[i__] += (d__1 = b[i__], abs(d__1));
+/* L20: */
+ }
+ }
+
+ n2 = *n << 1;
+ n1 = *n;
+ if (! (*lreal)) {
+ n1 = n2;
+ }
+ k = idamax_(&n1, &x[1], &c__1);
+ xmax = (d__1 = x[k], abs(d__1));
+ *scale = 1.;
+
+ if (xmax > bignum) {
+ *scale = bignum / xmax;
+ dscal_(&n1, scale, &x[1], &c__1);
+ xmax = bignum;
+ }
+
+ if (*lreal) {
+
+ if (notran) {
+
+/* Solve T*p = scale*c */
+
+ jnext = *n;
+ for (j = *n; j >= 1; --j) {
+ if (j > jnext) {
+ goto L30;
+ }
+ j1 = j;
+ j2 = j;
+ jnext = j - 1;
+ if (j > 1) {
+ if (t[j + (j - 1) * t_dim1] != 0.) {
+ j1 = j - 1;
+ jnext = j - 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* Meet 1 by 1 diagonal block */
+
+/* Scale to avoid overflow when computing */
+/* x(j) = b(j)/T(j,j) */
+
+ xj = (d__1 = x[j1], abs(d__1));
+ tjj = (d__1 = t[j1 + j1 * t_dim1], abs(d__1));
+ tmp = t[j1 + j1 * t_dim1];
+ if (tjj < smin) {
+ tmp = smin;
+ tjj = smin;
+ *info = 1;
+ }
+
+ if (xj == 0.) {
+ goto L30;
+ }
+
+ if (tjj < 1.) {
+ if (xj > bignum * tjj) {
+ rec = 1. / xj;
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ x[j1] /= tmp;
+ xj = (d__1 = x[j1], abs(d__1));
+
+/* Scale x if necessary to avoid overflow when adding a */
+/* multiple of column j1 of T. */
+
+ if (xj > 1.) {
+ rec = 1. / xj;
+ if (work[j1] > (bignum - xmax) * rec) {
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ }
+ if (j1 > 1) {
+ i__1 = j1 - 1;
+ d__1 = -x[j1];
+ daxpy_(&i__1, &d__1, &t[j1 * t_dim1 + 1], &c__1, &x[1]
+, &c__1);
+ i__1 = j1 - 1;
+ k = idamax_(&i__1, &x[1], &c__1);
+ xmax = (d__1 = x[k], abs(d__1));
+ }
+
+ } else {
+
+/* Meet 2 by 2 diagonal block */
+
+/* Call 2 by 2 linear system solve, to take */
+/* care of possible overflow by scaling factor. */
+
+ d__[0] = x[j1];
+ d__[1] = x[j2];
+ dlaln2_(&c_false, &c__2, &c__1, &smin, &c_b21, &t[j1 + j1
+ * t_dim1], ldt, &c_b21, &c_b21, d__, &c__2, &
+ c_b25, &c_b25, v, &c__2, &scaloc, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 2;
+ }
+
+ if (scaloc != 1.) {
+ dscal_(n, &scaloc, &x[1], &c__1);
+ *scale *= scaloc;
+ }
+ x[j1] = v[0];
+ x[j2] = v[1];
+
+/* Scale V(1,1) (= X(J1)) and/or V(2,1) (=X(J2)) */
+/* to avoid overflow in updating right-hand side. */
+
+/* Computing MAX */
+ d__1 = abs(v[0]), d__2 = abs(v[1]);
+ xj = max(d__1,d__2);
+ if (xj > 1.) {
+ rec = 1. / xj;
+/* Computing MAX */
+ d__1 = work[j1], d__2 = work[j2];
+ if (max(d__1,d__2) > (bignum - xmax) * rec) {
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ }
+
+/* Update right-hand side */
+
+ if (j1 > 1) {
+ i__1 = j1 - 1;
+ d__1 = -x[j1];
+ daxpy_(&i__1, &d__1, &t[j1 * t_dim1 + 1], &c__1, &x[1]
+, &c__1);
+ i__1 = j1 - 1;
+ d__1 = -x[j2];
+ daxpy_(&i__1, &d__1, &t[j2 * t_dim1 + 1], &c__1, &x[1]
+, &c__1);
+ i__1 = j1 - 1;
+ k = idamax_(&i__1, &x[1], &c__1);
+ xmax = (d__1 = x[k], abs(d__1));
+ }
+
+ }
+
+L30:
+ ;
+ }
+
+ } else {
+
+/* Solve T'*p = scale*c */
+
+ jnext = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (j < jnext) {
+ goto L40;
+ }
+ j1 = j;
+ j2 = j;
+ jnext = j + 1;
+ if (j < *n) {
+ if (t[j + 1 + j * t_dim1] != 0.) {
+ j2 = j + 1;
+ jnext = j + 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1 by 1 diagonal block */
+
+/* Scale if necessary to avoid overflow in forming the */
+/* right-hand side element by inner product. */
+
+ xj = (d__1 = x[j1], abs(d__1));
+ if (xmax > 1.) {
+ rec = 1. / xmax;
+ if (work[j1] > (bignum - xj) * rec) {
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ i__2 = j1 - 1;
+ x[j1] -= ddot_(&i__2, &t[j1 * t_dim1 + 1], &c__1, &x[1], &
+ c__1);
+
+ xj = (d__1 = x[j1], abs(d__1));
+ tjj = (d__1 = t[j1 + j1 * t_dim1], abs(d__1));
+ tmp = t[j1 + j1 * t_dim1];
+ if (tjj < smin) {
+ tmp = smin;
+ tjj = smin;
+ *info = 1;
+ }
+
+ if (tjj < 1.) {
+ if (xj > bignum * tjj) {
+ rec = 1. / xj;
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ x[j1] /= tmp;
+/* Computing MAX */
+ d__2 = xmax, d__3 = (d__1 = x[j1], abs(d__1));
+ xmax = max(d__2,d__3);
+
+ } else {
+
+/* 2 by 2 diagonal block */
+
+/* Scale if necessary to avoid overflow in forming the */
+/* right-hand side elements by inner product. */
+
+/* Computing MAX */
+ d__3 = (d__1 = x[j1], abs(d__1)), d__4 = (d__2 = x[j2],
+ abs(d__2));
+ xj = max(d__3,d__4);
+ if (xmax > 1.) {
+ rec = 1. / xmax;
+/* Computing MAX */
+ d__1 = work[j2], d__2 = work[j1];
+ if (max(d__1,d__2) > (bignum - xj) * rec) {
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ i__2 = j1 - 1;
+ d__[0] = x[j1] - ddot_(&i__2, &t[j1 * t_dim1 + 1], &c__1,
+ &x[1], &c__1);
+ i__2 = j1 - 1;
+ d__[1] = x[j2] - ddot_(&i__2, &t[j2 * t_dim1 + 1], &c__1,
+ &x[1], &c__1);
+
+ dlaln2_(&c_true, &c__2, &c__1, &smin, &c_b21, &t[j1 + j1 *
+ t_dim1], ldt, &c_b21, &c_b21, d__, &c__2, &c_b25,
+ &c_b25, v, &c__2, &scaloc, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 2;
+ }
+
+ if (scaloc != 1.) {
+ dscal_(n, &scaloc, &x[1], &c__1);
+ *scale *= scaloc;
+ }
+ x[j1] = v[0];
+ x[j2] = v[1];
+/* Computing MAX */
+ d__3 = (d__1 = x[j1], abs(d__1)), d__4 = (d__2 = x[j2],
+ abs(d__2)), d__3 = max(d__3,d__4);
+ xmax = max(d__3,xmax);
+
+ }
+L40:
+ ;
+ }
+ }
+
+ } else {
+
+/* Computing MAX */
+ d__1 = eps * abs(*w);
+ sminw = max(d__1,smin);
+ if (notran) {
+
+/* Solve (T + iB)*(p+iq) = c+id */
+
+ jnext = *n;
+ for (j = *n; j >= 1; --j) {
+ if (j > jnext) {
+ goto L70;
+ }
+ j1 = j;
+ j2 = j;
+ jnext = j - 1;
+ if (j > 1) {
+ if (t[j + (j - 1) * t_dim1] != 0.) {
+ j1 = j - 1;
+ jnext = j - 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1 by 1 diagonal block */
+
+/* Scale if necessary to avoid overflow in division */
+
+ z__ = *w;
+ if (j1 == 1) {
+ z__ = b[1];
+ }
+ xj = (d__1 = x[j1], abs(d__1)) + (d__2 = x[*n + j1], abs(
+ d__2));
+ tjj = (d__1 = t[j1 + j1 * t_dim1], abs(d__1)) + abs(z__);
+ tmp = t[j1 + j1 * t_dim1];
+ if (tjj < sminw) {
+ tmp = sminw;
+ tjj = sminw;
+ *info = 1;
+ }
+
+ if (xj == 0.) {
+ goto L70;
+ }
+
+ if (tjj < 1.) {
+ if (xj > bignum * tjj) {
+ rec = 1. / xj;
+ dscal_(&n2, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ dladiv_(&x[j1], &x[*n + j1], &tmp, &z__, &sr, &si);
+ x[j1] = sr;
+ x[*n + j1] = si;
+ xj = (d__1 = x[j1], abs(d__1)) + (d__2 = x[*n + j1], abs(
+ d__2));
+
+/* Scale x if necessary to avoid overflow when adding a */
+/* multiple of column j1 of T. */
+
+ if (xj > 1.) {
+ rec = 1. / xj;
+ if (work[j1] > (bignum - xmax) * rec) {
+ dscal_(&n2, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ }
+
+ if (j1 > 1) {
+ i__1 = j1 - 1;
+ d__1 = -x[j1];
+ daxpy_(&i__1, &d__1, &t[j1 * t_dim1 + 1], &c__1, &x[1]
+, &c__1);
+ i__1 = j1 - 1;
+ d__1 = -x[*n + j1];
+ daxpy_(&i__1, &d__1, &t[j1 * t_dim1 + 1], &c__1, &x[*
+ n + 1], &c__1);
+
+ x[1] += b[j1] * x[*n + j1];
+ x[*n + 1] -= b[j1] * x[j1];
+
+ xmax = 0.;
+ i__1 = j1 - 1;
+ for (k = 1; k <= i__1; ++k) {
+/* Computing MAX */
+ d__3 = xmax, d__4 = (d__1 = x[k], abs(d__1)) + (
+ d__2 = x[k + *n], abs(d__2));
+ xmax = max(d__3,d__4);
+/* L50: */
+ }
+ }
+
+ } else {
+
+/* Meet 2 by 2 diagonal block */
+
+ d__[0] = x[j1];
+ d__[1] = x[j2];
+ d__[2] = x[*n + j1];
+ d__[3] = x[*n + j2];
+ d__1 = -(*w);
+ dlaln2_(&c_false, &c__2, &c__2, &sminw, &c_b21, &t[j1 +
+ j1 * t_dim1], ldt, &c_b21, &c_b21, d__, &c__2, &
+ c_b25, &d__1, v, &c__2, &scaloc, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 2;
+ }
+
+ if (scaloc != 1.) {
+ i__1 = *n << 1;
+ dscal_(&i__1, &scaloc, &x[1], &c__1);
+ *scale = scaloc * *scale;
+ }
+ x[j1] = v[0];
+ x[j2] = v[1];
+ x[*n + j1] = v[2];
+ x[*n + j2] = v[3];
+
+/* Scale X(J1), .... to avoid overflow in */
+/* updating right hand side. */
+
+/* Computing MAX */
+ d__1 = abs(v[0]) + abs(v[2]), d__2 = abs(v[1]) + abs(v[3])
+ ;
+ xj = max(d__1,d__2);
+ if (xj > 1.) {
+ rec = 1. / xj;
+/* Computing MAX */
+ d__1 = work[j1], d__2 = work[j2];
+ if (max(d__1,d__2) > (bignum - xmax) * rec) {
+ dscal_(&n2, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ }
+
+/* Update the right-hand side. */
+
+ if (j1 > 1) {
+ i__1 = j1 - 1;
+ d__1 = -x[j1];
+ daxpy_(&i__1, &d__1, &t[j1 * t_dim1 + 1], &c__1, &x[1]
+, &c__1);
+ i__1 = j1 - 1;
+ d__1 = -x[j2];
+ daxpy_(&i__1, &d__1, &t[j2 * t_dim1 + 1], &c__1, &x[1]
+, &c__1);
+
+ i__1 = j1 - 1;
+ d__1 = -x[*n + j1];
+ daxpy_(&i__1, &d__1, &t[j1 * t_dim1 + 1], &c__1, &x[*
+ n + 1], &c__1);
+ i__1 = j1 - 1;
+ d__1 = -x[*n + j2];
+ daxpy_(&i__1, &d__1, &t[j2 * t_dim1 + 1], &c__1, &x[*
+ n + 1], &c__1);
+
+ x[1] = x[1] + b[j1] * x[*n + j1] + b[j2] * x[*n + j2];
+ x[*n + 1] = x[*n + 1] - b[j1] * x[j1] - b[j2] * x[j2];
+
+ xmax = 0.;
+ i__1 = j1 - 1;
+ for (k = 1; k <= i__1; ++k) {
+/* Computing MAX */
+ d__3 = (d__1 = x[k], abs(d__1)) + (d__2 = x[k + *
+ n], abs(d__2));
+ xmax = max(d__3,xmax);
+/* L60: */
+ }
+ }
+
+ }
+L70:
+ ;
+ }
+
+ } else {
+
+/* Solve (T + iB)'*(p+iq) = c+id */
+
+ jnext = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (j < jnext) {
+ goto L80;
+ }
+ j1 = j;
+ j2 = j;
+ jnext = j + 1;
+ if (j < *n) {
+ if (t[j + 1 + j * t_dim1] != 0.) {
+ j2 = j + 1;
+ jnext = j + 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1 by 1 diagonal block */
+
+/* Scale if necessary to avoid overflow in forming the */
+/* right-hand side element by inner product. */
+
+ xj = (d__1 = x[j1], abs(d__1)) + (d__2 = x[j1 + *n], abs(
+ d__2));
+ if (xmax > 1.) {
+ rec = 1. / xmax;
+ if (work[j1] > (bignum - xj) * rec) {
+ dscal_(&n2, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ i__2 = j1 - 1;
+ x[j1] -= ddot_(&i__2, &t[j1 * t_dim1 + 1], &c__1, &x[1], &
+ c__1);
+ i__2 = j1 - 1;
+ x[*n + j1] -= ddot_(&i__2, &t[j1 * t_dim1 + 1], &c__1, &x[
+ *n + 1], &c__1);
+ if (j1 > 1) {
+ x[j1] -= b[j1] * x[*n + 1];
+ x[*n + j1] += b[j1] * x[1];
+ }
+ xj = (d__1 = x[j1], abs(d__1)) + (d__2 = x[j1 + *n], abs(
+ d__2));
+
+ z__ = *w;
+ if (j1 == 1) {
+ z__ = b[1];
+ }
+
+/* Scale if necessary to avoid overflow in */
+/* complex division */
+
+ tjj = (d__1 = t[j1 + j1 * t_dim1], abs(d__1)) + abs(z__);
+ tmp = t[j1 + j1 * t_dim1];
+ if (tjj < sminw) {
+ tmp = sminw;
+ tjj = sminw;
+ *info = 1;
+ }
+
+ if (tjj < 1.) {
+ if (xj > bignum * tjj) {
+ rec = 1. / xj;
+ dscal_(&n2, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ d__1 = -z__;
+ dladiv_(&x[j1], &x[*n + j1], &tmp, &d__1, &sr, &si);
+ x[j1] = sr;
+ x[j1 + *n] = si;
+/* Computing MAX */
+ d__3 = (d__1 = x[j1], abs(d__1)) + (d__2 = x[j1 + *n],
+ abs(d__2));
+ xmax = max(d__3,xmax);
+
+ } else {
+
+/* 2 by 2 diagonal block */
+
+/* Scale if necessary to avoid overflow in forming the */
+/* right-hand side element by inner product. */
+
+/* Computing MAX */
+ d__5 = (d__1 = x[j1], abs(d__1)) + (d__2 = x[*n + j1],
+ abs(d__2)), d__6 = (d__3 = x[j2], abs(d__3)) + (
+ d__4 = x[*n + j2], abs(d__4));
+ xj = max(d__5,d__6);
+ if (xmax > 1.) {
+ rec = 1. / xmax;
+/* Computing MAX */
+ d__1 = work[j1], d__2 = work[j2];
+ if (max(d__1,d__2) > (bignum - xj) / xmax) {
+ dscal_(&n2, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ i__2 = j1 - 1;
+ d__[0] = x[j1] - ddot_(&i__2, &t[j1 * t_dim1 + 1], &c__1,
+ &x[1], &c__1);
+ i__2 = j1 - 1;
+ d__[1] = x[j2] - ddot_(&i__2, &t[j2 * t_dim1 + 1], &c__1,
+ &x[1], &c__1);
+ i__2 = j1 - 1;
+ d__[2] = x[*n + j1] - ddot_(&i__2, &t[j1 * t_dim1 + 1], &
+ c__1, &x[*n + 1], &c__1);
+ i__2 = j1 - 1;
+ d__[3] = x[*n + j2] - ddot_(&i__2, &t[j2 * t_dim1 + 1], &
+ c__1, &x[*n + 1], &c__1);
+ d__[0] -= b[j1] * x[*n + 1];
+ d__[1] -= b[j2] * x[*n + 1];
+ d__[2] += b[j1] * x[1];
+ d__[3] += b[j2] * x[1];
+
+ dlaln2_(&c_true, &c__2, &c__2, &sminw, &c_b21, &t[j1 + j1
+ * t_dim1], ldt, &c_b21, &c_b21, d__, &c__2, &
+ c_b25, w, v, &c__2, &scaloc, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 2;
+ }
+
+ if (scaloc != 1.) {
+ dscal_(&n2, &scaloc, &x[1], &c__1);
+ *scale = scaloc * *scale;
+ }
+ x[j1] = v[0];
+ x[j2] = v[1];
+ x[*n + j1] = v[2];
+ x[*n + j2] = v[3];
+/* Computing MAX */
+ d__5 = (d__1 = x[j1], abs(d__1)) + (d__2 = x[*n + j1],
+ abs(d__2)), d__6 = (d__3 = x[j2], abs(d__3)) + (
+ d__4 = x[*n + j2], abs(d__4)), d__5 = max(d__5,
+ d__6);
+ xmax = max(d__5,xmax);
+
+ }
+
+L80:
+ ;
+ }
+
+ }
+
+ }
+
+ return 0;
+
+/* End of DLAQTR */
+
+} /* dlaqtr_ */
diff --git a/contrib/libs/clapack/dlar1v.c b/contrib/libs/clapack/dlar1v.c
new file mode 100644
index 0000000000..2b3573e8ce
--- /dev/null
+++ b/contrib/libs/clapack/dlar1v.c
@@ -0,0 +1,441 @@
+/* dlar1v.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlar1v_(integer *n, integer *b1, integer *bn, doublereal
+ *lambda, doublereal *d__, doublereal *l, doublereal *ld, doublereal *
+ lld, doublereal *pivmin, doublereal *gaptol, doublereal *z__, logical
+ *wantnc, integer *negcnt, doublereal *ztz, doublereal *mingma,
+ integer *r__, integer *isuppz, doublereal *nrminv, doublereal *resid,
+ doublereal *rqcorr, doublereal *work)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ doublereal s;
+ integer r1, r2;
+ doublereal eps, tmp;
+ integer neg1, neg2, indp, inds;
+ doublereal dplus;
+ extern doublereal dlamch_(char *);
+ extern logical disnan_(doublereal *);
+ integer indlpl, indumn;
+ doublereal dminus;
+ logical sawnan1, sawnan2;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAR1V computes the (scaled) r-th column of the inverse of */
+/* the submatrix in rows B1 through BN of the tridiagonal matrix */
+/* L D L^T - sigma I. When sigma is close to an eigenvalue, the */
+/* computed vector is an accurate eigenvector. Usually, r corresponds */
+/* to the index where the eigenvector is largest in magnitude. */
+/* The following steps accomplish this computation : */
+/* (a) Stationary qd transform, L D L^T - sigma I = L(+) D(+) L(+)^T, */
+/* (b) Progressive qd transform, L D L^T - sigma I = U(-) D(-) U(-)^T, */
+/* (c) Computation of the diagonal elements of the inverse of */
+/* L D L^T - sigma I by combining the above transforms, and choosing */
+/* r as the index where the diagonal of the inverse is (one of the) */
+/* largest in magnitude. */
+/* (d) Computation of the (scaled) r-th column of the inverse using the */
+/* twisted factorization obtained by combining the top part of the */
+/* stationary and the bottom part of the progressive transform. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix L D L^T. */
+
+/* B1 (input) INTEGER */
+/* First index of the submatrix of L D L^T. */
+
+/* BN (input) INTEGER */
+/* Last index of the submatrix of L D L^T. */
+
+/* LAMBDA (input) DOUBLE PRECISION */
+/* The shift. In order to compute an accurate eigenvector, */
+/* LAMBDA should be a good approximation to an eigenvalue */
+/* of L D L^T. */
+
+/* L (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of the unit bidiagonal matrix */
+/* L, in elements 1 to N-1. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the diagonal matrix D. */
+
+/* LD (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The n-1 elements L(i)*D(i). */
+
+/* LLD (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The n-1 elements L(i)*L(i)*D(i). */
+
+/* PIVMIN (input) DOUBLE PRECISION */
+/* The minimum pivot in the Sturm sequence. */
+
+/* GAPTOL (input) DOUBLE PRECISION */
+/* Tolerance that indicates when eigenvector entries are negligible */
+/* w.r.t. their contribution to the residual. */
+
+/* Z (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On input, all entries of Z must be set to 0. */
+/* On output, Z contains the (scaled) r-th column of the */
+/* inverse. The scaling is such that Z(R) equals 1. */
+
+/* WANTNC (input) LOGICAL */
+/* Specifies whether NEGCNT has to be computed. */
+
+/* NEGCNT (output) INTEGER */
+/* If WANTNC is .TRUE. then NEGCNT = the number of pivots < pivmin */
+/* in the matrix factorization L D L^T, and NEGCNT = -1 otherwise. */
+
+/* ZTZ (output) DOUBLE PRECISION */
+/* The square of the 2-norm of Z. */
+
+/* MINGMA (output) DOUBLE PRECISION */
+/* The reciprocal of the largest (in magnitude) diagonal */
+/* element of the inverse of L D L^T - sigma I. */
+
+/* R (input/output) INTEGER */
+/* The twist index for the twisted factorization used to */
+/* compute Z. */
+/* On input, 0 <= R <= N. If R is input as 0, R is set to */
+/* the index where (L D L^T - sigma I)^{-1} is largest */
+/* in magnitude. If 1 <= R <= N, R is unchanged. */
+/* On output, R contains the twist index used to compute Z. */
+/* Ideally, R designates the position of the maximum entry in the */
+/* eigenvector. */
+
+/* ISUPPZ (output) INTEGER array, dimension (2) */
+/* The support of the vector in Z, i.e., the vector Z is */
+/* nonzero only in elements ISUPPZ(1) through ISUPPZ( 2 ). */
+
+/* NRMINV (output) DOUBLE PRECISION */
+/* NRMINV = 1/SQRT( ZTZ ) */
+
+/* RESID (output) DOUBLE PRECISION */
+/* The residual of the FP vector. */
+/* RESID = ABS( MINGMA )/SQRT( ZTZ ) */
+
+/* RQCORR (output) DOUBLE PRECISION */
+/* The Rayleigh Quotient correction to LAMBDA. */
+/* RQCORR = MINGMA*TMP */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (4*N) */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --work;
+ --isuppz;
+ --z__;
+ --lld;
+ --ld;
+ --l;
+ --d__;
+
+ /* Function Body */
+ eps = dlamch_("Precision");
+ if (*r__ == 0) {
+ r1 = *b1;
+ r2 = *bn;
+ } else {
+ r1 = *r__;
+ r2 = *r__;
+ }
+/* Storage for LPLUS */
+ indlpl = 0;
+/* Storage for UMINUS */
+ indumn = *n;
+ inds = (*n << 1) + 1;
+ indp = *n * 3 + 1;
+ if (*b1 == 1) {
+ work[inds] = 0.;
+ } else {
+ work[inds + *b1 - 1] = lld[*b1 - 1];
+ }
+
+/* Compute the stationary transform (using the differential form) */
+/* until the index R2. */
+
+ sawnan1 = FALSE_;
+ neg1 = 0;
+ s = work[inds + *b1 - 1] - *lambda;
+ i__1 = r1 - 1;
+ for (i__ = *b1; i__ <= i__1; ++i__) {
+ dplus = d__[i__] + s;
+ work[indlpl + i__] = ld[i__] / dplus;
+ if (dplus < 0.) {
+ ++neg1;
+ }
+ work[inds + i__] = s * work[indlpl + i__] * l[i__];
+ s = work[inds + i__] - *lambda;
+/* L50: */
+ }
+ sawnan1 = disnan_(&s);
+ if (sawnan1) {
+ goto L60;
+ }
+ i__1 = r2 - 1;
+ for (i__ = r1; i__ <= i__1; ++i__) {
+ dplus = d__[i__] + s;
+ work[indlpl + i__] = ld[i__] / dplus;
+ work[inds + i__] = s * work[indlpl + i__] * l[i__];
+ s = work[inds + i__] - *lambda;
+/* L51: */
+ }
+ sawnan1 = disnan_(&s);
+
+L60:
+ if (sawnan1) {
+/* Runs a slower version of the above loop if a NaN is detected */
+ neg1 = 0;
+ s = work[inds + *b1 - 1] - *lambda;
+ i__1 = r1 - 1;
+ for (i__ = *b1; i__ <= i__1; ++i__) {
+ dplus = d__[i__] + s;
+ if (abs(dplus) < *pivmin) {
+ dplus = -(*pivmin);
+ }
+ work[indlpl + i__] = ld[i__] / dplus;
+ if (dplus < 0.) {
+ ++neg1;
+ }
+ work[inds + i__] = s * work[indlpl + i__] * l[i__];
+ if (work[indlpl + i__] == 0.) {
+ work[inds + i__] = lld[i__];
+ }
+ s = work[inds + i__] - *lambda;
+/* L70: */
+ }
+ i__1 = r2 - 1;
+ for (i__ = r1; i__ <= i__1; ++i__) {
+ dplus = d__[i__] + s;
+ if (abs(dplus) < *pivmin) {
+ dplus = -(*pivmin);
+ }
+ work[indlpl + i__] = ld[i__] / dplus;
+ work[inds + i__] = s * work[indlpl + i__] * l[i__];
+ if (work[indlpl + i__] == 0.) {
+ work[inds + i__] = lld[i__];
+ }
+ s = work[inds + i__] - *lambda;
+/* L71: */
+ }
+ }
+
+/* Compute the progressive transform (using the differential form) */
+/* until the index R1 */
+
+ sawnan2 = FALSE_;
+ neg2 = 0;
+ work[indp + *bn - 1] = d__[*bn] - *lambda;
+ i__1 = r1;
+ for (i__ = *bn - 1; i__ >= i__1; --i__) {
+ dminus = lld[i__] + work[indp + i__];
+ tmp = d__[i__] / dminus;
+ if (dminus < 0.) {
+ ++neg2;
+ }
+ work[indumn + i__] = l[i__] * tmp;
+ work[indp + i__ - 1] = work[indp + i__] * tmp - *lambda;
+/* L80: */
+ }
+ tmp = work[indp + r1 - 1];
+ sawnan2 = disnan_(&tmp);
+ if (sawnan2) {
+/* Runs a slower version of the above loop if a NaN is detected */
+ neg2 = 0;
+ i__1 = r1;
+ for (i__ = *bn - 1; i__ >= i__1; --i__) {
+ dminus = lld[i__] + work[indp + i__];
+ if (abs(dminus) < *pivmin) {
+ dminus = -(*pivmin);
+ }
+ tmp = d__[i__] / dminus;
+ if (dminus < 0.) {
+ ++neg2;
+ }
+ work[indumn + i__] = l[i__] * tmp;
+ work[indp + i__ - 1] = work[indp + i__] * tmp - *lambda;
+ if (tmp == 0.) {
+ work[indp + i__ - 1] = d__[i__] - *lambda;
+ }
+/* L100: */
+ }
+ }
+
+/* Find the index (from R1 to R2) of the largest (in magnitude) */
+/* diagonal element of the inverse */
+
+ *mingma = work[inds + r1 - 1] + work[indp + r1 - 1];
+ if (*mingma < 0.) {
+ ++neg1;
+ }
+ if (*wantnc) {
+ *negcnt = neg1 + neg2;
+ } else {
+ *negcnt = -1;
+ }
+ if (abs(*mingma) == 0.) {
+ *mingma = eps * work[inds + r1 - 1];
+ }
+ *r__ = r1;
+ i__1 = r2 - 1;
+ for (i__ = r1; i__ <= i__1; ++i__) {
+ tmp = work[inds + i__] + work[indp + i__];
+ if (tmp == 0.) {
+ tmp = eps * work[inds + i__];
+ }
+ if (abs(tmp) <= abs(*mingma)) {
+ *mingma = tmp;
+ *r__ = i__ + 1;
+ }
+/* L110: */
+ }
+
+/* Compute the FP vector: solve N^T v = e_r */
+
+ isuppz[1] = *b1;
+ isuppz[2] = *bn;
+ z__[*r__] = 1.;
+ *ztz = 1.;
+
+/* Compute the FP vector upwards from R */
+
+ if (! sawnan1 && ! sawnan2) {
+ i__1 = *b1;
+ for (i__ = *r__ - 1; i__ >= i__1; --i__) {
+ z__[i__] = -(work[indlpl + i__] * z__[i__ + 1]);
+ if (((d__1 = z__[i__], abs(d__1)) + (d__2 = z__[i__ + 1], abs(
+ d__2))) * (d__3 = ld[i__], abs(d__3)) < *gaptol) {
+ z__[i__] = 0.;
+ isuppz[1] = i__ + 1;
+ goto L220;
+ }
+ *ztz += z__[i__] * z__[i__];
+/* L210: */
+ }
+L220:
+ ;
+ } else {
+/* Run slower loop if NaN occurred. */
+ i__1 = *b1;
+ for (i__ = *r__ - 1; i__ >= i__1; --i__) {
+ if (z__[i__ + 1] == 0.) {
+ z__[i__] = -(ld[i__ + 1] / ld[i__]) * z__[i__ + 2];
+ } else {
+ z__[i__] = -(work[indlpl + i__] * z__[i__ + 1]);
+ }
+ if (((d__1 = z__[i__], abs(d__1)) + (d__2 = z__[i__ + 1], abs(
+ d__2))) * (d__3 = ld[i__], abs(d__3)) < *gaptol) {
+ z__[i__] = 0.;
+ isuppz[1] = i__ + 1;
+ goto L240;
+ }
+ *ztz += z__[i__] * z__[i__];
+/* L230: */
+ }
+L240:
+ ;
+ }
+/* Compute the FP vector downwards from R in blocks of size BLKSIZ */
+ if (! sawnan1 && ! sawnan2) {
+ i__1 = *bn - 1;
+ for (i__ = *r__; i__ <= i__1; ++i__) {
+ z__[i__ + 1] = -(work[indumn + i__] * z__[i__]);
+ if (((d__1 = z__[i__], abs(d__1)) + (d__2 = z__[i__ + 1], abs(
+ d__2))) * (d__3 = ld[i__], abs(d__3)) < *gaptol) {
+ z__[i__ + 1] = 0.;
+ isuppz[2] = i__;
+ goto L260;
+ }
+ *ztz += z__[i__ + 1] * z__[i__ + 1];
+/* L250: */
+ }
+L260:
+ ;
+ } else {
+/* Run slower loop if NaN occurred. */
+ i__1 = *bn - 1;
+ for (i__ = *r__; i__ <= i__1; ++i__) {
+ if (z__[i__] == 0.) {
+ z__[i__ + 1] = -(ld[i__ - 1] / ld[i__]) * z__[i__ - 1];
+ } else {
+ z__[i__ + 1] = -(work[indumn + i__] * z__[i__]);
+ }
+ if (((d__1 = z__[i__], abs(d__1)) + (d__2 = z__[i__ + 1], abs(
+ d__2))) * (d__3 = ld[i__], abs(d__3)) < *gaptol) {
+ z__[i__ + 1] = 0.;
+ isuppz[2] = i__;
+ goto L280;
+ }
+ *ztz += z__[i__ + 1] * z__[i__ + 1];
+/* L270: */
+ }
+L280:
+ ;
+ }
+
+/* Compute quantities for convergence test */
+
+ tmp = 1. / *ztz;
+ *nrminv = sqrt(tmp);
+ *resid = abs(*mingma) * *nrminv;
+ *rqcorr = *mingma * tmp;
+
+
+ return 0;
+
+/* End of DLAR1V */
+
+} /* dlar1v_ */
diff --git a/contrib/libs/clapack/dlar2v.c b/contrib/libs/clapack/dlar2v.c
new file mode 100644
index 0000000000..cd343d0dec
--- /dev/null
+++ b/contrib/libs/clapack/dlar2v.c
@@ -0,0 +1,121 @@
+/* dlar2v.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlar2v_(integer *n, doublereal *x, doublereal *y,
+ doublereal *z__, integer *incx, doublereal *c__, doublereal *s,
+ integer *incc)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ integer i__;
+ doublereal t1, t2, t3, t4, t5, t6;
+ integer ic;
+ doublereal ci, si;
+ integer ix;
+ doublereal xi, yi, zi;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAR2V applies a vector of real plane rotations from both sides to */
+/* a sequence of 2-by-2 real symmetric matrices, defined by the elements */
+/* of the vectors x, y and z. For i = 1,2,...,n */
+
+/* ( x(i) z(i) ) := ( c(i) s(i) ) ( x(i) z(i) ) ( c(i) -s(i) ) */
+/* ( z(i) y(i) ) ( -s(i) c(i) ) ( z(i) y(i) ) ( s(i) c(i) ) */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of plane rotations to be applied. */
+
+/* X (input/output) DOUBLE PRECISION array, */
+/* dimension (1+(N-1)*INCX) */
+/* The vector x. */
+
+/* Y (input/output) DOUBLE PRECISION array, */
+/* dimension (1+(N-1)*INCX) */
+/* The vector y. */
+
+/* Z (input/output) DOUBLE PRECISION array, */
+/* dimension (1+(N-1)*INCX) */
+/* The vector z. */
+
+/* INCX (input) INTEGER */
+/* The increment between elements of X, Y and Z. INCX > 0. */
+
+/* C (input) DOUBLE PRECISION array, dimension (1+(N-1)*INCC) */
+/* The cosines of the plane rotations. */
+
+/* S (input) DOUBLE PRECISION array, dimension (1+(N-1)*INCC) */
+/* The sines of the plane rotations. */
+
+/* INCC (input) INTEGER */
+/* The increment between elements of C and S. INCC > 0. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --s;
+ --c__;
+ --z__;
+ --y;
+ --x;
+
+ /* Function Body */
+ ix = 1;
+ ic = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ xi = x[ix];
+ yi = y[ix];
+ zi = z__[ix];
+ ci = c__[ic];
+ si = s[ic];
+ t1 = si * zi;
+ t2 = ci * zi;
+ t3 = t2 - si * xi;
+ t4 = t2 + si * yi;
+ t5 = ci * xi + t1;
+ t6 = ci * yi - t1;
+ x[ix] = ci * t5 + si * t4;
+ y[ix] = ci * t6 - si * t3;
+ z__[ix] = ci * t4 - si * t5;
+ ix += *incx;
+ ic += *incc;
+/* L10: */
+ }
+
+/* End of DLAR2V */
+
+ return 0;
+} /* dlar2v_ */
diff --git a/contrib/libs/clapack/dlarf.c b/contrib/libs/clapack/dlarf.c
new file mode 100644
index 0000000000..aba8a5929b
--- /dev/null
+++ b/contrib/libs/clapack/dlarf.c
@@ -0,0 +1,193 @@
+/* dlarf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b4 = 1.;
+static doublereal c_b5 = 0.;
+static integer c__1 = 1;
+
+/* Subroutine */ int dlarf_(char *side, integer *m, integer *n, doublereal *v,
+ integer *incv, doublereal *tau, doublereal *c__, integer *ldc,
+ doublereal *work)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset;
+ doublereal d__1;
+
+ /* Local variables */
+ integer i__;
+ logical applyleft;
+ extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+ integer lastc, lastv;
+ extern integer iladlc_(integer *, integer *, doublereal *, integer *),
+ iladlr_(integer *, integer *, doublereal *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLARF applies a real elementary reflector H to a real m by n matrix */
+/* C, from either the left or the right. H is represented in the form */
+
+/* H = I - tau * v * v' */
+
+/* where tau is a real scalar and v is a real vector. */
+
+/* If tau = 0, then H is taken to be the unit matrix. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': form H * C */
+/* = 'R': form C * H */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* V (input) DOUBLE PRECISION array, dimension */
+/* (1 + (M-1)*abs(INCV)) if SIDE = 'L' */
+/* or (1 + (N-1)*abs(INCV)) if SIDE = 'R' */
+/* The vector v in the representation of H. V is not used if */
+/* TAU = 0. */
+
+/* INCV (input) INTEGER */
+/* The increment between elements of v. INCV <> 0. */
+
+/* TAU (input) DOUBLE PRECISION */
+/* The value tau in the representation of H. */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/* On entry, the m by n matrix C. */
+/* On exit, C is overwritten by the matrix H * C if SIDE = 'L', */
+/* or C * H if SIDE = 'R'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension */
+/* (N) if SIDE = 'L' */
+/* or (M) if SIDE = 'R' */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --v;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ applyleft = lsame_(side, "L");
+ lastv = 0;
+ lastc = 0;
+ if (*tau != 0.) {
+/* Set up variables for scanning V. LASTV begins pointing to the end */
+/* of V. */
+ if (applyleft) {
+ lastv = *m;
+ } else {
+ lastv = *n;
+ }
+ if (*incv > 0) {
+ i__ = (lastv - 1) * *incv + 1;
+ } else {
+ i__ = 1;
+ }
+/* Look for the last non-zero row in V. */
+ while(lastv > 0 && v[i__] == 0.) {
+ --lastv;
+ i__ -= *incv;
+ }
+ if (applyleft) {
+/* Scan for the last non-zero column in C(1:lastv,:). */
+ lastc = iladlc_(&lastv, n, &c__[c_offset], ldc);
+ } else {
+/* Scan for the last non-zero row in C(:,1:lastv). */
+ lastc = iladlr_(m, &lastv, &c__[c_offset], ldc);
+ }
+ }
+/* Note that lastc.eq.0 renders the BLAS operations null; no special */
+/* case is needed at this level. */
+ if (applyleft) {
+
+/* Form H * C */
+
+ if (lastv > 0) {
+
+/* w(1:lastc,1) := C(1:lastv,1:lastc)' * v(1:lastv,1) */
+
+ dgemv_("Transpose", &lastv, &lastc, &c_b4, &c__[c_offset], ldc, &
+ v[1], incv, &c_b5, &work[1], &c__1);
+
+/* C(1:lastv,1:lastc) := C(...) - v(1:lastv,1) * w(1:lastc,1)' */
+
+ d__1 = -(*tau);
+ dger_(&lastv, &lastc, &d__1, &v[1], incv, &work[1], &c__1, &c__[
+ c_offset], ldc);
+ }
+ } else {
+
+/* Form C * H */
+
+ if (lastv > 0) {
+
+/* w(1:lastc,1) := C(1:lastc,1:lastv) * v(1:lastv,1) */
+
+ dgemv_("No transpose", &lastc, &lastv, &c_b4, &c__[c_offset], ldc,
+ &v[1], incv, &c_b5, &work[1], &c__1);
+
+/* C(1:lastc,1:lastv) := C(...) - w(1:lastc,1) * v(1:lastv,1)' */
+
+ d__1 = -(*tau);
+ dger_(&lastc, &lastv, &d__1, &work[1], &c__1, &v[1], incv, &c__[
+ c_offset], ldc);
+ }
+ }
+ return 0;
+
+/* End of DLARF */
+
+} /* dlarf_ */
diff --git a/contrib/libs/clapack/dlarfb.c b/contrib/libs/clapack/dlarfb.c
new file mode 100644
index 0000000000..9833b69359
--- /dev/null
+++ b/contrib/libs/clapack/dlarfb.c
@@ -0,0 +1,774 @@
+/* dlarfb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b14 = 1.;
+static doublereal c_b25 = -1.;
+
+/* Subroutine */ int dlarfb_(char *side, char *trans, char *direct, char *
+ storev, integer *m, integer *n, integer *k, doublereal *v, integer *
+ ldv, doublereal *t, integer *ldt, doublereal *c__, integer *ldc,
+ doublereal *work, integer *ldwork)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, t_dim1, t_offset, v_dim1, v_offset, work_dim1,
+ work_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ extern logical lsame_(char *, char *);
+ integer lastc;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dtrmm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *);
+ integer lastv;
+ extern integer iladlc_(integer *, integer *, doublereal *, integer *),
+ iladlr_(integer *, integer *, doublereal *, integer *);
+ char transt[1];
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLARFB applies a real block reflector H or its transpose H' to a */
+/* real m by n matrix C, from either the left or the right. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply H or H' from the Left */
+/* = 'R': apply H or H' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply H (No transpose) */
+/* = 'T': apply H' (Transpose) */
+
+/* DIRECT (input) CHARACTER*1 */
+/* Indicates how H is formed from a product of elementary */
+/* reflectors */
+/* = 'F': H = H(1) H(2) . . . H(k) (Forward) */
+/* = 'B': H = H(k) . . . H(2) H(1) (Backward) */
+
+/* STOREV (input) CHARACTER*1 */
+/* Indicates how the vectors which define the elementary */
+/* reflectors are stored: */
+/* = 'C': Columnwise */
+/* = 'R': Rowwise */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* K (input) INTEGER */
+/* The order of the matrix T (= the number of elementary */
+/* reflectors whose product defines the block reflector). */
+
+/* V (input) DOUBLE PRECISION array, dimension */
+/* (LDV,K) if STOREV = 'C' */
+/* (LDV,M) if STOREV = 'R' and SIDE = 'L' */
+/* (LDV,N) if STOREV = 'R' and SIDE = 'R' */
+/* The matrix V. See further details. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. */
+/* If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M); */
+/* if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N); */
+/* if STOREV = 'R', LDV >= K. */
+
+/* T (input) DOUBLE PRECISION array, dimension (LDT,K) */
+/* The triangular k by k matrix T in the representation of the */
+/* block reflector. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= K. */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/* On entry, the m by n matrix C. */
+/* On exit, C is overwritten by H*C or H'*C or C*H or C*H'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (LDWORK,K) */
+
+/* LDWORK (input) INTEGER */
+/* The leading dimension of the array WORK. */
+/* If SIDE = 'L', LDWORK >= max(1,N); */
+/* if SIDE = 'R', LDWORK >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ work_dim1 = *ldwork;
+ work_offset = 1 + work_dim1;
+ work -= work_offset;
+
+ /* Function Body */
+ if (*m <= 0 || *n <= 0) {
+ return 0;
+ }
+
+ if (lsame_(trans, "N")) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ if (lsame_(storev, "C")) {
+
+ if (lsame_(direct, "F")) {
+
+/* Let V = ( V1 ) (first K rows) */
+/* ( V2 ) */
+/* where V1 is unit lower triangular. */
+
+ if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C where C = ( C1 ) */
+/* ( C2 ) */
+
+/* Computing MAX */
+ i__1 = *k, i__2 = iladlr_(m, k, &v[v_offset], ldv);
+ lastv = max(i__1,i__2);
+ lastc = iladlc_(&lastv, n, &c__[c_offset], ldc);
+
+/* W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK) */
+
+/* W := C1' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dcopy_(&lastc, &c__[j + c_dim1], ldc, &work[j * work_dim1
+ + 1], &c__1);
+/* L10: */
+ }
+
+/* W := W * V1 */
+
+ dtrmm_("Right", "Lower", "No transpose", "Unit", &lastc, k, &
+ c_b14, &v[v_offset], ldv, &work[work_offset], ldwork);
+ if (lastv > *k) {
+
+/* W := W + C2'*V2 */
+
+ i__1 = lastv - *k;
+ dgemm_("Transpose", "No transpose", &lastc, k, &i__1, &
+ c_b14, &c__[*k + 1 + c_dim1], ldc, &v[*k + 1 +
+ v_dim1], ldv, &c_b14, &work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ dtrmm_("Right", "Upper", transt, "Non-unit", &lastc, k, &
+ c_b14, &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V * W' */
+
+ if (lastv > *k) {
+
+/* C2 := C2 - V2 * W' */
+
+ i__1 = lastv - *k;
+ dgemm_("No transpose", "Transpose", &i__1, &lastc, k, &
+ c_b25, &v[*k + 1 + v_dim1], ldv, &work[
+ work_offset], ldwork, &c_b14, &c__[*k + 1 +
+ c_dim1], ldc);
+ }
+
+/* W := W * V1' */
+
+ dtrmm_("Right", "Lower", "Transpose", "Unit", &lastc, k, &
+ c_b14, &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = lastc;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[j + i__ * c_dim1] -= work[i__ + j * work_dim1];
+/* L20: */
+ }
+/* L30: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' where C = ( C1 C2 ) */
+
+/* Computing MAX */
+ i__1 = *k, i__2 = iladlr_(n, k, &v[v_offset], ldv);
+ lastv = max(i__1,i__2);
+ lastc = iladlr_(m, &lastv, &c__[c_offset], ldc);
+
+/* W := C * V = (C1*V1 + C2*V2) (stored in WORK) */
+
+/* W := C1 */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dcopy_(&lastc, &c__[j * c_dim1 + 1], &c__1, &work[j *
+ work_dim1 + 1], &c__1);
+/* L40: */
+ }
+
+/* W := W * V1 */
+
+ dtrmm_("Right", "Lower", "No transpose", "Unit", &lastc, k, &
+ c_b14, &v[v_offset], ldv, &work[work_offset], ldwork);
+ if (lastv > *k) {
+
+/* W := W + C2 * V2 */
+
+ i__1 = lastv - *k;
+ dgemm_("No transpose", "No transpose", &lastc, k, &i__1, &
+ c_b14, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[*k +
+ 1 + v_dim1], ldv, &c_b14, &work[work_offset],
+ ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ dtrmm_("Right", "Upper", trans, "Non-unit", &lastc, k, &c_b14,
+ &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V' */
+
+ if (lastv > *k) {
+
+/* C2 := C2 - W * V2' */
+
+ i__1 = lastv - *k;
+ dgemm_("No transpose", "Transpose", &lastc, &i__1, k, &
+ c_b25, &work[work_offset], ldwork, &v[*k + 1 +
+ v_dim1], ldv, &c_b14, &c__[(*k + 1) * c_dim1 + 1],
+ ldc);
+ }
+
+/* W := W * V1' */
+
+ dtrmm_("Right", "Lower", "Transpose", "Unit", &lastc, k, &
+ c_b14, &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = lastc;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1];
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+ } else {
+
+/* Let V = ( V1 ) */
+/* ( V2 ) (last K rows) */
+/* where V2 is unit upper triangular. */
+
+ if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C where C = ( C1 ) */
+/* ( C2 ) */
+
+/* Computing MAX */
+ i__1 = *k, i__2 = iladlr_(m, k, &v[v_offset], ldv);
+ lastv = max(i__1,i__2);
+ lastc = iladlc_(&lastv, n, &c__[c_offset], ldc);
+
+/* W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK) */
+
+/* W := C2' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dcopy_(&lastc, &c__[lastv - *k + j + c_dim1], ldc, &work[
+ j * work_dim1 + 1], &c__1);
+/* L70: */
+ }
+
+/* W := W * V2 */
+
+ dtrmm_("Right", "Upper", "No transpose", "Unit", &lastc, k, &
+ c_b14, &v[lastv - *k + 1 + v_dim1], ldv, &work[
+ work_offset], ldwork);
+ if (lastv > *k) {
+
+/* W := W + C1'*V1 */
+
+ i__1 = lastv - *k;
+ dgemm_("Transpose", "No transpose", &lastc, k, &i__1, &
+ c_b14, &c__[c_offset], ldc, &v[v_offset], ldv, &
+ c_b14, &work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ dtrmm_("Right", "Lower", transt, "Non-unit", &lastc, k, &
+ c_b14, &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V * W' */
+
+ if (lastv > *k) {
+
+/* C1 := C1 - V1 * W' */
+
+ i__1 = lastv - *k;
+ dgemm_("No transpose", "Transpose", &i__1, &lastc, k, &
+ c_b25, &v[v_offset], ldv, &work[work_offset],
+ ldwork, &c_b14, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2' */
+
+ dtrmm_("Right", "Upper", "Transpose", "Unit", &lastc, k, &
+ c_b14, &v[lastv - *k + 1 + v_dim1], ldv, &work[
+ work_offset], ldwork);
+
+/* C2 := C2 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = lastc;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[lastv - *k + j + i__ * c_dim1] -= work[i__ + j *
+ work_dim1];
+/* L80: */
+ }
+/* L90: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' where C = ( C1 C2 ) */
+
+/* Computing MAX */
+ i__1 = *k, i__2 = iladlr_(n, k, &v[v_offset], ldv);
+ lastv = max(i__1,i__2);
+ lastc = iladlr_(m, &lastv, &c__[c_offset], ldc);
+
+/* W := C * V = (C1*V1 + C2*V2) (stored in WORK) */
+
+/* W := C2 */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dcopy_(&lastc, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &
+ work[j * work_dim1 + 1], &c__1);
+/* L100: */
+ }
+
+/* W := W * V2 */
+
+ dtrmm_("Right", "Upper", "No transpose", "Unit", &lastc, k, &
+ c_b14, &v[lastv - *k + 1 + v_dim1], ldv, &work[
+ work_offset], ldwork);
+ if (lastv > *k) {
+
+/* W := W + C1 * V1 */
+
+ i__1 = lastv - *k;
+ dgemm_("No transpose", "No transpose", &lastc, k, &i__1, &
+ c_b14, &c__[c_offset], ldc, &v[v_offset], ldv, &
+ c_b14, &work[work_offset], ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ dtrmm_("Right", "Lower", trans, "Non-unit", &lastc, k, &c_b14,
+ &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V' */
+
+ if (lastv > *k) {
+
+/* C1 := C1 - W * V1' */
+
+ i__1 = lastv - *k;
+ dgemm_("No transpose", "Transpose", &lastc, &i__1, k, &
+ c_b25, &work[work_offset], ldwork, &v[v_offset],
+ ldv, &c_b14, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2' */
+
+ dtrmm_("Right", "Upper", "Transpose", "Unit", &lastc, k, &
+ c_b14, &v[lastv - *k + 1 + v_dim1], ldv, &work[
+ work_offset], ldwork);
+
+/* C2 := C2 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = lastc;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + (lastv - *k + j) * c_dim1] -= work[i__ + j *
+ work_dim1];
+/* L110: */
+ }
+/* L120: */
+ }
+ }
+ }
+
+ } else if (lsame_(storev, "R")) {
+
+ if (lsame_(direct, "F")) {
+
+/* Let V = ( V1 V2 ) (V1: first K columns) */
+/* where V1 is unit upper triangular. */
+
+ if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C where C = ( C1 ) */
+/* ( C2 ) */
+
+/* Computing MAX */
+ i__1 = *k, i__2 = iladlc_(k, m, &v[v_offset], ldv);
+ lastv = max(i__1,i__2);
+ lastc = iladlc_(&lastv, n, &c__[c_offset], ldc);
+
+/* W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK) */
+
+/* W := C1' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dcopy_(&lastc, &c__[j + c_dim1], ldc, &work[j * work_dim1
+ + 1], &c__1);
+/* L130: */
+ }
+
+/* W := W * V1' */
+
+ dtrmm_("Right", "Upper", "Transpose", "Unit", &lastc, k, &
+ c_b14, &v[v_offset], ldv, &work[work_offset], ldwork);
+ if (lastv > *k) {
+
+/* W := W + C2'*V2' */
+
+ i__1 = lastv - *k;
+ dgemm_("Transpose", "Transpose", &lastc, k, &i__1, &c_b14,
+ &c__[*k + 1 + c_dim1], ldc, &v[(*k + 1) * v_dim1
+ + 1], ldv, &c_b14, &work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ dtrmm_("Right", "Upper", transt, "Non-unit", &lastc, k, &
+ c_b14, &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V' * W' */
+
+ if (lastv > *k) {
+
+/* C2 := C2 - V2' * W' */
+
+ i__1 = lastv - *k;
+ dgemm_("Transpose", "Transpose", &i__1, &lastc, k, &c_b25,
+ &v[(*k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork, &c_b14, &c__[*k + 1 +
+ c_dim1], ldc);
+ }
+
+/* W := W * V1 */
+
+ dtrmm_("Right", "Upper", "No transpose", "Unit", &lastc, k, &
+ c_b14, &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = lastc;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[j + i__ * c_dim1] -= work[i__ + j * work_dim1];
+/* L140: */
+ }
+/* L150: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' where C = ( C1 C2 ) */
+
+/* Computing MAX */
+ i__1 = *k, i__2 = iladlc_(k, n, &v[v_offset], ldv);
+ lastv = max(i__1,i__2);
+ lastc = iladlr_(m, &lastv, &c__[c_offset], ldc);
+
+/* W := C * V' = (C1*V1' + C2*V2') (stored in WORK) */
+
+/* W := C1 */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dcopy_(&lastc, &c__[j * c_dim1 + 1], &c__1, &work[j *
+ work_dim1 + 1], &c__1);
+/* L160: */
+ }
+
+/* W := W * V1' */
+
+ dtrmm_("Right", "Upper", "Transpose", "Unit", &lastc, k, &
+ c_b14, &v[v_offset], ldv, &work[work_offset], ldwork);
+ if (lastv > *k) {
+
+/* W := W + C2 * V2' */
+
+ i__1 = lastv - *k;
+ dgemm_("No transpose", "Transpose", &lastc, k, &i__1, &
+ c_b14, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[(*k +
+ 1) * v_dim1 + 1], ldv, &c_b14, &work[work_offset],
+ ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ dtrmm_("Right", "Upper", trans, "Non-unit", &lastc, k, &c_b14,
+ &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V */
+
+ if (lastv > *k) {
+
+/* C2 := C2 - W * V2 */
+
+ i__1 = lastv - *k;
+ dgemm_("No transpose", "No transpose", &lastc, &i__1, k, &
+ c_b25, &work[work_offset], ldwork, &v[(*k + 1) *
+ v_dim1 + 1], ldv, &c_b14, &c__[(*k + 1) * c_dim1
+ + 1], ldc);
+ }
+
+/* W := W * V1 */
+
+ dtrmm_("Right", "Upper", "No transpose", "Unit", &lastc, k, &
+ c_b14, &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = lastc;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1];
+/* L170: */
+ }
+/* L180: */
+ }
+
+ }
+
+ } else {
+
+/* Let V = ( V1 V2 ) (V2: last K columns) */
+/* where V2 is unit lower triangular. */
+
+ if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C where C = ( C1 ) */
+/* ( C2 ) */
+
+/* Computing MAX */
+ i__1 = *k, i__2 = iladlc_(k, m, &v[v_offset], ldv);
+ lastv = max(i__1,i__2);
+ lastc = iladlc_(&lastv, n, &c__[c_offset], ldc);
+
+/* W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK) */
+
+/* W := C2' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dcopy_(&lastc, &c__[lastv - *k + j + c_dim1], ldc, &work[
+ j * work_dim1 + 1], &c__1);
+/* L190: */
+ }
+
+/* W := W * V2' */
+
+ dtrmm_("Right", "Lower", "Transpose", "Unit", &lastc, k, &
+ c_b14, &v[(lastv - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+ if (lastv > *k) {
+
+/* W := W + C1'*V1' */
+
+ i__1 = lastv - *k;
+ dgemm_("Transpose", "Transpose", &lastc, k, &i__1, &c_b14,
+ &c__[c_offset], ldc, &v[v_offset], ldv, &c_b14, &
+ work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ dtrmm_("Right", "Lower", transt, "Non-unit", &lastc, k, &
+ c_b14, &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V' * W' */
+
+ if (lastv > *k) {
+
+/* C1 := C1 - V1' * W' */
+
+ i__1 = lastv - *k;
+ dgemm_("Transpose", "Transpose", &i__1, &lastc, k, &c_b25,
+ &v[v_offset], ldv, &work[work_offset], ldwork, &
+ c_b14, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2 */
+
+ dtrmm_("Right", "Lower", "No transpose", "Unit", &lastc, k, &
+ c_b14, &v[(lastv - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+
+/* C2 := C2 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = lastc;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[lastv - *k + j + i__ * c_dim1] -= work[i__ + j *
+ work_dim1];
+/* L200: */
+ }
+/* L210: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' where C = ( C1 C2 ) */
+
+/* Computing MAX */
+ i__1 = *k, i__2 = iladlc_(k, n, &v[v_offset], ldv);
+ lastv = max(i__1,i__2);
+ lastc = iladlr_(m, &lastv, &c__[c_offset], ldc);
+
+/* W := C * V' = (C1*V1' + C2*V2') (stored in WORK) */
+
+/* W := C2 */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dcopy_(&lastc, &c__[(lastv - *k + j) * c_dim1 + 1], &c__1,
+ &work[j * work_dim1 + 1], &c__1);
+/* L220: */
+ }
+
+/* W := W * V2' */
+
+ dtrmm_("Right", "Lower", "Transpose", "Unit", &lastc, k, &
+ c_b14, &v[(lastv - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+ if (lastv > *k) {
+
+/* W := W + C1 * V1' */
+
+ i__1 = lastv - *k;
+ dgemm_("No transpose", "Transpose", &lastc, k, &i__1, &
+ c_b14, &c__[c_offset], ldc, &v[v_offset], ldv, &
+ c_b14, &work[work_offset], ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ dtrmm_("Right", "Lower", trans, "Non-unit", &lastc, k, &c_b14,
+ &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V */
+
+ if (lastv > *k) {
+
+/* C1 := C1 - W * V1 */
+
+ i__1 = lastv - *k;
+ dgemm_("No transpose", "No transpose", &lastc, &i__1, k, &
+ c_b25, &work[work_offset], ldwork, &v[v_offset],
+ ldv, &c_b14, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2 */
+
+ dtrmm_("Right", "Lower", "No transpose", "Unit", &lastc, k, &
+ c_b14, &v[(lastv - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = lastc;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + (lastv - *k + j) * c_dim1] -= work[i__ + j *
+ work_dim1];
+/* L230: */
+ }
+/* L240: */
+ }
+
+ }
+
+ }
+ }
+
+ return 0;
+
+/* End of DLARFB */
+
+} /* dlarfb_ */
diff --git a/contrib/libs/clapack/dlarfg.c b/contrib/libs/clapack/dlarfg.c
new file mode 100644
index 0000000000..2a052caa33
--- /dev/null
+++ b/contrib/libs/clapack/dlarfg.c
@@ -0,0 +1,170 @@
+/* dlarfg.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlarfg_(integer *n, doublereal *alpha, doublereal *x,
+ integer *incx, doublereal *tau)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ integer j, knt;
+ doublereal beta;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ doublereal xnorm;
+ extern doublereal dlapy2_(doublereal *, doublereal *), dlamch_(char *);
+ doublereal safmin, rsafmn;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLARFG generates a real elementary reflector H of order n, such */
+/* that */
+
+/* H * ( alpha ) = ( beta ), H' * H = I. */
+/* ( x ) ( 0 ) */
+
+/* where alpha and beta are scalars, and x is an (n-1)-element real */
+/* vector. H is represented in the form */
+
+/* H = I - tau * ( 1 ) * ( 1 v' ) , */
+/* ( v ) */
+
+/* where tau is a real scalar and v is a real (n-1)-element */
+/* vector. */
+
+/* If the elements of x are all zero, then tau = 0 and H is taken to be */
+/* the unit matrix. */
+
+/* Otherwise 1 <= tau <= 2. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the elementary reflector. */
+
+/* ALPHA (input/output) DOUBLE PRECISION */
+/* On entry, the value alpha. */
+/* On exit, it is overwritten with the value beta. */
+
+/* X (input/output) DOUBLE PRECISION array, dimension */
+/* (1+(N-2)*abs(INCX)) */
+/* On entry, the vector x. */
+/* On exit, it is overwritten with the vector v. */
+
+/* INCX (input) INTEGER */
+/* The increment between elements of X. INCX > 0. */
+
+/* TAU (output) DOUBLE PRECISION */
+/* The value tau. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*n <= 1) {
+ *tau = 0.;
+ return 0;
+ }
+
+ i__1 = *n - 1;
+ xnorm = dnrm2_(&i__1, &x[1], incx);
+
+ if (xnorm == 0.) {
+
+/* H = I */
+
+ *tau = 0.;
+ } else {
+
+/* general case */
+
+ d__1 = dlapy2_(alpha, &xnorm);
+ beta = -d_sign(&d__1, alpha);
+ safmin = dlamch_("S") / dlamch_("E");
+ knt = 0;
+ if (abs(beta) < safmin) {
+
+/* XNORM, BETA may be inaccurate; scale X and recompute them */
+
+ rsafmn = 1. / safmin;
+L10:
+ ++knt;
+ i__1 = *n - 1;
+ dscal_(&i__1, &rsafmn, &x[1], incx);
+ beta *= rsafmn;
+ *alpha *= rsafmn;
+ if (abs(beta) < safmin) {
+ goto L10;
+ }
+
+/* New BETA is at most 1, at least SAFMIN */
+
+ i__1 = *n - 1;
+ xnorm = dnrm2_(&i__1, &x[1], incx);
+ d__1 = dlapy2_(alpha, &xnorm);
+ beta = -d_sign(&d__1, alpha);
+ }
+ *tau = (beta - *alpha) / beta;
+ i__1 = *n - 1;
+ d__1 = 1. / (*alpha - beta);
+ dscal_(&i__1, &d__1, &x[1], incx);
+
+/* If ALPHA is subnormal, it may lose relative accuracy */
+
+ i__1 = knt;
+ for (j = 1; j <= i__1; ++j) {
+ beta *= safmin;
+/* L20: */
+ }
+ *alpha = beta;
+ }
+
+ return 0;
+
+/* End of DLARFG */
+
+} /* dlarfg_ */
diff --git a/contrib/libs/clapack/dlarfp.c b/contrib/libs/clapack/dlarfp.c
new file mode 100644
index 0000000000..234ee7ab1c
--- /dev/null
+++ b/contrib/libs/clapack/dlarfp.c
@@ -0,0 +1,192 @@
+/* dlarfp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlarfp_(integer *n, doublereal *alpha, doublereal *x,
+ integer *incx, doublereal *tau)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ integer j, knt;
+ doublereal beta;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ doublereal xnorm;
+ extern doublereal dlapy2_(doublereal *, doublereal *), dlamch_(char *);
+ doublereal safmin, rsafmn;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLARFP generates a real elementary reflector H of order n, such */
+/* that */
+
+/* H * ( alpha ) = ( beta ), H' * H = I. */
+/* ( x ) ( 0 ) */
+
+/* where alpha and beta are scalars, beta is non-negative, and x is */
+/* an (n-1)-element real vector. H is represented in the form */
+
+/* H = I - tau * ( 1 ) * ( 1 v' ) , */
+/* ( v ) */
+
+/* where tau is a real scalar and v is a real (n-1)-element */
+/* vector. */
+
+/* If the elements of x are all zero, then tau = 0 and H is taken to be */
+/* the unit matrix. */
+
+/* Otherwise 1 <= tau <= 2. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the elementary reflector. */
+
+/* ALPHA (input/output) DOUBLE PRECISION */
+/* On entry, the value alpha. */
+/* On exit, it is overwritten with the value beta. */
+
+/* X (input/output) DOUBLE PRECISION array, dimension */
+/* (1+(N-2)*abs(INCX)) */
+/* On entry, the vector x. */
+/* On exit, it is overwritten with the vector v. */
+
+/* INCX (input) INTEGER */
+/* The increment between elements of X. INCX > 0. */
+
+/* TAU (output) DOUBLE PRECISION */
+/* The value tau. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*n <= 0) {
+ *tau = 0.;
+ return 0;
+ }
+
+ i__1 = *n - 1;
+ xnorm = dnrm2_(&i__1, &x[1], incx);
+
+ if (xnorm == 0.) {
+
+/* H = [+/-1, 0; I], sign chosen so ALPHA >= 0 */
+
+ if (*alpha >= 0.) {
+/* When TAU.eq.ZERO, the vector is special-cased to be */
+/* all zeros in the application routines. We do not need */
+/* to clear it. */
+ *tau = 0.;
+ } else {
+/* However, the application routines rely on explicit */
+/* zero checks when TAU.ne.ZERO, and we must clear X. */
+ *tau = 2.;
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ x[(j - 1) * *incx + 1] = 0.;
+ }
+ *alpha = -(*alpha);
+ }
+ } else {
+
+/* general case */
+
+ d__1 = dlapy2_(alpha, &xnorm);
+ beta = d_sign(&d__1, alpha);
+ safmin = dlamch_("S") / dlamch_("E");
+ knt = 0;
+ if (abs(beta) < safmin) {
+
+/* XNORM, BETA may be inaccurate; scale X and recompute them */
+
+ rsafmn = 1. / safmin;
+L10:
+ ++knt;
+ i__1 = *n - 1;
+ dscal_(&i__1, &rsafmn, &x[1], incx);
+ beta *= rsafmn;
+ *alpha *= rsafmn;
+ if (abs(beta) < safmin) {
+ goto L10;
+ }
+
+/* New BETA is at most 1, at least SAFMIN */
+
+ i__1 = *n - 1;
+ xnorm = dnrm2_(&i__1, &x[1], incx);
+ d__1 = dlapy2_(alpha, &xnorm);
+ beta = d_sign(&d__1, alpha);
+ }
+ *alpha += beta;
+ if (beta < 0.) {
+ beta = -beta;
+ *tau = -(*alpha) / beta;
+ } else {
+ *alpha = xnorm * (xnorm / *alpha);
+ *tau = *alpha / beta;
+ *alpha = -(*alpha);
+ }
+ i__1 = *n - 1;
+ d__1 = 1. / *alpha;
+ dscal_(&i__1, &d__1, &x[1], incx);
+
+/* If BETA is subnormal, it may lose relative accuracy */
+
+ i__1 = knt;
+ for (j = 1; j <= i__1; ++j) {
+ beta *= safmin;
+/* L20: */
+ }
+ *alpha = beta;
+ }
+
+ return 0;
+
+/* End of DLARFP */
+
+} /* dlarfp_ */
diff --git a/contrib/libs/clapack/dlarft.c b/contrib/libs/clapack/dlarft.c
new file mode 100644
index 0000000000..0d4951c521
--- /dev/null
+++ b/contrib/libs/clapack/dlarft.c
@@ -0,0 +1,325 @@
+/* dlarft.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b8 = 0.;
+
+/* Subroutine */ int dlarft_(char *direct, char *storev, integer *n, integer *
+ k, doublereal *v, integer *ldv, doublereal *tau, doublereal *t,
+ integer *ldt)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, v_dim1, v_offset, i__1, i__2, i__3;
+ doublereal d__1;
+
+ /* Local variables */
+ integer i__, j, prevlastv;
+ doublereal vii;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+ integer lastv;
+ extern /* Subroutine */ int dtrmv_(char *, char *, char *, integer *,
+ doublereal *, integer *, doublereal *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLARFT forms the triangular factor T of a real block reflector H */
+/* of order n, which is defined as a product of k elementary reflectors. */
+
+/* If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular; */
+
+/* If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. */
+
+/* If STOREV = 'C', the vector which defines the elementary reflector */
+/* H(i) is stored in the i-th column of the array V, and */
+
+/* H = I - V * T * V' */
+
+/* If STOREV = 'R', the vector which defines the elementary reflector */
+/* H(i) is stored in the i-th row of the array V, and */
+
+/* H = I - V' * T * V */
+
+/* Arguments */
+/* ========= */
+
+/* DIRECT (input) CHARACTER*1 */
+/* Specifies the order in which the elementary reflectors are */
+/* multiplied to form the block reflector: */
+/* = 'F': H = H(1) H(2) . . . H(k) (Forward) */
+/* = 'B': H = H(k) . . . H(2) H(1) (Backward) */
+
+/* STOREV (input) CHARACTER*1 */
+/* Specifies how the vectors which define the elementary */
+/* reflectors are stored (see also Further Details): */
+/* = 'C': columnwise */
+/* = 'R': rowwise */
+
+/* N (input) INTEGER */
+/* The order of the block reflector H. N >= 0. */
+
+/* K (input) INTEGER */
+/* The order of the triangular factor T (= the number of */
+/* elementary reflectors). K >= 1. */
+
+/* V (input/output) DOUBLE PRECISION array, dimension */
+/* (LDV,K) if STOREV = 'C' */
+/* (LDV,N) if STOREV = 'R' */
+/* The matrix V. See further details. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. */
+/* If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i). */
+
+/* T (output) DOUBLE PRECISION array, dimension (LDT,K) */
+/* The k by k triangular factor T of the block reflector. */
+/* If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is */
+/* lower triangular. The rest of the array is not used. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= K. */
+
+/* Further Details */
+/* =============== */
+
+/* The shape of the matrix V and the storage of the vectors which define */
+/* the H(i) is best illustrated by the following example with n = 5 and */
+/* k = 3. The elements equal to 1 are not stored; the corresponding */
+/* array elements are modified but restored on exit. The rest of the */
+/* array is not used. */
+
+/* DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': */
+
+/* V = ( 1 ) V = ( 1 v1 v1 v1 v1 ) */
+/* ( v1 1 ) ( 1 v2 v2 v2 ) */
+/* ( v1 v2 1 ) ( 1 v3 v3 ) */
+/* ( v1 v2 v3 ) */
+/* ( v1 v2 v3 ) */
+
+/* DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': */
+
+/* V = ( v1 v2 v3 ) V = ( v1 v1 1 ) */
+/* ( v1 v2 v3 ) ( v2 v2 v2 1 ) */
+/* ( 1 v2 v3 ) ( v3 v3 v3 v3 1 ) */
+/* ( 1 v3 ) */
+/* ( 1 ) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ --tau;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+
+ /* Function Body */
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (lsame_(direct, "F")) {
+ prevlastv = *n;
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ prevlastv = max(i__,prevlastv);
+ if (tau[i__] == 0.) {
+
+/* H(i) = I */
+
+ i__2 = i__;
+ for (j = 1; j <= i__2; ++j) {
+ t[j + i__ * t_dim1] = 0.;
+/* L10: */
+ }
+ } else {
+
+/* general case */
+
+ vii = v[i__ + i__ * v_dim1];
+ v[i__ + i__ * v_dim1] = 1.;
+ if (lsame_(storev, "C")) {
+/* Skip any trailing zeros. */
+ i__2 = i__ + 1;
+ for (lastv = *n; lastv >= i__2; --lastv) {
+ if (v[lastv + i__ * v_dim1] != 0.) {
+ break;
+ }
+ }
+ j = min(lastv,prevlastv);
+
+/* T(1:i-1,i) := - tau(i) * V(i:j,1:i-1)' * V(i:j,i) */
+
+ i__2 = j - i__ + 1;
+ i__3 = i__ - 1;
+ d__1 = -tau[i__];
+ dgemv_("Transpose", &i__2, &i__3, &d__1, &v[i__ + v_dim1],
+ ldv, &v[i__ + i__ * v_dim1], &c__1, &c_b8, &t[
+ i__ * t_dim1 + 1], &c__1);
+ } else {
+/* Skip any trailing zeros. */
+ i__2 = i__ + 1;
+ for (lastv = *n; lastv >= i__2; --lastv) {
+ if (v[i__ + lastv * v_dim1] != 0.) {
+ break;
+ }
+ }
+ j = min(lastv,prevlastv);
+
+/* T(1:i-1,i) := - tau(i) * V(1:i-1,i:j) * V(i,i:j)' */
+
+ i__2 = i__ - 1;
+ i__3 = j - i__ + 1;
+ d__1 = -tau[i__];
+ dgemv_("No transpose", &i__2, &i__3, &d__1, &v[i__ *
+ v_dim1 + 1], ldv, &v[i__ + i__ * v_dim1], ldv, &
+ c_b8, &t[i__ * t_dim1 + 1], &c__1);
+ }
+ v[i__ + i__ * v_dim1] = vii;
+
+/* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) */
+
+ i__2 = i__ - 1;
+ dtrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[
+ t_offset], ldt, &t[i__ * t_dim1 + 1], &c__1);
+ t[i__ + i__ * t_dim1] = tau[i__];
+ if (i__ > 1) {
+ prevlastv = max(prevlastv,lastv);
+ } else {
+ prevlastv = lastv;
+ }
+ }
+/* L20: */
+ }
+ } else {
+ prevlastv = 1;
+ for (i__ = *k; i__ >= 1; --i__) {
+ if (tau[i__] == 0.) {
+
+/* H(i) = I */
+
+ i__1 = *k;
+ for (j = i__; j <= i__1; ++j) {
+ t[j + i__ * t_dim1] = 0.;
+/* L30: */
+ }
+ } else {
+
+/* general case */
+
+ if (i__ < *k) {
+ if (lsame_(storev, "C")) {
+ vii = v[*n - *k + i__ + i__ * v_dim1];
+ v[*n - *k + i__ + i__ * v_dim1] = 1.;
+/* Skip any leading zeros. */
+ i__1 = i__ - 1;
+ for (lastv = 1; lastv <= i__1; ++lastv) {
+ if (v[lastv + i__ * v_dim1] != 0.) {
+ break;
+ }
+ }
+ j = max(lastv,prevlastv);
+
+/* T(i+1:k,i) := */
+/* - tau(i) * V(j:n-k+i,i+1:k)' * V(j:n-k+i,i) */
+
+ i__1 = *n - *k + i__ - j + 1;
+ i__2 = *k - i__;
+ d__1 = -tau[i__];
+ dgemv_("Transpose", &i__1, &i__2, &d__1, &v[j + (i__
+ + 1) * v_dim1], ldv, &v[j + i__ * v_dim1], &
+ c__1, &c_b8, &t[i__ + 1 + i__ * t_dim1], &
+ c__1);
+ v[*n - *k + i__ + i__ * v_dim1] = vii;
+ } else {
+ vii = v[i__ + (*n - *k + i__) * v_dim1];
+ v[i__ + (*n - *k + i__) * v_dim1] = 1.;
+/* Skip any leading zeros. */
+ i__1 = i__ - 1;
+ for (lastv = 1; lastv <= i__1; ++lastv) {
+ if (v[i__ + lastv * v_dim1] != 0.) {
+ break;
+ }
+ }
+ j = max(lastv,prevlastv);
+
+/* T(i+1:k,i) := */
+/* - tau(i) * V(i+1:k,j:n-k+i) * V(i,j:n-k+i)' */
+
+ i__1 = *k - i__;
+ i__2 = *n - *k + i__ - j + 1;
+ d__1 = -tau[i__];
+ dgemv_("No transpose", &i__1, &i__2, &d__1, &v[i__ +
+ 1 + j * v_dim1], ldv, &v[i__ + j * v_dim1],
+ ldv, &c_b8, &t[i__ + 1 + i__ * t_dim1], &c__1);
+ v[i__ + (*n - *k + i__) * v_dim1] = vii;
+ }
+
+/* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) */
+
+ i__1 = *k - i__;
+ dtrmv_("Lower", "No transpose", "Non-unit", &i__1, &t[i__
+ + 1 + (i__ + 1) * t_dim1], ldt, &t[i__ + 1 + i__ *
+ t_dim1], &c__1)
+ ;
+ if (i__ > 1) {
+ prevlastv = min(prevlastv,lastv);
+ } else {
+ prevlastv = lastv;
+ }
+ }
+ t[i__ + i__ * t_dim1] = tau[i__];
+ }
+/* L40: */
+ }
+ }
+ return 0;
+
+/* End of DLARFT */
+
+} /* dlarft_ */
diff --git a/contrib/libs/clapack/dlarfx.c b/contrib/libs/clapack/dlarfx.c
new file mode 100644
index 0000000000..37a20232fa
--- /dev/null
+++ b/contrib/libs/clapack/dlarfx.c
@@ -0,0 +1,730 @@
+/* dlarfx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dlarfx_(char *side, integer *m, integer *n, doublereal *
+ v, doublereal *tau, doublereal *c__, integer *ldc, doublereal *work)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, i__1;
+
+ /* Local variables */
+ integer j;
+ doublereal t1, t2, t3, t4, t5, t6, t7, t8, t9, v1, v2, v3, v4, v5, v6, v7,
+ v8, v9, t10, v10, sum;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *);
+ extern logical lsame_(char *, char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLARFX applies a real elementary reflector H to a real m by n */
+/* matrix C, from either the left or the right. H is represented in the */
+/* form */
+
+/* H = I - tau * v * v' */
+
+/* where tau is a real scalar and v is a real vector. */
+
+/* If tau = 0, then H is taken to be the unit matrix */
+
+/* This version uses inline code if H has order < 11. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': form H * C */
+/* = 'R': form C * H */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* V (input) DOUBLE PRECISION array, dimension (M) if SIDE = 'L' */
+/* or (N) if SIDE = 'R' */
+/* The vector v in the representation of H. */
+
+/* TAU (input) DOUBLE PRECISION */
+/* The value tau in the representation of H. */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/* On entry, the m by n matrix C. */
+/* On exit, C is overwritten by the matrix H * C if SIDE = 'L', */
+/* or C * H if SIDE = 'R'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDA >= (1,M). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension */
+/* (N) if SIDE = 'L' */
+/* or (M) if SIDE = 'R' */
+/* WORK is not referenced if H has order < 11. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --v;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ if (*tau == 0.) {
+ return 0;
+ }
+ if (lsame_(side, "L")) {
+
+/* Form H * C, where H has order m. */
+
+ switch (*m) {
+ case 1: goto L10;
+ case 2: goto L30;
+ case 3: goto L50;
+ case 4: goto L70;
+ case 5: goto L90;
+ case 6: goto L110;
+ case 7: goto L130;
+ case 8: goto L150;
+ case 9: goto L170;
+ case 10: goto L190;
+ }
+
+/* Code for general M */
+
+ dlarf_(side, m, n, &v[1], &c__1, tau, &c__[c_offset], ldc, &work[1]);
+ goto L410;
+L10:
+
+/* Special code for 1 x 1 Householder */
+
+ t1 = 1. - *tau * v[1] * v[1];
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j * c_dim1 + 1] = t1 * c__[j * c_dim1 + 1];
+/* L20: */
+ }
+ goto L410;
+L30:
+
+/* Special code for 2 x 2 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+/* L40: */
+ }
+ goto L410;
+L50:
+
+/* Special code for 3 x 3 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+/* L60: */
+ }
+ goto L410;
+L70:
+
+/* Special code for 4 x 4 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+/* L80: */
+ }
+ goto L410;
+L90:
+
+/* Special code for 5 x 5 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+/* L100: */
+ }
+ goto L410;
+L110:
+
+/* Special code for 6 x 6 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+/* L120: */
+ }
+ goto L410;
+L130:
+
+/* Special code for 7 x 7 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+ c_dim1 + 7];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+ c__[j * c_dim1 + 7] -= sum * t7;
+/* L140: */
+ }
+ goto L410;
+L150:
+
+/* Special code for 8 x 8 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+ c_dim1 + 7] + v8 * c__[j * c_dim1 + 8];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+ c__[j * c_dim1 + 7] -= sum * t7;
+ c__[j * c_dim1 + 8] -= sum * t8;
+/* L160: */
+ }
+ goto L410;
+L170:
+
+/* Special code for 9 x 9 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ v9 = v[9];
+ t9 = *tau * v9;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+ c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j *
+ c_dim1 + 9];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+ c__[j * c_dim1 + 7] -= sum * t7;
+ c__[j * c_dim1 + 8] -= sum * t8;
+ c__[j * c_dim1 + 9] -= sum * t9;
+/* L180: */
+ }
+ goto L410;
+L190:
+
+/* Special code for 10 x 10 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ v9 = v[9];
+ t9 = *tau * v9;
+ v10 = v[10];
+ t10 = *tau * v10;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+ c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j *
+ c_dim1 + 9] + v10 * c__[j * c_dim1 + 10];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+ c__[j * c_dim1 + 7] -= sum * t7;
+ c__[j * c_dim1 + 8] -= sum * t8;
+ c__[j * c_dim1 + 9] -= sum * t9;
+ c__[j * c_dim1 + 10] -= sum * t10;
+/* L200: */
+ }
+ goto L410;
+ } else {
+
+/* Form C * H, where H has order n. */
+
+ switch (*n) {
+ case 1: goto L210;
+ case 2: goto L230;
+ case 3: goto L250;
+ case 4: goto L270;
+ case 5: goto L290;
+ case 6: goto L310;
+ case 7: goto L330;
+ case 8: goto L350;
+ case 9: goto L370;
+ case 10: goto L390;
+ }
+
+/* Code for general N */
+
+ dlarf_(side, m, n, &v[1], &c__1, tau, &c__[c_offset], ldc, &work[1]);
+ goto L410;
+L210:
+
+/* Special code for 1 x 1 Householder */
+
+ t1 = 1. - *tau * v[1] * v[1];
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j + c_dim1] = t1 * c__[j + c_dim1];
+/* L220: */
+ }
+ goto L410;
+L230:
+
+/* Special code for 2 x 2 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + (c_dim1 << 1)] -= sum * t2;
+/* L240: */
+ }
+ goto L410;
+L250:
+
+/* Special code for 3 x 3 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 *
+ c__[j + c_dim1 * 3];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + (c_dim1 << 1)] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+/* L260: */
+ }
+ goto L410;
+L270:
+
+/* Special code for 4 x 4 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 *
+ c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + (c_dim1 << 1)] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + (c_dim1 << 2)] -= sum * t4;
+/* L280: */
+ }
+ goto L410;
+L290:
+
+/* Special code for 5 x 5 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 *
+ c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 *
+ c__[j + c_dim1 * 5];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + (c_dim1 << 1)] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + (c_dim1 << 2)] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+/* L300: */
+ }
+ goto L410;
+L310:
+
+/* Special code for 6 x 6 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 *
+ c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 *
+ c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + (c_dim1 << 1)] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + (c_dim1 << 2)] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+/* L320: */
+ }
+ goto L410;
+L330:
+
+/* Special code for 7 x 7 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 *
+ c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 *
+ c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[
+ j + c_dim1 * 7];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + (c_dim1 << 1)] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + (c_dim1 << 2)] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+ c__[j + c_dim1 * 7] -= sum * t7;
+/* L340: */
+ }
+ goto L410;
+L350:
+
+/* Special code for 8 x 8 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 *
+ c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 *
+ c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[
+ j + c_dim1 * 7] + v8 * c__[j + (c_dim1 << 3)];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + (c_dim1 << 1)] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + (c_dim1 << 2)] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+ c__[j + c_dim1 * 7] -= sum * t7;
+ c__[j + (c_dim1 << 3)] -= sum * t8;
+/* L360: */
+ }
+ goto L410;
+L370:
+
+/* Special code for 9 x 9 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ v9 = v[9];
+ t9 = *tau * v9;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 *
+ c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 *
+ c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[
+ j + c_dim1 * 7] + v8 * c__[j + (c_dim1 << 3)] + v9 * c__[
+ j + c_dim1 * 9];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + (c_dim1 << 1)] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + (c_dim1 << 2)] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+ c__[j + c_dim1 * 7] -= sum * t7;
+ c__[j + (c_dim1 << 3)] -= sum * t8;
+ c__[j + c_dim1 * 9] -= sum * t9;
+/* L380: */
+ }
+ goto L410;
+L390:
+
+/* Special code for 10 x 10 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ v9 = v[9];
+ t9 = *tau * v9;
+ v10 = v[10];
+ t10 = *tau * v10;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 *
+ c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 *
+ c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[
+ j + c_dim1 * 7] + v8 * c__[j + (c_dim1 << 3)] + v9 * c__[
+ j + c_dim1 * 9] + v10 * c__[j + c_dim1 * 10];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + (c_dim1 << 1)] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + (c_dim1 << 2)] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+ c__[j + c_dim1 * 7] -= sum * t7;
+ c__[j + (c_dim1 << 3)] -= sum * t8;
+ c__[j + c_dim1 * 9] -= sum * t9;
+ c__[j + c_dim1 * 10] -= sum * t10;
+/* L400: */
+ }
+ goto L410;
+ }
+L410:
+ return 0;
+
+/* End of DLARFX */
+
+} /* dlarfx_ */
diff --git a/contrib/libs/clapack/dlargv.c b/contrib/libs/clapack/dlargv.c
new file mode 100644
index 0000000000..dee0e67d0b
--- /dev/null
+++ b/contrib/libs/clapack/dlargv.c
@@ -0,0 +1,130 @@
+/* dlargv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlargv_(integer *n, doublereal *x, integer *incx,
+ doublereal *y, integer *incy, doublereal *c__, integer *incc)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ doublereal f, g;
+ integer i__;
+ doublereal t;
+ integer ic, ix, iy;
+ doublereal tt;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLARGV generates a vector of real plane rotations, determined by */
+/* elements of the real vectors x and y. For i = 1,2,...,n */
+
+/* ( c(i) s(i) ) ( x(i) ) = ( a(i) ) */
+/* ( -s(i) c(i) ) ( y(i) ) = ( 0 ) */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of plane rotations to be generated. */
+
+/* X (input/output) DOUBLE PRECISION array, */
+/* dimension (1+(N-1)*INCX) */
+/* On entry, the vector x. */
+/* On exit, x(i) is overwritten by a(i), for i = 1,...,n. */
+
+/* INCX (input) INTEGER */
+/* The increment between elements of X. INCX > 0. */
+
+/* Y (input/output) DOUBLE PRECISION array, */
+/* dimension (1+(N-1)*INCY) */
+/* On entry, the vector y. */
+/* On exit, the sines of the plane rotations. */
+
+/* INCY (input) INTEGER */
+/* The increment between elements of Y. INCY > 0. */
+
+/* C (output) DOUBLE PRECISION array, dimension (1+(N-1)*INCC) */
+/* The cosines of the plane rotations. */
+
+/* INCC (input) INTEGER */
+/* The increment between elements of C. INCC > 0. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --c__;
+ --y;
+ --x;
+
+ /* Function Body */
+ ix = 1;
+ iy = 1;
+ ic = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ f = x[ix];
+ g = y[iy];
+ if (g == 0.) {
+ c__[ic] = 1.;
+ } else if (f == 0.) {
+ c__[ic] = 0.;
+ y[iy] = 1.;
+ x[ix] = g;
+ } else if (abs(f) > abs(g)) {
+ t = g / f;
+ tt = sqrt(t * t + 1.);
+ c__[ic] = 1. / tt;
+ y[iy] = t * c__[ic];
+ x[ix] = f * tt;
+ } else {
+ t = f / g;
+ tt = sqrt(t * t + 1.);
+ y[iy] = 1. / tt;
+ c__[ic] = t * y[iy];
+ x[ix] = g * tt;
+ }
+ ic += *incc;
+ iy += *incy;
+ ix += *incx;
+/* L10: */
+ }
+ return 0;
+
+/* End of DLARGV */
+
+} /* dlargv_ */
diff --git a/contrib/libs/clapack/dlarnv.c b/contrib/libs/clapack/dlarnv.c
new file mode 100644
index 0000000000..a91185309a
--- /dev/null
+++ b/contrib/libs/clapack/dlarnv.c
@@ -0,0 +1,146 @@
+/* dlarnv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlarnv_(integer *idist, integer *iseed, integer *n,
+ doublereal *x)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+
+ /* Builtin functions */
+ double log(doublereal), sqrt(doublereal), cos(doublereal);
+
+ /* Local variables */
+ integer i__;
+ doublereal u[128];
+ integer il, iv, il2;
+ extern /* Subroutine */ int dlaruv_(integer *, integer *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLARNV returns a vector of n random real numbers from a uniform or */
+/* normal distribution. */
+
+/* Arguments */
+/* ========= */
+
+/* IDIST (input) INTEGER */
+/* Specifies the distribution of the random numbers: */
+/* = 1: uniform (0,1) */
+/* = 2: uniform (-1,1) */
+/* = 3: normal (0,1) */
+
+/* ISEED (input/output) INTEGER array, dimension (4) */
+/* On entry, the seed of the random number generator; the array */
+/* elements must be between 0 and 4095, and ISEED(4) must be */
+/* odd. */
+/* On exit, the seed is updated. */
+
+/* N (input) INTEGER */
+/* The number of random numbers to be generated. */
+
+/* X (output) DOUBLE PRECISION array, dimension (N) */
+/* The generated random numbers. */
+
+/* Further Details */
+/* =============== */
+
+/* This routine calls the auxiliary routine DLARUV to generate random */
+/* real numbers from a uniform (0,1) distribution, in batches of up to */
+/* 128 using vectorisable code. The Box-Muller method is used to */
+/* transform numbers from a uniform to a normal distribution. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --x;
+ --iseed;
+
+ /* Function Body */
+ i__1 = *n;
+ for (iv = 1; iv <= i__1; iv += 64) {
+/* Computing MIN */
+ i__2 = 64, i__3 = *n - iv + 1;
+ il = min(i__2,i__3);
+ if (*idist == 3) {
+ il2 = il << 1;
+ } else {
+ il2 = il;
+ }
+
+/* Call DLARUV to generate IL2 numbers from a uniform (0,1) */
+/* distribution (IL2 <= LV) */
+
+ dlaruv_(&iseed[1], &il2, u);
+
+ if (*idist == 1) {
+
+/* Copy generated numbers */
+
+ i__2 = il;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[iv + i__ - 1] = u[i__ - 1];
+/* L10: */
+ }
+ } else if (*idist == 2) {
+
+/* Convert generated numbers to uniform (-1,1) distribution */
+
+ i__2 = il;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[iv + i__ - 1] = u[i__ - 1] * 2. - 1.;
+/* L20: */
+ }
+ } else if (*idist == 3) {
+
+/* Convert generated numbers to normal (0,1) distribution */
+
+ i__2 = il;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[iv + i__ - 1] = sqrt(log(u[(i__ << 1) - 2]) * -2.) * cos(u[(
+ i__ << 1) - 1] * 6.2831853071795864769252867663);
+/* L30: */
+ }
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of DLARNV */
+
+} /* dlarnv_ */
diff --git a/contrib/libs/clapack/dlarra.c b/contrib/libs/clapack/dlarra.c
new file mode 100644
index 0000000000..4571662b8e
--- /dev/null
+++ b/contrib/libs/clapack/dlarra.c
@@ -0,0 +1,156 @@
+/* dlarra.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlarra_(integer *n, doublereal *d__, doublereal *e,
+ doublereal *e2, doublereal *spltol, doublereal *tnrm, integer *nsplit,
+ integer *isplit, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ doublereal tmp1, eabs;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Compute the splitting points with threshold SPLTOL. */
+/* DLARRA sets any "small" off-diagonal elements to zero. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N > 0. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the N diagonal elements of the tridiagonal */
+/* matrix T. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the first (N-1) entries contain the subdiagonal */
+/* elements of the tridiagonal matrix T; E(N) need not be set. */
+/* On exit, the entries E( ISPLIT( I ) ), 1 <= I <= NSPLIT, */
+/* are set to zero, the other entries of E are untouched. */
+
+/* E2 (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the first (N-1) entries contain the SQUARES of the */
+/* subdiagonal elements of the tridiagonal matrix T; */
+/* E2(N) need not be set. */
+/* On exit, the entries E2( ISPLIT( I ) ), */
+/* 1 <= I <= NSPLIT, have been set to zero */
+
+/* SPLTOL (input) DOUBLE PRECISION */
+/* The threshold for splitting. Two criteria can be used: */
+/* SPLTOL<0 : criterion based on absolute off-diagonal value */
+/* SPLTOL>0 : criterion that preserves relative accuracy */
+
+/* TNRM (input) DOUBLE PRECISION */
+/* The norm of the matrix. */
+
+/* NSPLIT (output) INTEGER */
+/* The number of blocks T splits into. 1 <= NSPLIT <= N. */
+
+/* ISPLIT (output) INTEGER array, dimension (N) */
+/* The splitting points, at which T breaks up into blocks. */
+/* The first block consists of rows/columns 1 to ISPLIT(1), */
+/* the second of rows/columns ISPLIT(1)+1 through ISPLIT(2), */
+/* etc., and the NSPLIT-th consists of rows/columns */
+/* ISPLIT(NSPLIT-1)+1 through ISPLIT(NSPLIT)=N. */
+
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --isplit;
+ --e2;
+ --e;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+/* Compute splitting points */
+ *nsplit = 1;
+ if (*spltol < 0.) {
+/* Criterion based on absolute off-diagonal value */
+ tmp1 = abs(*spltol) * *tnrm;
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ eabs = (d__1 = e[i__], abs(d__1));
+ if (eabs <= tmp1) {
+ e[i__] = 0.;
+ e2[i__] = 0.;
+ isplit[*nsplit] = i__;
+ ++(*nsplit);
+ }
+/* L9: */
+ }
+ } else {
+/* Criterion that guarantees relative accuracy */
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ eabs = (d__1 = e[i__], abs(d__1));
+ if (eabs <= *spltol * sqrt((d__1 = d__[i__], abs(d__1))) * sqrt((
+ d__2 = d__[i__ + 1], abs(d__2)))) {
+ e[i__] = 0.;
+ e2[i__] = 0.;
+ isplit[*nsplit] = i__;
+ ++(*nsplit);
+ }
+/* L10: */
+ }
+ }
+ isplit[*nsplit] = *n;
+ return 0;
+
+/* End of DLARRA */
+
+} /* dlarra_ */
diff --git a/contrib/libs/clapack/dlarrb.c b/contrib/libs/clapack/dlarrb.c
new file mode 100644
index 0000000000..31077e335b
--- /dev/null
+++ b/contrib/libs/clapack/dlarrb.c
@@ -0,0 +1,350 @@
+/* dlarrb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlarrb_(integer *n, doublereal *d__, doublereal *lld,
+ integer *ifirst, integer *ilast, doublereal *rtol1, doublereal *rtol2,
+ integer *offset, doublereal *w, doublereal *wgap, doublereal *werr,
+ doublereal *work, integer *iwork, doublereal *pivmin, doublereal *
+ spdiam, integer *twist, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double log(doublereal);
+
+ /* Local variables */
+ integer i__, k, r__, i1, ii, ip;
+ doublereal gap, mid, tmp, back, lgap, rgap, left;
+ integer iter, nint, prev, next;
+ doublereal cvrgd, right, width;
+ extern integer dlaneg_(integer *, doublereal *, doublereal *, doublereal *
+, doublereal *, integer *);
+ integer negcnt;
+ doublereal mnwdth;
+ integer olnint, maxitr;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Given the relatively robust representation(RRR) L D L^T, DLARRB */
+/* does "limited" bisection to refine the eigenvalues of L D L^T, */
+/* W( IFIRST-OFFSET ) through W( ILAST-OFFSET ), to more accuracy. Initial */
+/* guesses for these eigenvalues are input in W, the corresponding estimate */
+/* of the error in these guesses and their gaps are input in WERR */
+/* and WGAP, respectively. During bisection, intervals */
+/* [left, right] are maintained by storing their mid-points and */
+/* semi-widths in the arrays W and WERR respectively. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The N diagonal elements of the diagonal matrix D. */
+
+/* LLD (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (N-1) elements L(i)*L(i)*D(i). */
+
+/* IFIRST (input) INTEGER */
+/* The index of the first eigenvalue to be computed. */
+
+/* ILAST (input) INTEGER */
+/* The index of the last eigenvalue to be computed. */
+
+/* RTOL1 (input) DOUBLE PRECISION */
+/* RTOL2 (input) DOUBLE PRECISION */
+/* Tolerance for the convergence of the bisection intervals. */
+/* An interval [LEFT,RIGHT] has converged if */
+/* RIGHT-LEFT.LT.MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) */
+/* where GAP is the (estimated) distance to the nearest */
+/* eigenvalue. */
+
+/* OFFSET (input) INTEGER */
+/* Offset for the arrays W, WGAP and WERR, i.e., the IFIRST-OFFSET */
+/* through ILAST-OFFSET elements of these arrays are to be used. */
+
+/* W (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On input, W( IFIRST-OFFSET ) through W( ILAST-OFFSET ) are */
+/* estimates of the eigenvalues of L D L^T indexed IFIRST throug */
+/* ILAST. */
+/* On output, these estimates are refined. */
+
+/* WGAP (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* On input, the (estimated) gaps between consecutive */
+/* eigenvalues of L D L^T, i.e., WGAP(I-OFFSET) is the gap between */
+/* eigenvalues I and I+1. Note that if IFIRST.EQ.ILAST */
+/* then WGAP(IFIRST-OFFSET) must be set to ZERO. */
+/* On output, these gaps are refined. */
+
+/* WERR (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On input, WERR( IFIRST-OFFSET ) through WERR( ILAST-OFFSET ) are */
+/* the errors in the estimates of the corresponding elements in W. */
+/* On output, these errors are refined. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (2*N) */
+/* Workspace. */
+
+/* IWORK (workspace) INTEGER array, dimension (2*N) */
+/* Workspace. */
+
+/* PIVMIN (input) DOUBLE PRECISION */
+/* The minimum pivot in the Sturm sequence. */
+
+/* SPDIAM (input) DOUBLE PRECISION */
+/* The spectral diameter of the matrix. */
+
+/* TWIST (input) INTEGER */
+/* The twist index for the twisted factorization that is used */
+/* for the negcount. */
+/* TWIST = N: Compute negcount from L D L^T - LAMBDA I = L+ D+ L+^T */
+/* TWIST = 1: Compute negcount from L D L^T - LAMBDA I = U- D- U-^T */
+/* TWIST = R: Compute negcount from L D L^T - LAMBDA I = N(r) D(r) N(r) */
+
+/* INFO (output) INTEGER */
+/* Error flag. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --iwork;
+ --work;
+ --werr;
+ --wgap;
+ --w;
+ --lld;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+
+ maxitr = (integer) ((log(*spdiam + *pivmin) - log(*pivmin)) / log(2.)) +
+ 2;
+ mnwdth = *pivmin * 2.;
+
+ r__ = *twist;
+ if (r__ < 1 || r__ > *n) {
+ r__ = *n;
+ }
+
+/* Initialize unconverged intervals in [ WORK(2*I-1), WORK(2*I) ]. */
+/* The Sturm Count, Count( WORK(2*I-1) ) is arranged to be I-1, while */
+/* Count( WORK(2*I) ) is stored in IWORK( 2*I ). The integer IWORK( 2*I-1 ) */
+/* for an unconverged interval is set to the index of the next unconverged */
+/* interval, and is -1 or 0 for a converged interval. Thus a linked */
+/* list of unconverged intervals is set up. */
+
+ i1 = *ifirst;
+/* The number of unconverged intervals */
+ nint = 0;
+/* The last unconverged interval found */
+ prev = 0;
+ rgap = wgap[i1 - *offset];
+ i__1 = *ilast;
+ for (i__ = i1; i__ <= i__1; ++i__) {
+ k = i__ << 1;
+ ii = i__ - *offset;
+ left = w[ii] - werr[ii];
+ right = w[ii] + werr[ii];
+ lgap = rgap;
+ rgap = wgap[ii];
+ gap = min(lgap,rgap);
+/* Make sure that [LEFT,RIGHT] contains the desired eigenvalue */
+/* Compute negcount from dstqds facto L+D+L+^T = L D L^T - LEFT */
+
+/* Do while( NEGCNT(LEFT).GT.I-1 ) */
+
+ back = werr[ii];
+L20:
+ negcnt = dlaneg_(n, &d__[1], &lld[1], &left, pivmin, &r__);
+ if (negcnt > i__ - 1) {
+ left -= back;
+ back *= 2.;
+ goto L20;
+ }
+
+/* Do while( NEGCNT(RIGHT).LT.I ) */
+/* Compute negcount from dstqds facto L+D+L+^T = L D L^T - RIGHT */
+
+ back = werr[ii];
+L50:
+ negcnt = dlaneg_(n, &d__[1], &lld[1], &right, pivmin, &r__);
+ if (negcnt < i__) {
+ right += back;
+ back *= 2.;
+ goto L50;
+ }
+ width = (d__1 = left - right, abs(d__1)) * .5;
+/* Computing MAX */
+ d__1 = abs(left), d__2 = abs(right);
+ tmp = max(d__1,d__2);
+/* Computing MAX */
+ d__1 = *rtol1 * gap, d__2 = *rtol2 * tmp;
+ cvrgd = max(d__1,d__2);
+ if (width <= cvrgd || width <= mnwdth) {
+/* This interval has already converged and does not need refinement. */
+/* (Note that the gaps might change through refining the */
+/* eigenvalues, however, they can only get bigger.) */
+/* Remove it from the list. */
+ iwork[k - 1] = -1;
+/* Make sure that I1 always points to the first unconverged interval */
+ if (i__ == i1 && i__ < *ilast) {
+ i1 = i__ + 1;
+ }
+ if (prev >= i1 && i__ <= *ilast) {
+ iwork[(prev << 1) - 1] = i__ + 1;
+ }
+ } else {
+/* unconverged interval found */
+ prev = i__;
+ ++nint;
+ iwork[k - 1] = i__ + 1;
+ iwork[k] = negcnt;
+ }
+ work[k - 1] = left;
+ work[k] = right;
+/* L75: */
+ }
+
+/* Do while( NINT.GT.0 ), i.e. there are still unconverged intervals */
+/* and while (ITER.LT.MAXITR) */
+
+ iter = 0;
+L80:
+ prev = i1 - 1;
+ i__ = i1;
+ olnint = nint;
+ i__1 = olnint;
+ for (ip = 1; ip <= i__1; ++ip) {
+ k = i__ << 1;
+ ii = i__ - *offset;
+ rgap = wgap[ii];
+ lgap = rgap;
+ if (ii > 1) {
+ lgap = wgap[ii - 1];
+ }
+ gap = min(lgap,rgap);
+ next = iwork[k - 1];
+ left = work[k - 1];
+ right = work[k];
+ mid = (left + right) * .5;
+/* semiwidth of interval */
+ width = right - mid;
+/* Computing MAX */
+ d__1 = abs(left), d__2 = abs(right);
+ tmp = max(d__1,d__2);
+/* Computing MAX */
+ d__1 = *rtol1 * gap, d__2 = *rtol2 * tmp;
+ cvrgd = max(d__1,d__2);
+ if (width <= cvrgd || width <= mnwdth || iter == maxitr) {
+/* reduce number of unconverged intervals */
+ --nint;
+/* Mark interval as converged. */
+ iwork[k - 1] = 0;
+ if (i1 == i__) {
+ i1 = next;
+ } else {
+/* Prev holds the last unconverged interval previously examined */
+ if (prev >= i1) {
+ iwork[(prev << 1) - 1] = next;
+ }
+ }
+ i__ = next;
+ goto L100;
+ }
+ prev = i__;
+
+/* Perform one bisection step */
+
+ negcnt = dlaneg_(n, &d__[1], &lld[1], &mid, pivmin, &r__);
+ if (negcnt <= i__ - 1) {
+ work[k - 1] = mid;
+ } else {
+ work[k] = mid;
+ }
+ i__ = next;
+L100:
+ ;
+ }
+ ++iter;
+/* do another loop if there are still unconverged intervals */
+/* However, in the last iteration, all intervals are accepted */
+/* since this is the best we can do. */
+ if (nint > 0 && iter <= maxitr) {
+ goto L80;
+ }
+
+
+/* At this point, all the intervals have converged */
+ i__1 = *ilast;
+ for (i__ = *ifirst; i__ <= i__1; ++i__) {
+ k = i__ << 1;
+ ii = i__ - *offset;
+/* All intervals marked by '0' have been refined. */
+ if (iwork[k - 1] == 0) {
+ w[ii] = (work[k - 1] + work[k]) * .5;
+ werr[ii] = work[k] - w[ii];
+ }
+/* L110: */
+ }
+
+ i__1 = *ilast;
+ for (i__ = *ifirst + 1; i__ <= i__1; ++i__) {
+ k = i__ << 1;
+ ii = i__ - *offset;
+/* Computing MAX */
+ d__1 = 0., d__2 = w[ii] - werr[ii] - w[ii - 1] - werr[ii - 1];
+ wgap[ii - 1] = max(d__1,d__2);
+/* L111: */
+ }
+ return 0;
+
+/* End of DLARRB */
+
+} /* dlarrb_ */
diff --git a/contrib/libs/clapack/dlarrc.c b/contrib/libs/clapack/dlarrc.c
new file mode 100644
index 0000000000..ac08bff9da
--- /dev/null
+++ b/contrib/libs/clapack/dlarrc.c
@@ -0,0 +1,183 @@
+/* dlarrc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlarrc_(char *jobt, integer *n, doublereal *vl, 
+	doublereal *vu, doublereal *d__, doublereal *e, doublereal *pivmin, 
+	integer *eigcnt, integer *lcnt, integer *rcnt, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1;
+
+    /* Local variables */
+    integer i__;
+    doublereal sl, su, tmp, tmp2;
+    logical matt;
+    extern logical lsame_(char *, char *);
+    doublereal lpivot, rpivot;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  Find the number of eigenvalues of the symmetric tridiagonal matrix T */
+/*  that are in the interval (VL,VU] if JOBT = 'T', and of L D L^T */
+/*  if JOBT = 'L'. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOBT    (input) CHARACTER*1 */
+/*          = 'T':  Compute Sturm count for matrix T. */
+/*          = 'L':  Compute Sturm count for matrix L D L^T. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix. N > 0. */
+
+/*  VL      (input) DOUBLE PRECISION */
+/*  VU      (input) DOUBLE PRECISION */
+/*          The lower and upper bounds for the eigenvalues. */
+
+/*  D       (input) DOUBLE PRECISION array, dimension (N) */
+/*          JOBT = 'T': The N diagonal elements of the tridiagonal matrix T. */
+/*          JOBT = 'L': The N diagonal elements of the diagonal matrix D. */
+
+/*  E       (input) DOUBLE PRECISION array, dimension (N) */
+/*          JOBT = 'T': The N-1 offdiagonal elements of the matrix T. */
+/*          JOBT = 'L': The N-1 offdiagonal elements of the matrix L. */
+
+/*  PIVMIN  (input) DOUBLE PRECISION */
+/*          The minimum pivot in the Sturm sequence for T. */
+
+/*  EIGCNT  (output) INTEGER */
+/*          The number of eigenvalues of the symmetric tridiagonal matrix T */
+/*          that are in the interval (VL,VU] */
+
+/*  LCNT    (output) INTEGER */
+/*  RCNT    (output) INTEGER */
+/*          The left and right negcounts of the interval. */
+
+/*  INFO    (output) INTEGER */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*     Beresford Parlett, University of California, Berkeley, USA */
+/*     Jim Demmel, University of California, Berkeley, USA */
+/*     Inderjit Dhillon, University of Texas, Austin, USA */
+/*     Osni Marques, LBNL/NERSC, USA */
+/*     Christof Voemel, University of California, Berkeley, USA */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --e;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+    *lcnt = 0;
+    *rcnt = 0;
+    *eigcnt = 0;
+    matt = lsame_(jobt, "T");
+    if (matt) {
+/*        Sturm sequence count on T */
+/*        LPIVOT/RPIVOT track the current pivot of the Sturm sequence for */
+/*        T - VL*I and T - VU*I respectively; each nonpositive pivot counts */
+/*        one eigenvalue at or below the shift. */
+	lpivot = d__[1] - *vl;
+	rpivot = d__[1] - *vu;
+	if (lpivot <= 0.) {
+	    ++(*lcnt);
+	}
+	if (rpivot <= 0.) {
+	    ++(*rcnt);
+	}
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing 2nd power */
+	    d__1 = e[i__];
+	    tmp = d__1 * d__1;
+/*           Sturm recurrence: pivot(i+1) = d(i+1) - shift - e(i)^2 / pivot(i) */
+	    lpivot = d__[i__ + 1] - *vl - tmp / lpivot;
+	    rpivot = d__[i__ + 1] - *vu - tmp / rpivot;
+	    if (lpivot <= 0.) {
+		++(*lcnt);
+	    }
+	    if (rpivot <= 0.) {
+		++(*rcnt);
+	    }
+/* L10: */
+	}
+    } else {
+/*        Sturm sequence count on L D L^T */
+	sl = -(*vl);
+	su = -(*vu);
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    lpivot = d__[i__] + sl;
+	    rpivot = d__[i__] + su;
+	    if (lpivot <= 0.) {
+		++(*lcnt);
+	    }
+	    if (rpivot <= 0.) {
+		++(*rcnt);
+	    }
+/*           Coupling term of L D L^T: e(i) * d(i) * e(i) */
+	    tmp = e[i__] * d__[i__] * e[i__];
+
+/*           NOTE(review): the TMP2 == 0 branches appear to restart the */
+/*           shifted recurrence when the quotient underflows to zero -- */
+/*           confirm against the reference dlarrc.f */
+	    tmp2 = tmp / lpivot;
+	    if (tmp2 == 0.) {
+		sl = tmp - *vl;
+	    } else {
+		sl = sl * tmp2 - *vl;
+	    }
+
+	    tmp2 = tmp / rpivot;
+	    if (tmp2 == 0.) {
+		su = tmp - *vu;
+	    } else {
+		su = su * tmp2 - *vu;
+	    }
+/* L20: */
+	}
+	lpivot = d__[*n] + sl;
+	rpivot = d__[*n] + su;
+	if (lpivot <= 0.) {
+	    ++(*lcnt);
+	}
+	if (rpivot <= 0.) {
+	    ++(*rcnt);
+	}
+    }
+/*     Eigenvalues in (VL,VU] = negcount(VU) - negcount(VL) */
+    *eigcnt = *rcnt - *lcnt;
+    return 0;
+
+/*     End of DLARRC */
+
+} /* dlarrc_ */
diff --git a/contrib/libs/clapack/dlarrd.c b/contrib/libs/clapack/dlarrd.c
new file mode 100644
index 0000000000..a265762750
--- /dev/null
+++ b/contrib/libs/clapack/dlarrd.c
@@ -0,0 +1,793 @@
+/* dlarrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+static integer c__0 = 0;
+
+/* Subroutine */ int dlarrd_(char *range, char *order, integer *n, doublereal 
+	*vl, doublereal *vu, integer *il, integer *iu, doublereal *gers, 
+	doublereal *reltol, doublereal *d__, doublereal *e, doublereal *e2, 
+	doublereal *pivmin, integer *nsplit, integer *isplit, integer *m, 
+	doublereal *w, doublereal *werr, doublereal *wl, doublereal *wu, 
+	integer *iblock, integer *indexw, doublereal *work, integer *iwork, 
+	integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double log(doublereal);
+
+    /* Local variables */
+    integer i__, j, ib, ie, je, nb;
+    doublereal gl;
+    integer im, in;
+    doublereal gu;
+    integer iw, jee;
+    doublereal eps;
+    integer nwl;
+    doublereal wlu, wul;
+    integer nwu;
+    doublereal tmp1, tmp2;
+    integer iend, jblk, ioff, iout, itmp1, itmp2, jdisc;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    doublereal atoli;
+    integer iwoff, itmax;
+    doublereal wkill, rtoli, uflow, tnorm;
+    extern doublereal dlamch_(char *);
+    integer ibegin;
+    extern /* Subroutine */ int dlaebz_(integer *, integer *, integer *, 
+	    integer *, integer *, integer *, doublereal *, doublereal *, 
+	    doublereal *, doublereal *, doublereal *, doublereal *, integer *, 
+	    doublereal *, doublereal *, integer *, integer *, doublereal *, 
+	    integer *, integer *);
+    integer irange, idiscl, idumma[1];
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    integer idiscu;
+    logical ncnvrg, toofew;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2.1)                        -- */
+/*  -- LAPACK is a software package provided by Univ. of Tennessee,    -- */
+/*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/*  -- April 2009                                                      -- */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DLARRD computes the eigenvalues of a symmetric tridiagonal */
+/*  matrix T to suitable accuracy. This is an auxiliary code to be */
+/*  called from DSTEMR. */
+/*  The user may ask for all eigenvalues, all eigenvalues */
+/*  in the half-open interval (VL, VU], or the IL-th through IU-th */
+/*  eigenvalues. */
+
+/*  To avoid overflow, the matrix must be scaled so that its */
+/*  largest element is no greater than overflow**(1/2) * */
+/*  underflow**(1/4) in absolute value, and for greatest */
+/*  accuracy, it should not be much smaller than that. */
+
+/*  See W. Kahan "Accurate Eigenvalues of a Symmetric Tridiagonal */
+/*  Matrix", Report CS41, Computer Science Dept., Stanford */
+/*  University, July 21, 1966. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  RANGE   (input) CHARACTER */
+/*          = 'A': ("All")   all eigenvalues will be found. */
+/*          = 'V': ("Value") all eigenvalues in the half-open interval */
+/*                           (VL, VU] will be found. */
+/*          = 'I': ("Index") the IL-th through IU-th eigenvalues (of the */
+/*                           entire matrix) will be found. */
+
+/*  ORDER   (input) CHARACTER */
+/*          = 'B': ("By Block") the eigenvalues will be grouped by */
+/*                              split-off block (see IBLOCK, ISPLIT) and */
+/*                              ordered from smallest to largest within */
+/*                              the block. */
+/*          = 'E': ("Entire matrix") */
+/*                              the eigenvalues for the entire matrix */
+/*                              will be ordered from smallest to */
+/*                              largest. */
+
+/*  N       (input) INTEGER */
+/*          The order of the tridiagonal matrix T.  N >= 0. */
+
+/*  VL      (input) DOUBLE PRECISION */
+/*  VU      (input) DOUBLE PRECISION */
+/*          If RANGE='V', the lower and upper bounds of the interval to */
+/*          be searched for eigenvalues.  Eigenvalues less than or equal */
+/*          to VL, or greater than VU, will not be returned.  VL < VU. */
+/*          Not referenced if RANGE = 'A' or 'I'. */
+
+/*  IL      (input) INTEGER */
+/*  IU      (input) INTEGER */
+/*          If RANGE='I', the indices (in ascending order) of the */
+/*          smallest and largest eigenvalues to be returned. */
+/*          1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/*          Not referenced if RANGE = 'A' or 'V'. */
+
+/*  GERS    (input) DOUBLE PRECISION array, dimension (2*N) */
+/*          The N Gerschgorin intervals (the i-th Gerschgorin interval */
+/*          is (GERS(2*i-1), GERS(2*i)). */
+
+/*  RELTOL  (input) DOUBLE PRECISION */
+/*          The minimum relative width of an interval.  When an interval */
+/*          is narrower than RELTOL times the larger (in */
+/*          magnitude) endpoint, then it is considered to be */
+/*          sufficiently small, i.e., converged.  Note: this should */
+/*          always be at least radix*machine epsilon. */
+
+/*  D       (input) DOUBLE PRECISION array, dimension (N) */
+/*          The n diagonal elements of the tridiagonal matrix T. */
+
+/*  E       (input) DOUBLE PRECISION array, dimension (N-1) */
+/*          The (n-1) off-diagonal elements of the tridiagonal matrix T. */
+
+/*  E2      (input) DOUBLE PRECISION array, dimension (N-1) */
+/*          The (n-1) squared off-diagonal elements of the tridiagonal matrix T. */
+
+/*  PIVMIN  (input) DOUBLE PRECISION */
+/*          The minimum pivot allowed in the Sturm sequence for T. */
+
+/*  NSPLIT  (input) INTEGER */
+/*          The number of diagonal blocks in the matrix T. */
+/*          1 <= NSPLIT <= N. */
+
+/*  ISPLIT  (input) INTEGER array, dimension (N) */
+/*          The splitting points, at which T breaks up into submatrices. */
+/*          The first submatrix consists of rows/columns 1 to ISPLIT(1), */
+/*          the second of rows/columns ISPLIT(1)+1 through ISPLIT(2), */
+/*          etc., and the NSPLIT-th consists of rows/columns */
+/*          ISPLIT(NSPLIT-1)+1 through ISPLIT(NSPLIT)=N. */
+/*          (Only the first NSPLIT elements will actually be used, but */
+/*          since the user cannot know a priori what value NSPLIT will */
+/*          have, N words must be reserved for ISPLIT.) */
+
+/*  M       (output) INTEGER */
+/*          The actual number of eigenvalues found. 0 <= M <= N. */
+/*          (See also the description of INFO=2,3.) */
+
+/*  W       (output) DOUBLE PRECISION array, dimension (N) */
+/*          On exit, the first M elements of W will contain the */
+/*          eigenvalue approximations. DLARRD computes an interval */
+/*          I_j = (a_j, b_j] that includes eigenvalue j. The eigenvalue */
+/*          approximation is given as the interval midpoint */
+/*          W(j)= ( a_j + b_j)/2. The corresponding error is bounded by */
+/*          WERR(j) = abs( a_j - b_j)/2 */
+
+/*  WERR    (output) DOUBLE PRECISION array, dimension (N) */
+/*          The error bound on the corresponding eigenvalue approximation */
+/*          in W. */
+
+/*  WL      (output) DOUBLE PRECISION */
+/*  WU      (output) DOUBLE PRECISION */
+/*          The interval (WL, WU] contains all the wanted eigenvalues. */
+/*          If RANGE='V', then WL=VL and WU=VU. */
+/*          If RANGE='A', then WL and WU are the global Gerschgorin bounds */
+/*                        on the spectrum. */
+/*          If RANGE='I', then WL and WU are computed by DLAEBZ from the */
+/*                        index range specified. */
+
+/*  IBLOCK  (output) INTEGER array, dimension (N) */
+/*          At each row/column j where E(j) is zero or small, the */
+/*          matrix T is considered to split into a block diagonal */
+/*          matrix.  On exit, if INFO = 0, IBLOCK(i) specifies to which */
+/*          block (from 1 to the number of blocks) the eigenvalue W(i) */
+/*          belongs.  (DLARRD may use the remaining N-M elements as */
+/*          workspace.) */
+
+/*  INDEXW  (output) INTEGER array, dimension (N) */
+/*          The indices of the eigenvalues within each block (submatrix); */
+/*          for example, INDEXW(i)= j and IBLOCK(i)=k imply that the */
+/*          i-th eigenvalue W(i) is the j-th eigenvalue in block k. */
+
+/*  WORK    (workspace) DOUBLE PRECISION array, dimension (4*N) */
+
+/*  IWORK   (workspace) INTEGER array, dimension (3*N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  some or all of the eigenvalues failed to converge or */
+/*                were not computed: */
+/*                =1 or 3: Bisection failed to converge for some */
+/*                        eigenvalues; these eigenvalues are flagged by a */
+/*                        negative block number.  The effect is that the */
+/*                        eigenvalues may not be as accurate as the */
+/*                        absolute and relative tolerances.  This is */
+/*                        generally caused by unexpectedly inaccurate */
+/*                        arithmetic. */
+/*                =2 or 3: RANGE='I' only: Not all of the eigenvalues */
+/*                        IL:IU were found. */
+/*                        Effect: M < IU+1-IL */
+/*                        Cause:  non-monotonic arithmetic, causing the */
+/*                                Sturm sequence to be non-monotonic. */
+/*                        Cure:   recalculate, using RANGE='A', and pick */
+/*                                out eigenvalues IL:IU.  In some cases, */
+/*                                increasing the PARAMETER "FUDGE" may */
+/*                                make things work. */
+/*                = 4:    RANGE='I', and the Gershgorin interval */
+/*                        initially used was too small.  No eigenvalues */
+/*                        were computed. */
+/*                        Probable cause: your machine has sloppy */
+/*                                        floating-point arithmetic. */
+/*                        Cure: Increase the PARAMETER "FUDGE", */
+/*                              recompile, and try again. */
+
+/*  Internal Parameters */
+/*  =================== */
+
+/*  FUDGE   DOUBLE PRECISION, default = 2 */
+/*          A "fudge factor" to widen the Gershgorin intervals.  Ideally, */
+/*          a value of 1 should work, but on machines with sloppy */
+/*          arithmetic, this needs to be larger.  The default for */
+/*          publicly released versions should be large enough to handle */
+/*          the worst machine around.  Note that this has no effect */
+/*          on accuracy of the solution. */
+
+/*  Based on contributions by */
+/*     W. Kahan, University of California, Berkeley, USA */
+/*     Beresford Parlett, University of California, Berkeley, USA */
+/*     Jim Demmel, University of California, Berkeley, USA */
+/*     Inderjit Dhillon, University of Texas, Austin, USA */
+/*     Osni Marques, LBNL/NERSC, USA */
+/*     Christof Voemel, University of California, Berkeley, USA */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --iwork;
+    --work;
+    --indexw;
+    --iblock;
+    --werr;
+    --w;
+    --isplit;
+    --e2;
+    --e;
+    --d__;
+    --gers;
+
+    /* Function Body */
+    *info = 0;
+
+/*     Decode RANGE */
+
+    if (lsame_(range, "A")) {
+	irange = 1;
+    } else if (lsame_(range, "V")) {
+	irange = 2;
+    } else if (lsame_(range, "I")) {
+	irange = 3;
+    } else {
+	irange = 0;
+    }
+
+/*     Check for Errors */
+
+    if (irange <= 0) {
+	*info = -1;
+    } else if (! (lsame_(order, "B") || lsame_(order, 
+	    "E"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (irange == 2) {
+	if (*vl >= *vu) {
+	    *info = -5;
+	}
+    } else if (irange == 3 && (*il < 1 || *il > max(1,*n))) {
+	*info = -6;
+    } else if (irange == 3 && (*iu < min(*n,*il) || *iu > *n)) {
+	*info = -7;
+    }
+
+    if (*info != 0) {
+	return 0;
+    }
+/*     Initialize error flags */
+    *info = 0;
+    ncnvrg = FALSE_;
+    toofew = FALSE_;
+/*     Quick return if possible */
+    *m = 0;
+    if (*n == 0) {
+	return 0;
+    }
+/*     Simplification: */
+    if (irange == 3 && *il == 1 && *iu == *n) {
+	irange = 1;
+    }
+/*     Get machine constants */
+    eps = dlamch_("P");
+    uflow = dlamch_("U");
+/*     Special Case when N=1 */
+/*     Treat case of 1x1 matrix for quick return */
+    if (*n == 1) {
+/*        Note: && binds tighter than || in C, so this accepts the single */
+/*        eigenvalue for RANGE='A', for RANGE='V' with D(1) in (VL,VU], or */
+/*        for RANGE='I' with IL=IU=1. */
+	if (irange == 1 || irange == 2 && d__[1] > *vl && d__[1] <= *vu || 
+		irange == 3 && *il == 1 && *iu == 1) {
+	    *m = 1;
+	    w[1] = d__[1];
+/*           The computation error of the eigenvalue is zero */
+	    werr[1] = 0.;
+	    iblock[1] = 1;
+	    indexw[1] = 1;
+	}
+	return 0;
+    }
+/*     NB is the minimum vector length for vector bisection, or 0 */
+/*     if only scalar is to be done. */
+    nb = ilaenv_(&c__1, "DSTEBZ", " ", n, &c_n1, &c_n1, &c_n1);
+    if (nb <= 1) {
+	nb = 0;
+    }
+/*     Find global spectral radius */
+    gl = d__[1];
+    gu = d__[1];
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MIN */
+	d__1 = gl, d__2 = gers[(i__ << 1) - 1];
+	gl = min(d__1,d__2);
+/* Computing MAX */
+	d__1 = gu, d__2 = gers[i__ * 2];
+	gu = max(d__1,d__2);
+/* L5: */
+    }
+/*     Compute global Gerschgorin bounds and spectral diameter */
+/* Computing MAX */
+    d__1 = abs(gl), d__2 = abs(gu);
+    tnorm = max(d__1,d__2);
+    gl = gl - tnorm * 2. * eps * *n - *pivmin * 4.;
+    gu = gu + tnorm * 2. * eps * *n + *pivmin * 4.;
+/*     [JAN/28/2009] remove the line below since SPDIAM variable not use */
+/*     SPDIAM = GU - GL */
+/*     Input arguments for DLAEBZ: */
+/*     The relative tolerance.  An interval (a,b] lies within */
+/*     "relative tolerance" if  b-a < RELTOL*max(|a|,|b|), */
+    rtoli = *reltol;
+/*     Set the absolute tolerance for interval convergence to zero to force */
+/*     interval convergence based on relative size of the interval. */
+/*     This is dangerous because intervals might not converge when RELTOL is */
+/*     small. But at least a very small number should be selected so that for */
+/*     strongly graded matrices, the code can get relatively accurate */
+/*     eigenvalues. */
+    atoli = uflow * 4. + *pivmin * 4.;
+    if (irange == 3) {
+/*        RANGE='I': Compute an interval containing eigenvalues */
+/*        IL through IU. The initial interval [GL,GU] from the global */
+/*        Gerschgorin bounds GL and GU is refined by DLAEBZ. */
+	itmax = (integer) ((log(tnorm + *pivmin) - log(*pivmin)) / log(2.)) + 
+		2;
+	work[*n + 1] = gl;
+	work[*n + 2] = gl;
+	work[*n + 3] = gu;
+	work[*n + 4] = gu;
+	work[*n + 5] = gl;
+	work[*n + 6] = gu;
+	iwork[1] = -1;
+	iwork[2] = -1;
+	iwork[3] = *n + 1;
+	iwork[4] = *n + 1;
+	iwork[5] = *il - 1;
+	iwork[6] = *iu;
+
+	dlaebz_(&c__3, &itmax, n, &c__2, &c__2, &nb, &atoli, &rtoli, pivmin, &
+		d__[1], &e[1], &e2[1], &iwork[5], &work[*n + 1], &work[*n + 5]
+, &iout, &iwork[1], &w[1], &iblock[1], &iinfo);
+	if (iinfo != 0) {
+	    *info = iinfo;
+	    return 0;
+	}
+/*        On exit, output intervals may not be ordered by ascending negcount */
+	if (iwork[6] == *iu) {
+	    *wl = work[*n + 1];
+	    wlu = work[*n + 3];
+	    nwl = iwork[1];
+	    *wu = work[*n + 4];
+	    wul = work[*n + 2];
+	    nwu = iwork[4];
+	} else {
+	    *wl = work[*n + 2];
+	    wlu = work[*n + 4];
+	    nwl = iwork[2];
+	    *wu = work[*n + 3];
+	    wul = work[*n + 1];
+	    nwu = iwork[3];
+	}
+/*        On exit, the interval [WL, WLU] contains a value with negcount NWL, */
+/*        and [WUL, WU] contains a value with negcount NWU. */
+	if (nwl < 0 || nwl >= *n || nwu < 1 || nwu > *n) {
+	    *info = 4;
+	    return 0;
+	}
+    } else if (irange == 2) {
+	*wl = *vl;
+	*wu = *vu;
+    } else if (irange == 1) {
+	*wl = gl;
+	*wu = gu;
+    }
+/*     Find Eigenvalues -- Loop Over blocks and recompute NWL and NWU. */
+/*     NWL accumulates the number of eigenvalues .le. WL, */
+/*     NWU accumulates the number of eigenvalues .le. WU */
+    *m = 0;
+    iend = 0;
+    *info = 0;
+    nwl = 0;
+    nwu = 0;
+
+    i__1 = *nsplit;
+    for (jblk = 1; jblk <= i__1; ++jblk) {
+	ioff = iend;
+	ibegin = ioff + 1;
+	iend = isplit[jblk];
+	in = iend - ioff;
+
+	if (in == 1) {
+/*           1x1 block */
+	    if (*wl >= d__[ibegin] - *pivmin) {
+		++nwl;
+	    }
+	    if (*wu >= d__[ibegin] - *pivmin) {
+		++nwu;
+	    }
+	    if (irange == 1 || *wl < d__[ibegin] - *pivmin && *wu >= d__[
+		    ibegin] - *pivmin) {
+		++(*m);
+		w[*m] = d__[ibegin];
+		werr[*m] = 0.;
+/*              The gap for a single block doesn't matter for the later */
+/*              algorithm and is assigned an arbitrary large value */
+		iblock[*m] = jblk;
+		indexw[*m] = 1;
+	    }
+/*        Disabled 2x2 case because of a failure on the following matrix */
+/*        RANGE = 'I', IL = IU = 4 */
+/*          Original Tridiagonal, d = [ */
+/*           -0.150102010615740E+00 */
+/*           -0.849897989384260E+00 */
+/*           -0.128208148052635E-15 */
+/*            0.128257718286320E-15 */
+/*          ]; */
+/*          e = [ */
+/*           -0.357171383266986E+00 */
+/*           -0.180411241501588E-15 */
+/*           -0.175152352710251E-15 */
+/*          ]; */
+
+/*         ELSE IF( IN.EQ.2 ) THEN */
+/* *           2x2 block */
+/*            DISC = SQRT( (HALF*(D(IBEGIN)-D(IEND)))**2 + E(IBEGIN)**2 ) */
+/*            TMP1 = HALF*(D(IBEGIN)+D(IEND)) */
+/*            L1 = TMP1 - DISC */
+/*            IF( WL.GE. L1-PIVMIN ) */
+/*     $         NWL = NWL + 1 */
+/*            IF( WU.GE. L1-PIVMIN ) */
+/*     $         NWU = NWU + 1 */
+/*            IF( IRANGE.EQ.ALLRNG .OR. ( WL.LT.L1-PIVMIN .AND. WU.GE. */
+/*     $          L1-PIVMIN ) ) THEN */
+/*               M = M + 1 */
+/*               W( M ) = L1 */
+/* *              The uncertainty of eigenvalues of a 2x2 matrix is very small */
+/*               WERR( M ) = EPS * ABS( W( M ) ) * TWO */
+/*               IBLOCK( M ) = JBLK */
+/*               INDEXW( M ) = 1 */
+/*            ENDIF */
+/*            L2 = TMP1 + DISC */
+/*            IF( WL.GE. L2-PIVMIN ) */
+/*     $         NWL = NWL + 1 */
+/*            IF( WU.GE. L2-PIVMIN ) */
+/*     $         NWU = NWU + 1 */
+/*            IF( IRANGE.EQ.ALLRNG .OR. ( WL.LT.L2-PIVMIN .AND. WU.GE. */
+/*     $          L2-PIVMIN ) ) THEN */
+/*               M = M + 1 */
+/*               W( M ) = L2 */
+/* *              The uncertainty of eigenvalues of a 2x2 matrix is very small */
+/*               WERR( M ) = EPS * ABS( W( M ) ) * TWO */
+/*               IBLOCK( M ) = JBLK */
+/*               INDEXW( M ) = 2 */
+/*            ENDIF */
+	} else {
+/*           General Case - block of size IN >= 2 */
+/*           Compute local Gerschgorin interval and use it as the initial */
+/*           interval for DLAEBZ */
+	    gu = d__[ibegin];
+	    gl = d__[ibegin];
+	    tmp1 = 0.;
+	    i__2 = iend;
+	    for (j = ibegin; j <= i__2; ++j) {
+/* Computing MIN */
+		d__1 = gl, d__2 = gers[(j << 1) - 1];
+		gl = min(d__1,d__2);
+/* Computing MAX */
+		d__1 = gu, d__2 = gers[j * 2];
+		gu = max(d__1,d__2);
+/* L40: */
+	    }
+/*           [JAN/28/2009] */
+/*           change SPDIAM by TNORM in lines 2 and 3 thereafter */
+/*           line 1: remove computation of SPDIAM (not useful anymore) */
+/*           SPDIAM = GU - GL */
+/*           GL = GL - FUDGE*SPDIAM*EPS*IN - FUDGE*PIVMIN */
+/*           GU = GU + FUDGE*SPDIAM*EPS*IN + FUDGE*PIVMIN */
+	    gl = gl - tnorm * 2. * eps * in - *pivmin * 2.;
+	    gu = gu + tnorm * 2. * eps * in + *pivmin * 2.;
+
+	    if (irange > 1) {
+		if (gu < *wl) {
+/*                 the local block contains none of the wanted eigenvalues */
+		    nwl += in;
+		    nwu += in;
+		    goto L70;
+		}
+/*              refine search interval if possible, only range (WL,WU] matters */
+		gl = max(gl,*wl);
+		gu = min(gu,*wu);
+		if (gl >= gu) {
+		    goto L70;
+		}
+	    }
+/*           Find negcount of initial interval boundaries GL and GU */
+	    work[*n + 1] = gl;
+	    work[*n + in + 1] = gu;
+	    dlaebz_(&c__1, &c__0, &in, &in, &c__1, &nb, &atoli, &rtoli, 
+		    pivmin, &d__[ibegin], &e[ibegin], &e2[ibegin], idumma, &
+		    work[*n + 1], &work[*n + (in << 1) + 1], &im, &iwork[1], &
+		    w[*m + 1], &iblock[*m + 1], &iinfo);
+	    if (iinfo != 0) {
+		*info = iinfo;
+		return 0;
+	    }
+
+	    nwl += iwork[1];
+	    nwu += iwork[in + 1];
+/*           IWOFF shifts DLAEBZ's block-local eigenvalue indices to */
+/*           positions in W past the M eigenvalues already stored (see */
+/*           the JE loop below). */
+	    iwoff = *m - iwork[1];
+/*           Compute Eigenvalues */
+	    itmax = (integer) ((log(gu - gl + *pivmin) - log(*pivmin)) / log(
+		    2.)) + 2;
+	    dlaebz_(&c__2, &itmax, &in, &in, &c__1, &nb, &atoli, &rtoli, 
+		    pivmin, &d__[ibegin], &e[ibegin], &e2[ibegin], idumma, &
+		    work[*n + 1], &work[*n + (in << 1) + 1], &iout, &iwork[1], 
+		    &w[*m + 1], &iblock[*m + 1], &iinfo);
+	    if (iinfo != 0) {
+		*info = iinfo;
+		return 0;
+	    }
+
+/*           Copy eigenvalues into W and IBLOCK */
+/*           Use -JBLK for block number for unconverged eigenvalues. */
+/*           Loop over the number of output intervals from DLAEBZ */
+	    i__2 = iout;
+	    for (j = 1; j <= i__2; ++j) {
+/*              eigenvalue approximation is middle point of interval */
+		tmp1 = (work[j + *n] + work[j + in + *n]) * .5;
+/*              semi length of error interval */
+		tmp2 = (d__1 = work[j + *n] - work[j + in + *n], abs(d__1)) * 
+			.5;
+		if (j > iout - iinfo) {
+/*                 Flag non-convergence. */
+		    ncnvrg = TRUE_;
+		    ib = -jblk;
+		} else {
+		    ib = jblk;
+		}
+		i__3 = iwork[j + in] + iwoff;
+		for (je = iwork[j] + 1 + iwoff; je <= i__3; ++je) {
+		    w[je] = tmp1;
+		    werr[je] = tmp2;
+		    indexw[je] = je - iwoff;
+		    iblock[je] = ib;
+/* L50: */
+		}
+/* L60: */
+	    }
+
+	    *m += im;
+	}
+L70:
+	;
+    }
+/*     If RANGE='I', then (WL,WU) contains eigenvalues NWL+1,...,NWU */
+/*     If NWL+1 < IL or NWU > IU, discard extra eigenvalues. */
+    if (irange == 3) {
+/*        IDISCL/IDISCU = number of eigenvalues to discard from the left */
+/*        and right ends so that exactly IL..IU remain. */
+	idiscl = *il - 1 - nwl;
+	idiscu = nwu - *iu;
+
+	if (idiscl > 0) {
+	    im = 0;
+	    i__1 = *m;
+	    for (je = 1; je <= i__1; ++je) {
+/*              Remove some of the smallest eigenvalues from the left so that */
+/*              at the end IDISCL =0. Move all eigenvalues up to the left. */
+		if (w[je] <= wlu && idiscl > 0) {
+		    --idiscl;
+		} else {
+		    ++im;
+		    w[im] = w[je];
+		    werr[im] = werr[je];
+		    indexw[im] = indexw[je];
+		    iblock[im] = iblock[je];
+		}
+/* L80: */
+	    }
+	    *m = im;
+	}
+	if (idiscu > 0) {
+/*           Remove some of the largest eigenvalues from the right so that */
+/*           at the end IDISCU =0. Move all eigenvalues up to the left. */
+	    im = *m + 1;
+	    for (je = *m; je >= 1; --je) {
+		if (w[je] >= wul && idiscu > 0) {
+		    --idiscu;
+		} else {
+		    --im;
+		    w[im] = w[je];
+		    werr[im] = werr[je];
+		    indexw[im] = indexw[je];
+		    iblock[im] = iblock[je];
+		}
+/* L81: */
+	    }
+	    jee = 0;
+	    i__1 = *m;
+	    for (je = im; je <= i__1; ++je) {
+		++jee;
+		w[jee] = w[je];
+		werr[jee] = werr[je];
+		indexw[jee] = indexw[je];
+		iblock[jee] = iblock[je];
+/* L82: */
+	    }
+	    *m = *m - im + 1;
+	}
+	if (idiscl > 0 || idiscu > 0) {
+/*           Code to deal with effects of bad arithmetic. (If N(w) is */
+/*           monotone non-decreasing, this should never happen.) */
+/*           Some low eigenvalues to be discarded are not in (WL,WLU], */
+/*           or high eigenvalues to be discarded are not in (WUL,WU] */
+/*           so just kill off the smallest IDISCL/largest IDISCU */
+/*           eigenvalues, by marking the corresponding IBLOCK = 0 */
+	    if (idiscl > 0) {
+		wkill = *wu;
+		i__1 = idiscl;
+		for (jdisc = 1; jdisc <= i__1; ++jdisc) {
+		    iw = 0;
+		    i__2 = *m;
+		    for (je = 1; je <= i__2; ++je) {
+			if (iblock[je] != 0 && (w[je] < wkill || iw == 0)) {
+			    iw = je;
+			    wkill = w[je];
+			}
+/* L90: */
+		    }
+		    iblock[iw] = 0;
+/* L100: */
+		}
+	    }
+	    if (idiscu > 0) {
+		wkill = *wl;
+		i__1 = idiscu;
+		for (jdisc = 1; jdisc <= i__1; ++jdisc) {
+		    iw = 0;
+		    i__2 = *m;
+		    for (je = 1; je <= i__2; ++je) {
+			if (iblock[je] != 0 && (w[je] >= wkill || iw == 0)) {
+			    iw = je;
+			    wkill = w[je];
+			}
+/* L110: */
+		    }
+		    iblock[iw] = 0;
+/* L120: */
+		}
+	    }
+/*           Now erase all eigenvalues with IBLOCK set to zero */
+	    im = 0;
+	    i__1 = *m;
+	    for (je = 1; je <= i__1; ++je) {
+		if (iblock[je] != 0) {
+		    ++im;
+		    w[im] = w[je];
+		    werr[im] = werr[je];
+		    indexw[im] = indexw[je];
+		    iblock[im] = iblock[je];
+		}
+/* L130: */
+	    }
+	    *m = im;
+	}
+	if (idiscl < 0 || idiscu < 0) {
+	    toofew = TRUE_;
+	}
+    }
+
+    if (irange == 1 && *m != *n || irange == 3 && *m != *iu - *il + 1) {
+	toofew = TRUE_;
+    }
+/*     If ORDER='B', do nothing the eigenvalues are already sorted by */
+/*     block. */
+/*     If ORDER='E', sort the eigenvalues from smallest to largest */
+    if (lsame_(order, "E") && *nsplit > 1) {
+	i__1 = *m - 1;
+	for (je = 1; je <= i__1; ++je) {
+	    ie = 0;
+	    tmp1 = w[je];
+	    i__2 = *m;
+	    for (j = je + 1; j <= i__2; ++j) {
+		if (w[j] < tmp1) {
+		    ie = j;
+		    tmp1 = w[j];
+		}
+/* L140: */
+	    }
+	    if (ie != 0) {
+		tmp2 = werr[ie];
+		itmp1 = iblock[ie];
+		itmp2 = indexw[ie];
+		w[ie] = w[je];
+		werr[ie] = werr[je];
+		iblock[ie] = iblock[je];
+		indexw[ie] = indexw[je];
+		w[je] = tmp1;
+		werr[je] = tmp2;
+		iblock[je] = itmp1;
+		indexw[je] = itmp2;
+	    }
+/* L150: */
+	}
+    }
+
+    *info = 0;
+    if (ncnvrg) {
+	++(*info);
+    }
+    if (toofew) {
+	*info += 2;
+    }
+    return 0;
+
+/*     End of DLARRD */
+
+} /* dlarrd_ */
diff --git a/contrib/libs/clapack/dlarre.c b/contrib/libs/clapack/dlarre.c
new file mode 100644
index 0000000000..763c416234
--- /dev/null
+++ b/contrib/libs/clapack/dlarre.c
@@ -0,0 +1,861 @@
+/* dlarre.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int dlarre_(char *range, integer *n, doublereal *vl,
+ doublereal *vu, integer *il, integer *iu, doublereal *d__, doublereal
+ *e, doublereal *e2, doublereal *rtol1, doublereal *rtol2, doublereal *
+ spltol, integer *nsplit, integer *isplit, integer *m, doublereal *w,
+ doublereal *werr, doublereal *wgap, integer *iblock, integer *indexw,
+ doublereal *gers, doublereal *pivmin, doublereal *work, integer *
+ iwork, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ doublereal d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal), log(doublereal);
+
+ /* Local variables */
+ integer i__, j;
+ doublereal s1, s2;
+ integer mb;
+ doublereal gl;
+ integer in, mm;
+ doublereal gu;
+ integer cnt;
+ doublereal eps, tau, tmp, rtl;
+ integer cnt1, cnt2;
+ doublereal tmp1, eabs;
+ integer iend, jblk;
+ doublereal eold;
+ integer indl;
+ doublereal dmax__, emax;
+ integer wend, idum, indu;
+ doublereal rtol;
+ integer iseed[4];
+ doublereal avgap, sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ logical norep;
+ extern /* Subroutine */ int dlasq2_(integer *, doublereal *, integer *);
+ extern doublereal dlamch_(char *);
+ integer ibegin;
+ logical forceb;
+ integer irange;
+ doublereal sgndef;
+ extern /* Subroutine */ int dlarra_(integer *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, integer *, integer *,
+ integer *), dlarrb_(integer *, doublereal *, doublereal *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *, integer *), dlarrc_(char *
+, integer *, doublereal *, doublereal *, doublereal *, doublereal
+ *, doublereal *, integer *, integer *, integer *, integer *);
+ integer wbegin;
+ extern /* Subroutine */ int dlarrd_(char *, char *, integer *, doublereal
+ *, doublereal *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *
+, integer *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *);
+ doublereal safmin, spdiam;
+ extern /* Subroutine */ int dlarrk_(integer *, integer *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, integer *);
+ logical usedqd;
+ doublereal clwdth, isleft;
+ extern /* Subroutine */ int dlarnv_(integer *, integer *, integer *,
+ doublereal *);
+ doublereal isrght, bsrtol, dpivot;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* To find the desired eigenvalues of a given real symmetric */
+/* tridiagonal matrix T, DLARRE sets any "small" off-diagonal */
+/* elements to zero, and for each unreduced block T_i, it finds */
+/* (a) a suitable shift at one end of the block's spectrum, */
+/* (b) the base representation, T_i - sigma_i I = L_i D_i L_i^T, and */
+/* (c) eigenvalues of each L_i D_i L_i^T. */
+/* The representations and eigenvalues found are then used by */
+/* DSTEMR to compute the eigenvectors of T. */
+/* The accuracy varies depending on whether bisection is used to */
+/* find a few eigenvalues or the dqds algorithm (subroutine DLASQ2) to */
+/* compute all and then discard any unwanted one. */
+/* As an added benefit, DLARRE also outputs the n */
+/* Gerschgorin intervals for the matrices L_i D_i L_i^T. */
+
+/* Arguments */
+/* ========= */
+
+/* RANGE (input) CHARACTER */
+/* = 'A': ("All") all eigenvalues will be found. */
+/* = 'V': ("Value") all eigenvalues in the half-open interval */
+/* (VL, VU] will be found. */
+/* = 'I': ("Index") the IL-th through IU-th eigenvalues (of the */
+/* entire matrix) will be found. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N > 0. */
+
+/* VL (input/output) DOUBLE PRECISION */
+/* VU (input/output) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds for the eigenvalues. */
+/* Eigenvalues less than or equal to VL, or greater than VU, */
+/* will not be returned. VL < VU. */
+/* If RANGE='I' or ='A', DLARRE computes bounds on the desired */
+/* part of the spectrum. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the N diagonal elements of the tridiagonal */
+/* matrix T. */
+/* On exit, the N diagonal elements of the diagonal */
+/* matrices D_i. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the first (N-1) entries contain the subdiagonal */
+/* elements of the tridiagonal matrix T; E(N) need not be set. */
+/* On exit, E contains the subdiagonal elements of the unit */
+/* bidiagonal matrices L_i. The entries E( ISPLIT( I ) ), */
+/* 1 <= I <= NSPLIT, contain the base points sigma_i on output. */
+
+/* E2 (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the first (N-1) entries contain the SQUARES of the */
+/* subdiagonal elements of the tridiagonal matrix T; */
+/* E2(N) need not be set. */
+/* On exit, the entries E2( ISPLIT( I ) ), */
+/* 1 <= I <= NSPLIT, have been set to zero */
+
+/* RTOL1 (input) DOUBLE PRECISION */
+/* RTOL2 (input) DOUBLE PRECISION */
+/* Parameters for bisection. */
+/* An interval [LEFT,RIGHT] has converged if */
+/* RIGHT-LEFT.LT.MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) */
+
+/* SPLTOL (input) DOUBLE PRECISION */
+/* The threshold for splitting. */
+
+/* NSPLIT (output) INTEGER */
+/* The number of blocks T splits into. 1 <= NSPLIT <= N. */
+
+/* ISPLIT (output) INTEGER array, dimension (N) */
+/* The splitting points, at which T breaks up into blocks. */
+/* The first block consists of rows/columns 1 to ISPLIT(1), */
+/* the second of rows/columns ISPLIT(1)+1 through ISPLIT(2), */
+/* etc., and the NSPLIT-th consists of rows/columns */
+/* ISPLIT(NSPLIT-1)+1 through ISPLIT(NSPLIT)=N. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues (of all L_i D_i L_i^T) */
+/* found. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* The first M elements contain the eigenvalues. The */
+/* eigenvalues of each of the blocks, L_i D_i L_i^T, are */
+/* sorted in ascending order ( DLARRE may use the */
+/* remaining N-M elements as workspace). */
+
+/* WERR (output) DOUBLE PRECISION array, dimension (N) */
+/* The error bound on the corresponding eigenvalue in W. */
+
+/* WGAP (output) DOUBLE PRECISION array, dimension (N) */
+/* The separation from the right neighbor eigenvalue in W. */
+/* The gap is only with respect to the eigenvalues of the same block */
+/* as each block has its own representation tree. */
+/* Exception: at the right end of a block we store the left gap */
+
+/* IBLOCK (output) INTEGER array, dimension (N) */
+/* The indices of the blocks (submatrices) associated with the */
+/* corresponding eigenvalues in W; IBLOCK(i)=1 if eigenvalue */
+/* W(i) belongs to the first block from the top, =2 if W(i) */
+/* belongs to the second block, etc. */
+
+/* INDEXW (output) INTEGER array, dimension (N) */
+/* The indices of the eigenvalues within each block (submatrix); */
+/* for example, INDEXW(i)= 10 and IBLOCK(i)=2 imply that the */
+/* i-th eigenvalue W(i) is the 10-th eigenvalue in block 2 */
+
+/* GERS (output) DOUBLE PRECISION array, dimension (2*N) */
+/* The N Gerschgorin intervals (the i-th Gerschgorin interval */
+/* is (GERS(2*i-1), GERS(2*i)). */
+
+/* PIVMIN (output) DOUBLE PRECISION */
+/* The minimum pivot in the Sturm sequence for T. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (6*N) */
+/* Workspace. */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+/* Workspace. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* > 0: A problem occurred in DLARRE. */
+/* < 0: One of the called subroutines signaled an internal problem. */
+/* Needs inspection of the corresponding parameter IINFO */
+/* for further information. */
+
+/* =-1: Problem in DLARRD. */
+/* = 2: No base representation could be found in MAXTRY iterations. */
+/* Increasing MAXTRY and recompilation might be a remedy. */
+/* =-3: Problem in DLARRB when computing the refined root */
+/* representation for DLASQ2. */
+/* =-4: Problem in DLARRB when performing bisection on the */
+/* desired part of the spectrum. */
+/* =-5: Problem in DLASQ2. */
+/* =-6: Problem in DLASQ2. */
+
+/* Further Details */
+/* The base representations are required to suffer very little */
+/* element growth and consequently define all their eigenvalues to */
+/* high relative accuracy. */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --iwork;
+ --work;
+ --gers;
+ --indexw;
+ --iblock;
+ --wgap;
+ --werr;
+ --w;
+ --isplit;
+ --e2;
+ --e;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+
+/* Decode RANGE */
+
+ if (lsame_(range, "A")) {
+ irange = 1;
+ } else if (lsame_(range, "V")) {
+ irange = 3;
+ } else if (lsame_(range, "I")) {
+ irange = 2;
+ }
+ *m = 0;
+/* Get machine constants */
+ safmin = dlamch_("S");
+ eps = dlamch_("P");
+/* Set parameters */
+ rtl = sqrt(eps);
+ bsrtol = sqrt(eps);
+/* Treat case of 1x1 matrix for quick return */
+ if (*n == 1) {
+ if (irange == 1 || irange == 3 && d__[1] > *vl && d__[1] <= *vu ||
+ irange == 2 && *il == 1 && *iu == 1) {
+ *m = 1;
+ w[1] = d__[1];
+/* The computation error of the eigenvalue is zero */
+ werr[1] = 0.;
+ wgap[1] = 0.;
+ iblock[1] = 1;
+ indexw[1] = 1;
+ gers[1] = d__[1];
+ gers[2] = d__[1];
+ }
+/* store the shift for the initial RRR, which is zero in this case */
+ e[1] = 0.;
+ return 0;
+ }
+/* General case: tridiagonal matrix of order > 1 */
+
+/* Init WERR, WGAP. Compute Gerschgorin intervals and spectral diameter. */
+/* Compute maximum off-diagonal entry and pivmin. */
+ gl = d__[1];
+ gu = d__[1];
+ eold = 0.;
+ emax = 0.;
+ e[*n] = 0.;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ werr[i__] = 0.;
+ wgap[i__] = 0.;
+ eabs = (d__1 = e[i__], abs(d__1));
+ if (eabs >= emax) {
+ emax = eabs;
+ }
+ tmp1 = eabs + eold;
+ gers[(i__ << 1) - 1] = d__[i__] - tmp1;
+/* Computing MIN */
+ d__1 = gl, d__2 = gers[(i__ << 1) - 1];
+ gl = min(d__1,d__2);
+ gers[i__ * 2] = d__[i__] + tmp1;
+/* Computing MAX */
+ d__1 = gu, d__2 = gers[i__ * 2];
+ gu = max(d__1,d__2);
+ eold = eabs;
+/* L5: */
+ }
+/* The minimum pivot allowed in the Sturm sequence for T */
+/* Computing MAX */
+/* Computing 2nd power */
+ d__3 = emax;
+ d__1 = 1., d__2 = d__3 * d__3;
+ *pivmin = safmin * max(d__1,d__2);
+/* Compute spectral diameter. The Gerschgorin bounds give an */
+/* estimate that is wrong by at most a factor of SQRT(2) */
+ spdiam = gu - gl;
+/* Compute splitting points */
+ dlarra_(n, &d__[1], &e[1], &e2[1], spltol, &spdiam, nsplit, &isplit[1], &
+ iinfo);
+/* Can force use of bisection instead of faster DQDS. */
+/* Option left in the code for future multisection work. */
+ forceb = FALSE_;
+/* Initialize USEDQD, DQDS should be used for ALLRNG unless someone */
+/* explicitly wants bisection. */
+ usedqd = irange == 1 && ! forceb;
+ if (irange == 1 && ! forceb) {
+/* Set interval [VL,VU] that contains all eigenvalues */
+ *vl = gl;
+ *vu = gu;
+ } else {
+/* We call DLARRD to find crude approximations to the eigenvalues */
+/* in the desired range. In case IRANGE = INDRNG, we also obtain the */
+/* interval (VL,VU] that contains all the wanted eigenvalues. */
+/* An interval [LEFT,RIGHT] has converged if */
+/* RIGHT-LEFT.LT.RTOL*MAX(ABS(LEFT),ABS(RIGHT)) */
+/* DLARRD needs a WORK of size 4*N, IWORK of size 3*N */
+ dlarrd_(range, "B", n, vl, vu, il, iu, &gers[1], &bsrtol, &d__[1], &e[
+ 1], &e2[1], pivmin, nsplit, &isplit[1], &mm, &w[1], &werr[1],
+ vl, vu, &iblock[1], &indexw[1], &work[1], &iwork[1], &iinfo);
+ if (iinfo != 0) {
+ *info = -1;
+ return 0;
+ }
+/* Make sure that the entries M+1 to N in W, WERR, IBLOCK, INDEXW are 0 */
+ i__1 = *n;
+ for (i__ = mm + 1; i__ <= i__1; ++i__) {
+ w[i__] = 0.;
+ werr[i__] = 0.;
+ iblock[i__] = 0;
+ indexw[i__] = 0;
+/* L14: */
+ }
+ }
+/* ** */
+/* Loop over unreduced blocks */
+ ibegin = 1;
+ wbegin = 1;
+ i__1 = *nsplit;
+ for (jblk = 1; jblk <= i__1; ++jblk) {
+ iend = isplit[jblk];
+ in = iend - ibegin + 1;
+/* 1 X 1 block */
+ if (in == 1) {
+ if (irange == 1 || irange == 3 && d__[ibegin] > *vl && d__[ibegin]
+ <= *vu || irange == 2 && iblock[wbegin] == jblk) {
+ ++(*m);
+ w[*m] = d__[ibegin];
+ werr[*m] = 0.;
+/* The gap for a 1x1 block is not used by the later algorithm; */
+/* it is set to zero here (the value itself is irrelevant) */
+ wgap[*m] = 0.;
+ iblock[*m] = jblk;
+ indexw[*m] = 1;
+ ++wbegin;
+ }
+/* E( IEND ) holds the shift for the initial RRR */
+ e[iend] = 0.;
+ ibegin = iend + 1;
+ goto L170;
+ }
+
+/* Blocks of size larger than 1x1 */
+
+/* E( IEND ) will hold the shift for the initial RRR, for now set it =0 */
+ e[iend] = 0.;
+
+/* Find local outer bounds GL,GU for the block */
+ gl = d__[ibegin];
+ gu = d__[ibegin];
+ i__2 = iend;
+ for (i__ = ibegin; i__ <= i__2; ++i__) {
+/* Computing MIN */
+ d__1 = gers[(i__ << 1) - 1];
+ gl = min(d__1,gl);
+/* Computing MAX */
+ d__1 = gers[i__ * 2];
+ gu = max(d__1,gu);
+/* L15: */
+ }
+ spdiam = gu - gl;
+ if (! (irange == 1 && ! forceb)) {
+/* Count the number of eigenvalues in the current block. */
+ mb = 0;
+ i__2 = mm;
+ for (i__ = wbegin; i__ <= i__2; ++i__) {
+ if (iblock[i__] == jblk) {
+ ++mb;
+ } else {
+ goto L21;
+ }
+/* L20: */
+ }
+L21:
+ if (mb == 0) {
+/* No eigenvalue in the current block lies in the desired range */
+/* E( IEND ) holds the shift for the initial RRR */
+ e[iend] = 0.;
+ ibegin = iend + 1;
+ goto L170;
+ } else {
+/* Decide whether dqds or bisection is more efficient */
+ usedqd = (doublereal) mb > in * .5 && ! forceb;
+ wend = wbegin + mb - 1;
+/* Calculate gaps for the current block */
+/* In later stages, when representations for individual */
+/* eigenvalues are different, we use SIGMA = E( IEND ). */
+ sigma = 0.;
+ i__2 = wend - 1;
+ for (i__ = wbegin; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = 0., d__2 = w[i__ + 1] - werr[i__ + 1] - (w[i__] +
+ werr[i__]);
+ wgap[i__] = max(d__1,d__2);
+/* L30: */
+ }
+/* Computing MAX */
+ d__1 = 0., d__2 = *vu - sigma - (w[wend] + werr[wend]);
+ wgap[wend] = max(d__1,d__2);
+/* Find local index of the first and last desired evalue. */
+ indl = indexw[wbegin];
+ indu = indexw[wend];
+ }
+ }
+ if (irange == 1 && ! forceb || usedqd) {
+/* Case of DQDS */
+/* Find approximations to the extremal eigenvalues of the block */
+ dlarrk_(&in, &c__1, &gl, &gu, &d__[ibegin], &e2[ibegin], pivmin, &
+ rtl, &tmp, &tmp1, &iinfo);
+ if (iinfo != 0) {
+ *info = -1;
+ return 0;
+ }
+/* Computing MAX */
+ d__2 = gl, d__3 = tmp - tmp1 - eps * 100. * (d__1 = tmp - tmp1,
+ abs(d__1));
+ isleft = max(d__2,d__3);
+ dlarrk_(&in, &in, &gl, &gu, &d__[ibegin], &e2[ibegin], pivmin, &
+ rtl, &tmp, &tmp1, &iinfo);
+ if (iinfo != 0) {
+ *info = -1;
+ return 0;
+ }
+/* Computing MIN */
+ d__2 = gu, d__3 = tmp + tmp1 + eps * 100. * (d__1 = tmp + tmp1,
+ abs(d__1));
+ isrght = min(d__2,d__3);
+/* Improve the estimate of the spectral diameter */
+ spdiam = isrght - isleft;
+ } else {
+/* Case of bisection */
+/* Find approximations to the wanted extremal eigenvalues */
+/* Computing MAX */
+ d__2 = gl, d__3 = w[wbegin] - werr[wbegin] - eps * 100. * (d__1 =
+ w[wbegin] - werr[wbegin], abs(d__1));
+ isleft = max(d__2,d__3);
+/* Computing MIN */
+ d__2 = gu, d__3 = w[wend] + werr[wend] + eps * 100. * (d__1 = w[
+ wend] + werr[wend], abs(d__1));
+ isrght = min(d__2,d__3);
+ }
+/* Decide whether the base representation for the current block */
+/* L_JBLK D_JBLK L_JBLK^T = T_JBLK - sigma_JBLK I */
+/* should be on the left or the right end of the current block. */
+/* The strategy is to shift to the end which is "more populated" */
+/* Furthermore, decide whether to use DQDS for the computation of */
+/* the eigenvalue approximations at the end of DLARRE or bisection. */
+/* dqds is chosen if all eigenvalues are desired or the number of */
+/* eigenvalues to be computed is large compared to the blocksize. */
+ if (irange == 1 && ! forceb) {
+/* If all the eigenvalues have to be computed, we use dqd */
+ usedqd = TRUE_;
+/* INDL is the local index of the first eigenvalue to compute */
+ indl = 1;
+ indu = in;
+/* MB = number of eigenvalues to compute */
+ mb = in;
+ wend = wbegin + mb - 1;
+/* Define 1/4 and 3/4 points of the spectrum */
+ s1 = isleft + spdiam * .25;
+ s2 = isrght - spdiam * .25;
+ } else {
+/* DLARRD has computed IBLOCK and INDEXW for each eigenvalue */
+/* approximation. */
+/* choose sigma */
+ if (usedqd) {
+ s1 = isleft + spdiam * .25;
+ s2 = isrght - spdiam * .25;
+ } else {
+ tmp = min(isrght,*vu) - max(isleft,*vl);
+ s1 = max(isleft,*vl) + tmp * .25;
+ s2 = min(isrght,*vu) - tmp * .25;
+ }
+ }
+/* Compute the negcount at the 1/4 and 3/4 points */
+ if (mb > 1) {
+ dlarrc_("T", &in, &s1, &s2, &d__[ibegin], &e[ibegin], pivmin, &
+ cnt, &cnt1, &cnt2, &iinfo);
+ }
+ if (mb == 1) {
+ sigma = gl;
+ sgndef = 1.;
+ } else if (cnt1 - indl >= indu - cnt2) {
+ if (irange == 1 && ! forceb) {
+ sigma = max(isleft,gl);
+ } else if (usedqd) {
+/* use Gerschgorin bound as shift to get pos def matrix */
+/* for dqds */
+ sigma = isleft;
+ } else {
+/* use approximation of the first desired eigenvalue of the */
+/* block as shift */
+ sigma = max(isleft,*vl);
+ }
+ sgndef = 1.;
+ } else {
+ if (irange == 1 && ! forceb) {
+ sigma = min(isrght,gu);
+ } else if (usedqd) {
+/* use Gerschgorin bound as shift to get neg def matrix */
+/* for dqds */
+ sigma = isrght;
+ } else {
+/* use approximation of the first desired eigenvalue of the */
+/* block as shift */
+ sigma = min(isrght,*vu);
+ }
+ sgndef = -1.;
+ }
+/* An initial SIGMA has been chosen that will be used for computing */
+/* T - SIGMA I = L D L^T */
+/* Define the increment TAU of the shift in case the initial shift */
+/* needs to be refined to obtain a factorization with not too much */
+/* element growth. */
+ if (usedqd) {
+/* The initial SIGMA was to the outer end of the spectrum */
+/* the matrix is definite and we need not retreat. */
+ tau = spdiam * eps * *n + *pivmin * 2.;
+ } else {
+ if (mb > 1) {
+ clwdth = w[wend] + werr[wend] - w[wbegin] - werr[wbegin];
+ avgap = (d__1 = clwdth / (doublereal) (wend - wbegin), abs(
+ d__1));
+ if (sgndef == 1.) {
+/* Computing MAX */
+ d__1 = wgap[wbegin];
+ tau = max(d__1,avgap) * .5;
+/* Computing MAX */
+ d__1 = tau, d__2 = werr[wbegin];
+ tau = max(d__1,d__2);
+ } else {
+/* Computing MAX */
+ d__1 = wgap[wend - 1];
+ tau = max(d__1,avgap) * .5;
+/* Computing MAX */
+ d__1 = tau, d__2 = werr[wend];
+ tau = max(d__1,d__2);
+ }
+ } else {
+ tau = werr[wbegin];
+ }
+ }
+
+ for (idum = 1; idum <= 6; ++idum) {
+/* Compute L D L^T factorization of tridiagonal matrix T - sigma I. */
+/* Store D in WORK(1:IN), L in WORK(IN+1:2*IN), and reciprocals of */
+/* pivots in WORK(2*IN+1:3*IN) */
+ dpivot = d__[ibegin] - sigma;
+ work[1] = dpivot;
+ dmax__ = abs(work[1]);
+ j = ibegin;
+ i__2 = in - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[(in << 1) + i__] = 1. / work[i__];
+ tmp = e[j] * work[(in << 1) + i__];
+ work[in + i__] = tmp;
+ dpivot = d__[j + 1] - sigma - tmp * e[j];
+ work[i__ + 1] = dpivot;
+/* Computing MAX */
+ d__1 = dmax__, d__2 = abs(dpivot);
+ dmax__ = max(d__1,d__2);
+ ++j;
+/* L70: */
+ }
+/* check for element growth */
+ if (dmax__ > spdiam * 64.) {
+ norep = TRUE_;
+ } else {
+ norep = FALSE_;
+ }
+ if (usedqd && ! norep) {
+/* Ensure the definiteness of the representation */
+/* All entries of D (of L D L^T) must have the same sign */
+ i__2 = in;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ tmp = sgndef * work[i__];
+ if (tmp < 0.) {
+ norep = TRUE_;
+ }
+/* L71: */
+ }
+ }
+ if (norep) {
+/* Note that in the case of IRANGE=ALLRNG, we use the Gerschgorin */
+/* shift which makes the matrix definite. So we should end up */
+/* here really only in the case of IRANGE = VALRNG or INDRNG. */
+ if (idum == 5) {
+ if (sgndef == 1.) {
+/* The fudged Gerschgorin shift should succeed */
+ sigma = gl - spdiam * 2. * eps * *n - *pivmin * 4.;
+ } else {
+ sigma = gu + spdiam * 2. * eps * *n + *pivmin * 4.;
+ }
+ } else {
+ sigma -= sgndef * tau;
+ tau *= 2.;
+ }
+ } else {
+/* an initial RRR is found */
+ goto L83;
+ }
+/* L80: */
+ }
+/* if the program reaches this point, no base representation could be */
+/* found in MAXTRY iterations. */
+ *info = 2;
+ return 0;
+L83:
+/* At this point, we have found an initial base representation */
+/* T - SIGMA I = L D L^T with not too much element growth. */
+/* Store the shift. */
+ e[iend] = sigma;
+/* Store D and L. */
+ dcopy_(&in, &work[1], &c__1, &d__[ibegin], &c__1);
+ i__2 = in - 1;
+ dcopy_(&i__2, &work[in + 1], &c__1, &e[ibegin], &c__1);
+ if (mb > 1) {
+
+/* Perturb each entry of the base representation by a small */
+/* (but random) relative amount to overcome difficulties with */
+/* glued matrices. */
+
+ for (i__ = 1; i__ <= 4; ++i__) {
+ iseed[i__ - 1] = 1;
+/* L122: */
+ }
+ i__2 = (in << 1) - 1;
+ dlarnv_(&c__2, iseed, &i__2, &work[1]);
+ i__2 = in - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ d__[ibegin + i__ - 1] *= eps * 8. * work[i__] + 1.;
+ e[ibegin + i__ - 1] *= eps * 8. * work[in + i__] + 1.;
+/* L125: */
+ }
+ d__[iend] *= eps * 4. * work[in] + 1.;
+
+ }
+
+/* Don't update the Gerschgorin intervals because keeping track */
+/* of the updates would be too much work in DLARRV. */
+/* We update W instead and use it to locate the proper Gerschgorin */
+/* intervals. */
+/* Compute the required eigenvalues of L D L' by bisection or dqds */
+ if (! usedqd) {
+/* If DLARRD has been used, shift the eigenvalue approximations */
+/* according to their representation. This is necessary for */
+/* a uniform DLARRV since dqds computes eigenvalues of the */
+/* shifted representation. In DLARRV, W will always hold the */
+/* UNshifted eigenvalue approximation. */
+ i__2 = wend;
+ for (j = wbegin; j <= i__2; ++j) {
+ w[j] -= sigma;
+ werr[j] += (d__1 = w[j], abs(d__1)) * eps;
+/* L134: */
+ }
+/* call DLARRB to reduce eigenvalue error of the approximations */
+/* from DLARRD */
+ i__2 = iend - 1;
+ for (i__ = ibegin; i__ <= i__2; ++i__) {
+/* Computing 2nd power */
+ d__1 = e[i__];
+ work[i__] = d__[i__] * (d__1 * d__1);
+/* L135: */
+ }
+/* use bisection to find EV from INDL to INDU */
+ i__2 = indl - 1;
+ dlarrb_(&in, &d__[ibegin], &work[ibegin], &indl, &indu, rtol1,
+ rtol2, &i__2, &w[wbegin], &wgap[wbegin], &werr[wbegin], &
+ work[(*n << 1) + 1], &iwork[1], pivmin, &spdiam, &in, &
+ iinfo);
+ if (iinfo != 0) {
+ *info = -4;
+ return 0;
+ }
+/* DLARRB computes all gaps correctly except for the last one */
+/* Record distance to VU/GU */
+/* Computing MAX */
+ d__1 = 0., d__2 = *vu - sigma - (w[wend] + werr[wend]);
+ wgap[wend] = max(d__1,d__2);
+ i__2 = indu;
+ for (i__ = indl; i__ <= i__2; ++i__) {
+ ++(*m);
+ iblock[*m] = jblk;
+ indexw[*m] = i__;
+/* L138: */
+ }
+ } else {
+/* Call dqds to get all eigs (and then possibly delete unwanted */
+/* eigenvalues). */
+/* Note that dqds finds the eigenvalues of the L D L^T representation */
+/* of T to high relative accuracy. High relative accuracy */
+/* might be lost when the shift of the RRR is subtracted to obtain */
+/* the eigenvalues of T. However, T is not guaranteed to define its */
+/* eigenvalues to high relative accuracy anyway. */
+/* Set RTOL to the order of the tolerance used in DLASQ2 */
+/* This is an ESTIMATED error, the worst case bound is 4*N*EPS */
+/* which is usually too large and requires unnecessary work to be */
+/* done by bisection when computing the eigenvectors */
+ rtol = log((doublereal) in) * 4. * eps;
+ j = ibegin;
+ i__2 = in - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[(i__ << 1) - 1] = (d__1 = d__[j], abs(d__1));
+ work[i__ * 2] = e[j] * e[j] * work[(i__ << 1) - 1];
+ ++j;
+/* L140: */
+ }
+ work[(in << 1) - 1] = (d__1 = d__[iend], abs(d__1));
+ work[in * 2] = 0.;
+ dlasq2_(&in, &work[1], &iinfo);
+ if (iinfo != 0) {
+/* If IINFO = -5 then an index is part of a tight cluster */
+/* and should be changed. The index is in IWORK(1) and the */
+/* gap is in WORK(N+1) */
+ *info = -5;
+ return 0;
+ } else {
+/* Test that all eigenvalues are positive as expected */
+ i__2 = in;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] < 0.) {
+ *info = -6;
+ return 0;
+ }
+/* L149: */
+ }
+ }
+ if (sgndef > 0.) {
+ i__2 = indu;
+ for (i__ = indl; i__ <= i__2; ++i__) {
+ ++(*m);
+ w[*m] = work[in - i__ + 1];
+ iblock[*m] = jblk;
+ indexw[*m] = i__;
+/* L150: */
+ }
+ } else {
+ i__2 = indu;
+ for (i__ = indl; i__ <= i__2; ++i__) {
+ ++(*m);
+ w[*m] = -work[i__];
+ iblock[*m] = jblk;
+ indexw[*m] = i__;
+/* L160: */
+ }
+ }
+ i__2 = *m;
+ for (i__ = *m - mb + 1; i__ <= i__2; ++i__) {
+/* the value of RTOL below should be the tolerance in DLASQ2 */
+ werr[i__] = rtol * (d__1 = w[i__], abs(d__1));
+/* L165: */
+ }
+ i__2 = *m - 1;
+ for (i__ = *m - mb + 1; i__ <= i__2; ++i__) {
+/* compute the right gap between the intervals */
+/* Computing MAX */
+ d__1 = 0., d__2 = w[i__ + 1] - werr[i__ + 1] - (w[i__] + werr[
+ i__]);
+ wgap[i__] = max(d__1,d__2);
+/* L166: */
+ }
+/* Computing MAX */
+ d__1 = 0., d__2 = *vu - sigma - (w[*m] + werr[*m]);
+ wgap[*m] = max(d__1,d__2);
+ }
+/* proceed with next block */
+ ibegin = iend + 1;
+ wbegin = wend + 1;
+L170:
+ ;
+ }
+
+ return 0;
+
+/* end of DLARRE */
+
+} /* dlarre_ */
diff --git a/contrib/libs/clapack/dlarrf.c b/contrib/libs/clapack/dlarrf.c
new file mode 100644
index 0000000000..1d42c881b1
--- /dev/null
+++ b/contrib/libs/clapack/dlarrf.c
@@ -0,0 +1,423 @@
+/* dlarrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* Unit stride, passed by reference to the Fortran-style dcopy_ calls below. */
+static integer c__1 = 1;
+
+/* Subroutine */ int dlarrf_(integer *n, doublereal *d__, doublereal *l,
+	doublereal *ld, integer *clstrt, integer *clend, doublereal *w,
+	doublereal *wgap, doublereal *werr, doublereal *spdiam, doublereal *
+	clgapl, doublereal *clgapr, doublereal *pivmin, doublereal *sigma,
+	doublereal *dplus, doublereal *lplus, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__;
+    doublereal s, bestshift, smlgrowth, eps, tmp, max1, max2, rrr1, rrr2,
+	    znm2, growthbound, fail, fact, oldp;
+    integer indx;
+    doublereal prod;
+    integer ktry;
+    doublereal fail2, avgap, ldmax, rdmax;
+    integer shift;
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    logical dorrr1;
+    extern doublereal dlamch_(char *);
+    doublereal ldelta;
+    logical nofail;
+    doublereal mingap, lsigma, rdelta;
+    extern logical disnan_(doublereal *);
+    logical forcer;
+    doublereal rsigma, clwdth;
+    logical sawnan1, sawnan2, tryrrr1;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+/* * */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Given the initial representation L D L^T and its cluster of close */
+/* eigenvalues (in a relative measure), W( CLSTRT ), W( CLSTRT+1 ), ... */
+/* W( CLEND ), DLARRF finds a new relatively robust representation */
+/* L D L^T - SIGMA I = L(+) D(+) L(+)^T such that at least one of the */
+/* eigenvalues of L(+) D(+) L(+)^T is relatively isolated. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix (subblock, if the matrix splitted). */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The N diagonal elements of the diagonal matrix D. */
+
+/* L (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (N-1) subdiagonal elements of the unit bidiagonal */
+/* matrix L. */
+
+/* LD (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (N-1) elements L(i)*D(i). */
+
+/* CLSTRT (input) INTEGER */
+/* The index of the first eigenvalue in the cluster. */
+
+/* CLEND (input) INTEGER */
+/* The index of the last eigenvalue in the cluster. */
+
+/* W (input) DOUBLE PRECISION array, dimension >= (CLEND-CLSTRT+1) */
+/* The eigenvalue APPROXIMATIONS of L D L^T in ascending order. */
+/* W( CLSTRT ) through W( CLEND ) form the cluster of relatively */
+/* close eigenalues. */
+
+/* WGAP (input/output) DOUBLE PRECISION array, dimension >= (CLEND-CLSTRT+1) */
+/* The separation from the right neighbor eigenvalue in W. */
+
+/* WERR (input) DOUBLE PRECISION array, dimension >= (CLEND-CLSTRT+1) */
+/* WERR contain the semiwidth of the uncertainty */
+/* interval of the corresponding eigenvalue APPROXIMATION in W */
+
+/* SPDIAM (input) estimate of the spectral diameter obtained from the */
+/* Gerschgorin intervals */
+
+/* CLGAPL, CLGAPR (input) absolute gap on each end of the cluster. */
+/* Set by the calling routine to protect against shifts too close */
+/* to eigenvalues outside the cluster. */
+
+/* PIVMIN (input) DOUBLE PRECISION */
+/* The minimum pivot allowed in the Sturm sequence. */
+
+/* SIGMA (output) DOUBLE PRECISION */
+/* The shift used to form L(+) D(+) L(+)^T. */
+
+/* DPLUS (output) DOUBLE PRECISION array, dimension (N) */
+/* The N diagonal elements of the diagonal matrix D(+). */
+
+/* LPLUS (output) DOUBLE PRECISION array, dimension (N-1) */
+/* The first (N-1) elements of LPLUS contain the subdiagonal */
+/* elements of the unit bidiagonal matrix L(+). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (2*N) */
+/* Workspace. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --work;
+    --lplus;
+    --dplus;
+    --werr;
+    --wgap;
+    --w;
+    --ld;
+    --l;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+    fact = 2.;
+    eps = dlamch_("Precision");
+    shift = 0;
+    forcer = FALSE_;
+/* Note that we cannot guarantee that for any of the shifts tried, */
+/* the factorization has a small or even moderate element growth. */
+/* There could be Ritz values at both ends of the cluster and despite */
+/* backing off, there are examples where all factorizations tried */
+/* (in IEEE mode, allowing zero pivots & infinities) have INFINITE */
+/* element growth. */
+/* For this reason, we should use PIVMIN in this subroutine so that at */
+/* least the L D L^T factorization exists. It can be checked afterwards */
+/* whether the element growth caused bad residuals/orthogonality. */
+/* Decide whether the code should accept the best among all */
+/* representations despite large element growth or signal INFO=1 */
+    nofail = TRUE_;
+
+/* Compute the average gap length of the cluster */
+    clwdth = (d__1 = w[*clend] - w[*clstrt], abs(d__1)) + werr[*clend] + werr[
+	    *clstrt];
+    avgap = clwdth / (doublereal) (*clend - *clstrt);
+    mingap = min(*clgapl,*clgapr);
+/* Initial values for shifts to both ends of cluster */
+/* Computing MIN */
+    d__1 = w[*clstrt], d__2 = w[*clend];
+    lsigma = min(d__1,d__2) - werr[*clstrt];
+/* Computing MAX */
+    d__1 = w[*clstrt], d__2 = w[*clend];
+    rsigma = max(d__1,d__2) + werr[*clend];
+/* Use a small fudge to make sure that we really shift to the outside */
+    lsigma -= abs(lsigma) * 4. * eps;
+    rsigma += abs(rsigma) * 4. * eps;
+/* Compute upper bounds for how much to back off the initial shifts */
+    ldmax = mingap * .25 + *pivmin * 2.;
+    rdmax = mingap * .25 + *pivmin * 2.;
+/* Computing MAX */
+    d__1 = avgap, d__2 = wgap[*clstrt];
+    ldelta = max(d__1,d__2) / fact;
+/* Computing MAX */
+    d__1 = avgap, d__2 = wgap[*clend - 1];
+    rdelta = max(d__1,d__2) / fact;
+
+/* Initialize the record of the best representation found */
+
+    s = dlamch_("S");
+    smlgrowth = 1. / s;
+    fail = (doublereal) (*n - 1) * mingap / (*spdiam * eps);
+    fail2 = (doublereal) (*n - 1) * mingap / (*spdiam * sqrt(eps));
+    bestshift = lsigma;
+
+/* while (KTRY <= KTRYMAX) */
+    ktry = 0;
+    growthbound = *spdiam * 8.;
+L5:
+    sawnan1 = FALSE_;
+    sawnan2 = FALSE_;
+/* Ensure that we do not back off too much of the initial shifts */
+    ldelta = min(ldmax,ldelta);
+    rdelta = min(rdmax,rdelta);
+/* Compute the element growth when shifting to both ends of the cluster */
+/* accept the shift if there is no element growth at one of the two ends */
+/* Left end */
+/* Stationary qd-like transform: factor L D L^T - LSIGMA I into */
+/* DPLUS/LPLUS, tracking the largest |DPLUS| in MAX1. */
+    s = -lsigma;
+    dplus[1] = d__[1] + s;
+    if (abs(dplus[1]) < *pivmin) {
+	dplus[1] = -(*pivmin);
+/* Need to set SAWNAN1 because refined RRR test should not be used */
+/* in this case */
+	sawnan1 = TRUE_;
+    }
+    max1 = abs(dplus[1]);
+    i__1 = *n - 1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	lplus[i__] = ld[i__] / dplus[i__];
+	s = s * lplus[i__] * l[i__] - lsigma;
+	dplus[i__ + 1] = d__[i__ + 1] + s;
+	if ((d__1 = dplus[i__ + 1], abs(d__1)) < *pivmin) {
+	    dplus[i__ + 1] = -(*pivmin);
+/* Need to set SAWNAN1 because refined RRR test should not be used */
+/* in this case */
+	    sawnan1 = TRUE_;
+	}
+/* Computing MAX */
+	d__2 = max1, d__3 = (d__1 = dplus[i__ + 1], abs(d__1));
+	max1 = max(d__2,d__3);
+/* L6: */
+    }
+    sawnan1 = sawnan1 || disnan_(&max1);
+/* Accept the left shift if FORCER is set, or if the element growth is */
+/* bounded and no NaN occurred. Note && binds tighter than ||, i.e. */
+/* FORCER .OR. ( MAX1 <= GROWTHBOUND .AND. .NOT.SAWNAN1 ). */
+    if (forcer || max1 <= growthbound && ! sawnan1) {
+	*sigma = lsigma;
+	shift = 1;
+	goto L100;
+    }
+/* Right end */
+    s = -rsigma;
+    work[1] = d__[1] + s;
+    if (abs(work[1]) < *pivmin) {
+	work[1] = -(*pivmin);
+/* Need to set SAWNAN2 because refined RRR test should not be used */
+/* in this case */
+	sawnan2 = TRUE_;
+    }
+    max2 = abs(work[1]);
+    i__1 = *n - 1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	work[*n + i__] = ld[i__] / work[i__];
+	s = s * work[*n + i__] * l[i__] - rsigma;
+	work[i__ + 1] = d__[i__ + 1] + s;
+	if ((d__1 = work[i__ + 1], abs(d__1)) < *pivmin) {
+	    work[i__ + 1] = -(*pivmin);
+/* Need to set SAWNAN2 because refined RRR test should not be used */
+/* in this case */
+	    sawnan2 = TRUE_;
+	}
+/* Computing MAX */
+	d__2 = max2, d__3 = (d__1 = work[i__ + 1], abs(d__1));
+	max2 = max(d__2,d__3);
+/* L7: */
+    }
+    sawnan2 = sawnan2 || disnan_(&max2);
+/* Same precedence note as above: FORCER .OR. ( MAX2 <= bound .AND. no NaN ) */
+    if (forcer || max2 <= growthbound && ! sawnan2) {
+	*sigma = rsigma;
+	shift = 2;
+	goto L100;
+    }
+/* If we are at this point, both shifts led to too much element growth */
+/* Record the better of the two shifts (provided it didn't lead to NaN) */
+    if (sawnan1 && sawnan2) {
+/* both MAX1 and MAX2 are NaN */
+	goto L50;
+    } else {
+	if (! sawnan1) {
+	    indx = 1;
+	    if (max1 <= smlgrowth) {
+		smlgrowth = max1;
+		bestshift = lsigma;
+	    }
+	}
+	if (! sawnan2) {
+	    if (sawnan1 || max2 <= max1) {
+		indx = 2;
+	    }
+	    if (max2 <= smlgrowth) {
+		smlgrowth = max2;
+		bestshift = rsigma;
+	    }
+	}
+    }
+/* If we are here, both the left and the right shift led to */
+/* element growth. If the element growth is moderate, then */
+/* we may still accept the representation, if it passes a */
+/* refined test for RRR. This test supposes that no NaN occurred. */
+/* Moreover, we use the refined RRR test only for isolated clusters. */
+    if (clwdth < mingap / 128. && min(max1,max2) < fail2 && ! sawnan1 && !
+	    sawnan2) {
+	dorrr1 = TRUE_;
+    } else {
+	dorrr1 = FALSE_;
+    }
+    tryrrr1 = TRUE_;
+    if (tryrrr1 && dorrr1) {
+	if (indx == 1) {
+	    tmp = (d__1 = dplus[*n], abs(d__1));
+	    znm2 = 1.;
+	    prod = 1.;
+	    oldp = 1.;
+	    for (i__ = *n - 1; i__ >= 1; --i__) {
+		if (prod <= eps) {
+		    prod = dplus[i__ + 1] * work[*n + i__ + 1] / (dplus[i__] *
+			    work[*n + i__]) * oldp;
+		} else {
+		    prod *= (d__1 = work[*n + i__], abs(d__1));
+		}
+		oldp = prod;
+/* Computing 2nd power */
+		d__1 = prod;
+		znm2 += d__1 * d__1;
+/* Computing MAX */
+		d__2 = tmp, d__3 = (d__1 = dplus[i__] * prod, abs(d__1));
+		tmp = max(d__2,d__3);
+/* L15: */
+	    }
+	    rrr1 = tmp / (*spdiam * sqrt(znm2));
+	    if (rrr1 <= 8.) {
+		*sigma = lsigma;
+		shift = 1;
+		goto L100;
+	    }
+	} else if (indx == 2) {
+	    tmp = (d__1 = work[*n], abs(d__1));
+	    znm2 = 1.;
+	    prod = 1.;
+	    oldp = 1.;
+	    for (i__ = *n - 1; i__ >= 1; --i__) {
+		if (prod <= eps) {
+		    prod = work[i__ + 1] * lplus[i__ + 1] / (work[i__] *
+			    lplus[i__]) * oldp;
+		} else {
+		    prod *= (d__1 = lplus[i__], abs(d__1));
+		}
+		oldp = prod;
+/* Computing 2nd power */
+		d__1 = prod;
+		znm2 += d__1 * d__1;
+/* Computing MAX */
+		d__2 = tmp, d__3 = (d__1 = work[i__] * prod, abs(d__1));
+		tmp = max(d__2,d__3);
+/* L16: */
+	    }
+	    rrr2 = tmp / (*spdiam * sqrt(znm2));
+	    if (rrr2 <= 8.) {
+		*sigma = rsigma;
+		shift = 2;
+		goto L100;
+	    }
+	}
+    }
+L50:
+    if (ktry < 1) {
+/* If we are here, both shifts failed also the RRR test. */
+/* Back off to the outside */
+/* Computing MAX */
+	d__1 = lsigma - ldelta, d__2 = lsigma - ldmax;
+	lsigma = max(d__1,d__2);
+/* Computing MIN */
+	d__1 = rsigma + rdelta, d__2 = rsigma + rdmax;
+	rsigma = min(d__1,d__2);
+	ldelta *= 2.;
+	rdelta *= 2.;
+	++ktry;
+	goto L5;
+    } else {
+/* None of the representations investigated satisfied our */
+/* criteria. Take the best one we found. */
+	if (smlgrowth < fail || nofail) {
+	    lsigma = bestshift;
+	    rsigma = bestshift;
+	    forcer = TRUE_;
+	    goto L5;
+	} else {
+	    *info = 1;
+	    return 0;
+	}
+    }
+L100:
+    if (shift == 1) {
+/* DPLUS and LPLUS already hold the accepted left-shift representation; */
+/* nothing to copy. */
+    } else if (shift == 2) {
+/* store new L and D back into DPLUS, LPLUS */
+	dcopy_(n, &work[1], &c__1, &dplus[1], &c__1);
+	i__1 = *n - 1;
+	dcopy_(&i__1, &work[*n + 1], &c__1, &lplus[1], &c__1);
+    }
+    return 0;
+
+/* End of DLARRF */
+
+} /* dlarrf_ */
diff --git a/contrib/libs/clapack/dlarrj.c b/contrib/libs/clapack/dlarrj.c
new file mode 100644
index 0000000000..306a500b98
--- /dev/null
+++ b/contrib/libs/clapack/dlarrj.c
@@ -0,0 +1,338 @@
+/* dlarrj.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlarrj_(integer *n, doublereal *d__, doublereal *e2,
+	integer *ifirst, integer *ilast, doublereal *rtol, integer *offset,
+	doublereal *w, doublereal *werr, doublereal *work, integer *iwork,
+	doublereal *pivmin, doublereal *spdiam, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double log(doublereal);
+
+    /* Local variables */
+    integer i__, j, k, p;
+    doublereal s;
+    integer i1, i2, ii;
+    doublereal fac, mid;
+    integer cnt;
+    doublereal tmp, left;
+    integer iter, nint, prev, next, savi1;
+    doublereal right, width, dplus;
+    integer olnint, maxitr;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Given the initial eigenvalue approximations of T, DLARRJ */
+/* does bisection to refine the eigenvalues of T, */
+/* W( IFIRST-OFFSET ) through W( ILAST-OFFSET ), to more accuracy. Initial */
+/* guesses for these eigenvalues are input in W, the corresponding estimate */
+/* of the error in these guesses in WERR. During bisection, intervals */
+/* [left, right] are maintained by storing their mid-points and */
+/* semi-widths in the arrays W and WERR respectively. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The N diagonal elements of T. */
+
+/* E2 (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The Squares of the (N-1) subdiagonal elements of T. */
+
+/* IFIRST (input) INTEGER */
+/* The index of the first eigenvalue to be computed. */
+
+/* ILAST (input) INTEGER */
+/* The index of the last eigenvalue to be computed. */
+
+/* RTOL (input) DOUBLE PRECISION */
+/* Tolerance for the convergence of the bisection intervals. */
+/* An interval [LEFT,RIGHT] has converged if */
+/* RIGHT-LEFT.LT.RTOL*MAX(|LEFT|,|RIGHT|). */
+
+/* OFFSET (input) INTEGER */
+/* Offset for the arrays W and WERR, i.e., the IFIRST-OFFSET */
+/* through ILAST-OFFSET elements of these arrays are to be used. */
+
+/* W (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On input, W( IFIRST-OFFSET ) through W( ILAST-OFFSET ) are */
+/* estimates of the eigenvalues of L D L^T indexed IFIRST through */
+/* ILAST. */
+/* On output, these estimates are refined. */
+
+/* WERR (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On input, WERR( IFIRST-OFFSET ) through WERR( ILAST-OFFSET ) are */
+/* the errors in the estimates of the corresponding elements in W. */
+/* On output, these errors are refined. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (2*N) */
+/* Workspace. */
+
+/* IWORK (workspace) INTEGER array, dimension (2*N) */
+/* Workspace. */
+
+/* PIVMIN (input) DOUBLE PRECISION */
+/* The minimum pivot in the Sturm sequence for T. */
+
+/* SPDIAM (input) DOUBLE PRECISION */
+/* The spectral diameter of T. */
+
+/* INFO (output) INTEGER */
+/* Error flag. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --iwork;
+    --work;
+    --werr;
+    --w;
+    --e2;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+
+/* Upper bound on bisection steps needed to resolve SPDIAM down to PIVMIN */
+    maxitr = (integer) ((log(*spdiam + *pivmin) - log(*pivmin)) / log(2.)) +
+	    2;
+
+/* Initialize unconverged intervals in [ WORK(2*I-1), WORK(2*I) ]. */
+/* The Sturm Count, Count( WORK(2*I-1) ) is arranged to be I-1, while */
+/* Count( WORK(2*I) ) is stored in IWORK( 2*I ). The integer IWORK( 2*I-1 ) */
+/* for an unconverged interval is set to the index of the next unconverged */
+/* interval, and is -1 or 0 for a converged interval. Thus a linked */
+/* list of unconverged intervals is set up. */
+
+    i1 = *ifirst;
+    i2 = *ilast;
+/* The number of unconverged intervals */
+    nint = 0;
+/* The last unconverged interval found */
+    prev = 0;
+    i__1 = i2;
+    for (i__ = i1; i__ <= i__1; ++i__) {
+	k = i__ << 1;
+	ii = i__ - *offset;
+	left = w[ii] - werr[ii];
+	mid = w[ii];
+	right = w[ii] + werr[ii];
+	width = right - mid;
+/* Computing MAX */
+	d__1 = abs(left), d__2 = abs(right);
+	tmp = max(d__1,d__2);
+/* The following test prevents the test of converged intervals */
+	if (width < *rtol * tmp) {
+/* This interval has already converged and does not need refinement. */
+/* (Note that the gaps might change through refining the */
+/* eigenvalues, however, they can only get bigger.) */
+/* Remove it from the list. */
+	    iwork[k - 1] = -1;
+/* Make sure that I1 always points to the first unconverged interval */
+	    if (i__ == i1 && i__ < i2) {
+		i1 = i__ + 1;
+	    }
+	    if (prev >= i1 && i__ <= i2) {
+		iwork[(prev << 1) - 1] = i__ + 1;
+	    }
+	} else {
+/* unconverged interval found */
+	    prev = i__;
+/* Make sure that [LEFT,RIGHT] contains the desired eigenvalue */
+
+/* Do while( CNT(LEFT).GT.I-1 ) */
+
+/* CNT is the Sturm count: the number of negative pivots of the */
+/* shifted factorization, i.e. the number of eigenvalues of T */
+/* that are less than the shift. */
+	    fac = 1.;
+L20:
+	    cnt = 0;
+	    s = left;
+	    dplus = d__[1] - s;
+	    if (dplus < 0.) {
+		++cnt;
+	    }
+	    i__2 = *n;
+	    for (j = 2; j <= i__2; ++j) {
+		dplus = d__[j] - s - e2[j - 1] / dplus;
+		if (dplus < 0.) {
+		    ++cnt;
+		}
+/* L30: */
+	    }
+	    if (cnt > i__ - 1) {
+		left -= werr[ii] * fac;
+		fac *= 2.;
+		goto L20;
+	    }
+
+/* Do while( CNT(RIGHT).LT.I ) */
+
+	    fac = 1.;
+L50:
+	    cnt = 0;
+	    s = right;
+	    dplus = d__[1] - s;
+	    if (dplus < 0.) {
+		++cnt;
+	    }
+	    i__2 = *n;
+	    for (j = 2; j <= i__2; ++j) {
+		dplus = d__[j] - s - e2[j - 1] / dplus;
+		if (dplus < 0.) {
+		    ++cnt;
+		}
+/* L60: */
+	    }
+	    if (cnt < i__) {
+		right += werr[ii] * fac;
+		fac *= 2.;
+		goto L50;
+	    }
+	    ++nint;
+	    iwork[k - 1] = i__ + 1;
+	    iwork[k] = cnt;
+	}
+	work[k - 1] = left;
+	work[k] = right;
+/* L75: */
+    }
+    savi1 = i1;
+
+/* Do while( NINT.GT.0 ), i.e. there are still unconverged intervals */
+/* and while (ITER.LT.MAXITR) */
+
+    iter = 0;
+L80:
+    prev = i1 - 1;
+    i__ = i1;
+    olnint = nint;
+    i__1 = olnint;
+    for (p = 1; p <= i__1; ++p) {
+	k = i__ << 1;
+	ii = i__ - *offset;
+	next = iwork[k - 1];
+	left = work[k - 1];
+	right = work[k];
+	mid = (left + right) * .5;
+/* semiwidth of interval */
+	width = right - mid;
+/* Computing MAX */
+	d__1 = abs(left), d__2 = abs(right);
+	tmp = max(d__1,d__2);
+	if (width < *rtol * tmp || iter == maxitr) {
+/* reduce number of unconverged intervals */
+	    --nint;
+/* Mark interval as converged. */
+	    iwork[k - 1] = 0;
+	    if (i1 == i__) {
+		i1 = next;
+	    } else {
+/* Prev holds the last unconverged interval previously examined */
+		if (prev >= i1) {
+		    iwork[(prev << 1) - 1] = next;
+		}
+	    }
+	    i__ = next;
+	    goto L100;
+	}
+	prev = i__;
+
+/* Perform one bisection step */
+
+	cnt = 0;
+	s = mid;
+	dplus = d__[1] - s;
+	if (dplus < 0.) {
+	    ++cnt;
+	}
+	i__2 = *n;
+	for (j = 2; j <= i__2; ++j) {
+	    dplus = d__[j] - s - e2[j - 1] / dplus;
+	    if (dplus < 0.) {
+		++cnt;
+	    }
+/* L90: */
+	}
+/* If fewer than I eigenvalues lie below MID, the I-th eigenvalue is */
+/* in [MID,RIGHT]; otherwise it is in [LEFT,MID]. */
+	if (cnt <= i__ - 1) {
+	    work[k - 1] = mid;
+	} else {
+	    work[k] = mid;
+	}
+	i__ = next;
+L100:
+	;
+    }
+    ++iter;
+/* do another loop if there are still unconverged intervals */
+/* However, in the last iteration, all intervals are accepted */
+/* since this is the best we can do. */
+    if (nint > 0 && iter <= maxitr) {
+	goto L80;
+    }
+
+
+/* At this point, all the intervals have converged */
+    i__1 = *ilast;
+    for (i__ = savi1; i__ <= i__1; ++i__) {
+	k = i__ << 1;
+	ii = i__ - *offset;
+/* All intervals marked by '0' have been refined. */
+	if (iwork[k - 1] == 0) {
+	    w[ii] = (work[k - 1] + work[k]) * .5;
+	    werr[ii] = work[k] - w[ii];
+	}
+/* L110: */
+    }
+
+    return 0;
+
+/* End of DLARRJ */
+
+} /* dlarrj_ */
diff --git a/contrib/libs/clapack/dlarrk.c b/contrib/libs/clapack/dlarrk.c
new file mode 100644
index 0000000000..aab5fc3311
--- /dev/null
+++ b/contrib/libs/clapack/dlarrk.c
@@ -0,0 +1,193 @@
+/* dlarrk.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlarrk_(integer *n, integer *iw, doublereal *gl,
+	doublereal *gu, doublereal *d__, doublereal *e2, doublereal *pivmin,
+	doublereal *reltol, doublereal *w, doublereal *werr, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double log(doublereal);
+
+    /* Local variables */
+    integer i__, it;
+    doublereal mid, eps, tmp1, tmp2, left, atoli, right;
+    integer itmax;
+    doublereal rtoli, tnorm;
+    extern doublereal dlamch_(char *);
+    integer negcnt;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLARRK computes one eigenvalue of a symmetric tridiagonal */
+/* matrix T to suitable accuracy. This is an auxiliary code to be */
+/* called from DSTEMR. */
+
+/* To avoid overflow, the matrix must be scaled so that its */
+/* largest element is no greater than overflow**(1/2) * */
+/* underflow**(1/4) in absolute value, and for greatest */
+/* accuracy, it should not be much smaller than that. */
+
+/* See W. Kahan "Accurate Eigenvalues of a Symmetric Tridiagonal */
+/* Matrix", Report CS41, Computer Science Dept., Stanford */
+/* University, July 21, 1966. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the tridiagonal matrix T. N >= 0. */
+
+/* IW (input) INTEGER */
+/* The index of the eigenvalues to be returned. */
+
+/* GL (input) DOUBLE PRECISION */
+/* GU (input) DOUBLE PRECISION */
+/* An upper and a lower bound on the eigenvalue. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the tridiagonal matrix T. */
+
+/* E2 (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) squared off-diagonal elements of the tridiagonal matrix T. */
+
+/* PIVMIN (input) DOUBLE PRECISION */
+/* The minimum pivot allowed in the Sturm sequence for T. */
+
+/* RELTOL (input) DOUBLE PRECISION */
+/* The minimum relative width of an interval. When an interval */
+/* is narrower than RELTOL times the larger (in */
+/* magnitude) endpoint, then it is considered to be */
+/* sufficiently small, i.e., converged. Note: this should */
+/* always be at least radix*machine epsilon. */
+
+/* W (output) DOUBLE PRECISION */
+
+/* WERR (output) DOUBLE PRECISION */
+/* The error bound on the corresponding eigenvalue approximation */
+/* in W. */
+
+/* INFO (output) INTEGER */
+/* = 0: Eigenvalue converged */
+/* = -1: Eigenvalue did NOT converge */
+
+/* Internal Parameters */
+/* =================== */
+
+/* FUDGE DOUBLE PRECISION, default = 2 */
+/* A "fudge factor" to widen the Gershgorin intervals. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Get machine constants */
+    /* Parameter adjustments */
+    --e2;
+    --d__;
+
+    /* Function Body */
+    eps = dlamch_("P");
+/* Computing MAX */
+    d__1 = abs(*gl), d__2 = abs(*gu);
+    tnorm = max(d__1,d__2);
+    rtoli = *reltol;
+    atoli = *pivmin * 4.;
+/* Upper bound on bisection steps needed to shrink [GL,GU] to PIVMIN */
+    itmax = (integer) ((log(tnorm + *pivmin) - log(*pivmin)) / log(2.)) + 2;
+/* INFO stays -1 (not converged) unless the width test below succeeds */
+    *info = -1;
+    left = *gl - tnorm * 2. * eps * *n - *pivmin * 4.;
+    right = *gu + tnorm * 2. * eps * *n + *pivmin * 4.;
+    it = 0;
+L10:
+
+/* Check if interval converged or maximum number of iterations reached */
+
+    tmp1 = (d__1 = right - left, abs(d__1));
+/* Computing MAX */
+    d__1 = abs(right), d__2 = abs(left);
+    tmp2 = max(d__1,d__2);
+/* Computing MAX */
+    d__1 = max(atoli,*pivmin), d__2 = rtoli * tmp2;
+    if (tmp1 < max(d__1,d__2)) {
+	*info = 0;
+	goto L30;
+    }
+    if (it > itmax) {
+	goto L30;
+    }
+
+/* Count number of negative pivots for mid-point */
+
+    ++it;
+    mid = (left + right) * .5;
+    negcnt = 0;
+    tmp1 = d__[1] - mid;
+    if (abs(tmp1) < *pivmin) {
+	tmp1 = -(*pivmin);
+    }
+    if (tmp1 <= 0.) {
+	++negcnt;
+    }
+
+    i__1 = *n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+	tmp1 = d__[i__] - e2[i__ - 1] / tmp1 - mid;
+	if (abs(tmp1) < *pivmin) {
+	    tmp1 = -(*pivmin);
+	}
+	if (tmp1 <= 0.) {
+	    ++negcnt;
+	}
+/* L20: */
+    }
+/* NEGCNT = Sturm count = number of eigenvalues of T less than MID. */
+/* Keep the half interval that still contains the IW-th eigenvalue. */
+    if (negcnt >= *iw) {
+	right = mid;
+    } else {
+	left = mid;
+    }
+    goto L10;
+L30:
+
+/* Converged or maximum number of iterations reached */
+
+    *w = (left + right) * .5;
+    *werr = (d__1 = right - left, abs(d__1)) * .5;
+    return 0;
+
+/* End of DLARRK */
+
+} /* dlarrk_ */
diff --git a/contrib/libs/clapack/dlarrr.c b/contrib/libs/clapack/dlarrr.c
new file mode 100644
index 0000000000..adb0133974
--- /dev/null
+++ b/contrib/libs/clapack/dlarrr.c
@@ -0,0 +1,176 @@
+/* dlarrr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlarrr_(integer *n, doublereal *d__, doublereal *e,
+	integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__;
+    doublereal eps, tmp, tmp2, rmin;
+    extern doublereal dlamch_(char *);
+    doublereal offdig, safmin;
+    logical yesrel;
+    doublereal smlnum, offdig2;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+
+/* Purpose */
+/* ======= */
+
+/* Perform tests to decide whether the symmetric tridiagonal matrix T */
+/* warrants expensive computations which guarantee high relative accuracy */
+/* in the eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N > 0. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The N diagonal elements of the tridiagonal matrix T. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the first (N-1) entries contain the subdiagonal */
+/* elements of the tridiagonal matrix T; E(N) is set to ZERO. */
+
+/* INFO (output) INTEGER */
+/* INFO = 0(default) : the matrix warrants computations preserving */
+/* relative accuracy. */
+/* INFO = 1 : the matrix warrants computations guaranteeing */
+/* only absolute accuracy. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* As a default, do NOT go for relative-accuracy preserving computations. */
+    /* Parameter adjustments */
+    --e;
+    --d__;
+
+    /* Function Body */
+    *info = 1;
+    safmin = dlamch_("Safe minimum");
+    eps = dlamch_("Precision");
+    smlnum = safmin / eps;
+    rmin = sqrt(smlnum);
+/* Tests for relative accuracy */
+
+/* Test for scaled diagonal dominance */
+/* Scale the diagonal entries to one and check whether the sum of the */
+/* off-diagonals is less than one */
+
+/* The sdd relative error bounds have a 1/(1- 2*x) factor in them, */
+/* x = max(OFFDIG + OFFDIG2), so when x is close to 1/2, no relative */
+/* accuracy is promised. In the notation of the code fragment below, */
+/* 1/(1 - (OFFDIG + OFFDIG2)) is the condition number. */
+/* We don't think it is worth going into "sdd mode" unless the relative */
+/* condition number is reasonable, not 1/macheps. */
+/* The threshold should be compatible with other thresholds used in the */
+/* code. We set OFFDIG + OFFDIG2 <= .999 =: RELCOND, it corresponds */
+/* to losing at most 3 decimal digits: 1 / (1 - (OFFDIG + OFFDIG2)) <= 1000 */
+/* instead of the current OFFDIG + OFFDIG2 < 1 */
+
+    yesrel = TRUE_;
+    offdig = 0.;
+    tmp = sqrt((abs(d__[1])));
+    if (tmp < rmin) {
+	yesrel = FALSE_;
+    }
+    if (! yesrel) {
+	goto L11;
+    }
+    i__1 = *n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+	tmp2 = sqrt((d__1 = d__[i__], abs(d__1)));
+	if (tmp2 < rmin) {
+	    yesrel = FALSE_;
+	}
+	if (! yesrel) {
+	    goto L11;
+	}
+	offdig2 = (d__1 = e[i__ - 1], abs(d__1)) / (tmp * tmp2);
+	if (offdig + offdig2 >= .999) {
+	    yesrel = FALSE_;
+	}
+	if (! yesrel) {
+	    goto L11;
+	}
+	tmp = tmp2;
+	offdig = offdig2;
+/* L10: */
+    }
+L11:
+    if (yesrel) {
+	*info = 0;
+	return 0;
+    } else {
+/* Placeholder: additional tests below are not yet implemented, */
+/* so INFO keeps its default value of 1 (absolute accuracy only). */
+    }
+
+
+/* *** MORE TO BE IMPLEMENTED *** */
+
+
+/* Test if the lower bidiagonal matrix L from T = L D L^T */
+/* (zero shift facto) is well conditioned */
+
+
+/* Test if the upper bidiagonal matrix U from T = U D U^T */
+/* (zero shift facto) is well conditioned. */
+/* In this case, the matrix needs to be flipped and, at the end */
+/* of the eigenvector computation, the flip needs to be applied */
+/* to the computed eigenvectors (and the support) */
+
+
+    return 0;
+
+/* END OF DLARRR */
+
+} /* dlarrr_ */
diff --git a/contrib/libs/clapack/dlarrv.c b/contrib/libs/clapack/dlarrv.c
new file mode 100644
index 0000000000..4b80eecb5a
--- /dev/null
+++ b/contrib/libs/clapack/dlarrv.c
@@ -0,0 +1,988 @@
+/* dlarrv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b5 = 0.;
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int dlarrv_(integer *n, doublereal *vl, doublereal *vu,
+ doublereal *d__, doublereal *l, doublereal *pivmin, integer *isplit,
+ integer *m, integer *dol, integer *dou, doublereal *minrgp,
+ doublereal *rtol1, doublereal *rtol2, doublereal *w, doublereal *werr,
+ doublereal *wgap, integer *iblock, integer *indexw, doublereal *gers,
+ doublereal *z__, integer *ldz, integer *isuppz, doublereal *work,
+ integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2;
+ logical L__1;
+
+ /* Builtin functions */
+ double log(doublereal);
+
+ /* Local variables */
+ integer minwsize, i__, j, k, p, q, miniwsize, ii;
+ doublereal gl;
+ integer im, in;
+ doublereal gu, gap, eps, tau, tol, tmp;
+ integer zto;
+ doublereal ztz;
+ integer iend, jblk;
+ doublereal lgap;
+ integer done;
+ doublereal rgap, left;
+ integer wend, iter;
+ doublereal bstw;
+ integer itmp1;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ integer indld;
+ doublereal fudge;
+ integer idone;
+ doublereal sigma;
+ integer iinfo, iindr;
+ doublereal resid;
+ logical eskip;
+ doublereal right;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ integer nclus, zfrom;
+ doublereal rqtol;
+ integer iindc1, iindc2;
+ extern /* Subroutine */ int dlar1v_(integer *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *, logical *,
+ integer *, doublereal *, doublereal *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *);
+ logical stp2ii;
+ doublereal lambda;
+ extern doublereal dlamch_(char *);
+ integer ibegin, indeig;
+ logical needbs;
+ integer indlld;
+ doublereal sgndef, mingma;
+ extern /* Subroutine */ int dlarrb_(integer *, doublereal *, doublereal *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *, integer *);
+ integer oldien, oldncl, wbegin;
+ doublereal spdiam;
+ integer negcnt;
+ extern /* Subroutine */ int dlarrf_(integer *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *);
+ integer oldcls;
+ doublereal savgap;
+ integer ndepth;
+ doublereal ssigma;
+ extern /* Subroutine */ int dlaset_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *);
+ logical usedbs;
+ integer iindwk, offset;
+ doublereal gaptol;
+ integer newcls, oldfst, indwrk, windex, oldlst;
+ logical usedrq;
+ integer newfst, newftt, parity, windmn, windpl, isupmn, newlst, zusedl;
+ doublereal bstres;
+ integer newsiz, zusedu, zusedw;
+ doublereal nrminv, rqcorr;
+ logical tryrqc;
+ integer isupmx;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLARRV computes the eigenvectors of the tridiagonal matrix */
+/* T = L D L^T given L, D and APPROXIMATIONS to the eigenvalues of L D L^T. */
+/* The input eigenvalues should have been computed by DLARRE. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* Lower and upper bounds of the interval that contains the desired */
+/* eigenvalues. VL < VU. Needed to compute gaps on the left or right */
+/* end of the extremal eigenvalues in the desired RANGE. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the N diagonal elements of the diagonal matrix D. */
+/* On exit, D may be overwritten. */
+
+/* L (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the (N-1) subdiagonal elements of the unit */
+/* bidiagonal matrix L are in elements 1 to N-1 of L */
+/* (if the matrix is not splitted.) At the end of each block */
+/* is stored the corresponding shift as given by DLARRE. */
+/* On exit, L is overwritten. */
+
+/* PIVMIN (in) DOUBLE PRECISION */
+/* The minimum pivot allowed in the Sturm sequence. */
+
+/* ISPLIT (input) INTEGER array, dimension (N) */
+/* The splitting points, at which T breaks up into blocks. */
+/* The first block consists of rows/columns 1 to */
+/* ISPLIT( 1 ), the second of rows/columns ISPLIT( 1 )+1 */
+/* through ISPLIT( 2 ), etc. */
+
+/* M (input) INTEGER */
+/* The total number of input eigenvalues. 0 <= M <= N. */
+
+/* DOL (input) INTEGER */
+/* DOU (input) INTEGER */
+/* If the user wants to compute only selected eigenvectors from all */
+/* the eigenvalues supplied, he can specify an index range DOL:DOU. */
+/* Or else the setting DOL=1, DOU=M should be applied. */
+/* Note that DOL and DOU refer to the order in which the eigenvalues */
+/* are stored in W. */
+/* If the user wants to compute only selected eigenpairs, then */
+/* the columns DOL-1 to DOU+1 of the eigenvector space Z contain the */
+/* computed eigenvectors. All other columns of Z are set to zero. */
+
+/* MINRGP (input) DOUBLE PRECISION */
+
+/* RTOL1 (input) DOUBLE PRECISION */
+/* RTOL2 (input) DOUBLE PRECISION */
+/* Parameters for bisection. */
+/* An interval [LEFT,RIGHT] has converged if */
+/* RIGHT-LEFT.LT.MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) */
+
+/* W (input/output) DOUBLE PRECISION array, dimension (N) */
+/* The first M elements of W contain the APPROXIMATE eigenvalues for */
+/* which eigenvectors are to be computed. The eigenvalues */
+/* should be grouped by split-off block and ordered from */
+/* smallest to largest within the block ( The output array */
+/* W from DLARRE is expected here ). Furthermore, they are with */
+/* respect to the shift of the corresponding root representation */
+/* for their block. On exit, W holds the eigenvalues of the */
+/* UNshifted matrix. */
+
+/* WERR (input/output) DOUBLE PRECISION array, dimension (N) */
+/* The first M elements contain the semiwidth of the uncertainty */
+/* interval of the corresponding eigenvalue in W */
+
+/* WGAP (input/output) DOUBLE PRECISION array, dimension (N) */
+/* The separation from the right neighbor eigenvalue in W. */
+
+/* IBLOCK (input) INTEGER array, dimension (N) */
+/* The indices of the blocks (submatrices) associated with the */
+/* corresponding eigenvalues in W; IBLOCK(i)=1 if eigenvalue */
+/* W(i) belongs to the first block from the top, =2 if W(i) */
+/* belongs to the second block, etc. */
+
+/* INDEXW (input) INTEGER array, dimension (N) */
+/* The indices of the eigenvalues within each block (submatrix); */
+/* for example, INDEXW(i)= 10 and IBLOCK(i)=2 imply that the */
+/* i-th eigenvalue W(i) is the 10-th eigenvalue in the second block. */
+
+/* GERS (input) DOUBLE PRECISION array, dimension (2*N) */
+/* The N Gerschgorin intervals (the i-th Gerschgorin interval */
+/* is (GERS(2*i-1), GERS(2*i)). The Gerschgorin intervals should */
+/* be computed from the original UNshifted matrix. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, max(1,M) ) */
+/* If INFO = 0, the first M columns of Z contain the */
+/* orthonormal eigenvectors of the matrix T */
+/* corresponding to the input eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* ISUPPZ (output) INTEGER array, dimension ( 2*max(1,M) ) */
+/* The support of the eigenvectors in Z, i.e., the indices */
+/* indicating the nonzero elements in Z. The I-th eigenvector */
+/* is nonzero only in elements ISUPPZ( 2*I-1 ) through */
+/* ISUPPZ( 2*I ). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (12*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (7*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+
+/* > 0: A problem occured in DLARRV. */
+/* < 0: One of the called subroutines signaled an internal problem. */
+/* Needs inspection of the corresponding parameter IINFO */
+/* for further information. */
+
+/* =-1: Problem in DLARRB when refining a child's eigenvalues. */
+/* =-2: Problem in DLARRF when computing the RRR of a child. */
+/* When a child is inside a tight cluster, it can be difficult */
+/* to find an RRR. A partial remedy from the user's point of */
+/* view is to make the parameter MINRGP smaller and recompile. */
+/* However, as the orthogonality of the computed vectors is */
+/* proportional to 1/MINRGP, the user should be aware that */
+/* he might be trading in precision when he decreases MINRGP. */
+/* =-3: Problem in DLARRB when refining a single eigenvalue */
+/* after the Rayleigh correction was rejected. */
+/* = 5: The Rayleigh Quotient Iteration failed to converge to */
+/* full accuracy in MAXITR steps. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+/* .. */
+/* The first N entries of WORK are reserved for the eigenvalues */
+ /* Parameter adjustments */
+ --d__;
+ --l;
+ --isplit;
+ --w;
+ --werr;
+ --wgap;
+ --iblock;
+ --indexw;
+ --gers;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --isuppz;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ indld = *n + 1;
+ indlld = (*n << 1) + 1;
+ indwrk = *n * 3 + 1;
+ minwsize = *n * 12;
+ i__1 = minwsize;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L5: */
+ }
+/* IWORK(IINDR+1:IINDR+N) hold the twist indices R for the */
+/* factorization used to compute the FP vector */
+ iindr = 0;
+/* IWORK(IINDC1+1:IINC2+N) are used to store the clusters of the current */
+/* layer and the one above. */
+ iindc1 = *n;
+ iindc2 = *n << 1;
+ iindwk = *n * 3 + 1;
+ miniwsize = *n * 7;
+ i__1 = miniwsize;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ iwork[i__] = 0;
+/* L10: */
+ }
+ zusedl = 1;
+ if (*dol > 1) {
+/* Set lower bound for use of Z */
+ zusedl = *dol - 1;
+ }
+ zusedu = *m;
+ if (*dou < *m) {
+/* Set lower bound for use of Z */
+ zusedu = *dou + 1;
+ }
+/* The width of the part of Z that is used */
+ zusedw = zusedu - zusedl + 1;
+ dlaset_("Full", n, &zusedw, &c_b5, &c_b5, &z__[zusedl * z_dim1 + 1], ldz);
+ eps = dlamch_("Precision");
+ rqtol = eps * 2.;
+
+/* Set expert flags for standard code. */
+ tryrqc = TRUE_;
+ if (*dol == 1 && *dou == *m) {
+ } else {
+/* Only selected eigenpairs are computed. Since the other evalues */
+/* are not refined by RQ iteration, bisection has to compute to full */
+/* accuracy. */
+ *rtol1 = eps * 4.;
+ *rtol2 = eps * 4.;
+ }
+/* The entries WBEGIN:WEND in W, WERR, WGAP correspond to the */
+/* desired eigenvalues. The support of the nonzero eigenvector */
+/* entries is contained in the interval IBEGIN:IEND. */
+/* Remark that if k eigenpairs are desired, then the eigenvectors */
+/* are stored in k contiguous columns of Z. */
+/* DONE is the number of eigenvectors already computed */
+ done = 0;
+ ibegin = 1;
+ wbegin = 1;
+ i__1 = iblock[*m];
+ for (jblk = 1; jblk <= i__1; ++jblk) {
+ iend = isplit[jblk];
+ sigma = l[iend];
+/* Find the eigenvectors of the submatrix indexed IBEGIN */
+/* through IEND. */
+ wend = wbegin - 1;
+L15:
+ if (wend < *m) {
+ if (iblock[wend + 1] == jblk) {
+ ++wend;
+ goto L15;
+ }
+ }
+ if (wend < wbegin) {
+ ibegin = iend + 1;
+ goto L170;
+ } else if (wend < *dol || wbegin > *dou) {
+ ibegin = iend + 1;
+ wbegin = wend + 1;
+ goto L170;
+ }
+/* Find local spectral diameter of the block */
+ gl = gers[(ibegin << 1) - 1];
+ gu = gers[ibegin * 2];
+ i__2 = iend;
+ for (i__ = ibegin + 1; i__ <= i__2; ++i__) {
+/* Computing MIN */
+ d__1 = gers[(i__ << 1) - 1];
+ gl = min(d__1,gl);
+/* Computing MAX */
+ d__1 = gers[i__ * 2];
+ gu = max(d__1,gu);
+/* L20: */
+ }
+ spdiam = gu - gl;
+/* OLDIEN is the last index of the previous block */
+ oldien = ibegin - 1;
+/* Calculate the size of the current block */
+ in = iend - ibegin + 1;
+/* The number of eigenvalues in the current block */
+ im = wend - wbegin + 1;
+/* This is for a 1x1 block */
+ if (ibegin == iend) {
+ ++done;
+ z__[ibegin + wbegin * z_dim1] = 1.;
+ isuppz[(wbegin << 1) - 1] = ibegin;
+ isuppz[wbegin * 2] = ibegin;
+ w[wbegin] += sigma;
+ work[wbegin] = w[wbegin];
+ ibegin = iend + 1;
+ ++wbegin;
+ goto L170;
+ }
+/* The desired (shifted) eigenvalues are stored in W(WBEGIN:WEND) */
+/* Note that these can be approximations, in this case, the corresp. */
+/* entries of WERR give the size of the uncertainty interval. */
+/* The eigenvalue approximations will be refined when necessary as */
+/* high relative accuracy is required for the computation of the */
+/* corresponding eigenvectors. */
+ dcopy_(&im, &w[wbegin], &c__1, &work[wbegin], &c__1);
+/* We store in W the eigenvalue approximations w.r.t. the original */
+/* matrix T. */
+ i__2 = im;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ w[wbegin + i__ - 1] += sigma;
+/* L30: */
+ }
+/* NDEPTH is the current depth of the representation tree */
+ ndepth = 0;
+/* PARITY is either 1 or 0 */
+ parity = 1;
+/* NCLUS is the number of clusters for the next level of the */
+/* representation tree, we start with NCLUS = 1 for the root */
+ nclus = 1;
+ iwork[iindc1 + 1] = 1;
+ iwork[iindc1 + 2] = im;
+/* IDONE is the number of eigenvectors already computed in the current */
+/* block */
+ idone = 0;
+/* loop while( IDONE.LT.IM ) */
+/* generate the representation tree for the current block and */
+/* compute the eigenvectors */
+L40:
+ if (idone < im) {
+/* This is a crude protection against infinitely deep trees */
+ if (ndepth > *m) {
+ *info = -2;
+ return 0;
+ }
+/* breadth first processing of the current level of the representation */
+/* tree: OLDNCL = number of clusters on current level */
+ oldncl = nclus;
+/* reset NCLUS to count the number of child clusters */
+ nclus = 0;
+
+ parity = 1 - parity;
+ if (parity == 0) {
+ oldcls = iindc1;
+ newcls = iindc2;
+ } else {
+ oldcls = iindc2;
+ newcls = iindc1;
+ }
+/* Process the clusters on the current level */
+ i__2 = oldncl;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ j = oldcls + (i__ << 1);
+/* OLDFST, OLDLST = first, last index of current cluster. */
+/* cluster indices start with 1 and are relative */
+/* to WBEGIN when accessing W, WGAP, WERR, Z */
+ oldfst = iwork[j - 1];
+ oldlst = iwork[j];
+ if (ndepth > 0) {
+/* Retrieve relatively robust representation (RRR) of cluster */
+/* that has been computed at the previous level */
+/* The RRR is stored in Z and overwritten once the eigenvectors */
+/* have been computed or when the cluster is refined */
+ if (*dol == 1 && *dou == *m) {
+/* Get representation from location of the leftmost evalue */
+/* of the cluster */
+ j = wbegin + oldfst - 1;
+ } else {
+ if (wbegin + oldfst - 1 < *dol) {
+/* Get representation from the left end of Z array */
+ j = *dol - 1;
+ } else if (wbegin + oldfst - 1 > *dou) {
+/* Get representation from the right end of Z array */
+ j = *dou;
+ } else {
+ j = wbegin + oldfst - 1;
+ }
+ }
+ dcopy_(&in, &z__[ibegin + j * z_dim1], &c__1, &d__[ibegin]
+, &c__1);
+ i__3 = in - 1;
+ dcopy_(&i__3, &z__[ibegin + (j + 1) * z_dim1], &c__1, &l[
+ ibegin], &c__1);
+ sigma = z__[iend + (j + 1) * z_dim1];
+/* Set the corresponding entries in Z to zero */
+ dlaset_("Full", &in, &c__2, &c_b5, &c_b5, &z__[ibegin + j
+ * z_dim1], ldz);
+ }
+/* Compute DL and DLL of current RRR */
+ i__3 = iend - 1;
+ for (j = ibegin; j <= i__3; ++j) {
+ tmp = d__[j] * l[j];
+ work[indld - 1 + j] = tmp;
+ work[indlld - 1 + j] = tmp * l[j];
+/* L50: */
+ }
+ if (ndepth > 0) {
+/* P and Q are index of the first and last eigenvalue to compute */
+/* within the current block */
+ p = indexw[wbegin - 1 + oldfst];
+ q = indexw[wbegin - 1 + oldlst];
+/* Offset for the arrays WORK, WGAP and WERR, i.e., th P-OFFSET */
+/* thru' Q-OFFSET elements of these arrays are to be used. */
+/* OFFSET = P-OLDFST */
+ offset = indexw[wbegin] - 1;
+/* perform limited bisection (if necessary) to get approximate */
+/* eigenvalues to the precision needed. */
+ dlarrb_(&in, &d__[ibegin], &work[indlld + ibegin - 1], &p,
+ &q, rtol1, rtol2, &offset, &work[wbegin], &wgap[
+ wbegin], &werr[wbegin], &work[indwrk], &iwork[
+ iindwk], pivmin, &spdiam, &in, &iinfo);
+ if (iinfo != 0) {
+ *info = -1;
+ return 0;
+ }
+/* We also recompute the extremal gaps. W holds all eigenvalues */
+/* of the unshifted matrix and must be used for computation */
+/* of WGAP, the entries of WORK might stem from RRRs with */
+/* different shifts. The gaps from WBEGIN-1+OLDFST to */
+/* WBEGIN-1+OLDLST are correctly computed in DLARRB. */
+/* However, we only allow the gaps to become greater since */
+/* this is what should happen when we decrease WERR */
+ if (oldfst > 1) {
+/* Computing MAX */
+ d__1 = wgap[wbegin + oldfst - 2], d__2 = w[wbegin +
+ oldfst - 1] - werr[wbegin + oldfst - 1] - w[
+ wbegin + oldfst - 2] - werr[wbegin + oldfst -
+ 2];
+ wgap[wbegin + oldfst - 2] = max(d__1,d__2);
+ }
+ if (wbegin + oldlst - 1 < wend) {
+/* Computing MAX */
+ d__1 = wgap[wbegin + oldlst - 1], d__2 = w[wbegin +
+ oldlst] - werr[wbegin + oldlst] - w[wbegin +
+ oldlst - 1] - werr[wbegin + oldlst - 1];
+ wgap[wbegin + oldlst - 1] = max(d__1,d__2);
+ }
+/* Each time the eigenvalues in WORK get refined, we store */
+/* the newly found approximation with all shifts applied in W */
+ i__3 = oldlst;
+ for (j = oldfst; j <= i__3; ++j) {
+ w[wbegin + j - 1] = work[wbegin + j - 1] + sigma;
+/* L53: */
+ }
+ }
+/* Process the current node. */
+ newfst = oldfst;
+ i__3 = oldlst;
+ for (j = oldfst; j <= i__3; ++j) {
+ if (j == oldlst) {
+/* we are at the right end of the cluster, this is also the */
+/* boundary of the child cluster */
+ newlst = j;
+ } else if (wgap[wbegin + j - 1] >= *minrgp * (d__1 = work[
+ wbegin + j - 1], abs(d__1))) {
+/* the right relative gap is big enough, the child cluster */
+/* (NEWFST,..,NEWLST) is well separated from the following */
+ newlst = j;
+ } else {
+/* inside a child cluster, the relative gap is not */
+/* big enough. */
+ goto L140;
+ }
+/* Compute size of child cluster found */
+ newsiz = newlst - newfst + 1;
+/* NEWFTT is the place in Z where the new RRR or the computed */
+/* eigenvector is to be stored */
+ if (*dol == 1 && *dou == *m) {
+/* Store representation at location of the leftmost evalue */
+/* of the cluster */
+ newftt = wbegin + newfst - 1;
+ } else {
+ if (wbegin + newfst - 1 < *dol) {
+/* Store representation at the left end of Z array */
+ newftt = *dol - 1;
+ } else if (wbegin + newfst - 1 > *dou) {
+/* Store representation at the right end of Z array */
+ newftt = *dou;
+ } else {
+ newftt = wbegin + newfst - 1;
+ }
+ }
+ if (newsiz > 1) {
+
+/* Current child is not a singleton but a cluster. */
+/* Compute and store new representation of child. */
+
+
+/* Compute left and right cluster gap. */
+
+/* LGAP and RGAP are not computed from WORK because */
+/* the eigenvalue approximations may stem from RRRs */
+/* different shifts. However, W hold all eigenvalues */
+/* of the unshifted matrix. Still, the entries in WGAP */
+/* have to be computed from WORK since the entries */
+/* in W might be of the same order so that gaps are not */
+/* exhibited correctly for very close eigenvalues. */
+ if (newfst == 1) {
+/* Computing MAX */
+ d__1 = 0., d__2 = w[wbegin] - werr[wbegin] - *vl;
+ lgap = max(d__1,d__2);
+ } else {
+ lgap = wgap[wbegin + newfst - 2];
+ }
+ rgap = wgap[wbegin + newlst - 1];
+
+/* Compute left- and rightmost eigenvalue of child */
+/* to high precision in order to shift as close */
+/* as possible and obtain as large relative gaps */
+/* as possible */
+
+ for (k = 1; k <= 2; ++k) {
+ if (k == 1) {
+ p = indexw[wbegin - 1 + newfst];
+ } else {
+ p = indexw[wbegin - 1 + newlst];
+ }
+ offset = indexw[wbegin] - 1;
+ dlarrb_(&in, &d__[ibegin], &work[indlld + ibegin
+ - 1], &p, &p, &rqtol, &rqtol, &offset, &
+ work[wbegin], &wgap[wbegin], &werr[wbegin]
+, &work[indwrk], &iwork[iindwk], pivmin, &
+ spdiam, &in, &iinfo);
+/* L55: */
+ }
+
+ if (wbegin + newlst - 1 < *dol || wbegin + newfst - 1
+ > *dou) {
+/* if the cluster contains no desired eigenvalues */
+/* skip the computation of that branch of the rep. tree */
+
+/* We could skip before the refinement of the extremal */
+/* eigenvalues of the child, but then the representation */
+/* tree could be different from the one when nothing is */
+/* skipped. For this reason we skip at this place. */
+ idone = idone + newlst - newfst + 1;
+ goto L139;
+ }
+
+/* Compute RRR of child cluster. */
+/* Note that the new RRR is stored in Z */
+
+/* DLARRF needs LWORK = 2*N */
+ dlarrf_(&in, &d__[ibegin], &l[ibegin], &work[indld +
+ ibegin - 1], &newfst, &newlst, &work[wbegin],
+ &wgap[wbegin], &werr[wbegin], &spdiam, &lgap,
+ &rgap, pivmin, &tau, &z__[ibegin + newftt *
+ z_dim1], &z__[ibegin + (newftt + 1) * z_dim1],
+ &work[indwrk], &iinfo);
+ if (iinfo == 0) {
+/* a new RRR for the cluster was found by DLARRF */
+/* update shift and store it */
+ ssigma = sigma + tau;
+ z__[iend + (newftt + 1) * z_dim1] = ssigma;
+/* WORK() are the midpoints and WERR() the semi-width */
+/* Note that the entries in W are unchanged. */
+ i__4 = newlst;
+ for (k = newfst; k <= i__4; ++k) {
+ fudge = eps * 3. * (d__1 = work[wbegin + k -
+ 1], abs(d__1));
+ work[wbegin + k - 1] -= tau;
+ fudge += eps * 4. * (d__1 = work[wbegin + k -
+ 1], abs(d__1));
+/* Fudge errors */
+ werr[wbegin + k - 1] += fudge;
+/* Gaps are not fudged. Provided that WERR is small */
+/* when eigenvalues are close, a zero gap indicates */
+/* that a new representation is needed for resolving */
+/* the cluster. A fudge could lead to a wrong decision */
+/* of judging eigenvalues 'separated' which in */
+/* reality are not. This could have a negative impact */
+/* on the orthogonality of the computed eigenvectors. */
+/* L116: */
+ }
+ ++nclus;
+ k = newcls + (nclus << 1);
+ iwork[k - 1] = newfst;
+ iwork[k] = newlst;
+ } else {
+ *info = -2;
+ return 0;
+ }
+ } else {
+
+/* Compute eigenvector of singleton */
+
+ iter = 0;
+
+ tol = log((doublereal) in) * 4. * eps;
+
+ k = newfst;
+ windex = wbegin + k - 1;
+/* Computing MAX */
+ i__4 = windex - 1;
+ windmn = max(i__4,1);
+/* Computing MIN */
+ i__4 = windex + 1;
+ windpl = min(i__4,*m);
+ lambda = work[windex];
+ ++done;
+/* Check if eigenvector computation is to be skipped */
+ if (windex < *dol || windex > *dou) {
+ eskip = TRUE_;
+ goto L125;
+ } else {
+ eskip = FALSE_;
+ }
+ left = work[windex] - werr[windex];
+ right = work[windex] + werr[windex];
+ indeig = indexw[windex];
+/* Note that since we compute the eigenpairs for a child, */
+/* all eigenvalue approximations are w.r.t the same shift. */
+/* In this case, the entries in WORK should be used for */
+/* computing the gaps since they exhibit even very small */
+/* differences in the eigenvalues, as opposed to the */
+/* entries in W which might "look" the same. */
+ if (k == 1) {
+/* In the case RANGE='I' and with not much initial */
+/* accuracy in LAMBDA and VL, the formula */
+/* LGAP = MAX( ZERO, (SIGMA - VL) + LAMBDA ) */
+/* can lead to an overestimation of the left gap and */
+/* thus to inadequately early RQI 'convergence'. */
+/* Prevent this by forcing a small left gap. */
+/* Computing MAX */
+ d__1 = abs(left), d__2 = abs(right);
+ lgap = eps * max(d__1,d__2);
+ } else {
+ lgap = wgap[windmn];
+ }
+ if (k == im) {
+/* In the case RANGE='I' and with not much initial */
+/* accuracy in LAMBDA and VU, the formula */
+/* can lead to an overestimation of the right gap and */
+/* thus to inadequately early RQI 'convergence'. */
+/* Prevent this by forcing a small right gap. */
+/* Computing MAX */
+ d__1 = abs(left), d__2 = abs(right);
+ rgap = eps * max(d__1,d__2);
+ } else {
+ rgap = wgap[windex];
+ }
+ gap = min(lgap,rgap);
+ if (k == 1 || k == im) {
+/* The eigenvector support can become wrong */
+/* because significant entries could be cut off due to a */
+/* large GAPTOL parameter in LAR1V. Prevent this. */
+ gaptol = 0.;
+ } else {
+ gaptol = gap * eps;
+ }
+ isupmn = in;
+ isupmx = 1;
+/* Update WGAP so that it holds the minimum gap */
+/* to the left or the right. This is crucial in the */
+/* case where bisection is used to ensure that the */
+/* eigenvalue is refined up to the required precision. */
+/* The correct value is restored afterwards. */
+ savgap = wgap[windex];
+ wgap[windex] = gap;
+/* We want to use the Rayleigh Quotient Correction */
+/* as often as possible since it converges quadratically */
+/* when we are close enough to the desired eigenvalue. */
+/* However, the Rayleigh Quotient can have the wrong sign */
+/* and lead us away from the desired eigenvalue. In this */
+/* case, the best we can do is to use bisection. */
+ usedbs = FALSE_;
+ usedrq = FALSE_;
+/* Bisection is initially turned off unless it is forced */
+ needbs = ! tryrqc;
+L120:
+/* Check if bisection should be used to refine eigenvalue */
+ if (needbs) {
+/* Take the bisection as new iterate */
+ usedbs = TRUE_;
+ itmp1 = iwork[iindr + windex];
+ offset = indexw[wbegin] - 1;
+ d__1 = eps * 2.;
+ dlarrb_(&in, &d__[ibegin], &work[indlld + ibegin
+ - 1], &indeig, &indeig, &c_b5, &d__1, &
+ offset, &work[wbegin], &wgap[wbegin], &
+ werr[wbegin], &work[indwrk], &iwork[
+ iindwk], pivmin, &spdiam, &itmp1, &iinfo);
+ if (iinfo != 0) {
+ *info = -3;
+ return 0;
+ }
+ lambda = work[windex];
+/* Reset twist index from inaccurate LAMBDA to */
+/* force computation of true MINGMA */
+ iwork[iindr + windex] = 0;
+ }
+/* Given LAMBDA, compute the eigenvector. */
+ L__1 = ! usedbs;
+ dlar1v_(&in, &c__1, &in, &lambda, &d__[ibegin], &l[
+ ibegin], &work[indld + ibegin - 1], &work[
+ indlld + ibegin - 1], pivmin, &gaptol, &z__[
+ ibegin + windex * z_dim1], &L__1, &negcnt, &
+ ztz, &mingma, &iwork[iindr + windex], &isuppz[
+ (windex << 1) - 1], &nrminv, &resid, &rqcorr,
+ &work[indwrk]);
+ if (iter == 0) {
+ bstres = resid;
+ bstw = lambda;
+ } else if (resid < bstres) {
+ bstres = resid;
+ bstw = lambda;
+ }
+/* Computing MIN */
+ i__4 = isupmn, i__5 = isuppz[(windex << 1) - 1];
+ isupmn = min(i__4,i__5);
+/* Computing MAX */
+ i__4 = isupmx, i__5 = isuppz[windex * 2];
+ isupmx = max(i__4,i__5);
+ ++iter;
+/* sin alpha <= |resid|/gap */
+/* Note that both the residual and the gap are */
+/* proportional to the matrix, so ||T|| doesn't play */
+/* a role in the quotient */
+
+/* Convergence test for Rayleigh-Quotient iteration */
+/* (omitted when Bisection has been used) */
+
+ if (resid > tol * gap && abs(rqcorr) > rqtol * abs(
+ lambda) && ! usedbs) {
+/* We need to check that the RQCORR update doesn't */
+/* move the eigenvalue away from the desired one and */
+/* towards a neighbor. -> protection with bisection */
+ if (indeig <= negcnt) {
+/* The wanted eigenvalue lies to the left */
+ sgndef = -1.;
+ } else {
+/* The wanted eigenvalue lies to the right */
+ sgndef = 1.;
+ }
+/* We only use the RQCORR if it improves the */
+/* the iterate reasonably. */
+ if (rqcorr * sgndef >= 0. && lambda + rqcorr <=
+ right && lambda + rqcorr >= left) {
+ usedrq = TRUE_;
+/* Store new midpoint of bisection interval in WORK */
+ if (sgndef == 1.) {
+/* The current LAMBDA is on the left of the true */
+/* eigenvalue */
+ left = lambda;
+/* We prefer to assume that the error estimate */
+/* is correct. We could make the interval not */
+/* as a bracket but to be modified if the RQCORR */
+/* chooses to. In this case, the RIGHT side should */
+/* be modified as follows: */
+/* RIGHT = MAX(RIGHT, LAMBDA + RQCORR) */
+ } else {
+/* The current LAMBDA is on the right of the true */
+/* eigenvalue */
+ right = lambda;
+/* See comment about assuming the error estimate is */
+/* correct above. */
+/* LEFT = MIN(LEFT, LAMBDA + RQCORR) */
+ }
+ work[windex] = (right + left) * .5;
+/* Take RQCORR since it has the correct sign and */
+/* improves the iterate reasonably */
+ lambda += rqcorr;
+/* Update width of error interval */
+ werr[windex] = (right - left) * .5;
+ } else {
+ needbs = TRUE_;
+ }
+ if (right - left < rqtol * abs(lambda)) {
+/* The eigenvalue is computed to bisection accuracy */
+/* compute eigenvector and stop */
+ usedbs = TRUE_;
+ goto L120;
+ } else if (iter < 10) {
+ goto L120;
+ } else if (iter == 10) {
+ needbs = TRUE_;
+ goto L120;
+ } else {
+ *info = 5;
+ return 0;
+ }
+ } else {
+ stp2ii = FALSE_;
+ if (usedrq && usedbs && bstres <= resid) {
+ lambda = bstw;
+ stp2ii = TRUE_;
+ }
+ if (stp2ii) {
+/* improve error angle by second step */
+ L__1 = ! usedbs;
+ dlar1v_(&in, &c__1, &in, &lambda, &d__[ibegin]
+, &l[ibegin], &work[indld + ibegin -
+ 1], &work[indlld + ibegin - 1],
+ pivmin, &gaptol, &z__[ibegin + windex
+ * z_dim1], &L__1, &negcnt, &ztz, &
+ mingma, &iwork[iindr + windex], &
+ isuppz[(windex << 1) - 1], &nrminv, &
+ resid, &rqcorr, &work[indwrk]);
+ }
+ work[windex] = lambda;
+ }
+
+/* Compute FP-vector support w.r.t. whole matrix */
+
+ isuppz[(windex << 1) - 1] += oldien;
+ isuppz[windex * 2] += oldien;
+ zfrom = isuppz[(windex << 1) - 1];
+ zto = isuppz[windex * 2];
+ isupmn += oldien;
+ isupmx += oldien;
+/* Ensure vector is ok if support in the RQI has changed */
+ if (isupmn < zfrom) {
+ i__4 = zfrom - 1;
+ for (ii = isupmn; ii <= i__4; ++ii) {
+ z__[ii + windex * z_dim1] = 0.;
+/* L122: */
+ }
+ }
+ if (isupmx > zto) {
+ i__4 = isupmx;
+ for (ii = zto + 1; ii <= i__4; ++ii) {
+ z__[ii + windex * z_dim1] = 0.;
+/* L123: */
+ }
+ }
+ i__4 = zto - zfrom + 1;
+ dscal_(&i__4, &nrminv, &z__[zfrom + windex * z_dim1],
+ &c__1);
+L125:
+/* Update W */
+ w[windex] = lambda + sigma;
+/* Recompute the gaps on the left and right */
+/* But only allow them to become larger and not */
+/* smaller (which can only happen through "bad" */
+/* cancellation and doesn't reflect the theory */
+/* where the initial gaps are underestimated due */
+/* to WERR being too crude.) */
+ if (! eskip) {
+ if (k > 1) {
+/* Computing MAX */
+ d__1 = wgap[windmn], d__2 = w[windex] - werr[
+ windex] - w[windmn] - werr[windmn];
+ wgap[windmn] = max(d__1,d__2);
+ }
+ if (windex < wend) {
+/* Computing MAX */
+ d__1 = savgap, d__2 = w[windpl] - werr[windpl]
+ - w[windex] - werr[windex];
+ wgap[windex] = max(d__1,d__2);
+ }
+ }
+ ++idone;
+ }
+/* here ends the code for the current child */
+
+L139:
+/* Proceed to any remaining child nodes */
+ newfst = j + 1;
+L140:
+ ;
+ }
+/* L150: */
+ }
+ ++ndepth;
+ goto L40;
+ }
+ ibegin = iend + 1;
+ wbegin = wend + 1;
+L170:
+ ;
+ }
+
+ return 0;
+
+/* End of DLARRV */
+
+} /* dlarrv_ */
diff --git a/contrib/libs/clapack/dlartg.c b/contrib/libs/clapack/dlartg.c
new file mode 100644
index 0000000000..40179e51ef
--- /dev/null
+++ b/contrib/libs/clapack/dlartg.c
@@ -0,0 +1,190 @@
+/* dlartg.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlartg_(doublereal *f, doublereal *g, doublereal *cs,
+ doublereal *sn, doublereal *r__)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double log(doublereal), pow_di(doublereal *, integer *), sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ doublereal f1, g1, eps, scale;
+ integer count;
+ doublereal safmn2, safmx2;
+ extern doublereal dlamch_(char *);
+ doublereal safmin;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLARTG generate a plane rotation so that */
+
+/* [ CS SN ] . [ F ] = [ R ] where CS**2 + SN**2 = 1. */
+/* [ -SN CS ] [ G ] [ 0 ] */
+
+/* This is a slower, more accurate version of the BLAS1 routine DROTG, */
+/* with the following other differences: */
+/* F and G are unchanged on return. */
+/* If G=0, then CS=1 and SN=0. */
+/* If F=0 and (G .ne. 0), then CS=0 and SN=1 without doing any */
+/* floating point operations (saves work in DBDSQR when */
+/* there are zeros on the diagonal). */
+
+/* If F exceeds G in magnitude, CS will be positive. */
+
+/* Arguments */
+/* ========= */
+
+/* F (input) DOUBLE PRECISION */
+/* The first component of vector to be rotated. */
+
+/* G (input) DOUBLE PRECISION */
+/* The second component of vector to be rotated. */
+
+/* CS (output) DOUBLE PRECISION */
+/* The cosine of the rotation. */
+
+/* SN (output) DOUBLE PRECISION */
+/* The sine of the rotation. */
+
+/* R (output) DOUBLE PRECISION */
+/* The nonzero component of the rotated vector. */
+
+/* This version has a few statements commented out for thread safety */
+/* (machine parameters are computed on each entry). 10 feb 03, SJH. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* LOGICAL FIRST */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Save statement .. */
+/* SAVE FIRST, SAFMX2, SAFMIN, SAFMN2 */
+/* .. */
+/* .. Data statements .. */
+/* DATA FIRST / .TRUE. / */
+/* .. */
+/* .. Executable Statements .. */
+
+/* IF( FIRST ) THEN */
+ safmin = dlamch_("S");
+ eps = dlamch_("E");
+ d__1 = dlamch_("B");
+ i__1 = (integer) (log(safmin / eps) / log(dlamch_("B")) / 2.);
+ safmn2 = pow_di(&d__1, &i__1);
+ safmx2 = 1. / safmn2;
+/* FIRST = .FALSE. */
+/* END IF */
+ if (*g == 0.) {
+ *cs = 1.;
+ *sn = 0.;
+ *r__ = *f;
+ } else if (*f == 0.) {
+ *cs = 0.;
+ *sn = 1.;
+ *r__ = *g;
+ } else {
+ f1 = *f;
+ g1 = *g;
+/* Computing MAX */
+ d__1 = abs(f1), d__2 = abs(g1);
+ scale = max(d__1,d__2);
+ if (scale >= safmx2) {
+ count = 0;
+L10:
+ ++count;
+ f1 *= safmn2;
+ g1 *= safmn2;
+/* Computing MAX */
+ d__1 = abs(f1), d__2 = abs(g1);
+ scale = max(d__1,d__2);
+ if (scale >= safmx2) {
+ goto L10;
+ }
+/* Computing 2nd power */
+ d__1 = f1;
+/* Computing 2nd power */
+ d__2 = g1;
+ *r__ = sqrt(d__1 * d__1 + d__2 * d__2);
+ *cs = f1 / *r__;
+ *sn = g1 / *r__;
+ i__1 = count;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ *r__ *= safmx2;
+/* L20: */
+ }
+ } else if (scale <= safmn2) {
+ count = 0;
+L30:
+ ++count;
+ f1 *= safmx2;
+ g1 *= safmx2;
+/* Computing MAX */
+ d__1 = abs(f1), d__2 = abs(g1);
+ scale = max(d__1,d__2);
+ if (scale <= safmn2) {
+ goto L30;
+ }
+/* Computing 2nd power */
+ d__1 = f1;
+/* Computing 2nd power */
+ d__2 = g1;
+ *r__ = sqrt(d__1 * d__1 + d__2 * d__2);
+ *cs = f1 / *r__;
+ *sn = g1 / *r__;
+ i__1 = count;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ *r__ *= safmn2;
+/* L40: */
+ }
+ } else {
+/* Computing 2nd power */
+ d__1 = f1;
+/* Computing 2nd power */
+ d__2 = g1;
+ *r__ = sqrt(d__1 * d__1 + d__2 * d__2);
+ *cs = f1 / *r__;
+ *sn = g1 / *r__;
+ }
+ if (abs(*f) > abs(*g) && *cs < 0.) {
+ *cs = -(*cs);
+ *sn = -(*sn);
+ *r__ = -(*r__);
+ }
+ }
+ return 0;
+
+/* End of DLARTG */
+
+} /* dlartg_ */
diff --git a/contrib/libs/clapack/dlartv.c b/contrib/libs/clapack/dlartv.c
new file mode 100644
index 0000000000..6519c552fa
--- /dev/null
+++ b/contrib/libs/clapack/dlartv.c
@@ -0,0 +1,106 @@
+/* dlartv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlartv_(integer *n, doublereal *x, integer *incx,
+ doublereal *y, integer *incy, doublereal *c__, doublereal *s, integer
+ *incc)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ integer i__, ic, ix, iy;
+ doublereal xi, yi;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLARTV applies a vector of real plane rotations to elements of the */
+/* real vectors x and y. For i = 1,2,...,n */
+
+/* ( x(i) ) := ( c(i) s(i) ) ( x(i) ) */
+/* ( y(i) ) ( -s(i) c(i) ) ( y(i) ) */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of plane rotations to be applied. */
+
+/* X (input/output) DOUBLE PRECISION array, */
+/* dimension (1+(N-1)*INCX) */
+/* The vector x. */
+
+/* INCX (input) INTEGER */
+/* The increment between elements of X. INCX > 0. */
+
+/* Y (input/output) DOUBLE PRECISION array, */
+/* dimension (1+(N-1)*INCY) */
+/* The vector y. */
+
+/* INCY (input) INTEGER */
+/* The increment between elements of Y. INCY > 0. */
+
+/* C (input) DOUBLE PRECISION array, dimension (1+(N-1)*INCC) */
+/* The cosines of the plane rotations. */
+
+/* S (input) DOUBLE PRECISION array, dimension (1+(N-1)*INCC) */
+/* The sines of the plane rotations. */
+
+/* INCC (input) INTEGER */
+/* The increment between elements of C and S. INCC > 0. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --s;
+ --c__;
+ --y;
+ --x;
+
+ /* Function Body */
+ ix = 1;
+ iy = 1;
+ ic = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ xi = x[ix];
+ yi = y[iy];
+ x[ix] = c__[ic] * xi + s[ic] * yi;
+ y[iy] = c__[ic] * yi - s[ic] * xi;
+ ix += *incx;
+ iy += *incy;
+ ic += *incc;
+/* L10: */
+ }
+ return 0;
+
+/* End of DLARTV */
+
+} /* dlartv_ */
diff --git a/contrib/libs/clapack/dlaruv.c b/contrib/libs/clapack/dlaruv.c
new file mode 100644
index 0000000000..fa9f96f804
--- /dev/null
+++ b/contrib/libs/clapack/dlaruv.c
@@ -0,0 +1,192 @@
+/* dlaruv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlaruv_(integer *iseed, integer *n, doublereal *x)
+{
+ /* Initialized data */
+
+ static integer mm[512] /* was [128][4] */ = { 494,2637,255,2008,1253,
+ 3344,4084,1739,3143,3468,688,1657,1238,3166,1292,3422,1270,2016,
+ 154,2862,697,1706,491,931,1444,444,3577,3944,2184,1661,3482,657,
+ 3023,3618,1267,1828,164,3798,3087,2400,2870,3876,1905,1593,1797,
+ 1234,3460,328,2861,1950,617,2070,3331,769,1558,2412,2800,189,287,
+ 2045,1227,2838,209,2770,3654,3993,192,2253,3491,2889,2857,2094,
+ 1818,688,1407,634,3231,815,3524,1914,516,164,303,2144,3480,119,
+ 3357,837,2826,2332,2089,3780,1700,3712,150,2000,3375,1621,3090,
+ 3765,1149,3146,33,3082,2741,359,3316,1749,185,2784,2202,2199,1364,
+ 1244,2020,3160,2785,2772,1217,1822,1245,2252,3904,2774,997,2573,
+ 1148,545,322,789,1440,752,2859,123,1848,643,2405,2638,2344,46,
+ 3814,913,3649,339,3808,822,2832,3078,3633,2970,637,2249,2081,4019,
+ 1478,242,481,2075,4058,622,3376,812,234,641,4005,1122,3135,2640,
+ 2302,40,1832,2247,2034,2637,1287,1691,496,1597,2394,2584,1843,336,
+ 1472,2407,433,2096,1761,2810,566,442,41,1238,1086,603,840,3168,
+ 1499,1084,3438,2408,1589,2391,288,26,512,1456,171,1677,2657,2270,
+ 2587,2961,1970,1817,676,1410,3723,2803,3185,184,663,499,3784,1631,
+ 1925,3912,1398,1349,1441,2224,2411,1907,3192,2786,382,37,759,2948,
+ 1862,3802,2423,2051,2295,1332,1832,2405,3638,3661,327,3660,716,
+ 1842,3987,1368,1848,2366,2508,3754,1766,3572,2893,307,1297,3966,
+ 758,2598,3406,2922,1038,2934,2091,2451,1580,1958,2055,1507,1078,
+ 3273,17,854,2916,3971,2889,3831,2621,1541,893,736,3992,787,2125,
+ 2364,2460,257,1574,3912,1216,3248,3401,2124,2762,149,2245,166,466,
+ 4018,1399,190,2879,153,2320,18,712,2159,2318,2091,3443,1510,449,
+ 1956,2201,3137,3399,1321,2271,3667,2703,629,2365,2431,1113,3922,
+ 2554,184,2099,3228,4012,1921,3452,3901,572,3309,3171,817,3039,
+ 1696,1256,3715,2077,3019,1497,1101,717,51,981,1978,1813,3881,76,
+ 3846,3694,1682,124,1660,3997,479,1141,886,3514,1301,3604,1888,
+ 1836,1990,2058,692,1194,20,3285,2046,2107,3508,3525,3801,2549,
+ 1145,2253,305,3301,1065,3133,2913,3285,1241,1197,3729,2501,1673,
+ 541,2753,949,2361,1165,4081,2725,3305,3069,3617,3733,409,2157,
+ 1361,3973,1865,2525,1409,3445,3577,77,3761,2149,1449,3005,225,85,
+ 3673,3117,3089,1349,2057,413,65,1845,697,3085,3441,1573,3689,2941,
+ 929,533,2841,4077,721,2821,2249,2397,2817,245,1913,1997,3121,997,
+ 1833,2877,1633,981,2009,941,2449,197,2441,285,1473,2741,3129,909,
+ 2801,421,4073,2813,2337,1429,1177,1901,81,1669,2633,2269,129,1141,
+ 249,3917,2481,3941,2217,2749,3041,1877,345,2861,1809,3141,2825,
+ 157,2881,3637,1465,2829,2161,3365,361,2685,3745,2325,3609,3821,
+ 3537,517,3017,2141,1537 };
+
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ integer i__, i1, i2, i3, i4, it1, it2, it3, it4;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLARUV returns a vector of n random real numbers from a uniform (0,1) */
+/* distribution (n <= 128). */
+
+/* This is an auxiliary routine called by DLARNV and ZLARNV. */
+
+/* Arguments */
+/* ========= */
+
+/* ISEED (input/output) INTEGER array, dimension (4) */
+/* On entry, the seed of the random number generator; the array */
+/* elements must be between 0 and 4095, and ISEED(4) must be */
+/* odd. */
+/* On exit, the seed is updated. */
+
+/* N (input) INTEGER */
+/* The number of random numbers to be generated. N <= 128. */
+
+/* X (output) DOUBLE PRECISION array, dimension (N) */
+/* The generated random numbers. */
+
+/* Further Details */
+/* =============== */
+
+/* This routine uses a multiplicative congruential method with modulus */
+/* 2**48 and multiplier 33952834046453 (see G.S.Fishman, */
+/* 'Multiplicative congruential random number generators with modulus */
+/* 2**b: an exhaustive analysis for b = 32 and a partial analysis for */
+/* b = 48', Math. Comp. 189, pp 331-344, 1990). */
+
+/* 48-bit integers are stored in 4 integer array elements with 12 bits */
+/* per element. Hence the routine is portable across machines with */
+/* integers of 32 bits or more. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Data statements .. */
+ /* Parameter adjustments */
+ --iseed;
+ --x;
+
+ /* Function Body */
+/* .. */
+/* .. Executable Statements .. */
+
+ i1 = iseed[1];
+ i2 = iseed[2];
+ i3 = iseed[3];
+ i4 = iseed[4];
+
+ i__1 = min(*n,128);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+L20:
+
+/* Multiply the seed by i-th power of the multiplier modulo 2**48 */
+
+ it4 = i4 * mm[i__ + 383];
+ it3 = it4 / 4096;
+ it4 -= it3 << 12;
+ it3 = it3 + i3 * mm[i__ + 383] + i4 * mm[i__ + 255];
+ it2 = it3 / 4096;
+ it3 -= it2 << 12;
+ it2 = it2 + i2 * mm[i__ + 383] + i3 * mm[i__ + 255] + i4 * mm[i__ +
+ 127];
+ it1 = it2 / 4096;
+ it2 -= it1 << 12;
+ it1 = it1 + i1 * mm[i__ + 383] + i2 * mm[i__ + 255] + i3 * mm[i__ +
+ 127] + i4 * mm[i__ - 1];
+ it1 %= 4096;
+
+/* Convert 48-bit integer to a real number in the interval (0,1) */
+
+ x[i__] = ((doublereal) it1 + ((doublereal) it2 + ((doublereal) it3 + (
+ doublereal) it4 * 2.44140625e-4) * 2.44140625e-4) *
+ 2.44140625e-4) * 2.44140625e-4;
+
+ if (x[i__] == 1.) {
+/* If a real number has n bits of precision, and the first */
+/* n bits of the 48-bit integer above happen to be all 1 (which */
+/* will occur about once every 2**n calls), then X( I ) will */
+/* be rounded to exactly 1.0. */
+/* Since X( I ) is not supposed to return exactly 0.0 or 1.0, */
+/* the statistically correct thing to do in this situation is */
+/* simply to iterate again. */
+/* N.B. the case X( I ) = 0.0 should not be possible. */
+ i1 += 2;
+ i2 += 2;
+ i3 += 2;
+ i4 += 2;
+ goto L20;
+ }
+
+/* L10: */
+ }
+
+/* Return final value of seed */
+
+ iseed[1] = it1;
+ iseed[2] = it2;
+ iseed[3] = it3;
+ iseed[4] = it4;
+ return 0;
+
+/* End of DLARUV */
+
+} /* dlaruv_ */
diff --git a/contrib/libs/clapack/dlarz.c b/contrib/libs/clapack/dlarz.c
new file mode 100644
index 0000000000..574487831e
--- /dev/null
+++ b/contrib/libs/clapack/dlarz.c
@@ -0,0 +1,194 @@
+/* dlarz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b5 = 1.;
+
+/* Subroutine */ int dlarz_(char *side, integer *m, integer *n, integer *l,
+ doublereal *v, integer *incv, doublereal *tau, doublereal *c__,
+ integer *ldc, doublereal *work)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset;
+ doublereal d__1;
+
+ /* Local variables */
+ extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *), dcopy_(integer *,
+ doublereal *, integer *, doublereal *, integer *), daxpy_(integer
+ *, doublereal *, doublereal *, integer *, doublereal *, integer *)
+ ;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLARZ applies a real elementary reflector H to a real M-by-N */
+/* matrix C, from either the left or the right. H is represented in the */
+/* form */
+
+/* H = I - tau * v * v' */
+
+/* where tau is a real scalar and v is a real vector. */
+
+/* If tau = 0, then H is taken to be the unit matrix. */
+
+
+/* H is a product of k elementary reflectors as returned by DTZRZF. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': form H * C */
+/* = 'R': form C * H */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* L (input) INTEGER */
+/* The number of entries of the vector V containing */
+/* the meaningful part of the Householder vectors. */
+/* If SIDE = 'L', M >= L >= 0, if SIDE = 'R', N >= L >= 0. */
+
+/* V (input) DOUBLE PRECISION array, dimension (1+(L-1)*abs(INCV)) */
+/* The vector v in the representation of H as returned by */
+/* DTZRZF. V is not used if TAU = 0. */
+
+/* INCV (input) INTEGER */
+/* The increment between elements of v. INCV <> 0. */
+
+/* TAU (input) DOUBLE PRECISION */
+/* The value tau in the representation of H. */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by the matrix H * C if SIDE = 'L', */
+/* or C * H if SIDE = 'R'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension */
+/* (N) if SIDE = 'L' */
+/* or (M) if SIDE = 'R' */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --v;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ if (lsame_(side, "L")) {
+
+/* Form H * C */
+
+ if (*tau != 0.) {
+
+/* w( 1:n ) = C( 1, 1:n ) */
+
+ dcopy_(n, &c__[c_offset], ldc, &work[1], &c__1);
+
+/* w( 1:n ) = w( 1:n ) + C( m-l+1:m, 1:n )' * v( 1:l ) */
+
+ dgemv_("Transpose", l, n, &c_b5, &c__[*m - *l + 1 + c_dim1], ldc,
+ &v[1], incv, &c_b5, &work[1], &c__1);
+
+/* C( 1, 1:n ) = C( 1, 1:n ) - tau * w( 1:n ) */
+
+ d__1 = -(*tau);
+ daxpy_(n, &d__1, &work[1], &c__1, &c__[c_offset], ldc);
+
+/* C( m-l+1:m, 1:n ) = C( m-l+1:m, 1:n ) - ... */
+/* tau * v( 1:l ) * w( 1:n )' */
+
+ d__1 = -(*tau);
+ dger_(l, n, &d__1, &v[1], incv, &work[1], &c__1, &c__[*m - *l + 1
+ + c_dim1], ldc);
+ }
+
+ } else {
+
+/* Form C * H */
+
+ if (*tau != 0.) {
+
+/* w( 1:m ) = C( 1:m, 1 ) */
+
+ dcopy_(m, &c__[c_offset], &c__1, &work[1], &c__1);
+
+/* w( 1:m ) = w( 1:m ) + C( 1:m, n-l+1:n, 1:n ) * v( 1:l ) */
+
+ dgemv_("No transpose", m, l, &c_b5, &c__[(*n - *l + 1) * c_dim1 +
+ 1], ldc, &v[1], incv, &c_b5, &work[1], &c__1);
+
+/* C( 1:m, 1 ) = C( 1:m, 1 ) - tau * w( 1:m ) */
+
+ d__1 = -(*tau);
+ daxpy_(m, &d__1, &work[1], &c__1, &c__[c_offset], &c__1);
+
+/* C( 1:m, n-l+1:n ) = C( 1:m, n-l+1:n ) - ... */
+/* tau * w( 1:m ) * v( 1:l )' */
+
+ d__1 = -(*tau);
+ dger_(m, l, &d__1, &work[1], &c__1, &v[1], incv, &c__[(*n - *l +
+ 1) * c_dim1 + 1], ldc);
+
+ }
+
+ }
+
+ return 0;
+
+/* End of DLARZ */
+
+} /* dlarz_ */
diff --git a/contrib/libs/clapack/dlarzb.c b/contrib/libs/clapack/dlarzb.c
new file mode 100644
index 0000000000..2cf3e7fae2
--- /dev/null
+++ b/contrib/libs/clapack/dlarzb.c
@@ -0,0 +1,288 @@
+/* dlarzb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b13 = 1.;
+static doublereal c_b23 = -1.;
+
+/* Subroutine */ int dlarzb_(char *side, char *trans, char *direct, char *
+ storev, integer *m, integer *n, integer *k, integer *l, doublereal *v,
+ integer *ldv, doublereal *t, integer *ldt, doublereal *c__, integer *
+ ldc, doublereal *work, integer *ldwork)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, t_dim1, t_offset, v_dim1, v_offset, work_dim1,
+ work_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, info;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dtrmm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *), xerbla_(
+ char *, integer *);
+ char transt[1];
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLARZB applies a real block reflector H or its transpose H**T to */
+/* a real distributed M-by-N C from the left or the right. */
+
+/* Currently, only STOREV = 'R' and DIRECT = 'B' are supported. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply H or H' from the Left */
+/* = 'R': apply H or H' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply H (No transpose) */
+/* = 'C': apply H' (Transpose) */
+
+/* DIRECT (input) CHARACTER*1 */
+/* Indicates how H is formed from a product of elementary */
+/* reflectors */
+/* = 'F': H = H(1) H(2) . . . H(k) (Forward, not supported yet) */
+/* = 'B': H = H(k) . . . H(2) H(1) (Backward) */
+
+/* STOREV (input) CHARACTER*1 */
+/* Indicates how the vectors which define the elementary */
+/* reflectors are stored: */
+/* = 'C': Columnwise (not supported yet) */
+/* = 'R': Rowwise */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* K (input) INTEGER */
+/* The order of the matrix T (= the number of elementary */
+/* reflectors whose product defines the block reflector). */
+
+/* L (input) INTEGER */
+/* The number of columns of the matrix V containing the */
+/* meaningful part of the Householder reflectors. */
+/* If SIDE = 'L', M >= L >= 0, if SIDE = 'R', N >= L >= 0. */
+
+/* V (input) DOUBLE PRECISION array, dimension (LDV,NV). */
+/* If STOREV = 'C', NV = K; if STOREV = 'R', NV = L. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. */
+/* If STOREV = 'C', LDV >= L; if STOREV = 'R', LDV >= K. */
+
+/* T (input) DOUBLE PRECISION array, dimension (LDT,K) */
+/* The triangular K-by-K matrix T in the representation of the */
+/* block reflector. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= K. */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by H*C or H'*C or C*H or C*H'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (LDWORK,K) */
+
+/* LDWORK (input) INTEGER */
+/* The leading dimension of the array WORK. */
+/* If SIDE = 'L', LDWORK >= max(1,N); */
+/* if SIDE = 'R', LDWORK >= max(1,M). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ work_dim1 = *ldwork;
+ work_offset = 1 + work_dim1;
+ work -= work_offset;
+
+ /* Function Body */
+ if (*m <= 0 || *n <= 0) {
+ return 0;
+ }
+
+/* Check for currently supported options */
+
+ info = 0;
+ if (! lsame_(direct, "B")) {
+ info = -3;
+ } else if (! lsame_(storev, "R")) {
+ info = -4;
+ }
+ if (info != 0) {
+ i__1 = -info;
+ xerbla_("DLARZB", &i__1);
+ return 0;
+ }
+
+ if (lsame_(trans, "N")) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C */
+
+/* W( 1:n, 1:k ) = C( 1:k, 1:n )' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dcopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1], &c__1);
+/* L10: */
+ }
+
+/* W( 1:n, 1:k ) = W( 1:n, 1:k ) + ... */
+/* C( m-l+1:m, 1:n )' * V( 1:k, 1:l )' */
+
+ if (*l > 0) {
+ dgemm_("Transpose", "Transpose", n, k, l, &c_b13, &c__[*m - *l +
+ 1 + c_dim1], ldc, &v[v_offset], ldv, &c_b13, &work[
+ work_offset], ldwork);
+ }
+
+/* W( 1:n, 1:k ) = W( 1:n, 1:k ) * T' or W( 1:m, 1:k ) * T */
+
+ dtrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b13, &t[
+ t_offset], ldt, &work[work_offset], ldwork);
+
+/* C( 1:k, 1:n ) = C( 1:k, 1:n ) - W( 1:n, 1:k )' */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *k;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] -= work[j + i__ * work_dim1];
+/* L20: */
+ }
+/* L30: */
+ }
+
+/* C( m-l+1:m, 1:n ) = C( m-l+1:m, 1:n ) - ... */
+/* V( 1:k, 1:l )' * W( 1:n, 1:k )' */
+
+ if (*l > 0) {
+ dgemm_("Transpose", "Transpose", l, n, k, &c_b23, &v[v_offset],
+ ldv, &work[work_offset], ldwork, &c_b13, &c__[*m - *l + 1
+ + c_dim1], ldc);
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' */
+
+/* W( 1:m, 1:k ) = C( 1:m, 1:k ) */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dcopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j * work_dim1 + 1], &
+ c__1);
+/* L40: */
+ }
+
+/* W( 1:m, 1:k ) = W( 1:m, 1:k ) + ... */
+/* C( 1:m, n-l+1:n ) * V( 1:k, 1:l )' */
+
+ if (*l > 0) {
+ dgemm_("No transpose", "Transpose", m, k, l, &c_b13, &c__[(*n - *
+ l + 1) * c_dim1 + 1], ldc, &v[v_offset], ldv, &c_b13, &
+ work[work_offset], ldwork);
+ }
+
+/* W( 1:m, 1:k ) = W( 1:m, 1:k ) * T or W( 1:m, 1:k ) * T' */
+
+ dtrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b13, &t[t_offset]
+, ldt, &work[work_offset], ldwork);
+
+/* C( 1:m, 1:k ) = C( 1:m, 1:k ) - W( 1:m, 1:k ) */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1];
+/* L50: */
+ }
+/* L60: */
+ }
+
+/* C( 1:m, n-l+1:n ) = C( 1:m, n-l+1:n ) - ... */
+/* W( 1:m, 1:k ) * V( 1:k, 1:l ) */
+
+ if (*l > 0) {
+ dgemm_("No transpose", "No transpose", m, l, k, &c_b23, &work[
+ work_offset], ldwork, &v[v_offset], ldv, &c_b13, &c__[(*n
+ - *l + 1) * c_dim1 + 1], ldc);
+ }
+
+ }
+
+ return 0;
+
+/* End of DLARZB */
+
+} /* dlarzb_ */
diff --git a/contrib/libs/clapack/dlarzt.c b/contrib/libs/clapack/dlarzt.c
new file mode 100644
index 0000000000..8e8450abe2
--- /dev/null
+++ b/contrib/libs/clapack/dlarzt.c
@@ -0,0 +1,229 @@
+/* dlarzt.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b8 = 0.;
+static integer c__1 = 1;
+
+/* Subroutine */ int dlarzt_(char *direct, char *storev, integer *n, integer *
+ k, doublereal *v, integer *ldv, doublereal *tau, doublereal *t,
+ integer *ldt)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, v_dim1, v_offset, i__1;
+ doublereal d__1;
+
+ /* Local variables */
+ integer i__, j, info;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *), dtrmv_(char *,
+ char *, char *, integer *, doublereal *, integer *, doublereal *,
+ integer *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLARZT forms the triangular factor T of a real block reflector */
+/* H of order > n, which is defined as a product of k elementary */
+/* reflectors. */
+
+/* If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular; */
+
+/* If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. */
+
+/* If STOREV = 'C', the vector which defines the elementary reflector */
+/* H(i) is stored in the i-th column of the array V, and */
+
+/* H = I - V * T * V' */
+
+/* If STOREV = 'R', the vector which defines the elementary reflector */
+/* H(i) is stored in the i-th row of the array V, and */
+
+/* H = I - V' * T * V */
+
+/* Currently, only STOREV = 'R' and DIRECT = 'B' are supported. */
+
+/* Arguments */
+/* ========= */
+
+/* DIRECT (input) CHARACTER*1 */
+/* Specifies the order in which the elementary reflectors are */
+/* multiplied to form the block reflector: */
+/* = 'F': H = H(1) H(2) . . . H(k) (Forward, not supported yet) */
+/* = 'B': H = H(k) . . . H(2) H(1) (Backward) */
+
+/* STOREV (input) CHARACTER*1 */
+/* Specifies how the vectors which define the elementary */
+/* reflectors are stored (see also Further Details): */
+/* = 'C': columnwise (not supported yet) */
+/* = 'R': rowwise */
+
+/* N (input) INTEGER */
+/* The order of the block reflector H. N >= 0. */
+
+/* K (input) INTEGER */
+/* The order of the triangular factor T (= the number of */
+/* elementary reflectors). K >= 1. */
+
+/* V (input/output) DOUBLE PRECISION array, dimension */
+/* (LDV,K) if STOREV = 'C' */
+/* (LDV,N) if STOREV = 'R' */
+/* The matrix V. See further details. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. */
+/* If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i). */
+
+/* T (output) DOUBLE PRECISION array, dimension (LDT,K) */
+/* The k by k triangular factor T of the block reflector. */
+/* If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is */
+/* lower triangular. The rest of the array is not used. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= K. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* The shape of the matrix V and the storage of the vectors which define */
+/* the H(i) is best illustrated by the following example with n = 5 and */
+/* k = 3. The elements equal to 1 are not stored; the corresponding */
+/* array elements are modified but restored on exit. The rest of the */
+/* array is not used. */
+
+/* DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': */
+
+/* ______V_____ */
+/* ( v1 v2 v3 ) / \ */
+/* ( v1 v2 v3 ) ( v1 v1 v1 v1 v1 . . . . 1 ) */
+/* V = ( v1 v2 v3 ) ( v2 v2 v2 v2 v2 . . . 1 ) */
+/* ( v1 v2 v3 ) ( v3 v3 v3 v3 v3 . . 1 ) */
+/* ( v1 v2 v3 ) */
+/* . . . */
+/* . . . */
+/* 1 . . */
+/* 1 . */
+/* 1 */
+
+/* DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': */
+
+/* ______V_____ */
+/* 1 / \ */
+/* . 1 ( 1 . . . . v1 v1 v1 v1 v1 ) */
+/* . . 1 ( . 1 . . . v2 v2 v2 v2 v2 ) */
+/* . . . ( . . 1 . . v3 v3 v3 v3 v3 ) */
+/* . . . */
+/* ( v1 v2 v3 ) */
+/* ( v1 v2 v3 ) */
+/* V = ( v1 v2 v3 ) */
+/* ( v1 v2 v3 ) */
+/* ( v1 v2 v3 ) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Check for currently supported options */
+
+ /* Parameter adjustments */
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ --tau;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(direct, "B")) {
+ info = -1;
+ } else if (! lsame_(storev, "R")) {
+ info = -2;
+ }
+ if (info != 0) {
+ i__1 = -info;
+ xerbla_("DLARZT", &i__1);
+ return 0;
+ }
+
+ for (i__ = *k; i__ >= 1; --i__) {
+ if (tau[i__] == 0.) {
+
+/* H(i) = I */
+
+ i__1 = *k;
+ for (j = i__; j <= i__1; ++j) {
+ t[j + i__ * t_dim1] = 0.;
+/* L10: */
+ }
+ } else {
+
+/* general case */
+
+ if (i__ < *k) {
+
+/* T(i+1:k,i) = - tau(i) * V(i+1:k,1:n) * V(i,1:n)' */
+
+ i__1 = *k - i__;
+ d__1 = -tau[i__];
+ dgemv_("No transpose", &i__1, n, &d__1, &v[i__ + 1 + v_dim1],
+ ldv, &v[i__ + v_dim1], ldv, &c_b8, &t[i__ + 1 + i__ *
+ t_dim1], &c__1);
+
+/* T(i+1:k,i) = T(i+1:k,i+1:k) * T(i+1:k,i) */
+
+ i__1 = *k - i__;
+ dtrmv_("Lower", "No transpose", "Non-unit", &i__1, &t[i__ + 1
+ + (i__ + 1) * t_dim1], ldt, &t[i__ + 1 + i__ * t_dim1]
+, &c__1);
+ }
+ t[i__ + i__ * t_dim1] = tau[i__];
+ }
+/* L20: */
+ }
+ return 0;
+
+/* End of DLARZT */
+
+} /* dlarzt_ */
diff --git a/contrib/libs/clapack/dlas2.c b/contrib/libs/clapack/dlas2.c
new file mode 100644
index 0000000000..1362a1a371
--- /dev/null
+++ b/contrib/libs/clapack/dlas2.c
@@ -0,0 +1,144 @@
+/* dlas2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlas2_(doublereal *f, doublereal *g, doublereal *h__,
+	doublereal *ssmin, doublereal *ssmax)
+{
+    /* System generated locals */
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    doublereal c__, fa, ga, ha, as, at, au, fhmn, fhmx;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAS2 computes the singular values of the 2-by-2 matrix */
+/* [ F G ] */
+/* [ 0 H ]. */
+/* On return, SSMIN is the smaller singular value and SSMAX is the */
+/* larger singular value. */
+
+/* Arguments */
+/* ========= */
+
+/* F (input) DOUBLE PRECISION */
+/* The (1,1) element of the 2-by-2 matrix. */
+
+/* G (input) DOUBLE PRECISION */
+/* The (1,2) element of the 2-by-2 matrix. */
+
+/* H (input) DOUBLE PRECISION */
+/* The (2,2) element of the 2-by-2 matrix. */
+
+/* SSMIN (output) DOUBLE PRECISION */
+/* The smaller singular value. */
+
+/* SSMAX (output) DOUBLE PRECISION */
+/* The larger singular value. */
+
+/* Further Details */
+/* =============== */
+
+/* Barring over/underflow, all output quantities are correct to within */
+/* a few units in the last place (ulps), even in the absence of a guard */
+/* digit in addition/subtraction. */
+
+/* In IEEE arithmetic, the code works correctly if one matrix element is */
+/* infinite. */
+
+/* Overflow will not occur unless the largest singular value itself */
+/* overflows, or is within a few ulps of overflow. (On machines with */
+/* partial overflow, like the Cray, overflow may occur if the largest */
+/* singular value is within a factor of 2 of overflow.) */
+
+/* Underflow is harmless if underflow is gradual. Otherwise, results */
+/* may correspond to a matrix modified by perturbations of size near */
+/* the underflow threshold. */
+
+/* ==================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* The singular values of [ F G; 0 H ] depend only on the absolute */
+/* values of the three entries, so reduce to FA, GA, HA up front. */
+
+    fa = abs(*f);
+    ga = abs(*g);
+    ha = abs(*h__);
+    fhmn = min(fa,ha);
+    fhmx = max(fa,ha);
+
+/* If the smaller diagonal magnitude is zero, the matrix has rank at */
+/* most one and the smaller singular value is exactly zero. */
+
+    if (fhmn == 0.) {
+	*ssmin = 0.;
+	if (fhmx == 0.) {
+	    *ssmax = ga;
+	} else {
+/* Computing 2nd power */
+	    d__1 = min(fhmx,ga) / max(fhmx,ga);
+	    *ssmax = max(fhmx,ga) * sqrt(d__1 * d__1 + 1.);
+	}
+    } else {
+	if (ga < fhmx) {
+
+/* Off-diagonal |G| is dominated by the larger diagonal entry; */
+/* all ratios formed below are at most 1 in magnitude. */
+
+	    as = fhmn / fhmx + 1.;
+	    at = (fhmx - fhmn) / fhmx;
+/* Computing 2nd power */
+	    d__1 = ga / fhmx;
+	    au = d__1 * d__1;
+	    c__ = 2. / (sqrt(as * as + au) + sqrt(at * at + au));
+	    *ssmin = fhmn * c__;
+	    *ssmax = fhmx / c__;
+	} else {
+	    au = fhmx / ga;
+	    if (au == 0.) {
+
+/* Avoid possible harmful underflow if exponent range */
+/* asymmetric (true SSMIN may not underflow even if */
+/* AU underflows) */
+
+		*ssmin = fhmn * fhmx / ga;
+		*ssmax = ga;
+	    } else {
+		as = fhmn / fhmx + 1.;
+		at = (fhmx - fhmn) / fhmx;
+/* Computing 2nd power */
+		d__1 = as * au;
+/* Computing 2nd power */
+		d__2 = at * au;
+		c__ = 1. / (sqrt(d__1 * d__1 + 1.) + sqrt(d__2 * d__2 + 1.));
+		*ssmin = fhmn * c__ * au;
+		*ssmin += *ssmin;
+		*ssmax = ga / (c__ + c__);
+	    }
+	}
+    }
+    return 0;
+
+/* End of DLAS2 */
+
+} /* dlas2_ */
diff --git a/contrib/libs/clapack/dlascl.c b/contrib/libs/clapack/dlascl.c
new file mode 100644
index 0000000000..b39a68b293
--- /dev/null
+++ b/contrib/libs/clapack/dlascl.c
@@ -0,0 +1,354 @@
+/* dlascl.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlascl_(char *type__, integer *kl, integer *ku,
+	doublereal *cfrom, doublereal *cto, integer *m, integer *n,
+	doublereal *a, integer *lda, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+
+    /* Local variables */
+    integer i__, j, k1, k2, k3, k4;
+    doublereal mul, cto1;
+    logical done;
+    doublereal ctoc;
+    extern logical lsame_(char *, char *);
+    integer itype;
+    doublereal cfrom1;
+    extern doublereal dlamch_(char *);
+    doublereal cfromc;
+    extern logical disnan_(doublereal *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal bignum, smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASCL multiplies the M by N real matrix A by the real scalar */
+/* CTO/CFROM. This is done without over/underflow as long as the final */
+/* result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that */
+/* A may be full, upper triangular, lower triangular, upper Hessenberg, */
+/* or banded. */
+
+/* Arguments */
+/* ========= */
+
+/* TYPE (input) CHARACTER*1 */
+/* TYPE indicates the storage type of the input matrix. */
+/* = 'G': A is a full matrix. */
+/* = 'L': A is a lower triangular matrix. */
+/* = 'U': A is an upper triangular matrix. */
+/* = 'H': A is an upper Hessenberg matrix. */
+/* = 'B': A is a symmetric band matrix with lower bandwidth KL */
+/* and upper bandwidth KU and with only the lower */
+/* half stored. */
+/* = 'Q': A is a symmetric band matrix with lower bandwidth KL */
+/* and upper bandwidth KU and with only the upper */
+/* half stored. */
+/* = 'Z': A is a band matrix with lower bandwidth KL and upper */
+/* bandwidth KU. */
+
+/* KL (input) INTEGER */
+/* The lower bandwidth of A. Referenced only if TYPE = 'B', */
+/* 'Q' or 'Z'. */
+
+/* KU (input) INTEGER */
+/* The upper bandwidth of A. Referenced only if TYPE = 'B', */
+/* 'Q' or 'Z'. */
+
+/* CFROM (input) DOUBLE PRECISION */
+/* CTO (input) DOUBLE PRECISION */
+/* The matrix A is multiplied by CTO/CFROM. A(I,J) is computed */
+/* without over/underflow if the final result CTO*A(I,J)/CFROM */
+/* can be represented without over/underflow. CFROM must be */
+/* nonzero. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The matrix to be multiplied by CTO/CFROM. See TYPE for the */
+/* storage type. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* INFO (output) INTEGER */
+/* 0 - successful exit */
+/* <0 - if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+
+/* Decode TYPE into the internal code ITYPE; -1 flags an invalid TYPE. */
+
+    if (lsame_(type__, "G")) {
+	itype = 0;
+    } else if (lsame_(type__, "L")) {
+	itype = 1;
+    } else if (lsame_(type__, "U")) {
+	itype = 2;
+    } else if (lsame_(type__, "H")) {
+	itype = 3;
+    } else if (lsame_(type__, "B")) {
+	itype = 4;
+    } else if (lsame_(type__, "Q")) {
+	itype = 5;
+    } else if (lsame_(type__, "Z")) {
+	itype = 6;
+    } else {
+	itype = -1;
+    }
+
+    if (itype == -1) {
+	*info = -1;
+    } else if (*cfrom == 0. || disnan_(cfrom)) {
+	*info = -4;
+    } else if (disnan_(cto)) {
+	*info = -5;
+    } else if (*m < 0) {
+	*info = -6;
+    } else if (*n < 0 || itype == 4 && *n != *m || itype == 5 && *n != *m) {
+	*info = -7;
+    } else if (itype <= 3 && *lda < max(1,*m)) {
+	*info = -9;
+    } else if (itype >= 4) {
+/* Computing MAX */
+	i__1 = *m - 1;
+	if (*kl < 0 || *kl > max(i__1,0)) {
+	    *info = -2;
+	} else /* if(complicated condition) */ {
+/* Computing MAX */
+	    i__1 = *n - 1;
+	    if (*ku < 0 || *ku > max(i__1,0) || (itype == 4 || itype == 5) &&
+		    *kl != *ku) {
+		*info = -3;
+	    } else if (itype == 4 && *lda < *kl + 1 || itype == 5 && *lda < *
+		    ku + 1 || itype == 6 && *lda < (*kl << 1) + *ku + 1) {
+		*info = -9;
+	    }
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLASCL", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *m == 0) {
+	return 0;
+    }
+
+/* Get machine parameters */
+
+    smlnum = dlamch_("S");
+    bignum = 1. / smlnum;
+
+    cfromc = *cfrom;
+    ctoc = *cto;
+
+/* Scale A in steps: each pass multiplies A by a factor MUL chosen so */
+/* that the intermediate results cannot over/underflow; DONE is set on */
+/* the pass that applies the remainder of CTO/CFROM exactly. */
+
+L10:
+    cfrom1 = cfromc * smlnum;
+    if (cfrom1 == cfromc) {
+/* CFROMC is an inf. Multiply by a correctly signed zero for */
+/* finite CTOC, or a NaN if CTOC is infinite. */
+	mul = ctoc / cfromc;
+	done = TRUE_;
+	cto1 = ctoc;
+    } else {
+	cto1 = ctoc / bignum;
+	if (cto1 == ctoc) {
+/* CTOC is either 0 or an inf. In both cases, CTOC itself */
+/* serves as the correct multiplication factor. */
+	    mul = ctoc;
+	    done = TRUE_;
+	    cfromc = 1.;
+	} else if (abs(cfrom1) > abs(ctoc) && ctoc != 0.) {
+	    mul = smlnum;
+	    done = FALSE_;
+	    cfromc = cfrom1;
+	} else if (abs(cto1) > abs(cfromc)) {
+	    mul = bignum;
+	    done = FALSE_;
+	    ctoc = cto1;
+	} else {
+	    mul = ctoc / cfromc;
+	    done = TRUE_;
+	}
+    }
+
+/* Apply the factor MUL only to the entries that TYPE says are stored. */
+
+    if (itype == 0) {
+
+/* Full matrix */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		a[i__ + j * a_dim1] *= mul;
+/* L20: */
+	    }
+/* L30: */
+	}
+
+    } else if (itype == 1) {
+
+/* Lower triangular matrix */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = j; i__ <= i__2; ++i__) {
+		a[i__ + j * a_dim1] *= mul;
+/* L40: */
+	    }
+/* L50: */
+	}
+
+    } else if (itype == 2) {
+
+/* Upper triangular matrix */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = min(j,*m);
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		a[i__ + j * a_dim1] *= mul;
+/* L60: */
+	    }
+/* L70: */
+	}
+
+    } else if (itype == 3) {
+
+/* Upper Hessenberg matrix */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+	    i__3 = j + 1;
+	    i__2 = min(i__3,*m);
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		a[i__ + j * a_dim1] *= mul;
+/* L80: */
+	    }
+/* L90: */
+	}
+
+    } else if (itype == 4) {
+
+/* Lower half of a symmetric band matrix */
+
+	k3 = *kl + 1;
+	k4 = *n + 1;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+	    i__3 = k3, i__4 = k4 - j;
+	    i__2 = min(i__3,i__4);
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		a[i__ + j * a_dim1] *= mul;
+/* L100: */
+	    }
+/* L110: */
+	}
+
+    } else if (itype == 5) {
+
+/* Upper half of a symmetric band matrix */
+
+	k1 = *ku + 2;
+	k3 = *ku + 1;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+	    i__2 = k1 - j;
+	    i__3 = k3;
+	    for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+		a[i__ + j * a_dim1] *= mul;
+/* L120: */
+	    }
+/* L130: */
+	}
+
+    } else if (itype == 6) {
+
+/* Band matrix */
+
+	k1 = *kl + *ku + 2;
+	k2 = *kl + 1;
+	k3 = (*kl << 1) + *ku + 1;
+	k4 = *kl + *ku + 1 + *m;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+	    i__3 = k1 - j;
+/* Computing MIN */
+	    i__4 = k3, i__5 = k4 - j;
+	    i__2 = min(i__4,i__5);
+	    for (i__ = max(i__3,k2); i__ <= i__2; ++i__) {
+		a[i__ + j * a_dim1] *= mul;
+/* L140: */
+	    }
+/* L150: */
+	}
+
+    }
+
+    if (! done) {
+	goto L10;
+    }
+
+    return 0;
+
+/* End of DLASCL */
+
+} /* dlascl_ */
diff --git a/contrib/libs/clapack/dlasd0.c b/contrib/libs/clapack/dlasd0.c
new file mode 100644
index 0000000000..a8b7197156
--- /dev/null
+++ b/contrib/libs/clapack/dlasd0.c
@@ -0,0 +1,291 @@
+/* dlasd0.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__0 = 0;
+static integer c__2 = 2;
+
+/* Subroutine */ int dlasd0_(integer *n, integer *sqre, doublereal *d__,
+	doublereal *e, doublereal *u, integer *ldu, doublereal *vt, integer *
+	ldvt, integer *smlsiz, integer *iwork, doublereal *work, integer *
+	info)
+{
+    /* System generated locals */
+    integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2;
+
+    /* Builtin functions */
+    integer pow_ii(integer *, integer *);
+
+    /* Local variables */
+    integer i__, j, m, i1, ic, lf, nd, ll, nl, nr, im1, ncc, nlf, nrf, iwk,
+	    lvl, ndb1, nlp1, nrp1;
+    doublereal beta;
+    integer idxq, nlvl;
+    doublereal alpha;
+    integer inode, ndiml, idxqc, ndimr, itemp, sqrei;
+    extern /* Subroutine */ int dlasd1_(integer *, integer *, integer *,
+	    doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *, integer *, integer *, doublereal *,
+	    integer *), dlasdq_(char *, integer *, integer *, integer *,
+	    integer *, integer *, doublereal *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *), dlasdt_(integer *, integer *,
+	    integer *, integer *, integer *, integer *, integer *), xerbla_(
+	    char *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Using a divide and conquer approach, DLASD0 computes the singular */
+/* value decomposition (SVD) of a real upper bidiagonal N-by-M */
+/* matrix B with diagonal D and offdiagonal E, where M = N + SQRE. */
+/* The algorithm computes orthogonal matrices U and VT such that */
+/* B = U * S * VT. The singular values S are overwritten on D. */
+
+/* A related subroutine, DLASDA, computes only the singular values, */
+/* and optionally, the singular vectors in compact form. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* On entry, the row dimension of the upper bidiagonal matrix. */
+/* This is also the dimension of the main diagonal array D. */
+
+/* SQRE (input) INTEGER */
+/* Specifies the column dimension of the bidiagonal matrix. */
+/* = 0: The bidiagonal matrix has column dimension M = N; */
+/* = 1: The bidiagonal matrix has column dimension M = N+1; */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry D contains the main diagonal of the bidiagonal */
+/* matrix. */
+/* On exit D, if INFO = 0, contains its singular values. */
+
+/* E (input) DOUBLE PRECISION array, dimension (M-1) */
+/* Contains the subdiagonal entries of the bidiagonal matrix. */
+/* On exit, E has been destroyed. */
+
+/* U (output) DOUBLE PRECISION array, dimension at least (LDQ, N) */
+/* On exit, U contains the left singular vectors. */
+
+/* LDU (input) INTEGER */
+/* On entry, leading dimension of U. */
+
+/* VT (output) DOUBLE PRECISION array, dimension at least (LDVT, M) */
+/* On exit, VT' contains the right singular vectors. */
+
+/* LDVT (input) INTEGER */
+/* On entry, leading dimension of VT. */
+
+/* SMLSIZ (input) INTEGER */
+/* On entry, maximum size of the subproblems at the */
+/* bottom of the computation tree. */
+
+/* IWORK (workspace) INTEGER work array. */
+/* Dimension must be at least (8 * N) */
+
+/* WORK (workspace) DOUBLE PRECISION work array. */
+/* Dimension must be at least (3 * M**2 + 2 * M) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, a singular value did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1;
+    u -= u_offset;
+    vt_dim1 = *ldvt;
+    vt_offset = 1 + vt_dim1;
+    vt -= vt_offset;
+    --iwork;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*n < 0) {
+	*info = -1;
+    } else if (*sqre < 0 || *sqre > 1) {
+	*info = -2;
+    }
+
+    m = *n + *sqre;
+
+    if (*ldu < *n) {
+	*info = -6;
+    } else if (*ldvt < m) {
+	*info = -8;
+    } else if (*smlsiz < 3) {
+	*info = -9;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLASD0", &i__1);
+	return 0;
+    }
+
+/* If the input matrix is too small, call DLASDQ to find the SVD. */
+
+    if (*n <= *smlsiz) {
+	dlasdq_("U", sqre, n, &m, n, &c__0, &d__[1], &e[1], &vt[vt_offset],
+		ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[1], info);
+	return 0;
+    }
+
+/* Set up the computation tree. */
+
+/* IWORK layout: INODE, NDIML and NDIMR each hold N integers */
+/* describing the tree built by DLASDT; IDXQ holds the per-subproblem */
+/* sorting permutations; IWK is the start of the remaining scratch. */
+
+    inode = 1;
+    ndiml = inode + *n;
+    ndimr = ndiml + *n;
+    idxq = ndimr + *n;
+    iwk = idxq + *n;
+    dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+	    smlsiz);
+
+/* For the nodes on bottom level of the tree, solve */
+/* their subproblems by DLASDQ. */
+
+    ndb1 = (nd + 1) / 2;
+    ncc = 0;
+    i__1 = nd;
+    for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/* IC : center row of each node */
+/* NL : number of rows of left subproblem */
+/* NR : number of rows of right subproblem */
+/* NLF: starting row of the left subproblem */
+/* NRF: starting row of the right subproblem */
+
+	i1 = i__ - 1;
+	ic = iwork[inode + i1];
+	nl = iwork[ndiml + i1];
+	nlp1 = nl + 1;
+	nr = iwork[ndimr + i1];
+	nrp1 = nr + 1;
+	nlf = ic - nl;
+	nrf = ic + 1;
+	sqrei = 1;
+	dlasdq_("U", &sqrei, &nl, &nlp1, &nl, &ncc, &d__[nlf], &e[nlf], &vt[
+		nlf + nlf * vt_dim1], ldvt, &u[nlf + nlf * u_dim1], ldu, &u[
+		nlf + nlf * u_dim1], ldu, &work[1], info);
+	if (*info != 0) {
+	    return 0;
+	}
+
+/* Initialize the left subproblem's permutation to the identity. */
+
+	itemp = idxq + nlf - 2;
+	i__2 = nl;
+	for (j = 1; j <= i__2; ++j) {
+	    iwork[itemp + j] = j;
+/* L10: */
+	}
+	if (i__ == nd) {
+	    sqrei = *sqre;
+	} else {
+	    sqrei = 1;
+	}
+	nrp1 = nr + sqrei;
+	dlasdq_("U", &sqrei, &nr, &nrp1, &nr, &ncc, &d__[nrf], &e[nrf], &vt[
+		nrf + nrf * vt_dim1], ldvt, &u[nrf + nrf * u_dim1], ldu, &u[
+		nrf + nrf * u_dim1], ldu, &work[1], info);
+	if (*info != 0) {
+	    return 0;
+	}
+
+/* Initialize the right subproblem's permutation to the identity. */
+
+	itemp = idxq + ic;
+	i__2 = nr;
+	for (j = 1; j <= i__2; ++j) {
+	    iwork[itemp + j - 1] = j;
+/* L20: */
+	}
+/* L30: */
+    }
+
+/* Now conquer each subproblem bottom-up. */
+
+    for (lvl = nlvl; lvl >= 1; --lvl) {
+
+/* Find the first node LF and last node LL on the */
+/* current level LVL. */
+
+	if (lvl == 1) {
+	    lf = 1;
+	    ll = 1;
+	} else {
+	    i__1 = lvl - 1;
+	    lf = pow_ii(&c__2, &i__1);
+	    ll = (lf << 1) - 1;
+	}
+	i__1 = ll;
+	for (i__ = lf; i__ <= i__1; ++i__) {
+	    im1 = i__ - 1;
+	    ic = iwork[inode + im1];
+	    nl = iwork[ndiml + im1];
+	    nr = iwork[ndimr + im1];
+	    nlf = ic - nl;
+	    if (*sqre == 0 && i__ == ll) {
+		sqrei = *sqre;
+	    } else {
+		sqrei = 1;
+	    }
+	    idxqc = idxq + nlf - 1;
+	    alpha = d__[ic];
+	    beta = e[ic];
+	    dlasd1_(&nl, &nr, &sqrei, &d__[nlf], &alpha, &beta, &u[nlf + nlf *
+		     u_dim1], ldu, &vt[nlf + nlf * vt_dim1], ldvt, &iwork[
+		    idxqc], &iwork[iwk], &work[1], info);
+	    if (*info != 0) {
+		return 0;
+	    }
+/* L40: */
+	}
+/* L50: */
+    }
+
+    return 0;
+
+/* End of DLASD0 */
+
+} /* dlasd0_ */
diff --git a/contrib/libs/clapack/dlasd1.c b/contrib/libs/clapack/dlasd1.c
new file mode 100644
index 0000000000..84fb8e5f0b
--- /dev/null
+++ b/contrib/libs/clapack/dlasd1.c
@@ -0,0 +1,288 @@
+/* dlasd1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__0 = 0;
+static doublereal c_b7 = 1.;
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dlasd1_(integer *nl, integer *nr, integer *sqre,
+	doublereal *d__, doublereal *alpha, doublereal *beta, doublereal *u,
+	integer *ldu, doublereal *vt, integer *ldvt, integer *idxq, integer *
+	iwork, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer u_dim1, u_offset, vt_dim1, vt_offset, i__1;
+    doublereal d__1, d__2;
+
+    /* Local variables */
+    integer i__, k, m, n, n1, n2, iq, iz, iu2, ldq, idx, ldu2, ivt2, idxc,
+	    idxp, ldvt2;
+    extern /* Subroutine */ int dlasd2_(integer *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, integer *,
+	    integer *, integer *, integer *, integer *, integer *), dlasd3_(
+	    integer *, integer *, integer *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *, integer *, integer *, doublereal *, integer *),
+	    dlascl_(char *, integer *, integer *, doublereal *, doublereal *,
+	    integer *, integer *, doublereal *, integer *, integer *),
+	    dlamrg_(integer *, integer *, doublereal *, integer *, integer *,
+	    integer *);
+    integer isigma;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal orgnrm;
+    integer coltyp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASD1 computes the SVD of an upper bidiagonal N-by-M matrix B, */
+/* where N = NL + NR + 1 and M = N + SQRE. DLASD1 is called from DLASD0. */
+
+/* A related subroutine DLASD7 handles the case in which the singular */
+/* values (and the singular vectors in factored form) are desired. */
+
+/* DLASD1 computes the SVD as follows: */
+
+/* ( D1(in) 0 0 0 ) */
+/* B = U(in) * ( Z1' a Z2' b ) * VT(in) */
+/* ( 0 0 D2(in) 0 ) */
+
+/* = U(out) * ( D(out) 0) * VT(out) */
+
+/* where Z' = (Z1' a Z2' b) = u' VT', and u is a vector of dimension M */
+/* with ALPHA and BETA in the NL+1 and NL+2 th entries and zeros */
+/* elsewhere; and the entry b is empty if SQRE = 0. */
+
+/* The left singular vectors of the original matrix are stored in U, and */
+/* the transpose of the right singular vectors are stored in VT, and the */
+/* singular values are in D. The algorithm consists of three stages: */
+
+/* The first stage consists of deflating the size of the problem */
+/* when there are multiple singular values or when there are zeros in */
+/* the Z vector. For each such occurrence the dimension of the */
+/* secular equation problem is reduced by one. This stage is */
+/* performed by the routine DLASD2. */
+
+/* The second stage consists of calculating the updated */
+/* singular values. This is done by finding the square roots of the */
+/* roots of the secular equation via the routine DLASD4 (as called */
+/* by DLASD3). This routine also calculates the singular vectors of */
+/* the current problem. */
+
+/* The final stage consists of computing the updated singular vectors */
+/* directly using the updated singular values. The singular vectors */
+/* for the current problem are multiplied with the singular vectors */
+/* from the overall problem. */
+
+/* Arguments */
+/* ========= */
+
+/* NL (input) INTEGER */
+/* The row dimension of the upper block. NL >= 1. */
+
+/* NR (input) INTEGER */
+/* The row dimension of the lower block. NR >= 1. */
+
+/* SQRE (input) INTEGER */
+/* = 0: the lower block is an NR-by-NR square matrix. */
+/* = 1: the lower block is an NR-by-(NR+1) rectangular matrix. */
+
+/* The bidiagonal matrix has row dimension N = NL + NR + 1, */
+/* and column dimension M = N + SQRE. */
+
+/* D (input/output) DOUBLE PRECISION array, */
+/* dimension (N = NL+NR+1). */
+/* On entry D(1:NL) contains the singular values of the */
+/* upper block; and D(NL+2:N) contains the singular values of */
+/* the lower block. On exit D(1:N) contains the singular values */
+/* of the modified matrix. */
+
+/* ALPHA (input/output) DOUBLE PRECISION */
+/* Contains the diagonal element associated with the added row. */
+
+/* BETA (input/output) DOUBLE PRECISION */
+/* Contains the off-diagonal element associated with the added */
+/* row. */
+
+/* U (input/output) DOUBLE PRECISION array, dimension(LDU,N) */
+/* On entry U(1:NL, 1:NL) contains the left singular vectors of */
+/* the upper block; U(NL+2:N, NL+2:N) contains the left singular */
+/* vectors of the lower block. On exit U contains the left */
+/* singular vectors of the bidiagonal matrix. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= max( 1, N ). */
+
+/* VT (input/output) DOUBLE PRECISION array, dimension(LDVT,M) */
+/* where M = N + SQRE. */
+/* On entry VT(1:NL+1, 1:NL+1)' contains the right singular */
+/* vectors of the upper block; VT(NL+2:M, NL+2:M)' contains */
+/* the right singular vectors of the lower block. On exit */
+/* VT' contains the right singular vectors of the */
+/* bidiagonal matrix. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. LDVT >= max( 1, M ). */
+
+/* IDXQ (output) INTEGER array, dimension(N) */
+/* This contains the permutation which will reintegrate the */
+/* subproblem just solved back into sorted order, i.e. */
+/* D( IDXQ( I = 1, N ) ) will be in ascending order. */
+
+/* IWORK (workspace) INTEGER array, dimension( 4 * N ) */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension( 3*M**2 + 2*M ) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, a singular value did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1;
+    u -= u_offset;
+    vt_dim1 = *ldvt;
+    vt_offset = 1 + vt_dim1;
+    vt -= vt_offset;
+    --idxq;
+    --iwork;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*nl < 1) {
+	*info = -1;
+    } else if (*nr < 1) {
+	*info = -2;
+    } else if (*sqre < 0 || *sqre > 1) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLASD1", &i__1);
+	return 0;
+    }
+
+    n = *nl + *nr + 1;
+    m = n + *sqre;
+
+/* The following values are for bookkeeping purposes only. They are */
+/* integer pointers which indicate the portion of the workspace */
+/* used by a particular array in DLASD2 and DLASD3. */
+
+    ldu2 = n;
+    ldvt2 = m;
+
+    iz = 1;
+    isigma = iz + m;
+    iu2 = isigma + n;
+    ivt2 = iu2 + ldu2 * n;
+    iq = ivt2 + ldvt2 * m;
+
+    idx = 1;
+    idxc = idx + n;
+    coltyp = idxc + n;
+    idxp = coltyp + n;
+
+/* Scale. */
+
+/* ORGNRM is the magnitude of the largest entry among ALPHA, BETA and */
+/* the diagonal D; D, ALPHA and BETA are divided by it so that the */
+/* largest entry of the combined problem has magnitude one. */
+
+/* Computing MAX */
+    d__1 = abs(*alpha), d__2 = abs(*beta);
+    orgnrm = max(d__1,d__2);
+    d__[*nl + 1] = 0.;
+    i__1 = n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((d__1 = d__[i__], abs(d__1)) > orgnrm) {
+	    orgnrm = (d__1 = d__[i__], abs(d__1));
+	}
+/* L10: */
+    }
+    dlascl_("G", &c__0, &c__0, &orgnrm, &c_b7, &n, &c__1, &d__[1], &n, info);
+    *alpha /= orgnrm;
+    *beta /= orgnrm;
+
+/* Deflate singular values. */
+
+    dlasd2_(nl, nr, sqre, &k, &d__[1], &work[iz], alpha, beta, &u[u_offset],
+	    ldu, &vt[vt_offset], ldvt, &work[isigma], &work[iu2], &ldu2, &
+	    work[ivt2], &ldvt2, &iwork[idxp], &iwork[idx], &iwork[idxc], &
+	    idxq[1], &iwork[coltyp], info);
+
+/* Solve Secular Equation and update singular vectors. */
+
+    ldq = k;
+    dlasd3_(nl, nr, sqre, &k, &d__[1], &work[iq], &ldq, &work[isigma], &u[
+	    u_offset], ldu, &work[iu2], &ldu2, &vt[vt_offset], ldvt, &work[
+	    ivt2], &ldvt2, &iwork[idxc], &iwork[coltyp], &work[iz], info);
+    if (*info != 0) {
+	return 0;
+    }
+
+/* Unscale. */
+
+    dlascl_("G", &c__0, &c__0, &c_b7, &orgnrm, &n, &c__1, &d__[1], &n, info);
+
+/* Prepare the IDXQ sorting permutation. */
+
+    n1 = k;
+    n2 = n - k;
+    dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &idxq[1]);
+
+    return 0;
+
+/* End of DLASD1 */
+
+} /* dlasd1_ */
diff --git a/contrib/libs/clapack/dlasd2.c b/contrib/libs/clapack/dlasd2.c
new file mode 100644
index 0000000000..441aa7dbc3
--- /dev/null
+++ b/contrib/libs/clapack/dlasd2.c
@@ -0,0 +1,609 @@
+/* dlasd2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b30 = 0.;
+
+/* Subroutine */ int dlasd2_(integer *nl, integer *nr, integer *sqre, integer
+ *k, doublereal *d__, doublereal *z__, doublereal *alpha, doublereal *
+ beta, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt,
+ doublereal *dsigma, doublereal *u2, integer *ldu2, doublereal *vt2,
+ integer *ldvt2, integer *idxp, integer *idx, integer *idxc, integer *
+ idxq, integer *coltyp, integer *info)
+{
+ /* System generated locals */
+ integer u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1, vt_offset,
+ vt2_dim1, vt2_offset, i__1;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ doublereal c__;
+ integer i__, j, m, n;
+ doublereal s;
+ integer k2;
+ doublereal z1;
+ integer ct, jp;
+ doublereal eps, tau, tol;
+ integer psm[4], nlp1, nlp2, idxi, idxj;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ integer ctot[4], idxjp;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ integer jprev;
+ extern doublereal dlapy2_(doublereal *, doublereal *), dlamch_(char *);
+ extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+ integer *, integer *, integer *), dlacpy_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *), xerbla_(char *,
+ integer *);
+ doublereal hlftol;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASD2 merges the two sets of singular values together into a single */
+/* sorted set. Then it tries to deflate the size of the problem. */
+/* There are two ways in which deflation can occur: when two or more */
+/* singular values are close together or if there is a tiny entry in the */
+/* Z vector. For each such occurrence the order of the related secular */
+/* equation problem is reduced by one. */
+
+/* DLASD2 is called from DLASD1. */
+
+/* Arguments */
+/* ========= */
+
+/* NL (input) INTEGER */
+/* The row dimension of the upper block. NL >= 1. */
+
+/* NR (input) INTEGER */
+/* The row dimension of the lower block. NR >= 1. */
+
+/* SQRE (input) INTEGER */
+/* = 0: the lower block is an NR-by-NR square matrix. */
+/* = 1: the lower block is an NR-by-(NR+1) rectangular matrix. */
+
+/* The bidiagonal matrix has N = NL + NR + 1 rows and */
+/* M = N + SQRE >= N columns. */
+
+/* K (output) INTEGER */
+/* Contains the dimension of the non-deflated matrix, */
+/* This is the order of the related secular equation. 1 <= K <=N. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension(N) */
+/* On entry D contains the singular values of the two submatrices */
+/* to be combined. On exit D contains the trailing (N-K) updated */
+/* singular values (those which were deflated) sorted into */
+/* increasing order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension(N) */
+/* On exit Z contains the updating row vector in the secular */
+/* equation. */
+
+/* ALPHA (input) DOUBLE PRECISION */
+/* Contains the diagonal element associated with the added row. */
+
+/* BETA (input) DOUBLE PRECISION */
+/* Contains the off-diagonal element associated with the added */
+/* row. */
+
+/* U (input/output) DOUBLE PRECISION array, dimension(LDU,N) */
+/* On entry U contains the left singular vectors of two */
+/* submatrices in the two square blocks with corners at (1,1), */
+/* (NL, NL), and (NL+2, NL+2), (N,N). */
+/* On exit U contains the trailing (N-K) updated left singular */
+/* vectors (those which were deflated) in its last N-K columns. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= N. */
+
+/* VT (input/output) DOUBLE PRECISION array, dimension(LDVT,M) */
+/* On entry VT' contains the right singular vectors of two */
+/* submatrices in the two square blocks with corners at (1,1), */
+/* (NL+1, NL+1), and (NL+2, NL+2), (M,M). */
+/* On exit VT' contains the trailing (N-K) updated right singular */
+/* vectors (those which were deflated) in its last N-K columns. */
+/* In case SQRE =1, the last row of VT spans the right null */
+/* space. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. LDVT >= M. */
+
+/* DSIGMA (output) DOUBLE PRECISION array, dimension (N) */
+/* Contains a copy of the diagonal elements (K-1 singular values */
+/* and one zero) in the secular equation. */
+
+/* U2 (output) DOUBLE PRECISION array, dimension(LDU2,N) */
+/* Contains a copy of the first K-1 left singular vectors which */
+/* will be used by DLASD3 in a matrix multiply (DGEMM) to solve */
+/* for the new left singular vectors. U2 is arranged into four */
+/* blocks. The first block contains a column with 1 at NL+1 and */
+/* zero everywhere else; the second block contains non-zero */
+/* entries only at and above NL; the third contains non-zero */
+/* entries only below NL+1; and the fourth is dense. */
+
+/* LDU2 (input) INTEGER */
+/* The leading dimension of the array U2. LDU2 >= N. */
+
+/* VT2 (output) DOUBLE PRECISION array, dimension(LDVT2,N) */
+/* VT2' contains a copy of the first K right singular vectors */
+/* which will be used by DLASD3 in a matrix multiply (DGEMM) to */
+/* solve for the new right singular vectors. VT2 is arranged into */
+/* three blocks. The first block contains a row that corresponds */
+/* to the special 0 diagonal element in SIGMA; the second block */
+/* contains non-zeros only at and before NL +1; the third block */
+/* contains non-zeros only at and after NL +2. */
+
+/* LDVT2 (input) INTEGER */
+/* The leading dimension of the array VT2. LDVT2 >= M. */
+
+/* IDXP (workspace) INTEGER array dimension(N) */
+/* This will contain the permutation used to place deflated */
+/* values of D at the end of the array. On output IDXP(2:K) */
+/* points to the nondeflated D-values and IDXP(K+1:N) */
+/* points to the deflated singular values. */
+
+/* IDX (workspace) INTEGER array dimension(N) */
+/* This will contain the permutation used to sort the contents of */
+/* D into ascending order. */
+
+/* IDXC (output) INTEGER array dimension(N) */
+/* This will contain the permutation used to arrange the columns */
+/* of the deflated U matrix into three groups: the first group */
+/* contains non-zero entries only at and above NL, the second */
+/* contains non-zero entries only below NL+2, and the third is */
+/* dense. */
+
+/* IDXQ (input/output) INTEGER array dimension(N) */
+/* This contains the permutation which separately sorts the two */
+/* sub-problems in D into ascending order. Note that entries in */
+/* the first hlaf of this permutation must first be moved one */
+/* position backward; and entries in the second half */
+/* must first have NL+1 added to their values. */
+
+/* COLTYP (workspace/output) INTEGER array dimension(N) */
+/* As workspace, this will contain a label which will indicate */
+/* which of the following types a column in the U2 matrix or a */
+/* row in the VT2 matrix is: */
+/* 1 : non-zero in the upper half only */
+/* 2 : non-zero in the lower half only */
+/* 3 : dense */
+/* 4 : deflated */
+
+/* On exit, it is an array of dimension 4, with COLTYP(I) being */
+/* the dimension of the I-th type columns. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --z__;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --dsigma;
+ u2_dim1 = *ldu2;
+ u2_offset = 1 + u2_dim1;
+ u2 -= u2_offset;
+ vt2_dim1 = *ldvt2;
+ vt2_offset = 1 + vt2_dim1;
+ vt2 -= vt2_offset;
+ --idxp;
+ --idx;
+ --idxc;
+ --idxq;
+ --coltyp;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*nl < 1) {
+ *info = -1;
+ } else if (*nr < 1) {
+ *info = -2;
+ } else if (*sqre != 1 && *sqre != 0) {
+ *info = -3;
+ }
+
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+
+ if (*ldu < n) {
+ *info = -10;
+ } else if (*ldvt < m) {
+ *info = -12;
+ } else if (*ldu2 < n) {
+ *info = -15;
+ } else if (*ldvt2 < m) {
+ *info = -17;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASD2", &i__1);
+ return 0;
+ }
+
+ nlp1 = *nl + 1;
+ nlp2 = *nl + 2;
+
+/* Generate the first part of the vector Z; and move the singular */
+/* values in the first part of D one position backward. */
+
+ z1 = *alpha * vt[nlp1 + nlp1 * vt_dim1];
+ z__[1] = z1;
+ for (i__ = *nl; i__ >= 1; --i__) {
+ z__[i__ + 1] = *alpha * vt[i__ + nlp1 * vt_dim1];
+ d__[i__ + 1] = d__[i__];
+ idxq[i__ + 1] = idxq[i__] + 1;
+/* L10: */
+ }
+
+/* Generate the second part of the vector Z. */
+
+ i__1 = m;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ z__[i__] = *beta * vt[i__ + nlp2 * vt_dim1];
+/* L20: */
+ }
+
+/* Initialize some reference arrays. */
+
+ i__1 = nlp1;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ coltyp[i__] = 1;
+/* L30: */
+ }
+ i__1 = n;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ coltyp[i__] = 2;
+/* L40: */
+ }
+
+/* Sort the singular values into increasing order */
+
+ i__1 = n;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ idxq[i__] += nlp1;
+/* L50: */
+ }
+
+/* DSIGMA, IDXC, IDXC, and the first column of U2 */
+/* are used as storage space. */
+
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ dsigma[i__] = d__[idxq[i__]];
+ u2[i__ + u2_dim1] = z__[idxq[i__]];
+ idxc[i__] = coltyp[idxq[i__]];
+/* L60: */
+ }
+
+ dlamrg_(nl, nr, &dsigma[2], &c__1, &c__1, &idx[2]);
+
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ idxi = idx[i__] + 1;
+ d__[i__] = dsigma[idxi];
+ z__[i__] = u2[idxi + u2_dim1];
+ coltyp[i__] = idxc[idxi];
+/* L70: */
+ }
+
+/* Calculate the allowable deflation tolerance */
+
+ eps = dlamch_("Epsilon");
+/* Computing MAX */
+ d__1 = abs(*alpha), d__2 = abs(*beta);
+ tol = max(d__1,d__2);
+/* Computing MAX */
+ d__2 = (d__1 = d__[n], abs(d__1));
+ tol = eps * 8. * max(d__2,tol);
+
+/* There are 2 kinds of deflation -- first a value in the z-vector */
+/* is small, second two (or more) singular values are very close */
+/* together (their difference is small). */
+
+/* If the value in the z-vector is small, we simply permute the */
+/* array so that the corresponding singular value is moved to the */
+/* end. */
+
+/* If two values in the D-vector are close, we perform a two-sided */
+/* rotation designed to make one of the corresponding z-vector */
+/* entries zero, and then permute the array so that the deflated */
+/* singular value is moved to the end. */
+
+/* If there are multiple singular values then the problem deflates. */
+/* Here the number of equal singular values are found. As each equal */
+/* singular value is found, an elementary reflector is computed to */
+/* rotate the corresponding singular subspace so that the */
+/* corresponding components of Z are zero in this new basis. */
+
+ *k = 1;
+ k2 = n + 1;
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ if ((d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ idxp[k2] = j;
+ coltyp[j] = 4;
+ if (j == n) {
+ goto L120;
+ }
+ } else {
+ jprev = j;
+ goto L90;
+ }
+/* L80: */
+ }
+L90:
+ j = jprev;
+L100:
+ ++j;
+ if (j > n) {
+ goto L110;
+ }
+ if ((d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ idxp[k2] = j;
+ coltyp[j] = 4;
+ } else {
+
+/* Check if singular values are close enough to allow deflation. */
+
+ if ((d__1 = d__[j] - d__[jprev], abs(d__1)) <= tol) {
+
+/* Deflation is possible. */
+
+ s = z__[jprev];
+ c__ = z__[j];
+
+/* Find sqrt(a**2+b**2) without overflow or */
+/* destructive underflow. */
+
+ tau = dlapy2_(&c__, &s);
+ c__ /= tau;
+ s = -s / tau;
+ z__[j] = tau;
+ z__[jprev] = 0.;
+
+/* Apply back the Givens rotation to the left and right */
+/* singular vector matrices. */
+
+ idxjp = idxq[idx[jprev] + 1];
+ idxj = idxq[idx[j] + 1];
+ if (idxjp <= nlp1) {
+ --idxjp;
+ }
+ if (idxj <= nlp1) {
+ --idxj;
+ }
+ drot_(&n, &u[idxjp * u_dim1 + 1], &c__1, &u[idxj * u_dim1 + 1], &
+ c__1, &c__, &s);
+ drot_(&m, &vt[idxjp + vt_dim1], ldvt, &vt[idxj + vt_dim1], ldvt, &
+ c__, &s);
+ if (coltyp[j] != coltyp[jprev]) {
+ coltyp[j] = 3;
+ }
+ coltyp[jprev] = 4;
+ --k2;
+ idxp[k2] = jprev;
+ jprev = j;
+ } else {
+ ++(*k);
+ u2[*k + u2_dim1] = z__[jprev];
+ dsigma[*k] = d__[jprev];
+ idxp[*k] = jprev;
+ jprev = j;
+ }
+ }
+ goto L100;
+L110:
+
+/* Record the last singular value. */
+
+ ++(*k);
+ u2[*k + u2_dim1] = z__[jprev];
+ dsigma[*k] = d__[jprev];
+ idxp[*k] = jprev;
+
+L120:
+
+/* Count up the total number of the various types of columns, then */
+/* form a permutation which positions the four column types into */
+/* four groups of uniform structure (although one or more of these */
+/* groups may be empty). */
+
+ for (j = 1; j <= 4; ++j) {
+ ctot[j - 1] = 0;
+/* L130: */
+ }
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ ct = coltyp[j];
+ ++ctot[ct - 1];
+/* L140: */
+ }
+
+/* PSM(*) = Position in SubMatrix (of types 1 through 4) */
+
+ psm[0] = 2;
+ psm[1] = ctot[0] + 2;
+ psm[2] = psm[1] + ctot[1];
+ psm[3] = psm[2] + ctot[2];
+
+/* Fill out the IDXC array so that the permutation which it induces */
+/* will place all type-1 columns first, all type-2 columns next, */
+/* then all type-3's, and finally all type-4's, starting from the */
+/* second column. This applies similarly to the rows of VT. */
+
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ jp = idxp[j];
+ ct = coltyp[jp];
+ idxc[psm[ct - 1]] = j;
+ ++psm[ct - 1];
+/* L150: */
+ }
+
+/* Sort the singular values and corresponding singular vectors into */
+/* DSIGMA, U2, and VT2 respectively. The singular values/vectors */
+/* which were not deflated go into the first K slots of DSIGMA, U2, */
+/* and VT2 respectively, while those which were deflated go into the */
+/* last N - K slots, except that the first column/row will be treated */
+/* separately. */
+
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ jp = idxp[j];
+ dsigma[j] = d__[jp];
+ idxj = idxq[idx[idxp[idxc[j]]] + 1];
+ if (idxj <= nlp1) {
+ --idxj;
+ }
+ dcopy_(&n, &u[idxj * u_dim1 + 1], &c__1, &u2[j * u2_dim1 + 1], &c__1);
+ dcopy_(&m, &vt[idxj + vt_dim1], ldvt, &vt2[j + vt2_dim1], ldvt2);
+/* L160: */
+ }
+
+/* Determine DSIGMA(1), DSIGMA(2) and Z(1) */
+
+ dsigma[1] = 0.;
+ hlftol = tol / 2.;
+ if (abs(dsigma[2]) <= hlftol) {
+ dsigma[2] = hlftol;
+ }
+ if (m > n) {
+ z__[1] = dlapy2_(&z1, &z__[m]);
+ if (z__[1] <= tol) {
+ c__ = 1.;
+ s = 0.;
+ z__[1] = tol;
+ } else {
+ c__ = z1 / z__[1];
+ s = z__[m] / z__[1];
+ }
+ } else {
+ if (abs(z1) <= tol) {
+ z__[1] = tol;
+ } else {
+ z__[1] = z1;
+ }
+ }
+
+/* Move the rest of the updating row to Z. */
+
+ i__1 = *k - 1;
+ dcopy_(&i__1, &u2[u2_dim1 + 2], &c__1, &z__[2], &c__1);
+
+/* Determine the first column of U2, the first row of VT2 and the */
+/* last row of VT. */
+
+ dlaset_("A", &n, &c__1, &c_b30, &c_b30, &u2[u2_offset], ldu2);
+ u2[nlp1 + u2_dim1] = 1.;
+ if (m > n) {
+ i__1 = nlp1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ vt[m + i__ * vt_dim1] = -s * vt[nlp1 + i__ * vt_dim1];
+ vt2[i__ * vt2_dim1 + 1] = c__ * vt[nlp1 + i__ * vt_dim1];
+/* L170: */
+ }
+ i__1 = m;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ vt2[i__ * vt2_dim1 + 1] = s * vt[m + i__ * vt_dim1];
+ vt[m + i__ * vt_dim1] = c__ * vt[m + i__ * vt_dim1];
+/* L180: */
+ }
+ } else {
+ dcopy_(&m, &vt[nlp1 + vt_dim1], ldvt, &vt2[vt2_dim1 + 1], ldvt2);
+ }
+ if (m > n) {
+ dcopy_(&m, &vt[m + vt_dim1], ldvt, &vt2[m + vt2_dim1], ldvt2);
+ }
+
+/* The deflated singular values and their corresponding vectors go */
+/* into the back of D, U, and V respectively. */
+
+ if (n > *k) {
+ i__1 = n - *k;
+ dcopy_(&i__1, &dsigma[*k + 1], &c__1, &d__[*k + 1], &c__1);
+ i__1 = n - *k;
+ dlacpy_("A", &n, &i__1, &u2[(*k + 1) * u2_dim1 + 1], ldu2, &u[(*k + 1)
+ * u_dim1 + 1], ldu);
+ i__1 = n - *k;
+ dlacpy_("A", &i__1, &m, &vt2[*k + 1 + vt2_dim1], ldvt2, &vt[*k + 1 +
+ vt_dim1], ldvt);
+ }
+
+/* Copy CTOT into COLTYP for referencing in DLASD3. */
+
+ for (j = 1; j <= 4; ++j) {
+ coltyp[j] = ctot[j - 1];
+/* L190: */
+ }
+
+ return 0;
+
+/* End of DLASD2 */
+
+} /* dlasd2_ */
diff --git a/contrib/libs/clapack/dlasd3.c b/contrib/libs/clapack/dlasd3.c
new file mode 100644
index 0000000000..db8089b8ea
--- /dev/null
+++ b/contrib/libs/clapack/dlasd3.c
@@ -0,0 +1,452 @@
+/* dlasd3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static doublereal c_b13 = 1.;
+static doublereal c_b26 = 0.;
+
+/* Subroutine */ int dlasd3_(integer *nl, integer *nr, integer *sqre, integer
+ *k, doublereal *d__, doublereal *q, integer *ldq, doublereal *dsigma,
+ doublereal *u, integer *ldu, doublereal *u2, integer *ldu2,
+ doublereal *vt, integer *ldvt, doublereal *vt2, integer *ldvt2,
+ integer *idxc, integer *ctot, doublereal *z__, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1,
+ vt_offset, vt2_dim1, vt2_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ integer i__, j, m, n, jc;
+ doublereal rho;
+ integer nlp1, nlp2, nrp1;
+ doublereal temp;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ integer ctemp;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ integer ktemp;
+ extern doublereal dlamc3_(doublereal *, doublereal *);
+ extern /* Subroutine */ int dlasd4_(integer *, integer *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *), dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dlacpy_(char *, integer *, integer
+ *, doublereal *, integer *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASD3 finds all the square roots of the roots of the secular */
+/* equation, as defined by the values in D and Z. It makes the */
+/* appropriate calls to DLASD4 and then updates the singular */
+/* vectors by matrix multiplication. */
+
+/* This code makes very mild assumptions about floating point */
+/* arithmetic. It will work on machines with a guard digit in */
+/* add/subtract, or on those binary machines without guard digits */
+/* which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2. */
+/* It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* DLASD3 is called from DLASD1. */
+
+/* Arguments */
+/* ========= */
+
+/* NL (input) INTEGER */
+/* The row dimension of the upper block. NL >= 1. */
+
+/* NR (input) INTEGER */
+/* The row dimension of the lower block. NR >= 1. */
+
+/* SQRE (input) INTEGER */
+/* = 0: the lower block is an NR-by-NR square matrix. */
+/* = 1: the lower block is an NR-by-(NR+1) rectangular matrix. */
+
+/* The bidiagonal matrix has N = NL + NR + 1 rows and */
+/* M = N + SQRE >= N columns. */
+
+/* K (input) INTEGER */
+/* The size of the secular equation, 1 =< K = < N. */
+
+/* D (output) DOUBLE PRECISION array, dimension(K) */
+/* On exit the square roots of the roots of the secular equation, */
+/* in ascending order. */
+
+/* Q (workspace) DOUBLE PRECISION array, */
+/* dimension at least (LDQ,K). */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= K. */
+
+/* DSIGMA (input) DOUBLE PRECISION array, dimension(K) */
+/* The first K elements of this array contain the old roots */
+/* of the deflated updating problem. These are the poles */
+/* of the secular equation. */
+
+/* U (output) DOUBLE PRECISION array, dimension (LDU, N) */
+/* The last N - K columns of this matrix contain the deflated */
+/* left singular vectors. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= N. */
+
+/* U2 (input/output) DOUBLE PRECISION array, dimension (LDU2, N) */
+/* The first K columns of this matrix contain the non-deflated */
+/* left singular vectors for the split problem. */
+
+/* LDU2 (input) INTEGER */
+/* The leading dimension of the array U2. LDU2 >= N. */
+
+/* VT (output) DOUBLE PRECISION array, dimension (LDVT, M) */
+/* The last M - K columns of VT' contain the deflated */
+/* right singular vectors. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. LDVT >= N. */
+
+/* VT2 (input/output) DOUBLE PRECISION array, dimension (LDVT2, N) */
+/* The first K columns of VT2' contain the non-deflated */
+/* right singular vectors for the split problem. */
+
+/* LDVT2 (input) INTEGER */
+/* The leading dimension of the array VT2. LDVT2 >= N. */
+
+/* IDXC (input) INTEGER array, dimension ( N ) */
+/* The permutation used to arrange the columns of U (and rows of */
+/* VT) into three groups: the first group contains non-zero */
+/* entries only at and above (or before) NL +1; the second */
+/* contains non-zero entries only at and below (or after) NL+2; */
+/* and the third is dense. The first column of U and the row of */
+/* VT are treated separately, however. */
+
+/* The rows of the singular vectors found by DLASD4 */
+/* must be likewise permuted before the matrix multiplies can */
+/* take place. */
+
+/* CTOT (input) INTEGER array, dimension ( 4 ) */
+/* A count of the total number of the various types of columns */
+/* in U (or rows in VT), as described in IDXC. The fourth column */
+/* type is any column which has been deflated. */
+
+/* Z (input) DOUBLE PRECISION array, dimension (K) */
+/* The first K elements of this array contain the components */
+/* of the deflation-adjusted updating row vector. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, an singular value did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --dsigma;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ u2_dim1 = *ldu2;
+ u2_offset = 1 + u2_dim1;
+ u2 -= u2_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ vt2_dim1 = *ldvt2;
+ vt2_offset = 1 + vt2_dim1;
+ vt2 -= vt2_offset;
+ --idxc;
+ --ctot;
+ --z__;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*nl < 1) {
+ *info = -1;
+ } else if (*nr < 1) {
+ *info = -2;
+ } else if (*sqre != 1 && *sqre != 0) {
+ *info = -3;
+ }
+
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+ nlp1 = *nl + 1;
+ nlp2 = *nl + 2;
+
+ if (*k < 1 || *k > n) {
+ *info = -4;
+ } else if (*ldq < *k) {
+ *info = -7;
+ } else if (*ldu < n) {
+ *info = -10;
+ } else if (*ldu2 < n) {
+ *info = -12;
+ } else if (*ldvt < m) {
+ *info = -14;
+ } else if (*ldvt2 < m) {
+ *info = -16;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASD3", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*k == 1) {
+ d__[1] = abs(z__[1]);
+ dcopy_(&m, &vt2[vt2_dim1 + 1], ldvt2, &vt[vt_dim1 + 1], ldvt);
+ if (z__[1] > 0.) {
+ dcopy_(&n, &u2[u2_dim1 + 1], &c__1, &u[u_dim1 + 1], &c__1);
+ } else {
+ i__1 = n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ u[i__ + u_dim1] = -u2[i__ + u2_dim1];
+/* L10: */
+ }
+ }
+ return 0;
+ }
+
+/* Modify values DSIGMA(i) to make sure all DSIGMA(i)-DSIGMA(j) can */
+/* be computed with high relative accuracy (barring over/underflow). */
+/* This is a problem on machines without a guard digit in */
+/* add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2). */
+/* The following code replaces DSIGMA(I) by 2*DSIGMA(I)-DSIGMA(I), */
+/* which on any of these machines zeros out the bottommost */
+/* bit of DSIGMA(I) if it is 1; this makes the subsequent */
+/* subtractions DSIGMA(I)-DSIGMA(J) unproblematic when cancellation */
+/* occurs. On binary machines with a guard digit (almost all */
+/* machines) it does not change DSIGMA(I) at all. On hexadecimal */
+/* and decimal machines with a guard digit, it slightly */
+/* changes the bottommost bits of DSIGMA(I). It does not account */
+/* for hexadecimal or decimal machines without guard digits */
+/* (we know of none). We use a subroutine call to compute */
+/* 2*DSIGMA(I) to prevent optimizing compilers from eliminating */
+/* this code. */
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dsigma[i__] = dlamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__];
+/* L20: */
+ }
+
+/* Keep a copy of Z. */
+
+ dcopy_(k, &z__[1], &c__1, &q[q_offset], &c__1);
+
+/* Normalize Z. */
+
+ rho = dnrm2_(k, &z__[1], &c__1);
+ dlascl_("G", &c__0, &c__0, &rho, &c_b13, k, &c__1, &z__[1], k, info);
+ rho *= rho;
+
+/* Find the new singular values. */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dlasd4_(k, &j, &dsigma[1], &z__[1], &u[j * u_dim1 + 1], &rho, &d__[j],
+ &vt[j * vt_dim1 + 1], info);
+
+/* If the zero finder fails, the computation is terminated. */
+
+ if (*info != 0) {
+ return 0;
+ }
+/* L30: */
+ }
+
+/* Compute updated Z. */
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ z__[i__] = u[i__ + *k * u_dim1] * vt[i__ + *k * vt_dim1];
+ i__2 = i__ - 1;
+ for (j = 1; j <= i__2; ++j) {
+ z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[
+ i__] - dsigma[j]) / (dsigma[i__] + dsigma[j]);
+/* L40: */
+ }
+ i__2 = *k - 1;
+ for (j = i__; j <= i__2; ++j) {
+ z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[
+ i__] - dsigma[j + 1]) / (dsigma[i__] + dsigma[j + 1]);
+/* L50: */
+ }
+ d__2 = sqrt((d__1 = z__[i__], abs(d__1)));
+ z__[i__] = d_sign(&d__2, &q[i__ + q_dim1]);
+/* L60: */
+ }
+
+/* Compute left singular vectors of the modified diagonal matrix, */
+/* and store related information for the right singular vectors. */
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ vt[i__ * vt_dim1 + 1] = z__[1] / u[i__ * u_dim1 + 1] / vt[i__ *
+ vt_dim1 + 1];
+ u[i__ * u_dim1 + 1] = -1.;
+ i__2 = *k;
+ for (j = 2; j <= i__2; ++j) {
+ vt[j + i__ * vt_dim1] = z__[j] / u[j + i__ * u_dim1] / vt[j + i__
+ * vt_dim1];
+ u[j + i__ * u_dim1] = dsigma[j] * vt[j + i__ * vt_dim1];
+/* L70: */
+ }
+ temp = dnrm2_(k, &u[i__ * u_dim1 + 1], &c__1);
+ q[i__ * q_dim1 + 1] = u[i__ * u_dim1 + 1] / temp;
+ i__2 = *k;
+ for (j = 2; j <= i__2; ++j) {
+ jc = idxc[j];
+ q[j + i__ * q_dim1] = u[jc + i__ * u_dim1] / temp;
+/* L80: */
+ }
+/* L90: */
+ }
+
+/* Update the left singular vector matrix. */
+
+ if (*k == 2) {
+ dgemm_("N", "N", &n, k, k, &c_b13, &u2[u2_offset], ldu2, &q[q_offset],
+ ldq, &c_b26, &u[u_offset], ldu);
+ goto L100;
+ }
+ if (ctot[1] > 0) {
+ dgemm_("N", "N", nl, k, &ctot[1], &c_b13, &u2[(u2_dim1 << 1) + 1],
+ ldu2, &q[q_dim1 + 2], ldq, &c_b26, &u[u_dim1 + 1], ldu);
+ if (ctot[3] > 0) {
+ ktemp = ctot[1] + 2 + ctot[2];
+ dgemm_("N", "N", nl, k, &ctot[3], &c_b13, &u2[ktemp * u2_dim1 + 1]
+, ldu2, &q[ktemp + q_dim1], ldq, &c_b13, &u[u_dim1 + 1],
+ ldu);
+ }
+ } else if (ctot[3] > 0) {
+ ktemp = ctot[1] + 2 + ctot[2];
+ dgemm_("N", "N", nl, k, &ctot[3], &c_b13, &u2[ktemp * u2_dim1 + 1],
+ ldu2, &q[ktemp + q_dim1], ldq, &c_b26, &u[u_dim1 + 1], ldu);
+ } else {
+ dlacpy_("F", nl, k, &u2[u2_offset], ldu2, &u[u_offset], ldu);
+ }
+ dcopy_(k, &q[q_dim1 + 1], ldq, &u[nlp1 + u_dim1], ldu);
+ ktemp = ctot[1] + 2;
+ ctemp = ctot[2] + ctot[3];
+ dgemm_("N", "N", nr, k, &ctemp, &c_b13, &u2[nlp2 + ktemp * u2_dim1], ldu2,
+ &q[ktemp + q_dim1], ldq, &c_b26, &u[nlp2 + u_dim1], ldu);
+
+/* Generate the right singular vectors. */
+
+L100:
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = dnrm2_(k, &vt[i__ * vt_dim1 + 1], &c__1);
+ q[i__ + q_dim1] = vt[i__ * vt_dim1 + 1] / temp;
+ i__2 = *k;
+ for (j = 2; j <= i__2; ++j) {
+ jc = idxc[j];
+ q[i__ + j * q_dim1] = vt[jc + i__ * vt_dim1] / temp;
+/* L110: */
+ }
+/* L120: */
+ }
+
+/* Update the right singular vector matrix. */
+
+ if (*k == 2) {
+ dgemm_("N", "N", k, &m, k, &c_b13, &q[q_offset], ldq, &vt2[vt2_offset]
+, ldvt2, &c_b26, &vt[vt_offset], ldvt);
+ return 0;
+ }
+ ktemp = ctot[1] + 1;
+ dgemm_("N", "N", k, &nlp1, &ktemp, &c_b13, &q[q_dim1 + 1], ldq, &vt2[
+ vt2_dim1 + 1], ldvt2, &c_b26, &vt[vt_dim1 + 1], ldvt);
+ ktemp = ctot[1] + 2 + ctot[2];
+ if (ktemp <= *ldvt2) {
+ dgemm_("N", "N", k, &nlp1, &ctot[3], &c_b13, &q[ktemp * q_dim1 + 1],
+ ldq, &vt2[ktemp + vt2_dim1], ldvt2, &c_b13, &vt[vt_dim1 + 1],
+ ldvt);
+ }
+
+ ktemp = ctot[1] + 1;
+ nrp1 = *nr + *sqre;
+ if (ktemp > 1) {
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ q[i__ + ktemp * q_dim1] = q[i__ + q_dim1];
+/* L130: */
+ }
+ i__1 = m;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ vt2[ktemp + i__ * vt2_dim1] = vt2[i__ * vt2_dim1 + 1];
+/* L140: */
+ }
+ }
+ ctemp = ctot[2] + 1 + ctot[3];
+ dgemm_("N", "N", k, &nrp1, &ctemp, &c_b13, &q[ktemp * q_dim1 + 1], ldq, &
+ vt2[ktemp + nlp2 * vt2_dim1], ldvt2, &c_b26, &vt[nlp2 * vt_dim1 +
+ 1], ldvt);
+
+ return 0;
+
+/* End of DLASD3 */
+
+} /* dlasd3_ */
diff --git a/contrib/libs/clapack/dlasd4.c b/contrib/libs/clapack/dlasd4.c
new file mode 100644
index 0000000000..54455ed26a
--- /dev/null
+++ b/contrib/libs/clapack/dlasd4.c
@@ -0,0 +1,1010 @@
+/* dlasd4.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlasd4_(integer *n, integer *i__, doublereal *d__, 
+	doublereal *z__, doublereal *delta, doublereal *rho, doublereal *
+	sigma, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    doublereal a, b, c__;
+    integer j;
+    doublereal w, dd[3];
+    integer ii;
+    doublereal dw, zz[3];
+    integer ip1;
+    doublereal eta, phi, eps, tau, psi;
+    integer iim1, iip1;
+    doublereal dphi, dpsi;
+    integer iter;
+    doublereal temp, prew, sg2lb, sg2ub, temp1, temp2, dtiim, delsq, dtiip;
+    integer niter;
+    doublereal dtisq;
+    logical swtch;
+    doublereal dtnsq;
+    extern /* Subroutine */ int dlaed6_(integer *, logical *, doublereal *, 
+	    doublereal *, doublereal *, doublereal *, doublereal *, integer *)
+	    , dlasd5_(integer *, doublereal *, doublereal *, doublereal *, 
+	    doublereal *, doublereal *, doublereal *);
+    doublereal delsq2, dtnsq1;
+    logical swtch3;
+    extern doublereal dlamch_(char *);
+    logical orgati;
+    doublereal erretm, dtipsq, rhoinv;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  This subroutine computes the square root of the I-th updated */
+/*  eigenvalue of a positive symmetric rank-one modification to */
+/*  a positive diagonal matrix whose entries are given as the squares */
+/*  of the corresponding entries in the array d, and that */
+
+/*         0 <= D(i) < D(j)  for  i < j */
+
+/*  and that RHO > 0. This is arranged by the calling routine, and is */
+/*  no loss in generality.  The rank-one modified system is thus */
+
+/*         diag( D ) * diag( D ) +  RHO *  Z * Z_transpose. */
+
+/*  where we assume the Euclidean norm of Z is 1. */
+
+/*  The method consists of approximating the rational functions in the */
+/*  secular equation by simpler interpolating rational functions. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  N      (input) INTEGER */
+/*         The length of all arrays. */
+
+/*  I      (input) INTEGER */
+/*         The index of the eigenvalue to be computed.  1 <= I <= N. */
+
+/*  D      (input) DOUBLE PRECISION array, dimension ( N ) */
+/*         The original eigenvalues.  It is assumed that they are in */
+/*         order, 0 <= D(I) < D(J)  for I < J. */
+
+/*  Z      (input) DOUBLE PRECISION array, dimension ( N ) */
+/*         The components of the updating vector. */
+
+/*  DELTA  (output) DOUBLE PRECISION array, dimension ( N ) */
+/*         If N .ne. 1, DELTA contains (D(j) - sigma_I) in its  j-th */
+/*         component.  If N = 1, then DELTA(1) = 1.  The vector DELTA */
+/*         contains the information necessary to construct the */
+/*         (singular) eigenvectors. */
+
+/*  RHO    (input) DOUBLE PRECISION */
+/*         The scalar in the symmetric updating formula. */
+
+/*  SIGMA  (output) DOUBLE PRECISION */
+/*         The computed sigma_I, the I-th updated eigenvalue. */
+
+/*  WORK   (workspace) DOUBLE PRECISION array, dimension ( N ) */
+/*         If N .ne. 1, WORK contains (D(j) + sigma_I) in its  j-th */
+/*         component.  If N = 1, then WORK( 1 ) = 1. */
+
+/*  INFO   (output) INTEGER */
+/*         = 0:  successful exit */
+/*         > 0:  if INFO = 1, the updating process failed. */
+
+/*  Internal Parameters */
+/*  =================== */
+
+/*  Logical variable ORGATI (origin-at-i?) is used for distinguishing */
+/*  whether D(i) or D(i+1) is treated as the origin. */
+
+/*            ORGATI = .true.    origin at i */
+/*            ORGATI = .false.   origin at i+1 */
+
+/*  Logical variable SWTCH3 (switch-for-3-poles?) is for noting */
+/*  if we are working with THREE poles! */
+
+/*  MAXIT is the maximum number of iterations allowed for each */
+/*  eigenvalue. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*     Ren-Cang Li, Computer Science Division, University of California */
+/*     at Berkeley, USA */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Since this routine is called in an inner loop, we do no argument */
+/*     checking. */
+
+/*     Quick return for N=1 and 2. */
+
+    /* Parameter adjustments */
+    --work;
+    --delta;
+    --z__;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+    if (*n == 1) {
+
+/*        Presumably, I=1 upon entry */
+
+	*sigma = sqrt(d__[1] * d__[1] + *rho * z__[1] * z__[1]);
+	delta[1] = 1.;
+	work[1] = 1.;
+	return 0;
+    }
+    if (*n == 2) {
+	dlasd5_(i__, &d__[1], &z__[1], &delta[1], rho, sigma, &work[1]);
+	return 0;
+    }
+
+/*     Compute machine epsilon */
+
+    eps = dlamch_("Epsilon");
+    rhoinv = 1. / *rho;
+
+/*     The case I = N */
+
+    if (*i__ == *n) {
+
+/*        Initialize some basic variables */
+
+	ii = *n - 1;
+	niter = 1;
+
+/*        Calculate initial guess */
+
+	temp = *rho / 2.;
+
+/*        If ||Z||_2 is not one, then TEMP should be set to */
+/*        RHO * ||Z||_2^2 / TWO */
+
+	temp1 = temp / (d__[*n] + sqrt(d__[*n] * d__[*n] + temp));
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    work[j] = d__[j] + d__[*n] + temp1;
+	    delta[j] = d__[j] - d__[*n] - temp1;
+/* L10: */
+	}
+
+	psi = 0.;
+	i__1 = *n - 2;
+	for (j = 1; j <= i__1; ++j) {
+	    psi += z__[j] * z__[j] / (delta[j] * work[j]);
+/* L20: */
+	}
+
+	c__ = rhoinv + psi;
+	w = c__ + z__[ii] * z__[ii] / (delta[ii] * work[ii]) + z__[*n] * z__[*
+		n] / (delta[*n] * work[*n]);
+
+	if (w <= 0.) {
+	    temp1 = sqrt(d__[*n] * d__[*n] + *rho);
+	    temp = z__[*n - 1] * z__[*n - 1] / ((d__[*n - 1] + temp1) * (d__[*
+		    n] - d__[*n - 1] + *rho / (d__[*n] + temp1))) + z__[*n] * 
+		    z__[*n] / *rho;
+
+/*           The following TAU is to approximate */
+/*           SIGMA_n^2 - D( N )*D( N ) */
+
+	    if (c__ <= temp) {
+		tau = *rho;
+	    } else {
+		delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]);
+		a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*
+			n];
+		b = z__[*n] * z__[*n] * delsq;
+		if (a < 0.) {
+		    tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a);
+		} else {
+		    tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.);
+		}
+	    }
+
+/*           It can be proved that */
+/*               D(N)^2+RHO/2 <= SIGMA_n^2 < D(N)^2+TAU <= D(N)^2+RHO */
+
+	} else {
+	    delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]);
+	    a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n];
+	    b = z__[*n] * z__[*n] * delsq;
+
+/*           The following TAU is to approximate */
+/*           SIGMA_n^2 - D( N )*D( N ) */
+
+	    if (a < 0.) {
+		tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a);
+	    } else {
+		tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.);
+	    }
+
+/*           It can be proved that */
+/*           D(N)^2 < D(N)^2+TAU < SIGMA(N)^2 < D(N)^2+RHO/2 */
+
+	}
+
+/*        The following ETA is to approximate SIGMA_n - D( N ) */
+
+	eta = tau / (d__[*n] + sqrt(d__[*n] * d__[*n] + tau));
+
+	*sigma = d__[*n] + eta;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    delta[j] = d__[j] - d__[*i__] - eta;
+	    work[j] = d__[j] + d__[*i__] + eta;
+/* L30: */
+	}
+
+/*        Evaluate PSI and the derivative DPSI */
+
+	dpsi = 0.;
+	psi = 0.;
+	erretm = 0.;
+	i__1 = ii;
+	for (j = 1; j <= i__1; ++j) {
+	    temp = z__[j] / (delta[j] * work[j]);
+	    psi += z__[j] * temp;
+	    dpsi += temp * temp;
+	    erretm += psi;
+/* L40: */
+	}
+	erretm = abs(erretm);
+
+/*        Evaluate PHI and the derivative DPHI */
+
+	temp = z__[*n] / (delta[*n] * work[*n]);
+	phi = z__[*n] * temp;
+	dphi = temp * temp;
+	erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi 
+		+ dphi);
+
+	w = rhoinv + phi + psi;
+
+/*        Test for convergence */
+
+	if (abs(w) <= eps * erretm) {
+	    goto L240;
+	}
+
+/*        Calculate the new step */
+
+	++niter;
+	dtnsq1 = work[*n - 1] * delta[*n - 1];
+	dtnsq = work[*n] * delta[*n];
+	c__ = w - dtnsq1 * dpsi - dtnsq * dphi;
+	a = (dtnsq + dtnsq1) * w - dtnsq * dtnsq1 * (dpsi + dphi);
+	b = dtnsq * dtnsq1 * w;
+	if (c__ < 0.) {
+	    c__ = abs(c__);
+	}
+	if (c__ == 0.) {
+	    eta = *rho - *sigma * *sigma;
+	} else if (a >= 0.) {
+	    eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__ 
+		    * 2.);
+	} else {
+	    eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))
+		    );
+	}
+
+/*        Note, eta should be positive if w is negative, and */
+/*        eta should be negative otherwise. However, */
+/*        if for some reason caused by roundoff, eta*w > 0, */
+/*        we simply use one Newton step instead. This way */
+/*        will guarantee eta*w < 0. */
+
+	if (w * eta > 0.) {
+	    eta = -w / (dpsi + dphi);
+	}
+	temp = eta - dtnsq;
+	if (temp > *rho) {
+	    eta = *rho + dtnsq;
+	}
+
+	tau += eta;
+/*        ETA is an increment in SIGMA**2; the division below converts it */
+/*        into the corresponding increment in SIGMA. */
+	eta /= *sigma + sqrt(eta + *sigma * *sigma);
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    delta[j] -= eta;
+	    work[j] += eta;
+/* L50: */
+	}
+
+	*sigma += eta;
+
+/*        Evaluate PSI and the derivative DPSI */
+
+	dpsi = 0.;
+	psi = 0.;
+	erretm = 0.;
+	i__1 = ii;
+	for (j = 1; j <= i__1; ++j) {
+	    temp = z__[j] / (work[j] * delta[j]);
+	    psi += z__[j] * temp;
+	    dpsi += temp * temp;
+	    erretm += psi;
+/* L60: */
+	}
+	erretm = abs(erretm);
+
+/*        Evaluate PHI and the derivative DPHI */
+
+	temp = z__[*n] / (work[*n] * delta[*n]);
+	phi = z__[*n] * temp;
+	dphi = temp * temp;
+	erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi 
+		+ dphi);
+
+	w = rhoinv + phi + psi;
+
+/*        Main loop to update the values of the array   DELTA */
+
+	iter = niter + 1;
+
+/*        MAXIT = 20: at most 20 iterations for this eigenvalue. */
+	for (niter = iter; niter <= 20; ++niter) {
+
+/*           Test for convergence */
+
+	    if (abs(w) <= eps * erretm) {
+		goto L240;
+	    }
+
+/*           Calculate the new step */
+
+	    dtnsq1 = work[*n - 1] * delta[*n - 1];
+	    dtnsq = work[*n] * delta[*n];
+	    c__ = w - dtnsq1 * dpsi - dtnsq * dphi;
+	    a = (dtnsq + dtnsq1) * w - dtnsq1 * dtnsq * (dpsi + dphi);
+	    b = dtnsq1 * dtnsq * w;
+	    if (a >= 0.) {
+		eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+			c__ * 2.);
+	    } else {
+		eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(
+			d__1))));
+	    }
+
+/*           Note, eta should be positive if w is negative, and */
+/*           eta should be negative otherwise. However, */
+/*           if for some reason caused by roundoff, eta*w > 0, */
+/*           we simply use one Newton step instead. This way */
+/*           will guarantee eta*w < 0. */
+
+	    if (w * eta > 0.) {
+		eta = -w / (dpsi + dphi);
+	    }
+	    temp = eta - dtnsq;
+	    if (temp <= 0.) {
+		eta /= 2.;
+	    }
+
+	    tau += eta;
+	    eta /= *sigma + sqrt(eta + *sigma * *sigma);
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		delta[j] -= eta;
+		work[j] += eta;
+/* L70: */
+	    }
+
+	    *sigma += eta;
+
+/*           Evaluate PSI and the derivative DPSI */
+
+	    dpsi = 0.;
+	    psi = 0.;
+	    erretm = 0.;
+	    i__1 = ii;
+	    for (j = 1; j <= i__1; ++j) {
+		temp = z__[j] / (work[j] * delta[j]);
+		psi += z__[j] * temp;
+		dpsi += temp * temp;
+		erretm += psi;
+/* L80: */
+	    }
+	    erretm = abs(erretm);
+
+/*           Evaluate PHI and the derivative DPHI */
+
+	    temp = z__[*n] / (work[*n] * delta[*n]);
+	    phi = z__[*n] * temp;
+	    dphi = temp * temp;
+	    erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (
+		    dpsi + dphi);
+
+	    w = rhoinv + phi + psi;
+/* L90: */
+	}
+
+/*        Return with INFO = 1, NITER = MAXIT and not converged */
+
+	*info = 1;
+	goto L240;
+
+/*        End for the case I = N */
+
+    } else {
+
+/*        The case for I < N */
+
+	niter = 1;
+	ip1 = *i__ + 1;
+
+/*        Calculate initial guess */
+
+	delsq = (d__[ip1] - d__[*i__]) * (d__[ip1] + d__[*i__]);
+	delsq2 = delsq / 2.;
+	temp = delsq2 / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + delsq2));
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    work[j] = d__[j] + d__[*i__] + temp;
+	    delta[j] = d__[j] - d__[*i__] - temp;
+/* L100: */
+	}
+
+	psi = 0.;
+	i__1 = *i__ - 1;
+	for (j = 1; j <= i__1; ++j) {
+	    psi += z__[j] * z__[j] / (work[j] * delta[j]);
+/* L110: */
+	}
+
+	phi = 0.;
+	i__1 = *i__ + 2;
+	for (j = *n; j >= i__1; --j) {
+	    phi += z__[j] * z__[j] / (work[j] * delta[j]);
+/* L120: */
+	}
+	c__ = rhoinv + psi + phi;
+	w = c__ + z__[*i__] * z__[*i__] / (work[*i__] * delta[*i__]) + z__[
+		ip1] * z__[ip1] / (work[ip1] * delta[ip1]);
+
+	if (w > 0.) {
+
+/*           d(i)^2 < the ith sigma^2 < (d(i)^2+d(i+1)^2)/2 */
+
+/*           We choose d(i) as origin. */
+
+	    orgati = TRUE_;
+	    sg2lb = 0.;
+	    sg2ub = delsq2;
+	    a = c__ * delsq + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1];
+	    b = z__[*i__] * z__[*i__] * delsq;
+	    if (a > 0.) {
+		tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(
+			d__1))));
+	    } else {
+		tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+			c__ * 2.);
+	    }
+
+/*           TAU now is an estimation of SIGMA^2 - D( I )^2. The */
+/*           following, however, is the corresponding estimation of */
+/*           SIGMA - D( I ). */
+
+	    eta = tau / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + tau));
+	} else {
+
+/*           (d(i)^2+d(i+1)^2)/2 <= the ith sigma^2 < d(i+1)^2/2 */
+
+/*           We choose d(i+1) as origin. */
+
+	    orgati = FALSE_;
+	    sg2lb = -delsq2;
+	    sg2ub = 0.;
+	    a = c__ * delsq - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1];
+	    b = z__[ip1] * z__[ip1] * delsq;
+	    if (a < 0.) {
+		tau = b * 2. / (a - sqrt((d__1 = a * a + b * 4. * c__, abs(
+			d__1))));
+	    } else {
+		tau = -(a + sqrt((d__1 = a * a + b * 4. * c__, abs(d__1)))) / 
+			(c__ * 2.);
+	    }
+
+/*           TAU now is an estimation of SIGMA^2 - D( IP1 )^2. The */
+/*           following, however, is the corresponding estimation of */
+/*           SIGMA - D( IP1 ). */
+
+	    eta = tau / (d__[ip1] + sqrt((d__1 = d__[ip1] * d__[ip1] + tau, 
+		    abs(d__1))));
+	}
+
+	if (orgati) {
+	    ii = *i__;
+	    *sigma = d__[*i__] + eta;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		work[j] = d__[j] + d__[*i__] + eta;
+		delta[j] = d__[j] - d__[*i__] - eta;
+/* L130: */
+	    }
+	} else {
+	    ii = *i__ + 1;
+	    *sigma = d__[ip1] + eta;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		work[j] = d__[j] + d__[ip1] + eta;
+		delta[j] = d__[j] - d__[ip1] - eta;
+/* L140: */
+	    }
+	}
+	iim1 = ii - 1;
+	iip1 = ii + 1;
+
+/*        Evaluate PSI and the derivative DPSI */
+
+	dpsi = 0.;
+	psi = 0.;
+	erretm = 0.;
+	i__1 = iim1;
+	for (j = 1; j <= i__1; ++j) {
+	    temp = z__[j] / (work[j] * delta[j]);
+	    psi += z__[j] * temp;
+	    dpsi += temp * temp;
+	    erretm += psi;
+/* L150: */
+	}
+	erretm = abs(erretm);
+
+/*        Evaluate PHI and the derivative DPHI */
+
+	dphi = 0.;
+	phi = 0.;
+	i__1 = iip1;
+	for (j = *n; j >= i__1; --j) {
+	    temp = z__[j] / (work[j] * delta[j]);
+	    phi += z__[j] * temp;
+	    dphi += temp * temp;
+	    erretm += phi;
+/* L160: */
+	}
+
+	w = rhoinv + phi + psi;
+
+/*        W is the value of the secular function with */
+/*        its ii-th element removed. */
+
+	swtch3 = FALSE_;
+	if (orgati) {
+	    if (w < 0.) {
+		swtch3 = TRUE_;
+	    }
+	} else {
+	    if (w > 0.) {
+		swtch3 = TRUE_;
+	    }
+	}
+	if (ii == 1 || ii == *n) {
+	    swtch3 = FALSE_;
+	}
+
+	temp = z__[ii] / (work[ii] * delta[ii]);
+	dw = dpsi + dphi + temp * temp;
+	temp = z__[ii] * temp;
+	w += temp;
+	erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + 
+		abs(tau) * dw;
+
+/*        Test for convergence */
+
+	if (abs(w) <= eps * erretm) {
+	    goto L240;
+	}
+
+	if (w <= 0.) {
+	    sg2lb = max(sg2lb,tau);
+	} else {
+	    sg2ub = min(sg2ub,tau);
+	}
+
+/*        Calculate the new step */
+
+	++niter;
+	if (! swtch3) {
+	    dtipsq = work[ip1] * delta[ip1];
+	    dtisq = work[*i__] * delta[*i__];
+	    if (orgati) {
+/* Computing 2nd power */
+		d__1 = z__[*i__] / dtisq;
+		c__ = w - dtipsq * dw + delsq * (d__1 * d__1);
+	    } else {
+/* Computing 2nd power */
+		d__1 = z__[ip1] / dtipsq;
+		c__ = w - dtisq * dw - delsq * (d__1 * d__1);
+	    }
+	    a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw;
+	    b = dtipsq * dtisq * w;
+	    if (c__ == 0.) {
+		if (a == 0.) {
+		    if (orgati) {
+			a = z__[*i__] * z__[*i__] + dtipsq * dtipsq * (dpsi + 
+				dphi);
+		    } else {
+			a = z__[ip1] * z__[ip1] + dtisq * dtisq * (dpsi + 
+				dphi);
+		    }
+		}
+		eta = b / a;
+	    } else if (a <= 0.) {
+		eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+			c__ * 2.);
+	    } else {
+		eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(
+			d__1))));
+	    }
+	} else {
+
+/*           Interpolation using THREE most relevant poles */
+
+	    dtiim = work[iim1] * delta[iim1];
+	    dtiip = work[iip1] * delta[iip1];
+	    temp = rhoinv + psi + phi;
+	    if (orgati) {
+		temp1 = z__[iim1] / dtiim;
+		temp1 *= temp1;
+		c__ = temp - dtiip * (dpsi + dphi) - (d__[iim1] - d__[iip1]) *
+			 (d__[iim1] + d__[iip1]) * temp1;
+		zz[0] = z__[iim1] * z__[iim1];
+		if (dpsi < temp1) {
+		    zz[2] = dtiip * dtiip * dphi;
+		} else {
+		    zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi);
+		}
+	    } else {
+		temp1 = z__[iip1] / dtiip;
+		temp1 *= temp1;
+		c__ = temp - dtiim * (dpsi + dphi) - (d__[iip1] - d__[iim1]) *
+			 (d__[iim1] + d__[iip1]) * temp1;
+		if (dphi < temp1) {
+		    zz[0] = dtiim * dtiim * dpsi;
+		} else {
+		    zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1));
+		}
+		zz[2] = z__[iip1] * z__[iip1];
+	    }
+	    zz[1] = z__[ii] * z__[ii];
+	    dd[0] = dtiim;
+	    dd[1] = delta[ii] * work[ii];
+	    dd[2] = dtiip;
+	    dlaed6_(&niter, &orgati, &c__, dd, zz, &w, &eta, info);
+	    if (*info != 0) {
+		goto L240;
+	    }
+	}
+
+/*        Note, eta should be positive if w is negative, and */
+/*        eta should be negative otherwise. However, */
+/*        if for some reason caused by roundoff, eta*w > 0, */
+/*        we simply use one Newton step instead. This way */
+/*        will guarantee eta*w < 0. */
+
+	if (w * eta >= 0.) {
+	    eta = -w / dw;
+	}
+	if (orgati) {
+	    temp1 = work[*i__] * delta[*i__];
+	    temp = eta - temp1;
+	} else {
+	    temp1 = work[ip1] * delta[ip1];
+	    temp = eta - temp1;
+	}
+	if (temp > sg2ub || temp < sg2lb) {
+	    if (w < 0.) {
+		eta = (sg2ub - tau) / 2.;
+	    } else {
+		eta = (sg2lb - tau) / 2.;
+	    }
+	}
+
+	tau += eta;
+	eta /= *sigma + sqrt(*sigma * *sigma + eta);
+
+	prew = w;
+
+	*sigma += eta;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    work[j] += eta;
+	    delta[j] -= eta;
+/* L170: */
+	}
+
+/*        Evaluate PSI and the derivative DPSI */
+
+	dpsi = 0.;
+	psi = 0.;
+	erretm = 0.;
+	i__1 = iim1;
+	for (j = 1; j <= i__1; ++j) {
+	    temp = z__[j] / (work[j] * delta[j]);
+	    psi += z__[j] * temp;
+	    dpsi += temp * temp;
+	    erretm += psi;
+/* L180: */
+	}
+	erretm = abs(erretm);
+
+/*        Evaluate PHI and the derivative DPHI */
+
+	dphi = 0.;
+	phi = 0.;
+	i__1 = iip1;
+	for (j = *n; j >= i__1; --j) {
+	    temp = z__[j] / (work[j] * delta[j]);
+	    phi += z__[j] * temp;
+	    dphi += temp * temp;
+	    erretm += phi;
+/* L190: */
+	}
+
+	temp = z__[ii] / (work[ii] * delta[ii]);
+	dw = dpsi + dphi + temp * temp;
+	temp = z__[ii] * temp;
+	w = rhoinv + phi + psi + temp;
+	erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + 
+		abs(tau) * dw;
+
+	if (w <= 0.) {
+	    sg2lb = max(sg2lb,tau);
+	} else {
+	    sg2ub = min(sg2ub,tau);
+	}
+
+	swtch = FALSE_;
+	if (orgati) {
+	    if (-w > abs(prew) / 10.) {
+		swtch = TRUE_;
+	    }
+	} else {
+	    if (w > abs(prew) / 10.) {
+		swtch = TRUE_;
+	    }
+	}
+
+/*        Main loop to update the values of the array   DELTA and WORK */
+
+	iter = niter + 1;
+
+/*        MAXIT = 20: at most 20 iterations for this eigenvalue. */
+	for (niter = iter; niter <= 20; ++niter) {
+
+/*           Test for convergence */
+
+	    if (abs(w) <= eps * erretm) {
+		goto L240;
+	    }
+
+/*           Calculate the new step */
+
+	    if (! swtch3) {
+		dtipsq = work[ip1] * delta[ip1];
+		dtisq = work[*i__] * delta[*i__];
+		if (! swtch) {
+		    if (orgati) {
+/* Computing 2nd power */
+			d__1 = z__[*i__] / dtisq;
+			c__ = w - dtipsq * dw + delsq * (d__1 * d__1);
+		    } else {
+/* Computing 2nd power */
+			d__1 = z__[ip1] / dtipsq;
+			c__ = w - dtisq * dw - delsq * (d__1 * d__1);
+		    }
+		} else {
+		    temp = z__[ii] / (work[ii] * delta[ii]);
+		    if (orgati) {
+			dpsi += temp * temp;
+		    } else {
+			dphi += temp * temp;
+		    }
+		    c__ = w - dtisq * dpsi - dtipsq * dphi;
+		}
+		a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw;
+		b = dtipsq * dtisq * w;
+		if (c__ == 0.) {
+		    if (a == 0.) {
+			if (! swtch) {
+			    if (orgati) {
+				a = z__[*i__] * z__[*i__] + dtipsq * dtipsq * 
+					(dpsi + dphi);
+			    } else {
+				a = z__[ip1] * z__[ip1] + dtisq * dtisq * (
+					dpsi + dphi);
+			    }
+			} else {
+			    a = dtisq * dtisq * dpsi + dtipsq * dtipsq * dphi;
+			}
+		    }
+		    eta = b / a;
+		} else if (a <= 0.) {
+		    eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))))
+			     / (c__ * 2.);
+		} else {
+		    eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, 
+			    abs(d__1))));
+		}
+	    } else {
+
+/*              Interpolation using THREE most relevant poles */
+
+		dtiim = work[iim1] * delta[iim1];
+		dtiip = work[iip1] * delta[iip1];
+		temp = rhoinv + psi + phi;
+		if (swtch) {
+		    c__ = temp - dtiim * dpsi - dtiip * dphi;
+		    zz[0] = dtiim * dtiim * dpsi;
+		    zz[2] = dtiip * dtiip * dphi;
+		} else {
+		    if (orgati) {
+			temp1 = z__[iim1] / dtiim;
+			temp1 *= temp1;
+			temp2 = (d__[iim1] - d__[iip1]) * (d__[iim1] + d__[
+				iip1]) * temp1;
+			c__ = temp - dtiip * (dpsi + dphi) - temp2;
+			zz[0] = z__[iim1] * z__[iim1];
+			if (dpsi < temp1) {
+			    zz[2] = dtiip * dtiip * dphi;
+			} else {
+			    zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi);
+			}
+		    } else {
+			temp1 = z__[iip1] / dtiip;
+			temp1 *= temp1;
+			temp2 = (d__[iip1] - d__[iim1]) * (d__[iim1] + d__[
+				iip1]) * temp1;
+			c__ = temp - dtiim * (dpsi + dphi) - temp2;
+			if (dphi < temp1) {
+			    zz[0] = dtiim * dtiim * dpsi;
+			} else {
+			    zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1));
+			}
+			zz[2] = z__[iip1] * z__[iip1];
+		    }
+		}
+		dd[0] = dtiim;
+		dd[1] = delta[ii] * work[ii];
+		dd[2] = dtiip;
+		dlaed6_(&niter, &orgati, &c__, dd, zz, &w, &eta, info);
+		if (*info != 0) {
+		    goto L240;
+		}
+	    }
+
+/*           Note, eta should be positive if w is negative, and */
+/*           eta should be negative otherwise. However, */
+/*           if for some reason caused by roundoff, eta*w > 0, */
+/*           we simply use one Newton step instead. This way */
+/*           will guarantee eta*w < 0. */
+
+	    if (w * eta >= 0.) {
+		eta = -w / dw;
+	    }
+	    if (orgati) {
+		temp1 = work[*i__] * delta[*i__];
+		temp = eta - temp1;
+	    } else {
+		temp1 = work[ip1] * delta[ip1];
+		temp = eta - temp1;
+	    }
+	    if (temp > sg2ub || temp < sg2lb) {
+		if (w < 0.) {
+		    eta = (sg2ub - tau) / 2.;
+		} else {
+		    eta = (sg2lb - tau) / 2.;
+		}
+	    }
+
+	    tau += eta;
+	    eta /= *sigma + sqrt(*sigma * *sigma + eta);
+
+	    *sigma += eta;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		work[j] += eta;
+		delta[j] -= eta;
+/* L200: */
+	    }
+
+	    prew = w;
+
+/*           Evaluate PSI and the derivative DPSI */
+
+	    dpsi = 0.;
+	    psi = 0.;
+	    erretm = 0.;
+	    i__1 = iim1;
+	    for (j = 1; j <= i__1; ++j) {
+		temp = z__[j] / (work[j] * delta[j]);
+		psi += z__[j] * temp;
+		dpsi += temp * temp;
+		erretm += psi;
+/* L210: */
+	    }
+	    erretm = abs(erretm);
+
+/*           Evaluate PHI and the derivative DPHI */
+
+	    dphi = 0.;
+	    phi = 0.;
+	    i__1 = iip1;
+	    for (j = *n; j >= i__1; --j) {
+		temp = z__[j] / (work[j] * delta[j]);
+		phi += z__[j] * temp;
+		dphi += temp * temp;
+		erretm += phi;
+/* L220: */
+	    }
+
+	    temp = z__[ii] / (work[ii] * delta[ii]);
+	    dw = dpsi + dphi + temp * temp;
+	    temp = z__[ii] * temp;
+	    w = rhoinv + phi + psi + temp;
+	    erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. 
+		    + abs(tau) * dw;
+	    if (w * prew > 0. && abs(w) > abs(prew) / 10.) {
+		swtch = ! swtch;
+	    }
+
+	    if (w <= 0.) {
+		sg2lb = max(sg2lb,tau);
+	    } else {
+		sg2ub = min(sg2ub,tau);
+	    }
+
+/* L230: */
+	}
+
+/*        Return with INFO = 1, NITER = MAXIT and not converged */
+
+	*info = 1;
+
+    }
+
+L240:
+    return 0;
+
+/*     End of DLASD4 */
+
+} /* dlasd4_ */
diff --git a/contrib/libs/clapack/dlasd5.c b/contrib/libs/clapack/dlasd5.c
new file mode 100644
index 0000000000..26fff2650d
--- /dev/null
+++ b/contrib/libs/clapack/dlasd5.c
@@ -0,0 +1,189 @@
+/* dlasd5.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlasd5_(integer *i__, doublereal *d__, doublereal *z__, 
+	doublereal *delta, doublereal *rho, doublereal *dsigma, doublereal *
+	work)
+{
+    /* System generated locals */
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    doublereal b, c__, w, del, tau, delsq;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  This subroutine computes the square root of the I-th eigenvalue */
+/*  of a positive symmetric rank-one modification of a 2-by-2 diagonal */
+/*  matrix */
+
+/*             diag( D ) * diag( D ) +  RHO *  Z * transpose(Z) . */
+
+/*  The diagonal entries in the array D are assumed to satisfy */
+
+/*             0 <= D(i) < D(j)  for  i < j . */
+
+/*  We also assume RHO > 0 and that the Euclidean norm of the vector */
+/*  Z is one. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  I      (input) INTEGER */
+/*         The index of the eigenvalue to be computed.  I = 1 or I = 2. */
+
+/*  D      (input) DOUBLE PRECISION array, dimension ( 2 ) */
+/*         The original eigenvalues.  We assume 0 <= D(1) < D(2). */
+
+/*  Z      (input) DOUBLE PRECISION array, dimension ( 2 ) */
+/*         The components of the updating vector. */
+
+/*  DELTA  (output) DOUBLE PRECISION array, dimension ( 2 ) */
+/*         Contains (D(j) - sigma_I) in its  j-th component. */
+/*         The vector DELTA contains the information necessary */
+/*         to construct the eigenvectors. */
+
+/*  RHO    (input) DOUBLE PRECISION */
+/*         The scalar in the symmetric updating formula. */
+
+/*  DSIGMA (output) DOUBLE PRECISION */
+/*         The computed sigma_I, the I-th updated eigenvalue. */
+
+/*  WORK   (workspace) DOUBLE PRECISION array, dimension ( 2 ) */
+/*         WORK contains (D(j) + sigma_I) in its  j-th component. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*     Ren-Cang Li, Computer Science Division, University of California */
+/*     at Berkeley, USA */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --work;
+    --delta;
+    --z__;
+    --d__;
+
+    /* Function Body */
+/*     DEL = D(2) - D(1) > 0 and DELSQ = D(2)**2 - D(1)**2. */
+    del = d__[2] - d__[1];
+    delsq = del * (d__[2] + d__[1]);
+    if (*i__ == 1) {
+/*        W is the secular function evaluated at */
+/*        SIGMA**2 = ( ( D(1)+D(2) ) / 2 )**2; its sign decides on which */
+/*        side of that midpoint the first root lies. */
+	w = *rho * 4. * (z__[2] * z__[2] / (d__[1] + d__[2] * 3.) - z__[1] * 
+		z__[1] / (d__[1] * 3. + d__[2])) / del + 1.;
+	if (w > 0.) {
+	    b = delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+	    c__ = *rho * z__[1] * z__[1] * delsq;
+
+/*           B > ZERO, always */
+
+/*           The following TAU is DSIGMA * DSIGMA - D( 1 ) * D( 1 ) */
+
+	    tau = c__ * 2. / (b + sqrt((d__1 = b * b - c__ * 4., abs(d__1))));
+
+/*           The following TAU is DSIGMA - D( 1 ) */
+
+	    tau /= d__[1] + sqrt(d__[1] * d__[1] + tau);
+	    *dsigma = d__[1] + tau;
+	    delta[1] = -tau;
+	    delta[2] = del - tau;
+	    work[1] = d__[1] * 2. + tau;
+	    work[2] = d__[1] + tau + d__[2];
+/*           DELTA( 1 ) = -Z( 1 ) / TAU */
+/*           DELTA( 2 ) = Z( 2 ) / ( DEL-TAU ) */
+	} else {
+	    b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+	    c__ = *rho * z__[2] * z__[2] * delsq;
+
+/*           The following TAU is DSIGMA * DSIGMA - D( 2 ) * D( 2 ) */
+
+	    if (b > 0.) {
+		tau = c__ * -2. / (b + sqrt(b * b + c__ * 4.));
+	    } else {
+		tau = (b - sqrt(b * b + c__ * 4.)) / 2.;
+	    }
+
+/*           The following TAU is DSIGMA - D( 2 ) */
+
+	    tau /= d__[2] + sqrt((d__1 = d__[2] * d__[2] + tau, abs(d__1)));
+	    *dsigma = d__[2] + tau;
+	    delta[1] = -(del + tau);
+	    delta[2] = -tau;
+	    work[1] = d__[1] + tau + d__[2];
+	    work[2] = d__[2] * 2. + tau;
+/*           DELTA( 1 ) = -Z( 1 ) / ( DEL+TAU ) */
+/*           DELTA( 2 ) = -Z( 2 ) / TAU */
+	}
+/*        TEMP = SQRT( DELTA( 1 )*DELTA( 1 )+DELTA( 2 )*DELTA( 2 ) ) */
+/*        DELTA( 1 ) = DELTA( 1 ) / TEMP */
+/*        DELTA( 2 ) = DELTA( 2 ) / TEMP */
+    } else {
+
+/*        Now I=2 */
+
+	b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+	c__ = *rho * z__[2] * z__[2] * delsq;
+
+/*        The following TAU is DSIGMA * DSIGMA - D( 2 ) * D( 2 ) */
+
+	if (b > 0.) {
+	    tau = (b + sqrt(b * b + c__ * 4.)) / 2.;
+	} else {
+	    tau = c__ * 2. / (-b + sqrt(b * b + c__ * 4.));
+	}
+
+/*        The following TAU is DSIGMA - D( 2 ) */
+
+	tau /= d__[2] + sqrt(d__[2] * d__[2] + tau);
+	*dsigma = d__[2] + tau;
+	delta[1] = -(del + tau);
+	delta[2] = -tau;
+	work[1] = d__[1] + tau + d__[2];
+	work[2] = d__[2] * 2. + tau;
+/*        DELTA( 1 ) = -Z( 1 ) / ( DEL+TAU ) */
+/*        DELTA( 2 ) = -Z( 2 ) / TAU */
+/*        TEMP = SQRT( DELTA( 1 )*DELTA( 1 )+DELTA( 2 )*DELTA( 2 ) ) */
+/*        DELTA( 1 ) = DELTA( 1 ) / TEMP */
+/*        DELTA( 2 ) = DELTA( 2 ) / TEMP */
+    }
+    return 0;
+
+/*     End of DLASD5 */
+
+} /* dlasd5_ */
diff --git a/contrib/libs/clapack/dlasd6.c b/contrib/libs/clapack/dlasd6.c
new file mode 100644
index 0000000000..f1d0ec6cd1
--- /dev/null
+++ b/contrib/libs/clapack/dlasd6.c
@@ -0,0 +1,367 @@
+/* dlasd6.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__0 = 0;
+static doublereal c_b7 = 1.;
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dlasd6_(integer *icompq, integer *nl, integer *nr,
+ integer *sqre, doublereal *d__, doublereal *vf, doublereal *vl,
+ doublereal *alpha, doublereal *beta, integer *idxq, integer *perm,
+ integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum,
+ integer *ldgnum, doublereal *poles, doublereal *difl, doublereal *
+ difr, doublereal *z__, integer *k, doublereal *c__, doublereal *s,
+ doublereal *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset,
+ poles_dim1, poles_offset, i__1;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ integer i__, m, n, n1, n2, iw, idx, idxc, idxp, ivfw, ivlw;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dlasd7_(integer *, integer *, integer *,
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, integer *, integer *,
+ integer *, integer *, integer *, integer *, integer *, doublereal
+ *, integer *, doublereal *, doublereal *, integer *), dlasd8_(
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *), dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dlamrg_(integer *, integer *,
+ doublereal *, integer *, integer *, integer *);
+ integer isigma;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal orgnrm;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASD6 computes the SVD of an updated upper bidiagonal matrix B */
+/* obtained by merging two smaller ones by appending a row. This */
+/* routine is used only for the problem which requires all singular */
+/* values and optionally singular vector matrices in factored form. */
+/* B is an N-by-M matrix with N = NL + NR + 1 and M = N + SQRE. */
+/* A related subroutine, DLASD1, handles the case in which all singular */
+/* values and singular vectors of the bidiagonal matrix are desired. */
+
+/* DLASD6 computes the SVD as follows: */
+
+/* ( D1(in) 0 0 0 ) */
+/* B = U(in) * ( Z1' a Z2' b ) * VT(in) */
+/* ( 0 0 D2(in) 0 ) */
+
+/* = U(out) * ( D(out) 0) * VT(out) */
+
+/* where Z' = (Z1' a Z2' b) = u' VT', and u is a vector of dimension M */
+/* with ALPHA and BETA in the NL+1 and NL+2 th entries and zeros */
+/* elsewhere; and the entry b is empty if SQRE = 0. */
+
+/* The singular values of B can be computed using D1, D2, the first */
+/* components of all the right singular vectors of the lower block, and */
+/* the last components of all the right singular vectors of the upper */
+/* block. These components are stored and updated in VF and VL, */
+/* respectively, in DLASD6. Hence U and VT are not explicitly */
+/* referenced. */
+
+/* The singular values are stored in D. The algorithm consists of two */
+/* stages: */
+
+/* The first stage consists of deflating the size of the problem */
+/* when there are multiple singular values or if there is a zero */
+/* in the Z vector. For each such occurence the dimension of the */
+/* secular equation problem is reduced by one. This stage is */
+/* performed by the routine DLASD7. */
+
+/* The second stage consists of calculating the updated */
+/* singular values. This is done by finding the roots of the */
+/* secular equation via the routine DLASD4 (as called by DLASD8). */
+/* This routine also updates VF and VL and computes the distances */
+/* between the updated singular values and the old singular */
+/* values. */
+
+/* DLASD6 is called from DLASDA. */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* Specifies whether singular vectors are to be computed in */
+/* factored form: */
+/* = 0: Compute singular values only. */
+/* = 1: Compute singular vectors in factored form as well. */
+
+/* NL (input) INTEGER */
+/* The row dimension of the upper block. NL >= 1. */
+
+/* NR (input) INTEGER */
+/* The row dimension of the lower block. NR >= 1. */
+
+/* SQRE (input) INTEGER */
+/* = 0: the lower block is an NR-by-NR square matrix. */
+/* = 1: the lower block is an NR-by-(NR+1) rectangular matrix. */
+
+/* The bidiagonal matrix has row dimension N = NL + NR + 1, */
+/* and column dimension M = N + SQRE. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension ( NL+NR+1 ). */
+/* On entry D(1:NL,1:NL) contains the singular values of the */
+/* upper block, and D(NL+2:N) contains the singular values */
+/* of the lower block. On exit D(1:N) contains the singular */
+/* values of the modified matrix. */
+
+/* VF (input/output) DOUBLE PRECISION array, dimension ( M ) */
+/* On entry, VF(1:NL+1) contains the first components of all */
+/* right singular vectors of the upper block; and VF(NL+2:M) */
+/* contains the first components of all right singular vectors */
+/* of the lower block. On exit, VF contains the first components */
+/* of all right singular vectors of the bidiagonal matrix. */
+
+/* VL (input/output) DOUBLE PRECISION array, dimension ( M ) */
+/* On entry, VL(1:NL+1) contains the last components of all */
+/* right singular vectors of the upper block; and VL(NL+2:M) */
+/* contains the last components of all right singular vectors of */
+/* the lower block. On exit, VL contains the last components of */
+/* all right singular vectors of the bidiagonal matrix. */
+
+/* ALPHA (input/output) DOUBLE PRECISION */
+/* Contains the diagonal element associated with the added row. */
+
+/* BETA (input/output) DOUBLE PRECISION */
+/* Contains the off-diagonal element associated with the added */
+/* row. */
+
+/* IDXQ (output) INTEGER array, dimension ( N ) */
+/* This contains the permutation which will reintegrate the */
+/* subproblem just solved back into sorted order, i.e. */
+/* D( IDXQ( I = 1, N ) ) will be in ascending order. */
+
+/* PERM (output) INTEGER array, dimension ( N ) */
+/* The permutations (from deflation and sorting) to be applied */
+/* to each block. Not referenced if ICOMPQ = 0. */
+
+/* GIVPTR (output) INTEGER */
+/* The number of Givens rotations which took place in this */
+/* subproblem. Not referenced if ICOMPQ = 0. */
+
+/* GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 ) */
+/* Each pair of numbers indicates a pair of columns to take place */
+/* in a Givens rotation. Not referenced if ICOMPQ = 0. */
+
+/* LDGCOL (input) INTEGER */
+/* leading dimension of GIVCOL, must be at least N. */
+
+/* GIVNUM (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) */
+/* Each number indicates the C or S value to be used in the */
+/* corresponding Givens rotation. Not referenced if ICOMPQ = 0. */
+
+/* LDGNUM (input) INTEGER */
+/* The leading dimension of GIVNUM and POLES, must be at least N. */
+
+/* POLES (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) */
+/* On exit, POLES(1,*) is an array containing the new singular */
+/* values obtained from solving the secular equation, and */
+/* POLES(2,*) is an array containing the poles in the secular */
+/* equation. Not referenced if ICOMPQ = 0. */
+
+/* DIFL (output) DOUBLE PRECISION array, dimension ( N ) */
+/* On exit, DIFL(I) is the distance between I-th updated */
+/* (undeflated) singular value and the I-th (undeflated) old */
+/* singular value. */
+
+/* DIFR (output) DOUBLE PRECISION array, */
+/* dimension ( LDGNUM, 2 ) if ICOMPQ = 1 and */
+/* dimension ( N ) if ICOMPQ = 0. */
+/* On exit, DIFR(I, 1) is the distance between I-th updated */
+/* (undeflated) singular value and the I+1-th (undeflated) old */
+/* singular value. */
+
+/* If ICOMPQ = 1, DIFR(1:K,2) is an array containing the */
+/* normalizing factors for the right singular vector matrix. */
+
+/* See DLASD8 for details on DIFL and DIFR. */
+
+/* Z (output) DOUBLE PRECISION array, dimension ( M ) */
+/* The first elements of this array contain the components */
+/* of the deflation-adjusted updating row vector. */
+
+/* K (output) INTEGER */
+/* Contains the dimension of the non-deflated matrix, */
+/* This is the order of the related secular equation. 1 <= K <=N. */
+
+/* C (output) DOUBLE PRECISION */
+/* C contains garbage if SQRE =0 and the C-value of a Givens */
+/* rotation related to the right null space if SQRE = 1. */
+
+/* S (output) DOUBLE PRECISION */
+/* S contains garbage if SQRE =0 and the S-value of a Givens */
+/* rotation related to the right null space if SQRE = 1. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension ( 4 * M ) */
+
+/* IWORK (workspace) INTEGER array, dimension ( 3 * N ) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, an singular value did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+/* Standard f2c idiom: shift each array pointer so that 1-based Fortran */
+/* indexing can be used below; 2-D arrays get a leading-dimension offset. */
+ --d__;
+ --vf;
+ --vl;
+ --idxq;
+ --perm;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ poles_dim1 = *ldgnum;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ givnum_dim1 = *ldgnum;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ --difl;
+ --difr;
+ --z__;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+
+/* Negative INFO encodes the position of the first invalid argument. */
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*nl < 1) {
+ *info = -2;
+ } else if (*nr < 1) {
+ *info = -3;
+ } else if (*sqre < 0 || *sqre > 1) {
+ *info = -4;
+ } else if (*ldgcol < n) {
+ *info = -14;
+ } else if (*ldgnum < n) {
+ *info = -16;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASD6", &i__1);
+ return 0;
+ }
+
+/* The following values are for bookkeeping purposes only. They are */
+/* integer pointers which indicate the portion of the workspace */
+/* used by a particular array in DLASD7 and DLASD8. */
+
+ isigma = 1;
+ iw = isigma + n;
+ ivfw = iw + m;
+ ivlw = ivfw + m;
+
+ idx = 1;
+ idxc = idx + n;
+ idxp = idxc + n;
+
+/* Scale. */
+
+/* Computing MAX */
+ d__1 = abs(*alpha), d__2 = abs(*beta);
+ orgnrm = max(d__1,d__2);
+/* Zero the diagonal entry for the appended row so the max-abs scan */
+/* below ignores it. */
+ d__[*nl + 1] = 0.;
+ i__1 = n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((d__1 = d__[i__], abs(d__1)) > orgnrm) {
+ orgnrm = (d__1 = d__[i__], abs(d__1));
+ }
+/* L10: */
+ }
+/* Divide D, ALPHA and BETA by ORGNRM; the inverse scaling is applied */
+/* to D after DLASD8 below. */
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b7, &n, &c__1, &d__[1], &n, info);
+ *alpha /= orgnrm;
+ *beta /= orgnrm;
+
+/* Sort and Deflate singular values. */
+
+ dlasd7_(icompq, nl, nr, sqre, k, &d__[1], &z__[1], &work[iw], &vf[1], &
+ work[ivfw], &vl[1], &work[ivlw], alpha, beta, &work[isigma], &
+ iwork[idx], &iwork[idxp], &idxq[1], &perm[1], givptr, &givcol[
+ givcol_offset], ldgcol, &givnum[givnum_offset], ldgnum, c__, s,
+ info);
+
+/* Solve Secular Equation, compute DIFL, DIFR, and update VF, VL. */
+
+ dlasd8_(icompq, k, &d__[1], &z__[1], &vf[1], &vl[1], &difl[1], &difr[1],
+ ldgnum, &work[isigma], &work[iw], info);
+
+/* Save the poles if ICOMPQ = 1. */
+
+ if (*icompq == 1) {
+ dcopy_(k, &d__[1], &c__1, &poles[poles_dim1 + 1], &c__1);
+ dcopy_(k, &work[isigma], &c__1, &poles[(poles_dim1 << 1) + 1], &c__1);
+ }
+
+/* Unscale. */
+
+ dlascl_("G", &c__0, &c__0, &c_b7, &orgnrm, &n, &c__1, &d__[1], &n, info);
+
+/* Prepare the IDXQ sorting permutation. */
+
+/* D(1:K) (non-deflated) and D(K+1:N) (deflated) are each sorted; */
+/* DLAMRG merges the two sorted lists into one permutation. */
+ n1 = *k;
+ n2 = n - *k;
+ dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &idxq[1]);
+
+ return 0;
+
+/* End of DLASD6 */
+
+} /* dlasd6_ */
diff --git a/contrib/libs/clapack/dlasd7.c b/contrib/libs/clapack/dlasd7.c
new file mode 100644
index 0000000000..ea4ca056bf
--- /dev/null
+++ b/contrib/libs/clapack/dlasd7.c
@@ -0,0 +1,518 @@
+/* dlasd7.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dlasd7_(integer *icompq, integer *nl, integer *nr,
+ integer *sqre, integer *k, doublereal *d__, doublereal *z__,
+ doublereal *zw, doublereal *vf, doublereal *vfw, doublereal *vl,
+ doublereal *vlw, doublereal *alpha, doublereal *beta, doublereal *
+ dsigma, integer *idx, integer *idxp, integer *idxq, integer *perm,
+ integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum,
+ integer *ldgnum, doublereal *c__, doublereal *s, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset, i__1;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ integer i__, j, m, n, k2;
+ doublereal z1;
+ integer jp;
+ doublereal eps, tau, tol;
+ integer nlp1, nlp2, idxi, idxj;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ integer idxjp;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ integer jprev;
+ extern doublereal dlapy2_(doublereal *, doublereal *), dlamch_(char *);
+ extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+ integer *, integer *, integer *), xerbla_(char *, integer *);
+ doublereal hlftol;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASD7 merges the two sets of singular values together into a single */
+/* sorted set. Then it tries to deflate the size of the problem. There */
+/* are two ways in which deflation can occur: when two or more singular */
+/* values are close together or if there is a tiny entry in the Z */
+/* vector. For each such occurrence the order of the related */
+/* secular equation problem is reduced by one. */
+
+/* DLASD7 is called from DLASD6. */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* Specifies whether singular vectors are to be computed */
+/* in compact form, as follows: */
+/* = 0: Compute singular values only. */
+/* = 1: Compute singular vectors of upper */
+/* bidiagonal matrix in compact form. */
+
+/* NL (input) INTEGER */
+/* The row dimension of the upper block. NL >= 1. */
+
+/* NR (input) INTEGER */
+/* The row dimension of the lower block. NR >= 1. */
+
+/* SQRE (input) INTEGER */
+/* = 0: the lower block is an NR-by-NR square matrix. */
+/* = 1: the lower block is an NR-by-(NR+1) rectangular matrix. */
+
+/* The bidiagonal matrix has */
+/* N = NL + NR + 1 rows and */
+/* M = N + SQRE >= N columns. */
+
+/* K (output) INTEGER */
+/* Contains the dimension of the non-deflated matrix, this is */
+/* the order of the related secular equation. 1 <= K <=N. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension ( N ) */
+/* On entry D contains the singular values of the two submatrices */
+/* to be combined. On exit D contains the trailing (N-K) updated */
+/* singular values (those which were deflated) sorted into */
+/* increasing order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension ( M ) */
+/* On exit Z contains the updating row vector in the secular */
+/* equation. */
+
+/* ZW (workspace) DOUBLE PRECISION array, dimension ( M ) */
+/* Workspace for Z. */
+
+/* VF (input/output) DOUBLE PRECISION array, dimension ( M ) */
+/* On entry, VF(1:NL+1) contains the first components of all */
+/* right singular vectors of the upper block; and VF(NL+2:M) */
+/* contains the first components of all right singular vectors */
+/* of the lower block. On exit, VF contains the first components */
+/* of all right singular vectors of the bidiagonal matrix. */
+
+/* VFW (workspace) DOUBLE PRECISION array, dimension ( M ) */
+/* Workspace for VF. */
+
+/* VL (input/output) DOUBLE PRECISION array, dimension ( M ) */
+/* On entry, VL(1:NL+1) contains the last components of all */
+/* right singular vectors of the upper block; and VL(NL+2:M) */
+/* contains the last components of all right singular vectors */
+/* of the lower block. On exit, VL contains the last components */
+/* of all right singular vectors of the bidiagonal matrix. */
+
+/* VLW (workspace) DOUBLE PRECISION array, dimension ( M ) */
+/* Workspace for VL. */
+
+/* ALPHA (input) DOUBLE PRECISION */
+/* Contains the diagonal element associated with the added row. */
+
+/* BETA (input) DOUBLE PRECISION */
+/* Contains the off-diagonal element associated with the added */
+/* row. */
+
+/* DSIGMA (output) DOUBLE PRECISION array, dimension ( N ) */
+/* Contains a copy of the diagonal elements (K-1 singular values */
+/* and one zero) in the secular equation. */
+
+/* IDX (workspace) INTEGER array, dimension ( N ) */
+/* This will contain the permutation used to sort the contents of */
+/* D into ascending order. */
+
+/* IDXP (workspace) INTEGER array, dimension ( N ) */
+/* This will contain the permutation used to place deflated */
+/* values of D at the end of the array. On output IDXP(2:K) */
+/* points to the nondeflated D-values and IDXP(K+1:N) */
+/* points to the deflated singular values. */
+
+/* IDXQ (input) INTEGER array, dimension ( N ) */
+/* This contains the permutation which separately sorts the two */
+/* sub-problems in D into ascending order. Note that entries in */
+/* the first half of this permutation must first be moved one */
+/* position backward; and entries in the second half */
+/* must first have NL+1 added to their values. */
+
+/* PERM (output) INTEGER array, dimension ( N ) */
+/* The permutations (from deflation and sorting) to be applied */
+/* to each singular block. Not referenced if ICOMPQ = 0. */
+
+/* GIVPTR (output) INTEGER */
+/* The number of Givens rotations which took place in this */
+/* subproblem. Not referenced if ICOMPQ = 0. */
+
+/* GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 ) */
+/* Each pair of numbers indicates a pair of columns to take place */
+/* in a Givens rotation. Not referenced if ICOMPQ = 0. */
+
+/* LDGCOL (input) INTEGER */
+/* The leading dimension of GIVCOL, must be at least N. */
+
+/* GIVNUM (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) */
+/* Each number indicates the C or S value to be used in the */
+/* corresponding Givens rotation. Not referenced if ICOMPQ = 0. */
+
+/* LDGNUM (input) INTEGER */
+/* The leading dimension of GIVNUM, must be at least N. */
+
+/* C (output) DOUBLE PRECISION */
+/* C contains garbage if SQRE =0 and the C-value of a Givens */
+/* rotation related to the right null space if SQRE = 1. */
+
+/* S (output) DOUBLE PRECISION */
+/* S contains garbage if SQRE =0 and the S-value of a Givens */
+/* rotation related to the right null space if SQRE = 1. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+/* Standard f2c idiom: shift each array pointer so that 1-based Fortran */
+/* indexing can be used below; 2-D arrays get a leading-dimension offset. */
+ --d__;
+ --z__;
+ --zw;
+ --vf;
+ --vfw;
+ --vl;
+ --vlw;
+ --dsigma;
+ --idx;
+ --idxp;
+ --idxq;
+ --perm;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ givnum_dim1 = *ldgnum;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+
+ /* Function Body */
+ *info = 0;
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+
+/* Negative INFO encodes the position of the first invalid argument. */
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*nl < 1) {
+ *info = -2;
+ } else if (*nr < 1) {
+ *info = -3;
+ } else if (*sqre < 0 || *sqre > 1) {
+ *info = -4;
+ } else if (*ldgcol < n) {
+ *info = -22;
+ } else if (*ldgnum < n) {
+ *info = -24;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASD7", &i__1);
+ return 0;
+ }
+
+ nlp1 = *nl + 1;
+ nlp2 = *nl + 2;
+ if (*icompq == 1) {
+ *givptr = 0;
+ }
+
+/* Generate the first part of the vector Z and move the singular */
+/* values in the first part of D one position backward. */
+
+ z1 = *alpha * vl[nlp1];
+ vl[nlp1] = 0.;
+/* Save VF(NL+1); the backward shift below overwrites VF(2:NL+1). */
+ tau = vf[nlp1];
+ for (i__ = *nl; i__ >= 1; --i__) {
+ z__[i__ + 1] = *alpha * vl[i__];
+ vl[i__] = 0.;
+ vf[i__ + 1] = vf[i__];
+ d__[i__ + 1] = d__[i__];
+/* Entries of the first sub-permutation move one position backward */
+/* (see the IDXQ argument description above). */
+ idxq[i__ + 1] = idxq[i__] + 1;
+/* L10: */
+ }
+ vf[1] = tau;
+
+/* Generate the second part of the vector Z. */
+
+ i__1 = m;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ z__[i__] = *beta * vf[i__];
+ vf[i__] = 0.;
+/* L20: */
+ }
+
+/* Sort the singular values into increasing order */
+
+/* Entries of the second sub-permutation are offset by NL+1 so both */
+/* halves index into the combined arrays. */
+ i__1 = n;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ idxq[i__] += nlp1;
+/* L30: */
+ }
+
+/* DSIGMA, ZW, VFW and VLW are used as temporary storage while the */
+/* two sorted subproblems are merged. */
+
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ dsigma[i__] = d__[idxq[i__]];
+ zw[i__] = z__[idxq[i__]];
+ vfw[i__] = vf[idxq[i__]];
+ vlw[i__] = vl[idxq[i__]];
+/* L40: */
+ }
+
+/* Merge the two sorted halves of DSIGMA(2:N) into one sorted order, */
+/* recorded in IDX(2:N). */
+ dlamrg_(nl, nr, &dsigma[2], &c__1, &c__1, &idx[2]);
+
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ idxi = idx[i__] + 1;
+ d__[i__] = dsigma[idxi];
+ z__[i__] = zw[idxi];
+ vf[i__] = vfw[idxi];
+ vl[i__] = vlw[idxi];
+/* L50: */
+ }
+
+/* Calculate the allowable deflation tolerence */
+
+ eps = dlamch_("Epsilon");
+/* Computing MAX */
+ d__1 = abs(*alpha), d__2 = abs(*beta);
+ tol = max(d__1,d__2);
+/* Computing MAX */
+ d__2 = (d__1 = d__[n], abs(d__1));
+ tol = eps * 64. * max(d__2,tol);
+
+/* There are 2 kinds of deflation -- first a value in the z-vector */
+/* is small, second two (or more) singular values are very close */
+/* together (their difference is small). */
+
+/* If the value in the z-vector is small, we simply permute the */
+/* array so that the corresponding singular value is moved to the */
+/* end. */
+
+/* If two values in the D-vector are close, we perform a two-sided */
+/* rotation designed to make one of the corresponding z-vector */
+/* entries zero, and then permute the array so that the deflated */
+/* singular value is moved to the end. */
+
+/* If there are multiple singular values then the problem deflates. */
+/* Here the number of equal singular values are found. As each equal */
+/* singular value is found, an elementary reflector is computed to */
+/* rotate the corresponding singular subspace so that the */
+/* corresponding components of Z are zero in this new basis. */
+
+/* K counts the non-deflated values (indices collected in IDXP(2:K)); */
+/* K2 moves down from N+1, collecting deflated indices in IDXP(K2:N). */
+ *k = 1;
+ k2 = n + 1;
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ if ((d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ idxp[k2] = j;
+ if (j == n) {
+ goto L100;
+ }
+ } else {
+/* First entry with a significant z component found; continue */
+/* the scan pairwise at L80. */
+ jprev = j;
+ goto L70;
+ }
+/* L60: */
+ }
+L70:
+ j = jprev;
+L80:
+ ++j;
+ if (j > n) {
+ goto L90;
+ }
+ if ((d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ idxp[k2] = j;
+ } else {
+
+/* Check if singular values are close enough to allow deflation. */
+
+ if ((d__1 = d__[j] - d__[jprev], abs(d__1)) <= tol) {
+
+/* Deflation is possible. */
+
+ *s = z__[jprev];
+ *c__ = z__[j];
+
+/* Find sqrt(a**2+b**2) without overflow or */
+/* destructive underflow. */
+
+ tau = dlapy2_(c__, s);
+ z__[j] = tau;
+ z__[jprev] = 0.;
+ *c__ /= tau;
+ *s = -(*s) / tau;
+
+/* Record the appropriate Givens rotation */
+
+ if (*icompq == 1) {
+ ++(*givptr);
+ idxjp = idxq[idx[jprev] + 1];
+ idxj = idxq[idx[j] + 1];
+ if (idxjp <= nlp1) {
+ --idxjp;
+ }
+ if (idxj <= nlp1) {
+ --idxj;
+ }
+ givcol[*givptr + (givcol_dim1 << 1)] = idxjp;
+ givcol[*givptr + givcol_dim1] = idxj;
+ givnum[*givptr + (givnum_dim1 << 1)] = *c__;
+ givnum[*givptr + givnum_dim1] = *s;
+ }
+/* Apply the rotation to the singular-vector components. */
+ drot_(&c__1, &vf[jprev], &c__1, &vf[j], &c__1, c__, s);
+ drot_(&c__1, &vl[jprev], &c__1, &vl[j], &c__1, c__, s);
+ --k2;
+ idxp[k2] = jprev;
+ jprev = j;
+ } else {
+/* Not deflatable: keep JPREV as a non-deflated value. */
+ ++(*k);
+ zw[*k] = z__[jprev];
+ dsigma[*k] = d__[jprev];
+ idxp[*k] = jprev;
+ jprev = j;
+ }
+ }
+ goto L80;
+L90:
+
+/* Record the last singular value. */
+
+ ++(*k);
+ zw[*k] = z__[jprev];
+ dsigma[*k] = d__[jprev];
+ idxp[*k] = jprev;
+
+L100:
+
+/* Sort the singular values into DSIGMA. The singular values which */
+/* were not deflated go into the first K slots of DSIGMA, except */
+/* that DSIGMA(1) is treated separately. */
+
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ jp = idxp[j];
+ dsigma[j] = d__[jp];
+ vfw[j] = vf[jp];
+ vlw[j] = vl[jp];
+/* L110: */
+ }
+ if (*icompq == 1) {
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ jp = idxp[j];
+ perm[j] = idxq[idx[jp] + 1];
+ if (perm[j] <= nlp1) {
+ --perm[j];
+ }
+/* L120: */
+ }
+ }
+
+/* The deflated singular values go back into the last N - K slots of */
+/* D. */
+
+ i__1 = n - *k;
+ dcopy_(&i__1, &dsigma[*k + 1], &c__1, &d__[*k + 1], &c__1);
+
+/* Determine DSIGMA(1), DSIGMA(2), Z(1), VF(1), VL(1), VF(M), and */
+/* VL(M). */
+
+ dsigma[1] = 0.;
+ hlftol = tol / 2.;
+/* NOTE(review): bumping a tiny DSIGMA(2) up to HLFTOL presumably */
+/* guards the secular-equation solve in DLASD8 -- confirm vs dlasd7.f. */
+ if (abs(dsigma[2]) <= hlftol) {
+ dsigma[2] = hlftol;
+ }
+ if (m > n) {
+ z__[1] = dlapy2_(&z1, &z__[m]);
+ if (z__[1] <= tol) {
+ *c__ = 1.;
+ *s = 0.;
+ z__[1] = tol;
+ } else {
+ *c__ = z1 / z__[1];
+ *s = -z__[m] / z__[1];
+ }
+ drot_(&c__1, &vf[m], &c__1, &vf[1], &c__1, c__, s);
+ drot_(&c__1, &vl[m], &c__1, &vl[1], &c__1, c__, s);
+ } else {
+ if (abs(z1) <= tol) {
+ z__[1] = tol;
+ } else {
+ z__[1] = z1;
+ }
+ }
+
+/* Restore Z, VF, and VL. */
+
+ i__1 = *k - 1;
+ dcopy_(&i__1, &zw[2], &c__1, &z__[2], &c__1);
+ i__1 = n - 1;
+ dcopy_(&i__1, &vfw[2], &c__1, &vf[2], &c__1);
+ i__1 = n - 1;
+ dcopy_(&i__1, &vlw[2], &c__1, &vl[2], &c__1);
+
+ return 0;
+
+/* End of DLASD7 */
+
+} /* dlasd7_ */
diff --git a/contrib/libs/clapack/dlasd8.c b/contrib/libs/clapack/dlasd8.c
new file mode 100644
index 0000000000..ab1b6c2403
--- /dev/null
+++ b/contrib/libs/clapack/dlasd8.c
@@ -0,0 +1,326 @@
+/* dlasd8.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static doublereal c_b8 = 1.;
+
+/* Subroutine */ int dlasd8_(integer *icompq, integer *k, doublereal *d__,
+	doublereal *z__, doublereal *vf, doublereal *vl, doublereal *difl,
+	doublereal *difr, integer *lddifr, doublereal *dsigma, doublereal *
+	work, integer *info)
+{
+    /* System generated locals */
+    integer difr_dim1, difr_offset, i__1, i__2;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+    /* Local variables */
+    integer i__, j;
+    doublereal dj, rho;
+    integer iwk1, iwk2, iwk3;
+    extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+	     integer *);
+    doublereal temp;
+    extern doublereal dnrm2_(integer *, doublereal *, integer *);
+    integer iwk2i, iwk3i;
+    doublereal diflj, difrj, dsigj;
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    extern doublereal dlamc3_(doublereal *, doublereal *);
+    extern /* Subroutine */ int dlasd4_(integer *, integer *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, integer *), dlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, integer *), dlaset_(char *, integer *, integer
+	    *, doublereal *, doublereal *, doublereal *, integer *),
+	    xerbla_(char *, integer *);
+    doublereal dsigjp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* October 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASD8 finds the square roots of the roots of the secular equation, */
+/* as defined by the values in DSIGMA and Z. It makes the appropriate */
+/* calls to DLASD4, and stores, for each element in D, the distance */
+/* to its two nearest poles (elements in DSIGMA). It also updates */
+/* the arrays VF and VL, the first and last components of all the */
+/* right singular vectors of the original bidiagonal matrix. */
+
+/* DLASD8 is called from DLASD6. */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* Specifies whether singular vectors are to be computed in */
+/* factored form in the calling routine: */
+/* = 0: Compute singular values only. */
+/* = 1: Compute singular vectors in factored form as well. */
+
+/* K (input) INTEGER */
+/* The number of terms in the rational function to be solved */
+/* by DLASD4. K >= 1. */
+
+/* D (output) DOUBLE PRECISION array, dimension ( K ) */
+/* On output, D contains the updated singular values. */
+
+/* Z (input/output) DOUBLE PRECISION array, dimension ( K ) */
+/* On entry, the first K elements of this array contain the */
+/* components of the deflation-adjusted updating row vector. */
+/* On exit, Z is updated. */
+
+/* VF (input/output) DOUBLE PRECISION array, dimension ( K ) */
+/* On entry, VF contains information passed through DBEDE8. */
+/* On exit, VF contains the first K components of the first */
+/* components of all right singular vectors of the bidiagonal */
+/* matrix. */
+
+/* VL (input/output) DOUBLE PRECISION array, dimension ( K ) */
+/* On entry, VL contains information passed through DBEDE8. */
+/* On exit, VL contains the first K components of the last */
+/* components of all right singular vectors of the bidiagonal */
+/* matrix. */
+
+/* DIFL (output) DOUBLE PRECISION array, dimension ( K ) */
+/* On exit, DIFL(I) = D(I) - DSIGMA(I). */
+
+/* DIFR (output) DOUBLE PRECISION array, */
+/* dimension ( LDDIFR, 2 ) if ICOMPQ = 1 and */
+/* dimension ( K ) if ICOMPQ = 0. */
+/* On exit, DIFR(I,1) = D(I) - DSIGMA(I+1), DIFR(K,1) is not */
+/* defined and will not be referenced. */
+
+/* If ICOMPQ = 1, DIFR(1:K,2) is an array containing the */
+/* normalizing factors for the right singular vector matrix. */
+
+/* LDDIFR (input) INTEGER */
+/* The leading dimension of DIFR, must be at least K. */
+
+/* DSIGMA (input/output) DOUBLE PRECISION array, dimension ( K ) */
+/* On entry, the first K elements of this array contain the old */
+/* roots of the deflated updating problem. These are the poles */
+/* of the secular equation. */
+/* On exit, the elements of DSIGMA may be very slightly altered */
+/* in value. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension at least 3 * K */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, an singular value did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --z__;
+    --vf;
+    --vl;
+    --difl;
+    difr_dim1 = *lddifr;
+    difr_offset = 1 + difr_dim1;
+    difr -= difr_offset;
+    --dsigma;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*icompq < 0 || *icompq > 1) {
+	*info = -1;
+    } else if (*k < 1) {
+	*info = -2;
+    } else if (*lddifr < *k) {
+	*info = -9;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLASD8", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*k == 1) {
+	d__[1] = abs(z__[1]);
+	difl[1] = d__[1];
+	if (*icompq == 1) {
+	    difl[2] = 1.;
+	    difr[(difr_dim1 << 1) + 1] = 1.;
+	}
+	return 0;
+    }
+
+/* Modify values DSIGMA(i) to make sure all DSIGMA(i)-DSIGMA(j) can */
+/* be computed with high relative accuracy (barring over/underflow). */
+/* This is a problem on machines without a guard digit in */
+/* add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2). */
+/* The following code replaces DSIGMA(I) by 2*DSIGMA(I)-DSIGMA(I), */
+/* which on any of these machines zeros out the bottommost */
+/* bit of DSIGMA(I) if it is 1; this makes the subsequent */
+/* subtractions DSIGMA(I)-DSIGMA(J) unproblematic when cancellation */
+/* occurs. On binary machines with a guard digit (almost all */
+/* machines) it does not change DSIGMA(I) at all. On hexadecimal */
+/* and decimal machines with a guard digit, it slightly */
+/* changes the bottommost bits of DSIGMA(I). It does not account */
+/* for hexadecimal or decimal machines without guard digits */
+/* (we know of none). We use a subroutine call to compute */
+/* 2*DLAMBDA(I) to prevent optimizing compilers from eliminating */
+/* this code. */
+
+    i__1 = *k;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dsigma[i__] = dlamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__];
+/* L10: */
+    }
+
+/* Book keeping. */
+
+/* WORK is split into three K-length panels: WORK(IWK1) holds the */
+/* DLASD4 deltas, WORK(IWK2) scratch from DLASD4, WORK(IWK3) the */
+/* running products used to rebuild Z below. */
+
+    iwk1 = 1;
+    iwk2 = iwk1 + *k;
+    iwk3 = iwk2 + *k;
+    iwk2i = iwk2 - 1;
+    iwk3i = iwk3 - 1;
+
+/* Normalize Z. */
+
+    rho = dnrm2_(k, &z__[1], &c__1);
+    dlascl_("G", &c__0, &c__0, &rho, &c_b8, k, &c__1, &z__[1], k, info);
+    rho *= rho;
+
+/* Initialize WORK(IWK3). */
+
+    dlaset_("A", k, &c__1, &c_b8, &c_b8, &work[iwk3], k);
+
+/* Compute the updated singular values, the arrays DIFL, DIFR, */
+/* and the updated Z. */
+
+    i__1 = *k;
+    for (j = 1; j <= i__1; ++j) {
+	dlasd4_(k, &j, &dsigma[1], &z__[1], &work[iwk1], &rho, &d__[j], &work[
+		iwk2], info);
+
+/* If the root finder fails, the computation is terminated. */
+
+	if (*info != 0) {
+	    return 0;
+	}
+/* Accumulate, in WORK(IWK3), the products over all roots that are */
+/* used to reconstruct Z after the loop. */
+	work[iwk3i + j] = work[iwk3i + j] * work[j] * work[iwk2i + j];
+	difl[j] = -work[j];
+	difr[j + difr_dim1] = -work[j + 1];
+	i__2 = j - 1;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i +
+		    i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[
+		    j]);
+/* L20: */
+	}
+	i__2 = *k;
+	for (i__ = j + 1; i__ <= i__2; ++i__) {
+	    work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i +
+		    i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[
+		    j]);
+/* L30: */
+	}
+/* L40: */
+    }
+
+/* Compute updated Z. */
+
+    i__1 = *k;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	d__2 = sqrt((d__1 = work[iwk3i + i__], abs(d__1)));
+	z__[i__] = d_sign(&d__2, &z__[i__]);
+/* L50: */
+    }
+
+/* Update VF and VL. */
+
+    i__1 = *k;
+    for (j = 1; j <= i__1; ++j) {
+	diflj = difl[j];
+	dj = d__[j];
+	dsigj = -dsigma[j];
+/* DIFR(K,1) is not defined (see DIFR above), so DIFRJ and DSIGJP */
+/* are only set -- and only used -- for J < K. */
+	if (j < *k) {
+	    difrj = -difr[j + difr_dim1];
+	    dsigjp = -dsigma[j + 1];
+	}
+	work[j] = -z__[j] / diflj / (dsigma[j] + dj);
+	i__2 = j - 1;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    work[i__] = z__[i__] / (dlamc3_(&dsigma[i__], &dsigj) - diflj) / (
+		    dsigma[i__] + dj);
+/* L60: */
+	}
+	i__2 = *k;
+	for (i__ = j + 1; i__ <= i__2; ++i__) {
+	    work[i__] = z__[i__] / (dlamc3_(&dsigma[i__], &dsigjp) + difrj) /
+		    (dsigma[i__] + dj);
+/* L70: */
+	}
+/* Normalize the J-th vector by its 2-norm before projecting it */
+/* onto VF and VL; the norm is also the DIFR(J,2) scaling factor. */
+	temp = dnrm2_(k, &work[1], &c__1);
+	work[iwk2i + j] = ddot_(k, &work[1], &c__1, &vf[1], &c__1) / temp;
+	work[iwk3i + j] = ddot_(k, &work[1], &c__1, &vl[1], &c__1) / temp;
+	if (*icompq == 1) {
+	    difr[j + (difr_dim1 << 1)] = temp;
+	}
+/* L80: */
+    }
+
+    dcopy_(k, &work[iwk2], &c__1, &vf[1], &c__1);
+    dcopy_(k, &work[iwk3], &c__1, &vl[1], &c__1);
+
+    return 0;
+
+/* End of DLASD8 */
+
+} /* dlasd8_ */
diff --git a/contrib/libs/clapack/dlasda.c b/contrib/libs/clapack/dlasda.c
new file mode 100644
index 0000000000..4a501d195b
--- /dev/null
+++ b/contrib/libs/clapack/dlasda.c
@@ -0,0 +1,488 @@
+/* dlasda.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__0 = 0;
+static doublereal c_b11 = 0.;
+static doublereal c_b12 = 1.;
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int dlasda_(integer *icompq, integer *smlsiz, integer *n,
+	integer *sqre, doublereal *d__, doublereal *e, doublereal *u, integer
+	*ldu, doublereal *vt, integer *k, doublereal *difl, doublereal *difr,
+	doublereal *z__, doublereal *poles, integer *givptr, integer *givcol,
+	integer *ldgcol, integer *perm, doublereal *givnum, doublereal *c__,
+	doublereal *s, doublereal *work, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, difl_dim1,
+	    difl_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset,
+	    poles_dim1, poles_offset, u_dim1, u_offset, vt_dim1, vt_offset,
+	    z_dim1, z_offset, i__1, i__2;
+
+    /* Builtin functions */
+    integer pow_ii(integer *, integer *);
+
+    /* Local variables */
+    integer i__, j, m, i1, ic, lf, nd, ll, nl, vf, nr, vl, im1, ncc, nlf, nrf,
+	     vfi, iwk, vli, lvl, nru, ndb1, nlp1, lvl2, nrp1;
+    doublereal beta;
+    integer idxq, nlvl;
+    doublereal alpha;
+    integer inode, ndiml, ndimr, idxqi, itemp;
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    integer sqrei;
+    extern /* Subroutine */ int dlasd6_(integer *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, integer *, integer *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+	    doublereal *, integer *, integer *);
+    integer nwork1, nwork2;
+    extern /* Subroutine */ int dlasdq_(char *, integer *, integer *, integer
+	    *, integer *, integer *, doublereal *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *), dlasdt_(integer *, integer *,
+	    integer *, integer *, integer *, integer *, integer *), dlaset_(
+	    char *, integer *, integer *, doublereal *, doublereal *,
+	    doublereal *, integer *), xerbla_(char *, integer *);
+    integer smlszp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Using a divide and conquer approach, DLASDA computes the singular */
+/* value decomposition (SVD) of a real upper bidiagonal N-by-M matrix */
+/* B with diagonal D and offdiagonal E, where M = N + SQRE. The */
+/* algorithm computes the singular values in the SVD B = U * S * VT. */
+/* The orthogonal matrices U and VT are optionally computed in */
+/* compact form. */
+
+/* A related subroutine, DLASD0, computes the singular values and */
+/* the singular vectors in explicit form. */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* Specifies whether singular vectors are to be computed */
+/* in compact form, as follows */
+/* = 0: Compute singular values only. */
+/* = 1: Compute singular vectors of upper bidiagonal */
+/* matrix in compact form. */
+
+/* SMLSIZ (input) INTEGER */
+/* The maximum size of the subproblems at the bottom of the */
+/* computation tree. */
+
+/* N (input) INTEGER */
+/* The row dimension of the upper bidiagonal matrix. This is */
+/* also the dimension of the main diagonal array D. */
+
+/* SQRE (input) INTEGER */
+/* Specifies the column dimension of the bidiagonal matrix. */
+/* = 0: The bidiagonal matrix has column dimension M = N; */
+/* = 1: The bidiagonal matrix has column dimension M = N + 1. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension ( N ) */
+/* On entry D contains the main diagonal of the bidiagonal */
+/* matrix. On exit D, if INFO = 0, contains its singular values. */
+
+/* E (input) DOUBLE PRECISION array, dimension ( M-1 ) */
+/* Contains the subdiagonal entries of the bidiagonal matrix. */
+/* On exit, E has been destroyed. */
+
+/* U (output) DOUBLE PRECISION array, */
+/* dimension ( LDU, SMLSIZ ) if ICOMPQ = 1, and not referenced */
+/* if ICOMPQ = 0. If ICOMPQ = 1, on exit, U contains the left */
+/* singular vector matrices of all subproblems at the bottom */
+/* level. */
+
+/* LDU (input) INTEGER, LDU = > N. */
+/* The leading dimension of arrays U, VT, DIFL, DIFR, POLES, */
+/* GIVNUM, and Z. */
+
+/* VT (output) DOUBLE PRECISION array, */
+/* dimension ( LDU, SMLSIZ+1 ) if ICOMPQ = 1, and not referenced */
+/* if ICOMPQ = 0. If ICOMPQ = 1, on exit, VT' contains the right */
+/* singular vector matrices of all subproblems at the bottom */
+/* level. */
+
+/* K (output) INTEGER array, */
+/* dimension ( N ) if ICOMPQ = 1 and dimension 1 if ICOMPQ = 0. */
+/* If ICOMPQ = 1, on exit, K(I) is the dimension of the I-th */
+/* secular equation on the computation tree. */
+
+/* DIFL (output) DOUBLE PRECISION array, dimension ( LDU, NLVL ), */
+/* where NLVL = floor(log_2 (N/SMLSIZ))). */
+
+/* DIFR (output) DOUBLE PRECISION array, */
+/* dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1 and */
+/* dimension ( N ) if ICOMPQ = 0. */
+/* If ICOMPQ = 1, on exit, DIFL(1:N, I) and DIFR(1:N, 2 * I - 1) */
+/* record distances between singular values on the I-th */
+/* level and singular values on the (I -1)-th level, and */
+/* DIFR(1:N, 2 * I ) contains the normalizing factors for */
+/* the right singular vector matrix. See DLASD8 for details. */
+
+/* Z (output) DOUBLE PRECISION array, */
+/* dimension ( LDU, NLVL ) if ICOMPQ = 1 and */
+/* dimension ( N ) if ICOMPQ = 0. */
+/* The first K elements of Z(1, I) contain the components of */
+/* the deflation-adjusted updating row vector for subproblems */
+/* on the I-th level. */
+
+/* POLES (output) DOUBLE PRECISION array, */
+/* dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1, and not referenced */
+/* if ICOMPQ = 0. If ICOMPQ = 1, on exit, POLES(1, 2*I - 1) and */
+/* POLES(1, 2*I) contain the new and old singular values */
+/* involved in the secular equations on the I-th level. */
+
+/* GIVPTR (output) INTEGER array, */
+/* dimension ( N ) if ICOMPQ = 1, and not referenced if */
+/* ICOMPQ = 0. If ICOMPQ = 1, on exit, GIVPTR( I ) records */
+/* the number of Givens rotations performed on the I-th */
+/* problem on the computation tree. */
+
+/* GIVCOL (output) INTEGER array, */
+/* dimension ( LDGCOL, 2 * NLVL ) if ICOMPQ = 1, and not */
+/* referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, for each I, */
+/* GIVCOL(1, 2 *I - 1) and GIVCOL(1, 2 *I) record the locations */
+/* of Givens rotations performed on the I-th level on the */
+/* computation tree. */
+
+/* LDGCOL (input) INTEGER, LDGCOL = > N. */
+/* The leading dimension of arrays GIVCOL and PERM. */
+
+/* PERM (output) INTEGER array, */
+/* dimension ( LDGCOL, NLVL ) if ICOMPQ = 1, and not referenced */
+/* if ICOMPQ = 0. If ICOMPQ = 1, on exit, PERM(1, I) records */
+/* permutations done on the I-th level of the computation tree. */
+
+/* GIVNUM (output) DOUBLE PRECISION array, */
+/* dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1, and not */
+/* referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, for each I, */
+/* GIVNUM(1, 2 *I - 1) and GIVNUM(1, 2 *I) record the C- and S- */
+/* values of Givens rotations performed on the I-th level on */
+/* the computation tree. */
+
+/* C (output) DOUBLE PRECISION array, */
+/* dimension ( N ) if ICOMPQ = 1, and dimension 1 if ICOMPQ = 0. */
+/* If ICOMPQ = 1 and the I-th subproblem is not square, on exit, */
+/* C( I ) contains the C-value of a Givens rotation related to */
+/* the right null space of the I-th subproblem. */
+
+/* S (output) DOUBLE PRECISION array, dimension ( N ) if */
+/* ICOMPQ = 1, and dimension 1 if ICOMPQ = 0. If ICOMPQ = 1 */
+/* and the I-th subproblem is not square, on exit, S( I ) */
+/* contains the S-value of a Givens rotation related to */
+/* the right null space of the I-th subproblem. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension */
+/* (6 * N + (SMLSIZ + 1)*(SMLSIZ + 1)). */
+
+/* IWORK (workspace) INTEGER array. */
+/* Dimension must be at least (7 * N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, an singular value did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    givnum_dim1 = *ldu;
+    givnum_offset = 1 + givnum_dim1;
+    givnum -= givnum_offset;
+    poles_dim1 = *ldu;
+    poles_offset = 1 + poles_dim1;
+    poles -= poles_offset;
+    z_dim1 = *ldu;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    difr_dim1 = *ldu;
+    difr_offset = 1 + difr_dim1;
+    difr -= difr_offset;
+    difl_dim1 = *ldu;
+    difl_offset = 1 + difl_dim1;
+    difl -= difl_offset;
+    vt_dim1 = *ldu;
+    vt_offset = 1 + vt_dim1;
+    vt -= vt_offset;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1;
+    u -= u_offset;
+    --k;
+    --givptr;
+    perm_dim1 = *ldgcol;
+    perm_offset = 1 + perm_dim1;
+    perm -= perm_offset;
+    givcol_dim1 = *ldgcol;
+    givcol_offset = 1 + givcol_dim1;
+    givcol -= givcol_offset;
+    --c__;
+    --s;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*icompq < 0 || *icompq > 1) {
+	*info = -1;
+    } else if (*smlsiz < 3) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*sqre < 0 || *sqre > 1) {
+	*info = -4;
+    } else if (*ldu < *n + *sqre) {
+	*info = -8;
+    } else if (*ldgcol < *n) {
+	*info = -17;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLASDA", &i__1);
+	return 0;
+    }
+
+    m = *n + *sqre;
+
+/* If the input matrix is too small, call DLASDQ to find the SVD. */
+
+    if (*n <= *smlsiz) {
+	if (*icompq == 0) {
+	    dlasdq_("U", sqre, n, &c__0, &c__0, &c__0, &d__[1], &e[1], &vt[
+		    vt_offset], ldu, &u[u_offset], ldu, &u[u_offset], ldu, &
+		    work[1], info);
+	} else {
+	    dlasdq_("U", sqre, n, &m, n, &c__0, &d__[1], &e[1], &vt[vt_offset]
+, ldu, &u[u_offset], ldu, &u[u_offset], ldu, &work[1],
+		     info);
+	}
+	return 0;
+    }
+
+/* Book-keeping and set up the computation tree. */
+
+/* IWORK is partitioned into four N-length panels: INODE (center rows */
+/* of the tree nodes), NDIML/NDIMR (left/right subproblem sizes), */
+/* IDXQ (per-subproblem sorting permutations), then IWK scratch for */
+/* DLASD6. */
+
+    inode = 1;
+    ndiml = inode + *n;
+    ndimr = ndiml + *n;
+    idxq = ndimr + *n;
+    iwk = idxq + *n;
+
+    ncc = 0;
+    nru = 0;
+
+/* WORK is partitioned as: VF and VL panels of length M, then two */
+/* scratch panels starting at NWORK1 and NWORK2. */
+
+    smlszp = *smlsiz + 1;
+    vf = 1;
+    vl = vf + m;
+    nwork1 = vl + m;
+    nwork2 = nwork1 + smlszp * smlszp;
+
+    dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+	    smlsiz);
+
+/* for the nodes on bottom level of the tree, solve */
+/* their subproblems by DLASDQ. */
+
+    ndb1 = (nd + 1) / 2;
+    i__1 = nd;
+    for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/* IC : center row of each node */
+/* NL : number of rows of left subproblem */
+/* NR : number of rows of right subproblem */
+/* NLF: starting row of the left subproblem */
+/* NRF: starting row of the right subproblem */
+
+	i1 = i__ - 1;
+	ic = iwork[inode + i1];
+	nl = iwork[ndiml + i1];
+	nlp1 = nl + 1;
+	nr = iwork[ndimr + i1];
+	nlf = ic - nl;
+	nrf = ic + 1;
+	idxqi = idxq + nlf - 2;
+	vfi = vf + nlf - 1;
+	vli = vl + nlf - 1;
+	sqrei = 1;
+	if (*icompq == 0) {
+	    dlaset_("A", &nlp1, &nlp1, &c_b11, &c_b12, &work[nwork1], &smlszp);
+	    dlasdq_("U", &sqrei, &nl, &nlp1, &nru, &ncc, &d__[nlf], &e[nlf], &
+		    work[nwork1], &smlszp, &work[nwork2], &nl, &work[nwork2],
+		    &nl, &work[nwork2], info);
+	    itemp = nwork1 + nl * smlszp;
+	    dcopy_(&nlp1, &work[nwork1], &c__1, &work[vfi], &c__1);
+	    dcopy_(&nlp1, &work[itemp], &c__1, &work[vli], &c__1);
+	} else {
+	    dlaset_("A", &nl, &nl, &c_b11, &c_b12, &u[nlf + u_dim1], ldu);
+	    dlaset_("A", &nlp1, &nlp1, &c_b11, &c_b12, &vt[nlf + vt_dim1],
+		    ldu);
+	    dlasdq_("U", &sqrei, &nl, &nlp1, &nl, &ncc, &d__[nlf], &e[nlf], &
+		    vt[nlf + vt_dim1], ldu, &u[nlf + u_dim1], ldu, &u[nlf +
+		    u_dim1], ldu, &work[nwork1], info);
+	    dcopy_(&nlp1, &vt[nlf + vt_dim1], &c__1, &work[vfi], &c__1);
+	    dcopy_(&nlp1, &vt[nlf + nlp1 * vt_dim1], &c__1, &work[vli], &c__1)
+		    ;
+	}
+	if (*info != 0) {
+	    return 0;
+	}
+	i__2 = nl;
+	for (j = 1; j <= i__2; ++j) {
+	    iwork[idxqi + j] = j;
+/* L10: */
+	}
+/* Only the right subproblem of the last node inherits SQRE = 0; */
+/* every other right subproblem carries one extra column (SQREI = 1). */
+	if (i__ == nd && *sqre == 0) {
+	    sqrei = 0;
+	} else {
+	    sqrei = 1;
+	}
+	idxqi += nlp1;
+	vfi += nlp1;
+	vli += nlp1;
+	nrp1 = nr + sqrei;
+	if (*icompq == 0) {
+	    dlaset_("A", &nrp1, &nrp1, &c_b11, &c_b12, &work[nwork1], &smlszp);
+	    dlasdq_("U", &sqrei, &nr, &nrp1, &nru, &ncc, &d__[nrf], &e[nrf], &
+		    work[nwork1], &smlszp, &work[nwork2], &nr, &work[nwork2],
+		    &nr, &work[nwork2], info);
+	    itemp = nwork1 + (nrp1 - 1) * smlszp;
+	    dcopy_(&nrp1, &work[nwork1], &c__1, &work[vfi], &c__1);
+	    dcopy_(&nrp1, &work[itemp], &c__1, &work[vli], &c__1);
+	} else {
+	    dlaset_("A", &nr, &nr, &c_b11, &c_b12, &u[nrf + u_dim1], ldu);
+	    dlaset_("A", &nrp1, &nrp1, &c_b11, &c_b12, &vt[nrf + vt_dim1],
+		    ldu);
+	    dlasdq_("U", &sqrei, &nr, &nrp1, &nr, &ncc, &d__[nrf], &e[nrf], &
+		    vt[nrf + vt_dim1], ldu, &u[nrf + u_dim1], ldu, &u[nrf +
+		    u_dim1], ldu, &work[nwork1], info);
+	    dcopy_(&nrp1, &vt[nrf + vt_dim1], &c__1, &work[vfi], &c__1);
+	    dcopy_(&nrp1, &vt[nrf + nrp1 * vt_dim1], &c__1, &work[vli], &c__1)
+		    ;
+	}
+	if (*info != 0) {
+	    return 0;
+	}
+	i__2 = nr;
+	for (j = 1; j <= i__2; ++j) {
+	    iwork[idxqi + j] = j;
+/* L20: */
+	}
+/* L30: */
+    }
+
+/* Now conquer each subproblem bottom-up. */
+
+/* J counts the non-leaf subproblems top-down; it is decremented */
+/* before each merge and, when ICOMPQ = 1, indexes the per-problem */
+/* outputs K, GIVPTR, C and S. */
+
+    j = pow_ii(&c__2, &nlvl);
+    for (lvl = nlvl; lvl >= 1; --lvl) {
+	lvl2 = (lvl << 1) - 1;
+
+/* Find the first node LF and last node LL on */
+/* the current level LVL. */
+
+	if (lvl == 1) {
+	    lf = 1;
+	    ll = 1;
+	} else {
+	    i__1 = lvl - 1;
+	    lf = pow_ii(&c__2, &i__1);
+	    ll = (lf << 1) - 1;
+	}
+	i__1 = ll;
+	for (i__ = lf; i__ <= i__1; ++i__) {
+	    im1 = i__ - 1;
+	    ic = iwork[inode + im1];
+	    nl = iwork[ndiml + im1];
+	    nr = iwork[ndimr + im1];
+	    nlf = ic - nl;
+	    nrf = ic + 1;
+	    if (i__ == ll) {
+		sqrei = *sqre;
+	    } else {
+		sqrei = 1;
+	    }
+	    vfi = vf + nlf - 1;
+	    vli = vl + nlf - 1;
+	    idxqi = idxq + nlf - 1;
+	    alpha = d__[ic];
+	    beta = e[ic];
+	    if (*icompq == 0) {
+		dlasd6_(icompq, &nl, &nr, &sqrei, &d__[nlf], &work[vfi], &
+			work[vli], &alpha, &beta, &iwork[idxqi], &perm[
+			perm_offset], &givptr[1], &givcol[givcol_offset],
+			ldgcol, &givnum[givnum_offset], ldu, &poles[
+			poles_offset], &difl[difl_offset], &difr[difr_offset],
+			&z__[z_offset], &k[1], &c__[1], &s[1], &work[nwork1],
+			&iwork[iwk], info);
+	    } else {
+		--j;
+		dlasd6_(icompq, &nl, &nr, &sqrei, &d__[nlf], &work[vfi], &
+			work[vli], &alpha, &beta, &iwork[idxqi], &perm[nlf +
+			lvl * perm_dim1], &givptr[j], &givcol[nlf + lvl2 *
+			givcol_dim1], ldgcol, &givnum[nlf + lvl2 *
+			givnum_dim1], ldu, &poles[nlf + lvl2 * poles_dim1], &
+			difl[nlf + lvl * difl_dim1], &difr[nlf + lvl2 *
+			difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[j],
+			&s[j], &work[nwork1], &iwork[iwk], info);
+	    }
+	    if (*info != 0) {
+		return 0;
+	    }
+/* L40: */
+	}
+/* L50: */
+    }
+
+    return 0;
+
+/* End of DLASDA */
+
+} /* dlasda_ */
diff --git a/contrib/libs/clapack/dlasdq.c b/contrib/libs/clapack/dlasdq.c
new file mode 100644
index 0000000000..581a872b87
--- /dev/null
+++ b/contrib/libs/clapack/dlasdq.c
@@ -0,0 +1,380 @@
+/* dlasdq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dlasdq_(char *uplo, integer *sqre, integer *n, integer *
+ ncvt, integer *nru, integer *ncc, doublereal *d__, doublereal *e,
+ doublereal *vt, integer *ldvt, doublereal *u, integer *ldu,
+ doublereal *c__, integer *ldc, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+ i__2;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal r__, cs, sn;
+ integer np1, isub;
+ doublereal smin;
+ integer sqre1;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *), dswap_(integer *, doublereal *, integer *
+, doublereal *, integer *);
+ integer iuplo;
+ extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *), xerbla_(char *,
+ integer *), dbdsqr_(char *, integer *, integer *, integer
+ *, integer *, doublereal *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ logical rotate;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASDQ computes the singular value decomposition (SVD) of a real */
+/* (upper or lower) bidiagonal matrix with diagonal D and offdiagonal */
+/* E, accumulating the transformations if desired. Letting B denote */
+/* the input bidiagonal matrix, the algorithm computes orthogonal */
+/* matrices Q and P such that B = Q * S * P' (P' denotes the transpose */
+/* of P). The singular values S are overwritten on D. */
+
+/* The input matrix U is changed to U * Q if desired. */
+/* The input matrix VT is changed to P' * VT if desired. */
+/* The input matrix C is changed to Q' * C if desired. */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices With */
+/* Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, */
+/* LAPACK Working Note #3, for a detailed description of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* On entry, UPLO specifies whether the input bidiagonal matrix */
+/* is upper or lower bidiagonal, and wether it is square are */
+/* not. */
+/* UPLO = 'U' or 'u' B is upper bidiagonal. */
+/* UPLO = 'L' or 'l' B is lower bidiagonal. */
+
+/* SQRE (input) INTEGER */
+/* = 0: then the input matrix is N-by-N. */
+/* = 1: then the input matrix is N-by-(N+1) if UPLU = 'U' and */
+/* (N+1)-by-N if UPLU = 'L'. */
+
+/* The bidiagonal matrix has */
+/* N = NL + NR + 1 rows and */
+/* M = N + SQRE >= N columns. */
+
+/* N (input) INTEGER */
+/* On entry, N specifies the number of rows and columns */
+/* in the matrix. N must be at least 0. */
+
+/* NCVT (input) INTEGER */
+/* On entry, NCVT specifies the number of columns of */
+/* the matrix VT. NCVT must be at least 0. */
+
+/* NRU (input) INTEGER */
+/* On entry, NRU specifies the number of rows of */
+/* the matrix U. NRU must be at least 0. */
+
+/* NCC (input) INTEGER */
+/* On entry, NCC specifies the number of columns of */
+/* the matrix C. NCC must be at least 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, D contains the diagonal entries of the */
+/* bidiagonal matrix whose SVD is desired. On normal exit, */
+/* D contains the singular values in ascending order. */
+
+/* E (input/output) DOUBLE PRECISION array. */
+/* dimension is (N-1) if SQRE = 0 and N if SQRE = 1. */
+/* On entry, the entries of E contain the offdiagonal entries */
+/* of the bidiagonal matrix whose SVD is desired. On normal */
+/* exit, E will contain 0. If the algorithm does not converge, */
+/* D and E will contain the diagonal and superdiagonal entries */
+/* of a bidiagonal matrix orthogonally equivalent to the one */
+/* given as input. */
+
+/* VT (input/output) DOUBLE PRECISION array, dimension (LDVT, NCVT) */
+/* On entry, contains a matrix which on exit has been */
+/* premultiplied by P', dimension N-by-NCVT if SQRE = 0 */
+/* and (N+1)-by-NCVT if SQRE = 1 (not referenced if NCVT=0). */
+
+/* LDVT (input) INTEGER */
+/* On entry, LDVT specifies the leading dimension of VT as */
+/* declared in the calling (sub) program. LDVT must be at */
+/* least 1. If NCVT is nonzero LDVT must also be at least N. */
+
+/* U (input/output) DOUBLE PRECISION array, dimension (LDU, N) */
+/* On entry, contains a matrix which on exit has been */
+/* postmultiplied by Q, dimension NRU-by-N if SQRE = 0 */
+/* and NRU-by-(N+1) if SQRE = 1 (not referenced if NRU=0). */
+
+/* LDU (input) INTEGER */
+/* On entry, LDU specifies the leading dimension of U as */
+/* declared in the calling (sub) program. LDU must be at */
+/* least max( 1, NRU ) . */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC, NCC) */
+/* On entry, contains an N-by-NCC matrix which on exit */
+/* has been premultiplied by Q' dimension N-by-NCC if SQRE = 0 */
+/* and (N+1)-by-NCC if SQRE = 1 (not referenced if NCC=0). */
+
+/* LDC (input) INTEGER */
+/* On entry, LDC specifies the leading dimension of C as */
+/* declared in the calling (sub) program. LDC must be at */
+/* least 1. If NCC is nonzero, LDC must also be at least N. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (4*N) */
+/* Workspace. Only referenced if one of NCVT, NRU, or NCC is */
+/* nonzero, and if N is at least 2. */
+
+/* INFO (output) INTEGER */
+/* On exit, a value of 0 indicates a successful exit. */
+/* If INFO < 0, argument number -INFO is illegal. */
+/* If INFO > 0, the algorithm did not converge, and INFO */
+/* specifies how many superdiagonals did not converge. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ iuplo = 0;
+ if (lsame_(uplo, "U")) {
+ iuplo = 1;
+ }
+ if (lsame_(uplo, "L")) {
+ iuplo = 2;
+ }
+ if (iuplo == 0) {
+ *info = -1;
+ } else if (*sqre < 0 || *sqre > 1) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ncvt < 0) {
+ *info = -4;
+ } else if (*nru < 0) {
+ *info = -5;
+ } else if (*ncc < 0) {
+ *info = -6;
+ } else if (*ncvt == 0 && *ldvt < 1 || *ncvt > 0 && *ldvt < max(1,*n)) {
+ *info = -10;
+ } else if (*ldu < max(1,*nru)) {
+ *info = -12;
+ } else if (*ncc == 0 && *ldc < 1 || *ncc > 0 && *ldc < max(1,*n)) {
+ *info = -14;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASDQ", &i__1);
+ return 0;
+ }
+ if (*n == 0) {
+ return 0;
+ }
+
+/* ROTATE is true if any singular vectors desired, false otherwise */
+
+ rotate = *ncvt > 0 || *nru > 0 || *ncc > 0;
+ np1 = *n + 1;
+ sqre1 = *sqre;
+
+/* If matrix non-square upper bidiagonal, rotate to be lower */
+/* bidiagonal. The rotations are on the right. */
+
+ if (iuplo == 1 && sqre1 == 1) {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ if (rotate) {
+ work[i__] = cs;
+ work[*n + i__] = sn;
+ }
+/* L10: */
+ }
+ dlartg_(&d__[*n], &e[*n], &cs, &sn, &r__);
+ d__[*n] = r__;
+ e[*n] = 0.;
+ if (rotate) {
+ work[*n] = cs;
+ work[*n + *n] = sn;
+ }
+ iuplo = 2;
+ sqre1 = 0;
+
+/* Update singular vectors if desired. */
+
+ if (*ncvt > 0) {
+ dlasr_("L", "V", "F", &np1, ncvt, &work[1], &work[np1], &vt[
+ vt_offset], ldvt);
+ }
+ }
+
+/* If matrix lower bidiagonal, rotate to be upper bidiagonal */
+/* by applying Givens rotations on the left. */
+
+ if (iuplo == 2) {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ if (rotate) {
+ work[i__] = cs;
+ work[*n + i__] = sn;
+ }
+/* L20: */
+ }
+
+/* If matrix (N+1)-by-N lower bidiagonal, one additional */
+/* rotation is needed. */
+
+ if (sqre1 == 1) {
+ dlartg_(&d__[*n], &e[*n], &cs, &sn, &r__);
+ d__[*n] = r__;
+ if (rotate) {
+ work[*n] = cs;
+ work[*n + *n] = sn;
+ }
+ }
+
+/* Update singular vectors if desired. */
+
+ if (*nru > 0) {
+ if (sqre1 == 0) {
+ dlasr_("R", "V", "F", nru, n, &work[1], &work[np1], &u[
+ u_offset], ldu);
+ } else {
+ dlasr_("R", "V", "F", nru, &np1, &work[1], &work[np1], &u[
+ u_offset], ldu);
+ }
+ }
+ if (*ncc > 0) {
+ if (sqre1 == 0) {
+ dlasr_("L", "V", "F", n, ncc, &work[1], &work[np1], &c__[
+ c_offset], ldc);
+ } else {
+ dlasr_("L", "V", "F", &np1, ncc, &work[1], &work[np1], &c__[
+ c_offset], ldc);
+ }
+ }
+ }
+
+/* Call DBDSQR to compute the SVD of the reduced real */
+/* N-by-N upper bidiagonal matrix. */
+
+ dbdsqr_("U", n, ncvt, nru, ncc, &d__[1], &e[1], &vt[vt_offset], ldvt, &u[
+ u_offset], ldu, &c__[c_offset], ldc, &work[1], info);
+
+/* Sort the singular values into ascending order (insertion sort on */
+/* singular values, but only one transposition per singular vector) */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Scan for smallest D(I). */
+
+ isub = i__;
+ smin = d__[i__];
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ if (d__[j] < smin) {
+ isub = j;
+ smin = d__[j];
+ }
+/* L30: */
+ }
+ if (isub != i__) {
+
+/* Swap singular values and vectors. */
+
+ d__[isub] = d__[i__];
+ d__[i__] = smin;
+ if (*ncvt > 0) {
+ dswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[i__ + vt_dim1],
+ ldvt);
+ }
+ if (*nru > 0) {
+ dswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[i__ * u_dim1 + 1]
+, &c__1);
+ }
+ if (*ncc > 0) {
+ dswap_(ncc, &c__[isub + c_dim1], ldc, &c__[i__ + c_dim1], ldc)
+ ;
+ }
+ }
+/* L40: */
+ }
+
+ return 0;
+
+/* End of DLASDQ */
+
+} /* dlasdq_ */
diff --git a/contrib/libs/clapack/dlasdt.c b/contrib/libs/clapack/dlasdt.c
new file mode 100644
index 0000000000..0f25bddac8
--- /dev/null
+++ b/contrib/libs/clapack/dlasdt.c
@@ -0,0 +1,136 @@
+/* dlasdt.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlasdt_(integer *n, integer *lvl, integer *nd, integer *
+ inode, integer *ndiml, integer *ndimr, integer *msub)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Builtin functions */
+ double log(doublereal);
+
+ /* Local variables */
+ integer i__, il, ir, maxn;
+ doublereal temp;
+ integer nlvl, llst, ncrnt;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASDT creates a tree of subproblems for bidiagonal divide and */
+/* conquer. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* On entry, the number of diagonal elements of the */
+/* bidiagonal matrix. */
+
+/* LVL (output) INTEGER */
+/* On exit, the number of levels on the computation tree. */
+
+/* ND (output) INTEGER */
+/* On exit, the number of nodes on the tree. */
+
+/* INODE (output) INTEGER array, dimension ( N ) */
+/* On exit, centers of subproblems. */
+
+/* NDIML (output) INTEGER array, dimension ( N ) */
+/* On exit, row dimensions of left children. */
+
+/* NDIMR (output) INTEGER array, dimension ( N ) */
+/* On exit, row dimensions of right children. */
+
+/* MSUB (input) INTEGER. */
+/* On entry, the maximum row dimension each subproblem at the */
+/* bottom of the tree can be of. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Find the number of levels on the tree. */
+
+ /* Parameter adjustments */
+ --ndimr;
+ --ndiml;
+ --inode;
+
+ /* Function Body */
+ maxn = max(1,*n);
+ temp = log((doublereal) maxn / (doublereal) (*msub + 1)) / log(2.);
+ *lvl = (integer) temp + 1;
+
+ i__ = *n / 2;
+ inode[1] = i__ + 1;
+ ndiml[1] = i__;
+ ndimr[1] = *n - i__ - 1;
+ il = 0;
+ ir = 1;
+ llst = 1;
+ i__1 = *lvl - 1;
+ for (nlvl = 1; nlvl <= i__1; ++nlvl) {
+
+/* Constructing the tree at (NLVL+1)-st level. The number of */
+/* nodes created on this level is LLST * 2. */
+
+ i__2 = llst - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ il += 2;
+ ir += 2;
+ ncrnt = llst + i__;
+ ndiml[il] = ndiml[ncrnt] / 2;
+ ndimr[il] = ndiml[ncrnt] - ndiml[il] - 1;
+ inode[il] = inode[ncrnt] - ndimr[il] - 1;
+ ndiml[ir] = ndimr[ncrnt] / 2;
+ ndimr[ir] = ndimr[ncrnt] - ndiml[ir] - 1;
+ inode[ir] = inode[ncrnt] + ndiml[ir] + 1;
+/* L10: */
+ }
+ llst <<= 1;
+/* L20: */
+ }
+ *nd = (llst << 1) - 1;
+
+ return 0;
+
+/* End of DLASDT */
+
+} /* dlasdt_ */
diff --git a/contrib/libs/clapack/dlaset.c b/contrib/libs/clapack/dlaset.c
new file mode 100644
index 0000000000..98d304e6db
--- /dev/null
+++ b/contrib/libs/clapack/dlaset.c
@@ -0,0 +1,152 @@
+/* dlaset.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlaset_(char *uplo, integer *m, integer *n, doublereal *
+ alpha, doublereal *beta, doublereal *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASET initializes an m-by-n matrix A to BETA on the diagonal and */
+/* ALPHA on the offdiagonals. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies the part of the matrix A to be set. */
+/* = 'U': Upper triangular part is set; the strictly lower */
+/* triangular part of A is not changed. */
+/* = 'L': Lower triangular part is set; the strictly upper */
+/* triangular part of A is not changed. */
+/* Otherwise: All of the matrix A is set. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* ALPHA (input) DOUBLE PRECISION */
+/* The constant to which the offdiagonal elements are to be set. */
+
+/* BETA (input) DOUBLE PRECISION */
+/* The constant to which the diagonal elements are to be set. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On exit, the leading m-by-n submatrix of A is set as follows: */
+
+/* if UPLO = 'U', A(i,j) = ALPHA, 1<=i<=j-1, 1<=j<=n, */
+/* if UPLO = 'L', A(i,j) = ALPHA, j+1<=i<=m, 1<=j<=n, */
+/* otherwise, A(i,j) = ALPHA, 1<=i<=m, 1<=j<=n, i.ne.j, */
+
+/* and, for all UPLO, A(i,i) = BETA, 1<=i<=min(m,n). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ if (lsame_(uplo, "U")) {
+
+/* Set the strictly upper triangular or trapezoidal part of the */
+/* array to ALPHA. */
+
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = j - 1;
+ i__2 = min(i__3,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = *alpha;
+/* L10: */
+ }
+/* L20: */
+ }
+
+ } else if (lsame_(uplo, "L")) {
+
+/* Set the strictly lower triangular or trapezoidal part of the */
+/* array to ALPHA. */
+
+ i__1 = min(*m,*n);
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = *alpha;
+/* L30: */
+ }
+/* L40: */
+ }
+
+ } else {
+
+/* Set the leading m-by-n submatrix to ALPHA. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = *alpha;
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+/* Set the first min(M,N) diagonal elements to BETA. */
+
+ i__1 = min(*m,*n);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ a[i__ + i__ * a_dim1] = *beta;
+/* L70: */
+ }
+
+ return 0;
+
+/* End of DLASET */
+
+} /* dlaset_ */
diff --git a/contrib/libs/clapack/dlasq1.c b/contrib/libs/clapack/dlasq1.c
new file mode 100644
index 0000000000..7a6e8b7c07
--- /dev/null
+++ b/contrib/libs/clapack/dlasq1.c
@@ -0,0 +1,219 @@
+/* dlasq1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__2 = 2;
+static integer c__0 = 0;
+
+/* Subroutine */ int dlasq1_(integer *n, doublereal *d__, doublereal *e,
+ doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ doublereal d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ doublereal eps;
+ extern /* Subroutine */ int dlas2_(doublereal *, doublereal *, doublereal
+ *, doublereal *, doublereal *);
+ doublereal scale;
+ integer iinfo;
+ doublereal sigmn;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ doublereal sigmx;
+ extern /* Subroutine */ int dlasq2_(integer *, doublereal *, integer *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *), dlasrt_(
+ char *, integer *, doublereal *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Osni Marques of the Lawrence Berkeley National -- */
+/* -- Laboratory and Beresford Parlett of the Univ. of California at -- */
+/* -- Berkeley -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASQ1 computes the singular values of a real N-by-N bidiagonal */
+/* matrix with diagonal D and off-diagonal E. The singular values */
+/* are computed to high relative accuracy, in the absence of */
+/* denormalization, underflow and overflow. The algorithm was first */
+/* presented in */
+
+/* "Accurate singular values and differential qd algorithms" by K. V. */
+/* Fernando and B. N. Parlett, Numer. Math., Vol-67, No. 2, pp. 191-230, */
+/* 1994, */
+
+/* and the present implementation is described in "An implementation of */
+/* the dqds Algorithm (Positive Case)", LAPACK Working Note. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of rows and columns in the matrix. N >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, D contains the diagonal elements of the */
+/* bidiagonal matrix whose SVD is desired. On normal exit, */
+/* D contains the singular values in decreasing order. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, elements E(1:N-1) contain the off-diagonal elements */
+/* of the bidiagonal matrix whose SVD is desired. */
+/* On exit, E is overwritten. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (4*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: the algorithm failed */
+/* = 1, a split was marked by a positive value in E */
+/* = 2, current block of Z not diagonalized after 30*N */
+/* iterations (in inner while loop) */
+/* = 3, termination criterion of outer while loop not met */
+/* (program created more than N unreduced blocks) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --work;
+ --e;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -2;
+ i__1 = -(*info);
+ xerbla_("DLASQ1", &i__1);
+ return 0;
+ } else if (*n == 0) {
+ return 0;
+ } else if (*n == 1) {
+ d__[1] = abs(d__[1]);
+ return 0;
+ } else if (*n == 2) {
+ dlas2_(&d__[1], &e[1], &d__[2], &sigmn, &sigmx);
+ d__[1] = sigmx;
+ d__[2] = sigmn;
+ return 0;
+ }
+
+/* Estimate the largest singular value. */
+
+ sigmx = 0.;
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = (d__1 = d__[i__], abs(d__1));
+/* Computing MAX */
+ d__2 = sigmx, d__3 = (d__1 = e[i__], abs(d__1));
+ sigmx = max(d__2,d__3);
+/* L10: */
+ }
+ d__[*n] = (d__1 = d__[*n], abs(d__1));
+
+/* Early return if SIGMX is zero (matrix is already diagonal). */
+
+ if (sigmx == 0.) {
+ dlasrt_("D", n, &d__[1], &iinfo);
+ return 0;
+ }
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = sigmx, d__2 = d__[i__];
+ sigmx = max(d__1,d__2);
+/* L20: */
+ }
+
+/* Copy D and E into WORK (in the Z format) and scale (squaring the */
+/* input data makes scaling by a power of the radix pointless). */
+
+ eps = dlamch_("Precision");
+ safmin = dlamch_("Safe minimum");
+ scale = sqrt(eps / safmin);
+ dcopy_(n, &d__[1], &c__1, &work[1], &c__2);
+ i__1 = *n - 1;
+ dcopy_(&i__1, &e[1], &c__1, &work[2], &c__2);
+ i__1 = (*n << 1) - 1;
+ i__2 = (*n << 1) - 1;
+ dlascl_("G", &c__0, &c__0, &sigmx, &scale, &i__1, &c__1, &work[1], &i__2,
+ &iinfo);
+
+/* Compute the q's and e's. */
+
+ i__1 = (*n << 1) - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing 2nd power */
+ d__1 = work[i__];
+ work[i__] = d__1 * d__1;
+/* L30: */
+ }
+ work[*n * 2] = 0.;
+
+ dlasq2_(n, &work[1], info);
+
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = sqrt(work[i__]);
+/* L40: */
+ }
+ dlascl_("G", &c__0, &c__0, &scale, &sigmx, n, &c__1, &d__[1], n, &
+ iinfo);
+ }
+
+ return 0;
+
+/* End of DLASQ1 */
+
+} /* dlasq1_ */
diff --git a/contrib/libs/clapack/dlasq2.c b/contrib/libs/clapack/dlasq2.c
new file mode 100644
index 0000000000..3b041a2d8d
--- /dev/null
+++ b/contrib/libs/clapack/dlasq2.c
@@ -0,0 +1,602 @@
+/* dlasq2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c emits Fortran-style calls in which every argument is passed by */
+/* address, so integer literals used as call arguments must live in */
+/* addressable static storage (they are passed as &c__1, &c__2, ... */
+/* in the calls to ilaenv_ and xerbla_ below). */
+
+static integer c__1 = 1;
+static integer c__2 = 2;
+static integer c__10 = 10;
+static integer c__3 = 3;
+static integer c__4 = 4;
+static integer c__11 = 11;
+
+/* Subroutine */ int dlasq2_(integer *n, doublereal *z__, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ doublereal d__, e, g;
+ integer k;
+ doublereal s, t;
+ integer i0, i4, n0;
+ doublereal dn;
+ integer pp;
+ doublereal dn1, dn2, dee, eps, tau, tol;
+ integer ipn4;
+ doublereal tol2;
+ logical ieee;
+ integer nbig;
+ doublereal dmin__, emin, emax;
+ integer kmin, ndiv, iter;
+ doublereal qmin, temp, qmax, zmax;
+ integer splt;
+ doublereal dmin1, dmin2;
+ integer nfail;
+ doublereal desig, trace, sigma;
+ integer iinfo, ttype;
+ extern /* Subroutine */ int dlasq3_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ integer *, integer *, integer *, logical *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ doublereal deemin;
+ integer iwhila, iwhilb;
+ doublereal oldemn, safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Osni Marques of the Lawrence Berkeley National -- */
+/* -- Laboratory and Beresford Parlett of the Univ. of California at -- */
+/* -- Berkeley -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASQ2 computes all the eigenvalues of the symmetric positive */
+/* definite tridiagonal matrix associated with the qd array Z, to high */
+/* relative accuracy, in the absence of denormalization, underflow */
+/* and overflow. */
+
+/* To see the relation of Z to the tridiagonal matrix, let L be a */
+/* unit lower bidiagonal matrix with subdiagonals Z(2,4,6,,..) and */
+/* let U be an upper bidiagonal matrix with 1's above and diagonal */
+/* Z(1,3,5,,..). The tridiagonal is L*U or, if you prefer, the */
+/* symmetric tridiagonal to which it is similar. */
+
+/* Note : DLASQ2 defines a logical variable, IEEE, which is true */
+/* on machines which follow ieee-754 floating-point standard in their */
+/* handling of infinities and NaNs, and false otherwise. This variable */
+/* is passed to DLASQ3. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of rows and columns in the matrix. N >= 0. */
+
+/* Z (input/output) DOUBLE PRECISION array, dimension ( 4*N ) */
+/* On entry Z holds the qd array. On exit, entries 1 to N hold */
+/* the eigenvalues in decreasing order, Z( 2*N+1 ) holds the */
+/* trace, and Z( 2*N+2 ) holds the sum of the eigenvalues. If */
+/* N > 2, then Z( 2*N+3 ) holds the iteration count, Z( 2*N+4 ) */
+/* holds NDIVS/NIN^2, and Z( 2*N+5 ) holds the percentage of */
+/* shifts that failed. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if the i-th argument is a scalar and had an illegal */
+/* value, then INFO = -i, if the i-th argument is an */
+/* array and the j-entry had an illegal value, then */
+/* INFO = -(i*100+j) */
+/* > 0: the algorithm failed */
+/* = 1, a split was marked by a positive value in E */
+/* = 2, current block of Z not diagonalized after 30*N */
+/* iterations (in inner while loop) */
+/* = 3, termination criterion of outer while loop not met */
+/* (program created more than N unreduced blocks) */
+
+/* Further Details */
+/* =============== */
+/* Local Variables: I0:N0 defines a current unreduced segment of Z. */
+/* The shifts are accumulated in SIGMA. Iteration count is in ITER. */
+/* Ping-pong is controlled by PP (alternates between 0 and 1). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments. */
+/* (in case DLASQ2 is not called by DLASQ1) */
+
+ /* Parameter adjustments */
+ --z__;
+
+ /* Function Body */
+ *info = 0;
+ eps = dlamch_("Precision");
+ safmin = dlamch_("Safe minimum");
+ tol = eps * 100.;
+/* Computing 2nd power */
+ d__1 = tol;
+ tol2 = d__1 * d__1;
+
+ if (*n < 0) {
+ *info = -1;
+ xerbla_("DLASQ2", &c__1);
+ return 0;
+ } else if (*n == 0) {
+ return 0;
+ } else if (*n == 1) {
+
+/* 1-by-1 case. */
+
+ if (z__[1] < 0.) {
+ *info = -201;
+ xerbla_("DLASQ2", &c__2);
+ }
+ return 0;
+ } else if (*n == 2) {
+
+/* 2-by-2 case. */
+
+ if (z__[2] < 0. || z__[3] < 0.) {
+ *info = -2;
+ xerbla_("DLASQ2", &c__2);
+ return 0;
+ } else if (z__[3] > z__[1]) {
+ d__ = z__[3];
+ z__[3] = z__[1];
+ z__[1] = d__;
+ }
+ z__[5] = z__[1] + z__[2] + z__[3];
+ if (z__[2] > z__[3] * tol2) {
+ t = (z__[1] - z__[3] + z__[2]) * .5;
+ s = z__[3] * (z__[2] / t);
+ if (s <= t) {
+ s = z__[3] * (z__[2] / (t * (sqrt(s / t + 1.) + 1.)));
+ } else {
+ s = z__[3] * (z__[2] / (t + sqrt(t) * sqrt(t + s)));
+ }
+ t = z__[1] + (s + z__[2]);
+ z__[3] *= z__[1] / t;
+ z__[1] = t;
+ }
+ z__[2] = z__[3];
+ z__[6] = z__[2] + z__[1];
+ return 0;
+ }
+
+/* Check for negative data and compute sums of q's and e's. */
+
+ z__[*n * 2] = 0.;
+ emin = z__[2];
+ qmax = 0.;
+ zmax = 0.;
+ d__ = 0.;
+ e = 0.;
+
+/* Note: in C "-" binds tighter than "<<", so *n - 1 << 1 is 2*(N-1); */
+/* this shift/precedence idiom recurs throughout this routine. */
+ i__1 = *n - 1 << 1;
+ for (k = 1; k <= i__1; k += 2) {
+ if (z__[k] < 0.) {
+ *info = -(k + 200);
+ xerbla_("DLASQ2", &c__2);
+ return 0;
+ } else if (z__[k + 1] < 0.) {
+ *info = -(k + 201);
+ xerbla_("DLASQ2", &c__2);
+ return 0;
+ }
+ d__ += z__[k];
+ e += z__[k + 1];
+/* Computing MAX */
+ d__1 = qmax, d__2 = z__[k];
+ qmax = max(d__1,d__2);
+/* Computing MIN */
+ d__1 = emin, d__2 = z__[k + 1];
+ emin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = max(qmax,zmax), d__2 = z__[k + 1];
+ zmax = max(d__1,d__2);
+/* L10: */
+ }
+ if (z__[(*n << 1) - 1] < 0.) {
+ *info = -((*n << 1) + 199);
+ xerbla_("DLASQ2", &c__2);
+ return 0;
+ }
+ d__ += z__[(*n << 1) - 1];
+/* Computing MAX */
+ d__1 = qmax, d__2 = z__[(*n << 1) - 1];
+ qmax = max(d__1,d__2);
+ zmax = max(qmax,zmax);
+
+/* Check for diagonality. */
+
+ if (e == 0.) {
+ i__1 = *n;
+ for (k = 2; k <= i__1; ++k) {
+ z__[k] = z__[(k << 1) - 1];
+/* L20: */
+ }
+ dlasrt_("D", n, &z__[1], &iinfo);
+ z__[(*n << 1) - 1] = d__;
+ return 0;
+ }
+
+ trace = d__ + e;
+
+/* Check for zero data. */
+
+ if (trace == 0.) {
+ z__[(*n << 1) - 1] = 0.;
+ return 0;
+ }
+
+/* Check whether the machine is IEEE conformable. */
+
+ ieee = ilaenv_(&c__10, "DLASQ2", "N", &c__1, &c__2, &c__3, &c__4) == 1 && ilaenv_(&c__11, "DLASQ2", "N", &c__1, &c__2,
+ &c__3, &c__4) == 1;
+
+/* Rearrange data for locality: Z=(q1,qq1,e1,ee1,q2,qq2,e2,ee2,...). */
+
+ for (k = *n << 1; k >= 2; k += -2) {
+ z__[k * 2] = 0.;
+ z__[(k << 1) - 1] = z__[k];
+ z__[(k << 1) - 2] = 0.;
+ z__[(k << 1) - 3] = z__[k - 1];
+/* L30: */
+ }
+
+ i0 = 1;
+ n0 = *n;
+
+/* Reverse the qd-array, if warranted. */
+
+ if (z__[(i0 << 2) - 3] * 1.5 < z__[(n0 << 2) - 3]) {
+ ipn4 = i0 + n0 << 2;
+ i__1 = i0 + n0 - 1 << 1;
+ for (i4 = i0 << 2; i4 <= i__1; i4 += 4) {
+ temp = z__[i4 - 3];
+ z__[i4 - 3] = z__[ipn4 - i4 - 3];
+ z__[ipn4 - i4 - 3] = temp;
+ temp = z__[i4 - 1];
+ z__[i4 - 1] = z__[ipn4 - i4 - 5];
+ z__[ipn4 - i4 - 5] = temp;
+/* L40: */
+ }
+ }
+
+/* Initial split checking via dqd and Li's test. */
+
+ pp = 0;
+
+ for (k = 1; k <= 2; ++k) {
+
+ d__ = z__[(n0 << 2) + pp - 3];
+ i__1 = (i0 << 2) + pp;
+ for (i4 = (n0 - 1 << 2) + pp; i4 >= i__1; i4 += -4) {
+ if (z__[i4 - 1] <= tol2 * d__) {
+ z__[i4 - 1] = -0.;
+ d__ = z__[i4 - 3];
+ } else {
+ d__ = z__[i4 - 3] * (d__ / (d__ + z__[i4 - 1]));
+ }
+/* L50: */
+ }
+
+/* dqd maps Z to ZZ plus Li's test. */
+
+ emin = z__[(i0 << 2) + pp + 1];
+ d__ = z__[(i0 << 2) + pp - 3];
+ i__1 = (n0 - 1 << 2) + pp;
+ for (i4 = (i0 << 2) + pp; i4 <= i__1; i4 += 4) {
+ z__[i4 - (pp << 1) - 2] = d__ + z__[i4 - 1];
+ if (z__[i4 - 1] <= tol2 * d__) {
+ z__[i4 - 1] = -0.;
+ z__[i4 - (pp << 1) - 2] = d__;
+ z__[i4 - (pp << 1)] = 0.;
+ d__ = z__[i4 + 1];
+ } else if (safmin * z__[i4 + 1] < z__[i4 - (pp << 1) - 2] &&
+ safmin * z__[i4 - (pp << 1) - 2] < z__[i4 + 1]) {
+ temp = z__[i4 + 1] / z__[i4 - (pp << 1) - 2];
+ z__[i4 - (pp << 1)] = z__[i4 - 1] * temp;
+ d__ *= temp;
+ } else {
+ z__[i4 - (pp << 1)] = z__[i4 + 1] * (z__[i4 - 1] / z__[i4 - (
+ pp << 1) - 2]);
+ d__ = z__[i4 + 1] * (d__ / z__[i4 - (pp << 1) - 2]);
+ }
+/* Computing MIN */
+ d__1 = emin, d__2 = z__[i4 - (pp << 1)];
+ emin = min(d__1,d__2);
+/* L60: */
+ }
+ z__[(n0 << 2) - pp - 2] = d__;
+
+/* Now find qmax. */
+
+ qmax = z__[(i0 << 2) - pp - 2];
+ i__1 = (n0 << 2) - pp - 2;
+ for (i4 = (i0 << 2) - pp + 2; i4 <= i__1; i4 += 4) {
+/* Computing MAX */
+ d__1 = qmax, d__2 = z__[i4];
+ qmax = max(d__1,d__2);
+/* L70: */
+ }
+
+/* Prepare for the next iteration on K. */
+
+ pp = 1 - pp;
+/* L80: */
+ }
+
+/* Initialise variables to pass to DLASQ3. */
+
+ ttype = 0;
+ dmin1 = 0.;
+ dmin2 = 0.;
+ dn = 0.;
+ dn1 = 0.;
+ dn2 = 0.;
+ g = 0.;
+ tau = 0.;
+
+ iter = 2;
+ nfail = 0;
+ ndiv = n0 - i0 << 1;
+
+ i__1 = *n + 1;
+ for (iwhila = 1; iwhila <= i__1; ++iwhila) {
+ if (n0 < 1) {
+ goto L170;
+ }
+
+/* While array unfinished do */
+
+/* E(N0) holds the value of SIGMA when submatrix in I0:N0 */
+/* splits from the rest of the array, but is negated. */
+
+ desig = 0.;
+ if (n0 == *n) {
+ sigma = 0.;
+ } else {
+ sigma = -z__[(n0 << 2) - 1];
+ }
+ if (sigma < 0.) {
+ *info = 1;
+ return 0;
+ }
+
+/* Find last unreduced submatrix's top index I0, find QMAX and */
+/* EMIN. Find Gershgorin-type bound if Q's much greater than E's. */
+
+ emax = 0.;
+ if (n0 > i0) {
+ emin = (d__1 = z__[(n0 << 2) - 5], abs(d__1));
+ } else {
+ emin = 0.;
+ }
+ qmin = z__[(n0 << 2) - 3];
+ qmax = qmin;
+ for (i4 = n0 << 2; i4 >= 8; i4 += -4) {
+ if (z__[i4 - 5] <= 0.) {
+ goto L100;
+ }
+ if (qmin >= emax * 4.) {
+/* Computing MIN */
+ d__1 = qmin, d__2 = z__[i4 - 3];
+ qmin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = emax, d__2 = z__[i4 - 5];
+ emax = max(d__1,d__2);
+ }
+/* Computing MAX */
+ d__1 = qmax, d__2 = z__[i4 - 7] + z__[i4 - 5];
+ qmax = max(d__1,d__2);
+/* Computing MIN */
+ d__1 = emin, d__2 = z__[i4 - 5];
+ emin = min(d__1,d__2);
+/* L90: */
+ }
+ i4 = 4;
+
+L100:
+ i0 = i4 / 4;
+ pp = 0;
+
+ if (n0 - i0 > 1) {
+ dee = z__[(i0 << 2) - 3];
+ deemin = dee;
+ kmin = i0;
+ i__2 = (n0 << 2) - 3;
+ for (i4 = (i0 << 2) + 1; i4 <= i__2; i4 += 4) {
+ dee = z__[i4] * (dee / (dee + z__[i4 - 2]));
+ if (dee <= deemin) {
+ deemin = dee;
+ kmin = (i4 + 3) / 4;
+ }
+/* L110: */
+ }
+ if (kmin - i0 << 1 < n0 - kmin && deemin <= z__[(n0 << 2) - 3] *
+ .5) {
+ ipn4 = i0 + n0 << 2;
+ pp = 2;
+ i__2 = i0 + n0 - 1 << 1;
+ for (i4 = i0 << 2; i4 <= i__2; i4 += 4) {
+ temp = z__[i4 - 3];
+ z__[i4 - 3] = z__[ipn4 - i4 - 3];
+ z__[ipn4 - i4 - 3] = temp;
+ temp = z__[i4 - 2];
+ z__[i4 - 2] = z__[ipn4 - i4 - 2];
+ z__[ipn4 - i4 - 2] = temp;
+ temp = z__[i4 - 1];
+ z__[i4 - 1] = z__[ipn4 - i4 - 5];
+ z__[ipn4 - i4 - 5] = temp;
+ temp = z__[i4];
+ z__[i4] = z__[ipn4 - i4 - 4];
+ z__[ipn4 - i4 - 4] = temp;
+/* L120: */
+ }
+ }
+ }
+
+/* Put -(initial shift) into DMIN. */
+
+/* Computing MAX */
+ d__1 = 0., d__2 = qmin - sqrt(qmin) * 2. * sqrt(emax);
+ dmin__ = -max(d__1,d__2);
+
+/* Now I0:N0 is unreduced. */
+/* PP = 0 for ping, PP = 1 for pong. */
+/* PP = 2 indicates that flipping was applied to the Z array and */
+/* and that the tests for deflation upon entry in DLASQ3 */
+/* should not be performed. */
+
+ nbig = (n0 - i0 + 1) * 30;
+ i__2 = nbig;
+ for (iwhilb = 1; iwhilb <= i__2; ++iwhilb) {
+ if (i0 > n0) {
+ goto L150;
+ }
+
+/* While submatrix unfinished take a good dqds step. */
+
+ dlasq3_(&i0, &n0, &z__[1], &pp, &dmin__, &sigma, &desig, &qmax, &
+ nfail, &iter, &ndiv, &ieee, &ttype, &dmin1, &dmin2, &dn, &
+ dn1, &dn2, &g, &tau);
+
+ pp = 1 - pp;
+
+/* When EMIN is very small check for splits. */
+
+ if (pp == 0 && n0 - i0 >= 3) {
+ if (z__[n0 * 4] <= tol2 * qmax || z__[(n0 << 2) - 1] <= tol2 *
+ sigma) {
+ splt = i0 - 1;
+ qmax = z__[(i0 << 2) - 3];
+ emin = z__[(i0 << 2) - 1];
+ oldemn = z__[i0 * 4];
+ i__3 = n0 - 3 << 2;
+ for (i4 = i0 << 2; i4 <= i__3; i4 += 4) {
+ if (z__[i4] <= tol2 * z__[i4 - 3] || z__[i4 - 1] <=
+ tol2 * sigma) {
+ z__[i4 - 1] = -sigma;
+ splt = i4 / 4;
+ qmax = 0.;
+ emin = z__[i4 + 3];
+ oldemn = z__[i4 + 4];
+ } else {
+/* Computing MAX */
+ d__1 = qmax, d__2 = z__[i4 + 1];
+ qmax = max(d__1,d__2);
+/* Computing MIN */
+ d__1 = emin, d__2 = z__[i4 - 1];
+ emin = min(d__1,d__2);
+/* Computing MIN */
+ d__1 = oldemn, d__2 = z__[i4];
+ oldemn = min(d__1,d__2);
+ }
+/* L130: */
+ }
+ z__[(n0 << 2) - 1] = emin;
+ z__[n0 * 4] = oldemn;
+ i0 = splt + 1;
+ }
+ }
+
+/* L140: */
+ }
+
+ *info = 2;
+ return 0;
+
+/* end IWHILB */
+
+L150:
+
+/* L160: */
+ ;
+ }
+
+ *info = 3;
+ return 0;
+
+/* end IWHILA */
+
+L170:
+
+/* Move q's to the front. */
+
+ i__1 = *n;
+ for (k = 2; k <= i__1; ++k) {
+ z__[k] = z__[(k << 2) - 3];
+/* L180: */
+ }
+
+/* Sort and compute sum of eigenvalues. */
+
+ dlasrt_("D", n, &z__[1], &iinfo);
+
+ e = 0.;
+ for (k = *n; k >= 1; --k) {
+ e += z__[k];
+/* L190: */
+ }
+
+/* Store trace, sum(eigenvalues) and information on performance. */
+
+ z__[(*n << 1) + 1] = trace;
+ z__[(*n << 1) + 2] = e;
+ z__[(*n << 1) + 3] = (doublereal) iter;
+/* Computing 2nd power */
+ i__1 = *n;
+ z__[(*n << 1) + 4] = (doublereal) ndiv / (doublereal) (i__1 * i__1);
+ z__[(*n << 1) + 5] = nfail * 100. / (doublereal) iter;
+ return 0;
+
+/* End of DLASQ2 */
+
+} /* dlasq2_ */
diff --git a/contrib/libs/clapack/dlasq3.c b/contrib/libs/clapack/dlasq3.c
new file mode 100644
index 0000000000..0e59c94591
--- /dev/null
+++ b/contrib/libs/clapack/dlasq3.c
@@ -0,0 +1,350 @@
+/* dlasq3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlasq3_(integer *i0, integer *n0, doublereal *z__,
+ integer *pp, doublereal *dmin__, doublereal *sigma, doublereal *desig,
+ doublereal *qmax, integer *nfail, integer *iter, integer *ndiv,
+ logical *ieee, integer *ttype, doublereal *dmin1, doublereal *dmin2,
+ doublereal *dn, doublereal *dn1, doublereal *dn2, doublereal *g,
+ doublereal *tau)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ doublereal s, t;
+ integer j4, nn;
+ doublereal eps, tol;
+ integer n0in, ipn4;
+ doublereal tol2, temp;
+ extern /* Subroutine */ int dlasq4_(integer *, integer *, doublereal *,
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+ doublereal *), dlasq5_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, logical *), dlasq6_(
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *);
+ extern doublereal dlamch_(char *);
+ extern logical disnan_(doublereal *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Osni Marques of the Lawrence Berkeley National -- */
+/* -- Laboratory and Beresford Parlett of the Univ. of California at -- */
+/* -- Berkeley -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASQ3 checks for deflation, computes a shift (TAU) and calls dqds. */
+/* In case of failure it changes shifts, and tries again until output */
+/* is positive. */
+
+/* Arguments */
+/* ========= */
+
+/* I0 (input) INTEGER */
+/* First index. */
+
+/* N0 (input) INTEGER */
+/* Last index. */
+
+/* Z (input) DOUBLE PRECISION array, dimension ( 4*N ) */
+/* Z holds the qd array. */
+
+/* PP (input/output) INTEGER */
+/* PP=0 for ping, PP=1 for pong. */
+/* PP=2 indicates that flipping was applied to the Z array */
+/* and that the initial tests for deflation should not be */
+/* performed. */
+
+/* DMIN (output) DOUBLE PRECISION */
+/* Minimum value of d. */
+
+/* SIGMA (output) DOUBLE PRECISION */
+/* Sum of shifts used in current segment. */
+
+/* DESIG (input/output) DOUBLE PRECISION */
+/* Lower order part of SIGMA */
+
+/* QMAX (input) DOUBLE PRECISION */
+/* Maximum value of q. */
+
+/* NFAIL (output) INTEGER */
+/* Number of times shift was too big. */
+
+/* ITER (output) INTEGER */
+/* Number of iterations. */
+
+/* NDIV (output) INTEGER */
+/* Number of divisions. */
+
+/* IEEE (input) LOGICAL */
+/* Flag for IEEE or non IEEE arithmetic (passed to DLASQ5). */
+
+/* TTYPE (input/output) INTEGER */
+/* Shift type. */
+
+/* DMIN1, DMIN2, DN, DN1, DN2, G, TAU (input/output) DOUBLE PRECISION */
+/* These are passed as arguments in order to save their values */
+/* between calls to DLASQ3. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Function .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --z__;
+
+ /* Function Body */
+ n0in = *n0;
+ eps = dlamch_("Precision");
+ tol = eps * 100.;
+/* Computing 2nd power */
+ d__1 = tol;
+ tol2 = d__1 * d__1;
+
+/* Check for deflation. */
+
+L10:
+
+ if (*n0 < *i0) {
+ return 0;
+ }
+ if (*n0 == *i0) {
+ goto L20;
+ }
+ nn = (*n0 << 2) + *pp;
+ if (*n0 == *i0 + 1) {
+ goto L40;
+ }
+
+/* Check whether E(N0-1) is negligible, 1 eigenvalue. */
+
+ if (z__[nn - 5] > tol2 * (*sigma + z__[nn - 3]) && z__[nn - (*pp << 1) -
+ 4] > tol2 * z__[nn - 7]) {
+ goto L30;
+ }
+
+L20:
+
+ z__[(*n0 << 2) - 3] = z__[(*n0 << 2) + *pp - 3] + *sigma;
+ --(*n0);
+ goto L10;
+
+/* Check whether E(N0-2) is negligible, 2 eigenvalues. */
+
+L30:
+
+ if (z__[nn - 9] > tol2 * *sigma && z__[nn - (*pp << 1) - 8] > tol2 * z__[
+ nn - 11]) {
+ goto L50;
+ }
+
+L40:
+
+ if (z__[nn - 3] > z__[nn - 7]) {
+ s = z__[nn - 3];
+ z__[nn - 3] = z__[nn - 7];
+ z__[nn - 7] = s;
+ }
+ if (z__[nn - 5] > z__[nn - 3] * tol2) {
+ t = (z__[nn - 7] - z__[nn - 3] + z__[nn - 5]) * .5;
+ s = z__[nn - 3] * (z__[nn - 5] / t);
+ if (s <= t) {
+ s = z__[nn - 3] * (z__[nn - 5] / (t * (sqrt(s / t + 1.) + 1.)));
+ } else {
+ s = z__[nn - 3] * (z__[nn - 5] / (t + sqrt(t) * sqrt(t + s)));
+ }
+ t = z__[nn - 7] + (s + z__[nn - 5]);
+ z__[nn - 3] *= z__[nn - 7] / t;
+ z__[nn - 7] = t;
+ }
+ z__[(*n0 << 2) - 7] = z__[nn - 7] + *sigma;
+ z__[(*n0 << 2) - 3] = z__[nn - 3] + *sigma;
+ *n0 += -2;
+ goto L10;
+
+L50:
+ if (*pp == 2) {
+ *pp = 0;
+ }
+
+/* Reverse the qd-array, if warranted. */
+
+ if (*dmin__ <= 0. || *n0 < n0in) {
+ if (z__[(*i0 << 2) + *pp - 3] * 1.5 < z__[(*n0 << 2) + *pp - 3]) {
+ ipn4 = *i0 + *n0 << 2;
+ i__1 = *i0 + *n0 - 1 << 1;
+ for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
+ temp = z__[j4 - 3];
+ z__[j4 - 3] = z__[ipn4 - j4 - 3];
+ z__[ipn4 - j4 - 3] = temp;
+ temp = z__[j4 - 2];
+ z__[j4 - 2] = z__[ipn4 - j4 - 2];
+ z__[ipn4 - j4 - 2] = temp;
+ temp = z__[j4 - 1];
+ z__[j4 - 1] = z__[ipn4 - j4 - 5];
+ z__[ipn4 - j4 - 5] = temp;
+ temp = z__[j4];
+ z__[j4] = z__[ipn4 - j4 - 4];
+ z__[ipn4 - j4 - 4] = temp;
+/* L60: */
+ }
+ if (*n0 - *i0 <= 4) {
+ z__[(*n0 << 2) + *pp - 1] = z__[(*i0 << 2) + *pp - 1];
+ z__[(*n0 << 2) - *pp] = z__[(*i0 << 2) - *pp];
+ }
+/* Computing MIN */
+ d__1 = *dmin2, d__2 = z__[(*n0 << 2) + *pp - 1];
+ *dmin2 = min(d__1,d__2);
+/* Computing MIN */
+ d__1 = z__[(*n0 << 2) + *pp - 1], d__2 = z__[(*i0 << 2) + *pp - 1]
+ , d__1 = min(d__1,d__2), d__2 = z__[(*i0 << 2) + *pp + 3];
+ z__[(*n0 << 2) + *pp - 1] = min(d__1,d__2);
+/* Computing MIN */
+ d__1 = z__[(*n0 << 2) - *pp], d__2 = z__[(*i0 << 2) - *pp], d__1 =
+ min(d__1,d__2), d__2 = z__[(*i0 << 2) - *pp + 4];
+ z__[(*n0 << 2) - *pp] = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = *qmax, d__2 = z__[(*i0 << 2) + *pp - 3], d__1 = max(d__1,
+ d__2), d__2 = z__[(*i0 << 2) + *pp + 1];
+ *qmax = max(d__1,d__2);
+ *dmin__ = -0.;
+ }
+ }
+
+/* Choose a shift. */
+
+ dlasq4_(i0, n0, &z__[1], pp, &n0in, dmin__, dmin1, dmin2, dn, dn1, dn2,
+ tau, ttype, g);
+
+/* Call dqds until DMIN > 0. */
+
+L70:
+
+ dlasq5_(i0, n0, &z__[1], pp, tau, dmin__, dmin1, dmin2, dn, dn1, dn2,
+ ieee);
+
+ *ndiv += *n0 - *i0 + 2;
+ ++(*iter);
+
+/* Check status. */
+
+ if (*dmin__ >= 0. && *dmin1 > 0.) {
+
+/* Success. */
+
+ goto L90;
+
+ } else if (*dmin__ < 0. && *dmin1 > 0. && z__[(*n0 - 1 << 2) - *pp] < tol
+ * (*sigma + *dn1) && abs(*dn) < tol * *sigma) {
+
+/* Convergence hidden by negative DN. */
+
+ z__[(*n0 - 1 << 2) - *pp + 2] = 0.;
+ *dmin__ = 0.;
+ goto L90;
+ } else if (*dmin__ < 0.) {
+
+/* TAU too big. Select new TAU and try again. */
+
+ ++(*nfail);
+ if (*ttype < -22) {
+
+/* Failed twice. Play it safe. */
+
+ *tau = 0.;
+ } else if (*dmin1 > 0.) {
+
+/* Late failure. Gives excellent shift. */
+
+ *tau = (*tau + *dmin__) * (1. - eps * 2.);
+ *ttype += -11;
+ } else {
+
+/* Early failure. Divide by 4. */
+
+ *tau *= .25;
+ *ttype += -12;
+ }
+ goto L70;
+ } else if (disnan_(dmin__)) {
+
+/* NaN. */
+
+ if (*tau == 0.) {
+ goto L80;
+ } else {
+ *tau = 0.;
+ goto L70;
+ }
+ } else {
+
+/* Possible underflow. Play it safe. */
+
+ goto L80;
+ }
+
+/* Risk of underflow. */
+
+L80:
+ dlasq6_(i0, n0, &z__[1], pp, dmin__, dmin1, dmin2, dn, dn1, dn2);
+ *ndiv += *n0 - *i0 + 2;
+ ++(*iter);
+ *tau = 0.;
+
+L90:
+/* Accumulate SIGMA <- SIGMA + TAU in two pieces (compensated */
+/* addition): SIGMA carries the high-order part and DESIG the */
+/* low-order rounding error, so many small shifts are not lost. */
+ if (*tau < *sigma) {
+ *desig += *tau;
+ t = *sigma + *desig;
+ *desig -= t - *sigma;
+ } else {
+ t = *sigma + *tau;
+ *desig = *sigma - (t - *tau) + *desig;
+ }
+ *sigma = t;
+
+ return 0;
+
+/* End of DLASQ3 */
+
+} /* dlasq3_ */
diff --git a/contrib/libs/clapack/dlasq4.c b/contrib/libs/clapack/dlasq4.c
new file mode 100644
index 0000000000..333a1d2222
--- /dev/null
+++ b/contrib/libs/clapack/dlasq4.c
@@ -0,0 +1,403 @@
+/* dlasq4.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlasq4_(integer *i0, integer *n0, doublereal *z__,
+ integer *pp, integer *n0in, doublereal *dmin__, doublereal *dmin1,
+ doublereal *dmin2, doublereal *dn, doublereal *dn1, doublereal *dn2,
+ doublereal *tau, integer *ttype, doublereal *g)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ doublereal s, a2, b1, b2;
+ integer i4, nn, np;
+ doublereal gam, gap1, gap2;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Osni Marques of the Lawrence Berkeley National -- */
+/* -- Laboratory and Beresford Parlett of the Univ. of California at -- */
+/* -- Berkeley -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASQ4 computes an approximation TAU to the smallest eigenvalue */
+/* using values of d from the previous transform. */
+
+/* I0 (input) INTEGER */
+/* First index. */
+
+/* N0 (input) INTEGER */
+/* Last index. */
+
+/* Z (input) DOUBLE PRECISION array, dimension ( 4*N ) */
+/* Z holds the qd array. */
+
+/* PP (input) INTEGER */
+/* PP=0 for ping, PP=1 for pong. */
+
+/* N0IN (input) INTEGER */
+/* The value of N0 at start of EIGTEST. */
+
+/* DMIN (input) DOUBLE PRECISION */
+/* Minimum value of d. */
+
+/* DMIN1 (input) DOUBLE PRECISION */
+/* Minimum value of d, excluding D( N0 ). */
+
+/* DMIN2 (input) DOUBLE PRECISION */
+/* Minimum value of d, excluding D( N0 ) and D( N0-1 ). */
+
+/* DN (input) DOUBLE PRECISION */
+/* d(N) */
+
+/* DN1 (input) DOUBLE PRECISION */
+/* d(N-1) */
+
+/* DN2 (input) DOUBLE PRECISION */
+/* d(N-2) */
+
+/* TAU (output) DOUBLE PRECISION */
+/* This is the shift. */
+
+/* TTYPE (output) INTEGER */
+/* Shift type. */
+
+/* G (input/output) DOUBLE PRECISION */
+/* G is passed as an argument in order to save its value between */
+/* calls to DLASQ4. */
+
+/* Further Details */
+/* =============== */
+/* CNST1 = 9/16 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* A negative DMIN forces the shift to take that absolute value */
+/* TTYPE records the type of shift. */
+
+ /* Parameter adjustments */
+ --z__;
+
+ /* Function Body */
+ if (*dmin__ <= 0.) {
+ *tau = -(*dmin__);
+ *ttype = -1;
+ return 0;
+ }
+
+ nn = (*n0 << 2) + *pp;
+ if (*n0in == *n0) {
+
+/* No eigenvalues deflated. */
+
+ if (*dmin__ == *dn || *dmin__ == *dn1) {
+
+ b1 = sqrt(z__[nn - 3]) * sqrt(z__[nn - 5]);
+ b2 = sqrt(z__[nn - 7]) * sqrt(z__[nn - 9]);
+ a2 = z__[nn - 7] + z__[nn - 5];
+
+/* Cases 2 and 3. */
+
+ if (*dmin__ == *dn && *dmin1 == *dn1) {
+ gap2 = *dmin2 - a2 - *dmin2 * .25;
+ if (gap2 > 0. && gap2 > b2) {
+ gap1 = a2 - *dn - b2 / gap2 * b2;
+ } else {
+ gap1 = a2 - *dn - (b1 + b2);
+ }
+ if (gap1 > 0. && gap1 > b1) {
+/* Computing MAX */
+ d__1 = *dn - b1 / gap1 * b1, d__2 = *dmin__ * .5;
+ s = max(d__1,d__2);
+ *ttype = -2;
+ } else {
+ s = 0.;
+ if (*dn > b1) {
+ s = *dn - b1;
+ }
+ if (a2 > b1 + b2) {
+/* Computing MIN */
+ d__1 = s, d__2 = a2 - (b1 + b2);
+ s = min(d__1,d__2);
+ }
+/* Computing MAX */
+ d__1 = s, d__2 = *dmin__ * .333;
+ s = max(d__1,d__2);
+ *ttype = -3;
+ }
+ } else {
+
+/* Case 4. */
+
+ *ttype = -4;
+ s = *dmin__ * .25;
+ if (*dmin__ == *dn) {
+ gam = *dn;
+ a2 = 0.;
+ if (z__[nn - 5] > z__[nn - 7]) {
+ return 0;
+ }
+ b2 = z__[nn - 5] / z__[nn - 7];
+ np = nn - 9;
+ } else {
+ np = nn - (*pp << 1);
+ b2 = z__[np - 2];
+ gam = *dn1;
+ if (z__[np - 4] > z__[np - 2]) {
+ return 0;
+ }
+ a2 = z__[np - 4] / z__[np - 2];
+ if (z__[nn - 9] > z__[nn - 11]) {
+ return 0;
+ }
+ b2 = z__[nn - 9] / z__[nn - 11];
+ np = nn - 13;
+ }
+
+/* Approximate contribution to norm squared from I < NN-1. */
+
+ a2 += b2;
+ i__1 = (*i0 << 2) - 1 + *pp;
+ for (i4 = np; i4 >= i__1; i4 += -4) {
+ if (b2 == 0.) {
+ goto L20;
+ }
+ b1 = b2;
+ if (z__[i4] > z__[i4 - 2]) {
+ return 0;
+ }
+ b2 *= z__[i4] / z__[i4 - 2];
+ a2 += b2;
+/* The literal .563 here and below approximates CNST1 = 9/16 */
+/* (see Further Details above). */
+ if (max(b2,b1) * 100. < a2 || .563 < a2) {
+ goto L20;
+ }
+/* L10: */
+ }
+L20:
+ a2 *= 1.05;
+
+/* Rayleigh quotient residual bound. */
+
+ if (a2 < .563) {
+ s = gam * (1. - sqrt(a2)) / (a2 + 1.);
+ }
+ }
+ } else if (*dmin__ == *dn2) {
+
+/* Case 5. */
+
+ *ttype = -5;
+ s = *dmin__ * .25;
+
+/* Compute contribution to norm squared from I > NN-2. */
+
+ np = nn - (*pp << 1);
+ b1 = z__[np - 2];
+ b2 = z__[np - 6];
+ gam = *dn2;
+ if (z__[np - 8] > b2 || z__[np - 4] > b1) {
+ return 0;
+ }
+ a2 = z__[np - 8] / b2 * (z__[np - 4] / b1 + 1.);
+
+/* Approximate contribution to norm squared from I < NN-2. */
+
+ if (*n0 - *i0 > 2) {
+ b2 = z__[nn - 13] / z__[nn - 15];
+ a2 += b2;
+ i__1 = (*i0 << 2) - 1 + *pp;
+ for (i4 = nn - 17; i4 >= i__1; i4 += -4) {
+ if (b2 == 0.) {
+ goto L40;
+ }
+ b1 = b2;
+ if (z__[i4] > z__[i4 - 2]) {
+ return 0;
+ }
+ b2 *= z__[i4] / z__[i4 - 2];
+ a2 += b2;
+ if (max(b2,b1) * 100. < a2 || .563 < a2) {
+ goto L40;
+ }
+/* L30: */
+ }
+L40:
+ a2 *= 1.05;
+ }
+
+ if (a2 < .563) {
+ s = gam * (1. - sqrt(a2)) / (a2 + 1.);
+ }
+ } else {
+
+/* Case 6, no information to guide us. */
+
+ if (*ttype == -6) {
+ *g += (1. - *g) * .333;
+ } else if (*ttype == -18) {
+ *g = .083250000000000005;
+ } else {
+ *g = .25;
+ }
+ s = *g * *dmin__;
+ *ttype = -6;
+ }
+
+ } else if (*n0in == *n0 + 1) {
+
+/* One eigenvalue just deflated. Use DMIN1, DN1 for DMIN and DN. */
+
+ if (*dmin1 == *dn1 && *dmin2 == *dn2) {
+
+/* Cases 7 and 8. */
+
+ *ttype = -7;
+ s = *dmin1 * .333;
+ if (z__[nn - 5] > z__[nn - 7]) {
+ return 0;
+ }
+ b1 = z__[nn - 5] / z__[nn - 7];
+ b2 = b1;
+ if (b2 == 0.) {
+ goto L60;
+ }
+ i__1 = (*i0 << 2) - 1 + *pp;
+ for (i4 = (*n0 << 2) - 9 + *pp; i4 >= i__1; i4 += -4) {
+ a2 = b1;
+ if (z__[i4] > z__[i4 - 2]) {
+ return 0;
+ }
+ b1 *= z__[i4] / z__[i4 - 2];
+ b2 += b1;
+ if (max(b1,a2) * 100. < b2) {
+ goto L60;
+ }
+/* L50: */
+ }
+L60:
+ b2 = sqrt(b2 * 1.05);
+/* Computing 2nd power */
+ d__1 = b2;
+ a2 = *dmin1 / (d__1 * d__1 + 1.);
+ gap2 = *dmin2 * .5 - a2;
+ if (gap2 > 0. && gap2 > b2 * a2) {
+/* Computing MAX */
+ d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2);
+ s = max(d__1,d__2);
+ } else {
+/* Computing MAX */
+ d__1 = s, d__2 = a2 * (1. - b2 * 1.01);
+ s = max(d__1,d__2);
+ *ttype = -8;
+ }
+ } else {
+
+/* Case 9. */
+
+ s = *dmin1 * .25;
+ if (*dmin1 == *dn1) {
+ s = *dmin1 * .5;
+ }
+ *ttype = -9;
+ }
+
+ } else if (*n0in == *n0 + 2) {
+
+/* Two eigenvalues deflated. Use DMIN2, DN2 for DMIN and DN. */
+
+/* Cases 10 and 11. */
+
+ if (*dmin2 == *dn2 && z__[nn - 5] * 2. < z__[nn - 7]) {
+ *ttype = -10;
+ s = *dmin2 * .333;
+ if (z__[nn - 5] > z__[nn - 7]) {
+ return 0;
+ }
+ b1 = z__[nn - 5] / z__[nn - 7];
+ b2 = b1;
+ if (b2 == 0.) {
+ goto L80;
+ }
+ i__1 = (*i0 << 2) - 1 + *pp;
+ for (i4 = (*n0 << 2) - 9 + *pp; i4 >= i__1; i4 += -4) {
+ if (z__[i4] > z__[i4 - 2]) {
+ return 0;
+ }
+ b1 *= z__[i4] / z__[i4 - 2];
+ b2 += b1;
+ if (b1 * 100. < b2) {
+ goto L80;
+ }
+/* L70: */
+ }
+L80:
+ b2 = sqrt(b2 * 1.05);
+/* Computing 2nd power */
+ d__1 = b2;
+ a2 = *dmin2 / (d__1 * d__1 + 1.);
+ gap2 = z__[nn - 7] + z__[nn - 9] - sqrt(z__[nn - 11]) * sqrt(z__[
+ nn - 9]) - a2;
+ if (gap2 > 0. && gap2 > b2 * a2) {
+/* Computing MAX */
+ d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2);
+ s = max(d__1,d__2);
+ } else {
+/* Computing MAX */
+ d__1 = s, d__2 = a2 * (1. - b2 * 1.01);
+ s = max(d__1,d__2);
+ }
+ } else {
+ s = *dmin2 * .25;
+ *ttype = -11;
+ }
+ } else if (*n0in > *n0 + 2) {
+
+/* Case 12, more than two eigenvalues deflated. No information. */
+
+ s = 0.;
+ *ttype = -12;
+ }
+
+ *tau = s;
+ return 0;
+
+/* End of DLASQ4 */
+
+} /* dlasq4_ */
diff --git a/contrib/libs/clapack/dlasq5.c b/contrib/libs/clapack/dlasq5.c
new file mode 100644
index 0000000000..df306bdc86
--- /dev/null
+++ b/contrib/libs/clapack/dlasq5.c
@@ -0,0 +1,240 @@
+/* dlasq5.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlasq5_(integer *i0, integer *n0, doublereal *z__,
+ integer *pp, doublereal *tau, doublereal *dmin__, doublereal *dmin1,
+ doublereal *dmin2, doublereal *dn, doublereal *dnm1, doublereal *dnm2,
+ logical *ieee)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ doublereal d__;
+ integer j4, j4p2;
+ doublereal emin, temp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Osni Marques of the Lawrence Berkeley National -- */
+/* -- Laboratory and Beresford Parlett of the Univ. of California at -- */
+/* -- Berkeley -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASQ5 computes one dqds transform in ping-pong form, one */
+/* version for IEEE machines another for non IEEE machines. */
+
+/* Arguments */
+/* ========= */
+
+/* I0 (input) INTEGER */
+/* First index. */
+
+/* N0 (input) INTEGER */
+/* Last index. */
+
+/* Z (input) DOUBLE PRECISION array, dimension ( 4*N ) */
+/* Z holds the qd array. EMIN is stored in Z(4*N0) to avoid */
+/* an extra argument. */
+
+/* PP (input) INTEGER */
+/* PP=0 for ping, PP=1 for pong. */
+
+/* TAU (input) DOUBLE PRECISION */
+/* This is the shift. */
+
+/* DMIN (output) DOUBLE PRECISION */
+/* Minimum value of d. */
+
+/* DMIN1 (output) DOUBLE PRECISION */
+/* Minimum value of d, excluding D( N0 ). */
+
+/* DMIN2 (output) DOUBLE PRECISION */
+/* Minimum value of d, excluding D( N0 ) and D( N0-1 ). */
+
+/* DN (output) DOUBLE PRECISION */
+/* d(N0), the last value of d. */
+
+/* DNM1 (output) DOUBLE PRECISION */
+/* d(N0-1). */
+
+/* DNM2 (output) DOUBLE PRECISION */
+/* d(N0-2). */
+
+/* IEEE (input) LOGICAL */
+/* Flag for IEEE or non IEEE arithmetic. */
+
+/* ===================================================================== */
+
+/* .. Parameter .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --z__;
+
+ /* Function Body */
+ if (*n0 - *i0 - 1 <= 0) {
+ return 0;
+ }
+
+ j4 = (*i0 << 2) + *pp - 3;
+ emin = z__[j4 + 4];
+ d__ = z__[j4] - *tau;
+ *dmin__ = d__;
+ *dmin1 = -z__[j4];
+
+ if (*ieee) {
+
+/* Code for IEEE arithmetic. */
+
+ if (*pp == 0) {
+ i__1 = *n0 - 3 << 2;
+ for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
+ z__[j4 - 2] = d__ + z__[j4 - 1];
+ temp = z__[j4 + 1] / z__[j4 - 2];
+ d__ = d__ * temp - *tau;
+ *dmin__ = min(*dmin__,d__);
+ z__[j4] = z__[j4 - 1] * temp;
+/* Computing MIN */
+ d__1 = z__[j4];
+ emin = min(d__1,emin);
+/* L10: */
+ }
+ } else {
+ i__1 = *n0 - 3 << 2;
+ for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
+ z__[j4 - 3] = d__ + z__[j4];
+ temp = z__[j4 + 2] / z__[j4 - 3];
+ d__ = d__ * temp - *tau;
+ *dmin__ = min(*dmin__,d__);
+ z__[j4 - 1] = z__[j4] * temp;
+/* Computing MIN */
+ d__1 = z__[j4 - 1];
+ emin = min(d__1,emin);
+/* L20: */
+ }
+ }
+
+/* Unroll last two steps. */
+
+ *dnm2 = d__;
+ *dmin2 = *dmin__;
+ j4 = (*n0 - 2 << 2) - *pp;
+ j4p2 = j4 + (*pp << 1) - 1;
+ z__[j4 - 2] = *dnm2 + z__[j4p2];
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau;
+ *dmin__ = min(*dmin__,*dnm1);
+
+ *dmin1 = *dmin__;
+ j4 += 4;
+ j4p2 = j4 + (*pp << 1) - 1;
+ z__[j4 - 2] = *dnm1 + z__[j4p2];
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau;
+ *dmin__ = min(*dmin__,*dn);
+
+ } else {
+
+/* Code for non IEEE arithmetic. */
+
+ if (*pp == 0) {
+ i__1 = *n0 - 3 << 2;
+ for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
+ z__[j4 - 2] = d__ + z__[j4 - 1];
+ if (d__ < 0.) {
+ return 0;
+ } else {
+ z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]);
+ d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]) - *tau;
+ }
+ *dmin__ = min(*dmin__,d__);
+/* Computing MIN */
+ d__1 = emin, d__2 = z__[j4];
+ emin = min(d__1,d__2);
+/* L30: */
+ }
+ } else {
+ i__1 = *n0 - 3 << 2;
+ for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
+ z__[j4 - 3] = d__ + z__[j4];
+ if (d__ < 0.) {
+ return 0;
+ } else {
+ z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]);
+ d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]) - *tau;
+ }
+ *dmin__ = min(*dmin__,d__);
+/* Computing MIN */
+ d__1 = emin, d__2 = z__[j4 - 1];
+ emin = min(d__1,d__2);
+/* L40: */
+ }
+ }
+
+/* Unroll last two steps. */
+
+ *dnm2 = d__;
+ *dmin2 = *dmin__;
+ j4 = (*n0 - 2 << 2) - *pp;
+ j4p2 = j4 + (*pp << 1) - 1;
+ z__[j4 - 2] = *dnm2 + z__[j4p2];
+ if (*dnm2 < 0.) {
+ return 0;
+ } else {
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau;
+ }
+ *dmin__ = min(*dmin__,*dnm1);
+
+ *dmin1 = *dmin__;
+ j4 += 4;
+ j4p2 = j4 + (*pp << 1) - 1;
+ z__[j4 - 2] = *dnm1 + z__[j4p2];
+ if (*dnm1 < 0.) {
+ return 0;
+ } else {
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau;
+ }
+ *dmin__ = min(*dmin__,*dn);
+
+ }
+
+ z__[j4 + 2] = *dn;
+ z__[(*n0 << 2) - *pp] = emin;
+ return 0;
+
+/* End of DLASQ5 */
+
+} /* dlasq5_ */
diff --git a/contrib/libs/clapack/dlasq6.c b/contrib/libs/clapack/dlasq6.c
new file mode 100644
index 0000000000..22d809fc68
--- /dev/null
+++ b/contrib/libs/clapack/dlasq6.c
@@ -0,0 +1,212 @@
+/* dlasq6.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlasq6_(integer *i0, integer *n0, doublereal *z__,
+ integer *pp, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2,
+ doublereal *dn, doublereal *dnm1, doublereal *dnm2)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ doublereal d__;
+ integer j4, j4p2;
+ doublereal emin, temp;
+ extern doublereal dlamch_(char *);
+ doublereal safmin;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Osni Marques of the Lawrence Berkeley National -- */
+/* -- Laboratory and Beresford Parlett of the Univ. of California at -- */
+/* -- Berkeley -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASQ6 computes one dqd (shift equal to zero) transform in */
+/* ping-pong form, with protection against underflow and overflow. */
+
+/* Arguments */
+/* ========= */
+
+/* I0 (input) INTEGER */
+/* First index. */
+
+/* N0 (input) INTEGER */
+/* Last index. */
+
+/* Z (input) DOUBLE PRECISION array, dimension ( 4*N ) */
+/* Z holds the qd array. EMIN is stored in Z(4*N0) to avoid */
+/* an extra argument. */
+
+/* PP (input) INTEGER */
+/* PP=0 for ping, PP=1 for pong. */
+
+/* DMIN (output) DOUBLE PRECISION */
+/* Minimum value of d. */
+
+/* DMIN1 (output) DOUBLE PRECISION */
+/* Minimum value of d, excluding D( N0 ). */
+
+/* DMIN2 (output) DOUBLE PRECISION */
+/* Minimum value of d, excluding D( N0 ) and D( N0-1 ). */
+
+/* DN (output) DOUBLE PRECISION */
+/* d(N0), the last value of d. */
+
+/* DNM1 (output) DOUBLE PRECISION */
+/* d(N0-1). */
+
+/* DNM2 (output) DOUBLE PRECISION */
+/* d(N0-2). */
+
+/* ===================================================================== */
+
+/* .. Parameter .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Function .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --z__;
+
+ /* Function Body */
+ if (*n0 - *i0 - 1 <= 0) {
+ return 0;
+ }
+
+ safmin = dlamch_("Safe minimum");
+ j4 = (*i0 << 2) + *pp - 3;
+ emin = z__[j4 + 4];
+ d__ = z__[j4];
+ *dmin__ = d__;
+
+ if (*pp == 0) {
+ i__1 = *n0 - 3 << 2;
+ for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
+ z__[j4 - 2] = d__ + z__[j4 - 1];
+ if (z__[j4 - 2] == 0.) {
+ z__[j4] = 0.;
+ d__ = z__[j4 + 1];
+ *dmin__ = d__;
+ emin = 0.;
+ } else if (safmin * z__[j4 + 1] < z__[j4 - 2] && safmin * z__[j4
+ - 2] < z__[j4 + 1]) {
+ temp = z__[j4 + 1] / z__[j4 - 2];
+ z__[j4] = z__[j4 - 1] * temp;
+ d__ *= temp;
+ } else {
+ z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]);
+ d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]);
+ }
+ *dmin__ = min(*dmin__,d__);
+/* Computing MIN */
+ d__1 = emin, d__2 = z__[j4];
+ emin = min(d__1,d__2);
+/* L10: */
+ }
+ } else {
+ i__1 = *n0 - 3 << 2;
+ for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
+ z__[j4 - 3] = d__ + z__[j4];
+ if (z__[j4 - 3] == 0.) {
+ z__[j4 - 1] = 0.;
+ d__ = z__[j4 + 2];
+ *dmin__ = d__;
+ emin = 0.;
+ } else if (safmin * z__[j4 + 2] < z__[j4 - 3] && safmin * z__[j4
+ - 3] < z__[j4 + 2]) {
+ temp = z__[j4 + 2] / z__[j4 - 3];
+ z__[j4 - 1] = z__[j4] * temp;
+ d__ *= temp;
+ } else {
+ z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]);
+ d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]);
+ }
+ *dmin__ = min(*dmin__,d__);
+/* Computing MIN */
+ d__1 = emin, d__2 = z__[j4 - 1];
+ emin = min(d__1,d__2);
+/* L20: */
+ }
+ }
+
+/* Unroll last two steps. */
+
+ *dnm2 = d__;
+ *dmin2 = *dmin__;
+ j4 = (*n0 - 2 << 2) - *pp;
+ j4p2 = j4 + (*pp << 1) - 1;
+ z__[j4 - 2] = *dnm2 + z__[j4p2];
+ if (z__[j4 - 2] == 0.) {
+ z__[j4] = 0.;
+ *dnm1 = z__[j4p2 + 2];
+ *dmin__ = *dnm1;
+ emin = 0.;
+ } else if (safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] <
+ z__[j4p2 + 2]) {
+ temp = z__[j4p2 + 2] / z__[j4 - 2];
+ z__[j4] = z__[j4p2] * temp;
+ *dnm1 = *dnm2 * temp;
+ } else {
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]);
+ }
+ *dmin__ = min(*dmin__,*dnm1);
+
+ *dmin1 = *dmin__;
+ j4 += 4;
+ j4p2 = j4 + (*pp << 1) - 1;
+ z__[j4 - 2] = *dnm1 + z__[j4p2];
+ if (z__[j4 - 2] == 0.) {
+ z__[j4] = 0.;
+ *dn = z__[j4p2 + 2];
+ *dmin__ = *dn;
+ emin = 0.;
+ } else if (safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] <
+ z__[j4p2 + 2]) {
+ temp = z__[j4p2 + 2] / z__[j4 - 2];
+ z__[j4] = z__[j4p2] * temp;
+ *dn = *dnm1 * temp;
+ } else {
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]);
+ }
+ *dmin__ = min(*dmin__,*dn);
+
+ z__[j4 + 2] = *dn;
+ z__[(*n0 << 2) - *pp] = emin;
+ return 0;
+
+/* End of DLASQ6 */
+
+} /* dlasq6_ */
diff --git a/contrib/libs/clapack/dlasr.c b/contrib/libs/clapack/dlasr.c
new file mode 100644
index 0000000000..6abfa8140a
--- /dev/null
+++ b/contrib/libs/clapack/dlasr.c
@@ -0,0 +1,453 @@
+/* dlasr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlasr_(char *side, char *pivot, char *direct, integer *m,
+ integer *n, doublereal *c__, doublereal *s, doublereal *a, integer *
+ lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, info;
+ doublereal temp;
+ extern logical lsame_(char *, char *);
+ doublereal ctemp, stemp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASR applies a sequence of plane rotations to a real matrix A, */
+/* from either the left or the right. */
+
+/* When SIDE = 'L', the transformation takes the form */
+
+/* A := P*A */
+
+/* and when SIDE = 'R', the transformation takes the form */
+
+/* A := A*P**T */
+
+/* where P is an orthogonal matrix consisting of a sequence of z plane */
+/* rotations, with z = M when SIDE = 'L' and z = N when SIDE = 'R', */
+/* and P**T is the transpose of P. */
+
+/* When DIRECT = 'F' (Forward sequence), then */
+
+/* P = P(z-1) * ... * P(2) * P(1) */
+
+/* and when DIRECT = 'B' (Backward sequence), then */
+
+/* P = P(1) * P(2) * ... * P(z-1) */
+
+/* where P(k) is a plane rotation matrix defined by the 2-by-2 rotation */
+
+/* R(k) = ( c(k) s(k) ) */
+/* = ( -s(k) c(k) ). */
+
+/* When PIVOT = 'V' (Variable pivot), the rotation is performed */
+/* for the plane (k,k+1), i.e., P(k) has the form */
+
+/* P(k) = ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+/* ( c(k) s(k) ) */
+/* ( -s(k) c(k) ) */
+/* ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+
+/* where R(k) appears as a rank-2 modification to the identity matrix in */
+/* rows and columns k and k+1. */
+
+/* When PIVOT = 'T' (Top pivot), the rotation is performed for the */
+/* plane (1,k+1), so P(k) has the form */
+
+/* P(k) = ( c(k) s(k) ) */
+/* ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+/* ( -s(k) c(k) ) */
+/* ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+
+/* where R(k) appears in rows and columns 1 and k+1. */
+
+/* Similarly, when PIVOT = 'B' (Bottom pivot), the rotation is */
+/* performed for the plane (k,z), giving P(k) the form */
+
+/* P(k) = ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+/* ( c(k) s(k) ) */
+/* ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+/* ( -s(k) c(k) ) */
+
+/* where R(k) appears in rows and columns k and z. The rotations are */
+/* performed without ever forming P(k) explicitly. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* Specifies whether the plane rotation matrix P is applied to */
+/* A on the left or the right. */
+/* = 'L': Left, compute A := P*A */
+/* = 'R': Right, compute A:= A*P**T */
+
+/* PIVOT (input) CHARACTER*1 */
+/* Specifies the plane for which P(k) is a plane rotation */
+/* matrix. */
+/* = 'V': Variable pivot, the plane (k,k+1) */
+/* = 'T': Top pivot, the plane (1,k+1) */
+/* = 'B': Bottom pivot, the plane (k,z) */
+
+/* DIRECT (input) CHARACTER*1 */
+/* Specifies whether P is a forward or backward sequence of */
+/* plane rotations. */
+/* = 'F': Forward, P = P(z-1)*...*P(2)*P(1) */
+/* = 'B': Backward, P = P(1)*P(2)*...*P(z-1) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. If m <= 1, an immediate */
+/* return is effected. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. If n <= 1, an */
+/* immediate return is effected. */
+
+/* C (input) DOUBLE PRECISION array, dimension */
+/* (M-1) if SIDE = 'L' */
+/* (N-1) if SIDE = 'R' */
+/* The cosines c(k) of the plane rotations. */
+
+/* S (input) DOUBLE PRECISION array, dimension */
+/* (M-1) if SIDE = 'L' */
+/* (N-1) if SIDE = 'R' */
+/* The sines s(k) of the plane rotations. The 2-by-2 plane */
+/* rotation part of the matrix P(k), R(k), has the form */
+/* R(k) = ( c(k) s(k) ) */
+/* ( -s(k) c(k) ). */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The M-by-N matrix A. On exit, A is overwritten by P*A if */
+/* SIDE = 'R' or by A*P**T if SIDE = 'L'. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ --c__;
+ --s;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! (lsame_(side, "L") || lsame_(side, "R"))) {
+ info = 1;
+ } else if (! (lsame_(pivot, "V") || lsame_(pivot,
+ "T") || lsame_(pivot, "B"))) {
+ info = 2;
+ } else if (! (lsame_(direct, "F") || lsame_(direct,
+ "B"))) {
+ info = 3;
+ } else if (*m < 0) {
+ info = 4;
+ } else if (*n < 0) {
+ info = 5;
+ } else if (*lda < max(1,*m)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("DLASR ", &info);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+ if (lsame_(side, "L")) {
+
+/* Form P * A */
+
+ if (lsame_(pivot, "V")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[j + 1 + i__ * a_dim1];
+ a[j + 1 + i__ * a_dim1] = ctemp * temp - stemp *
+ a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = stemp * temp + ctemp * a[j
+ + i__ * a_dim1];
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[j + 1 + i__ * a_dim1];
+ a[j + 1 + i__ * a_dim1] = ctemp * temp - stemp *
+ a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = stemp * temp + ctemp * a[j
+ + i__ * a_dim1];
+/* L30: */
+ }
+ }
+/* L40: */
+ }
+ }
+ } else if (lsame_(pivot, "T")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m;
+ for (j = 2; j <= i__1; ++j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1. || stemp != 0.) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = ctemp * temp - stemp * a[
+ i__ * a_dim1 + 1];
+ a[i__ * a_dim1 + 1] = stemp * temp + ctemp * a[
+ i__ * a_dim1 + 1];
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m; j >= 2; --j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1. || stemp != 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = ctemp * temp - stemp * a[
+ i__ * a_dim1 + 1];
+ a[i__ * a_dim1 + 1] = stemp * temp + ctemp * a[
+ i__ * a_dim1 + 1];
+/* L70: */
+ }
+ }
+/* L80: */
+ }
+ }
+ } else if (lsame_(pivot, "B")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = stemp * a[*m + i__ * a_dim1]
+ + ctemp * temp;
+ a[*m + i__ * a_dim1] = ctemp * a[*m + i__ *
+ a_dim1] - stemp * temp;
+/* L90: */
+ }
+ }
+/* L100: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = stemp * a[*m + i__ * a_dim1]
+ + ctemp * temp;
+ a[*m + i__ * a_dim1] = ctemp * a[*m + i__ *
+ a_dim1] - stemp * temp;
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+ }
+ }
+ } else if (lsame_(side, "R")) {
+
+/* Form A * P' */
+
+ if (lsame_(pivot, "V")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[i__ + (j + 1) * a_dim1];
+ a[i__ + (j + 1) * a_dim1] = ctemp * temp - stemp *
+ a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = stemp * temp + ctemp * a[
+ i__ + j * a_dim1];
+/* L130: */
+ }
+ }
+/* L140: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[i__ + (j + 1) * a_dim1];
+ a[i__ + (j + 1) * a_dim1] = ctemp * temp - stemp *
+ a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = stemp * temp + ctemp * a[
+ i__ + j * a_dim1];
+/* L150: */
+ }
+ }
+/* L160: */
+ }
+ }
+ } else if (lsame_(pivot, "T")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1. || stemp != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = ctemp * temp - stemp * a[
+ i__ + a_dim1];
+ a[i__ + a_dim1] = stemp * temp + ctemp * a[i__ +
+ a_dim1];
+/* L170: */
+ }
+ }
+/* L180: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n; j >= 2; --j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1. || stemp != 0.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = ctemp * temp - stemp * a[
+ i__ + a_dim1];
+ a[i__ + a_dim1] = stemp * temp + ctemp * a[i__ +
+ a_dim1];
+/* L190: */
+ }
+ }
+/* L200: */
+ }
+ }
+ } else if (lsame_(pivot, "B")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = stemp * a[i__ + *n * a_dim1]
+ + ctemp * temp;
+ a[i__ + *n * a_dim1] = ctemp * a[i__ + *n *
+ a_dim1] - stemp * temp;
+/* L210: */
+ }
+ }
+/* L220: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = stemp * a[i__ + *n * a_dim1]
+ + ctemp * temp;
+ a[i__ + *n * a_dim1] = ctemp * a[i__ + *n *
+ a_dim1] - stemp * temp;
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DLASR */
+
+} /* dlasr_ */
diff --git a/contrib/libs/clapack/dlasrt.c b/contrib/libs/clapack/dlasrt.c
new file mode 100644
index 0000000000..5df285c3a7
--- /dev/null
+++ b/contrib/libs/clapack/dlasrt.c
@@ -0,0 +1,286 @@
+/* dlasrt.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlasrt_(char *id, integer *n, doublereal *d__, integer *
+ info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal d1, d2, d3;
+ integer dir;
+ doublereal tmp;
+ integer endd;
+ extern logical lsame_(char *, char *);
+ integer stack[64] /* was [2][32] */;
+ doublereal dmnmx;
+ integer start;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ integer stkpnt;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Sort the numbers in D in increasing order (if ID = 'I') or */
+/* in decreasing order (if ID = 'D' ). */
+
+/* Use Quick Sort, reverting to Insertion sort on arrays of */
+/* size <= 20. Dimension of STACK limits N to about 2**32. */
+
+/* Arguments */
+/* ========= */
+
+/* ID (input) CHARACTER*1 */
+/* = 'I': sort D in increasing order; */
+/* = 'D': sort D in decreasing order. */
+
+/* N (input) INTEGER */
+/* The length of the array D. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the array to be sorted. */
+/* On exit, D has been sorted into increasing order */
+/* (D(1) <= ... <= D(N) ) or into decreasing order */
+/* (D(1) >= ... >= D(N) ), depending on ID. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input paramters. */
+
+ /* Parameter adjustments */
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+ dir = -1;
+ if (lsame_(id, "D")) {
+ dir = 0;
+ } else if (lsame_(id, "I")) {
+ dir = 1;
+ }
+ if (dir == -1) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASRT", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 1) {
+ return 0;
+ }
+
+ stkpnt = 1;
+ stack[0] = 1;
+ stack[1] = *n;
+L10:
+ start = stack[(stkpnt << 1) - 2];
+ endd = stack[(stkpnt << 1) - 1];
+ --stkpnt;
+ if (endd - start <= 20 && endd - start > 0) {
+
+/* Do Insertion sort on D( START:ENDD ) */
+
+ if (dir == 0) {
+
+/* Sort into decreasing order */
+
+ i__1 = endd;
+ for (i__ = start + 1; i__ <= i__1; ++i__) {
+ i__2 = start + 1;
+ for (j = i__; j >= i__2; --j) {
+ if (d__[j] > d__[j - 1]) {
+ dmnmx = d__[j];
+ d__[j] = d__[j - 1];
+ d__[j - 1] = dmnmx;
+ } else {
+ goto L30;
+ }
+/* L20: */
+ }
+L30:
+ ;
+ }
+
+ } else {
+
+/* Sort into increasing order */
+
+ i__1 = endd;
+ for (i__ = start + 1; i__ <= i__1; ++i__) {
+ i__2 = start + 1;
+ for (j = i__; j >= i__2; --j) {
+ if (d__[j] < d__[j - 1]) {
+ dmnmx = d__[j];
+ d__[j] = d__[j - 1];
+ d__[j - 1] = dmnmx;
+ } else {
+ goto L50;
+ }
+/* L40: */
+ }
+L50:
+ ;
+ }
+
+ }
+
+ } else if (endd - start > 20) {
+
+/* Partition D( START:ENDD ) and stack parts, largest one first */
+
+/* Choose partition entry as median of 3 */
+
+ d1 = d__[start];
+ d2 = d__[endd];
+ i__ = (start + endd) / 2;
+ d3 = d__[i__];
+ if (d1 < d2) {
+ if (d3 < d1) {
+ dmnmx = d1;
+ } else if (d3 < d2) {
+ dmnmx = d3;
+ } else {
+ dmnmx = d2;
+ }
+ } else {
+ if (d3 < d2) {
+ dmnmx = d2;
+ } else if (d3 < d1) {
+ dmnmx = d3;
+ } else {
+ dmnmx = d1;
+ }
+ }
+
+ if (dir == 0) {
+
+/* Sort into decreasing order */
+
+ i__ = start - 1;
+ j = endd + 1;
+L60:
+L70:
+ --j;
+ if (d__[j] < dmnmx) {
+ goto L70;
+ }
+L80:
+ ++i__;
+ if (d__[i__] > dmnmx) {
+ goto L80;
+ }
+ if (i__ < j) {
+ tmp = d__[i__];
+ d__[i__] = d__[j];
+ d__[j] = tmp;
+ goto L60;
+ }
+ if (j - start > endd - j - 1) {
+ ++stkpnt;
+ stack[(stkpnt << 1) - 2] = start;
+ stack[(stkpnt << 1) - 1] = j;
+ ++stkpnt;
+ stack[(stkpnt << 1) - 2] = j + 1;
+ stack[(stkpnt << 1) - 1] = endd;
+ } else {
+ ++stkpnt;
+ stack[(stkpnt << 1) - 2] = j + 1;
+ stack[(stkpnt << 1) - 1] = endd;
+ ++stkpnt;
+ stack[(stkpnt << 1) - 2] = start;
+ stack[(stkpnt << 1) - 1] = j;
+ }
+ } else {
+
+/* Sort into increasing order */
+
+ i__ = start - 1;
+ j = endd + 1;
+L90:
+L100:
+ --j;
+ if (d__[j] > dmnmx) {
+ goto L100;
+ }
+L110:
+ ++i__;
+ if (d__[i__] < dmnmx) {
+ goto L110;
+ }
+ if (i__ < j) {
+ tmp = d__[i__];
+ d__[i__] = d__[j];
+ d__[j] = tmp;
+ goto L90;
+ }
+ if (j - start > endd - j - 1) {
+ ++stkpnt;
+ stack[(stkpnt << 1) - 2] = start;
+ stack[(stkpnt << 1) - 1] = j;
+ ++stkpnt;
+ stack[(stkpnt << 1) - 2] = j + 1;
+ stack[(stkpnt << 1) - 1] = endd;
+ } else {
+ ++stkpnt;
+ stack[(stkpnt << 1) - 2] = j + 1;
+ stack[(stkpnt << 1) - 1] = endd;
+ ++stkpnt;
+ stack[(stkpnt << 1) - 2] = start;
+ stack[(stkpnt << 1) - 1] = j;
+ }
+ }
+ }
+ if (stkpnt > 0) {
+ goto L10;
+ }
+ return 0;
+
+/* End of DLASRT */
+
+} /* dlasrt_ */
diff --git a/contrib/libs/clapack/dlassq.c b/contrib/libs/clapack/dlassq.c
new file mode 100644
index 0000000000..9b3aee5ee4
--- /dev/null
+++ b/contrib/libs/clapack/dlassq.c
@@ -0,0 +1,116 @@
+/* dlassq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlassq_(integer *n, doublereal *x, integer *incx,
+ doublereal *scale, doublereal *sumsq)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ doublereal d__1;
+
+ /* Local variables */
+ integer ix;
+ doublereal absxi;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASSQ returns the values scl and smsq such that */
+
+/* ( scl**2 )*smsq = x( 1 )**2 +...+ x( n )**2 + ( scale**2 )*sumsq, */
+
+/* where x( i ) = X( 1 + ( i - 1 )*INCX ). The value of sumsq is */
+/* assumed to be non-negative and scl returns the value */
+
+/* scl = max( scale, abs( x( i ) ) ). */
+
+/* scale and sumsq must be supplied in SCALE and SUMSQ and */
+/* scl and smsq are overwritten on SCALE and SUMSQ respectively. */
+
+/* The routine makes only one pass through the vector x. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of elements to be used from the vector X. */
+
+/* X (input) DOUBLE PRECISION array, dimension (N) */
+/* The vector for which a scaled sum of squares is computed. */
+/* x( i ) = X( 1 + ( i - 1 )*INCX ), 1 <= i <= n. */
+
+/* INCX (input) INTEGER */
+/* The increment between successive values of the vector X. */
+/* INCX > 0. */
+
+/* SCALE (input/output) DOUBLE PRECISION */
+/* On entry, the value scale in the equation above. */
+/* On exit, SCALE is overwritten with scl , the scaling factor */
+/* for the sum of squares. */
+
+/* SUMSQ (input/output) DOUBLE PRECISION */
+/* On entry, the value sumsq in the equation above. */
+/* On exit, SUMSQ is overwritten with smsq , the basic sum of */
+/* squares from which scl has been factored out. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*n > 0) {
+ i__1 = (*n - 1) * *incx + 1;
+ i__2 = *incx;
+ for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
+ if (x[ix] != 0.) {
+ absxi = (d__1 = x[ix], abs(d__1));
+ if (*scale < absxi) {
+/* Computing 2nd power */
+ d__1 = *scale / absxi;
+ *sumsq = *sumsq * (d__1 * d__1) + 1;
+ *scale = absxi;
+ } else {
+/* Computing 2nd power */
+ d__1 = absxi / *scale;
+ *sumsq += d__1 * d__1;
+ }
+ }
+/* L10: */
+ }
+ }
+ return 0;
+
+/* End of DLASSQ */
+
+} /* dlassq_ */
diff --git a/contrib/libs/clapack/dlasv2.c b/contrib/libs/clapack/dlasv2.c
new file mode 100644
index 0000000000..2418af3b60
--- /dev/null
+++ b/contrib/libs/clapack/dlasv2.c
@@ -0,0 +1,274 @@
+/* dlasv2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b3 = 2.;
+static doublereal c_b4 = 1.;
+
+/* Subroutine */ int dlasv2_(doublereal *f, doublereal *g, doublereal *h__,
+ doublereal *ssmin, doublereal *ssmax, doublereal *snr, doublereal *
+ csr, doublereal *snl, doublereal *csl)
+{
+ /* System generated locals */
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ doublereal a, d__, l, m, r__, s, t, fa, ga, ha, ft, gt, ht, mm, tt, clt,
+ crt, slt, srt;
+ integer pmax;
+ doublereal temp;
+ logical swap;
+ doublereal tsign;
+ extern doublereal dlamch_(char *);
+ logical gasmal;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASV2 computes the singular value decomposition of a 2-by-2 */
+/* triangular matrix */
+/* [ F G ] */
+/* [ 0 H ]. */
+/* On return, abs(SSMAX) is the larger singular value, abs(SSMIN) is the */
+/* smaller singular value, and (CSL,SNL) and (CSR,SNR) are the left and */
+/* right singular vectors for abs(SSMAX), giving the decomposition */
+
+/* [ CSL SNL ] [ F G ] [ CSR -SNR ] = [ SSMAX 0 ] */
+/* [-SNL CSL ] [ 0 H ] [ SNR CSR ] [ 0 SSMIN ]. */
+
+/* Arguments */
+/* ========= */
+
+/* F (input) DOUBLE PRECISION */
+/* The (1,1) element of the 2-by-2 matrix. */
+
+/* G (input) DOUBLE PRECISION */
+/* The (1,2) element of the 2-by-2 matrix. */
+
+/* H (input) DOUBLE PRECISION */
+/* The (2,2) element of the 2-by-2 matrix. */
+
+/* SSMIN (output) DOUBLE PRECISION */
+/* abs(SSMIN) is the smaller singular value. */
+
+/* SSMAX (output) DOUBLE PRECISION */
+/* abs(SSMAX) is the larger singular value. */
+
+/* SNL (output) DOUBLE PRECISION */
+/* CSL (output) DOUBLE PRECISION */
+/* The vector (CSL, SNL) is a unit left singular vector for the */
+/* singular value abs(SSMAX). */
+
+/* SNR (output) DOUBLE PRECISION */
+/* CSR (output) DOUBLE PRECISION */
+/* The vector (CSR, SNR) is a unit right singular vector for the */
+/* singular value abs(SSMAX). */
+
+/* Further Details */
+/* =============== */
+
+/* Any input parameter may be aliased with any output parameter. */
+
+/* Barring over/underflow and assuming a guard digit in subtraction, all */
+/* output quantities are correct to within a few units in the last */
+/* place (ulps). */
+
+/* In IEEE arithmetic, the code works correctly if one matrix element is */
+/* infinite. */
+
+/* Overflow will not occur unless the largest singular value itself */
+/* overflows or is within a few ulps of overflow. (On machines with */
+/* partial overflow, like the Cray, overflow may occur if the largest */
+/* singular value is within a factor of 2 of overflow.) */
+
+/* Underflow is harmless if underflow is gradual. Otherwise, results */
+/* may correspond to a matrix modified by perturbations of size near */
+/* the underflow threshold. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ ft = *f;
+ fa = abs(ft);
+ ht = *h__;
+ ha = abs(*h__);
+
+/* PMAX points to the maximum absolute element of matrix */
+/* PMAX = 1 if F largest in absolute values */
+/* PMAX = 2 if G largest in absolute values */
+/* PMAX = 3 if H largest in absolute values */
+
+ pmax = 1;
+ swap = ha > fa;
+ if (swap) {
+ pmax = 3;
+ temp = ft;
+ ft = ht;
+ ht = temp;
+ temp = fa;
+ fa = ha;
+ ha = temp;
+
+/* Now FA .ge. HA */
+
+ }
+ gt = *g;
+ ga = abs(gt);
+ if (ga == 0.) {
+
+/* Diagonal matrix */
+
+ *ssmin = ha;
+ *ssmax = fa;
+ clt = 1.;
+ crt = 1.;
+ slt = 0.;
+ srt = 0.;
+ } else {
+ gasmal = TRUE_;
+ if (ga > fa) {
+ pmax = 2;
+ if (fa / ga < dlamch_("EPS")) {
+
+/* Case of very large GA */
+
+ gasmal = FALSE_;
+ *ssmax = ga;
+ if (ha > 1.) {
+ *ssmin = fa / (ga / ha);
+ } else {
+ *ssmin = fa / ga * ha;
+ }
+ clt = 1.;
+ slt = ht / gt;
+ srt = 1.;
+ crt = ft / gt;
+ }
+ }
+ if (gasmal) {
+
+/* Normal case */
+
+ d__ = fa - ha;
+ if (d__ == fa) {
+
+/* Copes with infinite F or H */
+
+ l = 1.;
+ } else {
+ l = d__ / fa;
+ }
+
+/* Note that 0 .le. L .le. 1 */
+
+ m = gt / ft;
+
+/* Note that abs(M) .le. 1/macheps */
+
+ t = 2. - l;
+
+/* Note that T .ge. 1 */
+
+ mm = m * m;
+ tt = t * t;
+ s = sqrt(tt + mm);
+
+/* Note that 1 .le. S .le. 1 + 1/macheps */
+
+ if (l == 0.) {
+ r__ = abs(m);
+ } else {
+ r__ = sqrt(l * l + mm);
+ }
+
+/* Note that 0 .le. R .le. 1 + 1/macheps */
+
+ a = (s + r__) * .5;
+
+/* Note that 1 .le. A .le. 1 + abs(M) */
+
+ *ssmin = ha / a;
+ *ssmax = fa * a;
+ if (mm == 0.) {
+
+/* Note that M is very tiny */
+
+ if (l == 0.) {
+ t = d_sign(&c_b3, &ft) * d_sign(&c_b4, &gt);
+ } else {
+ t = gt / d_sign(&d__, &ft) + m / t;
+ }
+ } else {
+ t = (m / (s + t) + m / (r__ + l)) * (a + 1.);
+ }
+ l = sqrt(t * t + 4.);
+ crt = 2. / l;
+ srt = t / l;
+ clt = (crt + srt * m) / a;
+ slt = ht / ft * srt / a;
+ }
+ }
+ if (swap) {
+ *csl = srt;
+ *snl = crt;
+ *csr = slt;
+ *snr = clt;
+ } else {
+ *csl = clt;
+ *snl = slt;
+ *csr = crt;
+ *snr = srt;
+ }
+
+/* Correct signs of SSMAX and SSMIN */
+
+ if (pmax == 1) {
+ tsign = d_sign(&c_b4, csr) * d_sign(&c_b4, csl) * d_sign(&c_b4, f);
+ }
+ if (pmax == 2) {
+ tsign = d_sign(&c_b4, snr) * d_sign(&c_b4, csl) * d_sign(&c_b4, g);
+ }
+ if (pmax == 3) {
+ tsign = d_sign(&c_b4, snr) * d_sign(&c_b4, snl) * d_sign(&c_b4, h__);
+ }
+ *ssmax = d_sign(ssmax, &tsign);
+ d__1 = tsign * d_sign(&c_b4, f) * d_sign(&c_b4, h__);
+ *ssmin = d_sign(ssmin, &d__1);
+ return 0;
+
+/* End of DLASV2 */
+
+} /* dlasv2_ */
diff --git a/contrib/libs/clapack/dlaswp.c b/contrib/libs/clapack/dlaswp.c
new file mode 100644
index 0000000000..862938ba6e
--- /dev/null
+++ b/contrib/libs/clapack/dlaswp.c
@@ -0,0 +1,158 @@
+/* dlaswp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlaswp_(integer *n, doublereal *a, integer *lda, integer
+ *k1, integer *k2, integer *ipiv, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j, k, i1, i2, n32, ip, ix, ix0, inc;
+ doublereal temp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASWP performs a series of row interchanges on the matrix A. */
+/* One row interchange is initiated for each of rows K1 through K2 of A. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the matrix of column dimension N to which the row */
+/* interchanges will be applied. */
+/* On exit, the permuted matrix. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+
+/* K1 (input) INTEGER */
+/* The first element of IPIV for which a row interchange will */
+/* be done. */
+
+/* K2 (input) INTEGER */
+/* The last element of IPIV for which a row interchange will */
+/* be done. */
+
+/* IPIV (input) INTEGER array, dimension (K2*abs(INCX)) */
+/* The vector of pivot indices. Only the elements in positions */
+/* K1 through K2 of IPIV are accessed. */
+/* IPIV(K) = L implies rows K and L are to be interchanged. */
+
+/* INCX (input) INTEGER */
+/* The increment between successive values of IPIV. If IPIV */
+/* is negative, the pivots are applied in reverse order. */
+
+/* Further Details */
+/* =============== */
+
+/* Modified by */
+/* R. C. Whaley, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Interchange row I with row IPIV(I) for each of rows K1 through K2. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ if (*incx > 0) {
+ ix0 = *k1;
+ i1 = *k1;
+ i2 = *k2;
+ inc = 1;
+ } else if (*incx < 0) {
+ ix0 = (1 - *k2) * *incx + 1;
+ i1 = *k2;
+ i2 = *k1;
+ inc = -1;
+ } else {
+ return 0;
+ }
+
+ n32 = *n / 32 << 5;
+ if (n32 != 0) {
+ i__1 = n32;
+ for (j = 1; j <= i__1; j += 32) {
+ ix = ix0;
+ i__2 = i2;
+ i__3 = inc;
+ for (i__ = i1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3)
+ {
+ ip = ipiv[ix];
+ if (ip != i__) {
+ i__4 = j + 31;
+ for (k = j; k <= i__4; ++k) {
+ temp = a[i__ + k * a_dim1];
+ a[i__ + k * a_dim1] = a[ip + k * a_dim1];
+ a[ip + k * a_dim1] = temp;
+/* L10: */
+ }
+ }
+ ix += *incx;
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+ if (n32 != *n) {
+ ++n32;
+ ix = ix0;
+ i__1 = i2;
+ i__3 = inc;
+ for (i__ = i1; i__3 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__3) {
+ ip = ipiv[ix];
+ if (ip != i__) {
+ i__2 = *n;
+ for (k = n32; k <= i__2; ++k) {
+ temp = a[i__ + k * a_dim1];
+ a[i__ + k * a_dim1] = a[ip + k * a_dim1];
+ a[ip + k * a_dim1] = temp;
+/* L40: */
+ }
+ }
+ ix += *incx;
+/* L50: */
+ }
+ }
+
+ return 0;
+
+/* End of DLASWP */
+
+} /* dlaswp_ */
diff --git a/contrib/libs/clapack/dlasy2.c b/contrib/libs/clapack/dlasy2.c
new file mode 100644
index 0000000000..352463d559
--- /dev/null
+++ b/contrib/libs/clapack/dlasy2.c
@@ -0,0 +1,478 @@
+/* dlasy2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__4 = 4;
+static integer c__1 = 1;
+static integer c__16 = 16;
+static integer c__0 = 0;
+
+/* Subroutine */ int dlasy2_(logical *ltranl, logical *ltranr, integer *isgn,
+ integer *n1, integer *n2, doublereal *tl, integer *ldtl, doublereal *
+ tr, integer *ldtr, doublereal *b, integer *ldb, doublereal *scale,
+ doublereal *x, integer *ldx, doublereal *xnorm, integer *info)
+{
+ /* Initialized data */
+
+ static integer locu12[4] = { 3,4,1,2 };
+ static integer locl21[4] = { 2,1,4,3 };
+ static integer locu22[4] = { 4,3,2,1 };
+ static logical xswpiv[4] = { FALSE_,FALSE_,TRUE_,TRUE_ };
+ static logical bswpiv[4] = { FALSE_,TRUE_,FALSE_,TRUE_ };
+
+ /* System generated locals */
+ integer b_dim1, b_offset, tl_dim1, tl_offset, tr_dim1, tr_offset, x_dim1,
+ x_offset;
+ doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8;
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal x2[2], l21, u11, u12;
+ integer ip, jp;
+ doublereal u22, t16[16] /* was [4][4] */, gam, bet, eps, sgn, tmp[4],
+ tau1, btmp[4], smin;
+ integer ipiv;
+ doublereal temp;
+ integer jpiv[4];
+ doublereal xmax;
+ integer ipsv, jpsv;
+ logical bswap;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dswap_(integer *, doublereal *, integer
+ *, doublereal *, integer *);
+ logical xswap;
+ extern doublereal dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ doublereal smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASY2 solves for the N1 by N2 matrix X, 1 <= N1,N2 <= 2, in */
+
+/* op(TL)*X + ISGN*X*op(TR) = SCALE*B, */
+
+/* where TL is N1 by N1, TR is N2 by N2, B is N1 by N2, and ISGN = 1 or */
+/* -1. op(T) = T or T', where T' denotes the transpose of T. */
+
+/* Arguments */
+/* ========= */
+
+/* LTRANL (input) LOGICAL */
+/* On entry, LTRANL specifies the op(TL): */
+/* = .FALSE., op(TL) = TL, */
+/* = .TRUE., op(TL) = TL'. */
+
+/* LTRANR (input) LOGICAL */
+/* On entry, LTRANR specifies the op(TR): */
+/* = .FALSE., op(TR) = TR, */
+/* = .TRUE., op(TR) = TR'. */
+
+/* ISGN (input) INTEGER */
+/* On entry, ISGN specifies the sign of the equation */
+/* as described before. ISGN may only be 1 or -1. */
+
+/* N1 (input) INTEGER */
+/* On entry, N1 specifies the order of matrix TL. */
+/* N1 may only be 0, 1 or 2. */
+
+/* N2 (input) INTEGER */
+/* On entry, N2 specifies the order of matrix TR. */
+/* N2 may only be 0, 1 or 2. */
+
+/* TL (input) DOUBLE PRECISION array, dimension (LDTL,2) */
+/* On entry, TL contains an N1 by N1 matrix. */
+
+/* LDTL (input) INTEGER */
+/* The leading dimension of the matrix TL. LDTL >= max(1,N1). */
+
+/* TR (input) DOUBLE PRECISION array, dimension (LDTR,2) */
+/* On entry, TR contains an N2 by N2 matrix. */
+
+/* LDTR (input) INTEGER */
+/* The leading dimension of the matrix TR. LDTR >= max(1,N2). */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,2) */
+/* On entry, the N1 by N2 matrix B contains the right-hand */
+/* side of the equation. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the matrix B. LDB >= max(1,N1). */
+
+/* SCALE (output) DOUBLE PRECISION */
+/* On exit, SCALE contains the scale factor. SCALE is chosen */
+/* less than or equal to 1 to prevent the solution overflowing. */
+
+/* X (output) DOUBLE PRECISION array, dimension (LDX,2) */
+/* On exit, X contains the N1 by N2 solution. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the matrix X. LDX >= max(1,N1). */
+
+/* XNORM (output) DOUBLE PRECISION */
+/* On exit, XNORM is the infinity-norm of the solution. */
+
+/* INFO (output) INTEGER */
+/* On exit, INFO is set to */
+/* 0: successful exit. */
+/* 1: TL and TR have too close eigenvalues, so TL or */
+/* TR is perturbed to get a nonsingular equation. */
+/* NOTE: In the interests of speed, this routine does not */
+/* check the inputs for errors. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Data statements .. */
+ /* Parameter adjustments */
+ tl_dim1 = *ldtl;
+ tl_offset = 1 + tl_dim1;
+ tl -= tl_offset;
+ tr_dim1 = *ldtr;
+ tr_offset = 1 + tr_dim1;
+ tr -= tr_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+
+ /* Function Body */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Do not check the input parameters for errors */
+
+ *info = 0;
+
+/* Quick return if possible */
+
+ if (*n1 == 0 || *n2 == 0) {
+ return 0;
+ }
+
+/* Set constants to control overflow */
+
+ eps = dlamch_("P");
+ smlnum = dlamch_("S") / eps;
+ sgn = (doublereal) (*isgn);
+
+ k = *n1 + *n1 + *n2 - 2;
+ switch (k) {
+ case 1: goto L10;
+ case 2: goto L20;
+ case 3: goto L30;
+ case 4: goto L50;
+ }
+
+/* 1 by 1: TL11*X + SGN*X*TR11 = B11 */
+
+L10:
+ tau1 = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1];
+ bet = abs(tau1);
+ if (bet <= smlnum) {
+ tau1 = smlnum;
+ bet = smlnum;
+ *info = 1;
+ }
+
+ *scale = 1.;
+ gam = (d__1 = b[b_dim1 + 1], abs(d__1));
+ if (smlnum * gam > bet) {
+ *scale = 1. / gam;
+ }
+
+ x[x_dim1 + 1] = b[b_dim1 + 1] * *scale / tau1;
+ *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1));
+ return 0;
+
+/* 1 by 2: */
+/* TL11*[X11 X12] + ISGN*[X11 X12]*op[TR11 TR12] = [B11 B12] */
+/* [TR21 TR22] */
+
+L20:
+
+/* Computing MAX */
+/* Computing MAX */
+ d__7 = (d__1 = tl[tl_dim1 + 1], abs(d__1)), d__8 = (d__2 = tr[tr_dim1 + 1]
+ , abs(d__2)), d__7 = max(d__7,d__8), d__8 = (d__3 = tr[(tr_dim1 <<
+ 1) + 1], abs(d__3)), d__7 = max(d__7,d__8), d__8 = (d__4 = tr[
+ tr_dim1 + 2], abs(d__4)), d__7 = max(d__7,d__8), d__8 = (d__5 =
+ tr[(tr_dim1 << 1) + 2], abs(d__5));
+ d__6 = eps * max(d__7,d__8);
+ smin = max(d__6,smlnum);
+ tmp[0] = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1];
+ tmp[3] = tl[tl_dim1 + 1] + sgn * tr[(tr_dim1 << 1) + 2];
+ if (*ltranr) {
+ tmp[1] = sgn * tr[tr_dim1 + 2];
+ tmp[2] = sgn * tr[(tr_dim1 << 1) + 1];
+ } else {
+ tmp[1] = sgn * tr[(tr_dim1 << 1) + 1];
+ tmp[2] = sgn * tr[tr_dim1 + 2];
+ }
+ btmp[0] = b[b_dim1 + 1];
+ btmp[1] = b[(b_dim1 << 1) + 1];
+ goto L40;
+
+/* 2 by 1: */
+/* op[TL11 TL12]*[X11] + ISGN* [X11]*TR11 = [B11] */
+/* [TL21 TL22] [X21] [X21] [B21] */
+
+L30:
+/* Computing MAX */
+/* Computing MAX */
+ d__7 = (d__1 = tr[tr_dim1 + 1], abs(d__1)), d__8 = (d__2 = tl[tl_dim1 + 1]
+ , abs(d__2)), d__7 = max(d__7,d__8), d__8 = (d__3 = tl[(tl_dim1 <<
+ 1) + 1], abs(d__3)), d__7 = max(d__7,d__8), d__8 = (d__4 = tl[
+ tl_dim1 + 2], abs(d__4)), d__7 = max(d__7,d__8), d__8 = (d__5 =
+ tl[(tl_dim1 << 1) + 2], abs(d__5));
+ d__6 = eps * max(d__7,d__8);
+ smin = max(d__6,smlnum);
+ tmp[0] = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1];
+ tmp[3] = tl[(tl_dim1 << 1) + 2] + sgn * tr[tr_dim1 + 1];
+ if (*ltranl) {
+ tmp[1] = tl[(tl_dim1 << 1) + 1];
+ tmp[2] = tl[tl_dim1 + 2];
+ } else {
+ tmp[1] = tl[tl_dim1 + 2];
+ tmp[2] = tl[(tl_dim1 << 1) + 1];
+ }
+ btmp[0] = b[b_dim1 + 1];
+ btmp[1] = b[b_dim1 + 2];
+L40:
+
+/* Solve 2 by 2 system using complete pivoting. */
+/* Set pivots less than SMIN to SMIN. */
+
+ ipiv = idamax_(&c__4, tmp, &c__1);
+ u11 = tmp[ipiv - 1];
+ if (abs(u11) <= smin) {
+ *info = 1;
+ u11 = smin;
+ }
+ u12 = tmp[locu12[ipiv - 1] - 1];
+ l21 = tmp[locl21[ipiv - 1] - 1] / u11;
+ u22 = tmp[locu22[ipiv - 1] - 1] - u12 * l21;
+ xswap = xswpiv[ipiv - 1];
+ bswap = bswpiv[ipiv - 1];
+ if (abs(u22) <= smin) {
+ *info = 1;
+ u22 = smin;
+ }
+ if (bswap) {
+ temp = btmp[1];
+ btmp[1] = btmp[0] - l21 * temp;
+ btmp[0] = temp;
+ } else {
+ btmp[1] -= l21 * btmp[0];
+ }
+ *scale = 1.;
+ if (smlnum * 2. * abs(btmp[1]) > abs(u22) || smlnum * 2. * abs(btmp[0]) >
+ abs(u11)) {
+/* Computing MAX */
+ d__1 = abs(btmp[0]), d__2 = abs(btmp[1]);
+ *scale = .5 / max(d__1,d__2);
+ btmp[0] *= *scale;
+ btmp[1] *= *scale;
+ }
+ x2[1] = btmp[1] / u22;
+ x2[0] = btmp[0] / u11 - u12 / u11 * x2[1];
+ if (xswap) {
+ temp = x2[1];
+ x2[1] = x2[0];
+ x2[0] = temp;
+ }
+ x[x_dim1 + 1] = x2[0];
+ if (*n1 == 1) {
+ x[(x_dim1 << 1) + 1] = x2[1];
+ *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1)) + (d__2 = x[(x_dim1 << 1)
+ + 1], abs(d__2));
+ } else {
+ x[x_dim1 + 2] = x2[1];
+/* Computing MAX */
+ d__3 = (d__1 = x[x_dim1 + 1], abs(d__1)), d__4 = (d__2 = x[x_dim1 + 2]
+ , abs(d__2));
+ *xnorm = max(d__3,d__4);
+ }
+ return 0;
+
+/* 2 by 2: */
+/* op[TL11 TL12]*[X11 X12] +ISGN* [X11 X12]*op[TR11 TR12] = [B11 B12] */
+/* [TL21 TL22] [X21 X22] [X21 X22] [TR21 TR22] [B21 B22] */
+
+/* Solve equivalent 4 by 4 system using complete pivoting. */
+/* Set pivots less than SMIN to SMIN. */
+
+L50:
+/* Computing MAX */
+ d__5 = (d__1 = tr[tr_dim1 + 1], abs(d__1)), d__6 = (d__2 = tr[(tr_dim1 <<
+ 1) + 1], abs(d__2)), d__5 = max(d__5,d__6), d__6 = (d__3 = tr[
+ tr_dim1 + 2], abs(d__3)), d__5 = max(d__5,d__6), d__6 = (d__4 =
+ tr[(tr_dim1 << 1) + 2], abs(d__4));
+ smin = max(d__5,d__6);
+/* Computing MAX */
+ d__5 = smin, d__6 = (d__1 = tl[tl_dim1 + 1], abs(d__1)), d__5 = max(d__5,
+ d__6), d__6 = (d__2 = tl[(tl_dim1 << 1) + 1], abs(d__2)), d__5 =
+ max(d__5,d__6), d__6 = (d__3 = tl[tl_dim1 + 2], abs(d__3)), d__5 =
+ max(d__5,d__6), d__6 = (d__4 = tl[(tl_dim1 << 1) + 2], abs(d__4))
+ ;
+ smin = max(d__5,d__6);
+/* Computing MAX */
+ d__1 = eps * smin;
+ smin = max(d__1,smlnum);
+ btmp[0] = 0.;
+ dcopy_(&c__16, btmp, &c__0, t16, &c__1);
+ t16[0] = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1];
+ t16[5] = tl[(tl_dim1 << 1) + 2] + sgn * tr[tr_dim1 + 1];
+ t16[10] = tl[tl_dim1 + 1] + sgn * tr[(tr_dim1 << 1) + 2];
+ t16[15] = tl[(tl_dim1 << 1) + 2] + sgn * tr[(tr_dim1 << 1) + 2];
+ if (*ltranl) {
+ t16[4] = tl[tl_dim1 + 2];
+ t16[1] = tl[(tl_dim1 << 1) + 1];
+ t16[14] = tl[tl_dim1 + 2];
+ t16[11] = tl[(tl_dim1 << 1) + 1];
+ } else {
+ t16[4] = tl[(tl_dim1 << 1) + 1];
+ t16[1] = tl[tl_dim1 + 2];
+ t16[14] = tl[(tl_dim1 << 1) + 1];
+ t16[11] = tl[tl_dim1 + 2];
+ }
+ if (*ltranr) {
+ t16[8] = sgn * tr[(tr_dim1 << 1) + 1];
+ t16[13] = sgn * tr[(tr_dim1 << 1) + 1];
+ t16[2] = sgn * tr[tr_dim1 + 2];
+ t16[7] = sgn * tr[tr_dim1 + 2];
+ } else {
+ t16[8] = sgn * tr[tr_dim1 + 2];
+ t16[13] = sgn * tr[tr_dim1 + 2];
+ t16[2] = sgn * tr[(tr_dim1 << 1) + 1];
+ t16[7] = sgn * tr[(tr_dim1 << 1) + 1];
+ }
+ btmp[0] = b[b_dim1 + 1];
+ btmp[1] = b[b_dim1 + 2];
+ btmp[2] = b[(b_dim1 << 1) + 1];
+ btmp[3] = b[(b_dim1 << 1) + 2];
+
+/* Perform elimination */
+
+ for (i__ = 1; i__ <= 3; ++i__) {
+ xmax = 0.;
+ for (ip = i__; ip <= 4; ++ip) {
+ for (jp = i__; jp <= 4; ++jp) {
+ if ((d__1 = t16[ip + (jp << 2) - 5], abs(d__1)) >= xmax) {
+ xmax = (d__1 = t16[ip + (jp << 2) - 5], abs(d__1));
+ ipsv = ip;
+ jpsv = jp;
+ }
+/* L60: */
+ }
+/* L70: */
+ }
+ if (ipsv != i__) {
+ dswap_(&c__4, &t16[ipsv - 1], &c__4, &t16[i__ - 1], &c__4);
+ temp = btmp[i__ - 1];
+ btmp[i__ - 1] = btmp[ipsv - 1];
+ btmp[ipsv - 1] = temp;
+ }
+ if (jpsv != i__) {
+ dswap_(&c__4, &t16[(jpsv << 2) - 4], &c__1, &t16[(i__ << 2) - 4],
+ &c__1);
+ }
+ jpiv[i__ - 1] = jpsv;
+ if ((d__1 = t16[i__ + (i__ << 2) - 5], abs(d__1)) < smin) {
+ *info = 1;
+ t16[i__ + (i__ << 2) - 5] = smin;
+ }
+ for (j = i__ + 1; j <= 4; ++j) {
+ t16[j + (i__ << 2) - 5] /= t16[i__ + (i__ << 2) - 5];
+ btmp[j - 1] -= t16[j + (i__ << 2) - 5] * btmp[i__ - 1];
+ for (k = i__ + 1; k <= 4; ++k) {
+ t16[j + (k << 2) - 5] -= t16[j + (i__ << 2) - 5] * t16[i__ + (
+ k << 2) - 5];
+/* L80: */
+ }
+/* L90: */
+ }
+/* L100: */
+ }
+ if (abs(t16[15]) < smin) {
+ t16[15] = smin;
+ }
+ *scale = 1.;
+ if (smlnum * 8. * abs(btmp[0]) > abs(t16[0]) || smlnum * 8. * abs(btmp[1])
+ > abs(t16[5]) || smlnum * 8. * abs(btmp[2]) > abs(t16[10]) ||
+ smlnum * 8. * abs(btmp[3]) > abs(t16[15])) {
+/* Computing MAX */
+ d__1 = abs(btmp[0]), d__2 = abs(btmp[1]), d__1 = max(d__1,d__2), d__2
+ = abs(btmp[2]), d__1 = max(d__1,d__2), d__2 = abs(btmp[3]);
+ *scale = .125 / max(d__1,d__2);
+ btmp[0] *= *scale;
+ btmp[1] *= *scale;
+ btmp[2] *= *scale;
+ btmp[3] *= *scale;
+ }
+ for (i__ = 1; i__ <= 4; ++i__) {
+ k = 5 - i__;
+ temp = 1. / t16[k + (k << 2) - 5];
+ tmp[k - 1] = btmp[k - 1] * temp;
+ for (j = k + 1; j <= 4; ++j) {
+ tmp[k - 1] -= temp * t16[k + (j << 2) - 5] * tmp[j - 1];
+/* L110: */
+ }
+/* L120: */
+ }
+ for (i__ = 1; i__ <= 3; ++i__) {
+ if (jpiv[4 - i__ - 1] != 4 - i__) {
+ temp = tmp[4 - i__ - 1];
+ tmp[4 - i__ - 1] = tmp[jpiv[4 - i__ - 1] - 1];
+ tmp[jpiv[4 - i__ - 1] - 1] = temp;
+ }
+/* L130: */
+ }
+ x[x_dim1 + 1] = tmp[0];
+ x[x_dim1 + 2] = tmp[1];
+ x[(x_dim1 << 1) + 1] = tmp[2];
+ x[(x_dim1 << 1) + 2] = tmp[3];
+/* Computing MAX */
+ d__1 = abs(tmp[0]) + abs(tmp[2]), d__2 = abs(tmp[1]) + abs(tmp[3]);
+ *xnorm = max(d__1,d__2);
+ return 0;
+
+/* End of DLASY2 */
+
+} /* dlasy2_ */
diff --git a/contrib/libs/clapack/dlasyf.c b/contrib/libs/clapack/dlasyf.c
new file mode 100644
index 0000000000..c0d98e2b20
--- /dev/null
+++ b/contrib/libs/clapack/dlasyf.c
@@ -0,0 +1,721 @@
+/* dlasyf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b8 = -1.;
+static doublereal c_b9 = 1.;
+
+/* Subroutine */ int dlasyf_(char *uplo, integer *n, integer *nb, integer *kb,
+ doublereal *a, integer *lda, integer *ipiv, doublereal *w, integer *
+ ldw, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer j, k;
+ doublereal t, r1, d11, d21, d22;
+ integer jb, jj, kk, jp, kp, kw, kkw, imax, jmax;
+ doublereal alpha;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *), dgemm_(char *, char *, integer *, integer *, integer *
+, doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *), dcopy_(integer *,
+ doublereal *, integer *, doublereal *, integer *), dswap_(integer
+ *, doublereal *, integer *, doublereal *, integer *);
+ integer kstep;
+ doublereal absakk;
+ extern integer idamax_(integer *, doublereal *, integer *);
+ doublereal colmax, rowmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLASYF computes a partial factorization of a real symmetric matrix A */
+/* using the Bunch-Kaufman diagonal pivoting method. The partial */
+/* factorization has the form: */
+
+/* A = ( I U12 ) ( A11 0 ) ( I 0 ) if UPLO = 'U', or: */
+/* ( 0 U22 ) ( 0 D ) ( U12' U22' ) */
+
+/* A = ( L11 0 ) ( D 0 ) ( L11' L21' ) if UPLO = 'L' */
+/* ( L21 I ) ( 0 A22 ) ( 0 I ) */
+
+/* where the order of D is at most NB. The actual order is returned in */
+/* the argument KB, and is either NB or NB-1, or N if N <= NB. */
+
+/* DLASYF is an auxiliary routine called by DSYTRF. It uses blocked code */
+/* (calling Level 3 BLAS) to update the submatrix A11 (if UPLO = 'U') or */
+/* A22 (if UPLO = 'L'). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NB (input) INTEGER */
+/* The maximum number of columns of the matrix A that should be */
+/* factored. NB should be at least 2 to allow for 2-by-2 pivot */
+/* blocks. */
+
+/* KB (output) INTEGER */
+/* The number of columns of A that were actually factored. */
+/* KB is either NB-1 or NB, or N if N <= NB. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+/* On exit, A contains details of the partial factorization. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If UPLO = 'U', only the last KB elements of IPIV are set; */
+/* if UPLO = 'L', only the first KB elements are set. */
+
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* W (workspace) DOUBLE PRECISION array, dimension (LDW,NB) */
+
+/* LDW (input) INTEGER */
+/* The leading dimension of the array W. LDW >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* > 0: if INFO = k, D(k,k) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ w_dim1 = *ldw;
+ w_offset = 1 + w_dim1;
+ w -= w_offset;
+
+ /* Function Body */
+ *info = 0;
+
+/* Initialize ALPHA for use in choosing pivot block size. */
+
+ alpha = (sqrt(17.) + 1.) / 8.;
+
+ if (lsame_(uplo, "U")) {
+
+/* Factorize the trailing columns of A using the upper triangle */
+/* of A and working backwards, and compute the matrix W = U12*D */
+/* for use in updating A11 */
+
+/* K is the main loop index, decreasing from N in steps of 1 or 2 */
+
+/* KW is the column of W which corresponds to column K of A */
+
+ k = *n;
+L10:
+ kw = *nb + k - *n;
+
+/* Exit from loop */
+
+ if (k <= *n - *nb + 1 && *nb < *n || k < 1) {
+ goto L30;
+ }
+
+/* Copy column K of A to column KW of W and update it */
+
+ dcopy_(&k, &a[k * a_dim1 + 1], &c__1, &w[kw * w_dim1 + 1], &c__1);
+ if (k < *n) {
+ i__1 = *n - k;
+ dgemv_("No transpose", &k, &i__1, &c_b8, &a[(k + 1) * a_dim1 + 1],
+ lda, &w[k + (kw + 1) * w_dim1], ldw, &c_b9, &w[kw *
+ w_dim1 + 1], &c__1);
+ }
+
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ absakk = (d__1 = w[k + kw * w_dim1], abs(d__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ imax = idamax_(&i__1, &w[kw * w_dim1 + 1], &c__1);
+ colmax = (d__1 = w[imax + kw * w_dim1], abs(d__1));
+ } else {
+ colmax = 0.;
+ }
+
+ if (max(absakk,colmax) == 0.) {
+
+/* Column K is zero: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* Copy column IMAX to column KW-1 of W and update it */
+
+ dcopy_(&imax, &a[imax * a_dim1 + 1], &c__1, &w[(kw - 1) *
+ w_dim1 + 1], &c__1);
+ i__1 = k - imax;
+ dcopy_(&i__1, &a[imax + (imax + 1) * a_dim1], lda, &w[imax +
+ 1 + (kw - 1) * w_dim1], &c__1);
+ if (k < *n) {
+ i__1 = *n - k;
+ dgemv_("No transpose", &k, &i__1, &c_b8, &a[(k + 1) *
+ a_dim1 + 1], lda, &w[imax + (kw + 1) * w_dim1],
+ ldw, &c_b9, &w[(kw - 1) * w_dim1 + 1], &c__1);
+ }
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = k - imax;
+ jmax = imax + idamax_(&i__1, &w[imax + 1 + (kw - 1) * w_dim1],
+ &c__1);
+ rowmax = (d__1 = w[jmax + (kw - 1) * w_dim1], abs(d__1));
+ if (imax > 1) {
+ i__1 = imax - 1;
+ jmax = idamax_(&i__1, &w[(kw - 1) * w_dim1 + 1], &c__1);
+/* Computing MAX */
+ d__2 = rowmax, d__3 = (d__1 = w[jmax + (kw - 1) * w_dim1],
+ abs(d__1));
+ rowmax = max(d__2,d__3);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else if ((d__1 = w[imax + (kw - 1) * w_dim1], abs(d__1)) >=
+ alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+
+/* copy column KW-1 of W to column KW */
+
+ dcopy_(&k, &w[(kw - 1) * w_dim1 + 1], &c__1, &w[kw *
+ w_dim1 + 1], &c__1);
+ } else {
+
+/* interchange rows and columns K-1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+
+ kk = k - kstep + 1;
+ kkw = *nb + kk - *n;
+
+/* Updated column KP is already stored in column KKW of W */
+
+ if (kp != kk) {
+
+/* Copy non-updated column KK to column KP */
+
+ a[kp + k * a_dim1] = a[kk + k * a_dim1];
+ i__1 = k - 1 - kp;
+ dcopy_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + (kp +
+ 1) * a_dim1], lda);
+ dcopy_(&kp, &a[kk * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1], &
+ c__1);
+
+/* Interchange rows KK and KP in last KK columns of A and W */
+
+ i__1 = *n - kk + 1;
+ dswap_(&i__1, &a[kk + kk * a_dim1], lda, &a[kp + kk * a_dim1],
+ lda);
+ i__1 = *n - kk + 1;
+ dswap_(&i__1, &w[kk + kkw * w_dim1], ldw, &w[kp + kkw *
+ w_dim1], ldw);
+ }
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column KW of W now holds */
+
+/* W(k) = U(k)*D(k) */
+
+/* where U(k) is the k-th column of U */
+
+/* Store U(k) in column k of A */
+
+ dcopy_(&k, &w[kw * w_dim1 + 1], &c__1, &a[k * a_dim1 + 1], &
+ c__1);
+ r1 = 1. / a[k + k * a_dim1];
+ i__1 = k - 1;
+ dscal_(&i__1, &r1, &a[k * a_dim1 + 1], &c__1);
+ } else {
+
+/* 2-by-2 pivot block D(k): columns KW and KW-1 of W now */
+/* hold */
+
+/* ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */
+
+/* where U(k) and U(k-1) are the k-th and (k-1)-th columns */
+/* of U */
+
+ if (k > 2) {
+
+/* Store U(k) and U(k-1) in columns k and k-1 of A */
+
+ d21 = w[k - 1 + kw * w_dim1];
+ d11 = w[k + kw * w_dim1] / d21;
+ d22 = w[k - 1 + (kw - 1) * w_dim1] / d21;
+ t = 1. / (d11 * d22 - 1.);
+ d21 = t / d21;
+ i__1 = k - 2;
+ for (j = 1; j <= i__1; ++j) {
+ a[j + (k - 1) * a_dim1] = d21 * (d11 * w[j + (kw - 1)
+ * w_dim1] - w[j + kw * w_dim1]);
+ a[j + k * a_dim1] = d21 * (d22 * w[j + kw * w_dim1] -
+ w[j + (kw - 1) * w_dim1]);
+/* L20: */
+ }
+ }
+
+/* Copy D(k) to A */
+
+ a[k - 1 + (k - 1) * a_dim1] = w[k - 1 + (kw - 1) * w_dim1];
+ a[k - 1 + k * a_dim1] = w[k - 1 + kw * w_dim1];
+ a[k + k * a_dim1] = w[k + kw * w_dim1];
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k - 1] = -kp;
+ }
+
+/* Decrease K and return to the start of the main loop */
+
+ k -= kstep;
+ goto L10;
+
+L30:
+
+/* Update the upper triangle of A11 (= A(1:k,1:k)) as */
+
+/* A11 := A11 - U12*D*U12' = A11 - U12*W' */
+
+/* computing blocks of NB columns at a time */
+
+ i__1 = -(*nb);
+ for (j = (k - 1) / *nb * *nb + 1; i__1 < 0 ? j >= 1 : j <= 1; j +=
+ i__1) {
+/* Computing MIN */
+ i__2 = *nb, i__3 = k - j + 1;
+ jb = min(i__2,i__3);
+
+/* Update the upper triangle of the diagonal block */
+
+ i__2 = j + jb - 1;
+ for (jj = j; jj <= i__2; ++jj) {
+ i__3 = jj - j + 1;
+ i__4 = *n - k;
+ dgemv_("No transpose", &i__3, &i__4, &c_b8, &a[j + (k + 1) *
+ a_dim1], lda, &w[jj + (kw + 1) * w_dim1], ldw, &c_b9,
+ &a[j + jj * a_dim1], &c__1);
+/* L40: */
+ }
+
+/* Update the rectangular superdiagonal block */
+
+ i__2 = j - 1;
+ i__3 = *n - k;
+ dgemm_("No transpose", "Transpose", &i__2, &jb, &i__3, &c_b8, &a[(
+ k + 1) * a_dim1 + 1], lda, &w[j + (kw + 1) * w_dim1], ldw,
+ &c_b9, &a[j * a_dim1 + 1], lda);
+/* L50: */
+ }
+
+/* Put U12 in standard form by partially undoing the interchanges */
+/* in columns k+1:n */
+
+ j = k + 1;
+L60:
+ jj = j;
+ jp = ipiv[j];
+ if (jp < 0) {
+ jp = -jp;
+ ++j;
+ }
+ ++j;
+ if (jp != jj && j <= *n) {
+ i__1 = *n - j + 1;
+ dswap_(&i__1, &a[jp + j * a_dim1], lda, &a[jj + j * a_dim1], lda);
+ }
+ if (j <= *n) {
+ goto L60;
+ }
+
+/* Set KB to the number of columns factorized */
+
+ *kb = *n - k;
+
+ } else {
+
+/* Factorize the leading columns of A using the lower triangle */
+/* of A and working forwards, and compute the matrix W = L21*D */
+/* for use in updating A22 */
+
+/* K is the main loop index, increasing from 1 in steps of 1 or 2 */
+
+ k = 1;
+L70:
+
+/* Exit from loop */
+
+ if (k >= *nb && *nb < *n || k > *n) {
+ goto L90;
+ }
+
+/* Copy column K of A to column K of W and update it */
+
+ i__1 = *n - k + 1;
+ dcopy_(&i__1, &a[k + k * a_dim1], &c__1, &w[k + k * w_dim1], &c__1);
+ i__1 = *n - k + 1;
+ i__2 = k - 1;
+ dgemv_("No transpose", &i__1, &i__2, &c_b8, &a[k + a_dim1], lda, &w[k
+ + w_dim1], ldw, &c_b9, &w[k + k * w_dim1], &c__1);
+
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ absakk = (d__1 = w[k + k * w_dim1], abs(d__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ imax = k + idamax_(&i__1, &w[k + 1 + k * w_dim1], &c__1);
+ colmax = (d__1 = w[imax + k * w_dim1], abs(d__1));
+ } else {
+ colmax = 0.;
+ }
+
+ if (max(absakk,colmax) == 0.) {
+
+/* Column K is zero: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* Copy column IMAX to column K+1 of W and update it */
+
+ i__1 = imax - k;
+ dcopy_(&i__1, &a[imax + k * a_dim1], lda, &w[k + (k + 1) *
+ w_dim1], &c__1);
+ i__1 = *n - imax + 1;
+ dcopy_(&i__1, &a[imax + imax * a_dim1], &c__1, &w[imax + (k +
+ 1) * w_dim1], &c__1);
+ i__1 = *n - k + 1;
+ i__2 = k - 1;
+ dgemv_("No transpose", &i__1, &i__2, &c_b8, &a[k + a_dim1],
+ lda, &w[imax + w_dim1], ldw, &c_b9, &w[k + (k + 1) *
+ w_dim1], &c__1);
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = imax - k;
+ jmax = k - 1 + idamax_(&i__1, &w[k + (k + 1) * w_dim1], &c__1)
+ ;
+ rowmax = (d__1 = w[jmax + (k + 1) * w_dim1], abs(d__1));
+ if (imax < *n) {
+ i__1 = *n - imax;
+ jmax = imax + idamax_(&i__1, &w[imax + 1 + (k + 1) *
+ w_dim1], &c__1);
+/* Computing MAX */
+ d__2 = rowmax, d__3 = (d__1 = w[jmax + (k + 1) * w_dim1],
+ abs(d__1));
+ rowmax = max(d__2,d__3);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else if ((d__1 = w[imax + (k + 1) * w_dim1], abs(d__1)) >=
+ alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+
+/* copy column K+1 of W to column K */
+
+ i__1 = *n - k + 1;
+ dcopy_(&i__1, &w[k + (k + 1) * w_dim1], &c__1, &w[k + k *
+ w_dim1], &c__1);
+ } else {
+
+/* interchange rows and columns K+1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+
+ kk = k + kstep - 1;
+
+/* Updated column KP is already stored in column KK of W */
+
+ if (kp != kk) {
+
+/* Copy non-updated column KK to column KP */
+
+ a[kp + k * a_dim1] = a[kk + k * a_dim1];
+ i__1 = kp - k - 1;
+ dcopy_(&i__1, &a[k + 1 + kk * a_dim1], &c__1, &a[kp + (k + 1)
+ * a_dim1], lda);
+ i__1 = *n - kp + 1;
+ dcopy_(&i__1, &a[kp + kk * a_dim1], &c__1, &a[kp + kp *
+ a_dim1], &c__1);
+
+/* Interchange rows KK and KP in first KK columns of A and W */
+
+ dswap_(&kk, &a[kk + a_dim1], lda, &a[kp + a_dim1], lda);
+ dswap_(&kk, &w[kk + w_dim1], ldw, &w[kp + w_dim1], ldw);
+ }
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k of W now holds */
+
+/* W(k) = L(k)*D(k) */
+
+/* where L(k) is the k-th column of L */
+
+/* Store L(k) in column k of A */
+
+ i__1 = *n - k + 1;
+ dcopy_(&i__1, &w[k + k * w_dim1], &c__1, &a[k + k * a_dim1], &
+ c__1);
+ if (k < *n) {
+ r1 = 1. / a[k + k * a_dim1];
+ i__1 = *n - k;
+ dscal_(&i__1, &r1, &a[k + 1 + k * a_dim1], &c__1);
+ }
+ } else {
+
+/* 2-by-2 pivot block D(k): columns k and k+1 of W now hold */
+
+/* ( W(k) W(k+1) ) = ( L(k) L(k+1) )*D(k) */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th columns */
+/* of L */
+
+ if (k < *n - 1) {
+
+/* Store L(k) and L(k+1) in columns k and k+1 of A */
+
+ d21 = w[k + 1 + k * w_dim1];
+ d11 = w[k + 1 + (k + 1) * w_dim1] / d21;
+ d22 = w[k + k * w_dim1] / d21;
+ t = 1. / (d11 * d22 - 1.);
+ d21 = t / d21;
+ i__1 = *n;
+ for (j = k + 2; j <= i__1; ++j) {
+ a[j + k * a_dim1] = d21 * (d11 * w[j + k * w_dim1] -
+ w[j + (k + 1) * w_dim1]);
+ a[j + (k + 1) * a_dim1] = d21 * (d22 * w[j + (k + 1) *
+ w_dim1] - w[j + k * w_dim1]);
+/* L80: */
+ }
+ }
+
+/* Copy D(k) to A */
+
+ a[k + k * a_dim1] = w[k + k * w_dim1];
+ a[k + 1 + k * a_dim1] = w[k + 1 + k * w_dim1];
+ a[k + 1 + (k + 1) * a_dim1] = w[k + 1 + (k + 1) * w_dim1];
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k + 1] = -kp;
+ }
+
+/* Increase K and return to the start of the main loop */
+
+ k += kstep;
+ goto L70;
+
+L90:
+
+/* Update the lower triangle of A22 (= A(k:n,k:n)) as */
+
+/* A22 := A22 - L21*D*L21' = A22 - L21*W' */
+
+/* computing blocks of NB columns at a time */
+
+ i__1 = *n;
+ i__2 = *nb;
+ for (j = k; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__3 = *nb, i__4 = *n - j + 1;
+ jb = min(i__3,i__4);
+
+/* Update the lower triangle of the diagonal block */
+
+ i__3 = j + jb - 1;
+ for (jj = j; jj <= i__3; ++jj) {
+ i__4 = j + jb - jj;
+ i__5 = k - 1;
+ dgemv_("No transpose", &i__4, &i__5, &c_b8, &a[jj + a_dim1],
+ lda, &w[jj + w_dim1], ldw, &c_b9, &a[jj + jj * a_dim1]
+, &c__1);
+/* L100: */
+ }
+
+/* Update the rectangular subdiagonal block */
+
+ if (j + jb <= *n) {
+ i__3 = *n - j - jb + 1;
+ i__4 = k - 1;
+ dgemm_("No transpose", "Transpose", &i__3, &jb, &i__4, &c_b8,
+ &a[j + jb + a_dim1], lda, &w[j + w_dim1], ldw, &c_b9,
+ &a[j + jb + j * a_dim1], lda);
+ }
+/* L110: */
+ }
+
+/* Put L21 in standard form by partially undoing the interchanges */
+/* in columns 1:k-1 */
+
+ j = k - 1;
+L120:
+ jj = j;
+ jp = ipiv[j];
+ if (jp < 0) {
+ jp = -jp;
+ --j;
+ }
+ --j;
+ if (jp != jj && j >= 1) {
+ dswap_(&j, &a[jp + a_dim1], lda, &a[jj + a_dim1], lda);
+ }
+ if (j >= 1) {
+ goto L120;
+ }
+
+/* Set KB to the number of columns factorized */
+
+ *kb = k - 1;
+
+ }
+ return 0;
+
+/* End of DLASYF */
+
+} /* dlasyf_ */
diff --git a/contrib/libs/clapack/dlat2s.c b/contrib/libs/clapack/dlat2s.c
new file mode 100644
index 0000000000..8c8479c3f7
--- /dev/null
+++ b/contrib/libs/clapack/dlat2s.c
@@ -0,0 +1,137 @@
+/* dlat2s.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlat2s_(char *uplo, integer *n, doublereal *a, integer *
+ lda, real *sa, integer *ldsa, integer *info)
+{
+ /* System generated locals */
+ integer sa_dim1, sa_offset, a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal rmax;
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern doublereal slamch_(char *);
+
+
+/* -- LAPACK PROTOTYPE auxiliary routine (version 3.1.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* May 2007 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLAT2S converts a DOUBLE PRECISION triangular matrix, SA, to a SINGLE */
+/* PRECISION triangular matrix, A. */
+
+/* RMAX is the overflow for the SINGLE PRECISION arithmetic */
+/* DLAS2S checks that all the entries of A are between -RMAX and */
+/* RMAX. If not the convertion is aborted and a flag is raised. */
+
+/* This is an auxiliary routine so there is no argument checking. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The number of rows and columns of the matrix A. N >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the N-by-N triangular coefficient matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* SA (output) REAL array, dimension (LDSA,N) */
+/* Only the UPLO part of SA is referenced. On exit, if INFO=0, */
+/* the N-by-N coefficient matrix SA; if INFO>0, the content of */
+/* the UPLO part of SA is unspecified. */
+
+/* LDSA (input) INTEGER */
+/* The leading dimension of the array SA. LDSA >= max(1,M). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* = 1: an entry of the matrix A is greater than the SINGLE */
+/* PRECISION overflow threshold, in this case, the content */
+/* of the UPLO part of SA in exit is unspecified. */
+
+/* ========= */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ sa_dim1 = *ldsa;
+ sa_offset = 1 + sa_dim1;
+ sa -= sa_offset;
+
+ /* Function Body */
+ rmax = slamch_("O");
+ upper = lsame_(uplo, "U");
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (a[i__ + j * a_dim1] < -rmax || a[i__ + j * a_dim1] > rmax)
+ {
+ *info = 1;
+ goto L50;
+ }
+ sa[i__ + j * sa_dim1] = a[i__ + j * a_dim1];
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ if (a[i__ + j * a_dim1] < -rmax || a[i__ + j * a_dim1] > rmax)
+ {
+ *info = 1;
+ goto L50;
+ }
+ sa[i__ + j * sa_dim1] = a[i__ + j * a_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+L50:
+
+ return 0;
+
+/* End of DLAT2S */
+
+} /* dlat2s_ */
diff --git a/contrib/libs/clapack/dlatbs.c b/contrib/libs/clapack/dlatbs.c
new file mode 100644
index 0000000000..cfd8566e08
--- /dev/null
+++ b/contrib/libs/clapack/dlatbs.c
@@ -0,0 +1,850 @@
+/* dlatbs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b36 = .5;
+
+/* Subroutine */ int dlatbs_(char *uplo, char *trans, char *diag, char *
+ normin, integer *n, integer *kd, doublereal *ab, integer *ldab,
+ doublereal *x, doublereal *scale, doublereal *cnorm, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1, d__2, d__3;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal xj, rec, tjj;
+ integer jinc, jlen;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ doublereal xbnd;
+ integer imax;
+ doublereal tmax, tjjs, xmax, grow, sumj;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ integer maind;
+ extern logical lsame_(char *, char *);
+ doublereal tscal, uscal;
+ extern doublereal dasum_(integer *, doublereal *, integer *);
+ integer jlast;
+ extern /* Subroutine */ int dtbsv_(char *, char *, char *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *), daxpy_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *);
+ logical upper;
+ extern doublereal dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum;
+ logical notran;
+ integer jfirst;
+ doublereal smlnum;
+ logical nounit;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLATBS solves one of the triangular systems */
+
+/* A *x = s*b or A'*x = s*b */
+
+/* with scaling to prevent overflow, where A is an upper or lower */
+/* triangular band matrix. Here A' denotes the transpose of A, x and b */
+/* are n-element vectors, and s is a scaling factor, usually less than */
+/* or equal to 1, chosen so that the components of x will be less than */
+/* the overflow threshold. If the unscaled problem will not cause */
+/* overflow, the Level 2 BLAS routine DTBSV is called. If the matrix A */
+/* is singular (A(j,j) = 0 for some j), then s is set to 0 and a */
+/* non-trivial solution to A*x = 0 is returned. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the operation applied to A. */
+/* = 'N': Solve A * x = s*b (No transpose) */
+/* = 'T': Solve A'* x = s*b (Transpose) */
+/* = 'C': Solve A'* x = s*b (Conjugate transpose = Transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* NORMIN (input) CHARACTER*1 */
+/* Specifies whether CNORM has been set or not. */
+/* = 'Y': CNORM contains the column norms on entry */
+/* = 'N': CNORM is not set on entry. On exit, the norms will */
+/* be computed and stored in CNORM. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of subdiagonals or superdiagonals in the */
+/* triangular matrix A. KD >= 0. */
+
+/* AB (input) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* The upper or lower triangular band matrix A, stored in the */
+/* first KD+1 rows of the array. The j-th column of A is stored */
+/* in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* X (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the right hand side b of the triangular system. */
+/* On exit, X is overwritten by the solution vector x. */
+
+/* SCALE (output) DOUBLE PRECISION */
+/* The scaling factor s for the triangular system */
+/* A * x = s*b or A'* x = s*b. */
+/* If SCALE = 0, the matrix A is singular or badly scaled, and */
+/* the vector x is an exact or approximate solution to A*x = 0. */
+
+/* CNORM (input or output) DOUBLE PRECISION array, dimension (N) */
+
+/* If NORMIN = 'Y', CNORM is an input argument and CNORM(j) */
+/* contains the norm of the off-diagonal part of the j-th column */
+/* of A. If TRANS = 'N', CNORM(j) must be greater than or equal */
+/* to the infinity-norm, and if TRANS = 'T' or 'C', CNORM(j) */
+/* must be greater than or equal to the 1-norm. */
+
+/* If NORMIN = 'N', CNORM is an output argument and CNORM(j) */
+/* returns the 1-norm of the offdiagonal part of the j-th column */
+/* of A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* Further Details */
+/* ======= ======= */
+
+/* A rough bound on x is computed; if that is less than overflow, DTBSV */
+/* is called, otherwise, specific code is used which checks for possible */
+/* overflow or divide-by-zero at every operation. */
+
+/* A columnwise scheme is used for solving A*x = b. The basic algorithm */
+/* if A is lower triangular is */
+
+/* x[1:n] := b[1:n] */
+/* for j = 1, ..., n */
+/* x(j) := x(j) / A(j,j) */
+/* x[j+1:n] := x[j+1:n] - x(j) * A[j+1:n,j] */
+/* end */
+
+/* Define bounds on the components of x after j iterations of the loop: */
+/* M(j) = bound on x[1:j] */
+/* G(j) = bound on x[j+1:n] */
+/* Initially, let M(0) = 0 and G(0) = max{x(i), i=1,...,n}. */
+
+/* Then for iteration j+1 we have */
+/* M(j+1) <= G(j) / | A(j+1,j+1) | */
+/* G(j+1) <= G(j) + M(j+1) * | A[j+2:n,j+1] | */
+/* <= G(j) ( 1 + CNORM(j+1) / | A(j+1,j+1) | ) */
+
+/* where CNORM(j+1) is greater than or equal to the infinity-norm of */
+/* column j+1 of A, not counting the diagonal. Hence */
+
+/* G(j) <= G(0) product ( 1 + CNORM(i) / | A(i,i) | ) */
+/* 1<=i<=j */
+/* and */
+
+/* |x(j)| <= ( G(0) / |A(j,j)| ) product ( 1 + CNORM(i) / |A(i,i)| ) */
+/* 1<=i< j */
+
+/* Since |x(j)| <= M(j), we use the Level 2 BLAS routine DTBSV if the */
+/* reciprocal of the largest M(j), j=1,..,n, is larger than */
+/* max(underflow, 1/overflow). */
+
+/* The bound on x(j) is also used to determine when a step in the */
+/* columnwise method can be performed without fear of overflow. If */
+/* the computed bound is greater than a large constant, x is scaled to */
+/* prevent overflow, but if the bound overflows, x is set to 0, x(j) to */
+/* 1, and scale to 0, and a non-trivial solution to A*x = 0 is found. */
+
+/* Similarly, a row-wise scheme is used to solve A'*x = b. The basic */
+/* algorithm for A upper triangular is */
+
+/* for j = 1, ..., n */
+/* x(j) := ( b(j) - A[1:j-1,j]' * x[1:j-1] ) / A(j,j) */
+/* end */
+
+/* We simultaneously compute two bounds */
+/* G(j) = bound on ( b(i) - A[1:i-1,i]' * x[1:i-1] ), 1<=i<=j */
+/* M(j) = bound on x(i), 1<=i<=j */
+
+/* The initial values are G(0) = 0, M(0) = max{b(i), i=1,..,n}, and we */
+/* add the constraint G(j) >= G(j-1) and M(j) >= M(j-1) for j >= 1. */
+/* Then the bound on x(j) is */
+
+/* M(j) <= M(j-1) * ( 1 + CNORM(j) ) / | A(j,j) | */
+
+/* <= M(0) * product ( ( 1 + CNORM(i) ) / |A(i,i)| ) */
+/* 1<=i<=j */
+
+/* and we can safely call DTBSV if 1/M(n) and 1/G(n) are both greater */
+/* than max(underflow, 1/overflow). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --x;
+ --cnorm;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+/* Test the input parameters. */
+
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (! lsame_(normin, "Y") && ! lsame_(normin,
+ "N")) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*kd < 0) {
+ *info = -6;
+ } else if (*ldab < *kd + 1) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLATBS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine machine dependent parameters to control overflow. */
+
+ smlnum = dlamch_("Safe minimum") / dlamch_("Precision");
+ bignum = 1. / smlnum;
+ *scale = 1.;
+
+ if (lsame_(normin, "N")) {
+
+/* Compute the 1-norm of each column, not including the diagonal. */
+
+ if (upper) {
+
+/* A is upper triangular. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *kd, i__3 = j - 1;
+ jlen = min(i__2,i__3);
+ cnorm[j] = dasum_(&jlen, &ab[*kd + 1 - jlen + j * ab_dim1], &
+ c__1);
+/* L10: */
+ }
+ } else {
+
+/* A is lower triangular. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *kd, i__3 = *n - j;
+ jlen = min(i__2,i__3);
+ if (jlen > 0) {
+ cnorm[j] = dasum_(&jlen, &ab[j * ab_dim1 + 2], &c__1);
+ } else {
+ cnorm[j] = 0.;
+ }
+/* L20: */
+ }
+ }
+ }
+
+/* Scale the column norms by TSCAL if the maximum element in CNORM is */
+/* greater than BIGNUM. */
+
+ imax = idamax_(n, &cnorm[1], &c__1);
+ tmax = cnorm[imax];
+ if (tmax <= bignum) {
+ tscal = 1.;
+ } else {
+ tscal = 1. / (smlnum * tmax);
+ dscal_(n, &tscal, &cnorm[1], &c__1);
+ }
+
+/* Compute a bound on the computed solution vector to see if the */
+/* Level 2 BLAS routine DTBSV can be used. */
+
+ j = idamax_(n, &x[1], &c__1);
+ xmax = (d__1 = x[j], abs(d__1));
+ xbnd = xmax;
+ if (notran) {
+
+/* Compute the growth in A * x = b. */
+
+ if (upper) {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ maind = *kd + 1;
+ } else {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ maind = 1;
+ }
+
+ if (tscal != 1.) {
+ grow = 0.;
+ goto L50;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, G(0) = max{x(i), i=1,...,n}. */
+
+ grow = 1. / max(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L50;
+ }
+
+/* M(j) = G(j-1) / abs(A(j,j)) */
+
+ tjj = (d__1 = ab[maind + j * ab_dim1], abs(d__1));
+/* Computing MIN */
+ d__1 = xbnd, d__2 = min(1.,tjj) * grow;
+ xbnd = min(d__1,d__2);
+ if (tjj + cnorm[j] >= smlnum) {
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) / abs(A(j,j)) ) */
+
+ grow *= tjj / (tjj + cnorm[j]);
+ } else {
+
+/* G(j) could overflow, set GROW to 0. */
+
+ grow = 0.;
+ }
+/* L30: */
+ }
+ grow = xbnd;
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = 1. / max(xbnd,smlnum);
+ grow = min(d__1,d__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L50;
+ }
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) ) */
+
+ grow *= 1. / (cnorm[j] + 1.);
+/* L40: */
+ }
+ }
+L50:
+
+ ;
+ } else {
+
+/* Compute the growth in A' * x = b. */
+
+ if (upper) {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ maind = *kd + 1;
+ } else {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ maind = 1;
+ }
+
+ if (tscal != 1.) {
+ grow = 0.;
+ goto L80;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, M(0) = max{x(i), i=1,...,n}. */
+
+ grow = 1. / max(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L80;
+ }
+
+/* G(j) = max( G(j-1), M(j-1)*( 1 + CNORM(j) ) ) */
+
+ xj = cnorm[j] + 1.;
+/* Computing MIN */
+ d__1 = grow, d__2 = xbnd / xj;
+ grow = min(d__1,d__2);
+
+/* M(j) = M(j-1)*( 1 + CNORM(j) ) / abs(A(j,j)) */
+
+ tjj = (d__1 = ab[maind + j * ab_dim1], abs(d__1));
+ if (xj > tjj) {
+ xbnd *= tjj / xj;
+ }
+/* L60: */
+ }
+ grow = min(grow,xbnd);
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = 1. / max(xbnd,smlnum);
+ grow = min(d__1,d__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L80;
+ }
+
+/* G(j) = ( 1 + CNORM(j) )*G(j-1) */
+
+ xj = cnorm[j] + 1.;
+ grow /= xj;
+/* L70: */
+ }
+ }
+L80:
+ ;
+ }
+
+ if (grow * tscal > smlnum) {
+
+/* Use the Level 2 BLAS solve if the reciprocal of the bound on */
+/* elements of X is not too small. */
+
+ dtbsv_(uplo, trans, diag, n, kd, &ab[ab_offset], ldab, &x[1], &c__1);
+ } else {
+
+/* Use a Level 1 BLAS solve, scaling intermediate results. */
+
+ if (xmax > bignum) {
+
+/* Scale X so that its components are less than or equal to */
+/* BIGNUM in absolute value. */
+
+ *scale = bignum / xmax;
+ dscal_(n, scale, &x[1], &c__1);
+ xmax = bignum;
+ }
+
+ if (notran) {
+
+/* Solve A * x = b */
+
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) / A(j,j), scaling x if necessary. */
+
+ xj = (d__1 = x[j], abs(d__1));
+ if (nounit) {
+ tjjs = ab[maind + j * ab_dim1] * tscal;
+ } else {
+ tjjs = tscal;
+ if (tscal == 1.) {
+ goto L100;
+ }
+ }
+ tjj = abs(tjjs);
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.) {
+ if (xj > tjj * bignum) {
+
+/* Scale x by 1/b(j). */
+
+ rec = 1. / xj;
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ x[j] /= tjjs;
+ xj = (d__1 = x[j], abs(d__1));
+ } else if (tjj > 0.) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM */
+/* to avoid overflow when dividing by A(j,j). */
+
+ rec = tjj * bignum / xj;
+ if (cnorm[j] > 1.) {
+
+/* Scale by 1/CNORM(j) to avoid overflow when */
+/* multiplying x(j) times column j. */
+
+ rec /= cnorm[j];
+ }
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ x[j] /= tjjs;
+ xj = (d__1 = x[j], abs(d__1));
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0, and compute a solution to A*x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ x[i__] = 0.;
+/* L90: */
+ }
+ x[j] = 1.;
+ xj = 1.;
+ *scale = 0.;
+ xmax = 0.;
+ }
+L100:
+
+/* Scale x if necessary to avoid overflow when adding a */
+/* multiple of column j of A. */
+
+ if (xj > 1.) {
+ rec = 1. / xj;
+ if (cnorm[j] > (bignum - xmax) * rec) {
+
+/* Scale x by 1/(2*abs(x(j))). */
+
+ rec *= .5;
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ } else if (xj * cnorm[j] > bignum - xmax) {
+
+/* Scale x by 1/2. */
+
+ dscal_(n, &c_b36, &x[1], &c__1);
+ *scale *= .5;
+ }
+
+ if (upper) {
+ if (j > 1) {
+
+/* Compute the update */
+/* x(max(1,j-kd):j-1) := x(max(1,j-kd):j-1) - */
+/* x(j)* A(max(1,j-kd):j-1,j) */
+
+/* Computing MIN */
+ i__3 = *kd, i__4 = j - 1;
+ jlen = min(i__3,i__4);
+ d__1 = -x[j] * tscal;
+ daxpy_(&jlen, &d__1, &ab[*kd + 1 - jlen + j * ab_dim1]
+, &c__1, &x[j - jlen], &c__1);
+ i__3 = j - 1;
+ i__ = idamax_(&i__3, &x[1], &c__1);
+ xmax = (d__1 = x[i__], abs(d__1));
+ }
+ } else if (j < *n) {
+
+/* Compute the update */
+/* x(j+1:min(j+kd,n)) := x(j+1:min(j+kd,n)) - */
+/* x(j) * A(j+1:min(j+kd,n),j) */
+
+/* Computing MIN */
+ i__3 = *kd, i__4 = *n - j;
+ jlen = min(i__3,i__4);
+ if (jlen > 0) {
+ d__1 = -x[j] * tscal;
+ daxpy_(&jlen, &d__1, &ab[j * ab_dim1 + 2], &c__1, &x[
+ j + 1], &c__1);
+ }
+ i__3 = *n - j;
+ i__ = j + idamax_(&i__3, &x[j + 1], &c__1);
+ xmax = (d__1 = x[i__], abs(d__1));
+ }
+/* L110: */
+ }
+
+ } else {
+
+/* Solve A' * x = b */
+
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Compute x(j) = b(j) - sum A(k,j)*x(k). */
+/* k<>j */
+
+ xj = (d__1 = x[j], abs(d__1));
+ uscal = tscal;
+ rec = 1. / max(xmax,1.);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5;
+ if (nounit) {
+ tjjs = ab[maind + j * ab_dim1] * tscal;
+ } else {
+ tjjs = tscal;
+ }
+ tjj = abs(tjjs);
+ if (tjj > 1.) {
+
+/* Divide by A(j,j) when scaling x if A(j,j) > 1. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = rec * tjj;
+ rec = min(d__1,d__2);
+ uscal /= tjjs;
+ }
+ if (rec < 1.) {
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ sumj = 0.;
+ if (uscal == 1.) {
+
+/* If the scaling needed for A in the dot product is 1, */
+/* call DDOT to perform the dot product. */
+
+ if (upper) {
+/* Computing MIN */
+ i__3 = *kd, i__4 = j - 1;
+ jlen = min(i__3,i__4);
+ sumj = ddot_(&jlen, &ab[*kd + 1 - jlen + j * ab_dim1],
+ &c__1, &x[j - jlen], &c__1);
+ } else {
+/* Computing MIN */
+ i__3 = *kd, i__4 = *n - j;
+ jlen = min(i__3,i__4);
+ if (jlen > 0) {
+ sumj = ddot_(&jlen, &ab[j * ab_dim1 + 2], &c__1, &
+ x[j + 1], &c__1);
+ }
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+/* Computing MIN */
+ i__3 = *kd, i__4 = j - 1;
+ jlen = min(i__3,i__4);
+ i__3 = jlen;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ sumj += ab[*kd + i__ - jlen + j * ab_dim1] *
+ uscal * x[j - jlen - 1 + i__];
+/* L120: */
+ }
+ } else {
+/* Computing MIN */
+ i__3 = *kd, i__4 = *n - j;
+ jlen = min(i__3,i__4);
+ i__3 = jlen;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ sumj += ab[i__ + 1 + j * ab_dim1] * uscal * x[j +
+ i__];
+/* L130: */
+ }
+ }
+ }
+
+ if (uscal == tscal) {
+
+/* Compute x(j) := ( x(j) - sumj ) / A(j,j) if 1/A(j,j) */
+/* was not used to scale the dotproduct. */
+
+ x[j] -= sumj;
+ xj = (d__1 = x[j], abs(d__1));
+ if (nounit) {
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ tjjs = ab[maind + j * ab_dim1] * tscal;
+ } else {
+ tjjs = tscal;
+ if (tscal == 1.) {
+ goto L150;
+ }
+ }
+ tjj = abs(tjjs);
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1. / xj;
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ x[j] /= tjjs;
+ } else if (tjj > 0.) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ x[j] /= tjjs;
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0, and compute a solution to A'*x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ x[i__] = 0.;
+/* L140: */
+ }
+ x[j] = 1.;
+ *scale = 0.;
+ xmax = 0.;
+ }
+L150:
+ ;
+ } else {
+
+/* Compute x(j) := x(j) / A(j,j) - sumj if the dot */
+/* product has already been divided by 1/A(j,j). */
+
+ x[j] = x[j] / tjjs - sumj;
+ }
+/* Computing MAX */
+ d__2 = xmax, d__3 = (d__1 = x[j], abs(d__1));
+ xmax = max(d__2,d__3);
+/* L160: */
+ }
+ }
+ *scale /= tscal;
+ }
+
+/* Scale the column norms by 1/TSCAL for return. */
+
+ if (tscal != 1.) {
+ d__1 = 1. / tscal;
+ dscal_(n, &d__1, &cnorm[1], &c__1);
+ }
+
+ return 0;
+
+/* End of DLATBS */
+
+} /* dlatbs_ */
diff --git a/contrib/libs/clapack/dlatdf.c b/contrib/libs/clapack/dlatdf.c
new file mode 100644
index 0000000000..263fc87dd9
--- /dev/null
+++ b/contrib/libs/clapack/dlatdf.c
@@ -0,0 +1,303 @@
+/* dlatdf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b23 = 1.;
+static doublereal c_b37 = -1.;
+
+/* Subroutine */ int dlatdf_(integer *ijob, integer *n, doublereal *z__,
+ integer *ldz, doublereal *rhs, doublereal *rdsum, doublereal *rdscal,
+ integer *ipiv, integer *jpiv)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal bm, bp, xm[8], xp[8];
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ integer info;
+ doublereal temp, work[32];
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern doublereal dasum_(integer *, doublereal *, integer *);
+ doublereal pmone;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), daxpy_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *);
+ doublereal sminu;
+ integer iwork[8];
+ doublereal splus;
+ extern /* Subroutine */ int dgesc2_(integer *, doublereal *, integer *,
+ doublereal *, integer *, integer *, doublereal *), dgecon_(char *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *), dlassq_(integer *,
+ doublereal *, integer *, doublereal *, doublereal *), dlaswp_(
+ integer *, doublereal *, integer *, integer *, integer *, integer
+ *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLATDF uses the LU factorization of the n-by-n matrix Z computed by */
+/* DGETC2 and computes a contribution to the reciprocal Dif-estimate */
+/* by solving Z * x = b for x, and choosing the r.h.s. b such that */
+/* the norm of x is as large as possible. On entry RHS = b holds the */
+/* contribution from earlier solved sub-systems, and on return RHS = x. */
+
+/* The factorization of Z returned by DGETC2 has the form Z = P*L*U*Q, */
+/* where P and Q are permutation matrices. L is lower triangular with */
+/* unit diagonal elements and U is upper triangular. */
+
+/* Arguments */
+/* ========= */
+
+/* IJOB (input) INTEGER */
+/* IJOB = 2: First compute an approximative null-vector e */
+/* of Z using DGECON, e is normalized and solve for */
+/* Zx = +-e - f with the sign giving the greater value */
+/* of 2-norm(x). About 5 times as expensive as Default. */
+/* IJOB .ne. 2: Local look ahead strategy where all entries of */
+/*                 the r.h.s. b is chosen as either +1 or -1 (Default). */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Z. */
+
+/* Z (input) DOUBLE PRECISION array, dimension (LDZ, N) */
+/* On entry, the LU part of the factorization of the n-by-n */
+/* matrix Z computed by DGETC2: Z = P * L * U * Q */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDA >= max(1, N). */
+
+/* RHS (input/output) DOUBLE PRECISION array, dimension N. */
+/* On entry, RHS contains contributions from other subsystems. */
+/* On exit, RHS contains the solution of the subsystem with */
+/*          entries according to the value of IJOB (see above). */
+
+/* RDSUM (input/output) DOUBLE PRECISION */
+/* On entry, the sum of squares of computed contributions to */
+/* the Dif-estimate under computation by DTGSYL, where the */
+/* scaling factor RDSCAL (see below) has been factored out. */
+/* On exit, the corresponding sum of squares updated with the */
+/* contributions from the current sub-system. */
+/* If TRANS = 'T' RDSUM is not touched. */
+/* NOTE: RDSUM only makes sense when DTGSY2 is called by STGSYL. */
+
+/* RDSCAL (input/output) DOUBLE PRECISION */
+/* On entry, scaling factor used to prevent overflow in RDSUM. */
+/* On exit, RDSCAL is updated w.r.t. the current contributions */
+/* in RDSUM. */
+/* If TRANS = 'T', RDSCAL is not touched. */
+/* NOTE: RDSCAL only makes sense when DTGSY2 is called by */
+/* DTGSYL. */
+
+/* IPIV (input) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= i <= N, row i of the */
+/* matrix has been interchanged with row IPIV(i). */
+
+/* JPIV (input) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= j <= N, column j of the */
+/* matrix has been interchanged with column JPIV(j). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* This routine is a further developed implementation of algorithm */
+/* BSOLVE in [1] using complete pivoting in the LU factorization. */
+
+/* [1] Bo Kagstrom and Lars Westin, */
+/* Generalized Schur Methods with Condition Estimators for */
+/* Solving the Generalized Sylvester Equation, IEEE Transactions */
+/* on Automatic Control, Vol. 34, No. 7, July 1989, pp 745-751. */
+
+/* [2] Peter Poromaa, */
+/* On Efficient and Robust Estimators for the Separation */
+/* between two Regular Matrix Pairs with Applications in */
+/*     Condition Estimation. Report IMINF-95.05, Department of */
+/* Computing Science, Umea University, S-901 87 Umea, Sweden, 1995. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --rhs;
+ --ipiv;
+ --jpiv;
+
+ /* Function Body */
+ if (*ijob != 2) {
+
+/* Apply permutations IPIV to RHS */
+
+ i__1 = *n - 1;
+ dlaswp_(&c__1, &rhs[1], ldz, &c__1, &i__1, &ipiv[1], &c__1);
+
+/* Solve for L-part choosing RHS either to +1 or -1. */
+
+ pmone = -1.;
+
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ bp = rhs[j] + 1.;
+ bm = rhs[j] - 1.;
+ splus = 1.;
+
+/* Look-ahead for L-part RHS(1:N-1) = + or -1, SPLUS and */
+/* SMIN computed more efficiently than in BSOLVE [1]. */
+
+ i__2 = *n - j;
+ splus += ddot_(&i__2, &z__[j + 1 + j * z_dim1], &c__1, &z__[j + 1
+ + j * z_dim1], &c__1);
+ i__2 = *n - j;
+ sminu = ddot_(&i__2, &z__[j + 1 + j * z_dim1], &c__1, &rhs[j + 1],
+ &c__1);
+ splus *= rhs[j];
+ if (splus > sminu) {
+ rhs[j] = bp;
+ } else if (sminu > splus) {
+ rhs[j] = bm;
+ } else {
+
+/* In this case the updating sums are equal and we can */
+/* choose RHS(J) +1 or -1. The first time this happens */
+/* we choose -1, thereafter +1. This is a simple way to */
+/* get good estimates of matrices like Byers well-known */
+/* example (see [1]). (Not done in BSOLVE.) */
+
+ rhs[j] += pmone;
+ pmone = 1.;
+ }
+
+/* Compute the remaining r.h.s. */
+
+ temp = -rhs[j];
+ i__2 = *n - j;
+ daxpy_(&i__2, &temp, &z__[j + 1 + j * z_dim1], &c__1, &rhs[j + 1],
+ &c__1);
+
+/* L10: */
+ }
+
+/* Solve for U-part, look-ahead for RHS(N) = +-1. This is not done */
+/* in BSOLVE and will hopefully give us a better estimate because */
+/*        any ill-conditioning of the original matrix is transferred to U */
+/* and not to L. U(N, N) is an approximation to sigma_min(LU). */
+
+ i__1 = *n - 1;
+ dcopy_(&i__1, &rhs[1], &c__1, xp, &c__1);
+ xp[*n - 1] = rhs[*n] + 1.;
+ rhs[*n] += -1.;
+ splus = 0.;
+ sminu = 0.;
+ for (i__ = *n; i__ >= 1; --i__) {
+ temp = 1. / z__[i__ + i__ * z_dim1];
+ xp[i__ - 1] *= temp;
+ rhs[i__] *= temp;
+ i__1 = *n;
+ for (k = i__ + 1; k <= i__1; ++k) {
+ xp[i__ - 1] -= xp[k - 1] * (z__[i__ + k * z_dim1] * temp);
+ rhs[i__] -= rhs[k] * (z__[i__ + k * z_dim1] * temp);
+/* L20: */
+ }
+ splus += (d__1 = xp[i__ - 1], abs(d__1));
+ sminu += (d__1 = rhs[i__], abs(d__1));
+/* L30: */
+ }
+ if (splus > sminu) {
+ dcopy_(n, xp, &c__1, &rhs[1], &c__1);
+ }
+
+/* Apply the permutations JPIV to the computed solution (RHS) */
+
+ i__1 = *n - 1;
+ dlaswp_(&c__1, &rhs[1], ldz, &c__1, &i__1, &jpiv[1], &c_n1);
+
+/* Compute the sum of squares */
+
+ dlassq_(n, &rhs[1], &c__1, rdscal, rdsum);
+
+ } else {
+
+/* IJOB = 2, Compute approximate nullvector XM of Z */
+
+ dgecon_("I", n, &z__[z_offset], ldz, &c_b23, &temp, work, iwork, &
+ info);
+ dcopy_(n, &work[*n], &c__1, xm, &c__1);
+
+/* Compute RHS */
+
+ i__1 = *n - 1;
+ dlaswp_(&c__1, xm, ldz, &c__1, &i__1, &ipiv[1], &c_n1);
+ temp = 1. / sqrt(ddot_(n, xm, &c__1, xm, &c__1));
+ dscal_(n, &temp, xm, &c__1);
+ dcopy_(n, xm, &c__1, xp, &c__1);
+ daxpy_(n, &c_b23, &rhs[1], &c__1, xp, &c__1);
+ daxpy_(n, &c_b37, xm, &c__1, &rhs[1], &c__1);
+ dgesc2_(n, &z__[z_offset], ldz, &rhs[1], &ipiv[1], &jpiv[1], &temp);
+ dgesc2_(n, &z__[z_offset], ldz, xp, &ipiv[1], &jpiv[1], &temp);
+ if (dasum_(n, xp, &c__1) > dasum_(n, &rhs[1], &c__1)) {
+ dcopy_(n, xp, &c__1, &rhs[1], &c__1);
+ }
+
+/* Compute the sum of squares */
+
+ dlassq_(n, &rhs[1], &c__1, rdscal, rdsum);
+
+ }
+
+ return 0;
+
+/* End of DLATDF */
+
+} /* dlatdf_ */
diff --git a/contrib/libs/clapack/dlatps.c b/contrib/libs/clapack/dlatps.c
new file mode 100644
index 0000000000..ebaa06cb29
--- /dev/null
+++ b/contrib/libs/clapack/dlatps.c
@@ -0,0 +1,824 @@
+/* dlatps.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b36 = .5;
+
+/* Subroutine */ int dlatps_(char *uplo, char *trans, char *diag, char *
+ normin, integer *n, doublereal *ap, doublereal *x, doublereal *scale,
+ doublereal *cnorm, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ doublereal d__1, d__2, d__3;
+
+ /* Local variables */
+ integer i__, j, ip;
+ doublereal xj, rec, tjj;
+ integer jinc, jlen;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ doublereal xbnd;
+ integer imax;
+ doublereal tmax, tjjs, xmax, grow, sumj;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ doublereal tscal, uscal;
+ extern doublereal dasum_(integer *, doublereal *, integer *);
+ integer jlast;
+ extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *);
+ logical upper;
+ extern /* Subroutine */ int dtpsv_(char *, char *, char *, integer *,
+ doublereal *, doublereal *, integer *);
+ extern doublereal dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum;
+ logical notran;
+ integer jfirst;
+ doublereal smlnum;
+ logical nounit;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLATPS solves one of the triangular systems */
+
+/* A *x = s*b or A'*x = s*b */
+
+/* with scaling to prevent overflow, where A is an upper or lower */
+/* triangular matrix stored in packed form. Here A' denotes the */
+/* transpose of A, x and b are n-element vectors, and s is a scaling */
+/* factor, usually less than or equal to 1, chosen so that the */
+/* components of x will be less than the overflow threshold. If the */
+/* unscaled problem will not cause overflow, the Level 2 BLAS routine */
+/* DTPSV is called. If the matrix A is singular (A(j,j) = 0 for some j), */
+/* then s is set to 0 and a non-trivial solution to A*x = 0 is returned. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the operation applied to A. */
+/* = 'N': Solve A * x = s*b (No transpose) */
+/* = 'T': Solve A'* x = s*b (Transpose) */
+/* = 'C': Solve A'* x = s*b (Conjugate transpose = Transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* NORMIN (input) CHARACTER*1 */
+/* Specifies whether CNORM has been set or not. */
+/* = 'Y': CNORM contains the column norms on entry */
+/* = 'N': CNORM is not set on entry. On exit, the norms will */
+/* be computed and stored in CNORM. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* The upper or lower triangular matrix A, packed columnwise in */
+/* a linear array. The j-th column of A is stored in the array */
+/* AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* X (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the right hand side b of the triangular system. */
+/* On exit, X is overwritten by the solution vector x. */
+
+/* SCALE (output) DOUBLE PRECISION */
+/* The scaling factor s for the triangular system */
+/* A * x = s*b or A'* x = s*b. */
+/* If SCALE = 0, the matrix A is singular or badly scaled, and */
+/* the vector x is an exact or approximate solution to A*x = 0. */
+
+/* CNORM (input or output) DOUBLE PRECISION array, dimension (N) */
+
+/* If NORMIN = 'Y', CNORM is an input argument and CNORM(j) */
+/* contains the norm of the off-diagonal part of the j-th column */
+/* of A. If TRANS = 'N', CNORM(j) must be greater than or equal */
+/* to the infinity-norm, and if TRANS = 'T' or 'C', CNORM(j) */
+/* must be greater than or equal to the 1-norm. */
+
+/* If NORMIN = 'N', CNORM is an output argument and CNORM(j) */
+/* returns the 1-norm of the offdiagonal part of the j-th column */
+/* of A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* Further Details */
+/* ======= ======= */
+
+/* A rough bound on x is computed; if that is less than overflow, DTPSV */
+/* is called, otherwise, specific code is used which checks for possible */
+/* overflow or divide-by-zero at every operation. */
+
+/* A columnwise scheme is used for solving A*x = b. The basic algorithm */
+/* if A is lower triangular is */
+
+/* x[1:n] := b[1:n] */
+/* for j = 1, ..., n */
+/* x(j) := x(j) / A(j,j) */
+/* x[j+1:n] := x[j+1:n] - x(j) * A[j+1:n,j] */
+/* end */
+
+/* Define bounds on the components of x after j iterations of the loop: */
+/* M(j) = bound on x[1:j] */
+/* G(j) = bound on x[j+1:n] */
+/* Initially, let M(0) = 0 and G(0) = max{x(i), i=1,...,n}. */
+
+/* Then for iteration j+1 we have */
+/* M(j+1) <= G(j) / | A(j+1,j+1) | */
+/* G(j+1) <= G(j) + M(j+1) * | A[j+2:n,j+1] | */
+/* <= G(j) ( 1 + CNORM(j+1) / | A(j+1,j+1) | ) */
+
+/* where CNORM(j+1) is greater than or equal to the infinity-norm of */
+/* column j+1 of A, not counting the diagonal. Hence */
+
+/* G(j) <= G(0) product ( 1 + CNORM(i) / | A(i,i) | ) */
+/* 1<=i<=j */
+/* and */
+
+/* |x(j)| <= ( G(0) / |A(j,j)| ) product ( 1 + CNORM(i) / |A(i,i)| ) */
+/* 1<=i< j */
+
+/* Since |x(j)| <= M(j), we use the Level 2 BLAS routine DTPSV if the */
+/* reciprocal of the largest M(j), j=1,..,n, is larger than */
+/* max(underflow, 1/overflow). */
+
+/* The bound on x(j) is also used to determine when a step in the */
+/* columnwise method can be performed without fear of overflow. If */
+/* the computed bound is greater than a large constant, x is scaled to */
+/* prevent overflow, but if the bound overflows, x is set to 0, x(j) to */
+/* 1, and scale to 0, and a non-trivial solution to A*x = 0 is found. */
+
+/* Similarly, a row-wise scheme is used to solve A'*x = b. The basic */
+/* algorithm for A upper triangular is */
+
+/* for j = 1, ..., n */
+/* x(j) := ( b(j) - A[1:j-1,j]' * x[1:j-1] ) / A(j,j) */
+/* end */
+
+/* We simultaneously compute two bounds */
+/* G(j) = bound on ( b(i) - A[1:i-1,i]' * x[1:i-1] ), 1<=i<=j */
+/* M(j) = bound on x(i), 1<=i<=j */
+
+/* The initial values are G(0) = 0, M(0) = max{b(i), i=1,..,n}, and we */
+/* add the constraint G(j) >= G(j-1) and M(j) >= M(j-1) for j >= 1. */
+/* Then the bound on x(j) is */
+
+/* M(j) <= M(j-1) * ( 1 + CNORM(j) ) / | A(j,j) | */
+
+/* <= M(0) * product ( ( 1 + CNORM(i) ) / |A(i,i)| ) */
+/* 1<=i<=j */
+
+/* and we can safely call DTPSV if 1/M(n) and 1/G(n) are both greater */
+/* than max(underflow, 1/overflow). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --cnorm;
+ --x;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+/* Test the input parameters. */
+
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (! lsame_(normin, "Y") && ! lsame_(normin,
+ "N")) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLATPS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine machine dependent parameters to control overflow. */
+
+ smlnum = dlamch_("Safe minimum") / dlamch_("Precision");
+ bignum = 1. / smlnum;
+ *scale = 1.;
+
+ if (lsame_(normin, "N")) {
+
+/* Compute the 1-norm of each column, not including the diagonal. */
+
+ if (upper) {
+
+/* A is upper triangular. */
+
+ ip = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ cnorm[j] = dasum_(&i__2, &ap[ip], &c__1);
+ ip += j;
+/* L10: */
+ }
+ } else {
+
+/* A is lower triangular. */
+
+ ip = 1;
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ cnorm[j] = dasum_(&i__2, &ap[ip + 1], &c__1);
+ ip = ip + *n - j + 1;
+/* L20: */
+ }
+ cnorm[*n] = 0.;
+ }
+ }
+
+/* Scale the column norms by TSCAL if the maximum element in CNORM is */
+/* greater than BIGNUM. */
+
+ imax = idamax_(n, &cnorm[1], &c__1);
+ tmax = cnorm[imax];
+ if (tmax <= bignum) {
+ tscal = 1.;
+ } else {
+ tscal = 1. / (smlnum * tmax);
+ dscal_(n, &tscal, &cnorm[1], &c__1);
+ }
+
+/* Compute a bound on the computed solution vector to see if the */
+/* Level 2 BLAS routine DTPSV can be used. */
+
+ j = idamax_(n, &x[1], &c__1);
+ xmax = (d__1 = x[j], abs(d__1));
+ xbnd = xmax;
+ if (notran) {
+
+/* Compute the growth in A * x = b. */
+
+ if (upper) {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ } else {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ }
+
+ if (tscal != 1.) {
+ grow = 0.;
+ goto L50;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, G(0) = max{x(i), i=1,...,n}. */
+
+ grow = 1. / max(xbnd,smlnum);
+ xbnd = grow;
+ ip = jfirst * (jfirst + 1) / 2;
+ jlen = *n;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L50;
+ }
+
+/* M(j) = G(j-1) / abs(A(j,j)) */
+
+ tjj = (d__1 = ap[ip], abs(d__1));
+/* Computing MIN */
+ d__1 = xbnd, d__2 = min(1.,tjj) * grow;
+ xbnd = min(d__1,d__2);
+ if (tjj + cnorm[j] >= smlnum) {
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) / abs(A(j,j)) ) */
+
+ grow *= tjj / (tjj + cnorm[j]);
+ } else {
+
+/* G(j) could overflow, set GROW to 0. */
+
+ grow = 0.;
+ }
+ ip += jinc * jlen;
+ --jlen;
+/* L30: */
+ }
+ grow = xbnd;
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = 1. / max(xbnd,smlnum);
+ grow = min(d__1,d__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L50;
+ }
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) ) */
+
+ grow *= 1. / (cnorm[j] + 1.);
+/* L40: */
+ }
+ }
+L50:
+
+ ;
+ } else {
+
+/* Compute the growth in A' * x = b. */
+
+ if (upper) {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ } else {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ }
+
+ if (tscal != 1.) {
+ grow = 0.;
+ goto L80;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, M(0) = max{x(i), i=1,...,n}. */
+
+ grow = 1. / max(xbnd,smlnum);
+ xbnd = grow;
+ ip = jfirst * (jfirst + 1) / 2;
+ jlen = 1;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L80;
+ }
+
+/* G(j) = max( G(j-1), M(j-1)*( 1 + CNORM(j) ) ) */
+
+ xj = cnorm[j] + 1.;
+/* Computing MIN */
+ d__1 = grow, d__2 = xbnd / xj;
+ grow = min(d__1,d__2);
+
+/* M(j) = M(j-1)*( 1 + CNORM(j) ) / abs(A(j,j)) */
+
+ tjj = (d__1 = ap[ip], abs(d__1));
+ if (xj > tjj) {
+ xbnd *= tjj / xj;
+ }
+ ++jlen;
+ ip += jinc * jlen;
+/* L60: */
+ }
+ grow = min(grow,xbnd);
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = 1. / max(xbnd,smlnum);
+ grow = min(d__1,d__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L80;
+ }
+
+/* G(j) = ( 1 + CNORM(j) )*G(j-1) */
+
+ xj = cnorm[j] + 1.;
+ grow /= xj;
+/* L70: */
+ }
+ }
+L80:
+ ;
+ }
+
+ if (grow * tscal > smlnum) {
+
+/* Use the Level 2 BLAS solve if the reciprocal of the bound on */
+/* elements of X is not too small. */
+
+ dtpsv_(uplo, trans, diag, n, &ap[1], &x[1], &c__1);
+ } else {
+
+/* Use a Level 1 BLAS solve, scaling intermediate results. */
+
+ if (xmax > bignum) {
+
+/* Scale X so that its components are less than or equal to */
+/* BIGNUM in absolute value. */
+
+ *scale = bignum / xmax;
+ dscal_(n, scale, &x[1], &c__1);
+ xmax = bignum;
+ }
+
+ if (notran) {
+
+/* Solve A * x = b */
+
+ ip = jfirst * (jfirst + 1) / 2;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) / A(j,j), scaling x if necessary. */
+
+ xj = (d__1 = x[j], abs(d__1));
+ if (nounit) {
+ tjjs = ap[ip] * tscal;
+ } else {
+ tjjs = tscal;
+ if (tscal == 1.) {
+ goto L100;
+ }
+ }
+ tjj = abs(tjjs);
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.) {
+ if (xj > tjj * bignum) {
+
+/* Scale x by 1/b(j). */
+
+ rec = 1. / xj;
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ x[j] /= tjjs;
+ xj = (d__1 = x[j], abs(d__1));
+ } else if (tjj > 0.) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM */
+/* to avoid overflow when dividing by A(j,j). */
+
+ rec = tjj * bignum / xj;
+ if (cnorm[j] > 1.) {
+
+/* Scale by 1/CNORM(j) to avoid overflow when */
+/* multiplying x(j) times column j. */
+
+ rec /= cnorm[j];
+ }
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ x[j] /= tjjs;
+ xj = (d__1 = x[j], abs(d__1));
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0, and compute a solution to A*x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ x[i__] = 0.;
+/* L90: */
+ }
+ x[j] = 1.;
+ xj = 1.;
+ *scale = 0.;
+ xmax = 0.;
+ }
+L100:
+
+/* Scale x if necessary to avoid overflow when adding a */
+/* multiple of column j of A. */
+
+ if (xj > 1.) {
+ rec = 1. / xj;
+ if (cnorm[j] > (bignum - xmax) * rec) {
+
+/* Scale x by 1/(2*abs(x(j))). */
+
+ rec *= .5;
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ } else if (xj * cnorm[j] > bignum - xmax) {
+
+/* Scale x by 1/2. */
+
+ dscal_(n, &c_b36, &x[1], &c__1);
+ *scale *= .5;
+ }
+
+ if (upper) {
+ if (j > 1) {
+
+/* Compute the update */
+/* x(1:j-1) := x(1:j-1) - x(j) * A(1:j-1,j) */
+
+ i__3 = j - 1;
+ d__1 = -x[j] * tscal;
+ daxpy_(&i__3, &d__1, &ap[ip - j + 1], &c__1, &x[1], &
+ c__1);
+ i__3 = j - 1;
+ i__ = idamax_(&i__3, &x[1], &c__1);
+ xmax = (d__1 = x[i__], abs(d__1));
+ }
+ ip -= j;
+ } else {
+ if (j < *n) {
+
+/* Compute the update */
+/* x(j+1:n) := x(j+1:n) - x(j) * A(j+1:n,j) */
+
+ i__3 = *n - j;
+ d__1 = -x[j] * tscal;
+ daxpy_(&i__3, &d__1, &ap[ip + 1], &c__1, &x[j + 1], &
+ c__1);
+ i__3 = *n - j;
+ i__ = j + idamax_(&i__3, &x[j + 1], &c__1);
+ xmax = (d__1 = x[i__], abs(d__1));
+ }
+ ip = ip + *n - j + 1;
+ }
+/* L110: */
+ }
+
+ } else {
+
+/* Solve A' * x = b */
+
+ ip = jfirst * (jfirst + 1) / 2;
+ jlen = 1;
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Compute x(j) = b(j) - sum A(k,j)*x(k). */
+/* k<>j */
+
+ xj = (d__1 = x[j], abs(d__1));
+ uscal = tscal;
+ rec = 1. / max(xmax,1.);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5;
+ if (nounit) {
+ tjjs = ap[ip] * tscal;
+ } else {
+ tjjs = tscal;
+ }
+ tjj = abs(tjjs);
+ if (tjj > 1.) {
+
+/* Divide by A(j,j) when scaling x if A(j,j) > 1. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = rec * tjj;
+ rec = min(d__1,d__2);
+ uscal /= tjjs;
+ }
+ if (rec < 1.) {
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ sumj = 0.;
+ if (uscal == 1.) {
+
+/* If the scaling needed for A in the dot product is 1, */
+/* call DDOT to perform the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ sumj = ddot_(&i__3, &ap[ip - j + 1], &c__1, &x[1], &
+ c__1);
+ } else if (j < *n) {
+ i__3 = *n - j;
+ sumj = ddot_(&i__3, &ap[ip + 1], &c__1, &x[j + 1], &
+ c__1);
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ sumj += ap[ip - j + i__] * uscal * x[i__];
+/* L120: */
+ }
+ } else if (j < *n) {
+ i__3 = *n - j;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ sumj += ap[ip + i__] * uscal * x[j + i__];
+/* L130: */
+ }
+ }
+ }
+
+ if (uscal == tscal) {
+
+/* Compute x(j) := ( x(j) - sumj ) / A(j,j) if 1/A(j,j) */
+/* was not used to scale the dotproduct. */
+
+ x[j] -= sumj;
+ xj = (d__1 = x[j], abs(d__1));
+ if (nounit) {
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ tjjs = ap[ip] * tscal;
+ } else {
+ tjjs = tscal;
+ if (tscal == 1.) {
+ goto L150;
+ }
+ }
+ tjj = abs(tjjs);
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1. / xj;
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ x[j] /= tjjs;
+ } else if (tjj > 0.) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ x[j] /= tjjs;
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0, and compute a solution to A'*x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ x[i__] = 0.;
+/* L140: */
+ }
+ x[j] = 1.;
+ *scale = 0.;
+ xmax = 0.;
+ }
+L150:
+ ;
+ } else {
+
+/* Compute x(j) := x(j) / A(j,j) - sumj if the dot */
+/* product has already been divided by 1/A(j,j). */
+
+ x[j] = x[j] / tjjs - sumj;
+ }
+/* Computing MAX */
+ d__2 = xmax, d__3 = (d__1 = x[j], abs(d__1));
+ xmax = max(d__2,d__3);
+ ++jlen;
+ ip += jinc * jlen;
+/* L160: */
+ }
+ }
+ *scale /= tscal;
+ }
+
+/* Scale the column norms by 1/TSCAL for return. */
+
+ if (tscal != 1.) {
+ d__1 = 1. / tscal;
+ dscal_(n, &d__1, &cnorm[1], &c__1);
+ }
+
+ return 0;
+
+/* End of DLATPS */
+
+} /* dlatps_ */
diff --git a/contrib/libs/clapack/dlatrd.c b/contrib/libs/clapack/dlatrd.c
new file mode 100644
index 0000000000..6d07750611
--- /dev/null
+++ b/contrib/libs/clapack/dlatrd.c
@@ -0,0 +1,355 @@
+/* dlatrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b5 = -1.;
+static doublereal c_b6 = 1.;
+static integer c__1 = 1;
+static doublereal c_b16 = 0.;
+
+/* Subroutine */ int dlatrd_(char *uplo, integer *n, integer *nb, doublereal *
+ a, integer *lda, doublereal *e, doublereal *tau, doublereal *w,
+ integer *ldw)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, iw;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ doublereal alpha;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *), daxpy_(integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *),
+ dsymv_(char *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *), dlarfg_(integer *, doublereal *, doublereal *, integer *,
+ doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLATRD reduces NB rows and columns of a real symmetric matrix A to */
+/* symmetric tridiagonal form by an orthogonal similarity */
+/* transformation Q' * A * Q, and returns the matrices V and W which are */
+/* needed to apply the transformation to the unreduced part of A. */
+
+/* If UPLO = 'U', DLATRD reduces the last NB rows and columns of a */
+/* matrix, of which the upper triangle is supplied; */
+/* if UPLO = 'L', DLATRD reduces the first NB rows and columns of a */
+/* matrix, of which the lower triangle is supplied. */
+
+/* This is an auxiliary routine called by DSYTRD. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* NB (input) INTEGER */
+/* The number of rows and columns to be reduced. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+/* On exit: */
+/* if UPLO = 'U', the last NB columns have been reduced to */
+/* tridiagonal form, with the diagonal elements overwriting */
+/* the diagonal elements of A; the elements above the diagonal */
+/* with the array TAU, represent the orthogonal matrix Q as a */
+/* product of elementary reflectors; */
+/* if UPLO = 'L', the first NB columns have been reduced to */
+/* tridiagonal form, with the diagonal elements overwriting */
+/* the diagonal elements of A; the elements below the diagonal */
+/* with the array TAU, represent the orthogonal matrix Q as a */
+/* product of elementary reflectors. */
+/* See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= (1,N). */
+
+/* E (output) DOUBLE PRECISION array, dimension (N-1) */
+/* If UPLO = 'U', E(n-nb:n-1) contains the superdiagonal */
+/* elements of the last NB columns of the reduced matrix; */
+/* if UPLO = 'L', E(1:nb) contains the subdiagonal elements of */
+/* the first NB columns of the reduced matrix. */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors, stored in */
+/* TAU(n-nb:n-1) if UPLO = 'U', and in TAU(1:nb) if UPLO = 'L'. */
+/* See Further Details. */
+
+/* W (output) DOUBLE PRECISION array, dimension (LDW,NB) */
+/* The n-by-nb matrix W required to update the unreduced part */
+/* of A. */
+
+/* LDW (input) INTEGER */
+/* The leading dimension of the array W. LDW >= max(1,N). */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(n) H(n-1) . . . H(n-nb+1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i), */
+/* and tau in TAU(i-1). */
+
+/* If UPLO = 'L', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(1) H(2) . . . H(nb). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i), */
+/* and tau in TAU(i). */
+
+/* The elements of the vectors v together form the n-by-nb matrix V */
+/* which is needed, with W, to apply the transformation to the unreduced */
+/* part of the matrix, using a symmetric rank-2k update of the form: */
+/* A := A - V*W' - W*V'. */
+
+/* The contents of A on exit are illustrated by the following examples */
+/* with n = 5 and nb = 2: */
+
+/* if UPLO = 'U': if UPLO = 'L': */
+
+/* ( a a a v4 v5 ) ( d ) */
+/* ( a a v4 v5 ) ( 1 d ) */
+/* ( a 1 v5 ) ( v1 1 a ) */
+/* ( d 1 ) ( v1 v2 a a ) */
+/* ( d ) ( v1 v2 a a a ) */
+
+/* where d denotes a diagonal element of the reduced matrix, a denotes */
+/* an element of the original matrix that is unchanged, and vi denotes */
+/* an element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --e;
+ --tau;
+ w_dim1 = *ldw;
+ w_offset = 1 + w_dim1;
+ w -= w_offset;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+
+ if (lsame_(uplo, "U")) {
+
+/* Reduce last NB columns of upper triangle */
+
+ i__1 = *n - *nb + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ iw = i__ - *n + *nb;
+ if (i__ < *n) {
+
+/* Update A(1:i,i) */
+
+ i__2 = *n - i__;
+ dgemv_("No transpose", &i__, &i__2, &c_b5, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &w[i__ + (iw + 1) * w_dim1], ldw, &
+ c_b6, &a[i__ * a_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ dgemv_("No transpose", &i__, &i__2, &c_b5, &w[(iw + 1) *
+ w_dim1 + 1], ldw, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ c_b6, &a[i__ * a_dim1 + 1], &c__1);
+ }
+ if (i__ > 1) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(1:i-2,i) */
+
+ i__2 = i__ - 1;
+ dlarfg_(&i__2, &a[i__ - 1 + i__ * a_dim1], &a[i__ * a_dim1 +
+ 1], &c__1, &tau[i__ - 1]);
+ e[i__ - 1] = a[i__ - 1 + i__ * a_dim1];
+ a[i__ - 1 + i__ * a_dim1] = 1.;
+
+/* Compute W(1:i-1,i) */
+
+ i__2 = i__ - 1;
+ dsymv_("Upper", &i__2, &c_b6, &a[a_offset], lda, &a[i__ *
+ a_dim1 + 1], &c__1, &c_b16, &w[iw * w_dim1 + 1], &
+ c__1);
+ if (i__ < *n) {
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ dgemv_("Transpose", &i__2, &i__3, &c_b6, &w[(iw + 1) *
+ w_dim1 + 1], ldw, &a[i__ * a_dim1 + 1], &c__1, &
+ c_b16, &w[i__ + 1 + iw * w_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ dgemv_("No transpose", &i__2, &i__3, &c_b5, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &w[i__ + 1 + iw * w_dim1], &
+ c__1, &c_b6, &w[iw * w_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ dgemv_("Transpose", &i__2, &i__3, &c_b6, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &a[i__ * a_dim1 + 1], &c__1, &
+ c_b16, &w[i__ + 1 + iw * w_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ dgemv_("No transpose", &i__2, &i__3, &c_b5, &w[(iw + 1) *
+ w_dim1 + 1], ldw, &w[i__ + 1 + iw * w_dim1], &
+ c__1, &c_b6, &w[iw * w_dim1 + 1], &c__1);
+ }
+ i__2 = i__ - 1;
+ dscal_(&i__2, &tau[i__ - 1], &w[iw * w_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ alpha = tau[i__ - 1] * -.5 * ddot_(&i__2, &w[iw * w_dim1 + 1],
+ &c__1, &a[i__ * a_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ daxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &w[iw *
+ w_dim1 + 1], &c__1);
+ }
+
+/* L10: */
+ }
+ } else {
+
+/* Reduce first NB columns of lower triangle */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i:n,i) */
+
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b5, &a[i__ + a_dim1], lda,
+ &w[i__ + w_dim1], ldw, &c_b6, &a[i__ + i__ * a_dim1], &
+ c__1);
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b5, &w[i__ + w_dim1], ldw,
+ &a[i__ + a_dim1], lda, &c_b6, &a[i__ + i__ * a_dim1], &
+ c__1);
+ if (i__ < *n) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(i+2:n,i) */
+
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3, *n)+
+ i__ * a_dim1], &c__1, &tau[i__]);
+ e[i__] = a[i__ + 1 + i__ * a_dim1];
+ a[i__ + 1 + i__ * a_dim1] = 1.;
+
+/* Compute W(i+1:n,i) */
+
+ i__2 = *n - i__;
+ dsymv_("Lower", &i__2, &c_b6, &a[i__ + 1 + (i__ + 1) * a_dim1]
+, lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b16, &w[
+ i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b6, &w[i__ + 1 + w_dim1],
+ ldw, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b16, &w[
+ i__ * w_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b5, &a[i__ + 1 +
+ a_dim1], lda, &w[i__ * w_dim1 + 1], &c__1, &c_b6, &w[
+ i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b6, &a[i__ + 1 + a_dim1],
+ lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b16, &w[
+ i__ * w_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b5, &w[i__ + 1 +
+ w_dim1], ldw, &w[i__ * w_dim1 + 1], &c__1, &c_b6, &w[
+ i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ dscal_(&i__2, &tau[i__], &w[i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ alpha = tau[i__] * -.5 * ddot_(&i__2, &w[i__ + 1 + i__ *
+ w_dim1], &c__1, &a[i__ + 1 + i__ * a_dim1], &c__1);
+ i__2 = *n - i__;
+ daxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &w[
+ i__ + 1 + i__ * w_dim1], &c__1);
+ }
+
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of DLATRD */
+
+} /* dlatrd_ */
diff --git a/contrib/libs/clapack/dlatrs.c b/contrib/libs/clapack/dlatrs.c
new file mode 100644
index 0000000000..2bb27845f3
--- /dev/null
+++ b/contrib/libs/clapack/dlatrs.c
@@ -0,0 +1,815 @@
+/* dlatrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b36 = .5;
+
+/* Subroutine */ int dlatrs_(char *uplo, char *trans, char *diag, char *
+ normin, integer *n, doublereal *a, integer *lda, doublereal *x,
+ doublereal *scale, doublereal *cnorm, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublereal d__1, d__2, d__3;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal xj, rec, tjj;
+ integer jinc;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ doublereal xbnd;
+ integer imax;
+ doublereal tmax, tjjs, xmax, grow, sumj;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ doublereal tscal, uscal;
+ extern doublereal dasum_(integer *, doublereal *, integer *);
+ integer jlast;
+ extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *);
+ logical upper;
+ extern /* Subroutine */ int dtrsv_(char *, char *, char *, integer *,
+ doublereal *, integer *, doublereal *, integer *);
+ extern doublereal dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum;
+ logical notran;
+ integer jfirst;
+ doublereal smlnum;
+ logical nounit;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DLATRS solves one of the triangular systems */
+
+/* A *x = s*b or A'*x = s*b */
+
+/* with scaling to prevent overflow. Here A is an upper or lower */
+/* triangular matrix, A' denotes the transpose of A, x and b are */
+/* n-element vectors, and s is a scaling factor, usually less than */
+/* or equal to 1, chosen so that the components of x will be less than */
+/* the overflow threshold. If the unscaled problem will not cause */
+/* overflow, the Level 2 BLAS routine DTRSV is called. If the matrix A */
+/* is singular (A(j,j) = 0 for some j), then s is set to 0 and a */
+/* non-trivial solution to A*x = 0 is returned. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the operation applied to A. */
+/* = 'N': Solve A * x = s*b (No transpose) */
+/* = 'T': Solve A'* x = s*b (Transpose) */
+/* = 'C': Solve A'* x = s*b (Conjugate transpose = Transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* NORMIN (input) CHARACTER*1 */
+/* Specifies whether CNORM has been set or not. */
+/* = 'Y': CNORM contains the column norms on entry */
+/* = 'N': CNORM is not set on entry. On exit, the norms will */
+/* be computed and stored in CNORM. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The triangular matrix A. If UPLO = 'U', the leading n by n */
+/* upper triangular part of the array A contains the upper */
+/* triangular matrix, and the strictly lower triangular part of */
+/* A is not referenced. If UPLO = 'L', the leading n by n lower */
+/* triangular part of the array A contains the lower triangular */
+/* matrix, and the strictly upper triangular part of A is not */
+/* referenced. If DIAG = 'U', the diagonal elements of A are */
+/* also not referenced and are assumed to be 1. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max (1,N). */
+
+/* X (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the right hand side b of the triangular system. */
+/* On exit, X is overwritten by the solution vector x. */
+
+/* SCALE (output) DOUBLE PRECISION */
+/* The scaling factor s for the triangular system */
+/* A * x = s*b or A'* x = s*b. */
+/* If SCALE = 0, the matrix A is singular or badly scaled, and */
+/* the vector x is an exact or approximate solution to A*x = 0. */
+
+/* CNORM (input or output) DOUBLE PRECISION array, dimension (N) */
+
+/* If NORMIN = 'Y', CNORM is an input argument and CNORM(j) */
+/* contains the norm of the off-diagonal part of the j-th column */
+/* of A. If TRANS = 'N', CNORM(j) must be greater than or equal */
+/* to the infinity-norm, and if TRANS = 'T' or 'C', CNORM(j) */
+/* must be greater than or equal to the 1-norm. */
+
+/* If NORMIN = 'N', CNORM is an output argument and CNORM(j) */
+/* returns the 1-norm of the offdiagonal part of the j-th column */
+/* of A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* Further Details */
+/* ======= ======= */
+
+/* A rough bound on x is computed; if that is less than overflow, DTRSV */
+/* is called, otherwise, specific code is used which checks for possible */
+/* overflow or divide-by-zero at every operation. */
+
+/* A columnwise scheme is used for solving A*x = b. The basic algorithm */
+/* if A is lower triangular is */
+
+/* x[1:n] := b[1:n] */
+/* for j = 1, ..., n */
+/* x(j) := x(j) / A(j,j) */
+/* x[j+1:n] := x[j+1:n] - x(j) * A[j+1:n,j] */
+/* end */
+
+/* Define bounds on the components of x after j iterations of the loop: */
+/* M(j) = bound on x[1:j] */
+/* G(j) = bound on x[j+1:n] */
+/* Initially, let M(0) = 0 and G(0) = max{x(i), i=1,...,n}. */
+
+/* Then for iteration j+1 we have */
+/* M(j+1) <= G(j) / | A(j+1,j+1) | */
+/* G(j+1) <= G(j) + M(j+1) * | A[j+2:n,j+1] | */
+/* <= G(j) ( 1 + CNORM(j+1) / | A(j+1,j+1) | ) */
+
+/* where CNORM(j+1) is greater than or equal to the infinity-norm of */
+/* column j+1 of A, not counting the diagonal. Hence */
+
+/* G(j) <= G(0) product ( 1 + CNORM(i) / | A(i,i) | ) */
+/* 1<=i<=j */
+/* and */
+
+/* |x(j)| <= ( G(0) / |A(j,j)| ) product ( 1 + CNORM(i) / |A(i,i)| ) */
+/* 1<=i< j */
+
+/* Since |x(j)| <= M(j), we use the Level 2 BLAS routine DTRSV if the */
+/* reciprocal of the largest M(j), j=1,..,n, is larger than */
+/* max(underflow, 1/overflow). */
+
+/* The bound on x(j) is also used to determine when a step in the */
+/* columnwise method can be performed without fear of overflow. If */
+/* the computed bound is greater than a large constant, x is scaled to */
+/* prevent overflow, but if the bound overflows, x is set to 0, x(j) to */
+/* 1, and scale to 0, and a non-trivial solution to A*x = 0 is found. */
+
+/* Similarly, a row-wise scheme is used to solve A'*x = b. The basic */
+/* algorithm for A upper triangular is */
+
+/* for j = 1, ..., n */
+/* x(j) := ( b(j) - A[1:j-1,j]' * x[1:j-1] ) / A(j,j) */
+/* end */
+
+/* We simultaneously compute two bounds */
+/* G(j) = bound on ( b(i) - A[1:i-1,i]' * x[1:i-1] ), 1<=i<=j */
+/* M(j) = bound on x(i), 1<=i<=j */
+
+/* The initial values are G(0) = 0, M(0) = max{b(i), i=1,..,n}, and we */
+/* add the constraint G(j) >= G(j-1) and M(j) >= M(j-1) for j >= 1. */
+/* Then the bound on x(j) is */
+
+/* M(j) <= M(j-1) * ( 1 + CNORM(j) ) / | A(j,j) | */
+
+/* <= M(0) * product ( ( 1 + CNORM(i) ) / |A(i,i)| ) */
+/* 1<=i<=j */
+
+/* and we can safely call DTRSV if 1/M(n) and 1/G(n) are both greater */
+/* than max(underflow, 1/overflow). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --cnorm;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+/* Test the input parameters. */
+
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (! lsame_(normin, "Y") && ! lsame_(normin,
+ "N")) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLATRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine machine dependent parameters to control overflow. */
+
+ smlnum = dlamch_("Safe minimum") / dlamch_("Precision");
+ bignum = 1. / smlnum;
+ *scale = 1.;
+
+ if (lsame_(normin, "N")) {
+
+/* Compute the 1-norm of each column, not including the diagonal. */
+
+ if (upper) {
+
+/* A is upper triangular. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ cnorm[j] = dasum_(&i__2, &a[j * a_dim1 + 1], &c__1);
+/* L10: */
+ }
+ } else {
+
+/* A is lower triangular. */
+
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ cnorm[j] = dasum_(&i__2, &a[j + 1 + j * a_dim1], &c__1);
+/* L20: */
+ }
+ cnorm[*n] = 0.;
+ }
+ }
+
+/* Scale the column norms by TSCAL if the maximum element in CNORM is */
+/* greater than BIGNUM. */
+
+ imax = idamax_(n, &cnorm[1], &c__1);
+ tmax = cnorm[imax];
+ if (tmax <= bignum) {
+ tscal = 1.;
+ } else {
+ tscal = 1. / (smlnum * tmax);
+ dscal_(n, &tscal, &cnorm[1], &c__1);
+ }
+
+/* Compute a bound on the computed solution vector to see if the */
+/* Level 2 BLAS routine DTRSV can be used. */
+
+ j = idamax_(n, &x[1], &c__1);
+ xmax = (d__1 = x[j], abs(d__1));
+ xbnd = xmax;
+ if (notran) {
+
+/* Compute the growth in A * x = b. */
+
+ if (upper) {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ } else {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ }
+
+ if (tscal != 1.) {
+ grow = 0.;
+ goto L50;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, G(0) = max{x(i), i=1,...,n}. */
+
+ grow = 1. / max(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L50;
+ }
+
+/* M(j) = G(j-1) / abs(A(j,j)) */
+
+ tjj = (d__1 = a[j + j * a_dim1], abs(d__1));
+/* Computing MIN */
+ d__1 = xbnd, d__2 = min(1.,tjj) * grow;
+ xbnd = min(d__1,d__2);
+ if (tjj + cnorm[j] >= smlnum) {
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) / abs(A(j,j)) ) */
+
+ grow *= tjj / (tjj + cnorm[j]);
+ } else {
+
+/* G(j) could overflow, set GROW to 0. */
+
+ grow = 0.;
+ }
+/* L30: */
+ }
+ grow = xbnd;
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = 1. / max(xbnd,smlnum);
+ grow = min(d__1,d__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L50;
+ }
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) ) */
+
+ grow *= 1. / (cnorm[j] + 1.);
+/* L40: */
+ }
+ }
+L50:
+
+ ;
+ } else {
+
+/* Compute the growth in A' * x = b. */
+
+ if (upper) {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ } else {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ }
+
+ if (tscal != 1.) {
+ grow = 0.;
+ goto L80;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, M(0) = max{x(i), i=1,...,n}. */
+
+ grow = 1. / max(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L80;
+ }
+
+/* G(j) = max( G(j-1), M(j-1)*( 1 + CNORM(j) ) ) */
+
+ xj = cnorm[j] + 1.;
+/* Computing MIN */
+ d__1 = grow, d__2 = xbnd / xj;
+ grow = min(d__1,d__2);
+
+/* M(j) = M(j-1)*( 1 + CNORM(j) ) / abs(A(j,j)) */
+
+ tjj = (d__1 = a[j + j * a_dim1], abs(d__1));
+ if (xj > tjj) {
+ xbnd *= tjj / xj;
+ }
+/* L60: */
+ }
+ grow = min(grow,xbnd);
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = 1. / max(xbnd,smlnum);
+ grow = min(d__1,d__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L80;
+ }
+
+/* G(j) = ( 1 + CNORM(j) )*G(j-1) */
+
+ xj = cnorm[j] + 1.;
+ grow /= xj;
+/* L70: */
+ }
+ }
+L80:
+ ;
+ }
+
+ if (grow * tscal > smlnum) {
+
+/* Use the Level 2 BLAS solve if the reciprocal of the bound on */
+/* elements of X is not too small. */
+
+ dtrsv_(uplo, trans, diag, n, &a[a_offset], lda, &x[1], &c__1);
+ } else {
+
+/* Use a Level 1 BLAS solve, scaling intermediate results. */
+
+ if (xmax > bignum) {
+
+/* Scale X so that its components are less than or equal to */
+/* BIGNUM in absolute value. */
+
+ *scale = bignum / xmax;
+ dscal_(n, scale, &x[1], &c__1);
+ xmax = bignum;
+ }
+
+ if (notran) {
+
+/* Solve A * x = b */
+
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) / A(j,j), scaling x if necessary. */
+
+ xj = (d__1 = x[j], abs(d__1));
+ if (nounit) {
+ tjjs = a[j + j * a_dim1] * tscal;
+ } else {
+ tjjs = tscal;
+ if (tscal == 1.) {
+ goto L100;
+ }
+ }
+ tjj = abs(tjjs);
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.) {
+ if (xj > tjj * bignum) {
+
+/* Scale x by 1/b(j). */
+
+ rec = 1. / xj;
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ x[j] /= tjjs;
+ xj = (d__1 = x[j], abs(d__1));
+ } else if (tjj > 0.) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM */
+/* to avoid overflow when dividing by A(j,j). */
+
+ rec = tjj * bignum / xj;
+ if (cnorm[j] > 1.) {
+
+/* Scale by 1/CNORM(j) to avoid overflow when */
+/* multiplying x(j) times column j. */
+
+ rec /= cnorm[j];
+ }
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ x[j] /= tjjs;
+ xj = (d__1 = x[j], abs(d__1));
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0, and compute a solution to A*x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ x[i__] = 0.;
+/* L90: */
+ }
+ x[j] = 1.;
+ xj = 1.;
+ *scale = 0.;
+ xmax = 0.;
+ }
+L100:
+
+/* Scale x if necessary to avoid overflow when adding a */
+/* multiple of column j of A. */
+
+ if (xj > 1.) {
+ rec = 1. / xj;
+ if (cnorm[j] > (bignum - xmax) * rec) {
+
+/* Scale x by 1/(2*abs(x(j))). */
+
+ rec *= .5;
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ } else if (xj * cnorm[j] > bignum - xmax) {
+
+/* Scale x by 1/2. */
+
+ dscal_(n, &c_b36, &x[1], &c__1);
+ *scale *= .5;
+ }
+
+ if (upper) {
+ if (j > 1) {
+
+/* Compute the update */
+/* x(1:j-1) := x(1:j-1) - x(j) * A(1:j-1,j) */
+
+ i__3 = j - 1;
+ d__1 = -x[j] * tscal;
+ daxpy_(&i__3, &d__1, &a[j * a_dim1 + 1], &c__1, &x[1],
+ &c__1);
+ i__3 = j - 1;
+ i__ = idamax_(&i__3, &x[1], &c__1);
+ xmax = (d__1 = x[i__], abs(d__1));
+ }
+ } else {
+ if (j < *n) {
+
+/* Compute the update */
+/* x(j+1:n) := x(j+1:n) - x(j) * A(j+1:n,j) */
+
+ i__3 = *n - j;
+ d__1 = -x[j] * tscal;
+ daxpy_(&i__3, &d__1, &a[j + 1 + j * a_dim1], &c__1, &
+ x[j + 1], &c__1);
+ i__3 = *n - j;
+ i__ = j + idamax_(&i__3, &x[j + 1], &c__1);
+ xmax = (d__1 = x[i__], abs(d__1));
+ }
+ }
+/* L110: */
+ }
+
+ } else {
+
+/* Solve A' * x = b */
+
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Compute x(j) = b(j) - sum A(k,j)*x(k). */
+/* k<>j */
+
+ xj = (d__1 = x[j], abs(d__1));
+ uscal = tscal;
+ rec = 1. / max(xmax,1.);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5;
+ if (nounit) {
+ tjjs = a[j + j * a_dim1] * tscal;
+ } else {
+ tjjs = tscal;
+ }
+ tjj = abs(tjjs);
+ if (tjj > 1.) {
+
+/* Divide by A(j,j) when scaling x if A(j,j) > 1. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = rec * tjj;
+ rec = min(d__1,d__2);
+ uscal /= tjjs;
+ }
+ if (rec < 1.) {
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ sumj = 0.;
+ if (uscal == 1.) {
+
+/* If the scaling needed for A in the dot product is 1, */
+/* call DDOT to perform the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ sumj = ddot_(&i__3, &a[j * a_dim1 + 1], &c__1, &x[1],
+ &c__1);
+ } else if (j < *n) {
+ i__3 = *n - j;
+ sumj = ddot_(&i__3, &a[j + 1 + j * a_dim1], &c__1, &x[
+ j + 1], &c__1);
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ sumj += a[i__ + j * a_dim1] * uscal * x[i__];
+/* L120: */
+ }
+ } else if (j < *n) {
+ i__3 = *n;
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ sumj += a[i__ + j * a_dim1] * uscal * x[i__];
+/* L130: */
+ }
+ }
+ }
+
+ if (uscal == tscal) {
+
+/* Compute x(j) := ( x(j) - sumj ) / A(j,j) if 1/A(j,j) */
+/* was not used to scale the dotproduct. */
+
+ x[j] -= sumj;
+ xj = (d__1 = x[j], abs(d__1));
+ if (nounit) {
+ tjjs = a[j + j * a_dim1] * tscal;
+ } else {
+ tjjs = tscal;
+ if (tscal == 1.) {
+ goto L150;
+ }
+ }
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ tjj = abs(tjjs);
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1. / xj;
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ x[j] /= tjjs;
+ } else if (tjj > 0.) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ dscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ x[j] /= tjjs;
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0, and compute a solution to A'*x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ x[i__] = 0.;
+/* L140: */
+ }
+ x[j] = 1.;
+ *scale = 0.;
+ xmax = 0.;
+ }
+L150:
+ ;
+ } else {
+
+/* Compute x(j) := x(j) / A(j,j) - sumj if the dot */
+/* product has already been divided by 1/A(j,j). */
+
+ x[j] = x[j] / tjjs - sumj;
+ }
+/* Computing MAX */
+ d__2 = xmax, d__3 = (d__1 = x[j], abs(d__1));
+ xmax = max(d__2,d__3);
+/* L160: */
+ }
+ }
+ *scale /= tscal;
+ }
+
+/* Scale the column norms by 1/TSCAL for return. */
+
+ if (tscal != 1.) {
+ d__1 = 1. / tscal;
+ dscal_(n, &d__1, &cnorm[1], &c__1);
+ }
+
+ return 0;
+
+/* End of DLATRS */
+
+} /* dlatrs_ */
diff --git a/contrib/libs/clapack/dlatrz.c b/contrib/libs/clapack/dlatrz.c
new file mode 100644
index 0000000000..9590da3e15
--- /dev/null
+++ b/contrib/libs/clapack/dlatrz.c
@@ -0,0 +1,163 @@
+/* dlatrz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dlatrz_(integer *m, integer *n, integer *l, doublereal *
+	a, integer *lda, doublereal *tau, doublereal *work)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__;
+    extern /* Subroutine */ int dlarz_(char *, integer *, integer *, integer *
+, doublereal *, integer *, doublereal *, doublereal *, integer *,
+	     doublereal *), dlarfp_(integer *, doublereal *,
+	    doublereal *, integer *, doublereal *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DLATRZ factors the M-by-(M+L) real upper trapezoidal matrix */
+/*  [ A1 A2 ] = [ A(1:M,1:M) A(1:M,N-L+1:N) ] as ( R  0 ) * Z, by means */
+/*  of orthogonal transformations.  Z is an (M+L)-by-(M+L) orthogonal */
+/*  matrix and, R and A1 are M-by-M upper triangular matrices. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix A.  M >= 0. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix A.  N >= 0. */
+
+/*  L       (input) INTEGER */
+/*          The number of columns of the matrix A containing the */
+/*          meaningful part of the Householder vectors. N-M >= L >= 0. */
+
+/*  A       (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/*          On entry, the leading M-by-N upper trapezoidal part of the */
+/*          array A must contain the matrix to be factorized. */
+/*          On exit, the leading M-by-M upper triangular part of A */
+/*          contains the upper triangular matrix R, and elements N-L+1 to */
+/*          N of the first M rows of A, with the array TAU, represent the */
+/*          orthogonal matrix Z as a product of M elementary reflectors. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,M). */
+
+/*  TAU     (output) DOUBLE PRECISION array, dimension (M) */
+/*          The scalar factors of the elementary reflectors. */
+
+/*  WORK    (workspace) DOUBLE PRECISION array, dimension (M) */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*    A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/*  The factorization is obtained by Householder's method.  The kth */
+/*  transformation matrix, Z( k ), which is used to introduce zeros into */
+/*  the ( m - k + 1 )th row of A, is given in the form */
+
+/*     Z( k ) = ( I     0   ), */
+/*              ( 0  T( k ) ) */
+
+/*  where */
+
+/*     T( k ) = I - tau*u( k )*u( k )',   u( k ) = (   1    ), */
+/*                                                 (   0    ) */
+/*                                                 ( z( k ) ) */
+
+/*  tau is a scalar and z( k ) is an l element vector. tau and z( k ) */
+/*  are chosen to annihilate the elements of the kth row of A2. */
+
+/*  The scalar tau is returned in the kth element of TAU and the vector */
+/*  u( k ) in the kth row of A2, such that the elements of z( k ) are */
+/*  in  a( k, l + 1 ), ..., a( k, n ). The elements of R are returned in */
+/*  the upper triangular part of A1. */
+
+/*  Z is given by */
+
+/*     Z =  Z( 1 ) * Z( 2 ) * ... * Z( m ). */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input arguments */
+
+/*     Quick return if possible */
+
+    /* Parameter adjustments */
+/* f2c convention: shift the array pointers so that the 1-based */
+/* Fortran-style addressing a[i + j*a_dim1], tau[i], work[i] is valid. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+/* M = 0: nothing to factor.  M = N: the trapezoid is already a */
+/* triangle, so Z = I and every reflector scalar is zero. */
+    if (*m == 0) {
+	return 0;
+    } else if (*m == *n) {
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    tau[i__] = 0.;
+/* L10: */
+	}
+	return 0;
+    }
+
+/* Work upward from the last row; each reflector clears the trailing */
+/* L entries of row i and is then applied to the rows above it. */
+    for (i__ = *m; i__ >= 1; --i__) {
+
+/*        Generate elementary reflector H(i) to annihilate */
+/*        [ A(i,i) A(i,n-l+1:n) ] */
+
+/* The reflector vector lives in a ROW of A, hence increment LDA. */
+	i__1 = *l + 1;
+	dlarfp_(&i__1, &a[i__ + i__ * a_dim1], &a[i__ + (*n - *l + 1) *
+		a_dim1], lda, &tau[i__]);
+
+/*        Apply H(i) to A(1:i-1,i:n) from the right */
+
+	i__1 = i__ - 1;
+	i__2 = *n - i__ + 1;
+	dlarz_("Right", &i__1, &i__2, l, &a[i__ + (*n - *l + 1) * a_dim1],
+		lda, &tau[i__], &a[i__ * a_dim1 + 1], lda, &work[1]);
+
+/* L20: */
+    }
+
+    return 0;
+
+/*     End of DLATRZ */
+
+} /* dlatrz_ */
diff --git a/contrib/libs/clapack/dlatzm.c b/contrib/libs/clapack/dlatzm.c
new file mode 100644
index 0000000000..3ba8a8c8b2
--- /dev/null
+++ b/contrib/libs/clapack/dlatzm.c
@@ -0,0 +1,193 @@
+/* dlatzm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b5 = 1.;
+
+/* Subroutine */ int dlatzm_(char *side, integer *m, integer *n, doublereal *
+	v, integer *incv, doublereal *tau, doublereal *c1, doublereal *c2,
+	integer *ldc, doublereal *work)
+{
+    /* System generated locals */
+    integer c1_dim1, c1_offset, c2_dim1, c2_offset, i__1;
+    doublereal d__1;
+
+    /* Local variables */
+    extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, integer *), dcopy_(integer *,
+	    doublereal *, integer *, doublereal *, integer *), daxpy_(integer
+	    *, doublereal *, doublereal *, integer *, doublereal *, integer *)
+	    ;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  This routine is deprecated and has been replaced by routine DORMRZ. */
+
+/*  DLATZM applies a Householder matrix generated by DTZRQF to a matrix. */
+
+/*  Let P = I - tau*u*u',   u = ( 1 ), */
+/*                              ( v ) */
+/*  where v is an (m-1) vector if SIDE = 'L', or a (n-1) vector if */
+/*  SIDE = 'R'. */
+
+/*  If SIDE equals 'L', let */
+/*         C = [ C1 ] 1 */
+/*             [ C2 ] m-1 */
+/*               n */
+/*  Then C is overwritten by P*C. */
+
+/*  If SIDE equals 'R', let */
+/*         C = [ C1, C2 ] m */
+/*                1  n-1 */
+/*  Then C is overwritten by C*P. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  SIDE    (input) CHARACTER*1 */
+/*          = 'L': form P * C */
+/*          = 'R': form C * P */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix C. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix C. */
+
+/*  V       (input) DOUBLE PRECISION array, dimension */
+/*                  (1 + (M-1)*abs(INCV)) if SIDE = 'L' */
+/*                  (1 + (N-1)*abs(INCV)) if SIDE = 'R' */
+/*          The vector v in the representation of P. V is not used */
+/*          if TAU = 0. */
+
+/*  INCV    (input) INTEGER */
+/*          The increment between elements of v. INCV <> 0 */
+
+/*  TAU     (input) DOUBLE PRECISION */
+/*          The value tau in the representation of P. */
+
+/*  C1      (input/output) DOUBLE PRECISION array, dimension */
+/*                         (LDC,N) if SIDE = 'L' */
+/*                         (M,1)   if SIDE = 'R' */
+/*          On entry, the n-vector C1 if SIDE = 'L', or the m-vector C1 */
+/*          if SIDE = 'R'. */
+
+/*          On exit, the first row of P*C if SIDE = 'L', or the first */
+/*          column of C*P if SIDE = 'R'. */
+
+/*  C2      (input/output) DOUBLE PRECISION array, dimension */
+/*                         (LDC, N)   if SIDE = 'L' */
+/*                         (LDC, N-1) if SIDE = 'R' */
+/*          On entry, the (m - 1) x n matrix C2 if SIDE = 'L', or the */
+/*          m x (n - 1) matrix C2 if SIDE = 'R'. */
+
+/*          On exit, rows 2:m of P*C if SIDE = 'L', or columns 2:m of C*P */
+/*          if SIDE = 'R'. */
+
+/*  LDC     (input) INTEGER */
+/*          The leading dimension of the arrays C1 and C2. LDC >= (1,M). */
+
+/*  WORK    (workspace) DOUBLE PRECISION array, dimension */
+/*                      (N) if SIDE = 'L' */
+/*                      (M) if SIDE = 'R' */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+/* Shift pointers for 1-based Fortran-style indexing (f2c convention). */
+    --v;
+    c2_dim1 = *ldc;
+    c2_offset = 1 + c2_dim1;
+    c2 -= c2_offset;
+    c1_dim1 = *ldc;
+    c1_offset = 1 + c1_dim1;
+    c1 -= c1_offset;
+    --work;
+
+    /* Function Body */
+/* Nothing to do when C is empty or P is the identity (tau = 0). */
+    if (min(*m,*n) == 0 || *tau == 0.) {
+	return 0;
+    }
+
+    if (lsame_(side, "L")) {
+
+/*        w := C1 + v' * C2 */
+
+/* work(1:n) accumulates the row vector w; C1 is the first ROW of C, */
+/* so it is copied with stride LDC. */
+	dcopy_(n, &c1[c1_offset], ldc, &work[1], &c__1);
+	i__1 = *m - 1;
+	dgemv_("Transpose", &i__1, n, &c_b5, &c2[c2_offset], ldc, &v[1], incv,
+		 &c_b5, &work[1], &c__1);
+
+/*        [ C1 ] := [ C1 ] - tau* [ 1 ] * w' */
+/*        [ C2 ]    [ C2 ]        [ v ] */
+
+	d__1 = -(*tau);
+	daxpy_(n, &d__1, &work[1], &c__1, &c1[c1_offset], ldc);
+	i__1 = *m - 1;
+	d__1 = -(*tau);
+	dger_(&i__1, n, &d__1, &v[1], incv, &work[1], &c__1, &c2[c2_offset],
+		ldc);
+
+    } else if (lsame_(side, "R")) {
+
+/*        w := C1 + C2 * v */
+
+/* work(1:m) accumulates the column vector w; C1 is the first COLUMN */
+/* of C, so it is copied with unit stride. */
+	dcopy_(m, &c1[c1_offset], &c__1, &work[1], &c__1);
+	i__1 = *n - 1;
+	dgemv_("No transpose", m, &i__1, &c_b5, &c2[c2_offset], ldc, &v[1],
+		incv, &c_b5, &work[1], &c__1);
+
+/*        [ C1, C2 ] := [ C1, C2 ] - tau* w * [ 1 , v'] */
+
+	d__1 = -(*tau);
+	daxpy_(m, &d__1, &work[1], &c__1, &c1[c1_offset], &c__1);
+	i__1 = *n - 1;
+	d__1 = -(*tau);
+	dger_(m, &i__1, &d__1, &work[1], &c__1, &v[1], incv, &c2[c2_offset],
+		ldc);
+    }
+
+    return 0;
+
+/*     End of DLATZM */
+
+} /* dlatzm_ */
diff --git a/contrib/libs/clapack/dlauu2.c b/contrib/libs/clapack/dlauu2.c
new file mode 100644
index 0000000000..1f8f03f843
--- /dev/null
+++ b/contrib/libs/clapack/dlauu2.c
@@ -0,0 +1,183 @@
+/* dlauu2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b7 = 1.;
+static integer c__1 = 1;
+
+/* Subroutine */ int dlauu2_(char *uplo, integer *n, doublereal *a, integer *
+	lda, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__;
+    doublereal aii;
+    extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+	    integer *);
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, integer *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DLAUU2 computes the product U * U' or L' * L, where the triangular */
+/*  factor U or L is stored in the upper or lower triangular part of */
+/*  the array A. */
+
+/*  If UPLO = 'U' or 'u' then the upper triangle of the result is stored, */
+/*  overwriting the factor U in A. */
+/*  If UPLO = 'L' or 'l' then the lower triangle of the result is stored, */
+/*  overwriting the factor L in A. */
+
+/*  This is the unblocked form of the algorithm, calling Level 2 BLAS. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the triangular factor stored in the array A */
+/*          is upper or lower triangular: */
+/*          = 'U':  Upper triangular */
+/*          = 'L':  Lower triangular */
+
+/*  N       (input) INTEGER */
+/*          The order of the triangular factor U or L.  N >= 0. */
+
+/*  A       (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/*          On entry, the triangular factor U or L. */
+/*          On exit, if UPLO = 'U', the upper triangle of A is */
+/*          overwritten with the upper triangle of the product U * U'; */
+/*          if UPLO = 'L', the lower triangle of A is overwritten with */
+/*          the lower triangle of the product L' * L. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0: successful exit */
+/*          < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+/* Shift pointer for 1-based Fortran-style indexing (f2c convention). */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLAUU2", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/*        Compute the product U * U'. */
+
+/* Column sweep: for each i, the diagonal entry becomes the dot */
+/* product of row i of U with itself, then the entries above the */
+/* diagonal are updated via a matrix-vector product.  aii is saved */
+/* first because the ddot call overwrites A(i,i). */
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    aii = a[i__ + i__ * a_dim1];
+	    if (i__ < *n) {
+		i__2 = *n - i__ + 1;
+		a[i__ + i__ * a_dim1] = ddot_(&i__2, &a[i__ + i__ * a_dim1],
+			lda, &a[i__ + i__ * a_dim1], lda);
+		i__2 = i__ - 1;
+		i__3 = *n - i__;
+		dgemv_("No transpose", &i__2, &i__3, &c_b7, &a[(i__ + 1) *
+			a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
+			aii, &a[i__ * a_dim1 + 1], &c__1);
+	    } else {
+/* Last column: no trailing part remains, just scale column n by aii. */
+		dscal_(&i__, &aii, &a[i__ * a_dim1 + 1], &c__1);
+	    }
+/* L10: */
+	}
+
+    } else {
+
+/*        Compute the product L' * L. */
+
+/* Mirror of the upper case, operating on rows of the lower triangle. */
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    aii = a[i__ + i__ * a_dim1];
+	    if (i__ < *n) {
+		i__2 = *n - i__ + 1;
+		a[i__ + i__ * a_dim1] = ddot_(&i__2, &a[i__ + i__ * a_dim1], &
+			c__1, &a[i__ + i__ * a_dim1], &c__1);
+		i__2 = *n - i__;
+		i__3 = i__ - 1;
+		dgemv_("Transpose", &i__2, &i__3, &c_b7, &a[i__ + 1 + a_dim1],
+			 lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &aii, &a[i__
+			+ a_dim1], lda);
+	    } else {
+/* Last row: scale row n by aii (stride LDA walks along the row). */
+		dscal_(&i__, &aii, &a[i__ + a_dim1], lda);
+	    }
+/* L20: */
+	}
+    }
+
+    return 0;
+
+/*     End of DLAUU2 */
+
+} /* dlauu2_ */
diff --git a/contrib/libs/clapack/dlauum.c b/contrib/libs/clapack/dlauum.c
new file mode 100644
index 0000000000..0d5d706708
--- /dev/null
+++ b/contrib/libs/clapack/dlauum.c
@@ -0,0 +1,217 @@
+/* dlauum.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b15 = 1.;
+
+/* Subroutine */ int dlauum_(char *uplo, integer *n, doublereal *a, integer *
+	lda, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, ib, nb;
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dtrmm_(char *, char *, char *, char *,
+	    integer *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *);
+    logical upper;
+    extern /* Subroutine */ int dsyrk_(char *, char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+	     integer *), dlauu2_(char *, integer *,
+	    doublereal *, integer *, integer *), xerbla_(char *,
+	    integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DLAUUM computes the product U * U' or L' * L, where the triangular */
+/*  factor U or L is stored in the upper or lower triangular part of */
+/*  the array A. */
+
+/*  If UPLO = 'U' or 'u' then the upper triangle of the result is stored, */
+/*  overwriting the factor U in A. */
+/*  If UPLO = 'L' or 'l' then the lower triangle of the result is stored, */
+/*  overwriting the factor L in A. */
+
+/*  This is the blocked form of the algorithm, calling Level 3 BLAS. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the triangular factor stored in the array A */
+/*          is upper or lower triangular: */
+/*          = 'U':  Upper triangular */
+/*          = 'L':  Lower triangular */
+
+/*  N       (input) INTEGER */
+/*          The order of the triangular factor U or L.  N >= 0. */
+
+/*  A       (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/*          On entry, the triangular factor U or L. */
+/*          On exit, if UPLO = 'U', the upper triangle of A is */
+/*          overwritten with the upper triangle of the product U * U'; */
+/*          if UPLO = 'L', the lower triangle of A is overwritten with */
+/*          the lower triangle of the product L' * L. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0: successful exit */
+/*          < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+/* Shift pointer for 1-based Fortran-style indexing (f2c convention). */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DLAUUM", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Determine the block size for this environment. */
+
+    nb = ilaenv_(&c__1, "DLAUUM", uplo, n, &c_n1, &c_n1, &c_n1);
+
+    if (nb <= 1 || nb >= *n) {
+
+/*        Use unblocked code */
+
+	dlauu2_(uplo, n, &a[a_offset], lda, info);
+    } else {
+
+/*        Use blocked code */
+
+	if (upper) {
+
+/*           Compute the product U * U'. */
+
+/* March through the matrix in NB-wide column panels.  For each panel: */
+/* dtrmm updates the block row above the diagonal block, dlauu2 */
+/* handles the ib-by-ib diagonal block, and (if a trailing part */
+/* remains) dgemm/dsyrk fold in the contribution of columns i+ib:n. */
+	    i__1 = *n;
+	    i__2 = nb;
+	    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+/* ib = size of the current block (smaller for the final panel). */
+		i__3 = nb, i__4 = *n - i__ + 1;
+		ib = min(i__3,i__4);
+		i__3 = i__ - 1;
+		dtrmm_("Right", "Upper", "Transpose", "Non-unit", &i__3, &ib,
+			&c_b15, &a[i__ + i__ * a_dim1], lda, &a[i__ * a_dim1
+			+ 1], lda)
+			;
+		dlauu2_("Upper", &ib, &a[i__ + i__ * a_dim1], lda, info);
+		if (i__ + ib <= *n) {
+		    i__3 = i__ - 1;
+		    i__4 = *n - i__ - ib + 1;
+		    dgemm_("No transpose", "Transpose", &i__3, &ib, &i__4, &
+			    c_b15, &a[(i__ + ib) * a_dim1 + 1], lda, &a[i__ +
+			    (i__ + ib) * a_dim1], lda, &c_b15, &a[i__ *
+			    a_dim1 + 1], lda);
+		    i__3 = *n - i__ - ib + 1;
+		    dsyrk_("Upper", "No transpose", &ib, &i__3, &c_b15, &a[
+			    i__ + (i__ + ib) * a_dim1], lda, &c_b15, &a[i__ +
+			    i__ * a_dim1], lda);
+		}
+/* L10: */
+	    }
+	} else {
+
+/*           Compute the product L' * L. */
+
+/* Mirror of the upper case on the lower triangle: dtrmm updates the */
+/* block column left of the diagonal block, dlauu2 the diagonal */
+/* block, then dgemm/dsyrk add the trailing rows' contribution. */
+	    i__2 = *n;
+	    i__1 = nb;
+	    for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+/* Computing MIN */
+		i__3 = nb, i__4 = *n - i__ + 1;
+		ib = min(i__3,i__4);
+		i__3 = i__ - 1;
+		dtrmm_("Left", "Lower", "Transpose", "Non-unit", &ib, &i__3, &
+			c_b15, &a[i__ + i__ * a_dim1], lda, &a[i__ + a_dim1],
+			lda);
+		dlauu2_("Lower", &ib, &a[i__ + i__ * a_dim1], lda, info);
+		if (i__ + ib <= *n) {
+		    i__3 = i__ - 1;
+		    i__4 = *n - i__ - ib + 1;
+		    dgemm_("Transpose", "No transpose", &ib, &i__3, &i__4, &
+			    c_b15, &a[i__ + ib + i__ * a_dim1], lda, &a[i__ +
+			    ib + a_dim1], lda, &c_b15, &a[i__ + a_dim1], lda);
+		    i__3 = *n - i__ - ib + 1;
+		    dsyrk_("Lower", "Transpose", &ib, &i__3, &c_b15, &a[i__ +
+			    ib + i__ * a_dim1], lda, &c_b15, &a[i__ + i__ *
+			    a_dim1], lda);
+		}
+/* L20: */
+	    }
+	}
+    }
+
+    return 0;
+
+/*     End of DLAUUM */
+
+} /* dlauum_ */
diff --git a/contrib/libs/clapack/dopgtr.c b/contrib/libs/clapack/dopgtr.c
new file mode 100644
index 0000000000..ea1541cd2b
--- /dev/null
+++ b/contrib/libs/clapack/dopgtr.c
@@ -0,0 +1,210 @@
+/* dopgtr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dopgtr_(char *uplo, integer *n, doublereal *ap,
+	doublereal *tau, doublereal *q, integer *ldq, doublereal *work,
+	integer *info)
+{
+    /* System generated locals */
+    integer q_dim1, q_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__, j, ij;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    logical upper;
+    extern /* Subroutine */ int dorg2l_(integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *),
+	    dorg2r_(integer *, integer *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DOPGTR generates a real orthogonal matrix Q which is defined as the */
+/*  product of n-1 elementary reflectors H(i) of order n, as returned by */
+/*  DSPTRD using packed storage: */
+
+/*  if UPLO = 'U', Q = H(n-1) . . . H(2) H(1), */
+
+/*  if UPLO = 'L', Q = H(1) H(2) . . . H(n-1). */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U': Upper triangular packed storage used in previous */
+/*                 call to DSPTRD; */
+/*          = 'L': Lower triangular packed storage used in previous */
+/*                 call to DSPTRD. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix Q. N >= 0. */
+
+/*  AP      (input) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/*          The vectors which define the elementary reflectors, as */
+/*          returned by DSPTRD. */
+
+/*  TAU     (input) DOUBLE PRECISION array, dimension (N-1) */
+/*          TAU(i) must contain the scalar factor of the elementary */
+/*          reflector H(i), as returned by DSPTRD. */
+
+/*  Q       (output) DOUBLE PRECISION array, dimension (LDQ,N) */
+/*          The N-by-N orthogonal matrix Q. */
+
+/*  LDQ     (input) INTEGER */
+/*          The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/*  WORK    (workspace) DOUBLE PRECISION array, dimension (N-1) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input arguments */
+
+    /* Parameter adjustments */
+/* Shift pointers for 1-based Fortran-style indexing (f2c convention). */
+    --ap;
+    --tau;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*ldq < max(1,*n)) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DOPGTR", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/*        Q was determined by a call to DSPTRD with UPLO = 'U' */
+
+/*        Unpack the vectors which define the elementary reflectors and */
+/*        set the last row and column of Q equal to those of the unit */
+/*        matrix */
+
+/* ij walks the packed upper-triangular array AP column by column; */
+/* the "+= 2" skips the diagonal entry of the current column and the */
+/* first entry of the next one, which are not part of any reflector. */
+	ij = 2;
+	i__1 = *n - 1;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = j - 1;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		q[i__ + j * q_dim1] = ap[ij];
+		++ij;
+/* L10: */
+	    }
+	    ij += 2;
+	    q[*n + j * q_dim1] = 0.;
+/* L20: */
+	}
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    q[i__ + *n * q_dim1] = 0.;
+/* L30: */
+	}
+	q[*n + *n * q_dim1] = 1.;
+
+/*        Generate Q(1:n-1,1:n-1) */
+
+	i__1 = *n - 1;
+	i__2 = *n - 1;
+	i__3 = *n - 1;
+	dorg2l_(&i__1, &i__2, &i__3, &q[q_offset], ldq, &tau[1], &work[1], &
+		iinfo);
+
+    } else {
+
+/*        Q was determined by a call to DSPTRD with UPLO = 'L'. */
+
+/*        Unpack the vectors which define the elementary reflectors and */
+/*        set the first row and column of Q equal to those of the unit */
+/*        matrix */
+
+	q[q_dim1 + 1] = 1.;
+	i__1 = *n;
+	for (i__ = 2; i__ <= i__1; ++i__) {
+	    q[i__ + q_dim1] = 0.;
+/* L40: */
+	}
+/* ij walks the packed lower-triangular AP; "+= 2" skips the diagonal */
+/* and first subdiagonal entries of each column, which are not part */
+/* of the stored reflector vectors. */
+	ij = 3;
+	i__1 = *n;
+	for (j = 2; j <= i__1; ++j) {
+	    q[j * q_dim1 + 1] = 0.;
+	    i__2 = *n;
+	    for (i__ = j + 1; i__ <= i__2; ++i__) {
+		q[i__ + j * q_dim1] = ap[ij];
+		++ij;
+/* L50: */
+	    }
+	    ij += 2;
+/* L60: */
+	}
+	if (*n > 1) {
+
+/*           Generate Q(2:n,2:n) */
+
+	    i__1 = *n - 1;
+	    i__2 = *n - 1;
+	    i__3 = *n - 1;
+	    dorg2r_(&i__1, &i__2, &i__3, &q[(q_dim1 << 1) + 2], ldq, &tau[1],
+		    &work[1], &iinfo);
+	}
+    }
+    return 0;
+
+/*     End of DOPGTR */
+
+} /* dopgtr_ */
diff --git a/contrib/libs/clapack/dopmtr.c b/contrib/libs/clapack/dopmtr.c
new file mode 100644
index 0000000000..fda595a513
--- /dev/null
+++ b/contrib/libs/clapack/dopmtr.c
@@ -0,0 +1,296 @@
+/* dopmtr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dopmtr_(char *side, char *uplo, char *trans, integer *m,
+	integer *n, doublereal *ap, doublereal *tau, doublereal *c__, integer
+	*ldc, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer c_dim1, c_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, i1, i2, i3, ic, jc, ii, mi, ni, nq;
+    doublereal aii;
+    logical left;
+    extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *);
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical notran, forwrd;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DOPMTR overwrites the general real M-by-N matrix C with */
+
+/*                  SIDE = 'L'     SIDE = 'R' */
+/*  TRANS = 'N':      Q * C          C * Q */
+/*  TRANS = 'T':      Q**T * C       C * Q**T */
+
+/*  where Q is a real orthogonal matrix of order nq, with nq = m if */
+/*  SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of */
+/*  nq-1 elementary reflectors, as returned by DSPTRD using packed */
+/*  storage: */
+
+/*  if UPLO = 'U', Q = H(nq-1) . . . H(2) H(1); */
+
+/*  if UPLO = 'L', Q = H(1) H(2) . . . H(nq-1). */
+
+/*  Arguments */
+/*  ========= */
+
+/*  SIDE    (input) CHARACTER*1 */
+/*          = 'L': apply Q or Q**T from the Left; */
+/*          = 'R': apply Q or Q**T from the Right. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U': Upper triangular packed storage used in previous */
+/*                 call to DSPTRD; */
+/*          = 'L': Lower triangular packed storage used in previous */
+/*                 call to DSPTRD. */
+
+/*  TRANS   (input) CHARACTER*1 */
+/*          = 'N':  No transpose, apply Q; */
+/*          = 'T':  Transpose, apply Q**T. */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix C. M >= 0. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix C. N >= 0. */
+
+/*  AP      (input) DOUBLE PRECISION array, dimension */
+/*                               (M*(M+1)/2) if SIDE = 'L' */
+/*                               (N*(N+1)/2) if SIDE = 'R' */
+/*          The vectors which define the elementary reflectors, as */
+/*          returned by DSPTRD.  AP is modified by the routine but */
+/*          restored on exit. */
+
+/*  TAU     (input) DOUBLE PRECISION array, dimension (M-1) if SIDE = 'L' */
+/*                                     or (N-1) if SIDE = 'R' */
+/*          TAU(i) must contain the scalar factor of the elementary */
+/*          reflector H(i), as returned by DSPTRD. */
+
+/*  C       (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/*          On entry, the M-by-N matrix C. */
+/*          On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. */
+
+/*  LDC     (input) INTEGER */
+/*          The leading dimension of the array C. LDC >= max(1,M). */
+
+/*  WORK    (workspace) DOUBLE PRECISION array, dimension */
+/*                                   (N) if SIDE = 'L' */
+/*                                   (M) if SIDE = 'R' */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input arguments */
+
+    /* Parameter adjustments */
+/* Shift pointers for 1-based Fortran-style indexing (f2c convention). */
+    --ap;
+    --tau;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    left = lsame_(side, "L");
+    notran = lsame_(trans, "N");
+    upper = lsame_(uplo, "U");
+
+/*     NQ is the order of Q */
+
+    if (left) {
+	nq = *m;
+    } else {
+	nq = *n;
+    }
+    if (! left && ! lsame_(side, "R")) {
+	*info = -1;
+    } else if (! upper && ! lsame_(uplo, "L")) {
+	*info = -2;
+    } else if (! notran && ! lsame_(trans, "T")) {
+	*info = -3;
+    } else if (*m < 0) {
+	*info = -4;
+    } else if (*n < 0) {
+	*info = -5;
+    } else if (*ldc < max(1,*m)) {
+	*info = -9;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DOPMTR", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*m == 0 || *n == 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/*        Q was determined by a call to DSPTRD with UPLO = 'U' */
+
+/* forwrd selects whether the reflectors H(1)..H(nq-1) are applied in */
+/* ascending or descending order so that the requested product */
+/* (Q or Q**T, from the chosen side) is formed correctly. */
+	forwrd = left && notran || ! left && ! notran;
+
+/* i1/i2/i3 drive the reflector loop; ii indexes the last element of */
+/* the current reflector vector inside the packed array AP. */
+	if (forwrd) {
+	    i1 = 1;
+	    i2 = nq - 1;
+	    i3 = 1;
+	    ii = 2;
+	} else {
+	    i1 = nq - 1;
+	    i2 = 1;
+	    i3 = -1;
+	    ii = nq * (nq + 1) / 2 - 1;
+	}
+
+	if (left) {
+	    ni = *n;
+	} else {
+	    mi = *m;
+	}
+
+	i__1 = i2;
+	i__2 = i3;
+	for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+	    if (left) {
+
+/*              H(i) is applied to C(1:i,1:n) */
+
+		mi = i__;
+	    } else {
+
+/*              H(i) is applied to C(1:m,1:i) */
+
+		ni = i__;
+	    }
+
+/*           Apply H(i) */
+
+/* Temporarily set the implicit leading 1 of the reflector vector in */
+/* AP, apply it, then restore the overwritten entry (AP is "modified */
+/* but restored on exit"). */
+	    aii = ap[ii];
+	    ap[ii] = 1.;
+	    dlarf_(side, &mi, &ni, &ap[ii - i__ + 1], &c__1, &tau[i__], &c__[
+		    c_offset], ldc, &work[1]);
+	    ap[ii] = aii;
+
+	    if (forwrd) {
+		ii = ii + i__ + 2;
+	    } else {
+		ii = ii - i__ - 1;
+	    }
+/* L10: */
+	}
+    } else {
+
+/*        Q was determined by a call to DSPTRD with UPLO = 'L'. */
+
+/* Note the order condition is the mirror of the upper case, because */
+/* the lower-storage factorization defines Q = H(1)...H(nq-1). */
+	forwrd = left && ! notran || ! left && notran;
+
+	if (forwrd) {
+	    i1 = 1;
+	    i2 = nq - 1;
+	    i3 = 1;
+	    ii = 2;
+	} else {
+	    i1 = nq - 1;
+	    i2 = 1;
+	    i3 = -1;
+	    ii = nq * (nq + 1) / 2 - 1;
+	}
+
+	if (left) {
+	    ni = *n;
+	    jc = 1;
+	} else {
+	    mi = *m;
+	    ic = 1;
+	}
+
+	i__2 = i2;
+	i__1 = i3;
+	for (i__ = i1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+/* As above: plant the implicit 1, apply H(i), then restore AP. */
+	    aii = ap[ii];
+	    ap[ii] = 1.;
+	    if (left) {
+
+/*              H(i) is applied to C(i+1:m,1:n) */
+
+		mi = *m - i__;
+		ic = i__ + 1;
+	    } else {
+
+/*              H(i) is applied to C(1:m,i+1:n) */
+
+		ni = *n - i__;
+		jc = i__ + 1;
+	    }
+
+/*           Apply H(i) */
+
+	    dlarf_(side, &mi, &ni, &ap[ii], &c__1, &tau[i__], &c__[ic + jc *
+		    c_dim1], ldc, &work[1]);
+	    ap[ii] = aii;
+
+	    if (forwrd) {
+		ii = ii + nq - i__ + 1;
+	    } else {
+		ii = ii - nq + i__ - 2;
+	    }
+/* L20: */
+	}
+    }
+    return 0;
+
+/*     End of DOPMTR */
+
+} /* dopmtr_ */
diff --git a/contrib/libs/clapack/dorg2l.c b/contrib/libs/clapack/dorg2l.c
new file mode 100644
index 0000000000..0fa59f1380
--- /dev/null
+++ b/contrib/libs/clapack/dorg2l.c
@@ -0,0 +1,173 @@
+/* dorg2l.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dorg2l_(integer *m, integer *n, integer *k, doublereal *
+ a, integer *lda, doublereal *tau, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublereal d__1;
+
+ /* Local variables */
+ integer i__, j, l, ii;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *), dlarf_(char *, integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORG2L generates an m by n real matrix Q with orthonormal columns, */
+/* which is defined as the last n columns of a product of k elementary */
+/* reflectors of order m */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by DGEQLF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. M >= N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. N >= K >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the (n-k+i)-th column must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by DGEQLF in the last k columns of its array */
+/* argument A. */
+/* On exit, the m by n matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DGEQLF. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset; /* shift base so a[i + j*a_dim1] uses Fortran 1-based indices */
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0 || *n > *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *n) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORG2L", &i__1); /* report index of the first bad argument */
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ return 0;
+ }
+
+/* Initialise columns 1:n-k to columns of the unit matrix */
+
+ i__1 = *n - *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (l = 1; l <= i__2; ++l) {
+ a[l + j * a_dim1] = 0.;
+/* L10: */
+ }
+ a[*m - *n + j + j * a_dim1] = 1.; /* unit entry of column j */
+/* L20: */
+ }
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ii = *n - *k + i__; /* column of A holding reflector H(i) */
+
+/* Apply H(i) to A(1:m-k+i,1:n-k+i) from the left */
+
+ a[*m - *n + ii + ii * a_dim1] = 1.; /* temporarily set v's implicit 1 */
+ i__2 = *m - *n + ii;
+ i__3 = ii - 1;
+ dlarf_("Left", &i__2, &i__3, &a[ii * a_dim1 + 1], &c__1, &tau[i__], &
+ a[a_offset], lda, &work[1]);
+ i__2 = *m - *n + ii - 1;
+ d__1 = -tau[i__];
+ dscal_(&i__2, &d__1, &a[ii * a_dim1 + 1], &c__1); /* column ii := -tau(i)*v */
+ a[*m - *n + ii + ii * a_dim1] = 1. - tau[i__]; /* diagonal becomes 1 - tau(i) */
+
+/* Set A(m-k+i+1:m,n-k+i) to zero */
+
+ i__2 = *m;
+ for (l = *m - *n + ii + 1; l <= i__2; ++l) {
+ a[l + ii * a_dim1] = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of DORG2L */
+
+} /* dorg2l_ */
diff --git a/contrib/libs/clapack/dorg2r.c b/contrib/libs/clapack/dorg2r.c
new file mode 100644
index 0000000000..892807c577
--- /dev/null
+++ b/contrib/libs/clapack/dorg2r.c
@@ -0,0 +1,175 @@
+/* dorg2r.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dorg2r_(integer *m, integer *n, integer *k, doublereal *
+ a, integer *lda, doublereal *tau, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Local variables */
+ integer i__, j, l;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *), dlarf_(char *, integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORG2R generates an m by n real matrix Q with orthonormal columns, */
+/* which is defined as the first n columns of a product of k elementary */
+/* reflectors of order m */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by DGEQRF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. M >= N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. N >= K >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the i-th column must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by DGEQRF in the first k columns of its array */
+/* argument A. */
+/* On exit, the m-by-n matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DGEQRF. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset; /* shift base so a[i + j*a_dim1] uses Fortran 1-based indices */
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0 || *n > *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *n) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORG2R", &i__1); /* report index of the first bad argument */
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ return 0;
+ }
+
+/* Initialise columns k+1:n to columns of the unit matrix */
+
+ i__1 = *n;
+ for (j = *k + 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (l = 1; l <= i__2; ++l) {
+ a[l + j * a_dim1] = 0.;
+/* L10: */
+ }
+ a[j + j * a_dim1] = 1.; /* unit diagonal entry of column j */
+/* L20: */
+ }
+
+ for (i__ = *k; i__ >= 1; --i__) { /* i = k, ..., 1 */
+
+/* Apply H(i) to A(i:m,i:n) from the left */
+
+ if (i__ < *n) {
+ a[i__ + i__ * a_dim1] = 1.; /* temporarily set v's implicit 1 */
+ i__1 = *m - i__ + 1;
+ i__2 = *n - i__;
+ dlarf_("Left", &i__1, &i__2, &a[i__ + i__ * a_dim1], &c__1, &tau[
+ i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ }
+ if (i__ < *m) {
+ i__1 = *m - i__;
+ d__1 = -tau[i__];
+ dscal_(&i__1, &d__1, &a[i__ + 1 + i__ * a_dim1], &c__1); /* below-diag := -tau(i)*v */
+ }
+ a[i__ + i__ * a_dim1] = 1. - tau[i__]; /* diagonal becomes 1 - tau(i) */
+
+/* Set A(1:i-1,i) to zero */
+
+ i__1 = i__ - 1;
+ for (l = 1; l <= i__1; ++l) {
+ a[l + i__ * a_dim1] = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of DORG2R */
+
+} /* dorg2r_ */
diff --git a/contrib/libs/clapack/dorgbr.c b/contrib/libs/clapack/dorgbr.c
new file mode 100644
index 0000000000..e649640b47
--- /dev/null
+++ b/contrib/libs/clapack/dorgbr.c
@@ -0,0 +1,299 @@
+/* dorgbr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dorgbr_(char *vect, integer *m, integer *n, integer *k,
+ doublereal *a, integer *lda, doublereal *tau, doublereal *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, nb, mn;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical wantq;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int dorglq_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *), dorgqr_(integer *, integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORGBR generates one of the real orthogonal matrices Q or P**T */
+/* determined by DGEBRD when reducing a real matrix A to bidiagonal */
+/* form: A = Q * B * P**T. Q and P**T are defined as products of */
+/* elementary reflectors H(i) or G(i) respectively. */
+
+/* If VECT = 'Q', A is assumed to have been an M-by-K matrix, and Q */
+/* is of order M: */
+/* if m >= k, Q = H(1) H(2) . . . H(k) and DORGBR returns the first n */
+/* columns of Q, where m >= n >= k; */
+/* if m < k, Q = H(1) H(2) . . . H(m-1) and DORGBR returns Q as an */
+/* M-by-M matrix. */
+
+/* If VECT = 'P', A is assumed to have been a K-by-N matrix, and P**T */
+/* is of order N: */
+/* if k < n, P**T = G(k) . . . G(2) G(1) and DORGBR returns the first m */
+/* rows of P**T, where n >= m >= k; */
+/* if k >= n, P**T = G(n-1) . . . G(2) G(1) and DORGBR returns P**T as */
+/* an N-by-N matrix. */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* Specifies whether the matrix Q or the matrix P**T is */
+/* required, as defined in the transformation applied by DGEBRD: */
+/* = 'Q': generate Q; */
+/* = 'P': generate P**T. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q or P**T to be returned. */
+/* M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q or P**T to be returned. */
+/* N >= 0. */
+/* If VECT = 'Q', M >= N >= min(M,K); */
+/* if VECT = 'P', N >= M >= min(N,K). */
+
+/* K (input) INTEGER */
+/* If VECT = 'Q', the number of columns in the original M-by-K */
+/* matrix reduced by DGEBRD. */
+/* If VECT = 'P', the number of rows in the original K-by-N */
+/* matrix reduced by DGEBRD. */
+/* K >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the vectors which define the elementary reflectors, */
+/* as returned by DGEBRD. */
+/* On exit, the M-by-N matrix Q or P**T. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension */
+/* (min(M,K)) if VECT = 'Q' */
+/* (min(N,K)) if VECT = 'P' */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i) or G(i), which determines Q or P**T, as */
+/* returned by DGEBRD in its array argument TAUQ or TAUP. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,min(M,N)). */
+/* For optimum performance LWORK >= min(M,N)*NB, where NB */
+/* is the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset; /* shift base so a[i + j*a_dim1] uses Fortran 1-based indices */
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ wantq = lsame_(vect, "Q"); /* TRUE: form Q; FALSE: form P**T */
+ mn = min(*m,*n);
+ lquery = *lwork == -1; /* LWORK = -1 requests a workspace-size query only */
+ if (! wantq && ! lsame_(vect, "P")) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0 || wantq && (*n > *m || *n < min(*m,*k)) || ! wantq && (
+ *m > *n || *m < min(*n,*k))) {
+ *info = -3;
+ } else if (*k < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*m)) {
+ *info = -6;
+ } else if (*lwork < max(1,mn) && ! lquery) {
+ *info = -9;
+ }
+
+ if (*info == 0) {
+ if (wantq) {
+ nb = ilaenv_(&c__1, "DORGQR", " ", m, n, k, &c_n1); /* block size of underlying QR generator */
+ } else {
+ nb = ilaenv_(&c__1, "DORGLQ", " ", m, n, k, &c_n1); /* block size of underlying LQ generator */
+ }
+ lwkopt = max(1,mn) * nb; /* optimal workspace: min(m,n) * blocksize */
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORGBR", &i__1); /* report index of the first bad argument */
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ if (wantq) {
+
+/* Form Q, determined by a call to DGEBRD to reduce an m-by-k */
+/* matrix */
+
+ if (*m >= *k) {
+
+/* If m >= k, assume m >= n >= k */
+
+ dorgqr_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
+ iinfo);
+
+ } else {
+
+/* If m < k, assume m = n */
+
+/* Shift the vectors which define the elementary reflectors one */
+/* column to the right, and set the first row and column of Q */
+/* to those of the unit matrix */
+
+ for (j = *m; j >= 2; --j) {
+ a[j * a_dim1 + 1] = 0.;
+ i__1 = *m;
+ for (i__ = j + 1; i__ <= i__1; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1];
+/* L10: */
+ }
+/* L20: */
+ }
+ a[a_dim1 + 1] = 1.; /* Q(1,1) = 1 */
+ i__1 = *m;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ a[i__ + a_dim1] = 0.; /* Q(2:m,1) = 0 */
+/* L30: */
+ }
+ if (*m > 1) {
+
+/* Form Q(2:m,2:m) */
+
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ dorgqr_(&i__1, &i__2, &i__3, &a[(a_dim1 << 1) + 2], lda, &tau[
+ 1], &work[1], lwork, &iinfo);
+ }
+ }
+ } else {
+
+/* Form P', determined by a call to DGEBRD to reduce a k-by-n */
+/* matrix */
+
+ if (*k < *n) {
+
+/* If k < n, assume k <= m <= n */
+
+ dorglq_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
+ iinfo);
+
+ } else {
+
+/* If k >= n, assume m = n */
+
+/* Shift the vectors which define the elementary reflectors one */
+/* row downward, and set the first row and column of P' to */
+/* those of the unit matrix */
+
+ a[a_dim1 + 1] = 1.; /* P'(1,1) = 1 */
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ a[i__ + a_dim1] = 0.; /* P'(2:n,1) = 0 */
+/* L40: */
+ }
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ for (i__ = j - 1; i__ >= 2; --i__) {
+ a[i__ + j * a_dim1] = a[i__ - 1 + j * a_dim1];
+/* L50: */
+ }
+ a[j * a_dim1 + 1] = 0.;
+/* L60: */
+ }
+ if (*n > 1) {
+
+/* Form P'(2:n,2:n) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ dorglq_(&i__1, &i__2, &i__3, &a[(a_dim1 << 1) + 2], lda, &tau[
+ 1], &work[1], lwork, &iinfo);
+ }
+ }
+ }
+ work[1] = (doublereal) lwkopt; /* return optimal LWORK in WORK(1) */
+ return 0;
+
+/* End of DORGBR */
+
+} /* dorgbr_ */
diff --git a/contrib/libs/clapack/dorghr.c b/contrib/libs/clapack/dorghr.c
new file mode 100644
index 0000000000..291825b226
--- /dev/null
+++ b/contrib/libs/clapack/dorghr.c
@@ -0,0 +1,216 @@
+/* dorghr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dorghr_(integer *n, integer *ilo, integer *ihi,
+ doublereal *a, integer *lda, doublereal *tau, doublereal *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, nb, nh, iinfo;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int dorgqr_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORGHR generates a real orthogonal matrix Q which is defined as the */
+/* product of IHI-ILO elementary reflectors of order N, as returned by */
+/* DGEHRD: */
+
+/* Q = H(ilo) H(ilo+1) . . . H(ihi-1). */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix Q. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* ILO and IHI must have the same values as in the previous call */
+/* of DGEHRD. Q is equal to the unit matrix except in the */
+/* submatrix Q(ilo+1:ihi,ilo+1:ihi). */
+/* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the vectors which define the elementary reflectors, */
+/* as returned by DGEHRD. */
+/* On exit, the N-by-N orthogonal matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (N-1) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DGEHRD. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= IHI-ILO. */
+/* For optimum performance LWORK >= (IHI-ILO)*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset; /* shift base so a[i + j*a_dim1] uses Fortran 1-based indices */
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nh = *ihi - *ilo; /* order of the non-trivial block Q(ilo+1:ihi,ilo+1:ihi) */
+ lquery = *lwork == -1; /* LWORK = -1 requests a workspace-size query only */
+ if (*n < 0) {
+ *info = -1;
+ } else if (*ilo < 1 || *ilo > max(1,*n)) {
+ *info = -2;
+ } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*lwork < max(1,nh) && ! lquery) {
+ *info = -8;
+ }
+
+ if (*info == 0) {
+ nb = ilaenv_(&c__1, "DORGQR", " ", &nh, &nh, &nh, &c_n1); /* block size of underlying QR generator */
+ lwkopt = max(1,nh) * nb; /* optimal workspace: nh * blocksize */
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORGHR", &i__1); /* report index of the first bad argument */
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+/* Shift the vectors which define the elementary reflectors one */
+/* column to the right, and set the first ilo and the last n-ihi */
+/* rows and columns to those of the unit matrix */
+
+ i__1 = *ilo + 1;
+ for (j = *ihi; j >= i__1; --j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.;
+/* L10: */
+ }
+ i__2 = *ihi;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1]; /* move reflector one column right */
+/* L20: */
+ }
+ i__2 = *n;
+ for (i__ = *ihi + 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+ i__1 = *ilo;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.;
+/* L50: */
+ }
+ a[j + j * a_dim1] = 1.; /* unit columns 1:ilo */
+/* L60: */
+ }
+ i__1 = *n;
+ for (j = *ihi + 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.;
+/* L70: */
+ }
+ a[j + j * a_dim1] = 1.; /* unit columns ihi+1:n */
+/* L80: */
+ }
+
+ if (nh > 0) {
+
+/* Generate Q(ilo+1:ihi,ilo+1:ihi) */
+
+ dorgqr_(&nh, &nh, &nh, &a[*ilo + 1 + (*ilo + 1) * a_dim1], lda, &tau[*
+ ilo], &work[1], lwork, &iinfo);
+ }
+ work[1] = (doublereal) lwkopt; /* return optimal LWORK in WORK(1) */
+ return 0;
+
+/* End of DORGHR */
+
+} /* dorghr_ */
diff --git a/contrib/libs/clapack/dorgl2.c b/contrib/libs/clapack/dorgl2.c
new file mode 100644
index 0000000000..f880992a4a
--- /dev/null
+++ b/contrib/libs/clapack/dorgl2.c
@@ -0,0 +1,175 @@
+/* dorgl2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dorgl2_(integer *m, integer *n, integer *k, doublereal *
+ a, integer *lda, doublereal *tau, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Local variables */
+ integer i__, j, l;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *), dlarf_(char *, integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORGL2 generates an m by n real matrix Q with orthonormal rows, */
+/* which is defined as the first m rows of a product of k elementary */
+/* reflectors of order n */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by DGELQF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. N >= M. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. M >= K >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the i-th row must contain the vector which defines */
+/* the elementary reflector H(i), for i = 1,2,...,k, as returned */
+/* by DGELQF in the first k rows of its array argument A. */
+/* On exit, the m-by-n matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DGELQF. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (M) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset; /* shift base so a[i + j*a_dim1] uses Fortran 1-based indices */
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *m) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORGL2", &i__1); /* report index of the first bad argument */
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m <= 0) {
+ return 0;
+ }
+
+ if (*k < *m) {
+
+/* Initialise rows k+1:m to rows of the unit matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (l = *k + 1; l <= i__2; ++l) {
+ a[l + j * a_dim1] = 0.;
+/* L10: */
+ }
+ if (j > *k && j <= *m) {
+ a[j + j * a_dim1] = 1.; /* unit diagonal entry of row j */
+ }
+/* L20: */
+ }
+ }
+
+ for (i__ = *k; i__ >= 1; --i__) { /* i = k, ..., 1 */
+
+/* Apply H(i) to A(i:m,i:n) from the right */
+
+ if (i__ < *n) {
+ if (i__ < *m) {
+ a[i__ + i__ * a_dim1] = 1.; /* temporarily set v's implicit 1 */
+ i__1 = *m - i__;
+ i__2 = *n - i__ + 1;
+ dlarf_("Right", &i__1, &i__2, &a[i__ + i__ * a_dim1], lda, &
+ tau[i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+ }
+ i__1 = *n - i__;
+ d__1 = -tau[i__];
+ dscal_(&i__1, &d__1, &a[i__ + (i__ + 1) * a_dim1], lda); /* row i beyond diag := -tau(i)*v */
+ }
+ a[i__ + i__ * a_dim1] = 1. - tau[i__]; /* diagonal becomes 1 - tau(i) */
+
+/* Set A(i,1:i-1) to zero */
+
+ i__1 = i__ - 1;
+ for (l = 1; l <= i__1; ++l) {
+ a[i__ + l * a_dim1] = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of DORGL2 */
+
+} /* dorgl2_ */
diff --git a/contrib/libs/clapack/dorglq.c b/contrib/libs/clapack/dorglq.c
new file mode 100644
index 0000000000..a5b90192cd
--- /dev/null
+++ b/contrib/libs/clapack/dorglq.c
@@ -0,0 +1,280 @@
+/* dorglq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int dorglq_(integer *m, integer *n, integer *k, doublereal *
+	a, integer *lda, doublereal *tau, doublereal *work, integer *lwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int dorgl2_(integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *),
+	    dlarfb_(char *, char *, char *, char *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORGLQ generates an M-by-N real matrix Q with orthonormal rows, */
+/* which is defined as the first M rows of a product of K elementary */
+/* reflectors of order N */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by DGELQF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. N >= M. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. M >= K >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the i-th row must contain the vector which defines */
+/* the elementary reflector H(i), for i = 1,2,...,k, as returned */
+/* by DGELQF in the first k rows of its array argument A. */
+/* On exit, the M-by-N matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DGELQF. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments (INFO = -i reports that argument i is illegal; */
+/* LWORK = -1 requests a workspace-size query only). */
+
+    /* Parameter adjustments (f2c shifts pointers so 1-based Fortran indexing works) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    nb = ilaenv_(&c__1, "DORGLQ", " ", m, n, k, &c_n1);
+    lwkopt = max(1,*m) * nb;
+    work[1] = (doublereal) lwkopt;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < *m) {
+	*info = -2;
+    } else if (*k < 0 || *k > *m) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    } else if (*lwork < max(1,*m) && ! lquery) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DORGLQ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible: a Q with no rows needs no computation */
+
+    if (*m <= 0) {
+	work[1] = 1.;
+	return 0;
+    }
+
+    nbmin = 2;
+    nx = 0;
+    iws = *m;
+    if (nb > 1 && nb < *k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+	i__1 = 0, i__2 = ilaenv_(&c__3, "DORGLQ", " ", m, n, k, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *m;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "DORGLQ", " ", m, n, k, &c_n1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    if (nb >= nbmin && nb < *k && nx < *k) {
+
+/* Use blocked code after the last block. */
+/* The first kk rows are handled by the block method. */
+
+	ki = (*k - nx - 1) / nb * nb;
+/* Computing MIN */
+	i__1 = *k, i__2 = ki + nb;
+	kk = min(i__1,i__2);
+
+/* Set A(kk+1:m,1:kk) to zero. */
+
+	i__1 = kk;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = kk + 1; i__ <= i__2; ++i__) {
+		a[i__ + j * a_dim1] = 0.;
+/* L10: */
+	    }
+/* L20: */
+	}
+    } else {
+	kk = 0;
+    }
+
+/* Use unblocked code for the last or only block. */
+
+    if (kk < *m) {
+	i__1 = *m - kk;
+	i__2 = *n - kk;
+	i__3 = *k - kk;
+	dorgl2_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
+		tau[kk + 1], &work[1], &iinfo);
+    }
+
+    if (kk > 0) {
+
+/* Use blocked code: sweep nb-wide reflector blocks from ki+1 down to 1 */
+
+	i__1 = -nb;
+	for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
+/* Computing MIN */
+	    i__2 = nb, i__3 = *k - i__ + 1;
+	    ib = min(i__2,i__3);
+	    if (i__ + ib <= *m) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+		i__2 = *n - i__ + 1;
+		dlarft_("Forward", "Rowwise", &i__2, &ib, &a[i__ + i__ *
+			a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(i+ib:m,i:n) from the right */
+
+		i__2 = *m - i__ - ib + 1;
+		i__3 = *n - i__ + 1;
+		dlarfb_("Right", "Transpose", "Forward", "Rowwise", &i__2, &
+			i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
+			ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib +
+			1], &ldwork);
+	    }
+
+/* Apply H' to columns i:n of current block */
+
+	    i__2 = *n - i__ + 1;
+	    dorgl2_(&ib, &i__2, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
+		    work[1], &iinfo);
+
+/* Set columns 1:i-1 of current block to zero */
+
+	    i__2 = i__ - 1;
+	    for (j = 1; j <= i__2; ++j) {
+		i__3 = i__ + ib - 1;
+		for (l = i__; l <= i__3; ++l) {
+		    a[l + j * a_dim1] = 0.;
+/* L30: */
+		}
+/* L40: */
+	    }
+/* L50: */
+	}
+    }
+
+    work[1] = (doublereal) iws;
+    return 0;
+
+/* End of DORGLQ */
+
+} /* dorglq_ */
diff --git a/contrib/libs/clapack/dorgql.c b/contrib/libs/clapack/dorgql.c
new file mode 100644
index 0000000000..85d2f8a319
--- /dev/null
+++ b/contrib/libs/clapack/dorgql.c
@@ -0,0 +1,289 @@
+/* dorgql.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int dorgql_(integer *m, integer *n, integer *k, doublereal *
+	a, integer *lda, doublereal *tau, doublereal *work, integer *lwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, j, l, ib, nb, kk, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int dorg2l_(integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *),
+	    dlarfb_(char *, char *, char *, char *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORGQL generates an M-by-N real matrix Q with orthonormal columns, */
+/* which is defined as the last N columns of a product of K elementary */
+/* reflectors of order M */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by DGEQLF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. M >= N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. N >= K >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the (n-k+i)-th column must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by DGEQLF in the last k columns of its array */
+/* argument A. */
+/* On exit, the M-by-N matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DGEQLF. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments (INFO = -i reports that argument i is illegal; */
+/* LWORK = -1 requests a workspace-size query only). */
+
+    /* Parameter adjustments (f2c shifts pointers so 1-based Fortran indexing works) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0 || *n > *m) {
+	*info = -2;
+    } else if (*k < 0 || *k > *n) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    }
+
+    if (*info == 0) {
+	if (*n == 0) {
+	    lwkopt = 1;
+	} else {
+	    nb = ilaenv_(&c__1, "DORGQL", " ", m, n, k, &c_n1);
+	    lwkopt = *n * nb;
+	}
+	work[1] = (doublereal) lwkopt;
+
+	if (*lwork < max(1,*n) && ! lquery) {
+	    *info = -8;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DORGQL", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible: a Q with no columns needs no computation */
+
+    if (*n <= 0) {
+	return 0;
+    }
+
+    nbmin = 2;
+    nx = 0;
+    iws = *n;
+    if (nb > 1 && nb < *k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+	i__1 = 0, i__2 = ilaenv_(&c__3, "DORGQL", " ", m, n, k, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *n;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "DORGQL", " ", m, n, k, &c_n1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    if (nb >= nbmin && nb < *k && nx < *k) {
+
+/* Use blocked code after the first block. */
+/* The last kk columns are handled by the block method. */
+
+/* Computing MIN */
+	i__1 = *k, i__2 = (*k - nx + nb - 1) / nb * nb;
+	kk = min(i__1,i__2);
+
+/* Set A(m-kk+1:m,1:n-kk) to zero. */
+
+	i__1 = *n - kk;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = *m - kk + 1; i__ <= i__2; ++i__) {
+		a[i__ + j * a_dim1] = 0.;
+/* L10: */
+	    }
+/* L20: */
+	}
+    } else {
+	kk = 0;
+    }
+
+/* Use unblocked code for the first or only block. */
+
+    i__1 = *m - kk;
+    i__2 = *n - kk;
+    i__3 = *k - kk;
+    dorg2l_(&i__1, &i__2, &i__3, &a[a_offset], lda, &tau[1], &work[1], &iinfo)
+	    ;
+
+    if (kk > 0) {
+
+/* Use blocked code: sweep nb-wide reflector blocks from k-kk+1 up to k */
+
+	i__1 = *k;
+	i__2 = nb;
+	for (i__ = *k - kk + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+		i__2) {
+/* Computing MIN */
+	    i__3 = nb, i__4 = *k - i__ + 1;
+	    ib = min(i__3,i__4);
+	    if (*n - *k + i__ > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+		i__3 = *m - *k + i__ + ib - 1;
+		dlarft_("Backward", "Columnwise", &i__3, &ib, &a[(*n - *k +
+			i__) * a_dim1 + 1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(1:m-k+i+ib-1,1:n-k+i-1) from the left */
+
+		i__3 = *m - *k + i__ + ib - 1;
+		i__4 = *n - *k + i__ - 1;
+		dlarfb_("Left", "No transpose", "Backward", "Columnwise", &
+			i__3, &i__4, &ib, &a[(*n - *k + i__) * a_dim1 + 1],
+			lda, &work[1], &ldwork, &a[a_offset], lda, &work[ib +
+			1], &ldwork);
+	    }
+
+/* Apply H to rows 1:m-k+i+ib-1 of current block */
+
+	    i__3 = *m - *k + i__ + ib - 1;
+	    dorg2l_(&i__3, &ib, &ib, &a[(*n - *k + i__) * a_dim1 + 1], lda, &
+		    tau[i__], &work[1], &iinfo);
+
+/* Set rows m-k+i+ib:m of current block to zero */
+
+	    i__3 = *n - *k + i__ + ib - 1;
+	    for (j = *n - *k + i__; j <= i__3; ++j) {
+		i__4 = *m;
+		for (l = *m - *k + i__ + ib; l <= i__4; ++l) {
+		    a[l + j * a_dim1] = 0.;
+/* L30: */
+		}
+/* L40: */
+	    }
+/* L50: */
+	}
+    }
+
+    work[1] = (doublereal) iws;
+    return 0;
+
+/* End of DORGQL */
+
+} /* dorgql_ */
diff --git a/contrib/libs/clapack/dorgqr.c b/contrib/libs/clapack/dorgqr.c
new file mode 100644
index 0000000000..3b72b73b3a
--- /dev/null
+++ b/contrib/libs/clapack/dorgqr.c
@@ -0,0 +1,281 @@
+/* dorgqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int dorgqr_(integer *m, integer *n, integer *k, doublereal *
+	a, integer *lda, doublereal *tau, doublereal *work, integer *lwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int dorg2r_(integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *),
+	    dlarfb_(char *, char *, char *, char *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORGQR generates an M-by-N real matrix Q with orthonormal columns, */
+/* which is defined as the first N columns of a product of K elementary */
+/* reflectors of order M */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by DGEQRF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. M >= N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. N >= K >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the i-th column must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by DGEQRF in the first k columns of its array */
+/* argument A. */
+/* On exit, the M-by-N matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DGEQRF. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments (INFO = -i reports that argument i is illegal; */
+/* LWORK = -1 requests a workspace-size query only). */
+
+    /* Parameter adjustments (f2c shifts pointers so 1-based Fortran indexing works) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    nb = ilaenv_(&c__1, "DORGQR", " ", m, n, k, &c_n1);
+    lwkopt = max(1,*n) * nb;
+    work[1] = (doublereal) lwkopt;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0 || *n > *m) {
+	*info = -2;
+    } else if (*k < 0 || *k > *n) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    } else if (*lwork < max(1,*n) && ! lquery) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DORGQR", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible: a Q with no columns needs no computation */
+
+    if (*n <= 0) {
+	work[1] = 1.;
+	return 0;
+    }
+
+    nbmin = 2;
+    nx = 0;
+    iws = *n;
+    if (nb > 1 && nb < *k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+	i__1 = 0, i__2 = ilaenv_(&c__3, "DORGQR", " ", m, n, k, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *n;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "DORGQR", " ", m, n, k, &c_n1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    if (nb >= nbmin && nb < *k && nx < *k) {
+
+/* Use blocked code after the last block. */
+/* The first kk columns are handled by the block method. */
+
+	ki = (*k - nx - 1) / nb * nb;
+/* Computing MIN */
+	i__1 = *k, i__2 = ki + nb;
+	kk = min(i__1,i__2);
+
+/* Set A(1:kk,kk+1:n) to zero. */
+
+	i__1 = *n;
+	for (j = kk + 1; j <= i__1; ++j) {
+	    i__2 = kk;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		a[i__ + j * a_dim1] = 0.;
+/* L10: */
+	    }
+/* L20: */
+	}
+    } else {
+	kk = 0;
+    }
+
+/* Use unblocked code for the last or only block. */
+
+    if (kk < *n) {
+	i__1 = *m - kk;
+	i__2 = *n - kk;
+	i__3 = *k - kk;
+	dorg2r_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
+		tau[kk + 1], &work[1], &iinfo);
+    }
+
+    if (kk > 0) {
+
+/* Use blocked code: sweep nb-wide reflector blocks from ki+1 down to 1 */
+
+	i__1 = -nb;
+	for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
+/* Computing MIN */
+	    i__2 = nb, i__3 = *k - i__ + 1;
+	    ib = min(i__2,i__3);
+	    if (i__ + ib <= *n) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+		i__2 = *m - i__ + 1;
+		dlarft_("Forward", "Columnwise", &i__2, &ib, &a[i__ + i__ *
+			a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(i:m,i+ib:n) from the left */
+
+		i__2 = *m - i__ + 1;
+		i__3 = *n - i__ - ib + 1;
+		dlarfb_("Left", "No transpose", "Forward", "Columnwise", &
+			i__2, &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[
+			1], &ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, &
+			work[ib + 1], &ldwork);
+	    }
+
+/* Apply H to rows i:m of current block */
+
+	    i__2 = *m - i__ + 1;
+	    dorg2r_(&i__2, &ib, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
+		    work[1], &iinfo);
+
+/* Set rows 1:i-1 of current block to zero */
+
+	    i__2 = i__ + ib - 1;
+	    for (j = i__; j <= i__2; ++j) {
+		i__3 = i__ - 1;
+		for (l = 1; l <= i__3; ++l) {
+		    a[l + j * a_dim1] = 0.;
+/* L30: */
+		}
+/* L40: */
+	    }
+/* L50: */
+	}
+    }
+
+    work[1] = (doublereal) iws;
+    return 0;
+
+/* End of DORGQR */
+
+} /* dorgqr_ */
diff --git a/contrib/libs/clapack/dorgr2.c b/contrib/libs/clapack/dorgr2.c
new file mode 100644
index 0000000000..ab2ce5915c
--- /dev/null
+++ b/contrib/libs/clapack/dorgr2.c
@@ -0,0 +1,174 @@
+/* dorgr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dorgr2_(integer *m, integer *n, integer *k, doublereal *
+	a, integer *lda, doublereal *tau, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    doublereal d__1;
+
+    /* Local variables */
+    integer i__, j, l, ii;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *), dlarf_(char *, integer *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORGR2 generates an m by n real matrix Q with orthonormal rows, */
+/* which is defined as the last m rows of a product of k elementary */
+/* reflectors of order n */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by DGERQF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. N >= M. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. M >= K >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the (m-k+i)-th row must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by DGERQF in the last k rows of its array argument */
+/* A. */
+/* On exit, the m by n matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DGERQF. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (M) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments (INFO = -i reports that argument i is illegal) */
+
+    /* Parameter adjustments (f2c shifts pointers so 1-based Fortran indexing works) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < *m) {
+	*info = -2;
+    } else if (*k < 0 || *k > *m) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DORGR2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible: a Q with no rows needs no computation */
+
+    if (*m <= 0) {
+	return 0;
+    }
+
+    if (*k < *m) {
+
+/* Initialise rows 1:m-k to rows of the unit matrix */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m - *k;
+	    for (l = 1; l <= i__2; ++l) {
+		a[l + j * a_dim1] = 0.;
+/* L10: */
+	    }
+	    if (j > *n - *m && j <= *n - *k) {
+		a[*m - *n + j + j * a_dim1] = 1.;
+	    }
+/* L20: */
+	}
+    }
+
+    i__1 = *k;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	ii = *m - *k + i__;
+
+/* Apply H(i) to A(1:m-k+i,1:n-k+i) from the right */
+
+	a[ii + (*n - *m + ii) * a_dim1] = 1.;
+	i__2 = ii - 1;
+	i__3 = *n - *m + ii;
+	dlarf_("Right", &i__2, &i__3, &a[ii + a_dim1], lda, &tau[i__], &a[
+		a_offset], lda, &work[1]);
+	i__2 = *n - *m + ii - 1;
+	d__1 = -tau[i__];
+	dscal_(&i__2, &d__1, &a[ii + a_dim1], lda);
+	a[ii + (*n - *m + ii) * a_dim1] = 1. - tau[i__];
+
+/* Set A(m-k+i,n-k+i+1:n) to zero */
+
+	i__2 = *n;
+	for (l = *n - *m + ii + 1; l <= i__2; ++l) {
+	    a[ii + l * a_dim1] = 0.;
+/* L30: */
+	}
+/* L40: */
+    }
+    return 0;
+
+/* End of DORGR2 */
+
+} /* dorgr2_ */
diff --git a/contrib/libs/clapack/dorgrq.c b/contrib/libs/clapack/dorgrq.c
new file mode 100644
index 0000000000..5927a416bd
--- /dev/null
+++ b/contrib/libs/clapack/dorgrq.c
@@ -0,0 +1,289 @@
+/* dorgrq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int dorgrq_(integer *m, integer *n, integer *k, doublereal *
+	a, integer *lda, doublereal *tau, doublereal *work, integer *lwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, j, l, ib, nb, ii, kk, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int dorgr2_(integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *),
+	    dlarfb_(char *, char *, char *, char *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORGRQ generates an M-by-N real matrix Q with orthonormal rows, */
+/* which is defined as the last M rows of a product of K elementary */
+/* reflectors of order N */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by DGERQF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. N >= M. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. M >= K >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the (m-k+i)-th row must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by DGERQF in the last k rows of its array argument */
+/* A. */
+/* On exit, the M-by-N matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DGERQF. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments (INFO = -i reports that argument i is illegal; */
+/* LWORK = -1 requests a workspace-size query only). */
+
+    /* Parameter adjustments (f2c shifts pointers so 1-based Fortran indexing works) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < *m) {
+	*info = -2;
+    } else if (*k < 0 || *k > *m) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    }
+
+    if (*info == 0) {
+	if (*m <= 0) {
+	    lwkopt = 1;
+	} else {
+	    nb = ilaenv_(&c__1, "DORGRQ", " ", m, n, k, &c_n1);
+	    lwkopt = *m * nb;
+	}
+	work[1] = (doublereal) lwkopt;
+
+	if (*lwork < max(1,*m) && ! lquery) {
+	    *info = -8;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DORGRQ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible: a Q with no rows needs no computation */
+
+    if (*m <= 0) {
+	return 0;
+    }
+
+    nbmin = 2;
+    nx = 0;
+    iws = *m;
+    if (nb > 1 && nb < *k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+	i__1 = 0, i__2 = ilaenv_(&c__3, "DORGRQ", " ", m, n, k, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *m;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "DORGRQ", " ", m, n, k, &c_n1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    if (nb >= nbmin && nb < *k && nx < *k) {
+
+/* Use blocked code after the first block. */
+/* The last kk rows are handled by the block method. */
+
+/* Computing MIN */
+	i__1 = *k, i__2 = (*k - nx + nb - 1) / nb * nb;
+	kk = min(i__1,i__2);
+
+/* Set A(1:m-kk,n-kk+1:n) to zero. */
+
+	i__1 = *n;
+	for (j = *n - kk + 1; j <= i__1; ++j) {
+	    i__2 = *m - kk;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		a[i__ + j * a_dim1] = 0.;
+/* L10: */
+	    }
+/* L20: */
+	}
+    } else {
+	kk = 0;
+    }
+
+/* Use unblocked code for the first or only block. */
+
+    i__1 = *m - kk;
+    i__2 = *n - kk;
+    i__3 = *k - kk;
+    dorgr2_(&i__1, &i__2, &i__3, &a[a_offset], lda, &tau[1], &work[1], &iinfo)
+	    ;
+
+    if (kk > 0) {
+
+/* Use blocked code: sweep nb-wide reflector blocks from k-kk+1 up to k */
+
+	i__1 = *k;
+	i__2 = nb;
+	for (i__ = *k - kk + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+		i__2) {
+/* Computing MIN */
+	    i__3 = nb, i__4 = *k - i__ + 1;
+	    ib = min(i__3,i__4);
+	    ii = *m - *k + i__;
+	    if (ii > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+		i__3 = *n - *k + i__ + ib - 1;
+		dlarft_("Backward", "Rowwise", &i__3, &ib, &a[ii + a_dim1],
+			lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(1:m-k+i-1,1:n-k+i+ib-1) from the right */
+
+		i__3 = ii - 1;
+		i__4 = *n - *k + i__ + ib - 1;
+		dlarfb_("Right", "Transpose", "Backward", "Rowwise", &i__3, &
+			i__4, &ib, &a[ii + a_dim1], lda, &work[1], &ldwork, &
+			a[a_offset], lda, &work[ib + 1], &ldwork);
+	    }
+
+/* Apply H' to columns 1:n-k+i+ib-1 of current block */
+
+	    i__3 = *n - *k + i__ + ib - 1;
+	    dorgr2_(&ib, &i__3, &ib, &a[ii + a_dim1], lda, &tau[i__], &work[1]
+, &iinfo);
+
+/* Set columns n-k+i+ib:n of current block to zero */
+
+	    i__3 = *n;
+	    for (l = *n - *k + i__ + ib; l <= i__3; ++l) {
+		i__4 = ii + ib - 1;
+		for (j = ii; j <= i__4; ++j) {
+		    a[j + l * a_dim1] = 0.;
+/* L30: */
+		}
+/* L40: */
+	    }
+/* L50: */
+	}
+    }
+
+    work[1] = (doublereal) iws;
+    return 0;
+
+/* End of DORGRQ */
+
+} /* dorgrq_ */
diff --git a/contrib/libs/clapack/dorgtr.c b/contrib/libs/clapack/dorgtr.c
new file mode 100644
index 0000000000..c3806816e4
--- /dev/null
+++ b/contrib/libs/clapack/dorgtr.c
@@ -0,0 +1,250 @@
+/* dorgtr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dorgtr_(char *uplo, integer *n, doublereal *a, integer *
+ lda, doublereal *tau, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, nb;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int dorgql_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *), dorgqr_(integer *, integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORGTR generates a real orthogonal matrix Q which is defined as the */
+/* product of n-1 elementary reflectors of order N, as returned by */
+/* DSYTRD: */
+
+/* if UPLO = 'U', Q = H(n-1) . . . H(2) H(1), */
+
+/* if UPLO = 'L', Q = H(1) H(2) . . . H(n-1). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A contains elementary reflectors */
+/* from DSYTRD; */
+/* = 'L': Lower triangle of A contains elementary reflectors */
+/* from DSYTRD. */
+
+/* N (input) INTEGER */
+/* The order of the matrix Q. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the vectors which define the elementary reflectors, */
+/* as returned by DSYTRD. */
+/* On exit, the N-by-N orthogonal matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (N-1) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DSYTRD. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N-1). */
+/* For optimum performance LWORK >= (N-1)*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n - 1;
+ if (*lwork < max(i__1,i__2) && ! lquery) {
+ *info = -7;
+ }
+ }
+
+ if (*info == 0) {
+ if (upper) {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "DORGQL", " ", &i__1, &i__2, &i__3, &c_n1);
+ } else {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "DORGQR", " ", &i__1, &i__2, &i__3, &c_n1);
+ }
+/* Computing MAX */
+ i__1 = 1, i__2 = *n - 1;
+ lwkopt = max(i__1,i__2) * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORGTR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ if (upper) {
+
+/* Q was determined by a call to DSYTRD with UPLO = 'U' */
+
+/* Shift the vectors which define the elementary reflectors one */
+/* column to the left, and set the last row and column of Q to */
+/* those of the unit matrix */
+
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + (j + 1) * a_dim1];
+/* L10: */
+ }
+ a[*n + j * a_dim1] = 0.;
+/* L20: */
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ a[i__ + *n * a_dim1] = 0.;
+/* L30: */
+ }
+ a[*n + *n * a_dim1] = 1.;
+
+/* Generate Q(1:n-1,1:n-1) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ dorgql_(&i__1, &i__2, &i__3, &a[a_offset], lda, &tau[1], &work[1],
+ lwork, &iinfo);
+
+ } else {
+
+/* Q was determined by a call to DSYTRD with UPLO = 'L'. */
+
+/* Shift the vectors which define the elementary reflectors one */
+/* column to the right, and set the first row and column of Q to */
+/* those of the unit matrix */
+
+ for (j = *n; j >= 2; --j) {
+ a[j * a_dim1 + 1] = 0.;
+ i__1 = *n;
+ for (i__ = j + 1; i__ <= i__1; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1];
+/* L40: */
+ }
+/* L50: */
+ }
+ a[a_dim1 + 1] = 1.;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ a[i__ + a_dim1] = 0.;
+/* L60: */
+ }
+ if (*n > 1) {
+
+/* Generate Q(2:n,2:n) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ dorgqr_(&i__1, &i__2, &i__3, &a[(a_dim1 << 1) + 2], lda, &tau[1],
+ &work[1], lwork, &iinfo);
+ }
+ }
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DORGTR */
+
+} /* dorgtr_ */
diff --git a/contrib/libs/clapack/dorm2l.c b/contrib/libs/clapack/dorm2l.c
new file mode 100644
index 0000000000..de18e085d9
--- /dev/null
+++ b/contrib/libs/clapack/dorm2l.c
@@ -0,0 +1,231 @@
+/* dorm2l.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dorm2l_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, i1, i2, i3, mi, ni, nq;
+ doublereal aii;
+ logical left;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORM2L overwrites the general real m by n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'T', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'T', */
+
+/* where Q is a real orthogonal matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by DGEQLF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'T': apply Q' (Transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,K) */
+/* The i-th column must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* DGEQLF in the last k columns of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If SIDE = 'L', LDA >= max(1,M); */
+/* if SIDE = 'R', LDA >= max(1,N). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DGEQLF. */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/* On entry, the m by n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORM2L", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if (left && notran || ! left && ! notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) is applied to C(1:m-k+i,1:n) */
+
+ mi = *m - *k + i__;
+ } else {
+
+/* H(i) is applied to C(1:m,1:n-k+i) */
+
+ ni = *n - *k + i__;
+ }
+
+/* Apply H(i) */
+
+ aii = a[nq - *k + i__ + i__ * a_dim1];
+ a[nq - *k + i__ + i__ * a_dim1] = 1.;
+ dlarf_(side, &mi, &ni, &a[i__ * a_dim1 + 1], &c__1, &tau[i__], &c__[
+ c_offset], ldc, &work[1]);
+ a[nq - *k + i__ + i__ * a_dim1] = aii;
+/* L10: */
+ }
+ return 0;
+
+/* End of DORM2L */
+
+} /* dorm2l_ */
diff --git a/contrib/libs/clapack/dorm2r.c b/contrib/libs/clapack/dorm2r.c
new file mode 100644
index 0000000000..3a717566af
--- /dev/null
+++ b/contrib/libs/clapack/dorm2r.c
@@ -0,0 +1,235 @@
+/* dorm2r.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dorm2r_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
+ doublereal aii;
+ logical left;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORM2R overwrites the general real m by n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'T', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'T', */
+
+/* where Q is a real orthogonal matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by DGEQRF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'T': apply Q' (Transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,K) */
+/* The i-th column must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* DGEQRF in the first k columns of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If SIDE = 'L', LDA >= max(1,M); */
+/* if SIDE = 'R', LDA >= max(1,N). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DGEQRF. */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/* On entry, the m by n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORM2R", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) */
+
+ aii = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.;
+ dlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], &c__1, &tau[i__], &c__[
+ ic + jc * c_dim1], ldc, &work[1]);
+ a[i__ + i__ * a_dim1] = aii;
+/* L10: */
+ }
+ return 0;
+
+/* End of DORM2R */
+
+} /* dorm2r_ */
diff --git a/contrib/libs/clapack/dormbr.c b/contrib/libs/clapack/dormbr.c
new file mode 100644
index 0000000000..c4921dfaa7
--- /dev/null
+++ b/contrib/libs/clapack/dormbr.c
@@ -0,0 +1,361 @@
+/* dormbr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int dormbr_(char *vect, char *side, char *trans, integer *m,
+ integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau,
+ doublereal *c__, integer *ldc, doublereal *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2];
+ char ch__1[3];
+ ch__1[2] = 0;
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i1, i2, nb, mi, ni, nq, nw;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int dormlq_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ logical notran;
+ extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ logical applyq;
+ char transt[1];
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* If VECT = 'Q', DORMBR overwrites the general real M-by-N matrix C */
+/* with */
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'T': Q**T * C C * Q**T */
+
+/* If VECT = 'P', DORMBR overwrites the general real M-by-N matrix C */
+/* with */
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': P * C C * P */
+/* TRANS = 'T': P**T * C C * P**T */
+
+/* Here Q and P**T are the orthogonal matrices determined by DGEBRD when */
+/* reducing a real matrix A to bidiagonal form: A = Q * B * P**T. Q and */
+/* P**T are defined as products of elementary reflectors H(i) and G(i) */
+/* respectively. */
+
+/* Let nq = m if SIDE = 'L' and nq = n if SIDE = 'R'. Thus nq is the */
+/* order of the orthogonal matrix Q or P**T that is applied. */
+
+/* If VECT = 'Q', A is assumed to have been an NQ-by-K matrix: */
+/* if nq >= k, Q = H(1) H(2) . . . H(k); */
+/* if nq < k, Q = H(1) H(2) . . . H(nq-1). */
+
+/* If VECT = 'P', A is assumed to have been a K-by-NQ matrix: */
+/* if k < nq, P = G(1) G(2) . . . G(k); */
+/* if k >= nq, P = G(1) G(2) . . . G(nq-1). */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* = 'Q': apply Q or Q**T; */
+/* = 'P': apply P or P**T. */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q, Q**T, P or P**T from the Left; */
+/* = 'R': apply Q, Q**T, P or P**T from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q or P; */
+/* = 'T': Transpose, apply Q**T or P**T. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* If VECT = 'Q', the number of columns in the original */
+/* matrix reduced by DGEBRD. */
+/* If VECT = 'P', the number of rows in the original */
+/* matrix reduced by DGEBRD. */
+/* K >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension */
+/* (LDA,min(nq,K)) if VECT = 'Q' */
+/* (LDA,nq) if VECT = 'P' */
+/* The vectors which define the elementary reflectors H(i) and */
+/* G(i), whose products determine the matrices Q and P, as */
+/* returned by DGEBRD. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If VECT = 'Q', LDA >= max(1,nq); */
+/* if VECT = 'P', LDA >= max(1,min(nq,K)). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (min(nq,K)) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i) or G(i) which determines Q or P, as returned */
+/* by DGEBRD in the array argument TAUQ or TAUP. */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q */
+/* or P*C or P**T*C or C*P or C*P**T. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ applyq = lsame_(vect, "Q");
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q or P and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! applyq && ! lsame_(vect, "P")) {
+ *info = -1;
+ } else if (! left && ! lsame_(side, "R")) {
+ *info = -2;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*k < 0) {
+ *info = -6;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = 1, i__2 = min(nq,*k);
+ if (applyq && *lda < max(1,nq) || ! applyq && *lda < max(i__1,i__2)) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -13;
+ }
+ }
+
+ if (*info == 0) {
+ if (applyq) {
+ if (left) {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ nb = ilaenv_(&c__1, "DORMQR", ch__1, &i__1, n, &i__2, &c_n1);
+ } else {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ nb = ilaenv_(&c__1, "DORMQR", ch__1, m, &i__1, &i__2, &c_n1);
+ }
+ } else {
+ if (left) {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ nb = ilaenv_(&c__1, "DORMLQ", ch__1, &i__1, n, &i__2, &c_n1);
+ } else {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ nb = ilaenv_(&c__1, "DORMLQ", ch__1, m, &i__1, &i__2, &c_n1);
+ }
+ }
+ lwkopt = max(1,nw) * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORMBR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ work[1] = 1.;
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ if (applyq) {
+
+/* Apply Q */
+
+ if (nq >= *k) {
+
+/* Q was determined by a call to DGEBRD with nq >= k */
+
+ dormqr_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], lwork, &iinfo);
+ } else if (nq > 1) {
+
+/* Q was determined by a call to DGEBRD with nq < k */
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ i1 = 2;
+ i2 = 1;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ i1 = 1;
+ i2 = 2;
+ }
+ i__1 = nq - 1;
+ dormqr_(side, trans, &mi, &ni, &i__1, &a[a_dim1 + 2], lda, &tau[1]
+, &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+ }
+ } else {
+
+/* Apply P */
+
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+ if (nq > *k) {
+
+/* P was determined by a call to DGEBRD with nq > k */
+
+ dormlq_(side, transt, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], lwork, &iinfo);
+ } else if (nq > 1) {
+
+/* P was determined by a call to DGEBRD with nq <= k */
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ i1 = 2;
+ i2 = 1;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ i1 = 1;
+ i2 = 2;
+ }
+ i__1 = nq - 1;
+ dormlq_(side, transt, &mi, &ni, &i__1, &a[(a_dim1 << 1) + 1], lda,
+ &tau[1], &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &
+ iinfo);
+ }
+ }
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DORMBR */
+
+} /* dormbr_ */
diff --git a/contrib/libs/clapack/dormhr.c b/contrib/libs/clapack/dormhr.c
new file mode 100644
index 0000000000..aa47061b97
--- /dev/null
+++ b/contrib/libs/clapack/dormhr.c
@@ -0,0 +1,257 @@
+/* dormhr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int dormhr_(char *side, char *trans, integer *m, integer *n,
+ integer *ilo, integer *ihi, doublereal *a, integer *lda, doublereal *
+ tau, doublereal *c__, integer *ldc, doublereal *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i1, i2, nb, mi, nh, ni, nq, nw;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORMHR overwrites the general real M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'T': Q**T * C C * Q**T */
+
+/* where Q is a real orthogonal matrix of order nq, with nq = m if */
+/* SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of */
+/* IHI-ILO elementary reflectors, as returned by DGEHRD: */
+
+/* Q = H(ilo) H(ilo+1) . . . H(ihi-1). */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**T from the Left; */
+/* = 'R': apply Q or Q**T from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'T': Transpose, apply Q**T. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* ILO and IHI must have the same values as in the previous call */
+/* of DGEHRD. Q is equal to the unit matrix except in the */
+/* submatrix Q(ilo+1:ihi,ilo+1:ihi). */
+/* If SIDE = 'L', then 1 <= ILO <= IHI <= M, if M > 0, and */
+/* ILO = 1 and IHI = 0, if M = 0; */
+/* if SIDE = 'R', then 1 <= ILO <= IHI <= N, if N > 0, and */
+/* ILO = 1 and IHI = 0, if N = 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension */
+/* (LDA,M) if SIDE = 'L' */
+/* (LDA,N) if SIDE = 'R' */
+/* The vectors which define the elementary reflectors, as */
+/* returned by DGEHRD. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* LDA >= max(1,M) if SIDE = 'L'; LDA >= max(1,N) if SIDE = 'R'. */
+
+/* TAU (input) DOUBLE PRECISION array, dimension */
+/* (M-1) if SIDE = 'L' */
+/* (N-1) if SIDE = 'R' */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DGEHRD. */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nh = *ihi - *ilo;
+ left = lsame_(side, "L");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ilo < 1 || *ilo > max(1,nq)) {
+ *info = -5;
+ } else if (*ihi < min(*ilo,nq) || *ihi > nq) {
+ *info = -6;
+ } else if (*lda < max(1,nq)) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -13;
+ }
+
+ if (*info == 0) {
+ if (left) {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ nb = ilaenv_(&c__1, "DORMQR", ch__1, &nh, n, &nh, &c_n1);
+ } else {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ nb = ilaenv_(&c__1, "DORMQR", ch__1, m, &nh, &nh, &c_n1);
+ }
+ lwkopt = max(1,nw) * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__2 = -(*info);
+ xerbla_("DORMHR", &i__2);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || nh == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ if (left) {
+ mi = nh;
+ ni = *n;
+ i1 = *ilo + 1;
+ i2 = 1;
+ } else {
+ mi = *m;
+ ni = nh;
+ i1 = 1;
+ i2 = *ilo + 1;
+ }
+
+ dormqr_(side, trans, &mi, &ni, &nh, &a[*ilo + 1 + *ilo * a_dim1], lda, &
+ tau[*ilo], &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DORMHR */
+
+} /* dormhr_ */
diff --git a/contrib/libs/clapack/dorml2.c b/contrib/libs/clapack/dorml2.c
new file mode 100644
index 0000000000..d482168442
--- /dev/null
+++ b/contrib/libs/clapack/dorml2.c
@@ -0,0 +1,231 @@
+/* dorml2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dorml2_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
+ doublereal aii;
+ logical left;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORML2 overwrites the general real m by n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'T', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'T', */
+
+/* where Q is a real orthogonal matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by DGELQF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'T': apply Q' (Transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* DGELQF in the first k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DGELQF. */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/* On entry, the m by n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORML2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if (left && notran || ! left && ! notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) */
+
+ aii = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.;
+ dlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], lda, &tau[i__], &c__[
+ ic + jc * c_dim1], ldc, &work[1]);
+ a[i__ + i__ * a_dim1] = aii;
+/* L10: */
+ }
+ return 0;
+
+/* End of DORML2 */
+
+} /* dorml2_ */
diff --git a/contrib/libs/clapack/dormlq.c b/contrib/libs/clapack/dormlq.c
new file mode 100644
index 0000000000..bcc1cc4700
--- /dev/null
+++ b/contrib/libs/clapack/dormlq.c
@@ -0,0 +1,335 @@
+/* dormlq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int dormlq_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[3];
+ ch__1[2] = 0;
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ doublereal t[4160] /* was [65][64] */;
+ integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ extern /* Subroutine */ int dorml2_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *), dlarfb_(char
+ *, char *, char *, char *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal
+ *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ logical notran;
+ integer ldwork;
+ char transt[1];
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORMLQ overwrites the general real M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'T': Q**T * C C * Q**T */
+
+/* where Q is a real orthogonal matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by DGELQF. Q is of order M if SIDE = 'L' and of order N */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**T from the Left; */
+/* = 'R': apply Q or Q**T from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'T': Transpose, apply Q**T. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* DGELQF in the first k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DGELQF. */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/* Determine the block size. NB may be at most NBMAX, where NBMAX */
+/* is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "DORMLQ", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORMLQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DORMLQ", ch__1, m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ dorml2_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if (left && notran || ! left && ! notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+ i__4 = nq - i__ + 1;
+ dlarft_("Forward", "Rowwise", &i__4, &ib, &a[i__ + i__ * a_dim1],
+ lda, &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H or H' */
+
+ dlarfb_(side, transt, "Forward", "Rowwise", &mi, &ni, &ib, &a[i__
+ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1],
+ ldc, &work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DORMLQ */
+
+} /* dormlq_ */
diff --git a/contrib/libs/clapack/dormql.c b/contrib/libs/clapack/dormql.c
new file mode 100644
index 0000000000..5e65effd38
--- /dev/null
+++ b/contrib/libs/clapack/dormql.c
@@ -0,0 +1,328 @@
+/* dormql.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int dormql_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[3];
+ ch__1[2] = 0;
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ doublereal t[4160] /* was [65][64] */;
+ integer i1, i2, i3, ib, nb, mi, ni, nq, nw, iws;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ extern /* Subroutine */ int dorm2l_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *), dlarfb_(char
+ *, char *, char *, char *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal
+ *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ logical notran;
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORMQL overwrites the general real M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'T': Q**T * C C * Q**T */
+
+/* where Q is a real orthogonal matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by DGEQLF. Q is of order M if SIDE = 'L' and of order N */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**T from the Left; */
+/* = 'R': apply Q or Q**T from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'T': Transpose, apply Q**T. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,K) */
+/* The i-th column must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* DGEQLF in the last k columns of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If SIDE = 'L', LDA >= max(1,M); */
+/* if SIDE = 'R', LDA >= max(1,N). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DGEQLF. */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = max(1,*n);
+ } else {
+ nq = *n;
+ nw = max(1,*m);
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+
+ if (*info == 0) {
+ if (*m == 0 || *n == 0) {
+ lwkopt = 1;
+ } else {
+
+/* Determine the block size. NB may be at most NBMAX, where */
+/* NBMAX is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "DORMQL", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ lwkopt = nw * nb;
+ }
+ work[1] = (doublereal) lwkopt;
+
+ if (*lwork < nw && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORMQL", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DORMQL", ch__1, m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ dorm2l_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if (left && notran || ! left && ! notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ i__4 = nq - *k + i__ + ib - 1;
+ dlarft_("Backward", "Columnwise", &i__4, &ib, &a[i__ * a_dim1 + 1]
+, lda, &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(1:m-k+i+ib-1,1:n) */
+
+ mi = *m - *k + i__ + ib - 1;
+ } else {
+
+/* H or H' is applied to C(1:m,1:n-k+i+ib-1) */
+
+ ni = *n - *k + i__ + ib - 1;
+ }
+
+/* Apply H or H' */
+
+ dlarfb_(side, trans, "Backward", "Columnwise", &mi, &ni, &ib, &a[
+ i__ * a_dim1 + 1], lda, t, &c__65, &c__[c_offset], ldc, &
+ work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DORMQL */
+
+} /* dormql_ */
diff --git a/contrib/libs/clapack/dormqr.c b/contrib/libs/clapack/dormqr.c
new file mode 100644
index 0000000000..416bfc2cf6
--- /dev/null
+++ b/contrib/libs/clapack/dormqr.c
@@ -0,0 +1,328 @@
+/* dormqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int dormqr_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[3];
+ ch__1[2] = 0;
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ doublereal t[4160] /* was [65][64] */;
+ integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ extern /* Subroutine */ int dorm2r_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *), dlarfb_(char
+ *, char *, char *, char *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal
+ *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ logical notran;
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORMQR overwrites the general real M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'T': Q**T * C C * Q**T */
+
+/* where Q is a real orthogonal matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by DGEQRF. Q is of order M if SIDE = 'L' and of order N */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**T from the Left; */
+/* = 'R': apply Q or Q**T from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'T': Transpose, apply Q**T. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,K) */
+/* The i-th column must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* DGEQRF in the first k columns of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If SIDE = 'L', LDA >= max(1,M); */
+/* if SIDE = 'R', LDA >= max(1,N). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DGEQRF. */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/* Determine the block size. NB may be at most NBMAX, where NBMAX */
+/* is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "DORMQR", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORMQR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DORMQR", ch__1, m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ dorm2r_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+ i__4 = nq - i__ + 1;
+ dlarft_("Forward", "Columnwise", &i__4, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], t, &c__65)
+ ;
+ if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H or H' */
+
+ dlarfb_(side, trans, "Forward", "Columnwise", &mi, &ni, &ib, &a[
+ i__ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc *
+ c_dim1], ldc, &work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DORMQR */
+
+} /* dormqr_ */
diff --git a/contrib/libs/clapack/dormr2.c b/contrib/libs/clapack/dormr2.c
new file mode 100644
index 0000000000..78f726b233
--- /dev/null
+++ b/contrib/libs/clapack/dormr2.c
@@ -0,0 +1,227 @@
+/* dormr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dormr2_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, i1, i2, i3, mi, ni, nq;
+ doublereal aii;
+ logical left;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORMR2 overwrites the general real m by n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'T', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'T', */
+
+/* where Q is a real orthogonal matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by DGERQF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'T': apply Q' (Transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* DGERQF in the last k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DGERQF. */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/* On entry, the m by n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORMR2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) is applied to C(1:m-k+i,1:n) */
+
+ mi = *m - *k + i__;
+ } else {
+
+/* H(i) is applied to C(1:m,1:n-k+i) */
+
+ ni = *n - *k + i__;
+ }
+
+/* Apply H(i) */
+
+ aii = a[i__ + (nq - *k + i__) * a_dim1];
+ a[i__ + (nq - *k + i__) * a_dim1] = 1.;
+ dlarf_(side, &mi, &ni, &a[i__ + a_dim1], lda, &tau[i__], &c__[
+ c_offset], ldc, &work[1]);
+ a[i__ + (nq - *k + i__) * a_dim1] = aii;
+/* L10: */
+ }
+ return 0;
+
+/* End of DORMR2 */
+
+} /* dormr2_ */
diff --git a/contrib/libs/clapack/dormr3.c b/contrib/libs/clapack/dormr3.c
new file mode 100644
index 0000000000..5fb412a1f2
--- /dev/null
+++ b/contrib/libs/clapack/dormr3.c
@@ -0,0 +1,241 @@
+/* dormr3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dormr3_(char *side, char *trans, integer *m, integer *n,
+ integer *k, integer *l, doublereal *a, integer *lda, doublereal *tau,
+ doublereal *c__, integer *ldc, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, i1, i2, i3, ja, ic, jc, mi, ni, nq;
+ logical left;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dlarz_(char *, integer *, integer *, integer *
+, doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *), xerbla_(char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORMR3 overwrites the general real m by n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'T', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'T', */
+
+/* where Q is a real orthogonal matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by DTZRZF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'T': apply Q' (Transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* L (input) INTEGER */
+/* The number of columns of the matrix A containing */
+/* the meaningful part of the Householder reflectors. */
+/* If SIDE = 'L', M >= L >= 0, if SIDE = 'R', N >= L >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* DTZRZF in the last k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DTZRZF. */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/* On entry, the m-by-n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*l < 0 || left && *l > *m || ! left && *l > *n) {
+ *info = -6;
+ } else if (*lda < max(1,*k)) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORMR3", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ ja = *m - *l + 1;
+ jc = 1;
+ } else {
+ mi = *m;
+ ja = *n - *l + 1;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) or H(i)' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) or H(i)' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) or H(i)' */
+
+ dlarz_(side, &mi, &ni, l, &a[i__ + ja * a_dim1], lda, &tau[i__], &c__[
+ ic + jc * c_dim1], ldc, &work[1]);
+
+/* L10: */
+ }
+
+ return 0;
+
+/* End of DORMR3 */
+
+} /* dormr3_ */
diff --git a/contrib/libs/clapack/dormrq.c b/contrib/libs/clapack/dormrq.c
new file mode 100644
index 0000000000..d52843b837
--- /dev/null
+++ b/contrib/libs/clapack/dormrq.c
@@ -0,0 +1,335 @@
+/* dormrq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int dormrq_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ doublereal t[4160] /* was [65][64] */;
+ integer i1, i2, i3, ib, nb, mi, ni, nq, nw, iws;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ extern /* Subroutine */ int dormr2_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *), dlarfb_(char
+ *, char *, char *, char *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal
+ *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ logical notran;
+ integer ldwork;
+ char transt[1];
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DORMRQ overwrites the general real M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'T': Q**T * C C * Q**T */
+
+/* where Q is a real orthogonal matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by DGERQF. Q is of order M if SIDE = 'L' and of order N */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**T from the Left; */
+/* = 'R': apply Q or Q**T from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'T': Transpose, apply Q**T. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* DGERQF in the last k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) DOUBLE PRECISION array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by DGERQF. */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = max(1,*n);
+ } else {
+ nq = *n;
+ nw = max(1,*m);
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+
+ if (*info == 0) {
+ if (*m == 0 || *n == 0) {
+ lwkopt = 1;
+ } else {
+
+/* Determine the block size. NB may be at most NBMAX, where */
+/* NBMAX is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "DORMRQ", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ lwkopt = nw * nb;
+ }
+ work[1] = (doublereal) lwkopt;
+
+ if (*lwork < nw && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORMRQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DORMRQ", ch__1, m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ dormr2_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ i__4 = nq - *k + i__ + ib - 1;
+ dlarft_("Backward", "Rowwise", &i__4, &ib, &a[i__ + a_dim1], lda,
+ &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(1:m-k+i+ib-1,1:n) */
+
+ mi = *m - *k + i__ + ib - 1;
+ } else {
+
+/* H or H' is applied to C(1:m,1:n-k+i+ib-1) */
+
+ ni = *n - *k + i__ + ib - 1;
+ }
+
+/* Apply H or H' */
+
+ dlarfb_(side, transt, "Backward", "Rowwise", &mi, &ni, &ib, &a[
+ i__ + a_dim1], lda, t, &c__65, &c__[c_offset], ldc, &work[
+ 1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DORMRQ */
+
+} /* dormrq_ */
diff --git a/contrib/libs/clapack/dormrz.c b/contrib/libs/clapack/dormrz.c
new file mode 100644
index 0000000000..ec368fa7b8
--- /dev/null
+++ b/contrib/libs/clapack/dormrz.c
@@ -0,0 +1,362 @@
+/* dormrz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int dormrz_(char *side, char *trans, integer *m, integer *n,
+	integer *k, integer *l, doublereal *a, integer *lda, doublereal *tau,
+	doublereal *c__, integer *ldc, doublereal *work, integer *lwork,
+	integer *info)
+{
+    /* System generated locals */
+    address a__1[2];
+    integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+	    i__5;
+    char ch__1[2];
+
+    /* Builtin functions */
+    /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+    /* Local variables */
+    integer i__;
+    doublereal t[4160]	/* was [65][64] */; /* block-reflector T factor: 65x64 column-major scratch */
+    integer i1, i2, i3, ib, ic, ja, jc, nb, mi, ni, nq, nw, iws;
+    logical left;
+    extern logical lsame_(char *, char *);
+    integer nbmin, iinfo;
+    extern /* Subroutine */ int dormr3_(char *, char *, integer *, integer *,
+	    integer *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *),
+	    xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int dlarzb_(char *, char *, char *, char *,
+	    integer *, integer *, integer *, integer *, doublereal *, integer
+	    *, doublereal *, integer *, doublereal *, integer *, doublereal *,
+	     integer *), dlarzt_(char *, char
+	    *, integer *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, integer *);
+    logical notran;
+    integer ldwork;
+    char transt[1];
+    integer lwkopt;
+    logical lquery;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     January 2007 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DORMRZ overwrites the general real M-by-N matrix C with */
+
+/*                  SIDE = 'L'     SIDE = 'R' */
+/*  TRANS = 'N':      Q * C          C * Q */
+/*  TRANS = 'T':      Q**T * C       C * Q**T */
+
+/*  where Q is a real orthogonal matrix defined as the product of k */
+/*  elementary reflectors */
+
+/*        Q = H(1) H(2) . . . H(k) */
+
+/*  as returned by DTZRZF. Q is of order M if SIDE = 'L' and of order N */
+/*  if SIDE = 'R'. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  SIDE    (input) CHARACTER*1 */
+/*          = 'L': apply Q or Q**T from the Left; */
+/*          = 'R': apply Q or Q**T from the Right. */
+
+/*  TRANS   (input) CHARACTER*1 */
+/*          = 'N':  No transpose, apply Q; */
+/*          = 'T':  Transpose, apply Q**T. */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix C. M >= 0. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix C. N >= 0. */
+
+/*  K       (input) INTEGER */
+/*          The number of elementary reflectors whose product defines */
+/*          the matrix Q. */
+/*          If SIDE = 'L', M >= K >= 0; */
+/*          if SIDE = 'R', N >= K >= 0. */
+
+/*  L       (input) INTEGER */
+/*          The number of columns of the matrix A containing */
+/*          the meaningful part of the Householder reflectors. */
+/*          If SIDE = 'L', M >= L >= 0, if SIDE = 'R', N >= L >= 0. */
+
+/*  A       (input) DOUBLE PRECISION array, dimension */
+/*                               (LDA,M) if SIDE = 'L', */
+/*                               (LDA,N) if SIDE = 'R' */
+/*          The i-th row must contain the vector which defines the */
+/*          elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/*          DTZRZF in the last k rows of its array argument A. */
+/*          A is modified by the routine but restored on exit. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A. LDA >= max(1,K). */
+
+/*  TAU     (input) DOUBLE PRECISION array, dimension (K) */
+/*          TAU(i) must contain the scalar factor of the elementary */
+/*          reflector H(i), as returned by DTZRZF. */
+
+/*  C       (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/*          On entry, the M-by-N matrix C. */
+/*          On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. */
+
+/*  LDC     (input) INTEGER */
+/*          The leading dimension of the array C. LDC >= max(1,M). */
+
+/*  WORK    (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/*          On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/*  LWORK   (input) INTEGER */
+/*          The dimension of the array WORK. */
+/*          If SIDE = 'L', LWORK >= max(1,N); */
+/*          if SIDE = 'R', LWORK >= max(1,M). */
+/*          For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/*          LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/*          blocksize. */
+
+/*          If LWORK = -1, then a workspace query is assumed; the routine */
+/*          only calculates the optimal size of the WORK array, returns */
+/*          this value as the first entry of the WORK array, and no error */
+/*          message related to LWORK is issued by XERBLA. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*    A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda; /* shift pointers so 1-based Fortran indexing works in C */
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    left = lsame_(side, "L"); /* apply Q from the left? */
+    notran = lsame_(trans, "N");
+    lquery = *lwork == -1; /* LWORK == -1 means workspace-size query only */
+
+/*     NQ is the order of Q and NW is the minimum dimension of WORK */
+
+    if (left) {
+	nq = *m;
+	nw = max(1,*n);
+    } else {
+	nq = *n;
+	nw = max(1,*m);
+    }
+    if (! left && ! lsame_(side, "R")) {
+	*info = -1;
+    } else if (! notran && ! lsame_(trans, "T")) {
+	*info = -2;
+    } else if (*m < 0) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*k < 0 || *k > nq) {
+	*info = -5;
+    } else if (*l < 0 || left && *l > *m || ! left && *l > *n) {
+	*info = -6;
+    } else if (*lda < max(1,*k)) {
+	*info = -8;
+    } else if (*ldc < max(1,*m)) {
+	*info = -11;
+    }
+
+    if (*info == 0) {
+	if (*m == 0 || *n == 0) {
+	    lwkopt = 1;
+	} else {
+
+/*           Determine the block size.  NB may be at most NBMAX, where */
+/*           NBMAX is used to define the local array T. */
+
+/*           Computing MIN */
+/*           Writing concatenation */
+	    i__3[0] = 1, a__1[0] = side;
+	    i__3[1] = 1, a__1[1] = trans;
+	    s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+	    i__1 = 64, i__2 = ilaenv_(&c__1, "DORMRQ", ch__1, m, n, k, &c_n1); /* NB capped at 64: T is 65x64 */
+	    nb = min(i__1,i__2);
+	    lwkopt = nw * nb;
+	}
+	work[1] = (doublereal) lwkopt;
+
+	if (*lwork < max(1,nw) && ! lquery) {
+	    *info = -13;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DORMRZ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*m == 0 || *n == 0) {
+	work[1] = 1.;
+	return 0;
+    }
+
+    nbmin = 2;
+    ldwork = nw;
+    if (nb > 1 && nb < *k) {
+	iws = nw * nb;
+	if (*lwork < iws) {
+	    nb = *lwork / ldwork; /* shrink NB to fit the provided workspace */
+/*           Computing MAX */
+/*           Writing concatenation */
+	    i__3[0] = 1, a__1[0] = side;
+	    i__3[1] = 1, a__1[1] = trans;
+	    s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+	    i__1 = 2, i__2 = ilaenv_(&c__2, "DORMRQ", ch__1, m, n, k, &c_n1);
+	    nbmin = max(i__1,i__2);
+	}
+    } else {
+	iws = nw;
+    }
+
+    if (nb < nbmin || nb >= *k) {
+
+/*        Use unblocked code */
+
+	dormr3_(side, trans, m, n, k, l, &a[a_offset], lda, &tau[1], &c__[
+		c_offset], ldc, &work[1], &iinfo);
+    } else {
+
+/*        Use blocked code */
+
+	if (left && ! notran || ! left && notran) {
+	    i1 = 1; /* sweep reflector blocks forward */
+	    i2 = *k;
+	    i3 = nb;
+	} else {
+	    i1 = (*k - 1) / nb * nb + 1; /* sweep backward from the last partial block */
+	    i2 = 1;
+	    i3 = -nb;
+	}
+
+	if (left) {
+	    ni = *n;
+	    jc = 1;
+	    ja = *m - *l + 1;
+	} else {
+	    mi = *m;
+	    ic = 1;
+	    ja = *n - *l + 1;
+	}
+
+	if (notran) {
+	    *(unsigned char *)transt = 'T'; /* T factor is applied transposed relative to TRANS */
+	} else {
+	    *(unsigned char *)transt = 'N';
+	}
+
+	i__1 = i2;
+	i__2 = i3;
+	for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/*           Computing MIN */
+	    i__4 = nb, i__5 = *k - i__ + 1;
+	    ib = min(i__4,i__5); /* current block may be narrower than NB at the edge */
+
+/*           Form the triangular factor of the block reflector */
+/*           H = H(i+ib-1) . . . H(i+1) H(i) */
+
+	    dlarzt_("Backward", "Rowwise", l, &ib, &a[i__ + ja * a_dim1], lda,
+		     &tau[i__], t, &c__65);
+
+	    if (left) {
+
+/*              H or H' is applied to C(i:m,1:n) */
+
+		mi = *m - i__ + 1;
+		ic = i__;
+	    } else {
+
+/*              H or H' is applied to C(1:m,i:n) */
+
+		ni = *n - i__ + 1;
+		jc = i__;
+	    }
+
+/*           Apply H or H' */
+
+	    dlarzb_(side, transt, "Backward", "Rowwise", &mi, &ni, &ib, l, &a[
+		    i__ + ja * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1]
+, ldc, &work[1], &ldwork);
+/* L10: */
+	}
+
+    }
+
+    work[1] = (doublereal) lwkopt; /* report optimal workspace on exit */
+
+    return 0;
+
+/*     End of DORMRZ */
+
+} /* dormrz_ */
diff --git a/contrib/libs/clapack/dormtr.c b/contrib/libs/clapack/dormtr.c
new file mode 100644
index 0000000000..88dad7ef4b
--- /dev/null
+++ b/contrib/libs/clapack/dormtr.c
@@ -0,0 +1,296 @@
+/* dormtr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int dormtr_(char *side, char *uplo, char *trans, integer *m,
+	integer *n, doublereal *a, integer *lda, doublereal *tau, doublereal *
+	c__, integer *ldc, doublereal *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    address a__1[2];
+    integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2, i__3;
+    char ch__1[3];
+    ch__1[2] = 0; /* NUL-terminate the 2-char concatenated name buffer */
+
+    /* Builtin functions */
+    /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+    /* Local variables */
+    integer i1, i2, nb, mi, ni, nq, nw;
+    logical left;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int dormql_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, integer *),
+	    dormqr_(char *, char *, integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *, integer *);
+    integer lwkopt;
+    logical lquery;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DORMTR overwrites the general real M-by-N matrix C with */
+
+/*                  SIDE = 'L'     SIDE = 'R' */
+/*  TRANS = 'N':      Q * C          C * Q */
+/*  TRANS = 'T':      Q**T * C       C * Q**T */
+
+/*  where Q is a real orthogonal matrix of order nq, with nq = m if */
+/*  SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of */
+/*  nq-1 elementary reflectors, as returned by DSYTRD: */
+
+/*  if UPLO = 'U', Q = H(nq-1) . . . H(2) H(1); */
+
+/*  if UPLO = 'L', Q = H(1) H(2) . . . H(nq-1). */
+
+/*  Arguments */
+/*  ========= */
+
+/*  SIDE    (input) CHARACTER*1 */
+/*          = 'L': apply Q or Q**T from the Left; */
+/*          = 'R': apply Q or Q**T from the Right. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U': Upper triangle of A contains elementary reflectors */
+/*                 from DSYTRD; */
+/*          = 'L': Lower triangle of A contains elementary reflectors */
+/*                 from DSYTRD. */
+
+/*  TRANS   (input) CHARACTER*1 */
+/*          = 'N':  No transpose, apply Q; */
+/*          = 'T':  Transpose, apply Q**T. */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix C. M >= 0. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix C. N >= 0. */
+
+/*  A       (input) DOUBLE PRECISION array, dimension */
+/*                               (LDA,M) if SIDE = 'L' */
+/*                               (LDA,N) if SIDE = 'R' */
+/*          The vectors which define the elementary reflectors, as */
+/*          returned by DSYTRD. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A. */
+/*          LDA >= max(1,M) if SIDE = 'L'; LDA >= max(1,N) if SIDE = 'R'. */
+
+/*  TAU     (input) DOUBLE PRECISION array, dimension */
+/*                               (M-1) if SIDE = 'L' */
+/*                               (N-1) if SIDE = 'R' */
+/*          TAU(i) must contain the scalar factor of the elementary */
+/*          reflector H(i), as returned by DSYTRD. */
+
+/*  C       (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/*          On entry, the M-by-N matrix C. */
+/*          On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. */
+
+/*  LDC     (input) INTEGER */
+/*          The leading dimension of the array C. LDC >= max(1,M). */
+
+/*  WORK    (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/*          On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/*  LWORK   (input) INTEGER */
+/*          The dimension of the array WORK. */
+/*          If SIDE = 'L', LWORK >= max(1,N); */
+/*          if SIDE = 'R', LWORK >= max(1,M). */
+/*          For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/*          LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/*          blocksize. */
+
+/*          If LWORK = -1, then a workspace query is assumed; the routine */
+/*          only calculates the optimal size of the WORK array, returns */
+/*          this value as the first entry of the WORK array, and no error */
+/*          message related to LWORK is issued by XERBLA. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda; /* shift pointers so 1-based Fortran indexing works in C */
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    left = lsame_(side, "L");
+    upper = lsame_(uplo, "U");
+    lquery = *lwork == -1; /* LWORK == -1 means workspace-size query only */
+
+/*     NQ is the order of Q and NW is the minimum dimension of WORK */
+
+    if (left) {
+	nq = *m;
+	nw = *n;
+    } else {
+	nq = *n;
+	nw = *m;
+    }
+    if (! left && ! lsame_(side, "R")) {
+	*info = -1;
+    } else if (! upper && ! lsame_(uplo, "L")) {
+	*info = -2;
+    } else if (! lsame_(trans, "N") && ! lsame_(trans,
+	    "T")) {
+	*info = -3;
+    } else if (*m < 0) {
+	*info = -4;
+    } else if (*n < 0) {
+	*info = -5;
+    } else if (*lda < max(1,nq)) {
+	*info = -7;
+    } else if (*ldc < max(1,*m)) {
+	*info = -10;
+    } else if (*lwork < max(1,nw) && ! lquery) {
+	*info = -12;
+    }
+
+    if (*info == 0) {
+	if (upper) {
+	    if (left) {
+/* Writing concatenation */
+		i__1[0] = 1, a__1[0] = side;
+		i__1[1] = 1, a__1[1] = trans;
+		s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+		i__2 = *m - 1;
+		i__3 = *m - 1;
+		nb = ilaenv_(&c__1, "DORMQL", ch__1, &i__2, n, &i__3, &c_n1); /* UPLO='U' path delegates to DORMQL */
+	    } else {
+/* Writing concatenation */
+		i__1[0] = 1, a__1[0] = side;
+		i__1[1] = 1, a__1[1] = trans;
+		s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+		i__2 = *n - 1;
+		i__3 = *n - 1;
+		nb = ilaenv_(&c__1, "DORMQL", ch__1, m, &i__2, &i__3, &c_n1);
+	    }
+	} else {
+	    if (left) {
+/* Writing concatenation */
+		i__1[0] = 1, a__1[0] = side;
+		i__1[1] = 1, a__1[1] = trans;
+		s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+		i__2 = *m - 1;
+		i__3 = *m - 1;
+		nb = ilaenv_(&c__1, "DORMQR", ch__1, &i__2, n, &i__3, &c_n1); /* UPLO='L' path delegates to DORMQR */
+	    } else {
+/* Writing concatenation */
+		i__1[0] = 1, a__1[0] = side;
+		i__1[1] = 1, a__1[1] = trans;
+		s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+		i__2 = *n - 1;
+		i__3 = *n - 1;
+		nb = ilaenv_(&c__1, "DORMQR", ch__1, m, &i__2, &i__3, &c_n1);
+	    }
+	}
+	lwkopt = max(1,nw) * nb;
+	work[1] = (doublereal) lwkopt;
+    }
+
+    if (*info != 0) {
+	i__2 = -(*info);
+	xerbla_("DORMTR", &i__2);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*m == 0 || *n == 0 || nq == 1) {
+	work[1] = 1.;
+	return 0;
+    }
+
+    if (left) {
+	mi = *m - 1; /* Q has order nq-1 reflectors, so one row/col is untouched */
+	ni = *n;
+    } else {
+	mi = *m;
+	ni = *n - 1;
+    }
+
+    if (upper) {
+
+/*        Q was determined by a call to DSYTRD with UPLO = 'U' */
+
+	i__2 = nq - 1;
+	dormql_(side, trans, &mi, &ni, &i__2, &a[(a_dim1 << 1) + 1], lda, &
+		tau[1], &c__[c_offset], ldc, &work[1], lwork, &iinfo);
+    } else {
+
+/*        Q was determined by a call to DSYTRD with UPLO = 'L' */
+
+	if (left) {
+	    i1 = 2;
+	    i2 = 1;
+	} else {
+	    i1 = 1;
+	    i2 = 2;
+	}
+	i__2 = nq - 1;
+	dormqr_(side, trans, &mi, &ni, &i__2, &a[a_dim1 + 2], lda, &tau[1], &
+		c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+    }
+    work[1] = (doublereal) lwkopt; /* report optimal workspace on exit */
+    return 0;
+
+/*     End of DORMTR */
+
+} /* dormtr_ */
diff --git a/contrib/libs/clapack/dpbcon.c b/contrib/libs/clapack/dpbcon.c
new file mode 100644
index 0000000000..0971a580ff
--- /dev/null
+++ b/contrib/libs/clapack/dpbcon.c
@@ -0,0 +1,233 @@
+/* dpbcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dpbcon_(char *uplo, integer *n, integer *kd, doublereal *
+	ab, integer *ldab, doublereal *anorm, doublereal *rcond, doublereal *
+	work, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1;
+    doublereal d__1;
+
+    /* Local variables */
+    integer ix, kase;
+    doublereal scale;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    extern /* Subroutine */ int drscl_(integer *, doublereal *, doublereal *,
+	    integer *);
+    logical upper;
+    extern /* Subroutine */ int dlacn2_(integer *, doublereal *, doublereal *,
+	     integer *, doublereal *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    doublereal scalel;
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dlatbs_(char *, char *, char *, char *,
+	    integer *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *);
+    doublereal scaleu;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal ainvnm;
+    char normin[1];
+    doublereal smlnum;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DPBCON estimates the reciprocal of the condition number (in the */
+/*  1-norm) of a real symmetric positive definite band matrix using the */
+/*  Cholesky factorization A = U**T*U or A = L*L**T computed by DPBTRF. */
+
+/*  An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/*  condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangular factor stored in AB; */
+/*          = 'L':  Lower triangular factor stored in AB. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  KD      (input) INTEGER */
+/*          The number of superdiagonals of the matrix A if UPLO = 'U', */
+/*          or the number of subdiagonals if UPLO = 'L'.  KD >= 0. */
+
+/*  AB      (input) DOUBLE PRECISION array, dimension (LDAB,N) */
+/*          The triangular factor U or L from the Cholesky factorization */
+/*          A = U**T*U or A = L*L**T of the band matrix A, stored in the */
+/*          first KD+1 rows of the array.  The j-th column of U or L is */
+/*          stored in the j-th column of the array AB as follows: */
+/*          if UPLO ='U', AB(kd+1+i-j,j) = U(i,j) for max(1,j-kd)<=i<=j; */
+/*          if UPLO ='L', AB(1+i-j,j)    = L(i,j) for j<=i<=min(n,j+kd). */
+
+/*  LDAB    (input) INTEGER */
+/*          The leading dimension of the array AB.  LDAB >= KD+1. */
+
+/*  ANORM   (input) DOUBLE PRECISION */
+/*          The 1-norm (or infinity-norm) of the symmetric band matrix A. */
+
+/*  RCOND   (output) DOUBLE PRECISION */
+/*          The reciprocal of the condition number of the matrix A, */
+/*          computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/*          estimate of the 1-norm of inv(A) computed in this routine. */
+
+/*  WORK    (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/*  IWORK   (workspace) INTEGER array, dimension (N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab; /* shift pointers so 1-based Fortran indexing works in C */
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*ldab < *kd + 1) {
+	*info = -5;
+    } else if (*anorm < 0.) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPBCON", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    *rcond = 0.;
+    if (*n == 0) {
+	*rcond = 1.;
+	return 0;
+    } else if (*anorm == 0.) {
+	return 0; /* singular by the given norm: RCOND stays 0 */
+    }
+
+    smlnum = dlamch_("Safe minimum");
+
+/*     Estimate the 1-norm of the inverse. */
+
+    kase = 0; /* DLACN2 reverse communication: kase selects the next required product */
+    *(unsigned char *)normin = 'N';
+L10:
+    dlacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+    if (kase != 0) {
+	if (upper) {
+
+/*           Multiply by inv(U'). */
+
+	    dlatbs_("Upper", "Transpose", "Non-unit", normin, n, kd, &ab[
+		    ab_offset], ldab, &work[1], &scalel, &work[(*n << 1) + 1],
+		     info);
+	    *(unsigned char *)normin = 'Y'; /* column norms in WORK(2N+1..) now valid */
+
+/*           Multiply by inv(U). */
+
+	    dlatbs_("Upper", "No transpose", "Non-unit", normin, n, kd, &ab[
+		    ab_offset], ldab, &work[1], &scaleu, &work[(*n << 1) + 1],
+		     info);
+	} else {
+
+/*           Multiply by inv(L). */
+
+	    dlatbs_("Lower", "No transpose", "Non-unit", normin, n, kd, &ab[
+		    ab_offset], ldab, &work[1], &scalel, &work[(*n << 1) + 1],
+		     info);
+	    *(unsigned char *)normin = 'Y';
+
+/*           Multiply by inv(L'). */
+
+	    dlatbs_("Lower", "Transpose", "Non-unit", normin, n, kd, &ab[
+		    ab_offset], ldab, &work[1], &scaleu, &work[(*n << 1) + 1],
+		     info);
+	}
+
+/*        Multiply by 1/SCALE if doing so will not cause overflow. */
+
+	scale = scalel * scaleu;
+	if (scale != 1.) {
+	    ix = idamax_(n, &work[1], &c__1);
+	    if (scale < (d__1 = work[ix], abs(d__1)) * smlnum || scale == 0.)
+		    {
+		goto L20; /* rescaling would overflow: abandon the estimate */
+	    }
+	    drscl_(n, &scale, &work[1], &c__1);
+	}
+	goto L10;
+    }
+
+/*     Compute the estimate of the reciprocal condition number. */
+
+    if (ainvnm != 0.) {
+	*rcond = 1. / ainvnm / *anorm;
+    }
+
+L20:
+
+    return 0;
+
+/*     End of DPBCON */
+
+} /* dpbcon_ */
diff --git a/contrib/libs/clapack/dpbequ.c b/contrib/libs/clapack/dpbequ.c
new file mode 100644
index 0000000000..18baa51cc6
--- /dev/null
+++ b/contrib/libs/clapack/dpbequ.c
@@ -0,0 +1,203 @@
+/* dpbequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dpbequ_(char *uplo, integer *n, integer *kd, doublereal *
+	ab, integer *ldab, doublereal *s, doublereal *scond, doublereal *amax,
+	 integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j;
+    doublereal smin;
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DPBEQU computes row and column scalings intended to equilibrate a */
+/*  symmetric positive definite band matrix A and reduce its condition */
+/*  number (with respect to the two-norm).  S contains the scale factors, */
+/*  S(i) = 1/sqrt(A(i,i)), chosen so that the scaled matrix B with */
+/*  elements B(i,j) = S(i)*A(i,j)*S(j) has ones on the diagonal.  This */
+/*  choice of S puts the condition number of B within a factor N of the */
+/*  smallest possible condition number over all possible diagonal */
+/*  scalings. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangular of A is stored; */
+/*          = 'L':  Lower triangular of A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  KD      (input) INTEGER */
+/*          The number of superdiagonals of the matrix A if UPLO = 'U', */
+/*          or the number of subdiagonals if UPLO = 'L'.  KD >= 0. */
+
+/*  AB      (input) DOUBLE PRECISION array, dimension (LDAB,N) */
+/*          The upper or lower triangle of the symmetric band matrix A, */
+/*          stored in the first KD+1 rows of the array.  The j-th column */
+/*          of A is stored in the j-th column of the array AB as follows: */
+/*          if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/*          if UPLO = 'L', AB(1+i-j,j)    = A(i,j) for j<=i<=min(n,j+kd). */
+
+/*  LDAB     (input) INTEGER */
+/*          The leading dimension of the array A.  LDAB >= KD+1. */
+
+/*  S       (output) DOUBLE PRECISION array, dimension (N) */
+/*          If INFO = 0, S contains the scale factors for A. */
+
+/*  SCOND   (output) DOUBLE PRECISION */
+/*          If INFO = 0, S contains the ratio of the smallest S(i) to */
+/*          the largest S(i).  If SCOND >= 0.1 and AMAX is neither too */
+/*          large nor too small, it is not worth scaling by S. */
+
+/*  AMAX    (output) DOUBLE PRECISION */
+/*          Absolute value of largest matrix element.  If AMAX is very */
+/*          close to overflow or very close to underflow, the matrix */
+/*          should be scaled. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */
+/*          > 0:  if INFO = i, the i-th diagonal element is nonpositive. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab; /* shift pointers so 1-based Fortran indexing works in C */
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --s;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*ldab < *kd + 1) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPBEQU", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	*scond = 1.;
+	*amax = 0.;
+	return 0;
+    }
+
+    if (upper) {
+	j = *kd + 1; /* diagonal of A sits in band-storage row KD+1 when UPLO='U' */
+    } else {
+	j = 1; /* ...and in row 1 when UPLO='L' */
+    }
+
+/*     Initialize SMIN and AMAX. */
+
+    s[1] = ab[j + ab_dim1];
+    smin = s[1];
+    *amax = s[1];
+
+/*     Find the minimum and maximum diagonal elements. */
+
+    i__1 = *n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+	s[i__] = ab[j + i__ * ab_dim1];
+/* Computing MIN */
+	d__1 = smin, d__2 = s[i__];
+	smin = min(d__1,d__2);
+/* Computing MAX */
+	d__1 = *amax, d__2 = s[i__];
+	*amax = max(d__1,d__2);
+/* L10: */
+    }
+
+    if (smin <= 0.) {
+
+/*        Find the first non-positive diagonal element and return. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (s[i__] <= 0.) {
+		*info = i__; /* not positive definite: report the offending index */
+		return 0;
+	    }
+/* L20: */
+	}
+    } else {
+
+/*        Set the scale factors to the reciprocals */
+/*        of the diagonal elements. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    s[i__] = 1. / sqrt(s[i__]);
+/* L30: */
+	}
+
+/*        Compute SCOND = min(S(I)) / max(S(I)) */
+
+	*scond = sqrt(smin) / sqrt(*amax);
+    }
+    return 0;
+
+/*     End of DPBEQU */
+
+} /* dpbequ_ */
diff --git a/contrib/libs/clapack/dpbrfs.c b/contrib/libs/clapack/dpbrfs.c
new file mode 100644
index 0000000000..9239551218
--- /dev/null
+++ b/contrib/libs/clapack/dpbrfs.c
@@ -0,0 +1,438 @@
+/* dpbrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b12 = -1.;
+static doublereal c_b14 = 1.;
+
+/* Subroutine */ int dpbrfs_(char *uplo, integer *n, integer *kd, integer *
+ nrhs, doublereal *ab, integer *ldab, doublereal *afb, integer *ldafb,
+ doublereal *b, integer *ldb, doublereal *x, integer *ldx, doublereal *
+ ferr, doublereal *berr, doublereal *work, integer *iwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, afb_dim1, afb_offset, b_dim1, b_offset,
+ x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3;
+
+ /* Local variables */
+ integer i__, j, k, l;
+ doublereal s, xk;
+ integer nz;
+ doublereal eps;
+ integer kase;
+ doublereal safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int dsbmv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *), dcopy_(integer *,
+ doublereal *, integer *, doublereal *, integer *), daxpy_(integer
+ *, doublereal *, doublereal *, integer *, doublereal *, integer *)
+ ;
+ integer count;
+ logical upper;
+ extern /* Subroutine */ int dlacn2_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ extern doublereal dlamch_(char *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *), dpbtrs_(
+ char *, integer *, integer *, integer *, doublereal *, integer *,
+ doublereal *, integer *, integer *);
+ doublereal lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPBRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is symmetric positive definite */
+/* and banded, and provides error bounds and backward error estimates */
+/* for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AB (input) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* The upper or lower triangle of the symmetric band matrix A, */
+/* stored in the first KD+1 rows of the array. The j-th column */
+/* of A is stored in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* AFB (input) DOUBLE PRECISION array, dimension (LDAFB,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**T*U or A = L*L**T of the band matrix A as computed by */
+/* DPBTRF, in the same storage format as A (see AB). */
+
+/* LDAFB (input) INTEGER */
+/* The leading dimension of the array AFB. LDAFB >= KD+1. */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by DPBTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ afb_dim1 = *ldafb;
+ afb_offset = 1 + afb_dim1;
+ afb -= afb_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kd < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*ldab < *kd + 1) {
+ *info = -6;
+ } else if (*ldafb < *kd + 1) {
+ *info = -8;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldx < max(1,*n)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPBRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.;
+ berr[j] = 0.;
+/* L10: */
+ }
+ return 0;
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+/* Computing MIN */
+ i__1 = *n + 1, i__2 = (*kd << 1) + 2;
+ nz = min(i__1,i__2);
+ eps = dlamch_("Epsilon");
+ safmin = dlamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+ dcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+ dsbmv_(uplo, n, kd, &c_b12, &ab[ab_offset], ldab, &x[j * x_dim1 + 1],
+ &c__1, &c_b14, &work[*n + 1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] = (d__1 = b[i__ + j * b_dim1], abs(d__1));
+/* L30: */
+ }
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+ if (upper) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+ l = *kd + 1 - k;
+/* Computing MAX */
+ i__3 = 1, i__4 = k - *kd;
+ i__5 = k - 1;
+ for (i__ = max(i__3,i__4); i__ <= i__5; ++i__) {
+ work[i__] += (d__1 = ab[l + i__ + k * ab_dim1], abs(d__1))
+ * xk;
+ s += (d__1 = ab[l + i__ + k * ab_dim1], abs(d__1)) * (
+ d__2 = x[i__ + j * x_dim1], abs(d__2));
+/* L40: */
+ }
+ work[k] = work[k] + (d__1 = ab[*kd + 1 + k * ab_dim1], abs(
+ d__1)) * xk + s;
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+ work[k] += (d__1 = ab[k * ab_dim1 + 1], abs(d__1)) * xk;
+ l = 1 - k;
+/* Computing MIN */
+ i__3 = *n, i__4 = k + *kd;
+ i__5 = min(i__3,i__4);
+ for (i__ = k + 1; i__ <= i__5; ++i__) {
+ work[i__] += (d__1 = ab[l + i__ + k * ab_dim1], abs(d__1))
+ * xk;
+ s += (d__1 = ab[l + i__ + k * ab_dim1], abs(d__1)) * (
+ d__2 = x[i__ + j * x_dim1], abs(d__2));
+/* L60: */
+ }
+ work[k] += s;
+/* L70: */
+ }
+ }
+ s = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+/* Computing MAX */
+ d__2 = s, d__3 = (d__1 = work[*n + i__], abs(d__1)) / work[
+ i__];
+ s = max(d__2,d__3);
+ } else {
+/* Computing MAX */
+ d__2 = s, d__3 = ((d__1 = work[*n + i__], abs(d__1)) + safe1)
+ / (work[i__] + safe1);
+ s = max(d__2,d__3);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ dpbtrs_(uplo, n, kd, &c__1, &afb[afb_offset], ldafb, &work[*n + 1]
+, n, info);
+ daxpy_(n, &c_b14, &work[*n + 1], &c__1, &x[j * x_dim1 + 1], &c__1)
+ ;
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use DLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+ work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+ work[i__];
+ } else {
+ work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+ work[i__] + safe1;
+ }
+/* L90: */
+ }
+
+ kase = 0;
+L100:
+ dlacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+ kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+ dpbtrs_(uplo, n, kd, &c__1, &afb[afb_offset], ldafb, &work[*n
+ + 1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] *= work[i__];
+/* L110: */
+ }
+ } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] *= work[i__];
+/* L120: */
+ }
+ dpbtrs_(uplo, n, kd, &c__1, &afb[afb_offset], ldafb, &work[*n
+ + 1], n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = lstres, d__3 = (d__1 = x[i__ + j * x_dim1], abs(d__1));
+ lstres = max(d__2,d__3);
+/* L130: */
+ }
+ if (lstres != 0.) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of DPBRFS */
+
+} /* dpbrfs_ */
diff --git a/contrib/libs/clapack/dpbstf.c b/contrib/libs/clapack/dpbstf.c
new file mode 100644
index 0000000000..83ae6c2d31
--- /dev/null
+++ b/contrib/libs/clapack/dpbstf.c
@@ -0,0 +1,312 @@
+/* dpbstf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b9 = -1.;
+
+/* Subroutine */ int dpbstf_(char *uplo, integer *n, integer *kd, doublereal *
+ ab, integer *ldab, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer j, m, km;
+ doublereal ajj;
+ integer kld;
+ extern /* Subroutine */ int dsyr_(char *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *), dscal_(
+ integer *, doublereal *, doublereal *, integer *);
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPBSTF computes a split Cholesky factorization of a real */
+/* symmetric positive definite band matrix A. */
+
+/* This routine is designed to be used in conjunction with DSBGST. */
+
+/* The factorization has the form A = S**T*S where S is a band matrix */
+/* of the same bandwidth as A and the following structure: */
+
+/* S = ( U ) */
+/* ( M L ) */
+
+/* where U is upper triangular of order m = (n+kd)/2, and L is lower */
+/* triangular of order n-m. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first kd+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, if INFO = 0, the factor S from the split Cholesky */
+/* factorization A = S**T*S. See Further Details. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the factorization could not be completed, */
+/* because the updated element a(i,i) was negative; the */
+/* matrix A is not positive definite. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* N = 7, KD = 2: */
+
+/* S = ( s11 s12 s13 ) */
+/* ( s22 s23 s24 ) */
+/* ( s33 s34 ) */
+/* ( s44 ) */
+/* ( s53 s54 s55 ) */
+/* ( s64 s65 s66 ) */
+/* ( s75 s76 s77 ) */
+
+/* If UPLO = 'U', the array AB holds: */
+
+/* on entry: on exit: */
+
+/* * * a13 a24 a35 a46 a57 * * s13 s24 s53 s64 s75 */
+/* * a12 a23 a34 a45 a56 a67 * s12 s23 s34 s54 s65 s76 */
+/* a11 a22 a33 a44 a55 a66 a77 s11 s22 s33 s44 s55 s66 s77 */
+
+/* If UPLO = 'L', the array AB holds: */
+
+/* on entry: on exit: */
+
+/* a11 a22 a33 a44 a55 a66 a77 s11 s22 s33 s44 s55 s66 s77 */
+/* a21 a32 a43 a54 a65 a76 * s12 s23 s34 s54 s65 s76 * */
+/*  a31  a42  a53  a64  a75   *    *   s13  s24  s53  s64  s75   *    * */
+
+/* Array elements marked * are not used by the routine. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kd < 0) {
+ *info = -3;
+ } else if (*ldab < *kd + 1) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPBSTF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Computing MAX */
+ i__1 = 1, i__2 = *ldab - 1;
+ kld = max(i__1,i__2);
+
+/* Set the splitting point m. */
+
+ m = (*n + *kd) / 2;
+
+ if (upper) {
+
+/* Factorize A(m+1:n,m+1:n) as L**T*L, and update A(1:m,1:m). */
+
+ i__1 = m + 1;
+ for (j = *n; j >= i__1; --j) {
+
+/* Compute s(j,j) and test for non-positive-definiteness. */
+
+ ajj = ab[*kd + 1 + j * ab_dim1];
+ if (ajj <= 0.) {
+ goto L50;
+ }
+ ajj = sqrt(ajj);
+ ab[*kd + 1 + j * ab_dim1] = ajj;
+/* Computing MIN */
+ i__2 = j - 1;
+ km = min(i__2,*kd);
+
+/*           Compute elements j-km:j-1 of the j-th column and update */
+/*           the leading submatrix within the band. */
+
+ d__1 = 1. / ajj;
+ dscal_(&km, &d__1, &ab[*kd + 1 - km + j * ab_dim1], &c__1);
+ dsyr_("Upper", &km, &c_b9, &ab[*kd + 1 - km + j * ab_dim1], &c__1,
+ &ab[*kd + 1 + (j - km) * ab_dim1], &kld);
+/* L10: */
+ }
+
+/* Factorize the updated submatrix A(1:m,1:m) as U**T*U. */
+
+ i__1 = m;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute s(j,j) and test for non-positive-definiteness. */
+
+ ajj = ab[*kd + 1 + j * ab_dim1];
+ if (ajj <= 0.) {
+ goto L50;
+ }
+ ajj = sqrt(ajj);
+ ab[*kd + 1 + j * ab_dim1] = ajj;
+/* Computing MIN */
+ i__2 = *kd, i__3 = m - j;
+ km = min(i__2,i__3);
+
+/* Compute elements j+1:j+km of the j-th row and update the */
+/* trailing submatrix within the band. */
+
+ if (km > 0) {
+ d__1 = 1. / ajj;
+ dscal_(&km, &d__1, &ab[*kd + (j + 1) * ab_dim1], &kld);
+ dsyr_("Upper", &km, &c_b9, &ab[*kd + (j + 1) * ab_dim1], &kld,
+ &ab[*kd + 1 + (j + 1) * ab_dim1], &kld);
+ }
+/* L20: */
+ }
+ } else {
+
+/* Factorize A(m+1:n,m+1:n) as L**T*L, and update A(1:m,1:m). */
+
+ i__1 = m + 1;
+ for (j = *n; j >= i__1; --j) {
+
+/* Compute s(j,j) and test for non-positive-definiteness. */
+
+ ajj = ab[j * ab_dim1 + 1];
+ if (ajj <= 0.) {
+ goto L50;
+ }
+ ajj = sqrt(ajj);
+ ab[j * ab_dim1 + 1] = ajj;
+/* Computing MIN */
+ i__2 = j - 1;
+ km = min(i__2,*kd);
+
+/* Compute elements j-km:j-1 of the j-th row and update the */
+/* trailing submatrix within the band. */
+
+ d__1 = 1. / ajj;
+ dscal_(&km, &d__1, &ab[km + 1 + (j - km) * ab_dim1], &kld);
+ dsyr_("Lower", &km, &c_b9, &ab[km + 1 + (j - km) * ab_dim1], &kld,
+ &ab[(j - km) * ab_dim1 + 1], &kld);
+/* L30: */
+ }
+
+/* Factorize the updated submatrix A(1:m,1:m) as U**T*U. */
+
+ i__1 = m;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute s(j,j) and test for non-positive-definiteness. */
+
+ ajj = ab[j * ab_dim1 + 1];
+ if (ajj <= 0.) {
+ goto L50;
+ }
+ ajj = sqrt(ajj);
+ ab[j * ab_dim1 + 1] = ajj;
+/* Computing MIN */
+ i__2 = *kd, i__3 = m - j;
+ km = min(i__2,i__3);
+
+/* Compute elements j+1:j+km of the j-th column and update the */
+/* trailing submatrix within the band. */
+
+ if (km > 0) {
+ d__1 = 1. / ajj;
+ dscal_(&km, &d__1, &ab[j * ab_dim1 + 2], &c__1);
+ dsyr_("Lower", &km, &c_b9, &ab[j * ab_dim1 + 2], &c__1, &ab[(
+ j + 1) * ab_dim1 + 1], &kld);
+ }
+/* L40: */
+ }
+ }
+ return 0;
+
+L50:
+ *info = j;
+ return 0;
+
+/* End of DPBSTF */
+
+} /* dpbstf_ */
diff --git a/contrib/libs/clapack/dpbsv.c b/contrib/libs/clapack/dpbsv.c
new file mode 100644
index 0000000000..510dd25e85
--- /dev/null
+++ b/contrib/libs/clapack/dpbsv.c
@@ -0,0 +1,182 @@
+/* dpbsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dpbsv_(char *uplo, integer *n, integer *kd, integer *
+ nrhs, doublereal *ab, integer *ldab, doublereal *b, integer *ldb,
+ integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), dpbtrf_(
+ char *, integer *, integer *, doublereal *, integer *, integer *), dpbtrs_(char *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPBSV computes the solution to a real system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric positive definite band matrix and X */
+/* and B are N-by-NRHS matrices. */
+
+/* The Cholesky decomposition is used to factor A as */
+/* A = U**T * U, if UPLO = 'U', or */
+/* A = L * L**T, if UPLO = 'L', */
+/* where U is an upper triangular band matrix, and L is a lower */
+/* triangular band matrix, with the same number of superdiagonals or */
+/* subdiagonals as A. The factored form of A is then used to solve the */
+/* system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(KD+1+i-j,j) = A(i,j) for max(1,j-KD)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(N,j+KD). */
+/* See below for further details. */
+
+/* On exit, if INFO = 0, the triangular factor U or L from the */
+/* Cholesky factorization A = U**T*U or A = L*L**T of the band */
+/* matrix A, in the same storage format as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i of A is not */
+/* positive definite, so the factorization could not be */
+/* completed, and the solution has not been computed. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* N = 6, KD = 2, and UPLO = 'U': */
+
+/* On entry: On exit: */
+
+/* * * a13 a24 a35 a46 * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+
+/* Similarly, if UPLO = 'L' the format of A is as follows: */
+
+/* On entry: On exit: */
+
+/* a11 a22 a33 a44 a55 a66 l11 l22 l33 l44 l55 l66 */
+/* a21 a32 a43 a54 a65 * l21 l32 l43 l54 l65 * */
+/* a31 a42 a53 a64 * * l31 l42 l53 l64 * * */
+
+/* Array elements marked * are not used by the routine. */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kd < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*ldab < *kd + 1) {
+ *info = -6;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPBSV ", &i__1);
+ return 0;
+ }
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+ dpbtrf_(uplo, n, kd, &ab[ab_offset], ldab, info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ dpbtrs_(uplo, n, kd, nrhs, &ab[ab_offset], ldab, &b[b_offset], ldb,
+ info);
+
+ }
+ return 0;
+
+/* End of DPBSV */
+
+} /* dpbsv_ */
diff --git a/contrib/libs/clapack/dpbsvx.c b/contrib/libs/clapack/dpbsvx.c
new file mode 100644
index 0000000000..5ba6e225a3
--- /dev/null
+++ b/contrib/libs/clapack/dpbsvx.c
@@ -0,0 +1,515 @@
+/* dpbsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dpbsvx_(char *fact, char *uplo, integer *n, integer *kd, 
+	integer *nrhs, doublereal *ab, integer *ldab, doublereal *afb, 
+	integer *ldafb, char *equed, doublereal *s, doublereal *b, integer *
+	ldb, doublereal *x, integer *ldx, doublereal *rcond, doublereal *ferr, 
+	 doublereal *berr, doublereal *work, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, afb_dim1, afb_offset, b_dim1, b_offset, 
+	    x_dim1, x_offset, i__1, i__2;
+    doublereal d__1, d__2;
+
+    /* Local variables */
+    integer i__, j, j1, j2;
+    doublereal amax, smin, smax;
+    extern logical lsame_(char *, char *);
+    doublereal scond, anorm;
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, 
+	    doublereal *, integer *);
+    logical equil, rcequ, upper;
+    extern doublereal dlamch_(char *), dlansb_(char *, char *, 
+	    integer *, integer *, doublereal *, integer *, doublereal *);
+    extern /* Subroutine */ int dpbcon_(char *, integer *, integer *, 
+	    doublereal *, integer *, doublereal *, doublereal *, doublereal *, 
+	    integer *, integer *), dlaqsb_(char *, integer *, 
+	    integer *, doublereal *, integer *, doublereal *, doublereal *, 
+	    doublereal *, char *);
+    logical nofact;
+    extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, 
+	    doublereal *, integer *, doublereal *, integer *), 
+	    xerbla_(char *, integer *), dpbequ_(char *, integer *, 
+	    integer *, doublereal *, integer *, doublereal *, doublereal *, 
+	    doublereal *, integer *);
+    doublereal bignum;
+    extern /* Subroutine */ int dpbrfs_(char *, integer *, integer *, integer 
+	    *, doublereal *, integer *, doublereal *, integer *, doublereal *, 
+	    integer *, doublereal *, integer *, doublereal *, doublereal *, 
+	    doublereal *, integer *, integer *), dpbtrf_(char *, 
+	    integer *, integer *, doublereal *, integer *, integer *);
+    integer infequ;
+    extern /* Subroutine */ int dpbtrs_(char *, integer *, integer *, integer 
+	    *, doublereal *, integer *, doublereal *, integer *, integer *);
+    doublereal smlnum;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPBSVX uses the Cholesky factorization A = U**T*U or A = L*L**T to */
+/* compute the solution to a real system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric positive definite band matrix and X */
+/* and B are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/* the system: */
+/* diag(S) * A * diag(S) * inv(diag(S)) * X = diag(S) * B */
+/* Whether or not the system will be equilibrated depends on the */
+/* scaling of the matrix A, but if equilibration is used, A is */
+/* overwritten by diag(S)*A*diag(S) and B by diag(S)*B. */
+
+/* 2. If FACT = 'N' or 'E', the Cholesky decomposition is used to */
+/* factor the matrix A (after equilibration if FACT = 'E') as */
+/* A = U**T * U, if UPLO = 'U', or */
+/* A = L * L**T, if UPLO = 'L', */
+/* where U is an upper triangular band matrix, and L is a lower */
+/* triangular band matrix. */
+
+/* 3. If the leading i-by-i principal minor is not positive definite, */
+/* then the routine returns with INFO = i. Otherwise, the factored */
+/* form of A is used to estimate the condition number of the matrix */
+/* A. If the reciprocal of the condition number is less than machine */
+/* precision, INFO = N+1 is returned as a warning, but the routine */
+/* still goes on to solve for X and compute error bounds as */
+/* described below. */
+
+/* 4. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 5. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* 6. If equilibration was used, the matrix X is premultiplied by */
+/* diag(S) so that it solves the original system before */
+/* equilibration. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of the matrix A is */
+/* supplied on entry, and if not, whether the matrix A should be */
+/* equilibrated before it is factored. */
+/* = 'F': On entry, AFB contains the factored form of A. */
+/* If EQUED = 'Y', the matrix A has been equilibrated */
+/* with scaling factors given by S. AB and AFB will not */
+/* be modified. */
+/* = 'N': The matrix A will be copied to AFB and factored. */
+/* = 'E': The matrix A will be equilibrated if necessary, then */
+/* copied to AFB and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right-hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first KD+1 rows of the array, except */
+/* if FACT = 'F' and EQUED = 'Y', then A must contain the */
+/* equilibrated matrix diag(S)*A*diag(S). The j-th column of A */
+/* is stored in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(KD+1+i-j,j) = A(i,j) for max(1,j-KD)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(N,j+KD). */
+/* See below for further details. */
+
+/* On exit, if FACT = 'E' and EQUED = 'Y', A is overwritten by */
+/* diag(S)*A*diag(S). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array A. LDAB >= KD+1. */
+
+/* AFB (input or output) DOUBLE PRECISION array, dimension (LDAFB,N) */
+/* If FACT = 'F', then AFB is an input argument and on entry */
+/* contains the triangular factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T of the band matrix */
+/* A, in the same storage format as A (see AB). If EQUED = 'Y', */
+/* then AFB is the factored form of the equilibrated matrix A. */
+
+/* If FACT = 'N', then AFB is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T. */
+
+/* If FACT = 'E', then AFB is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T of the equilibrated */
+/* matrix A (see the description of A for the form of the */
+/* equilibrated matrix). */
+
+/* LDAFB (input) INTEGER */
+/* The leading dimension of the array AFB. LDAFB >= KD+1. */
+
+/* EQUED (input or output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration (always true if FACT = 'N'). */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+/* EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/* output argument. */
+
+/* S (input or output) DOUBLE PRECISION array, dimension (N) */
+/* The scale factors for A; not accessed if EQUED = 'N'. S is */
+/* an input argument if FACT = 'F'; otherwise, S is an output */
+/* argument. If FACT = 'F' and EQUED = 'Y', each element of S */
+/* must be positive. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if EQUED = 'N', B is not modified; if EQUED = 'Y', */
+/* B is overwritten by diag(S) * B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X to */
+/* the original system of equations. Note that if EQUED = 'Y', */
+/* A and B are modified on exit, and the solution to the */
+/* equilibrated system is inv(diag(S))*X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A after equilibration (if done). If RCOND is less than the */
+/* machine precision (in particular, if RCOND = 0), the matrix */
+/* is singular to working precision. This condition is */
+/* indicated by a return code of INFO > 0. */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: the leading minor of order i of A is */
+/* not positive definite, so the factorization */
+/* could not be completed, and the solution has not */
+/* been computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* N = 6, KD = 2, and UPLO = 'U': */
+
+/* Two-dimensional storage of the symmetric matrix A: */
+
+/* a11 a12 a13 */
+/* a22 a23 a24 */
+/* a33 a34 a35 */
+/* a44 a45 a46 */
+/* a55 a56 */
+/* (aij=conjg(aji)) a66 */
+
+/* Band storage of the upper triangle of A: */
+
+/* * * a13 a24 a35 a46 */
+/* * a12 a23 a34 a45 a56 */
+/* a11 a22 a33 a44 a55 a66 */
+
+/* Similarly, if UPLO = 'L' the format of A is as follows: */
+
+/* a11 a22 a33 a44 a55 a66 */
+/* a21 a32 a43 a54 a65 * */
+/* a31 a42 a53 a64 * * */
+
+/* Array elements marked * are not used by the routine. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* f2c convention: all array pointers are shifted so that Fortran-style */
+/* 1-based indexing (e.g. ab[i + j*ab_dim1]) works throughout. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    afb_dim1 = *ldafb;
+    afb_offset = 1 + afb_dim1;
+    afb -= afb_offset;
+    --s;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    nofact = lsame_(fact, "N");
+    equil = lsame_(fact, "E");
+    upper = lsame_(uplo, "U");
+/* SMLNUM and BIGNUM are computed only on the FACT = 'F' path: they are */
+/* used below (when RCEQU) to validate the user-supplied scale factors S. */
+    if (nofact || equil) {
+	*(unsigned char *)equed = 'N';
+	rcequ = FALSE_;
+    } else {
+	rcequ = lsame_(equed, "Y");
+	smlnum = dlamch_("Safe minimum");
+	bignum = 1. / smlnum;
+    }
+
+/* Test the input parameters. */
+
+    if (! nofact && ! equil && ! lsame_(fact, "F")) {
+	*info = -1;
+    } else if (! upper && ! lsame_(uplo, "L")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*kd < 0) {
+	*info = -4;
+    } else if (*nrhs < 0) {
+	*info = -5;
+    } else if (*ldab < *kd + 1) {
+	*info = -7;
+    } else if (*ldafb < *kd + 1) {
+	*info = -9;
+    } else if (lsame_(fact, "F") && ! (rcequ || lsame_(
+	    equed, "N"))) {
+	*info = -10;
+    } else {
+	if (rcequ) {
+	    smin = bignum;
+	    smax = 0.;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		d__1 = smin, d__2 = s[j];
+		smin = min(d__1,d__2);
+/* Computing MAX */
+		d__1 = smax, d__2 = s[j];
+		smax = max(d__1,d__2);
+/* L10: */
+	    }
+	    if (smin <= 0.) {
+		*info = -11;
+	    } else if (*n > 0) {
+		scond = max(smin,smlnum) / min(smax,bignum);
+	    } else {
+		scond = 1.;
+	    }
+	}
+	if (*info == 0) {
+	    if (*ldb < max(1,*n)) {
+		*info = -13;
+	    } else if (*ldx < max(1,*n)) {
+		*info = -15;
+	    }
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPBSVX", &i__1);
+	return 0;
+    }
+
+    if (equil) {
+
+/* Compute row and column scalings to equilibrate the matrix A. */
+
+	dpbequ_(uplo, n, kd, &ab[ab_offset], ldab, &s[1], &scond, &amax, &
+		infequ);
+	if (infequ == 0) {
+
+/* Equilibrate the matrix. */
+
+	    dlaqsb_(uplo, n, kd, &ab[ab_offset], ldab, &s[1], &scond, &amax,
+		     equed);
+	    rcequ = lsame_(equed, "Y");
+	}
+    }
+
+/* Scale the right-hand side. */
+
+    if (rcequ) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *n;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		b[i__ + j * b_dim1] = s[i__] * b[i__ + j * b_dim1];
+/* L20: */
+	    }
+/* L30: */
+	}
+    }
+
+    if (nofact || equil) {
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+/* Only the stored band of A (at most KD+1 entries per column) is */
+/* copied into AFB; AB itself is preserved for DLANSB/DPBRFS below. */
+	if (upper) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+		i__2 = j - *kd;
+		j1 = max(i__2,1);
+		i__2 = j - j1 + 1;
+		dcopy_(&i__2, &ab[*kd + 1 - j + j1 + j * ab_dim1], &c__1, &
+			afb[*kd + 1 - j + j1 + j * afb_dim1], &c__1);
+/* L40: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		i__2 = j + *kd;
+		j2 = min(i__2,*n);
+		i__2 = j2 - j + 1;
+		dcopy_(&i__2, &ab[j * ab_dim1 + 1], &c__1, &afb[j * afb_dim1
+			+ 1], &c__1);
+/* L50: */
+	    }
+	}
+
+	dpbtrf_(uplo, n, kd, &afb[afb_offset], ldafb, info);
+
+/* Return if INFO is non-zero. */
+
+	if (*info > 0) {
+	    *rcond = 0.;
+	    return 0;
+	}
+    }
+
+/* Compute the norm of the matrix A. */
+
+    anorm = dlansb_("1", uplo, n, kd, &ab[ab_offset], ldab, &work[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+    dpbcon_(uplo, n, kd, &afb[afb_offset], ldafb, &anorm, rcond, &work[1], &
+	    iwork[1], info);
+
+/* Compute the solution matrix X. */
+
+    dlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    dpbtrs_(uplo, n, kd, nrhs, &afb[afb_offset], ldafb, &x[x_offset], ldx,
+	    info);
+
+/* Use iterative refinement to improve the computed solution and */
+/* compute error bounds and backward error estimates for it. */
+
+    dpbrfs_(uplo, n, kd, nrhs, &ab[ab_offset], ldab, &afb[afb_offset], ldafb,
+	     &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[1]
+, &iwork[1], info);
+
+/* Transform the solution matrix X to a solution of the original */
+/* system. */
+
+    if (rcequ) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *n;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		x[i__ + j * x_dim1] = s[i__] * x[i__ + j * x_dim1];
+/* L60: */
+	    }
+/* L70: */
+	}
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] /= scond;
+/* L80: */
+	}
+    }
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+    if (*rcond < dlamch_("Epsilon")) {
+	*info = *n + 1;
+    }
+
+    return 0;
+
+/* End of DPBSVX */
+
+} /* dpbsvx_ */
diff --git a/contrib/libs/clapack/dpbtf2.c b/contrib/libs/clapack/dpbtf2.c
new file mode 100644
index 0000000000..893724168c
--- /dev/null
+++ b/contrib/libs/clapack/dpbtf2.c
@@ -0,0 +1,244 @@
+/* dpbtf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b8 = -1.;
+static integer c__1 = 1;
+
+/* Subroutine */ int dpbtf2_(char *uplo, integer *n, integer *kd, doublereal *
+	ab, integer *ldab, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer j, kn;
+    doublereal ajj;
+    integer kld;
+    extern /* Subroutine */ int dsyr_(char *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *), dscal_(
+	    integer *, doublereal *, doublereal *, integer *);
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPBTF2 computes the Cholesky factorization of a real symmetric */
+/* positive definite band matrix A. */
+
+/* The factorization has the form */
+/* A = U' * U , if UPLO = 'U', or */
+/* A = L * L', if UPLO = 'L', */
+/* where U is an upper triangular matrix, U' is the transpose of U, and */
+/* L is lower triangular. */
+
+/* This is the unblocked version of the algorithm, calling Level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of super-diagonals of the matrix A if UPLO = 'U', */
+/* or the number of sub-diagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, if INFO = 0, the triangular factor U or L from the */
+/* Cholesky factorization A = U'*U or A = L*L' of the band */
+/* matrix A, in the same storage format as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, the leading minor of order k is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* N = 6, KD = 2, and UPLO = 'U': */
+
+/* On entry: On exit: */
+
+/* * * a13 a24 a35 a46 * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+
+/* Similarly, if UPLO = 'L' the format of A is as follows: */
+
+/* On entry: On exit: */
+
+/* a11 a22 a33 a44 a55 a66 l11 l22 l33 l44 l55 l66 */
+/* a21 a32 a43 a54 a65 * l21 l32 l43 l54 l65 * */
+/* a31 a42 a53 a64 * * l31 l42 l53 l64 * * */
+
+/* Array elements marked * are not used by the routine. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*ldab < *kd + 1) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPBTF2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* KLD is the increment between consecutive elements of a matrix row in */
+/* band storage; it is passed as the stride to DSCAL/DSYR below and is */
+/* clamped to at least 1 so it stays a valid increment when LDAB = 1. */
+/* Computing MAX */
+    i__1 = 1, i__2 = *ldab - 1;
+    kld = max(i__1,i__2);
+
+    if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+
+/* Compute U(J,J) and test for non-positive-definiteness. */
+
+	    ajj = ab[*kd + 1 + j * ab_dim1];
+	    if (ajj <= 0.) {
+		goto L30;
+	    }
+	    ajj = sqrt(ajj);
+	    ab[*kd + 1 + j * ab_dim1] = ajj;
+
+/* Compute elements J+1:J+KN of row J and update the */
+/* trailing submatrix within the band. */
+
+/* Computing MIN */
+	    i__2 = *kd, i__3 = *n - j;
+	    kn = min(i__2,i__3);
+	    if (kn > 0) {
+		d__1 = 1. / ajj;
+		dscal_(&kn, &d__1, &ab[*kd + (j + 1) * ab_dim1], &kld);
+		dsyr_("Upper", &kn, &c_b8, &ab[*kd + (j + 1) * ab_dim1], &kld,
+			 &ab[*kd + 1 + (j + 1) * ab_dim1], &kld);
+	    }
+/* L10: */
+	}
+    } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+
+/* Compute L(J,J) and test for non-positive-definiteness. */
+
+	    ajj = ab[j * ab_dim1 + 1];
+	    if (ajj <= 0.) {
+		goto L30;
+	    }
+	    ajj = sqrt(ajj);
+	    ab[j * ab_dim1 + 1] = ajj;
+
+/* Compute elements J+1:J+KN of column J and update the */
+/* trailing submatrix within the band. */
+
+/* Computing MIN */
+	    i__2 = *kd, i__3 = *n - j;
+	    kn = min(i__2,i__3);
+	    if (kn > 0) {
+		d__1 = 1. / ajj;
+		dscal_(&kn, &d__1, &ab[j * ab_dim1 + 2], &c__1);
+		dsyr_("Lower", &kn, &c_b8, &ab[j * ab_dim1 + 2], &c__1, &ab[(
+			j + 1) * ab_dim1 + 1], &kld);
+	    }
+/* L20: */
+	}
+    }
+    return 0;
+
+/* The leading minor of order J is not positive definite (AJJ <= 0). */
+L30:
+    *info = j;
+    return 0;
+
+/* End of DPBTF2 */
+
+} /* dpbtf2_ */
diff --git a/contrib/libs/clapack/dpbtrf.c b/contrib/libs/clapack/dpbtrf.c
new file mode 100644
index 0000000000..36a8a254b7
--- /dev/null
+++ b/contrib/libs/clapack/dpbtrf.c
@@ -0,0 +1,471 @@
+/* dpbtrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b18 = 1.;
+static doublereal c_b21 = -1.;
+static integer c__33 = 33;
+
+/* Subroutine */ int dpbtrf_(char *uplo, integer *n, integer *kd, doublereal *
+	ab, integer *ldab, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, j, i2, i3, ib, nb, ii, jj;
+    /* WORK is a 33x32 scratch block (column stride 33, hence c__33 and the */
+    /* "ii + jj * 33 - 34" indexing below); it holds the triangle of the */
+    /* band that lies outside AB during the blocked updates. */
+    doublereal work[1056] /* was [33][32] */;
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+	    integer *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *), dsyrk_(
+	    char *, char *, integer *, integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *),
+	    dpbtf2_(char *, integer *, integer *, doublereal *, integer *,
+	    integer *), dpotf2_(char *, integer *, doublereal *,
+	    integer *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPBTRF computes the Cholesky factorization of a real symmetric */
+/* positive definite band matrix A. */
+
+/* The factorization has the form */
+/* A = U**T * U, if UPLO = 'U', or */
+/* A = L * L**T, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, if INFO = 0, the triangular factor U or L from the */
+/* Cholesky factorization A = U**T*U or A = L*L**T of the band */
+/* matrix A, in the same storage format as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* N = 6, KD = 2, and UPLO = 'U': */
+
+/* On entry: On exit: */
+
+/* * * a13 a24 a35 a46 * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+
+/* Similarly, if UPLO = 'L' the format of A is as follows: */
+
+/* On entry: On exit: */
+
+/* a11 a22 a33 a44 a55 a66 l11 l22 l33 l44 l55 l66 */
+/* a21 a32 a43 a54 a65 * l21 l32 l43 l54 l65 * */
+/* a31 a42 a53 a64 * * l31 l42 l53 l64 * * */
+
+/* Array elements marked * are not used by the routine. */
+
+/* Contributed by */
+/* Peter Mayes and Giuseppe Radicati, IBM ECSEC, Rome, March 23, 1989 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*ldab < *kd + 1) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPBTRF", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Determine the block size for this environment */
+
+    nb = ilaenv_(&c__1, "DPBTRF", uplo, n, kd, &c_n1, &c_n1);
+
+/* The block size must not exceed the semi-bandwidth KD, and must not */
+/* exceed the limit set by the size of the local array WORK. */
+
+    nb = min(nb,32);
+
+    if (nb <= 1 || nb > *kd) {
+
+/* Use unblocked code */
+
+	dpbtf2_(uplo, n, kd, &ab[ab_offset], ldab, info);
+    } else {
+
+/* Use blocked code */
+
+	if (lsame_(uplo, "U")) {
+
+/* Compute the Cholesky factorization of a symmetric band */
+/* matrix, given the upper triangle of the matrix in band */
+/* storage. */
+
+/* Zero the upper triangle of the work array. */
+
+	    i__1 = nb;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    work[i__ + j * 33 - 34] = 0.;
+/* L10: */
+		}
+/* L20: */
+	    }
+
+/* Process the band matrix one diagonal block at a time. */
+
+	    i__1 = *n;
+	    i__2 = nb;
+	    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+		i__3 = nb, i__4 = *n - i__ + 1;
+		ib = min(i__3,i__4);
+
+/* Factorize the diagonal block */
+
+		i__3 = *ldab - 1;
+		dpotf2_(uplo, &ib, &ab[*kd + 1 + i__ * ab_dim1], &i__3, &ii);
+		if (ii != 0) {
+		    *info = i__ + ii - 1;
+		    goto L150;
+		}
+		if (i__ + ib <= *n) {
+
+/* Update the relevant part of the trailing submatrix. */
+/* If A11 denotes the diagonal block which has just been */
+/* factorized, then we need to update the remaining */
+/* blocks in the diagram: */
+
+/* A11 A12 A13 */
+/* A22 A23 */
+/* A33 */
+
+/* The numbers of rows and columns in the partitioning */
+/* are IB, I2, I3 respectively. The blocks A12, A22 and */
+/* A23 are empty if IB = KD. The upper triangle of A13 */
+/* lies outside the band. */
+
+/* Computing MIN */
+		    i__3 = *kd - ib, i__4 = *n - i__ - ib + 1;
+		    i2 = min(i__3,i__4);
+/* Computing MIN */
+		    i__3 = ib, i__4 = *n - i__ - *kd + 1;
+		    i3 = min(i__3,i__4);
+
+		    if (i2 > 0) {
+
+/* Update A12 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			dtrsm_("Left", "Upper", "Transpose", "Non-unit", &ib,
+				&i2, &c_b18, &ab[*kd + 1 + i__ * ab_dim1], &
+				i__3, &ab[*kd + 1 - ib + (i__ + ib) * ab_dim1]
+, &i__4);
+
+/* Update A22 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			dsyrk_("Upper", "Transpose", &i2, &ib, &c_b21, &ab[*
+				kd + 1 - ib + (i__ + ib) * ab_dim1], &i__3, &
+				c_b18, &ab[*kd + 1 + (i__ + ib) * ab_dim1], &
+				i__4);
+		    }
+
+		    if (i3 > 0) {
+
+/* Copy the lower triangle of A13 into the work array. */
+
+			i__3 = i3;
+			for (jj = 1; jj <= i__3; ++jj) {
+			    i__4 = ib;
+			    for (ii = jj; ii <= i__4; ++ii) {
+				work[ii + jj * 33 - 34] = ab[ii - jj + 1 + (
+					jj + i__ + *kd - 1) * ab_dim1];
+/* L30: */
+			    }
+/* L40: */
+			}
+
+/* Update A13 (in the work array). */
+
+			i__3 = *ldab - 1;
+			dtrsm_("Left", "Upper", "Transpose", "Non-unit", &ib,
+				&i3, &c_b18, &ab[*kd + 1 + i__ * ab_dim1], &
+				i__3, work, &c__33);
+
+/* Update A23 */
+
+			if (i2 > 0) {
+			    i__3 = *ldab - 1;
+			    i__4 = *ldab - 1;
+			    dgemm_("Transpose", "No Transpose", &i2, &i3, &ib,
+				     &c_b21, &ab[*kd + 1 - ib + (i__ + ib) *
+				    ab_dim1], &i__3, work, &c__33, &c_b18, &
+				    ab[ib + 1 + (i__ + *kd) * ab_dim1], &i__4);
+			}
+
+/* Update A33 */
+
+			i__3 = *ldab - 1;
+			dsyrk_("Upper", "Transpose", &i3, &ib, &c_b21, work, &
+				c__33, &c_b18, &ab[*kd + 1 + (i__ + *kd) *
+				ab_dim1], &i__3);
+
+/* Copy the lower triangle of A13 back into place. */
+
+			i__3 = i3;
+			for (jj = 1; jj <= i__3; ++jj) {
+			    i__4 = ib;
+			    for (ii = jj; ii <= i__4; ++ii) {
+				ab[ii - jj + 1 + (jj + i__ + *kd - 1) *
+					ab_dim1] = work[ii + jj * 33 - 34];
+/* L50: */
+			    }
+/* L60: */
+			}
+		    }
+		}
+/* L70: */
+	    }
+	} else {
+
+/* Compute the Cholesky factorization of a symmetric band */
+/* matrix, given the lower triangle of the matrix in band */
+/* storage. */
+
+/* Zero the lower triangle of the work array. */
+
+	    i__2 = nb;
+	    for (j = 1; j <= i__2; ++j) {
+		i__1 = nb;
+		for (i__ = j + 1; i__ <= i__1; ++i__) {
+		    work[i__ + j * 33 - 34] = 0.;
+/* L80: */
+		}
+/* L90: */
+	    }
+
+/* Process the band matrix one diagonal block at a time. */
+
+	    i__2 = *n;
+	    i__1 = nb;
+	    for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+/* Computing MIN */
+		i__3 = nb, i__4 = *n - i__ + 1;
+		ib = min(i__3,i__4);
+
+/* Factorize the diagonal block */
+
+		i__3 = *ldab - 1;
+		dpotf2_(uplo, &ib, &ab[i__ * ab_dim1 + 1], &i__3, &ii);
+		if (ii != 0) {
+		    *info = i__ + ii - 1;
+		    goto L150;
+		}
+		if (i__ + ib <= *n) {
+
+/* Update the relevant part of the trailing submatrix. */
+/* If A11 denotes the diagonal block which has just been */
+/* factorized, then we need to update the remaining */
+/* blocks in the diagram: */
+
+/* A11 */
+/* A21 A22 */
+/* A31 A32 A33 */
+
+/* The numbers of rows and columns in the partitioning */
+/* are IB, I2, I3 respectively. The blocks A21, A22 and */
+/* A32 are empty if IB = KD. The lower triangle of A31 */
+/* lies outside the band. */
+
+/* Computing MIN */
+		    i__3 = *kd - ib, i__4 = *n - i__ - ib + 1;
+		    i2 = min(i__3,i__4);
+/* Computing MIN */
+		    i__3 = ib, i__4 = *n - i__ - *kd + 1;
+		    i3 = min(i__3,i__4);
+
+		    if (i2 > 0) {
+
+/* Update A21 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			dtrsm_("Right", "Lower", "Transpose", "Non-unit", &i2,
+				&ib, &c_b18, &ab[i__ * ab_dim1 + 1], &i__3, &
+				ab[ib + 1 + i__ * ab_dim1], &i__4);
+
+/* Update A22 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			dsyrk_("Lower", "No Transpose", &i2, &ib, &c_b21, &ab[
+				ib + 1 + i__ * ab_dim1], &i__3, &c_b18, &ab[(
+				i__ + ib) * ab_dim1 + 1], &i__4);
+		    }
+
+		    if (i3 > 0) {
+
+/* Copy the upper triangle of A31 into the work array. */
+
+			i__3 = ib;
+			for (jj = 1; jj <= i__3; ++jj) {
+			    i__4 = min(jj,i3);
+			    for (ii = 1; ii <= i__4; ++ii) {
+				work[ii + jj * 33 - 34] = ab[*kd + 1 - jj +
+					ii + (jj + i__ - 1) * ab_dim1];
+/* L100: */
+			    }
+/* L110: */
+			}
+
+/* Update A31 (in the work array). */
+
+			i__3 = *ldab - 1;
+			dtrsm_("Right", "Lower", "Transpose", "Non-unit", &i3,
+				&ib, &c_b18, &ab[i__ * ab_dim1 + 1], &i__3,
+				work, &c__33);
+
+/* Update A32 */
+
+			if (i2 > 0) {
+			    i__3 = *ldab - 1;
+			    i__4 = *ldab - 1;
+			    dgemm_("No transpose", "Transpose", &i3, &i2, &ib,
+				     &c_b21, work, &c__33, &ab[ib + 1 + i__ *
+				    ab_dim1], &i__3, &c_b18, &ab[*kd + 1 - ib
+				    + (i__ + ib) * ab_dim1], &i__4);
+			}
+
+/* Update A33 */
+
+			i__3 = *ldab - 1;
+			dsyrk_("Lower", "No Transpose", &i3, &ib, &c_b21,
+				work, &c__33, &c_b18, &ab[(i__ + *kd) *
+				ab_dim1 + 1], &i__3);
+
+/* Copy the upper triangle of A31 back into place. */
+
+			i__3 = ib;
+			for (jj = 1; jj <= i__3; ++jj) {
+			    i__4 = min(jj,i3);
+			    for (ii = 1; ii <= i__4; ++ii) {
+				ab[*kd + 1 - jj + ii + (jj + i__ - 1) *
+					ab_dim1] = work[ii + jj * 33 - 34];
+/* L120: */
+			    }
+/* L130: */
+			}
+		    }
+		}
+/* L140: */
+	    }
+	}
+    }
+    return 0;
+
+/* INFO was already set above when DPOTF2 reported a non-positive- */
+/* definite leading minor of a diagonal block. */
+L150:
+    return 0;
+
+/* End of DPBTRF */
+
+} /* dpbtrf_ */
diff --git a/contrib/libs/clapack/dpbtrs.c b/contrib/libs/clapack/dpbtrs.c
new file mode 100644
index 0000000000..b850b9f9e9
--- /dev/null
+++ b/contrib/libs/clapack/dpbtrs.c
@@ -0,0 +1,184 @@
+/* dpbtrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dpbtrs_(char *uplo, integer *n, integer *kd, integer *
+	nrhs, doublereal *ab, integer *ldab, doublereal *b, integer *ldb,
+	integer *info)
+{
+/* NOTE(review): machine-generated by f2c from dpbtrs.f. Fix defects in the */
+/* Fortran source and regenerate; do not hand-edit the translated C. */
+
+    /* System generated locals */
+    integer ab_dim1, ab_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    integer j;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dtbsv_(char *, char *, char *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, integer *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPBTRS solves a system of linear equations A*X = B with a symmetric */
+/* positive definite band matrix A using the Cholesky factorization */
+/* A = U**T*U or A = L*L**T computed by DPBTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO    (input) CHARACTER*1 */
+/* = 'U':  Upper triangular factor stored in AB; */
+/* = 'L':  Lower triangular factor stored in AB. */
+
+/* N       (input) INTEGER */
+/* The order of the matrix A.  N >= 0. */
+
+/* KD      (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'.  KD >= 0. */
+
+/* NRHS    (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B.  NRHS >= 0. */
+
+/* AB      (input) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**T*U or A = L*L**T of the band matrix A, stored in the */
+/* first KD+1 rows of the array.  The j-th column of U or L is */
+/* stored in the j-th column of the array AB as follows: */
+/* if UPLO ='U', AB(kd+1+i-j,j) = U(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO ='L', AB(1+i-j,j)    = L(i,j) for j<=i<=min(n,j+kd). */
+
+/* LDAB    (input) INTEGER */
+/* The leading dimension of the array AB.  LDAB >= KD+1. */
+
+/* B       (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB     (input) INTEGER */
+/* The leading dimension of the array B.  LDB >= max(1,N). */
+
+/* INFO    (output) INTEGER */
+/* = 0:  successful exit */
+/* < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* (f2c idiom: shift the array base pointers by one leading dimension plus */
+/* one so the 1-based Fortran indexing of the original can be kept below.) */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (*ldab < *kd + 1) {
+	*info = -6;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPBTRS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+/* Each right-hand side column is solved independently with two banded */
+/* triangular solves (DTBSV). */
+
+    if (upper) {
+
+/* Solve A*X = B where A = U'*U. */
+
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+
+/* Solve U'*X = B, overwriting B with X. */
+
+	    dtbsv_("Upper", "Transpose", "Non-unit", n, kd, &ab[ab_offset],
+		    ldab, &b[j * b_dim1 + 1], &c__1);
+
+/* Solve U*X = B, overwriting B with X. */
+
+	    dtbsv_("Upper", "No transpose", "Non-unit", n, kd, &ab[ab_offset],
+		    ldab, &b[j * b_dim1 + 1], &c__1);
+/* L10: */
+	}
+    } else {
+
+/* Solve A*X = B where A = L*L'. */
+
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+
+/* Solve L*X = B, overwriting B with X. */
+
+	    dtbsv_("Lower", "No transpose", "Non-unit", n, kd, &ab[ab_offset],
+		    ldab, &b[j * b_dim1 + 1], &c__1);
+
+/* Solve L'*X = B, overwriting B with X. */
+
+	    dtbsv_("Lower", "Transpose", "Non-unit", n, kd, &ab[ab_offset],
+		    ldab, &b[j * b_dim1 + 1], &c__1);
+/* L20: */
+	}
+    }
+
+    return 0;
+
+/* End of DPBTRS */
+
+} /* dpbtrs_ */
diff --git a/contrib/libs/clapack/dpftrf.c b/contrib/libs/clapack/dpftrf.c
new file mode 100644
index 0000000000..d7a5300e63
--- /dev/null
+++ b/contrib/libs/clapack/dpftrf.c
@@ -0,0 +1,452 @@
+/* dpftrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b12 = 1.;
+static doublereal c_b15 = -1.;
+
+/* Subroutine */ int dpftrf_(char *transr, char *uplo, integer *n, doublereal
+	*a, integer *info)
+{
+/* NOTE(review): machine-generated by f2c from dpftrf.f. Fix defects in the */
+/* Fortran source and regenerate; do not hand-edit the translated C. */
+
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer k, n1, n2;
+    logical normaltransr;
+    extern logical lsame_(char *, char *);
+    logical lower;
+    extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+	    integer *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *), dsyrk_(
+	    char *, char *, integer *, integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *),
+	    xerbla_(char *, integer *);
+    logical nisodd;
+    extern /* Subroutine */ int dpotrf_(char *, integer *, doublereal *,
+	    integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPFTRF computes the Cholesky factorization of a real symmetric */
+/* positive definite matrix A. */
+
+/* The factorization has the form */
+/* A = U**T * U,  if UPLO = 'U', or */
+/* A = L  * L**T,  if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular. */
+
+/* This is the block version of the algorithm, calling Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR    (input) CHARACTER */
+/* = 'N':  The Normal TRANSR of RFP A is stored; */
+/* = 'T':  The Transpose TRANSR of RFP A is stored. */
+
+/* UPLO    (input) CHARACTER */
+/* = 'U':  Upper triangle of RFP A is stored; */
+/* = 'L':  Lower triangle of RFP A is stored. */
+
+/* N       (input) INTEGER */
+/* The order of the matrix A.  N >= 0. */
+
+/* A       (input/output) DOUBLE PRECISION array, dimension ( N*(N+1)/2 ); */
+/* On entry, the symmetric matrix A in RFP format. RFP format is */
+/* described by TRANSR, UPLO, and N as follows: If TRANSR = 'N' */
+/* then RFP A is (0:N,0:k-1) when N is even; k=N/2. RFP A is */
+/* (0:N-1,0:k) when N is odd; k=N/2. IF TRANSR = 'T' then RFP is */
+/* the transpose of RFP A as defined when */
+/* TRANSR = 'N'. The contents of RFP A are defined by UPLO as */
+/* follows: If UPLO = 'U' the RFP A contains the NT elements of */
+/* upper packed A. If UPLO = 'L' the RFP A contains the elements */
+/* of lower packed A. The LDA of RFP A is (N+1)/2 when TRANSR = */
+/* 'T'. When TRANSR is 'N' the LDA is N+1 when N is even and N */
+/* is odd. See the Note below for more details. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization RFP A = U**T*U or RFP A = L*L**T. */
+
+/* INFO    (output) INTEGER */
+/* = 0:  successful exit */
+/* < 0:  if INFO = -i, the i-th argument had an illegal value */
+/* > 0:  if INFO = i, the leading minor of order i is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* Notes */
+/* ===== */
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* even. We give an example where N = 6. */
+
+/* AP is Upper             AP is Lower */
+
+/* 00 01 02 03 04 05       00 */
+/* 11 12 13 14 15       10 11 */
+/* 22 23 24 25       20 21 22 */
+/* 33 34 35       30 31 32 33 */
+/* 44 45       40 41 42 43 44 */
+/* 55       50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* the transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* the transpose of the last three columns of AP lower. */
+/* This covers the case N even and TRANSR = 'N'. */
+
+/* RFP A                   RFP A */
+
+/* 03 04 05                33 43 53 */
+/* 13 14 15                00 44 54 */
+/* 23 24 25                10 11 55 */
+/* 33 34 35                20 21 22 */
+/* 00 44 45                30 31 32 */
+/* 01 11 55                40 41 42 */
+/* 02 12 22                50 51 52 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A                   RFP A */
+
+/* 03 13 23 33 00 01 02    33 00 10 20 30 40 50 */
+/* 04 14 24 34 44 11 12    43 44 11 21 31 41 51 */
+/* 05 15 25 35 45 55 22    53 54 55 22 32 42 52 */
+
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* odd. We give an example where N = 5. */
+
+/* AP is Upper             AP is Lower */
+
+/* 00 01 02 03 04          00 */
+/* 11 12 13 14          10 11 */
+/* 22 23 24          20 21 22 */
+/* 33 34          30 31 32 33 */
+/* 44          40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* the transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* the transpose of the last two columns of AP lower. */
+/* This covers the case N odd and TRANSR = 'N'. */
+
+/* RFP A                   RFP A */
+
+/* 02 03 04                00 33 43 */
+/* 12 13 14                10 11 44 */
+/* 22 23 24                20 21 22 */
+/* 00 33 34                30 31 32 */
+/* 01 11 44                40 41 42 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+/* RFP A                   RFP A */
+
+/* 02 12 22 00 01          00 10 20 30 40 50 */
+/* 03 13 23 33 11          33 11 21 31 41 51 */
+/* 04 14 24 34 44          43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    *info = 0;
+    normaltransr = lsame_(transr, "N");
+    lower = lsame_(uplo, "L");
+    if (! normaltransr && ! lsame_(transr, "T")) {
+	*info = -1;
+    } else if (! lower && ! lsame_(uplo, "U")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPFTRF", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* If N is odd, set NISODD = .TRUE. */
+/* If N is even, set K = N/2 and NISODD = .FALSE. */
+
+    if (*n % 2 == 0) {
+	k = *n / 2;
+	nisodd = FALSE_;
+    } else {
+	nisodd = TRUE_;
+    }
+
+/* Set N1 and N2 depending on LOWER */
+
+    if (lower) {
+	n2 = *n / 2;
+	n1 = *n - n2;
+    } else {
+	n1 = *n / 2;
+	n2 = *n - n1;
+    }
+
+/* start execution: there are eight cases */
+/* (odd/even N x normal/transposed TRANSR x lower/upper UPLO; each case */
+/* factors T1, updates S via DTRSM, downdates T2 via DSYRK, factors T2) */
+
+    if (nisodd) {
+
+/* N is odd */
+
+	if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:n1-1) ) */
+/* T1 -> a(0,0), T2 -> a(0,1), S -> a(n1,0) */
+/* T1 -> a(0), T2 -> a(n), S -> a(n1) */
+
+		dpotrf_("L", &n1, a, n, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		dtrsm_("R", "L", "T", "N", &n2, &n1, &c_b12, a, n, &a[n1], n);
+		dsyrk_("U", "N", &n2, &n1, &c_b15, &a[n1], n, &c_b12, &a[*n],
+			n);
+		dpotrf_("U", &n2, &a[*n], n, info);
+		if (*info > 0) {
+		    *info += n1;
+		}
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:n2-1) */
+/* T1 -> a(n1+1,0), T2 -> a(n1,0), S -> a(0,0) */
+/* T1 -> a(n2), T2 -> a(n1), S -> a(0) */
+
+		dpotrf_("L", &n1, &a[n2], n, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		dtrsm_("L", "L", "N", "N", &n1, &n2, &c_b12, &a[n2], n, a, n);
+		dsyrk_("U", "T", &n2, &n1, &c_b15, a, n, &c_b12, &a[n1], n);
+		dpotrf_("U", &n2, &a[n1], n, info);
+		if (*info > 0) {
+		    *info += n1;
+		}
+
+	    }
+
+	} else {
+
+/* N is odd and TRANSR = 'T' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is odd */
+/* T1 -> A(0,0) , T2 -> A(1,0) , S -> A(0,n1) */
+/* T1 -> a(0+0) , T2 -> a(1+0) , S -> a(0+n1*n1); lda=n1 */
+
+		dpotrf_("U", &n1, a, &n1, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		dtrsm_("L", "U", "T", "N", &n1, &n2, &c_b12, a, &n1, &a[n1 *
+			n1], &n1);
+		dsyrk_("L", "T", &n2, &n1, &c_b15, &a[n1 * n1], &n1, &c_b12, &
+			a[1], &n1);
+		dpotrf_("L", &n2, &a[1], &n1, info);
+		if (*info > 0) {
+		    *info += n1;
+		}
+
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is odd */
+/* T1 -> A(0,n1+1), T2 -> A(0,n1), S -> A(0,0) */
+/* T1 -> a(n2*n2), T2 -> a(n1*n2), S -> a(0); lda = n2 */
+
+		dpotrf_("U", &n1, &a[n2 * n2], &n2, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		dtrsm_("R", "U", "N", "N", &n2, &n1, &c_b12, &a[n2 * n2], &n2,
+			a, &n2);
+		dsyrk_("L", "N", &n2, &n1, &c_b15, a, &n2, &c_b12, &a[n1 * n2]
+, &n2);
+		dpotrf_("L", &n2, &a[n1 * n2], &n2, info);
+		if (*info > 0) {
+		    *info += n1;
+		}
+
+	    }
+
+	}
+
+    } else {
+
+/* N is even */
+
+	if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/* T1 -> a(1), T2 -> a(0), S -> a(k+1) */
+
+		i__1 = *n + 1;
+		dpotrf_("L", &k, &a[1], &i__1, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		dtrsm_("R", "L", "T", "N", &k, &k, &c_b12, &a[1], &i__1, &a[k
+			+ 1], &i__2);
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		dsyrk_("U", "N", &k, &k, &c_b15, &a[k + 1], &i__1, &c_b12, a,
+			&i__2);
+		i__1 = *n + 1;
+		dpotrf_("U", &k, a, &i__1, info);
+		if (*info > 0) {
+		    *info += k;
+		}
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(k+1,0) ,  T2 -> a(k,0),   S -> a(0,0) */
+/* T1 -> a(k+1), T2 -> a(k), S -> a(0) */
+
+		i__1 = *n + 1;
+		dpotrf_("L", &k, &a[k + 1], &i__1, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		dtrsm_("L", "L", "N", "N", &k, &k, &c_b12, &a[k + 1], &i__1,
+			a, &i__2);
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		dsyrk_("U", "T", &k, &k, &c_b15, a, &i__1, &c_b12, &a[k], &
+			i__2);
+		i__1 = *n + 1;
+		dpotrf_("U", &k, &a[k], &i__1, info);
+		if (*info > 0) {
+		    *info += k;
+		}
+
+	    }
+
+	} else {
+
+/* N is even and TRANSR = 'T' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,1), T2 -> B(0,0), S -> B(0,k+1) */
+/* T1 -> a(0+k), T2 -> a(0+0), S -> a(0+k*(k+1)); lda=k */
+
+		dpotrf_("U", &k, &a[k], &k, info);
+		if (*info > 0) {
+		    return 0;
+		}
+/* In this branch N is even and LOWER, so n2 = n/2 = k and n1 = n - n2 == k; */
+/* passing &n1 below therefore supplies lda = k (not a mismatch). */
+		dtrsm_("L", "U", "T", "N", &k, &k, &c_b12, &a[k], &n1, &a[k *
+			(k + 1)], &k);
+		dsyrk_("L", "T", &k, &k, &c_b15, &a[k * (k + 1)], &k, &c_b12,
+			a, &k);
+		dpotrf_("L", &k, a, &k, info);
+		if (*info > 0) {
+		    *info += k;
+		}
+
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,k+1),     T2 -> B(0,k),   S -> B(0,0) */
+/* T1 -> a(0+k*(k+1)), T2 -> a(0+k*k), S -> a(0+0)); lda=k */
+
+		dpotrf_("U", &k, &a[k * (k + 1)], &k, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		dtrsm_("R", "U", "N", "N", &k, &k, &c_b12, &a[k * (k + 1)], &
+			k, a, &k);
+		dsyrk_("L", "N", &k, &k, &c_b15, a, &k, &c_b12, &a[k * k], &k);
+		dpotrf_("L", &k, &a[k * k], &k, info);
+		if (*info > 0) {
+		    *info += k;
+		}
+
+	    }
+
+	}
+
+    }
+
+    return 0;
+
+/* End of DPFTRF */
+
+} /* dpftrf_ */
diff --git a/contrib/libs/clapack/dpftri.c b/contrib/libs/clapack/dpftri.c
new file mode 100644
index 0000000000..c7331f6609
--- /dev/null
+++ b/contrib/libs/clapack/dpftri.c
@@ -0,0 +1,403 @@
+/* dpftri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b11 = 1.;
+
+/* Subroutine */ int dpftri_(char *transr, char *uplo, integer *n, doublereal
+	*a, integer *info)
+{
+/* NOTE(review): machine-generated by f2c from dpftri.f. Fix defects in the */
+/* Fortran source and regenerate; do not hand-edit the translated C. */
+
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer k, n1, n2;
+    logical normaltransr;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dtrmm_(char *, char *, char *, char *,
+	    integer *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *);
+    logical lower;
+    extern /* Subroutine */ int dsyrk_(char *, char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *), xerbla_(char *, integer *);
+    logical nisodd;
+    extern /* Subroutine */ int dlauum_(char *, integer *, doublereal *,
+	    integer *, integer *), dtftri_(char *, char *, char *,
+	    integer *, doublereal *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPFTRI computes the inverse of a (real) symmetric positive definite */
+/* matrix A using the Cholesky factorization A = U**T*U or A = L*L**T */
+/* computed by DPFTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR    (input) CHARACTER */
+/* = 'N':  The Normal TRANSR of RFP A is stored; */
+/* = 'T':  The Transpose TRANSR of RFP A is stored. */
+
+/* UPLO    (input) CHARACTER */
+/* = 'U':  Upper triangle of A is stored; */
+/* = 'L':  Lower triangle of A is stored. */
+
+/* N       (input) INTEGER */
+/* The order of the matrix A.  N >= 0. */
+
+/* A       (input/output) DOUBLE PRECISION array, dimension ( N*(N+1)/2 ) */
+/* On entry, the symmetric matrix A in RFP format. RFP format is */
+/* described by TRANSR, UPLO, and N as follows: If TRANSR = 'N' */
+/* then RFP A is (0:N,0:k-1) when N is even; k=N/2. RFP A is */
+/* (0:N-1,0:k) when N is odd; k=N/2. IF TRANSR = 'T' then RFP is */
+/* the transpose of RFP A as defined when */
+/* TRANSR = 'N'. The contents of RFP A are defined by UPLO as */
+/* follows: If UPLO = 'U' the RFP A contains the nt elements of */
+/* upper packed A. If UPLO = 'L' the RFP A contains the elements */
+/* of lower packed A. The LDA of RFP A is (N+1)/2 when TRANSR = */
+/* 'T'. When TRANSR is 'N' the LDA is N+1 when N is even and N */
+/* is odd. See the Note below for more details. */
+
+/* On exit, the symmetric inverse of the original matrix, in the */
+/* same storage format. */
+
+/* INFO    (output) INTEGER */
+/* = 0:  successful exit */
+/* < 0:  if INFO = -i, the i-th argument had an illegal value */
+/* > 0:  if INFO = i, the (i,i) element of the factor U or L is */
+/* zero, and the inverse could not be computed. */
+
+/* Notes */
+/* ===== */
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* even. We give an example where N = 6. */
+
+/* AP is Upper             AP is Lower */
+
+/* 00 01 02 03 04 05       00 */
+/* 11 12 13 14 15       10 11 */
+/* 22 23 24 25       20 21 22 */
+/* 33 34 35       30 31 32 33 */
+/* 44 45       40 41 42 43 44 */
+/* 55       50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* the transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* the transpose of the last three columns of AP lower. */
+/* This covers the case N even and TRANSR = 'N'. */
+
+/* RFP A                   RFP A */
+
+/* 03 04 05                33 43 53 */
+/* 13 14 15                00 44 54 */
+/* 23 24 25                10 11 55 */
+/* 33 34 35                20 21 22 */
+/* 00 44 45                30 31 32 */
+/* 01 11 55                40 41 42 */
+/* 02 12 22                50 51 52 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A                   RFP A */
+
+/* 03 13 23 33 00 01 02    33 00 10 20 30 40 50 */
+/* 04 14 24 34 44 11 12    43 44 11 21 31 41 51 */
+/* 05 15 25 35 45 55 22    53 54 55 22 32 42 52 */
+
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* odd. We give an example where N = 5. */
+
+/* AP is Upper             AP is Lower */
+
+/* 00 01 02 03 04          00 */
+/* 11 12 13 14          10 11 */
+/* 22 23 24          20 21 22 */
+/* 33 34          30 31 32 33 */
+/* 44          40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* the transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* the transpose of the last two columns of AP lower. */
+/* This covers the case N odd and TRANSR = 'N'. */
+
+/* RFP A                   RFP A */
+
+/* 02 03 04                00 33 43 */
+/* 12 13 14                10 11 44 */
+/* 22 23 24                20 21 22 */
+/* 00 33 34                30 31 32 */
+/* 01 11 44                40 41 42 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+/* RFP A                   RFP A */
+
+/* 02 12 22 00 01          00 10 20 30 40 50 */
+/* 03 13 23 33 11          33 11 21 31 41 51 */
+/* 04 14 24 34 44          43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    *info = 0;
+    normaltransr = lsame_(transr, "N");
+    lower = lsame_(uplo, "L");
+    if (! normaltransr && ! lsame_(transr, "T")) {
+	*info = -1;
+    } else if (! lower && ! lsame_(uplo, "U")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPFTRI", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Invert the triangular Cholesky factor U or L. */
+
+    dtftri_(transr, uplo, "N", n, a, info);
+    if (*info > 0) {
+	return 0;
+    }
+
+/* If N is odd, set NISODD = .TRUE. */
+/* If N is even, set K = N/2 and NISODD = .FALSE. */
+
+    if (*n % 2 == 0) {
+	k = *n / 2;
+	nisodd = FALSE_;
+    } else {
+	nisodd = TRUE_;
+    }
+
+/* Set N1 and N2 depending on LOWER */
+
+    if (lower) {
+	n2 = *n / 2;
+	n1 = *n - n2;
+    } else {
+	n1 = *n / 2;
+	n2 = *n - n1;
+    }
+
+/* Start execution of triangular matrix multiply: inv(U)*inv(U)^C or */
+/* inv(L)^C*inv(L). There are eight cases. */
+/* (Each case combines DLAUUM on the two triangular tiles with DSYRK/DTRMM */
+/* on the rectangular tile of the RFP layout.) */
+
+    if (nisodd) {
+
+/* N is odd */
+
+	if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:N1-1) ) */
+/* T1 -> a(0,0), T2 -> a(0,1), S -> a(N1,0) */
+/* T1 -> a(0), T2 -> a(n), S -> a(N1) */
+
+		dlauum_("L", &n1, a, n, info);
+		dsyrk_("L", "T", &n1, &n2, &c_b11, &a[n1], n, &c_b11, a, n);
+		dtrmm_("L", "U", "N", "N", &n2, &n1, &c_b11, &a[*n], n, &a[n1]
+, n);
+		dlauum_("U", &n2, &a[*n], n, info);
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:N2-1) */
+/* T1 -> a(N1+1,0), T2 -> a(N1,0), S -> a(0,0) */
+/* T1 -> a(N2), T2 -> a(N1), S -> a(0) */
+
+		dlauum_("L", &n1, &a[n2], n, info);
+		dsyrk_("L", "N", &n1, &n2, &c_b11, a, n, &c_b11, &a[n2], n);
+		dtrmm_("R", "U", "T", "N", &n1, &n2, &c_b11, &a[n1], n, a, n);
+		dlauum_("U", &n2, &a[n1], n, info);
+
+	    }
+
+	} else {
+
+/* N is odd and TRANSR = 'T' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE, and N is odd */
+/* T1 -> a(0), T2 -> a(1), S -> a(0+N1*N1) */
+
+		dlauum_("U", &n1, a, &n1, info);
+		dsyrk_("U", "N", &n1, &n2, &c_b11, &a[n1 * n1], &n1, &c_b11,
+			a, &n1);
+		dtrmm_("R", "L", "N", "N", &n1, &n2, &c_b11, &a[1], &n1, &a[
+			n1 * n1], &n1);
+		dlauum_("L", &n2, &a[1], &n1, info);
+
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE, and N is odd */
+/* T1 -> a(0+N2*N2), T2 -> a(0+N1*N2), S -> a(0) */
+
+		dlauum_("U", &n1, &a[n2 * n2], &n2, info);
+		dsyrk_("U", "T", &n1, &n2, &c_b11, a, &n2, &c_b11, &a[n2 * n2]
+, &n2);
+		dtrmm_("L", "L", "T", "N", &n2, &n1, &c_b11, &a[n1 * n2], &n2,
+			a, &n2);
+		dlauum_("L", &n2, &a[n1 * n2], &n2, info);
+
+	    }
+
+	}
+
+    } else {
+
+/* N is even */
+
+	if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/* T1 -> a(1), T2 -> a(0), S -> a(k+1) */
+
+		i__1 = *n + 1;
+		dlauum_("L", &k, &a[1], &i__1, info);
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		dsyrk_("L", "T", &k, &k, &c_b11, &a[k + 1], &i__1, &c_b11, &a[
+			1], &i__2);
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		dtrmm_("L", "U", "N", "N", &k, &k, &c_b11, a, &i__1, &a[k + 1]
+, &i__2);
+		i__1 = *n + 1;
+		dlauum_("U", &k, a, &i__1, info);
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(k+1,0) ,  T2 -> a(k,0),   S -> a(0,0) */
+/* T1 -> a(k+1), T2 -> a(k), S -> a(0) */
+
+		i__1 = *n + 1;
+		dlauum_("L", &k, &a[k + 1], &i__1, info);
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		dsyrk_("L", "N", &k, &k, &c_b11, a, &i__1, &c_b11, &a[k + 1],
+			&i__2);
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		dtrmm_("R", "U", "T", "N", &k, &k, &c_b11, &a[k], &i__1, a, &
+			i__2);
+		i__1 = *n + 1;
+		dlauum_("U", &k, &a[k], &i__1, info);
+
+	    }
+
+	} else {
+
+/* N is even and TRANSR = 'T' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE, and N is even (see paper) */
+/* T1 -> B(0,1), T2 -> B(0,0), S -> B(0,k+1), */
+/* T1 -> a(0+k), T2 -> a(0+0), S -> a(0+k*(k+1)); lda=k */
+
+		dlauum_("U", &k, &a[k], &k, info);
+		dsyrk_("U", "N", &k, &k, &c_b11, &a[k * (k + 1)], &k, &c_b11,
+			&a[k], &k);
+		dtrmm_("R", "L", "N", "N", &k, &k, &c_b11, a, &k, &a[k * (k +
+			1)], &k);
+		dlauum_("L", &k, a, &k, info);
+
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE, and N is even (see paper) */
+/* T1 -> B(0,k+1), T2 -> B(0,k), S -> B(0,0), */
+/* T1 -> a(0+k*(k+1)), T2 -> a(0+k*k), S -> a(0+0)); lda=k */
+
+		dlauum_("U", &k, &a[k * (k + 1)], &k, info);
+		dsyrk_("U", "T", &k, &k, &c_b11, a, &k, &c_b11, &a[k * (k + 1)
+			], &k);
+		dtrmm_("L", "L", "T", "N", &k, &k, &c_b11, &a[k * k], &k, a, &
+			k);
+		dlauum_("L", &k, &a[k * k], &k, info);
+
+	    }
+
+	}
+
+    }
+
+    return 0;
+
+/* End of DPFTRI */
+
+} /* dpftri_ */
diff --git a/contrib/libs/clapack/dpftrs.c b/contrib/libs/clapack/dpftrs.c
new file mode 100644
index 0000000000..c0da3c5307
--- /dev/null
+++ b/contrib/libs/clapack/dpftrs.c
@@ -0,0 +1,240 @@
+/* dpftrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b10 = 1.;
+
+/* Subroutine */ int dpftrs_(char *transr, char *uplo, integer *n, integer *
+	nrhs, doublereal *a, doublereal *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    logical normaltransr;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dtfsm_(char *, char *, char *, char *, char *,
+	     integer *, integer *, doublereal *, doublereal *, doublereal *, 
+	    integer *);
+    logical lower;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPFTRS solves a system of linear equations A*X = B with a symmetric */
+/* positive definite matrix A using the Cholesky factorization */
+/* A = U**T*U or A = L*L**T computed by DPFTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': The Normal TRANSR of RFP A is stored; */
+/* = 'T': The Transpose TRANSR of RFP A is stored. */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': Upper triangle of RFP A is stored; */
+/* = 'L': Lower triangle of RFP A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension ( N*(N+1)/2 ). */
+/* The triangular factor U or L from the Cholesky factorization */
+/* of RFP A = U**T*U or RFP A = L*L**T, as computed by DPFTRF. */
+/* See note below for more details about RFP A. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Notes */
+/* ===== */
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* even. We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* the transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* the transpose of the last three columns of AP lower. */
+/* This covers the case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 03 04 05 33 43 53 */
+/* 13 14 15 00 44 54 */
+/* 23 24 25 10 11 55 */
+/* 33 34 35 20 21 22 */
+/* 00 44 45 30 31 32 */
+/* 01 11 55 40 41 42 */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* odd. We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* the transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* the transpose of the last two columns of AP lower. */
+/* This covers the case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 02 03 04 00 33 43 */
+/* 12 13 14 10 11 44 */
+/* 22 23 24 20 21 22 */
+/* 00 33 34 30 31 32 */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+/* RFP A RFP A */
+
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* f2c idiom: shift the base pointer so the Fortran-style 1-based */
+/* reference b[i + j*b_dim1] addresses B(i,j) directly. */
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    normaltransr = lsame_(transr, "N");
+    lower = lsame_(uplo, "L");
+    if (! normaltransr && ! lsame_(transr, "T")) {
+	*info = -1;
+    } else if (! lower && ! lsame_(uplo, "U")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPFTRS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+/* start execution: there are two triangular solves */
+
+    if (lower) {
+/* A = L*L**T: solve L*Y = B, then L**T*X = Y. Both triangular solves */
+/* operate on A in RFP storage via DTFSM; B is overwritten in place. */
+	dtfsm_(transr, "L", uplo, "N", "N", n, nrhs, &c_b10, a, &b[b_offset], 
+		ldb);
+	dtfsm_(transr, "L", uplo, "T", "N", n, nrhs, &c_b10, a, &b[b_offset], 
+		ldb);
+    } else {
+/* A = U**T*U: solve U**T*Y = B, then U*X = Y. */
+	dtfsm_(transr, "L", uplo, "T", "N", n, nrhs, &c_b10, a, &b[b_offset], 
+		ldb);
+	dtfsm_(transr, "L", uplo, "N", "N", n, nrhs, &c_b10, a, &b[b_offset], 
+		ldb);
+    }
+
+    return 0;
+
+/* End of DPFTRS */
+
+} /* dpftrs_ */
diff --git a/contrib/libs/clapack/dpocon.c b/contrib/libs/clapack/dpocon.c
new file mode 100644
index 0000000000..3613498553
--- /dev/null
+++ b/contrib/libs/clapack/dpocon.c
@@ -0,0 +1,220 @@
+/* dpocon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dpocon_(char *uplo, integer *n, doublereal *a, integer *
+	lda, doublereal *anorm, doublereal *rcond, doublereal *work, integer *
+	iwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1;
+    doublereal d__1;
+
+    /* Local variables */
+    integer ix, kase;
+    doublereal scale;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    extern /* Subroutine */ int drscl_(integer *, doublereal *, doublereal *, 
+	    integer *);
+    logical upper;
+    extern /* Subroutine */ int dlacn2_(integer *, doublereal *, doublereal *,
+	     integer *, doublereal *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    doublereal scalel;
+    extern integer idamax_(integer *, doublereal *, integer *);
+    doublereal scaleu;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal ainvnm;
+    extern /* Subroutine */ int dlatrs_(char *, char *, char *, char *, 
+	    integer *, doublereal *, integer *, doublereal *, doublereal *, 
+	    doublereal *, integer *);
+    char normin[1];
+    doublereal smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPOCON estimates the reciprocal of the condition number (in the */
+/* 1-norm) of a real symmetric positive definite matrix using the */
+/* Cholesky factorization A = U**T*U or A = L*L**T computed by DPOTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**T*U or A = L*L**T, as computed by DPOTRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* ANORM (input) DOUBLE PRECISION */
+/* The 1-norm (or infinity-norm) of the symmetric matrix A. */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* f2c idiom: shift base pointers so 1-based Fortran-style indexing */
+/* (a[i + j*a_dim1], work[i], iwork[i]) works directly. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*anorm < 0.) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPOCON", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *rcond = 0.;
+    if (*n == 0) {
+	*rcond = 1.;
+	return 0;
+    } else if (*anorm == 0.) {
+	return 0;
+    }
+
+    smlnum = dlamch_("Safe minimum");
+
+/* Estimate the 1-norm of inv(A). */
+
+/* DLACN2 uses reverse communication: each time it returns with */
+/* KASE != 0 the caller must apply inv(A) (via two triangular solves */
+/* with the Cholesky factor) to the vector it supplies, then loop back. */
+/* NORMIN = 'N' only on the first DLATRS call; afterwards 'Y' tells */
+/* DLATRS that work(2n+1..3n) already holds the column norms it needs. */
+    kase = 0;
+    *(unsigned char *)normin = 'N';
+L10:
+    dlacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+    if (kase != 0) {
+	if (upper) {
+
+/* Multiply by inv(U'). */
+
+	    dlatrs_("Upper", "Transpose", "Non-unit", normin, n, &a[a_offset], 
+		     lda, &work[1], &scalel, &work[(*n << 1) + 1], info);
+	    *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(U). */
+
+	    dlatrs_("Upper", "No transpose", "Non-unit", normin, n, &a[
+		    a_offset], lda, &work[1], &scaleu, &work[(*n << 1) + 1], 
+		    info);
+	} else {
+
+/* Multiply by inv(L). */
+
+	    dlatrs_("Lower", "No transpose", "Non-unit", normin, n, &a[
+		    a_offset], lda, &work[1], &scalel, &work[(*n << 1) + 1], 
+		    info);
+	    *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(L'). */
+
+	    dlatrs_("Lower", "Transpose", "Non-unit", normin, n, &a[a_offset], 
+		     lda, &work[1], &scaleu, &work[(*n << 1) + 1], info);
+	}
+
+/* Multiply by 1/SCALE if doing so will not cause overflow. */
+
+/* If the combined solve scale is too small, dividing would overflow: */
+/* bail out through L20 leaving RCOND at 0 (matrix effectively singular */
+/* to working precision). */
+	scale = scalel * scaleu;
+	if (scale != 1.) {
+	    ix = idamax_(n, &work[1], &c__1);
+	    if (scale < (d__1 = work[ix], abs(d__1)) * smlnum || scale == 0.) 
+		    {
+		goto L20;
+	    }
+	    drscl_(n, &scale, &work[1], &c__1);
+	}
+	goto L10;
+    }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+    if (ainvnm != 0.) {
+	*rcond = 1. / ainvnm / *anorm;
+    }
+
+L20:
+    return 0;
+
+/* End of DPOCON */
+
+} /* dpocon_ */
diff --git a/contrib/libs/clapack/dpoequ.c b/contrib/libs/clapack/dpoequ.c
new file mode 100644
index 0000000000..bde379822c
--- /dev/null
+++ b/contrib/libs/clapack/dpoequ.c
@@ -0,0 +1,174 @@
+/* dpoequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dpoequ_(integer *n, doublereal *a, integer *lda, 
+	doublereal *s, doublereal *scond, doublereal *amax, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__;
+    doublereal smin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPOEQU computes row and column scalings intended to equilibrate a */
+/* symmetric positive definite matrix A and reduce its condition number */
+/* (with respect to the two-norm). S contains the scale factors, */
+/* S(i) = 1/sqrt(A(i,i)), chosen so that the scaled matrix B with */
+/* elements B(i,j) = S(i)*A(i,j)*S(j) has ones on the diagonal. This */
+/* choice of S puts the condition number of B within a factor N of the */
+/* smallest possible condition number over all possible diagonal */
+/* scalings. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The N-by-N symmetric positive definite matrix whose scaling */
+/* factors are to be computed. Only the diagonal elements of A */
+/* are referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* S (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, S contains the scale factors for A. */
+
+/* SCOND (output) DOUBLE PRECISION */
+/* If INFO = 0, S contains the ratio of the smallest S(i) to */
+/* the largest S(i). If SCOND >= 0.1 and AMAX is neither too */
+/* large nor too small, it is not worth scaling by S. */
+
+/* AMAX (output) DOUBLE PRECISION */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element is nonpositive. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* f2c idiom: shift base pointers for 1-based Fortran-style indexing. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --s;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*lda < max(1,*n)) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPOEQU", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	*scond = 1.;
+	*amax = 0.;
+	return 0;
+    }
+
+/* Find the minimum and maximum diagonal elements. */
+
+/* S is first used as scratch holding the raw diagonal A(i,i); it is */
+/* converted to the actual scale factors 1/sqrt(A(i,i)) further below. */
+    s[1] = a[a_dim1 + 1];
+    smin = s[1];
+    *amax = s[1];
+    i__1 = *n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+	s[i__] = a[i__ + i__ * a_dim1];
+/* Computing MIN */
+	d__1 = smin, d__2 = s[i__];
+	smin = min(d__1,d__2);
+/* Computing MAX */
+	d__1 = *amax, d__2 = s[i__];
+	*amax = max(d__1,d__2);
+/* L10: */
+    }
+
+    if (smin <= 0.) {
+
+/* Find the first non-positive diagonal element and return. */
+
+/* A symmetric positive definite matrix must have a strictly positive */
+/* diagonal; report the first offender through INFO > 0. */
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (s[i__] <= 0.) {
+		*info = i__;
+		return 0;
+	    }
+/* L20: */
+	}
+    } else {
+
+/* Set the scale factors to the reciprocals */
+/* of the diagonal elements. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    s[i__] = 1. / sqrt(s[i__]);
+/* L30: */
+	}
+
+/* Compute SCOND = min(S(I)) / max(S(I)) */
+
+	*scond = sqrt(smin) / sqrt(*amax);
+    }
+    return 0;
+
+/* End of DPOEQU */
+
+} /* dpoequ_ */
diff --git a/contrib/libs/clapack/dpoequb.c b/contrib/libs/clapack/dpoequb.c
new file mode 100644
index 0000000000..a963fc38f7
--- /dev/null
+++ b/contrib/libs/clapack/dpoequb.c
@@ -0,0 +1,188 @@
+/* dpoequb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dpoequb_(integer *n, doublereal *a, integer *lda, 
+	doublereal *s, doublereal *scond, doublereal *amax, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double log(doublereal), pow_di(doublereal *, integer *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__;
+    doublereal tmp, base, smin;
+    extern doublereal dlamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- Contributed by James Demmel, Deaglan Halligan, Yozo Hida and -- */
+/* -- Jason Riedy of Univ. of California Berkeley. -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley and NAG Ltd. -- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPOEQUB computes row and column scalings intended to equilibrate a */
+/* symmetric positive definite matrix A and reduce its condition number */
+/* (with respect to the two-norm). S contains the scale factors, */
+/* S(i) = 1/sqrt(A(i,i)), chosen so that the scaled matrix B with */
+/* elements B(i,j) = S(i)*A(i,j)*S(j) has ones on the diagonal. This */
+/* choice of S puts the condition number of B within a factor N of the */
+/* smallest possible condition number over all possible diagonal */
+/* scalings. */
+
+/* Unlike DPOEQU, the scale factors produced here are rounded down to */
+/* a power of the floating-point radix, so multiplying by them is exact */
+/* (introduces no rounding error). */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The N-by-N symmetric positive definite matrix whose scaling */
+/* factors are to be computed. Only the diagonal elements of A */
+/* are referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* S (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, S contains the scale factors for A. */
+
+/* SCOND (output) DOUBLE PRECISION */
+/* If INFO = 0, S contains the ratio of the smallest S(i) to */
+/* the largest S(i). If SCOND >= 0.1 and AMAX is neither too */
+/* large nor too small, it is not worth scaling by S. */
+
+/* AMAX (output) DOUBLE PRECISION */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element is nonpositive. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* Positive definite only performs 1 pass of equilibration. */
+
+    /* Parameter adjustments */
+/* f2c idiom: shift base pointers for 1-based Fortran-style indexing. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --s;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*lda < max(1,*n)) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPOEQUB", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*n == 0) {
+	*scond = 1.;
+	*amax = 0.;
+	return 0;
+    }
+/* BASE is the machine radix; TMP = -1/(2*log(BASE)) so that below */
+/* base**int(tmp*log(a_ii)) ~= 1/sqrt(a_ii) rounded to a radix power. */
+    base = dlamch_("B");
+    tmp = -.5 / log(base);
+
+/* Find the minimum and maximum diagonal elements. */
+
+/* S temporarily holds the raw diagonal; converted to scale factors below. */
+    s[1] = a[a_dim1 + 1];
+    smin = s[1];
+    *amax = s[1];
+    i__1 = *n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+	s[i__] = a[i__ + i__ * a_dim1];
+/* Computing MIN */
+	d__1 = smin, d__2 = s[i__];
+	smin = min(d__1,d__2);
+/* Computing MAX */
+	d__1 = *amax, d__2 = s[i__];
+	*amax = max(d__1,d__2);
+/* L10: */
+    }
+
+    if (smin <= 0.) {
+
+/* Find the first non-positive diagonal element and return. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (s[i__] <= 0.) {
+		*info = i__;
+		return 0;
+	    }
+/* L20: */
+	}
+    } else {
+
+/* Set the scale factors to the reciprocals */
+/* of the diagonal elements. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = (integer) (tmp * log(s[i__]));
+	    s[i__] = pow_di(&base, &i__2);
+/* L30: */
+	}
+
+/* Compute SCOND = min(S(I)) / max(S(I)). */
+
+	*scond = sqrt(smin) / sqrt(*amax);
+    }
+
+    return 0;
+
+/* End of DPOEQUB */
+
+} /* dpoequb_ */
diff --git a/contrib/libs/clapack/dporfs.c b/contrib/libs/clapack/dporfs.c
new file mode 100644
index 0000000000..842f1a2df9
--- /dev/null
+++ b/contrib/libs/clapack/dporfs.c
@@ -0,0 +1,422 @@
+/* dporfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b12 = -1.;
+static doublereal c_b14 = 1.;
+
+/* Subroutine */ int dporfs_(char *uplo, integer *n, integer *nrhs, 
+	doublereal *a, integer *lda, doublereal *af, integer *ldaf, 
+	doublereal *b, integer *ldb, doublereal *x, integer *ldx, doublereal *
+	ferr, doublereal *berr, doublereal *work, integer *iwork, integer *
+	info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1, 
+	    x_offset, i__1, i__2, i__3;
+    doublereal d__1, d__2, d__3;
+
+    /* Local variables */
+    integer i__, j, k;
+    doublereal s, xk;
+    integer nz;
+    doublereal eps;
+    integer kase;
+    doublereal safe1, safe2;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, 
+	    doublereal *, integer *), daxpy_(integer *, doublereal *, 
+	    doublereal *, integer *, doublereal *, integer *);
+    integer count;
+    logical upper;
+    extern /* Subroutine */ int dsymv_(char *, integer *, doublereal *, 
+	    doublereal *, integer *, doublereal *, integer *, doublereal *, 
+	    doublereal *, integer *), dlacn2_(integer *, doublereal *, 
+	    doublereal *, integer *, doublereal *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    doublereal safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *), dpotrs_(
+	    char *, integer *, integer *, doublereal *, integer *, doublereal 
+	    *, integer *, integer *);
+    doublereal lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPORFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is symmetric positive definite, */
+/* and provides error bounds and backward error estimates for the */
+/* solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The symmetric matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input) DOUBLE PRECISION array, dimension (LDAF,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**T*U or A = L*L**T, as computed by DPOTRF. */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by DPOTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* f2c idiom: shift every array base pointer so 1-based Fortran-style */
+/* indexing works directly in the loops below. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    af_dim1 = *ldaf;
+    af_offset = 1 + af_dim1;
+    af -= af_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldaf < max(1,*n)) {
+	*info = -7;
+    } else if (*ldb < max(1,*n)) {
+	*info = -9;
+    } else if (*ldx < max(1,*n)) {
+	*info = -11;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPORFS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] = 0.;
+	    berr[j] = 0.;
+/* L10: */
+	}
+	return 0;
+    }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+    nz = *n + 1;
+    eps = dlamch_("Epsilon");
+    safmin = dlamch_("Safe minimum");
+    safe1 = nz * safmin;
+    safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+/* COUNT tracks refinement iterations (ITMAX = 5, see test below); */
+/* LSTRES starts above any possible BERR so the first pass proceeds. */
+	count = 1;
+	lstres = 3.;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+/* The residual is accumulated in work(n+1 .. 2n). */
+	dcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+	dsymv_(uplo, n, &c_b12, &a[a_offset], lda, &x[j * x_dim1 + 1], &c__1, 
+		&c_b14, &work[*n + 1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    work[i__] = (d__1 = b[i__ + j * b_dim1], abs(d__1));
+/* L30: */
+	}
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+/* Only the stored triangle of A is traversed; the symmetric */
+/* contribution of the other triangle is accumulated via S. */
+	if (upper) {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		s = 0.;
+		xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+		i__3 = k - 1;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    work[i__] += (d__1 = a[i__ + k * a_dim1], abs(d__1)) * xk;
+		    s += (d__1 = a[i__ + k * a_dim1], abs(d__1)) * (d__2 = x[
+			    i__ + j * x_dim1], abs(d__2));
+/* L40: */
+		}
+		work[k] = work[k] + (d__1 = a[k + k * a_dim1], abs(d__1)) * 
+			xk + s;
+/* L50: */
+	    }
+	} else {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		s = 0.;
+		xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+		work[k] += (d__1 = a[k + k * a_dim1], abs(d__1)) * xk;
+		i__3 = *n;
+		for (i__ = k + 1; i__ <= i__3; ++i__) {
+		    work[i__] += (d__1 = a[i__ + k * a_dim1], abs(d__1)) * xk;
+		    s += (d__1 = a[i__ + k * a_dim1], abs(d__1)) * (d__2 = x[
+			    i__ + j * x_dim1], abs(d__2));
+/* L60: */
+		}
+		work[k] += s;
+/* L70: */
+	    }
+	}
+	s = 0.;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (work[i__] > safe2) {
+/* Computing MAX */
+		d__2 = s, d__3 = (d__1 = work[*n + i__], abs(d__1)) / work[
+			i__];
+		s = max(d__2,d__3);
+	    } else {
+/* Computing MAX */
+		d__2 = s, d__3 = ((d__1 = work[*n + i__], abs(d__1)) + safe1) 
+			/ (work[i__] + safe1);
+		s = max(d__2,d__3);
+	    }
+/* L80: */
+	}
+	berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+	if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+/* Solve A*dX = R with the Cholesky factor, then X := X + dX. */
+	    dpotrs_(uplo, n, &c__1, &af[af_offset], ldaf, &work[*n + 1], n, 
+		    info);
+	    daxpy_(n, &c_b14, &work[*n + 1], &c__1, &x[j * x_dim1 + 1], &c__1)
+		    ;
+	    lstres = berr[j];
+	    ++count;
+	    goto L20;
+	}
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use DLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (work[i__] > safe2) {
+		work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps * 
+			work[i__];
+	    } else {
+		work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps * 
+			work[i__] + safe1;
+	    }
+/* L90: */
+	}
+
+/* DLACN2 reverse-communication loop: KASE selects which product the */
+/* caller must apply (A is symmetric, so inv(A') = inv(A); both cases */
+/* use DPOTRS) before re-entering the estimator. */
+	kase = 0;
+L100:
+	dlacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+		kase, isave);
+	if (kase != 0) {
+	    if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+		dpotrs_(uplo, n, &c__1, &af[af_offset], ldaf, &work[*n + 1], 
+			n, info);
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    work[*n + i__] = work[i__] * work[*n + i__];
+/* L110: */
+		}
+	    } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    work[*n + i__] = work[i__] * work[*n + i__];
+/* L120: */
+		}
+		dpotrs_(uplo, n, &c__1, &af[af_offset], ldaf, &work[*n + 1], 
+			n, info);
+	    }
+	    goto L100;
+	}
+
+/* Normalize error. */
+
+/* FERR(j) is divided by norm(X(j)) (infinity norm) to make it relative. */
+	lstres = 0.;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    d__2 = lstres, d__3 = (d__1 = x[i__ + j * x_dim1], abs(d__1));
+	    lstres = max(d__2,d__3);
+/* L130: */
+	}
+	if (lstres != 0.) {
+	    ferr[j] /= lstres;
+	}
+
+/* L140: */
+    }
+
+    return 0;
+
+/* End of DPORFS */
+
+} /* dporfs_ */
diff --git a/contrib/libs/clapack/dposv.c b/contrib/libs/clapack/dposv.c
new file mode 100644
index 0000000000..745ff731f0
--- /dev/null
+++ b/contrib/libs/clapack/dposv.c
@@ -0,0 +1,151 @@
+/* dposv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dposv_(char *uplo, integer *n, integer *nrhs, doublereal
+ *a, integer *lda, doublereal *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), dpotrf_(
+ char *, integer *, doublereal *, integer *, integer *),
+ dpotrs_(char *, integer *, integer *, doublereal *, integer *,
+ doublereal *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPOSV computes the solution to a real system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric positive definite matrix and X and B */
+/* are N-by-NRHS matrices. */
+
+/* The Cholesky decomposition is used to factor A as */
+/* A = U**T* U, if UPLO = 'U', or */
+/* A = L * L**T, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is a lower triangular */
+/* matrix. The factored form of A is then used to solve the system of */
+/* equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i of A is not */
+/* positive definite, so the factorization could not be */
+/* completed, and the solution has not been computed. */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPOSV ", &i__1);
+ return 0;
+ }
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+ dpotrf_(uplo, n, &a[a_offset], lda, info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ dpotrs_(uplo, n, nrhs, &a[a_offset], lda, &b[b_offset], ldb, info);
+
+ }
+ return 0;
+
+/* End of DPOSV */
+
+} /* dposv_ */
diff --git a/contrib/libs/clapack/dposvx.c b/contrib/libs/clapack/dposvx.c
new file mode 100644
index 0000000000..130455668c
--- /dev/null
+++ b/contrib/libs/clapack/dposvx.c
@@ -0,0 +1,450 @@
+/* dposvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dposvx_(char *fact, char *uplo, integer *n, integer *
+ nrhs, doublereal *a, integer *lda, doublereal *af, integer *ldaf,
+ char *equed, doublereal *s, doublereal *b, integer *ldb, doublereal *
+ x, integer *ldx, doublereal *rcond, doublereal *ferr, doublereal *
+ berr, doublereal *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+ x_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal amax, smin, smax;
+ extern logical lsame_(char *, char *);
+ doublereal scond, anorm;
+ logical equil, rcequ;
+ extern doublereal dlamch_(char *);
+ logical nofact;
+ extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+ doublereal bignum;
+ extern /* Subroutine */ int dpocon_(char *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *,
+ integer *);
+ integer infequ;
+ extern doublereal dlansy_(char *, char *, integer *, doublereal *,
+ integer *, doublereal *);
+ extern /* Subroutine */ int dlaqsy_(char *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, char *), dpoequ_(integer *, doublereal *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *), dporfs_(
+ char *, integer *, integer *, doublereal *, integer *, doublereal
+ *, integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *, integer *), dpotrf_(char *, integer *, doublereal *, integer *,
+ integer *);
+ doublereal smlnum;
+ extern /* Subroutine */ int dpotrs_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPOSVX uses the Cholesky factorization A = U**T*U or A = L*L**T to */
+/* compute the solution to a real system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric positive definite matrix and X and B */
+/* are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/* the system: */
+/* diag(S) * A * diag(S) * inv(diag(S)) * X = diag(S) * B */
+/* Whether or not the system will be equilibrated depends on the */
+/* scaling of the matrix A, but if equilibration is used, A is */
+/* overwritten by diag(S)*A*diag(S) and B by diag(S)*B. */
+
+/* 2. If FACT = 'N' or 'E', the Cholesky decomposition is used to */
+/* factor the matrix A (after equilibration if FACT = 'E') as */
+/* A = U**T* U, if UPLO = 'U', or */
+/* A = L * L**T, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is a lower triangular */
+/* matrix. */
+
+/* 3. If the leading i-by-i principal minor is not positive definite, */
+/* then the routine returns with INFO = i. Otherwise, the factored */
+/* form of A is used to estimate the condition number of the matrix */
+/* A. If the reciprocal of the condition number is less than machine */
+/* precision, INFO = N+1 is returned as a warning, but the routine */
+/* still goes on to solve for X and compute error bounds as */
+/* described below. */
+
+/* 4. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 5. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* 6. If equilibration was used, the matrix X is premultiplied by */
+/* diag(S) so that it solves the original system before */
+/* equilibration. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of the matrix A is */
+/* supplied on entry, and if not, whether the matrix A should be */
+/* equilibrated before it is factored. */
+/* = 'F': On entry, AF contains the factored form of A. */
+/* If EQUED = 'Y', the matrix A has been equilibrated */
+/* with scaling factors given by S. A and AF will not */
+/* be modified. */
+/* = 'N': The matrix A will be copied to AF and factored. */
+/* = 'E': The matrix A will be equilibrated if necessary, then */
+/* copied to AF and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A, except if FACT = 'F' and */
+/* EQUED = 'Y', then A must contain the equilibrated matrix */
+/* diag(S)*A*diag(S). If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. A is not modified if */
+/* FACT = 'F' or 'N', or if FACT = 'E' and EQUED = 'N' on exit. */
+
+/* On exit, if FACT = 'E' and EQUED = 'Y', A is overwritten by */
+/* diag(S)*A*diag(S). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input or output) DOUBLE PRECISION array, dimension (LDAF,N) */
+/* If FACT = 'F', then AF is an input argument and on entry */
+/* contains the triangular factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T, in the same storage */
+/* format as A. If EQUED .ne. 'N', then AF is the factored form */
+/* of the equilibrated matrix diag(S)*A*diag(S). */
+
+/* If FACT = 'N', then AF is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T of the original */
+/* matrix A. */
+
+/* If FACT = 'E', then AF is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T of the equilibrated */
+/* matrix A (see the description of A for the form of the */
+/* equilibrated matrix). */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* EQUED (input or output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration (always true if FACT = 'N'). */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+/* EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/* output argument. */
+
+/* S (input or output) DOUBLE PRECISION array, dimension (N) */
+/* The scale factors for A; not accessed if EQUED = 'N'. S is */
+/* an input argument if FACT = 'F'; otherwise, S is an output */
+/* argument. If FACT = 'F' and EQUED = 'Y', each element of S */
+/* must be positive. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if EQUED = 'N', B is not modified; if EQUED = 'Y', */
+/* B is overwritten by diag(S) * B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X to */
+/* the original system of equations. Note that if EQUED = 'Y', */
+/* A and B are modified on exit, and the solution to the */
+/* equilibrated system is inv(diag(S))*X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A after equilibration (if done). If RCOND is less than the */
+/* machine precision (in particular, if RCOND = 0), the matrix */
+/* is singular to working precision. This condition is */
+/* indicated by a return code of INFO > 0. */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: the leading minor of order i of A is */
+/* not positive definite, so the factorization */
+/* could not be completed, and the solution has not */
+/* been computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ af_dim1 = *ldaf;
+ af_offset = 1 + af_dim1;
+ af -= af_offset;
+ --s;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ equil = lsame_(fact, "E");
+ if (nofact || equil) {
+ *(unsigned char *)equed = 'N';
+ rcequ = FALSE_;
+ } else {
+ rcequ = lsame_(equed, "Y");
+ smlnum = dlamch_("Safe minimum");
+ bignum = 1. / smlnum;
+ }
+
+/* Test the input parameters. */
+
+ if (! nofact && ! equil && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! lsame_(uplo, "U") && ! lsame_(uplo,
+ "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else if (*ldaf < max(1,*n)) {
+ *info = -8;
+ } else if (lsame_(fact, "F") && ! (rcequ || lsame_(
+ equed, "N"))) {
+ *info = -9;
+ } else {
+ if (rcequ) {
+ smin = bignum;
+ smax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ d__1 = smin, d__2 = s[j];
+ smin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = smax, d__2 = s[j];
+ smax = max(d__1,d__2);
+/* L10: */
+ }
+ if (smin <= 0.) {
+ *info = -10;
+ } else if (*n > 0) {
+ scond = max(smin,smlnum) / min(smax,bignum);
+ } else {
+ scond = 1.;
+ }
+ }
+ if (*info == 0) {
+ if (*ldb < max(1,*n)) {
+ *info = -12;
+ } else if (*ldx < max(1,*n)) {
+ *info = -14;
+ }
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPOSVX", &i__1);
+ return 0;
+ }
+
+ if (equil) {
+
+/* Compute row and column scalings to equilibrate the matrix A. */
+
+ dpoequ_(n, &a[a_offset], lda, &s[1], &scond, &amax, &infequ);
+ if (infequ == 0) {
+
+/* Equilibrate the matrix. */
+
+ dlaqsy_(uplo, n, &a[a_offset], lda, &s[1], &scond, &amax, equed);
+ rcequ = lsame_(equed, "Y");
+ }
+ }
+
+/* Scale the right hand side. */
+
+ if (rcequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = s[i__] * b[i__ + j * b_dim1];
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+
+ if (nofact || equil) {
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+ dlacpy_(uplo, n, n, &a[a_offset], lda, &af[af_offset], ldaf);
+ dpotrf_(uplo, n, &af[af_offset], ldaf, info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ anorm = dlansy_("1", uplo, n, &a[a_offset], lda, &work[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ dpocon_(uplo, n, &af[af_offset], ldaf, &anorm, rcond, &work[1], &iwork[1],
+ info);
+
+/* Compute the solution matrix X. */
+
+ dlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ dpotrs_(uplo, n, nrhs, &af[af_offset], ldaf, &x[x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solution and */
+/* compute error bounds and backward error estimates for it. */
+
+ dporfs_(uplo, n, nrhs, &a[a_offset], lda, &af[af_offset], ldaf, &b[
+ b_offset], ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[1], &
+ iwork[1], info);
+
+/* Transform the solution matrix X to a solution of the original */
+/* system. */
+
+ if (rcequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[i__ + j * x_dim1] = s[i__] * x[i__ + j * x_dim1];
+/* L40: */
+ }
+/* L50: */
+ }
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] /= scond;
+/* L60: */
+ }
+ }
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < dlamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ return 0;
+
+/* End of DPOSVX */
+
+} /* dposvx_ */
diff --git a/contrib/libs/clapack/dpotf2.c b/contrib/libs/clapack/dpotf2.c
new file mode 100644
index 0000000000..fb237c3a46
--- /dev/null
+++ b/contrib/libs/clapack/dpotf2.c
@@ -0,0 +1,224 @@
+/* dpotf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b10 = -1.;
+static doublereal c_b12 = 1.;
+
+/* Subroutine */ int dpotf2_(char *uplo, integer *n, doublereal *a, integer *
+ lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer j;
+ doublereal ajj;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+ logical upper;
+ extern logical disnan_(doublereal *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPOTF2 computes the Cholesky factorization of a real symmetric */
+/* positive definite matrix A. */
+
+/* The factorization has the form */
+/* A = U' * U , if UPLO = 'U', or */
+/* A = L * L', if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular. */
+
+/* This is the unblocked version of the algorithm, calling Level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization A = U'*U or A = L*L'. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, the leading minor of order k is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPOTF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute U(J,J) and test for non-positive-definiteness. */
+
+ i__2 = j - 1;
+ ajj = a[j + j * a_dim1] - ddot_(&i__2, &a[j * a_dim1 + 1], &c__1,
+ &a[j * a_dim1 + 1], &c__1);
+ if (ajj <= 0. || disnan_(&ajj)) {
+ a[j + j * a_dim1] = ajj;
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ a[j + j * a_dim1] = ajj;
+
+/* Compute elements J+1:N of row J. */
+
+ if (j < *n) {
+ i__2 = j - 1;
+ i__3 = *n - j;
+ dgemv_("Transpose", &i__2, &i__3, &c_b10, &a[(j + 1) * a_dim1
+ + 1], lda, &a[j * a_dim1 + 1], &c__1, &c_b12, &a[j + (
+ j + 1) * a_dim1], lda);
+ i__2 = *n - j;
+ d__1 = 1. / ajj;
+ dscal_(&i__2, &d__1, &a[j + (j + 1) * a_dim1], lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute L(J,J) and test for non-positive-definiteness. */
+
+ i__2 = j - 1;
+ ajj = a[j + j * a_dim1] - ddot_(&i__2, &a[j + a_dim1], lda, &a[j
+ + a_dim1], lda);
+ if (ajj <= 0. || disnan_(&ajj)) {
+ a[j + j * a_dim1] = ajj;
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ a[j + j * a_dim1] = ajj;
+
+/* Compute elements J+1:N of column J. */
+
+ if (j < *n) {
+ i__2 = *n - j;
+ i__3 = j - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b10, &a[j + 1 +
+ a_dim1], lda, &a[j + a_dim1], lda, &c_b12, &a[j + 1 +
+ j * a_dim1], &c__1);
+ i__2 = *n - j;
+ d__1 = 1. / ajj;
+ dscal_(&i__2, &d__1, &a[j + 1 + j * a_dim1], &c__1);
+ }
+/* L20: */
+ }
+ }
+ goto L40;
+
+L30:
+ *info = j;
+
+L40:
+ return 0;
+
+/* End of DPOTF2 */
+
+} /* dpotf2_ */
diff --git a/contrib/libs/clapack/dpotrf.c b/contrib/libs/clapack/dpotrf.c
new file mode 100644
index 0000000000..5ebdcfcb66
--- /dev/null
+++ b/contrib/libs/clapack/dpotrf.c
@@ -0,0 +1,245 @@
+/* dpotrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b13 = -1.;
+static doublereal c_b14 = 1.;
+
+/* Subroutine */ int dpotrf_(char *uplo, integer *n, doublereal *a, integer *
+ lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer j, jb, nb;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *);
+ logical upper;
+ extern /* Subroutine */ int dsyrk_(char *, char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *), dpotf2_(char *, integer *,
+ doublereal *, integer *, integer *), xerbla_(char *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPOTRF computes the Cholesky factorization of a real symmetric */
+/* positive definite matrix A. */
+
+/* The factorization has the form */
+/* A = U**T * U, if UPLO = 'U', or */
+/* A = L * L**T, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular. */
+
+/* This is the block version of the algorithm, calling Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPOTRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "DPOTRF", uplo, n, &c_n1, &c_n1, &c_n1);
+ if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code. */
+
+ dpotf2_(uplo, n, &a[a_offset], lda, info);
+ } else {
+
+/* Use blocked code. */
+
+ if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Update and factorize the current diagonal block and test */
+/* for non-positive-definiteness. */
+
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - j + 1;
+ jb = min(i__3,i__4);
+ i__3 = j - 1;
+ dsyrk_("Upper", "Transpose", &jb, &i__3, &c_b13, &a[j *
+ a_dim1 + 1], lda, &c_b14, &a[j + j * a_dim1], lda);
+ dpotf2_("Upper", &jb, &a[j + j * a_dim1], lda, info);
+ if (*info != 0) {
+ goto L30;
+ }
+ if (j + jb <= *n) {
+
+/* Compute the current block row. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j - 1;
+ dgemm_("Transpose", "No transpose", &jb, &i__3, &i__4, &
+ c_b13, &a[j * a_dim1 + 1], lda, &a[(j + jb) *
+ a_dim1 + 1], lda, &c_b14, &a[j + (j + jb) *
+ a_dim1], lda);
+ i__3 = *n - j - jb + 1;
+ dtrsm_("Left", "Upper", "Transpose", "Non-unit", &jb, &
+ i__3, &c_b14, &a[j + j * a_dim1], lda, &a[j + (j
+ + jb) * a_dim1], lda);
+ }
+/* L10: */
+ }
+
+ } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (j = 1; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Update and factorize the current diagonal block and test */
+/* for non-positive-definiteness. */
+
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - j + 1;
+ jb = min(i__3,i__4);
+ i__3 = j - 1;
+ dsyrk_("Lower", "No transpose", &jb, &i__3, &c_b13, &a[j +
+ a_dim1], lda, &c_b14, &a[j + j * a_dim1], lda);
+ dpotf2_("Lower", &jb, &a[j + j * a_dim1], lda, info);
+ if (*info != 0) {
+ goto L30;
+ }
+ if (j + jb <= *n) {
+
+/* Compute the current block column. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j - 1;
+ dgemm_("No transpose", "Transpose", &i__3, &jb, &i__4, &
+ c_b13, &a[j + jb + a_dim1], lda, &a[j + a_dim1],
+ lda, &c_b14, &a[j + jb + j * a_dim1], lda);
+ i__3 = *n - j - jb + 1;
+ dtrsm_("Right", "Lower", "Transpose", "Non-unit", &i__3, &
+ jb, &c_b14, &a[j + j * a_dim1], lda, &a[j + jb +
+ j * a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+ }
+ goto L40;
+
+L30:
+ *info = *info + j - 1;
+
+L40:
+ return 0;
+
+/* End of DPOTRF */
+
+} /* dpotrf_ */
diff --git a/contrib/libs/clapack/dpotri.c b/contrib/libs/clapack/dpotri.c
new file mode 100644
index 0000000000..9141e4ccf6
--- /dev/null
+++ b/contrib/libs/clapack/dpotri.c
@@ -0,0 +1,125 @@
+/* dpotri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dpotri_(char *uplo, integer *n, doublereal *a, integer *
+	lda, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *), dlauum_(
+	    char *, integer *, doublereal *, integer *, integer *),
+	    dtrtri_(char *, char *, integer *, doublereal *, integer *,
+	    integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DPOTRI computes the inverse of a real symmetric positive definite */
+/*  matrix A using the Cholesky factorization A = U**T*U or A = L*L**T */
+/*  computed by DPOTRF. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangle of A is stored; */
+/*          = 'L':  Lower triangle of A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  A       (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/*          On entry, the triangular factor U or L from the Cholesky */
+/*          factorization A = U**T*U or A = L*L**T, as computed by */
+/*          DPOTRF. */
+/*          On exit, the upper or lower triangle of the (symmetric) */
+/*          inverse of A, overwriting the input factor U or L. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, the (i,i) element of the factor U or L is */
+/*                zero, and the inverse could not be computed. */
+
+/*  ===================================================================== */
+
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+/*     f2c pointer shift: after the adjustment above, a[i + j*a_dim1] */
+/*     addresses the 1-based Fortran element A(i,j). */
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPOTRI", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Invert the triangular Cholesky factor U or L. */
+
+    dtrtri_(uplo, "Non-unit", n, &a[a_offset], lda, info);
+
+/*     A positive INFO from DTRTRI flags a zero diagonal element in the */
+/*     factor (singular matrix); propagate it to the caller unchanged. */
+
+    if (*info > 0) {
+	return 0;
+    }
+
+/*     Form inv(U)*inv(U)' or inv(L)'*inv(L). */
+
+    dlauum_(uplo, n, &a[a_offset], lda, info);
+
+    return 0;
+
+/*     End of DPOTRI */
+
+} /* dpotri_ */
diff --git a/contrib/libs/clapack/dpotrs.c b/contrib/libs/clapack/dpotrs.c
new file mode 100644
index 0000000000..888a96f697
--- /dev/null
+++ b/contrib/libs/clapack/dpotrs.c
@@ -0,0 +1,166 @@
+/* dpotrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b9 = 1.;
+
+/* Subroutine */ int dpotrs_(char *uplo, integer *n, integer *nrhs,
+	doublereal *a, integer *lda, doublereal *b, integer *ldb, integer *
+	info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+	    integer *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DPOTRS solves a system of linear equations A*X = B with a symmetric */
+/*  positive definite matrix A using the Cholesky factorization */
+/*  A = U**T*U or A = L*L**T computed by DPOTRF. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangle of A is stored; */
+/*          = 'L':  Lower triangle of A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrix B.  NRHS >= 0. */
+
+/*  A       (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/*          The triangular factor U or L from the Cholesky factorization */
+/*          A = U**T*U or A = L*L**T, as computed by DPOTRF. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  B       (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/*          On entry, the right hand side matrix B. */
+/*          On exit, the solution matrix X. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+/*     f2c pointer shifts: a[i + j*a_dim1] and b[i + j*b_dim1] now */
+/*     address the 1-based Fortran elements A(i,j) and B(i,j). */
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPOTRS", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+/*     Each branch performs two in-place triangular solves over all NRHS */
+/*     columns of B; c_b9 (file-scope constant 1.) is the unit alpha */
+/*     passed to DTRSM. */
+
+    if (upper) {
+
+/*        Solve A*X = B where A = U'*U. */
+
+/*        Solve U'*X = B, overwriting B with X. */
+
+	dtrsm_("Left", "Upper", "Transpose", "Non-unit", n, nrhs, &c_b9, &a[
+		a_offset], lda, &b[b_offset], ldb);
+
+/*        Solve U*X = B, overwriting B with X. */
+
+	dtrsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b9, &
+		a[a_offset], lda, &b[b_offset], ldb);
+    } else {
+
+/*        Solve A*X = B where A = L*L'. */
+
+/*        Solve L*X = B, overwriting B with X. */
+
+	dtrsm_("Left", "Lower", "No transpose", "Non-unit", n, nrhs, &c_b9, &
+		a[a_offset], lda, &b[b_offset], ldb);
+
+/*        Solve L'*X = B, overwriting B with X. */
+
+	dtrsm_("Left", "Lower", "Transpose", "Non-unit", n, nrhs, &c_b9, &a[
+		a_offset], lda, &b[b_offset], ldb);
+    }
+
+    return 0;
+
+/*     End of DPOTRS */
+
+} /* dpotrs_ */
diff --git a/contrib/libs/clapack/dppcon.c b/contrib/libs/clapack/dppcon.c
new file mode 100644
index 0000000000..cfac4a172a
--- /dev/null
+++ b/contrib/libs/clapack/dppcon.c
@@ -0,0 +1,215 @@
+/* dppcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dppcon_(char *uplo, integer *n, doublereal *ap,
+	doublereal *anorm, doublereal *rcond, doublereal *work, integer *
+	iwork, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1;
+
+    /* Local variables */
+    integer ix, kase;
+    doublereal scale;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    extern /* Subroutine */ int drscl_(integer *, doublereal *, doublereal *,
+	     integer *);
+    logical upper;
+    extern /* Subroutine */ int dlacn2_(integer *, doublereal *, doublereal *,
+	     integer *, doublereal *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    doublereal scalel;
+    extern integer idamax_(integer *, doublereal *, integer *);
+    doublereal scaleu;
+    extern /* Subroutine */ int xerbla_(char *, integer *), dlatps_(
+	    char *, char *, char *, char *, integer *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, integer *);
+    doublereal ainvnm;
+    char normin[1];
+    doublereal smlnum;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*  Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DPPCON estimates the reciprocal of the condition number (in the */
+/*  1-norm) of a real symmetric positive definite packed matrix using */
+/*  the Cholesky factorization A = U**T*U or A = L*L**T computed by */
+/*  DPPTRF. */
+
+/*  An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/*  condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangle of A is stored; */
+/*          = 'L':  Lower triangle of A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  AP      (input) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/*          The triangular factor U or L from the Cholesky factorization */
+/*          A = U**T*U or A = L*L**T, packed columnwise in a linear */
+/*          array.  The j-th column of U or L is stored in the array AP */
+/*          as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = U(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = L(i,j) for j<=i<=n. */
+
+/*  ANORM   (input) DOUBLE PRECISION */
+/*          The 1-norm (or infinity-norm) of the symmetric matrix A. */
+
+/*  RCOND   (output) DOUBLE PRECISION */
+/*          The reciprocal of the condition number of the matrix A, */
+/*          computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/*          estimate of the 1-norm of inv(A) computed in this routine. */
+
+/*  WORK    (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/*  IWORK   (workspace) INTEGER array, dimension (N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --iwork;
+    --work;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*anorm < 0.) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPPCON", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    *rcond = 0.;
+    if (*n == 0) {
+	*rcond = 1.;
+	return 0;
+    } else if (*anorm == 0.) {
+	return 0;
+    }
+
+    smlnum = dlamch_("Safe minimum");
+
+/*     Estimate the 1-norm of the inverse. */
+
+/*     Reverse-communication loop: each DLACN2 call with KASE != 0 asks */
+/*     this routine to apply inv(A) to the vector in work(1:n), which is */
+/*     done here as two packed triangular solves via DLATPS.  NORMIN is */
+/*     'N' only on the first solve; afterwards it is set to 'Y' so DLATPS */
+/*     can reuse the column-norm data kept in work(2n+1:3n). */
+
+    kase = 0;
+    *(unsigned char *)normin = 'N';
+L10:
+    dlacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+    if (kase != 0) {
+	if (upper) {
+
+/*           Multiply by inv(U'). */
+
+	    dlatps_("Upper", "Transpose", "Non-unit", normin, n, &ap[1], &
+		    work[1], &scalel, &work[(*n << 1) + 1], info);
+	    *(unsigned char *)normin = 'Y';
+
+/*           Multiply by inv(U). */
+
+	    dlatps_("Upper", "No transpose", "Non-unit", normin, n, &ap[1], &
+		    work[1], &scaleu, &work[(*n << 1) + 1], info);
+	} else {
+
+/*           Multiply by inv(L). */
+
+	    dlatps_("Lower", "No transpose", "Non-unit", normin, n, &ap[1], &
+		    work[1], &scalel, &work[(*n << 1) + 1], info);
+	    *(unsigned char *)normin = 'Y';
+
+/*           Multiply by inv(L'). */
+
+	    dlatps_("Lower", "Transpose", "Non-unit", normin, n, &ap[1], &
+		    work[1], &scaleu, &work[(*n << 1) + 1], info);
+	}
+
+/*        Multiply by 1/SCALE if doing so will not cause overflow. */
+
+	scale = scalel * scaleu;
+	if (scale != 1.) {
+	    ix = idamax_(n, &work[1], &c__1);
+	    if (scale < (d__1 = work[ix], abs(d__1)) * smlnum || scale == 0.)
+		    {
+		goto L20;
+	    }
+	    drscl_(n, &scale, &work[1], &c__1);
+	}
+	goto L10;
+    }
+
+/*     Compute the estimate of the reciprocal condition number. */
+
+    if (ainvnm != 0.) {
+	*rcond = 1. / ainvnm / *anorm;
+    }
+
+L20:
+    return 0;
+
+/*     End of DPPCON */
+
+} /* dppcon_ */
diff --git a/contrib/libs/clapack/dppequ.c b/contrib/libs/clapack/dppequ.c
new file mode 100644
index 0000000000..d9eda2dcd6
--- /dev/null
+++ b/contrib/libs/clapack/dppequ.c
@@ -0,0 +1,208 @@
+/* dppequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dppequ_(char *uplo, integer *n, doublereal *ap,
+	doublereal *s, doublereal *scond, doublereal *amax, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, jj;
+    doublereal smin;
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DPPEQU computes row and column scalings intended to equilibrate a */
+/*  symmetric positive definite matrix A in packed storage and reduce */
+/*  its condition number (with respect to the two-norm).  S contains the */
+/*  scale factors, S(i)=1/sqrt(A(i,i)), chosen so that the scaled matrix */
+/*  B with elements B(i,j)=S(i)*A(i,j)*S(j) has ones on the diagonal. */
+/*  This choice of S puts the condition number of B within a factor N of */
+/*  the smallest possible condition number over all possible diagonal */
+/*  scalings. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangle of A is stored; */
+/*          = 'L':  Lower triangle of A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  AP      (input) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/*          The upper or lower triangle of the symmetric matrix A, packed */
+/*          columnwise in a linear array.  The j-th column of A is stored */
+/*          in the array AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/*  S       (output) DOUBLE PRECISION array, dimension (N) */
+/*          If INFO = 0, S contains the scale factors for A. */
+
+/*  SCOND   (output) DOUBLE PRECISION */
+/*          If INFO = 0, S contains the ratio of the smallest S(i) to */
+/*          the largest S(i).  If SCOND >= 0.1 and AMAX is neither too */
+/*          large nor too small, it is not worth scaling by S. */
+
+/*  AMAX    (output) DOUBLE PRECISION */
+/*          Absolute value of largest matrix element.  If AMAX is very */
+/*          close to overflow or very close to underflow, the matrix */
+/*          should be scaled. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, the i-th diagonal element is nonpositive. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --s;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPPEQU", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	*scond = 1.;
+	*amax = 0.;
+	return 0;
+    }
+
+/*     Initialize SMIN and AMAX. */
+
+/*     ap[1] is A(1,1) in both packed layouts; the loops below walk only */
+/*     the diagonal entries of AP. */
+
+    s[1] = ap[1];
+    smin = s[1];
+    *amax = s[1];
+
+    if (upper) {
+
+/*        UPLO = 'U':  Upper triangle of A is stored. */
+/*        Find the minimum and maximum diagonal elements. */
+
+/*        In upper packed storage column i holds i entries, so the */
+/*        diagonal A(i,i) sits i slots after A(i-1,i-1): jj += i. */
+
+	jj = 1;
+	i__1 = *n;
+	for (i__ = 2; i__ <= i__1; ++i__) {
+	    jj += i__;
+	    s[i__] = ap[jj];
+/* Computing MIN */
+	    d__1 = smin, d__2 = s[i__];
+	    smin = min(d__1,d__2);
+/* Computing MAX */
+	    d__1 = *amax, d__2 = s[i__];
+	    *amax = max(d__1,d__2);
+/* L10: */
+	}
+
+    } else {
+
+/*        UPLO = 'L':  Lower triangle of A is stored. */
+/*        Find the minimum and maximum diagonal elements. */
+
+/*        In lower packed storage column i-1 holds n-i+2 entries, so */
+/*        A(i,i) sits (n - i + 2) slots after A(i-1,i-1). */
+
+	jj = 1;
+	i__1 = *n;
+	for (i__ = 2; i__ <= i__1; ++i__) {
+	    jj = jj + *n - i__ + 2;
+	    s[i__] = ap[jj];
+/* Computing MIN */
+	    d__1 = smin, d__2 = s[i__];
+	    smin = min(d__1,d__2);
+/* Computing MAX */
+	    d__1 = *amax, d__2 = s[i__];
+	    *amax = max(d__1,d__2);
+/* L20: */
+	}
+    }
+
+    if (smin <= 0.) {
+
+/*        Find the first non-positive diagonal element and return. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (s[i__] <= 0.) {
+		*info = i__;
+		return 0;
+	    }
+/* L30: */
+	}
+    } else {
+
+/*        Set the scale factors to the reciprocals */
+/*        of the diagonal elements. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    s[i__] = 1. / sqrt(s[i__]);
+/* L40: */
+	}
+
+/*        Compute SCOND = min(S(I)) / max(S(I)) */
+
+	*scond = sqrt(smin) / sqrt(*amax);
+    }
+    return 0;
+
+/*     End of DPPEQU */
+
+} /* dppequ_ */
diff --git a/contrib/libs/clapack/dpprfs.c b/contrib/libs/clapack/dpprfs.c
new file mode 100644
index 0000000000..2eef815bd9
--- /dev/null
+++ b/contrib/libs/clapack/dpprfs.c
@@ -0,0 +1,413 @@
+/* dpprfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b12 = -1.;
+static doublereal c_b14 = 1.;
+
+/* Subroutine */ int dpprfs_(char *uplo, integer *n, integer *nrhs,
+	doublereal *ap, doublereal *afp, doublereal *b, integer *ldb,
+	doublereal *x, integer *ldx, doublereal *ferr, doublereal *berr,
+	doublereal *work, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3;
+    doublereal d__1, d__2, d__3;
+
+    /* Local variables */
+    integer i__, j, k;
+    doublereal s;
+    integer ik, kk;
+    doublereal xk;
+    integer nz;
+    doublereal eps;
+    integer kase;
+    doublereal safe1, safe2;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *), daxpy_(integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *);
+    integer count;
+    extern /* Subroutine */ int dspmv_(char *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+	     integer *);
+    logical upper;
+    extern /* Subroutine */ int dlacn2_(integer *, doublereal *, doublereal *,
+	     integer *, doublereal *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    doublereal safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal lstres;
+    extern /* Subroutine */ int dpptrs_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*  Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DPPRFS improves the computed solution to a system of linear */
+/*  equations when the coefficient matrix is symmetric positive definite */
+/*  and packed, and provides error bounds and backward error estimates */
+/*  for the solution. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangle of A is stored; */
+/*          = 'L':  Lower triangle of A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrices B and X.  NRHS >= 0. */
+
+/*  AP      (input) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/*          The upper or lower triangle of the symmetric matrix A, packed */
+/*          columnwise in a linear array.  The j-th column of A is stored */
+/*          in the array AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/*  AFP     (input) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/*          The triangular factor U or L from the Cholesky factorization */
+/*          A = U**T*U or A = L*L**T, as computed by DPPTRF/ZPPTRF, */
+/*          packed columnwise in a linear array in the same format as A */
+/*          (see AP). */
+
+/*  B       (input) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/*          The right hand side matrix B. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  X       (input/output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/*          On entry, the solution matrix X, as computed by DPPTRS. */
+/*          On exit, the improved solution matrix X. */
+
+/*  LDX     (input) INTEGER */
+/*          The leading dimension of the array X.  LDX >= max(1,N). */
+
+/*  FERR    (output) DOUBLE PRECISION array, dimension (NRHS) */
+/*          The estimated forward error bound for each solution vector */
+/*          X(j) (the j-th column of the solution matrix X). */
+/*          If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/*          is an estimated upper bound for the magnitude of the largest */
+/*          element in (X(j) - XTRUE) divided by the magnitude of the */
+/*          largest element in X(j).  The estimate is as reliable as */
+/*          the estimate for RCOND, and is almost always a slight */
+/*          overestimate of the true error. */
+
+/*  BERR    (output) DOUBLE PRECISION array, dimension (NRHS) */
+/*          The componentwise relative backward error of each solution */
+/*          vector X(j) (i.e., the smallest relative change in */
+/*          any element of A or B that makes X(j) an exact solution). */
+
+/*  WORK    (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/*  IWORK   (workspace) INTEGER array, dimension (N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  Internal Parameters */
+/*  =================== */
+
+/*  ITMAX is the maximum number of steps of iterative refinement. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    --afp;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --iwork;
+
+/*     WORK is used in three n-length sections: work(1:n) accumulates */
+/*     abs(A)*abs(X) + abs(B), work(n+1:2n) holds the residual (later */
+/*     the DLACN2 solve vector), and work(2n+1:3n) is DLACN2 scratch. */
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    } else if (*ldx < max(1,*n)) {
+	*info = -9;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPPRFS", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] = 0.;
+	    berr[j] = 0.;
+/* L10: */
+	}
+	return 0;
+    }
+
+/*     NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+    nz = *n + 1;
+    eps = dlamch_("Epsilon");
+    safmin = dlamch_("Safe minimum");
+    safe1 = nz * safmin;
+    safe2 = safe1 / eps;
+
+/*     Do for each right hand side */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+	count = 1;
+	lstres = 3.;
+L20:
+
+/*        Loop until stopping criterion is satisfied. */
+
+/*        Compute residual R = B - A * X */
+
+	dcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+	dspmv_(uplo, n, &c_b12, &ap[1], &x[j * x_dim1 + 1], &c__1, &c_b14, &
+		work[*n + 1], &c__1);
+
+/*        Compute componentwise relative backward error from formula */
+
+/*        max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/*        where abs(Z) is the componentwise absolute value of the matrix */
+/*        or vector Z.  If the i-th component of the denominator is less */
+/*        than SAFE2, then SAFE1 is added to the i-th components of the */
+/*        numerator and denominator before dividing. */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    work[i__] = (d__1 = b[i__ + j * b_dim1], abs(d__1));
+/* L30: */
+	}
+
+/*        Compute abs(A)*abs(X) + abs(B). */
+
+/*        kk tracks the packed-storage offset of column k's first entry; */
+/*        ik walks the entries of column k within AP. */
+
+	kk = 1;
+	if (upper) {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		s = 0.;
+		xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+		ik = kk;
+		i__3 = k - 1;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    work[i__] += (d__1 = ap[ik], abs(d__1)) * xk;
+		    s += (d__1 = ap[ik], abs(d__1)) * (d__2 = x[i__ + j *
+			    x_dim1], abs(d__2));
+		    ++ik;
+/* L40: */
+		}
+		work[k] = work[k] + (d__1 = ap[kk + k - 1], abs(d__1)) * xk +
+			s;
+		kk += k;
+/* L50: */
+	    }
+	} else {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		s = 0.;
+		xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+		work[k] += (d__1 = ap[kk], abs(d__1)) * xk;
+		ik = kk + 1;
+		i__3 = *n;
+		for (i__ = k + 1; i__ <= i__3; ++i__) {
+		    work[i__] += (d__1 = ap[ik], abs(d__1)) * xk;
+		    s += (d__1 = ap[ik], abs(d__1)) * (d__2 = x[i__ + j *
+			    x_dim1], abs(d__2));
+		    ++ik;
+/* L60: */
+		}
+		work[k] += s;
+		kk += *n - k + 1;
+/* L70: */
+	    }
+	}
+	s = 0.;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (work[i__] > safe2) {
+/* Computing MAX */
+		d__2 = s, d__3 = (d__1 = work[*n + i__], abs(d__1)) / work[
+			i__];
+		s = max(d__2,d__3);
+	    } else {
+/* Computing MAX */
+		d__2 = s, d__3 = ((d__1 = work[*n + i__], abs(d__1)) + safe1)
+			/ (work[i__] + safe1);
+		s = max(d__2,d__3);
+	    }
+/* L80: */
+	}
+	berr[j] = s;
+
+/*        Test stopping criterion. Continue iterating if */
+/*           1) The residual BERR(J) is larger than machine epsilon, and */
+/*           2) BERR(J) decreased by at least a factor of 2 during the */
+/*              last iteration, and */
+/*           3) At most ITMAX iterations tried. */
+
+/*        (ITMAX = 5 is hard-coded in the count test below.) */
+
+	if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/*           Update solution and try again. */
+
+	    dpptrs_(uplo, n, &c__1, &afp[1], &work[*n + 1], n, info);
+	    daxpy_(n, &c_b14, &work[*n + 1], &c__1, &x[j * x_dim1 + 1], &c__1)
+		    ;
+	    lstres = berr[j];
+	    ++count;
+	    goto L20;
+	}
+
+/*        Bound error from formula */
+
+/*        norm(X - XTRUE) / norm(X) .le. FERR = */
+/*        norm( abs(inv(A))* */
+/*           ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/*        where */
+/*          norm(Z) is the magnitude of the largest component of Z */
+/*          inv(A) is the inverse of A */
+/*          abs(Z) is the componentwise absolute value of the matrix or */
+/*             vector Z */
+/*          NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/*          EPS is machine epsilon */
+
+/*        The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/*        is incremented by SAFE1 if the i-th component of */
+/*        abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/*        Use DLACN2 to estimate the infinity-norm of the matrix */
+/*           inv(A) * diag(W), */
+/*        where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (work[i__] > safe2) {
+		work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+			work[i__];
+	    } else {
+		work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+			work[i__] + safe1;
+	    }
+/* L90: */
+	}
+
+/*        Reverse communication with DLACN2; both KASE branches solve */
+/*        with A via DPPTRS (A is symmetric, so A' = A), differing only */
+/*        in whether diag(W) is applied after (KASE = 1) or before */
+/*        (KASE = 2) the solve. */
+
+	kase = 0;
+L100:
+	dlacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+		kase, isave);
+	if (kase != 0) {
+	    if (kase == 1) {
+
+/*              Multiply by diag(W)*inv(A'). */
+
+		dpptrs_(uplo, n, &c__1, &afp[1], &work[*n + 1], n, info);
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    work[*n + i__] = work[i__] * work[*n + i__];
+/* L110: */
+		}
+	    } else if (kase == 2) {
+
+/*              Multiply by inv(A)*diag(W). */
+
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    work[*n + i__] = work[i__] * work[*n + i__];
+/* L120: */
+		}
+		dpptrs_(uplo, n, &c__1, &afp[1], &work[*n + 1], n, info);
+	    }
+	    goto L100;
+	}
+
+/*        Normalize error. */
+
+	lstres = 0.;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    d__2 = lstres, d__3 = (d__1 = x[i__ + j * x_dim1], abs(d__1));
+	    lstres = max(d__2,d__3);
+/* L130: */
+	}
+	if (lstres != 0.) {
+	    ferr[j] /= lstres;
+	}
+
+/* L140: */
+    }
+
+    return 0;
+
+/*     End of DPPRFS */
+
+} /* dpprfs_ */
diff --git a/contrib/libs/clapack/dppsv.c b/contrib/libs/clapack/dppsv.c
new file mode 100644
index 0000000000..924d4294da
--- /dev/null
+++ b/contrib/libs/clapack/dppsv.c
@@ -0,0 +1,161 @@
+/* dppsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* NOTE(review): machine-generated by f2c from dppsv.f -- prefer fixing the */
+/* Fortran source and regenerating over hand-editing this translation. */
+/* Subroutine */ int dppsv_(char *uplo, integer *n, integer *nrhs, doublereal
+	*ap, doublereal *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *), dpptrf_(
+	    char *, integer *, doublereal *, integer *), dpptrs_(char
+	    *, integer *, integer *, doublereal *, doublereal *, integer *,
+	    integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPPSV computes the solution to a real system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric positive definite matrix stored in */
+/* packed format and X and B are N-by-NRHS matrices. */
+
+/* The Cholesky decomposition is used to factor A as */
+/* A = U**T* U, if UPLO = 'U', or */
+/* A = L * L**T, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is a lower triangular */
+/* matrix. The factored form of A is then used to solve the system of */
+/* equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input/output) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T, in the same storage */
+/* format as A. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i of A is not */
+/* positive definite, so the factorization could not be */
+/* completed, and the solution has not been computed. */
+
+/* Further Details */
+/* =============== */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the symmetric matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = conjg(aji)) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* Decrement the array pointers so the 1-based Fortran-style indexing */
+/* used by the translation (ap[1], b[b_offset], ...) stays valid in C. */
+    --ap;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*ldb < max(1,*n)) {
+	*info = -6;
+    }
+    if (*info != 0) {
+/* XERBLA receives the 1-based position of the first invalid argument. */
+	i__1 = -(*info);
+	xerbla_("DPPSV ", &i__1);
+	return 0;
+    }
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+    dpptrf_(uplo, n, &ap[1], info);
+    if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+	dpptrs_(uplo, n, nrhs, &ap[1], &b[b_offset], ldb, info);
+
+    }
+    return 0;
+
+/* End of DPPSV */
+
+} /* dppsv_ */
diff --git a/contrib/libs/clapack/dppsvx.c b/contrib/libs/clapack/dppsvx.c
new file mode 100644
index 0000000000..d0db68721a
--- /dev/null
+++ b/contrib/libs/clapack/dppsvx.c
@@ -0,0 +1,455 @@
+/* dppsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* NOTE(review): machine-generated by f2c from dppsvx.f -- prefer fixing the */
+/* Fortran source and regenerating over hand-editing this translation. */
+/* Subroutine */ int dppsvx_(char *fact, char *uplo, integer *n, integer *
+	nrhs, doublereal *ap, doublereal *afp, char *equed, doublereal *s,
+	doublereal *b, integer *ldb, doublereal *x, integer *ldx, doublereal *
+	rcond, doublereal *ferr, doublereal *berr, doublereal *work, integer *
+	iwork, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2;
+    doublereal d__1, d__2;
+
+    /* Local variables */
+    integer i__, j;
+    doublereal amax, smin, smax;
+    extern logical lsame_(char *, char *);
+    doublereal scond, anorm;
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    logical equil, rcequ;
+    extern doublereal dlamch_(char *);
+    logical nofact;
+    extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *),
+	    xerbla_(char *, integer *);
+    doublereal bignum;
+    extern doublereal dlansp_(char *, char *, integer *, doublereal *,
+	    doublereal *);
+    extern /* Subroutine */ int dppcon_(char *, integer *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, integer *, integer *), dlaqsp_(char *, integer *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, char *);
+    integer infequ;
+    extern /* Subroutine */ int dppequ_(char *, integer *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, integer *),
+	    dpprfs_(char *, integer *, integer *, doublereal *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *, integer *),
+	    dpptrf_(char *, integer *, doublereal *, integer *);
+    doublereal smlnum;
+    extern /* Subroutine */ int dpptrs_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPPSVX uses the Cholesky factorization A = U**T*U or A = L*L**T to */
+/* compute the solution to a real system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric positive definite matrix stored in */
+/* packed format and X and B are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/* the system: */
+/* diag(S) * A * diag(S) * inv(diag(S)) * X = diag(S) * B */
+/* Whether or not the system will be equilibrated depends on the */
+/* scaling of the matrix A, but if equilibration is used, A is */
+/* overwritten by diag(S)*A*diag(S) and B by diag(S)*B. */
+
+/* 2. If FACT = 'N' or 'E', the Cholesky decomposition is used to */
+/* factor the matrix A (after equilibration if FACT = 'E') as */
+/* A = U**T* U, if UPLO = 'U', or */
+/* A = L * L**T, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is a lower triangular */
+/* matrix. */
+
+/* 3. If the leading i-by-i principal minor is not positive definite, */
+/* then the routine returns with INFO = i. Otherwise, the factored */
+/* form of A is used to estimate the condition number of the matrix */
+/* A. If the reciprocal of the condition number is less than machine */
+/* precision, INFO = N+1 is returned as a warning, but the routine */
+/* still goes on to solve for X and compute error bounds as */
+/* described below. */
+
+/* 4. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 5. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* 6. If equilibration was used, the matrix X is premultiplied by */
+/* diag(S) so that it solves the original system before */
+/* equilibration. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of the matrix A is */
+/* supplied on entry, and if not, whether the matrix A should be */
+/* equilibrated before it is factored. */
+/* = 'F': On entry, AFP contains the factored form of A. */
+/* If EQUED = 'Y', the matrix A has been equilibrated */
+/* with scaling factors given by S. AP and AFP will not */
+/* be modified. */
+/* = 'N': The matrix A will be copied to AFP and factored. */
+/* = 'E': The matrix A will be equilibrated if necessary, then */
+/* copied to AFP and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AP (input/output) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array, except if FACT = 'F' */
+/* and EQUED = 'Y', then A must contain the equilibrated matrix */
+/* diag(S)*A*diag(S). The j-th column of A is stored in the */
+/* array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. A is not modified if */
+/* FACT = 'F' or 'N', or if FACT = 'E' and EQUED = 'N' on exit. */
+
+/* On exit, if FACT = 'E' and EQUED = 'Y', A is overwritten by */
+/* diag(S)*A*diag(S). */
+
+/* AFP (input or output) DOUBLE PRECISION array, dimension */
+/* (N*(N+1)/2) */
+/* If FACT = 'F', then AFP is an input argument and on entry */
+/* contains the triangular factor U or L from the Cholesky */
+/* factorization A = U'*U or A = L*L', in the same storage */
+/* format as A. If EQUED .ne. 'N', then AFP is the factored */
+/* form of the equilibrated matrix A. */
+
+/* If FACT = 'N', then AFP is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U'*U or A = L*L' of the original matrix A. */
+
+/* If FACT = 'E', then AFP is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U'*U or A = L*L' of the equilibrated */
+/* matrix A (see the description of AP for the form of the */
+/* equilibrated matrix). */
+
+/* EQUED (input or output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration (always true if FACT = 'N'). */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+/* EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/* output argument. */
+
+/* S (input or output) DOUBLE PRECISION array, dimension (N) */
+/* The scale factors for A; not accessed if EQUED = 'N'. S is */
+/* an input argument if FACT = 'F'; otherwise, S is an output */
+/* argument. If FACT = 'F' and EQUED = 'Y', each element of S */
+/* must be positive. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if EQUED = 'N', B is not modified; if EQUED = 'Y', */
+/* B is overwritten by diag(S) * B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X to */
+/* the original system of equations. Note that if EQUED = 'Y', */
+/* A and B are modified on exit, and the solution to the */
+/* equilibrated system is inv(diag(S))*X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A after equilibration (if done). If RCOND is less than the */
+/* machine precision (in particular, if RCOND = 0), the matrix */
+/* is singular to working precision. This condition is */
+/* indicated by a return code of INFO > 0. */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: the leading minor of order i of A is */
+/* not positive definite, so the factorization */
+/* could not be completed, and the solution has not */
+/* been computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* Further Details */
+/* =============== */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the symmetric matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = conjg(aji)) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+/* Decrement the array pointers so the 1-based Fortran-style indexing */
+/* of the translation (ap[1], s[j], b[i + j*b_dim1], ...) stays valid. */
+    --ap;
+    --afp;
+    --s;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    nofact = lsame_(fact, "N");
+    equil = lsame_(fact, "E");
+    if (nofact || equil) {
+/* FACT = 'N' or 'E': EQUED is an output; start from "no equilibration". */
+	*(unsigned char *)equed = 'N';
+	rcequ = FALSE_;
+    } else {
+/* FACT = 'F': EQUED/S are inputs; smlnum and bignum are only needed */
+/* below to validate the user-supplied scale factors S. */
+	rcequ = lsame_(equed, "Y");
+	smlnum = dlamch_("Safe minimum");
+	bignum = 1. / smlnum;
+    }
+
+/* Test the input parameters. */
+
+    if (! nofact && ! equil && ! lsame_(fact, "F")) {
+	*info = -1;
+    } else if (! lsame_(uplo, "U") && ! lsame_(uplo,
+	    "L")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (lsame_(fact, "F") && ! (rcequ || lsame_(
+	    equed, "N"))) {
+	*info = -7;
+    } else {
+	if (rcequ) {
+/* Check that every user-supplied scale factor is positive and compute */
+/* SCOND from the extreme factors, clamped to the representable range. */
+	    smin = bignum;
+	    smax = 0.;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		d__1 = smin, d__2 = s[j];
+		smin = min(d__1,d__2);
+/* Computing MAX */
+		d__1 = smax, d__2 = s[j];
+		smax = max(d__1,d__2);
+/* L10: */
+	    }
+	    if (smin <= 0.) {
+		*info = -8;
+	    } else if (*n > 0) {
+		scond = max(smin,smlnum) / min(smax,bignum);
+	    } else {
+		scond = 1.;
+	    }
+	}
+	if (*info == 0) {
+	    if (*ldb < max(1,*n)) {
+		*info = -10;
+	    } else if (*ldx < max(1,*n)) {
+		*info = -12;
+	    }
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPPSVX", &i__1);
+	return 0;
+    }
+
+    if (equil) {
+
+/* Compute row and column scalings to equilibrate the matrix A. */
+
+	dppequ_(uplo, n, &ap[1], &s[1], &scond, &amax, &infequ);
+	if (infequ == 0) {
+
+/* Equilibrate the matrix. */
+
+	    dlaqsp_(uplo, n, &ap[1], &s[1], &scond, &amax, equed);
+/* DLAQSP decides whether scaling is worthwhile and sets EQUED itself. */
+	    rcequ = lsame_(equed, "Y");
+	}
+    }
+
+/* Scale the right-hand side. */
+
+    if (rcequ) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *n;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		b[i__ + j * b_dim1] = s[i__] * b[i__ + j * b_dim1];
+/* L20: */
+	    }
+/* L30: */
+	}
+    }
+
+    if (nofact || equil) {
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+	i__1 = *n * (*n + 1) / 2;
+	dcopy_(&i__1, &ap[1], &c__1, &afp[1], &c__1);
+	dpptrf_(uplo, n, &afp[1], info);
+
+/* Return if INFO is non-zero. */
+
+	if (*info > 0) {
+	    *rcond = 0.;
+	    return 0;
+	}
+    }
+
+/* Compute the norm of the matrix A. */
+
+    anorm = dlansp_("I", uplo, n, &ap[1], &work[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+    dppcon_(uplo, n, &afp[1], &anorm, rcond, &work[1], &iwork[1], info);
+
+/* Compute the solution matrix X. */
+
+    dlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    dpptrs_(uplo, n, nrhs, &afp[1], &x[x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solution and */
+/* compute error bounds and backward error estimates for it. */
+
+    dpprfs_(uplo, n, nrhs, &ap[1], &afp[1], &b[b_offset], ldb, &x[x_offset],
+	    ldx, &ferr[1], &berr[1], &work[1], &iwork[1], info);
+
+/* Transform the solution matrix X to a solution of the original */
+/* system. */
+
+    if (rcequ) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *n;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		x[i__ + j * x_dim1] = s[i__] * x[i__ + j * x_dim1];
+/* L40: */
+	    }
+/* L50: */
+	}
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] /= scond;
+/* L60: */
+	}
+    }
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+    if (*rcond < dlamch_("Epsilon")) {
+	*info = *n + 1;
+    }
+
+    return 0;
+
+/* End of DPPSVX */
+
+} /* dppsvx_ */
diff --git a/contrib/libs/clapack/dpptrf.c b/contrib/libs/clapack/dpptrf.c
new file mode 100644
index 0000000000..dff1a79c62
--- /dev/null
+++ b/contrib/libs/clapack/dpptrf.c
@@ -0,0 +1,223 @@
+/* dpptrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b16 = -1.;
+
+/* NOTE(review): machine-generated by f2c from dpptrf.f -- prefer fixing the */
+/* Fortran source and regenerating over hand-editing this translation. */
+/* Subroutine */ int dpptrf_(char *uplo, integer *n, doublereal *ap, integer *
+	info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer j, jc, jj;
+    doublereal ajj;
+    extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+	    integer *);
+    extern /* Subroutine */ int dspr_(char *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *), dscal_(integer *,
+	    doublereal *, doublereal *, integer *);
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int dtpsv_(char *, char *, char *, integer *,
+	    doublereal *, doublereal *, integer *),
+	    xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPPTRF computes the Cholesky factorization of a real symmetric */
+/* positive definite matrix A stored in packed format. */
+
+/* The factorization has the form */
+/* A = U**T * U, if UPLO = 'U', or */
+/* A = L * L**T, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. */
+
+/* On exit, if INFO = 0, the triangular factor U or L from the */
+/* Cholesky factorization A = U**T*U or A = L*L**T, in the same */
+/* storage format as A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* Further Details */
+/* ======= ======= */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the symmetric matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = aji) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* Decrement the pointer so 1-based packed indexing (ap[1]..) is valid. */
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPPTRF", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+	jj = 0;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* In upper packed storage jc indexes A(1,j) and jj (after the */
+/* update below) indexes the diagonal element A(j,j). */
+	    jc = jj + 1;
+	    jj += j;
+
+/* Compute elements 1:J-1 of column J. */
+
+	    if (j > 1) {
+		i__2 = j - 1;
+		dtpsv_("Upper", "Transpose", "Non-unit", &i__2, &ap[1], &ap[
+			jc], &c__1);
+	    }
+
+/* Compute U(J,J) and test for non-positive-definiteness. */
+
+	    i__2 = j - 1;
+	    ajj = ap[jj] - ddot_(&i__2, &ap[jc], &c__1, &ap[jc], &c__1);
+	    if (ajj <= 0.) {
+		ap[jj] = ajj;
+		goto L30;
+	    }
+	    ap[jj] = sqrt(ajj);
+/* L10: */
+	}
+    } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+	jj = 1;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+
+/* Compute L(J,J) and test for non-positive-definiteness. */
+
+	    ajj = ap[jj];
+	    if (ajj <= 0.) {
+		ap[jj] = ajj;
+		goto L30;
+	    }
+	    ajj = sqrt(ajj);
+	    ap[jj] = ajj;
+
+/* Compute elements J+1:N of column J and update the trailing */
+/* submatrix. */
+
+	    if (j < *n) {
+		i__2 = *n - j;
+		d__1 = 1. / ajj;
+		dscal_(&i__2, &d__1, &ap[jj + 1], &c__1);
+		i__2 = *n - j;
+		dspr_("Lower", &i__2, &c_b16, &ap[jj + 1], &c__1, &ap[jj + *n
+			- j + 1]);
+		jj = jj + *n - j + 1;
+	    }
+/* L20: */
+	}
+    }
+    goto L40;
+
+L30:
+/* Non-positive pivot encountered: j is the order of the first */
+/* leading minor that is not positive definite. */
+    *info = j;
+
+L40:
+    return 0;
+
+/* End of DPPTRF */
+
+} /* dpptrf_ */
diff --git a/contrib/libs/clapack/dpptri.c b/contrib/libs/clapack/dpptri.c
new file mode 100644
index 0000000000..5de72f0c4c
--- /dev/null
+++ b/contrib/libs/clapack/dpptri.c
@@ -0,0 +1,173 @@
+/* dpptri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b8 = 1.;
+static integer c__1 = 1;
+
+/* NOTE(review): machine-generated by f2c from dpptri.f -- prefer fixing the */
+/* Fortran source and regenerating over hand-editing this translation. */
+/* Subroutine */ int dpptri_(char *uplo, integer *n, doublereal *ap, integer *
+	info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer j, jc, jj;
+    doublereal ajj;
+    integer jjn;
+    extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+	    integer *);
+    extern /* Subroutine */ int dspr_(char *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *), dscal_(integer *,
+	    doublereal *, doublereal *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dtpmv_(char *, char *, char *, integer *,
+	    doublereal *, doublereal *, integer *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *), dtptri_(
+	    char *, char *, integer *, doublereal *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPPTRI computes the inverse of a real symmetric positive definite */
+/* matrix A using the Cholesky factorization A = U**T*U or A = L*L**T */
+/* computed by DPPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangular factor is stored in AP; */
+/* = 'L': Lower triangular factor is stored in AP. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* On entry, the triangular factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T, packed columnwise as */
+/* a linear array. The j-th column of U or L is stored in the */
+/* array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = U(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = L(i,j) for j<=i<=n. */
+
+/* On exit, the upper or lower triangle of the (symmetric) */
+/* inverse of A, overwriting the input factor U or L. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the (i,i) element of the factor U or L is */
+/* zero, and the inverse could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* Decrement the pointer so 1-based packed indexing (ap[1]..) is valid. */
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPPTRI", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Invert the triangular Cholesky factor U or L. */
+
+    dtptri_(uplo, "Non-unit", n, &ap[1], info);
+    if (*info > 0) {
+/* DTPTRI found a zero diagonal; INFO already holds its index. */
+	return 0;
+    }
+
+    if (upper) {
+
+/* Compute the product inv(U) * inv(U)'. */
+
+	jj = 0;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* jc indexes the start of packed column j; jj its diagonal entry. */
+	    jc = jj + 1;
+	    jj += j;
+	    if (j > 1) {
+		i__2 = j - 1;
+		dspr_("Upper", &i__2, &c_b8, &ap[jc], &c__1, &ap[1]);
+	    }
+	    ajj = ap[jj];
+	    dscal_(&j, &ajj, &ap[jc], &c__1);
+/* L10: */
+	}
+
+    } else {
+
+/* Compute the product inv(L)' * inv(L). */
+
+	jj = 1;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* jj indexes the diagonal of packed column j; jjn the next column. */
+	    jjn = jj + *n - j + 1;
+	    i__2 = *n - j + 1;
+	    ap[jj] = ddot_(&i__2, &ap[jj], &c__1, &ap[jj], &c__1);
+	    if (j < *n) {
+		i__2 = *n - j;
+		dtpmv_("Lower", "Transpose", "Non-unit", &i__2, &ap[jjn], &ap[
+			jj + 1], &c__1);
+	    }
+	    jj = jjn;
+/* L20: */
+	}
+    }
+
+    return 0;
+
+/* End of DPPTRI */
+
+} /* dpptri_ */
diff --git a/contrib/libs/clapack/dpptrs.c b/contrib/libs/clapack/dpptrs.c
new file mode 100644
index 0000000000..888bfef6e6
--- /dev/null
+++ b/contrib/libs/clapack/dpptrs.c
@@ -0,0 +1,170 @@
+/* dpptrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dpptrs_(char *uplo, integer *n, integer *nrhs,
+ doublereal *ap, doublereal *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ integer i__;
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern /* Subroutine */ int dtpsv_(char *, char *, char *, integer *,
+ doublereal *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPPTRS solves a system of linear equations A*X = B with a symmetric */
+/* positive definite matrix A in packed storage using the Cholesky */
+/* factorization A = U**T*U or A = L*L**T computed by DPPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**T*U or A = L*L**T, packed columnwise in a linear */
+/* array. The j-th column of U or L is stored in the array AP */
+/* as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = U(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = L(i,j) for j<=i<=n. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPPTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Solve A*X = B where A = U'*U. */
+
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Solve U'*X = B, overwriting B with X. */
+
+ dtpsv_("Upper", "Transpose", "Non-unit", n, &ap[1], &b[i__ *
+ b_dim1 + 1], &c__1);
+
+/* Solve U*X = B, overwriting B with X. */
+
+ dtpsv_("Upper", "No transpose", "Non-unit", n, &ap[1], &b[i__ *
+ b_dim1 + 1], &c__1);
+/* L10: */
+ }
+ } else {
+
+/* Solve A*X = B where A = L*L'. */
+
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Solve L*Y = B, overwriting B with X. */
+
+ dtpsv_("Lower", "No transpose", "Non-unit", n, &ap[1], &b[i__ *
+ b_dim1 + 1], &c__1);
+
+/* Solve L'*X = Y, overwriting B with X. */
+
+ dtpsv_("Lower", "Transpose", "Non-unit", n, &ap[1], &b[i__ *
+ b_dim1 + 1], &c__1);
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of DPPTRS */
+
+} /* dpptrs_ */
diff --git a/contrib/libs/clapack/dpstf2.c b/contrib/libs/clapack/dpstf2.c
new file mode 100644
index 0000000000..a66b6147f1
--- /dev/null
+++ b/contrib/libs/clapack/dpstf2.c
@@ -0,0 +1,395 @@
+/* dpstf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b16 = -1.;
+static doublereal c_b18 = 1.;
+
+/* Subroutine */ int dpstf2_(char *uplo, integer *n, doublereal *a, integer *
+ lda, integer *piv, integer *rank, doublereal *tol, doublereal *work,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, maxlocval;
+ doublereal ajj;
+ integer pvt;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+ doublereal dtemp;
+ integer itemp;
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ doublereal dstop;
+ logical upper;
+ extern doublereal dlamch_(char *);
+ extern logical disnan_(doublereal *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer dmaxloc_(doublereal *, integer *);
+
+
+/* -- LAPACK PROTOTYPE routine (version 3.2) -- */
+/* Craig Lucas, University of Manchester / NAG Ltd. */
+/* October, 2008 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPSTF2 computes the Cholesky factorization with complete */
+/* pivoting of a real symmetric positive semidefinite matrix A. */
+
+/* The factorization has the form */
+/* P' * A * P = U' * U , if UPLO = 'U', */
+/* P' * A * P = L * L', if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular, and */
+/* P is stored as vector PIV. */
+
+/* This algorithm does not attempt to check that A is positive */
+/* semidefinite. This version of the algorithm calls level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization as above. */
+
+/* PIV (output) INTEGER array, dimension (N) */
+/* PIV is such that the nonzero entries are P( PIV(K), K ) = 1. */
+
+/* RANK (output) INTEGER */
+/* The rank of A given by the number of steps the algorithm */
+/* completed. */
+
+/* TOL (input) DOUBLE PRECISION */
+/* User defined tolerance. If TOL < 0, then N*U*MAX( A( K,K ) ) */
+/* will be used. The algorithm terminates at the (K-1)st step */
+/* if the pivot <= TOL. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* WORK DOUBLE PRECISION array, dimension (2*N) */
+/* Work space. */
+
+/* INFO (output) INTEGER */
+/* < 0: If INFO = -K, the K-th argument had an illegal value, */
+/* = 0: algorithm completed successfully, and */
+/* > 0: the matrix A is either rank deficient with computed rank */
+/* as returned in RANK, or is indefinite. See Section 7 of */
+/* LAPACK Working Note #161 for further information. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ --work;
+ --piv;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPSTF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Initialize PIV */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ piv[i__] = i__;
+/* L100: */
+ }
+
+/* Compute stopping value */
+
+ pvt = 1;
+ ajj = a[pvt + pvt * a_dim1];
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ if (a[i__ + i__ * a_dim1] > ajj) {
+ pvt = i__;
+ ajj = a[pvt + pvt * a_dim1];
+ }
+ }
+ if (ajj == 0. || disnan_(&ajj)) {
+ *rank = 0;
+ *info = 1;
+ goto L170;
+ }
+
+/* Compute stopping value if not supplied */
+
+ if (*tol < 0.) {
+ dstop = *n * dlamch_("Epsilon") * ajj;
+ } else {
+ dstop = *tol;
+ }
+
+/* Set first half of WORK to zero, holds dot products */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L110: */
+ }
+
+ if (upper) {
+
+/* Compute the Cholesky factorization P' * A * P = U' * U */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Find pivot, test for exit, else swap rows and columns */
+/* Update dot products, compute possible pivots which are */
+/* stored in the second half of WORK */
+
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+
+ if (j > 1) {
+/* Computing 2nd power */
+ d__1 = a[j - 1 + i__ * a_dim1];
+ work[i__] += d__1 * d__1;
+ }
+ work[*n + i__] = a[i__ + i__ * a_dim1] - work[i__];
+
+/* L120: */
+ }
+
+ if (j > 1) {
+ maxlocval = (*n << 1) - (*n + j) + 1;
+ itemp = dmaxloc_(&work[*n + j], &maxlocval);
+ pvt = itemp + j - 1;
+ ajj = work[*n + pvt];
+ if (ajj <= dstop || disnan_(&ajj)) {
+ a[j + j * a_dim1] = ajj;
+ goto L160;
+ }
+ }
+
+ if (j != pvt) {
+
+/* Pivot OK, so can now swap pivot rows and columns */
+
+ a[pvt + pvt * a_dim1] = a[j + j * a_dim1];
+ i__2 = j - 1;
+ dswap_(&i__2, &a[j * a_dim1 + 1], &c__1, &a[pvt * a_dim1 + 1],
+ &c__1);
+ if (pvt < *n) {
+ i__2 = *n - pvt;
+ dswap_(&i__2, &a[j + (pvt + 1) * a_dim1], lda, &a[pvt + (
+ pvt + 1) * a_dim1], lda);
+ }
+ i__2 = pvt - j - 1;
+ dswap_(&i__2, &a[j + (j + 1) * a_dim1], lda, &a[j + 1 + pvt *
+ a_dim1], &c__1);
+
+/* Swap dot products and PIV */
+
+ dtemp = work[j];
+ work[j] = work[pvt];
+ work[pvt] = dtemp;
+ itemp = piv[pvt];
+ piv[pvt] = piv[j];
+ piv[j] = itemp;
+ }
+
+ ajj = sqrt(ajj);
+ a[j + j * a_dim1] = ajj;
+
+/* Compute elements J+1:N of row J */
+
+ if (j < *n) {
+ i__2 = j - 1;
+ i__3 = *n - j;
+ dgemv_("Trans", &i__2, &i__3, &c_b16, &a[(j + 1) * a_dim1 + 1]
+, lda, &a[j * a_dim1 + 1], &c__1, &c_b18, &a[j + (j +
+ 1) * a_dim1], lda);
+ i__2 = *n - j;
+ d__1 = 1. / ajj;
+ dscal_(&i__2, &d__1, &a[j + (j + 1) * a_dim1], lda);
+ }
+
+/* L130: */
+ }
+
+ } else {
+
+/* Compute the Cholesky factorization P' * A * P = L * L' */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Find pivot, test for exit, else swap rows and columns */
+/* Update dot products, compute possible pivots which are */
+/* stored in the second half of WORK */
+
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+
+ if (j > 1) {
+/* Computing 2nd power */
+ d__1 = a[i__ + (j - 1) * a_dim1];
+ work[i__] += d__1 * d__1;
+ }
+ work[*n + i__] = a[i__ + i__ * a_dim1] - work[i__];
+
+/* L140: */
+ }
+
+ if (j > 1) {
+ maxlocval = (*n << 1) - (*n + j) + 1;
+ itemp = dmaxloc_(&work[*n + j], &maxlocval);
+ pvt = itemp + j - 1;
+ ajj = work[*n + pvt];
+ if (ajj <= dstop || disnan_(&ajj)) {
+ a[j + j * a_dim1] = ajj;
+ goto L160;
+ }
+ }
+
+ if (j != pvt) {
+
+/* Pivot OK, so can now swap pivot rows and columns */
+
+ a[pvt + pvt * a_dim1] = a[j + j * a_dim1];
+ i__2 = j - 1;
+ dswap_(&i__2, &a[j + a_dim1], lda, &a[pvt + a_dim1], lda);
+ if (pvt < *n) {
+ i__2 = *n - pvt;
+ dswap_(&i__2, &a[pvt + 1 + j * a_dim1], &c__1, &a[pvt + 1
+ + pvt * a_dim1], &c__1);
+ }
+ i__2 = pvt - j - 1;
+ dswap_(&i__2, &a[j + 1 + j * a_dim1], &c__1, &a[pvt + (j + 1)
+ * a_dim1], lda);
+
+/* Swap dot products and PIV */
+
+ dtemp = work[j];
+ work[j] = work[pvt];
+ work[pvt] = dtemp;
+ itemp = piv[pvt];
+ piv[pvt] = piv[j];
+ piv[j] = itemp;
+ }
+
+ ajj = sqrt(ajj);
+ a[j + j * a_dim1] = ajj;
+
+/* Compute elements J+1:N of column J */
+
+ if (j < *n) {
+ i__2 = *n - j;
+ i__3 = j - 1;
+ dgemv_("No Trans", &i__2, &i__3, &c_b16, &a[j + 1 + a_dim1],
+ lda, &a[j + a_dim1], lda, &c_b18, &a[j + 1 + j *
+ a_dim1], &c__1);
+ i__2 = *n - j;
+ d__1 = 1. / ajj;
+ dscal_(&i__2, &d__1, &a[j + 1 + j * a_dim1], &c__1);
+ }
+
+/* L150: */
+ }
+
+ }
+
+/* Ran to completion, A has full rank */
+
+ *rank = *n;
+
+ goto L170;
+L160:
+
+/* Rank is number of steps completed. Set INFO = 1 to signal */
+/* that the factorization cannot be used to solve a system. */
+
+ *rank = j - 1;
+ *info = 1;
+
+L170:
+ return 0;
+
+/* End of DPSTF2 */
+
+} /* dpstf2_ */
diff --git a/contrib/libs/clapack/dpstrf.c b/contrib/libs/clapack/dpstrf.c
new file mode 100644
index 0000000000..909c525cad
--- /dev/null
+++ b/contrib/libs/clapack/dpstrf.c
@@ -0,0 +1,471 @@
+/* dpstrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b22 = -1.;
+static doublereal c_b24 = 1.;
+
+/* Subroutine */ int dpstrf_(char *uplo, integer *n, doublereal *a, integer *
+ lda, integer *piv, integer *rank, doublereal *tol, doublereal *work,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k, maxlocvar, jb, nb;
+ doublereal ajj;
+ integer pvt;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+ doublereal dtemp;
+ integer itemp;
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ doublereal dstop;
+ logical upper;
+ extern /* Subroutine */ int dsyrk_(char *, char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *), dpstf2_(char *, integer *,
+ doublereal *, integer *, integer *, integer *, doublereal *,
+ doublereal *, integer *);
+ extern doublereal dlamch_(char *);
+ extern logical disnan_(doublereal *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern integer dmaxloc_(doublereal *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Craig Lucas, University of Manchester / NAG Ltd. */
+/* October, 2008 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPSTRF computes the Cholesky factorization with complete */
+/* pivoting of a real symmetric positive semidefinite matrix A. */
+
+/* The factorization has the form */
+/* P' * A * P = U' * U , if UPLO = 'U', */
+/* P' * A * P = L * L', if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular, and */
+/* P is stored as vector PIV. */
+
+/* This algorithm does not attempt to check that A is positive */
+/* semidefinite. This version of the algorithm calls level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization as above. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* PIV (output) INTEGER array, dimension (N) */
+/* PIV is such that the nonzero entries are P( PIV(K), K ) = 1. */
+
+/* RANK (output) INTEGER */
+/* The rank of A given by the number of steps the algorithm */
+/* completed. */
+
+/* TOL (input) DOUBLE PRECISION */
+/* User defined tolerance. If TOL < 0, then N*U*MAX( A(K,K) ) */
+/* will be used. The algorithm terminates at the (K-1)st step */
+/* if the pivot <= TOL. */
+
+/* WORK DOUBLE PRECISION array, dimension (2*N) */
+/* Work space. */
+
+/* INFO (output) INTEGER */
+/* < 0: If INFO = -K, the K-th argument had an illegal value, */
+/* = 0: algorithm completed successfully, and */
+/* > 0: the matrix A is either rank deficient with computed rank */
+/* as returned in RANK, or is indefinite. See Section 7 of */
+/* LAPACK Working Note #161 for further information. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --work;
+ --piv;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPSTRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Get block size */
+
+ nb = ilaenv_(&c__1, "DPOTRF", uplo, n, &c_n1, &c_n1, &c_n1);
+ if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code */
+
+ dpstf2_(uplo, n, &a[a_dim1 + 1], lda, &piv[1], rank, tol, &work[1],
+ info);
+ goto L200;
+
+ } else {
+
+/* Initialize PIV */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ piv[i__] = i__;
+/* L100: */
+ }
+
+/* Compute stopping value */
+
+ pvt = 1;
+ ajj = a[pvt + pvt * a_dim1];
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ if (a[i__ + i__ * a_dim1] > ajj) {
+ pvt = i__;
+ ajj = a[pvt + pvt * a_dim1];
+ }
+ }
+ if (ajj == 0. || disnan_(&ajj)) {
+ *rank = 0;
+ *info = 1;
+ goto L200;
+ }
+
+/* Compute stopping value if not supplied */
+
+ if (*tol < 0.) {
+ dstop = *n * dlamch_("Epsilon") * ajj;
+ } else {
+ dstop = *tol;
+ }
+
+
+ if (upper) {
+
+/* Compute the Cholesky factorization P' * A * P = U' * U */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (k = 1; i__2 < 0 ? k >= i__1 : k <= i__1; k += i__2) {
+
+/* Account for last block not being NB wide */
+
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - k + 1;
+ jb = min(i__3,i__4);
+
+/* Set relevant part of first half of WORK to zero, */
+/* holds dot products */
+
+ i__3 = *n;
+ for (i__ = k; i__ <= i__3; ++i__) {
+ work[i__] = 0.;
+/* L110: */
+ }
+
+ i__3 = k + jb - 1;
+ for (j = k; j <= i__3; ++j) {
+
+/* Find pivot, test for exit, else swap rows and columns */
+/* Update dot products, compute possible pivots which are */
+/* stored in the second half of WORK */
+
+ i__4 = *n;
+ for (i__ = j; i__ <= i__4; ++i__) {
+
+ if (j > k) {
+/* Computing 2nd power */
+ d__1 = a[j - 1 + i__ * a_dim1];
+ work[i__] += d__1 * d__1;
+ }
+ work[*n + i__] = a[i__ + i__ * a_dim1] - work[i__];
+
+/* L120: */
+ }
+
+ if (j > 1) {
+ maxlocvar = (*n << 1) - (*n + j) + 1;
+ itemp = dmaxloc_(&work[*n + j], &maxlocvar);
+ pvt = itemp + j - 1;
+ ajj = work[*n + pvt];
+ if (ajj <= dstop || disnan_(&ajj)) {
+ a[j + j * a_dim1] = ajj;
+ goto L190;
+ }
+ }
+
+ if (j != pvt) {
+
+/* Pivot OK, so can now swap pivot rows and columns */
+
+ a[pvt + pvt * a_dim1] = a[j + j * a_dim1];
+ i__4 = j - 1;
+ dswap_(&i__4, &a[j * a_dim1 + 1], &c__1, &a[pvt *
+ a_dim1 + 1], &c__1);
+ if (pvt < *n) {
+ i__4 = *n - pvt;
+ dswap_(&i__4, &a[j + (pvt + 1) * a_dim1], lda, &a[
+ pvt + (pvt + 1) * a_dim1], lda);
+ }
+ i__4 = pvt - j - 1;
+ dswap_(&i__4, &a[j + (j + 1) * a_dim1], lda, &a[j + 1
+ + pvt * a_dim1], &c__1);
+
+/* Swap dot products and PIV */
+
+ dtemp = work[j];
+ work[j] = work[pvt];
+ work[pvt] = dtemp;
+ itemp = piv[pvt];
+ piv[pvt] = piv[j];
+ piv[j] = itemp;
+ }
+
+ ajj = sqrt(ajj);
+ a[j + j * a_dim1] = ajj;
+
+/* Compute elements J+1:N of row J. */
+
+ if (j < *n) {
+ i__4 = j - k;
+ i__5 = *n - j;
+ dgemv_("Trans", &i__4, &i__5, &c_b22, &a[k + (j + 1) *
+ a_dim1], lda, &a[k + j * a_dim1], &c__1, &
+ c_b24, &a[j + (j + 1) * a_dim1], lda);
+ i__4 = *n - j;
+ d__1 = 1. / ajj;
+ dscal_(&i__4, &d__1, &a[j + (j + 1) * a_dim1], lda);
+ }
+
+/* L130: */
+ }
+
+/* Update trailing matrix, J already incremented */
+
+ if (k + jb <= *n) {
+ i__3 = *n - j + 1;
+ dsyrk_("Upper", "Trans", &i__3, &jb, &c_b22, &a[k + j *
+ a_dim1], lda, &c_b24, &a[j + j * a_dim1], lda);
+ }
+
+/* L140: */
+ }
+
+ } else {
+
+/* Compute the Cholesky factorization P' * A * P = L * L' */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (k = 1; i__1 < 0 ? k >= i__2 : k <= i__2; k += i__1) {
+
+/* Account for last block not being NB wide */
+
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - k + 1;
+ jb = min(i__3,i__4);
+
+/* Set relevant part of first half of WORK to zero, */
+/* holds dot products */
+
+ i__3 = *n;
+ for (i__ = k; i__ <= i__3; ++i__) {
+ work[i__] = 0.;
+/* L150: */
+ }
+
+ i__3 = k + jb - 1;
+ for (j = k; j <= i__3; ++j) {
+
+/* Find pivot, test for exit, else swap rows and columns */
+/* Update dot products, compute possible pivots which are */
+/* stored in the second half of WORK */
+
+ i__4 = *n;
+ for (i__ = j; i__ <= i__4; ++i__) {
+
+ if (j > k) {
+/* Computing 2nd power */
+ d__1 = a[i__ + (j - 1) * a_dim1];
+ work[i__] += d__1 * d__1;
+ }
+ work[*n + i__] = a[i__ + i__ * a_dim1] - work[i__];
+
+/* L160: */
+ }
+
+ if (j > 1) {
+ maxlocvar = (*n << 1) - (*n + j) + 1;
+ itemp = dmaxloc_(&work[*n + j], &maxlocvar);
+ pvt = itemp + j - 1;
+ ajj = work[*n + pvt];
+ if (ajj <= dstop || disnan_(&ajj)) {
+ a[j + j * a_dim1] = ajj;
+ goto L190;
+ }
+ }
+
+ if (j != pvt) {
+
+/* Pivot OK, so can now swap pivot rows and columns */
+
+ a[pvt + pvt * a_dim1] = a[j + j * a_dim1];
+ i__4 = j - 1;
+ dswap_(&i__4, &a[j + a_dim1], lda, &a[pvt + a_dim1],
+ lda);
+ if (pvt < *n) {
+ i__4 = *n - pvt;
+ dswap_(&i__4, &a[pvt + 1 + j * a_dim1], &c__1, &a[
+ pvt + 1 + pvt * a_dim1], &c__1);
+ }
+ i__4 = pvt - j - 1;
+ dswap_(&i__4, &a[j + 1 + j * a_dim1], &c__1, &a[pvt +
+ (j + 1) * a_dim1], lda);
+
+/* Swap dot products and PIV */
+
+ dtemp = work[j];
+ work[j] = work[pvt];
+ work[pvt] = dtemp;
+ itemp = piv[pvt];
+ piv[pvt] = piv[j];
+ piv[j] = itemp;
+ }
+
+ ajj = sqrt(ajj);
+ a[j + j * a_dim1] = ajj;
+
+/* Compute elements J+1:N of column J. */
+
+ if (j < *n) {
+ i__4 = *n - j;
+ i__5 = j - k;
+ dgemv_("No Trans", &i__4, &i__5, &c_b22, &a[j + 1 + k
+ * a_dim1], lda, &a[j + k * a_dim1], lda, &
+ c_b24, &a[j + 1 + j * a_dim1], &c__1);
+ i__4 = *n - j;
+ d__1 = 1. / ajj;
+ dscal_(&i__4, &d__1, &a[j + 1 + j * a_dim1], &c__1);
+ }
+
+/* L170: */
+ }
+
+/* Update trailing matrix, J already incremented */
+
+ if (k + jb <= *n) {
+ i__3 = *n - j + 1;
+ dsyrk_("Lower", "No Trans", &i__3, &jb, &c_b22, &a[j + k *
+ a_dim1], lda, &c_b24, &a[j + j * a_dim1], lda);
+ }
+
+/* L180: */
+ }
+
+ }
+ }
+
+/* Ran to completion, A has full rank */
+
+ *rank = *n;
+
+ goto L200;
+L190:
+
+/* Rank is the number of steps completed. Set INFO = 1 to signal */
+/* that the factorization cannot be used to solve a system. */
+
+ *rank = j - 1;
+ *info = 1;
+
+L200:
+ return 0;
+
+/* End of DPSTRF */
+
+} /* dpstrf_ */
diff --git a/contrib/libs/clapack/dptcon.c b/contrib/libs/clapack/dptcon.c
new file mode 100644
index 0000000000..a2ffc134d1
--- /dev/null
+++ b/contrib/libs/clapack/dptcon.c
@@ -0,0 +1,184 @@
+/* dptcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dptcon_(integer *n, doublereal *d__, doublereal *e,
+ doublereal *anorm, doublereal *rcond, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1;
+
+ /* Local variables */
+ integer i__, ix;
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal ainvnm;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPTCON computes the reciprocal of the condition number (in the */
+/* 1-norm) of a real symmetric positive definite tridiagonal matrix */
+/* using the factorization A = L*D*L**T or A = U**T*D*U computed by */
+/* DPTTRF. */
+
+/* Norm(inv(A)) is computed by a direct method, and the reciprocal of */
+/* the condition number is computed as */
+/* RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the diagonal matrix D from the */
+/* factorization of A, as computed by DPTTRF. */
+
+/* E (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) off-diagonal elements of the unit bidiagonal factor */
+/* U or L from the factorization of A, as computed by DPTTRF. */
+
+/* ANORM (input) DOUBLE PRECISION */
+/* The 1-norm of the original matrix A. */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is the */
+/* 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The method used is described in Nicholas J. Higham, "Efficient */
+/* Algorithms for Computing the Condition Number of a Tridiagonal */
+/* Matrix", SIAM J. Sci. Stat. Comput., Vol. 7, No. 1, January 1986. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments. */
+
+ /* Parameter adjustments */
+ --work;
+ --e;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*anorm < 0.) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPTCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.;
+ if (*n == 0) {
+ *rcond = 1.;
+ return 0;
+ } else if (*anorm == 0.) {
+ return 0;
+ }
+
+/* Check that D(1:N) is positive. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] <= 0.) {
+ return 0;
+ }
+/* L10: */
+ }
+
+/* Solve M(A) * x = e, where M(A) = (m(i,j)) is given by */
+
+/* m(i,j) = abs(A(i,j)), i = j, */
+/* m(i,j) = -abs(A(i,j)), i .ne. j, */
+
+/* and e = [ 1, 1, ..., 1 ]'. Note M(A) = M(L)*D*M(L)'. */
+
+/* Solve M(L) * x = e. */
+
+ work[1] = 1.;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ work[i__] = work[i__ - 1] * (d__1 = e[i__ - 1], abs(d__1)) + 1.;
+/* L20: */
+ }
+
+/* Solve D * M(L)' * x = b. */
+
+ work[*n] /= d__[*n];
+ for (i__ = *n - 1; i__ >= 1; --i__) {
+ work[i__] = work[i__] / d__[i__] + work[i__ + 1] * (d__1 = e[i__],
+ abs(d__1));
+/* L30: */
+ }
+
+/* Compute AINVNM = max(x(i)), 1<=i<=n. */
+
+ ix = idamax_(n, &work[1], &c__1);
+ ainvnm = (d__1 = work[ix], abs(d__1));
+
+/* Compute the reciprocal condition number. */
+
+ if (ainvnm != 0.) {
+ *rcond = 1. / ainvnm / *anorm;
+ }
+
+ return 0;
+
+/* End of DPTCON */
+
+} /* dptcon_ */
diff --git a/contrib/libs/clapack/dpteqr.c b/contrib/libs/clapack/dpteqr.c
new file mode 100644
index 0000000000..1da0beab2d
--- /dev/null
+++ b/contrib/libs/clapack/dpteqr.c
@@ -0,0 +1,244 @@
+/* dpteqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b7 = 0.;
+static doublereal c_b8 = 1.;
+static integer c__0 = 0;
+static integer c__1 = 1;
+
+/* Subroutine */ int dpteqr_(char *compz, integer *n, doublereal *d__,
+ doublereal *e, doublereal *z__, integer *ldz, doublereal *work,
+ integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ doublereal c__[1] /* was [1][1] */;
+ integer i__;
+ doublereal vt[1] /* was [1][1] */;
+ integer nru;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dlaset_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *),
+ xerbla_(char *, integer *), dbdsqr_(char *, integer *,
+ integer *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *);
+ integer icompz;
+ extern /* Subroutine */ int dpttrf_(integer *, doublereal *, doublereal *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPTEQR computes all eigenvalues and, optionally, eigenvectors of a */
+/* symmetric positive definite tridiagonal matrix by first factoring the */
+/* matrix using DPTTRF, and then calling DBDSQR to compute the singular */
+/* values of the bidiagonal factor. */
+
+/* This routine computes the eigenvalues of the positive definite */
+/* tridiagonal matrix to high relative accuracy. This means that if the */
+/* eigenvalues range over many orders of magnitude in size, then the */
+/* small eigenvalues and corresponding eigenvectors will be computed */
+/* more accurately than, for example, with the standard QR method. */
+
+/* The eigenvectors of a full or band symmetric positive definite matrix */
+/* can also be found if DSYTRD, DSPTRD, or DSBTRD has been used to */
+/* reduce this matrix to tridiagonal form. (The reduction to tridiagonal */
+/* form, however, may preclude the possibility of obtaining high */
+/* relative accuracy in the small eigenvalues of the original matrix, if */
+/* these eigenvalues range over many orders of magnitude.) */
+
+/* Arguments */
+/* ========= */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only. */
+/* = 'V': Compute eigenvectors of original symmetric */
+/* matrix also. Array Z contains the orthogonal */
+/* matrix used to reduce the original matrix to */
+/* tridiagonal form. */
+/* = 'I': Compute eigenvectors of tridiagonal matrix also. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the n diagonal elements of the tridiagonal */
+/* matrix. */
+/* On normal exit, D contains the eigenvalues, in descending */
+/* order. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix. */
+/* On exit, E has been destroyed. */
+
+/* Z (input/output) DOUBLE PRECISION array, dimension (LDZ, N) */
+/* On entry, if COMPZ = 'V', the orthogonal matrix used in the */
+/* reduction to tridiagonal form. */
+/* On exit, if COMPZ = 'V', the orthonormal eigenvectors of the */
+/* original symmetric matrix; */
+/* if COMPZ = 'I', the orthonormal eigenvectors of the */
+/* tridiagonal matrix. */
+/* If INFO > 0 on exit, Z contains the eigenvectors associated */
+/* with only the stored eigenvalues. */
+/* If COMPZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* COMPZ = 'V' or 'I', LDZ >= max(1,N). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (4*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, and i is: */
+/* <= N the Cholesky factorization of the matrix could */
+/* not be performed because the i-th principal minor */
+/* was not positive definite. */
+/* > N the SVD algorithm failed to converge; */
+/* if INFO = N+i, i off-diagonal elements of the */
+/* bidiagonal factor did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (lsame_(compz, "N")) {
+ icompz = 0;
+ } else if (lsame_(compz, "V")) {
+ icompz = 1;
+ } else if (lsame_(compz, "I")) {
+ icompz = 2;
+ } else {
+ icompz = -1;
+ }
+ if (icompz < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldz < 1 || icompz > 0 && *ldz < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPTEQR", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (icompz > 0) {
+ z__[z_dim1 + 1] = 1.;
+ }
+ return 0;
+ }
+ if (icompz == 2) {
+ dlaset_("Full", n, n, &c_b7, &c_b8, &z__[z_offset], ldz);
+ }
+
+/* Call DPTTRF to factor the matrix. */
+
+ dpttrf_(n, &d__[1], &e[1], info);
+ if (*info != 0) {
+ return 0;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = sqrt(d__[i__]);
+/* L10: */
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ e[i__] *= d__[i__];
+/* L20: */
+ }
+
+/* Call DBDSQR to compute the singular values/vectors of the */
+/* bidiagonal factor. */
+
+ if (icompz > 0) {
+ nru = *n;
+ } else {
+ nru = 0;
+ }
+ dbdsqr_("Lower", n, &c__0, &nru, &c__0, &d__[1], &e[1], vt, &c__1, &z__[
+ z_offset], ldz, c__, &c__1, &work[1], info);
+
+/* Square the singular values. */
+
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] *= d__[i__];
+/* L30: */
+ }
+ } else {
+ *info = *n + *info;
+ }
+
+ return 0;
+
+/* End of DPTEQR */
+
+} /* dpteqr_ */
diff --git a/contrib/libs/clapack/dptrfs.c b/contrib/libs/clapack/dptrfs.c
new file mode 100644
index 0000000000..2a0491ccc5
--- /dev/null
+++ b/contrib/libs/clapack/dptrfs.c
@@ -0,0 +1,365 @@
+/* dptrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b11 = 1.;
+
+/* Subroutine */ int dptrfs_(integer *n, integer *nrhs, doublereal *d__,
+ doublereal *e, doublereal *df, doublereal *ef, doublereal *b, integer
+ *ldb, doublereal *x, integer *ldx, doublereal *ferr, doublereal *berr,
+ doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2;
+ doublereal d__1, d__2, d__3;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal s, bi, cx, dx, ex;
+ integer ix, nz;
+ doublereal eps, safe1, safe2;
+ extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *);
+ integer count;
+ extern doublereal dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal lstres;
+ extern /* Subroutine */ int dpttrs_(integer *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPTRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is symmetric positive definite */
+/* and tridiagonal, and provides error bounds and backward error */
+/* estimates for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the tridiagonal matrix A. */
+
+/* E (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of the tridiagonal matrix A. */
+
+/* DF (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the diagonal matrix D from the */
+/* factorization computed by DPTTRF. */
+
+/* EF (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of the unit bidiagonal factor */
+/* L from the factorization computed by DPTTRF. */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by DPTTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ --df;
+ --ef;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*nrhs < 0) {
+ *info = -2;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ } else if (*ldx < max(1,*n)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPTRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.;
+ berr[j] = 0.;
+/* L10: */
+ }
+ return 0;
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = 4;
+ eps = dlamch_("Epsilon");
+ safmin = dlamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X. Also compute */
+/* abs(A)*abs(x) + abs(b) for use in the backward error bound. */
+
+ if (*n == 1) {
+ bi = b[j * b_dim1 + 1];
+ dx = d__[1] * x[j * x_dim1 + 1];
+ work[*n + 1] = bi - dx;
+ work[1] = abs(bi) + abs(dx);
+ } else {
+ bi = b[j * b_dim1 + 1];
+ dx = d__[1] * x[j * x_dim1 + 1];
+ ex = e[1] * x[j * x_dim1 + 2];
+ work[*n + 1] = bi - dx - ex;
+ work[1] = abs(bi) + abs(dx) + abs(ex);
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ bi = b[i__ + j * b_dim1];
+ cx = e[i__ - 1] * x[i__ - 1 + j * x_dim1];
+ dx = d__[i__] * x[i__ + j * x_dim1];
+ ex = e[i__] * x[i__ + 1 + j * x_dim1];
+ work[*n + i__] = bi - cx - dx - ex;
+ work[i__] = abs(bi) + abs(cx) + abs(dx) + abs(ex);
+/* L30: */
+ }
+ bi = b[*n + j * b_dim1];
+ cx = e[*n - 1] * x[*n - 1 + j * x_dim1];
+ dx = d__[*n] * x[*n + j * x_dim1];
+ work[*n + *n] = bi - cx - dx;
+ work[*n] = abs(bi) + abs(cx) + abs(dx);
+ }
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ s = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+/* Computing MAX */
+ d__2 = s, d__3 = (d__1 = work[*n + i__], abs(d__1)) / work[
+ i__];
+ s = max(d__2,d__3);
+ } else {
+/* Computing MAX */
+ d__2 = s, d__3 = ((d__1 = work[*n + i__], abs(d__1)) + safe1)
+ / (work[i__] + safe1);
+ s = max(d__2,d__3);
+ }
+/* L40: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ dpttrs_(n, &c__1, &df[1], &ef[1], &work[*n + 1], n, info);
+ daxpy_(n, &c_b11, &work[*n + 1], &c__1, &x[j * x_dim1 + 1], &c__1)
+ ;
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+ work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+ work[i__];
+ } else {
+ work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+ work[i__] + safe1;
+ }
+/* L50: */
+ }
+ ix = idamax_(n, &work[1], &c__1);
+ ferr[j] = work[ix];
+
+/* Estimate the norm of inv(A). */
+
+/* Solve M(A) * x = e, where M(A) = (m(i,j)) is given by */
+
+/* m(i,j) = abs(A(i,j)), i = j, */
+/* m(i,j) = -abs(A(i,j)), i .ne. j, */
+
+/* and e = [ 1, 1, ..., 1 ]'. Note M(A) = M(L)*D*M(L)'. */
+
+/* Solve M(L) * x = e. */
+
+ work[1] = 1.;
+ i__2 = *n;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ work[i__] = work[i__ - 1] * (d__1 = ef[i__ - 1], abs(d__1)) + 1.;
+/* L60: */
+ }
+
+/* Solve D * M(L)' * x = b. */
+
+ work[*n] /= df[*n];
+ for (i__ = *n - 1; i__ >= 1; --i__) {
+ work[i__] = work[i__] / df[i__] + work[i__ + 1] * (d__1 = ef[i__],
+ abs(d__1));
+/* L70: */
+ }
+
+/* Compute norm(inv(A)) = max(x(i)), 1<=i<=n. */
+
+ ix = idamax_(n, &work[1], &c__1);
+ ferr[j] *= (d__1 = work[ix], abs(d__1));
+
+/* Normalize error. */
+
+ lstres = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = lstres, d__3 = (d__1 = x[i__ + j * x_dim1], abs(d__1));
+ lstres = max(d__2,d__3);
+/* L80: */
+ }
+ if (lstres != 0.) {
+ ferr[j] /= lstres;
+ }
+
+/* L90: */
+ }
+
+ return 0;
+
+/* End of DPTRFS */
+
+} /* dptrfs_ */
diff --git a/contrib/libs/clapack/dptsv.c b/contrib/libs/clapack/dptsv.c
new file mode 100644
index 0000000000..2c09ce913d
--- /dev/null
+++ b/contrib/libs/clapack/dptsv.c
@@ -0,0 +1,130 @@
+/* dptsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dptsv_(integer *n, integer *nrhs, doublereal *d__,
+ doublereal *e, doublereal *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern /* Subroutine */ int xerbla_(char *, integer *), dpttrf_(
+ integer *, doublereal *, doublereal *, integer *), dpttrs_(
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPTSV computes the solution to a real system of linear equations */
+/* A*X = B, where A is an N-by-N symmetric positive definite tridiagonal */
+/* matrix, and X and B are N-by-NRHS matrices. */
+
+/* A is factored as A = L*D*L**T, and the factored form of A is then */
+/* used to solve the system of equations. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the n diagonal elements of the tridiagonal matrix */
+/* A. On exit, the n diagonal elements of the diagonal matrix */
+/* D from the factorization A = L*D*L**T. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix A. On exit, the (n-1) subdiagonal elements of the */
+/* unit bidiagonal factor L from the L*D*L**T factorization of */
+/* A. (E can also be regarded as the superdiagonal of the unit */
+/* bidiagonal factor U from the U**T*D*U factorization of A.) */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i is not */
+/* positive definite, and the solution has not been */
+/* computed. The factorization has not been completed */
+/* unless i = N. */
+
+/* ===================================================================== */
+
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*nrhs < 0) {
+ *info = -2;
+ } else if (*ldb < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPTSV ", &i__1);
+ return 0;
+ }
+
+/* Compute the L*D*L' (or U'*D*U) factorization of A. */
+
+ dpttrf_(n, &d__[1], &e[1], info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ dpttrs_(n, nrhs, &d__[1], &e[1], &b[b_offset], ldb, info);
+ }
+ return 0;
+
+/* End of DPTSV */
+
+} /* dptsv_ */
diff --git a/contrib/libs/clapack/dptsvx.c b/contrib/libs/clapack/dptsvx.c
new file mode 100644
index 0000000000..ba37b180a7
--- /dev/null
+++ b/contrib/libs/clapack/dptsvx.c
@@ -0,0 +1,283 @@
+/* dptsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dptsvx_(char *fact, integer *n, integer *nrhs,
+ doublereal *d__, doublereal *e, doublereal *df, doublereal *ef,
+ doublereal *b, integer *ldb, doublereal *x, integer *ldx, doublereal *
+ rcond, doublereal *ferr, doublereal *berr, doublereal *work, integer *
+ info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ doublereal anorm;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ extern doublereal dlamch_(char *);
+ logical nofact;
+ extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+ extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+ extern /* Subroutine */ int dptcon_(integer *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, integer *), dptrfs_(
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *), dpttrf_(
+ integer *, doublereal *, doublereal *, integer *), dpttrs_(
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPTSVX uses the factorization A = L*D*L**T to compute the solution */
+/* to a real system of linear equations A*X = B, where A is an N-by-N */
+/* symmetric positive definite tridiagonal matrix and X and B are */
+/* N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the matrix A is factored as A = L*D*L**T, where L */
+/* is a unit lower bidiagonal matrix and D is diagonal. The */
+/* factorization can also be regarded as having the form */
+/* A = U**T*D*U. */
+
+/* 2. If the leading i-by-i principal minor is not positive definite, */
+/* then the routine returns with INFO = i. Otherwise, the factored */
+/* form of A is used to estimate the condition number of the matrix */
+/* A. If the reciprocal of the condition number is less than machine */
+/* precision, INFO = N+1 is returned as a warning, but the routine */
+/* still goes on to solve for X and compute error bounds as */
+/* described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of A has been */
+/* supplied on entry. */
+/* = 'F': On entry, DF and EF contain the factored form of A. */
+/* D, E, DF, and EF will not be modified. */
+/* = 'N': The matrix A will be copied to DF and EF and */
+/* factored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the tridiagonal matrix A. */
+
+/* E (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of the tridiagonal matrix A. */
+
+/* DF (input or output) DOUBLE PRECISION array, dimension (N) */
+/* If FACT = 'F', then DF is an input argument and on entry */
+/* contains the n diagonal elements of the diagonal matrix D */
+/* from the L*D*L**T factorization of A. */
+/* If FACT = 'N', then DF is an output argument and on exit */
+/* contains the n diagonal elements of the diagonal matrix D */
+/* from the L*D*L**T factorization of A. */
+
+/* EF (input or output) DOUBLE PRECISION array, dimension (N-1) */
+/* If FACT = 'F', then EF is an input argument and on entry */
+/* contains the (n-1) subdiagonal elements of the unit */
+/* bidiagonal factor L from the L*D*L**T factorization of A. */
+/* If FACT = 'N', then EF is an output argument and on exit */
+/* contains the (n-1) subdiagonal elements of the unit */
+/* bidiagonal factor L from the L*D*L**T factorization of A. */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* If INFO = 0 of INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal condition number of the matrix A. If RCOND */
+/* is less than the machine precision (in particular, if */
+/* RCOND = 0), the matrix is singular to working precision. */
+/* This condition is indicated by a return code of INFO > 0. */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in any */
+/* element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: the leading minor of order i of A is */
+/* not positive definite, so the factorization */
+/* could not be completed, and the solution has not */
+/* been computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ --df;
+ --ef;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ if (! nofact && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldx < max(1,*n)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPTSVX", &i__1);
+ return 0;
+ }
+
+ if (nofact) {
+
+/* Compute the L*D*L' (or U'*D*U) factorization of A. */
+
+ dcopy_(n, &d__[1], &c__1, &df[1], &c__1);
+ if (*n > 1) {
+ i__1 = *n - 1;
+ dcopy_(&i__1, &e[1], &c__1, &ef[1], &c__1);
+ }
+ dpttrf_(n, &df[1], &ef[1], info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ anorm = dlanst_("1", n, &d__[1], &e[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ dptcon_(n, &df[1], &ef[1], &anorm, rcond, &work[1], info);
+
+/* Compute the solution vectors X. */
+
+ dlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ dpttrs_(n, nrhs, &df[1], &ef[1], &x[x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+ dptrfs_(n, nrhs, &d__[1], &e[1], &df[1], &ef[1], &b[b_offset], ldb, &x[
+ x_offset], ldx, &ferr[1], &berr[1], &work[1], info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < dlamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ return 0;
+
+/* End of DPTSVX */
+
+} /* dptsvx_ */
diff --git a/contrib/libs/clapack/dpttrf.c b/contrib/libs/clapack/dpttrf.c
new file mode 100644
index 0000000000..070ef3436e
--- /dev/null
+++ b/contrib/libs/clapack/dpttrf.c
@@ -0,0 +1,181 @@
+/* dpttrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dpttrf_(integer *n, doublereal *d__, doublereal *e,
+	integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+
+    /* Local variables */
+    integer i__, i4;
+    doublereal ei;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPTTRF computes the L*D*L' factorization of a real symmetric */
+/* positive definite tridiagonal matrix A. The factorization may also */
+/* be regarded as having the form A = U'*D*U. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the n diagonal elements of the tridiagonal matrix */
+/* A. On exit, the n diagonal elements of the diagonal matrix */
+/* D from the L*D*L' factorization of A. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix A. On exit, the (n-1) subdiagonal elements of the */
+/* unit bidiagonal factor L from the L*D*L' factorization of A. */
+/* E can also be regarded as the superdiagonal of the unit */
+/* bidiagonal factor U from the U'*D*U factorization of A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, the leading minor of order k is not */
+/* positive definite; if k < N, the factorization could not */
+/* be completed, while if k = N, the factorization was */
+/* completed, but D(N) <= 0. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments: shift to 1-based (Fortran-style) indexing. */
+    --e;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+	i__1 = -(*info);
+	xerbla_("DPTTRF", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Compute the L*D*L' (or U'*D*U) factorization of A. */
+
+/* The elimination loop below is manually unrolled by a factor of four; */
+/* first do the (n-1) mod 4 leftover steps so the main loop's trip */
+/* count is an exact multiple of 4. */
+
+    i4 = (*n - 1) % 4;
+    i__1 = i4;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if (d__[i__] <= 0.) {
+	    *info = i__;
+	    goto L30;
+	}
+	ei = e[i__];
+	e[i__] = ei / d__[i__];
+	d__[i__ + 1] -= e[i__] * ei;
+/* L10: */
+    }
+
+/* Main unrolled loop: each pass eliminates four subdiagonal entries, */
+/* checking every freshly updated pivot d(i) before dividing by it. */
+
+    i__1 = *n - 4;
+    for (i__ = i4 + 1; i__ <= i__1; i__ += 4) {
+
+/* Drop out of the loop if d(i) <= 0: the matrix is not positive */
+/* definite. */
+
+	if (d__[i__] <= 0.) {
+	    *info = i__;
+	    goto L30;
+	}
+
+/* Solve for e(i) and d(i+1). */
+
+	ei = e[i__];
+	e[i__] = ei / d__[i__];
+	d__[i__ + 1] -= e[i__] * ei;
+
+	if (d__[i__ + 1] <= 0.) {
+	    *info = i__ + 1;
+	    goto L30;
+	}
+
+/* Solve for e(i+1) and d(i+2). */
+
+	ei = e[i__ + 1];
+	e[i__ + 1] = ei / d__[i__ + 1];
+	d__[i__ + 2] -= e[i__ + 1] * ei;
+
+	if (d__[i__ + 2] <= 0.) {
+	    *info = i__ + 2;
+	    goto L30;
+	}
+
+/* Solve for e(i+2) and d(i+3). */
+
+	ei = e[i__ + 2];
+	e[i__ + 2] = ei / d__[i__ + 2];
+	d__[i__ + 3] -= e[i__ + 2] * ei;
+
+	if (d__[i__ + 3] <= 0.) {
+	    *info = i__ + 3;
+	    goto L30;
+	}
+
+/* Solve for e(i+3) and d(i+4). */
+
+	ei = e[i__ + 3];
+	e[i__ + 3] = ei / d__[i__ + 3];
+	d__[i__ + 4] -= e[i__ + 3] * ei;
+/* L20: */
+    }
+
+/* Check d(n) for positive definiteness.  d(n) is the last pivot and is */
+/* never divided by, so it is only validated here; INFO = N signals a */
+/* completed factorization with D(N) <= 0. */
+
+    if (d__[*n] <= 0.) {
+	*info = *n;
+    }
+
+L30:
+    return 0;
+
+/* End of DPTTRF */
+
+} /* dpttrf_ */
diff --git a/contrib/libs/clapack/dpttrs.c b/contrib/libs/clapack/dpttrs.c
new file mode 100644
index 0000000000..52aaa90560
--- /dev/null
+++ b/contrib/libs/clapack/dpttrs.c
@@ -0,0 +1,156 @@
+/* dpttrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dpttrs_(integer *n, integer *nrhs, doublereal *d__,
+	doublereal *e, doublereal *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer j, jb, nb;
+    extern /* Subroutine */ int dptts2_(integer *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPTTRS solves a tridiagonal system of the form */
+/* A * X = B */
+/* using the L*D*L' factorization of A computed by DPTTRF. D is a */
+/* diagonal matrix specified in the vector D, L is a unit bidiagonal */
+/* matrix whose subdiagonal is specified in the vector E, and X and B */
+/* are N by NRHS matrices. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the tridiagonal matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the diagonal matrix D from the */
+/* L*D*L' factorization of A. */
+
+/* E (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of the unit bidiagonal factor */
+/* L from the L*D*L' factorization of A. E can also be regarded */
+/* as the superdiagonal of the unit bidiagonal factor U from the */
+/* factorization A = U'*D*U. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the right hand side vectors B for the system of */
+/* linear equations. */
+/* On exit, the solution vectors, X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments. */
+
+    /* Parameter adjustments: shift to 1-based (Fortran-style) indexing. */
+    --d__;
+    --e;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*nrhs < 0) {
+	*info = -2;
+    } else if (*ldb < max(1,*n)) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DPTTRS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+/* Determine the number of right-hand sides to solve at a time. */
+
+    if (*nrhs == 1) {
+	nb = 1;
+    } else {
+/* Computing MAX */
+/* ilaenv_ supplies the tuned blocking factor NB for DPTTRS; clamp to */
+/* at least 1. */
+	i__1 = 1, i__2 = ilaenv_(&c__1, "DPTTRS", " ", n, nrhs, &c_n1, &c_n1);
+	nb = max(i__1,i__2);
+    }
+
+    if (nb >= *nrhs) {
+
+/* One panel covers every right-hand side: a single dptts2_ call. */
+
+	dptts2_(n, nrhs, &d__[1], &e[1], &b[b_offset], ldb);
+    } else {
+
+/* Sweep over the right-hand sides in panels of NB columns; the last */
+/* panel may be narrower (jb = nrhs - j + 1). */
+
+	i__1 = *nrhs;
+	i__2 = nb;
+	for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+	    i__3 = *nrhs - j + 1;
+	    jb = min(i__3,nb);
+	    dptts2_(n, &jb, &d__[1], &e[1], &b[j * b_dim1 + 1], ldb);
+/* L10: */
+	}
+    }
+
+    return 0;
+
+/* End of DPTTRS */
+
+} /* dpttrs_ */
diff --git a/contrib/libs/clapack/dptts2.c b/contrib/libs/clapack/dptts2.c
new file mode 100644
index 0000000000..fc246bba8e
--- /dev/null
+++ b/contrib/libs/clapack/dptts2.c
@@ -0,0 +1,131 @@
+/* dptts2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dptts2_(integer *n, integer *nrhs, doublereal *d__,
+	doublereal *e, doublereal *b, integer *ldb)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1, i__2;
+    doublereal d__1;
+
+    /* Local variables */
+    integer i__, j;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DPTTS2 solves a tridiagonal system of the form */
+/* A * X = B */
+/* using the L*D*L' factorization of A computed by DPTTRF. D is a */
+/* diagonal matrix specified in the vector D, L is a unit bidiagonal */
+/* matrix whose subdiagonal is specified in the vector E, and X and B */
+/* are N by NRHS matrices. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the tridiagonal matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the diagonal matrix D from the */
+/* L*D*L' factorization of A. */
+
+/* E (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of the unit bidiagonal factor */
+/* L from the L*D*L' factorization of A. E can also be regarded */
+/* as the superdiagonal of the unit bidiagonal factor U from the */
+/* factorization A = U'*D*U. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the right hand side vectors B for the system of */
+/* linear equations. */
+/* On exit, the solution vectors, X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+    /* Parameter adjustments: shift to 1-based (Fortran-style) indexing. */
+    --d__;
+    --e;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    if (*n <= 1) {
+	if (*n == 1) {
+
+/* 1-by-1 system: X = B / D(1).  The row of NRHS entries B(1,1..nrhs) */
+/* is scaled in place with stride LDB. */
+
+	    d__1 = 1. / d__[1];
+	    dscal_(nrhs, &d__1, &b[b_offset], ldb);
+	}
+	return 0;
+    }
+
+/* Solve A * X = B using the factorization A = L*D*L', */
+/* overwriting each right hand side vector with its solution. */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+/* Solve L * x = b. */
+
+/* Forward substitution down the subdiagonal e(1..n-1). */
+
+	i__2 = *n;
+	for (i__ = 2; i__ <= i__2; ++i__) {
+	    b[i__ + j * b_dim1] -= b[i__ - 1 + j * b_dim1] * e[i__ - 1];
+/* L10: */
+	}
+
+/* Solve D * L' * x = b. */
+
+/* Divide by the diagonal, then back-substitute upward through L'. */
+
+	b[*n + j * b_dim1] /= d__[*n];
+	for (i__ = *n - 1; i__ >= 1; --i__) {
+	    b[i__ + j * b_dim1] = b[i__ + j * b_dim1] / d__[i__] - b[i__ + 1
+		    + j * b_dim1] * e[i__];
+/* L20: */
+	}
+/* L30: */
+    }
+
+    return 0;
+
+/* End of DPTTS2 */
+
+} /* dptts2_ */
diff --git a/contrib/libs/clapack/drscl.c b/contrib/libs/clapack/drscl.c
new file mode 100644
index 0000000000..03b09ce113
--- /dev/null
+++ b/contrib/libs/clapack/drscl.c
@@ -0,0 +1,134 @@
+/* drscl.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int drscl_(integer *n, doublereal *sa, doublereal *sx,
+	integer *incx)
+{
+    /* Local variables */
+    doublereal mul, cden;
+    logical done;
+    doublereal cnum, cden1, cnum1;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *), dlabad_(doublereal *, doublereal *);
+    extern doublereal dlamch_(char *);
+    doublereal bignum, smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DRSCL multiplies an n-element real vector x by the real scalar 1/a. */
+/* This is done without overflow or underflow as long as */
+/* the final result x/a does not overflow or underflow. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of components of the vector x. */
+
+/* SA (input) DOUBLE PRECISION */
+/* The scalar a which is used to divide each component of x. */
+/* SA must be >= 0, or the subroutine will divide by zero. */
+
+/* SX (input/output) DOUBLE PRECISION array, dimension */
+/* (1+(N-1)*abs(INCX)) */
+/* The n-element vector x. */
+
+/* INCX (input) INTEGER */
+/* The increment between successive values of the vector SX. */
+/* > 0: SX(1) = X(1) and SX(1+(i-1)*INCX) = x(i), 1< i<= n */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+    /* Parameter adjustments: shift to 1-based (Fortran-style) indexing. */
+    --sx;
+
+    /* Function Body */
+    if (*n <= 0) {
+	return 0;
+    }
+
+/* Get machine parameters */
+
+    smlnum = dlamch_("S");
+    bignum = 1. / smlnum;
+    dlabad_(&smlnum, &bignum);
+
+/* Initialize the denominator to SA and the numerator to 1. */
+
+    cden = *sa;
+    cnum = 1.;
+
+/* Incremental scaling: each pass multiplies X by SMLNUM, BIGNUM, or the */
+/* final safe quotient CNUM/CDEN, adjusting CDEN/CNUM until the quotient */
+/* can be formed without overflow or underflow.  DONE ends the loop. */
+
+L10:
+    cden1 = cden * smlnum;
+    cnum1 = cnum / bignum;
+    if (abs(cden1) > abs(cnum) && cnum != 0.) {
+
+/* Pre-multiply X by SMLNUM if CDEN is large compared to CNUM. */
+
+	mul = smlnum;
+	done = FALSE_;
+	cden = cden1;
+    } else if (abs(cnum1) > abs(cden)) {
+
+/* Pre-multiply X by BIGNUM if CDEN is small compared to CNUM. */
+
+	mul = bignum;
+	done = FALSE_;
+	cnum = cnum1;
+    } else {
+
+/* Multiply X by CNUM / CDEN and return. */
+
+	mul = cnum / cden;
+	done = TRUE_;
+    }
+
+/* Scale the vector X by MUL */
+
+    dscal_(n, &mul, &sx[1], incx);
+
+    if (! done) {
+	goto L10;
+    }
+
+    return 0;
+
+/* End of DRSCL */
+
+} /* drscl_ */
diff --git a/contrib/libs/clapack/dsbev.c b/contrib/libs/clapack/dsbev.c
new file mode 100644
index 0000000000..2d1efc5bff
--- /dev/null
+++ b/contrib/libs/clapack/dsbev.c
@@ -0,0 +1,268 @@
+/* dsbev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b11 = 1.;
+static integer c__1 = 1;
+
+/* Subroutine */ int dsbev_(char *jobz, char *uplo, integer *n, integer *kd,
+	doublereal *ab, integer *ldab, doublereal *w, doublereal *z__,
+	integer *ldz, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, z_dim1, z_offset, i__1;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    doublereal eps;
+    integer inde;
+    doublereal anrm;
+    integer imax;
+    doublereal rmin, rmax;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    doublereal sigma;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    logical lower, wantz;
+    extern doublereal dlamch_(char *);
+    integer iscale;
+    extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, integer *);
+    extern doublereal dlansb_(char *, char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *);
+    doublereal safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal bignum;
+    extern /* Subroutine */ int dsbtrd_(char *, char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *), dsterf_(
+	    integer *, doublereal *, doublereal *, integer *);
+    integer indwrk;
+    extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *);
+    doublereal smlnum;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSBEV computes all the eigenvalues and, optionally, eigenvectors of */
+/* a real symmetric band matrix A. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB, N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, AB is overwritten by values generated during the */
+/* reduction to tridiagonal form. If UPLO = 'U', the first */
+/* superdiagonal and the diagonal of the tridiagonal matrix T */
+/* are returned in rows KD and KD+1 of AB, and if UPLO = 'L', */
+/* the diagonal and first subdiagonal of T are returned in the */
+/* first two rows of AB. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD + 1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/* eigenvectors of the matrix A, with the i-th column of Z */
+/* holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (max(1,3*N-2)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the algorithm failed to converge; i */
+/* off-diagonal elements of an intermediate tridiagonal */
+/* form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments: shift to 1-based (Fortran-style) indexing. */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    lower = lsame_(uplo, "L");
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (lower || lsame_(uplo, "U"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*kd < 0) {
+	*info = -4;
+    } else if (*ldab < *kd + 1) {
+	*info = -6;
+    } else if (*ldz < 1 || wantz && *ldz < *n) {
+	*info = -9;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSBEV ", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (*n == 1) {
+
+/* 1-by-1 case: the single diagonal entry (row 1 for 'L', row KD+1 */
+/* for 'U' in band storage) is the eigenvalue; eigenvector is 1. */
+
+	if (lower) {
+	    w[1] = ab[ab_dim1 + 1];
+	} else {
+	    w[1] = ab[*kd + 1 + ab_dim1];
+	}
+	if (wantz) {
+	    z__[z_dim1 + 1] = 1.;
+	}
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    safmin = dlamch_("Safe minimum");
+    eps = dlamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1. / smlnum;
+    rmin = sqrt(smlnum);
+    rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+    anrm = dlansb_("M", uplo, n, kd, &ab[ab_offset], ldab, &work[1]);
+    iscale = 0;
+    if (anrm > 0. && anrm < rmin) {
+	iscale = 1;
+	sigma = rmin / anrm;
+    } else if (anrm > rmax) {
+	iscale = 1;
+	sigma = rmax / anrm;
+    }
+    if (iscale == 1) {
+	if (lower) {
+	    dlascl_("B", kd, kd, &c_b11, &sigma, n, n, &ab[ab_offset], ldab,
+		    info);
+	} else {
+	    dlascl_("Q", kd, kd, &c_b11, &sigma, n, n, &ab[ab_offset], ldab,
+		    info);
+	}
+    }
+
+/* Call DSBTRD to reduce symmetric band matrix to tridiagonal form. */
+
+/* Workspace layout: WORK(inde..inde+n-2) holds the off-diagonal of the */
+/* tridiagonal matrix T; WORK(indwrk..) is scratch for DSBTRD/DSTEQR. */
+/* The diagonal of T goes straight into W. */
+
+    inde = 1;
+    indwrk = inde + *n;
+    dsbtrd_(jobz, uplo, n, kd, &ab[ab_offset], ldab, &w[1], &work[inde], &z__[
+	    z_offset], ldz, &work[indwrk], &iinfo);
+
+/* For eigenvalues only, call DSTERF. For eigenvectors, call SSTEQR. */
+
+    if (! wantz) {
+	dsterf_(n, &w[1], &work[inde], info);
+    } else {
+	dsteqr_(jobz, n, &w[1], &work[inde], &z__[z_offset], ldz, &work[
+		indwrk], info);
+    }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+/* On convergence failure (INFO > 0) only the first INFO-1 eigenvalues */
+/* are rescaled, matching the subset reported as reliable. */
+
+    if (iscale == 1) {
+	if (*info == 0) {
+	    imax = *n;
+	} else {
+	    imax = *info - 1;
+	}
+	d__1 = 1. / sigma;
+	dscal_(&imax, &d__1, &w[1], &c__1);
+    }
+
+    return 0;
+
+/* End of DSBEV */
+
+} /* dsbev_ */
diff --git a/contrib/libs/clapack/dsbevd.c b/contrib/libs/clapack/dsbevd.c
new file mode 100644
index 0000000000..e2717d073b
--- /dev/null
+++ b/contrib/libs/clapack/dsbevd.c
@@ -0,0 +1,338 @@
+/* dsbevd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b11 = 1.;
+static doublereal c_b18 = 0.;
+static integer c__1 = 1;
+
+/* Subroutine */ int dsbevd_(char *jobz, char *uplo, integer *n, integer *kd,
+	doublereal *ab, integer *ldab, doublereal *w, doublereal *z__,
+	integer *ldz, doublereal *work, integer *lwork, integer *iwork,
+	integer *liwork, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, z_dim1, z_offset, i__1;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    doublereal eps;
+    integer inde;
+    doublereal anrm, rmin, rmax;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *), dgemm_(char *, char *, integer *, integer *, integer *
+, doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, integer *);
+    doublereal sigma;
+    extern logical lsame_(char *, char *);
+    integer iinfo, lwmin;
+    logical lower, wantz;
+    integer indwk2, llwrk2;
+    extern doublereal dlamch_(char *);
+    integer iscale;
+    extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, integer *);
+    extern doublereal dlansb_(char *, char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *);
+    extern /* Subroutine */ int dstedc_(char *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    integer *, integer *, integer *), dlacpy_(char *, integer
+	    *, integer *, doublereal *, integer *, doublereal *, integer *);
+    doublereal safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal bignum;
+    extern /* Subroutine */ int dsbtrd_(char *, char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *), dsterf_(
+	    integer *, doublereal *, doublereal *, integer *);
+    integer indwrk, liwmin;
+    doublereal smlnum;
+    logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSBEVD computes all the eigenvalues and, optionally, eigenvectors of */
+/* a real symmetric band matrix A. If eigenvectors are desired, it uses */
+/* a divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB, N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, AB is overwritten by values generated during the */
+/* reduction to tridiagonal form. If UPLO = 'U', the first */
+/* superdiagonal and the diagonal of the tridiagonal matrix T */
+/* are returned in rows KD and KD+1 of AB, and if UPLO = 'L', */
+/* the diagonal and first subdiagonal of T are returned in the */
+/* first two rows of AB. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD + 1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/* eigenvectors of the matrix A, with the i-th column of Z */
+/* holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, */
+/* dimension (LWORK) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* IF N <= 1, LWORK must be at least 1. */
+/* If JOBZ = 'N' and N > 2, LWORK must be at least 2*N. */
+/* If JOBZ = 'V' and N > 2, LWORK must be at least */
+/* ( 1 + 5*N + 2*N**2 ). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK and IWORK */
+/* arrays, returns these values as the first entries of the WORK */
+/* and IWORK arrays, and no error message related to LWORK or */
+/* LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array LIWORK. */
+/* If JOBZ = 'N' or N <= 1, LIWORK must be at least 1. */
+/* If JOBZ = 'V' and N > 2, LIWORK must be at least 3 + 5*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK and IWORK arrays, and no error message related to */
+/* LWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the algorithm failed to converge; i */
+/* off-diagonal elements of an intermediate tridiagonal */
+/* form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments: shift to 1-based (Fortran-style) indexing. */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    lower = lsame_(uplo, "L");
+    lquery = *lwork == -1 || *liwork == -1;
+
+/* Minimal workspace sizes per the LWORK/LIWORK documentation above: */
+/* eigenvalues only needs 2*N doubles; eigenvectors need */
+/* 1 + 5*N + 2*N**2 doubles and 3 + 5*N integers. */
+
+    *info = 0;
+    if (*n <= 1) {
+	liwmin = 1;
+	lwmin = 1;
+    } else {
+	if (wantz) {
+	    liwmin = *n * 5 + 3;
+/* Computing 2nd power */
+	    i__1 = *n;
+	    lwmin = *n * 5 + 1 + (i__1 * i__1 << 1);
+	} else {
+	    liwmin = 1;
+	    lwmin = *n << 1;
+	}
+    }
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (lower || lsame_(uplo, "U"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*kd < 0) {
+	*info = -4;
+    } else if (*ldab < *kd + 1) {
+	*info = -6;
+    } else if (*ldz < 1 || wantz && *ldz < *n) {
+	*info = -9;
+    }
+
+    if (*info == 0) {
+	work[1] = (doublereal) lwmin;
+	iwork[1] = liwmin;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -11;
+	} else if (*liwork < liwmin && ! lquery) {
+	    *info = -13;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSBEVD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (*n == 1) {
+	w[1] = ab[ab_dim1 + 1];
+	if (wantz) {
+	    z__[z_dim1 + 1] = 1.;
+	}
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    safmin = dlamch_("Safe minimum");
+    eps = dlamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1. / smlnum;
+    rmin = sqrt(smlnum);
+    rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+    anrm = dlansb_("M", uplo, n, kd, &ab[ab_offset], ldab, &work[1]);
+    iscale = 0;
+    if (anrm > 0. && anrm < rmin) {
+	iscale = 1;
+	sigma = rmin / anrm;
+    } else if (anrm > rmax) {
+	iscale = 1;
+	sigma = rmax / anrm;
+    }
+    if (iscale == 1) {
+	if (lower) {
+	    dlascl_("B", kd, kd, &c_b11, &sigma, n, n, &ab[ab_offset], ldab,
+		    info);
+	} else {
+	    dlascl_("Q", kd, kd, &c_b11, &sigma, n, n, &ab[ab_offset], ldab,
+		    info);
+	}
+    }
+
+/* Call DSBTRD to reduce symmetric band matrix to tridiagonal form. */
+
+/* Workspace layout: WORK(inde..) holds the off-diagonal of T, */
+/* WORK(indwrk..) an n-by-n matrix for DSTEDC's eigenvectors of T, and */
+/* WORK(indwk2..) the remaining llwrk2 entries of scratch. */
+
+    inde = 1;
+    indwrk = inde + *n;
+    indwk2 = indwrk + *n * *n;
+    llwrk2 = *lwork - indwk2 + 1;
+    dsbtrd_(jobz, uplo, n, kd, &ab[ab_offset], ldab, &w[1], &work[inde], &z__[
+	    z_offset], ldz, &work[indwrk], &iinfo);
+
+/* For eigenvalues only, call DSTERF. For eigenvectors, call SSTEDC. */
+
+/* With JOBZ = 'V': DSTEDC ('I') puts the eigenvectors of T in */
+/* WORK(indwrk); dgemm_ back-transforms them by the orthogonal matrix */
+/* that dsbtrd_ accumulated in Z, and dlacpy_ copies the product back */
+/* into Z. */
+
+    if (! wantz) {
+	dsterf_(n, &w[1], &work[inde], info);
+    } else {
+	dstedc_("I", n, &w[1], &work[inde], &work[indwrk], n, &work[indwk2], &
+		llwrk2, &iwork[1], liwork, info);
+	dgemm_("N", "N", n, n, n, &c_b11, &z__[z_offset], ldz, &work[indwrk],
+		n, &c_b18, &work[indwk2], n);
+	dlacpy_("A", n, n, &work[indwk2], n, &z__[z_offset], ldz);
+    }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+    if (iscale == 1) {
+	d__1 = 1. / sigma;
+	dscal_(n, &d__1, &w[1], &c__1);
+    }
+
+/* Report the optimal workspace sizes on exit. */
+
+    work[1] = (doublereal) lwmin;
+    iwork[1] = liwmin;
+    return 0;
+
+/* End of DSBEVD */
+
+} /* dsbevd_ */
diff --git a/contrib/libs/clapack/dsbevx.c b/contrib/libs/clapack/dsbevx.c
new file mode 100644
index 0000000000..29916a010c
--- /dev/null
+++ b/contrib/libs/clapack/dsbevx.c
@@ -0,0 +1,520 @@
+/* dsbevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b14 = 1.;
+static integer c__1 = 1;
+static doublereal c_b34 = 0.;
+
+/* Subroutine */ int dsbevx_(char *jobz, char *range, char *uplo, integer *n,
+ integer *kd, doublereal *ab, integer *ldab, doublereal *q, integer *
+ ldq, doublereal *vl, doublereal *vu, integer *il, integer *iu,
+ doublereal *abstol, integer *m, doublereal *w, doublereal *z__,
+ integer *ldz, doublereal *work, integer *iwork, integer *ifail,
+ integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, q_dim1, q_offset, z_dim1, z_offset, i__1,
+ i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, jj;
+ doublereal eps, vll, vuu, tmp1;
+ integer indd, inde;
+ doublereal anrm;
+ integer imax;
+ doublereal rmin, rmax;
+ logical test;
+ integer itmp1, indee;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ doublereal sigma;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+ integer iinfo;
+ char order[1];
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dswap_(integer *, doublereal *, integer
+ *, doublereal *, integer *);
+ logical lower, wantz;
+ extern doublereal dlamch_(char *);
+ logical alleig, indeig;
+ integer iscale, indibl;
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *);
+ extern doublereal dlansb_(char *, char *, integer *, integer *,
+ doublereal *, integer *, doublereal *);
+ logical valeig;
+ extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal abstll, bignum;
+ extern /* Subroutine */ int dsbtrd_(char *, char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *);
+ integer indisp;
+ extern /* Subroutine */ int dstein_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *, doublereal *,
+ integer *, doublereal *, integer *, integer *, integer *),
+ dsterf_(integer *, doublereal *, doublereal *, integer *);
+ integer indiwo;
+ extern /* Subroutine */ int dstebz_(char *, char *, integer *, doublereal
+ *, doublereal *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *, doublereal *, integer *, integer *);
+ integer indwrk;
+ extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *);
+ integer nsplit;
+ doublereal smlnum;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSBEVX computes selected eigenvalues and, optionally, eigenvectors */
+/* of a real symmetric band matrix A. Eigenvalues and eigenvectors can */
+/* be selected by specifying either a range of values or a range of */
+/* indices for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found; */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found; */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB, N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, AB is overwritten by values generated during the */
+/* reduction to tridiagonal form. If UPLO = 'U', the first */
+/* superdiagonal and the diagonal of the tridiagonal matrix T */
+/* are returned in rows KD and KD+1 of AB, and if UPLO = 'L', */
+/* the diagonal and first subdiagonal of T are returned in the */
+/* first two rows of AB. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD + 1. */
+
+/* Q (output) DOUBLE PRECISION array, dimension (LDQ, N) */
+/* If JOBZ = 'V', the N-by-N orthogonal matrix used in the */
+/* reduction to tridiagonal form. */
+/* If JOBZ = 'N', the array Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. If JOBZ = 'V', then */
+/* LDQ >= max(1,N). */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) DOUBLE PRECISION */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing AB to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*DLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*DLAMCH('S'). */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices */
+/* with Guaranteed High Relative Accuracy," by Demmel and */
+/* Kahan, LAPACK Working Note #3. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* The first M elements contain the selected eigenvalues in */
+/* ascending order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and the */
+/* index of the eigenvector is returned in IFAIL. */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (7*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, then i eigenvectors failed to converge. */
+/* Their indices are stored in array IFAIL. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+ lower = lsame_(uplo, "L");
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -2;
+ } else if (! (lower || lsame_(uplo, "U"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*kd < 0) {
+ *info = -5;
+ } else if (*ldab < *kd + 1) {
+ *info = -7;
+ } else if (wantz && *ldq < max(1,*n)) {
+ *info = -9;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -11;
+ }
+ } else if (indeig) {
+ if (*il < 1 || *il > max(1,*n)) {
+ *info = -12;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -13;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -18;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSBEVX", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ *m = 1;
+ if (lower) {
+ tmp1 = ab[ab_dim1 + 1];
+ } else {
+ tmp1 = ab[*kd + 1 + ab_dim1];
+ }
+ if (valeig) {
+ if (! (*vl < tmp1 && *vu >= tmp1)) {
+ *m = 0;
+ }
+ }
+ if (*m == 1) {
+ w[1] = tmp1;
+ if (wantz) {
+ z__[z_dim1 + 1] = 1.;
+ }
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = dlamch_("Safe minimum");
+ eps = dlamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1. / smlnum;
+ rmin = sqrt(smlnum);
+/* Computing MIN */
+ d__1 = sqrt(bignum), d__2 = 1. / sqrt(sqrt(safmin));
+ rmax = min(d__1,d__2);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ iscale = 0;
+ abstll = *abstol;
+ if (valeig) {
+ vll = *vl;
+ vuu = *vu;
+ } else {
+ vll = 0.;
+ vuu = 0.;
+ }
+ anrm = dlansb_("M", uplo, n, kd, &ab[ab_offset], ldab, &work[1]);
+ if (anrm > 0. && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ if (lower) {
+ dlascl_("B", kd, kd, &c_b14, &sigma, n, n, &ab[ab_offset], ldab,
+ info);
+ } else {
+ dlascl_("Q", kd, kd, &c_b14, &sigma, n, n, &ab[ab_offset], ldab,
+ info);
+ }
+ if (*abstol > 0.) {
+ abstll = *abstol * sigma;
+ }
+ if (valeig) {
+ vll = *vl * sigma;
+ vuu = *vu * sigma;
+ }
+ }
+
+/* Call DSBTRD to reduce symmetric band matrix to tridiagonal form. */
+
+ indd = 1;
+ inde = indd + *n;
+ indwrk = inde + *n;
+ dsbtrd_(jobz, uplo, n, kd, &ab[ab_offset], ldab, &work[indd], &work[inde],
+ &q[q_offset], ldq, &work[indwrk], &iinfo);
+
+/* If all eigenvalues are desired and ABSTOL is less than or equal */
+/* to zero, then call DSTERF or SSTEQR. If this fails for some */
+/* eigenvalue, then try DSTEBZ. */
+
+ test = FALSE_;
+ if (indeig) {
+ if (*il == 1 && *iu == *n) {
+ test = TRUE_;
+ }
+ }
+ if ((alleig || test) && *abstol <= 0.) {
+ dcopy_(n, &work[indd], &c__1, &w[1], &c__1);
+ indee = indwrk + (*n << 1);
+ if (! wantz) {
+ i__1 = *n - 1;
+ dcopy_(&i__1, &work[inde], &c__1, &work[indee], &c__1);
+ dsterf_(n, &w[1], &work[indee], info);
+ } else {
+ dlacpy_("A", n, n, &q[q_offset], ldq, &z__[z_offset], ldz);
+ i__1 = *n - 1;
+ dcopy_(&i__1, &work[inde], &c__1, &work[indee], &c__1);
+ dsteqr_(jobz, n, &w[1], &work[indee], &z__[z_offset], ldz, &work[
+ indwrk], info);
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ifail[i__] = 0;
+/* L10: */
+ }
+ }
+ }
+ if (*info == 0) {
+ *m = *n;
+ goto L30;
+ }
+ *info = 0;
+ }
+
+/* Otherwise, call DSTEBZ and, if eigenvectors are desired, SSTEIN. */
+
+/* ORDER flag for DSTEBZ: 'B' orders eigenvalues by split-off block, */
+/* which DSTEIN requires; 'E' orders across the entire matrix (see */
+/* the DSTEBZ documentation). */
+
+ if (wantz) {
+ *(unsigned char *)order = 'B';
+ } else {
+ *(unsigned char *)order = 'E';
+ }
+ indibl = 1;
+ indisp = indibl + *n;
+ indiwo = indisp + *n;
+ dstebz_(range, order, n, &vll, &vuu, il, iu, &abstll, &work[indd], &work[
+ inde], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &work[
+ indwrk], &iwork[indiwo], info);
+
+ if (wantz) {
+ dstein_(n, &work[indd], &work[inde], m, &w[1], &iwork[indibl], &iwork[
+ indisp], &z__[z_offset], ldz, &work[indwrk], &iwork[indiwo], &
+ ifail[1], info);
+
+/* Apply orthogonal matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by DSTEIN. */
+
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ dcopy_(n, &z__[j * z_dim1 + 1], &c__1, &work[1], &c__1);
+ dgemv_("N", n, n, &c_b14, &q[q_offset], ldq, &work[1], &c__1, &
+ c_b34, &z__[j * z_dim1 + 1], &c__1);
+/* L20: */
+ }
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+L30:
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *m;
+ } else {
+/* on failure (INFO > 0), rescale only the first INFO-1 */
+/* eigenvalues */
+ imax = *info - 1;
+ }
+ d__1 = 1. / sigma;
+ dscal_(&imax, &d__1, &w[1], &c__1);
+ }
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+/* (straight selection sort on W; the matching columns of Z, the */
+/* block indices in IWORK(INDIBL:...), and IFAIL entries are swapped */
+/* alongside to stay consistent) */
+
+ if (wantz) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = 0;
+ tmp1 = w[j];
+ i__2 = *m;
+ for (jj = j + 1; jj <= i__2; ++jj) {
+ if (w[jj] < tmp1) {
+ i__ = jj;
+ tmp1 = w[jj];
+ }
+/* L40: */
+ }
+
+ if (i__ != 0) {
+ itmp1 = iwork[indibl + i__ - 1];
+ w[i__] = w[j];
+ iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+ w[j] = tmp1;
+ iwork[indibl + j - 1] = itmp1;
+ dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+ &c__1);
+ if (*info != 0) {
+ itmp1 = ifail[i__];
+ ifail[i__] = ifail[j];
+ ifail[j] = itmp1;
+ }
+ }
+/* L50: */
+ }
+ }
+
+ return 0;
+
+/* End of DSBEVX */
+
+} /* dsbevx_ */
diff --git a/contrib/libs/clapack/dsbgst.c b/contrib/libs/clapack/dsbgst.c
new file mode 100644
index 0000000000..24c9aad461
--- /dev/null
+++ b/contrib/libs/clapack/dsbgst.c
@@ -0,0 +1,1755 @@
+/* dsbgst.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b8 = 0.;
+static doublereal c_b9 = 1.;
+static integer c__1 = 1;
+static doublereal c_b20 = -1.;
+
+/* Subroutine */ int dsbgst_(char *vect, char *uplo, integer *n, integer *ka,
+ integer *kb, doublereal *ab, integer *ldab, doublereal *bb, integer *
+ ldbb, doublereal *x, integer *ldx, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, bb_dim1, bb_offset, x_dim1, x_offset, i__1,
+ i__2, i__3, i__4;
+ doublereal d__1;
+
+ /* Local variables */
+ integer i__, j, k, l, m;
+ doublereal t;
+ integer i0, i1, i2, j1, j2;
+ doublereal ra;
+ integer nr, nx, ka1, kb1;
+ doublereal ra1;
+ integer j1t, j2t;
+ doublereal bii;
+ integer kbt, nrt, inca;
+ extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *), drot_(integer *, doublereal *, integer *, doublereal *
+, integer *, doublereal *, doublereal *), dscal_(integer *,
+ doublereal *, doublereal *, integer *);
+ extern logical lsame_(char *, char *);
+ logical upper, wantx;
+ extern /* Subroutine */ int dlar2v_(integer *, doublereal *, doublereal *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *),
+ dlaset_(char *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *), dlartg_(doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *), xerbla_(
+ char *, integer *), dlargv_(integer *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, integer *);
+ logical update;
+ extern /* Subroutine */ int dlartv_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSBGST reduces a real symmetric-definite banded generalized */
+/* eigenproblem A*x = lambda*B*x to standard form C*y = lambda*y, */
+/* such that C has the same bandwidth as A. */
+
+/* B must have been previously factorized as S**T*S by DPBSTF, using a */
+/* split Cholesky factorization. A is overwritten by C = X**T*A*X, where */
+/* X = S**(-1)*Q and Q is an orthogonal matrix chosen to preserve the */
+/* bandwidth of A. */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* = 'N': do not form the transformation matrix X; */
+/* = 'V': form X. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* KA (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KA >= 0. */
+
+/* KB (input) INTEGER */
+/* The number of superdiagonals of the matrix B if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KA >= KB >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first ka+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(ka+1+i-j,j) = A(i,j) for max(1,j-ka)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+ka). */
+
+/* On exit, the transformed matrix X**T*A*X, stored in the same */
+/* format as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KA+1. */
+
+/* BB (input) DOUBLE PRECISION array, dimension (LDBB,N) */
+/* The banded factor S from the split Cholesky factorization of */
+/* B, as returned by DPBSTF, stored in the first KB+1 rows of */
+/* the array. */
+
+/* LDBB (input) INTEGER */
+/* The leading dimension of the array BB. LDBB >= KB+1. */
+
+/* X (output) DOUBLE PRECISION array, dimension (LDX,N) */
+/* If VECT = 'V', the n-by-n matrix X. */
+/* If VECT = 'N', the array X is not referenced. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. */
+/* LDX >= max(1,N) if VECT = 'V'; LDX >= 1 otherwise. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ bb_dim1 = *ldbb;
+ bb_offset = 1 + bb_dim1;
+ bb -= bb_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --work;
+
+ /* Function Body */
+ wantx = lsame_(vect, "V");
+ upper = lsame_(uplo, "U");
+ ka1 = *ka + 1;
+ kb1 = *kb + 1;
+ *info = 0;
+ if (! wantx && ! lsame_(vect, "N")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ka < 0) {
+ *info = -4;
+ } else if (*kb < 0 || *kb > *ka) {
+ *info = -5;
+ } else if (*ldab < *ka + 1) {
+ *info = -7;
+ } else if (*ldbb < *kb + 1) {
+ *info = -9;
+ } else if (*ldx < 1 || wantx && *ldx < max(1,*n)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSBGST", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ inca = *ldab * ka1;
+
+/* Initialize X to the unit matrix, if needed */
+
+ if (wantx) {
+ dlaset_("Full", n, n, &c_b8, &c_b9, &x[x_offset], ldx);
+ }
+
+/* Set M to the splitting point m. It must be the same value as is */
+/* used in DPBSTF. The chosen value allows the arrays WORK and RWORK */
+/* to be of dimension (N). */
+
+ m = (*n + *kb) / 2;
+
+/* The routine works in two phases, corresponding to the two halves */
+/* of the split Cholesky factorization of B as S**T*S where */
+
+/* S = ( U ) */
+/* ( M L ) */
+
+/* with U upper triangular of order m, and L lower triangular of */
+/* order n-m. S has the same bandwidth as B. */
+
+/* S is treated as a product of elementary matrices: */
+
+/* S = S(m)*S(m-1)*...*S(2)*S(1)*S(m+1)*S(m+2)*...*S(n-1)*S(n) */
+
+/* where S(i) is determined by the i-th row of S. */
+
+/* In phase 1, the index i takes the values n, n-1, ... , m+1; */
+/* in phase 2, it takes the values 1, 2, ... , m. */
+
+/* For each value of i, the current matrix A is updated by forming */
+/* inv(S(i))**T*A*inv(S(i)). This creates a triangular bulge outside */
+/* the band of A. The bulge is then pushed down toward the bottom of */
+/* A in phase 1, and up toward the top of A in phase 2, by applying */
+/* plane rotations. */
+
+/* There are kb*(kb+1)/2 elements in the bulge, but at most 2*kb-1 */
+/* of them are linearly independent, so annihilating a bulge requires */
+/* only 2*kb-1 plane rotations. The rotations are divided into a 1st */
+/* set of kb-1 rotations, and a 2nd set of kb rotations. */
+
+/* Wherever possible, rotations are generated and applied in vector */
+/* operations of length NR between the indices J1 and J2 (sometimes */
+/* replaced by modified values NRT, J1T or J2T). */
+
+/* The cosines and sines of the rotations are stored in the array */
+/* WORK. The cosines of the 1st set of rotations are stored in */
+/* elements n+2:n+m-kb-1 and the sines of the 1st set in elements */
+/* 2:m-kb-1; the cosines of the 2nd set are stored in elements */
+/* n+m-kb+1:2*n and the sines of the second set in elements m-kb+1:n. */
+
+/* The bulges are not formed explicitly; nonzero elements outside the */
+/* band are created only when they are required for generating new */
+/* rotations; they are stored in the array WORK, in positions where */
+/* they are later overwritten by the sines of the rotations which */
+/* annihilate them. */
+
+/* **************************** Phase 1 ***************************** */
+
+/* The logical structure of this phase is: */
+
+/* UPDATE = .TRUE. */
+/* DO I = N, M + 1, -1 */
+/* use S(i) to update A and create a new bulge */
+/* apply rotations to push all bulges KA positions downward */
+/* END DO */
+/* UPDATE = .FALSE. */
+/* DO I = M + KA + 1, N - 1 */
+/* apply rotations to push all bulges KA positions downward */
+/* END DO */
+
+/* To avoid duplicating code, the two loops are merged. */
+
+ update = TRUE_;
+ i__ = *n + 1;
+L10:
+ if (update) {
+ --i__;
+/* Computing MIN */
+ i__1 = *kb, i__2 = i__ - 1;
+ kbt = min(i__1,i__2);
+ i0 = i__ - 1;
+/* Computing MIN */
+ i__1 = *n, i__2 = i__ + *ka;
+ i1 = min(i__1,i__2);
+ i2 = i__ - kbt + ka1;
+ if (i__ < m + 1) {
+ update = FALSE_;
+ ++i__;
+ i0 = m;
+ if (*ka == 0) {
+ goto L480;
+ }
+ goto L10;
+ }
+ } else {
+ i__ += *ka;
+ if (i__ > *n - 1) {
+ goto L480;
+ }
+ }
+
+ if (upper) {
+
+/* Transform A, working with the upper triangle */
+
+ if (update) {
+
+/* Form inv(S(i))**T * A * inv(S(i)) */
+
+ bii = bb[kb1 + i__ * bb_dim1];
+ i__1 = i1;
+ for (j = i__; j <= i__1; ++j) {
+ ab[i__ - j + ka1 + j * ab_dim1] /= bii;
+/* L20: */
+ }
+/* Computing MAX */
+ i__1 = 1, i__2 = i__ - *ka;
+ i__3 = i__;
+ for (j = max(i__1,i__2); j <= i__3; ++j) {
+ ab[j - i__ + ka1 + i__ * ab_dim1] /= bii;
+/* L30: */
+ }
+ i__3 = i__ - 1;
+ for (k = i__ - kbt; k <= i__3; ++k) {
+ i__1 = k;
+ for (j = i__ - kbt; j <= i__1; ++j) {
+ ab[j - k + ka1 + k * ab_dim1] = ab[j - k + ka1 + k *
+ ab_dim1] - bb[j - i__ + kb1 + i__ * bb_dim1] * ab[
+ k - i__ + ka1 + i__ * ab_dim1] - bb[k - i__ + kb1
+ + i__ * bb_dim1] * ab[j - i__ + ka1 + i__ *
+ ab_dim1] + ab[ka1 + i__ * ab_dim1] * bb[j - i__ +
+ kb1 + i__ * bb_dim1] * bb[k - i__ + kb1 + i__ *
+ bb_dim1];
+/* L40: */
+ }
+/* Computing MAX */
+ i__1 = 1, i__2 = i__ - *ka;
+ i__4 = i__ - kbt - 1;
+ for (j = max(i__1,i__2); j <= i__4; ++j) {
+ ab[j - k + ka1 + k * ab_dim1] -= bb[k - i__ + kb1 + i__ *
+ bb_dim1] * ab[j - i__ + ka1 + i__ * ab_dim1];
+/* L50: */
+ }
+/* L60: */
+ }
+ i__3 = i1;
+ for (j = i__; j <= i__3; ++j) {
+/* Computing MAX */
+ i__4 = j - *ka, i__1 = i__ - kbt;
+ i__2 = i__ - 1;
+ for (k = max(i__4,i__1); k <= i__2; ++k) {
+ ab[k - j + ka1 + j * ab_dim1] -= bb[k - i__ + kb1 + i__ *
+ bb_dim1] * ab[i__ - j + ka1 + j * ab_dim1];
+/* L70: */
+ }
+/* L80: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by inv(S(i)) */
+
+ i__3 = *n - m;
+ d__1 = 1. / bii;
+ dscal_(&i__3, &d__1, &x[m + 1 + i__ * x_dim1], &c__1);
+ if (kbt > 0) {
+ i__3 = *n - m;
+ dger_(&i__3, &kbt, &c_b20, &x[m + 1 + i__ * x_dim1], &
+ c__1, &bb[kb1 - kbt + i__ * bb_dim1], &c__1, &x[m
+ + 1 + (i__ - kbt) * x_dim1], ldx);
+ }
+ }
+
+/* store a(i,i1) in RA1 for use in next loop over K */
+
+ ra1 = ab[i__ - i1 + ka1 + i1 * ab_dim1];
+ }
+
+/* Generate and apply vectors of rotations to chase all the */
+/* existing bulges KA positions down toward the bottom of the */
+/* band */
+
+ i__3 = *kb - 1;
+ for (k = 1; k <= i__3; ++k) {
+ if (update) {
+
+/* Determine the rotations which would annihilate the bulge */
+/* which has in theory just been created */
+
+ if (i__ - k + *ka < *n && i__ - k > 1) {
+
+/* generate rotation to annihilate a(i,i-k+ka+1) */
+
+ dlartg_(&ab[k + 1 + (i__ - k + *ka) * ab_dim1], &ra1, &
+ work[*n + i__ - k + *ka - m], &work[i__ - k + *ka
+ - m], &ra);
+
+/* create nonzero element a(i-k,i-k+ka+1) outside the */
+/* band and store it in WORK(i-k) */
+
+ t = -bb[kb1 - k + i__ * bb_dim1] * ra1;
+ work[i__ - k] = work[*n + i__ - k + *ka - m] * t - work[
+ i__ - k + *ka - m] * ab[(i__ - k + *ka) * ab_dim1
+ + 1];
+ ab[(i__ - k + *ka) * ab_dim1 + 1] = work[i__ - k + *ka -
+ m] * t + work[*n + i__ - k + *ka - m] * ab[(i__ -
+ k + *ka) * ab_dim1 + 1];
+ ra1 = ra;
+ }
+ }
+/* Computing MAX */
+ i__2 = 1, i__4 = k - i0 + 2;
+ j2 = i__ - k - 1 + max(i__2,i__4) * ka1;
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ if (update) {
+/* Computing MAX */
+ i__2 = j2, i__4 = i__ + (*ka << 1) - k + 1;
+ j2t = max(i__2,i__4);
+ } else {
+ j2t = j2;
+ }
+ nrt = (*n - j2t + *ka) / ka1;
+ i__2 = j1;
+ i__4 = ka1;
+ for (j = j2t; i__4 < 0 ? j >= i__2 : j <= i__2; j += i__4) {
+
+/* create nonzero element a(j-ka,j+1) outside the band */
+/* and store it in WORK(j-m) */
+
+ work[j - m] *= ab[(j + 1) * ab_dim1 + 1];
+ ab[(j + 1) * ab_dim1 + 1] = work[*n + j - m] * ab[(j + 1) *
+ ab_dim1 + 1];
+/* L90: */
+ }
+
+/* generate rotations in 1st set to annihilate elements which */
+/* have been created outside the band */
+
+ if (nrt > 0) {
+ dlargv_(&nrt, &ab[j2t * ab_dim1 + 1], &inca, &work[j2t - m], &
+ ka1, &work[*n + j2t - m], &ka1);
+ }
+ if (nr > 0) {
+
+/* apply rotations in 1st set from the right */
+
+ i__4 = *ka - 1;
+ for (l = 1; l <= i__4; ++l) {
+ dlartv_(&nr, &ab[ka1 - l + j2 * ab_dim1], &inca, &ab[*ka
+ - l + (j2 + 1) * ab_dim1], &inca, &work[*n + j2 -
+ m], &work[j2 - m], &ka1);
+/* L100: */
+ }
+
+/* apply rotations in 1st set from both sides to diagonal */
+/* blocks */
+
+ dlar2v_(&nr, &ab[ka1 + j2 * ab_dim1], &ab[ka1 + (j2 + 1) *
+ ab_dim1], &ab[*ka + (j2 + 1) * ab_dim1], &inca, &work[
+ *n + j2 - m], &work[j2 - m], &ka1);
+
+ }
+
+/* start applying rotations in 1st set from the left */
+
+ i__4 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__4; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ dlartv_(&nrt, &ab[l + (j2 + ka1 - l) * ab_dim1], &inca, &
+ ab[l + 1 + (j2 + ka1 - l) * ab_dim1], &inca, &
+ work[*n + j2 - m], &work[j2 - m], &ka1);
+ }
+/* L110: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 1st set */
+
+ i__4 = j1;
+ i__2 = ka1;
+ for (j = j2; i__2 < 0 ? j >= i__4 : j <= i__4; j += i__2) {
+ i__1 = *n - m;
+ drot_(&i__1, &x[m + 1 + j * x_dim1], &c__1, &x[m + 1 + (j
+ + 1) * x_dim1], &c__1, &work[*n + j - m], &work[j
+ - m]);
+/* L120: */
+ }
+ }
+/* L130: */
+ }
+
+ if (update) {
+ if (i2 <= *n && kbt > 0) {
+
+/* create nonzero element a(i-kbt,i-kbt+ka+1) outside the */
+/* band and store it in WORK(i-kbt) */
+
+ work[i__ - kbt] = -bb[kb1 - kbt + i__ * bb_dim1] * ra1;
+ }
+ }
+
+ for (k = *kb; k >= 1; --k) {
+ if (update) {
+/* Computing MAX */
+ i__3 = 2, i__2 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__3,i__2) * ka1;
+ } else {
+/* Computing MAX */
+ i__3 = 1, i__2 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__3,i__2) * ka1;
+ }
+
+/* finish applying rotations in 2nd set from the left */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (*n - j2 + *ka + l) / ka1;
+ if (nrt > 0) {
+ dlartv_(&nrt, &ab[l + (j2 - l + 1) * ab_dim1], &inca, &ab[
+ l + 1 + (j2 - l + 1) * ab_dim1], &inca, &work[*n
+ + j2 - *ka], &work[j2 - *ka], &ka1);
+ }
+/* L140: */
+ }
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ i__3 = j2;
+ i__2 = -ka1;
+ for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j += i__2) {
+ work[j] = work[j - *ka];
+ work[*n + j] = work[*n + j - *ka];
+/* L150: */
+ }
+ i__2 = j1;
+ i__3 = ka1;
+ for (j = j2; i__3 < 0 ? j >= i__2 : j <= i__2; j += i__3) {
+
+/* create nonzero element a(j-ka,j+1) outside the band */
+/* and store it in WORK(j) */
+
+ work[j] *= ab[(j + 1) * ab_dim1 + 1];
+ ab[(j + 1) * ab_dim1 + 1] = work[*n + j] * ab[(j + 1) *
+ ab_dim1 + 1];
+/* L160: */
+ }
+ if (update) {
+ if (i__ - k < *n - *ka && k <= kbt) {
+ work[i__ - k + *ka] = work[i__ - k];
+ }
+ }
+/* L170: */
+ }
+
+ for (k = *kb; k >= 1; --k) {
+/* Computing MAX */
+ i__3 = 1, i__2 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__3,i__2) * ka1;
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ if (nr > 0) {
+
+/* generate rotations in 2nd set to annihilate elements */
+/* which have been created outside the band */
+
+ dlargv_(&nr, &ab[j2 * ab_dim1 + 1], &inca, &work[j2], &ka1, &
+ work[*n + j2], &ka1);
+
+/* apply rotations in 2nd set from the right */
+
+ i__3 = *ka - 1;
+ for (l = 1; l <= i__3; ++l) {
+ dlartv_(&nr, &ab[ka1 - l + j2 * ab_dim1], &inca, &ab[*ka
+ - l + (j2 + 1) * ab_dim1], &inca, &work[*n + j2],
+ &work[j2], &ka1);
+/* L180: */
+ }
+
+/* apply rotations in 2nd set from both sides to diagonal */
+/* blocks */
+
+ dlar2v_(&nr, &ab[ka1 + j2 * ab_dim1], &ab[ka1 + (j2 + 1) *
+ ab_dim1], &ab[*ka + (j2 + 1) * ab_dim1], &inca, &work[
+ *n + j2], &work[j2], &ka1);
+
+ }
+
+/* start applying rotations in 2nd set from the left */
+
+ i__3 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__3; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ dlartv_(&nrt, &ab[l + (j2 + ka1 - l) * ab_dim1], &inca, &
+ ab[l + 1 + (j2 + ka1 - l) * ab_dim1], &inca, &
+ work[*n + j2], &work[j2], &ka1);
+ }
+/* L190: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 2nd set */
+
+ i__3 = j1;
+ i__2 = ka1;
+ for (j = j2; i__2 < 0 ? j >= i__3 : j <= i__3; j += i__2) {
+ i__4 = *n - m;
+ drot_(&i__4, &x[m + 1 + j * x_dim1], &c__1, &x[m + 1 + (j
+ + 1) * x_dim1], &c__1, &work[*n + j], &work[j]);
+/* L200: */
+ }
+ }
+/* L210: */
+ }
+
+ i__2 = *kb - 1;
+ for (k = 1; k <= i__2; ++k) {
+/* Computing MAX */
+ i__3 = 1, i__4 = k - i0 + 2;
+ j2 = i__ - k - 1 + max(i__3,i__4) * ka1;
+
+/* finish applying rotations in 1st set from the left */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ dlartv_(&nrt, &ab[l + (j2 + ka1 - l) * ab_dim1], &inca, &
+ ab[l + 1 + (j2 + ka1 - l) * ab_dim1], &inca, &
+ work[*n + j2 - m], &work[j2 - m], &ka1);
+ }
+/* L220: */
+ }
+/* L230: */
+ }
+
+ if (*kb > 1) {
+ i__2 = i__ - *kb + (*ka << 1) + 1;
+ for (j = *n - 1; j >= i__2; --j) {
+ work[*n + j - m] = work[*n + j - *ka - m];
+ work[j - m] = work[j - *ka - m];
+/* L240: */
+ }
+ }
+
+ } else {
+
+/* Transform A, working with the lower triangle */
+
+ if (update) {
+
+/* Form inv(S(i))**T * A * inv(S(i)) */
+
+ bii = bb[i__ * bb_dim1 + 1];
+ i__2 = i1;
+ for (j = i__; j <= i__2; ++j) {
+ ab[j - i__ + 1 + i__ * ab_dim1] /= bii;
+/* L250: */
+ }
+/* Computing MAX */
+ i__2 = 1, i__3 = i__ - *ka;
+ i__4 = i__;
+ for (j = max(i__2,i__3); j <= i__4; ++j) {
+ ab[i__ - j + 1 + j * ab_dim1] /= bii;
+/* L260: */
+ }
+ i__4 = i__ - 1;
+ for (k = i__ - kbt; k <= i__4; ++k) {
+ i__2 = k;
+ for (j = i__ - kbt; j <= i__2; ++j) {
+ ab[k - j + 1 + j * ab_dim1] = ab[k - j + 1 + j * ab_dim1]
+ - bb[i__ - j + 1 + j * bb_dim1] * ab[i__ - k + 1
+ + k * ab_dim1] - bb[i__ - k + 1 + k * bb_dim1] *
+ ab[i__ - j + 1 + j * ab_dim1] + ab[i__ * ab_dim1
+ + 1] * bb[i__ - j + 1 + j * bb_dim1] * bb[i__ - k
+ + 1 + k * bb_dim1];
+/* L270: */
+ }
+/* Computing MAX */
+ i__2 = 1, i__3 = i__ - *ka;
+ i__1 = i__ - kbt - 1;
+ for (j = max(i__2,i__3); j <= i__1; ++j) {
+ ab[k - j + 1 + j * ab_dim1] -= bb[i__ - k + 1 + k *
+ bb_dim1] * ab[i__ - j + 1 + j * ab_dim1];
+/* L280: */
+ }
+/* L290: */
+ }
+ i__4 = i1;
+ for (j = i__; j <= i__4; ++j) {
+/* Computing MAX */
+ i__1 = j - *ka, i__2 = i__ - kbt;
+ i__3 = i__ - 1;
+ for (k = max(i__1,i__2); k <= i__3; ++k) {
+ ab[j - k + 1 + k * ab_dim1] -= bb[i__ - k + 1 + k *
+ bb_dim1] * ab[j - i__ + 1 + i__ * ab_dim1];
+/* L300: */
+ }
+/* L310: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by inv(S(i)) */
+
+ i__4 = *n - m;
+ d__1 = 1. / bii;
+ dscal_(&i__4, &d__1, &x[m + 1 + i__ * x_dim1], &c__1);
+ if (kbt > 0) {
+ i__4 = *n - m;
+ i__3 = *ldbb - 1;
+ dger_(&i__4, &kbt, &c_b20, &x[m + 1 + i__ * x_dim1], &
+ c__1, &bb[kbt + 1 + (i__ - kbt) * bb_dim1], &i__3,
+ &x[m + 1 + (i__ - kbt) * x_dim1], ldx);
+ }
+ }
+
+/* store a(i1,i) in RA1 for use in next loop over K */
+
+ ra1 = ab[i1 - i__ + 1 + i__ * ab_dim1];
+ }
+
+/* Generate and apply vectors of rotations to chase all the */
+/* existing bulges KA positions down toward the bottom of the */
+/* band */
+
+ i__4 = *kb - 1;
+ for (k = 1; k <= i__4; ++k) {
+ if (update) {
+
+/* Determine the rotations which would annihilate the bulge */
+/* which has in theory just been created */
+
+ if (i__ - k + *ka < *n && i__ - k > 1) {
+
+/* generate rotation to annihilate a(i-k+ka+1,i) */
+
+ dlartg_(&ab[ka1 - k + i__ * ab_dim1], &ra1, &work[*n +
+ i__ - k + *ka - m], &work[i__ - k + *ka - m], &ra)
+ ;
+
+/* create nonzero element a(i-k+ka+1,i-k) outside the */
+/* band and store it in WORK(i-k) */
+
+ t = -bb[k + 1 + (i__ - k) * bb_dim1] * ra1;
+ work[i__ - k] = work[*n + i__ - k + *ka - m] * t - work[
+ i__ - k + *ka - m] * ab[ka1 + (i__ - k) * ab_dim1]
+ ;
+ ab[ka1 + (i__ - k) * ab_dim1] = work[i__ - k + *ka - m] *
+ t + work[*n + i__ - k + *ka - m] * ab[ka1 + (i__
+ - k) * ab_dim1];
+ ra1 = ra;
+ }
+ }
+/* Computing MAX */
+ i__3 = 1, i__1 = k - i0 + 2;
+ j2 = i__ - k - 1 + max(i__3,i__1) * ka1;
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ if (update) {
+/* Computing MAX */
+ i__3 = j2, i__1 = i__ + (*ka << 1) - k + 1;
+ j2t = max(i__3,i__1);
+ } else {
+ j2t = j2;
+ }
+ nrt = (*n - j2t + *ka) / ka1;
+ i__3 = j1;
+ i__1 = ka1;
+ for (j = j2t; i__1 < 0 ? j >= i__3 : j <= i__3; j += i__1) {
+
+/* create nonzero element a(j+1,j-ka) outside the band */
+/* and store it in WORK(j-m) */
+
+ work[j - m] *= ab[ka1 + (j - *ka + 1) * ab_dim1];
+ ab[ka1 + (j - *ka + 1) * ab_dim1] = work[*n + j - m] * ab[ka1
+ + (j - *ka + 1) * ab_dim1];
+/* L320: */
+ }
+
+/* generate rotations in 1st set to annihilate elements which */
+/* have been created outside the band */
+
+ if (nrt > 0) {
+ dlargv_(&nrt, &ab[ka1 + (j2t - *ka) * ab_dim1], &inca, &work[
+ j2t - m], &ka1, &work[*n + j2t - m], &ka1);
+ }
+ if (nr > 0) {
+
+/* apply rotations in 1st set from the left */
+
+ i__1 = *ka - 1;
+ for (l = 1; l <= i__1; ++l) {
+ dlartv_(&nr, &ab[l + 1 + (j2 - l) * ab_dim1], &inca, &ab[
+ l + 2 + (j2 - l) * ab_dim1], &inca, &work[*n + j2
+ - m], &work[j2 - m], &ka1);
+/* L330: */
+ }
+
+/* apply rotations in 1st set from both sides to diagonal */
+/* blocks */
+
+ dlar2v_(&nr, &ab[j2 * ab_dim1 + 1], &ab[(j2 + 1) * ab_dim1 +
+ 1], &ab[j2 * ab_dim1 + 2], &inca, &work[*n + j2 - m],
+ &work[j2 - m], &ka1);
+
+ }
+
+/* start applying rotations in 1st set from the right */
+
+ i__1 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__1; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ dlartv_(&nrt, &ab[ka1 - l + 1 + j2 * ab_dim1], &inca, &ab[
+ ka1 - l + (j2 + 1) * ab_dim1], &inca, &work[*n +
+ j2 - m], &work[j2 - m], &ka1);
+ }
+/* L340: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 1st set */
+
+ i__1 = j1;
+ i__3 = ka1;
+ for (j = j2; i__3 < 0 ? j >= i__1 : j <= i__1; j += i__3) {
+ i__2 = *n - m;
+ drot_(&i__2, &x[m + 1 + j * x_dim1], &c__1, &x[m + 1 + (j
+ + 1) * x_dim1], &c__1, &work[*n + j - m], &work[j
+ - m]);
+/* L350: */
+ }
+ }
+/* L360: */
+ }
+
+ if (update) {
+ if (i2 <= *n && kbt > 0) {
+
+/* create nonzero element a(i-kbt+ka+1,i-kbt) outside the */
+/* band and store it in WORK(i-kbt) */
+
+ work[i__ - kbt] = -bb[kbt + 1 + (i__ - kbt) * bb_dim1] * ra1;
+ }
+ }
+
+ for (k = *kb; k >= 1; --k) {
+ if (update) {
+/* Computing MAX */
+ i__4 = 2, i__3 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__4,i__3) * ka1;
+ } else {
+/* Computing MAX */
+ i__4 = 1, i__3 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__4,i__3) * ka1;
+ }
+
+/* finish applying rotations in 2nd set from the right */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (*n - j2 + *ka + l) / ka1;
+ if (nrt > 0) {
+ dlartv_(&nrt, &ab[ka1 - l + 1 + (j2 - *ka) * ab_dim1], &
+ inca, &ab[ka1 - l + (j2 - *ka + 1) * ab_dim1], &
+ inca, &work[*n + j2 - *ka], &work[j2 - *ka], &ka1)
+ ;
+ }
+/* L370: */
+ }
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ i__4 = j2;
+ i__3 = -ka1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+ work[j] = work[j - *ka];
+ work[*n + j] = work[*n + j - *ka];
+/* L380: */
+ }
+ i__3 = j1;
+ i__4 = ka1;
+ for (j = j2; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+
+/* create nonzero element a(j+1,j-ka) outside the band */
+/* and store it in WORK(j) */
+
+ work[j] *= ab[ka1 + (j - *ka + 1) * ab_dim1];
+ ab[ka1 + (j - *ka + 1) * ab_dim1] = work[*n + j] * ab[ka1 + (
+ j - *ka + 1) * ab_dim1];
+/* L390: */
+ }
+ if (update) {
+ if (i__ - k < *n - *ka && k <= kbt) {
+ work[i__ - k + *ka] = work[i__ - k];
+ }
+ }
+/* L400: */
+ }
+
+ for (k = *kb; k >= 1; --k) {
+/* Computing MAX */
+ i__4 = 1, i__3 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__4,i__3) * ka1;
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ if (nr > 0) {
+
+/* generate rotations in 2nd set to annihilate elements */
+/* which have been created outside the band */
+
+ dlargv_(&nr, &ab[ka1 + (j2 - *ka) * ab_dim1], &inca, &work[j2]
+, &ka1, &work[*n + j2], &ka1);
+
+/* apply rotations in 2nd set from the left */
+
+ i__4 = *ka - 1;
+ for (l = 1; l <= i__4; ++l) {
+ dlartv_(&nr, &ab[l + 1 + (j2 - l) * ab_dim1], &inca, &ab[
+ l + 2 + (j2 - l) * ab_dim1], &inca, &work[*n + j2]
+, &work[j2], &ka1);
+/* L410: */
+ }
+
+/* apply rotations in 2nd set from both sides to diagonal */
+/* blocks */
+
+ dlar2v_(&nr, &ab[j2 * ab_dim1 + 1], &ab[(j2 + 1) * ab_dim1 +
+ 1], &ab[j2 * ab_dim1 + 2], &inca, &work[*n + j2], &
+ work[j2], &ka1);
+
+ }
+
+/* start applying rotations in 2nd set from the right */
+
+ i__4 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__4; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ dlartv_(&nrt, &ab[ka1 - l + 1 + j2 * ab_dim1], &inca, &ab[
+ ka1 - l + (j2 + 1) * ab_dim1], &inca, &work[*n +
+ j2], &work[j2], &ka1);
+ }
+/* L420: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 2nd set */
+
+ i__4 = j1;
+ i__3 = ka1;
+ for (j = j2; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+ i__1 = *n - m;
+ drot_(&i__1, &x[m + 1 + j * x_dim1], &c__1, &x[m + 1 + (j
+ + 1) * x_dim1], &c__1, &work[*n + j], &work[j]);
+/* L430: */
+ }
+ }
+/* L440: */
+ }
+
+ i__3 = *kb - 1;
+ for (k = 1; k <= i__3; ++k) {
+/* Computing MAX */
+ i__4 = 1, i__1 = k - i0 + 2;
+ j2 = i__ - k - 1 + max(i__4,i__1) * ka1;
+
+/* finish applying rotations in 1st set from the right */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ dlartv_(&nrt, &ab[ka1 - l + 1 + j2 * ab_dim1], &inca, &ab[
+ ka1 - l + (j2 + 1) * ab_dim1], &inca, &work[*n +
+ j2 - m], &work[j2 - m], &ka1);
+ }
+/* L450: */
+ }
+/* L460: */
+ }
+
+ if (*kb > 1) {
+ i__3 = i__ - *kb + (*ka << 1) + 1;
+ for (j = *n - 1; j >= i__3; --j) {
+ work[*n + j - m] = work[*n + j - *ka - m];
+ work[j - m] = work[j - *ka - m];
+/* L470: */
+ }
+ }
+
+ }
+
+ goto L10;
+
+L480:
+
+/* **************************** Phase 2 ***************************** */
+
+/* The logical structure of this phase is: */
+
+/* UPDATE = .TRUE. */
+/* DO I = 1, M */
+/* use S(i) to update A and create a new bulge */
+/* apply rotations to push all bulges KA positions upward */
+/* END DO */
+/* UPDATE = .FALSE. */
+/* DO I = M - KA - 1, 2, -1 */
+/* apply rotations to push all bulges KA positions upward */
+/* END DO */
+
+/* To avoid duplicating code, the two loops are merged. */
+
+ update = TRUE_;
+ i__ = 0;
+L490:
+ if (update) {
+ ++i__;
+/* Computing MIN */
+ i__3 = *kb, i__4 = m - i__;
+ kbt = min(i__3,i__4);
+ i0 = i__ + 1;
+/* Computing MAX */
+ i__3 = 1, i__4 = i__ - *ka;
+ i1 = max(i__3,i__4);
+ i2 = i__ + kbt - ka1;
+ if (i__ > m) {
+ update = FALSE_;
+ --i__;
+ i0 = m + 1;
+ if (*ka == 0) {
+ return 0;
+ }
+ goto L490;
+ }
+ } else {
+ i__ -= *ka;
+ if (i__ < 2) {
+ return 0;
+ }
+ }
+
+ if (i__ < m - kbt) {
+ nx = m;
+ } else {
+ nx = *n;
+ }
+
+ if (upper) {
+
+/* Transform A, working with the upper triangle */
+
+ if (update) {
+
+/* Form inv(S(i))**T * A * inv(S(i)) */
+
+ bii = bb[kb1 + i__ * bb_dim1];
+ i__3 = i__;
+ for (j = i1; j <= i__3; ++j) {
+ ab[j - i__ + ka1 + i__ * ab_dim1] /= bii;
+/* L500: */
+ }
+/* Computing MIN */
+ i__4 = *n, i__1 = i__ + *ka;
+ i__3 = min(i__4,i__1);
+ for (j = i__; j <= i__3; ++j) {
+ ab[i__ - j + ka1 + j * ab_dim1] /= bii;
+/* L510: */
+ }
+ i__3 = i__ + kbt;
+ for (k = i__ + 1; k <= i__3; ++k) {
+ i__4 = i__ + kbt;
+ for (j = k; j <= i__4; ++j) {
+ ab[k - j + ka1 + j * ab_dim1] = ab[k - j + ka1 + j *
+ ab_dim1] - bb[i__ - j + kb1 + j * bb_dim1] * ab[
+ i__ - k + ka1 + k * ab_dim1] - bb[i__ - k + kb1 +
+ k * bb_dim1] * ab[i__ - j + ka1 + j * ab_dim1] +
+ ab[ka1 + i__ * ab_dim1] * bb[i__ - j + kb1 + j *
+ bb_dim1] * bb[i__ - k + kb1 + k * bb_dim1];
+/* L520: */
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = i__ + *ka;
+ i__4 = min(i__1,i__2);
+ for (j = i__ + kbt + 1; j <= i__4; ++j) {
+ ab[k - j + ka1 + j * ab_dim1] -= bb[i__ - k + kb1 + k *
+ bb_dim1] * ab[i__ - j + ka1 + j * ab_dim1];
+/* L530: */
+ }
+/* L540: */
+ }
+ i__3 = i__;
+ for (j = i1; j <= i__3; ++j) {
+/* Computing MIN */
+ i__1 = j + *ka, i__2 = i__ + kbt;
+ i__4 = min(i__1,i__2);
+ for (k = i__ + 1; k <= i__4; ++k) {
+ ab[j - k + ka1 + k * ab_dim1] -= bb[i__ - k + kb1 + k *
+ bb_dim1] * ab[j - i__ + ka1 + i__ * ab_dim1];
+/* L550: */
+ }
+/* L560: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by inv(S(i)) */
+
+ d__1 = 1. / bii;
+ dscal_(&nx, &d__1, &x[i__ * x_dim1 + 1], &c__1);
+ if (kbt > 0) {
+ i__3 = *ldbb - 1;
+ dger_(&nx, &kbt, &c_b20, &x[i__ * x_dim1 + 1], &c__1, &bb[
+ *kb + (i__ + 1) * bb_dim1], &i__3, &x[(i__ + 1) *
+ x_dim1 + 1], ldx);
+ }
+ }
+
+/* store a(i1,i) in RA1 for use in next loop over K */
+
+ ra1 = ab[i1 - i__ + ka1 + i__ * ab_dim1];
+ }
+
+/* Generate and apply vectors of rotations to chase all the */
+/* existing bulges KA positions up toward the top of the band */
+
+ i__3 = *kb - 1;
+ for (k = 1; k <= i__3; ++k) {
+ if (update) {
+
+/* Determine the rotations which would annihilate the bulge */
+/* which has in theory just been created */
+
+ if (i__ + k - ka1 > 0 && i__ + k < m) {
+
+/* generate rotation to annihilate a(i+k-ka-1,i) */
+
+ dlartg_(&ab[k + 1 + i__ * ab_dim1], &ra1, &work[*n + i__
+ + k - *ka], &work[i__ + k - *ka], &ra);
+
+/* create nonzero element a(i+k-ka-1,i+k) outside the */
+/* band and store it in WORK(m-kb+i+k) */
+
+ t = -bb[kb1 - k + (i__ + k) * bb_dim1] * ra1;
+ work[m - *kb + i__ + k] = work[*n + i__ + k - *ka] * t -
+ work[i__ + k - *ka] * ab[(i__ + k) * ab_dim1 + 1];
+ ab[(i__ + k) * ab_dim1 + 1] = work[i__ + k - *ka] * t +
+ work[*n + i__ + k - *ka] * ab[(i__ + k) * ab_dim1
+ + 1];
+ ra1 = ra;
+ }
+ }
+/* Computing MAX */
+ i__4 = 1, i__1 = k + i0 - m + 1;
+ j2 = i__ + k + 1 - max(i__4,i__1) * ka1;
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ if (update) {
+/* Computing MIN */
+ i__4 = j2, i__1 = i__ - (*ka << 1) + k - 1;
+ j2t = min(i__4,i__1);
+ } else {
+ j2t = j2;
+ }
+ nrt = (j2t + *ka - 1) / ka1;
+ i__4 = j2t;
+ i__1 = ka1;
+ for (j = j1; i__1 < 0 ? j >= i__4 : j <= i__4; j += i__1) {
+
+/* create nonzero element a(j-1,j+ka) outside the band */
+/* and store it in WORK(j) */
+
+ work[j] *= ab[(j + *ka - 1) * ab_dim1 + 1];
+ ab[(j + *ka - 1) * ab_dim1 + 1] = work[*n + j] * ab[(j + *ka
+ - 1) * ab_dim1 + 1];
+/* L570: */
+ }
+
+/* generate rotations in 1st set to annihilate elements which */
+/* have been created outside the band */
+
+ if (nrt > 0) {
+ dlargv_(&nrt, &ab[(j1 + *ka) * ab_dim1 + 1], &inca, &work[j1],
+ &ka1, &work[*n + j1], &ka1);
+ }
+ if (nr > 0) {
+
+/* apply rotations in 1st set from the left */
+
+ i__1 = *ka - 1;
+ for (l = 1; l <= i__1; ++l) {
+ dlartv_(&nr, &ab[ka1 - l + (j1 + l) * ab_dim1], &inca, &
+ ab[*ka - l + (j1 + l) * ab_dim1], &inca, &work[*n
+ + j1], &work[j1], &ka1);
+/* L580: */
+ }
+
+/* apply rotations in 1st set from both sides to diagonal */
+/* blocks */
+
+ dlar2v_(&nr, &ab[ka1 + j1 * ab_dim1], &ab[ka1 + (j1 - 1) *
+ ab_dim1], &ab[*ka + j1 * ab_dim1], &inca, &work[*n +
+ j1], &work[j1], &ka1);
+
+ }
+
+/* start applying rotations in 1st set from the right */
+
+ i__1 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__1; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ dlartv_(&nrt, &ab[l + j1t * ab_dim1], &inca, &ab[l + 1 + (
+ j1t - 1) * ab_dim1], &inca, &work[*n + j1t], &
+ work[j1t], &ka1);
+ }
+/* L590: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 1st set */
+
+ i__1 = j2;
+ i__4 = ka1;
+ for (j = j1; i__4 < 0 ? j >= i__1 : j <= i__1; j += i__4) {
+ drot_(&nx, &x[j * x_dim1 + 1], &c__1, &x[(j - 1) * x_dim1
+ + 1], &c__1, &work[*n + j], &work[j]);
+/* L600: */
+ }
+ }
+/* L610: */
+ }
+
+ if (update) {
+ if (i2 > 0 && kbt > 0) {
+
+/* create nonzero element a(i+kbt-ka-1,i+kbt) outside the */
+/* band and store it in WORK(m-kb+i+kbt) */
+
+ work[m - *kb + i__ + kbt] = -bb[kb1 - kbt + (i__ + kbt) *
+ bb_dim1] * ra1;
+ }
+ }
+
+ for (k = *kb; k >= 1; --k) {
+ if (update) {
+/* Computing MAX */
+ i__3 = 2, i__4 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__3,i__4) * ka1;
+ } else {
+/* Computing MAX */
+ i__3 = 1, i__4 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__3,i__4) * ka1;
+ }
+
+/* finish applying rotations in 2nd set from the right */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (j2 + *ka + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ dlartv_(&nrt, &ab[l + (j1t + *ka) * ab_dim1], &inca, &ab[
+ l + 1 + (j1t + *ka - 1) * ab_dim1], &inca, &work[*
+ n + m - *kb + j1t + *ka], &work[m - *kb + j1t + *
+ ka], &ka1);
+ }
+/* L620: */
+ }
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ i__3 = j2;
+ i__4 = ka1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+ work[m - *kb + j] = work[m - *kb + j + *ka];
+ work[*n + m - *kb + j] = work[*n + m - *kb + j + *ka];
+/* L630: */
+ }
+ i__4 = j2;
+ i__3 = ka1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+
+/* create nonzero element a(j-1,j+ka) outside the band */
+/* and store it in WORK(m-kb+j) */
+
+ work[m - *kb + j] *= ab[(j + *ka - 1) * ab_dim1 + 1];
+ ab[(j + *ka - 1) * ab_dim1 + 1] = work[*n + m - *kb + j] * ab[
+ (j + *ka - 1) * ab_dim1 + 1];
+/* L640: */
+ }
+ if (update) {
+ if (i__ + k > ka1 && k <= kbt) {
+ work[m - *kb + i__ + k - *ka] = work[m - *kb + i__ + k];
+ }
+ }
+/* L650: */
+ }
+
+ for (k = *kb; k >= 1; --k) {
+/* Computing MAX */
+ i__3 = 1, i__4 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__3,i__4) * ka1;
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ if (nr > 0) {
+
+/* generate rotations in 2nd set to annihilate elements */
+/* which have been created outside the band */
+
+ dlargv_(&nr, &ab[(j1 + *ka) * ab_dim1 + 1], &inca, &work[m - *
+ kb + j1], &ka1, &work[*n + m - *kb + j1], &ka1);
+
+/* apply rotations in 2nd set from the left */
+
+ i__3 = *ka - 1;
+ for (l = 1; l <= i__3; ++l) {
+ dlartv_(&nr, &ab[ka1 - l + (j1 + l) * ab_dim1], &inca, &
+ ab[*ka - l + (j1 + l) * ab_dim1], &inca, &work[*n
+ + m - *kb + j1], &work[m - *kb + j1], &ka1);
+/* L660: */
+ }
+
+/* apply rotations in 2nd set from both sides to diagonal */
+/* blocks */
+
+ dlar2v_(&nr, &ab[ka1 + j1 * ab_dim1], &ab[ka1 + (j1 - 1) *
+ ab_dim1], &ab[*ka + j1 * ab_dim1], &inca, &work[*n +
+ m - *kb + j1], &work[m - *kb + j1], &ka1);
+
+ }
+
+/* start applying rotations in 2nd set from the right */
+
+ i__3 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__3; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ dlartv_(&nrt, &ab[l + j1t * ab_dim1], &inca, &ab[l + 1 + (
+ j1t - 1) * ab_dim1], &inca, &work[*n + m - *kb +
+ j1t], &work[m - *kb + j1t], &ka1);
+ }
+/* L670: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 2nd set */
+
+ i__3 = j2;
+ i__4 = ka1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+ drot_(&nx, &x[j * x_dim1 + 1], &c__1, &x[(j - 1) * x_dim1
+ + 1], &c__1, &work[*n + m - *kb + j], &work[m - *
+ kb + j]);
+/* L680: */
+ }
+ }
+/* L690: */
+ }
+
+ i__4 = *kb - 1;
+ for (k = 1; k <= i__4; ++k) {
+/* Computing MAX */
+ i__3 = 1, i__1 = k + i0 - m + 1;
+ j2 = i__ + k + 1 - max(i__3,i__1) * ka1;
+
+/* finish applying rotations in 1st set from the right */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ dlartv_(&nrt, &ab[l + j1t * ab_dim1], &inca, &ab[l + 1 + (
+ j1t - 1) * ab_dim1], &inca, &work[*n + j1t], &
+ work[j1t], &ka1);
+ }
+/* L700: */
+ }
+/* L710: */
+ }
+
+ if (*kb > 1) {
+/* Computing MIN */
+ i__3 = i__ + *kb;
+ i__4 = min(i__3,m) - (*ka << 1) - 1;
+ for (j = 2; j <= i__4; ++j) {
+ work[*n + j] = work[*n + j + *ka];
+ work[j] = work[j + *ka];
+/* L720: */
+ }
+ }
+
+ } else {
+
+/* Transform A, working with the lower triangle */
+
+ if (update) {
+
+/* Form inv(S(i))**T * A * inv(S(i)) */
+
+ bii = bb[i__ * bb_dim1 + 1];
+ i__4 = i__;
+ for (j = i1; j <= i__4; ++j) {
+ ab[i__ - j + 1 + j * ab_dim1] /= bii;
+/* L730: */
+ }
+/* Computing MIN */
+ i__3 = *n, i__1 = i__ + *ka;
+ i__4 = min(i__3,i__1);
+ for (j = i__; j <= i__4; ++j) {
+ ab[j - i__ + 1 + i__ * ab_dim1] /= bii;
+/* L740: */
+ }
+ i__4 = i__ + kbt;
+ for (k = i__ + 1; k <= i__4; ++k) {
+ i__3 = i__ + kbt;
+ for (j = k; j <= i__3; ++j) {
+ ab[j - k + 1 + k * ab_dim1] = ab[j - k + 1 + k * ab_dim1]
+ - bb[j - i__ + 1 + i__ * bb_dim1] * ab[k - i__ +
+ 1 + i__ * ab_dim1] - bb[k - i__ + 1 + i__ *
+ bb_dim1] * ab[j - i__ + 1 + i__ * ab_dim1] + ab[
+ i__ * ab_dim1 + 1] * bb[j - i__ + 1 + i__ *
+ bb_dim1] * bb[k - i__ + 1 + i__ * bb_dim1];
+/* L750: */
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = i__ + *ka;
+ i__3 = min(i__1,i__2);
+ for (j = i__ + kbt + 1; j <= i__3; ++j) {
+ ab[j - k + 1 + k * ab_dim1] -= bb[k - i__ + 1 + i__ *
+ bb_dim1] * ab[j - i__ + 1 + i__ * ab_dim1];
+/* L760: */
+ }
+/* L770: */
+ }
+ i__4 = i__;
+ for (j = i1; j <= i__4; ++j) {
+/* Computing MIN */
+ i__1 = j + *ka, i__2 = i__ + kbt;
+ i__3 = min(i__1,i__2);
+ for (k = i__ + 1; k <= i__3; ++k) {
+ ab[k - j + 1 + j * ab_dim1] -= bb[k - i__ + 1 + i__ *
+ bb_dim1] * ab[i__ - j + 1 + j * ab_dim1];
+/* L780: */
+ }
+/* L790: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by inv(S(i)) */
+
+ d__1 = 1. / bii;
+ dscal_(&nx, &d__1, &x[i__ * x_dim1 + 1], &c__1);
+ if (kbt > 0) {
+ dger_(&nx, &kbt, &c_b20, &x[i__ * x_dim1 + 1], &c__1, &bb[
+ i__ * bb_dim1 + 2], &c__1, &x[(i__ + 1) * x_dim1
+ + 1], ldx);
+ }
+ }
+
+/* store a(i,i1) in RA1 for use in next loop over K */
+
+ ra1 = ab[i__ - i1 + 1 + i1 * ab_dim1];
+ }
+
+/* Generate and apply vectors of rotations to chase all the */
+/* existing bulges KA positions up toward the top of the band */
+
+ i__4 = *kb - 1;
+ for (k = 1; k <= i__4; ++k) {
+ if (update) {
+
+/* Determine the rotations which would annihilate the bulge */
+/* which has in theory just been created */
+
+ if (i__ + k - ka1 > 0 && i__ + k < m) {
+
+/* generate rotation to annihilate a(i,i+k-ka-1) */
+
+ dlartg_(&ab[ka1 - k + (i__ + k - *ka) * ab_dim1], &ra1, &
+ work[*n + i__ + k - *ka], &work[i__ + k - *ka], &
+ ra);
+
+/* create nonzero element a(i+k,i+k-ka-1) outside the */
+/* band and store it in WORK(m-kb+i+k) */
+
+ t = -bb[k + 1 + i__ * bb_dim1] * ra1;
+ work[m - *kb + i__ + k] = work[*n + i__ + k - *ka] * t -
+ work[i__ + k - *ka] * ab[ka1 + (i__ + k - *ka) *
+ ab_dim1];
+ ab[ka1 + (i__ + k - *ka) * ab_dim1] = work[i__ + k - *ka]
+ * t + work[*n + i__ + k - *ka] * ab[ka1 + (i__ +
+ k - *ka) * ab_dim1];
+ ra1 = ra;
+ }
+ }
+/* Computing MAX */
+ i__3 = 1, i__1 = k + i0 - m + 1;
+ j2 = i__ + k + 1 - max(i__3,i__1) * ka1;
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ if (update) {
+/* Computing MIN */
+ i__3 = j2, i__1 = i__ - (*ka << 1) + k - 1;
+ j2t = min(i__3,i__1);
+ } else {
+ j2t = j2;
+ }
+ nrt = (j2t + *ka - 1) / ka1;
+ i__3 = j2t;
+ i__1 = ka1;
+ for (j = j1; i__1 < 0 ? j >= i__3 : j <= i__3; j += i__1) {
+
+/* create nonzero element a(j+ka,j-1) outside the band */
+/* and store it in WORK(j) */
+
+ work[j] *= ab[ka1 + (j - 1) * ab_dim1];
+ ab[ka1 + (j - 1) * ab_dim1] = work[*n + j] * ab[ka1 + (j - 1)
+ * ab_dim1];
+/* L800: */
+ }
+
+/* generate rotations in 1st set to annihilate elements which */
+/* have been created outside the band */
+
+ if (nrt > 0) {
+ dlargv_(&nrt, &ab[ka1 + j1 * ab_dim1], &inca, &work[j1], &ka1,
+ &work[*n + j1], &ka1);
+ }
+ if (nr > 0) {
+
+/* apply rotations in 1st set from the right */
+
+ i__1 = *ka - 1;
+ for (l = 1; l <= i__1; ++l) {
+ dlartv_(&nr, &ab[l + 1 + j1 * ab_dim1], &inca, &ab[l + 2
+ + (j1 - 1) * ab_dim1], &inca, &work[*n + j1], &
+ work[j1], &ka1);
+/* L810: */
+ }
+
+/* apply rotations in 1st set from both sides to diagonal */
+/* blocks */
+
+ dlar2v_(&nr, &ab[j1 * ab_dim1 + 1], &ab[(j1 - 1) * ab_dim1 +
+ 1], &ab[(j1 - 1) * ab_dim1 + 2], &inca, &work[*n + j1]
+, &work[j1], &ka1);
+
+ }
+
+/* start applying rotations in 1st set from the left */
+
+ i__1 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__1; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ dlartv_(&nrt, &ab[ka1 - l + 1 + (j1t - ka1 + l) * ab_dim1]
+, &inca, &ab[ka1 - l + (j1t - ka1 + l) * ab_dim1],
+ &inca, &work[*n + j1t], &work[j1t], &ka1);
+ }
+/* L820: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 1st set */
+
+ i__1 = j2;
+ i__3 = ka1;
+ for (j = j1; i__3 < 0 ? j >= i__1 : j <= i__1; j += i__3) {
+ drot_(&nx, &x[j * x_dim1 + 1], &c__1, &x[(j - 1) * x_dim1
+ + 1], &c__1, &work[*n + j], &work[j]);
+/* L830: */
+ }
+ }
+/* L840: */
+ }
+
+ if (update) {
+ if (i2 > 0 && kbt > 0) {
+
+/* create nonzero element a(i+kbt,i+kbt-ka-1) outside the */
+/* band and store it in WORK(m-kb+i+kbt) */
+
+ work[m - *kb + i__ + kbt] = -bb[kbt + 1 + i__ * bb_dim1] *
+ ra1;
+ }
+ }
+
+ for (k = *kb; k >= 1; --k) {
+ if (update) {
+/* Computing MAX */
+ i__4 = 2, i__3 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__4,i__3) * ka1;
+ } else {
+/* Computing MAX */
+ i__4 = 1, i__3 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__4,i__3) * ka1;
+ }
+
+/* finish applying rotations in 2nd set from the left */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (j2 + *ka + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ dlartv_(&nrt, &ab[ka1 - l + 1 + (j1t + l - 1) * ab_dim1],
+ &inca, &ab[ka1 - l + (j1t + l - 1) * ab_dim1], &
+ inca, &work[*n + m - *kb + j1t + *ka], &work[m - *
+ kb + j1t + *ka], &ka1);
+ }
+/* L850: */
+ }
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ i__4 = j2;
+ i__3 = ka1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+ work[m - *kb + j] = work[m - *kb + j + *ka];
+ work[*n + m - *kb + j] = work[*n + m - *kb + j + *ka];
+/* L860: */
+ }
+ i__3 = j2;
+ i__4 = ka1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+
+/* create nonzero element a(j+ka,j-1) outside the band */
+/* and store it in WORK(m-kb+j) */
+
+ work[m - *kb + j] *= ab[ka1 + (j - 1) * ab_dim1];
+ ab[ka1 + (j - 1) * ab_dim1] = work[*n + m - *kb + j] * ab[ka1
+ + (j - 1) * ab_dim1];
+/* L870: */
+ }
+ if (update) {
+ if (i__ + k > ka1 && k <= kbt) {
+ work[m - *kb + i__ + k - *ka] = work[m - *kb + i__ + k];
+ }
+ }
+/* L880: */
+ }
+
+ for (k = *kb; k >= 1; --k) {
+/* Computing MAX */
+ i__4 = 1, i__3 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__4,i__3) * ka1;
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ if (nr > 0) {
+
+/* generate rotations in 2nd set to annihilate elements */
+/* which have been created outside the band */
+
+ dlargv_(&nr, &ab[ka1 + j1 * ab_dim1], &inca, &work[m - *kb +
+ j1], &ka1, &work[*n + m - *kb + j1], &ka1);
+
+/* apply rotations in 2nd set from the right */
+
+ i__4 = *ka - 1;
+ for (l = 1; l <= i__4; ++l) {
+ dlartv_(&nr, &ab[l + 1 + j1 * ab_dim1], &inca, &ab[l + 2
+ + (j1 - 1) * ab_dim1], &inca, &work[*n + m - *kb
+ + j1], &work[m - *kb + j1], &ka1);
+/* L890: */
+ }
+
+/* apply rotations in 2nd set from both sides to diagonal */
+/* blocks */
+
+ dlar2v_(&nr, &ab[j1 * ab_dim1 + 1], &ab[(j1 - 1) * ab_dim1 +
+ 1], &ab[(j1 - 1) * ab_dim1 + 2], &inca, &work[*n + m
+ - *kb + j1], &work[m - *kb + j1], &ka1);
+
+ }
+
+/* start applying rotations in 2nd set from the left */
+
+ i__4 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__4; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ dlartv_(&nrt, &ab[ka1 - l + 1 + (j1t - ka1 + l) * ab_dim1]
+, &inca, &ab[ka1 - l + (j1t - ka1 + l) * ab_dim1],
+ &inca, &work[*n + m - *kb + j1t], &work[m - *kb
+ + j1t], &ka1);
+ }
+/* L900: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 2nd set */
+
+ i__4 = j2;
+ i__3 = ka1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+ drot_(&nx, &x[j * x_dim1 + 1], &c__1, &x[(j - 1) * x_dim1
+ + 1], &c__1, &work[*n + m - *kb + j], &work[m - *
+ kb + j]);
+/* L910: */
+ }
+ }
+/* L920: */
+ }
+
+ i__3 = *kb - 1;
+ for (k = 1; k <= i__3; ++k) {
+/* Computing MAX */
+ i__4 = 1, i__1 = k + i0 - m + 1;
+ j2 = i__ + k + 1 - max(i__4,i__1) * ka1;
+
+/* finish applying rotations in 1st set from the left */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ dlartv_(&nrt, &ab[ka1 - l + 1 + (j1t - ka1 + l) * ab_dim1]
+, &inca, &ab[ka1 - l + (j1t - ka1 + l) * ab_dim1],
+ &inca, &work[*n + j1t], &work[j1t], &ka1);
+ }
+/* L930: */
+ }
+/* L940: */
+ }
+
+ if (*kb > 1) {
+/* Computing MIN */
+ i__4 = i__ + *kb;
+ i__3 = min(i__4,m) - (*ka << 1) - 1;
+ for (j = 2; j <= i__3; ++j) {
+ work[*n + j] = work[*n + j + *ka];
+ work[j] = work[j + *ka];
+/* L950: */
+ }
+ }
+
+ }
+
+ goto L490;
+
+/* End of DSBGST */
+
+} /* dsbgst_ */
diff --git a/contrib/libs/clapack/dsbgv.c b/contrib/libs/clapack/dsbgv.c
new file mode 100644
index 0000000000..0fcf76776a
--- /dev/null
+++ b/contrib/libs/clapack/dsbgv.c
@@ -0,0 +1,234 @@
+/* dsbgv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dsbgv_(char *jobz, char *uplo, integer *n, integer *ka,
+	 integer *kb, doublereal *ab, integer *ldab, doublereal *bb, integer *
+	ldbb, doublereal *w, doublereal *z__, integer *ldz, doublereal *work,
+	integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, bb_dim1, bb_offset, z_dim1, z_offset, i__1;
+
+    /* Local variables */
+    integer inde;
+    char vect[1];
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    logical upper, wantz;
+    extern /* Subroutine */ int xerbla_(char *, integer *), dpbstf_(
+	    char *, integer *, integer *, doublereal *, integer *, integer *), dsbtrd_(char *, char *, integer *, integer *, doublereal
+	    *, integer *, doublereal *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *), dsbgst_(char *, char *,
+	    integer *, integer *, integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *), dsterf_(integer *, doublereal *,
+	    doublereal *, integer *);
+    integer indwrk;
+    extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSBGV computes all the eigenvalues, and optionally, the eigenvectors */
+/* of a real generalized symmetric-definite banded eigenproblem, of */
+/* the form A*x=(lambda)*B*x. Here A and B are assumed to be symmetric */
+/* and banded, and B is also positive definite. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* KA (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KA >= 0. */
+
+/* KB (input) INTEGER */
+/* The number of superdiagonals of the matrix B if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KB >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB, N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first ka+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(ka+1+i-j,j) = A(i,j) for max(1,j-ka)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+ka). */
+
+/* On exit, the contents of AB are destroyed. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KA+1. */
+
+/* BB (input/output) DOUBLE PRECISION array, dimension (LDBB, N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix B, stored in the first kb+1 rows of the array. The */
+/* j-th column of B is stored in the j-th column of the array BB */
+/* as follows: */
+/* if UPLO = 'U', BB(kb+1+i-j,j) = B(i,j) for max(1,j-kb)<=i<=j; */
+/* if UPLO = 'L', BB(1+i-j,j) = B(i,j) for j<=i<=min(n,j+kb). */
+
+/* On exit, the factor S from the split Cholesky factorization */
+/* B = S**T*S, as returned by DPBSTF. */
+
+/* LDBB (input) INTEGER */
+/* The leading dimension of the array BB. LDBB >= KB+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/* eigenvectors, with the i-th column of Z holding the */
+/* eigenvector associated with W(i). The eigenvectors are */
+/* normalized so that Z**T*B*Z = I. */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= N. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is: */
+/* <= N: the algorithm failed to converge: */
+/* i off-diagonal elements of an intermediate */
+/* tridiagonal form did not converge to zero; */
+/* > N: if INFO = N + i, for 1 <= i <= N, then DPBSTF */
+/* returned INFO = i: B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    bb_dim1 = *ldbb;
+    bb_offset = 1 + bb_dim1;
+    bb -= bb_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    upper = lsame_(uplo, "U");
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (upper || lsame_(uplo, "L"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ka < 0) {
+	*info = -4;
+    } else if (*kb < 0 || *kb > *ka) {
+	*info = -5;
+    } else if (*ldab < *ka + 1) {
+	*info = -7;
+    } else if (*ldbb < *kb + 1) {
+	*info = -9;
+    } else if (*ldz < 1 || wantz && *ldz < *n) {
+	*info = -12;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSBGV ", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Form a split Cholesky factorization of B. */
+
+    dpbstf_(uplo, n, kb, &bb[bb_offset], ldbb, info);
+    if (*info != 0) {
+	*info = *n + *info;
+	return 0;
+    }
+
+/* Transform problem to standard eigenvalue problem. */
+
+/* WORK is split in two: WORK(INDE:INDE+N-1) will receive the */
+/* off-diagonal of the tridiagonal form (written by DSBTRD below); */
+/* WORK(INDWRK:...) is the scratch area passed to DSBGST and DSBTRD. */
+
+    inde = 1;
+    indwrk = inde + *n;
+    dsbgst_(jobz, uplo, n, ka, kb, &ab[ab_offset], ldab, &bb[bb_offset], ldbb,
+	     &z__[z_offset], ldz, &work[indwrk], &iinfo)
+	    ;
+
+/* Reduce to tridiagonal form. */
+
+/* VECT = 'U' asks DSBTRD to update Z (which DSBGST filled above) so */
+/* that Z accumulates the overall transformation; VECT = 'N' skips */
+/* the accumulation when only eigenvalues are wanted. */
+
+    if (wantz) {
+	*(unsigned char *)vect = 'U';
+    } else {
+	*(unsigned char *)vect = 'N';
+    }
+    dsbtrd_(vect, uplo, n, ka, &ab[ab_offset], ldab, &w[1], &work[inde], &z__[
+	    z_offset], ldz, &work[indwrk], &iinfo);
+
+/* For eigenvalues only, call DSTERF. For eigenvectors, call DSTEQR. */
+
+    if (! wantz) {
+	dsterf_(n, &w[1], &work[inde], info);
+    } else {
+	dsteqr_(jobz, n, &w[1], &work[inde], &z__[z_offset], ldz, &work[
+		indwrk], info);
+    }
+    return 0;
+
+/* End of DSBGV */
+
+} /* dsbgv_ */
diff --git a/contrib/libs/clapack/dsbgvd.c b/contrib/libs/clapack/dsbgvd.c
new file mode 100644
index 0000000000..28f87a0cfa
--- /dev/null
+++ b/contrib/libs/clapack/dsbgvd.c
@@ -0,0 +1,327 @@
+/* dsbgvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b12 = 1.;
+static doublereal c_b13 = 0.;
+
+/* Subroutine */ int dsbgvd_(char *jobz, char *uplo, integer *n, integer *ka,
+	 integer *kb, doublereal *ab, integer *ldab, doublereal *bb, integer *
+	ldbb, doublereal *w, doublereal *z__, integer *ldz, doublereal *work,
+	integer *lwork, integer *iwork, integer *liwork, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, bb_dim1, bb_offset, z_dim1, z_offset, i__1;
+
+    /* Local variables */
+    integer inde;
+    char vect[1];
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *);
+    extern logical lsame_(char *, char *);
+    integer iinfo, lwmin;
+    logical upper, wantz;
+    integer indwk2, llwrk2;
+    extern /* Subroutine */ int dstedc_(char *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    integer *, integer *, integer *), dlacpy_(char *, integer
+	    *, integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *), dpbstf_(char *,
+	    integer *, integer *, doublereal *, integer *, integer *),
+	    dsbtrd_(char *, char *, integer *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *), dsbgst_(char *, char *,
+	    integer *, integer *, integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *), dsterf_(integer *, doublereal *,
+	    doublereal *, integer *);
+    integer indwrk, liwmin;
+    logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSBGVD computes all the eigenvalues, and optionally, the eigenvectors */
+/* of a real generalized symmetric-definite banded eigenproblem, of the */
+/* form A*x=(lambda)*B*x. Here A and B are assumed to be symmetric and */
+/* banded, and B is also positive definite. If eigenvectors are */
+/* desired, it uses a divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* KA (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KA >= 0. */
+
+/* KB (input) INTEGER */
+/* The number of superdiagonals of the matrix B if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KB >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB, N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first ka+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(ka+1+i-j,j) = A(i,j) for max(1,j-ka)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+ka). */
+
+/* On exit, the contents of AB are destroyed. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KA+1. */
+
+/* BB (input/output) DOUBLE PRECISION array, dimension (LDBB, N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix B, stored in the first kb+1 rows of the array. The */
+/* j-th column of B is stored in the j-th column of the array BB */
+/* as follows: */
+/* if UPLO = 'U', BB(kb+1+i-j,j) = B(i,j) for max(1,j-kb)<=i<=j; */
+/* if UPLO = 'L', BB(1+i-j,j) = B(i,j) for j<=i<=min(n,j+kb). */
+
+/* On exit, the factor S from the split Cholesky factorization */
+/* B = S**T*S, as returned by DPBSTF. */
+
+/* LDBB (input) INTEGER */
+/* The leading dimension of the array BB. LDBB >= KB+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/* eigenvectors, with the i-th column of Z holding the */
+/* eigenvector associated with W(i). The eigenvectors are */
+/* normalized so Z**T*B*Z = I. */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If N <= 1, LWORK >= 1. */
+/* If JOBZ = 'N' and N > 1, LWORK >= 3*N. */
+/* If JOBZ = 'V' and N > 1, LWORK >= 1 + 5*N + 2*N**2. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK and IWORK */
+/* arrays, returns these values as the first entries of the WORK */
+/* and IWORK arrays, and no error message related to LWORK or */
+/* LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if LIWORK > 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If JOBZ = 'N' or N <= 1, LIWORK >= 1. */
+/* If JOBZ = 'V' and N > 1, LIWORK >= 3 + 5*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK and IWORK arrays, and no error message related to */
+/* LWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is: */
+/* <= N: the algorithm failed to converge: */
+/* i off-diagonal elements of an intermediate */
+/* tridiagonal form did not converge to zero; */
+/* > N: if INFO = N + i, for 1 <= i <= N, then DPBSTF */
+/* returned INFO = i: B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    bb_dim1 = *ldbb;
+    bb_offset = 1 + bb_dim1;
+    bb -= bb_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    upper = lsame_(uplo, "U");
+    lquery = *lwork == -1 || *liwork == -1;
+
+    *info = 0;
+    if (*n <= 1) {
+	liwmin = 1;
+	lwmin = 1;
+    } else if (wantz) {
+	liwmin = *n * 5 + 3;
+/* Computing 2nd power */
+	i__1 = *n;
+	lwmin = *n * 5 + 1 + (i__1 * i__1 << 1);
+    } else {
+	liwmin = 1;
+/* NOTE(review): the argument description above states LWORK >= 3*N for */
+/* JOBZ = 'N', but the code enforces only 2*N here. Kept as in the */
+/* reference LAPACK 3.2 translation; confirm against the LAPACK errata */
+/* before changing either the check or the documentation. */
+	lwmin = *n << 1;
+    }
+
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (upper || lsame_(uplo, "L"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ka < 0) {
+	*info = -4;
+    } else if (*kb < 0 || *kb > *ka) {
+	*info = -5;
+    } else if (*ldab < *ka + 1) {
+	*info = -7;
+    } else if (*ldbb < *kb + 1) {
+	*info = -9;
+    } else if (*ldz < 1 || wantz && *ldz < *n) {
+	*info = -12;
+    }
+
+    if (*info == 0) {
+	work[1] = (doublereal) lwmin;
+	iwork[1] = liwmin;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -14;
+	} else if (*liwork < liwmin && ! lquery) {
+	    *info = -16;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSBGVD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Form a split Cholesky factorization of B. */
+
+    dpbstf_(uplo, n, kb, &bb[bb_offset], ldbb, info);
+    if (*info != 0) {
+	*info = *n + *info;
+	return 0;
+    }
+
+/* Transform problem to standard eigenvalue problem. */
+
+/* WORK layout: E at INDE (length N); scratch for DSBGST/DSBTRD at */
+/* INDWRK, later reused by DSTEDC for the N-by-N matrix of tridiagonal */
+/* eigenvectors; the remaining LLWRK2 words at INDWK2 back DSTEDC and */
+/* the final back-transformation product. */
+
+    inde = 1;
+    indwrk = inde + *n;
+    indwk2 = indwrk + *n * *n;
+    llwrk2 = *lwork - indwk2 + 1;
+    dsbgst_(jobz, uplo, n, ka, kb, &ab[ab_offset], ldab, &bb[bb_offset], ldbb,
+	     &z__[z_offset], ldz, &work[indwrk], &iinfo)
+	    ;
+
+/* Reduce to tridiagonal form. */
+
+    if (wantz) {
+	*(unsigned char *)vect = 'U';
+    } else {
+	*(unsigned char *)vect = 'N';
+    }
+    dsbtrd_(vect, uplo, n, ka, &ab[ab_offset], ldab, &w[1], &work[inde], &z__[
+	    z_offset], ldz, &work[indwrk], &iinfo);
+
+/* For eigenvalues only, call DSTERF. For eigenvectors, call DSTEDC. */
+
+    if (! wantz) {
+	dsterf_(n, &w[1], &work[inde], info);
+    } else {
+/* Back-transform: multiply the accumulated transformation (in Z) by */
+/* the tridiagonal eigenvectors from DSTEDC, then copy the product */
+/* back into Z. */
+	dstedc_("I", n, &w[1], &work[inde], &work[indwrk], n, &work[indwk2], &
+		llwrk2, &iwork[1], liwork, info);
+	dgemm_("N", "N", n, n, n, &c_b12, &z__[z_offset], ldz, &work[indwrk],
+		n, &c_b13, &work[indwk2], n);
+	dlacpy_("A", n, n, &work[indwk2], n, &z__[z_offset], ldz);
+    }
+
+    work[1] = (doublereal) lwmin;
+    iwork[1] = liwmin;
+
+    return 0;
+
+/* End of DSBGVD */
+
+} /* dsbgvd_ */
diff --git a/contrib/libs/clapack/dsbgvx.c b/contrib/libs/clapack/dsbgvx.c
new file mode 100644
index 0000000000..b810b3a4e6
--- /dev/null
+++ b/contrib/libs/clapack/dsbgvx.c
@@ -0,0 +1,466 @@
+/* dsbgvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b25 = 1.;
+static doublereal c_b27 = 0.;
+
+/* Subroutine */ int dsbgvx_(char *jobz, char *range, char *uplo, integer *n,
+	 integer *ka, integer *kb, doublereal *ab, integer *ldab, doublereal *
+	bb, integer *ldbb, doublereal *q, integer *ldq, doublereal *vl,
+	doublereal *vu, integer *il, integer *iu, doublereal *abstol, integer
+	*m, doublereal *w, doublereal *z__, integer *ldz, doublereal *work,
+	integer *iwork, integer *ifail, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, bb_dim1, bb_offset, q_dim1, q_offset, z_dim1,
+	     z_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, jj;
+    doublereal tmp1;
+    integer indd, inde;
+    char vect[1];
+    logical test;
+    integer itmp1, indee;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, integer *);
+    integer iinfo;
+    char order[1];
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *), dswap_(integer *, doublereal *, integer
+	    *, doublereal *, integer *);
+    logical upper, wantz, alleig, indeig;
+    integer indibl;
+    logical valeig;
+    extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *),
+	    xerbla_(char *, integer *), dpbstf_(char *, integer *,
+	    integer *, doublereal *, integer *, integer *), dsbtrd_(
+	    char *, char *, integer *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *);
+    integer indisp;
+    extern /* Subroutine */ int dsbgst_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, integer *),
+	    dstein_(integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *, integer *, doublereal *, integer *,
+	    doublereal *, integer *, integer *, integer *);
+    integer indiwo;
+    extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+	     integer *), dstebz_(char *, char *, integer *, doublereal *,
+	    doublereal *, integer *, integer *, doublereal *, doublereal *,
+	    doublereal *, integer *, integer *, doublereal *, integer *,
+	    integer *, doublereal *, integer *, integer *);
+    integer indwrk;
+    extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *);
+    integer nsplit;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSBGVX computes selected eigenvalues, and optionally, eigenvectors */
+/* of a real generalized symmetric-definite banded eigenproblem, of */
+/* the form A*x=(lambda)*B*x. Here A and B are assumed to be symmetric */
+/* and banded, and B is also positive definite. Eigenvalues and */
+/* eigenvectors can be selected by specifying either all eigenvalues, */
+/* a range of values or a range of indices for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* KA (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KA >= 0. */
+
+/* KB (input) INTEGER */
+/* The number of superdiagonals of the matrix B if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KB >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB, N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first ka+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(ka+1+i-j,j) = A(i,j) for max(1,j-ka)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+ka). */
+
+/* On exit, the contents of AB are destroyed. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KA+1. */
+
+/* BB (input/output) DOUBLE PRECISION array, dimension (LDBB, N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix B, stored in the first kb+1 rows of the array. The */
+/* j-th column of B is stored in the j-th column of the array BB */
+/* as follows: */
+/* if UPLO = 'U', BB(kb+1+i-j,j) = B(i,j) for max(1,j-kb)<=i<=j; */
+/* if UPLO = 'L', BB(1+i-j,j) = B(i,j) for j<=i<=min(n,j+kb). */
+
+/* On exit, the factor S from the split Cholesky factorization */
+/* B = S**T*S, as returned by DPBSTF. */
+
+/* LDBB (input) INTEGER */
+/* The leading dimension of the array BB. LDBB >= KB+1. */
+
+/* Q (output) DOUBLE PRECISION array, dimension (LDQ, N) */
+/* If JOBZ = 'V', the n-by-n matrix used in the reduction of */
+/* A*x = (lambda)*B*x to standard form, i.e. C*x = (lambda)*x, */
+/* and consequently C to tridiagonal form. */
+/* If JOBZ = 'N', the array Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. If JOBZ = 'N', */
+/* LDQ >= 1. If JOBZ = 'V', LDQ >= max(1,N). */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) DOUBLE PRECISION */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing A to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*DLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*DLAMCH('S'). */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/* eigenvectors, with the i-th column of Z holding the */
+/* eigenvector associated with W(i). The eigenvectors are */
+/* normalized so Z**T*B*Z = I. */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (7*N) */
+
+/* IWORK (workspace/output) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (M) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvalues that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0 : successful exit */
+/* < 0 : if INFO = -i, the i-th argument had an illegal value */
+/* <= N: if INFO = i, then i eigenvectors failed to converge. */
+/* Their indices are stored in IFAIL. */
+/* > N : DPBSTF returned an error code; i.e., */
+/* if INFO = N + i, for 1 <= i <= N, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    bb_dim1 = *ldbb;
+    bb_offset = 1 + bb_dim1;
+    bb -= bb_offset;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --iwork;
+    --ifail;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    upper = lsame_(uplo, "U");
+    alleig = lsame_(range, "A");
+    valeig = lsame_(range, "V");
+    indeig = lsame_(range, "I");
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (alleig || valeig || indeig)) {
+	*info = -2;
+    } else if (! (upper || lsame_(uplo, "L"))) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*ka < 0) {
+	*info = -5;
+    } else if (*kb < 0 || *kb > *ka) {
+	*info = -6;
+    } else if (*ldab < *ka + 1) {
+	*info = -8;
+    } else if (*ldbb < *kb + 1) {
+	*info = -10;
+    } else if (*ldq < 1 || wantz && *ldq < *n) {
+	*info = -12;
+    } else {
+	if (valeig) {
+	    if (*n > 0 && *vu <= *vl) {
+		*info = -14;
+	    }
+	} else if (indeig) {
+	    if (*il < 1 || *il > max(1,*n)) {
+		*info = -15;
+	    } else if (*iu < min(*n,*il) || *iu > *n) {
+		*info = -16;
+	    }
+	}
+    }
+/* LDZ is checked last so that an earlier error number takes */
+/* precedence when several arguments are invalid. */
+    if (*info == 0) {
+	if (*ldz < 1 || wantz && *ldz < *n) {
+	    *info = -21;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSBGVX", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *m = 0;
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Form a split Cholesky factorization of B. */
+
+    dpbstf_(uplo, n, kb, &bb[bb_offset], ldbb, info);
+    if (*info != 0) {
+	*info = *n + *info;
+	return 0;
+    }
+
+/* Transform problem to standard eigenvalue problem. */
+
+    dsbgst_(jobz, uplo, n, ka, kb, &ab[ab_offset], ldab, &bb[bb_offset], ldbb,
+	     &q[q_offset], ldq, &work[1], &iinfo);
+
+/* Reduce symmetric band matrix to tridiagonal form. */
+
+/* WORK layout: diagonal D at INDD and off-diagonal E at INDE (length */
+/* N each); scratch for the remaining calls starts at INDWRK. */
+
+    indd = 1;
+    inde = indd + *n;
+    indwrk = inde + *n;
+    if (wantz) {
+	*(unsigned char *)vect = 'U';
+    } else {
+	*(unsigned char *)vect = 'N';
+    }
+    dsbtrd_(vect, uplo, n, ka, &ab[ab_offset], ldab, &work[indd], &work[inde],
+	     &q[q_offset], ldq, &work[indwrk], &iinfo);
+
+/* If all eigenvalues are desired and ABSTOL is less than or equal */
+/* to zero, then call DSTERF or DSTEQR. If this fails for some */
+/* eigenvalue, then try DSTEBZ. */
+
+    test = FALSE_;
+    if (indeig) {
+	if (*il == 1 && *iu == *n) {
+	    test = TRUE_;
+	}
+    }
+    if ((alleig || test) && *abstol <= 0.) {
+	dcopy_(n, &work[indd], &c__1, &w[1], &c__1);
+	indee = indwrk + (*n << 1);
+	i__1 = *n - 1;
+	dcopy_(&i__1, &work[inde], &c__1, &work[indee], &c__1);
+	if (! wantz) {
+	    dsterf_(n, &w[1], &work[indee], info);
+	} else {
+	    dlacpy_("A", n, n, &q[q_offset], ldq, &z__[z_offset], ldz);
+	    dsteqr_(jobz, n, &w[1], &work[indee], &z__[z_offset], ldz, &work[
+		    indwrk], info);
+	    if (*info == 0) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    ifail[i__] = 0;
+/* L10: */
+		}
+	    }
+	}
+	if (*info == 0) {
+	    *m = *n;
+	    goto L30;
+	}
+/* Fall through to DSTEBZ/DSTEIN below after a QR failure. */
+	*info = 0;
+    }
+
+/* Otherwise, call DSTEBZ and, if eigenvectors are desired, */
+/* call DSTEIN. */
+
+    if (wantz) {
+	*(unsigned char *)order = 'B';
+    } else {
+	*(unsigned char *)order = 'E';
+    }
+    indibl = 1;
+    indisp = indibl + *n;
+    indiwo = indisp + *n;
+    dstebz_(range, order, n, vl, vu, il, iu, abstol, &work[indd], &work[inde],
+	     m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &work[indwrk],
+	     &iwork[indiwo], info);
+
+    if (wantz) {
+	dstein_(n, &work[indd], &work[inde], m, &w[1], &iwork[indibl], &iwork[
+		indisp], &z__[z_offset], ldz, &work[indwrk], &iwork[indiwo], &
+		ifail[1], info);
+
+/* Apply transformation matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by DSTEIN. */
+
+	i__1 = *m;
+	for (j = 1; j <= i__1; ++j) {
+	    dcopy_(n, &z__[j * z_dim1 + 1], &c__1, &work[1], &c__1);
+	    dgemv_("N", n, n, &c_b25, &q[q_offset], ldq, &work[1], &c__1, &
+		    c_b27, &z__[j * z_dim1 + 1], &c__1);
+/* L20: */
+	}
+    }
+
+L30:
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+/* Selection sort into ascending order; eigenvector columns, the */
+/* IWORK(INDIBL) block indices and, when INFO != 0, the IFAIL */
+/* entries are swapped along with the eigenvalues. */
+
+    if (wantz) {
+	i__1 = *m - 1;
+	for (j = 1; j <= i__1; ++j) {
+	    i__ = 0;
+	    tmp1 = w[j];
+	    i__2 = *m;
+	    for (jj = j + 1; jj <= i__2; ++jj) {
+		if (w[jj] < tmp1) {
+		    i__ = jj;
+		    tmp1 = w[jj];
+		}
+/* L40: */
+	    }
+
+	    if (i__ != 0) {
+		itmp1 = iwork[indibl + i__ - 1];
+		w[i__] = w[j];
+		iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+		w[j] = tmp1;
+		iwork[indibl + j - 1] = itmp1;
+		dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+			 &c__1);
+		if (*info != 0) {
+		    itmp1 = ifail[i__];
+		    ifail[i__] = ifail[j];
+		    ifail[j] = itmp1;
+		}
+	    }
+/* L50: */
+	}
+    }
+
+    return 0;
+
+/* End of DSBGVX */
+
+} /* dsbgvx_ */
diff --git a/contrib/libs/clapack/dsbtrd.c b/contrib/libs/clapack/dsbtrd.c
new file mode 100644
index 0000000000..70bddbdf97
--- /dev/null
+++ b/contrib/libs/clapack/dsbtrd.c
@@ -0,0 +1,713 @@
+/* dsbtrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */

+/* c_b9 (0.0) and c_b10 (1.0) are the ALPHA/BETA pair handed to DLASET */
+/* when Q is initialized to the identity matrix; c__1 is the unit stride */
+/* passed to DROT. f2c hoists such literal arguments into statics because */
+/* the Fortran calling convention passes everything by address. */
+
+static doublereal c_b9 = 0.;
+static doublereal c_b10 = 1.;
+static integer c__1 = 1;
+
+/* Subroutine */ int dsbtrd_(char *vect, char *uplo, integer *n, integer *kd,
+	doublereal *ab, integer *ldab, doublereal *d__, doublereal *e,
+	doublereal *q, integer *ldq, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, q_dim1, q_offset, i__1, i__2, i__3, i__4,
+	    i__5;
+
+    /* Local variables */
+    integer i__, j, k, l, i2, j1, j2, nq, nr, kd1, ibl, iqb, kdn, jin, nrt,
+	    kdm1, inca, jend, lend, jinc, incx, last;
+    doublereal temp;
+    extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *);
+    integer j1end, j1inc, iqend;
+    extern logical lsame_(char *, char *);
+    logical initq, wantq, upper;
+    extern /* Subroutine */ int dlar2v_(integer *, doublereal *, doublereal *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *);
+    integer iqaend;
+    extern /* Subroutine */ int dlaset_(char *, integer *, integer *,
+	    doublereal *, doublereal *, doublereal *, integer *),
+	    dlartg_(doublereal *, doublereal *, doublereal *, doublereal *,
+	    doublereal *), xerbla_(char *, integer *), dlargv_(
+	    integer *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *), dlartv_(integer *, doublereal *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSBTRD reduces a real symmetric band matrix A to symmetric */
+/* tridiagonal form T by an orthogonal similarity transformation: */
+/* Q**T * A * Q = T. */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* = 'N': do not form Q; */
+/* = 'V': form Q; */
+/* = 'U': update a matrix X, by forming X*Q. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+/* On exit, the diagonal elements of AB are overwritten by the */
+/* diagonal elements of the tridiagonal matrix T; if KD > 0, the */
+/* elements on the first superdiagonal (if UPLO = 'U') or the */
+/* first subdiagonal (if UPLO = 'L') are overwritten by the */
+/* off-diagonal elements of T; the rest of AB is overwritten by */
+/* values generated during the reduction. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* D (output) DOUBLE PRECISION array, dimension (N) */
+/* The diagonal elements of the tridiagonal matrix T. */
+
+/* E (output) DOUBLE PRECISION array, dimension (N-1) */
+/* The off-diagonal elements of the tridiagonal matrix T: */
+/* E(i) = T(i,i+1) if UPLO = 'U'; E(i) = T(i+1,i) if UPLO = 'L'. */
+
+/* Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N) */
+/* On entry, if VECT = 'U', then Q must contain an N-by-N */
+/* matrix X; if VECT = 'N' or 'V', then Q need not be set. */
+
+/* On exit: */
+/* if VECT = 'V', Q contains the N-by-N orthogonal matrix Q; */
+/* if VECT = 'U', Q contains the product X*Q; */
+/* if VECT = 'N', the array Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. */
+/* LDQ >= 1, and LDQ >= N if VECT = 'V' or 'U'. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* Modified by Linda Kaufman, Bell Labs. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+/* f2c shifts each array pointer so the 1-based Fortran indexing */
+/* AB(i,j) -> ab[i + j*ab_dim1] can be kept verbatim below. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --d__;
+    --e;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    --work;
+
+    /* Function Body */
+    initq = lsame_(vect, "V");
+    wantq = initq || lsame_(vect, "U");
+    upper = lsame_(uplo, "U");
+    kd1 = *kd + 1;
+    kdm1 = *kd - 1;
+    incx = *ldab - 1;
+    iqend = 1;
+
+    *info = 0;
+    if (! wantq && ! lsame_(vect, "N")) {
+	*info = -1;
+    } else if (! upper && ! lsame_(uplo, "L")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*kd < 0) {
+	*info = -4;
+    } else if (*ldab < kd1) {
+	*info = -6;
+    } else if (*ldq < max(1,*n) && wantq) {
+	*info = -10;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSBTRD", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Initialize Q to the unit matrix, if needed */
+
+    if (initq) {
+	dlaset_("Full", n, n, &c_b9, &c_b10, &q[q_offset], ldq);
+    }
+
+/* Wherever possible, plane rotations are generated and applied in */
+/* vector operations of length NR over the index set J1:J2:KD1. */
+
+/* The cosines and sines of the plane rotations are stored in the */
+/* arrays D and WORK. */
+
+/* INCA is the memory distance between AB(l,j) and AB(l,j+KD+1), i.e. */
+/* the increment used when one rotation per (KD+1)-th column of the */
+/* band is applied as a vector operation (DLARGV/DLARTV/DLAR2V). */
+
+    inca = kd1 * *ldab;
+/* Computing MIN */
+    i__1 = *n - 1;
+    kdn = min(i__1,*kd);
+    if (upper) {
+
+	if (*kd > 1) {
+
+/* Reduce to tridiagonal form, working with upper triangle */
+
+	    nr = 0;
+	    j1 = kdn + 2;
+	    j2 = 1;
+
+	    i__1 = *n - 2;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Reduce i-th row of matrix to tridiagonal form */
+
+		for (k = kdn + 1; k >= 2; --k) {
+		    j1 += kdn;
+		    j2 += kdn;
+
+		    if (nr > 0) {
+
+/* generate plane rotations to annihilate nonzero */
+/* elements which have been created outside the band */
+
+			dlargv_(&nr, &ab[(j1 - 1) * ab_dim1 + 1], &inca, &
+				work[j1], &kd1, &d__[j1], &kd1);
+
+/* apply rotations from the right */
+
+
+/* Dependent on the number of diagonals either */
+/* DLARTV or DROT is used */
+
+			if (nr >= (*kd << 1) - 1) {
+			    i__2 = *kd - 1;
+			    for (l = 1; l <= i__2; ++l) {
+				dlartv_(&nr, &ab[l + 1 + (j1 - 1) * ab_dim1],
+					&inca, &ab[l + j1 * ab_dim1], &inca, &
+					d__[j1], &work[j1], &kd1);
+/* L10: */
+			    }
+
+			} else {
+			    jend = j1 + (nr - 1) * kd1;
+			    i__2 = jend;
+			    i__3 = kd1;
+			    for (jinc = j1; i__3 < 0 ? jinc >= i__2 : jinc <=
+				    i__2; jinc += i__3) {
+				drot_(&kdm1, &ab[(jinc - 1) * ab_dim1 + 2], &
+					c__1, &ab[jinc * ab_dim1 + 1], &c__1,
+					&d__[jinc], &work[jinc]);
+/* L20: */
+			    }
+			}
+		    }
+
+
+		    if (k > 2) {
+			if (k <= *n - i__ + 1) {
+
+/* generate plane rotation to annihilate a(i,i+k-1) */
+/* within the band */
+
+			    dlartg_(&ab[*kd - k + 3 + (i__ + k - 2) * ab_dim1]
+, &ab[*kd - k + 2 + (i__ + k - 1) *
+				    ab_dim1], &d__[i__ + k - 1], &work[i__ +
+				    k - 1], &temp);
+			    ab[*kd - k + 3 + (i__ + k - 2) * ab_dim1] = temp;
+
+/* apply rotation from the right */
+
+			    i__3 = k - 3;
+			    drot_(&i__3, &ab[*kd - k + 4 + (i__ + k - 2) *
+				    ab_dim1], &c__1, &ab[*kd - k + 3 + (i__ +
+				    k - 1) * ab_dim1], &c__1, &d__[i__ + k -
+				    1], &work[i__ + k - 1]);
+			}
+			++nr;
+			j1 = j1 - kdn - 1;
+		    }
+
+/* apply plane rotations from both sides to diagonal */
+/* blocks */
+
+		    if (nr > 0) {
+			dlar2v_(&nr, &ab[kd1 + (j1 - 1) * ab_dim1], &ab[kd1 +
+				j1 * ab_dim1], &ab[*kd + j1 * ab_dim1], &inca,
+				&d__[j1], &work[j1], &kd1);
+		    }
+
+/* apply plane rotations from the left */
+
+		    if (nr > 0) {
+			if ((*kd << 1) - 1 < nr) {
+
+/* Dependent on the number of diagonals either */
+/* DLARTV or DROT is used */
+
+			    i__3 = *kd - 1;
+			    for (l = 1; l <= i__3; ++l) {
+				if (j2 + l > *n) {
+				    nrt = nr - 1;
+				} else {
+				    nrt = nr;
+				}
+				if (nrt > 0) {
+				    dlartv_(&nrt, &ab[*kd - l + (j1 + l) *
+					    ab_dim1], &inca, &ab[*kd - l + 1
+					    + (j1 + l) * ab_dim1], &inca, &
+					    d__[j1], &work[j1], &kd1);
+				}
+/* L30: */
+			    }
+			} else {
+			    j1end = j1 + kd1 * (nr - 2);
+			    if (j1end >= j1) {
+				i__3 = j1end;
+				i__2 = kd1;
+				for (jin = j1; i__2 < 0 ? jin >= i__3 : jin <=
+					 i__3; jin += i__2) {
+				    i__4 = *kd - 1;
+				    drot_(&i__4, &ab[*kd - 1 + (jin + 1) *
+					    ab_dim1], &incx, &ab[*kd + (jin +
+					    1) * ab_dim1], &incx, &d__[jin], &
+					    work[jin]);
+/* L40: */
+				}
+			    }
+/* Computing MIN */
+			    i__2 = kdm1, i__3 = *n - j2;
+			    lend = min(i__2,i__3);
+			    last = j1end + kd1;
+			    if (lend > 0) {
+				drot_(&lend, &ab[*kd - 1 + (last + 1) *
+					ab_dim1], &incx, &ab[*kd + (last + 1)
+					* ab_dim1], &incx, &d__[last], &work[
+					last]);
+			    }
+			}
+		    }
+
+		    if (wantq) {
+
+/* accumulate product of plane rotations in Q */
+
+			if (initq) {
+
+/* take advantage of the fact that Q was */
+/* initially the Identity matrix */
+
+			    iqend = max(iqend,j2);
+/* Computing MAX */
+			    i__2 = 0, i__3 = k - 3;
+			    i2 = max(i__2,i__3);
+			    iqaend = i__ * *kd + 1;
+			    if (k == 2) {
+				iqaend += *kd;
+			    }
+			    iqaend = min(iqaend,iqend);
+			    i__2 = j2;
+			    i__3 = kd1;
+			    for (j = j1; i__3 < 0 ? j >= i__2 : j <= i__2; j
+				    += i__3) {
+/* note: C's "/" binds tighter than "-", so this is */
+/* i__ - (i2 / kdm1), a faithful translation of the */
+/* Fortran expression I - I2/KDM1 */
+				ibl = i__ - i2 / kdm1;
+				++i2;
+/* Computing MAX */
+				i__4 = 1, i__5 = j - ibl;
+				iqb = max(i__4,i__5);
+				nq = iqaend + 1 - iqb;
+/* Computing MIN */
+				i__4 = iqaend + *kd;
+				iqaend = min(i__4,iqend);
+				drot_(&nq, &q[iqb + (j - 1) * q_dim1], &c__1,
+					&q[iqb + j * q_dim1], &c__1, &d__[j],
+					&work[j]);
+/* L50: */
+			    }
+			} else {
+
+			    i__3 = j2;
+			    i__2 = kd1;
+			    for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j
+				    += i__2) {
+				drot_(n, &q[(j - 1) * q_dim1 + 1], &c__1, &q[
+					j * q_dim1 + 1], &c__1, &d__[j], &
+					work[j]);
+/* L60: */
+			    }
+			}
+
+		    }
+
+		    if (j2 + kdn > *n) {
+
+/* adjust J2 to keep within the bounds of the matrix */
+
+			--nr;
+			j2 = j2 - kdn - 1;
+		    }
+
+		    i__2 = j2;
+		    i__3 = kd1;
+		    for (j = j1; i__3 < 0 ? j >= i__2 : j <= i__2; j += i__3)
+			    {
+
+/* create nonzero element a(j-1,j+kd) outside the band */
+/* and store it in WORK */
+
+			work[j + *kd] = work[j] * ab[(j + *kd) * ab_dim1 + 1];
+			ab[(j + *kd) * ab_dim1 + 1] = d__[j] * ab[(j + *kd) *
+				ab_dim1 + 1];
+/* L70: */
+		    }
+/* L80: */
+		}
+/* L90: */
+	    }
+	}
+
+	if (*kd > 0) {
+
+/* copy off-diagonal elements to E */
+
+	    i__1 = *n - 1;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		e[i__] = ab[*kd + (i__ + 1) * ab_dim1];
+/* L100: */
+	    }
+	} else {
+
+/* set E to zero if original matrix was diagonal */
+
+	    i__1 = *n - 1;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		e[i__] = 0.;
+/* L110: */
+	    }
+	}
+
+/* copy diagonal elements to D */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    d__[i__] = ab[kd1 + i__ * ab_dim1];
+/* L120: */
+	}
+
+    } else {
+
+	if (*kd > 1) {
+
+/* Reduce to tridiagonal form, working with lower triangle */
+
+	    nr = 0;
+	    j1 = kdn + 2;
+	    j2 = 1;
+
+	    i__1 = *n - 2;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Reduce i-th column of matrix to tridiagonal form */
+
+		for (k = kdn + 1; k >= 2; --k) {
+		    j1 += kdn;
+		    j2 += kdn;
+
+		    if (nr > 0) {
+
+/* generate plane rotations to annihilate nonzero */
+/* elements which have been created outside the band */
+
+			dlargv_(&nr, &ab[kd1 + (j1 - kd1) * ab_dim1], &inca, &
+				work[j1], &kd1, &d__[j1], &kd1);
+
+/* apply plane rotations from one side */
+
+
+/* Dependent on the number of diagonals either */
+/* DLARTV or DROT is used */
+
+			if (nr > (*kd << 1) - 1) {
+			    i__3 = *kd - 1;
+			    for (l = 1; l <= i__3; ++l) {
+				dlartv_(&nr, &ab[kd1 - l + (j1 - kd1 + l) *
+					ab_dim1], &inca, &ab[kd1 - l + 1 + (
+					j1 - kd1 + l) * ab_dim1], &inca, &d__[
+					j1], &work[j1], &kd1);
+/* L130: */
+			    }
+			} else {
+			    jend = j1 + kd1 * (nr - 1);
+			    i__3 = jend;
+			    i__2 = kd1;
+			    for (jinc = j1; i__2 < 0 ? jinc >= i__3 : jinc <=
+				    i__3; jinc += i__2) {
+				drot_(&kdm1, &ab[*kd + (jinc - *kd) * ab_dim1]
+, &incx, &ab[kd1 + (jinc - *kd) *
+					ab_dim1], &incx, &d__[jinc], &work[
+					jinc]);
+/* L140: */
+			    }
+			}
+
+		    }
+
+		    if (k > 2) {
+			if (k <= *n - i__ + 1) {
+
+/* generate plane rotation to annihilate a(i+k-1,i) */
+/* within the band */
+
+			    dlartg_(&ab[k - 1 + i__ * ab_dim1], &ab[k + i__ *
+				    ab_dim1], &d__[i__ + k - 1], &work[i__ +
+				    k - 1], &temp);
+			    ab[k - 1 + i__ * ab_dim1] = temp;
+
+/* apply rotation from the left */
+
+			    i__2 = k - 3;
+			    i__3 = *ldab - 1;
+			    i__4 = *ldab - 1;
+			    drot_(&i__2, &ab[k - 2 + (i__ + 1) * ab_dim1], &
+				    i__3, &ab[k - 1 + (i__ + 1) * ab_dim1], &
+				    i__4, &d__[i__ + k - 1], &work[i__ + k -
+				    1]);
+			}
+			++nr;
+			j1 = j1 - kdn - 1;
+		    }
+
+/* apply plane rotations from both sides to diagonal */
+/* blocks */
+
+		    if (nr > 0) {
+			dlar2v_(&nr, &ab[(j1 - 1) * ab_dim1 + 1], &ab[j1 *
+				ab_dim1 + 1], &ab[(j1 - 1) * ab_dim1 + 2], &
+				inca, &d__[j1], &work[j1], &kd1);
+		    }
+
+/* apply plane rotations from the right */
+
+
+/* Dependent on the number of diagonals either */
+/* DLARTV or DROT is used */
+
+		    if (nr > 0) {
+			if (nr > (*kd << 1) - 1) {
+			    i__2 = *kd - 1;
+			    for (l = 1; l <= i__2; ++l) {
+				if (j2 + l > *n) {
+				    nrt = nr - 1;
+				} else {
+				    nrt = nr;
+				}
+				if (nrt > 0) {
+				    dlartv_(&nrt, &ab[l + 2 + (j1 - 1) *
+					    ab_dim1], &inca, &ab[l + 1 + j1 *
+					    ab_dim1], &inca, &d__[j1], &work[
+					    j1], &kd1);
+				}
+/* L150: */
+			    }
+			} else {
+			    j1end = j1 + kd1 * (nr - 2);
+			    if (j1end >= j1) {
+				i__2 = j1end;
+				i__3 = kd1;
+				for (j1inc = j1; i__3 < 0 ? j1inc >= i__2 :
+					j1inc <= i__2; j1inc += i__3) {
+				    drot_(&kdm1, &ab[(j1inc - 1) * ab_dim1 +
+					    3], &c__1, &ab[j1inc * ab_dim1 +
+					    2], &c__1, &d__[j1inc], &work[
+					    j1inc]);
+/* L160: */
+				}
+			    }
+/* Computing MIN */
+			    i__3 = kdm1, i__2 = *n - j2;
+			    lend = min(i__3,i__2);
+			    last = j1end + kd1;
+			    if (lend > 0) {
+				drot_(&lend, &ab[(last - 1) * ab_dim1 + 3], &
+					c__1, &ab[last * ab_dim1 + 2], &c__1,
+					&d__[last], &work[last]);
+			    }
+			}
+		    }
+
+
+
+		    if (wantq) {
+
+/* accumulate product of plane rotations in Q */
+
+			if (initq) {
+
+/* take advantage of the fact that Q was */
+/* initially the Identity matrix */
+
+			    iqend = max(iqend,j2);
+/* Computing MAX */
+			    i__3 = 0, i__2 = k - 3;
+			    i2 = max(i__3,i__2);
+			    iqaend = i__ * *kd + 1;
+			    if (k == 2) {
+				iqaend += *kd;
+			    }
+			    iqaend = min(iqaend,iqend);
+			    i__3 = j2;
+			    i__2 = kd1;
+			    for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j
+				    += i__2) {
+/* note: as in the upper-triangle branch, this is */
+/* i__ - (i2 / kdm1) (Fortran I - I2/KDM1) */
+				ibl = i__ - i2 / kdm1;
+				++i2;
+/* Computing MAX */
+				i__4 = 1, i__5 = j - ibl;
+				iqb = max(i__4,i__5);
+				nq = iqaend + 1 - iqb;
+/* Computing MIN */
+				i__4 = iqaend + *kd;
+				iqaend = min(i__4,iqend);
+				drot_(&nq, &q[iqb + (j - 1) * q_dim1], &c__1,
+					&q[iqb + j * q_dim1], &c__1, &d__[j],
+					&work[j]);
+/* L170: */
+			    }
+			} else {
+
+			    i__2 = j2;
+			    i__3 = kd1;
+			    for (j = j1; i__3 < 0 ? j >= i__2 : j <= i__2; j
+				    += i__3) {
+				drot_(n, &q[(j - 1) * q_dim1 + 1], &c__1, &q[
+					j * q_dim1 + 1], &c__1, &d__[j], &
+					work[j]);
+/* L180: */
+			    }
+			}
+		    }
+
+		    if (j2 + kdn > *n) {
+
+/* adjust J2 to keep within the bounds of the matrix */
+
+			--nr;
+			j2 = j2 - kdn - 1;
+		    }
+
+		    i__3 = j2;
+		    i__2 = kd1;
+		    for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j += i__2)
+			    {
+
+/* create nonzero element a(j+kd,j-1) outside the */
+/* band and store it in WORK */
+
+			work[j + *kd] = work[j] * ab[kd1 + j * ab_dim1];
+			ab[kd1 + j * ab_dim1] = d__[j] * ab[kd1 + j * ab_dim1]
+				;
+/* L190: */
+		    }
+/* L200: */
+		}
+/* L210: */
+	    }
+	}
+
+	if (*kd > 0) {
+
+/* copy off-diagonal elements to E */
+
+	    i__1 = *n - 1;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		e[i__] = ab[i__ * ab_dim1 + 2];
+/* L220: */
+	    }
+	} else {
+
+/* set E to zero if original matrix was diagonal */
+
+	    i__1 = *n - 1;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		e[i__] = 0.;
+/* L230: */
+	    }
+	}
+
+/* copy diagonal elements to D */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    d__[i__] = ab[i__ * ab_dim1 + 1];
+/* L240: */
+	}
+    }
+
+    return 0;
+
+/* End of DSBTRD */
+
+} /* dsbtrd_ */
diff --git a/contrib/libs/clapack/dsfrk.c b/contrib/libs/clapack/dsfrk.c
new file mode 100644
index 0000000000..53c9268dc8
--- /dev/null
+++ b/contrib/libs/clapack/dsfrk.c
@@ -0,0 +1,517 @@
+/* dsfrk.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dsfrk_(char *transr, char *uplo, char *trans, integer *n,
+	integer *k, doublereal *alpha, doublereal *a, integer *lda,
+	doublereal *beta, doublereal *c__)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1;
+
+    /* Local variables */
+    integer j, n1, n2, nk, info;
+    logical normaltransr;
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *);
+    extern logical lsame_(char *, char *);
+    integer nrowa;
+    logical lower;
+    extern /* Subroutine */ int dsyrk_(char *, char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *), xerbla_(char *, integer *);
+    logical nisodd, notrans;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Julien Langou of the Univ. of Colorado Denver -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Level 3 BLAS like routine for C in RFP Format. */
+
+/* DSFRK performs one of the symmetric rank--k operations */
+
+/* C := alpha*A*A' + beta*C, */
+
+/* or */
+
+/* C := alpha*A'*A + beta*C, */
+
+/* where alpha and beta are real scalars, C is an n--by--n symmetric */
+/* matrix and A is an n--by--k matrix in the first case and a k--by--n */
+/* matrix in the second case. */
+
+/* Arguments */
+/* ========== */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': The Normal Form of RFP A is stored; */
+/* = 'T': The Transpose Form of RFP A is stored. */
+
+/* UPLO - (input) CHARACTER */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array C is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of C */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of C */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* TRANS - (input) CHARACTER */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' C := alpha*A*A' + beta*C. */
+
+/* TRANS = 'T' or 't' C := alpha*A'*A + beta*C. */
+
+/* Unchanged on exit. */
+
+/* N - (input) INTEGER. */
+/* On entry, N specifies the order of the matrix C. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* K - (input) INTEGER. */
+/* On entry with TRANS = 'N' or 'n', K specifies the number */
+/* of columns of the matrix A, and on entry with TRANS = 'T' */
+/* or 't', K specifies the number of rows of the matrix A. K */
+/* must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - (input) DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - (input) DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where KA */
+/* is K when TRANS = 'N' or 'n', and is N otherwise. Before */
+/* entry with TRANS = 'N' or 'n', the leading N--by--K part of */
+/* the array A must contain the matrix A, otherwise the leading */
+/* K--by--N part of the array A must contain the matrix A. */
+/* Unchanged on exit. */
+
+/* LDA - (input) INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDA must be at least max( 1, n ), otherwise LDA must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* BETA - (input) DOUBLE PRECISION. */
+/* On entry, BETA specifies the scalar beta. */
+/* Unchanged on exit. */
+
+
+/* C - (input/output) DOUBLE PRECISION array, dimension ( NT ); */
+/* NT = N*(N+1)/2. On entry, the symmetric matrix C in RFP */
+/* Format. RFP Format is described by TRANSR, UPLO and N. */
+
+/* Arguments */
+/* ========== */
+
+/* .. */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --c__;
+
+    /* Function Body */
+    info = 0;
+    normaltransr = lsame_(transr, "N");
+    lower = lsame_(uplo, "L");
+    notrans = lsame_(trans, "N");
+
+    if (notrans) {
+	nrowa = *n;
+    } else {
+	nrowa = *k;
+    }
+
+    if (! normaltransr && ! lsame_(transr, "T")) {
+	info = -1;
+    } else if (! lower && ! lsame_(uplo, "U")) {
+	info = -2;
+    } else if (! notrans && ! lsame_(trans, "T")) {
+	info = -3;
+    } else if (*n < 0) {
+	info = -4;
+    } else if (*k < 0) {
+	info = -5;
+    } else if (*lda < max(1,nrowa)) {
+	info = -8;
+    }
+    if (info != 0) {
+	i__1 = -info;
+/* routine name is blank-padded to six characters, matching the */
+/* fixed-width name XERBLA prints */
+	xerbla_("DSFRK ", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+/* The quick return case: ((ALPHA.EQ.0).AND.(BETA.NE.ZERO)) is not */
+/* done (it is in DSYRK for example) and left in the general case. */
+
+/* note: && binds tighter than ||, so this reads */
+/* n == 0 .OR. ((alpha == 0 .OR. k == 0) .AND. beta == 1) */
+
+    if (*n == 0 || (*alpha == 0. || *k == 0) && *beta == 1.) {
+	return 0;
+    }
+
+    if (*alpha == 0. && *beta == 0.) {
+	i__1 = *n * (*n + 1) / 2;
+	for (j = 1; j <= i__1; ++j) {
+	    c__[j] = 0.;
+	}
+	return 0;
+    }
+
+/* C is N-by-N. */
+/* If N is odd, set NISODD = .TRUE., and N1 and N2. */
+/* If N is even, NISODD = .FALSE., and NK. */
+
+    if (*n % 2 == 0) {
+	nisodd = FALSE_;
+	nk = *n / 2;
+    } else {
+	nisodd = TRUE_;
+	if (lower) {
+	    n2 = *n / 2;
+	    n1 = *n - n2;
+	} else {
+	    n1 = *n / 2;
+	    n2 = *n - n1;
+	}
+    }
+
+/* The sixteen cases below dispatch on (NISODD, NORMALTRANSR, LOWER, */
+/* NOTRANS); each performs the rank-k update on the RFP-packed C via */
+/* two DSYRK calls (the two triangular diagonal blocks) plus one DGEMM */
+/* (the rectangular off-diagonal block), differing only in where those */
+/* blocks live inside the packed array C and in the leading dimension */
+/* used to address it. */
+
+    if (nisodd) {
+
+/* N is odd */
+
+	if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'L' */
+
+		if (notrans) {
+
+/* N is odd, TRANSR = 'N', UPLO = 'L', and TRANS = 'N' */
+
+		    dsyrk_("L", "N", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+			     &c__[1], n);
+		    dsyrk_("U", "N", &n2, k, alpha, &a[n1 + 1 + a_dim1], lda,
+			    beta, &c__[*n + 1], n);
+		    dgemm_("N", "T", &n2, &n1, k, alpha, &a[n1 + 1 + a_dim1],
+			    lda, &a[a_dim1 + 1], lda, beta, &c__[n1 + 1], n);
+
+		} else {
+
+/* N is odd, TRANSR = 'N', UPLO = 'L', and TRANS = 'T' */
+
+		    dsyrk_("L", "T", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+			     &c__[1], n);
+		    dsyrk_("U", "T", &n2, k, alpha, &a[(n1 + 1) * a_dim1 + 1],
+			     lda, beta, &c__[*n + 1], n)
+			    ;
+		    dgemm_("T", "N", &n2, &n1, k, alpha, &a[(n1 + 1) * a_dim1
+			    + 1], lda, &a[a_dim1 + 1], lda, beta, &c__[n1 + 1]
+, n);
+
+		}
+
+	    } else {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'U' */
+
+		if (notrans) {
+
+/* N is odd, TRANSR = 'N', UPLO = 'U', and TRANS = 'N' */
+
+		    dsyrk_("L", "N", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+			     &c__[n2 + 1], n);
+		    dsyrk_("U", "N", &n2, k, alpha, &a[n2 + a_dim1], lda,
+			    beta, &c__[n1 + 1], n);
+		    dgemm_("N", "T", &n1, &n2, k, alpha, &a[a_dim1 + 1], lda,
+			    &a[n2 + a_dim1], lda, beta, &c__[1], n);
+
+		} else {
+
+/* N is odd, TRANSR = 'N', UPLO = 'U', and TRANS = 'T' */
+
+		    dsyrk_("L", "T", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+			     &c__[n2 + 1], n);
+		    dsyrk_("U", "T", &n2, k, alpha, &a[n2 * a_dim1 + 1], lda,
+			    beta, &c__[n1 + 1], n);
+		    dgemm_("T", "N", &n1, &n2, k, alpha, &a[a_dim1 + 1], lda,
+			    &a[n2 * a_dim1 + 1], lda, beta, &c__[1], n);
+
+		}
+
+	    }
+
+	} else {
+
+/* N is odd, and TRANSR = 'T' */
+
+	    if (lower) {
+
+/* N is odd, TRANSR = 'T', and UPLO = 'L' */
+
+		if (notrans) {
+
+/* N is odd, TRANSR = 'T', UPLO = 'L', and TRANS = 'N' */
+
+		    dsyrk_("U", "N", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+			     &c__[1], &n1);
+		    dsyrk_("L", "N", &n2, k, alpha, &a[n1 + 1 + a_dim1], lda,
+			    beta, &c__[2], &n1);
+		    dgemm_("N", "T", &n1, &n2, k, alpha, &a[a_dim1 + 1], lda,
+			    &a[n1 + 1 + a_dim1], lda, beta, &c__[n1 * n1 + 1],
+			     &n1);
+
+		} else {
+
+/* N is odd, TRANSR = 'T', UPLO = 'L', and TRANS = 'T' */
+
+		    dsyrk_("U", "T", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+			     &c__[1], &n1);
+		    dsyrk_("L", "T", &n2, k, alpha, &a[(n1 + 1) * a_dim1 + 1],
+			     lda, beta, &c__[2], &n1);
+		    dgemm_("T", "N", &n1, &n2, k, alpha, &a[a_dim1 + 1], lda,
+			    &a[(n1 + 1) * a_dim1 + 1], lda, beta, &c__[n1 *
+			    n1 + 1], &n1);
+
+		}
+
+	    } else {
+
+/* N is odd, TRANSR = 'T', and UPLO = 'U' */
+
+		if (notrans) {
+
+/* N is odd, TRANSR = 'T', UPLO = 'U', and TRANS = 'N' */
+
+		    dsyrk_("U", "N", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+			     &c__[n2 * n2 + 1], &n2);
+		    dsyrk_("L", "N", &n2, k, alpha, &a[n1 + 1 + a_dim1], lda,
+			    beta, &c__[n1 * n2 + 1], &n2);
+		    dgemm_("N", "T", &n2, &n1, k, alpha, &a[n1 + 1 + a_dim1],
+			    lda, &a[a_dim1 + 1], lda, beta, &c__[1], &n2);
+
+		} else {
+
+/* N is odd, TRANSR = 'T', UPLO = 'U', and TRANS = 'T' */
+
+		    dsyrk_("U", "T", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+			     &c__[n2 * n2 + 1], &n2);
+		    dsyrk_("L", "T", &n2, k, alpha, &a[(n1 + 1) * a_dim1 + 1],
+			     lda, beta, &c__[n1 * n2 + 1], &n2);
+		    dgemm_("T", "N", &n2, &n1, k, alpha, &a[(n1 + 1) * a_dim1
+			    + 1], lda, &a[a_dim1 + 1], lda, beta, &c__[1], &
+			    n2);
+
+		}
+
+	    }
+
+	}
+
+    } else {
+
+/* N is even */
+
+	if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* N is even, TRANSR = 'N', and UPLO = 'L' */
+
+		if (notrans) {
+
+/* N is even, TRANSR = 'N', UPLO = 'L', and TRANS = 'N' */
+
+		    i__1 = *n + 1;
+		    dsyrk_("L", "N", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+			     &c__[2], &i__1);
+		    i__1 = *n + 1;
+		    dsyrk_("U", "N", &nk, k, alpha, &a[nk + 1 + a_dim1], lda,
+			    beta, &c__[1], &i__1);
+		    i__1 = *n + 1;
+		    dgemm_("N", "T", &nk, &nk, k, alpha, &a[nk + 1 + a_dim1],
+			    lda, &a[a_dim1 + 1], lda, beta, &c__[nk + 2], &
+			    i__1);
+
+		} else {
+
+/* N is even, TRANSR = 'N', UPLO = 'L', and TRANS = 'T' */
+
+		    i__1 = *n + 1;
+		    dsyrk_("L", "T", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+			     &c__[2], &i__1);
+		    i__1 = *n + 1;
+		    dsyrk_("U", "T", &nk, k, alpha, &a[(nk + 1) * a_dim1 + 1],
+			     lda, beta, &c__[1], &i__1);
+		    i__1 = *n + 1;
+		    dgemm_("T", "N", &nk, &nk, k, alpha, &a[(nk + 1) * a_dim1
+			    + 1], lda, &a[a_dim1 + 1], lda, beta, &c__[nk + 2]
+, &i__1);
+
+		}
+
+	    } else {
+
+/* N is even, TRANSR = 'N', and UPLO = 'U' */
+
+		if (notrans) {
+
+/* N is even, TRANSR = 'N', UPLO = 'U', and TRANS = 'N' */
+
+		    i__1 = *n + 1;
+		    dsyrk_("L", "N", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+			     &c__[nk + 2], &i__1);
+		    i__1 = *n + 1;
+		    dsyrk_("U", "N", &nk, k, alpha, &a[nk + 1 + a_dim1], lda,
+			    beta, &c__[nk + 1], &i__1);
+		    i__1 = *n + 1;
+		    dgemm_("N", "T", &nk, &nk, k, alpha, &a[a_dim1 + 1], lda,
+			    &a[nk + 1 + a_dim1], lda, beta, &c__[1], &i__1);
+
+		} else {
+
+/* N is even, TRANSR = 'N', UPLO = 'U', and TRANS = 'T' */
+
+		    i__1 = *n + 1;
+		    dsyrk_("L", "T", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+			     &c__[nk + 2], &i__1);
+		    i__1 = *n + 1;
+		    dsyrk_("U", "T", &nk, k, alpha, &a[(nk + 1) * a_dim1 + 1],
+			     lda, beta, &c__[nk + 1], &i__1);
+		    i__1 = *n + 1;
+		    dgemm_("T", "N", &nk, &nk, k, alpha, &a[a_dim1 + 1], lda,
+			    &a[(nk + 1) * a_dim1 + 1], lda, beta, &c__[1], &
+			    i__1);
+
+		}
+
+	    }
+
+	} else {
+
+/* N is even, and TRANSR = 'T' */
+
+	    if (lower) {
+
+/* N is even, TRANSR = 'T', and UPLO = 'L' */
+
+		if (notrans) {
+
+/* N is even, TRANSR = 'T', UPLO = 'L', and TRANS = 'N' */
+
+		    dsyrk_("U", "N", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+			     &c__[nk + 1], &nk);
+		    dsyrk_("L", "N", &nk, k, alpha, &a[nk + 1 + a_dim1], lda,
+			    beta, &c__[1], &nk);
+		    dgemm_("N", "T", &nk, &nk, k, alpha, &a[a_dim1 + 1], lda,
+			    &a[nk + 1 + a_dim1], lda, beta, &c__[(nk + 1) *
+			    nk + 1], &nk);
+
+		} else {
+
+/* N is even, TRANSR = 'T', UPLO = 'L', and TRANS = 'T' */
+
+		    dsyrk_("U", "T", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+			     &c__[nk + 1], &nk);
+		    dsyrk_("L", "T", &nk, k, alpha, &a[(nk + 1) * a_dim1 + 1],
+			     lda, beta, &c__[1], &nk);
+		    dgemm_("T", "N", &nk, &nk, k, alpha, &a[a_dim1 + 1], lda,
+			    &a[(nk + 1) * a_dim1 + 1], lda, beta, &c__[(nk +
+			    1) * nk + 1], &nk);
+
+		}
+
+	    } else {
+
+/* N is even, TRANSR = 'T', and UPLO = 'U' */
+
+		if (notrans) {
+
+/* N is even, TRANSR = 'T', UPLO = 'U', and TRANS = 'N' */
+
+		    dsyrk_("U", "N", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+			     &c__[nk * (nk + 1) + 1], &nk);
+		    dsyrk_("L", "N", &nk, k, alpha, &a[nk + 1 + a_dim1], lda,
+			    beta, &c__[nk * nk + 1], &nk);
+		    dgemm_("N", "T", &nk, &nk, k, alpha, &a[nk + 1 + a_dim1],
+			    lda, &a[a_dim1 + 1], lda, beta, &c__[1], &nk);
+
+		} else {
+
+/* N is even, TRANSR = 'T', UPLO = 'U', and TRANS = 'T' */
+
+		    dsyrk_("U", "T", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+			     &c__[nk * (nk + 1) + 1], &nk);
+		    dsyrk_("L", "T", &nk, k, alpha, &a[(nk + 1) * a_dim1 + 1],
+			     lda, beta, &c__[nk * nk + 1], &nk);
+		    dgemm_("T", "N", &nk, &nk, k, alpha, &a[(nk + 1) * a_dim1
+			    + 1], lda, &a[a_dim1 + 1], lda, beta, &c__[1], &
+			    nk);
+
+		}
+
+	    }
+
+	}
+
+    }
+
+    return 0;
+
+/* End of DSFRK */
+
+} /* dsfrk_ */
diff --git a/contrib/libs/clapack/dsgesv.c b/contrib/libs/clapack/dsgesv.c
new file mode 100644
index 0000000000..44252af966
--- /dev/null
+++ b/contrib/libs/clapack/dsgesv.c
@@ -0,0 +1,416 @@
+/* dsgesv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b10 = -1.;
+static doublereal c_b11 = 1.;
+static integer c__1 = 1;
+
+/* Subroutine */ int dsgesv_(integer *n, integer *nrhs, doublereal *a,
+	integer *lda, integer *ipiv, doublereal *b, integer *ldb, doublereal *
+	x, integer *ldx, doublereal *work, real *swork, integer *iter,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, work_dim1, work_offset,
+	    x_dim1, x_offset, i__1;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__;
+    doublereal cte, eps, anrm;
+    integer ptsa;
+    doublereal rnrm, xnrm;
+    integer ptsx;
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *);
+    integer iiter;
+    extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *), dlag2s_(integer *, integer *,
+	    doublereal *, integer *, real *, integer *, integer *), slag2d_(
+	    integer *, integer *, real *, integer *, doublereal *, integer *,
+	    integer *);
+    extern doublereal dlamch_(char *), dlange_(char *, integer *,
+	    integer *, doublereal *, integer *, doublereal *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *),
+	    xerbla_(char *, integer *), dgetrf_(integer *, integer *,
+	    doublereal *, integer *, integer *, integer *), dgetrs_(char *,
+	    integer *, integer *, doublereal *, integer *, integer *,
+	    doublereal *, integer *, integer *), sgetrf_(integer *,
+	    integer *, real *, integer *, integer *, integer *), sgetrs_(char
+	    *, integer *, integer *, real *, integer *, integer *, real *,
+	    integer *, integer *);
+
+
+/* -- LAPACK PROTOTYPE driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* February 2007 */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSGESV computes the solution to a real system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N matrix and X and B are N-by-NRHS matrices. */
+
+/* DSGESV first attempts to factorize the matrix in SINGLE PRECISION */
+/* and use this factorization within an iterative refinement procedure */
+/* to produce a solution with DOUBLE PRECISION normwise backward error */
+/* quality (see below). If the approach fails the method switches to a */
+/* DOUBLE PRECISION factorization and solve. */
+
+/* The iterative refinement is not going to be a winning strategy if */
+/* the ratio SINGLE PRECISION performance over DOUBLE PRECISION */
+/* performance is too small. A reasonable strategy should take the */
+/* number of right-hand sides and the size of the matrix into account. */
+/* This might be done with a call to ILAENV in the future. Up to now, we */
+/* always try iterative refinement. */
+
+/* The iterative refinement process is stopped if */
+/* ITER > ITERMAX */
+/* or for all the RHS we have: */
+/* RNRM < SQRT(N)*XNRM*ANRM*EPS*BWDMAX */
+/* where */
+/* o ITER is the number of the current iteration in the iterative */
+/* refinement process */
+/* o RNRM is the infinity-norm of the residual */
+/* o XNRM is the infinity-norm of the solution */
+/* o ANRM is the infinity-operator-norm of the matrix A */
+/* o EPS is the machine epsilon returned by DLAMCH('Epsilon') */
+/* The value ITERMAX and BWDMAX are fixed to 30 and 1.0D+00 */
+/* respectively. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input or input/output) DOUBLE PRECISION array, */
+/* dimension (LDA,N) */
+/* On entry, the N-by-N coefficient matrix A. */
+/* On exit, if iterative refinement has been successfully used */
+/* (INFO.EQ.0 and ITER.GE.0, see description below), then A is */
+/* unchanged, if double precision factorization has been used */
+/* (INFO.EQ.0 and ITER.LT.0, see description below), then the */
+/* array A contains the factors L and U from the factorization */
+/* A = P*L*U; the unit diagonal elements of L are not stored. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* The pivot indices that define the permutation matrix P; */
+/* row i of the matrix was interchanged with row IPIV(i). */
+/* Corresponds either to the single precision factorization */
+/* (if INFO.EQ.0 and ITER.GE.0) or the double precision */
+/* factorization (if INFO.EQ.0 and ITER.LT.0). */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* If INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (N*NRHS) */
+/* This array is used to hold the residual vectors. */
+
+/* SWORK (workspace) REAL array, dimension (N*(N+NRHS)) */
+/* This array is used to use the single precision matrix and the */
+/* right-hand sides or solutions in single precision. */
+
+/* ITER (output) INTEGER */
+/* < 0: iterative refinement has failed, double precision */
+/* factorization has been performed */
+/* -1 : the routine fell back to full precision for */
+/* implementation- or machine-specific reasons */
+/* -2 : narrowing the precision induced an overflow, */
+/* the routine fell back to full precision */
+/* -3 : failure of SGETRF */
+/* -31: stop the iterative refinement after the 30th */
+/* iterations */
+/* > 0: iterative refinement has been successfully used. */
+/* Returns the number of iterations */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) computed in DOUBLE PRECISION is */
+/* exactly zero. The factorization has been completed, */
+/* but the factor U is exactly singular, so the solution */
+/* could not be computed. */
+
+/* ========= */
+
+/* .. Parameters .. */
+
+
+
+
+/* .. Local Scalars .. */
+
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* f2c translation note: all arrays are shifted so that the generated */
+/* 1-based Fortran indices A(i,j) map onto a[i + j*dim1 - offset]. */
+
+    /* Parameter adjustments */
+    work_dim1 = *n;
+    work_offset = 1 + work_dim1;
+    work -= work_offset;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --swork;
+
+    /* Function Body */
+    *info = 0;
+    *iter = 0;
+
+/* Test the input parameters. */
+
+    if (*n < 0) {
+	*info = -1;
+    } else if (*nrhs < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    } else if (*ldx < max(1,*n)) {
+	*info = -9;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSGESV", &i__1);
+	return 0;
+    }
+
+/* Quick return if (N.EQ.0). */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Skip single precision iterative refinement if a priori slower */
+/* than double precision factorization. */
+
+/* NOTE(review): constant-false guard carried over from the reference */
+/* Fortran (IF (.FALSE.)); a tuned build could enable it to skip the */
+/* mixed-precision path entirely and go straight to the L40 fallback. */
+
+    if (FALSE_) {
+	*iter = -1;
+	goto L40;
+    }
+
+/* Compute some constants. */
+
+/* WORK is used here only as an N-vector scratch for the row sums of */
+/* the infinity norm; it is overwritten with residuals later on. */
+
+    anrm = dlange_("I", n, n, &a[a_offset], lda, &work[work_offset]);
+    eps = dlamch_("Epsilon");
+    cte = anrm * eps * sqrt((doublereal) (*n)) * 1.;
+
+/* Set the indices PTSA, PTSX for referencing SA and SX in SWORK. */
+
+    ptsa = 1;
+    ptsx = ptsa + *n * *n;
+
+/* Convert B from double precision to single precision and store the */
+/* result in SX. */
+
+    dlag2s_(n, nrhs, &b[b_offset], ldb, &swork[ptsx], n, info);
+
+    if (*info != 0) {
+	*iter = -2;
+	goto L40;
+    }
+
+/* Convert A from double precision to single precision and store the */
+/* result in SA. */
+
+    dlag2s_(n, n, &a[a_offset], lda, &swork[ptsa], n, info);
+
+    if (*info != 0) {
+	*iter = -2;
+	goto L40;
+    }
+
+/* Compute the LU factorization of SA. */
+
+    sgetrf_(n, n, &swork[ptsa], n, &ipiv[1], info);
+
+    if (*info != 0) {
+	*iter = -3;
+	goto L40;
+    }
+
+/* Solve the system SA*SX = SB. */
+
+    sgetrs_("No transpose", n, nrhs, &swork[ptsa], n, &ipiv[1], &swork[ptsx],
+	    n, info);
+
+/* Convert SX back to double precision */
+
+    slag2d_(n, nrhs, &swork[ptsx], n, &x[x_offset], ldx, info);
+
+/* Compute R = B - AX (R is WORK). */
+
+    dlacpy_("All", n, nrhs, &b[b_offset], ldb, &work[work_offset], n);
+
+    dgemm_("No Transpose", "No Transpose", n, nrhs, n, &c_b10, &a[a_offset],
+	    lda, &x[x_offset], ldx, &c_b11, &work[work_offset], n);
+
+/* Check whether the NRHS normwise backward errors satisfy the */
+/* stopping criterion. If yes, set ITER=0 and return. */
+
+    i__1 = *nrhs;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	xnrm = (d__1 = x[idamax_(n, &x[i__ * x_dim1 + 1], &c__1) + i__ *
+		x_dim1], abs(d__1));
+	rnrm = (d__1 = work[idamax_(n, &work[i__ * work_dim1 + 1], &c__1) +
+		i__ * work_dim1], abs(d__1));
+	if (rnrm > xnrm * cte) {
+	    goto L10;
+	}
+    }
+
+/* If we are here, the NRHS normwise backward errors satisfy the */
+/* stopping criterion. We are good to exit. */
+
+    *iter = 0;
+    return 0;
+
+L10:
+
+/* At most 30 refinement sweeps (ITERMAX in the reference Fortran). */
+
+    for (iiter = 1; iiter <= 30; ++iiter) {
+
+/* Convert R (in WORK) from double precision to single precision */
+/* and store the result in SX. */
+
+	dlag2s_(n, nrhs, &work[work_offset], n, &swork[ptsx], n, info);
+
+	if (*info != 0) {
+	    *iter = -2;
+	    goto L40;
+	}
+
+/* Solve the system SA*SX = SR. */
+
+	sgetrs_("No transpose", n, nrhs, &swork[ptsa], n, &ipiv[1], &swork[
+		ptsx], n, info);
+
+/* Convert SX back to double precision and update the current */
+/* iterate. */
+
+	slag2d_(n, nrhs, &swork[ptsx], n, &work[work_offset], n, info);
+
+	i__1 = *nrhs;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    daxpy_(n, &c_b11, &work[i__ * work_dim1 + 1], &c__1, &x[i__ *
+		    x_dim1 + 1], &c__1);
+	}
+
+/* Compute R = B - AX (R is WORK). */
+
+	dlacpy_("All", n, nrhs, &b[b_offset], ldb, &work[work_offset], n);
+
+	dgemm_("No Transpose", "No Transpose", n, nrhs, n, &c_b10, &a[
+		a_offset], lda, &x[x_offset], ldx, &c_b11, &work[work_offset],
+		 n);
+
+/* Check whether the NRHS normwise backward errors satisfy the */
+/* stopping criterion. If yes, set ITER=IITER>0 and return. */
+
+	i__1 = *nrhs;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    xnrm = (d__1 = x[idamax_(n, &x[i__ * x_dim1 + 1], &c__1) + i__ *
+		    x_dim1], abs(d__1));
+	    rnrm = (d__1 = work[idamax_(n, &work[i__ * work_dim1 + 1], &c__1)
+		    + i__ * work_dim1], abs(d__1));
+	    if (rnrm > xnrm * cte) {
+		goto L20;
+	    }
+	}
+
+/* If we are here, the NRHS normwise backward errors satisfy the */
+/* stopping criterion, we are good to exit. */
+
+	*iter = iiter;
+
+	return 0;
+
+L20:
+
+/* L30: */
+	;
+    }
+
+/* If we are at this place of the code, this is because we have */
+/* performed ITER=ITERMAX iterations and never satisfied the */
+/* stopping criterion, set up the ITER flag accordingly and follow up */
+/* on double precision routine. */
+
+    *iter = -31;
+
+L40:
+
+/* Single-precision iterative refinement failed to converge to a */
+/* satisfactory solution, so we resort to double precision. */
+
+    dgetrf_(n, n, &a[a_offset], lda, &ipiv[1], info);
+
+    if (*info != 0) {
+	return 0;
+    }
+
+    dlacpy_("All", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    dgetrs_("No transpose", n, nrhs, &a[a_offset], lda, &ipiv[1], &x[x_offset]
+, ldx, info);
+
+    return 0;
+
+/* End of DSGESV. */
+
+} /* dsgesv_ */
diff --git a/contrib/libs/clapack/dspcon.c b/contrib/libs/clapack/dspcon.c
new file mode 100644
index 0000000000..5a826cd2e0
--- /dev/null
+++ b/contrib/libs/clapack/dspcon.c
@@ -0,0 +1,198 @@
+/* dspcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dspcon_(char *uplo, integer *n, doublereal *ap, integer *
+	ipiv, doublereal *anorm, doublereal *rcond, doublereal *work, integer
+	*iwork, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+
+    /* Local variables */
+    integer i__, ip, kase;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    logical upper;
+    extern /* Subroutine */ int dlacn2_(integer *, doublereal *, doublereal *,
+	     integer *, doublereal *, integer *, integer *), xerbla_(char *,
+	    integer *);
+    doublereal ainvnm;
+    extern /* Subroutine */ int dsptrs_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSPCON estimates the reciprocal of the condition number (in the */
+/* 1-norm) of a real symmetric packed matrix A using the factorization */
+/* A = U*D*U**T or A = L*D*L**T computed by DSPTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**T; */
+/* = 'L': Lower triangular, form is A = L*D*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* The block diagonal matrix D and the multipliers used to */
+/* obtain the factor U or L as computed by DSPTRF, stored as a */
+/* packed triangular matrix. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by DSPTRF. */
+
+/* ANORM (input) DOUBLE PRECISION */
+/* The 1-norm of the original matrix A. */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (2*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --iwork;
+    --work;
+    --ipiv;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*anorm < 0.) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSPCON", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *rcond = 0.;
+    if (*n == 0) {
+	*rcond = 1.;
+	return 0;
+    } else if (*anorm <= 0.) {
+	return 0;
+    }
+
+/* Check that the diagonal matrix D is nonsingular. */
+
+/* A zero 1-by-1 pivot means A is exactly singular: return with */
+/* RCOND left at 0 (set above) and INFO = 0, per the LAPACK contract. */
+
+    if (upper) {
+
+/* Upper triangular storage: examine D from bottom to top */
+
+/* IP walks the packed column ends; AP(IP) is the diagonal D(i,i). */
+
+	ip = *n * (*n + 1) / 2;
+	for (i__ = *n; i__ >= 1; --i__) {
+	    if (ipiv[i__] > 0 && ap[ip] == 0.) {
+		return 0;
+	    }
+	    ip -= i__;
+/* L10: */
+	}
+    } else {
+
+/* Lower triangular storage: examine D from top to bottom. */
+
+	ip = 1;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (ipiv[i__] > 0 && ap[ip] == 0.) {
+		return 0;
+	    }
+	    ip = ip + *n - i__ + 1;
+/* L20: */
+	}
+    }
+
+/* Estimate the 1-norm of the inverse. */
+
+/* DLACN2 is a reverse-communication estimator: it returns KASE != 0 */
+/* asking the caller to apply inv(A) (or its transpose) to WORK; since */
+/* A is symmetric, a single DSPTRS solve serves both requests. */
+
+    kase = 0;
+L30:
+    dlacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+    if (kase != 0) {
+
+/* Multiply by inv(L*D*L') or inv(U*D*U'). */
+
+	dsptrs_(uplo, n, &c__1, &ap[1], &ipiv[1], &work[1], n, info);
+	goto L30;
+    }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+    if (ainvnm != 0.) {
+	*rcond = 1. / ainvnm / *anorm;
+    }
+
+    return 0;
+
+/* End of DSPCON */
+
+} /* dspcon_ */
diff --git a/contrib/libs/clapack/dspev.c b/contrib/libs/clapack/dspev.c
new file mode 100644
index 0000000000..3e9d3c1138
--- /dev/null
+++ b/contrib/libs/clapack/dspev.c
@@ -0,0 +1,246 @@
+/* dspev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dspev_(char *jobz, char *uplo, integer *n, doublereal *
+	ap, doublereal *w, doublereal *z__, integer *ldz, doublereal *work,
+	integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    doublereal eps;
+    integer inde;
+    doublereal anrm;
+    integer imax;
+    doublereal rmin, rmax;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    doublereal sigma;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    logical wantz;
+    extern doublereal dlamch_(char *);
+    integer iscale;
+    doublereal safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal bignum;
+    extern doublereal dlansp_(char *, char *, integer *, doublereal *,
+	    doublereal *);
+    integer indtau;
+    extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+	     integer *);
+    integer indwrk;
+    extern /* Subroutine */ int dopgtr_(char *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *), dsptrd_(char *, integer *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, integer *), dsteqr_(char *,
+	    integer *, doublereal *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *);
+    doublereal smlnum;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSPEV computes all the eigenvalues and, optionally, eigenvectors of a */
+/* real symmetric matrix A in packed storage. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, AP is overwritten by values generated during the */
+/* reduction to tridiagonal form. If UPLO = 'U', the diagonal */
+/* and first superdiagonal of the tridiagonal matrix T overwrite */
+/* the corresponding elements of A, and if UPLO = 'L', the */
+/* diagonal and first subdiagonal of T overwrite the */
+/* corresponding elements of A. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/* eigenvectors of the matrix A, with the i-th column of Z */
+/* holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, the algorithm failed to converge; i */
+/* off-diagonal elements of an intermediate tridiagonal */
+/* form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (lsame_(uplo, "U") || lsame_(uplo,
+	    "L"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ldz < 1 || wantz && *ldz < *n) {
+	*info = -7;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSPEV ", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* N = 1: the matrix is its own eigenvalue; eigenvector is trivially 1. */
+
+    if (*n == 1) {
+	w[1] = ap[1];
+	if (wantz) {
+	    z__[z_dim1 + 1] = 1.;
+	}
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    safmin = dlamch_("Safe minimum");
+    eps = dlamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1. / smlnum;
+    rmin = sqrt(smlnum);
+    rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+/* ISCALE records whether AP was multiplied by SIGMA so the */
+/* eigenvalues can be rescaled by 1/SIGMA after the solve. */
+
+    anrm = dlansp_("M", uplo, n, &ap[1], &work[1]);
+    iscale = 0;
+    if (anrm > 0. && anrm < rmin) {
+	iscale = 1;
+	sigma = rmin / anrm;
+    } else if (anrm > rmax) {
+	iscale = 1;
+	sigma = rmax / anrm;
+    }
+    if (iscale == 1) {
+	i__1 = *n * (*n + 1) / 2;
+	dscal_(&i__1, &sigma, &ap[1], &c__1);
+    }
+
+/* Call DSPTRD to reduce symmetric packed matrix to tridiagonal form. */
+
+/* Workspace layout: WORK(INDE..) holds the off-diagonal E, */
+/* WORK(INDTAU..) the Householder scalars TAU. */
+
+    inde = 1;
+    indtau = inde + *n;
+    dsptrd_(uplo, n, &ap[1], &w[1], &work[inde], &work[indtau], &iinfo);
+
+/* For eigenvalues only, call DSTERF. For eigenvectors, first call */
+/* DOPGTR to generate the orthogonal matrix, then call DSTEQR. */
+
+    if (! wantz) {
+	dsterf_(n, &w[1], &work[inde], info);
+    } else {
+	indwrk = indtau + *n;
+	dopgtr_(uplo, n, &ap[1], &work[indtau], &z__[z_offset], ldz, &work[
+		indwrk], &iinfo);
+	dsteqr_(jobz, n, &w[1], &work[inde], &z__[z_offset], ldz, &work[
+		indtau], info);
+    }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+/* On convergence failure (INFO > 0) only the first INFO-1 */
+/* eigenvalues are valid, so only those are rescaled. */
+
+    if (iscale == 1) {
+	if (*info == 0) {
+	    imax = *n;
+	} else {
+	    imax = *info - 1;
+	}
+	d__1 = 1. / sigma;
+	dscal_(&imax, &d__1, &w[1], &c__1);
+    }
+
+    return 0;
+
+/* End of DSPEV */
+
+} /* dspev_ */
diff --git a/contrib/libs/clapack/dspevd.c b/contrib/libs/clapack/dspevd.c
new file mode 100644
index 0000000000..ff663f945f
--- /dev/null
+++ b/contrib/libs/clapack/dspevd.c
@@ -0,0 +1,314 @@
+/* dspevd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dspevd_(char *jobz, char *uplo, integer *n, doublereal *
+	ap, doublereal *w, doublereal *z__, integer *ldz, doublereal *work,
+	integer *lwork, integer *iwork, integer *liwork, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    doublereal eps;
+    integer inde;
+    doublereal anrm, rmin, rmax;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    doublereal sigma;
+    extern logical lsame_(char *, char *);
+    integer iinfo, lwmin;
+    logical wantz;
+    extern doublereal dlamch_(char *);
+    integer iscale;
+    extern /* Subroutine */ int dstedc_(char *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    integer *, integer *, integer *);
+    doublereal safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal bignum;
+    extern doublereal dlansp_(char *, char *, integer *, doublereal *,
+	    doublereal *);
+    integer indtau;
+    extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+	     integer *);
+    integer indwrk, liwmin;
+    extern /* Subroutine */ int dsptrd_(char *, integer *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, integer *),
+	    dopmtr_(char *, char *, char *, integer *, integer *, doublereal *
+, doublereal *, doublereal *, integer *, doublereal *, integer *);
+    integer llwork;
+    doublereal smlnum;
+    logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSPEVD computes all the eigenvalues and, optionally, eigenvectors */
+/* of a real symmetric matrix A in packed storage. If eigenvectors are */
+/* desired, it uses a divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, AP is overwritten by values generated during the */
+/* reduction to tridiagonal form. If UPLO = 'U', the diagonal */
+/* and first superdiagonal of the tridiagonal matrix T overwrite */
+/* the corresponding elements of A, and if UPLO = 'L', the */
+/* diagonal and first subdiagonal of T overwrite the */
+/* corresponding elements of A. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/* eigenvectors of the matrix A, with the i-th column of Z */
+/* holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, */
+/* dimension (LWORK) */
+/* On exit, if INFO = 0, WORK(1) returns the required LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If N <= 1, LWORK must be at least 1. */
+/* If JOBZ = 'N' and N > 1, LWORK must be at least 2*N. */
+/* If JOBZ = 'V' and N > 1, LWORK must be at least */
+/* 1 + 6*N + N**2. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the required sizes of the WORK and IWORK */
+/* arrays, returns these values as the first entries of the WORK */
+/* and IWORK arrays, and no error message related to LWORK or */
+/* LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the required LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If JOBZ = 'N' or N <= 1, LIWORK must be at least 1. */
+/* If JOBZ = 'V' and N > 1, LIWORK must be at least 3 + 5*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the required sizes of the WORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK and IWORK arrays, and no error message related to */
+/* LWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, the algorithm failed to converge; i */
+/* off-diagonal elements of an intermediate tridiagonal */
+/* form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    lquery = *lwork == -1 || *liwork == -1;
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (lsame_(uplo, "U") || lsame_(uplo,
+	    "L"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ldz < 1 || wantz && *ldz < *n) {
+	*info = -7;
+    }
+
+/* Compute the minimal workspace sizes and report them in WORK(1) / */
+/* IWORK(1); also validates LWORK/LIWORK unless this is a query. */
+
+    if (*info == 0) {
+	if (*n <= 1) {
+	    liwmin = 1;
+	    lwmin = 1;
+	} else {
+	    if (wantz) {
+		liwmin = *n * 5 + 3;
+/* Computing 2nd power */
+		i__1 = *n;
+		lwmin = *n * 6 + 1 + i__1 * i__1;
+	    } else {
+		liwmin = 1;
+		lwmin = *n << 1;
+	    }
+	}
+	iwork[1] = liwmin;
+	work[1] = (doublereal) lwmin;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -9;
+	} else if (*liwork < liwmin && ! lquery) {
+	    *info = -11;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSPEVD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* N = 1: the matrix is its own eigenvalue; eigenvector is trivially 1. */
+
+    if (*n == 1) {
+	w[1] = ap[1];
+	if (wantz) {
+	    z__[z_dim1 + 1] = 1.;
+	}
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    safmin = dlamch_("Safe minimum");
+    eps = dlamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1. / smlnum;
+    rmin = sqrt(smlnum);
+    rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+/* ISCALE records whether AP was multiplied by SIGMA so the */
+/* eigenvalues can be rescaled by 1/SIGMA after the solve. */
+
+    anrm = dlansp_("M", uplo, n, &ap[1], &work[1]);
+    iscale = 0;
+    if (anrm > 0. && anrm < rmin) {
+	iscale = 1;
+	sigma = rmin / anrm;
+    } else if (anrm > rmax) {
+	iscale = 1;
+	sigma = rmax / anrm;
+    }
+    if (iscale == 1) {
+	i__1 = *n * (*n + 1) / 2;
+	dscal_(&i__1, &sigma, &ap[1], &c__1);
+    }
+
+/* Call DSPTRD to reduce symmetric packed matrix to tridiagonal form. */
+
+/* Workspace layout: WORK(INDE..) holds the off-diagonal E, */
+/* WORK(INDTAU..) the Householder scalars TAU. */
+
+    inde = 1;
+    indtau = inde + *n;
+    dsptrd_(uplo, n, &ap[1], &w[1], &work[inde], &work[indtau], &iinfo);
+
+/* For eigenvalues only, call DSTERF. For eigenvectors, first call */
+/* DSTEDC to generate the eigenvector matrix, WORK(INDWRK), of the */
+/* tridiagonal matrix, then call DOPMTR to multiply it by the */
+/* Householder transformations represented in AP. */
+
+    if (! wantz) {
+	dsterf_(n, &w[1], &work[inde], info);
+    } else {
+	indwrk = indtau + *n;
+	llwork = *lwork - indwrk + 1;
+	dstedc_("I", n, &w[1], &work[inde], &z__[z_offset], ldz, &work[indwrk]
+, &llwork, &iwork[1], liwork, info);
+	dopmtr_("L", uplo, "N", n, n, &ap[1], &work[indtau], &z__[z_offset],
+		ldz, &work[indwrk], &iinfo);
+    }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+    if (iscale == 1) {
+	d__1 = 1. / sigma;
+	dscal_(n, &d__1, &w[1], &c__1);
+    }
+
+    work[1] = (doublereal) lwmin;
+    iwork[1] = liwmin;
+    return 0;
+
+/* End of DSPEVD */
+
+} /* dspevd_ */
diff --git a/contrib/libs/clapack/dspevx.c b/contrib/libs/clapack/dspevx.c
new file mode 100644
index 0000000000..f1f6053487
--- /dev/null
+++ b/contrib/libs/clapack/dspevx.c
@@ -0,0 +1,467 @@
+/* dspevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dspevx_(char *jobz, char *range, char *uplo, integer *n,
+ doublereal *ap, doublereal *vl, doublereal *vu, integer *il, integer *
+ iu, doublereal *abstol, integer *m, doublereal *w, doublereal *z__,
+ integer *ldz, doublereal *work, integer *iwork, integer *ifail,
+ integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, jj;
+ doublereal eps, vll, vuu, tmp1;
+ integer indd, inde;
+ doublereal anrm;
+ integer imax;
+ doublereal rmin, rmax;
+ logical test;
+ integer itmp1, indee;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ doublereal sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ char order[1];
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dswap_(integer *, doublereal *, integer
+ *, doublereal *, integer *);
+ logical wantz;
+ extern doublereal dlamch_(char *);
+ logical alleig, indeig;
+ integer iscale, indibl;
+ logical valeig;
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal abstll, bignum;
+ extern doublereal dlansp_(char *, char *, integer *, doublereal *,
+ doublereal *);
+ integer indtau, indisp;
+ extern /* Subroutine */ int dstein_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *, doublereal *,
+ integer *, doublereal *, integer *, integer *, integer *),
+ dsterf_(integer *, doublereal *, doublereal *, integer *);
+ integer indiwo;
+ extern /* Subroutine */ int dstebz_(char *, char *, integer *, doublereal
+ *, doublereal *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *, doublereal *, integer *, integer *);
+ integer indwrk;
+ extern /* Subroutine */ int dopgtr_(char *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *), dsptrd_(char *, integer *, doublereal *, doublereal *,
+ doublereal *, doublereal *, integer *), dsteqr_(char *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *), dopmtr_(char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *);
+ integer nsplit;
+ doublereal smlnum;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSPEVX computes selected eigenvalues and, optionally, eigenvectors */
+/* of a real symmetric matrix A in packed storage. Eigenvalues/vectors */
+/* can be selected by specifying either a range of values or a range of */
+/* indices for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found; */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found; */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, AP is overwritten by values generated during the */
+/* reduction to tridiagonal form. If UPLO = 'U', the diagonal */
+/* and first superdiagonal of the tridiagonal matrix T overwrite */
+/* the corresponding elements of A, and if UPLO = 'L', the */
+/* diagonal and first subdiagonal of T overwrite the */
+/* corresponding elements of A. */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) DOUBLE PRECISION */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing AP to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*DLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*DLAMCH('S'). */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices */
+/* with Guaranteed High Relative Accuracy," by Demmel and */
+/* Kahan, LAPACK Working Note #3. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the selected eigenvalues in ascending order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and the */
+/* index of the eigenvector is returned in IFAIL. */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (8*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, then i eigenvectors failed to converge. */
+/* Their indices are stored in array IFAIL. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -2;
+ } else if (! (lsame_(uplo, "L") || lsame_(uplo,
+ "U"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -7;
+ }
+ } else if (indeig) {
+ if (*il < 1 || *il > max(1,*n)) {
+ *info = -8;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -9;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -14;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSPEVX", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (alleig || indeig) {
+ *m = 1;
+ w[1] = ap[1];
+ } else {
+ if (*vl < ap[1] && *vu >= ap[1]) {
+ *m = 1;
+ w[1] = ap[1];
+ }
+ }
+ if (wantz) {
+ z__[z_dim1 + 1] = 1.;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = dlamch_("Safe minimum");
+ eps = dlamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1. / smlnum;
+ rmin = sqrt(smlnum);
+/* Computing MIN */
+ d__1 = sqrt(bignum), d__2 = 1. / sqrt(sqrt(safmin));
+ rmax = min(d__1,d__2);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ iscale = 0;
+ abstll = *abstol;
+ if (valeig) {
+ vll = *vl;
+ vuu = *vu;
+ } else {
+ vll = 0.;
+ vuu = 0.;
+ }
+ anrm = dlansp_("M", uplo, n, &ap[1], &work[1]);
+ if (anrm > 0. && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ i__1 = *n * (*n + 1) / 2;
+ dscal_(&i__1, &sigma, &ap[1], &c__1);
+ if (*abstol > 0.) {
+ abstll = *abstol * sigma;
+ }
+ if (valeig) {
+ vll = *vl * sigma;
+ vuu = *vu * sigma;
+ }
+ }
+
+/* Call DSPTRD to reduce symmetric packed matrix to tridiagonal form. */
+
+/* WORK layout (1-based): TAU at 1, off-diagonal E at 1+N, */
+/* diagonal D at 1+2*N, general scratch from 1+3*N onward. */
+
+ indtau = 1;
+ inde = indtau + *n;
+ indd = inde + *n;
+ indwrk = indd + *n;
+ dsptrd_(uplo, n, &ap[1], &work[indd], &work[inde], &work[indtau], &iinfo);
+
+/* If all eigenvalues are desired and ABSTOL is less than or equal */
+/* to zero, then call DSTERF or DOPGTR and SSTEQR. If this fails */
+/* for some eigenvalue, then try DSTEBZ. */
+
+ test = FALSE_;
+ if (indeig) {
+ if (*il == 1 && *iu == *n) {
+ test = TRUE_;
+ }
+ }
+ if ((alleig || test) && *abstol <= 0.) {
+ dcopy_(n, &work[indd], &c__1, &w[1], &c__1);
+ indee = indwrk + (*n << 1);
+ if (! wantz) {
+ i__1 = *n - 1;
+ dcopy_(&i__1, &work[inde], &c__1, &work[indee], &c__1);
+ dsterf_(n, &w[1], &work[indee], info);
+ } else {
+ dopgtr_(uplo, n, &ap[1], &work[indtau], &z__[z_offset], ldz, &
+ work[indwrk], &iinfo);
+ i__1 = *n - 1;
+ dcopy_(&i__1, &work[inde], &c__1, &work[indee], &c__1);
+ dsteqr_(jobz, n, &w[1], &work[indee], &z__[z_offset], ldz, &work[
+ indwrk], info);
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ifail[i__] = 0;
+/* L10: */
+ }
+ }
+ }
+ if (*info == 0) {
+ *m = *n;
+ goto L20;
+ }
+ *info = 0;
+ }
+
+/* Otherwise, call DSTEBZ and, if eigenvectors are desired, SSTEIN. */
+
+ if (wantz) {
+ *(unsigned char *)order = 'B';
+ } else {
+ *(unsigned char *)order = 'E';
+ }
+ indibl = 1;
+ indisp = indibl + *n;
+ indiwo = indisp + *n;
+ dstebz_(range, order, n, &vll, &vuu, il, iu, &abstll, &work[indd], &work[
+ inde], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &work[
+ indwrk], &iwork[indiwo], info);
+
+ if (wantz) {
+ dstein_(n, &work[indd], &work[inde], m, &w[1], &iwork[indibl], &iwork[
+ indisp], &z__[z_offset], ldz, &work[indwrk], &iwork[indiwo], &
+ ifail[1], info);
+
+/* Apply orthogonal matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by DSTEIN. */
+
+ dopmtr_("L", uplo, "N", n, m, &ap[1], &work[indtau], &z__[z_offset],
+ ldz, &work[indwrk], &iinfo);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+L20:
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *m;
+ } else {
+/* INFO > 0: rescale only the first INFO-1 eigenvalues. */
+ imax = *info - 1;
+ }
+ d__1 = 1. / sigma;
+ dscal_(&imax, &d__1, &w[1], &c__1);
+ }
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+/* Selection sort on W(1:M); eigenvector columns, DSTEBZ block */
+/* indices and IFAIL entries are swapped to stay aligned with W. */
+
+ if (wantz) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = 0;
+ tmp1 = w[j];
+ i__2 = *m;
+ for (jj = j + 1; jj <= i__2; ++jj) {
+ if (w[jj] < tmp1) {
+ i__ = jj;
+ tmp1 = w[jj];
+ }
+/* L30: */
+ }
+
+ if (i__ != 0) {
+ itmp1 = iwork[indibl + i__ - 1];
+ w[i__] = w[j];
+ iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+ w[j] = tmp1;
+ iwork[indibl + j - 1] = itmp1;
+ dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+ &c__1);
+ if (*info != 0) {
+ itmp1 = ifail[i__];
+ ifail[i__] = ifail[j];
+ ifail[j] = itmp1;
+ }
+ }
+/* L40: */
+ }
+ }
+
+ return 0;
+
+/* End of DSPEVX */
+
+} /* dspevx_ */
diff --git a/contrib/libs/clapack/dspgst.c b/contrib/libs/clapack/dspgst.c
new file mode 100644
index 0000000000..f42db72829
--- /dev/null
+++ b/contrib/libs/clapack/dspgst.c
@@ -0,0 +1,284 @@
+/* dspgst.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b9 = -1.;
+static doublereal c_b11 = 1.;
+
+/* Subroutine */ int dspgst_(integer *itype, char *uplo, integer *n,
+ doublereal *ap, doublereal *bp, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ doublereal d__1;
+
+ /* Local variables */
+ integer j, k, j1, k1, jj, kk;
+ doublereal ct, ajj;
+ integer j1j1;
+ doublereal akk;
+ integer k1k1;
+ doublereal bjj, bkk;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ extern /* Subroutine */ int dspr2_(char *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *), dscal_(integer *, doublereal *, doublereal *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *), dspmv_(char *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *);
+ logical upper;
+ extern /* Subroutine */ int dtpmv_(char *, char *, char *, integer *,
+ doublereal *, doublereal *, integer *),
+ dtpsv_(char *, char *, char *, integer *, doublereal *,
+ doublereal *, integer *), xerbla_(char *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSPGST reduces a real symmetric-definite generalized eigenproblem */
+/* to standard form, using packed storage. */
+
+/* If ITYPE = 1, the problem is A*x = lambda*B*x, */
+/* and A is overwritten by inv(U**T)*A*inv(U) or inv(L)*A*inv(L**T) */
+
+/* If ITYPE = 2 or 3, the problem is A*B*x = lambda*x or */
+/* B*A*x = lambda*x, and A is overwritten by U*A*U**T or L**T*A*L. */
+
+/* B must have been previously factorized as U**T*U or L*L**T by DPPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* = 1: compute inv(U**T)*A*inv(U) or inv(L)*A*inv(L**T); */
+/* = 2 or 3: compute U*A*U**T or L**T*A*L. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored and B is factored as */
+/* U**T*U; */
+/* = 'L': Lower triangle of A is stored and B is factored as */
+/* L*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* AP (input/output) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, if INFO = 0, the transformed matrix, stored in the */
+/* same format as A. */
+
+/* BP (input) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* The triangular factor from the Cholesky factorization of B, */
+/* stored in the same format as A, as returned by DPPTRF. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --bp;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSPGST", &i__1);
+ return 0;
+ }
+
+/* ITYPE = 1: A := inv(U')*A*inv(U) or inv(L)*A*inv(L'); */
+/* ITYPE = 2 or 3: A := U*A*U' or L'*A*L (see Purpose above). */
+
+ if (*itype == 1) {
+ if (upper) {
+
+/* Compute inv(U')*A*inv(U) */
+
+/* J1 and JJ are the indices of A(1,j) and A(j,j) */
+
+ jj = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ j1 = jj + 1;
+ jj += j;
+
+/* Compute the j-th column of the upper triangle of A */
+
+ bjj = bp[jj];
+ dtpsv_(uplo, "Transpose", "Nonunit", &j, &bp[1], &ap[j1], &
+ c__1);
+ i__2 = j - 1;
+ dspmv_(uplo, &i__2, &c_b9, &ap[1], &bp[j1], &c__1, &c_b11, &
+ ap[j1], &c__1);
+ i__2 = j - 1;
+ d__1 = 1. / bjj;
+ dscal_(&i__2, &d__1, &ap[j1], &c__1);
+ i__2 = j - 1;
+ ap[jj] = (ap[jj] - ddot_(&i__2, &ap[j1], &c__1, &bp[j1], &
+ c__1)) / bjj;
+/* L10: */
+ }
+ } else {
+
+/* Compute inv(L)*A*inv(L') */
+
+/* KK and K1K1 are the indices of A(k,k) and A(k+1,k+1) */
+
+ kk = 1;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ k1k1 = kk + *n - k + 1;
+
+/* Update the lower triangle of A(k:n,k:n) */
+
+ akk = ap[kk];
+ bkk = bp[kk];
+/* Computing 2nd power */
+ d__1 = bkk;
+ akk /= d__1 * d__1;
+ ap[kk] = akk;
+ if (k < *n) {
+ i__2 = *n - k;
+ d__1 = 1. / bkk;
+ dscal_(&i__2, &d__1, &ap[kk + 1], &c__1);
+ ct = akk * -.5;
+/* The -akk/2 correction is applied in two halves, one DAXPY */
+/* before and one after the symmetric rank-2 update (DSPR2). */
+ i__2 = *n - k;
+ daxpy_(&i__2, &ct, &bp[kk + 1], &c__1, &ap[kk + 1], &c__1)
+ ;
+ i__2 = *n - k;
+ dspr2_(uplo, &i__2, &c_b9, &ap[kk + 1], &c__1, &bp[kk + 1]
+, &c__1, &ap[k1k1]);
+ i__2 = *n - k;
+ daxpy_(&i__2, &ct, &bp[kk + 1], &c__1, &ap[kk + 1], &c__1)
+ ;
+ i__2 = *n - k;
+ dtpsv_(uplo, "No transpose", "Non-unit", &i__2, &bp[k1k1],
+ &ap[kk + 1], &c__1);
+ }
+ kk = k1k1;
+/* L20: */
+ }
+ }
+ } else {
+ if (upper) {
+
+/* Compute U*A*U' */
+
+/* K1 and KK are the indices of A(1,k) and A(k,k) */
+
+ kk = 0;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ k1 = kk + 1;
+ kk += k;
+
+/* Update the upper triangle of A(1:k,1:k) */
+
+ akk = ap[kk];
+ bkk = bp[kk];
+ i__2 = k - 1;
+ dtpmv_(uplo, "No transpose", "Non-unit", &i__2, &bp[1], &ap[
+ k1], &c__1);
+ ct = akk * .5;
+ i__2 = k - 1;
+ daxpy_(&i__2, &ct, &bp[k1], &c__1, &ap[k1], &c__1);
+ i__2 = k - 1;
+ dspr2_(uplo, &i__2, &c_b11, &ap[k1], &c__1, &bp[k1], &c__1, &
+ ap[1]);
+ i__2 = k - 1;
+ daxpy_(&i__2, &ct, &bp[k1], &c__1, &ap[k1], &c__1);
+ i__2 = k - 1;
+ dscal_(&i__2, &bkk, &ap[k1], &c__1);
+/* Computing 2nd power */
+ d__1 = bkk;
+ ap[kk] = akk * (d__1 * d__1);
+/* L30: */
+ }
+ } else {
+
+/* Compute L'*A*L */
+
+/* JJ and J1J1 are the indices of A(j,j) and A(j+1,j+1) */
+
+ jj = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ j1j1 = jj + *n - j + 1;
+
+/* Compute the j-th column of the lower triangle of A */
+
+ ajj = ap[jj];
+ bjj = bp[jj];
+ i__2 = *n - j;
+ ap[jj] = ajj * bjj + ddot_(&i__2, &ap[jj + 1], &c__1, &bp[jj
+ + 1], &c__1);
+ i__2 = *n - j;
+ dscal_(&i__2, &bjj, &ap[jj + 1], &c__1);
+ i__2 = *n - j;
+ dspmv_(uplo, &i__2, &c_b11, &ap[j1j1], &bp[jj + 1], &c__1, &
+ c_b11, &ap[jj + 1], &c__1);
+ i__2 = *n - j + 1;
+ dtpmv_(uplo, "Transpose", "Non-unit", &i__2, &bp[jj], &ap[jj],
+ &c__1);
+ jj = j1j1;
+/* L40: */
+ }
+ }
+ }
+ return 0;
+
+/* End of DSPGST */
+
+} /* dspgst_ */
diff --git a/contrib/libs/clapack/dspgv.c b/contrib/libs/clapack/dspgv.c
new file mode 100644
index 0000000000..6b14de3341
--- /dev/null
+++ b/contrib/libs/clapack/dspgv.c
@@ -0,0 +1,243 @@
+/* dspgv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dspgv_(integer *itype, char *jobz, char *uplo, integer *
+ n, doublereal *ap, doublereal *bp, doublereal *w, doublereal *z__,
+ integer *ldz, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1;
+
+ /* Local variables */
+ integer j, neig;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dspev_(char *, char *, integer *, doublereal *
+, doublereal *, doublereal *, integer *, doublereal *, integer *);
+ char trans[1];
+ logical upper;
+ extern /* Subroutine */ int dtpmv_(char *, char *, char *, integer *,
+ doublereal *, doublereal *, integer *),
+ dtpsv_(char *, char *, char *, integer *, doublereal *,
+ doublereal *, integer *);
+ logical wantz;
+ extern /* Subroutine */ int xerbla_(char *, integer *), dpptrf_(
+ char *, integer *, doublereal *, integer *), dspgst_(
+ integer *, char *, integer *, doublereal *, doublereal *, integer
+ *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSPGV computes all the eigenvalues and, optionally, the eigenvectors */
+/* of a real generalized symmetric-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. */
+/* Here A and B are assumed to be symmetric, stored in packed format, */
+/* and B is also positive definite. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* AP (input/output) DOUBLE PRECISION array, dimension */
+/* (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the contents of AP are destroyed. */
+
+/* BP (input/output) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* B, packed columnwise in a linear array. The j-th column of B */
+/* is stored in the array BP as follows: */
+/* if UPLO = 'U', BP(i + (j-1)*j/2) = B(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', BP(i + (j-1)*(2*n-j)/2) = B(i,j) for j<=i<=n. */
+
+/* On exit, the triangular factor U or L from the Cholesky */
+/* factorization B = U**T*U or B = L*L**T, in the same storage */
+/* format as B. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/* eigenvectors. The eigenvectors are normalized as follows: */
+/* if ITYPE = 1 or 2, Z**T*B*Z = I; */
+/* if ITYPE = 3, Z**T*inv(B)*Z = I. */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: DPPTRF or DSPEV returned an error code: */
+/* <= N: if INFO = i, DSPEV failed to converge; */
+/* i off-diagonal elements of an intermediate */
+/* tridiagonal form did not converge to zero. */
+/* > N: if INFO = n + i, for 1 <= i <= n, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --bp;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+
+ *info = 0;
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -2;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSPGV ", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a Cholesky factorization of B. */
+
+ dpptrf_(uplo, n, &bp[1], info);
+ if (*info != 0) {
+/* Cholesky failed: report N + index of the non-positive-definite */
+/* leading minor (see the INFO documentation above). */
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+ dspgst_(itype, uplo, n, &ap[1], &bp[1], info);
+ dspev_(jobz, uplo, n, &ap[1], &w[1], &z__[z_offset], ldz, &work[1], info);
+
+ if (wantz) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+ neig = *n;
+ if (*info > 0) {
+/* If DSPEV reported failure, restrict the backtransform to the */
+/* first INFO-1 columns of Z. */
+ neig = *info - 1;
+ }
+ if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'N';
+ } else {
+ *(unsigned char *)trans = 'T';
+ }
+
+ i__1 = neig;
+ for (j = 1; j <= i__1; ++j) {
+ dtpsv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L10: */
+ }
+
+ } else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'T';
+ } else {
+ *(unsigned char *)trans = 'N';
+ }
+
+ i__1 = neig;
+ for (j = 1; j <= i__1; ++j) {
+ dtpmv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L20: */
+ }
+ }
+ }
+ return 0;
+
+/* End of DSPGV */
+
+} /* dspgv_ */
diff --git a/contrib/libs/clapack/dspgvd.c b/contrib/libs/clapack/dspgvd.c
new file mode 100644
index 0000000000..75ceb69e6b
--- /dev/null
+++ b/contrib/libs/clapack/dspgvd.c
@@ -0,0 +1,334 @@
+/* dspgvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dspgvd_(integer *itype, char *jobz, char *uplo, integer *
+	n, doublereal *ap, doublereal *bp, doublereal *w, doublereal *z__,
+	integer *ldz, doublereal *work, integer *lwork, integer *iwork,
+	integer *liwork, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1;
+    doublereal d__1, d__2;
+
+    /* Local variables */
+    integer j, neig;
+    extern logical lsame_(char *, char *);
+    integer lwmin;
+    char trans[1];
+    logical upper;
+    extern /* Subroutine */ int dtpmv_(char *, char *, char *, integer *,
+	    doublereal *, doublereal *, integer *),
+	    dtpsv_(char *, char *, char *, integer *, doublereal *,
+	    doublereal *, integer *);
+    logical wantz;
+    extern /* Subroutine */ int xerbla_(char *, integer *), dspevd_(
+	    char *, char *, integer *, doublereal *, doublereal *, doublereal
+	    *, integer *, doublereal *, integer *, integer *, integer *,
+	    integer *);
+    integer liwmin;
+    extern /* Subroutine */ int dpptrf_(char *, integer *, doublereal *,
+	    integer *), dspgst_(integer *, char *, integer *,
+	    doublereal *, doublereal *, integer *);
+    logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSPGVD computes all the eigenvalues, and optionally, the eigenvectors */
+/* of a real generalized symmetric-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*B*x=(lambda)*x, or B*A*x=(lambda)*x. Here A and */
+/* B are assumed to be symmetric, stored in packed format, and B is also */
+/* positive definite. */
+/* If eigenvectors are desired, it uses a divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* AP (input/output) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the contents of AP are destroyed. */
+
+/* BP (input/output) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* B, packed columnwise in a linear array. The j-th column of B */
+/* is stored in the array BP as follows: */
+/* if UPLO = 'U', BP(i + (j-1)*j/2) = B(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', BP(i + (j-1)*(2*n-j)/2) = B(i,j) for j<=i<=n. */
+
+/* On exit, the triangular factor U or L from the Cholesky */
+/* factorization B = U**T*U or B = L*L**T, in the same storage */
+/* format as B. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/* eigenvectors. The eigenvectors are normalized as follows: */
+/* if ITYPE = 1 or 2, Z**T*B*Z = I; */
+/* if ITYPE = 3, Z**T*inv(B)*Z = I. */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the required LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If N <= 1, LWORK >= 1. */
+/* If JOBZ = 'N' and N > 1, LWORK >= 2*N. */
+/* If JOBZ = 'V' and N > 1, LWORK >= 1 + 6*N + 2*N**2. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the required sizes of the WORK and IWORK */
+/* arrays, returns these values as the first entries of the WORK */
+/* and IWORK arrays, and no error message related to LWORK or */
+/* LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the required LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If JOBZ = 'N' or N <= 1, LIWORK >= 1. */
+/* If JOBZ = 'V' and N > 1, LIWORK >= 3 + 5*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the required sizes of the WORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK and IWORK arrays, and no error message related to */
+/* LWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: DPPTRF or DSPEVD returned an error code: */
+/* <= N: if INFO = i, DSPEVD failed to converge; */
+/* i off-diagonal elements of an intermediate */
+/* tridiagonal form did not converge to zero; */
+/* > N: if INFO = N + i, for 1 <= i <= N, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* f2c shifts each array pointer back by one element so that the
+       1-based Fortran indexing (ap[1], w[1], z__[z_offset], ...) used
+       below addresses the caller's 0-based C arrays correctly. */
+    --ap;
+    --bp;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    upper = lsame_(uplo, "U");
+    /* LWORK = -1 or LIWORK = -1 requests a workspace-size query only. */
+    lquery = *lwork == -1 || *liwork == -1;
+
+    *info = 0;
+    if (*itype < 1 || *itype > 3) {
+	*info = -1;
+    } else if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -2;
+    } else if (! (upper || lsame_(uplo, "L"))) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*ldz < 1 || wantz && *ldz < *n) {
+	*info = -9;
+    }
+
+    if (*info == 0) {
+	/* Compute minimal workspace sizes per the LWORK/LIWORK rules
+	   documented above, and report them in WORK(1)/IWORK(1). */
+	if (*n <= 1) {
+	    liwmin = 1;
+	    lwmin = 1;
+	} else {
+	    if (wantz) {
+		liwmin = *n * 5 + 3;
+/* Computing 2nd power */
+		i__1 = *n;
+		lwmin = *n * 6 + 1 + (i__1 * i__1 << 1);
+	    } else {
+		liwmin = 1;
+		lwmin = *n << 1;
+	    }
+	}
+	work[1] = (doublereal) lwmin;
+	iwork[1] = liwmin;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -11;
+	} else if (*liwork < liwmin && ! lquery) {
+	    *info = -13;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSPGVD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Form a Cholesky factorization of BP. */
+
+    dpptrf_(uplo, n, &bp[1], info);
+    if (*info != 0) {
+	/* B not positive definite: report N + (failing minor order). */
+	*info = *n + *info;
+	return 0;
+    }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+    dspgst_(itype, uplo, n, &ap[1], &bp[1], info);
+    dspevd_(jobz, uplo, n, &ap[1], &w[1], &z__[z_offset], ldz, &work[1],
+	    lwork, &iwork[1], liwork, info);
+/* Computing MAX */
+    d__1 = (doublereal) lwmin;
+    lwmin = (integer) max(d__1,work[1]);
+/* Computing MAX */
+    d__1 = (doublereal) liwmin, d__2 = (doublereal) iwork[1];
+    liwmin = (integer) max(d__1,d__2);
+
+    if (wantz) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+	/* If DSPEVD returned INFO > 0, only the first INFO-1 eigenvector
+	   columns are valid; backtransform just those. */
+	neig = *n;
+	if (*info > 0) {
+	    neig = *info - 1;
+	}
+	if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+	    /* trans is a 1-char Fortran CHARACTER; f2c stores into it
+	       through an unsigned char cast. */
+	    if (upper) {
+		*(unsigned char *)trans = 'N';
+	    } else {
+		*(unsigned char *)trans = 'T';
+	    }
+
+	    i__1 = neig;
+	    for (j = 1; j <= i__1; ++j) {
+		dtpsv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+			1], &c__1);
+/* L10: */
+	    }
+
+	} else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+	    if (upper) {
+		*(unsigned char *)trans = 'T';
+	    } else {
+		*(unsigned char *)trans = 'N';
+	    }
+
+	    i__1 = neig;
+	    for (j = 1; j <= i__1; ++j) {
+		dtpmv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+			1], &c__1);
+/* L20: */
+	    }
+	}
+    }
+
+    work[1] = (doublereal) lwmin;
+    iwork[1] = liwmin;
+
+    return 0;
+
+/* End of DSPGVD */
+
+} /* dspgvd_ */
diff --git a/contrib/libs/clapack/dspgvx.c b/contrib/libs/clapack/dspgvx.c
new file mode 100644
index 0000000000..90c8aa572e
--- /dev/null
+++ b/contrib/libs/clapack/dspgvx.c
@@ -0,0 +1,341 @@
+/* dspgvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dspgvx_(integer *itype, char *jobz, char *range, char *
+	uplo, integer *n, doublereal *ap, doublereal *bp, doublereal *vl,
+	doublereal *vu, integer *il, integer *iu, doublereal *abstol, integer
+	*m, doublereal *w, doublereal *z__, integer *ldz, doublereal *work,
+	integer *iwork, integer *ifail, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1;
+
+    /* Local variables */
+    integer j;
+    extern logical lsame_(char *, char *);
+    char trans[1];
+    logical upper;
+    extern /* Subroutine */ int dtpmv_(char *, char *, char *, integer *,
+	    doublereal *, doublereal *, integer *),
+	    dtpsv_(char *, char *, char *, integer *, doublereal *,
+	    doublereal *, integer *);
+    logical wantz, alleig, indeig, valeig;
+    extern /* Subroutine */ int xerbla_(char *, integer *), dpptrf_(
+	    char *, integer *, doublereal *, integer *), dspgst_(
+	    integer *, char *, integer *, doublereal *, doublereal *, integer
+	    *), dspevx_(char *, char *, char *, integer *, doublereal
+	    *, doublereal *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSPGVX computes selected eigenvalues, and optionally, eigenvectors */
+/* of a real generalized symmetric-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*B*x=(lambda)*x, or B*A*x=(lambda)*x. Here A */
+/* and B are assumed to be symmetric, stored in packed storage, and B */
+/* is also positive definite. Eigenvalues and eigenvectors can be */
+/* selected by specifying either a range of values or a range of indices */
+/* for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A and B are stored; */
+/* = 'L': Lower triangle of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix pencil (A,B). N >= 0. */
+
+/* AP (input/output) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the contents of AP are destroyed. */
+
+/* BP (input/output) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* B, packed columnwise in a linear array. The j-th column of B */
+/* is stored in the array BP as follows: */
+/* if UPLO = 'U', BP(i + (j-1)*j/2) = B(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', BP(i + (j-1)*(2*n-j)/2) = B(i,j) for j<=i<=n. */
+
+/* On exit, the triangular factor U or L from the Cholesky */
+/* factorization B = U**T*U or B = L*L**T, in the same storage */
+/* format as B. */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) DOUBLE PRECISION */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing A to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*DLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*DLAMCH('S'). */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* On normal exit, the first M elements contain the selected */
+/* eigenvalues in ascending order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* The eigenvectors are normalized as follows: */
+/* if ITYPE = 1 or 2, Z**T*B*Z = I; */
+/* if ITYPE = 3, Z**T*inv(B)*Z = I. */
+
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and the */
+/* index of the eigenvector is returned in IFAIL. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (8*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: DPPTRF or DSPEVX returned an error code: */
+/* <= N: if INFO = i, DSPEVX failed to converge; */
+/* i eigenvectors failed to converge. Their indices */
+/* are stored in array IFAIL. */
+/* > N: if INFO = N + i, for 1 <= i <= N, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* f2c shifts each array pointer back by one element so that the
+       1-based Fortran indexing used below addresses the caller's
+       0-based C arrays correctly. */
+    --ap;
+    --bp;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --iwork;
+    --ifail;
+
+    /* Function Body */
+    upper = lsame_(uplo, "U");
+    wantz = lsame_(jobz, "V");
+    alleig = lsame_(range, "A");
+    valeig = lsame_(range, "V");
+    indeig = lsame_(range, "I");
+
+    *info = 0;
+    if (*itype < 1 || *itype > 3) {
+	*info = -1;
+    } else if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -2;
+    } else if (! (alleig || valeig || indeig)) {
+	*info = -3;
+    } else if (! (upper || lsame_(uplo, "L"))) {
+	*info = -4;
+    } else if (*n < 0) {
+	*info = -5;
+    } else {
+	/* Range-specific argument checks: (VL,VU] must be a proper
+	   interval, and 1 <= IL <= IU <= N for index selection. */
+	if (valeig) {
+	    if (*n > 0 && *vu <= *vl) {
+		*info = -9;
+	    }
+	} else if (indeig) {
+	    if (*il < 1) {
+		*info = -10;
+	    } else if (*iu < min(*n,*il) || *iu > *n) {
+		*info = -11;
+	    }
+	}
+    }
+    if (*info == 0) {
+	if (*ldz < 1 || wantz && *ldz < *n) {
+	    *info = -16;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSPGVX", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *m = 0;
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Form a Cholesky factorization of B. */
+
+    dpptrf_(uplo, n, &bp[1], info);
+    if (*info != 0) {
+	/* B not positive definite: report N + (failing minor order). */
+	*info = *n + *info;
+	return 0;
+    }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+    dspgst_(itype, uplo, n, &ap[1], &bp[1], info);
+    dspevx_(jobz, range, uplo, n, &ap[1], vl, vu, il, iu, abstol, m, &w[1], &
+	    z__[z_offset], ldz, &work[1], &iwork[1], &ifail[1], info);
+
+    if (wantz) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+	/* If DSPEVX returned INFO > 0, only the first INFO-1 eigenvector
+	   columns converged; restrict the backtransform to those. */
+	if (*info > 0) {
+	    *m = *info - 1;
+	}
+	if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+	    /* trans is a 1-char Fortran CHARACTER; f2c stores into it
+	       through an unsigned char cast. */
+	    if (upper) {
+		*(unsigned char *)trans = 'N';
+	    } else {
+		*(unsigned char *)trans = 'T';
+	    }
+
+	    i__1 = *m;
+	    for (j = 1; j <= i__1; ++j) {
+		dtpsv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+			1], &c__1);
+/* L10: */
+	    }
+
+	} else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+	    if (upper) {
+		*(unsigned char *)trans = 'T';
+	    } else {
+		*(unsigned char *)trans = 'N';
+	    }
+
+	    i__1 = *m;
+	    for (j = 1; j <= i__1; ++j) {
+		dtpmv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+			1], &c__1);
+/* L20: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of DSPGVX */
+
+} /* dspgvx_ */
diff --git a/contrib/libs/clapack/dsposv.c b/contrib/libs/clapack/dsposv.c
new file mode 100644
index 0000000000..c7892b4da0
--- /dev/null
+++ b/contrib/libs/clapack/dsposv.c
@@ -0,0 +1,418 @@
+/* dsposv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b10 = -1.;
+static doublereal c_b11 = 1.;
+static integer c__1 = 1;
+
+/* Subroutine */ int dsposv_(char *uplo, integer *n, integer *nrhs,
+	doublereal *a, integer *lda, doublereal *b, integer *ldb, doublereal *
+	x, integer *ldx, doublereal *work, real *swork, integer *iter,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, work_dim1, work_offset,
+	    x_dim1, x_offset, i__1;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__;
+    doublereal cte, eps, anrm;
+    integer ptsa;
+    doublereal rnrm, xnrm;
+    integer ptsx;
+    extern logical lsame_(char *, char *);
+    integer iiter;
+    extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *), dsymm_(char *, char *,
+	    integer *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *), dlag2s_(integer *, integer *, doublereal *,
+	    integer *, real *, integer *, integer *), slag2d_(integer *,
+	    integer *, real *, integer *, doublereal *, integer *, integer *),
+	    dlat2s_(char *, integer *, doublereal *, integer *, real *,
+	    integer *, integer *);
+    extern doublereal dlamch_(char *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *),
+	    xerbla_(char *, integer *);
+    extern doublereal dlansy_(char *, char *, integer *, doublereal *,
+	    integer *, doublereal *);
+    extern /* Subroutine */ int dpotrf_(char *, integer *, doublereal *,
+	    integer *, integer *), dpotrs_(char *, integer *, integer
+	    *, doublereal *, integer *, doublereal *, integer *, integer *), spotrf_(char *, integer *, real *, integer *, integer *), spotrs_(char *, integer *, integer *, real *, integer *,
+	    real *, integer *, integer *);
+
+
+/* -- LAPACK PROTOTYPE driver routine (version 3.1.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* May 2007 */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSPOSV computes the solution to a real system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric positive definite matrix and X and B */
+/* are N-by-NRHS matrices. */
+
+/* DSPOSV first attempts to factorize the matrix in SINGLE PRECISION */
+/* and use this factorization within an iterative refinement procedure */
+/* to produce a solution with DOUBLE PRECISION normwise backward error */
+/* quality (see below). If the approach fails the method switches to a */
+/* DOUBLE PRECISION factorization and solve. */
+
+/* The iterative refinement is not going to be a winning strategy if */
+/* the ratio SINGLE PRECISION performance over DOUBLE PRECISION */
+/* performance is too small. A reasonable strategy should take the */
+/* number of right-hand sides and the size of the matrix into account. */
+/* This might be done with a call to ILAENV in the future. Up to now, we */
+/* always try iterative refinement. */
+
+/* The iterative refinement process is stopped if */
+/* ITER > ITERMAX */
+/* or for all the RHS we have: */
+/* RNRM < SQRT(N)*XNRM*ANRM*EPS*BWDMAX */
+/* where */
+/* o ITER is the number of the current iteration in the iterative */
+/* refinement process */
+/* o RNRM is the infinity-norm of the residual */
+/* o XNRM is the infinity-norm of the solution */
+/* o ANRM is the infinity-operator-norm of the matrix A */
+/* o EPS is the machine epsilon returned by DLAMCH('Epsilon') */
+/* The value ITERMAX and BWDMAX are fixed to 30 and 1.0D+00 */
+/* respectively. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input or input/output) DOUBLE PRECISION array, */
+/* dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+/* On exit, if iterative refinement has been successfully used */
+/* (INFO.EQ.0 and ITER.GE.0, see description below), then A is */
+/* unchanged, if double precision factorization has been used */
+/* (INFO.EQ.0 and ITER.LT.0, see description below), then the */
+/* array A contains the factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T. */
+
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* If INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (N*NRHS) */
+/* This array is used to hold the residual vectors. */
+
+/* SWORK (workspace) REAL array, dimension (N*(N+NRHS)) */
+/* This array is used to use the single precision matrix and the */
+/* right-hand sides or solutions in single precision. */
+
+/* ITER (output) INTEGER */
+/* < 0: iterative refinement has failed, double precision */
+/* factorization has been performed */
+/* -1 : the routine fell back to full precision for */
+/* implementation- or machine-specific reasons */
+/* -2 : narrowing the precision induced an overflow, */
+/* the routine fell back to full precision */
+/* -3 : failure of SPOTRF */
+/* -31: stop the iterative refinement after the 30th */
+/* iterations */
+/* > 0: iterative refinement has been successfully used. */
+/* Returns the number of iterations */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i of (DOUBLE */
+/* PRECISION) A is not positive definite, so the */
+/* factorization could not be completed, and the solution */
+/* has not been computed. */
+
+/* ========= */
+
+/* .. Parameters .. */
+
+
+
+
+/* .. Local Scalars .. */
+
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    /* f2c shifts each array pointer so that 1-based, column-major
+       Fortran indexing (a[i + j*a_dim1], etc.) works on the caller's
+       0-based C arrays. */
+    work_dim1 = *n;
+    work_offset = 1 + work_dim1;
+    work -= work_offset;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --swork;
+
+    /* Function Body */
+    *info = 0;
+    *iter = 0;
+
+/* Test the input parameters. */
+
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    } else if (*ldx < max(1,*n)) {
+	*info = -9;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSPOSV", &i__1);
+	return 0;
+    }
+
+/* Quick return if (N.EQ.0). */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Skip single precision iterative refinement if a priori slower */
+/* than double precision factorization. */
+
+    /* Placeholder guard from the reference implementation: the
+       condition is hard-wired FALSE_, so mixed-precision refinement
+       is always attempted (see the ILAENV note in the header). */
+    if (FALSE_) {
+	*iter = -1;
+	goto L40;
+    }
+
+/* Compute some constants. */
+
+    /* cte is the convergence threshold ANRM*EPS*SQRT(N)*BWDMAX,
+       with BWDMAX fixed to 1.0 (the trailing "* 1."). */
+    anrm = dlansy_("I", uplo, n, &a[a_offset], lda, &work[work_offset]);
+    eps = dlamch_("Epsilon");
+    cte = anrm * eps * sqrt((doublereal) (*n)) * 1.;
+
+/* Set the indices PTSA, PTSX for referencing SA and SX in SWORK. */
+
+    ptsa = 1;
+    ptsx = ptsa + *n * *n;
+
+/* Convert B from double precision to single precision and store the */
+/* result in SX. */
+
+    dlag2s_(n, nrhs, &b[b_offset], ldb, &swork[ptsx], n, info);
+
+    if (*info != 0) {
+	*iter = -2;
+	goto L40;
+    }
+
+/* Convert A from double precision to single precision and store the */
+/* result in SA. */
+
+    dlat2s_(uplo, n, &a[a_offset], lda, &swork[ptsa], n, info);
+
+    if (*info != 0) {
+	*iter = -2;
+	goto L40;
+    }
+
+/* Compute the Cholesky factorization of SA. */
+
+    spotrf_(uplo, n, &swork[ptsa], n, info);
+
+    if (*info != 0) {
+	*iter = -3;
+	goto L40;
+    }
+
+/* Solve the system SA*SX = SB. */
+
+    spotrs_(uplo, n, nrhs, &swork[ptsa], n, &swork[ptsx], n, info);
+
+/* Convert SX back to double precision */
+
+    slag2d_(n, nrhs, &swork[ptsx], n, &x[x_offset], ldx, info);
+
+/* Compute R = B - AX (R is WORK). */
+
+    dlacpy_("All", n, nrhs, &b[b_offset], ldb, &work[work_offset], n);
+
+    dsymm_("Left", uplo, n, nrhs, &c_b10, &a[a_offset], lda, &x[x_offset],
+	    ldx, &c_b11, &work[work_offset], n);
+
+/* Check whether the NRHS normwise backward errors satisfy the */
+/* stopping criterion. If yes, set ITER=0 and return. */
+
+    i__1 = *nrhs;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	xnrm = (d__1 = x[idamax_(n, &x[i__ * x_dim1 + 1], &c__1) + i__ *
+		x_dim1], abs(d__1));
+	rnrm = (d__1 = work[idamax_(n, &work[i__ * work_dim1 + 1], &c__1) +
+		i__ * work_dim1], abs(d__1));
+	if (rnrm > xnrm * cte) {
+	    goto L10;
+	}
+    }
+
+/* If we are here, the NRHS normwise backward errors satisfy the */
+/* stopping criterion. We are good to exit. */
+
+    *iter = 0;
+    return 0;
+
+L10:
+
+    /* Iterative refinement: at most 30 passes of solve-in-single,
+       correct-in-double. Each pass solves SA*SX = SR for the current
+       residual and adds the correction to X. */
+    for (iiter = 1; iiter <= 30; ++iiter) {
+
+/* Convert R (in WORK) from double precision to single precision */
+/* and store the result in SX. */
+
+	dlag2s_(n, nrhs, &work[work_offset], n, &swork[ptsx], n, info);
+
+	if (*info != 0) {
+	    *iter = -2;
+	    goto L40;
+	}
+
+/* Solve the system SA*SX = SR. */
+
+	spotrs_(uplo, n, nrhs, &swork[ptsa], n, &swork[ptsx], n, info);
+
+/* Convert SX back to double precision and update the current */
+/* iterate. */
+
+	slag2d_(n, nrhs, &swork[ptsx], n, &work[work_offset], n, info);
+
+	i__1 = *nrhs;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    daxpy_(n, &c_b11, &work[i__ * work_dim1 + 1], &c__1, &x[i__ *
+		    x_dim1 + 1], &c__1);
+	}
+
+/* Compute R = B - AX (R is WORK). */
+
+	dlacpy_("All", n, nrhs, &b[b_offset], ldb, &work[work_offset], n);
+
+	dsymm_("L", uplo, n, nrhs, &c_b10, &a[a_offset], lda, &x[x_offset],
+		ldx, &c_b11, &work[work_offset], n);
+
+/* Check whether the NRHS normwise backward errors satisfy the */
+/* stopping criterion. If yes, set ITER=IITER>0 and return. */
+
+	i__1 = *nrhs;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    xnrm = (d__1 = x[idamax_(n, &x[i__ * x_dim1 + 1], &c__1) + i__ *
+		    x_dim1], abs(d__1));
+	    rnrm = (d__1 = work[idamax_(n, &work[i__ * work_dim1 + 1], &c__1)
+		    + i__ * work_dim1], abs(d__1));
+	    if (rnrm > xnrm * cte) {
+		goto L20;
+	    }
+	}
+
+/* If we are here, the NRHS normwise backward errors satisfy the */
+/* stopping criterion, we are good to exit. */
+
+	*iter = iiter;
+
+	return 0;
+
+L20:
+
+/* L30: */
+	;
+    }
+
+/* If we are at this place of the code, this is because we have */
+/* performed ITER=ITERMAX iterations and never satisfied the */
+/* stopping criterion, set up the ITER flag accordingly and follow */
+/* up on double precision routine. */
+
+    *iter = -31;
+
+L40:
+
+/* Single-precision iterative refinement failed to converge to a */
+/* satisfactory solution, so we resort to double precision. */
+
+    dpotrf_(uplo, n, &a[a_offset], lda, info);
+
+    if (*info != 0) {
+	return 0;
+    }
+
+    dlacpy_("All", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    dpotrs_(uplo, n, nrhs, &a[a_offset], lda, &x[x_offset], ldx, info);
+
+    return 0;
+
+/* End of DSPOSV. */
+
+} /* dsposv_ */
diff --git a/contrib/libs/clapack/dsprfs.c b/contrib/libs/clapack/dsprfs.c
new file mode 100644
index 0000000000..4b0bb0dcea
--- /dev/null
+++ b/contrib/libs/clapack/dsprfs.c
@@ -0,0 +1,421 @@
+/* dsprfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b12 = -1.;
+static doublereal c_b14 = 1.;
+
+/* Subroutine */ int dsprfs_(char *uplo, integer *n, integer *nrhs,
+	doublereal *ap, doublereal *afp, integer *ipiv, doublereal *b,
+	integer *ldb, doublereal *x, integer *ldx, doublereal *ferr,
+	doublereal *berr, doublereal *work, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3;
+    doublereal d__1, d__2, d__3;
+
+    /* Local variables */
+    integer i__, j, k;
+    doublereal s;
+    integer ik, kk;
+    doublereal xk;
+    integer nz;
+    doublereal eps;
+    integer kase;
+    doublereal safe1, safe2;
+    extern logical lsame_(char *, char *);
+    integer isave[3]; /* persistent state for DLACN2 between reverse-communication calls */
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *), daxpy_(integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *);
+    integer count;
+    extern /* Subroutine */ int dspmv_(char *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *);
+    logical upper;
+    extern /* Subroutine */ int dlacn2_(integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    doublereal safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal lstres;
+    extern /* Subroutine */ int dsptrs_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSPRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is symmetric indefinite */
+/* and packed, and provides error bounds and backward error estimates */
+/* for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AP (input) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the symmetric matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* AFP (input) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* The factored form of the matrix A. AFP contains the block */
+/* diagonal matrix D and the multipliers used to obtain the */
+/* factor U or L from the factorization A = U*D*U**T or */
+/* A = L*D*L**T as computed by DSPTRF, stored as a packed */
+/* triangular matrix. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by DSPTRF. */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by DSPTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    --afp;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    } else if (*ldx < max(1,*n)) {
+	*info = -10;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSPRFS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] = 0.;
+	    berr[j] = 0.;
+/* L10: */
+	}
+	return 0;
+    }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+    nz = *n + 1;
+    eps = dlamch_("Epsilon");
+    safmin = dlamch_("Safe minimum");
+    safe1 = nz * safmin;
+    safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+	count = 1;
+	lstres = 3.;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+/* Workspace layout: WORK(1:N) accumulates abs(A)*abs(X) + abs(B), */
+/* WORK(N+1:2*N) holds the residual R (later the DLACN2 work vector), */
+/* and WORK(2*N+1:3*N) is passed to DLACN2 as its own workspace. */
+
+	dcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+	dspmv_(uplo, n, &c_b12, &ap[1], &x[j * x_dim1 + 1], &c__1, &c_b14, &
+		work[*n + 1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    work[i__] = (d__1 = b[i__ + j * b_dim1], abs(d__1));
+/* L30: */
+	}
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+/* KK walks the packed columns of AP; IK walks the entries of */
+/* column K within the packed array. */
+
+	kk = 1;
+	if (upper) {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		s = 0.;
+		xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+		ik = kk;
+		i__3 = k - 1;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    work[i__] += (d__1 = ap[ik], abs(d__1)) * xk;
+		    s += (d__1 = ap[ik], abs(d__1)) * (d__2 = x[i__ + j *
+			    x_dim1], abs(d__2));
+		    ++ik;
+/* L40: */
+		}
+		work[k] = work[k] + (d__1 = ap[kk + k - 1], abs(d__1)) * xk +
+			s;
+		kk += k;
+/* L50: */
+	    }
+	} else {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		s = 0.;
+		xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+		work[k] += (d__1 = ap[kk], abs(d__1)) * xk;
+		ik = kk + 1;
+		i__3 = *n;
+		for (i__ = k + 1; i__ <= i__3; ++i__) {
+		    work[i__] += (d__1 = ap[ik], abs(d__1)) * xk;
+		    s += (d__1 = ap[ik], abs(d__1)) * (d__2 = x[i__ + j *
+			    x_dim1], abs(d__2));
+		    ++ik;
+/* L60: */
+		}
+		work[k] += s;
+		kk += *n - k + 1;
+/* L70: */
+	    }
+	}
+	s = 0.;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (work[i__] > safe2) {
+/* Computing MAX */
+		d__2 = s, d__3 = (d__1 = work[*n + i__], abs(d__1)) / work[
+			i__];
+		s = max(d__2,d__3);
+	    } else {
+/* Computing MAX */
+		d__2 = s, d__3 = ((d__1 = work[*n + i__], abs(d__1)) + safe1)
+			/ (work[i__] + safe1);
+		s = max(d__2,d__3);
+	    }
+/* L80: */
+	}
+	berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+/* (ITMAX is hard-coded as the constant 5 in the test below.) */
+
+	if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+	    dsptrs_(uplo, n, &c__1, &afp[1], &ipiv[1], &work[*n + 1], n, info);
+	    daxpy_(n, &c_b14, &work[*n + 1], &c__1, &x[j * x_dim1 + 1], &c__1)
+		    ;
+	    lstres = berr[j];
+	    ++count;
+	    goto L20;
+	}
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use DLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (work[i__] > safe2) {
+		work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+			work[i__];
+	    } else {
+		work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+			work[i__] + safe1;
+	    }
+/* L90: */
+	}
+
+/* DLACN2 works by reverse communication: each call returns KASE = 1 */
+/* or 2 to request a solve with A (times diag(W) on one side or the */
+/* other), and KASE = 0 once the norm estimate in FERR(J) is final. */
+/* ISAVE preserves its internal state between calls. */
+
+	kase = 0;
+L100:
+	dlacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+		kase, isave);
+	if (kase != 0) {
+	    if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+		dsptrs_(uplo, n, &c__1, &afp[1], &ipiv[1], &work[*n + 1], n,
+			info);
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    work[*n + i__] = work[i__] * work[*n + i__];
+/* L110: */
+		}
+	    } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    work[*n + i__] = work[i__] * work[*n + i__];
+/* L120: */
+		}
+		dsptrs_(uplo, n, &c__1, &afp[1], &ipiv[1], &work[*n + 1], n,
+			info);
+	    }
+	    goto L100;
+	}
+
+/* Normalize error. */
+
+	lstres = 0.;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    d__2 = lstres, d__3 = (d__1 = x[i__ + j * x_dim1], abs(d__1));
+	    lstres = max(d__2,d__3);
+/* L130: */
+	}
+	if (lstres != 0.) {
+	    ferr[j] /= lstres;
+	}
+
+/* L140: */
+    }
+
+    return 0;
+
+/* End of DSPRFS */
+
+} /* dsprfs_ */
diff --git a/contrib/libs/clapack/dspsv.c b/contrib/libs/clapack/dspsv.c
new file mode 100644
index 0000000000..ebf99a2c6b
--- /dev/null
+++ b/contrib/libs/clapack/dspsv.c
@@ -0,0 +1,176 @@
+/* dspsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dspsv_(char *uplo, integer *n, integer *nrhs, doublereal
+	*ap, integer *ipiv, doublereal *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *), dsptrf_(
+	    char *, integer *, doublereal *, integer *, integer *),
+	    dsptrs_(char *, integer *, integer *, doublereal *, integer *,
+	    doublereal *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Purpose */
+/* ======= */
+
+/* DSPSV solves the real linear system A * X = B for an N-by-N */
+/* symmetric matrix A held in packed storage, where B and X are */
+/* N-by-NRHS matrices. */
+
+/* A is factored by the diagonal pivoting method as */
+/* A = U * D * U**T (UPLO = 'U') or A = L * D * L**T (UPLO = 'L'), */
+/* with U (or L) a product of permutation and unit triangular */
+/* matrices and D symmetric block diagonal with 1-by-1 and 2-by-2 */
+/* blocks; the factorization is then used to solve for X. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* 'U' if the upper triangle of A is stored, 'L' for the lower. */
+
+/* N (input) INTEGER */
+/* Order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* Number of right hand sides (columns of B). NRHS >= 0. */
+
+/* AP (input/output) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* On entry, the selected triangle of A packed columnwise: */
+/* UPLO = 'U': AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* UPLO = 'L': AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* On exit, the block diagonal matrix D and the multipliers */
+/* defining U or L from DSPTRF, in the same packed format. */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Interchange and block-structure details from DSPTRF: */
+/* IPIV(k) > 0 means rows/columns k and IPIV(k) were swapped and */
+/* D(k,k) is 1-by-1; a negative pair IPIV(k) = IPIV(k-1) < 0 */
+/* (UPLO = 'U') or IPIV(k) = IPIV(k+1) < 0 (UPLO = 'L') marks a */
+/* 2-by-2 block with the corresponding interchange. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B; on exit, if */
+/* INFO = 0, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* Leading dimension of B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: success. */
+/* < 0: if INFO = -i, argument i had an illegal value. */
+/* > 0: if INFO = i, D(i,i) is exactly zero: the factorization */
+/* finished but D is singular, so no solution was computed. */
+
+/* ===================================================================== */
+
+/* Validate the arguments; report the first offending one via XERBLA. */
+
+    /* Parameter adjustments */
+    --ap;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSPSV ", &i__1);
+	return 0;
+    }
+
+/* Factor A = U*D*U' or A = L*D*L' in place in AP. */
+
+    dsptrf_(uplo, n, &ap[1], &ipiv[1], info);
+    if (*info != 0) {
+
+/* D has an exactly singular diagonal block; leave INFO > 0 and */
+/* skip the solve. */
+
+	return 0;
+    }
+
+/* Solve A*X = B using the factorization, overwriting B with X. */
+
+    dsptrs_(uplo, n, nrhs, &ap[1], &ipiv[1], &b[b_offset], ldb, info);
+
+    return 0;
+
+/* End of DSPSV */
+
+} /* dspsv_ */
diff --git a/contrib/libs/clapack/dspsvx.c b/contrib/libs/clapack/dspsvx.c
new file mode 100644
index 0000000000..2737ef3903
--- /dev/null
+++ b/contrib/libs/clapack/dspsvx.c
@@ -0,0 +1,329 @@
+/* dspsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dspsvx_(char *fact, char *uplo, integer *n, integer *
+	nrhs, doublereal *ap, doublereal *afp, integer *ipiv, doublereal *b,
+	integer *ldb, doublereal *x, integer *ldx, doublereal *rcond,
+	doublereal *ferr, doublereal *berr, doublereal *work, integer *iwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, x_dim1, x_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    doublereal anorm;
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    extern doublereal dlamch_(char *);
+    logical nofact; /* TRUE when FACT = 'N', i.e. we must factor A here */
+    extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *),
+	    xerbla_(char *, integer *);
+    extern doublereal dlansp_(char *, char *, integer *, doublereal *,
+	    doublereal *);
+    extern /* Subroutine */ int dspcon_(char *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, doublereal *, integer *,
+	    integer *), dsprfs_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+	    integer *, integer *), dsptrf_(char *, integer *,
+	    doublereal *, integer *, integer *), dsptrs_(char *,
+	    integer *, integer *, doublereal *, integer *, doublereal *,
+	    integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSPSVX uses the diagonal pivoting factorization A = U*D*U**T or */
+/* A = L*D*L**T to compute the solution to a real system of linear */
+/* equations A * X = B, where A is an N-by-N symmetric matrix stored */
+/* in packed format and X and B are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the diagonal pivoting method is used to factor A as */
+/* A = U * D * U**T, if UPLO = 'U', or */
+/* A = L * D * L**T, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices and D is symmetric and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* 2. If some D(i,i)=0, so that D is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of A has been */
+/* supplied on entry. */
+/* = 'F': On entry, AFP and IPIV contain the factored form of */
+/* A. AP, AFP and IPIV will not be modified. */
+/* = 'N': The matrix A will be copied to AFP and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AP (input) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the symmetric matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. */
+
+/* AFP (input or output) DOUBLE PRECISION array, dimension */
+/* (N*(N+1)/2) */
+/* If FACT = 'F', then AFP is an input argument and on entry */
+/* contains the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**T or A = L*D*L**T as computed by DSPTRF, stored as */
+/* a packed triangular matrix in the same storage format as A. */
+
+/* If FACT = 'N', then AFP is an output argument and on exit */
+/* contains the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**T or A = L*D*L**T as computed by DSPTRF, stored as */
+/* a packed triangular matrix in the same storage format as A. */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by DSPTRF. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by DSPTRF. */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A. If RCOND is less than the machine precision (in */
+/* particular, if RCOND = 0), the matrix is singular to working */
+/* precision. This condition is indicated by a return code of */
+/* INFO > 0. */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: D(i,i) is exactly zero. The factorization */
+/* has been completed but the factor D is exactly */
+/* singular, so the solution and error bounds could */
+/* not be computed. RCOND = 0 is returned. */
+/* = N+1: D is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* Further Details */
+/* =============== */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the symmetric matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = aji) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    --afp;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    nofact = lsame_(fact, "N");
+    if (! nofact && ! lsame_(fact, "F")) {
+	*info = -1;
+    } else if (! lsame_(uplo, "U") && ! lsame_(uplo,
+	    "L")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (*ldb < max(1,*n)) {
+	*info = -9;
+    } else if (*ldx < max(1,*n)) {
+	*info = -11;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSPSVX", &i__1);
+	return 0;
+    }
+
+    if (nofact) {
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+/* AP is first copied into AFP so that A itself is preserved. */
+
+	i__1 = *n * (*n + 1) / 2;
+	dcopy_(&i__1, &ap[1], &c__1, &afp[1], &c__1);
+	dsptrf_(uplo, n, &afp[1], &ipiv[1], info);
+
+/* Return if INFO is non-zero. */
+/* (D is exactly singular; RCOND = 0 signals this to the caller.) */
+
+	if (*info > 0) {
+	    *rcond = 0.;
+	    return 0;
+	}
+    }
+
+/* Compute the norm of the matrix A. */
+/* The infinity norm feeds the DSPCON condition estimate below. */
+
+    anorm = dlansp_("I", uplo, n, &ap[1], &work[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+    dspcon_(uplo, n, &afp[1], &ipiv[1], &anorm, rcond, &work[1], &iwork[1],
+	    info);
+
+/* Compute the solution vectors X. */
+
+    dlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    dsptrs_(uplo, n, nrhs, &afp[1], &ipiv[1], &x[x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+    dsprfs_(uplo, n, nrhs, &ap[1], &afp[1], &ipiv[1], &b[b_offset], ldb, &x[
+	    x_offset], ldx, &ferr[1], &berr[1], &work[1], &iwork[1], info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+/* This is a warning only: X, FERR and BERR above remain valid. */
+
+    if (*rcond < dlamch_("Epsilon")) {
+	*info = *n + 1;
+    }
+
+    return 0;
+
+/* End of DSPSVX */
+
+} /* dspsvx_ */
diff --git a/contrib/libs/clapack/dsptrd.c b/contrib/libs/clapack/dsptrd.c
new file mode 100644
index 0000000000..f5814b8765
--- /dev/null
+++ b/contrib/libs/clapack/dsptrd.c
@@ -0,0 +1,277 @@
+/* dsptrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values (f2c passes scalar arguments by reference) */
+
+static integer c__1 = 1;
+static doublereal c_b8 = 0.;
+static doublereal c_b14 = -1.;
+
+/* Subroutine */ int dsptrd_(char *uplo, integer *n, doublereal *ap,
+	doublereal *d__, doublereal *e, doublereal *tau, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer i__, i1, ii, i1i1;
+    extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+	     integer *);
+    doublereal taui;
+    extern /* Subroutine */ int dspr2_(char *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *);
+    doublereal alpha;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *), dspmv_(char *, integer *,
+	    doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+	     doublereal *, integer *);
+    logical upper;
+    extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *,
+	     integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DSPTRD reduces a real symmetric matrix A stored in packed form to */
+/*  symmetric tridiagonal form T by an orthogonal similarity */
+/*  transformation: Q**T * A * Q = T. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangle of A is stored; */
+/*          = 'L':  Lower triangle of A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  AP      (input/output) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/*          On entry, the upper or lower triangle of the symmetric matrix */
+/*          A, packed columnwise in a linear array.  The j-th column of A */
+/*          is stored in the array AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+/*          On exit, if UPLO = 'U', the diagonal and first superdiagonal */
+/*          of A are overwritten by the corresponding elements of the */
+/*          tridiagonal matrix T, and the elements above the first */
+/*          superdiagonal, with the array TAU, represent the orthogonal */
+/*          matrix Q as a product of elementary reflectors; if UPLO */
+/*          = 'L', the diagonal and first subdiagonal of A are over- */
+/*          written by the corresponding elements of the tridiagonal */
+/*          matrix T, and the elements below the first subdiagonal, with */
+/*          the array TAU, represent the orthogonal matrix Q as a product */
+/*          of elementary reflectors. See Further Details. */
+
+/*  D       (output) DOUBLE PRECISION array, dimension (N) */
+/*          The diagonal elements of the tridiagonal matrix T: */
+/*          D(i) = A(i,i). */
+
+/*  E       (output) DOUBLE PRECISION array, dimension (N-1) */
+/*          The off-diagonal elements of the tridiagonal matrix T: */
+/*          E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. */
+
+/*  TAU     (output) DOUBLE PRECISION array, dimension (N-1) */
+/*          The scalar factors of the elementary reflectors (see Further */
+/*          Details). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  Further Details */
+/*  =============== */
+
+/*  If UPLO = 'U', the matrix Q is represented as a product of elementary */
+/*  reflectors */
+
+/*     Q = H(n-1) . . . H(2) H(1). */
+
+/*  Each H(i) has the form */
+
+/*     H(i) = I - tau * v * v' */
+
+/*  where tau is a real scalar, and v is a real vector with */
+/*  v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in AP, */
+/*  overwriting A(1:i-1,i+1), and tau is stored in TAU(i). */
+
+/*  If UPLO = 'L', the matrix Q is represented as a product of elementary */
+/*  reflectors */
+
+/*     Q = H(1) H(2) . . . H(n-1). */
+
+/*  Each H(i) has the form */
+
+/*     H(i) = I - tau * v * v' */
+
+/*  where tau is a real scalar, and v is a real vector with */
+/*  v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in AP, */
+/*  overwriting A(i+2:n,i), and tau is stored in TAU(i). */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters; only UPLO and N can be invalid here. */
+
+    /* Parameter adjustments: shift to 1-based (Fortran-style) indexing */
+    --tau;
+    --e;
+    --d__;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSPTRD", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible: a 0x0 matrix needs no reduction. */
+
+    if (*n <= 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/*        Reduce the upper triangle of A. */
+/*        I1 is the index in AP of A(1,I+1). */
+
+	i1 = *n * (*n - 1) / 2 + 1;
+	for (i__ = *n - 1; i__ >= 1; --i__) {
+
+/*           Generate elementary reflector H(i) = I - tau * v * v' */
+/*           to annihilate A(1:i-1,i+1) */
+
+	    dlarfg_(&i__, &ap[i1 + i__ - 1], &ap[i1], &c__1, &taui);
+	    e[i__] = ap[i1 + i__ - 1];
+
+	    if (taui != 0.) {
+
+/*              Apply H(i) from both sides to A(1:i,1:i) */
+
+		ap[i1 + i__ - 1] = 1.;
+
+/*              Compute  y := tau * A * v  storing y in TAU(1:i) */
+
+		dspmv_(uplo, &i__, &taui, &ap[1], &ap[i1], &c__1, &c_b8, &tau[
+			1], &c__1);
+
+/*              Compute  w := y - 1/2 * tau * (y'*v) * v */
+
+		alpha = taui * -.5 * ddot_(&i__, &tau[1], &c__1, &ap[i1], &
+			c__1);
+		daxpy_(&i__, &alpha, &ap[i1], &c__1, &tau[1], &c__1);
+
+/*              Apply the transformation as a rank-2 update: */
+/*                 A := A - v * w' - w * v' */
+
+		dspr2_(uplo, &i__, &c_b14, &ap[i1], &c__1, &tau[1], &c__1, &
+			ap[1]);
+
+		ap[i1 + i__ - 1] = e[i__];
+	    }
+	    d__[i__ + 1] = ap[i1 + i__];
+	    tau[i__] = taui;
+	    i1 -= i__;
+/* L10: */
+	}
+	d__[1] = ap[1];
+    } else {
+
+/*        Reduce the lower triangle of A. II is the index in AP of */
+/*        A(i,i) and I1I1 is the index of A(i+1,i+1). */
+
+	ii = 1;
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i1i1 = ii + *n - i__ + 1;
+
+/*           Generate elementary reflector H(i) = I - tau * v * v' */
+/*           to annihilate A(i+2:n,i) */
+
+	    i__2 = *n - i__;
+	    dlarfg_(&i__2, &ap[ii + 1], &ap[ii + 2], &c__1, &taui);
+	    e[i__] = ap[ii + 1];
+
+	    if (taui != 0.) {
+
+/*              Apply H(i) from both sides to A(i+1:n,i+1:n) */
+
+		ap[ii + 1] = 1.;
+
+/*              Compute  y := tau * A * v  storing y in TAU(i:n-1) */
+
+		i__2 = *n - i__;
+		dspmv_(uplo, &i__2, &taui, &ap[i1i1], &ap[ii + 1], &c__1, &
+			c_b8, &tau[i__], &c__1);
+
+/*              Compute  w := y - 1/2 * tau * (y'*v) * v */
+
+		i__2 = *n - i__;
+		alpha = taui * -.5 * ddot_(&i__2, &tau[i__], &c__1, &ap[ii +
+			1], &c__1);
+		i__2 = *n - i__;
+		daxpy_(&i__2, &alpha, &ap[ii + 1], &c__1, &tau[i__], &c__1);
+
+/*              Apply the transformation as a rank-2 update: */
+/*                 A := A - v * w' - w * v' */
+
+		i__2 = *n - i__;
+		dspr2_(uplo, &i__2, &c_b14, &ap[ii + 1], &c__1, &tau[i__], &
+			c__1, &ap[i1i1]);
+
+		ap[ii + 1] = e[i__];
+	    }
+	    d__[i__] = ap[ii];
+	    tau[i__] = taui;
+	    ii = i1i1;
+/* L20: */
+	}
+	d__[*n] = ap[ii];
+    }
+
+    return 0;
+
+/*     End of DSPTRD */
+
+} /* dsptrd_ */
diff --git a/contrib/libs/clapack/dsptrf.c b/contrib/libs/clapack/dsptrf.c
new file mode 100644
index 0000000000..35ad4737a4
--- /dev/null
+++ b/contrib/libs/clapack/dsptrf.c
@@ -0,0 +1,628 @@
+/* dsptrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values (f2c passes scalar arguments by reference) */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dsptrf_(char *uplo, integer *n, doublereal *ap, integer *
+	ipiv, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    doublereal d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, k;
+    doublereal t, r1, d11, d12, d21, d22;
+    integer kc, kk, kp;
+    doublereal wk;
+    integer kx, knc, kpc, npp;
+    doublereal wkm1, wkp1;
+    integer imax, jmax;
+    extern /* Subroutine */ int dspr_(char *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *);
+    doublereal alpha;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    integer kstep;
+    logical upper;
+    doublereal absakk;
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal colmax, rowmax;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DSPTRF computes the factorization of a real symmetric matrix A stored */
+/*  in packed format using the Bunch-Kaufman diagonal pivoting method: */
+
+/*     A = U*D*U**T  or  A = L*D*L**T */
+
+/*  where U (or L) is a product of permutation and unit upper (lower) */
+/*  triangular matrices, and D is symmetric and block diagonal with */
+/*  1-by-1 and 2-by-2 diagonal blocks. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangle of A is stored; */
+/*          = 'L':  Lower triangle of A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  AP      (input/output) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/*          On entry, the upper or lower triangle of the symmetric matrix */
+/*          A, packed columnwise in a linear array.  The j-th column of A */
+/*          is stored in the array AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/*          On exit, the block diagonal matrix D and the multipliers used */
+/*          to obtain the factor U or L, stored as a packed triangular */
+/*          matrix overwriting A (see below for further details). */
+
+/*  IPIV    (output) INTEGER array, dimension (N) */
+/*          Details of the interchanges and the block structure of D. */
+/*          If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/*          interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/*          If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/*          columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/*          is a 2-by-2 diagonal block.  If UPLO = 'L' and IPIV(k) = */
+/*          IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/*          interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0: successful exit */
+/*          < 0: if INFO = -i, the i-th argument had an illegal value */
+/*          > 0: if INFO = i, D(i,i) is exactly zero.  The factorization */
+/*               has been completed, but the block diagonal matrix D is */
+/*               exactly singular, and division by zero will occur if it */
+/*               is used to solve a system of equations. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  5-96 - Based on modifications by J. Lewis, Boeing Computer Services */
+/*         Company */
+
+/*  If UPLO = 'U', then A = U*D*U', where */
+/*     U = P(n)*U(n)* ... *P(k)U(k)* ..., */
+/*  i.e., U is a product of terms P(k)*U(k), where k decreases from n to */
+/*  1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/*  and 2-by-2 diagonal blocks D(k).  P(k) is a permutation matrix as */
+/*  defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */
+/*  that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/*             (   I    v    0   )   k-s */
+/*     U(k) =  (   0    I    0   )   s */
+/*             (   0    0    I   )   n-k */
+/*                k-s   s   n-k */
+
+/*  If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */
+/*  If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */
+/*  and A(k,k), and v overwrites A(1:k-2,k-1:k). */
+
+/*  If UPLO = 'L', then A = L*D*L', where */
+/*     L = P(1)*L(1)* ... *P(k)*L(k)* ..., */
+/*  i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */
+/*  n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/*  and 2-by-2 diagonal blocks D(k).  P(k) is a permutation matrix as */
+/*  defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */
+/*  that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/*             (   I    0     0   )  k-1 */
+/*     L(k) =  (   0    I     0   )  s */
+/*             (   0    v     I   )  n-k-s+1 */
+/*                k-1   s  n-k-s+1 */
+
+/*  If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */
+/*  If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */
+/*  and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters; only UPLO and N can be invalid here. */
+
+    /* Parameter adjustments: shift to 1-based (Fortran-style) indexing */
+    --ipiv;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSPTRF", &i__1);
+	return 0;
+    }
+
+/*     Initialize ALPHA ( = (1+sqrt(17))/8 ) for use in choosing the pivot block size. */
+
+    alpha = (sqrt(17.) + 1.) / 8.;
+
+    if (upper) {
+
+/*        Factorize A as U*D*U' using the upper triangle of A */
+
+/*        K is the main loop index, decreasing from N to 1 in steps of */
+/*        1 or 2 */
+
+	k = *n;
+	kc = (*n - 1) * *n / 2 + 1;
+L10:
+	knc = kc;
+
+/*        If K < 1, exit from loop */
+
+	if (k < 1) {
+	    goto L110;
+	}
+	kstep = 1;
+
+/*        Determine rows and columns to be interchanged and whether */
+/*        a 1-by-1 or 2-by-2 pivot block will be used */
+
+	absakk = (d__1 = ap[kc + k - 1], abs(d__1));
+
+/*        IMAX is the row-index of the largest off-diagonal element in */
+/*        column K, and COLMAX is its absolute value */
+
+	if (k > 1) {
+	    i__1 = k - 1;
+	    imax = idamax_(&i__1, &ap[kc], &c__1);
+	    colmax = (d__1 = ap[kc + imax - 1], abs(d__1));
+	} else {
+	    colmax = 0.;
+	}
+
+	if (max(absakk,colmax) == 0.) {
+
+/*           Column K is zero: set INFO and continue */
+
+	    if (*info == 0) {
+		*info = k;
+	    }
+	    kp = k;
+	} else {
+	    if (absakk >= alpha * colmax) {
+
+/*              no interchange, use 1-by-1 pivot block */
+
+		kp = k;
+	    } else {
+
+/*              JMAX is the column-index of the largest off-diagonal */
+/*              element in row IMAX, and ROWMAX is its absolute value */
+
+		rowmax = 0.;
+		jmax = imax;
+		kx = imax * (imax + 1) / 2 + imax;
+		i__1 = k;
+		for (j = imax + 1; j <= i__1; ++j) {
+		    if ((d__1 = ap[kx], abs(d__1)) > rowmax) {
+			rowmax = (d__1 = ap[kx], abs(d__1));
+			jmax = j;
+		    }
+		    kx += j;
+/* L20: */
+		}
+		kpc = (imax - 1) * imax / 2 + 1;
+		if (imax > 1) {
+		    i__1 = imax - 1;
+		    jmax = idamax_(&i__1, &ap[kpc], &c__1);
+/* Computing MAX */
+		    d__2 = rowmax, d__3 = (d__1 = ap[kpc + jmax - 1], abs(
+			    d__1));
+		    rowmax = max(d__2,d__3);
+		}
+
+		if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/*                 no interchange, use 1-by-1 pivot block */
+
+		    kp = k;
+		} else if ((d__1 = ap[kpc + imax - 1], abs(d__1)) >= alpha *
+			rowmax) {
+
+/*                 interchange rows and columns K and IMAX, use 1-by-1 */
+/*                 pivot block */
+
+		    kp = imax;
+		} else {
+
+/*                 interchange rows and columns K-1 and IMAX, use 2-by-2 */
+/*                 pivot block */
+
+		    kp = imax;
+		    kstep = 2;
+		}
+	    }
+
+	    kk = k - kstep + 1;
+	    if (kstep == 2) {
+		knc = knc - k + 1;
+	    }
+	    if (kp != kk) {
+
+/*              Interchange rows and columns KK and KP in the leading */
+/*              submatrix A(1:k,1:k) */
+
+		i__1 = kp - 1;
+		dswap_(&i__1, &ap[knc], &c__1, &ap[kpc], &c__1);
+		kx = kpc + kp - 1;
+		i__1 = kk - 1;
+		for (j = kp + 1; j <= i__1; ++j) {
+		    kx = kx + j - 1;
+		    t = ap[knc + j - 1];
+		    ap[knc + j - 1] = ap[kx];
+		    ap[kx] = t;
+/* L30: */
+		}
+		t = ap[knc + kk - 1];
+		ap[knc + kk - 1] = ap[kpc + kp - 1];
+		ap[kpc + kp - 1] = t;
+		if (kstep == 2) {
+		    t = ap[kc + k - 2];
+		    ap[kc + k - 2] = ap[kc + kp - 1];
+		    ap[kc + kp - 1] = t;
+		}
+	    }
+
+/*           Update the leading submatrix */
+
+	    if (kstep == 1) {
+
+/*              1-by-1 pivot block D(k): column k now holds */
+
+/*              W(k) = U(k)*D(k) */
+
+/*              where U(k) is the k-th column of U */
+
+/*              Perform a rank-1 update of A(1:k-1,1:k-1) as */
+
+/*              A := A - U(k)*D(k)*U(k)' = A - W(k)*1/D(k)*W(k)' */
+
+		r1 = 1. / ap[kc + k - 1];
+		i__1 = k - 1;
+		d__1 = -r1;
+		dspr_(uplo, &i__1, &d__1, &ap[kc], &c__1, &ap[1]);
+
+/*              Store U(k) in column k */
+
+		i__1 = k - 1;
+		dscal_(&i__1, &r1, &ap[kc], &c__1);
+	    } else {
+
+/*              2-by-2 pivot block D(k): columns k and k-1 now hold */
+
+/*              ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */
+
+/*              where U(k) and U(k-1) are the k-th and (k-1)-th columns */
+/*              of U */
+
+/*              Perform a rank-2 update of A(1:k-2,1:k-2) as */
+
+/*              A := A - ( U(k-1) U(k) )*D(k)*( U(k-1) U(k) )' */
+/*                 = A - ( W(k-1) W(k) )*inv(D(k))*( W(k-1) W(k) )' */
+
+		if (k > 2) {
+
+		    d12 = ap[k - 1 + (k - 1) * k / 2];
+		    d22 = ap[k - 1 + (k - 2) * (k - 1) / 2] / d12;
+		    d11 = ap[k + (k - 1) * k / 2] / d12;
+		    t = 1. / (d11 * d22 - 1.);
+		    d12 = t / d12;
+
+		    for (j = k - 2; j >= 1; --j) {
+			wkm1 = d12 * (d11 * ap[j + (k - 2) * (k - 1) / 2] -
+				ap[j + (k - 1) * k / 2]);
+			wk = d12 * (d22 * ap[j + (k - 1) * k / 2] - ap[j + (k
+				- 2) * (k - 1) / 2]);
+			for (i__ = j; i__ >= 1; --i__) {
+			    ap[i__ + (j - 1) * j / 2] = ap[i__ + (j - 1) * j /
+				     2] - ap[i__ + (k - 1) * k / 2] * wk - ap[
+				    i__ + (k - 2) * (k - 1) / 2] * wkm1;
+/* L40: */
+			}
+			ap[j + (k - 1) * k / 2] = wk;
+			ap[j + (k - 2) * (k - 1) / 2] = wkm1;
+/* L50: */
+		    }
+
+		}
+
+	    }
+	}
+
+/*        Store details of the interchanges in IPIV */
+
+	if (kstep == 1) {
+	    ipiv[k] = kp;
+	} else {
+	    ipiv[k] = -kp;
+	    ipiv[k - 1] = -kp;
+	}
+
+/*        Decrease K and return to the start of the main loop */
+
+	k -= kstep;
+	kc = knc - k;
+	goto L10;
+
+    } else {
+
+/*        Factorize A as L*D*L' using the lower triangle of A */
+
+/*        K is the main loop index, increasing from 1 to N in steps of */
+/*        1 or 2 */
+
+	k = 1;
+	kc = 1;
+	npp = *n * (*n + 1) / 2;
+L60:
+	knc = kc;
+
+/*        If K > N, exit from loop */
+
+	if (k > *n) {
+	    goto L110;
+	}
+	kstep = 1;
+
+/*        Determine rows and columns to be interchanged and whether */
+/*        a 1-by-1 or 2-by-2 pivot block will be used */
+
+	absakk = (d__1 = ap[kc], abs(d__1));
+
+/*        IMAX is the row-index of the largest off-diagonal element in */
+/*        column K, and COLMAX is its absolute value */
+
+	if (k < *n) {
+	    i__1 = *n - k;
+	    imax = k + idamax_(&i__1, &ap[kc + 1], &c__1);
+	    colmax = (d__1 = ap[kc + imax - k], abs(d__1));
+	} else {
+	    colmax = 0.;
+	}
+
+	if (max(absakk,colmax) == 0.) {
+
+/*           Column K is zero: set INFO and continue */
+
+	    if (*info == 0) {
+		*info = k;
+	    }
+	    kp = k;
+	} else {
+	    if (absakk >= alpha * colmax) {
+
+/*              no interchange, use 1-by-1 pivot block */
+
+		kp = k;
+	    } else {
+
+/*              JMAX is the column-index of the largest off-diagonal */
+/*              element in row IMAX, and ROWMAX is its absolute value */
+
+		rowmax = 0.;
+		kx = kc + imax - k;
+		i__1 = imax - 1;
+		for (j = k; j <= i__1; ++j) {
+		    if ((d__1 = ap[kx], abs(d__1)) > rowmax) {
+			rowmax = (d__1 = ap[kx], abs(d__1));
+			jmax = j;
+		    }
+		    kx = kx + *n - j;
+/* L70: */
+		}
+		kpc = npp - (*n - imax + 1) * (*n - imax + 2) / 2 + 1;
+		if (imax < *n) {
+		    i__1 = *n - imax;
+		    jmax = imax + idamax_(&i__1, &ap[kpc + 1], &c__1);
+/* Computing MAX */
+		    d__2 = rowmax, d__3 = (d__1 = ap[kpc + jmax - imax], abs(
+			    d__1));
+		    rowmax = max(d__2,d__3);
+		}
+
+		if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/*                 no interchange, use 1-by-1 pivot block */
+
+		    kp = k;
+		} else if ((d__1 = ap[kpc], abs(d__1)) >= alpha * rowmax) {
+
+/*                 interchange rows and columns K and IMAX, use 1-by-1 */
+/*                 pivot block */
+
+		    kp = imax;
+		} else {
+
+/*                 interchange rows and columns K+1 and IMAX, use 2-by-2 */
+/*                 pivot block */
+
+		    kp = imax;
+		    kstep = 2;
+		}
+	    }
+
+	    kk = k + kstep - 1;
+	    if (kstep == 2) {
+		knc = knc + *n - k + 1;
+	    }
+	    if (kp != kk) {
+
+/*              Interchange rows and columns KK and KP in the trailing */
+/*              submatrix A(k:n,k:n) */
+
+		if (kp < *n) {
+		    i__1 = *n - kp;
+		    dswap_(&i__1, &ap[knc + kp - kk + 1], &c__1, &ap[kpc + 1],
+			     &c__1);
+		}
+		kx = knc + kp - kk;
+		i__1 = kp - 1;
+		for (j = kk + 1; j <= i__1; ++j) {
+		    kx = kx + *n - j + 1;
+		    t = ap[knc + j - kk];
+		    ap[knc + j - kk] = ap[kx];
+		    ap[kx] = t;
+/* L80: */
+		}
+		t = ap[knc];
+		ap[knc] = ap[kpc];
+		ap[kpc] = t;
+		if (kstep == 2) {
+		    t = ap[kc + 1];
+		    ap[kc + 1] = ap[kc + kp - k];
+		    ap[kc + kp - k] = t;
+		}
+	    }
+
+/*           Update the trailing submatrix */
+
+	    if (kstep == 1) {
+
+/*              1-by-1 pivot block D(k): column k now holds */
+
+/*              W(k) = L(k)*D(k) */
+
+/*              where L(k) is the k-th column of L */
+
+		if (k < *n) {
+
+/*                 Perform a rank-1 update of A(k+1:n,k+1:n) as */
+
+/*                 A := A - L(k)*D(k)*L(k)' = A - W(k)*(1/D(k))*W(k)' */
+
+		    r1 = 1. / ap[kc];
+		    i__1 = *n - k;
+		    d__1 = -r1;
+		    dspr_(uplo, &i__1, &d__1, &ap[kc + 1], &c__1, &ap[kc + *n
+			    - k + 1]);
+
+/*                 Store L(k) in column K */
+
+		    i__1 = *n - k;
+		    dscal_(&i__1, &r1, &ap[kc + 1], &c__1);
+		}
+	    } else {
+
+/*              2-by-2 pivot block D(k): columns K and K+1 now hold */
+
+/*              ( W(k) W(k+1) ) = ( L(k) L(k+1) )*D(k) */
+
+/*              where L(k) and L(k+1) are the k-th and (k+1)-th columns */
+/*              of L */
+
+		if (k < *n - 1) {
+
+/*                 Perform a rank-2 update of A(k+2:n,k+2:n) as */
+
+/*                 A := A - ( L(k) L(k+1) )*D(k)*( L(k) L(k+1) )' */
+/*                    = A - ( W(k) W(k+1) )*inv(D(k))*( W(k) W(k+1) )' */
+
+		    d21 = ap[k + 1 + (k - 1) * ((*n << 1) - k) / 2];
+		    d11 = ap[k + 1 + k * ((*n << 1) - k - 1) / 2] / d21;
+		    d22 = ap[k + (k - 1) * ((*n << 1) - k) / 2] / d21;
+		    t = 1. / (d11 * d22 - 1.);
+		    d21 = t / d21;
+
+		    i__1 = *n;
+		    for (j = k + 2; j <= i__1; ++j) {
+			wk = d21 * (d11 * ap[j + (k - 1) * ((*n << 1) - k) /
+				2] - ap[j + k * ((*n << 1) - k - 1) / 2]);
+			wkp1 = d21 * (d22 * ap[j + k * ((*n << 1) - k - 1) /
+				2] - ap[j + (k - 1) * ((*n << 1) - k) / 2]);
+
+			i__2 = *n;
+			for (i__ = j; i__ <= i__2; ++i__) {
+			    ap[i__ + (j - 1) * ((*n << 1) - j) / 2] = ap[i__
+				    + (j - 1) * ((*n << 1) - j) / 2] - ap[i__
+				    + (k - 1) * ((*n << 1) - k) / 2] * wk -
+				    ap[i__ + k * ((*n << 1) - k - 1) / 2] *
+				    wkp1;
+/* L90: */
+			}
+
+			ap[j + (k - 1) * ((*n << 1) - k) / 2] = wk;
+			ap[j + k * ((*n << 1) - k - 1) / 2] = wkp1;
+
+/* L100: */
+		    }
+		}
+	    }
+	}
+
+/*        Store details of the interchanges in IPIV */
+
+	if (kstep == 1) {
+	    ipiv[k] = kp;
+	} else {
+	    ipiv[k] = -kp;
+	    ipiv[k + 1] = -kp;
+	}
+
+/*        Increase K and return to the start of the main loop */
+
+	k += kstep;
+	kc = knc + *n - k + 2;
+	goto L60;
+
+    }
+
+L110:
+    return 0;
+
+/*     End of DSPTRF */
+
+} /* dsptrf_ */
diff --git a/contrib/libs/clapack/dsptri.c b/contrib/libs/clapack/dsptri.c
new file mode 100644
index 0000000000..962be35295
--- /dev/null
+++ b/contrib/libs/clapack/dsptri.c
@@ -0,0 +1,411 @@
+/* dsptri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values (f2c passes scalar arguments by reference) */
+
+static integer c__1 = 1;
+static doublereal c_b11 = -1.;
+static doublereal c_b13 = 0.;
+
+/* Subroutine */ int dsptri_(char *uplo, integer *n, doublereal *ap, integer *
+	ipiv, doublereal *work, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1;
+
+    /* Local variables */
+    doublereal d__;
+    integer j, k;
+    doublereal t, ak;
+    integer kc, kp, kx, kpc, npp;
+    doublereal akp1;
+    extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+	     integer *);
+    doublereal temp, akkp1;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *), dswap_(integer *, doublereal *, integer
+	    *, doublereal *, integer *);
+    integer kstep;
+    extern /* Subroutine */ int dspmv_(char *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+	     integer *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    integer kcnext;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DSPTRI computes the inverse of a real symmetric indefinite matrix */
+/*  A in packed storage using the factorization A = U*D*U**T or */
+/*  A = L*D*L**T computed by DSPTRF. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the details of the factorization are stored */
+/*          as an upper or lower triangular matrix. */
+/*          = 'U':  Upper triangular, form is A = U*D*U**T; */
+/*          = 'L':  Lower triangular, form is A = L*D*L**T. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  AP      (input/output) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/*          On entry, the block diagonal matrix D and the multipliers */
+/*          used to obtain the factor U or L as computed by DSPTRF, */
+/*          stored as a packed triangular matrix. */
+
+/*          On exit, if INFO = 0, the (symmetric) inverse of the original */
+/*          matrix, stored as a packed triangular matrix. The j-th column */
+/*          of inv(A) is stored in the array AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = inv(A)(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', */
+/*             AP(i + (j-1)*(2n-j)/2) = inv(A)(i,j) for j<=i<=n. */
+
+/*  IPIV    (input) INTEGER array, dimension (N) */
+/*          Details of the interchanges and the block structure of D */
+/*          as determined by DSPTRF. */
+
+/*  WORK    (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0: successful exit */
+/*          < 0: if INFO = -i, the i-th argument had an illegal value */
+/*          > 0: if INFO = i, D(i,i) = 0; the matrix is singular and its */
+/*               inverse could not be computed. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters; only UPLO and N can be invalid here. */
+
+    /* Parameter adjustments: shift to 1-based (Fortran-style) indexing */
+    --work;
+    --ipiv;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSPTRI", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible: nothing to invert when N = 0. */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Check that the diagonal matrix D is nonsingular. */
+
+    if (upper) {
+
+/*        Upper triangular storage: examine D from bottom to top */
+
+	kp = *n * (*n + 1) / 2;
+	for (*info = *n; *info >= 1; --(*info)) {
+	    if (ipiv[*info] > 0 && ap[kp] == 0.) {
+		return 0;
+	    }
+	    kp -= *info;
+/* L10: */
+	}
+    } else {
+
+/*        Lower triangular storage: examine D from top to bottom. */
+
+	kp = 1;
+	i__1 = *n;
+	for (*info = 1; *info <= i__1; ++(*info)) {
+	    if (ipiv[*info] > 0 && ap[kp] == 0.) {
+		return 0;
+	    }
+	    kp = kp + *n - *info + 1;
+/* L20: */
+	}
+    }
+    *info = 0;
+
+    if (upper) {
+
+/*        Compute inv(A) from the factorization A = U*D*U'. */
+
+/*        K is the main loop index, increasing from 1 to N in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	k = 1;
+	kc = 1;
+L30:
+
+/*        If K > N, exit from loop. */
+
+	if (k > *n) {
+	    goto L50;
+	}
+
+	kcnext = kc + k;
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Invert the diagonal block. */
+
+	    ap[kc + k - 1] = 1. / ap[kc + k - 1];
+
+/*           Compute column K of the inverse. */
+
+	    if (k > 1) {
+		i__1 = k - 1;
+		dcopy_(&i__1, &ap[kc], &c__1, &work[1], &c__1);
+		i__1 = k - 1;
+		dspmv_(uplo, &i__1, &c_b11, &ap[1], &work[1], &c__1, &c_b13, &
+			ap[kc], &c__1);
+		i__1 = k - 1;
+		ap[kc + k - 1] -= ddot_(&i__1, &work[1], &c__1, &ap[kc], &
+			c__1);
+	    }
+	    kstep = 1;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Invert the diagonal block. */
+
+	    t = (d__1 = ap[kcnext + k - 1], abs(d__1));
+	    ak = ap[kc + k - 1] / t;
+	    akp1 = ap[kcnext + k] / t;
+	    akkp1 = ap[kcnext + k - 1] / t;
+	    d__ = t * (ak * akp1 - 1.);
+	    ap[kc + k - 1] = akp1 / d__;
+	    ap[kcnext + k] = ak / d__;
+	    ap[kcnext + k - 1] = -akkp1 / d__;
+
+/*           Compute columns K and K+1 of the inverse. */
+
+	    if (k > 1) {
+		i__1 = k - 1;
+		dcopy_(&i__1, &ap[kc], &c__1, &work[1], &c__1);
+		i__1 = k - 1;
+		dspmv_(uplo, &i__1, &c_b11, &ap[1], &work[1], &c__1, &c_b13, &
+			ap[kc], &c__1);
+		i__1 = k - 1;
+		ap[kc + k - 1] -= ddot_(&i__1, &work[1], &c__1, &ap[kc], &
+			c__1);
+		i__1 = k - 1;
+		ap[kcnext + k - 1] -= ddot_(&i__1, &ap[kc], &c__1, &ap[kcnext]
+, &c__1);
+		i__1 = k - 1;
+		dcopy_(&i__1, &ap[kcnext], &c__1, &work[1], &c__1);
+		i__1 = k - 1;
+		dspmv_(uplo, &i__1, &c_b11, &ap[1], &work[1], &c__1, &c_b13, &
+			ap[kcnext], &c__1);
+		i__1 = k - 1;
+		ap[kcnext + k] -= ddot_(&i__1, &work[1], &c__1, &ap[kcnext], &
+			c__1);
+	    }
+	    kstep = 2;
+	    kcnext = kcnext + k + 1;
+	}
+
+	kp = (i__1 = ipiv[k], abs(i__1));
+	if (kp != k) {
+
+/*           Interchange rows and columns K and KP in the leading */
+/*           submatrix A(1:k+1,1:k+1) */
+
+	    kpc = (kp - 1) * kp / 2 + 1;
+	    i__1 = kp - 1;
+	    dswap_(&i__1, &ap[kc], &c__1, &ap[kpc], &c__1);
+	    kx = kpc + kp - 1;
+	    i__1 = k - 1;
+	    for (j = kp + 1; j <= i__1; ++j) {
+		kx = kx + j - 1;
+		temp = ap[kc + j - 1];
+		ap[kc + j - 1] = ap[kx];
+		ap[kx] = temp;
+/* L40: */
+	    }
+	    temp = ap[kc + k - 1];
+	    ap[kc + k - 1] = ap[kpc + kp - 1];
+	    ap[kpc + kp - 1] = temp;
+	    if (kstep == 2) {
+		temp = ap[kc + k + k - 1];
+		ap[kc + k + k - 1] = ap[kc + k + kp - 1];
+		ap[kc + k + kp - 1] = temp;
+	    }
+	}
+
+	k += kstep;
+	kc = kcnext;
+	goto L30;
+L50:
+
+	;
+    } else {
+
+/*        Compute inv(A) from the factorization A = L*D*L'. */
+
+/*        K is the main loop index, increasing from 1 to N in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	npp = *n * (*n + 1) / 2;
+	k = *n;
+	kc = npp;
+L60:
+
+/*        If K < 1, exit from loop. */
+
+	if (k < 1) {
+	    goto L80;
+	}
+
+	kcnext = kc - (*n - k + 2);
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Invert the diagonal block. */
+
+	    ap[kc] = 1. / ap[kc];
+
+/*           Compute column K of the inverse. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		dcopy_(&i__1, &ap[kc + 1], &c__1, &work[1], &c__1);
+		i__1 = *n - k;
+		dspmv_(uplo, &i__1, &c_b11, &ap[kc + *n - k + 1], &work[1], &
+			c__1, &c_b13, &ap[kc + 1], &c__1);
+		i__1 = *n - k;
+		ap[kc] -= ddot_(&i__1, &work[1], &c__1, &ap[kc + 1], &c__1);
+	    }
+	    kstep = 1;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Invert the diagonal block. */
+
+	    t = (d__1 = ap[kcnext + 1], abs(d__1));
+	    ak = ap[kcnext] / t;
+	    akp1 = ap[kc] / t;
+	    akkp1 = ap[kcnext + 1] / t;
+	    d__ = t * (ak * akp1 - 1.);
+	    ap[kcnext] = akp1 / d__;
+	    ap[kc] = ak / d__;
+	    ap[kcnext + 1] = -akkp1 / d__;
+
+/*           Compute columns K-1 and K of the inverse. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		dcopy_(&i__1, &ap[kc + 1], &c__1, &work[1], &c__1);
+		i__1 = *n - k;
+		dspmv_(uplo, &i__1, &c_b11, &ap[kc + (*n - k + 1)], &work[1],
+			&c__1, &c_b13, &ap[kc + 1], &c__1);
+		i__1 = *n - k;
+		ap[kc] -= ddot_(&i__1, &work[1], &c__1, &ap[kc + 1], &c__1);
+		i__1 = *n - k;
+		ap[kcnext + 1] -= ddot_(&i__1, &ap[kc + 1], &c__1, &ap[kcnext
+			+ 2], &c__1);
+		i__1 = *n - k;
+		dcopy_(&i__1, &ap[kcnext + 2], &c__1, &work[1], &c__1);
+		i__1 = *n - k;
+		dspmv_(uplo, &i__1, &c_b11, &ap[kc + (*n - k + 1)], &work[1],
+			&c__1, &c_b13, &ap[kcnext + 2], &c__1);
+		i__1 = *n - k;
+		ap[kcnext] -= ddot_(&i__1, &work[1], &c__1, &ap[kcnext + 2], &
+			c__1);
+	    }
+	    kstep = 2;
+	    kcnext -= *n - k + 3;
+	}
+
+	kp = (i__1 = ipiv[k], abs(i__1));
+	if (kp != k) {
+
+/*           Interchange rows and columns K and KP in the trailing */
+/*           submatrix A(k-1:n,k-1:n) */
+
+	    kpc = npp - (*n - kp + 1) * (*n - kp + 2) / 2 + 1;
+	    if (kp < *n) {
+		i__1 = *n - kp;
+		dswap_(&i__1, &ap[kc + kp - k + 1], &c__1, &ap[kpc + 1], &
+			c__1);
+	    }
+	    kx = kc + kp - k;
+	    i__1 = kp - 1;
+	    for (j = k + 1; j <= i__1; ++j) {
+		kx = kx + *n - j + 1;
+		temp = ap[kc + j - k];
+		ap[kc + j - k] = ap[kx];
+		ap[kx] = temp;
+/* L70: */
+	    }
+	    temp = ap[kc];
+	    ap[kc] = ap[kpc];
+	    ap[kpc] = temp;
+	    if (kstep == 2) {
+		temp = ap[kc - *n + k - 1];
+		ap[kc - *n + k - 1] = ap[kc - *n + kp - 1];
+		ap[kc - *n + kp - 1] = temp;
+	    }
+	}
+
+	k -= kstep;
+	kc = kcnext;
+	goto L60;
+L80:
+	;
+    }
+
+    return 0;
+
+/*     End of DSPTRI */
+
+} /* dsptri_ */
diff --git a/contrib/libs/clapack/dsptrs.c b/contrib/libs/clapack/dsptrs.c
new file mode 100644
index 0000000000..4dc105b8f1
--- /dev/null
+++ b/contrib/libs/clapack/dsptrs.c
@@ -0,0 +1,456 @@
+/* dsptrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b7 = -1.;
+static integer c__1 = 1;
+static doublereal c_b19 = 1.;
+
+/* Subroutine */ int dsptrs_(char *uplo, integer *n, integer *nrhs, 
+	doublereal *ap, integer *ipiv, doublereal *b, integer *ldb, integer *
+	info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1;
+    doublereal d__1;
+
+    /* Local variables */
+    integer j, k;
+    doublereal ak, bk;
+    integer kc, kp;
+    doublereal akm1, bkm1;
+    extern /* Subroutine */ int dger_(integer *, integer *, doublereal *, 
+	    doublereal *, integer *, doublereal *, integer *, doublereal *, 
+	    integer *);
+    doublereal akm1k;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, 
+	    integer *);
+    extern logical lsame_(char *, char *);
+    doublereal denom;
+    extern /* Subroutine */ int dgemv_(char *, integer *, integer *, 
+	    doublereal *, doublereal *, integer *, doublereal *, integer *, 
+	    doublereal *, doublereal *, integer *), dswap_(integer *, 
+	    doublereal *, integer *, doublereal *, integer *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSPTRS solves a system of linear equations A*X = B with a real */
+/* symmetric matrix A stored in packed format using the factorization */
+/* A = U*D*U**T or A = L*D*L**T computed by DSPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**T; */
+/* = 'L': Lower triangular, form is A = L*D*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* The block diagonal matrix D and the multipliers used to */
+/* obtain the factor U or L as computed by DSPTRF, stored as a */
+/* packed triangular matrix. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by DSPTRF. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+/* (f2c convention: shift the array pointers so AP, IPIV and B can be */
+/* indexed from 1, matching the original Fortran source.) */
+    --ap;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSPTRS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/* Solve A*X = B, where A = U*D*U'. */
+
+/* First solve U*D*X = B, overwriting B with X. */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+/* KC tracks the packed-storage position of column K of AP; it starts */
+/* one past the end of AP and the "KC -= K" below steps to the first */
+/* element of column K (in upper packed storage column K holds K entries). */
+
+	k = *n;
+	kc = *n * (*n + 1) / 2 + 1;
+L10:
+
+/* If K < 1, exit from loop. */
+
+	if (k < 1) {
+	    goto L30;
+	}
+
+	kc -= k;
+	if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		dswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/* Multiply by inv(U(K)), where U(K) is the transformation */
+/* stored in column K of A. */
+
+	    i__1 = k - 1;
+	    dger_(&i__1, nrhs, &c_b7, &ap[kc], &c__1, &b[k + b_dim1], ldb, &b[
+		    b_dim1 + 1], ldb);
+
+/* Multiply by the inverse of the diagonal block. */
+
+	    d__1 = 1. / ap[kc + k - 1];
+	    dscal_(nrhs, &d__1, &b[k + b_dim1], ldb);
+	    --k;
+	} else {
+
+/* 2 x 2 diagonal block */
+
+/* Interchange rows K-1 and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k - 1) {
+		dswap_(nrhs, &b[k - 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/* Multiply by inv(U(K)), where U(K) is the transformation */
+/* stored in columns K-1 and K of A. */
+
+	    i__1 = k - 2;
+	    dger_(&i__1, nrhs, &c_b7, &ap[kc], &c__1, &b[k + b_dim1], ldb, &b[
+		    b_dim1 + 1], ldb);
+	    i__1 = k - 2;
+	    dger_(&i__1, nrhs, &c_b7, &ap[kc - (k - 1)], &c__1, &b[k - 1 + 
+		    b_dim1], ldb, &b[b_dim1 + 1], ldb);
+
+/* Multiply by the inverse of the diagonal block. */
+
+/* The 2x2 block is solved explicitly (Cramer-style); every entry is */
+/* first scaled by the off-diagonal element AKM1K before DENOM is formed. */
+
+	    akm1k = ap[kc + k - 2];
+	    akm1 = ap[kc - 1] / akm1k;
+	    ak = ap[kc + k - 1] / akm1k;
+	    denom = akm1 * ak - 1.;
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		bkm1 = b[k - 1 + j * b_dim1] / akm1k;
+		bk = b[k + j * b_dim1] / akm1k;
+		b[k - 1 + j * b_dim1] = (ak * bkm1 - bk) / denom;
+		b[k + j * b_dim1] = (akm1 * bk - bkm1) / denom;
+/* L20: */
+	    }
+	    kc = kc - k + 1;
+	    k += -2;
+	}
+
+	goto L10;
+L30:
+
+/* Next solve U'*X = B, overwriting B with X. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+	k = 1;
+	kc = 1;
+L40:
+
+/* If K > N, exit from loop. */
+
+	if (k > *n) {
+	    goto L50;
+	}
+
+	if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Multiply by inv(U'(K)), where U(K) is the transformation */
+/* stored in column K of A. */
+
+	    i__1 = k - 1;
+	    dgemv_("Transpose", &i__1, nrhs, &c_b7, &b[b_offset], ldb, &ap[kc]
+, &c__1, &c_b19, &b[k + b_dim1], ldb);
+
+/* Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		dswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    kc += k;
+	    ++k;
+	} else {
+
+/* 2 x 2 diagonal block */
+
+/* Multiply by inv(U'(K+1)), where U(K+1) is the transformation */
+/* stored in columns K and K+1 of A. */
+
+	    i__1 = k - 1;
+	    dgemv_("Transpose", &i__1, nrhs, &c_b7, &b[b_offset], ldb, &ap[kc]
+, &c__1, &c_b19, &b[k + b_dim1], ldb);
+	    i__1 = k - 1;
+	    dgemv_("Transpose", &i__1, nrhs, &c_b7, &b[b_offset], ldb, &ap[kc 
+		    + k], &c__1, &c_b19, &b[k + 1 + b_dim1], ldb);
+
+/* Interchange rows K and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k) {
+		dswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    kc = kc + (k << 1) + 1;
+	    k += 2;
+	}
+
+	goto L40;
+L50:
+
+	;
+    } else {
+
+/* Solve A*X = B, where A = L*D*L'. */
+
+/* First solve L*D*X = B, overwriting B with X. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+	k = 1;
+	kc = 1;
+L60:
+
+/* If K > N, exit from loop. */
+
+	if (k > *n) {
+	    goto L80;
+	}
+
+	if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		dswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/* Multiply by inv(L(K)), where L(K) is the transformation */
+/* stored in column K of A. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		dger_(&i__1, nrhs, &c_b7, &ap[kc + 1], &c__1, &b[k + b_dim1], 
+			ldb, &b[k + 1 + b_dim1], ldb);
+	    }
+
+/* Multiply by the inverse of the diagonal block. */
+
+	    d__1 = 1. / ap[kc];
+	    dscal_(nrhs, &d__1, &b[k + b_dim1], ldb);
+	    kc = kc + *n - k + 1;
+	    ++k;
+	} else {
+
+/* 2 x 2 diagonal block */
+
+/* Interchange rows K+1 and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k + 1) {
+		dswap_(nrhs, &b[k + 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/* Multiply by inv(L(K)), where L(K) is the transformation */
+/* stored in columns K and K+1 of A. */
+
+	    if (k < *n - 1) {
+		i__1 = *n - k - 1;
+		dger_(&i__1, nrhs, &c_b7, &ap[kc + 2], &c__1, &b[k + b_dim1], 
+			ldb, &b[k + 2 + b_dim1], ldb);
+		i__1 = *n - k - 1;
+		dger_(&i__1, nrhs, &c_b7, &ap[kc + *n - k + 2], &c__1, &b[k + 
+			1 + b_dim1], ldb, &b[k + 2 + b_dim1], ldb);
+	    }
+
+/* Multiply by the inverse of the diagonal block. */
+
+/* Same explicit 2x2 solve as in the upper case; here the block lives at */
+/* AP(KC), AP(KC+1) and AP(KC+N-K+1) in lower packed storage. */
+
+	    akm1k = ap[kc + 1];
+	    akm1 = ap[kc] / akm1k;
+	    ak = ap[kc + *n - k + 1] / akm1k;
+	    denom = akm1 * ak - 1.;
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		bkm1 = b[k + j * b_dim1] / akm1k;
+		bk = b[k + 1 + j * b_dim1] / akm1k;
+		b[k + j * b_dim1] = (ak * bkm1 - bk) / denom;
+		b[k + 1 + j * b_dim1] = (akm1 * bk - bkm1) / denom;
+/* L70: */
+	    }
+	    kc = kc + (*n - k << 1) + 1;
+	    k += 2;
+	}
+
+	goto L60;
+L80:
+
+/* Next solve L'*X = B, overwriting B with X. */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+	k = *n;
+	kc = *n * (*n + 1) / 2 + 1;
+L90:
+
+/* If K < 1, exit from loop. */
+
+	if (k < 1) {
+	    goto L100;
+	}
+
+	kc -= *n - k + 1;
+	if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Multiply by inv(L'(K)), where L(K) is the transformation */
+/* stored in column K of A. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		dgemv_("Transpose", &i__1, nrhs, &c_b7, &b[k + 1 + b_dim1], 
+			ldb, &ap[kc + 1], &c__1, &c_b19, &b[k + b_dim1], ldb);
+	    }
+
+/* Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		dswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    --k;
+	} else {
+
+/* 2 x 2 diagonal block */
+
+/* Multiply by inv(L'(K-1)), where L(K-1) is the transformation */
+/* stored in columns K-1 and K of A. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		dgemv_("Transpose", &i__1, nrhs, &c_b7, &b[k + 1 + b_dim1], 
+			ldb, &ap[kc + 1], &c__1, &c_b19, &b[k + b_dim1], ldb);
+		i__1 = *n - k;
+		dgemv_("Transpose", &i__1, nrhs, &c_b7, &b[k + 1 + b_dim1], 
+			ldb, &ap[kc - (*n - k)], &c__1, &c_b19, &b[k - 1 + 
+			b_dim1], ldb);
+	    }
+
+/* Interchange rows K and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k) {
+		dswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    kc -= *n - k + 2;
+	    k += -2;
+	}
+
+	goto L90;
+L100:
+	;
+    }
+
+    return 0;
+
+/* End of DSPTRS */
+
+} /* dsptrs_ */
diff --git a/contrib/libs/clapack/dstebz.c b/contrib/libs/clapack/dstebz.c
new file mode 100644
index 0000000000..c4c752177a
--- /dev/null
+++ b/contrib/libs/clapack/dstebz.c
@@ -0,0 +1,774 @@
+/* dstebz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+static integer c__0 = 0;
+
+/* Subroutine */ int dstebz_(char *range, char *order, integer *n, doublereal
+	 *vl, doublereal *vu, integer *il, integer *iu, doublereal *abstol,
+	doublereal *d__, doublereal *e, integer *m, integer *nsplit,
+	doublereal *w, integer *iblock, integer *isplit, doublereal *work,
+	integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3;
+    doublereal d__1, d__2, d__3, d__4, d__5;
+
+    /* Builtin functions */
+    double sqrt(doublereal), log(doublereal);
+
+    /* Local variables */
+    integer j, ib, jb, ie, je, nb;
+    doublereal gl;
+    integer im, in;
+    doublereal gu;
+    integer iw;
+    doublereal wl, wu;
+    integer nwl;
+    doublereal ulp, wlu, wul;
+    integer nwu;
+    doublereal tmp1, tmp2;
+    integer iend, ioff, iout, itmp1, jdisc;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    doublereal atoli;
+    integer iwoff;
+    doublereal bnorm;
+    integer itmax;
+    doublereal wkill, rtoli, tnorm;
+    extern doublereal dlamch_(char *);
+    integer ibegin;
+    extern /* Subroutine */ int dlaebz_(integer *, integer *, integer *, 
+	    integer *, integer *, integer *, doublereal *, doublereal *, 
+	    doublereal *, doublereal *, doublereal *, doublereal *, integer *, 
+	    doublereal *, doublereal *, integer *, integer *, doublereal *, 
+	    integer *, integer *);
+    integer irange, idiscl;
+    doublereal safemn;
+    integer idumma[1];
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    integer idiscu, iorder;
+    logical ncnvrg;
+    doublereal pivmin;
+    logical toofew;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+/* 8-18-00: Increase FUDGE factor for T3E (eca) */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSTEBZ computes the eigenvalues of a symmetric tridiagonal */
+/* matrix T. The user may ask for all eigenvalues, all eigenvalues */
+/* in the half-open interval (VL, VU], or the IL-th through IU-th */
+/* eigenvalues. */
+
+/* To avoid overflow, the matrix must be scaled so that its */
+/* largest element is no greater than overflow**(1/2) * */
+/* underflow**(1/4) in absolute value, and for greatest */
+/* accuracy, it should not be much smaller than that. */
+
+/* See W. Kahan "Accurate Eigenvalues of a Symmetric Tridiagonal */
+/* Matrix", Report CS41, Computer Science Dept., Stanford */
+/* University, July 21, 1966. */
+
+/* Arguments */
+/* ========= */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': ("All") all eigenvalues will be found. */
+/* = 'V': ("Value") all eigenvalues in the half-open interval */
+/* (VL, VU] will be found. */
+/* = 'I': ("Index") the IL-th through IU-th eigenvalues (of the */
+/* entire matrix) will be found. */
+
+/* ORDER (input) CHARACTER*1 */
+/* = 'B': ("By Block") the eigenvalues will be grouped by */
+/* split-off block (see IBLOCK, ISPLIT) and */
+/* ordered from smallest to largest within */
+/* the block. */
+/* = 'E': ("Entire matrix") */
+/* the eigenvalues for the entire matrix */
+/* will be ordered from smallest to */
+/* largest. */
+
+/* N (input) INTEGER */
+/* The order of the tridiagonal matrix T. N >= 0. */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. Eigenvalues less than or equal */
+/* to VL, or greater than VU, will not be returned. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) DOUBLE PRECISION */
+/* The absolute tolerance for the eigenvalues. An eigenvalue */
+/* (or cluster) is considered to be located if it has been */
+/* determined to lie in an interval whose width is ABSTOL or */
+/* less. If ABSTOL is less than or equal to zero, then ULP*|T| */
+/* will be used, where |T| means the 1-norm of T. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*DLAMCH('S'), not zero. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the tridiagonal matrix T. */
+
+/* E (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) off-diagonal elements of the tridiagonal matrix T. */
+
+/* M (output) INTEGER */
+/* The actual number of eigenvalues found. 0 <= M <= N. */
+/* (See also the description of INFO=2,3.) */
+
+/* NSPLIT (output) INTEGER */
+/* The number of diagonal blocks in the matrix T. */
+/* 1 <= NSPLIT <= N. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* On exit, the first M elements of W will contain the */
+/* eigenvalues. (DSTEBZ may use the remaining N-M elements as */
+/* workspace.) */
+
+/* IBLOCK (output) INTEGER array, dimension (N) */
+/* At each row/column j where E(j) is zero or small, the */
+/* matrix T is considered to split into a block diagonal */
+/* matrix. On exit, if INFO = 0, IBLOCK(i) specifies to which */
+/* block (from 1 to the number of blocks) the eigenvalue W(i) */
+/* belongs. (DSTEBZ may use the remaining N-M elements as */
+/* workspace.) */
+
+/* ISPLIT (output) INTEGER array, dimension (N) */
+/* The splitting points, at which T breaks up into submatrices. */
+/* The first submatrix consists of rows/columns 1 to ISPLIT(1), */
+/* the second of rows/columns ISPLIT(1)+1 through ISPLIT(2), */
+/* etc., and the NSPLIT-th consists of rows/columns */
+/* ISPLIT(NSPLIT-1)+1 through ISPLIT(NSPLIT)=N. */
+/* (Only the first NSPLIT elements will actually be used, but */
+/* since the user cannot know a priori what value NSPLIT will */
+/* have, N words must be reserved for ISPLIT.) */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (4*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (3*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: some or all of the eigenvalues failed to converge or */
+/* were not computed: */
+/* =1 or 3: Bisection failed to converge for some */
+/* eigenvalues; these eigenvalues are flagged by a */
+/* negative block number. The effect is that the */
+/* eigenvalues may not be as accurate as the */
+/* absolute and relative tolerances. This is */
+/* generally caused by unexpectedly inaccurate */
+/* arithmetic. */
+/* =2 or 3: RANGE='I' only: Not all of the eigenvalues */
+/* IL:IU were found. */
+/* Effect: M < IU+1-IL */
+/* Cause: non-monotonic arithmetic, causing the */
+/* Sturm sequence to be non-monotonic. */
+/* Cure: recalculate, using RANGE='A', and pick */
+/* out eigenvalues IL:IU. In some cases, */
+/* increasing the PARAMETER "FUDGE" may */
+/* make things work. */
+/* = 4: RANGE='I', and the Gershgorin interval */
+/* initially used was too small. No eigenvalues */
+/* were computed. */
+/* Probable cause: your machine has sloppy */
+/* floating-point arithmetic. */
+/* Cure: Increase the PARAMETER "FUDGE", */
+/* recompile, and try again. */
+
+/* Internal Parameters */
+/* =================== */
+
+/* RELFAC DOUBLE PRECISION, default = 2.0e0 */
+/* The relative tolerance. An interval (a,b] lies within */
+/* "relative tolerance" if b-a < RELFAC*ulp*max(|a|,|b|), */
+/* where "ulp" is the machine precision (distance from 1 to */
+/* the next larger floating point number.) */
+
+/* FUDGE DOUBLE PRECISION, default = 2 */
+/* A "fudge factor" to widen the Gershgorin intervals. Ideally, */
+/* a value of 1 should work, but on machines with sloppy */
+/* arithmetic, this needs to be larger. The default for */
+/* publicly released versions should be large enough to handle */
+/* the worst machine around. Note that this has no effect */
+/* on accuracy of the solution. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+/* (f2c convention: shift the array pointers so all arrays can be */
+/* indexed from 1, matching the original Fortran source.) */
+    --iwork;
+    --work;
+    --isplit;
+    --iblock;
+    --w;
+    --e;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+
+/* Decode RANGE */
+
+    if (lsame_(range, "A")) {
+	irange = 1;
+    } else if (lsame_(range, "V")) {
+	irange = 2;
+    } else if (lsame_(range, "I")) {
+	irange = 3;
+    } else {
+	irange = 0;
+    }
+
+/* Decode ORDER */
+
+    if (lsame_(order, "B")) {
+	iorder = 2;
+    } else if (lsame_(order, "E")) {
+	iorder = 1;
+    } else {
+	iorder = 0;
+    }
+
+/* Check for Errors */
+
+    if (irange <= 0) {
+	*info = -1;
+    } else if (iorder <= 0) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (irange == 2) {
+	if (*vl >= *vu) {
+	    *info = -5;
+	}
+    } else if (irange == 3 && (*il < 1 || *il > max(1,*n))) {
+	*info = -6;
+    } else if (irange == 3 && (*iu < min(*n,*il) || *iu > *n)) {
+	*info = -7;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSTEBZ", &i__1);
+	return 0;
+    }
+
+/* Initialize error flags */
+
+    *info = 0;
+    ncnvrg = FALSE_;
+    toofew = FALSE_;
+
+/* Quick return if possible */
+
+    *m = 0;
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Simplifications: */
+
+    if (irange == 3 && *il == 1 && *iu == *n) {
+	irange = 1;
+    }
+
+/* Get machine constants */
+/* NB is the minimum vector length for vector bisection, or 0 */
+/* if only scalar is to be done. */
+
+    safemn = dlamch_("S");
+    ulp = dlamch_("P");
+    rtoli = ulp * 2.;
+    nb = ilaenv_(&c__1, "DSTEBZ", " ", n, &c_n1, &c_n1, &c_n1);
+    if (nb <= 1) {
+	nb = 0;
+    }
+
+/* Special Case when N=1 */
+
+    if (*n == 1) {
+	*nsplit = 1;
+	isplit[1] = 1;
+	if (irange == 2 && (*vl >= d__[1] || *vu < d__[1])) {
+	    *m = 0;
+	} else {
+	    w[1] = d__[1];
+	    iblock[1] = 1;
+	    *m = 1;
+	}
+	return 0;
+    }
+
+/* Compute Splitting Points */
+
+/* An off-diagonal E(j-1) is treated as negligible -- and the matrix is */
+/* split between rows j-1 and j -- when E(j-1)**2 is not larger than */
+/* |D(j)*D(j-1)|*ULP**2 + SAFEMN; otherwise E(j-1)**2 is saved in */
+/* WORK(j-1) and folded into PIVMIN. */
+
+    *nsplit = 1;
+    work[*n] = 0.;
+    pivmin = 1.;
+
+/* DIR$ NOVECTOR */
+    i__1 = *n;
+    for (j = 2; j <= i__1; ++j) {
+/* Computing 2nd power */
+	d__1 = e[j - 1];
+	tmp1 = d__1 * d__1;
+/* Computing 2nd power */
+	d__2 = ulp;
+	if ((d__1 = d__[j] * d__[j - 1], abs(d__1)) * (d__2 * d__2) + safemn 
+		> tmp1) {
+	    isplit[*nsplit] = j - 1;
+	    ++(*nsplit);
+	    work[j - 1] = 0.;
+	} else {
+	    work[j - 1] = tmp1;
+	    pivmin = max(pivmin,tmp1);
+	}
+/* L10: */
+    }
+    isplit[*nsplit] = *n;
+/* PIVMIN is now SAFEMN times the largest retained E(j)**2 (and at */
+/* least SAFEMN, since it started at 1). */
+    pivmin *= safemn;
+
+/* Compute Interval and ATOLI */
+
+    if (irange == 3) {
+
+/* RANGE='I': Compute the interval containing eigenvalues */
+/* IL through IU. */
+
+/* Compute Gershgorin interval for entire (split) matrix */
+/* and use it as the initial interval */
+
+	gu = d__[1];
+	gl = d__[1];
+	tmp1 = 0.;
+
+	i__1 = *n - 1;
+	for (j = 1; j <= i__1; ++j) {
+	    tmp2 = sqrt(work[j]);
+/* Computing MAX */
+	    d__1 = gu, d__2 = d__[j] + tmp1 + tmp2;
+	    gu = max(d__1,d__2);
+/* Computing MIN */
+	    d__1 = gl, d__2 = d__[j] - tmp1 - tmp2;
+	    gl = min(d__1,d__2);
+	    tmp1 = tmp2;
+/* L20: */
+	}
+
+/* Computing MAX */
+	d__1 = gu, d__2 = d__[*n] + tmp1;
+	gu = max(d__1,d__2);
+/* Computing MIN */
+	d__1 = gl, d__2 = d__[*n] - tmp1;
+	gl = min(d__1,d__2);
+/* Computing MAX */
+	d__1 = abs(gl), d__2 = abs(gu);
+	tnorm = max(d__1,d__2);
+/* Widen the Gershgorin interval; the 2.1 multipliers are the FUDGE */
+/* factor described under "Internal Parameters" above. */
+	gl = gl - tnorm * 2.1 * ulp * *n - pivmin * 4.2000000000000002;
+	gu = gu + tnorm * 2.1 * ulp * *n + pivmin * 2.1;
+
+/* Compute Iteration parameters */
+
+	itmax = (integer) ((log(tnorm + pivmin) - log(pivmin)) / log(2.)) + 2;
+	if (*abstol <= 0.) {
+	    atoli = ulp * tnorm;
+	} else {
+	    atoli = *abstol;
+	}
+
+	work[*n + 1] = gl;
+	work[*n + 2] = gl;
+	work[*n + 3] = gu;
+	work[*n + 4] = gu;
+	work[*n + 5] = gl;
+	work[*n + 6] = gu;
+	iwork[1] = -1;
+	iwork[2] = -1;
+	iwork[3] = *n + 1;
+	iwork[4] = *n + 1;
+	iwork[5] = *il - 1;
+	iwork[6] = *iu;
+
+/* NOTE(review): first argument 3 selects DLAEBZ's IJOB=3 mode, which */
+/* bisects until the endpoint eigenvalue counts match IL-1 and IU -- */
+/* confirm against the DLAEBZ interface documentation. */
+	dlaebz_(&c__3, &itmax, n, &c__2, &c__2, &nb, &atoli, &rtoli, &pivmin, 
+		&d__[1], &e[1], &work[1], &iwork[5], &work[*n + 1], &work[*n 
+		+ 5], &iout, &iwork[1], &w[1], &iblock[1], &iinfo);
+
+	if (iwork[6] == *iu) {
+	    wl = work[*n + 1];
+	    wlu = work[*n + 3];
+	    nwl = iwork[1];
+	    wu = work[*n + 4];
+	    wul = work[*n + 2];
+	    nwu = iwork[4];
+	} else {
+	    wl = work[*n + 2];
+	    wlu = work[*n + 4];
+	    nwl = iwork[2];
+	    wu = work[*n + 3];
+	    wul = work[*n + 1];
+	    nwu = iwork[3];
+	}
+
+/* Counts outside [0,N-1] x [1,N] mean the initial Gershgorin interval */
+/* was too small: report INFO=4 as documented above. */
+	if (nwl < 0 || nwl >= *n || nwu < 1 || nwu > *n) {
+	    *info = 4;
+	    return 0;
+	}
+    } else {
+
+/* RANGE='A' or 'V' -- Set ATOLI */
+
+/* Computing MAX */
+	d__3 = abs(d__[1]) + abs(e[1]), d__4 = (d__1 = d__[*n], abs(d__1)) + (
+		d__2 = e[*n - 1], abs(d__2));
+	tnorm = max(d__3,d__4);
+
+	i__1 = *n - 1;
+	for (j = 2; j <= i__1; ++j) {
+/* Computing MAX */
+	    d__4 = tnorm, d__5 = (d__1 = d__[j], abs(d__1)) + (d__2 = e[j - 1]
+		    , abs(d__2)) + (d__3 = e[j], abs(d__3));
+	    tnorm = max(d__4,d__5);
+/* L30: */
+	}
+
+	if (*abstol <= 0.) {
+	    atoli = ulp * tnorm;
+	} else {
+	    atoli = *abstol;
+	}
+
+	if (irange == 2) {
+	    wl = *vl;
+	    wu = *vu;
+	} else {
+	    wl = 0.;
+	    wu = 0.;
+	}
+    }
+
+/* Find Eigenvalues -- Loop Over Blocks and recompute NWL and NWU. */
+/* NWL accumulates the number of eigenvalues .le. WL, */
+/* NWU accumulates the number of eigenvalues .le. WU */
+
+    *m = 0;
+    iend = 0;
+    *info = 0;
+    nwl = 0;
+    nwu = 0;
+
+    i__1 = *nsplit;
+    for (jb = 1; jb <= i__1; ++jb) {
+	ioff = iend;
+	ibegin = ioff + 1;
+	iend = isplit[jb];
+	in = iend - ioff;
+
+	if (in == 1) {
+
+/* Special Case -- IN=1 */
+
+	    if (irange == 1 || wl >= d__[ibegin] - pivmin) {
+		++nwl;
+	    }
+	    if (irange == 1 || wu >= d__[ibegin] - pivmin) {
+		++nwu;
+	    }
+	    if (irange == 1 || wl < d__[ibegin] - pivmin && wu >= d__[ibegin] 
+		    - pivmin) {
+		++(*m);
+		w[*m] = d__[ibegin];
+		iblock[*m] = jb;
+	    }
+	} else {
+
+/* General Case -- IN > 1 */
+
+/* Compute Gershgorin Interval */
+/* and use it as the initial interval */
+
+	    gu = d__[ibegin];
+	    gl = d__[ibegin];
+	    tmp1 = 0.;
+
+	    i__2 = iend - 1;
+	    for (j = ibegin; j <= i__2; ++j) {
+		tmp2 = (d__1 = e[j], abs(d__1));
+/* Computing MAX */
+		d__1 = gu, d__2 = d__[j] + tmp1 + tmp2;
+		gu = max(d__1,d__2);
+/* Computing MIN */
+		d__1 = gl, d__2 = d__[j] - tmp1 - tmp2;
+		gl = min(d__1,d__2);
+		tmp1 = tmp2;
+/* L40: */
+	    }
+
+/* Computing MAX */
+	    d__1 = gu, d__2 = d__[iend] + tmp1;
+	    gu = max(d__1,d__2);
+/* Computing MIN */
+	    d__1 = gl, d__2 = d__[iend] - tmp1;
+	    gl = min(d__1,d__2);
+/* Computing MAX */
+	    d__1 = abs(gl), d__2 = abs(gu);
+	    bnorm = max(d__1,d__2);
+	    gl = gl - bnorm * 2.1 * ulp * in - pivmin * 2.1;
+	    gu = gu + bnorm * 2.1 * ulp * in + pivmin * 2.1;
+
+/* Compute ATOLI for the current submatrix */
+
+	    if (*abstol <= 0.) {
+/* Computing MAX */
+		d__1 = abs(gl), d__2 = abs(gu);
+		atoli = ulp * max(d__1,d__2);
+	    } else {
+		atoli = *abstol;
+	    }
+
+	    if (irange > 1) {
+		if (gu < wl) {
+		    nwl += in;
+		    nwu += in;
+		    goto L70;
+		}
+		gl = max(gl,wl);
+		gu = min(gu,wu);
+		if (gl >= gu) {
+		    goto L70;
+		}
+	    }
+
+/* Set Up Initial Interval */
+
+	    work[*n + 1] = gl;
+	    work[*n + in + 1] = gu;
+/* NOTE(review): IJOB=1 call -- counts eigenvalues at the interval */
+/* endpoints for this block (see the DLAEBZ interface). */
+	    dlaebz_(&c__1, &c__0, &in, &in, &c__1, &nb, &atoli, &rtoli, &
+		    pivmin, &d__[ibegin], &e[ibegin], &work[ibegin], idumma, &
+		    work[*n + 1], &work[*n + (in << 1) + 1], &im, &iwork[1], &
+		    w[*m + 1], &iblock[*m + 1], &iinfo);
+
+	    nwl += iwork[1];
+	    nwu += iwork[in + 1];
+	    iwoff = *m - iwork[1];
+
+/* Compute Eigenvalues */
+
+	    itmax = (integer) ((log(gu - gl + pivmin) - log(pivmin)) / log(2.)
+		    ) + 2;
+	    dlaebz_(&c__2, &itmax, &in, &in, &c__1, &nb, &atoli, &rtoli, &
+		    pivmin, &d__[ibegin], &e[ibegin], &work[ibegin], idumma, &
+		    work[*n + 1], &work[*n + (in << 1) + 1], &iout, &iwork[1], 
+		    &w[*m + 1], &iblock[*m + 1], &iinfo);
+
+/* Copy Eigenvalues Into W and IBLOCK */
+/* Use -JB for block number for unconverged eigenvalues. */
+
+	    i__2 = iout;
+	    for (j = 1; j <= i__2; ++j) {
+/* Each converged interval is represented by its midpoint. */
+		tmp1 = (work[j + *n] + work[j + in + *n]) * .5;
+
+/* Flag non-convergence. */
+
+		if (j > iout - iinfo) {
+		    ncnvrg = TRUE_;
+		    ib = -jb;
+		} else {
+		    ib = jb;
+		}
+		i__3 = iwork[j + in] + iwoff;
+		for (je = iwork[j] + 1 + iwoff; je <= i__3; ++je) {
+		    w[je] = tmp1;
+		    iblock[je] = ib;
+/* L50: */
+		}
+/* L60: */
+	    }
+
+	    *m += im;
+	}
+L70:
+	;
+    }
+
+/* If RANGE='I', then (WL,WU) contains eigenvalues NWL+1,...,NWU */
+/* If NWL+1 < IL or NWU > IU, discard extra eigenvalues. */
+
+    if (irange == 3) {
+	im = 0;
+	idiscl = *il - 1 - nwl;
+	idiscu = nwu - *iu;
+
+	if (idiscl > 0 || idiscu > 0) {
+	    i__1 = *m;
+	    for (je = 1; je <= i__1; ++je) {
+		if (w[je] <= wlu && idiscl > 0) {
+		    --idiscl;
+		} else if (w[je] >= wul && idiscu > 0) {
+		    --idiscu;
+		} else {
+		    ++im;
+		    w[im] = w[je];
+		    iblock[im] = iblock[je];
+		}
+/* L80: */
+	    }
+	    *m = im;
+	}
+	if (idiscl > 0 || idiscu > 0) {
+
+/* Code to deal with effects of bad arithmetic: */
+/* Some low eigenvalues to be discarded are not in (WL,WLU], */
+/* or high eigenvalues to be discarded are not in (WUL,WU] */
+/* so just kill off the smallest IDISCL/largest IDISCU */
+/* eigenvalues, by simply finding the smallest/largest */
+/* eigenvalue(s). */
+
+/* (If N(w) is monotone non-decreasing, this should never */
+/* happen.) */
+
+	    if (idiscl > 0) {
+		wkill = wu;
+		i__1 = idiscl;
+		for (jdisc = 1; jdisc <= i__1; ++jdisc) {
+		    iw = 0;
+		    i__2 = *m;
+		    for (je = 1; je <= i__2; ++je) {
+/* IBLOCK(je) == 0 marks an already-killed entry; skip those. */
+			if (iblock[je] != 0 && (w[je] < wkill || iw == 0)) {
+			    iw = je;
+			    wkill = w[je];
+			}
+/* L90: */
+		    }
+		    iblock[iw] = 0;
+/* L100: */
+		}
+	    }
+	    if (idiscu > 0) {
+
+		wkill = wl;
+		i__1 = idiscu;
+		for (jdisc = 1; jdisc <= i__1; ++jdisc) {
+		    iw = 0;
+		    i__2 = *m;
+		    for (je = 1; je <= i__2; ++je) {
+			if (iblock[je] != 0 && (w[je] > wkill || iw == 0)) {
+			    iw = je;
+			    wkill = w[je];
+			}
+/* L110: */
+		    }
+		    iblock[iw] = 0;
+/* L120: */
+		}
+	    }
+/* Compact W and IBLOCK, dropping the killed (IBLOCK==0) entries. */
+	    im = 0;
+	    i__1 = *m;
+	    for (je = 1; je <= i__1; ++je) {
+		if (iblock[je] != 0) {
+		    ++im;
+		    w[im] = w[je];
+		    iblock[im] = iblock[je];
+		}
+/* L130: */
+	    }
+	    *m = im;
+	}
+	if (idiscl < 0 || idiscu < 0) {
+	    toofew = TRUE_;
+	}
+    }
+
+/* If ORDER='B', do nothing -- the eigenvalues are already sorted */
+/* by block. */
+/* If ORDER='E', sort the eigenvalues from smallest to largest */
+
+/* (Simple selection sort; swaps W and IBLOCK entries together.) */
+    if (iorder == 1 && *nsplit > 1) {
+	i__1 = *m - 1;
+	for (je = 1; je <= i__1; ++je) {
+	    ie = 0;
+	    tmp1 = w[je];
+	    i__2 = *m;
+	    for (j = je + 1; j <= i__2; ++j) {
+		if (w[j] < tmp1) {
+		    ie = j;
+		    tmp1 = w[j];
+		}
+/* L140: */
+	    }
+
+	    if (ie != 0) {
+		itmp1 = iblock[ie];
+		w[ie] = w[je];
+		iblock[ie] = iblock[je];
+		w[je] = tmp1;
+		iblock[je] = itmp1;
+	    }
+/* L150: */
+	}
+    }
+
+/* INFO encodes both failure flags: +1 for non-convergence, +2 for */
+/* missing eigenvalues (so 3 means both), as documented above. */
+    *info = 0;
+    if (ncnvrg) {
+	++(*info);
+    }
+    if (toofew) {
+	*info += 2;
+    }
+    return 0;
+
+/* End of DSTEBZ */
+
+} /* dstebz_ */
diff --git a/contrib/libs/clapack/dstedc.c b/contrib/libs/clapack/dstedc.c
new file mode 100644
index 0000000000..6824ecc4b3
--- /dev/null
+++ b/contrib/libs/clapack/dstedc.c
@@ -0,0 +1,488 @@
+/* dstedc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__9 = 9;
+static integer c__0 = 0;
+static integer c__2 = 2;
+static doublereal c_b17 = 0.;
+static doublereal c_b18 = 1.;
+static integer c__1 = 1;
+
+/* Subroutine */ int dstedc_(char *compz, integer *n, doublereal *d__, 
+	doublereal *e, doublereal *z__, integer *ldz, doublereal *work, 
+	integer *lwork, integer *iwork, integer *liwork, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1, i__2;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double log(doublereal);
+    integer pow_ii(integer *, integer *);
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, k, m;
+    doublereal p;
+    integer ii, lgn;
+    doublereal eps, tiny;
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, 
+	    integer *, doublereal *, doublereal *, integer *, doublereal *, 
+	    integer *, doublereal *, doublereal *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *, 
+	    doublereal *, integer *);
+    integer lwmin;
+    extern /* Subroutine */ int dlaed0_(integer *, integer *, integer *, 
+	    doublereal *, doublereal *, doublereal *, integer *, doublereal *, 
+	    integer *, doublereal *, integer *, integer *);
+    integer start;
+    extern doublereal dlamch_(char *);
+    extern /* Subroutine */ int dlascl_(char *, integer *, integer *, 
+	    doublereal *, doublereal *, integer *, integer *, doublereal *, 
+	    integer *, integer *), dlacpy_(char *, integer *, integer 
+	    *, doublereal *, integer *, doublereal *, integer *), 
+	    dlaset_(char *, integer *, integer *, doublereal *, doublereal *, 
+	    doublereal *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    integer finish;
+    extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+    extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *, 
+	    integer *), dlasrt_(char *, integer *, doublereal *, integer *);
+    integer liwmin, icompz;
+    extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *, 
+	    doublereal *, doublereal *, integer *, doublereal *, integer *);
+    doublereal orgnrm;
+    logical lquery;
+    integer smlsiz, storez, strtrw;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSTEDC computes all eigenvalues and, optionally, eigenvectors of a */
+/* symmetric tridiagonal matrix using the divide and conquer method. */
+/* The eigenvectors of a full or band real symmetric matrix can also be */
+/* found if DSYTRD or DSPTRD or DSBTRD has been used to reduce this */
+/* matrix to tridiagonal form. */
+
+/* This code makes very mild assumptions about floating point */
+/* arithmetic. It will work on machines with a guard digit in */
+/* add/subtract, or on those binary machines without guard digits */
+/* which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. */
+/* It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. See DLAED3 for details. */
+
+/* Arguments */
+/* ========= */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only. */
+/* = 'I': Compute eigenvectors of tridiagonal matrix also. */
+/* = 'V': Compute eigenvectors of original dense symmetric */
+/* matrix also. On entry, Z contains the orthogonal */
+/* matrix used to reduce the original matrix to */
+/* tridiagonal form. */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the diagonal elements of the tridiagonal matrix. */
+/* On exit, if INFO = 0, the eigenvalues in ascending order. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, the subdiagonal elements of the tridiagonal matrix. */
+/* On exit, E has been destroyed. */
+
+/* Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N) */
+/* On entry, if COMPZ = 'V', then Z contains the orthogonal */
+/* matrix used in the reduction to tridiagonal form. */
+/* On exit, if INFO = 0, then if COMPZ = 'V', Z contains the */
+/* orthonormal eigenvectors of the original symmetric matrix, */
+/* and if COMPZ = 'I', Z contains the orthonormal eigenvectors */
+/* of the symmetric tridiagonal matrix. */
+/* If COMPZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1. */
+/* If eigenvectors are desired, then LDZ >= max(1,N). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, */
+/* dimension (LWORK) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If COMPZ = 'N' or N <= 1 then LWORK must be at least 1. */
+/* If COMPZ = 'V' and N > 1 then LWORK must be at least */
+/* ( 1 + 3*N + 2*N*lg N + 3*N**2 ), */
+/* where lg( N ) = smallest integer k such */
+/* that 2**k >= N. */
+/* If COMPZ = 'I' and N > 1 then LWORK must be at least */
+/* ( 1 + 4*N + N**2 ). */
+/* Note that for COMPZ = 'I' or 'V', then if N is less than or */
+/* equal to the minimum divide size, usually 25, then LWORK need */
+/* only be max(1,2*(N-1)). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If COMPZ = 'N' or N <= 1 then LIWORK must be at least 1. */
+/* If COMPZ = 'V' and N > 1 then LIWORK must be at least */
+/* ( 6 + 6*N + 5*N*lg N ). */
+/* If COMPZ = 'I' and N > 1 then LIWORK must be at least */
+/* ( 3 + 5*N ). */
+/* Note that for COMPZ = 'I' or 'V', then if N is less than or */
+/* equal to the minimum divide size, usually 25, then LIWORK */
+/* need only be 1. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal size of the IWORK array, */
+/* returns this value as the first entry of the IWORK array, and */
+/* no error message related to LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: The algorithm failed to compute an eigenvalue while */
+/* working on the submatrix lying in rows and columns */
+/* INFO/(N+1) through mod(INFO,N+1). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+/* Modified by Francoise Tisseur, University of Tennessee. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1 || *liwork == -1;
+
+    if (lsame_(compz, "N")) {
+	icompz = 0;
+    } else if (lsame_(compz, "V")) {
+	icompz = 1;
+    } else if (lsame_(compz, "I")) {
+	icompz = 2;
+    } else {
+	icompz = -1;
+    }
+    if (icompz < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*ldz < 1 || icompz > 0 && *ldz < max(1,*n)) {
+	*info = -6;
+    }
+
+    if (*info == 0) {
+
+/* Compute the workspace requirements */
+
+	smlsiz = ilaenv_(&c__9, "DSTEDC", " ", &c__0, &c__0, &c__0, &c__0);
+	if (*n <= 1 || icompz == 0) {
+	    liwmin = 1;
+	    lwmin = 1;
+	} else if (*n <= smlsiz) {
+	    liwmin = 1;
+/* NOTE: "-" binds tighter than "<<", so this is (N-1)*2, matching the */
+/* documented small-N requirement LWORK >= max(1,2*(N-1)). */
+	    lwmin = *n - 1 << 1;
+	} else {
+	    lgn = (integer) (log((doublereal) (*n)) / log(2.));
+/* The test is applied twice to compensate for rounding in LOG(), */
+/* guaranteeing that LGN is the smallest integer with 2**LGN >= N. */
+	    if (pow_ii(&c__2, &lgn) < *n) {
+		++lgn;
+	    }
+	    if (pow_ii(&c__2, &lgn) < *n) {
+		++lgn;
+	    }
+	    if (icompz == 1) {
+/* Computing 2nd power */
+		i__1 = *n;
+		lwmin = *n * 3 + 1 + (*n << 1) * lgn + i__1 * i__1 * 3;
+		liwmin = *n * 6 + 6 + *n * 5 * lgn;
+	    } else if (icompz == 2) {
+/* Computing 2nd power */
+		i__1 = *n;
+		lwmin = (*n << 2) + 1 + i__1 * i__1;
+		liwmin = *n * 5 + 3;
+	    }
+	}
+	work[1] = (doublereal) lwmin;
+	iwork[1] = liwmin;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -8;
+	} else if (*liwork < liwmin && ! lquery) {
+	    *info = -10;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSTEDC", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+    if (*n == 1) {
+	if (icompz != 0) {
+	    z__[z_dim1 + 1] = 1.;
+	}
+	return 0;
+    }
+
+/* If the following conditional clause is removed, then the routine */
+/* will use the Divide and Conquer routine to compute only the */
+/* eigenvalues, which requires (3N + 3N**2) real workspace and */
+/* (2 + 5N + 2N lg(N)) integer workspace. */
+/* Since on many architectures DSTERF is much faster than any other */
+/* algorithm for finding eigenvalues only, it is used here */
+/* as the default. If the conditional clause is removed, then */
+/* information on the size of workspace needs to be changed. */
+
+/* If COMPZ = 'N', use DSTERF to compute the eigenvalues. */
+
+    if (icompz == 0) {
+	dsterf_(n, &d__[1], &e[1], info);
+	goto L50;
+    }
+
+/* If N is smaller than the minimum divide size (SMLSIZ+1), then */
+/* solve the problem with another solver. */
+
+    if (*n <= smlsiz) {
+
+	dsteqr_(compz, n, &d__[1], &e[1], &z__[z_offset], ldz, &work[1], info);
+
+    } else {
+
+/* If COMPZ = 'V', the Z matrix must be stored elsewhere for later */
+/* use. */
+
+	if (icompz == 1) {
+	    storez = *n * *n + 1;
+	} else {
+	    storez = 1;
+	}
+
+	if (icompz == 2) {
+	    dlaset_("Full", n, n, &c_b17, &c_b18, &z__[z_offset], ldz);
+	}
+
+/* Scale. */
+
+	orgnrm = dlanst_("M", n, &d__[1], &e[1]);
+	if (orgnrm == 0.) {
+	    goto L50;
+	}
+
+	eps = dlamch_("Epsilon");
+
+	start = 1;
+
+/* while ( START <= N ) */
+
+L10:
+	if (start <= *n) {
+
+/* Let FINISH be the position of the next subdiagonal entry */
+/* such that E( FINISH ) <= TINY or FINISH = N if no such */
+/* subdiagonal exists. The matrix identified by the elements */
+/* between START and FINISH constitutes an independent */
+/* sub-problem. */
+
+	    finish = start;
+L20:
+	    if (finish < *n) {
+		tiny = eps * sqrt((d__1 = d__[finish], abs(d__1))) * sqrt((
+			d__2 = d__[finish + 1], abs(d__2)));
+		if ((d__1 = e[finish], abs(d__1)) > tiny) {
+		    ++finish;
+		    goto L20;
+		}
+	    }
+
+/* (Sub) Problem determined. Compute its size and solve it. */
+
+	    m = finish - start + 1;
+	    if (m == 1) {
+		start = finish + 1;
+		goto L10;
+	    }
+	    if (m > smlsiz) {
+
+/* Scale. */
+
+		orgnrm = dlanst_("M", &m, &d__[start], &e[start]);
+		dlascl_("G", &c__0, &c__0, &orgnrm, &c_b18, &m, &c__1, &d__[
+			start], &m, info);
+		i__1 = m - 1;
+		i__2 = m - 1;
+		dlascl_("G", &c__0, &c__0, &orgnrm, &c_b18, &i__1, &c__1, &e[
+			start], &i__2, info);
+
+		if (icompz == 1) {
+		    strtrw = 1;
+		} else {
+		    strtrw = start;
+		}
+		dlaed0_(&icompz, n, &m, &d__[start], &e[start], &z__[strtrw + 
+			start * z_dim1], ldz, &work[1], n, &work[storez], &
+			iwork[1], info);
+		if (*info != 0) {
+/* Remap DLAED0's sub-problem-relative failure position into the */
+/* global INFO encoding documented in the header above. */
+		    *info = (*info / (m + 1) + start - 1) * (*n + 1) + *info %
+			 (m + 1) + start - 1;
+		    goto L50;
+		}
+
+/* Scale back. */
+
+		dlascl_("G", &c__0, &c__0, &c_b18, &orgnrm, &m, &c__1, &d__[
+			start], &m, info);
+
+	    } else {
+		if (icompz == 1) {
+
+/* Since QR won't update a Z matrix which is larger than */
+/* the length of D, we must solve the sub-problem in a */
+/* workspace and then multiply back into Z. */
+
+		    dsteqr_("I", &m, &d__[start], &e[start], &work[1], &m, &
+			    work[m * m + 1], info);
+		    dlacpy_("A", n, &m, &z__[start * z_dim1 + 1], ldz, &work[
+			    storez], n);
+		    dgemm_("N", "N", n, &m, &m, &c_b18, &work[storez], n, &
+			    work[1], &m, &c_b17, &z__[start * z_dim1 + 1], 
+			    ldz);
+		} else if (icompz == 2) {
+		    dsteqr_("I", &m, &d__[start], &e[start], &z__[start + 
+			    start * z_dim1], ldz, &work[1], info);
+		} else {
+		    dsterf_(&m, &d__[start], &e[start], info);
+		}
+		if (*info != 0) {
+		    *info = start * (*n + 1) + finish;
+		    goto L50;
+		}
+	    }
+
+	    start = finish + 1;
+	    goto L10;
+	}
+
+/* endwhile */
+
+/* If the problem split any number of times, then the eigenvalues */
+/* will not be properly ordered. Here we permute the eigenvalues */
+/* (and the associated eigenvectors) into ascending order. */
+
+/* M still holds the size of the last sub-problem; M == N means the */
+/* matrix never split, so the eigenvalues are already in order. */
+	if (m != *n) {
+	    if (icompz == 0) {
+
+/* Use Quick Sort */
+
+		dlasrt_("I", n, &d__[1], info);
+
+	    } else {
+
+/* Use Selection Sort to minimize swaps of eigenvectors */
+
+		i__1 = *n;
+		for (ii = 2; ii <= i__1; ++ii) {
+		    i__ = ii - 1;
+		    k = i__;
+		    p = d__[i__];
+		    i__2 = *n;
+		    for (j = ii; j <= i__2; ++j) {
+			if (d__[j] < p) {
+			    k = j;
+			    p = d__[j];
+			}
+/* L30: */
+		    }
+		    if (k != i__) {
+			d__[k] = d__[i__];
+			d__[i__] = p;
+			dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * 
+				z_dim1 + 1], &c__1);
+		    }
+/* L40: */
+		}
+	    }
+	}
+    }
+
+L50:
+    work[1] = (doublereal) lwmin;
+    iwork[1] = liwmin;
+
+    return 0;
+
+/* End of DSTEDC */
+
+} /* dstedc_ */
diff --git a/contrib/libs/clapack/dstegr.c b/contrib/libs/clapack/dstegr.c
new file mode 100644
index 0000000000..257e368785
--- /dev/null
+++ b/contrib/libs/clapack/dstegr.c
@@ -0,0 +1,211 @@
+/* dstegr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dstegr_(char *jobz, char *range, integer *n, doublereal *
+	d__, doublereal *e, doublereal *vl, doublereal *vu, integer *il, 
+	integer *iu, doublereal *abstol, integer *m, doublereal *w, 
+	doublereal *z__, integer *ldz, integer *isuppz, doublereal *work, 
+	integer *lwork, integer *iwork, integer *liwork, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset;
+
+    /* Local variables */
+    extern /* Subroutine */ int dstemr_(char *, char *, integer *, doublereal 
+	    *, doublereal *, doublereal *, doublereal *, integer *, integer *, 
+	    integer *, doublereal *, doublereal *, integer *, integer *, 
+	    integer *, logical *, doublereal *, integer *, integer *, integer 
+	    *, integer *);
+    logical tryrac;
+
+
+
+/* -- LAPACK computational routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSTEGR computes selected eigenvalues and, optionally, eigenvectors */
+/* of a real symmetric tridiagonal matrix T. Any such unreduced matrix has */
+/* a well defined set of pairwise different real eigenvalues, the corresponding */
+/* real eigenvectors are pairwise orthogonal. */
+
+/* The spectrum may be computed either completely or partially by specifying */
+/* either an interval (VL,VU] or a range of indices IL:IU for the desired */
+/* eigenvalues. */
+
+/* DSTEGR is a compatability wrapper around the improved DSTEMR routine. */
+/* See DSTEMR for further details. */
+
+/* One important change is that the ABSTOL parameter no longer provides any */
+/* benefit and hence is no longer used. */
+
+/* Note : DSTEGR and DSTEMR work only on machines which follow */
+/* IEEE-754 floating-point standard in their handling of infinities and */
+/* NaNs. Normal execution may create these exceptiona values and hence */
+/* may abort due to a floating point exception in environments which */
+/* do not conform to the IEEE-754 standard. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the N diagonal elements of the tridiagonal matrix */
+/* T. On exit, D is overwritten. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the (N-1) subdiagonal elements of the tridiagonal */
+/* matrix T in elements 1 to N-1 of E. E(N) need not be set on */
+/* input, but is used internally as workspace. */
+/* On exit, E is overwritten. */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) DOUBLE PRECISION */
+/* Unused. Was the absolute error tolerance for the */
+/* eigenvalues/eigenvectors in previous versions. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* The first M elements contain the selected eigenvalues in */
+/* ascending order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, max(1,M) ) */
+/* If JOBZ = 'V', and if INFO = 0, then the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix T */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+/* Supplying N columns is always safe. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', then LDZ >= max(1,N). */
+
+/* ISUPPZ (output) INTEGER ARRAY, dimension ( 2*max(1,M) ) */
+/* The support of the eigenvectors in Z, i.e., the indices */
+/* indicating the nonzero elements in Z. The i-th computed eigenvector */
+/* is nonzero only in elements ISUPPZ( 2*i-1 ) through */
+/* ISUPPZ( 2*i ). This is relevant in the case when the matrix */
+/* is split. ISUPPZ is only accessed when JOBZ is 'V' and N > 0. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal */
+/* (and minimal) LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,18*N) */
+/* if JOBZ = 'V', and LWORK >= max(1,12*N) if JOBZ = 'N'. */
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (LIWORK) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. LIWORK >= max(1,10*N) */
+/* if the eigenvectors are desired, and LIWORK >= max(1,8*N) */
+/* if only the eigenvalues are to be computed. */
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal size of the IWORK array, */
+/* returns this value as the first entry of the IWORK array, and */
+/* no error message related to LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* On exit, INFO */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = 1X, internal error in DLARRE, */
+/* if INFO = 2X, internal error in DLARRV. */
+/* Here, the digit X = ABS( IINFO ) < 10, where IINFO is */
+/* the nonzero error code returned by DLARRE or */
+/* DLARRV, respectively. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Inderjit Dhillon, IBM Almaden, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, LBNL/NERSC, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --isuppz;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+/* ABSTOL is deliberately never referenced (see the header above). */
+/* TRYRAC = FALSE is forwarded to DSTEMR; see DSTEMR for its meaning. */
+    tryrac = FALSE_;
+/* All other arguments pass straight through. Note that N is supplied */
+/* for DSTEMR's NZC argument, and E doubles as length-N workspace */
+/* (E(N) is used internally, as documented above). */
+    dstemr_(jobz, range, n, &d__[1], &e[1], vl, vu, il, iu, m, &w[1], &z__[
+	    z_offset], ldz, n, &isuppz[1], &tryrac, &work[1], lwork, &iwork[1]
+, liwork, info);
+
+/* End of DSTEGR */
+
+    return 0;
+} /* dstegr_ */
diff --git a/contrib/libs/clapack/dstein.c b/contrib/libs/clapack/dstein.c
new file mode 100644
index 0000000000..1035c8ccfa
--- /dev/null
+++ b/contrib/libs/clapack/dstein.c
@@ -0,0 +1,452 @@
+/* dstein.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__2 = 2;
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dstein_(integer *n, doublereal *d__, doublereal *e, 
+	integer *m, doublereal *w, integer *iblock, integer *isplit, 
+	doublereal *z__, integer *ldz, doublereal *work, integer *iwork, 
+	integer *ifail, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1, i__2, i__3;
+    doublereal d__1, d__2, d__3, d__4, d__5;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, b1, j1, bn;
+    doublereal xj, scl, eps, sep, nrm, tol;
+    integer its;
+    doublereal xjm, ztr, eps1;
+    integer jblk, nblk;
+    extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, 
+	    integer *);
+    integer jmax;
+    extern doublereal dnrm2_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, 
+	    integer *);
+    integer iseed[4], gpind, iinfo;
+    extern doublereal dasum_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, 
+	    doublereal *, integer *), daxpy_(integer *, doublereal *, 
+	    doublereal *, integer *, doublereal *, integer *);
+    doublereal ortol;
+    integer indrv1, indrv2, indrv3, indrv4, indrv5;
+    extern doublereal dlamch_(char *);
+    extern /* Subroutine */ int dlagtf_(integer *, doublereal *, doublereal *, 
+	    doublereal *, doublereal *, doublereal *, doublereal *, integer *
+, integer *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *), dlagts_(
+	    integer *, integer *, doublereal *, doublereal *, doublereal *, 
+	    doublereal *, integer *, doublereal *, doublereal *, integer *);
+    integer nrmchk;
+    extern /* Subroutine */ int dlarnv_(integer *, integer *, integer *, 
+	    doublereal *);
+    integer blksiz;
+    doublereal onenrm, dtpcrt, pertol;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSTEIN computes the eigenvectors of a real symmetric tridiagonal */
+/* matrix T corresponding to specified eigenvalues, using inverse */
+/* iteration. */
+
+/* The maximum number of iterations allowed for each eigenvector is */
+/* specified by an internal parameter MAXITS (currently set to 5). */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the tridiagonal matrix T. */
+
+/* E (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of the tridiagonal matrix */
+/* T, in elements 1 to N-1. */
+
+/* M (input) INTEGER */
+/* The number of eigenvectors to be found. 0 <= M <= N. */
+
+/* W (input) DOUBLE PRECISION array, dimension (N) */
+/* The first M elements of W contain the eigenvalues for */
+/* which eigenvectors are to be computed. The eigenvalues */
+/* should be grouped by split-off block and ordered from */
+/* smallest to largest within the block. ( The output array */
+/* W from DSTEBZ with ORDER = 'B' is expected here. ) */
+
+/* IBLOCK (input) INTEGER array, dimension (N) */
+/* The submatrix indices associated with the corresponding */
+/* eigenvalues in W; IBLOCK(i)=1 if eigenvalue W(i) belongs to */
+/* the first submatrix from the top, =2 if W(i) belongs to */
+/* the second submatrix, etc. ( The output array IBLOCK */
+/* from DSTEBZ is expected here. ) */
+
+/* ISPLIT (input) INTEGER array, dimension (N) */
+/* The splitting points, at which T breaks up into submatrices. */
+/* The first submatrix consists of rows/columns 1 to */
+/* ISPLIT( 1 ), the second of rows/columns ISPLIT( 1 )+1 */
+/* through ISPLIT( 2 ), etc. */
+/* ( The output array ISPLIT from DSTEBZ is expected here. ) */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, M) */
+/* The computed eigenvectors. The eigenvector associated */
+/* with the eigenvalue W(i) is stored in the i-th column of */
+/* Z. Any vector which fails to converge is set to its current */
+/* iterate after MAXITS iterations. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= max(1,N). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (5*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* IFAIL (output) INTEGER array, dimension (M) */
+/* On normal exit, all elements of IFAIL are zero. */
+/* If one or more eigenvectors fail to converge after */
+/* MAXITS iterations, then their indices are stored in */
+/* array IFAIL. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, then i eigenvectors failed to converge */
+/* in MAXITS iterations. Their indices are stored in */
+/* array IFAIL. */
+
+/* Internal Parameters */
+/* =================== */
+
+/* MAXITS INTEGER, default = 5 */
+/* The maximum number of iterations performed. */
+
+/* EXTRA INTEGER, default = 2 */
+/* The number of iterations performed after norm growth */
+/* criterion is satisfied, should be at least 1. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    --w;
+    --iblock;
+    --isplit;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --iwork;
+    --ifail;
+
+    /* Function Body */
+    *info = 0;
+    i__1 = *m;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	ifail[i__] = 0;
+/* L10: */
+    }
+
+    if (*n < 0) {
+	*info = -1;
+    } else if (*m < 0 || *m > *n) {
+	*info = -4;
+    } else if (*ldz < max(1,*n)) {
+	*info = -9;
+    } else {
+/* Verify the DSTEBZ ORDER='B' layout: IBLOCK non-decreasing, and W */
+/* non-decreasing within each block. */
+	i__1 = *m;
+	for (j = 2; j <= i__1; ++j) {
+	    if (iblock[j] < iblock[j - 1]) {
+		*info = -6;
+		goto L30;
+	    }
+	    if (iblock[j] == iblock[j - 1] && w[j] < w[j - 1]) {
+		*info = -5;
+		goto L30;
+	    }
+/* L20: */
+	}
+L30:
+	;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSTEIN", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *m == 0) {
+	return 0;
+    } else if (*n == 1) {
+	z__[z_dim1 + 1] = 1.;
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    eps = dlamch_("Precision");
+
+/* Initialize seed for random number generator DLARNV. */
+
+/* The seed is fixed, so the random starting vectors -- and hence the */
+/* computed eigenvectors -- are reproducible from call to call. */
+    for (i__ = 1; i__ <= 4; ++i__) {
+	iseed[i__ - 1] = 1;
+/* L40: */
+    }
+
+/* Initialize pointers. */
+
+/* The 5*N workspace is split into five length-N vectors: INDRV1 */
+/* holds the eigenvector iterate; INDRV2..INDRV4 receive copies of */
+/* the tridiagonal for DLAGTF; INDRV5 is DLAGTF/DLAGTS scratch. */
+    indrv1 = 0;
+    indrv2 = indrv1 + *n;
+    indrv3 = indrv2 + *n;
+    indrv4 = indrv3 + *n;
+    indrv5 = indrv4 + *n;
+
+/* Compute eigenvectors of matrix blocks. */
+
+    j1 = 1;
+    i__1 = iblock[*m];
+    for (nblk = 1; nblk <= i__1; ++nblk) {
+
+/* Find starting and ending indices of block nblk. */
+
+	if (nblk == 1) {
+	    b1 = 1;
+	} else {
+	    b1 = isplit[nblk - 1] + 1;
+	}
+	bn = isplit[nblk];
+	blksiz = bn - b1 + 1;
+	if (blksiz == 1) {
+	    goto L60;
+	}
+	gpind = b1;
+
+/* Compute reorthogonalization criterion and stopping criterion. */
+
+	onenrm = (d__1 = d__[b1], abs(d__1)) + (d__2 = e[b1], abs(d__2));
+/* Computing MAX */
+	d__3 = onenrm, d__4 = (d__1 = d__[bn], abs(d__1)) + (d__2 = e[bn - 1],
+		 abs(d__2));
+	onenrm = max(d__3,d__4);
+	i__2 = bn - 1;
+	for (i__ = b1 + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    d__4 = onenrm, d__5 = (d__1 = d__[i__], abs(d__1)) + (d__2 = e[
+		    i__ - 1], abs(d__2)) + (d__3 = e[i__], abs(d__3));
+	    onenrm = max(d__4,d__5);
+/* L50: */
+	}
+	ortol = onenrm * .001;
+
+	dtpcrt = sqrt(.1 / blksiz);
+
+/* Loop through eigenvalues of block nblk. */
+
+L60:
+	jblk = 0;
+	i__2 = *m;
+	for (j = j1; j <= i__2; ++j) {
+	    if (iblock[j] != nblk) {
+		j1 = j;
+		goto L160;
+	    }
+	    ++jblk;
+	    xj = w[j];
+
+/* Skip all the work if the block size is one. */
+
+	    if (blksiz == 1) {
+		work[indrv1 + 1] = 1.;
+		goto L120;
+	    }
+
+/* If eigenvalues j and j-1 are too close, add a relatively */
+/* small perturbation. */
+
+	    if (jblk > 1) {
+		eps1 = (d__1 = eps * xj, abs(d__1));
+		pertol = eps1 * 10.;
+		sep = xj - xjm;
+		if (sep < pertol) {
+		    xj = xjm + pertol;
+		}
+	    }
+
+	    its = 0;
+	    nrmchk = 0;
+
+/* Get random starting vector. */
+
+	    dlarnv_(&c__2, iseed, &blksiz, &work[indrv1 + 1]);
+
+/* Copy the matrix T so it won't be destroyed in factorization. */
+
+	    dcopy_(&blksiz, &d__[b1], &c__1, &work[indrv4 + 1], &c__1);
+	    i__3 = blksiz - 1;
+	    dcopy_(&i__3, &e[b1], &c__1, &work[indrv2 + 2], &c__1);
+	    i__3 = blksiz - 1;
+	    dcopy_(&i__3, &e[b1], &c__1, &work[indrv3 + 1], &c__1);
+
+/* Compute LU factors with partial pivoting ( PT = LU ) */
+
+	    tol = 0.;
+	    dlagtf_(&blksiz, &work[indrv4 + 1], &xj, &work[indrv2 + 2], &work[
+		    indrv3 + 1], &tol, &work[indrv5 + 1], &iwork[1], &iinfo);
+
+/* Update iteration count. */
+
+L70:
+	    ++its;
+/* 5 here is the internal parameter MAXITS documented above. */
+	    if (its > 5) {
+		goto L100;
+	    }
+
+/* Normalize and scale the righthand side vector Pb. */
+
+/* Computing MAX */
+	    d__2 = eps, d__3 = (d__1 = work[indrv4 + blksiz], abs(d__1));
+	    scl = blksiz * onenrm * max(d__2,d__3) / dasum_(&blksiz, &work[
+		    indrv1 + 1], &c__1);
+	    dscal_(&blksiz, &scl, &work[indrv1 + 1], &c__1);
+
+/* Solve the system LU = Pb. */
+
+	    dlagts_(&c_n1, &blksiz, &work[indrv4 + 1], &work[indrv2 + 2], &
+		    work[indrv3 + 1], &work[indrv5 + 1], &iwork[1], &work[
+		    indrv1 + 1], &tol, &iinfo);
+
+/* Reorthogonalize by modified Gram-Schmidt if eigenvalues are */
+/* close enough. */
+
+	    if (jblk == 1) {
+		goto L90;
+	    }
+	    if ((d__1 = xj - xjm, abs(d__1)) > ortol) {
+		gpind = j;
+	    }
+	    if (gpind != j) {
+		i__3 = j - 1;
+		for (i__ = gpind; i__ <= i__3; ++i__) {
+		    ztr = -ddot_(&blksiz, &work[indrv1 + 1], &c__1, &z__[b1 + 
+			    i__ * z_dim1], &c__1);
+		    daxpy_(&blksiz, &ztr, &z__[b1 + i__ * z_dim1], &c__1, &
+			    work[indrv1 + 1], &c__1);
+/* L80: */
+		}
+	    }
+
+/* Check the infinity norm of the iterate. */
+
+L90:
+	    jmax = idamax_(&blksiz, &work[indrv1 + 1], &c__1);
+	    nrm = (d__1 = work[indrv1 + jmax], abs(d__1));
+
+/* Continue for additional iterations after norm reaches */
+/* stopping criterion. */
+
+	    if (nrm < dtpcrt) {
+		goto L70;
+	    }
+/* Iterate until the criterion has been met NRMCHK = 3 times in */
+/* total, i.e. EXTRA = 2 additional passes (see Internal */
+/* Parameters above). */
+	    ++nrmchk;
+	    if (nrmchk < 3) {
+		goto L70;
+	    }
+
+	    goto L110;
+
+/* If stopping criterion was not satisfied, update info and */
+/* store eigenvector number in array ifail. */
+
+L100:
+	    ++(*info);
+	    ifail[*info] = j;
+
+/* Accept iterate as jth eigenvector. */
+
+L110:
+	    scl = 1. / dnrm2_(&blksiz, &work[indrv1 + 1], &c__1);
+	    jmax = idamax_(&blksiz, &work[indrv1 + 1], &c__1);
+/* Flip the sign so the largest-magnitude entry comes out positive. */
+	    if (work[indrv1 + jmax] < 0.) {
+		scl = -scl;
+	    }
+	    dscal_(&blksiz, &scl, &work[indrv1 + 1], &c__1);
+L120:
+	    i__3 = *n;
+	    for (i__ = 1; i__ <= i__3; ++i__) {
+		z__[i__ + j * z_dim1] = 0.;
+/* L130: */
+	    }
+	    i__3 = blksiz;
+	    for (i__ = 1; i__ <= i__3; ++i__) {
+		z__[b1 + i__ - 1 + j * z_dim1] = work[indrv1 + i__];
+/* L140: */
+	    }
+
+/* Save the shift to check eigenvalue spacing at next */
+/* iteration. */
+
+	    xjm = xj;
+
+/* L150: */
+	}
+L160:
+	;
+    }
+
+    return 0;
+
+/* End of DSTEIN */
+
+} /* dstein_ */
diff --git a/contrib/libs/clapack/dstemr.c b/contrib/libs/clapack/dstemr.c
new file mode 100644
index 0000000000..abea3d6909
--- /dev/null
+++ b/contrib/libs/clapack/dstemr.c
@@ -0,0 +1,728 @@
+/* dstemr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b18 = .001;
+
+/* Subroutine */ int dstemr_(char *jobz, char *range, integer *n, doublereal *
+ d__, doublereal *e, doublereal *vl, doublereal *vu, integer *il,
+ integer *iu, integer *m, doublereal *w, doublereal *z__, integer *ldz,
+ integer *nzc, integer *isuppz, logical *tryrac, doublereal *work,
+ integer *lwork, integer *iwork, integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j;
+ doublereal r1, r2;
+ integer jj;
+ doublereal cs;
+ integer in;
+ doublereal sn, wl, wu;
+ integer iil, iiu;
+ doublereal eps, tmp;
+ integer indd, iend, jblk, wend;
+ doublereal rmin, rmax;
+ integer itmp;
+ doublereal tnrm;
+ extern /* Subroutine */ int dlae2_(doublereal *, doublereal *, doublereal
+ *, doublereal *, doublereal *);
+ integer inde2, itmp2;
+ doublereal rtol1, rtol2;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ doublereal scale;
+ integer indgp;
+ extern logical lsame_(char *, char *);
+ integer iinfo, iindw, ilast;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dswap_(integer *, doublereal *, integer
+ *, doublereal *, integer *);
+ integer lwmin;
+ logical wantz;
+ extern /* Subroutine */ int dlaev2_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *);
+ extern doublereal dlamch_(char *);
+ logical alleig;
+ integer ibegin;
+ logical indeig;
+ integer iindbl;
+ logical valeig;
+ extern /* Subroutine */ int dlarrc_(char *, integer *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+ integer *, integer *, integer *), dlarre_(char *,
+ integer *, doublereal *, doublereal *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, integer *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *, integer *);
+ integer wbegin;
+ doublereal safmin;
+ extern /* Subroutine */ int dlarrj_(integer *, doublereal *, doublereal *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *), xerbla_(char *, integer *);
+ doublereal bignum;
+ integer inderr, iindwk, indgrs, offset;
+ extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+ extern /* Subroutine */ int dlarrr_(integer *, doublereal *, doublereal *,
+ integer *), dlarrv_(integer *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, integer *, integer *,
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dlasrt_(char *, integer *, doublereal *,
+ integer *);
+ doublereal thresh;
+ integer iinspl, ifirst, indwrk, liwmin, nzcmin;
+ doublereal pivmin;
+ integer nsplit;
+ doublereal smlnum;
+ logical lquery, zquery;
+
+
+/* -- LAPACK computational routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSTEMR computes selected eigenvalues and, optionally, eigenvectors */
+/* of a real symmetric tridiagonal matrix T. Any such unreduced matrix has */
+/* a well defined set of pairwise different real eigenvalues, the corresponding */
+/* real eigenvectors are pairwise orthogonal. */
+
+/* The spectrum may be computed either completely or partially by specifying */
+/* either an interval (VL,VU] or a range of indices IL:IU for the desired */
+/* eigenvalues. */
+
+/* Depending on the number of desired eigenvalues, these are computed either */
+/* by bisection or the dqds algorithm. Numerically orthogonal eigenvectors are */
+/* computed by the use of various suitable L D L^T factorizations near clusters */
+/* of close eigenvalues (referred to as RRRs, Relatively Robust */
+/* Representations). An informal sketch of the algorithm follows. */
+
+/* For each unreduced block (submatrix) of T, */
+/* (a) Compute T - sigma I = L D L^T, so that L and D */
+/* define all the wanted eigenvalues to high relative accuracy. */
+/* This means that small relative changes in the entries of D and L */
+/* cause only small relative changes in the eigenvalues and */
+/* eigenvectors. The standard (unfactored) representation of the */
+/* tridiagonal matrix T does not have this property in general. */
+/* (b) Compute the eigenvalues to suitable accuracy. */
+/* If the eigenvectors are desired, the algorithm attains full */
+/* accuracy of the computed eigenvalues only right before */
+/* the corresponding vectors have to be computed, see steps c) and d). */
+/* (c) For each cluster of close eigenvalues, select a new */
+/* shift close to the cluster, find a new factorization, and refine */
+/* the shifted eigenvalues to suitable accuracy. */
+/* (d) For each eigenvalue with a large enough relative separation compute */
+/* the corresponding eigenvector by forming a rank revealing twisted */
+/* factorization. Go back to (c) for any clusters that remain. */
+
+/* For more details, see: */
+/* - Inderjit S. Dhillon and Beresford N. Parlett: "Multiple representations */
+/* to compute orthogonal eigenvectors of symmetric tridiagonal matrices," */
+/* Linear Algebra and its Applications, 387(1), pp. 1-28, August 2004. */
+/* - Inderjit Dhillon and Beresford Parlett: "Orthogonal Eigenvectors and */
+/* Relative Gaps," SIAM Journal on Matrix Analysis and Applications, Vol. 25, */
+/* 2004. Also LAPACK Working Note 154. */
+/* - Inderjit Dhillon: "A new O(n^2) algorithm for the symmetric */
+/* tridiagonal eigenvalue/eigenvector problem", */
+/* Computer Science Division Technical Report No. UCB/CSD-97-971, */
+/* UC Berkeley, May 1997. */
+
+/* Notes: */
+/* 1.DSTEMR works only on machines which follow IEEE-754 */
+/* floating-point standard in their handling of infinities and NaNs. */
+/* This permits the use of efficient inner loops avoiding a check for */
+/* zero divisors. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the N diagonal elements of the tridiagonal matrix */
+/* T. On exit, D is overwritten. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the (N-1) subdiagonal elements of the tridiagonal */
+/* matrix T in elements 1 to N-1 of E. E(N) need not be set on */
+/* input, but is used internally as workspace. */
+/* On exit, E is overwritten. */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* The first M elements contain the selected eigenvalues in */
+/* ascending order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, max(1,M) ) */
+/* If JOBZ = 'V', and if INFO = 0, then the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix T */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and can be computed with a workspace */
+/* query by setting NZC = -1, see below. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', then LDZ >= max(1,N). */
+
+/* NZC (input) INTEGER */
+/* The number of eigenvectors to be held in the array Z. */
+/* If RANGE = 'A', then NZC >= max(1,N). */
+/* If RANGE = 'V', then NZC >= the number of eigenvalues in (VL,VU]. */
+/* If RANGE = 'I', then NZC >= IU-IL+1. */
+/* If NZC = -1, then a workspace query is assumed; the */
+/* routine calculates the number of columns of the array Z that */
+/* are needed to hold the eigenvectors. */
+/* This value is returned as the first entry of the Z array, and */
+/* no error message related to NZC is issued by XERBLA. */
+
+/* ISUPPZ (output) INTEGER ARRAY, dimension ( 2*max(1,M) ) */
+/* The support of the eigenvectors in Z, i.e., the indices */
+/* indicating the nonzero elements in Z. The i-th computed eigenvector */
+/* is nonzero only in elements ISUPPZ( 2*i-1 ) through */
+/* ISUPPZ( 2*i ). This is relevant in the case when the matrix */
+/* is split. ISUPPZ is only accessed when JOBZ is 'V' and N > 0. */
+
+/* TRYRAC (input/output) LOGICAL */
+/* If TRYRAC.EQ..TRUE., indicates that the code should check whether */
+/* the tridiagonal matrix defines its eigenvalues to high relative */
+/* accuracy. If so, the code uses relative-accuracy preserving */
+/* algorithms that might be (a bit) slower depending on the matrix. */
+/* If the matrix does not define its eigenvalues to high relative */
+/* accuracy, the code can uses possibly faster algorithms. */
+/* If TRYRAC.EQ..FALSE., the code is not required to guarantee */
+/* relatively accurate eigenvalues and can use the fastest possible */
+/* techniques. */
+/* On exit, a .TRUE. TRYRAC will be set to .FALSE. if the matrix */
+/* does not define its eigenvalues to high relative accuracy. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal */
+/* (and minimal) LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,18*N) */
+/* if JOBZ = 'V', and LWORK >= max(1,12*N) if JOBZ = 'N'. */
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (LIWORK) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. LIWORK >= max(1,10*N) */
+/* if the eigenvectors are desired, and LIWORK >= max(1,8*N) */
+/* if only the eigenvalues are to be computed. */
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal size of the IWORK array, */
+/* returns this value as the first entry of the IWORK array, and */
+/* no error message related to LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* On exit, INFO */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = 1X, internal error in DLARRE, */
+/* if INFO = 2X, internal error in DLARRV. */
+/* Here, the digit X = ABS( IINFO ) < 10, where IINFO is */
+/* the nonzero error code returned by DLARRE or */
+/* DLARRV, respectively. */
+
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --isuppz;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+
+ lquery = *lwork == -1 || *liwork == -1;
+ zquery = *nzc == -1;
+/* DSTEMR needs WORK of size 6*N, IWORK of size 3*N. */
+/* In addition, DLARRE needs WORK of size 6*N, IWORK of size 5*N. */
+/* Furthermore, DLARRV needs WORK of size 12*N, IWORK of size 7*N. */
+ if (wantz) {
+ lwmin = *n * 18;
+ liwmin = *n * 10;
+ } else {
+/* need less workspace if only the eigenvalues are wanted */
+ lwmin = *n * 12;
+ liwmin = *n << 3;
+ }
+ wl = 0.;
+ wu = 0.;
+ iil = 0;
+ iiu = 0;
+ if (valeig) {
+/* We do not reference VL, VU in the cases RANGE = 'I','A' */
+/* The interval (WL, WU] contains all the wanted eigenvalues. */
+/* It is either given by the user or computed in DLARRE. */
+ wl = *vl;
+ wu = *vu;
+ } else if (indeig) {
+/* We do not reference IL, IU in the cases RANGE = 'V','A' */
+ iil = *il;
+ iiu = *iu;
+ }
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (valeig && *n > 0 && wu <= wl) {
+ *info = -7;
+ } else if (indeig && (iil < 1 || iil > *n)) {
+ *info = -8;
+ } else if (indeig && (iiu < iil || iiu > *n)) {
+ *info = -9;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -13;
+ } else if (*lwork < lwmin && ! lquery) {
+ *info = -17;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -19;
+ }
+
+/* Get machine constants. */
+
+ safmin = dlamch_("Safe minimum");
+ eps = dlamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1. / smlnum;
+ rmin = sqrt(smlnum);
+/* Computing MIN */
+ d__1 = sqrt(bignum), d__2 = 1. / sqrt(sqrt(safmin));
+ rmax = min(d__1,d__2);
+
+ if (*info == 0) {
+ work[1] = (doublereal) lwmin;
+ iwork[1] = liwmin;
+
+ if (wantz && alleig) {
+ nzcmin = *n;
+ } else if (wantz && valeig) {
+ dlarrc_("T", n, vl, vu, &d__[1], &e[1], &safmin, &nzcmin, &itmp, &
+ itmp2, info);
+ } else if (wantz && indeig) {
+ nzcmin = iiu - iil + 1;
+ } else {
+/* WANTZ .EQ. FALSE. */
+ nzcmin = 0;
+ }
+ if (zquery && *info == 0) {
+ z__[z_dim1 + 1] = (doublereal) nzcmin;
+ } else if (*nzc < nzcmin && ! zquery) {
+ *info = -14;
+ }
+ }
+ if (*info != 0) {
+
+ i__1 = -(*info);
+ xerbla_("DSTEMR", &i__1);
+
+ return 0;
+ } else if (lquery || zquery) {
+ return 0;
+ }
+
+/* Handle N = 0, 1, and 2 cases immediately */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (alleig || indeig) {
+ *m = 1;
+ w[1] = d__[1];
+ } else {
+ if (wl < d__[1] && wu >= d__[1]) {
+ *m = 1;
+ w[1] = d__[1];
+ }
+ }
+ if (wantz && ! zquery) {
+ z__[z_dim1 + 1] = 1.;
+ isuppz[1] = 1;
+ isuppz[2] = 1;
+ }
+ return 0;
+ }
+
+ if (*n == 2) {
+ if (! wantz) {
+ dlae2_(&d__[1], &e[1], &d__[2], &r1, &r2);
+ } else if (wantz && ! zquery) {
+ dlaev2_(&d__[1], &e[1], &d__[2], &r1, &r2, &cs, &sn);
+ }
+ if (alleig || valeig && r2 > wl && r2 <= wu || indeig && iil == 1) {
+ ++(*m);
+ w[*m] = r2;
+ if (wantz && ! zquery) {
+ z__[*m * z_dim1 + 1] = -sn;
+ z__[*m * z_dim1 + 2] = cs;
+/* Note: At most one of SN and CS can be zero. */
+ if (sn != 0.) {
+ if (cs != 0.) {
+ isuppz[(*m << 1) - 1] = 1;
+ isuppz[*m * 2] = 2; /* fix: second entry is ISUPPZ(2*M), not 2*M-1 (LAPACK 3.2.2) */
+ } else {
+ isuppz[(*m << 1) - 1] = 1;
+ isuppz[*m * 2] = 1; /* fix: second entry is ISUPPZ(2*M), not 2*M-1 (LAPACK 3.2.2) */
+ }
+ } else {
+ isuppz[(*m << 1) - 1] = 2;
+ isuppz[*m * 2] = 2;
+ }
+ }
+ }
+ if (alleig || valeig && r1 > wl && r1 <= wu || indeig && iiu == 2) {
+ ++(*m);
+ w[*m] = r1;
+ if (wantz && ! zquery) {
+ z__[*m * z_dim1 + 1] = cs;
+ z__[*m * z_dim1 + 2] = sn;
+/* Note: At most one of SN and CS can be zero. */
+ if (sn != 0.) {
+ if (cs != 0.) {
+ isuppz[(*m << 1) - 1] = 1;
+ isuppz[*m * 2] = 2; /* fix: second entry is ISUPPZ(2*M), not 2*M-1 (LAPACK 3.2.2) */
+ } else {
+ isuppz[(*m << 1) - 1] = 1;
+ isuppz[*m * 2] = 1; /* fix: second entry is ISUPPZ(2*M), not 2*M-1 (LAPACK 3.2.2) */
+ }
+ } else {
+ isuppz[(*m << 1) - 1] = 2;
+ isuppz[*m * 2] = 2;
+ }
+ }
+ }
+ return 0;
+ }
+/* Continue with general N */
+ indgrs = 1;
+ inderr = (*n << 1) + 1;
+ indgp = *n * 3 + 1;
+ indd = (*n << 2) + 1;
+ inde2 = *n * 5 + 1;
+ indwrk = *n * 6 + 1;
+
+ iinspl = 1;
+ iindbl = *n + 1;
+ iindw = (*n << 1) + 1;
+ iindwk = *n * 3 + 1;
+
+/* Scale matrix to allowable range, if necessary. */
+/* The allowable range is related to the PIVMIN parameter; see the */
+/* comments in DLARRD. The preference for scaling small values */
+/* up is heuristic; we expect users' matrices not to be close to the */
+/* RMAX threshold. */
+
+ scale = 1.;
+ tnrm = dlanst_("M", n, &d__[1], &e[1]);
+ if (tnrm > 0. && tnrm < rmin) {
+ scale = rmin / tnrm;
+ } else if (tnrm > rmax) {
+ scale = rmax / tnrm;
+ }
+ if (scale != 1.) {
+ dscal_(n, &scale, &d__[1], &c__1);
+ i__1 = *n - 1;
+ dscal_(&i__1, &scale, &e[1], &c__1);
+ tnrm *= scale;
+ if (valeig) {
+/* If eigenvalues in interval have to be found, */
+/* scale (WL, WU] accordingly */
+ wl *= scale;
+ wu *= scale;
+ }
+ }
+
+/* Compute the desired eigenvalues of the tridiagonal after splitting */
+/* into smaller subblocks if the corresponding off-diagonal elements */
+/* are small */
+/* THRESH is the splitting parameter for DLARRE */
+/* A negative THRESH forces the old splitting criterion based on the */
+/* size of the off-diagonal. A positive THRESH switches to splitting */
+/* which preserves relative accuracy. */
+
+ if (*tryrac) {
+/* Test whether the matrix warrants the more expensive relative approach. */
+ dlarrr_(n, &d__[1], &e[1], &iinfo);
+ } else {
+/* The user does not care about relative accurately eigenvalues */
+ iinfo = -1;
+ }
+/* Set the splitting criterion */
+ if (iinfo == 0) {
+ thresh = eps;
+ } else {
+ thresh = -eps;
+/* relative accuracy is desired but T does not guarantee it */
+ *tryrac = FALSE_;
+ }
+
+ if (*tryrac) {
+/* Copy original diagonal, needed to guarantee relative accuracy */
+ dcopy_(n, &d__[1], &c__1, &work[indd], &c__1);
+ }
+/* Store the squares of the offdiagonal values of T */
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing 2nd power */
+ d__1 = e[j];
+ work[inde2 + j - 1] = d__1 * d__1;
+/* L5: */
+ }
+/* Set the tolerance parameters for bisection */
+ if (! wantz) {
+/* DLARRE computes the eigenvalues to full precision. */
+ rtol1 = eps * 4.;
+ rtol2 = eps * 4.;
+ } else {
+/* DLARRE computes the eigenvalues to less than full precision. */
+/* DLARRV will refine the eigenvalue approximations, and we can */
+/* need less accurate initial bisection in DLARRE. */
+/* Note: these settings do only affect the subset case and DLARRE */
+ rtol1 = sqrt(eps);
+/* Computing MAX */
+ d__1 = sqrt(eps) * .005, d__2 = eps * 4.;
+ rtol2 = max(d__1,d__2);
+ }
+ dlarre_(range, n, &wl, &wu, &iil, &iiu, &d__[1], &e[1], &work[inde2], &
+ rtol1, &rtol2, &thresh, &nsplit, &iwork[iinspl], m, &w[1], &work[
+ inderr], &work[indgp], &iwork[iindbl], &iwork[iindw], &work[
+ indgrs], &pivmin, &work[indwrk], &iwork[iindwk], &iinfo);
+ if (iinfo != 0) {
+ *info = abs(iinfo) + 10;
+ return 0;
+ }
+/* Note that if RANGE .NE. 'V', DLARRE computes bounds on the desired */
+/* part of the spectrum. All desired eigenvalues are contained in */
+/* (WL,WU] */
+ if (wantz) {
+
+/* Compute the desired eigenvectors corresponding to the computed */
+/* eigenvalues */
+
+ dlarrv_(n, &wl, &wu, &d__[1], &e[1], &pivmin, &iwork[iinspl], m, &
+ c__1, m, &c_b18, &rtol1, &rtol2, &w[1], &work[inderr], &work[
+ indgp], &iwork[iindbl], &iwork[iindw], &work[indgrs], &z__[
+ z_offset], ldz, &isuppz[1], &work[indwrk], &iwork[iindwk], &
+ iinfo);
+ if (iinfo != 0) {
+ *info = abs(iinfo) + 20;
+ return 0;
+ }
+ } else {
+/* DLARRE computes eigenvalues of the (shifted) root representation */
+/* DLARRV returns the eigenvalues of the unshifted matrix. */
+/* However, if the eigenvectors are not desired by the user, we need */
+/* to apply the corresponding shifts from DLARRE to obtain the */
+/* eigenvalues of the original matrix. */
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ itmp = iwork[iindbl + j - 1];
+ w[j] += e[iwork[iinspl + itmp - 1]];
+/* L20: */
+ }
+ }
+
+ if (*tryrac) {
+/* Refine computed eigenvalues so that they are relatively accurate */
+/* with respect to the original matrix T. */
+ ibegin = 1;
+ wbegin = 1;
+ i__1 = iwork[iindbl + *m - 1];
+ for (jblk = 1; jblk <= i__1; ++jblk) {
+ iend = iwork[iinspl + jblk - 1];
+ in = iend - ibegin + 1;
+ wend = wbegin - 1;
+/* check if any eigenvalues have to be refined in this block */
+L36:
+ if (wend < *m) {
+ if (iwork[iindbl + wend] == jblk) {
+ ++wend;
+ goto L36;
+ }
+ }
+ if (wend < wbegin) {
+ ibegin = iend + 1;
+ goto L39;
+ }
+ offset = iwork[iindw + wbegin - 1] - 1;
+ ifirst = iwork[iindw + wbegin - 1];
+ ilast = iwork[iindw + wend - 1];
+ rtol2 = eps * 4.;
+ dlarrj_(&in, &work[indd + ibegin - 1], &work[inde2 + ibegin - 1],
+ &ifirst, &ilast, &rtol2, &offset, &w[wbegin], &work[
+ inderr + wbegin - 1], &work[indwrk], &iwork[iindwk], &
+ pivmin, &tnrm, &iinfo);
+ ibegin = iend + 1;
+ wbegin = wend + 1;
+L39:
+ ;
+ }
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+ if (scale != 1.) {
+ d__1 = 1. / scale;
+ dscal_(m, &d__1, &w[1], &c__1);
+ }
+
+/* If eigenvalues are not in increasing order, then sort them, */
+/* possibly along with eigenvectors. */
+
+ if (nsplit > 1) {
+ if (! wantz) {
+ dlasrt_("I", m, &w[1], &iinfo);
+ if (iinfo != 0) {
+ *info = 3;
+ return 0;
+ }
+ } else {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = 0;
+ tmp = w[j];
+ i__2 = *m;
+ for (jj = j + 1; jj <= i__2; ++jj) {
+ if (w[jj] < tmp) {
+ i__ = jj;
+ tmp = w[jj];
+ }
+/* L50: */
+ }
+ if (i__ != 0) {
+ w[i__] = w[j];
+ w[j] = tmp;
+ if (wantz) {
+ dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j *
+ z_dim1 + 1], &c__1);
+ itmp = isuppz[(i__ << 1) - 1];
+ isuppz[(i__ << 1) - 1] = isuppz[(j << 1) - 1];
+ isuppz[(j << 1) - 1] = itmp;
+ itmp = isuppz[i__ * 2];
+ isuppz[i__ * 2] = isuppz[j * 2];
+ isuppz[j * 2] = itmp;
+ }
+ }
+/* L60: */
+ }
+ }
+ }
+
+
+ work[1] = (doublereal) lwmin;
+ iwork[1] = liwmin;
+ return 0;
+
+/* End of DSTEMR */
+
+} /* dstemr_ */
diff --git a/contrib/libs/clapack/dsteqr.c b/contrib/libs/clapack/dsteqr.c
new file mode 100644
index 0000000000..2d57ebbf16
--- /dev/null
+++ b/contrib/libs/clapack/dsteqr.c
@@ -0,0 +1,621 @@
+/* dsteqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b9 = 0.;
+static doublereal c_b10 = 1.;
+static integer c__0 = 0;
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int dsteqr_(char *compz, integer *n, doublereal *d__,
+ doublereal *e, doublereal *z__, integer *ldz, doublereal *work,
+ integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ doublereal b, c__, f, g;
+ integer i__, j, k, l, m;
+ doublereal p, r__, s;
+ integer l1, ii, mm, lm1, mm1, nm1;
+ doublereal rt1, rt2, eps;
+ integer lsv;
+ doublereal tst, eps2;
+ integer lend, jtot;
+ extern /* Subroutine */ int dlae2_(doublereal *, doublereal *, doublereal
+ *, doublereal *, doublereal *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *);
+ doublereal anorm;
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dlaev2_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *);
+ integer lendm1, lendp1;
+ extern doublereal dlapy2_(doublereal *, doublereal *), dlamch_(char *);
+ integer iscale;
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dlaset_(char *, integer *, integer
+ *, doublereal *, doublereal *, doublereal *, integer *);
+ doublereal safmin;
+ extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *);
+ doublereal safmax;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+ extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *,
+ integer *);
+ integer lendsv;
+ doublereal ssfmin;
+ integer nmaxit, icompz;
+ doublereal ssfmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSTEQR computes all eigenvalues and, optionally, eigenvectors of a */
+/* symmetric tridiagonal matrix using the implicit QL or QR method. */
+/* The eigenvectors of a full or band symmetric matrix can also be found */
+/* if DSYTRD or DSPTRD or DSBTRD has been used to reduce this matrix to */
+/* tridiagonal form. */
+
+/* Arguments */
+/* ========= */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only. */
+/* = 'V': Compute eigenvalues and eigenvectors of the original */
+/* symmetric matrix. On entry, Z must contain the */
+/* orthogonal matrix used to reduce the original matrix */
+/* to tridiagonal form. */
+/* = 'I': Compute eigenvalues and eigenvectors of the */
+/* tridiagonal matrix. Z is initialized to the identity */
+/* matrix. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the diagonal elements of the tridiagonal matrix. */
+/* On exit, if INFO = 0, the eigenvalues in ascending order. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix. */
+/* On exit, E has been destroyed. */
+
+/* Z (input/output) DOUBLE PRECISION array, dimension (LDZ, N) */
+/* On entry, if COMPZ = 'V', then Z contains the orthogonal */
+/* matrix used in the reduction to tridiagonal form. */
+/* On exit, if INFO = 0, then if COMPZ = 'V', Z contains the */
+/* orthonormal eigenvectors of the original symmetric matrix, */
+/* and if COMPZ = 'I', Z contains the orthonormal eigenvectors */
+/* of the symmetric tridiagonal matrix. */
+/* If COMPZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* eigenvectors are desired, then LDZ >= max(1,N). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (max(1,2*N-2)) */
+/* If COMPZ = 'N', then WORK is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: the algorithm has failed to find all the eigenvalues in */
+/* a total of 30*N iterations; if INFO = i, then i */
+/* elements of E have not converged to zero; on exit, D */
+/* and E contain the elements of a symmetric tridiagonal */
+/* matrix which is orthogonally similar to the original */
+/* matrix. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (lsame_(compz, "N")) {
+ icompz = 0;
+ } else if (lsame_(compz, "V")) {
+ icompz = 1;
+ } else if (lsame_(compz, "I")) {
+ icompz = 2;
+ } else {
+ icompz = -1;
+ }
+ if (icompz < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldz < 1 || icompz > 0 && *ldz < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSTEQR", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (icompz == 2) {
+ z__[z_dim1 + 1] = 1.;
+ }
+ return 0;
+ }
+
+/* Determine the unit roundoff and over/underflow thresholds. */
+
+ eps = dlamch_("E");
+/* Computing 2nd power */
+ d__1 = eps;
+ eps2 = d__1 * d__1;
+ safmin = dlamch_("S");
+ safmax = 1. / safmin;
+ ssfmax = sqrt(safmax) / 3.;
+ ssfmin = sqrt(safmin) / eps2;
+
+/* Compute the eigenvalues and eigenvectors of the tridiagonal */
+/* matrix. */
+
+ if (icompz == 2) {
+ dlaset_("Full", n, n, &c_b9, &c_b10, &z__[z_offset], ldz);
+ }
+
+ nmaxit = *n * 30;
+ jtot = 0;
+
+/* Determine where the matrix splits and choose QL or QR iteration */
+/* for each block, according to whether top or bottom diagonal */
+/* element is smaller. */
+
+ l1 = 1;
+ nm1 = *n - 1;
+
+L10:
+ if (l1 > *n) {
+ goto L160;
+ }
+ if (l1 > 1) {
+ e[l1 - 1] = 0.;
+ }
+ if (l1 <= nm1) {
+ i__1 = nm1;
+ for (m = l1; m <= i__1; ++m) {
+ tst = (d__1 = e[m], abs(d__1));
+ if (tst == 0.) {
+ goto L30;
+ }
+ if (tst <= sqrt((d__1 = d__[m], abs(d__1))) * sqrt((d__2 = d__[m
+ + 1], abs(d__2))) * eps) {
+ e[m] = 0.;
+ goto L30;
+ }
+/* L20: */
+ }
+ }
+ m = *n;
+
+L30:
+ l = l1;
+ lsv = l;
+ lend = m;
+ lendsv = lend;
+ l1 = m + 1;
+ if (lend == l) {
+ goto L10;
+ }
+
+/* Scale submatrix in rows and columns L to LEND */
+
+ i__1 = lend - l + 1;
+ anorm = dlanst_("I", &i__1, &d__[l], &e[l]);
+ iscale = 0;
+ if (anorm == 0.) {
+ goto L10;
+ }
+ if (anorm > ssfmax) {
+ iscale = 1;
+ i__1 = lend - l + 1;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n,
+ info);
+ } else if (anorm < ssfmin) {
+ iscale = 2;
+ i__1 = lend - l + 1;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n,
+ info);
+ }
+
+/* Choose between QL and QR iteration */
+
+ if ((d__1 = d__[lend], abs(d__1)) < (d__2 = d__[l], abs(d__2))) {
+ lend = lsv;
+ l = lendsv;
+ }
+
+ if (lend > l) {
+
+/* QL Iteration */
+
+/* Look for small subdiagonal element. */
+
+L40:
+ if (l != lend) {
+ lendm1 = lend - 1;
+ i__1 = lendm1;
+ for (m = l; m <= i__1; ++m) {
+/* Computing 2nd power */
+ d__2 = (d__1 = e[m], abs(d__1));
+ tst = d__2 * d__2;
+ if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m
+ + 1], abs(d__2)) + safmin) {
+ goto L60;
+ }
+/* L50: */
+ }
+ }
+
+ m = lend;
+
+L60:
+ if (m < lend) {
+ e[m] = 0.;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L80;
+ }
+
+/* If remaining matrix is 2-by-2, use DLAE2 or SLAEV2 */
+/* to compute its eigensystem. */
+
+ if (m == l + 1) {
+ if (icompz > 0) {
+ dlaev2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2, &c__, &s);
+ work[l] = c__;
+ work[*n - 1 + l] = s;
+ dlasr_("R", "V", "B", n, &c__2, &work[l], &work[*n - 1 + l], &
+ z__[l * z_dim1 + 1], ldz);
+ } else {
+ dlae2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2);
+ }
+ d__[l] = rt1;
+ d__[l + 1] = rt2;
+ e[l] = 0.;
+ l += 2;
+ if (l <= lend) {
+ goto L40;
+ }
+ goto L140;
+ }
+
+ if (jtot == nmaxit) {
+ goto L140;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ g = (d__[l + 1] - p) / (e[l] * 2.);
+ r__ = dlapy2_(&g, &c_b10);
+ g = d__[m] - p + e[l] / (g + d_sign(&r__, &g));
+
+ s = 1.;
+ c__ = 1.;
+ p = 0.;
+
+/* Inner loop */
+
+ mm1 = m - 1;
+ i__1 = l;
+ for (i__ = mm1; i__ >= i__1; --i__) {
+ f = s * e[i__];
+ b = c__ * e[i__];
+ dlartg_(&g, &f, &c__, &s, &r__);
+ if (i__ != m - 1) {
+ e[i__ + 1] = r__;
+ }
+ g = d__[i__ + 1] - p;
+ r__ = (d__[i__] - g) * s + c__ * 2. * b;
+ p = s * r__;
+ d__[i__ + 1] = g + p;
+ g = c__ * r__ - b;
+
+/* If eigenvectors are desired, then save rotations. */
+
+ if (icompz > 0) {
+ work[i__] = c__;
+ work[*n - 1 + i__] = -s;
+ }
+
+/* L70: */
+ }
+
+/* If eigenvectors are desired, then apply saved rotations. */
+
+ if (icompz > 0) {
+ mm = m - l + 1;
+ dlasr_("R", "V", "B", n, &mm, &work[l], &work[*n - 1 + l], &z__[l
+ * z_dim1 + 1], ldz);
+ }
+
+ d__[l] -= p;
+ e[l] = g;
+ goto L40;
+
+/* Eigenvalue found. */
+
+L80:
+ d__[l] = p;
+
+ ++l;
+ if (l <= lend) {
+ goto L40;
+ }
+ goto L140;
+
+ } else {
+
+/* QR Iteration */
+
+/* Look for small superdiagonal element. */
+
+L90:
+ if (l != lend) {
+ lendp1 = lend + 1;
+ i__1 = lendp1;
+ for (m = l; m >= i__1; --m) {
+/* Computing 2nd power */
+ d__2 = (d__1 = e[m - 1], abs(d__1));
+ tst = d__2 * d__2;
+ if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m
+ - 1], abs(d__2)) + safmin) {
+ goto L110;
+ }
+/* L100: */
+ }
+ }
+
+ m = lend;
+
+L110:
+ if (m > lend) {
+ e[m - 1] = 0.;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L130;
+ }
+
+/* If remaining matrix is 2-by-2, use DLAE2 or SLAEV2 */
+/* to compute its eigensystem. */
+
+ if (m == l - 1) {
+ if (icompz > 0) {
+ dlaev2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2, &c__, &s)
+ ;
+ work[m] = c__;
+ work[*n - 1 + m] = s;
+ dlasr_("R", "V", "F", n, &c__2, &work[m], &work[*n - 1 + m], &
+ z__[(l - 1) * z_dim1 + 1], ldz);
+ } else {
+ dlae2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2);
+ }
+ d__[l - 1] = rt1;
+ d__[l] = rt2;
+ e[l - 1] = 0.;
+ l += -2;
+ if (l >= lend) {
+ goto L90;
+ }
+ goto L140;
+ }
+
+ if (jtot == nmaxit) {
+ goto L140;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ g = (d__[l - 1] - p) / (e[l - 1] * 2.);
+ r__ = dlapy2_(&g, &c_b10);
+ g = d__[m] - p + e[l - 1] / (g + d_sign(&r__, &g));
+
+ s = 1.;
+ c__ = 1.;
+ p = 0.;
+
+/* Inner loop */
+
+ lm1 = l - 1;
+ i__1 = lm1;
+ for (i__ = m; i__ <= i__1; ++i__) {
+ f = s * e[i__];
+ b = c__ * e[i__];
+ dlartg_(&g, &f, &c__, &s, &r__);
+ if (i__ != m) {
+ e[i__ - 1] = r__;
+ }
+ g = d__[i__] - p;
+ r__ = (d__[i__ + 1] - g) * s + c__ * 2. * b;
+ p = s * r__;
+ d__[i__] = g + p;
+ g = c__ * r__ - b;
+
+/* If eigenvectors are desired, then save rotations. */
+
+ if (icompz > 0) {
+ work[i__] = c__;
+ work[*n - 1 + i__] = s;
+ }
+
+/* L120: */
+ }
+
+/* If eigenvectors are desired, then apply saved rotations. */
+
+ if (icompz > 0) {
+ mm = l - m + 1;
+ dlasr_("R", "V", "F", n, &mm, &work[m], &work[*n - 1 + m], &z__[m
+ * z_dim1 + 1], ldz);
+ }
+
+ d__[l] -= p;
+ e[lm1] = g;
+ goto L90;
+
+/* Eigenvalue found. */
+
+L130:
+ d__[l] = p;
+
+ --l;
+ if (l >= lend) {
+ goto L90;
+ }
+ goto L140;
+
+ }
+
+/* Undo scaling if necessary */
+
+L140:
+ if (iscale == 1) {
+ i__1 = lendsv - lsv + 1;
+ dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ i__1 = lendsv - lsv;
+ dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &e[lsv], n,
+ info);
+ } else if (iscale == 2) {
+ i__1 = lendsv - lsv + 1;
+ dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ i__1 = lendsv - lsv;
+ dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &e[lsv], n,
+ info);
+ }
+
+/* Check for no convergence to an eigenvalue after a total */
+/* of N*MAXIT iterations. */
+
+ if (jtot < nmaxit) {
+ goto L10;
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (e[i__] != 0.) {
+ ++(*info);
+ }
+/* L150: */
+ }
+ goto L190;
+
+/* Order eigenvalues and eigenvectors. */
+
+L160:
+ if (icompz == 0) {
+
+/* Use Quick Sort */
+
+ dlasrt_("I", n, &d__[1], info);
+
+ } else {
+
+/* Use Selection Sort to minimize swaps of eigenvectors */
+
+ i__1 = *n;
+ for (ii = 2; ii <= i__1; ++ii) {
+ i__ = ii - 1;
+ k = i__;
+ p = d__[i__];
+ i__2 = *n;
+ for (j = ii; j <= i__2; ++j) {
+ if (d__[j] < p) {
+ k = j;
+ p = d__[j];
+ }
+/* L170: */
+ }
+ if (k != i__) {
+ d__[k] = d__[i__];
+ d__[i__] = p;
+ dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1],
+ &c__1);
+ }
+/* L180: */
+ }
+ }
+
+L190:
+ return 0;
+
+/* End of DSTEQR */
+
+} /* dsteqr_ */
diff --git a/contrib/libs/clapack/dsterf.c b/contrib/libs/clapack/dsterf.c
new file mode 100644
index 0000000000..a950e5b433
--- /dev/null
+++ b/contrib/libs/clapack/dsterf.c
@@ -0,0 +1,461 @@
+/* dsterf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__0 = 0;
+static integer c__1 = 1;
+static doublereal c_b32 = 1.;
+
+/* Subroutine */ int dsterf_(integer *n, doublereal *d__, doublereal *e, 
+	integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+    /* Local variables */
+    doublereal c__;
+    integer i__, l, m;
+    doublereal p, r__, s;
+    integer l1;
+    doublereal bb, rt1, rt2, eps, rte;
+    integer lsv;
+    doublereal eps2, oldc;
+    integer lend, jtot;
+    extern /* Subroutine */ int dlae2_(doublereal *, doublereal *, doublereal 
+	    *, doublereal *, doublereal *);
+    doublereal gamma, alpha, sigma, anorm;
+    extern doublereal dlapy2_(doublereal *, doublereal *), dlamch_(char *);
+    integer iscale;
+    extern /* Subroutine */ int dlascl_(char *, integer *, integer *, 
+	    doublereal *, doublereal *, integer *, integer *, doublereal *, 
+	    integer *, integer *);
+    doublereal oldgam, safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal safmax;
+    extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+    extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *, 
+	    integer *);
+    integer lendsv;
+    doublereal ssfmin;
+    integer nmaxit;
+    doublereal ssfmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DSTERF computes all eigenvalues of a symmetric tridiagonal matrix */
+/*  using the Pal-Walker-Kahan variant of the QL or QR algorithm. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix.  N >= 0. */
+
+/*  D       (input/output) DOUBLE PRECISION array, dimension (N) */
+/*          On entry, the n diagonal elements of the tridiagonal matrix. */
+/*          On exit, if INFO = 0, the eigenvalues in ascending order. */
+
+/*  E       (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/*          On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/*          matrix. */
+/*          On exit, E has been destroyed. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  the algorithm failed to find all of the eigenvalues in */
+/*                a total of 30*N iterations; if INFO = i, then i */
+/*                elements of E have not converged to zero. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --e;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+
+/*     Check argument N; quick return for the trivial sizes n <= 1. */
+
+    if (*n < 0) {
+	*info = -1;
+	i__1 = -(*info);
+	xerbla_("DSTERF", &i__1);
+	return 0;
+    }
+    if (*n <= 1) {
+	return 0;
+    }
+
+/*     Determine the unit roundoff for this environment. */
+
+    eps = dlamch_("E");
+/* Computing 2nd power */
+    d__1 = eps;
+    eps2 = d__1 * d__1;
+    safmin = dlamch_("S");
+    safmax = 1. / safmin;
+    ssfmax = sqrt(safmax) / 3.;
+    ssfmin = sqrt(safmin) / eps2;
+
+/*     Compute the eigenvalues of the tridiagonal matrix. */
+
+    nmaxit = *n * 30;
+    sigma = 0.;
+    jtot = 0;
+
+/*     Determine where the matrix splits and choose QL or QR iteration */
+/*     for each block, according to whether top or bottom diagonal */
+/*     element is smaller. */
+
+    l1 = 1;
+
+L10:
+    if (l1 > *n) {
+	goto L170;
+    }
+    if (l1 > 1) {
+	e[l1 - 1] = 0.;
+    }
+    i__1 = *n - 1;
+    for (m = l1; m <= i__1; ++m) {
+	if ((d__3 = e[m], abs(d__3)) <= sqrt((d__1 = d__[m], abs(d__1))) * 
+		sqrt((d__2 = d__[m + 1], abs(d__2))) * eps) {
+	    e[m] = 0.;
+	    goto L30;
+	}
+/* L20: */
+    }
+    m = *n;
+
+L30:
+    l = l1;
+    lsv = l;
+    lend = m;
+    lendsv = lend;
+    l1 = m + 1;
+    if (lend == l) {
+	goto L10;
+    }
+
+/*     Scale submatrix in rows and columns L to LEND */
+
+    i__1 = lend - l + 1;
+    anorm = dlanst_("I", &i__1, &d__[l], &e[l]);
+    iscale = 0;
+    if (anorm > ssfmax) {
+	iscale = 1;
+	i__1 = lend - l + 1;
+	dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n, 
+		info);
+	i__1 = lend - l;
+	dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n, 
+		info);
+    } else if (anorm < ssfmin) {
+	iscale = 2;
+	i__1 = lend - l + 1;
+	dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n, 
+		info);
+	i__1 = lend - l;
+	dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n, 
+		info);
+    }
+
+    i__1 = lend - 1;
+    for (i__ = l; i__ <= i__1; ++i__) {
+/* Computing 2nd power */
+	d__1 = e[i__];
+	e[i__] = d__1 * d__1;
+/* L40: */
+    }
+
+/*     Choose between QL and QR iteration */
+
+    if ((d__1 = d__[lend], abs(d__1)) < (d__2 = d__[l], abs(d__2))) {
+	lend = lsv;
+	l = lendsv;
+    }
+
+    if (lend >= l) {
+
+/*        QL Iteration */
+
+/*        Look for small subdiagonal element. */
+
+L50:
+	if (l != lend) {
+	    i__1 = lend - 1;
+	    for (m = l; m <= i__1; ++m) {
+		if ((d__2 = e[m], abs(d__2)) <= eps2 * (d__1 = d__[m] * d__[m 
+			+ 1], abs(d__1))) {
+		    goto L70;
+		}
+/* L60: */
+	    }
+	}
+	m = lend;
+
+L70:
+	if (m < lend) {
+	    e[m] = 0.;
+	}
+	p = d__[l];
+	if (m == l) {
+	    goto L90;
+	}
+
+/*        If remaining matrix is 2 by 2, use DLAE2 to compute its */
+/*        eigenvalues. */
+
+	if (m == l + 1) {
+	    rte = sqrt(e[l]);
+	    dlae2_(&d__[l], &rte, &d__[l + 1], &rt1, &rt2);
+	    d__[l] = rt1;
+	    d__[l + 1] = rt2;
+	    e[l] = 0.;
+	    l += 2;
+	    if (l <= lend) {
+		goto L50;
+	    }
+	    goto L150;
+	}
+
+	if (jtot == nmaxit) {
+	    goto L150;
+	}
+	++jtot;
+
+/*        Form shift (E holds squared off-diagonals here, so RTE = |e(l)|). */
+
+	rte = sqrt(e[l]);
+	sigma = (d__[l + 1] - p) / (rte * 2.);
+	r__ = dlapy2_(&sigma, &c_b32);
+	sigma = p - rte / (sigma + d_sign(&r__, &sigma));
+
+	c__ = 1.;
+	s = 0.;
+	gamma = d__[m] - sigma;
+	p = gamma * gamma;
+
+/*        Inner loop: rotation-free QL sweep from i = m-1 down to l. */
+
+	i__1 = l;
+	for (i__ = m - 1; i__ >= i__1; --i__) {
+	    bb = e[i__];
+	    r__ = p + bb;
+	    if (i__ != m - 1) {
+		e[i__ + 1] = s * r__;
+	    }
+	    oldc = c__;
+	    c__ = p / r__;
+	    s = bb / r__;
+	    oldgam = gamma;
+	    alpha = d__[i__];
+	    gamma = c__ * (alpha - sigma) - s * oldgam;
+	    d__[i__ + 1] = oldgam + (alpha - gamma);
+	    if (c__ != 0.) {
+		p = gamma * gamma / c__;
+	    } else {
+		p = oldc * bb;
+	    }
+/* L80: */
+	}
+
+	e[l] = s * p;
+	d__[l] = sigma + gamma;
+	goto L50;
+
+/*        Eigenvalue found. */
+
+L90:
+	d__[l] = p;
+
+	++l;
+	if (l <= lend) {
+	    goto L50;
+	}
+	goto L150;
+
+    } else {
+
+/*        QR Iteration */
+
+/*        Look for small superdiagonal element. */
+
+L100:
+	i__1 = lend + 1;
+	for (m = l; m >= i__1; --m) {
+	    if ((d__2 = e[m - 1], abs(d__2)) <= eps2 * (d__1 = d__[m] * d__[m 
+		    - 1], abs(d__1))) {
+		goto L120;
+	    }
+/* L110: */
+	}
+	m = lend;
+
+L120:
+	if (m > lend) {
+	    e[m - 1] = 0.;
+	}
+	p = d__[l];
+	if (m == l) {
+	    goto L140;
+	}
+
+/*        If remaining matrix is 2 by 2, use DLAE2 to compute its */
+/*        eigenvalues. */
+
+	if (m == l - 1) {
+	    rte = sqrt(e[l - 1]);
+	    dlae2_(&d__[l], &rte, &d__[l - 1], &rt1, &rt2);
+	    d__[l] = rt1;
+	    d__[l - 1] = rt2;
+	    e[l - 1] = 0.;
+	    l += -2;
+	    if (l >= lend) {
+		goto L100;
+	    }
+	    goto L150;
+	}
+
+	if (jtot == nmaxit) {
+	    goto L150;
+	}
+	++jtot;
+
+/*        Form shift (E holds squared off-diagonals here, so RTE = |e(l-1)|). */
+
+	rte = sqrt(e[l - 1]);
+	sigma = (d__[l - 1] - p) / (rte * 2.);
+	r__ = dlapy2_(&sigma, &c_b32);
+	sigma = p - rte / (sigma + d_sign(&r__, &sigma));
+
+	c__ = 1.;
+	s = 0.;
+	gamma = d__[m] - sigma;
+	p = gamma * gamma;
+
+/*        Inner loop: rotation-free QR sweep from i = m up to l-1. */
+
+	i__1 = l - 1;
+	for (i__ = m; i__ <= i__1; ++i__) {
+	    bb = e[i__];
+	    r__ = p + bb;
+	    if (i__ != m) {
+		e[i__ - 1] = s * r__;
+	    }
+	    oldc = c__;
+	    c__ = p / r__;
+	    s = bb / r__;
+	    oldgam = gamma;
+	    alpha = d__[i__ + 1];
+	    gamma = c__ * (alpha - sigma) - s * oldgam;
+	    d__[i__] = oldgam + (alpha - gamma);
+	    if (c__ != 0.) {
+		p = gamma * gamma / c__;
+	    } else {
+		p = oldc * bb;
+	    }
+/* L130: */
+	}
+
+	e[l - 1] = s * p;
+	d__[l] = sigma + gamma;
+	goto L100;
+
+/*        Eigenvalue found. */
+
+L140:
+	d__[l] = p;
+
+	--l;
+	if (l >= lend) {
+	    goto L100;
+	}
+	goto L150;
+
+    }
+
+/*     Undo scaling if necessary */
+
+L150:
+    if (iscale == 1) {
+	i__1 = lendsv - lsv + 1;
+	dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv], 
+		n, info);
+    }
+    if (iscale == 2) {
+	i__1 = lendsv - lsv + 1;
+	dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv], 
+		n, info);
+    }
+
+/*     Check for no convergence to an eigenvalue after a total */
+/*     of N*MAXIT iterations. */
+
+    if (jtot < nmaxit) {
+	goto L10;
+    }
+    i__1 = *n - 1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if (e[i__] != 0.) {
+	    ++(*info);
+	}
+/* L160: */
+    }
+    goto L180;
+
+/*     Sort eigenvalues in increasing order. */
+
+L170:
+    dlasrt_("I", n, &d__[1], info);
+
+L180:
+    return 0;
+
+/*     End of DSTERF */
+
+} /* dsterf_ */
diff --git a/contrib/libs/clapack/dstev.c b/contrib/libs/clapack/dstev.c
new file mode 100644
index 0000000000..db343ac1a9
--- /dev/null
+++ b/contrib/libs/clapack/dstev.c
@@ -0,0 +1,212 @@
+/* dstev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dstev_(char *jobz, integer *n, doublereal *d__, 
+	doublereal *e, doublereal *z__, integer *ldz, doublereal *work, 
+	integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    doublereal eps;
+    integer imax;
+    doublereal rmin, rmax, tnrm;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, 
+	    integer *);
+    doublereal sigma;
+    extern logical lsame_(char *, char *);
+    logical wantz;
+    extern doublereal dlamch_(char *);
+    integer iscale;
+    doublereal safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal bignum;
+    extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+    extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *, 
+	     integer *), dsteqr_(char *, integer *, doublereal *, doublereal *
+, doublereal *, integer *, doublereal *, integer *);
+    doublereal smlnum;
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DSTEV computes all eigenvalues and, optionally, eigenvectors of a */
+/*  real symmetric tridiagonal matrix A. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOBZ    (input) CHARACTER*1 */
+/*          = 'N':  Compute eigenvalues only; */
+/*          = 'V':  Compute eigenvalues and eigenvectors. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix.  N >= 0. */
+
+/*  D       (input/output) DOUBLE PRECISION array, dimension (N) */
+/*          On entry, the n diagonal elements of the tridiagonal matrix */
+/*          A. */
+/*          On exit, if INFO = 0, the eigenvalues in ascending order. */
+
+/*  E       (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/*          On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/*          matrix A, stored in elements 1 to N-1 of E. */
+/*          On exit, the contents of E are destroyed. */
+
+/*  Z       (output) DOUBLE PRECISION array, dimension (LDZ, N) */
+/*          If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/*          eigenvectors of the matrix A, with the i-th column of Z */
+/*          holding the eigenvector associated with D(i). */
+/*          If JOBZ = 'N', then Z is not referenced. */
+
+/*  LDZ     (input) INTEGER */
+/*          The leading dimension of the array Z.  LDZ >= 1, and if */
+/*          JOBZ = 'V', LDZ >= max(1,N). */
+
+/*  WORK    (workspace) DOUBLE PRECISION array, dimension (max(1,2*N-2)) */
+/*          If JOBZ = 'N', WORK is not referenced. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, the algorithm failed to converge; i */
+/*                off-diagonal elements of E did not converge to zero. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*ldz < 1 || wantz && *ldz < *n) {
+	*info = -6;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSTEV ", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (*n == 1) {
+	if (wantz) {
+	    z__[z_dim1 + 1] = 1.;
+	}
+	return 0;
+    }
+
+/*     Get machine constants. */
+
+    safmin = dlamch_("Safe minimum");
+    eps = dlamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1. / smlnum;
+    rmin = sqrt(smlnum);
+    rmax = sqrt(bignum);
+
+/*     Scale matrix norm into [RMIN, RMAX] to avoid over/underflow. */
+
+    iscale = 0;
+    tnrm = dlanst_("M", n, &d__[1], &e[1]);
+    if (tnrm > 0. && tnrm < rmin) {
+	iscale = 1;
+	sigma = rmin / tnrm;
+    } else if (tnrm > rmax) {
+	iscale = 1;
+	sigma = rmax / tnrm;
+    }
+    if (iscale == 1) {
+	dscal_(n, &sigma, &d__[1], &c__1);
+	i__1 = *n - 1;
+	dscal_(&i__1, &sigma, &e[1], &c__1);
+    }
+
+/*     For eigenvalues only, call DSTERF.  For eigenvalues and */
+/*     eigenvectors, call DSTEQR. */
+
+    if (! wantz) {
+	dsterf_(n, &d__[1], &e[1], info);
+    } else {
+	dsteqr_("I", n, &d__[1], &e[1], &z__[z_offset], ldz, &work[1], info);
+    }
+
+/*     If matrix was scaled, rescale the IMAX converged eigenvalues by 1/SIGMA. */
+
+    if (iscale == 1) {
+	if (*info == 0) {
+	    imax = *n;
+	} else {
+	    imax = *info - 1;
+	}
+	d__1 = 1. / sigma;
+	dscal_(&imax, &d__1, &d__[1], &c__1);
+    }
+
+    return 0;
+
+/*     End of DSTEV */
+
+} /* dstev_ */
diff --git a/contrib/libs/clapack/dstevd.c b/contrib/libs/clapack/dstevd.c
new file mode 100644
index 0000000000..ffd0167247
--- /dev/null
+++ b/contrib/libs/clapack/dstevd.c
@@ -0,0 +1,273 @@
+/* dstevd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dstevd_(char *jobz, integer *n, doublereal *d__, 
+	doublereal *e, doublereal *z__, integer *ldz, doublereal *work, 
+	integer *lwork, integer *iwork, integer *liwork, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    doublereal eps, rmin, rmax, tnrm;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, 
+	    integer *);
+    doublereal sigma;
+    extern logical lsame_(char *, char *);
+    integer lwmin;
+    logical wantz;
+    extern doublereal dlamch_(char *);
+    integer iscale;
+    extern /* Subroutine */ int dstedc_(char *, integer *, doublereal *, 
+	    doublereal *, doublereal *, integer *, doublereal *, integer *, 
+	    integer *, integer *, integer *);
+    doublereal safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal bignum;
+    extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+    extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *, 
+	     integer *);
+    integer liwmin;
+    doublereal smlnum;
+    logical lquery;
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DSTEVD computes all eigenvalues and, optionally, eigenvectors of a */
+/*  real symmetric tridiagonal matrix. If eigenvectors are desired, it */
+/*  uses a divide and conquer algorithm. */
+
+/*  The divide and conquer algorithm makes very mild assumptions about */
+/*  floating point arithmetic. It will work on machines with a guard */
+/*  digit in add/subtract, or on those binary machines without guard */
+/*  digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/*  Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/*  without guard digits, but we know of none. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOBZ    (input) CHARACTER*1 */
+/*          = 'N':  Compute eigenvalues only; */
+/*          = 'V':  Compute eigenvalues and eigenvectors. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix.  N >= 0. */
+
+/*  D       (input/output) DOUBLE PRECISION array, dimension (N) */
+/*          On entry, the n diagonal elements of the tridiagonal matrix */
+/*          A. */
+/*          On exit, if INFO = 0, the eigenvalues in ascending order. */
+
+/*  E       (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/*          On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/*          matrix A, stored in elements 1 to N-1 of E. */
+/*          On exit, the contents of E are destroyed. */
+
+/*  Z       (output) DOUBLE PRECISION array, dimension (LDZ, N) */
+/*          If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/*          eigenvectors of the matrix A, with the i-th column of Z */
+/*          holding the eigenvector associated with D(i). */
+/*          If JOBZ = 'N', then Z is not referenced. */
+
+/*  LDZ     (input) INTEGER */
+/*          The leading dimension of the array Z.  LDZ >= 1, and if */
+/*          JOBZ = 'V', LDZ >= max(1,N). */
+
+/*  WORK    (workspace/output) DOUBLE PRECISION array, */
+/*                                         dimension (LWORK) */
+/*          On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/*  LWORK   (input) INTEGER */
+/*          The dimension of the array WORK. */
+/*          If JOBZ  = 'N' or N <= 1 then LWORK must be at least 1. */
+/*          If JOBZ  = 'V' and N > 1 then LWORK must be at least */
+/*                         ( 1 + 4*N + N**2 ). */
+
+/*          If LWORK = -1, then a workspace query is assumed; the routine */
+/*          only calculates the optimal sizes of the WORK and IWORK */
+/*          arrays, returns these values as the first entries of the WORK */
+/*          and IWORK arrays, and no error message related to LWORK or */
+/*          LIWORK is issued by XERBLA. */
+
+/*  IWORK   (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/*          On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/*  LIWORK  (input) INTEGER */
+/*          The dimension of the array IWORK. */
+/*          If JOBZ  = 'N' or N <= 1 then LIWORK must be at least 1. */
+/*          If JOBZ  = 'V' and N > 1 then LIWORK must be at least 3+5*N. */
+
+/*          If LIWORK = -1, then a workspace query is assumed; the */
+/*          routine only calculates the optimal sizes of the WORK and */
+/*          IWORK arrays, returns these values as the first entries of */
+/*          the WORK and IWORK arrays, and no error message related to */
+/*          LWORK or LIWORK is issued by XERBLA. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, the algorithm failed to converge; i */
+/*                off-diagonal elements of E did not converge to zero. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    lquery = *lwork == -1 || *liwork == -1;
+
+    *info = 0;
+    liwmin = 1;
+    lwmin = 1;
+    if (*n > 1 && wantz) {
+/* Computing 2nd power */
+	i__1 = *n;
+	lwmin = (*n << 2) + 1 + i__1 * i__1;
+	liwmin = *n * 5 + 3;
+    }
+
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*ldz < 1 || wantz && *ldz < *n) {
+	*info = -6;
+    }
+
+    if (*info == 0) {
+	work[1] = (doublereal) lwmin;
+	iwork[1] = liwmin;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -8;
+	} else if (*liwork < liwmin && ! lquery) {
+	    *info = -10;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSTEVD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (*n == 1) {
+	if (wantz) {
+	    z__[z_dim1 + 1] = 1.;
+	}
+	return 0;
+    }
+
+/*     Get machine constants. */
+
+    safmin = dlamch_("Safe minimum");
+    eps = dlamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1. / smlnum;
+    rmin = sqrt(smlnum);
+    rmax = sqrt(bignum);
+
+/*     Scale matrix norm into [RMIN, RMAX] to avoid over/underflow. */
+
+    iscale = 0;
+    tnrm = dlanst_("M", n, &d__[1], &e[1]);
+    if (tnrm > 0. && tnrm < rmin) {
+	iscale = 1;
+	sigma = rmin / tnrm;
+    } else if (tnrm > rmax) {
+	iscale = 1;
+	sigma = rmax / tnrm;
+    }
+    if (iscale == 1) {
+	dscal_(n, &sigma, &d__[1], &c__1);
+	i__1 = *n - 1;
+	dscal_(&i__1, &sigma, &e[1], &c__1);
+    }
+
+/*     For eigenvalues only, call DSTERF.  For eigenvalues and */
+/*     eigenvectors, call DSTEDC. */
+
+    if (! wantz) {
+	dsterf_(n, &d__[1], &e[1], info);
+    } else {
+	dstedc_("I", n, &d__[1], &e[1], &z__[z_offset], ldz, &work[1], lwork, 
+		&iwork[1], liwork, info);
+    }
+
+/*     If matrix was scaled, rescale all eigenvalues by 1/SIGMA. */
+
+    if (iscale == 1) {
+	d__1 = 1. / sigma;
+	dscal_(n, &d__1, &d__[1], &c__1);
+    }
+
+    work[1] = (doublereal) lwmin;
+    iwork[1] = liwmin;
+
+    return 0;
+
+/*     End of DSTEVD */
+
+} /* dstevd_ */
diff --git a/contrib/libs/clapack/dstevr.c b/contrib/libs/clapack/dstevr.c
new file mode 100644
index 0000000000..db78da67d3
--- /dev/null
+++ b/contrib/libs/clapack/dstevr.c
@@ -0,0 +1,550 @@
+/* dstevr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__10 = 10;
+static integer c__1 = 1;
+static integer c__2 = 2;
+static integer c__3 = 3;
+static integer c__4 = 4;
+
+/* Subroutine */ int dstevr_(char *jobz, char *range, integer *n, doublereal *
+	d__, doublereal *e, doublereal *vl, doublereal *vu, integer *il,
+	integer *iu, doublereal *abstol, integer *m, doublereal *w,
+	doublereal *z__, integer *ldz, integer *isuppz, doublereal *work,
+	integer *lwork, integer *iwork, integer *liwork, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1, i__2;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, jj;
+    doublereal eps, vll, vuu, tmp1;
+    integer imax;
+    doublereal rmin, rmax;
+    logical test;
+    doublereal tnrm;
+    integer itmp1;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    doublereal sigma;
+    extern logical lsame_(char *, char *);
+    char order[1];
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *), dswap_(integer *, doublereal *, integer
+	    *, doublereal *, integer *);
+    integer lwmin;
+    logical wantz;
+    extern doublereal dlamch_(char *);
+    logical alleig, indeig;
+    integer iscale, ieeeok, indibl, indifl;
+    logical valeig;
+    doublereal safmin;
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal bignum;
+    extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+    integer indisp;
+    extern /* Subroutine */ int dstein_(integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, doublereal *, integer *, integer *, integer *),
+	    dsterf_(integer *, doublereal *, doublereal *, integer *);
+    integer indiwo;
+    extern /* Subroutine */ int dstebz_(char *, char *, integer *, doublereal
+	    *, doublereal *, integer *, integer *, doublereal *, doublereal *,
+	    doublereal *, integer *, integer *, doublereal *, integer *,
+	    integer *, doublereal *, integer *, integer *),
+	    dstemr_(char *, char *, integer *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, integer *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, integer *,
+	    logical *, doublereal *, integer *, integer *, integer *, integer
+	    *);
+    integer liwmin;
+    logical tryrac;
+    integer nsplit;
+    doublereal smlnum;
+    logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSTEVR computes selected eigenvalues and, optionally, eigenvectors */
+/* of a real symmetric tridiagonal matrix T. Eigenvalues and */
+/* eigenvectors can be selected by specifying either a range of values */
+/* or a range of indices for the desired eigenvalues. */
+
+/* Whenever possible, DSTEVR calls DSTEMR to compute the */
+/* eigenspectrum using Relatively Robust Representations. DSTEMR */
+/* computes eigenvalues by the dqds algorithm, while orthogonal */
+/* eigenvectors are computed from various "good" L D L^T representations */
+/* (also known as Relatively Robust Representations). Gram-Schmidt */
+/* orthogonalization is avoided as far as possible. More specifically, */
+/* the various steps of the algorithm are as follows. For the i-th */
+/* unreduced block of T, */
+/* (a) Compute T - sigma_i = L_i D_i L_i^T, such that L_i D_i L_i^T */
+/* is a relatively robust representation, */
+/* (b) Compute the eigenvalues, lambda_j, of L_i D_i L_i^T to high */
+/* relative accuracy by the dqds algorithm, */
+/* (c) If there is a cluster of close eigenvalues, "choose" sigma_i */
+/* close to the cluster, and go to step (a), */
+/* (d) Given the approximate eigenvalue lambda_j of L_i D_i L_i^T, */
+/* compute the corresponding eigenvector by forming a */
+/* rank-revealing twisted factorization. */
+/* The desired accuracy of the output can be specified by the input */
+/* parameter ABSTOL. */
+
+/* For more details, see "A new O(n^2) algorithm for the symmetric */
+/* tridiagonal eigenvalue/eigenvector problem", by Inderjit Dhillon, */
+/* Computer Science Division Technical Report No. UCB//CSD-97-971, */
+/* UC Berkeley, May 1997. */
+
+
+/* Note 1 : DSTEVR calls DSTEMR when the full spectrum is requested */
+/* on machines which conform to the ieee-754 floating point standard. */
+/* DSTEVR calls DSTEBZ and DSTEIN on non-ieee machines and */
+/* when partial spectrum requests are made. */
+
+/* Normal execution of DSTEMR may create NaNs and infinities and */
+/* hence may abort due to a floating point exception in environments */
+/* which do not handle NaNs and infinities in the ieee standard default */
+/* manner. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+/* ********* For RANGE = 'V' or 'I' and IU - IL < N - 1, DSTEBZ and */
+/* ********* DSTEIN are called */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the n diagonal elements of the tridiagonal matrix */
+/* A. */
+/* On exit, D may be multiplied by a constant factor chosen */
+/* to avoid over/underflow in computing the eigenvalues. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (max(1,N-1)) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix A in elements 1 to N-1 of E. */
+/* On exit, E may be multiplied by a constant factor chosen */
+/* to avoid over/underflow in computing the eigenvalues. */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) DOUBLE PRECISION */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing A to tridiagonal form. */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices */
+/* with Guaranteed High Relative Accuracy," by Demmel and */
+/* Kahan, LAPACK Working Note #3. */
+
+/* If high relative accuracy is important, set ABSTOL to */
+/* DLAMCH( 'Safe minimum' ). Doing so will guarantee that */
+/* eigenvalues are computed to high relative accuracy when */
+/* possible in future releases. The current code does not */
+/* make any guarantees about high relative accuracy, but */
+/* future releases will. See J. Barlow and J. Demmel, */
+/* "Computing Accurate Eigensystems of Scaled Diagonally */
+/* Dominant Matrices", LAPACK Working Note #7, for a discussion */
+/* of which matrices define their eigenvalues to high relative */
+/* accuracy. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* The first M elements contain the selected eigenvalues in */
+/* ascending order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, max(1,M) ) */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* ISUPPZ (output) INTEGER array, dimension ( 2*max(1,M) ) */
+/* The support of the eigenvectors in Z, i.e., the indices */
+/* indicating the nonzero elements in Z. The i-th eigenvector */
+/* is nonzero only in elements ISUPPZ( 2*i-1 ) through */
+/* ISUPPZ( 2*i ). */
+/* ********* Implemented only for RANGE = 'A' or 'I' and IU - IL = N - 1 */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal (and */
+/* minimal) LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,20*N). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK and IWORK */
+/* arrays, returns these values as the first entries of the WORK */
+/* and IWORK arrays, and no error message related to LWORK or */
+/* LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal (and */
+/* minimal) LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. LIWORK >= max(1,10*N). */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK and IWORK arrays, and no error message related to */
+/* LWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: Internal error */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Inderjit Dhillon, IBM Almaden, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Ken Stanley, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --isuppz;
+    --work;
+    --iwork;
+
+    /* Function Body */
+
+/* ILAENV with ISPEC = 10 reports whether the target implements */
+/* IEEE-754 NaN/Inf arithmetic; the DSTEMR fast path below is taken */
+/* only when IEEEOK == 1 (see Note 1 in the header above). */
+    ieeeok = ilaenv_(&c__10, "DSTEVR", "N", &c__1, &c__2, &c__3, &c__4);
+
+    wantz = lsame_(jobz, "V");
+    alleig = lsame_(range, "A");
+    valeig = lsame_(range, "V");
+    indeig = lsame_(range, "I");
+
+    lquery = *lwork == -1 || *liwork == -1;
+/* Computing MAX */
+    i__1 = 1, i__2 = *n * 20;
+    lwmin = max(i__1,i__2);
+/* Computing MAX */
+    i__1 = 1, i__2 = *n * 10;
+    liwmin = max(i__1,i__2);
+
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (alleig || valeig || indeig)) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else {
+	if (valeig) {
+	    if (*n > 0 && *vu <= *vl) {
+		*info = -7;
+	    }
+	} else if (indeig) {
+	    if (*il < 1 || *il > max(1,*n)) {
+		*info = -8;
+	    } else if (*iu < min(*n,*il) || *iu > *n) {
+		*info = -9;
+	    }
+	}
+    }
+    if (*info == 0) {
+	if (*ldz < 1 || wantz && *ldz < *n) {
+	    *info = -14;
+	}
+    }
+
+/* Report the minimal workspace sizes in WORK(1)/IWORK(1) even on a */
+/* workspace query, then validate the supplied sizes. */
+    if (*info == 0) {
+	work[1] = (doublereal) lwmin;
+	iwork[1] = liwmin;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -17;
+	} else if (*liwork < liwmin && ! lquery) {
+	    *info = -19;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSTEVR", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *m = 0;
+    if (*n == 0) {
+	return 0;
+    }
+
+/* N = 1: the single diagonal entry is the only eigenvalue; report it */
+/* unless a value range excludes it. */
+    if (*n == 1) {
+	if (alleig || indeig) {
+	    *m = 1;
+	    w[1] = d__[1];
+	} else {
+	    if (*vl < d__[1] && *vu >= d__[1]) {
+		*m = 1;
+		w[1] = d__[1];
+	    }
+	}
+	if (wantz) {
+	    z__[z_dim1 + 1] = 1.;
+	}
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    safmin = dlamch_("Safe minimum");
+    eps = dlamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1. / smlnum;
+    rmin = sqrt(smlnum);
+/* Computing MIN */
+    d__1 = sqrt(bignum), d__2 = 1. / sqrt(sqrt(safmin));
+    rmax = min(d__1,d__2);
+
+
+/* Scale matrix to allowable range, if necessary. */
+
+    iscale = 0;
+    vll = *vl;
+    vuu = *vu;
+
+    tnrm = dlanst_("M", n, &d__[1], &e[1]);
+    if (tnrm > 0. && tnrm < rmin) {
+	iscale = 1;
+	sigma = rmin / tnrm;
+    } else if (tnrm > rmax) {
+	iscale = 1;
+	sigma = rmax / tnrm;
+    }
+    if (iscale == 1) {
+	dscal_(n, &sigma, &d__[1], &c__1);
+	i__1 = *n - 1;
+	dscal_(&i__1, &sigma, &e[1], &c__1);
+	if (valeig) {
+/* The search interval must be scaled by the same factor as T. */
+	    vll = *vl * sigma;
+	    vuu = *vu * sigma;
+	}
+    }
+/* Initialize indices into workspaces. Note: These indices are used only */
+/* if DSTERF or DSTEMR fail. */
+/* IWORK(INDIBL:INDIBL+M-1) corresponds to IBLOCK in DSTEBZ and */
+/* stores the block indices of each of the M<=N eigenvalues. */
+    indibl = 1;
+/* IWORK(INDISP:INDISP+NSPLIT-1) corresponds to ISPLIT in DSTEBZ and */
+/* stores the starting and finishing indices of each block. */
+    indisp = indibl + *n;
+/* IWORK(INDIFL:INDIFL+N-1) stores the indices of the eigenvectors */
+/* that fail to converge in DSTEIN. This information is discarded; */
+/* if any fail, the driver returns INFO > 0. */
+    indifl = indisp + *n;
+/* INDIWO is the offset of the remaining integer workspace. */
+/* NOTE(review): INDIWO equals INDIFL (both are INDISP + N), so the */
+/* IFAIL output of DSTEIN and its integer workspace share storage */
+/* below. This matches the upstream netlib dstevr.f as shipped -- */
+/* confirm against upstream before changing. */
+    indiwo = indisp + *n;
+
+/* If all eigenvalues are desired, then */
+/* call DSTERF or DSTEMR. If this fails for some eigenvalue, then */
+/* try DSTEBZ. */
+
+
+/* RANGE = 'I' with IL = 1, IU = N is equivalent to RANGE = 'A'. */
+    test = FALSE_;
+    if (indeig) {
+	if (*il == 1 && *iu == *n) {
+	    test = TRUE_;
+	}
+    }
+    if ((alleig || test) && ieeeok == 1) {
+/* WORK(1:N-1) holds a copy of E (and, for the DSTEMR branch, */
+/* WORK(N+1:2N) a copy of D) so the originals are preserved for */
+/* the DSTEBZ fallback; free workspace then starts at 2N+1. */
+	i__1 = *n - 1;
+	dcopy_(&i__1, &e[1], &c__1, &work[1], &c__1);
+	if (! wantz) {
+	    dcopy_(n, &d__[1], &c__1, &w[1], &c__1);
+	    dsterf_(n, &w[1], &work[1], info);
+	} else {
+	    dcopy_(n, &d__[1], &c__1, &work[*n + 1], &c__1);
+/* Request high relative accuracy (TRYRAC) only when the */
+/* caller's tolerance is at the level of machine precision. */
+	    if (*abstol <= *n * 2. * eps) {
+		tryrac = TRUE_;
+	    } else {
+		tryrac = FALSE_;
+	    }
+	    i__1 = *lwork - (*n << 1);
+	    dstemr_(jobz, "A", n, &work[*n + 1], &work[1], vl, vu, il, iu, m,
+		    &w[1], &z__[z_offset], ldz, n, &isuppz[1], &tryrac, &work[
+		    (*n << 1) + 1], &i__1, &iwork[1], liwork, info);
+
+	}
+	if (*info == 0) {
+	    *m = *n;
+	    goto L10;
+	}
+/* Fast path failed: clear INFO and fall through to DSTEBZ/DSTEIN. */
+	*info = 0;
+    }
+
+/* Otherwise, call DSTEBZ and, if eigenvectors are desired, DSTEIN. */
+
+/* Order 'B' (by block) is required by DSTEIN; 'E' (by value) is */
+/* sufficient when only eigenvalues are wanted. */
+    if (wantz) {
+	*(unsigned char *)order = 'B';
+    } else {
+	*(unsigned char *)order = 'E';
+    }
+    dstebz_(range, order, n, &vll, &vuu, il, iu, abstol, &d__[1], &e[1], m, &
+	    nsplit, &w[1], &iwork[indibl], &iwork[indisp], &work[1], &iwork[
+	    indiwo], info);
+
+    if (wantz) {
+	dstein_(n, &d__[1], &e[1], m, &w[1], &iwork[indibl], &iwork[indisp], &
+		z__[z_offset], ldz, &work[1], &iwork[indiwo], &iwork[indifl],
+		info);
+    }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+L10:
+    if (iscale == 1) {
+	if (*info == 0) {
+	    imax = *m;
+	} else {
+/* On failure only the first INFO-1 eigenvalues are good. */
+	    imax = *info - 1;
+	}
+	d__1 = 1. / sigma;
+	dscal_(&imax, &d__1, &w[1], &c__1);
+    }
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+/* Simple selection sort on W(1:M); eigenvector columns of Z and the */
+/* block indices in IWORK(1:M) (INDIBL = 1) are permuted alongside. */
+    if (wantz) {
+	i__1 = *m - 1;
+	for (j = 1; j <= i__1; ++j) {
+	    i__ = 0;
+	    tmp1 = w[j];
+	    i__2 = *m;
+	    for (jj = j + 1; jj <= i__2; ++jj) {
+		if (w[jj] < tmp1) {
+		    i__ = jj;
+		    tmp1 = w[jj];
+		}
+/* L20: */
+	    }
+
+	    if (i__ != 0) {
+		itmp1 = iwork[i__];
+		w[i__] = w[j];
+		iwork[i__] = iwork[j];
+		w[j] = tmp1;
+		iwork[j] = itmp1;
+		dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+			&c__1);
+	    }
+/* L30: */
+	}
+    }
+
+/* Causes problems with tests 19 & 20: */
+/* IF (wantz .and. INDEIG ) Z( 1,1) = Z(1,1) / 1.002 + .002 */
+
+
+    work[1] = (doublereal) lwmin;
+    iwork[1] = liwmin;
+    return 0;
+
+/* End of DSTEVR */
+
+} /* dstevr_ */
diff --git a/contrib/libs/clapack/dstevx.c b/contrib/libs/clapack/dstevx.c
new file mode 100644
index 0000000000..d9f1bb1d44
--- /dev/null
+++ b/contrib/libs/clapack/dstevx.c
@@ -0,0 +1,432 @@
+/* dstevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dstevx_(char *jobz, char *range, integer *n, doublereal *
+	d__, doublereal *e, doublereal *vl, doublereal *vu, integer *il,
+	integer *iu, doublereal *abstol, integer *m, doublereal *w,
+	doublereal *z__, integer *ldz, doublereal *work, integer *iwork,
+	integer *ifail, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1, i__2;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, jj;
+    doublereal eps, vll, vuu, tmp1;
+    integer imax;
+    doublereal rmin, rmax;
+    logical test;
+    doublereal tnrm;
+    integer itmp1;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    doublereal sigma;
+    extern logical lsame_(char *, char *);
+    char order[1];
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *), dswap_(integer *, doublereal *, integer
+	    *, doublereal *, integer *);
+    logical wantz;
+    extern doublereal dlamch_(char *);
+    logical alleig, indeig;
+    integer iscale, indibl;
+    logical valeig;
+    doublereal safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal bignum;
+    extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+    integer indisp;
+    extern /* Subroutine */ int dstein_(integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, doublereal *, integer *, integer *, integer *),
+	    dsterf_(integer *, doublereal *, doublereal *, integer *);
+    integer indiwo;
+    extern /* Subroutine */ int dstebz_(char *, char *, integer *, doublereal
+	    *, doublereal *, integer *, integer *, doublereal *, doublereal *,
+	    doublereal *, integer *, integer *, doublereal *, integer *,
+	    integer *, doublereal *, integer *, integer *);
+    integer indwrk;
+    extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *);
+    integer nsplit;
+    doublereal smlnum;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSTEVX computes selected eigenvalues and, optionally, eigenvectors */
+/* of a real symmetric tridiagonal matrix A. Eigenvalues and */
+/* eigenvectors can be selected by specifying either a range of values */
+/* or a range of indices for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the n diagonal elements of the tridiagonal matrix */
+/* A. */
+/* On exit, D may be multiplied by a constant factor chosen */
+/* to avoid over/underflow in computing the eigenvalues. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (max(1,N-1)) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix A in elements 1 to N-1 of E. */
+/* On exit, E may be multiplied by a constant factor chosen */
+/* to avoid over/underflow in computing the eigenvalues. */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) DOUBLE PRECISION */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less */
+/* than or equal to zero, then EPS*|T| will be used in */
+/* its place, where |T| is the 1-norm of the tridiagonal */
+/* matrix. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*DLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*DLAMCH('S'). */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices */
+/* with Guaranteed High Relative Accuracy," by Demmel and */
+/* Kahan, LAPACK Working Note #3. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* The first M elements contain the selected eigenvalues in */
+/* ascending order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, max(1,M) ) */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If an eigenvector fails to converge (INFO > 0), then that */
+/* column of Z contains the latest approximation to the */
+/* eigenvector, and the index of the eigenvector is returned */
+/* in IFAIL. If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (5*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, then i eigenvectors failed to converge. */
+/* Their indices are stored in array IFAIL. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --iwork;
+    --ifail;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    alleig = lsame_(range, "A");
+    valeig = lsame_(range, "V");
+    indeig = lsame_(range, "I");
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (alleig || valeig || indeig)) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else {
+	if (valeig) {
+	    if (*n > 0 && *vu <= *vl) {
+		*info = -7;
+	    }
+	} else if (indeig) {
+	    if (*il < 1 || *il > max(1,*n)) {
+		*info = -8;
+	    } else if (*iu < min(*n,*il) || *iu > *n) {
+		*info = -9;
+	    }
+	}
+    }
+    if (*info == 0) {
+	if (*ldz < 1 || wantz && *ldz < *n) {
+	    *info = -14;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSTEVX", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *m = 0;
+    if (*n == 0) {
+	return 0;
+    }
+
+/* N = 1: the single diagonal entry is the only eigenvalue; report it */
+/* unless a value range excludes it. */
+    if (*n == 1) {
+	if (alleig || indeig) {
+	    *m = 1;
+	    w[1] = d__[1];
+	} else {
+	    if (*vl < d__[1] && *vu >= d__[1]) {
+		*m = 1;
+		w[1] = d__[1];
+	    }
+	}
+	if (wantz) {
+	    z__[z_dim1 + 1] = 1.;
+	}
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    safmin = dlamch_("Safe minimum");
+    eps = dlamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1. / smlnum;
+    rmin = sqrt(smlnum);
+/* Computing MIN */
+    d__1 = sqrt(bignum), d__2 = 1. / sqrt(sqrt(safmin));
+    rmax = min(d__1,d__2);
+
+/* Scale matrix to allowable range, if necessary. */
+
+    iscale = 0;
+/* VLL/VUU are referenced only when RANGE = 'V'; give them defined */
+/* dummy values otherwise. */
+    if (valeig) {
+	vll = *vl;
+	vuu = *vu;
+    } else {
+	vll = 0.;
+	vuu = 0.;
+    }
+    tnrm = dlanst_("M", n, &d__[1], &e[1]);
+    if (tnrm > 0. && tnrm < rmin) {
+	iscale = 1;
+	sigma = rmin / tnrm;
+    } else if (tnrm > rmax) {
+	iscale = 1;
+	sigma = rmax / tnrm;
+    }
+    if (iscale == 1) {
+	dscal_(n, &sigma, &d__[1], &c__1);
+	i__1 = *n - 1;
+	dscal_(&i__1, &sigma, &e[1], &c__1);
+	if (valeig) {
+/* The search interval must be scaled by the same factor as A. */
+	    vll = *vl * sigma;
+	    vuu = *vu * sigma;
+	}
+    }
+
+/* If all eigenvalues are desired and ABSTOL is less than zero, then */
+/* call DSTERF or SSTEQR. If this fails for some eigenvalue, then */
+/* try DSTEBZ. */
+
+/* RANGE = 'I' with IL = 1, IU = N is equivalent to RANGE = 'A'. */
+    test = FALSE_;
+    if (indeig) {
+	if (*il == 1 && *iu == *n) {
+	    test = TRUE_;
+	}
+    }
+    if ((alleig || test) && *abstol <= 0.) {
+/* W receives a copy of D and WORK(1:N-1) a copy of E, so the */
+/* originals are preserved for the DSTEBZ fallback. */
+	dcopy_(n, &d__[1], &c__1, &w[1], &c__1);
+	i__1 = *n - 1;
+	dcopy_(&i__1, &e[1], &c__1, &work[1], &c__1);
+	indwrk = *n + 1;
+	if (! wantz) {
+	    dsterf_(n, &w[1], &work[1], info);
+	} else {
+	    dsteqr_("I", n, &w[1], &work[1], &z__[z_offset], ldz, &work[
+		    indwrk], info);
+	    if (*info == 0) {
+/* DSTEQR converged for every eigenvector: clear IFAIL. */
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    ifail[i__] = 0;
+/* L10: */
+		}
+	    }
+	}
+	if (*info == 0) {
+	    *m = *n;
+	    goto L20;
+	}
+/* Fast path failed: clear INFO and fall through to DSTEBZ/DSTEIN. */
+	*info = 0;
+    }
+
+/* Otherwise, call DSTEBZ and, if eigenvectors are desired, SSTEIN. */
+
+/* Order 'B' (by block) is required by DSTEIN; 'E' (by value) is */
+/* sufficient when only eigenvalues are wanted. */
+    if (wantz) {
+	*(unsigned char *)order = 'B';
+    } else {
+	*(unsigned char *)order = 'E';
+    }
+/* Integer workspace partition: IWORK(1:N) holds IBLOCK, */
+/* IWORK(N+1:2N) holds ISPLIT, IWORK(2N+1:...) is scratch. */
+    indwrk = 1;
+    indibl = 1;
+    indisp = indibl + *n;
+    indiwo = indisp + *n;
+    dstebz_(range, order, n, &vll, &vuu, il, iu, abstol, &d__[1], &e[1], m, &
+	    nsplit, &w[1], &iwork[indibl], &iwork[indisp], &work[indwrk], &
+	    iwork[indiwo], info);
+
+    if (wantz) {
+	dstein_(n, &d__[1], &e[1], m, &w[1], &iwork[indibl], &iwork[indisp], &
+		z__[z_offset], ldz, &work[indwrk], &iwork[indiwo], &ifail[1],
+		info);
+    }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+L20:
+    if (iscale == 1) {
+	if (*info == 0) {
+	    imax = *m;
+	} else {
+/* On failure only the first INFO-1 eigenvalues are good. */
+	    imax = *info - 1;
+	}
+	d__1 = 1. / sigma;
+	dscal_(&imax, &d__1, &w[1], &c__1);
+    }
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+/* Simple selection sort on W(1:M); the eigenvector columns of Z, the */
+/* block indices in IWORK(INDIBL:...), and -- when some eigenvectors */
+/* failed (INFO != 0) -- the IFAIL entries are permuted alongside. */
+    if (wantz) {
+	i__1 = *m - 1;
+	for (j = 1; j <= i__1; ++j) {
+	    i__ = 0;
+	    tmp1 = w[j];
+	    i__2 = *m;
+	    for (jj = j + 1; jj <= i__2; ++jj) {
+		if (w[jj] < tmp1) {
+		    i__ = jj;
+		    tmp1 = w[jj];
+		}
+/* L30: */
+	    }
+
+	    if (i__ != 0) {
+		itmp1 = iwork[indibl + i__ - 1];
+		w[i__] = w[j];
+		iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+		w[j] = tmp1;
+		iwork[indibl + j - 1] = itmp1;
+		dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+			&c__1);
+		if (*info != 0) {
+		    itmp1 = ifail[i__];
+		    ifail[i__] = ifail[j];
+		    ifail[j] = itmp1;
+		}
+	    }
+/* L40: */
+	}
+    }
+
+    return 0;
+
+/* End of DSTEVX */
+
+} /* dstevx_ */
diff --git a/contrib/libs/clapack/dsycon.c b/contrib/libs/clapack/dsycon.c
new file mode 100644
index 0000000000..c72e8ceb1f
--- /dev/null
+++ b/contrib/libs/clapack/dsycon.c
@@ -0,0 +1,204 @@
+/* dsycon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dsycon_(char *uplo, integer *n, doublereal *a, integer *
+	lda, integer *ipiv, doublereal *anorm, doublereal *rcond, doublereal *
+	work, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1;
+
+    /* Local variables */
+    integer i__, kase;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    logical upper;
+    extern /* Subroutine */ int dlacn2_(integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, integer *), xerbla_(char *,
+	    integer *);
+    doublereal ainvnm;
+    extern /* Subroutine */ int dsytrs_(char *, integer *, integer *,
+	    doublereal *, integer *, integer *, doublereal *, integer *,
+	    integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYCON estimates the reciprocal of the condition number (in the */
+/* 1-norm) of a real symmetric matrix A using the factorization */
+/* A = U*D*U**T or A = L*D*L**T computed by DSYTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**T; */
+/* = 'L': Lower triangular, form is A = L*D*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The block diagonal matrix D and the multipliers used to */
+/* obtain the factor U or L as computed by DSYTRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by DSYTRF. */
+
+/* ANORM (input) DOUBLE PRECISION */
+/* The 1-norm of the original matrix A. */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (2*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* Shift the array bases so the 1-based Fortran indexing of the */
+/* translated code (a[i + j*a_dim1], ipiv[i], ...) works unchanged. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*anorm < 0.) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSYCON", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *rcond = 0.;
+    if (*n == 0) {
+	*rcond = 1.;
+	return 0;
+    } else if (*anorm <= 0.) {
+	return 0;
+    }
+
+/* Check that the diagonal matrix D is nonsingular. */
+
+/* A positive IPIV(i) marks a 1-by-1 pivot block; a zero on its */
+/* diagonal means D (and hence A) is exactly singular, so we return */
+/* immediately with RCOND left at 0. */
+
+    if (upper) {
+
+/* Upper triangular storage: examine D from bottom to top */
+
+	for (i__ = *n; i__ >= 1; --i__) {
+	    if (ipiv[i__] > 0 && a[i__ + i__ * a_dim1] == 0.) {
+		return 0;
+	    }
+/* L10: */
+	}
+    } else {
+
+/* Lower triangular storage: examine D from top to bottom. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (ipiv[i__] > 0 && a[i__ + i__ * a_dim1] == 0.) {
+		return 0;
+	    }
+/* L20: */
+	}
+    }
+
+/* Estimate the 1-norm of the inverse. */
+
+/* DLACN2 uses reverse communication: while it returns KASE != 0 the */
+/* caller must apply inv(A) to WORK(1:N) and call it again; ISAVE */
+/* carries DLACN2's state between calls, WORK(N+1:2*N) is its */
+/* internal workspace. */
+
+    kase = 0;
+L30:
+    dlacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+    if (kase != 0) {
+
+/* Multiply by inv(L*D*L') or inv(U*D*U'). */
+
+	dsytrs_(uplo, n, &c__1, &a[a_offset], lda, &ipiv[1], &work[1], n,
+		info);
+	goto L30;
+    }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+    if (ainvnm != 0.) {
+	*rcond = 1. / ainvnm / *anorm;
+    }
+
+    return 0;
+
+/* End of DSYCON */
+
+} /* dsycon_ */
diff --git a/contrib/libs/clapack/dsyequb.c b/contrib/libs/clapack/dsyequb.c
new file mode 100644
index 0000000000..4485427d41
--- /dev/null
+++ b/contrib/libs/clapack/dsyequb.c
@@ -0,0 +1,333 @@
+/* dsyequb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dsyequb_(char *uplo, integer *n, doublereal *a, integer *
+	lda, doublereal *s, doublereal *scond, doublereal *amax, doublereal *
+	work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    doublereal d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double sqrt(doublereal), log(doublereal), pow_di(doublereal *, integer *);
+
+    /* Local variables */
+    doublereal d__;
+    integer i__, j;
+    doublereal t, u, c0, c1, c2, si;
+    logical up;
+    doublereal avg, std, tol, base;
+    integer iter;
+    doublereal smin, smax, scale;
+    extern logical lsame_(char *, char *);
+    doublereal sumsq;
+    extern doublereal dlamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal bignum;
+    extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+	    doublereal *, doublereal *);
+    doublereal smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- Contributed by James Demmel, Deaglan Halligan, Yozo Hida and -- */
+/* -- Jason Riedy of Univ. of California Berkeley. -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley and NAG Ltd. -- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYEQUB computes row and column scalings intended to equilibrate a */
+/* symmetric matrix A and reduce its condition number */
+/* (with respect to the two-norm). S contains the scale factors, */
+/* S(i) = 1/sqrt(A(i,i)), chosen so that the scaled matrix B with */
+/* elements B(i,j) = S(i)*A(i,j)*S(j) has ones on the diagonal. This */
+/* choice of S puts the condition number of B within a factor N of the */
+/* smallest possible condition number over all possible diagonal */
+/* scalings. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The N-by-N symmetric matrix whose scaling */
+/* factors are to be computed. Only the diagonal elements of A */
+/* are referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* S (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, S contains the scale factors for A. */
+
+/* SCOND (output) DOUBLE PRECISION */
+/* If INFO = 0, S contains the ratio of the smallest S(i) to */
+/* the largest S(i). If SCOND >= 0.1 and AMAX is neither too */
+/* large nor too small, it is not worth scaling by S. */
+
+/* AMAX (output) DOUBLE PRECISION */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element is nonpositive. */
+
+/* Further Details */
+/* ======= ======= */
+
+/* Reference: Livne, O.E. and Golub, G.H., "Scaling by Binormalization", */
+/* Numerical Algorithms, vol. 35, no. 1, pp. 97-120, January 2004. */
+/* DOI 10.1023/B:NUMA.0000016606.32820.69 */
+/* Tech report version: http://ruready.utah.edu/archive/papers/bin.pdf */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test input parameters. */
+
+    /* Parameter adjustments */
+/* Shift array bases for the 1-based Fortran-style indexing below. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --s;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (! (lsame_(uplo, "U") || lsame_(uplo, "L"))) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSYEQUB", &i__1);
+	return 0;
+    }
+    up = lsame_(uplo, "U");
+    *amax = 0.;
+
+/* Quick return if possible. */
+
+    if (*n == 0) {
+	*scond = 1.;
+	return 0;
+    }
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	s[i__] = 0.;
+    }
+    *amax = 0.;
+
+/* First pass: S(i) accumulates the largest absolute entry of row/ */
+/* column i (the two triangle traversals below cover the same */
+/* symmetric entries), and AMAX tracks the overall largest entry. */
+
+    if (up) {
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = j - 1;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		d__2 = s[i__], d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+		s[i__] = max(d__2,d__3);
+/* Computing MAX */
+		d__2 = s[j], d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+		s[j] = max(d__2,d__3);
+/* Computing MAX */
+		d__2 = *amax, d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+		*amax = max(d__2,d__3);
+	    }
+/* Computing MAX */
+	    d__2 = s[j], d__3 = (d__1 = a[j + j * a_dim1], abs(d__1));
+	    s[j] = max(d__2,d__3);
+/* Computing MAX */
+	    d__2 = *amax, d__3 = (d__1 = a[j + j * a_dim1], abs(d__1));
+	    *amax = max(d__2,d__3);
+	}
+    } else {
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+	    d__2 = s[j], d__3 = (d__1 = a[j + j * a_dim1], abs(d__1));
+	    s[j] = max(d__2,d__3);
+/* Computing MAX */
+	    d__2 = *amax, d__3 = (d__1 = a[j + j * a_dim1], abs(d__1));
+	    *amax = max(d__2,d__3);
+	    i__2 = *n;
+	    for (i__ = j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		d__2 = s[i__], d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+		s[i__] = max(d__2,d__3);
+/* Computing MAX */
+		d__2 = s[j], d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+		s[j] = max(d__2,d__3);
+/* Computing MAX */
+		d__2 = *amax, d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+		*amax = max(d__2,d__3);
+	    }
+	}
+    }
+
+/* Initial guess: reciprocal of the largest entry in each row/column. */
+
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	s[j] = 1. / s[j];
+    }
+
+/* Binormalization iteration (Livne & Golub), at most 100 sweeps. */
+
+    tol = 1. / sqrt(*n * 2.);
+    for (iter = 1; iter <= 100; ++iter) {
+	scale = 0.;
+	sumsq = 0.;
+/* BETA = |A|S */
+/* WORK(1:N) receives beta = |A| * s, using both triangles of the */
+/* symmetric matrix via the stored triangle. */
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    work[i__] = 0.;
+	}
+	if (up) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    t = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+		    work[i__] += (d__1 = a[i__ + j * a_dim1], abs(d__1)) * s[
+			    j];
+		    work[j] += (d__1 = a[i__ + j * a_dim1], abs(d__1)) * s[
+			    i__];
+		}
+		work[j] += (d__1 = a[j + j * a_dim1], abs(d__1)) * s[j];
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		work[j] += (d__1 = a[j + j * a_dim1], abs(d__1)) * s[j];
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    t = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+		    work[i__] += (d__1 = a[i__ + j * a_dim1], abs(d__1)) * s[
+			    j];
+		    work[j] += (d__1 = a[i__ + j * a_dim1], abs(d__1)) * s[
+			    i__];
+		}
+	    }
+	}
+/* avg = s^T beta / n */
+	avg = 0.;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    avg += s[i__] * work[i__];
+	}
+	avg /= *n;
+
+/* Convergence test: WORK(2N+1:3N) holds s(i)*beta(i) - avg; stop */
+/* when its scaled 2-norm (the std of the products) is below */
+/* tol * avg. */
+
+	std = 0.;
+	i__1 = *n * 3;
+	for (i__ = (*n << 1) + 1; i__ <= i__1; ++i__) {
+	    work[i__] = s[i__ - (*n << 1)] * work[i__ - (*n << 1)] - avg;
+	}
+	dlassq_(n, &work[(*n << 1) + 1], &c__1, &scale, &sumsq);
+	std = scale * sqrt(sumsq / *n);
+	if (std < tol * avg) {
+	    goto L999;
+	}
+
+/* Update each scale factor in turn: SI is the positive root of the */
+/* quadratic c2*x^2 + c1*x + c0 = 0, computed in the numerically */
+/* stable form -2*c0 / (c1 + sqrt(c1^2 - 4*c0*c2)); then beta and */
+/* avg are updated incrementally for the change D in s(i). */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    t = (d__1 = a[i__ + i__ * a_dim1], abs(d__1));
+	    si = s[i__];
+	    c2 = (*n - 1) * t;
+	    c1 = (*n - 2) * (work[i__] - t * si);
+	    c0 = -(t * si) * si + work[i__] * 2 * si - *n * avg;
+	    d__ = c1 * c1 - c0 * 4 * c2;
+	    if (d__ <= 0.) {
+/* NOTE(review): returns INFO = -1 (the argument-error convention) */
+/* here rather than a positive diagonal index as the header */
+/* documents for a nonpositive diagonal — kept as translated. */
+		*info = -1;
+		return 0;
+	    }
+	    si = c0 * -2 / (c1 + sqrt(d__));
+	    d__ = si - s[i__];
+	    u = 0.;
+	    if (up) {
+		i__2 = i__;
+		for (j = 1; j <= i__2; ++j) {
+		    t = (d__1 = a[j + i__ * a_dim1], abs(d__1));
+		    u += s[j] * t;
+		    work[j] += d__ * t;
+		}
+		i__2 = *n;
+		for (j = i__ + 1; j <= i__2; ++j) {
+		    t = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+		    u += s[j] * t;
+		    work[j] += d__ * t;
+		}
+	    } else {
+		i__2 = i__;
+		for (j = 1; j <= i__2; ++j) {
+		    t = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+		    u += s[j] * t;
+		    work[j] += d__ * t;
+		}
+		i__2 = *n;
+		for (j = i__ + 1; j <= i__2; ++j) {
+		    t = (d__1 = a[j + i__ * a_dim1], abs(d__1));
+		    u += s[j] * t;
+		    work[j] += d__ * t;
+		}
+	    }
+	    avg += (u + work[i__]) * d__ / *n;
+	    s[i__] = si;
+	}
+    }
+L999:
+
+/* Round each scale factor down to a power of the floating-point */
+/* radix so that scaling by S introduces no rounding error, and */
+/* report SCOND = (smallest S) / (largest S), clamped away from */
+/* under/overflow. */
+
+    smlnum = dlamch_("SAFEMIN");
+    bignum = 1. / smlnum;
+    smin = bignum;
+    smax = 0.;
+    t = 1. / sqrt(avg);
+    base = dlamch_("B");
+    u = 1. / log(base);
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = (integer) (u * log(s[i__] * t));
+	s[i__] = pow_di(&base, &i__2);
+/* Computing MIN */
+	d__1 = smin, d__2 = s[i__];
+	smin = min(d__1,d__2);
+/* Computing MAX */
+	d__1 = smax, d__2 = s[i__];
+	smax = max(d__1,d__2);
+    }
+    *scond = max(smin,smlnum) / min(smax,bignum);
+
+    return 0;
+} /* dsyequb_ */
diff --git a/contrib/libs/clapack/dsyev.c b/contrib/libs/clapack/dsyev.c
new file mode 100644
index 0000000000..0fdcb3a512
--- /dev/null
+++ b/contrib/libs/clapack/dsyev.c
@@ -0,0 +1,283 @@
+/* dsyev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__0 = 0;
+static doublereal c_b17 = 1.;
+
+/* Subroutine */ int dsyev_(char *jobz, char *uplo, integer *n, doublereal *a,
+	 integer *lda, doublereal *w, doublereal *work, integer *lwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer nb;
+    doublereal eps;
+    integer inde;
+    doublereal anrm;
+    integer imax;
+    doublereal rmin, rmax;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    doublereal sigma;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    logical lower, wantz;
+    extern doublereal dlamch_(char *);
+    integer iscale;
+    extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, integer *);
+    doublereal safmin;
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal bignum;
+    integer indtau;
+    extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+	     integer *);
+    extern doublereal dlansy_(char *, char *, integer *, doublereal *,
+	    integer *, doublereal *);
+    integer indwrk;
+    extern /* Subroutine */ int dorgtr_(char *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *, integer *), dsteqr_(char *, integer *, doublereal *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *),
+	    dsytrd_(char *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, integer *, integer *);
+    integer llwork;
+    doublereal smlnum;
+    integer lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYEV computes all eigenvalues and, optionally, eigenvectors of a */
+/* real symmetric matrix A. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA, N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+/* On exit, if JOBZ = 'V', then if INFO = 0, A contains the */
+/* orthonormal eigenvectors of the matrix A. */
+/* If JOBZ = 'N', then on exit the lower triangle (if UPLO='L') */
+/* or the upper triangle (if UPLO='U') of A, including the */
+/* diagonal, is destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,3*N-1). */
+/* For optimal efficiency, LWORK >= (NB+2)*N, */
+/* where NB is the blocksize for DSYTRD returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the algorithm failed to converge; i */
+/* off-diagonal elements of an intermediate tridiagonal */
+/* form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* Shift array bases for 1-based Fortran-style indexing. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --w;
+    --work;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    lower = lsame_(uplo, "L");
+    lquery = *lwork == -1;
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (lower || lsame_(uplo, "U"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    }
+
+    if (*info == 0) {
+/* Optimal workspace is (NB+2)*N with NB the DSYTRD block size; the */
+/* hard minimum accepted is max(1, 3*N-1). */
+	nb = ilaenv_(&c__1, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+	i__1 = 1, i__2 = (nb + 2) * *n;
+	lwkopt = max(i__1,i__2);
+	work[1] = (doublereal) lwkopt;
+
+/* Computing MAX */
+	i__1 = 1, i__2 = *n * 3 - 1;
+	if (*lwork < max(i__1,i__2) && ! lquery) {
+	    *info = -8;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSYEV ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (*n == 1) {
+	w[1] = a[a_dim1 + 1];
+	work[1] = 2.;
+	if (wantz) {
+	    a[a_dim1 + 1] = 1.;
+	}
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    safmin = dlamch_("Safe minimum");
+    eps = dlamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1. / smlnum;
+    rmin = sqrt(smlnum);
+    rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+/* SIGMA brings the max-norm of A into [RMIN, RMAX]; the eigenvalues */
+/* are rescaled by 1/SIGMA at the end. */
+
+    anrm = dlansy_("M", uplo, n, &a[a_offset], lda, &work[1]);
+    iscale = 0;
+    if (anrm > 0. && anrm < rmin) {
+	iscale = 1;
+	sigma = rmin / anrm;
+    } else if (anrm > rmax) {
+	iscale = 1;
+	sigma = rmax / anrm;
+    }
+    if (iscale == 1) {
+	dlascl_(uplo, &c__0, &c__0, &c_b17, &sigma, n, n, &a[a_offset], lda,
+		info);
+    }
+
+/* Call DSYTRD to reduce symmetric matrix to tridiagonal form. */
+
+/* WORK layout: WORK(INDE:INDE+N-2) off-diagonal of T, */
+/* WORK(INDTAU:INDTAU+N-2) Householder scalars, WORK(INDWRK..) the */
+/* remaining LLWORK entries of blocked workspace. */
+
+    inde = 1;
+    indtau = inde + *n;
+    indwrk = indtau + *n;
+    llwork = *lwork - indwrk + 1;
+    dsytrd_(uplo, n, &a[a_offset], lda, &w[1], &work[inde], &work[indtau], &
+	    work[indwrk], &llwork, &iinfo);
+
+/* For eigenvalues only, call DSTERF. For eigenvectors, first call */
+/* DORGTR to generate the orthogonal matrix, then call DSTEQR. */
+
+    if (! wantz) {
+	dsterf_(n, &w[1], &work[inde], info);
+    } else {
+	dorgtr_(uplo, n, &a[a_offset], lda, &work[indtau], &work[indwrk], &
+		llwork, &iinfo);
+	dsteqr_(jobz, n, &w[1], &work[inde], &a[a_offset], lda, &work[indtau],
+		 info);
+    }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+/* On convergence failure (INFO > 0) only the first INFO-1 */
+/* eigenvalues are valid, so only those are rescaled. */
+
+    if (iscale == 1) {
+	if (*info == 0) {
+	    imax = *n;
+	} else {
+	    imax = *info - 1;
+	}
+	d__1 = 1. / sigma;
+	dscal_(&imax, &d__1, &w[1], &c__1);
+    }
+
+/* Set WORK(1) to optimal workspace size. */
+
+    work[1] = (doublereal) lwkopt;
+
+    return 0;
+
+/* End of DSYEV */
+
+} /* dsyev_ */
diff --git a/contrib/libs/clapack/dsyevd.c b/contrib/libs/clapack/dsyevd.c
new file mode 100644
index 0000000000..cd7f6d3bac
--- /dev/null
+++ b/contrib/libs/clapack/dsyevd.c
@@ -0,0 +1,353 @@
+/* dsyevd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__0 = 0;
+static doublereal c_b17 = 1.;
+
+/* Subroutine */ int dsyevd_(char *jobz, char *uplo, integer *n, doublereal *
+	a, integer *lda, doublereal *w, doublereal *work, integer *lwork,
+	integer *iwork, integer *liwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    doublereal eps;
+    integer inde;
+    doublereal anrm, rmin, rmax;
+    integer lopt;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    doublereal sigma;
+    extern logical lsame_(char *, char *);
+    integer iinfo, lwmin, liopt;
+    logical lower, wantz;
+    integer indwk2, llwrk2;
+    extern doublereal dlamch_(char *);
+    integer iscale;
+    extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, integer *), dstedc_(char *, integer *,
+	    doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+	     integer *, integer *, integer *, integer *), dlacpy_(
+	    char *, integer *, integer *, doublereal *, integer *, doublereal
+	    *, integer *);
+    doublereal safmin;
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal bignum;
+    integer indtau;
+    extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+	     integer *);
+    extern doublereal dlansy_(char *, char *, integer *, doublereal *,
+	    integer *, doublereal *);
+    integer indwrk, liwmin;
+    extern /* Subroutine */ int dormtr_(char *, char *, char *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *, doublereal *, integer *, integer *), dsytrd_(char *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+	     integer *);
+    integer llwork;
+    doublereal smlnum;
+    logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYEVD computes all eigenvalues and, optionally, eigenvectors of a */
+/* real symmetric matrix A. If eigenvectors are desired, it uses a */
+/* divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Because of large use of BLAS of level 3, DSYEVD needs N**2 more */
+/* workspace than DSYEVX. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA, N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+/* On exit, if JOBZ = 'V', then if INFO = 0, A contains the */
+/* orthonormal eigenvectors of the matrix A. */
+/* If JOBZ = 'N', then on exit the lower triangle (if UPLO='L') */
+/* or the upper triangle (if UPLO='U') of A, including the */
+/* diagonal, is destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, */
+/* dimension (LWORK) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If N <= 1, LWORK must be at least 1. */
+/* If JOBZ = 'N' and N > 1, LWORK must be at least 2*N+1. */
+/* If JOBZ = 'V' and N > 1, LWORK must be at least */
+/* 1 + 6*N + 2*N**2. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK and IWORK */
+/* arrays, returns these values as the first entries of the WORK */
+/* and IWORK arrays, and no error message related to LWORK or */
+/* LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If N <= 1, LIWORK must be at least 1. */
+/* If JOBZ = 'N' and N > 1, LIWORK must be at least 1. */
+/* If JOBZ = 'V' and N > 1, LIWORK must be at least 3 + 5*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK and IWORK arrays, and no error message related to */
+/* LWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i and JOBZ = 'N', then the algorithm failed */
+/* to converge; i off-diagonal elements of an intermediate */
+/* tridiagonal form did not converge to zero; */
+/* if INFO = i and JOBZ = 'V', then the algorithm failed */
+/* to compute an eigenvalue while working on the submatrix */
+/* lying in rows and columns INFO/(N+1) through */
+/* mod(INFO,N+1). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+/* Modified by Francoise Tisseur, University of Tennessee. */
+
+/* Modified description of INFO. Sven, 16 Feb 05. */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* Shift array bases for 1-based Fortran-style indexing. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --w;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    lower = lsame_(uplo, "L");
+    lquery = *lwork == -1 || *liwork == -1;
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (lower || lsame_(uplo, "U"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    }
+
+    if (*info == 0) {
+/* Compute minimal (LWMIN/LIWMIN) and optimal (LOPT/LIOPT) */
+/* workspace sizes; with eigenvectors the D&C path needs */
+/* 1 + 6*N + 2*N^2 doubles and 3 + 5*N integers. */
+	if (*n <= 1) {
+	    liwmin = 1;
+	    lwmin = 1;
+	    lopt = lwmin;
+	    liopt = liwmin;
+	} else {
+	    if (wantz) {
+		liwmin = *n * 5 + 3;
+/* Computing 2nd power */
+		i__1 = *n;
+		lwmin = *n * 6 + 1 + (i__1 * i__1 << 1);
+	    } else {
+		liwmin = 1;
+		lwmin = (*n << 1) + 1;
+	    }
+/* Computing MAX */
+	    i__1 = lwmin, i__2 = (*n << 1) + ilaenv_(&c__1, "DSYTRD", uplo, n,
+		     &c_n1, &c_n1, &c_n1);
+	    lopt = max(i__1,i__2);
+	    liopt = liwmin;
+	}
+	work[1] = (doublereal) lopt;
+	iwork[1] = liopt;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -8;
+	} else if (*liwork < liwmin && ! lquery) {
+	    *info = -10;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSYEVD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (*n == 1) {
+	w[1] = a[a_dim1 + 1];
+	if (wantz) {
+	    a[a_dim1 + 1] = 1.;
+	}
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    safmin = dlamch_("Safe minimum");
+    eps = dlamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1. / smlnum;
+    rmin = sqrt(smlnum);
+    rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+/* SIGMA brings the max-norm of A into [RMIN, RMAX]; eigenvalues are */
+/* rescaled by 1/SIGMA at the end. */
+
+    anrm = dlansy_("M", uplo, n, &a[a_offset], lda, &work[1]);
+    iscale = 0;
+    if (anrm > 0. && anrm < rmin) {
+	iscale = 1;
+	sigma = rmin / anrm;
+    } else if (anrm > rmax) {
+	iscale = 1;
+	sigma = rmax / anrm;
+    }
+    if (iscale == 1) {
+	dlascl_(uplo, &c__0, &c__0, &c_b17, &sigma, n, n, &a[a_offset], lda,
+		info);
+    }
+
+/* Call DSYTRD to reduce symmetric matrix to tridiagonal form. */
+
+/* WORK layout: WORK(INDE..) off-diagonal of T, WORK(INDTAU..) */
+/* Householder scalars, WORK(INDWRK..INDWRK+N*N-1) the N-by-N */
+/* eigenvector matrix from DSTEDC, WORK(INDWK2..) remaining */
+/* workspace of size LLWRK2. */
+
+    inde = 1;
+    indtau = inde + *n;
+    indwrk = indtau + *n;
+    llwork = *lwork - indwrk + 1;
+    indwk2 = indwrk + *n * *n;
+    llwrk2 = *lwork - indwk2 + 1;
+
+    dsytrd_(uplo, n, &a[a_offset], lda, &w[1], &work[inde], &work[indtau], &
+	    work[indwrk], &llwork, &iinfo);
+    lopt = (integer) ((*n << 1) + work[indwrk]);
+
+/* For eigenvalues only, call DSTERF. For eigenvectors, first call */
+/* DSTEDC to generate the eigenvector matrix, WORK(INDWRK), of the */
+/* tridiagonal matrix, then call DORMTR to multiply it by the */
+/* Householder transformations stored in A. */
+
+    if (! wantz) {
+	dsterf_(n, &w[1], &work[inde], info);
+    } else {
+	dstedc_("I", n, &w[1], &work[inde], &work[indwrk], n, &work[indwk2], &
+		llwrk2, &iwork[1], liwork, info);
+	dormtr_("L", uplo, "N", n, n, &a[a_offset], lda, &work[indtau], &work[
+		indwrk], n, &work[indwk2], &llwrk2, &iinfo);
+	dlacpy_("A", n, n, &work[indwrk], n, &a[a_offset], lda);
+/* Computing MAX */
+/* Computing 2nd power */
+	i__3 = *n;
+	i__1 = lopt, i__2 = *n * 6 + 1 + (i__3 * i__3 << 1);
+	lopt = max(i__1,i__2);
+    }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+    if (iscale == 1) {
+	d__1 = 1. / sigma;
+	dscal_(n, &d__1, &w[1], &c__1);
+    }
+
+    work[1] = (doublereal) lopt;
+    iwork[1] = liopt;
+
+    return 0;
+
+/* End of DSYEVD */
+
+} /* dsyevd_ */
diff --git a/contrib/libs/clapack/dsyevr.c b/contrib/libs/clapack/dsyevr.c
new file mode 100644
index 0000000000..4574299fb7
--- /dev/null
+++ b/contrib/libs/clapack/dsyevr.c
@@ -0,0 +1,652 @@
+/* dsyevr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__10 = 10;
+static integer c__1 = 1;
+static integer c__2 = 2;
+static integer c__3 = 3;
+static integer c__4 = 4;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dsyevr_(char *jobz, char *range, char *uplo, integer *n, 
+	doublereal *a, integer *lda, doublereal *vl, doublereal *vu, integer *
+	il, integer *iu, doublereal *abstol, integer *m, doublereal *w, 
+	doublereal *z__, integer *ldz, integer *isuppz, doublereal *work, 
+	integer *lwork, integer *iwork, integer *liwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, z_dim1, z_offset, i__1, i__2;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, nb, jj;
+    doublereal eps, vll, vuu, tmp1;
+    integer indd, inde;
+    doublereal anrm;
+    integer imax;
+    doublereal rmin, rmax;
+    integer inddd, indee;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, 
+	    integer *);
+    doublereal sigma;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    char order[1];
+    integer indwk;
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, 
+	    doublereal *, integer *), dswap_(integer *, doublereal *, integer 
+	    *, doublereal *, integer *);
+    integer lwmin;
+    logical lower, wantz;
+    extern doublereal dlamch_(char *);
+    logical alleig, indeig;
+    integer iscale, ieeeok, indibl, indifl;
+    logical valeig;
+    doublereal safmin;
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal abstll, bignum;
+    integer indtau, indisp;
+    extern /* Subroutine */ int dstein_(integer *, doublereal *, doublereal *,
+	     integer *, doublereal *, integer *, integer *, doublereal *, 
+	    integer *, doublereal *, integer *, integer *, integer *), 
+	    dsterf_(integer *, doublereal *, doublereal *, integer *);
+    integer indiwo, indwkn;
+    extern doublereal dlansy_(char *, char *, integer *, doublereal *, 
+	    integer *, doublereal *);
+    extern /* Subroutine */ int dstebz_(char *, char *, integer *, doublereal 
+	    *, doublereal *, integer *, integer *, doublereal *, doublereal *,
+	     doublereal *, integer *, integer *, doublereal *, integer *, 
+	    integer *, doublereal *, integer *, integer *), 
+	    dstemr_(char *, char *, integer *, doublereal *, doublereal *, 
+	    doublereal *, doublereal *, integer *, integer *, integer *, 
+	    doublereal *, doublereal *, integer *, integer *, integer *, 
+	    logical *, doublereal *, integer *, integer *, integer *, integer 
+	    *);
+    integer liwmin;
+    logical tryrac;
+    extern /* Subroutine */ int dormtr_(char *, char *, char *, integer *, 
+	    integer *, doublereal *, integer *, doublereal *, doublereal *, 
+	    integer *, doublereal *, integer *, integer *);
+    integer llwrkn, llwork, nsplit;
+    doublereal smlnum;
+    extern /* Subroutine */ int dsytrd_(char *, integer *, doublereal *, 
+	    integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+	     integer *, integer *);
+    integer lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYEVR computes selected eigenvalues and, optionally, eigenvectors */
+/* of a real symmetric matrix A.  Eigenvalues and eigenvectors can be */
+/* selected by specifying either a range of values or a range of */
+/* indices for the desired eigenvalues. */
+
+/* DSYEVR first reduces the matrix A to tridiagonal form T with a call */
+/* to DSYTRD.  Then, whenever possible, DSYEVR calls DSTEMR to compute */
+/* the eigenspectrum using Relatively Robust Representations.  DSTEMR */
+/* computes eigenvalues by the dqds algorithm, while orthogonal */
+/* eigenvectors are computed from various "good" L D L^T representations */
+/* (also known as Relatively Robust Representations). Gram-Schmidt */
+/* orthogonalization is avoided as far as possible. More specifically, */
+/* the various steps of the algorithm are as follows. */
+
+/* For each unreduced block (submatrix) of T, */
+/* (a) Compute T - sigma I  = L D L^T, so that L and D */
+/* define all the wanted eigenvalues to high relative accuracy. */
+/* This means that small relative changes in the entries of D and L */
+/* cause only small relative changes in the eigenvalues and */
+/* eigenvectors. The standard (unfactored) representation of the */
+/* tridiagonal matrix T does not have this property in general. */
+/* (b) Compute the eigenvalues to suitable accuracy. */
+/* If the eigenvectors are desired, the algorithm attains full */
+/* accuracy of the computed eigenvalues only right before */
+/* the corresponding vectors have to be computed, see steps c) and d). */
+/* (c) For each cluster of close eigenvalues, select a new */
+/* shift close to the cluster, find a new factorization, and refine */
+/* the shifted eigenvalues to suitable accuracy. */
+/* (d) For each eigenvalue with a large enough relative separation compute */
+/* the corresponding eigenvector by forming a rank revealing twisted */
+/* factorization. Go back to (c) for any clusters that remain. */
+
+/* The desired accuracy of the output can be specified by the input */
+/* parameter ABSTOL. */
+
+/* For more details, see DSTEMR's documentation and: */
+/* - Inderjit S. Dhillon and Beresford N. Parlett: "Multiple representations */
+/* to compute orthogonal eigenvectors of symmetric tridiagonal matrices," */
+/* Linear Algebra and its Applications, 387(1), pp. 1-28, August 2004. */
+/* - Inderjit Dhillon and Beresford Parlett: "Orthogonal Eigenvectors and */
+/* Relative Gaps," SIAM Journal on Matrix Analysis and Applications, Vol. 25, */
+/* 2004.  Also LAPACK Working Note 154. */
+/* - Inderjit Dhillon: "A new O(n^2) algorithm for the symmetric */
+/* tridiagonal eigenvalue/eigenvector problem", */
+/* Computer Science Division Technical Report No. UCB/CSD-97-971, */
+/* UC Berkeley, May 1997. */
+
+
+/* Note 1 : DSYEVR calls DSTEMR when the full spectrum is requested */
+/* on machines which conform to the ieee-754 floating point standard. */
+/* DSYEVR calls DSTEBZ and SSTEIN on non-ieee machines and */
+/* when partial spectrum requests are made. */
+
+/* Normal execution of DSTEMR may create NaNs and infinities and */
+/* hence may abort due to a floating point exception in environments */
+/* which do not handle NaNs and infinities in the ieee standard default */
+/* manner. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ    (input) CHARACTER*1 */
+/* = 'N':  Compute eigenvalues only; */
+/* = 'V':  Compute eigenvalues and eigenvectors. */
+
+/* RANGE   (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+/* ********* For RANGE = 'V' or 'I' and IU - IL < N - 1, DSTEBZ and */
+/* ********* DSTEIN are called */
+
+/* UPLO    (input) CHARACTER*1 */
+/* = 'U':  Upper triangle of A is stored; */
+/* = 'L':  Lower triangle of A is stored. */
+
+/* N       (input) INTEGER */
+/* The order of the matrix A.  N >= 0. */
+
+/* A       (input/output) DOUBLE PRECISION array, dimension (LDA, N) */
+/* On entry, the symmetric matrix A.  If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A.  If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+/* On exit, the lower triangle (if UPLO='L') or the upper */
+/* triangle (if UPLO='U') of A, including the diagonal, is */
+/* destroyed. */
+
+/* LDA     (input) INTEGER */
+/* The leading dimension of the array A.  LDA >= max(1,N). */
+
+/* VL      (input) DOUBLE PRECISION */
+/* VU      (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL      (input) INTEGER */
+/* IU      (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL  (input) DOUBLE PRECISION */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS *   max( |a|,|b| ) , */
+
+/* where EPS is the machine precision.  If ABSTOL is less than */
+/* or equal to zero, then  EPS*|T|  will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing A to tridiagonal form. */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices */
+/* with Guaranteed High Relative Accuracy," by Demmel and */
+/* Kahan, LAPACK Working Note #3. */
+
+/* If high relative accuracy is important, set ABSTOL to */
+/* DLAMCH( 'Safe minimum' ).  Doing so will guarantee that */
+/* eigenvalues are computed to high relative accuracy when */
+/* possible in future releases.  The current code does not */
+/* make any guarantees about high relative accuracy, but */
+/* future releases will. See J. Barlow and J. Demmel, */
+/* "Computing Accurate Eigensystems of Scaled Diagonally */
+/* Dominant Matrices", LAPACK Working Note #7, for a discussion */
+/* of which matrices define their eigenvalues to high relative */
+/* accuracy. */
+
+/* M       (output) INTEGER */
+/* The total number of eigenvalues found.  0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W       (output) DOUBLE PRECISION array, dimension (N) */
+/* The first M elements contain the selected eigenvalues in */
+/* ascending order. */
+
+/* Z       (output) DOUBLE PRECISION array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+/* Supplying N columns is always safe. */
+
+/* LDZ     (input) INTEGER */
+/* The leading dimension of the array Z.  LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* ISUPPZ  (output) INTEGER array, dimension ( 2*max(1,M) ) */
+/* The support of the eigenvectors in Z, i.e., the indices */
+/* indicating the nonzero elements in Z. The i-th eigenvector */
+/* is nonzero only in elements ISUPPZ( 2*i-1 ) through */
+/* ISUPPZ( 2*i ). */
+/* ********* Implemented only for RANGE = 'A' or 'I' and IU - IL = N - 1 */
+
+/* WORK    (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK   (input) INTEGER */
+/* The dimension of the array WORK.  LWORK >= max(1,26*N). */
+/* For optimal efficiency, LWORK >= (NB+6)*N, */
+/* where NB is the max of the blocksize for DSYTRD and DORMTR */
+/* returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK   (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LWORK. */
+
+/* LIWORK  (input) INTEGER */
+/* The dimension of the array IWORK.  LIWORK >= max(1,10*N). */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal size of the IWORK array, */
+/* returns this value as the first entry of the IWORK array, and */
+/* no error message related to LIWORK is issued by XERBLA. */
+
+/* INFO    (output) INTEGER */
+/* = 0:  successful exit */
+/* < 0:  if INFO = -i, the i-th argument had an illegal value */
+/* > 0:  Internal error */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Inderjit Dhillon, IBM Almaden, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Ken Stanley, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Jason Riedy, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --isuppz;
+    --work;
+    --iwork;
+
+    /* Function Body */
+/* ILAENV with ISPEC = 10 reports whether ieee-754 NaN/infinity */
+/* arithmetic is handled; a result of 1 enables the DSTEMR path below. */
+    ieeeok = ilaenv_(&c__10, "DSYEVR", "N", &c__1, &c__2, &c__3, &c__4);
+
+    lower = lsame_(uplo, "L");
+    wantz = lsame_(jobz, "V");
+    alleig = lsame_(range, "A");
+    valeig = lsame_(range, "V");
+    indeig = lsame_(range, "I");
+
+/* LWORK = -1 or LIWORK = -1 signals a workspace-size query only. */
+    lquery = *lwork == -1 || *liwork == -1;
+
+/* Minimal workspace sizes: LWORK >= max(1,26*N), LIWORK >= max(1,10*N). */
+/* Computing MAX */
+    i__1 = 1, i__2 = *n * 26;
+    lwmin = max(i__1,i__2);
+/* Computing MAX */
+    i__1 = 1, i__2 = *n * 10;
+    liwmin = max(i__1,i__2);
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (alleig || valeig || indeig)) {
+	*info = -2;
+    } else if (! (lower || lsame_(uplo, "U"))) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*n)) {
+	*info = -6;
+    } else {
+	if (valeig) {
+	    if (*n > 0 && *vu <= *vl) {
+		*info = -8;
+	    }
+	} else if (indeig) {
+	    if (*il < 1 || *il > max(1,*n)) {
+		*info = -9;
+	    } else if (*iu < min(*n,*il) || *iu > *n) {
+		*info = -10;
+	    }
+	}
+    }
+    if (*info == 0) {
+	if (*ldz < 1 || wantz && *ldz < *n) {
+	    *info = -15;
+	} else if (*lwork < lwmin && ! lquery) {
+	    *info = -18;
+	} else if (*liwork < liwmin && ! lquery) {
+	    *info = -20;
+	}
+    }
+
+    if (*info == 0) {
+/* Optimal LWORK is (NB+1)*N with NB the larger of the DSYTRD and */
+/* DORMTR block sizes, but never less than the minimum LWMIN. */
+	nb = ilaenv_(&c__1, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+	i__1 = nb, i__2 = ilaenv_(&c__1, "DORMTR", uplo, n, &c_n1, &c_n1, &
+		c_n1);
+	nb = max(i__1,i__2);
+/* Computing MAX */
+	i__1 = (nb + 1) * *n;
+	lwkopt = max(i__1,lwmin);
+	work[1] = (doublereal) lwkopt;
+	iwork[1] = liwmin;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSYEVR", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *m = 0;
+    if (*n == 0) {
+	work[1] = 1.;
+	return 0;
+    }
+
+/* A 1-by-1 matrix is its own eigendecomposition: W(1) = A(1,1). */
+    if (*n == 1) {
+	work[1] = 7.;
+	if (alleig || indeig) {
+	    *m = 1;
+	    w[1] = a[a_dim1 + 1];
+	} else {
+	    if (*vl < a[a_dim1 + 1] && *vu >= a[a_dim1 + 1]) {
+		*m = 1;
+		w[1] = a[a_dim1 + 1];
+	    }
+	}
+	if (wantz) {
+	    z__[z_dim1 + 1] = 1.;
+	}
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    safmin = dlamch_("Safe minimum");
+    eps = dlamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1. / smlnum;
+    rmin = sqrt(smlnum);
+/* Computing MIN */
+    d__1 = sqrt(bignum), d__2 = 1. / sqrt(sqrt(safmin));
+    rmax = min(d__1,d__2);
+
+/* Scale matrix to allowable range, if necessary. */
+
+    iscale = 0;
+    abstll = *abstol;
+    vll = *vl;
+    vuu = *vu;
+    anrm = dlansy_("M", uplo, n, &a[a_offset], lda, &work[1]);
+    if (anrm > 0. && anrm < rmin) {
+	iscale = 1;
+	sigma = rmin / anrm;
+    } else if (anrm > rmax) {
+	iscale = 1;
+	sigma = rmax / anrm;
+    }
+    if (iscale == 1) {
+/* Only the stored triangle of A is scaled, column by column. */
+	if (lower) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n - j + 1;
+		dscal_(&i__2, &sigma, &a[j + j * a_dim1], &c__1);
+/* L10: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		dscal_(&j, &sigma, &a[j * a_dim1 + 1], &c__1);
+/* L20: */
+	    }
+	}
+	if (*abstol > 0.) {
+	    abstll = *abstol * sigma;
+	}
+	if (valeig) {
+	    vll = *vl * sigma;
+	    vuu = *vu * sigma;
+	}
+    }
+/* Initialize indices into workspaces.  Note: The IWORK indices are */
+/* used only if DSTERF or DSTEMR fail. */
+/* WORK(INDTAU:INDTAU+N-1) stores the scalar factors of the */
+/* elementary reflectors used in DSYTRD. */
+    indtau = 1;
+/* WORK(INDD:INDD+N-1) stores the tridiagonal's diagonal entries. */
+    indd = indtau + *n;
+/* WORK(INDE:INDE+N-1) stores the off-diagonal entries of the */
+/* tridiagonal matrix from DSYTRD. */
+    inde = indd + *n;
+/* WORK(INDDD:INDDD+N-1) is a copy of the diagonal entries over */
+/* -written by DSTEMR (the DSTERF path copies the diagonal to W). */
+    inddd = inde + *n;
+/* WORK(INDEE:INDEE+N-1) is a copy of the off-diagonal entries over */
+/* -written while computing the eigenvalues in DSTERF and DSTEMR. */
+    indee = inddd + *n;
+/* INDWK is the starting offset of the left-over workspace, and */
+/* LLWORK is the remaining workspace size. */
+    indwk = indee + *n;
+    llwork = *lwork - indwk + 1;
+/* IWORK(INDIBL:INDIBL+M-1) corresponds to IBLOCK in DSTEBZ and */
+/* stores the block indices of each of the M<=N eigenvalues. */
+    indibl = 1;
+/* IWORK(INDISP:INDISP+NSPLIT-1) corresponds to ISPLIT in DSTEBZ and */
+/* stores the starting and finishing indices of each block. */
+    indisp = indibl + *n;
+/* IWORK(INDIFL:INDIFL+N-1) stores the indices of eigenvectors */
+/* that corresponding to eigenvectors that fail to converge in */
+/* DSTEIN.  This information is discarded; if any fail, the driver */
+/* returns INFO > 0. */
+    indifl = indisp + *n;
+/* INDIWO is the offset of the remaining integer workspace. */
+    indiwo = indisp + *n;
+/* NOTE(review): INDIFL and INDIWO are both INDISP + N here, so in the */
+/* DSTEIN fallback below the IFAIL output (IWORK(INDIFL)) aliases the */
+/* integer workspace (IWORK(INDIWO)) -- confirm against upstream */
+/* dsyevr.f before relying on IFAIL contents. */
+
+/* Call DSYTRD to reduce symmetric matrix to tridiagonal form. */
+
+    dsytrd_(uplo, n, &a[a_offset], lda, &work[indd], &work[inde], &work[
+	    indtau], &work[indwk], &llwork, &iinfo);
+
+/* If all eigenvalues are desired */
+/* then call DSTERF or DSTEMR and DORMTR. */
+
+    if ((alleig || indeig && *il == 1 && *iu == *n) && ieeeok == 1) {
+	if (! wantz) {
+	    dcopy_(n, &work[indd], &c__1, &w[1], &c__1);
+	    i__1 = *n - 1;
+	    dcopy_(&i__1, &work[inde], &c__1, &work[indee], &c__1);
+	    dsterf_(n, &w[1], &work[indee], info);
+	} else {
+	    i__1 = *n - 1;
+	    dcopy_(&i__1, &work[inde], &c__1, &work[indee], &c__1);
+	    dcopy_(n, &work[indd], &c__1, &work[inddd], &c__1);
+
+/* Let DSTEMR try for high (relative) accuracy when ABSTOL is no */
+/* larger than 2*N*EPS. */
+	    if (*abstol <= *n * 2. * eps) {
+		tryrac = TRUE_;
+	    } else {
+		tryrac = FALSE_;
+	    }
+	    dstemr_(jobz, "A", n, &work[inddd], &work[indee], vl, vu, il, iu, 
+		    m, &w[1], &z__[z_offset], ldz, n, &isuppz[1], &tryrac, &
+		    work[indwk], lwork, &iwork[1], liwork, info);
+
+
+
+/* Apply orthogonal matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by DSTEIN. */
+
+	    if (wantz && *info == 0) {
+		indwkn = inde;
+		llwrkn = *lwork - indwkn + 1;
+		dormtr_("L", uplo, "N", n, m, &a[a_offset], lda, &work[indtau]
+, &z__[z_offset], ldz, &work[indwkn], &llwrkn, &iinfo);
+	    }
+	}
+
+
+	if (*info == 0) {
+/* Everything worked.  Skip DSTEBZ/DSTEIN.  IWORK(:) are */
+/* undefined. */
+	    *m = *n;
+	    goto L30;
+	}
+	*info = 0;
+    }
+
+/* Otherwise, call DSTEBZ and, if eigenvectors are desired, DSTEIN. */
+/* Also call DSTEBZ and DSTEIN if DSTEMR fails. */
+
+    if (wantz) {
+	*(unsigned char *)order = 'B';
+    } else {
+	*(unsigned char *)order = 'E';
+    }
+    dstebz_(range, order, n, &vll, &vuu, il, iu, &abstll, &work[indd], &work[
+	    inde], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &work[
+	    indwk], &iwork[indiwo], info);
+
+    if (wantz) {
+	dstein_(n, &work[indd], &work[inde], m, &w[1], &iwork[indibl], &iwork[
+		indisp], &z__[z_offset], ldz, &work[indwk], &iwork[indiwo], &
+		iwork[indifl], info);
+
+/* Apply orthogonal matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by DSTEIN. */
+
+	indwkn = inde;
+	llwrkn = *lwork - indwkn + 1;
+	dormtr_("L", uplo, "N", n, m, &a[a_offset], lda, &work[indtau], &z__[
+		z_offset], ldz, &work[indwkn], &llwrkn, &iinfo);
+    }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+/* Jump here if DSTEMR/DSTEIN succeeded. */
+L30:
+    if (iscale == 1) {
+/* On DSTEBZ failure (INFO > 0), only the first INFO-1 eigenvalues */
+/* are reliable; rescale just those. */
+	if (*info == 0) {
+	    imax = *m;
+	} else {
+	    imax = *info - 1;
+	}
+	d__1 = 1. / sigma;
+	dscal_(&imax, &d__1, &w[1], &c__1);
+    }
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors.  Note: We do not sort the IFAIL portion of IWORK. */
+/* It may not be initialized (if DSTEMR/DSTEIN succeeded), and we do */
+/* not return this detailed information to the user. */
+
+    if (wantz) {
+/* Selection sort: find the smallest remaining eigenvalue and swap */
+/* it (together with its eigenvector column in Z) into position J. */
+	i__1 = *m - 1;
+	for (j = 1; j <= i__1; ++j) {
+	    i__ = 0;
+	    tmp1 = w[j];
+	    i__2 = *m;
+	    for (jj = j + 1; jj <= i__2; ++jj) {
+		if (w[jj] < tmp1) {
+		    i__ = jj;
+		    tmp1 = w[jj];
+		}
+/* L40: */
+	    }
+
+	    if (i__ != 0) {
+		w[i__] = w[j];
+		w[j] = tmp1;
+		dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+			 &c__1);
+	    }
+/* L50: */
+	}
+    }
+
+/* Set WORK(1) to optimal workspace size. */
+
+    work[1] = (doublereal) lwkopt;
+    iwork[1] = liwmin;
+
+    return 0;
+
+/* End of DSYEVR */
+
+} /* dsyevr_ */
diff --git a/contrib/libs/clapack/dsyevx.c b/contrib/libs/clapack/dsyevx.c
new file mode 100644
index 0000000000..1b6a3c2b86
--- /dev/null
+++ b/contrib/libs/clapack/dsyevx.c
@@ -0,0 +1,536 @@
+/* dsyevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dsyevx_(char *jobz, char *range, char *uplo, integer *n,
+ doublereal *a, integer *lda, doublereal *vl, doublereal *vu, integer *
+ il, integer *iu, doublereal *abstol, integer *m, doublereal *w,
+ doublereal *z__, integer *ldz, doublereal *work, integer *lwork,
+ integer *iwork, integer *ifail, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, z_dim1, z_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, nb, jj;
+ doublereal eps, vll, vuu, tmp1;
+ integer indd, inde;
+ doublereal anrm;
+ integer imax;
+ doublereal rmin, rmax;
+ logical test;
+ integer itmp1, indee;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ doublereal sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ char order[1];
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dswap_(integer *, doublereal *, integer
+ *, doublereal *, integer *);
+ logical lower, wantz;
+ extern doublereal dlamch_(char *);
+ logical alleig, indeig;
+ integer iscale, indibl;
+ logical valeig;
+ extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *);
+ doublereal safmin;
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal abstll, bignum;
+ integer indtau, indisp;
+ extern /* Subroutine */ int dstein_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *, doublereal *,
+ integer *, doublereal *, integer *, integer *, integer *),
+ dsterf_(integer *, doublereal *, doublereal *, integer *);
+ integer indiwo, indwkn;
+ extern doublereal dlansy_(char *, char *, integer *, doublereal *,
+ integer *, doublereal *);
+ extern /* Subroutine */ int dstebz_(char *, char *, integer *, doublereal
+ *, doublereal *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *, doublereal *, integer *, integer *);
+ integer indwrk, lwkmin;
+ extern /* Subroutine */ int dorgtr_(char *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, integer *), dsteqr_(char *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *),
+ dormtr_(char *, char *, char *, integer *, integer *, doublereal *
+, integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, integer *);
+ integer llwrkn, llwork, nsplit;
+ doublereal smlnum;
+ extern /* Subroutine */ int dsytrd_(char *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYEVX computes selected eigenvalues and, optionally, eigenvectors */
+/* of a real symmetric matrix A. Eigenvalues and eigenvectors can be */
+/* selected by specifying either a range of values or a range of indices */
+/* for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA, N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+/* On exit, the lower triangle (if UPLO='L') or the upper */
+/* triangle (if UPLO='U') of A, including the diagonal, is */
+/* destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) DOUBLE PRECISION */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing A to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*DLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*DLAMCH('S'). */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices */
+/* with Guaranteed High Relative Accuracy," by Demmel and */
+/* Kahan, LAPACK Working Note #3. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* On normal exit, the first M elements contain the selected */
+/* eigenvalues in ascending order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and the */
+/* index of the eigenvector is returned in IFAIL. */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= 1, when N <= 1; */
+/* otherwise 8*N. */
+/* For optimal efficiency, LWORK >= (NB+3)*N, */
+/* where NB is the max of the blocksize for DSYTRD and DORMTR */
+/* returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, then i eigenvectors failed to converge. */
+/* Their indices are stored in array IFAIL. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ lower = lsame_(uplo, "L");
+ wantz = lsame_(jobz, "V");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+ lquery = *lwork == -1;
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -2;
+ } else if (! (lower || lsame_(uplo, "U"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -8;
+ }
+ } else if (indeig) {
+ if (*il < 1 || *il > max(1,*n)) {
+ *info = -9;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -10;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -15;
+ }
+ }
+
+ if (*info == 0) {
+ if (*n <= 1) {
+ lwkmin = 1;
+ work[1] = (doublereal) lwkmin;
+ } else {
+ lwkmin = *n << 3;
+ nb = ilaenv_(&c__1, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = nb, i__2 = ilaenv_(&c__1, "DORMTR", uplo, n, &c_n1, &c_n1,
+ &c_n1);
+ nb = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = lwkmin, i__2 = (nb + 3) * *n;
+ lwkopt = max(i__1,i__2);
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*lwork < lwkmin && ! lquery) {
+ *info = -17;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSYEVX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (alleig || indeig) {
+ *m = 1;
+ w[1] = a[a_dim1 + 1];
+ } else {
+ if (*vl < a[a_dim1 + 1] && *vu >= a[a_dim1 + 1]) {
+ *m = 1;
+ w[1] = a[a_dim1 + 1];
+ }
+ }
+ if (wantz) {
+ z__[z_dim1 + 1] = 1.;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = dlamch_("Safe minimum");
+ eps = dlamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1. / smlnum;
+ rmin = sqrt(smlnum);
+/* Computing MIN */
+ d__1 = sqrt(bignum), d__2 = 1. / sqrt(sqrt(safmin));
+ rmax = min(d__1,d__2);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ iscale = 0;
+ abstll = *abstol;
+ if (valeig) {
+ vll = *vl;
+ vuu = *vu;
+ }
+ anrm = dlansy_("M", uplo, n, &a[a_offset], lda, &work[1]);
+ if (anrm > 0. && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ if (lower) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j + 1;
+ dscal_(&i__2, &sigma, &a[j + j * a_dim1], &c__1);
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ dscal_(&j, &sigma, &a[j * a_dim1 + 1], &c__1);
+/* L20: */
+ }
+ }
+ if (*abstol > 0.) {
+ abstll = *abstol * sigma;
+ }
+ if (valeig) {
+ vll = *vl * sigma;
+ vuu = *vu * sigma;
+ }
+ }
+
+/* Call DSYTRD to reduce symmetric matrix to tridiagonal form. */
+
+ indtau = 1;
+ inde = indtau + *n;
+ indd = inde + *n;
+ indwrk = indd + *n;
+ llwork = *lwork - indwrk + 1;
+ dsytrd_(uplo, n, &a[a_offset], lda, &work[indd], &work[inde], &work[
+ indtau], &work[indwrk], &llwork, &iinfo);
+
+/* If all eigenvalues are desired and ABSTOL is less than or equal to */
+/* zero, then call DSTERF or DORGTR and SSTEQR. If this fails for */
+/* some eigenvalue, then try DSTEBZ. */
+
+ test = FALSE_;
+ if (indeig) {
+ if (*il == 1 && *iu == *n) {
+ test = TRUE_;
+ }
+ }
+ if ((alleig || test) && *abstol <= 0.) {
+ dcopy_(n, &work[indd], &c__1, &w[1], &c__1);
+ indee = indwrk + (*n << 1);
+ if (! wantz) {
+ i__1 = *n - 1;
+ dcopy_(&i__1, &work[inde], &c__1, &work[indee], &c__1);
+ dsterf_(n, &w[1], &work[indee], info);
+ } else {
+ dlacpy_("A", n, n, &a[a_offset], lda, &z__[z_offset], ldz);
+ dorgtr_(uplo, n, &z__[z_offset], ldz, &work[indtau], &work[indwrk]
+, &llwork, &iinfo);
+ i__1 = *n - 1;
+ dcopy_(&i__1, &work[inde], &c__1, &work[indee], &c__1);
+ dsteqr_(jobz, n, &w[1], &work[indee], &z__[z_offset], ldz, &work[
+ indwrk], info);
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ifail[i__] = 0;
+/* L30: */
+ }
+ }
+ }
+ if (*info == 0) {
+ *m = *n;
+ goto L40;
+ }
+ *info = 0;
+ }
+
+/* Otherwise, call DSTEBZ and, if eigenvectors are desired, SSTEIN. */
+
+ if (wantz) {
+ *(unsigned char *)order = 'B';
+ } else {
+ *(unsigned char *)order = 'E';
+ }
+ indibl = 1;
+ indisp = indibl + *n;
+ indiwo = indisp + *n;
+ dstebz_(range, order, n, &vll, &vuu, il, iu, &abstll, &work[indd], &work[
+ inde], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &work[
+ indwrk], &iwork[indiwo], info);
+
+ if (wantz) {
+ dstein_(n, &work[indd], &work[inde], m, &w[1], &iwork[indibl], &iwork[
+ indisp], &z__[z_offset], ldz, &work[indwrk], &iwork[indiwo], &
+ ifail[1], info);
+
+/* Apply orthogonal matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by DSTEIN. */
+
+ indwkn = inde;
+ llwrkn = *lwork - indwkn + 1;
+ dormtr_("L", uplo, "N", n, m, &a[a_offset], lda, &work[indtau], &z__[
+ z_offset], ldz, &work[indwkn], &llwrkn, &iinfo);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+L40:
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *m;
+ } else {
+ imax = *info - 1;
+ }
+ d__1 = 1. / sigma;
+ dscal_(&imax, &d__1, &w[1], &c__1);
+ }
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+ if (wantz) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = 0;
+ tmp1 = w[j];
+ i__2 = *m;
+ for (jj = j + 1; jj <= i__2; ++jj) {
+ if (w[jj] < tmp1) {
+ i__ = jj;
+ tmp1 = w[jj];
+ }
+/* L50: */
+ }
+
+ if (i__ != 0) {
+ itmp1 = iwork[indibl + i__ - 1];
+ w[i__] = w[j];
+ iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+ w[j] = tmp1;
+ iwork[indibl + j - 1] = itmp1;
+ dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+ &c__1);
+ if (*info != 0) {
+ itmp1 = ifail[i__];
+ ifail[i__] = ifail[j];
+ ifail[j] = itmp1;
+ }
+ }
+/* L60: */
+ }
+ }
+
+/* Set WORK(1) to optimal workspace size. */
+
+ work[1] = (doublereal) lwkopt;
+
+ return 0;
+
+/* End of DSYEVX */
+
+} /* dsyevx_ */
diff --git a/contrib/libs/clapack/dsygs2.c b/contrib/libs/clapack/dsygs2.c
new file mode 100644
index 0000000000..9a12cefb7f
--- /dev/null
+++ b/contrib/libs/clapack/dsygs2.c
@@ -0,0 +1,299 @@
+/* dsygs2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b6 = -1.;
+static integer c__1 = 1;
+static doublereal c_b27 = 1.;
+
+/* Subroutine */ int dsygs2_(integer *itype, char *uplo, integer *n,
+ doublereal *a, integer *lda, doublereal *b, integer *ldb, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Local variables */
+ integer k;
+ doublereal ct, akk, bkk;
+ extern /* Subroutine */ int dsyr2_(char *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *), dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *);
+ logical upper;
+ extern /* Subroutine */ int dtrmv_(char *, char *, char *, integer *,
+ doublereal *, integer *, doublereal *, integer *), dtrsv_(char *, char *, char *, integer *, doublereal *,
+ integer *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYGS2 reduces a real symmetric-definite generalized eigenproblem */
+/* to standard form. */
+
+/* If ITYPE = 1, the problem is A*x = lambda*B*x, */
+/* and A is overwritten by inv(U')*A*inv(U) or inv(L)*A*inv(L') */
+
+/* If ITYPE = 2 or 3, the problem is A*B*x = lambda*x or */
+/* B*A*x = lambda*x, and A is overwritten by U*A*U` or L'*A*L. */
+
+/* B must have been previously factorized as U'*U or L*L' by DPOTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* = 1: compute inv(U')*A*inv(U) or inv(L)*A*inv(L'); */
+/* = 2 or 3: compute U*A*U' or L'*A*L. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored, and how B has been factorized. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the transformed matrix, stored in the */
+/* same format as A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,N) */
+/* The triangular factor from the Cholesky factorization of B, */
+/* as returned by DPOTRF. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSYGS2", &i__1);
+ return 0;
+ }
+
+ if (*itype == 1) {
+ if (upper) {
+
+/* Compute inv(U')*A*inv(U) */
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+
+/* Update the upper triangle of A(k:n,k:n) */
+
+ akk = a[k + k * a_dim1];
+ bkk = b[k + k * b_dim1];
+/* Computing 2nd power */
+ d__1 = bkk;
+ akk /= d__1 * d__1;
+ a[k + k * a_dim1] = akk;
+ if (k < *n) {
+ i__2 = *n - k;
+ d__1 = 1. / bkk;
+ dscal_(&i__2, &d__1, &a[k + (k + 1) * a_dim1], lda);
+ ct = akk * -.5;
+ i__2 = *n - k;
+ daxpy_(&i__2, &ct, &b[k + (k + 1) * b_dim1], ldb, &a[k + (
+ k + 1) * a_dim1], lda);
+ i__2 = *n - k;
+ dsyr2_(uplo, &i__2, &c_b6, &a[k + (k + 1) * a_dim1], lda,
+ &b[k + (k + 1) * b_dim1], ldb, &a[k + 1 + (k + 1)
+ * a_dim1], lda);
+ i__2 = *n - k;
+ daxpy_(&i__2, &ct, &b[k + (k + 1) * b_dim1], ldb, &a[k + (
+ k + 1) * a_dim1], lda);
+ i__2 = *n - k;
+ dtrsv_(uplo, "Transpose", "Non-unit", &i__2, &b[k + 1 + (
+ k + 1) * b_dim1], ldb, &a[k + (k + 1) * a_dim1],
+ lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute inv(L)*A*inv(L') */
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+
+/* Update the lower triangle of A(k:n,k:n) */
+
+ akk = a[k + k * a_dim1];
+ bkk = b[k + k * b_dim1];
+/* Computing 2nd power */
+ d__1 = bkk;
+ akk /= d__1 * d__1;
+ a[k + k * a_dim1] = akk;
+ if (k < *n) {
+ i__2 = *n - k;
+ d__1 = 1. / bkk;
+ dscal_(&i__2, &d__1, &a[k + 1 + k * a_dim1], &c__1);
+ ct = akk * -.5;
+ i__2 = *n - k;
+ daxpy_(&i__2, &ct, &b[k + 1 + k * b_dim1], &c__1, &a[k +
+ 1 + k * a_dim1], &c__1);
+ i__2 = *n - k;
+ dsyr2_(uplo, &i__2, &c_b6, &a[k + 1 + k * a_dim1], &c__1,
+ &b[k + 1 + k * b_dim1], &c__1, &a[k + 1 + (k + 1)
+ * a_dim1], lda);
+ i__2 = *n - k;
+ daxpy_(&i__2, &ct, &b[k + 1 + k * b_dim1], &c__1, &a[k +
+ 1 + k * a_dim1], &c__1);
+ i__2 = *n - k;
+ dtrsv_(uplo, "No transpose", "Non-unit", &i__2, &b[k + 1
+ + (k + 1) * b_dim1], ldb, &a[k + 1 + k * a_dim1],
+ &c__1);
+ }
+/* L20: */
+ }
+ }
+ } else {
+ if (upper) {
+
+/* Compute U*A*U' */
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+
+/* Update the upper triangle of A(1:k,1:k) */
+
+ akk = a[k + k * a_dim1];
+ bkk = b[k + k * b_dim1];
+ i__2 = k - 1;
+ dtrmv_(uplo, "No transpose", "Non-unit", &i__2, &b[b_offset],
+ ldb, &a[k * a_dim1 + 1], &c__1);
+ ct = akk * .5;
+ i__2 = k - 1;
+ daxpy_(&i__2, &ct, &b[k * b_dim1 + 1], &c__1, &a[k * a_dim1 +
+ 1], &c__1);
+ i__2 = k - 1;
+ dsyr2_(uplo, &i__2, &c_b27, &a[k * a_dim1 + 1], &c__1, &b[k *
+ b_dim1 + 1], &c__1, &a[a_offset], lda);
+ i__2 = k - 1;
+ daxpy_(&i__2, &ct, &b[k * b_dim1 + 1], &c__1, &a[k * a_dim1 +
+ 1], &c__1);
+ i__2 = k - 1;
+ dscal_(&i__2, &bkk, &a[k * a_dim1 + 1], &c__1);
+/* Computing 2nd power */
+ d__1 = bkk;
+ a[k + k * a_dim1] = akk * (d__1 * d__1);
+/* L30: */
+ }
+ } else {
+
+/* Compute L'*A*L */
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+
+/* Update the lower triangle of A(1:k,1:k) */
+
+ akk = a[k + k * a_dim1];
+ bkk = b[k + k * b_dim1];
+ i__2 = k - 1;
+ dtrmv_(uplo, "Transpose", "Non-unit", &i__2, &b[b_offset],
+ ldb, &a[k + a_dim1], lda);
+ ct = akk * .5;
+ i__2 = k - 1;
+ daxpy_(&i__2, &ct, &b[k + b_dim1], ldb, &a[k + a_dim1], lda);
+ i__2 = k - 1;
+ dsyr2_(uplo, &i__2, &c_b27, &a[k + a_dim1], lda, &b[k +
+ b_dim1], ldb, &a[a_offset], lda);
+ i__2 = k - 1;
+ daxpy_(&i__2, &ct, &b[k + b_dim1], ldb, &a[k + a_dim1], lda);
+ i__2 = k - 1;
+ dscal_(&i__2, &bkk, &a[k + a_dim1], lda);
+/* Computing 2nd power */
+ d__1 = bkk;
+ a[k + k * a_dim1] = akk * (d__1 * d__1);
+/* L40: */
+ }
+ }
+ }
+ return 0;
+
+/* End of DSYGS2 */
+
+} /* dsygs2_ */
diff --git a/contrib/libs/clapack/dsygst.c b/contrib/libs/clapack/dsygst.c
new file mode 100644
index 0000000000..55635fe3c0
--- /dev/null
+++ b/contrib/libs/clapack/dsygst.c
@@ -0,0 +1,347 @@
+/* dsygst.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b14 = 1.;
+static doublereal c_b16 = -.5;
+static doublereal c_b19 = -1.;
+static doublereal c_b52 = .5;
+
+/* Subroutine */ int dsygst_(integer *itype, char *uplo, integer *n,
+ doublereal *a, integer *lda, doublereal *b, integer *ldb, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer k, kb, nb;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dtrmm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *), dsymm_(
+ char *, char *, integer *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *);
+ logical upper;
+ extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *), dsygs2_(
+ integer *, char *, integer *, doublereal *, integer *, doublereal
+ *, integer *, integer *), dsyr2k_(char *, char *, integer
+ *, integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *)
+ , xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYGST reduces a real symmetric-definite generalized eigenproblem */
+/* to standard form. */
+
+/* If ITYPE = 1, the problem is A*x = lambda*B*x, */
+/* and A is overwritten by inv(U**T)*A*inv(U) or inv(L)*A*inv(L**T) */
+
+/* If ITYPE = 2 or 3, the problem is A*B*x = lambda*x or */
+/* B*A*x = lambda*x, and A is overwritten by U*A*U**T or L**T*A*L. */
+
+/* B must have been previously factorized as U**T*U or L*L**T by DPOTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* = 1: compute inv(U**T)*A*inv(U) or inv(L)*A*inv(L**T); */
+/* = 2 or 3: compute U*A*U**T or L**T*A*L. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored and B is factored as */
+/* U**T*U; */
+/* = 'L': Lower triangle of A is stored and B is factored as */
+/* L*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the transformed matrix, stored in the */
+/* same format as A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,N) */
+/* The triangular factor from the Cholesky factorization of B, */
+/* as returned by DPOTRF. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSYGST", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "DSYGST", uplo, n, &c_n1, &c_n1, &c_n1);
+
+ if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code */
+
+ dsygs2_(itype, uplo, n, &a[a_offset], lda, &b[b_offset], ldb, info);
+ } else {
+
+/* Use blocked code */
+
+ if (*itype == 1) {
+ if (upper) {
+
+/* Compute inv(U')*A*inv(U) */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (k = 1; i__2 < 0 ? k >= i__1 : k <= i__1; k += i__2) {
+/* Computing MIN */
+ i__3 = *n - k + 1;
+ kb = min(i__3,nb);
+
+/* Update the upper triangle of A(k:n,k:n) */
+
+ dsygs2_(itype, uplo, &kb, &a[k + k * a_dim1], lda, &b[k +
+ k * b_dim1], ldb, info);
+ if (k + kb <= *n) {
+ i__3 = *n - k - kb + 1;
+ dtrsm_("Left", uplo, "Transpose", "Non-unit", &kb, &
+ i__3, &c_b14, &b[k + k * b_dim1], ldb, &a[k +
+ (k + kb) * a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ dsymm_("Left", uplo, &kb, &i__3, &c_b16, &a[k + k *
+ a_dim1], lda, &b[k + (k + kb) * b_dim1], ldb,
+ &c_b14, &a[k + (k + kb) * a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ dsyr2k_(uplo, "Transpose", &i__3, &kb, &c_b19, &a[k +
+ (k + kb) * a_dim1], lda, &b[k + (k + kb) *
+ b_dim1], ldb, &c_b14, &a[k + kb + (k + kb) *
+ a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ dsymm_("Left", uplo, &kb, &i__3, &c_b16, &a[k + k *
+ a_dim1], lda, &b[k + (k + kb) * b_dim1], ldb,
+ &c_b14, &a[k + (k + kb) * a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ dtrsm_("Right", uplo, "No transpose", "Non-unit", &kb,
+ &i__3, &c_b14, &b[k + kb + (k + kb) * b_dim1]
+, ldb, &a[k + (k + kb) * a_dim1], lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute inv(L)*A*inv(L') */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (k = 1; i__1 < 0 ? k >= i__2 : k <= i__2; k += i__1) {
+/* Computing MIN */
+ i__3 = *n - k + 1;
+ kb = min(i__3,nb);
+
+/* Update the lower triangle of A(k:n,k:n) */
+
+ dsygs2_(itype, uplo, &kb, &a[k + k * a_dim1], lda, &b[k +
+ k * b_dim1], ldb, info);
+ if (k + kb <= *n) {
+ i__3 = *n - k - kb + 1;
+ dtrsm_("Right", uplo, "Transpose", "Non-unit", &i__3,
+ &kb, &c_b14, &b[k + k * b_dim1], ldb, &a[k +
+ kb + k * a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ dsymm_("Right", uplo, &i__3, &kb, &c_b16, &a[k + k *
+ a_dim1], lda, &b[k + kb + k * b_dim1], ldb, &
+ c_b14, &a[k + kb + k * a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ dsyr2k_(uplo, "No transpose", &i__3, &kb, &c_b19, &a[
+ k + kb + k * a_dim1], lda, &b[k + kb + k *
+ b_dim1], ldb, &c_b14, &a[k + kb + (k + kb) *
+ a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ dsymm_("Right", uplo, &i__3, &kb, &c_b16, &a[k + k *
+ a_dim1], lda, &b[k + kb + k * b_dim1], ldb, &
+ c_b14, &a[k + kb + k * a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ dtrsm_("Left", uplo, "No transpose", "Non-unit", &
+ i__3, &kb, &c_b14, &b[k + kb + (k + kb) *
+ b_dim1], ldb, &a[k + kb + k * a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+ } else {
+ if (upper) {
+
+/* Compute U*A*U' */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (k = 1; i__2 < 0 ? k >= i__1 : k <= i__1; k += i__2) {
+/* Computing MIN */
+ i__3 = *n - k + 1;
+ kb = min(i__3,nb);
+
+/* Update the upper triangle of A(1:k+kb-1,1:k+kb-1) */
+
+ i__3 = k - 1;
+ dtrmm_("Left", uplo, "No transpose", "Non-unit", &i__3, &
+ kb, &c_b14, &b[b_offset], ldb, &a[k * a_dim1 + 1],
+ lda)
+ ;
+ i__3 = k - 1;
+ dsymm_("Right", uplo, &i__3, &kb, &c_b52, &a[k + k *
+ a_dim1], lda, &b[k * b_dim1 + 1], ldb, &c_b14, &a[
+ k * a_dim1 + 1], lda);
+ i__3 = k - 1;
+ dsyr2k_(uplo, "No transpose", &i__3, &kb, &c_b14, &a[k *
+ a_dim1 + 1], lda, &b[k * b_dim1 + 1], ldb, &c_b14,
+ &a[a_offset], lda);
+ i__3 = k - 1;
+ dsymm_("Right", uplo, &i__3, &kb, &c_b52, &a[k + k *
+ a_dim1], lda, &b[k * b_dim1 + 1], ldb, &c_b14, &a[
+ k * a_dim1 + 1], lda);
+ i__3 = k - 1;
+ dtrmm_("Right", uplo, "Transpose", "Non-unit", &i__3, &kb,
+ &c_b14, &b[k + k * b_dim1], ldb, &a[k * a_dim1 +
+ 1], lda);
+ dsygs2_(itype, uplo, &kb, &a[k + k * a_dim1], lda, &b[k +
+ k * b_dim1], ldb, info);
+/* L30: */
+ }
+ } else {
+
+/* Compute L'*A*L */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (k = 1; i__1 < 0 ? k >= i__2 : k <= i__2; k += i__1) {
+/* Computing MIN */
+ i__3 = *n - k + 1;
+ kb = min(i__3,nb);
+
+/* Update the lower triangle of A(1:k+kb-1,1:k+kb-1) */
+
+ i__3 = k - 1;
+ dtrmm_("Right", uplo, "No transpose", "Non-unit", &kb, &
+ i__3, &c_b14, &b[b_offset], ldb, &a[k + a_dim1],
+ lda);
+ i__3 = k - 1;
+ dsymm_("Left", uplo, &kb, &i__3, &c_b52, &a[k + k *
+ a_dim1], lda, &b[k + b_dim1], ldb, &c_b14, &a[k +
+ a_dim1], lda);
+ i__3 = k - 1;
+ dsyr2k_(uplo, "Transpose", &i__3, &kb, &c_b14, &a[k +
+ a_dim1], lda, &b[k + b_dim1], ldb, &c_b14, &a[
+ a_offset], lda);
+ i__3 = k - 1;
+ dsymm_("Left", uplo, &kb, &i__3, &c_b52, &a[k + k *
+ a_dim1], lda, &b[k + b_dim1], ldb, &c_b14, &a[k +
+ a_dim1], lda);
+ i__3 = k - 1;
+ dtrmm_("Left", uplo, "Transpose", "Non-unit", &kb, &i__3,
+ &c_b14, &b[k + k * b_dim1], ldb, &a[k + a_dim1],
+ lda);
+ dsygs2_(itype, uplo, &kb, &a[k + k * a_dim1], lda, &b[k +
+ k * b_dim1], ldb, info);
+/* L40: */
+ }
+ }
+ }
+ }
+ return 0;
+
+/* End of DSYGST */
+
+} /* dsygst_ */
diff --git a/contrib/libs/clapack/dsygv.c b/contrib/libs/clapack/dsygv.c
new file mode 100644
index 0000000000..47a80c695d
--- /dev/null
+++ b/contrib/libs/clapack/dsygv.c
@@ -0,0 +1,285 @@
+/* dsygv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b16 = 1.;
+
+/* Subroutine */ int dsygv_(integer *itype, char *jobz, char *uplo, integer *
+ n, doublereal *a, integer *lda, doublereal *b, integer *ldb,
+ doublereal *w, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+
+ /* Local variables */
+ integer nb, neig;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dtrmm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *);
+ char trans[1];
+ extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *);
+ logical upper;
+ extern /* Subroutine */ int dsyev_(char *, char *, integer *, doublereal *
+, integer *, doublereal *, doublereal *, integer *, integer *);
+ logical wantz;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int dpotrf_(char *, integer *, doublereal *,
+ integer *, integer *);
+ integer lwkmin;
+ extern /* Subroutine */ int dsygst_(integer *, char *, integer *,
+ doublereal *, integer *, doublereal *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYGV computes all the eigenvalues, and optionally, the eigenvectors */
+/* of a real generalized symmetric-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. */
+/* Here A and B are assumed to be symmetric and B is also */
+/* positive definite. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA, N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+
+/* On exit, if JOBZ = 'V', then if INFO = 0, A contains the */
+/* matrix Z of eigenvectors. The eigenvectors are normalized */
+/* as follows: */
+/* if ITYPE = 1 or 2, Z**T*B*Z = I; */
+/* if ITYPE = 3, Z**T*inv(B)*Z = I. */
+/* If JOBZ = 'N', then on exit the upper triangle (if UPLO='U') */
+/* or the lower triangle (if UPLO='L') of A, including the */
+/* diagonal, is destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB, N) */
+/* On entry, the symmetric positive definite matrix B. */
+/* If UPLO = 'U', the leading N-by-N upper triangular part of B */
+/* contains the upper triangular part of the matrix B. */
+/* If UPLO = 'L', the leading N-by-N lower triangular part of B */
+/* contains the lower triangular part of the matrix B. */
+
+/* On exit, if INFO <= N, the part of B containing the matrix is */
+/* overwritten by the triangular factor U or L from the Cholesky */
+/* factorization B = U**T*U or B = L*L**T. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,3*N-1). */
+/* For optimal efficiency, LWORK >= (NB+2)*N, */
+/* where NB is the blocksize for DSYTRD returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: DPOTRF or DSYEV returned an error code: */
+/* <= N: if INFO = i, DSYEV failed to converge; */
+/* i off-diagonal elements of an intermediate */
+/* tridiagonal form did not converge to zero; */
+/* > N: if INFO = N + i, for 1 <= i <= N, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --w;
+ --work;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1;
+
+ *info = 0;
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -2;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ }
+
+ if (*info == 0) {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n * 3 - 1;
+ lwkmin = max(i__1,i__2);
+ nb = ilaenv_(&c__1, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = lwkmin, i__2 = (nb + 2) * *n;
+ lwkopt = max(i__1,i__2);
+ work[1] = (doublereal) lwkopt;
+
+ if (*lwork < lwkmin && ! lquery) {
+ *info = -11;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSYGV ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a Cholesky factorization of B. */
+
+ dpotrf_(uplo, n, &b[b_offset], ldb, info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+ dsygst_(itype, uplo, n, &a[a_offset], lda, &b[b_offset], ldb, info);
+ dsyev_(jobz, uplo, n, &a[a_offset], lda, &w[1], &work[1], lwork, info);
+
+ if (wantz) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+ neig = *n;
+ if (*info > 0) {
+ neig = *info - 1;
+ }
+ if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'N';
+ } else {
+ *(unsigned char *)trans = 'T';
+ }
+
+ dtrsm_("Left", uplo, trans, "Non-unit", n, &neig, &c_b16, &b[
+ b_offset], ldb, &a[a_offset], lda);
+
+ } else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'T';
+ } else {
+ *(unsigned char *)trans = 'N';
+ }
+
+ dtrmm_("Left", uplo, trans, "Non-unit", n, &neig, &c_b16, &b[
+ b_offset], ldb, &a[a_offset], lda);
+ }
+ }
+
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DSYGV */
+
+} /* dsygv_ */
diff --git a/contrib/libs/clapack/dsygvd.c b/contrib/libs/clapack/dsygvd.c
new file mode 100644
index 0000000000..c7469cb708
--- /dev/null
+++ b/contrib/libs/clapack/dsygvd.c
@@ -0,0 +1,338 @@
+/* dsygvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b11 = 1.;
+
+/* Subroutine */ int dsygvd_(integer *itype, char *jobz, char *uplo, integer *
+	n, doublereal *a, integer *lda, doublereal *b, integer *ldb, 
+	doublereal *w, doublereal *work, integer *lwork, integer *iwork, 
+	integer *liwork, integer *info)
+{
+/* NOTE(review): f2c-generated C translation of LAPACK dsygvd.f (see file */
+/* header).  Keep in sync with the Fortran reference; avoid hand-editing */
+/* the logic. */
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+    doublereal d__1, d__2;
+
+    /* Local variables */
+    integer lopt;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dtrmm_(char *, char *, char *, char *, 
+	    integer *, integer *, doublereal *, doublereal *, integer *, 
+	    doublereal *, integer *);
+    integer lwmin;
+    char trans[1];
+    integer liopt;
+    extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *, 
+	    integer *, integer *, doublereal *, doublereal *, integer *, 
+	    doublereal *, integer *);
+    logical upper, wantz;
+    extern /* Subroutine */ int xerbla_(char *, integer *), dpotrf_(
+	    char *, integer *, doublereal *, integer *, integer *);
+    integer liwmin;
+    extern /* Subroutine */ int dsyevd_(char *, char *, integer *, doublereal 
+	    *, integer *, doublereal *, doublereal *, integer *, integer *, 
+	    integer *, integer *), dsygst_(integer *, char *, 
+	    integer *, doublereal *, integer *, doublereal *, integer *, 
+	    integer *);
+    logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYGVD computes all the eigenvalues, and optionally, the eigenvectors */
+/* of a real generalized symmetric-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. Here A and */
+/* B are assumed to be symmetric and B is also positive definite. */
+/* If eigenvectors are desired, it uses a divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA, N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+
+/* On exit, if JOBZ = 'V', then if INFO = 0, A contains the */
+/* matrix Z of eigenvectors. The eigenvectors are normalized */
+/* as follows: */
+/* if ITYPE = 1 or 2, Z**T*B*Z = I; */
+/* if ITYPE = 3, Z**T*inv(B)*Z = I. */
+/* If JOBZ = 'N', then on exit the upper triangle (if UPLO='U') */
+/* or the lower triangle (if UPLO='L') of A, including the */
+/* diagonal, is destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB, N) */
+/* On entry, the symmetric matrix B. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of B contains the */
+/* upper triangular part of the matrix B. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of B contains */
+/* the lower triangular part of the matrix B. */
+
+/* On exit, if INFO <= N, the part of B containing the matrix is */
+/* overwritten by the triangular factor U or L from the Cholesky */
+/* factorization B = U**T*U or B = L*L**T. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If N <= 1, LWORK >= 1. */
+/* If JOBZ = 'N' and N > 1, LWORK >= 2*N+1. */
+/* If JOBZ = 'V' and N > 1, LWORK >= 1 + 6*N + 2*N**2. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK and IWORK */
+/* arrays, returns these values as the first entries of the WORK */
+/* and IWORK arrays, and no error message related to LWORK or */
+/* LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If N <= 1, LIWORK >= 1. */
+/* If JOBZ = 'N' and N > 1, LIWORK >= 1. */
+/* If JOBZ = 'V' and N > 1, LIWORK >= 3 + 5*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK and IWORK arrays, and no error message related to */
+/* LWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: DPOTRF or DSYEVD returned an error code: */
+/* <= N: if INFO = i and JOBZ = 'N', then the algorithm */
+/* failed to converge; i off-diagonal elements of an */
+/* intermediate tridiagonal form did not converge to */
+/* zero; */
+/* if INFO = i and JOBZ = 'V', then the algorithm */
+/* failed to compute an eigenvalue while working on */
+/* the submatrix lying in rows and columns INFO/(N+1) */
+/* through mod(INFO,N+1); */
+/* > N: if INFO = N + i, for 1 <= i <= N, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* Modified so that no backsubstitution is performed if DSYEVD fails to */
+/* converge (NEIG in old code could be greater than N causing out of */
+/* bounds reference to A - reported by Ralf Meyer). Also corrected the */
+/* description of INFO and the test on ITYPE. Sven, 16 Feb 05. */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --w;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    upper = lsame_(uplo, "U");
+/* LWORK = -1 or LIWORK = -1 marks this call as a workspace-size query. */
+    lquery = *lwork == -1 || *liwork == -1;
+
+    *info = 0;
+/* Minimal workspace sizes depend on N and on whether eigenvectors */
+/* (JOBZ = 'V') are requested; see LWORK/LIWORK descriptions above. */
+    if (*n <= 1) {
+	liwmin = 1;
+	lwmin = 1;
+    } else if (wantz) {
+	liwmin = *n * 5 + 3;
+/* Computing 2nd power */
+	i__1 = *n;
+	lwmin = *n * 6 + 1 + (i__1 * i__1 << 1);
+    } else {
+	liwmin = 1;
+	lwmin = (*n << 1) + 1;
+    }
+    lopt = lwmin;
+    liopt = liwmin;
+    if (*itype < 1 || *itype > 3) {
+	*info = -1;
+    } else if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -2;
+    } else if (! (upper || lsame_(uplo, "L"))) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*n)) {
+	*info = -6;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    }
+
+    if (*info == 0) {
+	work[1] = (doublereal) lopt;
+	iwork[1] = liopt;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -11;
+	} else if (*liwork < liwmin && ! lquery) {
+	    *info = -13;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSYGVD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Form a Cholesky factorization of B. */
+
+    dpotrf_(uplo, n, &b[b_offset], ldb, info);
+    if (*info != 0) {
+/* Map DPOTRF failure (leading minor i not positive definite) to N + i. */
+	*info = *n + *info;
+	return 0;
+    }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+    dsygst_(itype, uplo, n, &a[a_offset], lda, &b[b_offset], ldb, info);
+    dsyevd_(jobz, uplo, n, &a[a_offset], lda, &w[1], &work[1], lwork, &iwork[
+	    1], liwork, info);
+/* Fold in the optimal sizes that DSYEVD reported back in WORK(1)/IWORK(1). */
+/* Computing MAX */
+    d__1 = (doublereal) lopt;
+    lopt = (integer) max(d__1,work[1]);
+/* Computing MAX */
+    d__1 = (doublereal) liopt, d__2 = (doublereal) iwork[1];
+    liopt = (integer) max(d__1,d__2);
+
+    if (wantz && *info == 0) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+	if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+	    if (upper) {
+		*(unsigned char *)trans = 'N';
+	    } else {
+		*(unsigned char *)trans = 'T';
+	    }
+
+	    dtrsm_("Left", uplo, trans, "Non-unit", n, n, &c_b11, &b[b_offset]
+, ldb, &a[a_offset], lda);
+
+	} else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+	    if (upper) {
+		*(unsigned char *)trans = 'T';
+	    } else {
+		*(unsigned char *)trans = 'N';
+	    }
+
+	    dtrmm_("Left", uplo, trans, "Non-unit", n, n, &c_b11, &b[b_offset]
+, ldb, &a[a_offset], lda);
+	}
+    }
+
+/* Report the optimal workspace sizes back to the caller. */
+    work[1] = (doublereal) lopt;
+    iwork[1] = liopt;
+
+    return 0;
+
+/* End of DSYGVD */
+
+} /* dsygvd_ */
diff --git a/contrib/libs/clapack/dsygvx.c b/contrib/libs/clapack/dsygvx.c
new file mode 100644
index 0000000000..fa9756c8a8
--- /dev/null
+++ b/contrib/libs/clapack/dsygvx.c
@@ -0,0 +1,396 @@
+/* dsygvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b19 = 1.;
+
+/* Subroutine */ int dsygvx_(integer *itype, char *jobz, char *range, char *
+	uplo, integer *n, doublereal *a, integer *lda, doublereal *b, integer 
+	*ldb, doublereal *vl, doublereal *vu, integer *il, integer *iu, 
+	doublereal *abstol, integer *m, doublereal *w, doublereal *z__, 
+	integer *ldz, doublereal *work, integer *lwork, integer *iwork, 
+	integer *ifail, integer *info)
+{
+/* NOTE(review): f2c-generated C translation of LAPACK dsygvx.f (see file */
+/* header).  Keep in sync with the Fortran reference; avoid hand-editing */
+/* the logic. */
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, z_dim1, z_offset, i__1, i__2;
+
+    /* Local variables */
+    integer nb;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dtrmm_(char *, char *, char *, char *, 
+	    integer *, integer *, doublereal *, doublereal *, integer *, 
+	    doublereal *, integer *);
+    char trans[1];
+    extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *, 
+	    integer *, integer *, doublereal *, doublereal *, integer *, 
+	    doublereal *, integer *);
+    logical upper, wantz, alleig, indeig, valeig;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    extern /* Subroutine */ int dpotrf_(char *, integer *, doublereal *, 
+	    integer *, integer *);
+    integer lwkmin;
+    extern /* Subroutine */ int dsygst_(integer *, char *, integer *, 
+	    doublereal *, integer *, doublereal *, integer *, integer *);
+    integer lwkopt;
+    logical lquery;
+    extern /* Subroutine */ int dsyevx_(char *, char *, char *, integer *, 
+	    doublereal *, integer *, doublereal *, doublereal *, integer *, 
+	    integer *, doublereal *, integer *, doublereal *, doublereal *, 
+	    integer *, doublereal *, integer *, integer *, integer *, integer 
+	    *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYGVX computes selected eigenvalues, and optionally, eigenvectors */
+/* of a real generalized symmetric-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. Here A */
+/* and B are assumed to be symmetric and B is also positive definite. */
+/* Eigenvalues and eigenvectors can be selected by specifying either a */
+/* range of values or a range of indices for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A and B are stored; */
+/* = 'L': Lower triangle of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix pencil (A,B). N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA, N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+
+/* On exit, the lower triangle (if UPLO='L') or the upper */
+/* triangle (if UPLO='U') of A, including the diagonal, is */
+/* destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDA, N) */
+/* On entry, the symmetric matrix B. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of B contains the */
+/* upper triangular part of the matrix B. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of B contains */
+/* the lower triangular part of the matrix B. */
+
+/* On exit, if INFO <= N, the part of B containing the matrix is */
+/* overwritten by the triangular factor U or L from the Cholesky */
+/* factorization B = U**T*U or B = L*L**T. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) DOUBLE PRECISION */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing A to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*DLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*DLAMCH('S'). */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* On normal exit, the first M elements contain the selected */
+/* eigenvalues in ascending order. */
+
+/* Z (output) DOUBLE PRECISION array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* The eigenvectors are normalized as follows: */
+/* if ITYPE = 1 or 2, Z**T*B*Z = I; */
+/* if ITYPE = 3, Z**T*inv(B)*Z = I. */
+
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and the */
+/* index of the eigenvector is returned in IFAIL. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,8*N). */
+/* For optimal efficiency, LWORK >= (NB+3)*N, */
+/* where NB is the blocksize for DSYTRD returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: DPOTRF or DSYEVX returned an error code: */
+/* <= N: if INFO = i, DSYEVX failed to converge; */
+/* i eigenvectors failed to converge. Their indices */
+/* are stored in array IFAIL. */
+/* > N: if INFO = N + i, for 1 <= i <= N, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --iwork;
+    --ifail;
+
+    /* Function Body */
+    upper = lsame_(uplo, "U");
+    wantz = lsame_(jobz, "V");
+    alleig = lsame_(range, "A");
+    valeig = lsame_(range, "V");
+    indeig = lsame_(range, "I");
+/* LWORK = -1 marks this call as a workspace-size query. */
+    lquery = *lwork == -1;
+
+    *info = 0;
+    if (*itype < 1 || *itype > 3) {
+	*info = -1;
+    } else if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -2;
+    } else if (! (alleig || valeig || indeig)) {
+	*info = -3;
+    } else if (! (upper || lsame_(uplo, "L"))) {
+	*info = -4;
+    } else if (*n < 0) {
+	*info = -5;
+    } else if (*lda < max(1,*n)) {
+	*info = -7;
+    } else if (*ldb < max(1,*n)) {
+	*info = -9;
+    } else {
+/* Range bounds are only validated for the selection mode in use. */
+	if (valeig) {
+	    if (*n > 0 && *vu <= *vl) {
+		*info = -11;
+	    }
+	} else if (indeig) {
+	    if (*il < 1 || *il > max(1,*n)) {
+		*info = -12;
+	    } else if (*iu < min(*n,*il) || *iu > *n) {
+		*info = -13;
+	    }
+	}
+    }
+    if (*info == 0) {
+	if (*ldz < 1 || wantz && *ldz < *n) {
+	    *info = -18;
+	}
+    }
+
+    if (*info == 0) {
+/* Workspace: minimum 8*N; optimal (NB+3)*N with NB the DSYTRD blocksize */
+/* obtained from ILAENV. */
+/* Computing MAX */
+	i__1 = 1, i__2 = *n << 3;
+	lwkmin = max(i__1,i__2);
+	nb = ilaenv_(&c__1, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+	i__1 = lwkmin, i__2 = (nb + 3) * *n;
+	lwkopt = max(i__1,i__2);
+	work[1] = (doublereal) lwkopt;
+
+	if (*lwork < lwkmin && ! lquery) {
+	    *info = -20;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DSYGVX", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *m = 0;
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Form a Cholesky factorization of B. */
+
+    dpotrf_(uplo, n, &b[b_offset], ldb, info);
+    if (*info != 0) {
+/* Map DPOTRF failure (leading minor i not positive definite) to N + i. */
+	*info = *n + *info;
+	return 0;
+    }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+    dsygst_(itype, uplo, n, &a[a_offset], lda, &b[b_offset], ldb, info);
+    dsyevx_(jobz, range, uplo, n, &a[a_offset], lda, vl, vu, il, iu, abstol, 
+	    m, &w[1], &z__[z_offset], ldz, &work[1], lwork, &iwork[1], &ifail[
+	    1], info);
+
+    if (wantz) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+/* If DSYEVX reported INFO > 0 convergence failures, restrict the */
+/* backtransformation to the first INFO - 1 columns of Z. */
+	if (*info > 0) {
+	    *m = *info - 1;
+	}
+	if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+	    if (upper) {
+		*(unsigned char *)trans = 'N';
+	    } else {
+		*(unsigned char *)trans = 'T';
+	    }
+
+	    dtrsm_("Left", uplo, trans, "Non-unit", n, m, &c_b19, &b[b_offset]
+, ldb, &z__[z_offset], ldz);
+
+	} else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+	    if (upper) {
+		*(unsigned char *)trans = 'T';
+	    } else {
+		*(unsigned char *)trans = 'N';
+	    }
+
+	    dtrmm_("Left", uplo, trans, "Non-unit", n, m, &c_b19, &b[b_offset]
+, ldb, &z__[z_offset], ldz);
+	}
+    }
+
+/* Set WORK(1) to optimal workspace size. */
+
+    work[1] = (doublereal) lwkopt;
+
+    return 0;
+
+/* End of DSYGVX */
+
+} /* dsygvx_ */
diff --git a/contrib/libs/clapack/dsyrfs.c b/contrib/libs/clapack/dsyrfs.c
new file mode 100644
index 0000000000..399d2f56d0
--- /dev/null
+++ b/contrib/libs/clapack/dsyrfs.c
@@ -0,0 +1,429 @@
+/* dsyrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b12 = -1.;
+static doublereal c_b14 = 1.;
+
+/* Subroutine */ int dsyrfs_(char *uplo, integer *n, integer *nrhs,
+ doublereal *a, integer *lda, doublereal *af, integer *ldaf, integer *
+ ipiv, doublereal *b, integer *ldb, doublereal *x, integer *ldx,
+ doublereal *ferr, doublereal *berr, doublereal *work, integer *iwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+ x_offset, i__1, i__2, i__3;
+ doublereal d__1, d__2, d__3;
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal s, xk;
+ integer nz;
+ doublereal eps;
+ integer kase;
+ doublereal safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), daxpy_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *);
+ integer count;
+ logical upper;
+ extern /* Subroutine */ int dsymv_(char *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *), dlacn2_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, integer *);
+ extern doublereal dlamch_(char *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal lstres;
+ extern /* Subroutine */ int dsytrs_(char *, integer *, integer *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is symmetric indefinite, and */
+/* provides error bounds and backward error estimates for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The symmetric matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input) DOUBLE PRECISION array, dimension (LDAF,N) */
+/* The factored form of the matrix A. AF contains the block */
+/* diagonal matrix D and the multipliers used to obtain the */
+/* factor U or L from the factorization A = U*D*U**T or */
+/* A = L*D*L**T as computed by DSYTRF. */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by DSYTRF. */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by DSYTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ af_dim1 = *ldaf;
+ af_offset = 1 + af_dim1;
+ af -= af_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldaf < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldx < max(1,*n)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSYRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.;
+ berr[j] = 0.;
+/* L10: */
+ }
+ return 0;
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = *n + 1;
+ eps = dlamch_("Epsilon");
+ safmin = dlamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+ dcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+ dsymv_(uplo, n, &c_b12, &a[a_offset], lda, &x[j * x_dim1 + 1], &c__1,
+ &c_b14, &work[*n + 1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] = (d__1 = b[i__ + j * b_dim1], abs(d__1));
+/* L30: */
+ }
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+ if (upper) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ work[i__] += (d__1 = a[i__ + k * a_dim1], abs(d__1)) * xk;
+ s += (d__1 = a[i__ + k * a_dim1], abs(d__1)) * (d__2 = x[
+ i__ + j * x_dim1], abs(d__2));
+/* L40: */
+ }
+ work[k] = work[k] + (d__1 = a[k + k * a_dim1], abs(d__1)) *
+ xk + s;
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+ work[k] += (d__1 = a[k + k * a_dim1], abs(d__1)) * xk;
+ i__3 = *n;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ work[i__] += (d__1 = a[i__ + k * a_dim1], abs(d__1)) * xk;
+ s += (d__1 = a[i__ + k * a_dim1], abs(d__1)) * (d__2 = x[
+ i__ + j * x_dim1], abs(d__2));
+/* L60: */
+ }
+ work[k] += s;
+/* L70: */
+ }
+ }
+ s = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+/* Computing MAX */
+ d__2 = s, d__3 = (d__1 = work[*n + i__], abs(d__1)) / work[
+ i__];
+ s = max(d__2,d__3);
+ } else {
+/* Computing MAX */
+ d__2 = s, d__3 = ((d__1 = work[*n + i__], abs(d__1)) + safe1)
+ / (work[i__] + safe1);
+ s = max(d__2,d__3);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ dsytrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[*n
+ + 1], n, info);
+ daxpy_(n, &c_b14, &work[*n + 1], &c__1, &x[j * x_dim1 + 1], &c__1)
+ ;
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use DLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+ work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+ work[i__];
+ } else {
+ work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+ work[i__] + safe1;
+ }
+/* L90: */
+ }
+
+ kase = 0;
+L100:
+ dlacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+ kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+ dsytrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[
+ *n + 1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] = work[i__] * work[*n + i__];
+/* L110: */
+ }
+ } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] = work[i__] * work[*n + i__];
+/* L120: */
+ }
+ dsytrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[
+ *n + 1], n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = lstres, d__3 = (d__1 = x[i__ + j * x_dim1], abs(d__1));
+ lstres = max(d__2,d__3);
+/* L130: */
+ }
+ if (lstres != 0.) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of DSYRFS */
+
+} /* dsyrfs_ */
diff --git a/contrib/libs/clapack/dsysv.c b/contrib/libs/clapack/dsysv.c
new file mode 100644
index 0000000000..e53c63871a
--- /dev/null
+++ b/contrib/libs/clapack/dsysv.c
@@ -0,0 +1,215 @@
+/* dsysv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dsysv_(char *uplo, integer *n, integer *nrhs, doublereal
+ *a, integer *lda, integer *ipiv, doublereal *b, integer *ldb,
+ doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ integer nb;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int dsytrf_(char *, integer *, doublereal *,
+ integer *, integer *, doublereal *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int dsytrs_(char *, integer *, integer *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYSV computes the solution to a real system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric matrix and X and B are N-by-NRHS */
+/* matrices. */
+
+/* The diagonal pivoting method is used to factor A as */
+/* A = U * D * U**T, if UPLO = 'U', or */
+/* A = L * D * L**T, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is symmetric and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. The factored form of A is then */
+/* used to solve the system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the block diagonal matrix D and the */
+/* multipliers used to obtain the factor U or L from the */
+/* factorization A = U*D*U**T or A = L*D*L**T as computed by */
+/* DSYTRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D, as */
+/* determined by DSYTRF. If IPIV(k) > 0, then rows and columns */
+/* k and IPIV(k) were interchanged, and D(k,k) is a 1-by-1 */
+/* diagonal block. If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, */
+/* then rows and columns k-1 and -IPIV(k) were interchanged and */
+/* D(k-1:k,k-1:k) is a 2-by-2 diagonal block. If UPLO = 'L' and */
+/* IPIV(k) = IPIV(k+1) < 0, then rows and columns k+1 and */
+/* -IPIV(k) were interchanged and D(k:k+1,k:k+1) is a 2-by-2 */
+/* diagonal block. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of WORK. LWORK >= 1, and for best performance */
+/* LWORK >= max(1,N*NB), where NB is the optimal blocksize for */
+/* DSYTRF. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, so the solution could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+/* f2c convention: shift each array pointer so 1-based Fortran indexing */
+/* (e.g. a[i + j*a_dim1]) works directly on the C pointers. */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+/* LQUERY: caller only wants the optimal workspace size (LWORK = -1). */
+ lquery = *lwork == -1;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ } else if (*lwork < 1 && ! lquery) {
+ *info = -10;
+ }
+
+/* Determine the optimal workspace: N*NB doubles for the blocked */
+/* factorization, where NB is DSYTRF's preferred block size. */
+ if (*info == 0) {
+ if (*n == 0) {
+ lwkopt = 1;
+ } else {
+ nb = ilaenv_(&c__1, "DSYTRF", uplo, n, &c_n1, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ }
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSYSV ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+ dsytrf_(uplo, n, &a[a_offset], lda, &ipiv[1], &work[1], lwork, info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ dsytrs_(uplo, n, nrhs, &a[a_offset], lda, &ipiv[1], &b[b_offset], ldb,
+ info);
+
+ }
+
+ work[1] = (doublereal) lwkopt;
+
+ return 0;
+
+/* End of DSYSV */
+
+} /* dsysv_ */
diff --git a/contrib/libs/clapack/dsysvx.c b/contrib/libs/clapack/dsysvx.c
new file mode 100644
index 0000000000..0877279296
--- /dev/null
+++ b/contrib/libs/clapack/dsysvx.c
@@ -0,0 +1,370 @@
+/* dsysvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int dsysvx_(char *fact, char *uplo, integer *n, integer *
+ nrhs, doublereal *a, integer *lda, doublereal *af, integer *ldaf,
+ integer *ipiv, doublereal *b, integer *ldb, doublereal *x, integer *
+ ldx, doublereal *rcond, doublereal *ferr, doublereal *berr,
+ doublereal *work, integer *lwork, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+ x_offset, i__1, i__2;
+
+ /* Local variables */
+ integer nb;
+ extern logical lsame_(char *, char *);
+ doublereal anorm;
+ extern doublereal dlamch_(char *);
+ logical nofact;
+ extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern doublereal dlansy_(char *, char *, integer *, doublereal *,
+ integer *, doublereal *);
+ extern /* Subroutine */ int dsycon_(char *, integer *, doublereal *,
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, integer *), dsyrfs_(char *, integer *, integer
+ *, doublereal *, integer *, doublereal *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *, integer *),
+ dsytrf_(char *, integer *, doublereal *, integer *, integer *,
+ doublereal *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int dsytrs_(char *, integer *, integer *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYSVX uses the diagonal pivoting factorization to compute the */
+/* solution to a real system of linear equations A * X = B, */
+/* where A is an N-by-N symmetric matrix and X and B are N-by-NRHS */
+/* matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the diagonal pivoting method is used to factor A. */
+/* The form of the factorization is */
+/* A = U * D * U**T, if UPLO = 'U', or */
+/* A = L * D * L**T, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is symmetric and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* 2. If some D(i,i)=0, so that D is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of A has been */
+/* supplied on entry. */
+/* = 'F': On entry, AF and IPIV contain the factored form of */
+/* A. AF and IPIV will not be modified. */
+/* = 'N': The matrix A will be copied to AF and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The symmetric matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input or output) DOUBLE PRECISION array, dimension (LDAF,N) */
+/* If FACT = 'F', then AF is an input argument and on entry */
+/* contains the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**T or A = L*D*L**T as computed by DSYTRF. */
+
+/* If FACT = 'N', then AF is an output argument and on exit */
+/* returns the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**T or A = L*D*L**T. */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by DSYTRF. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by DSYTRF. */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A. If RCOND is less than the machine precision (in */
+/* particular, if RCOND = 0), the matrix is singular to working */
+/* precision. This condition is indicated by a return code of */
+/* INFO > 0. */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of WORK. LWORK >= max(1,3*N), and for best */
+/* performance, when FACT = 'N', LWORK >= max(1,3*N,N*NB), where */
+/* NB is the optimal blocksize for DSYTRF. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: D(i,i) is exactly zero. The factorization */
+/* has been completed but the factor D is exactly */
+/* singular, so the solution and error bounds could */
+/* not be computed. RCOND = 0 is returned. */
+/* = N+1: D is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+/* f2c convention: shift each array pointer so 1-based Fortran indexing */
+/* works directly on the C pointers. */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ af_dim1 = *ldaf;
+ af_offset = 1 + af_dim1;
+ af -= af_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+/* NOFACT: A must be copied to AF and factored here; otherwise the */
+/* caller supplied the factorization in AF and IPIV (FACT = 'F'). */
+ nofact = lsame_(fact, "N");
+/* LQUERY: caller only wants the optimal workspace size (LWORK = -1). */
+ lquery = *lwork == -1;
+ if (! nofact && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! lsame_(uplo, "U") && ! lsame_(uplo,
+ "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else if (*ldaf < max(1,*n)) {
+ *info = -8;
+ } else if (*ldb < max(1,*n)) {
+ *info = -11;
+ } else if (*ldx < max(1,*n)) {
+ *info = -13;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n * 3;
+ if (*lwork < max(i__1,i__2) && ! lquery) {
+ *info = -18;
+ }
+ }
+
+/* Optimal workspace: at least 3*N; when factoring, also at least N*NB */
+/* for the blocked DSYTRF. */
+ if (*info == 0) {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n * 3;
+ lwkopt = max(i__1,i__2);
+ if (nofact) {
+ nb = ilaenv_(&c__1, "DSYTRF", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = lwkopt, i__2 = *n * nb;
+ lwkopt = max(i__1,i__2);
+ }
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSYSVX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+ if (nofact) {
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+ dlacpy_(uplo, n, n, &a[a_offset], lda, &af[af_offset], ldaf);
+ dsytrf_(uplo, n, &af[af_offset], ldaf, &ipiv[1], &work[1], lwork,
+ info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ anorm = dlansy_("I", uplo, n, &a[a_offset], lda, &work[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ dsycon_(uplo, n, &af[af_offset], ldaf, &ipiv[1], &anorm, rcond, &work[1],
+ &iwork[1], info);
+
+/* Compute the solution vectors X. */
+
+ dlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ dsytrs_(uplo, n, nrhs, &af[af_offset], ldaf, &ipiv[1], &x[x_offset], ldx,
+ info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+ dsyrfs_(uplo, n, nrhs, &a[a_offset], lda, &af[af_offset], ldaf, &ipiv[1],
+ &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[1]
+, &iwork[1], info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < dlamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ work[1] = (doublereal) lwkopt;
+
+ return 0;
+
+/* End of DSYSVX */
+
+} /* dsysvx_ */
diff --git a/contrib/libs/clapack/dsytd2.c b/contrib/libs/clapack/dsytd2.c
new file mode 100644
index 0000000000..2f98901557
--- /dev/null
+++ b/contrib/libs/clapack/dsytd2.c
@@ -0,0 +1,306 @@
+/* dsytd2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b8 = 0.;
+static doublereal c_b14 = -1.;
+
+/* Subroutine */ int dsytd2_(char *uplo, integer *n, doublereal *a, integer *
+ lda, doublereal *d__, doublereal *e, doublereal *tau, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ doublereal taui;
+ extern /* Subroutine */ int dsyr2_(char *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ doublereal alpha;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *);
+ logical upper;
+ extern /* Subroutine */ int dsymv_(char *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *), dlarfg_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *), xerbla_(char *, integer *
+);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYTD2 reduces a real symmetric matrix A to symmetric tridiagonal */
+/* form T by an orthogonal similarity transformation: Q' * A * Q = T. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+/* On exit, if UPLO = 'U', the diagonal and first superdiagonal */
+/* of A are overwritten by the corresponding elements of the */
+/* tridiagonal matrix T, and the elements above the first */
+/* superdiagonal, with the array TAU, represent the orthogonal */
+/* matrix Q as a product of elementary reflectors; if UPLO */
+/* = 'L', the diagonal and first subdiagonal of A are over- */
+/* written by the corresponding elements of the tridiagonal */
+/* matrix T, and the elements below the first subdiagonal, with */
+/* the array TAU, represent the orthogonal matrix Q as a product */
+/* of elementary reflectors. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* D (output) DOUBLE PRECISION array, dimension (N) */
+/* The diagonal elements of the tridiagonal matrix T: */
+/* D(i) = A(i,i). */
+
+/* E (output) DOUBLE PRECISION array, dimension (N-1) */
+/* The off-diagonal elements of the tridiagonal matrix T: */
+/* E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(n-1) . . . H(2) H(1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in */
+/* A(1:i-1,i+1), and tau in TAU(i). */
+
+/* If UPLO = 'L', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(1) H(2) . . . H(n-1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i), */
+/* and tau in TAU(i). */
+
+/* The contents of A on exit are illustrated by the following examples */
+/* with n = 5: */
+
+/* if UPLO = 'U': if UPLO = 'L': */
+
+/* ( d e v2 v3 v4 ) ( d ) */
+/* ( d e v3 v4 ) ( e d ) */
+/* ( d e v4 ) ( v1 e d ) */
+/* ( d e ) ( v1 v2 e d ) */
+/* ( d ) ( v1 v2 v3 e d ) */
+
+/* where d and e denote diagonal and off-diagonal elements of T, and vi */
+/* denotes an element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+/* f2c convention: shift each array pointer so 1-based Fortran indexing */
+/* works directly on the C pointers. */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tau;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSYTD2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Reduce the upper triangle of A */
+/* (columns are processed from right to left: i = n-1, ..., 1) */
+
+ for (i__ = *n - 1; i__ >= 1; --i__) {
+
+/* Generate elementary reflector H(i) = I - tau * v * v' */
+/* to annihilate A(1:i-1,i+1) */
+
+ dlarfg_(&i__, &a[i__ + (i__ + 1) * a_dim1], &a[(i__ + 1) * a_dim1
+ + 1], &c__1, &taui);
+ e[i__] = a[i__ + (i__ + 1) * a_dim1];
+
+ if (taui != 0.) {
+
+/* Apply H(i) from both sides to A(1:i,1:i) */
+
+/* Temporarily set v(i) = 1 so the reflector column can be */
+/* used directly by the BLAS calls; restored below. */
+ a[i__ + (i__ + 1) * a_dim1] = 1.;
+
+/* Compute x := tau * A * v storing x in TAU(1:i) */
+
+ dsymv_(uplo, &i__, &taui, &a[a_offset], lda, &a[(i__ + 1) *
+ a_dim1 + 1], &c__1, &c_b8, &tau[1], &c__1);
+
+/* Compute w := x - 1/2 * tau * (x'*v) * v */
+
+ alpha = taui * -.5 * ddot_(&i__, &tau[1], &c__1, &a[(i__ + 1)
+ * a_dim1 + 1], &c__1);
+ daxpy_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &tau[
+ 1], &c__1);
+
+/* Apply the transformation as a rank-2 update: */
+/* A := A - v * w' - w * v' */
+
+ dsyr2_(uplo, &i__, &c_b14, &a[(i__ + 1) * a_dim1 + 1], &c__1,
+ &tau[1], &c__1, &a[a_offset], lda);
+
+ a[i__ + (i__ + 1) * a_dim1] = e[i__];
+ }
+ d__[i__ + 1] = a[i__ + 1 + (i__ + 1) * a_dim1];
+ tau[i__] = taui;
+/* L10: */
+ }
+ d__[1] = a[a_dim1 + 1];
+ } else {
+
+/* Reduce the lower triangle of A */
+/* (columns are processed from left to right: i = 1, ..., n-1) */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) = I - tau * v * v' */
+/* to annihilate A(i+2:n,i) */
+
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3, *n)+ i__ *
+ a_dim1], &c__1, &taui);
+ e[i__] = a[i__ + 1 + i__ * a_dim1];
+
+ if (taui != 0.) {
+
+/* Apply H(i) from both sides to A(i+1:n,i+1:n) */
+
+/* Temporarily set v(i+1) = 1; restored below. */
+ a[i__ + 1 + i__ * a_dim1] = 1.;
+
+/* Compute x := tau * A * v storing y in TAU(i:n-1) */
+
+ i__2 = *n - i__;
+ dsymv_(uplo, &i__2, &taui, &a[i__ + 1 + (i__ + 1) * a_dim1],
+ lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b8, &tau[
+ i__], &c__1);
+
+/* Compute w := x - 1/2 * tau * (x'*v) * v */
+
+ i__2 = *n - i__;
+ alpha = taui * -.5 * ddot_(&i__2, &tau[i__], &c__1, &a[i__ +
+ 1 + i__ * a_dim1], &c__1);
+ i__2 = *n - i__;
+ daxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+ i__], &c__1);
+
+/* Apply the transformation as a rank-2 update: */
+/* A := A - v * w' - w * v' */
+
+ i__2 = *n - i__;
+ dsyr2_(uplo, &i__2, &c_b14, &a[i__ + 1 + i__ * a_dim1], &c__1,
+ &tau[i__], &c__1, &a[i__ + 1 + (i__ + 1) * a_dim1],
+ lda);
+
+ a[i__ + 1 + i__ * a_dim1] = e[i__];
+ }
+ d__[i__] = a[i__ + i__ * a_dim1];
+ tau[i__] = taui;
+/* L20: */
+ }
+ d__[*n] = a[*n + *n * a_dim1];
+ }
+
+ return 0;
+
+/* End of DSYTD2 */
+
+} /* dsytd2_ */
diff --git a/contrib/libs/clapack/dsytf2.c b/contrib/libs/clapack/dsytf2.c
new file mode 100644
index 0000000000..11d394778d
--- /dev/null
+++ b/contrib/libs/clapack/dsytf2.c
@@ -0,0 +1,608 @@
+/* dsytf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dsytf2_(char *uplo, integer *n, doublereal *a, integer *
+ lda, integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublereal d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal t, r1, d11, d12, d21, d22;
+ integer kk, kp;
+ doublereal wk, wkm1, wkp1;
+ integer imax, jmax;
+ extern /* Subroutine */ int dsyr_(char *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *);
+ doublereal alpha;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ integer kstep;
+ logical upper;
+ doublereal absakk;
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern logical disnan_(doublereal *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal colmax, rowmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYTF2 computes the factorization of a real symmetric matrix A using */
+/* the Bunch-Kaufman diagonal pivoting method: */
+
+/* A = U*D*U' or A = L*D*L' */
+
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, U' is the transpose of U, and D is symmetric and */
+/* block diagonal with 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* This is the unblocked version of the algorithm, calling Level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L (see below for further details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, D(k,k) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, and division by zero will occur if it */
+/* is used to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* 09-29-06 - patch from */
+/* Bobby Cheng, MathWorks */
+
+/* Replace l.204 and l.372 */
+/* IF( MAX( ABSAKK, COLMAX ).EQ.ZERO ) THEN */
+/* by */
+/* IF( (MAX( ABSAKK, COLMAX ).EQ.ZERO) .OR. DISNAN(ABSAKK) ) THEN */
+
+/* 01-01-96 - Based on modifications by */
+/* J. Lewis, Boeing Computer Services Company */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+/* 1-96 - Based on modifications by J. Lewis, Boeing Computer Services */
+/* Company */
+
+/* If UPLO = 'U', then A = U*D*U', where */
+/* U = P(n)*U(n)* ... *P(k)U(k)* ..., */
+/* i.e., U is a product of terms P(k)*U(k), where k decreases from n to */
+/* 1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I v 0 ) k-s */
+/* U(k) = ( 0 I 0 ) s */
+/* ( 0 0 I ) n-k */
+/* k-s s n-k */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */
+/* If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */
+/* and A(k,k), and v overwrites A(1:k-2,k-1:k). */
+
+/* If UPLO = 'L', then A = L*D*L', where */
+/* L = P(1)*L(1)* ... *P(k)*L(k)* ..., */
+/* i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */
+/* n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I 0 0 ) k-1 */
+/* L(k) = ( 0 I 0 ) s */
+/* ( 0 v I ) n-k-s+1 */
+/* k-1 s n-k-s+1 */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */
+/* If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */
+/* and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSYTF2", &i__1);
+ return 0;
+ }
+
+/* Initialize ALPHA for use in choosing pivot block size. */
+
+ alpha = (sqrt(17.) + 1.) / 8.;
+
+ if (upper) {
+
+/* Factorize A as U*D*U' using the upper triangle of A */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2 */
+
+ k = *n;
+L10:
+
+/* If K < 1, exit from loop */
+
+ if (k < 1) {
+ goto L70;
+ }
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ absakk = (d__1 = a[k + k * a_dim1], abs(d__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ imax = idamax_(&i__1, &a[k * a_dim1 + 1], &c__1);
+ colmax = (d__1 = a[imax + k * a_dim1], abs(d__1));
+ } else {
+ colmax = 0.;
+ }
+
+ if (max(absakk,colmax) == 0. || disnan_(&absakk)) {
+
+/* Column K is zero or contains a NaN: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = k - imax;
+ jmax = imax + idamax_(&i__1, &a[imax + (imax + 1) * a_dim1],
+ lda);
+ rowmax = (d__1 = a[imax + jmax * a_dim1], abs(d__1));
+ if (imax > 1) {
+ i__1 = imax - 1;
+ jmax = idamax_(&i__1, &a[imax * a_dim1 + 1], &c__1);
+/* Computing MAX */
+ d__2 = rowmax, d__3 = (d__1 = a[jmax + imax * a_dim1],
+ abs(d__1));
+ rowmax = max(d__2,d__3);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else if ((d__1 = a[imax + imax * a_dim1], abs(d__1)) >=
+ alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+ } else {
+
+/* interchange rows and columns K-1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+
+ kk = k - kstep + 1;
+ if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the leading */
+/* submatrix A(1:k,1:k) */
+
+ i__1 = kp - 1;
+ dswap_(&i__1, &a[kk * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1],
+ &c__1);
+ i__1 = kk - kp - 1;
+ dswap_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + (kp +
+ 1) * a_dim1], lda);
+ t = a[kk + kk * a_dim1];
+ a[kk + kk * a_dim1] = a[kp + kp * a_dim1];
+ a[kp + kp * a_dim1] = t;
+ if (kstep == 2) {
+ t = a[k - 1 + k * a_dim1];
+ a[k - 1 + k * a_dim1] = a[kp + k * a_dim1];
+ a[kp + k * a_dim1] = t;
+ }
+ }
+
+/* Update the leading submatrix */
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = U(k)*D(k) */
+
+/* where U(k) is the k-th column of U */
+
+/* Perform a rank-1 update of A(1:k-1,1:k-1) as */
+
+/* A := A - U(k)*D(k)*U(k)' = A - W(k)*1/D(k)*W(k)' */
+
+ r1 = 1. / a[k + k * a_dim1];
+ i__1 = k - 1;
+ d__1 = -r1;
+ dsyr_(uplo, &i__1, &d__1, &a[k * a_dim1 + 1], &c__1, &a[
+ a_offset], lda);
+
+/* Store U(k) in column k */
+
+ i__1 = k - 1;
+ dscal_(&i__1, &r1, &a[k * a_dim1 + 1], &c__1);
+ } else {
+
+/* 2-by-2 pivot block D(k): columns k and k-1 now hold */
+
+/* ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */
+
+/* where U(k) and U(k-1) are the k-th and (k-1)-th columns */
+/* of U */
+
+/* Perform a rank-2 update of A(1:k-2,1:k-2) as */
+
+/* A := A - ( U(k-1) U(k) )*D(k)*( U(k-1) U(k) )' */
+/* = A - ( W(k-1) W(k) )*inv(D(k))*( W(k-1) W(k) )' */
+
+ if (k > 2) {
+
+ d12 = a[k - 1 + k * a_dim1];
+ d22 = a[k - 1 + (k - 1) * a_dim1] / d12;
+ d11 = a[k + k * a_dim1] / d12;
+ t = 1. / (d11 * d22 - 1.);
+ d12 = t / d12;
+
+ for (j = k - 2; j >= 1; --j) {
+ wkm1 = d12 * (d11 * a[j + (k - 1) * a_dim1] - a[j + k
+ * a_dim1]);
+ wk = d12 * (d22 * a[j + k * a_dim1] - a[j + (k - 1) *
+ a_dim1]);
+ for (i__ = j; i__ >= 1; --i__) {
+ a[i__ + j * a_dim1] = a[i__ + j * a_dim1] - a[i__
+ + k * a_dim1] * wk - a[i__ + (k - 1) *
+ a_dim1] * wkm1;
+/* L20: */
+ }
+ a[j + k * a_dim1] = wk;
+ a[j + (k - 1) * a_dim1] = wkm1;
+/* L30: */
+ }
+
+ }
+
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k - 1] = -kp;
+ }
+
+/* Decrease K and return to the start of the main loop */
+
+ k -= kstep;
+ goto L10;
+
+ } else {
+
+/* Factorize A as L*D*L' using the lower triangle of A */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2 */
+
+ k = 1;
+L40:
+
+/* If K > N, exit from loop */
+
+ if (k > *n) {
+ goto L70;
+ }
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ absakk = (d__1 = a[k + k * a_dim1], abs(d__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ imax = k + idamax_(&i__1, &a[k + 1 + k * a_dim1], &c__1);
+ colmax = (d__1 = a[imax + k * a_dim1], abs(d__1));
+ } else {
+ colmax = 0.;
+ }
+
+ if (max(absakk,colmax) == 0. || disnan_(&absakk)) {
+
+/* Column K is zero or contains a NaN: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = imax - k;
+ jmax = k - 1 + idamax_(&i__1, &a[imax + k * a_dim1], lda);
+ rowmax = (d__1 = a[imax + jmax * a_dim1], abs(d__1));
+ if (imax < *n) {
+ i__1 = *n - imax;
+ jmax = imax + idamax_(&i__1, &a[imax + 1 + imax * a_dim1],
+ &c__1);
+/* Computing MAX */
+ d__2 = rowmax, d__3 = (d__1 = a[jmax + imax * a_dim1],
+ abs(d__1));
+ rowmax = max(d__2,d__3);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else if ((d__1 = a[imax + imax * a_dim1], abs(d__1)) >=
+ alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+ } else {
+
+/* interchange rows and columns K+1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+
+ kk = k + kstep - 1;
+ if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the trailing */
+/* submatrix A(k:n,k:n) */
+
+ if (kp < *n) {
+ i__1 = *n - kp;
+ dswap_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + 1
+ + kp * a_dim1], &c__1);
+ }
+ i__1 = kp - kk - 1;
+ dswap_(&i__1, &a[kk + 1 + kk * a_dim1], &c__1, &a[kp + (kk +
+ 1) * a_dim1], lda);
+ t = a[kk + kk * a_dim1];
+ a[kk + kk * a_dim1] = a[kp + kp * a_dim1];
+ a[kp + kp * a_dim1] = t;
+ if (kstep == 2) {
+ t = a[k + 1 + k * a_dim1];
+ a[k + 1 + k * a_dim1] = a[kp + k * a_dim1];
+ a[kp + k * a_dim1] = t;
+ }
+ }
+
+/* Update the trailing submatrix */
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = L(k)*D(k) */
+
+/* where L(k) is the k-th column of L */
+
+ if (k < *n) {
+
+/* Perform a rank-1 update of A(k+1:n,k+1:n) as */
+
+/* A := A - L(k)*D(k)*L(k)' = A - W(k)*(1/D(k))*W(k)' */
+
+ d11 = 1. / a[k + k * a_dim1];
+ i__1 = *n - k;
+ d__1 = -d11;
+ dsyr_(uplo, &i__1, &d__1, &a[k + 1 + k * a_dim1], &c__1, &
+ a[k + 1 + (k + 1) * a_dim1], lda);
+
+/* Store L(k) in column K */
+
+ i__1 = *n - k;
+ dscal_(&i__1, &d11, &a[k + 1 + k * a_dim1], &c__1);
+ }
+ } else {
+
+/* 2-by-2 pivot block D(k) */
+
+ if (k < *n - 1) {
+
+/* Perform a rank-2 update of A(k+2:n,k+2:n) as */
+
+/* A := A - ( (A(k) A(k+1))*D(k)**(-1) ) * (A(k) A(k+1))' */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th */
+/* columns of L */
+
+ d21 = a[k + 1 + k * a_dim1];
+ d11 = a[k + 1 + (k + 1) * a_dim1] / d21;
+ d22 = a[k + k * a_dim1] / d21;
+ t = 1. / (d11 * d22 - 1.);
+ d21 = t / d21;
+
+ i__1 = *n;
+ for (j = k + 2; j <= i__1; ++j) {
+
+ wk = d21 * (d11 * a[j + k * a_dim1] - a[j + (k + 1) *
+ a_dim1]);
+ wkp1 = d21 * (d22 * a[j + (k + 1) * a_dim1] - a[j + k
+ * a_dim1]);
+
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + j * a_dim1] - a[i__
+ + k * a_dim1] * wk - a[i__ + (k + 1) *
+ a_dim1] * wkp1;
+/* L50: */
+ }
+
+ a[j + k * a_dim1] = wk;
+ a[j + (k + 1) * a_dim1] = wkp1;
+
+/* L60: */
+ }
+ }
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k + 1] = -kp;
+ }
+
+/* Increase K and return to the start of the main loop */
+
+ k += kstep;
+ goto L40;
+
+ }
+
+L70:
+
+ return 0;
+
+/* End of DSYTF2 */
+
+} /* dsytf2_ */
diff --git a/contrib/libs/clapack/dsytrd.c b/contrib/libs/clapack/dsytrd.c
new file mode 100644
index 0000000000..f0f9ded060
--- /dev/null
+++ b/contrib/libs/clapack/dsytrd.c
@@ -0,0 +1,360 @@
+/* dsytrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+static doublereal c_b22 = -1.;
+static doublereal c_b23 = 1.;
+
+/* Subroutine */ int dsytrd_(char *uplo, integer *n, doublereal *a, integer *
+ lda, doublereal *d__, doublereal *e, doublereal *tau, doublereal *
+ work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, nb, kk, nx, iws;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ logical upper;
+ extern /* Subroutine */ int dsytd2_(char *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *), dsyr2k_(char *, char *, integer *, integer *, doublereal
+ *, doublereal *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *), dlatrd_(char *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *), xerbla_(char *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYTRD reduces a real symmetric matrix A to real symmetric */
+/* tridiagonal form T by an orthogonal similarity transformation: */
+/* Q**T * A * Q = T. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+/* On exit, if UPLO = 'U', the diagonal and first superdiagonal */
+/* of A are overwritten by the corresponding elements of the */
+/* tridiagonal matrix T, and the elements above the first */
+/* superdiagonal, with the array TAU, represent the orthogonal */
+/* matrix Q as a product of elementary reflectors; if UPLO */
+/* = 'L', the diagonal and first subdiagonal of A are over- */
+/* written by the corresponding elements of the tridiagonal */
+/* matrix T, and the elements below the first subdiagonal, with */
+/* the array TAU, represent the orthogonal matrix Q as a product */
+/* of elementary reflectors. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* D (output) DOUBLE PRECISION array, dimension (N) */
+/* The diagonal elements of the tridiagonal matrix T: */
+/* D(i) = A(i,i). */
+
+/* E (output) DOUBLE PRECISION array, dimension (N-1) */
+/* The off-diagonal elements of the tridiagonal matrix T: */
+/* E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= 1. */
+/* For optimum performance LWORK >= N*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(n-1) . . . H(2) H(1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in */
+/* A(1:i-1,i+1), and tau in TAU(i). */
+
+/* If UPLO = 'L', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(1) H(2) . . . H(n-1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i), */
+/* and tau in TAU(i). */
+
+/* The contents of A on exit are illustrated by the following examples */
+/* with n = 5: */
+
+/* if UPLO = 'U': if UPLO = 'L': */
+
+/* ( d e v2 v3 v4 ) ( d ) */
+/* ( d e v3 v4 ) ( e d ) */
+/* ( d e v4 ) ( v1 e d ) */
+/* ( d e ) ( v1 v2 e d ) */
+/* ( d ) ( v1 v2 v3 e d ) */
+
+/* where d and e denote diagonal and off-diagonal elements of T, and vi */
+/* denotes an element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1;
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else if (*lwork < 1 && ! lquery) {
+ *info = -9;
+ }
+
+ if (*info == 0) {
+
+/* Determine the block size. */
+
+ nb = ilaenv_(&c__1, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSYTRD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ nx = *n;
+ iws = 1;
+ if (nb > 1 && nb < *n) {
+
+/* Determine when to cross over from blocked to unblocked code */
+/* (last block is always handled by unblocked code). */
+
+/* Computing MAX */
+ i__1 = nb, i__2 = ilaenv_(&c__3, "DSYTRD", uplo, n, &c_n1, &c_n1, &
+ c_n1);
+ nx = max(i__1,i__2);
+ if (nx < *n) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: determine the */
+/* minimum value of NB, and reduce NB or force use of */
+/* unblocked code by setting NX = N. */
+
+/* Computing MAX */
+ i__1 = *lwork / ldwork;
+ nb = max(i__1,1);
+ nbmin = ilaenv_(&c__2, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1);
+ if (nb < nbmin) {
+ nx = *n;
+ }
+ }
+ } else {
+ nx = *n;
+ }
+ } else {
+ nb = 1;
+ }
+
+ if (upper) {
+
+/* Reduce the upper triangle of A. */
+/* Columns 1:kk are handled by the unblocked method. */
+
+ kk = *n - (*n - nx + nb - 1) / nb * nb;
+ i__1 = kk + 1;
+ i__2 = -nb;
+ for (i__ = *n - nb + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+
+/* Reduce columns i:i+nb-1 to tridiagonal form and form the */
+/* matrix W which is needed to update the unreduced part of */
+/* the matrix */
+
+ i__3 = i__ + nb - 1;
+ dlatrd_(uplo, &i__3, &nb, &a[a_offset], lda, &e[1], &tau[1], &
+ work[1], &ldwork);
+
+/* Update the unreduced submatrix A(1:i-1,1:i-1), using an */
+/* update of the form: A := A - V*W' - W*V' */
+
+ i__3 = i__ - 1;
+ dsyr2k_(uplo, "No transpose", &i__3, &nb, &c_b22, &a[i__ * a_dim1
+ + 1], lda, &work[1], &ldwork, &c_b23, &a[a_offset], lda);
+
+/* Copy superdiagonal elements back into A, and diagonal */
+/* elements into D */
+
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ a[j - 1 + j * a_dim1] = e[j - 1];
+ d__[j] = a[j + j * a_dim1];
+/* L10: */
+ }
+/* L20: */
+ }
+
+/* Use unblocked code to reduce the last or only block */
+
+ dsytd2_(uplo, &kk, &a[a_offset], lda, &d__[1], &e[1], &tau[1], &iinfo);
+ } else {
+
+/* Reduce the lower triangle of A */
+
+ i__2 = *n - nx;
+ i__1 = nb;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+
+/* Reduce columns i:i+nb-1 to tridiagonal form and form the */
+/* matrix W which is needed to update the unreduced part of */
+/* the matrix */
+
+ i__3 = *n - i__ + 1;
+ dlatrd_(uplo, &i__3, &nb, &a[i__ + i__ * a_dim1], lda, &e[i__], &
+ tau[i__], &work[1], &ldwork);
+
+/* Update the unreduced submatrix A(i+ib:n,i+ib:n), using */
+/* an update of the form: A := A - V*W' - W*V' */
+
+ i__3 = *n - i__ - nb + 1;
+ dsyr2k_(uplo, "No transpose", &i__3, &nb, &c_b22, &a[i__ + nb +
+ i__ * a_dim1], lda, &work[nb + 1], &ldwork, &c_b23, &a[
+ i__ + nb + (i__ + nb) * a_dim1], lda);
+
+/* Copy subdiagonal elements back into A, and diagonal */
+/* elements into D */
+
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ a[j + 1 + j * a_dim1] = e[j];
+ d__[j] = a[j + j * a_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Use unblocked code to reduce the last or only block */
+
+ i__1 = *n - i__ + 1;
+ dsytd2_(uplo, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__],
+ &tau[i__], &iinfo);
+ }
+
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DSYTRD */
+
+} /* dsytrd_ */
diff --git a/contrib/libs/clapack/dsytrf.c b/contrib/libs/clapack/dsytrf.c
new file mode 100644
index 0000000000..8492793421
--- /dev/null
+++ b/contrib/libs/clapack/dsytrf.c
@@ -0,0 +1,341 @@
+/* dsytrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int dsytrf_(char *uplo, integer *n, doublereal *a, integer *
+ lda, integer *ipiv, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer j, k, kb, nb, iws;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ logical upper;
+ extern /* Subroutine */ int dsytf2_(char *, integer *, doublereal *,
+ integer *, integer *, integer *), xerbla_(char *, integer
+ *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int dlasyf_(char *, integer *, integer *, integer
+ *, doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *);
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYTRF computes the factorization of a real symmetric matrix A using */
+/* the Bunch-Kaufman diagonal pivoting method. The form of the */
+/* factorization is */
+
+/* A = U*D*U**T or A = L*D*L**T */
+
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is symmetric and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* This is the blocked version of the algorithm, calling Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L (see below for further details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of WORK. LWORK >=1. For best performance */
+/* LWORK >= N*NB, where NB is the block size returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, and division by zero will occur if it */
+/* is used to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', then A = U*D*U', where */
+/* U = P(n)*U(n)* ... *P(k)U(k)* ..., */
+/* i.e., U is a product of terms P(k)*U(k), where k decreases from n to */
+/* 1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I v 0 ) k-s */
+/* U(k) = ( 0 I 0 ) s */
+/* ( 0 0 I ) n-k */
+/* k-s s n-k */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */
+/* If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */
+/* and A(k,k), and v overwrites A(1:k-2,k-1:k). */
+
+/* If UPLO = 'L', then A = L*D*L', where */
+/* L = P(1)*L(1)* ... *P(k)*L(k)* ..., */
+/* i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */
+/* n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I 0 0 ) k-1 */
+/* L(k) = ( 0 I 0 ) s */
+/* ( 0 v I ) n-k-s+1 */
+/* k-1 s n-k-s+1 */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */
+/* If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */
+/* and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1;
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else if (*lwork < 1 && ! lquery) {
+ *info = -7;
+ }
+
+ if (*info == 0) {
+
+/* Determine the block size */
+
+ nb = ilaenv_(&c__1, "DSYTRF", uplo, n, &c_n1, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSYTRF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = *n;
+ if (nb > 1 && nb < *n) {
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+/* Computing MAX */
+ i__1 = *lwork / ldwork;
+ nb = max(i__1,1);
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DSYTRF", uplo, n, &c_n1, &c_n1, &
+ c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = 1;
+ }
+ if (nb < nbmin) {
+ nb = *n;
+ }
+
+ if (upper) {
+
+/* Factorize A as U*D*U' using the upper triangle of A */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* KB, where KB is the number of columns factorized by DLASYF; */
+/* KB is either NB or NB-1, or K for the last block */
+
+ k = *n;
+L10:
+
+/* If K < 1, exit from loop */
+
+ if (k < 1) {
+ goto L40;
+ }
+
+ if (k > nb) {
+
+/* Factorize columns k-kb+1:k of A and use blocked code to */
+/* update columns 1:k-kb */
+
+ dlasyf_(uplo, &k, &nb, &kb, &a[a_offset], lda, &ipiv[1], &work[1],
+ &ldwork, &iinfo);
+ } else {
+
+/* Use unblocked code to factorize columns 1:k of A */
+
+ dsytf2_(uplo, &k, &a[a_offset], lda, &ipiv[1], &iinfo);
+ kb = k;
+ }
+
+/* Set INFO on the first occurrence of a zero pivot */
+
+ if (*info == 0 && iinfo > 0) {
+ *info = iinfo;
+ }
+
+/* Decrease K and return to the start of the main loop */
+
+ k -= kb;
+ goto L10;
+
+ } else {
+
+/* Factorize A as L*D*L' using the lower triangle of A */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* KB, where KB is the number of columns factorized by DLASYF; */
+/* KB is either NB or NB-1, or N-K+1 for the last block */
+
+ k = 1;
+L20:
+
+/* If K > N, exit from loop */
+
+ if (k > *n) {
+ goto L40;
+ }
+
+ if (k <= *n - nb) {
+
+/* Factorize columns k:k+kb-1 of A and use blocked code to */
+/* update columns k+kb:n */
+
+ i__1 = *n - k + 1;
+ dlasyf_(uplo, &i__1, &nb, &kb, &a[k + k * a_dim1], lda, &ipiv[k],
+ &work[1], &ldwork, &iinfo);
+ } else {
+
+/* Use unblocked code to factorize columns k:n of A */
+
+ i__1 = *n - k + 1;
+ dsytf2_(uplo, &i__1, &a[k + k * a_dim1], lda, &ipiv[k], &iinfo);
+ kb = *n - k + 1;
+ }
+
+/* Set INFO on the first occurrence of a zero pivot */
+
+ if (*info == 0 && iinfo > 0) {
+ *info = iinfo + k - 1;
+ }
+
+/* Adjust IPIV */
+
+ i__1 = k + kb - 1;
+ for (j = k; j <= i__1; ++j) {
+ if (ipiv[j] > 0) {
+ ipiv[j] = ipiv[j] + k - 1;
+ } else {
+ ipiv[j] = ipiv[j] - k + 1;
+ }
+/* L30: */
+ }
+
+/* Increase K and return to the start of the main loop */
+
+ k += kb;
+ goto L20;
+
+ }
+
+L40:
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DSYTRF */
+
+} /* dsytrf_ */
diff --git a/contrib/libs/clapack/dsytri.c b/contrib/libs/clapack/dsytri.c
new file mode 100644
index 0000000000..3ed3d7ca5f
--- /dev/null
+++ b/contrib/libs/clapack/dsytri.c
@@ -0,0 +1,396 @@
+/* dsytri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b11 = -1.;
+static doublereal c_b13 = 0.;
+
+/* Subroutine */ int dsytri_(char *uplo, integer *n, doublereal *a, integer *
+ lda, integer *ipiv, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1;
+ doublereal d__1;
+
+ /* Local variables */
+ doublereal d__;
+ integer k;
+ doublereal t, ak;
+ integer kp;
+ doublereal akp1;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ doublereal temp, akkp1;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dswap_(integer *, doublereal *, integer
+ *, doublereal *, integer *);
+ integer kstep;
+ logical upper;
+ extern /* Subroutine */ int dsymv_(char *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYTRI computes the inverse of a real symmetric indefinite matrix */
+/* A using the factorization A = U*D*U**T or A = L*D*L**T computed by */
+/* DSYTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**T; */
+/* = 'L': Lower triangular, form is A = L*D*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the block diagonal matrix D and the multipliers */
+/* used to obtain the factor U or L as computed by DSYTRF. */
+
+/* On exit, if INFO = 0, the (symmetric) inverse of the original */
+/* matrix. If UPLO = 'U', the upper triangular part of the */
+/* inverse is formed and the part of A below the diagonal is not */
+/* referenced; if UPLO = 'L' the lower triangular part of the */
+/* inverse is formed and the part of A above the diagonal is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by DSYTRF. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) = 0; the matrix is singular and its */
+/* inverse could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSYTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check that the diagonal matrix D is nonsingular. */
+
+ if (upper) {
+
+/* Upper triangular storage: examine D from bottom to top */
+
+ for (*info = *n; *info >= 1; --(*info)) {
+ if (ipiv[*info] > 0 && a[*info + *info * a_dim1] == 0.) {
+ return 0;
+ }
+/* L10: */
+ }
+ } else {
+
+/* Lower triangular storage: examine D from top to bottom. */
+
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ if (ipiv[*info] > 0 && a[*info + *info * a_dim1] == 0.) {
+ return 0;
+ }
+/* L20: */
+ }
+ }
+ *info = 0;
+
+ if (upper) {
+
+/* Compute inv(A) from the factorization A = U*D*U'. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+L30:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L40;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Invert the diagonal block. */
+
+ a[k + k * a_dim1] = 1. / a[k + k * a_dim1];
+
+/* Compute column K of the inverse. */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ dcopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &work[1], &c__1);
+ i__1 = k - 1;
+ dsymv_(uplo, &i__1, &c_b11, &a[a_offset], lda, &work[1], &
+ c__1, &c_b13, &a[k * a_dim1 + 1], &c__1);
+ i__1 = k - 1;
+ a[k + k * a_dim1] -= ddot_(&i__1, &work[1], &c__1, &a[k *
+ a_dim1 + 1], &c__1);
+ }
+ kstep = 1;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Invert the diagonal block. */
+
+ t = (d__1 = a[k + (k + 1) * a_dim1], abs(d__1));
+ ak = a[k + k * a_dim1] / t;
+ akp1 = a[k + 1 + (k + 1) * a_dim1] / t;
+ akkp1 = a[k + (k + 1) * a_dim1] / t;
+ d__ = t * (ak * akp1 - 1.);
+ a[k + k * a_dim1] = akp1 / d__;
+ a[k + 1 + (k + 1) * a_dim1] = ak / d__;
+ a[k + (k + 1) * a_dim1] = -akkp1 / d__;
+
+/* Compute columns K and K+1 of the inverse. */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ dcopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &work[1], &c__1);
+ i__1 = k - 1;
+ dsymv_(uplo, &i__1, &c_b11, &a[a_offset], lda, &work[1], &
+ c__1, &c_b13, &a[k * a_dim1 + 1], &c__1);
+ i__1 = k - 1;
+ a[k + k * a_dim1] -= ddot_(&i__1, &work[1], &c__1, &a[k *
+ a_dim1 + 1], &c__1);
+ i__1 = k - 1;
+ a[k + (k + 1) * a_dim1] -= ddot_(&i__1, &a[k * a_dim1 + 1], &
+ c__1, &a[(k + 1) * a_dim1 + 1], &c__1);
+ i__1 = k - 1;
+ dcopy_(&i__1, &a[(k + 1) * a_dim1 + 1], &c__1, &work[1], &
+ c__1);
+ i__1 = k - 1;
+ dsymv_(uplo, &i__1, &c_b11, &a[a_offset], lda, &work[1], &
+ c__1, &c_b13, &a[(k + 1) * a_dim1 + 1], &c__1);
+ i__1 = k - 1;
+ a[k + 1 + (k + 1) * a_dim1] -= ddot_(&i__1, &work[1], &c__1, &
+ a[(k + 1) * a_dim1 + 1], &c__1);
+ }
+ kstep = 2;
+ }
+
+ kp = (i__1 = ipiv[k], abs(i__1));
+ if (kp != k) {
+
+/* Interchange rows and columns K and KP in the leading */
+/* submatrix A(1:k+1,1:k+1) */
+
+ i__1 = kp - 1;
+ dswap_(&i__1, &a[k * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1], &
+ c__1);
+ i__1 = k - kp - 1;
+ dswap_(&i__1, &a[kp + 1 + k * a_dim1], &c__1, &a[kp + (kp + 1) *
+ a_dim1], lda);
+ temp = a[k + k * a_dim1];
+ a[k + k * a_dim1] = a[kp + kp * a_dim1];
+ a[kp + kp * a_dim1] = temp;
+ if (kstep == 2) {
+ temp = a[k + (k + 1) * a_dim1];
+ a[k + (k + 1) * a_dim1] = a[kp + (k + 1) * a_dim1];
+ a[kp + (k + 1) * a_dim1] = temp;
+ }
+ }
+
+ k += kstep;
+ goto L30;
+L40:
+
+ ;
+ } else {
+
+/* Compute inv(A) from the factorization A = L*D*L'. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = *n;
+L50:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L60;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Invert the diagonal block. */
+
+ a[k + k * a_dim1] = 1. / a[k + k * a_dim1];
+
+/* Compute column K of the inverse. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ dcopy_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &work[1], &c__1);
+ i__1 = *n - k;
+ dsymv_(uplo, &i__1, &c_b11, &a[k + 1 + (k + 1) * a_dim1], lda,
+ &work[1], &c__1, &c_b13, &a[k + 1 + k * a_dim1], &
+ c__1);
+ i__1 = *n - k;
+ a[k + k * a_dim1] -= ddot_(&i__1, &work[1], &c__1, &a[k + 1 +
+ k * a_dim1], &c__1);
+ }
+ kstep = 1;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Invert the diagonal block. */
+
+ t = (d__1 = a[k + (k - 1) * a_dim1], abs(d__1));
+ ak = a[k - 1 + (k - 1) * a_dim1] / t;
+ akp1 = a[k + k * a_dim1] / t;
+ akkp1 = a[k + (k - 1) * a_dim1] / t;
+ d__ = t * (ak * akp1 - 1.);
+ a[k - 1 + (k - 1) * a_dim1] = akp1 / d__;
+ a[k + k * a_dim1] = ak / d__;
+ a[k + (k - 1) * a_dim1] = -akkp1 / d__;
+
+/* Compute columns K-1 and K of the inverse. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ dcopy_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &work[1], &c__1);
+ i__1 = *n - k;
+ dsymv_(uplo, &i__1, &c_b11, &a[k + 1 + (k + 1) * a_dim1], lda,
+ &work[1], &c__1, &c_b13, &a[k + 1 + k * a_dim1], &
+ c__1);
+ i__1 = *n - k;
+ a[k + k * a_dim1] -= ddot_(&i__1, &work[1], &c__1, &a[k + 1 +
+ k * a_dim1], &c__1);
+ i__1 = *n - k;
+ a[k + (k - 1) * a_dim1] -= ddot_(&i__1, &a[k + 1 + k * a_dim1]
+, &c__1, &a[k + 1 + (k - 1) * a_dim1], &c__1);
+ i__1 = *n - k;
+ dcopy_(&i__1, &a[k + 1 + (k - 1) * a_dim1], &c__1, &work[1], &
+ c__1);
+ i__1 = *n - k;
+ dsymv_(uplo, &i__1, &c_b11, &a[k + 1 + (k + 1) * a_dim1], lda,
+ &work[1], &c__1, &c_b13, &a[k + 1 + (k - 1) * a_dim1]
+, &c__1);
+ i__1 = *n - k;
+ a[k - 1 + (k - 1) * a_dim1] -= ddot_(&i__1, &work[1], &c__1, &
+ a[k + 1 + (k - 1) * a_dim1], &c__1);
+ }
+ kstep = 2;
+ }
+
+ kp = (i__1 = ipiv[k], abs(i__1));
+ if (kp != k) {
+
+/* Interchange rows and columns K and KP in the trailing */
+/* submatrix A(k-1:n,k-1:n) */
+
+ if (kp < *n) {
+ i__1 = *n - kp;
+ dswap_(&i__1, &a[kp + 1 + k * a_dim1], &c__1, &a[kp + 1 + kp *
+ a_dim1], &c__1);
+ }
+ i__1 = kp - k - 1;
+ dswap_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &a[kp + (k + 1) *
+ a_dim1], lda);
+ temp = a[k + k * a_dim1];
+ a[k + k * a_dim1] = a[kp + kp * a_dim1];
+ a[kp + kp * a_dim1] = temp;
+ if (kstep == 2) {
+ temp = a[k + (k - 1) * a_dim1];
+ a[k + (k - 1) * a_dim1] = a[kp + (k - 1) * a_dim1];
+ a[kp + (k - 1) * a_dim1] = temp;
+ }
+ }
+
+ k -= kstep;
+ goto L50;
+L60:
+ ;
+ }
+
+ return 0;
+
+/* End of DSYTRI */
+
+} /* dsytri_ */
diff --git a/contrib/libs/clapack/dsytrs.c b/contrib/libs/clapack/dsytrs.c
new file mode 100644
index 0000000000..26db5a7636
--- /dev/null
+++ b/contrib/libs/clapack/dsytrs.c
@@ -0,0 +1,453 @@
+/* dsytrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b7 = -1.;
+static integer c__1 = 1;
+static doublereal c_b19 = 1.;
+
+/* Subroutine */ int dsytrs_(char *uplo, integer *n, integer *nrhs,
+ doublereal *a, integer *lda, integer *ipiv, doublereal *b, integer *
+ ldb, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+ doublereal d__1;
+
+ /* Local variables */
+ integer j, k;
+ doublereal ak, bk;
+ integer kp;
+ doublereal akm1, bkm1;
+ extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ doublereal akm1k;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ doublereal denom;
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *), dswap_(integer *,
+ doublereal *, integer *, doublereal *, integer *);
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DSYTRS solves a system of linear equations A*X = B with a real */
+/* symmetric matrix A using the factorization A = U*D*U**T or */
+/* A = L*D*L**T computed by DSYTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**T; */
+/* = 'L': Lower triangular, form is A = L*D*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The block diagonal matrix D and the multipliers used to */
+/* obtain the factor U or L as computed by DSYTRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by DSYTRF. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSYTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Solve A*X = B, where A = U*D*U'. */
+
+/* First solve U*D*X = B, overwriting B with X. */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = *n;
+L10:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L30;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ dswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(U(K)), where U(K) is the transformation */
+/* stored in column K of A. */
+
+ i__1 = k - 1;
+ dger_(&i__1, nrhs, &c_b7, &a[k * a_dim1 + 1], &c__1, &b[k +
+ b_dim1], ldb, &b[b_dim1 + 1], ldb);
+
+/* Multiply by the inverse of the diagonal block. */
+
+ d__1 = 1. / a[k + k * a_dim1];
+ dscal_(nrhs, &d__1, &b[k + b_dim1], ldb);
+ --k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Interchange rows K-1 and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k - 1) {
+ dswap_(nrhs, &b[k - 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(U(K)), where U(K) is the transformation */
+/* stored in columns K-1 and K of A. */
+
+ i__1 = k - 2;
+ dger_(&i__1, nrhs, &c_b7, &a[k * a_dim1 + 1], &c__1, &b[k +
+ b_dim1], ldb, &b[b_dim1 + 1], ldb);
+ i__1 = k - 2;
+ dger_(&i__1, nrhs, &c_b7, &a[(k - 1) * a_dim1 + 1], &c__1, &b[k -
+ 1 + b_dim1], ldb, &b[b_dim1 + 1], ldb);
+
+/* Multiply by the inverse of the diagonal block. */
+
+ akm1k = a[k - 1 + k * a_dim1];
+ akm1 = a[k - 1 + (k - 1) * a_dim1] / akm1k;
+ ak = a[k + k * a_dim1] / akm1k;
+ denom = akm1 * ak - 1.;
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ bkm1 = b[k - 1 + j * b_dim1] / akm1k;
+ bk = b[k + j * b_dim1] / akm1k;
+ b[k - 1 + j * b_dim1] = (ak * bkm1 - bk) / denom;
+ b[k + j * b_dim1] = (akm1 * bk - bkm1) / denom;
+/* L20: */
+ }
+ k += -2;
+ }
+
+ goto L10;
+L30:
+
+/* Next solve U'*X = B, overwriting B with X. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+L40:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L50;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Multiply by inv(U'(K)), where U(K) is the transformation */
+/* stored in column K of A. */
+
+ i__1 = k - 1;
+ dgemv_("Transpose", &i__1, nrhs, &c_b7, &b[b_offset], ldb, &a[k *
+ a_dim1 + 1], &c__1, &c_b19, &b[k + b_dim1], ldb);
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ dswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ ++k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Multiply by inv(U'(K+1)), where U(K+1) is the transformation */
+/* stored in columns K and K+1 of A. */
+
+ i__1 = k - 1;
+ dgemv_("Transpose", &i__1, nrhs, &c_b7, &b[b_offset], ldb, &a[k *
+ a_dim1 + 1], &c__1, &c_b19, &b[k + b_dim1], ldb);
+ i__1 = k - 1;
+ dgemv_("Transpose", &i__1, nrhs, &c_b7, &b[b_offset], ldb, &a[(k
+ + 1) * a_dim1 + 1], &c__1, &c_b19, &b[k + 1 + b_dim1],
+ ldb);
+
+/* Interchange rows K and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k) {
+ dswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ k += 2;
+ }
+
+ goto L40;
+L50:
+
+ ;
+ } else {
+
+/* Solve A*X = B, where A = L*D*L'. */
+
+/* First solve L*D*X = B, overwriting B with X. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+L60:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L80;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ dswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(L(K)), where L(K) is the transformation */
+/* stored in column K of A. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ dger_(&i__1, nrhs, &c_b7, &a[k + 1 + k * a_dim1], &c__1, &b[k
+ + b_dim1], ldb, &b[k + 1 + b_dim1], ldb);
+ }
+
+/* Multiply by the inverse of the diagonal block. */
+
+ d__1 = 1. / a[k + k * a_dim1];
+ dscal_(nrhs, &d__1, &b[k + b_dim1], ldb);
+ ++k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Interchange rows K+1 and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k + 1) {
+ dswap_(nrhs, &b[k + 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(L(K)), where L(K) is the transformation */
+/* stored in columns K and K+1 of A. */
+
+ if (k < *n - 1) {
+ i__1 = *n - k - 1;
+ dger_(&i__1, nrhs, &c_b7, &a[k + 2 + k * a_dim1], &c__1, &b[k
+ + b_dim1], ldb, &b[k + 2 + b_dim1], ldb);
+ i__1 = *n - k - 1;
+ dger_(&i__1, nrhs, &c_b7, &a[k + 2 + (k + 1) * a_dim1], &c__1,
+ &b[k + 1 + b_dim1], ldb, &b[k + 2 + b_dim1], ldb);
+ }
+
+/* Multiply by the inverse of the diagonal block. */
+
+ akm1k = a[k + 1 + k * a_dim1];
+ akm1 = a[k + k * a_dim1] / akm1k;
+ ak = a[k + 1 + (k + 1) * a_dim1] / akm1k;
+ denom = akm1 * ak - 1.;
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ bkm1 = b[k + j * b_dim1] / akm1k;
+ bk = b[k + 1 + j * b_dim1] / akm1k;
+ b[k + j * b_dim1] = (ak * bkm1 - bk) / denom;
+ b[k + 1 + j * b_dim1] = (akm1 * bk - bkm1) / denom;
+/* L70: */
+ }
+ k += 2;
+ }
+
+ goto L60;
+L80:
+
+/* Next solve L'*X = B, overwriting B with X. */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = *n;
+L90:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L100;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Multiply by inv(L'(K)), where L(K) is the transformation */
+/* stored in column K of A. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ dgemv_("Transpose", &i__1, nrhs, &c_b7, &b[k + 1 + b_dim1],
+ ldb, &a[k + 1 + k * a_dim1], &c__1, &c_b19, &b[k +
+ b_dim1], ldb);
+ }
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ dswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ --k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Multiply by inv(L'(K-1)), where L(K-1) is the transformation */
+/* stored in columns K-1 and K of A. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ dgemv_("Transpose", &i__1, nrhs, &c_b7, &b[k + 1 + b_dim1],
+ ldb, &a[k + 1 + k * a_dim1], &c__1, &c_b19, &b[k +
+ b_dim1], ldb);
+ i__1 = *n - k;
+ dgemv_("Transpose", &i__1, nrhs, &c_b7, &b[k + 1 + b_dim1],
+ ldb, &a[k + 1 + (k - 1) * a_dim1], &c__1, &c_b19, &b[
+ k - 1 + b_dim1], ldb);
+ }
+
+/* Interchange rows K and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k) {
+ dswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ k += -2;
+ }
+
+ goto L90;
+L100:
+ ;
+ }
+
+ return 0;
+
+/* End of DSYTRS */
+
+} /* dsytrs_ */
diff --git a/contrib/libs/clapack/dtbcon.c b/contrib/libs/clapack/dtbcon.c
new file mode 100644
index 0000000000..1c4e89cd41
--- /dev/null
+++ b/contrib/libs/clapack/dtbcon.c
@@ -0,0 +1,247 @@
+/* dtbcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dtbcon_(char *norm, char *uplo, char *diag, integer *n,
+ integer *kd, doublereal *ab, integer *ldab, doublereal *rcond,
+ doublereal *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1;
+ doublereal d__1;
+
+ /* Local variables */
+ integer ix, kase, kase1;
+ doublereal scale;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int drscl_(integer *, doublereal *, doublereal *,
+ integer *);
+ doublereal anorm;
+ logical upper;
+ doublereal xnorm;
+ extern /* Subroutine */ int dlacn2_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ extern doublereal dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern doublereal dlantb_(char *, char *, char *, integer *, integer *,
+ doublereal *, integer *, doublereal *);
+ extern /* Subroutine */ int dlatbs_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+ doublereal ainvnm;
+ logical onenrm;
+ char normin[1];
+ doublereal smlnum;
+ logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTBCON estimates the reciprocal of the condition number of a */
+/* triangular band matrix A, in either the 1-norm or the infinity-norm. */
+
+/* The norm of A is computed and an estimate is obtained for */
+/* norm(inv(A)), then the reciprocal of the condition number is */
+/* computed as */
+/* RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals or subdiagonals of the */
+/* triangular band matrix A. KD >= 0. */
+
+/* AB (input) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* The upper or lower triangular band matrix A, stored in the */
+/* first kd+1 rows of the array. The j-th column of A is stored */
+/* in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+/* If DIAG = 'U', the diagonal elements of A are not referenced */
+/* and are assumed to be 1. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments (f2c: shift pointers so 1-based Fortran indexing works) */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+ nounit = lsame_(diag, "N");
+
+ if (! onenrm && ! lsame_(norm, "I")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*kd < 0) {
+ *info = -5;
+ } else if (*ldab < *kd + 1) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTBCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible: an empty matrix is perfectly conditioned. */
+
+ if (*n == 0) {
+ *rcond = 1.;
+ return 0;
+ }
+
+ *rcond = 0.;
+ smlnum = dlamch_("Safe minimum") * (doublereal) max(1,*n);
+
+/* Compute the selected norm (1-norm or infinity-norm) of the band matrix A. */
+
+ anorm = dlantb_(norm, uplo, diag, n, kd, &ab[ab_offset], ldab, &work[1]);
+
+/* Continue only if ANORM > 0; otherwise RCOND stays 0 (zero matrix). */
+
+ if (anorm > 0.) {
+
+/* Estimate norm(inv(A)) via the reverse-communication estimator DLACN2. */
+
+ ainvnm = 0.;
+ *(unsigned char *)normin = 'N';
+ if (onenrm) {
+ kase1 = 1;
+ } else {
+ kase1 = 2;
+ }
+ kase = 0;
+L10:
+ dlacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+ if (kase == kase1) {
+
+/* Multiply by inv(A): scaled triangular band solve, no transpose. */
+
+ dlatbs_(uplo, "No transpose", diag, normin, n, kd, &ab[
+ ab_offset], ldab, &work[1], &scale, &work[(*n << 1) +
+ 1], info)
+ ;
+ } else {
+
+/* Multiply by inv(A'): scaled triangular band solve with transpose. */
+
+ dlatbs_(uplo, "Transpose", diag, normin, n, kd, &ab[ab_offset]
+, ldab, &work[1], &scale, &work[(*n << 1) + 1], info);
+ }
+ *(unsigned char *)normin = 'Y';
+
+/* Multiply by 1/SCALE if doing so will not cause overflow. */
+
+ if (scale != 1.) {
+ ix = idamax_(n, &work[1], &c__1);
+ xnorm = (d__1 = work[ix], abs(d__1));
+ if (scale < xnorm * smlnum || scale == 0.) {
+ goto L20;
+ }
+ drscl_(n, &scale, &work[1], &c__1);
+ }
+ goto L10;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.) {
+ *rcond = 1. / anorm / ainvnm;
+ }
+ }
+
+L20:
+ return 0;
+
+/* End of DTBCON */
+
+} /* dtbcon_ */
diff --git a/contrib/libs/clapack/dtbrfs.c b/contrib/libs/clapack/dtbrfs.c
new file mode 100644
index 0000000000..e1e50c81d8
--- /dev/null
+++ b/contrib/libs/clapack/dtbrfs.c
@@ -0,0 +1,519 @@
+/* dtbrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b19 = -1.;
+
+/* Subroutine */ int dtbrfs_(char *uplo, char *trans, char *diag, integer *n,
+ integer *kd, integer *nrhs, doublereal *ab, integer *ldab, doublereal
+ *b, integer *ldb, doublereal *x, integer *ldx, doublereal *ferr,
+ doublereal *berr, doublereal *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, b_dim1, b_offset, x_dim1, x_offset, i__1,
+ i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3;
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal s, xk;
+ integer nz;
+ doublereal eps;
+ integer kase;
+ doublereal safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int dtbmv_(char *, char *, char *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer *
+, doublereal *, integer *), dtbsv_(char *, char *, char *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ integer *), daxpy_(integer *, doublereal *
+, doublereal *, integer *, doublereal *, integer *);
+ logical upper;
+ extern /* Subroutine */ int dlacn2_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ extern doublereal dlamch_(char *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran;
+ char transt[1];
+ logical nounit;
+ doublereal lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTBRFS provides error bounds and backward error estimates for the */
+/* solution to a system of linear equations with a triangular band */
+/* coefficient matrix. */
+
+/* The solution matrix X must be computed by DTBTRS or some other */
+/* means before entering this routine. DTBRFS does not do iterative */
+/* refinement because doing so cannot improve the backward error. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose = Transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals or subdiagonals of the */
+/* triangular band matrix A. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AB (input) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* The upper or lower triangular band matrix A, stored in the */
+/* first kd+1 rows of the array. The j-th column of A is stored */
+/* in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+/* If DIAG = 'U', the diagonal elements of A are not referenced */
+/* and are assumed to be 1. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* The solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments (f2c: shift pointers so 1-based Fortran indexing works) */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*kd < 0) {
+ *info = -5;
+ } else if (*nrhs < 0) {
+ *info = -6;
+ } else if (*ldab < *kd + 1) {
+ *info = -8;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldx < max(1,*n)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTBRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible: zero-size problem has zero error bounds. */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.;
+ berr[j] = 0.;
+/* L10: */
+ }
+ return 0;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = *kd + 2;
+ eps = dlamch_("Epsilon");
+ safmin = dlamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side: compute residual, then BERR and FERR. */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A or A', depending on TRANS. */
+
+ dcopy_(n, &x[j * x_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+ dtbmv_(uplo, trans, diag, n, kd, &ab[ab_offset], ldab, &work[*n + 1],
+ &c__1);
+ daxpy_(n, &c_b19, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] = (d__1 = b[i__ + j * b_dim1], abs(d__1));
+/* L20: */
+ }
+
+ if (notran) {
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+ if (upper) {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+/* Computing MAX */
+ i__3 = 1, i__4 = k - *kd;
+ i__5 = k;
+ for (i__ = max(i__3,i__4); i__ <= i__5; ++i__) {
+ work[i__] += (d__1 = ab[*kd + 1 + i__ - k + k *
+ ab_dim1], abs(d__1)) * xk;
+/* L30: */
+ }
+/* L40: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+/* Computing MAX */
+ i__5 = 1, i__3 = k - *kd;
+ i__4 = k - 1;
+ for (i__ = max(i__5,i__3); i__ <= i__4; ++i__) {
+ work[i__] += (d__1 = ab[*kd + 1 + i__ - k + k *
+ ab_dim1], abs(d__1)) * xk;
+/* L50: */
+ }
+ work[k] += xk;
+/* L60: */
+ }
+ }
+ } else {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+/* Computing MIN */
+ i__5 = *n, i__3 = k + *kd;
+ i__4 = min(i__5,i__3);
+ for (i__ = k; i__ <= i__4; ++i__) {
+ work[i__] += (d__1 = ab[i__ + 1 - k + k * ab_dim1]
+ , abs(d__1)) * xk;
+/* L70: */
+ }
+/* L80: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+/* Computing MIN */
+ i__5 = *n, i__3 = k + *kd;
+ i__4 = min(i__5,i__3);
+ for (i__ = k + 1; i__ <= i__4; ++i__) {
+ work[i__] += (d__1 = ab[i__ + 1 - k + k * ab_dim1]
+ , abs(d__1)) * xk;
+/* L90: */
+ }
+ work[k] += xk;
+/* L100: */
+ }
+ }
+ }
+ } else {
+
+/* Compute abs(A')*abs(X) + abs(B). */
+
+ if (upper) {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+/* Computing MAX */
+ i__4 = 1, i__5 = k - *kd;
+ i__3 = k;
+ for (i__ = max(i__4,i__5); i__ <= i__3; ++i__) {
+ s += (d__1 = ab[*kd + 1 + i__ - k + k * ab_dim1],
+ abs(d__1)) * (d__2 = x[i__ + j * x_dim1],
+ abs(d__2));
+/* L110: */
+ }
+ work[k] += s;
+/* L120: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = (d__1 = x[k + j * x_dim1], abs(d__1));
+/* Computing MAX */
+ i__3 = 1, i__4 = k - *kd;
+ i__5 = k - 1;
+ for (i__ = max(i__3,i__4); i__ <= i__5; ++i__) {
+ s += (d__1 = ab[*kd + 1 + i__ - k + k * ab_dim1],
+ abs(d__1)) * (d__2 = x[i__ + j * x_dim1],
+ abs(d__2));
+/* L130: */
+ }
+ work[k] += s;
+/* L140: */
+ }
+ }
+ } else {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+/* Computing MIN */
+ i__3 = *n, i__4 = k + *kd;
+ i__5 = min(i__3,i__4);
+ for (i__ = k; i__ <= i__5; ++i__) {
+ s += (d__1 = ab[i__ + 1 - k + k * ab_dim1], abs(
+ d__1)) * (d__2 = x[i__ + j * x_dim1], abs(
+ d__2));
+/* L150: */
+ }
+ work[k] += s;
+/* L160: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = (d__1 = x[k + j * x_dim1], abs(d__1));
+/* Computing MIN */
+ i__3 = *n, i__4 = k + *kd;
+ i__5 = min(i__3,i__4);
+ for (i__ = k + 1; i__ <= i__5; ++i__) {
+ s += (d__1 = ab[i__ + 1 - k + k * ab_dim1], abs(
+ d__1)) * (d__2 = x[i__ + j * x_dim1], abs(
+ d__2));
+/* L170: */
+ }
+ work[k] += s;
+/* L180: */
+ }
+ }
+ }
+ }
+ s = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+/* Computing MAX */
+ d__2 = s, d__3 = (d__1 = work[*n + i__], abs(d__1)) / work[
+ i__];
+ s = max(d__2,d__3);
+ } else {
+/* Computing MAX */
+ d__2 = s, d__3 = ((d__1 = work[*n + i__], abs(d__1)) + safe1)
+ / (work[i__] + safe1);
+ s = max(d__2,d__3);
+ }
+/* L190: */
+ }
+ berr[j] = s;
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use DLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+ work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+ work[i__];
+ } else {
+ work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+ work[i__] + safe1;
+ }
+/* L200: */
+ }
+
+ kase = 0;
+L210:
+ dlacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+ kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)'). */
+
+ dtbsv_(uplo, transt, diag, n, kd, &ab[ab_offset], ldab, &work[
+ *n + 1], &c__1);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] = work[i__] * work[*n + i__];
+/* L220: */
+ }
+ } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] = work[i__] * work[*n + i__];
+/* L230: */
+ }
+ dtbsv_(uplo, trans, diag, n, kd, &ab[ab_offset], ldab, &work[*
+ n + 1], &c__1);
+ }
+ goto L210;
+ }
+
+/* Normalize error: divide FERR(j) by the largest magnitude in X(j). */
+
+ lstres = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = lstres, d__3 = (d__1 = x[i__ + j * x_dim1], abs(d__1));
+ lstres = max(d__2,d__3);
+/* L240: */
+ }
+ if (lstres != 0.) {
+ ferr[j] /= lstres;
+ }
+
+/* L250: */
+ }
+
+ return 0;
+
+/* End of DTBRFS */
+
+} /* dtbrfs_ */
diff --git a/contrib/libs/clapack/dtbtrs.c b/contrib/libs/clapack/dtbtrs.c
new file mode 100644
index 0000000000..aee820c8a9
--- /dev/null
+++ b/contrib/libs/clapack/dtbtrs.c
@@ -0,0 +1,204 @@
+/* dtbtrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dtbtrs_(char *uplo, char *trans, char *diag, integer *n,
+ integer *kd, integer *nrhs, doublereal *ab, integer *ldab, doublereal
+ *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ integer j;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dtbsv_(char *, char *, char *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *);
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTBTRS solves a triangular system of the form */
+
+/* A * X = B or A**T * X = B, */
+
+/* where A is a triangular band matrix of order N, and B is an */
+/* N-by NRHS matrix. A check is made to verify that A is nonsingular. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose = Transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals or subdiagonals of the */
+/* triangular band matrix A. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AB (input) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* The upper or lower triangular band matrix A, stored in the */
+/* first kd+1 rows of AB. The j-th column of A is stored */
+/* in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+/* If DIAG = 'U', the diagonal elements of A are not referenced */
+/* and are assumed to be 1. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, if INFO = 0, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element of A is zero, */
+/* indicating that the matrix is singular and the */
+/* solutions X have not been computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments (f2c: shift pointers so 1-based Fortran indexing works) */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*kd < 0) {
+ *info = -5;
+ } else if (*nrhs < 0) {
+ *info = -6;
+ } else if (*ldab < *kd + 1) {
+ *info = -8;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTBTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check for singularity: a zero diagonal entry leaves INFO > 0 and exits. */
+
+ if (nounit) {
+ if (upper) {
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ if (ab[*kd + 1 + *info * ab_dim1] == 0.) {
+ return 0;
+ }
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ if (ab[*info * ab_dim1 + 1] == 0.) {
+ return 0;
+ }
+/* L20: */
+ }
+ }
+ }
+ *info = 0;
+
+/* Solve A * X = B or A' * X = B, one right-hand-side column per DTBSV call. */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ dtbsv_(uplo, trans, diag, n, kd, &ab[ab_offset], ldab, &b[j * b_dim1
+ + 1], &c__1);
+/* L30: */
+ }
+
+ return 0;
+
+/* End of DTBTRS */
+
+} /* dtbtrs_ */
diff --git a/contrib/libs/clapack/dtfsm.c b/contrib/libs/clapack/dtfsm.c
new file mode 100644
index 0000000000..0b28fc5a56
--- /dev/null
+++ b/contrib/libs/clapack/dtfsm.c
@@ -0,0 +1,976 @@
+/* dtfsm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b23 = -1.;
+static doublereal c_b27 = 1.;
+
+/* Subroutine */ int dtfsm_(char *transr, char *side, char *uplo, char *trans,
+	 char *diag, integer *m, integer *n, doublereal *alpha, doublereal *a,
+	doublereal *b, integer *ldb)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, k, m1, m2, n1, n2, info;
+    logical normaltransr;
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *);
+    logical lside;
+    extern logical lsame_(char *, char *);
+    logical lower;
+    extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+	    integer *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *), xerbla_(
+	    char *, integer *);
+    logical misodd, nisodd, notrans;
+
+
+/* -- LAPACK routine (version 3.2.1) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- April 2009 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Level 3 BLAS like routine for A in RFP Format. */
+
+/* DTFSM solves the matrix equation */
+
+/* op( A )*X = alpha*B or X*op( A ) = alpha*B */
+
+/* where alpha is a scalar, X and B are m by n matrices, A is a unit, or */
+/* non-unit, upper or lower triangular matrix and op( A ) is one of */
+
+/* op( A ) = A or op( A ) = A'. */
+
+/* A is in Rectangular Full Packed (RFP) Format. */
+
+/* The matrix X is overwritten on B. */
+
+/* Arguments */
+/* ========== */
+
+/* TRANSR - (input) CHARACTER */
+/* = 'N': The Normal Form of RFP A is stored; */
+/* = 'T': The Transpose Form of RFP A is stored. */
+
+/* SIDE - (input) CHARACTER */
+/* On entry, SIDE specifies whether op( A ) appears on the left */
+/* or right of X as follows: */
+
+/* SIDE = 'L' or 'l' op( A )*X = alpha*B. */
+
+/* SIDE = 'R' or 'r' X*op( A ) = alpha*B. */
+
+/* Unchanged on exit. */
+
+/* UPLO - (input) CHARACTER */
+/* On entry, UPLO specifies whether the RFP matrix A came from */
+/* an upper or lower triangular matrix as follows: */
+/* UPLO = 'U' or 'u' RFP A came from an upper triangular matrix */
+/* UPLO = 'L' or 'l' RFP A came from a lower triangular matrix */
+
+/* Unchanged on exit. */
+
+/* TRANS - (input) CHARACTER */
+/* On entry, TRANS specifies the form of op( A ) to be used */
+/* in the matrix multiplication as follows: */
+
+/* TRANS = 'N' or 'n' op( A ) = A. */
+
+/* TRANS = 'T' or 't' op( A ) = A'. */
+
+/* Unchanged on exit. */
+
+/* DIAG - (input) CHARACTER */
+/* On entry, DIAG specifies whether or not RFP A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* M - (input) INTEGER. */
+/* On entry, M specifies the number of rows of B. M must be at */
+/* least zero. */
+/* Unchanged on exit. */
+
+/* N - (input) INTEGER. */
+/* On entry, N specifies the number of columns of B. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - (input) DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. When alpha is */
+/* zero then A is not referenced and B need not be set before */
+/* entry. */
+/* Unchanged on exit. */
+
+/* A - (input) DOUBLE PRECISION array, dimension (NT); */
+/* NT = N*(N+1)/2. On entry, the matrix A in RFP Format. */
+/* RFP Format is described by TRANSR, UPLO and N as follows: */
+/* If TRANSR='N' then RFP A is (0:N,0:K-1) when N is even; */
+/* K=N/2. RFP A is (0:N-1,0:K) when N is odd; K=N/2. If */
+/* TRANSR = 'T' then RFP is the transpose of RFP A as */
+/* defined when TRANSR = 'N'. The contents of RFP A are defined */
+/* by UPLO as follows: If UPLO = 'U' the RFP A contains the NT */
+/* elements of upper packed A either in normal or */
+/* transpose Format. If UPLO = 'L' the RFP A contains */
+/* the NT elements of lower packed A either in normal or */
+/* transpose Format. The LDA of RFP A is (N+1)/2 when */
+/* TRANSR = 'T'. When TRANSR is 'N' the LDA is N+1 when N is */
+/* even and is N when is odd. */
+/* See the Note below for more details. Unchanged on exit. */
+
+/* B - (input/output) DOUBLE PRECISION array, DIMENSION (LDB,N) */
+/* Before entry, the leading m by n part of the array B must */
+/* contain the right-hand side matrix B, and on exit is */
+/* overwritten by the solution matrix X. */
+
+/* LDB - (input) INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. LDB must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+/* Further Details */
+/* =============== */
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* even. We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* the transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* the transpose of the last three columns of AP lower. */
+/* This covers the case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 03 04 05 33 43 53 */
+/* 13 14 15 00 44 54 */
+/* 23 24 25 10 11 55 */
+/* 33 34 35 20 21 22 */
+/* 00 44 45 30 31 32 */
+/* 01 11 55 40 41 42 */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* odd. We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* the transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* the transpose of the last two columns of AP lower. */
+/* This covers the case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 02 03 04 00 33 43 */
+/* 12 13 14 10 11 44 */
+/* 22 23 24 20 21 22 */
+/* 00 33 34 30 31 32 */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+/* RFP A RFP A */
+
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* Reference */
+/* ========= */
+
+/* ===================================================================== */
+
+/* .. */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    b_dim1 = *ldb - 1 - 0 + 1; /* f2c uses 0-based indexing here: column stride of B is LDB */
+    b_offset = 0 + b_dim1 * 0;
+    b -= b_offset;
+
+    /* Function Body */
+    info = 0;
+    normaltransr = lsame_(transr, "N");
+    lside = lsame_(side, "L");
+    lower = lsame_(uplo, "L");
+    notrans = lsame_(trans, "N");
+    if (! normaltransr && ! lsame_(transr, "T")) {
+	info = -1;
+    } else if (! lside && ! lsame_(side, "R")) {
+	info = -2;
+    } else if (! lower && ! lsame_(uplo, "U")) {
+	info = -3;
+    } else if (! notrans && ! lsame_(trans, "T")) {
+	info = -4;
+    } else if (! lsame_(diag, "N") && ! lsame_(diag,
+	    "U")) {
+	info = -5;
+    } else if (*m < 0) {
+	info = -6;
+    } else if (*n < 0) {
+	info = -7;
+    } else if (*ldb < max(1,*m)) {
+	info = -11;
+    }
+    if (info != 0) {
+	i__1 = -info;
+	xerbla_("DTFSM ", &i__1);
+	return 0;
+    }
+
+/* Quick return when ( (N.EQ.0).OR.(M.EQ.0) ) */
+
+    if (*m == 0 || *n == 0) {
+	return 0;
+    }
+
+/* Quick return when ALPHA.EQ.(0D+0) */
+
+    if (*alpha == 0.) { /* alpha == 0: A need not be referenced; just zero out B */
+	i__1 = *n - 1;
+	for (j = 0; j <= i__1; ++j) {
+	    i__2 = *m - 1;
+	    for (i__ = 0; i__ <= i__2; ++i__) {
+		b[i__ + j * b_dim1] = 0.;
+/* L10: */
+	    }
+/* L20: */
+	}
+	return 0;
+    }
+
+    if (lside) {
+
+/* SIDE = 'L' */
+
+/* A is M-by-M. */
+/* If M is odd, set NISODD = .TRUE., and M1 and M2. */
+/* If M is even, NISODD = .FALSE., and M. */
+
+	if (*m % 2 == 0) { /* even order: RFP A splits into two K-by-K triangles, K = M/2 */
+	    misodd = FALSE_;
+	    k = *m / 2;
+	} else {
+	    misodd = TRUE_;
+	    if (lower) {
+		m2 = *m / 2;
+		m1 = *m - m2;
+	    } else {
+		m1 = *m / 2;
+		m2 = *m - m1;
+	    }
+	}
+
+
+	if (misodd) {
+
+/* SIDE = 'L' and N is odd */
+
+	    if (normaltransr) {
+
+/* SIDE = 'L', N is odd, and TRANSR = 'N' */
+
+		if (lower) {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', and UPLO = 'L' */
+
+		    if (notrans) {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', UPLO = 'L', and */
+/* TRANS = 'N' */
+
+			if (*m == 1) {
+			    dtrsm_("L", "L", "N", diag, &m1, n, alpha, a, m, &
+				    b[b_offset], ldb);
+			} else {
+			    dtrsm_("L", "L", "N", diag, &m1, n, alpha, a, m, &
+				    b[b_offset], ldb);
+			    dgemm_("N", "N", &m2, n, &m1, &c_b23, &a[m1], m, &
+				    b[b_offset], ldb, alpha, &b[m1], ldb);
+			    dtrsm_("L", "U", "T", diag, &m2, n, &c_b27, &a[*m]
+, m, &b[m1], ldb);
+			}
+
+		    } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', UPLO = 'L', and */
+/* TRANS = 'T' */
+
+			if (*m == 1) {
+			    dtrsm_("L", "L", "T", diag, &m1, n, alpha, a, m, &
+				    b[b_offset], ldb);
+			} else {
+			    dtrsm_("L", "U", "N", diag, &m2, n, alpha, &a[*m],
+				     m, &b[m1], ldb);
+			    dgemm_("T", "N", &m1, n, &m2, &c_b23, &a[m1], m, &
+				    b[m1], ldb, alpha, &b[b_offset], ldb);
+			    dtrsm_("L", "L", "T", diag, &m1, n, &c_b27, a, m,
+				    &b[b_offset], ldb);
+			}
+
+		    }
+
+		} else {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', and UPLO = 'U' */
+
+		    if (! notrans) {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', UPLO = 'U', and */
+/* TRANS = 'N' */
+
+			dtrsm_("L", "L", "N", diag, &m1, n, alpha, &a[m2], m,
+				&b[b_offset], ldb);
+			dgemm_("T", "N", &m2, n, &m1, &c_b23, a, m, &b[
+				b_offset], ldb, alpha, &b[m1], ldb);
+			dtrsm_("L", "U", "T", diag, &m2, n, &c_b27, &a[m1], m,
+				 &b[m1], ldb);
+
+		    } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', UPLO = 'U', and */
+/* TRANS = 'T' */
+
+			dtrsm_("L", "U", "N", diag, &m2, n, alpha, &a[m1], m,
+				&b[m1], ldb);
+			dgemm_("N", "N", &m1, n, &m2, &c_b23, a, m, &b[m1],
+				ldb, alpha, &b[b_offset], ldb);
+			dtrsm_("L", "L", "T", diag, &m1, n, &c_b27, &a[m2], m,
+				 &b[b_offset], ldb);
+
+		    }
+
+		}
+
+	    } else {
+
+/* SIDE = 'L', N is odd, and TRANSR = 'T' */
+
+		if (lower) {
+
+/* SIDE ='L', N is odd, TRANSR = 'T', and UPLO = 'L' */
+
+		    if (notrans) {
+
+/* SIDE ='L', N is odd, TRANSR = 'T', UPLO = 'L', and */
+/* TRANS = 'N' */
+
+			if (*m == 1) {
+			    dtrsm_("L", "U", "T", diag, &m1, n, alpha, a, &m1,
+				     &b[b_offset], ldb);
+			} else {
+			    dtrsm_("L", "U", "T", diag, &m1, n, alpha, a, &m1,
+				     &b[b_offset], ldb);
+			    dgemm_("T", "N", &m2, n, &m1, &c_b23, &a[m1 * m1],
+				     &m1, &b[b_offset], ldb, alpha, &b[m1],
+				    ldb);
+			    dtrsm_("L", "L", "N", diag, &m2, n, &c_b27, &a[1],
+				     &m1, &b[m1], ldb);
+			}
+
+		    } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'T', UPLO = 'L', and */
+/* TRANS = 'T' */
+
+			if (*m == 1) {
+			    dtrsm_("L", "U", "N", diag, &m1, n, alpha, a, &m1,
+				     &b[b_offset], ldb);
+			} else {
+			    dtrsm_("L", "L", "T", diag, &m2, n, alpha, &a[1],
+				    &m1, &b[m1], ldb);
+			    dgemm_("N", "N", &m1, n, &m2, &c_b23, &a[m1 * m1],
+				     &m1, &b[m1], ldb, alpha, &b[b_offset],
+				    ldb);
+			    dtrsm_("L", "U", "N", diag, &m1, n, &c_b27, a, &
+				    m1, &b[b_offset], ldb);
+			}
+
+		    }
+
+		} else {
+
+/* SIDE ='L', N is odd, TRANSR = 'T', and UPLO = 'U' */
+
+		    if (! notrans) {
+
+/* SIDE ='L', N is odd, TRANSR = 'T', UPLO = 'U', and */
+/* TRANS = 'N' */
+
+			dtrsm_("L", "U", "T", diag, &m1, n, alpha, &a[m2 * m2]
+, &m2, &b[b_offset], ldb);
+			dgemm_("N", "N", &m2, n, &m1, &c_b23, a, &m2, &b[
+				b_offset], ldb, alpha, &b[m1], ldb);
+			dtrsm_("L", "L", "N", diag, &m2, n, &c_b27, &a[m1 *
+				m2], &m2, &b[m1], ldb);
+
+		    } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'T', UPLO = 'U', and */
+/* TRANS = 'T' */
+
+			dtrsm_("L", "L", "T", diag, &m2, n, alpha, &a[m1 * m2]
+, &m2, &b[m1], ldb);
+			dgemm_("T", "N", &m1, n, &m2, &c_b23, a, &m2, &b[m1],
+				ldb, alpha, &b[b_offset], ldb);
+			dtrsm_("L", "U", "N", diag, &m1, n, &c_b27, &a[m2 *
+				m2], &m2, &b[b_offset], ldb);
+
+		    }
+
+		}
+
+	    }
+
+	} else {
+
+/* SIDE = 'L' and N is even */
+
+	    if (normaltransr) {
+
+/* SIDE = 'L', N is even, and TRANSR = 'N' */
+
+		if (lower) {
+
+/* SIDE ='L', N is even, TRANSR = 'N', and UPLO = 'L' */
+
+		    if (notrans) {
+
+/* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'L', */
+/* and TRANS = 'N' */
+
+			i__1 = *m + 1;
+			dtrsm_("L", "L", "N", diag, &k, n, alpha, &a[1], &
+				i__1, &b[b_offset], ldb);
+			i__1 = *m + 1;
+			dgemm_("N", "N", &k, n, &k, &c_b23, &a[k + 1], &i__1,
+				&b[b_offset], ldb, alpha, &b[k], ldb);
+			i__1 = *m + 1;
+			dtrsm_("L", "U", "T", diag, &k, n, &c_b27, a, &i__1, &
+				b[k], ldb);
+
+		    } else {
+
+/* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'L', */
+/* and TRANS = 'T' */
+
+			i__1 = *m + 1;
+			dtrsm_("L", "U", "N", diag, &k, n, alpha, a, &i__1, &
+				b[k], ldb);
+			i__1 = *m + 1;
+			dgemm_("T", "N", &k, n, &k, &c_b23, &a[k + 1], &i__1,
+				&b[k], ldb, alpha, &b[b_offset], ldb);
+			i__1 = *m + 1;
+			dtrsm_("L", "L", "T", diag, &k, n, &c_b27, &a[1], &
+				i__1, &b[b_offset], ldb);
+
+		    }
+
+		} else {
+
+/* SIDE ='L', N is even, TRANSR = 'N', and UPLO = 'U' */
+
+		    if (! notrans) {
+
+/* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'U', */
+/* and TRANS = 'N' */
+
+			i__1 = *m + 1;
+			dtrsm_("L", "L", "N", diag, &k, n, alpha, &a[k + 1], &
+				i__1, &b[b_offset], ldb);
+			i__1 = *m + 1;
+			dgemm_("T", "N", &k, n, &k, &c_b23, a, &i__1, &b[
+				b_offset], ldb, alpha, &b[k], ldb);
+			i__1 = *m + 1;
+			dtrsm_("L", "U", "T", diag, &k, n, &c_b27, &a[k], &
+				i__1, &b[k], ldb);
+
+		    } else {
+
+/* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'U', */
+/* and TRANS = 'T' */
+			i__1 = *m + 1;
+			dtrsm_("L", "U", "N", diag, &k, n, alpha, &a[k], &
+				i__1, &b[k], ldb);
+			i__1 = *m + 1;
+			dgemm_("N", "N", &k, n, &k, &c_b23, a, &i__1, &b[k],
+				ldb, alpha, &b[b_offset], ldb);
+			i__1 = *m + 1;
+			dtrsm_("L", "L", "T", diag, &k, n, &c_b27, &a[k + 1],
+				&i__1, &b[b_offset], ldb);
+
+		    }
+
+		}
+
+	    } else {
+
+/* SIDE = 'L', N is even, and TRANSR = 'T' */
+
+		if (lower) {
+
+/* SIDE ='L', N is even, TRANSR = 'T', and UPLO = 'L' */
+
+		    if (notrans) {
+
+/* SIDE ='L', N is even, TRANSR = 'T', UPLO = 'L', */
+/* and TRANS = 'N' */
+
+			dtrsm_("L", "U", "T", diag, &k, n, alpha, &a[k], &k, &
+				b[b_offset], ldb);
+			dgemm_("T", "N", &k, n, &k, &c_b23, &a[k * (k + 1)], &
+				k, &b[b_offset], ldb, alpha, &b[k], ldb);
+			dtrsm_("L", "L", "N", diag, &k, n, &c_b27, a, &k, &b[
+				k], ldb);
+
+		    } else {
+
+/* SIDE ='L', N is even, TRANSR = 'T', UPLO = 'L', */
+/* and TRANS = 'T' */
+
+			dtrsm_("L", "L", "T", diag, &k, n, alpha, a, &k, &b[k]
+, ldb);
+			dgemm_("N", "N", &k, n, &k, &c_b23, &a[k * (k + 1)], &
+				k, &b[k], ldb, alpha, &b[b_offset], ldb);
+			dtrsm_("L", "U", "N", diag, &k, n, &c_b27, &a[k], &k,
+				&b[b_offset], ldb);
+
+		    }
+
+		} else {
+
+/* SIDE ='L', N is even, TRANSR = 'T', and UPLO = 'U' */
+
+		    if (! notrans) {
+
+/* SIDE ='L', N is even, TRANSR = 'T', UPLO = 'U', */
+/* and TRANS = 'N' */
+
+			dtrsm_("L", "U", "T", diag, &k, n, alpha, &a[k * (k +
+				1)], &k, &b[b_offset], ldb);
+			dgemm_("N", "N", &k, n, &k, &c_b23, a, &k, &b[
+				b_offset], ldb, alpha, &b[k], ldb);
+			dtrsm_("L", "L", "N", diag, &k, n, &c_b27, &a[k * k],
+				&k, &b[k], ldb);
+
+		    } else {
+
+/* SIDE ='L', N is even, TRANSR = 'T', UPLO = 'U', */
+/* and TRANS = 'T' */
+
+			dtrsm_("L", "L", "T", diag, &k, n, alpha, &a[k * k], &
+				k, &b[k], ldb);
+			dgemm_("T", "N", &k, n, &k, &c_b23, a, &k, &b[k], ldb,
+				 alpha, &b[b_offset], ldb);
+			dtrsm_("L", "U", "N", diag, &k, n, &c_b27, &a[k * (k
+				+ 1)], &k, &b[b_offset], ldb);
+
+		    }
+
+		}
+
+	    }
+
+	}
+
+    } else {
+
+/* SIDE = 'R' */
+
+/* A is N-by-N. */
+/* If N is odd, set NISODD = .TRUE., and N1 and N2. */
+/* If N is even, NISODD = .FALSE., and K. */
+
+	if (*n % 2 == 0) { /* even order: RFP A splits into two K-by-K triangles, K = N/2 */
+	    nisodd = FALSE_;
+	    k = *n / 2;
+	} else {
+	    nisodd = TRUE_;
+	    if (lower) {
+		n2 = *n / 2;
+		n1 = *n - n2;
+	    } else {
+		n1 = *n / 2;
+		n2 = *n - n1;
+	    }
+	}
+
+	if (nisodd) {
+
+/* SIDE = 'R' and N is odd */
+
+	    if (normaltransr) {
+
+/* SIDE = 'R', N is odd, and TRANSR = 'N' */
+
+		if (lower) {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', and UPLO = 'L' */
+
+		    if (notrans) {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', UPLO = 'L', and */
+/* TRANS = 'N' */
+
+			dtrsm_("R", "U", "T", diag, m, &n2, alpha, &a[*n], n,
+				&b[n1 * b_dim1], ldb);
+			dgemm_("N", "N", m, &n1, &n2, &c_b23, &b[n1 * b_dim1],
+				 ldb, &a[n1], n, alpha, b, ldb);
+			dtrsm_("R", "L", "N", diag, m, &n1, &c_b27, a, n, b,
+				ldb);
+
+		    } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', UPLO = 'L', and */
+/* TRANS = 'T' */
+
+			dtrsm_("R", "L", "T", diag, m, &n1, alpha, a, n, b,
+				ldb);
+			dgemm_("N", "T", m, &n2, &n1, &c_b23, b, ldb, &a[n1],
+				n, alpha, &b[n1 * b_dim1], ldb);
+			dtrsm_("R", "U", "N", diag, m, &n2, &c_b27, &a[*n], n,
+				 &b[n1 * b_dim1], ldb);
+
+		    }
+
+		} else {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', and UPLO = 'U' */
+
+		    if (notrans) {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', UPLO = 'U', and */
+/* TRANS = 'N' */
+
+			dtrsm_("R", "L", "T", diag, m, &n1, alpha, &a[n2], n,
+				b, ldb);
+			dgemm_("N", "N", m, &n2, &n1, &c_b23, b, ldb, a, n,
+				alpha, &b[n1 * b_dim1], ldb);
+			dtrsm_("R", "U", "N", diag, m, &n2, &c_b27, &a[n1], n,
+				 &b[n1 * b_dim1], ldb);
+
+		    } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', UPLO = 'U', and */
+/* TRANS = 'T' */
+
+			dtrsm_("R", "U", "T", diag, m, &n2, alpha, &a[n1], n,
+				&b[n1 * b_dim1], ldb);
+			dgemm_("N", "T", m, &n1, &n2, &c_b23, &b[n1 * b_dim1],
+				 ldb, a, n, alpha, b, ldb);
+			dtrsm_("R", "L", "N", diag, m, &n1, &c_b27, &a[n2], n,
+				 b, ldb);
+
+		    }
+
+		}
+
+	    } else {
+
+/* SIDE = 'R', N is odd, and TRANSR = 'T' */
+
+		if (lower) {
+
+/* SIDE ='R', N is odd, TRANSR = 'T', and UPLO = 'L' */
+
+		    if (notrans) {
+
+/* SIDE ='R', N is odd, TRANSR = 'T', UPLO = 'L', and */
+/* TRANS = 'N' */
+
+			dtrsm_("R", "L", "N", diag, m, &n2, alpha, &a[1], &n1,
+				 &b[n1 * b_dim1], ldb);
+			dgemm_("N", "T", m, &n1, &n2, &c_b23, &b[n1 * b_dim1],
+				 ldb, &a[n1 * n1], &n1, alpha, b, ldb);
+			dtrsm_("R", "U", "T", diag, m, &n1, &c_b27, a, &n1, b,
+				 ldb);
+
+		    } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'T', UPLO = 'L', and */
+/* TRANS = 'T' */
+
+			dtrsm_("R", "U", "N", diag, m, &n1, alpha, a, &n1, b,
+				ldb);
+			dgemm_("N", "N", m, &n2, &n1, &c_b23, b, ldb, &a[n1 *
+				n1], &n1, alpha, &b[n1 * b_dim1], ldb);
+			dtrsm_("R", "L", "T", diag, m, &n2, &c_b27, &a[1], &
+				n1, &b[n1 * b_dim1], ldb);
+
+		    }
+
+		} else {
+
+/* SIDE ='R', N is odd, TRANSR = 'T', and UPLO = 'U' */
+
+		    if (notrans) {
+
+/* SIDE ='R', N is odd, TRANSR = 'T', UPLO = 'U', and */
+/* TRANS = 'N' */
+
+			dtrsm_("R", "U", "N", diag, m, &n1, alpha, &a[n2 * n2]
+, &n2, b, ldb);
+			dgemm_("N", "T", m, &n2, &n1, &c_b23, b, ldb, a, &n2,
+				alpha, &b[n1 * b_dim1], ldb);
+			dtrsm_("R", "L", "T", diag, m, &n2, &c_b27, &a[n1 *
+				n2], &n2, &b[n1 * b_dim1], ldb);
+
+		    } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'T', UPLO = 'U', and */
+/* TRANS = 'T' */
+
+			dtrsm_("R", "L", "N", diag, m, &n2, alpha, &a[n1 * n2]
+, &n2, &b[n1 * b_dim1], ldb);
+			dgemm_("N", "N", m, &n1, &n2, &c_b23, &b[n1 * b_dim1],
+				 ldb, a, &n2, alpha, b, ldb);
+			dtrsm_("R", "U", "T", diag, m, &n1, &c_b27, &a[n2 *
+				n2], &n2, b, ldb);
+
+		    }
+
+		}
+
+	    }
+
+	} else {
+
+/* SIDE = 'R' and N is even */
+
+	    if (normaltransr) {
+
+/* SIDE = 'R', N is even, and TRANSR = 'N' */
+
+		if (lower) {
+
+/* SIDE ='R', N is even, TRANSR = 'N', and UPLO = 'L' */
+
+		    if (notrans) {
+
+/* SIDE ='R', N is even, TRANSR = 'N', UPLO = 'L', */
+/* and TRANS = 'N' */
+
+			i__1 = *n + 1;
+			dtrsm_("R", "U", "T", diag, m, &k, alpha, a, &i__1, &
+				b[k * b_dim1], ldb);
+			i__1 = *n + 1;
+			dgemm_("N", "N", m, &k, &k, &c_b23, &b[k * b_dim1],
+				ldb, &a[k + 1], &i__1, alpha, b, ldb);
+			i__1 = *n + 1;
+			dtrsm_("R", "L", "N", diag, m, &k, &c_b27, &a[1], &
+				i__1, b, ldb);
+
+		    } else {
+
+/* SIDE ='R', N is even, TRANSR = 'N', UPLO = 'L', */
+/* and TRANS = 'T' */
+
+			i__1 = *n + 1;
+			dtrsm_("R", "L", "T", diag, m, &k, alpha, &a[1], &
+				i__1, b, ldb);
+			i__1 = *n + 1;
+			dgemm_("N", "T", m, &k, &k, &c_b23, b, ldb, &a[k + 1],
+				 &i__1, alpha, &b[k * b_dim1], ldb);
+			i__1 = *n + 1;
+			dtrsm_("R", "U", "N", diag, m, &k, &c_b27, a, &i__1, &
+				b[k * b_dim1], ldb);
+
+		    }
+
+		} else {
+
+/* SIDE ='R', N is even, TRANSR = 'N', and UPLO = 'U' */
+
+		    if (notrans) {
+
+/* SIDE ='R', N is even, TRANSR = 'N', UPLO = 'U', */
+/* and TRANS = 'N' */
+
+			i__1 = *n + 1;
+			dtrsm_("R", "L", "T", diag, m, &k, alpha, &a[k + 1], &
+				i__1, b, ldb);
+			i__1 = *n + 1;
+			dgemm_("N", "N", m, &k, &k, &c_b23, b, ldb, a, &i__1,
+				alpha, &b[k * b_dim1], ldb);
+			i__1 = *n + 1;
+			dtrsm_("R", "U", "N", diag, m, &k, &c_b27, &a[k], &
+				i__1, &b[k * b_dim1], ldb);
+
+		    } else {
+
+/* SIDE ='R', N is even, TRANSR = 'N', UPLO = 'U', */
+/* and TRANS = 'T' */
+
+			i__1 = *n + 1;
+			dtrsm_("R", "U", "T", diag, m, &k, alpha, &a[k], &
+				i__1, &b[k * b_dim1], ldb);
+			i__1 = *n + 1;
+			dgemm_("N", "T", m, &k, &k, &c_b23, &b[k * b_dim1],
+				ldb, a, &i__1, alpha, b, ldb);
+			i__1 = *n + 1;
+			dtrsm_("R", "L", "N", diag, m, &k, &c_b27, &a[k + 1],
+				&i__1, b, ldb);
+
+		    }
+
+		}
+
+	    } else {
+
+/* SIDE = 'R', N is even, and TRANSR = 'T' */
+
+		if (lower) {
+
+/* SIDE ='R', N is even, TRANSR = 'T', and UPLO = 'L' */
+
+		    if (notrans) {
+
+/* SIDE ='R', N is even, TRANSR = 'T', UPLO = 'L', */
+/* and TRANS = 'N' */
+
+			dtrsm_("R", "L", "N", diag, m, &k, alpha, a, &k, &b[k
+				* b_dim1], ldb);
+			dgemm_("N", "T", m, &k, &k, &c_b23, &b[k * b_dim1],
+				ldb, &a[(k + 1) * k], &k, alpha, b, ldb);
+			dtrsm_("R", "U", "T", diag, m, &k, &c_b27, &a[k], &k,
+				b, ldb);
+
+		    } else {
+
+/* SIDE ='R', N is even, TRANSR = 'T', UPLO = 'L', */
+/* and TRANS = 'T' */
+
+			dtrsm_("R", "U", "N", diag, m, &k, alpha, &a[k], &k,
+				b, ldb);
+			dgemm_("N", "N", m, &k, &k, &c_b23, b, ldb, &a[(k + 1)
+				 * k], &k, alpha, &b[k * b_dim1], ldb);
+			dtrsm_("R", "L", "T", diag, m, &k, &c_b27, a, &k, &b[
+				k * b_dim1], ldb);
+
+		    }
+
+		} else {
+
+/* SIDE ='R', N is even, TRANSR = 'T', and UPLO = 'U' */
+
+		    if (notrans) {
+
+/* SIDE ='R', N is even, TRANSR = 'T', UPLO = 'U', */
+/* and TRANS = 'N' */
+
+			dtrsm_("R", "U", "N", diag, m, &k, alpha, &a[(k + 1) *
+				 k], &k, b, ldb);
+			dgemm_("N", "T", m, &k, &k, &c_b23, b, ldb, a, &k,
+				alpha, &b[k * b_dim1], ldb);
+			dtrsm_("R", "L", "T", diag, m, &k, &c_b27, &a[k * k],
+				&k, &b[k * b_dim1], ldb);
+
+		    } else {
+
+/* SIDE ='R', N is even, TRANSR = 'T', UPLO = 'U', */
+/* and TRANS = 'T' */
+
+			dtrsm_("R", "L", "N", diag, m, &k, alpha, &a[k * k], &
+				k, &b[k * b_dim1], ldb);
+			dgemm_("N", "N", m, &k, &k, &c_b23, &b[k * b_dim1],
+				ldb, a, &k, alpha, b, ldb);
+			dtrsm_("R", "U", "T", diag, m, &k, &c_b27, &a[(k + 1)
+				* k], &k, b, ldb);
+
+		    }
+
+		}
+
+	    }
+
+	}
+    }
+
+    return 0;
+
+/* End of DTFSM */
+
+} /* dtfsm_ */
diff --git a/contrib/libs/clapack/dtftri.c b/contrib/libs/clapack/dtftri.c
new file mode 100644
index 0000000000..1f9edc18a6
--- /dev/null
+++ b/contrib/libs/clapack/dtftri.c
@@ -0,0 +1,474 @@
+/* dtftri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b13 = -1.;
+static doublereal c_b18 = 1.;
+
+/* Subroutine */ int dtftri_(char *transr, char *uplo, char *diag, integer *n,
+ doublereal *a, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ integer k, n1, n2;
+ logical normaltransr;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dtrmm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *);
+ logical lower;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nisodd;
+ extern /* Subroutine */ int dtrtri_(char *, char *, integer *, doublereal
+ *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTFTRI computes the inverse of a triangular matrix A stored in RFP */
+/* format. */
+
+/* This is a Level 3 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': The Normal TRANSR of RFP A is stored; */
+/* = 'T': The Transpose TRANSR of RFP A is stored. */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* DIAG (input) CHARACTER */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (0:nt-1); */
+/* nt=N*(N+1)/2. On entry, the triangular factor of a Hermitian */
+/* Positive Definite matrix A in RFP format. RFP format is */
+/* described by TRANSR, UPLO, and N as follows: If TRANSR = 'N' */
+/* then RFP A is (0:N,0:k-1) when N is even; k=N/2. RFP A is */
+/* (0:N-1,0:k) when N is odd; k=N/2. IF TRANSR = 'T' then RFP is */
+/* the transpose of RFP A as defined when */
+/* TRANSR = 'N'. The contents of RFP A are defined by UPLO as */
+/* follows: If UPLO = 'U' the RFP A contains the nt elements of */
+/* upper packed A; If UPLO = 'L' the RFP A contains the nt */
+/* elements of lower packed A. The LDA of RFP A is (N+1)/2 when */
+/* TRANSR = 'T'. When TRANSR is 'N' the LDA is N+1 when N is */
+/* even and N is odd. See the Note below for more details. */
+
+/* On exit, the (triangular) inverse of the original matrix, in */
+/* the same storage format. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, A(i,i) is exactly zero. The triangular */
+/* matrix is singular and its inverse can not be computed. */
+
+/* Notes */
+/* ===== */
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* even. We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* the transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* the transpose of the last three columns of AP lower. */
+/* This covers the case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 03 04 05 33 43 53 */
+/* 13 14 15 00 44 54 */
+/* 23 24 25 10 11 55 */
+/* 33 34 35 20 21 22 */
+/* 00 44 45 30 31 32 */
+/* 01 11 55 40 41 42 */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* odd. We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* the transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* the transpose of the last two columns of AP lower. */
+/* This covers the case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 02 03 04 00 33 43 */
+/* 12 13 14 10 11 44 */
+/* 22 23 24 20 21 22 */
+/* 00 33 34 30 31 32 */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+/* RFP A RFP A */
+
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ *info = 0;
+ normaltransr = lsame_(transr, "N");
+ lower = lsame_(uplo, "L");
+ if (! normaltransr && ! lsame_(transr, "T")) {
+ *info = -1;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ *info = -2;
+ } else if (! lsame_(diag, "N") && ! lsame_(diag,
+ "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTFTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* If N is odd, set NISODD = .TRUE. */
+/* If N is even, set K = N/2 and NISODD = .FALSE. */
+
+ if (*n % 2 == 0) {
+ k = *n / 2;
+ nisodd = FALSE_;
+ } else {
+ nisodd = TRUE_;
+ }
+
+/* Set N1 and N2 depending on LOWER */
+
+ if (lower) {
+ n2 = *n / 2;
+ n1 = *n - n2;
+ } else {
+ n1 = *n / 2;
+ n2 = *n - n1;
+ }
+
+
+/* start execution: there are eight cases */
+
+ if (nisodd) {
+
+/* N is odd */
+
+ if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:n1-1) ) */
+/* T1 -> a(0,0), T2 -> a(0,1), S -> a(n1,0) */
+/* T1 -> a(0), T2 -> a(n), S -> a(n1) */
+
+ dtrtri_("L", diag, &n1, a, n, info);
+ if (*info > 0) {
+ return 0;
+ }
+ dtrmm_("R", "L", "N", diag, &n2, &n1, &c_b13, a, n, &a[n1], n);
+ dtrtri_("U", diag, &n2, &a[*n], n, info)
+ ;
+ if (*info > 0) {
+ *info += n1;
+ }
+ if (*info > 0) {
+ return 0;
+ }
+ dtrmm_("L", "U", "T", diag, &n2, &n1, &c_b18, &a[*n], n, &a[
+ n1], n);
+
+ } else {
+
+/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:n2-1) */
+/* T1 -> a(n1+1,0), T2 -> a(n1,0), S -> a(0,0) */
+/* T1 -> a(n2), T2 -> a(n1), S -> a(0) */
+
+ dtrtri_("L", diag, &n1, &a[n2], n, info)
+ ;
+ if (*info > 0) {
+ return 0;
+ }
+ dtrmm_("L", "L", "T", diag, &n1, &n2, &c_b13, &a[n2], n, a, n);
+ dtrtri_("U", diag, &n2, &a[n1], n, info)
+ ;
+ if (*info > 0) {
+ *info += n1;
+ }
+ if (*info > 0) {
+ return 0;
+ }
+ dtrmm_("R", "U", "N", diag, &n1, &n2, &c_b18, &a[n1], n, a, n);
+
+ }
+
+ } else {
+
+/* N is odd and TRANSR = 'T' */
+
+ if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is odd */
+/* T1 -> a(0), T2 -> a(1), S -> a(0+n1*n1) */
+
+ dtrtri_("U", diag, &n1, a, &n1, info);
+ if (*info > 0) {
+ return 0;
+ }
+ dtrmm_("L", "U", "N", diag, &n1, &n2, &c_b13, a, &n1, &a[n1 *
+ n1], &n1);
+ dtrtri_("L", diag, &n2, &a[1], &n1, info);
+ if (*info > 0) {
+ *info += n1;
+ }
+ if (*info > 0) {
+ return 0;
+ }
+ dtrmm_("R", "L", "T", diag, &n1, &n2, &c_b18, &a[1], &n1, &a[
+ n1 * n1], &n1);
+
+ } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is odd */
+/* T1 -> a(0+n2*n2), T2 -> a(0+n1*n2), S -> a(0) */
+
+ dtrtri_("U", diag, &n1, &a[n2 * n2], &n2, info);
+ if (*info > 0) {
+ return 0;
+ }
+ dtrmm_("R", "U", "T", diag, &n2, &n1, &c_b13, &a[n2 * n2], &
+ n2, a, &n2);
+ dtrtri_("L", diag, &n2, &a[n1 * n2], &n2, info);
+ if (*info > 0) {
+ *info += n1;
+ }
+ if (*info > 0) {
+ return 0;
+ }
+ dtrmm_("L", "L", "N", diag, &n2, &n1, &c_b18, &a[n1 * n2], &
+ n2, a, &n2);
+ }
+
+ }
+
+ } else {
+
+/* N is even */
+
+ if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/* T1 -> a(1), T2 -> a(0), S -> a(k+1) */
+
+ i__1 = *n + 1;
+ dtrtri_("L", diag, &k, &a[1], &i__1, info);
+ if (*info > 0) {
+ return 0;
+ }
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ dtrmm_("R", "L", "N", diag, &k, &k, &c_b13, &a[1], &i__1, &a[
+ k + 1], &i__2);
+ i__1 = *n + 1;
+ dtrtri_("U", diag, &k, a, &i__1, info);
+ if (*info > 0) {
+ *info += k;
+ }
+ if (*info > 0) {
+ return 0;
+ }
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ dtrmm_("L", "U", "T", diag, &k, &k, &c_b18, a, &i__1, &a[k +
+ 1], &i__2)
+ ;
+
+ } else {
+
+/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(k+1,0) , T2 -> a(k,0), S -> a(0,0) */
+/* T1 -> a(k+1), T2 -> a(k), S -> a(0) */
+
+ i__1 = *n + 1;
+ dtrtri_("L", diag, &k, &a[k + 1], &i__1, info);
+ if (*info > 0) {
+ return 0;
+ }
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ dtrmm_("L", "L", "T", diag, &k, &k, &c_b13, &a[k + 1], &i__1,
+ a, &i__2);
+ i__1 = *n + 1;
+ dtrtri_("U", diag, &k, &a[k], &i__1, info);
+ if (*info > 0) {
+ *info += k;
+ }
+ if (*info > 0) {
+ return 0;
+ }
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ dtrmm_("R", "U", "N", diag, &k, &k, &c_b18, &a[k], &i__1, a, &
+ i__2);
+ }
+ } else {
+
+/* N is even and TRANSR = 'T' */
+
+ if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,1), T2 -> B(0,0), S -> B(0,k+1) */
+/* T1 -> a(0+k), T2 -> a(0+0), S -> a(0+k*(k+1)); lda=k */
+
+ dtrtri_("U", diag, &k, &a[k], &k, info);
+ if (*info > 0) {
+ return 0;
+ }
+ dtrmm_("L", "U", "N", diag, &k, &k, &c_b13, &a[k], &k, &a[k *
+ (k + 1)], &k);
+ dtrtri_("L", diag, &k, a, &k, info);
+ if (*info > 0) {
+ *info += k;
+ }
+ if (*info > 0) {
+ return 0;
+ }
+ dtrmm_("R", "L", "T", diag, &k, &k, &c_b18, a, &k, &a[k * (k
+ + 1)], &k)
+ ;
+ } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,k+1), T2 -> B(0,k), S -> B(0,0) */
+/* T1 -> a(0+k*(k+1)), T2 -> a(0+k*k), S -> a(0+0)); lda=k */
+
+ dtrtri_("U", diag, &k, &a[k * (k + 1)], &k, info);
+ if (*info > 0) {
+ return 0;
+ }
+ dtrmm_("R", "U", "T", diag, &k, &k, &c_b13, &a[k * (k + 1)], &
+ k, a, &k);
+ dtrtri_("L", diag, &k, &a[k * k], &k, info);
+ if (*info > 0) {
+ *info += k;
+ }
+ if (*info > 0) {
+ return 0;
+ }
+ dtrmm_("L", "L", "N", diag, &k, &k, &c_b18, &a[k * k], &k, a,
+ &k);
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DTFTRI */
+
+} /* dtftri_ */
diff --git a/contrib/libs/clapack/dtfttp.c b/contrib/libs/clapack/dtfttp.c
new file mode 100644
index 0000000000..33226186dc
--- /dev/null
+++ b/contrib/libs/clapack/dtfttp.c
@@ -0,0 +1,514 @@
+/* dtfttp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dtfttp_(char *transr, char *uplo, integer *n, doublereal
+	*arf, doublereal *ap, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__, j, k, n1, n2, ij, jp, js, nt, lda, ijp;
+    logical normaltransr;
+    extern logical lsame_(char *, char *);
+    logical lower;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical nisodd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTFTTP copies a triangular matrix A from rectangular full packed */
+/* format (TF) to standard packed format (TP). */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': ARF is in Normal format; */
+/* = 'T': ARF is in Transpose format; */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* ARF (input) DOUBLE PRECISION array, dimension ( N*(N+1)/2 ), */
+/* On entry, the upper or lower triangular matrix A stored in */
+/* RFP format. For a further discussion see Notes below. */
+
+/* AP (output) DOUBLE PRECISION array, dimension ( N*(N+1)/2 ), */
+/* On exit, the upper or lower triangular matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Notes */
+/* ===== */
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* even. We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* the transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* the transpose of the last three columns of AP lower. */
+/* This covers the case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 03 04 05 33 43 53 */
+/* 13 14 15 00 44 54 */
+/* 23 24 25 10 11 55 */
+/* 33 34 35 20 21 22 */
+/* 00 44 45 30 31 32 */
+/* 01 11 55 40 41 42 */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* odd. We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* the transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* the transpose of the last two columns of AP lower. */
+/* This covers the case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 02 03 04 00 33 43 */
+/* 12 13 14 10 11 44 */
+/* 22 23 24 20 21 22 */
+/* 00 33 34 30 31 32 */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+/* RFP A RFP A */
+
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Note: the argument descriptions above follow the original Fortran */
+/* (1-based) convention; the C code below indexes ap[] and arf[] */
+/* from 0. */
+
+/* Test the input parameters. */
+
+    *info = 0;
+    normaltransr = lsame_(transr, "N");
+    lower = lsame_(uplo, "L");
+    if (! normaltransr && ! lsame_(transr, "T")) {
+	*info = -1;
+    } else if (! lower && ! lsame_(uplo, "U")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DTFTTP", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* N = 1: copy the single element. A 1-by-1 matrix is the same in */
+/* normal and transposed RFP layout, so the two branches below are */
+/* (and are meant to be) identical; both are kept to mirror the */
+/* Fortran source. */
+
+    if (*n == 1) {
+	if (normaltransr) {
+	    ap[0] = arf[0];
+	} else {
+	    ap[0] = arf[0];
+	}
+	return 0;
+    }
+
+/* Size of array ARF(0:NT-1) */
+/* (NT only documents ARF's extent; it is not referenced again in */
+/* this routine. Retained from the f2c translation.) */
+
+    nt = *n * (*n + 1) / 2;
+
+/* Set N1 and N2 depending on LOWER */
+
+    if (lower) {
+	n2 = *n / 2;
+	n1 = *n - n2;
+    } else {
+	n1 = *n / 2;
+	n2 = *n - n1;
+    }
+
+/* If N is odd, set NISODD = .TRUE. */
+/* If N is even, set K = N/2 and NISODD = .FALSE. */
+
+/* set lda of ARF^C; ARF^C is (0:(N+1)/2-1,0:N-noe) */
+/* where noe = 0 if n is even, noe = 1 if n is odd */
+
+    if (*n % 2 == 0) {
+	k = *n / 2;
+	nisodd = FALSE_;
+	lda = *n + 1;
+    } else {
+	nisodd = TRUE_;
+	lda = *n;
+    }
+
+/* ARF^C has lda rows and n+1-noe cols */
+
+    if (! normaltransr) {
+	lda = (*n + 1) / 2;
+    }
+
+/* start execution: there are eight cases */
+
+    if (nisodd) {
+
+/* N is odd */
+
+	if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:n1-1) ) */
+/* T1 -> a(0,0), T2 -> a(0,1), S -> a(n1,0) */
+/* T1 -> a(0), T2 -> a(n), S -> a(n1); lda = n */
+
+		ijp = 0;
+		jp = 0;
+		i__1 = n2;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = *n - 1;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			ij = i__ + jp;
+			ap[ijp] = arf[ij];
+			++ijp;
+		    }
+		    jp += lda;
+		}
+		i__1 = n2 - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = n2;
+		    for (j = i__ + 1; j <= i__2; ++j) {
+			ij = i__ + j * lda;
+			ap[ijp] = arf[ij];
+			++ijp;
+		    }
+		}
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:n2-1) */
+/* T1 -> a(n1+1,0), T2 -> a(n1,0), S -> a(0,0) */
+/* T1 -> a(n2), T2 -> a(n1), S -> a(0) */
+
+		ijp = 0;
+		i__1 = n1 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    ij = n2 + j;
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			ap[ijp] = arf[ij];
+			++ijp;
+			ij += lda;
+		    }
+		}
+		js = 0;
+		i__1 = *n - 1;
+		for (j = n1; j <= i__1; ++j) {
+		    ij = js;
+		    i__2 = js + j;
+		    for (ij = js; ij <= i__2; ++ij) {
+			ap[ijp] = arf[ij];
+			++ijp;
+		    }
+		    js += lda;
+		}
+
+	    }
+
+	} else {
+
+/* N is odd and TRANSR = 'T' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is odd */
+/* T1 -> A(0,0) , T2 -> A(1,0) , S -> A(0,n1) */
+/* T1 -> a(0+0) , T2 -> a(1+0) , S -> a(0+n1*n1); lda=n1 */
+
+/* (The "i__3 < 0 ?" tests below are f2c's generic rendering of a */
+/* Fortran DO loop with step LDA; LDA is positive here, so the */
+/* "<=" bound is the one that applies.) */
+
+		ijp = 0;
+		i__1 = n2;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = *n * lda - 1;
+		    i__3 = lda;
+		    for (ij = i__ * (lda + 1); i__3 < 0 ? ij >= i__2 : ij <=
+			    i__2; ij += i__3) {
+			ap[ijp] = arf[ij];
+			++ijp;
+		    }
+		}
+		js = 1;
+		i__1 = n2 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + n2 - j - 1;
+		    for (ij = js; ij <= i__3; ++ij) {
+			ap[ijp] = arf[ij];
+			++ijp;
+		    }
+		    js = js + lda + 1;
+		}
+
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is odd */
+/* T1 -> A(0,n1+1), T2 -> A(0,n1), S -> A(0,0) */
+/* T1 -> a(n2*n2), T2 -> a(n1*n2), S -> a(0); lda = n2 */
+
+		ijp = 0;
+		js = n2 * lda;
+		i__1 = n1 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + j;
+		    for (ij = js; ij <= i__3; ++ij) {
+			ap[ijp] = arf[ij];
+			++ijp;
+		    }
+		    js += lda;
+		}
+		i__1 = n1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__3 = i__ + (n1 + i__) * lda;
+		    i__2 = lda;
+		    for (ij = i__; i__2 < 0 ? ij >= i__3 : ij <= i__3; ij +=
+			    i__2) {
+			ap[ijp] = arf[ij];
+			++ijp;
+		    }
+		}
+
+	    }
+
+	}
+
+    } else {
+
+/* N is even */
+
+	if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/* T1 -> a(1), T2 -> a(0), S -> a(k+1) */
+
+		ijp = 0;
+		jp = 0;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = *n - 1;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			ij = i__ + 1 + jp;
+			ap[ijp] = arf[ij];
+			++ijp;
+		    }
+		    jp += lda;
+		}
+		i__1 = k - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = k - 1;
+		    for (j = i__; j <= i__2; ++j) {
+			ij = i__ + j * lda;
+			ap[ijp] = arf[ij];
+			++ijp;
+		    }
+		}
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(k+1,0) , T2 -> a(k,0), S -> a(0,0) */
+/* T1 -> a(k+1), T2 -> a(k), S -> a(0) */
+
+		ijp = 0;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    ij = k + 1 + j;
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			ap[ijp] = arf[ij];
+			++ijp;
+			ij += lda;
+		    }
+		}
+		js = 0;
+		i__1 = *n - 1;
+		for (j = k; j <= i__1; ++j) {
+		    ij = js;
+		    i__2 = js + j;
+		    for (ij = js; ij <= i__2; ++ij) {
+			ap[ijp] = arf[ij];
+			++ijp;
+		    }
+		    js += lda;
+		}
+
+	    }
+
+	} else {
+
+/* N is even and TRANSR = 'T' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,1), T2 -> B(0,0), S -> B(0,k+1) */
+/* T1 -> a(0+k), T2 -> a(0+0), S -> a(0+k*(k+1)); lda=k */
+
+		ijp = 0;
+		i__1 = k - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = (*n + 1) * lda - 1;
+		    i__3 = lda;
+		    for (ij = i__ + (i__ + 1) * lda; i__3 < 0 ? ij >= i__2 :
+			    ij <= i__2; ij += i__3) {
+			ap[ijp] = arf[ij];
+			++ijp;
+		    }
+		}
+		js = 0;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + k - j - 1;
+		    for (ij = js; ij <= i__3; ++ij) {
+			ap[ijp] = arf[ij];
+			++ijp;
+		    }
+		    js = js + lda + 1;
+		}
+
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,k+1), T2 -> B(0,k), S -> B(0,0) */
+/* T1 -> a(0+k*(k+1)), T2 -> a(0+k*k), S -> a(0+0)); lda=k */
+
+		ijp = 0;
+		js = (k + 1) * lda;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + j;
+		    for (ij = js; ij <= i__3; ++ij) {
+			ap[ijp] = arf[ij];
+			++ijp;
+		    }
+		    js += lda;
+		}
+		i__1 = k - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__3 = i__ + (k + i__) * lda;
+		    i__2 = lda;
+		    for (ij = i__; i__2 < 0 ? ij >= i__3 : ij <= i__3; ij +=
+			    i__2) {
+			ap[ijp] = arf[ij];
+			++ijp;
+		    }
+		}
+
+	    }
+
+	}
+
+    }
+
+    return 0;
+
+/* End of DTFTTP */
+
+} /* dtfttp_ */
diff --git a/contrib/libs/clapack/dtfttr.c b/contrib/libs/clapack/dtfttr.c
new file mode 100644
index 0000000000..4f02d0cb69
--- /dev/null
+++ b/contrib/libs/clapack/dtfttr.c
@@ -0,0 +1,491 @@
+/* dtfttr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dtfttr_(char *transr, char *uplo, integer *n, doublereal
+	*arf, doublereal *a, integer *lda, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, k, l, n1, n2, ij, nt, nx2, np1x2;
+    logical normaltransr;
+    extern logical lsame_(char *, char *);
+    logical lower;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical nisodd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTFTTR copies a triangular matrix A from rectangular full packed */
+/* format (TF) to standard full format (TR). */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': ARF is in Normal format; */
+/* = 'T': ARF is in Transpose format. */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrices ARF and A. N >= 0. */
+
+/* ARF (input) DOUBLE PRECISION array, dimension (N*(N+1)/2). */
+/* On entry, the upper (if UPLO = 'U') or lower (if UPLO = 'L') */
+/* matrix A in RFP format. See the "Notes" below for more */
+/* details. */
+
+/* A (output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On exit, the triangular matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of the array A contains */
+/* the upper triangular matrix, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of the array A contains */
+/* the lower triangular matrix, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Notes */
+/* ===== */
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* even. We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* the transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* the transpose of the last three columns of AP lower. */
+/* This covers the case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 03 04 05 33 43 53 */
+/* 13 14 15 00 44 54 */
+/* 23 24 25 10 11 55 */
+/* 33 34 35 20 21 22 */
+/* 00 44 45 30 31 32 */
+/* 01 11 55 40 41 42 */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* odd. We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* the transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* the transpose of the last two columns of AP lower. */
+/* This covers the case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 02 03 04 00 33 43 */
+/* 12 13 14 10 11 44 */
+/* 22 23 24 20 21 22 */
+/* 00 33 34 30 31 32 */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+/* RFP A RFP A */
+
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* Reference */
+/* ========= */
+
+/* ===================================================================== */
+
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* (With zero lower bounds a_dim1 reduces to *lda and a_offset to */
+    /* 0, so "a -= a_offset" leaves the pointer unchanged; A is */
+    /* addressed below column-major, 0-based, as a[i + j * a_dim1].) */
+    a_dim1 = *lda - 1 - 0 + 1;
+    a_offset = 0 + a_dim1 * 0;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    normaltransr = lsame_(transr, "N");
+    lower = lsame_(uplo, "L");
+    if (! normaltransr && ! lsame_(transr, "T")) {
+	*info = -1;
+    } else if (! lower && ! lsame_(uplo, "U")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DTFTTR", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n <= 1) {
+	if (*n == 1) {
+	    a[0] = arf[0];
+	}
+	return 0;
+    }
+
+/* Size of array ARF(0:nt-1) */
+
+    nt = *n * (*n + 1) / 2;
+
+/* set N1 and N2 depending on LOWER: for N even N1=N2=K */
+
+    if (lower) {
+	n2 = *n / 2;
+	n1 = *n - n2;
+    } else {
+	n1 = *n / 2;
+	n2 = *n - n1;
+    }
+
+/* If N is odd, set NISODD = .TRUE., LDA=N+1 and A is (N+1)--by--K2. */
+/* If N is even, set K = N/2 and NISODD = .FALSE., LDA=N and A is */
+/* N--by--(N+1)/2. */
+
+/* (NX2 = 2*N and NP1X2 = 2*N+2 are the amounts by which IJ is */
+/* stepped backward after each column J in the UPLO = 'U' copy */
+/* loops below; they are only needed when UPLO = 'U'.) */
+
+    if (*n % 2 == 0) {
+	k = *n / 2;
+	nisodd = FALSE_;
+	if (! lower) {
+	    np1x2 = *n + *n + 2;
+	}
+    } else {
+	nisodd = TRUE_;
+	if (! lower) {
+	    nx2 = *n + *n;
+	}
+    }
+
+    if (nisodd) {
+
+/* N is odd */
+
+	if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'L' */
+
+		ij = 0;
+		i__1 = n2;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = n2 + j;
+		    for (i__ = n1; i__ <= i__2; ++i__) {
+			a[n2 + j + i__ * a_dim1] = arf[ij];
+			++ij;
+		    }
+		    i__2 = *n - 1;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			a[i__ + j * a_dim1] = arf[ij];
+			++ij;
+		    }
+		}
+
+	    } else {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'U' */
+
+		ij = nt - *n;
+		i__1 = n1;
+		for (j = *n - 1; j >= i__1; --j) {
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			a[i__ + j * a_dim1] = arf[ij];
+			++ij;
+		    }
+		    i__2 = n1 - 1;
+		    for (l = j - n1; l <= i__2; ++l) {
+			a[j - n1 + l * a_dim1] = arf[ij];
+			++ij;
+		    }
+		    ij -= nx2;
+		}
+
+	    }
+
+	} else {
+
+/* N is odd and TRANSR = 'T' */
+
+	    if (lower) {
+
+/* N is odd, TRANSR = 'T', and UPLO = 'L' */
+
+		ij = 0;
+		i__1 = n2 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			a[j + i__ * a_dim1] = arf[ij];
+			++ij;
+		    }
+		    i__2 = *n - 1;
+		    for (i__ = n1 + j; i__ <= i__2; ++i__) {
+			a[i__ + (n1 + j) * a_dim1] = arf[ij];
+			++ij;
+		    }
+		}
+		i__1 = *n - 1;
+		for (j = n2; j <= i__1; ++j) {
+		    i__2 = n1 - 1;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			a[j + i__ * a_dim1] = arf[ij];
+			++ij;
+		    }
+		}
+
+	    } else {
+
+/* N is odd, TRANSR = 'T', and UPLO = 'U' */
+
+		ij = 0;
+		i__1 = n1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = *n - 1;
+		    for (i__ = n1; i__ <= i__2; ++i__) {
+			a[j + i__ * a_dim1] = arf[ij];
+			++ij;
+		    }
+		}
+		i__1 = n1 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			a[i__ + j * a_dim1] = arf[ij];
+			++ij;
+		    }
+		    i__2 = *n - 1;
+		    for (l = n2 + j; l <= i__2; ++l) {
+			a[n2 + j + l * a_dim1] = arf[ij];
+			++ij;
+		    }
+		}
+
+	    }
+
+	}
+
+    } else {
+
+/* N is even */
+
+	if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* N is even, TRANSR = 'N', and UPLO = 'L' */
+
+		ij = 0;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = k + j;
+		    for (i__ = k; i__ <= i__2; ++i__) {
+			a[k + j + i__ * a_dim1] = arf[ij];
+			++ij;
+		    }
+		    i__2 = *n - 1;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			a[i__ + j * a_dim1] = arf[ij];
+			++ij;
+		    }
+		}
+
+	    } else {
+
+/* N is even, TRANSR = 'N', and UPLO = 'U' */
+
+		ij = nt - *n - 1;
+		i__1 = k;
+		for (j = *n - 1; j >= i__1; --j) {
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			a[i__ + j * a_dim1] = arf[ij];
+			++ij;
+		    }
+		    i__2 = k - 1;
+		    for (l = j - k; l <= i__2; ++l) {
+			a[j - k + l * a_dim1] = arf[ij];
+			++ij;
+		    }
+		    ij -= np1x2;
+		}
+
+	    }
+
+	} else {
+
+/* N is even and TRANSR = 'T' */
+
+	    if (lower) {
+
+/* N is even, TRANSR = 'T', and UPLO = 'L' */
+/* (The leading column J = K is copied first, outside the two */
+/* main loops below.) */
+
+		ij = 0;
+		j = k;
+		i__1 = *n - 1;
+		for (i__ = k; i__ <= i__1; ++i__) {
+		    a[i__ + j * a_dim1] = arf[ij];
+		    ++ij;
+		}
+		i__1 = k - 2;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			a[j + i__ * a_dim1] = arf[ij];
+			++ij;
+		    }
+		    i__2 = *n - 1;
+		    for (i__ = k + 1 + j; i__ <= i__2; ++i__) {
+			a[i__ + (k + 1 + j) * a_dim1] = arf[ij];
+			++ij;
+		    }
+		}
+		i__1 = *n - 1;
+		for (j = k - 1; j <= i__1; ++j) {
+		    i__2 = k - 1;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			a[j + i__ * a_dim1] = arf[ij];
+			++ij;
+		    }
+		}
+
+	    } else {
+
+/* N is even, TRANSR = 'T', and UPLO = 'U' */
+
+		ij = 0;
+		i__1 = k;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = *n - 1;
+		    for (i__ = k; i__ <= i__2; ++i__) {
+			a[j + i__ * a_dim1] = arf[ij];
+			++ij;
+		    }
+		}
+		i__1 = k - 2;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			a[i__ + j * a_dim1] = arf[ij];
+			++ij;
+		    }
+		    i__2 = *n - 1;
+		    for (l = k + 1 + j; l <= i__2; ++l) {
+			a[k + 1 + j + l * a_dim1] = arf[ij];
+			++ij;
+		    }
+		}
+/* Note that here, on exit of the loop, J = K-1 */
+		i__1 = j;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    a[i__ + j * a_dim1] = arf[ij];
+		    ++ij;
+		}
+
+	    }
+
+	}
+
+    }
+
+    return 0;
+
+/* End of DTFTTR */
+
+} /* dtfttr_ */
diff --git a/contrib/libs/clapack/dtgevc.c b/contrib/libs/clapack/dtgevc.c
new file mode 100644
index 0000000000..e411ffcb0b
--- /dev/null
+++ b/contrib/libs/clapack/dtgevc.c
@@ -0,0 +1,1418 @@
+/* dtgevc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static logical c_true = TRUE_;
+static integer c__2 = 2;
+static doublereal c_b34 = 1.;
+static integer c__1 = 1;
+static doublereal c_b36 = 0.;
+static logical c_false = FALSE_;
+
+/* Subroutine */ int dtgevc_(char *side, char *howmny, logical *select,
+ integer *n, doublereal *s, integer *lds, doublereal *p, integer *ldp,
+ doublereal *vl, integer *ldvl, doublereal *vr, integer *ldvr, integer
+ *mm, integer *m, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer p_dim1, p_offset, s_dim1, s_offset, vl_dim1, vl_offset, vr_dim1,
+ vr_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4, d__5, d__6;
+
+ /* Local variables */
+ integer i__, j, ja, jc, je, na, im, jr, jw, nw;
+ doublereal big;
+ logical lsa, lsb;
+ doublereal ulp, sum[4] /* was [2][2] */;
+ integer ibeg, ieig, iend;
+ doublereal dmin__, temp, xmax, sump[4] /* was [2][2] */, sums[4]
+ /* was [2][2] */;
+ extern /* Subroutine */ int dlag2_(doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *);
+ doublereal cim2a, cim2b, cre2a, cre2b, temp2, bdiag[2], acoef, scale;
+ logical ilall;
+ integer iside;
+ doublereal sbeta;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+ logical il2by2;
+ integer iinfo;
+ doublereal small;
+ logical compl;
+ doublereal anorm, bnorm;
+ logical compr;
+ extern /* Subroutine */ int dlaln2_(logical *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *, doublereal *, doublereal *
+, doublereal *, integer *, doublereal *, doublereal *, integer *);
+ doublereal temp2i;
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *);
+ doublereal temp2r;
+ logical ilabad, ilbbad;
+ doublereal acoefa, bcoefa, cimaga, cimagb;
+ logical ilback;
+ doublereal bcoefi, ascale, bscale, creala, crealb;
+ extern doublereal dlamch_(char *);
+ doublereal bcoefr, salfar, safmin;
+ extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *);
+ doublereal xscale, bignum;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical ilcomp, ilcplx;
+ integer ihwmny;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+
+/* Purpose */
+/* ======= */
+
+/* DTGEVC computes some or all of the right and/or left eigenvectors of */
+/* a pair of real matrices (S,P), where S is a quasi-triangular matrix */
+/* and P is upper triangular. Matrix pairs of this type are produced by */
+/* the generalized Schur factorization of a matrix pair (A,B): */
+
+/* A = Q*S*Z**T, B = Q*P*Z**T */
+
+/* as computed by DGGHRD + DHGEQZ. */
+
+/* The right eigenvector x and the left eigenvector y of (S,P) */
+/* corresponding to an eigenvalue w are defined by: */
+
+/* S*x = w*P*x, (y**H)*S = w*(y**H)*P, */
+
+/* where y**H denotes the conjugate transpose of y. */
+/* The eigenvalues are not input to this routine, but are computed */
+/* directly from the diagonal blocks of S and P. */
+
+/* This routine returns the matrices X and/or Y of right and left */
+/* eigenvectors of (S,P), or the products Z*X and/or Q*Y, */
+/* where Z and Q are input matrices. */
+/* If Q and Z are the orthogonal factors from the generalized Schur */
+/* factorization of a matrix pair (A,B), then Z*X and Q*Y */
+/* are the matrices of right and left eigenvectors of (A,B). */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'R': compute right eigenvectors only; */
+/* = 'L': compute left eigenvectors only; */
+/* = 'B': compute both right and left eigenvectors. */
+
+/* HOWMNY (input) CHARACTER*1 */
+/* = 'A': compute all right and/or left eigenvectors; */
+/* = 'B': compute all right and/or left eigenvectors, */
+/* backtransformed by the matrices in VR and/or VL; */
+/* = 'S': compute selected right and/or left eigenvectors, */
+/* specified by the logical array SELECT. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* If HOWMNY='S', SELECT specifies the eigenvectors to be */
+/* computed. If w(j) is a real eigenvalue, the corresponding */
+/* real eigenvector is computed if SELECT(j) is .TRUE.. */
+/* If w(j) and w(j+1) are the real and imaginary parts of a */
+/* complex eigenvalue, the corresponding complex eigenvector */
+/* is computed if either SELECT(j) or SELECT(j+1) is .TRUE., */
+/* and on exit SELECT(j) is set to .TRUE. and SELECT(j+1) is */
+/* set to .FALSE.. */
+/* Not referenced if HOWMNY = 'A' or 'B'. */
+
+/* N (input) INTEGER */
+/* The order of the matrices S and P. N >= 0. */
+
+/* S (input) DOUBLE PRECISION array, dimension (LDS,N) */
+/* The upper quasi-triangular matrix S from a generalized Schur */
+/* factorization, as computed by DHGEQZ. */
+
+/* LDS (input) INTEGER */
+/* The leading dimension of array S. LDS >= max(1,N). */
+
+/* P (input) DOUBLE PRECISION array, dimension (LDP,N) */
+/* The upper triangular matrix P from a generalized Schur */
+/* factorization, as computed by DHGEQZ. */
+/* 2-by-2 diagonal blocks of P corresponding to 2-by-2 blocks */
+/* of S must be in positive diagonal form. */
+
+/* LDP (input) INTEGER */
+/* The leading dimension of array P. LDP >= max(1,N). */
+
+/* VL (input/output) DOUBLE PRECISION array, dimension (LDVL,MM) */
+/* On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must */
+/* contain an N-by-N matrix Q (usually the orthogonal matrix Q */
+/* of left Schur vectors returned by DHGEQZ). */
+/* On exit, if SIDE = 'L' or 'B', VL contains: */
+/* if HOWMNY = 'A', the matrix Y of left eigenvectors of (S,P); */
+/* if HOWMNY = 'B', the matrix Q*Y; */
+/* if HOWMNY = 'S', the left eigenvectors of (S,P) specified by */
+/* SELECT, stored consecutively in the columns of */
+/* VL, in the same order as their eigenvalues. */
+
+/* A complex eigenvector corresponding to a complex eigenvalue */
+/* is stored in two consecutive columns, the first holding the */
+/* real part, and the second the imaginary part. */
+
+/* Not referenced if SIDE = 'R'. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of array VL. LDVL >= 1, and if */
+/* SIDE = 'L' or 'B', LDVL >= N. */
+
+/* VR (input/output) DOUBLE PRECISION array, dimension (LDVR,MM) */
+/* On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must */
+/* contain an N-by-N matrix Z (usually the orthogonal matrix Z */
+/* of right Schur vectors returned by DHGEQZ). */
+
+/* On exit, if SIDE = 'R' or 'B', VR contains: */
+/* if HOWMNY = 'A', the matrix X of right eigenvectors of (S,P); */
+/* if HOWMNY = 'B' or 'b', the matrix Z*X; */
+/* if HOWMNY = 'S' or 's', the right eigenvectors of (S,P) */
+/* specified by SELECT, stored consecutively in the */
+/* columns of VR, in the same order as their */
+/* eigenvalues. */
+
+/* A complex eigenvector corresponding to a complex eigenvalue */
+/* is stored in two consecutive columns, the first holding the */
+/* real part and the second the imaginary part. */
+
+/* Not referenced if SIDE = 'L'. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. LDVR >= 1, and if */
+/* SIDE = 'R' or 'B', LDVR >= N. */
+
+/* MM (input) INTEGER */
+/* The number of columns in the arrays VL and/or VR. MM >= M. */
+
+/* M (output) INTEGER */
+/* The number of columns in the arrays VL and/or VR actually */
+/* used to store the eigenvectors. If HOWMNY = 'A' or 'B', M */
+/* is set to N. Each selected real eigenvector occupies one */
+/* column and each selected complex eigenvector occupies two */
+/* columns. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (6*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: the 2-by-2 block (INFO:INFO+1) does not have a complex */
+/* eigenvalue. */
+
+/* Further Details */
+/* =============== */
+
+/* Allocation of workspace: */
+/* ---------- -- --------- */
+
+/* WORK( j ) = 1-norm of j-th column of A, above the diagonal */
+/* WORK( N+j ) = 1-norm of j-th column of B, above the diagonal */
+/* WORK( 2*N+1:3*N ) = real part of eigenvector */
+/* WORK( 3*N+1:4*N ) = imaginary part of eigenvector */
+/* WORK( 4*N+1:5*N ) = real part of back-transformed eigenvector */
+/* WORK( 5*N+1:6*N ) = imaginary part of back-transformed eigenvector */
+
+/* Rowwise vs. columnwise solution methods: */
+/* ------- -- ---------- -------- ------- */
+
+/* Finding a generalized eigenvector consists basically of solving the */
+/* singular triangular system */
+
+/* (A - w B) x = 0 (for right) or: (A - w B)**H y = 0 (for left) */
+
+/* Consider finding the i-th right eigenvector (assume all eigenvalues */
+/* are real). The equation to be solved is: */
+/* n i */
+/* 0 = sum C(j,k) v(k) = sum C(j,k) v(k) for j = i,. . .,1 */
+/* k=j k=j */
+
+/* where C = (A - w B) (The components v(i+1:n) are 0.) */
+
+/* The "rowwise" method is: */
+
+/* (1) v(i) := 1 */
+/* for j = i-1,. . .,1: */
+/* i */
+/* (2) compute s = - sum C(j,k) v(k) and */
+/* k=j+1 */
+
+/* (3) v(j) := s / C(j,j) */
+
+/* Step 2 is sometimes called the "dot product" step, since it is an */
+/* inner product between the j-th row and the portion of the eigenvector */
+/* that has been computed so far. */
+
+/* The "columnwise" method consists basically in doing the sums */
+/* for all the rows in parallel. As each v(j) is computed, the */
+/* contribution of v(j) times the j-th column of C is added to the */
+/* partial sums. Since FORTRAN arrays are stored columnwise, this has */
+/* the advantage that at each step, the elements of C that are accessed */
+/* are adjacent to one another, whereas with the rowwise method, the */
+/* elements accessed at a step are spaced LDS (and LDP) words apart. */
+
+/* When finding left eigenvectors, the matrix in question is the */
+/* transpose of the one in storage, so the rowwise method then */
+/* actually accesses columns of A and B at each step, and so is the */
+/* preferred method. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and Test the input parameters */
+
+ /* Parameter adjustments */
+ --select;
+ s_dim1 = *lds;
+ s_offset = 1 + s_dim1;
+ s -= s_offset;
+ p_dim1 = *ldp;
+ p_offset = 1 + p_dim1;
+ p -= p_offset;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+
+ /* Function Body */
+ if (lsame_(howmny, "A")) {
+ ihwmny = 1;
+ ilall = TRUE_;
+ ilback = FALSE_;
+ } else if (lsame_(howmny, "S")) {
+ ihwmny = 2;
+ ilall = FALSE_;
+ ilback = FALSE_;
+ } else if (lsame_(howmny, "B")) {
+ ihwmny = 3;
+ ilall = TRUE_;
+ ilback = TRUE_;
+ } else {
+ ihwmny = -1;
+ ilall = TRUE_;
+ }
+
+ if (lsame_(side, "R")) {
+ iside = 1;
+ compl = FALSE_;
+ compr = TRUE_;
+ } else if (lsame_(side, "L")) {
+ iside = 2;
+ compl = TRUE_;
+ compr = FALSE_;
+ } else if (lsame_(side, "B")) {
+ iside = 3;
+ compl = TRUE_;
+ compr = TRUE_;
+ } else {
+ iside = -1;
+ }
+
+ *info = 0;
+ if (iside < 0) {
+ *info = -1;
+ } else if (ihwmny < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lds < max(1,*n)) {
+ *info = -6;
+ } else if (*ldp < max(1,*n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTGEVC", &i__1);
+ return 0;
+ }
+
+/* Count the number of eigenvectors to be computed */
+
+ if (! ilall) {
+ im = 0;
+ ilcplx = FALSE_;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (ilcplx) {
+ ilcplx = FALSE_;
+ goto L10;
+ }
+ if (j < *n) {
+ if (s[j + 1 + j * s_dim1] != 0.) {
+ ilcplx = TRUE_;
+ }
+ }
+ if (ilcplx) {
+ if (select[j] || select[j + 1]) {
+ im += 2;
+ }
+ } else {
+ if (select[j]) {
+ ++im;
+ }
+ }
+L10:
+ ;
+ }
+ } else {
+ im = *n;
+ }
+
+/* Check 2-by-2 diagonal blocks of A, B */
+
+ ilabad = FALSE_;
+ ilbbad = FALSE_;
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ if (s[j + 1 + j * s_dim1] != 0.) {
+ if (p[j + j * p_dim1] == 0. || p[j + 1 + (j + 1) * p_dim1] == 0.
+ || p[j + (j + 1) * p_dim1] != 0.) {
+ ilbbad = TRUE_;
+ }
+ if (j < *n - 1) {
+ if (s[j + 2 + (j + 1) * s_dim1] != 0.) {
+ ilabad = TRUE_;
+ }
+ }
+ }
+/* L20: */
+ }
+
+ if (ilabad) {
+ *info = -5;
+ } else if (ilbbad) {
+ *info = -7;
+ } else if (compl && *ldvl < *n || *ldvl < 1) {
+ *info = -10;
+ } else if (compr && *ldvr < *n || *ldvr < 1) {
+ *info = -12;
+ } else if (*mm < im) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTGEVC", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = im;
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Machine Constants */
+
+ safmin = dlamch_("Safe minimum");
+ big = 1. / safmin;
+ dlabad_(&safmin, &big);
+ ulp = dlamch_("Epsilon") * dlamch_("Base");
+ small = safmin * *n / ulp;
+ big = 1. / small;
+ bignum = 1. / (safmin * *n);
+
+/* Compute the 1-norm of each column of the strictly upper triangular */
+/* part (i.e., excluding all elements belonging to the diagonal */
+/* blocks) of A and B to check for possible overflow in the */
+/* triangular solver. */
+
+ anorm = (d__1 = s[s_dim1 + 1], abs(d__1));
+ if (*n > 1) {
+ anorm += (d__1 = s[s_dim1 + 2], abs(d__1));
+ }
+ bnorm = (d__1 = p[p_dim1 + 1], abs(d__1));
+ work[1] = 0.;
+ work[*n + 1] = 0.;
+
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ temp = 0.;
+ temp2 = 0.;
+ if (s[j + (j - 1) * s_dim1] == 0.) {
+ iend = j - 1;
+ } else {
+ iend = j - 2;
+ }
+ i__2 = iend;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp += (d__1 = s[i__ + j * s_dim1], abs(d__1));
+ temp2 += (d__1 = p[i__ + j * p_dim1], abs(d__1));
+/* L30: */
+ }
+ work[j] = temp;
+ work[*n + j] = temp2;
+/* Computing MIN */
+ i__3 = j + 1;
+ i__2 = min(i__3,*n);
+ for (i__ = iend + 1; i__ <= i__2; ++i__) {
+ temp += (d__1 = s[i__ + j * s_dim1], abs(d__1));
+ temp2 += (d__1 = p[i__ + j * p_dim1], abs(d__1));
+/* L40: */
+ }
+ anorm = max(anorm,temp);
+ bnorm = max(bnorm,temp2);
+/* L50: */
+ }
+
+ ascale = 1. / max(anorm,safmin);
+ bscale = 1. / max(bnorm,safmin);
+
+/* Left eigenvectors */
+
+ if (compl) {
+ ieig = 0;
+
+/* Main loop over eigenvalues */
+
+ ilcplx = FALSE_;
+ i__1 = *n;
+ for (je = 1; je <= i__1; ++je) {
+
+/* Skip this iteration if (a) HOWMNY='S' and SELECT=.FALSE., or */
+/* (b) this would be the second of a complex pair. */
+/* Check for complex eigenvalue, so as to be sure of which */
+/* entry(-ies) of SELECT to look at. */
+
+ if (ilcplx) {
+ ilcplx = FALSE_;
+ goto L220;
+ }
+ nw = 1;
+ if (je < *n) {
+ if (s[je + 1 + je * s_dim1] != 0.) {
+ ilcplx = TRUE_;
+ nw = 2;
+ }
+ }
+ if (ilall) {
+ ilcomp = TRUE_;
+ } else if (ilcplx) {
+ ilcomp = select[je] || select[je + 1];
+ } else {
+ ilcomp = select[je];
+ }
+ if (! ilcomp) {
+ goto L220;
+ }
+
+/* Decide if (a) singular pencil, (b) real eigenvalue, or */
+/* (c) complex eigenvalue. */
+
+ if (! ilcplx) {
+ if ((d__1 = s[je + je * s_dim1], abs(d__1)) <= safmin && (
+ d__2 = p[je + je * p_dim1], abs(d__2)) <= safmin) {
+
+/* Singular matrix pencil -- return unit eigenvector */
+
+ ++ieig;
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vl[jr + ieig * vl_dim1] = 0.;
+/* L60: */
+ }
+ vl[ieig + ieig * vl_dim1] = 1.;
+ goto L220;
+ }
+ }
+
+/* Clear vector */
+
+ i__2 = nw * *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ work[(*n << 1) + jr] = 0.;
+/* L70: */
+ }
+/* T */
+/* Compute coefficients in ( a A - b B ) y = 0 */
+/* a is ACOEF */
+/* b is BCOEFR + i*BCOEFI */
+
+ if (! ilcplx) {
+
+/* Real eigenvalue */
+
+/* Computing MAX */
+ d__3 = (d__1 = s[je + je * s_dim1], abs(d__1)) * ascale, d__4
+ = (d__2 = p[je + je * p_dim1], abs(d__2)) * bscale,
+ d__3 = max(d__3,d__4);
+ temp = 1. / max(d__3,safmin);
+ salfar = temp * s[je + je * s_dim1] * ascale;
+ sbeta = temp * p[je + je * p_dim1] * bscale;
+ acoef = sbeta * ascale;
+ bcoefr = salfar * bscale;
+ bcoefi = 0.;
+
+/* Scale to avoid underflow */
+
+ scale = 1.;
+ lsa = abs(sbeta) >= safmin && abs(acoef) < small;
+ lsb = abs(salfar) >= safmin && abs(bcoefr) < small;
+ if (lsa) {
+ scale = small / abs(sbeta) * min(anorm,big);
+ }
+ if (lsb) {
+/* Computing MAX */
+ d__1 = scale, d__2 = small / abs(salfar) * min(bnorm,big);
+ scale = max(d__1,d__2);
+ }
+ if (lsa || lsb) {
+/* Computing MIN */
+/* Computing MAX */
+ d__3 = 1., d__4 = abs(acoef), d__3 = max(d__3,d__4), d__4
+ = abs(bcoefr);
+ d__1 = scale, d__2 = 1. / (safmin * max(d__3,d__4));
+ scale = min(d__1,d__2);
+ if (lsa) {
+ acoef = ascale * (scale * sbeta);
+ } else {
+ acoef = scale * acoef;
+ }
+ if (lsb) {
+ bcoefr = bscale * (scale * salfar);
+ } else {
+ bcoefr = scale * bcoefr;
+ }
+ }
+ acoefa = abs(acoef);
+ bcoefa = abs(bcoefr);
+
+/* First component is 1 */
+
+ work[(*n << 1) + je] = 1.;
+ xmax = 1.;
+ } else {
+
+/* Complex eigenvalue */
+
+ d__1 = safmin * 100.;
+ dlag2_(&s[je + je * s_dim1], lds, &p[je + je * p_dim1], ldp, &
+ d__1, &acoef, &temp, &bcoefr, &temp2, &bcoefi);
+ bcoefi = -bcoefi;
+ if (bcoefi == 0.) {
+ *info = je;
+ return 0;
+ }
+
+/* Scale to avoid over/underflow */
+
+ acoefa = abs(acoef);
+ bcoefa = abs(bcoefr) + abs(bcoefi);
+ scale = 1.;
+ if (acoefa * ulp < safmin && acoefa >= safmin) {
+ scale = safmin / ulp / acoefa;
+ }
+ if (bcoefa * ulp < safmin && bcoefa >= safmin) {
+/* Computing MAX */
+ d__1 = scale, d__2 = safmin / ulp / bcoefa;
+ scale = max(d__1,d__2);
+ }
+ if (safmin * acoefa > ascale) {
+ scale = ascale / (safmin * acoefa);
+ }
+ if (safmin * bcoefa > bscale) {
+/* Computing MIN */
+ d__1 = scale, d__2 = bscale / (safmin * bcoefa);
+ scale = min(d__1,d__2);
+ }
+ if (scale != 1.) {
+ acoef = scale * acoef;
+ acoefa = abs(acoef);
+ bcoefr = scale * bcoefr;
+ bcoefi = scale * bcoefi;
+ bcoefa = abs(bcoefr) + abs(bcoefi);
+ }
+
+/* Compute first two components of eigenvector */
+
+ temp = acoef * s[je + 1 + je * s_dim1];
+ temp2r = acoef * s[je + je * s_dim1] - bcoefr * p[je + je *
+ p_dim1];
+ temp2i = -bcoefi * p[je + je * p_dim1];
+ if (abs(temp) > abs(temp2r) + abs(temp2i)) {
+ work[(*n << 1) + je] = 1.;
+ work[*n * 3 + je] = 0.;
+ work[(*n << 1) + je + 1] = -temp2r / temp;
+ work[*n * 3 + je + 1] = -temp2i / temp;
+ } else {
+ work[(*n << 1) + je + 1] = 1.;
+ work[*n * 3 + je + 1] = 0.;
+ temp = acoef * s[je + (je + 1) * s_dim1];
+ work[(*n << 1) + je] = (bcoefr * p[je + 1 + (je + 1) *
+ p_dim1] - acoef * s[je + 1 + (je + 1) * s_dim1]) /
+ temp;
+ work[*n * 3 + je] = bcoefi * p[je + 1 + (je + 1) * p_dim1]
+ / temp;
+ }
+/* Computing MAX */
+ d__5 = (d__1 = work[(*n << 1) + je], abs(d__1)) + (d__2 =
+ work[*n * 3 + je], abs(d__2)), d__6 = (d__3 = work[(*
+ n << 1) + je + 1], abs(d__3)) + (d__4 = work[*n * 3 +
+ je + 1], abs(d__4));
+ xmax = max(d__5,d__6);
+ }
+
+/* Computing MAX */
+ d__1 = ulp * acoefa * anorm, d__2 = ulp * bcoefa * bnorm, d__1 =
+ max(d__1,d__2);
+ dmin__ = max(d__1,safmin);
+
+/* T */
+/* Triangular solve of (a A - b B) y = 0 */
+
+/* T */
+/* (rowwise in (a A - b B) , or columnwise in (a A - b B) ) */
+
+ il2by2 = FALSE_;
+
+ i__2 = *n;
+ for (j = je + nw; j <= i__2; ++j) {
+ if (il2by2) {
+ il2by2 = FALSE_;
+ goto L160;
+ }
+
+ na = 1;
+ bdiag[0] = p[j + j * p_dim1];
+ if (j < *n) {
+ if (s[j + 1 + j * s_dim1] != 0.) {
+ il2by2 = TRUE_;
+ bdiag[1] = p[j + 1 + (j + 1) * p_dim1];
+ na = 2;
+ }
+ }
+
+/* Check whether scaling is necessary for dot products */
+
+ xscale = 1. / max(1.,xmax);
+/* Computing MAX */
+ d__1 = work[j], d__2 = work[*n + j], d__1 = max(d__1,d__2),
+ d__2 = acoefa * work[j] + bcoefa * work[*n + j];
+ temp = max(d__1,d__2);
+ if (il2by2) {
+/* Computing MAX */
+ d__1 = temp, d__2 = work[j + 1], d__1 = max(d__1,d__2),
+ d__2 = work[*n + j + 1], d__1 = max(d__1,d__2),
+ d__2 = acoefa * work[j + 1] + bcoefa * work[*n +
+ j + 1];
+ temp = max(d__1,d__2);
+ }
+ if (temp > bignum * xscale) {
+ i__3 = nw - 1;
+ for (jw = 0; jw <= i__3; ++jw) {
+ i__4 = j - 1;
+ for (jr = je; jr <= i__4; ++jr) {
+ work[(jw + 2) * *n + jr] = xscale * work[(jw + 2)
+ * *n + jr];
+/* L80: */
+ }
+/* L90: */
+ }
+ xmax *= xscale;
+ }
+
+/* Compute dot products */
+
+/* j-1 */
+/* SUM = sum conjg( a*S(k,j) - b*P(k,j) )*x(k) */
+/* k=je */
+
+/* To reduce the op count, this is done as */
+
+/* _ j-1 _ j-1 */
+/* a*conjg( sum S(k,j)*x(k) ) - b*conjg( sum P(k,j)*x(k) ) */
+/* k=je k=je */
+
+/* which may cause underflow problems if A or B are close */
+/* to underflow. (E.g., less than SMALL.) */
+
+
+/* A series of compiler directives to defeat vectorization */
+/* for the next loop */
+
+/* $PL$ CMCHAR=' ' */
+/* DIR$ NEXTSCALAR */
+/* $DIR SCALAR */
+/* DIR$ NEXT SCALAR */
+/* VD$L NOVECTOR */
+/* DEC$ NOVECTOR */
+/* VD$ NOVECTOR */
+/* VDIR NOVECTOR */
+/* VOCL LOOP,SCALAR */
+/* IBM PREFER SCALAR */
+/* $PL$ CMCHAR='*' */
+
+ i__3 = nw;
+ for (jw = 1; jw <= i__3; ++jw) {
+
+/* $PL$ CMCHAR=' ' */
+/* DIR$ NEXTSCALAR */
+/* $DIR SCALAR */
+/* DIR$ NEXT SCALAR */
+/* VD$L NOVECTOR */
+/* DEC$ NOVECTOR */
+/* VD$ NOVECTOR */
+/* VDIR NOVECTOR */
+/* VOCL LOOP,SCALAR */
+/* IBM PREFER SCALAR */
+/* $PL$ CMCHAR='*' */
+
+ i__4 = na;
+ for (ja = 1; ja <= i__4; ++ja) {
+ sums[ja + (jw << 1) - 3] = 0.;
+ sump[ja + (jw << 1) - 3] = 0.;
+
+ i__5 = j - 1;
+ for (jr = je; jr <= i__5; ++jr) {
+ sums[ja + (jw << 1) - 3] += s[jr + (j + ja - 1) *
+ s_dim1] * work[(jw + 1) * *n + jr];
+ sump[ja + (jw << 1) - 3] += p[jr + (j + ja - 1) *
+ p_dim1] * work[(jw + 1) * *n + jr];
+/* L100: */
+ }
+/* L110: */
+ }
+/* L120: */
+ }
+
+/* $PL$ CMCHAR=' ' */
+/* DIR$ NEXTSCALAR */
+/* $DIR SCALAR */
+/* DIR$ NEXT SCALAR */
+/* VD$L NOVECTOR */
+/* DEC$ NOVECTOR */
+/* VD$ NOVECTOR */
+/* VDIR NOVECTOR */
+/* VOCL LOOP,SCALAR */
+/* IBM PREFER SCALAR */
+/* $PL$ CMCHAR='*' */
+
+ i__3 = na;
+ for (ja = 1; ja <= i__3; ++ja) {
+ if (ilcplx) {
+ sum[ja - 1] = -acoef * sums[ja - 1] + bcoefr * sump[
+ ja - 1] - bcoefi * sump[ja + 1];
+ sum[ja + 1] = -acoef * sums[ja + 1] + bcoefr * sump[
+ ja + 1] + bcoefi * sump[ja - 1];
+ } else {
+ sum[ja - 1] = -acoef * sums[ja - 1] + bcoefr * sump[
+ ja - 1];
+ }
+/* L130: */
+ }
+
+/* T */
+/* Solve ( a A - b B ) y = SUM(,) */
+/* with scaling and perturbation of the denominator */
+
+ dlaln2_(&c_true, &na, &nw, &dmin__, &acoef, &s[j + j * s_dim1]
+, lds, bdiag, &bdiag[1], sum, &c__2, &bcoefr, &bcoefi,
+ &work[(*n << 1) + j], n, &scale, &temp, &iinfo);
+ if (scale < 1.) {
+ i__3 = nw - 1;
+ for (jw = 0; jw <= i__3; ++jw) {
+ i__4 = j - 1;
+ for (jr = je; jr <= i__4; ++jr) {
+ work[(jw + 2) * *n + jr] = scale * work[(jw + 2) *
+ *n + jr];
+/* L140: */
+ }
+/* L150: */
+ }
+ xmax = scale * xmax;
+ }
+ xmax = max(xmax,temp);
+L160:
+ ;
+ }
+
+/* Copy eigenvector to VL, back transforming if */
+/* HOWMNY='B'. */
+
+ ++ieig;
+ if (ilback) {
+ i__2 = nw - 1;
+ for (jw = 0; jw <= i__2; ++jw) {
+ i__3 = *n + 1 - je;
+ dgemv_("N", n, &i__3, &c_b34, &vl[je * vl_dim1 + 1], ldvl,
+ &work[(jw + 2) * *n + je], &c__1, &c_b36, &work[(
+ jw + 4) * *n + 1], &c__1);
+/* L170: */
+ }
+ dlacpy_(" ", n, &nw, &work[(*n << 2) + 1], n, &vl[je *
+ vl_dim1 + 1], ldvl);
+ ibeg = 1;
+ } else {
+ dlacpy_(" ", n, &nw, &work[(*n << 1) + 1], n, &vl[ieig *
+ vl_dim1 + 1], ldvl);
+ ibeg = je;
+ }
+
+/* Scale eigenvector */
+
+ xmax = 0.;
+ if (ilcplx) {
+ i__2 = *n;
+ for (j = ibeg; j <= i__2; ++j) {
+/* Computing MAX */
+ d__3 = xmax, d__4 = (d__1 = vl[j + ieig * vl_dim1], abs(
+ d__1)) + (d__2 = vl[j + (ieig + 1) * vl_dim1],
+ abs(d__2));
+ xmax = max(d__3,d__4);
+/* L180: */
+ }
+ } else {
+ i__2 = *n;
+ for (j = ibeg; j <= i__2; ++j) {
+/* Computing MAX */
+ d__2 = xmax, d__3 = (d__1 = vl[j + ieig * vl_dim1], abs(
+ d__1));
+ xmax = max(d__2,d__3);
+/* L190: */
+ }
+ }
+
+ if (xmax > safmin) {
+ xscale = 1. / xmax;
+
+ i__2 = nw - 1;
+ for (jw = 0; jw <= i__2; ++jw) {
+ i__3 = *n;
+ for (jr = ibeg; jr <= i__3; ++jr) {
+ vl[jr + (ieig + jw) * vl_dim1] = xscale * vl[jr + (
+ ieig + jw) * vl_dim1];
+/* L200: */
+ }
+/* L210: */
+ }
+ }
+ ieig = ieig + nw - 1;
+
+L220:
+ ;
+ }
+ }
+
+/* Right eigenvectors */
+
+ if (compr) {
+ ieig = im + 1;
+
+/* Main loop over eigenvalues */
+
+ ilcplx = FALSE_;
+ for (je = *n; je >= 1; --je) {
+
+/* Skip this iteration if (a) HOWMNY='S' and SELECT=.FALSE., or */
+/* (b) this would be the second of a complex pair. */
+/* Check for complex eigenvalue, so as to be sure of which */
+/* entry(-ies) of SELECT to look at -- if complex, SELECT(JE) */
+/* or SELECT(JE-1). */
+/* If this is a complex pair, the 2-by-2 diagonal block */
+/* corresponding to the eigenvalue is in rows/columns JE-1:JE */
+
+ if (ilcplx) {
+ ilcplx = FALSE_;
+ goto L500;
+ }
+ nw = 1;
+ if (je > 1) {
+ if (s[je + (je - 1) * s_dim1] != 0.) {
+ ilcplx = TRUE_;
+ nw = 2;
+ }
+ }
+ if (ilall) {
+ ilcomp = TRUE_;
+ } else if (ilcplx) {
+ ilcomp = select[je] || select[je - 1];
+ } else {
+ ilcomp = select[je];
+ }
+ if (! ilcomp) {
+ goto L500;
+ }
+
+/* Decide if (a) singular pencil, (b) real eigenvalue, or */
+/* (c) complex eigenvalue. */
+
+ if (! ilcplx) {
+ if ((d__1 = s[je + je * s_dim1], abs(d__1)) <= safmin && (
+ d__2 = p[je + je * p_dim1], abs(d__2)) <= safmin) {
+
+/* Singular matrix pencil -- unit eigenvector */
+
+ --ieig;
+ i__1 = *n;
+ for (jr = 1; jr <= i__1; ++jr) {
+ vr[jr + ieig * vr_dim1] = 0.;
+/* L230: */
+ }
+ vr[ieig + ieig * vr_dim1] = 1.;
+ goto L500;
+ }
+ }
+
+/* Clear vector */
+
+ i__1 = nw - 1;
+ for (jw = 0; jw <= i__1; ++jw) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ work[(jw + 2) * *n + jr] = 0.;
+/* L240: */
+ }
+/* L250: */
+ }
+
+/* Compute coefficients in ( a A - b B ) x = 0 */
+/* a is ACOEF */
+/* b is BCOEFR + i*BCOEFI */
+
+ if (! ilcplx) {
+
+/* Real eigenvalue */
+
+/* Computing MAX */
+ d__3 = (d__1 = s[je + je * s_dim1], abs(d__1)) * ascale, d__4
+ = (d__2 = p[je + je * p_dim1], abs(d__2)) * bscale,
+ d__3 = max(d__3,d__4);
+ temp = 1. / max(d__3,safmin);
+ salfar = temp * s[je + je * s_dim1] * ascale;
+ sbeta = temp * p[je + je * p_dim1] * bscale;
+ acoef = sbeta * ascale;
+ bcoefr = salfar * bscale;
+ bcoefi = 0.;
+
+/* Scale to avoid underflow */
+
+ scale = 1.;
+ lsa = abs(sbeta) >= safmin && abs(acoef) < small;
+ lsb = abs(salfar) >= safmin && abs(bcoefr) < small;
+ if (lsa) {
+ scale = small / abs(sbeta) * min(anorm,big);
+ }
+ if (lsb) {
+/* Computing MAX */
+ d__1 = scale, d__2 = small / abs(salfar) * min(bnorm,big);
+ scale = max(d__1,d__2);
+ }
+ if (lsa || lsb) {
+/* Computing MIN */
+/* Computing MAX */
+ d__3 = 1., d__4 = abs(acoef), d__3 = max(d__3,d__4), d__4
+ = abs(bcoefr);
+ d__1 = scale, d__2 = 1. / (safmin * max(d__3,d__4));
+ scale = min(d__1,d__2);
+ if (lsa) {
+ acoef = ascale * (scale * sbeta);
+ } else {
+ acoef = scale * acoef;
+ }
+ if (lsb) {
+ bcoefr = bscale * (scale * salfar);
+ } else {
+ bcoefr = scale * bcoefr;
+ }
+ }
+ acoefa = abs(acoef);
+ bcoefa = abs(bcoefr);
+
+/* First component is 1 */
+
+ work[(*n << 1) + je] = 1.;
+ xmax = 1.;
+
+/* Compute contribution from column JE of A and B to sum */
+/* (See "Further Details", above.) */
+
+ i__1 = je - 1;
+ for (jr = 1; jr <= i__1; ++jr) {
+ work[(*n << 1) + jr] = bcoefr * p[jr + je * p_dim1] -
+ acoef * s[jr + je * s_dim1];
+/* L260: */
+ }
+ } else {
+
+/* Complex eigenvalue */
+
+ d__1 = safmin * 100.;
+ dlag2_(&s[je - 1 + (je - 1) * s_dim1], lds, &p[je - 1 + (je -
+ 1) * p_dim1], ldp, &d__1, &acoef, &temp, &bcoefr, &
+ temp2, &bcoefi);
+ if (bcoefi == 0.) {
+ *info = je - 1;
+ return 0;
+ }
+
+/* Scale to avoid over/underflow */
+
+ acoefa = abs(acoef);
+ bcoefa = abs(bcoefr) + abs(bcoefi);
+ scale = 1.;
+ if (acoefa * ulp < safmin && acoefa >= safmin) {
+ scale = safmin / ulp / acoefa;
+ }
+ if (bcoefa * ulp < safmin && bcoefa >= safmin) {
+/* Computing MAX */
+ d__1 = scale, d__2 = safmin / ulp / bcoefa;
+ scale = max(d__1,d__2);
+ }
+ if (safmin * acoefa > ascale) {
+ scale = ascale / (safmin * acoefa);
+ }
+ if (safmin * bcoefa > bscale) {
+/* Computing MIN */
+ d__1 = scale, d__2 = bscale / (safmin * bcoefa);
+ scale = min(d__1,d__2);
+ }
+ if (scale != 1.) {
+ acoef = scale * acoef;
+ acoefa = abs(acoef);
+ bcoefr = scale * bcoefr;
+ bcoefi = scale * bcoefi;
+ bcoefa = abs(bcoefr) + abs(bcoefi);
+ }
+
+/* Compute first two components of eigenvector */
+/* and contribution to sums */
+
+ temp = acoef * s[je + (je - 1) * s_dim1];
+ temp2r = acoef * s[je + je * s_dim1] - bcoefr * p[je + je *
+ p_dim1];
+ temp2i = -bcoefi * p[je + je * p_dim1];
+ if (abs(temp) >= abs(temp2r) + abs(temp2i)) {
+ work[(*n << 1) + je] = 1.;
+ work[*n * 3 + je] = 0.;
+ work[(*n << 1) + je - 1] = -temp2r / temp;
+ work[*n * 3 + je - 1] = -temp2i / temp;
+ } else {
+ work[(*n << 1) + je - 1] = 1.;
+ work[*n * 3 + je - 1] = 0.;
+ temp = acoef * s[je - 1 + je * s_dim1];
+ work[(*n << 1) + je] = (bcoefr * p[je - 1 + (je - 1) *
+ p_dim1] - acoef * s[je - 1 + (je - 1) * s_dim1]) /
+ temp;
+ work[*n * 3 + je] = bcoefi * p[je - 1 + (je - 1) * p_dim1]
+ / temp;
+ }
+
+/* Computing MAX */
+ d__5 = (d__1 = work[(*n << 1) + je], abs(d__1)) + (d__2 =
+ work[*n * 3 + je], abs(d__2)), d__6 = (d__3 = work[(*
+ n << 1) + je - 1], abs(d__3)) + (d__4 = work[*n * 3 +
+ je - 1], abs(d__4));
+ xmax = max(d__5,d__6);
+
+/* Compute contribution from columns JE and JE-1 */
+/* of A and B to the sums. */
+
+ creala = acoef * work[(*n << 1) + je - 1];
+ cimaga = acoef * work[*n * 3 + je - 1];
+ crealb = bcoefr * work[(*n << 1) + je - 1] - bcoefi * work[*n
+ * 3 + je - 1];
+ cimagb = bcoefi * work[(*n << 1) + je - 1] + bcoefr * work[*n
+ * 3 + je - 1];
+ cre2a = acoef * work[(*n << 1) + je];
+ cim2a = acoef * work[*n * 3 + je];
+ cre2b = bcoefr * work[(*n << 1) + je] - bcoefi * work[*n * 3
+ + je];
+ cim2b = bcoefi * work[(*n << 1) + je] + bcoefr * work[*n * 3
+ + je];
+ i__1 = je - 2;
+ for (jr = 1; jr <= i__1; ++jr) {
+ work[(*n << 1) + jr] = -creala * s[jr + (je - 1) * s_dim1]
+ + crealb * p[jr + (je - 1) * p_dim1] - cre2a * s[
+ jr + je * s_dim1] + cre2b * p[jr + je * p_dim1];
+ work[*n * 3 + jr] = -cimaga * s[jr + (je - 1) * s_dim1] +
+ cimagb * p[jr + (je - 1) * p_dim1] - cim2a * s[jr
+ + je * s_dim1] + cim2b * p[jr + je * p_dim1];
+/* L270: */
+ }
+ }
+
+/* Computing MAX */
+ d__1 = ulp * acoefa * anorm, d__2 = ulp * bcoefa * bnorm, d__1 =
+ max(d__1,d__2);
+ dmin__ = max(d__1,safmin);
+
+/* Columnwise triangular solve of (a A - b B) x = 0 */
+
+ il2by2 = FALSE_;
+ for (j = je - nw; j >= 1; --j) {
+
+/* If a 2-by-2 block, is in position j-1:j, wait until */
+/* next iteration to process it (when it will be j:j+1) */
+
+ if (! il2by2 && j > 1) {
+ if (s[j + (j - 1) * s_dim1] != 0.) {
+ il2by2 = TRUE_;
+ goto L370;
+ }
+ }
+ bdiag[0] = p[j + j * p_dim1];
+ if (il2by2) {
+ na = 2;
+ bdiag[1] = p[j + 1 + (j + 1) * p_dim1];
+ } else {
+ na = 1;
+ }
+
+/* Compute x(j) (and x(j+1), if 2-by-2 block) */
+
+ dlaln2_(&c_false, &na, &nw, &dmin__, &acoef, &s[j + j *
+ s_dim1], lds, bdiag, &bdiag[1], &work[(*n << 1) + j],
+ n, &bcoefr, &bcoefi, sum, &c__2, &scale, &temp, &
+ iinfo);
+ if (scale < 1.) {
+
+ i__1 = nw - 1;
+ for (jw = 0; jw <= i__1; ++jw) {
+ i__2 = je;
+ for (jr = 1; jr <= i__2; ++jr) {
+ work[(jw + 2) * *n + jr] = scale * work[(jw + 2) *
+ *n + jr];
+/* L280: */
+ }
+/* L290: */
+ }
+ }
+/* Computing MAX */
+ d__1 = scale * xmax;
+ xmax = max(d__1,temp);
+
+ i__1 = nw;
+ for (jw = 1; jw <= i__1; ++jw) {
+ i__2 = na;
+ for (ja = 1; ja <= i__2; ++ja) {
+ work[(jw + 1) * *n + j + ja - 1] = sum[ja + (jw << 1)
+ - 3];
+/* L300: */
+ }
+/* L310: */
+ }
+
+/* w = w + x(j)*(a S(*,j) - b P(*,j) ) with scaling */
+
+ if (j > 1) {
+
+/* Check whether scaling is necessary for sum. */
+
+ xscale = 1. / max(1.,xmax);
+ temp = acoefa * work[j] + bcoefa * work[*n + j];
+ if (il2by2) {
+/* Computing MAX */
+ d__1 = temp, d__2 = acoefa * work[j + 1] + bcoefa *
+ work[*n + j + 1];
+ temp = max(d__1,d__2);
+ }
+/* Computing MAX */
+ d__1 = max(temp,acoefa);
+ temp = max(d__1,bcoefa);
+ if (temp > bignum * xscale) {
+
+ i__1 = nw - 1;
+ for (jw = 0; jw <= i__1; ++jw) {
+ i__2 = je;
+ for (jr = 1; jr <= i__2; ++jr) {
+ work[(jw + 2) * *n + jr] = xscale * work[(jw
+ + 2) * *n + jr];
+/* L320: */
+ }
+/* L330: */
+ }
+ xmax *= xscale;
+ }
+
+/* Compute the contributions of the off-diagonals of */
+/* column j (and j+1, if 2-by-2 block) of A and B to the */
+/* sums. */
+
+
+ i__1 = na;
+ for (ja = 1; ja <= i__1; ++ja) {
+ if (ilcplx) {
+ creala = acoef * work[(*n << 1) + j + ja - 1];
+ cimaga = acoef * work[*n * 3 + j + ja - 1];
+ crealb = bcoefr * work[(*n << 1) + j + ja - 1] -
+ bcoefi * work[*n * 3 + j + ja - 1];
+ cimagb = bcoefi * work[(*n << 1) + j + ja - 1] +
+ bcoefr * work[*n * 3 + j + ja - 1];
+ i__2 = j - 1;
+ for (jr = 1; jr <= i__2; ++jr) {
+ work[(*n << 1) + jr] = work[(*n << 1) + jr] -
+ creala * s[jr + (j + ja - 1) * s_dim1]
+ + crealb * p[jr + (j + ja - 1) *
+ p_dim1];
+ work[*n * 3 + jr] = work[*n * 3 + jr] -
+ cimaga * s[jr + (j + ja - 1) * s_dim1]
+ + cimagb * p[jr + (j + ja - 1) *
+ p_dim1];
+/* L340: */
+ }
+ } else {
+ creala = acoef * work[(*n << 1) + j + ja - 1];
+ crealb = bcoefr * work[(*n << 1) + j + ja - 1];
+ i__2 = j - 1;
+ for (jr = 1; jr <= i__2; ++jr) {
+ work[(*n << 1) + jr] = work[(*n << 1) + jr] -
+ creala * s[jr + (j + ja - 1) * s_dim1]
+ + crealb * p[jr + (j + ja - 1) *
+ p_dim1];
+/* L350: */
+ }
+ }
+/* L360: */
+ }
+ }
+
+ il2by2 = FALSE_;
+L370:
+ ;
+ }
+
+/* Copy eigenvector to VR, back transforming if */
+/* HOWMNY='B'. */
+
+ ieig -= nw;
+ if (ilback) {
+
+ i__1 = nw - 1;
+ for (jw = 0; jw <= i__1; ++jw) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ work[(jw + 4) * *n + jr] = work[(jw + 2) * *n + 1] *
+ vr[jr + vr_dim1];
+/* L380: */
+ }
+
+/* A series of compiler directives to defeat */
+/* vectorization for the next loop */
+
+
+ i__2 = je;
+ for (jc = 2; jc <= i__2; ++jc) {
+ i__3 = *n;
+ for (jr = 1; jr <= i__3; ++jr) {
+ work[(jw + 4) * *n + jr] += work[(jw + 2) * *n +
+ jc] * vr[jr + jc * vr_dim1];
+/* L390: */
+ }
+/* L400: */
+ }
+/* L410: */
+ }
+
+ i__1 = nw - 1;
+ for (jw = 0; jw <= i__1; ++jw) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vr[jr + (ieig + jw) * vr_dim1] = work[(jw + 4) * *n +
+ jr];
+/* L420: */
+ }
+/* L430: */
+ }
+
+ iend = *n;
+ } else {
+ i__1 = nw - 1;
+ for (jw = 0; jw <= i__1; ++jw) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vr[jr + (ieig + jw) * vr_dim1] = work[(jw + 2) * *n +
+ jr];
+/* L440: */
+ }
+/* L450: */
+ }
+
+ iend = je;
+ }
+
+/* Scale eigenvector */
+
+ xmax = 0.;
+ if (ilcplx) {
+ i__1 = iend;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ d__3 = xmax, d__4 = (d__1 = vr[j + ieig * vr_dim1], abs(
+ d__1)) + (d__2 = vr[j + (ieig + 1) * vr_dim1],
+ abs(d__2));
+ xmax = max(d__3,d__4);
+/* L460: */
+ }
+ } else {
+ i__1 = iend;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ d__2 = xmax, d__3 = (d__1 = vr[j + ieig * vr_dim1], abs(
+ d__1));
+ xmax = max(d__2,d__3);
+/* L470: */
+ }
+ }
+
+ if (xmax > safmin) {
+ xscale = 1. / xmax;
+ i__1 = nw - 1;
+ for (jw = 0; jw <= i__1; ++jw) {
+ i__2 = iend;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vr[jr + (ieig + jw) * vr_dim1] = xscale * vr[jr + (
+ ieig + jw) * vr_dim1];
+/* L480: */
+ }
+/* L490: */
+ }
+ }
+L500:
+ ;
+ }
+ }
+
+ return 0;
+
+/* End of DTGEVC */
+
+} /* dtgevc_ */
diff --git a/contrib/libs/clapack/dtgex2.c b/contrib/libs/clapack/dtgex2.c
new file mode 100644
index 0000000000..52d2b9f9ae
--- /dev/null
+++ b/contrib/libs/clapack/dtgex2.c
@@ -0,0 +1,711 @@
+/* dtgex2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c turns Fortran literal actual arguments into file-scope statics so */
+/* they can be passed by reference, as the translated calls below require. */
+
+static integer c__4 = 4;
+static doublereal c_b5 = 0.;
+static integer c__1 = 1;
+static integer c__2 = 2;
+static doublereal c_b42 = 1.;
+static doublereal c_b48 = -1.;
+static integer c__0 = 0;
+
+/* Subroutine */ int dtgex2_(logical *wantq, logical *wantz, integer *n,
+	doublereal *a, integer *lda, doublereal *b, integer *ldb, doublereal *
+	q, integer *ldq, doublereal *z__, integer *ldz, integer *j1, integer *
+	n1, integer *n2, doublereal *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1,
+	    z_offset, i__1, i__2;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    doublereal f, g;
+    integer i__, m;
+    doublereal s[16] /* was [4][4] */, t[16] /* was [4][4] */, be[2], ai[2]
+	, ar[2], sa, sb, li[16] /* was [4][4] */, ir[16] /*
+	 was [4][4] */, ss, ws, eps;
+    logical weak;
+    doublereal ddum;
+    integer idum;
+    doublereal taul[4], dsum;
+    extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *);
+    doublereal taur[4], scpy[16] /* was [4][4] */, tcpy[16] /*
+	    was [4][4] */;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    doublereal scale, bqra21, brqa21;
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *);
+    doublereal licop[16] /* was [4][4] */;
+    integer linfo;
+    doublereal ircop[16] /* was [4][4] */, dnorm;
+    integer iwork[4];
+    extern /* Subroutine */ int dlagv2_(doublereal *, integer *, doublereal *,
+	     integer *, doublereal *, doublereal *, doublereal *, doublereal *
+, doublereal *, doublereal *, doublereal *), dgeqr2_(integer *,
+	    integer *, doublereal *, integer *, doublereal *, doublereal *,
+	    integer *), dgerq2_(integer *, integer *, doublereal *, integer *,
+	     doublereal *, doublereal *, integer *), dorg2r_(integer *,
+	    integer *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, integer *), dorgr2_(integer *, integer *, integer *,
+	     doublereal *, integer *, doublereal *, doublereal *, integer *),
+	    dorm2r_(char *, char *, integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *), dormr2_(char *, char *,
+	    integer *, integer *, integer *, doublereal *, integer *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *), dtgsy2_(char *, integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+	     integer *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    doublereal dscale;
+    extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *),
+	    dlartg_(doublereal *, doublereal *, doublereal *, doublereal *,
+	    doublereal *), dlaset_(char *, integer *, integer *, doublereal *,
+	     doublereal *, doublereal *, integer *), dlassq_(integer *
+, doublereal *, integer *, doublereal *, doublereal *);
+    logical dtrong;
+    doublereal thresh, smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTGEX2 swaps adjacent diagonal blocks (A11, B11) and (A22, B22) */
+/* of size 1-by-1 or 2-by-2 in an upper (quasi) triangular matrix pair */
+/* (A, B) by an orthogonal equivalence transformation. */
+
+/* (A, B) must be in generalized real Schur canonical form (as returned */
+/* by DGGES), i.e. A is block upper triangular with 1-by-1 and 2-by-2 */
+/* diagonal blocks. B is upper triangular. */
+
+/* Optionally, the matrices Q and Z of generalized Schur vectors are */
+/* updated. */
+
+/* Q(in) * A(in) * Z(in)' = Q(out) * A(out) * Z(out)' */
+/* Q(in) * B(in) * Z(in)' = Q(out) * B(out) * Z(out)' */
+
+
+/* Arguments */
+/* ========= */
+
+/* WANTQ (input) LOGICAL */
+/* .TRUE. : update the left transformation matrix Q; */
+/* .FALSE.: do not update Q. */
+
+/* WANTZ (input) LOGICAL */
+/* .TRUE. : update the right transformation matrix Z; */
+/* .FALSE.: do not update Z. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION arrays, dimensions (LDA,N) */
+/* On entry, the matrix A in the pair (A, B). */
+/* On exit, the updated matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) DOUBLE PRECISION arrays, dimensions (LDB,N) */
+/* On entry, the matrix B in the pair (A, B). */
+/* On exit, the updated matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* Q (input/output) DOUBLE PRECISION array, dimension (LDZ,N) */
+/* On entry, if WANTQ = .TRUE., the orthogonal matrix Q. */
+/* On exit, the updated matrix Q. */
+/* Not referenced if WANTQ = .FALSE.. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= 1. */
+/* If WANTQ = .TRUE., LDQ >= N. */
+
+/* Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N) */
+/* On entry, if WANTZ =.TRUE., the orthogonal matrix Z. */
+/* On exit, the updated matrix Z. */
+/* Not referenced if WANTZ = .FALSE.. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1. */
+/* If WANTZ = .TRUE., LDZ >= N. */
+
+/* J1 (input) INTEGER */
+/* The index to the first block (A11, B11). 1 <= J1 <= N. */
+
+/* N1 (input) INTEGER */
+/* The order of the first block (A11, B11). N1 = 0, 1 or 2. */
+
+/* N2 (input) INTEGER */
+/* The order of the second block (A22, B22). N2 = 0, 1 or 2. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)). */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* LWORK >= MAX( 1, N*(N2+N1), (N2+N1)*(N2+N1)*2 ) */
+
+/* INFO (output) INTEGER */
+/* =0: Successful exit */
+/* >0: If INFO = 1, the transformed matrix (A, B) would be */
+/* too far from generalized Schur form; the blocks are */
+/* not swapped and (A, B) and (Q, Z) are unchanged. */
+/* The problem of swapping is too ill-conditioned. */
+/* <0: If INFO = -16: LWORK is too small. Appropriate value */
+/* for LWORK is returned in WORK(1). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* In the current code both weak and strong stability tests are */
+/* performed. The user can omit the strong stability test by changing */
+/* the internal logical parameter WANDS to .FALSE.. See ref. [2] for */
+/* details. */
+
+/* [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */
+/* Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */
+/* M.S. Moonen et al (eds), Linear Algebra for Large Scale and */
+/* Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */
+
+/* [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified */
+/* Eigenvalues of a Regular Matrix Pair (A, B) and Condition */
+/* Estimation: Theory, Algorithms and Software, */
+/* Report UMINF - 94.04, Department of Computing Science, Umea */
+/* University, S-901 87 Umea, Sweden, 1994. Also as LAPACK Working */
+/* Note 87. To appear in Numerical Algorithms, 1996. */
+
+/* ===================================================================== */
+/* Replaced various illegal calls to DCOPY by calls to DLASET, or by DO */
+/* loops. Sven Hammarling, 1/5/02. */
+
+/* NOTE(review): this file is verbatim f2c output of the reference */
+/* Fortran dtgex2.f; it is kept byte-faithful to the translation, so */
+/* f2c artifacts (0-based flattened 4x4 arrays, shift-based index */
+/* arithmetic, misspelled local DTRONG) are deliberate and unchanged. */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+
+/* Quick return if possible */
+
+    if (*n <= 1 || *n1 <= 0 || *n2 <= 0) {
+	return 0;
+    }
+    if (*n1 > *n || *j1 + *n1 > *n) {
+	return 0;
+    }
+    m = *n1 + *n2;
+/* Computing MAX */
+    i__1 = 1, i__2 = *n * m, i__1 = max(i__1,i__2), i__2 = m * m << 1;
+    if (*lwork < max(i__1,i__2)) {
+	*info = -16;
+/* Computing MAX */
+	i__1 = 1, i__2 = *n * m, i__1 = max(i__1,i__2), i__2 = m * m << 1;
+	work[1] = (doublereal) max(i__1,i__2);
+	return 0;
+    }
+
+    weak = FALSE_;
+    dtrong = FALSE_;
+
+/* Make a local copy of selected block */
+
+    dlaset_("Full", &c__4, &c__4, &c_b5, &c_b5, li, &c__4);
+    dlaset_("Full", &c__4, &c__4, &c_b5, &c_b5, ir, &c__4);
+    dlacpy_("Full", &m, &m, &a[*j1 + *j1 * a_dim1], lda, s, &c__4);
+    dlacpy_("Full", &m, &m, &b[*j1 + *j1 * b_dim1], ldb, t, &c__4);
+
+/* Compute threshold for testing acceptance of swapping. */
+
+    eps = dlamch_("P");
+    smlnum = dlamch_("S") / eps;
+    dscale = 0.;
+    dsum = 1.;
+    dlacpy_("Full", &m, &m, s, &c__4, &work[1], &m);
+    i__1 = m * m;
+    dlassq_(&i__1, &work[1], &c__1, &dscale, &dsum);
+    dlacpy_("Full", &m, &m, t, &c__4, &work[1], &m);
+    i__1 = m * m;
+    dlassq_(&i__1, &work[1], &c__1, &dscale, &dsum);
+    dnorm = dscale * sqrt(dsum);
+/* Computing MAX */
+    d__1 = eps * 10. * dnorm;
+    thresh = max(d__1,smlnum);
+
+    if (m == 2) {
+
+/* CASE 1: Swap 1-by-1 and 1-by-1 blocks. */
+
+/* Compute orthogonal QL and RQ that swap 1-by-1 and 1-by-1 blocks */
+/* using Givens rotations and perform the swap tentatively. */
+
+	f = s[5] * t[0] - t[5] * s[0];
+	g = s[5] * t[4] - t[5] * s[4];
+	sb = abs(t[5]);
+	sa = abs(s[5]);
+	dlartg_(&f, &g, &ir[4], ir, &ddum);
+	ir[1] = -ir[4];
+	ir[5] = ir[0];
+	drot_(&c__2, s, &c__1, &s[4], &c__1, ir, &ir[1]);
+	drot_(&c__2, t, &c__1, &t[4], &c__1, ir, &ir[1]);
+	if (sa >= sb) {
+	    dlartg_(s, &s[1], li, &li[1], &ddum);
+	} else {
+	    dlartg_(t, &t[1], li, &li[1], &ddum);
+	}
+	drot_(&c__2, s, &c__4, &s[1], &c__4, li, &li[1]);
+	drot_(&c__2, t, &c__4, &t[1], &c__4, li, &li[1]);
+	li[5] = li[0];
+	li[4] = -li[1];
+
+/* Weak stability test: */
+/* |S21| + |T21| <= O(EPS * F-norm((S, T))) */
+
+	ws = abs(s[1]) + abs(t[1]);
+	weak = ws <= thresh;
+	if (! weak) {
+	    goto L70;
+	}
+
+	if (TRUE_) {
+
+/* Strong stability test: */
+/* F-norm((A-QL'*S*QR, B-QL'*T*QR)) <= O(EPS*F-norm((A,B))) */
+
+	    dlacpy_("Full", &m, &m, &a[*j1 + *j1 * a_dim1], lda, &work[m * m
+		    + 1], &m);
+	    dgemm_("N", "N", &m, &m, &m, &c_b42, li, &c__4, s, &c__4, &c_b5, &
+		    work[1], &m);
+	    dgemm_("N", "T", &m, &m, &m, &c_b48, &work[1], &m, ir, &c__4, &
+		    c_b42, &work[m * m + 1], &m);
+	    dscale = 0.;
+	    dsum = 1.;
+	    i__1 = m * m;
+	    dlassq_(&i__1, &work[m * m + 1], &c__1, &dscale, &dsum);
+
+	    dlacpy_("Full", &m, &m, &b[*j1 + *j1 * b_dim1], ldb, &work[m * m
+		    + 1], &m);
+	    dgemm_("N", "N", &m, &m, &m, &c_b42, li, &c__4, t, &c__4, &c_b5, &
+		    work[1], &m);
+	    dgemm_("N", "T", &m, &m, &m, &c_b48, &work[1], &m, ir, &c__4, &
+		    c_b42, &work[m * m + 1], &m);
+	    i__1 = m * m;
+	    dlassq_(&i__1, &work[m * m + 1], &c__1, &dscale, &dsum);
+	    ss = dscale * sqrt(dsum);
+	    dtrong = ss <= thresh;
+	    if (! dtrong) {
+		goto L70;
+	    }
+	}
+
+/* Update (A(J1:J1+M-1, M+J1:N), B(J1:J1+M-1, M+J1:N)) and */
+/* (A(1:J1-1, J1:J1+M), B(1:J1-1, J1:J1+M)). */
+
+	i__1 = *j1 + 1;
+	drot_(&i__1, &a[*j1 * a_dim1 + 1], &c__1, &a[(*j1 + 1) * a_dim1 + 1],
+		&c__1, ir, &ir[1]);
+	i__1 = *j1 + 1;
+	drot_(&i__1, &b[*j1 * b_dim1 + 1], &c__1, &b[(*j1 + 1) * b_dim1 + 1],
+		&c__1, ir, &ir[1]);
+	i__1 = *n - *j1 + 1;
+	drot_(&i__1, &a[*j1 + *j1 * a_dim1], lda, &a[*j1 + 1 + *j1 * a_dim1],
+		lda, li, &li[1]);
+	i__1 = *n - *j1 + 1;
+	drot_(&i__1, &b[*j1 + *j1 * b_dim1], ldb, &b[*j1 + 1 + *j1 * b_dim1],
+		ldb, li, &li[1]);
+
+/* Set N1-by-N2 (2,1) - blocks to ZERO. */
+
+	a[*j1 + 1 + *j1 * a_dim1] = 0.;
+	b[*j1 + 1 + *j1 * b_dim1] = 0.;
+
+/* Accumulate transformations into Q and Z if requested. */
+
+	if (*wantz) {
+	    drot_(n, &z__[*j1 * z_dim1 + 1], &c__1, &z__[(*j1 + 1) * z_dim1 +
+		    1], &c__1, ir, &ir[1]);
+	}
+	if (*wantq) {
+	    drot_(n, &q[*j1 * q_dim1 + 1], &c__1, &q[(*j1 + 1) * q_dim1 + 1],
+		    &c__1, li, &li[1]);
+	}
+
+/* Exit with INFO = 0 if swap was successfully performed. */
+
+	return 0;
+
+    } else {
+
+/* CASE 2: Swap 1-by-1 and 2-by-2 blocks, or 2-by-2 */
+/* and 2-by-2 blocks. */
+
+/* Solve the generalized Sylvester equation */
+/* S11 * R - L * S22 = SCALE * S12 */
+/* T11 * R - L * T22 = SCALE * T12 */
+/* for R and L. Solutions in LI and IR. */
+
+	dlacpy_("Full", n1, n2, &t[(*n1 + 1 << 2) - 4], &c__4, li, &c__4);
+	dlacpy_("Full", n1, n2, &s[(*n1 + 1 << 2) - 4], &c__4, &ir[*n2 + 1 + (
+		*n1 + 1 << 2) - 5], &c__4);
+	dtgsy2_("N", &c__0, n1, n2, s, &c__4, &s[*n1 + 1 + (*n1 + 1 << 2) - 5]
+, &c__4, &ir[*n2 + 1 + (*n1 + 1 << 2) - 5], &c__4, t, &c__4, &
+		t[*n1 + 1 + (*n1 + 1 << 2) - 5], &c__4, li, &c__4, &scale, &
+		dsum, &dscale, iwork, &idum, &linfo);
+
+/* Compute orthogonal matrix QL: */
+
+/* QL' * LI = [ TL ] */
+/* [ 0 ] */
+/* where */
+/* LI = [ -L ] */
+/* [ SCALE * identity(N2) ] */
+
+	i__1 = *n2;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    dscal_(n1, &c_b48, &li[(i__ << 2) - 4], &c__1);
+	    li[*n1 + i__ + (i__ << 2) - 5] = scale;
+/* L10: */
+	}
+	dgeqr2_(&m, n2, li, &c__4, taul, &work[1], &linfo);
+	if (linfo != 0) {
+	    goto L70;
+	}
+	dorg2r_(&m, &m, n2, li, &c__4, taul, &work[1], &linfo);
+	if (linfo != 0) {
+	    goto L70;
+	}
+
+/* Compute orthogonal matrix RQ: */
+
+/* IR * RQ' = [ 0 TR], */
+
+/* where IR = [ SCALE * identity(N1), R ] */
+
+	i__1 = *n1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    ir[*n2 + i__ + (i__ << 2) - 5] = scale;
+/* L20: */
+	}
+	dgerq2_(n1, &m, &ir[*n2], &c__4, taur, &work[1], &linfo);
+	if (linfo != 0) {
+	    goto L70;
+	}
+	dorgr2_(&m, &m, n1, ir, &c__4, taur, &work[1], &linfo);
+	if (linfo != 0) {
+	    goto L70;
+	}
+
+/* Perform the swapping tentatively: */
+
+	dgemm_("T", "N", &m, &m, &m, &c_b42, li, &c__4, s, &c__4, &c_b5, &
+		work[1], &m);
+	dgemm_("N", "T", &m, &m, &m, &c_b42, &work[1], &m, ir, &c__4, &c_b5,
+		s, &c__4);
+	dgemm_("T", "N", &m, &m, &m, &c_b42, li, &c__4, t, &c__4, &c_b5, &
+		work[1], &m);
+	dgemm_("N", "T", &m, &m, &m, &c_b42, &work[1], &m, ir, &c__4, &c_b5,
+		t, &c__4);
+	dlacpy_("F", &m, &m, s, &c__4, scpy, &c__4);
+	dlacpy_("F", &m, &m, t, &c__4, tcpy, &c__4);
+	dlacpy_("F", &m, &m, ir, &c__4, ircop, &c__4);
+	dlacpy_("F", &m, &m, li, &c__4, licop, &c__4);
+
+/* Triangularize the B-part by an RQ factorization. */
+/* Apply transformation (from left) to A-part, giving S. */
+
+	dgerq2_(&m, &m, t, &c__4, taur, &work[1], &linfo);
+	if (linfo != 0) {
+	    goto L70;
+	}
+	dormr2_("R", "T", &m, &m, &m, t, &c__4, taur, s, &c__4, &work[1], &
+		linfo);
+	if (linfo != 0) {
+	    goto L70;
+	}
+	dormr2_("L", "N", &m, &m, &m, t, &c__4, taur, ir, &c__4, &work[1], &
+		linfo);
+	if (linfo != 0) {
+	    goto L70;
+	}
+
+/* Compute F-norm(S21) in BRQA21. (T21 is 0.) */
+
+	dscale = 0.;
+	dsum = 1.;
+	i__1 = *n2;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    dlassq_(n1, &s[*n2 + 1 + (i__ << 2) - 5], &c__1, &dscale, &dsum);
+/* L30: */
+	}
+	brqa21 = dscale * sqrt(dsum);
+
+/* Triangularize the B-part by a QR factorization. */
+/* Apply transformation (from right) to A-part, giving S. */
+
+	dgeqr2_(&m, &m, tcpy, &c__4, taul, &work[1], &linfo);
+	if (linfo != 0) {
+	    goto L70;
+	}
+/* NOTE(review): the two DORM2R calls below receive INFO, while the */
+/* error test that follows inspects LINFO (which they never set); this */
+/* mirrors the reference Fortran dtgex2.f and is preserved as-is. */
+	dorm2r_("L", "T", &m, &m, &m, tcpy, &c__4, taul, scpy, &c__4, &work[1]
+, info);
+	dorm2r_("R", "N", &m, &m, &m, tcpy, &c__4, taul, licop, &c__4, &work[
+		1], info);
+	if (linfo != 0) {
+	    goto L70;
+	}
+
+/* Compute F-norm(S21) in BQRA21. (T21 is 0.) */
+
+	dscale = 0.;
+	dsum = 1.;
+	i__1 = *n2;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    dlassq_(n1, &scpy[*n2 + 1 + (i__ << 2) - 5], &c__1, &dscale, &
+		    dsum);
+/* L40: */
+	}
+	bqra21 = dscale * sqrt(dsum);
+
+/* Decide which method to use. */
+/* Weak stability test: */
+/* F-norm(S21) <= O(EPS * F-norm((S, T))) */
+
+	if (bqra21 <= brqa21 && bqra21 <= thresh) {
+	    dlacpy_("F", &m, &m, scpy, &c__4, s, &c__4);
+	    dlacpy_("F", &m, &m, tcpy, &c__4, t, &c__4);
+	    dlacpy_("F", &m, &m, ircop, &c__4, ir, &c__4);
+	    dlacpy_("F", &m, &m, licop, &c__4, li, &c__4);
+	} else if (brqa21 >= thresh) {
+	    goto L70;
+	}
+
+/* Set lower triangle of B-part to zero */
+
+	i__1 = m - 1;
+	i__2 = m - 1;
+	dlaset_("Lower", &i__1, &i__2, &c_b5, &c_b5, &t[1], &c__4);
+
+	if (TRUE_) {
+
+/* Strong stability test: */
+/* F-norm((A-QL*S*QR', B-QL*T*QR')) <= O(EPS*F-norm((A,B))) */
+
+	    dlacpy_("Full", &m, &m, &a[*j1 + *j1 * a_dim1], lda, &work[m * m
+		    + 1], &m);
+	    dgemm_("N", "N", &m, &m, &m, &c_b42, li, &c__4, s, &c__4, &c_b5, &
+		    work[1], &m);
+	    dgemm_("N", "N", &m, &m, &m, &c_b48, &work[1], &m, ir, &c__4, &
+		    c_b42, &work[m * m + 1], &m);
+	    dscale = 0.;
+	    dsum = 1.;
+	    i__1 = m * m;
+	    dlassq_(&i__1, &work[m * m + 1], &c__1, &dscale, &dsum);
+
+	    dlacpy_("Full", &m, &m, &b[*j1 + *j1 * b_dim1], ldb, &work[m * m
+		    + 1], &m);
+	    dgemm_("N", "N", &m, &m, &m, &c_b42, li, &c__4, t, &c__4, &c_b5, &
+		    work[1], &m);
+	    dgemm_("N", "N", &m, &m, &m, &c_b48, &work[1], &m, ir, &c__4, &
+		    c_b42, &work[m * m + 1], &m);
+	    i__1 = m * m;
+	    dlassq_(&i__1, &work[m * m + 1], &c__1, &dscale, &dsum);
+	    ss = dscale * sqrt(dsum);
+	    dtrong = ss <= thresh;
+	    if (! dtrong) {
+		goto L70;
+	    }
+
+	}
+
+/* If the swap is accepted ("weakly" and "strongly"), apply the */
+/* transformations and set N1-by-N2 (2,1)-block to zero. */
+
+	dlaset_("Full", n1, n2, &c_b5, &c_b5, &s[*n2], &c__4);
+
+/* copy back M-by-M diagonal block starting at index J1 of (A, B) */
+
+	dlacpy_("F", &m, &m, s, &c__4, &a[*j1 + *j1 * a_dim1], lda)
+		;
+	dlacpy_("F", &m, &m, t, &c__4, &b[*j1 + *j1 * b_dim1], ldb)
+		;
+	dlaset_("Full", &c__4, &c__4, &c_b5, &c_b5, t, &c__4);
+
+/* Standardize existing 2-by-2 blocks. */
+
+	i__1 = m * m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    work[i__] = 0.;
+/* L50: */
+	}
+	work[1] = 1.;
+	t[0] = 1.;
+	idum = *lwork - m * m - 2;
+	if (*n2 > 1) {
+	    dlagv2_(&a[*j1 + *j1 * a_dim1], lda, &b[*j1 + *j1 * b_dim1], ldb,
+		    ar, ai, be, &work[1], &work[2], t, &t[1]);
+	    work[m + 1] = -work[2];
+	    work[m + 2] = work[1];
+	    t[*n2 + (*n2 << 2) - 5] = t[0];
+	    t[4] = -t[1];
+	}
+	work[m * m] = 1.;
+	t[m + (m << 2) - 5] = 1.;
+
+	if (*n1 > 1) {
+	    dlagv2_(&a[*j1 + *n2 + (*j1 + *n2) * a_dim1], lda, &b[*j1 + *n2 +
+		    (*j1 + *n2) * b_dim1], ldb, taur, taul, &work[m * m + 1],
+		    &work[*n2 * m + *n2 + 1], &work[*n2 * m + *n2 + 2], &t[*
+		    n2 + 1 + (*n2 + 1 << 2) - 5], &t[m + (m - 1 << 2) - 5]);
+	    work[m * m] = work[*n2 * m + *n2 + 1];
+	    work[m * m - 1] = -work[*n2 * m + *n2 + 2];
+	    t[m + (m << 2) - 5] = t[*n2 + 1 + (*n2 + 1 << 2) - 5];
+	    t[m - 1 + (m << 2) - 5] = -t[m + (m - 1 << 2) - 5];
+	}
+	dgemm_("T", "N", n2, n1, n2, &c_b42, &work[1], &m, &a[*j1 + (*j1 + *
+		n2) * a_dim1], lda, &c_b5, &work[m * m + 1], n2);
+	dlacpy_("Full", n2, n1, &work[m * m + 1], n2, &a[*j1 + (*j1 + *n2) *
+		a_dim1], lda);
+	dgemm_("T", "N", n2, n1, n2, &c_b42, &work[1], &m, &b[*j1 + (*j1 + *
+		n2) * b_dim1], ldb, &c_b5, &work[m * m + 1], n2);
+	dlacpy_("Full", n2, n1, &work[m * m + 1], n2, &b[*j1 + (*j1 + *n2) *
+		b_dim1], ldb);
+	dgemm_("N", "N", &m, &m, &m, &c_b42, li, &c__4, &work[1], &m, &c_b5, &
+		work[m * m + 1], &m);
+	dlacpy_("Full", &m, &m, &work[m * m + 1], &m, li, &c__4);
+	dgemm_("N", "N", n2, n1, n1, &c_b42, &a[*j1 + (*j1 + *n2) * a_dim1],
+		lda, &t[*n2 + 1 + (*n2 + 1 << 2) - 5], &c__4, &c_b5, &work[1],
+		n2);
+	dlacpy_("Full", n2, n1, &work[1], n2, &a[*j1 + (*j1 + *n2) * a_dim1],
+		lda);
+	dgemm_("N", "N", n2, n1, n1, &c_b42, &b[*j1 + (*j1 + *n2) * b_dim1],
+		ldb, &t[*n2 + 1 + (*n2 + 1 << 2) - 5], &c__4, &c_b5, &work[1],
+		n2);
+	dlacpy_("Full", n2, n1, &work[1], n2, &b[*j1 + (*j1 + *n2) * b_dim1],
+		ldb);
+	dgemm_("T", "N", &m, &m, &m, &c_b42, ir, &c__4, t, &c__4, &c_b5, &
+		work[1], &m);
+	dlacpy_("Full", &m, &m, &work[1], &m, ir, &c__4);
+
+/* Accumulate transformations into Q and Z if requested. */
+
+	if (*wantq) {
+	    dgemm_("N", "N", n, &m, &m, &c_b42, &q[*j1 * q_dim1 + 1], ldq, li,
+		     &c__4, &c_b5, &work[1], n);
+	    dlacpy_("Full", n, &m, &work[1], n, &q[*j1 * q_dim1 + 1], ldq);
+
+	}
+
+	if (*wantz) {
+	    dgemm_("N", "N", n, &m, &m, &c_b42, &z__[*j1 * z_dim1 + 1], ldz,
+		    ir, &c__4, &c_b5, &work[1], n);
+	    dlacpy_("Full", n, &m, &work[1], n, &z__[*j1 * z_dim1 + 1], ldz);
+
+	}
+
+/* Update (A(J1:J1+M-1, M+J1:N), B(J1:J1+M-1, M+J1:N)) and */
+/* (A(1:J1-1, J1:J1+M), B(1:J1-1, J1:J1+M)). */
+
+	i__ = *j1 + m;
+	if (i__ <= *n) {
+	    i__1 = *n - i__ + 1;
+	    dgemm_("T", "N", &m, &i__1, &m, &c_b42, li, &c__4, &a[*j1 + i__ *
+		    a_dim1], lda, &c_b5, &work[1], &m);
+	    i__1 = *n - i__ + 1;
+	    dlacpy_("Full", &m, &i__1, &work[1], &m, &a[*j1 + i__ * a_dim1],
+		    lda);
+	    i__1 = *n - i__ + 1;
+/* NOTE(review): LDA is passed as the leading dimension of B in the */
+/* DGEMM call below; the analogous copy-back two lines later uses LDB. */
+/* Confirm against the netlib reference dtgex2.f before changing. */
+	    dgemm_("T", "N", &m, &i__1, &m, &c_b42, li, &c__4, &b[*j1 + i__ *
+		    b_dim1], lda, &c_b5, &work[1], &m);
+	    i__1 = *n - i__ + 1;
+	    dlacpy_("Full", &m, &i__1, &work[1], &m, &b[*j1 + i__ * b_dim1],
+		    ldb);
+	}
+	i__ = *j1 - 1;
+	if (i__ > 0) {
+	    dgemm_("N", "N", &i__, &m, &m, &c_b42, &a[*j1 * a_dim1 + 1], lda,
+		    ir, &c__4, &c_b5, &work[1], &i__);
+	    dlacpy_("Full", &i__, &m, &work[1], &i__, &a[*j1 * a_dim1 + 1],
+		    lda);
+	    dgemm_("N", "N", &i__, &m, &m, &c_b42, &b[*j1 * b_dim1 + 1], ldb,
+		    ir, &c__4, &c_b5, &work[1], &i__);
+	    dlacpy_("Full", &i__, &m, &work[1], &i__, &b[*j1 * b_dim1 + 1],
+		    ldb);
+	}
+
+/* Exit with INFO = 0 if swap was successfully performed. */
+
+	return 0;
+
+    }
+
+/* Exit with INFO = 1 if swap was rejected. */
+
+L70:
+
+    *info = 1;
+    return 0;
+
+/* End of DTGEX2 */
+
+} /* dtgex2_ */
diff --git a/contrib/libs/clapack/dtgexc.c b/contrib/libs/clapack/dtgexc.c
new file mode 100644
index 0000000000..d816a069ba
--- /dev/null
+++ b/contrib/libs/clapack/dtgexc.c
@@ -0,0 +1,514 @@
+/* dtgexc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int dtgexc_(logical *wantq, logical *wantz, integer *n,
+	doublereal *a, integer *lda, doublereal *b, integer *ldb, doublereal *
+	q, integer *ldq, doublereal *z__, integer *ldz, integer *ifst,
+	integer *ilst, doublereal *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1,
+	    z_offset, i__1;
+
+    /* Local variables */
+    integer nbf, nbl, here, lwmin;
+    extern /* Subroutine */ int dtgex2_(logical *, logical *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, integer *, integer *, integer *, integer
+	    *, doublereal *, integer *, integer *), xerbla_(char *, integer *);
+    integer nbnext;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTGEXC reorders the generalized real Schur decomposition of a real */
+/* matrix pair (A,B) using an orthogonal equivalence transformation */
+
+/* (A, B) = Q * (A, B) * Z', */
+
+/* so that the diagonal block of (A, B) with row index IFST is moved */
+/* to row ILST. */
+
+/* (A, B) must be in generalized real Schur canonical form (as returned */
+/* by DGGES), i.e. A is block upper triangular with 1-by-1 and 2-by-2 */
+/* diagonal blocks. B is upper triangular. */
+
+/* Optionally, the matrices Q and Z of generalized Schur vectors are */
+/* updated. */
+
+/* Q(in) * A(in) * Z(in)' = Q(out) * A(out) * Z(out)' */
+/* Q(in) * B(in) * Z(in)' = Q(out) * B(out) * Z(out)' */
+
+
+/* Arguments */
+/* ========= */
+
+/* WANTQ (input) LOGICAL */
+/* .TRUE. : update the left transformation matrix Q; */
+/* .FALSE.: do not update Q. */
+
+/* WANTZ (input) LOGICAL */
+/* .TRUE. : update the right transformation matrix Z; */
+/* .FALSE.: do not update Z. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the matrix A in generalized real Schur canonical */
+/* form. */
+/* On exit, the updated matrix A, again in generalized */
+/* real Schur canonical form. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,N) */
+/* On entry, the matrix B in generalized real Schur canonical */
+/* form (A,B). */
+/* On exit, the updated matrix B, again in generalized */
+/* real Schur canonical form (A,B). */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* Q (input/output) DOUBLE PRECISION array, dimension (LDZ,N) */
+/* On entry, if WANTQ = .TRUE., the orthogonal matrix Q. */
+/* On exit, the updated matrix Q. */
+/* If WANTQ = .FALSE., Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= 1. */
+/* If WANTQ = .TRUE., LDQ >= N. */
+
+/* Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N) */
+/* On entry, if WANTZ = .TRUE., the orthogonal matrix Z. */
+/* On exit, the updated matrix Z. */
+/* If WANTZ = .FALSE., Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1. */
+/* If WANTZ = .TRUE., LDZ >= N. */
+
+/* IFST (input/output) INTEGER */
+/* ILST (input/output) INTEGER */
+/* Specify the reordering of the diagonal blocks of (A, B). */
+/* The block with row index IFST is moved to row ILST, by a */
+/* sequence of swapping between adjacent blocks. */
+/* On exit, if IFST pointed on entry to the second row of */
+/* a 2-by-2 block, it is changed to point to the first row; */
+/* ILST always points to the first row of the block in its */
+/* final position (which may differ from its input value by */
+/* +1 or -1). 1 <= IFST, ILST <= N. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* LWORK >= 1 when N <= 1, otherwise LWORK >= 4*N + 16. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* =0: successful exit. */
+/* <0: if INFO = -i, the i-th argument had an illegal value. */
+/* =1: The transformed matrix pair (A, B) would be too far */
+/* from generalized Schur form; the problem is ill- */
+/* conditioned. (A, B) may have been partially reordered, */
+/* and ILST points to the first row of the current */
+/* position of the block being moved. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */
+/* Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */
+/* M.S. Moonen et al (eds), Linear Algebra for Large Scale and */
+/* Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test input arguments. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1;
+    if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    } else if (*ldq < 1 || *wantq && *ldq < max(1,*n)) {
+	*info = -9;
+    } else if (*ldz < 1 || *wantz && *ldz < max(1,*n)) {
+	*info = -11;
+    } else if (*ifst < 1 || *ifst > *n) {
+	*info = -12;
+    } else if (*ilst < 1 || *ilst > *n) {
+	*info = -13;
+    }
+
+    if (*info == 0) {
+	if (*n <= 1) {
+	    lwmin = 1;
+	} else {
+/*           (*n << 2) + 16 is 4*N + 16; cf. the LWORK description above. */
+	    lwmin = (*n << 2) + 16;
+	}
+	work[1] = (doublereal) lwmin;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -15;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DTGEXC", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n <= 1) {
+	return 0;
+    }
+
+/* Determine the first row of the specified block and find out */
+/* if it is 1-by-1 or 2-by-2. */
+
+    if (*ifst > 1) {
+	if (a[*ifst + (*ifst - 1) * a_dim1] != 0.) {
+	    --(*ifst);
+	}
+    }
+    nbf = 1;
+    if (*ifst < *n) {
+	if (a[*ifst + 1 + *ifst * a_dim1] != 0.) {
+	    nbf = 2;
+	}
+    }
+
+/* Determine the first row of the final block */
+/* and find out if it is 1-by-1 or 2-by-2. */
+
+    if (*ilst > 1) {
+	if (a[*ilst + (*ilst - 1) * a_dim1] != 0.) {
+	    --(*ilst);
+	}
+    }
+    nbl = 1;
+    if (*ilst < *n) {
+	if (a[*ilst + 1 + *ilst * a_dim1] != 0.) {
+	    nbl = 2;
+	}
+    }
+    if (*ifst == *ilst) {
+	return 0;
+    }
+
+    if (*ifst < *ilst) {
+
+/* Update ILST. */
+
+	if (nbf == 2 && nbl == 1) {
+	    --(*ilst);
+	}
+	if (nbf == 1 && nbl == 2) {
+	    ++(*ilst);
+	}
+
+	here = *ifst;
+
+L10:
+
+/* Swap with next one below. */
+
+	if (nbf == 1 || nbf == 2) {
+
+/* Current block either 1-by-1 or 2-by-2. */
+
+	    nbnext = 1;
+	    if (here + nbf + 1 <= *n) {
+		if (a[here + nbf + 1 + (here + nbf) * a_dim1] != 0.) {
+		    nbnext = 2;
+		}
+	    }
+	    dtgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset], ldb, &q[
+		    q_offset], ldq, &z__[z_offset], ldz, &here, &nbf, &nbnext,
+		    &work[1], lwork, info);
+	    if (*info != 0) {
+		*ilst = here;
+		return 0;
+	    }
+	    here += nbnext;
+
+/* Test if 2-by-2 block breaks into two 1-by-1 blocks. */
+
+	    if (nbf == 2) {
+		if (a[here + 1 + here * a_dim1] == 0.) {
+/*                 NBF = 3 flags the split; the two 1-by-1 blocks are */
+/*                 then swapped one at a time by the ELSE branch below. */
+		    nbf = 3;
+		}
+	    }
+
+	} else {
+
+/* Current block consists of two 1-by-1 blocks, each of which */
+/* must be swapped individually. */
+
+	    nbnext = 1;
+	    if (here + 3 <= *n) {
+		if (a[here + 3 + (here + 2) * a_dim1] != 0.) {
+		    nbnext = 2;
+		}
+	    }
+	    i__1 = here + 1;
+	    dtgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset], ldb, &q[
+		    q_offset], ldq, &z__[z_offset], ldz, &i__1, &c__1, &
+		    nbnext, &work[1], lwork, info);
+	    if (*info != 0) {
+		*ilst = here;
+		return 0;
+	    }
+	    if (nbnext == 1) {
+
+/* Swap two 1-by-1 blocks. */
+
+		dtgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset], ldb,
+			 &q[q_offset], ldq, &z__[z_offset], ldz, &here, &c__1,
+			 &c__1, &work[1], lwork, info);
+		if (*info != 0) {
+		    *ilst = here;
+		    return 0;
+		}
+		++here;
+
+	    } else {
+
+/* Recompute NBNEXT in case of 2-by-2 split. */
+
+		if (a[here + 2 + (here + 1) * a_dim1] == 0.) {
+		    nbnext = 1;
+		}
+		if (nbnext == 2) {
+
+/* 2-by-2 block did not split. */
+
+		    dtgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset],
+			    ldb, &q[q_offset], ldq, &z__[z_offset], ldz, &
+			    here, &c__1, &nbnext, &work[1], lwork, info);
+		    if (*info != 0) {
+			*ilst = here;
+			return 0;
+		    }
+		    here += 2;
+		} else {
+
+/* 2-by-2 block did split. */
+
+		    dtgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset],
+			    ldb, &q[q_offset], ldq, &z__[z_offset], ldz, &
+			    here, &c__1, &c__1, &work[1], lwork, info);
+		    if (*info != 0) {
+			*ilst = here;
+			return 0;
+		    }
+		    ++here;
+		    dtgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset],
+			    ldb, &q[q_offset], ldq, &z__[z_offset], ldz, &
+			    here, &c__1, &c__1, &work[1], lwork, info);
+		    if (*info != 0) {
+			*ilst = here;
+			return 0;
+		    }
+		    ++here;
+		}
+
+	    }
+	}
+	if (here < *ilst) {
+	    goto L10;
+	}
+    } else {
+	here = *ifst;
+
+L20:
+
+/* Swap with next one above. */
+
+	if (nbf == 1 || nbf == 2) {
+
+/* Current block either 1-by-1 or 2-by-2. */
+
+	    nbnext = 1;
+	    if (here >= 3) {
+		if (a[here - 1 + (here - 2) * a_dim1] != 0.) {
+		    nbnext = 2;
+		}
+	    }
+	    i__1 = here - nbnext;
+	    dtgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset], ldb, &q[
+		    q_offset], ldq, &z__[z_offset], ldz, &i__1, &nbnext, &nbf,
+		    &work[1], lwork, info);
+	    if (*info != 0) {
+		*ilst = here;
+		return 0;
+	    }
+	    here -= nbnext;
+
+/* Test if 2-by-2 block breaks into two 1-by-1 blocks. */
+
+	    if (nbf == 2) {
+		if (a[here + 1 + here * a_dim1] == 0.) {
+/*                 NBF = 3 flags the split; the two 1-by-1 blocks are */
+/*                 then swapped one at a time by the ELSE branch below. */
+		    nbf = 3;
+		}
+	    }
+
+	} else {
+
+/* Current block consists of two 1-by-1 blocks, each of which */
+/* must be swapped individually. */
+
+	    nbnext = 1;
+	    if (here >= 3) {
+		if (a[here - 1 + (here - 2) * a_dim1] != 0.) {
+		    nbnext = 2;
+		}
+	    }
+	    i__1 = here - nbnext;
+	    dtgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset], ldb, &q[
+		    q_offset], ldq, &z__[z_offset], ldz, &i__1, &nbnext, &
+		    c__1, &work[1], lwork, info);
+	    if (*info != 0) {
+		*ilst = here;
+		return 0;
+	    }
+	    if (nbnext == 1) {
+
+/* Swap two 1-by-1 blocks. */
+
+		dtgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset], ldb,
+			 &q[q_offset], ldq, &z__[z_offset], ldz, &here, &
+			nbnext, &c__1, &work[1], lwork, info);
+		if (*info != 0) {
+		    *ilst = here;
+		    return 0;
+		}
+		--here;
+	    } else {
+
+/* Recompute NBNEXT in case of 2-by-2 split. */
+
+		if (a[here + (here - 1) * a_dim1] == 0.) {
+		    nbnext = 1;
+		}
+		if (nbnext == 2) {
+
+/* 2-by-2 block did not split. */
+
+		    i__1 = here - 1;
+		    dtgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset],
+			    ldb, &q[q_offset], ldq, &z__[z_offset], ldz, &
+			    i__1, &c__2, &c__1, &work[1], lwork, info);
+		    if (*info != 0) {
+			*ilst = here;
+			return 0;
+		    }
+/*                 Move up past the 2-by-2 block just swapped. */
+		    here += -2;
+		} else {
+
+/* 2-by-2 block did split. */
+
+		    dtgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset],
+			    ldb, &q[q_offset], ldq, &z__[z_offset], ldz, &
+			    here, &c__1, &c__1, &work[1], lwork, info);
+		    if (*info != 0) {
+			*ilst = here;
+			return 0;
+		    }
+		    --here;
+		    dtgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset],
+			    ldb, &q[q_offset], ldq, &z__[z_offset], ldz, &
+			    here, &c__1, &c__1, &work[1], lwork, info);
+		    if (*info != 0) {
+			*ilst = here;
+			return 0;
+		    }
+		    --here;
+		}
+	    }
+	}
+	if (here > *ilst) {
+	    goto L20;
+	}
+    }
+    *ilst = here;
+    work[1] = (doublereal) lwmin;
+    return 0;
+
+/* End of DTGEXC */
+
+} /* dtgexc_ */
diff --git a/contrib/libs/clapack/dtgsen.c b/contrib/libs/clapack/dtgsen.c
new file mode 100644
index 0000000000..053390bec4
--- /dev/null
+++ b/contrib/libs/clapack/dtgsen.c
@@ -0,0 +1,836 @@
+/* dtgsen.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__2 = 2;
+static doublereal c_b28 = 1.;
+
+/* Subroutine */ int dtgsen_(integer *ijob, logical *wantq, logical *wantz,
+ logical *select, integer *n, doublereal *a, integer *lda, doublereal *
+ b, integer *ldb, doublereal *alphar, doublereal *alphai, doublereal *
+ beta, doublereal *q, integer *ldq, doublereal *z__, integer *ldz,
+ integer *m, doublereal *pl, doublereal *pr, doublereal *dif,
+ doublereal *work, integer *lwork, integer *iwork, integer *liwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1,
+ z_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ integer i__, k, n1, n2, kk, ks, mn2, ijb;
+ doublereal eps;
+ integer kase;
+ logical pair;
+ integer ierr;
+ doublereal dsum;
+ logical swap;
+ extern /* Subroutine */ int dlag2_(doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *);
+ integer isave[3];
+ logical wantd;
+ integer lwmin;
+ logical wantp;
+ extern /* Subroutine */ int dlacn2_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ logical wantd1, wantd2;
+ extern doublereal dlamch_(char *);
+ doublereal dscale, rdscal;
+ extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *),
+ xerbla_(char *, integer *), dtgexc_(logical *, logical *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *, integer *,
+ integer *, doublereal *, integer *, integer *), dlassq_(integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ integer liwmin;
+ extern /* Subroutine */ int dtgsyl_(char *, integer *, integer *, integer
+ *, doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, integer *, integer *);
+ doublereal smlnum;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/* January 2007 */
+
+/* Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTGSEN reorders the generalized real Schur decomposition of a real */
+/* matrix pair (A, B) (in terms of an orthonormal equivalence trans- */
+/* formation Q' * (A, B) * Z), so that a selected cluster of eigenvalues */
+/* appears in the leading diagonal blocks of the upper quasi-triangular */
+/* matrix A and the upper triangular B. The leading columns of Q and */
+/* Z form orthonormal bases of the corresponding left and right eigen- */
+/* spaces (deflating subspaces). (A, B) must be in generalized real */
+/* Schur canonical form (as returned by DGGES), i.e. A is block upper */
+/* triangular with 1-by-1 and 2-by-2 diagonal blocks. B is upper */
+/* triangular. */
+
+/* DTGSEN also computes the generalized eigenvalues */
+
+/* w(j) = (ALPHAR(j) + i*ALPHAI(j))/BETA(j) */
+
+/* of the reordered matrix pair (A, B). */
+
+/* Optionally, DTGSEN computes the estimates of reciprocal condition */
+/* numbers for eigenvalues and eigenspaces. These are Difu[(A11,B11), */
+/* (A22,B22)] and Difl[(A11,B11), (A22,B22)], i.e. the separation(s) */
+/* between the matrix pairs (A11, B11) and (A22,B22) that correspond to */
+/* the selected cluster and the eigenvalues outside the cluster, resp., */
+/* and norms of "projections" onto left and right eigenspaces w.r.t. */
+/* the selected cluster in the (1,1)-block. */
+
+/* Arguments */
+/* ========= */
+
+/* IJOB (input) INTEGER */
+/* Specifies whether condition numbers are required for the */
+/* cluster of eigenvalues (PL and PR) or the deflating subspaces */
+/* (Difu and Difl): */
+/* =0: Only reorder w.r.t. SELECT. No extras. */
+/* =1: Reciprocal of norms of "projections" onto left and right */
+/* eigenspaces w.r.t. the selected cluster (PL and PR). */
+/* =2: Upper bounds on Difu and Difl. F-norm-based estimate */
+/* (DIF(1:2)). */
+/* =3: Estimate of Difu and Difl. 1-norm-based estimate */
+/* (DIF(1:2)). */
+/* About 5 times as expensive as IJOB = 2. */
+/* =4: Compute PL, PR and DIF (i.e. 0, 1 and 2 above): Economic */
+/* version to get it all. */
+/* =5: Compute PL, PR and DIF (i.e. 0, 1 and 3 above) */
+
+/* WANTQ (input) LOGICAL */
+/* .TRUE. : update the left transformation matrix Q; */
+/* .FALSE.: do not update Q. */
+
+/* WANTZ (input) LOGICAL */
+/* .TRUE. : update the right transformation matrix Z; */
+/* .FALSE.: do not update Z. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* SELECT specifies the eigenvalues in the selected cluster. */
+/* To select a real eigenvalue w(j), SELECT(j) must be set to */
+/* .TRUE.. To select a complex conjugate pair of eigenvalues */
+/* w(j) and w(j+1), corresponding to a 2-by-2 diagonal block, */
+/* either SELECT(j) or SELECT(j+1) or both must be set to */
+/* .TRUE.; a complex conjugate pair of eigenvalues must be */
+/* either both included in the cluster or both excluded. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension(LDA,N) */
+/* On entry, the upper quasi-triangular matrix A, with (A, B) in */
+/* generalized real Schur canonical form. */
+/* On exit, A is overwritten by the reordered matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension(LDB,N) */
+/* On entry, the upper triangular matrix B, with (A, B) in */
+/* generalized real Schur canonical form. */
+/* On exit, B is overwritten by the reordered matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* ALPHAR (output) DOUBLE PRECISION array, dimension (N) */
+/* ALPHAI (output) DOUBLE PRECISION array, dimension (N) */
+/* BETA (output) DOUBLE PRECISION array, dimension (N) */
+/* On exit, (ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N, will */
+/* be the generalized eigenvalues. ALPHAR(j) + ALPHAI(j)*i */
+/* and BETA(j),j=1,...,N are the diagonals of the complex Schur */
+/* form (S,T) that would result if the 2-by-2 diagonal blocks of */
+/* the real generalized Schur form of (A,B) were further reduced */
+/* to triangular form using complex unitary transformations. */
+/* If ALPHAI(j) is zero, then the j-th eigenvalue is real; if */
+/* positive, then the j-th and (j+1)-st eigenvalues are a */
+/* complex conjugate pair, with ALPHAI(j+1) negative. */
+
+/* Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N) */
+/* On entry, if WANTQ = .TRUE., Q is an N-by-N matrix. */
+/* On exit, Q has been postmultiplied by the left orthogonal */
+/* transformation matrix which reorder (A, B); The leading M */
+/* columns of Q form orthonormal bases for the specified pair of */
+/* left eigenspaces (deflating subspaces). */
+/* If WANTQ = .FALSE., Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= 1; */
+/* and if WANTQ = .TRUE., LDQ >= N. */
+
+/* Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N) */
+/* On entry, if WANTZ = .TRUE., Z is an N-by-N matrix. */
+/* On exit, Z has been postmultiplied by the left orthogonal */
+/* transformation matrix which reorder (A, B); The leading M */
+/* columns of Z form orthonormal bases for the specified pair of */
+/* left eigenspaces (deflating subspaces). */
+/* If WANTZ = .FALSE., Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1; */
+/* If WANTZ = .TRUE., LDZ >= N. */
+
+/* M (output) INTEGER */
+/* The dimension of the specified pair of left and right eigen- */
+/* spaces (deflating subspaces). 0 <= M <= N. */
+
+/* PL (output) DOUBLE PRECISION */
+/* PR (output) DOUBLE PRECISION */
+/* If IJOB = 1, 4 or 5, PL, PR are lower bounds on the */
+/* reciprocal of the norm of "projections" onto left and right */
+/* eigenspaces with respect to the selected cluster. */
+/* 0 < PL, PR <= 1. */
+/* If M = 0 or M = N, PL = PR = 1. */
+/* If IJOB = 0, 2 or 3, PL and PR are not referenced. */
+
+/* DIF (output) DOUBLE PRECISION array, dimension (2). */
+/* If IJOB >= 2, DIF(1:2) store the estimates of Difu and Difl. */
+/* If IJOB = 2 or 4, DIF(1:2) are F-norm-based upper bounds on */
+/* Difu and Difl. If IJOB = 3 or 5, DIF(1:2) are 1-norm-based */
+/* estimates of Difu and Difl. */
+/* If M = 0 or N, DIF(1:2) = F-norm([A, B]). */
+/* If IJOB = 0 or 1, DIF is not referenced. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, */
+/* dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= 4*N+16. */
+/* If IJOB = 1, 2 or 4, LWORK >= MAX(4*N+16, 2*M*(N-M)). */
+/* If IJOB = 3 or 5, LWORK >= MAX(4*N+16, 4*M*(N-M)). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* IF IJOB = 0, IWORK is not referenced. Otherwise, */
+/* on exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. LIWORK >= 1. */
+/* If IJOB = 1, 2 or 4, LIWORK >= N+6. */
+/* If IJOB = 3 or 5, LIWORK >= MAX(2*M*(N-M), N+6). */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal size of the IWORK array, */
+/* returns this value as the first entry of the IWORK array, and */
+/* no error message related to LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* =0: Successful exit. */
+/* <0: If INFO = -i, the i-th argument had an illegal value. */
+/* =1: Reordering of (A, B) failed because the transformed */
+/* matrix pair (A, B) would be too far from generalized */
+/* Schur form; the problem is very ill-conditioned. */
+/* (A, B) may have been partially reordered. */
+/* If requested, 0 is returned in DIF(*), PL and PR. */
+
+/* Further Details */
+/* =============== */
+
+/* DTGSEN first collects the selected eigenvalues by computing */
+/* orthogonal U and W that move them to the top left corner of (A, B). */
+/* In other words, the selected eigenvalues are the eigenvalues of */
+/* (A11, B11) in: */
+
+/* U'*(A, B)*W = (A11 A12) (B11 B12) n1 */
+/* ( 0 A22),( 0 B22) n2 */
+/* n1 n2 n1 n2 */
+
+/* where N = n1+n2 and U' means the transpose of U. The first n1 columns */
+/* of U and W span the specified pair of left and right eigenspaces */
+/* (deflating subspaces) of (A, B). */
+
+/* If (A, B) has been obtained from the generalized real Schur */
+/* decomposition of a matrix pair (C, D) = Q*(A, B)*Z', then the */
+/* reordered generalized real Schur form of (C, D) is given by */
+
+/* (C, D) = (Q*U)*(U'*(A, B)*W)*(Z*W)', */
+
+/* and the first n1 columns of Q*U and Z*W span the corresponding */
+/* deflating subspaces of (C, D) (Q and Z store Q*U and Z*W, resp.). */
+
+/* Note that if the selected eigenvalue is sufficiently ill-conditioned, */
+/* then its value may differ significantly from its value before */
+/* reordering. */
+
+/* The reciprocal condition numbers of the left and right eigenspaces */
+/* spanned by the first n1 columns of U and W (or Q*U and Z*W) may */
+/* be returned in DIF(1:2), corresponding to Difu and Difl, resp. */
+
+/* The Difu and Difl are defined as: */
+
+/* Difu[(A11, B11), (A22, B22)] = sigma-min( Zu ) */
+/* and */
+/* Difl[(A11, B11), (A22, B22)] = Difu[(A22, B22), (A11, B11)], */
+
+/* where sigma-min(Zu) is the smallest singular value of the */
+/* (2*n1*n2)-by-(2*n1*n2) matrix */
+
+/* Zu = [ kron(In2, A11) -kron(A22', In1) ] */
+/* [ kron(In2, B11) -kron(B22', In1) ]. */
+
+/* Here, Inx is the identity matrix of size nx and A22' is the */
+/* transpose of A22. kron(X, Y) is the Kronecker product between */
+/* the matrices X and Y. */
+
+/* When DIF(2) is small, small changes in (A, B) can cause large changes */
+/* in the deflating subspace. An approximate (asymptotic) bound on the */
+/* maximum angular error in the computed deflating subspaces is */
+
+/* EPS * norm((A, B)) / DIF(2), */
+
+/* where EPS is the machine precision. */
+
+/* The reciprocal norm of the projectors on the left and right */
+/* eigenspaces associated with (A11, B11) may be returned in PL and PR. */
+/* They are computed as follows. First we compute L and R so that */
+/* P*(A, B)*Q is block diagonal, where */
+
+/* P = ( I -L ) n1 Q = ( I R ) n1 */
+/* ( 0 I ) n2 and ( 0 I ) n2 */
+/* n1 n2 n1 n2 */
+
+/* and (L, R) is the solution to the generalized Sylvester equation */
+
+/* A11*R - L*A22 = -A12 */
+/* B11*R - L*B22 = -B12 */
+
+/* Then PL = (F-norm(L)**2+1)**(-1/2) and PR = (F-norm(R)**2+1)**(-1/2). */
+/* An approximate (asymptotic) bound on the average absolute error of */
+/* the selected eigenvalues is */
+
+/* EPS * norm((A, B)) / PL. */
+
+/* There are also global error bounds which valid for perturbations up */
+/* to a certain restriction: A lower bound (x) on the smallest */
+/* F-norm(E,F) for which an eigenvalue of (A11, B11) may move and */
+/* coalesce with an eigenvalue of (A22, B22) under perturbation (E,F), */
+/* (i.e. (A + E, B + F), is */
+
+/* x = min(Difu,Difl)/((1/(PL*PL)+1/(PR*PR))**(1/2)+2*max(1/PL,1/PR)). */
+
+/* An approximate bound on x can be computed from DIF(1:2), PL and PR. */
+
+/* If y = ( F-norm(E,F) / x) <= 1, the angles between the perturbed */
+/* (L', R') and unperturbed (L, R) left and right deflating subspaces */
+/* associated with the selected cluster in the (1,1)-blocks can be */
+/* bounded as */
+
+/* max-angle(L, L') <= arctan( y * PL / (1 - y * (1 - PL * PL)**(1/2)) */
+/* max-angle(R, R') <= arctan( y * PR / (1 - y * (1 - PR * PR)**(1/2)) */
+
+/* See LAPACK User's Guide section 4.11 or the following references */
+/* for more information. */
+
+/* Note that if the default method for computing the Frobenius-norm- */
+/* based estimate DIF is not wanted (see DLATDF), then the parameter */
+/* IDIFJB (see below) should be changed from 3 to 4 (routine DLATDF */
+/* (IJOB = 2 will be used)). See DTGSYL for more details. */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* References */
+/* ========== */
+
+/* [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */
+/* Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */
+/* M.S. Moonen et al (eds), Linear Algebra for Large Scale and */
+/* Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */
+
+/* [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified */
+/* Eigenvalues of a Regular Matrix Pair (A, B) and Condition */
+/* Estimation: Theory, Algorithms and Software, */
+/* Report UMINF - 94.04, Department of Computing Science, Umea */
+/* University, S-901 87 Umea, Sweden, 1994. Also as LAPACK Working */
+/* Note 87. To appear in Numerical Algorithms, 1996. */
+
+/* [3] B. Kagstrom and P. Poromaa, LAPACK-Style Algorithms and Software */
+/* for Solving the Generalized Sylvester Equation and Estimating the */
+/* Separation between Regular Matrix Pairs, Report UMINF - 93.23, */
+/* Department of Computing Science, Umea University, S-901 87 Umea, */
+/* Sweden, December 1993, Revised April 1994, Also as LAPACK Working */
+/* Note 75. To appear in ACM Trans. on Math. Software, Vol 22, No 1, */
+/* 1996. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+ /* Parameter adjustments */
+ --select;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alphar;
+ --alphai;
+ --beta;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --dif;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1 || *liwork == -1;
+
+ if (*ijob < 0 || *ijob > 5) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldq < 1 || *wantq && *ldq < *n) {
+ *info = -14;
+ } else if (*ldz < 1 || *wantz && *ldz < *n) {
+ *info = -16;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTGSEN", &i__1);
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = dlamch_("P");
+ smlnum = dlamch_("S") / eps;
+ ierr = 0;
+
+ wantp = *ijob == 1 || *ijob >= 4;
+ wantd1 = *ijob == 2 || *ijob == 4;
+ wantd2 = *ijob == 3 || *ijob == 5;
+ wantd = wantd1 || wantd2;
+
+/* Set M to the dimension of the specified pair of deflating */
+/* subspaces. */
+
+ *m = 0;
+ pair = FALSE_;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (pair) {
+ pair = FALSE_;
+ } else {
+ if (k < *n) {
+ if (a[k + 1 + k * a_dim1] == 0.) {
+ if (select[k]) {
+ ++(*m);
+ }
+ } else {
+ pair = TRUE_;
+ if (select[k] || select[k + 1]) {
+ *m += 2;
+ }
+ }
+ } else {
+ if (select[*n]) {
+ ++(*m);
+ }
+ }
+ }
+/* L10: */
+ }
+
+ if (*ijob == 1 || *ijob == 2 || *ijob == 4) {
+/* Computing MAX */
+ i__1 = 1, i__2 = (*n << 2) + 16, i__1 = max(i__1,i__2), i__2 = (*m <<
+ 1) * (*n - *m);
+ lwmin = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = 1, i__2 = *n + 6;
+ liwmin = max(i__1,i__2);
+ } else if (*ijob == 3 || *ijob == 5) {
+/* Computing MAX */
+ i__1 = 1, i__2 = (*n << 2) + 16, i__1 = max(i__1,i__2), i__2 = (*m <<
+ 2) * (*n - *m);
+ lwmin = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = 1, i__2 = (*m << 1) * (*n - *m), i__1 = max(i__1,i__2), i__2 =
+ *n + 6;
+ liwmin = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = 1, i__2 = (*n << 2) + 16;
+ lwmin = max(i__1,i__2);
+ liwmin = 1;
+ }
+
+ work[1] = (doublereal) lwmin;
+ iwork[1] = liwmin;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -22;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -24;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTGSEN", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == *n || *m == 0) {
+ if (wantp) {
+ *pl = 1.;
+ *pr = 1.;
+ }
+ if (wantd) {
+ dscale = 0.;
+ dsum = 1.;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlassq_(n, &a[i__ * a_dim1 + 1], &c__1, &dscale, &dsum);
+ dlassq_(n, &b[i__ * b_dim1 + 1], &c__1, &dscale, &dsum);
+/* L20: */
+ }
+ dif[1] = dscale * sqrt(dsum);
+ dif[2] = dif[1];
+ }
+ goto L60;
+ }
+
+/* Collect the selected blocks at the top-left corner of (A, B). */
+
+ ks = 0;
+ pair = FALSE_;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (pair) {
+ pair = FALSE_;
+ } else {
+
+ swap = select[k];
+ if (k < *n) {
+ if (a[k + 1 + k * a_dim1] != 0.) {
+ pair = TRUE_;
+ swap = swap || select[k + 1];
+ }
+ }
+
+ if (swap) {
+ ++ks;
+
+/* Swap the K-th block to position KS. */
+/* Perform the reordering of diagonal blocks in (A, B) */
+/* by orthogonal transformation matrices and update */
+/* Q and Z accordingly (if requested): */
+
+ kk = k;
+ if (k != ks) {
+ dtgexc_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset],
+ ldb, &q[q_offset], ldq, &z__[z_offset], ldz, &kk,
+ &ks, &work[1], lwork, &ierr);
+ }
+
+ if (ierr > 0) {
+
+/* Swap is rejected: exit. */
+
+ *info = 1;
+ if (wantp) {
+ *pl = 0.;
+ *pr = 0.;
+ }
+ if (wantd) {
+ dif[1] = 0.;
+ dif[2] = 0.;
+ }
+ goto L60;
+ }
+
+ if (pair) {
+ ++ks;
+ }
+ }
+ }
+/* L30: */
+ }
+ if (wantp) {
+
+/* Solve generalized Sylvester equation for R and L */
+/* and compute PL and PR. */
+
+ n1 = *m;
+ n2 = *n - *m;
+ i__ = n1 + 1;
+ ijb = 0;
+ dlacpy_("Full", &n1, &n2, &a[i__ * a_dim1 + 1], lda, &work[1], &n1);
+ dlacpy_("Full", &n1, &n2, &b[i__ * b_dim1 + 1], ldb, &work[n1 * n2 +
+ 1], &n1);
+ i__1 = *lwork - (n1 << 1) * n2;
+ dtgsyl_("N", &ijb, &n1, &n2, &a[a_offset], lda, &a[i__ + i__ * a_dim1]
+, lda, &work[1], &n1, &b[b_offset], ldb, &b[i__ + i__ *
+ b_dim1], ldb, &work[n1 * n2 + 1], &n1, &dscale, &dif[1], &
+ work[(n1 * n2 << 1) + 1], &i__1, &iwork[1], &ierr);
+
+/* Estimate the reciprocal of norms of "projections" onto left */
+/* and right eigenspaces. */
+
+ rdscal = 0.;
+ dsum = 1.;
+ i__1 = n1 * n2;
+ dlassq_(&i__1, &work[1], &c__1, &rdscal, &dsum);
+ *pl = rdscal * sqrt(dsum);
+ if (*pl == 0.) {
+ *pl = 1.;
+ } else {
+ *pl = dscale / (sqrt(dscale * dscale / *pl + *pl) * sqrt(*pl));
+ }
+ rdscal = 0.;
+ dsum = 1.;
+ i__1 = n1 * n2;
+ dlassq_(&i__1, &work[n1 * n2 + 1], &c__1, &rdscal, &dsum);
+ *pr = rdscal * sqrt(dsum);
+ if (*pr == 0.) {
+ *pr = 1.;
+ } else {
+ *pr = dscale / (sqrt(dscale * dscale / *pr + *pr) * sqrt(*pr));
+ }
+ }
+
+ if (wantd) {
+
+/* Compute estimates of Difu and Difl. */
+
+ if (wantd1) {
+ n1 = *m;
+ n2 = *n - *m;
+ i__ = n1 + 1;
+ ijb = 3;
+
+/* Frobenius norm-based Difu-estimate. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ dtgsyl_("N", &ijb, &n1, &n2, &a[a_offset], lda, &a[i__ + i__ *
+ a_dim1], lda, &work[1], &n1, &b[b_offset], ldb, &b[i__ +
+ i__ * b_dim1], ldb, &work[n1 * n2 + 1], &n1, &dscale, &
+ dif[1], &work[(n1 << 1) * n2 + 1], &i__1, &iwork[1], &
+ ierr);
+
+/* Frobenius norm-based Difl-estimate. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ dtgsyl_("N", &ijb, &n2, &n1, &a[i__ + i__ * a_dim1], lda, &a[
+ a_offset], lda, &work[1], &n2, &b[i__ + i__ * b_dim1],
+ ldb, &b[b_offset], ldb, &work[n1 * n2 + 1], &n2, &dscale,
+ &dif[2], &work[(n1 << 1) * n2 + 1], &i__1, &iwork[1], &
+ ierr);
+ } else {
+
+
+/* Compute 1-norm-based estimates of Difu and Difl using */
+/* reversed communication with DLACN2. In each step a */
+/* generalized Sylvester equation or a transposed variant */
+/* is solved. */
+
+ kase = 0;
+ n1 = *m;
+ n2 = *n - *m;
+ i__ = n1 + 1;
+ ijb = 0;
+ mn2 = (n1 << 1) * n2;
+
+/* 1-norm-based estimate of Difu. */
+
+L40:
+ dlacn2_(&mn2, &work[mn2 + 1], &work[1], &iwork[1], &dif[1], &kase,
+ isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Solve generalized Sylvester equation. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ dtgsyl_("N", &ijb, &n1, &n2, &a[a_offset], lda, &a[i__ +
+ i__ * a_dim1], lda, &work[1], &n1, &b[b_offset],
+ ldb, &b[i__ + i__ * b_dim1], ldb, &work[n1 * n2 +
+ 1], &n1, &dscale, &dif[1], &work[(n1 << 1) * n2 +
+ 1], &i__1, &iwork[1], &ierr);
+ } else {
+
+/* Solve the transposed variant. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ dtgsyl_("T", &ijb, &n1, &n2, &a[a_offset], lda, &a[i__ +
+ i__ * a_dim1], lda, &work[1], &n1, &b[b_offset],
+ ldb, &b[i__ + i__ * b_dim1], ldb, &work[n1 * n2 +
+ 1], &n1, &dscale, &dif[1], &work[(n1 << 1) * n2 +
+ 1], &i__1, &iwork[1], &ierr);
+ }
+ goto L40;
+ }
+ dif[1] = dscale / dif[1];
+
+/* 1-norm-based estimate of Difl. */
+
+L50:
+ dlacn2_(&mn2, &work[mn2 + 1], &work[1], &iwork[1], &dif[2], &kase,
+ isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Solve generalized Sylvester equation. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ dtgsyl_("N", &ijb, &n2, &n1, &a[i__ + i__ * a_dim1], lda,
+ &a[a_offset], lda, &work[1], &n2, &b[i__ + i__ *
+ b_dim1], ldb, &b[b_offset], ldb, &work[n1 * n2 +
+ 1], &n2, &dscale, &dif[2], &work[(n1 << 1) * n2 +
+ 1], &i__1, &iwork[1], &ierr);
+ } else {
+
+/* Solve the transposed variant. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ dtgsyl_("T", &ijb, &n2, &n1, &a[i__ + i__ * a_dim1], lda,
+ &a[a_offset], lda, &work[1], &n2, &b[i__ + i__ *
+ b_dim1], ldb, &b[b_offset], ldb, &work[n1 * n2 +
+ 1], &n2, &dscale, &dif[2], &work[(n1 << 1) * n2 +
+ 1], &i__1, &iwork[1], &ierr);
+ }
+ goto L50;
+ }
+ dif[2] = dscale / dif[2];
+
+ }
+ }
+
+L60:
+
+/* Compute generalized eigenvalues of reordered pair (A, B) and */
+/* normalize the generalized Schur form. */
+
+ pair = FALSE_;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (pair) {
+ pair = FALSE_;
+ } else {
+
+ if (k < *n) {
+ if (a[k + 1 + k * a_dim1] != 0.) {
+ pair = TRUE_;
+ }
+ }
+
+ if (pair) {
+
+/* Compute the eigenvalue(s) at position K. */
+
+ work[1] = a[k + k * a_dim1];
+ work[2] = a[k + 1 + k * a_dim1];
+ work[3] = a[k + (k + 1) * a_dim1];
+ work[4] = a[k + 1 + (k + 1) * a_dim1];
+ work[5] = b[k + k * b_dim1];
+ work[6] = b[k + 1 + k * b_dim1];
+ work[7] = b[k + (k + 1) * b_dim1];
+ work[8] = b[k + 1 + (k + 1) * b_dim1];
+ d__1 = smlnum * eps;
+ dlag2_(&work[1], &c__2, &work[5], &c__2, &d__1, &beta[k], &
+ beta[k + 1], &alphar[k], &alphar[k + 1], &alphai[k]);
+ alphai[k + 1] = -alphai[k];
+
+ } else {
+
+ if (d_sign(&c_b28, &b[k + k * b_dim1]) < 0.) {
+
+/* If B(K,K) is negative, make it positive */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[k + i__ * a_dim1] = -a[k + i__ * a_dim1];
+ b[k + i__ * b_dim1] = -b[k + i__ * b_dim1];
+ if (*wantq) {
+ q[i__ + k * q_dim1] = -q[i__ + k * q_dim1];
+ }
+/* L70: */
+ }
+ }
+
+ alphar[k] = a[k + k * a_dim1];
+ alphai[k] = 0.;
+ beta[k] = b[k + k * b_dim1];
+
+ }
+ }
+/* L80: */
+ }
+
+ work[1] = (doublereal) lwmin;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of DTGSEN */
+
+} /* dtgsen_ */
diff --git a/contrib/libs/clapack/dtgsja.c b/contrib/libs/clapack/dtgsja.c
new file mode 100644
index 0000000000..5b295b99ac
--- /dev/null
+++ b/contrib/libs/clapack/dtgsja.c
@@ -0,0 +1,625 @@
+/* dtgsja.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b13 = 0.;
+static doublereal c_b14 = 1.;
+static integer c__1 = 1;
+static doublereal c_b43 = -1.;
+
+/* Subroutine */ int dtgsja_(char *jobu, char *jobv, char *jobq, integer *m,
+ integer *p, integer *n, integer *k, integer *l, doublereal *a,
+ integer *lda, doublereal *b, integer *ldb, doublereal *tola,
+ doublereal *tolb, doublereal *alpha, doublereal *beta, doublereal *u,
+ integer *ldu, doublereal *v, integer *ldv, doublereal *q, integer *
+ ldq, doublereal *work, integer *ncycle, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, u_dim1,
+ u_offset, v_dim1, v_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal a1, a2, a3, b1, b2, b3, csq, csu, csv, snq, rwk, snu, snv;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ doublereal gamma;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ logical initq, initu, initv, wantq, upper;
+ doublereal error, ssmin;
+ logical wantu, wantv;
+ extern /* Subroutine */ int dlags2_(logical *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *), dlapll_(integer *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *);
+ integer kcycle;
+ extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *), dlaset_(char *,
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTGSJA computes the generalized singular value decomposition (GSVD) */
+/* of two real upper triangular (or trapezoidal) matrices A and B. */
+
+/* On entry, it is assumed that matrices A and B have the following */
+/* forms, which may be obtained by the preprocessing subroutine DGGSVP */
+/* from a general M-by-N matrix A and P-by-N matrix B: */
+
+/* N-K-L K L */
+/* A = K ( 0 A12 A13 ) if M-K-L >= 0; */
+/* L ( 0 0 A23 ) */
+/* M-K-L ( 0 0 0 ) */
+
+/* N-K-L K L */
+/* A = K ( 0 A12 A13 ) if M-K-L < 0; */
+/* M-K ( 0 0 A23 ) */
+
+/* N-K-L K L */
+/* B = L ( 0 0 B13 ) */
+/* P-L ( 0 0 0 ) */
+
+/* where the K-by-K matrix A12 and L-by-L matrix B13 are nonsingular */
+/* upper triangular; A23 is L-by-L upper triangular if M-K-L >= 0, */
+/* otherwise A23 is (M-K)-by-L upper trapezoidal. */
+
+/* On exit, */
+
+/* U'*A*Q = D1*( 0 R ), V'*B*Q = D2*( 0 R ), */
+
+/* where U, V and Q are orthogonal matrices, Z' denotes the transpose */
+/* of Z, R is a nonsingular upper triangular matrix, and D1 and D2 are */
+/* ``diagonal'' matrices, which are of the following structures: */
+
+/* If M-K-L >= 0, */
+
+/* K L */
+/* D1 = K ( I 0 ) */
+/* L ( 0 C ) */
+/* M-K-L ( 0 0 ) */
+
+/* K L */
+/* D2 = L ( 0 S ) */
+/* P-L ( 0 0 ) */
+
+/* N-K-L K L */
+/* ( 0 R ) = K ( 0 R11 R12 ) K */
+/* L ( 0 0 R22 ) L */
+
+/* where */
+
+/* C = diag( ALPHA(K+1), ... , ALPHA(K+L) ), */
+/* S = diag( BETA(K+1), ... , BETA(K+L) ), */
+/* C**2 + S**2 = I. */
+
+/* R is stored in A(1:K+L,N-K-L+1:N) on exit. */
+
+/* If M-K-L < 0, */
+
+/* K M-K K+L-M */
+/* D1 = K ( I 0 0 ) */
+/* M-K ( 0 C 0 ) */
+
+/* K M-K K+L-M */
+/* D2 = M-K ( 0 S 0 ) */
+/* K+L-M ( 0 0 I ) */
+/* P-L ( 0 0 0 ) */
+
+/* N-K-L K M-K K+L-M */
+/* ( 0 R ) = K ( 0 R11 R12 R13 ) */
+/* M-K ( 0 0 R22 R23 ) */
+/* K+L-M ( 0 0 0 R33 ) */
+
+/* where */
+/* C = diag( ALPHA(K+1), ... , ALPHA(M) ), */
+/* S = diag( BETA(K+1), ... , BETA(M) ), */
+/* C**2 + S**2 = I. */
+
+/* R = ( R11 R12 R13 ) is stored in A(1:M, N-K-L+1:N) and R33 is stored */
+/* ( 0 R22 R23 ) */
+/* in B(M-K+1:L,N+M-K-L+1:N) on exit. */
+
+/* The computation of the orthogonal transformation matrices U, V or Q */
+/* is optional. These matrices may either be formed explicitly, or they */
+/* may be postmultiplied into input matrices U1, V1, or Q1. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBU (input) CHARACTER*1 */
+/* = 'U': U must contain an orthogonal matrix U1 on entry, and */
+/* the product U1*U is returned; */
+/* = 'I': U is initialized to the unit matrix, and the */
+/* orthogonal matrix U is returned; */
+/* = 'N': U is not computed. */
+
+/* JOBV (input) CHARACTER*1 */
+/* = 'V': V must contain an orthogonal matrix V1 on entry, and */
+/* the product V1*V is returned; */
+/* = 'I': V is initialized to the unit matrix, and the */
+/* orthogonal matrix V is returned; */
+/* = 'N': V is not computed. */
+
+/* JOBQ (input) CHARACTER*1 */
+/* = 'Q': Q must contain an orthogonal matrix Q1 on entry, and */
+/* the product Q1*Q is returned; */
+/* = 'I': Q is initialized to the unit matrix, and the */
+/* orthogonal matrix Q is returned; */
+/* = 'N': Q is not computed. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* P (input) INTEGER */
+/* The number of rows of the matrix B. P >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrices A and B. N >= 0. */
+
+/* K (input) INTEGER */
+/* L (input) INTEGER */
+/* K and L specify the subblocks in the input matrices A and B: */
+/* A23 = A(K+1:MIN(K+L,M),N-L+1:N) and B13 = B(1:L,N-L+1:N) */
+/* of A and B, whose GSVD is going to be computed by DTGSJA. */
+/* See Further details. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A(N-K+1:N,1:MIN(K+L,M) ) contains the triangular */
+/* matrix R or part of R. See Purpose for details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,N) */
+/* On entry, the P-by-N matrix B. */
+/* On exit, if necessary, B(M-K+1:L,N+M-K-L+1:N) contains */
+/* a part of R. See Purpose for details. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,P). */
+
+/* TOLA (input) DOUBLE PRECISION */
+/* TOLB (input) DOUBLE PRECISION */
+/* TOLA and TOLB are the convergence criteria for the Jacobi- */
+/* Kogbetliantz iteration procedure. Generally, they are the */
+/* same as used in the preprocessing step, say */
+/* TOLA = max(M,N)*norm(A)*MACHEPS, */
+/* TOLB = max(P,N)*norm(B)*MACHEPS. */
+
+/* ALPHA (output) DOUBLE PRECISION array, dimension (N) */
+/* BETA (output) DOUBLE PRECISION array, dimension (N) */
+/* On exit, ALPHA and BETA contain the generalized singular */
+/* value pairs of A and B; */
+/* ALPHA(1:K) = 1, */
+/* BETA(1:K) = 0, */
+/* and if M-K-L >= 0, */
+/* ALPHA(K+1:K+L) = diag(C), */
+/* BETA(K+1:K+L) = diag(S), */
+/* or if M-K-L < 0, */
+/* ALPHA(K+1:M)= C, ALPHA(M+1:K+L)= 0 */
+/* BETA(K+1:M) = S, BETA(M+1:K+L) = 1. */
+/* Furthermore, if K+L < N, */
+/* ALPHA(K+L+1:N) = 0 and */
+/* BETA(K+L+1:N) = 0. */
+
+/* U (input/output) DOUBLE PRECISION array, dimension (LDU,M) */
+/* On entry, if JOBU = 'U', U must contain a matrix U1 (usually */
+/* the orthogonal matrix returned by DGGSVP). */
+/* On exit, */
+/* if JOBU = 'I', U contains the orthogonal matrix U; */
+/* if JOBU = 'U', U contains the product U1*U. */
+/* If JOBU = 'N', U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= max(1,M) if */
+/* JOBU = 'U'; LDU >= 1 otherwise. */
+
+/* V (input/output) DOUBLE PRECISION array, dimension (LDV,P) */
+/* On entry, if JOBV = 'V', V must contain a matrix V1 (usually */
+/* the orthogonal matrix returned by DGGSVP). */
+/* On exit, */
+/* if JOBV = 'I', V contains the orthogonal matrix V; */
+/* if JOBV = 'V', V contains the product V1*V. */
+/* If JOBV = 'N', V is not referenced. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. LDV >= max(1,P) if */
+/* JOBV = 'V'; LDV >= 1 otherwise. */
+
+/* Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N) */
+/* On entry, if JOBQ = 'Q', Q must contain a matrix Q1 (usually */
+/* the orthogonal matrix returned by DGGSVP). */
+/* On exit, */
+/* if JOBQ = 'I', Q contains the orthogonal matrix Q; */
+/* if JOBQ = 'Q', Q contains the product Q1*Q. */
+/* If JOBQ = 'N', Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N) if */
+/* JOBQ = 'Q'; LDQ >= 1 otherwise. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (2*N) */
+
+/* NCYCLE (output) INTEGER */
+/* The number of cycles required for convergence. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1: the procedure does not converge after MAXIT cycles. */
+
+/* Internal Parameters */
+/* =================== */
+
+/* MAXIT INTEGER */
+/* MAXIT specifies the total loops that the iterative procedure */
+/* may take. If after MAXIT cycles, the routine fails to */
+/* converge, we return INFO = 1. */
+
+/* Further Details */
+/* =============== */
+
+/* DTGSJA essentially uses a variant of Kogbetliantz algorithm to reduce */
+/* min(L,M-K)-by-L triangular (or trapezoidal) matrix A23 and L-by-L */
+/* matrix B13 to the form: */
+
+/* U1'*A23*Q1 = C1*R1; V1'*B13*Q1 = S1*R1, */
+
+/* where U1, V1 and Q1 are orthogonal matrices, and Z' is the transpose */
+/* of Z. C1 and S1 are diagonal matrices satisfying */
+
+/* C1**2 + S1**2 = I, */
+
+/* and R1 is an L-by-L nonsingular upper triangular matrix. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alpha;
+ --beta;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --work;
+
+ /* Function Body */
+ initu = lsame_(jobu, "I");
+ wantu = initu || lsame_(jobu, "U");
+
+ initv = lsame_(jobv, "I");
+ wantv = initv || lsame_(jobv, "V");
+
+ initq = lsame_(jobq, "I");
+ wantq = initq || lsame_(jobq, "Q");
+
+ *info = 0;
+ if (! (initu || wantu || lsame_(jobu, "N"))) {
+ *info = -1;
+ } else if (! (initv || wantv || lsame_(jobv, "N")))
+ {
+ *info = -2;
+ } else if (! (initq || wantq || lsame_(jobq, "N")))
+ {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*p < 0) {
+ *info = -5;
+ } else if (*n < 0) {
+ *info = -6;
+ } else if (*lda < max(1,*m)) {
+ *info = -10;
+ } else if (*ldb < max(1,*p)) {
+ *info = -12;
+ } else if (*ldu < 1 || wantu && *ldu < *m) {
+ *info = -18;
+ } else if (*ldv < 1 || wantv && *ldv < *p) {
+ *info = -20;
+ } else if (*ldq < 1 || wantq && *ldq < *n) {
+ *info = -22;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTGSJA", &i__1);
+ return 0;
+ }
+
+/* Initialize U, V and Q, if necessary */
+
+ if (initu) {
+ dlaset_("Full", m, m, &c_b13, &c_b14, &u[u_offset], ldu);
+ }
+ if (initv) {
+ dlaset_("Full", p, p, &c_b13, &c_b14, &v[v_offset], ldv);
+ }
+ if (initq) {
+ dlaset_("Full", n, n, &c_b13, &c_b14, &q[q_offset], ldq);
+ }
+
+/* Loop until convergence */
+
+ upper = FALSE_;
+ for (kcycle = 1; kcycle <= 40; ++kcycle) {
+
+ upper = ! upper;
+
+ i__1 = *l - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *l;
+ for (j = i__ + 1; j <= i__2; ++j) {
+
+ a1 = 0.;
+ a2 = 0.;
+ a3 = 0.;
+ if (*k + i__ <= *m) {
+ a1 = a[*k + i__ + (*n - *l + i__) * a_dim1];
+ }
+ if (*k + j <= *m) {
+ a3 = a[*k + j + (*n - *l + j) * a_dim1];
+ }
+
+ b1 = b[i__ + (*n - *l + i__) * b_dim1];
+ b3 = b[j + (*n - *l + j) * b_dim1];
+
+ if (upper) {
+ if (*k + i__ <= *m) {
+ a2 = a[*k + i__ + (*n - *l + j) * a_dim1];
+ }
+ b2 = b[i__ + (*n - *l + j) * b_dim1];
+ } else {
+ if (*k + j <= *m) {
+ a2 = a[*k + j + (*n - *l + i__) * a_dim1];
+ }
+ b2 = b[j + (*n - *l + i__) * b_dim1];
+ }
+
+ dlags2_(&upper, &a1, &a2, &a3, &b1, &b2, &b3, &csu, &snu, &
+ csv, &snv, &csq, &snq);
+
+/* Update (K+I)-th and (K+J)-th rows of matrix A: U'*A */
+
+ if (*k + j <= *m) {
+ drot_(l, &a[*k + j + (*n - *l + 1) * a_dim1], lda, &a[*k
+ + i__ + (*n - *l + 1) * a_dim1], lda, &csu, &snu);
+ }
+
+/* Update I-th and J-th rows of matrix B: V'*B */
+
+ drot_(l, &b[j + (*n - *l + 1) * b_dim1], ldb, &b[i__ + (*n - *
+ l + 1) * b_dim1], ldb, &csv, &snv);
+
+/* Update (N-L+I)-th and (N-L+J)-th columns of matrices */
+/* A and B: A*Q and B*Q */
+
+/* Computing MIN */
+ i__4 = *k + *l;
+ i__3 = min(i__4,*m);
+ drot_(&i__3, &a[(*n - *l + j) * a_dim1 + 1], &c__1, &a[(*n - *
+ l + i__) * a_dim1 + 1], &c__1, &csq, &snq);
+
+ drot_(l, &b[(*n - *l + j) * b_dim1 + 1], &c__1, &b[(*n - *l +
+ i__) * b_dim1 + 1], &c__1, &csq, &snq);
+
+ if (upper) {
+ if (*k + i__ <= *m) {
+ a[*k + i__ + (*n - *l + j) * a_dim1] = 0.;
+ }
+ b[i__ + (*n - *l + j) * b_dim1] = 0.;
+ } else {
+ if (*k + j <= *m) {
+ a[*k + j + (*n - *l + i__) * a_dim1] = 0.;
+ }
+ b[j + (*n - *l + i__) * b_dim1] = 0.;
+ }
+
+/* Update orthogonal matrices U, V, Q, if desired. */
+
+ if (wantu && *k + j <= *m) {
+ drot_(m, &u[(*k + j) * u_dim1 + 1], &c__1, &u[(*k + i__) *
+ u_dim1 + 1], &c__1, &csu, &snu);
+ }
+
+ if (wantv) {
+ drot_(p, &v[j * v_dim1 + 1], &c__1, &v[i__ * v_dim1 + 1],
+ &c__1, &csv, &snv);
+ }
+
+ if (wantq) {
+ drot_(n, &q[(*n - *l + j) * q_dim1 + 1], &c__1, &q[(*n - *
+ l + i__) * q_dim1 + 1], &c__1, &csq, &snq);
+ }
+
+/* L10: */
+ }
+/* L20: */
+ }
+
+ if (! upper) {
+
+/* The matrices A13 and B13 were lower triangular at the start */
+/* of the cycle, and are now upper triangular. */
+
+/* Convergence test: test the parallelism of the corresponding */
+/* rows of A and B. */
+
+ error = 0.;
+/* Computing MIN */
+ i__2 = *l, i__3 = *m - *k;
+ i__1 = min(i__2,i__3);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *l - i__ + 1;
+ dcopy_(&i__2, &a[*k + i__ + (*n - *l + i__) * a_dim1], lda, &
+ work[1], &c__1);
+ i__2 = *l - i__ + 1;
+ dcopy_(&i__2, &b[i__ + (*n - *l + i__) * b_dim1], ldb, &work[*
+ l + 1], &c__1);
+ i__2 = *l - i__ + 1;
+ dlapll_(&i__2, &work[1], &c__1, &work[*l + 1], &c__1, &ssmin);
+ error = max(error,ssmin);
+/* L30: */
+ }
+
+ if (abs(error) <= min(*tola,*tolb)) {
+ goto L50;
+ }
+ }
+
+/* End of cycle loop */
+
+/* L40: */
+ }
+
+/* The algorithm has not converged after MAXIT cycles. */
+
+ *info = 1;
+ goto L100;
+
+L50:
+
+/* If ERROR <= MIN(TOLA,TOLB), then the algorithm has converged. */
+/* Compute the generalized singular value pairs (ALPHA, BETA), and */
+/* set the triangular matrix R to array A. */
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ alpha[i__] = 1.;
+ beta[i__] = 0.;
+/* L60: */
+ }
+
+/* Computing MIN */
+ i__2 = *l, i__3 = *m - *k;
+ i__1 = min(i__2,i__3);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+ a1 = a[*k + i__ + (*n - *l + i__) * a_dim1];
+ b1 = b[i__ + (*n - *l + i__) * b_dim1];
+
+ if (a1 != 0.) {
+ gamma = b1 / a1;
+
+/* change sign if necessary */
+
+ if (gamma < 0.) {
+ i__2 = *l - i__ + 1;
+ dscal_(&i__2, &c_b43, &b[i__ + (*n - *l + i__) * b_dim1], ldb)
+ ;
+ if (wantv) {
+ dscal_(p, &c_b43, &v[i__ * v_dim1 + 1], &c__1);
+ }
+ }
+
+ d__1 = abs(gamma);
+ dlartg_(&d__1, &c_b14, &beta[*k + i__], &alpha[*k + i__], &rwk);
+
+ if (alpha[*k + i__] >= beta[*k + i__]) {
+ i__2 = *l - i__ + 1;
+ d__1 = 1. / alpha[*k + i__];
+ dscal_(&i__2, &d__1, &a[*k + i__ + (*n - *l + i__) * a_dim1],
+ lda);
+ } else {
+ i__2 = *l - i__ + 1;
+ d__1 = 1. / beta[*k + i__];
+ dscal_(&i__2, &d__1, &b[i__ + (*n - *l + i__) * b_dim1], ldb);
+ i__2 = *l - i__ + 1;
+ dcopy_(&i__2, &b[i__ + (*n - *l + i__) * b_dim1], ldb, &a[*k
+ + i__ + (*n - *l + i__) * a_dim1], lda);
+ }
+
+ } else {
+
+ alpha[*k + i__] = 0.;
+ beta[*k + i__] = 1.;
+ i__2 = *l - i__ + 1;
+ dcopy_(&i__2, &b[i__ + (*n - *l + i__) * b_dim1], ldb, &a[*k +
+ i__ + (*n - *l + i__) * a_dim1], lda);
+
+ }
+
+/* L70: */
+ }
+
+/* Post-assignment */
+
+ i__1 = *k + *l;
+ for (i__ = *m + 1; i__ <= i__1; ++i__) {
+ alpha[i__] = 0.;
+ beta[i__] = 1.;
+/* L80: */
+ }
+
+ if (*k + *l < *n) {
+ i__1 = *n;
+ for (i__ = *k + *l + 1; i__ <= i__1; ++i__) {
+ alpha[i__] = 0.;
+ beta[i__] = 0.;
+/* L90: */
+ }
+ }
+
+L100:
+ *ncycle = kcycle;
+ return 0;
+
+/* End of DTGSJA */
+
+} /* dtgsja_ */
diff --git a/contrib/libs/clapack/dtgsna.c b/contrib/libs/clapack/dtgsna.c
new file mode 100644
index 0000000000..0dc35093a2
--- /dev/null
+++ b/contrib/libs/clapack/dtgsna.c
@@ -0,0 +1,695 @@
+/* dtgsna.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c passes all scalar arguments by reference, so the constant */
+/* actual arguments used below must live in addressable storage: */
+/* c__1 = unit stride for BLAS vector ops; c_b19/c_b21 = 1.0/0.0 */
+/* scalars for DGEMV; c__2/c__3 = small integer arguments; c_false */
+/* disables the orthogonal-matrix updates in the DTGEXC call. */
+static integer c__1 = 1;
+static doublereal c_b19 = 1.;
+static doublereal c_b21 = 0.;
+static integer c__2 = 2;
+static logical c_false = FALSE_;
+static integer c__3 = 3;
+
+/* Subroutine */ int dtgsna_(char *job, char *howmny, logical *select, 
+	integer *n, doublereal *a, integer *lda, doublereal *b, integer *ldb, 
+	doublereal *vl, integer *ldvl, doublereal *vr, integer *ldvr, 
+	doublereal *s, doublereal *dif, integer *mm, integer *m, doublereal *
+	work, integer *lwork, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, vl_dim1, vl_offset, vr_dim1, 
+	    vr_offset, i__1, i__2;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, k;
+    doublereal c1, c2;
+    integer n1, n2, ks, iz;
+    doublereal eps, beta, cond;
+    extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, 
+	    integer *);
+    logical pair;
+    integer ierr;
+    doublereal uhav, uhbv;
+    integer ifst;
+    doublereal lnrm;
+    integer ilst;
+    doublereal rnrm;
+    extern /* Subroutine */ int dlag2_(doublereal *, integer *, doublereal *, 
+	    integer *, doublereal *, doublereal *, doublereal *, doublereal *, 
+	    doublereal *, doublereal *);
+    extern doublereal dnrm2_(integer *, doublereal *, integer *);
+    doublereal root1, root2, scale;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dgemv_(char *, integer *, integer *, 
+	    doublereal *, doublereal *, integer *, doublereal *, integer *, 
+	    doublereal *, doublereal *, integer *);
+    doublereal uhavi, uhbvi, tmpii;
+    integer lwmin;
+    logical wants;
+    doublereal tmpir, tmpri, dummy[1], tmprr;
+    extern doublereal dlapy2_(doublereal *, doublereal *);
+    doublereal dummy1[1];
+    extern doublereal dlamch_(char *);
+    doublereal alphai, alphar;
+    extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, 
+	    doublereal *, integer *, doublereal *, integer *), 
+	    xerbla_(char *, integer *), dtgexc_(logical *, logical *, 
+	    integer *, doublereal *, integer *, doublereal *, integer *, 
+	    doublereal *, integer *, doublereal *, integer *, integer *, 
+	    integer *, doublereal *, integer *, integer *);
+    logical wantbh, wantdf, somcon;
+    doublereal alprqt;
+    extern /* Subroutine */ int dtgsyl_(char *, integer *, integer *, integer 
+	    *, doublereal *, integer *, doublereal *, integer *, doublereal *, 
+	    integer *, doublereal *, integer *, doublereal *, integer *, 
+	    doublereal *, integer *, doublereal *, doublereal *, doublereal *, 
+	    integer *, integer *, integer *);
+    doublereal smlnum;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTGSNA estimates reciprocal condition numbers for specified */
+/* eigenvalues and/or eigenvectors of a matrix pair (A, B) in */
+/* generalized real Schur canonical form (or of any matrix pair */
+/* (Q*A*Z', Q*B*Z') with orthogonal matrices Q and Z, where */
+/* Z' denotes the transpose of Z. */
+
+/* (A, B) must be in generalized real Schur form (as returned by DGGES), */
+/* i.e. A is block upper triangular with 1-by-1 and 2-by-2 diagonal */
+/* blocks. B is upper triangular. */
+
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies whether condition numbers are required for */
+/* eigenvalues (S) or eigenvectors (DIF): */
+/* = 'E': for eigenvalues only (S); */
+/* = 'V': for eigenvectors only (DIF); */
+/* = 'B': for both eigenvalues and eigenvectors (S and DIF). */
+
+/* HOWMNY (input) CHARACTER*1 */
+/* = 'A': compute condition numbers for all eigenpairs; */
+/* = 'S': compute condition numbers for selected eigenpairs */
+/* specified by the array SELECT. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* If HOWMNY = 'S', SELECT specifies the eigenpairs for which */
+/* condition numbers are required. To select condition numbers */
+/* for the eigenpair corresponding to a real eigenvalue w(j), */
+/* SELECT(j) must be set to .TRUE.. To select condition numbers */
+/* corresponding to a complex conjugate pair of eigenvalues w(j) */
+/* and w(j+1), either SELECT(j) or SELECT(j+1) or both, must be */
+/* set to .TRUE.. */
+/* If HOWMNY = 'A', SELECT is not referenced. */
+
+/* N (input) INTEGER */
+/* The order of the square matrix pair (A, B). N >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The upper quasi-triangular matrix A in the pair (A,B). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,N) */
+/* The upper triangular matrix B in the pair (A,B). */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* VL (input) DOUBLE PRECISION array, dimension (LDVL,M) */
+/* If JOB = 'E' or 'B', VL must contain left eigenvectors of */
+/* (A, B), corresponding to the eigenpairs specified by HOWMNY */
+/* and SELECT. The eigenvectors must be stored in consecutive */
+/* columns of VL, as returned by DTGEVC. */
+/* If JOB = 'V', VL is not referenced. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the array VL. LDVL >= 1. */
+/* If JOB = 'E' or 'B', LDVL >= N. */
+
+/* VR (input) DOUBLE PRECISION array, dimension (LDVR,M) */
+/* If JOB = 'E' or 'B', VR must contain right eigenvectors of */
+/* (A, B), corresponding to the eigenpairs specified by HOWMNY */
+/* and SELECT. The eigenvectors must be stored in consecutive */
+/* columns of VR, as returned by DTGEVC. */
+/* If JOB = 'V', VR is not referenced. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. LDVR >= 1. */
+/* If JOB = 'E' or 'B', LDVR >= N. */
+
+/* S (output) DOUBLE PRECISION array, dimension (MM) */
+/* If JOB = 'E' or 'B', the reciprocal condition numbers of the */
+/* selected eigenvalues, stored in consecutive elements of the */
+/* array. For a complex conjugate pair of eigenvalues two */
+/* consecutive elements of S are set to the same value. Thus */
+/* S(j), DIF(j), and the j-th columns of VL and VR all */
+/* correspond to the same eigenpair (but not in general the */
+/* j-th eigenpair, unless all eigenpairs are selected). */
+/* If JOB = 'V', S is not referenced. */
+
+/* DIF (output) DOUBLE PRECISION array, dimension (MM) */
+/* If JOB = 'V' or 'B', the estimated reciprocal condition */
+/* numbers of the selected eigenvectors, stored in consecutive */
+/* elements of the array. For a complex eigenvector two */
+/* consecutive elements of DIF are set to the same value. If */
+/* the eigenvalues cannot be reordered to compute DIF(j), DIF(j) */
+/* is set to 0; this can only occur when the true value would be */
+/* very small anyway. */
+/* If JOB = 'E', DIF is not referenced. */
+
+/* MM (input) INTEGER */
+/* The number of elements in the arrays S and DIF. MM >= M. */
+
+/* M (output) INTEGER */
+/* The number of elements of the arrays S and DIF used to store */
+/* the specified condition numbers; for each selected real */
+/* eigenvalue one element is used, and for each selected complex */
+/* conjugate pair of eigenvalues, two elements are used. */
+/* If HOWMNY = 'A', M is set to N. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* If JOB = 'V' or 'B' LWORK >= 2*N*(N+2)+16. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (N + 6) */
+/* If JOB = 'E', IWORK is not referenced. */
+
+/* INFO (output) INTEGER */
+/* =0: Successful exit */
+/* <0: If INFO = -i, the i-th argument had an illegal value */
+
+
+/* Further Details */
+/* =============== */
+
+/* The reciprocal of the condition number of a generalized eigenvalue */
+/* w = (a, b) is defined as */
+
+/* S(w) = (|u'Av|**2 + |u'Bv|**2)**(1/2) / (norm(u)*norm(v)) */
+
+/* where u and v are the left and right eigenvectors of (A, B) */
+/* corresponding to w; |z| denotes the absolute value of the complex */
+/* number, and norm(u) denotes the 2-norm of the vector u. */
+/* The pair (a, b) corresponds to an eigenvalue w = a/b (= u'Av/u'Bv) */
+/* of the matrix pair (A, B). If both a and b equal zero, then (A B) is */
+/* singular and S(I) = -1 is returned. */
+
+/* An approximate error bound on the chordal distance between the i-th */
+/* computed generalized eigenvalue w and the corresponding exact */
+/* eigenvalue lambda is */
+
+/* chord(w, lambda) <= EPS * norm(A, B) / S(I) */
+
+/* where EPS is the machine precision. */
+
+/* The reciprocal of the condition number DIF(i) of right eigenvector u */
+/* and left eigenvector v corresponding to the generalized eigenvalue w */
+/* is defined as follows: */
+
+/* a) If the i-th eigenvalue w = (a,b) is real */
+
+/* Suppose U and V are orthogonal transformations such that */
+
+/* U'*(A, B)*V = (S, T) = ( a * ) ( b * ) 1 */
+/* ( 0 S22 ),( 0 T22 ) n-1 */
+/* 1 n-1 1 n-1 */
+
+/* Then the reciprocal condition number DIF(i) is */
+
+/* Difl((a, b), (S22, T22)) = sigma-min( Zl ), */
+
+/* where sigma-min(Zl) denotes the smallest singular value of the */
+/* 2(n-1)-by-2(n-1) matrix */
+
+/* Zl = [ kron(a, In-1) -kron(1, S22) ] */
+/* [ kron(b, In-1) -kron(1, T22) ] . */
+
+/* Here In-1 is the identity matrix of size n-1. kron(X, Y) is the */
+/* Kronecker product between the matrices X and Y. */
+
+/* Note that if the default method for computing DIF(i) is wanted */
+/* (see DLATDF), then the parameter DIFDRI (see below) should be */
+/* changed from 3 to 4 (routine DLATDF(IJOB = 2 will be used)). */
+/* See DTGSYL for more details. */
+
+/* b) If the i-th and (i+1)-th eigenvalues are complex conjugate pair, */
+
+/* Suppose U and V are orthogonal transformations such that */
+
+/* U'*(A, B)*V = (S, T) = ( S11 * ) ( T11 * ) 2 */
+/* ( 0 S22 ),( 0 T22) n-2 */
+/* 2 n-2 2 n-2 */
+
+/* and (S11, T11) corresponds to the complex conjugate eigenvalue */
+/* pair (w, conjg(w)). There exist unitary matrices U1 and V1 such */
+/* that */
+
+/* U1'*S11*V1 = ( s11 s12 ) and U1'*T11*V1 = ( t11 t12 ) */
+/* ( 0 s22 ) ( 0 t22 ) */
+
+/* where the generalized eigenvalues w = s11/t11 and */
+/* conjg(w) = s22/t22. */
+
+/* Then the reciprocal condition number DIF(i) is bounded by */
+
+/* min( d1, max( 1, |real(s11)/real(s22)| )*d2 ) */
+
+/* where, d1 = Difl((s11, t11), (s22, t22)) = sigma-min(Z1), where */
+/* Z1 is the complex 2-by-2 matrix */
+
+/* Z1 = [ s11 -s22 ] */
+/* [ t11 -t22 ], */
+
+/* This is done by computing (using real arithmetic) the */
+/* roots of the characteristic polynomial det(Z1' * Z1 - lambda I), */
+/* where Z1' denotes the conjugate transpose of Z1 and det(X) denotes */
+/* the determinant of X. */
+
+/* and d2 is an upper bound on Difl((S11, T11), (S22, T22)), i.e. an */
+/* upper bound on sigma-min(Z2), where Z2 is (2n-2)-by-(2n-2) */
+
+/* Z2 = [ kron(S11', In-2) -kron(I2, S22) ] */
+/* [ kron(T11', In-2) -kron(I2, T22) ] */
+
+/* Note that if the default method for computing DIF is wanted (see */
+/* DLATDF), then the parameter DIFDRI (see below) should be changed */
+/* from 3 to 4 (routine DLATDF(IJOB = 2 will be used)). See DTGSYL */
+/* for more details. */
+
+/* For each eigenvalue/vector specified by SELECT, DIF stores a */
+/* Frobenius norm-based estimate of Difl. */
+
+/* An approximate error bound for the i-th computed eigenvector VL(i) or */
+/* VR(i) is given by */
+
+/* EPS * norm(A, B) / DIF(i). */
+
+/* See ref. [2-3] for more details and further references. */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* References */
+/* ========== */
+
+/* [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */
+/* Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */
+/* M.S. Moonen et al (eds), Linear Algebra for Large Scale and */
+/* Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */
+
+/* [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified */
+/* Eigenvalues of a Regular Matrix Pair (A, B) and Condition */
+/* Estimation: Theory, Algorithms and Software, */
+/* Report UMINF - 94.04, Department of Computing Science, Umea */
+/* University, S-901 87 Umea, Sweden, 1994. Also as LAPACK Working */
+/* Note 87. To appear in Numerical Algorithms, 1996. */
+
+/* [3] B. Kagstrom and P. Poromaa, LAPACK-Style Algorithms and Software */
+/* for Solving the Generalized Sylvester Equation and Estimating the */
+/* Separation between Regular Matrix Pairs, Report UMINF - 93.23, */
+/* Department of Computing Science, Umea University, S-901 87 Umea, */
+/* Sweden, December 1993, Revised April 1994, Also as LAPACK Working */
+/* Note 75. To appear in ACM Trans. on Math. Software, Vol 22, */
+/* No 1, 1996. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+    /* Parameter adjustments */
+    /* (f2c idiom: shift each base pointer so that 1-based Fortran */
+    /* subscripts index the arrays correctly) */
+    --select;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    vl_dim1 = *ldvl;
+    vl_offset = 1 + vl_dim1;
+    vl -= vl_offset;
+    vr_dim1 = *ldvr;
+    vr_offset = 1 + vr_dim1;
+    vr -= vr_offset;
+    --s;
+    --dif;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    wantbh = lsame_(job, "B");
+    wants = lsame_(job, "E") || wantbh;
+    wantdf = lsame_(job, "V") || wantbh;
+
+    somcon = lsame_(howmny, "S");
+
+    *info = 0;
+    lquery = *lwork == -1;
+
+    if (! wants && ! wantdf) {
+	*info = -1;
+    } else if (! lsame_(howmny, "A") && ! somcon) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*n)) {
+	*info = -6;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    } else if (wants && *ldvl < *n) {
+	*info = -10;
+    } else if (wants && *ldvr < *n) {
+	*info = -12;
+    } else {
+
+/* Set M to the number of eigenpairs for which condition numbers */
+/* are required, and test MM. */
+
+	if (somcon) {
+	    *m = 0;
+	    pair = FALSE_;
+	    i__1 = *n;
+	    for (k = 1; k <= i__1; ++k) {
+		if (pair) {
+		    pair = FALSE_;
+		} else {
+		    if (k < *n) {
+			if (a[k + 1 + k * a_dim1] == 0.) {
+			    if (select[k]) {
+				++(*m);
+			    }
+			} else {
+			    pair = TRUE_;
+			    if (select[k] || select[k + 1]) {
+				*m += 2;
+			    }
+			}
+		    } else {
+			if (select[*n]) {
+			    ++(*m);
+			}
+		    }
+		}
+/* L10: */
+	    }
+	} else {
+	    *m = *n;
+	}
+
+	if (*n == 0) {
+	    lwmin = 1;
+	} else if (lsame_(job, "V") || lsame_(job, 
+		"B")) {
+	    lwmin = (*n << 1) * (*n + 2) + 16;
+	} else {
+	    lwmin = *n;
+	}
+	work[1] = (doublereal) lwmin;
+
+	if (*mm < *m) {
+	    *info = -15;
+	} else if (*lwork < lwmin && ! lquery) {
+	    *info = -18;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DTGSNA", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Get machine constants */
+
+    eps = dlamch_("P");
+    smlnum = dlamch_("S") / eps;
+/* KS tracks the position in S and DIF of the current selected */
+/* eigenpair (one slot for a real eigenvalue, two for a complex pair). */
+    ks = 0;
+    pair = FALSE_;
+
+    i__1 = *n;
+    for (k = 1; k <= i__1; ++k) {
+
+/* Determine whether A(k,k) begins a 1-by-1 or 2-by-2 block. */
+
+	if (pair) {
+	    pair = FALSE_;
+	    goto L20;
+	} else {
+	    if (k < *n) {
+		pair = a[k + 1 + k * a_dim1] != 0.;
+	    }
+	}
+
+/* Determine whether condition numbers are required for the k-th */
+/* eigenpair. */
+
+	if (somcon) {
+	    if (pair) {
+		if (! select[k] && ! select[k + 1]) {
+		    goto L20;
+		}
+	    } else {
+		if (! select[k]) {
+		    goto L20;
+		}
+	    }
+	}
+
+	++ks;
+
+	if (wants) {
+
+/* Compute the reciprocal condition number of the k-th */
+/* eigenvalue. */
+
+	    if (pair) {
+
+/* Complex eigenvalue pair. */
+
+/* The real and imaginary parts of the eigenvectors occupy */
+/* columns KS and KS+1 of VR and VL; RNRM and LNRM are the */
+/* 2-norms of the complex right and left eigenvectors. */
+		d__1 = dnrm2_(n, &vr[ks * vr_dim1 + 1], &c__1);
+		d__2 = dnrm2_(n, &vr[(ks + 1) * vr_dim1 + 1], &c__1);
+		rnrm = dlapy2_(&d__1, &d__2);
+		d__1 = dnrm2_(n, &vl[ks * vl_dim1 + 1], &c__1);
+		d__2 = dnrm2_(n, &vl[(ks + 1) * vl_dim1 + 1], &c__1);
+		lnrm = dlapy2_(&d__1, &d__2);
+		dgemv_("N", n, n, &c_b19, &a[a_offset], lda, &vr[ks * vr_dim1 
+			+ 1], &c__1, &c_b21, &work[1], &c__1);
+		tmprr = ddot_(n, &work[1], &c__1, &vl[ks * vl_dim1 + 1], &
+			c__1);
+		tmpri = ddot_(n, &work[1], &c__1, &vl[(ks + 1) * vl_dim1 + 1], 
+			 &c__1);
+		dgemv_("N", n, n, &c_b19, &a[a_offset], lda, &vr[(ks + 1) * 
+			vr_dim1 + 1], &c__1, &c_b21, &work[1], &c__1);
+		tmpii = ddot_(n, &work[1], &c__1, &vl[(ks + 1) * vl_dim1 + 1], 
+			 &c__1);
+		tmpir = ddot_(n, &work[1], &c__1, &vl[ks * vl_dim1 + 1], &
+			c__1);
+/* (uhav, uhavi) = real and imaginary parts of u'*A*v */
+		uhav = tmprr + tmpii;
+		uhavi = tmpir - tmpri;
+		dgemv_("N", n, n, &c_b19, &b[b_offset], ldb, &vr[ks * vr_dim1 
+			+ 1], &c__1, &c_b21, &work[1], &c__1);
+		tmprr = ddot_(n, &work[1], &c__1, &vl[ks * vl_dim1 + 1], &
+			c__1);
+		tmpri = ddot_(n, &work[1], &c__1, &vl[(ks + 1) * vl_dim1 + 1], 
+			 &c__1);
+		dgemv_("N", n, n, &c_b19, &b[b_offset], ldb, &vr[(ks + 1) * 
+			vr_dim1 + 1], &c__1, &c_b21, &work[1], &c__1);
+		tmpii = ddot_(n, &work[1], &c__1, &vl[(ks + 1) * vl_dim1 + 1], 
+			 &c__1);
+		tmpir = ddot_(n, &work[1], &c__1, &vl[ks * vl_dim1 + 1], &
+			c__1);
+/* (uhbv, uhbvi) = real and imaginary parts of u'*B*v */
+		uhbv = tmprr + tmpii;
+		uhbvi = tmpir - tmpri;
+/* S(k) = sqrt(|u'Av|^2 + |u'Bv|^2) / (norm(u)*norm(v)), */
+/* cf. the formula in Further Details. */
+		uhav = dlapy2_(&uhav, &uhavi);
+		uhbv = dlapy2_(&uhbv, &uhbvi);
+		cond = dlapy2_(&uhav, &uhbv);
+		s[ks] = cond / (rnrm * lnrm);
+		s[ks + 1] = s[ks];
+
+	    } else {
+
+/* Real eigenvalue. */
+
+		rnrm = dnrm2_(n, &vr[ks * vr_dim1 + 1], &c__1);
+		lnrm = dnrm2_(n, &vl[ks * vl_dim1 + 1], &c__1);
+		dgemv_("N", n, n, &c_b19, &a[a_offset], lda, &vr[ks * vr_dim1 
+			+ 1], &c__1, &c_b21, &work[1], &c__1);
+		uhav = ddot_(n, &work[1], &c__1, &vl[ks * vl_dim1 + 1], &c__1)
+			;
+		dgemv_("N", n, n, &c_b19, &b[b_offset], ldb, &vr[ks * vr_dim1 
+			+ 1], &c__1, &c_b21, &work[1], &c__1);
+		uhbv = ddot_(n, &work[1], &c__1, &vl[ks * vl_dim1 + 1], &c__1)
+			;
+		cond = dlapy2_(&uhav, &uhbv);
+/* Both u'Av and u'Bv vanish: the pencil is singular at this */
+/* eigenvalue and S(k) = -1 is returned, per the documentation. */
+		if (cond == 0.) {
+		    s[ks] = -1.;
+		} else {
+		    s[ks] = cond / (rnrm * lnrm);
+		}
+	    }
+	}
+
+	if (wantdf) {
+	    if (*n == 1) {
+		dif[ks] = dlapy2_(&a[a_dim1 + 1], &b[b_dim1 + 1]);
+		goto L20;
+	    }
+
+/* Estimate the reciprocal condition number of the k-th */
+/* eigenvectors. */
+	    if (pair) {
+
+/* Copy the 2-by 2 pencil beginning at (A(k,k), B(k, k)). */
+/* Compute the eigenvalue(s) at position K. */
+
+		work[1] = a[k + k * a_dim1];
+		work[2] = a[k + 1 + k * a_dim1];
+		work[3] = a[k + (k + 1) * a_dim1];
+		work[4] = a[k + 1 + (k + 1) * a_dim1];
+		work[5] = b[k + k * b_dim1];
+		work[6] = b[k + 1 + k * b_dim1];
+		work[7] = b[k + (k + 1) * b_dim1];
+		work[8] = b[k + 1 + (k + 1) * b_dim1];
+		d__1 = smlnum * eps;
+		dlag2_(&work[1], &c__2, &work[5], &c__2, &d__1, &beta, dummy1, 
+			 &alphar, dummy, &alphai);
+/* The singular values of the 2-by-2 matrix Z1 are the square */
+/* roots of the eigenvalues of Z1'*Z1, obtained here from its */
+/* characteristic polynomial (see Further Details, part b). */
+		alprqt = 1.;
+		c1 = (alphar * alphar + alphai * alphai + beta * beta) * 2.;
+		c2 = beta * 4. * beta * alphai * alphai;
+		root1 = c1 + sqrt(c1 * c1 - c2 * 4.);
+		root2 = c2 / root1;
+		root1 /= 2.;
+/* Computing MIN */
+		d__1 = sqrt(root1), d__2 = sqrt(root2);
+		cond = min(d__1,d__2);
+	    }
+
+/* Copy the matrix (A, B) to the array WORK and swap the */
+/* diagonal block beginning at A(k,k) to the (1,1) position. */
+
+	    dlacpy_("Full", n, n, &a[a_offset], lda, &work[1], n);
+	    dlacpy_("Full", n, n, &b[b_offset], ldb, &work[*n * *n + 1], n);
+	    ifst = k;
+	    ilst = 1;
+
+	    i__2 = *lwork - (*n << 1) * *n;
+	    dtgexc_(&c_false, &c_false, n, &work[1], n, &work[*n * *n + 1], n, 
+		     dummy, &c__1, dummy1, &c__1, &ifst, &ilst, &work[(*n * *
+		    n << 1) + 1], &i__2, &ierr);
+
+	    if (ierr > 0) {
+
+/* Ill-conditioned problem - swap rejected. */
+
+		dif[ks] = 0.;
+	    } else {
+
+/* Reordering successful, solve generalized Sylvester */
+/* equation for R and L, */
+/* A22 * R - L * A11 = A12 */
+/* B22 * R - L * B11 = B12, */
+/* and compute estimate of Difl((A11,B11), (A22, B22)). */
+
+		n1 = 1;
+		if (work[2] != 0.) {
+		    n1 = 2;
+		}
+		n2 = *n - n1;
+		if (n2 == 0) {
+		    dif[ks] = cond;
+		} else {
+		    i__ = *n * *n + 1;
+		    iz = (*n << 1) * *n + 1;
+		    i__2 = *lwork - (*n << 1) * *n;
+		    dtgsyl_("N", &c__3, &n2, &n1, &work[*n * n1 + n1 + 1], n, 
+			    &work[1], n, &work[n1 + 1], n, &work[*n * n1 + n1 
+			    + i__], n, &work[i__], n, &work[n1 + i__], n, &
+			    scale, &dif[ks], &work[iz + 1], &i__2, &iwork[1], 
+			    &ierr);
+
+		    if (pair) {
+/* Computing MIN */
+			d__1 = max(1.,alprqt) * dif[ks];
+			dif[ks] = min(d__1,cond);
+		    }
+		}
+	    }
+	    if (pair) {
+		dif[ks + 1] = dif[ks];
+	    }
+	}
+	if (pair) {
+	    ++ks;
+	}
+
+L20:
+	;
+    }
+    work[1] = (doublereal) lwmin;
+    return 0;
+
+/* End of DTGSNA */
+
+} /* dtgsna_ */
diff --git a/contrib/libs/clapack/dtgsy2.c b/contrib/libs/clapack/dtgsy2.c
new file mode 100644
index 0000000000..ed0a92daaf
--- /dev/null
+++ b/contrib/libs/clapack/dtgsy2.c
@@ -0,0 +1,1113 @@
+/* dtgsy2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c passes all scalar arguments by reference, so constant actual */
+/* arguments must live in addressable storage: c__8 is the leading */
+/* dimension of the local 8-by-8 system Z, c__1 the unit stride, and */
+/* c_b27/c_b42/c_b56 are the scalars -1.0, 1.0 and 0.0. */
+static integer c__8 = 8;
+static integer c__1 = 1;
+static doublereal c_b27 = -1.;
+static doublereal c_b42 = 1.;
+static doublereal c_b56 = 0.;
+
+/* Subroutine */ int dtgsy2_(char *trans, integer *ijob, integer *m, integer *
+ n, doublereal *a, integer *lda, doublereal *b, integer *ldb,
+ doublereal *c__, integer *ldc, doublereal *d__, integer *ldd,
+ doublereal *e, integer *lde, doublereal *f, integer *ldf, doublereal *
+ scale, doublereal *rdsum, doublereal *rdscal, integer *iwork, integer
+ *pq, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, d_dim1,
+ d_offset, e_dim1, e_offset, f_dim1, f_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, k, p, q;
+ doublereal z__[64] /* was [8][8] */;
+ integer ie, je, mb, nb, ii, jj, is, js;
+ doublereal rhs[8];
+ integer isp1, jsp1;
+ extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ integer ierr, zdim, ipiv[8], jpiv[8];
+ doublereal alpha;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *), dgemm_(char *, char *, integer *, integer *, integer *
+, doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *), dcopy_(integer *,
+ doublereal *, integer *, doublereal *, integer *), daxpy_(integer
+ *, doublereal *, doublereal *, integer *, doublereal *, integer *)
+ , dgesc2_(integer *, doublereal *, integer *, doublereal *,
+ integer *, integer *, doublereal *), dgetc2_(integer *,
+ doublereal *, integer *, integer *, integer *, integer *),
+ dlatdf_(integer *, integer *, doublereal *, integer *, doublereal
+ *, doublereal *, doublereal *, integer *, integer *);
+ doublereal scaloc;
+ extern /* Subroutine */ int dlaset_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* January 2007 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTGSY2 solves the generalized Sylvester equation: */
+
+/* A * R - L * B = scale * C (1) */
+/* D * R - L * E = scale * F, */
+
+/* using Level 1 and 2 BLAS. where R and L are unknown M-by-N matrices, */
+/* (A, D), (B, E) and (C, F) are given matrix pairs of size M-by-M, */
+/* N-by-N and M-by-N, respectively, with real entries. (A, D) and (B, E) */
+/* must be in generalized Schur canonical form, i.e. A, B are upper */
+/* quasi triangular and D, E are upper triangular. The solution (R, L) */
+/* overwrites (C, F). 0 <= SCALE <= 1 is an output scaling factor */
+/* chosen to avoid overflow. */
+
+/* In matrix notation solving equation (1) corresponds to solve */
+/* Z*x = scale*b, where Z is defined as */
+
+/* Z = [ kron(In, A) -kron(B', Im) ] (2) */
+/* [ kron(In, D) -kron(E', Im) ], */
+
+/* Ik is the identity matrix of size k and X' is the transpose of X. */
+/* kron(X, Y) is the Kronecker product between the matrices X and Y. */
+/* In the process of solving (1), we solve a number of such systems */
+/* where Dim(In), Dim(Im) = 1 or 2. */
+
+/* If TRANS = 'T', solve the transposed system Z'*y = scale*b for y, */
+/* which is equivalent to solve for R and L in */
+
+/* A' * R + D' * L = scale * C (3) */
+/* R * B' + L * E' = scale * -F */
+
+/* This case is used to compute an estimate of Dif[(A, D), (B, E)] = */
+/* sigma_min(Z) using reverse communication with DLACON. */
+
+/* DTGSY2 also (IJOB >= 1) contributes to the computation in DTGSYL */
+/* of an upper bound on the separation between two matrix pairs. Then */
+/* the input (A, D), (B, E) are sub-pencils of the matrix pair in */
+/* DTGSYL. See DTGSYL for details. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N', solve the generalized Sylvester equation (1). */
+/* = 'T': solve the 'transposed' system (3). */
+
+/* IJOB (input) INTEGER */
+/* Specifies what kind of functionality to be performed. */
+/* = 0: solve (1) only. */
+/* = 1: A contribution from this subsystem to a Frobenius */
+/* norm-based estimate of the separation between two matrix */
+/* pairs is computed. (look ahead strategy is used). */
+/* = 2: A contribution from this subsystem to a Frobenius */
+/* norm-based estimate of the separation between two matrix */
+/* pairs is computed. (DGECON on sub-systems is used.) */
+/* Not referenced if TRANS = 'T'. */
+
+/* M (input) INTEGER */
+/* On entry, M specifies the order of A and D, and the row */
+/* dimension of C, F, R and L. */
+
+/* N (input) INTEGER */
+/* On entry, N specifies the order of B and E, and the column */
+/* dimension of C, F, R and L. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA, M) */
+/* On entry, A contains an upper quasi triangular matrix. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the matrix A. LDA >= max(1, M). */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB, N) */
+/* On entry, B contains an upper quasi triangular matrix. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the matrix B. LDB >= max(1, N). */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC, N) */
+/* On entry, C contains the right-hand-side of the first matrix */
+/* equation in (1). */
+/* On exit, if IJOB = 0, C has been overwritten by the */
+/* solution R. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the matrix C. LDC >= max(1, M). */
+
+/* D (input) DOUBLE PRECISION array, dimension (LDD, M) */
+/* On entry, D contains an upper triangular matrix. */
+
+/* LDD (input) INTEGER */
+/* The leading dimension of the matrix D. LDD >= max(1, M). */
+
+/* E (input) DOUBLE PRECISION array, dimension (LDE, N) */
+/* On entry, E contains an upper triangular matrix. */
+
+/* LDE (input) INTEGER */
+/* The leading dimension of the matrix E. LDE >= max(1, N). */
+
+/* F (input/output) DOUBLE PRECISION array, dimension (LDF, N) */
+/* On entry, F contains the right-hand-side of the second matrix */
+/* equation in (1). */
+/* On exit, if IJOB = 0, F has been overwritten by the */
+/* solution L. */
+
+/* LDF (input) INTEGER */
+/* The leading dimension of the matrix F. LDF >= max(1, M). */
+
+/* SCALE (output) DOUBLE PRECISION */
+/* On exit, 0 <= SCALE <= 1. If 0 < SCALE < 1, the solutions */
+/* R and L (C and F on entry) will hold the solutions to a */
+/* slightly perturbed system but the input matrices A, B, D and */
+/* E have not been changed. If SCALE = 0, R and L will hold the */
+/* solutions to the homogeneous system with C = F = 0. Normally, */
+/* SCALE = 1. */
+
+/* RDSUM (input/output) DOUBLE PRECISION */
+/* On entry, the sum of squares of computed contributions to */
+/* the Dif-estimate under computation by DTGSYL, where the */
+/* scaling factor RDSCAL (see below) has been factored out. */
+/* On exit, the corresponding sum of squares updated with the */
+/* contributions from the current sub-system. */
+/* If TRANS = 'T' RDSUM is not touched. */
+/* NOTE: RDSUM only makes sense when DTGSY2 is called by DTGSYL. */
+
+/* RDSCAL (input/output) DOUBLE PRECISION */
+/* On entry, scaling factor used to prevent overflow in RDSUM. */
+/* On exit, RDSCAL is updated w.r.t. the current contributions */
+/* in RDSUM. */
+/* If TRANS = 'T', RDSCAL is not touched. */
+/* NOTE: RDSCAL only makes sense when DTGSY2 is called by */
+/* DTGSYL. */
+
+/* IWORK (workspace) INTEGER array, dimension (M+N+2) */
+
+/* PQ (output) INTEGER */
+/* On exit, the number of subsystems (of size 2-by-2, 4-by-4 and */
+/* 8-by-8) solved by this routine. */
+
+/* INFO (output) INTEGER */
+/* On exit, if INFO is set to */
+/* =0: Successful exit */
+/* <0: If INFO = -i, the i-th argument had an illegal value. */
+/* >0: The matrix pairs (A, D) and (B, E) have common or very */
+/* close eigenvalues. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* ===================================================================== */
+/* Replaced various illegal calls to DCOPY by calls to DLASET. */
+/* Sven Hammarling, 27/5/02. */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ d_dim1 = *ldd;
+ d_offset = 1 + d_dim1;
+ d__ -= d_offset;
+ e_dim1 = *lde;
+ e_offset = 1 + e_dim1;
+ e -= e_offset;
+ f_dim1 = *ldf;
+ f_offset = 1 + f_dim1;
+ f -= f_offset;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ ierr = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T")) {
+ *info = -1;
+ } else if (notran) {
+ if (*ijob < 0 || *ijob > 2) {
+ *info = -2;
+ }
+ }
+ if (*info == 0) {
+ if (*m <= 0) {
+ *info = -3;
+ } else if (*n <= 0) {
+ *info = -4;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*ldd < max(1,*m)) {
+ *info = -12;
+ } else if (*lde < max(1,*n)) {
+ *info = -14;
+ } else if (*ldf < max(1,*m)) {
+ *info = -16;
+ }
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTGSY2", &i__1);
+ return 0;
+ }
+
+/* Determine block structure of A */
+
+ *pq = 0;
+ p = 0;
+ i__ = 1;
+L10:
+ if (i__ > *m) {
+ goto L20;
+ }
+ ++p;
+ iwork[p] = i__;
+ if (i__ == *m) {
+ goto L20;
+ }
+ if (a[i__ + 1 + i__ * a_dim1] != 0.) {
+ i__ += 2;
+ } else {
+ ++i__;
+ }
+ goto L10;
+L20:
+ iwork[p + 1] = *m + 1;
+
+/* Determine block structure of B */
+
+ q = p + 1;
+ j = 1;
+L30:
+ if (j > *n) {
+ goto L40;
+ }
+ ++q;
+ iwork[q] = j;
+ if (j == *n) {
+ goto L40;
+ }
+ if (b[j + 1 + j * b_dim1] != 0.) {
+ j += 2;
+ } else {
+ ++j;
+ }
+ goto L30;
+L40:
+ iwork[q + 1] = *n + 1;
+ *pq = p * (q - p - 1);
+
+ if (notran) {
+
+/* Solve (I, J) - subsystem */
+/* A(I, I) * R(I, J) - L(I, J) * B(J, J) = C(I, J) */
+/* D(I, I) * R(I, J) - L(I, J) * E(J, J) = F(I, J) */
+/* for I = P, P - 1, ..., 1; J = 1, 2, ..., Q */
+
+ *scale = 1.;
+ scaloc = 1.;
+ i__1 = q;
+ for (j = p + 2; j <= i__1; ++j) {
+ js = iwork[j];
+ jsp1 = js + 1;
+ je = iwork[j + 1] - 1;
+ nb = je - js + 1;
+ for (i__ = p; i__ >= 1; --i__) {
+
+ is = iwork[i__];
+ isp1 = is + 1;
+ ie = iwork[i__ + 1] - 1;
+ mb = ie - is + 1;
+ zdim = mb * nb << 1;
+
+ if (mb == 1 && nb == 1) {
+
+/* Build a 2-by-2 system Z * x = RHS */
+
+ z__[0] = a[is + is * a_dim1];
+ z__[1] = d__[is + is * d_dim1];
+ z__[8] = -b[js + js * b_dim1];
+ z__[9] = -e[js + js * e_dim1];
+
+/* Set up right hand side(s) */
+
+ rhs[0] = c__[is + js * c_dim1];
+ rhs[1] = f[is + js * f_dim1];
+
+/* Solve Z * x = RHS */
+
+ dgetc2_(&zdim, z__, &c__8, ipiv, jpiv, &ierr);
+ if (ierr > 0) {
+ *info = ierr;
+ }
+
+ if (*ijob == 0) {
+ dgesc2_(&zdim, z__, &c__8, rhs, ipiv, jpiv, &scaloc);
+ if (scaloc != 1.) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ dscal_(m, &scaloc, &c__[k * c_dim1 + 1], &
+ c__1);
+ dscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L50: */
+ }
+ *scale *= scaloc;
+ }
+ } else {
+ dlatdf_(ijob, &zdim, z__, &c__8, rhs, rdsum, rdscal,
+ ipiv, jpiv);
+ }
+
+/* Unpack solution vector(s) */
+
+ c__[is + js * c_dim1] = rhs[0];
+ f[is + js * f_dim1] = rhs[1];
+
+/* Substitute R(I, J) and L(I, J) into remaining */
+/* equation. */
+
+ if (i__ > 1) {
+ alpha = -rhs[0];
+ i__2 = is - 1;
+ daxpy_(&i__2, &alpha, &a[is * a_dim1 + 1], &c__1, &
+ c__[js * c_dim1 + 1], &c__1);
+ i__2 = is - 1;
+ daxpy_(&i__2, &alpha, &d__[is * d_dim1 + 1], &c__1, &
+ f[js * f_dim1 + 1], &c__1);
+ }
+ if (j < q) {
+ i__2 = *n - je;
+ daxpy_(&i__2, &rhs[1], &b[js + (je + 1) * b_dim1],
+ ldb, &c__[is + (je + 1) * c_dim1], ldc);
+ i__2 = *n - je;
+ daxpy_(&i__2, &rhs[1], &e[js + (je + 1) * e_dim1],
+ lde, &f[is + (je + 1) * f_dim1], ldf);
+ }
+
+ } else if (mb == 1 && nb == 2) {
+
+/* Build a 4-by-4 system Z * x = RHS */
+
+ z__[0] = a[is + is * a_dim1];
+ z__[1] = 0.;
+ z__[2] = d__[is + is * d_dim1];
+ z__[3] = 0.;
+
+ z__[8] = 0.;
+ z__[9] = a[is + is * a_dim1];
+ z__[10] = 0.;
+ z__[11] = d__[is + is * d_dim1];
+
+ z__[16] = -b[js + js * b_dim1];
+ z__[17] = -b[js + jsp1 * b_dim1];
+ z__[18] = -e[js + js * e_dim1];
+ z__[19] = -e[js + jsp1 * e_dim1];
+
+ z__[24] = -b[jsp1 + js * b_dim1];
+ z__[25] = -b[jsp1 + jsp1 * b_dim1];
+ z__[26] = 0.;
+ z__[27] = -e[jsp1 + jsp1 * e_dim1];
+
+/* Set up right hand side(s) */
+
+ rhs[0] = c__[is + js * c_dim1];
+ rhs[1] = c__[is + jsp1 * c_dim1];
+ rhs[2] = f[is + js * f_dim1];
+ rhs[3] = f[is + jsp1 * f_dim1];
+
+/* Solve Z * x = RHS */
+
+ dgetc2_(&zdim, z__, &c__8, ipiv, jpiv, &ierr);
+ if (ierr > 0) {
+ *info = ierr;
+ }
+
+ if (*ijob == 0) {
+ dgesc2_(&zdim, z__, &c__8, rhs, ipiv, jpiv, &scaloc);
+ if (scaloc != 1.) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ dscal_(m, &scaloc, &c__[k * c_dim1 + 1], &
+ c__1);
+ dscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L60: */
+ }
+ *scale *= scaloc;
+ }
+ } else {
+ dlatdf_(ijob, &zdim, z__, &c__8, rhs, rdsum, rdscal,
+ ipiv, jpiv);
+ }
+
+/* Unpack solution vector(s) */
+
+ c__[is + js * c_dim1] = rhs[0];
+ c__[is + jsp1 * c_dim1] = rhs[1];
+ f[is + js * f_dim1] = rhs[2];
+ f[is + jsp1 * f_dim1] = rhs[3];
+
+/* Substitute R(I, J) and L(I, J) into remaining */
+/* equation. */
+
+ if (i__ > 1) {
+ i__2 = is - 1;
+ dger_(&i__2, &nb, &c_b27, &a[is * a_dim1 + 1], &c__1,
+ rhs, &c__1, &c__[js * c_dim1 + 1], ldc);
+ i__2 = is - 1;
+ dger_(&i__2, &nb, &c_b27, &d__[is * d_dim1 + 1], &
+ c__1, rhs, &c__1, &f[js * f_dim1 + 1], ldf);
+ }
+ if (j < q) {
+ i__2 = *n - je;
+ daxpy_(&i__2, &rhs[2], &b[js + (je + 1) * b_dim1],
+ ldb, &c__[is + (je + 1) * c_dim1], ldc);
+ i__2 = *n - je;
+ daxpy_(&i__2, &rhs[2], &e[js + (je + 1) * e_dim1],
+ lde, &f[is + (je + 1) * f_dim1], ldf);
+ i__2 = *n - je;
+ daxpy_(&i__2, &rhs[3], &b[jsp1 + (je + 1) * b_dim1],
+ ldb, &c__[is + (je + 1) * c_dim1], ldc);
+ i__2 = *n - je;
+ daxpy_(&i__2, &rhs[3], &e[jsp1 + (je + 1) * e_dim1],
+ lde, &f[is + (je + 1) * f_dim1], ldf);
+ }
+
+ } else if (mb == 2 && nb == 1) {
+
+/* Build a 4-by-4 system Z * x = RHS */
+
+ z__[0] = a[is + is * a_dim1];
+ z__[1] = a[isp1 + is * a_dim1];
+ z__[2] = d__[is + is * d_dim1];
+ z__[3] = 0.;
+
+ z__[8] = a[is + isp1 * a_dim1];
+ z__[9] = a[isp1 + isp1 * a_dim1];
+ z__[10] = d__[is + isp1 * d_dim1];
+ z__[11] = d__[isp1 + isp1 * d_dim1];
+
+ z__[16] = -b[js + js * b_dim1];
+ z__[17] = 0.;
+ z__[18] = -e[js + js * e_dim1];
+ z__[19] = 0.;
+
+ z__[24] = 0.;
+ z__[25] = -b[js + js * b_dim1];
+ z__[26] = 0.;
+ z__[27] = -e[js + js * e_dim1];
+
+/* Set up right hand side(s) */
+
+ rhs[0] = c__[is + js * c_dim1];
+ rhs[1] = c__[isp1 + js * c_dim1];
+ rhs[2] = f[is + js * f_dim1];
+ rhs[3] = f[isp1 + js * f_dim1];
+
+/* Solve Z * x = RHS */
+
+ dgetc2_(&zdim, z__, &c__8, ipiv, jpiv, &ierr);
+ if (ierr > 0) {
+ *info = ierr;
+ }
+ if (*ijob == 0) {
+ dgesc2_(&zdim, z__, &c__8, rhs, ipiv, jpiv, &scaloc);
+ if (scaloc != 1.) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ dscal_(m, &scaloc, &c__[k * c_dim1 + 1], &
+ c__1);
+ dscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L70: */
+ }
+ *scale *= scaloc;
+ }
+ } else {
+ dlatdf_(ijob, &zdim, z__, &c__8, rhs, rdsum, rdscal,
+ ipiv, jpiv);
+ }
+
+/* Unpack solution vector(s) */
+
+ c__[is + js * c_dim1] = rhs[0];
+ c__[isp1 + js * c_dim1] = rhs[1];
+ f[is + js * f_dim1] = rhs[2];
+ f[isp1 + js * f_dim1] = rhs[3];
+
+/* Substitute R(I, J) and L(I, J) into remaining */
+/* equation. */
+
+ if (i__ > 1) {
+ i__2 = is - 1;
+ dgemv_("N", &i__2, &mb, &c_b27, &a[is * a_dim1 + 1],
+ lda, rhs, &c__1, &c_b42, &c__[js * c_dim1 + 1]
+, &c__1);
+ i__2 = is - 1;
+ dgemv_("N", &i__2, &mb, &c_b27, &d__[is * d_dim1 + 1],
+ ldd, rhs, &c__1, &c_b42, &f[js * f_dim1 + 1],
+ &c__1);
+ }
+ if (j < q) {
+ i__2 = *n - je;
+ dger_(&mb, &i__2, &c_b42, &rhs[2], &c__1, &b[js + (je
+ + 1) * b_dim1], ldb, &c__[is + (je + 1) *
+ c_dim1], ldc);
+ i__2 = *n - je;
+ dger_(&mb, &i__2, &c_b42, &rhs[2], &c__1, &e[js + (je
+ + 1) * e_dim1], lde, &f[is + (je + 1) *
+ f_dim1], ldf);
+ }
+
+ } else if (mb == 2 && nb == 2) {
+
+/* Build an 8-by-8 system Z * x = RHS */
+
+ dlaset_("F", &c__8, &c__8, &c_b56, &c_b56, z__, &c__8);
+
+ z__[0] = a[is + is * a_dim1];
+ z__[1] = a[isp1 + is * a_dim1];
+ z__[4] = d__[is + is * d_dim1];
+
+ z__[8] = a[is + isp1 * a_dim1];
+ z__[9] = a[isp1 + isp1 * a_dim1];
+ z__[12] = d__[is + isp1 * d_dim1];
+ z__[13] = d__[isp1 + isp1 * d_dim1];
+
+ z__[18] = a[is + is * a_dim1];
+ z__[19] = a[isp1 + is * a_dim1];
+ z__[22] = d__[is + is * d_dim1];
+
+ z__[26] = a[is + isp1 * a_dim1];
+ z__[27] = a[isp1 + isp1 * a_dim1];
+ z__[30] = d__[is + isp1 * d_dim1];
+ z__[31] = d__[isp1 + isp1 * d_dim1];
+
+ z__[32] = -b[js + js * b_dim1];
+ z__[34] = -b[js + jsp1 * b_dim1];
+ z__[36] = -e[js + js * e_dim1];
+ z__[38] = -e[js + jsp1 * e_dim1];
+
+ z__[41] = -b[js + js * b_dim1];
+ z__[43] = -b[js + jsp1 * b_dim1];
+ z__[45] = -e[js + js * e_dim1];
+ z__[47] = -e[js + jsp1 * e_dim1];
+
+ z__[48] = -b[jsp1 + js * b_dim1];
+ z__[50] = -b[jsp1 + jsp1 * b_dim1];
+ z__[54] = -e[jsp1 + jsp1 * e_dim1];
+
+ z__[57] = -b[jsp1 + js * b_dim1];
+ z__[59] = -b[jsp1 + jsp1 * b_dim1];
+ z__[63] = -e[jsp1 + jsp1 * e_dim1];
+
+/* Set up right hand side(s) */
+
+ k = 1;
+ ii = mb * nb + 1;
+ i__2 = nb - 1;
+ for (jj = 0; jj <= i__2; ++jj) {
+ dcopy_(&mb, &c__[is + (js + jj) * c_dim1], &c__1, &
+ rhs[k - 1], &c__1);
+ dcopy_(&mb, &f[is + (js + jj) * f_dim1], &c__1, &rhs[
+ ii - 1], &c__1);
+ k += mb;
+ ii += mb;
+/* L80: */
+ }
+
+/* Solve Z * x = RHS */
+
+ dgetc2_(&zdim, z__, &c__8, ipiv, jpiv, &ierr);
+ if (ierr > 0) {
+ *info = ierr;
+ }
+ if (*ijob == 0) {
+ dgesc2_(&zdim, z__, &c__8, rhs, ipiv, jpiv, &scaloc);
+ if (scaloc != 1.) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ dscal_(m, &scaloc, &c__[k * c_dim1 + 1], &
+ c__1);
+ dscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L90: */
+ }
+ *scale *= scaloc;
+ }
+ } else {
+ dlatdf_(ijob, &zdim, z__, &c__8, rhs, rdsum, rdscal,
+ ipiv, jpiv);
+ }
+
+/* Unpack solution vector(s) */
+
+ k = 1;
+ ii = mb * nb + 1;
+ i__2 = nb - 1;
+ for (jj = 0; jj <= i__2; ++jj) {
+ dcopy_(&mb, &rhs[k - 1], &c__1, &c__[is + (js + jj) *
+ c_dim1], &c__1);
+ dcopy_(&mb, &rhs[ii - 1], &c__1, &f[is + (js + jj) *
+ f_dim1], &c__1);
+ k += mb;
+ ii += mb;
+/* L100: */
+ }
+
+/* Substitute R(I, J) and L(I, J) into remaining */
+/* equation. */
+
+ if (i__ > 1) {
+ i__2 = is - 1;
+ dgemm_("N", "N", &i__2, &nb, &mb, &c_b27, &a[is *
+ a_dim1 + 1], lda, rhs, &mb, &c_b42, &c__[js *
+ c_dim1 + 1], ldc);
+ i__2 = is - 1;
+ dgemm_("N", "N", &i__2, &nb, &mb, &c_b27, &d__[is *
+ d_dim1 + 1], ldd, rhs, &mb, &c_b42, &f[js *
+ f_dim1 + 1], ldf);
+ }
+ if (j < q) {
+ k = mb * nb + 1;
+ i__2 = *n - je;
+ dgemm_("N", "N", &mb, &i__2, &nb, &c_b42, &rhs[k - 1],
+ &mb, &b[js + (je + 1) * b_dim1], ldb, &c_b42,
+ &c__[is + (je + 1) * c_dim1], ldc);
+ i__2 = *n - je;
+ dgemm_("N", "N", &mb, &i__2, &nb, &c_b42, &rhs[k - 1],
+ &mb, &e[js + (je + 1) * e_dim1], lde, &c_b42,
+ &f[is + (je + 1) * f_dim1], ldf);
+ }
+
+ }
+
+/* L110: */
+ }
+/* L120: */
+ }
+ } else {
+
+/* Solve (I, J) - subsystem */
+/* A(I, I)' * R(I, J) + D(I, I)' * L(J, J) = C(I, J) */
+/* R(I, I) * B(J, J) + L(I, J) * E(J, J) = -F(I, J) */
+/* for I = 1, 2, ..., P, J = Q, Q - 1, ..., 1 */
+
+ *scale = 1.;
+ scaloc = 1.;
+ i__1 = p;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+ is = iwork[i__];
+ isp1 = is + 1;
+ ie = i__;
+ mb = ie - is + 1;
+ i__2 = p + 2;
+ for (j = q; j >= i__2; --j) {
+
+ js = iwork[j];
+ jsp1 = js + 1;
+ je = iwork[j + 1] - 1;
+ nb = je - js + 1;
+ zdim = mb * nb << 1;
+ if (mb == 1 && nb == 1) {
+
+/* Build a 2-by-2 system Z' * x = RHS */
+
+ z__[0] = a[is + is * a_dim1];
+ z__[1] = -b[js + js * b_dim1];
+ z__[8] = d__[is + is * d_dim1];
+ z__[9] = -e[js + js * e_dim1];
+
+/* Set up right hand side(s) */
+
+ rhs[0] = c__[is + js * c_dim1];
+ rhs[1] = f[is + js * f_dim1];
+
+/* Solve Z' * x = RHS */
+
+ dgetc2_(&zdim, z__, &c__8, ipiv, jpiv, &ierr);
+ if (ierr > 0) {
+ *info = ierr;
+ }
+
+ dgesc2_(&zdim, z__, &c__8, rhs, ipiv, jpiv, &scaloc);
+ if (scaloc != 1.) {
+ i__3 = *n;
+ for (k = 1; k <= i__3; ++k) {
+ dscal_(m, &scaloc, &c__[k * c_dim1 + 1], &c__1);
+ dscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L130: */
+ }
+ *scale *= scaloc;
+ }
+
+/* Unpack solution vector(s) */
+
+ c__[is + js * c_dim1] = rhs[0];
+ f[is + js * f_dim1] = rhs[1];
+
+/* Substitute R(I, J) and L(I, J) into remaining */
+/* equation. */
+
+ if (j > p + 2) {
+ alpha = rhs[0];
+ i__3 = js - 1;
+ daxpy_(&i__3, &alpha, &b[js * b_dim1 + 1], &c__1, &f[
+ is + f_dim1], ldf);
+ alpha = rhs[1];
+ i__3 = js - 1;
+ daxpy_(&i__3, &alpha, &e[js * e_dim1 + 1], &c__1, &f[
+ is + f_dim1], ldf);
+ }
+ if (i__ < p) {
+ alpha = -rhs[0];
+ i__3 = *m - ie;
+ daxpy_(&i__3, &alpha, &a[is + (ie + 1) * a_dim1], lda,
+ &c__[ie + 1 + js * c_dim1], &c__1);
+ alpha = -rhs[1];
+ i__3 = *m - ie;
+ daxpy_(&i__3, &alpha, &d__[is + (ie + 1) * d_dim1],
+ ldd, &c__[ie + 1 + js * c_dim1], &c__1);
+ }
+
+ } else if (mb == 1 && nb == 2) {
+
+/* Build a 4-by-4 system Z' * x = RHS */
+
+ z__[0] = a[is + is * a_dim1];
+ z__[1] = 0.;
+ z__[2] = -b[js + js * b_dim1];
+ z__[3] = -b[jsp1 + js * b_dim1];
+
+ z__[8] = 0.;
+ z__[9] = a[is + is * a_dim1];
+ z__[10] = -b[js + jsp1 * b_dim1];
+ z__[11] = -b[jsp1 + jsp1 * b_dim1];
+
+ z__[16] = d__[is + is * d_dim1];
+ z__[17] = 0.;
+ z__[18] = -e[js + js * e_dim1];
+ z__[19] = 0.;
+
+ z__[24] = 0.;
+ z__[25] = d__[is + is * d_dim1];
+ z__[26] = -e[js + jsp1 * e_dim1];
+ z__[27] = -e[jsp1 + jsp1 * e_dim1];
+
+/* Set up right hand side(s) */
+
+ rhs[0] = c__[is + js * c_dim1];
+ rhs[1] = c__[is + jsp1 * c_dim1];
+ rhs[2] = f[is + js * f_dim1];
+ rhs[3] = f[is + jsp1 * f_dim1];
+
+/* Solve Z' * x = RHS */
+
+ dgetc2_(&zdim, z__, &c__8, ipiv, jpiv, &ierr);
+ if (ierr > 0) {
+ *info = ierr;
+ }
+ dgesc2_(&zdim, z__, &c__8, rhs, ipiv, jpiv, &scaloc);
+ if (scaloc != 1.) {
+ i__3 = *n;
+ for (k = 1; k <= i__3; ++k) {
+ dscal_(m, &scaloc, &c__[k * c_dim1 + 1], &c__1);
+ dscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L140: */
+ }
+ *scale *= scaloc;
+ }
+
+/* Unpack solution vector(s) */
+
+ c__[is + js * c_dim1] = rhs[0];
+ c__[is + jsp1 * c_dim1] = rhs[1];
+ f[is + js * f_dim1] = rhs[2];
+ f[is + jsp1 * f_dim1] = rhs[3];
+
+/* Substitute R(I, J) and L(I, J) into remaining */
+/* equation. */
+
+ if (j > p + 2) {
+ i__3 = js - 1;
+ daxpy_(&i__3, rhs, &b[js * b_dim1 + 1], &c__1, &f[is
+ + f_dim1], ldf);
+ i__3 = js - 1;
+ daxpy_(&i__3, &rhs[1], &b[jsp1 * b_dim1 + 1], &c__1, &
+ f[is + f_dim1], ldf);
+ i__3 = js - 1;
+ daxpy_(&i__3, &rhs[2], &e[js * e_dim1 + 1], &c__1, &f[
+ is + f_dim1], ldf);
+ i__3 = js - 1;
+ daxpy_(&i__3, &rhs[3], &e[jsp1 * e_dim1 + 1], &c__1, &
+ f[is + f_dim1], ldf);
+ }
+ if (i__ < p) {
+ i__3 = *m - ie;
+ dger_(&i__3, &nb, &c_b27, &a[is + (ie + 1) * a_dim1],
+ lda, rhs, &c__1, &c__[ie + 1 + js * c_dim1],
+ ldc);
+ i__3 = *m - ie;
+ dger_(&i__3, &nb, &c_b27, &d__[is + (ie + 1) * d_dim1]
+, ldd, &rhs[2], &c__1, &c__[ie + 1 + js *
+ c_dim1], ldc);
+ }
+
+ } else if (mb == 2 && nb == 1) {
+
+/* Build a 4-by-4 system Z' * x = RHS */
+
+ z__[0] = a[is + is * a_dim1];
+ z__[1] = a[is + isp1 * a_dim1];
+ z__[2] = -b[js + js * b_dim1];
+ z__[3] = 0.;
+
+ z__[8] = a[isp1 + is * a_dim1];
+ z__[9] = a[isp1 + isp1 * a_dim1];
+ z__[10] = 0.;
+ z__[11] = -b[js + js * b_dim1];
+
+ z__[16] = d__[is + is * d_dim1];
+ z__[17] = d__[is + isp1 * d_dim1];
+ z__[18] = -e[js + js * e_dim1];
+ z__[19] = 0.;
+
+ z__[24] = 0.;
+ z__[25] = d__[isp1 + isp1 * d_dim1];
+ z__[26] = 0.;
+ z__[27] = -e[js + js * e_dim1];
+
+/* Set up right hand side(s) */
+
+ rhs[0] = c__[is + js * c_dim1];
+ rhs[1] = c__[isp1 + js * c_dim1];
+ rhs[2] = f[is + js * f_dim1];
+ rhs[3] = f[isp1 + js * f_dim1];
+
+/* Solve Z' * x = RHS */
+
+ dgetc2_(&zdim, z__, &c__8, ipiv, jpiv, &ierr);
+ if (ierr > 0) {
+ *info = ierr;
+ }
+
+ dgesc2_(&zdim, z__, &c__8, rhs, ipiv, jpiv, &scaloc);
+ if (scaloc != 1.) {
+ i__3 = *n;
+ for (k = 1; k <= i__3; ++k) {
+ dscal_(m, &scaloc, &c__[k * c_dim1 + 1], &c__1);
+ dscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L150: */
+ }
+ *scale *= scaloc;
+ }
+
+/* Unpack solution vector(s) */
+
+ c__[is + js * c_dim1] = rhs[0];
+ c__[isp1 + js * c_dim1] = rhs[1];
+ f[is + js * f_dim1] = rhs[2];
+ f[isp1 + js * f_dim1] = rhs[3];
+
+/* Substitute R(I, J) and L(I, J) into remaining */
+/* equation. */
+
+ if (j > p + 2) {
+ i__3 = js - 1;
+ dger_(&mb, &i__3, &c_b42, rhs, &c__1, &b[js * b_dim1
+ + 1], &c__1, &f[is + f_dim1], ldf);
+ i__3 = js - 1;
+ dger_(&mb, &i__3, &c_b42, &rhs[2], &c__1, &e[js *
+ e_dim1 + 1], &c__1, &f[is + f_dim1], ldf);
+ }
+ if (i__ < p) {
+ i__3 = *m - ie;
+ dgemv_("T", &mb, &i__3, &c_b27, &a[is + (ie + 1) *
+ a_dim1], lda, rhs, &c__1, &c_b42, &c__[ie + 1
+ + js * c_dim1], &c__1);
+ i__3 = *m - ie;
+ dgemv_("T", &mb, &i__3, &c_b27, &d__[is + (ie + 1) *
+ d_dim1], ldd, &rhs[2], &c__1, &c_b42, &c__[ie
+ + 1 + js * c_dim1], &c__1);
+ }
+
+ } else if (mb == 2 && nb == 2) {
+
+/* Build an 8-by-8 system Z' * x = RHS */
+
+ dlaset_("F", &c__8, &c__8, &c_b56, &c_b56, z__, &c__8);
+
+ z__[0] = a[is + is * a_dim1];
+ z__[1] = a[is + isp1 * a_dim1];
+ z__[4] = -b[js + js * b_dim1];
+ z__[6] = -b[jsp1 + js * b_dim1];
+
+ z__[8] = a[isp1 + is * a_dim1];
+ z__[9] = a[isp1 + isp1 * a_dim1];
+ z__[13] = -b[js + js * b_dim1];
+ z__[15] = -b[jsp1 + js * b_dim1];
+
+ z__[18] = a[is + is * a_dim1];
+ z__[19] = a[is + isp1 * a_dim1];
+ z__[20] = -b[js + jsp1 * b_dim1];
+ z__[22] = -b[jsp1 + jsp1 * b_dim1];
+
+ z__[26] = a[isp1 + is * a_dim1];
+ z__[27] = a[isp1 + isp1 * a_dim1];
+ z__[29] = -b[js + jsp1 * b_dim1];
+ z__[31] = -b[jsp1 + jsp1 * b_dim1];
+
+ z__[32] = d__[is + is * d_dim1];
+ z__[33] = d__[is + isp1 * d_dim1];
+ z__[36] = -e[js + js * e_dim1];
+
+ z__[41] = d__[isp1 + isp1 * d_dim1];
+ z__[45] = -e[js + js * e_dim1];
+
+ z__[50] = d__[is + is * d_dim1];
+ z__[51] = d__[is + isp1 * d_dim1];
+ z__[52] = -e[js + jsp1 * e_dim1];
+ z__[54] = -e[jsp1 + jsp1 * e_dim1];
+
+ z__[59] = d__[isp1 + isp1 * d_dim1];
+ z__[61] = -e[js + jsp1 * e_dim1];
+ z__[63] = -e[jsp1 + jsp1 * e_dim1];
+
+/* Set up right hand side(s) */
+
+ k = 1;
+ ii = mb * nb + 1;
+ i__3 = nb - 1;
+ for (jj = 0; jj <= i__3; ++jj) {
+ dcopy_(&mb, &c__[is + (js + jj) * c_dim1], &c__1, &
+ rhs[k - 1], &c__1);
+ dcopy_(&mb, &f[is + (js + jj) * f_dim1], &c__1, &rhs[
+ ii - 1], &c__1);
+ k += mb;
+ ii += mb;
+/* L160: */
+ }
+
+
+/* Solve Z' * x = RHS */
+
+ dgetc2_(&zdim, z__, &c__8, ipiv, jpiv, &ierr);
+ if (ierr > 0) {
+ *info = ierr;
+ }
+
+ dgesc2_(&zdim, z__, &c__8, rhs, ipiv, jpiv, &scaloc);
+ if (scaloc != 1.) {
+ i__3 = *n;
+ for (k = 1; k <= i__3; ++k) {
+ dscal_(m, &scaloc, &c__[k * c_dim1 + 1], &c__1);
+ dscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L170: */
+ }
+ *scale *= scaloc;
+ }
+
+/* Unpack solution vector(s) */
+
+ k = 1;
+ ii = mb * nb + 1;
+ i__3 = nb - 1;
+ for (jj = 0; jj <= i__3; ++jj) {
+ dcopy_(&mb, &rhs[k - 1], &c__1, &c__[is + (js + jj) *
+ c_dim1], &c__1);
+ dcopy_(&mb, &rhs[ii - 1], &c__1, &f[is + (js + jj) *
+ f_dim1], &c__1);
+ k += mb;
+ ii += mb;
+/* L180: */
+ }
+
+/* Substitute R(I, J) and L(I, J) into remaining */
+/* equation. */
+
+ if (j > p + 2) {
+ i__3 = js - 1;
+ dgemm_("N", "T", &mb, &i__3, &nb, &c_b42, &c__[is +
+ js * c_dim1], ldc, &b[js * b_dim1 + 1], ldb, &
+ c_b42, &f[is + f_dim1], ldf);
+ i__3 = js - 1;
+ dgemm_("N", "T", &mb, &i__3, &nb, &c_b42, &f[is + js *
+ f_dim1], ldf, &e[js * e_dim1 + 1], lde, &
+ c_b42, &f[is + f_dim1], ldf);
+ }
+ if (i__ < p) {
+ i__3 = *m - ie;
+ dgemm_("T", "N", &i__3, &nb, &mb, &c_b27, &a[is + (ie
+ + 1) * a_dim1], lda, &c__[is + js * c_dim1],
+ ldc, &c_b42, &c__[ie + 1 + js * c_dim1], ldc);
+ i__3 = *m - ie;
+ dgemm_("T", "N", &i__3, &nb, &mb, &c_b27, &d__[is + (
+ ie + 1) * d_dim1], ldd, &f[is + js * f_dim1],
+ ldf, &c_b42, &c__[ie + 1 + js * c_dim1], ldc);
+ }
+
+ }
+
+/* L190: */
+ }
+/* L200: */
+ }
+
+ }
+ return 0;
+
+/* End of DTGSY2 */
+
+} /* dtgsy2_ */
diff --git a/contrib/libs/clapack/dtgsyl.c b/contrib/libs/clapack/dtgsyl.c
new file mode 100644
index 0000000000..5c44e8a407
--- /dev/null
+++ b/contrib/libs/clapack/dtgsyl.c
@@ -0,0 +1,692 @@
+/* dtgsyl.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__2 = 2;
+static integer c_n1 = -1;
+static integer c__5 = 5;
+static doublereal c_b14 = 0.;
+static integer c__1 = 1;
+static doublereal c_b51 = -1.;
+static doublereal c_b52 = 1.;
+
+/* Subroutine */ int dtgsyl_(char *trans, integer *ijob, integer *m, integer *
+ n, doublereal *a, integer *lda, doublereal *b, integer *ldb,
+ doublereal *c__, integer *ldc, doublereal *d__, integer *ldd,
+ doublereal *e, integer *lde, doublereal *f, integer *ldf, doublereal *
+ scale, doublereal *dif, doublereal *work, integer *lwork, integer *
+ iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, d_dim1,
+ d_offset, e_dim1, e_offset, f_dim1, f_offset, i__1, i__2, i__3,
+ i__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k, p, q, ie, je, mb, nb, is, js, pq;
+ doublereal dsum;
+ integer ppqq;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *), dgemm_(char *, char *, integer *, integer *, integer *
+, doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+ extern logical lsame_(char *, char *);
+ integer ifunc, linfo, lwmin;
+ doublereal scale2;
+ extern /* Subroutine */ int dtgsy2_(char *, integer *, integer *, integer
+ *, doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, integer *, integer *);
+ doublereal dscale, scaloc;
+ extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *),
+ dlaset_(char *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ integer iround;
+ logical notran;
+ integer isolve;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTGSYL solves the generalized Sylvester equation: */
+
+/* A * R - L * B = scale * C (1) */
+/* D * R - L * E = scale * F */
+
+/* where R and L are unknown m-by-n matrices, (A, D), (B, E) and */
+/* (C, F) are given matrix pairs of size m-by-m, n-by-n and m-by-n, */
+/* respectively, with real entries. (A, D) and (B, E) must be in */
+/* generalized (real) Schur canonical form, i.e. A, B are upper quasi */
+/* triangular and D, E are upper triangular. */
+
+/* The solution (R, L) overwrites (C, F). 0 <= SCALE <= 1 is an output */
+/* scaling factor chosen to avoid overflow. */
+
+/* In matrix notation (1) is equivalent to solve Zx = scale b, where */
+/* Z is defined as */
+
+/* Z = [ kron(In, A) -kron(B', Im) ] (2) */
+/* [ kron(In, D) -kron(E', Im) ]. */
+
+/* Here Ik is the identity matrix of size k and X' is the transpose of */
+/* X. kron(X, Y) is the Kronecker product between the matrices X and Y. */
+
+/* If TRANS = 'T', DTGSYL solves the transposed system Z'*y = scale*b, */
+/* which is equivalent to solve for R and L in */
+
+/* A' * R + D' * L = scale * C (3) */
+/* R * B' + L * E' = scale * (-F) */
+
+/* This case (TRANS = 'T') is used to compute an one-norm-based estimate */
+/* of Dif[(A,D), (B,E)], the separation between the matrix pairs (A,D) */
+/* and (B,E), using DLACON. */
+
+/* If IJOB >= 1, DTGSYL computes a Frobenius norm-based estimate */
+/* of Dif[(A,D),(B,E)]. That is, the reciprocal of a lower bound on the */
+/* reciprocal of the smallest singular value of Z. See [1-2] for more */
+/* information. */
+
+/* This is a level 3 BLAS algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N', solve the generalized Sylvester equation (1). */
+/* = 'T', solve the 'transposed' system (3). */
+
+/* IJOB (input) INTEGER */
+/* Specifies what kind of functionality to be performed. */
+/* =0: solve (1) only. */
+/* =1: The functionality of 0 and 3. */
+/* =2: The functionality of 0 and 4. */
+/* =3: Only an estimate of Dif[(A,D), (B,E)] is computed. */
+/* (look ahead strategy IJOB = 1 is used). */
+/* =4: Only an estimate of Dif[(A,D), (B,E)] is computed. */
+/* ( DGECON on sub-systems is used ). */
+/* Not referenced if TRANS = 'T'. */
+
+/* M (input) INTEGER */
+/* The order of the matrices A and D, and the row dimension of */
+/* the matrices C, F, R and L. */
+
+/* N (input) INTEGER */
+/* The order of the matrices B and E, and the column dimension */
+/* of the matrices C, F, R and L. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA, M) */
+/* The upper quasi triangular matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1, M). */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB, N) */
+/* The upper quasi triangular matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1, N). */
+
+/* C (input/output) DOUBLE PRECISION array, dimension (LDC, N) */
+/* On entry, C contains the right-hand-side of the first matrix */
+/* equation in (1) or (3). */
+/* On exit, if IJOB = 0, 1 or 2, C has been overwritten by */
+/* the solution R. If IJOB = 3 or 4 and TRANS = 'N', C holds R, */
+/* the solution achieved during the computation of the */
+/* Dif-estimate. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1, M). */
+
+/* D (input) DOUBLE PRECISION array, dimension (LDD, M) */
+/* The upper triangular matrix D. */
+
+/* LDD (input) INTEGER */
+/* The leading dimension of the array D. LDD >= max(1, M). */
+
+/* E (input) DOUBLE PRECISION array, dimension (LDE, N) */
+/* The upper triangular matrix E. */
+
+/* LDE (input) INTEGER */
+/* The leading dimension of the array E. LDE >= max(1, N). */
+
+/* F (input/output) DOUBLE PRECISION array, dimension (LDF, N) */
+/* On entry, F contains the right-hand-side of the second matrix */
+/* equation in (1) or (3). */
+/* On exit, if IJOB = 0, 1 or 2, F has been overwritten by */
+/* the solution L. If IJOB = 3 or 4 and TRANS = 'N', F holds L, */
+/* the solution achieved during the computation of the */
+/* Dif-estimate. */
+
+/* LDF (input) INTEGER */
+/* The leading dimension of the array F. LDF >= max(1, M). */
+
+/* DIF (output) DOUBLE PRECISION */
+/* On exit DIF is the reciprocal of a lower bound of the */
+/* reciprocal of the Dif-function, i.e. DIF is an upper bound of */
+/* Dif[(A,D), (B,E)] = sigma_min(Z), where Z as in (2). */
+/* IF IJOB = 0 or TRANS = 'T', DIF is not touched. */
+
+/* SCALE (output) DOUBLE PRECISION */
+/* On exit SCALE is the scaling factor in (1) or (3). */
+/* If 0 < SCALE < 1, C and F hold the solutions R and L, resp., */
+/* to a slightly perturbed system but the input matrices A, B, D */
+/* and E have not been changed. If SCALE = 0, C and F hold the */
+/* solutions R and L, respectively, to the homogeneous system */
+/* with C = F = 0. Normally, SCALE = 1. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK > = 1. */
+/* If IJOB = 1 or 2 and TRANS = 'N', LWORK >= max(1,2*M*N). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (M+N+6) */
+
+/* INFO (output) INTEGER */
+/* =0: successful exit */
+/* <0: If INFO = -i, the i-th argument had an illegal value. */
+/* >0: (A, D) and (B, E) have common or close eigenvalues. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* [1] B. Kagstrom and P. Poromaa, LAPACK-Style Algorithms and Software */
+/* for Solving the Generalized Sylvester Equation and Estimating the */
+/* Separation between Regular Matrix Pairs, Report UMINF - 93.23, */
+/* Department of Computing Science, Umea University, S-901 87 Umea, */
+/* Sweden, December 1993, Revised April 1994, Also as LAPACK Working */
+/* Note 75. To appear in ACM Trans. on Math. Software, Vol 22, */
+/* No 1, 1996. */
+
+/* [2] B. Kagstrom, A Perturbation Analysis of the Generalized Sylvester */
+/* Equation (AR - LB, DR - LE ) = (C, F), SIAM J. Matrix Anal. */
+/* Appl., 15(4):1045-1060, 1994 */
+
+/* [3] B. Kagstrom and L. Westin, Generalized Schur Methods with */
+/* Condition Estimators for Solving the Generalized Sylvester */
+/* Equation, IEEE Transactions on Automatic Control, Vol. 34, No. 7, */
+/* July 1989, pp 745-751. */
+
+/* ===================================================================== */
+/* Replaced various illegal calls to DCOPY by calls to DLASET. */
+/* Sven Hammarling, 1/5/02. */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ d_dim1 = *ldd;
+ d_offset = 1 + d_dim1;
+ d__ -= d_offset;
+ e_dim1 = *lde;
+ e_offset = 1 + e_dim1;
+ e -= e_offset;
+ f_dim1 = *ldf;
+ f_offset = 1 + f_dim1;
+ f -= f_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+ if (! notran && ! lsame_(trans, "T")) {
+ *info = -1;
+ } else if (notran) {
+ if (*ijob < 0 || *ijob > 4) {
+ *info = -2;
+ }
+ }
+ if (*info == 0) {
+ if (*m <= 0) {
+ *info = -3;
+ } else if (*n <= 0) {
+ *info = -4;
+ } else if (*lda < max(1,*m)) {
+ *info = -6;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*ldd < max(1,*m)) {
+ *info = -12;
+ } else if (*lde < max(1,*n)) {
+ *info = -14;
+ } else if (*ldf < max(1,*m)) {
+ *info = -16;
+ }
+ }
+
+ if (*info == 0) {
+ if (notran) {
+ if (*ijob == 1 || *ijob == 2) {
+/* Computing MAX */
+ i__1 = 1, i__2 = (*m << 1) * *n;
+ lwmin = max(i__1,i__2);
+ } else {
+ lwmin = 1;
+ }
+ } else {
+ lwmin = 1;
+ }
+ work[1] = (doublereal) lwmin;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -20;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTGSYL", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ *scale = 1.;
+ if (notran) {
+ if (*ijob != 0) {
+ *dif = 0.;
+ }
+ }
+ return 0;
+ }
+
+/* Determine optimal block sizes MB and NB */
+
+ mb = ilaenv_(&c__2, "DTGSYL", trans, m, n, &c_n1, &c_n1);
+ nb = ilaenv_(&c__5, "DTGSYL", trans, m, n, &c_n1, &c_n1);
+
+ isolve = 1;
+ ifunc = 0;
+ if (notran) {
+ if (*ijob >= 3) {
+ ifunc = *ijob - 2;
+ dlaset_("F", m, n, &c_b14, &c_b14, &c__[c_offset], ldc)
+ ;
+ dlaset_("F", m, n, &c_b14, &c_b14, &f[f_offset], ldf);
+ } else if (*ijob >= 1) {
+ isolve = 2;
+ }
+ }
+
+ if (mb <= 1 && nb <= 1 || mb >= *m && nb >= *n) {
+
+ i__1 = isolve;
+ for (iround = 1; iround <= i__1; ++iround) {
+
+/* Use unblocked Level 2 solver */
+
+ dscale = 0.;
+ dsum = 1.;
+ pq = 0;
+ dtgsy2_(trans, &ifunc, m, n, &a[a_offset], lda, &b[b_offset], ldb,
+ &c__[c_offset], ldc, &d__[d_offset], ldd, &e[e_offset],
+ lde, &f[f_offset], ldf, scale, &dsum, &dscale, &iwork[1],
+ &pq, info);
+ if (dscale != 0.) {
+ if (*ijob == 1 || *ijob == 3) {
+ *dif = sqrt((doublereal) ((*m << 1) * *n)) / (dscale *
+ sqrt(dsum));
+ } else {
+ *dif = sqrt((doublereal) pq) / (dscale * sqrt(dsum));
+ }
+ }
+
+ if (isolve == 2 && iround == 1) {
+ if (notran) {
+ ifunc = *ijob;
+ }
+ scale2 = *scale;
+ dlacpy_("F", m, n, &c__[c_offset], ldc, &work[1], m);
+ dlacpy_("F", m, n, &f[f_offset], ldf, &work[*m * *n + 1], m);
+ dlaset_("F", m, n, &c_b14, &c_b14, &c__[c_offset], ldc);
+ dlaset_("F", m, n, &c_b14, &c_b14, &f[f_offset], ldf);
+ } else if (isolve == 2 && iround == 2) {
+ dlacpy_("F", m, n, &work[1], m, &c__[c_offset], ldc);
+ dlacpy_("F", m, n, &work[*m * *n + 1], m, &f[f_offset], ldf);
+ *scale = scale2;
+ }
+/* L30: */
+ }
+
+ return 0;
+ }
+
+/* Determine block structure of A */
+
+ p = 0;
+ i__ = 1;
+L40:
+ if (i__ > *m) {
+ goto L50;
+ }
+ ++p;
+ iwork[p] = i__;
+ i__ += mb;
+ if (i__ >= *m) {
+ goto L50;
+ }
+ if (a[i__ + (i__ - 1) * a_dim1] != 0.) {
+ ++i__;
+ }
+ goto L40;
+L50:
+
+ iwork[p + 1] = *m + 1;
+ if (iwork[p] == iwork[p + 1]) {
+ --p;
+ }
+
+/* Determine block structure of B */
+
+ q = p + 1;
+ j = 1;
+L60:
+ if (j > *n) {
+ goto L70;
+ }
+ ++q;
+ iwork[q] = j;
+ j += nb;
+ if (j >= *n) {
+ goto L70;
+ }
+ if (b[j + (j - 1) * b_dim1] != 0.) {
+ ++j;
+ }
+ goto L60;
+L70:
+
+ iwork[q + 1] = *n + 1;
+ if (iwork[q] == iwork[q + 1]) {
+ --q;
+ }
+
+ if (notran) {
+
+ i__1 = isolve;
+ for (iround = 1; iround <= i__1; ++iround) {
+
+/* Solve (I, J)-subsystem */
+/* A(I, I) * R(I, J) - L(I, J) * B(J, J) = C(I, J) */
+/* D(I, I) * R(I, J) - L(I, J) * E(J, J) = F(I, J) */
+/* for I = P, P - 1,..., 1; J = 1, 2,..., Q */
+
+ dscale = 0.;
+ dsum = 1.;
+ pq = 0;
+ *scale = 1.;
+ i__2 = q;
+ for (j = p + 2; j <= i__2; ++j) {
+ js = iwork[j];
+ je = iwork[j + 1] - 1;
+ nb = je - js + 1;
+ for (i__ = p; i__ >= 1; --i__) {
+ is = iwork[i__];
+ ie = iwork[i__ + 1] - 1;
+ mb = ie - is + 1;
+ ppqq = 0;
+ dtgsy2_(trans, &ifunc, &mb, &nb, &a[is + is * a_dim1],
+ lda, &b[js + js * b_dim1], ldb, &c__[is + js *
+ c_dim1], ldc, &d__[is + is * d_dim1], ldd, &e[js
+ + js * e_dim1], lde, &f[is + js * f_dim1], ldf, &
+ scaloc, &dsum, &dscale, &iwork[q + 2], &ppqq, &
+ linfo);
+ if (linfo > 0) {
+ *info = linfo;
+ }
+
+ pq += ppqq;
+ if (scaloc != 1.) {
+ i__3 = js - 1;
+ for (k = 1; k <= i__3; ++k) {
+ dscal_(m, &scaloc, &c__[k * c_dim1 + 1], &c__1);
+ dscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L80: */
+ }
+ i__3 = je;
+ for (k = js; k <= i__3; ++k) {
+ i__4 = is - 1;
+ dscal_(&i__4, &scaloc, &c__[k * c_dim1 + 1], &
+ c__1);
+ i__4 = is - 1;
+ dscal_(&i__4, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L90: */
+ }
+ i__3 = je;
+ for (k = js; k <= i__3; ++k) {
+ i__4 = *m - ie;
+ dscal_(&i__4, &scaloc, &c__[ie + 1 + k * c_dim1],
+ &c__1);
+ i__4 = *m - ie;
+ dscal_(&i__4, &scaloc, &f[ie + 1 + k * f_dim1], &
+ c__1);
+/* L100: */
+ }
+ i__3 = *n;
+ for (k = je + 1; k <= i__3; ++k) {
+ dscal_(m, &scaloc, &c__[k * c_dim1 + 1], &c__1);
+ dscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L110: */
+ }
+ *scale *= scaloc;
+ }
+
+/* Substitute R(I, J) and L(I, J) into remaining */
+/* equation. */
+
+ if (i__ > 1) {
+ i__3 = is - 1;
+ dgemm_("N", "N", &i__3, &nb, &mb, &c_b51, &a[is *
+ a_dim1 + 1], lda, &c__[is + js * c_dim1], ldc,
+ &c_b52, &c__[js * c_dim1 + 1], ldc);
+ i__3 = is - 1;
+ dgemm_("N", "N", &i__3, &nb, &mb, &c_b51, &d__[is *
+ d_dim1 + 1], ldd, &c__[is + js * c_dim1], ldc,
+ &c_b52, &f[js * f_dim1 + 1], ldf);
+ }
+ if (j < q) {
+ i__3 = *n - je;
+ dgemm_("N", "N", &mb, &i__3, &nb, &c_b52, &f[is + js *
+ f_dim1], ldf, &b[js + (je + 1) * b_dim1],
+ ldb, &c_b52, &c__[is + (je + 1) * c_dim1],
+ ldc);
+ i__3 = *n - je;
+ dgemm_("N", "N", &mb, &i__3, &nb, &c_b52, &f[is + js *
+ f_dim1], ldf, &e[js + (je + 1) * e_dim1],
+ lde, &c_b52, &f[is + (je + 1) * f_dim1], ldf);
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ if (dscale != 0.) {
+ if (*ijob == 1 || *ijob == 3) {
+ *dif = sqrt((doublereal) ((*m << 1) * *n)) / (dscale *
+ sqrt(dsum));
+ } else {
+ *dif = sqrt((doublereal) pq) / (dscale * sqrt(dsum));
+ }
+ }
+ if (isolve == 2 && iround == 1) {
+ if (notran) {
+ ifunc = *ijob;
+ }
+ scale2 = *scale;
+ dlacpy_("F", m, n, &c__[c_offset], ldc, &work[1], m);
+ dlacpy_("F", m, n, &f[f_offset], ldf, &work[*m * *n + 1], m);
+ dlaset_("F", m, n, &c_b14, &c_b14, &c__[c_offset], ldc);
+ dlaset_("F", m, n, &c_b14, &c_b14, &f[f_offset], ldf);
+ } else if (isolve == 2 && iround == 2) {
+ dlacpy_("F", m, n, &work[1], m, &c__[c_offset], ldc);
+ dlacpy_("F", m, n, &work[*m * *n + 1], m, &f[f_offset], ldf);
+ *scale = scale2;
+ }
+/* L150: */
+ }
+
+ } else {
+
+/* Solve transposed (I, J)-subsystem */
+/* A(I, I)' * R(I, J) + D(I, I)' * L(I, J) = C(I, J) */
+/* R(I, J) * B(J, J)' + L(I, J) * E(J, J)' = -F(I, J) */
+/* for I = 1,2,..., P; J = Q, Q-1,..., 1 */
+
+ *scale = 1.;
+ i__1 = p;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ is = iwork[i__];
+ ie = iwork[i__ + 1] - 1;
+ mb = ie - is + 1;
+ i__2 = p + 2;
+ for (j = q; j >= i__2; --j) {
+ js = iwork[j];
+ je = iwork[j + 1] - 1;
+ nb = je - js + 1;
+ dtgsy2_(trans, &ifunc, &mb, &nb, &a[is + is * a_dim1], lda, &
+ b[js + js * b_dim1], ldb, &c__[is + js * c_dim1], ldc,
+ &d__[is + is * d_dim1], ldd, &e[js + js * e_dim1],
+ lde, &f[is + js * f_dim1], ldf, &scaloc, &dsum, &
+ dscale, &iwork[q + 2], &ppqq, &linfo);
+ if (linfo > 0) {
+ *info = linfo;
+ }
+ if (scaloc != 1.) {
+ i__3 = js - 1;
+ for (k = 1; k <= i__3; ++k) {
+ dscal_(m, &scaloc, &c__[k * c_dim1 + 1], &c__1);
+ dscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L160: */
+ }
+ i__3 = je;
+ for (k = js; k <= i__3; ++k) {
+ i__4 = is - 1;
+ dscal_(&i__4, &scaloc, &c__[k * c_dim1 + 1], &c__1);
+ i__4 = is - 1;
+ dscal_(&i__4, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L170: */
+ }
+ i__3 = je;
+ for (k = js; k <= i__3; ++k) {
+ i__4 = *m - ie;
+ dscal_(&i__4, &scaloc, &c__[ie + 1 + k * c_dim1], &
+ c__1);
+ i__4 = *m - ie;
+ dscal_(&i__4, &scaloc, &f[ie + 1 + k * f_dim1], &c__1)
+ ;
+/* L180: */
+ }
+ i__3 = *n;
+ for (k = je + 1; k <= i__3; ++k) {
+ dscal_(m, &scaloc, &c__[k * c_dim1 + 1], &c__1);
+ dscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L190: */
+ }
+ *scale *= scaloc;
+ }
+
+/* Substitute R(I, J) and L(I, J) into remaining equation. */
+
+ if (j > p + 2) {
+ i__3 = js - 1;
+ dgemm_("N", "T", &mb, &i__3, &nb, &c_b52, &c__[is + js *
+ c_dim1], ldc, &b[js * b_dim1 + 1], ldb, &c_b52, &
+ f[is + f_dim1], ldf);
+ i__3 = js - 1;
+ dgemm_("N", "T", &mb, &i__3, &nb, &c_b52, &f[is + js *
+ f_dim1], ldf, &e[js * e_dim1 + 1], lde, &c_b52, &
+ f[is + f_dim1], ldf);
+ }
+ if (i__ < p) {
+ i__3 = *m - ie;
+ dgemm_("T", "N", &i__3, &nb, &mb, &c_b51, &a[is + (ie + 1)
+ * a_dim1], lda, &c__[is + js * c_dim1], ldc, &
+ c_b52, &c__[ie + 1 + js * c_dim1], ldc);
+ i__3 = *m - ie;
+ dgemm_("T", "N", &i__3, &nb, &mb, &c_b51, &d__[is + (ie +
+ 1) * d_dim1], ldd, &f[is + js * f_dim1], ldf, &
+ c_b52, &c__[ie + 1 + js * c_dim1], ldc);
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+
+ }
+
+ work[1] = (doublereal) lwmin;
+
+ return 0;
+
+/* End of DTGSYL */
+
+} /* dtgsyl_ */
diff --git a/contrib/libs/clapack/dtpcon.c b/contrib/libs/clapack/dtpcon.c
new file mode 100644
index 0000000000..b789722eb2
--- /dev/null
+++ b/contrib/libs/clapack/dtpcon.c
@@ -0,0 +1,233 @@
+/* dtpcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dtpcon_(char *norm, char *uplo, char *diag, integer *n,
+	doublereal *ap, doublereal *rcond, doublereal *work, integer *iwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1;
+
+    /* Local variables */
+    integer ix, kase, kase1;
+    doublereal scale;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    extern /* Subroutine */ int drscl_(integer *, doublereal *, doublereal *,
+	    integer *);
+    doublereal anorm;
+    logical upper;
+    doublereal xnorm;
+    extern /* Subroutine */ int dlacn2_(integer *, doublereal *, doublereal *,
+	     integer *, doublereal *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern doublereal dlantp_(char *, char *, char *, integer *, doublereal *,
+	     doublereal *);
+    doublereal ainvnm;
+    extern /* Subroutine */ int dlatps_(char *, char *, char *, char *,
+	    integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+	     integer *);
+    logical onenrm;
+    char normin[1];
+    doublereal smlnum;
+    logical nounit;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DTPCON estimates the reciprocal of the condition number of a packed */
+/*  triangular matrix A, in either the 1-norm or the infinity-norm. */
+
+/*  The norm of A is computed and an estimate is obtained for */
+/*  norm(inv(A)), then the reciprocal of the condition number is */
+/*  computed as */
+/*     RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/*  Arguments */
+/*  ========= */
+
+/*  NORM    (input) CHARACTER*1 */
+/*          Specifies whether the 1-norm condition number or the */
+/*          infinity-norm condition number is required: */
+/*          = '1' or 'O':  1-norm; */
+/*          = 'I':         Infinity-norm. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  A is upper triangular; */
+/*          = 'L':  A is lower triangular. */
+
+/*  DIAG    (input) CHARACTER*1 */
+/*          = 'N':  A is non-unit triangular; */
+/*          = 'U':  A is unit triangular. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  AP      (input) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/*          The upper or lower triangular matrix A, packed columnwise in */
+/*          a linear array.  The j-th column of A is stored in the array */
+/*          AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/*          If DIAG = 'U', the diagonal elements of A are not referenced */
+/*          and are assumed to be 1. */
+
+/*  RCOND   (output) DOUBLE PRECISION */
+/*          The reciprocal of the condition number of the matrix A, */
+/*          computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/*  WORK    (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/*  IWORK   (workspace) INTEGER array, dimension (N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+/*     f2c translates 1-based Fortran indexing by shifting each array */
+/*     pointer down by one before use. */
+    --iwork;
+    --work;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+    nounit = lsame_(diag, "N");
+
+    if (! onenrm && ! lsame_(norm, "I")) {
+	*info = -1;
+    } else if (! upper && ! lsame_(uplo, "L")) {
+	*info = -2;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DTPCON", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	*rcond = 1.;
+	return 0;
+    }
+
+    *rcond = 0.;
+/*     SMLNUM is the overflow guard used when undoing DLATPS scaling below. */
+    smlnum = dlamch_("Safe minimum") * (doublereal) max(1,*n);
+
+/*     Compute the norm of the triangular matrix A. */
+
+    anorm = dlantp_(norm, uplo, diag, n, &ap[1], &work[1]);
+
+/*     Continue only if ANORM > 0. */
+
+    if (anorm > 0.) {
+
+/*        Estimate the norm of the inverse of A. */
+
+	ainvnm = 0.;
+	*(unsigned char *)normin = 'N';
+	if (onenrm) {
+	    kase1 = 1;
+	} else {
+	    kase1 = 2;
+	}
+	kase = 0;
+/*        Reverse-communication loop: DLACN2 returns in KASE which */
+/*        operation (apply inv(A) or inv(A')) it needs next for its */
+/*        norm estimate; KASE = 0 signals the estimate is done. */
+L10:
+	dlacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+	if (kase != 0) {
+	    if (kase == kase1) {
+
+/*              Multiply by inv(A). */
+
+		dlatps_(uplo, "No transpose", diag, normin, n, &ap[1], &work[
+			1], &scale, &work[(*n << 1) + 1], info);
+	    } else {
+
+/*              Multiply by inv(A'). */
+
+		dlatps_(uplo, "Transpose", diag, normin, n, &ap[1], &work[1],
+			 &scale, &work[(*n << 1) + 1], info);
+	    }
+	    *(unsigned char *)normin = 'Y';
+
+/*           Multiply by 1/SCALE if doing so will not cause overflow. */
+
+	    if (scale != 1.) {
+		ix = idamax_(n, &work[1], &c__1);
+		xnorm = (d__1 = work[ix], abs(d__1));
+		if (scale < xnorm * smlnum || scale == 0.) {
+		    goto L20;
+		}
+		drscl_(n, &scale, &work[1], &c__1);
+	    }
+	    goto L10;
+	}
+
+/*        Compute the estimate of the reciprocal condition number. */
+
+	if (ainvnm != 0.) {
+	    *rcond = 1. / anorm / ainvnm;
+	}
+    }
+
+L20:
+    return 0;
+
+/*     End of DTPCON */
+
+} /* dtpcon_ */
diff --git a/contrib/libs/clapack/dtprfs.c b/contrib/libs/clapack/dtprfs.c
new file mode 100644
index 0000000000..8226412094
--- /dev/null
+++ b/contrib/libs/clapack/dtprfs.c
@@ -0,0 +1,496 @@
+/* dtprfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b19 = -1.;
+
+/* Subroutine */ int dtprfs_(char *uplo, char *trans, char *diag, integer *n,
+	 integer *nrhs, doublereal *ap, doublereal *b, integer *ldb,
+	doublereal *x, integer *ldx, doublereal *ferr, doublereal *berr,
+	doublereal *work, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3;
+    doublereal d__1, d__2, d__3;
+
+    /* Local variables */
+    integer i__, j, k;
+    doublereal s;
+    integer kc;
+    doublereal xk;
+    integer nz;
+    doublereal eps;
+    integer kase;
+    doublereal safe1, safe2;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *), daxpy_(integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *), dtpmv_(char *,
+	    char *, char *, integer *, doublereal *, doublereal *, integer *);
+    logical upper;
+    extern /* Subroutine */ int dtpsv_(char *, char *, char *, integer *,
+	    doublereal *, doublereal *, integer *),
+	    dlacn2_(integer *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    doublereal safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical notran;
+    char transt[1];
+    logical nounit;
+    doublereal lstres;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DTPRFS provides error bounds and backward error estimates for the */
+/*  solution to a system of linear equations with a triangular packed */
+/*  coefficient matrix. */
+
+/*  The solution matrix X must be computed by DTPTRS or some other */
+/*  means before entering this routine.  DTPRFS does not do iterative */
+/*  refinement because doing so cannot improve the backward error. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  A is upper triangular; */
+/*          = 'L':  A is lower triangular. */
+
+/*  TRANS   (input) CHARACTER*1 */
+/*          Specifies the form of the system of equations: */
+/*          = 'N':  A * X = B     (No transpose) */
+/*          = 'T':  A**T * X = B  (Transpose) */
+/*          = 'C':  A**H * X = B  (Conjugate transpose = Transpose) */
+
+/*  DIAG    (input) CHARACTER*1 */
+/*          = 'N':  A is non-unit triangular; */
+/*          = 'U':  A is unit triangular. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrices B and X.  NRHS >= 0. */
+
+/*  AP      (input) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/*          The upper or lower triangular matrix A, packed columnwise in */
+/*          a linear array.  The j-th column of A is stored in the array */
+/*          AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+/*          If DIAG = 'U', the diagonal elements of A are not referenced */
+/*          and are assumed to be 1. */
+
+/*  B       (input) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/*          The right hand side matrix B. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  X       (input) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/*          The solution matrix X. */
+
+/*  LDX     (input) INTEGER */
+/*          The leading dimension of the array X.  LDX >= max(1,N). */
+
+/*  FERR    (output) DOUBLE PRECISION array, dimension (NRHS) */
+/*          The estimated forward error bound for each solution vector */
+/*          X(j) (the j-th column of the solution matrix X). */
+/*          If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/*          is an estimated upper bound for the magnitude of the largest */
+/*          element in (X(j) - XTRUE) divided by the magnitude of the */
+/*          largest element in X(j).  The estimate is as reliable as */
+/*          the estimate for RCOND, and is almost always a slight */
+/*          overestimate of the true error. */
+
+/*  BERR    (output) DOUBLE PRECISION array, dimension (NRHS) */
+/*          The componentwise relative backward error of each solution */
+/*          vector X(j) (i.e., the smallest relative change in */
+/*          any element of A or B that makes X(j) an exact solution). */
+
+/*  WORK    (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/*  IWORK   (workspace) INTEGER array, dimension (N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+/*     f2c translates 1-based Fortran indexing by shifting each array */
+/*     pointer so that element (1) or (1,1) maps to the C base address. */
+    --ap;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    notran = lsame_(trans, "N");
+    nounit = lsame_(diag, "N");
+
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (! notran && ! lsame_(trans, "T") && !
+	    lsame_(trans, "C")) {
+	*info = -2;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*nrhs < 0) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    } else if (*ldx < max(1,*n)) {
+	*info = -10;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DTPRFS", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] = 0.;
+	    berr[j] = 0.;
+/* L10: */
+	}
+	return 0;
+    }
+
+/*     TRANST is the opposite transpose mode of TRANS; it is needed for */
+/*     the inv(op(A)') solves requested by DLACN2 below. */
+    if (notran) {
+	*(unsigned char *)transt = 'T';
+    } else {
+	*(unsigned char *)transt = 'N';
+    }
+
+/*     NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+    nz = *n + 1;
+    eps = dlamch_("Epsilon");
+    safmin = dlamch_("Safe minimum");
+    safe1 = nz * safmin;
+    safe2 = safe1 / eps;
+
+/*     Do for each right hand side */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+/*        Compute residual R = B - op(A) * X, */
+/*        where op(A) = A or A', depending on TRANS. */
+
+	dcopy_(n, &x[j * x_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+	dtpmv_(uplo, trans, diag, n, &ap[1], &work[*n + 1], &c__1);
+	daxpy_(n, &c_b19, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+
+/*        Compute componentwise relative backward error from formula */
+
+/*        max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/*        where abs(Z) is the componentwise absolute value of the matrix */
+/*        or vector Z.  If the i-th component of the denominator is less */
+/*        than SAFE2, then SAFE1 is added to the i-th components of the */
+/*        numerator and denominator before dividing. */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    work[i__] = (d__1 = b[i__ + j * b_dim1], abs(d__1));
+/* L20: */
+	}
+
+	if (notran) {
+
+/*           Compute abs(A)*abs(X) + abs(B). */
+
+	    if (upper) {
+		kc = 1;
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+			i__3 = k;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    work[i__] += (d__1 = ap[kc + i__ - 1], abs(d__1))
+				    * xk;
+/* L30: */
+			}
+			kc += k;
+/* L40: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+			i__3 = k - 1;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    work[i__] += (d__1 = ap[kc + i__ - 1], abs(d__1))
+				    * xk;
+/* L50: */
+			}
+			work[k] += xk;
+			kc += k;
+/* L60: */
+		    }
+		}
+	    } else {
+		kc = 1;
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+			i__3 = *n;
+			for (i__ = k; i__ <= i__3; ++i__) {
+			    work[i__] += (d__1 = ap[kc + i__ - k], abs(d__1))
+				    * xk;
+/* L70: */
+			}
+			kc = kc + *n - k + 1;
+/* L80: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+			i__3 = *n;
+			for (i__ = k + 1; i__ <= i__3; ++i__) {
+			    work[i__] += (d__1 = ap[kc + i__ - k], abs(d__1))
+				    * xk;
+/* L90: */
+			}
+			work[k] += xk;
+			kc = kc + *n - k + 1;
+/* L100: */
+		    }
+		}
+	    }
+	} else {
+
+/*           Compute abs(A')*abs(X) + abs(B). */
+
+	    if (upper) {
+		kc = 1;
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			s = 0.;
+			i__3 = k;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    s += (d__1 = ap[kc + i__ - 1], abs(d__1)) * (d__2
+				    = x[i__ + j * x_dim1], abs(d__2));
+/* L110: */
+			}
+			work[k] += s;
+			kc += k;
+/* L120: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			s = (d__1 = x[k + j * x_dim1], abs(d__1));
+			i__3 = k - 1;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    s += (d__1 = ap[kc + i__ - 1], abs(d__1)) * (d__2
+				    = x[i__ + j * x_dim1], abs(d__2));
+/* L130: */
+			}
+			work[k] += s;
+			kc += k;
+/* L140: */
+		    }
+		}
+	    } else {
+		kc = 1;
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			s = 0.;
+			i__3 = *n;
+			for (i__ = k; i__ <= i__3; ++i__) {
+			    s += (d__1 = ap[kc + i__ - k], abs(d__1)) * (d__2
+				    = x[i__ + j * x_dim1], abs(d__2));
+/* L150: */
+			}
+			work[k] += s;
+			kc = kc + *n - k + 1;
+/* L160: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			s = (d__1 = x[k + j * x_dim1], abs(d__1));
+			i__3 = *n;
+			for (i__ = k + 1; i__ <= i__3; ++i__) {
+			    s += (d__1 = ap[kc + i__ - k], abs(d__1)) * (d__2
+				    = x[i__ + j * x_dim1], abs(d__2));
+/* L170: */
+			}
+			work[k] += s;
+			kc = kc + *n - k + 1;
+/* L180: */
+		    }
+		}
+	    }
+	}
+/*        Componentwise backward error: max over i of |R(i)|/denom(i), */
+/*        with SAFE1 damping when the denominator is below SAFE2. */
+	s = 0.;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (work[i__] > safe2) {
+/* Computing MAX */
+		d__2 = s, d__3 = (d__1 = work[*n + i__], abs(d__1)) / work[
+			i__];
+		s = max(d__2,d__3);
+	    } else {
+/* Computing MAX */
+		d__2 = s, d__3 = ((d__1 = work[*n + i__], abs(d__1)) + safe1)
+			/ (work[i__] + safe1);
+		s = max(d__2,d__3);
+	    }
+/* L190: */
+	}
+	berr[j] = s;
+
+/*        Bound error from formula */
+
+/*        norm(X - XTRUE) / norm(X) .le. FERR = */
+/*        norm( abs(inv(op(A)))* */
+/*           ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/*        where */
+/*          norm(Z) is the magnitude of the largest component of Z */
+/*          inv(op(A)) is the inverse of op(A) */
+/*          abs(Z) is the componentwise absolute value of the matrix or */
+/*             vector Z */
+/*          NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/*          EPS is machine epsilon */
+
+/*        The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/*        is incremented by SAFE1 if the i-th component of */
+/*        abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/*        Use DLACN2 to estimate the infinity-norm of the matrix */
+/*           inv(op(A)) * diag(W), */
+/*        where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (work[i__] > safe2) {
+		work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+			work[i__];
+	    } else {
+		work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+			work[i__] + safe1;
+	    }
+/* L200: */
+	}
+
+/*        Reverse-communication loop: DLACN2 requests products with */
+/*        diag(W)*inv(op(A)') (KASE = 1) or inv(op(A))*diag(W) (KASE = 2) */
+/*        until its estimate converges (KASE = 0). */
+	kase = 0;
+L210:
+	dlacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+		kase, isave);
+	if (kase != 0) {
+	    if (kase == 1) {
+
+/*              Multiply by diag(W)*inv(op(A)'). */
+
+		dtpsv_(uplo, transt, diag, n, &ap[1], &work[*n + 1], &c__1);
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    work[*n + i__] = work[i__] * work[*n + i__];
+/* L220: */
+		}
+	    } else {
+
+/*              Multiply by inv(op(A))*diag(W). */
+
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    work[*n + i__] = work[i__] * work[*n + i__];
+/* L230: */
+		}
+		dtpsv_(uplo, trans, diag, n, &ap[1], &work[*n + 1], &c__1);
+	    }
+	    goto L210;
+	}
+
+/*        Normalize error. */
+
+	lstres = 0.;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    d__2 = lstres, d__3 = (d__1 = x[i__ + j * x_dim1], abs(d__1));
+	    lstres = max(d__2,d__3);
+/* L240: */
+	}
+	if (lstres != 0.) {
+	    ferr[j] /= lstres;
+	}
+
+/* L250: */
+    }
+
+    return 0;
+
+/*     End of DTPRFS */
+
+} /* dtprfs_ */
diff --git a/contrib/libs/clapack/dtptri.c b/contrib/libs/clapack/dtptri.c
new file mode 100644
index 0000000000..a6ce4b4fbb
--- /dev/null
+++ b/contrib/libs/clapack/dtptri.c
@@ -0,0 +1,219 @@
+/* dtptri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dtptri_(char *uplo, char *diag, integer *n, doublereal *
+	ap, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer j, jc, jj;
+    doublereal ajj;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int dtpmv_(char *, char *, char *, integer *,
+	    doublereal *, doublereal *, integer *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    integer jclast;
+    logical nounit;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DTPTRI computes the inverse of a real upper or lower triangular */
+/*  matrix A stored in packed format. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  A is upper triangular; */
+/*          = 'L':  A is lower triangular. */
+
+/*  DIAG    (input) CHARACTER*1 */
+/*          = 'N':  A is non-unit triangular; */
+/*          = 'U':  A is unit triangular. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  AP      (input/output) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/*          On entry, the upper or lower triangular matrix A, stored */
+/*          columnwise in a linear array.  The j-th column of A is stored */
+/*          in the array AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*((2*n-j)/2) = A(i,j) for j<=i<=n. */
+/*          See below for further details. */
+/*          On exit, the (triangular) inverse of the original matrix, in */
+/*          the same packed storage format. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, A(i,i) is exactly zero.  The triangular */
+/*                matrix is singular and its inverse can not be computed. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  A triangular matrix A can be transferred to packed storage using one */
+/*  of the following program segments: */
+
+/*  UPLO = 'U':                      UPLO = 'L': */
+
+/*        JC = 1                           JC = 1 */
+/*        DO 2 J = 1, N                    DO 2 J = 1, N */
+/*           DO 1 I = 1, J                    DO 1 I = J, N */
+/*              AP(JC+I-1) = A(I,J)              AP(JC+I-J) = A(I,J) */
+/*      1    CONTINUE                   1    CONTINUE */
+/*           JC = JC + J                      JC = JC + N - J + 1 */
+/*      2 CONTINUE                       2 CONTINUE */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+/*     Shift AP so 1-based Fortran indexing works in C. */
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    nounit = lsame_(diag, "N");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DTPTRI", &i__1);
+	return 0;
+    }
+
+/*     Check for singularity if non-unit. */
+
+/*     Note: INFO itself is used as the loop index here, so when a zero */
+/*     diagonal entry is found the early return leaves INFO equal to the */
+/*     index of the failing diagonal, as documented above. */
+    if (nounit) {
+	if (upper) {
+	    jj = 0;
+	    i__1 = *n;
+	    for (*info = 1; *info <= i__1; ++(*info)) {
+		jj += *info;
+		if (ap[jj] == 0.) {
+		    return 0;
+		}
+/* L10: */
+	    }
+	} else {
+	    jj = 1;
+	    i__1 = *n;
+	    for (*info = 1; *info <= i__1; ++(*info)) {
+		if (ap[jj] == 0.) {
+		    return 0;
+		}
+		jj = jj + *n - *info + 1;
+/* L20: */
+	    }
+	}
+	*info = 0;
+    }
+
+    if (upper) {
+
+/*        Compute inverse of upper triangular matrix. */
+
+	jc = 1;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    if (nounit) {
+		ap[jc + j - 1] = 1. / ap[jc + j - 1];
+		ajj = -ap[jc + j - 1];
+	    } else {
+		ajj = -1.;
+	    }
+
+/*           Compute elements 1:j-1 of j-th column. */
+
+	    i__2 = j - 1;
+	    dtpmv_("Upper", "No transpose", diag, &i__2, &ap[1], &ap[jc], &
+		    c__1);
+	    i__2 = j - 1;
+	    dscal_(&i__2, &ajj, &ap[jc], &c__1);
+	    jc += j;
+/* L30: */
+	}
+
+    } else {
+
+/*        Compute inverse of lower triangular matrix. */
+
+/*        JCLAST remembers the start of the previously processed (j+1)-th */
+/*        column; it is read only when j < n, i.e. never on the first */
+/*        (j = n) iteration, so it is always set before first use. */
+	jc = *n * (*n + 1) / 2;
+	for (j = *n; j >= 1; --j) {
+	    if (nounit) {
+		ap[jc] = 1. / ap[jc];
+		ajj = -ap[jc];
+	    } else {
+		ajj = -1.;
+	    }
+	    if (j < *n) {
+
+/*              Compute elements j+1:n of j-th column. */
+
+		i__1 = *n - j;
+		dtpmv_("Lower", "No transpose", diag, &i__1, &ap[jclast], &ap[
+			jc + 1], &c__1);
+		i__1 = *n - j;
+		dscal_(&i__1, &ajj, &ap[jc + 1], &c__1);
+	    }
+	    jclast = jc;
+	    jc = jc - *n + j - 2;
+/* L40: */
+	}
+    }
+
+    return 0;
+
+/*     End of DTPTRI */
+
+} /* dtptri_ */
diff --git a/contrib/libs/clapack/dtptrs.c b/contrib/libs/clapack/dtptrs.c
new file mode 100644
index 0000000000..e1802549ee
--- /dev/null
+++ b/contrib/libs/clapack/dtptrs.c
@@ -0,0 +1,193 @@
+/* dtptrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dtptrs_(char *uplo, char *trans, char *diag, integer *n, 
+	integer *nrhs, doublereal *ap, doublereal *b, integer *ldb, integer *
+	info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    integer j, jc;
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int dtpsv_(char *, char *, char *, integer *, 
+	doublereal *, doublereal *, integer *), 
+	xerbla_(char *, integer *);
+    logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTPTRS solves a triangular system of the form */
+
+/* A * X = B or A**T * X = B, */
+
+/* where A is a triangular matrix of order N stored in packed format, */
+/* and B is an N-by-NRHS matrix. A check is made to verify that A is */
+/* nonsingular. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose = Transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input) DOUBLE PRECISION array, dimension (N*(N+1)/2) */
+/* The upper or lower triangular matrix A, packed columnwise in */
+/* a linear array. The j-th column of A is stored in the array */
+/* AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, if INFO = 0, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element of A is zero, */
+/* indicating that the matrix is singular and the */
+/* solutions X have not been computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* f2c shifts the array base so 1-based Fortran indexing carries over. */
+    --ap;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    nounit = lsame_(diag, "N");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (! lsame_(trans, "N") && ! lsame_(trans, 
+	    "T") && ! lsame_(trans, "C")) {
+	*info = -2;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*nrhs < 0) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DTPTRS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Check for singularity. */
+
+/* NOTE: INFO itself is the loop index, so an early return leaves INFO */
+/* holding the (1-based) index of the first zero diagonal element. */
+/* JC tracks the packed-storage position of the current diagonal: for */
+/* UPLO='U' the diagonal of column j is AP(jc+j-1) and jc advances by j; */
+/* for UPLO='L' the diagonal is AP(jc) and jc advances by n-j+1. */
+
+    if (nounit) {
+	if (upper) {
+	    jc = 1;
+	    i__1 = *n;
+	    for (*info = 1; *info <= i__1; ++(*info)) {
+		if (ap[jc + *info - 1] == 0.) {
+		    return 0;
+		}
+		jc += *info;
+/* L10: */
+	    }
+	} else {
+	    jc = 1;
+	    i__1 = *n;
+	    for (*info = 1; *info <= i__1; ++(*info)) {
+		if (ap[jc] == 0.) {
+		    return 0;
+		}
+		jc = jc + *n - *info + 1;
+/* L20: */
+	    }
+	}
+    }
+    *info = 0;
+
+/* Solve A * x = b or A' * x = b. */
+
+/* Each right-hand side (column of B) is solved independently by the */
+/* level-2 BLAS packed triangular solver DTPSV. */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+	dtpsv_(uplo, trans, diag, n, &ap[1], &b[j * b_dim1 + 1], &c__1);
+/* L30: */
+    }
+
+    return 0;
+
+/* End of DTPTRS */
+
+} /* dtptrs_ */
diff --git a/contrib/libs/clapack/dtpttf.c b/contrib/libs/clapack/dtpttf.c
new file mode 100644
index 0000000000..86a7ba0574
--- /dev/null
+++ b/contrib/libs/clapack/dtpttf.c
@@ -0,0 +1,499 @@
+/* dtpttf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dtpttf_(char *transr, char *uplo, integer *n, doublereal 
+	*ap, doublereal *arf, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__, j, k, n1, n2, ij, jp, js, nt, lda, ijp;
+    logical normaltransr;
+    extern logical lsame_(char *, char *);
+    logical lower;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical nisodd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTPTTF copies a triangular matrix A from standard packed format (TP) */
+/* to rectangular full packed format (TF). */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': ARF in Normal format is wanted; */
+/* = 'T': ARF in Conjugate-transpose format is wanted. */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) DOUBLE PRECISION array, dimension ( N*(N+1)/2 ), */
+/* On entry, the upper or lower triangular matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* ARF (output) DOUBLE PRECISION array, dimension ( N*(N+1)/2 ), */
+/* On exit, the upper or lower triangular matrix A stored in */
+/* RFP format. For a further discussion see Notes below. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Notes */
+/* ===== */
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* even. We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* the transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* the transpose of the last three columns of AP lower. */
+/* This covers the case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 03 04 05 33 43 53 */
+/* 13 14 15 00 44 54 */
+/* 23 24 25 10 11 55 */
+/* 33 34 35 20 21 22 */
+/* 00 44 45 30 31 32 */
+/* 01 11 55 40 41 42 */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* odd. We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* the transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* the transpose of the last two columns of AP lower. */
+/* This covers the case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 02 03 04 00 33 43 */
+/* 12 13 14 10 11 44 */
+/* 22 23 24 20 21 22 */
+/* 00 33 34 30 31 32 */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+/* RFP A RFP A */
+
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    *info = 0;
+    normaltransr = lsame_(transr, "N");
+    lower = lsame_(uplo, "L");
+    if (! normaltransr && ! lsame_(transr, "T")) {
+	*info = -1;
+    } else if (! lower && ! lsame_(uplo, "U")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DTPTTF", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* NOTE: both N=1 branches are intentionally identical copies -- */
+/* transposition is a no-op for a 1-by-1 real matrix. The split mirrors */
+/* the complex routine ZTPTTF, where the second branch conjugates. */
+
+    if (*n == 1) {
+	if (normaltransr) {
+	    arf[0] = ap[0];
+	} else {
+	    arf[0] = ap[0];
+	}
+	return 0;
+    }
+
+/* Size of array ARF(0:NT-1) */
+
+    nt = *n * (*n + 1) / 2;
+
+/* Set N1 and N2 depending on LOWER */
+
+    if (lower) {
+	n2 = *n / 2;
+	n1 = *n - n2;
+    } else {
+	n1 = *n / 2;
+	n2 = *n - n1;
+    }
+
+/* If N is odd, set NISODD = .TRUE. */
+/* If N is even, set K = N/2 and NISODD = .FALSE. */
+
+/* set lda of ARF^C; ARF^C is (0:(N+1)/2-1,0:N-noe) */
+/* where noe = 0 if n is even, noe = 1 if n is odd */
+
+    if (*n % 2 == 0) {
+	k = *n / 2;
+	nisodd = FALSE_;
+	lda = *n + 1;
+    } else {
+	nisodd = TRUE_;
+	lda = *n;
+    }
+
+/* ARF^C has lda rows and n+1-noe cols */
+
+    if (! normaltransr) {
+	lda = (*n + 1) / 2;
+    }
+
+/* start execution: there are eight cases */
+
+/* The eight cases are the cross product of NISODD x TRANSR x UPLO. */
+/* In every case IJP scans AP linearly (packed columns are contiguous) */
+/* while IJ is computed to land on the corresponding RFP slot in ARF. */
+
+    if (nisodd) {
+
+/* N is odd */
+
+	if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'L' */
+
+		ijp = 0;
+		jp = 0;
+		i__1 = n2;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = *n - 1;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			ij = i__ + jp;
+			arf[ij] = ap[ijp];
+			++ijp;
+		    }
+		    jp += lda;
+		}
+		i__1 = n2 - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = n2;
+		    for (j = i__ + 1; j <= i__2; ++j) {
+			ij = i__ + j * lda;
+			arf[ij] = ap[ijp];
+			++ijp;
+		    }
+		}
+
+	    } else {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'U' */
+
+		ijp = 0;
+		i__1 = n1 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    ij = n2 + j;
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			arf[ij] = ap[ijp];
+			++ijp;
+			ij += lda;
+		    }
+		}
+		js = 0;
+		i__1 = *n - 1;
+		for (j = n1; j <= i__1; ++j) {
+		    ij = js;
+		    i__2 = js + j;
+		    for (ij = js; ij <= i__2; ++ij) {
+			arf[ij] = ap[ijp];
+			++ijp;
+		    }
+		    js += lda;
+		}
+
+	    }
+
+	} else {
+
+/* N is odd and TRANSR = 'T' */
+
+	    if (lower) {
+
+/* N is odd, TRANSR = 'T', and UPLO = 'L' */
+
+		ijp = 0;
+		i__1 = n2;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = *n * lda - 1;
+		    i__3 = lda;
+		    for (ij = i__ * (lda + 1); i__3 < 0 ? ij >= i__2 : ij <= 
+			    i__2; ij += i__3) {
+			arf[ij] = ap[ijp];
+			++ijp;
+		    }
+		}
+		js = 1;
+		i__1 = n2 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + n2 - j - 1;
+		    for (ij = js; ij <= i__3; ++ij) {
+			arf[ij] = ap[ijp];
+			++ijp;
+		    }
+		    js = js + lda + 1;
+		}
+
+	    } else {
+
+/* N is odd, TRANSR = 'T', and UPLO = 'U' */
+
+		ijp = 0;
+		js = n2 * lda;
+		i__1 = n1 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + j;
+		    for (ij = js; ij <= i__3; ++ij) {
+			arf[ij] = ap[ijp];
+			++ijp;
+		    }
+		    js += lda;
+		}
+		i__1 = n1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__3 = i__ + (n1 + i__) * lda;
+		    i__2 = lda;
+		    for (ij = i__; i__2 < 0 ? ij >= i__3 : ij <= i__3; ij += 
+			    i__2) {
+			arf[ij] = ap[ijp];
+			++ijp;
+		    }
+		}
+
+	    }
+
+	}
+
+    } else {
+
+/* N is even */
+
+	if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* N is even, TRANSR = 'N', and UPLO = 'L' */
+
+		ijp = 0;
+		jp = 0;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = *n - 1;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			ij = i__ + 1 + jp;
+			arf[ij] = ap[ijp];
+			++ijp;
+		    }
+		    jp += lda;
+		}
+		i__1 = k - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = k - 1;
+		    for (j = i__; j <= i__2; ++j) {
+			ij = i__ + j * lda;
+			arf[ij] = ap[ijp];
+			++ijp;
+		    }
+		}
+
+	    } else {
+
+/* N is even, TRANSR = 'N', and UPLO = 'U' */
+
+		ijp = 0;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    ij = k + 1 + j;
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			arf[ij] = ap[ijp];
+			++ijp;
+			ij += lda;
+		    }
+		}
+		js = 0;
+		i__1 = *n - 1;
+		for (j = k; j <= i__1; ++j) {
+		    ij = js;
+		    i__2 = js + j;
+		    for (ij = js; ij <= i__2; ++ij) {
+			arf[ij] = ap[ijp];
+			++ijp;
+		    }
+		    js += lda;
+		}
+
+	    }
+
+	} else {
+
+/* N is even and TRANSR = 'T' */
+
+	    if (lower) {
+
+/* N is even, TRANSR = 'T', and UPLO = 'L' */
+
+		ijp = 0;
+		i__1 = k - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = (*n + 1) * lda - 1;
+		    i__3 = lda;
+		    for (ij = i__ + (i__ + 1) * lda; i__3 < 0 ? ij >= i__2 : 
+			    ij <= i__2; ij += i__3) {
+			arf[ij] = ap[ijp];
+			++ijp;
+		    }
+		}
+		js = 0;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + k - j - 1;
+		    for (ij = js; ij <= i__3; ++ij) {
+			arf[ij] = ap[ijp];
+			++ijp;
+		    }
+		    js = js + lda + 1;
+		}
+
+	    } else {
+
+/* N is even, TRANSR = 'T', and UPLO = 'U' */
+
+		ijp = 0;
+		js = (k + 1) * lda;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + j;
+		    for (ij = js; ij <= i__3; ++ij) {
+			arf[ij] = ap[ijp];
+			++ijp;
+		    }
+		    js += lda;
+		}
+		i__1 = k - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__3 = i__ + (k + i__) * lda;
+		    i__2 = lda;
+		    for (ij = i__; i__2 < 0 ? ij >= i__3 : ij <= i__3; ij += 
+			    i__2) {
+			arf[ij] = ap[ijp];
+			++ijp;
+		    }
+		}
+
+	    }
+
+	}
+
+    }
+
+    return 0;
+
+/* End of DTPTTF */
+
+} /* dtpttf_ */
diff --git a/contrib/libs/clapack/dtpttr.c b/contrib/libs/clapack/dtpttr.c
new file mode 100644
index 0000000000..d08841b8c1
--- /dev/null
+++ b/contrib/libs/clapack/dtpttr.c
@@ -0,0 +1,144 @@
+/* dtpttr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dtpttr_(char *uplo, integer *n, doublereal *ap, 
+	doublereal *a, integer *lda, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, k;
+    extern logical lsame_(char *, char *);
+    logical lower;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Julien Langou of the Univ. of Colorado Denver -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTPTTR copies a triangular matrix A from standard packed format (TP) */
+/* to standard full format (TR). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular. */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) DOUBLE PRECISION array, dimension ( N*(N+1)/2 ), */
+/* On entry, the upper or lower triangular matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* A (output) DOUBLE PRECISION array, dimension ( LDA, N ) */
+/* On exit, the triangular matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* f2c shifts the array bases so 1-based Fortran indexing carries over. */
+    --ap;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    lower = lsame_(uplo, "L");
+    if (! lower && ! lsame_(uplo, "U")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DTPTTR", &i__1);
+	return 0;
+    }
+
+/* K walks AP linearly: packed columns are stored contiguously, so a */
+/* simple increment visits AP in exactly the order the triangular */
+/* column segments of the full matrix are written. */
+
+    if (lower) {
+	k = 0;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *n;
+	    for (i__ = j; i__ <= i__2; ++i__) {
+		++k;
+		a[i__ + j * a_dim1] = ap[k];
+	    }
+	}
+    } else {
+	k = 0;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = j;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		++k;
+		a[i__ + j * a_dim1] = ap[k];
+	    }
+	}
+    }
+
+
+    return 0;
+
+/* End of DTPTTR */
+
+} /* dtpttr_ */
diff --git a/contrib/libs/clapack/dtrcon.c b/contrib/libs/clapack/dtrcon.c
new file mode 100644
index 0000000000..aa424e95db
--- /dev/null
+++ b/contrib/libs/clapack/dtrcon.c
@@ -0,0 +1,241 @@
+/* dtrcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dtrcon_(char *norm, char *uplo, char *diag, integer *n, 
+	doublereal *a, integer *lda, doublereal *rcond, doublereal *work, 
+	integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1;
+    doublereal d__1;
+
+    /* Local variables */
+    integer ix, kase, kase1;
+    doublereal scale;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    extern /* Subroutine */ int drscl_(integer *, doublereal *, doublereal *, 
+	    integer *);
+    doublereal anorm;
+    logical upper;
+    doublereal xnorm;
+    extern /* Subroutine */ int dlacn2_(integer *, doublereal *, doublereal *, 
+	     integer *, doublereal *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern doublereal dlantr_(char *, char *, char *, integer *, integer *, 
+	    doublereal *, integer *, doublereal *);
+    doublereal ainvnm;
+    extern /* Subroutine */ int dlatrs_(char *, char *, char *, char *, 
+	    integer *, doublereal *, integer *, doublereal *, doublereal *, 
+	    doublereal *, integer *);
+    logical onenrm;
+    char normin[1];
+    doublereal smlnum;
+    logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTRCON estimates the reciprocal of the condition number of a */
+/* triangular matrix A, in either the 1-norm or the infinity-norm. */
+
+/* The norm of A is computed and an estimate is obtained for */
+/* norm(inv(A)), then the reciprocal of the condition number is */
+/* computed as */
+/* RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The triangular matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of the array A contains the upper */
+/* triangular matrix, and the strictly lower triangular part of */
+/* A is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of the array A contains the lower triangular */
+/* matrix, and the strictly upper triangular part of A is not */
+/* referenced. If DIAG = 'U', the diagonal elements of A are */
+/* also not referenced and are assumed to be 1. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* f2c shifts the array bases so 1-based Fortran indexing carries over. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+    nounit = lsame_(diag, "N");
+
+    if (! onenrm && ! lsame_(norm, "I")) {
+	*info = -1;
+    } else if (! upper && ! lsame_(uplo, "L")) {
+	*info = -2;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*n)) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DTRCON", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	*rcond = 1.;
+	return 0;
+    }
+
+    *rcond = 0.;
+    smlnum = dlamch_("Safe minimum") * (doublereal) max(1,*n);
+
+/* Compute the norm of the triangular matrix A. */
+
+    anorm = dlantr_(norm, uplo, diag, n, n, &a[a_offset], lda, &work[1]);
+
+/* Continue only if ANORM > 0. */
+
+    if (anorm > 0.) {
+
+/* Estimate the norm of the inverse of A. */
+
+/* Reverse-communication loop: DLACN2 asks (via KASE) for products */
+/* with inv(A) or inv(A'), which are supplied by the scaled */
+/* triangular solver DLATRS. KASE1 selects which product matches */
+/* the requested norm (1-norm uses inv(A), inf-norm uses inv(A')). */
+
+	ainvnm = 0.;
+	*(unsigned char *)normin = 'N';
+	if (onenrm) {
+	    kase1 = 1;
+	} else {
+	    kase1 = 2;
+	}
+	kase = 0;
+L10:
+	dlacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+	if (kase != 0) {
+	    if (kase == kase1) {
+
+/* Multiply by inv(A). */
+
+		dlatrs_(uplo, "No transpose", diag, normin, n, &a[a_offset], 
+			lda, &work[1], &scale, &work[(*n << 1) + 1], info);
+	    } else {
+
+/* Multiply by inv(A'). */
+
+		dlatrs_(uplo, "Transpose", diag, normin, n, &a[a_offset], lda, 
+			 &work[1], &scale, &work[(*n << 1) + 1], info);
+	    }
+	    /* NORMIN='Y' tells DLATRS that WORK already holds valid */
+	    /* column scaling data from the previous solve. */
+	    *(unsigned char *)normin = 'Y';
+
+/* Multiply by 1/SCALE if doing so will not cause overflow. */
+
+	    if (scale != 1.) {
+		ix = idamax_(n, &work[1], &c__1);
+		xnorm = (d__1 = work[ix], abs(d__1));
+		if (scale < xnorm * smlnum || scale == 0.) {
+		    goto L20;
+		}
+		drscl_(n, &scale, &work[1], &c__1);
+	    }
+	    goto L10;
+	}
+
+/* Compute the estimate of the reciprocal condition number. */
+
+	if (ainvnm != 0.) {
+	    *rcond = 1. / anorm / ainvnm;
+	}
+    }
+
+L20:
+    return 0;
+
+/* End of DTRCON */
+
+} /* dtrcon_ */
diff --git a/contrib/libs/clapack/dtrevc.c b/contrib/libs/clapack/dtrevc.c
new file mode 100644
index 0000000000..84dc510e47
--- /dev/null
+++ b/contrib/libs/clapack/dtrevc.c
@@ -0,0 +1,1228 @@
+/* dtrevc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static logical c_false = FALSE_;
+static integer c__1 = 1;
+static doublereal c_b22 = 1.;
+static doublereal c_b25 = 0.;
+static integer c__2 = 2;
+static logical c_true = TRUE_;
+
+/* Subroutine */ int dtrevc_(char *side, char *howmny, logical *select,
+ integer *n, doublereal *t, integer *ldt, doublereal *vl, integer *
+ ldvl, doublereal *vr, integer *ldvr, integer *mm, integer *m,
+ doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+ i__2, i__3;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal x[4] /* was [2][2] */;
+ integer j1, j2, n2, ii, ki, ip, is;
+ doublereal wi, wr, rec, ulp, beta, emax;
+ logical pair;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ logical allv;
+ integer ierr;
+ doublereal unfl, ovfl, smin;
+ logical over;
+ doublereal vmax;
+ integer jnxt;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ doublereal scale;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+ doublereal remax;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ logical leftv, bothv;
+ extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *);
+ doublereal vcrit;
+ logical somev;
+ doublereal xnorm;
+ extern /* Subroutine */ int dlaln2_(logical *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *, doublereal *, doublereal *
+, doublereal *, integer *, doublereal *, doublereal *, integer *),
+ dlabad_(doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum;
+ logical rightv;
+ doublereal smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTREVC computes some or all of the right and/or left eigenvectors of */
+/* a real upper quasi-triangular matrix T. */
+/* Matrices of this type are produced by the Schur factorization of */
+/* a real general matrix: A = Q*T*Q**T, as computed by DHSEQR. */
+
+/* The right eigenvector x and the left eigenvector y of T corresponding */
+/* to an eigenvalue w are defined by: */
+
+/* T*x = w*x, (y**H)*T = w*(y**H) */
+
+/* where y**H denotes the conjugate transpose of y. */
+/* The eigenvalues are not input to this routine, but are read directly */
+/* from the diagonal blocks of T. */
+
+/* This routine returns the matrices X and/or Y of right and left */
+/* eigenvectors of T, or the products Q*X and/or Q*Y, where Q is an */
+/* input matrix. If Q is the orthogonal factor that reduces a matrix */
+/* A to Schur form T, then Q*X and Q*Y are the matrices of right and */
+/* left eigenvectors of A. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'R': compute right eigenvectors only; */
+/* = 'L': compute left eigenvectors only; */
+/* = 'B': compute both right and left eigenvectors. */
+
+/* HOWMNY (input) CHARACTER*1 */
+/* = 'A': compute all right and/or left eigenvectors; */
+/* = 'B': compute all right and/or left eigenvectors, */
+/* backtransformed by the matrices in VR and/or VL; */
+/* = 'S': compute selected right and/or left eigenvectors, */
+/* as indicated by the logical array SELECT. */
+
+/* SELECT (input/output) LOGICAL array, dimension (N) */
+/* If HOWMNY = 'S', SELECT specifies the eigenvectors to be */
+/* computed. */
+/* If w(j) is a real eigenvalue, the corresponding real */
+/* eigenvector is computed if SELECT(j) is .TRUE.. */
+/* If w(j) and w(j+1) are the real and imaginary parts of a */
+/* complex eigenvalue, the corresponding complex eigenvector is */
+/* computed if either SELECT(j) or SELECT(j+1) is .TRUE., and */
+/* on exit SELECT(j) is set to .TRUE. and SELECT(j+1) is set to */
+/* .FALSE.. */
+/* Not referenced if HOWMNY = 'A' or 'B'. */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. N >= 0. */
+
+/* T (input) DOUBLE PRECISION array, dimension (LDT,N) */
+/* The upper quasi-triangular matrix T in Schur canonical form. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max(1,N). */
+
+/* VL (input/output) DOUBLE PRECISION array, dimension (LDVL,MM) */
+/* On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must */
+/* contain an N-by-N matrix Q (usually the orthogonal matrix Q */
+/* of Schur vectors returned by DHSEQR). */
+/* On exit, if SIDE = 'L' or 'B', VL contains: */
+/* if HOWMNY = 'A', the matrix Y of left eigenvectors of T; */
+/* if HOWMNY = 'B', the matrix Q*Y; */
+/* if HOWMNY = 'S', the left eigenvectors of T specified by */
+/* SELECT, stored consecutively in the columns */
+/* of VL, in the same order as their */
+/* eigenvalues. */
+/* A complex eigenvector corresponding to a complex eigenvalue */
+/* is stored in two consecutive columns, the first holding the */
+/* real part, and the second the imaginary part. */
+/* Not referenced if SIDE = 'R'. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the array VL. LDVL >= 1, and if */
+/* SIDE = 'L' or 'B', LDVL >= N. */
+
+/* VR (input/output) DOUBLE PRECISION array, dimension (LDVR,MM) */
+/* On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must */
+/* contain an N-by-N matrix Q (usually the orthogonal matrix Q */
+/* of Schur vectors returned by DHSEQR). */
+/* On exit, if SIDE = 'R' or 'B', VR contains: */
+/* if HOWMNY = 'A', the matrix X of right eigenvectors of T; */
+/* if HOWMNY = 'B', the matrix Q*X; */
+/* if HOWMNY = 'S', the right eigenvectors of T specified by */
+/* SELECT, stored consecutively in the columns */
+/* of VR, in the same order as their */
+/* eigenvalues. */
+/* A complex eigenvector corresponding to a complex eigenvalue */
+/* is stored in two consecutive columns, the first holding the */
+/* real part and the second the imaginary part. */
+/* Not referenced if SIDE = 'L'. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. LDVR >= 1, and if */
+/* SIDE = 'R' or 'B', LDVR >= N. */
+
+/* MM (input) INTEGER */
+/* The number of columns in the arrays VL and/or VR. MM >= M. */
+
+/* M (output) INTEGER */
+/* The number of columns in the arrays VL and/or VR actually */
+/* used to store the eigenvectors. */
+/* If HOWMNY = 'A' or 'B', M is set to N. */
+/* Each selected real eigenvector occupies one column and each */
+/* selected complex eigenvector occupies two columns. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The algorithm used in this program is basically backward (forward) */
+/* substitution, with scaling to make the the code robust against */
+/* possible overflow. */
+
+/* Each eigenvector is normalized so that the element of largest */
+/* magnitude has magnitude 1; here the magnitude of a complex number */
+/* (x,y) is taken to be |x| + |y|. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+ /* Parameter adjustments */
+ --select;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+
+ /* Function Body */
+ bothv = lsame_(side, "B");
+ rightv = lsame_(side, "R") || bothv;
+ leftv = lsame_(side, "L") || bothv;
+
+ allv = lsame_(howmny, "A");
+ over = lsame_(howmny, "B");
+ somev = lsame_(howmny, "S");
+
+ *info = 0;
+ if (! rightv && ! leftv) {
+ *info = -1;
+ } else if (! allv && ! over && ! somev) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldt < max(1,*n)) {
+ *info = -6;
+ } else if (*ldvl < 1 || leftv && *ldvl < *n) {
+ *info = -8;
+ } else if (*ldvr < 1 || rightv && *ldvr < *n) {
+ *info = -10;
+ } else {
+
+/* Set M to the number of columns required to store the selected */
+/* eigenvectors, standardize the array SELECT if necessary, and */
+/* test MM. */
+
+ if (somev) {
+ *m = 0;
+ pair = FALSE_;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (pair) {
+ pair = FALSE_;
+ select[j] = FALSE_;
+ } else {
+ if (j < *n) {
+ if (t[j + 1 + j * t_dim1] == 0.) {
+ if (select[j]) {
+ ++(*m);
+ }
+ } else {
+ pair = TRUE_;
+ if (select[j] || select[j + 1]) {
+ select[j] = TRUE_;
+ *m += 2;
+ }
+ }
+ } else {
+ if (select[*n]) {
+ ++(*m);
+ }
+ }
+ }
+/* L10: */
+ }
+ } else {
+ *m = *n;
+ }
+
+ if (*mm < *m) {
+ *info = -11;
+ }
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTREVC", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Set the constants to control overflow. */
+
+ unfl = dlamch_("Safe minimum");
+ ovfl = 1. / unfl;
+ dlabad_(&unfl, &ovfl);
+ ulp = dlamch_("Precision");
+ smlnum = unfl * (*n / ulp);
+ bignum = (1. - ulp) / smlnum;
+
+/* Compute 1-norm of each column of strictly upper triangular */
+/* part of T to control overflow in triangular solver. */
+
+ work[1] = 0.;
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ work[j] = 0.;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[j] += (d__1 = t[i__ + j * t_dim1], abs(d__1));
+/* L20: */
+ }
+/* L30: */
+ }
+
+/* Index IP is used to specify the real or complex eigenvalue: */
+/* IP = 0, real eigenvalue, */
+/* 1, first of conjugate complex pair: (wr,wi) */
+/* -1, second of conjugate complex pair: (wr,wi) */
+
+ n2 = *n << 1;
+
+ if (rightv) {
+
+/* Compute right eigenvectors. */
+
+ ip = 0;
+ is = *m;
+ for (ki = *n; ki >= 1; --ki) {
+
+ if (ip == 1) {
+ goto L130;
+ }
+ if (ki == 1) {
+ goto L40;
+ }
+ if (t[ki + (ki - 1) * t_dim1] == 0.) {
+ goto L40;
+ }
+ ip = -1;
+
+L40:
+ if (somev) {
+ if (ip == 0) {
+ if (! select[ki]) {
+ goto L130;
+ }
+ } else {
+ if (! select[ki - 1]) {
+ goto L130;
+ }
+ }
+ }
+
+/* Compute the KI-th eigenvalue (WR,WI). */
+
+ wr = t[ki + ki * t_dim1];
+ wi = 0.;
+ if (ip != 0) {
+ wi = sqrt((d__1 = t[ki + (ki - 1) * t_dim1], abs(d__1))) *
+ sqrt((d__2 = t[ki - 1 + ki * t_dim1], abs(d__2)));
+ }
+/* Computing MAX */
+ d__1 = ulp * (abs(wr) + abs(wi));
+ smin = max(d__1,smlnum);
+
+ if (ip == 0) {
+
+/* Real right eigenvector */
+
+ work[ki + *n] = 1.;
+
+/* Form right-hand side */
+
+ i__1 = ki - 1;
+ for (k = 1; k <= i__1; ++k) {
+ work[k + *n] = -t[k + ki * t_dim1];
+/* L50: */
+ }
+
+/* Solve the upper quasi-triangular system: */
+/* (T(1:KI-1,1:KI-1) - WR)*X = SCALE*WORK. */
+
+ jnxt = ki - 1;
+ for (j = ki - 1; j >= 1; --j) {
+ if (j > jnxt) {
+ goto L60;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j - 1;
+ if (j > 1) {
+ if (t[j + (j - 1) * t_dim1] != 0.) {
+ j1 = j - 1;
+ jnxt = j - 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1-by-1 diagonal block */
+
+ dlaln2_(&c_false, &c__1, &c__1, &smin, &c_b22, &t[j +
+ j * t_dim1], ldt, &c_b22, &c_b22, &work[j + *
+ n], n, &wr, &c_b25, x, &c__2, &scale, &xnorm,
+ &ierr);
+
+/* Scale X(1,1) to avoid overflow when updating */
+/* the right-hand side. */
+
+ if (xnorm > 1.) {
+ if (work[j] > bignum / xnorm) {
+ x[0] /= xnorm;
+ scale /= xnorm;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ dscal_(&ki, &scale, &work[*n + 1], &c__1);
+ }
+ work[j + *n] = x[0];
+
+/* Update right-hand side */
+
+ i__1 = j - 1;
+ d__1 = -x[0];
+ daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+
+ } else {
+
+/* 2-by-2 diagonal block */
+
+ dlaln2_(&c_false, &c__2, &c__1, &smin, &c_b22, &t[j -
+ 1 + (j - 1) * t_dim1], ldt, &c_b22, &c_b22, &
+ work[j - 1 + *n], n, &wr, &c_b25, x, &c__2, &
+ scale, &xnorm, &ierr);
+
+/* Scale X(1,1) and X(2,1) to avoid overflow when */
+/* updating the right-hand side. */
+
+ if (xnorm > 1.) {
+/* Computing MAX */
+ d__1 = work[j - 1], d__2 = work[j];
+ beta = max(d__1,d__2);
+ if (beta > bignum / xnorm) {
+ x[0] /= xnorm;
+ x[1] /= xnorm;
+ scale /= xnorm;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ dscal_(&ki, &scale, &work[*n + 1], &c__1);
+ }
+ work[j - 1 + *n] = x[0];
+ work[j + *n] = x[1];
+
+/* Update right-hand side */
+
+ i__1 = j - 2;
+ d__1 = -x[0];
+ daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1,
+ &work[*n + 1], &c__1);
+ i__1 = j - 2;
+ d__1 = -x[1];
+ daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+ }
+L60:
+ ;
+ }
+
+/* Copy the vector x or Q*x to VR and normalize. */
+
+ if (! over) {
+ dcopy_(&ki, &work[*n + 1], &c__1, &vr[is * vr_dim1 + 1], &
+ c__1);
+
+ ii = idamax_(&ki, &vr[is * vr_dim1 + 1], &c__1);
+ remax = 1. / (d__1 = vr[ii + is * vr_dim1], abs(d__1));
+ dscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1);
+
+ i__1 = *n;
+ for (k = ki + 1; k <= i__1; ++k) {
+ vr[k + is * vr_dim1] = 0.;
+/* L70: */
+ }
+ } else {
+ if (ki > 1) {
+ i__1 = ki - 1;
+ dgemv_("N", n, &i__1, &c_b22, &vr[vr_offset], ldvr, &
+ work[*n + 1], &c__1, &work[ki + *n], &vr[ki *
+ vr_dim1 + 1], &c__1);
+ }
+
+ ii = idamax_(n, &vr[ki * vr_dim1 + 1], &c__1);
+ remax = 1. / (d__1 = vr[ii + ki * vr_dim1], abs(d__1));
+ dscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1);
+ }
+
+ } else {
+
+/* Complex right eigenvector. */
+
+/* Initial solve */
+/* [ (T(KI-1,KI-1) T(KI-1,KI) ) - (WR + I* WI)]*X = 0. */
+/* [ (T(KI,KI-1) T(KI,KI) ) ] */
+
+ if ((d__1 = t[ki - 1 + ki * t_dim1], abs(d__1)) >= (d__2 = t[
+ ki + (ki - 1) * t_dim1], abs(d__2))) {
+ work[ki - 1 + *n] = 1.;
+ work[ki + n2] = wi / t[ki - 1 + ki * t_dim1];
+ } else {
+ work[ki - 1 + *n] = -wi / t[ki + (ki - 1) * t_dim1];
+ work[ki + n2] = 1.;
+ }
+ work[ki + *n] = 0.;
+ work[ki - 1 + n2] = 0.;
+
+/* Form right-hand side */
+
+ i__1 = ki - 2;
+ for (k = 1; k <= i__1; ++k) {
+ work[k + *n] = -work[ki - 1 + *n] * t[k + (ki - 1) *
+ t_dim1];
+ work[k + n2] = -work[ki + n2] * t[k + ki * t_dim1];
+/* L80: */
+ }
+
+/* Solve upper quasi-triangular system: */
+/* (T(1:KI-2,1:KI-2) - (WR+i*WI))*X = SCALE*(WORK+i*WORK2) */
+
+ jnxt = ki - 2;
+ for (j = ki - 2; j >= 1; --j) {
+ if (j > jnxt) {
+ goto L90;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j - 1;
+ if (j > 1) {
+ if (t[j + (j - 1) * t_dim1] != 0.) {
+ j1 = j - 1;
+ jnxt = j - 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1-by-1 diagonal block */
+
+ dlaln2_(&c_false, &c__1, &c__2, &smin, &c_b22, &t[j +
+ j * t_dim1], ldt, &c_b22, &c_b22, &work[j + *
+ n], n, &wr, &wi, x, &c__2, &scale, &xnorm, &
+ ierr);
+
+/* Scale X(1,1) and X(1,2) to avoid overflow when */
+/* updating the right-hand side. */
+
+ if (xnorm > 1.) {
+ if (work[j] > bignum / xnorm) {
+ x[0] /= xnorm;
+ x[2] /= xnorm;
+ scale /= xnorm;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ dscal_(&ki, &scale, &work[*n + 1], &c__1);
+ dscal_(&ki, &scale, &work[n2 + 1], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + n2] = x[2];
+
+/* Update the right-hand side */
+
+ i__1 = j - 1;
+ d__1 = -x[0];
+ daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+ i__1 = j - 1;
+ d__1 = -x[2];
+ daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
+ n2 + 1], &c__1);
+
+ } else {
+
+/* 2-by-2 diagonal block */
+
+ dlaln2_(&c_false, &c__2, &c__2, &smin, &c_b22, &t[j -
+ 1 + (j - 1) * t_dim1], ldt, &c_b22, &c_b22, &
+ work[j - 1 + *n], n, &wr, &wi, x, &c__2, &
+ scale, &xnorm, &ierr);
+
+/* Scale X to avoid overflow when updating */
+/* the right-hand side. */
+
+ if (xnorm > 1.) {
+/* Computing MAX */
+ d__1 = work[j - 1], d__2 = work[j];
+ beta = max(d__1,d__2);
+ if (beta > bignum / xnorm) {
+ rec = 1. / xnorm;
+ x[0] *= rec;
+ x[2] *= rec;
+ x[1] *= rec;
+ x[3] *= rec;
+ scale *= rec;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ dscal_(&ki, &scale, &work[*n + 1], &c__1);
+ dscal_(&ki, &scale, &work[n2 + 1], &c__1);
+ }
+ work[j - 1 + *n] = x[0];
+ work[j + *n] = x[1];
+ work[j - 1 + n2] = x[2];
+ work[j + n2] = x[3];
+
+/* Update the right-hand side */
+
+ i__1 = j - 2;
+ d__1 = -x[0];
+ daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1,
+ &work[*n + 1], &c__1);
+ i__1 = j - 2;
+ d__1 = -x[1];
+ daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+ i__1 = j - 2;
+ d__1 = -x[2];
+ daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1,
+ &work[n2 + 1], &c__1);
+ i__1 = j - 2;
+ d__1 = -x[3];
+ daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
+ n2 + 1], &c__1);
+ }
+L90:
+ ;
+ }
+
+/* Copy the vector x or Q*x to VR and normalize. */
+
+ if (! over) {
+ dcopy_(&ki, &work[*n + 1], &c__1, &vr[(is - 1) * vr_dim1
+ + 1], &c__1);
+ dcopy_(&ki, &work[n2 + 1], &c__1, &vr[is * vr_dim1 + 1], &
+ c__1);
+
+ emax = 0.;
+ i__1 = ki;
+ for (k = 1; k <= i__1; ++k) {
+/* Computing MAX */
+ d__3 = emax, d__4 = (d__1 = vr[k + (is - 1) * vr_dim1]
+ , abs(d__1)) + (d__2 = vr[k + is * vr_dim1],
+ abs(d__2));
+ emax = max(d__3,d__4);
+/* L100: */
+ }
+
+ remax = 1. / emax;
+ dscal_(&ki, &remax, &vr[(is - 1) * vr_dim1 + 1], &c__1);
+ dscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1);
+
+ i__1 = *n;
+ for (k = ki + 1; k <= i__1; ++k) {
+ vr[k + (is - 1) * vr_dim1] = 0.;
+ vr[k + is * vr_dim1] = 0.;
+/* L110: */
+ }
+
+ } else {
+
+ if (ki > 2) {
+ i__1 = ki - 2;
+ dgemv_("N", n, &i__1, &c_b22, &vr[vr_offset], ldvr, &
+ work[*n + 1], &c__1, &work[ki - 1 + *n], &vr[(
+ ki - 1) * vr_dim1 + 1], &c__1);
+ i__1 = ki - 2;
+ dgemv_("N", n, &i__1, &c_b22, &vr[vr_offset], ldvr, &
+ work[n2 + 1], &c__1, &work[ki + n2], &vr[ki *
+ vr_dim1 + 1], &c__1);
+ } else {
+ dscal_(n, &work[ki - 1 + *n], &vr[(ki - 1) * vr_dim1
+ + 1], &c__1);
+ dscal_(n, &work[ki + n2], &vr[ki * vr_dim1 + 1], &
+ c__1);
+ }
+
+ emax = 0.;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+/* Computing MAX */
+ d__3 = emax, d__4 = (d__1 = vr[k + (ki - 1) * vr_dim1]
+ , abs(d__1)) + (d__2 = vr[k + ki * vr_dim1],
+ abs(d__2));
+ emax = max(d__3,d__4);
+/* L120: */
+ }
+ remax = 1. / emax;
+ dscal_(n, &remax, &vr[(ki - 1) * vr_dim1 + 1], &c__1);
+ dscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1);
+ }
+ }
+
+ --is;
+ if (ip != 0) {
+ --is;
+ }
+L130:
+ if (ip == 1) {
+ ip = 0;
+ }
+ if (ip == -1) {
+ ip = 1;
+ }
+/* L140: */
+ }
+ }
+
+ if (leftv) {
+
+/* Compute left eigenvectors. */
+
+ ip = 0;
+ is = 1;
+ i__1 = *n;
+ for (ki = 1; ki <= i__1; ++ki) {
+
+ if (ip == -1) {
+ goto L250;
+ }
+ if (ki == *n) {
+ goto L150;
+ }
+ if (t[ki + 1 + ki * t_dim1] == 0.) {
+ goto L150;
+ }
+ ip = 1;
+
+L150:
+ if (somev) {
+ if (! select[ki]) {
+ goto L250;
+ }
+ }
+
+/* Compute the KI-th eigenvalue (WR,WI). */
+
+ wr = t[ki + ki * t_dim1];
+ wi = 0.;
+ if (ip != 0) {
+ wi = sqrt((d__1 = t[ki + (ki + 1) * t_dim1], abs(d__1))) *
+ sqrt((d__2 = t[ki + 1 + ki * t_dim1], abs(d__2)));
+ }
+/* Computing MAX */
+ d__1 = ulp * (abs(wr) + abs(wi));
+ smin = max(d__1,smlnum);
+
+ if (ip == 0) {
+
+/* Real left eigenvector. */
+
+ work[ki + *n] = 1.;
+
+/* Form right-hand side */
+
+ i__2 = *n;
+ for (k = ki + 1; k <= i__2; ++k) {
+ work[k + *n] = -t[ki + k * t_dim1];
+/* L160: */
+ }
+
+/* Solve the quasi-triangular system: */
+/* (T(KI+1:N,KI+1:N) - WR)'*X = SCALE*WORK */
+
+ vmax = 1.;
+ vcrit = bignum;
+
+ jnxt = ki + 1;
+ i__2 = *n;
+ for (j = ki + 1; j <= i__2; ++j) {
+ if (j < jnxt) {
+ goto L170;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j + 1;
+ if (j < *n) {
+ if (t[j + 1 + j * t_dim1] != 0.) {
+ j2 = j + 1;
+ jnxt = j + 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1-by-1 diagonal block */
+
+/* Scale if necessary to avoid overflow when forming */
+/* the right-hand side. */
+
+ if (work[j] > vcrit) {
+ rec = 1. / vmax;
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ vmax = 1.;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 1;
+ work[j + *n] -= ddot_(&i__3, &t[ki + 1 + j * t_dim1],
+ &c__1, &work[ki + 1 + *n], &c__1);
+
+/* Solve (T(J,J)-WR)'*X = WORK */
+
+ dlaln2_(&c_false, &c__1, &c__1, &smin, &c_b22, &t[j +
+ j * t_dim1], ldt, &c_b22, &c_b22, &work[j + *
+ n], n, &wr, &c_b25, x, &c__2, &scale, &xnorm,
+ &ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ }
+ work[j + *n] = x[0];
+/* Computing MAX */
+ d__2 = (d__1 = work[j + *n], abs(d__1));
+ vmax = max(d__2,vmax);
+ vcrit = bignum / vmax;
+
+ } else {
+
+/* 2-by-2 diagonal block */
+
+/* Scale if necessary to avoid overflow when forming */
+/* the right-hand side. */
+
+/* Computing MAX */
+ d__1 = work[j], d__2 = work[j + 1];
+ beta = max(d__1,d__2);
+ if (beta > vcrit) {
+ rec = 1. / vmax;
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ vmax = 1.;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 1;
+ work[j + *n] -= ddot_(&i__3, &t[ki + 1 + j * t_dim1],
+ &c__1, &work[ki + 1 + *n], &c__1);
+
+ i__3 = j - ki - 1;
+ work[j + 1 + *n] -= ddot_(&i__3, &t[ki + 1 + (j + 1) *
+ t_dim1], &c__1, &work[ki + 1 + *n], &c__1);
+
+/* Solve */
+/* [T(J,J)-WR T(J,J+1) ]'* X = SCALE*( WORK1 ) */
+/* [T(J+1,J) T(J+1,J+1)-WR] ( WORK2 ) */
+
+ dlaln2_(&c_true, &c__2, &c__1, &smin, &c_b22, &t[j +
+ j * t_dim1], ldt, &c_b22, &c_b22, &work[j + *
+ n], n, &wr, &c_b25, x, &c__2, &scale, &xnorm,
+ &ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + 1 + *n] = x[1];
+
+/* Computing MAX */
+ d__3 = (d__1 = work[j + *n], abs(d__1)), d__4 = (d__2
+ = work[j + 1 + *n], abs(d__2)), d__3 = max(
+ d__3,d__4);
+ vmax = max(d__3,vmax);
+ vcrit = bignum / vmax;
+
+ }
+L170:
+ ;
+ }
+
+/* Copy the vector x or Q*x to VL and normalize. */
+
+ if (! over) {
+ i__2 = *n - ki + 1;
+ dcopy_(&i__2, &work[ki + *n], &c__1, &vl[ki + is *
+ vl_dim1], &c__1);
+
+ i__2 = *n - ki + 1;
+ ii = idamax_(&i__2, &vl[ki + is * vl_dim1], &c__1) + ki -
+ 1;
+ remax = 1. / (d__1 = vl[ii + is * vl_dim1], abs(d__1));
+ i__2 = *n - ki + 1;
+ dscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1);
+
+ i__2 = ki - 1;
+ for (k = 1; k <= i__2; ++k) {
+ vl[k + is * vl_dim1] = 0.;
+/* L180: */
+ }
+
+ } else {
+
+ if (ki < *n) {
+ i__2 = *n - ki;
+ dgemv_("N", n, &i__2, &c_b22, &vl[(ki + 1) * vl_dim1
+ + 1], ldvl, &work[ki + 1 + *n], &c__1, &work[
+ ki + *n], &vl[ki * vl_dim1 + 1], &c__1);
+ }
+
+ ii = idamax_(n, &vl[ki * vl_dim1 + 1], &c__1);
+ remax = 1. / (d__1 = vl[ii + ki * vl_dim1], abs(d__1));
+ dscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1);
+
+ }
+
+ } else {
+
+/* Complex left eigenvector. */
+
+/* Initial solve: */
+/* ((T(KI,KI) T(KI,KI+1) )' - (WR - I* WI))*X = 0. */
+/* ((T(KI+1,KI) T(KI+1,KI+1)) ) */
+
+ if ((d__1 = t[ki + (ki + 1) * t_dim1], abs(d__1)) >= (d__2 =
+ t[ki + 1 + ki * t_dim1], abs(d__2))) {
+ work[ki + *n] = wi / t[ki + (ki + 1) * t_dim1];
+ work[ki + 1 + n2] = 1.;
+ } else {
+ work[ki + *n] = 1.;
+ work[ki + 1 + n2] = -wi / t[ki + 1 + ki * t_dim1];
+ }
+ work[ki + 1 + *n] = 0.;
+ work[ki + n2] = 0.;
+
+/* Form right-hand side */
+
+ i__2 = *n;
+ for (k = ki + 2; k <= i__2; ++k) {
+ work[k + *n] = -work[ki + *n] * t[ki + k * t_dim1];
+ work[k + n2] = -work[ki + 1 + n2] * t[ki + 1 + k * t_dim1]
+ ;
+/* L190: */
+ }
+
+/* Solve complex quasi-triangular system: */
+/* ( T(KI+2,N:KI+2,N) - (WR-i*WI) )*X = WORK1+i*WORK2 */
+
+ vmax = 1.;
+ vcrit = bignum;
+
+ jnxt = ki + 2;
+ i__2 = *n;
+ for (j = ki + 2; j <= i__2; ++j) {
+ if (j < jnxt) {
+ goto L200;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j + 1;
+ if (j < *n) {
+ if (t[j + 1 + j * t_dim1] != 0.) {
+ j2 = j + 1;
+ jnxt = j + 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1-by-1 diagonal block */
+
+/* Scale if necessary to avoid overflow when */
+/* forming the right-hand side elements. */
+
+ if (work[j] > vcrit) {
+ rec = 1. / vmax;
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &rec, &work[ki + n2], &c__1);
+ vmax = 1.;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 2;
+ work[j + *n] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + *n], &c__1);
+ i__3 = j - ki - 2;
+ work[j + n2] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + n2], &c__1);
+
+/* Solve (T(J,J)-(WR-i*WI))*(X11+i*X12)= WK+I*WK2 */
+
+ d__1 = -wi;
+ dlaln2_(&c_false, &c__1, &c__2, &smin, &c_b22, &t[j +
+ j * t_dim1], ldt, &c_b22, &c_b22, &work[j + *
+ n], n, &wr, &d__1, x, &c__2, &scale, &xnorm, &
+ ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &scale, &work[ki + n2], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + n2] = x[2];
+/* Computing MAX */
+ d__3 = (d__1 = work[j + *n], abs(d__1)), d__4 = (d__2
+ = work[j + n2], abs(d__2)), d__3 = max(d__3,
+ d__4);
+ vmax = max(d__3,vmax);
+ vcrit = bignum / vmax;
+
+ } else {
+
+/* 2-by-2 diagonal block */
+
+/* Scale if necessary to avoid overflow when forming */
+/* the right-hand side elements. */
+
+/* Computing MAX */
+ d__1 = work[j], d__2 = work[j + 1];
+ beta = max(d__1,d__2);
+ if (beta > vcrit) {
+ rec = 1. / vmax;
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &rec, &work[ki + n2], &c__1);
+ vmax = 1.;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 2;
+ work[j + *n] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + *n], &c__1);
+
+ i__3 = j - ki - 2;
+ work[j + n2] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + n2], &c__1);
+
+ i__3 = j - ki - 2;
+ work[j + 1 + *n] -= ddot_(&i__3, &t[ki + 2 + (j + 1) *
+ t_dim1], &c__1, &work[ki + 2 + *n], &c__1);
+
+ i__3 = j - ki - 2;
+ work[j + 1 + n2] -= ddot_(&i__3, &t[ki + 2 + (j + 1) *
+ t_dim1], &c__1, &work[ki + 2 + n2], &c__1);
+
+/* Solve 2-by-2 complex linear equation */
+/* ([T(j,j) T(j,j+1) ]'-(wr-i*wi)*I)*X = SCALE*B */
+/* ([T(j+1,j) T(j+1,j+1)] ) */
+
+ d__1 = -wi;
+ dlaln2_(&c_true, &c__2, &c__2, &smin, &c_b22, &t[j +
+ j * t_dim1], ldt, &c_b22, &c_b22, &work[j + *
+ n], n, &wr, &d__1, x, &c__2, &scale, &xnorm, &
+ ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &scale, &work[ki + n2], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + n2] = x[2];
+ work[j + 1 + *n] = x[1];
+ work[j + 1 + n2] = x[3];
+/* Computing MAX */
+ d__1 = abs(x[0]), d__2 = abs(x[2]), d__1 = max(d__1,
+ d__2), d__2 = abs(x[1]), d__1 = max(d__1,d__2)
+ , d__2 = abs(x[3]), d__1 = max(d__1,d__2);
+ vmax = max(d__1,vmax);
+ vcrit = bignum / vmax;
+
+ }
+L200:
+ ;
+ }
+
+/* Copy the vector x or Q*x to VL and normalize. */
+
+ if (! over) {
+ i__2 = *n - ki + 1;
+ dcopy_(&i__2, &work[ki + *n], &c__1, &vl[ki + is *
+ vl_dim1], &c__1);
+ i__2 = *n - ki + 1;
+ dcopy_(&i__2, &work[ki + n2], &c__1, &vl[ki + (is + 1) *
+ vl_dim1], &c__1);
+
+ emax = 0.;
+ i__2 = *n;
+ for (k = ki; k <= i__2; ++k) {
+/* Computing MAX */
+ d__3 = emax, d__4 = (d__1 = vl[k + is * vl_dim1], abs(
+ d__1)) + (d__2 = vl[k + (is + 1) * vl_dim1],
+ abs(d__2));
+ emax = max(d__3,d__4);
+/* L220: */
+ }
+ remax = 1. / emax;
+ i__2 = *n - ki + 1;
+ dscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1);
+ i__2 = *n - ki + 1;
+ dscal_(&i__2, &remax, &vl[ki + (is + 1) * vl_dim1], &c__1)
+ ;
+
+ i__2 = ki - 1;
+ for (k = 1; k <= i__2; ++k) {
+ vl[k + is * vl_dim1] = 0.;
+ vl[k + (is + 1) * vl_dim1] = 0.;
+/* L230: */
+ }
+ } else {
+ if (ki < *n - 1) {
+ i__2 = *n - ki - 1;
+ dgemv_("N", n, &i__2, &c_b22, &vl[(ki + 2) * vl_dim1
+ + 1], ldvl, &work[ki + 2 + *n], &c__1, &work[
+ ki + *n], &vl[ki * vl_dim1 + 1], &c__1);
+ i__2 = *n - ki - 1;
+ dgemv_("N", n, &i__2, &c_b22, &vl[(ki + 2) * vl_dim1
+ + 1], ldvl, &work[ki + 2 + n2], &c__1, &work[
+ ki + 1 + n2], &vl[(ki + 1) * vl_dim1 + 1], &
+ c__1);
+ } else {
+ dscal_(n, &work[ki + *n], &vl[ki * vl_dim1 + 1], &
+ c__1);
+ dscal_(n, &work[ki + 1 + n2], &vl[(ki + 1) * vl_dim1
+ + 1], &c__1);
+ }
+
+ emax = 0.;
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+/* Computing MAX */
+ d__3 = emax, d__4 = (d__1 = vl[k + ki * vl_dim1], abs(
+ d__1)) + (d__2 = vl[k + (ki + 1) * vl_dim1],
+ abs(d__2));
+ emax = max(d__3,d__4);
+/* L240: */
+ }
+ remax = 1. / emax;
+ dscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1);
+ dscal_(n, &remax, &vl[(ki + 1) * vl_dim1 + 1], &c__1);
+
+ }
+
+ }
+
+ ++is;
+ if (ip != 0) {
+ ++is;
+ }
+L250:
+ if (ip == -1) {
+ ip = 0;
+ }
+ if (ip == 1) {
+ ip = -1;
+ }
+
+/* L260: */
+ }
+
+ }
+
+ return 0;
+
+/* End of DTREVC */
+
+} /* dtrevc_ */
diff --git a/contrib/libs/clapack/dtrexc.c b/contrib/libs/clapack/dtrexc.c
new file mode 100644
index 0000000000..9d77fbe8ea
--- /dev/null
+++ b/contrib/libs/clapack/dtrexc.c
@@ -0,0 +1,403 @@
+/* dtrexc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int dtrexc_(char *compq, integer *n, doublereal *t, integer *
+ ldt, doublereal *q, integer *ldq, integer *ifst, integer *ilst,
+ doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, t_dim1, t_offset, i__1;
+
+ /* Local variables */
+ integer nbf, nbl, here;
+ extern logical lsame_(char *, char *);
+ logical wantq;
+ extern /* Subroutine */ int dlaexc_(logical *, integer *, doublereal *,
+ integer *, doublereal *, integer *, integer *, integer *, integer
+ *, doublereal *, integer *), xerbla_(char *, integer *);
+ integer nbnext;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTREXC reorders the real Schur factorization of a real matrix */
+/* A = Q*T*Q**T, so that the diagonal block of T with row index IFST is */
+/* moved to row ILST. */
+
+/* The real Schur form T is reordered by an orthogonal similarity */
+/* transformation Z**T*T*Z, and optionally the matrix Q of Schur vectors */
+/* is updated by postmultiplying it with Z. */
+
+/* T must be in Schur canonical form (as returned by DHSEQR), that is, */
+/* block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; each */
+/* 2-by-2 diagonal block has its diagonal elements equal and its */
+/* off-diagonal elements of opposite sign. */
+
+/* Arguments */
+/* ========= */
+
+/* COMPQ (input) CHARACTER*1 */
+/* = 'V': update the matrix Q of Schur vectors; */
+/* = 'N': do not update Q. */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. N >= 0. */
+
+/* T (input/output) DOUBLE PRECISION array, dimension (LDT,N) */
+/* On entry, the upper quasi-triangular matrix T, in Schur */
+/* Schur canonical form. */
+/* On exit, the reordered upper quasi-triangular matrix, again */
+/* in Schur canonical form. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max(1,N). */
+
+/* Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N) */
+/* On entry, if COMPQ = 'V', the matrix Q of Schur vectors. */
+/* On exit, if COMPQ = 'V', Q has been postmultiplied by the */
+/* orthogonal transformation matrix Z which reorders T. */
+/* If COMPQ = 'N', Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* IFST (input/output) INTEGER */
+/* ILST (input/output) INTEGER */
+/* Specify the reordering of the diagonal blocks of T. */
+/* The block with row index IFST is moved to row ILST, by a */
+/* sequence of transpositions between adjacent blocks. */
+/* On exit, if IFST pointed on entry to the second row of a */
+/* 2-by-2 block, it is changed to point to the first row; ILST */
+/* always points to the first row of the block in its final */
+/* position (which may differ from its input value by +1 or -1). */
+/* 1 <= IFST <= N; 1 <= ILST <= N. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* = 1: two adjacent blocks were too close to swap (the problem */
+/* is very ill-conditioned); T may have been partially */
+/* reordered, and ILST points to the first row of the */
+/* current position of the block being moved. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input arguments. */
+
+ /* Parameter adjustments */
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ wantq = lsame_(compq, "V");
+ if (! wantq && ! lsame_(compq, "N")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldt < max(1,*n)) {
+ *info = -4;
+ } else if (*ldq < 1 || wantq && *ldq < max(1,*n)) {
+ *info = -6;
+ } else if (*ifst < 1 || *ifst > *n) {
+ *info = -7;
+ } else if (*ilst < 1 || *ilst > *n) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTREXC", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 1) {
+ return 0;
+ }
+
+/* Determine the first row of specified block */
+/* and find out it is 1 by 1 or 2 by 2. */
+
+ if (*ifst > 1) {
+ if (t[*ifst + (*ifst - 1) * t_dim1] != 0.) {
+ --(*ifst);
+ }
+ }
+ nbf = 1;
+ if (*ifst < *n) {
+ if (t[*ifst + 1 + *ifst * t_dim1] != 0.) {
+ nbf = 2;
+ }
+ }
+
+/* Determine the first row of the final block */
+/* and find out it is 1 by 1 or 2 by 2. */
+
+ if (*ilst > 1) {
+ if (t[*ilst + (*ilst - 1) * t_dim1] != 0.) {
+ --(*ilst);
+ }
+ }
+ nbl = 1;
+ if (*ilst < *n) {
+ if (t[*ilst + 1 + *ilst * t_dim1] != 0.) {
+ nbl = 2;
+ }
+ }
+
+ if (*ifst == *ilst) {
+ return 0;
+ }
+
+ if (*ifst < *ilst) {
+
+/* Update ILST */
+
+ if (nbf == 2 && nbl == 1) {
+ --(*ilst);
+ }
+ if (nbf == 1 && nbl == 2) {
+ ++(*ilst);
+ }
+
+ here = *ifst;
+
+L10:
+
+/* Swap block with next one below */
+
+ if (nbf == 1 || nbf == 2) {
+
+/* Current block either 1 by 1 or 2 by 2 */
+
+ nbnext = 1;
+ if (here + nbf + 1 <= *n) {
+ if (t[here + nbf + 1 + (here + nbf) * t_dim1] != 0.) {
+ nbnext = 2;
+ }
+ }
+ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &here, &
+ nbf, &nbnext, &work[1], info);
+ if (*info != 0) {
+ *ilst = here;
+ return 0;
+ }
+ here += nbnext;
+
+/* Test if 2 by 2 block breaks into two 1 by 1 blocks */
+
+ if (nbf == 2) {
+ if (t[here + 1 + here * t_dim1] == 0.) {
+ nbf = 3;
+ }
+ }
+
+ } else {
+
+/* Current block consists of two 1 by 1 blocks each of which */
+/* must be swapped individually */
+
+ nbnext = 1;
+ if (here + 3 <= *n) {
+ if (t[here + 3 + (here + 2) * t_dim1] != 0.) {
+ nbnext = 2;
+ }
+ }
+ i__1 = here + 1;
+ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &i__1, &
+ c__1, &nbnext, &work[1], info);
+ if (*info != 0) {
+ *ilst = here;
+ return 0;
+ }
+ if (nbnext == 1) {
+
+/* Swap two 1 by 1 blocks, no problems possible */
+
+ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &
+ here, &c__1, &nbnext, &work[1], info);
+ ++here;
+ } else {
+
+/* Recompute NBNEXT in case 2 by 2 split */
+
+ if (t[here + 2 + (here + 1) * t_dim1] == 0.) {
+ nbnext = 1;
+ }
+ if (nbnext == 2) {
+
+/* 2 by 2 Block did not split */
+
+ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &
+ here, &c__1, &nbnext, &work[1], info);
+ if (*info != 0) {
+ *ilst = here;
+ return 0;
+ }
+ here += 2;
+ } else {
+
+/* 2 by 2 Block did split */
+
+ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &
+ here, &c__1, &c__1, &work[1], info);
+ i__1 = here + 1;
+ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &
+ i__1, &c__1, &c__1, &work[1], info);
+ here += 2;
+ }
+ }
+ }
+ if (here < *ilst) {
+ goto L10;
+ }
+
+ } else {
+
+ here = *ifst;
+L20:
+
+/* Swap block with next one above */
+
+ if (nbf == 1 || nbf == 2) {
+
+/* Current block either 1 by 1 or 2 by 2 */
+
+ nbnext = 1;
+ if (here >= 3) {
+ if (t[here - 1 + (here - 2) * t_dim1] != 0.) {
+ nbnext = 2;
+ }
+ }
+ i__1 = here - nbnext;
+ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &i__1, &
+ nbnext, &nbf, &work[1], info);
+ if (*info != 0) {
+ *ilst = here;
+ return 0;
+ }
+ here -= nbnext;
+
+/* Test if 2 by 2 block breaks into two 1 by 1 blocks */
+
+ if (nbf == 2) {
+ if (t[here + 1 + here * t_dim1] == 0.) {
+ nbf = 3;
+ }
+ }
+
+ } else {
+
+/* Current block consists of two 1 by 1 blocks each of which */
+/* must be swapped individually */
+
+ nbnext = 1;
+ if (here >= 3) {
+ if (t[here - 1 + (here - 2) * t_dim1] != 0.) {
+ nbnext = 2;
+ }
+ }
+ i__1 = here - nbnext;
+ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &i__1, &
+ nbnext, &c__1, &work[1], info);
+ if (*info != 0) {
+ *ilst = here;
+ return 0;
+ }
+ if (nbnext == 1) {
+
+/* Swap two 1 by 1 blocks, no problems possible */
+
+ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &
+ here, &nbnext, &c__1, &work[1], info);
+ --here;
+ } else {
+
+/* Recompute NBNEXT in case 2 by 2 split */
+
+ if (t[here + (here - 1) * t_dim1] == 0.) {
+ nbnext = 1;
+ }
+ if (nbnext == 2) {
+
+/* 2 by 2 Block did not split */
+
+ i__1 = here - 1;
+ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &
+ i__1, &c__2, &c__1, &work[1], info);
+ if (*info != 0) {
+ *ilst = here;
+ return 0;
+ }
+ here += -2;
+ } else {
+
+/* 2 by 2 Block did split */
+
+ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &
+ here, &c__1, &c__1, &work[1], info);
+ i__1 = here - 1;
+ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &
+ i__1, &c__1, &c__1, &work[1], info);
+ here += -2;
+ }
+ }
+ }
+ if (here > *ilst) {
+ goto L20;
+ }
+ }
+ *ilst = here;
+
+ return 0;
+
+/* End of DTREXC */
+
+} /* dtrexc_ */
diff --git a/contrib/libs/clapack/dtrrfs.c b/contrib/libs/clapack/dtrrfs.c
new file mode 100644
index 0000000000..d07a7dfc2a
--- /dev/null
+++ b/contrib/libs/clapack/dtrrfs.c
@@ -0,0 +1,493 @@
+/* dtrrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b19 = -1.;
+
+/* Subroutine */ int dtrrfs_(char *uplo, char *trans, char *diag, integer *n,
+ integer *nrhs, doublereal *a, integer *lda, doublereal *b, integer *
+ ldb, doublereal *x, integer *ldx, doublereal *ferr, doublereal *berr,
+ doublereal *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, x_dim1, x_offset, i__1, i__2,
+ i__3;
+ doublereal d__1, d__2, d__3;
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal s, xk;
+ integer nz;
+ doublereal eps;
+ integer kase;
+ doublereal safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), daxpy_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *);
+ logical upper;
+ extern /* Subroutine */ int dtrmv_(char *, char *, char *, integer *,
+ doublereal *, integer *, doublereal *, integer *), dtrsv_(char *, char *, char *, integer *, doublereal *,
+ integer *, doublereal *, integer *),
+ dlacn2_(integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, integer *);
+ extern doublereal dlamch_(char *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran;
+ char transt[1];
+ logical nounit;
+ doublereal lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTRRFS provides error bounds and backward error estimates for the */
+/* solution to a system of linear equations with a triangular */
+/* coefficient matrix. */
+
+/* The solution matrix X must be computed by DTRTRS or some other */
+/* means before entering this routine. DTRRFS does not do iterative */
+/* refinement because doing so cannot improve the backward error. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose = Transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The triangular matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of the array A contains the upper */
+/* triangular matrix, and the strictly lower triangular part of */
+/* A is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of the array A contains the lower triangular */
+/* matrix, and the strictly upper triangular part of A is not */
+/* referenced. If DIAG = 'U', the diagonal elements of A are */
+/* also not referenced and are assumed to be 1. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input) DOUBLE PRECISION array, dimension (LDX,NRHS) */
+/* The solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*nrhs < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldx < max(1,*n)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTRRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.;
+ berr[j] = 0.;
+/* L10: */
+ }
+ return 0;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = *n + 1;
+ eps = dlamch_("Epsilon");
+ safmin = dlamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A or A', depending on TRANS. */
+
+ dcopy_(n, &x[j * x_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+ dtrmv_(uplo, trans, diag, n, &a[a_offset], lda, &work[*n + 1], &c__1);
+ daxpy_(n, &c_b19, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] = (d__1 = b[i__ + j * b_dim1], abs(d__1));
+/* L20: */
+ }
+
+ if (notran) {
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+ if (upper) {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+ i__3 = k;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ work[i__] += (d__1 = a[i__ + k * a_dim1], abs(
+ d__1)) * xk;
+/* L30: */
+ }
+/* L40: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ work[i__] += (d__1 = a[i__ + k * a_dim1], abs(
+ d__1)) * xk;
+/* L50: */
+ }
+ work[k] += xk;
+/* L60: */
+ }
+ }
+ } else {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+ i__3 = *n;
+ for (i__ = k; i__ <= i__3; ++i__) {
+ work[i__] += (d__1 = a[i__ + k * a_dim1], abs(
+ d__1)) * xk;
+/* L70: */
+ }
+/* L80: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ xk = (d__1 = x[k + j * x_dim1], abs(d__1));
+ i__3 = *n;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ work[i__] += (d__1 = a[i__ + k * a_dim1], abs(
+ d__1)) * xk;
+/* L90: */
+ }
+ work[k] += xk;
+/* L100: */
+ }
+ }
+ }
+ } else {
+
+/* Compute abs(A')*abs(X) + abs(B). */
+
+ if (upper) {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ i__3 = k;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ s += (d__1 = a[i__ + k * a_dim1], abs(d__1)) * (
+ d__2 = x[i__ + j * x_dim1], abs(d__2));
+/* L110: */
+ }
+ work[k] += s;
+/* L120: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = (d__1 = x[k + j * x_dim1], abs(d__1));
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ s += (d__1 = a[i__ + k * a_dim1], abs(d__1)) * (
+ d__2 = x[i__ + j * x_dim1], abs(d__2));
+/* L130: */
+ }
+ work[k] += s;
+/* L140: */
+ }
+ }
+ } else {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ i__3 = *n;
+ for (i__ = k; i__ <= i__3; ++i__) {
+ s += (d__1 = a[i__ + k * a_dim1], abs(d__1)) * (
+ d__2 = x[i__ + j * x_dim1], abs(d__2));
+/* L150: */
+ }
+ work[k] += s;
+/* L160: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = (d__1 = x[k + j * x_dim1], abs(d__1));
+ i__3 = *n;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ s += (d__1 = a[i__ + k * a_dim1], abs(d__1)) * (
+ d__2 = x[i__ + j * x_dim1], abs(d__2));
+/* L170: */
+ }
+ work[k] += s;
+/* L180: */
+ }
+ }
+ }
+ }
+ s = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+/* Computing MAX */
+ d__2 = s, d__3 = (d__1 = work[*n + i__], abs(d__1)) / work[
+ i__];
+ s = max(d__2,d__3);
+ } else {
+/* Computing MAX */
+ d__2 = s, d__3 = ((d__1 = work[*n + i__], abs(d__1)) + safe1)
+ / (work[i__] + safe1);
+ s = max(d__2,d__3);
+ }
+/* L190: */
+ }
+ berr[j] = s;
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use DLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+ work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+ work[i__];
+ } else {
+ work[i__] = (d__1 = work[*n + i__], abs(d__1)) + nz * eps *
+ work[i__] + safe1;
+ }
+/* L200: */
+ }
+
+ kase = 0;
+L210:
+ dlacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+ kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)'). */
+
+ dtrsv_(uplo, transt, diag, n, &a[a_offset], lda, &work[*n + 1]
+, &c__1);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] = work[i__] * work[*n + i__];
+/* L220: */
+ }
+ } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] = work[i__] * work[*n + i__];
+/* L230: */
+ }
+ dtrsv_(uplo, trans, diag, n, &a[a_offset], lda, &work[*n + 1],
+ &c__1);
+ }
+ goto L210;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = lstres, d__3 = (d__1 = x[i__ + j * x_dim1], abs(d__1));
+ lstres = max(d__2,d__3);
+/* L240: */
+ }
+ if (lstres != 0.) {
+ ferr[j] /= lstres;
+ }
+
+/* L250: */
+ }
+
+ return 0;
+
+/* End of DTRRFS */
+
+} /* dtrrfs_ */
diff --git a/contrib/libs/clapack/dtrsen.c b/contrib/libs/clapack/dtrsen.c
new file mode 100644
index 0000000000..193ed024e1
--- /dev/null
+++ b/contrib/libs/clapack/dtrsen.c
@@ -0,0 +1,530 @@
+/* dtrsen.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c_n1 = -1;
+
+/* Subroutine */ int dtrsen_(char *job, char *compq, logical *select, integer
+ *n, doublereal *t, integer *ldt, doublereal *q, integer *ldq,
+ doublereal *wr, doublereal *wi, integer *m, doublereal *s, doublereal
+ *sep, doublereal *work, integer *lwork, integer *iwork, integer *
+ liwork, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, t_dim1, t_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer k, n1, n2, kk, nn, ks;
+ doublereal est;
+ integer kase;
+ logical pair;
+ integer ierr;
+ logical swap;
+ doublereal scale;
+ extern logical lsame_(char *, char *);
+ integer isave[3], lwmin;
+ logical wantq, wants;
+ doublereal rnorm;
+ extern /* Subroutine */ int dlacn2_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ extern doublereal dlange_(char *, integer *, integer *, doublereal *,
+ integer *, doublereal *);
+ extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+ logical wantbh;
+ extern /* Subroutine */ int dtrexc_(char *, integer *, doublereal *,
+ integer *, doublereal *, integer *, integer *, integer *,
+ doublereal *, integer *);
+ integer liwmin;
+ logical wantsp, lquery;
+ extern /* Subroutine */ int dtrsyl_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTRSEN reorders the real Schur factorization of a real matrix */
+/* A = Q*T*Q**T, so that a selected cluster of eigenvalues appears in */
+/* the leading diagonal blocks of the upper quasi-triangular matrix T, */
+/* and the leading columns of Q form an orthonormal basis of the */
+/* corresponding right invariant subspace. */
+
+/* Optionally the routine computes the reciprocal condition numbers of */
+/* the cluster of eigenvalues and/or the invariant subspace. */
+
+/* T must be in Schur canonical form (as returned by DHSEQR), that is, */
+/* block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; each */
+/* 2-by-2 diagonal block has its diagonal elements equal and its */
+/* off-diagonal elements of opposite sign. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies whether condition numbers are required for the */
+/* cluster of eigenvalues (S) or the invariant subspace (SEP): */
+/* = 'N': none; */
+/* = 'E': for eigenvalues only (S); */
+/* = 'V': for invariant subspace only (SEP); */
+/* = 'B': for both eigenvalues and invariant subspace (S and */
+/* SEP). */
+
+/* COMPQ (input) CHARACTER*1 */
+/* = 'V': update the matrix Q of Schur vectors; */
+/* = 'N': do not update Q. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* SELECT specifies the eigenvalues in the selected cluster. To */
+/* select a real eigenvalue w(j), SELECT(j) must be set to */
+/* .TRUE.. To select a complex conjugate pair of eigenvalues */
+/* w(j) and w(j+1), corresponding to a 2-by-2 diagonal block, */
+/* either SELECT(j) or SELECT(j+1) or both must be set to */
+/* .TRUE.; a complex conjugate pair of eigenvalues must be */
+/* either both included in the cluster or both excluded. */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. N >= 0. */
+
+/* T (input/output) DOUBLE PRECISION array, dimension (LDT,N) */
+/* On entry, the upper quasi-triangular matrix T, in Schur */
+/* canonical form. */
+/* On exit, T is overwritten by the reordered matrix T, again in */
+/* Schur canonical form, with the selected eigenvalues in the */
+/* leading diagonal blocks. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max(1,N). */
+
+/* Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N) */
+/* On entry, if COMPQ = 'V', the matrix Q of Schur vectors. */
+/* On exit, if COMPQ = 'V', Q has been postmultiplied by the */
+/* orthogonal transformation matrix which reorders T; the */
+/* leading M columns of Q form an orthonormal basis for the */
+/* specified invariant subspace. */
+/* If COMPQ = 'N', Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. */
+/* LDQ >= 1; and if COMPQ = 'V', LDQ >= N. */
+
+/* WR (output) DOUBLE PRECISION array, dimension (N) */
+/* WI (output) DOUBLE PRECISION array, dimension (N) */
+/* The real and imaginary parts, respectively, of the reordered */
+/* eigenvalues of T. The eigenvalues are stored in the same */
+/* order as on the diagonal of T, with WR(i) = T(i,i) and, if */
+/* T(i:i+1,i:i+1) is a 2-by-2 diagonal block, WI(i) > 0 and */
+/* WI(i+1) = -WI(i). Note that if a complex eigenvalue is */
+/* sufficiently ill-conditioned, then its value may differ */
+/* significantly from its value before reordering. */
+
+/* M (output) INTEGER */
+/* The dimension of the specified invariant subspace. */
+/* 0 <= M <= N. */
+
+/* S (output) DOUBLE PRECISION */
+/* If JOB = 'E' or 'B', S is a lower bound on the reciprocal */
+/* condition number for the selected cluster of eigenvalues. */
+/* S cannot underestimate the true reciprocal condition number */
+/* by more than a factor of sqrt(N). If M = 0 or N, S = 1. */
+/* If JOB = 'N' or 'V', S is not referenced. */
+
+/* SEP (output) DOUBLE PRECISION */
+/* If JOB = 'V' or 'B', SEP is the estimated reciprocal */
+/* condition number of the specified invariant subspace. If */
+/* M = 0 or N, SEP = norm(T). */
+/* If JOB = 'N' or 'E', SEP is not referenced. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If JOB = 'N', LWORK >= max(1,N); */
+/* if JOB = 'E', LWORK >= max(1,M*(N-M)); */
+/* if JOB = 'V' or 'B', LWORK >= max(1,2*M*(N-M)). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If JOB = 'N' or 'E', LIWORK >= 1; */
+/* if JOB = 'V' or 'B', LIWORK >= max(1,M*(N-M)). */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal size of the IWORK array, */
+/* returns this value as the first entry of the IWORK array, and */
+/* no error message related to LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* = 1: reordering of T failed because some eigenvalues are too */
+/* close to separate (the problem is very ill-conditioned); */
+/* T may have been partially reordered, and WR and WI */
+/* contain the eigenvalues in the same order as in T; S and */
+/* SEP (if requested) are set to zero. */
+
+/* Further Details */
+/* =============== */
+
+/* DTRSEN first collects the selected eigenvalues by computing an */
+/* orthogonal transformation Z to move them to the top left corner of T. */
+/* In other words, the selected eigenvalues are the eigenvalues of T11 */
+/* in: */
+
+/* Z'*T*Z = ( T11 T12 ) n1 */
+/* ( 0 T22 ) n2 */
+/* n1 n2 */
+
+/* where N = n1+n2 and Z' means the transpose of Z. The first n1 columns */
+/* of Z span the specified invariant subspace of T. */
+
+/* If T has been obtained from the real Schur factorization of a matrix */
+/* A = Q*T*Q', then the reordered real Schur factorization of A is given */
+/* by A = (Q*Z)*(Z'*T*Z)*(Q*Z)', and the first n1 columns of Q*Z span */
+/* the corresponding invariant subspace of A. */
+
+/* The reciprocal condition number of the average of the eigenvalues of */
+/* T11 may be returned in S. S lies between 0 (very badly conditioned) */
+/* and 1 (very well conditioned). It is computed as follows. First we */
+/* compute R so that */
+
+/* P = ( I R ) n1 */
+/* ( 0 0 ) n2 */
+/* n1 n2 */
+
+/* is the projector on the invariant subspace associated with T11. */
+/* R is the solution of the Sylvester equation: */
+
+/* T11*R - R*T22 = T12. */
+
+/* Let F-norm(M) denote the Frobenius-norm of M and 2-norm(M) denote */
+/* the two-norm of M. Then S is computed as the lower bound */
+
+/* (1 + F-norm(R)**2)**(-1/2) */
+
+/* on the reciprocal of 2-norm(P), the true reciprocal condition number. */
+/* S cannot underestimate 1 / 2-norm(P) by more than a factor of */
+/* sqrt(N). */
+
+/* An approximate error bound for the computed average of the */
+/* eigenvalues of T11 is */
+
+/* EPS * norm(T) / S */
+
+/* where EPS is the machine precision. */
+
+/* The reciprocal condition number of the right invariant subspace */
+/* spanned by the first n1 columns of Z (or of Q*Z) is returned in SEP. */
+/* SEP is defined as the separation of T11 and T22: */
+
+/* sep( T11, T22 ) = sigma-min( C ) */
+
+/* where sigma-min(C) is the smallest singular value of the */
+/* n1*n2-by-n1*n2 matrix */
+
+/* C = kprod( I(n2), T11 ) - kprod( transpose(T22), I(n1) ) */
+
+/* I(m) is an m by m identity matrix, and kprod denotes the Kronecker */
+/* product. We estimate sigma-min(C) by the reciprocal of an estimate of */
+/* the 1-norm of inverse(C). The true reciprocal 1-norm of inverse(C) */
+/* cannot differ from sigma-min(C) by more than a factor of sqrt(n1*n2). */
+
+/* When SEP is small, small changes in T can cause large changes in */
+/* the invariant subspace. An approximate bound on the maximum angular */
+/* error in the computed right invariant subspace is */
+
+/* EPS * norm(T) / SEP */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+ /* Parameter adjustments */
+ --select;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --wr;
+ --wi;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ wantbh = lsame_(job, "B");
+ wants = lsame_(job, "E") || wantbh;
+ wantsp = lsame_(job, "V") || wantbh;
+ wantq = lsame_(compq, "V");
+
+ *info = 0;
+ lquery = *lwork == -1;
+ if (! lsame_(job, "N") && ! wants && ! wantsp) {
+ *info = -1;
+ } else if (! lsame_(compq, "N") && ! wantq) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldt < max(1,*n)) {
+ *info = -6;
+ } else if (*ldq < 1 || wantq && *ldq < *n) {
+ *info = -8;
+ } else {
+
+/* Set M to the dimension of the specified invariant subspace, */
+/* and test LWORK and LIWORK. */
+
+ *m = 0;
+ pair = FALSE_;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (pair) {
+ pair = FALSE_;
+ } else {
+ if (k < *n) {
+ if (t[k + 1 + k * t_dim1] == 0.) {
+ if (select[k]) {
+ ++(*m);
+ }
+ } else {
+ pair = TRUE_;
+ if (select[k] || select[k + 1]) {
+ *m += 2;
+ }
+ }
+ } else {
+ if (select[*n]) {
+ ++(*m);
+ }
+ }
+ }
+/* L10: */
+ }
+
+ n1 = *m;
+ n2 = *n - *m;
+ nn = n1 * n2;
+
+ if (wantsp) {
+/* Computing MAX */
+ i__1 = 1, i__2 = nn << 1;
+ lwmin = max(i__1,i__2);
+ liwmin = max(1,nn);
+ } else if (lsame_(job, "N")) {
+ lwmin = max(1,*n);
+ liwmin = 1;
+ } else if (lsame_(job, "E")) {
+ lwmin = max(1,nn);
+ liwmin = 1;
+ }
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -15;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -17;
+ }
+ }
+
+ if (*info == 0) {
+ work[1] = (doublereal) lwmin;
+ iwork[1] = liwmin;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTRSEN", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == *n || *m == 0) {
+ if (wants) {
+ *s = 1.;
+ }
+ if (wantsp) {
+ *sep = dlange_("1", n, n, &t[t_offset], ldt, &work[1]);
+ }
+ goto L40;
+ }
+
+/* Collect the selected blocks at the top-left corner of T. */
+
+ ks = 0;
+ pair = FALSE_;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (pair) {
+ pair = FALSE_;
+ } else {
+ swap = select[k];
+ if (k < *n) {
+ if (t[k + 1 + k * t_dim1] != 0.) {
+ pair = TRUE_;
+ swap = swap || select[k + 1];
+ }
+ }
+ if (swap) {
+ ++ks;
+
+/* Swap the K-th block to position KS. */
+
+ ierr = 0;
+ kk = k;
+ if (k != ks) {
+ dtrexc_(compq, n, &t[t_offset], ldt, &q[q_offset], ldq, &
+ kk, &ks, &work[1], &ierr);
+ }
+ if (ierr == 1 || ierr == 2) {
+
+/* Blocks too close to swap: exit. */
+
+ *info = 1;
+ if (wants) {
+ *s = 0.;
+ }
+ if (wantsp) {
+ *sep = 0.;
+ }
+ goto L40;
+ }
+ if (pair) {
+ ++ks;
+ }
+ }
+ }
+/* L20: */
+ }
+
+ if (wants) {
+
+/* Solve Sylvester equation for R: */
+
+/* T11*R - R*T22 = scale*T12 */
+
+ dlacpy_("F", &n1, &n2, &t[(n1 + 1) * t_dim1 + 1], ldt, &work[1], &n1);
+ dtrsyl_("N", "N", &c_n1, &n1, &n2, &t[t_offset], ldt, &t[n1 + 1 + (n1
+ + 1) * t_dim1], ldt, &work[1], &n1, &scale, &ierr);
+
+/* Estimate the reciprocal of the condition number of the cluster */
+/* of eigenvalues. */
+
+ rnorm = dlange_("F", &n1, &n2, &work[1], &n1, &work[1]);
+ if (rnorm == 0.) {
+ *s = 1.;
+ } else {
+ *s = scale / (sqrt(scale * scale / rnorm + rnorm) * sqrt(rnorm));
+ }
+ }
+
+ if (wantsp) {
+
+/* Estimate sep(T11,T22). */
+
+ est = 0.;
+ kase = 0;
+L30:
+ dlacn2_(&nn, &work[nn + 1], &work[1], &iwork[1], &est, &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Solve T11*R - R*T22 = scale*X. */
+
+ dtrsyl_("N", "N", &c_n1, &n1, &n2, &t[t_offset], ldt, &t[n1 +
+ 1 + (n1 + 1) * t_dim1], ldt, &work[1], &n1, &scale, &
+ ierr);
+ } else {
+
+/* Solve T11'*R - R*T22' = scale*X. */
+
+ dtrsyl_("T", "T", &c_n1, &n1, &n2, &t[t_offset], ldt, &t[n1 +
+ 1 + (n1 + 1) * t_dim1], ldt, &work[1], &n1, &scale, &
+ ierr);
+ }
+ goto L30;
+ }
+
+ *sep = scale / est;
+ }
+
+L40:
+
+/* Store the output eigenvalues in WR and WI. */
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ wr[k] = t[k + k * t_dim1];
+ wi[k] = 0.;
+/* L50: */
+ }
+ i__1 = *n - 1;
+ for (k = 1; k <= i__1; ++k) {
+ if (t[k + 1 + k * t_dim1] != 0.) {
+ wi[k] = sqrt((d__1 = t[k + (k + 1) * t_dim1], abs(d__1))) * sqrt((
+ d__2 = t[k + 1 + k * t_dim1], abs(d__2)));
+ wi[k + 1] = -wi[k];
+ }
+/* L60: */
+ }
+
+ work[1] = (doublereal) lwmin;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of DTRSEN */
+
+} /* dtrsen_ */
diff --git a/contrib/libs/clapack/dtrsna.c b/contrib/libs/clapack/dtrsna.c
new file mode 100644
index 0000000000..d0f26f187f
--- /dev/null
+++ b/contrib/libs/clapack/dtrsna.c
@@ -0,0 +1,606 @@
+/* dtrsna.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static logical c_true = TRUE_;
+static logical c_false = FALSE_;
+
+/* Subroutine */ int dtrsna_(char *job, char *howmny, logical *select,
+ integer *n, doublereal *t, integer *ldt, doublereal *vl, integer *
+ ldvl, doublereal *vr, integer *ldvr, doublereal *s, doublereal *sep,
+ integer *mm, integer *m, doublereal *work, integer *ldwork, integer *
+ iwork, integer *info)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset,
+ work_dim1, work_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k, n2;
+ doublereal cs;
+ integer nn, ks;
+ doublereal sn, mu, eps, est;
+ integer kase;
+ doublereal cond;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ logical pair;
+ integer ierr;
+ doublereal dumm, prod;
+ integer ifst;
+ doublereal lnrm;
+ integer ilst;
+ doublereal rnrm;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ doublereal prod1, prod2, scale, delta;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ logical wants;
+ doublereal dummy[1];
+ extern /* Subroutine */ int dlacn2_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ extern doublereal dlapy2_(doublereal *, doublereal *);
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+ doublereal bignum;
+ logical wantbh;
+ extern /* Subroutine */ int dlaqtr_(logical *, logical *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, integer *), dtrexc_(char *, integer *
+, doublereal *, integer *, doublereal *, integer *, integer *,
+ integer *, doublereal *, integer *);
+ logical somcon;
+ doublereal smlnum;
+ logical wantsp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call DLACN2 in place of DLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTRSNA estimates reciprocal condition numbers for specified */
+/* eigenvalues and/or right eigenvectors of a real upper */
+/* quasi-triangular matrix T (or of any matrix Q*T*Q**T with Q */
+/* orthogonal). */
+
+/* T must be in Schur canonical form (as returned by DHSEQR), that is, */
+/* block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; each */
+/* 2-by-2 diagonal block has its diagonal elements equal and its */
+/* off-diagonal elements of opposite sign. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies whether condition numbers are required for */
+/* eigenvalues (S) or eigenvectors (SEP): */
+/* = 'E': for eigenvalues only (S); */
+/* = 'V': for eigenvectors only (SEP); */
+/* = 'B': for both eigenvalues and eigenvectors (S and SEP). */
+
+/* HOWMNY (input) CHARACTER*1 */
+/* = 'A': compute condition numbers for all eigenpairs; */
+/* = 'S': compute condition numbers for selected eigenpairs */
+/* specified by the array SELECT. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* If HOWMNY = 'S', SELECT specifies the eigenpairs for which */
+/* condition numbers are required. To select condition numbers */
+/* for the eigenpair corresponding to a real eigenvalue w(j), */
+/* SELECT(j) must be set to .TRUE.. To select condition numbers */
+/* corresponding to a complex conjugate pair of eigenvalues w(j) */
+/* and w(j+1), either SELECT(j) or SELECT(j+1) or both, must be */
+/* set to .TRUE.. */
+/* If HOWMNY = 'A', SELECT is not referenced. */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. N >= 0. */
+
+/* T (input) DOUBLE PRECISION array, dimension (LDT,N) */
+/* The upper quasi-triangular matrix T, in Schur canonical form. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max(1,N). */
+
+/* VL (input) DOUBLE PRECISION array, dimension (LDVL,M) */
+/* If JOB = 'E' or 'B', VL must contain left eigenvectors of T */
+/* (or of any Q*T*Q**T with Q orthogonal), corresponding to the */
+/* eigenpairs specified by HOWMNY and SELECT. The eigenvectors */
+/* must be stored in consecutive columns of VL, as returned by */
+/* DHSEIN or DTREVC. */
+/* If JOB = 'V', VL is not referenced. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the array VL. */
+/* LDVL >= 1; and if JOB = 'E' or 'B', LDVL >= N. */
+
+/* VR (input) DOUBLE PRECISION array, dimension (LDVR,M) */
+/* If JOB = 'E' or 'B', VR must contain right eigenvectors of T */
+/* (or of any Q*T*Q**T with Q orthogonal), corresponding to the */
+/* eigenpairs specified by HOWMNY and SELECT. The eigenvectors */
+/* must be stored in consecutive columns of VR, as returned by */
+/* DHSEIN or DTREVC. */
+/* If JOB = 'V', VR is not referenced. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. */
+/* LDVR >= 1; and if JOB = 'E' or 'B', LDVR >= N. */
+
+/* S (output) DOUBLE PRECISION array, dimension (MM) */
+/* If JOB = 'E' or 'B', the reciprocal condition numbers of the */
+/* selected eigenvalues, stored in consecutive elements of the */
+/* array. For a complex conjugate pair of eigenvalues two */
+/* consecutive elements of S are set to the same value. Thus */
+/* S(j), SEP(j), and the j-th columns of VL and VR all */
+/* correspond to the same eigenpair (but not in general the */
+/* j-th eigenpair, unless all eigenpairs are selected). */
+/* If JOB = 'V', S is not referenced. */
+
+/* SEP (output) DOUBLE PRECISION array, dimension (MM) */
+/* If JOB = 'V' or 'B', the estimated reciprocal condition */
+/* numbers of the selected eigenvectors, stored in consecutive */
+/* elements of the array. For a complex eigenvector two */
+/* consecutive elements of SEP are set to the same value. If */
+/* the eigenvalues cannot be reordered to compute SEP(j), SEP(j) */
+/* is set to 0; this can only occur when the true value would be */
+/* very small anyway. */
+/* If JOB = 'E', SEP is not referenced. */
+
+/* MM (input) INTEGER */
+/* The number of elements in the arrays S (if JOB = 'E' or 'B') */
+/* and/or SEP (if JOB = 'V' or 'B'). MM >= M. */
+
+/* M (output) INTEGER */
+/* The number of elements of the arrays S and/or SEP actually */
+/* used to store the estimated condition numbers. */
+/* If HOWMNY = 'A', M is set to N. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (LDWORK,N+6) */
+/* If JOB = 'E', WORK is not referenced. */
+
+/* LDWORK (input) INTEGER */
+/* The leading dimension of the array WORK. */
+/* LDWORK >= 1; and if JOB = 'V' or 'B', LDWORK >= N. */
+
+/* IWORK (workspace) INTEGER array, dimension (2*(N-1)) */
+/* If JOB = 'E', IWORK is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The reciprocal of the condition number of an eigenvalue lambda is */
+/* defined as */
+
+/* S(lambda) = |v'*u| / (norm(u)*norm(v)) */
+
+/* where u and v are the right and left eigenvectors of T corresponding */
+/* to lambda; v' denotes the conjugate-transpose of v, and norm(u) */
+/* denotes the Euclidean norm. These reciprocal condition numbers always */
+/* lie between zero (very badly conditioned) and one (very well */
+/* conditioned). If n = 1, S(lambda) is defined to be 1. */
+
+/* An approximate error bound for a computed eigenvalue W(i) is given by */
+
+/* EPS * norm(T) / S(i) */
+
+/* where EPS is the machine precision. */
+
+/* The reciprocal of the condition number of the right eigenvector u */
+/* corresponding to lambda is defined as follows. Suppose */
+
+/* T = ( lambda c ) */
+/* ( 0 T22 ) */
+
+/* Then the reciprocal condition number is */
+
+/* SEP( lambda, T22 ) = sigma-min( T22 - lambda*I ) */
+
+/* where sigma-min denotes the smallest singular value. We approximate */
+/* the smallest singular value by the reciprocal of an estimate of the */
+/* one-norm of the inverse of T22 - lambda*I. If n = 1, SEP(1) is */
+/* defined to be abs(T(1,1)). */
+
+/* An approximate error bound for a computed right eigenvector VR(i) */
+/* is given by */
+
+/* EPS * norm(T) / SEP(i) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+ /* Parameter adjustments */
+ --select;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --s;
+ --sep;
+ work_dim1 = *ldwork;
+ work_offset = 1 + work_dim1;
+ work -= work_offset;
+ --iwork;
+
+ /* Function Body */
+ wantbh = lsame_(job, "B");
+ wants = lsame_(job, "E") || wantbh;
+ wantsp = lsame_(job, "V") || wantbh;
+
+ somcon = lsame_(howmny, "S");
+
+ *info = 0;
+ if (! wants && ! wantsp) {
+ *info = -1;
+ } else if (! lsame_(howmny, "A") && ! somcon) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldt < max(1,*n)) {
+ *info = -6;
+ } else if (*ldvl < 1 || wants && *ldvl < *n) {
+ *info = -8;
+ } else if (*ldvr < 1 || wants && *ldvr < *n) {
+ *info = -10;
+ } else {
+
+/* Set M to the number of eigenpairs for which condition numbers */
+/* are required, and test MM. */
+
+ if (somcon) {
+ *m = 0;
+ pair = FALSE_;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (pair) {
+ pair = FALSE_;
+ } else {
+ if (k < *n) {
+ if (t[k + 1 + k * t_dim1] == 0.) {
+ if (select[k]) {
+ ++(*m);
+ }
+ } else {
+ pair = TRUE_;
+ if (select[k] || select[k + 1]) {
+ *m += 2;
+ }
+ }
+ } else {
+ if (select[*n]) {
+ ++(*m);
+ }
+ }
+ }
+/* L10: */
+ }
+ } else {
+ *m = *n;
+ }
+
+ if (*mm < *m) {
+ *info = -13;
+ } else if (*ldwork < 1 || wantsp && *ldwork < *n) {
+ *info = -16;
+ }
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTRSNA", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (somcon) {
+ if (! select[1]) {
+ return 0;
+ }
+ }
+ if (wants) {
+ s[1] = 1.;
+ }
+ if (wantsp) {
+ sep[1] = (d__1 = t[t_dim1 + 1], abs(d__1));
+ }
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = dlamch_("P");
+ smlnum = dlamch_("S") / eps;
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+
+ ks = 0;
+ pair = FALSE_;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+
+/* Determine whether T(k,k) begins a 1-by-1 or 2-by-2 block. */
+
+ if (pair) {
+ pair = FALSE_;
+ goto L60;
+ } else {
+ if (k < *n) {
+ pair = t[k + 1 + k * t_dim1] != 0.;
+ }
+ }
+
+/* Determine whether condition numbers are required for the k-th */
+/* eigenpair. */
+
+ if (somcon) {
+ if (pair) {
+ if (! select[k] && ! select[k + 1]) {
+ goto L60;
+ }
+ } else {
+ if (! select[k]) {
+ goto L60;
+ }
+ }
+ }
+
+ ++ks;
+
+ if (wants) {
+
+/* Compute the reciprocal condition number of the k-th */
+/* eigenvalue. */
+
+ if (! pair) {
+
+/* Real eigenvalue. */
+
+ prod = ddot_(n, &vr[ks * vr_dim1 + 1], &c__1, &vl[ks *
+ vl_dim1 + 1], &c__1);
+ rnrm = dnrm2_(n, &vr[ks * vr_dim1 + 1], &c__1);
+ lnrm = dnrm2_(n, &vl[ks * vl_dim1 + 1], &c__1);
+ s[ks] = abs(prod) / (rnrm * lnrm);
+ } else {
+
+/* Complex eigenvalue. */
+
+ prod1 = ddot_(n, &vr[ks * vr_dim1 + 1], &c__1, &vl[ks *
+ vl_dim1 + 1], &c__1);
+ prod1 += ddot_(n, &vr[(ks + 1) * vr_dim1 + 1], &c__1, &vl[(ks
+ + 1) * vl_dim1 + 1], &c__1);
+ prod2 = ddot_(n, &vl[ks * vl_dim1 + 1], &c__1, &vr[(ks + 1) *
+ vr_dim1 + 1], &c__1);
+ prod2 -= ddot_(n, &vl[(ks + 1) * vl_dim1 + 1], &c__1, &vr[ks *
+ vr_dim1 + 1], &c__1);
+ d__1 = dnrm2_(n, &vr[ks * vr_dim1 + 1], &c__1);
+ d__2 = dnrm2_(n, &vr[(ks + 1) * vr_dim1 + 1], &c__1);
+ rnrm = dlapy2_(&d__1, &d__2);
+ d__1 = dnrm2_(n, &vl[ks * vl_dim1 + 1], &c__1);
+ d__2 = dnrm2_(n, &vl[(ks + 1) * vl_dim1 + 1], &c__1);
+ lnrm = dlapy2_(&d__1, &d__2);
+ cond = dlapy2_(&prod1, &prod2) / (rnrm * lnrm);
+ s[ks] = cond;
+ s[ks + 1] = cond;
+ }
+ }
+
+ if (wantsp) {
+
+/* Estimate the reciprocal condition number of the k-th */
+/* eigenvector. */
+
+/* Copy the matrix T to the array WORK and swap the diagonal */
+/* block beginning at T(k,k) to the (1,1) position. */
+
+ dlacpy_("Full", n, n, &t[t_offset], ldt, &work[work_offset],
+ ldwork);
+ ifst = k;
+ ilst = 1;
+ dtrexc_("No Q", n, &work[work_offset], ldwork, dummy, &c__1, &
+ ifst, &ilst, &work[(*n + 1) * work_dim1 + 1], &ierr);
+
+ if (ierr == 1 || ierr == 2) {
+
+/* Could not swap because blocks not well separated */
+
+ scale = 1.;
+ est = bignum;
+ } else {
+
+/* Reordering successful */
+
+ if (work[work_dim1 + 2] == 0.) {
+
+/* Form C = T22 - lambda*I in WORK(2:N,2:N). */
+
+ i__2 = *n;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ work[i__ + i__ * work_dim1] -= work[work_dim1 + 1];
+/* L20: */
+ }
+ n2 = 1;
+ nn = *n - 1;
+ } else {
+
+/* Triangularize the 2 by 2 block by unitary */
+/* transformation U = [ cs i*ss ] */
+/* [ i*ss cs ]. */
+/* such that the (1,1) position of WORK is complex */
+/* eigenvalue lambda with positive imaginary part. (2,2) */
+/* position of WORK is the complex eigenvalue lambda */
+/* with negative imaginary part. */
+
+ mu = sqrt((d__1 = work[(work_dim1 << 1) + 1], abs(d__1)))
+ * sqrt((d__2 = work[work_dim1 + 2], abs(d__2)));
+ delta = dlapy2_(&mu, &work[work_dim1 + 2]);
+ cs = mu / delta;
+ sn = -work[work_dim1 + 2] / delta;
+
+/* Form */
+
+/* C' = WORK(2:N,2:N) + i*[rwork(1) ..... rwork(n-1) ] */
+/* [ mu ] */
+/* [ .. ] */
+/* [ .. ] */
+/* [ mu ] */
+/* where C' is conjugate transpose of complex matrix C, */
+/* and RWORK is stored starting in the N+1-st column of */
+/* WORK. */
+
+ i__2 = *n;
+ for (j = 3; j <= i__2; ++j) {
+ work[j * work_dim1 + 2] = cs * work[j * work_dim1 + 2]
+ ;
+ work[j + j * work_dim1] -= work[work_dim1 + 1];
+/* L30: */
+ }
+ work[(work_dim1 << 1) + 2] = 0.;
+
+ work[(*n + 1) * work_dim1 + 1] = mu * 2.;
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ work[i__ + (*n + 1) * work_dim1] = sn * work[(i__ + 1)
+ * work_dim1 + 1];
+/* L40: */
+ }
+ n2 = 2;
+ nn = *n - 1 << 1;
+ }
+
+/* Estimate norm(inv(C')) */
+
+ est = 0.;
+ kase = 0;
+L50:
+ dlacn2_(&nn, &work[(*n + 2) * work_dim1 + 1], &work[(*n + 4) *
+ work_dim1 + 1], &iwork[1], &est, &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+ if (n2 == 1) {
+
+/* Real eigenvalue: solve C'*x = scale*c. */
+
+ i__2 = *n - 1;
+ dlaqtr_(&c_true, &c_true, &i__2, &work[(work_dim1
+ << 1) + 2], ldwork, dummy, &dumm, &scale,
+ &work[(*n + 4) * work_dim1 + 1], &work[(*
+ n + 6) * work_dim1 + 1], &ierr);
+ } else {
+
+/* Complex eigenvalue: solve */
+/* C'*(p+iq) = scale*(c+id) in real arithmetic. */
+
+ i__2 = *n - 1;
+ dlaqtr_(&c_true, &c_false, &i__2, &work[(
+ work_dim1 << 1) + 2], ldwork, &work[(*n +
+ 1) * work_dim1 + 1], &mu, &scale, &work[(*
+ n + 4) * work_dim1 + 1], &work[(*n + 6) *
+ work_dim1 + 1], &ierr);
+ }
+ } else {
+ if (n2 == 1) {
+
+/* Real eigenvalue: solve C*x = scale*c. */
+
+ i__2 = *n - 1;
+ dlaqtr_(&c_false, &c_true, &i__2, &work[(
+ work_dim1 << 1) + 2], ldwork, dummy, &
+ dumm, &scale, &work[(*n + 4) * work_dim1
+ + 1], &work[(*n + 6) * work_dim1 + 1], &
+ ierr);
+ } else {
+
+/* Complex eigenvalue: solve */
+/* C*(p+iq) = scale*(c+id) in real arithmetic. */
+
+ i__2 = *n - 1;
+ dlaqtr_(&c_false, &c_false, &i__2, &work[(
+ work_dim1 << 1) + 2], ldwork, &work[(*n +
+ 1) * work_dim1 + 1], &mu, &scale, &work[(*
+ n + 4) * work_dim1 + 1], &work[(*n + 6) *
+ work_dim1 + 1], &ierr);
+
+ }
+ }
+
+ goto L50;
+ }
+ }
+
+ sep[ks] = scale / max(est,smlnum);
+ if (pair) {
+ sep[ks + 1] = sep[ks];
+ }
+ }
+
+ if (pair) {
+ ++ks;
+ }
+
+L60:
+ ;
+ }
+ return 0;
+
+/* End of DTRSNA */
+
+} /* dtrsna_ */
diff --git a/contrib/libs/clapack/dtrsyl.c b/contrib/libs/clapack/dtrsyl.c
new file mode 100644
index 0000000000..a23f564d2e
--- /dev/null
+++ b/contrib/libs/clapack/dtrsyl.c
@@ -0,0 +1,1319 @@
+/* dtrsyl.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static logical c_false = FALSE_;
+static integer c__2 = 2;
+static doublereal c_b26 = 1.;
+static doublereal c_b30 = 0.;
+static logical c_true = TRUE_;
+
+/* Subroutine */ int dtrsyl_(char *trana, char *tranb, integer *isgn, integer
+	*m, integer *n, doublereal *a, integer *lda, doublereal *b, integer *
+	ldb, doublereal *c__, integer *ldc, doublereal *scale, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+	    i__3, i__4;
+    doublereal d__1, d__2;
+
+    /* Local variables */
+    integer j, k, l;
+    doublereal x[4]	/* was [2][2] */;
+    integer k1, k2, l1, l2;
+    doublereal a11, db, da11, vec[4]	/* was [2][2] */, dum[1], eps, sgn;
+    extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+	    integer *);
+    integer ierr;
+    doublereal smin, suml, sumr;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    extern logical lsame_(char *, char *);
+    integer knext, lnext;
+    doublereal xnorm;
+    extern /* Subroutine */ int dlaln2_(logical *, integer *, integer *,
+	    doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+	     doublereal *, doublereal *, integer *, doublereal *, doublereal *
+, doublereal *, integer *, doublereal *, doublereal *, integer *),
+	    dlasy2_(logical *, logical *, integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *), dlabad_(doublereal *, doublereal *);
+    extern doublereal dlamch_(char *), dlange_(char *, integer *,
+	    integer *, doublereal *, integer *, doublereal *);
+    doublereal scaloc;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal bignum;
+    logical notrna, notrnb;
+    doublereal smlnum;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*  -- LAPACK is a software package provided by Univ. of Tennessee,    -- */
+/*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  DTRSYL solves the real Sylvester matrix equation: */
+
+/*     op(A)*X + X*op(B) = scale*C or */
+/*     op(A)*X - X*op(B) = scale*C, */
+
+/*  where op(A) = A or A**T, and  A and B are both upper quasi- */
+/*  triangular. A is M-by-M and B is N-by-N; the right hand side C and */
+/*  the solution X are M-by-N; and scale is an output scale factor, set */
+/*  <= 1 to avoid overflow in X. */
+
+/*  A and B must be in Schur canonical form (as returned by DHSEQR), that */
+/*  is, block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; */
+/*  each 2-by-2 diagonal block has its diagonal elements equal and its */
+/*  off-diagonal elements of opposite sign. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  TRANA   (input) CHARACTER*1 */
+/*          Specifies the option op(A): */
+/*          = 'N': op(A) = A    (No transpose) */
+/*          = 'T': op(A) = A**T (Transpose) */
+/*          = 'C': op(A) = A**H (Conjugate transpose = Transpose) */
+
+/*  TRANB   (input) CHARACTER*1 */
+/*          Specifies the option op(B): */
+/*          = 'N': op(B) = B    (No transpose) */
+/*          = 'T': op(B) = B**T (Transpose) */
+/*          = 'C': op(B) = B**H (Conjugate transpose = Transpose) */
+
+/*  ISGN    (input) INTEGER */
+/*          Specifies the sign in the equation: */
+/*          = +1: solve op(A)*X + X*op(B) = scale*C */
+/*          = -1: solve op(A)*X - X*op(B) = scale*C */
+
+/*  M       (input) INTEGER */
+/*          The order of the matrix A, and the number of rows in the */
+/*          matrices X and C. M >= 0. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix B, and the number of columns in the */
+/*          matrices X and C. N >= 0. */
+
+/*  A       (input) DOUBLE PRECISION array, dimension (LDA,M) */
+/*          The upper quasi-triangular matrix A, in Schur canonical form. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A. LDA >= max(1,M). */
+
+/*  B       (input) DOUBLE PRECISION array, dimension (LDB,N) */
+/*          The upper quasi-triangular matrix B, in Schur canonical form. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B. LDB >= max(1,N). */
+
+/*  C       (input/output) DOUBLE PRECISION array, dimension (LDC,N) */
+/*          On entry, the M-by-N right hand side matrix C. */
+/*          On exit, C is overwritten by the solution matrix X. */
+
+/*  LDC     (input) INTEGER */
+/*          The leading dimension of the array C. LDC >= max(1,M) */
+
+/*  SCALE   (output) DOUBLE PRECISION */
+/*          The scale factor, scale, set <= 1 to avoid overflow in X. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0: successful exit */
+/*          < 0: if INFO = -i, the i-th argument had an illegal value */
+/*          = 1: A and B have common or very close eigenvalues; perturbed */
+/*               values were used to solve the equation (but the matrices */
+/*               A and B are unchanged). */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Decode and Test input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+
+    /* Function Body */
+    notrna = lsame_(trana, "N");
+    notrnb = lsame_(tranb, "N");
+
+    *info = 0;
+    if (! notrna && ! lsame_(trana, "T") && ! lsame_(
+	    trana, "C")) {
+	*info = -1;
+    } else if (! notrnb && ! lsame_(tranb, "T") && !
+	    lsame_(tranb, "C")) {
+	*info = -2;
+    } else if (*isgn != 1 && *isgn != -1) {
+	*info = -3;
+    } else if (*m < 0) {
+	*info = -4;
+    } else if (*n < 0) {
+	*info = -5;
+    } else if (*lda < max(1,*m)) {
+	*info = -7;
+    } else if (*ldb < max(1,*n)) {
+	*info = -9;
+    } else if (*ldc < max(1,*m)) {
+	*info = -11;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DTRSYL", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    *scale = 1.;
+    if (*m == 0 || *n == 0) {
+	return 0;
+    }
+
+/*     Set constants to control overflow */
+
+    eps = dlamch_("P");
+    smlnum = dlamch_("S");
+    bignum = 1. / smlnum;
+    dlabad_(&smlnum, &bignum);
+    smlnum = smlnum * (doublereal) (*m * *n) / eps;
+    bignum = 1. / smlnum;
+
+/* Computing MAX */
+    d__1 = smlnum, d__2 = eps * dlange_("M", m, m, &a[a_offset], lda, dum), d__1 = max(d__1,d__2), d__2 = eps * dlange_("M", n, n,
+	    &b[b_offset], ldb, dum);
+    smin = max(d__1,d__2);
+
+    sgn = (doublereal) (*isgn);
+
+    if (notrna && notrnb) {
+
+/*        Solve    A*X + ISGN*X*B = scale*C. */
+
+/*        The (K,L)th block of X is determined starting from */
+/*        bottom-left corner column by column by */
+
+/*         A(K,K)*X(K,L) + ISGN*X(K,L)*B(L,L) = C(K,L) - R(K,L) */
+
+/*        Where */
+/*                  M                         L-1 */
+/*        R(K,L) = SUM [A(K,I)*X(I,L)] + ISGN*SUM [X(K,J)*B(J,L)]. */
+/*                I=K+1                       J=1 */
+
+/*        Start column loop (index = L) */
+/*        L1 (L2): column index of the first (last) column of X(K,L). */
+
+	lnext = 1;
+	i__1 = *n;
+	for (l = 1; l <= i__1; ++l) {
+	    if (l < lnext) {
+		goto L60;
+	    }
+	    if (l == *n) {
+		l1 = l;
+		l2 = l;
+	    } else {
+		if (b[l + 1 + l * b_dim1] != 0.) {
+		    l1 = l;
+		    l2 = l + 1;
+		    lnext = l + 2;
+		} else {
+		    l1 = l;
+		    l2 = l;
+		    lnext = l + 1;
+		}
+	    }
+
+/*           Start row loop (index = K) */
+/*           K1 (K2): row index of the first (last) row of X(K,L). */
+
+	    knext = *m;
+	    for (k = *m; k >= 1; --k) {
+		if (k > knext) {
+		    goto L50;
+		}
+		if (k == 1) {
+		    k1 = k;
+		    k2 = k;
+		} else {
+		    if (a[k + (k - 1) * a_dim1] != 0.) {
+			k1 = k - 1;
+			k2 = k;
+			knext = k - 2;
+		    } else {
+			k1 = k;
+			k2 = k;
+			knext = k - 1;
+		    }
+		}
+
+		if (l1 == l2 && k1 == k2) {
+		    i__2 = *m - k1;
+/* Computing MIN */
+		    i__3 = k1 + 1;
+/* Computing MIN */
+		    i__4 = k1 + 1;
+		    suml = ddot_(&i__2, &a[k1 + min(i__3, *m)* a_dim1], lda, &
+			    c__[min(i__4, *m)+ l1 * c_dim1], &c__1);
+		    i__2 = l1 - 1;
+		    sumr = ddot_(&i__2, &c__[k1 + c_dim1], ldc, &b[l1 *
+			    b_dim1 + 1], &c__1);
+		    vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+		    scaloc = 1.;
+
+		    a11 = a[k1 + k1 * a_dim1] + sgn * b[l1 + l1 * b_dim1];
+		    da11 = abs(a11);
+		    if (da11 <= smin) {
+			a11 = smin;
+			da11 = smin;
+			*info = 1;
+		    }
+		    db = abs(vec[0]);
+		    if (da11 < 1. && db > 1.) {
+			if (db > bignum * da11) {
+			    scaloc = 1. / db;
+			}
+		    }
+		    x[0] = vec[0] * scaloc / a11;
+
+		    if (scaloc != 1.) {
+			i__2 = *n;
+			for (j = 1; j <= i__2; ++j) {
+			    dscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L10: */
+			}
+			*scale *= scaloc;
+		    }
+		    c__[k1 + l1 * c_dim1] = x[0];
+
+		} else if (l1 == l2 && k1 != k2) {
+
+		    i__2 = *m - k2;
+/* Computing MIN */
+		    i__3 = k2 + 1;
+/* Computing MIN */
+		    i__4 = k2 + 1;
+		    suml = ddot_(&i__2, &a[k1 + min(i__3, *m)* a_dim1], lda, &
+			    c__[min(i__4, *m)+ l1 * c_dim1], &c__1);
+		    i__2 = l1 - 1;
+		    sumr = ddot_(&i__2, &c__[k1 + c_dim1], ldc, &b[l1 *
+			    b_dim1 + 1], &c__1);
+		    vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+
+		    i__2 = *m - k2;
+/* Computing MIN */
+		    i__3 = k2 + 1;
+/* Computing MIN */
+		    i__4 = k2 + 1;
+		    suml = ddot_(&i__2, &a[k2 + min(i__3, *m)* a_dim1], lda, &
+			    c__[min(i__4, *m)+ l1 * c_dim1], &c__1);
+		    i__2 = l1 - 1;
+		    sumr = ddot_(&i__2, &c__[k2 + c_dim1], ldc, &b[l1 *
+			    b_dim1 + 1], &c__1);
+		    vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr);
+
+		    d__1 = -sgn * b[l1 + l1 * b_dim1];
+		    dlaln2_(&c_false, &c__2, &c__1, &smin, &c_b26, &a[k1 + k1
+			    * a_dim1], lda, &c_b26, &c_b26, vec, &c__2, &d__1,
+			     &c_b30, x, &c__2, &scaloc, &xnorm, &ierr);
+		    if (ierr != 0) {
+			*info = 1;
+		    }
+
+		    if (scaloc != 1.) {
+			i__2 = *n;
+			for (j = 1; j <= i__2; ++j) {
+			    dscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L20: */
+			}
+			*scale *= scaloc;
+		    }
+		    c__[k1 + l1 * c_dim1] = x[0];
+		    c__[k2 + l1 * c_dim1] = x[1];
+
+		} else if (l1 != l2 && k1 == k2) {
+
+		    i__2 = *m - k1;
+/* Computing MIN */
+		    i__3 = k1 + 1;
+/* Computing MIN */
+		    i__4 = k1 + 1;
+		    suml = ddot_(&i__2, &a[k1 + min(i__3, *m)* a_dim1], lda, &
+			    c__[min(i__4, *m)+ l1 * c_dim1], &c__1);
+		    i__2 = l1 - 1;
+		    sumr = ddot_(&i__2, &c__[k1 + c_dim1], ldc, &b[l1 *
+			    b_dim1 + 1], &c__1);
+		    vec[0] = sgn * (c__[k1 + l1 * c_dim1] - (suml + sgn *
+			    sumr));
+
+		    i__2 = *m - k1;
+/* Computing MIN */
+		    i__3 = k1 + 1;
+/* Computing MIN */
+		    i__4 = k1 + 1;
+		    suml = ddot_(&i__2, &a[k1 + min(i__3, *m)* a_dim1], lda, &
+			    c__[min(i__4, *m)+ l2 * c_dim1], &c__1);
+		    i__2 = l1 - 1;
+		    sumr = ddot_(&i__2, &c__[k1 + c_dim1], ldc, &b[l2 *
+			    b_dim1 + 1], &c__1);
+		    vec[1] = sgn * (c__[k1 + l2 * c_dim1] - (suml + sgn *
+			    sumr));
+
+		    d__1 = -sgn * a[k1 + k1 * a_dim1];
+		    dlaln2_(&c_true, &c__2, &c__1, &smin, &c_b26, &b[l1 + l1 *
+			     b_dim1], ldb, &c_b26, &c_b26, vec, &c__2, &d__1,
+			    &c_b30, x, &c__2, &scaloc, &xnorm, &ierr);
+		    if (ierr != 0) {
+			*info = 1;
+		    }
+
+		    if (scaloc != 1.) {
+			i__2 = *n;
+			for (j = 1; j <= i__2; ++j) {
+			    dscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L30: */
+			}
+			*scale *= scaloc;
+		    }
+		    c__[k1 + l1 * c_dim1] = x[0];
+		    c__[k1 + l2 * c_dim1] = x[1];
+
+		} else if (l1 != l2 && k1 != k2) {
+
+		    i__2 = *m - k2;
+/* Computing MIN */
+		    i__3 = k2 + 1;
+/* Computing MIN */
+		    i__4 = k2 + 1;
+		    suml = ddot_(&i__2, &a[k1 + min(i__3, *m)* a_dim1], lda, &
+			    c__[min(i__4, *m)+ l1 * c_dim1], &c__1);
+		    i__2 = l1 - 1;
+		    sumr = ddot_(&i__2, &c__[k1 + c_dim1], ldc, &b[l1 *
+			    b_dim1 + 1], &c__1);
+		    vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+
+		    i__2 = *m - k2;
+/* Computing MIN */
+		    i__3 = k2 + 1;
+/* Computing MIN */
+		    i__4 = k2 + 1;
+		    suml = ddot_(&i__2, &a[k1 + min(i__3, *m)* a_dim1], lda, &
+			    c__[min(i__4, *m)+ l2 * c_dim1], &c__1);
+		    i__2 = l1 - 1;
+		    sumr = ddot_(&i__2, &c__[k1 + c_dim1], ldc, &b[l2 *
+			    b_dim1 + 1], &c__1);
+		    vec[2] = c__[k1 + l2 * c_dim1] - (suml + sgn * sumr);
+
+		    i__2 = *m - k2;
+/* Computing MIN */
+		    i__3 = k2 + 1;
+/* Computing MIN */
+		    i__4 = k2 + 1;
+		    suml = ddot_(&i__2, &a[k2 + min(i__3, *m)* a_dim1], lda, &
+			    c__[min(i__4, *m)+ l1 * c_dim1], &c__1);
+		    i__2 = l1 - 1;
+		    sumr = ddot_(&i__2, &c__[k2 + c_dim1], ldc, &b[l1 *
+			    b_dim1 + 1], &c__1);
+		    vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr);
+
+		    i__2 = *m - k2;
+/* Computing MIN */
+		    i__3 = k2 + 1;
+/* Computing MIN */
+		    i__4 = k2 + 1;
+		    suml = ddot_(&i__2, &a[k2 + min(i__3, *m)* a_dim1], lda, &
+			    c__[min(i__4, *m)+ l2 * c_dim1], &c__1);
+		    i__2 = l1 - 1;
+		    sumr = ddot_(&i__2, &c__[k2 + c_dim1], ldc, &b[l2 *
+			    b_dim1 + 1], &c__1);
+		    vec[3] = c__[k2 + l2 * c_dim1] - (suml + sgn * sumr);
+
+		    dlasy2_(&c_false, &c_false, isgn, &c__2, &c__2, &a[k1 +
+			    k1 * a_dim1], lda, &b[l1 + l1 * b_dim1], ldb, vec,
+			     &c__2, &scaloc, x, &c__2, &xnorm, &ierr);
+		    if (ierr != 0) {
+			*info = 1;
+		    }
+
+		    if (scaloc != 1.) {
+			i__2 = *n;
+			for (j = 1; j <= i__2; ++j) {
+			    dscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L40: */
+			}
+			*scale *= scaloc;
+		    }
+		    c__[k1 + l1 * c_dim1] = x[0];
+		    c__[k1 + l2 * c_dim1] = x[2];
+		    c__[k2 + l1 * c_dim1] = x[1];
+		    c__[k2 + l2 * c_dim1] = x[3];
+		}
+
+L50:
+		;
+	    }
+
+L60:
+	    ;
+	}
+
+    } else if (! notrna && notrnb) {
+
+/*        Solve    A' *X + ISGN*X*B = scale*C. */
+
+/*        The (K,L)th block of X is determined starting from */
+/*        upper-left corner column by column by */
+
+/*          A(K,K)'*X(K,L) + ISGN*X(K,L)*B(L,L) = C(K,L) - R(K,L) */
+
+/*        Where */
+/*                   K-1                        L-1 */
+/*          R(K,L) = SUM [A(I,K)'*X(I,L)] +ISGN*SUM [X(K,J)*B(J,L)] */
+/*                   I=1                        J=1 */
+
+/*        Start column loop (index = L) */
+/*        L1 (L2): column index of the first (last) column of X(K,L) */
+
+	lnext = 1;
+	i__1 = *n;
+	for (l = 1; l <= i__1; ++l) {
+	    if (l < lnext) {
+		goto L120;
+	    }
+	    if (l == *n) {
+		l1 = l;
+		l2 = l;
+	    } else {
+		if (b[l + 1 + l * b_dim1] != 0.) {
+		    l1 = l;
+		    l2 = l + 1;
+		    lnext = l + 2;
+		} else {
+		    l1 = l;
+		    l2 = l;
+		    lnext = l + 1;
+		}
+	    }
+
+/*           Start row loop (index = K) */
+/*           K1 (K2): row index of the first (last) row of X(K,L) */
+
+	    knext = 1;
+	    i__2 = *m;
+	    for (k = 1; k <= i__2; ++k) {
+		if (k < knext) {
+		    goto L110;
+		}
+		if (k == *m) {
+		    k1 = k;
+		    k2 = k;
+		} else {
+		    if (a[k + 1 + k * a_dim1] != 0.) {
+			k1 = k;
+			k2 = k + 1;
+			knext = k + 2;
+		    } else {
+			k1 = k;
+			k2 = k;
+			knext = k + 1;
+		    }
+		}
+
+		if (l1 == l2 && k1 == k2) {
+		    i__3 = k1 - 1;
+		    suml = ddot_(&i__3, &a[k1 * a_dim1 + 1], &c__1, &c__[l1 *
+			    c_dim1 + 1], &c__1);
+		    i__3 = l1 - 1;
+		    sumr = ddot_(&i__3, &c__[k1 + c_dim1], ldc, &b[l1 *
+			    b_dim1 + 1], &c__1);
+		    vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+		    scaloc = 1.;
+
+		    a11 = a[k1 + k1 * a_dim1] + sgn * b[l1 + l1 * b_dim1];
+		    da11 = abs(a11);
+		    if (da11 <= smin) {
+			a11 = smin;
+			da11 = smin;
+			*info = 1;
+		    }
+		    db = abs(vec[0]);
+		    if (da11 < 1. && db > 1.) {
+			if (db > bignum * da11) {
+			    scaloc = 1. / db;
+			}
+		    }
+		    x[0] = vec[0] * scaloc / a11;
+
+		    if (scaloc != 1.) {
+			i__3 = *n;
+			for (j = 1; j <= i__3; ++j) {
+			    dscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L70: */
+			}
+			*scale *= scaloc;
+		    }
+		    c__[k1 + l1 * c_dim1] = x[0];
+
+		} else if (l1 == l2 && k1 != k2) {
+
+		    i__3 = k1 - 1;
+		    suml = ddot_(&i__3, &a[k1 * a_dim1 + 1], &c__1, &c__[l1 *
+			    c_dim1 + 1], &c__1);
+		    i__3 = l1 - 1;
+		    sumr = ddot_(&i__3, &c__[k1 + c_dim1], ldc, &b[l1 *
+			    b_dim1 + 1], &c__1);
+		    vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+
+		    i__3 = k1 - 1;
+		    suml = ddot_(&i__3, &a[k2 * a_dim1 + 1], &c__1, &c__[l1 *
+			    c_dim1 + 1], &c__1);
+		    i__3 = l1 - 1;
+		    sumr = ddot_(&i__3, &c__[k2 + c_dim1], ldc, &b[l1 *
+			    b_dim1 + 1], &c__1);
+		    vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr);
+
+		    d__1 = -sgn * b[l1 + l1 * b_dim1];
+		    dlaln2_(&c_true, &c__2, &c__1, &smin, &c_b26, &a[k1 + k1 *
+			     a_dim1], lda, &c_b26, &c_b26, vec, &c__2, &d__1,
+			    &c_b30, x, &c__2, &scaloc, &xnorm, &ierr);
+		    if (ierr != 0) {
+			*info = 1;
+		    }
+
+		    if (scaloc != 1.) {
+			i__3 = *n;
+			for (j = 1; j <= i__3; ++j) {
+			    dscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L80: */
+			}
+			*scale *= scaloc;
+		    }
+		    c__[k1 + l1 * c_dim1] = x[0];
+		    c__[k2 + l1 * c_dim1] = x[1];
+
+		} else if (l1 != l2 && k1 == k2) {
+
+		    i__3 = k1 - 1;
+		    suml = ddot_(&i__3, &a[k1 * a_dim1 + 1], &c__1, &c__[l1 *
+			    c_dim1 + 1], &c__1);
+		    i__3 = l1 - 1;
+		    sumr = ddot_(&i__3, &c__[k1 + c_dim1], ldc, &b[l1 *
+			    b_dim1 + 1], &c__1);
+		    vec[0] = sgn * (c__[k1 + l1 * c_dim1] - (suml + sgn *
+			    sumr));
+
+		    i__3 = k1 - 1;
+		    suml = ddot_(&i__3, &a[k1 * a_dim1 + 1], &c__1, &c__[l2 *
+			    c_dim1 + 1], &c__1);
+		    i__3 = l1 - 1;
+		    sumr = ddot_(&i__3, &c__[k1 + c_dim1], ldc, &b[l2 *
+			    b_dim1 + 1], &c__1);
+		    vec[1] = sgn * (c__[k1 + l2 * c_dim1] - (suml + sgn *
+			    sumr));
+
+		    d__1 = -sgn * a[k1 + k1 * a_dim1];
+		    dlaln2_(&c_true, &c__2, &c__1, &smin, &c_b26, &b[l1 + l1 *
+			     b_dim1], ldb, &c_b26, &c_b26, vec, &c__2, &d__1,
+			    &c_b30, x, &c__2, &scaloc, &xnorm, &ierr);
+		    if (ierr != 0) {
+			*info = 1;
+		    }
+
+		    if (scaloc != 1.) {
+			i__3 = *n;
+			for (j = 1; j <= i__3; ++j) {
+			    dscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L90: */
+			}
+			*scale *= scaloc;
+		    }
+		    c__[k1 + l1 * c_dim1] = x[0];
+		    c__[k1 + l2 * c_dim1] = x[1];
+
+		} else if (l1 != l2 && k1 != k2) {
+
+		    i__3 = k1 - 1;
+		    suml = ddot_(&i__3, &a[k1 * a_dim1 + 1], &c__1, &c__[l1 *
+			    c_dim1 + 1], &c__1);
+		    i__3 = l1 - 1;
+		    sumr = ddot_(&i__3, &c__[k1 + c_dim1], ldc, &b[l1 *
+			    b_dim1 + 1], &c__1);
+		    vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+
+		    i__3 = k1 - 1;
+		    suml = ddot_(&i__3, &a[k1 * a_dim1 + 1], &c__1, &c__[l2 *
+			    c_dim1 + 1], &c__1);
+		    i__3 = l1 - 1;
+		    sumr = ddot_(&i__3, &c__[k1 + c_dim1], ldc, &b[l2 *
+			    b_dim1 + 1], &c__1);
+		    vec[2] = c__[k1 + l2 * c_dim1] - (suml + sgn * sumr);
+
+		    i__3 = k1 - 1;
+		    suml = ddot_(&i__3, &a[k2 * a_dim1 + 1], &c__1, &c__[l1 *
+			    c_dim1 + 1], &c__1);
+		    i__3 = l1 - 1;
+		    sumr = ddot_(&i__3, &c__[k2 + c_dim1], ldc, &b[l1 *
+			    b_dim1 + 1], &c__1);
+		    vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr);
+
+		    i__3 = k1 - 1;
+		    suml = ddot_(&i__3, &a[k2 * a_dim1 + 1], &c__1, &c__[l2 *
+			    c_dim1 + 1], &c__1);
+		    i__3 = l1 - 1;
+		    sumr = ddot_(&i__3, &c__[k2 + c_dim1], ldc, &b[l2 *
+			    b_dim1 + 1], &c__1);
+		    vec[3] = c__[k2 + l2 * c_dim1] - (suml + sgn * sumr);
+
+		    dlasy2_(&c_true, &c_false, isgn, &c__2, &c__2, &a[k1 + k1
+			    * a_dim1], lda, &b[l1 + l1 * b_dim1], ldb, vec, &
+			    c__2, &scaloc, x, &c__2, &xnorm, &ierr);
+		    if (ierr != 0) {
+			*info = 1;
+		    }
+
+		    if (scaloc != 1.) {
+			i__3 = *n;
+			for (j = 1; j <= i__3; ++j) {
+			    dscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L100: */
+			}
+			*scale *= scaloc;
+		    }
+		    c__[k1 + l1 * c_dim1] = x[0];
+		    c__[k1 + l2 * c_dim1] = x[2];
+		    c__[k2 + l1 * c_dim1] = x[1];
+		    c__[k2 + l2 * c_dim1] = x[3];
+		}
+
+L110:
+		;
+	    }
+L120:
+	    ;
+	}
+
+    } else if (! notrna && ! notrnb) {
+
+/*        Solve    A'*X + ISGN*X*B' = scale*C. */
+
+/*        The (K,L)th block of X is determined starting from */
+/*        top-right corner column by column by */
+
+/*           A(K,K)'*X(K,L) + ISGN*X(K,L)*B(L,L)' = C(K,L) - R(K,L) */
+
+/*        Where */
+/*                     K-1                          N */
+/*            R(K,L) = SUM [A(I,K)'*X(I,L)] + ISGN*SUM [X(K,J)*B(L,J)']. */
+/*                     I=1                        J=L+1 */
+
+/*        Start column loop (index = L) */
+/*        L1 (L2): column index of the first (last) column of X(K,L) */
+
+	lnext = *n;
+	for (l = *n; l >= 1; --l) {
+	    if (l > lnext) {
+		goto L180;
+	    }
+	    if (l == 1) {
+		l1 = l;
+		l2 = l;
+	    } else {
+		if (b[l + (l - 1) * b_dim1] != 0.) {
+		    l1 = l - 1;
+		    l2 = l;
+		    lnext = l - 2;
+		} else {
+		    l1 = l;
+		    l2 = l;
+		    lnext = l - 1;
+		}
+	    }
+
+/*           Start row loop (index = K) */
+/*           K1 (K2): row index of the first (last) row of X(K,L) */
+
+	    knext = 1;
+	    i__1 = *m;
+	    for (k = 1; k <= i__1; ++k) {
+		if (k < knext) {
+		    goto L170;
+		}
+		if (k == *m) {
+		    k1 = k;
+		    k2 = k;
+		} else {
+		    if (a[k + 1 + k * a_dim1] != 0.) {
+			k1 = k;
+			k2 = k + 1;
+			knext = k + 2;
+		    } else {
+			k1 = k;
+			k2 = k;
+			knext = k + 1;
+		    }
+		}
+
+		if (l1 == l2 && k1 == k2) {
+		    i__2 = k1 - 1;
+		    suml = ddot_(&i__2, &a[k1 * a_dim1 + 1], &c__1, &c__[l1 *
+			    c_dim1 + 1], &c__1);
+		    i__2 = *n - l1;
+/* Computing MIN */
+		    i__3 = l1 + 1;
+/* Computing MIN */
+		    i__4 = l1 + 1;
+		    sumr = ddot_(&i__2, &c__[k1 + min(i__3, *n)* c_dim1], ldc,
+			     &b[l1 + min(i__4, *n)* b_dim1], ldb);
+		    vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+		    scaloc = 1.;
+
+		    a11 = a[k1 + k1 * a_dim1] + sgn * b[l1 + l1 * b_dim1];
+		    da11 = abs(a11);
+		    if (da11 <= smin) {
+			a11 = smin;
+			da11 = smin;
+			*info = 1;
+		    }
+		    db = abs(vec[0]);
+		    if (da11 < 1. && db > 1.) {
+			if (db > bignum * da11) {
+			    scaloc = 1. / db;
+			}
+		    }
+		    x[0] = vec[0] * scaloc / a11;
+
+		    if (scaloc != 1.) {
+			i__2 = *n;
+			for (j = 1; j <= i__2; ++j) {
+			    dscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L130: */
+			}
+			*scale *= scaloc;
+		    }
+		    c__[k1 + l1 * c_dim1] = x[0];
+
+		} else if (l1 == l2 && k1 != k2) {
+
+		    i__2 = k1 - 1;
+		    suml = ddot_(&i__2, &a[k1 * a_dim1 + 1], &c__1, &c__[l1 *
+			    c_dim1 + 1], &c__1);
+		    i__2 = *n - l2;
+/* Computing MIN */
+		    i__3 = l2 + 1;
+/* Computing MIN */
+		    i__4 = l2 + 1;
+		    sumr = ddot_(&i__2, &c__[k1 + min(i__3, *n)* c_dim1], ldc,
+			     &b[l1 + min(i__4, *n)* b_dim1], ldb);
+		    vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+
+		    i__2 = k1 - 1;
+		    suml = ddot_(&i__2, &a[k2 * a_dim1 + 1], &c__1, &c__[l1 *
+			    c_dim1 + 1], &c__1);
+		    i__2 = *n - l2;
+/* Computing MIN */
+		    i__3 = l2 + 1;
+/* Computing MIN */
+		    i__4 = l2 + 1;
+		    sumr = ddot_(&i__2, &c__[k2 + min(i__3, *n)* c_dim1], ldc,
+			     &b[l1 + min(i__4, *n)* b_dim1], ldb);
+		    vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr);
+
+		    d__1 = -sgn * b[l1 + l1 * b_dim1];
+		    dlaln2_(&c_true, &c__2, &c__1, &smin, &c_b26, &a[k1 + k1 *
+			     a_dim1], lda, &c_b26, &c_b26, vec, &c__2, &d__1,
+			    &c_b30, x, &c__2, &scaloc, &xnorm, &ierr);
+		    if (ierr != 0) {
+			*info = 1;
+		    }
+
+		    if (scaloc != 1.) {
+			i__2 = *n;
+			for (j = 1; j <= i__2; ++j) {
+			    dscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L140: */
+			}
+			*scale *= scaloc;
+		    }
+		    c__[k1 + l1 * c_dim1] = x[0];
+		    c__[k2 + l1 * c_dim1] = x[1];
+
+		} else if (l1 != l2 && k1 == k2) {
+
+		    i__2 = k1 - 1;
+		    suml = ddot_(&i__2, &a[k1 * a_dim1 + 1], &c__1, &c__[l1 *
+			    c_dim1 + 1], &c__1);
+		    i__2 = *n - l2;
+/* Computing MIN */
+		    i__3 = l2 + 1;
+/* Computing MIN */
+		    i__4 = l2 + 1;
+		    sumr = ddot_(&i__2, &c__[k1 + min(i__3, *n)* c_dim1], ldc,
+			     &b[l1 + min(i__4, *n)* b_dim1], ldb);
+		    vec[0] = sgn * (c__[k1 + l1 * c_dim1] - (suml + sgn *
+			    sumr));
+
+		    i__2 = k1 - 1;
+		    suml = ddot_(&i__2, &a[k1 * a_dim1 + 1], &c__1, &c__[l2 *
+			    c_dim1 + 1], &c__1);
+		    i__2 = *n - l2;
+/* Computing MIN */
+		    i__3 = l2 + 1;
+/* Computing MIN */
+		    i__4 = l2 + 1;
+		    sumr = ddot_(&i__2, &c__[k1 + min(i__3, *n)* c_dim1], ldc,
+			     &b[l2 + min(i__4, *n)* b_dim1], ldb);
+		    vec[1] = sgn * (c__[k1 + l2 * c_dim1] - (suml + sgn *
+			    sumr));
+
+		    d__1 = -sgn * a[k1 + k1 * a_dim1];
+		    dlaln2_(&c_false, &c__2, &c__1, &smin, &c_b26, &b[l1 + l1
+			    * b_dim1], ldb, &c_b26, &c_b26, vec, &c__2, &d__1,
+			     &c_b30, x, &c__2, &scaloc, &xnorm, &ierr);
+		    if (ierr != 0) {
+			*info = 1;
+		    }
+
+		    if (scaloc != 1.) {
+			i__2 = *n;
+			for (j = 1; j <= i__2; ++j) {
+			    dscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L150: */
+			}
+			*scale *= scaloc;
+		    }
+		    c__[k1 + l1 * c_dim1] = x[0];
+		    c__[k1 + l2 * c_dim1] = x[1];
+
+		} else if (l1 != l2 && k1 != k2) {
+
+		    i__2 = k1 - 1;
+		    suml = ddot_(&i__2, &a[k1 * a_dim1 + 1], &c__1, &c__[l1 *
+			    c_dim1 + 1], &c__1);
+		    i__2 = *n - l2;
+/* Computing MIN */
+		    i__3 = l2 + 1;
+/* Computing MIN */
+		    i__4 = l2 + 1;
+		    sumr = ddot_(&i__2, &c__[k1 + min(i__3, *n)* c_dim1], ldc,
+			     &b[l1 + min(i__4, *n)* b_dim1], ldb);
+		    vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+
+		    i__2 = k1 - 1;
+		    suml = ddot_(&i__2, &a[k1 * a_dim1 + 1], &c__1, &c__[l2 *
+			    c_dim1 + 1], &c__1);
+		    i__2 = *n - l2;
+/* Computing MIN */
+		    i__3 = l2 + 1;
+/* Computing MIN */
+		    i__4 = l2 + 1;
+		    sumr = ddot_(&i__2, &c__[k1 + min(i__3, *n)* c_dim1], ldc,
+			     &b[l2 + min(i__4, *n)* b_dim1], ldb);
+		    vec[2] = c__[k1 + l2 * c_dim1] - (suml + sgn * sumr);
+
+		    i__2 = k1 - 1;
+		    suml = ddot_(&i__2, &a[k2 * a_dim1 + 1], &c__1, &c__[l1 *
+			    c_dim1 + 1], &c__1);
+		    i__2 = *n - l2;
+/* Computing MIN */
+		    i__3 = l2 + 1;
+/* Computing MIN */
+		    i__4 = l2 + 1;
+		    sumr = ddot_(&i__2, &c__[k2 + min(i__3, *n)* c_dim1], ldc,
+			     &b[l1 + min(i__4, *n)* b_dim1], ldb);
+		    vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr);
+
+		    i__2 = k1 - 1;
+		    suml = ddot_(&i__2, &a[k2 * a_dim1 + 1], &c__1, &c__[l2 *
+			    c_dim1 + 1], &c__1);
+		    i__2 = *n - l2;
+/* Computing MIN */
+		    i__3 = l2 + 1;
+/* Computing MIN */
+		    i__4 = l2 + 1;
+		    sumr = ddot_(&i__2, &c__[k2 + min(i__3, *n)* c_dim1], ldc,
+			     &b[l2 + min(i__4, *n)* b_dim1], ldb);
+		    vec[3] = c__[k2 + l2 * c_dim1] - (suml + sgn * sumr);
+
+		    dlasy2_(&c_true, &c_true, isgn, &c__2, &c__2, &a[k1 + k1 *
+			     a_dim1], lda, &b[l1 + l1 * b_dim1], ldb, vec, &
+			    c__2, &scaloc, x, &c__2, &xnorm, &ierr);
+		    if (ierr != 0) {
+			*info = 1;
+		    }
+
+		    if (scaloc != 1.) {
+			i__2 = *n;
+			for (j = 1; j <= i__2; ++j) {
+			    dscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L160: */
+			}
+			*scale *= scaloc;
+		    }
+		    c__[k1 + l1 * c_dim1] = x[0];
+		    c__[k1 + l2 * c_dim1] = x[2];
+		    c__[k2 + l1 * c_dim1] = x[1];
+		    c__[k2 + l2 * c_dim1] = x[3];
+		}
+
+L170:
+		;
+	    }
+L180:
+	    ;
+	}
+
+    } else if (notrna && ! notrnb) {
+
+/*        Solve    A*X + ISGN*X*B' = scale*C. */
+
+/*        The (K,L)th block of X is determined starting from */
+/*        bottom-right corner column by column by */
+
+/*            A(K,K)*X(K,L) + ISGN*X(K,L)*B(L,L)' = C(K,L) - R(K,L) */
+
+/*        Where */
+/*                      M                          N */
+/*            R(K,L) = SUM [A(K,I)*X(I,L)] + ISGN*SUM [X(K,J)*B(L,J)']. */
+/*                    I=K+1                      J=L+1 */
+
+/*        Start column loop (index = L) */
+/*        L1 (L2): column index of the first (last) column of X(K,L) */
+
+	lnext = *n;
+	for (l = *n; l >= 1; --l) {
+	    if (l > lnext) {
+		goto L240;
+	    }
+	    if (l == 1) {
+		l1 = l;
+		l2 = l;
+	    } else {
+		if (b[l + (l - 1) * b_dim1] != 0.) {
+		    l1 = l - 1;
+		    l2 = l;
+		    lnext = l - 2;
+		} else {
+		    l1 = l;
+		    l2 = l;
+		    lnext = l - 1;
+		}
+	    }
+
+/*           Start row loop (index = K) */
+/*           K1 (K2): row index of the first (last) row of X(K,L) */
+
+	    knext = *m;
+	    for (k = *m; k >= 1; --k) {
+		if (k > knext) {
+		    goto L230;
+		}
+		if (k == 1) {
+		    k1 = k;
+		    k2 = k;
+		} else {
+		    if (a[k + (k - 1) * a_dim1] != 0.) {
+			k1 = k - 1;
+			k2 = k;
+			knext = k - 2;
+		    } else {
+			k1 = k;
+			k2 = k;
+			knext = k - 1;
+		    }
+		}
+
+		if (l1 == l2 && k1 == k2) {
+		    i__1 = *m - k1;
+/* Computing MIN */
+		    i__2 = k1 + 1;
+/* Computing MIN */
+		    i__3 = k1 + 1;
+		    suml = ddot_(&i__1, &a[k1 + min(i__2, *m)* a_dim1], lda, &
+			    c__[min(i__3, *m)+ l1 * c_dim1], &c__1);
+		    i__1 = *n - l1;
+/* Computing MIN */
+		    i__2 = l1 + 1;
+/* Computing MIN */
+		    i__3 = l1 + 1;
+		    sumr = ddot_(&i__1, &c__[k1 + min(i__2, *n)* c_dim1], ldc,
+			     &b[l1 + min(i__3, *n)* b_dim1], ldb);
+		    vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+		    scaloc = 1.;
+
+		    a11 = a[k1 + k1 * a_dim1] + sgn * b[l1 + l1 * b_dim1];
+		    da11 = abs(a11);
+		    if (da11 <= smin) {
+			a11 = smin;
+			da11 = smin;
+			*info = 1;
+		    }
+		    db = abs(vec[0]);
+		    if (da11 < 1. && db > 1.) {
+			if (db > bignum * da11) {
+			    scaloc = 1. / db;
+			}
+		    }
+		    x[0] = vec[0] * scaloc / a11;
+
+		    if (scaloc != 1.) {
+			i__1 = *n;
+			for (j = 1; j <= i__1; ++j) {
+			    dscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L190: */
+			}
+			*scale *= scaloc;
+		    }
+		    c__[k1 + l1 * c_dim1] = x[0];
+
+		} else if (l1 == l2 && k1 != k2) {
+
+		    i__1 = *m - k2;
+/* Computing MIN */
+		    i__2 = k2 + 1;
+/* Computing MIN */
+		    i__3 = k2 + 1;
+		    suml = ddot_(&i__1, &a[k1 + min(i__2, *m)* a_dim1], lda, &
+			    c__[min(i__3, *m)+ l1 * c_dim1], &c__1);
+		    i__1 = *n - l2;
+/* Computing MIN */
+		    i__2 = l2 + 1;
+/* Computing MIN */
+		    i__3 = l2 + 1;
+		    sumr = ddot_(&i__1, &c__[k1 + min(i__2, *n)* c_dim1], ldc,
+			     &b[l1 + min(i__3, *n)* b_dim1], ldb);
+		    vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+
+		    i__1 = *m - k2;
+/* Computing MIN */
+		    i__2 = k2 + 1;
+/* Computing MIN */
+		    i__3 = k2 + 1;
+		    suml = ddot_(&i__1, &a[k2 + min(i__2, *m)* a_dim1], lda, &
+			    c__[min(i__3, *m)+ l1 * c_dim1], &c__1);
+		    i__1 = *n - l2;
+/* Computing MIN */
+		    i__2 = l2 + 1;
+/* Computing MIN */
+		    i__3 = l2 + 1;
+		    sumr = ddot_(&i__1, &c__[k2 + min(i__2, *n)* c_dim1], ldc,
+			     &b[l1 + min(i__3, *n)* b_dim1], ldb);
+		    vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr);
+
+		    d__1 = -sgn * b[l1 + l1 * b_dim1];
+		    dlaln2_(&c_false, &c__2, &c__1, &smin, &c_b26, &a[k1 + k1
+			    * a_dim1], lda, &c_b26, &c_b26, vec, &c__2, &d__1,
+			     &c_b30, x, &c__2, &scaloc, &xnorm, &ierr);
+		    if (ierr != 0) {
+			*info = 1;
+		    }
+
+		    if (scaloc != 1.) {
+			i__1 = *n;
+			for (j = 1; j <= i__1; ++j) {
+			    dscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L200: */
+			}
+			*scale *= scaloc;
+		    }
+		    c__[k1 + l1 * c_dim1] = x[0];
+		    c__[k2 + l1 * c_dim1] = x[1];
+
+		} else if (l1 != l2 && k1 == k2) {
+
+		    i__1 = *m - k1;
+/* Computing MIN */
+		    i__2 = k1 + 1;
+/* Computing MIN */
+		    i__3 = k1 + 1;
+		    suml = ddot_(&i__1, &a[k1 + min(i__2, *m)* a_dim1], lda, &
+			    c__[min(i__3, *m)+ l1 * c_dim1], &c__1);
+		    i__1 = *n - l2;
+/* Computing MIN */
+		    i__2 = l2 + 1;
+/* Computing MIN */
+		    i__3 = l2 + 1;
+		    sumr = ddot_(&i__1, &c__[k1 + min(i__2, *n)* c_dim1], ldc,
+			     &b[l1 + min(i__3, *n)* b_dim1], ldb);
+		    vec[0] = sgn * (c__[k1 + l1 * c_dim1] - (suml + sgn *
+			    sumr));
+
+		    i__1 = *m - k1;
+/* Computing MIN */
+		    i__2 = k1 + 1;
+/* Computing MIN */
+		    i__3 = k1 + 1;
+		    suml = ddot_(&i__1, &a[k1 + min(i__2, *m)* a_dim1], lda, &
+			    c__[min(i__3, *m)+ l2 * c_dim1], &c__1);
+		    i__1 = *n - l2;
+/* Computing MIN */
+		    i__2 = l2 + 1;
+/* Computing MIN */
+		    i__3 = l2 + 1;
+		    sumr = ddot_(&i__1, &c__[k1 + min(i__2, *n)* c_dim1], ldc,
+			     &b[l2 + min(i__3, *n)* b_dim1], ldb);
+		    vec[1] = sgn * (c__[k1 + l2 * c_dim1] - (suml + sgn *
+			    sumr));
+
+		    d__1 = -sgn * a[k1 + k1 * a_dim1];
+		    dlaln2_(&c_false, &c__2, &c__1, &smin, &c_b26, &b[l1 + l1
+			    * b_dim1], ldb, &c_b26, &c_b26, vec, &c__2, &d__1,
+			     &c_b30, x, &c__2, &scaloc, &xnorm, &ierr);
+		    if (ierr != 0) {
+			*info = 1;
+		    }
+
+		    if (scaloc != 1.) {
+			i__1 = *n;
+			for (j = 1; j <= i__1; ++j) {
+			    dscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L210: */
+			}
+			*scale *= scaloc;
+		    }
+		    c__[k1 + l1 * c_dim1] = x[0];
+		    c__[k1 + l2 * c_dim1] = x[1];
+
+		} else if (l1 != l2 && k1 != k2) {
+
+		    i__1 = *m - k2;
+/* Computing MIN */
+		    i__2 = k2 + 1;
+/* Computing MIN */
+		    i__3 = k2 + 1;
+		    suml = ddot_(&i__1, &a[k1 + min(i__2, *m)* a_dim1], lda, &
+			    c__[min(i__3, *m)+ l1 * c_dim1], &c__1);
+		    i__1 = *n - l2;
+/* Computing MIN */
+		    i__2 = l2 + 1;
+/* Computing MIN */
+		    i__3 = l2 + 1;
+		    sumr = ddot_(&i__1, &c__[k1 + min(i__2, *n)* c_dim1], ldc,
+			     &b[l1 + min(i__3, *n)* b_dim1], ldb);
+		    vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+
+		    i__1 = *m - k2;
+/* Computing MIN */
+		    i__2 = k2 + 1;
+/* Computing MIN */
+		    i__3 = k2 + 1;
+		    suml = ddot_(&i__1, &a[k1 + min(i__2, *m)* a_dim1], lda, &
+			    c__[min(i__3, *m)+ l2 * c_dim1], &c__1);
+		    i__1 = *n - l2;
+/* Computing MIN */
+		    i__2 = l2 + 1;
+/* Computing MIN */
+		    i__3 = l2 + 1;
+		    sumr = ddot_(&i__1, &c__[k1 + min(i__2, *n)* c_dim1], ldc,
+			     &b[l2 + min(i__3, *n)* b_dim1], ldb);
+		    vec[2] = c__[k1 + l2 * c_dim1] - (suml + sgn * sumr);
+
+		    i__1 = *m - k2;
+/* Computing MIN */
+		    i__2 = k2 + 1;
+/* Computing MIN */
+		    i__3 = k2 + 1;
+		    suml = ddot_(&i__1, &a[k2 + min(i__2, *m)* a_dim1], lda, &
+			    c__[min(i__3, *m)+ l1 * c_dim1], &c__1);
+		    i__1 = *n - l2;
+/* Computing MIN */
+		    i__2 = l2 + 1;
+/* Computing MIN */
+		    i__3 = l2 + 1;
+		    sumr = ddot_(&i__1, &c__[k2 + min(i__2, *n)* c_dim1], ldc,
+			     &b[l1 + min(i__3, *n)* b_dim1], ldb);
+		    vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr);
+
+		    i__1 = *m - k2;
+/* Computing MIN */
+		    i__2 = k2 + 1;
+/* Computing MIN */
+		    i__3 = k2 + 1;
+		    suml = ddot_(&i__1, &a[k2 + min(i__2, *m)* a_dim1], lda, &
+			    c__[min(i__3, *m)+ l2 * c_dim1], &c__1);
+		    i__1 = *n - l2;
+/* Computing MIN */
+		    i__2 = l2 + 1;
+/* Computing MIN */
+		    i__3 = l2 + 1;
+		    sumr = ddot_(&i__1, &c__[k2 + min(i__2, *n)* c_dim1], ldc,
+			     &b[l2 + min(i__3, *n)* b_dim1], ldb);
+		    vec[3] = c__[k2 + l2 * c_dim1] - (suml + sgn * sumr);
+
+		    dlasy2_(&c_false, &c_true, isgn, &c__2, &c__2, &a[k1 + k1
+			    * a_dim1], lda, &b[l1 + l1 * b_dim1], ldb, vec, &
+			    c__2, &scaloc, x, &c__2, &xnorm, &ierr);
+		    if (ierr != 0) {
+			*info = 1;
+		    }
+
+		    if (scaloc != 1.) {
+			i__1 = *n;
+			for (j = 1; j <= i__1; ++j) {
+			    dscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L220: */
+			}
+			*scale *= scaloc;
+		    }
+		    c__[k1 + l1 * c_dim1] = x[0];
+		    c__[k1 + l2 * c_dim1] = x[2];
+		    c__[k2 + l1 * c_dim1] = x[1];
+		    c__[k2 + l2 * c_dim1] = x[3];
+		}
+
+L230:
+		;
+	    }
+L240:
+	    ;
+	}
+
+    }
+
+    return 0;
+
+/*     End of DTRSYL */
+
+} /* dtrsyl_ */
diff --git a/contrib/libs/clapack/dtrti2.c b/contrib/libs/clapack/dtrti2.c
new file mode 100644
index 0000000000..2631b19d56
--- /dev/null
+++ b/contrib/libs/clapack/dtrti2.c
@@ -0,0 +1,183 @@
+/* dtrti2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int dtrti2_(char *uplo, char *diag, integer *n, doublereal *
+ a, integer *lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer j;
+ doublereal ajj;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern /* Subroutine */ int dtrmv_(char *, char *, char *, integer *,
+ doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *);
+ logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTRTI2 computes the inverse of a real upper or lower triangular */
+/* matrix. */
+
+/* This is the Level 2 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the triangular matrix A. If UPLO = 'U', the */
+/* leading n by n upper triangular part of the array A contains */
+/* the upper triangular matrix, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of the array A contains */
+/* the lower triangular matrix, and the strictly upper */
+/* triangular part of A is not referenced. If DIAG = 'U', the */
+/* diagonal elements of A are also not referenced and are */
+/* assumed to be 1. */
+
+/* On exit, the (triangular) inverse of the original matrix, in */
+/* the same storage format. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ nounit = lsame_(diag, "N");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTRTI2", &i__1);
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute inverse of upper triangular matrix. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (nounit) {
+ a[j + j * a_dim1] = 1. / a[j + j * a_dim1];
+ ajj = -a[j + j * a_dim1];
+ } else {
+ ajj = -1.;
+ }
+
+/* Compute elements 1:j-1 of j-th column. */
+
+ i__2 = j - 1;
+ dtrmv_("Upper", "No transpose", diag, &i__2, &a[a_offset], lda, &
+ a[j * a_dim1 + 1], &c__1);
+ i__2 = j - 1;
+ dscal_(&i__2, &ajj, &a[j * a_dim1 + 1], &c__1);
+/* L10: */
+ }
+ } else {
+
+/* Compute inverse of lower triangular matrix. */
+
+ for (j = *n; j >= 1; --j) {
+ if (nounit) {
+ a[j + j * a_dim1] = 1. / a[j + j * a_dim1];
+ ajj = -a[j + j * a_dim1];
+ } else {
+ ajj = -1.;
+ }
+ if (j < *n) {
+
+/* Compute elements j+1:n of j-th column. */
+
+ i__1 = *n - j;
+ dtrmv_("Lower", "No transpose", diag, &i__1, &a[j + 1 + (j +
+ 1) * a_dim1], lda, &a[j + 1 + j * a_dim1], &c__1);
+ i__1 = *n - j;
+ dscal_(&i__1, &ajj, &a[j + 1 + j * a_dim1], &c__1);
+ }
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of DTRTI2 */
+
+} /* dtrti2_ */
diff --git a/contrib/libs/clapack/dtrtri.c b/contrib/libs/clapack/dtrtri.c
new file mode 100644
index 0000000000..160eddf04a
--- /dev/null
+++ b/contrib/libs/clapack/dtrtri.c
@@ -0,0 +1,243 @@
+/* dtrtri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static doublereal c_b18 = 1.;
+static doublereal c_b22 = -1.;
+
+/* Subroutine */ int dtrtri_(char *uplo, char *diag, integer *n, doublereal *
+ a, integer *lda, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, i__1, i__2[2], i__3, i__4, i__5;
+ char ch__1[3];
+ ch__1[2] = 0;
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer j, jb, nb, nn;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dtrmm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *), dtrsm_(
+ char *, char *, char *, char *, integer *, integer *, doublereal *
+, doublereal *, integer *, doublereal *, integer *);
+ logical upper;
+ extern /* Subroutine */ int dtrti2_(char *, char *, integer *, doublereal
+ *, integer *, integer *), xerbla_(char *, integer
+ *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTRTRI computes the inverse of a real upper or lower triangular */
+/* matrix A. */
+
+/* This is the Level 3 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the triangular matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of the array A contains */
+/* the upper triangular matrix, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of the array A contains */
+/* the lower triangular matrix, and the strictly upper */
+/* triangular part of A is not referenced. If DIAG = 'U', the */
+/* diagonal elements of A are also not referenced and are */
+/* assumed to be 1. */
+/* On exit, the (triangular) inverse of the original matrix, in */
+/* the same storage format. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, A(i,i) is exactly zero. The triangular */
+/* matrix is singular and its inverse can not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ nounit = lsame_(diag, "N");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTRTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check for singularity if non-unit. */
+
+ if (nounit) {
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ if (a[*info + *info * a_dim1] == 0.) {
+ return 0;
+ }
+/* L10: */
+ }
+ *info = 0;
+ }
+
+/* Determine the block size for this environment. */
+
+/* Writing concatenation */
+ i__2[0] = 1, a__1[0] = uplo;
+ i__2[1] = 1, a__1[1] = diag;
+ s_cat(ch__1, a__1, i__2, &c__2, (ftnlen)2);
+ nb = ilaenv_(&c__1, "DTRTRI", ch__1, n, &c_n1, &c_n1, &c_n1);
+ if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code */
+
+ dtrti2_(uplo, diag, n, &a[a_offset], lda, info);
+ } else {
+
+/* Use blocked code */
+
+ if (upper) {
+
+/* Compute inverse of upper triangular matrix */
+
+ i__1 = *n;
+ i__3 = nb;
+ for (j = 1; i__3 < 0 ? j >= i__1 : j <= i__1; j += i__3) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *n - j + 1;
+ jb = min(i__4,i__5);
+
+/* Compute rows 1:j-1 of current block column */
+
+ i__4 = j - 1;
+ dtrmm_("Left", "Upper", "No transpose", diag, &i__4, &jb, &
+ c_b18, &a[a_offset], lda, &a[j * a_dim1 + 1], lda);
+ i__4 = j - 1;
+ dtrsm_("Right", "Upper", "No transpose", diag, &i__4, &jb, &
+ c_b22, &a[j + j * a_dim1], lda, &a[j * a_dim1 + 1],
+ lda);
+
+/* Compute inverse of current diagonal block */
+
+ dtrti2_("Upper", diag, &jb, &a[j + j * a_dim1], lda, info);
+/* L20: */
+ }
+ } else {
+
+/* Compute inverse of lower triangular matrix */
+
+ nn = (*n - 1) / nb * nb + 1;
+ i__3 = -nb;
+ for (j = nn; i__3 < 0 ? j >= 1 : j <= 1; j += i__3) {
+/* Computing MIN */
+ i__1 = nb, i__4 = *n - j + 1;
+ jb = min(i__1,i__4);
+ if (j + jb <= *n) {
+
+/* Compute rows j+jb:n of current block column */
+
+ i__1 = *n - j - jb + 1;
+ dtrmm_("Left", "Lower", "No transpose", diag, &i__1, &jb,
+ &c_b18, &a[j + jb + (j + jb) * a_dim1], lda, &a[j
+ + jb + j * a_dim1], lda);
+ i__1 = *n - j - jb + 1;
+ dtrsm_("Right", "Lower", "No transpose", diag, &i__1, &jb,
+ &c_b22, &a[j + j * a_dim1], lda, &a[j + jb + j *
+ a_dim1], lda);
+ }
+
+/* Compute inverse of current diagonal block */
+
+ dtrti2_("Lower", diag, &jb, &a[j + j * a_dim1], lda, info);
+/* L30: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DTRTRI */
+
+} /* dtrtri_ */
diff --git a/contrib/libs/clapack/dtrtrs.c b/contrib/libs/clapack/dtrtrs.c
new file mode 100644
index 0000000000..da458c5459
--- /dev/null
+++ b/contrib/libs/clapack/dtrtrs.c
@@ -0,0 +1,183 @@
+/* dtrtrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b12 = 1.;
+
+/* Subroutine */ int dtrtrs_(char *uplo, char *trans, char *diag, integer *n,
+ integer *nrhs, doublereal *a, integer *lda, doublereal *b, integer *
+ ldb, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *), xerbla_(
+ char *, integer *);
+ logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTRTRS solves a triangular system of the form */
+
+/* A * X = B or A**T * X = B, */
+
+/* where A is a triangular matrix of order N, and B is an N-by-NRHS */
+/* matrix. A check is made to verify that A is nonsingular. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose = Transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The triangular matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of the array A contains the upper */
+/* triangular matrix, and the strictly lower triangular part of */
+/* A is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of the array A contains the lower triangular */
+/* matrix, and the strictly upper triangular part of A is not */
+/* referenced. If DIAG = 'U', the diagonal elements of A are */
+/* also not referenced and are assumed to be 1. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, if INFO = 0, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element of A is zero, */
+/* indicating that the matrix is singular and the solutions */
+/* X have not been computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ nounit = lsame_(diag, "N");
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*nrhs < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTRTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check for singularity. */
+
+ if (nounit) {
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ if (a[*info + *info * a_dim1] == 0.) {
+ return 0;
+ }
+/* L10: */
+ }
+ }
+ *info = 0;
+
+/* Solve A * x = b or A' * x = b. */
+
+ dtrsm_("Left", uplo, trans, diag, n, nrhs, &c_b12, &a[a_offset], lda, &b[
+ b_offset], ldb);
+
+ return 0;
+
+/* End of DTRTRS */
+
+} /* dtrtrs_ */
diff --git a/contrib/libs/clapack/dtrttf.c b/contrib/libs/clapack/dtrttf.c
new file mode 100644
index 0000000000..0e1746e716
--- /dev/null
+++ b/contrib/libs/clapack/dtrttf.c
@@ -0,0 +1,489 @@
+/* dtrttf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dtrttf_(char *transr, char *uplo, integer *n, doublereal
+ *a, integer *lda, doublereal *arf, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, k, l, n1, n2, ij, nt, nx2, np1x2;
+ logical normaltransr;
+ extern logical lsame_(char *, char *);
+ logical lower;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nisodd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTRTTF copies a triangular matrix A from standard full format (TR) */
+/* to rectangular full packed format (TF) . */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': ARF in Normal form is wanted; */
+/* = 'T': ARF in Transpose form is wanted. */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N). */
+/* On entry, the triangular matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of the array A contains */
+/* the upper triangular matrix, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of the array A contains */
+/* the lower triangular matrix, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the matrix A. LDA >= max(1,N). */
+
+/* ARF (output) DOUBLE PRECISION array, dimension (NT). */
+/* NT=N*(N+1)/2. On exit, the triangular matrix A in RFP format. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Notes */
+/* ===== */
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* even. We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* the transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* the transpose of the last three columns of AP lower. */
+/* This covers the case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 03 04 05 33 43 53 */
+/* 13 14 15 00 44 54 */
+/* 23 24 25 10 11 55 */
+/* 33 34 35 20 21 22 */
+/* 00 44 45 30 31 32 */
+/* 01 11 55 40 41 42 */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* odd. We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* the transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* the transpose of the last two columns of AP lower. */
+/* This covers the case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 02 03 04 00 33 43 */
+/* 12 13 14 10 11 44 */
+/* 22 23 24 20 21 22 */
+/* 00 33 34 30 31 32 */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+/* RFP A RFP A */
+
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* Reference */
+/* ========= */
+
+/* ===================================================================== */
+
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda - 1 - 0 + 1;
+ a_offset = 0 + a_dim1 * 0;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ normaltransr = lsame_(transr, "N");
+ lower = lsame_(uplo, "L");
+ if (! normaltransr && ! lsame_(transr, "T")) {
+ *info = -1;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTRTTF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 1) {
+ if (*n == 1) {
+ arf[0] = a[0];
+ }
+ return 0;
+ }
+
+/* Size of array ARF(0:nt-1) */
+
+ nt = *n * (*n + 1) / 2;
+
+/* Set N1 and N2 depending on LOWER: for N even N1=N2=K */
+
+ if (lower) {
+ n2 = *n / 2;
+ n1 = *n - n2;
+ } else {
+ n1 = *n / 2;
+ n2 = *n - n1;
+ }
+
+/* If N is odd, set NISODD = .TRUE., LDA=N+1 and A is (N+1)--by--K2. */
+/* If N is even, set K = N/2 and NISODD = .FALSE., LDA=N and A is */
+/* N--by--(N+1)/2. */
+
+ if (*n % 2 == 0) {
+ k = *n / 2;
+ nisodd = FALSE_;
+ if (! lower) {
+ np1x2 = *n + *n + 2;
+ }
+ } else {
+ nisodd = TRUE_;
+ if (! lower) {
+ nx2 = *n + *n;
+ }
+ }
+
+ if (nisodd) {
+
+/* N is odd */
+
+ if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+ if (lower) {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'L' */
+
+ ij = 0;
+ i__1 = n2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = n2 + j;
+ for (i__ = n1; i__ <= i__2; ++i__) {
+ arf[ij] = a[n2 + j + i__ * a_dim1];
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ arf[ij] = a[i__ + j * a_dim1];
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'U' */
+
+ ij = nt - *n;
+ i__1 = n1;
+ for (j = *n - 1; j >= i__1; --j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ arf[ij] = a[i__ + j * a_dim1];
+ ++ij;
+ }
+ i__2 = n1 - 1;
+ for (l = j - n1; l <= i__2; ++l) {
+ arf[ij] = a[j - n1 + l * a_dim1];
+ ++ij;
+ }
+ ij -= nx2;
+ }
+
+ }
+
+ } else {
+
+/* N is odd and TRANSR = 'T' */
+
+ if (lower) {
+
+/* N is odd, TRANSR = 'T', and UPLO = 'L' */
+
+ ij = 0;
+ i__1 = n2 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ arf[ij] = a[j + i__ * a_dim1];
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = n1 + j; i__ <= i__2; ++i__) {
+ arf[ij] = a[i__ + (n1 + j) * a_dim1];
+ ++ij;
+ }
+ }
+ i__1 = *n - 1;
+ for (j = n2; j <= i__1; ++j) {
+ i__2 = n1 - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ arf[ij] = a[j + i__ * a_dim1];
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* N is odd, TRANSR = 'T', and UPLO = 'U' */
+
+ ij = 0;
+ i__1 = n1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - 1;
+ for (i__ = n1; i__ <= i__2; ++i__) {
+ arf[ij] = a[j + i__ * a_dim1];
+ ++ij;
+ }
+ }
+ i__1 = n1 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ arf[ij] = a[i__ + j * a_dim1];
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (l = n2 + j; l <= i__2; ++l) {
+ arf[ij] = a[n2 + j + l * a_dim1];
+ ++ij;
+ }
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* N is even */
+
+ if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+ if (lower) {
+
+/* N is even, TRANSR = 'N', and UPLO = 'L' */
+
+ ij = 0;
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k + j;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ arf[ij] = a[k + j + i__ * a_dim1];
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ arf[ij] = a[i__ + j * a_dim1];
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* N is even, TRANSR = 'N', and UPLO = 'U' */
+
+ ij = nt - *n - 1;
+ i__1 = k;
+ for (j = *n - 1; j >= i__1; --j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ arf[ij] = a[i__ + j * a_dim1];
+ ++ij;
+ }
+ i__2 = k - 1;
+ for (l = j - k; l <= i__2; ++l) {
+ arf[ij] = a[j - k + l * a_dim1];
+ ++ij;
+ }
+ ij -= np1x2;
+ }
+
+ }
+
+ } else {
+
+/* N is even and TRANSR = 'T' */
+
+ if (lower) {
+
+/* N is even, TRANSR = 'T', and UPLO = 'L' */
+
+ ij = 0;
+ j = k;
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ arf[ij] = a[i__ + j * a_dim1];
+ ++ij;
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ arf[ij] = a[j + i__ * a_dim1];
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = k + 1 + j; i__ <= i__2; ++i__) {
+ arf[ij] = a[i__ + (k + 1 + j) * a_dim1];
+ ++ij;
+ }
+ }
+ i__1 = *n - 1;
+ for (j = k - 1; j <= i__1; ++j) {
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ arf[ij] = a[j + i__ * a_dim1];
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* N is even, TRANSR = 'T', and UPLO = 'U' */
+
+ ij = 0;
+ i__1 = k;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - 1;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ arf[ij] = a[j + i__ * a_dim1];
+ ++ij;
+ }
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ arf[ij] = a[i__ + j * a_dim1];
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (l = k + 1 + j; l <= i__2; ++l) {
+ arf[ij] = a[k + 1 + j + l * a_dim1];
+ ++ij;
+ }
+ }
+/* Note that here, on exit of the loop, J = K-1 */
+ i__1 = j;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ arf[ij] = a[i__ + j * a_dim1];
+ ++ij;
+ }
+
+ }
+
+ }
+
+ }
+
+ return 0;
+
+/* End of DTRTTF */
+
+} /* dtrttf_ */
diff --git a/contrib/libs/clapack/dtrttp.c b/contrib/libs/clapack/dtrttp.c
new file mode 100644
index 0000000000..1af49f1404
--- /dev/null
+++ b/contrib/libs/clapack/dtrttp.c
@@ -0,0 +1,144 @@
+/* dtrttp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int dtrttp_(char *uplo, integer *n, doublereal *a, integer *
+ lda, doublereal *ap, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, k;
+ extern logical lsame_(char *, char *);
+ logical lower;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- and Julien Langou of the Univ. of Colorado Denver -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTRTTP copies a triangular matrix A from full format (TR) to standard */
+/* packed format (TP). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular. */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrices AP and A. N >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On exit, the triangular matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AP (output) DOUBLE PRECISION array, dimension (N*(N+1)/2 */
+/* On exit, the upper or lower triangular matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ lower = lsame_(uplo, "L");
+ if (! lower && ! lsame_(uplo, "U")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTRTTP", &i__1);
+ return 0;
+ }
+
+ if (lower) {
+ k = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ ++k;
+ ap[k] = a[i__ + j * a_dim1];
+ }
+ }
+ } else {
+ k = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ ++k;
+ ap[k] = a[i__ + j * a_dim1];
+ }
+ }
+ }
+
+
+ return 0;
+
+/* End of DTRTTP */
+
+} /* dtrttp_ */
diff --git a/contrib/libs/clapack/dtzrqf.c b/contrib/libs/clapack/dtzrqf.c
new file mode 100644
index 0000000000..08a3e7102f
--- /dev/null
+++ b/contrib/libs/clapack/dtzrqf.c
@@ -0,0 +1,221 @@
+/* dtzrqf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b8 = 1.;
+
+/* Subroutine */ int dtzrqf_(integer *m, integer *n, doublereal *a, integer *
+	lda, doublereal *tau, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    doublereal d__1;
+
+    /* Local variables */
+    integer i__, k, m1;
+    extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    integer *), dgemv_(char *, integer *, integer *, doublereal *,
+	    doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, integer *), dcopy_(integer *, doublereal *,
+	    integer *, doublereal *, integer *), daxpy_(integer *, doublereal
+	    *, doublereal *, integer *, doublereal *, integer *), dlarfp_(
+	    integer *, doublereal *, doublereal *, integer *, doublereal *),
+	    xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine DTZRZF. */
+
+/* DTZRQF reduces the M-by-N ( M<=N ) real upper trapezoidal matrix A */
+/* to upper triangular form by means of orthogonal transformations. */
+
+/* The upper trapezoidal matrix A is factored as */
+
+/* A = ( R 0 ) * Z, */
+
+/* where Z is an N-by-N orthogonal matrix and R is an M-by-M upper */
+/* triangular matrix. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= M. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the leading M-by-N upper trapezoidal part of the */
+/* array A must contain the matrix to be factorized. */
+/* On exit, the leading M-by-M upper triangular part of A */
+/* contains the upper triangular matrix R, and elements M+1 to */
+/* N of the first M rows of A, with the array TAU, represent the */
+/* orthogonal matrix Z as a product of M elementary reflectors. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (M) */
+/* The scalar factors of the elementary reflectors. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The factorization is obtained by Householder's method. The kth */
+/* transformation matrix, Z( k ), which is used to introduce zeros into */
+/* the ( m - k + 1 )th row of A, is given in the form */
+
+/* Z( k ) = ( I 0 ), */
+/* ( 0 T( k ) ) */
+
+/* where */
+
+/* T( k ) = I - tau*u( k )*u( k )', u( k ) = ( 1 ), */
+/* ( 0 ) */
+/* ( z( k ) ) */
+
+/* tau is a scalar and z( k ) is an ( n - m ) element vector. */
+/* tau and z( k ) are chosen to annihilate the elements of the kth row */
+/* of X. */
+
+/* The scalar tau is returned in the kth element of TAU and the vector */
+/* u( k ) in the kth row of A, such that the elements of z( k ) are */
+/* in a( k, m + 1 ), ..., a( k, n ). The elements of R are returned in */
+/* the upper triangular part of A. */
+
+/* Z is given by */
+
+/* Z = Z( 1 ) * Z( 2 ) * ... * Z( m ). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* f2c idiom: shift the base pointers so 1-based Fortran indexing */
+/* a(i,j) maps to a[i + j*a_dim1] and tau(k) maps to tau[k]. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < *m) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DTZRQF", &i__1);
+	return 0;
+    }
+
+/* Perform the factorization. */
+
+    if (*m == 0) {
+	return 0;
+    }
+    if (*m == *n) {
+/* Square case: A is already upper triangular, so Z = I and all */
+/* reflector scalars are zero. */
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    tau[i__] = 0.;
+/* L10: */
+	}
+    } else {
+/* Computing MIN */
+	i__1 = *m + 1;
+	m1 = min(i__1,*n);
+/* m1 is the first of the trailing (n-m) columns, which hold the */
+/* z(k) vectors of the reflectors. Rows are eliminated from */
+/* k = m down to 1. */
+	for (k = *m; k >= 1; --k) {
+
+/* Use a Householder reflection to zero the kth row of A. */
+/* First set up the reflection. */
+
+	    i__1 = *n - *m + 1;
+	    dlarfp_(&i__1, &a[k + k * a_dim1], &a[k + m1 * a_dim1], lda, &tau[
+		    k]);
+
+	    if (tau[k] != 0. && k > 1) {
+
+/* We now perform the operation A := A*P( k ). */
+
+/* Use the first ( k - 1 ) elements of TAU to store a( k ), */
+/* where a( k ) consists of the first ( k - 1 ) elements of */
+/* the kth column of A. Also let B denote the first */
+/* ( k - 1 ) rows of the last ( n - m ) columns of A. */
+
+	        i__1 = k - 1;
+	        dcopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &tau[1], &c__1);
+
+/* Form w = a( k ) + B*z( k ) in TAU. */
+
+	        i__1 = k - 1;
+	        i__2 = *n - *m;
+	        dgemv_("No transpose", &i__1, &i__2, &c_b8, &a[m1 * a_dim1 +
+	        	1], lda, &a[k + m1 * a_dim1], lda, &c_b8, &tau[1], &
+	        	c__1);
+
+/* Now form a( k ) := a( k ) - tau*w */
+/* and B := B - tau*w*z( k )'. */
+
+	        i__1 = k - 1;
+	        d__1 = -tau[k];
+	        daxpy_(&i__1, &d__1, &tau[1], &c__1, &a[k * a_dim1 + 1], &
+	        	c__1);
+	        i__1 = k - 1;
+	        i__2 = *n - *m;
+	        d__1 = -tau[k];
+	        dger_(&i__1, &i__2, &d__1, &tau[1], &c__1, &a[k + m1 * a_dim1]
+, lda, &a[m1 * a_dim1 + 1], lda);
+	    }
+/* L20: */
+	}
+    }
+
+    return 0;
+
+/* End of DTZRQF */
+
+} /* dtzrqf_ */
diff --git a/contrib/libs/clapack/dtzrzf.c b/contrib/libs/clapack/dtzrzf.c
new file mode 100644
index 0000000000..ee5e09edd8
--- /dev/null
+++ b/contrib/libs/clapack/dtzrzf.c
@@ -0,0 +1,308 @@
+/* dtzrzf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int dtzrzf_(integer *m, integer *n, doublereal *a, integer *
+	lda, doublereal *tau, doublereal *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+
+    /* Local variables */
+    integer i__, m1, ib, nb, ki, kk, mu, nx, iws, nbmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *), dlarzb_(
+	    char *, char *, char *, char *, integer *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int dlarzt_(char *, char *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *), dlatrz_(integer *, integer *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DTZRZF reduces the M-by-N ( M<=N ) real upper trapezoidal matrix A */
+/* to upper triangular form by means of orthogonal transformations. */
+
+/* The upper trapezoidal matrix A is factored as */
+
+/* A = ( R 0 ) * Z, */
+
+/* where Z is an N-by-N orthogonal matrix and R is an M-by-M upper */
+/* triangular matrix. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= M. */
+
+/* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On entry, the leading M-by-N upper trapezoidal part of the */
+/* array A must contain the matrix to be factorized. */
+/* On exit, the leading M-by-M upper triangular part of A */
+/* contains the upper triangular matrix R, and elements M+1 to */
+/* N of the first M rows of A, with the array TAU, represent the */
+/* orthogonal matrix Z as a product of M elementary reflectors. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) DOUBLE PRECISION array, dimension (M) */
+/* The scalar factors of the elementary reflectors. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* The factorization is obtained by Householder's method. The kth */
+/* transformation matrix, Z( k ), which is used to introduce zeros into */
+/* the ( m - k + 1 )th row of A, is given in the form */
+
+/* Z( k ) = ( I 0 ), */
+/* ( 0 T( k ) ) */
+
+/* where */
+
+/* T( k ) = I - tau*u( k )*u( k )', u( k ) = ( 1 ), */
+/* ( 0 ) */
+/* ( z( k ) ) */
+
+/* tau is a scalar and z( k ) is an ( n - m ) element vector. */
+/* tau and z( k ) are chosen to annihilate the elements of the kth row */
+/* of X. */
+
+/* The scalar tau is returned in the kth element of TAU and the vector */
+/* u( k ) in the kth row of A, such that the elements of z( k ) are */
+/* in a( k, m + 1 ), ..., a( k, n ). The elements of R are returned in */
+/* the upper triangular part of A. */
+
+/* Z is given by */
+
+/* Z = Z( 1 ) * Z( 2 ) * ... * Z( m ). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+/* f2c idiom: shift the base pointers so 1-based Fortran indexing */
+/* a(i,j) maps to a[i + j*a_dim1], tau(k) to tau[k], work(k) to work[k]. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+/* LWORK = -1 requests a workspace-size query only (see LWORK above): */
+/* the optimal size is stored in WORK(1) and the routine returns early. */
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < *m) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+
+    if (*info == 0) {
+	if (*m == 0 || *m == *n) {
+	    lwkopt = 1;
+	} else {
+
+/* Determine the block size. */
+
+	    nb = ilaenv_(&c__1, "DGERQF", " ", m, n, &c_n1, &c_n1);
+	    lwkopt = *m * nb;
+	}
+	work[1] = (doublereal) lwkopt;
+
+	if (*lwork < max(1,*m) && ! lquery) {
+	    *info = -7;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("DTZRZF", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0) {
+	return 0;
+    } else if (*m == *n) {
+/* Square case: A is already upper triangular, so Z = I and all */
+/* reflector scalars are zero. */
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    tau[i__] = 0.;
+/* L10: */
+	}
+	return 0;
+    }
+
+    nbmin = 2;
+    nx = 1;
+    iws = *m;
+    if (nb > 1 && nb < *m) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+	i__1 = 0, i__2 = ilaenv_(&c__3, "DGERQF", " ", m, n, &c_n1, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < *m) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *m;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "DGERQF", " ", m, n, &c_n1, &
+			c_n1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+/* Blocked path: factor ib rows at a time working upward from row m, */
+/* then apply the accumulated block reflector to the rows above. */
+    if (nb >= nbmin && nb < *m && nx < *m) {
+
+/* Use blocked code initially. */
+/* The last kk rows are handled by the block method. */
+
+/* Computing MIN */
+	i__1 = *m + 1;
+	m1 = min(i__1,*n);
+	ki = (*m - nx - 1) / nb * nb;
+/* Computing MIN */
+	i__1 = *m, i__2 = ki + nb;
+	kk = min(i__1,i__2);
+
+	i__1 = *m - kk + 1;
+	i__2 = -nb;
+	for (i__ = *m - kk + ki + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1;
+		i__ += i__2) {
+/* Computing MIN */
+	    i__3 = *m - i__ + 1;
+	    ib = min(i__3,nb);
+
+/* Compute the TZ factorization of the current block */
+/* A(i:i+ib-1,i:n) */
+
+	    i__3 = *n - i__ + 1;
+	    i__4 = *n - *m;
+	    dlatrz_(&ib, &i__3, &i__4, &a[i__ + i__ * a_dim1], lda, &tau[i__],
+		     &work[1]);
+	    if (i__ > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+		i__3 = *n - *m;
+		dlarzt_("Backward", "Rowwise", &i__3, &ib, &a[i__ + m1 *
+			a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(1:i-1,i:n) from the right */
+
+		i__3 = i__ - 1;
+		i__4 = *n - i__ + 1;
+		i__5 = *n - *m;
+		dlarzb_("Right", "No transpose", "Backward", "Rowwise", &i__3,
+			 &i__4, &ib, &i__5, &a[i__ + m1 * a_dim1], lda, &work[
+			1], &ldwork, &a[i__ * a_dim1 + 1], lda, &work[ib + 1],
+			 &ldwork)
+			;
+	    }
+/* L20: */
+	}
+	mu = i__ + nb - 1;
+    } else {
+	mu = *m;
+    }
+
+/* Use unblocked code to factor the last or only block */
+
+    if (mu > 0) {
+	i__2 = *n - *m;
+	dlatrz_(&mu, n, &i__2, &a[a_offset], lda, &tau[1], &work[1]);
+    }
+
+    work[1] = (doublereal) lwkopt;
+
+    return 0;
+
+/* End of DTZRZF */
+
+} /* dtzrzf_ */
diff --git a/contrib/libs/clapack/dzsum1.c b/contrib/libs/clapack/dzsum1.c
new file mode 100644
index 0000000000..9f455e9c69
--- /dev/null
+++ b/contrib/libs/clapack/dzsum1.c
@@ -0,0 +1,114 @@
+/* dzsum1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+doublereal dzsum1_(integer *n, doublecomplex *cx, integer *incx)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    doublereal ret_val;
+
+    /* Builtin functions */
+    double z_abs(doublecomplex *);
+
+    /* Local variables */
+    integer i__, nincx;
+    doublereal stemp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* DZSUM1 takes the sum of the absolute values of a complex */
+/* vector and returns a double precision result. */
+
+/* Based on DZASUM from the Level 1 BLAS. */
+/* The change is to use the 'genuine' absolute value. */
+
+/* Contributed by Nick Higham for use with ZLACON. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of elements in the vector CX. */
+
+/* CX (input) COMPLEX*16 array, dimension (N) */
+/* The vector whose elements will be summed. */
+
+/* INCX (input) INTEGER */
+/* The spacing between successive values of CX. INCX > 0. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* f2c idiom: shift the base pointer so 1-based indexing cx(i) maps */
+/* to cx[i]. */
+
+    /* Parameter adjustments */
+    --cx;
+
+    /* Function Body */
+    ret_val = 0.;
+    stemp = 0.;
+    if (*n <= 0) {
+	return ret_val;
+    }
+    if (*incx == 1) {
+	goto L20;
+    }
+
+/* CODE FOR INCREMENT NOT EQUAL TO 1 */
+
+/* NOTE(review): the documented contract is INCX > 0; the loop below */
+/* is not meaningful for INCX <= 0 -- callers must not pass such values. */
+
+    nincx = *n * *incx;
+    i__1 = nincx;
+    i__2 = *incx;
+    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+
+/* NEXT LINE MODIFIED. */
+
+	stemp += z_abs(&cx[i__]);
+/* L10: */
+    }
+    ret_val = stemp;
+    return ret_val;
+
+/* CODE FOR INCREMENT EQUAL TO 1 */
+
+L20:
+    i__2 = *n;
+    for (i__ = 1; i__ <= i__2; ++i__) {
+
+/* NEXT LINE MODIFIED. */
+
+	stemp += z_abs(&cx[i__]);
+/* L30: */
+    }
+    ret_val = stemp;
+    return ret_val;
+
+/* End of DZSUM1 */
+
+} /* dzsum1_ */
diff --git a/contrib/libs/clapack/icmax1.c b/contrib/libs/clapack/icmax1.c
new file mode 100644
index 0000000000..98f589bb2f
--- /dev/null
+++ b/contrib/libs/clapack/icmax1.c
@@ -0,0 +1,127 @@
+/* icmax1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+integer icmax1_(integer *n, complex *cx, integer *incx)
+{
+    /* System generated locals */
+    integer ret_val, i__1;
+
+    /* Builtin functions */
+    double c_abs(complex *);
+
+    /* Local variables */
+    integer i__, ix;
+    real smax;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ICMAX1 finds the index of the element whose real part has maximum */
+/* absolute value. */
+
+/* Based on ICAMAX from Level 1 BLAS. */
+/* The change is to use the 'genuine' absolute value. */
+
+/* Contributed by Nick Higham for use with CLACON. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of elements in the vector CX. */
+
+/* CX (input) COMPLEX array, dimension (N) */
+/* The vector whose elements will be summed. */
+
+/* INCX (input) INTEGER */
+/* The spacing between successive values of CX. INCX >= 1. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+
+/* NEXT LINE IS THE ONLY MODIFICATION. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* f2c idiom: shift the base pointer so 1-based indexing cx(i) maps */
+/* to cx[i]. Returns 0 for N < 1, otherwise a 1-based index. */
+
+    /* Parameter adjustments */
+    --cx;
+
+    /* Function Body */
+    ret_val = 0;
+    if (*n < 1) {
+	return ret_val;
+    }
+    ret_val = 1;
+    if (*n == 1) {
+	return ret_val;
+    }
+    if (*incx == 1) {
+	goto L30;
+    }
+
+/* CODE FOR INCREMENT NOT EQUAL TO 1 */
+
+    ix = 1;
+    smax = c_abs(&cx[1]);
+    ix += *incx;
+    i__1 = *n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+/* The <= comparison skips equal values, so on ties the first */
+/* (lowest-index) maximum is kept. */
+	if (c_abs(&cx[ix]) <= smax) {
+	    goto L10;
+	}
+	ret_val = i__;
+	smax = c_abs(&cx[ix]);
+L10:
+	ix += *incx;
+/* L20: */
+    }
+    return ret_val;
+
+/* CODE FOR INCREMENT EQUAL TO 1 */
+
+L30:
+    smax = c_abs(&cx[1]);
+    i__1 = *n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+	if (c_abs(&cx[i__]) <= smax) {
+	    goto L40;
+	}
+	ret_val = i__;
+	smax = c_abs(&cx[i__]);
+L40:
+	;
+    }
+    return ret_val;
+
+/* End of ICMAX1 */
+
+} /* icmax1_ */
diff --git a/contrib/libs/clapack/ieeeck.c b/contrib/libs/clapack/ieeeck.c
new file mode 100644
index 0000000000..3d6f0b5834
--- /dev/null
+++ b/contrib/libs/clapack/ieeeck.c
@@ -0,0 +1,166 @@
+/* ieeeck.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+integer ieeeck_(integer *ispec, real *zero, real *one)
+{
+    /* System generated locals */
+    integer ret_val;
+
+    /* Local variables */
+    real nan1, nan2, nan3, nan4, nan5, nan6, neginf, posinf, negzro, newzro;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* IEEECK is called from the ILAENV to verify that Infinity and */
+/* possibly NaN arithmetic is safe (i.e. will not trap). */
+
+/* Arguments */
+/* ========= */
+
+/* ISPEC (input) INTEGER */
+/* Specifies whether to test just for inifinity arithmetic */
+/* or whether to test for infinity and NaN arithmetic. */
+/* = 0: Verify infinity arithmetic only. */
+/* = 1: Verify infinity and NaN arithmetic. */
+
+/* ZERO (input) REAL */
+/* Must contain the value 0.0 */
+/* This is passed to prevent the compiler from optimizing */
+/* away this code. */
+
+/* ONE (input) REAL */
+/* Must contain the value 1.0 */
+/* This is passed to prevent the compiler from optimizing */
+/* away this code. */
+
+/* RETURN VALUE: INTEGER */
+/* = 0: Arithmetic failed to produce the correct answers */
+/* = 1: Arithmetic produced the correct answers */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+    ret_val = 1;
+
+/* Each check below performs an infinity operation and verifies the */
+/* sign/ordering a correct IEEE implementation must produce. */
+
+    posinf = *one / *zero;
+    if (posinf <= *one) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    neginf = -(*one) / *zero;
+    if (neginf >= *zero) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    negzro = *one / (neginf + *one);
+    if (negzro != *zero) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    neginf = *one / negzro;
+    if (neginf >= *zero) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    newzro = negzro + *zero;
+    if (newzro != *zero) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    posinf = *one / newzro;
+    if (posinf <= *one) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    neginf *= posinf;
+    if (neginf >= *zero) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    posinf *= posinf;
+    if (posinf <= *one) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+
+
+
+/* Return if we were only asked to check infinity arithmetic */
+
+    if (*ispec == 0) {
+	return ret_val;
+    }
+
+/* Each expression below must yield NaN under IEEE rules. A NaN */
+/* compares unequal to itself, so if any x == x test succeeds, the */
+/* NaN arithmetic is broken and 0 is returned. */
+
+    nan1 = posinf + neginf;
+
+    nan2 = posinf / neginf;
+
+    nan3 = posinf / posinf;
+
+    nan4 = posinf * *zero;
+
+    nan5 = neginf * negzro;
+
+    nan6 = nan5 * 0.f;
+
+    if (nan1 == nan1) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    if (nan2 == nan2) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    if (nan3 == nan3) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    if (nan4 == nan4) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    if (nan5 == nan5) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    if (nan6 == nan6) {
+	ret_val = 0;
+	return ret_val;
+    }
+
+    return ret_val;
+} /* ieeeck_ */
diff --git a/contrib/libs/clapack/ilaclc.c b/contrib/libs/clapack/ilaclc.c
new file mode 100644
index 0000000000..98a051917c
--- /dev/null
+++ b/contrib/libs/clapack/ilaclc.c
@@ -0,0 +1,94 @@
+/* ilaclc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+integer ilaclc_(integer *m, integer *n, complex *a, integer *lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, ret_val, i__1, i__2;
+
+    /* Local variables */
+    integer i__;
+
+
+/* -- LAPACK auxiliary routine (version 3.2.1) -- */
+
+/* -- April 2009 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ILACLC scans A for its last non-zero column. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The m by n matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick test for the common case where one corner is non-zero. */
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    if (*n == 0) {
+	ret_val = *n;
+    } else /* if(complicated condition) */ {
+/* Check a(1,n) and a(m,n): a complex entry is non-zero when either */
+/* its real or imaginary part is non-zero. */
+	i__1 = *n * a_dim1 + 1;
+	i__2 = *m + *n * a_dim1;
+	if (a[i__1].r != 0.f || a[i__1].i != 0.f || (a[i__2].r != 0.f || a[
+		i__2].i != 0.f)) {
+	    ret_val = *n;
+	} else {
+/* Now scan each column from the end, returning with the first non-zero. */
+	    for (ret_val = *n; ret_val >= 1; --ret_val) {
+		i__1 = *m;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = i__ + ret_val * a_dim1;
+		    if (a[i__2].r != 0.f || a[i__2].i != 0.f) {
+			return ret_val;
+		    }
+		}
+	    }
+	}
+    }
+    return ret_val;
+} /* ilaclc_ */
diff --git a/contrib/libs/clapack/ilaclr.c b/contrib/libs/clapack/ilaclr.c
new file mode 100644
index 0000000000..b28e22e90c
--- /dev/null
+++ b/contrib/libs/clapack/ilaclr.c
@@ -0,0 +1,96 @@
+/* ilaclr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+integer ilaclr_(integer *m, integer *n, complex *a, integer *lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, ret_val, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j;
+
+
+/* -- LAPACK auxiliary routine (version 3.2.1) -- */
+
+/* -- April 2009 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ILACLR scans A for its last non-zero row. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. */
+
+/* A (input) COMPLEX array, dimension (LDA,N) */
+/* The m by n matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick test for the common case where one corner is non-zero. */
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    if (*m == 0) {
+	ret_val = *m;
+    } else /* if(complicated condition) */ {
+/* Check a(m,1) and a(m,n): a complex entry is non-zero when either */
+/* its real or imaginary part is non-zero. */
+	i__1 = *m + a_dim1;
+	i__2 = *m + *n * a_dim1;
+	if (a[i__1].r != 0.f || a[i__1].i != 0.f || (a[i__2].r != 0.f || a[
+		i__2].i != 0.f)) {
+	    ret_val = *m;
+	} else {
+/* Scan up each column tracking the last zero row seen. */
+	    ret_val = 0;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		for (i__ = *m; i__ >= 1; --i__) {
+		    i__2 = i__ + j * a_dim1;
+		    if (a[i__2].r != 0.f || a[i__2].i != 0.f) {
+			break;
+		    }
+		}
+/* If the column is entirely zero the loop runs to completion and */
+/* i__ is 0 here, leaving ret_val unchanged by the max below. */
+		ret_val = max(ret_val,i__);
+	    }
+	}
+    }
+    return ret_val;
+} /* ilaclr_ */
diff --git a/contrib/libs/clapack/iladiag.c b/contrib/libs/clapack/iladiag.c
new file mode 100644
index 0000000000..07cfe63040
--- /dev/null
+++ b/contrib/libs/clapack/iladiag.c
@@ -0,0 +1,65 @@
+/* iladiag.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+integer iladiag_(char *diag)
+{
+ /* System generated locals */
+ integer ret_val;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* October 2008 */
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This subroutine translated from a character string specifying if a */
+/* matrix has unit diagonal or not to the relevant BLAST-specified */
+/* integer constant. */
+
+/* ILADIAG returns an INTEGER. If ILADIAG < 0, then the input is not a */
+/* character indicating a unit or non-unit diagonal. Otherwise ILADIAG */
+/* returns the constant value corresponding to DIAG. */
+
+/* Arguments */
+/* ========= */
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+ if (lsame_(diag, "N")) {
+ ret_val = 131;
+ } else if (lsame_(diag, "U")) {
+ ret_val = 132;
+ } else {
+ ret_val = -1;
+ }
+ return ret_val;
+
+/* End of ILADIAG */
+
+} /* iladiag_ */
diff --git a/contrib/libs/clapack/iladlc.c b/contrib/libs/clapack/iladlc.c
new file mode 100644
index 0000000000..a18f02ed7b
--- /dev/null
+++ b/contrib/libs/clapack/iladlc.c
@@ -0,0 +1,88 @@
+/* iladlc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+integer iladlc_(integer *m, integer *n, doublereal *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, ret_val, i__1;
+
+ /* Local variables */
+ integer i__;
+
+
+/* -- LAPACK auxiliary routine (version 3.2.1) -- */
+
+/* -- April 2009 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ILADLC scans A for its last non-zero column. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The m by n matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick test for the common case where one corner is non-zero. */
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ if (*n == 0) {
+ ret_val = *n;
+ } else if (a[*n * a_dim1 + 1] != 0. || a[*m + *n * a_dim1] != 0.) {
+ ret_val = *n;
+ } else {
+/* Now scan each column from the end, returning with the first non-zero. */
+ for (ret_val = *n; ret_val >= 1; --ret_val) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (a[i__ + ret_val * a_dim1] != 0.) {
+ return ret_val;
+ }
+ }
+ }
+ }
+ return ret_val;
+} /* iladlc_ */
diff --git a/contrib/libs/clapack/iladlr.c b/contrib/libs/clapack/iladlr.c
new file mode 100644
index 0000000000..f1626e4790
--- /dev/null
+++ b/contrib/libs/clapack/iladlr.c
@@ -0,0 +1,90 @@
+/* iladlr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+integer iladlr_(integer *m, integer *n, doublereal *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, ret_val, i__1;
+
+ /* Local variables */
+ integer i__, j;
+
+
+/* -- LAPACK auxiliary routine (version 3.2.1) -- */
+
+/* -- April 2009 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ILADLR scans A for its last non-zero row. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The m by n matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick test for the common case where one corner is non-zero. */
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ if (*m == 0) {
+ ret_val = *m;
+ } else if (a[*m + a_dim1] != 0. || a[*m + *n * a_dim1] != 0.) {
+ ret_val = *m;
+ } else {
+/* Scan up each column tracking the last zero row seen. */
+ ret_val = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ if (a[i__ + j * a_dim1] != 0.) {
+ break;
+ }
+ }
+ ret_val = max(ret_val,i__);
+ }
+ }
+ return ret_val;
+} /* iladlr_ */
diff --git a/contrib/libs/clapack/ilaenv.c b/contrib/libs/clapack/ilaenv.c
new file mode 100644
index 0000000000..fb072301c3
--- /dev/null
+++ b/contrib/libs/clapack/ilaenv.c
@@ -0,0 +1,654 @@
+/* ilaenv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+#include "string.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b163 = 0.f;
+static real c_b164 = 1.f;
+static integer c__0 = 0;
+
+integer ilaenv_(integer *ispec, char *name__, char *opts, integer *n1,
+ integer *n2, integer *n3, integer *n4)
+{
+ /* System generated locals */
+ integer ret_val;
+
+ /* Builtin functions */
+ /* Subroutine */ int s_copy(char *, char *, ftnlen, ftnlen);
+ integer s_cmp(char *, char *, ftnlen, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ char c1[1], c2[1], c3[1], c4[1];
+ integer ic, nb, iz, nx;
+ logical cname;
+ integer nbmin;
+ logical sname;
+ extern integer ieeeck_(integer *, real *, real *);
+ char subnam[1];
+ extern integer iparmq_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+
+ ftnlen name_len, opts_len;
+
+ name_len = strlen (name__);
+ opts_len = strlen (opts);
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* January 2007 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ILAENV is called from the LAPACK routines to choose problem-dependent */
+/* parameters for the local environment. See ISPEC for a description of */
+/* the parameters. */
+
+/* ILAENV returns an INTEGER */
+/* if ILAENV >= 0: ILAENV returns the value of the parameter specified by ISPEC */
+/* if ILAENV < 0: if ILAENV = -k, the k-th argument had an illegal value. */
+
+/* This version provides a set of parameters which should give good, */
+/* but not optimal, performance on many of the currently available */
+/* computers. Users are encouraged to modify this subroutine to set */
+/* the tuning parameters for their particular machine using the option */
+/* and problem size information in the arguments. */
+
+/* This routine will not function correctly if it is converted to all */
+/* lower case. Converting it to all upper case is allowed. */
+
+/* Arguments */
+/* ========= */
+
+/* ISPEC (input) INTEGER */
+/* Specifies the parameter to be returned as the value of */
+/* ILAENV. */
+/* = 1: the optimal blocksize; if this value is 1, an unblocked */
+/* algorithm will give the best performance. */
+/* = 2: the minimum block size for which the block routine */
+/* should be used; if the usable block size is less than */
+/* this value, an unblocked routine should be used. */
+/* = 3: the crossover point (in a block routine, for N less */
+/* than this value, an unblocked routine should be used) */
+/* = 4: the number of shifts, used in the nonsymmetric */
+/* eigenvalue routines (DEPRECATED) */
+/* = 5: the minimum column dimension for blocking to be used; */
+/* rectangular blocks must have dimension at least k by m, */
+/* where k is given by ILAENV(2,...) and m by ILAENV(5,...) */
+/* = 6: the crossover point for the SVD (when reducing an m by n */
+/* matrix to bidiagonal form, if max(m,n)/min(m,n) exceeds */
+/* this value, a QR factorization is used first to reduce */
+/* the matrix to a triangular form.) */
+/* = 7: the number of processors */
+/* = 8: the crossover point for the multishift QR method */
+/* for nonsymmetric eigenvalue problems (DEPRECATED) */
+/* = 9: maximum size of the subproblems at the bottom of the */
+/* computation tree in the divide-and-conquer algorithm */
+/* (used by xGELSD and xGESDD) */
+/* =10: ieee NaN arithmetic can be trusted not to trap */
+/* =11: infinity arithmetic can be trusted not to trap */
+/* 12 <= ISPEC <= 16: */
+/* xHSEQR or one of its subroutines, */
+/* see IPARMQ for detailed explanation */
+
+/* NAME (input) CHARACTER*(*) */
+/* The name of the calling subroutine, in either upper case or */
+/* lower case. */
+
+/* OPTS (input) CHARACTER*(*) */
+/* The character options to the subroutine NAME, concatenated */
+/* into a single character string. For example, UPLO = 'U', */
+/* TRANS = 'T', and DIAG = 'N' for a triangular routine would */
+/* be specified as OPTS = 'UTN'. */
+
+/* N1 (input) INTEGER */
+/* N2 (input) INTEGER */
+/* N3 (input) INTEGER */
+/* N4 (input) INTEGER */
+/* Problem dimensions for the subroutine NAME; these may not all */
+/* be required. */
+
+/* Further Details */
+/* =============== */
+
+/* The following conventions have been used when calling ILAENV from the */
+/* LAPACK routines: */
+/* 1) OPTS is a concatenation of all of the character options to */
+/* subroutine NAME, in the same order that they appear in the */
+/* argument list for NAME, even if they are not used in determining */
+/* the value of the parameter specified by ISPEC. */
+/* 2) The problem dimensions N1, N2, N3, N4 are specified in the order */
+/* that they appear in the argument list for NAME. N1 is used */
+/* first, N2 second, and so on, and unused problem dimensions are */
+/* passed a value of -1. */
+/* 3) The parameter value returned by ILAENV is checked for validity in */
+/* the calling subroutine. For example, ILAENV is used to retrieve */
+/* the optimal blocksize for STRTRI as follows: */
+
+/* NB = ILAENV( 1, 'STRTRI', UPLO // DIAG, N, -1, -1, -1 ) */
+/* IF( NB.LE.1 ) NB = MAX( 1, N ) */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ switch (*ispec) {
+ case 1: goto L10;
+ case 2: goto L10;
+ case 3: goto L10;
+ case 4: goto L80;
+ case 5: goto L90;
+ case 6: goto L100;
+ case 7: goto L110;
+ case 8: goto L120;
+ case 9: goto L130;
+ case 10: goto L140;
+ case 11: goto L150;
+ case 12: goto L160;
+ case 13: goto L160;
+ case 14: goto L160;
+ case 15: goto L160;
+ case 16: goto L160;
+ }
+
+/* Invalid value for ISPEC */
+
+ ret_val = -1;
+ return ret_val;
+
+L10:
+
+/* Convert NAME to upper case if the first character is lower case. */
+
+ ret_val = 1;
+ s_copy(subnam, name__, (ftnlen)1, name_len);
+ ic = *(unsigned char *)subnam;
+ iz = 'Z';
+ if (iz == 90 || iz == 122) {
+
+/* ASCII character set */
+
+ if (ic >= 97 && ic <= 122) {
+ *(unsigned char *)subnam = (char) (ic - 32);
+ for (i__ = 2; i__ <= 6; ++i__) {
+ ic = *(unsigned char *)&subnam[i__ - 1];
+ if (ic >= 97 && ic <= 122) {
+ *(unsigned char *)&subnam[i__ - 1] = (char) (ic - 32);
+ }
+/* L20: */
+ }
+ }
+
+ } else if (iz == 233 || iz == 169) {
+
+/* EBCDIC character set */
+
+ if (ic >= 129 && ic <= 137 || ic >= 145 && ic <= 153 || ic >= 162 &&
+ ic <= 169) {
+ *(unsigned char *)subnam = (char) (ic + 64);
+ for (i__ = 2; i__ <= 6; ++i__) {
+ ic = *(unsigned char *)&subnam[i__ - 1];
+ if (ic >= 129 && ic <= 137 || ic >= 145 && ic <= 153 || ic >=
+ 162 && ic <= 169) {
+ *(unsigned char *)&subnam[i__ - 1] = (char) (ic + 64);
+ }
+/* L30: */
+ }
+ }
+
+ } else if (iz == 218 || iz == 250) {
+
+/* Prime machines: ASCII+128 */
+
+ if (ic >= 225 && ic <= 250) {
+ *(unsigned char *)subnam = (char) (ic - 32);
+ for (i__ = 2; i__ <= 6; ++i__) {
+ ic = *(unsigned char *)&subnam[i__ - 1];
+ if (ic >= 225 && ic <= 250) {
+ *(unsigned char *)&subnam[i__ - 1] = (char) (ic - 32);
+ }
+/* L40: */
+ }
+ }
+ }
+
+ *(unsigned char *)c1 = *(unsigned char *)subnam;
+ sname = *(unsigned char *)c1 == 'S' || *(unsigned char *)c1 == 'D';
+ cname = *(unsigned char *)c1 == 'C' || *(unsigned char *)c1 == 'Z';
+ if (! (cname || sname)) {
+ return ret_val;
+ }
+ s_copy(c2, subnam + 1, (ftnlen)1, (ftnlen)2);
+ s_copy(c3, subnam + 3, (ftnlen)1, (ftnlen)3);
+ s_copy(c4, c3 + 1, (ftnlen)1, (ftnlen)2);
+
+ switch (*ispec) {
+ case 1: goto L50;
+ case 2: goto L60;
+ case 3: goto L70;
+ }
+
+L50:
+
+/* ISPEC = 1: block size */
+
+/* In these examples, separate code is provided for setting NB for */
+/* real and complex. We assume that NB will take the same value in */
+/* single or double precision. */
+
+ nb = 1;
+
+ if (s_cmp(c2, "GE", (ftnlen)1, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRF", (ftnlen)1, (ftnlen)3) == 0) {
+ if (sname) {
+ nb = 64;
+ } else {
+ nb = 64;
+ }
+ } else if (s_cmp(c3, "QRF", (ftnlen)1, (ftnlen)3) == 0 || s_cmp(c3,
+ "RQF", (ftnlen)1, (ftnlen)3) == 0 || s_cmp(c3, "LQF", (ftnlen)
+ 1, (ftnlen)3) == 0 || s_cmp(c3, "QLF", (ftnlen)1, (ftnlen)3)
+ == 0) {
+ if (sname) {
+ nb = 32;
+ } else {
+ nb = 32;
+ }
+ } else if (s_cmp(c3, "HRD", (ftnlen)1, (ftnlen)3) == 0) {
+ if (sname) {
+ nb = 32;
+ } else {
+ nb = 32;
+ }
+ } else if (s_cmp(c3, "BRD", (ftnlen)1, (ftnlen)3) == 0) {
+ if (sname) {
+ nb = 32;
+ } else {
+ nb = 32;
+ }
+ } else if (s_cmp(c3, "TRI", (ftnlen)1, (ftnlen)3) == 0) {
+ if (sname) {
+ nb = 64;
+ } else {
+ nb = 64;
+ }
+ }
+ } else if (s_cmp(c2, "PO", (ftnlen)1, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRF", (ftnlen)1, (ftnlen)3) == 0) {
+ if (sname) {
+ nb = 64;
+ } else {
+ nb = 64;
+ }
+ }
+ } else if (s_cmp(c2, "SY", (ftnlen)1, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRF", (ftnlen)1, (ftnlen)3) == 0) {
+ if (sname) {
+ nb = 64;
+ } else {
+ nb = 64;
+ }
+ } else if (sname && s_cmp(c3, "TRD", (ftnlen)1, (ftnlen)3) == 0) {
+ nb = 32;
+ } else if (sname && s_cmp(c3, "GST", (ftnlen)1, (ftnlen)3) == 0) {
+ nb = 64;
+ }
+ } else if (cname && s_cmp(c2, "HE", (ftnlen)1, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRF", (ftnlen)1, (ftnlen)3) == 0) {
+ nb = 64;
+ } else if (s_cmp(c3, "TRD", (ftnlen)1, (ftnlen)3) == 0) {
+ nb = 32;
+ } else if (s_cmp(c3, "GST", (ftnlen)1, (ftnlen)3) == 0) {
+ nb = 64;
+ }
+ } else if (sname && s_cmp(c2, "OR", (ftnlen)1, (ftnlen)2) == 0) {
+ if (*(unsigned char *)c3 == 'G') {
+ if (s_cmp(c4, "QR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+ (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)1, (
+ ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)1, (ftnlen)2) ==
+ 0 || s_cmp(c4, "HR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(
+ c4, "TR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+ ftnlen)1, (ftnlen)2) == 0) {
+ nb = 32;
+ }
+ } else if (*(unsigned char *)c3 == 'M') {
+ if (s_cmp(c4, "QR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+ (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)1, (
+ ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)1, (ftnlen)2) ==
+ 0 || s_cmp(c4, "HR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(
+ c4, "TR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+ ftnlen)1, (ftnlen)2) == 0) {
+ nb = 32;
+ }
+ }
+ } else if (cname && s_cmp(c2, "UN", (ftnlen)1, (ftnlen)2) == 0) {
+ if (*(unsigned char *)c3 == 'G') {
+ if (s_cmp(c4, "QR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+ (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)1, (
+ ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)1, (ftnlen)2) ==
+ 0 || s_cmp(c4, "HR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(
+ c4, "TR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+ ftnlen)1, (ftnlen)2) == 0) {
+ nb = 32;
+ }
+ } else if (*(unsigned char *)c3 == 'M') {
+ if (s_cmp(c4, "QR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+ (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)1, (
+ ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)1, (ftnlen)2) ==
+ 0 || s_cmp(c4, "HR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(
+ c4, "TR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+ ftnlen)1, (ftnlen)2) == 0) {
+ nb = 32;
+ }
+ }
+ } else if (s_cmp(c2, "GB", (ftnlen)1, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRF", (ftnlen)1, (ftnlen)3) == 0) {
+ if (sname) {
+ if (*n4 <= 64) {
+ nb = 1;
+ } else {
+ nb = 32;
+ }
+ } else {
+ if (*n4 <= 64) {
+ nb = 1;
+ } else {
+ nb = 32;
+ }
+ }
+ }
+ } else if (s_cmp(c2, "PB", (ftnlen)1, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRF", (ftnlen)1, (ftnlen)3) == 0) {
+ if (sname) {
+ if (*n2 <= 64) {
+ nb = 1;
+ } else {
+ nb = 32;
+ }
+ } else {
+ if (*n2 <= 64) {
+ nb = 1;
+ } else {
+ nb = 32;
+ }
+ }
+ }
+ } else if (s_cmp(c2, "TR", (ftnlen)1, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRI", (ftnlen)1, (ftnlen)3) == 0) {
+ if (sname) {
+ nb = 64;
+ } else {
+ nb = 64;
+ }
+ }
+ } else if (s_cmp(c2, "LA", (ftnlen)1, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "UUM", (ftnlen)1, (ftnlen)3) == 0) {
+ if (sname) {
+ nb = 64;
+ } else {
+ nb = 64;
+ }
+ }
+ } else if (sname && s_cmp(c2, "ST", (ftnlen)1, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "EBZ", (ftnlen)1, (ftnlen)3) == 0) {
+ nb = 1;
+ }
+ }
+ ret_val = nb;
+ return ret_val;
+
+L60:
+
+/* ISPEC = 2: minimum block size */
+
+ nbmin = 2;
+ if (s_cmp(c2, "GE", (ftnlen)1, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "QRF", (ftnlen)1, (ftnlen)3) == 0 || s_cmp(c3, "RQF", (
+ ftnlen)1, (ftnlen)3) == 0 || s_cmp(c3, "LQF", (ftnlen)1, (
+ ftnlen)3) == 0 || s_cmp(c3, "QLF", (ftnlen)1, (ftnlen)3) == 0)
+ {
+ if (sname) {
+ nbmin = 2;
+ } else {
+ nbmin = 2;
+ }
+ } else if (s_cmp(c3, "HRD", (ftnlen)1, (ftnlen)3) == 0) {
+ if (sname) {
+ nbmin = 2;
+ } else {
+ nbmin = 2;
+ }
+ } else if (s_cmp(c3, "BRD", (ftnlen)1, (ftnlen)3) == 0) {
+ if (sname) {
+ nbmin = 2;
+ } else {
+ nbmin = 2;
+ }
+ } else if (s_cmp(c3, "TRI", (ftnlen)1, (ftnlen)3) == 0) {
+ if (sname) {
+ nbmin = 2;
+ } else {
+ nbmin = 2;
+ }
+ }
+ } else if (s_cmp(c2, "SY", (ftnlen)1, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRF", (ftnlen)1, (ftnlen)3) == 0) {
+ if (sname) {
+ nbmin = 8;
+ } else {
+ nbmin = 8;
+ }
+ } else if (sname && s_cmp(c3, "TRD", (ftnlen)1, (ftnlen)3) == 0) {
+ nbmin = 2;
+ }
+ } else if (cname && s_cmp(c2, "HE", (ftnlen)1, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRD", (ftnlen)1, (ftnlen)3) == 0) {
+ nbmin = 2;
+ }
+ } else if (sname && s_cmp(c2, "OR", (ftnlen)1, (ftnlen)2) == 0) {
+ if (*(unsigned char *)c3 == 'G') {
+ if (s_cmp(c4, "QR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+ (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)1, (
+ ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)1, (ftnlen)2) ==
+ 0 || s_cmp(c4, "HR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(
+ c4, "TR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+ ftnlen)1, (ftnlen)2) == 0) {
+ nbmin = 2;
+ }
+ } else if (*(unsigned char *)c3 == 'M') {
+ if (s_cmp(c4, "QR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+ (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)1, (
+ ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)1, (ftnlen)2) ==
+ 0 || s_cmp(c4, "HR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(
+ c4, "TR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+ ftnlen)1, (ftnlen)2) == 0) {
+ nbmin = 2;
+ }
+ }
+ } else if (cname && s_cmp(c2, "UN", (ftnlen)1, (ftnlen)2) == 0) {
+ if (*(unsigned char *)c3 == 'G') {
+ if (s_cmp(c4, "QR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+ (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)1, (
+ ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)1, (ftnlen)2) ==
+ 0 || s_cmp(c4, "HR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(
+ c4, "TR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+ ftnlen)1, (ftnlen)2) == 0) {
+ nbmin = 2;
+ }
+ } else if (*(unsigned char *)c3 == 'M') {
+ if (s_cmp(c4, "QR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+ (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)1, (
+ ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)1, (ftnlen)2) ==
+ 0 || s_cmp(c4, "HR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(
+ c4, "TR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+ ftnlen)1, (ftnlen)2) == 0) {
+ nbmin = 2;
+ }
+ }
+ }
+ ret_val = nbmin;
+ return ret_val;
+
+L70:
+
+/* ISPEC = 3: crossover point */
+
+ nx = 0;
+ if (s_cmp(c2, "GE", (ftnlen)1, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "QRF", (ftnlen)1, (ftnlen)3) == 0 || s_cmp(c3, "RQF", (
+ ftnlen)1, (ftnlen)3) == 0 || s_cmp(c3, "LQF", (ftnlen)1, (
+ ftnlen)3) == 0 || s_cmp(c3, "QLF", (ftnlen)1, (ftnlen)3) == 0)
+ {
+ if (sname) {
+ nx = 128;
+ } else {
+ nx = 128;
+ }
+ } else if (s_cmp(c3, "HRD", (ftnlen)1, (ftnlen)3) == 0) {
+ if (sname) {
+ nx = 128;
+ } else {
+ nx = 128;
+ }
+ } else if (s_cmp(c3, "BRD", (ftnlen)1, (ftnlen)3) == 0) {
+ if (sname) {
+ nx = 128;
+ } else {
+ nx = 128;
+ }
+ }
+ } else if (s_cmp(c2, "SY", (ftnlen)1, (ftnlen)2) == 0) {
+ if (sname && s_cmp(c3, "TRD", (ftnlen)1, (ftnlen)3) == 0) {
+ nx = 32;
+ }
+ } else if (cname && s_cmp(c2, "HE", (ftnlen)1, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRD", (ftnlen)1, (ftnlen)3) == 0) {
+ nx = 32;
+ }
+ } else if (sname && s_cmp(c2, "OR", (ftnlen)1, (ftnlen)2) == 0) {
+ if (*(unsigned char *)c3 == 'G') {
+ if (s_cmp(c4, "QR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+ (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)1, (
+ ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)1, (ftnlen)2) ==
+ 0 || s_cmp(c4, "HR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(
+ c4, "TR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+ ftnlen)1, (ftnlen)2) == 0) {
+ nx = 128;
+ }
+ }
+ } else if (cname && s_cmp(c2, "UN", (ftnlen)1, (ftnlen)2) == 0) {
+ if (*(unsigned char *)c3 == 'G') {
+ if (s_cmp(c4, "QR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
+ (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)1, (
+ ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)1, (ftnlen)2) ==
+ 0 || s_cmp(c4, "HR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(
+ c4, "TR", (ftnlen)1, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
+ ftnlen)1, (ftnlen)2) == 0) {
+ nx = 128;
+ }
+ }
+ }
+ ret_val = nx;
+ return ret_val;
+
+L80:
+
+/* ISPEC = 4: number of shifts (used by xHSEQR) */
+
+ ret_val = 6;
+ return ret_val;
+
+L90:
+
+/* ISPEC = 5: minimum column dimension (not used) */
+
+ ret_val = 2;
+ return ret_val;
+
+L100:
+
+/* ISPEC = 6: crossover point for SVD (used by xGELSS and xGESVD) */
+
+ ret_val = (integer) ((real) min(*n1,*n2) * 1.6f);
+ return ret_val;
+
+L110:
+
+/* ISPEC = 7: number of processors (not used) */
+
+ ret_val = 1;
+ return ret_val;
+
+L120:
+
+/* ISPEC = 8: crossover point for multishift (used by xHSEQR) */
+
+ ret_val = 50;
+ return ret_val;
+
+L130:
+
+/* ISPEC = 9: maximum size of the subproblems at the bottom of the */
+/* computation tree in the divide-and-conquer algorithm */
+/* (used by xGELSD and xGESDD) */
+
+ ret_val = 25;
+ return ret_val;
+
+L140:
+
+/* ISPEC = 10: ieee NaN arithmetic can be trusted not to trap */
+
+/* ILAENV = 0 */
+ ret_val = 1;
+ if (ret_val == 1) {
+ ret_val = ieeeck_(&c__1, &c_b163, &c_b164);
+ }
+ return ret_val;
+
+L150:
+
+/* ISPEC = 11: infinity arithmetic can be trusted not to trap */
+
+/* ILAENV = 0 */
+ ret_val = 1;
+ if (ret_val == 1) {
+ ret_val = ieeeck_(&c__0, &c_b163, &c_b164);
+ }
+ return ret_val;
+
+L160:
+
+/* 12 <= ISPEC <= 16: xHSEQR or one of its subroutines. */
+
+ ret_val = iparmq_(ispec, name__, opts, n1, n2, n3, n4)
+ ;
+ return ret_val;
+
+/* End of ILAENV */
+
+} /* ilaenv_ */
diff --git a/contrib/libs/clapack/ilaprec.c b/contrib/libs/clapack/ilaprec.c
new file mode 100644
index 0000000000..62b46a13f0
--- /dev/null
+++ b/contrib/libs/clapack/ilaprec.c
@@ -0,0 +1,72 @@
+/* ilaprec.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+integer ilaprec_(char *prec)
+{
+ /* System generated locals */
+ integer ret_val;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* October 2008 */
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This subroutine translated from a character string specifying an */
+/* intermediate precision to the relevant BLAST-specified integer */
+/* constant. */
+
+/* ILAPREC returns an INTEGER. If ILAPREC < 0, then the input is not a */
+/* character indicating a supported intermediate precision. Otherwise */
+/* ILAPREC returns the constant value corresponding to PREC. */
+
+/* Arguments */
+/* ========= */
+/* PREC (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'S': Single */
+/* = 'D': Double */
+/* = 'I': Indigenous */
+/* = 'X', 'E': Extra */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+ if (lsame_(prec, "S")) {
+ ret_val = 211;
+ } else if (lsame_(prec, "D")) {
+ ret_val = 212;
+ } else if (lsame_(prec, "I")) {
+ ret_val = 213;
+ } else if (lsame_(prec, "X") || lsame_(prec, "E")) {
+ ret_val = 214;
+ } else {
+ ret_val = -1;
+ }
+ return ret_val;
+
+/* End of ILAPREC */
+
+} /* ilaprec_ */
diff --git a/contrib/libs/clapack/ilaslc.c b/contrib/libs/clapack/ilaslc.c
new file mode 100644
index 0000000000..c87708470a
--- /dev/null
+++ b/contrib/libs/clapack/ilaslc.c
@@ -0,0 +1,88 @@
+/* ilaslc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+integer ilaslc_(integer *m, integer *n, real *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, ret_val, i__1;
+
+ /* Local variables */
+ integer i__;
+
+
+/* -- LAPACK auxiliary routine (version 3.2.1) -- */
+
+/* -- April 2009 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ILASLC scans A for its last non-zero column. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The m by n matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick test for the common case where one corner is non-zero. */
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ if (*n == 0) {
+ ret_val = *n;
+ } else if (a[*n * a_dim1 + 1] != 0.f || a[*m + *n * a_dim1] != 0.f) {
+ ret_val = *n;
+ } else {
+/* Now scan each column from the end, returning with the first non-zero. */
+ for (ret_val = *n; ret_val >= 1; --ret_val) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (a[i__ + ret_val * a_dim1] != 0.f) {
+ return ret_val;
+ }
+ }
+ }
+ }
+ return ret_val;
+} /* ilaslc_ */
diff --git a/contrib/libs/clapack/ilaslr.c b/contrib/libs/clapack/ilaslr.c
new file mode 100644
index 0000000000..0aaa4a0fbe
--- /dev/null
+++ b/contrib/libs/clapack/ilaslr.c
@@ -0,0 +1,90 @@
+/* ilaslr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+integer ilaslr_(integer *m, integer *n, real *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, ret_val, i__1;
+
+ /* Local variables */
+ integer i__, j;
+
+
+/* -- LAPACK auxiliary routine (version 3.2.1) -- */
+
+/* -- April 2009 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ILASLR scans A for its last non-zero row. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The m by n matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick test for the common case where one corner is non-zero. */
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ if (*m == 0) {
+ ret_val = *m;
+ } else if (a[*m + a_dim1] != 0.f || a[*m + *n * a_dim1] != 0.f) {
+ ret_val = *m;
+ } else {
+/* Scan up each column tracking the last zero row seen. */
+ ret_val = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ if (a[i__ + j * a_dim1] != 0.f) {
+ break;
+ }
+ }
+ ret_val = max(ret_val,i__);
+ }
+ }
+ return ret_val;
+} /* ilaslr_ */
diff --git a/contrib/libs/clapack/ilatrans.c b/contrib/libs/clapack/ilatrans.c
new file mode 100644
index 0000000000..afad201c80
--- /dev/null
+++ b/contrib/libs/clapack/ilatrans.c
@@ -0,0 +1,69 @@
+/* ilatrans.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+integer ilatrans_(char *trans)
+{
+ /* System generated locals */
+ integer ret_val;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* October 2008 */
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This subroutine translates from a character string specifying a */
+/* transposition operation to the relevant BLAST-specified integer */
+/* constant. */
+
+/* ILATRANS returns an INTEGER. If ILATRANS < 0, then the input is not */
+/* a character indicating a transposition operator. Otherwise ILATRANS */
+/* returns the constant value corresponding to TRANS. */
+
+/* Arguments */
+/* ========= */
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': No transpose */
+/* = 'T': Transpose */
+/* = 'C': Conjugate transpose */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+ if (lsame_(trans, "N")) {
+ ret_val = 111;
+ } else if (lsame_(trans, "T")) {
+ ret_val = 112;
+ } else if (lsame_(trans, "C")) {
+ ret_val = 113;
+ } else {
+ ret_val = -1;
+ }
+ return ret_val;
+
+/* End of ILATRANS */
+
+} /* ilatrans_ */
diff --git a/contrib/libs/clapack/ilauplo.c b/contrib/libs/clapack/ilauplo.c
new file mode 100644
index 0000000000..3469f241fe
--- /dev/null
+++ b/contrib/libs/clapack/ilauplo.c
@@ -0,0 +1,65 @@
+/* ilauplo.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* ILAUPLO: map an upper/lower-triangular character ('U'/'L', case-insensitive
+   via lsame_) to its BLAST integer constant; -1 flags invalid input. */
+integer ilauplo_(char *uplo)
+{
+    /* System generated locals */
+    integer ret_val;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);  /* case-insensitive char compare */
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     October 2008 */
+/*     .. Scalar Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  This subroutine translated from a character string specifying a */
+/*  upper- or lower-triangular matrix to the relevant BLAST-specified */
+/*  integer constant. */
+
+/*  ILAUPLO returns an INTEGER.  If ILAUPLO < 0, then the input is not */
+/*  a character indicating an upper- or lower-triangular matrix. */
+/*  Otherwise ILAUPLO returns the constant value corresponding to UPLO. */
+
+/*  Arguments */
+/*  ========= */
+/*  UPLO    (input) CHARACTER */
+/*          = 'U':  A is upper triangular; */
+/*          = 'L':  A is lower triangular. */
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+    /* BLAST constants: 121 = upper triangular, 122 = lower triangular. */
+    if (lsame_(uplo, "U")) {
+        ret_val = 121;
+    } else if (lsame_(uplo, "L")) {
+        ret_val = 122;
+    } else {
+        ret_val = -1;
+    }
+    return ret_val;
+
+/*     End of ILAUPLO */
+
+} /* ilauplo_ */
diff --git a/contrib/libs/clapack/ilaver.c b/contrib/libs/clapack/ilaver.c
new file mode 100644
index 0000000000..7ab30d6b76
--- /dev/null
+++ b/contrib/libs/clapack/ilaver.c
@@ -0,0 +1,50 @@
+/* ilaver.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* ILAVER: report the LAPACK version this translation was generated from
+   (major.minor.patch = 3.2.0) through the three output arguments.
+   Always returns 0 (f2c convention for a Fortran SUBROUTINE). */
+/* Subroutine */ int ilaver_(integer *vers_major__, integer *vers_minor__,
+        integer *vers_patch__)
+{
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2008 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  This subroutine return the Lapack version. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  VERS_MAJOR   (output) INTEGER */
+/*      return the lapack major version */
+/*  VERS_MINOR   (output) INTEGER */
+/*      return the lapack minor version from the major version */
+/*  VERS_PATCH   (output) INTEGER */
+/*      return the lapack patch version from the minor version */
+
+/*  .. Executable Statements .. */
+
+    *vers_major__ = 3;
+    *vers_minor__ = 2;
+    *vers_patch__ = 0;
+/*  ===================================================================== */
+
+    return 0;
+} /* ilaver_ */
diff --git a/contrib/libs/clapack/ilazlc.c b/contrib/libs/clapack/ilazlc.c
new file mode 100644
index 0000000000..2b20f40199
--- /dev/null
+++ b/contrib/libs/clapack/ilazlc.c
@@ -0,0 +1,94 @@
+/* ilazlc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* ILAZLC: return the (1-based) index of the last column of the m-by-n
+   complex matrix A containing a non-zero entry; 0 when n == 0 or A is
+   entirely zero. */
+integer ilazlc_(integer *m, integer *n, doublecomplex *a, integer *lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, ret_val, i__1, i__2;
+
+    /* Local variables */
+    integer i__;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2.1)                        -- */
+
+/*  -- April 2009                                                      -- */
+
+/*  -- LAPACK is a software package provided by Univ. of Tennessee,    -- */
+/*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ILAZLC scans A for its last non-zero column. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix A. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix A. */
+
+/*  A       (input) COMPLEX*16 array, dimension (LDA,N) */
+/*          The m by n matrix A. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A. LDA >= max(1,M). */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Quick test for the common case where one corner is non-zero. */
+    /* Parameter adjustments: shift a so the Fortran-style 1-based
+       expression a[i + j*a_dim1] addresses element (i,j). */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    if (*n == 0) {
+        ret_val = *n;
+    } else /* if(complicated condition) */ {
+        /* i__1 -> element (1,n), i__2 -> element (m,n): the two ends of the
+           last column; if either is non-zero the answer is n immediately. */
+        i__1 = *n * a_dim1 + 1;
+        i__2 = *m + *n * a_dim1;
+        if (a[i__1].r != 0. || a[i__1].i != 0. || (a[i__2].r != 0. || a[i__2]
+                .i != 0.)) {
+            ret_val = *n;
+        } else {
+/*     Now scan each column from the end, returning with the first non-zero. */
+            for (ret_val = *n; ret_val >= 1; --ret_val) {
+                i__1 = *m;
+                for (i__ = 1; i__ <= i__1; ++i__) {
+                    i__2 = i__ + ret_val * a_dim1;
+                    if (a[i__2].r != 0. || a[i__2].i != 0.) {
+                        return ret_val;
+                    }
+                }
+            }
+            /* Loop fell through: A is entirely zero, ret_val ends at 0. */
+        }
+    }
+    return ret_val;
+} /* ilazlc_ */
diff --git a/contrib/libs/clapack/ilazlr.c b/contrib/libs/clapack/ilazlr.c
new file mode 100644
index 0000000000..373d077b55
--- /dev/null
+++ b/contrib/libs/clapack/ilazlr.c
@@ -0,0 +1,96 @@
+/* ilazlr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* ILAZLR: return the (1-based) index of the last row of the m-by-n
+   complex matrix A containing a non-zero entry; 0 when m == 0 or A is
+   entirely zero. */
+integer ilazlr_(integer *m, integer *n, doublecomplex *a, integer *lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, ret_val, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2.1)                        -- */
+
+/*  -- April 2009                                                      -- */
+
+/*  -- LAPACK is a software package provided by Univ. of Tennessee,    -- */
+/*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ILAZLR scans A for its last non-zero row. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix A. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix A. */
+
+/*  A       (input) COMPLEX*16 array, dimension (LDA,N) */
+/*          The m by n matrix A. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A. LDA >= max(1,M). */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Quick test for the common case where one corner is non-zero. */
+    /* Parameter adjustments: shift a so the Fortran-style 1-based
+       expression a[i + j*a_dim1] addresses element (i,j). */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    if (*m == 0) {
+        ret_val = *m;
+    } else /* if(complicated condition) */ {
+        /* i__1 -> element (m,1), i__2 -> element (m,n): the two ends of the
+           last row; if either is non-zero the answer is m immediately. */
+        i__1 = *m + a_dim1;
+        i__2 = *m + *n * a_dim1;
+        if (a[i__1].r != 0. || a[i__1].i != 0. || (a[i__2].r != 0. || a[i__2]
+                .i != 0.)) {
+            ret_val = *m;
+        } else {
+/*     Scan up each column tracking the last zero row seen. */
+            ret_val = 0;
+            i__1 = *n;
+            for (j = 1; j <= i__1; ++j) {
+                for (i__ = *m; i__ >= 1; --i__) {
+                    i__2 = i__ + j * a_dim1;
+                    if (a[i__2].r != 0. || a[i__2].i != 0.) {
+                        break;
+                    }
+                }
+                /* i__ is the last non-zero row of column j, or 0 if the
+                   column is all zero; max is the f2c.h macro. */
+                ret_val = max(ret_val,i__);
+            }
+        }
+    }
+    return ret_val;
+} /* ilazlr_ */
diff --git a/contrib/libs/clapack/iparmq.c b/contrib/libs/clapack/iparmq.c
new file mode 100644
index 0000000000..13fb9aa7d9
--- /dev/null
+++ b/contrib/libs/clapack/iparmq.c
@@ -0,0 +1,282 @@
+/* iparmq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* IPARMQ: tuning parameters for the xHSEQR family (called by ILAENV for
+   ISPEC 12..16).  Returns the requested parameter, or -1 for an
+   unsupported ISPEC value. */
+integer iparmq_(integer *ispec, char *name__, char *opts, integer *n, integer
+        *ilo, integer *ihi, integer *lwork)
+{
+    /* System generated locals */
+    integer ret_val, i__1, i__2;
+    real r__1;
+
+    /* Builtin functions */
+    double log(doublereal);
+    integer i_nint(real *);  /* f2c NINT: round to nearest integer */
+
+    /* Local variables */
+    integer nh, ns;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*       This program sets problem and machine dependent parameters */
+/*       useful for xHSEQR and its subroutines. It is called whenever */
+/*       ILAENV is called with 12 <= ISPEC <= 16 */
+
+/*  Arguments */
+/*  ========= */
+
+/*       ISPEC  (input) integer scalar */
+/*              ISPEC specifies which tunable parameter IPARMQ should */
+/*              return. */
+
+/*              ISPEC=12: (INMIN)  Matrices of order nmin or less */
+/*                        are sent directly to xLAHQR, the implicit */
+/*                        double shift QR algorithm.  NMIN must be */
+/*                        at least 11. */
+
+/*              ISPEC=13: (INWIN)  Size of the deflation window. */
+/*                        This is best set greater than or equal to */
+/*                        the number of simultaneous shifts NS. */
+/*                        Larger matrices benefit from larger deflation */
+/*                        windows. */
+
+/*              ISPEC=14: (INIBL)  Determines when to stop nibbling and */
+/*                        invest in an (expensive) multi-shift QR sweep. */
+/*                        If the aggressive early deflation subroutine */
+/*                        finds LD converged eigenvalues from an order */
+/*                        NW deflation window and LD.GT.(NW*NIBBLE)/100, */
+/*                        then the next QR sweep is skipped and early */
+/*                        deflation is applied immediately to the */
+/*                        remaining active diagonal block.  Setting */
+/*                        IPARMQ(ISPEC=14) = 0 causes TTQRE to skip a */
+/*                        multi-shift QR sweep whenever early deflation */
+/*                        finds a converged eigenvalue.  Setting */
+/*                        IPARMQ(ISPEC=14) greater than or equal to 100 */
+/*                        prevents TTQRE from skipping a multi-shift */
+/*                        QR sweep. */
+
+/*              ISPEC=15: (NSHFTS) The number of simultaneous shifts in */
+/*                        a multi-shift QR iteration. */
+
+/*              ISPEC=16: (IACC22) IPARMQ is set to 0, 1 or 2 with the */
+/*                        following meanings. */
+/*                        0:  During the multi-shift QR sweep, */
+/*                            xLAQR5 does not accumulate reflections and */
+/*                            does not use matrix-matrix multiply to */
+/*                            update the far-from-diagonal matrix */
+/*                            entries. */
+/*                        1:  During the multi-shift QR sweep, */
+/*                            xLAQR5 and/or xLAQR3 accumulates reflections */
+/*                            and uses matrix-matrix multiply to update the */
+/*                            far-from-diagonal matrix entries. */
+/*                        2:  During the multi-shift QR sweep. */
+/*                            xLAQR5 accumulates reflections and takes */
+/*                            advantage of 2-by-2 block structure during */
+/*                            matrix-matrix multiplies. */
+/*                        (If xTRMM is slower than xGEMM, then */
+/*                        IPARMQ(ISPEC=16)=1 may be more efficient than */
+/*                        IPARMQ(ISPEC=16)=2 despite the greater level of */
+/*                        arithmetic work implied by the latter choice.) */
+
+/*       NAME    (input) character string */
+/*               Name of the calling subroutine */
+
+/*       OPTS    (input) character string */
+/*               This is a concatenation of the string arguments to */
+/*               TTQRE. */
+
+/*       N       (input) integer scalar */
+/*               N is the order of the Hessenberg matrix H. */
+
+/*       ILO     (input) INTEGER */
+/*       IHI     (input) INTEGER */
+/*               It is assumed that H is already upper triangular */
+/*               in rows and columns 1:ILO-1 and IHI+1:N. */
+
+/*       LWORK   (input) integer scalar */
+/*               The amount of workspace available. */
+
+/*       Further Details */
+/*       =============== */
+
+/*       Little is known about how best to choose these parameters. */
+/*       It is possible to use different values of the parameters */
+/*       for each of CHSEQR, DHSEQR, SHSEQR and ZHSEQR. */
+
+/*       It is probably best to choose different parameters for */
+/*       different matrices and different parameters at different */
+/*       times during the iteration, but this has not been */
+/*       implemented --- yet. */
+
+
+/*       The best choices of most of the parameters depend */
+/*       in an ill-understood way on the relative execution */
+/*       rate of xLAQR3 and xLAQR5 and on the nature of each */
+/*       particular eigenvalue problem.  Experiment may be the */
+/*       only practical way to determine which choices are most */
+/*       effective. */
+
+/*       Following is a list of default values supplied by IPARMQ. */
+/*       These defaults may be adjusted in order to attain better */
+/*       performance in any particular computational environment. */
+
+/*       IPARMQ(ISPEC=12) The xLAHQR vs xLAQR0 crossover point. */
+/*                        Default: 75. (Must be at least 11.) */
+
+/*       IPARMQ(ISPEC=13) Recommended deflation window size. */
+/*                        This depends on ILO, IHI and NS, the */
+/*                        number of simultaneous shifts returned */
+/*                        by IPARMQ(ISPEC=15).  The default for */
+/*                        (IHI-ILO+1).LE.500 is NS.  The default */
+/*                        for (IHI-ILO+1).GT.500 is 3*NS/2. */
+
+/*       IPARMQ(ISPEC=14) Nibble crossover point. Default: 14. */
+
+/*       IPARMQ(ISPEC=15) Number of simultaneous shifts, NS. */
+/*                        a multi-shift QR iteration. */
+
+/*                        If IHI-ILO+1 is ... */
+
+/*                        greater than      ...but less    ... the */
+/*                        or equal to ...      than        default is */
+
+/*                                0               30       NS =   2+ */
+/*                               30               60       NS =   4+ */
+/*                               60              150       NS =  10 */
+/*                              150              590       NS =  ** */
+/*                              590             3000       NS =  64 */
+/*                             3000             6000       NS = 128 */
+/*                             6000             infinity   NS = 256 */
+
+/*                    (+)  By default matrices of this order are */
+/*                         passed to the implicit double shift routine */
+/*                         xLAHQR.  See IPARMQ(ISPEC=12) above.   These */
+/*                         values of NS are used only in case of a rare */
+/*                         xLAHQR failure. */
+
+/*                    (**) The asterisks (**) indicate an ad-hoc */
+/*                         function increasing from 10 to 64. */
+
+/*       IPARMQ(ISPEC=16) Select structured matrix multiply. */
+/*                        (See ISPEC=16 above for details.) */
+/*                        Default: 3. */
+/*                        NOTE(review): despite "Default: 3" above, the */
+/*                        code below only ever returns 0, 1 or 2 for */
+/*                        ISPEC=16 (matches reference LAPACK 3.2). */
+
+/*       ================================================================ */
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+    if (*ispec == 15 || *ispec == 13 || *ispec == 16) {
+
+/*        ==== Set the number simultaneous shifts ==== */
+
+        nh = *ihi - *ilo + 1;
+        ns = 2;
+        if (nh >= 30) {
+            ns = 4;
+        }
+        if (nh >= 60) {
+            ns = 10;
+        }
+        if (nh >= 150) {
+/* Computing MAX */
+            /* ns = max(10, nh / nint(log2(nh))): the ad-hoc (**) ramp
+               from 10 up to 64 described in the table above. */
+            r__1 = log((real) nh) / log(2.f);
+            i__1 = 10, i__2 = nh / i_nint(&r__1);
+            ns = max(i__1,i__2);
+        }
+        if (nh >= 590) {
+            ns = 64;
+        }
+        if (nh >= 3000) {
+            ns = 128;
+        }
+        if (nh >= 6000) {
+            ns = 256;
+        }
+/* Computing MAX */
+        /* Round NS down to an even number, but never below 2. */
+        i__1 = 2, i__2 = ns - ns % 2;
+        ns = max(i__1,i__2);
+    }
+
+    if (*ispec == 12) {
+
+
+/*        ===== Matrices of order smaller than NMIN get sent */
+/*        .     to xLAHQR, the classic double shift algorithm. */
+/*        .     This must be at least 11. ==== */
+
+        ret_val = 75;
+
+    } else if (*ispec == 14) {
+
+/*        ==== INIBL: skip a multi-shift qr iteration and */
+/*        .    whenever aggressive early deflation finds */
+/*        .    at least (NIBBLE*(window size)/100) deflations. ==== */
+
+        ret_val = 14;
+
+    } else if (*ispec == 15) {
+
+/*        ==== NSHFTS: The number of simultaneous shifts ===== */
+
+        ret_val = ns;
+
+    } else if (*ispec == 13) {
+
+/*        ==== NW: deflation window size.  ==== */
+
+        if (nh <= 500) {
+            ret_val = ns;
+        } else {
+            ret_val = ns * 3 / 2;
+        }
+
+    } else if (*ispec == 16) {
+
+/*        ==== IACC22: Whether to accumulate reflections */
+/*        .     before updating the far-from-diagonal elements */
+/*        .     and whether to use 2-by-2 block structure while */
+/*        .     doing it.  A small amount of work could be saved */
+/*        .     by making this choice dependent also upon the */
+/*        .     NH=IHI-ILO+1. */
+
+        /* NOTE(review): both thresholds below are 14 (KACMIN == K22MIN
+           in reference LAPACK 3.2), so the ret_val = 1 assignment is
+           always overwritten by ret_val = 2; kept to match upstream. */
+        ret_val = 0;
+        if (ns >= 14) {
+            ret_val = 1;
+        }
+        if (ns >= 14) {
+            ret_val = 2;
+        }
+
+    } else {
+/*        ===== invalid value of ispec ===== */
+        ret_val = -1;
+
+    }
+
+/*     ==== End of IPARMQ ==== */
+
+    return ret_val;
+} /* iparmq_ */
diff --git a/contrib/libs/clapack/izmax1.c b/contrib/libs/clapack/izmax1.c
new file mode 100644
index 0000000000..5545069c12
--- /dev/null
+++ b/contrib/libs/clapack/izmax1.c
@@ -0,0 +1,127 @@
+/* izmax1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* IZMAX1: 1-based index of the element of CX with the largest complex
+   absolute value (z_abs); 0 when n < 1.  Ties keep the earliest index
+   (strict '>' via the '<=' skip below). */
+integer izmax1_(integer *n, doublecomplex *cx, integer *incx)
+{
+    /* System generated locals */
+    integer ret_val, i__1;
+
+    /* Builtin functions */
+    double z_abs(doublecomplex *);  /* f2c: complex modulus |z| */
+
+    /* Local variables */
+    integer i__, ix;
+    doublereal smax;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  IZMAX1 finds the index of the element whose real part has maximum */
+/*  absolute value. */
+
+/*  Based on IZAMAX from Level 1 BLAS. */
+/*  The change is to use the 'genuine' absolute value. */
+
+/*  Contributed by Nick Higham for use with ZLACON. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  N       (input) INTEGER */
+/*          The number of elements in the vector CX. */
+
+/*  CX      (input) COMPLEX*16 array, dimension (N) */
+/*          The vector whose elements will be summed. */
+
+/*  INCX    (input) INTEGER */
+/*          The spacing between successive values of CX.  INCX >= 1. */
+
+/*  ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Statement Functions .. */
+/*     .. */
+/*     .. Statement Function definitions .. */
+
+/*     NEXT LINE IS THE ONLY MODIFICATION. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments: make cx[1..n] Fortran-style 1-based. */
+    --cx;
+
+    /* Function Body */
+    ret_val = 0;
+    if (*n < 1) {
+        return ret_val;
+    }
+    ret_val = 1;
+    if (*n == 1) {
+        return ret_val;
+    }
+    if (*incx == 1) {
+        goto L30;
+    }
+
+/*     CODE FOR INCREMENT NOT EQUAL TO 1 */
+
+    ix = 1;
+    smax = z_abs(&cx[1]);
+    ix += *incx;
+    i__1 = *n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+        /* Skip (goto L10) unless this element strictly beats the max. */
+        if (z_abs(&cx[ix]) <= smax) {
+            goto L10;
+        }
+        ret_val = i__;
+        smax = z_abs(&cx[ix]);
+L10:
+        ix += *incx;
+/* L20: */
+    }
+    return ret_val;
+
+/*     CODE FOR INCREMENT EQUAL TO 1 */
+
+L30:
+    smax = z_abs(&cx[1]);
+    i__1 = *n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+        /* Same strict-improvement test, unit stride. */
+        if (z_abs(&cx[i__]) <= smax) {
+            goto L40;
+        }
+        ret_val = i__;
+        smax = z_abs(&cx[i__]);
+L40:
+        ;
+    }
+    return ret_val;
+
+/*     End of IZMAX1 */
+
+} /* izmax1_ */
diff --git a/contrib/libs/clapack/list.inc b/contrib/libs/clapack/list.inc
new file mode 100644
index 0000000000..a913c3917b
--- /dev/null
+++ b/contrib/libs/clapack/list.inc
@@ -0,0 +1,8 @@
+CFLAGS(
+ -DNO_BLAS_WRAP
+)
+
+PEERDIR(
+ ADDINCL contrib/libs/libf2c
+ ADDINCL contrib/libs/cblas
+) \ No newline at end of file
diff --git a/contrib/libs/clapack/lsamen.c b/contrib/libs/clapack/lsamen.c
new file mode 100644
index 0000000000..7411199dca
--- /dev/null
+++ b/contrib/libs/clapack/lsamen.c
@@ -0,0 +1,98 @@
+/* lsamen.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+#include "string.h"
+
+/* LSAMEN: TRUE_ iff the first *n characters of ca and cb match
+   case-insensitively (via lsame_ per character); FALSE_ if either
+   string is shorter than *n.
+   NOTE: string lengths are taken from NUL termination (strlen), not
+   from f2c hidden-length arguments. */
+logical lsamen_(integer *n, char *ca, char *cb)
+{
+    /* System generated locals */
+    integer i__1;
+    logical ret_val;
+
+    /* Builtin functions */
+    integer i_len(char *, ftnlen);  /* f2c LEN intrinsic */
+
+    /* Local variables */
+    integer i__;
+    extern logical lsame_(char *, char *);  /* case-insensitive char compare */
+
+    ftnlen ca_len, cb_len;
+
+    ca_len = strlen (ca);
+    cb_len = strlen (cb);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  LSAMEN  tests if the first N letters of CA are the same as the */
+/*  first N letters of CB, regardless of case. */
+/*  LSAMEN returns .TRUE. if CA and CB are equivalent except for case */
+/*  and .FALSE. otherwise.  LSAMEN also returns .FALSE. if LEN( CA ) */
+/*  or LEN( CB ) is less than N. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  N       (input) INTEGER */
+/*          The number of characters in CA and CB to be compared. */
+
+/*  CA      (input) CHARACTER*(*) */
+/*  CB      (input) CHARACTER*(*) */
+/*          CA and CB specify two character strings of length at least N. */
+/*          Only the first N characters of each string will be accessed. */
+
+/*  ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    ret_val = FALSE_;
+    /* Either string shorter than n: answer is .FALSE. */
+    if (i_len(ca, ca_len) < *n || i_len(cb, cb_len) < *n) {
+        goto L20;
+    }
+
+/*     Do for each character in the two strings. */
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*        Test if the characters are equal using LSAME. */
+
+        if (! lsame_(ca + (i__ - 1), cb + (i__ - 1))) {
+            goto L20;
+        }
+
+/* L10: */
+    }
+    ret_val = TRUE_;
+
+L20:
+    return ret_val;
+
+/*     End of LSAMEN */
+
+} /* lsamen_ */
diff --git a/contrib/libs/clapack/maxloc.c b/contrib/libs/clapack/maxloc.c
new file mode 100644
index 0000000000..7f21d9c361
--- /dev/null
+++ b/contrib/libs/clapack/maxloc.c
@@ -0,0 +1,71 @@
+/* maxloc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+
+/* ********************************************************************************** */
+/* SMAXLOC: 1-based index of the first maximum element of the real array
+   a[0..*dimm-1] (Fortran MAXLOC analogue).  Ties keep the earliest index
+   (strict '<' test).  Reads a[1] unconditionally, so *dimm >= 1 is
+   assumed. */
+integer smaxloc_(real *a, integer *dimm)
+{
+    /* System generated locals */
+    integer ret_val, i__1;
+
+    /* Local variables */
+    integer i__;
+    real smax;
+
+
+
+    /* Parameter adjustments: make a[1..dimm] Fortran-style 1-based. */
+    --a;
+
+    /* Function Body */
+    ret_val = 1;
+    smax = a[1];
+    i__1 = *dimm;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+        if (smax < a[i__]) {
+            smax = a[i__];
+            ret_val = i__;
+        }
+/* L10: */
+    }
+    return ret_val;
+} /* smaxloc_ */
+
+/* ********************************************************************************** */
+/* DMAXLOC: 1-based index of the first maximum element of the double
+   precision array a[0..*dimm-1] (Fortran MAXLOC analogue).  Ties keep
+   the earliest index (strict '<' test).  Reads a[1] unconditionally,
+   so *dimm >= 1 is assumed. */
+integer dmaxloc_(doublereal *a, integer *dimm)
+{
+    /* System generated locals */
+    integer ret_val, i__1;
+
+    /* Local variables */
+    integer i__;
+    doublereal dmax__;
+
+
+
+    /* Parameter adjustments: make a[1..dimm] Fortran-style 1-based. */
+    --a;
+
+    /* Function Body */
+    ret_val = 1;
+    dmax__ = a[1];
+    i__1 = *dimm;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+        if (dmax__ < a[i__]) {
+            dmax__ = a[i__];
+            ret_val = i__;
+        }
+/* L20: */
+    }
+    return ret_val;
+} /* dmaxloc_ */
diff --git a/contrib/libs/clapack/sbdsdc.c b/contrib/libs/clapack/sbdsdc.c
new file mode 100644
index 0000000000..0794f6dc4e
--- /dev/null
+++ b/contrib/libs/clapack/sbdsdc.c
@@ -0,0 +1,511 @@
+/* sbdsdc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__9 = 9;
+static integer c__0 = 0;
+static real c_b15 = 1.f;
+static integer c__1 = 1;
+static real c_b29 = 0.f;
+
+/* Subroutine */ int sbdsdc_(char *uplo, char *compq, integer *n, real *d__,
+	real *e, real *u, integer *ldu, real *vt, integer *ldvt, real *q,
+	integer *iq, real *work, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2;
+    real r__1;
+
+    /* Builtin functions */
+    double r_sign(real *, real *), log(doublereal);
+
+    /* Local variables */
+    integer i__, j, k;
+    real p, r__;
+    integer z__, ic, ii, kk;
+    real cs;
+    integer is, iu;
+    real sn;
+    integer nm1;
+    real eps;
+    integer ivt, difl, difr, ierr, perm, mlvl, sqre;
+    extern logical lsame_(char *, char *);
+    integer poles;
+    extern /* Subroutine */ int slasr_(char *, char *, char *, integer *,
+	    integer *, real *, real *, real *, integer *);
+    integer iuplo, nsize, start;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+	    integer *), sswap_(integer *, real *, integer *, real *, integer *
+), slasd0_(integer *, integer *, real *, real *, real *, integer *
+, real *, integer *, integer *, integer *, real *, integer *);
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int slasda_(integer *, integer *, integer *,
+	    integer *, real *, real *, real *, integer *, real *, integer *,
+	    real *, real *, real *, real *, integer *, integer *, integer *,
+	    integer *, real *, real *, real *, real *, integer *, integer *),
+	    xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+	    real *, integer *, integer *, real *, integer *, integer *);
+    integer givcol;
+    extern /* Subroutine */ int slasdq_(char *, integer *, integer *, integer
+	    *, integer *, integer *, real *, real *, real *, integer *, real *
+, integer *, real *, integer *, real *, integer *);
+    integer icompq;
+    extern /* Subroutine */ int slaset_(char *, integer *, integer *, real *,
+	    real *, real *, integer *), slartg_(real *, real *, real *
+, real *, real *);
+    real orgnrm;
+    integer givnum;
+    extern doublereal slanst_(char *, integer *, real *, real *);
+    integer givptr, qstart, smlsiz, wstart, smlszp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SBDSDC computes the singular value decomposition (SVD) of a real */
+/* N-by-N (upper or lower) bidiagonal matrix B: B = U * S * VT, */
+/* using a divide and conquer method, where S is a diagonal matrix */
+/* with non-negative diagonal elements (the singular values of B), and */
+/* U and VT are orthogonal matrices of left and right singular vectors, */
+/* respectively. SBDSDC can be used to compute all singular values, */
+/* and optionally, singular vectors or singular vectors in compact form. */
+
+/* This code makes very mild assumptions about floating point */
+/* arithmetic. It will work on machines with a guard digit in */
+/* add/subtract, or on those binary machines without guard digits */
+/* which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. */
+/* It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. See SLASD3 for details. */
+
+/* The code currently calls SLASDQ if singular values only are desired. */
+/* However, it can be slightly modified to compute singular values */
+/* using the divide and conquer method. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': B is upper bidiagonal. */
+/* = 'L': B is lower bidiagonal. */
+
+/* COMPQ (input) CHARACTER*1 */
+/* Specifies whether singular vectors are to be computed */
+/* as follows: */
+/* = 'N': Compute singular values only; */
+/* = 'P': Compute singular values and compute singular */
+/* vectors in compact form; */
+/* = 'I': Compute singular values and singular vectors. */
+
+/* N (input) INTEGER */
+/* The order of the matrix B. N >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the n diagonal elements of the bidiagonal matrix B. */
+/* On exit, if INFO=0, the singular values of B. */
+
+/* E (input/output) REAL array, dimension (N-1) */
+/* On entry, the elements of E contain the offdiagonal */
+/* elements of the bidiagonal matrix whose SVD is desired. */
+/* On exit, E has been destroyed. */
+
+/* U (output) REAL array, dimension (LDU,N) */
+/* If COMPQ = 'I', then: */
+/* On exit, if INFO = 0, U contains the left singular vectors */
+/* of the bidiagonal matrix. */
+/* For other values of COMPQ, U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= 1. */
+/* If singular vectors are desired, then LDU >= max( 1, N ). */
+
+/* VT (output) REAL array, dimension (LDVT,N) */
+/* If COMPQ = 'I', then: */
+/* On exit, if INFO = 0, VT' contains the right singular */
+/* vectors of the bidiagonal matrix. */
+/* For other values of COMPQ, VT is not referenced. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. LDVT >= 1. */
+/* If singular vectors are desired, then LDVT >= max( 1, N ). */
+
+/* Q (output) REAL array, dimension (LDQ) */
+/* If COMPQ = 'P', then: */
+/* On exit, if INFO = 0, Q and IQ contain the left */
+/* and right singular vectors in a compact form, */
+/* requiring O(N log N) space instead of 2*N**2. */
+/* In particular, Q contains all the REAL data in */
+/* LDQ >= N*(11 + 2*SMLSIZ + 8*INT(LOG_2(N/(SMLSIZ+1)))) */
+/* words of memory, where SMLSIZ is returned by ILAENV and */
+/* is equal to the maximum size of the subproblems at the */
+/* bottom of the computation tree (usually about 25). */
+/* For other values of COMPQ, Q is not referenced. */
+
+/* IQ (output) INTEGER array, dimension (LDIQ) */
+/* If COMPQ = 'P', then: */
+/* On exit, if INFO = 0, Q and IQ contain the left */
+/* and right singular vectors in a compact form, */
+/* requiring O(N log N) space instead of 2*N**2. */
+/* In particular, IQ contains all INTEGER data in */
+/* LDIQ >= N*(3 + 3*INT(LOG_2(N/(SMLSIZ+1)))) */
+/* words of memory, where SMLSIZ is returned by ILAENV and */
+/* is equal to the maximum size of the subproblems at the */
+/* bottom of the computation tree (usually about 25). */
+/* For other values of COMPQ, IQ is not referenced. */
+
+/* WORK (workspace) REAL array, dimension (MAX(1,LWORK)) */
+/* If COMPQ = 'N' then LWORK >= (4 * N). */
+/* If COMPQ = 'P' then LWORK >= (6 * N). */
+/* If COMPQ = 'I' then LWORK >= (3 * N**2 + 4 * N). */
+
+/* IWORK (workspace) INTEGER array, dimension (8*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: The algorithm failed to compute an singular value. */
+/* The update process of divide and conquer failed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* ===================================================================== */
+/* Changed dimension statement in comment describing E from (N) to */
+/* (N-1). Sven, 17 Feb 05. */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* f2c keeps the Fortran 1-based indexing: every array pointer is shifted */
+/* down (by 1 for vectors, by its leading dimension + 1 for matrices) so */
+/* that d__[1], u[u_dim1 + 1], etc. address the first Fortran elements. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1;
+    u -= u_offset;
+    vt_dim1 = *ldvt;
+    vt_offset = 1 + vt_dim1;
+    vt -= vt_offset;
+    --q;
+    --iq;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+
+/* Decode UPLO (IUPLO: 1 = 'U', 2 = 'L', 0 = invalid) and COMPQ */
+/* (ICOMPQ: 0 = 'N', 1 = 'P', 2 = 'I', -1 = invalid). */
+
+    iuplo = 0;
+    if (lsame_(uplo, "U")) {
+	iuplo = 1;
+    }
+    if (lsame_(uplo, "L")) {
+	iuplo = 2;
+    }
+    if (lsame_(compq, "N")) {
+	icompq = 0;
+    } else if (lsame_(compq, "P")) {
+	icompq = 1;
+    } else if (lsame_(compq, "I")) {
+	icompq = 2;
+    } else {
+	icompq = -1;
+    }
+    if (iuplo == 0) {
+	*info = -1;
+    } else if (icompq < 0) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ldu < 1 || icompq == 2 && *ldu < *n) {
+	*info = -7;
+    } else if (*ldvt < 1 || icompq == 2 && *ldvt < *n) {
+	*info = -9;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SBDSDC", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* SMLSIZ is the maximum size of the subproblems at the bottom of the */
+/* divide-and-conquer tree (usually about 25; see the Q/IQ argument */
+/* descriptions above). */
+
+    smlsiz = ilaenv_(&c__9, "SBDSDC", " ", &c__0, &c__0, &c__0, &c__0);
+    if (*n == 1) {
+	if (icompq == 1) {
+	    q[1] = r_sign(&c_b15, &d__[1]);
+	    q[smlsiz * *n + 1] = 1.f;
+	} else if (icompq == 2) {
+	    u[u_dim1 + 1] = r_sign(&c_b15, &d__[1]);
+	    vt[vt_dim1 + 1] = 1.f;
+	}
+	d__[1] = dabs(d__[1]);
+	return 0;
+    }
+    nm1 = *n - 1;
+
+/* If matrix lower bidiagonal, rotate to be upper bidiagonal */
+/* by applying Givens rotations on the left */
+
+/* Layout bookkeeping for the compact (COMPQ='P') case: Q is addressed */
+/* as an N-row matrix.  Columns 1-2 keep a copy of D and E (stored just */
+/* below); when the input is lower bidiagonal, columns 3-4 additionally */
+/* receive the Givens cosines/sines, so packed data starts at column */
+/* QSTART = 5 instead of 3.  WSTART is the first free WORK slot (for */
+/* ICOMPQ = 2 the rotations occupy WORK(1:2*N-2)). */
+
+    wstart = 1;
+    qstart = 3;
+    if (icompq == 1) {
+	scopy_(n, &d__[1], &c__1, &q[1], &c__1);
+	i__1 = *n - 1;
+	scopy_(&i__1, &e[1], &c__1, &q[*n + 1], &c__1);
+    }
+    if (iuplo == 2) {
+	qstart = 5;
+	wstart = (*n << 1) - 1;
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    slartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+	    d__[i__] = r__;
+	    e[i__] = sn * d__[i__ + 1];
+	    d__[i__ + 1] = cs * d__[i__ + 1];
+	    if (icompq == 1) {
+		q[i__ + (*n << 1)] = cs;
+		q[i__ + *n * 3] = sn;
+	    } else if (icompq == 2) {
+		work[i__] = cs;
+		work[nm1 + i__] = -sn;
+	    }
+/* L10: */
+	}
+    }
+
+/* If ICOMPQ = 0, use SLASDQ to compute the singular values. */
+
+    if (icompq == 0) {
+	slasdq_("U", &c__0, n, &c__0, &c__0, &c__0, &d__[1], &e[1], &vt[
+		vt_offset], ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[
+		wstart], info);
+	goto L40;
+    }
+
+/* If N is smaller than the minimum divide size SMLSIZ, then solve */
+/* the problem with another solver. */
+
+    if (*n <= smlsiz) {
+	if (icompq == 2) {
+	    slaset_("A", n, n, &c_b29, &c_b15, &u[u_offset], ldu);
+	    slaset_("A", n, n, &c_b29, &c_b15, &vt[vt_offset], ldvt);
+	    slasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &vt[vt_offset]
+, ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[
+		    wstart], info);
+	} else if (icompq == 1) {
+	    iu = 1;
+	    ivt = iu + *n;
+	    slaset_("A", n, n, &c_b29, &c_b15, &q[iu + (qstart - 1) * *n], n);
+	    slaset_("A", n, n, &c_b29, &c_b15, &q[ivt + (qstart - 1) * *n], n);
+	    slasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &q[ivt + (
+		    qstart - 1) * *n], n, &q[iu + (qstart - 1) * *n], n, &q[
+		    iu + (qstart - 1) * *n], n, &work[wstart], info);
+	}
+	goto L40;
+    }
+
+    if (icompq == 2) {
+	slaset_("A", n, n, &c_b29, &c_b15, &u[u_offset], ldu);
+	slaset_("A", n, n, &c_b29, &c_b15, &vt[vt_offset], ldvt);
+    }
+
+/* Scale. */
+
+    orgnrm = slanst_("M", n, &d__[1], &e[1]);
+    if (orgnrm == 0.f) {
+	return 0;
+    }
+    slascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, &c__1, &d__[1], n, &ierr);
+    slascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &nm1, &c__1, &e[1], &nm1, &
+	    ierr);
+
+    eps = slamch_("Epsilon");
+
+    mlvl = (integer) (log((real) (*n) / (real) (smlsiz + 1)) / log(2.f)) + 1;
+    smlszp = smlsiz + 1;
+
+/* Column bases into the packed Q (REAL) and IQ (INTEGER) workspaces for */
+/* the COMPQ='P' output; they are combined with QSTART when indexing */
+/* (the "+ QSTART - 2" offsets in the SLASDA call below). */
+
+    if (icompq == 1) {
+	iu = 1;
+	ivt = smlsiz + 1;
+	difl = ivt + smlszp;
+	difr = difl + mlvl;
+	z__ = difr + (mlvl << 1);
+	ic = z__ + mlvl;
+	is = ic + 1;
+	poles = is + 1;
+	givnum = poles + (mlvl << 1);
+
+	k = 1;
+	givptr = 2;
+	perm = 3;
+	givcol = perm + mlvl;
+    }
+
+/* Replace any diagonal entry smaller than EPS in magnitude by */
+/* SIGN(EPS, D(I)) before the matrix is split into subproblems. */
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((r__1 = d__[i__], dabs(r__1)) < eps) {
+	    d__[i__] = r_sign(&eps, &d__[i__]);
+	}
+/* L20: */
+    }
+
+    start = 1;
+    sqre = 0;
+
+/* Scan the offdiagonal: each index where |E(I)| < EPS (and the final */
+/* index NM1) closes an independent subproblem D(START:...), which is */
+/* solved by SLASD0 (explicit vectors, ICOMPQ = 2) or SLASDA (compact */
+/* form).  START then advances past the solved block. */
+
+    i__1 = nm1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((r__1 = e[i__], dabs(r__1)) < eps || i__ == nm1) {
+
+/* Subproblem found. First determine its size and then */
+/* apply divide and conquer on it. */
+
+	    if (i__ < nm1) {
+
+/* A subproblem with E(I) small for I < NM1. */
+
+		nsize = i__ - start + 1;
+	    } else if ((r__1 = e[i__], dabs(r__1)) >= eps) {
+
+/* A subproblem with E(NM1) not too small but I = NM1. */
+
+		nsize = *n - start + 1;
+	    } else {
+
+/* A subproblem with E(NM1) small. This implies an */
+/* 1-by-1 subproblem at D(N). Solve this 1-by-1 problem */
+/* first. */
+
+		nsize = i__ - start + 1;
+		if (icompq == 2) {
+		    u[*n + *n * u_dim1] = r_sign(&c_b15, &d__[*n]);
+		    vt[*n + *n * vt_dim1] = 1.f;
+		} else if (icompq == 1) {
+		    q[*n + (qstart - 1) * *n] = r_sign(&c_b15, &d__[*n]);
+		    q[*n + (smlsiz + qstart - 1) * *n] = 1.f;
+		}
+		d__[*n] = (r__1 = d__[*n], dabs(r__1));
+	    }
+	    if (icompq == 2) {
+		slasd0_(&nsize, &sqre, &d__[start], &e[start], &u[start +
+			start * u_dim1], ldu, &vt[start + start * vt_dim1],
+			ldvt, &smlsiz, &iwork[1], &work[wstart], info);
+	    } else {
+		slasda_(&icompq, &smlsiz, &nsize, &sqre, &d__[start], &e[
+			start], &q[start + (iu + qstart - 2) * *n], n, &q[
+			start + (ivt + qstart - 2) * *n], &iq[start + k * *n],
+			&q[start + (difl + qstart - 2) * *n], &q[start + (
+			difr + qstart - 2) * *n], &q[start + (z__ + qstart -
+			2) * *n], &q[start + (poles + qstart - 2) * *n], &iq[
+			start + givptr * *n], &iq[start + givcol * *n], n, &
+			iq[start + perm * *n], &q[start + (givnum + qstart -
+			2) * *n], &q[start + (ic + qstart - 2) * *n], &q[
+			start + (is + qstart - 2) * *n], &work[wstart], &
+			iwork[1], info);
+		if (*info != 0) {
+		    return 0;
+		}
+	    }
+	    start = i__ + 1;
+	}
+/* L30: */
+    }
+
+/* Unscale */
+
+    slascl_("G", &c__0, &c__0, &c_b15, &orgnrm, n, &c__1, &d__[1], n, &ierr);
+
+/* L40 is the common exit path: the SLASDQ-only branches above jump here */
+/* directly, skipping the divide-and-conquer machinery. */
+
+L40:
+
+/* Use Selection Sort to minimize swaps of singular vectors */
+
+    i__1 = *n;
+    for (ii = 2; ii <= i__1; ++ii) {
+	i__ = ii - 1;
+	kk = i__;
+	p = d__[i__];
+	i__2 = *n;
+	for (j = ii; j <= i__2; ++j) {
+	    if (d__[j] > p) {
+		kk = j;
+		p = d__[j];
+	    }
+/* L50: */
+	}
+	if (kk != i__) {
+	    d__[kk] = d__[i__];
+	    d__[i__] = p;
+	    if (icompq == 1) {
+		iq[i__] = kk;
+	    } else if (icompq == 2) {
+		sswap_(n, &u[i__ * u_dim1 + 1], &c__1, &u[kk * u_dim1 + 1], &
+			c__1);
+		sswap_(n, &vt[i__ + vt_dim1], ldvt, &vt[kk + vt_dim1], ldvt);
+	    }
+	} else if (icompq == 1) {
+	    iq[i__] = i__;
+	}
+/* L60: */
+    }
+
+/* If ICOMPQ = 1, use IQ(N,1) as the indicator for UPLO */
+
+    if (icompq == 1) {
+	if (iuplo == 1) {
+	    iq[*n] = 1;
+	} else {
+	    iq[*n] = 0;
+	}
+    }
+
+/* If B is lower bidiagonal, update U by those Givens rotations */
+/* which rotated B to be upper bidiagonal */
+
+    if (iuplo == 2 && icompq == 2) {
+	slasr_("L", "V", "B", n, n, &work[1], &work[*n], &u[u_offset], ldu);
+    }
+
+    return 0;
+
+/* End of SBDSDC */
+
+} /* sbdsdc_ */
diff --git a/contrib/libs/clapack/sbdsqr.c b/contrib/libs/clapack/sbdsqr.c
new file mode 100644
index 0000000000..954f88ca45
--- /dev/null
+++ b/contrib/libs/clapack/sbdsqr.c
@@ -0,0 +1,918 @@
+/* sbdsqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b15 = -.125;
+static integer c__1 = 1;
+static real c_b49 = 1.f;
+static real c_b72 = -1.f;
+
+/* Subroutine */ int sbdsqr_(char *uplo, integer *n, integer *ncvt, integer *
+ nru, integer *ncc, real *d__, real *e, real *vt, integer *ldvt, real *
+ u, integer *ldu, real *c__, integer *ldc, real *work, integer *info)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+ i__2;
+ real r__1, r__2, r__3, r__4;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double pow_dd(doublereal *, doublereal *), sqrt(doublereal), r_sign(real *
+ , real *);
+
+ /* Local variables */
+ real f, g, h__;
+ integer i__, j, m;
+ real r__, cs;
+ integer ll;
+ real sn, mu;
+ integer nm1, nm12, nm13, lll;
+ real eps, sll, tol, abse;
+ integer idir;
+ real abss;
+ integer oldm;
+ real cosl;
+ integer isub, iter;
+ real unfl, sinl, cosr, smin, smax, sinr;
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *), slas2_(real *, real *, real *, real *,
+ real *);
+ extern logical lsame_(char *, char *);
+ real oldcs;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ integer oldll;
+ real shift, sigmn, oldsn;
+ integer maxit;
+ real sminl;
+ extern /* Subroutine */ int slasr_(char *, char *, char *, integer *,
+ integer *, real *, real *, real *, integer *);
+ real sigmx;
+ logical lower;
+ extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+ integer *), slasq1_(integer *, real *, real *, real *, integer *),
+ slasv2_(real *, real *, real *, real *, real *, real *, real *,
+ real *, real *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real sminoa;
+ extern /* Subroutine */ int slartg_(real *, real *, real *, real *, real *
+);
+ real thresh;
+ logical rotate;
+ real tolmul;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* January 2007 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SBDSQR computes the singular values and, optionally, the right and/or */
+/* left singular vectors from the singular value decomposition (SVD) of */
+/* a real N-by-N (upper or lower) bidiagonal matrix B using the implicit */
+/* zero-shift QR algorithm. The SVD of B has the form */
+
+/* B = Q * S * P**T */
+
+/* where S is the diagonal matrix of singular values, Q is an orthogonal */
+/* matrix of left singular vectors, and P is an orthogonal matrix of */
+/* right singular vectors. If left singular vectors are requested, this */
+/* subroutine actually returns U*Q instead of Q, and, if right singular */
+/* vectors are requested, this subroutine returns P**T*VT instead of */
+/* P**T, for given real input matrices U and VT. When U and VT are the */
+/* orthogonal matrices that reduce a general matrix A to bidiagonal */
+/* form: A = U*B*VT, as computed by SGEBRD, then */
+
+/* A = (U*Q) * S * (P**T*VT) */
+
+/* is the SVD of A. Optionally, the subroutine may also compute Q**T*C */
+/* for a given real input matrix C. */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices With */
+/* Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, */
+/* LAPACK Working Note #3 (or SIAM J. Sci. Statist. Comput. vol. 11, */
+/* no. 5, pp. 873-912, Sept 1990) and */
+/* "Accurate singular values and differential qd algorithms," by */
+/* B. Parlett and V. Fernando, Technical Report CPAM-554, Mathematics */
+/* Department, University of California at Berkeley, July 1992 */
+/* for a detailed description of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': B is upper bidiagonal; */
+/* = 'L': B is lower bidiagonal. */
+
+/* N (input) INTEGER */
+/* The order of the matrix B. N >= 0. */
+
+/* NCVT (input) INTEGER */
+/* The number of columns of the matrix VT. NCVT >= 0. */
+
+/* NRU (input) INTEGER */
+/* The number of rows of the matrix U. NRU >= 0. */
+
+/* NCC (input) INTEGER */
+/* The number of columns of the matrix C. NCC >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the n diagonal elements of the bidiagonal matrix B. */
+/* On exit, if INFO=0, the singular values of B in decreasing */
+/* order. */
+
+/* E (input/output) REAL array, dimension (N-1) */
+/* On entry, the N-1 offdiagonal elements of the bidiagonal */
+/* matrix B. */
+/* On exit, if INFO = 0, E is destroyed; if INFO > 0, D and E */
+/* will contain the diagonal and superdiagonal elements of a */
+/* bidiagonal matrix orthogonally equivalent to the one given */
+/* as input. */
+
+/* VT (input/output) REAL array, dimension (LDVT, NCVT) */
+/* On entry, an N-by-NCVT matrix VT. */
+/* On exit, VT is overwritten by P**T * VT. */
+/* Not referenced if NCVT = 0. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. */
+/* LDVT >= max(1,N) if NCVT > 0; LDVT >= 1 if NCVT = 0. */
+
+/* U (input/output) REAL array, dimension (LDU, N) */
+/* On entry, an NRU-by-N matrix U. */
+/* On exit, U is overwritten by U * Q. */
+/* Not referenced if NRU = 0. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= max(1,NRU). */
+
+/* C (input/output) REAL array, dimension (LDC, NCC) */
+/* On entry, an N-by-NCC matrix C. */
+/* On exit, C is overwritten by Q**T * C. */
+/* Not referenced if NCC = 0. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. */
+/* LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0. */
+
+/* WORK (workspace) REAL array, dimension (4*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: If INFO = -i, the i-th argument had an illegal value */
+/* > 0: */
+/* if NCVT = NRU = NCC = 0, */
+/* = 1, a split was marked by a positive value in E */
+/* = 2, current block of Z not diagonalized after 30*N */
+/* iterations (in inner while loop) */
+/* = 3, termination criterion of outer while loop not met */
+/* (program created more than N unreduced blocks) */
+/* else NCVT = NRU = NCC = 0, */
+/* the algorithm did not converge; D and E contain the */
+/* elements of a bidiagonal matrix which is orthogonally */
+/* similar to the input matrix B; if INFO = i, i */
+/* elements of E have not converged to zero. */
+
+/* Internal Parameters */
+/* =================== */
+
+/* TOLMUL REAL, default = max(10,min(100,EPS**(-1/8))) */
+/* TOLMUL controls the convergence criterion of the QR loop. */
+/* If it is positive, TOLMUL*EPS is the desired relative */
+/* precision in the computed singular values. */
+/* If it is negative, abs(TOLMUL*EPS*sigma_max) is the */
+/* desired absolute accuracy in the computed singular */
+/* values (corresponds to relative accuracy */
+/* abs(TOLMUL*EPS) in the largest singular value. */
+/* abs(TOLMUL) should be between 1 and 1/EPS, and preferably */
+/* between 10 (for fast convergence) and .1/EPS */
+/* (for there to be some accuracy in the results). */
+/* Default is to lose at either one eighth or 2 of the */
+/* available decimal digits in each computed singular value */
+/* (whichever is smaller). */
+
+/* MAXITR INTEGER, default = 6 */
+/* MAXITR controls the maximum number of passes of the */
+/* algorithm through its inner loop. The algorithms stops */
+/* (and so fails to converge) if the number of passes */
+/* through the inner loop exceeds MAXITR*N**2. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lower = lsame_(uplo, "L");
+ if (! lsame_(uplo, "U") && ! lower) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ncvt < 0) {
+ *info = -3;
+ } else if (*nru < 0) {
+ *info = -4;
+ } else if (*ncc < 0) {
+ *info = -5;
+ } else if (*ncvt == 0 && *ldvt < 1 || *ncvt > 0 && *ldvt < max(1,*n)) {
+ *info = -9;
+ } else if (*ldu < max(1,*nru)) {
+ *info = -11;
+ } else if (*ncc == 0 && *ldc < 1 || *ncc > 0 && *ldc < max(1,*n)) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SBDSQR", &i__1);
+ return 0;
+ }
+ if (*n == 0) {
+ return 0;
+ }
+ if (*n == 1) {
+ goto L160;
+ }
+
+/* ROTATE is true if any singular vectors desired, false otherwise */
+
+ rotate = *ncvt > 0 || *nru > 0 || *ncc > 0;
+
+/* If no singular vectors desired, use qd algorithm */
+
+ if (! rotate) {
+ slasq1_(n, &d__[1], &e[1], &work[1], info);
+ return 0;
+ }
+
+ nm1 = *n - 1;
+ nm12 = nm1 + nm1;
+ nm13 = nm12 + nm1;
+ idir = 0;
+
+/* Get machine constants */
+
+ eps = slamch_("Epsilon");
+ unfl = slamch_("Safe minimum");
+
+/* If matrix lower bidiagonal, rotate to be upper bidiagonal */
+/* by applying Givens rotations on the left */
+
+ if (lower) {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ slartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ work[i__] = cs;
+ work[nm1 + i__] = sn;
+/* L10: */
+ }
+
+/* Update singular vectors if desired */
+
+ if (*nru > 0) {
+ slasr_("R", "V", "F", nru, n, &work[1], &work[*n], &u[u_offset],
+ ldu);
+ }
+ if (*ncc > 0) {
+ slasr_("L", "V", "F", n, ncc, &work[1], &work[*n], &c__[c_offset],
+ ldc);
+ }
+ }
+
+/* Compute singular values to relative accuracy TOL */
+/* (By setting TOL to be negative, algorithm will compute */
+/* singular values to absolute accuracy ABS(TOL)*norm(input matrix)) */
+
+/* Computing MAX */
+/* Computing MIN */
+ d__1 = (doublereal) eps;
+ r__3 = 100.f, r__4 = pow_dd(&d__1, &c_b15);
+ r__1 = 10.f, r__2 = dmin(r__3,r__4);
+ tolmul = dmax(r__1,r__2);
+ tol = tolmul * eps;
+
+/* Compute approximate maximum, minimum singular values */
+
+ smax = 0.f;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__2 = smax, r__3 = (r__1 = d__[i__], dabs(r__1));
+ smax = dmax(r__2,r__3);
+/* L20: */
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__2 = smax, r__3 = (r__1 = e[i__], dabs(r__1));
+ smax = dmax(r__2,r__3);
+/* L30: */
+ }
+ sminl = 0.f;
+ if (tol >= 0.f) {
+
+/* Relative accuracy desired */
+
+ sminoa = dabs(d__[1]);
+ if (sminoa == 0.f) {
+ goto L50;
+ }
+ mu = sminoa;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ mu = (r__2 = d__[i__], dabs(r__2)) * (mu / (mu + (r__1 = e[i__ -
+ 1], dabs(r__1))));
+ sminoa = dmin(sminoa,mu);
+ if (sminoa == 0.f) {
+ goto L50;
+ }
+/* L40: */
+ }
+L50:
+ sminoa /= sqrt((real) (*n));
+/* Computing MAX */
+ r__1 = tol * sminoa, r__2 = *n * 6 * *n * unfl;
+ thresh = dmax(r__1,r__2);
+ } else {
+
+/* Absolute accuracy desired */
+
+/* Computing MAX */
+ r__1 = dabs(tol) * smax, r__2 = *n * 6 * *n * unfl;
+ thresh = dmax(r__1,r__2);
+ }
+
+/* Prepare for main iteration loop for the singular values */
+/* (MAXIT is the maximum number of passes through the inner */
+/* loop permitted before nonconvergence signalled.) */
+
+ maxit = *n * 6 * *n;
+ iter = 0;
+ oldll = -1;
+ oldm = -1;
+
+/* M points to last element of unconverged part of matrix */
+
+ m = *n;
+
+/* Begin main iteration loop */
+
+L60:
+
+/* Check for convergence or exceeding iteration count */
+
+ if (m <= 1) {
+ goto L160;
+ }
+ if (iter > maxit) {
+ goto L200;
+ }
+
+/* Find diagonal block of matrix to work on */
+
+ if (tol < 0.f && (r__1 = d__[m], dabs(r__1)) <= thresh) {
+ d__[m] = 0.f;
+ }
+ smax = (r__1 = d__[m], dabs(r__1));
+ smin = smax;
+ i__1 = m - 1;
+ for (lll = 1; lll <= i__1; ++lll) {
+ ll = m - lll;
+ abss = (r__1 = d__[ll], dabs(r__1));
+ abse = (r__1 = e[ll], dabs(r__1));
+ if (tol < 0.f && abss <= thresh) {
+ d__[ll] = 0.f;
+ }
+ if (abse <= thresh) {
+ goto L80;
+ }
+ smin = dmin(smin,abss);
+/* Computing MAX */
+ r__1 = max(smax,abss);
+ smax = dmax(r__1,abse);
+/* L70: */
+ }
+ ll = 0;
+ goto L90;
+L80:
+ e[ll] = 0.f;
+
+/* Matrix splits since E(LL) = 0 */
+
+ if (ll == m - 1) {
+
+/* Convergence of bottom singular value, return to top of loop */
+
+ --m;
+ goto L60;
+ }
+L90:
+ ++ll;
+
+/* E(LL) through E(M-1) are nonzero, E(LL-1) is zero */
+
+ if (ll == m - 1) {
+
+/* 2 by 2 block, handle separately */
+
+ slasv2_(&d__[m - 1], &e[m - 1], &d__[m], &sigmn, &sigmx, &sinr, &cosr,
+ &sinl, &cosl);
+ d__[m - 1] = sigmx;
+ e[m - 1] = 0.f;
+ d__[m] = sigmn;
+
+/* Compute singular vectors, if desired */
+
+ if (*ncvt > 0) {
+ srot_(ncvt, &vt[m - 1 + vt_dim1], ldvt, &vt[m + vt_dim1], ldvt, &
+ cosr, &sinr);
+ }
+ if (*nru > 0) {
+ srot_(nru, &u[(m - 1) * u_dim1 + 1], &c__1, &u[m * u_dim1 + 1], &
+ c__1, &cosl, &sinl);
+ }
+ if (*ncc > 0) {
+ srot_(ncc, &c__[m - 1 + c_dim1], ldc, &c__[m + c_dim1], ldc, &
+ cosl, &sinl);
+ }
+ m += -2;
+ goto L60;
+ }
+
+/* If working on new submatrix, choose shift direction */
+/* (from larger end diagonal element towards smaller) */
+
+ if (ll > oldm || m < oldll) {
+ if ((r__1 = d__[ll], dabs(r__1)) >= (r__2 = d__[m], dabs(r__2))) {
+
+/* Chase bulge from top (big end) to bottom (small end) */
+
+ idir = 1;
+ } else {
+
+/* Chase bulge from bottom (big end) to top (small end) */
+
+ idir = 2;
+ }
+ }
+
+/* Apply convergence tests */
+
+ if (idir == 1) {
+
+/* Run convergence test in forward direction */
+/* First apply standard test to bottom of matrix */
+
+ if ((r__2 = e[m - 1], dabs(r__2)) <= dabs(tol) * (r__1 = d__[m], dabs(
+ r__1)) || tol < 0.f && (r__3 = e[m - 1], dabs(r__3)) <=
+ thresh) {
+ e[m - 1] = 0.f;
+ goto L60;
+ }
+
+ if (tol >= 0.f) {
+
+/* If relative accuracy desired, */
+/* apply convergence criterion forward */
+
+ mu = (r__1 = d__[ll], dabs(r__1));
+ sminl = mu;
+ i__1 = m - 1;
+ for (lll = ll; lll <= i__1; ++lll) {
+ if ((r__1 = e[lll], dabs(r__1)) <= tol * mu) {
+ e[lll] = 0.f;
+ goto L60;
+ }
+ mu = (r__2 = d__[lll + 1], dabs(r__2)) * (mu / (mu + (r__1 =
+ e[lll], dabs(r__1))));
+ sminl = dmin(sminl,mu);
+/* L100: */
+ }
+ }
+
+ } else {
+
+/* Run convergence test in backward direction */
+/* First apply standard test to top of matrix */
+
+ if ((r__2 = e[ll], dabs(r__2)) <= dabs(tol) * (r__1 = d__[ll], dabs(
+ r__1)) || tol < 0.f && (r__3 = e[ll], dabs(r__3)) <= thresh) {
+ e[ll] = 0.f;
+ goto L60;
+ }
+
+ if (tol >= 0.f) {
+
+/* If relative accuracy desired, */
+/* apply convergence criterion backward */
+
+ mu = (r__1 = d__[m], dabs(r__1));
+ sminl = mu;
+ i__1 = ll;
+ for (lll = m - 1; lll >= i__1; --lll) {
+ if ((r__1 = e[lll], dabs(r__1)) <= tol * mu) {
+ e[lll] = 0.f;
+ goto L60;
+ }
+ mu = (r__2 = d__[lll], dabs(r__2)) * (mu / (mu + (r__1 = e[
+ lll], dabs(r__1))));
+ sminl = dmin(sminl,mu);
+/* L110: */
+ }
+ }
+ }
+ oldll = ll;
+ oldm = m;
+
+/* Compute shift. First, test if shifting would ruin relative */
+/* accuracy, and if so set the shift to zero. */
+
+/* Computing MAX */
+ r__1 = eps, r__2 = tol * .01f;
+ if (tol >= 0.f && *n * tol * (sminl / smax) <= dmax(r__1,r__2)) {
+
+/* Use a zero shift to avoid loss of relative accuracy */
+
+ shift = 0.f;
+ } else {
+
+/* Compute the shift from 2-by-2 block at end of matrix */
+
+ if (idir == 1) {
+ sll = (r__1 = d__[ll], dabs(r__1));
+ slas2_(&d__[m - 1], &e[m - 1], &d__[m], &shift, &r__);
+ } else {
+ sll = (r__1 = d__[m], dabs(r__1));
+ slas2_(&d__[ll], &e[ll], &d__[ll + 1], &shift, &r__);
+ }
+
+/* Test if shift negligible, and if so set to zero */
+
+ if (sll > 0.f) {
+/* Computing 2nd power */
+ r__1 = shift / sll;
+ if (r__1 * r__1 < eps) {
+ shift = 0.f;
+ }
+ }
+ }
+
+/* Increment iteration count */
+
+ iter = iter + m - ll;
+
+/* If SHIFT = 0, do simplified QR iteration */
+
+ if (shift == 0.f) {
+ if (idir == 1) {
+
+/* Chase bulge from top to bottom */
+/* Save cosines and sines for later singular vector updates */
+
+ cs = 1.f;
+ oldcs = 1.f;
+ i__1 = m - 1;
+ for (i__ = ll; i__ <= i__1; ++i__) {
+ r__1 = d__[i__] * cs;
+ slartg_(&r__1, &e[i__], &cs, &sn, &r__);
+ if (i__ > ll) {
+ e[i__ - 1] = oldsn * r__;
+ }
+ r__1 = oldcs * r__;
+ r__2 = d__[i__ + 1] * sn;
+ slartg_(&r__1, &r__2, &oldcs, &oldsn, &d__[i__]);
+ work[i__ - ll + 1] = cs;
+ work[i__ - ll + 1 + nm1] = sn;
+ work[i__ - ll + 1 + nm12] = oldcs;
+ work[i__ - ll + 1 + nm13] = oldsn;
+/* L120: */
+ }
+ h__ = d__[m] * cs;
+ d__[m] = h__ * oldcs;
+ e[m - 1] = h__ * oldsn;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ slasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[
+ ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ slasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13
+ + 1], &u[ll * u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ slasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13
+ + 1], &c__[ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((r__1 = e[m - 1], dabs(r__1)) <= thresh) {
+ e[m - 1] = 0.f;
+ }
+
+ } else {
+
+/* Chase bulge from bottom to top */
+/* Save cosines and sines for later singular vector updates */
+
+ cs = 1.f;
+ oldcs = 1.f;
+ i__1 = ll + 1;
+ for (i__ = m; i__ >= i__1; --i__) {
+ r__1 = d__[i__] * cs;
+ slartg_(&r__1, &e[i__ - 1], &cs, &sn, &r__);
+ if (i__ < m) {
+ e[i__] = oldsn * r__;
+ }
+ r__1 = oldcs * r__;
+ r__2 = d__[i__ - 1] * sn;
+ slartg_(&r__1, &r__2, &oldcs, &oldsn, &d__[i__]);
+ work[i__ - ll] = cs;
+ work[i__ - ll + nm1] = -sn;
+ work[i__ - ll + nm12] = oldcs;
+ work[i__ - ll + nm13] = -oldsn;
+/* L130: */
+ }
+ h__ = d__[ll] * cs;
+ d__[ll] = h__ * oldcs;
+ e[ll] = h__ * oldsn;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ slasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[
+ nm13 + 1], &vt[ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ slasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll *
+ u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ slasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[
+ ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((r__1 = e[ll], dabs(r__1)) <= thresh) {
+ e[ll] = 0.f;
+ }
+ }
+ } else {
+
+/* Use nonzero shift */
+
+ if (idir == 1) {
+
+/* Chase bulge from top to bottom */
+/* Save cosines and sines for later singular vector updates */
+
+ f = ((r__1 = d__[ll], dabs(r__1)) - shift) * (r_sign(&c_b49, &d__[
+ ll]) + shift / d__[ll]);
+ g = e[ll];
+ i__1 = m - 1;
+ for (i__ = ll; i__ <= i__1; ++i__) {
+ slartg_(&f, &g, &cosr, &sinr, &r__);
+ if (i__ > ll) {
+ e[i__ - 1] = r__;
+ }
+ f = cosr * d__[i__] + sinr * e[i__];
+ e[i__] = cosr * e[i__] - sinr * d__[i__];
+ g = sinr * d__[i__ + 1];
+ d__[i__ + 1] = cosr * d__[i__ + 1];
+ slartg_(&f, &g, &cosl, &sinl, &r__);
+ d__[i__] = r__;
+ f = cosl * e[i__] + sinl * d__[i__ + 1];
+ d__[i__ + 1] = cosl * d__[i__ + 1] - sinl * e[i__];
+ if (i__ < m - 1) {
+ g = sinl * e[i__ + 1];
+ e[i__ + 1] = cosl * e[i__ + 1];
+ }
+ work[i__ - ll + 1] = cosr;
+ work[i__ - ll + 1 + nm1] = sinr;
+ work[i__ - ll + 1 + nm12] = cosl;
+ work[i__ - ll + 1 + nm13] = sinl;
+/* L140: */
+ }
+ e[m - 1] = f;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ slasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[
+ ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ slasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13
+ + 1], &u[ll * u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ slasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13
+ + 1], &c__[ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((r__1 = e[m - 1], dabs(r__1)) <= thresh) {
+ e[m - 1] = 0.f;
+ }
+
+ } else {
+
+/* Chase bulge from bottom to top */
+/* Save cosines and sines for later singular vector updates */
+
+ f = ((r__1 = d__[m], dabs(r__1)) - shift) * (r_sign(&c_b49, &d__[
+ m]) + shift / d__[m]);
+ g = e[m - 1];
+ i__1 = ll + 1;
+ for (i__ = m; i__ >= i__1; --i__) {
+ slartg_(&f, &g, &cosr, &sinr, &r__);
+ if (i__ < m) {
+ e[i__] = r__;
+ }
+ f = cosr * d__[i__] + sinr * e[i__ - 1];
+ e[i__ - 1] = cosr * e[i__ - 1] - sinr * d__[i__];
+ g = sinr * d__[i__ - 1];
+ d__[i__ - 1] = cosr * d__[i__ - 1];
+ slartg_(&f, &g, &cosl, &sinl, &r__);
+ d__[i__] = r__;
+ f = cosl * e[i__ - 1] + sinl * d__[i__ - 1];
+ d__[i__ - 1] = cosl * d__[i__ - 1] - sinl * e[i__ - 1];
+ if (i__ > ll + 1) {
+ g = sinl * e[i__ - 2];
+ e[i__ - 2] = cosl * e[i__ - 2];
+ }
+ work[i__ - ll] = cosr;
+ work[i__ - ll + nm1] = -sinr;
+ work[i__ - ll + nm12] = cosl;
+ work[i__ - ll + nm13] = -sinl;
+/* L150: */
+ }
+ e[ll] = f;
+
+/* Test convergence */
+
+ if ((r__1 = e[ll], dabs(r__1)) <= thresh) {
+ e[ll] = 0.f;
+ }
+
+/* Update singular vectors if desired */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ slasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[
+ nm13 + 1], &vt[ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ slasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll *
+ u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ slasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[
+ ll + c_dim1], ldc);
+ }
+ }
+ }
+
+/* QR iteration finished, go back and check convergence */
+
+ goto L60;
+
+/* All singular values converged, so make them positive */
+
+L160:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] < 0.f) {
+ d__[i__] = -d__[i__];
+
+/* Change sign of singular vectors, if desired */
+
+ if (*ncvt > 0) {
+ sscal_(ncvt, &c_b72, &vt[i__ + vt_dim1], ldvt);
+ }
+ }
+/* L170: */
+ }
+
+/* Sort the singular values into decreasing order (insertion sort on */
+/* singular values, but only one transposition per singular vector) */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Scan for smallest D(I) */
+
+ isub = 1;
+ smin = d__[1];
+ i__2 = *n + 1 - i__;
+ for (j = 2; j <= i__2; ++j) {
+ if (d__[j] <= smin) {
+ isub = j;
+ smin = d__[j];
+ }
+/* L180: */
+ }
+ if (isub != *n + 1 - i__) {
+
+/* Swap singular values and vectors */
+
+ d__[isub] = d__[*n + 1 - i__];
+ d__[*n + 1 - i__] = smin;
+ if (*ncvt > 0) {
+ sswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[*n + 1 - i__ +
+ vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ sswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[(*n + 1 - i__) *
+ u_dim1 + 1], &c__1);
+ }
+ if (*ncc > 0) {
+ sswap_(ncc, &c__[isub + c_dim1], ldc, &c__[*n + 1 - i__ +
+ c_dim1], ldc);
+ }
+ }
+/* L190: */
+ }
+ goto L220;
+
+/* Maximum number of iterations exceeded, failure to converge */
+
+L200:
+ *info = 0;
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (e[i__] != 0.f) {
+ ++(*info);
+ }
+/* L210: */
+ }
+L220:
+ return 0;
+
+/* End of SBDSQR */
+
+} /* sbdsqr_ */
diff --git a/contrib/libs/clapack/scsum1.c b/contrib/libs/clapack/scsum1.c
new file mode 100644
index 0000000000..6fd8f1f562
--- /dev/null
+++ b/contrib/libs/clapack/scsum1.c
@@ -0,0 +1,114 @@
+/* scsum1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+doublereal scsum1_(integer *n, complex *cx, integer *incx)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    real ret_val;
+
+    /* Builtin functions */
+    double c_abs(complex *);
+
+    /* Local variables */
+    integer i__, nincx;
+    real stemp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SCSUM1 takes the sum of the absolute values of a complex */
+/* vector and returns a single precision result. */
+
+/* Based on SCASUM from the Level 1 BLAS. */
+/* The change is to use the 'genuine' absolute value. */
+
+/* Contributed by Nick Higham for use with CLACON. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of elements in the vector CX. */
+
+/* CX (input) COMPLEX array, dimension (N) */
+/* The vector whose elements will be summed. */
+
+/* INCX (input) INTEGER */
+/* The spacing between successive values of CX. INCX > 0. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments: shift to 1-based indexing, Fortran style. */
+    --cx;
+
+    /* Function Body */
+    ret_val = 0.f;
+    stemp = 0.f;
+    if (*n <= 0) {
+	return ret_val;
+    }
+    if (*incx == 1) {
+	goto L20;
+    }
+
+/* CODE FOR INCREMENT NOT EQUAL TO 1 */
+
+/* NOTE(review): the loop guard below also covers INCX < 0, but the */
+/* contract above requires INCX > 0, so the backward branch is dead. */
+
+    nincx = *n * *incx;
+    i__1 = nincx;
+    i__2 = *incx;
+    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+
+/* NEXT LINE MODIFIED (vs. SCASUM): c_abs is the true complex modulus */
+/* sqrt(re^2 + im^2), not the cheaper |re| + |im| used by SCASUM. */
+
+	stemp += c_abs(&cx[i__]);
+/* L10: */
+    }
+    ret_val = stemp;
+    return ret_val;
+
+/* CODE FOR INCREMENT EQUAL TO 1 */
+
+L20:
+    i__2 = *n;
+    for (i__ = 1; i__ <= i__2; ++i__) {
+
+/* NEXT LINE MODIFIED (vs. SCASUM): true complex modulus, see above. */
+
+	stemp += c_abs(&cx[i__]);
+/* L30: */
+    }
+    ret_val = stemp;
+    return ret_val;
+
+/* End of SCSUM1 */
+
+} /* scsum1_ */
diff --git a/contrib/libs/clapack/sdisna.c b/contrib/libs/clapack/sdisna.c
new file mode 100644
index 0000000000..a47cb17984
--- /dev/null
+++ b/contrib/libs/clapack/sdisna.c
@@ -0,0 +1,228 @@
+/* sdisna.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sdisna_(char *job, integer *m, integer *n, real *d__,
+	real *sep, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    real r__1, r__2, r__3;
+
+    /* Local variables */
+    integer i__, k;
+    real eps;
+    logical decr, left, incr, sing, eigen;
+    extern logical lsame_(char *, char *);
+    real anorm;
+    logical right;
+    real oldgap;
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real newgap, thresh;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SDISNA computes the reciprocal condition numbers for the eigenvectors */
+/* of a real symmetric or complex Hermitian matrix or for the left or */
+/* right singular vectors of a general m-by-n matrix. The reciprocal */
+/* condition number is the 'gap' between the corresponding eigenvalue or */
+/* singular value and the nearest other one. */
+
+/* The bound on the error, measured by angle in radians, in the I-th */
+/* computed vector is given by */
+
+/* SLAMCH( 'E' ) * ( ANORM / SEP( I ) ) */
+
+/* where ANORM = 2-norm(A) = max( abs( D(j) ) ). SEP(I) is not allowed */
+/* to be smaller than SLAMCH( 'E' )*ANORM in order to limit the size of */
+/* the error bound. */
+
+/* SDISNA may also be used to compute error bounds for eigenvectors of */
+/* the generalized symmetric definite eigenproblem. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies for which problem the reciprocal condition numbers */
+/* should be computed: */
+/* = 'E': the eigenvectors of a symmetric/Hermitian matrix; */
+/* = 'L': the left singular vectors of a general matrix; */
+/* = 'R': the right singular vectors of a general matrix. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix. M >= 0. */
+
+/* N (input) INTEGER */
+/* If JOB = 'L' or 'R', the number of columns of the matrix, */
+/* in which case N >= 0. Ignored if JOB = 'E'. */
+
+/* D (input) REAL array, dimension (M) if JOB = 'E' */
+/* dimension (min(M,N)) if JOB = 'L' or 'R' */
+/* The eigenvalues (if JOB = 'E') or singular values (if JOB = */
+/* 'L' or 'R') of the matrix, in either increasing or decreasing */
+/* order. If singular values, they must be non-negative. */
+
+/* SEP (output) REAL array, dimension (M) if JOB = 'E' */
+/* dimension (min(M,N)) if JOB = 'L' or 'R' */
+/* The reciprocal condition numbers of the vectors. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments: shift to 1-based indexing, Fortran style. */
+    --sep;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+    eigen = lsame_(job, "E");
+    left = lsame_(job, "L");
+    right = lsame_(job, "R");
+    sing = left || right;
+    if (eigen) {
+	k = *m;
+    } else if (sing) {
+	k = min(*m,*n);
+    }
+    if (! eigen && ! sing) {
+	*info = -1;
+    } else if (*m < 0) {
+	*info = -2;
+    } else if (k < 0) {
+	*info = -3;
+    } else {
+
+/* Validate that D is monotone (entirely non-decreasing or entirely */
+/* non-increasing); for singular values additionally require the */
+/* extreme entry to be non-negative. */
+
+	incr = TRUE_;
+	decr = TRUE_;
+	i__1 = k - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (incr) {
+		incr = incr && d__[i__] <= d__[i__ + 1];
+	    }
+	    if (decr) {
+		decr = decr && d__[i__] >= d__[i__ + 1];
+	    }
+/* L10: */
+	}
+	if (sing && k > 0) {
+	    if (incr) {
+		incr = incr && 0.f <= d__[1];
+	    }
+	    if (decr) {
+		decr = decr && d__[k] >= 0.f;
+	    }
+	}
+	if (! (incr || decr)) {
+	    *info = -4;
+	}
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SDISNA", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (k == 0) {
+	return 0;
+    }
+
+/* Compute reciprocal condition numbers: for each entry, the gap to the */
+/* nearest neighbouring value in D. */
+
+    if (k == 1) {
+
+/* A single value has no neighbour: use SLAMCH('O') (overflow */
+/* threshold) as an effectively infinite gap. */
+
+	sep[1] = slamch_("O");
+    } else {
+	oldgap = (r__1 = d__[2] - d__[1], dabs(r__1));
+	sep[1] = oldgap;
+	i__1 = k - 1;
+	for (i__ = 2; i__ <= i__1; ++i__) {
+	    newgap = (r__1 = d__[i__ + 1] - d__[i__], dabs(r__1));
+	    sep[i__] = dmin(oldgap,newgap);
+	    oldgap = newgap;
+/* L20: */
+	}
+	sep[k] = oldgap;
+    }
+    if (sing) {
+
+/* For singular vectors of a rectangular matrix, also bound SEP by the */
+/* distance of the extreme singular value from zero (d(1) if the values */
+/* are increasing, d(k) if decreasing). */
+
+	if (left && *m > *n || right && *m < *n) {
+	    if (incr) {
+		sep[1] = dmin(sep[1],d__[1]);
+	    }
+	    if (decr) {
+/* Computing MIN */
+		r__1 = sep[k], r__2 = d__[k];
+		sep[k] = dmin(r__1,r__2);
+	    }
+	}
+    }
+
+/* Ensure that reciprocal condition numbers are not less than */
+/* threshold, in order to limit the size of the error bound */
+
+    eps = slamch_("E");
+    safmin = slamch_("S");
+/* Computing MAX */
+    r__2 = dabs(d__[1]), r__3 = (r__1 = d__[k], dabs(r__1));
+    anorm = dmax(r__2,r__3);
+    if (anorm == 0.f) {
+	thresh = eps;
+    } else {
+/* Computing MAX */
+	r__1 = eps * anorm;
+	thresh = dmax(r__1,safmin);
+    }
+    i__1 = k;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	r__1 = sep[i__];
+	sep[i__] = dmax(r__1,thresh);
+/* L30: */
+    }
+
+    return 0;
+
+/* End of SDISNA */
+
+} /* sdisna_ */
diff --git a/contrib/libs/clapack/sgbbrd.c b/contrib/libs/clapack/sgbbrd.c
new file mode 100644
index 0000000000..451b1be339
--- /dev/null
+++ b/contrib/libs/clapack/sgbbrd.c
@@ -0,0 +1,562 @@
+/* sgbbrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b8 = 0.f;
+static real c_b9 = 1.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int sgbbrd_(char *vect, integer *m, integer *n, integer *ncc,
+	integer *kl, integer *ku, real *ab, integer *ldab, real *d__, real *
+	e, real *q, integer *ldq, real *pt, integer *ldpt, real *c__, integer
+	*ldc, real *work, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, c_dim1, c_offset, pt_dim1, pt_offset, q_dim1,
+	    q_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7;
+
+    /* Local variables */
+    integer i__, j, l, j1, j2, kb;
+    real ra, rb, rc;
+    integer kk, ml, mn, nr, mu;
+    real rs;
+    integer kb1, ml0, mu0, klm, kun, nrt, klu1, inca;
+    extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+	    integer *, real *, real *);
+    extern logical lsame_(char *, char *);
+    logical wantb, wantc;
+    integer minmn;
+    logical wantq;
+    extern /* Subroutine */ int xerbla_(char *, integer *), slaset_(
+	    char *, integer *, integer *, real *, real *, real *, integer *), slartg_(real *, real *, real *, real *, real *), slargv_(
+	    integer *, real *, integer *, real *, integer *, real *, integer *
+), slartv_(integer *, real *, integer *, real *, integer *, real *
+, real *, integer *);
+    logical wantpt;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGBBRD reduces a real general m-by-n band matrix A to upper */
+/* bidiagonal form B by an orthogonal transformation: Q' * A * P = B. */
+
+/* The routine computes B, and optionally forms Q or P', or computes */
+/* Q'*C for a given matrix C. */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* Specifies whether or not the matrices Q and P' are to be */
+/* formed. */
+/* = 'N': do not form Q or P'; */
+/* = 'Q': form Q only; */
+/* = 'P': form P' only; */
+/* = 'B': form both. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NCC (input) INTEGER */
+/* The number of columns of the matrix C. NCC >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals of the matrix A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals of the matrix A. KU >= 0. */
+
+/* AB (input/output) REAL array, dimension (LDAB,N) */
+/* On entry, the m-by-n band matrix A, stored in rows 1 to */
+/* KL+KU+1. The j-th column of A is stored in the j-th column of */
+/* the array AB as follows: */
+/* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl). */
+/* On exit, A is overwritten by values generated during the */
+/* reduction. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array A. LDAB >= KL+KU+1. */
+
+/* D (output) REAL array, dimension (min(M,N)) */
+/* The diagonal elements of the bidiagonal matrix B. */
+
+/* E (output) REAL array, dimension (min(M,N)-1) */
+/* The superdiagonal elements of the bidiagonal matrix B. */
+
+/* Q (output) REAL array, dimension (LDQ,M) */
+/* If VECT = 'Q' or 'B', the m-by-m orthogonal matrix Q. */
+/* If VECT = 'N' or 'P', the array Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. */
+/* LDQ >= max(1,M) if VECT = 'Q' or 'B'; LDQ >= 1 otherwise. */
+
+/* PT (output) REAL array, dimension (LDPT,N) */
+/* If VECT = 'P' or 'B', the n-by-n orthogonal matrix P'. */
+/* If VECT = 'N' or 'Q', the array PT is not referenced. */
+
+/* LDPT (input) INTEGER */
+/* The leading dimension of the array PT. */
+/* LDPT >= max(1,N) if VECT = 'P' or 'B'; LDPT >= 1 otherwise. */
+
+/* C (input/output) REAL array, dimension (LDC,NCC) */
+/* On entry, an m-by-ncc matrix C. */
+/* On exit, C is overwritten by Q'*C. */
+/* C is not referenced if NCC = 0. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. */
+/* LDC >= max(1,M) if NCC > 0; LDC >= 1 if NCC = 0. */
+
+/* WORK (workspace) REAL array, dimension (2*max(M,N)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments: shift all arrays to 1-based indexing. */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --d__;
+    --e;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    pt_dim1 = *ldpt;
+    pt_offset = 1 + pt_dim1;
+    pt -= pt_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+    --work;
+
+    /* Function Body */
+    wantb = lsame_(vect, "B");
+    wantq = lsame_(vect, "Q") || wantb;
+    wantpt = lsame_(vect, "P") || wantb;
+    wantc = *ncc > 0;
+    klu1 = *kl + *ku + 1;
+    *info = 0;
+    if (! wantq && ! wantpt && ! lsame_(vect, "N")) {
+	*info = -1;
+    } else if (*m < 0) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ncc < 0) {
+	*info = -4;
+    } else if (*kl < 0) {
+	*info = -5;
+    } else if (*ku < 0) {
+	*info = -6;
+    } else if (*ldab < klu1) {
+	*info = -8;
+    } else if (*ldq < 1 || wantq && *ldq < max(1,*m)) {
+	*info = -12;
+    } else if (*ldpt < 1 || wantpt && *ldpt < max(1,*n)) {
+	*info = -14;
+    } else if (*ldc < 1 || wantc && *ldc < max(1,*m)) {
+	*info = -16;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGBBRD", &i__1);
+	return 0;
+    }
+
+/* Initialize Q and P' to the unit matrix, if needed */
+
+    if (wantq) {
+	slaset_("Full", m, m, &c_b8, &c_b9, &q[q_offset], ldq);
+    }
+    if (wantpt) {
+	slaset_("Full", n, n, &c_b8, &c_b9, &pt[pt_offset], ldpt);
+    }
+
+/* Quick return if possible. */
+
+    if (*m == 0 || *n == 0) {
+	return 0;
+    }
+
+    minmn = min(*m,*n);
+
+    if (*kl + *ku > 1) {
+
+/* Reduce to upper bidiagonal form if KU > 0; if KU = 0, reduce */
+/* first to lower bidiagonal form and then transform to upper */
+/* bidiagonal */
+
+	if (*ku > 0) {
+	    ml0 = 1;
+	    mu0 = 2;
+	} else {
+	    ml0 = 2;
+	    mu0 = 1;
+	}
+
+/* Wherever possible, plane rotations are generated and applied in */
+/* vector operations of length NR over the index set J1:J2:KLU1. */
+
+/* The sines of the plane rotations are stored in WORK(1:max(m,n)) */
+/* and the cosines in WORK(max(m,n)+1:2*max(m,n)). */
+
+	mn = max(*m,*n);
+/* Computing MIN */
+	i__1 = *m - 1;
+	klm = min(i__1,*kl);
+/* Computing MIN */
+	i__1 = *n - 1;
+	kun = min(i__1,*ku);
+	kb = klm + kun;
+	kb1 = kb + 1;
+	inca = kb1 * *ldab;
+	nr = 0;
+	j1 = klm + 2;
+	j2 = 1 - kun;
+
+/* Main loop: annihilate the i-th column below the diagonal and the */
+/* i-th row beyond the superdiagonal, for i = 1 .. min(m,n). */
+
+	i__1 = minmn;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Reduce i-th column and i-th row of matrix to bidiagonal form */
+
+	    ml = klm + 1;
+	    mu = kun + 1;
+	    i__2 = kb;
+	    for (kk = 1; kk <= i__2; ++kk) {
+		j1 += kb;
+		j2 += kb;
+
+/* generate plane rotations to annihilate nonzero elements */
+/* which have been created below the band */
+
+		if (nr > 0) {
+		    slargv_(&nr, &ab[klu1 + (j1 - klm - 1) * ab_dim1], &inca,
+			    &work[j1], &kb1, &work[mn + j1], &kb1);
+		}
+
+/* apply plane rotations from the left */
+
+		i__3 = kb;
+		for (l = 1; l <= i__3; ++l) {
+		    if (j2 - klm + l - 1 > *n) {
+			nrt = nr - 1;
+		    } else {
+			nrt = nr;
+		    }
+		    if (nrt > 0) {
+			slartv_(&nrt, &ab[klu1 - l + (j1 - klm + l - 1) *
+				ab_dim1], &inca, &ab[klu1 - l + 1 + (j1 - klm
+				+ l - 1) * ab_dim1], &inca, &work[mn + j1], &
+				work[j1], &kb1);
+		    }
+/* L10: */
+		}
+
+		if (ml > ml0) {
+		    if (ml <= *m - i__ + 1) {
+
+/* generate plane rotation to annihilate a(i+ml-1,i) */
+/* within the band, and apply rotation from the left */
+
+			slartg_(&ab[*ku + ml - 1 + i__ * ab_dim1], &ab[*ku +
+				ml + i__ * ab_dim1], &work[mn + i__ + ml - 1],
+				&work[i__ + ml - 1], &ra);
+			ab[*ku + ml - 1 + i__ * ab_dim1] = ra;
+			if (i__ < *n) {
+/* Computing MIN */
+			    i__4 = *ku + ml - 2, i__5 = *n - i__;
+			    i__3 = min(i__4,i__5);
+			    i__6 = *ldab - 1;
+			    i__7 = *ldab - 1;
+			    srot_(&i__3, &ab[*ku + ml - 2 + (i__ + 1) *
+				    ab_dim1], &i__6, &ab[*ku + ml - 1 + (i__
+				    + 1) * ab_dim1], &i__7, &work[mn + i__ +
+				    ml - 1], &work[i__ + ml - 1]);
+			}
+		    }
+		    ++nr;
+		    j1 -= kb1;
+		}
+
+		if (wantq) {
+
+/* accumulate product of plane rotations in Q */
+
+		    i__3 = j2;
+		    i__4 = kb1;
+		    for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4)
+			    {
+			srot_(m, &q[(j - 1) * q_dim1 + 1], &c__1, &q[j *
+				q_dim1 + 1], &c__1, &work[mn + j], &work[j]);
+/* L20: */
+		    }
+		}
+
+		if (wantc) {
+
+/* apply plane rotations to C */
+
+		    i__4 = j2;
+		    i__3 = kb1;
+		    for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3)
+			    {
+			srot_(ncc, &c__[j - 1 + c_dim1], ldc, &c__[j + c_dim1]
+, ldc, &work[mn + j], &work[j]);
+/* L30: */
+		    }
+		}
+
+		if (j2 + kun > *n) {
+
+/* adjust J2 to keep within the bounds of the matrix */
+
+		    --nr;
+		    j2 -= kb1;
+		}
+
+		i__3 = j2;
+		i__4 = kb1;
+		for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+
+/* create nonzero element a(j-1,j+ku) above the band */
+/* and store it in WORK(n+1:2*n) */
+
+		    work[j + kun] = work[j] * ab[(j + kun) * ab_dim1 + 1];
+		    ab[(j + kun) * ab_dim1 + 1] = work[mn + j] * ab[(j + kun)
+			    * ab_dim1 + 1];
+/* L40: */
+		}
+
+/* generate plane rotations to annihilate nonzero elements */
+/* which have been generated above the band */
+
+		if (nr > 0) {
+		    slargv_(&nr, &ab[(j1 + kun - 1) * ab_dim1 + 1], &inca, &
+			    work[j1 + kun], &kb1, &work[mn + j1 + kun], &kb1);
+		}
+
+/* apply plane rotations from the right */
+
+		i__4 = kb;
+		for (l = 1; l <= i__4; ++l) {
+		    if (j2 + l - 1 > *m) {
+			nrt = nr - 1;
+		    } else {
+			nrt = nr;
+		    }
+		    if (nrt > 0) {
+			slartv_(&nrt, &ab[l + 1 + (j1 + kun - 1) * ab_dim1], &
+				inca, &ab[l + (j1 + kun) * ab_dim1], &inca, &
+				work[mn + j1 + kun], &work[j1 + kun], &kb1);
+		    }
+/* L50: */
+		}
+
+		if (ml == ml0 && mu > mu0) {
+		    if (mu <= *n - i__ + 1) {
+
+/* generate plane rotation to annihilate a(i,i+mu-1) */
+/* within the band, and apply rotation from the right */
+
+			slartg_(&ab[*ku - mu + 3 + (i__ + mu - 2) * ab_dim1],
+				&ab[*ku - mu + 2 + (i__ + mu - 1) * ab_dim1],
+				&work[mn + i__ + mu - 1], &work[i__ + mu - 1],
+				&ra);
+			ab[*ku - mu + 3 + (i__ + mu - 2) * ab_dim1] = ra;
+/* Computing MIN */
+			i__3 = *kl + mu - 2, i__5 = *m - i__;
+			i__4 = min(i__3,i__5);
+			srot_(&i__4, &ab[*ku - mu + 4 + (i__ + mu - 2) *
+				ab_dim1], &c__1, &ab[*ku - mu + 3 + (i__ + mu
+				- 1) * ab_dim1], &c__1, &work[mn + i__ + mu -
+				1], &work[i__ + mu - 1]);
+		    }
+		    ++nr;
+		    j1 -= kb1;
+		}
+
+		if (wantpt) {
+
+/* accumulate product of plane rotations in P' */
+
+		    i__4 = j2;
+		    i__3 = kb1;
+		    for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3)
+			    {
+			srot_(n, &pt[j + kun - 1 + pt_dim1], ldpt, &pt[j +
+				kun + pt_dim1], ldpt, &work[mn + j + kun], &
+				work[j + kun]);
+/* L60: */
+		    }
+		}
+
+		if (j2 + kb > *m) {
+
+/* adjust J2 to keep within the bounds of the matrix */
+
+		    --nr;
+		    j2 -= kb1;
+		}
+
+		i__3 = j2;
+		i__4 = kb1;
+		for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+
+/* create nonzero element a(j+kl+ku,j+ku-1) below the */
+/* band and store it in WORK(1:n) */
+
+		    work[j + kb] = work[j + kun] * ab[klu1 + (j + kun) *
+			    ab_dim1];
+		    ab[klu1 + (j + kun) * ab_dim1] = work[mn + j + kun] * ab[
+			    klu1 + (j + kun) * ab_dim1];
+/* L70: */
+		}
+
+		if (ml > ml0) {
+		    --ml;
+		} else {
+		    --mu;
+		}
+/* L80: */
+	    }
+/* L90: */
+	}
+    }
+
+    if (*ku == 0 && *kl > 0) {
+
+/* A has been reduced to lower bidiagonal form */
+
+/* Transform lower bidiagonal form to upper bidiagonal by applying */
+/* plane rotations from the left, storing diagonal elements in D */
+/* and off-diagonal elements in E */
+
+/* Computing MIN */
+	i__2 = *m - 1;
+	i__1 = min(i__2,*n);
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    slartg_(&ab[i__ * ab_dim1 + 1], &ab[i__ * ab_dim1 + 2], &rc, &rs,
+		    &ra);
+	    d__[i__] = ra;
+	    if (i__ < *n) {
+		e[i__] = rs * ab[(i__ + 1) * ab_dim1 + 1];
+		ab[(i__ + 1) * ab_dim1 + 1] = rc * ab[(i__ + 1) * ab_dim1 + 1]
+			;
+	    }
+	    if (wantq) {
+		srot_(m, &q[i__ * q_dim1 + 1], &c__1, &q[(i__ + 1) * q_dim1 +
+			1], &c__1, &rc, &rs);
+	    }
+	    if (wantc) {
+		srot_(ncc, &c__[i__ + c_dim1], ldc, &c__[i__ + 1 + c_dim1],
+			ldc, &rc, &rs);
+	    }
+/* L100: */
+	}
+	if (*m <= *n) {
+	    d__[*m] = ab[*m * ab_dim1 + 1];
+	}
+    } else if (*ku > 0) {
+
+/* A has been reduced to upper bidiagonal form */
+
+	if (*m < *n) {
+
+/* Annihilate a(m,m+1) by applying plane rotations from the */
+/* right, storing diagonal elements in D and off-diagonal */
+/* elements in E */
+
+	    rb = ab[*ku + (*m + 1) * ab_dim1];
+	    for (i__ = *m; i__ >= 1; --i__) {
+		slartg_(&ab[*ku + 1 + i__ * ab_dim1], &rb, &rc, &rs, &ra);
+		d__[i__] = ra;
+		if (i__ > 1) {
+		    rb = -rs * ab[*ku + i__ * ab_dim1];
+		    e[i__ - 1] = rc * ab[*ku + i__ * ab_dim1];
+		}
+		if (wantpt) {
+		    srot_(n, &pt[i__ + pt_dim1], ldpt, &pt[*m + 1 + pt_dim1],
+			    ldpt, &rc, &rs);
+		}
+/* L110: */
+	    }
+	} else {
+
+/* Copy off-diagonal elements to E and diagonal elements to D */
+
+	    i__1 = minmn - 1;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		e[i__] = ab[*ku + (i__ + 1) * ab_dim1];
+/* L120: */
+	    }
+	    i__1 = minmn;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		d__[i__] = ab[*ku + 1 + i__ * ab_dim1];
+/* L130: */
+	    }
+	}
+    } else {
+
+/* A is diagonal. Set elements of E to zero and copy diagonal */
+/* elements to D. */
+
+	i__1 = minmn - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    e[i__] = 0.f;
+/* L140: */
+	}
+	i__1 = minmn;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    d__[i__] = ab[i__ * ab_dim1 + 1];
+/* L150: */
+	}
+    }
+    return 0;
+
+/* End of SGBBRD */
+
+} /* sgbbrd_ */
diff --git a/contrib/libs/clapack/sgbcon.c b/contrib/libs/clapack/sgbcon.c
new file mode 100644
index 0000000000..78fd7c16fc
--- /dev/null
+++ b/contrib/libs/clapack/sgbcon.c
@@ -0,0 +1,282 @@
+/* sgbcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sgbcon_(char *norm, integer *n, integer *kl, integer *ku,
+ real *ab, integer *ldab, integer *ipiv, real *anorm, real *rcond,
+ real *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3;
+ real r__1;
+
+ /* Local variables */
+ integer j;
+ real t;
+ integer kd, lm, jp, ix, kase;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ integer kase1;
+ real scale;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ logical lnoti;
+ extern /* Subroutine */ int srscl_(integer *, real *, real *, integer *),
+ saxpy_(integer *, real *, real *, integer *, real *, integer *),
+ slacn2_(integer *, real *, real *, integer *, real *, integer *,
+ integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ real ainvnm;
+ extern /* Subroutine */ int slatbs_(char *, char *, char *, char *,
+ integer *, integer *, real *, integer *, real *, real *, real *,
+ integer *);
+ logical onenrm;
+ char normin[1];
+ real smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGBCON estimates the reciprocal of the condition number of a real */
+/* general band matrix A, in either the 1-norm or the infinity-norm, */
+/* using the LU factorization computed by SGBTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as */
+/* RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input) REAL array, dimension (LDAB,N) */
+/* Details of the LU factorization of the band matrix A, as */
+/* computed by SGBTRF. U is stored as an upper triangular band */
+/* matrix with KL+KU superdiagonals in rows 1 to KL+KU+1, and */
+/* the multipliers used during the factorization are stored in */
+/* rows KL+KU+2 to 2*KL+KU+1. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= 2*KL+KU+1. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= N, row i of the matrix was */
+/* interchanged with row IPIV(i). */
+
+/* ANORM (input) REAL */
+/* If NORM = '1' or 'O', the 1-norm of the original matrix A. */
+/* If NORM = 'I', the infinity-norm of the original matrix A. */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --ipiv;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+ if (! onenrm && ! lsame_(norm, "I")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*ldab < (*kl << 1) + *ku + 1) {
+ *info = -6;
+ } else if (*anorm < 0.f) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGBCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.f;
+ if (*n == 0) {
+ *rcond = 1.f;
+ return 0;
+ } else if (*anorm == 0.f) {
+ return 0;
+ }
+
+ smlnum = slamch_("Safe minimum");
+
+/* Estimate the norm of inv(A). */
+
+ ainvnm = 0.f;
+ *(unsigned char *)normin = 'N';
+ if (onenrm) {
+ kase1 = 1;
+ } else {
+ kase1 = 2;
+ }
+ kd = *kl + *ku + 1;
+ lnoti = *kl > 0;
+ kase = 0;
+L10:
+ slacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+ if (kase == kase1) {
+
+/* Multiply by inv(L). */
+
+ if (lnoti) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *kl, i__3 = *n - j;
+ lm = min(i__2,i__3);
+ jp = ipiv[j];
+ t = work[jp];
+ if (jp != j) {
+ work[jp] = work[j];
+ work[j] = t;
+ }
+ r__1 = -t;
+ saxpy_(&lm, &r__1, &ab[kd + 1 + j * ab_dim1], &c__1, &
+ work[j + 1], &c__1);
+/* L20: */
+ }
+ }
+
+/* Multiply by inv(U). */
+
+ i__1 = *kl + *ku;
+ slatbs_("Upper", "No transpose", "Non-unit", normin, n, &i__1, &
+ ab[ab_offset], ldab, &work[1], &scale, &work[(*n << 1) +
+ 1], info);
+ } else {
+
+/* Multiply by inv(U'). */
+
+ i__1 = *kl + *ku;
+ slatbs_("Upper", "Transpose", "Non-unit", normin, n, &i__1, &ab[
+ ab_offset], ldab, &work[1], &scale, &work[(*n << 1) + 1],
+ info);
+
+/* Multiply by inv(L'). */
+
+ if (lnoti) {
+ for (j = *n - 1; j >= 1; --j) {
+/* Computing MIN */
+ i__1 = *kl, i__2 = *n - j;
+ lm = min(i__1,i__2);
+ work[j] -= sdot_(&lm, &ab[kd + 1 + j * ab_dim1], &c__1, &
+ work[j + 1], &c__1);
+ jp = ipiv[j];
+ if (jp != j) {
+ t = work[jp];
+ work[jp] = work[j];
+ work[j] = t;
+ }
+/* L30: */
+ }
+ }
+ }
+
+/* Divide X by 1/SCALE if doing so will not cause overflow. */
+
+ *(unsigned char *)normin = 'Y';
+ if (scale != 1.f) {
+ ix = isamax_(n, &work[1], &c__1);
+ if (scale < (r__1 = work[ix], dabs(r__1)) * smlnum || scale ==
+ 0.f) {
+ goto L40;
+ }
+ srscl_(n, &scale, &work[1], &c__1);
+ }
+ goto L10;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.f) {
+ *rcond = 1.f / ainvnm / *anorm;
+ }
+
+L40:
+ return 0;
+
+/* End of SGBCON */
+
+} /* sgbcon_ */
diff --git a/contrib/libs/clapack/sgbequ.c b/contrib/libs/clapack/sgbequ.c
new file mode 100644
index 0000000000..1f04493b75
--- /dev/null
+++ b/contrib/libs/clapack/sgbequ.c
@@ -0,0 +1,319 @@
+/* sgbequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sgbequ_(integer *m, integer *n, integer *kl, integer *ku,
+ real *ab, integer *ldab, real *r__, real *c__, real *rowcnd, real *
+ colcnd, real *amax, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2, r__3;
+
+ /* Local variables */
+ integer i__, j, kd;
+ real rcmin, rcmax;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum, smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGBEQU computes row and column scalings intended to equilibrate an */
+/* M-by-N band matrix A and reduce its condition number. R returns the */
+/* row scale factors and C the column scale factors, chosen to try to */
+/* make the largest element in each row and column of the matrix B with */
+/* elements B(i,j)=R(i)*A(i,j)*C(j) have absolute value 1. */
+
+/* R(i) and C(j) are restricted to be between SMLNUM = smallest safe */
+/* number and BIGNUM = largest safe number. Use of these scaling */
+/* factors is not guaranteed to reduce the condition number of A but */
+/* works well in practice. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input) REAL array, dimension (LDAB,N) */
+/* The band matrix A, stored in rows 1 to KL+KU+1. The j-th */
+/* column of A is stored in the j-th column of the array AB as */
+/* follows: */
+/* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KL+KU+1. */
+
+/* R (output) REAL array, dimension (M) */
+/* If INFO = 0, or INFO > M, R contains the row scale factors */
+/* for A. */
+
+/* C (output) REAL array, dimension (N) */
+/* If INFO = 0, C contains the column scale factors for A. */
+
+/* ROWCND (output) REAL */
+/* If INFO = 0 or INFO > M, ROWCND contains the ratio of the */
+/* smallest R(i) to the largest R(i). If ROWCND >= 0.1 and */
+/* AMAX is neither too large nor too small, it is not worth */
+/* scaling by R. */
+
+/* COLCND (output) REAL */
+/* If INFO = 0, COLCND contains the ratio of the smallest */
+/* C(i) to the largest C(i). If COLCND >= 0.1, it is not */
+/* worth scaling by C. */
+
+/* AMAX (output) REAL */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= M: the i-th row of A is exactly zero */
+/* > M: the (i-M)-th column of A is exactly zero */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --r__;
+ --c__;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*ldab < *kl + *ku + 1) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGBEQU", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ *rowcnd = 1.f;
+ *colcnd = 1.f;
+ *amax = 0.f;
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+
+/* Compute row scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__[i__] = 0.f;
+/* L10: */
+ }
+
+/* Find the maximum element in each row. */
+
+ kd = *ku + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j - *ku;
+/* Computing MIN */
+ i__4 = j + *kl;
+ i__3 = min(i__4,*m);
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+ r__2 = r__[i__], r__3 = (r__1 = ab[kd + i__ - j + j * ab_dim1],
+ dabs(r__1));
+ r__[i__] = dmax(r__2,r__3);
+/* L20: */
+ }
+/* L30: */
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.f;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = rcmax, r__2 = r__[i__];
+ rcmax = dmax(r__1,r__2);
+/* Computing MIN */
+ r__1 = rcmin, r__2 = r__[i__];
+ rcmin = dmin(r__1,r__2);
+/* L40: */
+ }
+ *amax = rcmax;
+
+ if (rcmin == 0.f) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (r__[i__] == 0.f) {
+ *info = i__;
+ return 0;
+ }
+/* L50: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MIN */
+/* Computing MAX */
+ r__2 = r__[i__];
+ r__1 = dmax(r__2,smlnum);
+ r__[i__] = 1.f / dmin(r__1,bignum);
+/* L60: */
+ }
+
+/* Compute ROWCND = min(R(I)) / max(R(I)) */
+
+ *rowcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+ }
+
+/* Compute column scale factors */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j] = 0.f;
+/* L70: */
+ }
+
+/* Find the maximum element in each column, */
+/* assuming the row scaling computed above. */
+
+ kd = *ku + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__3 = j - *ku;
+/* Computing MIN */
+ i__4 = j + *kl;
+ i__2 = min(i__4,*m);
+ for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = c__[j], r__3 = (r__1 = ab[kd + i__ - j + j * ab_dim1],
+ dabs(r__1)) * r__[i__];
+ c__[j] = dmax(r__2,r__3);
+/* L80: */
+ }
+/* L90: */
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ r__1 = rcmin, r__2 = c__[j];
+ rcmin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = rcmax, r__2 = c__[j];
+ rcmax = dmax(r__1,r__2);
+/* L100: */
+ }
+
+ if (rcmin == 0.f) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (c__[j] == 0.f) {
+ *info = *m + j;
+ return 0;
+ }
+/* L110: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+/* Computing MAX */
+ r__2 = c__[j];
+ r__1 = dmax(r__2,smlnum);
+ c__[j] = 1.f / dmin(r__1,bignum);
+/* L120: */
+ }
+
+/* Compute COLCND = min(C(J)) / max(C(J)) */
+
+ *colcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+ }
+
+ return 0;
+
+/* End of SGBEQU */
+
+} /* sgbequ_ */
diff --git a/contrib/libs/clapack/sgbequb.c b/contrib/libs/clapack/sgbequb.c
new file mode 100644
index 0000000000..062eb8bfa1
--- /dev/null
+++ b/contrib/libs/clapack/sgbequb.c
@@ -0,0 +1,346 @@
+/* sgbequb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sgbequb_(integer *m, integer *n, integer *kl, integer *
+ ku, real *ab, integer *ldab, real *r__, real *c__, real *rowcnd, real
+ *colcnd, real *amax, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double log(doublereal), pow_ri(real *, integer *);
+
+ /* Local variables */
+ integer i__, j, kd;
+ real radix, rcmin, rcmax;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum, logrdx, smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- Contributed by James Demmel, Deaglan Halligan, Yozo Hida and -- */
+/* -- Jason Riedy of Univ. of California Berkeley. -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley and NAG Ltd. -- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGBEQUB computes row and column scalings intended to equilibrate an */
+/* M-by-N matrix A and reduce its condition number. R returns the row */
+/* scale factors and C the column scale factors, chosen to try to make */
+/* the largest element in each row and column of the matrix B with */
+/* elements B(i,j)=R(i)*A(i,j)*C(j) have an absolute value of at most */
+/* the radix. */
+
+/* R(i) and C(j) are restricted to be a power of the radix between */
+/* SMLNUM = smallest safe number and BIGNUM = largest safe number. Use */
+/* of these scaling factors is not guaranteed to reduce the condition */
+/* number of A but works well in practice. */
+
+/* This routine differs from SGEEQU by restricting the scaling factors */
+/* to a power of the radix. Baring over- and underflow, scaling by */
+/* these factors introduces no additional rounding errors. However, the */
+/* scaled entries' magnitured are no longer approximately 1 but lie */
+/* between sqrt(radix) and 1/sqrt(radix). */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* On entry, the matrix A in band storage, in rows 1 to KL+KU+1. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(KU+1+i-j,j) = A(i,j) for max(1,j-KU)<=i<=min(N,j+kl) */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array A. LDAB >= max(1,M). */
+
+/* R (output) REAL array, dimension (M) */
+/* If INFO = 0 or INFO > M, R contains the row scale factors */
+/* for A. */
+
+/* C (output) REAL array, dimension (N) */
+/* If INFO = 0, C contains the column scale factors for A. */
+
+/* ROWCND (output) REAL */
+/* If INFO = 0 or INFO > M, ROWCND contains the ratio of the */
+/* smallest R(i) to the largest R(i). If ROWCND >= 0.1 and */
+/* AMAX is neither too large nor too small, it is not worth */
+/* scaling by R. */
+
+/* COLCND (output) REAL */
+/* If INFO = 0, COLCND contains the ratio of the smallest */
+/* C(i) to the largest C(i). If COLCND >= 0.1, it is not */
+/* worth scaling by C. */
+
+/* AMAX (output) REAL */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= M: the i-th row of A is exactly zero */
+/* > M: the (i-M)-th column of A is exactly zero */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --r__;
+ --c__;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*ldab < *kl + *ku + 1) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGBEQUB", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ *rowcnd = 1.f;
+ *colcnd = 1.f;
+ *amax = 0.f;
+ return 0;
+ }
+
+/* Get machine constants. Assume SMLNUM is a power of the radix. */
+
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ radix = slamch_("B");
+ logrdx = log(radix);
+
+/* Compute row scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__[i__] = 0.f;
+/* L10: */
+ }
+
+/* Find the maximum element in each row. */
+
+ kd = *ku + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j - *ku;
+/* Computing MIN */
+ i__4 = j + *kl;
+ i__3 = min(i__4,*m);
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+ r__2 = r__[i__], r__3 = (r__1 = ab[kd + i__ - j + j * ab_dim1],
+ dabs(r__1));
+ r__[i__] = dmax(r__2,r__3);
+/* L20: */
+ }
+/* L30: */
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (r__[i__] > 0.f) {
+ i__3 = (integer) (log(r__[i__]) / logrdx);
+ r__[i__] = pow_ri(&radix, &i__3);
+ }
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.f;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = rcmax, r__2 = r__[i__];
+ rcmax = dmax(r__1,r__2);
+/* Computing MIN */
+ r__1 = rcmin, r__2 = r__[i__];
+ rcmin = dmin(r__1,r__2);
+/* L40: */
+ }
+ *amax = rcmax;
+
+ if (rcmin == 0.f) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (r__[i__] == 0.f) {
+ *info = i__;
+ return 0;
+ }
+/* L50: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MIN */
+/* Computing MAX */
+ r__2 = r__[i__];
+ r__1 = dmax(r__2,smlnum);
+ r__[i__] = 1.f / dmin(r__1,bignum);
+/* L60: */
+ }
+
+/* Compute ROWCND = min(R(I)) / max(R(I)). */
+
+ *rowcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+ }
+
+/* Compute column scale factors. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j] = 0.f;
+/* L70: */
+ }
+
+/* Find the maximum element in each column, */
+/* assuming the row scaling computed above. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__3 = j - *ku;
+/* Computing MIN */
+ i__4 = j + *kl;
+ i__2 = min(i__4,*m);
+ for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = c__[j], r__3 = (r__1 = ab[kd + i__ - j + j * ab_dim1],
+ dabs(r__1)) * r__[i__];
+ c__[j] = dmax(r__2,r__3);
+/* L80: */
+ }
+ if (c__[j] > 0.f) {
+ i__2 = (integer) (log(c__[j]) / logrdx);
+ c__[j] = pow_ri(&radix, &i__2);
+ }
+/* L90: */
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ r__1 = rcmin, r__2 = c__[j];
+ rcmin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = rcmax, r__2 = c__[j];
+ rcmax = dmax(r__1,r__2);
+/* L100: */
+ }
+
+ if (rcmin == 0.f) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (c__[j] == 0.f) {
+ *info = *m + j;
+ return 0;
+ }
+/* L110: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+/* Computing MAX */
+ r__2 = c__[j];
+ r__1 = dmax(r__2,smlnum);
+ c__[j] = 1.f / dmin(r__1,bignum);
+/* L120: */
+ }
+
+/* Compute COLCND = min(C(J)) / max(C(J)). */
+
+ *colcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+ }
+
+ return 0;
+
+/* End of SGBEQUB */
+
+} /* sgbequb_ */
diff --git a/contrib/libs/clapack/sgbrfs.c b/contrib/libs/clapack/sgbrfs.c
new file mode 100644
index 0000000000..d5e5d7ef51
--- /dev/null
+++ b/contrib/libs/clapack/sgbrfs.c
@@ -0,0 +1,454 @@
+/* sgbrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b15 = -1.f;
+static real c_b17 = 1.f;
+
+/* Subroutine */ int sgbrfs_(char *trans, integer *n, integer *kl, integer *
+ ku, integer *nrhs, real *ab, integer *ldab, real *afb, integer *ldafb,
+ integer *ipiv, real *b, integer *ldb, real *x, integer *ldx, real *
+ ferr, real *berr, real *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, afb_dim1, afb_offset, b_dim1, b_offset,
+ x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7;
+ real r__1, r__2, r__3;
+
+ /* Local variables */
+ integer i__, j, k;
+ real s;
+ integer kk;
+ real xk;
+ integer nz;
+ real eps;
+ integer kase;
+ real safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int sgbmv_(char *, integer *, integer *, integer *
+, integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ integer count;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), saxpy_(integer *, real *, real *, integer *, real *,
+ integer *), slacn2_(integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran;
+ extern /* Subroutine */ int sgbtrs_(char *, integer *, integer *, integer
+ *, integer *, real *, integer *, integer *, real *, integer *,
+ integer *);
+ char transt[1];
+ real lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGBRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is banded, and provides */
+/* error bounds and backward error estimates for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose = Transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AB (input) REAL array, dimension (LDAB,N) */
+/* The original band matrix A, stored in rows 1 to KL+KU+1. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(n,j+kl). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KL+KU+1. */
+
+/* AFB (input) REAL array, dimension (LDAFB,N) */
+/* Details of the LU factorization of the band matrix A, as */
+/* computed by SGBTRF. U is stored as an upper triangular band */
+/* matrix with KL+KU superdiagonals in rows 1 to KL+KU+1, and */
+/* the multipliers used during the factorization are stored in */
+/* rows KL+KU+2 to 2*KL+KU+1. */
+
+/* LDAFB (input) INTEGER */
+/* The leading dimension of the array AFB. LDAFB >= 2*KL*KU+1. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices from SGBTRF; for 1<=i<=N, row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* B (input) REAL array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) REAL array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by SGBTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ afb_dim1 = *ldafb;
+ afb_offset = 1 + afb_dim1;
+ afb -= afb_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*nrhs < 0) {
+ *info = -5;
+ } else if (*ldab < *kl + *ku + 1) {
+ *info = -7;
+ } else if (*ldafb < (*kl << 1) + *ku + 1) {
+ *info = -9;
+ } else if (*ldb < max(1,*n)) {
+ *info = -12;
+ } else if (*ldx < max(1,*n)) {
+ *info = -14;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGBRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.f;
+ berr[j] = 0.f;
+/* L10: */
+ }
+ return 0;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+/* Computing MIN */
+ i__1 = *kl + *ku + 2, i__2 = *n + 1;
+ nz = min(i__1,i__2);
+ eps = slamch_("Epsilon");
+ safmin = slamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A, A**T, or A**H, depending on TRANS. */
+
+ scopy_(n, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+ sgbmv_(trans, n, n, kl, ku, &c_b15, &ab[ab_offset], ldab, &x[j *
+ x_dim1 + 1], &c__1, &c_b17, &work[*n + 1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] = (r__1 = b[i__ + j * b_dim1], dabs(r__1));
+/* L30: */
+ }
+
+/* Compute abs(op(A))*abs(X) + abs(B). */
+
+ if (notran) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ kk = *ku + 1 - k;
+ xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+/* Computing MAX */
+ i__3 = 1, i__4 = k - *ku;
+/* Computing MIN */
+ i__6 = *n, i__7 = k + *kl;
+ i__5 = min(i__6,i__7);
+ for (i__ = max(i__3,i__4); i__ <= i__5; ++i__) {
+ work[i__] += (r__1 = ab[kk + i__ + k * ab_dim1], dabs(
+ r__1)) * xk;
+/* L40: */
+ }
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ kk = *ku + 1 - k;
+/* Computing MAX */
+ i__5 = 1, i__3 = k - *ku;
+/* Computing MIN */
+ i__6 = *n, i__7 = k + *kl;
+ i__4 = min(i__6,i__7);
+ for (i__ = max(i__5,i__3); i__ <= i__4; ++i__) {
+ s += (r__1 = ab[kk + i__ + k * ab_dim1], dabs(r__1)) * (
+ r__2 = x[i__ + j * x_dim1], dabs(r__2));
+/* L60: */
+ }
+ work[k] += s;
+/* L70: */
+ }
+ }
+ s = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+/* Computing MAX */
+ r__2 = s, r__3 = (r__1 = work[*n + i__], dabs(r__1)) / work[
+ i__];
+ s = dmax(r__2,r__3);
+ } else {
+/* Computing MAX */
+ r__2 = s, r__3 = ((r__1 = work[*n + i__], dabs(r__1)) + safe1)
+ / (work[i__] + safe1);
+ s = dmax(r__2,r__3);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ sgbtrs_(trans, n, kl, ku, &c__1, &afb[afb_offset], ldafb, &ipiv[1]
+, &work[*n + 1], n, info);
+ saxpy_(n, &c_b17, &work[*n + 1], &c__1, &x[j * x_dim1 + 1], &c__1)
+ ;
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use SLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+ work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+ work[i__];
+ } else {
+ work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+ work[i__] + safe1;
+ }
+/* L90: */
+ }
+
+ kase = 0;
+L100:
+ slacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+ kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)**T). */
+
+ sgbtrs_(transt, n, kl, ku, &c__1, &afb[afb_offset], ldafb, &
+ ipiv[1], &work[*n + 1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] *= work[i__];
+/* L110: */
+ }
+ } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] *= work[i__];
+/* L120: */
+ }
+ sgbtrs_(trans, n, kl, ku, &c__1, &afb[afb_offset], ldafb, &
+ ipiv[1], &work[*n + 1], n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = lstres, r__3 = (r__1 = x[i__ + j * x_dim1], dabs(r__1));
+ lstres = dmax(r__2,r__3);
+/* L130: */
+ }
+ if (lstres != 0.f) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of SGBRFS */
+
+} /* sgbrfs_ */
diff --git a/contrib/libs/clapack/sgbsv.c b/contrib/libs/clapack/sgbsv.c
new file mode 100644
index 0000000000..ee6db5e1c8
--- /dev/null
+++ b/contrib/libs/clapack/sgbsv.c
@@ -0,0 +1,176 @@
+/* sgbsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sgbsv_(integer *n, integer *kl, integer *ku, integer *
+ nrhs, real *ab, integer *ldab, integer *ipiv, real *b, integer *ldb,
+ integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern /* Subroutine */ int xerbla_(char *, integer *), sgbtrf_(
+ integer *, integer *, integer *, integer *, real *, integer *,
+ integer *, integer *), sgbtrs_(char *, integer *, integer *,
+ integer *, integer *, real *, integer *, integer *, real *,
+ integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGBSV computes the solution to a real system of linear equations */
+/* A * X = B, where A is a band matrix of order N with KL subdiagonals */
+/* and KU superdiagonals, and X and B are N-by-NRHS matrices. */
+
+/* The LU decomposition with partial pivoting and row interchanges is */
+/* used to factor A as A = L * U, where L is a product of permutation */
+/* and unit lower triangular matrices with KL subdiagonals, and U is */
+/* upper triangular with KL+KU superdiagonals. The factored form of A */
+/* is then used to solve the system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AB (input/output) REAL array, dimension (LDAB,N) */
+/* On entry, the matrix A in band storage, in rows KL+1 to */
+/* 2*KL+KU+1; rows 1 to KL of the array need not be set. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(KL+KU+1+i-j,j) = A(i,j) for max(1,j-KU)<=i<=min(N,j+KL) */
+/* On exit, details of the factorization: U is stored as an */
+/* upper triangular band matrix with KL+KU superdiagonals in */
+/* rows 1 to KL+KU+1, and the multipliers used during the */
+/* factorization are stored in rows KL+KU+2 to 2*KL+KU+1. */
+/* See below for further details. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= 2*KL+KU+1. */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* The pivot indices that define the permutation matrix P; */
+/* row i of the matrix was interchanged with row IPIV(i). */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and the solution has not been computed. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* M = N = 6, KL = 2, KU = 1: */
+
+/* On entry: On exit: */
+
+/* * * * + + + * * * u14 u25 u36 */
+/* * * + + + + * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+/* a21 a32 a43 a54 a65 * m21 m32 m43 m54 m65 * */
+/* a31 a42 a53 a64 * * m31 m42 m53 m64 * * */
+
+/* Array elements marked * are not used by the routine; elements marked */
+/* + need not be set on entry, but are required by the routine to store */
+/* elements of U because of fill-in resulting from the row interchanges. */
+
+/* ===================================================================== */
+
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*kl < 0) {
+ *info = -2;
+ } else if (*ku < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*ldab < (*kl << 1) + *ku + 1) {
+ *info = -6;
+ } else if (*ldb < max(*n,1)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGBSV ", &i__1);
+ return 0;
+ }
+
+/* Compute the LU factorization of the band matrix A. */
+
+ sgbtrf_(n, n, kl, ku, &ab[ab_offset], ldab, &ipiv[1], info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ sgbtrs_("No transpose", n, kl, ku, nrhs, &ab[ab_offset], ldab, &ipiv[
+ 1], &b[b_offset], ldb, info);
+ }
+ return 0;
+
+/* End of SGBSV */
+
+} /* sgbsv_ */
diff --git a/contrib/libs/clapack/sgbsvx.c b/contrib/libs/clapack/sgbsvx.c
new file mode 100644
index 0000000000..69346f4371
--- /dev/null
+++ b/contrib/libs/clapack/sgbsvx.c
@@ -0,0 +1,650 @@
+/* sgbsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sgbsvx_(char *fact, char *trans, integer *n, integer *kl,
+ integer *ku, integer *nrhs, real *ab, integer *ldab, real *afb,
+ integer *ldafb, integer *ipiv, char *equed, real *r__, real *c__,
+ real *b, integer *ldb, real *x, integer *ldx, real *rcond, real *ferr,
+ real *berr, real *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, afb_dim1, afb_offset, b_dim1, b_offset,
+ x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3;
+
+ /* Local variables */
+ integer i__, j, j1, j2;
+ real amax;
+ char norm[1];
+ extern logical lsame_(char *, char *);
+ real rcmin, rcmax, anorm;
+ logical equil;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ real colcnd;
+ extern doublereal slangb_(char *, integer *, integer *, integer *, real *,
+ integer *, real *), slamch_(char *);
+ extern /* Subroutine */ int slaqgb_(integer *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, real *, real *,
+ real *, char *);
+ logical nofact;
+ extern /* Subroutine */ int sgbcon_(char *, integer *, integer *, integer
+ *, real *, integer *, integer *, real *, real *, real *, integer *
+, integer *), xerbla_(char *, integer *);
+ real bignum;
+ extern doublereal slantb_(char *, char *, char *, integer *, integer *,
+ real *, integer *, real *);
+ extern /* Subroutine */ int sgbequ_(integer *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, real *, real *,
+ real *, integer *);
+ integer infequ;
+ logical colequ;
+ extern /* Subroutine */ int sgbrfs_(char *, integer *, integer *, integer
+ *, integer *, real *, integer *, real *, integer *, integer *,
+ real *, integer *, real *, integer *, real *, real *, real *,
+ integer *, integer *), sgbtrf_(integer *, integer *,
+ integer *, integer *, real *, integer *, integer *, integer *),
+ slacpy_(char *, integer *, integer *, real *, integer *, real *,
+ integer *);
+ real rowcnd;
+ logical notran;
+ extern /* Subroutine */ int sgbtrs_(char *, integer *, integer *, integer
+ *, integer *, real *, integer *, integer *, real *, integer *,
+ integer *);
+ real smlnum;
+ logical rowequ;
+ real rpvgrw;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGBSVX uses the LU factorization to compute the solution to a real */
+/* system of linear equations A * X = B, A**T * X = B, or A**H * X = B, */
+/* where A is a band matrix of order N with KL subdiagonals and KU */
+/* superdiagonals, and X and B are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed by this subroutine: */
+
+/* 1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/* the system: */
+/* TRANS = 'N': diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B */
+/* TRANS = 'T': (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B */
+/* TRANS = 'C': (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B */
+/* Whether or not the system will be equilibrated depends on the */
+/* scaling of the matrix A, but if equilibration is used, A is */
+/* overwritten by diag(R)*A*diag(C) and B by diag(R)*B (if TRANS='N') */
+/* or diag(C)*B (if TRANS = 'T' or 'C'). */
+
+/* 2. If FACT = 'N' or 'E', the LU decomposition is used to factor the */
+/* matrix A (after equilibration if FACT = 'E') as */
+/* A = L * U, */
+/* where L is a product of permutation and unit lower triangular */
+/* matrices with KL subdiagonals, and U is upper triangular with */
+/* KL+KU superdiagonals. */
+
+/* 3. If some U(i,i)=0, so that U is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 4. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 5. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* 6. If equilibration was used, the matrix X is premultiplied by */
+/* diag(C) (if TRANS = 'N') or diag(R) (if TRANS = 'T' or 'C') so */
+/* that it solves the original system before equilibration. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of the matrix A is */
+/* supplied on entry, and if not, whether the matrix A should be */
+/* equilibrated before it is factored. */
+/* = 'F': On entry, AFB and IPIV contain the factored form of */
+/* A. If EQUED is not 'N', the matrix A has been */
+/* equilibrated with scaling factors given by R and C. */
+/* AB, AFB, and IPIV are not modified. */
+/* = 'N': The matrix A will be copied to AFB and factored. */
+/* = 'E': The matrix A will be equilibrated if necessary, then */
+/* copied to AFB and factored. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations. */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Transpose) */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AB (input/output) REAL array, dimension (LDAB,N) */
+/* On entry, the matrix A in band storage, in rows 1 to KL+KU+1. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(KU+1+i-j,j) = A(i,j) for max(1,j-KU)<=i<=min(N,j+kl) */
+
+/* If FACT = 'F' and EQUED is not 'N', then A must have been */
+/* equilibrated by the scaling factors in R and/or C. AB is not */
+/* modified if FACT = 'F' or 'N', or if FACT = 'E' and */
+/* EQUED = 'N' on exit. */
+
+/* On exit, if EQUED .ne. 'N', A is scaled as follows: */
+/* EQUED = 'R': A := diag(R) * A */
+/* EQUED = 'C': A := A * diag(C) */
+/* EQUED = 'B': A := diag(R) * A * diag(C). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KL+KU+1. */
+
+/* AFB (input or output) REAL array, dimension (LDAFB,N) */
+/* If FACT = 'F', then AFB is an input argument and on entry */
+/* contains details of the LU factorization of the band matrix */
+/* A, as computed by SGBTRF. U is stored as an upper triangular */
+/* band matrix with KL+KU superdiagonals in rows 1 to KL+KU+1, */
+/* and the multipliers used during the factorization are stored */
+/* in rows KL+KU+2 to 2*KL+KU+1. If EQUED .ne. 'N', then AFB is */
+/* the factored form of the equilibrated matrix A. */
+
+/* If FACT = 'N', then AFB is an output argument and on exit */
+/* returns details of the LU factorization of A. */
+
+/* If FACT = 'E', then AFB is an output argument and on exit */
+/* returns details of the LU factorization of the equilibrated */
+/* matrix A (see the description of AB for the form of the */
+/* equilibrated matrix). */
+
+/* LDAFB (input) INTEGER */
+/* The leading dimension of the array AFB. LDAFB >= 2*KL+KU+1. */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains the pivot indices from the factorization A = L*U */
+/* as computed by SGBTRF; row i of the matrix was interchanged */
+/* with row IPIV(i). */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains the pivot indices from the factorization A = L*U */
+/* of the original matrix A. */
+
+/* If FACT = 'E', then IPIV is an output argument and on exit */
+/* contains the pivot indices from the factorization A = L*U */
+/* of the equilibrated matrix A. */
+
+/* EQUED (input or output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration (always true if FACT = 'N'). */
+/* = 'R': Row equilibration, i.e., A has been premultiplied by */
+/* diag(R). */
+/* = 'C': Column equilibration, i.e., A has been postmultiplied */
+/* by diag(C). */
+/* = 'B': Both row and column equilibration, i.e., A has been */
+/* replaced by diag(R) * A * diag(C). */
+/* EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/* output argument. */
+
+/* R (input or output) REAL array, dimension (N) */
+/* The row scale factors for A. If EQUED = 'R' or 'B', A is */
+/* multiplied on the left by diag(R); if EQUED = 'N' or 'C', R */
+/* is not accessed. R is an input argument if FACT = 'F'; */
+/* otherwise, R is an output argument. If FACT = 'F' and */
+/* EQUED = 'R' or 'B', each element of R must be positive. */
+
+/* C (input or output) REAL array, dimension (N) */
+/* The column scale factors for A. If EQUED = 'C' or 'B', A is */
+/* multiplied on the right by diag(C); if EQUED = 'N' or 'R', C */
+/* is not accessed. C is an input argument if FACT = 'F'; */
+/* otherwise, C is an output argument. If FACT = 'F' and */
+/* EQUED = 'C' or 'B', each element of C must be positive. */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, */
+/* if EQUED = 'N', B is not modified; */
+/* if TRANS = 'N' and EQUED = 'R' or 'B', B is overwritten by */
+/* diag(R)*B; */
+/* if TRANS = 'T' or 'C' and EQUED = 'C' or 'B', B is */
+/* overwritten by diag(C)*B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) REAL array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X */
+/* to the original system of equations. Note that A and B are */
+/* modified on exit if EQUED .ne. 'N', and the solution to the */
+/* equilibrated system is inv(diag(C))*X if TRANS = 'N' and */
+/* EQUED = 'C' or 'B', or inv(diag(R))*X if TRANS = 'T' or 'C' */
+/* and EQUED = 'R' or 'B'. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) REAL */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A after equilibration (if done). If RCOND is less than the */
+/* machine precision (in particular, if RCOND = 0), the matrix */
+/* is singular to working precision. This condition is */
+/* indicated by a return code of INFO > 0. */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace/output) REAL array, dimension (3*N) */
+/* On exit, WORK(1) contains the reciprocal pivot growth */
+/* factor norm(A)/norm(U). The "max absolute element" norm is */
+/* used. If WORK(1) is much less than 1, then the stability */
+/* of the LU factorization of the (equilibrated) matrix A */
+/* could be poor. This also means that the solution X, condition */
+/* estimator RCOND, and forward error bound FERR could be */
+/* unreliable. If factorization fails with 0<INFO<=N, then */
+/* WORK(1) contains the reciprocal pivot growth factor for the */
+/* leading INFO columns of A. */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: U(i,i) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, so the solution and error bounds */
+/* could not be computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+
+/* value of RCOND would suggest. */
+/* ===================================================================== */
+/* Moved setting of INFO = N+1 so INFO does not subsequently get */
+/* overwritten. Sven, 17 Mar 05. */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ afb_dim1 = *ldafb;
+ afb_offset = 1 + afb_dim1;
+ afb -= afb_offset;
+ --ipiv;
+ --r__;
+ --c__;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ equil = lsame_(fact, "E");
+ notran = lsame_(trans, "N");
+ if (nofact || equil) {
+ *(unsigned char *)equed = 'N';
+ rowequ = FALSE_;
+ colequ = FALSE_;
+ } else {
+ rowequ = lsame_(equed, "R") || lsame_(equed,
+ "B");
+ colequ = lsame_(equed, "C") || lsame_(equed,
+ "B");
+ smlnum = slamch_("Safe minimum");
+ bignum = 1.f / smlnum;
+ }
+
+/* Test the input parameters. */
+
+ if (! nofact && ! equil && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*kl < 0) {
+ *info = -4;
+ } else if (*ku < 0) {
+ *info = -5;
+ } else if (*nrhs < 0) {
+ *info = -6;
+ } else if (*ldab < *kl + *ku + 1) {
+ *info = -8;
+ } else if (*ldafb < (*kl << 1) + *ku + 1) {
+ *info = -10;
+ } else if (lsame_(fact, "F") && ! (rowequ || colequ
+ || lsame_(equed, "N"))) {
+ *info = -12;
+ } else {
+ if (rowequ) {
+ rcmin = bignum;
+ rcmax = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ r__1 = rcmin, r__2 = r__[j];
+ rcmin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = rcmax, r__2 = r__[j];
+ rcmax = dmax(r__1,r__2);
+/* L10: */
+ }
+ if (rcmin <= 0.f) {
+ *info = -13;
+ } else if (*n > 0) {
+ rowcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+ } else {
+ rowcnd = 1.f;
+ }
+ }
+ if (colequ && *info == 0) {
+ rcmin = bignum;
+ rcmax = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ r__1 = rcmin, r__2 = c__[j];
+ rcmin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = rcmax, r__2 = c__[j];
+ rcmax = dmax(r__1,r__2);
+/* L20: */
+ }
+ if (rcmin <= 0.f) {
+ *info = -14;
+ } else if (*n > 0) {
+ colcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+ } else {
+ colcnd = 1.f;
+ }
+ }
+ if (*info == 0) {
+ if (*ldb < max(1,*n)) {
+ *info = -16;
+ } else if (*ldx < max(1,*n)) {
+ *info = -18;
+ }
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGBSVX", &i__1);
+ return 0;
+ }
+
+ if (equil) {
+
+/* Compute row and column scalings to equilibrate the matrix A. */
+
+ sgbequ_(n, n, kl, ku, &ab[ab_offset], ldab, &r__[1], &c__[1], &rowcnd,
+ &colcnd, &amax, &infequ);
+ if (infequ == 0) {
+
+/* Equilibrate the matrix. */
+
+ slaqgb_(n, n, kl, ku, &ab[ab_offset], ldab, &r__[1], &c__[1], &
+ rowcnd, &colcnd, &amax, equed);
+ rowequ = lsame_(equed, "R") || lsame_(equed,
+ "B");
+ colequ = lsame_(equed, "C") || lsame_(equed,
+ "B");
+ }
+ }
+
+/* Scale the right hand side. */
+
+ if (notran) {
+ if (rowequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = r__[i__] * b[i__ + j * b_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else if (colequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = c__[i__] * b[i__ + j * b_dim1];
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+ if (nofact || equil) {
+
+/* Compute the LU factorization of the band matrix A. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j - *ku;
+ j1 = max(i__2,1);
+/* Computing MIN */
+ i__2 = j + *kl;
+ j2 = min(i__2,*n);
+ i__2 = j2 - j1 + 1;
+ scopy_(&i__2, &ab[*ku + 1 - j + j1 + j * ab_dim1], &c__1, &afb[*
+ kl + *ku + 1 - j + j1 + j * afb_dim1], &c__1);
+/* L70: */
+ }
+
+ sgbtrf_(n, n, kl, ku, &afb[afb_offset], ldafb, &ipiv[1], info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+
+/* Compute the reciprocal pivot growth factor of the */
+/* leading rank-deficient INFO columns of A. */
+
+ anorm = 0.f;
+ i__1 = *info;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = *ku + 2 - j;
+/* Computing MIN */
+ i__4 = *n + *ku + 1 - j, i__5 = *kl + *ku + 1;
+ i__3 = min(i__4,i__5);
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+ r__2 = anorm, r__3 = (r__1 = ab[i__ + j * ab_dim1], dabs(
+ r__1));
+ anorm = dmax(r__2,r__3);
+/* L80: */
+ }
+/* L90: */
+ }
+/* Computing MIN */
+ i__3 = *info - 1, i__2 = *kl + *ku;
+ i__1 = min(i__3,i__2);
+/* Computing MAX */
+ i__4 = 1, i__5 = *kl + *ku + 2 - *info;
+ rpvgrw = slantb_("M", "U", "N", info, &i__1, &afb[max(i__4, i__5)
+ + afb_dim1], ldafb, &work[1]);
+ if (rpvgrw == 0.f) {
+ rpvgrw = 1.f;
+ } else {
+ rpvgrw = anorm / rpvgrw;
+ }
+ work[1] = rpvgrw;
+ *rcond = 0.f;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A and the */
+/* reciprocal pivot growth factor RPVGRW. */
+
+ if (notran) {
+ *(unsigned char *)norm = '1';
+ } else {
+ *(unsigned char *)norm = 'I';
+ }
+ anorm = slangb_(norm, n, kl, ku, &ab[ab_offset], ldab, &work[1]);
+ i__1 = *kl + *ku;
+ rpvgrw = slantb_("M", "U", "N", n, &i__1, &afb[afb_offset], ldafb, &work[
+ 1]);
+ if (rpvgrw == 0.f) {
+ rpvgrw = 1.f;
+ } else {
+ rpvgrw = slangb_("M", n, kl, ku, &ab[ab_offset], ldab, &work[1]) / rpvgrw;
+ }
+
+/* Compute the reciprocal of the condition number of A. */
+
+ sgbcon_(norm, n, kl, ku, &afb[afb_offset], ldafb, &ipiv[1], &anorm, rcond,
+ &work[1], &iwork[1], info);
+
+/* Compute the solution matrix X. */
+
+ slacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ sgbtrs_(trans, n, kl, ku, nrhs, &afb[afb_offset], ldafb, &ipiv[1], &x[
+ x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solution and */
+/* compute error bounds and backward error estimates for it. */
+
+ sgbrfs_(trans, n, kl, ku, nrhs, &ab[ab_offset], ldab, &afb[afb_offset],
+ ldafb, &ipiv[1], &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1], &
+ berr[1], &work[1], &iwork[1], info);
+
+/* Transform the solution matrix X to a solution of the original */
+/* system. */
+
+ if (notran) {
+ if (colequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ x[i__ + j * x_dim1] = c__[i__] * x[i__ + j * x_dim1];
+/* L100: */
+ }
+/* L110: */
+ }
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] /= colcnd;
+/* L120: */
+ }
+ }
+ } else if (rowequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ x[i__ + j * x_dim1] = r__[i__] * x[i__ + j * x_dim1];
+/* L130: */
+ }
+/* L140: */
+ }
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] /= rowcnd;
+/* L150: */
+ }
+ }
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < slamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ work[1] = rpvgrw;
+ return 0;
+
+/* End of SGBSVX */
+
+} /* sgbsvx_ */
diff --git a/contrib/libs/clapack/sgbtf2.c b/contrib/libs/clapack/sgbtf2.c
new file mode 100644
index 0000000000..0d0fbfbfa9
--- /dev/null
+++ b/contrib/libs/clapack/sgbtf2.c
@@ -0,0 +1,260 @@
+/* sgbtf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b9 = -1.f;
+
+/* Subroutine */ int sgbtf2_(integer *m, integer *n, integer *kl, integer *ku, 
+	 real *ab, integer *ldab, integer *ipiv, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+    real r__1;
+
+    /* Local variables */
+    integer i__, j, km, jp, ju, kv;
+    extern /* Subroutine */ int sger_(integer *, integer *, real *, real *, 
+	    integer *, real *, integer *, real *, integer *), sscal_(integer *
+, real *, real *, integer *), sswap_(integer *, real *, integer *, 
+	    real *, integer *), xerbla_(char *, integer *);
+    extern integer isamax_(integer *, real *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SGBTF2 computes an LU factorization of a real m-by-n band matrix A */
+/*  using partial pivoting with row interchanges. */
+
+/*  This is the unblocked version of the algorithm, calling Level 2 BLAS. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix A.  M >= 0. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix A.  N >= 0. */
+
+/*  KL      (input) INTEGER */
+/*          The number of subdiagonals within the band of A.  KL >= 0. */
+
+/*  KU      (input) INTEGER */
+/*          The number of superdiagonals within the band of A.  KU >= 0. */
+
+/*  AB      (input/output) REAL array, dimension (LDAB,N) */
+/*          On entry, the matrix A in band storage, in rows KL+1 to */
+/*          2*KL+KU+1; rows 1 to KL of the array need not be set. */
+/*          The j-th column of A is stored in the j-th column of the */
+/*          array AB as follows: */
+/*          AB(kl+ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl) */
+
+/*          On exit, details of the factorization: U is stored as an */
+/*          upper triangular band matrix with KL+KU superdiagonals in */
+/*          rows 1 to KL+KU+1, and the multipliers used during the */
+/*          factorization are stored in rows KL+KU+2 to 2*KL+KU+1. */
+/*          See below for further details. */
+
+/*  LDAB    (input) INTEGER */
+/*          The leading dimension of the array AB.  LDAB >= 2*KL+KU+1. */
+
+/*  IPIV    (output) INTEGER array, dimension (min(M,N)) */
+/*          The pivot indices; for 1 <= i <= min(M,N), row i of the */
+/*          matrix was interchanged with row IPIV(i). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0: successful exit */
+/*          < 0: if INFO = -i, the i-th argument had an illegal value */
+/*          > 0: if INFO = +i, U(i,i) is exactly zero. The factorization */
+/*               has been completed, but the factor U is exactly */
+/*               singular, and division by zero will occur if it is used */
+/*               to solve a system of equations. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  The band storage scheme is illustrated by the following example, when */
+/*  M = N = 6, KL = 2, KU = 1: */
+
+/*     On entry:                       On exit: */
+
+/*      *    *    *    +    +    +       *    *    *   u14  u25  u36 */
+/*      *    *    +    +    +    +       *    *   u13  u24  u35  u46 */
+/*      *   a12  a23  a34  a45  a56      *   u12  u23  u34  u45  u56 */
+/*     a11  a22  a33  a44  a55  a66     u11  u22  u33  u44  u55  u66 */
+/*     a21  a32  a43  a54  a65   *      m21  m32  m43  m54  m65   * */
+/*     a31  a42  a53  a64   *    *      m31  m42  m53  m64   *    * */
+
+/*  Array elements marked * are not used by the routine; elements marked */
+/*  + need not be set on entry, but are required by the routine to store */
+/*  elements of U, because of fill-in resulting from the row */
+/*  interchanges. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     KV is the number of superdiagonals in the factor U, allowing for */
+/*     fill-in. */
+
+    /* Parameter adjustments (f2c): bias pointers so 1-based Fortran indices work */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --ipiv;
+
+    /* Function Body: kv = KL+KU superdiagonals of U, allowing for fill-in */
+    kv = *ku + *kl;
+
+/*     Test the input parameters. */
+
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kl < 0) {
+	*info = -3;
+    } else if (*ku < 0) {
+	*info = -4;
+    } else if (*ldab < *kl + kv + 1) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGBTF2", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*m == 0 || *n == 0) {
+	return 0;
+    }
+
+/*     Gaussian elimination with partial pivoting */
+
+/*     Set fill-in elements in columns KU+2 to KV to zero. */
+
+    i__1 = min(kv,*n);
+    for (j = *ku + 2; j <= i__1; ++j) {
+	i__2 = *kl;
+	for (i__ = kv - j + 2; i__ <= i__2; ++i__) {
+	    ab[i__ + j * ab_dim1] = 0.f;
+/* L10: */
+	}
+/* L20: */
+    }
+
+/*     JU is the index of the last column affected by the current stage */
+/*     of the factorization. */
+
+    ju = 1;
+
+    i__1 = min(*m,*n);
+    for (j = 1; j <= i__1; ++j) {
+
+/*        Set fill-in elements in column J+KV to zero. */
+
+	if (j + kv <= *n) {
+	    i__2 = *kl;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		ab[i__ + (j + kv) * ab_dim1] = 0.f;
+/* L30: */
+	    }
+	}
+
+/*        Find pivot and test for singularity. KM is the number of */
+/*        subdiagonal elements in the current column. */
+
+/* Computing MIN */
+	i__2 = *kl, i__3 = *m - j;
+	km = min(i__2,i__3);
+	i__2 = km + 1;
+	jp = isamax_(&i__2, &ab[kv + 1 + j * ab_dim1], &c__1);
+	ipiv[j] = jp + j - 1;
+	if (ab[kv + jp + j * ab_dim1] != 0.f) {
+/* Computing MAX */
+/* Computing MIN */
+	    i__4 = j + *ku + jp - 1;
+	    i__2 = ju, i__3 = min(i__4,*n);
+	    ju = max(i__2,i__3);
+
+/*           Apply interchange to columns J to JU. */
+
+	    if (jp != 1) {
+		i__2 = ju - j + 1;
+		i__3 = *ldab - 1;
+		i__4 = *ldab - 1;
+		sswap_(&i__2, &ab[kv + jp + j * ab_dim1], &i__3, &ab[kv + 1 + 
+			j * ab_dim1], &i__4);
+	    }
+
+	    if (km > 0) {
+
+/*              Compute multipliers. */
+
+		r__1 = 1.f / ab[kv + 1 + j * ab_dim1];
+		sscal_(&km, &r__1, &ab[kv + 2 + j * ab_dim1], &c__1);
+
+/*              Update trailing submatrix within the band. */
+
+		if (ju > j) {
+		    i__2 = ju - j;
+		    i__3 = *ldab - 1;
+		    i__4 = *ldab - 1;
+		    sger_(&km, &i__2, &c_b9, &ab[kv + 2 + j * ab_dim1], &c__1,
+			     &ab[kv + (j + 1) * ab_dim1], &i__3, &ab[kv + 1 + 
+			    (j + 1) * ab_dim1], &i__4);
+		}
+	    }
+	} else {
+
+/*           If pivot is zero, set INFO to the index of the pivot */
+/*           unless a zero pivot has already been found. */
+
+	    if (*info == 0) {
+		*info = j;
+	    }
+	}
+/* L40: */
+    }
+    return 0;
+
+/*     End of SGBTF2 */
+
+} /* sgbtf2_ */
diff --git a/contrib/libs/clapack/sgbtrf.c b/contrib/libs/clapack/sgbtrf.c
new file mode 100644
index 0000000000..fead045f68
--- /dev/null
+++ b/contrib/libs/clapack/sgbtrf.c
@@ -0,0 +1,583 @@
+/* sgbtrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__65 = 65;
+static real c_b18 = -1.f;
+static real c_b31 = 1.f;
+
+/* Subroutine */ int sgbtrf_(integer *m, integer *n, integer *kl, integer *ku, 
+	 real *ab, integer *ldab, integer *ipiv, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+    real r__1;
+
+    /* Local variables */
+    integer i__, j, i2, i3, j2, j3, k2, jb, nb, ii, jj, jm, ip, jp, km, ju, 
+	    kv, nw;
+    extern /* Subroutine */ int sger_(integer *, integer *, real *, real *, 
+	    integer *, real *, integer *, real *, integer *);
+    real temp;
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *), 
+	    sgemm_(char *, char *, integer *, integer *, integer *, real *, 
+	    real *, integer *, real *, integer *, real *, real *, integer *);
+    real work13[4160]	/* was [65][64] */, work31[4160]	/* was [65][
+	    64] */;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *, 
+	    integer *), sswap_(integer *, real *, integer *, real *, integer *
+), strsm_(char *, char *, char *, char *, integer *, integer *, 
+	    real *, real *, integer *, real *, integer *), sgbtf2_(integer *, integer *, integer *, integer
+	    *, real *, integer *, integer *, integer *), xerbla_(char *, 
+	    integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *), isamax_(integer *, real *, 
+	    integer *);
+    extern /* Subroutine */ int slaswp_(integer *, real *, integer *, integer 
+	    *, integer *, integer *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SGBTRF computes an LU factorization of a real m-by-n band matrix A */
+/*  using partial pivoting with row interchanges. */
+
+/*  This is the blocked version of the algorithm, calling Level 3 BLAS. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix A.  M >= 0. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix A.  N >= 0. */
+
+/*  KL      (input) INTEGER */
+/*          The number of subdiagonals within the band of A.  KL >= 0. */
+
+/*  KU      (input) INTEGER */
+/*          The number of superdiagonals within the band of A.  KU >= 0. */
+
+/*  AB      (input/output) REAL array, dimension (LDAB,N) */
+/*          On entry, the matrix A in band storage, in rows KL+1 to */
+/*          2*KL+KU+1; rows 1 to KL of the array need not be set. */
+/*          The j-th column of A is stored in the j-th column of the */
+/*          array AB as follows: */
+/*          AB(kl+ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl) */
+
+/*          On exit, details of the factorization: U is stored as an */
+/*          upper triangular band matrix with KL+KU superdiagonals in */
+/*          rows 1 to KL+KU+1, and the multipliers used during the */
+/*          factorization are stored in rows KL+KU+2 to 2*KL+KU+1. */
+/*          See below for further details. */
+
+/*  LDAB    (input) INTEGER */
+/*          The leading dimension of the array AB.  LDAB >= 2*KL+KU+1. */
+
+/*  IPIV    (output) INTEGER array, dimension (min(M,N)) */
+/*          The pivot indices; for 1 <= i <= min(M,N), row i of the */
+/*          matrix was interchanged with row IPIV(i). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0: successful exit */
+/*          < 0: if INFO = -i, the i-th argument had an illegal value */
+/*          > 0: if INFO = +i, U(i,i) is exactly zero. The factorization */
+/*               has been completed, but the factor U is exactly */
+/*               singular, and division by zero will occur if it is used */
+/*               to solve a system of equations. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  The band storage scheme is illustrated by the following example, when */
+/*  M = N = 6, KL = 2, KU = 1: */
+
+/*     On entry:                       On exit: */
+
+/*      *    *    *    +    +    +       *    *    *   u14  u25  u36 */
+/*      *    *    +    +    +    +       *    *   u13  u24  u35  u46 */
+/*      *   a12  a23  a34  a45  a56      *   u12  u23  u34  u45  u56 */
+/*     a11  a22  a33  a44  a55  a66     u11  u22  u33  u44  u55  u66 */
+/*     a21  a32  a43  a54  a65   *      m21  m32  m43  m54  m65   * */
+/*     a31  a42  a53  a64   *    *      m31  m42  m53  m64   *    * */
+
+/*  Array elements marked * are not used by the routine; elements marked */
+/*  + need not be set on entry, but are required by the routine to store */
+/*  elements of U because of fill-in resulting from the row interchanges. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     KV is the number of superdiagonals in the factor U, allowing for */
+/*     fill-in */
+
+    /* Parameter adjustments (f2c): bias pointers so 1-based Fortran indices work */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --ipiv;
+
+    /* Function Body: kv = KL+KU superdiagonals of U, allowing for fill-in */
+    kv = *ku + *kl;
+
+/*     Test the input parameters. */
+
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kl < 0) {
+	*info = -3;
+    } else if (*ku < 0) {
+	*info = -4;
+    } else if (*ldab < *kl + kv + 1) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGBTRF", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*m == 0 || *n == 0) {
+	return 0;
+    }
+
+/*     Determine the block size for this environment */
+
+    nb = ilaenv_(&c__1, "SGBTRF", " ", m, n, kl, ku);
+
+/*     The block size must not exceed the limit set by the size of the */
+/*     local arrays WORK13 and WORK31. */
+
+    nb = min(nb,64);
+
+    if (nb <= 1 || nb > *kl) {
+
+/*        Use unblocked code */
+
+	sgbtf2_(m, n, kl, ku, &ab[ab_offset], ldab, &ipiv[1], info);
+    } else {
+
+/*        Use blocked code */
+
+/*        Zero the superdiagonal elements of the work array WORK13 */
+
+	i__1 = nb;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = j - 1;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		work13[i__ + j * 65 - 66] = 0.f;
+/* L10: */
+	    }
+/* L20: */
+	}
+
+/*        Zero the subdiagonal elements of the work array WORK31 */
+
+	i__1 = nb;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = nb;
+	    for (i__ = j + 1; i__ <= i__2; ++i__) {
+		work31[i__ + j * 65 - 66] = 0.f;
+/* L30: */
+	    }
+/* L40: */
+	}
+
+/*        Gaussian elimination with partial pivoting */
+
+/*        Set fill-in elements in columns KU+2 to KV to zero */
+
+	i__1 = min(kv,*n);
+	for (j = *ku + 2; j <= i__1; ++j) {
+	    i__2 = *kl;
+	    for (i__ = kv - j + 2; i__ <= i__2; ++i__) {
+		ab[i__ + j * ab_dim1] = 0.f;
+/* L50: */
+	    }
+/* L60: */
+	}
+
+/*        JU is the index of the last column affected by the current */
+/*        stage of the factorization */
+
+	ju = 1;
+
+	i__1 = min(*m,*n);
+	i__2 = nb;
+	for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+	    i__3 = nb, i__4 = min(*m,*n) - j + 1;
+	    jb = min(i__3,i__4);
+
+/*           The active part of the matrix is partitioned */
+
+/*              A11   A12   A13 */
+/*              A21   A22   A23 */
+/*              A31   A32   A33 */
+
+/*           Here A11, A21 and A31 denote the current block of JB columns */
+/*           which is about to be factorized. The number of rows in the */
+/*           partitioning are JB, I2, I3 respectively, and the numbers */
+/*           of columns are JB, J2, J3. The superdiagonal elements of A13 */
+/*           and the subdiagonal elements of A31 lie outside the band. */
+
+/* Computing MIN */
+	    i__3 = *kl - jb, i__4 = *m - j - jb + 1;
+	    i2 = min(i__3,i__4);
+/* Computing MIN */
+	    i__3 = jb, i__4 = *m - j - *kl + 1;
+	    i3 = min(i__3,i__4);
+
+/*           J2 and J3 are computed after JU has been updated. */
+
+/*           Factorize the current block of JB columns */
+
+	    i__3 = j + jb - 1;
+	    for (jj = j; jj <= i__3; ++jj) {
+
+/*              Set fill-in elements in column JJ+KV to zero */
+
+		if (jj + kv <= *n) {
+		    i__4 = *kl;
+		    for (i__ = 1; i__ <= i__4; ++i__) {
+			ab[i__ + (jj + kv) * ab_dim1] = 0.f;
+/* L70: */
+		    }
+		}
+
+/*              Find pivot and test for singularity. KM is the number of */
+/*              subdiagonal elements in the current column. */
+
+/* Computing MIN */
+		i__4 = *kl, i__5 = *m - jj;
+		km = min(i__4,i__5);
+		i__4 = km + 1;
+		jp = isamax_(&i__4, &ab[kv + 1 + jj * ab_dim1], &c__1);
+		ipiv[jj] = jp + jj - j;
+		if (ab[kv + jp + jj * ab_dim1] != 0.f) {
+/* Computing MAX */
+/* Computing MIN */
+		    i__6 = jj + *ku + jp - 1;
+		    i__4 = ju, i__5 = min(i__6,*n);
+		    ju = max(i__4,i__5);
+		    if (jp != 1) {
+
+/*                    Apply interchange to columns J to J+JB-1 */
+
+			if (jp + jj - 1 < j + *kl) {
+
+			    i__4 = *ldab - 1;
+			    i__5 = *ldab - 1;
+			    sswap_(&jb, &ab[kv + 1 + jj - j + j * ab_dim1], &
+				    i__4, &ab[kv + jp + jj - j + j * ab_dim1], 
+				     &i__5);
+			} else {
+
+/*                       The interchange affects columns J to JJ-1 of A31 */
+/*                       which are stored in the work array WORK31 */
+
+			    i__4 = jj - j;
+			    i__5 = *ldab - 1;
+			    sswap_(&i__4, &ab[kv + 1 + jj - j + j * ab_dim1], 
+				    &i__5, &work31[jp + jj - j - *kl - 1], &
+				    c__65);
+			    i__4 = j + jb - jj;
+			    i__5 = *ldab - 1;
+			    i__6 = *ldab - 1;
+			    sswap_(&i__4, &ab[kv + 1 + jj * ab_dim1], &i__5, &
+				    ab[kv + jp + jj * ab_dim1], &i__6);
+			}
+		    }
+
+/*                 Compute multipliers */
+
+		    r__1 = 1.f / ab[kv + 1 + jj * ab_dim1];
+		    sscal_(&km, &r__1, &ab[kv + 2 + jj * ab_dim1], &c__1);
+
+/*                 Update trailing submatrix within the band and within */
+/*                 the current block. JM is the index of the last column */
+/*                 which needs to be updated. */
+
+/* Computing MIN */
+		    i__4 = ju, i__5 = j + jb - 1;
+		    jm = min(i__4,i__5);
+		    if (jm > jj) {
+			i__4 = jm - jj;
+			i__5 = *ldab - 1;
+			i__6 = *ldab - 1;
+			sger_(&km, &i__4, &c_b18, &ab[kv + 2 + jj * ab_dim1], 
+				&c__1, &ab[kv + (jj + 1) * ab_dim1], &i__5, &
+				ab[kv + 1 + (jj + 1) * ab_dim1], &i__6);
+		    }
+		} else {
+
+/*                 If pivot is zero, set INFO to the index of the pivot */
+/*                 unless a zero pivot has already been found. */
+
+		    if (*info == 0) {
+			*info = jj;
+		    }
+		}
+
+/*              Copy current column of A31 into the work array WORK31 */
+
+/* Computing MIN */
+		i__4 = jj - j + 1;
+		nw = min(i__4,i3);
+		if (nw > 0) {
+		    scopy_(&nw, &ab[kv + *kl + 1 - jj + j + jj * ab_dim1], &
+			    c__1, &work31[(jj - j + 1) * 65 - 65], &c__1);
+		}
+/* L80: */
+	    }
+	    if (j + jb <= *n) {
+
+/*              Apply the row interchanges to the other blocks. */
+
+/* Computing MIN */
+		i__3 = ju - j + 1;
+		j2 = min(i__3,kv) - jb;
+/* Computing MAX */
+		i__3 = 0, i__4 = ju - j - kv + 1;
+		j3 = max(i__3,i__4);
+
+/*              Use SLASWP to apply the row interchanges to A12, A22, and */
+/*              A32. */
+
+		i__3 = *ldab - 1;
+		slaswp_(&j2, &ab[kv + 1 - jb + (j + jb) * ab_dim1], &i__3, &
+			c__1, &jb, &ipiv[j], &c__1);
+
+/*              Adjust the pivot indices. */
+
+		i__3 = j + jb - 1;
+		for (i__ = j; i__ <= i__3; ++i__) {
+		    ipiv[i__] = ipiv[i__] + j - 1;
+/* L90: */
+		}
+
+/*              Apply the row interchanges to A13, A23, and A33 */
+/*              columnwise. */
+
+		k2 = j - 1 + jb + j2;
+		i__3 = j3;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    jj = k2 + i__;
+		    i__4 = j + jb - 1;
+		    for (ii = j + i__ - 1; ii <= i__4; ++ii) {
+			ip = ipiv[ii];
+			if (ip != ii) {
+			    temp = ab[kv + 1 + ii - jj + jj * ab_dim1];
+			    ab[kv + 1 + ii - jj + jj * ab_dim1] = ab[kv + 1 + 
+				    ip - jj + jj * ab_dim1];
+			    ab[kv + 1 + ip - jj + jj * ab_dim1] = temp;
+			}
+/* L100: */
+		    }
+/* L110: */
+		}
+
+/*              Update the relevant part of the trailing submatrix */
+
+		if (j2 > 0) {
+
+/*                 Update A12 */
+
+		    i__3 = *ldab - 1;
+		    i__4 = *ldab - 1;
+		    strsm_("Left", "Lower", "No transpose", "Unit", &jb, &j2, 
+			    &c_b31, &ab[kv + 1 + j * ab_dim1], &i__3, &ab[kv 
+			    + 1 - jb + (j + jb) * ab_dim1], &i__4);
+
+		    if (i2 > 0) {
+
+/*                    Update A22 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			i__5 = *ldab - 1;
+			sgemm_("No transpose", "No transpose", &i2, &j2, &jb, 
+				&c_b18, &ab[kv + 1 + jb + j * ab_dim1], &i__3, 
+				&ab[kv + 1 - jb + (j + jb) * ab_dim1], &i__4, 
+				&c_b31, &ab[kv + 1 + (j + jb) * ab_dim1], &
+				i__5);
+		    }
+
+		    if (i3 > 0) {
+
+/*                    Update A32 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			sgemm_("No transpose", "No transpose", &i3, &j2, &jb, 
+				&c_b18, work31, &c__65, &ab[kv + 1 - jb + (j 
+				+ jb) * ab_dim1], &i__3, &c_b31, &ab[kv + *kl 
+				+ 1 - jb + (j + jb) * ab_dim1], &i__4);
+		    }
+		}
+
+		if (j3 > 0) {
+
+/*                 Copy the lower triangle of A13 into the work array */
+/*                 WORK13 */
+
+		    i__3 = j3;
+		    for (jj = 1; jj <= i__3; ++jj) {
+			i__4 = jb;
+			for (ii = jj; ii <= i__4; ++ii) {
+			    work13[ii + jj * 65 - 66] = ab[ii - jj + 1 + (jj 
+				    + j + kv - 1) * ab_dim1];
+/* L120: */
+			}
+/* L130: */
+		    }
+
+/*                 Update A13 in the work array */
+
+		    i__3 = *ldab - 1;
+		    strsm_("Left", "Lower", "No transpose", "Unit", &jb, &j3, 
+			    &c_b31, &ab[kv + 1 + j * ab_dim1], &i__3, work13, 
+			    &c__65);
+
+		    if (i2 > 0) {
+
+/*                    Update A23 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			sgemm_("No transpose", "No transpose", &i2, &j3, &jb, 
+				&c_b18, &ab[kv + 1 + jb + j * ab_dim1], &i__3, 
+				work13, &c__65, &c_b31, &ab[jb + 1 + (j + kv) 
+				* ab_dim1], &i__4);
+		    }
+
+		    if (i3 > 0) {
+
+/*                    Update A33 */
+
+			i__3 = *ldab - 1;
+			sgemm_("No transpose", "No transpose", &i3, &j3, &jb, 
+				&c_b18, work31, &c__65, work13, &c__65, &
+				c_b31, &ab[*kl + 1 + (j + kv) * ab_dim1], &
+				i__3);
+		    }
+
+/*                 Copy the lower triangle of A13 back into place */
+
+		    i__3 = j3;
+		    for (jj = 1; jj <= i__3; ++jj) {
+			i__4 = jb;
+			for (ii = jj; ii <= i__4; ++ii) {
+			    ab[ii - jj + 1 + (jj + j + kv - 1) * ab_dim1] = 
+				    work13[ii + jj * 65 - 66];
+/* L140: */
+			}
+/* L150: */
+		    }
+		}
+	    } else {
+
+/*              Adjust the pivot indices. */
+
+		i__3 = j + jb - 1;
+		for (i__ = j; i__ <= i__3; ++i__) {
+		    ipiv[i__] = ipiv[i__] + j - 1;
+/* L160: */
+		}
+	    }
+
+/*           Partially undo the interchanges in the current block to */
+/*           restore the upper triangular form of A31 and copy the upper */
+/*           triangle of A31 back into place */
+
+	    i__3 = j;
+	    for (jj = j + jb - 1; jj >= i__3; --jj) {
+		jp = ipiv[jj] - jj + 1;
+		if (jp != 1) {
+
+/*                 Apply interchange to columns J to JJ-1 */
+
+		    if (jp + jj - 1 < j + *kl) {
+
+/*                    The interchange does not affect A31 */
+
+			i__4 = jj - j;
+			i__5 = *ldab - 1;
+			i__6 = *ldab - 1;
+			sswap_(&i__4, &ab[kv + 1 + jj - j + j * ab_dim1], &
+				i__5, &ab[kv + jp + jj - j + j * ab_dim1], &
+				i__6);
+		    } else {
+
+/*                    The interchange does affect A31 */
+
+			i__4 = jj - j;
+			i__5 = *ldab - 1;
+			sswap_(&i__4, &ab[kv + 1 + jj - j + j * ab_dim1], &
+				i__5, &work31[jp + jj - j - *kl - 1], &c__65);
+		    }
+		}
+
+/*              Copy the current column of A31 back into place */
+
+/* Computing MIN */
+		i__4 = i3, i__5 = jj - j + 1;
+		nw = min(i__4,i__5);
+		if (nw > 0) {
+		    scopy_(&nw, &work31[(jj - j + 1) * 65 - 65], &c__1, &ab[
+			    kv + *kl + 1 - jj + j + jj * ab_dim1], &c__1);
+		}
+/* L170: */
+	    }
+/* L180: */
+	}
+    }
+
+    return 0;
+
+/*     End of SGBTRF */
+
+} /* sgbtrf_ */
diff --git a/contrib/libs/clapack/sgbtrs.c b/contrib/libs/clapack/sgbtrs.c
new file mode 100644
index 0000000000..b99d3d2ddd
--- /dev/null
+++ b/contrib/libs/clapack/sgbtrs.c
@@ -0,0 +1,242 @@
+/* sgbtrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b7 = -1.f;
+static integer c__1 = 1;
+static real c_b23 = 1.f;
+
+/* Subroutine */ int sgbtrs_(char *trans, integer *n, integer *kl, integer *
+	ku, integer *nrhs, real *ab, integer *ldab, integer *ipiv, real *b, 
+	integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__, j, l, kd, lm;
+    extern /* Subroutine */ int sger_(integer *, integer *, real *, real *, 
+	    integer *, real *, integer *, real *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *, 
+	    real *, integer *, real *, integer *, real *, real *, integer *);
+    logical lnoti;
+    extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *, 
+	    integer *), stbsv_(char *, char *, char *, integer *, integer *, 
+	    real *, integer *, real *, integer *), 
+	    xerbla_(char *, integer *);
+    logical notran;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SGBTRS solves a system of linear equations */
+/*     A * X = B  or  A' * X = B */
+/*  with a general band matrix A using the LU factorization computed */
+/*  by SGBTRF. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  TRANS   (input) CHARACTER*1 */
+/*          Specifies the form of the system of equations. */
+/*          = 'N':  A * X = B  (No transpose) */
+/*          = 'T':  A'* X = B  (Transpose) */
+/*          = 'C':  A'* X = B  (Conjugate transpose = Transpose) */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  KL      (input) INTEGER */
+/*          The number of subdiagonals within the band of A.  KL >= 0. */
+
+/*  KU      (input) INTEGER */
+/*          The number of superdiagonals within the band of A.  KU >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrix B.  NRHS >= 0. */
+
+/*  AB      (input) REAL array, dimension (LDAB,N) */
+/*          Details of the LU factorization of the band matrix A, as */
+/*          computed by SGBTRF.  U is stored as an upper triangular band */
+/*          matrix with KL+KU superdiagonals in rows 1 to KL+KU+1, and */
+/*          the multipliers used during the factorization are stored in */
+/*          rows KL+KU+2 to 2*KL+KU+1. */
+
+/*  LDAB    (input) INTEGER */
+/*          The leading dimension of the array AB.  LDAB >= 2*KL+KU+1. */
+
+/*  IPIV    (input) INTEGER array, dimension (N) */
+/*          The pivot indices; for 1 <= i <= N, row i of the matrix was */
+/*          interchanged with row IPIV(i). */
+
+/*  B       (input/output) REAL array, dimension (LDB,NRHS) */
+/*          On entry, the right hand side matrix B. */
+/*          On exit, the solution matrix X. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments (f2c): bias pointers so 1-based Fortran indices work */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body: validate arguments; TRANS='N' selects A*X=B */
+    *info = 0;
+    notran = lsame_(trans, "N");
+    if (! notran && ! lsame_(trans, "T") && ! lsame_(
+	    trans, "C")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kl < 0) {
+	*info = -3;
+    } else if (*ku < 0) {
+	*info = -4;
+    } else if (*nrhs < 0) {
+	*info = -5;
+    } else if (*ldab < (*kl << 1) + *ku + 1) {
+	*info = -7;
+    } else if (*ldb < max(1,*n)) {
+	*info = -10;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGBTRS", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+    kd = *ku + *kl + 1;
+    lnoti = *kl > 0;
+
+    if (notran) {
+
+/*        Solve  A*X = B. */
+
+/*        Solve L*X = B, overwriting B with X. */
+
+/*        L is represented as a product of permutations and unit lower */
+/*        triangular matrices L = P(1) * L(1) * ... * P(n-1) * L(n-1), */
+/*        where each transformation L(i) is a rank-one modification of */
+/*        the identity matrix. */
+
+	if (lnoti) {
+	    i__1 = *n - 1;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		i__2 = *kl, i__3 = *n - j;
+		lm = min(i__2,i__3);
+		l = ipiv[j];
+		if (l != j) {
+		    sswap_(nrhs, &b[l + b_dim1], ldb, &b[j + b_dim1], ldb);
+		}
+		sger_(&lm, nrhs, &c_b7, &ab[kd + 1 + j * ab_dim1], &c__1, &b[
+			j + b_dim1], ldb, &b[j + 1 + b_dim1], ldb);
+/* L10: */
+	    }
+	}
+
+	i__1 = *nrhs;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*           Solve U*X = B, overwriting B with X. */
+
+	    i__2 = *kl + *ku;
+	    stbsv_("Upper", "No transpose", "Non-unit", n, &i__2, &ab[
+		    ab_offset], ldab, &b[i__ * b_dim1 + 1], &c__1);
+/* L20: */
+	}
+
+    } else {
+
+/*        Solve A'*X = B. */
+
+	i__1 = *nrhs;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*           Solve U'*X = B, overwriting B with X. */
+
+	    i__2 = *kl + *ku;
+	    stbsv_("Upper", "Transpose", "Non-unit", n, &i__2, &ab[ab_offset],
+		     ldab, &b[i__ * b_dim1 + 1], &c__1);
+/* L30: */
+	}
+
+/*        Solve L'*X = B, overwriting B with X. */
+
+	if (lnoti) {
+	    for (j = *n - 1; j >= 1; --j) {
+/* Computing MIN */
+		i__1 = *kl, i__2 = *n - j;
+		lm = min(i__1,i__2);
+		sgemv_("Transpose", &lm, nrhs, &c_b7, &b[j + 1 + b_dim1], ldb,
+			 &ab[kd + 1 + j * ab_dim1], &c__1, &c_b23, &b[j + 
+			b_dim1], ldb);
+		l = ipiv[j];
+		if (l != j) {
+		    sswap_(nrhs, &b[l + b_dim1], ldb, &b[j + b_dim1], ldb);
+		}
+/* L40: */
+	    }
+	}
+    }
+    return 0;
+
+/*     End of SGBTRS */
+
+} /* sgbtrs_ */
diff --git a/contrib/libs/clapack/sgebak.c b/contrib/libs/clapack/sgebak.c
new file mode 100644
index 0000000000..349055f4ce
--- /dev/null
+++ b/contrib/libs/clapack/sgebak.c
@@ -0,0 +1,235 @@
+/* sgebak.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sgebak_(char *job, char *side, integer *n, integer *ilo,
+	 integer *ihi, real *scale, integer *m, real *v, integer *ldv, integer
+	*info)
+{
+    /* System generated locals */
+    integer v_dim1, v_offset, i__1;
+
+    /* Local variables */
+    integer i__, k;
+    real s;
+    integer ii;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+    logical leftv;
+    extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+	    integer *), xerbla_(char *, integer *);
+    logical rightv;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGEBAK forms the right or left eigenvectors of a real general matrix */
+/* by backward transformation on the computed eigenvectors of the */
+/* balanced matrix output by SGEBAL. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies the type of backward transformation required: */
+/* = 'N', do nothing, return immediately; */
+/* = 'P', do backward transformation for permutation only; */
+/* = 'S', do backward transformation for scaling only; */
+/* = 'B', do backward transformations for both permutation and */
+/* scaling. */
+/* JOB must be the same as the argument JOB supplied to SGEBAL. */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'R': V contains right eigenvectors; */
+/* = 'L': V contains left eigenvectors. */
+
+/* N (input) INTEGER */
+/* The number of rows of the matrix V. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* The integers ILO and IHI determined by SGEBAL. */
+/* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/* SCALE (input) REAL array, dimension (N) */
+/* Details of the permutation and scaling factors, as returned */
+/* by SGEBAL. */
+
+/* M (input) INTEGER */
+/* The number of columns of the matrix V. M >= 0. */
+
+/* V (input/output) REAL array, dimension (LDV,M) */
+/* On entry, the matrix of right or left eigenvectors to be */
+/* transformed, as returned by SHSEIN or STREVC. */
+/* On exit, V is overwritten by the transformed eigenvectors. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. LDV >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and Test the input parameters */
+
+    /* Parameter adjustments: shift to 1-based Fortran-style indexing */
+    --scale;
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+
+    /* Function Body */
+    rightv = lsame_(side, "R");
+    leftv = lsame_(side, "L");
+
+    *info = 0;
+    if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+	    && ! lsame_(job, "B")) {
+	*info = -1;
+    } else if (! rightv && ! leftv) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ilo < 1 || *ilo > max(1,*n)) {
+	*info = -4;
+    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+	*info = -5;
+    } else if (*m < 0) {
+	*info = -7;
+    } else if (*ldv < max(1,*n)) {
+	*info = -9;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGEBAK", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible: nothing to transform */
+
+    if (*n == 0) {
+	return 0;
+    }
+    if (*m == 0) {
+	return 0;
+    }
+    if (lsame_(job, "N")) {
+	return 0;
+    }
+
+    if (*ilo == *ihi) {
+	goto L30; /* scaled block is empty; only permutation may apply */
+    }
+
+/* Backward balance: undo the diagonal scaling D applied by SGEBAL */
+
+    if (lsame_(job, "S") || lsame_(job, "B")) {
+
+	if (rightv) {
+	    i__1 = *ihi;
+	    for (i__ = *ilo; i__ <= i__1; ++i__) {
+		s = scale[i__]; /* right eigenvectors: multiply row i by D(i) */
+		sscal_(m, &s, &v[i__ + v_dim1], ldv);
+/* L10: */
+	    }
+	}
+
+	if (leftv) {
+	    i__1 = *ihi;
+	    for (i__ = *ilo; i__ <= i__1; ++i__) {
+		s = 1.f / scale[i__]; /* left eigenvectors: multiply row i by inv(D)(i) */
+		sscal_(m, &s, &v[i__ + v_dim1], ldv);
+/* L20: */
+	    }
+	}
+
+    }
+
+/* Backward permutation */
+
+/* For I = ILO-1 step -1 until 1, */
+/* IHI+1 step 1 until N do -- */
+
+L30:
+    if (lsame_(job, "P") || lsame_(job, "B")) {
+	if (rightv) {
+	    i__1 = *n;
+	    for (ii = 1; ii <= i__1; ++ii) {
+		i__ = ii;
+		if (i__ >= *ilo && i__ <= *ihi) {
+		    goto L40; /* rows inside [ilo,ihi] were not permuted */
+		}
+		if (i__ < *ilo) {
+		    i__ = *ilo - ii; /* map ii to descending order ilo-1..1 */
+		}
+		k = scale[i__]; /* SCALE(i) holds a permutation index (stored as real) */
+		if (k == i__) {
+		    goto L40;
+		}
+		sswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L40:
+		;
+	    }
+	}
+
+	if (leftv) {
+	    i__1 = *n;
+	    for (ii = 1; ii <= i__1; ++ii) {
+		i__ = ii;
+		if (i__ >= *ilo && i__ <= *ihi) {
+		    goto L50; /* rows inside [ilo,ihi] were not permuted */
+		}
+		if (i__ < *ilo) {
+		    i__ = *ilo - ii; /* map ii to descending order ilo-1..1 */
+		}
+		k = scale[i__]; /* SCALE(i) holds a permutation index (stored as real) */
+		if (k == i__) {
+		    goto L50;
+		}
+		sswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L50:
+		;
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of SGEBAK */
+
+} /* sgebak_ */
diff --git a/contrib/libs/clapack/sgebal.c b/contrib/libs/clapack/sgebal.c
new file mode 100644
index 0000000000..4c8c9b528e
--- /dev/null
+++ b/contrib/libs/clapack/sgebal.c
@@ -0,0 +1,400 @@
+/* sgebal.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sgebal_(char *job, integer *n, real *a, integer *lda,
+	integer *ilo, integer *ihi, real *scale, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    real r__1, r__2;
+
+    /* Local variables */
+    real c__, f, g;
+    integer i__, j, k, l, m;
+    real r__, s, ca, ra;
+    integer ica, ira, iexc;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+	     sswap_(integer *, real *, integer *, real *, integer *);
+    real sfmin1, sfmin2, sfmax1, sfmax2;
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer isamax_(integer *, real *, integer *);
+    logical noconv;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGEBAL balances a general real matrix A. This involves, first, */
+/* permuting A by a similarity transformation to isolate eigenvalues */
+/* in the first 1 to ILO-1 and last IHI+1 to N elements on the */
+/* diagonal; and second, applying a diagonal similarity transformation */
+/* to rows and columns ILO to IHI to make the rows and columns as */
+/* close in norm as possible. Both steps are optional. */
+
+/* Balancing may reduce the 1-norm of the matrix, and improve the */
+/* accuracy of the computed eigenvalues and/or eigenvectors. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies the operations to be performed on A: */
+/* = 'N': none: simply set ILO = 1, IHI = N, SCALE(I) = 1.0 */
+/* for i = 1,...,N; */
+/* = 'P': permute only; */
+/* = 'S': scale only; */
+/* = 'B': both permute and scale. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the input matrix A. */
+/* On exit, A is overwritten by the balanced matrix. */
+/* If JOB = 'N', A is not referenced. */
+/* See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* ILO (output) INTEGER */
+/* IHI (output) INTEGER */
+/* ILO and IHI are set to integers such that on exit */
+/* A(i,j) = 0 if i > j and j = 1,...,ILO-1 or I = IHI+1,...,N. */
+/* If JOB = 'N' or 'S', ILO = 1 and IHI = N. */
+
+/* SCALE (output) REAL array, dimension (N) */
+/* Details of the permutations and scaling factors applied to */
+/* A. If P(j) is the index of the row and column interchanged */
+/* with row and column j and D(j) is the scaling factor */
+/* applied to row and column j, then */
+/* SCALE(j) = P(j) for j = 1,...,ILO-1 */
+/* = D(j) for j = ILO,...,IHI */
+/* = P(j) for j = IHI+1,...,N. */
+/* The order in which the interchanges are made is N to IHI+1, */
+/* then 1 to ILO-1. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The permutations consist of row and column interchanges which put */
+/* the matrix in the form */
+
+/* ( T1 X Y ) */
+/* P A P = ( 0 B Z ) */
+/* ( 0 0 T2 ) */
+
+/* where T1 and T2 are upper triangular matrices whose eigenvalues lie */
+/* along the diagonal. The column indices ILO and IHI mark the starting */
+/* and ending columns of the submatrix B. Balancing consists of applying */
+/* a diagonal similarity transformation inv(D) * B * D to make the */
+/* 1-norms of each row of B and its corresponding column nearly equal. */
+/* The output matrix is */
+
+/* ( T1 X*D Y ) */
+/* ( 0 inv(D)*B*D inv(D)*Z ). */
+/* ( 0 0 T2 ) */
+
+/* Information about the permutations P and the diagonal matrix D is */
+/* returned in the vector SCALE. */
+
+/* This subroutine is based on the EISPACK routine BALANC. */
+
+/* Modified by Tzu-Yi Chen, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments: shift to 1-based Fortran-style indexing */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --scale;
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+	    && ! lsame_(job, "B")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGEBAL", &i__1);
+	return 0;
+    }
+
+    k = 1; /* [k,l] is the active submatrix; shrinks as rows/cols are isolated */
+    l = *n;
+
+    if (*n == 0) {
+	goto L210;
+    }
+
+    if (lsame_(job, "N")) {
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    scale[i__] = 1.f;
+/* L10: */
+	}
+	goto L210;
+    }
+
+    if (lsame_(job, "S")) {
+	goto L120; /* scaling only: skip the permutation phase */
+    }
+
+/* Permutation to isolate eigenvalues if possible */
+
+    goto L50;
+
+/* Row and column exchange; records the swap index in SCALE(m). */
+
+L20:
+    scale[m] = (real) j;
+    if (j == m) {
+	goto L30;
+    }
+
+    sswap_(&l, &a[j * a_dim1 + 1], &c__1, &a[m * a_dim1 + 1], &c__1);
+    i__1 = *n - k + 1;
+    sswap_(&i__1, &a[j + k * a_dim1], lda, &a[m + k * a_dim1], lda);
+
+L30:
+    switch (iexc) { /* iexc selects which search loop to resume */
+	case 1:  goto L40;
+	case 2:  goto L80;
+    }
+
+/* Search for rows isolating an eigenvalue and push them down. */
+
+L40:
+    if (l == 1) {
+	goto L210;
+    }
+    --l;
+
+L50:
+    for (j = l; j >= 1; --j) {
+
+	i__1 = l;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (i__ == j) {
+		goto L60;
+	    }
+	    if (a[j + i__ * a_dim1] != 0.f) {
+		goto L70; /* row j has an off-diagonal entry; not isolated */
+	    }
+L60:
+	    ;
+	}
+
+	m = l;
+	iexc = 1;
+	goto L20;
+L70:
+	;
+    }
+
+    goto L90;
+
+/* Search for columns isolating an eigenvalue and push them left. */
+
+L80:
+    ++k;
+
+L90:
+    i__1 = l;
+    for (j = k; j <= i__1; ++j) {
+
+	i__2 = l;
+	for (i__ = k; i__ <= i__2; ++i__) {
+	    if (i__ == j) {
+		goto L100;
+	    }
+	    if (a[i__ + j * a_dim1] != 0.f) {
+		goto L110; /* column j has an off-diagonal entry; not isolated */
+	    }
+L100:
+	    ;
+	}
+
+	m = k;
+	iexc = 2;
+	goto L20;
+L110:
+	;
+    }
+
+L120:
+    i__1 = l;
+    for (i__ = k; i__ <= i__1; ++i__) {
+	scale[i__] = 1.f; /* initialize scaling factors for the active block */
+/* L130: */
+    }
+
+    if (lsame_(job, "P")) {
+	goto L210;
+    }
+
+/* Balance the submatrix in rows K to L. */
+
+/* Iterative loop for norm reduction */
+
+    sfmin1 = slamch_("S") / slamch_("P"); /* safe-scaling thresholds against over/underflow */
+    sfmax1 = 1.f / sfmin1;
+    sfmin2 = sfmin1 * 2.f;
+    sfmax2 = 1.f / sfmin2;
+L140:
+    noconv = FALSE_;
+
+    i__1 = l;
+    for (i__ = k; i__ <= i__1; ++i__) {
+	c__ = 0.f; /* c = 1-norm of column i (off-diagonal part) */
+	r__ = 0.f; /* r = 1-norm of row i (off-diagonal part) */
+
+	i__2 = l;
+	for (j = k; j <= i__2; ++j) {
+	    if (j == i__) {
+		goto L150;
+	    }
+	    c__ += (r__1 = a[j + i__ * a_dim1], dabs(r__1));
+	    r__ += (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+L150:
+	    ;
+	}
+	ica = isamax_(&l, &a[i__ * a_dim1 + 1], &c__1);
+	ca = (r__1 = a[ica + i__ * a_dim1], dabs(r__1)); /* largest |entry| in column i */
+	i__2 = *n - k + 1;
+	ira = isamax_(&i__2, &a[i__ + k * a_dim1], lda);
+	ra = (r__1 = a[i__ + (ira + k - 1) * a_dim1], dabs(r__1)); /* largest |entry| in row i */
+
+/* Guard against zero C or R due to underflow. */
+
+	if (c__ == 0.f || r__ == 0.f) {
+	    goto L200;
+	}
+	g = r__ / 2.f;
+	f = 1.f; /* f accumulates the power-of-two scaling for this row/column */
+	s = c__ + r__;
+L160:
+/* Computing MAX */
+	r__1 = max(f,c__);
+/* Computing MIN */
+	r__2 = min(r__,g);
+	if (c__ >= g || dmax(r__1,ca) >= sfmax2 || dmin(r__2,ra) <= sfmin2) {
+	    goto L170;
+	}
+	f *= 2.f;
+	c__ *= 2.f;
+	ca *= 2.f;
+	r__ /= 2.f;
+	g /= 2.f;
+	ra /= 2.f;
+	goto L160;
+
+L170:
+	g = c__ / 2.f;
+L180:
+/* Computing MIN */
+	r__1 = min(f,c__), r__1 = min(r__1,g);
+	if (g < r__ || dmax(r__,ra) >= sfmax2 || dmin(r__1,ca) <= sfmin2) {
+	    goto L190;
+	}
+	f /= 2.f;
+	c__ /= 2.f;
+	g /= 2.f;
+	ca /= 2.f;
+	r__ *= 2.f;
+	ra *= 2.f;
+	goto L180;
+
+/* Now balance. */
+
+L190:
+	if (c__ + r__ >= s * .95f) {
+	    goto L200; /* norm reduction below 5%: not worth scaling */
+	}
+	if (f < 1.f && scale[i__] < 1.f) {
+	    if (f * scale[i__] <= sfmin1) {
+		goto L200; /* would underflow the accumulated factor */
+	    }
+	}
+	if (f > 1.f && scale[i__] > 1.f) {
+	    if (scale[i__] >= sfmax1 / f) {
+		goto L200; /* would overflow the accumulated factor */
+	    }
+	}
+	g = 1.f / f;
+	scale[i__] *= f;
+	noconv = TRUE_;
+
+	i__2 = *n - k + 1;
+	sscal_(&i__2, &g, &a[i__ + k * a_dim1], lda);
+	sscal_(&l, &f, &a[i__ * a_dim1 + 1], &c__1);
+
+L200:
+	;
+    }
+
+    if (noconv) {
+	goto L140; /* repeat until no row/column needed rescaling */
+    }
+
+L210:
+    *ilo = k;
+    *ihi = l;
+
+    return 0;
+
+/* End of SGEBAL */
+
+} /* sgebal_ */
diff --git a/contrib/libs/clapack/sgebd2.c b/contrib/libs/clapack/sgebd2.c
new file mode 100644
index 0000000000..a5855ac444
--- /dev/null
+++ b/contrib/libs/clapack/sgebd2.c
@@ -0,0 +1,303 @@
+/* sgebd2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sgebd2_(integer *m, integer *n, real *a, integer *lda,
+	real *d__, real *e, real *tauq, real *taup, real *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__;
+    extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+	    integer *, real *, real *, integer *, real *), xerbla_(
+	    char *, integer *), slarfg_(integer *, real *, real *,
+	    integer *, real *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGEBD2 reduces a real general m by n matrix A to upper or lower */
+/* bidiagonal form B by an orthogonal transformation: Q' * A * P = B. */
+
+/* If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows in the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns in the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the m by n general matrix to be reduced. */
+/* On exit, */
+/* if m >= n, the diagonal and the first superdiagonal are */
+/* overwritten with the upper bidiagonal matrix B; the */
+/* elements below the diagonal, with the array TAUQ, represent */
+/* the orthogonal matrix Q as a product of elementary */
+/* reflectors, and the elements above the first superdiagonal, */
+/* with the array TAUP, represent the orthogonal matrix P as */
+/* a product of elementary reflectors; */
+/* if m < n, the diagonal and the first subdiagonal are */
+/* overwritten with the lower bidiagonal matrix B; the */
+/* elements below the first subdiagonal, with the array TAUQ, */
+/* represent the orthogonal matrix Q as a product of */
+/* elementary reflectors, and the elements above the diagonal, */
+/* with the array TAUP, represent the orthogonal matrix P as */
+/* a product of elementary reflectors. */
+/* See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* D (output) REAL array, dimension (min(M,N)) */
+/* The diagonal elements of the bidiagonal matrix B: */
+/* D(i) = A(i,i). */
+
+/* E (output) REAL array, dimension (min(M,N)-1) */
+/* The off-diagonal elements of the bidiagonal matrix B: */
+/* if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1; */
+/* if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1. */
+
+/* TAUQ (output) REAL array dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix Q. See Further Details. */
+
+/* TAUP (output) REAL array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix P. See Further Details. */
+
+/* WORK (workspace) REAL array, dimension (max(M,N)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrices Q and P are represented as products of elementary */
+/* reflectors: */
+
+/* If m >= n, */
+
+/* Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1) */
+
+/* Each H(i) and G(i) has the form: */
+
+/* H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' */
+
+/* where tauq and taup are real scalars, and v and u are real vectors; */
+/* v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in A(i+1:m,i); */
+/* u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in A(i,i+2:n); */
+/* tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* If m < n, */
+
+/* Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m) */
+
+/* Each H(i) and G(i) has the form: */
+
+/* H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' */
+
+/* where tauq and taup are real scalars, and v and u are real vectors; */
+/* v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i); */
+/* u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n); */
+/* tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* The contents of A on exit are illustrated by the following examples: */
+
+/* m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): */
+
+/* ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 ) */
+/* ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 ) */
+/* ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 ) */
+/* ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 ) */
+/* ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 ) */
+/* ( v1 v2 v3 v4 v5 ) */
+
+/* where d and e denote diagonal and off-diagonal elements of B, vi */
+/* denotes an element of the vector defining H(i), and ui an element of */
+/* the vector defining G(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments: shift to 1-based Fortran-style indexing */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --d__;
+    --e;
+    --tauq;
+    --taup;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info < 0) {
+	i__1 = -(*info);
+	xerbla_("SGEBD2", &i__1);
+	return 0;
+    }
+
+    if (*m >= *n) {
+
+/* Reduce to upper bidiagonal form */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+
+	    i__2 = *m - i__ + 1;
+/* Computing MIN */
+	    i__3 = i__ + 1;
+	    slarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3, *m)+ i__ *
+		    a_dim1], &c__1, &tauq[i__]);
+	    d__[i__] = a[i__ + i__ * a_dim1];
+	    a[i__ + i__ * a_dim1] = 1.f; /* temporarily set v(i)=1 so slarf_ can apply H(i) */
+
+/* Apply H(i) to A(i:m,i+1:n) from the left */
+
+	    if (i__ < *n) {
+		i__2 = *m - i__ + 1;
+		i__3 = *n - i__;
+		slarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &
+			tauq[i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]
+);
+	    }
+	    a[i__ + i__ * a_dim1] = d__[i__]; /* restore the saved diagonal entry */
+
+	    if (i__ < *n) {
+
+/* Generate elementary reflector G(i) to annihilate */
+/* A(i,i+2:n) */
+
+		i__2 = *n - i__;
+/* Computing MIN */
+		i__3 = i__ + 2;
+		slarfg_(&i__2, &a[i__ + (i__ + 1) * a_dim1], &a[i__ + min(
+			i__3, *n)* a_dim1], lda, &taup[i__]);
+		e[i__] = a[i__ + (i__ + 1) * a_dim1];
+		a[i__ + (i__ + 1) * a_dim1] = 1.f; /* temporarily set u(i+1)=1 for slarf_ */
+
+/* Apply G(i) to A(i+1:m,i+1:n) from the right */
+
+		i__2 = *m - i__;
+		i__3 = *n - i__;
+		slarf_("Right", &i__2, &i__3, &a[i__ + (i__ + 1) * a_dim1],
+			lda, &taup[i__], &a[i__ + 1 + (i__ + 1) * a_dim1],
+			lda, &work[1]);
+		a[i__ + (i__ + 1) * a_dim1] = e[i__]; /* restore the saved superdiagonal entry */
+	    } else {
+		taup[i__] = 0.f;
+	    }
+/* L10: */
+	}
+    } else {
+
+/* Reduce to lower bidiagonal form */
+
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector G(i) to annihilate A(i,i+1:n) */
+
+	    i__2 = *n - i__ + 1;
+/* Computing MIN */
+	    i__3 = i__ + 1;
+	    slarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3, *n)*
+		    a_dim1], lda, &taup[i__]);
+	    d__[i__] = a[i__ + i__ * a_dim1];
+	    a[i__ + i__ * a_dim1] = 1.f; /* temporarily set u(i)=1 so slarf_ can apply G(i) */
+
+/* Apply G(i) to A(i+1:m,i:n) from the right */
+
+	    if (i__ < *m) {
+		i__2 = *m - i__;
+		i__3 = *n - i__ + 1;
+		slarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &
+			taup[i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+	    }
+	    a[i__ + i__ * a_dim1] = d__[i__]; /* restore the saved diagonal entry */
+
+	    if (i__ < *m) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(i+2:m,i) */
+
+		i__2 = *m - i__;
+/* Computing MIN */
+		i__3 = i__ + 2;
+		slarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3, *m)+
+			i__ * a_dim1], &c__1, &tauq[i__]);
+		e[i__] = a[i__ + 1 + i__ * a_dim1];
+		a[i__ + 1 + i__ * a_dim1] = 1.f; /* temporarily set v(i+1)=1 for slarf_ */
+
+/* Apply H(i) to A(i+1:m,i+1:n) from the left */
+
+		i__2 = *m - i__;
+		i__3 = *n - i__;
+		slarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &
+			c__1, &tauq[i__], &a[i__ + 1 + (i__ + 1) * a_dim1],
+			lda, &work[1]);
+		a[i__ + 1 + i__ * a_dim1] = e[i__]; /* restore the saved subdiagonal entry */
+	    } else {
+		tauq[i__] = 0.f;
+	    }
+/* L20: */
+	}
+    }
+    return 0;
+
+/* End of SGEBD2 */
+
+} /* sgebd2_ */
diff --git a/contrib/libs/clapack/sgebrd.c b/contrib/libs/clapack/sgebrd.c
new file mode 100644
index 0000000000..d0a0cef887
--- /dev/null
+++ b/contrib/libs/clapack/sgebrd.c
@@ -0,0 +1,336 @@
+/* sgebrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+static real c_b21 = -1.f;
+static real c_b22 = 1.f;
+
+/* Subroutine */ int sgebrd_(integer *m, integer *n, real *a, integer *lda,
+	real *d__, real *e, real *tauq, real *taup, real *work, integer *
+	lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, j, nb, nx;
+    real ws;
+    integer nbmin, iinfo;
+    extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+	    integer *, real *, real *, integer *, real *, integer *, real *,
+	    real *, integer *);
+    integer minmn;
+    extern /* Subroutine */ int sgebd2_(integer *, integer *, real *, integer
+	    *, real *, real *, real *, real *, real *, integer *), slabrd_(
+	    integer *, integer *, integer *, real *, integer *, real *, real *
+, real *, real *, real *, integer *, real *, integer *), xerbla_(
+	    char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer ldwrkx, ldwrky, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGEBRD reduces a general real M-by-N matrix A to upper or lower */
+/* bidiagonal form B by an orthogonal transformation: Q**T * A * P = B. */
+
+/* If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows in the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns in the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N general matrix to be reduced. */
+/* On exit, */
+/* if m >= n, the diagonal and the first superdiagonal are */
+/* overwritten with the upper bidiagonal matrix B; the */
+/* elements below the diagonal, with the array TAUQ, represent */
+/* the orthogonal matrix Q as a product of elementary */
+/* reflectors, and the elements above the first superdiagonal, */
+/* with the array TAUP, represent the orthogonal matrix P as */
+/* a product of elementary reflectors; */
+/* if m < n, the diagonal and the first subdiagonal are */
+/* overwritten with the lower bidiagonal matrix B; the */
+/* elements below the first subdiagonal, with the array TAUQ, */
+/* represent the orthogonal matrix Q as a product of */
+/* elementary reflectors, and the elements above the diagonal, */
+/* with the array TAUP, represent the orthogonal matrix P as */
+/* a product of elementary reflectors. */
+/* See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* D (output) REAL array, dimension (min(M,N)) */
+/* The diagonal elements of the bidiagonal matrix B: */
+/* D(i) = A(i,i). */
+
+/* E (output) REAL array, dimension (min(M,N)-1) */
+/* The off-diagonal elements of the bidiagonal matrix B: */
+/* if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1; */
+/* if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1. */
+
+/* TAUQ (output) REAL array dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix Q. See Further Details. */
+
+/* TAUP (output) REAL array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix P. See Further Details. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,M,N). */
+/* For optimum performance LWORK >= (M+N)*NB, where NB */
+/* is the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrices Q and P are represented as products of elementary */
+/* reflectors: */
+
+/* If m >= n, */
+
+/* Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1) */
+
+/* Each H(i) and G(i) has the form: */
+
+/* H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' */
+
+/* where tauq and taup are real scalars, and v and u are real vectors; */
+/* v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in A(i+1:m,i); */
+/* u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in A(i,i+2:n); */
+/* tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* If m < n, */
+
+/* Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m) */
+
+/* Each H(i) and G(i) has the form: */
+
+/* H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' */
+
+/* where tauq and taup are real scalars, and v and u are real vectors; */
+/* v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i); */
+/* u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n); */
+/* tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* The contents of A on exit are illustrated by the following examples: */
+
+/* m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): */
+
+/* ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 ) */
+/* ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 ) */
+/* ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 ) */
+/* ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 ) */
+/* ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 ) */
+/* ( v1 v2 v3 v4 v5 ) */
+
+/* where d and e denote diagonal and off-diagonal elements of B, vi */
+/* denotes an element of the vector defining H(i), and ui an element of */
+/* the vector defining G(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments: shift to 1-based Fortran-style indexing */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --d__;
+    --e;
+    --tauq;
+    --taup;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+/* Computing MAX */
+    i__1 = 1, i__2 = ilaenv_(&c__1, "SGEBRD", " ", m, n, &c_n1, &c_n1); /* query optimal block size */
+    nb = max(i__1,i__2);
+    lwkopt = (*m + *n) * nb;
+    work[1] = (real) lwkopt;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    } else /* if(complicated condition) */ {
+/* Computing MAX */
+	i__1 = max(1,*m);
+	if (*lwork < max(i__1,*n) && ! lquery) {
+	    *info = -10;
+	}
+    }
+    if (*info < 0) {
+	i__1 = -(*info);
+	xerbla_("SGEBRD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    minmn = min(*m,*n);
+    if (minmn == 0) {
+	work[1] = 1.f;
+	return 0;
+    }
+
+    ws = (real) max(*m,*n);
+    ldwrkx = *m; /* WORK is split into an M x NB block X and an N x NB block Y */
+    ldwrky = *n;
+
+    if (nb > 1 && nb < minmn) {
+
+/* Set the crossover point NX. */
+
+/* Computing MAX */
+	i__1 = nb, i__2 = ilaenv_(&c__3, "SGEBRD", " ", m, n, &c_n1, &c_n1);
+	nx = max(i__1,i__2);
+
+/* Determine when to switch from blocked to unblocked code. */
+
+	if (nx < minmn) {
+	    ws = (real) ((*m + *n) * nb);
+	    if ((real) (*lwork) < ws) {
+
+/* Not enough work space for the optimal NB, consider using */
+/* a smaller block size. */
+
+		nbmin = ilaenv_(&c__2, "SGEBRD", " ", m, n, &c_n1, &c_n1);
+		if (*lwork >= (*m + *n) * nbmin) {
+		    nb = *lwork / (*m + *n);
+		} else {
+		    nb = 1; /* workspace too small even for NBMIN: use unblocked code */
+		    nx = minmn;
+		}
+	    }
+	}
+    } else {
+	nx = minmn;
+    }
+
+    i__1 = minmn - nx;
+    i__2 = nb;
+    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+
+/* Reduce rows and columns i:i+nb-1 to bidiagonal form and return */
+/* the matrices X and Y which are needed to update the unreduced */
+/* part of the matrix */
+
+	i__3 = *m - i__ + 1;
+	i__4 = *n - i__ + 1;
+	slabrd_(&i__3, &i__4, &nb, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[
+		i__], &tauq[i__], &taup[i__], &work[1], &ldwrkx, &work[ldwrkx
+		* nb + 1], &ldwrky);
+
+/* Update the trailing submatrix A(i+nb:m,i+nb:n), using an update */
+/* of the form A := A - V*Y' - X*U' */
+
+	i__3 = *m - i__ - nb + 1;
+	i__4 = *n - i__ - nb + 1;
+	sgemm_("No transpose", "Transpose", &i__3, &i__4, &nb, &c_b21, &a[i__
+		+ nb + i__ * a_dim1], lda, &work[ldwrkx * nb + nb + 1], &
+		ldwrky, &c_b22, &a[i__ + nb + (i__ + nb) * a_dim1], lda);
+	i__3 = *m - i__ - nb + 1;
+	i__4 = *n - i__ - nb + 1;
+	sgemm_("No transpose", "No transpose", &i__3, &i__4, &nb, &c_b21, &
+		work[nb + 1], &ldwrkx, &a[i__ + (i__ + nb) * a_dim1], lda, &
+		c_b22, &a[i__ + nb + (i__ + nb) * a_dim1], lda);
+
+/* Copy diagonal and off-diagonal elements of B back into A */
+
+	if (*m >= *n) {
+	    i__3 = i__ + nb - 1;
+	    for (j = i__; j <= i__3; ++j) {
+		a[j + j * a_dim1] = d__[j];
+		a[j + (j + 1) * a_dim1] = e[j];
+/* L10: */
+	    }
+	} else {
+	    i__3 = i__ + nb - 1;
+	    for (j = i__; j <= i__3; ++j) {
+		a[j + j * a_dim1] = d__[j];
+		a[j + 1 + j * a_dim1] = e[j];
+/* L20: */
+	    }
+	}
+/* L30: */
+    }
+
+/* Use unblocked code to reduce the remainder of the matrix */
+
+    i__2 = *m - i__ + 1;
+    i__1 = *n - i__ + 1;
+    sgebd2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__], &
+	    tauq[i__], &taup[i__], &work[1], &iinfo);
+    work[1] = ws; /* report the workspace actually required/used */
+    return 0;
+
+/* End of SGEBRD */
+
+} /* sgebrd_ */
diff --git a/contrib/libs/clapack/sgecon.c b/contrib/libs/clapack/sgecon.c
new file mode 100644
index 0000000000..83a66558ff
--- /dev/null
+++ b/contrib/libs/clapack/sgecon.c
@@ -0,0 +1,224 @@
+/* sgecon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sgecon_(char *norm, integer *n, real *a, integer *lda, 
+	real *anorm, real *rcond, real *work, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1;
+    real r__1;
+
+    /* Local variables */
+    real sl;
+    integer ix;
+    real su;
+    integer kase, kase1;
+    real scale;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    extern /* Subroutine */ int srscl_(integer *, real *, real *, integer *), 
+	    slacn2_(integer *, real *, real *, integer *, real *, integer *, 
+	    integer *);
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer isamax_(integer *, real *, integer *);
+    real ainvnm;
+    logical onenrm;
+    char normin[1];
+    extern /* Subroutine */ int slatrs_(char *, char *, char *, char *, 
+	    integer *, real *, integer *, real *, real *, real *, integer *);
+    real smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGECON estimates the reciprocal of the condition number of a general */
+/* real matrix A, in either the 1-norm or the infinity-norm, using */
+/* the LU factorization computed by SGETRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as */
+/* RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The factors L and U from the factorization A = P*L*U */
+/* as computed by SGETRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* ANORM (input) REAL */
+/* If NORM = '1' or 'O', the 1-norm of the original matrix A. */
+/* If NORM = 'I', the infinity-norm of the original matrix A. */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/* WORK (workspace) REAL array, dimension (4*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+    if (! onenrm && ! lsame_(norm, "I")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*anorm < 0.f) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGECON", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *rcond = 0.f;
+    if (*n == 0) {
+	*rcond = 1.f;
+	return 0;
+    } else if (*anorm == 0.f) {
+	return 0;
+    }
+
+    smlnum = slamch_("Safe minimum");
+
+/* Estimate the norm of inv(A). */
+
+    ainvnm = 0.f;
+    *(unsigned char *)normin = 'N';
+    if (onenrm) {
+	kase1 = 1;
+    } else {
+	kase1 = 2;
+    }
+    kase = 0;
+/* Reverse-communication loop: SLACN2 builds the estimate AINVNM one */
+/* step at a time.  On return KASE != 0 means the caller must apply */
+/* inv(A) (when KASE == KASE1) or inv(A)' to WORK(1:N) and re-enter; */
+/* KASE == 0 means the estimate is final. */
+L10:
+    slacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+    if (kase != 0) {
+	if (kase == kase1) {
+
+/* Multiply by inv(L). */
+
+	    slatrs_("Lower", "No transpose", "Unit", normin, n, &a[a_offset], 
+		    lda, &work[1], &sl, &work[(*n << 1) + 1], info);
+
+/* Multiply by inv(U). */
+
+	    slatrs_("Upper", "No transpose", "Non-unit", normin, n, &a[
+		    a_offset], lda, &work[1], &su, &work[*n * 3 + 1], info);
+	} else {
+
+/* Multiply by inv(U'). */
+
+	    slatrs_("Upper", "Transpose", "Non-unit", normin, n, &a[a_offset],
+		    lda, &work[1], &su, &work[*n * 3 + 1], info);
+
+/* Multiply by inv(L'). */
+
+	    slatrs_("Lower", "Transpose", "Unit", normin, n, &a[a_offset], 
+		    lda, &work[1], &sl, &work[(*n << 1) + 1], info);
+	}
+
+/* Divide X by 1/(SL*SU) if doing so will not cause overflow. */
+
+	scale = sl * su;
+	*(unsigned char *)normin = 'Y';
+	if (scale != 1.f) {
+	    ix = isamax_(n, &work[1], &c__1);
+	    if (scale < (r__1 = work[ix], dabs(r__1)) * smlnum || scale == 
+		    0.f) {
+		goto L20;
+	    }
+	    srscl_(n, &scale, &work[1], &c__1);
+	}
+	goto L10;
+    }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+    if (ainvnm != 0.f) {
+	*rcond = 1.f / ainvnm / *anorm;
+    }
+
+/* L20 is reached directly when the rescaling above would overflow; */
+/* RCOND is then left at its initial value of zero. */
+L20:
+    return 0;
+
+/* End of SGECON */
+
+} /* sgecon_ */
diff --git a/contrib/libs/clapack/sgeequ.c b/contrib/libs/clapack/sgeequ.c
new file mode 100644
index 0000000000..baa44c91d0
--- /dev/null
+++ b/contrib/libs/clapack/sgeequ.c
@@ -0,0 +1,296 @@
+/* sgeequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sgeequ_(integer *m, integer *n, real *a, integer *lda, 
+	real *r__, real *c__, real *rowcnd, real *colcnd, real *amax, integer 
+	*info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    real r__1, r__2, r__3;
+
+    /* Local variables */
+    integer i__, j;
+    real rcmin, rcmax;
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real bignum, smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGEEQU computes row and column scalings intended to equilibrate an */
+/* M-by-N matrix A and reduce its condition number. R returns the row */
+/* scale factors and C the column scale factors, chosen to try to make */
+/* the largest element in each row and column of the matrix B with */
+/* elements B(i,j)=R(i)*A(i,j)*C(j) have absolute value 1. */
+
+/* R(i) and C(j) are restricted to be between SMLNUM = smallest safe */
+/* number and BIGNUM = largest safe number. Use of these scaling */
+/* factors is not guaranteed to reduce the condition number of A but */
+/* works well in practice. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The M-by-N matrix whose equilibration factors are */
+/* to be computed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* R (output) REAL array, dimension (M) */
+/* If INFO = 0 or INFO > M, R contains the row scale factors */
+/* for A. */
+
+/* C (output) REAL array, dimension (N) */
+/* If INFO = 0, C contains the column scale factors for A. */
+
+/* ROWCND (output) REAL */
+/* If INFO = 0 or INFO > M, ROWCND contains the ratio of the */
+/* smallest R(i) to the largest R(i). If ROWCND >= 0.1 and */
+/* AMAX is neither too large nor too small, it is not worth */
+/* scaling by R. */
+
+/* COLCND (output) REAL */
+/* If INFO = 0, COLCND contains the ratio of the smallest */
+/* C(i) to the largest C(i). If COLCND >= 0.1, it is not */
+/* worth scaling by C. */
+
+/* AMAX (output) REAL */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= M: the i-th row of A is exactly zero */
+/* > M: the (i-M)-th column of A is exactly zero */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --r__;
+    --c__;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGEEQU", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0) {
+	*rowcnd = 1.f;
+	*colcnd = 1.f;
+	*amax = 0.f;
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    smlnum = slamch_("S");
+    bignum = 1.f / smlnum;
+
+/* Compute row scale factors. */
+
+    i__1 = *m;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	r__[i__] = 0.f;
+/* L10: */
+    }
+
+/* Find the maximum element in each row. */
+
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *m;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    r__2 = r__[i__], r__3 = (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+	    r__[i__] = dmax(r__2,r__3);
+/* L20: */
+	}
+/* L30: */
+    }
+
+/* Find the maximum and minimum scale factors. */
+
+    rcmin = bignum;
+    rcmax = 0.f;
+    i__1 = *m;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	r__1 = rcmax, r__2 = r__[i__];
+	rcmax = dmax(r__1,r__2);
+/* Computing MIN */
+	r__1 = rcmin, r__2 = r__[i__];
+	rcmin = dmin(r__1,r__2);
+/* L40: */
+    }
+    *amax = rcmax;
+
+    if (rcmin == 0.f) {
+
+/* Find the first zero scale factor and return an error code. */
+
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (r__[i__] == 0.f) {
+		*info = i__;
+		return 0;
+	    }
+/* L50: */
+	}
+    } else {
+
+/* Invert the scale factors. */
+/* Each row maximum is clamped to [SMLNUM, BIGNUM] before taking the */
+/* reciprocal, so the stored factor can neither overflow nor */
+/* underflow. */
+
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MIN */
+/* Computing MAX */
+	    r__2 = r__[i__];
+	    r__1 = dmax(r__2,smlnum);
+	    r__[i__] = 1.f / dmin(r__1,bignum);
+/* L60: */
+	}
+
+/* Compute ROWCND = min(R(I)) / max(R(I)) */
+
+	*rowcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+    }
+
+/* Compute column scale factors */
+
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	c__[j] = 0.f;
+/* L70: */
+    }
+
+/* Find the maximum element in each column, */
+/* assuming the row scaling computed above. */
+
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *m;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    r__2 = c__[j], r__3 = (r__1 = a[i__ + j * a_dim1], dabs(r__1)) * 
+		    r__[i__];
+	    c__[j] = dmax(r__2,r__3);
+/* L80: */
+	}
+/* L90: */
+    }
+
+/* Find the maximum and minimum scale factors. */
+
+    rcmin = bignum;
+    rcmax = 0.f;
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+	r__1 = rcmin, r__2 = c__[j];
+	rcmin = dmin(r__1,r__2);
+/* Computing MAX */
+	r__1 = rcmax, r__2 = c__[j];
+	rcmax = dmax(r__1,r__2);
+/* L100: */
+    }
+
+    if (rcmin == 0.f) {
+
+/* Find the first zero scale factor and return an error code. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    if (c__[j] == 0.f) {
+		*info = *m + j;
+		return 0;
+	    }
+/* L110: */
+	}
+    } else {
+
+/* Invert the scale factors. */
+/* Same clamp to [SMLNUM, BIGNUM] as for the row factors above. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+/* Computing MAX */
+	    r__2 = c__[j];
+	    r__1 = dmax(r__2,smlnum);
+	    c__[j] = 1.f / dmin(r__1,bignum);
+/* L120: */
+	}
+
+/* Compute COLCND = min(C(J)) / max(C(J)) */
+
+	*colcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+    }
+
+    return 0;
+
+/* End of SGEEQU */
+
+} /* sgeequ_ */
diff --git a/contrib/libs/clapack/sgeequb.c b/contrib/libs/clapack/sgeequb.c
new file mode 100644
index 0000000000..607f5be6ad
--- /dev/null
+++ b/contrib/libs/clapack/sgeequb.c
@@ -0,0 +1,324 @@
+/* sgeequb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sgeequb_(integer *m, integer *n, real *a, integer *lda, 
+	real *r__, real *c__, real *rowcnd, real *colcnd, real *amax, integer 
+	*info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    real r__1, r__2, r__3;
+
+    /* Builtin functions */
+    double log(doublereal), pow_ri(real *, integer *);
+
+    /* Local variables */
+    integer i__, j;
+    real radix, rcmin, rcmax;
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real bignum, logrdx, smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- Contributed by James Demmel, Deaglan Halligan, Yozo Hida and -- */
+/* -- Jason Riedy of Univ. of California Berkeley. -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley and NAG Ltd. -- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGEEQUB computes row and column scalings intended to equilibrate an */
+/* M-by-N matrix A and reduce its condition number. R returns the row */
+/* scale factors and C the column scale factors, chosen to try to make */
+/* the largest element in each row and column of the matrix B with */
+/* elements B(i,j)=R(i)*A(i,j)*C(j) have an absolute value of at most */
+/* the radix. */
+
+/* R(i) and C(j) are restricted to be a power of the radix between */
+/* SMLNUM = smallest safe number and BIGNUM = largest safe number. Use */
+/* of these scaling factors is not guaranteed to reduce the condition */
+/* number of A but works well in practice. */
+
+/* This routine differs from SGEEQU by restricting the scaling factors */
+/* to a power of the radix. Barring over- and underflow, scaling by */
+/* these factors introduces no additional rounding errors. However, the */
+/* scaled entries' magnitudes are no longer approximately 1 but lie */
+/* between sqrt(radix) and 1/sqrt(radix). */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The M-by-N matrix whose equilibration factors are */
+/* to be computed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* R (output) REAL array, dimension (M) */
+/* If INFO = 0 or INFO > M, R contains the row scale factors */
+/* for A. */
+
+/* C (output) REAL array, dimension (N) */
+/* If INFO = 0, C contains the column scale factors for A. */
+
+/* ROWCND (output) REAL */
+/* If INFO = 0 or INFO > M, ROWCND contains the ratio of the */
+/* smallest R(i) to the largest R(i). If ROWCND >= 0.1 and */
+/* AMAX is neither too large nor too small, it is not worth */
+/* scaling by R. */
+
+/* COLCND (output) REAL */
+/* If INFO = 0, COLCND contains the ratio of the smallest */
+/* C(i) to the largest C(i). If COLCND >= 0.1, it is not */
+/* worth scaling by C. */
+
+/* AMAX (output) REAL */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= M: the i-th row of A is exactly zero */
+/* > M: the (i-M)-th column of A is exactly zero */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --r__;
+    --c__;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGEEQUB", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*m == 0 || *n == 0) {
+	*rowcnd = 1.f;
+	*colcnd = 1.f;
+	*amax = 0.f;
+	return 0;
+    }
+
+/* Get machine constants. Assume SMLNUM is a power of the radix. */
+
+    smlnum = slamch_("S");
+    bignum = 1.f / smlnum;
+    radix = slamch_("B");
+    logrdx = log(radix);
+
+/* Compute row scale factors. */
+
+    i__1 = *m;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	r__[i__] = 0.f;
+/* L10: */
+    }
+
+/* Find the maximum element in each row. */
+
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *m;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    r__2 = r__[i__], r__3 = (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+	    r__[i__] = dmax(r__2,r__3);
+/* L20: */
+	}
+/* L30: */
+    }
+
+/* Round each row maximum down to the nearest power of the radix */
+/* (the exponent is floor(log(r)/log(radix))). */
+
+    i__1 = *m;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if (r__[i__] > 0.f) {
+	    i__2 = (integer) (log(r__[i__]) / logrdx);
+	    r__[i__] = pow_ri(&radix, &i__2);
+	}
+    }
+
+/* Find the maximum and minimum scale factors. */
+
+    rcmin = bignum;
+    rcmax = 0.f;
+    i__1 = *m;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	r__1 = rcmax, r__2 = r__[i__];
+	rcmax = dmax(r__1,r__2);
+/* Computing MIN */
+	r__1 = rcmin, r__2 = r__[i__];
+	rcmin = dmin(r__1,r__2);
+/* L40: */
+    }
+    *amax = rcmax;
+
+    if (rcmin == 0.f) {
+
+/* Find the first zero scale factor and return an error code. */
+
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (r__[i__] == 0.f) {
+		*info = i__;
+		return 0;
+	    }
+/* L50: */
+	}
+    } else {
+
+/* Invert the scale factors. */
+/* Each factor is clamped to [SMLNUM, BIGNUM] before inversion so the */
+/* reciprocal cannot overflow or underflow. */
+
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MIN */
+/* Computing MAX */
+	    r__2 = r__[i__];
+	    r__1 = dmax(r__2,smlnum);
+	    r__[i__] = 1.f / dmin(r__1,bignum);
+/* L60: */
+	}
+
+/* Compute ROWCND = min(R(I)) / max(R(I)). */
+
+	*rowcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+    }
+
+/* Compute column scale factors */
+
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	c__[j] = 0.f;
+/* L70: */
+    }
+
+/* Find the maximum element in each column, */
+/* assuming the row scaling computed above. */
+
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *m;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    r__2 = c__[j], r__3 = (r__1 = a[i__ + j * a_dim1], dabs(r__1)) * 
+		    r__[i__];
+	    c__[j] = dmax(r__2,r__3);
+/* L80: */
+	}
+/* Round the column maximum down to a power of the radix, as for the */
+/* row factors above. */
+	if (c__[j] > 0.f) {
+	    i__2 = (integer) (log(c__[j]) / logrdx);
+	    c__[j] = pow_ri(&radix, &i__2);
+	}
+/* L90: */
+    }
+
+/* Find the maximum and minimum scale factors. */
+
+    rcmin = bignum;
+    rcmax = 0.f;
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+	r__1 = rcmin, r__2 = c__[j];
+	rcmin = dmin(r__1,r__2);
+/* Computing MAX */
+	r__1 = rcmax, r__2 = c__[j];
+	rcmax = dmax(r__1,r__2);
+/* L100: */
+    }
+
+    if (rcmin == 0.f) {
+
+/* Find the first zero scale factor and return an error code. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    if (c__[j] == 0.f) {
+		*info = *m + j;
+		return 0;
+	    }
+/* L110: */
+	}
+    } else {
+
+/* Invert the scale factors. */
+/* Same clamp to [SMLNUM, BIGNUM] as for the row factors. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+/* Computing MAX */
+	    r__2 = c__[j];
+	    r__1 = dmax(r__2,smlnum);
+	    c__[j] = 1.f / dmin(r__1,bignum);
+/* L120: */
+	}
+
+/* Compute COLCND = min(C(J)) / max(C(J)). */
+
+	*colcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+    }
+
+    return 0;
+
+/* End of SGEEQUB */
+
+} /* sgeequb_ */
diff --git a/contrib/libs/clapack/sgees.c b/contrib/libs/clapack/sgees.c
new file mode 100644
index 0000000000..be6b052e90
--- /dev/null
+++ b/contrib/libs/clapack/sgees.c
@@ -0,0 +1,547 @@
+/* sgees.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int sgees_(char *jobvs, char *sort, L_fp select, integer *n,
+ real *a, integer *lda, integer *sdim, real *wr, real *wi, real *vs,
+ integer *ldvs, real *work, integer *lwork, logical *bwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, vs_dim1, vs_offset, i__1, i__2, i__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ real s;
+ integer i1, i2, ip, ihi, ilo;
+ real dum[1], eps, sep;
+ integer ibal;
+ real anrm;
+ integer idum[1], ierr, itau, iwrk, inxt, icond, ieval;
+ extern logical lsame_(char *, char *);
+ logical cursl;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), sswap_(integer *, real *, integer *, real *, integer *
+);
+ logical lst2sl;
+ extern /* Subroutine */ int slabad_(real *, real *);
+ logical scalea;
+ real cscale;
+ extern /* Subroutine */ int sgebak_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, integer *, integer *), sgebal_(char *, integer *, real *, integer *,
+ integer *, integer *, real *, integer *);
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ extern /* Subroutine */ int sgehrd_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *), xerbla_(char
+ *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), slacpy_(char *, integer *, integer *, real *, integer *,
+ real *, integer *);
+ logical lastsl;
+ extern /* Subroutine */ int sorghr_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *), shseqr_(char
+ *, char *, integer *, integer *, integer *, real *, integer *,
+ real *, real *, real *, integer *, real *, integer *, integer *);
+ integer minwrk, maxwrk;
+ real smlnum;
+ integer hswork;
+ extern /* Subroutine */ int strsen_(char *, char *, logical *, integer *,
+ real *, integer *, real *, integer *, real *, real *, integer *,
+ real *, real *, real *, integer *, integer *, integer *, integer *
+);
+ logical wantst, lquery, wantvs;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+/* .. Function Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGEES computes for an N-by-N real nonsymmetric matrix A, the */
+/* eigenvalues, the real Schur form T, and, optionally, the matrix of */
+/* Schur vectors Z. This gives the Schur factorization A = Z*T*(Z**T). */
+
+/* Optionally, it also orders the eigenvalues on the diagonal of the */
+/* real Schur form so that selected eigenvalues are at the top left. */
+/* The leading columns of Z then form an orthonormal basis for the */
+/* invariant subspace corresponding to the selected eigenvalues. */
+
+/* A matrix is in real Schur form if it is upper quasi-triangular with */
+/* 1-by-1 and 2-by-2 blocks. 2-by-2 blocks will be standardized in the */
+/* form */
+/* [ a b ] */
+/* [ c a ] */
+
+/* where b*c < 0. The eigenvalues of such a block are a +- sqrt(bc). */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVS (input) CHARACTER*1 */
+/* = 'N': Schur vectors are not computed; */
+/* = 'V': Schur vectors are computed. */
+
+/* SORT (input) CHARACTER*1 */
+/* Specifies whether or not to order the eigenvalues on the */
+/* diagonal of the Schur form. */
+/* = 'N': Eigenvalues are not ordered; */
+/* = 'S': Eigenvalues are ordered (see SELECT). */
+
+/* SELECT (external procedure) LOGICAL FUNCTION of two REAL arguments */
+/* SELECT must be declared EXTERNAL in the calling subroutine. */
+/* If SORT = 'S', SELECT is used to select eigenvalues to sort */
+/* to the top left of the Schur form. */
+/* If SORT = 'N', SELECT is not referenced. */
+/* An eigenvalue WR(j)+sqrt(-1)*WI(j) is selected if */
+/* SELECT(WR(j),WI(j)) is true; i.e., if either one of a complex */
+/* conjugate pair of eigenvalues is selected, then both complex */
+/* eigenvalues are selected. */
+/* Note that a selected complex eigenvalue may no longer */
+/* satisfy SELECT(WR(j),WI(j)) = .TRUE. after ordering, since */
+/* ordering may change the value of complex eigenvalues */
+/* (especially if the eigenvalue is ill-conditioned); in this */
+/* case INFO is set to N+2 (see INFO below). */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the N-by-N matrix A. */
+/* On exit, A has been overwritten by its real Schur form T. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* SDIM (output) INTEGER */
+/* If SORT = 'N', SDIM = 0. */
+/* If SORT = 'S', SDIM = number of eigenvalues (after sorting) */
+/* for which SELECT is true. (Complex conjugate */
+/* pairs for which SELECT is true for either */
+/* eigenvalue count as 2.) */
+
+/* WR (output) REAL array, dimension (N) */
+/* WI (output) REAL array, dimension (N) */
+/* WR and WI contain the real and imaginary parts, */
+/* respectively, of the computed eigenvalues in the same order */
+/* that they appear on the diagonal of the output Schur form T. */
+/* Complex conjugate pairs of eigenvalues will appear */
+/* consecutively with the eigenvalue having the positive */
+/* imaginary part first. */
+
+/* VS (output) REAL array, dimension (LDVS,N) */
+/* If JOBVS = 'V', VS contains the orthogonal matrix Z of Schur */
+/* vectors. */
+/* If JOBVS = 'N', VS is not referenced. */
+
+/* LDVS (input) INTEGER */
+/* The leading dimension of the array VS. LDVS >= 1; if */
+/* JOBVS = 'V', LDVS >= N. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) contains the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,3*N). */
+/* For good performance, LWORK must generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* BWORK (workspace) LOGICAL array, dimension (N) */
+/* Not referenced if SORT = 'N'. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, and i is */
+/* <= N: the QR algorithm failed to compute all the */
+/* eigenvalues; elements 1:ILO-1 and i+1:N of WR and WI */
+/* contain those eigenvalues which have converged; if */
+/* JOBVS = 'V', VS contains the matrix which reduces A */
+/* to its partially converged Schur form. */
+/* = N+1: the eigenvalues could not be reordered because some */
+/* eigenvalues were too close to separate (the problem */
+/* is very ill-conditioned); */
+/* = N+2: after reordering, roundoff changed values of some */
+/* complex eigenvalues so that leading eigenvalues in */
+/* the Schur form no longer satisfy SELECT=.TRUE. This */
+/* could also be caused by underflow due to scaling. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --wr;
+ --wi;
+ vs_dim1 = *ldvs;
+ vs_offset = 1 + vs_dim1;
+ vs -= vs_offset;
+ --work;
+ --bwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ wantvs = lsame_(jobvs, "V");
+ wantst = lsame_(sort, "S");
+ if (! wantvs && ! lsame_(jobvs, "N")) {
+ *info = -1;
+ } else if (! wantst && ! lsame_(sort, "N")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else if (*ldvs < 1 || wantvs && *ldvs < *n) {
+ *info = -11;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV. */
+/* HSWORK refers to the workspace preferred by SHSEQR, as */
+/* calculated below. HSWORK is computed assuming ILO=1 and IHI=N, */
+/* the worst case.) */
+
+ if (*info == 0) {
+ if (*n == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ } else {
+ maxwrk = (*n << 1) + *n * ilaenv_(&c__1, "SGEHRD", " ", n, &c__1,
+ n, &c__0);
+ minwrk = *n * 3;
+
+ shseqr_("S", jobvs, n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[1]
+, &vs[vs_offset], ldvs, &work[1], &c_n1, &ieval);
+ hswork = work[1];
+
+ if (! wantvs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + hswork;
+ maxwrk = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "SORGHR", " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + hswork;
+ maxwrk = max(i__1,i__2);
+ }
+ }
+ work[1] = (real) maxwrk;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -13;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGEES ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *sdim = 0;
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = slange_("M", n, n, &a[a_offset], lda, dum);
+ scalea = FALSE_;
+ if (anrm > 0.f && anrm < smlnum) {
+ scalea = TRUE_;
+ cscale = smlnum;
+ } else if (anrm > bignum) {
+ scalea = TRUE_;
+ cscale = bignum;
+ }
+ if (scalea) {
+ slascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Permute the matrix to make it more nearly triangular */
+/* (Workspace: need N) */
+
+ ibal = 1;
+ sgebal_("P", n, &a[a_offset], lda, &ilo, &ihi, &work[ibal], &ierr);
+
+/* Reduce to upper Hessenberg form */
+/* (Workspace: need 3*N, prefer 2*N+N*NB) */
+
+ itau = *n + ibal;
+ iwrk = *n + itau;
+ i__1 = *lwork - iwrk + 1;
+ sgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1,
+ &ierr);
+
+ if (wantvs) {
+
+/* Copy Householder vectors to VS */
+
+ slacpy_("L", n, n, &a[a_offset], lda, &vs[vs_offset], ldvs)
+ ;
+
+/* Generate orthogonal matrix in VS */
+/* (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+
+ i__1 = *lwork - iwrk + 1;
+ sorghr_(n, &ilo, &ihi, &vs[vs_offset], ldvs, &work[itau], &work[iwrk],
+ &i__1, &ierr);
+ }
+
+ *sdim = 0;
+
+/* Perform QR iteration, accumulating Schur vectors in VS if desired */
+/* (Workspace: need N+1, prefer N+HSWORK (see comments) ) */
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ shseqr_("S", jobvs, n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &vs[
+ vs_offset], ldvs, &work[iwrk], &i__1, &ieval);
+ if (ieval > 0) {
+ *info = ieval;
+ }
+
+/* Sort eigenvalues if desired */
+
+ if (wantst && *info == 0) {
+ if (scalea) {
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, n, &c__1, &wr[1], n, &
+ ierr);
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, n, &c__1, &wi[1], n, &
+ ierr);
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ bwork[i__] = (*select)(&wr[i__], &wi[i__]);
+/* L10: */
+ }
+
+/* Reorder eigenvalues and transform Schur vectors */
+/* (Workspace: none needed) */
+
+ i__1 = *lwork - iwrk + 1;
+ strsen_("N", jobvs, &bwork[1], n, &a[a_offset], lda, &vs[vs_offset],
+ ldvs, &wr[1], &wi[1], sdim, &s, &sep, &work[iwrk], &i__1,
+ idum, &c__1, &icond);
+ if (icond > 0) {
+ *info = *n + icond;
+ }
+ }
+
+ if (wantvs) {
+
+/* Undo balancing */
+/* (Workspace: need N) */
+
+ sgebak_("P", "R", n, &ilo, &ihi, &work[ibal], n, &vs[vs_offset], ldvs,
+ &ierr);
+ }
+
+ if (scalea) {
+
+/* Undo scaling for the Schur form of A */
+
+ slascl_("H", &c__0, &c__0, &cscale, &anrm, n, n, &a[a_offset], lda, &
+ ierr);
+ i__1 = *lda + 1;
+ scopy_(n, &a[a_offset], &i__1, &wr[1], &c__1);
+ if (cscale == smlnum) {
+
+/* If scaling back towards underflow, adjust WI if an */
+/* offdiagonal element of a 2-by-2 block in the Schur form */
+/* underflows. */
+
+ if (ieval > 0) {
+ i1 = ieval + 1;
+ i2 = ihi - 1;
+ i__1 = ilo - 1;
+/* Computing MAX */
+ i__3 = ilo - 1;
+ i__2 = max(i__3,1);
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[
+ 1], &i__2, &ierr);
+ } else if (wantst) {
+ i1 = 1;
+ i2 = *n - 1;
+ } else {
+ i1 = ilo;
+ i2 = ihi - 1;
+ }
+ inxt = i1 - 1;
+ i__1 = i2;
+ for (i__ = i1; i__ <= i__1; ++i__) {
+ if (i__ < inxt) {
+ goto L20;
+ }
+ if (wi[i__] == 0.f) {
+ inxt = i__ + 1;
+ } else {
+ if (a[i__ + 1 + i__ * a_dim1] == 0.f) {
+ wi[i__] = 0.f;
+ wi[i__ + 1] = 0.f;
+ } else if (a[i__ + 1 + i__ * a_dim1] != 0.f && a[i__ + (
+ i__ + 1) * a_dim1] == 0.f) {
+ wi[i__] = 0.f;
+ wi[i__ + 1] = 0.f;
+ if (i__ > 1) {
+ i__2 = i__ - 1;
+ sswap_(&i__2, &a[i__ * a_dim1 + 1], &c__1, &a[(
+ i__ + 1) * a_dim1 + 1], &c__1);
+ }
+ if (*n > i__ + 1) {
+ i__2 = *n - i__ - 1;
+ sswap_(&i__2, &a[i__ + (i__ + 2) * a_dim1], lda, &
+ a[i__ + 1 + (i__ + 2) * a_dim1], lda);
+ }
+ if (wantvs) {
+ sswap_(n, &vs[i__ * vs_dim1 + 1], &c__1, &vs[(i__
+ + 1) * vs_dim1 + 1], &c__1);
+ }
+ a[i__ + (i__ + 1) * a_dim1] = a[i__ + 1 + i__ *
+ a_dim1];
+ a[i__ + 1 + i__ * a_dim1] = 0.f;
+ }
+ inxt = i__ + 2;
+ }
+L20:
+ ;
+ }
+ }
+
+/* Undo scaling for the imaginary part of the eigenvalues */
+
+ i__1 = *n - ieval;
+/* Computing MAX */
+ i__3 = *n - ieval;
+ i__2 = max(i__3,1);
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[ieval +
+ 1], &i__2, &ierr);
+ }
+
+ if (wantst && *info == 0) {
+
+/* Check if reordering successful */
+
+ lastsl = TRUE_;
+ lst2sl = TRUE_;
+ *sdim = 0;
+ ip = 0;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ cursl = (*select)(&wr[i__], &wi[i__]);
+ if (wi[i__] == 0.f) {
+ if (cursl) {
+ ++(*sdim);
+ }
+ ip = 0;
+ if (cursl && ! lastsl) {
+ *info = *n + 2;
+ }
+ } else {
+ if (ip == 1) {
+
+/* Last eigenvalue of conjugate pair */
+
+ cursl = cursl || lastsl;
+ lastsl = cursl;
+ if (cursl) {
+ *sdim += 2;
+ }
+ ip = -1;
+ if (cursl && ! lst2sl) {
+ *info = *n + 2;
+ }
+ } else {
+
+/* First eigenvalue of conjugate pair */
+
+ ip = 1;
+ }
+ }
+ lst2sl = lastsl;
+ lastsl = cursl;
+/* L30: */
+ }
+ }
+
+ work[1] = (real) maxwrk;
+ return 0;
+
+/* End of SGEES */
+
+} /* sgees_ */
diff --git a/contrib/libs/clapack/sgeesx.c b/contrib/libs/clapack/sgeesx.c
new file mode 100644
index 0000000000..8f9cafa9bb
--- /dev/null
+++ b/contrib/libs/clapack/sgeesx.c
@@ -0,0 +1,643 @@
+/* sgeesx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int sgeesx_(char *jobvs, char *sort, L_fp select, char *
+ sense, integer *n, real *a, integer *lda, integer *sdim, real *wr,
+ real *wi, real *vs, integer *ldvs, real *rconde, real *rcondv, real *
+ work, integer *lwork, integer *iwork, integer *liwork, logical *bwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, vs_dim1, vs_offset, i__1, i__2, i__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, i1, i2, ip, ihi, ilo;
+ real dum[1], eps;
+ integer ibal;
+ real anrm;
+ integer ierr, itau, iwrk, lwrk, inxt, icond, ieval;
+ extern logical lsame_(char *, char *);
+ logical cursl;
+ integer liwrk;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), sswap_(integer *, real *, integer *, real *, integer *
+);
+ logical lst2sl;
+ extern /* Subroutine */ int slabad_(real *, real *);
+ logical scalea;
+ real cscale;
+ extern /* Subroutine */ int sgebak_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, integer *, integer *), sgebal_(char *, integer *, real *, integer *,
+ integer *, integer *, real *, integer *);
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ extern /* Subroutine */ int sgehrd_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *), xerbla_(char
+ *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), slacpy_(char *, integer *, integer *, real *, integer *,
+ real *, integer *);
+ logical wantsb, wantse, lastsl;
+ extern /* Subroutine */ int sorghr_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *), shseqr_(char
+ *, char *, integer *, integer *, integer *, real *, integer *,
+ real *, real *, real *, integer *, real *, integer *, integer *);
+ integer minwrk, maxwrk;
+ logical wantsn;
+ real smlnum;
+ integer hswork;
+ extern /* Subroutine */ int strsen_(char *, char *, logical *, integer *,
+ real *, integer *, real *, integer *, real *, real *, integer *,
+ real *, real *, real *, integer *, integer *, integer *, integer *
+);
+ logical wantst, lquery, wantsv, wantvs;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+/* .. Function Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGEESX computes for an N-by-N real nonsymmetric matrix A, the */
+/* eigenvalues, the real Schur form T, and, optionally, the matrix of */
+/* Schur vectors Z. This gives the Schur factorization A = Z*T*(Z**T). */
+
+/* Optionally, it also orders the eigenvalues on the diagonal of the */
+/* real Schur form so that selected eigenvalues are at the top left; */
+/* computes a reciprocal condition number for the average of the */
+/* selected eigenvalues (RCONDE); and computes a reciprocal condition */
+/* number for the right invariant subspace corresponding to the */
+/* selected eigenvalues (RCONDV). The leading columns of Z form an */
+/* orthonormal basis for this invariant subspace. */
+
+/* For further explanation of the reciprocal condition numbers RCONDE */
+/* and RCONDV, see Section 4.10 of the LAPACK Users' Guide (where */
+/* these quantities are called s and sep respectively). */
+
+/* A real matrix is in real Schur form if it is upper quasi-triangular */
+/* with 1-by-1 and 2-by-2 blocks. 2-by-2 blocks will be standardized in */
+/* the form */
+/* [ a b ] */
+/* [ c a ] */
+
+/* where b*c < 0. The eigenvalues of such a block are a +- sqrt(bc). */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVS (input) CHARACTER*1 */
+/* = 'N': Schur vectors are not computed; */
+/* = 'V': Schur vectors are computed. */
+
+/* SORT (input) CHARACTER*1 */
+/* Specifies whether or not to order the eigenvalues on the */
+/* diagonal of the Schur form. */
+/* = 'N': Eigenvalues are not ordered; */
+/* = 'S': Eigenvalues are ordered (see SELECT). */
+
+/* SELECT (external procedure) LOGICAL FUNCTION of two REAL arguments */
+/* SELECT must be declared EXTERNAL in the calling subroutine. */
+/* If SORT = 'S', SELECT is used to select eigenvalues to sort */
+/* to the top left of the Schur form. */
+/* If SORT = 'N', SELECT is not referenced. */
+/* An eigenvalue WR(j)+sqrt(-1)*WI(j) is selected if */
+/* SELECT(WR(j),WI(j)) is true; i.e., if either one of a */
+/* complex conjugate pair of eigenvalues is selected, then both */
+/* are. Note that a selected complex eigenvalue may no longer */
+/* satisfy SELECT(WR(j),WI(j)) = .TRUE. after ordering, since */
+/* ordering may change the value of complex eigenvalues */
+/* (especially if the eigenvalue is ill-conditioned); in this */
+/* case INFO may be set to N+3 (see INFO below). */
+
+/* SENSE (input) CHARACTER*1 */
+/* Determines which reciprocal condition numbers are computed. */
+/* = 'N': None are computed; */
+/* = 'E': Computed for average of selected eigenvalues only; */
+/* = 'V': Computed for selected right invariant subspace only; */
+/* = 'B': Computed for both. */
+/* If SENSE = 'E', 'V' or 'B', SORT must equal 'S'. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA, N) */
+/* On entry, the N-by-N matrix A. */
+/* On exit, A is overwritten by its real Schur form T. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* SDIM (output) INTEGER */
+/* If SORT = 'N', SDIM = 0. */
+/* If SORT = 'S', SDIM = number of eigenvalues (after sorting) */
+/* for which SELECT is true. (Complex conjugate */
+/* pairs for which SELECT is true for either */
+/* eigenvalue count as 2.) */
+
+/* WR (output) REAL array, dimension (N) */
+/* WI (output) REAL array, dimension (N) */
+/* WR and WI contain the real and imaginary parts, respectively, */
+/* of the computed eigenvalues, in the same order that they */
+/* appear on the diagonal of the output Schur form T. Complex */
+/* conjugate pairs of eigenvalues appear consecutively with the */
+/* eigenvalue having the positive imaginary part first. */
+
+/* VS (output) REAL array, dimension (LDVS,N) */
+/* If JOBVS = 'V', VS contains the orthogonal matrix Z of Schur */
+/* vectors. */
+/* If JOBVS = 'N', VS is not referenced. */
+
+/* LDVS (input) INTEGER */
+/* The leading dimension of the array VS. LDVS >= 1, and if */
+/* JOBVS = 'V', LDVS >= N. */
+
+/* RCONDE (output) REAL */
+/* If SENSE = 'E' or 'B', RCONDE contains the reciprocal */
+/* condition number for the average of the selected eigenvalues. */
+/* Not referenced if SENSE = 'N' or 'V'. */
+
+/* RCONDV (output) REAL */
+/* If SENSE = 'V' or 'B', RCONDV contains the reciprocal */
+/* condition number for the selected right invariant subspace. */
+/* Not referenced if SENSE = 'N' or 'E'. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,3*N). */
+/* Also, if SENSE = 'E' or 'V' or 'B', */
+/* LWORK >= N+2*SDIM*(N-SDIM), where SDIM is the number of */
+/* selected eigenvalues computed by this routine. Note that */
+/* N+2*SDIM*(N-SDIM) <= N+N*N/2. Note also that an error is only */
+/* returned if LWORK < max(1,3*N), but if SENSE = 'E' or 'V' or */
+/* 'B' this may not be large enough. */
+/* For good performance, LWORK must generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates upper bounds on the optimal sizes of the */
+/* arrays WORK and IWORK, returns these values as the first */
+/* entries of the WORK and IWORK arrays, and no error messages */
+/* related to LWORK or LIWORK are issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* LIWORK >= 1; if SENSE = 'V' or 'B', LIWORK >= SDIM*(N-SDIM). */
+/* Note that SDIM*(N-SDIM) <= N*N/4. Note also that an error is */
+/* only returned if LIWORK < 1, but if SENSE = 'V' or 'B' this */
+/* may not be large enough. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates upper bounds on the optimal sizes of */
+/* the arrays WORK and IWORK, returns these values as the first */
+/* entries of the WORK and IWORK arrays, and no error messages */
+/* related to LWORK or LIWORK are issued by XERBLA. */
+
+/* BWORK (workspace) LOGICAL array, dimension (N) */
+/* Not referenced if SORT = 'N'. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, and i is */
+/* <= N: the QR algorithm failed to compute all the */
+/* eigenvalues; elements 1:ILO-1 and i+1:N of WR and WI */
+/* contain those eigenvalues which have converged; if */
+/* JOBVS = 'V', VS contains the transformation which */
+/* reduces A to its partially converged Schur form. */
+/* = N+1: the eigenvalues could not be reordered because some */
+/* eigenvalues were too close to separate (the problem */
+/* is very ill-conditioned); */
+/* = N+2: after reordering, roundoff changed values of some */
+/* complex eigenvalues so that leading eigenvalues in */
+/* the Schur form no longer satisfy SELECT=.TRUE. This */
+/* could also be caused by underflow due to scaling. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --wr;
+ --wi;
+ vs_dim1 = *ldvs;
+ vs_offset = 1 + vs_dim1;
+ vs -= vs_offset;
+ --work;
+ --iwork;
+ --bwork;
+
+ /* Function Body */
+ *info = 0;
+ wantvs = lsame_(jobvs, "V");
+ wantst = lsame_(sort, "S");
+ wantsn = lsame_(sense, "N");
+ wantse = lsame_(sense, "E");
+ wantsv = lsame_(sense, "V");
+ wantsb = lsame_(sense, "B");
+ lquery = *lwork == -1 || *liwork == -1;
+ if (! wantvs && ! lsame_(jobvs, "N")) {
+ *info = -1;
+ } else if (! wantst && ! lsame_(sort, "N")) {
+ *info = -2;
+ } else if (! (wantsn || wantse || wantsv || wantsb) || ! wantst && !
+ wantsn) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldvs < 1 || wantvs && *ldvs < *n) {
+ *info = -12;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "RWorkspace:" describe the */
+/* minimal amount of real workspace needed at that point in the */
+/* code, as well as the preferred amount for good performance. */
+/* IWorkspace refers to integer workspace. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV. */
+/* HSWORK refers to the workspace preferred by SHSEQR, as */
+/* calculated below. HSWORK is computed assuming ILO=1 and IHI=N, */
+/* the worst case. */
+/* If SENSE = 'E', 'V' or 'B', then the amount of workspace needed */
+/* depends on SDIM, which is computed by the routine STRSEN later */
+/* in the code.) */
+
+ if (*info == 0) {
+ liwrk = 1;
+ if (*n == 0) {
+ minwrk = 1;
+ lwrk = 1;
+ } else {
+ maxwrk = (*n << 1) + *n * ilaenv_(&c__1, "SGEHRD", " ", n, &c__1,
+ n, &c__0);
+ minwrk = *n * 3;
+
+ shseqr_("S", jobvs, n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[1]
+, &vs[vs_offset], ldvs, &work[1], &c_n1, &ieval);
+ hswork = work[1];
+
+ if (! wantvs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + hswork;
+ maxwrk = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "SORGHR", " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + hswork;
+ maxwrk = max(i__1,i__2);
+ }
+ lwrk = maxwrk;
+ if (! wantsn) {
+/* Computing MAX */
+ i__1 = lwrk, i__2 = *n + *n * *n / 2;
+ lwrk = max(i__1,i__2);
+ }
+ if (wantsv || wantsb) {
+ liwrk = *n * *n / 4;
+ }
+ }
+ iwork[1] = liwrk;
+ work[1] = (real) lwrk;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -16;
+ } else if (*liwork < 1 && ! lquery) {
+ *info = -18;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGEESX", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *sdim = 0;
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = slange_("M", n, n, &a[a_offset], lda, dum);
+ scalea = FALSE_;
+ if (anrm > 0.f && anrm < smlnum) {
+ scalea = TRUE_;
+ cscale = smlnum;
+ } else if (anrm > bignum) {
+ scalea = TRUE_;
+ cscale = bignum;
+ }
+ if (scalea) {
+ slascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Permute the matrix to make it more nearly triangular */
+/* (RWorkspace: need N) */
+
+ ibal = 1;
+ sgebal_("P", n, &a[a_offset], lda, &ilo, &ihi, &work[ibal], &ierr);
+
+/* Reduce to upper Hessenberg form */
+/* (RWorkspace: need 3*N, prefer 2*N+N*NB) */
+
+ itau = *n + ibal;
+ iwrk = *n + itau;
+ i__1 = *lwork - iwrk + 1;
+ sgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1,
+ &ierr);
+
+ if (wantvs) {
+
+/* Copy Householder vectors to VS */
+
+ slacpy_("L", n, n, &a[a_offset], lda, &vs[vs_offset], ldvs)
+ ;
+
+/* Generate orthogonal matrix in VS */
+/* (RWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+
+ i__1 = *lwork - iwrk + 1;
+ sorghr_(n, &ilo, &ihi, &vs[vs_offset], ldvs, &work[itau], &work[iwrk],
+ &i__1, &ierr);
+ }
+
+ *sdim = 0;
+
+/* Perform QR iteration, accumulating Schur vectors in VS if desired */
+/* (RWorkspace: need N+1, prefer N+HSWORK (see comments) ) */
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ shseqr_("S", jobvs, n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &vs[
+ vs_offset], ldvs, &work[iwrk], &i__1, &ieval);
+ if (ieval > 0) {
+ *info = ieval;
+ }
+
+/* Sort eigenvalues if desired */
+
+ if (wantst && *info == 0) {
+ if (scalea) {
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, n, &c__1, &wr[1], n, &
+ ierr);
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, n, &c__1, &wi[1], n, &
+ ierr);
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ bwork[i__] = (*select)(&wr[i__], &wi[i__]);
+/* L10: */
+ }
+
+/* Reorder eigenvalues, transform Schur vectors, and compute */
+/* reciprocal condition numbers */
+/* (RWorkspace: if SENSE is not 'N', need N+2*SDIM*(N-SDIM) */
+/* otherwise, need N ) */
+/* (IWorkspace: if SENSE is 'V' or 'B', need SDIM*(N-SDIM) */
+/* otherwise, need 0 ) */
+
+ i__1 = *lwork - iwrk + 1;
+ strsen_(sense, jobvs, &bwork[1], n, &a[a_offset], lda, &vs[vs_offset],
+ ldvs, &wr[1], &wi[1], sdim, rconde, rcondv, &work[iwrk], &
+ i__1, &iwork[1], liwork, &icond);
+ if (! wantsn) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + (*sdim << 1) * (*n - *sdim);
+ maxwrk = max(i__1,i__2);
+ }
+ if (icond == -15) {
+
+/* Not enough real workspace */
+
+ *info = -16;
+ } else if (icond == -17) {
+
+/* Not enough integer workspace */
+
+ *info = -18;
+ } else if (icond > 0) {
+
+/* STRSEN failed to reorder or to restore standard Schur form */
+
+ *info = icond + *n;
+ }
+ }
+
+ if (wantvs) {
+
+/* Undo balancing */
+/* (RWorkspace: need N) */
+
+ sgebak_("P", "R", n, &ilo, &ihi, &work[ibal], n, &vs[vs_offset], ldvs,
+ &ierr);
+ }
+
+ if (scalea) {
+
+/* Undo scaling for the Schur form of A */
+
+ slascl_("H", &c__0, &c__0, &cscale, &anrm, n, n, &a[a_offset], lda, &
+ ierr);
+ i__1 = *lda + 1;
+ scopy_(n, &a[a_offset], &i__1, &wr[1], &c__1);
+ if ((wantsv || wantsb) && *info == 0) {
+ dum[0] = *rcondv;
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &c__1, &c__1, dum, &
+ c__1, &ierr);
+ *rcondv = dum[0];
+ }
+ if (cscale == smlnum) {
+
+/* If scaling back towards underflow, adjust WI if an */
+/* offdiagonal element of a 2-by-2 block in the Schur form */
+/* underflows. */
+
+ if (ieval > 0) {
+ i1 = ieval + 1;
+ i2 = ihi - 1;
+ i__1 = ilo - 1;
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[
+ 1], n, &ierr);
+ } else if (wantst) {
+ i1 = 1;
+ i2 = *n - 1;
+ } else {
+ i1 = ilo;
+ i2 = ihi - 1;
+ }
+ inxt = i1 - 1;
+ i__1 = i2;
+ for (i__ = i1; i__ <= i__1; ++i__) {
+ if (i__ < inxt) {
+ goto L20;
+ }
+ if (wi[i__] == 0.f) {
+ inxt = i__ + 1;
+ } else {
+ if (a[i__ + 1 + i__ * a_dim1] == 0.f) {
+ wi[i__] = 0.f;
+ wi[i__ + 1] = 0.f;
+ } else if (a[i__ + 1 + i__ * a_dim1] != 0.f && a[i__ + (
+ i__ + 1) * a_dim1] == 0.f) {
+ wi[i__] = 0.f;
+ wi[i__ + 1] = 0.f;
+ if (i__ > 1) {
+ i__2 = i__ - 1;
+ sswap_(&i__2, &a[i__ * a_dim1 + 1], &c__1, &a[(
+ i__ + 1) * a_dim1 + 1], &c__1);
+ }
+ if (*n > i__ + 1) {
+ i__2 = *n - i__ - 1;
+ sswap_(&i__2, &a[i__ + (i__ + 2) * a_dim1], lda, &
+ a[i__ + 1 + (i__ + 2) * a_dim1], lda);
+ }
+ sswap_(n, &vs[i__ * vs_dim1 + 1], &c__1, &vs[(i__ + 1)
+ * vs_dim1 + 1], &c__1);
+ a[i__ + (i__ + 1) * a_dim1] = a[i__ + 1 + i__ *
+ a_dim1];
+ a[i__ + 1 + i__ * a_dim1] = 0.f;
+ }
+ inxt = i__ + 2;
+ }
+L20:
+ ;
+ }
+ }
+ i__1 = *n - ieval;
+/* Computing MAX */
+ i__3 = *n - ieval;
+ i__2 = max(i__3,1);
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[ieval +
+ 1], &i__2, &ierr);
+ }
+
+ if (wantst && *info == 0) {
+
+/* Check if reordering successful */
+
+ lastsl = TRUE_;
+ lst2sl = TRUE_;
+ *sdim = 0;
+ ip = 0;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ cursl = (*select)(&wr[i__], &wi[i__]);
+ if (wi[i__] == 0.f) {
+ if (cursl) {
+ ++(*sdim);
+ }
+ ip = 0;
+ if (cursl && ! lastsl) {
+ *info = *n + 2;
+ }
+ } else {
+ if (ip == 1) {
+
+/* Last eigenvalue of conjugate pair */
+
+ cursl = cursl || lastsl;
+ lastsl = cursl;
+ if (cursl) {
+ *sdim += 2;
+ }
+ ip = -1;
+ if (cursl && ! lst2sl) {
+ *info = *n + 2;
+ }
+ } else {
+
+/* First eigenvalue of conjugate pair */
+
+ ip = 1;
+ }
+ }
+ lst2sl = lastsl;
+ lastsl = cursl;
+/* L30: */
+ }
+ }
+
+ work[1] = (real) maxwrk;
+ if (wantsv || wantsb) {
+ iwork[1] = *sdim * (*n - *sdim);
+ } else {
+ iwork[1] = 1;
+ }
+
+ return 0;
+
+/* End of SGEESX */
+
+} /* sgeesx_ */
diff --git a/contrib/libs/clapack/sgeev.c b/contrib/libs/clapack/sgeev.c
new file mode 100644
index 0000000000..e4d639fd60
--- /dev/null
+++ b/contrib/libs/clapack/sgeev.c
@@ -0,0 +1,558 @@
+/* sgeev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int sgeev_(char *jobvl, char *jobvr, integer *n, real *a,
+ integer *lda, real *wr, real *wi, real *vl, integer *ldvl, real *vr,
+ integer *ldvr, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+ i__2, i__3;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, k;
+ real r__, cs, sn;
+ integer ihi;
+ real scl;
+ integer ilo;
+ real dum[1], eps;
+ integer ibal;
+ char side[1];
+ real anrm;
+ integer ierr, itau, iwrk, nout;
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *);
+ extern doublereal snrm2_(integer *, real *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ extern doublereal slapy2_(real *, real *);
+ extern /* Subroutine */ int slabad_(real *, real *);
+ logical scalea;
+ real cscale;
+ extern /* Subroutine */ int sgebak_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, integer *, integer *), sgebal_(char *, integer *, real *, integer *,
+ integer *, integer *, real *, integer *);
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ extern /* Subroutine */ int sgehrd_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *), xerbla_(char
+ *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ logical select[1];
+ real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *), slartg_(real *, real *,
+ real *, real *, real *), sorghr_(integer *, integer *, integer *,
+ real *, integer *, real *, real *, integer *, integer *), shseqr_(
+ char *, char *, integer *, integer *, integer *, real *, integer *
+, real *, real *, real *, integer *, real *, integer *, integer *), strevc_(char *, char *, logical *, integer *,
+ real *, integer *, real *, integer *, real *, integer *, integer *
+, integer *, real *, integer *);
+ integer minwrk, maxwrk;
+ logical wantvl;
+ real smlnum;
+ integer hswork;
+ logical lquery, wantvr;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGEEV computes for an N-by-N real nonsymmetric matrix A, the */
+/* eigenvalues and, optionally, the left and/or right eigenvectors. */
+
+/* The right eigenvector v(j) of A satisfies */
+/* A * v(j) = lambda(j) * v(j) */
+/* where lambda(j) is its eigenvalue. */
+/* The left eigenvector u(j) of A satisfies */
+/* u(j)**H * A = lambda(j) * u(j)**H */
+/* where u(j)**H denotes the conjugate transpose of u(j). */
+
+/* The computed eigenvectors are normalized to have Euclidean norm */
+/* equal to 1 and largest component real. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVL (input) CHARACTER*1 */
+/* = 'N': left eigenvectors of A are not computed; */
+/* = 'V': left eigenvectors of A are computed. */
+
+/* JOBVR (input) CHARACTER*1 */
+/* = 'N': right eigenvectors of A are not computed; */
+/* = 'V': right eigenvectors of A are computed. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the N-by-N matrix A. */
+/* On exit, A has been overwritten. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* WR (output) REAL array, dimension (N) */
+/* WI (output) REAL array, dimension (N) */
+/* WR and WI contain the real and imaginary parts, */
+/* respectively, of the computed eigenvalues. Complex */
+/* conjugate pairs of eigenvalues appear consecutively */
+/* with the eigenvalue having the positive imaginary part */
+/* first. */
+
+/* VL (output) REAL array, dimension (LDVL,N) */
+/* If JOBVL = 'V', the left eigenvectors u(j) are stored one */
+/* after another in the columns of VL, in the same order */
+/* as their eigenvalues. */
+/* If JOBVL = 'N', VL is not referenced. */
+/* If the j-th eigenvalue is real, then u(j) = VL(:,j), */
+/* the j-th column of VL. */
+/* If the j-th and (j+1)-st eigenvalues form a complex */
+/* conjugate pair, then u(j) = VL(:,j) + i*VL(:,j+1) and */
+/* u(j+1) = VL(:,j) - i*VL(:,j+1). */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the array VL. LDVL >= 1; if */
+/* JOBVL = 'V', LDVL >= N. */
+
+/* VR (output) REAL array, dimension (LDVR,N) */
+/* If JOBVR = 'V', the right eigenvectors v(j) are stored one */
+/* after another in the columns of VR, in the same order */
+/* as their eigenvalues. */
+/* If JOBVR = 'N', VR is not referenced. */
+/* If the j-th eigenvalue is real, then v(j) = VR(:,j), */
+/* the j-th column of VR. */
+/* If the j-th and (j+1)-st eigenvalues form a complex */
+/* conjugate pair, then v(j) = VR(:,j) + i*VR(:,j+1) and */
+/* v(j+1) = VR(:,j) - i*VR(:,j+1). */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. LDVR >= 1; if */
+/* JOBVR = 'V', LDVR >= N. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,3*N), and */
+/* if JOBVL = 'V' or JOBVR = 'V', LWORK >= 4*N. For good */
+/* performance, LWORK must generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, the QR algorithm failed to compute all the */
+/* eigenvalues, and no eigenvectors have been computed; */
+/* elements i+1:N of WR and WI contain eigenvalues which */
+/* have converged. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --wr;
+ --wi;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ wantvl = lsame_(jobvl, "V");
+ wantvr = lsame_(jobvr, "V");
+ if (! wantvl && ! lsame_(jobvl, "N")) {
+ *info = -1;
+ } else if (! wantvr && ! lsame_(jobvr, "N")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldvl < 1 || wantvl && *ldvl < *n) {
+ *info = -9;
+ } else if (*ldvr < 1 || wantvr && *ldvr < *n) {
+ *info = -11;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV. */
+/* HSWORK refers to the workspace preferred by SHSEQR, as */
+/* calculated below. HSWORK is computed assuming ILO=1 and IHI=N, */
+/* the worst case.) */
+
+ if (*info == 0) {
+ if (*n == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ } else {
+ maxwrk = (*n << 1) + *n * ilaenv_(&c__1, "SGEHRD", " ", n, &c__1,
+ n, &c__0);
+ if (wantvl) {
+ minwrk = *n << 2;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "SORGHR", " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ shseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[
+ 1], &vl[vl_offset], ldvl, &work[1], &c_n1, info);
+ hswork = work[1];
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = *
+ n + hswork;
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n << 2;
+ maxwrk = max(i__1,i__2);
+ } else if (wantvr) {
+ minwrk = *n << 2;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "SORGHR", " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ shseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[
+ 1], &vr[vr_offset], ldvr, &work[1], &c_n1, info);
+ hswork = work[1];
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = *
+ n + hswork;
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n << 2;
+ maxwrk = max(i__1,i__2);
+ } else {
+ minwrk = *n * 3;
+ shseqr_("E", "N", n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[
+ 1], &vr[vr_offset], ldvr, &work[1], &c_n1, info);
+ hswork = work[1];
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = *
+ n + hswork;
+ maxwrk = max(i__1,i__2);
+ }
+ maxwrk = max(maxwrk,minwrk);
+ }
+ work[1] = (real) maxwrk;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -13;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGEEV ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = slange_("M", n, n, &a[a_offset], lda, dum);
+ scalea = FALSE_;
+ if (anrm > 0.f && anrm < smlnum) {
+ scalea = TRUE_;
+ cscale = smlnum;
+ } else if (anrm > bignum) {
+ scalea = TRUE_;
+ cscale = bignum;
+ }
+ if (scalea) {
+ slascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Balance the matrix */
+/* (Workspace: need N) */
+
+ ibal = 1;
+ sgebal_("B", n, &a[a_offset], lda, &ilo, &ihi, &work[ibal], &ierr);
+
+/* Reduce to upper Hessenberg form */
+/* (Workspace: need 3*N, prefer 2*N+N*NB) */
+
+ itau = ibal + *n;
+ iwrk = itau + *n;
+ i__1 = *lwork - iwrk + 1;
+ sgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1,
+ &ierr);
+
+ if (wantvl) {
+
+/* Want left eigenvectors */
+/* Copy Householder vectors to VL */
+
+ *(unsigned char *)side = 'L';
+ slacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl)
+ ;
+
+/* Generate orthogonal matrix in VL */
+/* (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+
+ i__1 = *lwork - iwrk + 1;
+ sorghr_(n, &ilo, &ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk],
+ &i__1, &ierr);
+
+/* Perform QR iteration, accumulating Schur vectors in VL */
+/* (Workspace: need N+1, prefer N+HSWORK (see comments) ) */
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ shseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &
+ vl[vl_offset], ldvl, &work[iwrk], &i__1, info);
+
+ if (wantvr) {
+
+/* Want left and right eigenvectors */
+/* Copy Schur vectors to VR */
+
+ *(unsigned char *)side = 'B';
+ slacpy_("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr);
+ }
+
+ } else if (wantvr) {
+
+/* Want right eigenvectors */
+/* Copy Householder vectors to VR */
+
+ *(unsigned char *)side = 'R';
+ slacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr)
+ ;
+
+/* Generate orthogonal matrix in VR */
+/* (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+
+ i__1 = *lwork - iwrk + 1;
+ sorghr_(n, &ilo, &ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk],
+ &i__1, &ierr);
+
+/* Perform QR iteration, accumulating Schur vectors in VR */
+/* (Workspace: need N+1, prefer N+HSWORK (see comments) ) */
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ shseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &
+ vr[vr_offset], ldvr, &work[iwrk], &i__1, info);
+
+ } else {
+
+/* Compute eigenvalues only */
+/* (Workspace: need N+1, prefer N+HSWORK (see comments) ) */
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ shseqr_("E", "N", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &
+ vr[vr_offset], ldvr, &work[iwrk], &i__1, info);
+ }
+
+/* If INFO > 0 from SHSEQR, then quit */
+
+ if (*info > 0) {
+ goto L50;
+ }
+
+ if (wantvl || wantvr) {
+
+/* Compute left and/or right eigenvectors */
+/* (Workspace: need 4*N) */
+
+ strevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl,
+ &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &ierr);
+ }
+
+ if (wantvl) {
+
+/* Undo balancing of left eigenvectors */
+/* (Workspace: need N) */
+
+ sgebak_("B", "L", n, &ilo, &ihi, &work[ibal], n, &vl[vl_offset], ldvl,
+ &ierr);
+
+/* Normalize left eigenvectors and make largest component real */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (wi[i__] == 0.f) {
+ scl = 1.f / snrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+ sscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+ } else if (wi[i__] > 0.f) {
+ r__1 = snrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+ r__2 = snrm2_(n, &vl[(i__ + 1) * vl_dim1 + 1], &c__1);
+ scl = 1.f / slapy2_(&r__1, &r__2);
+ sscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+ sscal_(n, &scl, &vl[(i__ + 1) * vl_dim1 + 1], &c__1);
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+/* Computing 2nd power */
+ r__1 = vl[k + i__ * vl_dim1];
+/* Computing 2nd power */
+ r__2 = vl[k + (i__ + 1) * vl_dim1];
+ work[iwrk + k - 1] = r__1 * r__1 + r__2 * r__2;
+/* L10: */
+ }
+ k = isamax_(n, &work[iwrk], &c__1);
+ slartg_(&vl[k + i__ * vl_dim1], &vl[k + (i__ + 1) * vl_dim1],
+ &cs, &sn, &r__);
+ srot_(n, &vl[i__ * vl_dim1 + 1], &c__1, &vl[(i__ + 1) *
+ vl_dim1 + 1], &c__1, &cs, &sn);
+ vl[k + (i__ + 1) * vl_dim1] = 0.f;
+ }
+/* L20: */
+ }
+ }
+
+ if (wantvr) {
+
+/* Undo balancing of right eigenvectors */
+/* (Workspace: need N) */
+
+ sgebak_("B", "R", n, &ilo, &ihi, &work[ibal], n, &vr[vr_offset], ldvr,
+ &ierr);
+
+/* Normalize right eigenvectors and make largest component real */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (wi[i__] == 0.f) {
+ scl = 1.f / snrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+ sscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+ } else if (wi[i__] > 0.f) {
+ r__1 = snrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+ r__2 = snrm2_(n, &vr[(i__ + 1) * vr_dim1 + 1], &c__1);
+ scl = 1.f / slapy2_(&r__1, &r__2);
+ sscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+ sscal_(n, &scl, &vr[(i__ + 1) * vr_dim1 + 1], &c__1);
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+/* Computing 2nd power */
+ r__1 = vr[k + i__ * vr_dim1];
+/* Computing 2nd power */
+ r__2 = vr[k + (i__ + 1) * vr_dim1];
+ work[iwrk + k - 1] = r__1 * r__1 + r__2 * r__2;
+/* L30: */
+ }
+ k = isamax_(n, &work[iwrk], &c__1);
+ slartg_(&vr[k + i__ * vr_dim1], &vr[k + (i__ + 1) * vr_dim1],
+ &cs, &sn, &r__);
+ srot_(n, &vr[i__ * vr_dim1 + 1], &c__1, &vr[(i__ + 1) *
+ vr_dim1 + 1], &c__1, &cs, &sn);
+ vr[k + (i__ + 1) * vr_dim1] = 0.f;
+ }
+/* L40: */
+ }
+ }
+
+/* Undo scaling if necessary */
+
+L50:
+ if (scalea) {
+ i__1 = *n - *info;
+/* Computing MAX */
+ i__3 = *n - *info;
+ i__2 = max(i__3,1);
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[*info +
+ 1], &i__2, &ierr);
+ i__1 = *n - *info;
+/* Computing MAX */
+ i__3 = *n - *info;
+ i__2 = max(i__3,1);
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[*info +
+ 1], &i__2, &ierr);
+ if (*info > 0) {
+ i__1 = ilo - 1;
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[1],
+ n, &ierr);
+ i__1 = ilo - 1;
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[1],
+ n, &ierr);
+ }
+ }
+
+ work[1] = (real) maxwrk;
+ return 0;
+
+/* End of SGEEV */
+
+} /* sgeev_ */
diff --git a/contrib/libs/clapack/sgeevx.c b/contrib/libs/clapack/sgeevx.c
new file mode 100644
index 0000000000..2e94a12fec
--- /dev/null
+++ b/contrib/libs/clapack/sgeevx.c
@@ -0,0 +1,696 @@
+/* sgeevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int sgeevx_(char *balanc, char *jobvl, char *jobvr, char *
+ sense, integer *n, real *a, integer *lda, real *wr, real *wi, real *
+ vl, integer *ldvl, real *vr, integer *ldvr, integer *ilo, integer *
+ ihi, real *scale, real *abnrm, real *rconde, real *rcondv, real *work,
+ integer *lwork, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+ i__2, i__3;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, k;
+ real r__, cs, sn;
+ char job[1];
+ real scl, dum[1], eps;
+ char side[1];
+ real anrm;
+ integer ierr, itau, iwrk, nout;
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *);
+ extern doublereal snrm2_(integer *, real *, integer *);
+ integer icond;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ extern doublereal slapy2_(real *, real *);
+ extern /* Subroutine */ int slabad_(real *, real *);
+ logical scalea;
+ real cscale;
+ extern /* Subroutine */ int sgebak_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, integer *, integer *), sgebal_(char *, integer *, real *, integer *,
+ integer *, integer *, real *, integer *);
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ extern /* Subroutine */ int sgehrd_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *), xerbla_(char
+ *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ logical select[1];
+ real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *), slartg_(real *, real *,
+ real *, real *, real *), sorghr_(integer *, integer *, integer *,
+ real *, integer *, real *, real *, integer *, integer *), shseqr_(
+ char *, char *, integer *, integer *, integer *, real *, integer *
+, real *, real *, real *, integer *, real *, integer *, integer *), strevc_(char *, char *, logical *, integer *,
+ real *, integer *, real *, integer *, real *, integer *, integer *
+, integer *, real *, integer *);
+ integer minwrk, maxwrk;
+ extern /* Subroutine */ int strsna_(char *, char *, logical *, integer *,
+ real *, integer *, real *, integer *, real *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *,
+ integer *);
+ logical wantvl, wntsnb;
+ integer hswork;
+ logical wntsne;
+ real smlnum;
+ logical lquery, wantvr, wntsnn, wntsnv;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGEEVX computes for an N-by-N real nonsymmetric matrix A, the */
+/* eigenvalues and, optionally, the left and/or right eigenvectors. */
+
+/* Optionally also, it computes a balancing transformation to improve */
+/* the conditioning of the eigenvalues and eigenvectors (ILO, IHI, */
+/* SCALE, and ABNRM), reciprocal condition numbers for the eigenvalues */
+/* (RCONDE), and reciprocal condition numbers for the right */
+/* eigenvectors (RCONDV). */
+
+/* The right eigenvector v(j) of A satisfies */
+/* A * v(j) = lambda(j) * v(j) */
+/* where lambda(j) is its eigenvalue. */
+/* The left eigenvector u(j) of A satisfies */
+/* u(j)**H * A = lambda(j) * u(j)**H */
+/* where u(j)**H denotes the conjugate transpose of u(j). */
+
+/* The computed eigenvectors are normalized to have Euclidean norm */
+/* equal to 1 and largest component real. */
+
+/* Balancing a matrix means permuting the rows and columns to make it */
+/* more nearly upper triangular, and applying a diagonal similarity */
+/* transformation D * A * D**(-1), where D is a diagonal matrix, to */
+/* make its rows and columns closer in norm and the condition numbers */
+/* of its eigenvalues and eigenvectors smaller. The computed */
+/* reciprocal condition numbers correspond to the balanced matrix. */
+/* Permuting rows and columns will not change the condition numbers */
+/* (in exact arithmetic) but diagonal scaling will. For further */
+/* explanation of balancing, see section 4.10.2 of the LAPACK */
+/* Users' Guide. */
+
+/* Arguments */
+/* ========= */
+
+/* BALANC (input) CHARACTER*1 */
+/* Indicates how the input matrix should be diagonally scaled */
+/* and/or permuted to improve the conditioning of its */
+/* eigenvalues. */
+/* = 'N': Do not diagonally scale or permute; */
+/* = 'P': Perform permutations to make the matrix more nearly */
+/* upper triangular. Do not diagonally scale; */
+/* = 'S': Diagonally scale the matrix, i.e. replace A by */
+/* D*A*D**(-1), where D is a diagonal matrix chosen */
+/* to make the rows and columns of A more equal in */
+/* norm. Do not permute; */
+/* = 'B': Both diagonally scale and permute A. */
+
+/* Computed reciprocal condition numbers will be for the matrix */
+/* after balancing and/or permuting. Permuting does not change */
+/* condition numbers (in exact arithmetic), but balancing does. */
+
+/* JOBVL (input) CHARACTER*1 */
+/* = 'N': left eigenvectors of A are not computed; */
+/* = 'V': left eigenvectors of A are computed. */
+/* If SENSE = 'E' or 'B', JOBVL must = 'V'. */
+
+/* JOBVR (input) CHARACTER*1 */
+/* = 'N': right eigenvectors of A are not computed; */
+/* = 'V': right eigenvectors of A are computed. */
+/* If SENSE = 'E' or 'B', JOBVR must = 'V'. */
+
+/* SENSE (input) CHARACTER*1 */
+/* Determines which reciprocal condition numbers are computed. */
+/* = 'N': None are computed; */
+/* = 'E': Computed for eigenvalues only; */
+/* = 'V': Computed for right eigenvectors only; */
+/* = 'B': Computed for eigenvalues and right eigenvectors. */
+
+/* If SENSE = 'E' or 'B', both left and right eigenvectors */
+/* must also be computed (JOBVL = 'V' and JOBVR = 'V'). */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the N-by-N matrix A. */
+/* On exit, A has been overwritten. If JOBVL = 'V' or */
+/* JOBVR = 'V', A contains the real Schur form of the balanced */
+/* version of the input matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* WR (output) REAL array, dimension (N) */
+/* WI (output) REAL array, dimension (N) */
+/* WR and WI contain the real and imaginary parts, */
+/* respectively, of the computed eigenvalues. Complex */
+/* conjugate pairs of eigenvalues will appear consecutively */
+/* with the eigenvalue having the positive imaginary part */
+/* first. */
+
+/* VL (output) REAL array, dimension (LDVL,N) */
+/* If JOBVL = 'V', the left eigenvectors u(j) are stored one */
+/* after another in the columns of VL, in the same order */
+/* as their eigenvalues. */
+/* If JOBVL = 'N', VL is not referenced. */
+/* If the j-th eigenvalue is real, then u(j) = VL(:,j), */
+/* the j-th column of VL. */
+/* If the j-th and (j+1)-st eigenvalues form a complex */
+/* conjugate pair, then u(j) = VL(:,j) + i*VL(:,j+1) and */
+/* u(j+1) = VL(:,j) - i*VL(:,j+1). */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the array VL. LDVL >= 1; if */
+/* JOBVL = 'V', LDVL >= N. */
+
+/* VR (output) REAL array, dimension (LDVR,N) */
+/* If JOBVR = 'V', the right eigenvectors v(j) are stored one */
+/* after another in the columns of VR, in the same order */
+/* as their eigenvalues. */
+/* If JOBVR = 'N', VR is not referenced. */
+/* If the j-th eigenvalue is real, then v(j) = VR(:,j), */
+/* the j-th column of VR. */
+/* If the j-th and (j+1)-st eigenvalues form a complex */
+/* conjugate pair, then v(j) = VR(:,j) + i*VR(:,j+1) and */
+/* v(j+1) = VR(:,j) - i*VR(:,j+1). */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. LDVR >= 1, and if */
+/* JOBVR = 'V', LDVR >= N. */
+
+/* ILO (output) INTEGER */
+/* IHI (output) INTEGER */
+/* ILO and IHI are integer values determined when A was */
+/* balanced. The balanced A(i,j) = 0 if I > J and */
+/* J = 1,...,ILO-1 or I = IHI+1,...,N. */
+
+/* SCALE (output) REAL array, dimension (N) */
+/* Details of the permutations and scaling factors applied */
+/* when balancing A. If P(j) is the index of the row and column */
+/* interchanged with row and column j, and D(j) is the scaling */
+/* factor applied to row and column j, then */
+/* SCALE(J) = P(J), for J = 1,...,ILO-1 */
+/* = D(J), for J = ILO,...,IHI */
+/* = P(J) for J = IHI+1,...,N. */
+/* The order in which the interchanges are made is N to IHI+1, */
+/* then 1 to ILO-1. */
+
+/* ABNRM (output) REAL */
+/* The one-norm of the balanced matrix (the maximum */
+/* of the sum of absolute values of elements of any column). */
+
+/* RCONDE (output) REAL array, dimension (N) */
+/* RCONDE(j) is the reciprocal condition number of the j-th */
+/* eigenvalue. */
+
+/* RCONDV (output) REAL array, dimension (N) */
+/* RCONDV(j) is the reciprocal condition number of the j-th */
+/* right eigenvector. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. If SENSE = 'N' or 'E', */
+/* LWORK >= max(1,2*N), and if JOBVL = 'V' or JOBVR = 'V', */
+/* LWORK >= 3*N. If SENSE = 'V' or 'B', LWORK >= N*(N+6). */
+/* For good performance, LWORK must generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (2*N-2) */
+/* If SENSE = 'N' or 'E', not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, the QR algorithm failed to compute all the */
+/* eigenvalues, and no eigenvectors or condition numbers */
+/* have been computed; elements 1:ILO-1 and i+1:N of WR */
+/* and WI contain eigenvalues which have converged. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --wr;
+ --wi;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --scale;
+ --rconde;
+ --rcondv;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ wantvl = lsame_(jobvl, "V");
+ wantvr = lsame_(jobvr, "V");
+ wntsnn = lsame_(sense, "N");
+ wntsne = lsame_(sense, "E");
+ wntsnv = lsame_(sense, "V");
+ wntsnb = lsame_(sense, "B");
+ if (! (lsame_(balanc, "N") || lsame_(balanc, "S") || lsame_(balanc, "P")
+ || lsame_(balanc, "B"))) {
+ *info = -1;
+ } else if (! wantvl && ! lsame_(jobvl, "N")) {
+ *info = -2;
+ } else if (! wantvr && ! lsame_(jobvr, "N")) {
+ *info = -3;
+ } else if (! (wntsnn || wntsne || wntsnb || wntsnv) || (wntsne || wntsnb)
+ && ! (wantvl && wantvr)) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldvl < 1 || wantvl && *ldvl < *n) {
+ *info = -11;
+ } else if (*ldvr < 1 || wantvr && *ldvr < *n) {
+ *info = -13;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV. */
+/* HSWORK refers to the workspace preferred by SHSEQR, as */
+/* calculated below. HSWORK is computed assuming ILO=1 and IHI=N, */
+/* the worst case.) */
+
+ if (*info == 0) {
+ if (*n == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ } else {
+ maxwrk = *n + *n * ilaenv_(&c__1, "SGEHRD", " ", n, &c__1, n, &
+ c__0);
+
+ if (wantvl) {
+ shseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[
+ 1], &vl[vl_offset], ldvl, &work[1], &c_n1, info);
+ } else if (wantvr) {
+ shseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[
+ 1], &vr[vr_offset], ldvr, &work[1], &c_n1, info);
+ } else {
+ if (wntsnn) {
+ shseqr_("E", "N", n, &c__1, n, &a[a_offset], lda, &wr[1],
+ &wi[1], &vr[vr_offset], ldvr, &work[1], &c_n1,
+ info);
+ } else {
+ shseqr_("S", "N", n, &c__1, n, &a[a_offset], lda, &wr[1],
+ &wi[1], &vr[vr_offset], ldvr, &work[1], &c_n1,
+ info);
+ }
+ }
+ hswork = work[1];
+
+ if (! wantvl && ! wantvr) {
+ minwrk = *n << 1;
+ if (! wntsnn) {
+/* Computing MAX */
+ i__1 = minwrk, i__2 = *n * *n + *n * 6;
+ minwrk = max(i__1,i__2);
+ }
+ maxwrk = max(maxwrk,hswork);
+ if (! wntsnn) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * *n + *n * 6;
+ maxwrk = max(i__1,i__2);
+ }
+ } else {
+ minwrk = *n * 3;
+ if (! wntsnn && ! wntsne) {
+/* Computing MAX */
+ i__1 = minwrk, i__2 = *n * *n + *n * 6;
+ minwrk = max(i__1,i__2);
+ }
+ maxwrk = max(maxwrk,hswork);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + (*n - 1) * ilaenv_(&c__1, "SORGHR",
+ " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ if (! wntsnn && ! wntsne) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * *n + *n * 6;
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * 3;
+ maxwrk = max(i__1,i__2);
+ }
+ maxwrk = max(maxwrk,minwrk);
+ }
+ work[1] = (real) maxwrk;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -21;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGEEVX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ icond = 0;
+ anrm = slange_("M", n, n, &a[a_offset], lda, dum);
+ scalea = FALSE_;
+ if (anrm > 0.f && anrm < smlnum) {
+ scalea = TRUE_;
+ cscale = smlnum;
+ } else if (anrm > bignum) {
+ scalea = TRUE_;
+ cscale = bignum;
+ }
+ if (scalea) {
+ slascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Balance the matrix and compute ABNRM */
+
+ sgebal_(balanc, n, &a[a_offset], lda, ilo, ihi, &scale[1], &ierr);
+ *abnrm = slange_("1", n, n, &a[a_offset], lda, dum);
+ if (scalea) {
+ dum[0] = *abnrm;
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &c__1, &c__1, dum, &c__1, &
+ ierr);
+ *abnrm = dum[0];
+ }
+
+/* Reduce to upper Hessenberg form */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ itau = 1;
+ iwrk = itau + *n;
+ i__1 = *lwork - iwrk + 1;
+ sgehrd_(n, ilo, ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1, &
+ ierr);
+
+ if (wantvl) {
+
+/* Want left eigenvectors */
+/* Copy Householder vectors to VL */
+
+ *(unsigned char *)side = 'L';
+ slacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl)
+ ;
+
+/* Generate orthogonal matrix in VL */
+/* (Workspace: need 2*N-1, prefer N+(N-1)*NB) */
+
+ i__1 = *lwork - iwrk + 1;
+ sorghr_(n, ilo, ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk], &
+ i__1, &ierr);
+
+/* Perform QR iteration, accumulating Schur vectors in VL */
+/* (Workspace: need 1, prefer HSWORK (see comments) ) */
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ shseqr_("S", "V", n, ilo, ihi, &a[a_offset], lda, &wr[1], &wi[1], &vl[
+ vl_offset], ldvl, &work[iwrk], &i__1, info);
+
+ if (wantvr) {
+
+/* Want left and right eigenvectors */
+/* Copy Schur vectors to VR */
+
+ *(unsigned char *)side = 'B';
+ slacpy_("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr);
+ }
+
+ } else if (wantvr) {
+
+/* Want right eigenvectors */
+/* Copy Householder vectors to VR */
+
+ *(unsigned char *)side = 'R';
+ slacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr)
+ ;
+
+/* Generate orthogonal matrix in VR */
+/* (Workspace: need 2*N-1, prefer N+(N-1)*NB) */
+
+ i__1 = *lwork - iwrk + 1;
+ sorghr_(n, ilo, ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk], &
+ i__1, &ierr);
+
+/* Perform QR iteration, accumulating Schur vectors in VR */
+/* (Workspace: need 1, prefer HSWORK (see comments) ) */
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ shseqr_("S", "V", n, ilo, ihi, &a[a_offset], lda, &wr[1], &wi[1], &vr[
+ vr_offset], ldvr, &work[iwrk], &i__1, info);
+
+ } else {
+
+/* Compute eigenvalues only */
+/* If condition numbers desired, compute Schur form */
+
+ if (wntsnn) {
+ *(unsigned char *)job = 'E';
+ } else {
+ *(unsigned char *)job = 'S';
+ }
+
+/* (Workspace: need 1, prefer HSWORK (see comments) ) */
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ shseqr_(job, "N", n, ilo, ihi, &a[a_offset], lda, &wr[1], &wi[1], &vr[
+ vr_offset], ldvr, &work[iwrk], &i__1, info);
+ }
+
+/* If INFO > 0 from SHSEQR, then quit */
+
+ if (*info > 0) {
+ goto L50;
+ }
+
+ if (wantvl || wantvr) {
+
+/* Compute left and/or right eigenvectors */
+/* (Workspace: need 3*N) */
+
+ strevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl,
+ &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &ierr);
+ }
+
+/* Compute condition numbers if desired */
+/* (Workspace: need N*N+6*N unless SENSE = 'E') */
+
+ if (! wntsnn) {
+ strsna_(sense, "A", select, n, &a[a_offset], lda, &vl[vl_offset],
+ ldvl, &vr[vr_offset], ldvr, &rconde[1], &rcondv[1], n, &nout,
+ &work[iwrk], n, &iwork[1], &icond);
+ }
+
+ if (wantvl) {
+
+/* Undo balancing of left eigenvectors */
+
+ sgebak_(balanc, "L", n, ilo, ihi, &scale[1], n, &vl[vl_offset], ldvl,
+ &ierr);
+
+/* Normalize left eigenvectors and make largest component real */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (wi[i__] == 0.f) {
+ scl = 1.f / snrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+ sscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+ } else if (wi[i__] > 0.f) {
+ r__1 = snrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+ r__2 = snrm2_(n, &vl[(i__ + 1) * vl_dim1 + 1], &c__1);
+ scl = 1.f / slapy2_(&r__1, &r__2);
+ sscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+ sscal_(n, &scl, &vl[(i__ + 1) * vl_dim1 + 1], &c__1);
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+/* Computing 2nd power */
+ r__1 = vl[k + i__ * vl_dim1];
+/* Computing 2nd power */
+ r__2 = vl[k + (i__ + 1) * vl_dim1];
+ work[k] = r__1 * r__1 + r__2 * r__2;
+/* L10: */
+ }
+ k = isamax_(n, &work[1], &c__1);
+ slartg_(&vl[k + i__ * vl_dim1], &vl[k + (i__ + 1) * vl_dim1],
+ &cs, &sn, &r__);
+ srot_(n, &vl[i__ * vl_dim1 + 1], &c__1, &vl[(i__ + 1) *
+ vl_dim1 + 1], &c__1, &cs, &sn);
+ vl[k + (i__ + 1) * vl_dim1] = 0.f;
+ }
+/* L20: */
+ }
+ }
+
+ if (wantvr) {
+
+/* Undo balancing of right eigenvectors */
+
+ sgebak_(balanc, "R", n, ilo, ihi, &scale[1], n, &vr[vr_offset], ldvr,
+ &ierr);
+
+/* Normalize right eigenvectors and make largest component real */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (wi[i__] == 0.f) {
+ scl = 1.f / snrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+ sscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+ } else if (wi[i__] > 0.f) {
+ r__1 = snrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+ r__2 = snrm2_(n, &vr[(i__ + 1) * vr_dim1 + 1], &c__1);
+ scl = 1.f / slapy2_(&r__1, &r__2);
+ sscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+ sscal_(n, &scl, &vr[(i__ + 1) * vr_dim1 + 1], &c__1);
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+/* Computing 2nd power */
+ r__1 = vr[k + i__ * vr_dim1];
+/* Computing 2nd power */
+ r__2 = vr[k + (i__ + 1) * vr_dim1];
+ work[k] = r__1 * r__1 + r__2 * r__2;
+/* L30: */
+ }
+ k = isamax_(n, &work[1], &c__1);
+ slartg_(&vr[k + i__ * vr_dim1], &vr[k + (i__ + 1) * vr_dim1],
+ &cs, &sn, &r__);
+ srot_(n, &vr[i__ * vr_dim1 + 1], &c__1, &vr[(i__ + 1) *
+ vr_dim1 + 1], &c__1, &cs, &sn);
+ vr[k + (i__ + 1) * vr_dim1] = 0.f;
+ }
+/* L40: */
+ }
+ }
+
+/* Undo scaling if necessary */
+
+L50:
+ if (scalea) {
+ i__1 = *n - *info;
+/* Computing MAX */
+ i__3 = *n - *info;
+ i__2 = max(i__3,1);
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[*info +
+ 1], &i__2, &ierr);
+ i__1 = *n - *info;
+/* Computing MAX */
+ i__3 = *n - *info;
+ i__2 = max(i__3,1);
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[*info +
+ 1], &i__2, &ierr);
+ if (*info == 0) {
+ if ((wntsnv || wntsnb) && icond == 0) {
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, n, &c__1, &rcondv[
+ 1], n, &ierr);
+ }
+ } else {
+ i__1 = *ilo - 1;
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[1],
+ n, &ierr);
+ i__1 = *ilo - 1;
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[1],
+ n, &ierr);
+ }
+ }
+
+ work[1] = (real) maxwrk;
+ return 0;
+
+/* End of SGEEVX */
+
+} /* sgeevx_ */
diff --git a/contrib/libs/clapack/sgegs.c b/contrib/libs/clapack/sgegs.c
new file mode 100644
index 0000000000..86719621d4
--- /dev/null
+++ b/contrib/libs/clapack/sgegs.c
@@ -0,0 +1,545 @@
+/* sgegs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b36 = 0.f;
+static real c_b37 = 1.f;
+
+/* NOTE(review): f2c-generated translation of LAPACK's deprecated SGEGS */
+/* driver (generalized real Schur factorization of a matrix pair (A,B)). */
+/* Do not hand-edit the logic; it mirrors the Fortran reference exactly. */
+/* Subroutine */ int sgegs_(char *jobvsl, char *jobvsr, integer *n, real *a,
+	integer *lda, real *b, integer *ldb, real *alphar, real *alphai, real
+	*beta, real *vsl, integer *ldvsl, real *vsr, integer *ldvsr, real *
+	work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, vsl_dim1, vsl_offset,
+	    vsr_dim1, vsr_offset, i__1, i__2;
+
+    /* Local variables */
+    integer nb, nb1, nb2, nb3, ihi, ilo;
+    real eps, anrm, bnrm;
+    integer itau, lopt;
+    extern logical lsame_(char *, char *);
+    integer ileft, iinfo, icols;
+    logical ilvsl;
+    integer iwork;
+    logical ilvsr;
+    integer irows;
+    extern /* Subroutine */ int sggbak_(char *, char *, integer *, integer *,
+	     integer *, real *, real *, integer *, real *, integer *, integer *
+), sggbal_(char *, integer *, real *, integer *,
+	    real *, integer *, integer *, integer *, real *, real *, real *,
+	    integer *);
+    logical ilascl, ilbscl;
+    extern doublereal slamch_(char *), slange_(char *, integer *,
+	    integer *, real *, integer *, real *);
+    real safmin;
+    extern /* Subroutine */ int sgghrd_(char *, char *, integer *, integer *,
+	    integer *, real *, integer *, real *, integer *, real *, integer *
+, real *, integer *, integer *), xerbla_(char *,
+	    integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    real bignum;
+    extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+	    real *, integer *, integer *, real *, integer *, integer *);
+    integer ijobvl, iright;
+    extern /* Subroutine */ int sgeqrf_(integer *, integer *, real *, integer
+	    *, real *, real *, integer *, integer *);
+    integer ijobvr;
+    extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+	    integer *, real *, integer *), slaset_(char *, integer *,
+	    integer *, real *, real *, real *, integer *);
+    real anrmto;
+    integer lwkmin;
+    real bnrmto;
+    extern /* Subroutine */ int shgeqz_(char *, char *, char *, integer *,
+	    integer *, integer *, real *, integer *, real *, integer *, real *
+, real *, real *, real *, integer *, real *, integer *, real *,
+	    integer *, integer *);
+    real smlnum;
+    extern /* Subroutine */ int sorgqr_(integer *, integer *, integer *, real
+	    *, integer *, real *, real *, integer *, integer *);
+    integer lwkopt;
+    logical lquery;
+    extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *,
+	    integer *, real *, integer *, real *, real *, integer *, real *,
+	    integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine SGGES. */
+
+/* SGEGS computes the eigenvalues, real Schur form, and, optionally, */
+/* left and or/right Schur vectors of a real matrix pair (A,B). */
+/* Given two square matrices A and B, the generalized real Schur */
+/* factorization has the form */
+
+/* A = Q*S*Z**T, B = Q*T*Z**T */
+
+/* where Q and Z are orthogonal matrices, T is upper triangular, and S */
+/* is an upper quasi-triangular matrix with 1-by-1 and 2-by-2 diagonal */
+/* blocks, the 2-by-2 blocks corresponding to complex conjugate pairs */
+/* of eigenvalues of (A,B). The columns of Q are the left Schur vectors */
+/* and the columns of Z are the right Schur vectors. */
+
+/* If only the eigenvalues of (A,B) are needed, the driver routine */
+/* SGEGV should be used instead. See SGEGV for a description of the */
+/* eigenvalues of the generalized nonsymmetric eigenvalue problem */
+/* (GNEP). */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVSL (input) CHARACTER*1 */
+/* = 'N': do not compute the left Schur vectors; */
+/* = 'V': compute the left Schur vectors (returned in VSL). */
+
+/* JOBVSR (input) CHARACTER*1 */
+/* = 'N': do not compute the right Schur vectors; */
+/* = 'V': compute the right Schur vectors (returned in VSR). */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VSL, and VSR. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA, N) */
+/* On entry, the matrix A. */
+/* On exit, the upper quasi-triangular matrix S from the */
+/* generalized real Schur factorization. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) REAL array, dimension (LDB, N) */
+/* On entry, the matrix B. */
+/* On exit, the upper triangular matrix T from the generalized */
+/* real Schur factorization. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* ALPHAR (output) REAL array, dimension (N) */
+/* The real parts of each scalar alpha defining an eigenvalue */
+/* of GNEP. */
+
+/* ALPHAI (output) REAL array, dimension (N) */
+/* The imaginary parts of each scalar alpha defining an */
+/* eigenvalue of GNEP. If ALPHAI(j) is zero, then the j-th */
+/* eigenvalue is real; if positive, then the j-th and (j+1)-st */
+/* eigenvalues are a complex conjugate pair, with */
+/* ALPHAI(j+1) = -ALPHAI(j). */
+
+/* BETA (output) REAL array, dimension (N) */
+/* The scalars beta that define the eigenvalues of GNEP. */
+/* Together, the quantities alpha = (ALPHAR(j),ALPHAI(j)) and */
+/* beta = BETA(j) represent the j-th eigenvalue of the matrix */
+/* pair (A,B), in one of the forms lambda = alpha/beta or */
+/* mu = beta/alpha. Since either lambda or mu may overflow, */
+/* they should not, in general, be computed. */
+
+/* VSL (output) REAL array, dimension (LDVSL,N) */
+/* If JOBVSL = 'V', the matrix of left Schur vectors Q. */
+/* Not referenced if JOBVSL = 'N'. */
+
+/* LDVSL (input) INTEGER */
+/* The leading dimension of the matrix VSL. LDVSL >=1, and */
+/* if JOBVSL = 'V', LDVSL >= N. */
+
+/* VSR (output) REAL array, dimension (LDVSR,N) */
+/* If JOBVSR = 'V', the matrix of right Schur vectors Z. */
+/* Not referenced if JOBVSR = 'N'. */
+
+/* LDVSR (input) INTEGER */
+/* The leading dimension of the matrix VSR. LDVSR >= 1, and */
+/* if JOBVSR = 'V', LDVSR >= N. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,4*N). */
+/* For good performance, LWORK must generally be larger. */
+/* To compute the optimal value of LWORK, call ILAENV to get */
+/* blocksizes (for SGEQRF, SORMQR, and SORGQR.) Then compute: */
+/* NB -- MAX of the blocksizes for SGEQRF, SORMQR, and SORGQR */
+/* The optimal LWORK is 2*N + N*(NB+1). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1,...,N: */
+/* The QZ iteration failed. (A,B) are not in Schur */
+/* form, but ALPHAR(j), ALPHAI(j), and BETA(j) should */
+/* be correct for j=INFO+1,...,N. */
+/* > N: errors that usually indicate LAPACK problems: */
+/* =N+1: error return from SGGBAL */
+/* =N+2: error return from SGEQRF */
+/* =N+3: error return from SORMQR */
+/* =N+4: error return from SORGQR */
+/* =N+5: error return from SGGHRD */
+/* =N+6: error return from SHGEQZ (other than failed */
+/* iteration) */
+/* =N+7: error return from SGGBAK (computing VSL) */
+/* =N+8: error return from SGGBAK (computing VSR) */
+/* =N+9: error return from SLASCL (various places) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+    /* Parameter adjustments */
+/* f2c 1-based indexing: shift each array pointer back by one */
+/* column+row so that Fortran-style a[i + j*lda] addressing works. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --alphar;
+    --alphai;
+    --beta;
+    vsl_dim1 = *ldvsl;
+    vsl_offset = 1 + vsl_dim1;
+    vsl -= vsl_offset;
+    vsr_dim1 = *ldvsr;
+    vsr_offset = 1 + vsr_dim1;
+    vsr -= vsr_offset;
+    --work;
+
+    /* Function Body */
+/* ijobvl/ijobvr encode the job flags: 1 = 'N', 2 = 'V', -1 = invalid */
+    if (lsame_(jobvsl, "N")) {
+	ijobvl = 1;
+	ilvsl = FALSE_;
+    } else if (lsame_(jobvsl, "V")) {
+	ijobvl = 2;
+	ilvsl = TRUE_;
+    } else {
+	ijobvl = -1;
+	ilvsl = FALSE_;
+    }
+
+    if (lsame_(jobvsr, "N")) {
+	ijobvr = 1;
+	ilvsr = FALSE_;
+    } else if (lsame_(jobvsr, "V")) {
+	ijobvr = 2;
+	ilvsr = TRUE_;
+    } else {
+	ijobvr = -1;
+	ilvsr = FALSE_;
+    }
+
+/* Test the input arguments */
+
+/* Computing MAX */
+/* minimum workspace is 4*N words (f2c renders 4*N as N << 2) */
+    i__1 = *n << 2;
+    lwkmin = max(i__1,1);
+    lwkopt = lwkmin;
+    work[1] = (real) lwkopt;
+    lquery = *lwork == -1;
+    *info = 0;
+    if (ijobvl <= 0) {
+	*info = -1;
+    } else if (ijobvr <= 0) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    } else if (*ldvsl < 1 || ilvsl && *ldvsl < *n) {
+	*info = -12;
+    } else if (*ldvsr < 1 || ilvsr && *ldvsr < *n) {
+	*info = -14;
+    } else if (*lwork < lwkmin && ! lquery) {
+	*info = -16;
+    }
+
+    if (*info == 0) {
+/* Estimate the optimal workspace from the largest block size */
+/* among SGEQRF, SORMQR and SORGQR: LWORK_opt = 2*N + N*(NB+1). */
+	nb1 = ilaenv_(&c__1, "SGEQRF", " ", n, n, &c_n1, &c_n1);
+	nb2 = ilaenv_(&c__1, "SORMQR", " ", n, n, n, &c_n1);
+	nb3 = ilaenv_(&c__1, "SORGQR", " ", n, n, n, &c_n1);
+/* Computing MAX */
+	i__1 = max(nb1,nb2);
+	nb = max(i__1,nb3);
+	lopt = (*n << 1) + *n * (nb + 1);
+	work[1] = (real) lopt;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGEGS ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Get machine constants */
+
+    eps = slamch_("E") * slamch_("B");
+    safmin = slamch_("S");
+    smlnum = *n * safmin / eps;
+    bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+    anrm = slange_("M", n, n, &a[a_offset], lda, &work[1]);
+    ilascl = FALSE_;
+    if (anrm > 0.f && anrm < smlnum) {
+	anrmto = smlnum;
+	ilascl = TRUE_;
+    } else if (anrm > bignum) {
+	anrmto = bignum;
+	ilascl = TRUE_;
+    }
+
+    if (ilascl) {
+	slascl_("G", &c_n1, &c_n1, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+    }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+    bnrm = slange_("M", n, n, &b[b_offset], ldb, &work[1]);
+    ilbscl = FALSE_;
+    if (bnrm > 0.f && bnrm < smlnum) {
+	bnrmto = smlnum;
+	ilbscl = TRUE_;
+    } else if (bnrm > bignum) {
+	bnrmto = bignum;
+	ilbscl = TRUE_;
+    }
+
+    if (ilbscl) {
+	slascl_("G", &c_n1, &c_n1, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+    }
+
+/* Permute the matrix to make it more nearly triangular */
+/* Workspace layout: (2*N words -- "work..." not actually used) */
+/* left_permutation, right_permutation, work... */
+
+    ileft = 1;
+    iright = *n + 1;
+    iwork = iright + *n;
+    sggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &work[
+	    ileft], &work[iright], &work[iwork], &iinfo);
+    if (iinfo != 0) {
+	*info = *n + 1;
+	goto L10;
+    }
+
+/* Reduce B to triangular form, and initialize VSL and/or VSR */
+/* Workspace layout: ("work..." must have at least N words) */
+/* left_permutation, right_permutation, tau, work... */
+
+    irows = ihi + 1 - ilo;
+    icols = *n + 1 - ilo;
+    itau = iwork;
+    iwork = itau + irows;
+    i__1 = *lwork + 1 - iwork;
+    sgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+	    iwork], &i__1, &iinfo);
+/* On success each blocked call reports its preferred workspace in */
+/* work[iwork]; fold that into lwkopt so WORK(1) reflects the true */
+/* optimum on exit. */
+    if (iinfo >= 0) {
+/* Computing MAX */
+	i__1 = lwkopt, i__2 = (integer) work[iwork] + iwork - 1;
+	lwkopt = max(i__1,i__2);
+    }
+    if (iinfo != 0) {
+	*info = *n + 2;
+	goto L10;
+    }
+
+    i__1 = *lwork + 1 - iwork;
+    sormqr_("L", "T", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+	    work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwork], &i__1, &
+	    iinfo);
+    if (iinfo >= 0) {
+/* Computing MAX */
+	i__1 = lwkopt, i__2 = (integer) work[iwork] + iwork - 1;
+	lwkopt = max(i__1,i__2);
+    }
+    if (iinfo != 0) {
+	*info = *n + 3;
+	goto L10;
+    }
+
+    if (ilvsl) {
+	slaset_("Full", n, n, &c_b36, &c_b37, &vsl[vsl_offset], ldvsl);
+	i__1 = irows - 1;
+	i__2 = irows - 1;
+	slacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vsl[ilo
+		+ 1 + ilo * vsl_dim1], ldvsl);
+	i__1 = *lwork + 1 - iwork;
+	sorgqr_(&irows, &irows, &irows, &vsl[ilo + ilo * vsl_dim1], ldvsl, &
+		work[itau], &work[iwork], &i__1, &iinfo);
+	if (iinfo >= 0) {
+/* Computing MAX */
+	    i__1 = lwkopt, i__2 = (integer) work[iwork] + iwork - 1;
+	    lwkopt = max(i__1,i__2);
+	}
+	if (iinfo != 0) {
+	    *info = *n + 4;
+	    goto L10;
+	}
+    }
+
+    if (ilvsr) {
+	slaset_("Full", n, n, &c_b36, &c_b37, &vsr[vsr_offset], ldvsr);
+    }
+
+/* Reduce to generalized Hessenberg form */
+
+    sgghrd_(jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset],
+	    ldb, &vsl[vsl_offset], ldvsl, &vsr[vsr_offset], ldvsr, &iinfo);
+    if (iinfo != 0) {
+	*info = *n + 5;
+	goto L10;
+    }
+
+/* Perform QZ algorithm, computing Schur vectors if desired */
+/* Workspace layout: ("work..." must have at least 1 word) */
+/* left_permutation, right_permutation, work... */
+
+/* tau is no longer needed; reuse its slot for the QZ workspace */
+    iwork = itau;
+    i__1 = *lwork + 1 - iwork;
+    shgeqz_("S", jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+	    b_offset], ldb, &alphar[1], &alphai[1], &beta[1], &vsl[vsl_offset]
+, ldvsl, &vsr[vsr_offset], ldvsr, &work[iwork], &i__1, &iinfo);
+    if (iinfo >= 0) {
+/* Computing MAX */
+	i__1 = lwkopt, i__2 = (integer) work[iwork] + iwork - 1;
+	lwkopt = max(i__1,i__2);
+    }
+    if (iinfo != 0) {
+/* Map SHGEQZ failure codes onto SGEGS's INFO convention */
+	if (iinfo > 0 && iinfo <= *n) {
+	    *info = iinfo;
+	} else if (iinfo > *n && iinfo <= *n << 1) {
+	    *info = iinfo - *n;
+	} else {
+	    *info = *n + 6;
+	}
+	goto L10;
+    }
+
+/* Apply permutation to VSL and VSR */
+
+    if (ilvsl) {
+	sggbak_("P", "L", n, &ilo, &ihi, &work[ileft], &work[iright], n, &vsl[
+		vsl_offset], ldvsl, &iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 7;
+	    goto L10;
+	}
+    }
+    if (ilvsr) {
+	sggbak_("P", "R", n, &ilo, &ihi, &work[ileft], &work[iright], n, &vsr[
+		vsr_offset], ldvsr, &iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 8;
+	    goto L10;
+	}
+    }
+
+/* Undo scaling */
+
+    if (ilascl) {
+	slascl_("H", &c_n1, &c_n1, &anrmto, &anrm, n, n, &a[a_offset], lda, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+	slascl_("G", &c_n1, &c_n1, &anrmto, &anrm, n, &c__1, &alphar[1], n, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+	slascl_("G", &c_n1, &c_n1, &anrmto, &anrm, n, &c__1, &alphai[1], n, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+    }
+
+    if (ilbscl) {
+	slascl_("U", &c_n1, &c_n1, &bnrmto, &bnrm, n, n, &b[b_offset], ldb, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+	slascl_("G", &c_n1, &c_n1, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 9;
+	    return 0;
+	}
+    }
+
+/* Common exit: report the best workspace size seen in WORK(1) */
+L10:
+    work[1] = (real) lwkopt;
+
+    return 0;
+
+/* End of SGEGS */
+
+} /* sgegs_ */
diff --git a/contrib/libs/clapack/sgegv.c b/contrib/libs/clapack/sgegv.c
new file mode 100644
index 0000000000..34704c6e0b
--- /dev/null
+++ b/contrib/libs/clapack/sgegv.c
@@ -0,0 +1,837 @@
+/* sgegv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b27 = 1.f;
+static real c_b38 = 0.f;
+
+/* Subroutine */ int sgegv_(char *jobvl, char *jobvr, integer *n, real *a, 
+	integer *lda, real *b, integer *ldb, real *alphar, real *alphai, real 
+	*beta, real *vl, integer *ldvl, real *vr, integer *ldvr, real *work, 
+	integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, vl_dim1, vl_offset, vr_dim1, 
+	    vr_offset, i__1, i__2;
+    real r__1, r__2, r__3, r__4;
+
+    /* Local variables */
+    integer jc, nb, in, jr, nb1, nb2, nb3, ihi, ilo;
+    real eps;
+    logical ilv;
+    real absb, anrm, bnrm;
+    integer itau;
+    real temp;
+    logical ilvl, ilvr;
+    integer lopt;
+    real anrm1, anrm2, bnrm1, bnrm2, absai, scale, absar, sbeta;
+    extern logical lsame_(char *, char *);
+    integer ileft, iinfo, icols, iwork, irows;
+    real salfai;
+    extern /* Subroutine */ int sggbak_(char *, char *, integer *, integer *, 
+	    integer *, real *, real *, integer *, real *, integer *, integer *
+), sggbal_(char *, integer *, real *, integer *, 
+	    real *, integer *, integer *, integer *, real *, real *, real *, 
+	    integer *);
+    real salfar;
+    extern doublereal slamch_(char *), slange_(char *, integer *, 
+	    integer *, real *, integer *, real *);
+    real safmin;
+    extern /* Subroutine */ int sgghrd_(char *, char *, integer *, integer *, 
+	    integer *, real *, integer *, real *, integer *, real *, integer *
+, real *, integer *, integer *);
+    real safmax;
+    char chtemp[1];
+    logical ldumma[1];
+    extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *, 
+	    real *, integer *, integer *, real *, integer *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    integer ijobvl, iright;
+    logical ilimit;
+    extern /* Subroutine */ int sgeqrf_(integer *, integer *, real *, integer 
+	    *, real *, real *, integer *, integer *);
+    integer ijobvr;
+    extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *, 
+	    integer *, real *, integer *), slaset_(char *, integer *, 
+	    integer *, real *, real *, real *, integer *), stgevc_(
+	    char *, char *, logical *, integer *, real *, integer *, real *, 
+	    integer *, real *, integer *, real *, integer *, integer *, 
+	    integer *, real *, integer *);
+    real onepls;
+    integer lwkmin;
+    extern /* Subroutine */ int shgeqz_(char *, char *, char *, integer *, 
+	    integer *, integer *, real *, integer *, real *, integer *, real *
+, real *, real *, real *, integer *, real *, integer *, real *, 
+	    integer *, integer *), sorgqr_(integer *, 
+	    integer *, integer *, real *, integer *, real *, real *, integer *
+, integer *);
+    integer lwkopt;
+    logical lquery;
+    extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *, 
+	    integer *, real *, integer *, real *, real *, integer *, real *, 
+	    integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine SGGEV. */
+
+/* SGEGV computes the eigenvalues and, optionally, the left and/or right */
+/* eigenvectors of a real matrix pair (A,B). */
+/* Given two square matrices A and B, */
+/* the generalized nonsymmetric eigenvalue problem (GNEP) is to find the */
+/* eigenvalues lambda and corresponding (non-zero) eigenvectors x such */
+/* that */
+
+/* A*x = lambda*B*x. */
+
+/* An alternate form is to find the eigenvalues mu and corresponding */
+/* eigenvectors y such that */
+
+/* mu*A*y = B*y. */
+
+/* These two forms are equivalent with mu = 1/lambda and x = y if */
+/* neither lambda nor mu is zero. In order to deal with the case that */
+/* lambda or mu is zero or small, two values alpha and beta are returned */
+/* for each eigenvalue, such that lambda = alpha/beta and */
+/* mu = beta/alpha. */
+
+/* The vectors x and y in the above equations are right eigenvectors of */
+/* the matrix pair (A,B). Vectors u and v satisfying */
+
+/* u**H*A = lambda*u**H*B or mu*v**H*A = v**H*B */
+
+/* are left eigenvectors of (A,B). */
+
+/* Note: this routine performs "full balancing" on A and B -- see */
+/* "Further Details", below. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVL (input) CHARACTER*1 */
+/* = 'N': do not compute the left generalized eigenvectors; */
+/* = 'V': compute the left generalized eigenvectors (returned */
+/* in VL). */
+
+/* JOBVR (input) CHARACTER*1 */
+/* = 'N': do not compute the right generalized eigenvectors; */
+/* = 'V': compute the right generalized eigenvectors (returned */
+/* in VR). */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VL, and VR. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA, N) */
+/* On entry, the matrix A. */
+/* If JOBVL = 'V' or JOBVR = 'V', then on exit A */
+/* contains the real Schur form of A from the generalized Schur */
+/* factorization of the pair (A,B) after balancing. */
+/* If no eigenvectors were computed, then only the diagonal */
+/* blocks from the Schur form will be correct. See SGGHRD and */
+/* SHGEQZ for details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) REAL array, dimension (LDB, N) */
+/* On entry, the matrix B. */
+/* If JOBVL = 'V' or JOBVR = 'V', then on exit B contains the */
+/* upper triangular matrix obtained from B in the generalized */
+/* Schur factorization of the pair (A,B) after balancing. */
+/* If no eigenvectors were computed, then only those elements of */
+/* B corresponding to the diagonal blocks from the Schur form of */
+/* A will be correct. See SGGHRD and SHGEQZ for details. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* ALPHAR (output) REAL array, dimension (N) */
+/* The real parts of each scalar alpha defining an eigenvalue of */
+/* GNEP. */
+
+/* ALPHAI (output) REAL array, dimension (N) */
+/* The imaginary parts of each scalar alpha defining an */
+/* eigenvalue of GNEP. If ALPHAI(j) is zero, then the j-th */
+/* eigenvalue is real; if positive, then the j-th and */
+/* (j+1)-st eigenvalues are a complex conjugate pair, with */
+/* ALPHAI(j+1) = -ALPHAI(j). */
+
+/* BETA (output) REAL array, dimension (N) */
+/* The scalars beta that define the eigenvalues of GNEP. */
+
+/* Together, the quantities alpha = (ALPHAR(j),ALPHAI(j)) and */
+/* beta = BETA(j) represent the j-th eigenvalue of the matrix */
+/* pair (A,B), in one of the forms lambda = alpha/beta or */
+/* mu = beta/alpha. Since either lambda or mu may overflow, */
+/* they should not, in general, be computed. */
+
+/* VL (output) REAL array, dimension (LDVL,N) */
+/* If JOBVL = 'V', the left eigenvectors u(j) are stored */
+/* in the columns of VL, in the same order as their eigenvalues. */
+/* If the j-th eigenvalue is real, then u(j) = VL(:,j). */
+/* If the j-th and (j+1)-st eigenvalues form a complex conjugate */
+/* pair, then */
+/* u(j) = VL(:,j) + i*VL(:,j+1) */
+/* and */
+/* u(j+1) = VL(:,j) - i*VL(:,j+1). */
+
+/* Each eigenvector is scaled so that its largest component has */
+/* abs(real part) + abs(imag. part) = 1, except for eigenvectors */
+/* corresponding to an eigenvalue with alpha = beta = 0, which */
+/* are set to zero. */
+/* Not referenced if JOBVL = 'N'. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the matrix VL. LDVL >= 1, and */
+/* if JOBVL = 'V', LDVL >= N. */
+
+/* VR (output) REAL array, dimension (LDVR,N) */
+/* If JOBVR = 'V', the right eigenvectors x(j) are stored */
+/* in the columns of VR, in the same order as their eigenvalues. */
+/* If the j-th eigenvalue is real, then x(j) = VR(:,j). */
+/* If the j-th and (j+1)-st eigenvalues form a complex conjugate */
+/* pair, then */
+/* x(j) = VR(:,j) + i*VR(:,j+1) */
+/* and */
+/* x(j+1) = VR(:,j) - i*VR(:,j+1). */
+
+/* Each eigenvector is scaled so that its largest component has */
+/* abs(real part) + abs(imag. part) = 1, except for eigenvalues */
+/* corresponding to an eigenvalue with alpha = beta = 0, which */
+/* are set to zero. */
+/* Not referenced if JOBVR = 'N'. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the matrix VR. LDVR >= 1, and */
+/* if JOBVR = 'V', LDVR >= N. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,8*N). */
+/* For good performance, LWORK must generally be larger. */
+/* To compute the optimal value of LWORK, call ILAENV to get */
+/* blocksizes (for SGEQRF, SORMQR, and SORGQR.) Then compute: */
+/* NB -- MAX of the blocksizes for SGEQRF, SORMQR, and SORGQR; */
+/* The optimal LWORK is: */
+/* 2*N + MAX( 6*N, N*(NB+1) ). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1,...,N: */
+/* The QZ iteration failed. No eigenvectors have been */
+/* calculated, but ALPHAR(j), ALPHAI(j), and BETA(j) */
+/* should be correct for j=INFO+1,...,N. */
+/* > N: errors that usually indicate LAPACK problems: */
+/* =N+1: error return from SGGBAL */
+/* =N+2: error return from SGEQRF */
+/* =N+3: error return from SORMQR */
+/* =N+4: error return from SORGQR */
+/* =N+5: error return from SGGHRD */
+/* =N+6: error return from SHGEQZ (other than failed */
+/* iteration) */
+/* =N+7: error return from STGEVC */
+/* =N+8: error return from SGGBAK (computing VL) */
+/* =N+9: error return from SGGBAK (computing VR) */
+/* =N+10: error return from SLASCL (various calls) */
+
+/* Further Details */
+/* =============== */
+
+/* Balancing */
+/* --------- */
+
+/* This driver calls SGGBAL to both permute and scale rows and columns */
+/* of A and B. The permutations PL and PR are chosen so that PL*A*PR */
+/* and PL*B*R will be upper triangular except for the diagonal blocks */
+/* A(i:j,i:j) and B(i:j,i:j), with i and j as close together as */
+/* possible. The diagonal scaling matrices DL and DR are chosen so */
+/* that the pair DL*PL*A*PR*DR, DL*PL*B*PR*DR have elements close to */
+/* one (except for the elements that start out zero.) */
+
+/* After the eigenvalues and eigenvectors of the balanced matrices */
+/* have been computed, SGGBAK transforms the eigenvectors back to what */
+/* they would have been (in perfect arithmetic) if they had not been */
+/* balanced. */
+
+/* Contents of A and B on Exit */
+/* -------- -- - --- - -- ---- */
+
+/* If any eigenvectors are computed (either JOBVL='V' or JOBVR='V' or */
+/* both), then on exit the arrays A and B will contain the real Schur */
+/* form[*] of the "balanced" versions of A and B. If no eigenvectors */
+/* are computed, then only the diagonal blocks will be correct. */
+
+/* [*] See SHGEQZ, SGEGS, or read the book "Matrix Computations", */
+/* by Golub & van Loan, pub. by Johns Hopkins U. Press. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --alphar;
+    --alphai;
+    --beta;
+    vl_dim1 = *ldvl;
+    vl_offset = 1 + vl_dim1;
+    vl -= vl_offset;
+    vr_dim1 = *ldvr;
+    vr_offset = 1 + vr_dim1;
+    vr -= vr_offset;
+    --work;
+
+    /* Function Body */
+    if (lsame_(jobvl, "N")) {
+	ijobvl = 1;
+	ilvl = FALSE_;
+    } else if (lsame_(jobvl, "V")) {
+	ijobvl = 2;
+	ilvl = TRUE_;
+    } else {
+	ijobvl = -1;
+	ilvl = FALSE_;
+    }
+
+    if (lsame_(jobvr, "N")) {
+	ijobvr = 1;
+	ilvr = FALSE_;
+    } else if (lsame_(jobvr, "V")) {
+	ijobvr = 2;
+	ilvr = TRUE_;
+    } else {
+	ijobvr = -1;
+	ilvr = FALSE_;
+    }
+    ilv = ilvl || ilvr;
+
+/* Test the input arguments */
+
+/* Computing MAX */
+    i__1 = *n << 3;
+    lwkmin = max(i__1,1);
+    lwkopt = lwkmin;
+    work[1] = (real) lwkopt;
+    lquery = *lwork == -1;
+    *info = 0;
+    if (ijobvl <= 0) {
+	*info = -1;
+    } else if (ijobvr <= 0) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    } else if (*ldvl < 1 || ilvl && *ldvl < *n) {
+	*info = -12;
+    } else if (*ldvr < 1 || ilvr && *ldvr < *n) {
+	*info = -14;
+    } else if (*lwork < lwkmin && ! lquery) {
+	*info = -16;
+    }
+
+    if (*info == 0) {
+	nb1 = ilaenv_(&c__1, "SGEQRF", " ", n, n, &c_n1, &c_n1);
+	nb2 = ilaenv_(&c__1, "SORMQR", " ", n, n, n, &c_n1);
+	nb3 = ilaenv_(&c__1, "SORGQR", " ", n, n, n, &c_n1);
+/* Computing MAX */
+	i__1 = max(nb1,nb2);
+	nb = max(i__1,nb3);
+/* Computing MAX */
+	i__1 = *n * 6, i__2 = *n * (nb + 1);
+	lopt = (*n << 1) + max(i__1,i__2);
+	work[1] = (real) lopt;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGEGV ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Get machine constants */
+
+    eps = slamch_("E") * slamch_("B");
+    safmin = slamch_("S");
+/* NOTE(review): SAFMIN is doubled, presumably as a safety margin before */
+/* forming SAFMAX = 1/SAFMIN -- confirm against reference SGEGV. */
+    safmin += safmin;
+    safmax = 1.f / safmin;
+    onepls = eps * 4 + 1.f;
+
+/* Scale A */
+
+    anrm = slange_("M", n, n, &a[a_offset], lda, &work[1]);
+/* Factor ANRM as ANRM1*ANRM2 for the un-scaling of alpha/beta below, */
+/* so dividing by ANRM cannot overflow when ANRM is tiny. */
+    anrm1 = anrm;
+    anrm2 = 1.f;
+    if (anrm < 1.f) {
+	if (safmax * anrm < 1.f) {
+	    anrm1 = safmin;
+	    anrm2 = safmax * anrm;
+	}
+    }
+
+    if (anrm > 0.f) {
+	slascl_("G", &c_n1, &c_n1, &anrm, &c_b27, n, n, &a[a_offset], lda, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 10;
+	    return 0;
+	}
+    }
+
+/* Scale B */
+
+    bnrm = slange_("M", n, n, &b[b_offset], ldb, &work[1]);
+/* Factor BNRM the same way for the un-scaling of BETA below. */
+    bnrm1 = bnrm;
+    bnrm2 = 1.f;
+    if (bnrm < 1.f) {
+	if (safmax * bnrm < 1.f) {
+	    bnrm1 = safmin;
+	    bnrm2 = safmax * bnrm;
+	}
+    }
+
+    if (bnrm > 0.f) {
+	slascl_("G", &c_n1, &c_n1, &bnrm, &c_b27, n, n, &b[b_offset], ldb, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 10;
+	    return 0;
+	}
+    }
+
+/* Permute the matrix to make it more nearly triangular */
+/* Workspace layout: (8*N words -- "work" requires 6*N words) */
+/* left_permutation, right_permutation, work... */
+
+    ileft = 1;
+    iright = *n + 1;
+    iwork = iright + *n;
+    sggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &work[
+	    ileft], &work[iright], &work[iwork], &iinfo);
+    if (iinfo != 0) {
+	*info = *n + 1;
+	goto L120;
+    }
+
+/* Reduce B to triangular form, and initialize VL and/or VR */
+/* Workspace layout: ("work..." must have at least N words) */
+/* left_permutation, right_permutation, tau, work... */
+
+    irows = ihi + 1 - ilo;
+    if (ilv) {
+	icols = *n + 1 - ilo;
+    } else {
+	icols = irows;
+    }
+    itau = iwork;
+    iwork = itau + irows;
+    i__1 = *lwork + 1 - iwork;
+    sgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+	    iwork], &i__1, &iinfo);
+    if (iinfo >= 0) {
+/* Computing MAX */
+	i__1 = lwkopt, i__2 = (integer) work[iwork] + iwork - 1;
+	lwkopt = max(i__1,i__2);
+    }
+    if (iinfo != 0) {
+	*info = *n + 2;
+	goto L120;
+    }
+
+    i__1 = *lwork + 1 - iwork;
+    sormqr_("L", "T", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+	    work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwork], &i__1, &
+	    iinfo);
+    if (iinfo >= 0) {
+/* Computing MAX */
+	i__1 = lwkopt, i__2 = (integer) work[iwork] + iwork - 1;
+	lwkopt = max(i__1,i__2);
+    }
+    if (iinfo != 0) {
+	*info = *n + 3;
+	goto L120;
+    }
+
+    if (ilvl) {
+	slaset_("Full", n, n, &c_b38, &c_b27, &vl[vl_offset], ldvl)
+		;
+	i__1 = irows - 1;
+	i__2 = irows - 1;
+	slacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vl[ilo + 
+		1 + ilo * vl_dim1], ldvl);
+	i__1 = *lwork + 1 - iwork;
+	sorgqr_(&irows, &irows, &irows, &vl[ilo + ilo * vl_dim1], ldvl, &work[
+		itau], &work[iwork], &i__1, &iinfo);
+	if (iinfo >= 0) {
+/* Computing MAX */
+	    i__1 = lwkopt, i__2 = (integer) work[iwork] + iwork - 1;
+	    lwkopt = max(i__1,i__2);
+	}
+	if (iinfo != 0) {
+	    *info = *n + 4;
+	    goto L120;
+	}
+    }
+
+    if (ilvr) {
+	slaset_("Full", n, n, &c_b38, &c_b27, &vr[vr_offset], ldvr)
+		;
+    }
+
+/* Reduce to generalized Hessenberg form */
+
+    if (ilv) {
+
+/* Eigenvectors requested -- work on whole matrix. */
+
+	sgghrd_(jobvl, jobvr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset], 
+		ldb, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, &iinfo);
+    } else {
+	sgghrd_("N", "N", &irows, &c__1, &irows, &a[ilo + ilo * a_dim1], lda, 
+		&b[ilo + ilo * b_dim1], ldb, &vl[vl_offset], ldvl, &vr[
+		vr_offset], ldvr, &iinfo);
+    }
+    if (iinfo != 0) {
+	*info = *n + 5;
+	goto L120;
+    }
+
+/* Perform QZ algorithm */
+/* Workspace layout: ("work..." must have at least 1 word) */
+/* left_permutation, right_permutation, work... */
+
+    iwork = itau;
+/* Request the full Schur form ('S') only when eigenvectors are wanted; */
+/* otherwise eigenvalues only ('E'). */
+    if (ilv) {
+	*(unsigned char *)chtemp = 'S';
+    } else {
+	*(unsigned char *)chtemp = 'E';
+    }
+    i__1 = *lwork + 1 - iwork;
+    shgeqz_(chtemp, jobvl, jobvr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+	    b_offset], ldb, &alphar[1], &alphai[1], &beta[1], &vl[vl_offset], 
+	    ldvl, &vr[vr_offset], ldvr, &work[iwork], &i__1, &iinfo);
+    if (iinfo >= 0) {
+/* Computing MAX */
+	i__1 = lwkopt, i__2 = (integer) work[iwork] + iwork - 1;
+	lwkopt = max(i__1,i__2);
+    }
+    if (iinfo != 0) {
+	if (iinfo > 0 && iinfo <= *n) {
+	    *info = iinfo;
+	} else if (iinfo > *n && iinfo <= *n << 1) {
+	    *info = iinfo - *n;
+	} else {
+	    *info = *n + 6;
+	}
+	goto L120;
+    }
+
+    if (ilv) {
+
+/* Compute Eigenvectors (STGEVC requires 6*N words of workspace) */
+
+	if (ilvl) {
+	    if (ilvr) {
+		*(unsigned char *)chtemp = 'B';
+	    } else {
+		*(unsigned char *)chtemp = 'L';
+	    }
+	} else {
+	    *(unsigned char *)chtemp = 'R';
+	}
+
+	stgevc_(chtemp, "B", ldumma, n, &a[a_offset], lda, &b[b_offset], ldb, 
+		&vl[vl_offset], ldvl, &vr[vr_offset], ldvr, n, &in, &work[
+		iwork], &iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 7;
+	    goto L120;
+	}
+
+/* Undo balancing on VL and VR, rescale */
+
+	if (ilvl) {
+	    sggbak_("P", "L", n, &ilo, &ihi, &work[ileft], &work[iright], n, &
+		    vl[vl_offset], ldvl, &iinfo);
+	    if (iinfo != 0) {
+		*info = *n + 8;
+		goto L120;
+	    }
+	    i__1 = *n;
+	    for (jc = 1; jc <= i__1; ++jc) {
+		if (alphai[jc] < 0.f) {
+		    goto L50;
+		}
+		temp = 0.f;
+		if (alphai[jc] == 0.f) {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+			r__2 = temp, r__3 = (r__1 = vl[jr + jc * vl_dim1], 
+				dabs(r__1));
+			temp = dmax(r__2,r__3);
+/* L10: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+			r__3 = temp, r__4 = (r__1 = vl[jr + jc * vl_dim1], 
+				dabs(r__1)) + (r__2 = vl[jr + (jc + 1) * 
+				vl_dim1], dabs(r__2));
+			temp = dmax(r__3,r__4);
+/* L20: */
+		    }
+		}
+		if (temp < safmin) {
+		    goto L50;
+		}
+		temp = 1.f / temp;
+		if (alphai[jc] == 0.f) {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+			vl[jr + jc * vl_dim1] *= temp;
+/* L30: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+			vl[jr + jc * vl_dim1] *= temp;
+			vl[jr + (jc + 1) * vl_dim1] *= temp;
+/* L40: */
+		    }
+		}
+L50:
+		;
+	    }
+	}
+	if (ilvr) {
+	    sggbak_("P", "R", n, &ilo, &ihi, &work[ileft], &work[iright], n, &
+		    vr[vr_offset], ldvr, &iinfo);
+	    if (iinfo != 0) {
+		*info = *n + 9;
+		goto L120;
+	    }
+	    i__1 = *n;
+	    for (jc = 1; jc <= i__1; ++jc) {
+		if (alphai[jc] < 0.f) {
+		    goto L100;
+		}
+		temp = 0.f;
+		if (alphai[jc] == 0.f) {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+			r__2 = temp, r__3 = (r__1 = vr[jr + jc * vr_dim1], 
+				dabs(r__1));
+			temp = dmax(r__2,r__3);
+/* L60: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+			r__3 = temp, r__4 = (r__1 = vr[jr + jc * vr_dim1], 
+				dabs(r__1)) + (r__2 = vr[jr + (jc + 1) * 
+				vr_dim1], dabs(r__2));
+			temp = dmax(r__3,r__4);
+/* L70: */
+		    }
+		}
+		if (temp < safmin) {
+		    goto L100;
+		}
+		temp = 1.f / temp;
+		if (alphai[jc] == 0.f) {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+			vr[jr + jc * vr_dim1] *= temp;
+/* L80: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (jr = 1; jr <= i__2; ++jr) {
+			vr[jr + jc * vr_dim1] *= temp;
+			vr[jr + (jc + 1) * vr_dim1] *= temp;
+/* L90: */
+		    }
+		}
+L100:
+		;
+	    }
+	}
+
+/* End of eigenvector calculation */
+
+    }
+
+/* Undo scaling in alpha, beta */
+
+/* Note: this does not give the alpha and beta for the unscaled */
+/* problem. */
+
+/* Un-scaling is limited to avoid underflow in alpha and beta */
+/* if they are significant. */
+
+    i__1 = *n;
+    for (jc = 1; jc <= i__1; ++jc) {
+	absar = (r__1 = alphar[jc], dabs(r__1));
+	absai = (r__1 = alphai[jc], dabs(r__1));
+	absb = (r__1 = beta[jc], dabs(r__1));
+	salfar = anrm * alphar[jc];
+	salfai = anrm * alphai[jc];
+	sbeta = bnrm * beta[jc];
+	ilimit = FALSE_;
+	scale = 1.f;
+
+/* Check for significant underflow in ALPHAI */
+
+/* Computing MAX */
+	r__1 = safmin, r__2 = eps * absar, r__1 = max(r__1,r__2), r__2 = eps *
+		 absb;
+	if (dabs(salfai) < safmin && absai >= dmax(r__1,r__2)) {
+	    ilimit = TRUE_;
+/* Computing MAX */
+	    r__1 = onepls * safmin, r__2 = anrm2 * absai;
+	    scale = onepls * safmin / anrm1 / dmax(r__1,r__2);
+
+	} else if (salfai == 0.f) {
+
+/* If insignificant underflow in ALPHAI, then make the */
+/* conjugate eigenvalue real. */
+
+	    if (alphai[jc] < 0.f && jc > 1) {
+		alphai[jc - 1] = 0.f;
+	    } else if (alphai[jc] > 0.f && jc < *n) {
+		alphai[jc + 1] = 0.f;
+	    }
+	}
+
+/* Check for significant underflow in ALPHAR */
+
+/* Computing MAX */
+	r__1 = safmin, r__2 = eps * absai, r__1 = max(r__1,r__2), r__2 = eps *
+		 absb;
+	if (dabs(salfar) < safmin && absar >= dmax(r__1,r__2)) {
+	    ilimit = TRUE_;
+/* Computing MAX */
+/* Computing MAX */
+	    r__3 = onepls * safmin, r__4 = anrm2 * absar;
+	    r__1 = scale, r__2 = onepls * safmin / anrm1 / dmax(r__3,r__4);
+	    scale = dmax(r__1,r__2);
+	}
+
+/* Check for significant underflow in BETA */
+
+/* Computing MAX */
+	r__1 = safmin, r__2 = eps * absar, r__1 = max(r__1,r__2), r__2 = eps *
+		 absai;
+	if (dabs(sbeta) < safmin && absb >= dmax(r__1,r__2)) {
+	    ilimit = TRUE_;
+/* Computing MAX */
+/* Computing MAX */
+	    r__3 = onepls * safmin, r__4 = bnrm2 * absb;
+	    r__1 = scale, r__2 = onepls * safmin / bnrm1 / dmax(r__3,r__4);
+	    scale = dmax(r__1,r__2);
+	}
+
+/* Check for possible overflow when limiting scaling */
+
+	if (ilimit) {
+/* Computing MAX */
+	    r__1 = dabs(salfar), r__2 = dabs(salfai), r__1 = max(r__1,r__2), 
+		    r__2 = dabs(sbeta);
+	    temp = scale * safmin * dmax(r__1,r__2);
+	    if (temp > 1.f) {
+		scale /= temp;
+	    }
+	    if (scale < 1.f) {
+		ilimit = FALSE_;
+	    }
+	}
+
+/* Recompute un-scaled ALPHAR, ALPHAI, BETA if necessary. */
+
+	if (ilimit) {
+	    salfar = scale * alphar[jc] * anrm;
+	    salfai = scale * alphai[jc] * anrm;
+	    sbeta = scale * beta[jc] * bnrm;
+	}
+	alphar[jc] = salfar;
+	alphai[jc] = salfai;
+	beta[jc] = sbeta;
+/* L110: */
+    }
+
+L120:
+    work[1] = (real) lwkopt;
+
+    return 0;
+
+/* End of SGEGV */
+
+} /* sgegv_ */
diff --git a/contrib/libs/clapack/sgehd2.c b/contrib/libs/clapack/sgehd2.c
new file mode 100644
index 0000000000..e7ba6947db
--- /dev/null
+++ b/contrib/libs/clapack/sgehd2.c
@@ -0,0 +1,190 @@
+/* sgehd2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sgehd2_(integer *n, integer *ilo, integer *ihi, real *a, 
+	integer *lda, real *tau, real *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__;
+    real aii;
+    extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *, 
+	    integer *, real *, real *, integer *, real *), xerbla_(
+	    char *, integer *), slarfg_(integer *, real *, real *, 
+	    integer *, real *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGEHD2 reduces a real general matrix A to upper Hessenberg form H by */
+/* an orthogonal similarity transformation: Q' * A * Q = H . */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that A is already upper triangular in rows */
+/* and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally */
+/* set by a previous call to SGEBAL; otherwise they should be */
+/* set to 1 and N respectively. See Further Details. */
+/* 1 <= ILO <= IHI <= max(1,N). */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the n by n general matrix to be reduced. */
+/* On exit, the upper triangle and the first subdiagonal of A */
+/* are overwritten with the upper Hessenberg matrix H, and the */
+/* elements below the first subdiagonal, with the array TAU, */
+/* represent the orthogonal matrix Q as a product of elementary */
+/* reflectors. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (output) REAL array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of (ihi-ilo) elementary */
+/* reflectors */
+
+/* Q = H(ilo) H(ilo+1) . . . H(ihi-1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on */
+/* exit in A(i+2:ihi,i), and tau in TAU(i). */
+
+/* The contents of A are illustrated by the following example, with */
+/* n = 7, ilo = 2 and ihi = 6: */
+
+/* on entry, on exit, */
+
+/* ( a a a a a a a ) ( a a h h h h a ) */
+/* ( a a a a a a ) ( a h h h h a ) */
+/* ( a a a a a a ) ( h h h h h h ) */
+/* ( a a a a a a ) ( v2 h h h h h ) */
+/* ( a a a a a a ) ( v2 v3 h h h h ) */
+/* ( a a a a a a ) ( v2 v3 v4 h h h ) */
+/* ( a ) ( a ) */
+
+/* where a denotes an element of the original matrix A, h denotes a */
+/* modified element of the upper Hessenberg matrix H, and vi denotes an */
+/* element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*ilo < 1 || *ilo > max(1,*n)) {
+	*info = -2;
+    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGEHD2", &i__1);
+	return 0;
+    }
+
+    i__1 = *ihi - 1;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+
+/* Compute elementary reflector H(i) to annihilate A(i+2:ihi,i) */
+
+	i__2 = *ihi - i__;
+/* Computing MIN */
+	i__3 = i__ + 2;
+	slarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3, *n)+ i__ * 
+		a_dim1], &c__1, &tau[i__]);
+/* Save A(i+1,i) and overwrite it with 1 so that A(i+1:ihi,i) holds the */
+/* reflector vector v (v(1) = 1) for the two SLARF applications below; */
+/* the saved value is restored after both applications. */
+	aii = a[i__ + 1 + i__ * a_dim1];
+	a[i__ + 1 + i__ * a_dim1] = 1.f;
+
+/* Apply H(i) to A(1:ihi,i+1:ihi) from the right */
+
+	i__2 = *ihi - i__;
+	slarf_("Right", ihi, &i__2, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+		i__], &a[(i__ + 1) * a_dim1 + 1], lda, &work[1]);
+
+/* Apply H(i) to A(i+1:ihi,i+1:n) from the left */
+
+	i__2 = *ihi - i__;
+	i__3 = *n - i__;
+	slarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+		i__], &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &work[1]);
+
+	a[i__ + 1 + i__ * a_dim1] = aii;
+/* L10: */
+    }
+
+    return 0;
+
+/* End of SGEHD2 */
+
+} /* sgehd2_ */
diff --git a/contrib/libs/clapack/sgehrd.c b/contrib/libs/clapack/sgehrd.c
new file mode 100644
index 0000000000..7d6e3cd43d
--- /dev/null
+++ b/contrib/libs/clapack/sgehrd.c
@@ -0,0 +1,338 @@
+/* sgehrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+static integer c__65 = 65;
+static real c_b25 = -1.f;
+static real c_b26 = 1.f;
+
+/* Subroutine */ int sgehrd_(integer *n, integer *ilo, integer *ihi, real *a,
+	integer *lda, real *tau, real *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, j;
+    real t[4160] /* was [65][64] */; /* triangular factor T of the block reflector, LDT = 65 */
+    integer ib;
+    real ei;
+    integer nb, nh, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+	    integer *, real *, real *, integer *, real *, integer *, real *,
+	    real *, integer *), strmm_(char *, char *, char *,
+	    char *, integer *, integer *, real *, real *, integer *, real *,
+	    integer *), saxpy_(integer *,
+	    real *, real *, integer *, real *, integer *), sgehd2_(integer *,
+	    integer *, integer *, real *, integer *, real *, real *, integer *
+), slahr2_(integer *, integer *, integer *, real *, integer *,
+	    real *, real *, integer *, real *, integer *), slarfb_(char *,
+	    char *, char *, char *, integer *, integer *, integer *, real *,
+	    integer *, real *, integer *, real *, integer *, real *, integer *
+), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGEHRD reduces a real general matrix A to upper Hessenberg form H by */
+/* an orthogonal similarity transformation: Q' * A * Q = H . */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that A is already upper triangular in rows */
+/* and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally */
+/* set by a previous call to SGEBAL; otherwise they should be */
+/* set to 1 and N respectively. See Further Details. */
+/* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the N-by-N general matrix to be reduced. */
+/* On exit, the upper triangle and the first subdiagonal of A */
+/* are overwritten with the upper Hessenberg matrix H, and the */
+/* elements below the first subdiagonal, with the array TAU, */
+/* represent the orthogonal matrix Q as a product of elementary */
+/* reflectors. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (output) REAL array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). Elements 1:ILO-1 and IHI:N-1 of TAU are set to */
+/* zero. */
+
+/* WORK (workspace/output) REAL array, dimension (LWORK) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of (ihi-ilo) elementary */
+/* reflectors */
+
+/* Q = H(ilo) H(ilo+1) . . . H(ihi-1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on */
+/* exit in A(i+2:ihi,i), and tau in TAU(i). */
+
+/* The contents of A are illustrated by the following example, with */
+/* n = 7, ilo = 2 and ihi = 6: */
+
+/* on entry, on exit, */
+
+/* ( a a a a a a a ) ( a a h h h h a ) */
+/* ( a a a a a a ) ( a h h h h a ) */
+/* ( a a a a a a ) ( h h h h h h ) */
+/* ( a a a a a a ) ( v2 h h h h h ) */
+/* ( a a a a a a ) ( v2 v3 h h h h ) */
+/* ( a a a a a a ) ( v2 v3 v4 h h h ) */
+/* ( a ) ( a ) */
+
+/* where a denotes an element of the original matrix A, h denotes a */
+/* modified element of the upper Hessenberg matrix H, and vi denotes an */
+/* element of the vector defining H(i). */
+
+/* This file is a slight modification of LAPACK-3.0's SGEHRD */
+/* subroutine incorporating improvements proposed by Quintana-Orti and */
+/* Van de Geijn (2005). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset; /* shift pointers so 1-based Fortran indexing works in C */
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+/* Computing MIN */
+    i__1 = 64, i__2 = ilaenv_(&c__1, "SGEHRD", " ", n, ilo, ihi, &c_n1);
+    nb = min(i__1,i__2); /* block size, capped at 64 (the column count of t) */
+    lwkopt = *n * nb;
+    work[1] = (real) lwkopt; /* WORK(1) reports the optimal LWORK */
+    lquery = *lwork == -1; /* LWORK = -1 requests a workspace-size query only */
+    if (*n < 0) {
+	*info = -1;
+    } else if (*ilo < 1 || *ilo > max(1,*n)) {
+	*info = -2;
+    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*lwork < max(1,*n) && ! lquery) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGEHRD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Set elements 1:ILO-1 and IHI:N-1 of TAU to zero */
+
+    i__1 = *ilo - 1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	tau[i__] = 0.f;
+/* L10: */
+    }
+    i__1 = *n - 1;
+    for (i__ = max(1,*ihi); i__ <= i__1; ++i__) {
+	tau[i__] = 0.f;
+/* L20: */
+    }
+
+/* Quick return if possible */
+
+    nh = *ihi - *ilo + 1; /* order of the active (unreduced) block */
+    if (nh <= 1) {
+	work[1] = 1.f;
+	return 0;
+    }
+
+/* Determine the block size */
+
+/* Computing MIN */
+    i__1 = 64, i__2 = ilaenv_(&c__1, "SGEHRD", " ", n, ilo, ihi, &c_n1);
+    nb = min(i__1,i__2);
+    nbmin = 2;
+    iws = 1;
+    if (nb > 1 && nb < nh) {
+
+/* Determine when to cross over from blocked to unblocked code */
+/* (last block is always handled by unblocked code) */
+
+/* Computing MAX */
+	i__1 = nb, i__2 = ilaenv_(&c__3, "SGEHRD", " ", n, ilo, ihi, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < nh) {
+
+/* Determine if workspace is large enough for blocked code */
+
+	    iws = *n * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: determine the */
+/* minimum value of NB, and reduce NB or force use of */
+/* unblocked code */
+
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "SGEHRD", " ", n, ilo, ihi, &
+			c_n1);
+		nbmin = max(i__1,i__2);
+		if (*lwork >= *n * nbmin) {
+		    nb = *lwork / *n;
+		} else {
+		    nb = 1;
+		}
+	    }
+	}
+    }
+    ldwork = *n; /* leading dimension of the N-by-NB panel Y stored in WORK */
+
+    if (nb < nbmin || nb >= nh) {
+
+/* Use unblocked code below */
+
+	i__ = *ilo;
+
+    } else {
+
+/* Use blocked code */
+
+	i__1 = *ihi - 1 - nx;
+	i__2 = nb;
+	for (i__ = *ilo; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+	    i__3 = nb, i__4 = *ihi - i__;
+	    ib = min(i__3,i__4);
+
+/* Reduce columns i:i+ib-1 to Hessenberg form, returning the */
+/* matrices V and T of the block reflector H = I - V*T*V' */
+/* which performs the reduction, and also the matrix Y = A*V*T */
+
+	    slahr2_(ihi, &i__, &ib, &a[i__ * a_dim1 + 1], lda, &tau[i__], t, &
+		    c__65, &work[1], &ldwork);
+
+/* Apply the block reflector H to A(1:ihi,i+ib:ihi) from the */
+/* right, computing A := A - Y * V'. V(i+ib,ib-1) must be set */
+/* to 1 */
+
+	    ei = a[i__ + ib + (i__ + ib - 1) * a_dim1]; /* save the entry overwritten by the unit element of V */
+	    a[i__ + ib + (i__ + ib - 1) * a_dim1] = 1.f;
+	    i__3 = *ihi - i__ - ib + 1;
+	    sgemm_("No transpose", "Transpose", ihi, &i__3, &ib, &c_b25, &
+		    work[1], &ldwork, &a[i__ + ib + i__ * a_dim1], lda, &
+		    c_b26, &a[(i__ + ib) * a_dim1 + 1], lda);
+	    a[i__ + ib + (i__ + ib - 1) * a_dim1] = ei; /* restore the saved entry */
+
+/* Apply the block reflector H to A(1:i,i+1:i+ib-1) from the */
+/* right */
+
+	    i__3 = ib - 1;
+	    strmm_("Right", "Lower", "Transpose", "Unit", &i__, &i__3, &c_b26,
+		    &a[i__ + 1 + i__ * a_dim1], lda, &work[1], &ldwork);
+	    i__3 = ib - 2;
+	    for (j = 0; j <= i__3; ++j) {
+		saxpy_(&i__, &c_b25, &work[ldwork * j + 1], &c__1, &a[(i__ +
+			j + 1) * a_dim1 + 1], &c__1);
+/* L30: */
+	    }
+
+/* Apply the block reflector H to A(i+1:ihi,i+ib:n) from the */
+/* left */
+
+	    i__3 = *ihi - i__;
+	    i__4 = *n - i__ - ib + 1;
+	    slarfb_("Left", "Transpose", "Forward", "Columnwise", &i__3, &
+		    i__4, &ib, &a[i__ + 1 + i__ * a_dim1], lda, t, &c__65, &a[
+		    i__ + 1 + (i__ + ib) * a_dim1], lda, &work[1], &ldwork);
+/* L40: */
+	}
+    }
+
+/* Use unblocked code to reduce the rest of the matrix */
+
+    sgehd2_(n, &i__, ihi, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
+    work[1] = (real) iws; /* return the workspace size actually used */
+
+    return 0;
+
+/* End of SGEHRD */
+
+} /* sgehrd_ */
diff --git a/contrib/libs/clapack/sgejsv.c b/contrib/libs/clapack/sgejsv.c
new file mode 100644
index 0000000000..57fe14814d
--- /dev/null
+++ b/contrib/libs/clapack/sgejsv.c
@@ -0,0 +1,2210 @@
+/* sgejsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b34 = 0.f;
+static real c_b35 = 1.f;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int sgejsv_(char *joba, char *jobu, char *jobv, char *jobr,
+ char *jobt, char *jobp, integer *m, integer *n, real *a, integer *lda,
+ real *sva, real *u, integer *ldu, real *v, integer *ldv, real *work,
+ integer *lwork, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, u_dim1, u_offset, v_dim1, v_offset, i__1, i__2,
+ i__3, i__4, i__5, i__6, i__7, i__8, i__9, i__10;
+ real r__1, r__2, r__3, r__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal), log(doublereal), r_sign(real *, real *);
+ integer i_nint(real *);
+
+ /* Local variables */
+ integer p, q, n1, nr;
+ real big, xsc, big1;
+ logical defr;
+ real aapp, aaqq;
+ logical kill;
+ integer ierr;
+ real temp1;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ logical jracc;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ real small, entra, sfmin;
+ logical lsvec;
+ real epsln;
+ logical rsvec;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), sswap_(integer *, real *, integer *, real *, integer *
+);
+ logical l2aber;
+ extern /* Subroutine */ int strsm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+);
+ real condr1, condr2, uscal1, uscal2;
+ logical l2kill, l2rank, l2tran;
+ extern /* Subroutine */ int sgeqp3_(integer *, integer *, real *, integer
+ *, integer *, real *, real *, integer *, integer *);
+ logical l2pert;
+ real scalem, sconda;
+ logical goscal;
+ real aatmin;
+ extern doublereal slamch_(char *);
+ real aatmax;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical noscal;
+ extern /* Subroutine */ int sgelqf_(integer *, integer *, real *, integer
+ *, real *, real *, integer *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), sgeqrf_(integer *, integer *, real *, integer *, real *,
+ real *, integer *, integer *), slacpy_(char *, integer *, integer
+ *, real *, integer *, real *, integer *), slaset_(char *,
+ integer *, integer *, real *, real *, real *, integer *);
+ real entrat;
+ logical almort;
+ real maxprj;
+ extern /* Subroutine */ int spocon_(char *, integer *, real *, integer *,
+ real *, real *, real *, integer *, integer *);
+ logical errest;
+ extern /* Subroutine */ int sgesvj_(char *, char *, char *, integer *,
+ integer *, real *, integer *, real *, integer *, real *, integer *
+, real *, integer *, integer *), slassq_(
+ integer *, real *, integer *, real *, real *);
+ logical transp;
+ extern /* Subroutine */ int slaswp_(integer *, real *, integer *, integer
+ *, integer *, integer *, integer *), sorgqr_(integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, integer
+ *), sormlq_(char *, char *, integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, real *, integer *,
+ integer *), sormqr_(char *, char *, integer *,
+ integer *, integer *, real *, integer *, real *, real *, integer *
+, real *, integer *, integer *);
+ logical rowpiv;
+ real cond_ok__;
+ integer warning, numrank;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Zlatko Drmac of the University of Zagreb and -- */
+/* -- Kresimir Veselic of the Fernuniversitaet Hagen -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* This routine is also part of SIGMA (version 1.23, October 23. 2008.) */
+/* SIGMA is a library of algorithms for highly accurate algorithms for */
+/* computation of SVD, PSVD, QSVD, (H,K)-SVD, and for solution of the */
+/* eigenvalue problems Hx = lambda M x, H M x = lambda x with H, M > 0. */
+
+/* -#- Scalar Arguments -#- */
+
+
+/* -#- Array Arguments -#- */
+
+/* .. */
+
+/* Purpose */
+/* ~~~~~~~ */
+/* SGEJSV computes the singular value decomposition (SVD) of a real M-by-N */
+/* matrix [A], where M >= N. The SVD of [A] is written as */
+
+/* [A] = [U] * [SIGMA] * [V]^t, */
+
+/* where [SIGMA] is an N-by-N (M-by-N) matrix which is zero except for its N */
+/* diagonal elements, [U] is an M-by-N (or M-by-M) orthonormal matrix, and */
+/* [V] is an N-by-N orthogonal matrix. The diagonal elements of [SIGMA] are */
+/* the singular values of [A]. The columns of [U] and [V] are the left and */
+/* the right singular vectors of [A], respectively. The matrices [U] and [V] */
+/* are computed and stored in the arrays U and V, respectively. The diagonal */
+/* of [SIGMA] is computed and stored in the array SVA. */
+
+/* Further details */
+/* ~~~~~~~~~~~~~~~ */
+/* SGEJSV implements a preconditioned Jacobi SVD algorithm. It uses SGEQP3, */
+/* SGEQRF, and SGELQF as preprocessors and preconditioners. Optionally, an */
+/* additional row pivoting can be used as a preprocessor, which in some */
+/* cases results in much higher accuracy. An example is matrix A with the */
+/* structure A = D1 * C * D2, where D1, D2 are arbitrarily ill-conditioned */
+/* diagonal matrices and C is well-conditioned matrix. In that case, complete */
+/* pivoting in the first QR factorizations provides accuracy dependent on the */
+/* condition number of C, and independent of D1, D2. Such higher accuracy is */
+/* not completely understood theoretically, but it works well in practice. */
+/* Further, if A can be written as A = B*D, with well-conditioned B and some */
+/* diagonal D, then the high accuracy is guaranteed, both theoretically and */
+/* in software, independent of D. For more details see [1], [2]. */
+/* The computational range for the singular values can be the full range */
+/* ( UNDERFLOW,OVERFLOW ), provided that the machine arithmetic and the BLAS */
+/* & LAPACK routines called by SGEJSV are implemented to work in that range. */
+/* If that is not the case, then the restriction for safe computation with */
+/* the singular values in the range of normalized IEEE numbers is that the */
+/* spectral condition number kappa(A)=sigma_max(A)/sigma_min(A) does not */
+/* overflow. This code (SGEJSV) is best used in this restricted range, */
+/* meaning that singular values of magnitude below ||A||_2 / SLAMCH('O') are */
+/* returned as zeros. See JOBR for details on this. */
+/* Further, this implementation is somewhat slower than the one described */
+/* in [1,2] due to replacement of some non-LAPACK components, and because */
+/* the choice of some tuning parameters in the iterative part (SGESVJ) is */
+/* left to the implementer on a particular machine. */
+/* The rank revealing QR factorization (in this code: SGEQP3) should be */
+/* implemented as in [3]. We have a new version of SGEQP3 under development */
+/* that is more robust than the current one in LAPACK, with a cleaner cut in */
+/* rank deficient cases. It will be available in the SIGMA library [4]. */
+/* If M is much larger than N, it is obvious that the initial QRF with */
+/* column pivoting can be preprocessed by the QRF without pivoting. That */
+/* well known trick is not used in SGEJSV because in some cases heavy row */
+/* weighting can be treated with complete pivoting. The overhead in cases */
+/* M much larger than N is then only due to pivoting, but the benefits in */
+/* terms of accuracy have prevailed. The implementer/user can incorporate */
+/* this extra QRF step easily. The implementer can also improve data movement */
+/* (matrix transpose, matrix copy, matrix transposed copy) - this */
+/* implementation of SGEJSV uses only the simplest, naive data movement. */
+
+/* Contributors */
+/* ~~~~~~~~~~~~ */
+/* Zlatko Drmac (Zagreb, Croatia) and Kresimir Veselic (Hagen, Germany) */
+
+/* References */
+/* ~~~~~~~~~~ */
+/* [1] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm I. */
+/* SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1322-1342. */
+/* LAPACK Working note 169. */
+/* [2] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm II. */
+/* SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1343-1362. */
+/* LAPACK Working note 170. */
+/* [3] Z. Drmac and Z. Bujanovic: On the failure of rank-revealing QR */
+/* factorization software - a case study. */
+/* ACM Trans. math. Softw. Vol. 35, No 2 (2008), pp. 1-28. */
+/* LAPACK Working note 176. */
+/* [4] Z. Drmac: SIGMA - mathematical software library for accurate SVD, PSV, */
+/* QSVD, (H,K)-SVD computations. */
+/* Department of Mathematics, University of Zagreb, 2008. */
+
+/* Bugs, examples and comments */
+/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
+/* Please report all bugs and send interesting examples and/or comments to */
+/* drmac@math.hr. Thank you. */
+
+/* Arguments */
+/* ~~~~~~~~~ */
+/* ............................................................................ */
+/* . JOBA (input) CHARACTER*1 */
+/* . Specifies the level of accuracy: */
+/* . = 'C': This option works well (high relative accuracy) if A = B * D, */
+/* . with well-conditioned B and arbitrary diagonal matrix D. */
+/* . The accuracy cannot be spoiled by COLUMN scaling. The */
+/* . accuracy of the computed output depends on the condition of */
+/* . B, and the procedure aims at the best theoretical accuracy. */
+/* . The relative error max_{i=1:N}|d sigma_i| / sigma_i is */
+/* . bounded by f(M,N)*epsilon* cond(B), independent of D. */
+/* . The input matrix is preprocessed with the QRF with column */
+/* . pivoting. This initial preprocessing and preconditioning by */
+/* . a rank revealing QR factorization is common for all values of */
+/* . JOBA. Additional actions are specified as follows: */
+/* . = 'E': Computation as with 'C' with an additional estimate of the */
+/* . condition number of B. It provides a realistic error bound. */
+/* . = 'F': If A = D1 * C * D2 with ill-conditioned diagonal scalings */
+/* . D1, D2, and well-conditioned matrix C, this option gives */
+/* . higher accuracy than the 'C' option. If the structure of the */
+/* . input matrix is not known, and relative accuracy is */
+/* . desirable, then this option is advisable. The input matrix A */
+/* . is preprocessed with QR factorization with FULL (row and */
+/* . column) pivoting. */
+/* . = 'G' Computation as with 'F' with an additional estimate of the */
+/* . condition number of B, where A=D*B. If A has heavily weighted */
+/* . rows, then using this condition number gives too pessimistic */
+/* . error bound. */
+/* . = 'A': Small singular values are the noise and the matrix is treated */
+/* . as numerically rank deficient. The error in the computed */
+/* . singular values is bounded by f(m,n)*epsilon*||A||. */
+/* . The computed SVD A = U * S * V^t restores A up to */
+/* . f(m,n)*epsilon*||A||. */
+/* . This gives the procedure the licence to discard (set to zero) */
+/* . all singular values below N*epsilon*||A||. */
+/* . = 'R': Similar as in 'A'. Rank revealing property of the initial */
+/* . QR factorization is used to reveal (using triangular factor) */
+/* . a gap sigma_{r+1} < epsilon * sigma_r in which case the */
+/* . numerical RANK is declared to be r. The SVD is computed with */
+/* . absolute error bounds, but more accurately than with 'A'. */
+/* . */
+/* . JOBU (input) CHARACTER*1 */
+/* . Specifies whether to compute the columns of U: */
+/* . = 'U': N columns of U are returned in the array U. */
+/* . = 'F': full set of M left sing. vectors is returned in the array U. */
+/* . = 'W': U may be used as workspace of length M*N. See the description */
+/* . of U. */
+/* . = 'N': U is not computed. */
+/* . */
+/* . JOBV (input) CHARACTER*1 */
+/* . Specifies whether to compute the matrix V: */
+/* . = 'V': N columns of V are returned in the array V; Jacobi rotations */
+/* . are not explicitly accumulated. */
+/* . = 'J': N columns of V are returned in the array V, but they are */
+/* . computed as the product of Jacobi rotations. This option is */
+/* . allowed only if JOBU .NE. 'N', i.e. in computing the full SVD. */
+/* . = 'W': V may be used as workspace of length N*N. See the description */
+/* . of V. */
+/* . = 'N': V is not computed. */
+/* . */
+/* . JOBR (input) CHARACTER*1 */
+/* . Specifies the RANGE for the singular values. Issues the licence to */
+/* . set to zero small positive singular values if they are outside */
+/* . specified range. If A .NE. 0 is scaled so that the largest singular */
+/* . value of c*A is around SQRT(BIG), BIG=SLAMCH('O'), then JOBR issues */
+/* . the licence to kill columns of A whose norm in c*A is less than */
+/* . SQRT(SFMIN) (for JOBR.EQ.'R'), or less than SMALL=SFMIN/EPSLN, */
+/* . where SFMIN=SLAMCH('S'), EPSLN=SLAMCH('E'). */
+/* . = 'N': Do not kill small columns of c*A. This option assumes that */
+/* . BLAS and QR factorizations and triangular solvers are */
+/* . implemented to work in that range. If the condition of A */
+/* . is greater than BIG, use SGESVJ. */
+/* . = 'R': RESTRICTED range for sigma(c*A) is [SQRT(SFMIN), SQRT(BIG)] */
+/* . (roughly, as described above). This option is recommended. */
+/* . ~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
+/* . For computing the singular values in the FULL range [SFMIN,BIG] */
+/* . use SGESVJ. */
+/* . */
+/* . JOBT (input) CHARACTER*1 */
+/* . If the matrix is square then the procedure may determine to use */
+/* . transposed A if A^t seems to be better with respect to convergence. */
+/* . If the matrix is not square, JOBT is ignored. This is subject to */
+/* . changes in the future. */
+/* . The decision is based on two values of entropy over the adjoint */
+/* . orbit of A^t * A. See the descriptions of WORK(6) and WORK(7). */
+/* . = 'T': transpose if entropy test indicates possibly faster */
+/* . convergence of Jacobi process if A^t is taken as input. If A is */
+/* . replaced with A^t, then the row pivoting is included automatically. */
+/* . = 'N': do not speculate. */
+/* . This option can be used to compute only the singular values, or the */
+/* . full SVD (U, SIGMA and V). For only one set of singular vectors */
+/* . (U or V), the caller should provide both U and V, as one of the */
+/* . matrices is used as workspace if the matrix A is transposed. */
+/* . The implementer can easily remove this constraint and make the */
+/* . code more complicated. See the descriptions of U and V. */
+/* . */
+/* . JOBP (input) CHARACTER*1 */
+/* . Issues the licence to introduce structured perturbations to drown */
+/* . denormalized numbers. This licence should be active if the */
+/* . denormals are poorly implemented, causing slow computation, */
+/* . especially in cases of fast convergence (!). For details see [1,2]. */
+/* . For the sake of simplicity, this perturbations are included only */
+/* . when the full SVD or only the singular values are requested. The */
+/* . implementer/user can easily add the perturbation for the cases of */
+/* . computing one set of singular vectors. */
+/* . = 'P': introduce perturbation */
+/* . = 'N': do not perturb */
+/* ............................................................................ */
+
+/* M (input) INTEGER */
+/* The number of rows of the input matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the input matrix A. M >= N >= 0. */
+
+/* A (input/workspace) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* SVA (workspace/output) REAL array, dimension (N) */
+/* On exit, */
+/* - For WORK(1)/WORK(2) = ONE: The singular values of A. During the */
+/* computation SVA contains Euclidean column norms of the */
+/* iterated matrices in the array A. */
+/* - For WORK(1) .NE. WORK(2): The singular values of A are */
+/* (WORK(1)/WORK(2)) * SVA(1:N). This factored form is used if */
+/* sigma_max(A) overflows or if small singular values have been */
+/* saved from underflow by scaling the input matrix A. */
+/* - If JOBR='R' then some of the singular values may be returned */
+/* as exact zeros obtained by "set to zero" because they are */
+/* below the numerical rank threshold or are denormalized numbers. */
+
+/* U (workspace/output) REAL array, dimension ( LDU, N ) */
+/* If JOBU = 'U', then U contains on exit the M-by-N matrix of */
+/* the left singular vectors. */
+/* If JOBU = 'F', then U contains on exit the M-by-M matrix of */
+/* the left singular vectors, including an ONB */
+/* of the orthogonal complement of the Range(A). */
+/* If JOBU = 'W' .AND. (JOBV.EQ.'V' .AND. JOBT.EQ.'T' .AND. M.EQ.N), */
+/* then U is used as workspace if the procedure */
+/* replaces A with A^t. In that case, [V] is computed */
+/* in U as left singular vectors of A^t and then */
+/* copied back to the V array. This 'W' option is just */
+/* a reminder to the caller that in this case U is */
+/* reserved as workspace of length N*N. */
+/* If JOBU = 'N' U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U, LDU >= 1. */
+/* IF JOBU = 'U' or 'F' or 'W', then LDU >= M. */
+
+/* V (workspace/output) REAL array, dimension ( LDV, N ) */
+/* If JOBV = 'V', 'J' then V contains on exit the N-by-N matrix of */
+/* the right singular vectors; */
+/* If JOBV = 'W', AND (JOBU.EQ.'U' AND JOBT.EQ.'T' AND M.EQ.N), */
+/* then V is used as workspace if the procedure */
+/* replaces A with A^t. In that case, [U] is computed */
+/* in V as right singular vectors of A^t and then */
+/* copied back to the U array. This 'W' option is just */
+/* a reminder to the caller that in this case V is */
+/* reserved as workspace of length N*N. */
+/* If JOBV = 'N' V is not referenced. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V, LDV >= 1. */
+/* If JOBV = 'V' or 'J' or 'W', then LDV >= N. */
+
+/* WORK (workspace/output) REAL array, dimension at least LWORK. */
+/* On exit, */
+/* WORK(1) = SCALE = WORK(2) / WORK(1) is the scaling factor such */
+/* that SCALE*SVA(1:N) are the computed singular values */
+/* of A. (See the description of SVA().) */
+/* WORK(2) = See the description of WORK(1). */
+/* WORK(3) = SCONDA is an estimate for the condition number of */
+/* column equilibrated A. (If JOBA .EQ. 'E' or 'G') */
+/* SCONDA is an estimate of SQRT(||(R^t * R)^(-1)||_1). */
+/* It is computed using SPOCON. It holds */
+/* N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA */
+/* where R is the triangular factor from the QRF of A. */
+/* However, if R is truncated and the numerical rank is */
+/* determined to be strictly smaller than N, SCONDA is */
+/* returned as -1, thus indicating that the smallest */
+/* singular values might be lost. */
+
+/* If full SVD is needed, the following two condition numbers are */
+/* useful for the analysis of the algorithm. They are provided for */
+/* a developer/implementer who is familiar with the details of */
+/* the method. */
+
+/* WORK(4) = an estimate of the scaled condition number of the */
+/* triangular factor in the first QR factorization. */
+/* WORK(5) = an estimate of the scaled condition number of the */
+/* triangular factor in the second QR factorization. */
+/* The following two parameters are computed if JOBT .EQ. 'T'. */
+/* They are provided for a developer/implementer who is familiar */
+/* with the details of the method. */
+
+/* WORK(6) = the entropy of A^t*A :: this is the Shannon entropy */
+/* of diag(A^t*A) / Trace(A^t*A) taken as point in the */
+/* probability simplex. */
+/* WORK(7) = the entropy of A*A^t. */
+
+/* LWORK (input) INTEGER */
+/* Length of WORK to confirm proper allocation of work space. */
+/* LWORK depends on the job: */
+
+/* If only SIGMA is needed ( JOBU.EQ.'N', JOBV.EQ.'N' ) and */
+/* -> .. no scaled condition estimate required ( JOBE.EQ.'N'): */
+/* LWORK >= max(2*M+N,4*N+1,7). This is the minimal requirement. */
+/* For optimal performance (blocked code) the optimal value */
+/* is LWORK >= max(2*M+N,3*N+(N+1)*NB,7). Here NB is the optimal */
+/* block size for xGEQP3/xGEQRF. */
+/* -> .. an estimate of the scaled condition number of A is */
+/* required (JOBA='E', 'G'). In this case, LWORK is the maximum */
+/* of the above and N*N+4*N, i.e. LWORK >= max(2*M+N,N*N+4N,7). */
+
+/* If SIGMA and the right singular vectors are needed (JOBV.EQ.'V'), */
+/* -> the minimal requirement is LWORK >= max(2*N+M,7). */
+/* -> For optimal performance, LWORK >= max(2*N+M,2*N+N*NB,7), */
+/* where NB is the optimal block size. */
+
+/* If SIGMA and the left singular vectors are needed */
+/* -> the minimal requirement is LWORK >= max(2*N+M,7). */
+/* -> For optimal performance, LWORK >= max(2*N+M,2*N+N*NB,7), */
+/* where NB is the optimal block size. */
+
+/* If full SVD is needed ( JOBU.EQ.'U' or 'F', JOBV.EQ.'V' ) and */
+/* -> .. the singular vectors are computed without explicit */
+/* accumulation of the Jacobi rotations, LWORK >= 6*N+2*N*N */
+/* -> .. in the iterative part, the Jacobi rotations are */
+/* explicitly accumulated (option, see the description of JOBV), */
+/* then the minimal requirement is LWORK >= max(M+3*N+N*N,7). */
+/* For better performance, if NB is the optimal block size, */
+/* LWORK >= max(3*N+N*N+M,3*N+N*N+N*NB,7). */
+
+/* IWORK (workspace/output) INTEGER array, dimension M+3*N. */
+/* On exit, */
+/* IWORK(1) = the numerical rank determined after the initial */
+/* QR factorization with pivoting. See the descriptions */
+/* of JOBA and JOBR. */
+/* IWORK(2) = the number of the computed nonzero singular values */
+/* IWORK(3) = if nonzero, a warning message: */
+/* If IWORK(3).EQ.1 then some of the column norms of A */
+/* were denormalized floats. The requested high accuracy */
+/* is not warranted by the data. */
+
+/* INFO (output) INTEGER */
+/* < 0 : if INFO = -i, then the i-th argument had an illegal value. */
+/* = 0 : successful exit; */
+/* > 0 : SGEJSV did not converge in the maximal allowed number */
+/* of sweeps. The computed values may be inaccurate. */
+
+/* ............................................................................ */
+
+/* Local Parameters: */
+
+
+/* Local Scalars: */
+
+
+/* Intrinsic Functions: */
+
+
+/* External Functions: */
+
+
+/* External Subroutines ( BLAS, LAPACK ): */
+
+
+
+/* ............................................................................ */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ --sva;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ lsvec = lsame_(jobu, "U") || lsame_(jobu, "F");
+ jracc = lsame_(jobv, "J");
+ rsvec = lsame_(jobv, "V") || jracc;
+ rowpiv = lsame_(joba, "F") || lsame_(joba, "G");
+ l2rank = lsame_(joba, "R");
+ l2aber = lsame_(joba, "A");
+ errest = lsame_(joba, "E") || lsame_(joba, "G");
+ l2tran = lsame_(jobt, "T");
+ l2kill = lsame_(jobr, "R");
+ defr = lsame_(jobr, "N");
+ l2pert = lsame_(jobp, "P");
+
+ if (! (rowpiv || l2rank || l2aber || errest || lsame_(joba, "C"))) {
+ *info = -1;
+ } else if (! (lsvec || lsame_(jobu, "N") || lsame_(
+ jobu, "W"))) {
+ *info = -2;
+ } else if (! (rsvec || lsame_(jobv, "N") || lsame_(
+ jobv, "W")) || jracc && ! lsvec) {
+ *info = -3;
+ } else if (! (l2kill || defr)) {
+ *info = -4;
+ } else if (! (l2tran || lsame_(jobt, "N"))) {
+ *info = -5;
+ } else if (! (l2pert || lsame_(jobp, "N"))) {
+ *info = -6;
+ } else if (*m < 0) {
+ *info = -7;
+ } else if (*n < 0 || *n > *m) {
+ *info = -8;
+ } else if (*lda < *m) {
+ *info = -10;
+ } else if (lsvec && *ldu < *m) {
+ *info = -13;
+ } else if (rsvec && *ldv < *n) {
+ *info = -14;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = 7, i__2 = (*n << 2) + 1, i__1 = max(i__1,i__2), i__2 = (*m <<
+ 1) + *n;
+/* Computing MAX */
+ i__3 = 7, i__4 = (*n << 2) + *n * *n, i__3 = max(i__3,i__4), i__4 = (*
+ m << 1) + *n;
+/* Computing MAX */
+ i__5 = 7, i__6 = (*n << 1) + *m;
+/* Computing MAX */
+ i__7 = 7, i__8 = (*n << 1) + *m;
+/* Computing MAX */
+ i__9 = 7, i__10 = *m + *n * 3 + *n * *n;
+ if (! (lsvec || rsvec || errest) && *lwork < max(i__1,i__2) || ! (
+ lsvec || lsvec) && errest && *lwork < max(i__3,i__4) || lsvec
+ && ! rsvec && *lwork < max(i__5,i__6) || rsvec && ! lsvec && *
+ lwork < max(i__7,i__8) || lsvec && rsvec && ! jracc && *lwork
+ < *n * 6 + (*n << 1) * *n || lsvec && rsvec && jracc && *
+ lwork < max(i__9,i__10)) {
+ *info = -17;
+ } else {
+/* #:) */
+ *info = 0;
+ }
+ }
+
+ if (*info != 0) {
+/* #:( */
+ i__1 = -(*info);
+ xerbla_("SGEJSV", &i__1);
+ }
+
+/* Quick return for void matrix (Y3K safe) */
+/* #:) */
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Determine whether the matrix U should be M x N or M x M */
+
+ if (lsvec) {
+ n1 = *n;
+ if (lsame_(jobu, "F")) {
+ n1 = *m;
+ }
+ }
+
+/* Set numerical parameters */
+
+/* ! NOTE: Make sure SLAMCH() does not fail on the target architecture. */
+
+ epsln = slamch_("Epsilon");
+ sfmin = slamch_("SafeMinimum");
+ small = sfmin / epsln;
+ big = slamch_("O");
+
+/* Initialize SVA(1:N) = diag( ||A e_i||_2 )_1^N */
+
+/* (!) If necessary, scale SVA() to protect the largest norm from */
+/* overflow. It is possible that this scaling pushes the smallest */
+/* column norm left from the underflow threshold (extreme case). */
+
+ scalem = 1.f / sqrt((real) (*m) * (real) (*n));
+ noscal = TRUE_;
+ goscal = TRUE_;
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ aapp = 0.f;
+ aaqq = 0.f;
+ slassq_(m, &a[p * a_dim1 + 1], &c__1, &aapp, &aaqq);
+ if (aapp > big) {
+ *info = -9;
+ i__2 = -(*info);
+ xerbla_("SGEJSV", &i__2);
+ return 0;
+ }
+ aaqq = sqrt(aaqq);
+ if (aapp < big / aaqq && noscal) {
+ sva[p] = aapp * aaqq;
+ } else {
+ noscal = FALSE_;
+ sva[p] = aapp * (aaqq * scalem);
+ if (goscal) {
+ goscal = FALSE_;
+ i__2 = p - 1;
+ sscal_(&i__2, &scalem, &sva[1], &c__1);
+ }
+ }
+/* L1874: */
+ }
+
+ if (noscal) {
+ scalem = 1.f;
+ }
+
+ aapp = 0.f;
+ aaqq = big;
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+/* Computing MAX */
+ r__1 = aapp, r__2 = sva[p];
+ aapp = dmax(r__1,r__2);
+ if (sva[p] != 0.f) {
+/* Computing MIN */
+ r__1 = aaqq, r__2 = sva[p];
+ aaqq = dmin(r__1,r__2);
+ }
+/* L4781: */
+ }
+
+/* Quick return for zero M x N matrix */
+/* #:) */
+ if (aapp == 0.f) {
+ if (lsvec) {
+ slaset_("G", m, &n1, &c_b34, &c_b35, &u[u_offset], ldu)
+ ;
+ }
+ if (rsvec) {
+ slaset_("G", n, n, &c_b34, &c_b35, &v[v_offset], ldv);
+ }
+ work[1] = 1.f;
+ work[2] = 1.f;
+ if (errest) {
+ work[3] = 1.f;
+ }
+ if (lsvec && rsvec) {
+ work[4] = 1.f;
+ work[5] = 1.f;
+ }
+ if (l2tran) {
+ work[6] = 0.f;
+ work[7] = 0.f;
+ }
+ iwork[1] = 0;
+ iwork[2] = 0;
+ return 0;
+ }
+
+/* Issue warning if denormalized column norms detected. Override the */
+/* high relative accuracy request. Issue licence to kill columns */
+/* (set them to zero) whose norm is less than sigma_max / BIG (roughly). */
+/* #:( */
+ warning = 0;
+ if (aaqq <= sfmin) {
+ l2rank = TRUE_;
+ l2kill = TRUE_;
+ warning = 1;
+ }
+
+/* Quick return for one-column matrix */
+/* #:) */
+ if (*n == 1) {
+
+ if (lsvec) {
+ slascl_("G", &c__0, &c__0, &sva[1], &scalem, m, &c__1, &a[a_dim1
+ + 1], lda, &ierr);
+ slacpy_("A", m, &c__1, &a[a_offset], lda, &u[u_offset], ldu);
+/* computing all M left singular vectors of the M x 1 matrix */
+ if (n1 != *n) {
+ i__1 = *lwork - *n;
+ sgeqrf_(m, n, &u[u_offset], ldu, &work[1], &work[*n + 1], &
+ i__1, &ierr);
+ i__1 = *lwork - *n;
+ sorgqr_(m, &n1, &c__1, &u[u_offset], ldu, &work[1], &work[*n
+ + 1], &i__1, &ierr);
+ scopy_(m, &a[a_dim1 + 1], &c__1, &u[u_dim1 + 1], &c__1);
+ }
+ }
+ if (rsvec) {
+ v[v_dim1 + 1] = 1.f;
+ }
+ if (sva[1] < big * scalem) {
+ sva[1] /= scalem;
+ scalem = 1.f;
+ }
+ work[1] = 1.f / scalem;
+ work[2] = 1.f;
+ if (sva[1] != 0.f) {
+ iwork[1] = 1;
+ if (sva[1] / scalem >= sfmin) {
+ iwork[2] = 1;
+ } else {
+ iwork[2] = 0;
+ }
+ } else {
+ iwork[1] = 0;
+ iwork[2] = 0;
+ }
+ if (errest) {
+ work[3] = 1.f;
+ }
+ if (lsvec && rsvec) {
+ work[4] = 1.f;
+ work[5] = 1.f;
+ }
+ if (l2tran) {
+ work[6] = 0.f;
+ work[7] = 0.f;
+ }
+ return 0;
+
+ }
+
+ transp = FALSE_;
+ l2tran = l2tran && *m == *n;
+
+ aatmax = -1.f;
+ aatmin = big;
+ if (rowpiv || l2tran) {
+
+/* Compute the row norms, needed to determine row pivoting sequence */
+/* (in the case of heavily row weighted A, row pivoting is strongly */
+/* advised) and to collect information needed to compare the */
+/* structures of A * A^t and A^t * A (in the case L2TRAN.EQ..TRUE.). */
+
+ if (l2tran) {
+ i__1 = *m;
+ for (p = 1; p <= i__1; ++p) {
+ xsc = 0.f;
+ temp1 = 0.f;
+ slassq_(n, &a[p + a_dim1], lda, &xsc, &temp1);
+/* SLASSQ gets both the ell_2 and the ell_infinity norm */
+/* in one pass through the vector */
+ work[*m + *n + p] = xsc * scalem;
+ work[*n + p] = xsc * (scalem * sqrt(temp1));
+/* Computing MAX */
+ r__1 = aatmax, r__2 = work[*n + p];
+ aatmax = dmax(r__1,r__2);
+ if (work[*n + p] != 0.f) {
+/* Computing MIN */
+ r__1 = aatmin, r__2 = work[*n + p];
+ aatmin = dmin(r__1,r__2);
+ }
+/* L1950: */
+ }
+ } else {
+ i__1 = *m;
+ for (p = 1; p <= i__1; ++p) {
+ work[*m + *n + p] = scalem * (r__1 = a[p + isamax_(n, &a[p +
+ a_dim1], lda) * a_dim1], dabs(r__1));
+/* Computing MAX */
+ r__1 = aatmax, r__2 = work[*m + *n + p];
+ aatmax = dmax(r__1,r__2);
+/* Computing MIN */
+ r__1 = aatmin, r__2 = work[*m + *n + p];
+ aatmin = dmin(r__1,r__2);
+/* L1904: */
+ }
+ }
+
+ }
+
+/* For square matrix A try to determine whether A^t would be better */
+/* input for the preconditioned Jacobi SVD, with faster convergence. */
+/* The decision is based on an O(N) function of the vector of column */
+/* and row norms of A, based on the Shannon entropy. This should give */
+/* the right choice in most cases when the difference actually matters. */
+/* It may fail and pick the slower converging side. */
+
+ entra = 0.f;
+ entrat = 0.f;
+ if (l2tran) {
+
+ xsc = 0.f;
+ temp1 = 0.f;
+ slassq_(n, &sva[1], &c__1, &xsc, &temp1);
+ temp1 = 1.f / temp1;
+
+ entra = 0.f;
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+/* Computing 2nd power */
+ r__1 = sva[p] / xsc;
+ big1 = r__1 * r__1 * temp1;
+ if (big1 != 0.f) {
+ entra += big1 * log(big1);
+ }
+/* L1113: */
+ }
+ entra = -entra / log((real) (*n));
+
+/* Now, SVA().^2/Trace(A^t * A) is a point in the probability simplex. */
+/* It is derived from the diagonal of A^t * A. Do the same with the */
+/* diagonal of A * A^t, compute the entropy of the corresponding */
+/* probability distribution. Note that A * A^t and A^t * A have the */
+/* same trace. */
+
+ entrat = 0.f;
+ i__1 = *n + *m;
+ for (p = *n + 1; p <= i__1; ++p) {
+/* Computing 2nd power */
+ r__1 = work[p] / xsc;
+ big1 = r__1 * r__1 * temp1;
+ if (big1 != 0.f) {
+ entrat += big1 * log(big1);
+ }
+/* L1114: */
+ }
+ entrat = -entrat / log((real) (*m));
+
+/* Analyze the entropies and decide A or A^t. Smaller entropy */
+/* usually means better input for the algorithm. */
+
+ transp = entrat < entra;
+
+/* If A^t is better than A, transpose A. */
+
+ if (transp) {
+/* In an optimal implementation, this trivial transpose */
+/* should be replaced with faster transpose. */
+ i__1 = *n - 1;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *n;
+ for (q = p + 1; q <= i__2; ++q) {
+ temp1 = a[q + p * a_dim1];
+ a[q + p * a_dim1] = a[p + q * a_dim1];
+ a[p + q * a_dim1] = temp1;
+/* L1116: */
+ }
+/* L1115: */
+ }
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ work[*m + *n + p] = sva[p];
+ sva[p] = work[*n + p];
+/* L1117: */
+ }
+ temp1 = aapp;
+ aapp = aatmax;
+ aatmax = temp1;
+ temp1 = aaqq;
+ aaqq = aatmin;
+ aatmin = temp1;
+ kill = lsvec;
+ lsvec = rsvec;
+ rsvec = kill;
+
+ rowpiv = TRUE_;
+ }
+
+ }
+/* END IF L2TRAN */
+
+/* Scale the matrix so that its maximal singular value remains less */
+/* than SQRT(BIG) -- the matrix is scaled so that its maximal column */
+/* has Euclidean norm equal to SQRT(BIG/N). The only reason to keep */
+/* SQRT(BIG) instead of BIG is the fact that SGEJSV uses LAPACK and */
+/* BLAS routines that, in some implementations, are not capable of */
+/* working in the full interval [SFMIN,BIG] and that they may provoke */
+/* overflows in the intermediate results. If the singular values spread */
+/* from SFMIN to BIG, then SGESVJ will compute them. So, in that case, */
+/* one should use SGESVJ instead of SGEJSV. */
+
+ big1 = sqrt(big);
+ temp1 = sqrt(big / (real) (*n));
+
+ slascl_("G", &c__0, &c__0, &aapp, &temp1, n, &c__1, &sva[1], n, &ierr);
+ if (aaqq > aapp * sfmin) {
+ aaqq = aaqq / aapp * temp1;
+ } else {
+ aaqq = aaqq * temp1 / aapp;
+ }
+ temp1 *= scalem;
+ slascl_("G", &c__0, &c__0, &aapp, &temp1, m, n, &a[a_offset], lda, &ierr);
+
+/* To undo scaling at the end of this procedure, multiply the */
+/* computed singular values with USCAL2 / USCAL1. */
+
+ uscal1 = temp1;
+ uscal2 = aapp;
+
+ if (l2kill) {
+/* L2KILL enforces computation of nonzero singular values in */
+/* the restricted range of condition number of the initial A, */
+/* sigma_max(A) / sigma_min(A) approx. SQRT(BIG)/SQRT(SFMIN). */
+ xsc = sqrt(sfmin);
+ } else {
+ xsc = small;
+
+/* Now, if the condition number of A is too big, */
+/* sigma_max(A) / sigma_min(A) .GT. SQRT(BIG/N) * EPSLN / SFMIN, */
+/* as a precaution measure, the full SVD is computed using SGESVJ */
+/* with accumulated Jacobi rotations. This provides numerically */
+/* more robust computation, at the cost of slightly increased run */
+/* time. Depending on the concrete implementation of BLAS and LAPACK */
+/* (i.e. how they behave in presence of extreme ill-conditioning) the */
+/* implementor may decide to remove this switch. */
+ if (aaqq < sqrt(sfmin) && lsvec && rsvec) {
+ jracc = TRUE_;
+ }
+
+ }
+ if (aaqq < xsc) {
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ if (sva[p] < xsc) {
+ slaset_("A", m, &c__1, &c_b34, &c_b34, &a[p * a_dim1 + 1],
+ lda);
+ sva[p] = 0.f;
+ }
+/* L700: */
+ }
+ }
+
+/* Preconditioning using QR factorization with pivoting */
+
+ if (rowpiv) {
+/* Optional row permutation (Bjoerck row pivoting): */
+/* A result by Cox and Higham shows that the Bjoerck's */
+/* row pivoting combined with standard column pivoting */
+/* has similar effect as Powell-Reid complete pivoting. */
+/* The ell-infinity norms of A are made nonincreasing. */
+ i__1 = *m - 1;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *m - p + 1;
+ q = isamax_(&i__2, &work[*m + *n + p], &c__1) + p - 1;
+ iwork[(*n << 1) + p] = q;
+ if (p != q) {
+ temp1 = work[*m + *n + p];
+ work[*m + *n + p] = work[*m + *n + q];
+ work[*m + *n + q] = temp1;
+ }
+/* L1952: */
+ }
+ i__1 = *m - 1;
+ slaswp_(n, &a[a_offset], lda, &c__1, &i__1, &iwork[(*n << 1) + 1], &
+ c__1);
+ }
+
+/* End of the preparation phase (scaling, optional sorting and */
+/* transposing, optional flushing of small columns). */
+
+/* Preconditioning */
+
+/* If the full SVD is needed, the right singular vectors are computed */
+/* from a matrix equation, and for that we need theoretical analysis */
+/* of the Businger-Golub pivoting. So we use SGEQP3 as the first RR QRF. */
+/* In all other cases the first RR QRF can be chosen by other criteria */
+/* (eg speed by replacing global with restricted window pivoting, such */
+/* as in SGEQPX from TOMS # 782). Good results will be obtained using */
+/* SGEQPX with properly (!) chosen numerical parameters. */
+/* Any improvement of SGEQP3 improves overall performance of SGEJSV. */
+
+/* A * P1 = Q1 * [ R1^t 0]^t: */
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+/* .. all columns are free columns */
+ iwork[p] = 0;
+/* L1963: */
+ }
+ i__1 = *lwork - *n;
+ sgeqp3_(m, n, &a[a_offset], lda, &iwork[1], &work[1], &work[*n + 1], &
+ i__1, &ierr);
+
+/* The upper triangular matrix R1 from the first QRF is inspected for */
+/* rank deficiency and possibilities for deflation, or possible */
+/* ill-conditioning. Depending on the user specified flag L2RANK, */
+/* the procedure explores possibilities to reduce the numerical */
+/* rank by inspecting the computed upper triangular factor. If */
+/* L2RANK or L2ABER are up, then SGEJSV will compute the SVD of */
+/* A + dA, where ||dA|| <= f(M,N)*EPSLN. */
+
+ nr = 1;
+ if (l2aber) {
+/* Standard absolute error bound suffices. All sigma_i with */
+/* sigma_i < N*EPSLN*||A|| are flushed to zero. This is an */
+/* aggressive enforcement of lower numerical rank by introducing a */
+/* backward error of the order of N*EPSLN*||A||. */
+ temp1 = sqrt((real) (*n)) * epsln;
+ i__1 = *n;
+ for (p = 2; p <= i__1; ++p) {
+ if ((r__2 = a[p + p * a_dim1], dabs(r__2)) >= temp1 * (r__1 = a[
+ a_dim1 + 1], dabs(r__1))) {
+ ++nr;
+ } else {
+ goto L3002;
+ }
+/* L3001: */
+ }
+L3002:
+ ;
+ } else if (l2rank) {
+/* .. similarly as above, only slightly more gentle (less aggressive). */
+/* Sudden drop on the diagonal of R1 is used as the criterion for */
+/* close-to-rank-deficient. */
+ temp1 = sqrt(sfmin);
+ i__1 = *n;
+ for (p = 2; p <= i__1; ++p) {
+ if ((r__2 = a[p + p * a_dim1], dabs(r__2)) < epsln * (r__1 = a[p
+ - 1 + (p - 1) * a_dim1], dabs(r__1)) || (r__3 = a[p + p *
+ a_dim1], dabs(r__3)) < small || l2kill && (r__4 = a[p + p
+ * a_dim1], dabs(r__4)) < temp1) {
+ goto L3402;
+ }
+ ++nr;
+/* L3401: */
+ }
+L3402:
+
+ ;
+ } else {
+/* The goal is high relative accuracy. However, if the matrix */
+/* has high scaled condition number the relative accuracy is in */
+/* general not feasible. Later on, a condition number estimator */
+/* will be deployed to estimate the scaled condition number. */
+/* Here we just remove the underflowed part of the triangular */
+/* factor. This prevents the situation in which the code is */
+/* working hard to get the accuracy not warranted by the data. */
+ temp1 = sqrt(sfmin);
+ i__1 = *n;
+ for (p = 2; p <= i__1; ++p) {
+ if ((r__1 = a[p + p * a_dim1], dabs(r__1)) < small || l2kill && (
+ r__2 = a[p + p * a_dim1], dabs(r__2)) < temp1) {
+ goto L3302;
+ }
+ ++nr;
+/* L3301: */
+ }
+L3302:
+
+ ;
+ }
+
+ almort = FALSE_;
+ if (nr == *n) {
+ maxprj = 1.f;
+ i__1 = *n;
+ for (p = 2; p <= i__1; ++p) {
+ temp1 = (r__1 = a[p + p * a_dim1], dabs(r__1)) / sva[iwork[p]];
+ maxprj = dmin(maxprj,temp1);
+/* L3051: */
+ }
+/* Computing 2nd power */
+ r__1 = maxprj;
+ if (r__1 * r__1 >= 1.f - (real) (*n) * epsln) {
+ almort = TRUE_;
+ }
+ }
+
+
+ sconda = -1.f;
+ condr1 = -1.f;
+ condr2 = -1.f;
+
+ if (errest) {
+ if (*n == nr) {
+ if (rsvec) {
+/* .. V is available as workspace */
+ slacpy_("U", n, n, &a[a_offset], lda, &v[v_offset], ldv);
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ temp1 = sva[iwork[p]];
+ r__1 = 1.f / temp1;
+ sscal_(&p, &r__1, &v[p * v_dim1 + 1], &c__1);
+/* L3053: */
+ }
+ spocon_("U", n, &v[v_offset], ldv, &c_b35, &temp1, &work[*n +
+ 1], &iwork[(*n << 1) + *m + 1], &ierr);
+ } else if (lsvec) {
+/* .. U is available as workspace */
+ slacpy_("U", n, n, &a[a_offset], lda, &u[u_offset], ldu);
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ temp1 = sva[iwork[p]];
+ r__1 = 1.f / temp1;
+ sscal_(&p, &r__1, &u[p * u_dim1 + 1], &c__1);
+/* L3054: */
+ }
+ spocon_("U", n, &u[u_offset], ldu, &c_b35, &temp1, &work[*n +
+ 1], &iwork[(*n << 1) + *m + 1], &ierr);
+ } else {
+ slacpy_("U", n, n, &a[a_offset], lda, &work[*n + 1], n);
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ temp1 = sva[iwork[p]];
+ r__1 = 1.f / temp1;
+ sscal_(&p, &r__1, &work[*n + (p - 1) * *n + 1], &c__1);
+/* L3052: */
+ }
+/* .. the columns of R are scaled to have unit Euclidean lengths. */
+ spocon_("U", n, &work[*n + 1], n, &c_b35, &temp1, &work[*n + *
+ n * *n + 1], &iwork[(*n << 1) + *m + 1], &ierr);
+ }
+ sconda = 1.f / sqrt(temp1);
+/* SCONDA is an estimate of SQRT(||(R^t * R)^(-1)||_1). */
+/* N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA */
+ } else {
+ sconda = -1.f;
+ }
+ }
+
+ l2pert = l2pert && (r__1 = a[a_dim1 + 1] / a[nr + nr * a_dim1], dabs(r__1)
+ ) > sqrt(big1);
+/* If there is no violent scaling, artificial perturbation is not needed. */
+
+/* Phase 3: */
+
+ if (! (rsvec || lsvec)) {
+
+/* Singular Values only */
+
+/* .. transpose A(1:NR,1:N) */
+/* Computing MIN */
+ i__2 = *n - 1;
+ i__1 = min(i__2,nr);
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *n - p;
+ scopy_(&i__2, &a[p + (p + 1) * a_dim1], lda, &a[p + 1 + p *
+ a_dim1], &c__1);
+/* L1946: */
+ }
+
+/* The following two DO-loops introduce small relative perturbation */
+/* into the strict upper triangle of the lower triangular matrix. */
+/* Small entries below the main diagonal are also changed. */
+/* This modification is useful if the computing environment does not */
+/* provide/allow FLUSH TO ZERO underflow, for it prevents many */
+/* annoying denormalized numbers in case of strongly scaled matrices. */
+/* The perturbation is structured so that it does not introduce any */
+/* new perturbation of the singular values, and it does not destroy */
+/* the job done by the preconditioner. */
+/* The licence for this perturbation is in the variable L2PERT, which */
+/* should be .FALSE. if FLUSH TO ZERO underflow is active. */
+
+ if (! almort) {
+
+ if (l2pert) {
+/* XSC = SQRT(SMALL) */
+ xsc = epsln / (real) (*n);
+ i__1 = nr;
+ for (q = 1; q <= i__1; ++q) {
+ temp1 = xsc * (r__1 = a[q + q * a_dim1], dabs(r__1));
+ i__2 = *n;
+ for (p = 1; p <= i__2; ++p) {
+ if (p > q && (r__1 = a[p + q * a_dim1], dabs(r__1)) <=
+ temp1 || p < q) {
+ a[p + q * a_dim1] = r_sign(&temp1, &a[p + q *
+ a_dim1]);
+ }
+/* L4949: */
+ }
+/* L4947: */
+ }
+ } else {
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ slaset_("U", &i__1, &i__2, &c_b34, &c_b34, &a[(a_dim1 << 1) +
+ 1], lda);
+ }
+
+/* .. second preconditioning using the QR factorization */
+
+ i__1 = *lwork - *n;
+ sgeqrf_(n, &nr, &a[a_offset], lda, &work[1], &work[*n + 1], &i__1,
+ &ierr);
+
+/* .. and transpose upper to lower triangular */
+ i__1 = nr - 1;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = nr - p;
+ scopy_(&i__2, &a[p + (p + 1) * a_dim1], lda, &a[p + 1 + p *
+ a_dim1], &c__1);
+/* L1948: */
+ }
+
+ }
+
+/* Row-cyclic Jacobi SVD algorithm with column pivoting */
+
+/* .. again some perturbation (a "background noise") is added */
+/* to drown denormals */
+ if (l2pert) {
+/* XSC = SQRT(SMALL) */
+ xsc = epsln / (real) (*n);
+ i__1 = nr;
+ for (q = 1; q <= i__1; ++q) {
+ temp1 = xsc * (r__1 = a[q + q * a_dim1], dabs(r__1));
+ i__2 = nr;
+ for (p = 1; p <= i__2; ++p) {
+ if (p > q && (r__1 = a[p + q * a_dim1], dabs(r__1)) <=
+ temp1 || p < q) {
+ a[p + q * a_dim1] = r_sign(&temp1, &a[p + q * a_dim1])
+ ;
+ }
+/* L1949: */
+ }
+/* L1947: */
+ }
+ } else {
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ slaset_("U", &i__1, &i__2, &c_b34, &c_b34, &a[(a_dim1 << 1) + 1],
+ lda);
+ }
+
+/* .. and one-sided Jacobi rotations are started on a lower */
+/* triangular matrix (plus perturbation which is ignored in */
+/* the part which destroys triangular form (confusing?!)) */
+
+ sgesvj_("L", "NoU", "NoV", &nr, &nr, &a[a_offset], lda, &sva[1], n, &
+ v[v_offset], ldv, &work[1], lwork, info);
+
+ scalem = work[1];
+ numrank = i_nint(&work[2]);
+
+
+ } else if (rsvec && ! lsvec) {
+
+/* -> Singular Values and Right Singular Vectors <- */
+
+ if (almort) {
+
+/* .. in this case NR equals N */
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *n - p + 1;
+ scopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1], &
+ c__1);
+/* L1998: */
+ }
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ slaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
+ 1], ldv);
+
+ sgesvj_("L", "U", "N", n, &nr, &v[v_offset], ldv, &sva[1], &nr, &
+ a[a_offset], lda, &work[1], lwork, info);
+ scalem = work[1];
+ numrank = i_nint(&work[2]);
+ } else {
+
+/* .. two more QR factorizations ( one QRF is not enough, two require */
+/* accumulated product of Jacobi rotations, three are perfect ) */
+
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ slaset_("Lower", &i__1, &i__2, &c_b34, &c_b34, &a[a_dim1 + 2],
+ lda);
+ i__1 = *lwork - *n;
+ sgelqf_(&nr, n, &a[a_offset], lda, &work[1], &work[*n + 1], &i__1,
+ &ierr);
+ slacpy_("Lower", &nr, &nr, &a[a_offset], lda, &v[v_offset], ldv);
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ slaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
+ 1], ldv);
+ i__1 = *lwork - (*n << 1);
+ sgeqrf_(&nr, &nr, &v[v_offset], ldv, &work[*n + 1], &work[(*n <<
+ 1) + 1], &i__1, &ierr);
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = nr - p + 1;
+ scopy_(&i__2, &v[p + p * v_dim1], ldv, &v[p + p * v_dim1], &
+ c__1);
+/* L8998: */
+ }
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ slaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
+ 1], ldv);
+
+ sgesvj_("Lower", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[1], &
+ nr, &u[u_offset], ldu, &work[*n + 1], lwork, info);
+ scalem = work[*n + 1];
+ numrank = i_nint(&work[*n + 2]);
+ if (nr < *n) {
+ i__1 = *n - nr;
+ slaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 + v_dim1],
+ ldv);
+ i__1 = *n - nr;
+ slaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) * v_dim1
+ + 1], ldv);
+ i__1 = *n - nr;
+ i__2 = *n - nr;
+ slaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1 + (nr +
+ 1) * v_dim1], ldv);
+ }
+
+ i__1 = *lwork - *n;
+ sormlq_("Left", "Transpose", n, n, &nr, &a[a_offset], lda, &work[
+ 1], &v[v_offset], ldv, &work[*n + 1], &i__1, &ierr);
+
+ }
+
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ scopy_(n, &v[p + v_dim1], ldv, &a[iwork[p] + a_dim1], lda);
+/* L8991: */
+ }
+ slacpy_("All", n, n, &a[a_offset], lda, &v[v_offset], ldv);
+
+ if (transp) {
+ slacpy_("All", n, n, &v[v_offset], ldv, &u[u_offset], ldu);
+ }
+
+ } else if (lsvec && ! rsvec) {
+
+/* -#- Singular Values and Left Singular Vectors -#- */
+
+/* .. second preconditioning step to avoid need to accumulate */
+/* Jacobi rotations in the Jacobi iterations. */
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *n - p + 1;
+ scopy_(&i__2, &a[p + p * a_dim1], lda, &u[p + p * u_dim1], &c__1);
+/* L1965: */
+ }
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ slaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &u[(u_dim1 << 1) + 1],
+ ldu);
+
+ i__1 = *lwork - (*n << 1);
+ sgeqrf_(n, &nr, &u[u_offset], ldu, &work[*n + 1], &work[(*n << 1) + 1]
+, &i__1, &ierr);
+
+ i__1 = nr - 1;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = nr - p;
+ scopy_(&i__2, &u[p + (p + 1) * u_dim1], ldu, &u[p + 1 + p *
+ u_dim1], &c__1);
+/* L1967: */
+ }
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ slaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &u[(u_dim1 << 1) + 1],
+ ldu);
+
+ i__1 = *lwork - *n;
+ sgesvj_("Lower", "U", "N", &nr, &nr, &u[u_offset], ldu, &sva[1], &nr,
+ &a[a_offset], lda, &work[*n + 1], &i__1, info);
+ scalem = work[*n + 1];
+ numrank = i_nint(&work[*n + 2]);
+
+ if (nr < *m) {
+ i__1 = *m - nr;
+ slaset_("A", &i__1, &nr, &c_b34, &c_b34, &u[nr + 1 + u_dim1], ldu);
+ if (nr < n1) {
+ i__1 = n1 - nr;
+ slaset_("A", &nr, &i__1, &c_b34, &c_b34, &u[(nr + 1) * u_dim1
+ + 1], ldu);
+ i__1 = *m - nr;
+ i__2 = n1 - nr;
+ slaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[nr + 1 + (nr +
+ 1) * u_dim1], ldu);
+ }
+ }
+
+ i__1 = *lwork - *n;
+ sormqr_("Left", "No Tr", m, &n1, n, &a[a_offset], lda, &work[1], &u[
+ u_offset], ldu, &work[*n + 1], &i__1, &ierr);
+
+ if (rowpiv) {
+ i__1 = *m - 1;
+ slaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n << 1) +
+ 1], &c_n1);
+ }
+
+ i__1 = n1;
+ for (p = 1; p <= i__1; ++p) {
+ xsc = 1.f / snrm2_(m, &u[p * u_dim1 + 1], &c__1);
+ sscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
+/* L1974: */
+ }
+
+ if (transp) {
+ slacpy_("All", n, n, &u[u_offset], ldu, &v[v_offset], ldv);
+ }
+
+ } else {
+
+/* -#- Full SVD -#- */
+
+ if (! jracc) {
+
+ if (! almort) {
+
+/* Second Preconditioning Step (QRF [with pivoting]) */
+/* Note that the composition of TRANSPOSE, QRF and TRANSPOSE is */
+/* equivalent to an LQF CALL. Since in many libraries the QRF */
+/* seems to be better optimized than the LQF, we do explicit */
+/* transpose and use the QRF. This is subject to changes in an */
+/* optimized implementation of SGEJSV. */
+
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *n - p + 1;
+ scopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1],
+ &c__1);
+/* L1968: */
+ }
+
+/* .. the following two loops perturb small entries to avoid */
+/* denormals in the second QR factorization, where they are */
+/* as good as zeros. This is done to avoid painfully slow */
+/* computation with denormals. The relative size of the perturbation */
+/* is a parameter that can be changed by the implementer. */
+/* This perturbation device will be obsolete on machines with */
+/* properly implemented arithmetic. */
+/* To switch it off, set L2PERT=.FALSE. To remove it from the */
+/* code, remove the action under L2PERT=.TRUE., leave the ELSE part. */
+/* The following two loops should be blocked and fused with the */
+/* transposed copy above. */
+
+ if (l2pert) {
+ xsc = sqrt(small);
+ i__1 = nr;
+ for (q = 1; q <= i__1; ++q) {
+ temp1 = xsc * (r__1 = v[q + q * v_dim1], dabs(r__1));
+ i__2 = *n;
+ for (p = 1; p <= i__2; ++p) {
+ if (p > q && (r__1 = v[p + q * v_dim1], dabs(r__1)
+ ) <= temp1 || p < q) {
+ v[p + q * v_dim1] = r_sign(&temp1, &v[p + q *
+ v_dim1]);
+ }
+ if (p < q) {
+ v[p + q * v_dim1] = -v[p + q * v_dim1];
+ }
+/* L2968: */
+ }
+/* L2969: */
+ }
+ } else {
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ slaset_("U", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 <<
+ 1) + 1], ldv);
+ }
+
+/* Estimate the row scaled condition number of R1 */
+/* (If R1 is rectangular, N > NR, then the condition number */
+/* of the leading NR x NR submatrix is estimated.) */
+
+ slacpy_("L", &nr, &nr, &v[v_offset], ldv, &work[(*n << 1) + 1]
+, &nr);
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = nr - p + 1;
+ temp1 = snrm2_(&i__2, &work[(*n << 1) + (p - 1) * nr + p],
+ &c__1);
+ i__2 = nr - p + 1;
+ r__1 = 1.f / temp1;
+ sscal_(&i__2, &r__1, &work[(*n << 1) + (p - 1) * nr + p],
+ &c__1);
+/* L3950: */
+ }
+ spocon_("Lower", &nr, &work[(*n << 1) + 1], &nr, &c_b35, &
+ temp1, &work[(*n << 1) + nr * nr + 1], &iwork[*m + (*
+ n << 1) + 1], &ierr);
+ condr1 = 1.f / sqrt(temp1);
+/* .. here need a second opinion on the condition number */
+/* .. then assume worst case scenario */
+/* R1 is OK for inverse <=> CONDR1 .LT. FLOAT(N) */
+/* more conservative <=> CONDR1 .LT. SQRT(FLOAT(N)) */
+
+ cond_ok__ = sqrt((real) nr);
+/* [TP] COND_OK is a tuning parameter. */
+ if (condr1 < cond_ok__) {
+/* .. the second QRF without pivoting. Note: in an optimized */
+/* implementation, this QRF should be implemented as the QRF */
+/* of a lower triangular matrix. */
+/* R1^t = Q2 * R2 */
+ i__1 = *lwork - (*n << 1);
+ sgeqrf_(n, &nr, &v[v_offset], ldv, &work[*n + 1], &work[(*
+ n << 1) + 1], &i__1, &ierr);
+
+ if (l2pert) {
+ xsc = sqrt(small) / epsln;
+ i__1 = nr;
+ for (p = 2; p <= i__1; ++p) {
+ i__2 = p - 1;
+ for (q = 1; q <= i__2; ++q) {
+/* Computing MIN */
+ r__3 = (r__1 = v[p + p * v_dim1], dabs(r__1)),
+ r__4 = (r__2 = v[q + q * v_dim1],
+ dabs(r__2));
+ temp1 = xsc * dmin(r__3,r__4);
+ if ((r__1 = v[q + p * v_dim1], dabs(r__1)) <=
+ temp1) {
+ v[q + p * v_dim1] = r_sign(&temp1, &v[q +
+ p * v_dim1]);
+ }
+/* L3958: */
+ }
+/* L3959: */
+ }
+ }
+
+ if (nr != *n) {
+ slacpy_("A", n, &nr, &v[v_offset], ldv, &work[(*n <<
+ 1) + 1], n);
+ }
+/* .. save ... */
+
+/* .. this transposed copy should be better than naive */
+ i__1 = nr - 1;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = nr - p;
+ scopy_(&i__2, &v[p + (p + 1) * v_dim1], ldv, &v[p + 1
+ + p * v_dim1], &c__1);
+/* L1969: */
+ }
+
+ condr2 = condr1;
+
+ } else {
+
+/* .. ill-conditioned case: second QRF with pivoting */
+/* Note that windowed pivoting would be equally good */
+/* numerically, and more run-time efficient. So, in */
+/* an optimal implementation, the next call to SGEQP3 */
+/* should be replaced with eg. CALL SGEQPX (ACM TOMS #782) */
+/* with properly (carefully) chosen parameters. */
+
+/* R1^t * P2 = Q2 * R2 */
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ iwork[*n + p] = 0;
+/* L3003: */
+ }
+ i__1 = *lwork - (*n << 1);
+ sgeqp3_(n, &nr, &v[v_offset], ldv, &iwork[*n + 1], &work[*
+ n + 1], &work[(*n << 1) + 1], &i__1, &ierr);
+/* * CALL SGEQRF( N, NR, V, LDV, WORK(N+1), WORK(2*N+1), */
+/* * & LWORK-2*N, IERR ) */
+ if (l2pert) {
+ xsc = sqrt(small);
+ i__1 = nr;
+ for (p = 2; p <= i__1; ++p) {
+ i__2 = p - 1;
+ for (q = 1; q <= i__2; ++q) {
+/* Computing MIN */
+ r__3 = (r__1 = v[p + p * v_dim1], dabs(r__1)),
+ r__4 = (r__2 = v[q + q * v_dim1],
+ dabs(r__2));
+ temp1 = xsc * dmin(r__3,r__4);
+ if ((r__1 = v[q + p * v_dim1], dabs(r__1)) <=
+ temp1) {
+ v[q + p * v_dim1] = r_sign(&temp1, &v[q +
+ p * v_dim1]);
+ }
+/* L3968: */
+ }
+/* L3969: */
+ }
+ }
+
+ slacpy_("A", n, &nr, &v[v_offset], ldv, &work[(*n << 1) +
+ 1], n);
+
+ if (l2pert) {
+ xsc = sqrt(small);
+ i__1 = nr;
+ for (p = 2; p <= i__1; ++p) {
+ i__2 = p - 1;
+ for (q = 1; q <= i__2; ++q) {
+/* Computing MIN */
+ r__3 = (r__1 = v[p + p * v_dim1], dabs(r__1)),
+ r__4 = (r__2 = v[q + q * v_dim1],
+ dabs(r__2));
+ temp1 = xsc * dmin(r__3,r__4);
+ v[p + q * v_dim1] = -r_sign(&temp1, &v[q + p *
+ v_dim1]);
+/* L8971: */
+ }
+/* L8970: */
+ }
+ } else {
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ slaset_("L", &i__1, &i__2, &c_b34, &c_b34, &v[v_dim1
+ + 2], ldv);
+ }
+/* Now, compute R2 = L3 * Q3, the LQ factorization. */
+ i__1 = *lwork - (*n << 1) - *n * nr - nr;
+ sgelqf_(&nr, &nr, &v[v_offset], ldv, &work[(*n << 1) + *n
+ * nr + 1], &work[(*n << 1) + *n * nr + nr + 1], &
+ i__1, &ierr);
+/* .. and estimate the condition number */
+ slacpy_("L", &nr, &nr, &v[v_offset], ldv, &work[(*n << 1)
+ + *n * nr + nr + 1], &nr);
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ temp1 = snrm2_(&p, &work[(*n << 1) + *n * nr + nr + p]
+, &nr);
+ r__1 = 1.f / temp1;
+ sscal_(&p, &r__1, &work[(*n << 1) + *n * nr + nr + p],
+ &nr);
+/* L4950: */
+ }
+ spocon_("L", &nr, &work[(*n << 1) + *n * nr + nr + 1], &
+ nr, &c_b35, &temp1, &work[(*n << 1) + *n * nr +
+ nr + nr * nr + 1], &iwork[*m + (*n << 1) + 1], &
+ ierr);
+ condr2 = 1.f / sqrt(temp1);
+
+ if (condr2 >= cond_ok__) {
+/* .. save the Householder vectors used for Q3 */
+/* (this overwrites the copy of R2, as it will not be */
+/* needed in this branch, but it does not overwrite the */
+/* Householder vectors of Q2.). */
+ slacpy_("U", &nr, &nr, &v[v_offset], ldv, &work[(*n <<
+ 1) + 1], n);
+/* .. and the rest of the information on Q3 is in */
+/* WORK(2*N+N*NR+1:2*N+N*NR+N) */
+ }
+
+ }
+
+ if (l2pert) {
+ xsc = sqrt(small);
+ i__1 = nr;
+ for (q = 2; q <= i__1; ++q) {
+ temp1 = xsc * v[q + q * v_dim1];
+ i__2 = q - 1;
+ for (p = 1; p <= i__2; ++p) {
+/* V(p,q) = - SIGN( TEMP1, V(q,p) ) */
+ v[p + q * v_dim1] = -r_sign(&temp1, &v[p + q *
+ v_dim1]);
+/* L4969: */
+ }
+/* L4968: */
+ }
+ } else {
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ slaset_("U", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 <<
+ 1) + 1], ldv);
+ }
+
+/* Second preconditioning finished; continue with Jacobi SVD */
+/* The input matrix is lower triangular. */
+
+/* Recover the right singular vectors as solution of a well */
+/* conditioned triangular matrix equation. */
+
+ if (condr1 < cond_ok__) {
+
+ i__1 = *lwork - (*n << 1) - *n * nr - nr;
+ sgesvj_("L", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[
+ 1], &nr, &u[u_offset], ldu, &work[(*n << 1) + *n *
+ nr + nr + 1], &i__1, info);
+ scalem = work[(*n << 1) + *n * nr + nr + 1];
+ numrank = i_nint(&work[(*n << 1) + *n * nr + nr + 2]);
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ scopy_(&nr, &v[p * v_dim1 + 1], &c__1, &u[p * u_dim1
+ + 1], &c__1);
+ sscal_(&nr, &sva[p], &v[p * v_dim1 + 1], &c__1);
+/* L3970: */
+ }
+/* .. pick the right matrix equation and solve it */
+
+ if (nr == *n) {
+/* :)) .. best case, R1 is inverted. The solution of this matrix */
+/* equation is Q2*V2 = the product of the Jacobi rotations */
+/* used in SGESVJ, premultiplied with the orthogonal matrix */
+/* from the second QR factorization. */
+ strsm_("L", "U", "N", "N", &nr, &nr, &c_b35, &a[
+ a_offset], lda, &v[v_offset], ldv);
+ } else {
+/* .. R1 is well conditioned, but non-square. Transpose(R2) */
+/* is inverted to get the product of the Jacobi rotations */
+/* used in SGESVJ. The Q-factor from the second QR */
+/* factorization is then built in explicitly. */
+ strsm_("L", "U", "T", "N", &nr, &nr, &c_b35, &work[(*
+ n << 1) + 1], n, &v[v_offset], ldv);
+ if (nr < *n) {
+ i__1 = *n - nr;
+ slaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr +
+ 1 + v_dim1], ldv);
+ i__1 = *n - nr;
+ slaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr +
+ 1) * v_dim1 + 1], ldv);
+ i__1 = *n - nr;
+ i__2 = *n - nr;
+ slaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr
+ + 1 + (nr + 1) * v_dim1], ldv);
+ }
+ i__1 = *lwork - (*n << 1) - *n * nr - nr;
+ sormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n,
+ &work[*n + 1], &v[v_offset], ldv, &work[(*n <<
+ 1) + *n * nr + nr + 1], &i__1, &ierr);
+ }
+
+ } else if (condr2 < cond_ok__) {
+
+/* :) .. the input matrix A is very likely a relative of */
+/* the Kahan matrix :) */
+/* The matrix R2 is inverted. The solution of the matrix equation */
+/* is Q3^T*V3 = the product of the Jacobi rotations (applied to */
+/* the lower triangular L3 from the LQ factorization of */
+/* R2=L3*Q3), pre-multiplied with the transposed Q3. */
+ i__1 = *lwork - (*n << 1) - *n * nr - nr;
+ sgesvj_("L", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[
+ 1], &nr, &u[u_offset], ldu, &work[(*n << 1) + *n *
+ nr + nr + 1], &i__1, info);
+ scalem = work[(*n << 1) + *n * nr + nr + 1];
+ numrank = i_nint(&work[(*n << 1) + *n * nr + nr + 2]);
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ scopy_(&nr, &v[p * v_dim1 + 1], &c__1, &u[p * u_dim1
+ + 1], &c__1);
+ sscal_(&nr, &sva[p], &u[p * u_dim1 + 1], &c__1);
+/* L3870: */
+ }
+ strsm_("L", "U", "N", "N", &nr, &nr, &c_b35, &work[(*n <<
+ 1) + 1], n, &u[u_offset], ldu);
+/* .. apply the permutation from the second QR factorization */
+ i__1 = nr;
+ for (q = 1; q <= i__1; ++q) {
+ i__2 = nr;
+ for (p = 1; p <= i__2; ++p) {
+ work[(*n << 1) + *n * nr + nr + iwork[*n + p]] =
+ u[p + q * u_dim1];
+/* L872: */
+ }
+ i__2 = nr;
+ for (p = 1; p <= i__2; ++p) {
+ u[p + q * u_dim1] = work[(*n << 1) + *n * nr + nr
+ + p];
+/* L874: */
+ }
+/* L873: */
+ }
+ if (nr < *n) {
+ i__1 = *n - nr;
+ slaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 +
+ v_dim1], ldv);
+ i__1 = *n - nr;
+ slaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) *
+ v_dim1 + 1], ldv);
+ i__1 = *n - nr;
+ i__2 = *n - nr;
+ slaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1
+ + (nr + 1) * v_dim1], ldv);
+ }
+ i__1 = *lwork - (*n << 1) - *n * nr - nr;
+ sormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n, &
+ work[*n + 1], &v[v_offset], ldv, &work[(*n << 1)
+ + *n * nr + nr + 1], &i__1, &ierr);
+ } else {
+/* Last line of defense. */
+/* #:( This is a rather pathological case: no scaled condition */
+/* improvement after two pivoted QR factorizations. Other */
+/* possibility is that the rank revealing QR factorization */
+/* or the condition estimator has failed, or the COND_OK */
+/* is set very close to ONE (which is unnecessary). Normally, */
+/* this branch should never be executed, but in rare cases of */
+/* failure of the RRQR or condition estimator, the last line of */
+/* defense ensures that SGEJSV completes the task. */
+/* Compute the full SVD of L3 using SGESVJ with explicit */
+/* accumulation of Jacobi rotations. */
+ i__1 = *lwork - (*n << 1) - *n * nr - nr;
+ sgesvj_("L", "U", "V", &nr, &nr, &v[v_offset], ldv, &sva[
+ 1], &nr, &u[u_offset], ldu, &work[(*n << 1) + *n *
+ nr + nr + 1], &i__1, info);
+ scalem = work[(*n << 1) + *n * nr + nr + 1];
+ numrank = i_nint(&work[(*n << 1) + *n * nr + nr + 2]);
+ if (nr < *n) {
+ i__1 = *n - nr;
+ slaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 +
+ v_dim1], ldv);
+ i__1 = *n - nr;
+ slaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) *
+ v_dim1 + 1], ldv);
+ i__1 = *n - nr;
+ i__2 = *n - nr;
+ slaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1
+ + (nr + 1) * v_dim1], ldv);
+ }
+ i__1 = *lwork - (*n << 1) - *n * nr - nr;
+ sormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n, &
+ work[*n + 1], &v[v_offset], ldv, &work[(*n << 1)
+ + *n * nr + nr + 1], &i__1, &ierr);
+
+ i__1 = *lwork - (*n << 1) - *n * nr - nr;
+ sormlq_("L", "T", &nr, &nr, &nr, &work[(*n << 1) + 1], n,
+ &work[(*n << 1) + *n * nr + 1], &u[u_offset], ldu,
+ &work[(*n << 1) + *n * nr + nr + 1], &i__1, &
+ ierr);
+ i__1 = nr;
+ for (q = 1; q <= i__1; ++q) {
+ i__2 = nr;
+ for (p = 1; p <= i__2; ++p) {
+ work[(*n << 1) + *n * nr + nr + iwork[*n + p]] =
+ u[p + q * u_dim1];
+/* L772: */
+ }
+ i__2 = nr;
+ for (p = 1; p <= i__2; ++p) {
+ u[p + q * u_dim1] = work[(*n << 1) + *n * nr + nr
+ + p];
+/* L774: */
+ }
+/* L773: */
+ }
+
+ }
+
+/* Permute the rows of V using the (column) permutation from the */
+/* first QRF. Also, scale the columns to make them unit in */
+/* Euclidean norm. This applies to all cases. */
+
+ temp1 = sqrt((real) (*n)) * epsln;
+ i__1 = *n;
+ for (q = 1; q <= i__1; ++q) {
+ i__2 = *n;
+ for (p = 1; p <= i__2; ++p) {
+ work[(*n << 1) + *n * nr + nr + iwork[p]] = v[p + q *
+ v_dim1];
+/* L972: */
+ }
+ i__2 = *n;
+ for (p = 1; p <= i__2; ++p) {
+ v[p + q * v_dim1] = work[(*n << 1) + *n * nr + nr + p]
+ ;
+/* L973: */
+ }
+ xsc = 1.f / snrm2_(n, &v[q * v_dim1 + 1], &c__1);
+ if (xsc < 1.f - temp1 || xsc > temp1 + 1.f) {
+ sscal_(n, &xsc, &v[q * v_dim1 + 1], &c__1);
+ }
+/* L1972: */
+ }
+/* At this moment, V contains the right singular vectors of A. */
+/* Next, assemble the left singular vector matrix U (M x N). */
+ if (nr < *m) {
+ i__1 = *m - nr;
+ slaset_("A", &i__1, &nr, &c_b34, &c_b34, &u[nr + 1 +
+ u_dim1], ldu);
+ if (nr < n1) {
+ i__1 = n1 - nr;
+ slaset_("A", &nr, &i__1, &c_b34, &c_b34, &u[(nr + 1) *
+ u_dim1 + 1], ldu);
+ i__1 = *m - nr;
+ i__2 = n1 - nr;
+ slaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[nr + 1
+ + (nr + 1) * u_dim1], ldu);
+ }
+ }
+
+/* The Q matrix from the first QRF is built into the left singular */
+/* matrix U. This applies to all cases. */
+
+ i__1 = *lwork - *n;
+ sormqr_("Left", "No_Tr", m, &n1, n, &a[a_offset], lda, &work[
+ 1], &u[u_offset], ldu, &work[*n + 1], &i__1, &ierr);
+/* The columns of U are normalized. The cost is O(M*N) flops. */
+ temp1 = sqrt((real) (*m)) * epsln;
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ xsc = 1.f / snrm2_(m, &u[p * u_dim1 + 1], &c__1);
+ if (xsc < 1.f - temp1 || xsc > temp1 + 1.f) {
+ sscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
+ }
+/* L1973: */
+ }
+
+/* If the initial QRF is computed with row pivoting, the left */
+/* singular vectors must be adjusted. */
+
+ if (rowpiv) {
+ i__1 = *m - 1;
+ slaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n
+ << 1) + 1], &c_n1);
+ }
+
+ } else {
+
+/* .. the initial matrix A has almost orthogonal columns and */
+/* the second QRF is not needed */
+
+ slacpy_("Upper", n, n, &a[a_offset], lda, &work[*n + 1], n);
+ if (l2pert) {
+ xsc = sqrt(small);
+ i__1 = *n;
+ for (p = 2; p <= i__1; ++p) {
+ temp1 = xsc * work[*n + (p - 1) * *n + p];
+ i__2 = p - 1;
+ for (q = 1; q <= i__2; ++q) {
+ work[*n + (q - 1) * *n + p] = -r_sign(&temp1, &
+ work[*n + (p - 1) * *n + q]);
+/* L5971: */
+ }
+/* L5970: */
+ }
+ } else {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ slaset_("Lower", &i__1, &i__2, &c_b34, &c_b34, &work[*n +
+ 2], n);
+ }
+
+ i__1 = *lwork - *n - *n * *n;
+ sgesvj_("Upper", "U", "N", n, n, &work[*n + 1], n, &sva[1], n,
+ &u[u_offset], ldu, &work[*n + *n * *n + 1], &i__1,
+ info);
+
+ scalem = work[*n + *n * *n + 1];
+ numrank = i_nint(&work[*n + *n * *n + 2]);
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ scopy_(n, &work[*n + (p - 1) * *n + 1], &c__1, &u[p *
+ u_dim1 + 1], &c__1);
+ sscal_(n, &sva[p], &work[*n + (p - 1) * *n + 1], &c__1);
+/* L6970: */
+ }
+
+ strsm_("Left", "Upper", "NoTrans", "No UD", n, n, &c_b35, &a[
+ a_offset], lda, &work[*n + 1], n);
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ scopy_(n, &work[*n + p], n, &v[iwork[p] + v_dim1], ldv);
+/* L6972: */
+ }
+ temp1 = sqrt((real) (*n)) * epsln;
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ xsc = 1.f / snrm2_(n, &v[p * v_dim1 + 1], &c__1);
+ if (xsc < 1.f - temp1 || xsc > temp1 + 1.f) {
+ sscal_(n, &xsc, &v[p * v_dim1 + 1], &c__1);
+ }
+/* L6971: */
+ }
+
+/* Assemble the left singular vector matrix U (M x N). */
+
+ if (*n < *m) {
+ i__1 = *m - *n;
+ slaset_("A", &i__1, n, &c_b34, &c_b34, &u[nr + 1 + u_dim1]
+, ldu);
+ if (*n < n1) {
+ i__1 = n1 - *n;
+ slaset_("A", n, &i__1, &c_b34, &c_b34, &u[(*n + 1) *
+ u_dim1 + 1], ldu);
+ i__1 = *m - *n;
+ i__2 = n1 - *n;
+ slaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[nr + 1
+ + (*n + 1) * u_dim1], ldu);
+ }
+ }
+ i__1 = *lwork - *n;
+ sormqr_("Left", "No Tr", m, &n1, n, &a[a_offset], lda, &work[
+ 1], &u[u_offset], ldu, &work[*n + 1], &i__1, &ierr);
+ temp1 = sqrt((real) (*m)) * epsln;
+ i__1 = n1;
+ for (p = 1; p <= i__1; ++p) {
+ xsc = 1.f / snrm2_(m, &u[p * u_dim1 + 1], &c__1);
+ if (xsc < 1.f - temp1 || xsc > temp1 + 1.f) {
+ sscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
+ }
+/* L6973: */
+ }
+
+ if (rowpiv) {
+ i__1 = *m - 1;
+ slaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n
+ << 1) + 1], &c_n1);
+ }
+
+ }
+
+/* end of the >> almost orthogonal case << in the full SVD */
+
+ } else {
+
+/* This branch deploys a preconditioned Jacobi SVD with explicitly */
+/* accumulated rotations. It is included as optional, mainly for */
+/* experimental purposes. It does perform well, and can also be used. */
+/* In this implementation, this branch will be automatically activated */
+/* if the condition number sigma_max(A) / sigma_min(A) is predicted */
+/* to be greater than the overflow threshold. This is because the */
+/* a posteriori computation of the singular vectors assumes robust */
+/* implementation of BLAS and some LAPACK procedures, capable of working */
+/* in presence of extreme values. Since that is not always the case, ... */
+
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *n - p + 1;
+ scopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1], &
+ c__1);
+/* L7968: */
+ }
+
+ if (l2pert) {
+ xsc = sqrt(small / epsln);
+ i__1 = nr;
+ for (q = 1; q <= i__1; ++q) {
+ temp1 = xsc * (r__1 = v[q + q * v_dim1], dabs(r__1));
+ i__2 = *n;
+ for (p = 1; p <= i__2; ++p) {
+ if (p > q && (r__1 = v[p + q * v_dim1], dabs(r__1)) <=
+ temp1 || p < q) {
+ v[p + q * v_dim1] = r_sign(&temp1, &v[p + q *
+ v_dim1]);
+ }
+ if (p < q) {
+ v[p + q * v_dim1] = -v[p + q * v_dim1];
+ }
+/* L5968: */
+ }
+/* L5969: */
+ }
+ } else {
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ slaset_("U", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
+ 1], ldv);
+ }
+ i__1 = *lwork - (*n << 1);
+ sgeqrf_(n, &nr, &v[v_offset], ldv, &work[*n + 1], &work[(*n << 1)
+ + 1], &i__1, &ierr);
+ slacpy_("L", n, &nr, &v[v_offset], ldv, &work[(*n << 1) + 1], n);
+
+ i__1 = nr;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = nr - p + 1;
+ scopy_(&i__2, &v[p + p * v_dim1], ldv, &u[p + p * u_dim1], &
+ c__1);
+/* L7969: */
+ }
+ if (l2pert) {
+ xsc = sqrt(small / epsln);
+ i__1 = nr;
+ for (q = 2; q <= i__1; ++q) {
+ i__2 = q - 1;
+ for (p = 1; p <= i__2; ++p) {
+/* Computing MIN */
+ r__3 = (r__1 = u[p + p * u_dim1], dabs(r__1)), r__4 =
+ (r__2 = u[q + q * u_dim1], dabs(r__2));
+ temp1 = xsc * dmin(r__3,r__4);
+ u[p + q * u_dim1] = -r_sign(&temp1, &u[q + p * u_dim1]
+ );
+/* L9971: */
+ }
+/* L9970: */
+ }
+ } else {
+ i__1 = nr - 1;
+ i__2 = nr - 1;
+ slaset_("U", &i__1, &i__2, &c_b34, &c_b34, &u[(u_dim1 << 1) +
+ 1], ldu);
+ }
+ i__1 = *lwork - (*n << 1) - *n * nr;
+ sgesvj_("L", "U", "V", &nr, &nr, &u[u_offset], ldu, &sva[1], n, &
+ v[v_offset], ldv, &work[(*n << 1) + *n * nr + 1], &i__1,
+ info);
+ scalem = work[(*n << 1) + *n * nr + 1];
+ numrank = i_nint(&work[(*n << 1) + *n * nr + 2]);
+ if (nr < *n) {
+ i__1 = *n - nr;
+ slaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 + v_dim1],
+ ldv);
+ i__1 = *n - nr;
+ slaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) * v_dim1
+ + 1], ldv);
+ i__1 = *n - nr;
+ i__2 = *n - nr;
+ slaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1 + (nr +
+ 1) * v_dim1], ldv);
+ }
+ i__1 = *lwork - (*n << 1) - *n * nr - nr;
+ sormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n, &work[*n +
+ 1], &v[v_offset], ldv, &work[(*n << 1) + *n * nr + nr + 1]
+, &i__1, &ierr);
+
+/* Permute the rows of V using the (column) permutation from the */
+/* first QRF. Also, scale the columns to make them unit in */
+/* Euclidean norm. This applies to all cases. */
+
+ temp1 = sqrt((real) (*n)) * epsln;
+ i__1 = *n;
+ for (q = 1; q <= i__1; ++q) {
+ i__2 = *n;
+ for (p = 1; p <= i__2; ++p) {
+ work[(*n << 1) + *n * nr + nr + iwork[p]] = v[p + q *
+ v_dim1];
+/* L8972: */
+ }
+ i__2 = *n;
+ for (p = 1; p <= i__2; ++p) {
+ v[p + q * v_dim1] = work[(*n << 1) + *n * nr + nr + p];
+/* L8973: */
+ }
+ xsc = 1.f / snrm2_(n, &v[q * v_dim1 + 1], &c__1);
+ if (xsc < 1.f - temp1 || xsc > temp1 + 1.f) {
+ sscal_(n, &xsc, &v[q * v_dim1 + 1], &c__1);
+ }
+/* L7972: */
+ }
+
+/* At this moment, V contains the right singular vectors of A. */
+/* Next, assemble the left singular vector matrix U (M x N). */
+
+ if (*n < *m) {
+ i__1 = *m - *n;
+ slaset_("A", &i__1, n, &c_b34, &c_b34, &u[nr + 1 + u_dim1],
+ ldu);
+ if (*n < n1) {
+ i__1 = n1 - *n;
+ slaset_("A", n, &i__1, &c_b34, &c_b34, &u[(*n + 1) *
+ u_dim1 + 1], ldu);
+ i__1 = *m - *n;
+ i__2 = n1 - *n;
+ slaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[nr + 1 + (*
+ n + 1) * u_dim1], ldu);
+ }
+ }
+
+ i__1 = *lwork - *n;
+ sormqr_("Left", "No Tr", m, &n1, n, &a[a_offset], lda, &work[1], &
+ u[u_offset], ldu, &work[*n + 1], &i__1, &ierr);
+
+ if (rowpiv) {
+ i__1 = *m - 1;
+ slaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n << 1)
+ + 1], &c_n1);
+ }
+
+
+ }
+ if (transp) {
+/* .. swap U and V because the procedure worked on A^t */
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ sswap_(n, &u[p * u_dim1 + 1], &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+/* L6974: */
+ }
+ }
+
+ }
+/* end of the full SVD */
+
+/* Undo scaling, if necessary (and possible) */
+
+ if (uscal2 <= big / sva[1] * uscal1) {
+ slascl_("G", &c__0, &c__0, &uscal1, &uscal2, &nr, &c__1, &sva[1], n, &
+ ierr);
+ uscal1 = 1.f;
+ uscal2 = 1.f;
+ }
+
+ if (nr < *n) {
+ i__1 = *n;
+ for (p = nr + 1; p <= i__1; ++p) {
+ sva[p] = 0.f;
+/* L3004: */
+ }
+ }
+
+ work[1] = uscal2 * scalem;
+ work[2] = uscal1;
+ if (errest) {
+ work[3] = sconda;
+ }
+ if (lsvec && rsvec) {
+ work[4] = condr1;
+ work[5] = condr2;
+ }
+ if (l2tran) {
+ work[6] = entra;
+ work[7] = entrat;
+ }
+
+ iwork[1] = nr;
+ iwork[2] = numrank;
+ iwork[3] = warning;
+
+ return 0;
+/* .. */
+/* .. END OF SGEJSV */
+/* .. */
+} /* sgejsv_ */
diff --git a/contrib/libs/clapack/sgelq2.c b/contrib/libs/clapack/sgelq2.c
new file mode 100644
index 0000000000..a4082b4067
--- /dev/null
+++ b/contrib/libs/clapack/sgelq2.c
@@ -0,0 +1,157 @@
+/* sgelq2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sgelq2_(integer *m, integer *n, real *a, integer *lda, 
+	real *tau, real *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__, k;
+    real aii;
+    extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *, 
+	    integer *, real *, real *, integer *, real *), xerbla_(
+	    char *, integer *), slarfp_(integer *, real *, real *, 
+	    integer *, real *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGELQ2 computes an LQ factorization of a real m by n matrix A: */
+/* A = L * Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the m by n matrix A. */
+/* On exit, the elements on and below the diagonal of the array */
+/* contain the m by min(m,n) lower trapezoidal matrix L (L is */
+/* lower triangular if m <= n); the elements above the diagonal, */
+/* with the array TAU, represent the orthogonal matrix Q as a */
+/* product of elementary reflectors (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) REAL array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) REAL array, dimension (M) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i,i+1:n), */
+/* and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGELQ2", &i__1);
+	return 0;
+    }
+
+/* K is the number of elementary reflectors generated (length of TAU). */
+    k = min(*m,*n);
+
+    i__1 = k;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i,i+1:n) */
+
+	i__2 = *n - i__ + 1;
+/* Computing MIN */
+/* min(i+1,n) keeps the v-vector pointer in range on the last column. */
+	i__3 = i__ + 1;
+	slarfp_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3, *n)* a_dim1]
+, lda, &tau[i__]);
+	if (i__ < *m) {
+
+/* Apply H(i) to A(i+1:m,i:n) from the right */
+
+/* Temporarily set A(i,i) = 1 so the stored row A(i,i:n) is the */
+/* complete reflector vector v (its leading entry is implicitly 1). */
+	    aii = a[i__ + i__ * a_dim1];
+	    a[i__ + i__ * a_dim1] = 1.f;
+	    i__2 = *m - i__;
+	    i__3 = *n - i__ + 1;
+	    slarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[
+		    i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+/* Restore the diagonal entry saved in AII above. */
+	    a[i__ + i__ * a_dim1] = aii;
+	}
+/* L10: */
+    }
+    return 0;
+
+/* End of SGELQ2 */
+
+} /* sgelq2_ */
diff --git a/contrib/libs/clapack/sgelqf.c b/contrib/libs/clapack/sgelqf.c
new file mode 100644
index 0000000000..a3eb98fcd2
--- /dev/null
+++ b/contrib/libs/clapack/sgelqf.c
@@ -0,0 +1,251 @@
+/* sgelqf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c passes every argument by reference, so integer literals used in */
+/* calls below (e.g. the ISPEC arguments to ILAENV) must live in */
+/* addressable static storage. */
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int sgelqf_(integer *m, integer *n, real *a, integer *lda, 
+	real *tau, real *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int sgelq2_(integer *, integer *, real *, integer 
+	    *, real *, real *, integer *), slarfb_(char *, char *, char *, 
+	    char *, integer *, integer *, integer *, real *, integer *, real *
+, integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *, 
+	    real *, integer *, real *, real *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGELQF computes an LQ factorization of a real M-by-N matrix A: */
+/* A = L * Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the elements on and below the diagonal of the array */
+/* contain the m-by-min(m,n) lower trapezoidal matrix L (L is */
+/* lower triangular if m <= n); the elements above the diagonal, */
+/* with the array TAU, represent the orthogonal matrix Q as a */
+/* product of elementary reflectors (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) REAL array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i,i+1:n), */
+/* and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+/* Query the optimal block size NB and report the optimal workspace in */
+/* WORK(1) up front, so it is set on both the query and normal paths. */
+    nb = ilaenv_(&c__1, "SGELQF", " ", m, n, &c_n1, &c_n1);
+    lwkopt = *m * nb;
+    work[1] = (real) lwkopt;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    } else if (*lwork < max(1,*m) && ! lquery) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGELQF", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    k = min(*m,*n);
+    if (k == 0) {
+	work[1] = 1.f;
+	return 0;
+    }
+
+/* Defaults assume unblocked code: no crossover point and IWS = M. */
+    nbmin = 2;
+    nx = 0;
+    iws = *m;
+    if (nb > 1 && nb < k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+	i__1 = 0, i__2 = ilaenv_(&c__3, "SGELQF", " ", m, n, &c_n1, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *m;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "SGELQF", " ", m, n, &c_n1, &
+			c_n1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially */
+
+	i__1 = k - nx;
+	i__2 = nb;
+	for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+	    i__3 = k - i__ + 1;
+	    ib = min(i__3,nb);
+
+/* Compute the LQ factorization of the current block */
+/* A(i:i+ib-1,i:n) */
+
+	    i__3 = *n - i__ + 1;
+	    sgelq2_(&ib, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
+		    1], &iinfo);
+	    if (i__ + ib <= *m) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+		i__3 = *n - i__ + 1;
+		slarft_("Forward", "Rowwise", &i__3, &ib, &a[i__ + i__ * 
+			a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(i+ib:m,i:n) from the right */
+
+		i__3 = *m - i__ - ib + 1;
+		i__4 = *n - i__ + 1;
+		slarfb_("Right", "No transpose", "Forward", "Rowwise", &i__3, 
+			&i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
+			ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib + 
+			1], &ldwork);
+	    }
+/* L10: */
+	}
+    } else {
+	i__ = 1;
+    }
+
+/* Use unblocked code to factor the last or only block. */
+
+    if (i__ <= k) {
+	i__2 = *m - i__ + 1;
+	i__1 = *n - i__ + 1;
+	sgelq2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
+, &iinfo);
+    }
+
+/* Report the workspace size for the block size actually used. */
+    work[1] = (real) iws;
+    return 0;
+
+/* End of SGELQF */
+
+} /* sgelqf_ */
diff --git a/contrib/libs/clapack/sgels.c b/contrib/libs/clapack/sgels.c
new file mode 100644
index 0000000000..2cb9e73ad5
--- /dev/null
+++ b/contrib/libs/clapack/sgels.c
@@ -0,0 +1,513 @@
+/* sgels.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b33 = 0.f;
+static integer c__0 = 0;
+
+/* Subroutine */ int sgels_(char *trans, integer *m, integer *n, integer *
+ nrhs, real *a, integer *lda, real *b, integer *ldb, real *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, nb, mn;
+ real anrm, bnrm;
+ integer brow;
+ logical tpsd;
+ integer iascl, ibscl;
+ extern logical lsame_(char *, char *);
+ integer wsize;
+ real rwork[1];
+ extern /* Subroutine */ int slabad_(real *, real *);
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer scllen;
+ real bignum;
+ extern /* Subroutine */ int sgelqf_(integer *, integer *, real *, integer
+ *, real *, real *, integer *, integer *), slascl_(char *, integer
+ *, integer *, real *, real *, integer *, integer *, real *,
+ integer *, integer *), sgeqrf_(integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *), slaset_(char
+ *, integer *, integer *, real *, real *, real *, integer *);
+ real smlnum;
+ extern /* Subroutine */ int sormlq_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+ logical lquery;
+ extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *), strtrs_(char *, char *,
+ char *, integer *, integer *, real *, integer *, real *, integer *
+, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGELS solves overdetermined or underdetermined real linear systems */
+/* involving an M-by-N matrix A, or its transpose, using a QR or LQ */
+/* factorization of A. It is assumed that A has full rank. */
+
+/* The following options are provided: */
+
+/* 1. If TRANS = 'N' and m >= n: find the least squares solution of */
+/* an overdetermined system, i.e., solve the least squares problem */
+/* minimize || B - A*X ||. */
+
+/* 2. If TRANS = 'N' and m < n: find the minimum norm solution of */
+/* an underdetermined system A * X = B. */
+
+/* 3. If TRANS = 'T' and m >= n: find the minimum norm solution of */
+/* an undetermined system A**T * X = B. */
+
+/* 4. If TRANS = 'T' and m < n: find the least squares solution of */
+/* an overdetermined system, i.e., solve the least squares problem */
+/* minimize || B - A**T * X ||. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution */
+/* matrix X. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': the linear system involves A; */
+/* = 'T': the linear system involves A**T. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of */
+/* columns of the matrices B and X. NRHS >=0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* if M >= N, A is overwritten by details of its QR */
+/* factorization as returned by SGEQRF; */
+/* if M < N, A is overwritten by details of its LQ */
+/* factorization as returned by SGELQF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the matrix B of right hand side vectors, stored */
+/* columnwise; B is M-by-NRHS if TRANS = 'N', or N-by-NRHS */
+/* if TRANS = 'T'. */
+/* On exit, if INFO = 0, B is overwritten by the solution */
+/* vectors, stored columnwise: */
+/* if TRANS = 'N' and m >= n, rows 1 to n of B contain the least */
+/* squares solution vectors; the residual sum of squares for the */
+/* solution in each column is given by the sum of squares of */
+/* elements N+1 to M in that column; */
+/* if TRANS = 'N' and m < n, rows 1 to N of B contain the */
+/* minimum norm solution vectors; */
+/* if TRANS = 'T' and m >= n, rows 1 to M of B contain the */
+/* minimum norm solution vectors; */
+/* if TRANS = 'T' and m < n, rows 1 to M of B contain the */
+/* least squares solution vectors; the residual sum of squares */
+/* for the solution in each column is given by the sum of */
+/* squares of elements M+1 to N in that column. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= MAX(1,M,N). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* LWORK >= max( 1, MN + max( MN, NRHS ) ). */
+/* For optimal performance, */
+/* LWORK >= max( 1, MN + max( MN, NRHS )*NB ). */
+/* where MN = min(M,N) and NB is the optimum block size. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element of the */
+/* triangular factor of A is zero, so that A does not have */
+/* full rank; the least squares solution could not be */
+/* computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ mn = min(*m,*n);
+ lquery = *lwork == -1;
+ if (! (lsame_(trans, "N") || lsame_(trans, "T"))) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*m)) {
+ *info = -6;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = max(1,*m);
+ if (*ldb < max(i__1,*n)) {
+ *info = -8;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = 1, i__2 = mn + max(mn,*nrhs);
+ if (*lwork < max(i__1,i__2) && ! lquery) {
+ *info = -10;
+ }
+ }
+ }
+
+/* Figure out optimal block size */
+
+ if (*info == 0 || *info == -10) {
+
+ tpsd = TRUE_;
+ if (lsame_(trans, "N")) {
+ tpsd = FALSE_;
+ }
+
+ if (*m >= *n) {
+ nb = ilaenv_(&c__1, "SGEQRF", " ", m, n, &c_n1, &c_n1);
+ if (tpsd) {
+/* Computing MAX */
+ i__1 = nb, i__2 = ilaenv_(&c__1, "SORMQR", "LN", m, nrhs, n, &
+ c_n1);
+ nb = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = nb, i__2 = ilaenv_(&c__1, "SORMQR", "LT", m, nrhs, n, &
+ c_n1);
+ nb = max(i__1,i__2);
+ }
+ } else {
+ nb = ilaenv_(&c__1, "SGELQF", " ", m, n, &c_n1, &c_n1);
+ if (tpsd) {
+/* Computing MAX */
+ i__1 = nb, i__2 = ilaenv_(&c__1, "SORMLQ", "LT", n, nrhs, m, &
+ c_n1);
+ nb = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = nb, i__2 = ilaenv_(&c__1, "SORMLQ", "LN", n, nrhs, m, &
+ c_n1);
+ nb = max(i__1,i__2);
+ }
+ }
+
+/* Computing MAX */
+ i__1 = 1, i__2 = mn + max(mn,*nrhs) * nb;
+ wsize = max(i__1,i__2);
+ work[1] = (real) wsize;
+
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGELS ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+/* Computing MIN */
+ i__1 = min(*m,*n);
+ if (min(i__1,*nrhs) == 0) {
+ i__1 = max(*m,*n);
+ slaset_("Full", &i__1, nrhs, &c_b33, &c_b33, &b[b_offset], ldb);
+ return 0;
+ }
+
+/* Get machine parameters */
+
+ smlnum = slamch_("S") / slamch_("P");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+
+/* Scale A, B if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = slange_("M", m, n, &a[a_offset], lda, rwork);
+ iascl = 0;
+ if (anrm > 0.f && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ slascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ slascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.f) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ slaset_("F", &i__1, nrhs, &c_b33, &c_b33, &b[b_offset], ldb);
+ goto L50;
+ }
+
+ brow = *m;
+ if (tpsd) {
+ brow = *n;
+ }
+ bnrm = slange_("M", &brow, nrhs, &b[b_offset], ldb, rwork);
+ ibscl = 0;
+ if (bnrm > 0.f && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ slascl_("G", &c__0, &c__0, &bnrm, &smlnum, &brow, nrhs, &b[b_offset],
+ ldb, info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ slascl_("G", &c__0, &c__0, &bnrm, &bignum, &brow, nrhs, &b[b_offset],
+ ldb, info);
+ ibscl = 2;
+ }
+
+ if (*m >= *n) {
+
+/* compute QR factorization of A */
+
+ i__1 = *lwork - mn;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[1], &work[mn + 1], &i__1, info)
+ ;
+
+/* workspace at least N, optimally N*NB */
+
+ if (! tpsd) {
+
+/* Least-Squares Problem min || A * X - B || */
+
+/* B(1:M,1:NRHS) := Q' * B(1:M,1:NRHS) */
+
+ i__1 = *lwork - mn;
+ sormqr_("Left", "Transpose", m, nrhs, n, &a[a_offset], lda, &work[
+ 1], &b[b_offset], ldb, &work[mn + 1], &i__1, info);
+
+/* workspace at least NRHS, optimally NRHS*NB */
+
+/* B(1:N,1:NRHS) := inv(R) * B(1:N,1:NRHS) */
+
+ strtrs_("Upper", "No transpose", "Non-unit", n, nrhs, &a[a_offset]
+, lda, &b[b_offset], ldb, info);
+
+ if (*info > 0) {
+ return 0;
+ }
+
+ scllen = *n;
+
+ } else {
+
+/* Overdetermined system of equations A' * X = B */
+
+/* B(1:N,1:NRHS) := inv(R') * B(1:N,1:NRHS) */
+
+ strtrs_("Upper", "Transpose", "Non-unit", n, nrhs, &a[a_offset],
+ lda, &b[b_offset], ldb, info);
+
+ if (*info > 0) {
+ return 0;
+ }
+
+/* B(N+1:M,1:NRHS) = ZERO */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = *n + 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+
+/* B(1:M,1:NRHS) := Q(1:N,:) * B(1:N,1:NRHS) */
+
+ i__1 = *lwork - mn;
+ sormqr_("Left", "No transpose", m, nrhs, n, &a[a_offset], lda, &
+ work[1], &b[b_offset], ldb, &work[mn + 1], &i__1, info);
+
+/* workspace at least NRHS, optimally NRHS*NB */
+
+ scllen = *m;
+
+ }
+
+ } else {
+
+/* Compute LQ factorization of A */
+
+ i__1 = *lwork - mn;
+ sgelqf_(m, n, &a[a_offset], lda, &work[1], &work[mn + 1], &i__1, info)
+ ;
+
+/* workspace at least M, optimally M*NB. */
+
+ if (! tpsd) {
+
+/* underdetermined system of equations A * X = B */
+
+/* B(1:M,1:NRHS) := inv(L) * B(1:M,1:NRHS) */
+
+ strtrs_("Lower", "No transpose", "Non-unit", m, nrhs, &a[a_offset]
+, lda, &b[b_offset], ldb, info);
+
+ if (*info > 0) {
+ return 0;
+ }
+
+/* B(M+1:N,1:NRHS) = 0 */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = *m + 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* B(1:N,1:NRHS) := Q(1:N,:)' * B(1:M,1:NRHS) */
+
+ i__1 = *lwork - mn;
+ sormlq_("Left", "Transpose", n, nrhs, m, &a[a_offset], lda, &work[
+ 1], &b[b_offset], ldb, &work[mn + 1], &i__1, info);
+
+/* workspace at least NRHS, optimally NRHS*NB */
+
+ scllen = *n;
+
+ } else {
+
+/* overdetermined system min || A' * X - B || */
+
+/* B(1:N,1:NRHS) := Q * B(1:N,1:NRHS) */
+
+ i__1 = *lwork - mn;
+ sormlq_("Left", "No transpose", n, nrhs, m, &a[a_offset], lda, &
+ work[1], &b[b_offset], ldb, &work[mn + 1], &i__1, info);
+
+/* workspace at least NRHS, optimally NRHS*NB */
+
+/* B(1:M,1:NRHS) := inv(L') * B(1:M,1:NRHS) */
+
+ strtrs_("Lower", "Transpose", "Non-unit", m, nrhs, &a[a_offset],
+ lda, &b[b_offset], ldb, info);
+
+ if (*info > 0) {
+ return 0;
+ }
+
+ scllen = *m;
+
+ }
+
+ }
+
+/* Undo scaling */
+
+ if (iascl == 1) {
+ slascl_("G", &c__0, &c__0, &anrm, &smlnum, &scllen, nrhs, &b[b_offset]
+, ldb, info);
+ } else if (iascl == 2) {
+ slascl_("G", &c__0, &c__0, &anrm, &bignum, &scllen, nrhs, &b[b_offset]
+, ldb, info);
+ }
+ if (ibscl == 1) {
+ slascl_("G", &c__0, &c__0, &smlnum, &bnrm, &scllen, nrhs, &b[b_offset]
+, ldb, info);
+ } else if (ibscl == 2) {
+ slascl_("G", &c__0, &c__0, &bignum, &bnrm, &scllen, nrhs, &b[b_offset]
+, ldb, info);
+ }
+
+L50:
+ work[1] = (real) wsize;
+
+ return 0;
+
+/* End of SGELS */
+
+} /* sgels_ */
diff --git a/contrib/libs/clapack/sgelsd.c b/contrib/libs/clapack/sgelsd.c
new file mode 100644
index 0000000000..8d2b9119a1
--- /dev/null
+++ b/contrib/libs/clapack/sgelsd.c
@@ -0,0 +1,699 @@
+/* sgelsd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__9 = 9;
+static integer c__0 = 0;
+static integer c__6 = 6;
+static integer c_n1 = -1;
+static integer c__1 = 1;
+static real c_b81 = 0.f;
+
+/* Subroutine */ int sgelsd_(integer *m, integer *n, integer *nrhs, real *a,
+ integer *lda, real *b, integer *ldb, real *s, real *rcond, integer *
+ rank, real *work, integer *lwork, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+
+ /* Builtin functions */
+ double log(doublereal);
+
+ /* Local variables */
+ integer ie, il, mm;
+ real eps, anrm, bnrm;
+ integer itau, nlvl, iascl, ibscl;
+ real sfmin;
+ integer minmn, maxmn, itaup, itauq, mnthr, nwork;
+ extern /* Subroutine */ int slabad_(real *, real *), sgebrd_(integer *,
+ integer *, real *, integer *, real *, real *, real *, real *,
+ real *, integer *, integer *);
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ real bignum;
+ extern /* Subroutine */ int sgelqf_(integer *, integer *, real *, integer
+ *, real *, real *, integer *, integer *), slalsd_(char *, integer
+ *, integer *, integer *, real *, real *, real *, integer *, real *
+, integer *, real *, integer *, integer *), slascl_(char *
+, integer *, integer *, real *, real *, integer *, integer *,
+ real *, integer *, integer *);
+ integer wlalsd;
+ extern /* Subroutine */ int sgeqrf_(integer *, integer *, real *, integer
+ *, real *, real *, integer *, integer *), slacpy_(char *, integer
+ *, integer *, real *, integer *, real *, integer *),
+ slaset_(char *, integer *, integer *, real *, real *, real *,
+ integer *);
+ integer ldwork;
+ extern /* Subroutine */ int sormbr_(char *, char *, char *, integer *,
+ integer *, integer *, real *, integer *, real *, real *, integer *
+, real *, integer *, integer *);
+ integer liwork, minwrk, maxwrk;
+ real smlnum;
+ extern /* Subroutine */ int sormlq_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+ logical lquery;
+ integer smlsiz;
+ extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGELSD computes the minimum-norm solution to a real linear least */
+/* squares problem: */
+/* minimize 2-norm(| b - A*x |) */
+/* using the singular value decomposition (SVD) of A. A is an M-by-N */
+/* matrix which may be rank-deficient. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution */
+/* matrix X. */
+
+/* The problem is solved in three steps: */
+/* (1) Reduce the coefficient matrix A to bidiagonal form with */
+/* Householder transformations, reducing the original problem */
+/* into a "bidiagonal least squares problem" (BLS) */
+/* (2) Solve the BLS using a divide and conquer approach. */
+/* (3) Apply back all the Householder tranformations to solve */
+/* the original least squares problem. */
+
+/* The effective rank of A is determined by treating as zero those */
+/* singular values which are less than RCOND times the largest singular */
+/* value. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A has been destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the M-by-NRHS right hand side matrix B. */
+/* On exit, B is overwritten by the N-by-NRHS solution */
+/* matrix X. If m >= n and RANK = n, the residual */
+/* sum-of-squares for the solution in the i-th column is given */
+/* by the sum of squares of elements n+1:m in that column. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,max(M,N)). */
+
+/* S (output) REAL array, dimension (min(M,N)) */
+/* The singular values of A in decreasing order. */
+/* The condition number of A in the 2-norm = S(1)/S(min(m,n)). */
+
+/* RCOND (input) REAL */
+/* RCOND is used to determine the effective rank of A. */
+/* Singular values S(i) <= RCOND*S(1) are treated as zero. */
+/* If RCOND < 0, machine precision is used instead. */
+
+/* RANK (output) INTEGER */
+/* The effective rank of A, i.e., the number of singular values */
+/* which are greater than RCOND*S(1). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK must be at least 1. */
+/* The exact minimum amount of workspace needed depends on M, */
+/* N and NRHS. As long as LWORK is at least */
+/* 12*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2, */
+/* if M is greater than or equal to N or */
+/* 12*M + 2*M*SMLSIZ + 8*M*NLVL + M*NRHS + (SMLSIZ+1)**2, */
+/* if M is less than N, the code will execute correctly. */
+/* SMLSIZ is returned by ILAENV and is equal to the maximum */
+/* size of the subproblems at the bottom of the computation */
+/* tree (usually about 25), and */
+/* NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 ) */
+/* For good performance, LWORK should generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the array WORK and the */
+/* minimum size of the array IWORK, and returns these values as */
+/* the first entries of the WORK and IWORK arrays, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* LIWORK >= max(1, 3*MINMN*NLVL + 11*MINMN), */
+/* where MINMN = MIN( M,N ). */
+/* On exit, if INFO = 0, IWORK(1) returns the minimum LIWORK. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: the algorithm for computing the SVD failed to converge; */
+/* if INFO = i, i off-diagonal elements of an intermediate */
+/* bidiagonal form did not converge to zero. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Ren-Cang Li, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --s;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ maxmn = max(*m,*n);
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldb < max(1,maxmn)) {
+ *info = -7;
+ }
+
+/* Compute workspace. */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ liwork = 1;
+ if (minmn > 0) {
+ smlsiz = ilaenv_(&c__9, "SGELSD", " ", &c__0, &c__0, &c__0, &c__0);
+ mnthr = ilaenv_(&c__6, "SGELSD", " ", m, n, nrhs, &c_n1);
+/* Computing MAX */
+ i__1 = (integer) (log((real) minmn / (real) (smlsiz + 1)) / log(
+ 2.f)) + 1;
+ nlvl = max(i__1,0);
+ liwork = minmn * 3 * nlvl + minmn * 11;
+ mm = *m;
+ if (*m >= *n && *m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than */
+/* columns. */
+
+ mm = *n;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "SGEQRF",
+ " ", m, n, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *nrhs * ilaenv_(&c__1, "SORMQR",
+ "LT", m, nrhs, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ }
+ if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined. */
+
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * 3 + (mm + *n) * ilaenv_(&c__1,
+ "SGEBRD", " ", &mm, n, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * 3 + *nrhs * ilaenv_(&c__1, "SORMBR"
+, "QLT", &mm, nrhs, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * 3 + (*n - 1) * ilaenv_(&c__1,
+ "SORMBR", "PLN", n, nrhs, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing 2nd power */
+ i__1 = smlsiz + 1;
+ wlalsd = *n * 9 + (*n << 1) * smlsiz + (*n << 3) * nlvl + *n *
+ *nrhs + i__1 * i__1;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * 3 + wlalsd;
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = *n * 3 + mm, i__2 = *n * 3 + *nrhs, i__1 = max(i__1,
+ i__2), i__2 = *n * 3 + wlalsd;
+ minwrk = max(i__1,i__2);
+ }
+ if (*n > *m) {
+/* Computing 2nd power */
+ i__1 = smlsiz + 1;
+ wlalsd = *m * 9 + (*m << 1) * smlsiz + (*m << 3) * nlvl + *m *
+ *nrhs + i__1 * i__1;
+ if (*n >= mnthr) {
+
+/* Path 2a - underdetermined, with many more columns */
+/* than rows. */
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m << 1) *
+ ilaenv_(&c__1, "SGEBRD", " ", m, m, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + *nrhs *
+ ilaenv_(&c__1, "SORMBR", "QLT", m, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m - 1) *
+ ilaenv_(&c__1, "SORMBR", "PLN", m, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+ if (*nrhs > 1) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 1);
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m + *nrhs * ilaenv_(&c__1, "SORMLQ"
+, "LT", n, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + wlalsd;
+ maxwrk = max(i__1,i__2);
+/* XXX: Ensure the Path 2a case below is triggered. The workspace */
+/* calculation should use queries for all routines eventually. */
+/* Computing MAX */
+/* Computing MAX */
+ i__3 = *m, i__4 = (*m << 1) - 4, i__3 = max(i__3,i__4),
+ i__3 = max(i__3,*nrhs), i__4 = *n - *m * 3;
+ i__1 = maxwrk, i__2 = (*m << 2) + *m * *m + max(i__3,i__4)
+ ;
+ maxwrk = max(i__1,i__2);
+ } else {
+
+/* Path 2 - remaining underdetermined cases. */
+
+ maxwrk = *m * 3 + (*n + *m) * ilaenv_(&c__1, "SGEBRD",
+ " ", m, n, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + *nrhs * ilaenv_(&c__1,
+ "SORMBR", "QLT", m, nrhs, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORM"
+ "BR", "PLN", n, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + wlalsd;
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = *m * 3 + *nrhs, i__2 = *m * 3 + *m, i__1 = max(i__1,
+ i__2), i__2 = *m * 3 + wlalsd;
+ minwrk = max(i__1,i__2);
+ }
+ }
+ minwrk = min(minwrk,maxwrk);
+ work[1] = (real) maxwrk;
+ iwork[1] = liwork;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGELSD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ *rank = 0;
+ return 0;
+ }
+
+/* Get machine parameters. */
+
+ eps = slamch_("P");
+ sfmin = slamch_("S");
+ smlnum = sfmin / eps;
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+
+/* Scale A if max entry outside range [SMLNUM,BIGNUM]. */
+
+ anrm = slange_("M", m, n, &a[a_offset], lda, &work[1]);
+ iascl = 0;
+ if (anrm > 0.f && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM. */
+
+ slascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+ slascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.f) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ slaset_("F", &i__1, nrhs, &c_b81, &c_b81, &b[b_offset], ldb);
+ slaset_("F", &minmn, &c__1, &c_b81, &c_b81, &s[1], &c__1);
+ *rank = 0;
+ goto L10;
+ }
+
+/* Scale B if max entry outside range [SMLNUM,BIGNUM]. */
+
+ bnrm = slange_("M", m, nrhs, &b[b_offset], ldb, &work[1]);
+ ibscl = 0;
+ if (bnrm > 0.f && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM. */
+
+ slascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+ slascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 2;
+ }
+
+/* If M < N make sure certain entries of B are zero. */
+
+ if (*m < *n) {
+ i__1 = *n - *m;
+ slaset_("F", &i__1, nrhs, &c_b81, &c_b81, &b[*m + 1 + b_dim1], ldb);
+ }
+
+/* Overdetermined case. */
+
+ if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined. */
+
+ mm = *m;
+ if (*m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than columns. */
+
+ mm = *n;
+ itau = 1;
+ nwork = itau + *n;
+
+/* Compute A=Q*R. */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+ info);
+
+/* Multiply B by transpose(Q). */
+/* (Workspace: need N+NRHS, prefer N+NRHS*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sormqr_("L", "T", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below R. */
+
+ if (*n > 1) {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ slaset_("L", &i__1, &i__2, &c_b81, &c_b81, &a[a_dim1 + 2],
+ lda);
+ }
+ }
+
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize R in A. */
+/* (Workspace: need 3*N+MM, prefer 3*N+(MM+N)*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sgebrd_(&mm, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors of R. */
+/* (Workspace: need 3*N+NRHS, prefer 3*N+NRHS*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "T", &mm, nrhs, n, &a[a_offset], lda, &work[itauq],
+ &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ slalsd_("U", &smlsiz, n, nrhs, &s[1], &work[ie], &b[b_offset], ldb,
+ rcond, rank, &work[nwork], &iwork[1], info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of R. */
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "L", "N", n, nrhs, n, &a[a_offset], lda, &work[itaup], &
+ b[b_offset], ldb, &work[nwork], &i__1, info);
+
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = *m, i__2 = (*m << 1) - 4, i__1 = max(i__1,i__2), i__1 = max(
+ i__1,*nrhs), i__2 = *n - *m * 3, i__1 = max(i__1,i__2);
+ if (*n >= mnthr && *lwork >= (*m << 2) + *m * *m + max(i__1,wlalsd)) {
+
+/* Path 2a - underdetermined, with many more columns than rows */
+/* and sufficient workspace for an efficient algorithm. */
+
+ ldwork = *m;
+/* Computing MAX */
+/* Computing MAX */
+ i__3 = *m, i__4 = (*m << 1) - 4, i__3 = max(i__3,i__4), i__3 =
+ max(i__3,*nrhs), i__4 = *n - *m * 3;
+ i__1 = (*m << 2) + *m * *lda + max(i__3,i__4), i__2 = *m * *lda +
+ *m + *m * *nrhs, i__1 = max(i__1,i__2), i__2 = (*m << 2)
+ + *m * *lda + wlalsd;
+ if (*lwork >= max(i__1,i__2)) {
+ ldwork = *lda;
+ }
+ itau = 1;
+ nwork = *m + 1;
+
+/* Compute A=L*Q. */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+ info);
+ il = nwork;
+
+/* Copy L to WORK(IL), zeroing out above its diagonal. */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ slaset_("U", &i__1, &i__2, &c_b81, &c_b81, &work[il + ldwork], &
+ ldwork);
+ ie = il + ldwork * *m;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IL). */
+/* (Workspace: need M*M+5*M, prefer M*M+4*M+2*M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sgebrd_(m, m, &work[il], &ldwork, &s[1], &work[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors of L. */
+/* (Workspace: need M*M+4*M+NRHS, prefer M*M+4*M+NRHS*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "T", m, nrhs, m, &work[il], &ldwork, &work[
+ itauq], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ slalsd_("U", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset],
+ ldb, rcond, rank, &work[nwork], &iwork[1], info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of L. */
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "L", "N", m, nrhs, m, &work[il], &ldwork, &work[
+ itaup], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below first M rows of B. */
+
+ i__1 = *n - *m;
+ slaset_("F", &i__1, nrhs, &c_b81, &c_b81, &b[*m + 1 + b_dim1],
+ ldb);
+ nwork = itau + *m;
+
+/* Multiply transpose(Q) by B. */
+/* (Workspace: need M+NRHS, prefer M+NRHS*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sormlq_("L", "T", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[nwork], &i__1, info);
+
+ } else {
+
+/* Path 2 - remaining underdetermined cases. */
+
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize A. */
+/* (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors. */
+/* (Workspace: need 3*M+NRHS, prefer 3*M+NRHS*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "T", m, nrhs, n, &a[a_offset], lda, &work[itauq]
+, &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ slalsd_("L", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset],
+ ldb, rcond, rank, &work[nwork], &iwork[1], info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of A. */
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "L", "N", n, nrhs, m, &a[a_offset], lda, &work[itaup]
+, &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+ }
+ }
+
+/* Undo scaling. */
+
+ if (iascl == 1) {
+ slascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ } else if (iascl == 2) {
+ slascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ }
+ if (ibscl == 1) {
+ slascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ } else if (ibscl == 2) {
+ slascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ }
+
+L10:
+ work[1] = (real) maxwrk;
+ iwork[1] = liwork;
+ return 0;
+
+/* End of SGELSD */
+
+} /* sgelsd_ */
diff --git a/contrib/libs/clapack/sgelss.c b/contrib/libs/clapack/sgelss.c
new file mode 100644
index 0000000000..6de4676665
--- /dev/null
+++ b/contrib/libs/clapack/sgelss.c
@@ -0,0 +1,822 @@
+/* sgelss.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__6 = 6;
+static integer c_n1 = -1;
+static integer c__1 = 1;
+static integer c__0 = 0;
+static real c_b74 = 0.f;
+static real c_b108 = 1.f;
+
+/* Subroutine */ int sgelss_(integer *m, integer *n, integer *nrhs, real *a,
+ integer *lda, real *b, integer *ldb, real *s, real *rcond, integer *
+ rank, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+ real r__1;
+
+ /* Local variables */
+ integer i__, bl, ie, il, mm;
+ real eps, thr, anrm, bnrm;
+ integer itau;
+ real vdum[1];
+ integer iascl, ibscl, chunk;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ real sfmin;
+ integer minmn, maxmn;
+ extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *);
+ integer itaup, itauq;
+ extern /* Subroutine */ int srscl_(integer *, real *, real *, integer *);
+ integer mnthr, iwork;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), slabad_(real *, real *);
+ integer bdspac;
+ extern /* Subroutine */ int sgebrd_(integer *, integer *, real *, integer
+ *, real *, real *, real *, real *, real *, integer *, integer *);
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ real bignum;
+ extern /* Subroutine */ int sgelqf_(integer *, integer *, real *, integer
+ *, real *, real *, integer *, integer *), slascl_(char *, integer
+ *, integer *, real *, real *, integer *, integer *, real *,
+ integer *, integer *), sgeqrf_(integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *), slacpy_(char
+ *, integer *, integer *, real *, integer *, real *, integer *), slaset_(char *, integer *, integer *, real *, real *,
+ real *, integer *), sbdsqr_(char *, integer *, integer *,
+ integer *, integer *, real *, real *, real *, integer *, real *,
+ integer *, real *, integer *, real *, integer *), sorgbr_(
+ char *, integer *, integer *, integer *, real *, integer *, real *
+, real *, integer *, integer *);
+ integer ldwork;
+ extern /* Subroutine */ int sormbr_(char *, char *, char *, integer *,
+ integer *, integer *, real *, integer *, real *, real *, integer *
+, real *, integer *, integer *);
+ integer minwrk, maxwrk;
+ real smlnum;
+ extern /* Subroutine */ int sormlq_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+ logical lquery;
+ extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGELSS computes the minimum norm solution to a real linear least */
+/* squares problem: */
+
+/* Minimize 2-norm(| b - A*x |). */
+
+/* using the singular value decomposition (SVD) of A. A is an M-by-N */
+/* matrix which may be rank-deficient. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution matrix */
+/* X. */
+
+/* The effective rank of A is determined by treating as zero those */
+/* singular values which are less than RCOND times the largest singular */
+/* value. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the first min(m,n) rows of A are overwritten with */
+/* its right singular vectors, stored rowwise. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the M-by-NRHS right hand side matrix B. */
+/* On exit, B is overwritten by the N-by-NRHS solution */
+/* matrix X. If m >= n and RANK = n, the residual */
+/* sum-of-squares for the solution in the i-th column is given */
+/* by the sum of squares of elements n+1:m in that column. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,max(M,N)). */
+
+/* S (output) REAL array, dimension (min(M,N)) */
+/* The singular values of A in decreasing order. */
+/* The condition number of A in the 2-norm = S(1)/S(min(m,n)). */
+
+/* RCOND (input) REAL */
+/* RCOND is used to determine the effective rank of A. */
+/* Singular values S(i) <= RCOND*S(1) are treated as zero. */
+/* If RCOND < 0, machine precision is used instead. */
+
+/* RANK (output) INTEGER */
+/* The effective rank of A, i.e., the number of singular values */
+/* which are greater than RCOND*S(1). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= 1, and also: */
+/* LWORK >= 3*min(M,N) + max( 2*min(M,N), max(M,N), NRHS ) */
+/* For good performance, LWORK should generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: the algorithm for computing the SVD failed to converge; */
+/* if INFO = i, i off-diagonal elements of an intermediate */
+/* bidiagonal form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --s;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ maxmn = max(*m,*n);
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldb < max(1,maxmn)) {
+ *info = -7;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ if (minmn > 0) {
+ mm = *m;
+ mnthr = ilaenv_(&c__6, "SGELSS", " ", m, n, nrhs, &c_n1);
+ if (*m >= *n && *m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than */
+/* columns */
+
+ mm = *n;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "SGEQRF",
+ " ", m, n, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *nrhs * ilaenv_(&c__1, "SORMQR",
+ "LT", m, nrhs, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ }
+ if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined */
+
+/* Compute workspace needed for SBDSQR */
+
+/* Computing MAX */
+ i__1 = 1, i__2 = *n * 5;
+ bdspac = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * 3 + (mm + *n) * ilaenv_(&c__1,
+ "SGEBRD", " ", &mm, n, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * 3 + *nrhs * ilaenv_(&c__1, "SORMBR"
+, "QLT", &mm, nrhs, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * 3 + (*n - 1) * ilaenv_(&c__1,
+ "SORGBR", "P", n, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ maxwrk = max(maxwrk,bdspac);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * *nrhs;
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = *n * 3 + mm, i__2 = *n * 3 + *nrhs, i__1 = max(i__1,
+ i__2);
+ minwrk = max(i__1,bdspac);
+ maxwrk = max(minwrk,maxwrk);
+ }
+ if (*n > *m) {
+
+/* Compute workspace needed for SBDSQR */
+
+/* Computing MAX */
+ i__1 = 1, i__2 = *m * 5;
+ bdspac = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = *m * 3 + *nrhs, i__2 = *m * 3 + *n, i__1 = max(i__1,
+ i__2);
+ minwrk = max(i__1,bdspac);
+ if (*n >= mnthr) {
+
+/* Path 2a - underdetermined, with many more columns */
+/* than rows */
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m << 1) *
+ ilaenv_(&c__1, "SGEBRD", " ", m, m, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + *nrhs *
+ ilaenv_(&c__1, "SORMBR", "QLT", m, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m - 1) *
+ ilaenv_(&c__1, "SORGBR", "P", m, m, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + *m + bdspac;
+ maxwrk = max(i__1,i__2);
+ if (*nrhs > 1) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 1);
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m + *nrhs * ilaenv_(&c__1, "SORMLQ"
+, "LT", n, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+ } else {
+
+/* Path 2 - underdetermined */
+
+ maxwrk = *m * 3 + (*n + *m) * ilaenv_(&c__1, "SGEBRD",
+ " ", m, n, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + *nrhs * ilaenv_(&c__1,
+ "SORMBR", "QLT", m, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORG"
+ "BR", "P", m, n, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+ maxwrk = max(maxwrk,bdspac);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * *nrhs;
+ maxwrk = max(i__1,i__2);
+ }
+ }
+ maxwrk = max(minwrk,maxwrk);
+ }
+ work[1] = (real) maxwrk;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGELSS", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ *rank = 0;
+ return 0;
+ }
+
+/* Get machine parameters */
+
+ eps = slamch_("P");
+ sfmin = slamch_("S");
+ smlnum = sfmin / eps;
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = slange_("M", m, n, &a[a_offset], lda, &work[1]);
+ iascl = 0;
+ if (anrm > 0.f && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ slascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ slascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.f) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ slaset_("F", &i__1, nrhs, &c_b74, &c_b74, &b[b_offset], ldb);
+ slaset_("F", &minmn, &c__1, &c_b74, &c_b74, &s[1], &c__1);
+ *rank = 0;
+ goto L70;
+ }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+ bnrm = slange_("M", m, nrhs, &b[b_offset], ldb, &work[1]);
+ ibscl = 0;
+ if (bnrm > 0.f && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ slascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ slascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 2;
+ }
+
+/* Overdetermined case */
+
+ if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined */
+
+ mm = *m;
+ if (*m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than columns */
+
+ mm = *n;
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__1 = *lwork - iwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__1,
+ info);
+
+/* Multiply B by transpose(Q) */
+/* (Workspace: need N+NRHS, prefer N+NRHS*NB) */
+
+ i__1 = *lwork - iwork + 1;
+ sormqr_("L", "T", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[iwork], &i__1, info);
+
+/* Zero out below R */
+
+ if (*n > 1) {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ slaset_("L", &i__1, &i__2, &c_b74, &c_b74, &a[a_dim1 + 2],
+ lda);
+ }
+ }
+
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in A */
+/* (Workspace: need 3*N+MM, prefer 3*N+(MM+N)*NB) */
+
+ i__1 = *lwork - iwork + 1;
+ sgebrd_(&mm, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[iwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors of R */
+/* (Workspace: need 3*N+NRHS, prefer 3*N+NRHS*NB) */
+
+ i__1 = *lwork - iwork + 1;
+ sormbr_("Q", "L", "T", &mm, nrhs, n, &a[a_offset], lda, &work[itauq],
+ &b[b_offset], ldb, &work[iwork], &i__1, info);
+
+/* Generate right bidiagonalizing vectors of R in A */
+/* (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
+
+ i__1 = *lwork - iwork + 1;
+ sorgbr_("P", n, n, n, &a[a_offset], lda, &work[itaup], &work[iwork], &
+ i__1, info);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration */
+/* multiply B by transpose of left singular vectors */
+/* compute right singular vectors in A */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", n, n, &c__0, nrhs, &s[1], &work[ie], &a[a_offset], lda,
+ vdum, &c__1, &b[b_offset], ldb, &work[iwork], info)
+ ;
+ if (*info != 0) {
+ goto L70;
+ }
+
+/* Multiply B by reciprocals of singular values */
+
+/* Computing MAX */
+ r__1 = *rcond * s[1];
+ thr = dmax(r__1,sfmin);
+ if (*rcond < 0.f) {
+/* Computing MAX */
+ r__1 = eps * s[1];
+ thr = dmax(r__1,sfmin);
+ }
+ *rank = 0;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (s[i__] > thr) {
+ srscl_(nrhs, &s[i__], &b[i__ + b_dim1], ldb);
+ ++(*rank);
+ } else {
+ slaset_("F", &c__1, nrhs, &c_b74, &c_b74, &b[i__ + b_dim1],
+ ldb);
+ }
+/* L10: */
+ }
+
+/* Multiply B by right singular vectors */
+/* (Workspace: need N, prefer N*NRHS) */
+
+ if (*lwork >= *ldb * *nrhs && *nrhs > 1) {
+ sgemm_("T", "N", n, nrhs, n, &c_b108, &a[a_offset], lda, &b[
+ b_offset], ldb, &c_b74, &work[1], ldb);
+ slacpy_("G", n, nrhs, &work[1], ldb, &b[b_offset], ldb)
+ ;
+ } else if (*nrhs > 1) {
+ chunk = *lwork / *n;
+ i__1 = *nrhs;
+ i__2 = chunk;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = *nrhs - i__ + 1;
+ bl = min(i__3,chunk);
+ sgemm_("T", "N", n, &bl, n, &c_b108, &a[a_offset], lda, &b[
+ i__ * b_dim1 + 1], ldb, &c_b74, &work[1], n);
+ slacpy_("G", n, &bl, &work[1], n, &b[i__ * b_dim1 + 1], ldb);
+/* L20: */
+ }
+ } else {
+ sgemv_("T", n, n, &c_b108, &a[a_offset], lda, &b[b_offset], &c__1,
+ &c_b74, &work[1], &c__1);
+ scopy_(n, &work[1], &c__1, &b[b_offset], &c__1);
+ }
+
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__2 = *m, i__1 = (*m << 1) - 4, i__2 = max(i__2,i__1), i__2 = max(
+ i__2,*nrhs), i__1 = *n - *m * 3;
+ if (*n >= mnthr && *lwork >= (*m << 2) + *m * *m + max(i__2,i__1)) {
+
+/* Path 2a - underdetermined, with many more columns than rows */
+/* and sufficient workspace for an efficient algorithm */
+
+ ldwork = *m;
+/* Computing MAX */
+/* Computing MAX */
+ i__3 = *m, i__4 = (*m << 1) - 4, i__3 = max(i__3,i__4), i__3 =
+ max(i__3,*nrhs), i__4 = *n - *m * 3;
+ i__2 = (*m << 2) + *m * *lda + max(i__3,i__4), i__1 = *m * *lda +
+ *m + *m * *nrhs;
+ if (*lwork >= max(i__2,i__1)) {
+ ldwork = *lda;
+ }
+ itau = 1;
+ iwork = *m + 1;
+
+/* Compute A=L*Q */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2,
+ info);
+ il = iwork;
+
+/* Copy L to WORK(IL), zeroing out above it */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork);
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ slaset_("U", &i__2, &i__1, &c_b74, &c_b74, &work[il + ldwork], &
+ ldwork);
+ ie = il + ldwork * *m;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IL) */
+/* (Workspace: need M*M+5*M, prefer M*M+4*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(m, m, &work[il], &ldwork, &s[1], &work[ie], &work[itauq],
+ &work[itaup], &work[iwork], &i__2, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors of L */
+/* (Workspace: need M*M+4*M+NRHS, prefer M*M+4*M+NRHS*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sormbr_("Q", "L", "T", m, nrhs, m, &work[il], &ldwork, &work[
+ itauq], &b[b_offset], ldb, &work[iwork], &i__2, info);
+
+/* Generate right bidiagonalizing vectors of R in WORK(IL) */
+/* (Workspace: need M*M+5*M-1, prefer M*M+4*M+(M-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", m, m, m, &work[il], &ldwork, &work[itaup], &work[
+ iwork], &i__2, info);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, */
+/* computing right singular vectors of L in WORK(IL) and */
+/* multiplying B by transpose of left singular vectors */
+/* (Workspace: need M*M+M+BDSPAC) */
+
+ sbdsqr_("U", m, m, &c__0, nrhs, &s[1], &work[ie], &work[il], &
+ ldwork, &a[a_offset], lda, &b[b_offset], ldb, &work[iwork]
+, info);
+ if (*info != 0) {
+ goto L70;
+ }
+
+/* Multiply B by reciprocals of singular values */
+
+/* Computing MAX */
+ r__1 = *rcond * s[1];
+ thr = dmax(r__1,sfmin);
+ if (*rcond < 0.f) {
+/* Computing MAX */
+ r__1 = eps * s[1];
+ thr = dmax(r__1,sfmin);
+ }
+ *rank = 0;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (s[i__] > thr) {
+ srscl_(nrhs, &s[i__], &b[i__ + b_dim1], ldb);
+ ++(*rank);
+ } else {
+ slaset_("F", &c__1, nrhs, &c_b74, &c_b74, &b[i__ + b_dim1]
+, ldb);
+ }
+/* L30: */
+ }
+ iwork = ie;
+
+/* Multiply B by right singular vectors of L in WORK(IL) */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NRHS) */
+
+ if (*lwork >= *ldb * *nrhs + iwork - 1 && *nrhs > 1) {
+ sgemm_("T", "N", m, nrhs, m, &c_b108, &work[il], &ldwork, &b[
+ b_offset], ldb, &c_b74, &work[iwork], ldb);
+ slacpy_("G", m, nrhs, &work[iwork], ldb, &b[b_offset], ldb);
+ } else if (*nrhs > 1) {
+ chunk = (*lwork - iwork + 1) / *m;
+ i__2 = *nrhs;
+ i__1 = chunk;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *nrhs - i__ + 1;
+ bl = min(i__3,chunk);
+ sgemm_("T", "N", m, &bl, m, &c_b108, &work[il], &ldwork, &
+ b[i__ * b_dim1 + 1], ldb, &c_b74, &work[iwork], m);
+ slacpy_("G", m, &bl, &work[iwork], m, &b[i__ * b_dim1 + 1]
+, ldb);
+/* L40: */
+ }
+ } else {
+ sgemv_("T", m, m, &c_b108, &work[il], &ldwork, &b[b_dim1 + 1],
+ &c__1, &c_b74, &work[iwork], &c__1);
+ scopy_(m, &work[iwork], &c__1, &b[b_dim1 + 1], &c__1);
+ }
+
+/* Zero out below first M rows of B */
+
+ i__1 = *n - *m;
+ slaset_("F", &i__1, nrhs, &c_b74, &c_b74, &b[*m + 1 + b_dim1],
+ ldb);
+ iwork = itau + *m;
+
+/* Multiply transpose(Q) by B */
+/* (Workspace: need M+NRHS, prefer M+NRHS*NB) */
+
+ i__1 = *lwork - iwork + 1;
+ sormlq_("L", "T", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[iwork], &i__1, info);
+
+ } else {
+
+/* Path 2 - remaining underdetermined cases */
+
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize A */
+/* (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB) */
+
+ i__1 = *lwork - iwork + 1;
+ sgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[iwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors */
+/* (Workspace: need 3*M+NRHS, prefer 3*M+NRHS*NB) */
+
+ i__1 = *lwork - iwork + 1;
+ sormbr_("Q", "L", "T", m, nrhs, n, &a[a_offset], lda, &work[itauq]
+, &b[b_offset], ldb, &work[iwork], &i__1, info);
+
+/* Generate right bidiagonalizing vectors in A */
+/* (Workspace: need 4*M, prefer 3*M+M*NB) */
+
+ i__1 = *lwork - iwork + 1;
+ sorgbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &work[
+ iwork], &i__1, info);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, */
+/* computing right singular vectors of A in A and */
+/* multiplying B by transpose of left singular vectors */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("L", m, n, &c__0, nrhs, &s[1], &work[ie], &a[a_offset],
+ lda, vdum, &c__1, &b[b_offset], ldb, &work[iwork], info);
+ if (*info != 0) {
+ goto L70;
+ }
+
+/* Multiply B by reciprocals of singular values */
+
+/* Computing MAX */
+ r__1 = *rcond * s[1];
+ thr = dmax(r__1,sfmin);
+ if (*rcond < 0.f) {
+/* Computing MAX */
+ r__1 = eps * s[1];
+ thr = dmax(r__1,sfmin);
+ }
+ *rank = 0;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (s[i__] > thr) {
+ srscl_(nrhs, &s[i__], &b[i__ + b_dim1], ldb);
+ ++(*rank);
+ } else {
+ slaset_("F", &c__1, nrhs, &c_b74, &c_b74, &b[i__ + b_dim1]
+, ldb);
+ }
+/* L50: */
+ }
+
+/* Multiply B by right singular vectors of A */
+/* (Workspace: need N, prefer N*NRHS) */
+
+ if (*lwork >= *ldb * *nrhs && *nrhs > 1) {
+ sgemm_("T", "N", n, nrhs, m, &c_b108, &a[a_offset], lda, &b[
+ b_offset], ldb, &c_b74, &work[1], ldb);
+ slacpy_("F", n, nrhs, &work[1], ldb, &b[b_offset], ldb);
+ } else if (*nrhs > 1) {
+ chunk = *lwork / *n;
+ i__1 = *nrhs;
+ i__2 = chunk;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *nrhs - i__ + 1;
+ bl = min(i__3,chunk);
+ sgemm_("T", "N", n, &bl, m, &c_b108, &a[a_offset], lda, &
+ b[i__ * b_dim1 + 1], ldb, &c_b74, &work[1], n);
+ slacpy_("F", n, &bl, &work[1], n, &b[i__ * b_dim1 + 1],
+ ldb);
+/* L60: */
+ }
+ } else {
+ sgemv_("T", m, n, &c_b108, &a[a_offset], lda, &b[b_offset], &
+ c__1, &c_b74, &work[1], &c__1);
+ scopy_(n, &work[1], &c__1, &b[b_offset], &c__1);
+ }
+ }
+ }
+
+/* Undo scaling */
+
+ if (iascl == 1) {
+ slascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ } else if (iascl == 2) {
+ slascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ }
+ if (ibscl == 1) {
+ slascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ } else if (ibscl == 2) {
+ slascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ }
+
+L70:
+ work[1] = (real) maxwrk;
+ return 0;
+
+/* End of SGELSS */
+
+} /* sgelss_ */
diff --git a/contrib/libs/clapack/sgelsx.c b/contrib/libs/clapack/sgelsx.c
new file mode 100644
index 0000000000..d678656a05
--- /dev/null
+++ b/contrib/libs/clapack/sgelsx.c
@@ -0,0 +1,433 @@
+/* sgelsx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__0 = 0;
+static real c_b13 = 0.f;
+static integer c__2 = 2;
+static integer c__1 = 1;
+static real c_b36 = 1.f;
+
+/* Subroutine */ int sgelsx_(integer *m, integer *n, integer *nrhs, real *a,
+ integer *lda, real *b, integer *ldb, integer *jpvt, real *rcond,
+ integer *rank, real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+ real r__1;
+
+ /* Local variables */
+ integer i__, j, k;
+ real c1, c2, s1, s2, t1, t2;
+ integer mn;
+ real anrm, bnrm, smin, smax;
+ integer iascl, ibscl, ismin, ismax;
+ extern /* Subroutine */ int strsm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+), slaic1_(integer *, integer *,
+ real *, real *, real *, real *, real *, real *, real *), sorm2r_(
+ char *, char *, integer *, integer *, integer *, real *, integer *
+, real *, real *, integer *, real *, integer *),
+ slabad_(real *, real *);
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), sgeqpf_(integer *, integer *, real *, integer *, integer
+ *, real *, real *, integer *), slaset_(char *, integer *, integer
+ *, real *, real *, real *, integer *);
+ real sminpr, smaxpr, smlnum;
+ extern /* Subroutine */ int slatzm_(char *, integer *, integer *, real *,
+ integer *, real *, real *, real *, integer *, real *),
+ stzrqf_(integer *, integer *, real *, integer *, real *, integer *
+);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine SGELSY. */
+
+/* SGELSX computes the minimum-norm solution to a real linear least */
+/* squares problem: */
+/* minimize || A * X - B || */
+/* using a complete orthogonal factorization of A. A is an M-by-N */
+/* matrix which may be rank-deficient. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution */
+/* matrix X. */
+
+/* The routine first computes a QR factorization with column pivoting: */
+/* A * P = Q * [ R11 R12 ] */
+/* [ 0 R22 ] */
+/* with R11 defined as the largest leading submatrix whose estimated */
+/* condition number is less than 1/RCOND. The order of R11, RANK, */
+/* is the effective rank of A. */
+
+/* Then, R22 is considered to be negligible, and R12 is annihilated */
+/* by orthogonal transformations from the right, arriving at the */
+/* complete orthogonal factorization: */
+/* A * P = Q * [ T11 0 ] * Z */
+/* [ 0 0 ] */
+/* The minimum-norm solution is then */
+/* X = P * Z' [ inv(T11)*Q1'*B ] */
+/* [ 0 ] */
+/* where Q1 consists of the first RANK columns of Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of */
+/* columns of matrices B and X. NRHS >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A has been overwritten by details of its */
+/* complete orthogonal factorization. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the M-by-NRHS right hand side matrix B. */
+/* On exit, the N-by-NRHS solution matrix X. */
+/* If m >= n and RANK = n, the residual sum-of-squares for */
+/* the solution in the i-th column is given by the sum of */
+/* squares of elements N+1:M in that column. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,M,N). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(i) .ne. 0, the i-th column of A is an */
+/* initial column, otherwise it is a free column. Before */
+/* the QR factorization of A, all initial columns are */
+/* permuted to the leading positions; only the remaining */
+/* free columns are moved as a result of column pivoting */
+/* during the factorization. */
+/* On exit, if JPVT(i) = k, then the i-th column of A*P */
+/* was the k-th column of A. */
+
+/* RCOND (input) REAL */
+/* RCOND is used to determine the effective rank of A, which */
+/* is defined as the order of the largest leading triangular */
+/* submatrix R11 in the QR factorization with pivoting of A, */
+/* whose estimated condition number < 1/RCOND. */
+
+/* RANK (output) INTEGER */
+/* The effective rank of A, i.e., the order of the submatrix */
+/* R11. This is the same as the order of the submatrix T11 */
+/* in the complete orthogonal factorization of A. */
+
+/* WORK (workspace) REAL array, dimension */
+/* (max( min(M,N)+3*N, 2*min(M,N)+NRHS )), */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --jpvt;
+ --work;
+
+ /* Function Body */
+ mn = min(*m,*n);
+ ismin = mn + 1;
+ ismax = (mn << 1) + 1;
+
+/* Test the input arguments. */
+
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = max(1,*m);
+ if (*ldb < max(i__1,*n)) {
+ *info = -7;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGELSX", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+/* Computing MIN */
+ i__1 = min(*m,*n);
+ if (min(i__1,*nrhs) == 0) {
+ *rank = 0;
+ return 0;
+ }
+
+/* Get machine parameters */
+
+ smlnum = slamch_("S") / slamch_("P");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+
+/* Scale A, B if max elements outside range [SMLNUM,BIGNUM] */
+
+ anrm = slange_("M", m, n, &a[a_offset], lda, &work[1]);
+ iascl = 0;
+ if (anrm > 0.f && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ slascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ slascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.f) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ slaset_("F", &i__1, nrhs, &c_b13, &c_b13, &b[b_offset], ldb);
+ *rank = 0;
+ goto L100;
+ }
+
+ bnrm = slange_("M", m, nrhs, &b[b_offset], ldb, &work[1]);
+ ibscl = 0;
+ if (bnrm > 0.f && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ slascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ slascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 2;
+ }
+
+/* Compute QR factorization with column pivoting of A: */
+/* A * P = Q * R */
+
+ sgeqpf_(m, n, &a[a_offset], lda, &jpvt[1], &work[1], &work[mn + 1], info);
+
+/* workspace 3*N. Details of Householder rotations stored */
+/* in WORK(1:MN). */
+
+/* Determine RANK using incremental condition estimation */
+
+ work[ismin] = 1.f;
+ work[ismax] = 1.f;
+ smax = (r__1 = a[a_dim1 + 1], dabs(r__1));
+ smin = smax;
+ if ((r__1 = a[a_dim1 + 1], dabs(r__1)) == 0.f) {
+ *rank = 0;
+ i__1 = max(*m,*n);
+ slaset_("F", &i__1, nrhs, &c_b13, &c_b13, &b[b_offset], ldb);
+ goto L100;
+ } else {
+ *rank = 1;
+ }
+
+L10:
+ if (*rank < mn) {
+ i__ = *rank + 1;
+ slaic1_(&c__2, rank, &work[ismin], &smin, &a[i__ * a_dim1 + 1], &a[
+ i__ + i__ * a_dim1], &sminpr, &s1, &c1);
+ slaic1_(&c__1, rank, &work[ismax], &smax, &a[i__ * a_dim1 + 1], &a[
+ i__ + i__ * a_dim1], &smaxpr, &s2, &c2);
+
+ if (smaxpr * *rcond <= sminpr) {
+ i__1 = *rank;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[ismin + i__ - 1] = s1 * work[ismin + i__ - 1];
+ work[ismax + i__ - 1] = s2 * work[ismax + i__ - 1];
+/* L20: */
+ }
+ work[ismin + *rank] = c1;
+ work[ismax + *rank] = c2;
+ smin = sminpr;
+ smax = smaxpr;
+ ++(*rank);
+ goto L10;
+ }
+ }
+
+/* Logically partition R = [ R11 R12 ] */
+/* [ 0 R22 ] */
+/* where R11 = R(1:RANK,1:RANK) */
+
+/* [R11,R12] = [ T11, 0 ] * Y */
+
+ if (*rank < *n) {
+ stzrqf_(rank, n, &a[a_offset], lda, &work[mn + 1], info);
+ }
+
+/* Details of Householder rotations stored in WORK(MN+1:2*MN) */
+
+/* B(1:M,1:NRHS) := Q' * B(1:M,1:NRHS) */
+
+ sorm2r_("Left", "Transpose", m, nrhs, &mn, &a[a_offset], lda, &work[1], &
+ b[b_offset], ldb, &work[(mn << 1) + 1], info);
+
+/* workspace NRHS */
+
+/* B(1:RANK,1:NRHS) := inv(T11) * B(1:RANK,1:NRHS) */
+
+ strsm_("Left", "Upper", "No transpose", "Non-unit", rank, nrhs, &c_b36, &
+ a[a_offset], lda, &b[b_offset], ldb);
+
+ i__1 = *n;
+ for (i__ = *rank + 1; i__ <= i__1; ++i__) {
+ i__2 = *nrhs;
+ for (j = 1; j <= i__2; ++j) {
+ b[i__ + j * b_dim1] = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* B(1:N,1:NRHS) := Y' * B(1:N,1:NRHS) */
+
+ if (*rank < *n) {
+ i__1 = *rank;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *n - *rank + 1;
+ slatzm_("Left", &i__2, nrhs, &a[i__ + (*rank + 1) * a_dim1], lda,
+ &work[mn + i__], &b[i__ + b_dim1], &b[*rank + 1 + b_dim1],
+ ldb, &work[(mn << 1) + 1]);
+/* L50: */
+ }
+ }
+
+/* workspace NRHS */
+
+/* B(1:N,1:NRHS) := P * B(1:N,1:NRHS) */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[(mn << 1) + i__] = 1.f;
+/* L60: */
+ }
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[(mn << 1) + i__] == 1.f) {
+ if (jpvt[i__] != i__) {
+ k = i__;
+ t1 = b[k + j * b_dim1];
+ t2 = b[jpvt[k] + j * b_dim1];
+L70:
+ b[jpvt[k] + j * b_dim1] = t1;
+ work[(mn << 1) + k] = 0.f;
+ t1 = t2;
+ k = jpvt[k];
+ t2 = b[jpvt[k] + j * b_dim1];
+ if (jpvt[k] != i__) {
+ goto L70;
+ }
+ b[i__ + j * b_dim1] = t1;
+ work[(mn << 1) + k] = 0.f;
+ }
+ }
+/* L80: */
+ }
+/* L90: */
+ }
+
+/* Undo scaling */
+
+ if (iascl == 1) {
+ slascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("U", &c__0, &c__0, &smlnum, &anrm, rank, rank, &a[a_offset],
+ lda, info);
+ } else if (iascl == 2) {
+ slascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("U", &c__0, &c__0, &bignum, &anrm, rank, rank, &a[a_offset],
+ lda, info);
+ }
+ if (ibscl == 1) {
+ slascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ } else if (ibscl == 2) {
+ slascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ }
+
+L100:
+
+ return 0;
+
+/* End of SGELSX */
+
+} /* sgelsx_ */
diff --git a/contrib/libs/clapack/sgelsy.c b/contrib/libs/clapack/sgelsy.c
new file mode 100644
index 0000000000..c910c23969
--- /dev/null
+++ b/contrib/libs/clapack/sgelsy.c
@@ -0,0 +1,488 @@
+/* sgelsy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__0 = 0;
+static real c_b31 = 0.f;
+static integer c__2 = 2;
+static real c_b54 = 1.f;
+
+/* Subroutine */ int sgelsy_(integer *m, integer *n, integer *nrhs, real *a,
+ integer *lda, real *b, integer *ldb, integer *jpvt, real *rcond,
+ integer *rank, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Local variables */
+ integer i__, j;
+ real c1, c2, s1, s2;
+ integer nb, mn, nb1, nb2, nb3, nb4;
+ real anrm, bnrm, smin, smax;
+ integer iascl, ibscl, ismin, ismax;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ real wsize;
+ extern /* Subroutine */ int strsm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+), slaic1_(integer *, integer *,
+ real *, real *, real *, real *, real *, real *, real *), sgeqp3_(
+ integer *, integer *, real *, integer *, integer *, real *, real *
+, integer *, integer *), slabad_(real *, real *);
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), slaset_(char *, integer *, integer *, real *, real *,
+ real *, integer *);
+ integer lwkmin;
+ real sminpr, smaxpr, smlnum;
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *), sormrz_(char *, char *,
+ integer *, integer *, integer *, integer *, real *, integer *,
+ real *, real *, integer *, real *, integer *, integer *), stzrzf_(integer *, integer *, real *, integer *, real *,
+ real *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGELSY computes the minimum-norm solution to a real linear least */
+/* squares problem: */
+/* minimize || A * X - B || */
+/* using a complete orthogonal factorization of A. A is an M-by-N */
+/* matrix which may be rank-deficient. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution */
+/* matrix X. */
+
+/* The routine first computes a QR factorization with column pivoting: */
+/* A * P = Q * [ R11 R12 ] */
+/* [ 0 R22 ] */
+/* with R11 defined as the largest leading submatrix whose estimated */
+/* condition number is less than 1/RCOND. The order of R11, RANK, */
+/* is the effective rank of A. */
+
+/* Then, R22 is considered to be negligible, and R12 is annihilated */
+/* by orthogonal transformations from the right, arriving at the */
+/* complete orthogonal factorization: */
+/* A * P = Q * [ T11 0 ] * Z */
+/* [ 0 0 ] */
+/* The minimum-norm solution is then */
+/* X = P * Z' [ inv(T11)*Q1'*B ] */
+/* [ 0 ] */
+/* where Q1 consists of the first RANK columns of Q. */
+
+/* This routine is basically identical to the original xGELSX except */
+/* three differences: */
+/* o The call to the subroutine xGEQPF has been substituted by the */
+/* the call to the subroutine xGEQP3. This subroutine is a Blas-3 */
+/* version of the QR factorization with column pivoting. */
+/* o Matrix B (the right hand side) is updated with Blas-3. */
+/* o The permutation of matrix B (the right hand side) is faster and */
+/* simpler. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of */
+/* columns of matrices B and X. NRHS >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A has been overwritten by details of its */
+/* complete orthogonal factorization. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the M-by-NRHS right hand side matrix B. */
+/* On exit, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,M,N). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(i) .ne. 0, the i-th column of A is permuted */
+/* to the front of AP, otherwise column i is a free column. */
+/* On exit, if JPVT(i) = k, then the i-th column of AP */
+/* was the k-th column of A. */
+
+/* RCOND (input) REAL */
+/* RCOND is used to determine the effective rank of A, which */
+/* is defined as the order of the largest leading triangular */
+/* submatrix R11 in the QR factorization with pivoting of A, */
+/* whose estimated condition number < 1/RCOND. */
+
+/* RANK (output) INTEGER */
+/* The effective rank of A, i.e., the order of the submatrix */
+/* R11. This is the same as the order of the submatrix T11 */
+/* in the complete orthogonal factorization of A. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* The unblocked strategy requires that: */
+/* LWORK >= MAX( MN+3*N+1, 2*MN+NRHS ), */
+/* where MN = min( M, N ). */
+/* The block algorithm requires that: */
+/* LWORK >= MAX( MN+2*N+NB*(N+1), 2*MN+NB*NRHS ), */
+/* where NB is an upper bound on the blocksize returned */
+/* by ILAENV for the routines SGEQP3, STZRZF, STZRQF, SORMQR, */
+/* and SORMRZ. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: If INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+/* E. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+/* G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --jpvt;
+ --work;
+
+ /* Function Body */
+ mn = min(*m,*n);
+ ismin = mn + 1;
+ ismax = (mn << 1) + 1;
+
+/* Test the input arguments. */
+
+ *info = 0;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = max(1,*m);
+ if (*ldb < max(i__1,*n)) {
+ *info = -7;
+ }
+ }
+
+/* Figure out optimal block size */
+
+ if (*info == 0) {
+ if (mn == 0 || *nrhs == 0) {
+ lwkmin = 1;
+ lwkopt = 1;
+ } else {
+ nb1 = ilaenv_(&c__1, "SGEQRF", " ", m, n, &c_n1, &c_n1);
+ nb2 = ilaenv_(&c__1, "SGERQF", " ", m, n, &c_n1, &c_n1);
+ nb3 = ilaenv_(&c__1, "SORMQR", " ", m, n, nrhs, &c_n1);
+ nb4 = ilaenv_(&c__1, "SORMRQ", " ", m, n, nrhs, &c_n1);
+/* Computing MAX */
+ i__1 = max(nb1,nb2), i__1 = max(i__1,nb3);
+ nb = max(i__1,nb4);
+/* Computing MAX */
+ i__1 = mn << 1, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = mn +
+ *nrhs;
+ lwkmin = mn + max(i__1,i__2);
+/* Computing MAX */
+ i__1 = lwkmin, i__2 = mn + (*n << 1) + nb * (*n + 1), i__1 = max(
+ i__1,i__2), i__2 = (mn << 1) + nb * *nrhs;
+ lwkopt = max(i__1,i__2);
+ }
+ work[1] = (real) lwkopt;
+
+ if (*lwork < lwkmin && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGELSY", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (mn == 0 || *nrhs == 0) {
+ *rank = 0;
+ return 0;
+ }
+
+/* Get machine parameters */
+
+ smlnum = slamch_("S") / slamch_("P");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+
+/* Scale A, B if max entries outside range [SMLNUM,BIGNUM] */
+
+ anrm = slange_("M", m, n, &a[a_offset], lda, &work[1]);
+ iascl = 0;
+ if (anrm > 0.f && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ slascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ slascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.f) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ slaset_("F", &i__1, nrhs, &c_b31, &c_b31, &b[b_offset], ldb);
+ *rank = 0;
+ goto L70;
+ }
+
+ bnrm = slange_("M", m, nrhs, &b[b_offset], ldb, &work[1]);
+ ibscl = 0;
+ if (bnrm > 0.f && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ slascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ slascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 2;
+ }
+
+/* Compute QR factorization with column pivoting of A: */
+/* A * P = Q * R */
+
+ i__1 = *lwork - mn;
+ sgeqp3_(m, n, &a[a_offset], lda, &jpvt[1], &work[1], &work[mn + 1], &i__1,
+ info);
+ wsize = mn + work[mn + 1];
+
+/* workspace: MN+2*N+NB*(N+1). */
+/* Details of Householder rotations stored in WORK(1:MN). */
+
+/* Determine RANK using incremental condition estimation */
+
+ work[ismin] = 1.f;
+ work[ismax] = 1.f;
+ smax = (r__1 = a[a_dim1 + 1], dabs(r__1));
+ smin = smax;
+ if ((r__1 = a[a_dim1 + 1], dabs(r__1)) == 0.f) {
+ *rank = 0;
+ i__1 = max(*m,*n);
+ slaset_("F", &i__1, nrhs, &c_b31, &c_b31, &b[b_offset], ldb);
+ goto L70;
+ } else {
+ *rank = 1;
+ }
+
+L10:
+ if (*rank < mn) {
+ i__ = *rank + 1;
+ slaic1_(&c__2, rank, &work[ismin], &smin, &a[i__ * a_dim1 + 1], &a[
+ i__ + i__ * a_dim1], &sminpr, &s1, &c1);
+ slaic1_(&c__1, rank, &work[ismax], &smax, &a[i__ * a_dim1 + 1], &a[
+ i__ + i__ * a_dim1], &smaxpr, &s2, &c2);
+
+ if (smaxpr * *rcond <= sminpr) {
+ i__1 = *rank;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[ismin + i__ - 1] = s1 * work[ismin + i__ - 1];
+ work[ismax + i__ - 1] = s2 * work[ismax + i__ - 1];
+/* L20: */
+ }
+ work[ismin + *rank] = c1;
+ work[ismax + *rank] = c2;
+ smin = sminpr;
+ smax = smaxpr;
+ ++(*rank);
+ goto L10;
+ }
+ }
+
+/* workspace: 3*MN. */
+
+/* Logically partition R = [ R11 R12 ] */
+/* [ 0 R22 ] */
+/* where R11 = R(1:RANK,1:RANK) */
+
+/* [R11,R12] = [ T11, 0 ] * Y */
+
+ if (*rank < *n) {
+ i__1 = *lwork - (mn << 1);
+ stzrzf_(rank, n, &a[a_offset], lda, &work[mn + 1], &work[(mn << 1) +
+ 1], &i__1, info);
+ }
+
+/* workspace: 2*MN. */
+/* Details of Householder rotations stored in WORK(MN+1:2*MN) */
+
+/* B(1:M,1:NRHS) := Q' * B(1:M,1:NRHS) */
+
+ i__1 = *lwork - (mn << 1);
+ sormqr_("Left", "Transpose", m, nrhs, &mn, &a[a_offset], lda, &work[1], &
+ b[b_offset], ldb, &work[(mn << 1) + 1], &i__1, info);
+/* Computing MAX */
+ r__1 = wsize, r__2 = (mn << 1) + work[(mn << 1) + 1];
+ wsize = dmax(r__1,r__2);
+
+/* workspace: 2*MN+NB*NRHS. */
+
+/* B(1:RANK,1:NRHS) := inv(T11) * B(1:RANK,1:NRHS) */
+
+ strsm_("Left", "Upper", "No transpose", "Non-unit", rank, nrhs, &c_b54, &
+ a[a_offset], lda, &b[b_offset], ldb);
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = *rank + 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* B(1:N,1:NRHS) := Y' * B(1:N,1:NRHS) */
+
+ if (*rank < *n) {
+ i__1 = *n - *rank;
+ i__2 = *lwork - (mn << 1);
+ sormrz_("Left", "Transpose", n, nrhs, rank, &i__1, &a[a_offset], lda,
+ &work[mn + 1], &b[b_offset], ldb, &work[(mn << 1) + 1], &i__2,
+ info);
+ }
+
+/* workspace: 2*MN+NRHS. */
+
+/* B(1:N,1:NRHS) := P * B(1:N,1:NRHS) */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[jpvt[i__]] = b[i__ + j * b_dim1];
+/* L50: */
+ }
+ scopy_(n, &work[1], &c__1, &b[j * b_dim1 + 1], &c__1);
+/* L60: */
+ }
+
+/* workspace: N. */
+
+/* Undo scaling */
+
+ if (iascl == 1) {
+ slascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("U", &c__0, &c__0, &smlnum, &anrm, rank, rank, &a[a_offset],
+ lda, info);
+ } else if (iascl == 2) {
+ slascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("U", &c__0, &c__0, &bignum, &anrm, rank, rank, &a[a_offset],
+ lda, info);
+ }
+ if (ibscl == 1) {
+ slascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ } else if (ibscl == 2) {
+ slascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ }
+
+L70:
+ work[1] = (real) lwkopt;
+
+ return 0;
+
+/* End of SGELSY */
+
+} /* sgelsy_ */
diff --git a/contrib/libs/clapack/sgeql2.c b/contrib/libs/clapack/sgeql2.c
new file mode 100644
index 0000000000..13e292cdde
--- /dev/null
+++ b/contrib/libs/clapack/sgeql2.c
@@ -0,0 +1,159 @@
+/* sgeql2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sgeql2_(integer *m, integer *n, real *a, integer *lda,
+ real *tau, real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, k;
+ real aii;
+ extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *), xerbla_(
+ char *, integer *), slarfp_(integer *, real *, real *,
+ integer *, real *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGEQL2 computes a QL factorization of a real m by n matrix A: */
+/* A = Q * L. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the m by n matrix A. */
+/* On exit, if m >= n, the lower triangle of the subarray */
+/* A(m-n+1:m,1:n) contains the n by n lower triangular matrix L; */
+/* if m <= n, the elements on and below the (n-m)-th */
+/* superdiagonal contain the m by n lower trapezoidal matrix L; */
+/* the remaining elements, with the array TAU, represent the */
+/* orthogonal matrix Q as a product of elementary reflectors */
+/* (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) REAL array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(m-k+i+1:m) = 0 and v(m-k+i) = 1; v(1:m-k+i-1) is stored on exit in */
+/* A(1:m-k+i-1,n-k+i), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGEQL2", &i__1);
+ return 0;
+ }
+
+ k = min(*m,*n);
+
+ for (i__ = k; i__ >= 1; --i__) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(1:m-k+i-1,n-k+i) */
+
+ i__1 = *m - k + i__;
+ slarfp_(&i__1, &a[*m - k + i__ + (*n - k + i__) * a_dim1], &a[(*n - k
+ + i__) * a_dim1 + 1], &c__1, &tau[i__]);
+
+/* Apply H(i) to A(1:m-k+i,1:n-k+i-1) from the left */
+
+ aii = a[*m - k + i__ + (*n - k + i__) * a_dim1];
+ a[*m - k + i__ + (*n - k + i__) * a_dim1] = 1.f;
+ i__1 = *m - k + i__;
+ i__2 = *n - k + i__ - 1;
+ slarf_("Left", &i__1, &i__2, &a[(*n - k + i__) * a_dim1 + 1], &c__1, &
+ tau[i__], &a[a_offset], lda, &work[1]);
+ a[*m - k + i__ + (*n - k + i__) * a_dim1] = aii;
+/* L10: */
+ }
+ return 0;
+
+/* End of SGEQL2 */
+
+} /* sgeql2_ */
diff --git a/contrib/libs/clapack/sgeqlf.c b/contrib/libs/clapack/sgeqlf.c
new file mode 100644
index 0000000000..3d2b2e6e35
--- /dev/null
+++ b/contrib/libs/clapack/sgeqlf.c
@@ -0,0 +1,270 @@
+/* sgeqlf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int sgeqlf_(integer *m, integer *n, real *a, integer *lda,
+ real *tau, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, k, ib, nb, ki, kk, mu, nu, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int sgeql2_(integer *, integer *, real *, integer
+ *, real *, real *, integer *), slarfb_(char *, char *, char *,
+ char *, integer *, integer *, integer *, real *, integer *, real *
+, integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGEQLF computes a QL factorization of a real M-by-N matrix A: */
+/* A = Q * L. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* if m >= n, the lower triangle of the subarray */
+/* A(m-n+1:m,1:n) contains the N-by-N lower triangular matrix L; */
+/* if m <= n, the elements on and below the (n-m)-th */
+/* superdiagonal contain the M-by-N lower trapezoidal matrix L; */
+/* the remaining elements, with the array TAU, represent the */
+/* orthogonal matrix Q as a product of elementary reflectors */
+/* (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) REAL array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(m-k+i+1:m) = 0 and v(m-k+i) = 1; v(1:m-k+i-1) is stored on exit in */
+/* A(1:m-k+i-1,n-k+i), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+
+ if (*info == 0) {
+ k = min(*m,*n);
+ if (k == 0) {
+ lwkopt = 1;
+ } else {
+ nb = ilaenv_(&c__1, "SGEQLF", " ", m, n, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ }
+ work[1] = (real) lwkopt;
+
+ if (*lwork < max(1,*n) && ! lquery) {
+ *info = -7;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGEQLF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (k == 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 1;
+ iws = *n;
+ if (nb > 1 && nb < k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "SGEQLF", " ", m, n, &c_n1, &c_n1);
+ nx = max(i__1,i__2);
+ if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SGEQLF", " ", m, n, &c_n1, &
+ c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially. */
+/* The last kk columns are handled by the block method. */
+
+ ki = (k - nx - 1) / nb * nb;
+/* Computing MIN */
+ i__1 = k, i__2 = ki + nb;
+ kk = min(i__1,i__2);
+
+ i__1 = k - kk + 1;
+ i__2 = -nb;
+ for (i__ = k - kk + ki + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__
+ += i__2) {
+/* Computing MIN */
+ i__3 = k - i__ + 1;
+ ib = min(i__3,nb);
+
+/* Compute the QL factorization of the current block */
+/* A(1:m-k+i+ib-1,n-k+i:n-k+i+ib-1) */
+
+ i__3 = *m - k + i__ + ib - 1;
+ sgeql2_(&i__3, &ib, &a[(*n - k + i__) * a_dim1 + 1], lda, &tau[
+ i__], &work[1], &iinfo);
+ if (*n - k + i__ > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ i__3 = *m - k + i__ + ib - 1;
+ slarft_("Backward", "Columnwise", &i__3, &ib, &a[(*n - k +
+ i__) * a_dim1 + 1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(1:m-k+i+ib-1,1:n-k+i-1) from the left */
+
+ i__3 = *m - k + i__ + ib - 1;
+ i__4 = *n - k + i__ - 1;
+ slarfb_("Left", "Transpose", "Backward", "Columnwise", &i__3,
+ &i__4, &ib, &a[(*n - k + i__) * a_dim1 + 1], lda, &
+ work[1], &ldwork, &a[a_offset], lda, &work[ib + 1], &
+ ldwork);
+ }
+/* L10: */
+ }
+ mu = *m - k + i__ + nb - 1;
+ nu = *n - k + i__ + nb - 1;
+ } else {
+ mu = *m;
+ nu = *n;
+ }
+
+/* Use unblocked code to factor the last or only block */
+
+ if (mu > 0 && nu > 0) {
+ sgeql2_(&mu, &nu, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
+ }
+
+ work[1] = (real) iws;
+ return 0;
+
+/* End of SGEQLF */
+
+} /* sgeqlf_ */
diff --git a/contrib/libs/clapack/sgeqp3.c b/contrib/libs/clapack/sgeqp3.c
new file mode 100644
index 0000000000..b9c8817fcb
--- /dev/null
+++ b/contrib/libs/clapack/sgeqp3.c
@@ -0,0 +1,351 @@
+/* sgeqp3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int sgeqp3_(integer *m, integer *n, real *a, integer *lda,
+ integer *jpvt, real *tau, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer j, jb, na, nb, sm, sn, nx, fjb, iws, nfxd;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ integer nbmin, minmn, minws;
+ extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+ integer *), slaqp2_(integer *, integer *, integer *, real *,
+ integer *, integer *, real *, real *, real *, real *), xerbla_(
+ char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int sgeqrf_(integer *, integer *, real *, integer
+ *, real *, real *, integer *, integer *);
+ integer topbmn, sminmn;
+ extern /* Subroutine */ int slaqps_(integer *, integer *, integer *,
+ integer *, integer *, real *, integer *, integer *, real *, real *
+, real *, real *, real *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGEQP3 computes a QR factorization with column pivoting of a */
+/* matrix A: A*P = Q*R using Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the upper triangle of the array contains the */
+/* min(M,N)-by-N upper trapezoidal matrix R; the elements below */
+/* the diagonal, together with the array TAU, represent the */
+/* orthogonal matrix Q as a product of min(M,N) elementary */
+/* reflectors. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(J).ne.0, the J-th column of A is permuted */
+/* to the front of A*P (a leading column); if JPVT(J)=0, */
+/* the J-th column of A is a free column. */
+/* On exit, if JPVT(J)=K, then the J-th column of A*P was the */
+/* the K-th column of A. */
+
+/* TAU (output) REAL array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO=0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= 3*N+1. */
+/* For optimal performance LWORK >= 2*N+( N+1 )*NB, where NB */
+/* is the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real/complex scalar, and v is a real/complex vector */
+/* with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in */
+/* A(i+1:m,i), and tau in TAU(i). */
+
+/* Based on contributions by */
+/* G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+/* X. Sun, Computer Science Dept., Duke University, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --jpvt;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+
+ if (*info == 0) {
+ minmn = min(*m,*n);
+ if (minmn == 0) {
+ iws = 1;
+ lwkopt = 1;
+ } else {
+ iws = *n * 3 + 1;
+ nb = ilaenv_(&c__1, "SGEQRF", " ", m, n, &c_n1, &c_n1);
+ lwkopt = (*n << 1) + (*n + 1) * nb;
+ }
+ work[1] = (real) lwkopt;
+
+ if (*lwork < iws && ! lquery) {
+ *info = -8;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGEQP3", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (minmn == 0) {
+ return 0;
+ }
+
+/* Move initial columns up front. */
+
+ nfxd = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (jpvt[j] != 0) {
+ if (j != nfxd) {
+ sswap_(m, &a[j * a_dim1 + 1], &c__1, &a[nfxd * a_dim1 + 1], &
+ c__1);
+ jpvt[j] = jpvt[nfxd];
+ jpvt[nfxd] = j;
+ } else {
+ jpvt[j] = j;
+ }
+ ++nfxd;
+ } else {
+ jpvt[j] = j;
+ }
+/* L10: */
+ }
+ --nfxd;
+
+/* Factorize fixed columns */
+/* ======================= */
+
+/* Compute the QR factorization of fixed columns and update */
+/* remaining columns. */
+
+ if (nfxd > 0) {
+ na = min(*m,nfxd);
+/* CC CALL SGEQR2( M, NA, A, LDA, TAU, WORK, INFO ) */
+ sgeqrf_(m, &na, &a[a_offset], lda, &tau[1], &work[1], lwork, info);
+/* Computing MAX */
+ i__1 = iws, i__2 = (integer) work[1];
+ iws = max(i__1,i__2);
+ if (na < *n) {
+/* CC CALL SORM2R( 'Left', 'Transpose', M, N-NA, NA, A, LDA, */
+/* CC $ TAU, A( 1, NA+1 ), LDA, WORK, INFO ) */
+ i__1 = *n - na;
+ sormqr_("Left", "Transpose", m, &i__1, &na, &a[a_offset], lda, &
+ tau[1], &a[(na + 1) * a_dim1 + 1], lda, &work[1], lwork,
+ info);
+/* Computing MAX */
+ i__1 = iws, i__2 = (integer) work[1];
+ iws = max(i__1,i__2);
+ }
+ }
+
+/* Factorize free columns */
+/* ====================== */
+
+ if (nfxd < minmn) {
+
+ sm = *m - nfxd;
+ sn = *n - nfxd;
+ sminmn = minmn - nfxd;
+
+/* Determine the block size. */
+
+ nb = ilaenv_(&c__1, "SGEQRF", " ", &sm, &sn, &c_n1, &c_n1);
+ nbmin = 2;
+ nx = 0;
+
+ if (nb > 1 && nb < sminmn) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "SGEQRF", " ", &sm, &sn, &c_n1, &
+ c_n1);
+ nx = max(i__1,i__2);
+
+
+ if (nx < sminmn) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ minws = (sn << 1) + (sn + 1) * nb;
+ iws = max(iws,minws);
+ if (*lwork < minws) {
+
+/* Not enough workspace to use optimal NB: Reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = (*lwork - (sn << 1)) / (sn + 1);
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SGEQRF", " ", &sm, &sn, &
+ c_n1, &c_n1);
+ nbmin = max(i__1,i__2);
+
+
+ }
+ }
+ }
+
+/* Initialize partial column norms. The first N elements of work */
+/* store the exact column norms. */
+
+ i__1 = *n;
+ for (j = nfxd + 1; j <= i__1; ++j) {
+ work[j] = snrm2_(&sm, &a[nfxd + 1 + j * a_dim1], &c__1);
+ work[*n + j] = work[j];
+/* L20: */
+ }
+
+ if (nb >= nbmin && nb < sminmn && nx < sminmn) {
+
+/* Use blocked code initially. */
+
+ j = nfxd + 1;
+
+/* Compute factorization: while loop. */
+
+
+ topbmn = minmn - nx;
+L30:
+ if (j <= topbmn) {
+/* Computing MIN */
+ i__1 = nb, i__2 = topbmn - j + 1;
+ jb = min(i__1,i__2);
+
+/* Factorize JB columns among columns J:N. */
+
+ i__1 = *n - j + 1;
+ i__2 = j - 1;
+ i__3 = *n - j + 1;
+ slaqps_(m, &i__1, &i__2, &jb, &fjb, &a[j * a_dim1 + 1], lda, &
+ jpvt[j], &tau[j], &work[j], &work[*n + j], &work[(*n
+ << 1) + 1], &work[(*n << 1) + jb + 1], &i__3);
+
+ j += fjb;
+ goto L30;
+ }
+ } else {
+ j = nfxd + 1;
+ }
+
+/* Use unblocked code to factor the last or only block. */
+
+
+ if (j <= minmn) {
+ i__1 = *n - j + 1;
+ i__2 = j - 1;
+ slaqp2_(m, &i__1, &i__2, &a[j * a_dim1 + 1], lda, &jpvt[j], &tau[
+ j], &work[j], &work[*n + j], &work[(*n << 1) + 1]);
+ }
+
+ }
+
+ work[1] = (real) iws;
+ return 0;
+
+/* End of SGEQP3 */
+
+} /* sgeqp3_ */
diff --git a/contrib/libs/clapack/sgeqpf.c b/contrib/libs/clapack/sgeqpf.c
new file mode 100644
index 0000000000..1610cfdb6d
--- /dev/null
+++ b/contrib/libs/clapack/sgeqpf.c
@@ -0,0 +1,303 @@
+/* sgeqpf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sgeqpf_(integer *m, integer *n, real *a, integer *lda,
+ integer *jpvt, real *tau, real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, ma, mn;
+ real aii;
+ integer pvt;
+ real temp, temp2;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ real tol3z;
+ extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *);
+ integer itemp;
+ extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+ integer *), sgeqr2_(integer *, integer *, real *, integer *, real
+ *, real *, integer *), sorm2r_(char *, char *, integer *, integer
+ *, integer *, real *, integer *, real *, real *, integer *, real *
+, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int slarfp_(integer *, real *, real *, integer *,
+ real *);
+
+
+/* -- LAPACK deprecated driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine SGEQP3. */
+
+/* SGEQPF computes a QR factorization with column pivoting of a */
+/* real M-by-N matrix A: A*P = Q*R. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0 */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the upper triangle of the array contains the */
+/* min(M,N)-by-N upper triangular matrix R; the elements */
+/* below the diagonal, together with the array TAU, */
+/* represent the orthogonal matrix Q as a product of */
+/* min(m,n) elementary reflectors. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(i) .ne. 0, the i-th column of A is permuted */
+/* to the front of A*P (a leading column); if JPVT(i) = 0, */
+/* the i-th column of A is a free column. */
+/* On exit, if JPVT(i) = k, then the i-th column of A*P */
+/* was the k-th column of A. */
+
+/* TAU (output) REAL array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors. */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(n) */
+
+/* Each H(i) has the form */
+
+/* H = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i). */
+
+/* The matrix P is represented in jpvt as follows: If */
+/* jpvt(j) = i */
+/* then the jth column of P is the ith canonical unit vector. */
+
+/* Partial column norm updating strategy modified by */
+/* Z. Drmac and Z. Bujanovic, Dept. of Mathematics, */
+/* University of Zagreb, Croatia. */
+/* June 2006. */
+/* For more details see LAPACK Working Note 176. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --jpvt;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGEQPF", &i__1);
+ return 0;
+ }
+
+ mn = min(*m,*n);
+ tol3z = sqrt(slamch_("Epsilon"));
+
+/* Move initial columns up front */
+
+ itemp = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (jpvt[i__] != 0) {
+ if (i__ != itemp) {
+ sswap_(m, &a[i__ * a_dim1 + 1], &c__1, &a[itemp * a_dim1 + 1],
+ &c__1);
+ jpvt[i__] = jpvt[itemp];
+ jpvt[itemp] = i__;
+ } else {
+ jpvt[i__] = i__;
+ }
+ ++itemp;
+ } else {
+ jpvt[i__] = i__;
+ }
+/* L10: */
+ }
+ --itemp;
+
+/* Compute the QR factorization and update remaining columns */
+
+ if (itemp > 0) {
+ ma = min(itemp,*m);
+ sgeqr2_(m, &ma, &a[a_offset], lda, &tau[1], &work[1], info);
+ if (ma < *n) {
+ i__1 = *n - ma;
+ sorm2r_("Left", "Transpose", m, &i__1, &ma, &a[a_offset], lda, &
+ tau[1], &a[(ma + 1) * a_dim1 + 1], lda, &work[1], info);
+ }
+ }
+
+ if (itemp < mn) {
+
+/* Initialize partial column norms. The first n elements of */
+/* work store the exact column norms. */
+
+ i__1 = *n;
+ for (i__ = itemp + 1; i__ <= i__1; ++i__) {
+ i__2 = *m - itemp;
+ work[i__] = snrm2_(&i__2, &a[itemp + 1 + i__ * a_dim1], &c__1);
+ work[*n + i__] = work[i__];
+/* L20: */
+ }
+
+/* Compute factorization */
+
+ i__1 = mn;
+ for (i__ = itemp + 1; i__ <= i__1; ++i__) {
+
+/* Determine ith pivot column and swap if necessary */
+
+ i__2 = *n - i__ + 1;
+ pvt = i__ - 1 + isamax_(&i__2, &work[i__], &c__1);
+
+ if (pvt != i__) {
+ sswap_(m, &a[pvt * a_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &
+ c__1);
+ itemp = jpvt[pvt];
+ jpvt[pvt] = jpvt[i__];
+ jpvt[i__] = itemp;
+ work[pvt] = work[i__];
+ work[*n + pvt] = work[*n + i__];
+ }
+
+/* Generate elementary reflector H(i) */
+
+ if (i__ < *m) {
+ i__2 = *m - i__ + 1;
+ slarfp_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + 1 + i__ *
+ a_dim1], &c__1, &tau[i__]);
+ } else {
+ slarfp_(&c__1, &a[*m + *m * a_dim1], &a[*m + *m * a_dim1], &
+ c__1, &tau[*m]);
+ }
+
+ if (i__ < *n) {
+
+/* Apply H(i) to A(i:m,i+1:n) from the left */
+
+ aii = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.f;
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ slarf_("LEFT", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &
+ tau[i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[(*
+ n << 1) + 1]);
+ a[i__ + i__ * a_dim1] = aii;
+ }
+
+/* Update partial column norms */
+
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ if (work[j] != 0.f) {
+
+/* NOTE: The following 4 lines follow from the analysis in */
+/* Lapack Working Note 176. */
+
+ temp = (r__1 = a[i__ + j * a_dim1], dabs(r__1)) / work[j];
+/* Computing MAX */
+ r__1 = 0.f, r__2 = (temp + 1.f) * (1.f - temp);
+ temp = dmax(r__1,r__2);
+/* Computing 2nd power */
+ r__1 = work[j] / work[*n + j];
+ temp2 = temp * (r__1 * r__1);
+ if (temp2 <= tol3z) {
+ if (*m - i__ > 0) {
+ i__3 = *m - i__;
+ work[j] = snrm2_(&i__3, &a[i__ + 1 + j * a_dim1],
+ &c__1);
+ work[*n + j] = work[j];
+ } else {
+ work[j] = 0.f;
+ work[*n + j] = 0.f;
+ }
+ } else {
+ work[j] *= sqrt(temp);
+ }
+ }
+/* L30: */
+ }
+
+/* L40: */
+ }
+ }
+ return 0;
+
+/* End of SGEQPF */
+
+} /* sgeqpf_ */
diff --git a/contrib/libs/clapack/sgeqr2.c b/contrib/libs/clapack/sgeqr2.c
new file mode 100644
index 0000000000..7698d16b0c
--- /dev/null
+++ b/contrib/libs/clapack/sgeqr2.c
@@ -0,0 +1,161 @@
+/* sgeqr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sgeqr2_(integer *m, integer *n, real *a, integer *lda,
+ real *tau, real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, k;
+ real aii;
+ extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *), xerbla_(
+ char *, integer *), slarfp_(integer *, real *, real *,
+ integer *, real *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGEQR2 computes a QR factorization of a real m by n matrix A: */
+/* A = Q * R. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the m by n matrix A. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(m,n) by n upper trapezoidal matrix R (R is */
+/* upper triangular if m >= n); the elements below the diagonal, */
+/* with the array TAU, represent the orthogonal matrix Q as a */
+/* product of elementary reflectors (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) REAL array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), */
+/* and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGEQR2", &i__1);
+ return 0;
+ }
+
+ k = min(*m,*n);
+
+ i__1 = k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ slarfp_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3, *m)+ i__ * a_dim1]
+, &c__1, &tau[i__]);
+ if (i__ < *n) {
+
+/* Apply H(i) to A(i:m,i+1:n) from the left */
+
+ aii = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.f;
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ slarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &tau[
+ i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ a[i__ + i__ * a_dim1] = aii;
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of SGEQR2 */
+
+} /* sgeqr2_ */
diff --git a/contrib/libs/clapack/sgeqrf.c b/contrib/libs/clapack/sgeqrf.c
new file mode 100644
index 0000000000..64c165ba5c
--- /dev/null
+++ b/contrib/libs/clapack/sgeqrf.c
@@ -0,0 +1,252 @@
+/* sgeqrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int sgeqrf_(integer *m, integer *n, real *a, integer *lda,
+ real *tau, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int sgeqr2_(integer *, integer *, real *, integer
+ *, real *, real *, integer *), slarfb_(char *, char *, char *,
+ char *, integer *, integer *, integer *, real *, integer *, real *
+, integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGEQRF computes a QR factorization of a real M-by-N matrix A: */
+/* A = Q * R. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(M,N)-by-N upper trapezoidal matrix R (R is */
+/* upper triangular if m >= n); the elements below the diagonal, */
+/* with the array TAU, represent the orthogonal matrix Q as a */
+/* product of min(m,n) elementary reflectors (see Further */
+/* Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) REAL array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), */
+/* and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "SGEQRF", " ", m, n, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ work[1] = (real) lwkopt;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGEQRF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ k = min(*m,*n);
+ if (k == 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *n;
+ if (nb > 1 && nb < k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "SGEQRF", " ", m, n, &c_n1, &c_n1);
+ nx = max(i__1,i__2);
+ if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SGEQRF", " ", m, n, &c_n1, &
+ c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially */
+
+ i__1 = k - nx;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = k - i__ + 1;
+ ib = min(i__3,nb);
+
+/* Compute the QR factorization of the current block */
+/* A(i:m,i:i+ib-1) */
+
+ i__3 = *m - i__ + 1;
+ sgeqr2_(&i__3, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
+ 1], &iinfo);
+ if (i__ + ib <= *n) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+ i__3 = *m - i__ + 1;
+ slarft_("Forward", "Columnwise", &i__3, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(i:m,i+ib:n) from the left */
+
+ i__3 = *m - i__ + 1;
+ i__4 = *n - i__ - ib + 1;
+ slarfb_("Left", "Transpose", "Forward", "Columnwise", &i__3, &
+ i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
+ ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, &work[ib
+ + 1], &ldwork);
+ }
+/* L10: */
+ }
+ } else {
+ i__ = 1;
+ }
+
+/* Use unblocked code to factor the last or only block. */
+
+ if (i__ <= k) {
+ i__2 = *m - i__ + 1;
+ i__1 = *n - i__ + 1;
+ sgeqr2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
+, &iinfo);
+ }
+
+ work[1] = (real) iws;
+ return 0;
+
+/* End of SGEQRF */
+
+} /* sgeqrf_ */
diff --git a/contrib/libs/clapack/sgerfs.c b/contrib/libs/clapack/sgerfs.c
new file mode 100644
index 0000000000..d053026b91
--- /dev/null
+++ b/contrib/libs/clapack/sgerfs.c
@@ -0,0 +1,422 @@
+/* sgerfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b15 = -1.f;
+static real c_b17 = 1.f;
+
+/* Subroutine */ int sgerfs_(char *trans, integer *n, integer *nrhs, real *a, 
+	integer *lda, real *af, integer *ldaf, integer *ipiv, real *b, 
+	integer *ldb, real *x, integer *ldx, real *ferr, real *berr, real *
+	work, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1, 
+	    x_offset, i__1, i__2, i__3;
+    real r__1, r__2, r__3;
+
+    /* Local variables */
+    integer i__, j, k;
+    real s, xk;
+    integer nz;
+    real eps;
+    integer kase;
+    real safe1, safe2;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *, 
+	    real *, integer *, real *, integer *, real *, real *, integer *);
+    integer count;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *, 
+	    integer *), saxpy_(integer *, real *, real *, integer *, real *, 
+	    integer *), slacn2_(integer *, real *, real *, integer *, real *, 
+	    integer *, integer *);
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical notran;
+    extern /* Subroutine */ int sgetrs_(char *, integer *, integer *, real *, 
+	    integer *, integer *, real *, integer *, integer *);
+    char transt[1];
+    real lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGERFS improves the computed solution to a system of linear */
+/* equations and provides error bounds and backward error estimates for */
+/* the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose = Transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The original N-by-N matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input) REAL array, dimension (LDAF,N) */
+/* The factors L and U from the factorization A = P*L*U */
+/* as computed by SGETRF. */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices from SGETRF; for 1<=i<=N, row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* B (input) REAL array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) REAL array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by SGETRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    af_dim1 = *ldaf;
+    af_offset = 1 + af_dim1;
+    af -= af_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    notran = lsame_(trans, "N");
+    if (! notran && ! lsame_(trans, "T") && ! lsame_(
+	    trans, "C")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldaf < max(1,*n)) {
+	*info = -7;
+    } else if (*ldb < max(1,*n)) {
+	*info = -10;
+    } else if (*ldx < max(1,*n)) {
+	*info = -12;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGERFS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] = 0.f;
+	    berr[j] = 0.f;
+/* L10: */
+	}
+	return 0;
+    }
+
+/* TRANST holds the opposite of TRANS; it is used for the transposed */
+/* solves in the SLACN2 norm-estimation loop below. */
+
+    if (notran) {
+	*(unsigned char *)transt = 'T';
+    } else {
+	*(unsigned char *)transt = 'N';
+    }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+    nz = *n + 1;
+    eps = slamch_("Epsilon");
+    safmin = slamch_("Safe minimum");
+    safe1 = nz * safmin;
+    safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+	count = 1;
+	lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A, A**T, or A**H, depending on TRANS. */
+
+/* Workspace layout: WORK(1:N) accumulates abs(op(A))*abs(X)+abs(B), */
+/* WORK(N+1:2*N) holds the residual R, and WORK(2*N+1:3*N) is the */
+/* vector handed to SLACN2 further down. */
+
+	scopy_(n, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+	sgemv_(trans, n, n, &c_b15, &a[a_offset], lda, &x[j * x_dim1 + 1], &
+		c__1, &c_b17, &work[*n + 1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    work[i__] = (r__1 = b[i__ + j * b_dim1], dabs(r__1));
+/* L30: */
+	}
+
+/* Compute abs(op(A))*abs(X) + abs(B). */
+
+	if (notran) {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+		i__3 = *n;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    work[i__] += (r__1 = a[i__ + k * a_dim1], dabs(r__1)) * 
+			    xk;
+/* L40: */
+		}
+/* L50: */
+	    }
+	} else {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		s = 0.f;
+		i__3 = *n;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    s += (r__1 = a[i__ + k * a_dim1], dabs(r__1)) * (r__2 = x[
+			    i__ + j * x_dim1], dabs(r__2));
+/* L60: */
+		}
+		work[k] += s;
+/* L70: */
+	    }
+	}
+	s = 0.f;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (work[i__] > safe2) {
+/* Computing MAX */
+		r__2 = s, r__3 = (r__1 = work[*n + i__], dabs(r__1)) / work[
+			i__];
+		s = dmax(r__2,r__3);
+	    } else {
+/* Computing MAX */
+		r__2 = s, r__3 = ((r__1 = work[*n + i__], dabs(r__1)) + safe1)
+			 / (work[i__] + safe1);
+		s = dmax(r__2,r__3);
+	    }
+/* L80: */
+	}
+	berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+/* (ITMAX = 5 here, inlined by f2c as the literal in COUNT <= 5.) */
+
+	if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+	    sgetrs_(trans, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[*n 
+		    + 1], n, info);
+	    saxpy_(n, &c_b17, &work[*n + 1], &c__1, &x[j * x_dim1 + 1], &c__1)
+		    ;
+	    lstres = berr[j];
+	    ++count;
+	    goto L20;
+	}
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use SLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (work[i__] > safe2) {
+		work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps * 
+			work[i__];
+	    } else {
+		work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps * 
+			work[i__] + safe1;
+	    }
+/* L90: */
+	}
+
+/* Reverse-communication loop: SLACN2 returns KASE = 1 or 2 asking */
+/* the caller to apply inv(op(A)**T)*diag(W) or inv(op(A))*diag(W) */
+/* to WORK(N+1:2*N); KASE = 0 signals the estimate is in FERR(J). */
+
+	kase = 0;
+L100:
+	slacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+		kase, isave);
+	if (kase != 0) {
+	    if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)**T). */
+
+		sgetrs_(transt, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &
+			work[*n + 1], n, info);
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    work[*n + i__] = work[i__] * work[*n + i__];
+/* L110: */
+		}
+	    } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    work[*n + i__] = work[i__] * work[*n + i__];
+/* L120: */
+		}
+		sgetrs_(trans, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &
+			work[*n + 1], n, info);
+	    }
+	    goto L100;
+	}
+
+/* Normalize error. */
+
+	lstres = 0.f;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    r__2 = lstres, r__3 = (r__1 = x[i__ + j * x_dim1], dabs(r__1));
+	    lstres = dmax(r__2,r__3);
+/* L130: */
+	}
+	if (lstres != 0.f) {
+	    ferr[j] /= lstres;
+	}
+
+/* L140: */
+    }
+
+    return 0;
+
+/* End of SGERFS */
+
+} /* sgerfs_ */
diff --git a/contrib/libs/clapack/sgerq2.c b/contrib/libs/clapack/sgerq2.c
new file mode 100644
index 0000000000..9a902f0ec4
--- /dev/null
+++ b/contrib/libs/clapack/sgerq2.c
@@ -0,0 +1,155 @@
+/* sgerq2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sgerq2_(integer *m, integer *n, real *a, integer *lda, 
+	real *tau, real *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, k;
+    real aii;
+    extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *, 
+	    integer *, real *, real *, integer *, real *), xerbla_(
+	    char *, integer *), slarfp_(integer *, real *, real *, 
+	    integer *, real *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGERQ2 computes an RQ factorization of a real m by n matrix A: */
+/* A = R * Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the m by n matrix A. */
+/* On exit, if m <= n, the upper triangle of the subarray */
+/* A(1:m,n-m+1:n) contains the m by m upper triangular matrix R; */
+/* if m >= n, the elements on and above the (m-n)-th subdiagonal */
+/* contain the m by n upper trapezoidal matrix R; the remaining */
+/* elements, with the array TAU, represent the orthogonal matrix */
+/* Q as a product of elementary reflectors (see Further */
+/* Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) REAL array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) REAL array, dimension (M) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(n-k+i+1:n) = 0 and v(n-k+i) = 1; v(1:n-k+i-1) is stored on exit in */
+/* A(m-k+i,1:n-k+i-1), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGERQ2", &i__1);
+	return 0;
+    }
+
+    k = min(*m,*n);
+
+/* Process the reflectors from the last row of the trailing */
+/* submatrix upward (I = K, K-1, ..., 1). */
+
+    for (i__ = k; i__ >= 1; --i__) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(m-k+i,1:n-k+i-1) */
+
+	i__1 = *n - k + i__;
+	slarfp_(&i__1, &a[*m - k + i__ + (*n - k + i__) * a_dim1], &a[*m - k 
+		+ i__ + a_dim1], lda, &tau[i__]);
+
+/* Apply H(i) to A(1:m-k+i-1,1:n-k+i) from the right */
+
+/* The diagonal entry is saved in AII and temporarily overwritten */
+/* with 1 so the row holds the full reflector vector v (whose */
+/* last entry is implicitly unit); it is restored after SLARF. */
+
+	aii = a[*m - k + i__ + (*n - k + i__) * a_dim1];
+	a[*m - k + i__ + (*n - k + i__) * a_dim1] = 1.f;
+	i__1 = *m - k + i__ - 1;
+	i__2 = *n - k + i__;
+	slarf_("Right", &i__1, &i__2, &a[*m - k + i__ + a_dim1], lda, &tau[
+		i__], &a[a_offset], lda, &work[1]);
+	a[*m - k + i__ + (*n - k + i__) * a_dim1] = aii;
+/* L10: */
+    }
+    return 0;
+
+/* End of SGERQ2 */
+
+} /* sgerq2_ */
diff --git a/contrib/libs/clapack/sgerqf.c b/contrib/libs/clapack/sgerqf.c
new file mode 100644
index 0000000000..749609e187
--- /dev/null
+++ b/contrib/libs/clapack/sgerqf.c
@@ -0,0 +1,272 @@
+/* sgerqf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int sgerqf_(integer *m, integer *n, real *a, integer *lda, 
+	real *tau, real *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, k, ib, nb, ki, kk, mu, nu, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int sgerq2_(integer *, integer *, real *, integer 
+	    *, real *, real *, integer *), slarfb_(char *, char *, char *, 
+	    char *, integer *, integer *, integer *, real *, integer *, real *
+, integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *, 
+	    real *, integer *, real *, real *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGERQF computes an RQ factorization of a real M-by-N matrix A: */
+/* A = R * Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* if m <= n, the upper triangle of the subarray */
+/* A(1:m,n-m+1:n) contains the M-by-M upper triangular matrix R; */
+/* if m >= n, the elements on and above the (m-n)-th subdiagonal */
+/* contain the M-by-N upper trapezoidal matrix R; */
+/* the remaining elements, with the array TAU, represent the */
+/* orthogonal matrix Q as a product of min(m,n) elementary */
+/* reflectors (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) REAL array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(n-k+i+1:n) = 0 and v(n-k+i) = 1; v(1:n-k+i-1) is stored on exit in */
+/* A(m-k+i,1:n-k+i-1), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    } else if (*lwork < max(1,*m) && ! lquery) {
+	*info = -7;
+    }
+
+    if (*info == 0) {
+	k = min(*m,*n);
+	if (k == 0) {
+	    lwkopt = 1;
+	} else {
+	    nb = ilaenv_(&c__1, "SGERQF", " ", m, n, &c_n1, &c_n1);
+	    lwkopt = *m * nb;
+	    work[1] = (real) lwkopt;
+	}
+	work[1] = (real) lwkopt;
+
+	if (*lwork < max(1,*m) && ! lquery) {
+	    *info = -7;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGERQF", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (k == 0) {
+	return 0;
+    }
+
+    nbmin = 2;
+    nx = 1;
+    iws = *m;
+    if (nb > 1 && nb < k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+	i__1 = 0, i__2 = ilaenv_(&c__3, "SGERQF", " ", m, n, &c_n1, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *m;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "SGERQF", " ", m, n, &c_n1, &
+			c_n1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially. */
+/* The last kk rows are handled by the block method. */
+
+/* KI is the start offset of the last full block; KK is the total */
+/* number of rows factored by the blocked loop below (KK <= K). */
+
+	ki = (k - nx - 1) / nb * nb;
+/* Computing MIN */
+	i__1 = k, i__2 = ki + nb;
+	kk = min(i__1,i__2);
+
+/* The loop runs backwards (step -NB) because RQ builds Q from the */
+/* bottom rows of A toward the top. */
+
+	i__1 = k - kk + 1;
+	i__2 = -nb;
+	for (i__ = k - kk + ki + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ 
+		+= i__2) {
+/* Computing MIN */
+	    i__3 = k - i__ + 1;
+	    ib = min(i__3,nb);
+
+/* Compute the RQ factorization of the current block */
+/* A(m-k+i:m-k+i+ib-1,1:n-k+i+ib-1) */
+
+	    i__3 = *n - k + i__ + ib - 1;
+	    sgerq2_(&ib, &i__3, &a[*m - k + i__ + a_dim1], lda, &tau[i__], &
+		    work[1], &iinfo);
+	    if (*m - k + i__ > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+		i__3 = *n - k + i__ + ib - 1;
+		slarft_("Backward", "Rowwise", &i__3, &ib, &a[*m - k + i__ + 
+			a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(1:m-k+i-1,1:n-k+i+ib-1) from the right */
+
+		i__3 = *m - k + i__ - 1;
+		i__4 = *n - k + i__ + ib - 1;
+		slarfb_("Right", "No transpose", "Backward", "Rowwise", &i__3,
+			 &i__4, &ib, &a[*m - k + i__ + a_dim1], lda, &work[1],
+			 &ldwork, &a[a_offset], lda, &work[ib + 1], &ldwork);
+	    }
+/* L10: */
+	}
+
+/* MU x NU is the leading submatrix still to be factored after the */
+/* blocked sweep (I holds its final loop value here). */
+
+	mu = *m - k + i__ + nb - 1;
+	nu = *n - k + i__ + nb - 1;
+    } else {
+	mu = *m;
+	nu = *n;
+    }
+
+/* Use unblocked code to factor the last or only block */
+
+    if (mu > 0 && nu > 0) {
+	sgerq2_(&mu, &nu, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
+    }
+
+    work[1] = (real) iws;
+    return 0;
+
+/* End of SGERQF */
+
+} /* sgerqf_ */
diff --git a/contrib/libs/clapack/sgesc2.c b/contrib/libs/clapack/sgesc2.c
new file mode 100644
index 0000000000..4afd793ca8
--- /dev/null
+++ b/contrib/libs/clapack/sgesc2.c
@@ -0,0 +1,176 @@
+/* sgesc2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int sgesc2_(integer *n, real *a, integer *lda, real *rhs, 
+	integer *ipiv, integer *jpiv, real *scale)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    real r__1, r__2;
+
+    /* Local variables */
+    integer i__, j;
+    real eps, temp;
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *), 
+	    slabad_(real *, real *);
+    extern doublereal slamch_(char *);
+    real bignum;
+    extern integer isamax_(integer *, real *, integer *);
+    extern /* Subroutine */ int slaswp_(integer *, real *, integer *, integer 
+	    *, integer *, integer *, integer *);
+    real smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGESC2 solves a system of linear equations */
+
+/* A * X = scale* RHS */
+
+/* with a general N-by-N matrix A using the LU factorization with */
+/* complete pivoting computed by SGETC2. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* On entry, the LU part of the factorization of the n-by-n */
+/* matrix A computed by SGETC2: A = P * L * U * Q */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1, N). */
+
+/* RHS (input/output) REAL array, dimension (N). */
+/* On entry, the right hand side vector b. */
+/* On exit, the solution vector X. */
+
+/* IPIV (input) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= i <= N, row i of the */
+/* matrix has been interchanged with row IPIV(i). */
+
+/* JPIV (input) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= j <= N, column j of the */
+/* matrix has been interchanged with column JPIV(j). */
+
+/* SCALE (output) REAL */
+/* On exit, SCALE contains the scale factor. SCALE is chosen */
+/* 0 <= SCALE <= 1 to prevent overflow in the solution. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Set constant to control overflow */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --rhs;
+    --ipiv;
+    --jpiv;
+
+    /* Function Body */
+    eps = slamch_("P");
+    smlnum = slamch_("S") / eps;
+    bignum = 1.f / smlnum;
+    slabad_(&smlnum, &bignum);
+
+/* Apply permutations IPIV to RHS */
+
+/* RHS is treated as an n-by-1 matrix with leading dimension LDA */
+/* in the SLASWP calls here and at the end of the routine. */
+
+    i__1 = *n - 1;
+    slaswp_(&c__1, &rhs[1], lda, &c__1, &i__1, &ipiv[1], &c__1);
+
+/* Solve for L part */
+
+    i__1 = *n - 1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = *n;
+	for (j = i__ + 1; j <= i__2; ++j) {
+	    rhs[j] -= a[j + i__ * a_dim1] * rhs[i__];
+/* L10: */
+	}
+/* L20: */
+    }
+
+/* Solve for U part */
+
+    *scale = 1.f;
+
+/* Check for scaling */
+
+/* If the largest RHS entry could overflow when divided by the */
+/* trailing pivot A(n,n), rescale RHS (and record it in SCALE) so */
+/* the back substitution below stays in range. */
+
+    i__ = isamax_(n, &rhs[1], &c__1);
+    if (smlnum * 2.f * (r__1 = rhs[i__], dabs(r__1)) > (r__2 = a[*n + *n * 
+	    a_dim1], dabs(r__2))) {
+	temp = .5f / (r__1 = rhs[i__], dabs(r__1));
+	sscal_(n, &temp, &rhs[1], &c__1);
+	*scale *= temp;
+    }
+
+    for (i__ = *n; i__ >= 1; --i__) {
+	temp = 1.f / a[i__ + i__ * a_dim1];
+	rhs[i__] *= temp;
+	i__1 = *n;
+	for (j = i__ + 1; j <= i__1; ++j) {
+	    rhs[i__] -= rhs[j] * (a[i__ + j * a_dim1] * temp);
+/* L30: */
+	}
+/* L40: */
+    }
+
+/* Apply permutations JPIV to the solution (RHS) */
+
+    i__1 = *n - 1;
+    slaswp_(&c__1, &rhs[1], lda, &c__1, &i__1, &jpiv[1], &c_n1);
+    return 0;
+
+/* End of SGESC2 */
+
+} /* sgesc2_ */
diff --git a/contrib/libs/clapack/sgesdd.c b/contrib/libs/clapack/sgesdd.c
new file mode 100644
index 0000000000..b9680dafde
--- /dev/null
+++ b/contrib/libs/clapack/sgesdd.c
@@ -0,0 +1,1611 @@
+/* sgesdd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;   /* constant 1: ISPEC for ILAENV, dummy leading dims in SBDSDC */
+static integer c_n1 = -1;  /* constant -1: "unspecified" N3/N4 arguments in ILAENV queries */
+static integer c__0 = 0;   /* constant 0: KL/KU arguments for full-matrix ("G") SLASCL */
+static real c_b227 = 0.f;  /* 0.0f: SLASET fill value and beta for SGEMM */
+static real c_b248 = 1.f;  /* 1.0f: SLASET diagonal value and alpha for SGEMM */
+
+/* Subroutine */ int sgesdd_(char *jobz, integer *m, integer *n, real *a,
+ integer *lda, real *s, real *u, integer *ldu, real *vt, integer *ldvt,
+ real *work, integer *lwork, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+ i__2, i__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, ie, il, ir, iu, blk;
+ real dum[1], eps;
+ integer ivt, iscl;
+ real anrm;
+ integer idum[1], ierr, itau;
+ extern logical lsame_(char *, char *);
+ integer chunk;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ integer minmn, wrkbl, itaup, itauq, mnthr;
+ logical wntqa;
+ integer nwork;
+ logical wntqn, wntqo, wntqs;
+ integer bdspac;
+ extern /* Subroutine */ int sbdsdc_(char *, char *, integer *, real *,
+ real *, real *, integer *, real *, integer *, real *, integer *,
+ real *, integer *, integer *), sgebrd_(integer *,
+ integer *, real *, integer *, real *, real *, real *, real *,
+ real *, integer *, integer *);
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ real bignum;
+ extern /* Subroutine */ int sgelqf_(integer *, integer *, real *, integer
+ *, real *, real *, integer *, integer *), slascl_(char *, integer
+ *, integer *, real *, real *, integer *, integer *, real *,
+ integer *, integer *), sgeqrf_(integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *), slacpy_(char
+ *, integer *, integer *, real *, integer *, real *, integer *), slaset_(char *, integer *, integer *, real *, real *,
+ real *, integer *), sorgbr_(char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, integer *
+);
+ integer ldwrkl;
+ extern /* Subroutine */ int sormbr_(char *, char *, char *, integer *,
+ integer *, integer *, real *, integer *, real *, real *, integer *
+, real *, integer *, integer *);
+ integer ldwrkr, minwrk, ldwrku, maxwrk;
+ extern /* Subroutine */ int sorglq_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *);
+ integer ldwkvt;
+ real smlnum;
+ logical wntqas;
+ extern /* Subroutine */ int sorgqr_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *);
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2.1) -- */
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/* March 2009 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGESDD computes the singular value decomposition (SVD) of a real */
+/* M-by-N matrix A, optionally computing the left and right singular */
+/* vectors. If singular vectors are desired, it uses a */
+/* divide-and-conquer algorithm. */
+
+/* The SVD is written */
+
+/* A = U * SIGMA * transpose(V) */
+
+/* where SIGMA is an M-by-N matrix which is zero except for its */
+/* min(m,n) diagonal elements, U is an M-by-M orthogonal matrix, and */
+/* V is an N-by-N orthogonal matrix. The diagonal elements of SIGMA */
+/* are the singular values of A; they are real and non-negative, and */
+/* are returned in descending order. The first min(m,n) columns of */
+/* U and V are the left and right singular vectors of A. */
+
+/* Note that the routine returns VT = V**T, not V. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* Specifies options for computing all or part of the matrix U: */
+/* = 'A': all M columns of U and all N rows of V**T are */
+/* returned in the arrays U and VT; */
+/* = 'S': the first min(M,N) columns of U and the first */
+/* min(M,N) rows of V**T are returned in the arrays U */
+/* and VT; */
+/* = 'O': If M >= N, the first N columns of U are overwritten */
+/* on the array A and all rows of V**T are returned in */
+/* the array VT; */
+/* otherwise, all columns of U are returned in the */
+/* array U and the first M rows of V**T are overwritten */
+/* in the array A; */
+/* = 'N': no columns of U or rows of V**T are computed. */
+
+/* M (input) INTEGER */
+/* The number of rows of the input matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the input matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* if JOBZ = 'O', A is overwritten with the first N columns */
+/* of U (the left singular vectors, stored */
+/* columnwise) if M >= N; */
+/* A is overwritten with the first M rows */
+/* of V**T (the right singular vectors, stored */
+/* rowwise) otherwise. */
+/* if JOBZ .ne. 'O', the contents of A are destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* S (output) REAL array, dimension (min(M,N)) */
+/* The singular values of A, sorted so that S(i) >= S(i+1). */
+
+/* U (output) REAL array, dimension (LDU,UCOL) */
+/* UCOL = M if JOBZ = 'A' or JOBZ = 'O' and M < N; */
+/* UCOL = min(M,N) if JOBZ = 'S'. */
+/* If JOBZ = 'A' or JOBZ = 'O' and M < N, U contains the M-by-M */
+/* orthogonal matrix U; */
+/* if JOBZ = 'S', U contains the first min(M,N) columns of U */
+/* (the left singular vectors, stored columnwise); */
+/* if JOBZ = 'O' and M >= N, or JOBZ = 'N', U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= 1; if */
+/* JOBZ = 'S' or 'A' or JOBZ = 'O' and M < N, LDU >= M. */
+
+/* VT (output) REAL array, dimension (LDVT,N) */
+/* If JOBZ = 'A' or JOBZ = 'O' and M >= N, VT contains the */
+/* N-by-N orthogonal matrix V**T; */
+/* if JOBZ = 'S', VT contains the first min(M,N) rows of */
+/* V**T (the right singular vectors, stored rowwise); */
+/* if JOBZ = 'O' and M < N, or JOBZ = 'N', VT is not referenced. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. LDVT >= 1; if */
+/* JOBZ = 'A' or JOBZ = 'O' and M >= N, LDVT >= N; */
+/* if JOBZ = 'S', LDVT >= min(M,N). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK; */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= 1. */
+/* If JOBZ = 'N', */
+/* LWORK >= 3*min(M,N) + max(max(M,N),6*min(M,N)). */
+/* If JOBZ = 'O', */
+/* LWORK >= 3*min(M,N) + */
+/* max(max(M,N),5*min(M,N)*min(M,N)+4*min(M,N)). */
+/* If JOBZ = 'S' or 'A' */
+/* LWORK >= 3*min(M,N) + */
+/* max(max(M,N),4*min(M,N)*min(M,N)+4*min(M,N)). */
+/* For good performance, LWORK should generally be larger. */
+/* If LWORK = -1 but other input arguments are legal, WORK(1) */
+/* returns the optimal LWORK. */
+
+/* IWORK (workspace) INTEGER array, dimension (8*min(M,N)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: SBDSDC did not converge, updating process failed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ wntqa = lsame_(jobz, "A");
+ wntqs = lsame_(jobz, "S");
+ wntqas = wntqa || wntqs;
+ wntqo = lsame_(jobz, "O");
+ wntqn = lsame_(jobz, "N");
+ lquery = *lwork == -1;
+
+ if (! (wntqa || wntqs || wntqo || wntqn)) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldu < 1 || wntqas && *ldu < *m || wntqo && *m < *n && *ldu < *
+ m) {
+ *info = -8;
+ } else if (*ldvt < 1 || wntqa && *ldvt < *n || wntqs && *ldvt < minmn ||
+ wntqo && *m >= *n && *ldvt < *n) {
+ *info = -10;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ if (*m >= *n && minmn > 0) {
+
+/* Compute space needed for SBDSDC */
+
+ mnthr = (integer) (minmn * 11.f / 6.f);
+ if (wntqn) {
+ bdspac = *n * 7;
+ } else {
+ bdspac = *n * 3 * *n + (*n << 2);
+ }
+ if (*m >= mnthr) {
+ if (wntqn) {
+
+/* Path 1 (M much larger than N, JOBZ='N') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "SGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n;
+ maxwrk = max(i__1,i__2);
+ minwrk = bdspac + *n;
+ } else if (wntqo) {
+
+/* Path 2 (M much larger than N, JOBZ='O') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "SGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "SORGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+, "QLN", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+, "PRT", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + (*n << 1) * *n;
+ minwrk = bdspac + (*n << 1) * *n + *n * 3;
+ } else if (wntqs) {
+
+/* Path 3 (M much larger than N, JOBZ='S') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "SGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "SORGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+, "QLN", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+, "PRT", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *n * *n;
+ minwrk = bdspac + *n * *n + *n * 3;
+ } else if (wntqa) {
+
+/* Path 4 (M much larger than N, JOBZ='A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "SGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *m * ilaenv_(&c__1, "SORGQR",
+ " ", m, m, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+, "QLN", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+, "PRT", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *n * *n;
+ minwrk = bdspac + *n * *n + *n * 3;
+ }
+ } else {
+
+/* Path 5 (M at least N, but not much larger) */
+
+ wrkbl = *n * 3 + (*m + *n) * ilaenv_(&c__1, "SGEBRD", " ", m,
+ n, &c_n1, &c_n1);
+ if (wntqn) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *n * 3 + max(*m,bdspac);
+ } else if (wntqo) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+, "QLN", m, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+, "PRT", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *n;
+/* Computing MAX */
+ i__1 = *m, i__2 = *n * *n + bdspac;
+ minwrk = *n * 3 + max(i__1,i__2);
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+, "QLN", m, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+, "PRT", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *n * 3 + max(*m,bdspac);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *m * ilaenv_(&c__1, "SORMBR"
+, "QLN", m, m, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+, "PRT", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = bdspac + *n * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *n * 3 + max(*m,bdspac);
+ }
+ }
+ } else if (minmn > 0) {
+
+/* Compute space needed for SBDSDC */
+
+ mnthr = (integer) (minmn * 11.f / 6.f);
+ if (wntqn) {
+ bdspac = *m * 7;
+ } else {
+ bdspac = *m * 3 * *m + (*m << 2);
+ }
+ if (*n >= mnthr) {
+ if (wntqn) {
+
+/* Path 1t (N much larger than M, JOBZ='N') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m;
+ maxwrk = max(i__1,i__2);
+ minwrk = bdspac + *m;
+ } else if (wntqo) {
+
+/* Path 2t (N much larger than M, JOBZ='O') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "SORGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+, "QLN", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+, "PRT", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + (*m << 1) * *m;
+ minwrk = bdspac + (*m << 1) * *m + *m * 3;
+ } else if (wntqs) {
+
+/* Path 3t (N much larger than M, JOBZ='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "SORGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+, "QLN", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+, "PRT", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *m;
+ minwrk = bdspac + *m * *m + *m * 3;
+ } else if (wntqa) {
+
+/* Path 4t (N much larger than M, JOBZ='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *n * ilaenv_(&c__1, "SORGLQ",
+ " ", n, n, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+, "QLN", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+, "PRT", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *m;
+ minwrk = bdspac + *m * *m + *m * 3;
+ }
+ } else {
+
+/* Path 5t (N greater than M, but not much larger) */
+
+ wrkbl = *m * 3 + (*m + *n) * ilaenv_(&c__1, "SGEBRD", " ", m,
+ n, &c_n1, &c_n1);
+ if (wntqn) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *m * 3 + max(*n,bdspac);
+ } else if (wntqo) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+, "QLN", m, m, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+, "PRT", m, n, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *n;
+/* Computing MAX */
+ i__1 = *n, i__2 = *m * *m + bdspac;
+ minwrk = *m * 3 + max(i__1,i__2);
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+, "QLN", m, m, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+, "PRT", m, n, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *m * 3 + max(*n,bdspac);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+, "QLN", m, m, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+, "PRT", n, n, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *m * 3 + max(*n,bdspac);
+ }
+ }
+ }
+ maxwrk = max(maxwrk,minwrk);
+ work[1] = (real) maxwrk;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGESDD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = sqrt(slamch_("S")) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = slange_("M", m, n, &a[a_offset], lda, dum);
+ iscl = 0;
+ if (anrm > 0.f && anrm < smlnum) {
+ iscl = 1;
+ slascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, &
+ ierr);
+ } else if (anrm > bignum) {
+ iscl = 1;
+ slascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+ if (*m >= *n) {
+
+/* A has at least as many rows as columns. If A has sufficiently */
+/* more rows than columns, first reduce using the QR */
+/* decomposition (if sufficient workspace available) */
+
+ if (*m >= mnthr) {
+
+ if (wntqn) {
+
+/* Path 1 (M much larger than N, JOBZ='N') */
+/* No singular vectors to be computed */
+
+ itau = 1;
+ nwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Zero out below R */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ slaset_("L", &i__1, &i__2, &c_b227, &c_b227, &a[a_dim1 + 2],
+ lda);
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize R in A */
+/* (Workspace: need 4*N, prefer 3*N+2*N*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+ nwork = ie + *n;
+
+/* Perform bidiagonal SVD, computing singular values only */
+/* (Workspace: need N+BDSPAC) */
+
+ sbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+
+ } else if (wntqo) {
+
+/* Path 2 (M much larger than N, JOBZ = 'O') */
+/* N left singular vectors to be overwritten on A and */
+/* N right singular vectors to be computed in VT */
+
+ ir = 1;
+
+/* WORK(IR) is LDWRKR by N */
+
+ if (*lwork >= *lda * *n + *n * *n + *n * 3 + bdspac) {
+ ldwrkr = *lda;
+ } else {
+ ldwrkr = (*lwork - *n * *n - *n * 3 - bdspac) / *n;
+ }
+ itau = ir + ldwrkr * *n;
+ nwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ slacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ slaset_("L", &i__1, &i__2, &c_b227, &c_b227, &work[ir + 1], &
+ ldwrkr);
+
+/* Generate Q in A */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__1, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize R in VT, copying result to WORK(IR) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/* WORK(IU) is N by N */
+
+ iu = nwork;
+ nwork = iu + *n * *n;
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in WORK(IU) and computing right */
+/* singular vectors of bidiagonal matrix in VT */
+/* (Workspace: need N+N*N+BDSPAC) */
+
+ sbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Overwrite WORK(IU) by left singular vectors of R */
+/* and VT by right singular vectors of R */
+/* (Workspace: need 2*N*N+3*N, prefer 2*N*N+2*N+N*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
+ itauq], &work[iu], n, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IU), storing result in WORK(IR) and copying to A */
+/* (Workspace: need 2*N*N, prefer N*N+M*N) */
+
+ i__1 = *m;
+ i__2 = ldwrkr;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrkr);
+ sgemm_("N", "N", &chunk, n, n, &c_b248, &a[i__ + a_dim1],
+ lda, &work[iu], n, &c_b227, &work[ir], &ldwrkr);
+ slacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ +
+ a_dim1], lda);
+/* L10: */
+ }
+
+ } else if (wntqs) {
+
+/* Path 3 (M much larger than N, JOBZ='S') */
+/* N left singular vectors to be computed in U and */
+/* N right singular vectors to be computed in VT */
+
+ ir = 1;
+
+/* WORK(IR) is N by N */
+
+ ldwrkr = *n;
+ itau = ir + ldwrkr * *n;
+ nwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ slacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__2 = *n - 1;
+ i__1 = *n - 1;
+ slaset_("L", &i__2, &i__1, &c_b227, &c_b227, &work[ir + 1], &
+ ldwrkr);
+
+/* Generate Q in A */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IR) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagoal matrix in U and computing right singular */
+/* vectors of bidiagonal matrix in VT */
+/* (Workspace: need N+BDSPAC) */
+
+ sbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Overwrite U by left singular vectors of R and VT */
+/* by right singular vectors of R */
+/* (Workspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+ i__2 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IR), storing result in U */
+/* (Workspace: need N*N) */
+
+ slacpy_("F", n, n, &u[u_offset], ldu, &work[ir], &ldwrkr);
+ sgemm_("N", "N", m, n, n, &c_b248, &a[a_offset], lda, &work[
+ ir], &ldwrkr, &c_b227, &u[u_offset], ldu);
+
+ } else if (wntqa) {
+
+/* Path 4 (M much larger than N, JOBZ='A') */
+/* M left singular vectors to be computed in U and */
+/* N right singular vectors to be computed in VT */
+
+ iu = 1;
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ itau = iu + ldwrku * *n;
+ nwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+ slacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+
+/* Generate Q in U */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+ i__2 = *lwork - nwork + 1;
+ sorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &work[nwork],
+ &i__2, &ierr);
+
+/* Produce R in A, zeroing out other entries */
+
+ i__2 = *n - 1;
+ i__1 = *n - 1;
+ slaset_("L", &i__2, &i__1, &c_b227, &c_b227, &a[a_dim1 + 2],
+ lda);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize R in A */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in WORK(IU) and computing right */
+/* singular vectors of bidiagonal matrix in VT */
+/* (Workspace: need N+N*N+BDSPAC) */
+
+ sbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Overwrite WORK(IU) by left singular vectors of R and VT */
+/* by right singular vectors of R */
+/* (Workspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", n, n, n, &a[a_offset], lda, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__2, &
+ ierr);
+ i__2 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/* Multiply Q in U by left singular vectors of R in */
+/* WORK(IU), storing result in A */
+/* (Workspace: need N*N) */
+
+ sgemm_("N", "N", m, n, n, &c_b248, &u[u_offset], ldu, &work[
+ iu], &ldwrku, &c_b227, &a[a_offset], lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ slacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+
+ }
+
+ } else {
+
+/* M .LT. MNTHR */
+
+/* Path 5 (M at least N, but not much larger) */
+/* Reduce to bidiagonal form without QR decomposition */
+
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize A */
+/* (Workspace: need 3*N+M, prefer 3*N+(M+N)*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__2, &ierr);
+ if (wntqn) {
+
+/* Perform bidiagonal SVD, only computing singular values */
+/* (Workspace: need N+BDSPAC) */
+
+ sbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+ } else if (wntqo) {
+ iu = nwork;
+ if (*lwork >= *m * *n + *n * 3 + bdspac) {
+
+/* WORK( IU ) is M by N */
+
+ ldwrku = *m;
+ nwork = iu + ldwrku * *n;
+ slaset_("F", m, n, &c_b227, &c_b227, &work[iu], &ldwrku);
+ } else {
+
+/* WORK( IU ) is N by N */
+
+ ldwrku = *n;
+ nwork = iu + ldwrku * *n;
+
+/* WORK(IR) is LDWRKR by N */
+
+ ir = nwork;
+ ldwrkr = (*lwork - *n * *n - *n * 3) / *n;
+ }
+ nwork = iu + ldwrku * *n;
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in WORK(IU) and computing right */
+/* singular vectors of bidiagonal matrix in VT */
+/* (Workspace: need N+N*N+BDSPAC) */
+
+ sbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], &ldwrku, &
+ vt[vt_offset], ldvt, dum, idum, &work[nwork], &iwork[
+ 1], info);
+
+/* Overwrite VT by right singular vectors of A */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+ if (*lwork >= *m * *n + *n * 3 + bdspac) {
+
+/* Overwrite WORK(IU) by left singular vectors of A */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__2, &
+ ierr);
+
+/* Copy left singular vectors of A from WORK(IU) to A */
+
+ slacpy_("F", m, n, &work[iu], &ldwrku, &a[a_offset], lda);
+ } else {
+
+/* Generate Q in A */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sorgbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &
+ work[nwork], &i__2, &ierr);
+
+/* Multiply Q in A by left singular vectors of */
+/* bidiagonal matrix in WORK(IU), storing result in */
+/* WORK(IR) and copying to A */
+/* (Workspace: need 2*N*N, prefer N*N+M*N) */
+
+ i__2 = *m;
+ i__1 = ldwrkr;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrkr);
+ sgemm_("N", "N", &chunk, n, n, &c_b248, &a[i__ +
+ a_dim1], lda, &work[iu], &ldwrku, &c_b227, &
+ work[ir], &ldwrkr);
+ slacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ +
+ a_dim1], lda);
+/* L20: */
+ }
+ }
+
+ } else if (wntqs) {
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in U and computing right singular */
+/* vectors of bidiagonal matrix in VT */
+/* (Workspace: need N+BDSPAC) */
+
+ slaset_("F", m, n, &c_b227, &c_b227, &u[u_offset], ldu);
+ sbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Overwrite U by left singular vectors of A and VT */
+/* by right singular vectors of A */
+/* (Workspace: need 3*N, prefer 2*N+N*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ } else if (wntqa) {
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in U and computing right singular */
+/* vectors of bidiagonal matrix in VT */
+/* (Workspace: need N+BDSPAC) */
+
+ slaset_("F", m, m, &c_b227, &c_b227, &u[u_offset], ldu);
+ sbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Set the right corner of U to identity matrix */
+
+ if (*m > *n) {
+ i__1 = *m - *n;
+ i__2 = *m - *n;
+ slaset_("F", &i__1, &i__2, &c_b227, &c_b248, &u[*n + 1 + (
+ *n + 1) * u_dim1], ldu);
+ }
+
+/* Overwrite U by left singular vectors of A and VT */
+/* by right singular vectors of A */
+/* (Workspace: need N*N+2*N+M, prefer N*N+2*N+M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ }
+
+ }
+
+ } else {
+
+/* A has more columns than rows. If A has sufficiently more */
+/* columns than rows, first reduce using the LQ decomposition (if */
+/* sufficient workspace available) */
+
+ if (*n >= mnthr) {
+
+ if (wntqn) {
+
+/* Path 1t (N much larger than M, JOBZ='N') */
+/* No singular vectors to be computed */
+
+ itau = 1;
+ nwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Zero out above L */
+
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ slaset_("U", &i__1, &i__2, &c_b227, &c_b227, &a[(a_dim1 << 1)
+ + 1], lda);
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize L in A */
+/* (Workspace: need 4*M, prefer 3*M+2*M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+ nwork = ie + *m;
+
+/* Perform bidiagonal SVD, computing singular values only */
+/* (Workspace: need M+BDSPAC) */
+
+ sbdsdc_("U", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+
+ } else if (wntqo) {
+
+/* Path 2t (N much larger than M, JOBZ='O') */
+/* M right singular vectors to be overwritten on A and */
+/* M left singular vectors to be computed in U */
+
+ ivt = 1;
+
+/* IVT is M by M */
+
+ il = ivt + *m * *m;
+ if (*lwork >= *m * *n + *m * *m + *m * 3 + bdspac) {
+
+/* WORK(IL) is M by N */
+
+ ldwrkl = *m;
+ chunk = *n;
+ } else {
+ ldwrkl = *m;
+ chunk = (*lwork - *m * *m) / *m;
+ }
+ itau = il + ldwrkl * *m;
+ nwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Copy L to WORK(IL), zeroing out above it */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ slaset_("U", &i__1, &i__2, &c_b227, &c_b227, &work[il +
+ ldwrkl], &ldwrkl);
+
+/* Generate Q in A */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__1, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IL) */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in U, and computing right singular */
+/* vectors of bidiagonal matrix in WORK(IVT) */
+/* (Workspace: need M+M*M+BDSPAC) */
+
+ sbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
+ work[ivt], m, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Overwrite U by left singular vectors of L and WORK(IVT) */
+/* by right singular vectors of L */
+/* (Workspace: need 2*M*M+3*M, prefer 2*M*M+2*M+M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", m, m, m, &work[il], &ldwrkl, &work[
+ itaup], &work[ivt], m, &work[nwork], &i__1, &ierr);
+
+/* Multiply right singular vectors of L in WORK(IVT) by Q */
+/* in A, storing result in WORK(IL) and copying to A */
+/* (Workspace: need 2*M*M, prefer M*M+M*N) */
+
+ i__1 = *n;
+ i__2 = chunk;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ sgemm_("N", "N", m, &blk, m, &c_b248, &work[ivt], m, &a[
+ i__ * a_dim1 + 1], lda, &c_b227, &work[il], &
+ ldwrkl);
+ slacpy_("F", m, &blk, &work[il], &ldwrkl, &a[i__ * a_dim1
+ + 1], lda);
+/* L30: */
+ }
+
+ } else if (wntqs) {
+
+/* Path 3t (N much larger than M, JOBZ='S') */
+/* M right singular vectors to be computed in VT and */
+/* M left singular vectors to be computed in U */
+
+ il = 1;
+
+/* WORK(IL) is M by M */
+
+ ldwrkl = *m;
+ itau = il + ldwrkl * *m;
+ nwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Copy L to WORK(IL), zeroing out above it */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ slaset_("U", &i__2, &i__1, &c_b227, &c_b227, &work[il +
+ ldwrkl], &ldwrkl);
+
+/* Generate Q in A */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IU), copying result to U */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in U and computing right singular */
+/* vectors of bidiagonal matrix in VT */
+/* (Workspace: need M+BDSPAC) */
+
+ sbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Overwrite U by left singular vectors of L and VT */
+/* by right singular vectors of L */
+/* (Workspace: need M*M+3*M, prefer M*M+2*M+M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+ i__2 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", m, m, m, &work[il], &ldwrkl, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/* Multiply right singular vectors of L in WORK(IL) by */
+/* Q in A, storing result in VT */
+/* (Workspace: need M*M) */
+
+ slacpy_("F", m, m, &vt[vt_offset], ldvt, &work[il], &ldwrkl);
+ sgemm_("N", "N", m, n, m, &c_b248, &work[il], &ldwrkl, &a[
+ a_offset], lda, &c_b227, &vt[vt_offset], ldvt);
+
+ } else if (wntqa) {
+
+/* Path 4t (N much larger than M, JOBZ='A') */
+/* N right singular vectors to be computed in VT and */
+/* M left singular vectors to be computed in U */
+
+ ivt = 1;
+
+/* WORK(IVT) is M by M */
+
+ ldwkvt = *m;
+ itau = ivt + ldwkvt * *m;
+ nwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+ slacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+/* Generate Q in VT */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[
+ nwork], &i__2, &ierr);
+
+/* Produce L in A, zeroing out other entries */
+
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ slaset_("U", &i__2, &i__1, &c_b227, &c_b227, &a[(a_dim1 << 1)
+ + 1], lda);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize L in A */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in U and computing right singular */
+/* vectors of bidiagonal matrix in WORK(IVT) */
+/* (Workspace: need M+M*M+BDSPAC) */
+
+ sbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
+ work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1]
+, info);
+
+/* Overwrite U by left singular vectors of L and WORK(IVT) */
+/* by right singular vectors of L */
+/* (Workspace: need M*M+3*M, prefer M*M+2*M+M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", m, m, m, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+ i__2 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", m, m, m, &a[a_offset], lda, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2, &
+ ierr);
+
+/* Multiply right singular vectors of L in WORK(IVT) by */
+/* Q in VT, storing result in A */
+/* (Workspace: need M*M) */
+
+ sgemm_("N", "N", m, n, m, &c_b248, &work[ivt], &ldwkvt, &vt[
+ vt_offset], ldvt, &c_b227, &a[a_offset], lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ slacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+ }
+
+ } else {
+
+/* N .LT. MNTHR */
+
+/* Path 5t (N greater than M, but not much larger) */
+/* Reduce to bidiagonal form without LQ decomposition */
+
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize A */
+/* (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__2, &ierr);
+ if (wntqn) {
+
+/* Perform bidiagonal SVD, only computing singular values */
+/* (Workspace: need M+BDSPAC) */
+
+ sbdsdc_("L", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+ } else if (wntqo) {
+ ldwkvt = *m;
+ ivt = nwork;
+ if (*lwork >= *m * *n + *m * 3 + bdspac) {
+
+/* WORK( IVT ) is M by N */
+
+ slaset_("F", m, n, &c_b227, &c_b227, &work[ivt], &ldwkvt);
+ nwork = ivt + ldwkvt * *n;
+ } else {
+
+/* WORK( IVT ) is M by M */
+
+ nwork = ivt + ldwkvt * *m;
+ il = nwork;
+
+/* WORK(IL) is M by CHUNK */
+
+ chunk = (*lwork - *m * *m - *m * 3) / *m;
+ }
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in U and computing right singular */
+/* vectors of bidiagonal matrix in WORK(IVT) */
+/* (Workspace: need M*M+BDSPAC) */
+
+ sbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
+ work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1]
+, info);
+
+/* Overwrite U by left singular vectors of A */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+ if (*lwork >= *m * *n + *m * 3 + bdspac) {
+
+/* Overwrite WORK(IVT) by right singular vectors of A */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2,
+ &ierr);
+
+/* Copy right singular vectors of A from WORK(IVT) to A */
+
+ slacpy_("F", m, n, &work[ivt], &ldwkvt, &a[a_offset], lda);
+ } else {
+
+/* Generate P**T in A */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - nwork + 1;
+ sorgbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/* Multiply Q in A by right singular vectors of */
+/* bidiagonal matrix in WORK(IVT), storing result in */
+/* WORK(IL) and copying to A */
+/* (Workspace: need 2*M*M, prefer M*M+M*N) */
+
+ i__2 = *n;
+ i__1 = chunk;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ sgemm_("N", "N", m, &blk, m, &c_b248, &work[ivt], &
+ ldwkvt, &a[i__ * a_dim1 + 1], lda, &c_b227, &
+ work[il], m);
+ slacpy_("F", m, &blk, &work[il], m, &a[i__ * a_dim1 +
+ 1], lda);
+/* L40: */
+ }
+ }
+ } else if (wntqs) {
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in U and computing right singular */
+/* vectors of bidiagonal matrix in VT */
+/* (Workspace: need M+BDSPAC) */
+
+ slaset_("F", m, n, &c_b227, &c_b227, &vt[vt_offset], ldvt);
+ sbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Overwrite U by left singular vectors of A and VT */
+/* by right singular vectors of A */
+/* (Workspace: need 3*M, prefer 2*M+M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ } else if (wntqa) {
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in U and computing right singular */
+/* vectors of bidiagonal matrix in VT */
+/* (Workspace: need M+BDSPAC) */
+
+ slaset_("F", n, n, &c_b227, &c_b227, &vt[vt_offset], ldvt);
+ sbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Set the right corner of VT to identity matrix */
+
+ if (*n > *m) {
+ i__1 = *n - *m;
+ i__2 = *n - *m;
+ slaset_("F", &i__1, &i__2, &c_b227, &c_b248, &vt[*m + 1 +
+ (*m + 1) * vt_dim1], ldvt);
+ }
+
+/* Overwrite U by left singular vectors of A and VT */
+/* by right singular vectors of A */
+/* (Workspace: need 2*M+N, prefer 2*M+N*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ }
+
+ }
+
+ }
+
+/* Undo scaling if necessary */
+
+ if (iscl == 1) {
+ if (anrm > bignum) {
+ slascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ if (anrm < smlnum) {
+ slascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ }
+
+/* Return optimal workspace in WORK(1) */
+
+ work[1] = (real) maxwrk;
+
+ return 0;
+
+/* End of SGESDD */
+
+} /* sgesdd_ */
diff --git a/contrib/libs/clapack/sgesv.c b/contrib/libs/clapack/sgesv.c
new file mode 100644
index 0000000000..ae114ce670
--- /dev/null
+++ b/contrib/libs/clapack/sgesv.c
@@ -0,0 +1,139 @@
+/* sgesv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sgesv_(integer *n, integer *nrhs, real *a, integer *lda, 
+	integer *ipiv, real *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern /* Subroutine */ int xerbla_(char *, integer *), sgetrf_(
+	    integer *, integer *, real *, integer *, integer *, integer *), 
+	    sgetrs_(char *, integer *, integer *, real *, integer *, integer *
+, real *, integer *, integer *);
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SGESV computes the solution to a real system of linear equations */
+/*     A * X = B, */
+/*  where A is an N-by-N matrix and X and B are N-by-NRHS matrices. */
+
+/*  The LU decomposition with partial pivoting and row interchanges is */
+/*  used to factor A as */
+/*     A = P * L * U, */
+/*  where P is a permutation matrix, L is unit lower triangular, and U is */
+/*  upper triangular.  The factored form of A is then used to solve the */
+/*  system of equations A * X = B. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  N       (input) INTEGER */
+/*          The number of linear equations, i.e., the order of the */
+/*          matrix A.  N >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrix B.  NRHS >= 0. */
+
+/*  A       (input/output) REAL array, dimension (LDA,N) */
+/*          On entry, the N-by-N coefficient matrix A. */
+/*          On exit, the factors L and U from the factorization */
+/*          A = P*L*U; the unit diagonal elements of L are not stored. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  IPIV    (output) INTEGER array, dimension (N) */
+/*          The pivot indices that define the permutation matrix P; */
+/*          row i of the matrix was interchanged with row IPIV(i). */
+
+/*  B       (input/output) REAL array, dimension (LDB,NRHS) */
+/*          On entry, the N-by-NRHS right hand side matrix B. */
+/*          On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, U(i,i) is exactly zero.  The factorization */
+/*                has been completed, but the factor U is exactly */
+/*                singular, so the solution could not be computed. */
+
+/*  ===================================================================== */
+
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*nrhs < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info); /* XERBLA expects the positive index of the bad argument */
+	xerbla_("SGESV ", &i__1);
+	return 0;
+    }
+
+/*     Compute the LU factorization of A. */
+
+    sgetrf_(n, n, &a[a_offset], lda, &ipiv[1], info); /* INFO > 0 <=> U(i,i) exactly zero */
+    if (*info == 0) { /* solve only if the factorization succeeded */
+
+/*        Solve the system A*X = B, overwriting B with X. */
+
+	sgetrs_("No transpose", n, nrhs, &a[a_offset], lda, &ipiv[1], &b[
+		b_offset], ldb, info);
+    }
+    return 0;
+
+/*     End of SGESV */
+
+} /* sgesv_ */
diff --git a/contrib/libs/clapack/sgesvd.c b/contrib/libs/clapack/sgesvd.c
new file mode 100644
index 0000000000..358cbd6f63
--- /dev/null
+++ b/contrib/libs/clapack/sgesvd.c
@@ -0,0 +1,4047 @@
+/* sgesvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__6 = 6;
+static integer c__0 = 0;
+static integer c__2 = 2;
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b421 = 0.f;
+static real c_b443 = 1.f;
+
+/* Subroutine */ int sgesvd_(char *jobu, char *jobvt, integer *m, integer *n,
+ real *a, integer *lda, real *s, real *u, integer *ldu, real *vt,
+ integer *ldvt, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1[2],
+ i__2, i__3, i__4;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, ie, ir, iu, blk, ncu;
+ real dum[1], eps;
+ integer nru, iscl;
+ real anrm;
+ integer ierr, itau, ncvt, nrvt;
+ extern logical lsame_(char *, char *);
+ integer chunk;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ integer minmn, wrkbl, itaup, itauq, mnthr, iwork;
+ logical wntua, wntva, wntun, wntuo, wntvn, wntvo, wntus, wntvs;
+ integer bdspac;
+ extern /* Subroutine */ int sgebrd_(integer *, integer *, real *, integer
+ *, real *, real *, real *, real *, real *, integer *, integer *);
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ real bignum;
+ extern /* Subroutine */ int sgelqf_(integer *, integer *, real *, integer
+ *, real *, real *, integer *, integer *), slascl_(char *, integer
+ *, integer *, real *, real *, integer *, integer *, real *,
+ integer *, integer *), sgeqrf_(integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *), slacpy_(char
+ *, integer *, integer *, real *, integer *, real *, integer *), slaset_(char *, integer *, integer *, real *, real *,
+ real *, integer *), sbdsqr_(char *, integer *, integer *,
+ integer *, integer *, real *, real *, real *, integer *, real *,
+ integer *, real *, integer *, real *, integer *), sorgbr_(
+ char *, integer *, integer *, integer *, real *, integer *, real *
+, real *, integer *, integer *), sormbr_(char *, char *,
+ char *, integer *, integer *, integer *, real *, integer *, real *
+, real *, integer *, real *, integer *, integer *);
+ integer ldwrkr, minwrk, ldwrku, maxwrk;
+ extern /* Subroutine */ int sorglq_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *);
+ real smlnum;
+ extern /* Subroutine */ int sorgqr_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *);
+ logical lquery, wntuas, wntvas;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGESVD computes the singular value decomposition (SVD) of a real */
+/* M-by-N matrix A, optionally computing the left and/or right singular */
+/* vectors. The SVD is written */
+
+/* A = U * SIGMA * transpose(V) */
+
+/* where SIGMA is an M-by-N matrix which is zero except for its */
+/* min(m,n) diagonal elements, U is an M-by-M orthogonal matrix, and */
+/* V is an N-by-N orthogonal matrix. The diagonal elements of SIGMA */
+/* are the singular values of A; they are real and non-negative, and */
+/* are returned in descending order. The first min(m,n) columns of */
+/* U and V are the left and right singular vectors of A. */
+
+/* Note that the routine returns V**T, not V. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBU (input) CHARACTER*1 */
+/* Specifies options for computing all or part of the matrix U: */
+/* = 'A': all M columns of U are returned in array U: */
+/* = 'S': the first min(m,n) columns of U (the left singular */
+/* vectors) are returned in the array U; */
+/* = 'O': the first min(m,n) columns of U (the left singular */
+/* vectors) are overwritten on the array A; */
+/* = 'N': no columns of U (no left singular vectors) are */
+/* computed. */
+
+/* JOBVT (input) CHARACTER*1 */
+/* Specifies options for computing all or part of the matrix */
+/* V**T: */
+/* = 'A': all N rows of V**T are returned in the array VT; */
+/* = 'S': the first min(m,n) rows of V**T (the right singular */
+/* vectors) are returned in the array VT; */
+/* = 'O': the first min(m,n) rows of V**T (the right singular */
+/* vectors) are overwritten on the array A; */
+/* = 'N': no rows of V**T (no right singular vectors) are */
+/* computed. */
+
+/* JOBVT and JOBU cannot both be 'O'. */
+
+/* M (input) INTEGER */
+/* The number of rows of the input matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the input matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* if JOBU = 'O', A is overwritten with the first min(m,n) */
+/* columns of U (the left singular vectors, */
+/* stored columnwise); */
+/* if JOBVT = 'O', A is overwritten with the first min(m,n) */
+/* rows of V**T (the right singular vectors, */
+/* stored rowwise); */
+/* if JOBU .ne. 'O' and JOBVT .ne. 'O', the contents of A */
+/* are destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* S (output) REAL array, dimension (min(M,N)) */
+/* The singular values of A, sorted so that S(i) >= S(i+1). */
+
+/* U (output) REAL array, dimension (LDU,UCOL) */
+/* (LDU,M) if JOBU = 'A' or (LDU,min(M,N)) if JOBU = 'S'. */
+/* If JOBU = 'A', U contains the M-by-M orthogonal matrix U; */
+/* if JOBU = 'S', U contains the first min(m,n) columns of U */
+/* (the left singular vectors, stored columnwise); */
+/* if JOBU = 'N' or 'O', U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= 1; if */
+/* JOBU = 'S' or 'A', LDU >= M. */
+
+/* VT (output) REAL array, dimension (LDVT,N) */
+/* If JOBVT = 'A', VT contains the N-by-N orthogonal matrix */
+/* V**T; */
+/* if JOBVT = 'S', VT contains the first min(m,n) rows of */
+/* V**T (the right singular vectors, stored rowwise); */
+/* if JOBVT = 'N' or 'O', VT is not referenced. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. LDVT >= 1; if */
+/* JOBVT = 'A', LDVT >= N; if JOBVT = 'S', LDVT >= min(M,N). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK; */
+/* if INFO > 0, WORK(2:MIN(M,N)) contains the unconverged */
+/* superdiagonal elements of an upper bidiagonal matrix B */
+/* whose diagonal is in S (not necessarily sorted). B */
+/* satisfies A = U * B * VT, so it has the same singular values */
+/* as A, and singular vectors related by U and VT. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* LWORK >= MAX(1,3*MIN(M,N)+MAX(M,N),5*MIN(M,N)). */
+/* For good performance, LWORK should generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if SBDSQR did not converge, INFO specifies how many */
+/* superdiagonals of an intermediate bidiagonal form B */
+/* did not converge to zero. See the description of WORK */
+/* above for details. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ wntua = lsame_(jobu, "A");
+ wntus = lsame_(jobu, "S");
+ wntuas = wntua || wntus;
+ wntuo = lsame_(jobu, "O");
+ wntun = lsame_(jobu, "N");
+ wntva = lsame_(jobvt, "A");
+ wntvs = lsame_(jobvt, "S");
+ wntvas = wntva || wntvs;
+ wntvo = lsame_(jobvt, "O");
+ wntvn = lsame_(jobvt, "N");
+ lquery = *lwork == -1;
+
+ if (! (wntua || wntus || wntuo || wntun)) {
+ *info = -1;
+ } else if (! (wntva || wntvs || wntvo || wntvn) || wntvo && wntuo) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*m)) {
+ *info = -6;
+ } else if (*ldu < 1 || wntuas && *ldu < *m) {
+ *info = -9;
+ } else if (*ldvt < 1 || wntva && *ldvt < *n || wntvs && *ldvt < minmn) {
+ *info = -11;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ if (*m >= *n && minmn > 0) {
+
+/* Compute space needed for SBDSQR */
+
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = jobu;
+ i__1[1] = 1, a__1[1] = jobvt;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ mnthr = ilaenv_(&c__6, "SGESVD", ch__1, m, n, &c__0, &c__0);
+ bdspac = *n * 5;
+ if (*m >= mnthr) {
+ if (wntun) {
+
+/* Path 1 (M much larger than N, JOBU='N') */
+
+ maxwrk = *n + *n * ilaenv_(&c__1, "SGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", n, n, &c_n1, &c_n1);
+ maxwrk = max(i__2,i__3);
+ if (wntvo || wntvas) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *n * 3 + (*n - 1) * ilaenv_(&
+ c__1, "SORGBR", "P", n, n, n, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ maxwrk = max(maxwrk,bdspac);
+/* Computing MAX */
+ i__2 = *n << 2;
+ minwrk = max(i__2,bdspac);
+ } else if (wntuo && wntvn) {
+
+/* Path 2 (M much larger than N, JOBU='O', JOBVT='N') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "SGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "SORGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + *n * ilaenv_(&c__1, "SORGBR"
+, "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+/* Computing MAX */
+ i__2 = *n * *n + wrkbl, i__3 = *n * *n + *m * *n + *n;
+ maxwrk = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = *n * 3 + *m;
+ minwrk = max(i__2,bdspac);
+ } else if (wntuo && wntvas) {
+
+/* Path 3 (M much larger than N, JOBU='O', JOBVT='S' or */
+/* 'A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "SGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "SORGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + *n * ilaenv_(&c__1, "SORGBR"
+, "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n - 1) * ilaenv_(&c__1,
+ "SORGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+/* Computing MAX */
+ i__2 = *n * *n + wrkbl, i__3 = *n * *n + *m * *n + *n;
+ maxwrk = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = *n * 3 + *m;
+ minwrk = max(i__2,bdspac);
+ } else if (wntus && wntvn) {
+
+/* Path 4 (M much larger than N, JOBU='S', JOBVT='N') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "SGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "SORGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + *n * ilaenv_(&c__1, "SORGBR"
+, "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = *n * *n + wrkbl;
+/* Computing MAX */
+ i__2 = *n * 3 + *m;
+ minwrk = max(i__2,bdspac);
+ } else if (wntus && wntvo) {
+
+/* Path 5 (M much larger than N, JOBU='S', JOBVT='O') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "SGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "SORGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + *n * ilaenv_(&c__1, "SORGBR"
+, "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n - 1) * ilaenv_(&c__1,
+ "SORGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = (*n << 1) * *n + wrkbl;
+/* Computing MAX */
+ i__2 = *n * 3 + *m;
+ minwrk = max(i__2,bdspac);
+ } else if (wntus && wntvas) {
+
+/* Path 6 (M much larger than N, JOBU='S', JOBVT='S' or */
+/* 'A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "SGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "SORGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + *n * ilaenv_(&c__1, "SORGBR"
+, "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n - 1) * ilaenv_(&c__1,
+ "SORGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = *n * *n + wrkbl;
+/* Computing MAX */
+ i__2 = *n * 3 + *m;
+ minwrk = max(i__2,bdspac);
+ } else if (wntua && wntvn) {
+
+/* Path 7 (M much larger than N, JOBU='A', JOBVT='N') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "SGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *m * ilaenv_(&c__1, "SORGQR",
+ " ", m, m, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + *n * ilaenv_(&c__1, "SORGBR"
+, "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = *n * *n + wrkbl;
+/* Computing MAX */
+ i__2 = *n * 3 + *m;
+ minwrk = max(i__2,bdspac);
+ } else if (wntua && wntvo) {
+
+/* Path 8 (M much larger than N, JOBU='A', JOBVT='O') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "SGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *m * ilaenv_(&c__1, "SORGQR",
+ " ", m, m, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + *n * ilaenv_(&c__1, "SORGBR"
+, "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n - 1) * ilaenv_(&c__1,
+ "SORGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = (*n << 1) * *n + wrkbl;
+/* Computing MAX */
+ i__2 = *n * 3 + *m;
+ minwrk = max(i__2,bdspac);
+ } else if (wntua && wntvas) {
+
+/* Path 9 (M much larger than N, JOBU='A', JOBVT='S' or */
+/* 'A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "SGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *m * ilaenv_(&c__1, "SORGQR",
+ " ", m, m, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + *n * ilaenv_(&c__1, "SORGBR"
+, "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n * 3 + (*n - 1) * ilaenv_(&c__1,
+ "SORGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = *n * *n + wrkbl;
+/* Computing MAX */
+ i__2 = *n * 3 + *m;
+ minwrk = max(i__2,bdspac);
+ }
+ } else {
+
+/* Path 10 (M at least N, but not much larger) */
+
+ maxwrk = *n * 3 + (*m + *n) * ilaenv_(&c__1, "SGEBRD", " ", m,
+ n, &c_n1, &c_n1);
+ if (wntus || wntuo) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *n * 3 + *n * ilaenv_(&c__1, "SORG"
+ "BR", "Q", m, n, n, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ if (wntua) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *n * 3 + *m * ilaenv_(&c__1, "SORG"
+ "BR", "Q", m, m, n, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ if (! wntvn) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *n * 3 + (*n - 1) * ilaenv_(&c__1,
+ "SORGBR", "P", n, n, n, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ maxwrk = max(maxwrk,bdspac);
+/* Computing MAX */
+ i__2 = *n * 3 + *m;
+ minwrk = max(i__2,bdspac);
+ }
+ } else if (minmn > 0) {
+
+/* Compute space needed for SBDSQR */
+
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = jobu;
+ i__1[1] = 1, a__1[1] = jobvt;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ mnthr = ilaenv_(&c__6, "SGESVD", ch__1, m, n, &c__0, &c__0);
+ bdspac = *m * 5;
+ if (*n >= mnthr) {
+ if (wntvn) {
+
+/* Path 1t(N much larger than M, JOBVT='N') */
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", m, m, &c_n1, &c_n1);
+ maxwrk = max(i__2,i__3);
+ if (wntuo || wntuas) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *m * 3 + *m * ilaenv_(&c__1,
+ "SORGBR", "Q", m, m, m, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ maxwrk = max(maxwrk,bdspac);
+/* Computing MAX */
+ i__2 = *m << 2;
+ minwrk = max(i__2,bdspac);
+ } else if (wntvo && wntun) {
+
+/* Path 2t(N much larger than M, JOBU='N', JOBVT='O') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "SORGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m - 1) * ilaenv_(&c__1,
+ "SORGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+/* Computing MAX */
+ i__2 = *m * *m + wrkbl, i__3 = *m * *m + *m * *n + *m;
+ maxwrk = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = *m * 3 + *n;
+ minwrk = max(i__2,bdspac);
+ } else if (wntvo && wntuas) {
+
+/* Path 3t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='O') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "SORGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m - 1) * ilaenv_(&c__1,
+ "SORGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + *m * ilaenv_(&c__1, "SORGBR"
+, "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+/* Computing MAX */
+ i__2 = *m * *m + wrkbl, i__3 = *m * *m + *m * *n + *m;
+ maxwrk = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = *m * 3 + *n;
+ minwrk = max(i__2,bdspac);
+ } else if (wntvs && wntun) {
+
+/* Path 4t(N much larger than M, JOBU='N', JOBVT='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "SORGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m - 1) * ilaenv_(&c__1,
+ "SORGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = *m * *m + wrkbl;
+/* Computing MAX */
+ i__2 = *m * 3 + *n;
+ minwrk = max(i__2,bdspac);
+ } else if (wntvs && wntuo) {
+
+/* Path 5t(N much larger than M, JOBU='O', JOBVT='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "SORGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m - 1) * ilaenv_(&c__1,
+ "SORGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + *m * ilaenv_(&c__1, "SORGBR"
+, "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = (*m << 1) * *m + wrkbl;
+/* Computing MAX */
+ i__2 = *m * 3 + *n;
+ minwrk = max(i__2,bdspac);
+ maxwrk = max(maxwrk,minwrk);
+ } else if (wntvs && wntuas) {
+
+/* Path 6t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "SORGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m - 1) * ilaenv_(&c__1,
+ "SORGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + *m * ilaenv_(&c__1, "SORGBR"
+, "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = *m * *m + wrkbl;
+/* Computing MAX */
+ i__2 = *m * 3 + *n;
+ minwrk = max(i__2,bdspac);
+ } else if (wntva && wntun) {
+
+/* Path 7t(N much larger than M, JOBU='N', JOBVT='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *n * ilaenv_(&c__1, "SORGLQ",
+ " ", n, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m - 1) * ilaenv_(&c__1,
+ "SORGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = *m * *m + wrkbl;
+/* Computing MAX */
+ i__2 = *m * 3 + *n;
+ minwrk = max(i__2,bdspac);
+ } else if (wntva && wntuo) {
+
+/* Path 8t(N much larger than M, JOBU='O', JOBVT='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *n * ilaenv_(&c__1, "SORGLQ",
+ " ", n, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m - 1) * ilaenv_(&c__1,
+ "SORGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + *m * ilaenv_(&c__1, "SORGBR"
+, "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = (*m << 1) * *m + wrkbl;
+/* Computing MAX */
+ i__2 = *m * 3 + *n;
+ minwrk = max(i__2,bdspac);
+ } else if (wntva && wntuas) {
+
+/* Path 9t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *n * ilaenv_(&c__1, "SORGLQ",
+ " ", n, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m << 1) * ilaenv_(&c__1,
+ "SGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + (*m - 1) * ilaenv_(&c__1,
+ "SORGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m * 3 + *m * ilaenv_(&c__1, "SORGBR"
+, "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ wrkbl = max(wrkbl,bdspac);
+ maxwrk = *m * *m + wrkbl;
+/* Computing MAX */
+ i__2 = *m * 3 + *n;
+ minwrk = max(i__2,bdspac);
+ }
+ } else {
+
+/* Path 10t(N greater than M, but not much larger) */
+
+ maxwrk = *m * 3 + (*m + *n) * ilaenv_(&c__1, "SGEBRD", " ", m,
+ n, &c_n1, &c_n1);
+ if (wntvs || wntvo) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *m * 3 + *m * ilaenv_(&c__1, "SORG"
+ "BR", "P", m, n, m, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ if (wntva) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *m * 3 + *n * ilaenv_(&c__1, "SORG"
+ "BR", "P", n, n, m, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ if (! wntun) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = *m * 3 + (*m - 1) * ilaenv_(&c__1,
+ "SORGBR", "Q", m, m, m, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ maxwrk = max(maxwrk,bdspac);
+/* Computing MAX */
+ i__2 = *m * 3 + *n;
+ minwrk = max(i__2,bdspac);
+ }
+ }
+ maxwrk = max(maxwrk,minwrk);
+ work[1] = (real) maxwrk;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -13;
+ }
+ }
+
+ if (*info != 0) {
+ i__2 = -(*info);
+ xerbla_("SGESVD", &i__2);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = sqrt(slamch_("S")) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = slange_("M", m, n, &a[a_offset], lda, dum);
+ iscl = 0;
+ if (anrm > 0.f && anrm < smlnum) {
+ iscl = 1;
+ slascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, &
+ ierr);
+ } else if (anrm > bignum) {
+ iscl = 1;
+ slascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+ if (*m >= *n) {
+
+/* A has at least as many rows as columns. If A has sufficiently */
+/* more rows than columns, first reduce using the QR */
+/* decomposition (if sufficient workspace available) */
+
+ if (*m >= mnthr) {
+
+ if (wntun) {
+
+/* Path 1 (M much larger than N, JOBU='N') */
+/* No left singular vectors to be computed */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork], &
+ i__2, &ierr);
+
+/* Zero out below R */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ slaset_("L", &i__2, &i__3, &c_b421, &c_b421, &a[a_dim1 + 2],
+ lda);
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in A */
+/* (Workspace: need 4*N, prefer 3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__2, &ierr);
+ ncvt = 0;
+ if (wntvo || wntvas) {
+
+/* If right singular vectors desired, generate P'. */
+/* (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", n, n, n, &a[a_offset], lda, &work[itaup], &
+ work[iwork], &i__2, &ierr);
+ ncvt = *n;
+ }
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of A in A if desired */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", n, &ncvt, &c__0, &c__0, &s[1], &work[ie], &a[
+ a_offset], lda, dum, &c__1, dum, &c__1, &work[iwork],
+ info);
+
+/* If right singular vectors desired in VT, copy them there */
+
+ if (wntvas) {
+ slacpy_("F", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ }
+
+ } else if (wntuo && wntvn) {
+
+/* Path 2 (M much larger than N, JOBU='O', JOBVT='N') */
+/* N left singular vectors to be overwritten on A and */
+/* no right singular vectors to be computed */
+
+/* Computing MAX */
+ i__2 = *n << 2;
+ if (*lwork >= *n * *n + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *lda * *n + *n;
+ if (*lwork >= max(i__2,i__3) + *lda * *n) {
+
+/* WORK(IU) is LDA by N, WORK(IR) is LDA by N */
+
+ ldwrku = *lda;
+ ldwrkr = *lda;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *lda * *n + *n;
+ if (*lwork >= max(i__2,i__3) + *n * *n) {
+
+/* WORK(IU) is LDA by N, WORK(IR) is N by N */
+
+ ldwrku = *lda;
+ ldwrkr = *n;
+ } else {
+
+/* WORK(IU) is LDWRKU by N, WORK(IR) is N by N */
+
+ ldwrku = (*lwork - *n * *n - *n) / *n;
+ ldwrkr = *n;
+ }
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__2, &ierr);
+
+/* Copy R to WORK(IR) and zero out below it */
+
+ slacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ slaset_("L", &i__2, &i__3, &c_b421, &c_b421, &work[ir + 1]
+, &ldwrkr);
+
+/* Generate Q in A */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IR) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__2, &ierr);
+
+/* Generate left vectors bidiagonalizing R */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq], &
+ work[iwork], &i__2, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IR) */
+/* (Workspace: need N*N+BDSPAC) */
+
+ sbdsqr_("U", n, &c__0, n, &c__0, &s[1], &work[ie], dum, &
+ c__1, &work[ir], &ldwrkr, dum, &c__1, &work[iwork]
+, info);
+ iu = ie + *n;
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IR), storing result in WORK(IU) and copying to A */
+/* (Workspace: need N*N+2*N, prefer N*N+M*N+N) */
+
+ i__2 = *m;
+ i__3 = ldwrku;
+ for (i__ = 1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__3) {
+/* Computing MIN */
+ i__4 = *m - i__ + 1;
+ chunk = min(i__4,ldwrku);
+ sgemm_("N", "N", &chunk, n, n, &c_b443, &a[i__ +
+ a_dim1], lda, &work[ir], &ldwrkr, &c_b421, &
+ work[iu], &ldwrku);
+ slacpy_("F", &chunk, n, &work[iu], &ldwrku, &a[i__ +
+ a_dim1], lda);
+/* L10: */
+ }
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize A */
+/* (Workspace: need 3*N+M, prefer 3*N+(M+N)*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ sgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__3, &ierr);
+
+/* Generate left vectors bidiagonalizing A */
+/* (Workspace: need 4*N, prefer 3*N+N*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ sorgbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &
+ work[iwork], &i__3, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in A */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", n, &c__0, m, &c__0, &s[1], &work[ie], dum, &
+ c__1, &a[a_offset], lda, dum, &c__1, &work[iwork],
+ info);
+
+ }
+
+ } else if (wntuo && wntvas) {
+
+/* Path 3 (M much larger than N, JOBU='O', JOBVT='S' or 'A') */
+/* N left singular vectors to be overwritten on A and */
+/* N right singular vectors to be computed in VT */
+
+/* Computing MAX */
+ i__3 = *n << 2;
+ if (*lwork >= *n * *n + max(i__3,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+/* Computing MAX */
+ i__3 = wrkbl, i__2 = *lda * *n + *n;
+ if (*lwork >= max(i__3,i__2) + *lda * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by N */
+
+ ldwrku = *lda;
+ ldwrkr = *lda;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__3 = wrkbl, i__2 = *lda * *n + *n;
+ if (*lwork >= max(i__3,i__2) + *n * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is N by N */
+
+ ldwrku = *lda;
+ ldwrkr = *n;
+ } else {
+
+/* WORK(IU) is LDWRKU by N and WORK(IR) is N by N */
+
+ ldwrku = (*lwork - *n * *n - *n) / *n;
+ ldwrkr = *n;
+ }
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__3, &ierr);
+
+/* Copy R to VT, zeroing out below it */
+
+ slacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ if (*n > 1) {
+ i__3 = *n - 1;
+ i__2 = *n - 1;
+ slaset_("L", &i__3, &i__2, &c_b421, &c_b421, &vt[
+ vt_dim1 + 2], ldvt);
+ }
+
+/* Generate Q in A */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ sorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__3, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in VT, copying result to WORK(IR) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ sgebrd_(n, n, &vt[vt_offset], ldvt, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &i__3, &
+ ierr);
+ slacpy_("L", n, n, &vt[vt_offset], ldvt, &work[ir], &
+ ldwrkr);
+
+/* Generate left vectors bidiagonalizing R in WORK(IR) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+N*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ sorgbr_("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq], &
+ work[iwork], &i__3, &ierr);
+
+/* Generate right vectors bidiagonalizing R in VT */
+/* (Workspace: need N*N+4*N-1, prefer N*N+3*N+(N-1)*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ sorgbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup],
+ &work[iwork], &i__3, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IR) and computing right */
+/* singular vectors of R in VT */
+/* (Workspace: need N*N+BDSPAC) */
+
+ sbdsqr_("U", n, n, n, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &work[ir], &ldwrkr, dum, &c__1,
+ &work[iwork], info);
+ iu = ie + *n;
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IR), storing result in WORK(IU) and copying to A */
+/* (Workspace: need N*N+2*N, prefer N*N+M*N+N) */
+
+ i__3 = *m;
+ i__2 = ldwrku;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__3 : i__ <= i__3; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__4 = *m - i__ + 1;
+ chunk = min(i__4,ldwrku);
+ sgemm_("N", "N", &chunk, n, n, &c_b443, &a[i__ +
+ a_dim1], lda, &work[ir], &ldwrkr, &c_b421, &
+ work[iu], &ldwrku);
+ slacpy_("F", &chunk, n, &work[iu], &ldwrku, &a[i__ +
+ a_dim1], lda);
+/* L20: */
+ }
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__2, &ierr);
+
+/* Copy R to VT, zeroing out below it */
+
+ slacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ if (*n > 1) {
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ slaset_("L", &i__2, &i__3, &c_b421, &c_b421, &vt[
+ vt_dim1 + 2], ldvt);
+ }
+
+/* Generate Q in A */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in VT */
+/* (Workspace: need 4*N, prefer 3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(n, n, &vt[vt_offset], ldvt, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &i__2, &
+ ierr);
+
+/* Multiply Q in A by left vectors bidiagonalizing R */
+/* (Workspace: need 3*N+M, prefer 3*N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sormbr_("Q", "R", "N", m, n, n, &vt[vt_offset], ldvt, &
+ work[itauq], &a[a_offset], lda, &work[iwork], &
+ i__2, &ierr);
+
+/* Generate right vectors bidiagonalizing R in VT */
+/* (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup],
+ &work[iwork], &i__2, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in A and computing right */
+/* singular vectors of A in VT */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", n, n, m, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, dum, &c__1, &
+ work[iwork], info);
+
+ }
+
+ } else if (wntus) {
+
+ if (wntvn) {
+
+/* Path 4 (M much larger than N, JOBU='S', JOBVT='N') */
+/* N left singular vectors to be computed in U and */
+/* no right singular vectors to be computed */
+
+/* Computing MAX */
+ i__2 = *n << 2;
+ if (*lwork >= *n * *n + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+ if (*lwork >= wrkbl + *lda * *n) {
+
+/* WORK(IR) is LDA by N */
+
+ ldwrkr = *lda;
+ } else {
+
+/* WORK(IR) is N by N */
+
+ ldwrkr = *n;
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ slacpy_("U", n, n, &a[a_offset], lda, &work[ir], &
+ ldwrkr);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ slaset_("L", &i__2, &i__3, &c_b421, &c_b421, &work[ir
+ + 1], &ldwrkr);
+
+/* Generate Q in A */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IR) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Generate left vectors bidiagonalizing R in WORK(IR) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IR) */
+/* (Workspace: need N*N+BDSPAC) */
+
+ sbdsqr_("U", n, &c__0, n, &c__0, &s[1], &work[ie],
+ dum, &c__1, &work[ir], &ldwrkr, dum, &c__1, &
+ work[iwork], info);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IR), storing result in U */
+/* (Workspace: need N*N) */
+
+ sgemm_("N", "N", m, n, n, &c_b443, &a[a_offset], lda,
+ &work[ir], &ldwrkr, &c_b421, &u[u_offset],
+ ldu);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ slacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgqr_(m, n, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Zero out below R in A */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ slaset_("L", &i__2, &i__3, &c_b421, &c_b421, &a[
+ a_dim1 + 2], lda);
+
+/* Bidiagonalize R in A */
+/* (Workspace: need 4*N, prefer 3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left vectors bidiagonalizing R */
+/* (Workspace: need 3*N+M, prefer 3*N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sormbr_("Q", "R", "N", m, n, n, &a[a_offset], lda, &
+ work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr)
+ ;
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", n, &c__0, m, &c__0, &s[1], &work[ie],
+ dum, &c__1, &u[u_offset], ldu, dum, &c__1, &
+ work[iwork], info);
+
+ }
+
+ } else if (wntvo) {
+
+/* Path 5 (M much larger than N, JOBU='S', JOBVT='O') */
+/* N left singular vectors to be computed in U and */
+/* N right singular vectors to be overwritten on A */
+
+/* Computing MAX */
+ i__2 = *n << 2;
+ if (*lwork >= (*n << 1) * *n + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + (*lda << 1) * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by N */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *lda;
+ } else if (*lwork >= wrkbl + (*lda + *n) * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is N by N */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *n;
+ } else {
+
+/* WORK(IU) is N by N and WORK(IR) is N by N */
+
+ ldwrku = *n;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *n;
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need 2*N*N+2*N, prefer 2*N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IU), zeroing out below it */
+
+ slacpy_("U", n, n, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ slaset_("L", &i__2, &i__3, &c_b421, &c_b421, &work[iu
+ + 1], &ldwrku);
+
+/* Generate Q in A */
+/* (Workspace: need 2*N*N+2*N, prefer 2*N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IU), copying result to */
+/* WORK(IR) */
+/* (Workspace: need 2*N*N+4*N, */
+/* prefer 2*N*N+3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(n, n, &work[iu], &ldwrku, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ slacpy_("U", n, n, &work[iu], &ldwrku, &work[ir], &
+ ldwrkr);
+
+/* Generate left bidiagonalizing vectors in WORK(IU) */
+/* (Workspace: need 2*N*N+4*N, prefer 2*N*N+3*N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", n, n, n, &work[iu], &ldwrku, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in WORK(IR) */
+/* (Workspace: need 2*N*N+4*N-1, */
+/* prefer 2*N*N+3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", n, n, n, &work[ir], &ldwrkr, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IU) and computing */
+/* right singular vectors of R in WORK(IR) */
+/* (Workspace: need 2*N*N+BDSPAC) */
+
+ sbdsqr_("U", n, n, n, &c__0, &s[1], &work[ie], &work[
+ ir], &ldwrkr, &work[iu], &ldwrku, dum, &c__1,
+ &work[iwork], info);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IU), storing result in U */
+/* (Workspace: need N*N) */
+
+ sgemm_("N", "N", m, n, n, &c_b443, &a[a_offset], lda,
+ &work[iu], &ldwrku, &c_b421, &u[u_offset],
+ ldu);
+
+/* Copy right singular vectors of R to A */
+/* (Workspace: need N*N) */
+
+ slacpy_("F", n, n, &work[ir], &ldwrkr, &a[a_offset],
+ lda);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ slacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgqr_(m, n, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Zero out below R in A */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ slaset_("L", &i__2, &i__3, &c_b421, &c_b421, &a[
+ a_dim1 + 2], lda);
+
+/* Bidiagonalize R in A */
+/* (Workspace: need 4*N, prefer 3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left vectors bidiagonalizing R */
+/* (Workspace: need 3*N+M, prefer 3*N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sormbr_("Q", "R", "N", m, n, n, &a[a_offset], lda, &
+ work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr)
+ ;
+
+/* Generate right vectors bidiagonalizing R in A */
+/* (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", n, n, n, &a[a_offset], lda, &work[itaup],
+ &work[iwork], &i__2, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in A */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", n, n, m, &c__0, &s[1], &work[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, dum, &c__1,
+ &work[iwork], info);
+
+ }
+
+ } else if (wntvas) {
+
+/* Path 6 (M much larger than N, JOBU='S', JOBVT='S' */
+/* or 'A') */
+/* N left singular vectors to be computed in U and */
+/* N right singular vectors to be computed in VT */
+
+/* Computing MAX */
+ i__2 = *n << 2;
+ if (*lwork >= *n * *n + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + *lda * *n) {
+
+/* WORK(IU) is LDA by N */
+
+ ldwrku = *lda;
+ } else {
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ }
+ itau = iu + ldwrku * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IU), zeroing out below it */
+
+ slacpy_("U", n, n, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ slaset_("L", &i__2, &i__3, &c_b421, &c_b421, &work[iu
+ + 1], &ldwrku);
+
+/* Generate Q in A */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IU), copying result to VT */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(n, n, &work[iu], &ldwrku, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ slacpy_("U", n, n, &work[iu], &ldwrku, &vt[vt_offset],
+ ldvt);
+
+/* Generate left bidiagonalizing vectors in WORK(IU) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", n, n, n, &work[iu], &ldwrku, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in VT */
+/* (Workspace: need N*N+4*N-1, */
+/* prefer N*N+3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[
+ itaup], &work[iwork], &i__2, &ierr)
+ ;
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IU) and computing */
+/* right singular vectors of R in VT */
+/* (Workspace: need N*N+BDSPAC) */
+
+ sbdsqr_("U", n, n, n, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &work[iu], &ldwrku, dum, &
+ c__1, &work[iwork], info);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IU), storing result in U */
+/* (Workspace: need N*N) */
+
+ sgemm_("N", "N", m, n, n, &c_b443, &a[a_offset], lda,
+ &work[iu], &ldwrku, &c_b421, &u[u_offset],
+ ldu);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ slacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgqr_(m, n, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy R to VT, zeroing out below it */
+
+ slacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ if (*n > 1) {
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ slaset_("L", &i__2, &i__3, &c_b421, &c_b421, &vt[
+ vt_dim1 + 2], ldvt);
+ }
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in VT */
+/* (Workspace: need 4*N, prefer 3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(n, n, &vt[vt_offset], ldvt, &s[1], &work[ie],
+ &work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left bidiagonalizing vectors */
+/* in VT */
+/* (Workspace: need 3*N+M, prefer 3*N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sormbr_("Q", "R", "N", m, n, n, &vt[vt_offset], ldvt,
+ &work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in VT */
+/* (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[
+ itaup], &work[iwork], &i__2, &ierr)
+ ;
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in VT */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", n, n, m, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, dum, &
+ c__1, &work[iwork], info);
+
+ }
+
+ }
+
+ } else if (wntua) {
+
+ if (wntvn) {
+
+/* Path 7 (M much larger than N, JOBU='A', JOBVT='N') */
+/* M left singular vectors to be computed in U and */
+/* no right singular vectors to be computed */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *n << 2, i__2 = max(i__2,i__3);
+ if (*lwork >= *n * *n + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+ if (*lwork >= wrkbl + *lda * *n) {
+
+/* WORK(IR) is LDA by N */
+
+ ldwrkr = *lda;
+ } else {
+
+/* WORK(IR) is N by N */
+
+ ldwrkr = *n;
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ slacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ slacpy_("U", n, n, &a[a_offset], lda, &work[ir], &
+ ldwrkr);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ slaset_("L", &i__2, &i__3, &c_b421, &c_b421, &work[ir
+ + 1], &ldwrkr);
+
+/* Generate Q in U */
+/* (Workspace: need N*N+N+M, prefer N*N+N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IR) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in WORK(IR) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IR) */
+/* (Workspace: need N*N+BDSPAC) */
+
+ sbdsqr_("U", n, &c__0, n, &c__0, &s[1], &work[ie],
+ dum, &c__1, &work[ir], &ldwrkr, dum, &c__1, &
+ work[iwork], info);
+
+/* Multiply Q in U by left singular vectors of R in */
+/* WORK(IR), storing result in A */
+/* (Workspace: need N*N) */
+
+ sgemm_("N", "N", m, n, n, &c_b443, &u[u_offset], ldu,
+ &work[ir], &ldwrkr, &c_b421, &a[a_offset],
+ lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ slacpy_("F", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ slacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (Workspace: need N+M, prefer N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Zero out below R in A */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ slaset_("L", &i__2, &i__3, &c_b421, &c_b421, &a[
+ a_dim1 + 2], lda);
+
+/* Bidiagonalize R in A */
+/* (Workspace: need 4*N, prefer 3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left bidiagonalizing vectors */
+/* in A */
+/* (Workspace: need 3*N+M, prefer 3*N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sormbr_("Q", "R", "N", m, n, n, &a[a_offset], lda, &
+ work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr)
+ ;
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", n, &c__0, m, &c__0, &s[1], &work[ie],
+ dum, &c__1, &u[u_offset], ldu, dum, &c__1, &
+ work[iwork], info);
+
+ }
+
+ } else if (wntvo) {
+
+/* Path 8 (M much larger than N, JOBU='A', JOBVT='O') */
+/* M left singular vectors to be computed in U and */
+/* N right singular vectors to be overwritten on A */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *n << 2, i__2 = max(i__2,i__3);
+ if (*lwork >= (*n << 1) * *n + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + (*lda << 1) * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by N */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *lda;
+ } else if (*lwork >= wrkbl + (*lda + *n) * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is N by N */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *n;
+ } else {
+
+/* WORK(IU) is N by N and WORK(IR) is N by N */
+
+ ldwrku = *n;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *n;
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need 2*N*N+2*N, prefer 2*N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ slacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (Workspace: need 2*N*N+N+M, prefer 2*N*N+N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IU), zeroing out below it */
+
+ slacpy_("U", n, n, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ slaset_("L", &i__2, &i__3, &c_b421, &c_b421, &work[iu
+ + 1], &ldwrku);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IU), copying result to */
+/* WORK(IR) */
+/* (Workspace: need 2*N*N+4*N, */
+/* prefer 2*N*N+3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(n, n, &work[iu], &ldwrku, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ slacpy_("U", n, n, &work[iu], &ldwrku, &work[ir], &
+ ldwrkr);
+
+/* Generate left bidiagonalizing vectors in WORK(IU) */
+/* (Workspace: need 2*N*N+4*N, prefer 2*N*N+3*N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", n, n, n, &work[iu], &ldwrku, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in WORK(IR) */
+/* (Workspace: need 2*N*N+4*N-1, */
+/* prefer 2*N*N+3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", n, n, n, &work[ir], &ldwrkr, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IU) and computing */
+/* right singular vectors of R in WORK(IR) */
+/* (Workspace: need 2*N*N+BDSPAC) */
+
+ sbdsqr_("U", n, n, n, &c__0, &s[1], &work[ie], &work[
+ ir], &ldwrkr, &work[iu], &ldwrku, dum, &c__1,
+ &work[iwork], info);
+
+/* Multiply Q in U by left singular vectors of R in */
+/* WORK(IU), storing result in A */
+/* (Workspace: need N*N) */
+
+ sgemm_("N", "N", m, n, n, &c_b443, &u[u_offset], ldu,
+ &work[iu], &ldwrku, &c_b421, &a[a_offset],
+ lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ slacpy_("F", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Copy right singular vectors of R from WORK(IR) to A */
+
+ slacpy_("F", n, n, &work[ir], &ldwrkr, &a[a_offset],
+ lda);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ slacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (Workspace: need N+M, prefer N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Zero out below R in A */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ slaset_("L", &i__2, &i__3, &c_b421, &c_b421, &a[
+ a_dim1 + 2], lda);
+
+/* Bidiagonalize R in A */
+/* (Workspace: need 4*N, prefer 3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left bidiagonalizing vectors */
+/* in A */
+/* (Workspace: need 3*N+M, prefer 3*N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sormbr_("Q", "R", "N", m, n, n, &a[a_offset], lda, &
+ work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr)
+ ;
+
+/* Generate right bidiagonalizing vectors in A */
+/* (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", n, n, n, &a[a_offset], lda, &work[itaup],
+ &work[iwork], &i__2, &ierr);
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in A */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", n, n, m, &c__0, &s[1], &work[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, dum, &c__1,
+ &work[iwork], info);
+
+ }
+
+ } else if (wntvas) {
+
+/* Path 9 (M much larger than N, JOBU='A', JOBVT='S' */
+/* or 'A') */
+/* M left singular vectors to be computed in U and */
+/* N right singular vectors to be computed in VT */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *n << 2, i__2 = max(i__2,i__3);
+ if (*lwork >= *n * *n + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + *lda * *n) {
+
+/* WORK(IU) is LDA by N */
+
+ ldwrku = *lda;
+ } else {
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ }
+ itau = iu + ldwrku * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ slacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (Workspace: need N*N+N+M, prefer N*N+N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IU), zeroing out below it */
+
+ slacpy_("U", n, n, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ slaset_("L", &i__2, &i__3, &c_b421, &c_b421, &work[iu
+ + 1], &ldwrku);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IU), copying result to VT */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(n, n, &work[iu], &ldwrku, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ slacpy_("U", n, n, &work[iu], &ldwrku, &vt[vt_offset],
+ ldvt);
+
+/* Generate left bidiagonalizing vectors in WORK(IU) */
+/* (Workspace: need N*N+4*N, prefer N*N+3*N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", n, n, n, &work[iu], &ldwrku, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in VT */
+/* (Workspace: need N*N+4*N-1, */
+/* prefer N*N+3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[
+ itaup], &work[iwork], &i__2, &ierr)
+ ;
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IU) and computing */
+/* right singular vectors of R in VT */
+/* (Workspace: need N*N+BDSPAC) */
+
+ sbdsqr_("U", n, n, n, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &work[iu], &ldwrku, dum, &
+ c__1, &work[iwork], info);
+
+/* Multiply Q in U by left singular vectors of R in */
+/* WORK(IU), storing result in A */
+/* (Workspace: need N*N) */
+
+ sgemm_("N", "N", m, n, n, &c_b443, &u[u_offset], ldu,
+ &work[iu], &ldwrku, &c_b421, &a[a_offset],
+ lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ slacpy_("F", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (Workspace: need 2*N, prefer N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ slacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (Workspace: need N+M, prefer N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy R from A to VT, zeroing out below it */
+
+ slacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ if (*n > 1) {
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ slaset_("L", &i__2, &i__3, &c_b421, &c_b421, &vt[
+ vt_dim1 + 2], ldvt);
+ }
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in VT */
+/* (Workspace: need 4*N, prefer 3*N+2*N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(n, n, &vt[vt_offset], ldvt, &s[1], &work[ie],
+ &work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left bidiagonalizing vectors */
+/* in VT */
+/* (Workspace: need 3*N+M, prefer 3*N+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sormbr_("Q", "R", "N", m, n, n, &vt[vt_offset], ldvt,
+ &work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in VT */
+/* (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[
+ itaup], &work[iwork], &i__2, &ierr)
+ ;
+ iwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in VT */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", n, n, m, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, dum, &
+ c__1, &work[iwork], info);
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* M .LT. MNTHR */
+
+/* Path 10 (M at least N, but not much larger) */
+/* Reduce to bidiagonal form without QR decomposition */
+
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize A */
+/* (Workspace: need 3*N+M, prefer 3*N+(M+N)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[iwork], &i__2, &ierr);
+ if (wntuas) {
+
+/* If left singular vectors desired in U, copy result to U */
+/* and generate left bidiagonalizing vectors in U */
+/* (Workspace: need 3*N+NCU, prefer 3*N+NCU*NB) */
+
+ slacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ if (wntus) {
+ ncu = *n;
+ }
+ if (wntua) {
+ ncu = *m;
+ }
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", m, &ncu, n, &u[u_offset], ldu, &work[itauq], &
+ work[iwork], &i__2, &ierr);
+ }
+ if (wntvas) {
+
+/* If right singular vectors desired in VT, copy result to */
+/* VT and generate right bidiagonalizing vectors in VT */
+/* (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
+
+ slacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &
+ work[iwork], &i__2, &ierr);
+ }
+ if (wntuo) {
+
+/* If left singular vectors desired in A, generate left */
+/* bidiagonalizing vectors in A */
+/* (Workspace: need 4*N, prefer 3*N+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &work[
+ iwork], &i__2, &ierr);
+ }
+ if (wntvo) {
+
+/* If right singular vectors desired in A, generate right */
+/* bidiagonalizing vectors in A */
+/* (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", n, n, n, &a[a_offset], lda, &work[itaup], &work[
+ iwork], &i__2, &ierr);
+ }
+ iwork = ie + *n;
+ if (wntuas || wntuo) {
+ nru = *m;
+ }
+ if (wntun) {
+ nru = 0;
+ }
+ if (wntvas || wntvo) {
+ ncvt = *n;
+ }
+ if (wntvn) {
+ ncvt = 0;
+ }
+ if (! wntuo && ! wntvo) {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in U and computing right singular */
+/* vectors in VT */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", n, &ncvt, &nru, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, dum, &c__1, &
+ work[iwork], info);
+ } else if (! wntuo && wntvo) {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in U and computing right singular */
+/* vectors in A */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", n, &ncvt, &nru, &c__0, &s[1], &work[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, dum, &c__1, &work[
+ iwork], info);
+ } else {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in A and computing right singular */
+/* vectors in VT */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", n, &ncvt, &nru, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, dum, &c__1, &
+ work[iwork], info);
+ }
+
+ }
+
+ } else {
+
+/* A has more columns than rows. If A has sufficiently more */
+/* columns than rows, first reduce using the LQ decomposition (if */
+/* sufficient workspace available) */
+
+ if (*n >= mnthr) {
+
+ if (wntvn) {
+
+/* Path 1t(N much larger than M, JOBVT='N') */
+/* No right singular vectors to be computed */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork], &
+ i__2, &ierr);
+
+/* Zero out above L */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ slaset_("U", &i__2, &i__3, &c_b421, &c_b421, &a[(a_dim1 << 1)
+ + 1], lda);
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in A */
+/* (Workspace: need 4*M, prefer 3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__2, &ierr);
+ if (wntuo || wntuas) {
+
+/* If left singular vectors desired, generate Q */
+/* (Workspace: need 4*M, prefer 3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", m, m, m, &a[a_offset], lda, &work[itauq], &
+ work[iwork], &i__2, &ierr);
+ }
+ iwork = ie + *m;
+ nru = 0;
+ if (wntuo || wntuas) {
+ nru = *m;
+ }
+
+/* Perform bidiagonal QR iteration, computing left singular */
+/* vectors of A in A if desired */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", m, &c__0, &nru, &c__0, &s[1], &work[ie], dum, &
+ c__1, &a[a_offset], lda, dum, &c__1, &work[iwork],
+ info);
+
+/* If left singular vectors desired in U, copy them there */
+
+ if (wntuas) {
+ slacpy_("F", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ }
+
+ } else if (wntvo && wntun) {
+
+/* Path 2t(N much larger than M, JOBU='N', JOBVT='O') */
+/* M right singular vectors to be overwritten on A and */
+/* no left singular vectors to be computed */
+
+/* Computing MAX */
+ i__2 = *m << 2;
+ if (*lwork >= *m * *m + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *lda * *n + *m;
+ if (*lwork >= max(i__2,i__3) + *lda * *m) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by M */
+
+ ldwrku = *lda;
+ chunk = *n;
+ ldwrkr = *lda;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *lda * *n + *m;
+ if (*lwork >= max(i__2,i__3) + *m * *m) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is M by M */
+
+ ldwrku = *lda;
+ chunk = *n;
+ ldwrkr = *m;
+ } else {
+
+/* WORK(IU) is M by CHUNK and WORK(IR) is M by M */
+
+ ldwrku = *m;
+ chunk = (*lwork - *m * *m - *m) / *m;
+ ldwrkr = *m;
+ }
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__2, &ierr);
+
+/* Copy L to WORK(IR) and zero out above it */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ slaset_("U", &i__2, &i__3, &c_b421, &c_b421, &work[ir +
+ ldwrkr], &ldwrkr);
+
+/* Generate Q in A */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IR) */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(m, m, &work[ir], &ldwrkr, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__2, &ierr);
+
+/* Generate right vectors bidiagonalizing L */
+/* (Workspace: need M*M+4*M-1, prefer M*M+3*M+(M-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", m, m, m, &work[ir], &ldwrkr, &work[itaup], &
+ work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of L in WORK(IR) */
+/* (Workspace: need M*M+BDSPAC) */
+
+ sbdsqr_("U", m, m, &c__0, &c__0, &s[1], &work[ie], &work[
+ ir], &ldwrkr, dum, &c__1, dum, &c__1, &work[iwork]
+, info);
+ iu = ie + *m;
+
+/* Multiply right singular vectors of L in WORK(IR) by Q */
+/* in A, storing result in WORK(IU) and copying to A */
+/* (Workspace: need M*M+2*M, prefer M*M+M*N+M) */
+
+ i__2 = *n;
+ i__3 = chunk;
+ for (i__ = 1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__3) {
+/* Computing MIN */
+ i__4 = *n - i__ + 1;
+ blk = min(i__4,chunk);
+ sgemm_("N", "N", m, &blk, m, &c_b443, &work[ir], &
+ ldwrkr, &a[i__ * a_dim1 + 1], lda, &c_b421, &
+ work[iu], &ldwrku);
+ slacpy_("F", m, &blk, &work[iu], &ldwrku, &a[i__ *
+ a_dim1 + 1], lda);
+/* L30: */
+ }
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize A */
+/* (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ sgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__3, &ierr);
+
+/* Generate right vectors bidiagonalizing A */
+/* (Workspace: need 4*M, prefer 3*M+M*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ sorgbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &
+ work[iwork], &i__3, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of A in A */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("L", m, n, &c__0, &c__0, &s[1], &work[ie], &a[
+ a_offset], lda, dum, &c__1, dum, &c__1, &work[
+ iwork], info);
+
+ }
+
+ } else if (wntvo && wntuas) {
+
+/* Path 3t(N much larger than M, JOBU='S' or 'A', JOBVT='O') */
+/* M right singular vectors to be overwritten on A and */
+/* M left singular vectors to be computed in U */
+
+/* Computing MAX */
+ i__3 = *m << 2;
+ if (*lwork >= *m * *m + max(i__3,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+/* Computing MAX */
+ i__3 = wrkbl, i__2 = *lda * *n + *m;
+ if (*lwork >= max(i__3,i__2) + *lda * *m) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by M */
+
+ ldwrku = *lda;
+ chunk = *n;
+ ldwrkr = *lda;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__3 = wrkbl, i__2 = *lda * *n + *m;
+ if (*lwork >= max(i__3,i__2) + *m * *m) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is M by M */
+
+ ldwrku = *lda;
+ chunk = *n;
+ ldwrkr = *m;
+ } else {
+
+/* WORK(IU) is M by CHUNK and WORK(IR) is M by M */
+
+ ldwrku = *m;
+ chunk = (*lwork - *m * *m - *m) / *m;
+ ldwrkr = *m;
+ }
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__3, &ierr);
+
+/* Copy L to U, zeroing about above it */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__3 = *m - 1;
+ i__2 = *m - 1;
+ slaset_("U", &i__3, &i__2, &c_b421, &c_b421, &u[(u_dim1 <<
+ 1) + 1], ldu);
+
+/* Generate Q in A */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ sorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__3, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in U, copying result to WORK(IR) */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ sgebrd_(m, m, &u[u_offset], ldu, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__3, &ierr);
+ slacpy_("U", m, m, &u[u_offset], ldu, &work[ir], &ldwrkr);
+
+/* Generate right vectors bidiagonalizing L in WORK(IR) */
+/* (Workspace: need M*M+4*M-1, prefer M*M+3*M+(M-1)*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ sorgbr_("P", m, m, m, &work[ir], &ldwrkr, &work[itaup], &
+ work[iwork], &i__3, &ierr);
+
+/* Generate left vectors bidiagonalizing L in U */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+M*NB) */
+
+ i__3 = *lwork - iwork + 1;
+ sorgbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq], &
+ work[iwork], &i__3, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in U, and computing right */
+/* singular vectors of L in WORK(IR) */
+/* (Workspace: need M*M+BDSPAC) */
+
+ sbdsqr_("U", m, m, m, &c__0, &s[1], &work[ie], &work[ir],
+ &ldwrkr, &u[u_offset], ldu, dum, &c__1, &work[
+ iwork], info);
+ iu = ie + *m;
+
+/* Multiply right singular vectors of L in WORK(IR) by Q */
+/* in A, storing result in WORK(IU) and copying to A */
+/* (Workspace: need M*M+2*M, prefer M*M+M*N+M)) */
+
+ i__3 = *n;
+ i__2 = chunk;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__3 : i__ <= i__3; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__4 = *n - i__ + 1;
+ blk = min(i__4,chunk);
+ sgemm_("N", "N", m, &blk, m, &c_b443, &work[ir], &
+ ldwrkr, &a[i__ * a_dim1 + 1], lda, &c_b421, &
+ work[iu], &ldwrku);
+ slacpy_("F", m, &blk, &work[iu], &ldwrku, &a[i__ *
+ a_dim1 + 1], lda);
+/* L40: */
+ }
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__2, &ierr);
+
+/* Copy L to U, zeroing out above it */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ slaset_("U", &i__2, &i__3, &c_b421, &c_b421, &u[(u_dim1 <<
+ 1) + 1], ldu);
+
+/* Generate Q in A */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in U */
+/* (Workspace: need 4*M, prefer 3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(m, m, &u[u_offset], ldu, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__2, &ierr);
+
+/* Multiply right vectors bidiagonalizing L by Q in A */
+/* (Workspace: need 3*M+N, prefer 3*M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sormbr_("P", "L", "T", m, n, m, &u[u_offset], ldu, &work[
+ itaup], &a[a_offset], lda, &work[iwork], &i__2, &
+ ierr);
+
+/* Generate left vectors bidiagonalizing L in U */
+/* (Workspace: need 4*M, prefer 3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq], &
+ work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in A */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", m, n, m, &c__0, &s[1], &work[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, dum, &c__1, &
+ work[iwork], info);
+
+ }
+
+ } else if (wntvs) {
+
+ if (wntun) {
+
+/* Path 4t(N much larger than M, JOBU='N', JOBVT='S') */
+/* M right singular vectors to be computed in VT and */
+/* no left singular vectors to be computed */
+
+/* Computing MAX */
+ i__2 = *m << 2;
+ if (*lwork >= *m * *m + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+ if (*lwork >= wrkbl + *lda * *m) {
+
+/* WORK(IR) is LDA by M */
+
+ ldwrkr = *lda;
+ } else {
+
+/* WORK(IR) is M by M */
+
+ ldwrkr = *m;
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IR), zeroing out above it */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &work[ir], &
+ ldwrkr);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ slaset_("U", &i__2, &i__3, &c_b421, &c_b421, &work[ir
+ + ldwrkr], &ldwrkr);
+
+/* Generate Q in A */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorglq_(m, n, m, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IR) */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(m, m, &work[ir], &ldwrkr, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Generate right vectors bidiagonalizing L in */
+/* WORK(IR) */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+(M-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", m, m, m, &work[ir], &ldwrkr, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of L in WORK(IR) */
+/* (Workspace: need M*M+BDSPAC) */
+
+ sbdsqr_("U", m, m, &c__0, &c__0, &s[1], &work[ie], &
+ work[ir], &ldwrkr, dum, &c__1, dum, &c__1, &
+ work[iwork], info);
+
+/* Multiply right singular vectors of L in WORK(IR) by */
+/* Q in A, storing result in VT */
+/* (Workspace: need M*M) */
+
+ sgemm_("N", "N", m, n, m, &c_b443, &work[ir], &ldwrkr,
+ &a[a_offset], lda, &c_b421, &vt[vt_offset],
+ ldvt);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy result to VT */
+
+ slacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorglq_(m, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Zero out above L in A */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ slaset_("U", &i__2, &i__3, &c_b421, &c_b421, &a[(
+ a_dim1 << 1) + 1], lda);
+
+/* Bidiagonalize L in A */
+/* (Workspace: need 4*M, prefer 3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right vectors bidiagonalizing L by Q in VT */
+/* (Workspace: need 3*M+N, prefer 3*M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sormbr_("P", "L", "T", m, n, m, &a[a_offset], lda, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of A in VT */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", m, n, &c__0, &c__0, &s[1], &work[ie], &
+ vt[vt_offset], ldvt, dum, &c__1, dum, &c__1, &
+ work[iwork], info);
+
+ }
+
+ } else if (wntuo) {
+
+/* Path 5t(N much larger than M, JOBU='O', JOBVT='S') */
+/* M right singular vectors to be computed in VT and */
+/* M left singular vectors to be overwritten on A */
+
+/* Computing MAX */
+ i__2 = *m << 2;
+ if (*lwork >= (*m << 1) * *m + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + (*lda << 1) * *m) {
+
+/* WORK(IU) is LDA by M and WORK(IR) is LDA by M */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *lda;
+ } else if (*lwork >= wrkbl + (*lda + *m) * *m) {
+
+/* WORK(IU) is LDA by M and WORK(IR) is M by M */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *m;
+ } else {
+
+/* WORK(IU) is M by M and WORK(IR) is M by M */
+
+ ldwrku = *m;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *m;
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need 2*M*M+2*M, prefer 2*M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IU), zeroing out below it */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ slaset_("U", &i__2, &i__3, &c_b421, &c_b421, &work[iu
+ + ldwrku], &ldwrku);
+
+/* Generate Q in A */
+/* (Workspace: need 2*M*M+2*M, prefer 2*M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorglq_(m, n, m, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IU), copying result to */
+/* WORK(IR) */
+/* (Workspace: need 2*M*M+4*M, */
+/* prefer 2*M*M+3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(m, m, &work[iu], &ldwrku, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ slacpy_("L", m, m, &work[iu], &ldwrku, &work[ir], &
+ ldwrkr);
+
+/* Generate right bidiagonalizing vectors in WORK(IU) */
+/* (Workspace: need 2*M*M+4*M-1, */
+/* prefer 2*M*M+3*M+(M-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", m, m, m, &work[iu], &ldwrku, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in WORK(IR) */
+/* (Workspace: need 2*M*M+4*M, prefer 2*M*M+3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", m, m, m, &work[ir], &ldwrkr, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in WORK(IR) and computing */
+/* right singular vectors of L in WORK(IU) */
+/* (Workspace: need 2*M*M+BDSPAC) */
+
+ sbdsqr_("U", m, m, m, &c__0, &s[1], &work[ie], &work[
+ iu], &ldwrku, &work[ir], &ldwrkr, dum, &c__1,
+ &work[iwork], info);
+
+/* Multiply right singular vectors of L in WORK(IU) by */
+/* Q in A, storing result in VT */
+/* (Workspace: need M*M) */
+
+ sgemm_("N", "N", m, n, m, &c_b443, &work[iu], &ldwrku,
+ &a[a_offset], lda, &c_b421, &vt[vt_offset],
+ ldvt);
+
+/* Copy left singular vectors of L to A */
+/* (Workspace: need M*M) */
+
+ slacpy_("F", m, m, &work[ir], &ldwrkr, &a[a_offset],
+ lda);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ slacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorglq_(m, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Zero out above L in A */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ slaset_("U", &i__2, &i__3, &c_b421, &c_b421, &a[(
+ a_dim1 << 1) + 1], lda);
+
+/* Bidiagonalize L in A */
+/* (Workspace: need 4*M, prefer 3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right vectors bidiagonalizing L by Q in VT */
+/* (Workspace: need 3*M+N, prefer 3*M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sormbr_("P", "L", "T", m, n, m, &a[a_offset], lda, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors of L in A */
+/* (Workspace: need 4*M, prefer 3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", m, m, m, &a[a_offset], lda, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, compute left */
+/* singular vectors of A in A and compute right */
+/* singular vectors of A in VT */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", m, n, m, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, dum, &
+ c__1, &work[iwork], info);
+
+ }
+
+ } else if (wntuas) {
+
+/* Path 6t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='S') */
+/* M right singular vectors to be computed in VT and */
+/* M left singular vectors to be computed in U */
+
+/* Computing MAX */
+ i__2 = *m << 2;
+ if (*lwork >= *m * *m + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + *lda * *m) {
+
+/* WORK(IU) is LDA by N */
+
+ ldwrku = *lda;
+ } else {
+
+/* WORK(IU) is LDA by M */
+
+ ldwrku = *m;
+ }
+ itau = iu + ldwrku * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IU), zeroing out above it */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ slaset_("U", &i__2, &i__3, &c_b421, &c_b421, &work[iu
+ + ldwrku], &ldwrku);
+
+/* Generate Q in A */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorglq_(m, n, m, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IU), copying result to U */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(m, m, &work[iu], &ldwrku, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ slacpy_("L", m, m, &work[iu], &ldwrku, &u[u_offset],
+ ldu);
+
+/* Generate right bidiagonalizing vectors in WORK(IU) */
+/* (Workspace: need M*M+4*M-1, */
+/* prefer M*M+3*M+(M-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", m, m, m, &work[iu], &ldwrku, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in U */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in U and computing right */
+/* singular vectors of L in WORK(IU) */
+/* (Workspace: need M*M+BDSPAC) */
+
+ sbdsqr_("U", m, m, m, &c__0, &s[1], &work[ie], &work[
+ iu], &ldwrku, &u[u_offset], ldu, dum, &c__1, &
+ work[iwork], info);
+
+/* Multiply right singular vectors of L in WORK(IU) by */
+/* Q in A, storing result in VT */
+/* (Workspace: need M*M) */
+
+ sgemm_("N", "N", m, n, m, &c_b443, &work[iu], &ldwrku,
+ &a[a_offset], lda, &c_b421, &vt[vt_offset],
+ ldvt);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ slacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorglq_(m, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy L to U, zeroing out above it */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &u[u_offset],
+ ldu);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ slaset_("U", &i__2, &i__3, &c_b421, &c_b421, &u[(
+ u_dim1 << 1) + 1], ldu);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in U */
+/* (Workspace: need 4*M, prefer 3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(m, m, &u[u_offset], ldu, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right bidiagonalizing vectors in U by Q */
+/* in VT */
+/* (Workspace: need 3*M+N, prefer 3*M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sormbr_("P", "L", "T", m, n, m, &u[u_offset], ldu, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in U */
+/* (Workspace: need 4*M, prefer 3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in VT */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", m, n, m, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, dum, &
+ c__1, &work[iwork], info);
+
+ }
+
+ }
+
+ } else if (wntva) {
+
+ if (wntun) {
+
+/* Path 7t(N much larger than M, JOBU='N', JOBVT='A') */
+/* N right singular vectors to be computed in VT and */
+/* no left singular vectors to be computed */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *m << 2, i__2 = max(i__2,i__3);
+ if (*lwork >= *m * *m + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+ if (*lwork >= wrkbl + *lda * *m) {
+
+/* WORK(IR) is LDA by M */
+
+ ldwrkr = *lda;
+ } else {
+
+/* WORK(IR) is M by M */
+
+ ldwrkr = *m;
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ slacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Copy L to WORK(IR), zeroing out above it */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &work[ir], &
+ ldwrkr);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ slaset_("U", &i__2, &i__3, &c_b421, &c_b421, &work[ir
+ + ldwrkr], &ldwrkr);
+
+/* Generate Q in VT */
+/* (Workspace: need M*M+M+N, prefer M*M+M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IR) */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(m, m, &work[ir], &ldwrkr, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in WORK(IR) */
+/* (Workspace: need M*M+4*M-1, */
+/* prefer M*M+3*M+(M-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", m, m, m, &work[ir], &ldwrkr, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of L in WORK(IR) */
+/* (Workspace: need M*M+BDSPAC) */
+
+ sbdsqr_("U", m, m, &c__0, &c__0, &s[1], &work[ie], &
+ work[ir], &ldwrkr, dum, &c__1, dum, &c__1, &
+ work[iwork], info);
+
+/* Multiply right singular vectors of L in WORK(IR) by */
+/* Q in VT, storing result in A */
+/* (Workspace: need M*M) */
+
+ sgemm_("N", "N", m, n, m, &c_b443, &work[ir], &ldwrkr,
+ &vt[vt_offset], ldvt, &c_b421, &a[a_offset],
+ lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ slacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ slacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (Workspace: need M+N, prefer M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Zero out above L in A */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ slaset_("U", &i__2, &i__3, &c_b421, &c_b421, &a[(
+ a_dim1 << 1) + 1], lda);
+
+/* Bidiagonalize L in A */
+/* (Workspace: need 4*M, prefer 3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right bidiagonalizing vectors in A by Q */
+/* in VT */
+/* (Workspace: need 3*M+N, prefer 3*M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sormbr_("P", "L", "T", m, n, m, &a[a_offset], lda, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of A in VT */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", m, n, &c__0, &c__0, &s[1], &work[ie], &
+ vt[vt_offset], ldvt, dum, &c__1, dum, &c__1, &
+ work[iwork], info);
+
+ }
+
+ } else if (wntuo) {
+
+/* Path 8t(N much larger than M, JOBU='O', JOBVT='A') */
+/* N right singular vectors to be computed in VT and */
+/* M left singular vectors to be overwritten on A */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *m << 2, i__2 = max(i__2,i__3);
+ if (*lwork >= (*m << 1) * *m + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + (*lda << 1) * *m) {
+
+/* WORK(IU) is LDA by M and WORK(IR) is LDA by M */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *lda;
+ } else if (*lwork >= wrkbl + (*lda + *m) * *m) {
+
+/* WORK(IU) is LDA by M and WORK(IR) is M by M */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *m;
+ } else {
+
+/* WORK(IU) is M by M and WORK(IR) is M by M */
+
+ ldwrku = *m;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *m;
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (Workspace: need 2*M*M+2*M, prefer 2*M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ slacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (Workspace: need 2*M*M+M+N, prefer 2*M*M+M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IU), zeroing out above it */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ slaset_("U", &i__2, &i__3, &c_b421, &c_b421, &work[iu
+ + ldwrku], &ldwrku);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IU), copying result to */
+/* WORK(IR) */
+/* (Workspace: need 2*M*M+4*M, */
+/* prefer 2*M*M+3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(m, m, &work[iu], &ldwrku, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ slacpy_("L", m, m, &work[iu], &ldwrku, &work[ir], &
+ ldwrkr);
+
+/* Generate right bidiagonalizing vectors in WORK(IU) */
+/* (Workspace: need 2*M*M+4*M-1, */
+/* prefer 2*M*M+3*M+(M-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", m, m, m, &work[iu], &ldwrku, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in WORK(IR) */
+/* (Workspace: need 2*M*M+4*M, prefer 2*M*M+3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", m, m, m, &work[ir], &ldwrkr, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in WORK(IR) and computing */
+/* right singular vectors of L in WORK(IU) */
+/* (Workspace: need 2*M*M+BDSPAC) */
+
+ sbdsqr_("U", m, m, m, &c__0, &s[1], &work[ie], &work[
+ iu], &ldwrku, &work[ir], &ldwrkr, dum, &c__1,
+ &work[iwork], info);
+
+/* Multiply right singular vectors of L in WORK(IU) by */
+/* Q in VT, storing result in A */
+/* (Workspace: need M*M) */
+
+ sgemm_("N", "N", m, n, m, &c_b443, &work[iu], &ldwrku,
+ &vt[vt_offset], ldvt, &c_b421, &a[a_offset],
+ lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ slacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Copy left singular vectors of A from WORK(IR) to A */
+
+ slacpy_("F", m, m, &work[ir], &ldwrkr, &a[a_offset],
+ lda);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ slacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (Workspace: need M+N, prefer M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Zero out above L in A */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ slaset_("U", &i__2, &i__3, &c_b421, &c_b421, &a[(
+ a_dim1 << 1) + 1], lda);
+
+/* Bidiagonalize L in A */
+/* (Workspace: need 4*M, prefer 3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right bidiagonalizing vectors in A by Q */
+/* in VT */
+/* (Workspace: need 3*M+N, prefer 3*M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sormbr_("P", "L", "T", m, n, m, &a[a_offset], lda, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in A */
+/* (Workspace: need 4*M, prefer 3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", m, m, m, &a[a_offset], lda, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in A and computing right */
+/* singular vectors of A in VT */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", m, n, m, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, dum, &
+ c__1, &work[iwork], info);
+
+ }
+
+ } else if (wntuas) {
+
+/* Path 9t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='A') */
+/* N right singular vectors to be computed in VT and */
+/* M left singular vectors to be computed in U */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *m << 2, i__2 = max(i__2,i__3);
+ if (*lwork >= *m * *m + max(i__2,bdspac)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + *lda * *m) {
+
+/* WORK(IU) is LDA by M */
+
+ ldwrku = *lda;
+ } else {
+
+/* WORK(IU) is M by M */
+
+ ldwrku = *m;
+ }
+ itau = iu + ldwrku * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ slacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (Workspace: need M*M+M+N, prefer M*M+M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IU), zeroing out above it */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ slaset_("U", &i__2, &i__3, &c_b421, &c_b421, &work[iu
+ + ldwrku], &ldwrku);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IU), copying result to U */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(m, m, &work[iu], &ldwrku, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ slacpy_("L", m, m, &work[iu], &ldwrku, &u[u_offset],
+ ldu);
+
+/* Generate right bidiagonalizing vectors in WORK(IU) */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+(M-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", m, m, m, &work[iu], &ldwrku, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in U */
+/* (Workspace: need M*M+4*M, prefer M*M+3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in U and computing right */
+/* singular vectors of L in WORK(IU) */
+/* (Workspace: need M*M+BDSPAC) */
+
+ sbdsqr_("U", m, m, m, &c__0, &s[1], &work[ie], &work[
+ iu], &ldwrku, &u[u_offset], ldu, dum, &c__1, &
+ work[iwork], info);
+
+/* Multiply right singular vectors of L in WORK(IU) by */
+/* Q in VT, storing result in A */
+/* (Workspace: need M*M) */
+
+ sgemm_("N", "N", m, n, m, &c_b443, &work[iu], &ldwrku,
+ &vt[vt_offset], ldvt, &c_b421, &a[a_offset],
+ lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ slacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (Workspace: need 2*M, prefer M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ slacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (Workspace: need M+N, prefer M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy L to U, zeroing out above it */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &u[u_offset],
+ ldu);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ slaset_("U", &i__2, &i__3, &c_b421, &c_b421, &u[(
+ u_dim1 << 1) + 1], ldu);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in U */
+/* (Workspace: need 4*M, prefer 3*M+2*M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(m, m, &u[u_offset], ldu, &s[1], &work[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right bidiagonalizing vectors in U by Q */
+/* in VT */
+/* (Workspace: need 3*M+N, prefer 3*M+N*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sormbr_("P", "L", "T", m, n, m, &u[u_offset], ldu, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in U */
+/* (Workspace: need 4*M, prefer 3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ iwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in VT */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("U", m, n, m, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, dum, &
+ c__1, &work[iwork], info);
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* N .LT. MNTHR */
+
+/* Path 10t(N greater than M, but not much larger) */
+/* Reduce to bidiagonal form without LQ decomposition */
+
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize A */
+/* (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[iwork], &i__2, &ierr);
+ if (wntuas) {
+
+/* If left singular vectors desired in U, copy result to U */
+/* and generate left bidiagonalizing vectors in U */
+/* (Workspace: need 4*M-1, prefer 3*M+(M-1)*NB) */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ iwork], &i__2, &ierr);
+ }
+ if (wntvas) {
+
+/* If right singular vectors desired in VT, copy result to */
+/* VT and generate right bidiagonalizing vectors in VT */
+/* (Workspace: need 3*M+NRVT, prefer 3*M+NRVT*NB) */
+
+ slacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ if (wntva) {
+ nrvt = *n;
+ }
+ if (wntvs) {
+ nrvt = *m;
+ }
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", &nrvt, n, m, &vt[vt_offset], ldvt, &work[itaup],
+ &work[iwork], &i__2, &ierr);
+ }
+ if (wntuo) {
+
+/* If left singular vectors desired in A, generate left */
+/* bidiagonalizing vectors in A */
+/* (Workspace: need 4*M-1, prefer 3*M+(M-1)*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("Q", m, m, n, &a[a_offset], lda, &work[itauq], &work[
+ iwork], &i__2, &ierr);
+ }
+ if (wntvo) {
+
+/* If right singular vectors desired in A, generate right */
+/* bidiagonalizing vectors in A */
+/* (Workspace: need 4*M, prefer 3*M+M*NB) */
+
+ i__2 = *lwork - iwork + 1;
+ sorgbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &work[
+ iwork], &i__2, &ierr);
+ }
+ iwork = ie + *m;
+ if (wntuas || wntuo) {
+ nru = *m;
+ }
+ if (wntun) {
+ nru = 0;
+ }
+ if (wntvas || wntvo) {
+ ncvt = *n;
+ }
+ if (wntvn) {
+ ncvt = 0;
+ }
+ if (! wntuo && ! wntvo) {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in U and computing right singular */
+/* vectors in VT */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("L", m, &ncvt, &nru, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, dum, &c__1, &
+ work[iwork], info);
+ } else if (! wntuo && wntvo) {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in U and computing right singular */
+/* vectors in A */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("L", m, &ncvt, &nru, &c__0, &s[1], &work[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, dum, &c__1, &work[
+ iwork], info);
+ } else {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in A and computing right singular */
+/* vectors in VT */
+/* (Workspace: need BDSPAC) */
+
+ sbdsqr_("L", m, &ncvt, &nru, &c__0, &s[1], &work[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, dum, &c__1, &
+ work[iwork], info);
+ }
+
+ }
+
+ }
+
+/* If SBDSQR failed to converge, copy unconverged superdiagonals */
+/* to WORK( 2:MINMN ) */
+
+ if (*info != 0) {
+ if (ie > 2) {
+ i__2 = minmn - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__ + 1] = work[i__ + ie - 1];
+/* L50: */
+ }
+ }
+ if (ie < 2) {
+ for (i__ = minmn - 1; i__ >= 1; --i__) {
+ work[i__ + 1] = work[i__ + ie - 1];
+/* L60: */
+ }
+ }
+ }
+
+/* Undo scaling if necessary */
+
+ if (iscl == 1) {
+ if (anrm > bignum) {
+ slascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ if (*info != 0 && anrm > bignum) {
+ i__2 = minmn - 1;
+ slascl_("G", &c__0, &c__0, &bignum, &anrm, &i__2, &c__1, &work[2],
+ &minmn, &ierr);
+ }
+ if (anrm < smlnum) {
+ slascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ if (*info != 0 && anrm < smlnum) {
+ i__2 = minmn - 1;
+ slascl_("G", &c__0, &c__0, &smlnum, &anrm, &i__2, &c__1, &work[2],
+ &minmn, &ierr);
+ }
+ }
+
+/* Return optimal workspace in WORK(1) */
+
+ work[1] = (real) maxwrk;
+
+ return 0;
+
+/* End of SGESVD */
+
+} /* sgesvd_ */
diff --git a/contrib/libs/clapack/sgesvj.c b/contrib/libs/clapack/sgesvj.c
new file mode 100644
index 0000000000..0510c9991e
--- /dev/null
+++ b/contrib/libs/clapack/sgesvj.c
@@ -0,0 +1,1785 @@
+/* sgesvj.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b17 = 0.f;	/* REAL 0: e.g. off-diagonal fill for slaset_ */
+static real c_b18 = 1.f;	/* REAL 1: e.g. diagonal fill for slaset_, scale for slascl_ */
+static integer c__1 = 1;	/* unit increment/stride for BLAS calls (sdot_, sswap_, ...) */
+static integer c__0 = 0;	/* integer 0 argument, e.g. slascl_ bandwidth parameters */
+static integer c__2 = 2;	/* sweep-count argument passed to sgsvj0_ */
+
+/* Subroutine */ int sgesvj_(char *joba, char *jobu, char *jobv, integer *m,
+ integer *n, real *a, integer *lda, real *sva, integer *mv, real *v,
+ integer *ldv, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, v_dim1, v_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ real bigtheta;
+ integer pskipped, i__, p, q;
+ real t;
+ integer n2, n4;
+ real rootsfmin;
+ integer n34;
+ real cs, sn;
+ integer ir1, jbc;
+ real big;
+ integer kbl, igl, ibr, jgl, nbl;
+ real tol;
+ integer mvl;
+ real aapp, aapq, aaqq, ctol;
+ integer ierr;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ real aapp0, temp1;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ real scale, large, apoaq, aqoap;
+ extern logical lsame_(char *, char *);
+ real theta;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ real small, sfmin;
+ logical lsvec;
+ real fastr[5];
+ logical applv, rsvec, uctol, lower, upper;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ logical rotok;
+ extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+ integer *), saxpy_(integer *, real *, real *, integer *, real *,
+ integer *), srotm_(integer *, real *, integer *, real *, integer *
+, real *), sgsvj0_(char *, integer *, integer *, real *, integer *
+, real *, real *, integer *, real *, integer *, real *, real *,
+ real *, integer *, real *, integer *, integer *), sgsvj1_(
+ char *, integer *, integer *, integer *, real *, integer *, real *
+, real *, integer *, real *, integer *, real *, real *, real *,
+ integer *, real *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ integer ijblsk, swband;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ integer blskip;
+ real mxaapq;
+ extern /* Subroutine */ int slaset_(char *, integer *, integer *, real *,
+ real *, real *, integer *);
+ real thsign;
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+ real mxsinj;
+ integer emptsw, notrot, iswrot, lkahead;
+ logical goscale, noscale;
+ real rootbig, epsilon, rooteps;
+ integer rowskip;
+ real roottol;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Zlatko Drmac of the University of Zagreb and -- */
+/* -- Kresimir Veselic of the Fernuniversitaet Hagen -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* This routine is also part of SIGMA (version 1.23, October 23. 2008.) */
+/* SIGMA is a library of algorithms for highly accurate algorithms for */
+/* computation of SVD, PSVD, QSVD, (H,K)-SVD, and for solution of the */
+/* eigenvalue problems Hx = lambda M x, H M x = lambda x with H, M > 0. */
+
+/* -#- Scalar Arguments -#- */
+
+
+/* -#- Array Arguments -#- */
+
+/* .. */
+
+/* Purpose */
+/* ~~~~~~~ */
+/* SGESVJ computes the singular value decomposition (SVD) of a real */
+/* M-by-N matrix A, where M >= N. The SVD of A is written as */
+/* [++] [xx] [x0] [xx] */
+/* A = U * SIGMA * V^t, [++] = [xx] * [ox] * [xx] */
+/* [++] [xx] */
+/* where SIGMA is an N-by-N diagonal matrix, U is an M-by-N orthonormal */
+/* matrix, and V is an N-by-N orthogonal matrix. The diagonal elements */
+/* of SIGMA are the singular values of A. The columns of U and V are the */
+/* left and the right singular vectors of A, respectively. */
+
+/* Further Details */
+/* ~~~~~~~~~~~~~~~ */
+/* The orthogonal N-by-N matrix V is obtained as a product of Jacobi plane */
+/* rotations. The rotations are implemented as fast scaled rotations of */
+/* Anda and Park [1]. In the case of underflow of the Jacobi angle, a */
+/* modified Jacobi transformation of Drmac [4] is used. Pivot strategy uses */
+/* column interchanges of de Rijk [2]. The relative accuracy of the computed */
+/* singular values and the accuracy of the computed singular vectors (in */
+/* angle metric) is as guaranteed by the theory of Demmel and Veselic [3]. */
+/* The condition number that determines the accuracy in the full rank case */
+/* is essentially min_{D=diag} kappa(A*D), where kappa(.) is the */
+/* spectral condition number. The best performance of this Jacobi SVD */
+/* procedure is achieved if used in an accelerated version of Drmac and */
+/* Veselic [5,6], and it is the kernel routine in the SIGMA library [7]. */
+/* Some tuning parameters (marked with [TP]) are available for the */
+/* implementer. */
+/* The computational range for the nonzero singular values is the machine */
+/* number interval ( UNDERFLOW , OVERFLOW ). In extreme cases, even */
+/* denormalized singular values can be computed with the corresponding */
+/* gradual loss of accurate digits. */
+
+/* Contributors */
+/* ~~~~~~~~~~~~ */
+/* Zlatko Drmac (Zagreb, Croatia) and Kresimir Veselic (Hagen, Germany) */
+
+/* References */
+/* ~~~~~~~~~~ */
+/* [1] A. A. Anda and H. Park: Fast plane rotations with dynamic scaling. */
+/* SIAM J. matrix Anal. Appl., Vol. 15 (1994), pp. 162-174. */
+/* [2] P. P. M. De Rijk: A one-sided Jacobi algorithm for computing the */
+/* singular value decomposition on a vector computer. */
+/* SIAM J. Sci. Stat. Comp., Vol. 10 (1998), pp. 359-371. */
+/* [3] J. Demmel and K. Veselic: Jacobi method is more accurate than QR. */
+/* [4] Z. Drmac: Implementation of Jacobi rotations for accurate singular */
+/* value computation in floating point arithmetic. */
+/* SIAM J. Sci. Comp., Vol. 18 (1997), pp. 1200-1222. */
+/* [5] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm I. */
+/* SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1322-1342. */
+/* LAPACK Working note 169. */
+/* [6] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm II. */
+/* SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1343-1362. */
+/* LAPACK Working note 170. */
+/* [7] Z. Drmac: SIGMA - mathematical software library for accurate SVD, PSV, */
+/* QSVD, (H,K)-SVD computations. */
+/* Department of Mathematics, University of Zagreb, 2008. */
+
+/* Bugs, Examples and Comments */
+/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
+/* Please report all bugs and send interesting test examples and comments to */
+/* drmac@math.hr. Thank you. */
+
+/* Arguments */
+/* ~~~~~~~~~ */
+
+/* JOBA (input) CHARACTER* 1 */
+/* Specifies the structure of A. */
+/* = 'L': The input matrix A is lower triangular; */
+/* = 'U': The input matrix A is upper triangular; */
+/* = 'G': The input matrix A is general M-by-N matrix, M >= N. */
+
+/* JOBU (input) CHARACTER*1 */
+/* Specifies whether to compute the left singular vectors */
+/* (columns of U): */
+
+/* = 'U': The left singular vectors corresponding to the nonzero */
+/* singular values are computed and returned in the leading */
+/* columns of A. See more details in the description of A. */
+/* The default numerical orthogonality threshold is set to */
+/* approximately TOL=CTOL*EPS, CTOL=SQRT(M), EPS=SLAMCH('E'). */
+/* = 'C': Analogous to JOBU='U', except that user can control the */
+/* level of numerical orthogonality of the computed left */
+/* singular vectors. TOL can be set to TOL = CTOL*EPS, where */
+/* CTOL is given on input in the array WORK. */
+/* No CTOL smaller than ONE is allowed. CTOL greater */
+/* than 1 / EPS is meaningless. The option 'C' */
+/* can be used if M*EPS is satisfactory orthogonality */
+/* of the computed left singular vectors, so CTOL=M could */
+/* save few sweeps of Jacobi rotations. */
+/* See the descriptions of A and WORK(1). */
+/* = 'N': The matrix U is not computed. However, see the */
+/* description of A. */
+
+/* JOBV (input) CHARACTER*1 */
+/* Specifies whether to compute the right singular vectors, that */
+/* is, the matrix V: */
+/* = 'V' : the matrix V is computed and returned in the array V */
+/* = 'A' : the Jacobi rotations are applied to the MV-by-N */
+/* array V. In other words, the right singular vector */
+/* matrix V is not computed explicitly; instead it is */
+/* applied to an MV-by-N matrix initially stored in the */
+/* first MV rows of V. */
+/* = 'N' : the matrix V is not computed and the array V is not */
+/* referenced */
+
+/* M (input) INTEGER */
+/* The number of rows of the input matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the input matrix A. */
+/* M >= N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* If JOBU .EQ. 'U' .OR. JOBU .EQ. 'C': */
+/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
+/* If INFO .EQ. 0, */
+/* ~~~~~~~~~~~~~~~ */
+/* RANKA orthonormal columns of U are returned in the */
+/* leading RANKA columns of the array A. Here RANKA <= N */
+/* is the number of computed singular values of A that are */
+/* above the underflow threshold SLAMCH('S'). The singular */
+/* vectors corresponding to underflowed or zero singular */
+/* values are not computed. The value of RANKA is returned */
+/* in the array WORK as RANKA=NINT(WORK(2)). Also see the */
+/* descriptions of SVA and WORK. The computed columns of U */
+/* are mutually numerically orthogonal up to approximately */
+/* TOL=SQRT(M)*EPS (default); or TOL=CTOL*EPS (JOBU.EQ.'C'), */
+/* see the description of JOBU. */
+/* If INFO .GT. 0, */
+/* ~~~~~~~~~~~~~~~ */
+/* the procedure SGESVJ did not converge in the given number */
+/* of iterations (sweeps). In that case, the computed */
+/* columns of U may not be orthogonal up to TOL. The output */
+/* U (stored in A), SIGMA (given by the computed singular */
+/* values in SVA(1:N)) and V is still a decomposition of the */
+/* input matrix A in the sense that the residual */
+/* ||A-SCALE*U*SIGMA*V^T||_2 / ||A||_2 is small. */
+
+/* If JOBU .EQ. 'N': */
+/* ~~~~~~~~~~~~~~~~~ */
+/* If INFO .EQ. 0 */
+/* ~~~~~~~~~~~~~~ */
+/* Note that the left singular vectors are 'for free' in the */
+/* one-sided Jacobi SVD algorithm. However, if only the */
+/* singular values are needed, the level of numerical */
+/* orthogonality of U is not an issue and iterations are */
+/* stopped when the columns of the iterated matrix are */
+/* numerically orthogonal up to approximately M*EPS. Thus, */
+/* on exit, A contains the columns of U scaled with the */
+/* corresponding singular values. */
+/* If INFO .GT. 0, */
+/* ~~~~~~~~~~~~~~~ */
+/* the procedure SGESVJ did not converge in the given number */
+/* of iterations (sweeps). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* SVA (workspace/output) REAL array, dimension (N) */
+/* On exit, */
+/* If INFO .EQ. 0, */
+/* ~~~~~~~~~~~~~~~ */
+/* depending on the value SCALE = WORK(1), we have: */
+/* If SCALE .EQ. ONE: */
+/* ~~~~~~~~~~~~~~~~~~ */
+/* SVA(1:N) contains the computed singular values of A. */
+/* During the computation SVA contains the Euclidean column */
+/* norms of the iterated matrices in the array A. */
+/* If SCALE .NE. ONE: */
+/* ~~~~~~~~~~~~~~~~~~ */
+/* The singular values of A are SCALE*SVA(1:N), and this */
+/* factored representation is due to the fact that some of the */
+/* singular values of A might underflow or overflow. */
+
+/* If INFO .GT. 0, */
+/* ~~~~~~~~~~~~~~~ */
+/* the procedure SGESVJ did not converge in the given number of */
+/* iterations (sweeps) and SCALE*SVA(1:N) may not be accurate. */
+
+/* MV (input) INTEGER */
+/* If JOBV .EQ. 'A', then the product of Jacobi rotations in SGESVJ */
+/* is applied to the first MV rows of V. See the description of JOBV. */
+
+/* V (input/output) REAL array, dimension (LDV,N) */
+/* If JOBV = 'V', then V contains on exit the N-by-N matrix of */
+/* the right singular vectors; */
+/* If JOBV = 'A', then V contains the product of the computed right */
+/* singular vector matrix and the initial matrix in */
+/* the array V. */
+/* If JOBV = 'N', then V is not referenced. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V, LDV .GE. 1. */
+/* If JOBV .EQ. 'V', then LDV .GE. max(1,N). */
+/* If JOBV .EQ. 'A', then LDV .GE. max(1,MV) . */
+
+/* WORK (input/workspace/output) REAL array, dimension max(4,M+N). */
+/* On entry, */
+/* If JOBU .EQ. 'C', */
+/* ~~~~~~~~~~~~~~~~~ */
+/* WORK(1) = CTOL, where CTOL defines the threshold for convergence. */
+/* The process stops if all columns of A are mutually */
+/* orthogonal up to CTOL*EPS, EPS=SLAMCH('E'). */
+/* It is required that CTOL >= ONE, i.e. it is not */
+/* allowed to force the routine to obtain orthogonality */
+/* below EPSILON. */
+/* On exit, */
+/* WORK(1) = SCALE is the scaling factor such that SCALE*SVA(1:N) */
+/* are the computed singular values of A. */
+/* (See description of SVA().) */
+/* WORK(2) = NINT(WORK(2)) is the number of the computed nonzero */
+/* singular values. */
+/* WORK(3) = NINT(WORK(3)) is the number of the computed singular */
+/* values that are larger than the underflow threshold. */
+/* WORK(4) = NINT(WORK(4)) is the number of sweeps of Jacobi */
+/* rotations needed for numerical convergence. */
+/* WORK(5) = max_{i.NE.j} |COS(A(:,i),A(:,j))| in the last sweep. */
+/* This is useful information in cases when SGESVJ did */
+/* not converge, as it can be used to estimate whether */
+/* the output is still useful and for post festum analysis. */
+/* WORK(6) = the largest absolute value over all sines of the */
+/* Jacobi rotation angles in the last sweep. It can be */
+/* useful for a post festum analysis. */
+
+/* LWORK length of WORK, WORK >= MAX(6,M+N) */
+
+/* INFO (output) INTEGER */
+/* = 0 : successful exit. */
+/* < 0 : if INFO = -i, then the i-th argument had an illegal value */
+/* > 0 : SGESVJ did not converge in the maximal allowed number (30) */
+/* of sweeps. The output may still be useful. See the */
+/* description of WORK. */
+
+/* Local Parameters */
+
+
+/* Local Scalars */
+
+
+/* Local Arrays */
+
+
+/* Intrinsic Functions */
+
+
+/* External Functions */
+/* .. from BLAS */
+/* .. from LAPACK */
+
+/* External Subroutines */
+/* .. from BLAS */
+/* .. from LAPACK */
+
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ --sva;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ --work;
+
+ /* Function Body */
+ lsvec = lsame_(jobu, "U");
+ uctol = lsame_(jobu, "C");
+ rsvec = lsame_(jobv, "V");
+ applv = lsame_(jobv, "A");
+ upper = lsame_(joba, "U");
+ lower = lsame_(joba, "L");
+
+ if (! (upper || lower || lsame_(joba, "G"))) {
+ *info = -1;
+ } else if (! (lsvec || uctol || lsame_(jobu, "N")))
+ {
+ *info = -2;
+ } else if (! (rsvec || applv || lsame_(jobv, "N")))
+ {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0 || *n > *m) {
+ *info = -5;
+ } else if (*lda < *m) {
+ *info = -7;
+ } else if (*mv < 0) {
+ *info = -9;
+ } else if (rsvec && *ldv < *n || applv && *ldv < *mv) {
+ *info = -11;
+ } else if (uctol && work[1] <= 1.f) {
+ *info = -12;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = *m + *n;
+ if (*lwork < max(i__1,6)) {
+ *info = -13;
+ } else {
+ *info = 0;
+ }
+ }
+
+/* #:( */
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGESVJ", &i__1);
+ return 0;
+ }
+
+/* #:) Quick return for void matrix */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Set numerical parameters */
+/* The stopping criterion for Jacobi rotations is */
+
+/* max_{i<>j}|A(:,i)^T * A(:,j)|/(||A(:,i)||*||A(:,j)||) < CTOL*EPS */
+
+/* where EPS is the round-off and CTOL is defined as follows: */
+
+ if (uctol) {
+/* ... user controlled */
+ ctol = work[1];
+ } else {
+/* ... default */
+ if (lsvec || rsvec || applv) {
+ ctol = sqrt((real) (*m));
+ } else {
+ ctol = (real) (*m);
+ }
+ }
+/* ... and the machine dependent parameters are */
+/* [!] (Make sure that SLAMCH() works properly on the target machine.) */
+
+ epsilon = slamch_("Epsilon");
+ rooteps = sqrt(epsilon);
+ sfmin = slamch_("SafeMinimum");
+ rootsfmin = sqrt(sfmin);
+ small = sfmin / epsilon;
+ big = slamch_("Overflow");
+ rootbig = 1.f / rootsfmin;
+ large = big / sqrt((real) (*m * *n));
+ bigtheta = 1.f / rooteps;
+
+ tol = ctol * epsilon;
+ roottol = sqrt(tol);
+
+ if ((real) (*m) * epsilon >= 1.f) {
+ *info = -5;
+ i__1 = -(*info);
+ xerbla_("SGESVJ", &i__1);
+ return 0;
+ }
+
+/* Initialize the right singular vector matrix. */
+
+ if (rsvec) {
+ mvl = *n;
+ slaset_("A", &mvl, n, &c_b17, &c_b18, &v[v_offset], ldv);
+ } else if (applv) {
+ mvl = *mv;
+ }
+ rsvec = rsvec || applv;
+
+/* Initialize SVA( 1:N ) = ( ||A e_i||_2, i = 1:N ) */
+/* (!) If necessary, scale A to protect the largest singular value */
+/* from overflow. It is possible that saving the largest singular */
+/* value destroys the information about the small ones. */
+/* This initial scaling is almost minimal in the sense that the */
+/* goal is to make sure that no column norm overflows, and that */
+/* SQRT(N)*max_i SVA(i) does not overflow. If INFinite entries */
+/* in A are detected, the procedure returns with INFO=-6. */
+
+ scale = 1.f / sqrt((real) (*m) * (real) (*n));
+ noscale = TRUE_;
+ goscale = TRUE_;
+
+ if (lower) {
+/* the input matrix is M-by-N lower triangular (trapezoidal) */
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ aapp = 0.f;
+ aaqq = 0.f;
+ i__2 = *m - p + 1;
+ slassq_(&i__2, &a[p + p * a_dim1], &c__1, &aapp, &aaqq);
+ if (aapp > big) {
+ *info = -6;
+ i__2 = -(*info);
+ xerbla_("SGESVJ", &i__2);
+ return 0;
+ }
+ aaqq = sqrt(aaqq);
+ if (aapp < big / aaqq && noscale) {
+ sva[p] = aapp * aaqq;
+ } else {
+ noscale = FALSE_;
+ sva[p] = aapp * (aaqq * scale);
+ if (goscale) {
+ goscale = FALSE_;
+ i__2 = p - 1;
+ for (q = 1; q <= i__2; ++q) {
+ sva[q] *= scale;
+/* L1873: */
+ }
+ }
+ }
+/* L1874: */
+ }
+ } else if (upper) {
+/* the input matrix is M-by-N upper triangular (trapezoidal) */
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ aapp = 0.f;
+ aaqq = 0.f;
+ slassq_(&p, &a[p * a_dim1 + 1], &c__1, &aapp, &aaqq);
+ if (aapp > big) {
+ *info = -6;
+ i__2 = -(*info);
+ xerbla_("SGESVJ", &i__2);
+ return 0;
+ }
+ aaqq = sqrt(aaqq);
+ if (aapp < big / aaqq && noscale) {
+ sva[p] = aapp * aaqq;
+ } else {
+ noscale = FALSE_;
+ sva[p] = aapp * (aaqq * scale);
+ if (goscale) {
+ goscale = FALSE_;
+ i__2 = p - 1;
+ for (q = 1; q <= i__2; ++q) {
+ sva[q] *= scale;
+/* L2873: */
+ }
+ }
+ }
+/* L2874: */
+ }
+ } else {
+/* the input matrix is M-by-N general dense */
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ aapp = 0.f;
+ aaqq = 0.f;
+ slassq_(m, &a[p * a_dim1 + 1], &c__1, &aapp, &aaqq);
+ if (aapp > big) {
+ *info = -6;
+ i__2 = -(*info);
+ xerbla_("SGESVJ", &i__2);
+ return 0;
+ }
+ aaqq = sqrt(aaqq);
+ if (aapp < big / aaqq && noscale) {
+ sva[p] = aapp * aaqq;
+ } else {
+ noscale = FALSE_;
+ sva[p] = aapp * (aaqq * scale);
+ if (goscale) {
+ goscale = FALSE_;
+ i__2 = p - 1;
+ for (q = 1; q <= i__2; ++q) {
+ sva[q] *= scale;
+/* L3873: */
+ }
+ }
+ }
+/* L3874: */
+ }
+ }
+
+ if (noscale) {
+ scale = 1.f;
+ }
+
+/* Move the smaller part of the spectrum from the underflow threshold */
+/* (!) Start by determining the position of the nonzero entries of the */
+/* array SVA() relative to ( SFMIN, BIG ). */
+
+ aapp = 0.f;
+ aaqq = big;
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ if (sva[p] != 0.f) {
+/* Computing MIN */
+ r__1 = aaqq, r__2 = sva[p];
+ aaqq = dmin(r__1,r__2);
+ }
+/* Computing MAX */
+ r__1 = aapp, r__2 = sva[p];
+ aapp = dmax(r__1,r__2);
+/* L4781: */
+ }
+
+/* #:) Quick return for zero matrix */
+
+ if (aapp == 0.f) {
+ if (lsvec) {
+ slaset_("G", m, n, &c_b17, &c_b18, &a[a_offset], lda);
+ }
+ work[1] = 1.f;
+ work[2] = 0.f;
+ work[3] = 0.f;
+ work[4] = 0.f;
+ work[5] = 0.f;
+ work[6] = 0.f;
+ return 0;
+ }
+
+/* #:) Quick return for one-column matrix */
+
+ if (*n == 1) {
+ if (lsvec) {
+ slascl_("G", &c__0, &c__0, &sva[1], &scale, m, &c__1, &a[a_dim1 +
+ 1], lda, &ierr);
+ }
+ work[1] = 1.f / scale;
+ if (sva[1] >= sfmin) {
+ work[2] = 1.f;
+ } else {
+ work[2] = 0.f;
+ }
+ work[3] = 0.f;
+ work[4] = 0.f;
+ work[5] = 0.f;
+ work[6] = 0.f;
+ return 0;
+ }
+
+/* Protect small singular values from underflow, and try to */
+/* avoid underflows/overflows in computing Jacobi rotations. */
+
+ sn = sqrt(sfmin / epsilon);
+ temp1 = sqrt(big / (real) (*n));
+ if (aapp <= sn || aaqq >= temp1 || sn <= aaqq && aapp <= temp1) {
+/* Computing MIN */
+ r__1 = big, r__2 = temp1 / aapp;
+ temp1 = dmin(r__1,r__2);
+/* AAQQ = AAQQ*TEMP1 */
+/* AAPP = AAPP*TEMP1 */
+ } else if (aaqq <= sn && aapp <= temp1) {
+/* Computing MIN */
+ r__1 = sn / aaqq, r__2 = big / (aapp * sqrt((real) (*n)));
+ temp1 = dmin(r__1,r__2);
+/* AAQQ = AAQQ*TEMP1 */
+/* AAPP = AAPP*TEMP1 */
+ } else if (aaqq >= sn && aapp >= temp1) {
+/* Computing MAX */
+ r__1 = sn / aaqq, r__2 = temp1 / aapp;
+ temp1 = dmax(r__1,r__2);
+/* AAQQ = AAQQ*TEMP1 */
+/* AAPP = AAPP*TEMP1 */
+ } else if (aaqq <= sn && aapp >= temp1) {
+/* Computing MIN */
+ r__1 = sn / aaqq, r__2 = big / (sqrt((real) (*n)) * aapp);
+ temp1 = dmin(r__1,r__2);
+/* AAQQ = AAQQ*TEMP1 */
+/* AAPP = AAPP*TEMP1 */
+ } else {
+ temp1 = 1.f;
+ }
+
+/* Scale, if necessary */
+
+ if (temp1 != 1.f) {
+ slascl_("G", &c__0, &c__0, &c_b18, &temp1, n, &c__1, &sva[1], n, &
+ ierr);
+ }
+ scale = temp1 * scale;
+ if (scale != 1.f) {
+ slascl_(joba, &c__0, &c__0, &c_b18, &scale, m, n, &a[a_offset], lda, &
+ ierr);
+ scale = 1.f / scale;
+ }
+
+/* Row-cyclic Jacobi SVD algorithm with column pivoting */
+
+ emptsw = *n * (*n - 1) / 2;
+ notrot = 0;
+ fastr[0] = 0.f;
+
+/* A is represented in factored form A = A * diag(WORK), where diag(WORK) */
+/* is initialized to identity. WORK is updated during fast scaled */
+/* rotations. */
+
+ i__1 = *n;
+ for (q = 1; q <= i__1; ++q) {
+ work[q] = 1.f;
+/* L1868: */
+ }
+
+
+ swband = 3;
+/* [TP] SWBAND is a tuning parameter [TP]. It is meaningful and effective */
+/* if SGESVJ is used as a computational routine in the preconditioned */
+/* Jacobi SVD algorithm SGESVJ. For sweeps i=1:SWBAND the procedure */
+/* works on pivots inside a band-like region around the diagonal. */
+/* The boundaries are determined dynamically, based on the number of */
+/* pivots above a threshold. */
+
+ kbl = min(8,*n);
+/* [TP] KBL is a tuning parameter that defines the tile size in the */
+/* tiling of the p-q loops of pivot pairs. In general, an optimal */
+/* value of KBL depends on the matrix dimensions and on the */
+/* parameters of the computer's memory. */
+
+ nbl = *n / kbl;
+ if (nbl * kbl != *n) {
+ ++nbl;
+ }
+
+/* Computing 2nd power */
+ i__1 = kbl;
+ blskip = i__1 * i__1;
+/* [TP] BLKSKIP is a tuning parameter that depends on SWBAND and KBL. */
+
+ rowskip = min(5,kbl);
+/* [TP] ROWSKIP is a tuning parameter. */
+
+ lkahead = 1;
+/* [TP] LKAHEAD is a tuning parameter. */
+
+/* Quasi block transformations, using the lower (upper) triangular */
+/* structure of the input matrix. The quasi-block-cycling usually */
+/* invokes cubic convergence. Big part of this cycle is done inside */
+/* canonical subspaces of dimensions less than M. */
+
+/* Computing MAX */
+ i__1 = 64, i__2 = kbl << 2;
+ if ((lower || upper) && *n > max(i__1,i__2)) {
+/* [TP] The number of partition levels and the actual partition are */
+/* tuning parameters. */
+ n4 = *n / 4;
+ n2 = *n / 2;
+ n34 = n4 * 3;
+ if (applv) {
+ q = 0;
+ } else {
+ q = 1;
+ }
+
+ if (lower) {
+
+/* This works very well on lower triangular matrices, in particular */
+/* in the framework of the preconditioned Jacobi SVD (xGEJSV). */
+/* The idea is simple: */
+/* [+ 0 0 0] Note that Jacobi transformations of [0 0] */
+/* [+ + 0 0] [0 0] */
+/* [+ + x 0] actually work on [x 0] [x 0] */
+/* [+ + x x] [x x]. [x x] */
+
+ i__1 = *m - n34;
+ i__2 = *n - n34;
+ i__3 = *lwork - *n;
+ sgsvj0_(jobv, &i__1, &i__2, &a[n34 + 1 + (n34 + 1) * a_dim1], lda,
+ &work[n34 + 1], &sva[n34 + 1], &mvl, &v[n34 * q + 1 + (
+ n34 + 1) * v_dim1], ldv, &epsilon, &sfmin, &tol, &c__2, &
+ work[*n + 1], &i__3, &ierr);
+
+ i__1 = *m - n2;
+ i__2 = n34 - n2;
+ i__3 = *lwork - *n;
+ sgsvj0_(jobv, &i__1, &i__2, &a[n2 + 1 + (n2 + 1) * a_dim1], lda, &
+ work[n2 + 1], &sva[n2 + 1], &mvl, &v[n2 * q + 1 + (n2 + 1)
+ * v_dim1], ldv, &epsilon, &sfmin, &tol, &c__2, &work[*n
+ + 1], &i__3, &ierr);
+
+ i__1 = *m - n2;
+ i__2 = *n - n2;
+ i__3 = *lwork - *n;
+ sgsvj1_(jobv, &i__1, &i__2, &n4, &a[n2 + 1 + (n2 + 1) * a_dim1],
+ lda, &work[n2 + 1], &sva[n2 + 1], &mvl, &v[n2 * q + 1 + (
+ n2 + 1) * v_dim1], ldv, &epsilon, &sfmin, &tol, &c__1, &
+ work[*n + 1], &i__3, &ierr);
+
+ i__1 = *m - n4;
+ i__2 = n2 - n4;
+ i__3 = *lwork - *n;
+ sgsvj0_(jobv, &i__1, &i__2, &a[n4 + 1 + (n4 + 1) * a_dim1], lda, &
+ work[n4 + 1], &sva[n4 + 1], &mvl, &v[n4 * q + 1 + (n4 + 1)
+ * v_dim1], ldv, &epsilon, &sfmin, &tol, &c__1, &work[*n
+ + 1], &i__3, &ierr);
+
+ i__1 = *lwork - *n;
+ sgsvj0_(jobv, m, &n4, &a[a_offset], lda, &work[1], &sva[1], &mvl,
+ &v[v_offset], ldv, &epsilon, &sfmin, &tol, &c__1, &work[*
+ n + 1], &i__1, &ierr);
+
+ i__1 = *lwork - *n;
+ sgsvj1_(jobv, m, &n2, &n4, &a[a_offset], lda, &work[1], &sva[1], &
+ mvl, &v[v_offset], ldv, &epsilon, &sfmin, &tol, &c__1, &
+ work[*n + 1], &i__1, &ierr);
+
+
+ } else if (upper) {
+
+
+ i__1 = *lwork - *n;
+ sgsvj0_(jobv, &n4, &n4, &a[a_offset], lda, &work[1], &sva[1], &
+ mvl, &v[v_offset], ldv, &epsilon, &sfmin, &tol, &c__2, &
+ work[*n + 1], &i__1, &ierr);
+
+ i__1 = *lwork - *n;
+ sgsvj0_(jobv, &n2, &n4, &a[(n4 + 1) * a_dim1 + 1], lda, &work[n4
+ + 1], &sva[n4 + 1], &mvl, &v[n4 * q + 1 + (n4 + 1) *
+ v_dim1], ldv, &epsilon, &sfmin, &tol, &c__1, &work[*n + 1]
+, &i__1, &ierr);
+
+ i__1 = *lwork - *n;
+ sgsvj1_(jobv, &n2, &n2, &n4, &a[a_offset], lda, &work[1], &sva[1],
+ &mvl, &v[v_offset], ldv, &epsilon, &sfmin, &tol, &c__1, &
+ work[*n + 1], &i__1, &ierr);
+
+ i__1 = n2 + n4;
+ i__2 = *lwork - *n;
+ sgsvj0_(jobv, &i__1, &n4, &a[(n2 + 1) * a_dim1 + 1], lda, &work[
+ n2 + 1], &sva[n2 + 1], &mvl, &v[n2 * q + 1 + (n2 + 1) *
+ v_dim1], ldv, &epsilon, &sfmin, &tol, &c__1, &work[*n + 1]
+, &i__2, &ierr);
+ }
+
+ }
+
+/* -#- Row-cyclic pivot strategy with de Rijk's pivoting -#- */
+
+ for (i__ = 1; i__ <= 30; ++i__) {
+/* .. go go go ... */
+
+ mxaapq = 0.f;
+ mxsinj = 0.f;
+ iswrot = 0;
+
+ notrot = 0;
+ pskipped = 0;
+
+/* Each sweep is unrolled using KBL-by-KBL tiles over the pivot pairs */
+/* 1 <= p < q <= N. This is the first step toward a blocked implementation */
+/* of the rotations. New implementation, based on block transformations, */
+/* is under development. */
+
+ i__1 = nbl;
+ for (ibr = 1; ibr <= i__1; ++ibr) {
+
+ igl = (ibr - 1) * kbl + 1;
+
+/* Computing MIN */
+ i__3 = lkahead, i__4 = nbl - ibr;
+ i__2 = min(i__3,i__4);
+ for (ir1 = 0; ir1 <= i__2; ++ir1) {
+
+ igl += ir1 * kbl;
+
+/* Computing MIN */
+ i__4 = igl + kbl - 1, i__5 = *n - 1;
+ i__3 = min(i__4,i__5);
+ for (p = igl; p <= i__3; ++p) {
+
+/* .. de Rijk's pivoting */
+
+ i__4 = *n - p + 1;
+ q = isamax_(&i__4, &sva[p], &c__1) + p - 1;
+ if (p != q) {
+ sswap_(m, &a[p * a_dim1 + 1], &c__1, &a[q * a_dim1 +
+ 1], &c__1);
+ if (rsvec) {
+ sswap_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q *
+ v_dim1 + 1], &c__1);
+ }
+ temp1 = sva[p];
+ sva[p] = sva[q];
+ sva[q] = temp1;
+ temp1 = work[p];
+ work[p] = work[q];
+ work[q] = temp1;
+ }
+
+ if (ir1 == 0) {
+
+/* Column norms are periodically updated by explicit */
+/* norm computation. */
+/* Caveat: */
+/* Unfortunately, some BLAS implementations compute SNRM2(M,A(1,p),1) */
+/* as SQRT(SDOT(M,A(1,p),1,A(1,p),1)), which may cause the result to */
+/* overflow for ||A(:,p)||_2 > SQRT(overflow_threshold), and to */
+/* underflow for ||A(:,p)||_2 < SQRT(underflow_threshold). */
+/* Hence, SNRM2 cannot be trusted, not even in the case when */
+/* the true norm is far from the under(over)flow boundaries. */
+/* If properly implemented SNRM2 is available, the IF-THEN-ELSE */
+/* below should read "AAPP = SNRM2( M, A(1,p), 1 ) * WORK(p)". */
+
+ if (sva[p] < rootbig && sva[p] > rootsfmin) {
+ sva[p] = snrm2_(m, &a[p * a_dim1 + 1], &c__1) *
+ work[p];
+ } else {
+ temp1 = 0.f;
+ aapp = 0.f;
+ slassq_(m, &a[p * a_dim1 + 1], &c__1, &temp1, &
+ aapp);
+ sva[p] = temp1 * sqrt(aapp) * work[p];
+ }
+ aapp = sva[p];
+ } else {
+ aapp = sva[p];
+ }
+
+ if (aapp > 0.f) {
+
+ pskipped = 0;
+
+/* Computing MIN */
+ i__5 = igl + kbl - 1;
+ i__4 = min(i__5,*n);
+ for (q = p + 1; q <= i__4; ++q) {
+
+ aaqq = sva[q];
+
+ if (aaqq > 0.f) {
+
+ aapp0 = aapp;
+ if (aaqq >= 1.f) {
+ rotok = small * aapp <= aaqq;
+ if (aapp < big / aaqq) {
+ aapq = sdot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * work[p] * work[q] /
+ aaqq / aapp;
+ } else {
+ scopy_(m, &a[p * a_dim1 + 1], &c__1, &
+ work[*n + 1], &c__1);
+ slascl_("G", &c__0, &c__0, &aapp, &
+ work[p], m, &c__1, &work[*n +
+ 1], lda, &ierr);
+ aapq = sdot_(m, &work[*n + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1) *
+ work[q] / aaqq;
+ }
+ } else {
+ rotok = aapp <= aaqq / small;
+ if (aapp > small / aaqq) {
+ aapq = sdot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * work[p] * work[q] /
+ aaqq / aapp;
+ } else {
+ scopy_(m, &a[q * a_dim1 + 1], &c__1, &
+ work[*n + 1], &c__1);
+ slascl_("G", &c__0, &c__0, &aaqq, &
+ work[q], m, &c__1, &work[*n +
+ 1], lda, &ierr);
+ aapq = sdot_(m, &work[*n + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1) *
+ work[p] / aapp;
+ }
+ }
+
+/* Computing MAX */
+ r__1 = mxaapq, r__2 = dabs(aapq);
+ mxaapq = dmax(r__1,r__2);
+
+/* TO rotate or NOT to rotate, THAT is the question ... */
+
+ if (dabs(aapq) > tol) {
+
+/* .. rotate */
+/* [RTD] ROTATED = ROTATED + ONE */
+
+ if (ir1 == 0) {
+ notrot = 0;
+ pskipped = 0;
+ ++iswrot;
+ }
+
+ if (rotok) {
+
+ aqoap = aaqq / aapp;
+ apoaq = aapp / aaqq;
+ theta = (r__1 = aqoap - apoaq, dabs(
+ r__1)) * -.5f / aapq;
+
+ if (dabs(theta) > bigtheta) {
+
+ t = .5f / theta;
+ fastr[2] = t * work[p] / work[q];
+ fastr[3] = -t * work[q] / work[p];
+ srotm_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1],
+ &c__1, fastr);
+ if (rsvec) {
+ srotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q *
+ v_dim1 + 1], &c__1, fastr);
+ }
+/* Computing MAX */
+ r__1 = 0.f, r__2 = t * apoaq *
+ aapq + 1.f;
+ sva[q] = aaqq * sqrt((dmax(r__1,
+ r__2)));
+ aapp *= sqrt(1.f - t * aqoap *
+ aapq);
+/* Computing MAX */
+ r__1 = mxsinj, r__2 = dabs(t);
+ mxsinj = dmax(r__1,r__2);
+
+ } else {
+
+/* .. choose correct signum for THETA and rotate */
+
+ thsign = -r_sign(&c_b18, &aapq);
+ t = 1.f / (theta + thsign * sqrt(
+ theta * theta + 1.f));
+ cs = sqrt(1.f / (t * t + 1.f));
+ sn = t * cs;
+
+/* Computing MAX */
+ r__1 = mxsinj, r__2 = dabs(sn);
+ mxsinj = dmax(r__1,r__2);
+/* Computing MAX */
+ r__1 = 0.f, r__2 = t * apoaq *
+ aapq + 1.f;
+ sva[q] = aaqq * sqrt((dmax(r__1,
+ r__2)));
+/* Computing MAX */
+ r__1 = 0.f, r__2 = 1.f - t *
+ aqoap * aapq;
+ aapp *= sqrt((dmax(r__1,r__2)));
+
+ apoaq = work[p] / work[q];
+ aqoap = work[q] / work[p];
+ if (work[p] >= 1.f) {
+ if (work[q] >= 1.f) {
+ fastr[2] = t * apoaq;
+ fastr[3] = -t * aqoap;
+ work[p] *= cs;
+ work[q] *= cs;
+ srotm_(m, &a[p * a_dim1 + 1], &c__1, &a[q *
+ a_dim1 + 1], &c__1, fastr);
+ if (rsvec) {
+ srotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[
+ q * v_dim1 + 1], &c__1, fastr);
+ }
+ } else {
+ r__1 = -t * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ work[p] *= cs;
+ work[q] /= cs;
+ if (rsvec) {
+ r__1 = -t * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ }
+ }
+ } else {
+ if (work[q] >= 1.f) {
+ r__1 = t * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ work[p] /= cs;
+ work[q] *= cs;
+ if (rsvec) {
+ r__1 = t * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ }
+ } else {
+ if (work[p] >= work[q]) {
+ r__1 = -t * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ work[p] *= cs;
+ work[q] /= cs;
+ if (rsvec) {
+ r__1 = -t * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ }
+ } else {
+ r__1 = t * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ work[p] /= cs;
+ work[q] *= cs;
+ if (rsvec) {
+ r__1 = t * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ }
+ }
+ }
+ }
+ }
+
+ } else {
+/* .. have to use modified Gram-Schmidt like transformation */
+ scopy_(m, &a[p * a_dim1 + 1], &c__1, &
+ work[*n + 1], &c__1);
+ slascl_("G", &c__0, &c__0, &aapp, &
+ c_b18, m, &c__1, &work[*n + 1]
+, lda, &ierr);
+ slascl_("G", &c__0, &c__0, &aaqq, &
+ c_b18, m, &c__1, &a[q *
+ a_dim1 + 1], lda, &ierr);
+ temp1 = -aapq * work[p] / work[q];
+ saxpy_(m, &temp1, &work[*n + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1);
+ slascl_("G", &c__0, &c__0, &c_b18, &
+ aaqq, m, &c__1, &a[q * a_dim1
+ + 1], lda, &ierr);
+/* Computing MAX */
+ r__1 = 0.f, r__2 = 1.f - aapq * aapq;
+ sva[q] = aaqq * sqrt((dmax(r__1,r__2))
+ );
+ mxsinj = dmax(mxsinj,sfmin);
+ }
+/* END IF ROTOK THEN ... ELSE */
+
+/* In the case of cancellation in updating SVA(q), SVA(p) */
+/* recompute SVA(q), SVA(p). */
+
+/* Computing 2nd power */
+ r__1 = sva[q] / aaqq;
+ if (r__1 * r__1 <= rooteps) {
+ if (aaqq < rootbig && aaqq >
+ rootsfmin) {
+ sva[q] = snrm2_(m, &a[q * a_dim1
+ + 1], &c__1) * work[q];
+ } else {
+ t = 0.f;
+ aaqq = 0.f;
+ slassq_(m, &a[q * a_dim1 + 1], &
+ c__1, &t, &aaqq);
+ sva[q] = t * sqrt(aaqq) * work[q];
+ }
+ }
+ if (aapp / aapp0 <= rooteps) {
+ if (aapp < rootbig && aapp >
+ rootsfmin) {
+ aapp = snrm2_(m, &a[p * a_dim1 +
+ 1], &c__1) * work[p];
+ } else {
+ t = 0.f;
+ aapp = 0.f;
+ slassq_(m, &a[p * a_dim1 + 1], &
+ c__1, &t, &aapp);
+ aapp = t * sqrt(aapp) * work[p];
+ }
+ sva[p] = aapp;
+ }
+
+ } else {
+/* A(:,p) and A(:,q) already numerically orthogonal */
+ if (ir1 == 0) {
+ ++notrot;
+ }
+/* [RTD] SKIPPED = SKIPPED + 1 */
+ ++pskipped;
+ }
+ } else {
+/* A(:,q) is zero column */
+ if (ir1 == 0) {
+ ++notrot;
+ }
+ ++pskipped;
+ }
+
+ if (i__ <= swband && pskipped > rowskip) {
+ if (ir1 == 0) {
+ aapp = -aapp;
+ }
+ notrot = 0;
+ goto L2103;
+ }
+
+/* L2002: */
+ }
+/* END q-LOOP */
+
+L2103:
+/* bailed out of q-loop */
+
+ sva[p] = aapp;
+
+ } else {
+ sva[p] = aapp;
+ if (ir1 == 0 && aapp == 0.f) {
+/* Computing MIN */
+ i__4 = igl + kbl - 1;
+ notrot = notrot + min(i__4,*n) - p;
+ }
+ }
+
+/* L2001: */
+ }
+/* end of the p-loop */
+/* end of doing the block ( ibr, ibr ) */
+/* L1002: */
+ }
+/* end of ir1-loop */
+
+/* ... go to the off diagonal blocks */
+
+ igl = (ibr - 1) * kbl + 1;
+
+ i__2 = nbl;
+ for (jbc = ibr + 1; jbc <= i__2; ++jbc) {
+
+ jgl = (jbc - 1) * kbl + 1;
+
+/* doing the block at ( ibr, jbc ) */
+
+ ijblsk = 0;
+/* Computing MIN */
+ i__4 = igl + kbl - 1;
+ i__3 = min(i__4,*n);
+ for (p = igl; p <= i__3; ++p) {
+
+ aapp = sva[p];
+ if (aapp > 0.f) {
+
+ pskipped = 0;
+
+/* Computing MIN */
+ i__5 = jgl + kbl - 1;
+ i__4 = min(i__5,*n);
+ for (q = jgl; q <= i__4; ++q) {
+
+ aaqq = sva[q];
+ if (aaqq > 0.f) {
+ aapp0 = aapp;
+
+/* -#- M x 2 Jacobi SVD -#- */
+
+/* Safe Gram matrix computation */
+
+ if (aaqq >= 1.f) {
+ if (aapp >= aaqq) {
+ rotok = small * aapp <= aaqq;
+ } else {
+ rotok = small * aaqq <= aapp;
+ }
+ if (aapp < big / aaqq) {
+ aapq = sdot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * work[p] * work[q] /
+ aaqq / aapp;
+ } else {
+ scopy_(m, &a[p * a_dim1 + 1], &c__1, &
+ work[*n + 1], &c__1);
+ slascl_("G", &c__0, &c__0, &aapp, &
+ work[p], m, &c__1, &work[*n +
+ 1], lda, &ierr);
+ aapq = sdot_(m, &work[*n + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1) *
+ work[q] / aaqq;
+ }
+ } else {
+ if (aapp >= aaqq) {
+ rotok = aapp <= aaqq / small;
+ } else {
+ rotok = aaqq <= aapp / small;
+ }
+ if (aapp > small / aaqq) {
+ aapq = sdot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * work[p] * work[q] /
+ aaqq / aapp;
+ } else {
+ scopy_(m, &a[q * a_dim1 + 1], &c__1, &
+ work[*n + 1], &c__1);
+ slascl_("G", &c__0, &c__0, &aaqq, &
+ work[q], m, &c__1, &work[*n +
+ 1], lda, &ierr);
+ aapq = sdot_(m, &work[*n + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1) *
+ work[p] / aapp;
+ }
+ }
+
+/* Computing MAX */
+ r__1 = mxaapq, r__2 = dabs(aapq);
+ mxaapq = dmax(r__1,r__2);
+
+/* TO rotate or NOT to rotate, THAT is the question ... */
+
+ if (dabs(aapq) > tol) {
+ notrot = 0;
+/* [RTD] ROTATED = ROTATED + 1 */
+ pskipped = 0;
+ ++iswrot;
+
+ if (rotok) {
+
+ aqoap = aaqq / aapp;
+ apoaq = aapp / aaqq;
+ theta = (r__1 = aqoap - apoaq, dabs(
+ r__1)) * -.5f / aapq;
+ if (aaqq > aapp0) {
+ theta = -theta;
+ }
+
+ if (dabs(theta) > bigtheta) {
+ t = .5f / theta;
+ fastr[2] = t * work[p] / work[q];
+ fastr[3] = -t * work[q] / work[p];
+ srotm_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1],
+ &c__1, fastr);
+ if (rsvec) {
+ srotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q *
+ v_dim1 + 1], &c__1, fastr);
+ }
+/* Computing MAX */
+ r__1 = 0.f, r__2 = t * apoaq *
+ aapq + 1.f;
+ sva[q] = aaqq * sqrt((dmax(r__1,
+ r__2)));
+/* Computing MAX */
+ r__1 = 0.f, r__2 = 1.f - t *
+ aqoap * aapq;
+ aapp *= sqrt((dmax(r__1,r__2)));
+/* Computing MAX */
+ r__1 = mxsinj, r__2 = dabs(t);
+ mxsinj = dmax(r__1,r__2);
+ } else {
+
+/* .. choose correct signum for THETA and rotate */
+
+ thsign = -r_sign(&c_b18, &aapq);
+ if (aaqq > aapp0) {
+ thsign = -thsign;
+ }
+ t = 1.f / (theta + thsign * sqrt(
+ theta * theta + 1.f));
+ cs = sqrt(1.f / (t * t + 1.f));
+ sn = t * cs;
+/* Computing MAX */
+ r__1 = mxsinj, r__2 = dabs(sn);
+ mxsinj = dmax(r__1,r__2);
+/* Computing MAX */
+ r__1 = 0.f, r__2 = t * apoaq *
+ aapq + 1.f;
+ sva[q] = aaqq * sqrt((dmax(r__1,
+ r__2)));
+ aapp *= sqrt(1.f - t * aqoap *
+ aapq);
+
+ apoaq = work[p] / work[q];
+ aqoap = work[q] / work[p];
+ if (work[p] >= 1.f) {
+
+ if (work[q] >= 1.f) {
+ fastr[2] = t * apoaq;
+ fastr[3] = -t * aqoap;
+ work[p] *= cs;
+ work[q] *= cs;
+ srotm_(m, &a[p * a_dim1 + 1], &c__1, &a[q *
+ a_dim1 + 1], &c__1, fastr);
+ if (rsvec) {
+ srotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[
+ q * v_dim1 + 1], &c__1, fastr);
+ }
+ } else {
+ r__1 = -t * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ if (rsvec) {
+ r__1 = -t * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ }
+ work[p] *= cs;
+ work[q] /= cs;
+ }
+ } else {
+ if (work[q] >= 1.f) {
+ r__1 = t * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ if (rsvec) {
+ r__1 = t * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ }
+ work[p] /= cs;
+ work[q] *= cs;
+ } else {
+ if (work[p] >= work[q]) {
+ r__1 = -t * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ work[p] *= cs;
+ work[q] /= cs;
+ if (rsvec) {
+ r__1 = -t * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ }
+ } else {
+ r__1 = t * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ work[p] /= cs;
+ work[q] *= cs;
+ if (rsvec) {
+ r__1 = t * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ }
+ }
+ }
+ }
+ }
+
+ } else {
+ if (aapp > aaqq) {
+ scopy_(m, &a[p * a_dim1 + 1], &
+ c__1, &work[*n + 1], &
+ c__1);
+ slascl_("G", &c__0, &c__0, &aapp,
+ &c_b18, m, &c__1, &work[*
+ n + 1], lda, &ierr);
+ slascl_("G", &c__0, &c__0, &aaqq,
+ &c_b18, m, &c__1, &a[q *
+ a_dim1 + 1], lda, &ierr);
+ temp1 = -aapq * work[p] / work[q];
+ saxpy_(m, &temp1, &work[*n + 1], &
+ c__1, &a[q * a_dim1 + 1],
+ &c__1);
+ slascl_("G", &c__0, &c__0, &c_b18,
+ &aaqq, m, &c__1, &a[q *
+ a_dim1 + 1], lda, &ierr);
+/* Computing MAX */
+ r__1 = 0.f, r__2 = 1.f - aapq *
+ aapq;
+ sva[q] = aaqq * sqrt((dmax(r__1,
+ r__2)));
+ mxsinj = dmax(mxsinj,sfmin);
+ } else {
+ scopy_(m, &a[q * a_dim1 + 1], &
+ c__1, &work[*n + 1], &
+ c__1);
+ slascl_("G", &c__0, &c__0, &aaqq,
+ &c_b18, m, &c__1, &work[*
+ n + 1], lda, &ierr);
+ slascl_("G", &c__0, &c__0, &aapp,
+ &c_b18, m, &c__1, &a[p *
+ a_dim1 + 1], lda, &ierr);
+ temp1 = -aapq * work[q] / work[p];
+ saxpy_(m, &temp1, &work[*n + 1], &
+ c__1, &a[p * a_dim1 + 1],
+ &c__1);
+ slascl_("G", &c__0, &c__0, &c_b18,
+ &aapp, m, &c__1, &a[p *
+ a_dim1 + 1], lda, &ierr);
+/* Computing MAX */
+ r__1 = 0.f, r__2 = 1.f - aapq *
+ aapq;
+ sva[p] = aapp * sqrt((dmax(r__1,
+ r__2)));
+ mxsinj = dmax(mxsinj,sfmin);
+ }
+ }
+/* END IF ROTOK THEN ... ELSE */
+
+/* In the case of cancellation in updating SVA(q) */
+/* .. recompute SVA(q) */
+/* Computing 2nd power */
+ r__1 = sva[q] / aaqq;
+ if (r__1 * r__1 <= rooteps) {
+ if (aaqq < rootbig && aaqq >
+ rootsfmin) {
+ sva[q] = snrm2_(m, &a[q * a_dim1
+ + 1], &c__1) * work[q];
+ } else {
+ t = 0.f;
+ aaqq = 0.f;
+ slassq_(m, &a[q * a_dim1 + 1], &
+ c__1, &t, &aaqq);
+ sva[q] = t * sqrt(aaqq) * work[q];
+ }
+ }
+/* Computing 2nd power */
+ r__1 = aapp / aapp0;
+ if (r__1 * r__1 <= rooteps) {
+ if (aapp < rootbig && aapp >
+ rootsfmin) {
+ aapp = snrm2_(m, &a[p * a_dim1 +
+ 1], &c__1) * work[p];
+ } else {
+ t = 0.f;
+ aapp = 0.f;
+ slassq_(m, &a[p * a_dim1 + 1], &
+ c__1, &t, &aapp);
+ aapp = t * sqrt(aapp) * work[p];
+ }
+ sva[p] = aapp;
+ }
+/* end of OK rotation */
+ } else {
+ ++notrot;
+/* [RTD] SKIPPED = SKIPPED + 1 */
+ ++pskipped;
+ ++ijblsk;
+ }
+ } else {
+ ++notrot;
+ ++pskipped;
+ ++ijblsk;
+ }
+
+ if (i__ <= swband && ijblsk >= blskip) {
+ sva[p] = aapp;
+ notrot = 0;
+ goto L2011;
+ }
+ if (i__ <= swband && pskipped > rowskip) {
+ aapp = -aapp;
+ notrot = 0;
+ goto L2203;
+ }
+
+/* L2200: */
+ }
+/* end of the q-loop */
+L2203:
+
+ sva[p] = aapp;
+
+ } else {
+
+ if (aapp == 0.f) {
+/* Computing MIN */
+ i__4 = jgl + kbl - 1;
+ notrot = notrot + min(i__4,*n) - jgl + 1;
+ }
+ if (aapp < 0.f) {
+ notrot = 0;
+ }
+
+ }
+
+/* L2100: */
+ }
+/* end of the p-loop */
+/* L2010: */
+ }
+/* end of the jbc-loop */
+L2011:
+/* 2011 bailed out of the jbc-loop */
+/* Computing MIN */
+ i__3 = igl + kbl - 1;
+ i__2 = min(i__3,*n);
+ for (p = igl; p <= i__2; ++p) {
+ sva[p] = (r__1 = sva[p], dabs(r__1));
+/* L2012: */
+ }
+/* ** */
+/* L2000: */
+ }
+/* 2000 :: end of the ibr-loop */
+
+/* .. update SVA(N) */
+ if (sva[*n] < rootbig && sva[*n] > rootsfmin) {
+ sva[*n] = snrm2_(m, &a[*n * a_dim1 + 1], &c__1) * work[*n];
+ } else {
+ t = 0.f;
+ aapp = 0.f;
+ slassq_(m, &a[*n * a_dim1 + 1], &c__1, &t, &aapp);
+ sva[*n] = t * sqrt(aapp) * work[*n];
+ }
+
+/* Additional steering devices */
+
+ if (i__ < swband && (mxaapq <= roottol || iswrot <= *n)) {
+ swband = i__;
+ }
+
+ if (i__ > swband + 1 && mxaapq < sqrt((real) (*n)) * tol && (real) (*
+ n) * mxaapq * mxsinj < tol) {
+ goto L1994;
+ }
+
+ if (notrot >= emptsw) {
+ goto L1994;
+ }
+
+/* L1993: */
+ }
+/* end i=1:NSWEEP loop */
+
+/* #:( Reaching this point means that the procedure has not converged. */
+ *info = 29;
+ goto L1995;
+
+L1994:
+/* #:) Reaching this point means numerical convergence after the i-th */
+/* sweep. */
+
+ *info = 0;
+/* #:) INFO = 0 confirms successful iterations. */
+L1995:
+
+/* Sort the singular values and find how many are above */
+/* the underflow threshold. */
+
+ n2 = 0;
+ n4 = 0;
+ i__1 = *n - 1;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *n - p + 1;
+ q = isamax_(&i__2, &sva[p], &c__1) + p - 1;
+ if (p != q) {
+ temp1 = sva[p];
+ sva[p] = sva[q];
+ sva[q] = temp1;
+ temp1 = work[p];
+ work[p] = work[q];
+ work[q] = temp1;
+ sswap_(m, &a[p * a_dim1 + 1], &c__1, &a[q * a_dim1 + 1], &c__1);
+ if (rsvec) {
+ sswap_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ }
+ }
+ if (sva[p] != 0.f) {
+ ++n4;
+ if (sva[p] * scale > sfmin) {
+ ++n2;
+ }
+ }
+/* L5991: */
+ }
+ if (sva[*n] != 0.f) {
+ ++n4;
+ if (sva[*n] * scale > sfmin) {
+ ++n2;
+ }
+ }
+
+/* Normalize the left singular vectors. */
+
+ if (lsvec || uctol) {
+ i__1 = n2;
+ for (p = 1; p <= i__1; ++p) {
+ r__1 = work[p] / sva[p];
+ sscal_(m, &r__1, &a[p * a_dim1 + 1], &c__1);
+/* L1998: */
+ }
+ }
+
+/* Scale the product of Jacobi rotations (assemble the fast rotations). */
+
+ if (rsvec) {
+ if (applv) {
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ sscal_(&mvl, &work[p], &v[p * v_dim1 + 1], &c__1);
+/* L2398: */
+ }
+ } else {
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ temp1 = 1.f / snrm2_(&mvl, &v[p * v_dim1 + 1], &c__1);
+ sscal_(&mvl, &temp1, &v[p * v_dim1 + 1], &c__1);
+/* L2399: */
+ }
+ }
+ }
+
+/* Undo scaling, if necessary (and possible). */
+ if (scale > 1.f && sva[1] < big / scale || scale < 1.f && sva[n2] > sfmin
+ / scale) {
+ i__1 = *n;
+ for (p = 1; p <= i__1; ++p) {
+ sva[p] = scale * sva[p];
+/* L2400: */
+ }
+ scale = 1.f;
+ }
+
+ work[1] = scale;
+/* The singular values of A are SCALE*SVA(1:N). If SCALE.NE.ONE */
+/* then some of the singular values may overflow or underflow and */
+/* the spectrum is given in this factored representation. */
+
+ work[2] = (real) n4;
+/* N4 is the number of computed nonzero singular values of A. */
+
+ work[3] = (real) n2;
+/* N2 is the number of singular values of A greater than SFMIN. */
+/* If N2<N, SVA(N2:N) contains ZEROS and/or denormalized numbers */
+/* that may carry some information. */
+
+ work[4] = (real) i__;
+/* i is the index of the last sweep before declaring convergence. */
+
+ work[5] = mxaapq;
+/* MXAAPQ is the largest absolute value of scaled pivots in the */
+/* last sweep */
+
+ work[6] = mxsinj;
+/* MXSINJ is the largest absolute value of the sines of Jacobi angles */
+/* in the last sweep */
+
+ return 0;
+/* .. */
+/* .. END OF SGESVJ */
+/* .. */
+} /* sgesvj_ */
diff --git a/contrib/libs/clapack/sgesvx.c b/contrib/libs/clapack/sgesvx.c
new file mode 100644
index 0000000000..932cccf32d
--- /dev/null
+++ b/contrib/libs/clapack/sgesvx.c
@@ -0,0 +1,582 @@
+/* sgesvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sgesvx_(char *fact, char *trans, integer *n, integer *
+	nrhs, real *a, integer *lda, real *af, integer *ldaf, integer *ipiv,
+	char *equed, real *r__, real *c__, real *b, integer *ldb, real *x,
+	integer *ldx, real *rcond, real *ferr, real *berr, real *work,
+	integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+	    x_offset, i__1, i__2;
+    real r__1, r__2;
+
+    /* Local variables */
+    integer i__, j;
+    real amax;
+    char norm[1];
+    extern logical lsame_(char *, char *);
+    real rcmin, rcmax, anorm;
+    logical equil;
+    real colcnd;
+    extern doublereal slamch_(char *), slange_(char *, integer *,
+	    integer *, real *, integer *, real *);
+    logical nofact;
+    extern /* Subroutine */ int slaqge_(integer *, integer *, real *, integer
+	    *, real *, real *, real *, real *, real *, char *),
+	    xerbla_(char *, integer *), sgecon_(char *, integer *,
+	    real *, integer *, real *, real *, real *, integer *, integer *);
+    real bignum;
+    integer infequ;
+    logical colequ;
+    extern /* Subroutine */ int sgeequ_(integer *, integer *, real *, integer
+	    *, real *, real *, real *, real *, real *, integer *), sgerfs_(
+	    char *, integer *, integer *, real *, integer *, real *, integer *
+, integer *, real *, integer *, real *, integer *, real *, real *,
+	     real *, integer *, integer *), sgetrf_(integer *,
+	    integer *, real *, integer *, integer *, integer *);
+    real rowcnd;
+    extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+	    integer *, real *, integer *);
+    logical notran;
+    extern doublereal slantr_(char *, char *, char *, integer *, integer *,
+	    real *, integer *, real *);
+    extern /* Subroutine */ int sgetrs_(char *, integer *, integer *, real *,
+	    integer *, integer *, real *, integer *, integer *);
+    real smlnum;
+    logical rowequ;
+    real rpvgrw;
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SGESVX uses the LU factorization to compute the solution to a real */
+/*  system of linear equations */
+/*     A * X = B, */
+/*  where A is an N-by-N matrix and X and B are N-by-NRHS matrices. */
+
+/*  Error bounds on the solution and a condition estimate are also */
+/*  provided. */
+
+/*  Description */
+/*  =========== */
+
+/*  The following steps are performed: */
+
+/*  1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/*     the system: */
+/*        TRANS = 'N':  diag(R)*A*diag(C)     *inv(diag(C))*X = diag(R)*B */
+/*        TRANS = 'T': (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B */
+/*        TRANS = 'C': (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B */
+/*     Whether or not the system will be equilibrated depends on the */
+/*     scaling of the matrix A, but if equilibration is used, A is */
+/*     overwritten by diag(R)*A*diag(C) and B by diag(R)*B (if TRANS='N') */
+/*     or diag(C)*B (if TRANS = 'T' or 'C'). */
+
+/*  2. If FACT = 'N' or 'E', the LU decomposition is used to factor the */
+/*     matrix A (after equilibration if FACT = 'E') as */
+/*        A = P * L * U, */
+/*     where P is a permutation matrix, L is a unit lower triangular */
+/*     matrix, and U is upper triangular. */
+
+/*  3. If some U(i,i)=0, so that U is exactly singular, then the routine */
+/*     returns with INFO = i. Otherwise, the factored form of A is used */
+/*     to estimate the condition number of the matrix A.  If the */
+/*     reciprocal of the condition number is less than machine precision, */
+/*     INFO = N+1 is returned as a warning, but the routine still goes on */
+/*     to solve for X and compute error bounds as described below. */
+
+/*  4. The system of equations is solved for X using the factored form */
+/*     of A. */
+
+/*  5. Iterative refinement is applied to improve the computed solution */
+/*     matrix and calculate error bounds and backward error estimates */
+/*     for it. */
+
+/*  6. If equilibration was used, the matrix X is premultiplied by */
+/*     diag(C) (if TRANS = 'N') or diag(R) (if TRANS = 'T' or 'C') so */
+/*     that it solves the original system before equilibration. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  FACT    (input) CHARACTER*1 */
+/*          Specifies whether or not the factored form of the matrix A is */
+/*          supplied on entry, and if not, whether the matrix A should be */
+/*          equilibrated before it is factored. */
+/*          = 'F':  On entry, AF and IPIV contain the factored form of A. */
+/*                  If EQUED is not 'N', the matrix A has been */
+/*                  equilibrated with scaling factors given by R and C. */
+/*                  A, AF, and IPIV are not modified. */
+/*          = 'N':  The matrix A will be copied to AF and factored. */
+/*          = 'E':  The matrix A will be equilibrated if necessary, then */
+/*                  copied to AF and factored. */
+
+/*  TRANS   (input) CHARACTER*1 */
+/*          Specifies the form of the system of equations: */
+/*          = 'N':  A * X = B     (No transpose) */
+/*          = 'T':  A**T * X = B  (Transpose) */
+/*          = 'C':  A**H * X = B  (Transpose) */
+
+/*  N       (input) INTEGER */
+/*          The number of linear equations, i.e., the order of the */
+/*          matrix A.  N >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrices B and X.  NRHS >= 0. */
+
+/*  A       (input/output) REAL array, dimension (LDA,N) */
+/*          On entry, the N-by-N matrix A.  If FACT = 'F' and EQUED is */
+/*          not 'N', then A must have been equilibrated by the scaling */
+/*          factors in R and/or C.  A is not modified if FACT = 'F' or */
+/*          'N', or if FACT = 'E' and EQUED = 'N' on exit. */
+
+/*          On exit, if EQUED .ne. 'N', A is scaled as follows: */
+/*          EQUED = 'R':  A := diag(R) * A */
+/*          EQUED = 'C':  A := A * diag(C) */
+/*          EQUED = 'B':  A := diag(R) * A * diag(C). */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  AF      (input or output) REAL array, dimension (LDAF,N) */
+/*          If FACT = 'F', then AF is an input argument and on entry */
+/*          contains the factors L and U from the factorization */
+/*          A = P*L*U as computed by SGETRF.  If EQUED .ne. 'N', then */
+/*          AF is the factored form of the equilibrated matrix A. */
+
+/*          If FACT = 'N', then AF is an output argument and on exit */
+/*          returns the factors L and U from the factorization A = P*L*U */
+/*          of the original matrix A. */
+
+/*          If FACT = 'E', then AF is an output argument and on exit */
+/*          returns the factors L and U from the factorization A = P*L*U */
+/*          of the equilibrated matrix A (see the description of A for */
+/*          the form of the equilibrated matrix). */
+
+/*  LDAF    (input) INTEGER */
+/*          The leading dimension of the array AF.  LDAF >= max(1,N). */
+
+/*  IPIV    (input or output) INTEGER array, dimension (N) */
+/*          If FACT = 'F', then IPIV is an input argument and on entry */
+/*          contains the pivot indices from the factorization A = P*L*U */
+/*          as computed by SGETRF; row i of the matrix was interchanged */
+/*          with row IPIV(i). */
+
+/*          If FACT = 'N', then IPIV is an output argument and on exit */
+/*          contains the pivot indices from the factorization A = P*L*U */
+/*          of the original matrix A. */
+
+/*          If FACT = 'E', then IPIV is an output argument and on exit */
+/*          contains the pivot indices from the factorization A = P*L*U */
+/*          of the equilibrated matrix A. */
+
+/*  EQUED   (input or output) CHARACTER*1 */
+/*          Specifies the form of equilibration that was done. */
+/*          = 'N':  No equilibration (always true if FACT = 'N'). */
+/*          = 'R':  Row equilibration, i.e., A has been premultiplied by */
+/*                  diag(R). */
+/*          = 'C':  Column equilibration, i.e., A has been postmultiplied */
+/*                  by diag(C). */
+/*          = 'B':  Both row and column equilibration, i.e., A has been */
+/*                  replaced by diag(R) * A * diag(C). */
+/*          EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/*          output argument. */
+
+/*  R       (input or output) REAL array, dimension (N) */
+/*          The row scale factors for A.  If EQUED = 'R' or 'B', A is */
+/*          multiplied on the left by diag(R); if EQUED = 'N' or 'C', R */
+/*          is not accessed.  R is an input argument if FACT = 'F'; */
+/*          otherwise, R is an output argument.  If FACT = 'F' and */
+/*          EQUED = 'R' or 'B', each element of R must be positive. */
+
+/*  C       (input or output) REAL array, dimension (N) */
+/*          The column scale factors for A.  If EQUED = 'C' or 'B', A is */
+/*          multiplied on the right by diag(C); if EQUED = 'N' or 'R', C */
+/*          is not accessed.  C is an input argument if FACT = 'F'; */
+/*          otherwise, C is an output argument.  If FACT = 'F' and */
+/*          EQUED = 'C' or 'B', each element of C must be positive. */
+
+/*  B       (input/output) REAL array, dimension (LDB,NRHS) */
+/*          On entry, the N-by-NRHS right hand side matrix B. */
+/*          On exit, */
+/*          if EQUED = 'N', B is not modified; */
+/*          if TRANS = 'N' and EQUED = 'R' or 'B', B is overwritten by */
+/*          diag(R)*B; */
+/*          if TRANS = 'T' or 'C' and EQUED = 'C' or 'B', B is */
+/*          overwritten by diag(C)*B. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  X       (output) REAL array, dimension (LDX,NRHS) */
+/*          If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X */
+/*          to the original system of equations.  Note that A and B are */
+/*          modified on exit if EQUED .ne. 'N', and the solution to the */
+/*          equilibrated system is inv(diag(C))*X if TRANS = 'N' and */
+/*          EQUED = 'C' or 'B', or inv(diag(R))*X if TRANS = 'T' or 'C' */
+/*          and EQUED = 'R' or 'B'. */
+
+/*  LDX     (input) INTEGER */
+/*          The leading dimension of the array X.  LDX >= max(1,N). */
+
+/*  RCOND   (output) REAL */
+/*          The estimate of the reciprocal condition number of the matrix */
+/*          A after equilibration (if done).  If RCOND is less than the */
+/*          machine precision (in particular, if RCOND = 0), the matrix */
+/*          is singular to working precision.  This condition is */
+/*          indicated by a return code of INFO > 0. */
+
+/*  FERR    (output) REAL array, dimension (NRHS) */
+/*          The estimated forward error bound for each solution vector */
+/*          X(j) (the j-th column of the solution matrix X). */
+/*          If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/*          is an estimated upper bound for the magnitude of the largest */
+/*          element in (X(j) - XTRUE) divided by the magnitude of the */
+/*          largest element in X(j).  The estimate is as reliable as */
+/*          the estimate for RCOND, and is almost always a slight */
+/*          overestimate of the true error. */
+
+/*  BERR    (output) REAL array, dimension (NRHS) */
+/*          The componentwise relative backward error of each solution */
+/*          vector X(j) (i.e., the smallest relative change in */
+/*          any element of A or B that makes X(j) an exact solution). */
+
+/*  WORK    (workspace/output) REAL array, dimension (4*N) */
+/*          On exit, WORK(1) contains the reciprocal pivot growth */
+/*          factor norm(A)/norm(U). The "max absolute element" norm is */
+/*          used. If WORK(1) is much less than 1, then the stability */
+/*          of the LU factorization of the (equilibrated) matrix A */
+/*          could be poor. This also means that the solution X, condition */
+/*          estimator RCOND, and forward error bound FERR could be */
+/*          unreliable. If factorization fails with 0<INFO<=N, then */
+/*          WORK(1) contains the reciprocal pivot growth factor for the */
+/*          leading INFO columns of A. */
+
+/*  IWORK   (workspace) INTEGER array, dimension (N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, and i is */
+/*                <= N:  U(i,i) is exactly zero.  The factorization has */
+/*                       been completed, but the factor U is exactly */
+/*                       singular, so the solution and error bounds */
+/*                       could not be computed. RCOND = 0 is returned. */
+/*                = N+1: U is nonsingular, but RCOND is less than machine */
+/*                       precision, meaning that the matrix is singular */
+/*                       to working precision.  Nevertheless, the */
+/*                       solution and error bounds are computed because */
+/*                       there are a number of situations where the */
+/*                       computed solution can be more accurate than the */
+/*                       value of RCOND would suggest. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+/*  (f2c note: array base pointers are shifted by one element/column so the */
+/*  body can use Fortran-style 1-based indexing, e.g. a[i + j*a_dim1].) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    af_dim1 = *ldaf;
+    af_offset = 1 + af_dim1;
+    af -= af_offset;
+    --ipiv;
+    --r__;
+    --c__;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    nofact = lsame_(fact, "N");
+    equil = lsame_(fact, "E");
+    notran = lsame_(trans, "N");
+    if (nofact || equil) {
+	*(unsigned char *)equed = 'N';
+	rowequ = FALSE_;
+	colequ = FALSE_;
+    } else {
+/*  FACT = 'F': the equilibration state comes from the caller.  SMLNUM and */
+/*  BIGNUM are set only on this path because they are consumed only by the */
+/*  validation of the user-supplied scale factors R and C below. */
+	rowequ = lsame_(equed, "R") || lsame_(equed,
+		"B");
+	colequ = lsame_(equed, "C") || lsame_(equed,
+		"B");
+	smlnum = slamch_("Safe minimum");
+	bignum = 1.f / smlnum;
+    }
+
+/*     Test the input parameters. */
+
+    if (! nofact && ! equil && ! lsame_(fact, "F")) {
+	*info = -1;
+    } else if (! notran && ! lsame_(trans, "T") && !
+	    lsame_(trans, "C")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*n)) {
+	*info = -6;
+    } else if (*ldaf < max(1,*n)) {
+	*info = -8;
+    } else if (lsame_(fact, "F") && ! (rowequ || colequ
+	    || lsame_(equed, "N"))) {
+	*info = -10;
+    } else {
+	if (rowequ) {
+	    rcmin = bignum;
+	    rcmax = 0.f;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		r__1 = rcmin, r__2 = r__[j];
+		rcmin = dmin(r__1,r__2);
+/* Computing MAX */
+		r__1 = rcmax, r__2 = r__[j];
+		rcmax = dmax(r__1,r__2);
+/* L10: */
+	    }
+	    if (rcmin <= 0.f) {
+		*info = -11;
+	    } else if (*n > 0) {
+		rowcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+	    } else {
+		rowcnd = 1.f;
+	    }
+	}
+	if (colequ && *info == 0) {
+	    rcmin = bignum;
+	    rcmax = 0.f;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		r__1 = rcmin, r__2 = c__[j];
+		rcmin = dmin(r__1,r__2);
+/* Computing MAX */
+		r__1 = rcmax, r__2 = c__[j];
+		rcmax = dmax(r__1,r__2);
+/* L20: */
+	    }
+	    if (rcmin <= 0.f) {
+		*info = -12;
+	    } else if (*n > 0) {
+		colcnd = dmax(rcmin,smlnum) / dmin(rcmax,bignum);
+	    } else {
+		colcnd = 1.f;
+	    }
+	}
+	if (*info == 0) {
+	    if (*ldb < max(1,*n)) {
+		*info = -14;
+	    } else if (*ldx < max(1,*n)) {
+		*info = -16;
+	    }
+	}
+    }
+
+/*  Report the first illegal argument, if any, and bail out. */
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGESVX", &i__1);
+	return 0;
+    }
+
+    if (equil) {
+
+/*        Compute row and column scalings to equilibrate the matrix A. */
+
+	sgeequ_(n, n, &a[a_offset], lda, &r__[1], &c__[1], &rowcnd, &colcnd, &
+		amax, &infequ);
+	if (infequ == 0) {
+
+/*           Equilibrate the matrix. */
+
+	    slaqge_(n, n, &a[a_offset], lda, &r__[1], &c__[1], &rowcnd, &
+		    colcnd, &amax, equed);
+	    rowequ = lsame_(equed, "R") || lsame_(equed,
+		     "B");
+	    colequ = lsame_(equed, "C") || lsame_(equed,
+		     "B");
+	}
+    }
+
+/*     Scale the right hand side. */
+
+    if (notran) {
+	if (rowequ) {
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    b[i__ + j * b_dim1] = r__[i__] * b[i__ + j * b_dim1];
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+    } else if (colequ) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *n;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		b[i__ + j * b_dim1] = c__[i__] * b[i__ + j * b_dim1];
+/* L50: */
+	    }
+/* L60: */
+	}
+    }
+
+    if (nofact || equil) {
+
+/*        Compute the LU factorization of A. */
+
+	slacpy_("Full", n, n, &a[a_offset], lda, &af[af_offset], ldaf);
+	sgetrf_(n, n, &af[af_offset], ldaf, &ipiv[1], info);
+
+/*        Return if INFO is non-zero. */
+
+	if (*info > 0) {
+
+/*           Compute the reciprocal pivot growth factor of the */
+/*           leading rank-deficient INFO columns of A. */
+
+	    rpvgrw = slantr_("M", "U", "N", info, info, &af[af_offset], ldaf,
+		     &work[1]);
+	    if (rpvgrw == 0.f) {
+		rpvgrw = 1.f;
+	    } else {
+		rpvgrw = slange_("M", n, info, &a[a_offset], lda, &work[1]) / rpvgrw;
+	    }
+	    work[1] = rpvgrw;
+	    *rcond = 0.f;
+	    return 0;
+	}
+    }
+
+/*     Compute the norm of the matrix A and the */
+/*     reciprocal pivot growth factor RPVGRW. */
+
+    if (notran) {
+	*(unsigned char *)norm = '1';
+    } else {
+	*(unsigned char *)norm = 'I';
+    }
+    anorm = slange_(norm, n, n, &a[a_offset], lda, &work[1]);
+    rpvgrw = slantr_("M", "U", "N", n, n, &af[af_offset], ldaf, &work[1]);
+/*  Guard against an all-zero U (the division below would otherwise be 0/0). */
+    if (rpvgrw == 0.f) {
+	rpvgrw = 1.f;
+    } else {
+	rpvgrw = slange_("M", n, n, &a[a_offset], lda, &work[1]) /
+		rpvgrw;
+    }
+
+/*     Compute the reciprocal of the condition number of A. */
+
+    sgecon_(norm, n, &af[af_offset], ldaf, &anorm, rcond, &work[1], &iwork[1],
+	     info);
+
+/*     Compute the solution matrix X. */
+
+    slacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    sgetrs_(trans, n, nrhs, &af[af_offset], ldaf, &ipiv[1], &x[x_offset], ldx,
+	     info);
+
+/*     Use iterative refinement to improve the computed solution and */
+/*     compute error bounds and backward error estimates for it. */
+
+    sgerfs_(trans, n, nrhs, &a[a_offset], lda, &af[af_offset], ldaf, &ipiv[1],
+	     &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[
+	    1], &iwork[1], info);
+
+/*     Transform the solution matrix X to a solution of the original */
+/*     system. */
+
+    if (notran) {
+	if (colequ) {
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    x[i__ + j * x_dim1] = c__[i__] * x[i__ + j * x_dim1];
+/* L70: */
+		}
+/* L80: */
+	    }
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		ferr[j] /= colcnd;
+/* L90: */
+	    }
+	}
+    } else if (rowequ) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *n;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		x[i__ + j * x_dim1] = r__[i__] * x[i__ + j * x_dim1];
+/* L100: */
+	    }
+/* L110: */
+	}
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] /= rowcnd;
+/* L120: */
+	}
+    }
+
+/*     Set INFO = N+1 if the matrix is singular to working precision. */
+
+    if (*rcond < slamch_("Epsilon")) {
+	*info = *n + 1;
+    }
+
+    work[1] = rpvgrw;
+    return 0;
+
+/*     End of SGESVX */
+
+} /* sgesvx_ */
diff --git a/contrib/libs/clapack/sgetc2.c b/contrib/libs/clapack/sgetc2.c
new file mode 100644
index 0000000000..5c375cb113
--- /dev/null
+++ b/contrib/libs/clapack/sgetc2.c
@@ -0,0 +1,198 @@
+/* sgetc2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b10 = -1.f;
+
+/* Subroutine */ int sgetc2_(integer *n, real *a, integer *lda, integer *ipiv,
+ integer *jpiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1;
+
+ /* Local variables */
+ integer i__, j, ip, jp;
+ real eps;
+ integer ipv, jpv;
+ extern /* Subroutine */ int sger_(integer *, integer *, real *, real *,
+ integer *, real *, integer *, real *, integer *);
+ real smin, xmax;
+ extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+ integer *), slabad_(real *, real *);
+ extern doublereal slamch_(char *);
+ real bignum, smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGETC2 computes an LU factorization with complete pivoting of the */
+/* n-by-n matrix A. The factorization has the form A = P * L * U * Q, */
+/* where P and Q are permutation matrices, L is lower triangular with */
+/* unit diagonal elements and U is upper triangular. */
+
+/* This is the Level 2 BLAS algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA, N) */
+/* On entry, the n-by-n matrix A to be factored. */
+/* On exit, the factors L and U from the factorization */
+/* A = P*L*U*Q; the unit diagonal elements of L are not stored. */
+/* If U(k, k) appears to be less than SMIN, U(k, k) is given the */
+/* value of SMIN, i.e., giving a nonsingular perturbed system. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension(N). */
+/* The pivot indices; for 1 <= i <= N, row i of the */
+/* matrix has been interchanged with row IPIV(i). */
+
+/* JPIV (output) INTEGER array, dimension(N). */
+/* The pivot indices; for 1 <= j <= N, column j of the */
+/* matrix has been interchanged with column JPIV(j). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* > 0: if INFO = k, U(k, k) is likely to produce owerflow if */
+/* we try to solve for x in Ax = b. So U is perturbed to */
+/* avoid the overflow. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Set constants to control overflow */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ --jpiv;
+
+ /* Function Body */
+ *info = 0;
+ eps = slamch_("P");
+ smlnum = slamch_("S") / eps;
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+
+/* Factorize A using complete pivoting. */
+/* Set pivots less than SMIN to SMIN. */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Find max element in matrix A */
+
+ xmax = 0.f;
+ i__2 = *n;
+ for (ip = i__; ip <= i__2; ++ip) {
+ i__3 = *n;
+ for (jp = i__; jp <= i__3; ++jp) {
+ if ((r__1 = a[ip + jp * a_dim1], dabs(r__1)) >= xmax) {
+ xmax = (r__1 = a[ip + jp * a_dim1], dabs(r__1));
+ ipv = ip;
+ jpv = jp;
+ }
+/* L10: */
+ }
+/* L20: */
+ }
+ if (i__ == 1) {
+/* Computing MAX */
+ r__1 = eps * xmax;
+ smin = dmax(r__1,smlnum);
+ }
+
+/* Swap rows */
+
+ if (ipv != i__) {
+ sswap_(n, &a[ipv + a_dim1], lda, &a[i__ + a_dim1], lda);
+ }
+ ipiv[i__] = ipv;
+
+/* Swap columns */
+
+ if (jpv != i__) {
+ sswap_(n, &a[jpv * a_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &
+ c__1);
+ }
+ jpiv[i__] = jpv;
+
+/* Check for singularity */
+
+ if ((r__1 = a[i__ + i__ * a_dim1], dabs(r__1)) < smin) {
+ *info = i__;
+ a[i__ + i__ * a_dim1] = smin;
+ }
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ a[j + i__ * a_dim1] /= a[i__ + i__ * a_dim1];
+/* L30: */
+ }
+ i__2 = *n - i__;
+ i__3 = *n - i__;
+ sger_(&i__2, &i__3, &c_b10, &a[i__ + 1 + i__ * a_dim1], &c__1, &a[i__
+ + (i__ + 1) * a_dim1], lda, &a[i__ + 1 + (i__ + 1) * a_dim1],
+ lda);
+/* L40: */
+ }
+
+ if ((r__1 = a[*n + *n * a_dim1], dabs(r__1)) < smin) {
+ *info = *n;
+ a[*n + *n * a_dim1] = smin;
+ }
+
+ return 0;
+
+/* End of SGETC2 */
+
+} /* sgetc2_ */
diff --git a/contrib/libs/clapack/sgetf2.c b/contrib/libs/clapack/sgetf2.c
new file mode 100644
index 0000000000..a8393fb2bc
--- /dev/null
+++ b/contrib/libs/clapack/sgetf2.c
@@ -0,0 +1,192 @@
+/* sgetf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b8 = -1.f;
+
+/* Subroutine */ int sgetf2_(integer *m, integer *n, real *a, integer *lda,
+ integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1;
+
+ /* Local variables */
+ integer i__, j, jp;
+ extern /* Subroutine */ int sger_(integer *, integer *, real *, real *,
+ integer *, real *, integer *, real *, integer *), sscal_(integer *
+, real *, real *, integer *);
+ real sfmin;
+ extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+ integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGETF2 computes an LU factorization of a general m-by-n matrix A */
+/* using partial pivoting with row interchanges. */
+
+/* The factorization has the form */
+/* A = P * L * U */
+/* where P is a permutation matrix, L is lower triangular with unit */
+/* diagonal elements (lower trapezoidal if m > n), and U is upper */
+/* triangular (upper trapezoidal if m < n). */
+
+/* This is the right-looking Level 2 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the m by n matrix to be factored. */
+/* On exit, the factors L and U from the factorization */
+/* A = P*L*U; the unit diagonal elements of L are not stored. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* IPIV (output) INTEGER array, dimension (min(M,N)) */
+/* The pivot indices; for 1 <= i <= min(M,N), row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, U(k,k) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and division by zero will occur if it is used */
+/* to solve a system of equations. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGETF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Compute machine safe minimum */
+
+ sfmin = slamch_("S");
+
+ i__1 = min(*m,*n);
+ for (j = 1; j <= i__1; ++j) {
+
+/* Find pivot and test for singularity. */
+
+ i__2 = *m - j + 1;
+ jp = j - 1 + isamax_(&i__2, &a[j + j * a_dim1], &c__1);
+ ipiv[j] = jp;
+ if (a[jp + j * a_dim1] != 0.f) {
+
+/* Apply the interchange to columns 1:N. */
+
+ if (jp != j) {
+ sswap_(n, &a[j + a_dim1], lda, &a[jp + a_dim1], lda);
+ }
+
+/* Compute elements J+1:M of J-th column. */
+
+ if (j < *m) {
+ if ((r__1 = a[j + j * a_dim1], dabs(r__1)) >= sfmin) {
+ i__2 = *m - j;
+ r__1 = 1.f / a[j + j * a_dim1];
+ sscal_(&i__2, &r__1, &a[j + 1 + j * a_dim1], &c__1);
+ } else {
+ i__2 = *m - j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[j + i__ + j * a_dim1] /= a[j + j * a_dim1];
+/* L20: */
+ }
+ }
+ }
+
+ } else if (*info == 0) {
+
+ *info = j;
+ }
+
+ if (j < min(*m,*n)) {
+
+/* Update trailing submatrix. */
+
+ i__2 = *m - j;
+ i__3 = *n - j;
+ sger_(&i__2, &i__3, &c_b8, &a[j + 1 + j * a_dim1], &c__1, &a[j + (
+ j + 1) * a_dim1], lda, &a[j + 1 + (j + 1) * a_dim1], lda);
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of SGETF2 */
+
+} /* sgetf2_ */
diff --git a/contrib/libs/clapack/sgetrf.c b/contrib/libs/clapack/sgetrf.c
new file mode 100644
index 0000000000..fc9e5642d1
--- /dev/null
+++ b/contrib/libs/clapack/sgetrf.c
@@ -0,0 +1,217 @@
+/* sgetrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b16 = 1.f;
+static real c_b19 = -1.f;
+
+/* Subroutine */ int sgetrf_(integer *m, integer *n, real *a, integer *lda,
+ integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+
+ /* Local variables */
+ integer i__, j, jb, nb, iinfo;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *), strsm_(char *, char *, char *,
+ char *, integer *, integer *, real *, real *, integer *, real *,
+ integer *), sgetf2_(integer *,
+ integer *, real *, integer *, integer *, integer *), xerbla_(char
+ *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int slaswp_(integer *, real *, integer *, integer
+ *, integer *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGETRF computes an LU factorization of a general M-by-N matrix A */
+/* using partial pivoting with row interchanges. */
+
+/* The factorization has the form */
+/* A = P * L * U */
+/* where P is a permutation matrix, L is lower triangular with unit */
+/* diagonal elements (lower trapezoidal if m > n), and U is upper */
+/* triangular (upper trapezoidal if m < n). */
+
+/* This is the right-looking Level 3 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix to be factored. */
+/* On exit, the factors L and U from the factorization */
+/* A = P*L*U; the unit diagonal elements of L are not stored. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* IPIV (output) INTEGER array, dimension (min(M,N)) */
+/* The pivot indices; for 1 <= i <= min(M,N), row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and division by zero will occur if it is used */
+/* to solve a system of equations. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGETRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "SGETRF", " ", m, n, &c_n1, &c_n1);
+ if (nb <= 1 || nb >= min(*m,*n)) {
+
+/* Use unblocked code. */
+
+ sgetf2_(m, n, &a[a_offset], lda, &ipiv[1], info);
+ } else {
+
+/* Use blocked code. */
+
+ i__1 = min(*m,*n);
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__3 = min(*m,*n) - j + 1;
+ jb = min(i__3,nb);
+
+/* Factor diagonal and subdiagonal blocks and test for exact */
+/* singularity. */
+
+ i__3 = *m - j + 1;
+ sgetf2_(&i__3, &jb, &a[j + j * a_dim1], lda, &ipiv[j], &iinfo);
+
+/* Adjust INFO and the pivot indices. */
+
+ if (*info == 0 && iinfo > 0) {
+ *info = iinfo + j - 1;
+ }
+/* Computing MIN */
+ i__4 = *m, i__5 = j + jb - 1;
+ i__3 = min(i__4,i__5);
+ for (i__ = j; i__ <= i__3; ++i__) {
+ ipiv[i__] = j - 1 + ipiv[i__];
+/* L10: */
+ }
+
+/* Apply interchanges to columns 1:J-1. */
+
+ i__3 = j - 1;
+ i__4 = j + jb - 1;
+ slaswp_(&i__3, &a[a_offset], lda, &j, &i__4, &ipiv[1], &c__1);
+
+ if (j + jb <= *n) {
+
+/* Apply interchanges to columns J+JB:N. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j + jb - 1;
+ slaswp_(&i__3, &a[(j + jb) * a_dim1 + 1], lda, &j, &i__4, &
+ ipiv[1], &c__1);
+
+/* Compute block row of U. */
+
+ i__3 = *n - j - jb + 1;
+ strsm_("Left", "Lower", "No transpose", "Unit", &jb, &i__3, &
+ c_b16, &a[j + j * a_dim1], lda, &a[j + (j + jb) *
+ a_dim1], lda);
+ if (j + jb <= *m) {
+
+/* Update trailing submatrix. */
+
+ i__3 = *m - j - jb + 1;
+ i__4 = *n - j - jb + 1;
+ sgemm_("No transpose", "No transpose", &i__3, &i__4, &jb,
+ &c_b19, &a[j + jb + j * a_dim1], lda, &a[j + (j +
+ jb) * a_dim1], lda, &c_b16, &a[j + jb + (j + jb) *
+ a_dim1], lda);
+ }
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of SGETRF */
+
+} /* sgetrf_ */
diff --git a/contrib/libs/clapack/sgetri.c b/contrib/libs/clapack/sgetri.c
new file mode 100644
index 0000000000..af5156b5f4
--- /dev/null
+++ b/contrib/libs/clapack/sgetri.c
@@ -0,0 +1,259 @@
+/* sgetri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static real c_b20 = -1.f;
+static real c_b22 = 1.f;
+
+/* Subroutine */ int sgetri_(integer *n, real *a, integer *lda, integer *ipiv,
+ real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, jb, nb, jj, jp, nn, iws, nbmin;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *), sgemv_(char *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *), sswap_(integer *, real *, integer *,
+ real *, integer *), strsm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ldwork, lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int strtri_(char *, char *, integer *, real *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGETRI computes the inverse of a matrix using the LU factorization */
+/* computed by SGETRF. */
+
+/* This method inverts U and then computes inv(A) by solving the system */
+/* inv(A)*L = inv(U) for inv(A). */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the factors L and U from the factorization */
+/* A = P*L*U as computed by SGETRF. */
+/* On exit, if INFO = 0, the inverse of the original matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices from SGETRF; for 1<=i<=N, row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO=0, then WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimal performance LWORK >= N*NB, where NB is */
+/* the optimal blocksize returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) is exactly zero; the matrix is */
+/* singular and its inverse could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "SGETRI", " ", n, &c_n1, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ work[1] = (real) lwkopt;
+ lquery = *lwork == -1;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*lda < max(1,*n)) {
+ *info = -3;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGETRI", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form inv(U). If INFO > 0 from STRTRI, then U is singular, */
+/* and the inverse is not computed. */
+
+ strtri_("Upper", "Non-unit", n, &a[a_offset], lda, info);
+ if (*info > 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = *n;
+ if (nb > 1 && nb < *n) {
+/* Computing MAX */
+ i__1 = ldwork * nb;
+ iws = max(i__1,1);
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SGETRI", " ", n, &c_n1, &c_n1, &
+ c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = *n;
+ }
+
+/* Solve the equation inv(A)*L = inv(U) for inv(A). */
+
+ if (nb < nbmin || nb >= *n) {
+
+/* Use unblocked code. */
+
+ for (j = *n; j >= 1; --j) {
+
+/* Copy current column of L to WORK and replace with zeros. */
+
+ i__1 = *n;
+ for (i__ = j + 1; i__ <= i__1; ++i__) {
+ work[i__] = a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = 0.f;
+/* L10: */
+ }
+
+/* Compute current column of inv(A). */
+
+ if (j < *n) {
+ i__1 = *n - j;
+ sgemv_("No transpose", n, &i__1, &c_b20, &a[(j + 1) * a_dim1
+ + 1], lda, &work[j + 1], &c__1, &c_b22, &a[j * a_dim1
+ + 1], &c__1);
+ }
+/* L20: */
+ }
+ } else {
+
+/* Use blocked code. */
+
+ nn = (*n - 1) / nb * nb + 1;
+ i__1 = -nb;
+ for (j = nn; i__1 < 0 ? j >= 1 : j <= 1; j += i__1) {
+/* Computing MIN */
+ i__2 = nb, i__3 = *n - j + 1;
+ jb = min(i__2,i__3);
+
+/* Copy current block column of L to WORK and replace with */
+/* zeros. */
+
+ i__2 = j + jb - 1;
+ for (jj = j; jj <= i__2; ++jj) {
+ i__3 = *n;
+ for (i__ = jj + 1; i__ <= i__3; ++i__) {
+ work[i__ + (jj - j) * ldwork] = a[i__ + jj * a_dim1];
+ a[i__ + jj * a_dim1] = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Compute current block column of inv(A). */
+
+ if (j + jb <= *n) {
+ i__2 = *n - j - jb + 1;
+ sgemm_("No transpose", "No transpose", n, &jb, &i__2, &c_b20,
+ &a[(j + jb) * a_dim1 + 1], lda, &work[j + jb], &
+ ldwork, &c_b22, &a[j * a_dim1 + 1], lda);
+ }
+ strsm_("Right", "Lower", "No transpose", "Unit", n, &jb, &c_b22, &
+ work[j], &ldwork, &a[j * a_dim1 + 1], lda);
+/* L50: */
+ }
+ }
+
+/* Apply column interchanges. */
+
+ for (j = *n - 1; j >= 1; --j) {
+ jp = ipiv[j];
+ if (jp != j) {
+ sswap_(n, &a[j * a_dim1 + 1], &c__1, &a[jp * a_dim1 + 1], &c__1);
+ }
+/* L60: */
+ }
+
+ work[1] = (real) iws;
+ return 0;
+
+/* End of SGETRI */
+
+} /* sgetri_ */
diff --git a/contrib/libs/clapack/sgetrs.c b/contrib/libs/clapack/sgetrs.c
new file mode 100644
index 0000000000..2ab2c4d1b8
--- /dev/null
+++ b/contrib/libs/clapack/sgetrs.c
@@ -0,0 +1,185 @@
+/* sgetrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b12 = 1.f;
+static integer c_n1 = -1;
+
+/* Subroutine */ int sgetrs_(char *trans, integer *n, integer *nrhs, real *a,
+ integer *lda, integer *ipiv, real *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int strsm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+), xerbla_(char *, integer *);
+ logical notran;
+ extern /* Subroutine */ int slaswp_(integer *, real *, integer *, integer
+ *, integer *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGETRS solves a system of linear equations */
+/* A * X = B or A' * X = B */
+/* with a general N-by-N matrix A using the LU factorization computed */
+/* by SGETRF. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A'* X = B (Transpose) */
+/* = 'C': A'* X = B (Conjugate transpose = Transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The factors L and U from the factorization A = P*L*U */
+/* as computed by SGETRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices from SGETRF; for 1<=i<=N, row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGETRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+ if (notran) {
+
+/* Solve A * X = B. */
+
+/* Apply row interchanges to the right hand sides. */
+
+ slaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c__1);
+
+/* Solve L*X = B, overwriting B with X. */
+
+ strsm_("Left", "Lower", "No transpose", "Unit", n, nrhs, &c_b12, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+/* Solve U*X = B, overwriting B with X. */
+
+ strsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b12, &
+ a[a_offset], lda, &b[b_offset], ldb);
+ } else {
+
+/* Solve A' * X = B. */
+
+/* Solve U'*X = B, overwriting B with X. */
+
+ strsm_("Left", "Upper", "Transpose", "Non-unit", n, nrhs, &c_b12, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+/* Solve L'*X = B, overwriting B with X. */
+
+ strsm_("Left", "Lower", "Transpose", "Unit", n, nrhs, &c_b12, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+/* Apply row interchanges to the solution vectors. */
+
+ slaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c_n1);
+ }
+
+ return 0;
+
+/* End of SGETRS */
+
+} /* sgetrs_ */
diff --git a/contrib/libs/clapack/sggbak.c b/contrib/libs/clapack/sggbak.c
new file mode 100644
index 0000000000..30460ae52b
--- /dev/null
+++ b/contrib/libs/clapack/sggbak.c
@@ -0,0 +1,274 @@
+/* sggbak.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sggbak_(char *job, char *side, integer *n, integer *ilo,
+	 integer *ihi, real *lscale, real *rscale, integer *m, real *v,
+	integer *ldv, integer *info)
+{
+    /* System generated locals */
+    integer v_dim1, v_offset, i__1;
+
+    /* Local variables */
+    integer i__, k;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+    logical leftv;
+    extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+	    integer *), xerbla_(char *, integer *);
+    logical rightv;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGGBAK forms the right or left eigenvectors of a real generalized */
+/* eigenvalue problem A*x = lambda*B*x, by backward transformation on */
+/* the computed eigenvectors of the balanced pair of matrices output by */
+/* SGGBAL. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies the type of backward transformation required: */
+/* = 'N': do nothing, return immediately; */
+/* = 'P': do backward transformation for permutation only; */
+/* = 'S': do backward transformation for scaling only; */
+/* = 'B': do backward transformations for both permutation and */
+/* scaling. */
+/* JOB must be the same as the argument JOB supplied to SGGBAL. */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'R': V contains right eigenvectors; */
+/* = 'L': V contains left eigenvectors. */
+
+/* N (input) INTEGER */
+/* The number of rows of the matrix V. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* The integers ILO and IHI determined by SGGBAL. */
+/* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/* LSCALE (input) REAL array, dimension (N) */
+/* Details of the permutations and/or scaling factors applied */
+/* to the left side of A and B, as returned by SGGBAL. */
+
+/* RSCALE (input) REAL array, dimension (N) */
+/* Details of the permutations and/or scaling factors applied */
+/* to the right side of A and B, as returned by SGGBAL. */
+
+/* M (input) INTEGER */
+/* The number of columns of the matrix V. M >= 0. */
+
+/* V (input/output) REAL array, dimension (LDV,M) */
+/* On entry, the matrix of right or left eigenvectors to be */
+/* transformed, as returned by STGEVC. */
+/* On exit, V is overwritten by the transformed eigenvectors. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the matrix V. LDV >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* See R.C. Ward, Balancing the generalized eigenvalue problem, */
+/* SIAM J. Sci. Stat. Comp. 2 (1981), 141-152. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+/* f2c convention: shift the array pointers so that 1-based Fortran */
+/* indexing (lscale[1..n], v[i + j*v_dim1]) works directly in C. */
+    /* Parameter adjustments */
+    --lscale;
+    --rscale;
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+
+    /* Function Body */
+    rightv = lsame_(side, "R");
+    leftv = lsame_(side, "L");
+
+    *info = 0;
+    if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+	    && ! lsame_(job, "B")) {
+	*info = -1;
+    } else if (! rightv && ! leftv) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ilo < 1) {
+	*info = -4;
+    } else if (*n == 0 && *ihi == 0 && *ilo != 1) {
+	*info = -4;
+    } else if (*n > 0 && (*ihi < *ilo || *ihi > max(1,*n))) {
+	*info = -5;
+    } else if (*n == 0 && *ilo == 1 && *ihi != 0) {
+	*info = -5;
+    } else if (*m < 0) {
+	*info = -8;
+    } else if (*ldv < max(1,*n)) {
+	*info = -10;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGGBAK", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+    if (*m == 0) {
+	return 0;
+    }
+    if (lsame_(job, "N")) {
+	return 0;
+    }
+
+/* ILO == IHI means SGGBAL scaled at most a single row/column, so the */
+/* scaling phase is a no-op; jump straight to the permutation phase. */
+    if (*ilo == *ihi) {
+	goto L30;
+    }
+
+/* Backward balance */
+
+    if (lsame_(job, "S") || lsame_(job, "B")) {
+
+/* Backward transformation on right eigenvectors */
+
+/* For ILO <= i <= IHI, RSCALE(i)/LSCALE(i) hold the diagonal scale */
+/* factors D(i) applied by SGGBAL; multiply row i of V by D(i) to */
+/* undo the balancing on the eigenvectors. */
+	if (rightv) {
+	    i__1 = *ihi;
+	    for (i__ = *ilo; i__ <= i__1; ++i__) {
+		sscal_(m, &rscale[i__], &v[i__ + v_dim1], ldv);
+/* L10: */
+	    }
+	}
+
+/* Backward transformation on left eigenvectors */
+
+	if (leftv) {
+	    i__1 = *ihi;
+	    for (i__ = *ilo; i__ <= i__1; ++i__) {
+		sscal_(m, &lscale[i__], &v[i__ + v_dim1], ldv);
+/* L20: */
+	    }
+	}
+    }
+
+/* Backward permutation */
+
+/* Outside [ILO,IHI], RSCALE(i)/LSCALE(i) store the index of the row */
+/* interchanged with row i (as a real, truncated back to integer in */
+/* k below).  The interchanges are replayed in the reverse of the */
+/* order SGGBAL applied them: rows ILO-1..1 descending, then rows */
+/* IHI+1..N ascending. */
+L30:
+    if (lsame_(job, "P") || lsame_(job, "B")) {
+
+/* Backward permutation on right eigenvectors */
+
+	if (rightv) {
+	    if (*ilo == 1) {
+		goto L50;
+	    }
+
+	    for (i__ = *ilo - 1; i__ >= 1; --i__) {
+		k = rscale[i__];
+		if (k == i__) {
+		    goto L40;
+		}
+		sswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L40:
+		;
+	    }
+
+L50:
+	    if (*ihi == *n) {
+		goto L70;
+	    }
+	    i__1 = *n;
+	    for (i__ = *ihi + 1; i__ <= i__1; ++i__) {
+		k = rscale[i__];
+		if (k == i__) {
+		    goto L60;
+		}
+		sswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L60:
+		;
+	    }
+	}
+
+/* Backward permutation on left eigenvectors */
+
+L70:
+	if (leftv) {
+	    if (*ilo == 1) {
+		goto L90;
+	    }
+	    for (i__ = *ilo - 1; i__ >= 1; --i__) {
+		k = lscale[i__];
+		if (k == i__) {
+		    goto L80;
+		}
+		sswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L80:
+		;
+	    }
+
+L90:
+	    if (*ihi == *n) {
+		goto L110;
+	    }
+	    i__1 = *n;
+	    for (i__ = *ihi + 1; i__ <= i__1; ++i__) {
+		k = lscale[i__];
+		if (k == i__) {
+		    goto L100;
+		}
+		sswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L100:
+		;
+	    }
+	}
+    }
+
+L110:
+
+    return 0;
+
+/* End of SGGBAK */
+
+} /* sggbak_ */
diff --git a/contrib/libs/clapack/sggbal.c b/contrib/libs/clapack/sggbal.c
new file mode 100644
index 0000000000..c2df1fe332
--- /dev/null
+++ b/contrib/libs/clapack/sggbal.c
@@ -0,0 +1,623 @@
+/* sggbal.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b35 = 10.f;
+static real c_b71 = .5f;
+
+/* Subroutine */ int sggbal_(char *job, integer *n, real *a, integer *lda,
+	real *b, integer *ldb, integer *ilo, integer *ihi, real *lscale, real
+	*rscale, real *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+    real r__1, r__2, r__3;
+
+    /* Builtin functions */
+    double r_lg10(real *), r_sign(real *, real *), pow_ri(real *, integer *);
+
+    /* Local variables */
+    integer i__, j, k, l, m;
+    real t;
+    integer jc;
+    real ta, tb, tc;
+    integer ir;
+    real ew;
+    integer it, nr, ip1, jp1, lm1;
+    real cab, rab, ewc, cor, sum;
+    integer nrp2, icab, lcab;
+    real beta, coef;
+    integer irab, lrab;
+    real basl, cmax;
+    extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+    real coef2, coef5, gamma, alpha;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+    real sfmin, sfmax;
+    integer iflow;
+    extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+	    integer *);
+    integer kount;
+    extern /* Subroutine */ int saxpy_(integer *, real *, real *, integer *,
+	    real *, integer *);
+    real pgamma;
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer isamax_(integer *, real *, integer *);
+    integer lsfmin, lsfmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGGBAL balances a pair of general real matrices (A,B). This */
+/* involves, first, permuting A and B by similarity transformations to */
+/* isolate eigenvalues in the first 1 to ILO$-$1 and last IHI+1 to N */
+/* elements on the diagonal; and second, applying a diagonal similarity */
+/* transformation to rows and columns ILO to IHI to make the rows */
+/* and columns as close in norm as possible. Both steps are optional. */
+
+/* Balancing may reduce the 1-norm of the matrices, and improve the */
+/* accuracy of the computed eigenvalues and/or eigenvectors in the */
+/* generalized eigenvalue problem A*x = lambda*B*x. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies the operations to be performed on A and B: */
+/* = 'N': none: simply set ILO = 1, IHI = N, LSCALE(I) = 1.0 */
+/* and RSCALE(I) = 1.0 for i = 1,...,N. */
+/* = 'P': permute only; */
+/* = 'S': scale only; */
+/* = 'B': both permute and scale. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the input matrix A. */
+/* On exit, A is overwritten by the balanced matrix. */
+/* If JOB = 'N', A is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) REAL array, dimension (LDB,N) */
+/* On entry, the input matrix B. */
+/* On exit, B is overwritten by the balanced matrix. */
+/* If JOB = 'N', B is not referenced. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* ILO (output) INTEGER */
+/* IHI (output) INTEGER */
+/* ILO and IHI are set to integers such that on exit */
+/* A(i,j) = 0 and B(i,j) = 0 if i > j and */
+/* j = 1,...,ILO-1 or i = IHI+1,...,N. */
+/* If JOB = 'N' or 'S', ILO = 1 and IHI = N. */
+
+/* LSCALE (output) REAL array, dimension (N) */
+/* Details of the permutations and scaling factors applied */
+/* to the left side of A and B. If P(j) is the index of the */
+/* row interchanged with row j, and D(j) */
+/* is the scaling factor applied to row j, then */
+/* LSCALE(j) = P(j) for J = 1,...,ILO-1 */
+/* = D(j) for J = ILO,...,IHI */
+/* = P(j) for J = IHI+1,...,N. */
+/* The order in which the interchanges are made is N to IHI+1, */
+/* then 1 to ILO-1. */
+
+/* RSCALE (output) REAL array, dimension (N) */
+/* Details of the permutations and scaling factors applied */
+/* to the right side of A and B. If P(j) is the index of the */
+/* column interchanged with column j, and D(j) */
+/* is the scaling factor applied to column j, then */
+/* LSCALE(j) = P(j) for J = 1,...,ILO-1 */
+/* = D(j) for J = ILO,...,IHI */
+/* = P(j) for J = IHI+1,...,N. */
+/* The order in which the interchanges are made is N to IHI+1, */
+/* then 1 to ILO-1. */
+
+/* WORK (workspace) REAL array, dimension (lwork) */
+/* lwork must be at least max(1,6*N) when JOB = 'S' or 'B', and */
+/* at least 1 when JOB = 'N' or 'P'. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* See R.C. WARD, Balancing the generalized eigenvalue problem, */
+/* SIAM J. Sci. Stat. Comp. 2 (1981), 141-152. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+/* f2c convention: shift array pointers so 1-based Fortran indexing */
+/* (a[i + j*a_dim1], lscale[1..n], work[1..6n]) works directly in C. */
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --lscale;
+    --rscale;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+	    && ! lsame_(job, "B")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*ldb < max(1,*n)) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGGBAL", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	*ilo = 1;
+	*ihi = *n;
+	return 0;
+    }
+
+    if (*n == 1) {
+	*ilo = 1;
+	*ihi = *n;
+	lscale[1] = 1.f;
+	rscale[1] = 1.f;
+	return 0;
+    }
+
+    if (lsame_(job, "N")) {
+	*ilo = 1;
+	*ihi = *n;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    lscale[i__] = 1.f;
+	    rscale[i__] = 1.f;
+/* L10: */
+	}
+	return 0;
+    }
+
+/* [K,L] is the active window of rows/columns not yet isolated by the */
+/* permutation phase; it starts as the whole matrix and shrinks as */
+/* eigenvalues are deflated to the edges. */
+    k = 1;
+    l = *n;
+    if (lsame_(job, "S")) {
+	goto L190;
+    }
+
+    goto L30;
+
+/* Permute the matrices A and B to isolate the eigenvalues. */
+
+/* Find row with one nonzero in columns 1 through L */
+
+L20:
+    l = lm1;
+    if (l != 1) {
+	goto L30;
+    }
+
+    rscale[1] = 1.f;
+    lscale[1] = 1.f;
+    goto L190;
+
+L30:
+    lm1 = l - 1;
+    for (i__ = l; i__ >= 1; --i__) {
+	i__1 = lm1;
+	for (j = 1; j <= i__1; ++j) {
+	    jp1 = j + 1;
+	    if (a[i__ + j * a_dim1] != 0.f || b[i__ + j * b_dim1] != 0.f) {
+		goto L50;
+	    }
+/* L40: */
+	}
+	j = l;
+	goto L70;
+
+L50:
+	i__1 = l;
+	for (j = jp1; j <= i__1; ++j) {
+	    if (a[i__ + j * a_dim1] != 0.f || b[i__ + j * b_dim1] != 0.f) {
+		goto L80;
+	    }
+/* L60: */
+	}
+	j = jp1 - 1;
+
+L70:
+	m = l;
+	iflow = 1;
+	goto L160;
+L80:
+	;
+    }
+    goto L100;
+
+/* Find column with one nonzero in rows K through N */
+
+L90:
+    ++k;
+
+L100:
+    i__1 = l;
+    for (j = k; j <= i__1; ++j) {
+	i__2 = lm1;
+	for (i__ = k; i__ <= i__2; ++i__) {
+	    ip1 = i__ + 1;
+	    if (a[i__ + j * a_dim1] != 0.f || b[i__ + j * b_dim1] != 0.f) {
+		goto L120;
+	    }
+/* L110: */
+	}
+	i__ = l;
+	goto L140;
+L120:
+	i__2 = l;
+	for (i__ = ip1; i__ <= i__2; ++i__) {
+	    if (a[i__ + j * a_dim1] != 0.f || b[i__ + j * b_dim1] != 0.f) {
+		goto L150;
+	    }
+/* L130: */
+	}
+	i__ = ip1 - 1;
+L140:
+	m = k;
+	iflow = 2;
+	goto L160;
+L150:
+	;
+    }
+    goto L190;
+
+/* Permute rows M and I */
+
+/* lscale[m] records (as a real) the index of the row interchanged */
+/* with row m, so SGGBAK can replay the permutation later. */
+L160:
+    lscale[m] = (real) i__;
+    if (i__ == m) {
+	goto L170;
+    }
+    i__1 = *n - k + 1;
+    sswap_(&i__1, &a[i__ + k * a_dim1], lda, &a[m + k * a_dim1], lda);
+    i__1 = *n - k + 1;
+    sswap_(&i__1, &b[i__ + k * b_dim1], ldb, &b[m + k * b_dim1], ldb);
+
+/* Permute columns M and J */
+
+L170:
+    rscale[m] = (real) j;
+    if (j == m) {
+	goto L180;
+    }
+    sswap_(&l, &a[j * a_dim1 + 1], &c__1, &a[m * a_dim1 + 1], &c__1);
+    sswap_(&l, &b[j * b_dim1 + 1], &c__1, &b[m * b_dim1 + 1], &c__1);
+
+/* iflow selects which search loop to resume: 1 = row search (L20), */
+/* 2 = column search (L90). */
+L180:
+    switch (iflow) {
+	case 1: goto L20;
+	case 2: goto L90;
+    }
+
+L190:
+    *ilo = k;
+    *ihi = l;
+
+    if (lsame_(job, "P")) {
+	i__1 = *ihi;
+	for (i__ = *ilo; i__ <= i__1; ++i__) {
+	    lscale[i__] = 1.f;
+	    rscale[i__] = 1.f;
+/* L195: */
+	}
+	return 0;
+    }
+
+    if (*ilo == *ihi) {
+	return 0;
+    }
+
+/* Balance the submatrix in rows ILO to IHI. */
+
+/* WORK is used as six length-N slices (1-based): */
+/* WORK(1..N), WORK(N+1..2N): current right/left log-scale iterates; */
+/* WORK(2N+1..3N), WORK(3N+1..4N): matrix-vector products of the CG step; */
+/* WORK(4N+1..5N), WORK(5N+1..6N): right-hand sides / residuals. */
+    nr = *ihi - *ilo + 1;
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	rscale[i__] = 0.f;
+	lscale[i__] = 0.f;
+
+	work[i__] = 0.f;
+	work[i__ + *n] = 0.f;
+	work[i__ + (*n << 1)] = 0.f;
+	work[i__ + *n * 3] = 0.f;
+	work[i__ + (*n << 2)] = 0.f;
+	work[i__ + *n * 5] = 0.f;
+/* L200: */
+    }
+
+/* Compute right side vector in resulting linear equations */
+
+/* basl = log10(10) = 1; nonzero entries are measured on a log10 scale */
+/* so that scale factors can later be rounded to powers of 10. */
+    basl = r_lg10(&c_b35);
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	i__2 = *ihi;
+	for (j = *ilo; j <= i__2; ++j) {
+	    tb = b[i__ + j * b_dim1];
+	    ta = a[i__ + j * a_dim1];
+	    if (ta == 0.f) {
+		goto L210;
+	    }
+	    r__1 = dabs(ta);
+	    ta = r_lg10(&r__1) / basl;
+L210:
+	    if (tb == 0.f) {
+		goto L220;
+	    }
+	    r__1 = dabs(tb);
+	    tb = r_lg10(&r__1) / basl;
+L220:
+	    work[i__ + (*n << 2)] = work[i__ + (*n << 2)] - ta - tb;
+	    work[j + *n * 5] = work[j + *n * 5] - ta - tb;
+/* L230: */
+	}
+/* L240: */
+    }
+
+    coef = 1.f / (real) (nr << 1);
+    coef2 = coef * coef;
+    coef5 = coef2 * .5f;
+    nrp2 = nr + 2;
+    beta = 0.f;
+    it = 1;
+
+/* Start generalized conjugate gradient iteration */
+
+/* Iteratively solves for the log10 scale factors (Ward's algorithm); */
+/* at most NR+2 iterations, or until the residual gamma vanishes or */
+/* the correction becomes smaller than 0.5 (see cmax test below). */
+L250:
+
+    gamma = sdot_(&nr, &work[*ilo + (*n << 2)], &c__1, &work[*ilo + (*n << 2)]
+, &c__1) + sdot_(&nr, &work[*ilo + *n * 5], &c__1, &work[*ilo + *
+	    n * 5], &c__1);
+
+    ew = 0.f;
+    ewc = 0.f;
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	ew += work[i__ + (*n << 2)];
+	ewc += work[i__ + *n * 5];
+/* L260: */
+    }
+
+/* Computing 2nd power */
+    r__1 = ew;
+/* Computing 2nd power */
+    r__2 = ewc;
+/* Computing 2nd power */
+    r__3 = ew - ewc;
+    gamma = coef * gamma - coef2 * (r__1 * r__1 + r__2 * r__2) - coef5 * (
+	    r__3 * r__3);
+    if (gamma == 0.f) {
+	goto L350;
+    }
+    if (it != 1) {
+	beta = gamma / pgamma;
+    }
+    t = coef5 * (ewc - ew * 3.f);
+    tc = coef5 * (ew - ewc * 3.f);
+
+    sscal_(&nr, &beta, &work[*ilo], &c__1);
+    sscal_(&nr, &beta, &work[*ilo + *n], &c__1);
+
+    saxpy_(&nr, &coef, &work[*ilo + (*n << 2)], &c__1, &work[*ilo + *n], &
+	    c__1);
+    saxpy_(&nr, &coef, &work[*ilo + *n * 5], &c__1, &work[*ilo], &c__1);
+
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	work[i__] += tc;
+	work[i__ + *n] += t;
+/* L270: */
+    }
+
+/* Apply matrix to vector */
+
+/* kount counts the nonzeros of A and B in row i (then column j); the */
+/* products below use only the sparsity pattern, not the values. */
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	kount = 0;
+	sum = 0.f;
+	i__2 = *ihi;
+	for (j = *ilo; j <= i__2; ++j) {
+	    if (a[i__ + j * a_dim1] == 0.f) {
+		goto L280;
+	    }
+	    ++kount;
+	    sum += work[j];
+L280:
+	    if (b[i__ + j * b_dim1] == 0.f) {
+		goto L290;
+	    }
+	    ++kount;
+	    sum += work[j];
+L290:
+	    ;
+	}
+	work[i__ + (*n << 1)] = (real) kount * work[i__ + *n] + sum;
+/* L300: */
+    }
+
+    i__1 = *ihi;
+    for (j = *ilo; j <= i__1; ++j) {
+	kount = 0;
+	sum = 0.f;
+	i__2 = *ihi;
+	for (i__ = *ilo; i__ <= i__2; ++i__) {
+	    if (a[i__ + j * a_dim1] == 0.f) {
+		goto L310;
+	    }
+	    ++kount;
+	    sum += work[i__ + *n];
+L310:
+	    if (b[i__ + j * b_dim1] == 0.f) {
+		goto L320;
+	    }
+	    ++kount;
+	    sum += work[i__ + *n];
+L320:
+	    ;
+	}
+	work[j + *n * 3] = (real) kount * work[j] + sum;
+/* L330: */
+    }
+
+    sum = sdot_(&nr, &work[*ilo + *n], &c__1, &work[*ilo + (*n << 1)], &c__1)
+	    + sdot_(&nr, &work[*ilo], &c__1, &work[*ilo + *n * 3], &c__1);
+    alpha = gamma / sum;
+
+/* Determine correction to current iteration */
+
+    cmax = 0.f;
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	cor = alpha * work[i__ + *n];
+	if (dabs(cor) > cmax) {
+	    cmax = dabs(cor);
+	}
+	lscale[i__] += cor;
+	cor = alpha * work[i__];
+	if (dabs(cor) > cmax) {
+	    cmax = dabs(cor);
+	}
+	rscale[i__] += cor;
+/* L340: */
+    }
+/* Corrections below half a decade cannot change the rounded power */
+/* of 10, so the iteration has converged. */
+    if (cmax < .5f) {
+	goto L350;
+    }
+
+    r__1 = -alpha;
+    saxpy_(&nr, &r__1, &work[*ilo + (*n << 1)], &c__1, &work[*ilo + (*n << 2)]
+, &c__1);
+    r__1 = -alpha;
+    saxpy_(&nr, &r__1, &work[*ilo + *n * 3], &c__1, &work[*ilo + *n * 5], &
+	    c__1);
+
+    pgamma = gamma;
+    ++it;
+    if (it <= nrp2) {
+	goto L250;
+    }
+
+/* End generalized conjugate gradient iteration */
+
+/* Convert each accumulated log10 factor to the nearest integer power */
+/* of 10, clamped so that the scaled entries stay within the safe */
+/* range [SFMIN, SFMAX] given the largest entry in each row/column. */
+L350:
+    sfmin = slamch_("S");
+    sfmax = 1.f / sfmin;
+    lsfmin = (integer) (r_lg10(&sfmin) / basl + 1.f);
+    lsfmax = (integer) (r_lg10(&sfmax) / basl);
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	i__2 = *n - *ilo + 1;
+	irab = isamax_(&i__2, &a[i__ + *ilo * a_dim1], lda);
+	rab = (r__1 = a[i__ + (irab + *ilo - 1) * a_dim1], dabs(r__1));
+	i__2 = *n - *ilo + 1;
+	irab = isamax_(&i__2, &b[i__ + *ilo * b_dim1], ldb);
+/* Computing MAX */
+	r__2 = rab, r__3 = (r__1 = b[i__ + (irab + *ilo - 1) * b_dim1], dabs(
+		r__1));
+	rab = dmax(r__2,r__3);
+	r__1 = rab + sfmin;
+	lrab = (integer) (r_lg10(&r__1) / basl + 1.f);
+/* Adding sign(0.5, x) before truncation rounds to nearest integer. */
+	ir = lscale[i__] + r_sign(&c_b71, &lscale[i__]);
+/* Computing MIN */
+	i__2 = max(ir,lsfmin), i__2 = min(i__2,lsfmax), i__3 = lsfmax - lrab;
+	ir = min(i__2,i__3);
+	lscale[i__] = pow_ri(&c_b35, &ir);
+	icab = isamax_(ihi, &a[i__ * a_dim1 + 1], &c__1);
+	cab = (r__1 = a[icab + i__ * a_dim1], dabs(r__1));
+	icab = isamax_(ihi, &b[i__ * b_dim1 + 1], &c__1);
+/* Computing MAX */
+	r__2 = cab, r__3 = (r__1 = b[icab + i__ * b_dim1], dabs(r__1));
+	cab = dmax(r__2,r__3);
+	r__1 = cab + sfmin;
+	lcab = (integer) (r_lg10(&r__1) / basl + 1.f);
+	jc = rscale[i__] + r_sign(&c_b71, &rscale[i__]);
+/* Computing MIN */
+	i__2 = max(jc,lsfmin), i__2 = min(i__2,lsfmax), i__3 = lsfmax - lcab;
+	jc = min(i__2,i__3);
+	rscale[i__] = pow_ri(&c_b35, &jc);
+/* L360: */
+    }
+
+/* Row scaling of matrices A and B */
+
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	i__2 = *n - *ilo + 1;
+	sscal_(&i__2, &lscale[i__], &a[i__ + *ilo * a_dim1], lda);
+	i__2 = *n - *ilo + 1;
+	sscal_(&i__2, &lscale[i__], &b[i__ + *ilo * b_dim1], ldb);
+/* L370: */
+    }
+
+/* Column scaling of matrices A and B */
+
+    i__1 = *ihi;
+    for (j = *ilo; j <= i__1; ++j) {
+	sscal_(ihi, &rscale[j], &a[j * a_dim1 + 1], &c__1);
+	sscal_(ihi, &rscale[j], &b[j * b_dim1 + 1], &c__1);
+/* L380: */
+    }
+
+    return 0;
+
+/* End of SGGBAL */
+
+} /* sggbal_ */
diff --git a/contrib/libs/clapack/sgges.c b/contrib/libs/clapack/sgges.c
new file mode 100644
index 0000000000..925f15003e
--- /dev/null
+++ b/contrib/libs/clapack/sgges.c
@@ -0,0 +1,687 @@
+/* sgges.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+static real c_b38 = 0.f;
+static real c_b39 = 1.f;
+
+/* Subroutine */ int sgges_(char *jobvsl, char *jobvsr, char *sort, L_fp
+ selctg, integer *n, real *a, integer *lda, real *b, integer *ldb,
+ integer *sdim, real *alphar, real *alphai, real *beta, real *vsl,
+ integer *ldvsl, real *vsr, integer *ldvsr, real *work, integer *lwork,
+ logical *bwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, vsl_dim1, vsl_offset,
+ vsr_dim1, vsr_offset, i__1, i__2;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, ip;
+ real dif[2];
+ integer ihi, ilo;
+ real eps, anrm, bnrm;
+ integer idum[1], ierr, itau, iwrk;
+ real pvsl, pvsr;
+ extern logical lsame_(char *, char *);
+ integer ileft, icols;
+ logical cursl, ilvsl, ilvsr;
+ integer irows;
+ logical lst2sl;
+ extern /* Subroutine */ int slabad_(real *, real *), sggbak_(char *, char
+ *, integer *, integer *, integer *, real *, real *, integer *,
+ real *, integer *, integer *), sggbal_(char *,
+ integer *, real *, integer *, real *, integer *, integer *,
+ integer *, real *, real *, real *, integer *);
+ logical ilascl, ilbscl;
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ real safmin;
+ extern /* Subroutine */ int sgghrd_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, integer *, real *, integer *
+, real *, integer *, integer *);
+ real safmax;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ijobvl, iright;
+ extern /* Subroutine */ int sgeqrf_(integer *, integer *, real *, integer
+ *, real *, real *, integer *, integer *);
+ integer ijobvr;
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *), slaset_(char *, integer *,
+ integer *, real *, real *, real *, integer *);
+ real anrmto, bnrmto;
+ logical lastsl;
+ extern /* Subroutine */ int shgeqz_(char *, char *, char *, integer *,
+ integer *, integer *, real *, integer *, real *, integer *, real *
+, real *, real *, real *, integer *, real *, integer *, real *,
+ integer *, integer *), stgsen_(integer *,
+ logical *, logical *, logical *, integer *, real *, integer *,
+ real *, integer *, real *, real *, real *, real *, integer *,
+ real *, integer *, integer *, real *, real *, real *, real *,
+ integer *, integer *, integer *, integer *);
+ integer minwrk, maxwrk;
+ real smlnum;
+ extern /* Subroutine */ int sorgqr_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *);
+ logical wantst, lquery;
+ extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+/* .. Function Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGGES computes for a pair of N-by-N real nonsymmetric matrices (A,B), */
+/* the generalized eigenvalues, the generalized real Schur form (S,T), */
+/* optionally, the left and/or right matrices of Schur vectors (VSL and */
+/* VSR). This gives the generalized Schur factorization */
+
+/* (A,B) = ( (VSL)*S*(VSR)**T, (VSL)*T*(VSR)**T ) */
+
+/* Optionally, it also orders the eigenvalues so that a selected cluster */
+/* of eigenvalues appears in the leading diagonal blocks of the upper */
+/* quasi-triangular matrix S and the upper triangular matrix T.The */
+/* leading columns of VSL and VSR then form an orthonormal basis for the */
+/* corresponding left and right eigenspaces (deflating subspaces). */
+
+/* (If only the generalized eigenvalues are needed, use the driver */
+/* SGGEV instead, which is faster.) */
+
+/* A generalized eigenvalue for a pair of matrices (A,B) is a scalar w */
+/* or a ratio alpha/beta = w, such that A - w*B is singular. It is */
+/* usually represented as the pair (alpha,beta), as there is a */
+/* reasonable interpretation for beta=0 or both being zero. */
+
+/* A pair of matrices (S,T) is in generalized real Schur form if T is */
+/* upper triangular with non-negative diagonal and S is block upper */
+/* triangular with 1-by-1 and 2-by-2 blocks. 1-by-1 blocks correspond */
+/* to real generalized eigenvalues, while 2-by-2 blocks of S will be */
+/* "standardized" by making the corresponding elements of T have the */
+/* form: */
+/* [ a 0 ] */
+/* [ 0 b ] */
+
+/* and the pair of corresponding 2-by-2 blocks in S and T will have a */
+/* complex conjugate pair of generalized eigenvalues. */
+
+
+/* Arguments */
+/* ========= */
+
+/* JOBVSL (input) CHARACTER*1 */
+/* = 'N': do not compute the left Schur vectors; */
+/* = 'V': compute the left Schur vectors. */
+
+/* JOBVSR (input) CHARACTER*1 */
+/* = 'N': do not compute the right Schur vectors; */
+/* = 'V': compute the right Schur vectors. */
+
+/* SORT (input) CHARACTER*1 */
+/* Specifies whether or not to order the eigenvalues on the */
+/* diagonal of the generalized Schur form. */
+/* = 'N': Eigenvalues are not ordered; */
+/* = 'S': Eigenvalues are ordered (see SELCTG); */
+
+/* SELCTG (external procedure) LOGICAL FUNCTION of three REAL arguments */
+/* SELCTG must be declared EXTERNAL in the calling subroutine. */
+/* If SORT = 'N', SELCTG is not referenced. */
+/* If SORT = 'S', SELCTG is used to select eigenvalues to sort */
+/* to the top left of the Schur form. */
+/* An eigenvalue (ALPHAR(j)+ALPHAI(j))/BETA(j) is selected if */
+/* SELCTG(ALPHAR(j),ALPHAI(j),BETA(j)) is true; i.e. if either */
+/* one of a complex conjugate pair of eigenvalues is selected, */
+/* then both complex eigenvalues are selected. */
+
+/* Note that in the ill-conditioned case, a selected complex */
+/* eigenvalue may no longer satisfy SELCTG(ALPHAR(j),ALPHAI(j), */
+/* BETA(j)) = .TRUE. after ordering. INFO is to be set to N+2 */
+/* in this case. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VSL, and VSR. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA, N) */
+/* On entry, the first of the pair of matrices. */
+/* On exit, A has been overwritten by its generalized Schur */
+/* form S. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) REAL array, dimension (LDB, N) */
+/* On entry, the second of the pair of matrices. */
+/* On exit, B has been overwritten by its generalized Schur */
+/* form T. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* SDIM (output) INTEGER */
+/* If SORT = 'N', SDIM = 0. */
+/* If SORT = 'S', SDIM = number of eigenvalues (after sorting) */
+/* for which SELCTG is true. (Complex conjugate pairs for which */
+/* SELCTG is true for either eigenvalue count as 2.) */
+
+/* ALPHAR (output) REAL array, dimension (N) */
+/* ALPHAI (output) REAL array, dimension (N) */
+/* BETA (output) REAL array, dimension (N) */
+/* On exit, (ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N, will */
+/* be the generalized eigenvalues. ALPHAR(j) + ALPHAI(j)*i, */
+/* and BETA(j),j=1,...,N are the diagonals of the complex Schur */
+/* form (S,T) that would result if the 2-by-2 diagonal blocks of */
+/* the real Schur form of (A,B) were further reduced to */
+/* triangular form using 2-by-2 complex unitary transformations. */
+/* If ALPHAI(j) is zero, then the j-th eigenvalue is real; if */
+/* positive, then the j-th and (j+1)-st eigenvalues are a */
+/* complex conjugate pair, with ALPHAI(j+1) negative. */
+
+/* Note: the quotients ALPHAR(j)/BETA(j) and ALPHAI(j)/BETA(j) */
+/* may easily over- or underflow, and BETA(j) may even be zero. */
+/* Thus, the user should avoid naively computing the ratio. */
+/* However, ALPHAR and ALPHAI will be always less than and */
+/* usually comparable with norm(A) in magnitude, and BETA always */
+/* less than and usually comparable with norm(B). */
+
+/* VSL (output) REAL array, dimension (LDVSL,N) */
+/* If JOBVSL = 'V', VSL will contain the left Schur vectors. */
+/* Not referenced if JOBVSL = 'N'. */
+
+/* LDVSL (input) INTEGER */
+/* The leading dimension of the matrix VSL. LDVSL >=1, and */
+/* if JOBVSL = 'V', LDVSL >= N. */
+
+/* VSR (output) REAL array, dimension (LDVSR,N) */
+/* If JOBVSR = 'V', VSR will contain the right Schur vectors. */
+/* Not referenced if JOBVSR = 'N'. */
+
+/* LDVSR (input) INTEGER */
+/* The leading dimension of the matrix VSR. LDVSR >= 1, and */
+/* if JOBVSR = 'V', LDVSR >= N. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If N = 0, LWORK >= 1, else LWORK >= max(8*N,6*N+16). */
+/* For good performance , LWORK must generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* BWORK (workspace) LOGICAL array, dimension (N) */
+/* Not referenced if SORT = 'N'. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1,...,N: */
+/* The QZ iteration failed. (A,B) are not in Schur */
+/* form, but ALPHAR(j), ALPHAI(j), and BETA(j) should */
+/* be correct for j=INFO+1,...,N. */
+/* > N: =N+1: other than QZ iteration failed in SHGEQZ. */
+/* =N+2: after reordering, roundoff changed values of */
+/* some complex eigenvalues so that leading */
+/* eigenvalues in the Generalized Schur form no */
+/* longer satisfy SELCTG=.TRUE. This could also */
+/* be caused due to scaling. */
+/* =N+3: reordering failed in STGSEN. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alphar;
+ --alphai;
+ --beta;
+ vsl_dim1 = *ldvsl;
+ vsl_offset = 1 + vsl_dim1;
+ vsl -= vsl_offset;
+ vsr_dim1 = *ldvsr;
+ vsr_offset = 1 + vsr_dim1;
+ vsr -= vsr_offset;
+ --work;
+ --bwork;
+
+ /* Function Body */
+ if (lsame_(jobvsl, "N")) {
+ ijobvl = 1;
+ ilvsl = FALSE_;
+ } else if (lsame_(jobvsl, "V")) {
+ ijobvl = 2;
+ ilvsl = TRUE_;
+ } else {
+ ijobvl = -1;
+ ilvsl = FALSE_;
+ }
+
+ if (lsame_(jobvsr, "N")) {
+ ijobvr = 1;
+ ilvsr = FALSE_;
+ } else if (lsame_(jobvsr, "V")) {
+ ijobvr = 2;
+ ilvsr = TRUE_;
+ } else {
+ ijobvr = -1;
+ ilvsr = FALSE_;
+ }
+
+ wantst = lsame_(sort, "S");
+
+/* Test the input arguments */
+
+ *info = 0;
+ lquery = *lwork == -1;
+ if (ijobvl <= 0) {
+ *info = -1;
+ } else if (ijobvr <= 0) {
+ *info = -2;
+ } else if (! wantst && ! lsame_(sort, "N")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldvsl < 1 || ilvsl && *ldvsl < *n) {
+ *info = -15;
+ } else if (*ldvsr < 1 || ilvsr && *ldvsr < *n) {
+ *info = -17;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+ if (*n > 0) {
+/* Computing MAX */
+ i__1 = *n << 3, i__2 = *n * 6 + 16;
+ minwrk = max(i__1,i__2);
+ maxwrk = minwrk - *n + *n * ilaenv_(&c__1, "SGEQRF", " ", n, &
+ c__1, n, &c__0);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = minwrk - *n + *n * ilaenv_(&c__1, "SORMQR",
+ " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ if (ilvsl) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = minwrk - *n + *n * ilaenv_(&c__1, "SOR"
+ "GQR", " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ }
+ } else {
+ minwrk = 1;
+ maxwrk = 1;
+ }
+ work[1] = (real) maxwrk;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -19;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGGES ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *sdim = 0;
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ safmin = slamch_("S");
+ safmax = 1.f / safmin;
+ slabad_(&safmin, &safmax);
+ smlnum = sqrt(safmin) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = slange_("M", n, n, &a[a_offset], lda, &work[1]);
+ ilascl = FALSE_;
+ if (anrm > 0.f && anrm < smlnum) {
+ anrmto = smlnum;
+ ilascl = TRUE_;
+ } else if (anrm > bignum) {
+ anrmto = bignum;
+ ilascl = TRUE_;
+ }
+ if (ilascl) {
+ slascl_("G", &c__0, &c__0, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+ bnrm = slange_("M", n, n, &b[b_offset], ldb, &work[1]);
+ ilbscl = FALSE_;
+ if (bnrm > 0.f && bnrm < smlnum) {
+ bnrmto = smlnum;
+ ilbscl = TRUE_;
+ } else if (bnrm > bignum) {
+ bnrmto = bignum;
+ ilbscl = TRUE_;
+ }
+ if (ilbscl) {
+ slascl_("G", &c__0, &c__0, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+ ierr);
+ }
+
+/* Permute the matrix to make it more nearly triangular */
+/* (Workspace: need 6*N + 2*N space for storing balancing factors) */
+
+ ileft = 1;
+ iright = *n + 1;
+ iwrk = iright + *n;
+ sggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &work[
+ ileft], &work[iright], &work[iwrk], &ierr);
+
+/* Reduce B to triangular form (QR decomposition of B) */
+/* (Workspace: need N, prefer N*NB) */
+
+ irows = ihi + 1 - ilo;
+ icols = *n + 1 - ilo;
+ itau = iwrk;
+ iwrk = itau + irows;
+ i__1 = *lwork + 1 - iwrk;
+ sgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+ iwrk], &i__1, &ierr);
+
+/* Apply the orthogonal transformation to matrix A */
+/* (Workspace: need N, prefer N*NB) */
+
+ i__1 = *lwork + 1 - iwrk;
+ sormqr_("L", "T", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+ work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwrk], &i__1, &
+ ierr);
+
+/* Initialize VSL */
+/* (Workspace: need N, prefer N*NB) */
+
+ if (ilvsl) {
+ slaset_("Full", n, n, &c_b38, &c_b39, &vsl[vsl_offset], ldvsl);
+ if (irows > 1) {
+ i__1 = irows - 1;
+ i__2 = irows - 1;
+ slacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vsl[
+ ilo + 1 + ilo * vsl_dim1], ldvsl);
+ }
+ i__1 = *lwork + 1 - iwrk;
+ sorgqr_(&irows, &irows, &irows, &vsl[ilo + ilo * vsl_dim1], ldvsl, &
+ work[itau], &work[iwrk], &i__1, &ierr);
+ }
+
+/* Initialize VSR */
+
+ if (ilvsr) {
+ slaset_("Full", n, n, &c_b38, &c_b39, &vsr[vsr_offset], ldvsr);
+ }
+
+/* Reduce to generalized Hessenberg form */
+/* (Workspace: none needed) */
+
+ sgghrd_(jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset],
+ ldb, &vsl[vsl_offset], ldvsl, &vsr[vsr_offset], ldvsr, &ierr);
+
+/* Perform QZ algorithm, computing Schur vectors if desired */
+/* (Workspace: need N) */
+
+ iwrk = itau;
+ i__1 = *lwork + 1 - iwrk;
+ shgeqz_("S", jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+ b_offset], ldb, &alphar[1], &alphai[1], &beta[1], &vsl[vsl_offset]
+, ldvsl, &vsr[vsr_offset], ldvsr, &work[iwrk], &i__1, &ierr);
+ if (ierr != 0) {
+ if (ierr > 0 && ierr <= *n) {
+ *info = ierr;
+ } else if (ierr > *n && ierr <= *n << 1) {
+ *info = ierr - *n;
+ } else {
+ *info = *n + 1;
+ }
+ goto L40;
+ }
+
+/* Sort eigenvalues ALPHA/BETA if desired */
+/* (Workspace: need 4*N+16 ) */
+
+ *sdim = 0;
+ if (wantst) {
+
+/* Undo scaling on eigenvalues before SELCTGing */
+
+ if (ilascl) {
+ slascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphar[1],
+ n, &ierr);
+ slascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphai[1],
+ n, &ierr);
+ }
+ if (ilbscl) {
+ slascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n,
+ &ierr);
+ }
+
+/* Select eigenvalues */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ bwork[i__] = (*selctg)(&alphar[i__], &alphai[i__], &beta[i__]);
+/* L10: */
+ }
+
+ i__1 = *lwork - iwrk + 1;
+ stgsen_(&c__0, &ilvsl, &ilvsr, &bwork[1], n, &a[a_offset], lda, &b[
+ b_offset], ldb, &alphar[1], &alphai[1], &beta[1], &vsl[
+ vsl_offset], ldvsl, &vsr[vsr_offset], ldvsr, sdim, &pvsl, &
+ pvsr, dif, &work[iwrk], &i__1, idum, &c__1, &ierr);
+ if (ierr == 1) {
+ *info = *n + 3;
+ }
+
+ }
+
+/* Apply back-permutation to VSL and VSR */
+/* (Workspace: none needed) */
+
+ if (ilvsl) {
+ sggbak_("P", "L", n, &ilo, &ihi, &work[ileft], &work[iright], n, &vsl[
+ vsl_offset], ldvsl, &ierr);
+ }
+
+ if (ilvsr) {
+ sggbak_("P", "R", n, &ilo, &ihi, &work[ileft], &work[iright], n, &vsr[
+ vsr_offset], ldvsr, &ierr);
+ }
+
+/* Check if unscaling would cause over/underflow, if so, rescale */
+/* (ALPHAR(I),ALPHAI(I),BETA(I)) so BETA(I) is on the order of */
+/* B(I,I) and ALPHAR(I) and ALPHAI(I) are on the order of A(I,I) */
+
+ if (ilascl) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (alphai[i__] != 0.f) {
+ if (alphar[i__] / safmax > anrmto / anrm || safmin / alphar[
+ i__] > anrm / anrmto) {
+ work[1] = (r__1 = a[i__ + i__ * a_dim1] / alphar[i__],
+ dabs(r__1));
+ beta[i__] *= work[1];
+ alphar[i__] *= work[1];
+ alphai[i__] *= work[1];
+ } else if (alphai[i__] / safmax > anrmto / anrm || safmin /
+ alphai[i__] > anrm / anrmto) {
+ work[1] = (r__1 = a[i__ + (i__ + 1) * a_dim1] / alphai[
+ i__], dabs(r__1));
+ beta[i__] *= work[1];
+ alphar[i__] *= work[1];
+ alphai[i__] *= work[1];
+ }
+ }
+/* L50: */
+ }
+ }
+
+ if (ilbscl) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (alphai[i__] != 0.f) {
+ if (beta[i__] / safmax > bnrmto / bnrm || safmin / beta[i__]
+ > bnrm / bnrmto) {
+ work[1] = (r__1 = b[i__ + i__ * b_dim1] / beta[i__], dabs(
+ r__1));
+ beta[i__] *= work[1];
+ alphar[i__] *= work[1];
+ alphai[i__] *= work[1];
+ }
+ }
+/* L60: */
+ }
+ }
+
+/* Undo scaling */
+
+ if (ilascl) {
+ slascl_("H", &c__0, &c__0, &anrmto, &anrm, n, n, &a[a_offset], lda, &
+ ierr);
+ slascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphar[1], n, &
+ ierr);
+ slascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphai[1], n, &
+ ierr);
+ }
+
+ if (ilbscl) {
+ slascl_("U", &c__0, &c__0, &bnrmto, &bnrm, n, n, &b[b_offset], ldb, &
+ ierr);
+ slascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+ ierr);
+ }
+
+ if (wantst) {
+
+/* Check if reordering is correct */
+
+ lastsl = TRUE_;
+ lst2sl = TRUE_;
+ *sdim = 0;
+ ip = 0;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ cursl = (*selctg)(&alphar[i__], &alphai[i__], &beta[i__]);
+ if (alphai[i__] == 0.f) {
+ if (cursl) {
+ ++(*sdim);
+ }
+ ip = 0;
+ if (cursl && ! lastsl) {
+ *info = *n + 2;
+ }
+ } else {
+ if (ip == 1) {
+
+/* Last eigenvalue of conjugate pair */
+
+ cursl = cursl || lastsl;
+ lastsl = cursl;
+ if (cursl) {
+ *sdim += 2;
+ }
+ ip = -1;
+ if (cursl && ! lst2sl) {
+ *info = *n + 2;
+ }
+ } else {
+
+/* First eigenvalue of conjugate pair */
+
+ ip = 1;
+ }
+ }
+ lst2sl = lastsl;
+ lastsl = cursl;
+/* L30: */
+ }
+
+ }
+
+L40:
+
+ work[1] = (real) maxwrk;
+
+ return 0;
+
+/* End of SGGES */
+
+} /* sgges_ */
diff --git a/contrib/libs/clapack/sggesx.c b/contrib/libs/clapack/sggesx.c
new file mode 100644
index 0000000000..9128e03431
--- /dev/null
+++ b/contrib/libs/clapack/sggesx.c
@@ -0,0 +1,811 @@
+/* sggesx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+static real c_b42 = 0.f;
+static real c_b43 = 1.f;
+
+/* Subroutine */ int sggesx_(char *jobvsl, char *jobvsr, char *sort, L_fp
+ selctg, char *sense, integer *n, real *a, integer *lda, real *b,
+ integer *ldb, integer *sdim, real *alphar, real *alphai, real *beta,
+ real *vsl, integer *ldvsl, real *vsr, integer *ldvsr, real *rconde,
+ real *rcondv, real *work, integer *lwork, integer *iwork, integer *
+ liwork, logical *bwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, vsl_dim1, vsl_offset,
+ vsr_dim1, vsr_offset, i__1, i__2;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, ip;
+ real pl, pr, dif[2];
+ integer ihi, ilo;
+ real eps;
+ integer ijob;
+ real anrm, bnrm;
+ integer ierr, itau, iwrk, lwrk;
+ extern logical lsame_(char *, char *);
+ integer ileft, icols;
+ logical cursl, ilvsl, ilvsr;
+ integer irows;
+ logical lst2sl;
+ extern /* Subroutine */ int slabad_(real *, real *), sggbak_(char *, char
+ *, integer *, integer *, integer *, real *, real *, integer *,
+ real *, integer *, integer *), sggbal_(char *,
+ integer *, real *, integer *, real *, integer *, integer *,
+ integer *, real *, real *, real *, integer *);
+ logical ilascl, ilbscl;
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ real safmin;
+ extern /* Subroutine */ int sgghrd_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, integer *, real *, integer *
+, real *, integer *, integer *);
+ real safmax;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ijobvl, iright;
+ extern /* Subroutine */ int sgeqrf_(integer *, integer *, real *, integer
+ *, real *, real *, integer *, integer *);
+ integer ijobvr;
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *);
+ logical wantsb, wantse, lastsl;
+ integer liwmin;
+ real anrmto, bnrmto;
+ integer minwrk, maxwrk;
+ logical wantsn;
+ real smlnum;
+ extern /* Subroutine */ int shgeqz_(char *, char *, char *, integer *,
+ integer *, integer *, real *, integer *, real *, integer *, real *
+, real *, real *, real *, integer *, real *, integer *, real *,
+ integer *, integer *), slaset_(char *,
+ integer *, integer *, real *, real *, real *, integer *),
+ sorgqr_(integer *, integer *, integer *, real *, integer *, real *
+, real *, integer *, integer *), stgsen_(integer *, logical *,
+ logical *, logical *, integer *, real *, integer *, real *,
+ integer *, real *, real *, real *, real *, integer *, real *,
+ integer *, integer *, real *, real *, real *, real *, integer *,
+ integer *, integer *, integer *);
+ logical wantst, lquery, wantsv;
+ extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+/* .. Function Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGGESX computes for a pair of N-by-N real nonsymmetric matrices */
+/* (A,B), the generalized eigenvalues, the real Schur form (S,T), and, */
+/* optionally, the left and/or right matrices of Schur vectors (VSL and */
+/* VSR). This gives the generalized Schur factorization */
+
+/* (A,B) = ( (VSL) S (VSR)**T, (VSL) T (VSR)**T ) */
+
+/* Optionally, it also orders the eigenvalues so that a selected cluster */
+/* of eigenvalues appears in the leading diagonal blocks of the upper */
+/* quasi-triangular matrix S and the upper triangular matrix T; computes */
+/* a reciprocal condition number for the average of the selected */
+/* eigenvalues (RCONDE); and computes a reciprocal condition number for */
+/* the right and left deflating subspaces corresponding to the selected */
+/* eigenvalues (RCONDV). The leading columns of VSL and VSR then form */
+/* an orthonormal basis for the corresponding left and right eigenspaces */
+/* (deflating subspaces). */
+
+/* A generalized eigenvalue for a pair of matrices (A,B) is a scalar w */
+/* or a ratio alpha/beta = w, such that A - w*B is singular. It is */
+/* usually represented as the pair (alpha,beta), as there is a */
+/* reasonable interpretation for beta=0 or for both being zero. */
+
+/* A pair of matrices (S,T) is in generalized real Schur form if T is */
+/* upper triangular with non-negative diagonal and S is block upper */
+/* triangular with 1-by-1 and 2-by-2 blocks. 1-by-1 blocks correspond */
+/* to real generalized eigenvalues, while 2-by-2 blocks of S will be */
+/* "standardized" by making the corresponding elements of T have the */
+/* form: */
+/* [ a 0 ] */
+/* [ 0 b ] */
+
+/* and the pair of corresponding 2-by-2 blocks in S and T will have a */
+/* complex conjugate pair of generalized eigenvalues. */
+
+
+/* Arguments */
+/* ========= */
+
+/* JOBVSL (input) CHARACTER*1 */
+/* = 'N': do not compute the left Schur vectors; */
+/* = 'V': compute the left Schur vectors. */
+
+/* JOBVSR (input) CHARACTER*1 */
+/* = 'N': do not compute the right Schur vectors; */
+/* = 'V': compute the right Schur vectors. */
+
+/* SORT (input) CHARACTER*1 */
+/* Specifies whether or not to order the eigenvalues on the */
+/* diagonal of the generalized Schur form. */
+/* = 'N': Eigenvalues are not ordered; */
+/* = 'S': Eigenvalues are ordered (see SELCTG). */
+
+/* SELCTG (external procedure) LOGICAL FUNCTION of three REAL arguments */
+/* SELCTG must be declared EXTERNAL in the calling subroutine. */
+/* If SORT = 'N', SELCTG is not referenced. */
+/* If SORT = 'S', SELCTG is used to select eigenvalues to sort */
+/* to the top left of the Schur form. */
+/* An eigenvalue (ALPHAR(j)+ALPHAI(j))/BETA(j) is selected if */
+/* SELCTG(ALPHAR(j),ALPHAI(j),BETA(j)) is true; i.e. if either */
+/* one of a complex conjugate pair of eigenvalues is selected, */
+/* then both complex eigenvalues are selected. */
+/* Note that a selected complex eigenvalue may no longer satisfy */
+/* SELCTG(ALPHAR(j),ALPHAI(j),BETA(j)) = .TRUE. after ordering, */
+/* since ordering may change the value of complex eigenvalues */
+/* (especially if the eigenvalue is ill-conditioned), in this */
+/* case INFO is set to N+3. */
+
+/* SENSE (input) CHARACTER*1 */
+/* Determines which reciprocal condition numbers are computed. */
+/* = 'N' : None are computed; */
+/* = 'E' : Computed for average of selected eigenvalues only; */
+/* = 'V' : Computed for selected deflating subspaces only; */
+/* = 'B' : Computed for both. */
+/* If SENSE = 'E', 'V', or 'B', SORT must equal 'S'. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VSL, and VSR. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA, N) */
+/* On entry, the first of the pair of matrices. */
+/* On exit, A has been overwritten by its generalized Schur */
+/* form S. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) REAL array, dimension (LDB, N) */
+/* On entry, the second of the pair of matrices. */
+/* On exit, B has been overwritten by its generalized Schur */
+/* form T. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* SDIM (output) INTEGER */
+/* If SORT = 'N', SDIM = 0. */
+/* If SORT = 'S', SDIM = number of eigenvalues (after sorting) */
+/* for which SELCTG is true. (Complex conjugate pairs for which */
+/* SELCTG is true for either eigenvalue count as 2.) */
+
+/* ALPHAR (output) REAL array, dimension (N) */
+/* ALPHAI (output) REAL array, dimension (N) */
+/* BETA (output) REAL array, dimension (N) */
+/* On exit, (ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N, will */
+/* be the generalized eigenvalues. ALPHAR(j) + ALPHAI(j)*i */
+/* and BETA(j),j=1,...,N are the diagonals of the complex Schur */
+/* form (S,T) that would result if the 2-by-2 diagonal blocks of */
+/* the real Schur form of (A,B) were further reduced to */
+/* triangular form using 2-by-2 complex unitary transformations. */
+/* If ALPHAI(j) is zero, then the j-th eigenvalue is real; if */
+/* positive, then the j-th and (j+1)-st eigenvalues are a */
+/* complex conjugate pair, with ALPHAI(j+1) negative. */
+
+/* Note: the quotients ALPHAR(j)/BETA(j) and ALPHAI(j)/BETA(j) */
+/* may easily over- or underflow, and BETA(j) may even be zero. */
+/* Thus, the user should avoid naively computing the ratio. */
+/* However, ALPHAR and ALPHAI will be always less than and */
+/* usually comparable with norm(A) in magnitude, and BETA always */
+/* less than and usually comparable with norm(B). */
+
+/* VSL (output) REAL array, dimension (LDVSL,N) */
+/* If JOBVSL = 'V', VSL will contain the left Schur vectors. */
+/* Not referenced if JOBVSL = 'N'. */
+
+/* LDVSL (input) INTEGER */
+/* The leading dimension of the matrix VSL. LDVSL >=1, and */
+/* if JOBVSL = 'V', LDVSL >= N. */
+
+/* VSR (output) REAL array, dimension (LDVSR,N) */
+/* If JOBVSR = 'V', VSR will contain the right Schur vectors. */
+/* Not referenced if JOBVSR = 'N'. */
+
+/* LDVSR (input) INTEGER */
+/* The leading dimension of the matrix VSR. LDVSR >= 1, and */
+/* if JOBVSR = 'V', LDVSR >= N. */
+
+/* RCONDE (output) REAL array, dimension ( 2 ) */
+/* If SENSE = 'E' or 'B', RCONDE(1) and RCONDE(2) contain the */
+/* reciprocal condition numbers for the average of the selected */
+/* eigenvalues. */
+/* Not referenced if SENSE = 'N' or 'V'. */
+
+/* RCONDV (output) REAL array, dimension ( 2 ) */
+/* If SENSE = 'V' or 'B', RCONDV(1) and RCONDV(2) contain the */
+/* reciprocal condition numbers for the selected deflating */
+/* subspaces. */
+/* Not referenced if SENSE = 'N' or 'E'. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If N = 0, LWORK >= 1, else if SENSE = 'E', 'V', or 'B', */
+/* LWORK >= max( 8*N, 6*N+16, 2*SDIM*(N-SDIM) ), else */
+/* LWORK >= max( 8*N, 6*N+16 ). */
+/* Note that 2*SDIM*(N-SDIM) <= N*N/2. */
+/* Note also that an error is only returned if */
+/* LWORK < max( 8*N, 6*N+16), but if SENSE = 'E' or 'V' or 'B' */
+/* this may not be large enough. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the bound on the optimal size of the WORK */
+/* array and the minimum size of the IWORK array, returns these */
+/* values as the first entries of the WORK and IWORK arrays, and */
+/* no error message related to LWORK or LIWORK is issued by */
+/* XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the minimum LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If SENSE = 'N' or N = 0, LIWORK >= 1, otherwise */
+/* LIWORK >= N+6. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the bound on the optimal size of the */
+/* WORK array and the minimum size of the IWORK array, returns */
+/* these values as the first entries of the WORK and IWORK */
+/* arrays, and no error message related to LWORK or LIWORK is */
+/* issued by XERBLA. */
+
+/* BWORK (workspace) LOGICAL array, dimension (N) */
+/* Not referenced if SORT = 'N'. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1,...,N: */
+/* The QZ iteration failed. (A,B) are not in Schur */
+/* form, but ALPHAR(j), ALPHAI(j), and BETA(j) should */
+/* be correct for j=INFO+1,...,N. */
+/* > N: =N+1: other than QZ iteration failed in SHGEQZ */
+/* =N+2: after reordering, roundoff changed values of */
+/* some complex eigenvalues so that leading */
+/* eigenvalues in the Generalized Schur form no */
+/* longer satisfy SELCTG=.TRUE. This could also */
+/* be caused due to scaling. */
+/* =N+3: reordering failed in STGSEN. */
+
+/* Further details */
+/* =============== */
+
+/* An approximate (asymptotic) bound on the average absolute error of */
+/* the selected eigenvalues is */
+
+/* EPS * norm((A, B)) / RCONDE( 1 ). */
+
+/* An approximate (asymptotic) bound on the maximum angular error in */
+/* the computed deflating subspaces is */
+
+/* EPS * norm((A, B)) / RCONDV( 2 ). */
+
+/* See LAPACK User's Guide, section 4.11 for more information. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alphar;
+ --alphai;
+ --beta;
+ vsl_dim1 = *ldvsl;
+ vsl_offset = 1 + vsl_dim1;
+ vsl -= vsl_offset;
+ vsr_dim1 = *ldvsr;
+ vsr_offset = 1 + vsr_dim1;
+ vsr -= vsr_offset;
+ --rconde;
+ --rcondv;
+ --work;
+ --iwork;
+ --bwork;
+
+ /* Function Body */
+ if (lsame_(jobvsl, "N")) {
+ ijobvl = 1;
+ ilvsl = FALSE_;
+ } else if (lsame_(jobvsl, "V")) {
+ ijobvl = 2;
+ ilvsl = TRUE_;
+ } else {
+ ijobvl = -1;
+ ilvsl = FALSE_;
+ }
+
+ if (lsame_(jobvsr, "N")) {
+ ijobvr = 1;
+ ilvsr = FALSE_;
+ } else if (lsame_(jobvsr, "V")) {
+ ijobvr = 2;
+ ilvsr = TRUE_;
+ } else {
+ ijobvr = -1;
+ ilvsr = FALSE_;
+ }
+
+ wantst = lsame_(sort, "S");
+ wantsn = lsame_(sense, "N");
+ wantse = lsame_(sense, "E");
+ wantsv = lsame_(sense, "V");
+ wantsb = lsame_(sense, "B");
+ lquery = *lwork == -1 || *liwork == -1;
+ if (wantsn) {
+ ijob = 0;
+ } else if (wantse) {
+ ijob = 1;
+ } else if (wantsv) {
+ ijob = 2;
+ } else if (wantsb) {
+ ijob = 4;
+ }
+
+/* Test the input arguments */
+
+ *info = 0;
+ if (ijobvl <= 0) {
+ *info = -1;
+ } else if (ijobvr <= 0) {
+ *info = -2;
+ } else if (! wantst && ! lsame_(sort, "N")) {
+ *info = -3;
+ } else if (! (wantsn || wantse || wantsv || wantsb) || ! wantst && !
+ wantsn) {
+ *info = -5;
+ } else if (*n < 0) {
+ *info = -6;
+ } else if (*lda < max(1,*n)) {
+ *info = -8;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldvsl < 1 || ilvsl && *ldvsl < *n) {
+ *info = -16;
+ } else if (*ldvsr < 1 || ilvsr && *ldvsr < *n) {
+ *info = -18;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+ if (*n > 0) {
+/* Computing MAX */
+ i__1 = *n << 3, i__2 = *n * 6 + 16;
+ minwrk = max(i__1,i__2);
+ maxwrk = minwrk - *n + *n * ilaenv_(&c__1, "SGEQRF", " ", n, &
+ c__1, n, &c__0);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = minwrk - *n + *n * ilaenv_(&c__1, "SORMQR",
+ " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ if (ilvsl) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = minwrk - *n + *n * ilaenv_(&c__1, "SOR"
+ "GQR", " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ }
+ lwrk = maxwrk;
+ if (ijob >= 1) {
+/* Computing MAX */
+ i__1 = lwrk, i__2 = *n * *n / 2;
+ lwrk = max(i__1,i__2);
+ }
+ } else {
+ minwrk = 1;
+ maxwrk = 1;
+ lwrk = 1;
+ }
+ work[1] = (real) lwrk;
+ if (wantsn || *n == 0) {
+ liwmin = 1;
+ } else {
+ liwmin = *n + 6;
+ }
+ iwork[1] = liwmin;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -22;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -24;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGGESX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *sdim = 0;
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ safmin = slamch_("S");
+ safmax = 1.f / safmin;
+ slabad_(&safmin, &safmax);
+ smlnum = sqrt(safmin) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = slange_("M", n, n, &a[a_offset], lda, &work[1]);
+ ilascl = FALSE_;
+ if (anrm > 0.f && anrm < smlnum) {
+ anrmto = smlnum;
+ ilascl = TRUE_;
+ } else if (anrm > bignum) {
+ anrmto = bignum;
+ ilascl = TRUE_;
+ }
+ if (ilascl) {
+ slascl_("G", &c__0, &c__0, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+ bnrm = slange_("M", n, n, &b[b_offset], ldb, &work[1]);
+ ilbscl = FALSE_;
+ if (bnrm > 0.f && bnrm < smlnum) {
+ bnrmto = smlnum;
+ ilbscl = TRUE_;
+ } else if (bnrm > bignum) {
+ bnrmto = bignum;
+ ilbscl = TRUE_;
+ }
+ if (ilbscl) {
+ slascl_("G", &c__0, &c__0, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+ ierr);
+ }
+
+/* Permute the matrix to make it more nearly triangular */
+/* (Workspace: need 6*N + 2*N for permutation parameters) */
+
+ ileft = 1;
+ iright = *n + 1;
+ iwrk = iright + *n;
+ sggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &work[
+ ileft], &work[iright], &work[iwrk], &ierr);
+
+/* Reduce B to triangular form (QR decomposition of B) */
+/* (Workspace: need N, prefer N*NB) */
+
+ irows = ihi + 1 - ilo;
+ icols = *n + 1 - ilo;
+ itau = iwrk;
+ iwrk = itau + irows;
+ i__1 = *lwork + 1 - iwrk;
+ sgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+ iwrk], &i__1, &ierr);
+
+/* Apply the orthogonal transformation to matrix A */
+/* (Workspace: need N, prefer N*NB) */
+
+ i__1 = *lwork + 1 - iwrk;
+ sormqr_("L", "T", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+ work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwrk], &i__1, &
+ ierr);
+
+/* Initialize VSL */
+/* (Workspace: need N, prefer N*NB) */
+
+ if (ilvsl) {
+ slaset_("Full", n, n, &c_b42, &c_b43, &vsl[vsl_offset], ldvsl);
+ if (irows > 1) {
+ i__1 = irows - 1;
+ i__2 = irows - 1;
+ slacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vsl[
+ ilo + 1 + ilo * vsl_dim1], ldvsl);
+ }
+ i__1 = *lwork + 1 - iwrk;
+ sorgqr_(&irows, &irows, &irows, &vsl[ilo + ilo * vsl_dim1], ldvsl, &
+ work[itau], &work[iwrk], &i__1, &ierr);
+ }
+
+/* Initialize VSR */
+
+ if (ilvsr) {
+ slaset_("Full", n, n, &c_b42, &c_b43, &vsr[vsr_offset], ldvsr);
+ }
+
+/* Reduce to generalized Hessenberg form */
+/* (Workspace: none needed) */
+
+ sgghrd_(jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset],
+ ldb, &vsl[vsl_offset], ldvsl, &vsr[vsr_offset], ldvsr, &ierr);
+
+ *sdim = 0;
+
+/* Perform QZ algorithm, computing Schur vectors if desired */
+/* (Workspace: need N) */
+
+ iwrk = itau;
+ i__1 = *lwork + 1 - iwrk;
+ shgeqz_("S", jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+ b_offset], ldb, &alphar[1], &alphai[1], &beta[1], &vsl[vsl_offset]
+, ldvsl, &vsr[vsr_offset], ldvsr, &work[iwrk], &i__1, &ierr);
+ if (ierr != 0) {
+ if (ierr > 0 && ierr <= *n) {
+ *info = ierr;
+ } else if (ierr > *n && ierr <= *n << 1) {
+ *info = ierr - *n;
+ } else {
+ *info = *n + 1;
+ }
+ goto L50;
+ }
+
+/* Sort eigenvalues ALPHA/BETA and compute the reciprocal of */
+/* condition number(s) */
+/* (Workspace: If IJOB >= 1, need MAX( 8*(N+1), 2*SDIM*(N-SDIM) ) */
+/* otherwise, need 8*(N+1) ) */
+
+ if (wantst) {
+
+/* Undo scaling on eigenvalues before SELCTGing */
+
+ if (ilascl) {
+ slascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphar[1],
+ n, &ierr);
+ slascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphai[1],
+ n, &ierr);
+ }
+ if (ilbscl) {
+ slascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n,
+ &ierr);
+ }
+
+/* Select eigenvalues */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ bwork[i__] = (*selctg)(&alphar[i__], &alphai[i__], &beta[i__]);
+/* L10: */
+ }
+
+/* Reorder eigenvalues, transform Generalized Schur vectors, and */
+/* compute reciprocal condition numbers */
+
+ i__1 = *lwork - iwrk + 1;
+ stgsen_(&ijob, &ilvsl, &ilvsr, &bwork[1], n, &a[a_offset], lda, &b[
+ b_offset], ldb, &alphar[1], &alphai[1], &beta[1], &vsl[
+ vsl_offset], ldvsl, &vsr[vsr_offset], ldvsr, sdim, &pl, &pr,
+ dif, &work[iwrk], &i__1, &iwork[1], liwork, &ierr);
+
+ if (ijob >= 1) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*sdim << 1) * (*n - *sdim);
+ maxwrk = max(i__1,i__2);
+ }
+ if (ierr == -22) {
+
+/* not enough real workspace */
+
+ *info = -22;
+ } else {
+ if (ijob == 1 || ijob == 4) {
+ rconde[1] = pl;
+ rconde[2] = pr;
+ }
+ if (ijob == 2 || ijob == 4) {
+ rcondv[1] = dif[0];
+ rcondv[2] = dif[1];
+ }
+ if (ierr == 1) {
+ *info = *n + 3;
+ }
+ }
+
+ }
+
+/* Apply permutation to VSL and VSR */
+/* (Workspace: none needed) */
+
+ if (ilvsl) {
+ sggbak_("P", "L", n, &ilo, &ihi, &work[ileft], &work[iright], n, &vsl[
+ vsl_offset], ldvsl, &ierr);
+ }
+
+ if (ilvsr) {
+ sggbak_("P", "R", n, &ilo, &ihi, &work[ileft], &work[iright], n, &vsr[
+ vsr_offset], ldvsr, &ierr);
+ }
+
+/* Check if unscaling would cause over/underflow, if so, rescale */
+/* (ALPHAR(I),ALPHAI(I),BETA(I)) so BETA(I) is on the order of */
+/* B(I,I) and ALPHAR(I) and ALPHAI(I) are on the order of A(I,I) */
+
+ if (ilascl) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (alphai[i__] != 0.f) {
+ if (alphar[i__] / safmax > anrmto / anrm || safmin / alphar[
+ i__] > anrm / anrmto) {
+ work[1] = (r__1 = a[i__ + i__ * a_dim1] / alphar[i__],
+ dabs(r__1));
+ beta[i__] *= work[1];
+ alphar[i__] *= work[1];
+ alphai[i__] *= work[1];
+ } else if (alphai[i__] / safmax > anrmto / anrm || safmin /
+ alphai[i__] > anrm / anrmto) {
+ work[1] = (r__1 = a[i__ + (i__ + 1) * a_dim1] / alphai[
+ i__], dabs(r__1));
+ beta[i__] *= work[1];
+ alphar[i__] *= work[1];
+ alphai[i__] *= work[1];
+ }
+ }
+/* L20: */
+ }
+ }
+
+ if (ilbscl) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (alphai[i__] != 0.f) {
+ if (beta[i__] / safmax > bnrmto / bnrm || safmin / beta[i__]
+ > bnrm / bnrmto) {
+ work[1] = (r__1 = b[i__ + i__ * b_dim1] / beta[i__], dabs(
+ r__1));
+ beta[i__] *= work[1];
+ alphar[i__] *= work[1];
+ alphai[i__] *= work[1];
+ }
+ }
+/* L25: */
+ }
+ }
+
+/* Undo scaling */
+
+ if (ilascl) {
+ slascl_("H", &c__0, &c__0, &anrmto, &anrm, n, n, &a[a_offset], lda, &
+ ierr);
+ slascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphar[1], n, &
+ ierr);
+ slascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphai[1], n, &
+ ierr);
+ }
+
+ if (ilbscl) {
+ slascl_("U", &c__0, &c__0, &bnrmto, &bnrm, n, n, &b[b_offset], ldb, &
+ ierr);
+ slascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+ ierr);
+ }
+
+ if (wantst) {
+
+/* Check if reordering is correct */
+
+ lastsl = TRUE_;
+ lst2sl = TRUE_;
+ *sdim = 0;
+ ip = 0;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ cursl = (*selctg)(&alphar[i__], &alphai[i__], &beta[i__]);
+ if (alphai[i__] == 0.f) {
+ if (cursl) {
+ ++(*sdim);
+ }
+ ip = 0;
+ if (cursl && ! lastsl) {
+ *info = *n + 2;
+ }
+ } else {
+ if (ip == 1) {
+
+/* Last eigenvalue of conjugate pair */
+
+ cursl = cursl || lastsl;
+ lastsl = cursl;
+ if (cursl) {
+ *sdim += 2;
+ }
+ ip = -1;
+ if (cursl && ! lst2sl) {
+ *info = *n + 2;
+ }
+ } else {
+
+/* First eigenvalue of conjugate pair */
+
+ ip = 1;
+ }
+ }
+ lst2sl = lastsl;
+ lastsl = cursl;
+/* L40: */
+ }
+
+ }
+
+L50:
+
+ work[1] = (real) maxwrk;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of SGGESX */
+
+} /* sggesx_ */
diff --git a/contrib/libs/clapack/sggev.c b/contrib/libs/clapack/sggev.c
new file mode 100644
index 0000000000..7c3537ea7d
--- /dev/null
+++ b/contrib/libs/clapack/sggev.c
@@ -0,0 +1,640 @@
+/* sggev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+static real c_b36 = 0.f;
+static real c_b37 = 1.f;
+
+/* Subroutine */ int sggev_(char *jobvl, char *jobvr, integer *n, real *a,
+ integer *lda, real *b, integer *ldb, real *alphar, real *alphai, real
+ *beta, real *vl, integer *ldvl, real *vr, integer *ldvr, real *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, vl_dim1, vl_offset, vr_dim1,
+ vr_offset, i__1, i__2;
+ real r__1, r__2, r__3, r__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer jc, in, jr, ihi, ilo;
+ real eps;
+ logical ilv;
+ real anrm, bnrm;
+ integer ierr, itau;
+ real temp;
+ logical ilvl, ilvr;
+ integer iwrk;
+ extern logical lsame_(char *, char *);
+ integer ileft, icols, irows;
+ extern /* Subroutine */ int slabad_(real *, real *), sggbak_(char *, char
+ *, integer *, integer *, integer *, real *, real *, integer *,
+ real *, integer *, integer *), sggbal_(char *,
+ integer *, real *, integer *, real *, integer *, integer *,
+ integer *, real *, real *, real *, integer *);
+ logical ilascl, ilbscl;
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), sgghrd_(
+ char *, char *, integer *, integer *, integer *, real *, integer *
+, real *, integer *, real *, integer *, real *, integer *,
+ integer *);
+ logical ldumma[1];
+ char chtemp[1];
+ real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ijobvl, iright;
+ extern /* Subroutine */ int sgeqrf_(integer *, integer *, real *, integer
+ *, real *, real *, integer *, integer *);
+ integer ijobvr;
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *), slaset_(char *, integer *,
+ integer *, real *, real *, real *, integer *), stgevc_(
+ char *, char *, logical *, integer *, real *, integer *, real *,
+ integer *, real *, integer *, real *, integer *, integer *,
+ integer *, real *, integer *);
+ real anrmto, bnrmto;
+ extern /* Subroutine */ int shgeqz_(char *, char *, char *, integer *,
+ integer *, integer *, real *, integer *, real *, integer *, real *
+, real *, real *, real *, integer *, real *, integer *, real *,
+ integer *, integer *);
+ integer minwrk, maxwrk;
+ real smlnum;
+ extern /* Subroutine */ int sorgqr_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *);
+ logical lquery;
+ extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGGEV computes for a pair of N-by-N real nonsymmetric matrices (A,B) */
+/* the generalized eigenvalues, and optionally, the left and/or right */
+/* generalized eigenvectors. */
+
+/* A generalized eigenvalue for a pair of matrices (A,B) is a scalar */
+/* lambda or a ratio alpha/beta = lambda, such that A - lambda*B is */
+/* singular. It is usually represented as the pair (alpha,beta), as */
+/* there is a reasonable interpretation for beta=0, and even for both */
+/* being zero. */
+
+/* The right eigenvector v(j) corresponding to the eigenvalue lambda(j) */
+/* of (A,B) satisfies */
+
+/* A * v(j) = lambda(j) * B * v(j). */
+
+/* The left eigenvector u(j) corresponding to the eigenvalue lambda(j) */
+/* of (A,B) satisfies */
+
+/* u(j)**H * A = lambda(j) * u(j)**H * B . */
+
+/* where u(j)**H is the conjugate-transpose of u(j). */
+
+
+/* Arguments */
+/* ========= */
+
+/* JOBVL (input) CHARACTER*1 */
+/* = 'N': do not compute the left generalized eigenvectors; */
+/* = 'V': compute the left generalized eigenvectors. */
+
+/* JOBVR (input) CHARACTER*1 */
+/* = 'N': do not compute the right generalized eigenvectors; */
+/* = 'V': compute the right generalized eigenvectors. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VL, and VR. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA, N) */
+/* On entry, the matrix A in the pair (A,B). */
+/* On exit, A has been overwritten. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) REAL array, dimension (LDB, N) */
+/* On entry, the matrix B in the pair (A,B). */
+/* On exit, B has been overwritten. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* ALPHAR (output) REAL array, dimension (N) */
+/* ALPHAI (output) REAL array, dimension (N) */
+/* BETA (output) REAL array, dimension (N) */
+/* On exit, (ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N, will */
+/* be the generalized eigenvalues. If ALPHAI(j) is zero, then */
+/* the j-th eigenvalue is real; if positive, then the j-th and */
+/* (j+1)-st eigenvalues are a complex conjugate pair, with */
+/* ALPHAI(j+1) negative. */
+
+/* Note: the quotients ALPHAR(j)/BETA(j) and ALPHAI(j)/BETA(j) */
+/* may easily over- or underflow, and BETA(j) may even be zero. */
+/* Thus, the user should avoid naively computing the ratio */
+/* alpha/beta. However, ALPHAR and ALPHAI will be always less */
+/* than and usually comparable with norm(A) in magnitude, and */
+/* BETA always less than and usually comparable with norm(B). */
+
+/* VL (output) REAL array, dimension (LDVL,N) */
+/* If JOBVL = 'V', the left eigenvectors u(j) are stored one */
+/* after another in the columns of VL, in the same order as */
+/* their eigenvalues. If the j-th eigenvalue is real, then */
+/* u(j) = VL(:,j), the j-th column of VL. If the j-th and */
+/* (j+1)-th eigenvalues form a complex conjugate pair, then */
+/* u(j) = VL(:,j)+i*VL(:,j+1) and u(j+1) = VL(:,j)-i*VL(:,j+1). */
+/* Each eigenvector is scaled so the largest component has */
+/* abs(real part)+abs(imag. part)=1. */
+/* Not referenced if JOBVL = 'N'. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the matrix VL. LDVL >= 1, and */
+/* if JOBVL = 'V', LDVL >= N. */
+
+/* VR (output) REAL array, dimension (LDVR,N) */
+/* If JOBVR = 'V', the right eigenvectors v(j) are stored one */
+/* after another in the columns of VR, in the same order as */
+/* their eigenvalues. If the j-th eigenvalue is real, then */
+/* v(j) = VR(:,j), the j-th column of VR. If the j-th and */
+/* (j+1)-th eigenvalues form a complex conjugate pair, then */
+/* v(j) = VR(:,j)+i*VR(:,j+1) and v(j+1) = VR(:,j)-i*VR(:,j+1). */
+/* Each eigenvector is scaled so the largest component has */
+/* abs(real part)+abs(imag. part)=1. */
+/* Not referenced if JOBVR = 'N'. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the matrix VR. LDVR >= 1, and */
+/* if JOBVR = 'V', LDVR >= N. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,8*N). */
+/* For good performance, LWORK must generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1,...,N: */
+/* The QZ iteration failed. No eigenvectors have been */
+/* calculated, but ALPHAR(j), ALPHAI(j), and BETA(j) */
+/* should be correct for j=INFO+1,...,N. */
+/* > N: =N+1: other than QZ iteration failed in SHGEQZ. */
+/* =N+2: error return from STGEVC. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alphar;
+ --alphai;
+ --beta;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+
+ /* Function Body */
+ if (lsame_(jobvl, "N")) {
+ ijobvl = 1;
+ ilvl = FALSE_;
+ } else if (lsame_(jobvl, "V")) {
+ ijobvl = 2;
+ ilvl = TRUE_;
+ } else {
+ ijobvl = -1;
+ ilvl = FALSE_;
+ }
+
+ if (lsame_(jobvr, "N")) {
+ ijobvr = 1;
+ ilvr = FALSE_;
+ } else if (lsame_(jobvr, "V")) {
+ ijobvr = 2;
+ ilvr = TRUE_;
+ } else {
+ ijobvr = -1;
+ ilvr = FALSE_;
+ }
+ ilv = ilvl || ilvr;
+
+/* Test the input arguments */
+
+ *info = 0;
+ lquery = *lwork == -1;
+ if (ijobvl <= 0) {
+ *info = -1;
+ } else if (ijobvr <= 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ } else if (*ldvl < 1 || ilvl && *ldvl < *n) {
+ *info = -12;
+ } else if (*ldvr < 1 || ilvr && *ldvr < *n) {
+ *info = -14;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV. The workspace is */
+/* computed assuming ILO = 1 and IHI = N, the worst case.) */
+
+ if (*info == 0) {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n << 3;
+ minwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = 1, i__2 = *n * (ilaenv_(&c__1, "SGEQRF", " ", n, &c__1, n, &
+ c__0) + 7);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * (ilaenv_(&c__1, "SORMQR", " ", n, &c__1, n,
+ &c__0) + 7);
+ maxwrk = max(i__1,i__2);
+ if (ilvl) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * (ilaenv_(&c__1, "SORGQR", " ", n, &
+ c__1, n, &c_n1) + 7);
+ maxwrk = max(i__1,i__2);
+ }
+ work[1] = (real) maxwrk;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -16;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGGEV ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = slange_("M", n, n, &a[a_offset], lda, &work[1]);
+ ilascl = FALSE_;
+ if (anrm > 0.f && anrm < smlnum) {
+ anrmto = smlnum;
+ ilascl = TRUE_;
+ } else if (anrm > bignum) {
+ anrmto = bignum;
+ ilascl = TRUE_;
+ }
+ if (ilascl) {
+ slascl_("G", &c__0, &c__0, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+ bnrm = slange_("M", n, n, &b[b_offset], ldb, &work[1]);
+ ilbscl = FALSE_;
+ if (bnrm > 0.f && bnrm < smlnum) {
+ bnrmto = smlnum;
+ ilbscl = TRUE_;
+ } else if (bnrm > bignum) {
+ bnrmto = bignum;
+ ilbscl = TRUE_;
+ }
+ if (ilbscl) {
+ slascl_("G", &c__0, &c__0, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+ ierr);
+ }
+
+/* Permute the matrices A, B to isolate eigenvalues if possible */
+/* (Workspace: need 6*N) */
+
+ ileft = 1;
+ iright = *n + 1;
+ iwrk = iright + *n;
+ sggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &work[
+ ileft], &work[iright], &work[iwrk], &ierr);
+
+/* Reduce B to triangular form (QR decomposition of B) */
+/* (Workspace: need N, prefer N*NB) */
+
+ irows = ihi + 1 - ilo;
+ if (ilv) {
+ icols = *n + 1 - ilo;
+ } else {
+ icols = irows;
+ }
+ itau = iwrk;
+ iwrk = itau + irows;
+ i__1 = *lwork + 1 - iwrk;
+ sgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+ iwrk], &i__1, &ierr);
+
+/* Apply the orthogonal transformation to matrix A */
+/* (Workspace: need N, prefer N*NB) */
+
+ i__1 = *lwork + 1 - iwrk;
+ sormqr_("L", "T", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+ work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwrk], &i__1, &
+ ierr);
+
+/* Initialize VL */
+/* (Workspace: need N, prefer N*NB) */
+
+ if (ilvl) {
+ slaset_("Full", n, n, &c_b36, &c_b37, &vl[vl_offset], ldvl)
+ ;
+ if (irows > 1) {
+ i__1 = irows - 1;
+ i__2 = irows - 1;
+ slacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vl[
+ ilo + 1 + ilo * vl_dim1], ldvl);
+ }
+ i__1 = *lwork + 1 - iwrk;
+ sorgqr_(&irows, &irows, &irows, &vl[ilo + ilo * vl_dim1], ldvl, &work[
+ itau], &work[iwrk], &i__1, &ierr);
+ }
+
+/* Initialize VR */
+
+ if (ilvr) {
+ slaset_("Full", n, n, &c_b36, &c_b37, &vr[vr_offset], ldvr)
+ ;
+ }
+
+/* Reduce to generalized Hessenberg form */
+/* (Workspace: none needed) */
+
+ if (ilv) {
+
+/* Eigenvectors requested -- work on whole matrix. */
+
+ sgghrd_(jobvl, jobvr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset],
+ ldb, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, &ierr);
+ } else {
+ sgghrd_("N", "N", &irows, &c__1, &irows, &a[ilo + ilo * a_dim1], lda,
+ &b[ilo + ilo * b_dim1], ldb, &vl[vl_offset], ldvl, &vr[
+ vr_offset], ldvr, &ierr);
+ }
+
+/* Perform QZ algorithm (Compute eigenvalues, and optionally, the */
+/* Schur forms and Schur vectors) */
+/* (Workspace: need N) */
+
+ iwrk = itau;
+ if (ilv) {
+ *(unsigned char *)chtemp = 'S';
+ } else {
+ *(unsigned char *)chtemp = 'E';
+ }
+ i__1 = *lwork + 1 - iwrk;
+ shgeqz_(chtemp, jobvl, jobvr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+ b_offset], ldb, &alphar[1], &alphai[1], &beta[1], &vl[vl_offset],
+ ldvl, &vr[vr_offset], ldvr, &work[iwrk], &i__1, &ierr);
+ if (ierr != 0) {
+ if (ierr > 0 && ierr <= *n) {
+ *info = ierr;
+ } else if (ierr > *n && ierr <= *n << 1) {
+ *info = ierr - *n;
+ } else {
+ *info = *n + 1;
+ }
+ goto L110;
+ }
+
+/* Compute Eigenvectors */
+/* (Workspace: need 6*N) */
+
+ if (ilv) {
+ if (ilvl) {
+ if (ilvr) {
+ *(unsigned char *)chtemp = 'B';
+ } else {
+ *(unsigned char *)chtemp = 'L';
+ }
+ } else {
+ *(unsigned char *)chtemp = 'R';
+ }
+ stgevc_(chtemp, "B", ldumma, n, &a[a_offset], lda, &b[b_offset], ldb,
+ &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, n, &in, &work[
+ iwrk], &ierr);
+ if (ierr != 0) {
+ *info = *n + 2;
+ goto L110;
+ }
+
+/* Undo balancing on VL and VR and normalization */
+/* (Workspace: none needed) */
+
+ if (ilvl) {
+ sggbak_("P", "L", n, &ilo, &ihi, &work[ileft], &work[iright], n, &
+ vl[vl_offset], ldvl, &ierr);
+ i__1 = *n;
+ for (jc = 1; jc <= i__1; ++jc) {
+ if (alphai[jc] < 0.f) {
+ goto L50;
+ }
+ temp = 0.f;
+ if (alphai[jc] == 0.f) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ r__2 = temp, r__3 = (r__1 = vl[jr + jc * vl_dim1],
+ dabs(r__1));
+ temp = dmax(r__2,r__3);
+/* L10: */
+ }
+ } else {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ r__3 = temp, r__4 = (r__1 = vl[jr + jc * vl_dim1],
+ dabs(r__1)) + (r__2 = vl[jr + (jc + 1) *
+ vl_dim1], dabs(r__2));
+ temp = dmax(r__3,r__4);
+/* L20: */
+ }
+ }
+ if (temp < smlnum) {
+ goto L50;
+ }
+ temp = 1.f / temp;
+ if (alphai[jc] == 0.f) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vl[jr + jc * vl_dim1] *= temp;
+/* L30: */
+ }
+ } else {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vl[jr + jc * vl_dim1] *= temp;
+ vl[jr + (jc + 1) * vl_dim1] *= temp;
+/* L40: */
+ }
+ }
+L50:
+ ;
+ }
+ }
+ if (ilvr) {
+ sggbak_("P", "R", n, &ilo, &ihi, &work[ileft], &work[iright], n, &
+ vr[vr_offset], ldvr, &ierr);
+ i__1 = *n;
+ for (jc = 1; jc <= i__1; ++jc) {
+ if (alphai[jc] < 0.f) {
+ goto L100;
+ }
+ temp = 0.f;
+ if (alphai[jc] == 0.f) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ r__2 = temp, r__3 = (r__1 = vr[jr + jc * vr_dim1],
+ dabs(r__1));
+ temp = dmax(r__2,r__3);
+/* L60: */
+ }
+ } else {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ r__3 = temp, r__4 = (r__1 = vr[jr + jc * vr_dim1],
+ dabs(r__1)) + (r__2 = vr[jr + (jc + 1) *
+ vr_dim1], dabs(r__2));
+ temp = dmax(r__3,r__4);
+/* L70: */
+ }
+ }
+ if (temp < smlnum) {
+ goto L100;
+ }
+ temp = 1.f / temp;
+ if (alphai[jc] == 0.f) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vr[jr + jc * vr_dim1] *= temp;
+/* L80: */
+ }
+ } else {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vr[jr + jc * vr_dim1] *= temp;
+ vr[jr + (jc + 1) * vr_dim1] *= temp;
+/* L90: */
+ }
+ }
+L100:
+ ;
+ }
+ }
+
+/* End of eigenvector calculation */
+
+ }
+
+/* Undo scaling if necessary */
+
+ if (ilascl) {
+ slascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphar[1], n, &
+ ierr);
+ slascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphai[1], n, &
+ ierr);
+ }
+
+ if (ilbscl) {
+ slascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+ ierr);
+ }
+
+L110:
+
+ work[1] = (real) maxwrk;
+
+ return 0;
+
+/* End of SGGEV */
+
+} /* sggev_ */
diff --git a/contrib/libs/clapack/sggevx.c b/contrib/libs/clapack/sggevx.c
new file mode 100644
index 0000000000..7130d3ad03
--- /dev/null
+++ b/contrib/libs/clapack/sggevx.c
@@ -0,0 +1,879 @@
+/* sggevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static real c_b57 = 0.f;
+static real c_b58 = 1.f;
+
+/* Subroutine */ int sggevx_(char *balanc, char *jobvl, char *jobvr, char *
+ sense, integer *n, real *a, integer *lda, real *b, integer *ldb, real
+ *alphar, real *alphai, real *beta, real *vl, integer *ldvl, real *vr,
+ integer *ldvr, integer *ilo, integer *ihi, real *lscale, real *rscale,
+ real *abnrm, real *bbnrm, real *rconde, real *rcondv, real *work,
+ integer *lwork, integer *iwork, logical *bwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, vl_dim1, vl_offset, vr_dim1,
+ vr_offset, i__1, i__2;
+ real r__1, r__2, r__3, r__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, m, jc, in, mm, jr;
+ real eps;
+ logical ilv, pair;
+ real anrm, bnrm;
+ integer ierr, itau;
+ real temp;
+ logical ilvl, ilvr;
+ integer iwrk, iwrk1;
+ extern logical lsame_(char *, char *);
+ integer icols;
+ logical noscl;
+ integer irows;
+ extern /* Subroutine */ int slabad_(real *, real *), sggbak_(char *, char
+ *, integer *, integer *, integer *, real *, real *, integer *,
+ real *, integer *, integer *), sggbal_(char *,
+ integer *, real *, integer *, real *, integer *, integer *,
+ integer *, real *, real *, real *, integer *);
+ logical ilascl, ilbscl;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), sgghrd_(
+ char *, char *, integer *, integer *, integer *, real *, integer *
+, real *, integer *, real *, integer *, real *, integer *,
+ integer *);
+ logical ldumma[1];
+ char chtemp[1];
+ real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern doublereal slange_(char *, integer *, integer *, real *, integer *,
+ real *);
+ integer ijobvl;
+ extern /* Subroutine */ int sgeqrf_(integer *, integer *, real *, integer
+ *, real *, real *, integer *, integer *);
+ integer ijobvr;
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *);
+ logical wantsb;
+ extern /* Subroutine */ int slaset_(char *, integer *, integer *, real *,
+ real *, real *, integer *);
+ real anrmto;
+ logical wantse;
+ real bnrmto;
+ extern /* Subroutine */ int shgeqz_(char *, char *, char *, integer *,
+ integer *, integer *, real *, integer *, real *, integer *, real *
+, real *, real *, real *, integer *, real *, integer *, real *,
+ integer *, integer *), stgevc_(char *,
+ char *, logical *, integer *, real *, integer *, real *, integer *
+, real *, integer *, real *, integer *, integer *, integer *,
+ real *, integer *), stgsna_(char *, char *,
+ logical *, integer *, real *, integer *, real *, integer *, real *
+, integer *, real *, integer *, real *, real *, integer *,
+ integer *, real *, integer *, integer *, integer *);
+ integer minwrk, maxwrk;
+ logical wantsn;
+ real smlnum;
+ extern /* Subroutine */ int sorgqr_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *);
+ logical lquery, wantsv;
+ extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGGEVX computes for a pair of N-by-N real nonsymmetric matrices (A,B) */
+/* the generalized eigenvalues, and optionally, the left and/or right */
+/* generalized eigenvectors. */
+
+/* Optionally also, it computes a balancing transformation to improve */
+/* the conditioning of the eigenvalues and eigenvectors (ILO, IHI, */
+/* LSCALE, RSCALE, ABNRM, and BBNRM), reciprocal condition numbers for */
+/* the eigenvalues (RCONDE), and reciprocal condition numbers for the */
+/* right eigenvectors (RCONDV). */
+
+/* A generalized eigenvalue for a pair of matrices (A,B) is a scalar */
+/* lambda or a ratio alpha/beta = lambda, such that A - lambda*B is */
+/* singular. It is usually represented as the pair (alpha,beta), as */
+/* there is a reasonable interpretation for beta=0, and even for both */
+/* being zero. */
+
+/* The right eigenvector v(j) corresponding to the eigenvalue lambda(j) */
+/* of (A,B) satisfies */
+
+/* A * v(j) = lambda(j) * B * v(j) . */
+
+/* The left eigenvector u(j) corresponding to the eigenvalue lambda(j) */
+/* of (A,B) satisfies */
+
+/* u(j)**H * A = lambda(j) * u(j)**H * B. */
+
+/* where u(j)**H is the conjugate-transpose of u(j). */
+
+
+/* Arguments */
+/* ========= */
+
+/* BALANC (input) CHARACTER*1 */
+/* Specifies the balance option to be performed. */
+/* = 'N': do not diagonally scale or permute; */
+/* = 'P': permute only; */
+/* = 'S': scale only; */
+/* = 'B': both permute and scale. */
+/* Computed reciprocal condition numbers will be for the */
+/* matrices after permuting and/or balancing. Permuting does */
+/* not change condition numbers (in exact arithmetic), but */
+/* balancing does. */
+
+/* JOBVL (input) CHARACTER*1 */
+/* = 'N': do not compute the left generalized eigenvectors; */
+/* = 'V': compute the left generalized eigenvectors. */
+
+/* JOBVR (input) CHARACTER*1 */
+/* = 'N': do not compute the right generalized eigenvectors; */
+/* = 'V': compute the right generalized eigenvectors. */
+
+/* SENSE (input) CHARACTER*1 */
+/* Determines which reciprocal condition numbers are computed. */
+/* = 'N': none are computed; */
+/* = 'E': computed for eigenvalues only; */
+/* = 'V': computed for eigenvectors only; */
+/* = 'B': computed for eigenvalues and eigenvectors. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VL, and VR. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA, N) */
+/* On entry, the matrix A in the pair (A,B). */
+/* On exit, A has been overwritten. If JOBVL='V' or JOBVR='V' */
+/* or both, then A contains the first part of the real Schur */
+/* form of the "balanced" versions of the input A and B. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) REAL array, dimension (LDB, N) */
+/* On entry, the matrix B in the pair (A,B). */
+/* On exit, B has been overwritten. If JOBVL='V' or JOBVR='V' */
+/* or both, then B contains the second part of the real Schur */
+/* form of the "balanced" versions of the input A and B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* ALPHAR (output) REAL array, dimension (N) */
+/* ALPHAI (output) REAL array, dimension (N) */
+/* BETA (output) REAL array, dimension (N) */
+/* On exit, (ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N, will */
+/* be the generalized eigenvalues. If ALPHAI(j) is zero, then */
+/* the j-th eigenvalue is real; if positive, then the j-th and */
+/* (j+1)-st eigenvalues are a complex conjugate pair, with */
+/* ALPHAI(j+1) negative. */
+
+/* Note: the quotients ALPHAR(j)/BETA(j) and ALPHAI(j)/BETA(j) */
+/* may easily over- or underflow, and BETA(j) may even be zero. */
+/* Thus, the user should avoid naively computing the ratio */
+/* ALPHA/BETA. However, ALPHAR and ALPHAI will be always less */
+/* than and usually comparable with norm(A) in magnitude, and */
+/* BETA always less than and usually comparable with norm(B). */
+
+/* VL (output) REAL array, dimension (LDVL,N) */
+/* If JOBVL = 'V', the left eigenvectors u(j) are stored one */
+/* after another in the columns of VL, in the same order as */
+/* their eigenvalues. If the j-th eigenvalue is real, then */
+/* u(j) = VL(:,j), the j-th column of VL. If the j-th and */
+/* (j+1)-th eigenvalues form a complex conjugate pair, then */
+/* u(j) = VL(:,j)+i*VL(:,j+1) and u(j+1) = VL(:,j)-i*VL(:,j+1). */
+/* Each eigenvector will be scaled so the largest component have */
+/* abs(real part) + abs(imag. part) = 1. */
+/* Not referenced if JOBVL = 'N'. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the matrix VL. LDVL >= 1, and */
+/* if JOBVL = 'V', LDVL >= N. */
+
+/* VR (output) REAL array, dimension (LDVR,N) */
+/* If JOBVR = 'V', the right eigenvectors v(j) are stored one */
+/* after another in the columns of VR, in the same order as */
+/* their eigenvalues. If the j-th eigenvalue is real, then */
+/* v(j) = VR(:,j), the j-th column of VR. If the j-th and */
+/* (j+1)-th eigenvalues form a complex conjugate pair, then */
+/* v(j) = VR(:,j)+i*VR(:,j+1) and v(j+1) = VR(:,j)-i*VR(:,j+1). */
+/* Each eigenvector will be scaled so the largest component have */
+/* abs(real part) + abs(imag. part) = 1. */
+/* Not referenced if JOBVR = 'N'. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the matrix VR. LDVR >= 1, and */
+/* if JOBVR = 'V', LDVR >= N. */
+
+/* ILO (output) INTEGER */
+/* IHI (output) INTEGER */
+/* ILO and IHI are integer values such that on exit */
+/* A(i,j) = 0 and B(i,j) = 0 if i > j and */
+/* j = 1,...,ILO-1 or i = IHI+1,...,N. */
+/* If BALANC = 'N' or 'S', ILO = 1 and IHI = N. */
+
+/* LSCALE (output) REAL array, dimension (N) */
+/* Details of the permutations and scaling factors applied */
+/* to the left side of A and B. If PL(j) is the index of the */
+/* row interchanged with row j, and DL(j) is the scaling */
+/* factor applied to row j, then */
+/* LSCALE(j) = PL(j) for j = 1,...,ILO-1 */
+/* = DL(j) for j = ILO,...,IHI */
+/* = PL(j) for j = IHI+1,...,N. */
+/* The order in which the interchanges are made is N to IHI+1, */
+/* then 1 to ILO-1. */
+
+/* RSCALE (output) REAL array, dimension (N) */
+/* Details of the permutations and scaling factors applied */
+/* to the right side of A and B. If PR(j) is the index of the */
+/* column interchanged with column j, and DR(j) is the scaling */
+/* factor applied to column j, then */
+/* RSCALE(j) = PR(j) for j = 1,...,ILO-1 */
+/* = DR(j) for j = ILO,...,IHI */
+/* = PR(j) for j = IHI+1,...,N */
+/* The order in which the interchanges are made is N to IHI+1, */
+/* then 1 to ILO-1. */
+
+/* ABNRM (output) REAL */
+/* The one-norm of the balanced matrix A. */
+
+/* BBNRM (output) REAL */
+/* The one-norm of the balanced matrix B. */
+
+/* RCONDE (output) REAL array, dimension (N) */
+/* If SENSE = 'E' or 'B', the reciprocal condition numbers of */
+/* the eigenvalues, stored in consecutive elements of the array. */
+/* For a complex conjugate pair of eigenvalues two consecutive */
+/* elements of RCONDE are set to the same value. Thus RCONDE(j), */
+/* RCONDV(j), and the j-th columns of VL and VR all correspond */
+/* to the j-th eigenpair. */
+/* If SENSE = 'N' or 'V', RCONDE is not referenced. */
+
+/* RCONDV (output) REAL array, dimension (N) */
+/* If SENSE = 'V' or 'B', the estimated reciprocal condition */
+/* numbers of the eigenvectors, stored in consecutive elements */
+/* of the array. For a complex eigenvector two consecutive */
+/* elements of RCONDV are set to the same value. If the */
+/* eigenvalues cannot be reordered to compute RCONDV(j), */
+/* RCONDV(j) is set to 0; this can only occur when the true */
+/* value would be very small anyway. */
+/* If SENSE = 'N' or 'E', RCONDV is not referenced. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,2*N). */
+/* If BALANC = 'S' or 'B', or JOBVL = 'V', or JOBVR = 'V', */
+/* LWORK >= max(1,6*N). */
+/* If SENSE = 'E', LWORK >= max(1,10*N). */
+/* If SENSE = 'V' or 'B', LWORK >= 2*N*N+8*N+16. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (N+6) */
+/* If SENSE = 'E', IWORK is not referenced. */
+
+/* BWORK (workspace) LOGICAL array, dimension (N) */
+/* If SENSE = 'N', BWORK is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1,...,N: */
+/* The QZ iteration failed. No eigenvectors have been */
+/* calculated, but ALPHAR(j), ALPHAI(j), and BETA(j) */
+/* should be correct for j=INFO+1,...,N. */
+/* > N: =N+1: other than QZ iteration failed in SHGEQZ. */
+/* =N+2: error return from STGEVC. */
+
+/* Further Details */
+/* =============== */
+
+/* Balancing a matrix pair (A,B) includes, first, permuting rows and */
+/* columns to isolate eigenvalues, second, applying diagonal similarity */
+/* transformation to the rows and columns to make the rows and columns */
+/* as close in norm as possible. The computed reciprocal condition */
+/* numbers correspond to the balanced matrix. Permuting rows and columns */
+/* will not change the condition numbers (in exact arithmetic) but */
+/* diagonal scaling will. For further explanation of balancing, see */
+/* section 4.11.1.2 of LAPACK Users' Guide. */
+
+/* An approximate error bound on the chordal distance between the i-th */
+/* computed generalized eigenvalue w and the corresponding exact */
+/* eigenvalue lambda is */
+
+/* chord(w, lambda) <= EPS * norm(ABNRM, BBNRM) / RCONDE(I) */
+
+/* An approximate error bound for the angle between the i-th computed */
+/* eigenvector VL(i) or VR(i) is given by */
+
+/* EPS * norm(ABNRM, BBNRM) / DIF(i). */
+
+/* For further explanation of the reciprocal condition numbers RCONDE */
+/* and RCONDV, see section 4.11 of LAPACK User's Guide. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alphar;
+ --alphai;
+ --beta;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --lscale;
+ --rscale;
+ --rconde;
+ --rcondv;
+ --work;
+ --iwork;
+ --bwork;
+
+ /* Function Body */
+ if (lsame_(jobvl, "N")) {
+ ijobvl = 1;
+ ilvl = FALSE_;
+ } else if (lsame_(jobvl, "V")) {
+ ijobvl = 2;
+ ilvl = TRUE_;
+ } else {
+ ijobvl = -1;
+ ilvl = FALSE_;
+ }
+
+ if (lsame_(jobvr, "N")) {
+ ijobvr = 1;
+ ilvr = FALSE_;
+ } else if (lsame_(jobvr, "V")) {
+ ijobvr = 2;
+ ilvr = TRUE_;
+ } else {
+ ijobvr = -1;
+ ilvr = FALSE_;
+ }
+ ilv = ilvl || ilvr;
+
+ noscl = lsame_(balanc, "N") || lsame_(balanc, "P");
+ wantsn = lsame_(sense, "N");
+ wantse = lsame_(sense, "E");
+ wantsv = lsame_(sense, "V");
+ wantsb = lsame_(sense, "B");
+
+/* Test the input arguments */
+
+ *info = 0;
+ lquery = *lwork == -1;
+ if (! (noscl || lsame_(balanc, "S") || lsame_(
+ balanc, "B"))) {
+ *info = -1;
+ } else if (ijobvl <= 0) {
+ *info = -2;
+ } else if (ijobvr <= 0) {
+ *info = -3;
+ } else if (! (wantsn || wantse || wantsb || wantsv)) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldvl < 1 || ilvl && *ldvl < *n) {
+ *info = -14;
+ } else if (*ldvr < 1 || ilvr && *ldvr < *n) {
+ *info = -16;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV. The workspace is */
+/* computed assuming ILO = 1 and IHI = N, the worst case.) */
+
+ if (*info == 0) {
+ if (*n == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ } else {
+ if (noscl && ! ilv) {
+ minwrk = *n << 1;
+ } else {
+ minwrk = *n * 6;
+ }
+ if (wantse) {
+ minwrk = *n * 10;
+ } else if (wantsv || wantsb) {
+ minwrk = (*n << 1) * (*n + 4) + 16;
+ }
+ maxwrk = minwrk;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "SGEQRF", " ", n, &
+ c__1, n, &c__0);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "SORMQR", " ", n, &
+ c__1, n, &c__0);
+ maxwrk = max(i__1,i__2);
+ if (ilvl) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "SORGQR",
+ " ", n, &c__1, n, &c__0);
+ maxwrk = max(i__1,i__2);
+ }
+ }
+ work[1] = (real) maxwrk;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -26;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGGEVX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = slange_("M", n, n, &a[a_offset], lda, &work[1]);
+ ilascl = FALSE_;
+ if (anrm > 0.f && anrm < smlnum) {
+ anrmto = smlnum;
+ ilascl = TRUE_;
+ } else if (anrm > bignum) {
+ anrmto = bignum;
+ ilascl = TRUE_;
+ }
+ if (ilascl) {
+ slascl_("G", &c__0, &c__0, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+ bnrm = slange_("M", n, n, &b[b_offset], ldb, &work[1]);
+ ilbscl = FALSE_;
+ if (bnrm > 0.f && bnrm < smlnum) {
+ bnrmto = smlnum;
+ ilbscl = TRUE_;
+ } else if (bnrm > bignum) {
+ bnrmto = bignum;
+ ilbscl = TRUE_;
+ }
+ if (ilbscl) {
+ slascl_("G", &c__0, &c__0, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+ ierr);
+ }
+
+/* Permute and/or balance the matrix pair (A,B) */
+/* (Workspace: need 6*N if BALANC = 'S' or 'B', 1 otherwise) */
+
+ sggbal_(balanc, n, &a[a_offset], lda, &b[b_offset], ldb, ilo, ihi, &
+ lscale[1], &rscale[1], &work[1], &ierr);
+
+/* Compute ABNRM and BBNRM */
+
+ *abnrm = slange_("1", n, n, &a[a_offset], lda, &work[1]);
+ if (ilascl) {
+ work[1] = *abnrm;
+ slascl_("G", &c__0, &c__0, &anrmto, &anrm, &c__1, &c__1, &work[1], &
+ c__1, &ierr);
+ *abnrm = work[1];
+ }
+
+ *bbnrm = slange_("1", n, n, &b[b_offset], ldb, &work[1]);
+ if (ilbscl) {
+ work[1] = *bbnrm;
+ slascl_("G", &c__0, &c__0, &bnrmto, &bnrm, &c__1, &c__1, &work[1], &
+ c__1, &ierr);
+ *bbnrm = work[1];
+ }
+
+/* Reduce B to triangular form (QR decomposition of B) */
+/* (Workspace: need N, prefer N*NB ) */
+
+ irows = *ihi + 1 - *ilo;
+ if (ilv || ! wantsn) {
+ icols = *n + 1 - *ilo;
+ } else {
+ icols = irows;
+ }
+ itau = 1;
+ iwrk = itau + irows;
+ i__1 = *lwork + 1 - iwrk;
+ sgeqrf_(&irows, &icols, &b[*ilo + *ilo * b_dim1], ldb, &work[itau], &work[
+ iwrk], &i__1, &ierr);
+
+/* Apply the orthogonal transformation to A */
+/* (Workspace: need N, prefer N*NB) */
+
+ i__1 = *lwork + 1 - iwrk;
+ sormqr_("L", "T", &irows, &icols, &irows, &b[*ilo + *ilo * b_dim1], ldb, &
+ work[itau], &a[*ilo + *ilo * a_dim1], lda, &work[iwrk], &i__1, &
+ ierr);
+
+/* Initialize VL and/or VR */
+/* (Workspace: need N, prefer N*NB) */
+
+ if (ilvl) {
+ slaset_("Full", n, n, &c_b57, &c_b58, &vl[vl_offset], ldvl)
+ ;
+ if (irows > 1) {
+ i__1 = irows - 1;
+ i__2 = irows - 1;
+ slacpy_("L", &i__1, &i__2, &b[*ilo + 1 + *ilo * b_dim1], ldb, &vl[
+ *ilo + 1 + *ilo * vl_dim1], ldvl);
+ }
+ i__1 = *lwork + 1 - iwrk;
+ sorgqr_(&irows, &irows, &irows, &vl[*ilo + *ilo * vl_dim1], ldvl, &
+ work[itau], &work[iwrk], &i__1, &ierr);
+ }
+
+ if (ilvr) {
+ slaset_("Full", n, n, &c_b57, &c_b58, &vr[vr_offset], ldvr)
+ ;
+ }
+
+/* Reduce to generalized Hessenberg form */
+/* (Workspace: none needed) */
+
+ if (ilv || ! wantsn) {
+
+/* Eigenvectors requested -- work on whole matrix. */
+
+ sgghrd_(jobvl, jobvr, n, ilo, ihi, &a[a_offset], lda, &b[b_offset],
+ ldb, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, &ierr);
+ } else {
+ sgghrd_("N", "N", &irows, &c__1, &irows, &a[*ilo + *ilo * a_dim1],
+ lda, &b[*ilo + *ilo * b_dim1], ldb, &vl[vl_offset], ldvl, &vr[
+ vr_offset], ldvr, &ierr);
+ }
+
+/* Perform QZ algorithm (Compute eigenvalues, and optionally, the */
+/* Schur forms and Schur vectors) */
+/* (Workspace: need N) */
+
+ if (ilv || ! wantsn) {
+ *(unsigned char *)chtemp = 'S';
+ } else {
+ *(unsigned char *)chtemp = 'E';
+ }
+
+ shgeqz_(chtemp, jobvl, jobvr, n, ilo, ihi, &a[a_offset], lda, &b[b_offset]
+, ldb, &alphar[1], &alphai[1], &beta[1], &vl[vl_offset], ldvl, &
+ vr[vr_offset], ldvr, &work[1], lwork, &ierr);
+ if (ierr != 0) {
+ if (ierr > 0 && ierr <= *n) {
+ *info = ierr;
+ } else if (ierr > *n && ierr <= *n << 1) {
+ *info = ierr - *n;
+ } else {
+ *info = *n + 1;
+ }
+ goto L130;
+ }
+
+/* Compute Eigenvectors and estimate condition numbers if desired */
+/* (Workspace: STGEVC: need 6*N */
+/* STGSNA: need 2*N*(N+2)+16 if SENSE = 'V' or 'B', */
+/* need N otherwise ) */
+
+ if (ilv || ! wantsn) {
+ if (ilv) {
+ if (ilvl) {
+ if (ilvr) {
+ *(unsigned char *)chtemp = 'B';
+ } else {
+ *(unsigned char *)chtemp = 'L';
+ }
+ } else {
+ *(unsigned char *)chtemp = 'R';
+ }
+
+ stgevc_(chtemp, "B", ldumma, n, &a[a_offset], lda, &b[b_offset],
+ ldb, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, n, &in, &
+ work[1], &ierr);
+ if (ierr != 0) {
+ *info = *n + 2;
+ goto L130;
+ }
+ }
+
+ if (! wantsn) {
+
+/* compute eigenvectors (STGEVC) and estimate condition */
+/* numbers (STGSNA). Note that the definition of the condition */
+/* number is not invariant under transformation (u,v) to */
+/* (Q*u, Z*v), where (u,v) are eigenvectors of the generalized */
+/* Schur form (S,T), Q and Z are orthogonal matrices. In order */
+/* to avoid using extra 2*N*N workspace, we have to recalculate */
+/* eigenvectors and estimate one condition numbers at a time. */
+
+ pair = FALSE_;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+ if (pair) {
+ pair = FALSE_;
+ goto L20;
+ }
+ mm = 1;
+ if (i__ < *n) {
+ if (a[i__ + 1 + i__ * a_dim1] != 0.f) {
+ pair = TRUE_;
+ mm = 2;
+ }
+ }
+
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ bwork[j] = FALSE_;
+/* L10: */
+ }
+ if (mm == 1) {
+ bwork[i__] = TRUE_;
+ } else if (mm == 2) {
+ bwork[i__] = TRUE_;
+ bwork[i__ + 1] = TRUE_;
+ }
+
+ iwrk = mm * *n + 1;
+ iwrk1 = iwrk + mm * *n;
+
+/* Compute a pair of left and right eigenvectors. */
+/* (compute workspace: need up to 4*N + 6*N) */
+
+ if (wantse || wantsb) {
+ stgevc_("B", "S", &bwork[1], n, &a[a_offset], lda, &b[
+ b_offset], ldb, &work[1], n, &work[iwrk], n, &mm,
+ &m, &work[iwrk1], &ierr);
+ if (ierr != 0) {
+ *info = *n + 2;
+ goto L130;
+ }
+ }
+
+ i__2 = *lwork - iwrk1 + 1;
+ stgsna_(sense, "S", &bwork[1], n, &a[a_offset], lda, &b[
+ b_offset], ldb, &work[1], n, &work[iwrk], n, &rconde[
+ i__], &rcondv[i__], &mm, &m, &work[iwrk1], &i__2, &
+ iwork[1], &ierr);
+
+L20:
+ ;
+ }
+ }
+ }
+
+/* Undo balancing on VL and VR and normalization */
+/* (Workspace: none needed) */
+
+ if (ilvl) {
+ sggbak_(balanc, "L", n, ilo, ihi, &lscale[1], &rscale[1], n, &vl[
+ vl_offset], ldvl, &ierr);
+
+ i__1 = *n;
+ for (jc = 1; jc <= i__1; ++jc) {
+ if (alphai[jc] < 0.f) {
+ goto L70;
+ }
+ temp = 0.f;
+ if (alphai[jc] == 0.f) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ r__2 = temp, r__3 = (r__1 = vl[jr + jc * vl_dim1], dabs(
+ r__1));
+ temp = dmax(r__2,r__3);
+/* L30: */
+ }
+ } else {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ r__3 = temp, r__4 = (r__1 = vl[jr + jc * vl_dim1], dabs(
+ r__1)) + (r__2 = vl[jr + (jc + 1) * vl_dim1],
+ dabs(r__2));
+ temp = dmax(r__3,r__4);
+/* L40: */
+ }
+ }
+ if (temp < smlnum) {
+ goto L70;
+ }
+ temp = 1.f / temp;
+ if (alphai[jc] == 0.f) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vl[jr + jc * vl_dim1] *= temp;
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vl[jr + jc * vl_dim1] *= temp;
+ vl[jr + (jc + 1) * vl_dim1] *= temp;
+/* L60: */
+ }
+ }
+L70:
+ ;
+ }
+ }
+ if (ilvr) {
+ sggbak_(balanc, "R", n, ilo, ihi, &lscale[1], &rscale[1], n, &vr[
+ vr_offset], ldvr, &ierr);
+ i__1 = *n;
+ for (jc = 1; jc <= i__1; ++jc) {
+ if (alphai[jc] < 0.f) {
+ goto L120;
+ }
+ temp = 0.f;
+ if (alphai[jc] == 0.f) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ r__2 = temp, r__3 = (r__1 = vr[jr + jc * vr_dim1], dabs(
+ r__1));
+ temp = dmax(r__2,r__3);
+/* L80: */
+ }
+ } else {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ r__3 = temp, r__4 = (r__1 = vr[jr + jc * vr_dim1], dabs(
+ r__1)) + (r__2 = vr[jr + (jc + 1) * vr_dim1],
+ dabs(r__2));
+ temp = dmax(r__3,r__4);
+/* L90: */
+ }
+ }
+ if (temp < smlnum) {
+ goto L120;
+ }
+ temp = 1.f / temp;
+ if (alphai[jc] == 0.f) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vr[jr + jc * vr_dim1] *= temp;
+/* L100: */
+ }
+ } else {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vr[jr + jc * vr_dim1] *= temp;
+ vr[jr + (jc + 1) * vr_dim1] *= temp;
+/* L110: */
+ }
+ }
+L120:
+ ;
+ }
+ }
+
+/* Undo scaling if necessary */
+
+ if (ilascl) {
+ slascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphar[1], n, &
+ ierr);
+ slascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alphai[1], n, &
+ ierr);
+ }
+
+ if (ilbscl) {
+ slascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+ ierr);
+ }
+
+L130:
+ work[1] = (real) maxwrk;
+
+ return 0;
+
+/* End of SGGEVX */
+
+} /* sggevx_ */
diff --git a/contrib/libs/clapack/sggglm.c b/contrib/libs/clapack/sggglm.c
new file mode 100644
index 0000000000..254ea8c882
--- /dev/null
+++ b/contrib/libs/clapack/sggglm.c
@@ -0,0 +1,326 @@
+/* sggglm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b32 = -1.f;
+static real c_b34 = 1.f;
+
+/* Subroutine */ int sggglm_(integer *n, integer *m, integer *p, real *a, 
+	integer *lda, real *b, integer *ldb, real *d__, real *x, real *y, 
+	real *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, nb, np, nb1, nb2, nb3, nb4, lopt;
+    extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *, 
+	    real *, integer *, real *, integer *, real *, real *, integer *), scopy_(integer *, real *, integer *, real *, integer *), 
+	    xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    extern /* Subroutine */ int sggqrf_(integer *, integer *, integer *, real 
+	    *, integer *, real *, real *, integer *, real *, real *, integer *
+, integer *);
+    integer lwkmin, lwkopt;
+    logical lquery;
+    extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *, 
+	    integer *, real *, integer *, real *, real *, integer *, real *, 
+	    integer *, integer *), sormrq_(char *, char *, 
+	    integer *, integer *, integer *, real *, integer *, real *, real *
+, integer *, real *, integer *, integer *), 
+	    strtrs_(char *, char *, char *, integer *, integer *, real *, 
+	    integer *, real *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGGGLM solves a general Gauss-Markov linear model (GLM) problem: */
+
+/* minimize || y ||_2 subject to d = A*x + B*y */
+/* x */
+
+/* where A is an N-by-M matrix, B is an N-by-P matrix, and d is a */
+/* given N-vector. It is assumed that M <= N <= M+P, and */
+
+/* rank(A) = M and rank( A B ) = N. */
+
+/* Under these assumptions, the constrained equation is always */
+/* consistent, and there is a unique solution x and a minimal 2-norm */
+/* solution y, which is obtained using a generalized QR factorization */
+/* of the matrices (A, B) given by */
+
+/* A = Q*(R), B = Q*T*Z. */
+/* (0) */
+
+/* In particular, if matrix B is square nonsingular, then the problem */
+/* GLM is equivalent to the following weighted linear least squares */
+/* problem */
+
+/* minimize || inv(B)*(d-A*x) ||_2 */
+/* x */
+
+/* where inv(B) denotes the inverse of B. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of rows of the matrices A and B. N >= 0. */
+
+/* M (input) INTEGER */
+/* The number of columns of the matrix A. 0 <= M <= N. */
+
+/* P (input) INTEGER */
+/* The number of columns of the matrix B. P >= N-M. */
+
+/* A (input/output) REAL array, dimension (LDA,M) */
+/* On entry, the N-by-M matrix A. */
+/* On exit, the upper triangular part of the array A contains */
+/* the M-by-M upper triangular matrix R. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) REAL array, dimension (LDB,P) */
+/* On entry, the N-by-P matrix B. */
+/* On exit, if N <= P, the upper triangle of the subarray */
+/* B(1:N,P-N+1:P) contains the N-by-N upper triangular matrix T; */
+/* if N > P, the elements on and above the (N-P)th subdiagonal */
+/* contain the N-by-P upper trapezoidal matrix T. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, D is the left hand side of the GLM equation. */
+/* On exit, D is destroyed. */
+
+/* X (output) REAL array, dimension (M) */
+/* Y (output) REAL array, dimension (P) */
+/* On exit, X and Y are the solutions of the GLM problem. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N+M+P). */
+/* For optimum performance, LWORK >= M+min(N,P)+max(N,P)*NB, */
+/* where NB is an upper bound for the optimal blocksizes for */
+/* SGEQRF, SGERQF, SORMQR and SORMRQ. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1: the upper triangular factor R associated with A in the */
+/* generalized QR factorization of the pair (A, B) is */
+/* singular, so that rank(A) < M; the least squares */
+/* solution could not be computed. */
+/* = 2: the bottom (N-M) by (N-M) part of the upper trapezoidal */
+/* factor T associated with B in the generalized QR */
+/* factorization of the pair (A, B) is singular, so that */
+/* rank( A B ) < N; the least squares solution could not */
+/* be computed. */
+
+/* =================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+/* f2c translation note: all arrays are shifted to 1-based Fortran */
+/* indexing here; A and B additionally get their column stride folded */
+/* into the pointer so that a[i + j*a_dim1] addresses A(i,j). */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --d__;
+    --x;
+    --y;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+/* NP = min(N,P): it is the reflector count K handed to SORMRQ below and */
+/* sizes the TAUB segment of the WORK partition described before SGGQRF. */
+    np = min(*n,*p);
+    lquery = *lwork == -1;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*m < 0 || *m > *n) {
+	*info = -2;
+    } else if (*p < 0 || *p < *n - *m) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+
+/* Calculate workspace */
+
+    if (*info == 0) {
+	if (*n == 0) {
+	    lwkmin = 1;
+	    lwkopt = 1;
+	} else {
+/* NB = largest optimal block size among the four factorization/update */
+/* kernels used below; the optimal LWORK is built from it. */
+	    nb1 = ilaenv_(&c__1, "SGEQRF", " ", n, m, &c_n1, &c_n1);
+	    nb2 = ilaenv_(&c__1, "SGERQF", " ", n, m, &c_n1, &c_n1);
+	    nb3 = ilaenv_(&c__1, "SORMQR", " ", n, m, p, &c_n1);
+	    nb4 = ilaenv_(&c__1, "SORMRQ", " ", n, m, p, &c_n1);
+/* Computing MAX */
+	    i__1 = max(nb1,nb2), i__1 = max(i__1,nb3);
+	    nb = max(i__1,nb4);
+	    lwkmin = *m + *n + *p;
+	    lwkopt = *m + np + max(*n,*p) * nb;
+	}
+	work[1] = (real) lwkopt;
+
+	if (*lwork < lwkmin && ! lquery) {
+	    *info = -12;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGGGLM", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Compute the GQR factorization of matrices A and B: */
+
+/* Q'*A = ( R11 ) M, Q'*B*Z' = ( T11 T12 ) M */
+/* ( 0 ) N-M ( 0 T22 ) N-M */
+/* M M+P-N N-M */
+
+/* where R11 and T22 are upper triangular, and Q and Z are */
+/* orthogonal. */
+
+/* WORK partition used from here on (see SGGQRF's interface): */
+/* WORK(1:M) = TAUA, the QR reflector scalars for A, */
+/* WORK(M+1:M+NP) = TAUB, the RQ reflector scalars for B, */
+/* WORK(M+NP+1:LWORK) = scratch for the blocked routines. */
+
+    i__1 = *lwork - *m - np;
+    sggqrf_(n, m, p, &a[a_offset], lda, &work[1], &b[b_offset], ldb, &work[*m 
+	    + 1], &work[*m + np + 1], &i__1, info);
+/* First scratch entry returns the optimal scratch size found by SGGQRF; */
+/* the assignment truncates the real value to integer. */
+    lopt = work[*m + np + 1];
+
+/* Update left-hand-side vector d = Q'*d = ( d1 ) M */
+/* ( d2 ) N-M */
+
+    i__1 = max(1,*n);
+    i__2 = *lwork - *m - np;
+    sormqr_("Left", "Transpose", n, &c__1, m, &a[a_offset], lda, &work[1], &
+	    d__[1], &i__1, &work[*m + np + 1], &i__2, info);
+/* Computing MAX */
+    i__1 = lopt, i__2 = (integer) work[*m + np + 1];
+    lopt = max(i__1,i__2);
+
+/* Solve T22*y2 = d2 for y2 */
+
+    if (*n > *m) {
+	i__1 = *n - *m;
+	i__2 = *n - *m;
+	strtrs_("Upper", "No transpose", "Non unit", &i__1, &c__1, &b[*m + 1 
+		+ (*m + *p - *n + 1) * b_dim1], ldb, &d__[*m + 1], &i__2, 
+		info);
+
+/* STRTRS reports a singular diagonal via INFO > 0; map it to the */
+/* documented INFO = 1 ("rank(A) < M") return code. */
+	if (*info > 0) {
+	    *info = 1;
+	    return 0;
+	}
+
+/* y2 occupies the trailing N-M entries Y(M+P-N+1:P). */
+	i__1 = *n - *m;
+	scopy_(&i__1, &d__[*m + 1], &c__1, &y[*m + *p - *n + 1], &c__1);
+    }
+
+/* Set y1 = 0 */
+
+    i__1 = *m + *p - *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	y[i__] = 0.f;
+/* L10: */
+    }
+
+/* Update d1 = d1 - T12*y2 */
+
+    i__1 = *n - *m;
+    sgemv_("No transpose", m, &i__1, &c_b32, &b[(*m + *p - *n + 1) * b_dim1 + 
+	    1], ldb, &y[*m + *p - *n + 1], &c__1, &c_b34, &d__[1], &c__1);
+
+/* Solve triangular system: R11*x = d1 */
+
+    if (*m > 0) {
+	strtrs_("Upper", "No Transpose", "Non unit", m, &c__1, &a[a_offset], 
+		lda, &d__[1], m, info);
+
+/* Singular R11: map STRTRS's INFO > 0 to the documented INFO = 2 code. */
+	if (*info > 0) {
+	    *info = 2;
+	    return 0;
+	}
+
+/* Copy D to X */
+
+	scopy_(m, &d__[1], &c__1, &x[1], &c__1);
+    }
+
+/* Backward transformation y = Z'*y */
+
+/* Computing MAX */
+    i__1 = 1, i__2 = *n - *p + 1;
+    i__3 = max(1,*p);
+    i__4 = *lwork - *m - np;
+    sormrq_("Left", "Transpose", p, &c__1, &np, &b[max(i__1, i__2)+ b_dim1], 
+	    ldb, &work[*m + 1], &y[1], &i__3, &work[*m + np + 1], &i__4, info);
+/* Return the total optimal LWORK in WORK(1): the M+NP fixed slots plus */
+/* the largest scratch requirement observed across the calls above. */
+/* Computing MAX */
+    i__1 = lopt, i__2 = (integer) work[*m + np + 1];
+    work[1] = (real) (*m + np + max(i__1,i__2));
+
+    return 0;
+
+/* End of SGGGLM */
+
+} /* sggglm_ */
diff --git a/contrib/libs/clapack/sgghrd.c b/contrib/libs/clapack/sgghrd.c
new file mode 100644
index 0000000000..c78893c68f
--- /dev/null
+++ b/contrib/libs/clapack/sgghrd.c
@@ -0,0 +1,329 @@
+/* sgghrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b10 = 0.f;
+static real c_b11 = 1.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int sgghrd_(char *compq, char *compz, integer *n, integer *
+	ilo, integer *ihi, real *a, integer *lda, real *b, integer *ldb, real 
+	*q, integer *ldq, real *z__, integer *ldz, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1, 
+	    z_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    real c__, s;
+    logical ilq, ilz;
+    integer jcol;
+    real temp;
+    integer jrow;
+    extern /* Subroutine */ int srot_(integer *, real *, integer *, real *, 
+	    integer *, real *, real *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    integer icompq;
+    extern /* Subroutine */ int slaset_(char *, integer *, integer *, real *, 
+	    real *, real *, integer *), slartg_(real *, real *, real *
+, real *, real *);
+    integer icompz;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGGHRD reduces a pair of real matrices (A,B) to generalized upper */
+/* Hessenberg form using orthogonal transformations, where A is a */
+/* general matrix and B is upper triangular. The form of the */
+/* generalized eigenvalue problem is */
+/* A*x = lambda*B*x, */
+/* and B is typically made upper triangular by computing its QR */
+/* factorization and moving the orthogonal matrix Q to the left side */
+/* of the equation. */
+
+/* This subroutine simultaneously reduces A to a Hessenberg matrix H: */
+/* Q**T*A*Z = H */
+/* and transforms B to another upper triangular matrix T: */
+/* Q**T*B*Z = T */
+/* in order to reduce the problem to its standard form */
+/* H*y = lambda*T*y */
+/* where y = Z**T*x. */
+
+/* The orthogonal matrices Q and Z are determined as products of Givens */
+/* rotations. They may either be formed explicitly, or they may be */
+/* postmultiplied into input matrices Q1 and Z1, so that */
+
+/* Q1 * A * Z1**T = (Q1*Q) * H * (Z1*Z)**T */
+
+/* Q1 * B * Z1**T = (Q1*Q) * T * (Z1*Z)**T */
+
+/* If Q1 is the orthogonal matrix from the QR factorization of B in the */
+/* original equation A*x = lambda*B*x, then SGGHRD reduces the original */
+/* problem to generalized Hessenberg form. */
+
+/* Arguments */
+/* ========= */
+
+/* COMPQ (input) CHARACTER*1 */
+/* = 'N': do not compute Q; */
+/* = 'I': Q is initialized to the unit matrix, and the */
+/* orthogonal matrix Q is returned; */
+/* = 'V': Q must contain an orthogonal matrix Q1 on entry, */
+/* and the product Q1*Q is returned. */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': do not compute Z; */
+/* = 'I': Z is initialized to the unit matrix, and the */
+/* orthogonal matrix Z is returned; */
+/* = 'V': Z must contain an orthogonal matrix Z1 on entry, */
+/* and the product Z1*Z is returned. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* ILO and IHI mark the rows and columns of A which are to be */
+/* reduced. It is assumed that A is already upper triangular */
+/* in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are */
+/* normally set by a previous call to SGGBAL; otherwise they */
+/* should be set to 1 and N respectively. */
+/* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/* A (input/output) REAL array, dimension (LDA, N) */
+/* On entry, the N-by-N general matrix to be reduced. */
+/* On exit, the upper triangle and the first subdiagonal of A */
+/* are overwritten with the upper Hessenberg matrix H, and the */
+/* rest is set to zero. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) REAL array, dimension (LDB, N) */
+/* On entry, the N-by-N upper triangular matrix B. */
+/* On exit, the upper triangular matrix T = Q**T B Z. The */
+/* elements below the diagonal are set to zero. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* Q (input/output) REAL array, dimension (LDQ, N) */
+/* On entry, if COMPQ = 'V', the orthogonal matrix Q1, */
+/* typically from the QR factorization of B. */
+/* On exit, if COMPQ='I', the orthogonal matrix Q, and if */
+/* COMPQ = 'V', the product Q1*Q. */
+/* Not referenced if COMPQ='N'. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. */
+/* LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise. */
+
+/* Z (input/output) REAL array, dimension (LDZ, N) */
+/* On entry, if COMPZ = 'V', the orthogonal matrix Z1. */
+/* On exit, if COMPZ='I', the orthogonal matrix Z, and if */
+/* COMPZ = 'V', the product Z1*Z. */
+/* Not referenced if COMPZ='N'. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. */
+/* LDZ >= N if COMPZ='V' or 'I'; LDZ >= 1 otherwise. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* This routine reduces A to Hessenberg and B to triangular form by */
+/* an unblocked reduction, as described in _Matrix_Computations_, */
+/* by Golub and Van Loan (Johns Hopkins Press.) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode COMPQ */
+
+/* f2c translation note: arrays are shifted to 1-based Fortran indexing; */
+/* a[i + j*a_dim1] addresses A(i,j), and similarly for B, Q and Z. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+
+    /* Function Body */
+/* ICOMPQ encodes COMPQ: 1 = 'N', 2 = 'V', 3 = 'I'; 0 marks an illegal */
+/* value and is rejected below.  ILQ is true whenever Q is accumulated. */
+    if (lsame_(compq, "N")) {
+	ilq = FALSE_;
+	icompq = 1;
+    } else if (lsame_(compq, "V")) {
+	ilq = TRUE_;
+	icompq = 2;
+    } else if (lsame_(compq, "I")) {
+	ilq = TRUE_;
+	icompq = 3;
+    } else {
+	icompq = 0;
+    }
+
+/* Decode COMPZ */
+
+/* Same encoding as ICOMPQ above, for the Z side. */
+    if (lsame_(compz, "N")) {
+	ilz = FALSE_;
+	icompz = 1;
+    } else if (lsame_(compz, "V")) {
+	ilz = TRUE_;
+	icompz = 2;
+    } else if (lsame_(compz, "I")) {
+	ilz = TRUE_;
+	icompz = 3;
+    } else {
+	icompz = 0;
+    }
+
+/* Test the input parameters. */
+
+    *info = 0;
+    if (icompq <= 0) {
+	*info = -1;
+    } else if (icompz <= 0) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ilo < 1) {
+	*info = -4;
+/* IHI = ILO-1 is permitted: it denotes an empty reduction range, */
+/* matching the N=0 convention documented above. */
+    } else if (*ihi > *n || *ihi < *ilo - 1) {
+	*info = -5;
+    } else if (*lda < max(1,*n)) {
+	*info = -7;
+    } else if (*ldb < max(1,*n)) {
+	*info = -9;
+    } else if (ilq && *ldq < *n || *ldq < 1) {
+	*info = -11;
+    } else if (ilz && *ldz < *n || *ldz < 1) {
+	*info = -13;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGGHRD", &i__1);
+	return 0;
+    }
+
+/* Initialize Q and Z if desired. */
+
+    if (icompq == 3) {
+	slaset_("Full", n, n, &c_b10, &c_b11, &q[q_offset], ldq);
+    }
+    if (icompz == 3) {
+	slaset_("Full", n, n, &c_b10, &c_b11, &z__[z_offset], ldz);
+    }
+
+/* Quick return if possible */
+
+    if (*n <= 1) {
+	return 0;
+    }
+
+/* Zero out lower triangle of B */
+
+/* B is documented to be upper triangular on entry; explicitly clear the */
+/* strictly lower triangle so the exit state matches the documentation. */
+    i__1 = *n - 1;
+    for (jcol = 1; jcol <= i__1; ++jcol) {
+	i__2 = *n;
+	for (jrow = jcol + 1; jrow <= i__2; ++jrow) {
+	    b[jrow + jcol * b_dim1] = 0.f;
+/* L10: */
+	}
+/* L20: */
+    }
+
+/* Reduce A and B */
+
+    i__1 = *ihi - 2;
+    for (jcol = *ilo; jcol <= i__1; ++jcol) {
+
+/* Work up the column: annihilate A(JROW,JCOL) for JROW = IHI down */
+/* to JCOL+2, leaving the subdiagonal entry A(JCOL+1,JCOL) in place. */
+	i__2 = jcol + 2;
+	for (jrow = *ihi; jrow >= i__2; --jrow) {
+
+/* Step 1: rotate rows JROW-1, JROW to kill A(JROW,JCOL) */
+
+	    temp = a[jrow - 1 + jcol * a_dim1];
+	    slartg_(&temp, &a[jrow + jcol * a_dim1], &c__, &s, &a[jrow - 1 + 
+		    jcol * a_dim1]);
+	    a[jrow + jcol * a_dim1] = 0.f;
+	    i__3 = *n - jcol;
+	    srot_(&i__3, &a[jrow - 1 + (jcol + 1) * a_dim1], lda, &a[jrow + (
+		    jcol + 1) * a_dim1], lda, &c__, &s);
+	    i__3 = *n + 2 - jrow;
+	    srot_(&i__3, &b[jrow - 1 + (jrow - 1) * b_dim1], ldb, &b[jrow + (
+		    jrow - 1) * b_dim1], ldb, &c__, &s);
+	    if (ilq) {
+		srot_(n, &q[(jrow - 1) * q_dim1 + 1], &c__1, &q[jrow * q_dim1 
+			+ 1], &c__1, &c__, &s);
+	    }
+
+/* Step 2: rotate columns JROW, JROW-1 to kill B(JROW,JROW-1) */
+
+/* The row rotation of Step 1 is the only operation that can make */
+/* B(JROW,JROW-1) nonzero; this column rotation restores B's triangle. */
+	    temp = b[jrow + jrow * b_dim1];
+	    slartg_(&temp, &b[jrow + (jrow - 1) * b_dim1], &c__, &s, &b[jrow 
+		    + jrow * b_dim1]);
+	    b[jrow + (jrow - 1) * b_dim1] = 0.f;
+	    srot_(ihi, &a[jrow * a_dim1 + 1], &c__1, &a[(jrow - 1) * a_dim1 + 
+		    1], &c__1, &c__, &s);
+	    i__3 = jrow - 1;
+	    srot_(&i__3, &b[jrow * b_dim1 + 1], &c__1, &b[(jrow - 1) * b_dim1 
+		    + 1], &c__1, &c__, &s);
+	    if (ilz) {
+		srot_(n, &z__[jrow * z_dim1 + 1], &c__1, &z__[(jrow - 1) * 
+			z_dim1 + 1], &c__1, &c__, &s);
+	    }
+/* L30: */
+	}
+/* L40: */
+    }
+
+    return 0;
+
+/* End of SGGHRD */
+
+} /* sgghrd_ */
diff --git a/contrib/libs/clapack/sgglse.c b/contrib/libs/clapack/sgglse.c
new file mode 100644
index 0000000000..fb87eb9e18
--- /dev/null
+++ b/contrib/libs/clapack/sgglse.c
@@ -0,0 +1,334 @@
+/* sgglse.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b31 = -1.f;
+static real c_b33 = 1.f;
+
+/* Subroutine */ int sgglse_(integer *m, integer *n, integer *p, real *a,
+	integer *lda, real *b, integer *ldb, real *c__, real *d__, real *x,
+	real *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+
+    /* Local variables */
+    integer nb, mn, nr, nb1, nb2, nb3, nb4, lopt;
+    extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+	    real *, integer *, real *, integer *, real *, real *, integer *), scopy_(integer *, real *, integer *, real *, integer *),
+	    saxpy_(integer *, real *, real *, integer *, real *, integer *),
+	    strmv_(char *, char *, char *, integer *, real *, integer *, real
+	    *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int sggrqf_(integer *, integer *, integer *, real
+	    *, integer *, real *, real *, integer *, real *, real *, integer *
+, integer *);
+    integer lwkmin, lwkopt;
+    logical lquery;
+    extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *,
+	    integer *, real *, integer *, real *, real *, integer *, real *,
+	    integer *, integer *), sormrq_(char *, char *,
+	    integer *, integer *, integer *, real *, integer *, real *, real *
+, integer *, real *, integer *, integer *),
+	    strtrs_(char *, char *, char *, integer *, integer *, real *,
+	    integer *, real *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGGLSE solves the linear equality-constrained least squares (LSE) */
+/* problem: */
+
+/* minimize || c - A*x ||_2 subject to B*x = d */
+
+/* where A is an M-by-N matrix, B is a P-by-N matrix, c is a given */
+/* M-vector, and d is a given P-vector. It is assumed that */
+/* P <= N <= M+P, and */
+
+/* rank(B) = P and rank( (A) ) = N. */
+/* ( (B) ) */
+
+/* These conditions ensure that the LSE problem has a unique solution, */
+/* which is obtained using a generalized RQ factorization of the */
+/* matrices (B, A) given by */
+
+/* B = (0 R)*Q, A = Z*T*Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrices A and B. N >= 0. */
+
+/* P (input) INTEGER */
+/* The number of rows of the matrix B. 0 <= P <= N <= M+P. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(M,N)-by-N upper trapezoidal matrix T. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) REAL array, dimension (LDB,N) */
+/* On entry, the P-by-N matrix B. */
+/* On exit, the upper triangle of the subarray B(1:P,N-P+1:N) */
+/* contains the P-by-P upper triangular matrix R. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,P). */
+
+/* C (input/output) REAL array, dimension (M) */
+/* On entry, C contains the right hand side vector for the */
+/* least squares part of the LSE problem. */
+/* On exit, the residual sum of squares for the solution */
+/* is given by the sum of squares of elements N-P+1 to M of */
+/* vector C. */
+
+/* D (input/output) REAL array, dimension (P) */
+/* On entry, D contains the right hand side vector for the */
+/* constrained equation. */
+/* On exit, D is destroyed. */
+
+/* X (output) REAL array, dimension (N) */
+/* On exit, X is the solution of the LSE problem. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M+N+P). */
+/* For optimum performance LWORK >= P+min(M,N)+max(M,N)*NB, */
+/* where NB is an upper bound for the optimal blocksizes for */
+/* SGEQRF, SGERQF, SORMQR and SORMRQ. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1: the upper triangular factor R associated with B in the */
+/* generalized RQ factorization of the pair (B, A) is */
+/* singular, so that rank(B) < P; the least squares */
+/* solution could not be computed. */
+/* = 2: the (N-P) by (N-P) part of the upper trapezoidal factor */
+/* T associated with A in the generalized RQ factorization */
+/* of the pair (B, A) is singular, so that */
+/* rank( (A) ) < N; the least squares solution could not */
+/* ( (B) ) */
+/* be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments (f2c: shift pointers so 1-based Fortran indexing works) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --c__;
+    --d__;
+    --x;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    mn = min(*m,*n);
+    lquery = *lwork == -1; /* LWORK = -1 requests a workspace-size query only */
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*p < 0 || *p > *n || *p < *n - *m) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    } else if (*ldb < max(1,*p)) {
+	*info = -7;
+    }
+
+/* Calculate workspace */
+
+    if (*info == 0) {
+	if (*n == 0) {
+	    lwkmin = 1;
+	    lwkopt = 1;
+	} else {
+	    nb1 = ilaenv_(&c__1, "SGEQRF", " ", m, n, &c_n1, &c_n1);
+	    nb2 = ilaenv_(&c__1, "SGERQF", " ", m, n, &c_n1, &c_n1);
+	    nb3 = ilaenv_(&c__1, "SORMQR", " ", m, n, p, &c_n1);
+	    nb4 = ilaenv_(&c__1, "SORMRQ", " ", m, n, p, &c_n1);
+/* Computing MAX */
+	    i__1 = max(nb1,nb2), i__1 = max(i__1,nb3);
+	    nb = max(i__1,nb4);
+	    lwkmin = *m + *n + *p;
+	    lwkopt = *p + mn + max(*m,*n) * nb;
+	}
+	work[1] = (real) lwkopt;
+
+	if (*lwork < lwkmin && ! lquery) {
+	    *info = -12;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGGLSE", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Compute the GRQ factorization of matrices B and A: */
+
+/* B*Q' = ( 0 T12 ) P Z'*A*Q' = ( R11 R12 ) N-P */
+/* N-P P ( 0 R22 ) M+P-N */
+/* N-P P */
+
+/* where T12 and R11 are upper triangular, and Q and Z are */
+/* orthogonal. */
+
+    i__1 = *lwork - *p - mn;
+    sggrqf_(p, m, n, &b[b_offset], ldb, &work[1], &a[a_offset], lda, &work[*p
+	    + 1], &work[*p + mn + 1], &i__1, info);
+    lopt = work[*p + mn + 1]; /* f2c: implicit real -> integer truncation of WORK(P+MN+1) */
+
+/* Update c = Z'*c = ( c1 ) N-P */
+/* ( c2 ) M+P-N */
+
+    i__1 = max(1,*m);
+    i__2 = *lwork - *p - mn;
+    sormqr_("Left", "Transpose", m, &c__1, &mn, &a[a_offset], lda, &work[*p +
+	    1], &c__[1], &i__1, &work[*p + mn + 1], &i__2, info);
+/* Computing MAX */
+    i__1 = lopt, i__2 = (integer) work[*p + mn + 1];
+    lopt = max(i__1,i__2);
+
+/* Solve T12*x2 = d for x2 */
+
+    if (*p > 0) {
+	strtrs_("Upper", "No transpose", "Non-unit", p, &c__1, &b[(*n - *p +
+		1) * b_dim1 + 1], ldb, &d__[1], p, info);
+
+	if (*info > 0) {
+	    *info = 1; /* R (factor of B) is singular: rank(B) < P */
+	    return 0;
+	}
+
+/* Put the solution in X */
+
+	scopy_(p, &d__[1], &c__1, &x[*n - *p + 1], &c__1);
+
+/* Update c1 */
+
+	i__1 = *n - *p;
+	sgemv_("No transpose", &i__1, p, &c_b31, &a[(*n - *p + 1) * a_dim1 +
+		1], lda, &d__[1], &c__1, &c_b33, &c__[1], &c__1);
+    }
+
+/* Solve R11*x1 = c1 for x1 */
+
+    if (*n > *p) {
+	i__1 = *n - *p;
+	i__2 = *n - *p;
+	strtrs_("Upper", "No transpose", "Non-unit", &i__1, &c__1, &a[
+		a_offset], lda, &c__[1], &i__2, info);
+
+	if (*info > 0) {
+	    *info = 2; /* R11 (leading part of T) is singular: rank([A;B]) < N */
+	    return 0;
+	}
+
+/* Put the solution in X */
+
+	i__1 = *n - *p;
+	scopy_(&i__1, &c__[1], &c__1, &x[1], &c__1);
+    }
+
+/* Compute the residual vector: */
+
+    if (*m < *n) {
+	nr = *m + *p - *n; /* order of the trailing block R22 (see factorization sketch above) */
+	if (nr > 0) {
+	    i__1 = *n - *m;
+	    sgemv_("No transpose", &nr, &i__1, &c_b31, &a[*n - *p + 1 + (*m +
+		    1) * a_dim1], lda, &d__[nr + 1], &c__1, &c_b33, &c__[*n -
+		    *p + 1], &c__1);
+	}
+    } else {
+	nr = *p;
+    }
+    if (nr > 0) {
+	strmv_("Upper", "No transpose", "Non unit", &nr, &a[*n - *p + 1 + (*n
+		- *p + 1) * a_dim1], lda, &d__[1], &c__1);
+	saxpy_(&nr, &c_b31, &d__[1], &c__1, &c__[*n - *p + 1], &c__1);
+    }
+
+/* Backward transformation x = Q'*x */
+
+    i__1 = *lwork - *p - mn;
+    sormrq_("Left", "Transpose", n, &c__1, p, &b[b_offset], ldb, &work[1], &x[
+	    1], n, &work[*p + mn + 1], &i__1, info);
+/* Computing MAX */
+    i__1 = lopt, i__2 = (integer) work[*p + mn + 1];
+    work[1] = (real) (*p + mn + max(i__1,i__2)); /* report the optimal LWORK */
+
+    return 0;
+
+/* End of SGGLSE */
+
+} /* sgglse_ */
diff --git a/contrib/libs/clapack/sggqrf.c b/contrib/libs/clapack/sggqrf.c
new file mode 100644
index 0000000000..c8ba376c2f
--- /dev/null
+++ b/contrib/libs/clapack/sggqrf.c
@@ -0,0 +1,268 @@
+/* sggqrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int sggqrf_(integer *n, integer *m, integer *p, real *a,
+	integer *lda, real *taua, real *b, integer *ldb, real *taub, real *
+	work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+
+    /* Local variables */
+    integer nb, nb1, nb2, nb3, lopt;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int sgeqrf_(integer *, integer *, real *, integer
+	    *, real *, real *, integer *, integer *), sgerqf_(integer *,
+	    integer *, real *, integer *, real *, real *, integer *, integer *
+);
+    integer lwkopt;
+    logical lquery;
+    extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *,
+	    integer *, real *, integer *, real *, real *, integer *, real *,
+	    integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGGQRF computes a generalized QR factorization of an N-by-M matrix A */
+/* and an N-by-P matrix B: */
+
+/* A = Q*R, B = Q*T*Z, */
+
+/* where Q is an N-by-N orthogonal matrix, Z is a P-by-P orthogonal */
+/* matrix, and R and T assume one of the forms: */
+
+/* if N >= M, R = ( R11 ) M , or if N < M, R = ( R11 R12 ) N, */
+/* ( 0 ) N-M N M-N */
+/* M */
+
+/* where R11 is upper triangular, and */
+
+/* if N <= P, T = ( 0 T12 ) N, or if N > P, T = ( T11 ) N-P, */
+/* P-N N ( T21 ) P */
+/* P */
+
+/* where T12 or T21 is upper triangular. */
+
+/* In particular, if B is square and nonsingular, the GQR factorization */
+/* of A and B implicitly gives the QR factorization of inv(B)*A: */
+
+/* inv(B)*A = Z'*(inv(T)*R) */
+
+/* where inv(B) denotes the inverse of the matrix B, and Z' denotes the */
+/* transpose of the matrix Z. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of rows of the matrices A and B. N >= 0. */
+
+/* M (input) INTEGER */
+/* The number of columns of the matrix A. M >= 0. */
+
+/* P (input) INTEGER */
+/* The number of columns of the matrix B. P >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,M) */
+/* On entry, the N-by-M matrix A. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(N,M)-by-M upper trapezoidal matrix R (R is */
+/* upper triangular if N >= M); the elements below the diagonal, */
+/* with the array TAUA, represent the orthogonal matrix Q as a */
+/* product of min(N,M) elementary reflectors (see Further */
+/* Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAUA (output) REAL array, dimension (min(N,M)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix Q (see Further Details). */
+
+/* B (input/output) REAL array, dimension (LDB,P) */
+/* On entry, the N-by-P matrix B. */
+/* On exit, if N <= P, the upper triangle of the subarray */
+/* B(1:N,P-N+1:P) contains the N-by-N upper triangular matrix T; */
+/* if N > P, the elements on and above the (N-P)-th subdiagonal */
+/* contain the N-by-P upper trapezoidal matrix T; the remaining */
+/* elements, with the array TAUB, represent the orthogonal */
+/* matrix Z as a product of elementary reflectors (see Further */
+/* Details). */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* TAUB (output) REAL array, dimension (min(N,P)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix Z (see Further Details). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N,M,P). */
+/* For optimum performance LWORK >= max(N,M,P)*max(NB1,NB2,NB3), */
+/* where NB1 is the optimal blocksize for the QR factorization */
+/* of an N-by-M matrix, NB2 is the optimal blocksize for the */
+/* RQ factorization of an N-by-P matrix, and NB3 is the optimal */
+/* blocksize for a call of SORMQR. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(n,m). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - taua * v * v' */
+
+/* where taua is a real scalar, and v is a real vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i+1:n,i), */
+/* and taua in TAUA(i). */
+/* To form Q explicitly, use LAPACK subroutine SORGQR. */
+/* To use Q to update another matrix, use LAPACK subroutine SORMQR. */
+
+/* The matrix Z is represented as a product of elementary reflectors */
+
+/* Z = H(1) H(2) . . . H(k), where k = min(n,p). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - taub * v * v' */
+
+/* where taub is a real scalar, and v is a real vector with */
+/* v(p-k+i+1:p) = 0 and v(p-k+i) = 1; v(1:p-k+i-1) is stored on exit in */
+/* B(n-k+i,1:p-k+i-1), and taub in TAUB(i). */
+/* To form Z explicitly, use LAPACK subroutine SORGRQ. */
+/* To use Z to update another matrix, use LAPACK subroutine SORMRQ. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments (f2c: shift pointers so 1-based Fortran indexing works) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --taua;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --taub;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    nb1 = ilaenv_(&c__1, "SGEQRF", " ", n, m, &c_n1, &c_n1);
+    nb2 = ilaenv_(&c__1, "SGERQF", " ", n, p, &c_n1, &c_n1);
+    nb3 = ilaenv_(&c__1, "SORMQR", " ", n, m, p, &c_n1);
+/* Computing MAX */
+    i__1 = max(nb1,nb2);
+    nb = max(i__1,nb3);
+/* Computing MAX */
+    i__1 = max(*n,*m);
+    lwkopt = max(i__1,*p) * nb;
+    work[1] = (real) lwkopt;
+    lquery = *lwork == -1; /* LWORK = -1 requests a workspace-size query only */
+    if (*n < 0) {
+	*info = -1;
+    } else if (*m < 0) {
+	*info = -2;
+    } else if (*p < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    } else /* if(complicated condition) */ {
+/* Computing MAX */
+	i__1 = max(1,*n), i__1 = max(i__1,*m);
+	if (*lwork < max(i__1,*p) && ! lquery) {
+	    *info = -11;
+	}
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGGQRF", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* QR factorization of N-by-M matrix A: A = Q*R */
+
+    sgeqrf_(n, m, &a[a_offset], lda, &taua[1], &work[1], lwork, info);
+    lopt = work[1]; /* f2c: implicit real -> integer truncation of WORK(1) */
+
+/* Update B := Q'*B. */
+
+    i__1 = min(*n,*m);
+    sormqr_("Left", "Transpose", n, p, &i__1, &a[a_offset], lda, &taua[1], &b[
+	    b_offset], ldb, &work[1], lwork, info);
+/* Computing MAX */
+    i__1 = lopt, i__2 = (integer) work[1];
+    lopt = max(i__1,i__2);
+
+/* RQ factorization of N-by-P matrix B: B = T*Z. */
+
+    sgerqf_(n, p, &b[b_offset], ldb, &taub[1], &work[1], lwork, info);
+/* Computing MAX */
+    i__1 = lopt, i__2 = (integer) work[1];
+    work[1] = (real) max(i__1,i__2); /* report the optimal LWORK */
+
+    return 0;
+
+/* End of SGGQRF */
+
+} /* sggqrf_ */
diff --git a/contrib/libs/clapack/sggrqf.c b/contrib/libs/clapack/sggrqf.c
new file mode 100644
index 0000000000..2979920c73
--- /dev/null
+++ b/contrib/libs/clapack/sggrqf.c
@@ -0,0 +1,269 @@
+/* sggrqf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int sggrqf_(integer *m, integer *p, integer *n, real *a,
+	integer *lda, real *taua, real *b, integer *ldb, real *taub, real *
+	work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer nb, nb1, nb2, nb3, lopt;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int sgeqrf_(integer *, integer *, real *, integer
+	    *, real *, real *, integer *, integer *), sgerqf_(integer *,
+	    integer *, real *, integer *, real *, real *, integer *, integer *
+);
+    integer lwkopt;
+    logical lquery;
+    extern /* Subroutine */ int sormrq_(char *, char *, integer *, integer *,
+	    integer *, real *, integer *, real *, real *, integer *, real *,
+	    integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGGRQF computes a generalized RQ factorization of an M-by-N matrix A */
+/* and a P-by-N matrix B: */
+
+/* A = R*Q, B = Z*T*Q, */
+
+/* where Q is an N-by-N orthogonal matrix, Z is a P-by-P orthogonal */
+/* matrix, and R and T assume one of the forms: */
+
+/* if M <= N, R = ( 0 R12 ) M, or if M > N, R = ( R11 ) M-N, */
+/* N-M M ( R21 ) N */
+/* N */
+
+/* where R12 or R21 is upper triangular, and */
+
+/* if P >= N, T = ( T11 ) N , or if P < N, T = ( T11 T12 ) P, */
+/* ( 0 ) P-N P N-P */
+/* N */
+
+/* where T11 is upper triangular. */
+
+/* In particular, if B is square and nonsingular, the GRQ factorization */
+/* of A and B implicitly gives the RQ factorization of A*inv(B): */
+
+/* A*inv(B) = (R*inv(T))*Z' */
+
+/* where inv(B) denotes the inverse of the matrix B, and Z' denotes the */
+/* transpose of the matrix Z. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* P (input) INTEGER */
+/* The number of rows of the matrix B. P >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrices A and B. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, if M <= N, the upper triangle of the subarray */
+/* A(1:M,N-M+1:N) contains the M-by-M upper triangular matrix R; */
+/* if M > N, the elements on and above the (M-N)-th subdiagonal */
+/* contain the M-by-N upper trapezoidal matrix R; the remaining */
+/* elements, with the array TAUA, represent the orthogonal */
+/* matrix Q as a product of elementary reflectors (see Further */
+/* Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAUA (output) REAL array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix Q (see Further Details). */
+
+/* B (input/output) REAL array, dimension (LDB,N) */
+/* On entry, the P-by-N matrix B. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(P,N)-by-N upper trapezoidal matrix T (T is */
+/* upper triangular if P >= N); the elements below the diagonal, */
+/* with the array TAUB, represent the orthogonal matrix Z as a */
+/* product of elementary reflectors (see Further Details). */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,P). */
+
+/* TAUB (output) REAL array, dimension (min(P,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix Z (see Further Details). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N,M,P). */
+/* For optimum performance LWORK >= max(N,M,P)*max(NB1,NB2,NB3), */
+/* where NB1 is the optimal blocksize for the RQ factorization */
+/* of an M-by-N matrix, NB2 is the optimal blocksize for the */
+/* QR factorization of a P-by-N matrix, and NB3 is the optimal */
+/* blocksize for a call of SORMRQ. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - taua * v * v' */
+
+/* where taua is a real scalar, and v is a real vector with */
+/* v(n-k+i+1:n) = 0 and v(n-k+i) = 1; v(1:n-k+i-1) is stored on exit in */
+/* A(m-k+i,1:n-k+i-1), and taua in TAUA(i). */
+/* To form Q explicitly, use LAPACK subroutine SORGRQ. */
+/* To use Q to update another matrix, use LAPACK subroutine SORMRQ. */
+
+/* The matrix Z is represented as a product of elementary reflectors */
+
+/* Z = H(1) H(2) . . . H(k), where k = min(p,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - taub * v * v' */
+
+/* where taub is a real scalar, and v is a real vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:p) is stored on exit in B(i+1:p,i), */
+/* and taub in TAUB(i). */
+/* To form Z explicitly, use LAPACK subroutine SORGQR. */
+/* To use Z to update another matrix, use LAPACK subroutine SORMQR. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments (f2c: shift pointers so 1-based Fortran indexing works) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --taua;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --taub;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    nb1 = ilaenv_(&c__1, "SGERQF", " ", m, n, &c_n1, &c_n1);
+    nb2 = ilaenv_(&c__1, "SGEQRF", " ", p, n, &c_n1, &c_n1);
+    nb3 = ilaenv_(&c__1, "SORMRQ", " ", m, n, p, &c_n1);
+/* Computing MAX */
+    i__1 = max(nb1,nb2);
+    nb = max(i__1,nb3);
+/* Computing MAX */
+    i__1 = max(*n,*m);
+    lwkopt = max(i__1,*p) * nb;
+    work[1] = (real) lwkopt;
+    lquery = *lwork == -1; /* LWORK = -1 requests a workspace-size query only */
+    if (*m < 0) {
+	*info = -1;
+    } else if (*p < 0) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*m)) {
+	*info = -5;
+    } else if (*ldb < max(1,*p)) {
+	*info = -8;
+    } else /* if(complicated condition) */ {
+/* Computing MAX */
+	i__1 = max(1,*m), i__1 = max(i__1,*p);
+	if (*lwork < max(i__1,*n) && ! lquery) {
+	    *info = -11;
+	}
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGGRQF", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* RQ factorization of M-by-N matrix A: A = R*Q */
+
+    sgerqf_(m, n, &a[a_offset], lda, &taua[1], &work[1], lwork, info);
+    lopt = work[1]; /* f2c: implicit real -> integer truncation of WORK(1) */
+
+/* Update B := B*Q' */
+
+    i__1 = min(*m,*n);
+/* Computing MAX */
+    i__2 = 1, i__3 = *m - *n + 1;
+    sormrq_("Right", "Transpose", p, n, &i__1, &a[max(i__2, i__3)+ a_dim1],
+	    lda, &taua[1], &b[b_offset], ldb, &work[1], lwork, info);
+/* Computing MAX */
+    i__1 = lopt, i__2 = (integer) work[1];
+    lopt = max(i__1,i__2);
+
+/* QR factorization of P-by-N matrix B: B = Z*T */
+
+    sgeqrf_(p, n, &b[b_offset], ldb, &taub[1], &work[1], lwork, info);
+/* Computing MAX */
+    i__1 = lopt, i__2 = (integer) work[1];
+    work[1] = (real) max(i__1,i__2); /* report the optimal LWORK */
+
+    return 0;
+
+/* End of SGGRQF */
+
+} /* sggrqf_ */
diff --git a/contrib/libs/clapack/sggsvd.c b/contrib/libs/clapack/sggsvd.c
new file mode 100644
index 0000000000..48ad2495d7
--- /dev/null
+++ b/contrib/libs/clapack/sggsvd.c
@@ -0,0 +1,402 @@
+/* sggsvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sggsvd_(char *jobu, char *jobv, char *jobq, integer *m,
+	integer *n, integer *p, integer *k, integer *l, real *a, integer *lda,
+	real *b, integer *ldb, real *alpha, real *beta, real *u, integer *
+	ldu, real *v, integer *ldv, real *q, integer *ldq, real *work,
+	integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, u_dim1,
+	    u_offset, v_dim1, v_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j;
+    real ulp;
+    integer ibnd;
+    real tola;
+    integer isub;
+    real tolb, unfl, temp, smax;
+    extern logical lsame_(char *, char *);
+    real anorm, bnorm;
+    logical wantq;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+	    integer *);
+    logical wantu, wantv;
+    extern doublereal slamch_(char *), slange_(char *, integer *,
+	    integer *, real *, integer *, real *);
+    integer ncycle;
+    extern /* Subroutine */ int xerbla_(char *, integer *), stgsja_(
+	    char *, char *, char *, integer *, integer *, integer *, integer *
+, integer *, real *, integer *, real *, integer *, real *, real *,
+	     real *, real *, real *, integer *, real *, integer *, real *,
+	    integer *, real *, integer *, integer *),
+	    sggsvp_(char *, char *, char *, integer *, integer *, integer *,
+	     real *, integer *, real *, integer *, real *, real *, integer *,
+	    integer *, real *, integer *, real *, integer *, real *, integer *
+, integer *, real *, real *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGGSVD computes the generalized singular value decomposition (GSVD) */
+/* of an M-by-N real matrix A and P-by-N real matrix B: */
+
+/* U'*A*Q = D1*( 0 R ), V'*B*Q = D2*( 0 R ) */
+
+/* where U, V and Q are orthogonal matrices, and Z' is the transpose */
+/* of Z. Let K+L = the effective numerical rank of the matrix (A',B')', */
+/* then R is a K+L-by-K+L nonsingular upper triangular matrix, D1 and */
+/* D2 are M-by-(K+L) and P-by-(K+L) "diagonal" matrices and of the */
+/* following structures, respectively: */
+
+/* If M-K-L >= 0, */
+
+/* K L */
+/* D1 = K ( I 0 ) */
+/* L ( 0 C ) */
+/* M-K-L ( 0 0 ) */
+
+/* K L */
+/* D2 = L ( 0 S ) */
+/* P-L ( 0 0 ) */
+
+/* N-K-L K L */
+/* ( 0 R ) = K ( 0 R11 R12 ) */
+/* L ( 0 0 R22 ) */
+
+/* where */
+
+/* C = diag( ALPHA(K+1), ... , ALPHA(K+L) ), */
+/* S = diag( BETA(K+1), ... , BETA(K+L) ), */
+/* C**2 + S**2 = I. */
+
+/* R is stored in A(1:K+L,N-K-L+1:N) on exit. */
+
+/* If M-K-L < 0, */
+
+/* K M-K K+L-M */
+/* D1 = K ( I 0 0 ) */
+/* M-K ( 0 C 0 ) */
+
+/* K M-K K+L-M */
+/* D2 = M-K ( 0 S 0 ) */
+/* K+L-M ( 0 0 I ) */
+/* P-L ( 0 0 0 ) */
+
+/* N-K-L K M-K K+L-M */
+/* ( 0 R ) = K ( 0 R11 R12 R13 ) */
+/* M-K ( 0 0 R22 R23 ) */
+/* K+L-M ( 0 0 0 R33 ) */
+
+/* where */
+
+/* C = diag( ALPHA(K+1), ... , ALPHA(M) ), */
+/* S = diag( BETA(K+1), ... , BETA(M) ), */
+/* C**2 + S**2 = I. */
+
+/* (R11 R12 R13 ) is stored in A(1:M, N-K-L+1:N), and R33 is stored */
+/* ( 0 R22 R23 ) */
+/* in B(M-K+1:L,N+M-K-L+1:N) on exit. */
+
+/* The routine computes C, S, R, and optionally the orthogonal */
+/* transformation matrices U, V and Q. */
+
+/* In particular, if B is an N-by-N nonsingular matrix, then the GSVD of */
+/* A and B implicitly gives the SVD of A*inv(B): */
+/* A*inv(B) = U*(D1*inv(D2))*V'. */
+/* If ( A',B')' has orthonormal columns, then the GSVD of A and B is */
+/* also equal to the CS decomposition of A and B. Furthermore, the GSVD */
+/* can be used to derive the solution of the eigenvalue problem: */
+/* A'*A x = lambda* B'*B x. */
+/* In some literature, the GSVD of A and B is presented in the form */
+/* U'*A*X = ( 0 D1 ), V'*B*X = ( 0 D2 ) */
+/* where U and V are orthogonal and X is nonsingular, D1 and D2 are */
+/* ``diagonal''. The former GSVD form can be converted to the latter */
+/* form by taking the nonsingular matrix X as */
+
+/* X = Q*( I 0 ) */
+/* ( 0 inv(R) ). */
+
+/* Arguments */
+/* ========= */
+
+/* JOBU (input) CHARACTER*1 */
+/* = 'U': Orthogonal matrix U is computed; */
+/* = 'N': U is not computed. */
+
+/* JOBV (input) CHARACTER*1 */
+/* = 'V': Orthogonal matrix V is computed; */
+/* = 'N': V is not computed. */
+
+/* JOBQ (input) CHARACTER*1 */
+/* = 'Q': Orthogonal matrix Q is computed; */
+/* = 'N': Q is not computed. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrices A and B. N >= 0. */
+
+/* P (input) INTEGER */
+/* The number of rows of the matrix B. P >= 0. */
+
+/* K (output) INTEGER */
+/* L (output) INTEGER */
+/* On exit, K and L specify the dimension of the subblocks */
+/* described in the Purpose section. */
+/* K + L = effective numerical rank of (A',B')'. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A contains the triangular matrix R, or part of R. */
+/* See Purpose for details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) REAL array, dimension (LDB,N) */
+/* On entry, the P-by-N matrix B. */
+/* On exit, B contains the triangular matrix R if M-K-L < 0. */
+/* See Purpose for details. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,P). */
+
+/* ALPHA (output) REAL array, dimension (N) */
+/* BETA (output) REAL array, dimension (N) */
+/* On exit, ALPHA and BETA contain the generalized singular */
+/* value pairs of A and B; */
+/* ALPHA(1:K) = 1, */
+/* BETA(1:K) = 0, */
+/* and if M-K-L >= 0, */
+/* ALPHA(K+1:K+L) = C, */
+/* BETA(K+1:K+L) = S, */
+/* or if M-K-L < 0, */
+/* ALPHA(K+1:M)=C, ALPHA(M+1:K+L)=0 */
+/* BETA(K+1:M) =S, BETA(M+1:K+L) =1 */
+/* and */
+/* ALPHA(K+L+1:N) = 0 */
+/* BETA(K+L+1:N) = 0 */
+
+/* U (output) REAL array, dimension (LDU,M) */
+/* If JOBU = 'U', U contains the M-by-M orthogonal matrix U. */
+/* If JOBU = 'N', U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= max(1,M) if */
+/* JOBU = 'U'; LDU >= 1 otherwise. */
+
+/* V (output) REAL array, dimension (LDV,P) */
+/* If JOBV = 'V', V contains the P-by-P orthogonal matrix V. */
+/* If JOBV = 'N', V is not referenced. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. LDV >= max(1,P) if */
+/* JOBV = 'V'; LDV >= 1 otherwise. */
+
+/* Q (output) REAL array, dimension (LDQ,N) */
+/* If JOBQ = 'Q', Q contains the N-by-N orthogonal matrix Q. */
+/* If JOBQ = 'N', Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N) if */
+/* JOBQ = 'Q'; LDQ >= 1 otherwise. */
+
+/* WORK (workspace) REAL array, */
+/* dimension (max(3*N,M,P)+N) */
+
+/* IWORK (workspace/output) INTEGER array, dimension (N) */
+/* On exit, IWORK stores the sorting information. More */
+/* precisely, the following loop will sort ALPHA */
+/* for I = K+1, min(M,K+L) */
+/* swap ALPHA(I) and ALPHA(IWORK(I)) */
+/* endfor */
+/* such that ALPHA(1) >= ALPHA(2) >= ... >= ALPHA(N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, the Jacobi-type procedure failed to */
+/* converge. For further details, see subroutine STGSJA. */
+
+/* Internal Parameters */
+/* =================== */
+
+/* TOLA REAL */
+/* TOLB REAL */
+/* TOLA and TOLB are the thresholds to determine the effective */
+/* rank of (A',B')'. Generally, they are set to */
+/* TOLA = MAX(M,N)*norm(A)*MACHEPS, */
+/* TOLB = MAX(P,N)*norm(B)*MACHEPS. */
+/* The size of TOLA and TOLB may affect the size of backward */
+/* errors of the decomposition. */
+
+/* Further Details */
+/* =============== */
+
+/* 2-96 Based on modifications by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --alpha;
+    --beta;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1;
+    u -= u_offset;
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    wantu = lsame_(jobu, "U");
+    wantv = lsame_(jobv, "V");
+    wantq = lsame_(jobq, "Q");
+
+    *info = 0;
+    if (! (wantu || lsame_(jobu, "N"))) {
+	*info = -1;
+    } else if (! (wantv || lsame_(jobv, "N"))) {
+	*info = -2;
+    } else if (! (wantq || lsame_(jobq, "N"))) {
+	*info = -3;
+    } else if (*m < 0) {
+	*info = -4;
+    } else if (*n < 0) {
+	*info = -5;
+    } else if (*p < 0) {
+	*info = -6;
+    } else if (*lda < max(1,*m)) {
+	*info = -10;
+    } else if (*ldb < max(1,*p)) {
+	*info = -12;
+    } else if (*ldu < 1 || wantu && *ldu < *m) {
+	*info = -16;
+    } else if (*ldv < 1 || wantv && *ldv < *p) {
+	*info = -18;
+    } else if (*ldq < 1 || wantq && *ldq < *n) {
+	*info = -20;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGGSVD", &i__1);
+	return 0;
+    }
+
+/* Compute the 1-norm of matrices A and B (SLANGE norm selector "1"); */
+
+    anorm = slange_("1", m, n, &a[a_offset], lda, &work[1]);
+    bnorm = slange_("1", p, n, &b[b_offset], ldb, &work[1]);
+
+/* Get machine precision and set up threshold for determining */
+/* the effective numerical rank of the matrices A and B. */
+
+    ulp = slamch_("Precision");	/* relative machine precision */
+    unfl = slamch_("Safe Minimum");	/* smallest safe positive real */
+    tola = max(*m,*n) * dmax(anorm,unfl) * ulp;	/* rank threshold for A */
+    tolb = max(*p,*n) * dmax(bnorm,unfl) * ulp;	/* rank threshold for B */
+
+/* Preprocessing */
+
+    sggsvp_(jobu, jobv, jobq, m, p, n, &a[a_offset], lda, &b[b_offset], ldb, &
+	    tola, &tolb, k, l, &u[u_offset], ldu, &v[v_offset], ldv, &q[
+	    q_offset], ldq, &iwork[1], &work[1], &work[*n + 1], info);
+
+/* Compute the GSVD of two upper "triangular" matrices */
+
+    stgsja_(jobu, jobv, jobq, m, p, n, k, l, &a[a_offset], lda, &b[b_offset],
+	    ldb, &tola, &tolb, &alpha[1], &beta[1], &u[u_offset], ldu, &v[
+	    v_offset], ldv, &q[q_offset], ldq, &work[1], &ncycle, info);
+
+/* Sort the singular values and store the pivot indices in IWORK */
+/* Copy ALPHA to WORK, then sort ALPHA in WORK */
+
+    scopy_(n, &alpha[1], &c__1, &work[1], &c__1);
+/* Computing MIN */
+    i__1 = *l, i__2 = *m - *k;
+    ibnd = min(i__1,i__2);	/* number of values ALPHA(K+1:K+IBND) to sort */
+    i__1 = ibnd;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Scan for largest ALPHA(K+I) */
+
+	isub = i__;
+	smax = work[*k + i__];
+	i__2 = ibnd;
+	for (j = i__ + 1; j <= i__2; ++j) {
+	    temp = work[*k + j];
+	    if (temp > smax) {
+		isub = j;
+		smax = temp;
+	    }
+/* L10: */
+	}
+	if (isub != i__) {
+	    work[*k + isub] = work[*k + i__];
+	    work[*k + i__] = smax;
+	    iwork[*k + i__] = *k + isub;	/* record swap for the caller */
+	} else {
+	    iwork[*k + i__] = *k + i__;	/* already in sorted position */
+	}
+/* L20: */
+    }
+
+    return 0;
+
+/* End of SGGSVD */
+
+} /* sggsvd_ */
diff --git a/contrib/libs/clapack/sggsvp.c b/contrib/libs/clapack/sggsvp.c
new file mode 100644
index 0000000000..97fa0ded0f
--- /dev/null
+++ b/contrib/libs/clapack/sggsvp.c
@@ -0,0 +1,508 @@
+/* sggsvp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b12 = 0.f;
+static real c_b22 = 1.f;
+
+/* Subroutine */ int sggsvp_(char *jobu, char *jobv, char *jobq, integer *m,
+	integer *p, integer *n, real *a, integer *lda, real *b, integer *ldb,
+	real *tola, real *tolb, integer *k, integer *l, real *u, integer *ldu,
+	real *v, integer *ldv, real *q, integer *ldq, integer *iwork, real *
+	tau, real *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, u_dim1,
+	    u_offset, v_dim1, v_offset, i__1, i__2, i__3;
+    real r__1;
+
+    /* Local variables */
+    integer i__, j;
+    extern logical lsame_(char *, char *);
+    logical wantq, wantu, wantv;
+    extern /* Subroutine */ int sgeqr2_(integer *, integer *, real *, integer
+	    *, real *, real *, integer *), sgerq2_(integer *, integer *, real
+	    *, integer *, real *, real *, integer *), sorg2r_(integer *,
+	    integer *, integer *, real *, integer *, real *, real *, integer *
+), sorm2r_(char *, char *, integer *, integer *, integer *, real *
+, integer *, real *, real *, integer *, real *, integer *), sormr2_(char *, char *, integer *, integer *, integer *,
+	    real *, integer *, real *, real *, integer *, real *, integer *), xerbla_(char *, integer *), sgeqpf_(
+	    integer *, integer *, real *, integer *, integer *, real *, real *
+, integer *), slacpy_(char *, integer *, integer *, real *,
+	    integer *, real *, integer *), slaset_(char *, integer *,
+	    integer *, real *, real *, real *, integer *), slapmt_(
+	    logical *, integer *, integer *, real *, integer *, integer *);
+    logical forwrd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGGSVP computes orthogonal matrices U, V and Q such that */
+
+/* N-K-L K L */
+/* U'*A*Q = K ( 0 A12 A13 ) if M-K-L >= 0; */
+/* L ( 0 0 A23 ) */
+/* M-K-L ( 0 0 0 ) */
+
+/* N-K-L K L */
+/* = K ( 0 A12 A13 ) if M-K-L < 0; */
+/* M-K ( 0 0 A23 ) */
+
+/* N-K-L K L */
+/* V'*B*Q = L ( 0 0 B13 ) */
+/* P-L ( 0 0 0 ) */
+
+/* where the K-by-K matrix A12 and L-by-L matrix B13 are nonsingular */
+/* upper triangular; A23 is L-by-L upper triangular if M-K-L >= 0, */
+/* otherwise A23 is (M-K)-by-L upper trapezoidal. K+L = the effective */
+/* numerical rank of the (M+P)-by-N matrix (A',B')'. Z' denotes the */
+/* transpose of Z. */
+
+/* This decomposition is the preprocessing step for computing the */
+/* Generalized Singular Value Decomposition (GSVD), see subroutine */
+/* SGGSVD. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBU (input) CHARACTER*1 */
+/* = 'U': Orthogonal matrix U is computed; */
+/* = 'N': U is not computed. */
+
+/* JOBV (input) CHARACTER*1 */
+/* = 'V': Orthogonal matrix V is computed; */
+/* = 'N': V is not computed. */
+
+/* JOBQ (input) CHARACTER*1 */
+/* = 'Q': Orthogonal matrix Q is computed; */
+/* = 'N': Q is not computed. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* P (input) INTEGER */
+/* The number of rows of the matrix B. P >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrices A and B. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A contains the triangular (or trapezoidal) matrix */
+/* described in the Purpose section. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) REAL array, dimension (LDB,N) */
+/* On entry, the P-by-N matrix B. */
+/* On exit, B contains the triangular matrix described in */
+/* the Purpose section. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,P). */
+
+/* TOLA (input) REAL */
+/* TOLB (input) REAL */
+/* TOLA and TOLB are the thresholds to determine the effective */
+/* numerical rank of matrix B and a subblock of A. Generally, */
+/* they are set to */
+/* TOLA = MAX(M,N)*norm(A)*MACHEPS, */
+/* TOLB = MAX(P,N)*norm(B)*MACHEPS. */
+/* The size of TOLA and TOLB may affect the size of backward */
+/* errors of the decomposition. */
+
+/* K (output) INTEGER */
+/* L (output) INTEGER */
+/* On exit, K and L specify the dimension of the subblocks */
+/* described in Purpose. */
+/* K + L = effective numerical rank of (A',B')'. */
+
+/* U (output) REAL array, dimension (LDU,M) */
+/* If JOBU = 'U', U contains the orthogonal matrix U. */
+/* If JOBU = 'N', U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= max(1,M) if */
+/* JOBU = 'U'; LDU >= 1 otherwise. */
+
+/* V (output) REAL array, dimension (LDV,P) */
+/* If JOBV = 'V', V contains the orthogonal matrix V. */
+/* If JOBV = 'N', V is not referenced. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. LDV >= max(1,P) if */
+/* JOBV = 'V'; LDV >= 1 otherwise. */
+
+/* Q (output) REAL array, dimension (LDQ,N) */
+/* If JOBQ = 'Q', Q contains the orthogonal matrix Q. */
+/* If JOBQ = 'N', Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N) if */
+/* JOBQ = 'Q'; LDQ >= 1 otherwise. */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* TAU (workspace) REAL array, dimension (N) */
+
+/* WORK (workspace) REAL array, dimension (max(3*N,M,P)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+
+/* Further Details */
+/* =============== */
+
+/* The subroutine uses LAPACK subroutine SGEQPF for the QR factorization */
+/* with column pivoting to detect the effective numerical rank of the */
+/* a matrix. It may be replaced by a better rank determination strategy. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1;
+    u -= u_offset;
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    --iwork;
+    --tau;
+    --work;
+
+    /* Function Body */
+    wantu = lsame_(jobu, "U");
+    wantv = lsame_(jobv, "V");
+    wantq = lsame_(jobq, "Q");
+    forwrd = TRUE_;	/* direction flag passed to slapmt_ */
+
+    *info = 0;
+    if (! (wantu || lsame_(jobu, "N"))) {
+	*info = -1;
+    } else if (! (wantv || lsame_(jobv, "N"))) {
+	*info = -2;
+    } else if (! (wantq || lsame_(jobq, "N"))) {
+	*info = -3;
+    } else if (*m < 0) {
+	*info = -4;
+    } else if (*p < 0) {
+	*info = -5;
+    } else if (*n < 0) {
+	*info = -6;
+    } else if (*lda < max(1,*m)) {
+	*info = -8;
+    } else if (*ldb < max(1,*p)) {
+	*info = -10;
+    } else if (*ldu < 1 || wantu && *ldu < *m) {
+	*info = -16;
+    } else if (*ldv < 1 || wantv && *ldv < *p) {
+	*info = -18;
+    } else if (*ldq < 1 || wantq && *ldq < *n) {
+	*info = -20;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SGGSVP", &i__1);
+	return 0;
+    }
+
+/* QR with column pivoting of B: B*P = V*( S11 S12 ) */
+/* ( 0 0 ) */
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	iwork[i__] = 0;
+/* L10: */
+    }
+    sgeqpf_(p, n, &b[b_offset], ldb, &iwork[1], &tau[1], &work[1], info);
+
+/* Update A := A*P */
+
+    slapmt_(&forwrd, m, n, &a[a_offset], lda, &iwork[1]);
+
+/* Determine the effective rank of matrix B. */
+
+    *l = 0;
+    i__1 = min(*p,*n);
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((r__1 = b[i__ + i__ * b_dim1], dabs(r__1)) > *tolb) {
+	    ++(*l);	/* diagonal entry of pivoted QR exceeds TOLB */
+	}
+/* L20: */
+    }
+
+    if (wantv) {
+
+/* Copy the details of V, and form V. */
+
+	slaset_("Full", p, p, &c_b12, &c_b12, &v[v_offset], ldv);
+	if (*p > 1) {
+	    i__1 = *p - 1;
+	    slacpy_("Lower", &i__1, n, &b[b_dim1 + 2], ldb, &v[v_dim1 + 2],
+		    ldv);
+	}
+	i__1 = min(*p,*n);
+	sorg2r_(p, p, &i__1, &v[v_offset], ldv, &tau[1], &work[1], info);
+    }
+
+/* Clean up B */
+
+    i__1 = *l - 1;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *l;
+	for (i__ = j + 1; i__ <= i__2; ++i__) {
+	    b[i__ + j * b_dim1] = 0.f;
+/* L30: */
+	}
+/* L40: */
+    }
+    if (*p > *l) {
+	i__1 = *p - *l;
+	slaset_("Full", &i__1, n, &c_b12, &c_b12, &b[*l + 1 + b_dim1], ldb);
+    }
+
+    if (wantq) {
+
+/* Set Q = I and Update Q := Q*P */
+
+	slaset_("Full", n, n, &c_b12, &c_b22, &q[q_offset], ldq);
+	slapmt_(&forwrd, n, n, &q[q_offset], ldq, &iwork[1]);
+    }
+
+    if (*p >= *l && *n != *l) {
+
+/* RQ factorization of (S11 S12): ( S11 S12 ) = ( 0 S12 )*Z */
+
+	sgerq2_(l, n, &b[b_offset], ldb, &tau[1], &work[1], info);
+
+/* Update A := A*Z' */
+
+	sormr2_("Right", "Transpose", m, n, l, &b[b_offset], ldb, &tau[1], &a[
+		a_offset], lda, &work[1], info);
+
+	if (wantq) {
+
+/* Update Q := Q*Z' */
+
+	    sormr2_("Right", "Transpose", n, n, l, &b[b_offset], ldb, &tau[1],
+		     &q[q_offset], ldq, &work[1], info);
+	}
+
+/* Clean up B */
+
+	i__1 = *n - *l;
+	slaset_("Full", l, &i__1, &c_b12, &c_b12, &b[b_offset], ldb);
+	i__1 = *n;
+	for (j = *n - *l + 1; j <= i__1; ++j) {
+	    i__2 = *l;
+	    for (i__ = j - *n + *l + 1; i__ <= i__2; ++i__) {
+		b[i__ + j * b_dim1] = 0.f;
+/* L50: */
+	    }
+/* L60: */
+	}
+
+    }
+
+/* Let N-L L */
+/* A = ( A11 A12 ) M, */
+
+/* then the following does the complete QR decomposition of A11: */
+
+/* A11 = U*( 0 T12 )*P1' */
+/* ( 0 0 ) */
+
+    i__1 = *n - *l;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	iwork[i__] = 0;
+/* L70: */
+    }
+    i__1 = *n - *l;
+    sgeqpf_(m, &i__1, &a[a_offset], lda, &iwork[1], &tau[1], &work[1], info);
+
+/* Determine the effective rank of A11 */
+
+    *k = 0;
+/* Computing MIN */
+    i__2 = *m, i__3 = *n - *l;
+    i__1 = min(i__2,i__3);
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((r__1 = a[i__ + i__ * a_dim1], dabs(r__1)) > *tola) {
+	    ++(*k);	/* diagonal entry of pivoted QR exceeds TOLA */
+	}
+/* L80: */
+    }
+
+/* Update A12 := U'*A12, where A12 = A( 1:M, N-L+1:N ) */
+
+/* Computing MIN */
+    i__2 = *m, i__3 = *n - *l;
+    i__1 = min(i__2,i__3);
+    sorm2r_("Left", "Transpose", m, l, &i__1, &a[a_offset], lda, &tau[1], &a[(
+	    *n - *l + 1) * a_dim1 + 1], lda, &work[1], info);
+
+    if (wantu) {
+
+/* Copy the details of U, and form U */
+
+	slaset_("Full", m, m, &c_b12, &c_b12, &u[u_offset], ldu);
+	if (*m > 1) {
+	    i__1 = *m - 1;
+	    i__2 = *n - *l;
+	    slacpy_("Lower", &i__1, &i__2, &a[a_dim1 + 2], lda, &u[u_dim1 + 2]
+, ldu);
+	}
+/* Computing MIN */
+	i__2 = *m, i__3 = *n - *l;
+	i__1 = min(i__2,i__3);
+	sorg2r_(m, m, &i__1, &u[u_offset], ldu, &tau[1], &work[1], info);
+    }
+
+    if (wantq) {
+
+/* Update Q( 1:N, 1:N-L ) = Q( 1:N, 1:N-L )*P1 */
+
+	i__1 = *n - *l;
+	slapmt_(&forwrd, n, &i__1, &q[q_offset], ldq, &iwork[1]);
+    }
+
+/* Clean up A: set the strictly lower triangular part of */
+/* A(1:K, 1:K) = 0, and A( K+1:M, 1:N-L ) = 0. */
+
+    i__1 = *k - 1;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *k;
+	for (i__ = j + 1; i__ <= i__2; ++i__) {
+	    a[i__ + j * a_dim1] = 0.f;
+/* L90: */
+	}
+/* L100: */
+    }
+    if (*m > *k) {
+	i__1 = *m - *k;
+	i__2 = *n - *l;
+	slaset_("Full", &i__1, &i__2, &c_b12, &c_b12, &a[*k + 1 + a_dim1],
+		lda);
+    }
+
+    if (*n - *l > *k) {
+
+/* RQ factorization of ( T11 T12 ) = ( 0 T12 )*Z1 */
+
+	i__1 = *n - *l;
+	sgerq2_(k, &i__1, &a[a_offset], lda, &tau[1], &work[1], info);
+
+	if (wantq) {
+
+/* Update Q( 1:N,1:N-L ) = Q( 1:N,1:N-L )*Z1' */
+
+	    i__1 = *n - *l;
+	    sormr2_("Right", "Transpose", n, &i__1, k, &a[a_offset], lda, &
+		    tau[1], &q[q_offset], ldq, &work[1], info);
+	}
+
+/* Clean up A */
+
+	i__1 = *n - *l - *k;
+	slaset_("Full", k, &i__1, &c_b12, &c_b12, &a[a_offset], lda);
+	i__1 = *n - *l;
+	for (j = *n - *l - *k + 1; j <= i__1; ++j) {
+	    i__2 = *k;
+	    for (i__ = j - *n + *l + *k + 1; i__ <= i__2; ++i__) {
+		a[i__ + j * a_dim1] = 0.f;
+/* L110: */
+	    }
+/* L120: */
+	}
+
+    }
+
+    if (*m > *k) {
+
+/* QR factorization of A( K+1:M,N-L+1:N ) */
+
+	i__1 = *m - *k;
+	sgeqr2_(&i__1, l, &a[*k + 1 + (*n - *l + 1) * a_dim1], lda, &tau[1], &
+		work[1], info);
+
+	if (wantu) {
+
+/* Update U(:,K+1:M) := U(:,K+1:M)*U1 */
+
+	    i__1 = *m - *k;
+/* Computing MIN */
+	    i__3 = *m - *k;
+	    i__2 = min(i__3,*l);
+	    sorm2r_("Right", "No transpose", m, &i__1, &i__2, &a[*k + 1 + (*n
+		    - *l + 1) * a_dim1], lda, &tau[1], &u[(*k + 1) * u_dim1 +
+		    1], ldu, &work[1], info);
+	}
+
+/* Clean up */
+
+	i__1 = *n;
+	for (j = *n - *l + 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = j - *n + *k + *l + 1; i__ <= i__2; ++i__) {
+		a[i__ + j * a_dim1] = 0.f;
+/* L130: */
+	    }
+/* L140: */
+	}
+
+    }
+
+    return 0;
+
+/* End of SGGSVP */
+
+} /* sggsvp_ */
diff --git a/contrib/libs/clapack/sgsvj0.c b/contrib/libs/clapack/sgsvj0.c
new file mode 100644
index 0000000000..b254b5b1fb
--- /dev/null
+++ b/contrib/libs/clapack/sgsvj0.c
@@ -0,0 +1,1150 @@
+/* sgsvj0.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static real c_b42 = 1.f;
+
+/* Subroutine */ int sgsvj0_(char *jobv, integer *m, integer *n, real *a,
+ integer *lda, real *d__, real *sva, integer *mv, real *v, integer *
+ ldv, real *eps, real *sfmin, real *tol, integer *nsweep, real *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, v_dim1, v_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ real bigtheta;
+ integer pskipped, i__, p, q;
+ real t, rootsfmin, cs, sn;
+ integer ir1, jbc;
+ real big;
+ integer kbl, igl, ibr, jgl, nbl, mvl;
+ real aapp, aapq, aaqq;
+ integer ierr;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ real aapp0, temp1;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ real apoaq, aqoap;
+ extern logical lsame_(char *, char *);
+ real theta, small, fastr[5];
+ logical applv, rsvec;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ logical rotok;
+ extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+ integer *), saxpy_(integer *, real *, real *, integer *, real *,
+ integer *), srotm_(integer *, real *, integer *, real *, integer *
+, real *), xerbla_(char *, integer *);
+ integer ijblsk, swband;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ integer blskip;
+ real mxaapq, thsign;
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+ real mxsinj;
+ integer emptsw, notrot, iswrot, lkahead;
+ real rootbig, rooteps;
+ integer rowskip;
+ real roottol;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Zlatko Drmac of the University of Zagreb and -- */
+/* -- Kresimir Veselic of the Fernuniversitaet Hagen -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* This routine is also part of SIGMA (version 1.23, October 23. 2008.) */
+/* SIGMA is a library of algorithms for highly accurate algorithms for */
+/* computation of SVD, PSVD, QSVD, (H,K)-SVD, and for solution of the */
+/* eigenvalue problems Hx = lambda M x, H M x = lambda x with H, M > 0. */
+
+/* Scalar Arguments */
+
+
+/* Array Arguments */
+
+/* .. */
+
+/* Purpose */
+/* ~~~~~~~ */
+/* SGSVJ0 is called from SGESVJ as a pre-processor and that is its main */
+/* purpose. It applies Jacobi rotations in the same way as SGESVJ does, but */
+/* it does not check convergence (stopping criterion). A few tuning */
+/* parameters (marked by [TP]) are available for the implementer. */
+
+/* Further details */
+/* ~~~~~~~~~~~~~~~ */
+/* SGSVJ0 is used just to enable SGESVJ to call a simplified version of */
+/* itself to work on a submatrix of the original matrix. */
+
+/* Contributors */
+/* ~~~~~~~~~~~~ */
+/* Zlatko Drmac (Zagreb, Croatia) and Kresimir Veselic (Hagen, Germany) */
+
+/* Bugs, Examples and Comments */
+/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
+/* Please report all bugs and send interesting test examples and comments to */
+/* drmac@math.hr. Thank you. */
+
+/* Arguments */
+/* ~~~~~~~~~ */
+
+/* JOBV (input) CHARACTER*1 */
+/* Specifies whether the output from this procedure is used */
+/* to compute the matrix V: */
+/* = 'V': the product of the Jacobi rotations is accumulated */
+/* by postmultiplying the N-by-N array V. */
+/* (See the description of V.) */
+/* = 'A': the product of the Jacobi rotations is accumulated */
+/* by postmultiplying the MV-by-N array V. */
+/* (See the descriptions of MV and V.) */
+/* = 'N': the Jacobi rotations are not accumulated. */
+
+/* M (input) INTEGER */
+/* The number of rows of the input matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the input matrix A. */
+/* M >= N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, M-by-N matrix A, such that A*diag(D) represents */
+/* the input matrix. */
+/* On exit, */
+/* A_onexit * D_onexit represents the input matrix A*diag(D) */
+/* post-multiplied by a sequence of Jacobi rotations, where the */
+/* rotation threshold and the total number of sweeps are given in */
+/* TOL and NSWEEP, respectively. */
+/* (See the descriptions of D, TOL and NSWEEP.) */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* D (input/workspace/output) REAL array, dimension (N) */
+/* The array D accumulates the scaling factors from the fast scaled */
+/* Jacobi rotations. */
+/* On entry, A*diag(D) represents the input matrix. */
+/* On exit, A_onexit*diag(D_onexit) represents the input matrix */
+/* post-multiplied by a sequence of Jacobi rotations, where the */
+/* rotation threshold and the total number of sweeps are given in */
+/* TOL and NSWEEP, respectively. */
+/* (See the descriptions of A, TOL and NSWEEP.) */
+
+/* SVA (input/workspace/output) REAL array, dimension (N) */
+/* On entry, SVA contains the Euclidean norms of the columns of */
+/* the matrix A*diag(D). */
+/* On exit, SVA contains the Euclidean norms of the columns of */
+/* the matrix A_onexit*diag(D_onexit). */
+
+/* MV (input) INTEGER */
+/* If JOBV .EQ. 'A', then MV rows of V are post-multiplied by a */
+/* sequence of Jacobi rotations. */
+/* If JOBV = 'N', then MV is not referenced. */
+
+/* V (input/output) REAL array, dimension (LDV,N) */
+/* If JOBV .EQ. 'V' then N rows of V are post-multiplied by a */
+/* sequence of Jacobi rotations. */
+/* If JOBV .EQ. 'A' then MV rows of V are post-multiplied by a */
+/* sequence of Jacobi rotations. */
+/* If JOBV = 'N', then V is not referenced. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V, LDV >= 1. */
+/* If JOBV = 'V', LDV .GE. N. */
+/* If JOBV = 'A', LDV .GE. MV. */
+
+/* EPS (input) REAL */
+/* EPS = SLAMCH('Epsilon') */
+
+/* SFMIN (input) REAL */
+/* SFMIN = SLAMCH('Safe Minimum') */
+
+/* TOL (input) REAL */
+/* TOL is the threshold for Jacobi rotations. For a pair */
+/* A(:,p), A(:,q) of pivot columns, the Jacobi rotation is */
+/* applied only if ABS(COS(angle(A(:,p),A(:,q)))) .GT. TOL. */
+
+/* NSWEEP (input) INTEGER */
+/* NSWEEP is the number of sweeps of Jacobi rotations to be */
+/* performed. */
+
+/* WORK (workspace) REAL array, dimension LWORK. */
+
+/* LWORK (input) INTEGER */
+/* LWORK is the dimension of WORK. LWORK .GE. M. */
+
+/* INFO (output) INTEGER */
+/* = 0 : successful exit. */
+/* < 0 : if INFO = -i, then the i-th argument had an illegal value */
+
+/* Local Parameters */
+/* Local Scalars */
+/* Local Arrays */
+
+/* Intrinsic Functions */
+
+/* External Functions */
+
+/* External Subroutines */
+
+/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~| */
+
+ /* Parameter adjustments */
+ --sva;
+ --d__;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ --work;
+
+ /* Function Body */
+ applv = lsame_(jobv, "A");
+ rsvec = lsame_(jobv, "V");
+ if (! (rsvec || applv || lsame_(jobv, "N"))) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0 || *n > *m) {
+ *info = -3;
+ } else if (*lda < *m) {
+ *info = -5;
+ } else if (*mv < 0) {
+ *info = -8;
+ } else if (*ldv < *m) {
+ *info = -10;
+ } else if (*tol <= *eps) {
+ *info = -13;
+ } else if (*nsweep < 0) {
+ *info = -14;
+ } else if (*lwork < *m) {
+ *info = -16;
+ } else {
+ *info = 0;
+ }
+
+/* #:( */
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGSVJ0", &i__1);
+ return 0;
+ }
+
+ if (rsvec) {
+ mvl = *n;
+ } else if (applv) {
+ mvl = *mv;
+ }
+ rsvec = rsvec || applv;
+ rooteps = sqrt(*eps);
+ rootsfmin = sqrt(*sfmin);
+ small = *sfmin / *eps;
+ big = 1.f / *sfmin;
+ rootbig = 1.f / rootsfmin;
+ bigtheta = 1.f / rooteps;
+ roottol = sqrt(*tol);
+
+
+/* -#- Row-cyclic Jacobi SVD algorithm with column pivoting -#- */
+
+ emptsw = *n * (*n - 1) / 2;
+ notrot = 0;
+ fastr[0] = 0.f;
+
+/* -#- Row-cyclic pivot strategy with de Rijk's pivoting -#- */
+
+ swband = 0;
+/* [TP] SWBAND is a tuning parameter. It is meaningful and effective */
+/* if SGESVJ is used as a computational routine in the preconditioned */
+/* Jacobi SVD algorithm SGESVJ. For sweeps i=1:SWBAND the procedure */
+/* ...... */
+ kbl = min(8,*n);
+/* [TP] KBL is a tuning parameter that defines the tile size in the */
+/* tiling of the p-q loops of pivot pairs. In general, an optimal */
+/* value of KBL depends on the matrix dimensions and on the */
+/* parameters of the computer's memory. */
+
+ nbl = *n / kbl;
+ if (nbl * kbl != *n) {
+ ++nbl;
+ }
+/* Computing 2nd power */
+ i__1 = kbl;
+ blskip = i__1 * i__1 + 1;
+/* [TP] BLKSKIP is a tuning parameter that depends on SWBAND and KBL. */
+ rowskip = min(5,kbl);
+/* [TP] ROWSKIP is a tuning parameter. */
+ lkahead = 1;
+/* [TP] LKAHEAD is a tuning parameter. */
+ swband = 0;
+ pskipped = 0;
+
+ i__1 = *nsweep;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* .. go go go ... */
+
+ mxaapq = 0.f;
+ mxsinj = 0.f;
+ iswrot = 0;
+
+ notrot = 0;
+ pskipped = 0;
+
+ i__2 = nbl;
+ for (ibr = 1; ibr <= i__2; ++ibr) {
+ igl = (ibr - 1) * kbl + 1;
+
+/* Computing MIN */
+ i__4 = lkahead, i__5 = nbl - ibr;
+ i__3 = min(i__4,i__5);
+ for (ir1 = 0; ir1 <= i__3; ++ir1) {
+
+ igl += ir1 * kbl;
+
+/* Computing MIN */
+ i__5 = igl + kbl - 1, i__6 = *n - 1;
+ i__4 = min(i__5,i__6);
+ for (p = igl; p <= i__4; ++p) {
+/* .. de Rijk's pivoting */
+ i__5 = *n - p + 1;
+ q = isamax_(&i__5, &sva[p], &c__1) + p - 1;
+ if (p != q) {
+ sswap_(m, &a[p * a_dim1 + 1], &c__1, &a[q * a_dim1 +
+ 1], &c__1);
+ if (rsvec) {
+ sswap_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q *
+ v_dim1 + 1], &c__1);
+ }
+ temp1 = sva[p];
+ sva[p] = sva[q];
+ sva[q] = temp1;
+ temp1 = d__[p];
+ d__[p] = d__[q];
+ d__[q] = temp1;
+ }
+
+ if (ir1 == 0) {
+
+/* Column norms are periodically updated by explicit */
+/* norm computation. */
+/* Caveat: */
+/* Some BLAS implementations compute SNRM2(M,A(1,p),1) */
+/* as SQRT(SDOT(M,A(1,p),1,A(1,p),1)), which may result in */
+/* overflow for ||A(:,p)||_2 > SQRT(overflow_threshold), and */
+/* underflow for ||A(:,p)||_2 < SQRT(underflow_threshold). */
+/* Hence, SNRM2 cannot be trusted, not even in the case when */
+/* the true norm is far from the under(over)flow boundaries. */
+/* If properly implemented SNRM2 is available, the IF-THEN-ELSE */
+/* below should read "AAPP = SNRM2( M, A(1,p), 1 ) * D(p)". */
+
+ if (sva[p] < rootbig && sva[p] > rootsfmin) {
+ sva[p] = snrm2_(m, &a[p * a_dim1 + 1], &c__1) *
+ d__[p];
+ } else {
+ temp1 = 0.f;
+ aapp = 0.f;
+ slassq_(m, &a[p * a_dim1 + 1], &c__1, &temp1, &
+ aapp);
+ sva[p] = temp1 * sqrt(aapp) * d__[p];
+ }
+ aapp = sva[p];
+ } else {
+ aapp = sva[p];
+ }
+
+ if (aapp > 0.f) {
+
+ pskipped = 0;
+
+/* Computing MIN */
+ i__6 = igl + kbl - 1;
+ i__5 = min(i__6,*n);
+ for (q = p + 1; q <= i__5; ++q) {
+
+ aaqq = sva[q];
+ if (aaqq > 0.f) {
+
+ aapp0 = aapp;
+ if (aaqq >= 1.f) {
+ rotok = small * aapp <= aaqq;
+ if (aapp < big / aaqq) {
+ aapq = sdot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * d__[p] * d__[q] /
+ aaqq / aapp;
+ } else {
+ scopy_(m, &a[p * a_dim1 + 1], &c__1, &
+ work[1], &c__1);
+ slascl_("G", &c__0, &c__0, &aapp, &
+ d__[p], m, &c__1, &work[1],
+ lda, &ierr);
+ aapq = sdot_(m, &work[1], &c__1, &a[q
+ * a_dim1 + 1], &c__1) * d__[q]
+ / aaqq;
+ }
+ } else {
+ rotok = aapp <= aaqq / small;
+ if (aapp > small / aaqq) {
+ aapq = sdot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * d__[p] * d__[q] /
+ aaqq / aapp;
+ } else {
+ scopy_(m, &a[q * a_dim1 + 1], &c__1, &
+ work[1], &c__1);
+ slascl_("G", &c__0, &c__0, &aaqq, &
+ d__[q], m, &c__1, &work[1],
+ lda, &ierr);
+ aapq = sdot_(m, &work[1], &c__1, &a[p
+ * a_dim1 + 1], &c__1) * d__[p]
+ / aapp;
+ }
+ }
+
+/* Computing MAX */
+ r__1 = mxaapq, r__2 = dabs(aapq);
+ mxaapq = dmax(r__1,r__2);
+
+/* TO rotate or NOT to rotate, THAT is the question ... */
+
+ if (dabs(aapq) > *tol) {
+
+/* .. rotate */
+/* ROTATED = ROTATED + ONE */
+
+ if (ir1 == 0) {
+ notrot = 0;
+ pskipped = 0;
+ ++iswrot;
+ }
+
+ if (rotok) {
+
+ aqoap = aaqq / aapp;
+ apoaq = aapp / aaqq;
+ theta = (r__1 = aqoap - apoaq, dabs(
+ r__1)) * -.5f / aapq;
+
+ if (dabs(theta) > bigtheta) {
+
+ t = .5f / theta;
+ fastr[2] = t * d__[p] / d__[q];
+ fastr[3] = -t * d__[q] / d__[p];
+ srotm_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1],
+ &c__1, fastr);
+ if (rsvec) {
+ srotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q *
+ v_dim1 + 1], &c__1, fastr);
+ }
+/* Computing MAX */
+ r__1 = 0.f, r__2 = t * apoaq *
+ aapq + 1.f;
+ sva[q] = aaqq * sqrt((dmax(r__1,
+ r__2)));
+ aapp *= sqrt(1.f - t * aqoap *
+ aapq);
+/* Computing MAX */
+ r__1 = mxsinj, r__2 = dabs(t);
+ mxsinj = dmax(r__1,r__2);
+
+ } else {
+
+/* .. choose correct signum for THETA and rotate */
+
+ thsign = -r_sign(&c_b42, &aapq);
+ t = 1.f / (theta + thsign * sqrt(
+ theta * theta + 1.f));
+ cs = sqrt(1.f / (t * t + 1.f));
+ sn = t * cs;
+
+/* Computing MAX */
+ r__1 = mxsinj, r__2 = dabs(sn);
+ mxsinj = dmax(r__1,r__2);
+/* Computing MAX */
+ r__1 = 0.f, r__2 = t * apoaq *
+ aapq + 1.f;
+ sva[q] = aaqq * sqrt((dmax(r__1,
+ r__2)));
+/* Computing MAX */
+ r__1 = 0.f, r__2 = 1.f - t *
+ aqoap * aapq;
+ aapp *= sqrt((dmax(r__1,r__2)));
+
+ apoaq = d__[p] / d__[q];
+ aqoap = d__[q] / d__[p];
+ if (d__[p] >= 1.f) {
+ if (d__[q] >= 1.f) {
+ fastr[2] = t * apoaq;
+ fastr[3] = -t * aqoap;
+ d__[p] *= cs;
+ d__[q] *= cs;
+ srotm_(m, &a[p * a_dim1 + 1], &c__1, &a[q *
+ a_dim1 + 1], &c__1, fastr);
+ if (rsvec) {
+ srotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[
+ q * v_dim1 + 1], &c__1, fastr);
+ }
+ } else {
+ r__1 = -t * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ d__[p] *= cs;
+ d__[q] /= cs;
+ if (rsvec) {
+ r__1 = -t * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ }
+ }
+ } else {
+ if (d__[q] >= 1.f) {
+ r__1 = t * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ d__[p] /= cs;
+ d__[q] *= cs;
+ if (rsvec) {
+ r__1 = t * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ }
+ } else {
+ if (d__[p] >= d__[q]) {
+ r__1 = -t * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ d__[p] *= cs;
+ d__[q] /= cs;
+ if (rsvec) {
+ r__1 = -t * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ }
+ } else {
+ r__1 = t * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ d__[p] /= cs;
+ d__[q] *= cs;
+ if (rsvec) {
+ r__1 = t * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ }
+ }
+ }
+ }
+ }
+
+ } else {
+/* .. have to use modified Gram-Schmidt like transformation */
+ scopy_(m, &a[p * a_dim1 + 1], &c__1, &
+ work[1], &c__1);
+ slascl_("G", &c__0, &c__0, &aapp, &
+ c_b42, m, &c__1, &work[1],
+ lda, &ierr);
+ slascl_("G", &c__0, &c__0, &aaqq, &
+ c_b42, m, &c__1, &a[q *
+ a_dim1 + 1], lda, &ierr);
+ temp1 = -aapq * d__[p] / d__[q];
+ saxpy_(m, &temp1, &work[1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ slascl_("G", &c__0, &c__0, &c_b42, &
+ aaqq, m, &c__1, &a[q * a_dim1
+ + 1], lda, &ierr);
+/* Computing MAX */
+ r__1 = 0.f, r__2 = 1.f - aapq * aapq;
+ sva[q] = aaqq * sqrt((dmax(r__1,r__2))
+ );
+ mxsinj = dmax(mxsinj,*sfmin);
+ }
+/* END IF ROTOK THEN ... ELSE */
+
+/* In the case of cancellation in updating SVA(q), SVA(p) */
+/* recompute SVA(q), SVA(p). */
+/* Computing 2nd power */
+ r__1 = sva[q] / aaqq;
+ if (r__1 * r__1 <= rooteps) {
+ if (aaqq < rootbig && aaqq >
+ rootsfmin) {
+ sva[q] = snrm2_(m, &a[q * a_dim1
+ + 1], &c__1) * d__[q];
+ } else {
+ t = 0.f;
+ aaqq = 0.f;
+ slassq_(m, &a[q * a_dim1 + 1], &
+ c__1, &t, &aaqq);
+ sva[q] = t * sqrt(aaqq) * d__[q];
+ }
+ }
+ if (aapp / aapp0 <= rooteps) {
+ if (aapp < rootbig && aapp >
+ rootsfmin) {
+ aapp = snrm2_(m, &a[p * a_dim1 +
+ 1], &c__1) * d__[p];
+ } else {
+ t = 0.f;
+ aapp = 0.f;
+ slassq_(m, &a[p * a_dim1 + 1], &
+ c__1, &t, &aapp);
+ aapp = t * sqrt(aapp) * d__[p];
+ }
+ sva[p] = aapp;
+ }
+
+ } else {
+/* A(:,p) and A(:,q) already numerically orthogonal */
+ if (ir1 == 0) {
+ ++notrot;
+ }
+ ++pskipped;
+ }
+ } else {
+/* A(:,q) is zero column */
+ if (ir1 == 0) {
+ ++notrot;
+ }
+ ++pskipped;
+ }
+
+ if (i__ <= swband && pskipped > rowskip) {
+ if (ir1 == 0) {
+ aapp = -aapp;
+ }
+ notrot = 0;
+ goto L2103;
+ }
+
+/* L2002: */
+ }
+/* END q-LOOP */
+
+L2103:
+/* bailed out of q-loop */
+ sva[p] = aapp;
+ } else {
+ sva[p] = aapp;
+ if (ir1 == 0 && aapp == 0.f) {
+/* Computing MIN */
+ i__5 = igl + kbl - 1;
+ notrot = notrot + min(i__5,*n) - p;
+ }
+ }
+
+/* L2001: */
+ }
+/* end of the p-loop */
+/* end of doing the block ( ibr, ibr ) */
+/* L1002: */
+ }
+/* end of ir1-loop */
+
+/* ........................................................ */
+/* ... go to the off diagonal blocks */
+
+ igl = (ibr - 1) * kbl + 1;
+
+ i__3 = nbl;
+ for (jbc = ibr + 1; jbc <= i__3; ++jbc) {
+
+ jgl = (jbc - 1) * kbl + 1;
+
+/* doing the block at ( ibr, jbc ) */
+
+ ijblsk = 0;
+/* Computing MIN */
+ i__5 = igl + kbl - 1;
+ i__4 = min(i__5,*n);
+ for (p = igl; p <= i__4; ++p) {
+
+ aapp = sva[p];
+
+ if (aapp > 0.f) {
+
+ pskipped = 0;
+
+/* Computing MIN */
+ i__6 = jgl + kbl - 1;
+ i__5 = min(i__6,*n);
+ for (q = jgl; q <= i__5; ++q) {
+
+ aaqq = sva[q];
+
+ if (aaqq > 0.f) {
+ aapp0 = aapp;
+
+/* -#- M x 2 Jacobi SVD -#- */
+
+/* -#- Safe Gram matrix computation -#- */
+
+ if (aaqq >= 1.f) {
+ if (aapp >= aaqq) {
+ rotok = small * aapp <= aaqq;
+ } else {
+ rotok = small * aaqq <= aapp;
+ }
+ if (aapp < big / aaqq) {
+ aapq = sdot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * d__[p] * d__[q] /
+ aaqq / aapp;
+ } else {
+ scopy_(m, &a[p * a_dim1 + 1], &c__1, &
+ work[1], &c__1);
+ slascl_("G", &c__0, &c__0, &aapp, &
+ d__[p], m, &c__1, &work[1],
+ lda, &ierr);
+ aapq = sdot_(m, &work[1], &c__1, &a[q
+ * a_dim1 + 1], &c__1) * d__[q]
+ / aaqq;
+ }
+ } else {
+ if (aapp >= aaqq) {
+ rotok = aapp <= aaqq / small;
+ } else {
+ rotok = aaqq <= aapp / small;
+ }
+ if (aapp > small / aaqq) {
+ aapq = sdot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * d__[p] * d__[q] /
+ aaqq / aapp;
+ } else {
+ scopy_(m, &a[q * a_dim1 + 1], &c__1, &
+ work[1], &c__1);
+ slascl_("G", &c__0, &c__0, &aaqq, &
+ d__[q], m, &c__1, &work[1],
+ lda, &ierr);
+ aapq = sdot_(m, &work[1], &c__1, &a[p
+ * a_dim1 + 1], &c__1) * d__[p]
+ / aapp;
+ }
+ }
+
+/* Computing MAX */
+ r__1 = mxaapq, r__2 = dabs(aapq);
+ mxaapq = dmax(r__1,r__2);
+
+/* TO rotate or NOT to rotate, THAT is the question ... */
+
+ if (dabs(aapq) > *tol) {
+ notrot = 0;
+/* ROTATED = ROTATED + 1 */
+ pskipped = 0;
+ ++iswrot;
+
+ if (rotok) {
+
+ aqoap = aaqq / aapp;
+ apoaq = aapp / aaqq;
+ theta = (r__1 = aqoap - apoaq, dabs(
+ r__1)) * -.5f / aapq;
+ if (aaqq > aapp0) {
+ theta = -theta;
+ }
+
+ if (dabs(theta) > bigtheta) {
+ t = .5f / theta;
+ fastr[2] = t * d__[p] / d__[q];
+ fastr[3] = -t * d__[q] / d__[p];
+ srotm_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1],
+ &c__1, fastr);
+ if (rsvec) {
+ srotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q *
+ v_dim1 + 1], &c__1, fastr);
+ }
+/* Computing MAX */
+ r__1 = 0.f, r__2 = t * apoaq *
+ aapq + 1.f;
+ sva[q] = aaqq * sqrt((dmax(r__1,
+ r__2)));
+/* Computing MAX */
+ r__1 = 0.f, r__2 = 1.f - t *
+ aqoap * aapq;
+ aapp *= sqrt((dmax(r__1,r__2)));
+/* Computing MAX */
+ r__1 = mxsinj, r__2 = dabs(t);
+ mxsinj = dmax(r__1,r__2);
+ } else {
+
+/* .. choose correct signum for THETA and rotate */
+
+ thsign = -r_sign(&c_b42, &aapq);
+ if (aaqq > aapp0) {
+ thsign = -thsign;
+ }
+ t = 1.f / (theta + thsign * sqrt(
+ theta * theta + 1.f));
+ cs = sqrt(1.f / (t * t + 1.f));
+ sn = t * cs;
+/* Computing MAX */
+ r__1 = mxsinj, r__2 = dabs(sn);
+ mxsinj = dmax(r__1,r__2);
+/* Computing MAX */
+ r__1 = 0.f, r__2 = t * apoaq *
+ aapq + 1.f;
+ sva[q] = aaqq * sqrt((dmax(r__1,
+ r__2)));
+ aapp *= sqrt(1.f - t * aqoap *
+ aapq);
+
+ apoaq = d__[p] / d__[q];
+ aqoap = d__[q] / d__[p];
+ if (d__[p] >= 1.f) {
+
+ if (d__[q] >= 1.f) {
+ fastr[2] = t * apoaq;
+ fastr[3] = -t * aqoap;
+ d__[p] *= cs;
+ d__[q] *= cs;
+ srotm_(m, &a[p * a_dim1 + 1], &c__1, &a[q *
+ a_dim1 + 1], &c__1, fastr);
+ if (rsvec) {
+ srotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[
+ q * v_dim1 + 1], &c__1, fastr);
+ }
+ } else {
+ r__1 = -t * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ if (rsvec) {
+ r__1 = -t * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ }
+ d__[p] *= cs;
+ d__[q] /= cs;
+ }
+ } else {
+ if (d__[q] >= 1.f) {
+ r__1 = t * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ if (rsvec) {
+ r__1 = t * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ }
+ d__[p] /= cs;
+ d__[q] *= cs;
+ } else {
+ if (d__[p] >= d__[q]) {
+ r__1 = -t * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ d__[p] *= cs;
+ d__[q] /= cs;
+ if (rsvec) {
+ r__1 = -t * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ }
+ } else {
+ r__1 = t * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ d__[p] /= cs;
+ d__[q] *= cs;
+ if (rsvec) {
+ r__1 = t * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ }
+ }
+ }
+ }
+ }
+
+ } else {
+ if (aapp > aaqq) {
+ scopy_(m, &a[p * a_dim1 + 1], &
+ c__1, &work[1], &c__1);
+ slascl_("G", &c__0, &c__0, &aapp,
+ &c_b42, m, &c__1, &work[1]
+, lda, &ierr);
+ slascl_("G", &c__0, &c__0, &aaqq,
+ &c_b42, m, &c__1, &a[q *
+ a_dim1 + 1], lda, &ierr);
+ temp1 = -aapq * d__[p] / d__[q];
+ saxpy_(m, &temp1, &work[1], &c__1,
+ &a[q * a_dim1 + 1], &
+ c__1);
+ slascl_("G", &c__0, &c__0, &c_b42,
+ &aaqq, m, &c__1, &a[q *
+ a_dim1 + 1], lda, &ierr);
+/* Computing MAX */
+ r__1 = 0.f, r__2 = 1.f - aapq *
+ aapq;
+ sva[q] = aaqq * sqrt((dmax(r__1,
+ r__2)));
+ mxsinj = dmax(mxsinj,*sfmin);
+ } else {
+ scopy_(m, &a[q * a_dim1 + 1], &
+ c__1, &work[1], &c__1);
+ slascl_("G", &c__0, &c__0, &aaqq,
+ &c_b42, m, &c__1, &work[1]
+, lda, &ierr);
+ slascl_("G", &c__0, &c__0, &aapp,
+ &c_b42, m, &c__1, &a[p *
+ a_dim1 + 1], lda, &ierr);
+ temp1 = -aapq * d__[q] / d__[p];
+ saxpy_(m, &temp1, &work[1], &c__1,
+ &a[p * a_dim1 + 1], &
+ c__1);
+ slascl_("G", &c__0, &c__0, &c_b42,
+ &aapp, m, &c__1, &a[p *
+ a_dim1 + 1], lda, &ierr);
+/* Computing MAX */
+ r__1 = 0.f, r__2 = 1.f - aapq *
+ aapq;
+ sva[p] = aapp * sqrt((dmax(r__1,
+ r__2)));
+ mxsinj = dmax(mxsinj,*sfmin);
+ }
+ }
+/* END IF ROTOK THEN ... ELSE */
+
+/* In the case of cancellation in updating SVA(q) */
+/* .. recompute SVA(q) */
+/* Computing 2nd power */
+ r__1 = sva[q] / aaqq;
+ if (r__1 * r__1 <= rooteps) {
+ if (aaqq < rootbig && aaqq >
+ rootsfmin) {
+ sva[q] = snrm2_(m, &a[q * a_dim1
+ + 1], &c__1) * d__[q];
+ } else {
+ t = 0.f;
+ aaqq = 0.f;
+ slassq_(m, &a[q * a_dim1 + 1], &
+ c__1, &t, &aaqq);
+ sva[q] = t * sqrt(aaqq) * d__[q];
+ }
+ }
+/* Computing 2nd power */
+ r__1 = aapp / aapp0;
+ if (r__1 * r__1 <= rooteps) {
+ if (aapp < rootbig && aapp >
+ rootsfmin) {
+ aapp = snrm2_(m, &a[p * a_dim1 +
+ 1], &c__1) * d__[p];
+ } else {
+ t = 0.f;
+ aapp = 0.f;
+ slassq_(m, &a[p * a_dim1 + 1], &
+ c__1, &t, &aapp);
+ aapp = t * sqrt(aapp) * d__[p];
+ }
+ sva[p] = aapp;
+ }
+/* end of OK rotation */
+ } else {
+ ++notrot;
+ ++pskipped;
+ ++ijblsk;
+ }
+ } else {
+ ++notrot;
+ ++pskipped;
+ ++ijblsk;
+ }
+
+ if (i__ <= swband && ijblsk >= blskip) {
+ sva[p] = aapp;
+ notrot = 0;
+ goto L2011;
+ }
+ if (i__ <= swband && pskipped > rowskip) {
+ aapp = -aapp;
+ notrot = 0;
+ goto L2203;
+ }
+
+/* L2200: */
+ }
+/* end of the q-loop */
+L2203:
+
+ sva[p] = aapp;
+
+ } else {
+ if (aapp == 0.f) {
+/* Computing MIN */
+ i__5 = jgl + kbl - 1;
+ notrot = notrot + min(i__5,*n) - jgl + 1;
+ }
+ if (aapp < 0.f) {
+ notrot = 0;
+ }
+ }
+/* L2100: */
+ }
+/* end of the p-loop */
+/* L2010: */
+ }
+/* end of the jbc-loop */
+L2011:
+/* 2011 bailed out of the jbc-loop */
+/* Computing MIN */
+ i__4 = igl + kbl - 1;
+ i__3 = min(i__4,*n);
+ for (p = igl; p <= i__3; ++p) {
+ sva[p] = (r__1 = sva[p], dabs(r__1));
+/* L2012: */
+ }
+
+/* L2000: */
+ }
+/* 2000 :: end of the ibr-loop */
+
+/* .. update SVA(N) */
+ if (sva[*n] < rootbig && sva[*n] > rootsfmin) {
+ sva[*n] = snrm2_(m, &a[*n * a_dim1 + 1], &c__1) * d__[*n];
+ } else {
+ t = 0.f;
+ aapp = 0.f;
+ slassq_(m, &a[*n * a_dim1 + 1], &c__1, &t, &aapp);
+ sva[*n] = t * sqrt(aapp) * d__[*n];
+ }
+
+/* Additional steering devices */
+
+ if (i__ < swband && (mxaapq <= roottol || iswrot <= *n)) {
+ swband = i__;
+ }
+
+ if (i__ > swband + 1 && mxaapq < (real) (*n) * *tol && (real) (*n) *
+ mxaapq * mxsinj < *tol) {
+ goto L1994;
+ }
+
+ if (notrot >= emptsw) {
+ goto L1994;
+ }
+/* L1993: */
+ }
+/* end i=1:NSWEEP loop */
+/* #:) Reaching this point means that the procedure has completed the given */
+/* number of iterations. */
+ *info = *nsweep - 1;
+ goto L1995;
+L1994:
+/* #:) Reaching this point means that during the i-th sweep all pivots were */
+/* below the given tolerance, causing early exit. */
+
+ *info = 0;
+/* #:) INFO = 0 confirms successful iterations. */
+L1995:
+
+/* Sort the vector D. */
+ i__1 = *n - 1;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *n - p + 1;
+ q = isamax_(&i__2, &sva[p], &c__1) + p - 1;
+ if (p != q) {
+ temp1 = sva[p];
+ sva[p] = sva[q];
+ sva[q] = temp1;
+ temp1 = d__[p];
+ d__[p] = d__[q];
+ d__[q] = temp1;
+ sswap_(m, &a[p * a_dim1 + 1], &c__1, &a[q * a_dim1 + 1], &c__1);
+ if (rsvec) {
+ sswap_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ }
+ }
+/* L5991: */
+ }
+
+ return 0;
+/* .. */
+/* .. END OF SGSVJ0 */
+/* .. */
+} /* sgsvj0_ */
diff --git a/contrib/libs/clapack/sgsvj1.c b/contrib/libs/clapack/sgsvj1.c
new file mode 100644
index 0000000000..584b7f508a
--- /dev/null
+++ b/contrib/libs/clapack/sgsvj1.c
@@ -0,0 +1,789 @@
+/* sgsvj1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static real c_b35 = 1.f;
+
+/* Subroutine */ int sgsvj1_(char *jobv, integer *m, integer *n, integer *n1,
+ real *a, integer *lda, real *d__, real *sva, integer *mv, real *v,
+ integer *ldv, real *eps, real *sfmin, real *tol, integer *nsweep,
+ real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, v_dim1, v_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ real bigtheta;
+ integer pskipped, i__, p, q;
+ real t, rootsfmin, cs, sn;
+ integer jbc;
+ real big;
+ integer kbl, igl, ibr, jgl, mvl, nblc;
+ real aapp, aapq, aaqq;
+ integer nblr, ierr;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ real aapp0, temp1;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ real large, apoaq, aqoap;
+ extern logical lsame_(char *, char *);
+ real theta, small, fastr[5];
+ logical applv, rsvec;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ logical rotok;
+ extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+ integer *), saxpy_(integer *, real *, real *, integer *, real *,
+ integer *), srotm_(integer *, real *, integer *, real *, integer *
+, real *), xerbla_(char *, integer *);
+ integer ijblsk, swband;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ integer blskip;
+ real mxaapq, thsign;
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+ real mxsinj;
+ integer emptsw, notrot, iswrot;
+ real rootbig, rooteps;
+ integer rowskip;
+ real roottol;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Zlatko Drmac of the University of Zagreb and -- */
+/* -- Kresimir Veselic of the Fernuniversitaet Hagen -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* This routine is also part of SIGMA (version 1.23, October 23. 2008.) */
+/* SIGMA is a library of highly accurate algorithms for the */
+/* computation of SVD, PSVD, QSVD, (H,K)-SVD, and for solution of the */
+/* eigenvalue problems Hx = lambda M x, H M x = lambda x with H, M > 0. */
+
+/* -#- Scalar Arguments -#- */
+
+
+/* -#- Array Arguments -#- */
+
+/* .. */
+
+/* Purpose */
+/* ~~~~~~~ */
+/* SGSVJ1 is called from SGESVJ as a pre-processor and that is its main */
+/* purpose. It applies Jacobi rotations in the same way as SGESVJ does, but */
+/* it targets only particular pivots and it does not check convergence */
+/* (stopping criterion). Few tuning parameters (marked by [TP]) are */
+/* available for the implementer. */
+
+/* Further details */
+/* ~~~~~~~~~~~~~~~ */
+/* SGSVJ1 applies few sweeps of Jacobi rotations in the column space of */
+/* the input M-by-N matrix A. The pivot pairs are taken from the (1,2) */
+/* off-diagonal block in the corresponding N-by-N Gram matrix A^T * A. The */
+/* block-entries (tiles) of the (1,2) off-diagonal block are marked by the */
+/* [x]'s in the following scheme: */
+
+/* | * * * [x] [x] [x]| */
+/* | * * * [x] [x] [x]| Row-cycling in the nblr-by-nblc [x] blocks. */
+/* | * * * [x] [x] [x]| Row-cyclic pivoting inside each [x] block. */
+/* |[x] [x] [x] * * * | */
+/* |[x] [x] [x] * * * | */
+/* |[x] [x] [x] * * * | */
+
+/* In terms of the columns of A, the first N1 columns are rotated 'against' */
+/* the remaining N-N1 columns, trying to increase the angle between the */
+/* corresponding subspaces. The off-diagonal block is N1-by-(N-N1) and it is */
+/* tiled using quadratic tiles of side KBL. Here, KBL is a tuning parameter. */
+/* The number of sweeps is given in NSWEEP and the orthogonality threshold */
+/* is given in TOL. */
+
+/* Contributors */
+/* ~~~~~~~~~~~~ */
+/* Zlatko Drmac (Zagreb, Croatia) and Kresimir Veselic (Hagen, Germany) */
+
+/* Arguments */
+/* ~~~~~~~~~ */
+
+/* JOBV (input) CHARACTER*1 */
+/* Specifies whether the output from this procedure is used */
+/* to compute the matrix V: */
+/* = 'V': the product of the Jacobi rotations is accumulated */
+/* by postmultiplying the N-by-N array V. */
+/* (See the description of V.) */
+/* = 'A': the product of the Jacobi rotations is accumulated */
+/* by postmultiplying the MV-by-N array V. */
+/* (See the descriptions of MV and V.) */
+/* = 'N': the Jacobi rotations are not accumulated. */
+
+/* M (input) INTEGER */
+/* The number of rows of the input matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the input matrix A. */
+/* M >= N >= 0. */
+
+/* N1 (input) INTEGER */
+/* N1 specifies the 2 x 2 block partition, the first N1 columns are */
+/* rotated 'against' the remaining N-N1 columns of A. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, M-by-N matrix A, such that A*diag(D) represents */
+/* the input matrix. */
+/* On exit, */
+/* A_onexit * D_onexit represents the input matrix A*diag(D) */
+/* post-multiplied by a sequence of Jacobi rotations, where the */
+/* rotation threshold and the total number of sweeps are given in */
+/* TOL and NSWEEP, respectively. */
+/* (See the descriptions of N1, D, TOL and NSWEEP.) */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* D (input/workspace/output) REAL array, dimension (N) */
+/* The array D accumulates the scaling factors from the fast scaled */
+/* Jacobi rotations. */
+/* On entry, A*diag(D) represents the input matrix. */
+/* On exit, A_onexit*diag(D_onexit) represents the input matrix */
+/* post-multiplied by a sequence of Jacobi rotations, where the */
+/* rotation threshold and the total number of sweeps are given in */
+/* TOL and NSWEEP, respectively. */
+/* (See the descriptions of N1, A, TOL and NSWEEP.) */
+
+/* SVA (input/workspace/output) REAL array, dimension (N) */
+/* On entry, SVA contains the Euclidean norms of the columns of */
+/* the matrix A*diag(D). */
+/* On exit, SVA contains the Euclidean norms of the columns of */
+/* the matrix A_onexit*diag(D_onexit). */
+
+/* MV (input) INTEGER */
+/* If JOBV .EQ. 'A', then MV rows of V are post-multiplied by a */
+/* sequence of Jacobi rotations. */
+/* If JOBV = 'N', then MV is not referenced. */
+
+/* V (input/output) REAL array, dimension (LDV,N) */
+/* If JOBV .EQ. 'V' then N rows of V are post-multiplied by a */
+/* sequence of Jacobi rotations. */
+/* If JOBV .EQ. 'A' then MV rows of V are post-multiplied by a */
+/* sequence of Jacobi rotations. */
+/* If JOBV = 'N', then V is not referenced. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V, LDV >= 1. */
+/* If JOBV = 'V', LDV .GE. N. */
+/* If JOBV = 'A', LDV .GE. MV. */
+
+/* EPS (input) REAL */
+/* EPS = SLAMCH('Epsilon') */
+
+/* SFMIN (input) REAL */
+/* SFMIN = SLAMCH('Safe Minimum') */
+
+/* TOL (input) REAL */
+/* TOL is the threshold for Jacobi rotations. For a pair */
+/* A(:,p), A(:,q) of pivot columns, the Jacobi rotation is */
+/* applied only if ABS(COS(angle(A(:,p),A(:,q)))) .GT. TOL. */
+
+/* NSWEEP (input) INTEGER */
+/* NSWEEP is the number of sweeps of Jacobi rotations to be */
+/* performed. */
+
+/* WORK (workspace) REAL array, dimension LWORK. */
+
+/* LWORK (input) INTEGER */
+/* LWORK is the dimension of WORK. LWORK .GE. M. */
+
+/* INFO (output) INTEGER */
+/* = 0 : successful exit. */
+/* < 0 : if INFO = -i, then the i-th argument had an illegal value */
+
+/* -#- Local Parameters -#- */
+
+/* -#- Local Scalars -#- */
+
+
+/* Local Arrays */
+
+/* Intrinsic Functions */
+
+/* External Functions */
+
+/* External Subroutines */
+
+
+ /* Parameter adjustments */
+ --sva;
+ --d__;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ --work;
+
+ /* Function Body */
+ applv = lsame_(jobv, "A");
+ rsvec = lsame_(jobv, "V");
+ if (! (rsvec || applv || lsame_(jobv, "N"))) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0 || *n > *m) {
+ *info = -3;
+ } else if (*n1 < 0) {
+ *info = -4;
+ } else if (*lda < *m) {
+ *info = -6;
+ } else if (*mv < 0) {
+ *info = -9;
+ } else if (*ldv < *m) {
+ *info = -11;
+ } else if (*tol <= *eps) {
+ *info = -14;
+ } else if (*nsweep < 0) {
+ *info = -15;
+ } else if (*lwork < *m) {
+ *info = -17;
+ } else {
+ *info = 0;
+ }
+
+/* #:( */
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGSVJ1", &i__1);
+ return 0;
+ }
+
+ if (rsvec) {
+ mvl = *n;
+ } else if (applv) {
+ mvl = *mv;
+ }
+ rsvec = rsvec || applv;
+ rooteps = sqrt(*eps);
+ rootsfmin = sqrt(*sfmin);
+ small = *sfmin / *eps;
+ big = 1.f / *sfmin;
+ rootbig = 1.f / rootsfmin;
+ large = big / sqrt((real) (*m * *n));
+ bigtheta = 1.f / rooteps;
+ roottol = sqrt(*tol);
+
+/* -#- Initialize the right singular vector matrix -#- */
+
+/* RSVEC = LSAME( JOBV, 'Y' ) */
+
+ emptsw = *n1 * (*n - *n1);
+ notrot = 0;
+ fastr[0] = 0.f;
+
+/* -#- Row-cyclic pivot strategy with de Rijk's pivoting -#- */
+
+ kbl = min(8,*n);
+ nblr = *n1 / kbl;
+ if (nblr * kbl != *n1) {
+ ++nblr;
+ }
+/* .. the tiling is nblr-by-nblc [tiles] */
+ nblc = (*n - *n1) / kbl;
+ if (nblc * kbl != *n - *n1) {
+ ++nblc;
+ }
+/* Computing 2nd power */
+ i__1 = kbl;
+ blskip = i__1 * i__1 + 1;
+/* [TP] BLSKIP is a tuning parameter that depends on SWBAND and KBL. */
+ rowskip = min(5,kbl);
+/* [TP] ROWSKIP is a tuning parameter. */
+ swband = 0;
+/* [TP] SWBAND is a tuning parameter. It is meaningful and effective */
+/* if SGSVJ1 is used as a computational routine in the preconditioned */
+/* Jacobi SVD algorithm SGESVJ. */
+
+
+/* | * * * [x] [x] [x]| */
+/* | * * * [x] [x] [x]| Row-cycling in the nblr-by-nblc [x] blocks. */
+/* | * * * [x] [x] [x]| Row-cyclic pivoting inside each [x] block. */
+/* |[x] [x] [x] * * * | */
+/* |[x] [x] [x] * * * | */
+/* |[x] [x] [x] * * * | */
+
+
+ i__1 = *nsweep;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* .. go go go ... */
+
+ mxaapq = 0.f;
+ mxsinj = 0.f;
+ iswrot = 0;
+
+ notrot = 0;
+ pskipped = 0;
+
+ i__2 = nblr;
+ for (ibr = 1; ibr <= i__2; ++ibr) {
+ igl = (ibr - 1) * kbl + 1;
+
+
+/* ........................................................ */
+/* ... go to the off diagonal blocks */
+ igl = (ibr - 1) * kbl + 1;
+ i__3 = nblc;
+ for (jbc = 1; jbc <= i__3; ++jbc) {
+ jgl = *n1 + (jbc - 1) * kbl + 1;
+/* doing the block at ( ibr, jbc ) */
+ ijblsk = 0;
+/* Computing MIN */
+ i__5 = igl + kbl - 1;
+ i__4 = min(i__5,*n1);
+ for (p = igl; p <= i__4; ++p) {
+ aapp = sva[p];
+ if (aapp > 0.f) {
+ pskipped = 0;
+/* Computing MIN */
+ i__6 = jgl + kbl - 1;
+ i__5 = min(i__6,*n);
+ for (q = jgl; q <= i__5; ++q) {
+
+ aaqq = sva[q];
+ if (aaqq > 0.f) {
+ aapp0 = aapp;
+
+/* -#- M x 2 Jacobi SVD -#- */
+
+/* -#- Safe Gram matrix computation -#- */
+
+ if (aaqq >= 1.f) {
+ if (aapp >= aaqq) {
+ rotok = small * aapp <= aaqq;
+ } else {
+ rotok = small * aaqq <= aapp;
+ }
+ if (aapp < big / aaqq) {
+ aapq = sdot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * d__[p] * d__[q] /
+ aaqq / aapp;
+ } else {
+ scopy_(m, &a[p * a_dim1 + 1], &c__1, &
+ work[1], &c__1);
+ slascl_("G", &c__0, &c__0, &aapp, &
+ d__[p], m, &c__1, &work[1],
+ lda, &ierr);
+ aapq = sdot_(m, &work[1], &c__1, &a[q
+ * a_dim1 + 1], &c__1) * d__[q]
+ / aaqq;
+ }
+ } else {
+ if (aapp >= aaqq) {
+ rotok = aapp <= aaqq / small;
+ } else {
+ rotok = aaqq <= aapp / small;
+ }
+ if (aapp > small / aaqq) {
+ aapq = sdot_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1], &
+ c__1) * d__[p] * d__[q] /
+ aaqq / aapp;
+ } else {
+ scopy_(m, &a[q * a_dim1 + 1], &c__1, &
+ work[1], &c__1);
+ slascl_("G", &c__0, &c__0, &aaqq, &
+ d__[q], m, &c__1, &work[1],
+ lda, &ierr);
+ aapq = sdot_(m, &work[1], &c__1, &a[p
+ * a_dim1 + 1], &c__1) * d__[p]
+ / aapp;
+ }
+ }
+/* Computing MAX */
+ r__1 = mxaapq, r__2 = dabs(aapq);
+ mxaapq = dmax(r__1,r__2);
+/* TO rotate or NOT to rotate, THAT is the question ... */
+
+ if (dabs(aapq) > *tol) {
+ notrot = 0;
+/* ROTATED = ROTATED + 1 */
+ pskipped = 0;
+ ++iswrot;
+
+ if (rotok) {
+
+ aqoap = aaqq / aapp;
+ apoaq = aapp / aaqq;
+ theta = (r__1 = aqoap - apoaq, dabs(
+ r__1)) * -.5f / aapq;
+ if (aaqq > aapp0) {
+ theta = -theta;
+ }
+ if (dabs(theta) > bigtheta) {
+ t = .5f / theta;
+ fastr[2] = t * d__[p] / d__[q];
+ fastr[3] = -t * d__[q] / d__[p];
+ srotm_(m, &a[p * a_dim1 + 1], &
+ c__1, &a[q * a_dim1 + 1],
+ &c__1, fastr);
+ if (rsvec) {
+ srotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q *
+ v_dim1 + 1], &c__1, fastr);
+ }
+/* Computing MAX */
+ r__1 = 0.f, r__2 = t * apoaq *
+ aapq + 1.f;
+ sva[q] = aaqq * sqrt((dmax(r__1,
+ r__2)));
+/* Computing MAX */
+ r__1 = 0.f, r__2 = 1.f - t *
+ aqoap * aapq;
+ aapp *= sqrt((dmax(r__1,r__2)));
+/* Computing MAX */
+ r__1 = mxsinj, r__2 = dabs(t);
+ mxsinj = dmax(r__1,r__2);
+ } else {
+
+/* .. choose correct signum for THETA and rotate */
+
+ thsign = -r_sign(&c_b35, &aapq);
+ if (aaqq > aapp0) {
+ thsign = -thsign;
+ }
+ t = 1.f / (theta + thsign * sqrt(
+ theta * theta + 1.f));
+ cs = sqrt(1.f / (t * t + 1.f));
+ sn = t * cs;
+/* Computing MAX */
+ r__1 = mxsinj, r__2 = dabs(sn);
+ mxsinj = dmax(r__1,r__2);
+/* Computing MAX */
+ r__1 = 0.f, r__2 = t * apoaq *
+ aapq + 1.f;
+ sva[q] = aaqq * sqrt((dmax(r__1,
+ r__2)));
+ aapp *= sqrt(1.f - t * aqoap *
+ aapq);
+ apoaq = d__[p] / d__[q];
+ aqoap = d__[q] / d__[p];
+ if (d__[p] >= 1.f) {
+
+ if (d__[q] >= 1.f) {
+ fastr[2] = t * apoaq;
+ fastr[3] = -t * aqoap;
+ d__[p] *= cs;
+ d__[q] *= cs;
+ srotm_(m, &a[p * a_dim1 + 1], &c__1, &a[q *
+ a_dim1 + 1], &c__1, fastr);
+ if (rsvec) {
+ srotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[
+ q * v_dim1 + 1], &c__1, fastr);
+ }
+ } else {
+ r__1 = -t * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ if (rsvec) {
+ r__1 = -t * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ }
+ d__[p] *= cs;
+ d__[q] /= cs;
+ }
+ } else {
+ if (d__[q] >= 1.f) {
+ r__1 = t * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1, &a[
+ q * a_dim1 + 1], &c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1, &a[
+ p * a_dim1 + 1], &c__1);
+ if (rsvec) {
+ r__1 = t * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1], &
+ c__1, &v[q * v_dim1 + 1], &c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1], &
+ c__1, &v[p * v_dim1 + 1], &c__1);
+ }
+ d__[p] /= cs;
+ d__[q] *= cs;
+ } else {
+ if (d__[p] >= d__[q]) {
+ r__1 = -t * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ d__[p] *= cs;
+ d__[q] /= cs;
+ if (rsvec) {
+ r__1 = -t * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ r__1 = cs * sn * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ }
+ } else {
+ r__1 = t * apoaq;
+ saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1,
+ &a[q * a_dim1 + 1], &c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1,
+ &a[p * a_dim1 + 1], &c__1);
+ d__[p] /= cs;
+ d__[q] *= cs;
+ if (rsvec) {
+ r__1 = t * apoaq;
+ saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1],
+ &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ r__1 = -cs * sn * aqoap;
+ saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1],
+ &c__1, &v[p * v_dim1 + 1], &
+ c__1);
+ }
+ }
+ }
+ }
+ }
+ } else {
+ if (aapp > aaqq) {
+ scopy_(m, &a[p * a_dim1 + 1], &
+ c__1, &work[1], &c__1);
+ slascl_("G", &c__0, &c__0, &aapp,
+ &c_b35, m, &c__1, &work[1]
+, lda, &ierr);
+ slascl_("G", &c__0, &c__0, &aaqq,
+ &c_b35, m, &c__1, &a[q *
+ a_dim1 + 1], lda, &ierr);
+ temp1 = -aapq * d__[p] / d__[q];
+ saxpy_(m, &temp1, &work[1], &c__1,
+ &a[q * a_dim1 + 1], &
+ c__1);
+ slascl_("G", &c__0, &c__0, &c_b35,
+ &aaqq, m, &c__1, &a[q *
+ a_dim1 + 1], lda, &ierr);
+/* Computing MAX */
+ r__1 = 0.f, r__2 = 1.f - aapq *
+ aapq;
+ sva[q] = aaqq * sqrt((dmax(r__1,
+ r__2)));
+ mxsinj = dmax(mxsinj,*sfmin);
+ } else {
+ scopy_(m, &a[q * a_dim1 + 1], &
+ c__1, &work[1], &c__1);
+ slascl_("G", &c__0, &c__0, &aaqq,
+ &c_b35, m, &c__1, &work[1]
+, lda, &ierr);
+ slascl_("G", &c__0, &c__0, &aapp,
+ &c_b35, m, &c__1, &a[p *
+ a_dim1 + 1], lda, &ierr);
+ temp1 = -aapq * d__[q] / d__[p];
+ saxpy_(m, &temp1, &work[1], &c__1,
+ &a[p * a_dim1 + 1], &
+ c__1);
+ slascl_("G", &c__0, &c__0, &c_b35,
+ &aapp, m, &c__1, &a[p *
+ a_dim1 + 1], lda, &ierr);
+/* Computing MAX */
+ r__1 = 0.f, r__2 = 1.f - aapq *
+ aapq;
+ sva[p] = aapp * sqrt((dmax(r__1,
+ r__2)));
+ mxsinj = dmax(mxsinj,*sfmin);
+ }
+ }
+/* END IF ROTOK THEN ... ELSE */
+
+/* In the case of cancellation in updating SVA(q) */
+/* .. recompute SVA(q) */
+/* Computing 2nd power */
+ r__1 = sva[q] / aaqq;
+ if (r__1 * r__1 <= rooteps) {
+ if (aaqq < rootbig && aaqq >
+ rootsfmin) {
+ sva[q] = snrm2_(m, &a[q * a_dim1
+ + 1], &c__1) * d__[q];
+ } else {
+ t = 0.f;
+ aaqq = 0.f;
+ slassq_(m, &a[q * a_dim1 + 1], &
+ c__1, &t, &aaqq);
+ sva[q] = t * sqrt(aaqq) * d__[q];
+ }
+ }
+/* Computing 2nd power */
+ r__1 = aapp / aapp0;
+ if (r__1 * r__1 <= rooteps) {
+ if (aapp < rootbig && aapp >
+ rootsfmin) {
+ aapp = snrm2_(m, &a[p * a_dim1 +
+ 1], &c__1) * d__[p];
+ } else {
+ t = 0.f;
+ aapp = 0.f;
+ slassq_(m, &a[p * a_dim1 + 1], &
+ c__1, &t, &aapp);
+ aapp = t * sqrt(aapp) * d__[p];
+ }
+ sva[p] = aapp;
+ }
+/* end of OK rotation */
+ } else {
+ ++notrot;
+/* SKIPPED = SKIPPED + 1 */
+ ++pskipped;
+ ++ijblsk;
+ }
+ } else {
+ ++notrot;
+ ++pskipped;
+ ++ijblsk;
+ }
+/* IF ( NOTROT .GE. EMPTSW ) GO TO 2011 */
+ if (i__ <= swband && ijblsk >= blskip) {
+ sva[p] = aapp;
+ notrot = 0;
+ goto L2011;
+ }
+ if (i__ <= swband && pskipped > rowskip) {
+ aapp = -aapp;
+ notrot = 0;
+ goto L2203;
+ }
+
+/* L2200: */
+ }
+/* end of the q-loop */
+L2203:
+ sva[p] = aapp;
+
+ } else {
+ if (aapp == 0.f) {
+/* Computing MIN */
+ i__5 = jgl + kbl - 1;
+ notrot = notrot + min(i__5,*n) - jgl + 1;
+ }
+ if (aapp < 0.f) {
+ notrot = 0;
+ }
+/* ** IF ( NOTROT .GE. EMPTSW ) GO TO 2011 */
+ }
+/* L2100: */
+ }
+/* end of the p-loop */
+/* L2010: */
+ }
+/* end of the jbc-loop */
+L2011:
+/* 2011 bailed out of the jbc-loop */
+/* Computing MIN */
+ i__4 = igl + kbl - 1;
+ i__3 = min(i__4,*n);
+ for (p = igl; p <= i__3; ++p) {
+ sva[p] = (r__1 = sva[p], dabs(r__1));
+/* L2012: */
+ }
+/* ** IF ( NOTROT .GE. EMPTSW ) GO TO 1994 */
+/* L2000: */
+ }
+/* 2000 :: end of the ibr-loop */
+
+/* .. update SVA(N) */
+ if (sva[*n] < rootbig && sva[*n] > rootsfmin) {
+ sva[*n] = snrm2_(m, &a[*n * a_dim1 + 1], &c__1) * d__[*n];
+ } else {
+ t = 0.f;
+ aapp = 0.f;
+ slassq_(m, &a[*n * a_dim1 + 1], &c__1, &t, &aapp);
+ sva[*n] = t * sqrt(aapp) * d__[*n];
+ }
+
+/* Additional steering devices */
+
+ if (i__ < swband && (mxaapq <= roottol || iswrot <= *n)) {
+ swband = i__;
+ }
+ if (i__ > swband + 1 && mxaapq < (real) (*n) * *tol && (real) (*n) *
+ mxaapq * mxsinj < *tol) {
+ goto L1994;
+ }
+
+ if (notrot >= emptsw) {
+ goto L1994;
+ }
+/* L1993: */
+ }
+/* end i=1:NSWEEP loop */
+/* #:) Reaching this point means that the procedure has completed the given */
+/* number of sweeps. */
+ *info = *nsweep - 1;
+ goto L1995;
+L1994:
+/* #:) Reaching this point means that during the i-th sweep all pivots were */
+/* below the given threshold, causing early exit. */
+ *info = 0;
+/* #:) INFO = 0 confirms successful iterations. */
+L1995:
+
+/* Sort the vector D */
+
+ i__1 = *n - 1;
+ for (p = 1; p <= i__1; ++p) {
+ i__2 = *n - p + 1;
+ q = isamax_(&i__2, &sva[p], &c__1) + p - 1;
+ if (p != q) {
+ temp1 = sva[p];
+ sva[p] = sva[q];
+ sva[q] = temp1;
+ temp1 = d__[p];
+ d__[p] = d__[q];
+ d__[q] = temp1;
+ sswap_(m, &a[p * a_dim1 + 1], &c__1, &a[q * a_dim1 + 1], &c__1);
+ if (rsvec) {
+ sswap_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q * v_dim1 + 1], &
+ c__1);
+ }
+ }
+/* L5991: */
+ }
+
+ return 0;
+/* .. */
+/* .. END OF SGSVJ1 */
+/* .. */
+} /* sgsvj1_ */
diff --git a/contrib/libs/clapack/sgtcon.c b/contrib/libs/clapack/sgtcon.c
new file mode 100644
index 0000000000..2410e5090a
--- /dev/null
+++ b/contrib/libs/clapack/sgtcon.c
@@ -0,0 +1,206 @@
+/* sgtcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sgtcon_(char *norm, integer *n, real *dl, real *d__,
+ real *du, real *du2, integer *ipiv, real *anorm, real *rcond, real *
+ work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ integer i__, kase, kase1;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int slacn2_(integer *, real *, real *, integer *,
+ real *, integer *, integer *), xerbla_(char *, integer *);
+ real ainvnm;
+ logical onenrm;
+ extern /* Subroutine */ int sgttrs_(char *, integer *, integer *, real *,
+ real *, real *, real *, integer *, real *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGTCON estimates the reciprocal of the condition number of a real */
+/* tridiagonal matrix A using the LU factorization as computed by */
+/* SGTTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* DL (input) REAL array, dimension (N-1) */
+/* The (n-1) multipliers that define the matrix L from the */
+/* LU factorization of A as computed by SGTTRF. */
+
+/* D (input) REAL array, dimension (N) */
+/* The n diagonal elements of the upper triangular matrix U from */
+/* the LU factorization of A. */
+
+/* DU (input) REAL array, dimension (N-1) */
+/* The (n-1) elements of the first superdiagonal of U. */
+
+/* DU2 (input) REAL array, dimension (N-2) */
+/* The (n-2) elements of the second superdiagonal of U. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/* interchanged with row IPIV(i). IPIV(i) will always be either */
+/* i or i+1; IPIV(i) = i indicates a row interchange was not */
+/* required. */
+
+/* ANORM (input) REAL */
+/* If NORM = '1' or 'O', the 1-norm of the original matrix A. */
+/* If NORM = 'I', the infinity-norm of the original matrix A. */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) REAL array, dimension (2*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments. */
+
+ /* Parameter adjustments */
+ --iwork;
+ --work;
+ --ipiv;
+ --du2;
+ --du;
+ --d__;
+ --dl;
+
+ /* Function Body */
+ *info = 0;
+ onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+ if (! onenrm && ! lsame_(norm, "I")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*anorm < 0.f) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGTCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.f;
+ if (*n == 0) {
+ *rcond = 1.f;
+ return 0;
+ } else if (*anorm == 0.f) {
+ return 0;
+ }
+
+/* Check that D(1:N) is non-zero. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] == 0.f) {
+ return 0;
+ }
+/* L10: */
+ }
+
+ ainvnm = 0.f;
+ if (onenrm) {
+ kase1 = 1;
+ } else {
+ kase1 = 2;
+ }
+ kase = 0;
+L20:
+ slacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+ if (kase == kase1) {
+
+/* Multiply by inv(U)*inv(L). */
+
+ sgttrs_("No transpose", n, &c__1, &dl[1], &d__[1], &du[1], &du2[1]
+, &ipiv[1], &work[1], n, info);
+ } else {
+
+/* Multiply by inv(L')*inv(U'). */
+
+ sgttrs_("Transpose", n, &c__1, &dl[1], &d__[1], &du[1], &du2[1], &
+ ipiv[1], &work[1], n, info);
+ }
+ goto L20;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.f) {
+ *rcond = 1.f / ainvnm / *anorm;
+ }
+
+ return 0;
+
+/* End of SGTCON */
+
+} /* sgtcon_ */
diff --git a/contrib/libs/clapack/sgtrfs.c b/contrib/libs/clapack/sgtrfs.c
new file mode 100644
index 0000000000..0168d1dd92
--- /dev/null
+++ b/contrib/libs/clapack/sgtrfs.c
@@ -0,0 +1,444 @@
+/* sgtrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b18 = -1.f;
+static real c_b19 = 1.f;
+
+/* Subroutine */ int sgtrfs_(char *trans, integer *n, integer *nrhs, real *dl,
+ real *d__, real *du, real *dlf, real *df, real *duf, real *du2,
+ integer *ipiv, real *b, integer *ldb, real *x, integer *ldx, real *
+ ferr, real *berr, real *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2;
+ real r__1, r__2, r__3, r__4;
+
+ /* Local variables */
+ integer i__, j;
+ real s;
+ integer nz;
+ real eps;
+ integer kase;
+ real safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3], count;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), saxpy_(integer *, real *, real *, integer *, real *,
+ integer *), slacn2_(integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *), slagtm_(
+ char *, integer *, integer *, real *, real *, real *, real *,
+ real *, integer *, real *, real *, integer *);
+ logical notran;
+ char transn[1], transt[1];
+ real lstres;
+ extern /* Subroutine */ int sgttrs_(char *, integer *, integer *, real *,
+ real *, real *, real *, integer *, real *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGTRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is tridiagonal, and provides */
+/* error bounds and backward error estimates for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose = Transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input) REAL array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of A. */
+
+/* D (input) REAL array, dimension (N) */
+/* The diagonal elements of A. */
+
+/* DU (input) REAL array, dimension (N-1) */
+/* The (n-1) superdiagonal elements of A. */
+
+/* DLF (input) REAL array, dimension (N-1) */
+/* The (n-1) multipliers that define the matrix L from the */
+/* LU factorization of A as computed by SGTTRF. */
+
+/* DF (input) REAL array, dimension (N) */
+/* The n diagonal elements of the upper triangular matrix U from */
+/* the LU factorization of A. */
+
+/* DUF (input) REAL array, dimension (N-1) */
+/* The (n-1) elements of the first superdiagonal of U. */
+
+/* DU2 (input) REAL array, dimension (N-2) */
+/* The (n-2) elements of the second superdiagonal of U. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/* interchanged with row IPIV(i). IPIV(i) will always be either */
+/* i or i+1; IPIV(i) = i indicates a row interchange was not */
+/* required. */
+
+/* B (input) REAL array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) REAL array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by SGTTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --dl;
+ --d__;
+ --du;
+ --dlf;
+ --df;
+ --duf;
+ --du2;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -13;
+ } else if (*ldx < max(1,*n)) {
+ *info = -15;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGTRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.f;
+ berr[j] = 0.f;
+/* L10: */
+ }
+ return 0;
+ }
+
+ if (notran) {
+ *(unsigned char *)transn = 'N';
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transn = 'T';
+ *(unsigned char *)transt = 'N';
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = 4;
+ eps = slamch_("Epsilon");
+ safmin = slamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A, A**T, or A**H, depending on TRANS. */
+
+ scopy_(n, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+ slagtm_(trans, n, &c__1, &c_b18, &dl[1], &d__[1], &du[1], &x[j *
+ x_dim1 + 1], ldx, &c_b19, &work[*n + 1], n);
+
+/* Compute abs(op(A))*abs(x) + abs(b) for use in the backward */
+/* error bound. */
+
+ if (notran) {
+ if (*n == 1) {
+ work[1] = (r__1 = b[j * b_dim1 + 1], dabs(r__1)) + (r__2 =
+ d__[1] * x[j * x_dim1 + 1], dabs(r__2));
+ } else {
+ work[1] = (r__1 = b[j * b_dim1 + 1], dabs(r__1)) + (r__2 =
+ d__[1] * x[j * x_dim1 + 1], dabs(r__2)) + (r__3 = du[
+ 1] * x[j * x_dim1 + 2], dabs(r__3));
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ work[i__] = (r__1 = b[i__ + j * b_dim1], dabs(r__1)) + (
+ r__2 = dl[i__ - 1] * x[i__ - 1 + j * x_dim1],
+ dabs(r__2)) + (r__3 = d__[i__] * x[i__ + j *
+ x_dim1], dabs(r__3)) + (r__4 = du[i__] * x[i__ +
+ 1 + j * x_dim1], dabs(r__4));
+/* L30: */
+ }
+ work[*n] = (r__1 = b[*n + j * b_dim1], dabs(r__1)) + (r__2 =
+ dl[*n - 1] * x[*n - 1 + j * x_dim1], dabs(r__2)) + (
+ r__3 = d__[*n] * x[*n + j * x_dim1], dabs(r__3));
+ }
+ } else {
+ if (*n == 1) {
+ work[1] = (r__1 = b[j * b_dim1 + 1], dabs(r__1)) + (r__2 =
+ d__[1] * x[j * x_dim1 + 1], dabs(r__2));
+ } else {
+ work[1] = (r__1 = b[j * b_dim1 + 1], dabs(r__1)) + (r__2 =
+ d__[1] * x[j * x_dim1 + 1], dabs(r__2)) + (r__3 = dl[
+ 1] * x[j * x_dim1 + 2], dabs(r__3));
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ work[i__] = (r__1 = b[i__ + j * b_dim1], dabs(r__1)) + (
+ r__2 = du[i__ - 1] * x[i__ - 1 + j * x_dim1],
+ dabs(r__2)) + (r__3 = d__[i__] * x[i__ + j *
+ x_dim1], dabs(r__3)) + (r__4 = dl[i__] * x[i__ +
+ 1 + j * x_dim1], dabs(r__4));
+/* L40: */
+ }
+ work[*n] = (r__1 = b[*n + j * b_dim1], dabs(r__1)) + (r__2 =
+ du[*n - 1] * x[*n - 1 + j * x_dim1], dabs(r__2)) + (
+ r__3 = d__[*n] * x[*n + j * x_dim1], dabs(r__3));
+ }
+ }
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ s = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+/* Computing MAX */
+ r__2 = s, r__3 = (r__1 = work[*n + i__], dabs(r__1)) / work[
+ i__];
+ s = dmax(r__2,r__3);
+ } else {
+/* Computing MAX */
+ r__2 = s, r__3 = ((r__1 = work[*n + i__], dabs(r__1)) + safe1)
+ / (work[i__] + safe1);
+ s = dmax(r__2,r__3);
+ }
+/* L50: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ sgttrs_(trans, n, &c__1, &dlf[1], &df[1], &duf[1], &du2[1], &ipiv[
+ 1], &work[*n + 1], n, info);
+ saxpy_(n, &c_b19, &work[*n + 1], &c__1, &x[j * x_dim1 + 1], &c__1)
+ ;
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use SLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+ work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+ work[i__];
+ } else {
+ work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+ work[i__] + safe1;
+ }
+/* L60: */
+ }
+
+ kase = 0;
+L70:
+ slacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+ kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)**T). */
+
+ sgttrs_(transt, n, &c__1, &dlf[1], &df[1], &duf[1], &du2[1], &
+ ipiv[1], &work[*n + 1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] = work[i__] * work[*n + i__];
+/* L80: */
+ }
+ } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] = work[i__] * work[*n + i__];
+/* L90: */
+ }
+ sgttrs_(transn, n, &c__1, &dlf[1], &df[1], &duf[1], &du2[1], &
+ ipiv[1], &work[*n + 1], n, info);
+ }
+ goto L70;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = lstres, r__3 = (r__1 = x[i__ + j * x_dim1], dabs(r__1));
+ lstres = dmax(r__2,r__3);
+/* L100: */
+ }
+ if (lstres != 0.f) {
+ ferr[j] /= lstres;
+ }
+
+/* L110: */
+ }
+
+ return 0;
+
+/* End of SGTRFS */
+
+} /* sgtrfs_ */
diff --git a/contrib/libs/clapack/sgtsv.c b/contrib/libs/clapack/sgtsv.c
new file mode 100644
index 0000000000..1fff65d230
--- /dev/null
+++ b/contrib/libs/clapack/sgtsv.c
@@ -0,0 +1,318 @@
+/* sgtsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sgtsv_(integer *n, integer *nrhs, real *dl, real *d__,
+ real *du, real *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Local variables */
+ integer i__, j;
+ real fact, temp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGTSV solves the equation */
+
+/* A*X = B, */
+
+/* where A is an n by n tridiagonal matrix, by Gaussian elimination with */
+/* partial pivoting. */
+
+/* Note that the equation A'*X = B may be solved by interchanging the */
+/* order of the arguments DU and DL. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input/output) REAL array, dimension (N-1) */
+/* On entry, DL must contain the (n-1) sub-diagonal elements of */
+/* A. */
+
+/* On exit, DL is overwritten by the (n-2) elements of the */
+/* second super-diagonal of the upper triangular matrix U from */
+/* the LU factorization of A, in DL(1), ..., DL(n-2). */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, D must contain the diagonal elements of A. */
+
+/* On exit, D is overwritten by the n diagonal elements of U. */
+
+/* DU (input/output) REAL array, dimension (N-1) */
+/* On entry, DU must contain the (n-1) super-diagonal elements */
+/* of A. */
+
+/* On exit, DU is overwritten by the (n-1) elements of the first */
+/* super-diagonal of U. */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the N by NRHS matrix of right hand side matrix B. */
+/* On exit, if INFO = 0, the N by NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) is exactly zero, and the solution */
+/* has not been computed. The factorization has not been */
+/* completed unless i = N. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --dl;
+ --d__;
+ --du;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*nrhs < 0) {
+ *info = -2;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGTSV ", &i__1);
+ return 0;
+ }
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*nrhs == 1) {
+ i__1 = *n - 2;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((r__1 = d__[i__], dabs(r__1)) >= (r__2 = dl[i__], dabs(r__2)))
+ {
+
+/* No row interchange required */
+
+ if (d__[i__] != 0.f) {
+ fact = dl[i__] / d__[i__];
+ d__[i__ + 1] -= fact * du[i__];
+ b[i__ + 1 + b_dim1] -= fact * b[i__ + b_dim1];
+ } else {
+ *info = i__;
+ return 0;
+ }
+ dl[i__] = 0.f;
+ } else {
+
+/* Interchange rows I and I+1 */
+
+ fact = d__[i__] / dl[i__];
+ d__[i__] = dl[i__];
+ temp = d__[i__ + 1];
+ d__[i__ + 1] = du[i__] - fact * temp;
+ dl[i__] = du[i__ + 1];
+ du[i__ + 1] = -fact * dl[i__];
+ du[i__] = temp;
+ temp = b[i__ + b_dim1];
+ b[i__ + b_dim1] = b[i__ + 1 + b_dim1];
+ b[i__ + 1 + b_dim1] = temp - fact * b[i__ + 1 + b_dim1];
+ }
+/* L10: */
+ }
+ if (*n > 1) {
+ i__ = *n - 1;
+ if ((r__1 = d__[i__], dabs(r__1)) >= (r__2 = dl[i__], dabs(r__2)))
+ {
+ if (d__[i__] != 0.f) {
+ fact = dl[i__] / d__[i__];
+ d__[i__ + 1] -= fact * du[i__];
+ b[i__ + 1 + b_dim1] -= fact * b[i__ + b_dim1];
+ } else {
+ *info = i__;
+ return 0;
+ }
+ } else {
+ fact = d__[i__] / dl[i__];
+ d__[i__] = dl[i__];
+ temp = d__[i__ + 1];
+ d__[i__ + 1] = du[i__] - fact * temp;
+ du[i__] = temp;
+ temp = b[i__ + b_dim1];
+ b[i__ + b_dim1] = b[i__ + 1 + b_dim1];
+ b[i__ + 1 + b_dim1] = temp - fact * b[i__ + 1 + b_dim1];
+ }
+ }
+ if (d__[*n] == 0.f) {
+ *info = *n;
+ return 0;
+ }
+ } else {
+ i__1 = *n - 2;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((r__1 = d__[i__], dabs(r__1)) >= (r__2 = dl[i__], dabs(r__2)))
+ {
+
+/* No row interchange required */
+
+ if (d__[i__] != 0.f) {
+ fact = dl[i__] / d__[i__];
+ d__[i__ + 1] -= fact * du[i__];
+ i__2 = *nrhs;
+ for (j = 1; j <= i__2; ++j) {
+ b[i__ + 1 + j * b_dim1] -= fact * b[i__ + j * b_dim1];
+/* L20: */
+ }
+ } else {
+ *info = i__;
+ return 0;
+ }
+ dl[i__] = 0.f;
+ } else {
+
+/* Interchange rows I and I+1 */
+
+ fact = d__[i__] / dl[i__];
+ d__[i__] = dl[i__];
+ temp = d__[i__ + 1];
+ d__[i__ + 1] = du[i__] - fact * temp;
+ dl[i__] = du[i__ + 1];
+ du[i__ + 1] = -fact * dl[i__];
+ du[i__] = temp;
+ i__2 = *nrhs;
+ for (j = 1; j <= i__2; ++j) {
+ temp = b[i__ + j * b_dim1];
+ b[i__ + j * b_dim1] = b[i__ + 1 + j * b_dim1];
+ b[i__ + 1 + j * b_dim1] = temp - fact * b[i__ + 1 + j *
+ b_dim1];
+/* L30: */
+ }
+ }
+/* L40: */
+ }
+ if (*n > 1) {
+ i__ = *n - 1;
+ if ((r__1 = d__[i__], dabs(r__1)) >= (r__2 = dl[i__], dabs(r__2)))
+ {
+ if (d__[i__] != 0.f) {
+ fact = dl[i__] / d__[i__];
+ d__[i__ + 1] -= fact * du[i__];
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ b[i__ + 1 + j * b_dim1] -= fact * b[i__ + j * b_dim1];
+/* L50: */
+ }
+ } else {
+ *info = i__;
+ return 0;
+ }
+ } else {
+ fact = d__[i__] / dl[i__];
+ d__[i__] = dl[i__];
+ temp = d__[i__ + 1];
+ d__[i__ + 1] = du[i__] - fact * temp;
+ du[i__] = temp;
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ temp = b[i__ + j * b_dim1];
+ b[i__ + j * b_dim1] = b[i__ + 1 + j * b_dim1];
+ b[i__ + 1 + j * b_dim1] = temp - fact * b[i__ + 1 + j *
+ b_dim1];
+/* L60: */
+ }
+ }
+ }
+ if (d__[*n] == 0.f) {
+ *info = *n;
+ return 0;
+ }
+ }
+
+/* Back solve with the matrix U from the factorization. */
+
+ if (*nrhs <= 2) {
+ j = 1;
+L70:
+ b[*n + j * b_dim1] /= d__[*n];
+ if (*n > 1) {
+ b[*n - 1 + j * b_dim1] = (b[*n - 1 + j * b_dim1] - du[*n - 1] * b[
+ *n + j * b_dim1]) / d__[*n - 1];
+ }
+ for (i__ = *n - 2; i__ >= 1; --i__) {
+ b[i__ + j * b_dim1] = (b[i__ + j * b_dim1] - du[i__] * b[i__ + 1
+ + j * b_dim1] - dl[i__] * b[i__ + 2 + j * b_dim1]) / d__[
+ i__];
+/* L80: */
+ }
+ if (j < *nrhs) {
+ ++j;
+ goto L70;
+ }
+ } else {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ b[*n + j * b_dim1] /= d__[*n];
+ if (*n > 1) {
+ b[*n - 1 + j * b_dim1] = (b[*n - 1 + j * b_dim1] - du[*n - 1]
+ * b[*n + j * b_dim1]) / d__[*n - 1];
+ }
+ for (i__ = *n - 2; i__ >= 1; --i__) {
+ b[i__ + j * b_dim1] = (b[i__ + j * b_dim1] - du[i__] * b[i__
+ + 1 + j * b_dim1] - dl[i__] * b[i__ + 2 + j * b_dim1])
+ / d__[i__];
+/* L90: */
+ }
+/* L100: */
+ }
+ }
+
+ return 0;
+
+/* End of SGTSV */
+
+} /* sgtsv_ */
diff --git a/contrib/libs/clapack/sgtsvx.c b/contrib/libs/clapack/sgtsvx.c
new file mode 100644
index 0000000000..e3ca06a61a
--- /dev/null
+++ b/contrib/libs/clapack/sgtsvx.c
@@ -0,0 +1,347 @@
+/* sgtsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sgtsvx_(char *fact, char *trans, integer *n, integer *
+ nrhs, real *dl, real *d__, real *du, real *dlf, real *df, real *duf,
+ real *du2, integer *ipiv, real *b, integer *ldb, real *x, integer *
+ ldx, real *rcond, real *ferr, real *berr, real *work, integer *iwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1;
+
+ /* Local variables */
+ char norm[1];
+ extern logical lsame_(char *, char *);
+ real anorm;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ extern doublereal slamch_(char *);
+ logical nofact;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern doublereal slangt_(char *, integer *, real *, real *, real *);
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *), sgtcon_(char *, integer *,
+ real *, real *, real *, real *, integer *, real *, real *, real *,
+ integer *, integer *);
+ logical notran;
+ extern /* Subroutine */ int sgtrfs_(char *, integer *, integer *, real *,
+ real *, real *, real *, real *, real *, real *, integer *, real *,
+ integer *, real *, integer *, real *, real *, real *, integer *,
+ integer *), sgttrf_(integer *, real *, real *, real *,
+ real *, integer *, integer *), sgttrs_(char *, integer *, integer
+ *, real *, real *, real *, real *, integer *, real *, integer *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGTSVX uses the LU factorization to compute the solution to a real */
+/* system of linear equations A * X = B or A**T * X = B, */
+/* where A is a tridiagonal matrix of order N and X and B are N-by-NRHS */
+/* matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the LU decomposition is used to factor the matrix A */
+/* as A = L * U, where L is a product of permutation and unit lower */
+/* bidiagonal matrices and U is upper triangular with nonzeros in */
+/* only the main diagonal and first two superdiagonals. */
+
+/* 2. If some U(i,i)=0, so that U is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of A has been */
+/* supplied on entry. */
+/* = 'F': DLF, DF, DUF, DU2, and IPIV contain the factored */
+/* form of A; DL, D, DU, DLF, DF, DUF, DU2 and IPIV */
+/* will not be modified. */
+/* = 'N': The matrix will be copied to DLF, DF, and DUF */
+/* and factored. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose = Transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input) REAL array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of A. */
+
+/* D (input) REAL array, dimension (N) */
+/* The n diagonal elements of A. */
+
+/* DU (input) REAL array, dimension (N-1) */
+/* The (n-1) superdiagonal elements of A. */
+
+/* DLF (input or output) REAL array, dimension (N-1) */
+/* If FACT = 'F', then DLF is an input argument and on entry */
+/* contains the (n-1) multipliers that define the matrix L from */
+/* the LU factorization of A as computed by SGTTRF. */
+
+/* If FACT = 'N', then DLF is an output argument and on exit */
+/* contains the (n-1) multipliers that define the matrix L from */
+/* the LU factorization of A. */
+
+/* DF (input or output) REAL array, dimension (N) */
+/* If FACT = 'F', then DF is an input argument and on entry */
+/* contains the n diagonal elements of the upper triangular */
+/* matrix U from the LU factorization of A. */
+
+/* If FACT = 'N', then DF is an output argument and on exit */
+/* contains the n diagonal elements of the upper triangular */
+/* matrix U from the LU factorization of A. */
+
+/* DUF (input or output) REAL array, dimension (N-1) */
+/* If FACT = 'F', then DUF is an input argument and on entry */
+/* contains the (n-1) elements of the first superdiagonal of U. */
+
+/* If FACT = 'N', then DUF is an output argument and on exit */
+/* contains the (n-1) elements of the first superdiagonal of U. */
+
+/* DU2 (input or output) REAL array, dimension (N-2) */
+/* If FACT = 'F', then DU2 is an input argument and on entry */
+/* contains the (n-2) elements of the second superdiagonal of */
+/* U. */
+
+/* If FACT = 'N', then DU2 is an output argument and on exit */
+/* contains the (n-2) elements of the second superdiagonal of */
+/* U. */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains the pivot indices from the LU factorization of A as */
+/* computed by SGTTRF. */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains the pivot indices from the LU factorization of A; */
+/* row i of the matrix was interchanged with row IPIV(i). */
+/* IPIV(i) will always be either i or i+1; IPIV(i) = i indicates */
+/* a row interchange was not required. */
+
+/* B (input) REAL array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) REAL array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) REAL */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A. If RCOND is less than the machine precision (in */
+/* particular, if RCOND = 0), the matrix is singular to working */
+/* precision. This condition is indicated by a return code of */
+/* INFO > 0. */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: U(i,i) is exactly zero. The factorization */
+/* has not been completed unless i = N, but the */
+/* factor U is exactly singular, so the solution */
+/* and error bounds could not be computed. */
+/* RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --dl;
+ --d__;
+ --du;
+ --dlf;
+ --df;
+ --duf;
+ --du2;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ notran = lsame_(trans, "N");
+ if (! nofact && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*ldb < max(1,*n)) {
+ *info = -14;
+ } else if (*ldx < max(1,*n)) {
+ *info = -16;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGTSVX", &i__1);
+ return 0;
+ }
+
+ if (nofact) {
+
+/* Compute the LU factorization of A. */
+
+ scopy_(n, &d__[1], &c__1, &df[1], &c__1);
+ if (*n > 1) {
+ i__1 = *n - 1;
+ scopy_(&i__1, &dl[1], &c__1, &dlf[1], &c__1);
+ i__1 = *n - 1;
+ scopy_(&i__1, &du[1], &c__1, &duf[1], &c__1);
+ }
+ sgttrf_(n, &dlf[1], &df[1], &duf[1], &du2[1], &ipiv[1], info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.f;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ if (notran) {
+ *(unsigned char *)norm = '1';
+ } else {
+ *(unsigned char *)norm = 'I';
+ }
+ anorm = slangt_(norm, n, &dl[1], &d__[1], &du[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ sgtcon_(norm, n, &dlf[1], &df[1], &duf[1], &du2[1], &ipiv[1], &anorm,
+ rcond, &work[1], &iwork[1], info);
+
+/* Compute the solution vectors X. */
+
+ slacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ sgttrs_(trans, n, nrhs, &dlf[1], &df[1], &duf[1], &du2[1], &ipiv[1], &x[
+ x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+ sgtrfs_(trans, n, nrhs, &dl[1], &d__[1], &du[1], &dlf[1], &df[1], &duf[1],
+ &du2[1], &ipiv[1], &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1]
+, &berr[1], &work[1], &iwork[1], info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < slamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ return 0;
+
+/* End of SGTSVX */
+
+} /* sgtsvx_ */
diff --git a/contrib/libs/clapack/sgttrf.c b/contrib/libs/clapack/sgttrf.c
new file mode 100644
index 0000000000..16f18a95de
--- /dev/null
+++ b/contrib/libs/clapack/sgttrf.c
@@ -0,0 +1,203 @@
+/* sgttrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sgttrf_(integer *n, real *dl, real *d__, real *du, real *
+ du2, integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2;
+
+ /* Local variables */
+ integer i__;
+ real fact, temp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGTTRF computes an LU factorization of a real tridiagonal matrix A */
+/* using elimination with partial pivoting and row interchanges. */
+
+/* The factorization has the form */
+/* A = L * U */
+/* where L is a product of permutation and unit lower bidiagonal */
+/* matrices and U is upper triangular with nonzeros in only the main */
+/* diagonal and first two superdiagonals. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* DL (input/output) REAL array, dimension (N-1) */
+/* On entry, DL must contain the (n-1) sub-diagonal elements of */
+/* A. */
+
+/* On exit, DL is overwritten by the (n-1) multipliers that */
+/* define the matrix L from the LU factorization of A. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, D must contain the diagonal elements of A. */
+
+/* On exit, D is overwritten by the n diagonal elements of the */
+/* upper triangular matrix U from the LU factorization of A. */
+
+/* DU (input/output) REAL array, dimension (N-1) */
+/* On entry, DU must contain the (n-1) super-diagonal elements */
+/* of A. */
+
+/* On exit, DU is overwritten by the (n-1) elements of the first */
+/* super-diagonal of U. */
+
+/* DU2 (output) REAL array, dimension (N-2) */
+/* On exit, DU2 is overwritten by the (n-2) elements of the */
+/* second super-diagonal of U. */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/* interchanged with row IPIV(i). IPIV(i) will always be either */
+/* i or i+1; IPIV(i) = i indicates a row interchange was not */
+/* required. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, U(k,k) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and division by zero will occur if it is used */
+/* to solve a system of equations. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments (f2c: shift pointers for 1-based indexing) */
+ --ipiv;
+ --du2;
+ --du;
+ --d__;
+ --dl;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) { /* N is the only argument validated here */
+ *info = -1;
+ i__1 = -(*info);
+ xerbla_("SGTTRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Initialize IPIV(i) = i and DU2(I) = 0 */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ipiv[i__] = i__;
+/* L10: */
+ }
+ i__1 = *n - 2;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ du2[i__] = 0.f;
+/* L20: */
+ }
+
+ i__1 = *n - 2;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((r__1 = d__[i__], dabs(r__1)) >= (r__2 = dl[i__], dabs(r__2))) { /* |d(i)| >= |dl(i)|: no interchange */
+
+/* No row interchange required, eliminate DL(I) */
+
+ if (d__[i__] != 0.f) { /* a zero pivot is left in place and flagged below */
+ fact = dl[i__] / d__[i__];
+ dl[i__] = fact;
+ d__[i__ + 1] -= fact * du[i__];
+ }
+ } else {
+
+/* Interchange rows I and I+1, eliminate DL(I) */
+
+ fact = d__[i__] / dl[i__]; /* |fact| <= 1 after the swap */
+ d__[i__] = dl[i__];
+ dl[i__] = fact;
+ temp = du[i__];
+ du[i__] = d__[i__ + 1];
+ d__[i__ + 1] = temp - fact * d__[i__ + 1];
+ du2[i__] = du[i__ + 1]; /* interchange creates fill-in in 2nd superdiagonal */
+ du[i__ + 1] = -fact * du[i__ + 1];
+ ipiv[i__] = i__ + 1;
+ }
+/* L30: */
+ }
+ if (*n > 1) { /* final step i = n-1: same elimination, but no DU2 fill-in possible */
+ i__ = *n - 1;
+ if ((r__1 = d__[i__], dabs(r__1)) >= (r__2 = dl[i__], dabs(r__2))) {
+ if (d__[i__] != 0.f) {
+ fact = dl[i__] / d__[i__];
+ dl[i__] = fact;
+ d__[i__ + 1] -= fact * du[i__];
+ }
+ } else {
+ fact = d__[i__] / dl[i__];
+ d__[i__] = dl[i__];
+ dl[i__] = fact;
+ temp = du[i__];
+ du[i__] = d__[i__ + 1];
+ d__[i__ + 1] = temp - fact * d__[i__ + 1];
+ ipiv[i__] = i__ + 1;
+ }
+ }
+
+/* Check for a zero on the diagonal of U. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] == 0.f) { /* first zero pivot: U is exactly singular, INFO = i */
+ *info = i__;
+ goto L50;
+ }
+/* L40: */
+ }
+L50:
+
+ return 0;
+
+/* End of SGTTRF */
+
+} /* sgttrf_ */
diff --git a/contrib/libs/clapack/sgttrs.c b/contrib/libs/clapack/sgttrs.c
new file mode 100644
index 0000000000..0b41a1301c
--- /dev/null
+++ b/contrib/libs/clapack/sgttrs.c
@@ -0,0 +1,189 @@
+/* sgttrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int sgttrs_(char *trans, integer *n, integer *nrhs, real *dl,
+ real *d__, real *du, real *du2, integer *ipiv, real *b, integer *ldb,
+ integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer j, jb, nb;
+ extern /* Subroutine */ int sgtts2_(integer *, integer *, integer *, real
+ *, real *, real *, real *, integer *, real *, integer *), xerbla_(
+ char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer itrans;
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGTTRS solves one of the systems of equations */
+/* A*X = B or A'*X = B, */
+/* with a tridiagonal matrix A using the LU factorization computed */
+/* by SGTTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations. */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A'* X = B (Transpose) */
+/* = 'C': A'* X = B (Conjugate transpose = Transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input) REAL array, dimension (N-1) */
+/* The (n-1) multipliers that define the matrix L from the */
+/* LU factorization of A. */
+
+/* D (input) REAL array, dimension (N) */
+/* The n diagonal elements of the upper triangular matrix U from */
+/* the LU factorization of A. */
+
+/* DU (input) REAL array, dimension (N-1) */
+/* The (n-1) elements of the first super-diagonal of U. */
+
+/* DU2 (input) REAL array, dimension (N-2) */
+/* The (n-2) elements of the second super-diagonal of U. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/* interchanged with row IPIV(i). IPIV(i) will always be either */
+/* i or i+1; IPIV(i) = i indicates a row interchange was not */
+/* required. */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the matrix of right hand side vectors B. */
+/* On exit, B is overwritten by the solution vectors X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments (f2c: shift pointers for 1-based indexing) */
+ --dl;
+ --d__;
+ --du;
+ --du2;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ notran = *(unsigned char *)trans == 'N' || *(unsigned char *)trans == 'n'; /* decode TRANS inline rather than via lsame_ */
+ if (! notran && ! (*(unsigned char *)trans == 'T' || *(unsigned char *)
+ trans == 't') && ! (*(unsigned char *)trans == 'C' || *(unsigned
+ char *)trans == 'c')) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(*n,1)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGTTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+/* Decode TRANS */
+
+ if (notran) {
+ itrans = 0;
+ } else {
+ itrans = 1;
+ }
+
+/* Determine the number of right-hand sides to solve at a time. */
+
+ if (*nrhs == 1) { /* single RHS: blocking cannot help */
+ nb = 1;
+ } else {
+/* Computing MAX */
+ i__1 = 1, i__2 = ilaenv_(&c__1, "SGTTRS", trans, n, nrhs, &c_n1, &
+ c_n1);
+ nb = max(i__1,i__2); /* tuned block size from ILAENV, at least 1 */
+ }
+
+ if (nb >= *nrhs) { /* all RHS columns fit in one block */
+ sgtts2_(&itrans, n, nrhs, &dl[1], &d__[1], &du[1], &du2[1], &ipiv[1],
+ &b[b_offset], ldb);
+ } else {
+ i__1 = *nrhs;
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) { /* sweep over RHS in blocks of NB columns */
+/* Computing MIN */
+ i__3 = *nrhs - j + 1;
+ jb = min(i__3,nb);
+ sgtts2_(&itrans, n, &jb, &dl[1], &d__[1], &du[1], &du2[1], &ipiv[
+ 1], &b[j * b_dim1 + 1], ldb);
+/* L10: */
+ }
+ }
+
+/* End of SGTTRS */
+
+ return 0;
+} /* sgttrs_ */
diff --git a/contrib/libs/clapack/sgtts2.c b/contrib/libs/clapack/sgtts2.c
new file mode 100644
index 0000000000..afbe47bb39
--- /dev/null
+++ b/contrib/libs/clapack/sgtts2.c
@@ -0,0 +1,261 @@
+/* sgtts2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sgtts2_(integer *itrans, integer *n, integer *nrhs, real
+ *dl, real *d__, real *du, real *du2, integer *ipiv, real *b, integer *
+ ldb)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, ip;
+ real temp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SGTTS2 solves one of the systems of equations */
+/* A*X = B or A'*X = B, */
+/* with a tridiagonal matrix A using the LU factorization computed */
+/* by SGTTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* ITRANS (input) INTEGER */
+/* Specifies the form of the system of equations. */
+/* = 0: A * X = B (No transpose) */
+/* = 1: A'* X = B (Transpose) */
+/* = 2: A'* X = B (Conjugate transpose = Transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input) REAL array, dimension (N-1) */
+/* The (n-1) multipliers that define the matrix L from the */
+/* LU factorization of A. */
+
+/* D (input) REAL array, dimension (N) */
+/* The n diagonal elements of the upper triangular matrix U from */
+/* the LU factorization of A. */
+
+/* DU (input) REAL array, dimension (N-1) */
+/* The (n-1) elements of the first super-diagonal of U. */
+
+/* DU2 (input) REAL array, dimension (N-2) */
+/* The (n-2) elements of the second super-diagonal of U. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/* interchanged with row IPIV(i). IPIV(i) will always be either */
+/* i or i+1; IPIV(i) = i indicates a row interchange was not */
+/* required. */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the matrix of right hand side vectors B. */
+/* On exit, B is overwritten by the solution vectors X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments (f2c: shift pointers for 1-based indexing) */
+ --dl;
+ --d__;
+ --du;
+ --du2;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+ if (*itrans == 0) {
+
+/* Solve A*X = B using the LU factorization of A, */
+/* overwriting each right hand side vector with its solution. */
+
+ if (*nrhs <= 1) { /* single-RHS path: straight-line column solve */
+ j = 1;
+L10:
+
+/* Solve L*x = b. */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ip = ipiv[i__]; /* ip is i (no interchange) or i+1 (interchange) */
+ temp = b[i__ + 1 - ip + i__ + j * b_dim1] - dl[i__] * b[ip +
+ j * b_dim1];
+ b[i__ + j * b_dim1] = b[ip + j * b_dim1];
+ b[i__ + 1 + j * b_dim1] = temp;
+/* L20: */
+ }
+
+/* Solve U*x = b. */
+
+ b[*n + j * b_dim1] /= d__[*n]; /* back substitution with banded U (D, DU, DU2) */
+ if (*n > 1) {
+ b[*n - 1 + j * b_dim1] = (b[*n - 1 + j * b_dim1] - du[*n - 1]
+ * b[*n + j * b_dim1]) / d__[*n - 1];
+ }
+ for (i__ = *n - 2; i__ >= 1; --i__) {
+ b[i__ + j * b_dim1] = (b[i__ + j * b_dim1] - du[i__] * b[i__
+ + 1 + j * b_dim1] - du2[i__] * b[i__ + 2 + j * b_dim1]
+ ) / d__[i__];
+/* L30: */
+ }
+ if (j < *nrhs) {
+ ++j;
+ goto L10;
+ }
+ } else {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Solve L*x = b. */
+
+ i__2 = *n - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (ipiv[i__] == i__) { /* no interchange at step i */
+ b[i__ + 1 + j * b_dim1] -= dl[i__] * b[i__ + j *
+ b_dim1];
+ } else {
+ temp = b[i__ + j * b_dim1];
+ b[i__ + j * b_dim1] = b[i__ + 1 + j * b_dim1];
+ b[i__ + 1 + j * b_dim1] = temp - dl[i__] * b[i__ + j *
+ b_dim1];
+ }
+/* L40: */
+ }
+
+/* Solve U*x = b. */
+
+ b[*n + j * b_dim1] /= d__[*n];
+ if (*n > 1) {
+ b[*n - 1 + j * b_dim1] = (b[*n - 1 + j * b_dim1] - du[*n
+ - 1] * b[*n + j * b_dim1]) / d__[*n - 1];
+ }
+ for (i__ = *n - 2; i__ >= 1; --i__) {
+ b[i__ + j * b_dim1] = (b[i__ + j * b_dim1] - du[i__] * b[
+ i__ + 1 + j * b_dim1] - du2[i__] * b[i__ + 2 + j *
+ b_dim1]) / d__[i__];
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+ } else { /* ITRANS != 0: solve A'*X = B */
+
+/* Solve A' * X = B. */
+
+ if (*nrhs <= 1) {
+
+/* Solve U'*x = b. */
+
+ j = 1;
+L70:
+ b[j * b_dim1 + 1] /= d__[1]; /* forward substitution with U' */
+ if (*n > 1) {
+ b[j * b_dim1 + 2] = (b[j * b_dim1 + 2] - du[1] * b[j * b_dim1
+ + 1]) / d__[2];
+ }
+ i__1 = *n;
+ for (i__ = 3; i__ <= i__1; ++i__) {
+ b[i__ + j * b_dim1] = (b[i__ + j * b_dim1] - du[i__ - 1] * b[
+ i__ - 1 + j * b_dim1] - du2[i__ - 2] * b[i__ - 2 + j *
+ b_dim1]) / d__[i__];
+/* L80: */
+ }
+
+/* Solve L'*x = b. */
+
+ for (i__ = *n - 1; i__ >= 1; --i__) { /* apply interchanges in reverse order */
+ ip = ipiv[i__];
+ temp = b[i__ + j * b_dim1] - dl[i__] * b[i__ + 1 + j * b_dim1]
+ ;
+ b[i__ + j * b_dim1] = b[ip + j * b_dim1];
+ b[ip + j * b_dim1] = temp;
+/* L90: */
+ }
+ if (j < *nrhs) {
+ ++j;
+ goto L70;
+ }
+
+ } else {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Solve U'*x = b. */
+
+ b[j * b_dim1 + 1] /= d__[1];
+ if (*n > 1) {
+ b[j * b_dim1 + 2] = (b[j * b_dim1 + 2] - du[1] * b[j *
+ b_dim1 + 1]) / d__[2];
+ }
+ i__2 = *n;
+ for (i__ = 3; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = (b[i__ + j * b_dim1] - du[i__ - 1] *
+ b[i__ - 1 + j * b_dim1] - du2[i__ - 2] * b[i__ -
+ 2 + j * b_dim1]) / d__[i__];
+/* L100: */
+ }
+ for (i__ = *n - 1; i__ >= 1; --i__) {
+ if (ipiv[i__] == i__) { /* no interchange at step i */
+ b[i__ + j * b_dim1] -= dl[i__] * b[i__ + 1 + j *
+ b_dim1];
+ } else {
+ temp = b[i__ + 1 + j * b_dim1];
+ b[i__ + 1 + j * b_dim1] = b[i__ + j * b_dim1] - dl[
+ i__] * temp;
+ b[i__ + j * b_dim1] = temp;
+ }
+/* L110: */
+ }
+/* L120: */
+ }
+ }
+ }
+
+/* End of SGTTS2 */
+
+ return 0;
+} /* sgtts2_ */
diff --git a/contrib/libs/clapack/shgeqz.c b/contrib/libs/clapack/shgeqz.c
new file mode 100644
index 0000000000..32951fb4c7
--- /dev/null
+++ b/contrib/libs/clapack/shgeqz.c
@@ -0,0 +1,1494 @@
+/* shgeqz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b12 = 0.f;
+static real c_b13 = 1.f;
+static integer c__1 = 1;
+static integer c__3 = 3;
+
+/* Subroutine */ int shgeqz_(char *job, char *compq, char *compz, integer *n,
+ integer *ilo, integer *ihi, real *h__, integer *ldh, real *t, integer
+ *ldt, real *alphar, real *alphai, real *beta, real *q, integer *ldq,
+ real *z__, integer *ldz, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, q_dim1, q_offset, t_dim1, t_offset, z_dim1,
+ z_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2, r__3, r__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ real c__;
+ integer j;
+ real s, v[3], s1, s2, t1, u1, u2, a11, a12, a21, a22, b11, b22, c12, c21;
+ integer jc;
+ real an, bn, cl, cq, cr;
+ integer in;
+ real u12, w11, w12, w21;
+ integer jr;
+ real cz, w22, sl, wi, sr, vs, wr, b1a, b2a, a1i, a2i, b1i, b2i, a1r, a2r,
+ b1r, b2r, wr2, ad11, ad12, ad21, ad22, c11i, c22i;
+ integer jch;
+ real c11r, c22r;
+ logical ilq;
+ real u12l, tau, sqi;
+ logical ilz;
+ real ulp, sqr, szi, szr, ad11l, ad12l, ad21l, ad22l, ad32l, wabs, atol,
+ btol, temp;
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *), slag2_(real *, integer *, real *,
+ integer *, real *, real *, real *, real *, real *, real *);
+ real temp2, s1inv, scale;
+ extern logical lsame_(char *, char *);
+ integer iiter, ilast, jiter;
+ real anorm, bnorm;
+ integer maxit;
+ real tempi, tempr;
+ logical ilazr2;
+ extern doublereal slapy2_(real *, real *), slapy3_(real *, real *, real *)
+ ;
+ extern /* Subroutine */ int slasv2_(real *, real *, real *, real *, real *
+, real *, real *, real *, real *);
+ real ascale, bscale;
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int slarfg_(integer *, real *, real *, integer *,
+ real *);
+ real safmax;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real eshift;
+ logical ilschr;
+ integer icompq, ilastm;
+ extern doublereal slanhs_(char *, integer *, real *, integer *, real *);
+ extern /* Subroutine */ int slartg_(real *, real *, real *, real *, real *
+);
+ integer ischur;
+ extern /* Subroutine */ int slaset_(char *, integer *, integer *, real *,
+ real *, real *, integer *);
+ logical ilazro;
+ integer icompz, ifirst, ifrstm, istart;
+ logical ilpivt, lquery;
+
+
+/* -- LAPACK routine (version 3.2.1) -- */
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/* -- April 2009 -- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SHGEQZ computes the eigenvalues of a real matrix pair (H,T), */
+/* where H is an upper Hessenberg matrix and T is upper triangular, */
+/* using the double-shift QZ method. */
+/* Matrix pairs of this type are produced by the reduction to */
+/* generalized upper Hessenberg form of a real matrix pair (A,B): */
+
+/* A = Q1*H*Z1**T, B = Q1*T*Z1**T, */
+
+/* as computed by SGGHRD. */
+
+/* If JOB='S', then the Hessenberg-triangular pair (H,T) is */
+/* also reduced to generalized Schur form, */
+
+/* H = Q*S*Z**T, T = Q*P*Z**T, */
+
+/* where Q and Z are orthogonal matrices, P is an upper triangular */
+/* matrix, and S is a quasi-triangular matrix with 1-by-1 and 2-by-2 */
+/* diagonal blocks. */
+
+/* The 1-by-1 blocks correspond to real eigenvalues of the matrix pair */
+/* (H,T) and the 2-by-2 blocks correspond to complex conjugate pairs of */
+/* eigenvalues. */
+
+/* Additionally, the 2-by-2 upper triangular diagonal blocks of P */
+/* corresponding to 2-by-2 blocks of S are reduced to positive diagonal */
+/* form, i.e., if S(j+1,j) is non-zero, then P(j+1,j) = P(j,j+1) = 0, */
+/* P(j,j) > 0, and P(j+1,j+1) > 0. */
+
+/* Optionally, the orthogonal matrix Q from the generalized Schur */
+/* factorization may be postmultiplied into an input matrix Q1, and the */
+/* orthogonal matrix Z may be postmultiplied into an input matrix Z1. */
+/* If Q1 and Z1 are the orthogonal matrices from SGGHRD that reduced */
+/* the matrix pair (A,B) to generalized upper Hessenberg form, then the */
+/* output matrices Q1*Q and Z1*Z are the orthogonal factors from the */
+/* generalized Schur factorization of (A,B): */
+
+/* A = (Q1*Q)*S*(Z1*Z)**T, B = (Q1*Q)*P*(Z1*Z)**T. */
+
+/* To avoid overflow, eigenvalues of the matrix pair (H,T) (equivalently, */
+/* of (A,B)) are computed as a pair of values (alpha,beta), where alpha is */
+/* complex and beta real. */
+/* If beta is nonzero, lambda = alpha / beta is an eigenvalue of the */
+/* generalized nonsymmetric eigenvalue problem (GNEP) */
+/* A*x = lambda*B*x */
+/* and if alpha is nonzero, mu = beta / alpha is an eigenvalue of the */
+/* alternate form of the GNEP */
+/* mu*A*y = B*y. */
+/* Real eigenvalues can be read directly from the generalized Schur */
+/* form: */
+/* alpha = S(i,i), beta = P(i,i). */
+
+/* Ref: C.B. Moler & G.W. Stewart, "An Algorithm for Generalized Matrix */
+/* Eigenvalue Problems", SIAM J. Numer. Anal., 10(1973), */
+/* pp. 241--256. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* = 'E': Compute eigenvalues only; */
+/* = 'S': Compute eigenvalues and the Schur form. */
+
+/* COMPQ (input) CHARACTER*1 */
+/* = 'N': Left Schur vectors (Q) are not computed; */
+/* = 'I': Q is initialized to the unit matrix and the matrix Q */
+/* of left Schur vectors of (H,T) is returned; */
+/* = 'V': Q must contain an orthogonal matrix Q1 on entry and */
+/* the product Q1*Q is returned. */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': Right Schur vectors (Z) are not computed; */
+/* = 'I': Z is initialized to the unit matrix and the matrix Z */
+/* of right Schur vectors of (H,T) is returned; */
+/* = 'V': Z must contain an orthogonal matrix Z1 on entry and */
+/* the product Z1*Z is returned. */
+
+/* N (input) INTEGER */
+/* The order of the matrices H, T, Q, and Z. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* ILO and IHI mark the rows and columns of H which are in */
+/* Hessenberg form. It is assumed that A is already upper */
+/* triangular in rows and columns 1:ILO-1 and IHI+1:N. */
+/* If N > 0, 1 <= ILO <= IHI <= N; if N = 0, ILO=1 and IHI=0. */
+
+/* H (input/output) REAL array, dimension (LDH, N) */
+/* On entry, the N-by-N upper Hessenberg matrix H. */
+/* On exit, if JOB = 'S', H contains the upper quasi-triangular */
+/* matrix S from the generalized Schur factorization; */
+/* 2-by-2 diagonal blocks (corresponding to complex conjugate */
+/* pairs of eigenvalues) are returned in standard form, with */
+/* H(i,i) = H(i+1,i+1) and H(i+1,i)*H(i,i+1) < 0. */
+/* If JOB = 'E', the diagonal blocks of H match those of S, but */
+/* the rest of H is unspecified. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH >= max( 1, N ). */
+
+/* T (input/output) REAL array, dimension (LDT, N) */
+/* On entry, the N-by-N upper triangular matrix T. */
+/* On exit, if JOB = 'S', T contains the upper triangular */
+/* matrix P from the generalized Schur factorization; */
+/* 2-by-2 diagonal blocks of P corresponding to 2-by-2 blocks of S */
+/* are reduced to positive diagonal form, i.e., if H(j+1,j) is */
+/* non-zero, then T(j+1,j) = T(j,j+1) = 0, T(j,j) > 0, and */
+/* T(j+1,j+1) > 0. */
+/* If JOB = 'E', the diagonal blocks of T match those of P, but */
+/* the rest of T is unspecified. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max( 1, N ). */
+
+/* ALPHAR (output) REAL array, dimension (N) */
+/* The real parts of each scalar alpha defining an eigenvalue */
+/* of GNEP. */
+
+/* ALPHAI (output) REAL array, dimension (N) */
+/* The imaginary parts of each scalar alpha defining an */
+/* eigenvalue of GNEP. */
+/* If ALPHAI(j) is zero, then the j-th eigenvalue is real; if */
+/* positive, then the j-th and (j+1)-st eigenvalues are a */
+/* complex conjugate pair, with ALPHAI(j+1) = -ALPHAI(j). */
+
+/* BETA (output) REAL array, dimension (N) */
+/* The scalars beta that define the eigenvalues of GNEP. */
+/* Together, the quantities alpha = (ALPHAR(j),ALPHAI(j)) and */
+/* beta = BETA(j) represent the j-th eigenvalue of the matrix */
+/* pair (A,B), in one of the forms lambda = alpha/beta or */
+/* mu = beta/alpha. Since either lambda or mu may overflow, */
+/* they should not, in general, be computed. */
+
+/* Q (input/output) REAL array, dimension (LDQ, N) */
+/* On entry, if COMPZ = 'V', the orthogonal matrix Q1 used in */
+/* the reduction of (A,B) to generalized Hessenberg form. */
+/* On exit, if COMPZ = 'I', the orthogonal matrix of left Schur */
+/* vectors of (H,T), and if COMPZ = 'V', the orthogonal matrix */
+/* of left Schur vectors of (A,B). */
+/* Not referenced if COMPZ = 'N'. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= 1. */
+/* If COMPQ='V' or 'I', then LDQ >= N. */
+
+/* Z (input/output) REAL array, dimension (LDZ, N) */
+/* On entry, if COMPZ = 'V', the orthogonal matrix Z1 used in */
+/* the reduction of (A,B) to generalized Hessenberg form. */
+/* On exit, if COMPZ = 'I', the orthogonal matrix of */
+/* right Schur vectors of (H,T), and if COMPZ = 'V', the */
+/* orthogonal matrix of right Schur vectors of (A,B). */
+/* Not referenced if COMPZ = 'N'. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1. */
+/* If COMPZ='V' or 'I', then LDZ >= N. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO >= 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* = 1,...,N: the QZ iteration did not converge. (H,T) is not */
+/* in Schur form, but ALPHAR(i), ALPHAI(i), and */
+/* BETA(i), i=INFO+1,...,N should be correct. */
+/* = N+1,...,2*N: the shift calculation failed. (H,T) is not */
+/* in Schur form, but ALPHAR(i), ALPHAI(i), and */
+/* BETA(i), i=INFO-N+1,...,N should be correct. */
+
+/* Further Details */
+/* =============== */
+
+/* Iteration counters: */
+
+/* JITER -- counts iterations. */
+/* IITER -- counts iterations run since ILAST was last */
+/* changed. This is therefore reset only when a 1-by-1 or */
+/* 2-by-2 block deflates off the bottom. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* $ SAFETY = 1.0E+0 ) */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode JOB, COMPQ, COMPZ */
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ --alphar;
+ --alphai;
+ --beta;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ if (lsame_(job, "E")) {
+ ilschr = FALSE_;
+ ischur = 1;
+ } else if (lsame_(job, "S")) {
+ ilschr = TRUE_;
+ ischur = 2;
+ } else {
+ ischur = 0;
+ }
+
+ if (lsame_(compq, "N")) {
+ ilq = FALSE_;
+ icompq = 1;
+ } else if (lsame_(compq, "V")) {
+ ilq = TRUE_;
+ icompq = 2;
+ } else if (lsame_(compq, "I")) {
+ ilq = TRUE_;
+ icompq = 3;
+ } else {
+ icompq = 0;
+ }
+
+ if (lsame_(compz, "N")) {
+ ilz = FALSE_;
+ icompz = 1;
+ } else if (lsame_(compz, "V")) {
+ ilz = TRUE_;
+ icompz = 2;
+ } else if (lsame_(compz, "I")) {
+ ilz = TRUE_;
+ icompz = 3;
+ } else {
+ icompz = 0;
+ }
+
+/* Check Argument Values */
+
+ *info = 0;
+ work[1] = (real) max(1,*n);
+ lquery = *lwork == -1;
+ if (ischur == 0) {
+ *info = -1;
+ } else if (icompq == 0) {
+ *info = -2;
+ } else if (icompz == 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ilo < 1) {
+ *info = -5;
+ } else if (*ihi > *n || *ihi < *ilo - 1) {
+ *info = -6;
+ } else if (*ldh < *n) {
+ *info = -8;
+ } else if (*ldt < *n) {
+ *info = -10;
+ } else if (*ldq < 1 || ilq && *ldq < *n) {
+ *info = -15;
+ } else if (*ldz < 1 || ilz && *ldz < *n) {
+ *info = -17;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -19;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SHGEQZ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+/* Initialize Q and Z */
+
+ if (icompq == 3) {
+ slaset_("Full", n, n, &c_b12, &c_b13, &q[q_offset], ldq);
+ }
+ if (icompz == 3) {
+ slaset_("Full", n, n, &c_b12, &c_b13, &z__[z_offset], ldz);
+ }
+
+/* Machine Constants */
+
+ in = *ihi + 1 - *ilo;
+ safmin = slamch_("S");
+ safmax = 1.f / safmin;
+ ulp = slamch_("E") * slamch_("B");
+ anorm = slanhs_("F", &in, &h__[*ilo + *ilo * h_dim1], ldh, &work[1]);
+ bnorm = slanhs_("F", &in, &t[*ilo + *ilo * t_dim1], ldt, &work[1]);
+/* Computing MAX */
+ r__1 = safmin, r__2 = ulp * anorm;
+ atol = dmax(r__1,r__2);
+/* Computing MAX */
+ r__1 = safmin, r__2 = ulp * bnorm;
+ btol = dmax(r__1,r__2);
+ ascale = 1.f / dmax(safmin,anorm);
+ bscale = 1.f / dmax(safmin,bnorm);
+
+/* Set Eigenvalues IHI+1:N */
+
+ i__1 = *n;
+ for (j = *ihi + 1; j <= i__1; ++j) {
+ if (t[j + j * t_dim1] < 0.f) {
+ if (ilschr) {
+ i__2 = j;
+ for (jr = 1; jr <= i__2; ++jr) {
+ h__[jr + j * h_dim1] = -h__[jr + j * h_dim1];
+ t[jr + j * t_dim1] = -t[jr + j * t_dim1];
+/* L10: */
+ }
+ } else {
+ h__[j + j * h_dim1] = -h__[j + j * h_dim1];
+ t[j + j * t_dim1] = -t[j + j * t_dim1];
+ }
+ if (ilz) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ z__[jr + j * z_dim1] = -z__[jr + j * z_dim1];
+/* L20: */
+ }
+ }
+ }
+ alphar[j] = h__[j + j * h_dim1];
+ alphai[j] = 0.f;
+ beta[j] = t[j + j * t_dim1];
+/* L30: */
+ }
+
+/* If IHI < ILO, skip QZ steps */
+
+ if (*ihi < *ilo) {
+ goto L380;
+ }
+
+/* MAIN QZ ITERATION LOOP */
+
+/* Initialize dynamic indices */
+
+/* Eigenvalues ILAST+1:N have been found. */
+/* Column operations modify rows IFRSTM:whatever. */
+/* Row operations modify columns whatever:ILASTM. */
+
+/* If only eigenvalues are being computed, then */
+/* IFRSTM is the row of the last splitting row above row ILAST; */
+/* this is always at least ILO. */
+/* IITER counts iterations since the last eigenvalue was found, */
+/* to tell when to use an extraordinary shift. */
+/* MAXIT is the maximum number of QZ sweeps allowed. */
+
+ ilast = *ihi;
+ if (ilschr) {
+ ifrstm = 1;
+ ilastm = *n;
+ } else {
+ ifrstm = *ilo;
+ ilastm = *ihi;
+ }
+ iiter = 0;
+ eshift = 0.f;
+ maxit = (*ihi - *ilo + 1) * 30;
+
+ i__1 = maxit;
+ for (jiter = 1; jiter <= i__1; ++jiter) {
+
+/* Split the matrix if possible. */
+
+/* Two tests: */
+/* 1: H(j,j-1)=0 or j=ILO */
+/* 2: T(j,j)=0 */
+
+ if (ilast == *ilo) {
+
+/* Special case: j=ILAST */
+
+ goto L80;
+ } else {
+ if ((r__1 = h__[ilast + (ilast - 1) * h_dim1], dabs(r__1)) <=
+ atol) {
+ h__[ilast + (ilast - 1) * h_dim1] = 0.f;
+ goto L80;
+ }
+ }
+
+ if ((r__1 = t[ilast + ilast * t_dim1], dabs(r__1)) <= btol) {
+ t[ilast + ilast * t_dim1] = 0.f;
+ goto L70;
+ }
+
+/* General case: j<ILAST */
+
+ i__2 = *ilo;
+ for (j = ilast - 1; j >= i__2; --j) {
+
+/* Test 1: for H(j,j-1)=0 or j=ILO */
+
+ if (j == *ilo) {
+ ilazro = TRUE_;
+ } else {
+ if ((r__1 = h__[j + (j - 1) * h_dim1], dabs(r__1)) <= atol) {
+ h__[j + (j - 1) * h_dim1] = 0.f;
+ ilazro = TRUE_;
+ } else {
+ ilazro = FALSE_;
+ }
+ }
+
+/* Test 2: for T(j,j)=0 */
+
+ if ((r__1 = t[j + j * t_dim1], dabs(r__1)) < btol) {
+ t[j + j * t_dim1] = 0.f;
+
+/* Test 1a: Check for 2 consecutive small subdiagonals in A */
+
+ ilazr2 = FALSE_;
+ if (! ilazro) {
+ temp = (r__1 = h__[j + (j - 1) * h_dim1], dabs(r__1));
+ temp2 = (r__1 = h__[j + j * h_dim1], dabs(r__1));
+ tempr = dmax(temp,temp2);
+ if (tempr < 1.f && tempr != 0.f) {
+ temp /= tempr;
+ temp2 /= tempr;
+ }
+ if (temp * (ascale * (r__1 = h__[j + 1 + j * h_dim1],
+ dabs(r__1))) <= temp2 * (ascale * atol)) {
+ ilazr2 = TRUE_;
+ }
+ }
+
+/* If both tests pass (1 & 2), i.e., the leading diagonal */
+/* element of B in the block is zero, split a 1x1 block off */
+/* at the top. (I.e., at the J-th row/column) The leading */
+/* diagonal element of the remainder can also be zero, so */
+/* this may have to be done repeatedly. */
+
+ if (ilazro || ilazr2) {
+ i__3 = ilast - 1;
+ for (jch = j; jch <= i__3; ++jch) {
+ temp = h__[jch + jch * h_dim1];
+ slartg_(&temp, &h__[jch + 1 + jch * h_dim1], &c__, &s,
+ &h__[jch + jch * h_dim1]);
+ h__[jch + 1 + jch * h_dim1] = 0.f;
+ i__4 = ilastm - jch;
+ srot_(&i__4, &h__[jch + (jch + 1) * h_dim1], ldh, &
+ h__[jch + 1 + (jch + 1) * h_dim1], ldh, &c__,
+ &s);
+ i__4 = ilastm - jch;
+ srot_(&i__4, &t[jch + (jch + 1) * t_dim1], ldt, &t[
+ jch + 1 + (jch + 1) * t_dim1], ldt, &c__, &s);
+ if (ilq) {
+ srot_(n, &q[jch * q_dim1 + 1], &c__1, &q[(jch + 1)
+ * q_dim1 + 1], &c__1, &c__, &s);
+ }
+ if (ilazr2) {
+ h__[jch + (jch - 1) * h_dim1] *= c__;
+ }
+ ilazr2 = FALSE_;
+ if ((r__1 = t[jch + 1 + (jch + 1) * t_dim1], dabs(
+ r__1)) >= btol) {
+ if (jch + 1 >= ilast) {
+ goto L80;
+ } else {
+ ifirst = jch + 1;
+ goto L110;
+ }
+ }
+ t[jch + 1 + (jch + 1) * t_dim1] = 0.f;
+/* L40: */
+ }
+ goto L70;
+ } else {
+
+/* Only test 2 passed -- chase the zero to T(ILAST,ILAST) */
+/* Then process as in the case T(ILAST,ILAST)=0 */
+
+ i__3 = ilast - 1;
+ for (jch = j; jch <= i__3; ++jch) {
+ temp = t[jch + (jch + 1) * t_dim1];
+ slartg_(&temp, &t[jch + 1 + (jch + 1) * t_dim1], &c__,
+ &s, &t[jch + (jch + 1) * t_dim1]);
+ t[jch + 1 + (jch + 1) * t_dim1] = 0.f;
+ if (jch < ilastm - 1) {
+ i__4 = ilastm - jch - 1;
+ srot_(&i__4, &t[jch + (jch + 2) * t_dim1], ldt, &
+ t[jch + 1 + (jch + 2) * t_dim1], ldt, &
+ c__, &s);
+ }
+ i__4 = ilastm - jch + 2;
+ srot_(&i__4, &h__[jch + (jch - 1) * h_dim1], ldh, &
+ h__[jch + 1 + (jch - 1) * h_dim1], ldh, &c__,
+ &s);
+ if (ilq) {
+ srot_(n, &q[jch * q_dim1 + 1], &c__1, &q[(jch + 1)
+ * q_dim1 + 1], &c__1, &c__, &s);
+ }
+ temp = h__[jch + 1 + jch * h_dim1];
+ slartg_(&temp, &h__[jch + 1 + (jch - 1) * h_dim1], &
+ c__, &s, &h__[jch + 1 + jch * h_dim1]);
+ h__[jch + 1 + (jch - 1) * h_dim1] = 0.f;
+ i__4 = jch + 1 - ifrstm;
+ srot_(&i__4, &h__[ifrstm + jch * h_dim1], &c__1, &h__[
+ ifrstm + (jch - 1) * h_dim1], &c__1, &c__, &s)
+ ;
+ i__4 = jch - ifrstm;
+ srot_(&i__4, &t[ifrstm + jch * t_dim1], &c__1, &t[
+ ifrstm + (jch - 1) * t_dim1], &c__1, &c__, &s)
+ ;
+ if (ilz) {
+ srot_(n, &z__[jch * z_dim1 + 1], &c__1, &z__[(jch
+ - 1) * z_dim1 + 1], &c__1, &c__, &s);
+ }
+/* L50: */
+ }
+ goto L70;
+ }
+ } else if (ilazro) {
+
+/* Only test 1 passed -- work on J:ILAST */
+
+ ifirst = j;
+ goto L110;
+ }
+
+/* Neither test passed -- try next J */
+
+/* L60: */
+ }
+
+/* (Drop-through is "impossible") */
+
+ *info = *n + 1;
+ goto L420;
+
+/* T(ILAST,ILAST)=0 -- clear H(ILAST,ILAST-1) to split off a */
+/* 1x1 block. */
+
+L70:
+ temp = h__[ilast + ilast * h_dim1];
+ slartg_(&temp, &h__[ilast + (ilast - 1) * h_dim1], &c__, &s, &h__[
+ ilast + ilast * h_dim1]);
+ h__[ilast + (ilast - 1) * h_dim1] = 0.f;
+ i__2 = ilast - ifrstm;
+ srot_(&i__2, &h__[ifrstm + ilast * h_dim1], &c__1, &h__[ifrstm + (
+ ilast - 1) * h_dim1], &c__1, &c__, &s);
+ i__2 = ilast - ifrstm;
+ srot_(&i__2, &t[ifrstm + ilast * t_dim1], &c__1, &t[ifrstm + (ilast -
+ 1) * t_dim1], &c__1, &c__, &s);
+ if (ilz) {
+ srot_(n, &z__[ilast * z_dim1 + 1], &c__1, &z__[(ilast - 1) *
+ z_dim1 + 1], &c__1, &c__, &s);
+ }
+
+/* H(ILAST,ILAST-1)=0 -- Standardize B, set ALPHAR, ALPHAI, */
+/* and BETA */
+
+L80:
+ if (t[ilast + ilast * t_dim1] < 0.f) {
+ if (ilschr) {
+ i__2 = ilast;
+ for (j = ifrstm; j <= i__2; ++j) {
+ h__[j + ilast * h_dim1] = -h__[j + ilast * h_dim1];
+ t[j + ilast * t_dim1] = -t[j + ilast * t_dim1];
+/* L90: */
+ }
+ } else {
+ h__[ilast + ilast * h_dim1] = -h__[ilast + ilast * h_dim1];
+ t[ilast + ilast * t_dim1] = -t[ilast + ilast * t_dim1];
+ }
+ if (ilz) {
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ z__[j + ilast * z_dim1] = -z__[j + ilast * z_dim1];
+/* L100: */
+ }
+ }
+ }
+ alphar[ilast] = h__[ilast + ilast * h_dim1];
+ alphai[ilast] = 0.f;
+ beta[ilast] = t[ilast + ilast * t_dim1];
+
+/* Go to next block -- exit if finished. */
+
+ --ilast;
+ if (ilast < *ilo) {
+ goto L380;
+ }
+
+/* Reset counters */
+
+ iiter = 0;
+ eshift = 0.f;
+ if (! ilschr) {
+ ilastm = ilast;
+ if (ifrstm > ilast) {
+ ifrstm = *ilo;
+ }
+ }
+ goto L350;
+
+/* QZ step */
+
+/* This iteration only involves rows/columns IFIRST:ILAST. We */
+/* assume IFIRST < ILAST, and that the diagonal of B is non-zero. */
+
+L110:
+ ++iiter;
+ if (! ilschr) {
+ ifrstm = ifirst;
+ }
+
+/* Compute single shifts. */
+
+/* At this point, IFIRST < ILAST, and the diagonal elements of */
+/* T(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in */
+/* magnitude) */
+
+ if (iiter / 10 * 10 == iiter) {
+
+/* Exceptional shift. Chosen for no particularly good reason. */
+/* (Single shift only.) */
+
+ if ((real) maxit * safmin * (r__1 = h__[ilast - 1 + ilast *
+ h_dim1], dabs(r__1)) < (r__2 = t[ilast - 1 + (ilast - 1) *
+ t_dim1], dabs(r__2))) {
+ eshift += h__[ilast - 1 + ilast * h_dim1] / t[ilast - 1 + (
+ ilast - 1) * t_dim1];
+ } else {
+ eshift += 1.f / (safmin * (real) maxit);
+ }
+ s1 = 1.f;
+ wr = eshift;
+
+ } else {
+
+/* Shifts based on the generalized eigenvalues of the */
+/* bottom-right 2x2 block of A and B. The first eigenvalue */
+/* returned by SLAG2 is the Wilkinson shift (AEP p.512), */
+
+ r__1 = safmin * 100.f;
+ slag2_(&h__[ilast - 1 + (ilast - 1) * h_dim1], ldh, &t[ilast - 1
+ + (ilast - 1) * t_dim1], ldt, &r__1, &s1, &s2, &wr, &wr2,
+ &wi);
+
+/* Computing MAX */
+/* Computing MAX */
+ r__3 = 1.f, r__4 = dabs(wr), r__3 = max(r__3,r__4), r__4 = dabs(
+ wi);
+ r__1 = s1, r__2 = safmin * dmax(r__3,r__4);
+ temp = dmax(r__1,r__2);
+ if (wi != 0.f) {
+ goto L200;
+ }
+ }
+
+/* Fiddle with shift to avoid overflow */
+
+ temp = dmin(ascale,1.f) * (safmax * .5f);
+ if (s1 > temp) {
+ scale = temp / s1;
+ } else {
+ scale = 1.f;
+ }
+
+ temp = dmin(bscale,1.f) * (safmax * .5f);
+ if (dabs(wr) > temp) {
+/* Computing MIN */
+ r__1 = scale, r__2 = temp / dabs(wr);
+ scale = dmin(r__1,r__2);
+ }
+ s1 = scale * s1;
+ wr = scale * wr;
+
+/* Now check for two consecutive small subdiagonals. */
+
+ i__2 = ifirst + 1;
+ for (j = ilast - 1; j >= i__2; --j) {
+ istart = j;
+ temp = (r__1 = s1 * h__[j + (j - 1) * h_dim1], dabs(r__1));
+ temp2 = (r__1 = s1 * h__[j + j * h_dim1] - wr * t[j + j * t_dim1],
+ dabs(r__1));
+ tempr = dmax(temp,temp2);
+ if (tempr < 1.f && tempr != 0.f) {
+ temp /= tempr;
+ temp2 /= tempr;
+ }
+ if ((r__1 = ascale * h__[j + 1 + j * h_dim1] * temp, dabs(r__1))
+ <= ascale * atol * temp2) {
+ goto L130;
+ }
+/* L120: */
+ }
+
+ istart = ifirst;
+L130:
+
+/* Do an implicit single-shift QZ sweep. */
+
+/* Initial Q */
+
+ temp = s1 * h__[istart + istart * h_dim1] - wr * t[istart + istart *
+ t_dim1];
+ temp2 = s1 * h__[istart + 1 + istart * h_dim1];
+ slartg_(&temp, &temp2, &c__, &s, &tempr);
+
+/* Sweep */
+
+ i__2 = ilast - 1;
+ for (j = istart; j <= i__2; ++j) {
+ if (j > istart) {
+ temp = h__[j + (j - 1) * h_dim1];
+ slartg_(&temp, &h__[j + 1 + (j - 1) * h_dim1], &c__, &s, &h__[
+ j + (j - 1) * h_dim1]);
+ h__[j + 1 + (j - 1) * h_dim1] = 0.f;
+ }
+
+ i__3 = ilastm;
+ for (jc = j; jc <= i__3; ++jc) {
+ temp = c__ * h__[j + jc * h_dim1] + s * h__[j + 1 + jc *
+ h_dim1];
+ h__[j + 1 + jc * h_dim1] = -s * h__[j + jc * h_dim1] + c__ *
+ h__[j + 1 + jc * h_dim1];
+ h__[j + jc * h_dim1] = temp;
+ temp2 = c__ * t[j + jc * t_dim1] + s * t[j + 1 + jc * t_dim1];
+ t[j + 1 + jc * t_dim1] = -s * t[j + jc * t_dim1] + c__ * t[j
+ + 1 + jc * t_dim1];
+ t[j + jc * t_dim1] = temp2;
+/* L140: */
+ }
+ if (ilq) {
+ i__3 = *n;
+ for (jr = 1; jr <= i__3; ++jr) {
+ temp = c__ * q[jr + j * q_dim1] + s * q[jr + (j + 1) *
+ q_dim1];
+ q[jr + (j + 1) * q_dim1] = -s * q[jr + j * q_dim1] + c__ *
+ q[jr + (j + 1) * q_dim1];
+ q[jr + j * q_dim1] = temp;
+/* L150: */
+ }
+ }
+
+ temp = t[j + 1 + (j + 1) * t_dim1];
+ slartg_(&temp, &t[j + 1 + j * t_dim1], &c__, &s, &t[j + 1 + (j +
+ 1) * t_dim1]);
+ t[j + 1 + j * t_dim1] = 0.f;
+
+/* Computing MIN */
+ i__4 = j + 2;
+ i__3 = min(i__4,ilast);
+ for (jr = ifrstm; jr <= i__3; ++jr) {
+ temp = c__ * h__[jr + (j + 1) * h_dim1] + s * h__[jr + j *
+ h_dim1];
+ h__[jr + j * h_dim1] = -s * h__[jr + (j + 1) * h_dim1] + c__ *
+ h__[jr + j * h_dim1];
+ h__[jr + (j + 1) * h_dim1] = temp;
+/* L160: */
+ }
+ i__3 = j;
+ for (jr = ifrstm; jr <= i__3; ++jr) {
+ temp = c__ * t[jr + (j + 1) * t_dim1] + s * t[jr + j * t_dim1]
+ ;
+ t[jr + j * t_dim1] = -s * t[jr + (j + 1) * t_dim1] + c__ * t[
+ jr + j * t_dim1];
+ t[jr + (j + 1) * t_dim1] = temp;
+/* L170: */
+ }
+ if (ilz) {
+ i__3 = *n;
+ for (jr = 1; jr <= i__3; ++jr) {
+ temp = c__ * z__[jr + (j + 1) * z_dim1] + s * z__[jr + j *
+ z_dim1];
+ z__[jr + j * z_dim1] = -s * z__[jr + (j + 1) * z_dim1] +
+ c__ * z__[jr + j * z_dim1];
+ z__[jr + (j + 1) * z_dim1] = temp;
+/* L180: */
+ }
+ }
+/* L190: */
+ }
+
+ goto L350;
+
+/* Use Francis double-shift */
+
+/* Note: the Francis double-shift should work with real shifts, */
+/* but only if the block is at least 3x3. */
+/* This code may break if this point is reached with */
+/* a 2x2 block with real eigenvalues. */
+
+L200:
+ if (ifirst + 1 == ilast) {
+
+/* Special case -- 2x2 block with complex eigenvectors */
+
+/* Step 1: Standardize, that is, rotate so that */
+
+/* ( B11 0 ) */
+/* B = ( ) with B11 non-negative. */
+/* ( 0 B22 ) */
+
+ slasv2_(&t[ilast - 1 + (ilast - 1) * t_dim1], &t[ilast - 1 +
+ ilast * t_dim1], &t[ilast + ilast * t_dim1], &b22, &b11, &
+ sr, &cr, &sl, &cl);
+
+ if (b11 < 0.f) {
+ cr = -cr;
+ sr = -sr;
+ b11 = -b11;
+ b22 = -b22;
+ }
+
+ i__2 = ilastm + 1 - ifirst;
+ srot_(&i__2, &h__[ilast - 1 + (ilast - 1) * h_dim1], ldh, &h__[
+ ilast + (ilast - 1) * h_dim1], ldh, &cl, &sl);
+ i__2 = ilast + 1 - ifrstm;
+ srot_(&i__2, &h__[ifrstm + (ilast - 1) * h_dim1], &c__1, &h__[
+ ifrstm + ilast * h_dim1], &c__1, &cr, &sr);
+
+ if (ilast < ilastm) {
+ i__2 = ilastm - ilast;
+ srot_(&i__2, &t[ilast - 1 + (ilast + 1) * t_dim1], ldt, &t[
+ ilast + (ilast + 1) * t_dim1], ldt, &cl, &sl);
+ }
+ if (ifrstm < ilast - 1) {
+ i__2 = ifirst - ifrstm;
+ srot_(&i__2, &t[ifrstm + (ilast - 1) * t_dim1], &c__1, &t[
+ ifrstm + ilast * t_dim1], &c__1, &cr, &sr);
+ }
+
+ if (ilq) {
+ srot_(n, &q[(ilast - 1) * q_dim1 + 1], &c__1, &q[ilast *
+ q_dim1 + 1], &c__1, &cl, &sl);
+ }
+ if (ilz) {
+ srot_(n, &z__[(ilast - 1) * z_dim1 + 1], &c__1, &z__[ilast *
+ z_dim1 + 1], &c__1, &cr, &sr);
+ }
+
+ t[ilast - 1 + (ilast - 1) * t_dim1] = b11;
+ t[ilast - 1 + ilast * t_dim1] = 0.f;
+ t[ilast + (ilast - 1) * t_dim1] = 0.f;
+ t[ilast + ilast * t_dim1] = b22;
+
+/* If B22 is negative, negate column ILAST */
+
+ if (b22 < 0.f) {
+ i__2 = ilast;
+ for (j = ifrstm; j <= i__2; ++j) {
+ h__[j + ilast * h_dim1] = -h__[j + ilast * h_dim1];
+ t[j + ilast * t_dim1] = -t[j + ilast * t_dim1];
+/* L210: */
+ }
+
+ if (ilz) {
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ z__[j + ilast * z_dim1] = -z__[j + ilast * z_dim1];
+/* L220: */
+ }
+ }
+ }
+
+/* Step 2: Compute ALPHAR, ALPHAI, and BETA (see refs.) */
+
+/* Recompute shift */
+
+ r__1 = safmin * 100.f;
+ slag2_(&h__[ilast - 1 + (ilast - 1) * h_dim1], ldh, &t[ilast - 1
+ + (ilast - 1) * t_dim1], ldt, &r__1, &s1, &temp, &wr, &
+ temp2, &wi);
+
+/* If standardization has perturbed the shift onto real line, */
+/* do another (real single-shift) QR step. */
+
+ if (wi == 0.f) {
+ goto L350;
+ }
+ s1inv = 1.f / s1;
+
+/* Do EISPACK (QZVAL) computation of alpha and beta */
+
+ a11 = h__[ilast - 1 + (ilast - 1) * h_dim1];
+ a21 = h__[ilast + (ilast - 1) * h_dim1];
+ a12 = h__[ilast - 1 + ilast * h_dim1];
+ a22 = h__[ilast + ilast * h_dim1];
+
+/* Compute complex Givens rotation on right */
+/* (Assume some element of C = (sA - wB) > unfl ) */
+/* __ */
+/* (sA - wB) ( CZ -SZ ) */
+/* ( SZ CZ ) */
+
+ c11r = s1 * a11 - wr * b11;
+ c11i = -wi * b11;
+ c12 = s1 * a12;
+ c21 = s1 * a21;
+ c22r = s1 * a22 - wr * b22;
+ c22i = -wi * b22;
+
+ if (dabs(c11r) + dabs(c11i) + dabs(c12) > dabs(c21) + dabs(c22r)
+ + dabs(c22i)) {
+ t1 = slapy3_(&c12, &c11r, &c11i);
+ cz = c12 / t1;
+ szr = -c11r / t1;
+ szi = -c11i / t1;
+ } else {
+ cz = slapy2_(&c22r, &c22i);
+ if (cz <= safmin) {
+ cz = 0.f;
+ szr = 1.f;
+ szi = 0.f;
+ } else {
+ tempr = c22r / cz;
+ tempi = c22i / cz;
+ t1 = slapy2_(&cz, &c21);
+ cz /= t1;
+ szr = -c21 * tempr / t1;
+ szi = c21 * tempi / t1;
+ }
+ }
+
+/* Compute Givens rotation on left */
+
+/* ( CQ SQ ) */
+/* ( __ ) A or B */
+/* ( -SQ CQ ) */
+
+ an = dabs(a11) + dabs(a12) + dabs(a21) + dabs(a22);
+ bn = dabs(b11) + dabs(b22);
+ wabs = dabs(wr) + dabs(wi);
+ if (s1 * an > wabs * bn) {
+ cq = cz * b11;
+ sqr = szr * b22;
+ sqi = -szi * b22;
+ } else {
+ a1r = cz * a11 + szr * a12;
+ a1i = szi * a12;
+ a2r = cz * a21 + szr * a22;
+ a2i = szi * a22;
+ cq = slapy2_(&a1r, &a1i);
+ if (cq <= safmin) {
+ cq = 0.f;
+ sqr = 1.f;
+ sqi = 0.f;
+ } else {
+ tempr = a1r / cq;
+ tempi = a1i / cq;
+ sqr = tempr * a2r + tempi * a2i;
+ sqi = tempi * a2r - tempr * a2i;
+ }
+ }
+ t1 = slapy3_(&cq, &sqr, &sqi);
+ cq /= t1;
+ sqr /= t1;
+ sqi /= t1;
+
+/* Compute diagonal elements of QBZ */
+
+ tempr = sqr * szr - sqi * szi;
+ tempi = sqr * szi + sqi * szr;
+ b1r = cq * cz * b11 + tempr * b22;
+ b1i = tempi * b22;
+ b1a = slapy2_(&b1r, &b1i);
+ b2r = cq * cz * b22 + tempr * b11;
+ b2i = -tempi * b11;
+ b2a = slapy2_(&b2r, &b2i);
+
+/* Normalize so beta > 0, and Im( alpha1 ) > 0 */
+
+ beta[ilast - 1] = b1a;
+ beta[ilast] = b2a;
+ alphar[ilast - 1] = wr * b1a * s1inv;
+ alphai[ilast - 1] = wi * b1a * s1inv;
+ alphar[ilast] = wr * b2a * s1inv;
+ alphai[ilast] = -(wi * b2a) * s1inv;
+
+/* Step 3: Go to next block -- exit if finished. */
+
+ ilast = ifirst - 1;
+ if (ilast < *ilo) {
+ goto L380;
+ }
+
+/* Reset counters */
+
+ iiter = 0;
+ eshift = 0.f;
+ if (! ilschr) {
+ ilastm = ilast;
+ if (ifrstm > ilast) {
+ ifrstm = *ilo;
+ }
+ }
+ goto L350;
+ } else {
+
+/* Usual case: 3x3 or larger block, using Francis implicit */
+/* double-shift */
+
+/* 2 */
+/* Eigenvalue equation is w - c w + d = 0, */
+
+/* -1 2 -1 */
+/* so compute 1st column of (A B ) - c A B + d */
+/* using the formula in QZIT (from EISPACK) */
+
+/* We assume that the block is at least 3x3 */
+
+ ad11 = ascale * h__[ilast - 1 + (ilast - 1) * h_dim1] / (bscale *
+ t[ilast - 1 + (ilast - 1) * t_dim1]);
+ ad21 = ascale * h__[ilast + (ilast - 1) * h_dim1] / (bscale * t[
+ ilast - 1 + (ilast - 1) * t_dim1]);
+ ad12 = ascale * h__[ilast - 1 + ilast * h_dim1] / (bscale * t[
+ ilast + ilast * t_dim1]);
+ ad22 = ascale * h__[ilast + ilast * h_dim1] / (bscale * t[ilast +
+ ilast * t_dim1]);
+ u12 = t[ilast - 1 + ilast * t_dim1] / t[ilast + ilast * t_dim1];
+ ad11l = ascale * h__[ifirst + ifirst * h_dim1] / (bscale * t[
+ ifirst + ifirst * t_dim1]);
+ ad21l = ascale * h__[ifirst + 1 + ifirst * h_dim1] / (bscale * t[
+ ifirst + ifirst * t_dim1]);
+ ad12l = ascale * h__[ifirst + (ifirst + 1) * h_dim1] / (bscale *
+ t[ifirst + 1 + (ifirst + 1) * t_dim1]);
+ ad22l = ascale * h__[ifirst + 1 + (ifirst + 1) * h_dim1] / (
+ bscale * t[ifirst + 1 + (ifirst + 1) * t_dim1]);
+ ad32l = ascale * h__[ifirst + 2 + (ifirst + 1) * h_dim1] / (
+ bscale * t[ifirst + 1 + (ifirst + 1) * t_dim1]);
+ u12l = t[ifirst + (ifirst + 1) * t_dim1] / t[ifirst + 1 + (ifirst
+ + 1) * t_dim1];
+
+ v[0] = (ad11 - ad11l) * (ad22 - ad11l) - ad12 * ad21 + ad21 * u12
+ * ad11l + (ad12l - ad11l * u12l) * ad21l;
+ v[1] = (ad22l - ad11l - ad21l * u12l - (ad11 - ad11l) - (ad22 -
+ ad11l) + ad21 * u12) * ad21l;
+ v[2] = ad32l * ad21l;
+
+ istart = ifirst;
+
+ slarfg_(&c__3, v, &v[1], &c__1, &tau);
+ v[0] = 1.f;
+
+/* Sweep */
+
+ i__2 = ilast - 2;
+ for (j = istart; j <= i__2; ++j) {
+
+/* All but last elements: use 3x3 Householder transforms. */
+
+/* Zero (j-1)st column of A */
+
+ if (j > istart) {
+ v[0] = h__[j + (j - 1) * h_dim1];
+ v[1] = h__[j + 1 + (j - 1) * h_dim1];
+ v[2] = h__[j + 2 + (j - 1) * h_dim1];
+
+ slarfg_(&c__3, &h__[j + (j - 1) * h_dim1], &v[1], &c__1, &
+ tau);
+ v[0] = 1.f;
+ h__[j + 1 + (j - 1) * h_dim1] = 0.f;
+ h__[j + 2 + (j - 1) * h_dim1] = 0.f;
+ }
+
+ i__3 = ilastm;
+ for (jc = j; jc <= i__3; ++jc) {
+ temp = tau * (h__[j + jc * h_dim1] + v[1] * h__[j + 1 +
+ jc * h_dim1] + v[2] * h__[j + 2 + jc * h_dim1]);
+ h__[j + jc * h_dim1] -= temp;
+ h__[j + 1 + jc * h_dim1] -= temp * v[1];
+ h__[j + 2 + jc * h_dim1] -= temp * v[2];
+ temp2 = tau * (t[j + jc * t_dim1] + v[1] * t[j + 1 + jc *
+ t_dim1] + v[2] * t[j + 2 + jc * t_dim1]);
+ t[j + jc * t_dim1] -= temp2;
+ t[j + 1 + jc * t_dim1] -= temp2 * v[1];
+ t[j + 2 + jc * t_dim1] -= temp2 * v[2];
+/* L230: */
+ }
+ if (ilq) {
+ i__3 = *n;
+ for (jr = 1; jr <= i__3; ++jr) {
+ temp = tau * (q[jr + j * q_dim1] + v[1] * q[jr + (j +
+ 1) * q_dim1] + v[2] * q[jr + (j + 2) * q_dim1]
+ );
+ q[jr + j * q_dim1] -= temp;
+ q[jr + (j + 1) * q_dim1] -= temp * v[1];
+ q[jr + (j + 2) * q_dim1] -= temp * v[2];
+/* L240: */
+ }
+ }
+
+/* Zero j-th column of B (see SLAGBC for details) */
+
+/* Swap rows to pivot */
+
+ ilpivt = FALSE_;
+/* Computing MAX */
+ r__3 = (r__1 = t[j + 1 + (j + 1) * t_dim1], dabs(r__1)), r__4
+ = (r__2 = t[j + 1 + (j + 2) * t_dim1], dabs(r__2));
+ temp = dmax(r__3,r__4);
+/* Computing MAX */
+ r__3 = (r__1 = t[j + 2 + (j + 1) * t_dim1], dabs(r__1)), r__4
+ = (r__2 = t[j + 2 + (j + 2) * t_dim1], dabs(r__2));
+ temp2 = dmax(r__3,r__4);
+ if (dmax(temp,temp2) < safmin) {
+ scale = 0.f;
+ u1 = 1.f;
+ u2 = 0.f;
+ goto L250;
+ } else if (temp >= temp2) {
+ w11 = t[j + 1 + (j + 1) * t_dim1];
+ w21 = t[j + 2 + (j + 1) * t_dim1];
+ w12 = t[j + 1 + (j + 2) * t_dim1];
+ w22 = t[j + 2 + (j + 2) * t_dim1];
+ u1 = t[j + 1 + j * t_dim1];
+ u2 = t[j + 2 + j * t_dim1];
+ } else {
+ w21 = t[j + 1 + (j + 1) * t_dim1];
+ w11 = t[j + 2 + (j + 1) * t_dim1];
+ w22 = t[j + 1 + (j + 2) * t_dim1];
+ w12 = t[j + 2 + (j + 2) * t_dim1];
+ u2 = t[j + 1 + j * t_dim1];
+ u1 = t[j + 2 + j * t_dim1];
+ }
+
+/* Swap columns if nec. */
+
+ if (dabs(w12) > dabs(w11)) {
+ ilpivt = TRUE_;
+ temp = w12;
+ temp2 = w22;
+ w12 = w11;
+ w22 = w21;
+ w11 = temp;
+ w21 = temp2;
+ }
+
+/* LU-factor */
+
+ temp = w21 / w11;
+ u2 -= temp * u1;
+ w22 -= temp * w12;
+ w21 = 0.f;
+
+/* Compute SCALE */
+
+ scale = 1.f;
+ if (dabs(w22) < safmin) {
+ scale = 0.f;
+ u2 = 1.f;
+ u1 = -w12 / w11;
+ goto L250;
+ }
+ if (dabs(w22) < dabs(u2)) {
+ scale = (r__1 = w22 / u2, dabs(r__1));
+ }
+ if (dabs(w11) < dabs(u1)) {
+/* Computing MIN */
+ r__2 = scale, r__3 = (r__1 = w11 / u1, dabs(r__1));
+ scale = dmin(r__2,r__3);
+ }
+
+/* Solve */
+
+ u2 = scale * u2 / w22;
+ u1 = (scale * u1 - w12 * u2) / w11;
+
+L250:
+ if (ilpivt) {
+ temp = u2;
+ u2 = u1;
+ u1 = temp;
+ }
+
+/* Compute Householder Vector */
+
+/* Computing 2nd power */
+ r__1 = scale;
+/* Computing 2nd power */
+ r__2 = u1;
+/* Computing 2nd power */
+ r__3 = u2;
+ t1 = sqrt(r__1 * r__1 + r__2 * r__2 + r__3 * r__3);
+ tau = scale / t1 + 1.f;
+ vs = -1.f / (scale + t1);
+ v[0] = 1.f;
+ v[1] = vs * u1;
+ v[2] = vs * u2;
+
+/* Apply transformations from the right. */
+
+/* Computing MIN */
+ i__4 = j + 3;
+ i__3 = min(i__4,ilast);
+ for (jr = ifrstm; jr <= i__3; ++jr) {
+ temp = tau * (h__[jr + j * h_dim1] + v[1] * h__[jr + (j +
+ 1) * h_dim1] + v[2] * h__[jr + (j + 2) * h_dim1]);
+ h__[jr + j * h_dim1] -= temp;
+ h__[jr + (j + 1) * h_dim1] -= temp * v[1];
+ h__[jr + (j + 2) * h_dim1] -= temp * v[2];
+/* L260: */
+ }
+ i__3 = j + 2;
+ for (jr = ifrstm; jr <= i__3; ++jr) {
+ temp = tau * (t[jr + j * t_dim1] + v[1] * t[jr + (j + 1) *
+ t_dim1] + v[2] * t[jr + (j + 2) * t_dim1]);
+ t[jr + j * t_dim1] -= temp;
+ t[jr + (j + 1) * t_dim1] -= temp * v[1];
+ t[jr + (j + 2) * t_dim1] -= temp * v[2];
+/* L270: */
+ }
+ if (ilz) {
+ i__3 = *n;
+ for (jr = 1; jr <= i__3; ++jr) {
+ temp = tau * (z__[jr + j * z_dim1] + v[1] * z__[jr + (
+ j + 1) * z_dim1] + v[2] * z__[jr + (j + 2) *
+ z_dim1]);
+ z__[jr + j * z_dim1] -= temp;
+ z__[jr + (j + 1) * z_dim1] -= temp * v[1];
+ z__[jr + (j + 2) * z_dim1] -= temp * v[2];
+/* L280: */
+ }
+ }
+ t[j + 1 + j * t_dim1] = 0.f;
+ t[j + 2 + j * t_dim1] = 0.f;
+/* L290: */
+ }
+
+/* Last elements: Use Givens rotations */
+
+/* Rotations from the left */
+
+ j = ilast - 1;
+ temp = h__[j + (j - 1) * h_dim1];
+ slartg_(&temp, &h__[j + 1 + (j - 1) * h_dim1], &c__, &s, &h__[j +
+ (j - 1) * h_dim1]);
+ h__[j + 1 + (j - 1) * h_dim1] = 0.f;
+
+ i__2 = ilastm;
+ for (jc = j; jc <= i__2; ++jc) {
+ temp = c__ * h__[j + jc * h_dim1] + s * h__[j + 1 + jc *
+ h_dim1];
+ h__[j + 1 + jc * h_dim1] = -s * h__[j + jc * h_dim1] + c__ *
+ h__[j + 1 + jc * h_dim1];
+ h__[j + jc * h_dim1] = temp;
+ temp2 = c__ * t[j + jc * t_dim1] + s * t[j + 1 + jc * t_dim1];
+ t[j + 1 + jc * t_dim1] = -s * t[j + jc * t_dim1] + c__ * t[j
+ + 1 + jc * t_dim1];
+ t[j + jc * t_dim1] = temp2;
+/* L300: */
+ }
+ if (ilq) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ temp = c__ * q[jr + j * q_dim1] + s * q[jr + (j + 1) *
+ q_dim1];
+ q[jr + (j + 1) * q_dim1] = -s * q[jr + j * q_dim1] + c__ *
+ q[jr + (j + 1) * q_dim1];
+ q[jr + j * q_dim1] = temp;
+/* L310: */
+ }
+ }
+
+/* Rotations from the right. */
+
+ temp = t[j + 1 + (j + 1) * t_dim1];
+ slartg_(&temp, &t[j + 1 + j * t_dim1], &c__, &s, &t[j + 1 + (j +
+ 1) * t_dim1]);
+ t[j + 1 + j * t_dim1] = 0.f;
+
+ i__2 = ilast;
+ for (jr = ifrstm; jr <= i__2; ++jr) {
+ temp = c__ * h__[jr + (j + 1) * h_dim1] + s * h__[jr + j *
+ h_dim1];
+ h__[jr + j * h_dim1] = -s * h__[jr + (j + 1) * h_dim1] + c__ *
+ h__[jr + j * h_dim1];
+ h__[jr + (j + 1) * h_dim1] = temp;
+/* L320: */
+ }
+ i__2 = ilast - 1;
+ for (jr = ifrstm; jr <= i__2; ++jr) {
+ temp = c__ * t[jr + (j + 1) * t_dim1] + s * t[jr + j * t_dim1]
+ ;
+ t[jr + j * t_dim1] = -s * t[jr + (j + 1) * t_dim1] + c__ * t[
+ jr + j * t_dim1];
+ t[jr + (j + 1) * t_dim1] = temp;
+/* L330: */
+ }
+ if (ilz) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ temp = c__ * z__[jr + (j + 1) * z_dim1] + s * z__[jr + j *
+ z_dim1];
+ z__[jr + j * z_dim1] = -s * z__[jr + (j + 1) * z_dim1] +
+ c__ * z__[jr + j * z_dim1];
+ z__[jr + (j + 1) * z_dim1] = temp;
+/* L340: */
+ }
+ }
+
+/* End of Double-Shift code */
+
+ }
+
+ goto L350;
+
+/* End of iteration loop */
+
+L350:
+/* L360: */
+ ;
+ }
+
+/* Drop-through = non-convergence */
+
+ *info = ilast;
+ goto L420;
+
+/* Successful completion of all QZ steps */
+
+L380:
+
+/* Set Eigenvalues 1:ILO-1 */
+
+ i__1 = *ilo - 1;
+ for (j = 1; j <= i__1; ++j) {
+ if (t[j + j * t_dim1] < 0.f) {
+ if (ilschr) {
+ i__2 = j;
+ for (jr = 1; jr <= i__2; ++jr) {
+ h__[jr + j * h_dim1] = -h__[jr + j * h_dim1];
+ t[jr + j * t_dim1] = -t[jr + j * t_dim1];
+/* L390: */
+ }
+ } else {
+ h__[j + j * h_dim1] = -h__[j + j * h_dim1];
+ t[j + j * t_dim1] = -t[j + j * t_dim1];
+ }
+ if (ilz) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ z__[jr + j * z_dim1] = -z__[jr + j * z_dim1];
+/* L400: */
+ }
+ }
+ }
+ alphar[j] = h__[j + j * h_dim1];
+ alphai[j] = 0.f;
+ beta[j] = t[j + j * t_dim1];
+/* L410: */
+ }
+
+/* Normal Termination */
+
+ *info = 0;
+
+/* Exit (other than argument error) -- return optimal workspace size */
+
+L420:
+ work[1] = (real) (*n);
+ return 0;
+
+/* End of SHGEQZ */
+
+} /* shgeqz_ */
diff --git a/contrib/libs/clapack/shsein.c b/contrib/libs/clapack/shsein.c
new file mode 100644
index 0000000000..51e1d3631d
--- /dev/null
+++ b/contrib/libs/clapack/shsein.c
@@ -0,0 +1,488 @@
+/* shsein.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static logical c_false = FALSE_;
+static logical c_true = TRUE_;
+
+/* Subroutine */ int shsein_(char *side, char *eigsrc, char *initv, logical *
+ select, integer *n, real *h__, integer *ldh, real *wr, real *wi, real
+ *vl, integer *ldvl, real *vr, integer *ldvr, integer *mm, integer *m,
+ real *work, integer *ifaill, integer *ifailr, integer *info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+ i__2;
+ real r__1, r__2;
+
+ /* Local variables */
+ integer i__, k, kl, kr, kln, ksi;
+ real wki;
+ integer ksr;
+ real ulp, wkr, eps3;
+ logical pair;
+ real unfl;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical leftv, bothv;
+ real hnorm;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int slaein_(logical *, logical *, integer *, real
+ *, integer *, real *, real *, real *, real *, real *, integer *,
+ real *, real *, real *, real *, integer *), xerbla_(char *,
+ integer *);
+ real bignum;
+ extern doublereal slanhs_(char *, integer *, real *, integer *, real *);
+ logical noinit;
+ integer ldwork;
+ logical rightv, fromqr;
+ real smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SHSEIN uses inverse iteration to find specified right and/or left */
+/* eigenvectors of a real upper Hessenberg matrix H. */
+
+/* The right eigenvector x and the left eigenvector y of the matrix H */
+/* corresponding to an eigenvalue w are defined by: */
+
+/* H * x = w * x, y**h * H = w * y**h */
+
+/* where y**h denotes the conjugate transpose of the vector y. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'R': compute right eigenvectors only; */
+/* = 'L': compute left eigenvectors only; */
+/* = 'B': compute both right and left eigenvectors. */
+
+/* EIGSRC (input) CHARACTER*1 */
+/* Specifies the source of eigenvalues supplied in (WR,WI): */
+/* = 'Q': the eigenvalues were found using SHSEQR; thus, if */
+/* H has zero subdiagonal elements, and so is */
+/* block-triangular, then the j-th eigenvalue can be */
+/* assumed to be an eigenvalue of the block containing */
+/* the j-th row/column. This property allows SHSEIN to */
+/* perform inverse iteration on just one diagonal block. */
+/* = 'N': no assumptions are made on the correspondence */
+/* between eigenvalues and diagonal blocks. In this */
+/* case, SHSEIN must always perform inverse iteration */
+/* using the whole matrix H. */
+
+/* INITV (input) CHARACTER*1 */
+/* = 'N': no initial vectors are supplied; */
+/* = 'U': user-supplied initial vectors are stored in the arrays */
+/* VL and/or VR. */
+
+/* SELECT (input/output) LOGICAL array, dimension (N) */
+/* Specifies the eigenvectors to be computed. To select the */
+/* real eigenvector corresponding to a real eigenvalue WR(j), */
+/* SELECT(j) must be set to .TRUE.. To select the complex */
+/* eigenvector corresponding to a complex eigenvalue */
+/* (WR(j),WI(j)), with complex conjugate (WR(j+1),WI(j+1)), */
+/* either SELECT(j) or SELECT(j+1) or both must be set to */
+/* .TRUE.; then on exit SELECT(j) is .TRUE. and SELECT(j+1) is */
+/* .FALSE.. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N >= 0. */
+
+/* H (input) REAL array, dimension (LDH,N) */
+/* The upper Hessenberg matrix H. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH >= max(1,N). */
+
+/* WR (input/output) REAL array, dimension (N) */
+/* WI (input) REAL array, dimension (N) */
+/* On entry, the real and imaginary parts of the eigenvalues of */
+/* H; a complex conjugate pair of eigenvalues must be stored in */
+/* consecutive elements of WR and WI. */
+/* On exit, WR may have been altered since close eigenvalues */
+/* are perturbed slightly in searching for independent */
+/* eigenvectors. */
+
+/* VL (input/output) REAL array, dimension (LDVL,MM) */
+/* On entry, if INITV = 'U' and SIDE = 'L' or 'B', VL must */
+/* contain starting vectors for the inverse iteration for the */
+/* left eigenvectors; the starting vector for each eigenvector */
+/* must be in the same column(s) in which the eigenvector will */
+/* be stored. */
+/* On exit, if SIDE = 'L' or 'B', the left eigenvectors */
+/* specified by SELECT will be stored consecutively in the */
+/* columns of VL, in the same order as their eigenvalues. A */
+/* complex eigenvector corresponding to a complex eigenvalue is */
+/* stored in two consecutive columns, the first holding the real */
+/* part and the second the imaginary part. */
+/* If SIDE = 'R', VL is not referenced. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the array VL. */
+/* LDVL >= max(1,N) if SIDE = 'L' or 'B'; LDVL >= 1 otherwise. */
+
+/* VR (input/output) REAL array, dimension (LDVR,MM) */
+/* On entry, if INITV = 'U' and SIDE = 'R' or 'B', VR must */
+/* contain starting vectors for the inverse iteration for the */
+/* right eigenvectors; the starting vector for each eigenvector */
+/* must be in the same column(s) in which the eigenvector will */
+/* be stored. */
+/* On exit, if SIDE = 'R' or 'B', the right eigenvectors */
+/* specified by SELECT will be stored consecutively in the */
+/* columns of VR, in the same order as their eigenvalues. A */
+/* complex eigenvector corresponding to a complex eigenvalue is */
+/* stored in two consecutive columns, the first holding the real */
+/* part and the second the imaginary part. */
+/* If SIDE = 'L', VR is not referenced. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. */
+/* LDVR >= max(1,N) if SIDE = 'R' or 'B'; LDVR >= 1 otherwise. */
+
+/* MM (input) INTEGER */
+/* The number of columns in the arrays VL and/or VR. MM >= M. */
+
+/* M (output) INTEGER */
+/* The number of columns in the arrays VL and/or VR required to */
+/* store the eigenvectors; each selected real eigenvector */
+/* occupies one column and each selected complex eigenvector */
+/* occupies two columns. */
+
+/* WORK (workspace) REAL array, dimension ((N+2)*N) */
+
+/* IFAILL (output) INTEGER array, dimension (MM) */
+/* If SIDE = 'L' or 'B', IFAILL(i) = j > 0 if the left */
+/* eigenvector in the i-th column of VL (corresponding to the */
+/* eigenvalue w(j)) failed to converge; IFAILL(i) = 0 if the */
+/* eigenvector converged satisfactorily. If the i-th and (i+1)th */
+/* columns of VL hold a complex eigenvector, then IFAILL(i) and */
+/* IFAILL(i+1) are set to the same value. */
+/* If SIDE = 'R', IFAILL is not referenced. */
+
+/* IFAILR (output) INTEGER array, dimension (MM) */
+/* If SIDE = 'R' or 'B', IFAILR(i) = j > 0 if the right */
+/* eigenvector in the i-th column of VR (corresponding to the */
+/* eigenvalue w(j)) failed to converge; IFAILR(i) = 0 if the */
+/* eigenvector converged satisfactorily. If the i-th and (i+1)th */
+/* columns of VR hold a complex eigenvector, then IFAILR(i) and */
+/* IFAILR(i+1) are set to the same value. */
+/* If SIDE = 'L', IFAILR is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, i is the number of eigenvectors which */
+/* failed to converge; see IFAILL and IFAILR for further */
+/* details. */
+
+/* Further Details */
+/* =============== */
+
+/* Each eigenvector is normalized so that the element of largest */
+/* magnitude has magnitude 1; here the magnitude of a complex number */
+/* (x,y) is taken to be |x|+|y|. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters. */
+
+ /* Parameter adjustments */
+ --select;
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --wr;
+ --wi;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+ --ifaill;
+ --ifailr;
+
+ /* Function Body */
+ bothv = lsame_(side, "B");
+ rightv = lsame_(side, "R") || bothv;
+ leftv = lsame_(side, "L") || bothv;
+
+ fromqr = lsame_(eigsrc, "Q");
+
+ noinit = lsame_(initv, "N");
+
+/* Set M to the number of columns required to store the selected */
+/* eigenvectors, and standardize the array SELECT. */
+
+ *m = 0;
+ pair = FALSE_;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (pair) {
+ pair = FALSE_;
+ select[k] = FALSE_;
+ } else {
+ if (wi[k] == 0.f) {
+ if (select[k]) {
+ ++(*m);
+ }
+ } else {
+ pair = TRUE_;
+ if (select[k] || select[k + 1]) {
+ select[k] = TRUE_;
+ *m += 2;
+ }
+ }
+ }
+/* L10: */
+ }
+
+ *info = 0;
+ if (! rightv && ! leftv) {
+ *info = -1;
+ } else if (! fromqr && ! lsame_(eigsrc, "N")) {
+ *info = -2;
+ } else if (! noinit && ! lsame_(initv, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*ldh < max(1,*n)) {
+ *info = -7;
+ } else if (*ldvl < 1 || leftv && *ldvl < *n) {
+ *info = -11;
+ } else if (*ldvr < 1 || rightv && *ldvr < *n) {
+ *info = -13;
+ } else if (*mm < *m) {
+ *info = -14;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SHSEIN", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Set machine-dependent constants. */
+
+ unfl = slamch_("Safe minimum");
+ ulp = slamch_("Precision");
+ smlnum = unfl * (*n / ulp);
+ bignum = (1.f - ulp) / smlnum;
+
+ ldwork = *n + 1;
+
+ kl = 1;
+ kln = 0;
+ if (fromqr) {
+ kr = 0;
+ } else {
+ kr = *n;
+ }
+ ksr = 1;
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (select[k]) {
+
+/* Compute eigenvector(s) corresponding to W(K). */
+
+ if (fromqr) {
+
+/* If affiliation of eigenvalues is known, check whether */
+/* the matrix splits. */
+
+/* Determine KL and KR such that 1 <= KL <= K <= KR <= N */
+/* and H(KL,KL-1) and H(KR+1,KR) are zero (or KL = 1 or */
+/* KR = N). */
+
+/* Then inverse iteration can be performed with the */
+/* submatrix H(KL:N,KL:N) for a left eigenvector, and with */
+/* the submatrix H(1:KR,1:KR) for a right eigenvector. */
+
+ i__2 = kl + 1;
+ for (i__ = k; i__ >= i__2; --i__) {
+ if (h__[i__ + (i__ - 1) * h_dim1] == 0.f) {
+ goto L30;
+ }
+/* L20: */
+ }
+L30:
+ kl = i__;
+ if (k > kr) {
+ i__2 = *n - 1;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ if (h__[i__ + 1 + i__ * h_dim1] == 0.f) {
+ goto L50;
+ }
+/* L40: */
+ }
+L50:
+ kr = i__;
+ }
+ }
+
+ if (kl != kln) {
+ kln = kl;
+
+/* Compute infinity-norm of submatrix H(KL:KR,KL:KR) if it */
+/* has not ben computed before. */
+
+ i__2 = kr - kl + 1;
+ hnorm = slanhs_("I", &i__2, &h__[kl + kl * h_dim1], ldh, &
+ work[1]);
+ if (hnorm > 0.f) {
+ eps3 = hnorm * ulp;
+ } else {
+ eps3 = smlnum;
+ }
+ }
+
+/* Perturb eigenvalue if it is close to any previous */
+/* selected eigenvalues affiliated to the submatrix */
+/* H(KL:KR,KL:KR). Close roots are modified by EPS3. */
+
+ wkr = wr[k];
+ wki = wi[k];
+L60:
+ i__2 = kl;
+ for (i__ = k - 1; i__ >= i__2; --i__) {
+ if (select[i__] && (r__1 = wr[i__] - wkr, dabs(r__1)) + (r__2
+ = wi[i__] - wki, dabs(r__2)) < eps3) {
+ wkr += eps3;
+ goto L60;
+ }
+/* L70: */
+ }
+ wr[k] = wkr;
+
+ pair = wki != 0.f;
+ if (pair) {
+ ksi = ksr + 1;
+ } else {
+ ksi = ksr;
+ }
+ if (leftv) {
+
+/* Compute left eigenvector. */
+
+ i__2 = *n - kl + 1;
+ slaein_(&c_false, &noinit, &i__2, &h__[kl + kl * h_dim1], ldh,
+ &wkr, &wki, &vl[kl + ksr * vl_dim1], &vl[kl + ksi *
+ vl_dim1], &work[1], &ldwork, &work[*n * *n + *n + 1],
+ &eps3, &smlnum, &bignum, &iinfo);
+ if (iinfo > 0) {
+ if (pair) {
+ *info += 2;
+ } else {
+ ++(*info);
+ }
+ ifaill[ksr] = k;
+ ifaill[ksi] = k;
+ } else {
+ ifaill[ksr] = 0;
+ ifaill[ksi] = 0;
+ }
+ i__2 = kl - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ vl[i__ + ksr * vl_dim1] = 0.f;
+/* L80: */
+ }
+ if (pair) {
+ i__2 = kl - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ vl[i__ + ksi * vl_dim1] = 0.f;
+/* L90: */
+ }
+ }
+ }
+ if (rightv) {
+
+/* Compute right eigenvector. */
+
+ slaein_(&c_true, &noinit, &kr, &h__[h_offset], ldh, &wkr, &
+ wki, &vr[ksr * vr_dim1 + 1], &vr[ksi * vr_dim1 + 1], &
+ work[1], &ldwork, &work[*n * *n + *n + 1], &eps3, &
+ smlnum, &bignum, &iinfo);
+ if (iinfo > 0) {
+ if (pair) {
+ *info += 2;
+ } else {
+ ++(*info);
+ }
+ ifailr[ksr] = k;
+ ifailr[ksi] = k;
+ } else {
+ ifailr[ksr] = 0;
+ ifailr[ksi] = 0;
+ }
+ i__2 = *n;
+ for (i__ = kr + 1; i__ <= i__2; ++i__) {
+ vr[i__ + ksr * vr_dim1] = 0.f;
+/* L100: */
+ }
+ if (pair) {
+ i__2 = *n;
+ for (i__ = kr + 1; i__ <= i__2; ++i__) {
+ vr[i__ + ksi * vr_dim1] = 0.f;
+/* L110: */
+ }
+ }
+ }
+
+ if (pair) {
+ ksr += 2;
+ } else {
+ ++ksr;
+ }
+ }
+/* L120: */
+ }
+
+ return 0;
+
+/* End of SHSEIN */
+
+} /* shsein_ */
diff --git a/contrib/libs/clapack/shseqr.c b/contrib/libs/clapack/shseqr.c
new file mode 100644
index 0000000000..533949dfd5
--- /dev/null
+++ b/contrib/libs/clapack/shseqr.c
@@ -0,0 +1,484 @@
+/* shseqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b11 = 0.f;
+static real c_b12 = 1.f;
+static integer c__12 = 12;
+static integer c__2 = 2;
+static integer c__49 = 49;
+
+/* Subroutine */ int shseqr_(char *job, char *compz, integer *n, integer *ilo,
+ integer *ihi, real *h__, integer *ldh, real *wr, real *wi, real *z__,
+ integer *ldz, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2[2], i__3;
+ real r__1;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ real hl[2401] /* was [49][49] */;
+ integer kbot, nmin;
+ extern logical lsame_(char *, char *);
+ logical initz;
+ real workl[49];
+ logical wantt, wantz;
+ extern /* Subroutine */ int slaqr0_(logical *, logical *, integer *,
+ integer *, integer *, real *, integer *, real *, real *, integer *
+, integer *, real *, integer *, real *, integer *, integer *),
+ xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int slahqr_(logical *, logical *, integer *,
+ integer *, integer *, real *, integer *, real *, real *, integer *
+, integer *, real *, integer *, integer *), slacpy_(char *,
+ integer *, integer *, real *, integer *, real *, integer *), slaset_(char *, integer *, integer *, real *, real *,
+ real *, integer *);
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+/* Purpose */
+/* ======= */
+
+/* SHSEQR computes the eigenvalues of a Hessenberg matrix H */
+/* and, optionally, the matrices T and Z from the Schur decomposition */
+/* H = Z T Z**T, where T is an upper quasi-triangular matrix (the */
+/* Schur form), and Z is the orthogonal matrix of Schur vectors. */
+
+/* Optionally Z may be postmultiplied into an input orthogonal */
+/* matrix Q so that this routine can give the Schur factorization */
+/* of a matrix A which has been reduced to the Hessenberg form H */
+/* by the orthogonal matrix Q: A = Q*H*Q**T = (QZ)*T*(QZ)**T. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* = 'E': compute eigenvalues only; */
+/* = 'S': compute eigenvalues and the Schur form T. */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': no Schur vectors are computed; */
+/* = 'I': Z is initialized to the unit matrix and the matrix Z */
+/* of Schur vectors of H is returned; */
+/* = 'V': Z must contain an orthogonal matrix Q on entry, and */
+/* the product Q*Z is returned. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N .GE. 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that H is already upper triangular in rows */
+/* and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally */
+/* set by a previous call to SGEBAL, and then passed to SGEHRD */
+/* when the matrix output by SGEBAL is reduced to Hessenberg */
+/* form. Otherwise ILO and IHI should be set to 1 and N */
+/* respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. */
+/* If N = 0, then ILO = 1 and IHI = 0. */
+
+/* H (input/output) REAL array, dimension (LDH,N) */
+/* On entry, the upper Hessenberg matrix H. */
+/* On exit, if INFO = 0 and JOB = 'S', then H contains the */
+/* upper quasi-triangular matrix T from the Schur decomposition */
+/* (the Schur form); 2-by-2 diagonal blocks (corresponding to */
+/* complex conjugate pairs of eigenvalues) are returned in */
+/* standard form, with H(i,i) = H(i+1,i+1) and */
+/* H(i+1,i)*H(i,i+1).LT.0. If INFO = 0 and JOB = 'E', the */
+/* contents of H are unspecified on exit. (The output value of */
+/* H when INFO.GT.0 is given under the description of INFO */
+/* below.) */
+
+/* Unlike earlier versions of SHSEQR, this subroutine may */
+/* explicitly H(i,j) = 0 for i.GT.j and j = 1, 2, ... ILO-1 */
+/* or j = IHI+1, IHI+2, ... N. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH .GE. max(1,N). */
+
+/* WR (output) REAL array, dimension (N) */
+/* WI (output) REAL array, dimension (N) */
+/* The real and imaginary parts, respectively, of the computed */
+/* eigenvalues. If two eigenvalues are computed as a complex */
+/* conjugate pair, they are stored in consecutive elements of */
+/* WR and WI, say the i-th and (i+1)th, with WI(i) .GT. 0 and */
+/* WI(i+1) .LT. 0. If JOB = 'S', the eigenvalues are stored in */
+/* the same order as on the diagonal of the Schur form returned */
+/* in H, with WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2 */
+/* diagonal block, WI(i) = sqrt(-H(i+1,i)*H(i,i+1)) and */
+/* WI(i+1) = -WI(i). */
+
+/* Z (input/output) REAL array, dimension (LDZ,N) */
+/* If COMPZ = 'N', Z is not referenced. */
+/* If COMPZ = 'I', on entry Z need not be set and on exit, */
+/* if INFO = 0, Z contains the orthogonal matrix Z of the Schur */
+/* vectors of H. If COMPZ = 'V', on entry Z must contain an */
+/* N-by-N matrix Q, which is assumed to be equal to the unit */
+/* matrix except for the submatrix Z(ILO:IHI,ILO:IHI). On exit, */
+/* if INFO = 0, Z contains Q*Z. */
+/* Normally Q is the orthogonal matrix generated by SORGHR */
+/* after the call to SGEHRD which formed the Hessenberg matrix */
+/* H. (The output value of Z when INFO.GT.0 is given under */
+/* the description of INFO below.) */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. if COMPZ = 'I' or */
+/* COMPZ = 'V', then LDZ.GE.MAX(1,N). Otherwize, LDZ.GE.1. */
+
+/* WORK (workspace/output) REAL array, dimension (LWORK) */
+/* On exit, if INFO = 0, WORK(1) returns an estimate of */
+/* the optimal value for LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK .GE. max(1,N) */
+/* is sufficient and delivers very good and sometimes */
+/* optimal performance. However, LWORK as large as 11*N */
+/* may be required for optimal performance. A workspace */
+/* query is recommended to determine the optimal workspace */
+/* size. */
+
+/* If LWORK = -1, then SHSEQR does a workspace query. */
+/* In this case, SHSEQR checks the input parameters and */
+/* estimates the optimal workspace size for the given */
+/* values of N, ILO and IHI. The estimate is returned */
+/* in WORK(1). No error message related to LWORK is */
+/* issued by XERBLA. Neither H nor Z are accessed. */
+
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* .LT. 0: if INFO = -i, the i-th argument had an illegal */
+/* value */
+/* .GT. 0: if INFO = i, SHSEQR failed to compute all of */
+/* the eigenvalues. Elements 1:ilo-1 and i+1:n of WR */
+/* and WI contain those eigenvalues which have been */
+/* successfully computed. (Failures are rare.) */
+
+/* If INFO .GT. 0 and JOB = 'E', then on exit, the */
+/* remaining unconverged eigenvalues are the eigen- */
+/* values of the upper Hessenberg matrix rows and */
+/* columns ILO through INFO of the final, output */
+/* value of H. */
+
+/* If INFO .GT. 0 and JOB = 'S', then on exit */
+
+/* (*) (initial value of H)*U = U*(final value of H) */
+
+/* where U is an orthogonal matrix. The final */
+/* value of H is upper Hessenberg and quasi-triangular */
+/* in rows and columns INFO+1 through IHI. */
+
+/* If INFO .GT. 0 and COMPZ = 'V', then on exit */
+
+/* (final value of Z) = (initial value of Z)*U */
+
+/* where U is the orthogonal matrix in (*) (regard- */
+/* less of the value of JOB.) */
+
+/* If INFO .GT. 0 and COMPZ = 'I', then on exit */
+/* (final value of Z) = U */
+/* where U is the orthogonal matrix in (*) (regard- */
+/* less of the value of JOB.) */
+
+/* If INFO .GT. 0 and COMPZ = 'N', then Z is not */
+/* accessed. */
+
+/* ================================================================ */
+/* Default values supplied by */
+/* ILAENV(ISPEC,'SHSEQR',JOB(:1)//COMPZ(:1),N,ILO,IHI,LWORK). */
+/* It is suggested that these defaults be adjusted in order */
+/* to attain best performance in each particular */
+/* computational environment. */
+
+/* ISPEC=12: The SLAHQR vs SLAQR0 crossover point. */
+/* Default: 75. (Must be at least 11.) */
+
+/* ISPEC=13: Recommended deflation window size. */
+/* This depends on ILO, IHI and NS. NS is the */
+/* number of simultaneous shifts returned */
+/* by ILAENV(ISPEC=15). (See ISPEC=15 below.) */
+/* The default for (IHI-ILO+1).LE.500 is NS. */
+/* The default for (IHI-ILO+1).GT.500 is 3*NS/2. */
+
+/* ISPEC=14: Nibble crossover point. (See IPARMQ for */
+/* details.) Default: 14% of deflation window */
+/* size. */
+
+/* ISPEC=15: Number of simultaneous shifts in a multishift */
+/* QR iteration. */
+
+/* If IHI-ILO+1 is ... */
+
+/* greater than ...but less ... the */
+/* or equal to ... than default is */
+
+/* 1 30 NS = 2(+) */
+/* 30 60 NS = 4(+) */
+/* 60 150 NS = 10(+) */
+/* 150 590 NS = ** */
+/* 590 3000 NS = 64 */
+/* 3000 6000 NS = 128 */
+/* 6000 infinity NS = 256 */
+
+/* (+) By default some or all matrices of this order */
+/* are passed to the implicit double shift routine */
+/* SLAHQR and this parameter is ignored. See */
+/* ISPEC=12 above and comments in IPARMQ for */
+/* details. */
+
+/* (**) The asterisks (**) indicate an ad-hoc */
+/* function of N increasing from 10 to 64. */
+
+/* ISPEC=16: Select structured matrix multiply. */
+/* If the number of simultaneous shifts (specified */
+/* by ISPEC=15) is less than 14, then the default */
+/* for ISPEC=16 is 0. Otherwise the default for */
+/* ISPEC=16 is 2. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* References: */
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 */
+/* Performance, SIAM Journal of Matrix Analysis, volume 23, pages */
+/* 929--947, 2002. */
+
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part II: Aggressive Early Deflation, SIAM Journal */
+/* of Matrix Analysis, volume 23, pages 948--973, 2002. */
+
+/* ================================================================ */
+/* .. Parameters .. */
+
+/* ==== Matrices of order NTINY or smaller must be processed by */
+/* . SLAHQR because of insufficient subdiagonal scratch space. */
+/* . (This is a hard limit.) ==== */
+
+/* ==== NL allocates some local workspace to help small matrices */
+/* . through a rare SLAHQR failure. NL .GT. NTINY = 11 is */
+/* . required and NL .LE. NMIN = ILAENV(ISPEC=12,...) is recom- */
+/* . mended. (The default value of NMIN is 75.) Using NL = 49 */
+/* . allows up to six simultaneous shifts and a 16-by-16 */
+/* . deflation window. ==== */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* ==== Decode and check the input parameters. ==== */
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --wr;
+ --wi;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ wantt = lsame_(job, "S");
+ initz = lsame_(compz, "I");
+ wantz = initz || lsame_(compz, "V");
+ work[1] = (real) max(1,*n);
+ lquery = *lwork == -1;
+
+ *info = 0;
+ if (! lsame_(job, "E") && ! wantt) {
+ *info = -1;
+ } else if (! lsame_(compz, "N") && ! wantz) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ilo < 1 || *ilo > max(1,*n)) {
+ *info = -4;
+ } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+ *info = -5;
+ } else if (*ldh < max(1,*n)) {
+ *info = -7;
+ } else if (*ldz < 1 || wantz && *ldz < max(1,*n)) {
+ *info = -11;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -13;
+ }
+
+ if (*info != 0) {
+
+/* ==== Quick return in case of invalid argument. ==== */
+
+ i__1 = -(*info);
+ xerbla_("SHSEQR", &i__1);
+ return 0;
+
+ } else if (*n == 0) {
+
+/* ==== Quick return in case N = 0; nothing to do. ==== */
+
+ return 0;
+
+ } else if (lquery) {
+
+/* ==== Quick return in case of a workspace query ==== */
+
+ slaqr0_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], &wi[
+ 1], ilo, ihi, &z__[z_offset], ldz, &work[1], lwork, info);
+/* ==== Ensure reported workspace size is backward-compatible with */
+/* . previous LAPACK versions. ==== */
+/* Computing MAX */
+ r__1 = (real) max(1,*n);
+ work[1] = dmax(r__1,work[1]);
+ return 0;
+
+ } else {
+
+/* ==== copy eigenvalues isolated by SGEBAL ==== */
+
+ i__1 = *ilo - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ wr[i__] = h__[i__ + i__ * h_dim1];
+ wi[i__] = 0.f;
+/* L10: */
+ }
+ i__1 = *n;
+ for (i__ = *ihi + 1; i__ <= i__1; ++i__) {
+ wr[i__] = h__[i__ + i__ * h_dim1];
+ wi[i__] = 0.f;
+/* L20: */
+ }
+
+/* ==== Initialize Z, if requested ==== */
+
+ if (initz) {
+ slaset_("A", n, n, &c_b11, &c_b12, &z__[z_offset], ldz)
+ ;
+ }
+
+/* ==== Quick return if possible ==== */
+
+ if (*ilo == *ihi) {
+ wr[*ilo] = h__[*ilo + *ilo * h_dim1];
+ wi[*ilo] = 0.f;
+ return 0;
+ }
+
+/* ==== SLAHQR/SLAQR0 crossover point ==== */
+
+/* Writing concatenation */
+ i__2[0] = 1, a__1[0] = job;
+ i__2[1] = 1, a__1[1] = compz;
+ s_cat(ch__1, a__1, i__2, &c__2, (ftnlen)2);
+ nmin = ilaenv_(&c__12, "SHSEQR", ch__1, n, ilo, ihi, lwork);
+ nmin = max(11,nmin);
+
+/* ==== SLAQR0 for big matrices; SLAHQR for small ones ==== */
+
+ if (*n > nmin) {
+ slaqr0_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1],
+ &wi[1], ilo, ihi, &z__[z_offset], ldz, &work[1], lwork,
+ info);
+ } else {
+
+/* ==== Small matrix ==== */
+
+ slahqr_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1],
+ &wi[1], ilo, ihi, &z__[z_offset], ldz, info);
+
+ if (*info > 0) {
+
+/* ==== A rare SLAHQR failure! SLAQR0 sometimes succeeds */
+/* . when SLAHQR fails. ==== */
+
+ kbot = *info;
+
+ if (*n >= 49) {
+
+/* ==== Larger matrices have enough subdiagonal scratch */
+/* . space to call SLAQR0 directly. ==== */
+
+ slaqr0_(&wantt, &wantz, n, ilo, &kbot, &h__[h_offset],
+ ldh, &wr[1], &wi[1], ilo, ihi, &z__[z_offset],
+ ldz, &work[1], lwork, info);
+
+ } else {
+
+/* ==== Tiny matrices don't have enough subdiagonal */
+/* . scratch space to benefit from SLAQR0. Hence, */
+/* . tiny matrices must be copied into a larger */
+/* . array before calling SLAQR0. ==== */
+
+ slacpy_("A", n, n, &h__[h_offset], ldh, hl, &c__49);
+ hl[*n + 1 + *n * 49 - 50] = 0.f;
+ i__1 = 49 - *n;
+ slaset_("A", &c__49, &i__1, &c_b11, &c_b11, &hl[(*n + 1) *
+ 49 - 49], &c__49);
+ slaqr0_(&wantt, &wantz, &c__49, ilo, &kbot, hl, &c__49, &
+ wr[1], &wi[1], ilo, ihi, &z__[z_offset], ldz,
+ workl, &c__49, info);
+ if (wantt || *info != 0) {
+ slacpy_("A", n, n, hl, &c__49, &h__[h_offset], ldh);
+ }
+ }
+ }
+ }
+
+/* ==== Clear out the trash, if necessary. ==== */
+
+ if ((wantt || *info != 0) && *n > 2) {
+ i__1 = *n - 2;
+ i__3 = *n - 2;
+ slaset_("L", &i__1, &i__3, &c_b11, &c_b11, &h__[h_dim1 + 3], ldh);
+ }
+
+/* ==== Ensure reported workspace size is backward-compatible with */
+/* . previous LAPACK versions. ==== */
+
+/* Computing MAX */
+ r__1 = (real) max(1,*n);
+ work[1] = dmax(r__1,work[1]);
+ }
+
+/* ==== End of SHSEQR ==== */
+
+ return 0;
+} /* shseqr_ */
diff --git a/contrib/libs/clapack/sisnan.c b/contrib/libs/clapack/sisnan.c
new file mode 100644
index 0000000000..9634bd2326
--- /dev/null
+++ b/contrib/libs/clapack/sisnan.c
@@ -0,0 +1,52 @@
+/* sisnan.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+logical sisnan_(real *sin__)
+{
+ /* System generated locals */
+ logical ret_val;
+
+ /* Local variables */
+ extern logical slaisnan_(real *, real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SISNAN returns .TRUE. if its argument is NaN, and .FALSE. */
+/* otherwise. To be replaced by the Fortran 2003 intrinsic in the */
+/* future. */
+
+/* Arguments */
+/* ========= */
+
+/* SIN (input) REAL */
+/* Input to test for NaN. */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+ ret_val = slaisnan_(sin__, sin__);
+ return ret_val;
+} /* sisnan_ */
diff --git a/contrib/libs/clapack/slabad.c b/contrib/libs/clapack/slabad.c
new file mode 100644
index 0000000000..ed9a836d89
--- /dev/null
+++ b/contrib/libs/clapack/slabad.c
@@ -0,0 +1,72 @@
+/* slabad.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slabad_(real *small, real *large)
+{
+ /* Builtin functions */
+ double r_lg10(real *), sqrt(doublereal);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLABAD takes as input the values computed by SLAMCH for underflow and */
+/* overflow, and returns the square root of each of these values if the */
+/* log of LARGE is sufficiently large. This subroutine is intended to */
+/* identify machines with a large exponent range, such as the Crays, and */
+/* redefine the underflow and overflow limits to be the square roots of */
+/* the values computed by SLAMCH. This subroutine is needed because */
+/* SLAMCH does not compensate for poor arithmetic in the upper half of */
+/* the exponent range, as is found on a Cray. */
+
+/* Arguments */
+/* ========= */
+
+/* SMALL (input/output) REAL */
+/* On entry, the underflow threshold as computed by SLAMCH. */
+/* On exit, if LOG10(LARGE) is sufficiently large, the square */
+/* root of SMALL, otherwise unchanged. */
+
+/* LARGE (input/output) REAL */
+/* On entry, the overflow threshold as computed by SLAMCH. */
+/* On exit, if LOG10(LARGE) is sufficiently large, the square */
+/* root of LARGE, otherwise unchanged. */
+
+/* ===================================================================== */
+
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* If it looks like we're on a Cray, take the square root of */
+/* SMALL and LARGE to avoid overflow and underflow problems. */
+
+ if (r_lg10(large) > 2e3f) {
+ *small = sqrt(*small);
+ *large = sqrt(*large);
+ }
+
+ return 0;
+
+/* End of SLABAD */
+
+} /* slabad_ */
diff --git a/contrib/libs/clapack/slabrd.c b/contrib/libs/clapack/slabrd.c
new file mode 100644
index 0000000000..1f2481908f
--- /dev/null
+++ b/contrib/libs/clapack/slabrd.c
@@ -0,0 +1,432 @@
+/* slabrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b4 = -1.f;
+static real c_b5 = 1.f;
+static integer c__1 = 1;
+static real c_b16 = 0.f;
+
+/* Subroutine */ int slabrd_(integer *m, integer *n, integer *nb, real *a,
+ integer *lda, real *d__, real *e, real *tauq, real *taup, real *x,
+ integer *ldx, real *y, integer *ldy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, x_dim1, x_offset, y_dim1, y_offset, i__1, i__2,
+ i__3;
+
+ /* Local variables */
+ integer i__;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ sgemv_(char *, integer *, integer *, real *, real *, integer *,
+ real *, integer *, real *, real *, integer *), slarfg_(
+ integer *, real *, real *, integer *, real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLABRD reduces the first NB rows and columns of a real general */
+/* m by n matrix A to upper or lower bidiagonal form by an orthogonal */
+/* transformation Q' * A * P, and returns the matrices X and Y which */
+/* are needed to apply the transformation to the unreduced part of A. */
+
+/* If m >= n, A is reduced to upper bidiagonal form; if m < n, to lower */
+/* bidiagonal form. */
+
+/* This is an auxiliary routine called by SGEBRD */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows in the matrix A. */
+
+/* N (input) INTEGER */
+/* The number of columns in the matrix A. */
+
+/* NB (input) INTEGER */
+/* The number of leading rows and columns of A to be reduced. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the m by n general matrix to be reduced. */
+/* On exit, the first NB rows and columns of the matrix are */
+/* overwritten; the rest of the array is unchanged. */
+/* If m >= n, elements on and below the diagonal in the first NB */
+/* columns, with the array TAUQ, represent the orthogonal */
+/* matrix Q as a product of elementary reflectors; and */
+/* elements above the diagonal in the first NB rows, with the */
+/* array TAUP, represent the orthogonal matrix P as a product */
+/* of elementary reflectors. */
+/* If m < n, elements below the diagonal in the first NB */
+/* columns, with the array TAUQ, represent the orthogonal */
+/* matrix Q as a product of elementary reflectors, and */
+/* elements on and above the diagonal in the first NB rows, */
+/* with the array TAUP, represent the orthogonal matrix P as */
+/* a product of elementary reflectors. */
+/* See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* D (output) REAL array, dimension (NB) */
+/* The diagonal elements of the first NB rows and columns of */
+/* the reduced matrix. D(i) = A(i,i). */
+
+/* E (output) REAL array, dimension (NB) */
+/* The off-diagonal elements of the first NB rows and columns of */
+/* the reduced matrix. */
+
+/* TAUQ (output) REAL array dimension (NB) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix Q. See Further Details. */
+
+/* TAUP (output) REAL array, dimension (NB) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the orthogonal matrix P. See Further Details. */
+
+/* X (output) REAL array, dimension (LDX,NB) */
+/* The m-by-nb matrix X required to update the unreduced part */
+/* of A. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= M. */
+
+/* Y (output) REAL array, dimension (LDY,NB) */
+/* The n-by-nb matrix Y required to update the unreduced part */
+/* of A. */
+
+/* LDY (input) INTEGER */
+/* The leading dimension of the array Y. LDY >= N. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrices Q and P are represented as products of elementary */
+/* reflectors: */
+
+/* Q = H(1) H(2) . . . H(nb) and P = G(1) G(2) . . . G(nb) */
+
+/* Each H(i) and G(i) has the form: */
+
+/* H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' */
+
+/* where tauq and taup are real scalars, and v and u are real vectors. */
+
+/* If m >= n, v(1:i-1) = 0, v(i) = 1, and v(i:m) is stored on exit in */
+/* A(i:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+1:n) is stored on exit in */
+/* A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* If m < n, v(1:i) = 0, v(i+1) = 1, and v(i+1:m) is stored on exit in */
+/* A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i:n) is stored on exit in */
+/* A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* The elements of the vectors v and u together form the m-by-nb matrix */
+/* V and the nb-by-n matrix U' which are needed, with X and Y, to apply */
+/* the transformation to the unreduced part of the matrix, using a block */
+/* update of the form: A := A - V*Y' - X*U'. */
+
+/* The contents of A on exit are illustrated by the following examples */
+/* with nb = 2: */
+
+/* m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): */
+
+/* ( 1 1 u1 u1 u1 ) ( 1 u1 u1 u1 u1 u1 ) */
+/* ( v1 1 1 u2 u2 ) ( 1 1 u2 u2 u2 u2 ) */
+/* ( v1 v2 a a a ) ( v1 1 a a a a ) */
+/* ( v1 v2 a a a ) ( v1 v2 a a a a ) */
+/* ( v1 v2 a a a ) ( v1 v2 a a a a ) */
+/* ( v1 v2 a a a ) */
+
+/* where a denotes an element of the original matrix which is unchanged, */
+/* vi denotes an element of the vector defining H(i), and ui an element */
+/* of the vector defining G(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tauq;
+ --taup;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ y_dim1 = *ldy;
+ y_offset = 1 + y_dim1;
+ y -= y_offset;
+
+ /* Function Body */
+ if (*m <= 0 || *n <= 0) {
+ return 0;
+ }
+
+ if (*m >= *n) {
+
+/* Reduce to upper bidiagonal form */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i:m,i) */
+
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b4, &a[i__ + a_dim1], lda,
+ &y[i__ + y_dim1], ldy, &c_b5, &a[i__ + i__ * a_dim1], &
+ c__1);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b4, &x[i__ + x_dim1], ldx,
+ &a[i__ * a_dim1 + 1], &c__1, &c_b5, &a[i__ + i__ *
+ a_dim1], &c__1);
+
+/* Generate reflection Q(i) to annihilate A(i+1:m,i) */
+
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ slarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3, *m)+ i__ *
+ a_dim1], &c__1, &tauq[i__]);
+ d__[i__] = a[i__ + i__ * a_dim1];
+ if (i__ < *n) {
+ a[i__ + i__ * a_dim1] = 1.f;
+
+/* Compute Y(i+1:n,i) */
+
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ sgemv_("Transpose", &i__2, &i__3, &c_b5, &a[i__ + (i__ + 1) *
+ a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &c_b16, &
+ y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b5, &a[i__ + a_dim1],
+ lda, &a[i__ + i__ * a_dim1], &c__1, &c_b16, &y[i__ *
+ y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b4, &y[i__ + 1 +
+ y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b5, &y[
+ i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b5, &x[i__ + x_dim1],
+ ldx, &a[i__ + i__ * a_dim1], &c__1, &c_b16, &y[i__ *
+ y_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ sgemv_("Transpose", &i__2, &i__3, &c_b4, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &c_b5,
+ &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *n - i__;
+ sscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
+
+/* Update A(i,i+1:n) */
+
+ i__2 = *n - i__;
+ sgemv_("No transpose", &i__2, &i__, &c_b4, &y[i__ + 1 +
+ y_dim1], ldy, &a[i__ + a_dim1], lda, &c_b5, &a[i__ + (
+ i__ + 1) * a_dim1], lda);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ sgemv_("Transpose", &i__2, &i__3, &c_b4, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b5, &a[
+ i__ + (i__ + 1) * a_dim1], lda);
+
+/* Generate reflection P(i) to annihilate A(i,i+2:n) */
+
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ slarfg_(&i__2, &a[i__ + (i__ + 1) * a_dim1], &a[i__ + min(
+ i__3, *n)* a_dim1], lda, &taup[i__]);
+ e[i__] = a[i__ + (i__ + 1) * a_dim1];
+ a[i__ + (i__ + 1) * a_dim1] = 1.f;
+
+/* Compute X(i+1:m,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ sgemv_("No transpose", &i__2, &i__3, &c_b5, &a[i__ + 1 + (i__
+ + 1) * a_dim1], lda, &a[i__ + (i__ + 1) * a_dim1],
+ lda, &c_b16, &x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *n - i__;
+ sgemv_("Transpose", &i__2, &i__, &c_b5, &y[i__ + 1 + y_dim1],
+ ldy, &a[i__ + (i__ + 1) * a_dim1], lda, &c_b16, &x[
+ i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ sgemv_("No transpose", &i__2, &i__, &c_b4, &a[i__ + 1 +
+ a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b5, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ sgemv_("No transpose", &i__2, &i__3, &c_b5, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ c_b16, &x[i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b4, &x[i__ + 1 +
+ x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b5, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *m - i__;
+ sscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Reduce to lower bidiagonal form */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i,i:n) */
+
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b4, &y[i__ + y_dim1], ldy,
+ &a[i__ + a_dim1], lda, &c_b5, &a[i__ + i__ * a_dim1],
+ lda);
+ i__2 = i__ - 1;
+ i__3 = *n - i__ + 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b4, &a[i__ * a_dim1 + 1],
+ lda, &x[i__ + x_dim1], ldx, &c_b5, &a[i__ + i__ * a_dim1],
+ lda);
+
+/* Generate reflection P(i) to annihilate A(i,i+1:n) */
+
+ i__2 = *n - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ slarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3, *n)*
+ a_dim1], lda, &taup[i__]);
+ d__[i__] = a[i__ + i__ * a_dim1];
+ if (i__ < *m) {
+ a[i__ + i__ * a_dim1] = 1.f;
+
+/* Compute X(i+1:m,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__ + 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b5, &a[i__ + 1 + i__ *
+ a_dim1], lda, &a[i__ + i__ * a_dim1], lda, &c_b16, &
+ x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b5, &y[i__ + y_dim1],
+ ldy, &a[i__ + i__ * a_dim1], lda, &c_b16, &x[i__ *
+ x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b4, &a[i__ + 1 +
+ a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b5, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__ + 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b5, &a[i__ * a_dim1 +
+ 1], lda, &a[i__ + i__ * a_dim1], lda, &c_b16, &x[i__ *
+ x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b4, &x[i__ + 1 +
+ x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b5, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *m - i__;
+ sscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
+
+/* Update A(i+1:m,i) */
+
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b4, &a[i__ + 1 +
+ a_dim1], lda, &y[i__ + y_dim1], ldy, &c_b5, &a[i__ +
+ 1 + i__ * a_dim1], &c__1);
+ i__2 = *m - i__;
+ sgemv_("No transpose", &i__2, &i__, &c_b4, &x[i__ + 1 +
+ x_dim1], ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b5, &a[
+ i__ + 1 + i__ * a_dim1], &c__1);
+
+/* Generate reflection Q(i) to annihilate A(i+2:m,i) */
+
+ i__2 = *m - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ slarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3, *m)+
+ i__ * a_dim1], &c__1, &tauq[i__]);
+ e[i__] = a[i__ + 1 + i__ * a_dim1];
+ a[i__ + 1 + i__ * a_dim1] = 1.f;
+
+/* Compute Y(i+1:n,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ sgemv_("Transpose", &i__2, &i__3, &c_b5, &a[i__ + 1 + (i__ +
+ 1) * a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1,
+ &c_b16, &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b5, &a[i__ + 1 + a_dim1],
+ lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b16, &y[
+ i__ * y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b4, &y[i__ + 1 +
+ y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b5, &y[
+ i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__;
+ sgemv_("Transpose", &i__2, &i__, &c_b5, &x[i__ + 1 + x_dim1],
+ ldx, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b16, &y[
+ i__ * y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ sgemv_("Transpose", &i__, &i__2, &c_b4, &a[(i__ + 1) * a_dim1
+ + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &c_b5, &y[i__
+ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *n - i__;
+ sscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of SLABRD */
+
+} /* slabrd_ */
diff --git a/contrib/libs/clapack/slacn2.c b/contrib/libs/clapack/slacn2.c
new file mode 100644
index 0000000000..9626eca46a
--- /dev/null
+++ b/contrib/libs/clapack/slacn2.c
@@ -0,0 +1,266 @@
+/* slacn2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b11 = 1.f;
+
+/* Subroutine */ int slacn2_(integer *n, real *v, real *x, integer *isgn,
+ real *est, integer *kase, integer *isave)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1;
+
+ /* Builtin functions */
+ double r_sign(real *, real *);
+ integer i_nint(real *);
+
+ /* Local variables */
+ integer i__;
+ real temp;
+ integer jlast;
+ extern doublereal sasum_(integer *, real *, integer *);
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ real altsgn, estold;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLACN2 estimates the 1-norm of a square, real matrix A. */
+/* Reverse communication is used for evaluating matrix-vector products. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 1. */
+
+/* V (workspace) REAL array, dimension (N) */
+/* On the final return, V = A*W, where EST = norm(V)/norm(W) */
+/* (W is not returned). */
+
+/* X (input/output) REAL array, dimension (N) */
+/* On an intermediate return, X should be overwritten by */
+/* A * X, if KASE=1, */
+/* A' * X, if KASE=2, */
+/* and SLACN2 must be re-called with all the other parameters */
+/* unchanged. */
+
+/* ISGN (workspace) INTEGER array, dimension (N) */
+
+/* EST (input/output) REAL */
+/* On entry with KASE = 1 or 2 and ISAVE(1) = 3, EST should be */
+/* unchanged from the previous call to SLACN2. */
+/* On exit, EST is an estimate (a lower bound) for norm(A). */
+
+/* KASE (input/output) INTEGER */
+/* On the initial call to SLACN2, KASE should be 0. */
+/* On an intermediate return, KASE will be 1 or 2, indicating */
+/* whether X should be overwritten by A * X or A' * X. */
+/* On the final return from SLACN2, KASE will again be 0. */
+
+/* ISAVE (input/output) INTEGER array, dimension (3) */
+/* ISAVE is used to save variables between calls to SLACN2 */
+
+/* Further Details */
+/* ======= ======= */
+
+/* Contributed by Nick Higham, University of Manchester. */
+/* Originally named SONEST, dated March 16, 1988. */
+
+/* Reference: N.J. Higham, "FORTRAN codes for estimating the one-norm of */
+/* a real or complex matrix, with applications to condition estimation", */
+/* ACM Trans. Math. Soft., vol. 14, no. 4, pp. 381-396, December 1988. */
+
+/* This is a thread safe version of SLACON, which uses the array ISAVE */
+/* in place of a SAVE statement, as follows: */
+
+/* SLACON SLACN2 */
+/* JUMP ISAVE(1) */
+/* J ISAVE(2) */
+/* ITER ISAVE(3) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --isave;
+ --isgn;
+ --x;
+ --v;
+
+ /* Function Body */
+ if (*kase == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = 1.f / (real) (*n);
+/* L10: */
+ }
+ *kase = 1;
+ isave[1] = 1;
+ return 0;
+ }
+
+ switch (isave[1]) {
+ case 1: goto L20;
+ case 2: goto L40;
+ case 3: goto L70;
+ case 4: goto L110;
+ case 5: goto L140;
+ }
+
+/* ................ ENTRY (ISAVE( 1 ) = 1) */
+/* FIRST ITERATION. X HAS BEEN OVERWRITTEN BY A*X. */
+
+L20:
+ if (*n == 1) {
+ v[1] = x[1];
+ *est = dabs(v[1]);
+/* ... QUIT */
+ goto L150;
+ }
+ *est = sasum_(n, &x[1], &c__1);
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = r_sign(&c_b11, &x[i__]);
+ isgn[i__] = i_nint(&x[i__]);
+/* L30: */
+ }
+ *kase = 2;
+ isave[1] = 2;
+ return 0;
+
+/* ................ ENTRY (ISAVE( 1 ) = 2) */
+/* FIRST ITERATION. X HAS BEEN OVERWRITTEN BY TRANSPOSE(A)*X. */
+
+L40:
+ isave[2] = isamax_(n, &x[1], &c__1);
+ isave[3] = 2;
+
+/* MAIN LOOP - ITERATIONS 2,3,...,ITMAX. */
+
+L50:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = 0.f;
+/* L60: */
+ }
+ x[isave[2]] = 1.f;
+ *kase = 1;
+ isave[1] = 3;
+ return 0;
+
+/* ................ ENTRY (ISAVE( 1 ) = 3) */
+/* X HAS BEEN OVERWRITTEN BY A*X. */
+
+L70:
+ scopy_(n, &x[1], &c__1, &v[1], &c__1);
+ estold = *est;
+ *est = sasum_(n, &v[1], &c__1);
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__1 = r_sign(&c_b11, &x[i__]);
+ if (i_nint(&r__1) != isgn[i__]) {
+ goto L90;
+ }
+/* L80: */
+ }
+/* REPEATED SIGN VECTOR DETECTED, HENCE ALGORITHM HAS CONVERGED. */
+ goto L120;
+
+L90:
+/* TEST FOR CYCLING. */
+ if (*est <= estold) {
+ goto L120;
+ }
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = r_sign(&c_b11, &x[i__]);
+ isgn[i__] = i_nint(&x[i__]);
+/* L100: */
+ }
+ *kase = 2;
+ isave[1] = 4;
+ return 0;
+
+/* ................ ENTRY (ISAVE( 1 ) = 4) */
+/* X HAS BEEN OVERWRITTEN BY TRANSPOSE(A)*X. */
+
+L110:
+ jlast = isave[2];
+ isave[2] = isamax_(n, &x[1], &c__1);
+ if (x[jlast] != (r__1 = x[isave[2]], dabs(r__1)) && isave[3] < 5) {
+ ++isave[3];
+ goto L50;
+ }
+
+/* ITERATION COMPLETE. FINAL STAGE. */
+
+L120:
+ altsgn = 1.f;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = altsgn * ((real) (i__ - 1) / (real) (*n - 1) + 1.f);
+ altsgn = -altsgn;
+/* L130: */
+ }
+ *kase = 1;
+ isave[1] = 5;
+ return 0;
+
+/* ................ ENTRY (ISAVE( 1 ) = 5) */
+/* X HAS BEEN OVERWRITTEN BY A*X. */
+
+L140:
+ temp = sasum_(n, &x[1], &c__1) / (real) (*n * 3) * 2.f;
+ if (temp > *est) {
+ scopy_(n, &x[1], &c__1, &v[1], &c__1);
+ *est = temp;
+ }
+
+L150:
+ *kase = 0;
+ return 0;
+
+/* End of SLACN2 */
+
+} /* slacn2_ */
diff --git a/contrib/libs/clapack/slacon.c b/contrib/libs/clapack/slacon.c
new file mode 100644
index 0000000000..ece86a390d
--- /dev/null
+++ b/contrib/libs/clapack/slacon.c
@@ -0,0 +1,256 @@
+/* slacon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b11 = 1.f;
+
+/* Subroutine */ int slacon_(integer *n, real *v, real *x, integer *isgn,
+ real *est, integer *kase)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1;
+
+ /* Builtin functions */
+ double r_sign(real *, real *);
+ integer i_nint(real *);
+
+ /* Local variables */
+ static integer i__, j, iter;
+ static real temp;
+ static integer jump, jlast;
+ extern doublereal sasum_(integer *, real *, integer *);
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ static real altsgn, estold;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLACON estimates the 1-norm of a square, real matrix A. */
+/* Reverse communication is used for evaluating matrix-vector products. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 1. */
+
+/* V (workspace) REAL array, dimension (N) */
+/* On the final return, V = A*W, where EST = norm(V)/norm(W) */
+/* (W is not returned). */
+
+/* X (input/output) REAL array, dimension (N) */
+/* On an intermediate return, X should be overwritten by */
+/* A * X, if KASE=1, */
+/* A' * X, if KASE=2, */
+/* and SLACON must be re-called with all the other parameters */
+/* unchanged. */
+
+/* ISGN (workspace) INTEGER array, dimension (N) */
+
+/* EST (input/output) REAL */
+/* On entry with KASE = 1 or 2 and JUMP = 3, EST should be */
+/* unchanged from the previous call to SLACON. */
+/* On exit, EST is an estimate (a lower bound) for norm(A). */
+
+/* KASE (input/output) INTEGER */
+/* On the initial call to SLACON, KASE should be 0. */
+/* On an intermediate return, KASE will be 1 or 2, indicating */
+/* whether X should be overwritten by A * X or A' * X. */
+/* On the final return from SLACON, KASE will again be 0. */
+
+/* Further Details */
+/* ======= ======= */
+
+/* Contributed by Nick Higham, University of Manchester. */
+/* Originally named SONEST, dated March 16, 1988. */
+
+/* Reference: N.J. Higham, "FORTRAN codes for estimating the one-norm of */
+/* a real or complex matrix, with applications to condition estimation", */
+/* ACM Trans. Math. Soft., vol. 14, no. 4, pp. 381-396, December 1988. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Save statement .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --isgn;
+ --x;
+ --v;
+
+ /* Function Body */
+ if (*kase == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = 1.f / (real) (*n);
+/* L10: */
+ }
+ *kase = 1;
+ jump = 1;
+ return 0;
+ }
+
+ switch (jump) {
+ case 1: goto L20;
+ case 2: goto L40;
+ case 3: goto L70;
+ case 4: goto L110;
+ case 5: goto L140;
+ }
+
+/* ................ ENTRY (JUMP = 1) */
+/* FIRST ITERATION. X HAS BEEN OVERWRITTEN BY A*X. */
+
+L20:
+ if (*n == 1) {
+ v[1] = x[1];
+ *est = dabs(v[1]);
+/* ... QUIT */
+ goto L150;
+ }
+ *est = sasum_(n, &x[1], &c__1);
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = r_sign(&c_b11, &x[i__]);
+ isgn[i__] = i_nint(&x[i__]);
+/* L30: */
+ }
+ *kase = 2;
+ jump = 2;
+ return 0;
+
+/* ................ ENTRY (JUMP = 2) */
+/* FIRST ITERATION. X HAS BEEN OVERWRITTEN BY TRANSPOSE(A)*X. */
+
+L40:
+ j = isamax_(n, &x[1], &c__1);
+ iter = 2;
+
+/* MAIN LOOP - ITERATIONS 2,3,...,ITMAX. */
+
+L50:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = 0.f;
+/* L60: */
+ }
+ x[j] = 1.f;
+ *kase = 1;
+ jump = 3;
+ return 0;
+
+/* ................ ENTRY (JUMP = 3) */
+/* X HAS BEEN OVERWRITTEN BY A*X. */
+
+L70:
+ scopy_(n, &x[1], &c__1, &v[1], &c__1);
+ estold = *est;
+ *est = sasum_(n, &v[1], &c__1);
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__1 = r_sign(&c_b11, &x[i__]);
+ if (i_nint(&r__1) != isgn[i__]) {
+ goto L90;
+ }
+/* L80: */
+ }
+/* REPEATED SIGN VECTOR DETECTED, HENCE ALGORITHM HAS CONVERGED. */
+ goto L120;
+
+L90:
+/* TEST FOR CYCLING. */
+ if (*est <= estold) {
+ goto L120;
+ }
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = r_sign(&c_b11, &x[i__]);
+ isgn[i__] = i_nint(&x[i__]);
+/* L100: */
+ }
+ *kase = 2;
+ jump = 4;
+ return 0;
+
+/* ................ ENTRY (JUMP = 4) */
+/* X HAS BEEN OVERWRITTEN BY TRANSPOSE(A)*X. */
+
+L110:
+ jlast = j;
+ j = isamax_(n, &x[1], &c__1);
+ if (x[jlast] != (r__1 = x[j], dabs(r__1)) && iter < 5) {
+ ++iter;
+ goto L50;
+ }
+
+/* ITERATION COMPLETE. FINAL STAGE. */
+
+L120:
+ altsgn = 1.f;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = altsgn * ((real) (i__ - 1) / (real) (*n - 1) + 1.f);
+ altsgn = -altsgn;
+/* L130: */
+ }
+ *kase = 1;
+ jump = 5;
+ return 0;
+
+/* ................ ENTRY (JUMP = 5) */
+/* X HAS BEEN OVERWRITTEN BY A*X. */
+
+L140:
+ temp = sasum_(n, &x[1], &c__1) / (real) (*n * 3) * 2.f;
+ if (temp > *est) {
+ scopy_(n, &x[1], &c__1, &v[1], &c__1);
+ *est = temp;
+ }
+
+L150:
+ *kase = 0;
+ return 0;
+
+/* End of SLACON */
+
+} /* slacon_ */
diff --git a/contrib/libs/clapack/slacpy.c b/contrib/libs/clapack/slacpy.c
new file mode 100644
index 0000000000..5dcb2fce36
--- /dev/null
+++ b/contrib/libs/clapack/slacpy.c
@@ -0,0 +1,125 @@
+/* slacpy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slacpy_(char *uplo, integer *m, integer *n, real *a,
+ integer *lda, real *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLACPY copies all or part of a two-dimensional matrix A to another */
+/* matrix B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies the part of the matrix A to be copied to B. */
+/* = 'U': Upper triangular part */
+/* = 'L': Lower triangular part */
+/* Otherwise: All of the matrix A */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The m by n matrix A. If UPLO = 'U', only the upper triangle */
+/* or trapezoid is accessed; if UPLO = 'L', only the lower */
+/* triangle or trapezoid is accessed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (output) REAL array, dimension (LDB,N) */
+/* On exit, B = A in the locations specified by UPLO. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(j,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = a[i__ + j * a_dim1];
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if (lsame_(uplo, "L")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = a[i__ + j * a_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = a[i__ + j * a_dim1];
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+ return 0;
+
+/* End of SLACPY */
+
+} /* slacpy_ */
diff --git a/contrib/libs/clapack/sladiv.c b/contrib/libs/clapack/sladiv.c
new file mode 100644
index 0000000000..af4e0c5bfd
--- /dev/null
+++ b/contrib/libs/clapack/sladiv.c
@@ -0,0 +1,78 @@
+/* sladiv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sladiv_(real *a, real *b, real *c__, real *d__, real *p,
+	real *q)
+{
+    real e, f;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLADIV performs complex division in  real arithmetic */
+
+/*                        a + i*b */
+/*             p + i*q = --------- */
+/*                        c + i*d */
+
+/*  The algorithm is due to Robert L. Smith and can be found */
+/*  in D. Knuth, The art of Computer Programming, Vol.2, p.195 */
+
+/*  Arguments */
+/*  ========= */
+
+/*  A       (input) REAL */
+/*  B       (input) REAL */
+/*  C       (input) REAL */
+/*  D       (input) REAL */
+/*          The scalars a, b, c, and d in the above expression. */
+
+/*  P       (output) REAL */
+/*  Q       (output) REAL */
+/*          The scalars p and q in the above expression. */
+
+/*  ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Branch on the smaller of |c| and |d| so that the ratio e has */
+/*     magnitude <= 1; this keeps the intermediate products and the */
+/*     scaled denominator f from overflowing (Smith's algorithm). */
+
+    if (dabs(*d__) < dabs(*c__)) {
+	e = *d__ / *c__;
+	f = *c__ + *d__ * e;
+	*p = (*a + *b * e) / f;
+	*q = (*b - *a * e) / f;
+    } else {
+	e = *c__ / *d__;
+	f = *d__ + *c__ * e;
+	*p = (*b + *a * e) / f;
+	*q = (-(*a) + *b * e) / f;
+    }
+
+    return 0;
+
+/*     End of SLADIV */
+
+} /* sladiv_ */
diff --git a/contrib/libs/clapack/slae2.c b/contrib/libs/clapack/slae2.c
new file mode 100644
index 0000000000..c82234cbd4
--- /dev/null
+++ b/contrib/libs/clapack/slae2.c
@@ -0,0 +1,141 @@
+/* slae2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slae2_(real *a, real *b, real *c__, real *rt1, real *rt2)
+{
+    /* System generated locals */
+    real r__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    real ab, df, tb, sm, rt, adf, acmn, acmx;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLAE2  computes the eigenvalues of a 2-by-2 symmetric matrix */
+/*     [  A   B  ] */
+/*     [  B   C  ]. */
+/*  On return, RT1 is the eigenvalue of larger absolute value, and RT2 */
+/*  is the eigenvalue of smaller absolute value. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  A       (input) REAL */
+/*          The (1,1) element of the 2-by-2 matrix. */
+
+/*  B       (input) REAL */
+/*          The (1,2) and (2,1) elements of the 2-by-2 matrix. */
+
+/*  C       (input) REAL */
+/*          The (2,2) element of the 2-by-2 matrix. */
+
+/*  RT1     (output) REAL */
+/*          The eigenvalue of larger absolute value. */
+
+/*  RT2     (output) REAL */
+/*          The eigenvalue of smaller absolute value. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  RT1 is accurate to a few ulps barring over/underflow. */
+
+/*  RT2 may be inaccurate if there is massive cancellation in the */
+/*  determinant A*C-B*B; higher precision or correctly rounded or */
+/*  correctly truncated arithmetic would be needed to compute RT2 */
+/*  accurately in all cases. */
+
+/*  Overflow is possible only if RT1 is within a factor of 5 of overflow. */
+/*  Underflow is harmless if the input data is 0 or exceeds */
+/*     underflow_threshold / macheps. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Compute the eigenvalues */
+
+    sm = *a + *c__;
+    df = *a - *c__;
+    adf = dabs(df);
+    tb = *b + *b;
+    ab = dabs(tb);
+    if (dabs(*a) > dabs(*c__)) {
+	acmx = *a;
+	acmn = *c__;
+    } else {
+	acmx = *c__;
+	acmn = *a;
+    }
+
+/*     rt = sqrt( (a-c)**2 + (2b)**2 ): the larger of ADF and AB is */
+/*     factored out of the square root to avoid over/underflow in the */
+/*     squared term. */
+
+    if (adf > ab) {
+/* Computing 2nd power */
+	r__1 = ab / adf;
+	rt = adf * sqrt(r__1 * r__1 + 1.f);
+    } else if (adf < ab) {
+/* Computing 2nd power */
+	r__1 = adf / ab;
+	rt = ab * sqrt(r__1 * r__1 + 1.f);
+    } else {
+
+/*        Includes case AB=ADF=0 */
+
+	rt = ab * sqrt(2.f);
+    }
+
+/*     RT1 = (sm -/+ rt)/2 takes the sign of SM (no cancellation); */
+/*     RT2 is then recovered as det/RT1 = (acmx*acmn - b*b)/rt1. */
+
+    if (sm < 0.f) {
+	*rt1 = (sm - rt) * .5f;
+
+/*        Order of execution important. */
+/*        To get fully accurate smaller eigenvalue, */
+/*        next line needs to be executed in higher precision. */
+
+	*rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
+    } else if (sm > 0.f) {
+	*rt1 = (sm + rt) * .5f;
+
+/*        Order of execution important. */
+/*        To get fully accurate smaller eigenvalue, */
+/*        next line needs to be executed in higher precision. */
+
+	*rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
+    } else {
+
+/*        Includes case RT1 = RT2 = 0 */
+
+	*rt1 = rt * .5f;
+	*rt2 = rt * -.5f;
+    }
+    return 0;
+
+/*     End of SLAE2 */
+
+} /* slae2_ */
diff --git a/contrib/libs/clapack/slaebz.c b/contrib/libs/clapack/slaebz.c
new file mode 100644
index 0000000000..e7aaac073b
--- /dev/null
+++ b/contrib/libs/clapack/slaebz.c
@@ -0,0 +1,639 @@
+/* slaebz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slaebz_(integer *ijob, integer *nitmax, integer *n,
+	integer *mmax, integer *minp, integer *nbmin, real *abstol, real *
+	reltol, real *pivmin, real *d__, real *e, real *e2, integer *nval,
+	real *ab, real *c__, integer *mout, integer *nab, real *work, integer
+	*iwork, integer *info)
+{
+    /* System generated locals */
+    integer nab_dim1, nab_offset, ab_dim1, ab_offset, i__1, i__2, i__3, i__4,
+	    i__5, i__6;
+    real r__1, r__2, r__3, r__4;
+
+    /* Local variables */
+    integer j, kf, ji, kl, jp, jit;
+    real tmp1, tmp2;
+    integer itmp1, itmp2, kfnew, klnew;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLAEBZ contains the iteration loops which compute and use the */
+/*  function N(w), which is the count of eigenvalues of a symmetric */
+/*  tridiagonal matrix T less than or equal to its argument  w.  It */
+/*  performs a choice of two types of loops: */
+
+/*  IJOB=1, followed by */
+/*  IJOB=2: It takes as input a list of intervals and returns a list of */
+/*          sufficiently small intervals whose union contains the same */
+/*          eigenvalues as the union of the original intervals. */
+/*          The input intervals are (AB(j,1),AB(j,2)], j=1,...,MINP. */
+/*          The output interval (AB(j,1),AB(j,2)] will contain */
+/*          eigenvalues NAB(j,1)+1,...,NAB(j,2), where 1 <= j <= MOUT. */
+
+/*  IJOB=3: It performs a binary search in each input interval */
+/*          (AB(j,1),AB(j,2)] for a point  w(j)  such that */
+/*          N(w(j))=NVAL(j), and uses  C(j)  as the starting point of */
+/*          the search.  If such a w(j) is found, then on output */
+/*          AB(j,1)=AB(j,2)=w.  If no such w(j) is found, then on output */
+/*          (AB(j,1),AB(j,2)] will be a small interval containing the */
+/*          point where N(w) jumps through NVAL(j), unless that point */
+/*          lies outside the initial interval. */
+
+/*  Note that the intervals are in all cases half-open intervals, */
+/*  i.e., of the form  (a,b] , which includes  b  but not  a . */
+
+/*  To avoid underflow, the matrix should be scaled so that its largest */
+/*  element is no greater than  overflow**(1/2) * underflow**(1/4) */
+/*  in absolute value.  To assure the most accurate computation */
+/*  of small eigenvalues, the matrix should be scaled to be */
+/*  not much smaller than that, either. */
+
+/*  See W. Kahan "Accurate Eigenvalues of a Symmetric Tridiagonal */
+/*  Matrix", Report CS41, Computer Science Dept., Stanford */
+/*  University, July 21, 1966 */
+
+/*  Note: the arguments are, in general, *not* checked for unreasonable */
+/*  values. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  IJOB    (input) INTEGER */
+/*          Specifies what is to be done: */
+/*          = 1:  Compute NAB for the initial intervals. */
+/*          = 2:  Perform bisection iteration to find eigenvalues of T. */
+/*          = 3:  Perform bisection iteration to invert N(w), i.e., */
+/*                to find a point which has a specified number of */
+/*                eigenvalues of T to its left. */
+/*          Other values will cause SLAEBZ to return with INFO=-1. */
+
+/*  NITMAX  (input) INTEGER */
+/*          The maximum number of "levels" of bisection to be */
+/*          performed, i.e., an interval of width W will not be made */
+/*          smaller than 2^(-NITMAX) * W.  If not all intervals */
+/*          have converged after NITMAX iterations, then INFO is set */
+/*          to the number of non-converged intervals. */
+
+/*  N       (input) INTEGER */
+/*          The dimension n of the tridiagonal matrix T.  It must be at */
+/*          least 1. */
+
+/*  MMAX    (input) INTEGER */
+/*          The maximum number of intervals.  If more than MMAX intervals */
+/*          are generated, then SLAEBZ will quit with INFO=MMAX+1. */
+
+/*  MINP    (input) INTEGER */
+/*          The initial number of intervals.  It may not be greater than */
+/*          MMAX. */
+
+/*  NBMIN   (input) INTEGER */
+/*          The smallest number of intervals that should be processed */
+/*          using a vector loop.  If zero, then only the scalar loop */
+/*          will be used. */
+
+/*  ABSTOL  (input) REAL */
+/*          The minimum (absolute) width of an interval.  When an */
+/*          interval is narrower than ABSTOL, or than RELTOL times the */
+/*          larger (in magnitude) endpoint, then it is considered to be */
+/*          sufficiently small, i.e., converged.  This must be at least */
+/*          zero. */
+
+/*  RELTOL  (input) REAL */
+/*          The minimum relative width of an interval.  When an interval */
+/*          is narrower than ABSTOL, or than RELTOL times the larger (in */
+/*          magnitude) endpoint, then it is considered to be */
+/*          sufficiently small, i.e., converged.  Note: this should */
+/*          always be at least radix*machine epsilon. */
+
+/*  PIVMIN  (input) REAL */
+/*          The minimum absolute value of a "pivot" in the Sturm */
+/*          sequence loop.  This *must* be at least  max |e(j)**2| * */
+/*          safe_min  and at least safe_min, where safe_min is at least */
+/*          the smallest number that can divide one without overflow. */
+
+/*  D       (input) REAL array, dimension (N) */
+/*          The diagonal elements of the tridiagonal matrix T. */
+
+/*  E       (input) REAL array, dimension (N) */
+/*          The offdiagonal elements of the tridiagonal matrix T in */
+/*          positions 1 through N-1.  E(N) is arbitrary. */
+
+/*  E2      (input) REAL array, dimension (N) */
+/*          The squares of the offdiagonal elements of the tridiagonal */
+/*          matrix T.  E2(N) is ignored. */
+
+/*  NVAL    (input/output) INTEGER array, dimension (MINP) */
+/*          If IJOB=1 or 2, not referenced. */
+/*          If IJOB=3, the desired values of N(w).  The elements of NVAL */
+/*          will be reordered to correspond with the intervals in AB. */
+/*          Thus, NVAL(j) on output will not, in general be the same as */
+/*          NVAL(j) on input, but it will correspond with the interval */
+/*          (AB(j,1),AB(j,2)] on output. */
+
+/*  AB      (input/output) REAL array, dimension (MMAX,2) */
+/*          The endpoints of the intervals.  AB(j,1) is  a(j), the left */
+/*          endpoint of the j-th interval, and AB(j,2) is b(j), the */
+/*          right endpoint of the j-th interval.  The input intervals */
+/*          will, in general, be modified, split, and reordered by the */
+/*          calculation. */
+
+/*  C       (input/output) REAL array, dimension (MMAX) */
+/*          If IJOB=1, ignored. */
+/*          If IJOB=2, workspace. */
+/*          If IJOB=3, then on input C(j) should be initialized to the */
+/*          first search point in the binary search. */
+
+/*  MOUT    (output) INTEGER */
+/*          If IJOB=1, the number of eigenvalues in the intervals. */
+/*          If IJOB=2 or 3, the number of intervals output. */
+/*          If IJOB=3, MOUT will equal MINP. */
+
+/*  NAB     (input/output) INTEGER array, dimension (MMAX,2) */
+/*          If IJOB=1, then on output NAB(i,j) will be set to N(AB(i,j)). */
+/*          If IJOB=2, then on input, NAB(i,j) should be set.  It must */
+/*             satisfy the condition: */
+/*             N(AB(i,1)) <= NAB(i,1) <= NAB(i,2) <= N(AB(i,2)), */
+/*             which means that in interval i only eigenvalues */
+/*             NAB(i,1)+1,...,NAB(i,2) will be considered.  Usually, */
+/*             NAB(i,j)=N(AB(i,j)), from a previous call to SLAEBZ with */
+/*             IJOB=1. */
+/*             On output, NAB(i,j) will contain */
+/*             max(na(k),min(nb(k),N(AB(i,j)))), where k is the index of */
+/*             the input interval that the output interval */
+/*             (AB(j,1),AB(j,2)] came from, and na(k) and nb(k) are the */
+/*             the input values of NAB(k,1) and NAB(k,2). */
+/*          If IJOB=3, then on output, NAB(i,j) contains N(AB(i,j)), */
+/*             unless N(w) > NVAL(i) for all search points  w , in which */
+/*             case NAB(i,1) will not be modified, i.e., the output */
+/*             value will be the same as the input value (modulo */
+/*             reorderings -- see NVAL and AB), or unless N(w) < NVAL(i) */
+/*             for all search points  w , in which case NAB(i,2) will */
+/*             not be modified.  Normally, NAB should be set to some */
+/*             distinctive value(s) before SLAEBZ is called. */
+
+/*  WORK    (workspace) REAL array, dimension (MMAX) */
+/*          Workspace. */
+
+/*  IWORK   (workspace) INTEGER array, dimension (MMAX) */
+/*          Workspace. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:       All intervals converged. */
+/*          = 1--MMAX: The last INFO intervals did not converge. */
+/*          = MMAX+1:  More than MMAX intervals were generated. */
+
+/*  Further Details */
+/*  =============== */
+
+/*      This routine is intended to be called only by other LAPACK */
+/*  routines, thus the interface is less user-friendly.  It is intended */
+/*  for two purposes: */
+
+/*  (a) finding eigenvalues.  In this case, SLAEBZ should have one or */
+/*      more initial intervals set up in AB, and SLAEBZ should be called */
+/*      with IJOB=1.  This sets up NAB, and also counts the eigenvalues. */
+/*      Intervals with no eigenvalues would usually be thrown out at */
+/*      this point.  Also, if not all the eigenvalues in an interval i */
+/*      are desired, NAB(i,1) can be increased or NAB(i,2) decreased. */
+/*      For example, set NAB(i,1)=NAB(i,2)-1 to get the largest */
+/*      eigenvalue.  SLAEBZ is then called with IJOB=2 and MMAX */
+/*      no smaller than the value of MOUT returned by the call with */
+/*      IJOB=1.  After this (IJOB=2) call, eigenvalues NAB(i,1)+1 */
+/*      through NAB(i,2) are approximately AB(i,1) (or AB(i,2)) to the */
+/*      tolerance specified by ABSTOL and RELTOL. */
+
+/*  (b) finding an interval (a',b'] containing eigenvalues w(f),...,w(l). */
+/*      In this case, start with a Gershgorin interval  (a,b).  Set up */
+/*      AB to contain 2 search intervals, both initially (a,b).  One */
+/*      NVAL element should contain  f-1  and the other should contain  l */
+/*      , while C should contain a and b, resp.  NAB(i,1) should be -1 */
+/*      and NAB(i,2) should be N+1, to flag an error if the desired */
+/*      interval does not lie in (a,b).  SLAEBZ is then called with */
+/*      IJOB=3.  On exit, if w(f-1) < w(f), then one of the intervals -- */
+/*      j -- will have AB(j,1)=AB(j,2) and NAB(j,1)=NAB(j,2)=f-1, while */
+/*      if, to the specified tolerance, w(f-k)=...=w(f+r), k > 0 and r */
+/*      >= 0, then the interval will have  N(AB(j,1))=NAB(j,1)=f-k and */
+/*      N(AB(j,2))=NAB(j,2)=f+r.  The cases w(l) < w(l+1) and */
+/*      w(l-r)=...=w(l+k) are handled similarly. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Check for Errors */
+
+    /* Parameter adjustments */
+    nab_dim1 = *mmax;
+    nab_offset = 1 + nab_dim1;
+    nab -= nab_offset;
+    ab_dim1 = *mmax;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --d__;
+    --e;
+    --e2;
+    --nval;
+    --c__;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    if (*ijob < 1 || *ijob > 3) {
+	*info = -1;
+	return 0;
+    }
+
+/*     Initialize NAB */
+
+    if (*ijob == 1) {
+
+/*        Compute the number of eigenvalues in the initial intervals. */
+
+	*mout = 0;
+/* DIR$ NOVECTOR */
+	i__1 = *minp;
+	for (ji = 1; ji <= i__1; ++ji) {
+	    for (jp = 1; jp <= 2; ++jp) {
+		tmp1 = d__[1] - ab[ji + jp * ab_dim1];
+		if (dabs(tmp1) < *pivmin) {
+		    tmp1 = -(*pivmin);
+		}
+		nab[ji + jp * nab_dim1] = 0;
+		if (tmp1 <= 0.f) {
+		    nab[ji + jp * nab_dim1] = 1;
+		}
+
+/*              Sturm sequence: continue the pivot recurrence */
+/*              d(j) - e2(j-1)/pivot and count nonpositive pivots; */
+/*              pivots are clamped to -PIVMIN to avoid dividing by */
+/*              a tiny number on the next step. */
+
+		i__2 = *n;
+		for (j = 2; j <= i__2; ++j) {
+		    tmp1 = d__[j] - e2[j - 1] / tmp1 - ab[ji + jp * ab_dim1];
+		    if (dabs(tmp1) < *pivmin) {
+			tmp1 = -(*pivmin);
+		    }
+		    if (tmp1 <= 0.f) {
+			++nab[ji + jp * nab_dim1];
+		    }
+/* L10: */
+		}
+/* L20: */
+	    }
+	    *mout = *mout + nab[ji + (nab_dim1 << 1)] - nab[ji + nab_dim1];
+/* L30: */
+	}
+	return 0;
+    }
+
+/*     Initialize for loop */
+
+/*     KF and KL have the following meaning: */
+/*        Intervals 1,...,KF-1 have converged. */
+/*        Intervals KF,...,KL  still need to be refined. */
+
+    kf = 1;
+    kl = *minp;
+
+/*     If IJOB=2, initialize C. */
+/*     If IJOB=3, use the user-supplied starting point. */
+
+    if (*ijob == 2) {
+	i__1 = *minp;
+	for (ji = 1; ji <= i__1; ++ji) {
+	    c__[ji] = (ab[ji + ab_dim1] + ab[ji + (ab_dim1 << 1)]) * .5f;
+/* L40: */
+	}
+    }
+
+/*     Iteration loop */
+
+    i__1 = *nitmax;
+    for (jit = 1; jit <= i__1; ++jit) {
+
+/*        Loop over intervals */
+
+	if (kl - kf + 1 >= *nbmin && *nbmin > 0) {
+
+/*           Begin of Parallel Version of the loop */
+
+	    i__2 = kl;
+	    for (ji = kf; ji <= i__2; ++ji) {
+
+/*              Compute N(c), the number of eigenvalues less than c */
+
+		work[ji] = d__[1] - c__[ji];
+		iwork[ji] = 0;
+		if (work[ji] <= *pivmin) {
+		    iwork[ji] = 1;
+/* Computing MIN */
+		    r__1 = work[ji], r__2 = -(*pivmin);
+		    work[ji] = dmin(r__1,r__2);
+		}
+
+		i__3 = *n;
+		for (j = 2; j <= i__3; ++j) {
+		    work[ji] = d__[j] - e2[j - 1] / work[ji] - c__[ji];
+		    if (work[ji] <= *pivmin) {
+			++iwork[ji];
+/* Computing MIN */
+			r__1 = work[ji], r__2 = -(*pivmin);
+			work[ji] = dmin(r__1,r__2);
+		    }
+/* L50: */
+		}
+/* L60: */
+	    }
+
+	    if (*ijob <= 2) {
+
+/*              IJOB=2: Choose all intervals containing eigenvalues. */
+
+		klnew = kl;
+		i__2 = kl;
+		for (ji = kf; ji <= i__2; ++ji) {
+
+/*                 Insure that N(w) is monotone */
+
+/* Computing MIN */
+/* Computing MAX */
+		    i__5 = nab[ji + nab_dim1], i__6 = iwork[ji];
+		    i__3 = nab[ji + (nab_dim1 << 1)], i__4 = max(i__5,i__6);
+		    iwork[ji] = min(i__3,i__4);
+
+/*                 Update the Queue -- add intervals if both halves */
+/*                 contain eigenvalues. */
+
+		    if (iwork[ji] == nab[ji + (nab_dim1 << 1)]) {
+
+/*                    No eigenvalue in the upper interval: */
+/*                    just use the lower interval. */
+
+			ab[ji + (ab_dim1 << 1)] = c__[ji];
+
+		    } else if (iwork[ji] == nab[ji + nab_dim1]) {
+
+/*                    No eigenvalue in the lower interval: */
+/*                    just use the upper interval. */
+
+			ab[ji + ab_dim1] = c__[ji];
+		    } else {
+			++klnew;
+			if (klnew <= *mmax) {
+
+/*                       Eigenvalue in both intervals -- add upper to */
+/*                       queue. */
+
+			    ab[klnew + (ab_dim1 << 1)] = ab[ji + (ab_dim1 <<
+				    1)];
+			    nab[klnew + (nab_dim1 << 1)] = nab[ji + (nab_dim1
+				    << 1)];
+			    ab[klnew + ab_dim1] = c__[ji];
+			    nab[klnew + nab_dim1] = iwork[ji];
+			    ab[ji + (ab_dim1 << 1)] = c__[ji];
+			    nab[ji + (nab_dim1 << 1)] = iwork[ji];
+			} else {
+			    *info = *mmax + 1;
+			}
+		    }
+/* L70: */
+		}
+		if (*info != 0) {
+		    return 0;
+		}
+		kl = klnew;
+	    } else {
+
+/*              IJOB=3: Binary search.  Keep only the interval containing */
+/*                      w   s.t. N(w) = NVAL */
+
+		i__2 = kl;
+		for (ji = kf; ji <= i__2; ++ji) {
+		    if (iwork[ji] <= nval[ji]) {
+			ab[ji + ab_dim1] = c__[ji];
+			nab[ji + nab_dim1] = iwork[ji];
+		    }
+		    if (iwork[ji] >= nval[ji]) {
+			ab[ji + (ab_dim1 << 1)] = c__[ji];
+			nab[ji + (nab_dim1 << 1)] = iwork[ji];
+		    }
+/* L80: */
+		}
+	    }
+
+	} else {
+
+/*           End of Parallel Version of the loop */
+
+/*           Begin of Serial Version of the loop */
+
+	    klnew = kl;
+	    i__2 = kl;
+	    for (ji = kf; ji <= i__2; ++ji) {
+
+/*              Compute N(w), the number of eigenvalues less than w */
+
+		tmp1 = c__[ji];
+		tmp2 = d__[1] - tmp1;
+		itmp1 = 0;
+		if (tmp2 <= *pivmin) {
+		    itmp1 = 1;
+/* Computing MIN */
+		    r__1 = tmp2, r__2 = -(*pivmin);
+		    tmp2 = dmin(r__1,r__2);
+		}
+
+/*              A series of compiler directives to defeat vectorization */
+/*              for the next loop */
+
+/* $PL$ CMCHAR=' ' */
+/* DIR$          NEXTSCALAR */
+/* $DIR          SCALAR */
+/* DIR$          NEXT SCALAR */
+/* VD$L          NOVECTOR */
+/* DEC$          NOVECTOR */
+/* VD$           NOVECTOR */
+/* VDIR          NOVECTOR */
+/* VOCL          LOOP,SCALAR */
+/* IBM           PREFER SCALAR */
+/* $PL$ CMCHAR='*' */
+
+		i__3 = *n;
+		for (j = 2; j <= i__3; ++j) {
+		    tmp2 = d__[j] - e2[j - 1] / tmp2 - tmp1;
+		    if (tmp2 <= *pivmin) {
+			++itmp1;
+/* Computing MIN */
+			r__1 = tmp2, r__2 = -(*pivmin);
+			tmp2 = dmin(r__1,r__2);
+		    }
+/* L90: */
+		}
+
+		if (*ijob <= 2) {
+
+/*                 IJOB=2: Choose all intervals containing eigenvalues. */
+
+/*                 Insure that N(w) is monotone */
+
+/* Computing MIN */
+/* Computing MAX */
+		    i__5 = nab[ji + nab_dim1];
+		    i__3 = nab[ji + (nab_dim1 << 1)], i__4 = max(i__5,itmp1);
+		    itmp1 = min(i__3,i__4);
+
+/*                 Update the Queue -- add intervals if both halves */
+/*                 contain eigenvalues. */
+
+		    if (itmp1 == nab[ji + (nab_dim1 << 1)]) {
+
+/*                    No eigenvalue in the upper interval: */
+/*                    just use the lower interval. */
+
+			ab[ji + (ab_dim1 << 1)] = tmp1;
+
+		    } else if (itmp1 == nab[ji + nab_dim1]) {
+
+/*                    No eigenvalue in the lower interval: */
+/*                    just use the upper interval. */
+
+			ab[ji + ab_dim1] = tmp1;
+		    } else if (klnew < *mmax) {
+
+/*                    Eigenvalue in both intervals -- add upper to queue. */
+
+			++klnew;
+			ab[klnew + (ab_dim1 << 1)] = ab[ji + (ab_dim1 << 1)];
+			nab[klnew + (nab_dim1 << 1)] = nab[ji + (nab_dim1 <<
+				1)];
+			ab[klnew + ab_dim1] = tmp1;
+			nab[klnew + nab_dim1] = itmp1;
+			ab[ji + (ab_dim1 << 1)] = tmp1;
+			nab[ji + (nab_dim1 << 1)] = itmp1;
+		    } else {
+			*info = *mmax + 1;
+			return 0;
+		    }
+		} else {
+
+/*                 IJOB=3: Binary search.  Keep only the interval */
+/*                         containing  w  s.t. N(w) = NVAL */
+
+		    if (itmp1 <= nval[ji]) {
+			ab[ji + ab_dim1] = tmp1;
+			nab[ji + nab_dim1] = itmp1;
+		    }
+		    if (itmp1 >= nval[ji]) {
+			ab[ji + (ab_dim1 << 1)] = tmp1;
+			nab[ji + (nab_dim1 << 1)] = itmp1;
+		    }
+		}
+/* L100: */
+	    }
+	    kl = klnew;
+
+/*           End of Serial Version of the loop */
+
+	}
+
+/*        Check for convergence */
+
+	kfnew = kf;
+	i__2 = kl;
+	for (ji = kf; ji <= i__2; ++ji) {
+	    tmp1 = (r__1 = ab[ji + (ab_dim1 << 1)] - ab[ji + ab_dim1], dabs(
+		    r__1));
+/* Computing MAX */
+	    r__3 = (r__1 = ab[ji + (ab_dim1 << 1)], dabs(r__1)), r__4 = (r__2
+		    = ab[ji + ab_dim1], dabs(r__2));
+	    tmp2 = dmax(r__3,r__4);
+/* Computing MAX */
+	    r__1 = max(*abstol,*pivmin), r__2 = *reltol * tmp2;
+	    if (tmp1 < dmax(r__1,r__2) || nab[ji + nab_dim1] >= nab[ji + (
+		    nab_dim1 << 1)]) {
+
+/*              Converged -- Swap with position KFNEW, */
+/*                           then increment KFNEW */
+
+		if (ji > kfnew) {
+		    tmp1 = ab[ji + ab_dim1];
+		    tmp2 = ab[ji + (ab_dim1 << 1)];
+		    itmp1 = nab[ji + nab_dim1];
+		    itmp2 = nab[ji + (nab_dim1 << 1)];
+		    ab[ji + ab_dim1] = ab[kfnew + ab_dim1];
+		    ab[ji + (ab_dim1 << 1)] = ab[kfnew + (ab_dim1 << 1)];
+		    nab[ji + nab_dim1] = nab[kfnew + nab_dim1];
+		    nab[ji + (nab_dim1 << 1)] = nab[kfnew + (nab_dim1 << 1)];
+		    ab[kfnew + ab_dim1] = tmp1;
+		    ab[kfnew + (ab_dim1 << 1)] = tmp2;
+		    nab[kfnew + nab_dim1] = itmp1;
+		    nab[kfnew + (nab_dim1 << 1)] = itmp2;
+		    if (*ijob == 3) {
+			itmp1 = nval[ji];
+			nval[ji] = nval[kfnew];
+			nval[kfnew] = itmp1;
+		    }
+		}
+		++kfnew;
+	    }
+/* L110: */
+	}
+	kf = kfnew;
+
+/*        Choose Midpoints */
+
+	i__2 = kl;
+	for (ji = kf; ji <= i__2; ++ji) {
+	    c__[ji] = (ab[ji + ab_dim1] + ab[ji + (ab_dim1 << 1)]) * .5f;
+/* L120: */
+	}
+
+/*        If no more intervals to refine, quit. */
+
+	if (kf > kl) {
+	    goto L140;
+	}
+/* L130: */
+    }
+
+/*     Converged */
+
+L140:
+
+/*     INFO returns the count of still-unconverged intervals, */
+/*     KL+1-KF (zero when all intervals converged). */
+
+/* Computing MAX */
+    i__1 = kl + 1 - kf;
+    *info = max(i__1,0);
+    *mout = kl;
+
+    return 0;
+
+/*     End of SLAEBZ */
+
+} /* slaebz_ */
diff --git a/contrib/libs/clapack/slaed0.c b/contrib/libs/clapack/slaed0.c
new file mode 100644
index 0000000000..589956ab7c
--- /dev/null
+++ b/contrib/libs/clapack/slaed0.c
@@ -0,0 +1,435 @@
+/* slaed0.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__9 = 9;
+static integer c__0 = 0;
+static integer c__2 = 2;
+static real c_b23 = 1.f;
+static real c_b24 = 0.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int slaed0_(integer *icompq, integer *qsiz, integer *n, real
+ *d__, real *e, real *q, integer *ldq, real *qstore, integer *ldqs,
+ real *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, qstore_dim1, qstore_offset, i__1, i__2;
+ real r__1;
+
+ /* Builtin functions */
+ double log(doublereal);
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ integer i__, j, k, iq, lgn, msd2, smm1, spm1, spm2;
+ real temp;
+ integer curr;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ integer iperm, indxq, iwrem;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ integer iqptr, tlvls;
+ extern /* Subroutine */ int slaed1_(integer *, real *, real *, integer *,
+ integer *, real *, integer *, real *, integer *, integer *),
+ slaed7_(integer *, integer *, integer *, integer *, integer *,
+ integer *, real *, real *, integer *, integer *, real *, integer *
+, real *, integer *, integer *, integer *, integer *, integer *,
+ real *, real *, integer *, integer *);
+ integer igivcl;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer igivnm, submat;
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *);
+ integer curprb, subpbs, igivpt, curlvl, matsiz, iprmpt, smlsiz;
+ extern /* Subroutine */ int ssteqr_(char *, integer *, real *, real *,
+ real *, integer *, real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAED0 computes all eigenvalues and corresponding eigenvectors of a */
+/* symmetric tridiagonal matrix using the divide and conquer method. */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* = 0: Compute eigenvalues only. */
+/* = 1: Compute eigenvectors of original dense symmetric matrix */
+/* also. On entry, Q contains the orthogonal matrix used */
+/* to reduce the original matrix to tridiagonal form. */
+/* = 2: Compute eigenvalues and eigenvectors of tridiagonal */
+/* matrix. */
+
+/* QSIZ (input) INTEGER */
+/* The dimension of the orthogonal matrix used to reduce */
+/* the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1. */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the main diagonal of the tridiagonal matrix. */
+/* On exit, its eigenvalues. */
+
+/* E (input) REAL array, dimension (N-1) */
+/* The off-diagonal elements of the tridiagonal matrix. */
+/* On exit, E has been destroyed. */
+
+/* Q (input/output) REAL array, dimension (LDQ, N) */
+/* On entry, Q must contain an N-by-N orthogonal matrix. */
+/* If ICOMPQ = 0 Q is not referenced. */
+/* If ICOMPQ = 1 On entry, Q is a subset of the columns of the */
+/* orthogonal matrix used to reduce the full */
+/* matrix to tridiagonal form corresponding to */
+/* the subset of the full matrix which is being */
+/* decomposed at this time. */
+/* If ICOMPQ = 2 On entry, Q will be the identity matrix. */
+/* On exit, Q contains the eigenvectors of the */
+/* tridiagonal matrix. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. If eigenvectors are */
+/* desired, then LDQ >= max(1,N). In any case, LDQ >= 1. */
+
+/* QSTORE (workspace) REAL array, dimension (LDQS, N) */
+/* Referenced only when ICOMPQ = 1. Used to store parts of */
+/* the eigenvector matrix when the updating matrix multiplies */
+/* take place. */
+
+/* LDQS (input) INTEGER */
+/* The leading dimension of the array QSTORE. If ICOMPQ = 1, */
+/* then LDQS >= max(1,N). In any case, LDQS >= 1. */
+
+/* WORK (workspace) REAL array, */
+/* If ICOMPQ = 0 or 1, the dimension of WORK must be at least */
+/* 1 + 3*N + 2*N*lg N + 2*N**2 */
+/* ( lg( N ) = smallest integer k */
+/* such that 2^k >= N ) */
+/* If ICOMPQ = 2, the dimension of WORK must be at least */
+/* 4*N + N**2. */
+
+/* IWORK (workspace) INTEGER array, */
+/* If ICOMPQ = 0 or 1, the dimension of IWORK must be at least */
+/* 6 + 6*N + 5*N*lg N. */
+/* ( lg( N ) = smallest integer k */
+/* such that 2^k >= N ) */
+/* If ICOMPQ = 2, the dimension of IWORK must be at least */
+/* 3 + 5*N. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: The algorithm failed to compute an eigenvalue while */
+/* working on the submatrix lying in rows and columns */
+/* INFO/(N+1) through mod(INFO,N+1). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ qstore_dim1 = *ldqs;
+ qstore_offset = 1 + qstore_dim1;
+ qstore -= qstore_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*icompq < 0 || *icompq > 2) {
+ *info = -1;
+ } else if (*icompq == 1 && *qsiz < max(0,*n)) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ldq < max(1,*n)) {
+ *info = -7;
+ } else if (*ldqs < max(1,*n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLAED0", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ smlsiz = ilaenv_(&c__9, "SLAED0", " ", &c__0, &c__0, &c__0, &c__0);
+
+/* Determine the size and placement of the submatrices, and save in */
+/* the leading elements of IWORK. */
+
+ iwork[1] = *n;
+ subpbs = 1;
+ tlvls = 0;
+L10:
+ if (iwork[subpbs] > smlsiz) {
+ for (j = subpbs; j >= 1; --j) {
+ iwork[j * 2] = (iwork[j] + 1) / 2;
+ iwork[(j << 1) - 1] = iwork[j] / 2;
+/* L20: */
+ }
+ ++tlvls;
+ subpbs <<= 1;
+ goto L10;
+ }
+ i__1 = subpbs;
+ for (j = 2; j <= i__1; ++j) {
+ iwork[j] += iwork[j - 1];
+/* L30: */
+ }
+
+/* Divide the matrix into SUBPBS submatrices of size at most SMLSIZ+1 */
+/* using rank-1 modifications (cuts). */
+
+ spm1 = subpbs - 1;
+ i__1 = spm1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ submat = iwork[i__] + 1;
+ smm1 = submat - 1;
+ d__[smm1] -= (r__1 = e[smm1], dabs(r__1));
+ d__[submat] -= (r__1 = e[smm1], dabs(r__1));
+/* L40: */
+ }
+
+ indxq = (*n << 2) + 3;
+ if (*icompq != 2) {
+
+/* Set up workspaces for eigenvalues only/accumulate new vectors */
+/* routine */
+
+ temp = log((real) (*n)) / log(2.f);
+ lgn = (integer) temp;
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ iprmpt = indxq + *n + 1;
+ iperm = iprmpt + *n * lgn;
+ iqptr = iperm + *n * lgn;
+ igivpt = iqptr + *n + 2;
+ igivcl = igivpt + *n * lgn;
+
+ igivnm = 1;
+ iq = igivnm + (*n << 1) * lgn;
+/* Computing 2nd power */
+ i__1 = *n;
+ iwrem = iq + i__1 * i__1 + 1;
+
+/* Initialize pointers */
+
+ i__1 = subpbs;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ iwork[iprmpt + i__] = 1;
+ iwork[igivpt + i__] = 1;
+/* L50: */
+ }
+ iwork[iqptr] = 1;
+ }
+
+/* Solve each submatrix eigenproblem at the bottom of the divide and */
+/* conquer tree. */
+
+ curr = 0;
+ i__1 = spm1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ if (i__ == 0) {
+ submat = 1;
+ matsiz = iwork[1];
+ } else {
+ submat = iwork[i__] + 1;
+ matsiz = iwork[i__ + 1] - iwork[i__];
+ }
+ if (*icompq == 2) {
+ ssteqr_("I", &matsiz, &d__[submat], &e[submat], &q[submat +
+ submat * q_dim1], ldq, &work[1], info);
+ if (*info != 0) {
+ goto L130;
+ }
+ } else {
+ ssteqr_("I", &matsiz, &d__[submat], &e[submat], &work[iq - 1 +
+ iwork[iqptr + curr]], &matsiz, &work[1], info);
+ if (*info != 0) {
+ goto L130;
+ }
+ if (*icompq == 1) {
+ sgemm_("N", "N", qsiz, &matsiz, &matsiz, &c_b23, &q[submat *
+ q_dim1 + 1], ldq, &work[iq - 1 + iwork[iqptr + curr]],
+ &matsiz, &c_b24, &qstore[submat * qstore_dim1 + 1],
+ ldqs);
+ }
+/* Computing 2nd power */
+ i__2 = matsiz;
+ iwork[iqptr + curr + 1] = iwork[iqptr + curr] + i__2 * i__2;
+ ++curr;
+ }
+ k = 1;
+ i__2 = iwork[i__ + 1];
+ for (j = submat; j <= i__2; ++j) {
+ iwork[indxq + j] = k;
+ ++k;
+/* L60: */
+ }
+/* L70: */
+ }
+
+/* Successively merge eigensystems of adjacent submatrices */
+/* into eigensystem for the corresponding larger matrix. */
+
+/* while ( SUBPBS > 1 ) */
+
+ curlvl = 1;
+L80:
+ if (subpbs > 1) {
+ spm2 = subpbs - 2;
+ i__1 = spm2;
+ for (i__ = 0; i__ <= i__1; i__ += 2) {
+ if (i__ == 0) {
+ submat = 1;
+ matsiz = iwork[2];
+ msd2 = iwork[1];
+ curprb = 0;
+ } else {
+ submat = iwork[i__] + 1;
+ matsiz = iwork[i__ + 2] - iwork[i__];
+ msd2 = matsiz / 2;
+ ++curprb;
+ }
+
+/* Merge lower order eigensystems (of size MSD2 and MATSIZ - MSD2) */
+/* into an eigensystem of size MATSIZ. */
+/* SLAED1 is used only for the full eigensystem of a tridiagonal */
+/* matrix. */
+/* SLAED7 handles the cases in which eigenvalues only or eigenvalues */
+/* and eigenvectors of a full symmetric matrix (which was reduced to */
+/* tridiagonal form) are desired. */
+
+ if (*icompq == 2) {
+ slaed1_(&matsiz, &d__[submat], &q[submat + submat * q_dim1],
+ ldq, &iwork[indxq + submat], &e[submat + msd2 - 1], &
+ msd2, &work[1], &iwork[subpbs + 1], info);
+ } else {
+ slaed7_(icompq, &matsiz, qsiz, &tlvls, &curlvl, &curprb, &d__[
+ submat], &qstore[submat * qstore_dim1 + 1], ldqs, &
+ iwork[indxq + submat], &e[submat + msd2 - 1], &msd2, &
+ work[iq], &iwork[iqptr], &iwork[iprmpt], &iwork[iperm]
+, &iwork[igivpt], &iwork[igivcl], &work[igivnm], &
+ work[iwrem], &iwork[subpbs + 1], info);
+ }
+ if (*info != 0) {
+ goto L130;
+ }
+ iwork[i__ / 2 + 1] = iwork[i__ + 2];
+/* L90: */
+ }
+ subpbs /= 2;
+ ++curlvl;
+ goto L80;
+ }
+
+/* end while */
+
+/* Re-merge the eigenvalues/vectors which were deflated at the final */
+/* merge step. */
+
+ if (*icompq == 1) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ j = iwork[indxq + i__];
+ work[i__] = d__[j];
+ scopy_(qsiz, &qstore[j * qstore_dim1 + 1], &c__1, &q[i__ * q_dim1
+ + 1], &c__1);
+/* L100: */
+ }
+ scopy_(n, &work[1], &c__1, &d__[1], &c__1);
+ } else if (*icompq == 2) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ j = iwork[indxq + i__];
+ work[i__] = d__[j];
+ scopy_(n, &q[j * q_dim1 + 1], &c__1, &work[*n * i__ + 1], &c__1);
+/* L110: */
+ }
+ scopy_(n, &work[1], &c__1, &d__[1], &c__1);
+ slacpy_("A", n, n, &work[*n + 1], n, &q[q_offset], ldq);
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ j = iwork[indxq + i__];
+ work[i__] = d__[j];
+/* L120: */
+ }
+ scopy_(n, &work[1], &c__1, &d__[1], &c__1);
+ }
+ goto L140;
+
+L130:
+ *info = submat * (*n + 1) + submat + matsiz - 1;
+
+L140:
+ return 0;
+
+/* End of SLAED0 */
+
+} /* slaed0_ */
diff --git a/contrib/libs/clapack/slaed1.c b/contrib/libs/clapack/slaed1.c
new file mode 100644
index 0000000000..3c95598fea
--- /dev/null
+++ b/contrib/libs/clapack/slaed1.c
@@ -0,0 +1,246 @@
+/* slaed1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int slaed1_(integer *n, real *d__, real *q, integer *ldq,
+	integer *indxq, real *rho, integer *cutpnt, real *work, integer *
+	iwork, integer *info)
+{
+    /* System generated locals */
+    integer q_dim1, q_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, k, n1, n2, is, iw, iz, iq2, cpp1, indx, indxc, indxp;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+	    integer *), slaed2_(integer *, integer *, integer *, real *, real
+	    *, integer *, integer *, real *, real *, real *, real *, real *,
+	    integer *, integer *, integer *, integer *, integer *), slaed3_(
+	    integer *, integer *, integer *, real *, real *, integer *, real *
+, real *, real *, integer *, integer *, real *, real *, integer *)
+	    ;
+    integer idlmda;
+    extern /* Subroutine */ int xerbla_(char *, integer *), slamrg_(
+	    integer *, integer *, real *, integer *, integer *, integer *);
+    integer coltyp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAED1 computes the updated eigensystem of a diagonal */
+/* matrix after modification by a rank-one symmetric matrix. This */
+/* routine is used only for the eigenproblem which requires all */
+/* eigenvalues and eigenvectors of a tridiagonal matrix. SLAED7 handles */
+/* the case in which eigenvalues only or eigenvalues and eigenvectors */
+/* of a full symmetric matrix (which was reduced to tridiagonal form) */
+/* are desired. */
+
+/* T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out) */
+
+/* where Z = Q'u, u is a vector of length N with ones in the */
+/* CUTPNT and CUTPNT + 1 th elements and zeros elsewhere. */
+
+/* The eigenvectors of the original matrix are stored in Q, and the */
+/* eigenvalues are in D. The algorithm consists of three stages: */
+
+/* The first stage consists of deflating the size of the problem */
+/* when there are multiple eigenvalues or if there is a zero in */
+/* the Z vector. For each such occurence the dimension of the */
+/* secular equation problem is reduced by one. This stage is */
+/* performed by the routine SLAED2. */
+
+/* The second stage consists of calculating the updated */
+/* eigenvalues. This is done by finding the roots of the secular */
+/* equation via the routine SLAED4 (as called by SLAED3). */
+/* This routine also calculates the eigenvectors of the current */
+/* problem. */
+
+/* The final stage consists of computing the updated eigenvectors */
+/* directly using the updated eigenvalues. The eigenvectors for */
+/* the current problem are multiplied with the eigenvectors from */
+/* the overall problem. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the eigenvalues of the rank-1-perturbed matrix. */
+/* On exit, the eigenvalues of the repaired matrix. */
+
+/* Q (input/output) REAL array, dimension (LDQ,N) */
+/* On entry, the eigenvectors of the rank-1-perturbed matrix. */
+/* On exit, the eigenvectors of the repaired tridiagonal matrix. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* INDXQ (input/output) INTEGER array, dimension (N) */
+/* On entry, the permutation which separately sorts the two */
+/* subproblems in D into ascending order. */
+/* On exit, the permutation which will reintegrate the */
+/* subproblems back into sorted order, */
+/* i.e. D( INDXQ( I = 1, N ) ) will be in ascending order. */
+
+/* RHO (input) REAL */
+/* The subdiagonal entry used to create the rank-1 modification. */
+
+/* CUTPNT (input) INTEGER */
+/* The location of the last eigenvalue in the leading sub-matrix. */
+/* min(1,N) <= CUTPNT <= N/2. */
+
+/* WORK (workspace) REAL array, dimension (4*N + N**2) */
+
+/* IWORK (workspace) INTEGER array, dimension (4*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, an eigenvalue did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+/* Modified by Francoise Tisseur, University of Tennessee. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* Shift array bases so the f2c code can use 1-based Fortran indexing. */
+    --d__;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    --indxq;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*n < 0) {
+	*info = -1;
+    } else if (*ldq < max(1,*n)) {
+	*info = -4;
+    } else /* if(complicated condition) */ {
+/* Computing MIN */
+/* Enforce min(1,N) <= CUTPNT <= N/2; f2c materializes the MIN operands */
+/* in i__1/i__2 before the comparison. */
+	i__1 = 1, i__2 = *n / 2;
+	if (min(i__1,i__2) > *cutpnt || *n / 2 < *cutpnt) {
+	    *info = -7;
+	}
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SLAED1", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* The following values are integer pointers which indicate */
+/* the portion of the workspace */
+/* used by a particular array in SLAED2 and SLAED3. */
+
+/* WORK is carved into Z (n), DLAMDA (n), W (n) and Q2 (the rest); */
+/* IWORK into INDX (n), INDXC (n), COLTYP (n) and INDXP (n). */
+
+    iz = 1;
+    idlmda = iz + *n;
+    iw = idlmda + *n;
+    iq2 = iw + *n;
+
+    indx = 1;
+    indxc = indx + *n;
+    coltyp = indxc + *n;
+    indxp = coltyp + *n;
+
+
+/* Form the z-vector which consists of the last row of Q_1 and the */
+/* first row of Q_2. */
+
+    scopy_(cutpnt, &q[*cutpnt + q_dim1], ldq, &work[iz], &c__1);
+    cpp1 = *cutpnt + 1;
+    i__1 = *n - *cutpnt;
+    scopy_(&i__1, &q[cpp1 + cpp1 * q_dim1], ldq, &work[iz + *cutpnt], &c__1);
+
+/* Deflate eigenvalues. */
+
+    slaed2_(&k, n, cutpnt, &d__[1], &q[q_offset], ldq, &indxq[1], rho, &work[
+	    iz], &work[idlmda], &work[iw], &work[iq2], &iwork[indx], &iwork[
+	    indxc], &iwork[indxp], &iwork[coltyp], info);
+
+    if (*info != 0) {
+	goto L20;
+    }
+
+/* Solve Secular Equation. */
+
+    if (k != 0) {
+/* IS points past the eigenvector copies stored in Q2, sized from the */
+/* column-type counts that SLAED2 left in IWORK(COLTYP..COLTYP+3). */
+	is = (iwork[coltyp] + iwork[coltyp + 1]) * *cutpnt + (iwork[coltyp +
+		1] + iwork[coltyp + 2]) * (*n - *cutpnt) + iq2;
+	slaed3_(&k, n, cutpnt, &d__[1], &q[q_offset], ldq, rho, &work[idlmda],
+		 &work[iq2], &iwork[indxc], &iwork[coltyp], &work[iw], &work[
+		is], info);
+	if (*info != 0) {
+	    goto L20;
+	}
+
+/* Prepare the INDXQ sorting permutation. */
+
+	n1 = k;
+	n2 = *n - k;
+	slamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]);
+    } else {
+/* Everything deflated: D is already sorted, so INDXQ is the identity. */
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    indxq[i__] = i__;
+/* L10: */
+	}
+    }
+
+/* Failure exits from SLAED2/SLAED3 jump here. */
+L20:
+    return 0;
+
+/* End of SLAED1 */
+
+} /* slaed1_ */
diff --git a/contrib/libs/clapack/slaed2.c b/contrib/libs/clapack/slaed2.c
new file mode 100644
index 0000000000..ad04cf5b03
--- /dev/null
+++ b/contrib/libs/clapack/slaed2.c
@@ -0,0 +1,530 @@
+/* slaed2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b3 = -1.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int slaed2_(integer *k, integer *n, integer *n1, real *d__,
+	real *q, integer *ldq, integer *indxq, real *rho, real *z__, real *
+	dlamda, real *w, real *q2, integer *indx, integer *indxc, integer *
+	indxp, integer *coltyp, integer *info)
+{
+    /* System generated locals */
+    integer q_dim1, q_offset, i__1, i__2;
+    real r__1, r__2, r__3, r__4;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    real c__;
+    integer i__, j;
+    real s, t;
+    integer k2, n2, ct, nj, pj, js, iq1, iq2, n1p1;
+    real eps, tau, tol;
+    integer psm[4], imax, jmax, ctot[4];
+    extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+	    integer *, real *, real *), sscal_(integer *, real *, real *,
+	    integer *), scopy_(integer *, real *, integer *, real *, integer *
+);
+    extern doublereal slapy2_(real *, real *), slamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer isamax_(integer *, real *, integer *);
+    extern /* Subroutine */ int slamrg_(integer *, integer *, real *, integer
+	    *, integer *, integer *), slacpy_(char *, integer *, integer *,
+	    real *, integer *, real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAED2 merges the two sets of eigenvalues together into a single */
+/* sorted set. Then it tries to deflate the size of the problem. */
+/* There are two ways in which deflation can occur: when two or more */
+/* eigenvalues are close together or if there is a tiny entry in the */
+/* Z vector. For each such occurrence the order of the related secular */
+/* equation problem is reduced by one. */
+
+/* Arguments */
+/* ========= */
+
+/* K (output) INTEGER */
+/* The number of non-deflated eigenvalues, and the order of the */
+/* related secular equation. 0 <= K <=N. */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* N1 (input) INTEGER */
+/* The location of the last eigenvalue in the leading sub-matrix. */
+/* min(1,N) <= N1 <= N/2. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, D contains the eigenvalues of the two submatrices to */
+/* be combined. */
+/* On exit, D contains the trailing (N-K) updated eigenvalues */
+/* (those which were deflated) sorted into increasing order. */
+
+/* Q (input/output) REAL array, dimension (LDQ, N) */
+/* On entry, Q contains the eigenvectors of two submatrices in */
+/* the two square blocks with corners at (1,1), (N1,N1) */
+/* and (N1+1, N1+1), (N,N). */
+/* On exit, Q contains the trailing (N-K) updated eigenvectors */
+/* (those which were deflated) in its last N-K columns. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* INDXQ (input/output) INTEGER array, dimension (N) */
+/* The permutation which separately sorts the two sub-problems */
+/* in D into ascending order. Note that elements in the second */
+/* half of this permutation must first have N1 added to their */
+/* values. Destroyed on exit. */
+
+/* RHO (input/output) REAL */
+/* On entry, the off-diagonal element associated with the rank-1 */
+/* cut which originally split the two submatrices which are now */
+/* being recombined. */
+/* On exit, RHO has been modified to the value required by */
+/* SLAED3. */
+
+/* Z (input) REAL array, dimension (N) */
+/* On entry, Z contains the updating vector (the last */
+/* row of the first sub-eigenvector matrix and the first row of */
+/* the second sub-eigenvector matrix). */
+/* On exit, the contents of Z have been destroyed by the updating */
+/* process. */
+
+/* DLAMDA (output) REAL array, dimension (N) */
+/* A copy of the first K eigenvalues which will be used by */
+/* SLAED3 to form the secular equation. */
+
+/* W (output) REAL array, dimension (N) */
+/* The first k values of the final deflation-altered z-vector */
+/* which will be passed to SLAED3. */
+
+/* Q2 (output) REAL array, dimension (N1**2+(N-N1)**2) */
+/* A copy of the first K eigenvectors which will be used by */
+/* SLAED3 in a matrix multiply (SGEMM) to solve for the new */
+/* eigenvectors. */
+
+/* INDX (workspace) INTEGER array, dimension (N) */
+/* The permutation used to sort the contents of DLAMDA into */
+/* ascending order. */
+
+/* INDXC (output) INTEGER array, dimension (N) */
+/* The permutation used to arrange the columns of the deflated */
+/* Q matrix into three groups: the first group contains non-zero */
+/* elements only at and above N1, the second contains */
+/* non-zero elements only below N1, and the third is dense. */
+
+/* INDXP (workspace) INTEGER array, dimension (N) */
+/* The permutation used to place deflated values of D at the end */
+/* of the array. INDXP(1:K) points to the nondeflated D-values */
+/* and INDXP(K+1:N) points to the deflated eigenvalues. */
+
+/* COLTYP (workspace/output) INTEGER array, dimension (N) */
+/* During execution, a label which will indicate which of the */
+/* following types a column in the Q2 matrix is: */
+/* 1 : non-zero in the upper half only; */
+/* 2 : dense; */
+/* 3 : non-zero in the lower half only; */
+/* 4 : deflated. */
+/* On exit, COLTYP(i) is the number of columns of type i, */
+/* for i=1 to 4 only. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+/* Modified by Francoise Tisseur, University of Tennessee. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* Shift array bases so the f2c code can use 1-based Fortran indexing. */
+    --d__;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    --indxq;
+    --z__;
+    --dlamda;
+    --w;
+    --q2;
+    --indx;
+    --indxc;
+    --indxp;
+    --coltyp;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*n < 0) {
+	*info = -2;
+    } else if (*ldq < max(1,*n)) {
+	*info = -6;
+    } else /* if(complicated condition) */ {
+/* Computing MIN */
+/* Enforce min(1,N) <= N1 <= N/2; f2c materializes the MIN operands */
+/* in i__1/i__2 before the comparison. */
+	i__1 = 1, i__2 = *n / 2;
+	if (min(i__1,i__2) > *n1 || *n / 2 < *n1) {
+	    *info = -3;
+	}
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SLAED2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* N2 is the size of the trailing subproblem; N1P1 is its first index. */
+    n2 = *n - *n1;
+    n1p1 = *n1 + 1;
+
+/* If RHO is negative, negate the second half of Z so that a */
+/* non-negative RHO can be used below. */
+    if (*rho < 0.f) {
+	sscal_(&n2, &c_b3, &z__[n1p1], &c__1);
+    }
+
+/* Normalize z so that norm(z) = 1. Since z is the concatenation of */
+/* two normalized vectors, norm2(z) = sqrt(2). */
+
+    t = 1.f / sqrt(2.f);
+    sscal_(n, &t, &z__[1], &c__1);
+
+/* RHO = ABS( norm(z)**2 * RHO ) */
+
+    *rho = (r__1 = *rho * 2.f, dabs(r__1));
+
+/* Sort the eigenvalues into increasing order */
+
+    i__1 = *n;
+    for (i__ = n1p1; i__ <= i__1; ++i__) {
+	indxq[i__] += *n1;
+/* L10: */
+    }
+
+/* re-integrate the deflated parts from the last pass */
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dlamda[i__] = d__[indxq[i__]];
+/* L20: */
+    }
+    slamrg_(n1, &n2, &dlamda[1], &c__1, &c__1, &indxc[1]);
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	indx[i__] = indxq[indxc[i__]];
+/* L30: */
+    }
+
+/* Calculate the allowable deflation tolerance */
+
+    imax = isamax_(n, &z__[1], &c__1);
+    jmax = isamax_(n, &d__[1], &c__1);
+    eps = slamch_("Epsilon");
+/* Computing MAX */
+    r__3 = (r__1 = d__[jmax], dabs(r__1)), r__4 = (r__2 = z__[imax], dabs(
+	    r__2));
+    tol = eps * 8.f * dmax(r__3,r__4);
+
+/* If the rank-1 modifier is small enough, no more needs to be done */
+/* except to reorganize Q so that its columns correspond with the */
+/* elements in D. */
+
+    if (*rho * (r__1 = z__[imax], dabs(r__1)) <= tol) {
+	*k = 0;
+	iq2 = 1;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__ = indx[j];
+	    scopy_(n, &q[i__ * q_dim1 + 1], &c__1, &q2[iq2], &c__1);
+	    dlamda[j] = d__[i__];
+	    iq2 += *n;
+/* L40: */
+	}
+	slacpy_("A", n, n, &q2[1], n, &q[q_offset], ldq);
+	scopy_(n, &dlamda[1], &c__1, &d__[1], &c__1);
+	goto L190;
+    }
+
+/* If there are multiple eigenvalues then the problem deflates. Here */
+/* the number of equal eigenvalues are found. As each equal */
+/* eigenvalue is found, an elementary reflector is computed to rotate */
+/* the corresponding eigensubspace so that the corresponding */
+/* components of Z are zero in this new basis. */
+
+    i__1 = *n1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	coltyp[i__] = 1;
+/* L50: */
+    }
+    i__1 = *n;
+    for (i__ = n1p1; i__ <= i__1; ++i__) {
+	coltyp[i__] = 3;
+/* L60: */
+    }
+
+
+/* Two-pointer scan over the sorted order INDX: PJ holds the previous */
+/* surviving (non-deflated) candidate, NJ the current one. K counts */
+/* retained values from the front; K2 fills deflated values from the back. */
+    *k = 0;
+    k2 = *n + 1;
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	nj = indx[j];
+	if (*rho * (r__1 = z__[nj], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+	    --k2;
+	    coltyp[nj] = 4;
+	    indxp[k2] = nj;
+	    if (j == *n) {
+		goto L100;
+	    }
+	} else {
+	    pj = nj;
+	    goto L80;
+	}
+/* L70: */
+    }
+L80:
+    ++j;
+    nj = indx[j];
+    if (j > *n) {
+	goto L100;
+    }
+    if (*rho * (r__1 = z__[nj], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+	--k2;
+	coltyp[nj] = 4;
+	indxp[k2] = nj;
+    } else {
+
+/* Check if eigenvalues are close enough to allow deflation. */
+
+	s = z__[pj];
+	c__ = z__[nj];
+
+/* Find sqrt(a**2+b**2) without overflow or */
+/* destructive underflow. */
+
+	tau = slapy2_(&c__, &s);
+	t = d__[nj] - d__[pj];
+	c__ /= tau;
+	s = -s / tau;
+	if ((r__1 = t * c__ * s, dabs(r__1)) <= tol) {
+
+/* Deflation is possible. */
+
+	    z__[nj] = tau;
+	    z__[pj] = 0.f;
+	    if (coltyp[nj] != coltyp[pj]) {
+		coltyp[nj] = 2;
+	    }
+	    coltyp[pj] = 4;
+	    srot_(n, &q[pj * q_dim1 + 1], &c__1, &q[nj * q_dim1 + 1], &c__1, &
+		    c__, &s);
+/* Computing 2nd power */
+	    r__1 = c__;
+/* Computing 2nd power */
+	    r__2 = s;
+	    t = d__[pj] * (r__1 * r__1) + d__[nj] * (r__2 * r__2);
+/* Computing 2nd power */
+	    r__1 = s;
+/* Computing 2nd power */
+	    r__2 = c__;
+	    d__[nj] = d__[pj] * (r__1 * r__1) + d__[nj] * (r__2 * r__2);
+	    d__[pj] = t;
+	    --k2;
+	    i__ = 1;
+/* Insertion step: place the deflated value PJ into the sorted */
+/* deflated tail INDXP(K2..N), shifting smaller entries down. */
+L90:
+	    if (k2 + i__ <= *n) {
+		if (d__[pj] < d__[indxp[k2 + i__]]) {
+		    indxp[k2 + i__ - 1] = indxp[k2 + i__];
+		    indxp[k2 + i__] = pj;
+		    ++i__;
+		    goto L90;
+		} else {
+		    indxp[k2 + i__ - 1] = pj;
+		}
+	    } else {
+		indxp[k2 + i__ - 1] = pj;
+	    }
+	    pj = nj;
+	} else {
+	    ++(*k);
+	    dlamda[*k] = d__[pj];
+	    w[*k] = z__[pj];
+	    indxp[*k] = pj;
+	    pj = nj;
+	}
+    }
+    goto L80;
+L100:
+
+/* Record the last eigenvalue. */
+
+    ++(*k);
+    dlamda[*k] = d__[pj];
+    w[*k] = z__[pj];
+    indxp[*k] = pj;
+
+/* Count up the total number of the various types of columns, then */
+/* form a permutation which positions the four column types into */
+/* four uniform groups (although one or more of these groups may be */
+/* empty). */
+
+    for (j = 1; j <= 4; ++j) {
+	ctot[j - 1] = 0;
+/* L110: */
+    }
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	ct = coltyp[j];
+	++ctot[ct - 1];
+/* L120: */
+    }
+
+/* PSM(*) = Position in SubMatrix (of types 1 through 4) */
+
+    psm[0] = 1;
+    psm[1] = ctot[0] + 1;
+    psm[2] = psm[1] + ctot[1];
+    psm[3] = psm[2] + ctot[2];
+    *k = *n - ctot[3];
+
+/* Fill out the INDXC array so that the permutation which it induces */
+/* will place all type-1 columns first, all type-2 columns next, */
+/* then all type-3's, and finally all type-4's. */
+
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	js = indxp[j];
+	ct = coltyp[js];
+	indx[psm[ct - 1]] = js;
+	indxc[psm[ct - 1]] = j;
+	++psm[ct - 1];
+/* L130: */
+    }
+
+/* Sort the eigenvalues and corresponding eigenvectors into DLAMDA */
+/* and Q2 respectively. The eigenvalues/vectors which were not */
+/* deflated go into the first K slots of DLAMDA and Q2 respectively, */
+/* while those which were deflated go into the last N - K slots. */
+
+    i__ = 1;
+    iq1 = 1;
+    iq2 = (ctot[0] + ctot[1]) * *n1 + 1;
+/* Type-1 columns: only the leading N1 rows are non-zero, so copy N1 rows. */
+    i__1 = ctot[0];
+    for (j = 1; j <= i__1; ++j) {
+	js = indx[i__];
+	scopy_(n1, &q[js * q_dim1 + 1], &c__1, &q2[iq1], &c__1);
+	z__[i__] = d__[js];
+	++i__;
+	iq1 += *n1;
+/* L140: */
+    }
+
+/* Type-2 (dense) columns: copy both the upper N1 and lower N2 parts. */
+    i__1 = ctot[1];
+    for (j = 1; j <= i__1; ++j) {
+	js = indx[i__];
+	scopy_(n1, &q[js * q_dim1 + 1], &c__1, &q2[iq1], &c__1);
+	scopy_(&n2, &q[*n1 + 1 + js * q_dim1], &c__1, &q2[iq2], &c__1);
+	z__[i__] = d__[js];
+	++i__;
+	iq1 += *n1;
+	iq2 += n2;
+/* L150: */
+    }
+
+/* Type-3 columns: only the trailing N2 rows are non-zero. */
+    i__1 = ctot[2];
+    for (j = 1; j <= i__1; ++j) {
+	js = indx[i__];
+	scopy_(&n2, &q[*n1 + 1 + js * q_dim1], &c__1, &q2[iq2], &c__1);
+	z__[i__] = d__[js];
+	++i__;
+	iq2 += n2;
+/* L160: */
+    }
+
+/* Type-4 (deflated) columns: copy full length-N vectors. */
+    iq1 = iq2;
+    i__1 = ctot[3];
+    for (j = 1; j <= i__1; ++j) {
+	js = indx[i__];
+	scopy_(n, &q[js * q_dim1 + 1], &c__1, &q2[iq2], &c__1);
+	iq2 += *n;
+	z__[i__] = d__[js];
+	++i__;
+/* L170: */
+    }
+
+/* The deflated eigenvalues and their corresponding vectors go back */
+/* into the last N - K slots of D and Q respectively. */
+
+    slacpy_("A", n, &ctot[3], &q2[iq1], n, &q[(*k + 1) * q_dim1 + 1], ldq);
+    i__1 = *n - *k;
+    scopy_(&i__1, &z__[*k + 1], &c__1, &d__[*k + 1], &c__1);
+
+/* Copy CTOT into COLTYP for referencing in SLAED3. */
+
+    for (j = 1; j <= 4; ++j) {
+	coltyp[j] = ctot[j - 1];
+/* L180: */
+    }
+
+L190:
+    return 0;
+
+/* End of SLAED2 */
+
+} /* slaed2_ */
diff --git a/contrib/libs/clapack/slaed3.c b/contrib/libs/clapack/slaed3.c
new file mode 100644
index 0000000000..f95a5cc62f
--- /dev/null
+++ b/contrib/libs/clapack/slaed3.c
@@ -0,0 +1,336 @@
+/* slaed3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b22 = 1.f;
+static real c_b23 = 0.f;
+
+/* Subroutine */ int slaed3_(integer *k, integer *n, integer *n1, real *d__,
+ real *q, integer *ldq, real *rho, real *dlamda, real *q2, integer *
+ indx, integer *ctot, real *w, real *s, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ integer i__, j, n2, n12, ii, n23, iq2;
+ real temp;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *), scopy_(integer *, real *,
+ integer *, real *, integer *), slaed4_(integer *, integer *, real
+ *, real *, real *, real *, real *, integer *);
+ extern doublereal slamc3_(real *, real *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), slacpy_(
+ char *, integer *, integer *, real *, integer *, real *, integer *
+), slaset_(char *, integer *, integer *, real *, real *,
+ real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAED3 finds the roots of the secular equation, as defined by the */
+/* values in D, W, and RHO, between 1 and K. It makes the */
+/* appropriate calls to SLAED4 and then updates the eigenvectors by */
+/* multiplying the matrix of eigenvectors of the pair of eigensystems */
+/* being combined by the matrix of eigenvectors of the K-by-K system */
+/* which is solved here. */
+
+/* This code makes very mild assumptions about floating point */
+/* arithmetic. It will work on machines with a guard digit in */
+/* add/subtract, or on those binary machines without guard digits */
+/* which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. */
+/* It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* K (input) INTEGER */
+/* The number of terms in the rational function to be solved by */
+/* SLAED4. K >= 0. */
+
+/* N (input) INTEGER */
+/* The number of rows and columns in the Q matrix. */
+/* N >= K (deflation may result in N>K). */
+
+/* N1 (input) INTEGER */
+/* The location of the last eigenvalue in the leading submatrix. */
+/* min(1,N) <= N1 <= N/2. */
+
+/* D (output) REAL array, dimension (N) */
+/* D(I) contains the updated eigenvalues for */
+/* 1 <= I <= K. */
+
+/* Q (output) REAL array, dimension (LDQ,N) */
+/* Initially the first K columns are used as workspace. */
+/* On output the columns 1 to K contain */
+/* the updated eigenvectors. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* RHO (input) REAL */
+/* The value of the parameter in the rank one update equation. */
+/* RHO >= 0 required. */
+
+/* DLAMDA (input/output) REAL array, dimension (K) */
+/* The first K elements of this array contain the old roots */
+/* of the deflated updating problem. These are the poles */
+/* of the secular equation. May be changed on output by */
+/* having lowest order bit set to zero on Cray X-MP, Cray Y-MP, */
+/* Cray-2, or Cray C-90, as described above. */
+
+/* Q2 (input) REAL array, dimension (LDQ2, N) */
+/* The first K columns of this matrix contain the non-deflated */
+/* eigenvectors for the split problem. */
+
+/* INDX (input) INTEGER array, dimension (N) */
+/* The permutation used to arrange the columns of the deflated */
+/* Q matrix into three groups (see SLAED2). */
+/* The rows of the eigenvectors found by SLAED4 must be likewise */
+/* permuted before the matrix multiply can take place. */
+
+/* CTOT (input) INTEGER array, dimension (4) */
+/* A count of the total number of the various types of columns */
+/* in Q, as described in INDX. The fourth column type is any */
+/* column which has been deflated. */
+
+/* W (input/output) REAL array, dimension (K) */
+/* The first K elements of this array contain the components */
+/* of the deflation-adjusted updating vector. Destroyed on */
+/* output. */
+
+/* S (workspace) REAL array, dimension (N1 + 1)*K */
+/* Will contain the eigenvectors of the repaired matrix which */
+/* will be multiplied by the previously accumulated eigenvectors */
+/* to update the system. */
+
+/* LDS (input) INTEGER */
+/* The leading dimension of S. LDS >= max(1,K). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, an eigenvalue did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+/* Modified by Francoise Tisseur, University of Tennessee. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --dlamda;
+ --q2;
+ --indx;
+ --ctot;
+ --w;
+ --s;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*k < 0) {
+ *info = -1;
+ } else if (*n < *k) {
+ *info = -2;
+ } else if (*ldq < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLAED3", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible: K == 0 means every eigenvalue was deflated. */
+
+ if (*k == 0) {
+ return 0;
+ }
+
+/* Modify values DLAMDA(i) to make sure all DLAMDA(i)-DLAMDA(j) can */
+/* be computed with high relative accuracy (barring over/underflow). */
+/* This is a problem on machines without a guard digit in */
+/* add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2). */
+/* The following code replaces DLAMDA(I) by 2*DLAMDA(I)-DLAMDA(I), */
+/* which on any of these machines zeros out the bottommost */
+/* bit of DLAMDA(I) if it is 1; this makes the subsequent */
+/* subtractions DLAMDA(I)-DLAMDA(J) unproblematic when cancellation */
+/* occurs. On binary machines with a guard digit (almost all */
+/* machines) it does not change DLAMDA(I) at all. On hexadecimal */
+/* and decimal machines with a guard digit, it slightly */
+/* changes the bottommost bits of DLAMDA(I). It does not account */
+/* for hexadecimal or decimal machines without guard digits */
+/* (we know of none). We use a subroutine call to compute */
+/* 2*DLAMBDA(I) to prevent optimizing compilers from eliminating */
+/* this code. */
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlamda[i__] = slamc3_(&dlamda[i__], &dlamda[i__]) - dlamda[i__];
+/* L10: */
+ }
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ slaed4_(k, &j, &dlamda[1], &w[1], &q[j * q_dim1 + 1], rho, &d__[j],
+ info);
+
+/* If the zero finder fails, the computation is terminated. */
+
+ if (*info != 0) {
+ goto L120;
+ }
+/* L20: */
+ }
+
+ if (*k == 1) {
+ goto L110;
+ }
+ if (*k == 2) {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ w[1] = q[j * q_dim1 + 1];
+ w[2] = q[j * q_dim1 + 2];
+ ii = indx[1];
+ q[j * q_dim1 + 1] = w[ii];
+ ii = indx[2];
+ q[j * q_dim1 + 2] = w[ii];
+/* L30: */
+ }
+ goto L110;
+ }
+
+/* Compute updated W: first save the current W in workspace S, then rebuild it. */
+
+ scopy_(k, &w[1], &c__1, &s[1], &c__1);
+
+/* Initialize W(I) = Q(I,I) */
+
+ i__1 = *ldq + 1;
+ scopy_(k, &q[q_offset], &i__1, &w[1], &c__1);
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L40: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L50: */
+ }
+/* L60: */
+ }
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__1 = sqrt(-w[i__]);
+ w[i__] = r_sign(&r__1, &s[i__]);
+/* L70: */
+ }
+
+/* Compute eigenvectors of the modified rank-1 modification: each column j */
+/* of Q is S normalized by its 2-norm (SNRM2) and row-permuted by INDX. */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *k;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ s[i__] = w[i__] / q[i__ + j * q_dim1];
+/* L80: */
+ }
+ temp = snrm2_(k, &s[1], &c__1);
+ i__2 = *k;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ ii = indx[i__];
+ q[i__ + j * q_dim1] = s[ii] / temp;
+/* L90: */
+ }
+/* L100: */
+ }
+
+/* Compute the updated eigenvectors: multiply the Q2 blocks (split by the */
+/* CTOT column-type counts) by S via SGEMM, or zero the block with SLASET. */
+
+L110:
+
+ n2 = *n - *n1;
+ n12 = ctot[1] + ctot[2];
+ n23 = ctot[2] + ctot[3];
+
+ slacpy_("A", &n23, k, &q[ctot[1] + 1 + q_dim1], ldq, &s[1], &n23);
+ iq2 = *n1 * n12 + 1;
+ if (n23 != 0) {
+ sgemm_("N", "N", &n2, k, &n23, &c_b22, &q2[iq2], &n2, &s[1], &n23, &
+ c_b23, &q[*n1 + 1 + q_dim1], ldq);
+ } else {
+ slaset_("A", &n2, k, &c_b23, &c_b23, &q[*n1 + 1 + q_dim1], ldq);
+ }
+
+ slacpy_("A", &n12, k, &q[q_offset], ldq, &s[1], &n12);
+ if (n12 != 0) {
+ sgemm_("N", "N", n1, k, &n12, &c_b22, &q2[1], n1, &s[1], &n12, &c_b23,
+ &q[q_offset], ldq);
+ } else {
+ slaset_("A", n1, k, &c_b23, &c_b23, &q[q_dim1 + 1], ldq);
+ }
+
+
+L120:
+ return 0;
+
+/* End of SLAED3 */
+
+} /* slaed3_ */
diff --git a/contrib/libs/clapack/slaed4.c b/contrib/libs/clapack/slaed4.c
new file mode 100644
index 0000000000..f661f87ffa
--- /dev/null
+++ b/contrib/libs/clapack/slaed4.c
@@ -0,0 +1,952 @@
+/* slaed4.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slaed4_(integer *n, integer *i__, real *d__, real *z__,
+ real *delta, real *rho, real *dlam, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ real a, b, c__;
+ integer j;
+ real w;
+ integer ii;
+ real dw, zz[3];
+ integer ip1;
+ real del, eta, phi, eps, tau, psi;
+ integer iim1, iip1;
+ real dphi, dpsi;
+ integer iter;
+ real temp, prew, temp1, dltlb, dltub, midpt;
+ integer niter;
+ logical swtch;
+ extern /* Subroutine */ int slaed5_(integer *, real *, real *, real *,
+ real *, real *), slaed6_(integer *, logical *, real *, real *,
+ real *, real *, real *, integer *);
+ logical swtch3;
+ extern doublereal slamch_(char *);
+ logical orgati;
+ real erretm, rhoinv;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This subroutine computes the I-th updated eigenvalue of a symmetric */
+/* rank-one modification to a diagonal matrix whose elements are */
+/* given in the array d, and that */
+
+/* D(i) < D(j) for i < j */
+
+/* and that RHO > 0. This is arranged by the calling routine, and is */
+/* no loss in generality. The rank-one modified system is thus */
+
+/* diag( D ) + RHO * Z * Z_transpose. */
+
+/* where we assume the Euclidean norm of Z is 1. */
+
+/* The method consists of approximating the rational functions in the */
+/* secular equation by simpler interpolating rational functions. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The length of all arrays. */
+
+/* I (input) INTEGER */
+/* The index of the eigenvalue to be computed. 1 <= I <= N. */
+
+/* D (input) REAL array, dimension (N) */
+/* The original eigenvalues. It is assumed that they are in */
+/* order, D(I) < D(J) for I < J. */
+
+/* Z (input) REAL array, dimension (N) */
+/* The components of the updating vector. */
+
+/* DELTA (output) REAL array, dimension (N) */
+/* If N .GT. 2, DELTA contains (D(j) - lambda_I) in its j-th */
+/* component. If N = 1, then DELTA(1) = 1. If N = 2, see SLAED5 */
+/* for detail. The vector DELTA contains the information necessary */
+/* to construct the eigenvectors by SLAED3 and SLAED9. */
+
+/* RHO (input) REAL */
+/* The scalar in the symmetric updating formula. */
+
+/* DLAM (output) REAL */
+/* The computed lambda_I, the I-th updated eigenvalue. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* > 0: if INFO = 1, the updating process failed. */
+
+/* Internal Parameters */
+/* =================== */
+
+/* Logical variable ORGATI (origin-at-i?) is used for distinguishing */
+/* whether D(i) or D(i+1) is treated as the origin. */
+
+/* ORGATI = .true. origin at i */
+/* ORGATI = .false. origin at i+1 */
+
+/* Logical variable SWTCH3 (switch-for-3-poles?) is for noting */
+/* if we are working with THREE poles! */
+
+/* MAXIT is the maximum number of iterations allowed for each */
+/* eigenvalue. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ren-Cang Li, Computer Science Division, University of California */
+/* at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Since this routine is called in an inner loop, we do no argument */
+/* checking. */
+
+/* Quick return for N=1 and 2. */
+
+ /* Parameter adjustments */
+ --delta;
+ --z__;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+ if (*n == 1) {
+
+/* Presumably, I=1 upon entry */
+
+ *dlam = d__[1] + *rho * z__[1] * z__[1];
+ delta[1] = 1.f;
+ return 0;
+ }
+ if (*n == 2) {
+ slaed5_(i__, &d__[1], &z__[1], &delta[1], rho, dlam);
+ return 0;
+ }
+
+/* Compute machine epsilon */
+
+ eps = slamch_("Epsilon");
+ rhoinv = 1.f / *rho;
+
+/* The case I = N */
+
+ if (*i__ == *n) {
+
+/* Initialize some basic variables */
+
+ ii = *n - 1;
+ niter = 1;
+
+/* Calculate initial guess; MIDPT = RHO/2 is the midpoint of the */
+/* interval [D(N), D(N)+RHO] that brackets the last eigenvalue. */
+
+ midpt = *rho / 2.f;
+
+/* If ||Z||_2 is not one, then TEMP should be set to */
+/* RHO * ||Z||_2^2 / TWO */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[*i__] - midpt;
+/* L10: */
+ }
+
+ psi = 0.f;
+ i__1 = *n - 2;
+ for (j = 1; j <= i__1; ++j) {
+ psi += z__[j] * z__[j] / delta[j];
+/* L20: */
+ }
+
+ c__ = rhoinv + psi;
+ w = c__ + z__[ii] * z__[ii] / delta[ii] + z__[*n] * z__[*n] / delta[*
+ n];
+
+ if (w <= 0.f) {
+ temp = z__[*n - 1] * z__[*n - 1] / (d__[*n] - d__[*n - 1] + *rho)
+ + z__[*n] * z__[*n] / *rho;
+ if (c__ <= temp) {
+ tau = *rho;
+ } else {
+ del = d__[*n] - d__[*n - 1];
+ a = -c__ * del + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n]
+ ;
+ b = z__[*n] * z__[*n] * del;
+ if (a < 0.f) {
+ tau = b * 2.f / (sqrt(a * a + b * 4.f * c__) - a);
+ } else {
+ tau = (a + sqrt(a * a + b * 4.f * c__)) / (c__ * 2.f);
+ }
+ }
+
+/* It can be proved that */
+/* D(N)+RHO/2 <= LAMBDA(N) < D(N)+TAU <= D(N)+RHO */
+
+ dltlb = midpt;
+ dltub = *rho;
+ } else {
+ del = d__[*n] - d__[*n - 1];
+ a = -c__ * del + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n];
+ b = z__[*n] * z__[*n] * del;
+ if (a < 0.f) {
+ tau = b * 2.f / (sqrt(a * a + b * 4.f * c__) - a);
+ } else {
+ tau = (a + sqrt(a * a + b * 4.f * c__)) / (c__ * 2.f);
+ }
+
+/* It can be proved that */
+/* D(N) < D(N)+TAU < LAMBDA(N) < D(N)+RHO/2 */
+
+ dltlb = 0.f;
+ dltub = midpt;
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[*i__] - tau;
+/* L30: */
+ }
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L40: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / delta[*n];
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8.f + erretm - phi + rhoinv + dabs(tau) * (
+ dpsi + dphi);
+
+ w = rhoinv + phi + psi;
+
+/* Test for convergence: |W| within the rounding-error bound ERRETM. */
+
+ if (dabs(w) <= eps * erretm) {
+ *dlam = d__[*i__] + tau;
+ goto L250;
+ }
+
+ if (w <= 0.f) {
+ dltlb = dmax(dltlb,tau);
+ } else {
+ dltub = dmin(dltub,tau);
+ }
+
+/* Calculate the new step from the two-pole rational interpolant. */
+
+ ++niter;
+ c__ = w - delta[*n - 1] * dpsi - delta[*n] * dphi;
+ a = (delta[*n - 1] + delta[*n]) * w - delta[*n - 1] * delta[*n] * (
+ dpsi + dphi);
+ b = delta[*n - 1] * delta[*n] * w;
+ if (c__ < 0.f) {
+ c__ = dabs(c__);
+ }
+ if (c__ == 0.f) {
+/* ETA = B/A */
+/* ETA = RHO - TAU */
+ eta = dltub - tau;
+ } else if (a >= 0.f) {
+ eta = (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) / (
+ c__ * 2.f);
+ } else {
+ eta = b * 2.f / (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ }
+
+/* Note, eta should be positive if w is negative, and */
+/* eta should be negative otherwise. However, */
+/* if for some reason caused by roundoff, eta*w > 0, */
+/* we simply use one Newton step instead. This way */
+/* will guarantee eta*w < 0. */
+
+ if (w * eta > 0.f) {
+ eta = -w / (dpsi + dphi);
+ }
+ temp = tau + eta;
+ if (temp > dltub || temp < dltlb) {
+ if (w < 0.f) {
+ eta = (dltub - tau) / 2.f;
+ } else {
+ eta = (dltlb - tau) / 2.f;
+ }
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+/* L50: */
+ }
+
+ tau += eta;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L60: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / delta[*n];
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8.f + erretm - phi + rhoinv + dabs(tau) * (
+ dpsi + dphi);
+
+ w = rhoinv + phi + psi;
+
+/* Main loop to update the values of the array DELTA */
+/* (at most 30 iterations; see the NITER bound below). */
+
+ iter = niter + 1;
+
+ for (niter = iter; niter <= 30; ++niter) {
+
+/* Test for convergence */
+
+ if (dabs(w) <= eps * erretm) {
+ *dlam = d__[*i__] + tau;
+ goto L250;
+ }
+
+ if (w <= 0.f) {
+ dltlb = dmax(dltlb,tau);
+ } else {
+ dltub = dmin(dltub,tau);
+ }
+
+/* Calculate the new step */
+
+ c__ = w - delta[*n - 1] * dpsi - delta[*n] * dphi;
+ a = (delta[*n - 1] + delta[*n]) * w - delta[*n - 1] * delta[*n] *
+ (dpsi + dphi);
+ b = delta[*n - 1] * delta[*n] * w;
+ if (a >= 0.f) {
+ eta = (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) /
+ (c__ * 2.f);
+ } else {
+ eta = b * 2.f / (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ }
+
+/* Note, eta should be positive if w is negative, and */
+/* eta should be negative otherwise. However, */
+/* if for some reason caused by roundoff, eta*w > 0, */
+/* we simply use one Newton step instead. This way */
+/* will guarantee eta*w < 0. */
+
+ if (w * eta > 0.f) {
+ eta = -w / (dpsi + dphi);
+ }
+ temp = tau + eta;
+ if (temp > dltub || temp < dltlb) {
+ if (w < 0.f) {
+ eta = (dltub - tau) / 2.f;
+ } else {
+ eta = (dltlb - tau) / 2.f;
+ }
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+/* L70: */
+ }
+
+ tau += eta;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L80: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / delta[*n];
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8.f + erretm - phi + rhoinv + dabs(tau) *
+ (dpsi + dphi);
+
+ w = rhoinv + phi + psi;
+/* L90: */
+ }
+
+/* Return with INFO = 1, NITER = MAXIT and not converged */
+
+ *info = 1;
+ *dlam = d__[*i__] + tau;
+ goto L250;
+
+/* End for the case I = N */
+
+ } else {
+
+/* The case for I < N */
+
+ niter = 1;
+ ip1 = *i__ + 1;
+
+/* Calculate initial guess: MIDPT is the midpoint of the gap */
+/* between the adjacent poles D(I) and D(I+1). */
+
+ del = d__[ip1] - d__[*i__];
+ midpt = del / 2.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[*i__] - midpt;
+/* L100: */
+ }
+
+ psi = 0.f;
+ i__1 = *i__ - 1;
+ for (j = 1; j <= i__1; ++j) {
+ psi += z__[j] * z__[j] / delta[j];
+/* L110: */
+ }
+
+ phi = 0.f;
+ i__1 = *i__ + 2;
+ for (j = *n; j >= i__1; --j) {
+ phi += z__[j] * z__[j] / delta[j];
+/* L120: */
+ }
+ c__ = rhoinv + psi + phi;
+ w = c__ + z__[*i__] * z__[*i__] / delta[*i__] + z__[ip1] * z__[ip1] /
+ delta[ip1];
+
+ if (w > 0.f) {
+
+/* d(i)< the ith eigenvalue < (d(i)+d(i+1))/2 */
+
+/* We choose d(i) as origin. */
+
+ orgati = TRUE_;
+ a = c__ * del + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1];
+ b = z__[*i__] * z__[*i__] * del;
+ if (a > 0.f) {
+ tau = b * 2.f / (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ } else {
+ tau = (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) /
+ (c__ * 2.f);
+ }
+ dltlb = 0.f;
+ dltub = midpt;
+ } else {
+
+/* (d(i)+d(i+1))/2 <= the ith eigenvalue < d(i+1) */
+
+/* We choose d(i+1) as origin. */
+
+ orgati = FALSE_;
+ a = c__ * del - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1];
+ b = z__[ip1] * z__[ip1] * del;
+ if (a < 0.f) {
+ tau = b * 2.f / (a - sqrt((r__1 = a * a + b * 4.f * c__, dabs(
+ r__1))));
+ } else {
+ tau = -(a + sqrt((r__1 = a * a + b * 4.f * c__, dabs(r__1))))
+ / (c__ * 2.f);
+ }
+ dltlb = -midpt;
+ dltub = 0.f;
+ }
+
+ if (orgati) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[*i__] - tau;
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[ip1] - tau;
+/* L140: */
+ }
+ }
+ if (orgati) {
+ ii = *i__;
+ } else {
+ ii = *i__ + 1;
+ }
+ iim1 = ii - 1;
+ iip1 = ii + 1;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L150: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.f;
+ phi = 0.f;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / delta[j];
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L160: */
+ }
+
+ w = rhoinv + phi + psi;
+
+/* W is the value of the secular function with */
+/* its ii-th element removed. */
+
+ swtch3 = FALSE_;
+ if (orgati) {
+ if (w < 0.f) {
+ swtch3 = TRUE_;
+ }
+ } else {
+ if (w > 0.f) {
+ swtch3 = TRUE_;
+ }
+ }
+ if (ii == 1 || ii == *n) {
+ swtch3 = FALSE_;
+ }
+
+ temp = z__[ii] / delta[ii];
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w += temp;
+ erretm = (phi - psi) * 8.f + erretm + rhoinv * 2.f + dabs(temp) * 3.f
+ + dabs(tau) * dw;
+
+/* Test for convergence */
+
+ if (dabs(w) <= eps * erretm) {
+ if (orgati) {
+ *dlam = d__[*i__] + tau;
+ } else {
+ *dlam = d__[ip1] + tau;
+ }
+ goto L250;
+ }
+
+ if (w <= 0.f) {
+ dltlb = dmax(dltlb,tau);
+ } else {
+ dltub = dmin(dltub,tau);
+ }
+
+/* Calculate the new step */
+
+ ++niter;
+ if (! swtch3) {
+ if (orgati) {
+/* Computing 2nd power */
+ r__1 = z__[*i__] / delta[*i__];
+ c__ = w - delta[ip1] * dw - (d__[*i__] - d__[ip1]) * (r__1 *
+ r__1);
+ } else {
+/* Computing 2nd power */
+ r__1 = z__[ip1] / delta[ip1];
+ c__ = w - delta[*i__] * dw - (d__[ip1] - d__[*i__]) * (r__1 *
+ r__1);
+ }
+ a = (delta[*i__] + delta[ip1]) * w - delta[*i__] * delta[ip1] *
+ dw;
+ b = delta[*i__] * delta[ip1] * w;
+ if (c__ == 0.f) {
+ if (a == 0.f) {
+ if (orgati) {
+ a = z__[*i__] * z__[*i__] + delta[ip1] * delta[ip1] *
+ (dpsi + dphi);
+ } else {
+ a = z__[ip1] * z__[ip1] + delta[*i__] * delta[*i__] *
+ (dpsi + dphi);
+ }
+ }
+ eta = b / a;
+ } else if (a <= 0.f) {
+ eta = (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) /
+ (c__ * 2.f);
+ } else {
+ eta = b * 2.f / (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ }
+ } else {
+
+/* Interpolation using THREE most relevant poles */
+
+ temp = rhoinv + psi + phi;
+ if (orgati) {
+ temp1 = z__[iim1] / delta[iim1];
+ temp1 *= temp1;
+ c__ = temp - delta[iip1] * (dpsi + dphi) - (d__[iim1] - d__[
+ iip1]) * temp1;
+ zz[0] = z__[iim1] * z__[iim1];
+ zz[2] = delta[iip1] * delta[iip1] * (dpsi - temp1 + dphi);
+ } else {
+ temp1 = z__[iip1] / delta[iip1];
+ temp1 *= temp1;
+ c__ = temp - delta[iim1] * (dpsi + dphi) - (d__[iip1] - d__[
+ iim1]) * temp1;
+ zz[0] = delta[iim1] * delta[iim1] * (dpsi + (dphi - temp1));
+ zz[2] = z__[iip1] * z__[iip1];
+ }
+ zz[1] = z__[ii] * z__[ii];
+ slaed6_(&niter, &orgati, &c__, &delta[iim1], zz, &w, &eta, info);
+ if (*info != 0) {
+ goto L250;
+ }
+ }
+
+/* Note, eta should be positive if w is negative, and */
+/* eta should be negative otherwise. However, */
+/* if for some reason caused by roundoff, eta*w > 0, */
+/* we simply use one Newton step instead. This way */
+/* will guarantee eta*w < 0. */
+
+ if (w * eta >= 0.f) {
+ eta = -w / dw;
+ }
+ temp = tau + eta;
+ if (temp > dltub || temp < dltlb) {
+ if (w < 0.f) {
+ eta = (dltub - tau) / 2.f;
+ } else {
+ eta = (dltlb - tau) / 2.f;
+ }
+ }
+
+ prew = w;
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+/* L180: */
+ }
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L190: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.f;
+ phi = 0.f;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / delta[j];
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L200: */
+ }
+
+ temp = z__[ii] / delta[ii];
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w = rhoinv + phi + psi + temp;
+ erretm = (phi - psi) * 8.f + erretm + rhoinv * 2.f + dabs(temp) * 3.f
+ + (r__1 = tau + eta, dabs(r__1)) * dw;
+
+ swtch = FALSE_;
+ if (orgati) {
+ if (-w > dabs(prew) / 10.f) {
+ swtch = TRUE_;
+ }
+ } else {
+ if (w > dabs(prew) / 10.f) {
+ swtch = TRUE_;
+ }
+ }
+
+ tau += eta;
+
+/* Main loop to update the values of the array DELTA */
+/* (at most 30 iterations; see the NITER bound below). */
+
+ iter = niter + 1;
+
+ for (niter = iter; niter <= 30; ++niter) {
+
+/* Test for convergence */
+
+ if (dabs(w) <= eps * erretm) {
+ if (orgati) {
+ *dlam = d__[*i__] + tau;
+ } else {
+ *dlam = d__[ip1] + tau;
+ }
+ goto L250;
+ }
+
+ if (w <= 0.f) {
+ dltlb = dmax(dltlb,tau);
+ } else {
+ dltub = dmin(dltub,tau);
+ }
+
+/* Calculate the new step */
+
+ if (! swtch3) {
+ if (! swtch) {
+ if (orgati) {
+/* Computing 2nd power */
+ r__1 = z__[*i__] / delta[*i__];
+ c__ = w - delta[ip1] * dw - (d__[*i__] - d__[ip1]) * (
+ r__1 * r__1);
+ } else {
+/* Computing 2nd power */
+ r__1 = z__[ip1] / delta[ip1];
+ c__ = w - delta[*i__] * dw - (d__[ip1] - d__[*i__]) *
+ (r__1 * r__1);
+ }
+ } else {
+ temp = z__[ii] / delta[ii];
+ if (orgati) {
+ dpsi += temp * temp;
+ } else {
+ dphi += temp * temp;
+ }
+ c__ = w - delta[*i__] * dpsi - delta[ip1] * dphi;
+ }
+ a = (delta[*i__] + delta[ip1]) * w - delta[*i__] * delta[ip1]
+ * dw;
+ b = delta[*i__] * delta[ip1] * w;
+ if (c__ == 0.f) {
+ if (a == 0.f) {
+ if (! swtch) {
+ if (orgati) {
+ a = z__[*i__] * z__[*i__] + delta[ip1] *
+ delta[ip1] * (dpsi + dphi);
+ } else {
+ a = z__[ip1] * z__[ip1] + delta[*i__] * delta[
+ *i__] * (dpsi + dphi);
+ }
+ } else {
+ a = delta[*i__] * delta[*i__] * dpsi + delta[ip1]
+ * delta[ip1] * dphi;
+ }
+ }
+ eta = b / a;
+ } else if (a <= 0.f) {
+ eta = (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1))
+ )) / (c__ * 2.f);
+ } else {
+ eta = b * 2.f / (a + sqrt((r__1 = a * a - b * 4.f * c__,
+ dabs(r__1))));
+ }
+ } else {
+
+/* Interpolation using THREE most relevant poles */
+
+ temp = rhoinv + psi + phi;
+ if (swtch) {
+ c__ = temp - delta[iim1] * dpsi - delta[iip1] * dphi;
+ zz[0] = delta[iim1] * delta[iim1] * dpsi;
+ zz[2] = delta[iip1] * delta[iip1] * dphi;
+ } else {
+ if (orgati) {
+ temp1 = z__[iim1] / delta[iim1];
+ temp1 *= temp1;
+ c__ = temp - delta[iip1] * (dpsi + dphi) - (d__[iim1]
+ - d__[iip1]) * temp1;
+ zz[0] = z__[iim1] * z__[iim1];
+ zz[2] = delta[iip1] * delta[iip1] * (dpsi - temp1 +
+ dphi);
+ } else {
+ temp1 = z__[iip1] / delta[iip1];
+ temp1 *= temp1;
+ c__ = temp - delta[iim1] * (dpsi + dphi) - (d__[iip1]
+ - d__[iim1]) * temp1;
+ zz[0] = delta[iim1] * delta[iim1] * (dpsi + (dphi -
+ temp1));
+ zz[2] = z__[iip1] * z__[iip1];
+ }
+ }
+ slaed6_(&niter, &orgati, &c__, &delta[iim1], zz, &w, &eta,
+ info);
+ if (*info != 0) {
+ goto L250;
+ }
+ }
+
+/* Note, eta should be positive if w is negative, and */
+/* eta should be negative otherwise. However, */
+/* if for some reason caused by roundoff, eta*w > 0, */
+/* we simply use one Newton step instead. This way */
+/* will guarantee eta*w < 0. */
+
+ if (w * eta >= 0.f) {
+ eta = -w / dw;
+ }
+ temp = tau + eta;
+ if (temp > dltub || temp < dltlb) {
+ if (w < 0.f) {
+ eta = (dltub - tau) / 2.f;
+ } else {
+ eta = (dltlb - tau) / 2.f;
+ }
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+/* L210: */
+ }
+
+ tau += eta;
+ prew = w;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L220: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.f;
+ phi = 0.f;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / delta[j];
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L230: */
+ }
+
+ temp = z__[ii] / delta[ii];
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w = rhoinv + phi + psi + temp;
+ erretm = (phi - psi) * 8.f + erretm + rhoinv * 2.f + dabs(temp) *
+ 3.f + dabs(tau) * dw;
+ if (w * prew > 0.f && dabs(w) > dabs(prew) / 10.f) {
+ swtch = ! swtch;
+ }
+
+/* L240: */
+ }
+
+/* Return with INFO = 1, NITER = MAXIT and not converged */
+
+ *info = 1;
+ if (orgati) {
+ *dlam = d__[*i__] + tau;
+ } else {
+ *dlam = d__[ip1] + tau;
+ }
+
+ }
+
+L250:
+
+ return 0;
+
+/* End of SLAED4 */
+
+} /* slaed4_ */
diff --git a/contrib/libs/clapack/slaed5.c b/contrib/libs/clapack/slaed5.c
new file mode 100644
index 0000000000..756fdd8d88
--- /dev/null
+++ b/contrib/libs/clapack/slaed5.c
@@ -0,0 +1,149 @@
+/* slaed5.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slaed5_(integer *i__, real *d__, real *z__, real *delta,
+ real *rho, real *dlam)
+{
+ /* System generated locals */
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ real b, c__, w, del, tau, temp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This subroutine computes the I-th eigenvalue of a symmetric rank-one */
+/* modification of a 2-by-2 diagonal matrix */
+
+/* diag( D ) + RHO * Z * transpose(Z) . */
+
+/* The diagonal elements in the array D are assumed to satisfy */
+
+/* D(i) < D(j) for i < j . */
+
+/* We also assume RHO > 0 and that the Euclidean norm of the vector */
+/* Z is one. */
+
+/* Arguments */
+/* ========= */
+
+/* I (input) INTEGER */
+/* The index of the eigenvalue to be computed. I = 1 or I = 2. */
+
+/* D (input) REAL array, dimension (2) */
+/* The original eigenvalues. We assume D(1) < D(2). */
+
+/* Z (input) REAL array, dimension (2) */
+/* The components of the updating vector. */
+
+/* DELTA (output) REAL array, dimension (2) */
+/* The vector DELTA contains the information necessary */
+/* to construct the eigenvectors. */
+
+/* RHO (input) REAL */
+/* The scalar in the symmetric updating formula. */
+
+/* DLAM (output) REAL */
+/* The computed lambda_I, the I-th updated eigenvalue. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ren-Cang Li, Computer Science Division, University of California */
+/* at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --delta;
+ --z__;
+ --d__;
+
+ /* Function Body */
+ del = d__[2] - d__[1];
+ if (*i__ == 1) {
+ w = *rho * 2.f * (z__[2] * z__[2] - z__[1] * z__[1]) / del + 1.f;
+ if (w > 0.f) {
+ b = del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+ c__ = *rho * z__[1] * z__[1] * del;
+
+/* B > ZERO, always */
+
+ tau = c__ * 2.f / (b + sqrt((r__1 = b * b - c__ * 4.f, dabs(r__1))
+ ));
+ *dlam = d__[1] + tau;
+ delta[1] = -z__[1] / tau;
+ delta[2] = z__[2] / (del - tau);
+ } else {
+ b = -del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+ c__ = *rho * z__[2] * z__[2] * del;
+ if (b > 0.f) {
+ tau = c__ * -2.f / (b + sqrt(b * b + c__ * 4.f));
+ } else {
+ tau = (b - sqrt(b * b + c__ * 4.f)) / 2.f;
+ }
+ *dlam = d__[2] + tau;
+ delta[1] = -z__[1] / (del + tau);
+ delta[2] = -z__[2] / tau;
+ }
+ temp = sqrt(delta[1] * delta[1] + delta[2] * delta[2]);
+ delta[1] /= temp;
+ delta[2] /= temp;
+ } else {
+
+/* Now I=2 */
+
+ b = -del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+ c__ = *rho * z__[2] * z__[2] * del;
+ if (b > 0.f) {
+ tau = (b + sqrt(b * b + c__ * 4.f)) / 2.f;
+ } else {
+ tau = c__ * 2.f / (-b + sqrt(b * b + c__ * 4.f));
+ }
+ *dlam = d__[2] + tau;
+ delta[1] = -z__[1] / (del + tau);
+ delta[2] = -z__[2] / tau;
+ temp = sqrt(delta[1] * delta[1] + delta[2] * delta[2]);
+ delta[1] /= temp;
+ delta[2] /= temp;
+ }
+ return 0;
+
+/* End of SLAED5 */
+
+} /* slaed5_ */
diff --git a/contrib/libs/clapack/slaed6.c b/contrib/libs/clapack/slaed6.c
new file mode 100644
index 0000000000..07e353e5c6
--- /dev/null
+++ b/contrib/libs/clapack/slaed6.c
@@ -0,0 +1,375 @@
+/* slaed6.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slaed6_(integer *kniter, logical *orgati, real *rho,
+ real *d__, real *z__, real *finit, real *tau, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2, r__3, r__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal), log(doublereal), pow_ri(real *, integer *);
+
+ /* Local variables */
+ real a, b, c__, f;
+ integer i__;
+ real fc, df, ddf, lbd, eta, ubd, eps, base;
+ integer iter;
+ real temp, temp1, temp2, temp3, temp4;
+ logical scale;
+ integer niter;
+ real small1, small2, sminv1, sminv2, dscale[3], sclfac;
+ extern doublereal slamch_(char *);
+ real zscale[3], erretm, sclinv;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* February 2007 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAED6 computes the positive or negative root (closest to the origin) */
+/* of */
+/* z(1) z(2) z(3) */
+/* f(x) = rho + --------- + ---------- + --------- */
+/* d(1)-x d(2)-x d(3)-x */
+
+/* It is assumed that */
+
+/* if ORGATI = .true. the root is between d(2) and d(3); */
+/* otherwise it is between d(1) and d(2) */
+
+/* This routine will be called by SLAED4 when necessary. In most cases, */
+/* the root sought is the smallest in magnitude, though it might not be */
+/* in some extremely rare situations. */
+
+/* Arguments */
+/* ========= */
+
+/* KNITER (input) INTEGER */
+/* Refer to SLAED4 for its significance. */
+
+/* ORGATI (input) LOGICAL */
+/* If ORGATI is true, the needed root is between d(2) and */
+/* d(3); otherwise it is between d(1) and d(2). See */
+/* SLAED4 for further details. */
+
+/* RHO (input) REAL */
+/* Refer to the equation f(x) above. */
+
+/* D (input) REAL array, dimension (3) */
+/* D satisfies d(1) < d(2) < d(3). */
+
+/* Z (input) REAL array, dimension (3) */
+/* Each of the elements in z must be positive. */
+
+/* FINIT (input) REAL */
+/* The value of f at 0. It is more accurate than the one */
+/* evaluated inside this routine (if someone wants to do */
+/* so). */
+
+/* TAU (output) REAL */
+/* The root of the equation f(x). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* > 0: if INFO = 1, failure to converge */
+
+/* Further Details */
+/* =============== */
+
+/* 30/06/99: Based on contributions by */
+/* Ren-Cang Li, Computer Science Division, University of California */
+/* at Berkeley, USA */
+
+/* 10/02/03: This version has a few statements commented out for thread safety */
+/* (machine parameters are computed on each entry). SJH. */
+
+/* 05/10/06: Modified from a new version of Ren-Cang Li, use */
+/* Gragg-Thornton-Warner cubic convergent scheme for better stability. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --z__;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*orgati) {
+ lbd = d__[2];
+ ubd = d__[3];
+ } else {
+ lbd = d__[1];
+ ubd = d__[2];
+ }
+ if (*finit < 0.f) {
+ lbd = 0.f;
+ } else {
+ ubd = 0.f;
+ }
+
+ niter = 1;
+ *tau = 0.f;
+ if (*kniter == 2) {
+ if (*orgati) {
+ temp = (d__[3] - d__[2]) / 2.f;
+ c__ = *rho + z__[1] / (d__[1] - d__[2] - temp);
+ a = c__ * (d__[2] + d__[3]) + z__[2] + z__[3];
+ b = c__ * d__[2] * d__[3] + z__[2] * d__[3] + z__[3] * d__[2];
+ } else {
+ temp = (d__[1] - d__[2]) / 2.f;
+ c__ = *rho + z__[3] / (d__[3] - d__[2] - temp);
+ a = c__ * (d__[1] + d__[2]) + z__[1] + z__[2];
+ b = c__ * d__[1] * d__[2] + z__[1] * d__[2] + z__[2] * d__[1];
+ }
+/* Computing MAX */
+ r__1 = dabs(a), r__2 = dabs(b), r__1 = max(r__1,r__2), r__2 = dabs(
+ c__);
+ temp = dmax(r__1,r__2);
+ a /= temp;
+ b /= temp;
+ c__ /= temp;
+ if (c__ == 0.f) {
+ *tau = b / a;
+ } else if (a <= 0.f) {
+ *tau = (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) / (
+ c__ * 2.f);
+ } else {
+ *tau = b * 2.f / (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ }
+ if (*tau < lbd || *tau > ubd) {
+ *tau = (lbd + ubd) / 2.f;
+ }
+ if (d__[1] == *tau || d__[2] == *tau || d__[3] == *tau) {
+ *tau = 0.f;
+ } else {
+ temp = *finit + *tau * z__[1] / (d__[1] * (d__[1] - *tau)) + *tau
+ * z__[2] / (d__[2] * (d__[2] - *tau)) + *tau * z__[3] / (
+ d__[3] * (d__[3] - *tau));
+ if (temp <= 0.f) {
+ lbd = *tau;
+ } else {
+ ubd = *tau;
+ }
+ if (dabs(*finit) <= dabs(temp)) {
+ *tau = 0.f;
+ }
+ }
+ }
+
+/* get machine parameters for possible scaling to avoid overflow */
+
+/* modified by Sven: parameters SMALL1, SMINV1, SMALL2, */
+/* SMINV2, EPS are no longer SAVEd between one call and */
+/* the next, but are recomputed at each call */
+
+ eps = slamch_("Epsilon");
+ base = slamch_("Base");
+ i__1 = (integer) (log(slamch_("SafMin")) / log(base) / 3.f);
+ small1 = pow_ri(&base, &i__1);
+ sminv1 = 1.f / small1;
+ small2 = small1 * small1;
+ sminv2 = sminv1 * sminv1;
+
+/* Determine if scaling of inputs necessary to avoid overflow */
+/* when computing 1/TEMP**3 */
+
+ if (*orgati) {
+/* Computing MIN */
+ r__3 = (r__1 = d__[2] - *tau, dabs(r__1)), r__4 = (r__2 = d__[3] - *
+ tau, dabs(r__2));
+ temp = dmin(r__3,r__4);
+ } else {
+/* Computing MIN */
+ r__3 = (r__1 = d__[1] - *tau, dabs(r__1)), r__4 = (r__2 = d__[2] - *
+ tau, dabs(r__2));
+ temp = dmin(r__3,r__4);
+ }
+ scale = FALSE_;
+ if (temp <= small1) {
+ scale = TRUE_;
+ if (temp <= small2) {
+
+/* Scale up by power of radix nearest 1/SAFMIN**(2/3) */
+
+ sclfac = sminv2;
+ sclinv = small2;
+ } else {
+
+/* Scale up by power of radix nearest 1/SAFMIN**(1/3) */
+
+ sclfac = sminv1;
+ sclinv = small1;
+ }
+
+/* Scaling up safe because D, Z, TAU scaled elsewhere to be O(1) */
+
+ for (i__ = 1; i__ <= 3; ++i__) {
+ dscale[i__ - 1] = d__[i__] * sclfac;
+ zscale[i__ - 1] = z__[i__] * sclfac;
+/* L10: */
+ }
+ *tau *= sclfac;
+ lbd *= sclfac;
+ ubd *= sclfac;
+ } else {
+
+/* Copy D and Z to DSCALE and ZSCALE */
+
+ for (i__ = 1; i__ <= 3; ++i__) {
+ dscale[i__ - 1] = d__[i__];
+ zscale[i__ - 1] = z__[i__];
+/* L20: */
+ }
+ }
+
+ fc = 0.f;
+ df = 0.f;
+ ddf = 0.f;
+ for (i__ = 1; i__ <= 3; ++i__) {
+ temp = 1.f / (dscale[i__ - 1] - *tau);
+ temp1 = zscale[i__ - 1] * temp;
+ temp2 = temp1 * temp;
+ temp3 = temp2 * temp;
+ fc += temp1 / dscale[i__ - 1];
+ df += temp2;
+ ddf += temp3;
+/* L30: */
+ }
+ f = *finit + *tau * fc;
+
+ if (dabs(f) <= 0.f) {
+ goto L60;
+ }
+ if (f <= 0.f) {
+ lbd = *tau;
+ } else {
+ ubd = *tau;
+ }
+
+/* Iteration begins -- Use Gragg-Thornton-Warner cubic convergent */
+/* scheme */
+
+/* It is not hard to see that */
+
+/* 1) Iterations will go up monotonically */
+/* if FINIT < 0; */
+
+/* 2) Iterations will go down monotonically */
+/* if FINIT > 0. */
+
+ iter = niter + 1;
+
+ for (niter = iter; niter <= 40; ++niter) {
+
+ if (*orgati) {
+ temp1 = dscale[1] - *tau;
+ temp2 = dscale[2] - *tau;
+ } else {
+ temp1 = dscale[0] - *tau;
+ temp2 = dscale[1] - *tau;
+ }
+ a = (temp1 + temp2) * f - temp1 * temp2 * df;
+ b = temp1 * temp2 * f;
+ c__ = f - (temp1 + temp2) * df + temp1 * temp2 * ddf;
+/* Computing MAX */
+ r__1 = dabs(a), r__2 = dabs(b), r__1 = max(r__1,r__2), r__2 = dabs(
+ c__);
+ temp = dmax(r__1,r__2);
+ a /= temp;
+ b /= temp;
+ c__ /= temp;
+ if (c__ == 0.f) {
+ eta = b / a;
+ } else if (a <= 0.f) {
+ eta = (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) / (
+ c__ * 2.f);
+ } else {
+ eta = b * 2.f / (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ }
+ if (f * eta >= 0.f) {
+ eta = -f / df;
+ }
+
+ *tau += eta;
+ if (*tau < lbd || *tau > ubd) {
+ *tau = (lbd + ubd) / 2.f;
+ }
+
+ fc = 0.f;
+ erretm = 0.f;
+ df = 0.f;
+ ddf = 0.f;
+ for (i__ = 1; i__ <= 3; ++i__) {
+ temp = 1.f / (dscale[i__ - 1] - *tau);
+ temp1 = zscale[i__ - 1] * temp;
+ temp2 = temp1 * temp;
+ temp3 = temp2 * temp;
+ temp4 = temp1 / dscale[i__ - 1];
+ fc += temp4;
+ erretm += dabs(temp4);
+ df += temp2;
+ ddf += temp3;
+/* L40: */
+ }
+ f = *finit + *tau * fc;
+ erretm = (dabs(*finit) + dabs(*tau) * erretm) * 8.f + dabs(*tau) * df;
+ if (dabs(f) <= eps * erretm) {
+ goto L60;
+ }
+ if (f <= 0.f) {
+ lbd = *tau;
+ } else {
+ ubd = *tau;
+ }
+/* L50: */
+ }
+ *info = 1;
+L60:
+
+/* Undo scaling */
+
+ if (scale) {
+ *tau *= sclinv;
+ }
+ return 0;
+
+/* End of SLAED6 */
+
+} /* slaed6_ */
diff --git a/contrib/libs/clapack/slaed7.c b/contrib/libs/clapack/slaed7.c
new file mode 100644
index 0000000000..d2631933ee
--- /dev/null
+++ b/contrib/libs/clapack/slaed7.c
@@ -0,0 +1,352 @@
+/* slaed7.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__2 = 2;
+static integer c__1 = 1;
+static real c_b10 = 1.f;
+static real c_b11 = 0.f;
+static integer c_n1 = -1;
+
+/* Subroutine */ int slaed7_(integer *icompq, integer *n, integer *qsiz,
+ integer *tlvls, integer *curlvl, integer *curpbm, real *d__, real *q,
+ integer *ldq, integer *indxq, real *rho, integer *cutpnt, real *
+ qstore, integer *qptr, integer *prmptr, integer *perm, integer *
+ givptr, integer *givcol, real *givnum, real *work, integer *iwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ integer i__, k, n1, n2, is, iw, iz, iq2, ptr, ldq2, indx, curr, indxc;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ integer indxp;
+ extern /* Subroutine */ int slaed8_(integer *, integer *, integer *,
+ integer *, real *, real *, integer *, integer *, real *, integer *
+, real *, real *, real *, integer *, real *, integer *, integer *,
+ integer *, real *, integer *, integer *, integer *), slaed9_(
+ integer *, integer *, integer *, integer *, real *, real *,
+ integer *, real *, real *, real *, real *, integer *, integer *),
+ slaeda_(integer *, integer *, integer *, integer *, integer *,
+ integer *, integer *, integer *, real *, real *, integer *, real *
+, real *, integer *);
+ integer idlmda;
+ extern /* Subroutine */ int xerbla_(char *, integer *), slamrg_(
+ integer *, integer *, real *, integer *, integer *, integer *);
+ integer coltyp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAED7 computes the updated eigensystem of a diagonal */
+/* matrix after modification by a rank-one symmetric matrix. This */
+/* routine is used only for the eigenproblem which requires all */
+/* eigenvalues and optionally eigenvectors of a dense symmetric matrix */
+/* that has been reduced to tridiagonal form. SLAED1 handles */
+/* the case in which all eigenvalues and eigenvectors of a symmetric */
+/* tridiagonal matrix are desired. */
+
+/* T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out) */
+
+/* where Z = Q'u, u is a vector of length N with ones in the */
+/* CUTPNT and CUTPNT + 1 th elements and zeros elsewhere. */
+
+/* The eigenvectors of the original matrix are stored in Q, and the */
+/* eigenvalues are in D. The algorithm consists of three stages: */
+
+/* The first stage consists of deflating the size of the problem */
+/* when there are multiple eigenvalues or if there is a zero in */
+/* the Z vector. For each such occurrence the dimension of the */
+/* secular equation problem is reduced by one. This stage is */
+/* performed by the routine SLAED8. */
+
+/* The second stage consists of calculating the updated */
+/* eigenvalues. This is done by finding the roots of the secular */
+/* equation via the routine SLAED4 (as called by SLAED9). */
+/* This routine also calculates the eigenvectors of the current */
+/* problem. */
+
+/* The final stage consists of computing the updated eigenvectors */
+/* directly using the updated eigenvalues. The eigenvectors for */
+/* the current problem are multiplied with the eigenvectors from */
+/* the overall problem. */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* = 0: Compute eigenvalues only. */
+/* = 1: Compute eigenvectors of original dense symmetric matrix */
+/* also. On entry, Q contains the orthogonal matrix used */
+/* to reduce the original matrix to tridiagonal form. */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* QSIZ (input) INTEGER */
+/* The dimension of the orthogonal matrix used to reduce */
+/* the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1. */
+
+/* TLVLS (input) INTEGER */
+/* The total number of merging levels in the overall divide and */
+/* conquer tree. */
+
+/* CURLVL (input) INTEGER */
+/* The current level in the overall merge routine, */
+/* 0 <= CURLVL <= TLVLS. */
+
+/* CURPBM (input) INTEGER */
+/* The current problem in the current level in the overall */
+/* merge routine (counting from upper left to lower right). */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the eigenvalues of the rank-1-perturbed matrix. */
+/* On exit, the eigenvalues of the repaired matrix. */
+
+/* Q (input/output) REAL array, dimension (LDQ, N) */
+/* On entry, the eigenvectors of the rank-1-perturbed matrix. */
+/* On exit, the eigenvectors of the repaired tridiagonal matrix. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* INDXQ (output) INTEGER array, dimension (N) */
+/* The permutation which will reintegrate the subproblem just */
+/* solved back into sorted order, i.e., D( INDXQ( I = 1, N ) ) */
+/* will be in ascending order. */
+
+/* RHO (input) REAL */
+/* The subdiagonal element used to create the rank-1 */
+/* modification. */
+
+/* CUTPNT (input) INTEGER */
+/* Contains the location of the last eigenvalue in the leading */
+/* sub-matrix. min(1,N) <= CUTPNT <= N. */
+
+/* QSTORE (input/output) REAL array, dimension (N**2+1) */
+/* Stores eigenvectors of submatrices encountered during */
+/* divide and conquer, packed together. QPTR points to */
+/* beginning of the submatrices. */
+
+/* QPTR (input/output) INTEGER array, dimension (N+2) */
+/* List of indices pointing to beginning of submatrices stored */
+/* in QSTORE. The submatrices are numbered starting at the */
+/* bottom left of the divide and conquer tree, from left to */
+/* right and bottom to top. */
+
+/* PRMPTR (input) INTEGER array, dimension (N lg N) */
+/* Contains a list of pointers which indicate where in PERM a */
+/* level's permutation is stored. PRMPTR(i+1) - PRMPTR(i) */
+/* indicates the size of the permutation and also the size of */
+/* the full, non-deflated problem. */
+
+/* PERM (input) INTEGER array, dimension (N lg N) */
+/* Contains the permutations (from deflation and sorting) to be */
+/* applied to each eigenblock. */
+
+/* GIVPTR (input) INTEGER array, dimension (N lg N) */
+/* Contains a list of pointers which indicate where in GIVCOL a */
+/* level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i) */
+/* indicates the number of Givens rotations. */
+
+/* GIVCOL (input) INTEGER array, dimension (2, N lg N) */
+/* Each pair of numbers indicates a pair of columns to take place */
+/* in a Givens rotation. */
+
+/* GIVNUM (input) REAL array, dimension (2, N lg N) */
+/* Each number indicates the S value to be used in the */
+/* corresponding Givens rotation. */
+
+/* WORK (workspace) REAL array, dimension (3*N+QSIZ*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (4*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, an eigenvalue did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --indxq;
+ --qstore;
+ --qptr;
+ --prmptr;
+ --perm;
+ --givptr;
+ givcol -= 3;
+ givnum -= 3;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*icompq == 1 && *qsiz < *n) {
+ *info = -4;
+ } else if (*ldq < max(1,*n)) {
+ *info = -9;
+ } else if (min(1,*n) > *cutpnt || *n < *cutpnt) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLAED7", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* The following values are for bookkeeping purposes only. They are */
+/* integer pointers which indicate the portion of the workspace */
+/* used by a particular array in SLAED8 and SLAED9. */
+
+ if (*icompq == 1) {
+ ldq2 = *qsiz;
+ } else {
+ ldq2 = *n;
+ }
+
+ iz = 1;
+ idlmda = iz + *n;
+ iw = idlmda + *n;
+ iq2 = iw + *n;
+ is = iq2 + *n * ldq2;
+
+ indx = 1;
+ indxc = indx + *n;
+ coltyp = indxc + *n;
+ indxp = coltyp + *n;
+
+/* Form the z-vector which consists of the last row of Q_1 and the */
+/* first row of Q_2. */
+
+ ptr = pow_ii(&c__2, tlvls) + 1;
+ i__1 = *curlvl - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *tlvls - i__;
+ ptr += pow_ii(&c__2, &i__2);
+/* L10: */
+ }
+ curr = ptr + *curpbm;
+ slaeda_(n, tlvls, curlvl, curpbm, &prmptr[1], &perm[1], &givptr[1], &
+ givcol[3], &givnum[3], &qstore[1], &qptr[1], &work[iz], &work[iz
+ + *n], info);
+
+/* When solving the final problem, we no longer need the stored data, */
+/* so we will overwrite the data from this level onto the previously */
+/* used storage space. */
+
+ if (*curlvl == *tlvls) {
+ qptr[curr] = 1;
+ prmptr[curr] = 1;
+ givptr[curr] = 1;
+ }
+
+/* Sort and Deflate eigenvalues. */
+
+ slaed8_(icompq, &k, n, qsiz, &d__[1], &q[q_offset], ldq, &indxq[1], rho,
+ cutpnt, &work[iz], &work[idlmda], &work[iq2], &ldq2, &work[iw], &
+ perm[prmptr[curr]], &givptr[curr + 1], &givcol[(givptr[curr] << 1)
+ + 1], &givnum[(givptr[curr] << 1) + 1], &iwork[indxp], &iwork[
+ indx], info);
+ prmptr[curr + 1] = prmptr[curr] + *n;
+ givptr[curr + 1] += givptr[curr];
+
+/* Solve Secular Equation. */
+
+ if (k != 0) {
+ slaed9_(&k, &c__1, &k, n, &d__[1], &work[is], &k, rho, &work[idlmda],
+ &work[iw], &qstore[qptr[curr]], &k, info);
+ if (*info != 0) {
+ goto L30;
+ }
+ if (*icompq == 1) {
+ sgemm_("N", "N", qsiz, &k, &k, &c_b10, &work[iq2], &ldq2, &qstore[
+ qptr[curr]], &k, &c_b11, &q[q_offset], ldq);
+ }
+/* Computing 2nd power */
+ i__1 = k;
+ qptr[curr + 1] = qptr[curr] + i__1 * i__1;
+
+/* Prepare the INDXQ sorting permutation. */
+
+ n1 = k;
+ n2 = *n - k;
+ slamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]);
+ } else {
+ qptr[curr + 1] = qptr[curr];
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ indxq[i__] = i__;
+/* L20: */
+ }
+ }
+
+L30:
+ return 0;
+
+/* End of SLAED7 */
+
+} /* slaed7_ */
diff --git a/contrib/libs/clapack/slaed8.c b/contrib/libs/clapack/slaed8.c
new file mode 100644
index 0000000000..25776f847f
--- /dev/null
+++ b/contrib/libs/clapack/slaed8.c
@@ -0,0 +1,475 @@
+/* slaed8.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b3 = -1.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int slaed8_(integer *icompq, integer *k, integer *n, integer
+	*qsiz, real *d__, real *q, integer *ldq, integer *indxq, real *rho,
+	integer *cutpnt, real *z__, real *dlamda, real *q2, integer *ldq2,
+	real *w, integer *perm, integer *givptr, integer *givcol, real *
+	givnum, integer *indxp, integer *indx, integer *info)
+{
+    /* System generated locals */
+    integer q_dim1, q_offset, q2_dim1, q2_offset, i__1;
+    real r__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    real c__;
+    integer i__, j;
+    real s, t;
+    integer k2, n1, n2, jp, n1p1;
+    real eps, tau, tol;
+    integer jlam, imax, jmax;
+    extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+	    integer *, real *, real *), sscal_(integer *, real *, real *,
+	    integer *), scopy_(integer *, real *, integer *, real *, integer *
+);
+    extern doublereal slapy2_(real *, real *), slamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer isamax_(integer *, real *, integer *);
+    extern /* Subroutine */ int slamrg_(integer *, integer *, real *, integer
+	    *, integer *, integer *), slacpy_(char *, integer *, integer *,
+	    real *, integer *, real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAED8 merges the two sets of eigenvalues together into a single */
+/* sorted set. Then it tries to deflate the size of the problem. */
+/* There are two ways in which deflation can occur: when two or more */
+/* eigenvalues are close together or if there is a tiny element in the */
+/* Z vector. For each such occurrence the order of the related secular */
+/* equation problem is reduced by one. */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* = 0: Compute eigenvalues only. */
+/* = 1: Compute eigenvectors of original dense symmetric matrix */
+/* also. On entry, Q contains the orthogonal matrix used */
+/* to reduce the original matrix to tridiagonal form. */
+
+/* K (output) INTEGER */
+/* The number of non-deflated eigenvalues, and the order of the */
+/* related secular equation. */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* QSIZ (input) INTEGER */
+/* The dimension of the orthogonal matrix used to reduce */
+/* the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the eigenvalues of the two submatrices to be */
+/* combined. On exit, the trailing (N-K) updated eigenvalues */
+/* (those which were deflated) sorted into increasing order. */
+
+/* Q (input/output) REAL array, dimension (LDQ,N) */
+/* If ICOMPQ = 0, Q is not referenced. Otherwise, */
+/* on entry, Q contains the eigenvectors of the partially solved */
+/* system which has been previously updated in matrix */
+/* multiplies with other partially solved eigensystems. */
+/* On exit, Q contains the trailing (N-K) updated eigenvectors */
+/* (those which were deflated) in its last N-K columns. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* INDXQ (input) INTEGER array, dimension (N) */
+/* The permutation which separately sorts the two sub-problems */
+/* in D into ascending order. Note that elements in the second */
+/* half of this permutation must first have CUTPNT added to */
+/* their values in order to be accurate. */
+
+/* RHO (input/output) REAL */
+/* On entry, the off-diagonal element associated with the rank-1 */
+/* cut which originally split the two submatrices which are now */
+/* being recombined. */
+/* On exit, RHO has been modified to the value required by */
+/* SLAED3. */
+
+/* CUTPNT (input) INTEGER */
+/* The location of the last eigenvalue in the leading */
+/* sub-matrix. min(1,N) <= CUTPNT <= N. */
+
+/* Z (input) REAL array, dimension (N) */
+/* On entry, Z contains the updating vector (the last row of */
+/* the first sub-eigenvector matrix and the first row of the */
+/* second sub-eigenvector matrix). */
+/* On exit, the contents of Z are destroyed by the updating */
+/* process. */
+
+/* DLAMDA (output) REAL array, dimension (N) */
+/* A copy of the first K eigenvalues which will be used by */
+/* SLAED3 to form the secular equation. */
+
+/* Q2 (output) REAL array, dimension (LDQ2,N) */
+/* If ICOMPQ = 0, Q2 is not referenced. Otherwise, */
+/* a copy of the first K eigenvectors which will be used by */
+/* SLAED7 in a matrix multiply (SGEMM) to update the new */
+/* eigenvectors. */
+
+/* LDQ2 (input) INTEGER */
+/* The leading dimension of the array Q2. LDQ2 >= max(1,N). */
+
+/* W (output) REAL array, dimension (N) */
+/* The first k values of the final deflation-altered z-vector and */
+/* will be passed to SLAED3. */
+
+/* PERM (output) INTEGER array, dimension (N) */
+/* The permutations (from deflation and sorting) to be applied */
+/* to each eigenblock. */
+
+/* GIVPTR (output) INTEGER */
+/* The number of Givens rotations which took place in this */
+/* subproblem. */
+
+/* GIVCOL (output) INTEGER array, dimension (2, N) */
+/* Each pair of numbers indicates a pair of columns to take place */
+/* in a Givens rotation. */
+
+/* GIVNUM (output) REAL array, dimension (2, N) */
+/* Each number indicates the S value to be used in the */
+/* corresponding Givens rotation. */
+
+/* INDXP (workspace) INTEGER array, dimension (N) */
+/* The permutation used to place deflated values of D at the end */
+/* of the array. INDXP(1:K) points to the nondeflated D-values */
+/* and INDXP(K+1:N) points to the deflated eigenvalues. */
+
+/* INDX (workspace) INTEGER array, dimension (N) */
+/* The permutation used to sort the contents of D into ascending */
+/* order. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* f2c emulates Fortran 1-based indexing by shifting each array pointer */
+/* down by one; the 2-by-N arrays GIVCOL and GIVNUM are shifted by */
+/* 3 = 1 + leading dimension (2), so column j starts at index 2*j+1. */
+
+    /* Parameter adjustments */
+    --d__;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    --indxq;
+    --z__;
+    --dlamda;
+    q2_dim1 = *ldq2;
+    q2_offset = 1 + q2_dim1;
+    q2 -= q2_offset;
+    --w;
+    --perm;
+    givcol -= 3;
+    givnum -= 3;
+    --indxp;
+    --indx;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*icompq < 0 || *icompq > 1) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*icompq == 1 && *qsiz < *n) {
+	*info = -4;
+    } else if (*ldq < max(1,*n)) {
+	*info = -7;
+    } else if (*cutpnt < min(1,*n) || *cutpnt > *n) {
+	*info = -10;
+    } else if (*ldq2 < max(1,*n)) {
+	*info = -14;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SLAED8", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    n1 = *cutpnt;
+    n2 = *n - n1;
+    n1p1 = n1 + 1;
+
+    if (*rho < 0.f) {
+	sscal_(&n2, &c_b3, &z__[n1p1], &c__1);
+    }
+
+/* Normalize z so that norm(z) = 1 */
+
+    t = 1.f / sqrt(2.f);
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	indx[j] = j;
+/* L10: */
+    }
+    sscal_(n, &t, &z__[1], &c__1);
+    *rho = (r__1 = *rho * 2.f, dabs(r__1));
+
+/* Sort the eigenvalues into increasing order */
+
+    i__1 = *n;
+    for (i__ = *cutpnt + 1; i__ <= i__1; ++i__) {
+	indxq[i__] += *cutpnt;
+/* L20: */
+    }
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dlamda[i__] = d__[indxq[i__]];
+	w[i__] = z__[indxq[i__]];
+/* L30: */
+    }
+    i__ = 1;
+    j = *cutpnt + 1;
+    slamrg_(&n1, &n2, &dlamda[1], &c__1, &c__1, &indx[1]);
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	d__[i__] = dlamda[indx[i__]];
+	z__[i__] = w[indx[i__]];
+/* L40: */
+    }
+
+/* Calculate the allowable deflation tolerance */
+
+    imax = isamax_(n, &z__[1], &c__1);
+    jmax = isamax_(n, &d__[1], &c__1);
+    eps = slamch_("Epsilon");
+    tol = eps * 8.f * (r__1 = d__[jmax], dabs(r__1));
+
+/* If the rank-1 modifier is small enough, no more needs to be done */
+/* except to reorganize Q so that its columns correspond with the */
+/* elements in D. */
+
+    if (*rho * (r__1 = z__[imax], dabs(r__1)) <= tol) {
+	*k = 0;
+	if (*icompq == 0) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		perm[j] = indxq[indx[j]];
+/* L50: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		perm[j] = indxq[indx[j]];
+		scopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1
+			+ 1], &c__1);
+/* L60: */
+	    }
+	    slacpy_("A", qsiz, n, &q2[q2_dim1 + 1], ldq2, &q[q_dim1 + 1], ldq);
+	}
+	return 0;
+    }
+
+/* If there are multiple eigenvalues then the problem deflates. Here */
+/* the number of equal eigenvalues are found. As each equal */
+/* eigenvalue is found, an elementary reflector is computed to rotate */
+/* the corresponding eigensubspace so that the corresponding */
+/* components of Z are zero in this new basis. */
+
+    *k = 0;
+    *givptr = 0;
+    k2 = *n + 1;
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	if (*rho * (r__1 = z__[j], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+	    --k2;
+	    indxp[k2] = j;
+	    if (j == *n) {
+		goto L110;
+	    }
+	} else {
+	    jlam = j;
+	    goto L80;
+	}
+/* L70: */
+    }
+/* Merge loop: JLAM holds the current surviving eigenvalue; compare it */
+/* with the next sorted entry J and either deflate it (tiny z-component, */
+/* or nearly equal eigenvalues eliminated via a Givens rotation) or */
+/* accept JLAM as a non-deflated eigenvalue. */
+L80:
+    ++j;
+    if (j > *n) {
+	goto L100;
+    }
+    if (*rho * (r__1 = z__[j], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+	--k2;
+	indxp[k2] = j;
+    } else {
+
+/* Check if eigenvalues are close enough to allow deflation. */
+
+	s = z__[jlam];
+	c__ = z__[j];
+
+/* Find sqrt(a**2+b**2) without overflow or */
+/* destructive underflow. */
+
+	tau = slapy2_(&c__, &s);
+	t = d__[j] - d__[jlam];
+	c__ /= tau;
+	s = -s / tau;
+	if ((r__1 = t * c__ * s, dabs(r__1)) <= tol) {
+
+/* Deflation is possible. */
+
+	    z__[j] = tau;
+	    z__[jlam] = 0.f;
+
+/* Record the appropriate Givens rotation */
+
+/* GIVCOL/GIVNUM are 2-by-N; (*givptr << 1) addresses column *givptr */
+/* of the pointer-shifted arrays (see Parameter adjustments above). */
+
+	    ++(*givptr);
+	    givcol[(*givptr << 1) + 1] = indxq[indx[jlam]];
+	    givcol[(*givptr << 1) + 2] = indxq[indx[j]];
+	    givnum[(*givptr << 1) + 1] = c__;
+	    givnum[(*givptr << 1) + 2] = s;
+	    if (*icompq == 1) {
+		srot_(qsiz, &q[indxq[indx[jlam]] * q_dim1 + 1], &c__1, &q[
+			indxq[indx[j]] * q_dim1 + 1], &c__1, &c__, &s);
+	    }
+	    t = d__[jlam] * c__ * c__ + d__[j] * s * s;
+	    d__[j] = d__[jlam] * s * s + d__[j] * c__ * c__;
+	    d__[jlam] = t;
+	    --k2;
+	    i__ = 1;
+/* Insertion scan: place the newly deflated eigenvalue JLAM into sorted */
+/* position among the previously deflated values INDXP(K2+1:N). */
+L90:
+	    if (k2 + i__ <= *n) {
+		if (d__[jlam] < d__[indxp[k2 + i__]]) {
+		    indxp[k2 + i__ - 1] = indxp[k2 + i__];
+		    indxp[k2 + i__] = jlam;
+		    ++i__;
+		    goto L90;
+		} else {
+		    indxp[k2 + i__ - 1] = jlam;
+		}
+	    } else {
+		indxp[k2 + i__ - 1] = jlam;
+	    }
+	    jlam = j;
+	} else {
+	    ++(*k);
+	    w[*k] = z__[jlam];
+	    dlamda[*k] = d__[jlam];
+	    indxp[*k] = jlam;
+	    jlam = j;
+	}
+    }
+    goto L80;
+L100:
+
+/* Record the last eigenvalue. */
+
+    ++(*k);
+    w[*k] = z__[jlam];
+    dlamda[*k] = d__[jlam];
+    indxp[*k] = jlam;
+
+L110:
+
+/* Sort the eigenvalues and corresponding eigenvectors into DLAMDA */
+/* and Q2 respectively. The eigenvalues/vectors which were not */
+/* deflated go into the first K slots of DLAMDA and Q2 respectively, */
+/* while those which were deflated go into the last N - K slots. */
+
+    if (*icompq == 0) {
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    jp = indxp[j];
+	    dlamda[j] = d__[jp];
+	    perm[j] = indxq[indx[jp]];
+/* L120: */
+	}
+    } else {
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    jp = indxp[j];
+	    dlamda[j] = d__[jp];
+	    perm[j] = indxq[indx[jp]];
+	    scopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1]
+, &c__1);
+/* L130: */
+	}
+    }
+
+/* The deflated eigenvalues and their corresponding vectors go back */
+/* into the last N - K slots of D and Q respectively. */
+
+    if (*k < *n) {
+	if (*icompq == 0) {
+	    i__1 = *n - *k;
+	    scopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1);
+	} else {
+	    i__1 = *n - *k;
+	    scopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1);
+	    i__1 = *n - *k;
+	    slacpy_("A", qsiz, &i__1, &q2[(*k + 1) * q2_dim1 + 1], ldq2, &q[(*
+		    k + 1) * q_dim1 + 1], ldq);
+	}
+    }
+
+    return 0;
+
+/* End of SLAED8 */
+
+} /* slaed8_ */
diff --git a/contrib/libs/clapack/slaed9.c b/contrib/libs/clapack/slaed9.c
new file mode 100644
index 0000000000..df4c0bdf77
--- /dev/null
+++ b/contrib/libs/clapack/slaed9.c
@@ -0,0 +1,272 @@
+/* slaed9.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int slaed9_(integer *k, integer *kstart, integer *kstop,
+	integer *n, real *d__, real *q, integer *ldq, real *rho, real *dlamda,
+	real *w, real *s, integer *lds, integer *info)
+{
+    /* System generated locals */
+    integer q_dim1, q_offset, s_dim1, s_offset, i__1, i__2;
+    real r__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal), r_sign(real *, real *);
+
+    /* Local variables */
+    integer i__, j;
+    real temp;
+    extern doublereal snrm2_(integer *, real *, integer *);
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+	    integer *), slaed4_(integer *, integer *, real *, real *, real *,
+	    real *, real *, integer *);
+    extern doublereal slamc3_(real *, real *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAED9 finds the roots of the secular equation, as defined by the */
+/* values in D, Z, and RHO, between KSTART and KSTOP. It makes the */
+/* appropriate calls to SLAED4 and then stores the new matrix of */
+/* eigenvectors for use in calculating the next level of Z vectors. */
+
+/* Arguments */
+/* ========= */
+
+/* K (input) INTEGER */
+/* The number of terms in the rational function to be solved by */
+/* SLAED4. K >= 0. */
+
+/* KSTART (input) INTEGER */
+/* KSTOP (input) INTEGER */
+/* The updated eigenvalues Lambda(I), KSTART <= I <= KSTOP */
+/* are to be computed. 1 <= KSTART <= KSTOP <= K. */
+
+/* N (input) INTEGER */
+/* The number of rows and columns in the Q matrix. */
+/* N >= K (deflation may result in N > K). */
+
+/* D (output) REAL array, dimension (N) */
+/* D(I) contains the updated eigenvalues */
+/* for KSTART <= I <= KSTOP. */
+
+/* Q (workspace) REAL array, dimension (LDQ,N) */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max( 1, N ). */
+
+/* RHO (input) REAL */
+/* The value of the parameter in the rank one update equation. */
+/* RHO >= 0 required. */
+
+/* DLAMDA (input) REAL array, dimension (K) */
+/* The first K elements of this array contain the old roots */
+/* of the deflated updating problem. These are the poles */
+/* of the secular equation. */
+
+/* W (input) REAL array, dimension (K) */
+/* The first K elements of this array contain the components */
+/* of the deflation-adjusted updating vector. */
+
+/* S (output) REAL array, dimension (LDS, K) */
+/* Will contain the eigenvectors of the repaired matrix which */
+/* will be stored for subsequent Z vector calculation and */
+/* multiplied by the previously accumulated eigenvectors */
+/* to update the system. */
+
+/* LDS (input) INTEGER */
+/* The leading dimension of S. LDS >= max( 1, K ). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, an eigenvalue did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* f2c emulates Fortran 1-based indexing by shifting each array pointer */
+/* down by one (plus the leading dimension for the 2-D arrays Q and S). */
+
+    /* Parameter adjustments */
+    --d__;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    --dlamda;
+    --w;
+    s_dim1 = *lds;
+    s_offset = 1 + s_dim1;
+    s -= s_offset;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*k < 0) {
+	*info = -1;
+    } else if (*kstart < 1 || *kstart > max(1,*k)) {
+	*info = -2;
+    } else if (max(1,*kstop) < *kstart || *kstop > max(1,*k)) {
+	*info = -3;
+    } else if (*n < *k) {
+	*info = -4;
+    } else if (*ldq < max(1,*k)) {
+	*info = -7;
+    } else if (*lds < max(1,*k)) {
+	*info = -12;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SLAED9", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*k == 0) {
+	return 0;
+    }
+
+/* Modify values DLAMDA(i) to make sure all DLAMDA(i)-DLAMDA(j) can */
+/* be computed with high relative accuracy (barring over/underflow). */
+/* This is a problem on machines without a guard digit in */
+/* add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2). */
+/* The following code replaces DLAMDA(I) by 2*DLAMDA(I)-DLAMDA(I), */
+/* which on any of these machines zeros out the bottommost */
+/* bit of DLAMDA(I) if it is 1; this makes the subsequent */
+/* subtractions DLAMDA(I)-DLAMDA(J) unproblematic when cancellation */
+/* occurs. On binary machines with a guard digit (almost all */
+/* machines) it does not change DLAMDA(I) at all. On hexadecimal */
+/* and decimal machines with a guard digit, it slightly */
+/* changes the bottommost bits of DLAMDA(I). It does not account */
+/* for hexadecimal or decimal machines without guard digits */
+/* (we know of none). We use a subroutine call to compute */
+/* 2*DLAMBDA(I) to prevent optimizing compilers from eliminating */
+/* this code. */
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	dlamda[i__] = slamc3_(&dlamda[i__], &dlamda[i__]) - dlamda[i__];
+/* L10: */
+    }
+
+    i__1 = *kstop;
+    for (j = *kstart; j <= i__1; ++j) {
+	slaed4_(k, &j, &dlamda[1], &w[1], &q[j * q_dim1 + 1], rho, &d__[j],
+		info);
+
+/* If the zero finder fails, the computation is terminated. */
+
+	if (*info != 0) {
+	    goto L120;
+	}
+/* L20: */
+    }
+
+    if (*k == 1 || *k == 2) {
+	i__1 = *k;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = *k;
+	    for (j = 1; j <= i__2; ++j) {
+		s[j + i__ * s_dim1] = q[j + i__ * q_dim1];
+/* L30: */
+	    }
+/* L40: */
+	}
+	goto L120;
+    }
+
+/* Compute updated W. */
+
+    scopy_(k, &w[1], &c__1, &s[s_offset], &c__1);
+
+/* Initialize W(I) = Q(I,I) */
+
+    i__1 = *ldq + 1;
+    scopy_(k, &q[q_offset], &i__1, &w[1], &c__1);
+    i__1 = *k;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = j - 1;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L50: */
+	}
+	i__2 = *k;
+	for (i__ = j + 1; i__ <= i__2; ++i__) {
+	    w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L60: */
+	}
+/* L70: */
+    }
+/* The accumulated product above is the negative of W(I)**2; recover */
+/* W(I) by taking the square root of its negation, restoring the sign */
+/* of the original z-component that was saved in S(:,1) above. */
+    i__1 = *k;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	r__1 = sqrt(-w[i__]);
+	w[i__] = r_sign(&r__1, &s[i__ + s_dim1]);
+/* L80: */
+    }
+
+/* Compute eigenvectors of the modified rank-1 modification. */
+
+    i__1 = *k;
+    for (j = 1; j <= i__1; ++j) {
+	i__2 = *k;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    q[i__ + j * q_dim1] = w[i__] / q[i__ + j * q_dim1];
+/* L90: */
+	}
+	temp = snrm2_(k, &q[j * q_dim1 + 1], &c__1);
+	i__2 = *k;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    s[i__ + j * s_dim1] = q[i__ + j * q_dim1] / temp;
+/* L100: */
+	}
+/* L110: */
+    }
+
+L120:
+    return 0;
+
+/* End of SLAED9 */
+
+} /* slaed9_ */
diff --git a/contrib/libs/clapack/slaeda.c b/contrib/libs/clapack/slaeda.c
new file mode 100644
index 0000000000..f9ffbd3e5a
--- /dev/null
+++ b/contrib/libs/clapack/slaeda.c
@@ -0,0 +1,283 @@
+/* slaeda.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__2 = 2;
+static integer c__1 = 1;
+static real c_b24 = 1.f;
+static real c_b26 = 0.f;
+
+/* Subroutine */ int slaeda_(integer *n, integer *tlvls, integer *curlvl,
+	integer *curpbm, integer *prmptr, integer *perm, integer *givptr,
+	integer *givcol, real *givnum, real *q, integer *qptr, real *z__,
+	real *ztemp, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3;
+
+    /* Builtin functions */
+    integer pow_ii(integer *, integer *);
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, k, mid, ptr, curr;
+    extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+	    integer *, real *, real *);
+    integer bsiz1, bsiz2, psiz1, psiz2, zptr1;
+    extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+	    real *, integer *, real *, integer *, real *, real *, integer *), scopy_(integer *, real *, integer *, real *, integer *),
+	    xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAEDA computes the Z vector corresponding to the merge step in the */
+/* CURLVLth step of the merge process with TLVLS steps for the CURPBMth */
+/* problem. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* TLVLS (input) INTEGER */
+/* The total number of merging levels in the overall divide and */
+/* conquer tree. */
+
+/* CURLVL (input) INTEGER */
+/* The current level in the overall merge routine, */
+/* 0 <= curlvl <= tlvls. */
+
+/* CURPBM (input) INTEGER */
+/* The current problem in the current level in the overall */
+/* merge routine (counting from upper left to lower right). */
+
+/* PRMPTR (input) INTEGER array, dimension (N lg N) */
+/* Contains a list of pointers which indicate where in PERM a */
+/* level's permutation is stored. PRMPTR(i+1) - PRMPTR(i) */
+/* indicates the size of the permutation and incidentally the */
+/* size of the full, non-deflated problem. */
+
+/* PERM (input) INTEGER array, dimension (N lg N) */
+/* Contains the permutations (from deflation and sorting) to be */
+/* applied to each eigenblock. */
+
+/* GIVPTR (input) INTEGER array, dimension (N lg N) */
+/* Contains a list of pointers which indicate where in GIVCOL a */
+/* level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i) */
+/* indicates the number of Givens rotations. */
+
+/* GIVCOL (input) INTEGER array, dimension (2, N lg N) */
+/* Each pair of numbers indicates a pair of columns to take place */
+/* in a Givens rotation. */
+
+/* GIVNUM (input) REAL array, dimension (2, N lg N) */
+/* Each number indicates the S value to be used in the */
+/* corresponding Givens rotation. */
+
+/* Q (input) REAL array, dimension (N**2) */
+/* Contains the square eigenblocks from previous levels, the */
+/* starting positions for blocks are given by QPTR. */
+
+/* QPTR (input) INTEGER array, dimension (N+2) */
+/* Contains a list of pointers which indicate where in Q an */
+/* eigenblock is stored. SQRT( QPTR(i+1) - QPTR(i) ) indicates */
+/* the size of the block. */
+
+/* Z (output) REAL array, dimension (N) */
+/* On output this vector contains the updating vector (the last */
+/* row of the first sub-eigenvector matrix and the first row of */
+/* the second sub-eigenvector matrix). */
+
+/* ZTEMP (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* f2c emulates Fortran 1-based indexing by shifting each array pointer */
+/* down by one; the 2-row arrays GIVCOL and GIVNUM are shifted by */
+/* 3 = 1 + leading dimension (2). */
+
+    /* Parameter adjustments */
+    --ztemp;
+    --z__;
+    --qptr;
+    --q;
+    givnum -= 3;
+    givcol -= 3;
+    --givptr;
+    --perm;
+    --prmptr;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*n < 0) {
+	*info = -1;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SLAEDA", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Determine location of first number in second half. */
+
+    mid = *n / 2 + 1;
+
+/* Gather last/first rows of appropriate eigenblocks into center of Z */
+
+    ptr = 1;
+
+/* Determine location of lowest level subproblem in the full storage */
+/* scheme */
+
+    i__1 = *curlvl - 1;
+    curr = ptr + *curpbm * pow_ii(&c__2, curlvl) + pow_ii(&c__2, &i__1) - 1;
+
+/* Determine size of these matrices. We add HALF to the value of */
+/* the SQRT in case the machine underestimates one of these square */
+/* roots. */
+
+    bsiz1 = (integer) (sqrt((real) (qptr[curr + 1] - qptr[curr])) + .5f);
+    bsiz2 = (integer) (sqrt((real) (qptr[curr + 2] - qptr[curr + 1])) + .5f);
+    i__1 = mid - bsiz1 - 1;
+    for (k = 1; k <= i__1; ++k) {
+	z__[k] = 0.f;
+/* L10: */
+    }
+    scopy_(&bsiz1, &q[qptr[curr] + bsiz1 - 1], &bsiz1, &z__[mid - bsiz1], &
+	    c__1);
+    scopy_(&bsiz2, &q[qptr[curr + 1]], &bsiz2, &z__[mid], &c__1);
+    i__1 = *n;
+    for (k = mid + bsiz2; k <= i__1; ++k) {
+	z__[k] = 0.f;
+/* L20: */
+    }
+
+/* Loop thru remaining levels 1 -> CURLVL applying the Givens */
+/* rotations and permutation and then multiplying the center matrices */
+/* against the current Z. */
+
+    ptr = pow_ii(&c__2, tlvls) + 1;
+    i__1 = *curlvl - 1;
+    for (k = 1; k <= i__1; ++k) {
+	i__2 = *curlvl - k;
+	i__3 = *curlvl - k - 1;
+	curr = ptr + *curpbm * pow_ii(&c__2, &i__2) + pow_ii(&c__2, &i__3) -
+		1;
+	psiz1 = prmptr[curr + 1] - prmptr[curr];
+	psiz2 = prmptr[curr + 2] - prmptr[curr + 1];
+	zptr1 = mid - psiz1;
+
+/* Apply Givens at CURR and CURR+1 */
+
+/* GIVCOL/GIVNUM are 2-by-(N lg N); (i__ << 1) addresses column i__ */
+/* of the pointer-shifted arrays (see Parameter adjustments above). */
+
+	i__2 = givptr[curr + 1] - 1;
+	for (i__ = givptr[curr]; i__ <= i__2; ++i__) {
+	    srot_(&c__1, &z__[zptr1 + givcol[(i__ << 1) + 1] - 1], &c__1, &
+		    z__[zptr1 + givcol[(i__ << 1) + 2] - 1], &c__1, &givnum[(
+		    i__ << 1) + 1], &givnum[(i__ << 1) + 2]);
+/* L30: */
+	}
+	i__2 = givptr[curr + 2] - 1;
+	for (i__ = givptr[curr + 1]; i__ <= i__2; ++i__) {
+	    srot_(&c__1, &z__[mid - 1 + givcol[(i__ << 1) + 1]], &c__1, &z__[
+		    mid - 1 + givcol[(i__ << 1) + 2]], &c__1, &givnum[(i__ <<
+		    1) + 1], &givnum[(i__ << 1) + 2]);
+/* L40: */
+	}
+/* (psiz1/psiz2 recomputed below; the values are unchanged from above.) */
+	psiz1 = prmptr[curr + 1] - prmptr[curr];
+	psiz2 = prmptr[curr + 2] - prmptr[curr + 1];
+	i__2 = psiz1 - 1;
+	for (i__ = 0; i__ <= i__2; ++i__) {
+	    ztemp[i__ + 1] = z__[zptr1 + perm[prmptr[curr] + i__] - 1];
+/* L50: */
+	}
+	i__2 = psiz2 - 1;
+	for (i__ = 0; i__ <= i__2; ++i__) {
+	    ztemp[psiz1 + i__ + 1] = z__[mid + perm[prmptr[curr + 1] + i__] -
+		    1];
+/* L60: */
+	}
+
+/* Multiply Blocks at CURR and CURR+1 */
+
+/* Determine size of these matrices. We add HALF to the value of */
+/* the SQRT in case the machine underestimates one of these */
+/* square roots. */
+
+	bsiz1 = (integer) (sqrt((real) (qptr[curr + 1] - qptr[curr])) + .5f);
+	bsiz2 = (integer) (sqrt((real) (qptr[curr + 2] - qptr[curr + 1])) +
+		.5f);
+	if (bsiz1 > 0) {
+	    sgemv_("T", &bsiz1, &bsiz1, &c_b24, &q[qptr[curr]], &bsiz1, &
+		    ztemp[1], &c__1, &c_b26, &z__[zptr1], &c__1);
+	}
+	i__2 = psiz1 - bsiz1;
+	scopy_(&i__2, &ztemp[bsiz1 + 1], &c__1, &z__[zptr1 + bsiz1], &c__1);
+	if (bsiz2 > 0) {
+	    sgemv_("T", &bsiz2, &bsiz2, &c_b24, &q[qptr[curr + 1]], &bsiz2, &
+		    ztemp[psiz1 + 1], &c__1, &c_b26, &z__[mid], &c__1);
+	}
+	i__2 = psiz2 - bsiz2;
+	scopy_(&i__2, &ztemp[psiz1 + bsiz2 + 1], &c__1, &z__[mid + bsiz2], &
+		c__1);
+
+	i__2 = *tlvls - k;
+	ptr += pow_ii(&c__2, &i__2);
+/* L70: */
+    }
+
+    return 0;
+
+/* End of SLAEDA */
+
+} /* slaeda_ */
diff --git a/contrib/libs/clapack/slaein.c b/contrib/libs/clapack/slaein.c
new file mode 100644
index 0000000000..5cec131771
--- /dev/null
+++ b/contrib/libs/clapack/slaein.c
@@ -0,0 +1,678 @@
+/* slaein.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int slaein_(logical *rightv, logical *noinit, integer *n,
+ real *h__, integer *ldh, real *wr, real *wi, real *vr, real *vi, real
+ *b, integer *ldb, real *work, real *eps3, real *smlnum, real *bignum,
+ integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, h_dim1, h_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2, r__3, r__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j;
+ real w, x, y;
+ integer i1, i2, i3;
+ real w1, ei, ej, xi, xr, rec;
+ integer its, ierr;
+ real temp, norm, vmax;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ real scale;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ char trans[1];
+ real vcrit;
+ extern doublereal sasum_(integer *, real *, integer *);
+ real rootn, vnorm;
+ extern doublereal slapy2_(real *, real *);
+ real absbii, absbjj;
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int sladiv_(real *, real *, real *, real *, real *
+, real *);
+ char normin[1];
+ real nrmsml;
+ extern /* Subroutine */ int slatrs_(char *, char *, char *, char *,
+ integer *, real *, integer *, real *, real *, real *, integer *);
+ real growto;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAEIN uses inverse iteration to find a right or left eigenvector */
+/* corresponding to the eigenvalue (WR,WI) of a real upper Hessenberg */
+/* matrix H. */
+
+/* Arguments */
+/* ========= */
+
+/* RIGHTV (input) LOGICAL */
+/* = .TRUE. : compute right eigenvector; */
+/* = .FALSE.: compute left eigenvector. */
+
+/* NOINIT (input) LOGICAL */
+/* = .TRUE. : no initial vector supplied in (VR,VI). */
+/* = .FALSE.: initial vector supplied in (VR,VI). */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N >= 0. */
+
+/* H (input) REAL array, dimension (LDH,N) */
+/* The upper Hessenberg matrix H. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH >= max(1,N). */
+
+/* WR (input) REAL */
+/* WI (input) REAL */
+/* The real and imaginary parts of the eigenvalue of H whose */
+/* corresponding right or left eigenvector is to be computed. */
+
+/* VR (input/output) REAL array, dimension (N) */
+/* VI (input/output) REAL array, dimension (N) */
+/* On entry, if NOINIT = .FALSE. and WI = 0.0, VR must contain */
+/* a real starting vector for inverse iteration using the real */
+/* eigenvalue WR; if NOINIT = .FALSE. and WI.ne.0.0, VR and VI */
+/* must contain the real and imaginary parts of a complex */
+/* starting vector for inverse iteration using the complex */
+/* eigenvalue (WR,WI); otherwise VR and VI need not be set. */
+/* On exit, if WI = 0.0 (real eigenvalue), VR contains the */
+/* computed real eigenvector; if WI.ne.0.0 (complex eigenvalue), */
+/* VR and VI contain the real and imaginary parts of the */
+/* computed complex eigenvector. The eigenvector is normalized */
+/* so that the component of largest magnitude has magnitude 1; */
+/* here the magnitude of a complex number (x,y) is taken to be */
+/* |x| + |y|. */
+/* VI is not referenced if WI = 0.0. */
+
+/* B (workspace) REAL array, dimension (LDB,N) */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= N+1. */
+
+/* WORK (workspace) REAL array, dimension (N) */
+
+/* EPS3 (input) REAL */
+/* A small machine-dependent value which is used to perturb */
+/* close eigenvalues, and to replace zero pivots. */
+
+/* SMLNUM (input) REAL */
+/* A machine-dependent value close to the underflow threshold. */
+
+/* BIGNUM (input) REAL */
+/* A machine-dependent value close to the overflow threshold. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* = 1: inverse iteration did not converge; VR is set to the */
+/* last iterate, and so is VI if WI.ne.0.0. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --vr;
+ --vi;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+/* GROWTO is the threshold used in the acceptance test for an */
+/* eigenvector. */
+
+ rootn = sqrt((real) (*n));
+ growto = .1f / rootn;
+/* Computing MAX */
+ r__1 = 1.f, r__2 = *eps3 * rootn;
+ nrmsml = dmax(r__1,r__2) * *smlnum;
+
+/* Form B = H - (WR,WI)*I (except that the subdiagonal elements and */
+/* the imaginary parts of the diagonal elements are not stored). */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = h__[i__ + j * h_dim1];
+/* L10: */
+ }
+ b[j + j * b_dim1] = h__[j + j * h_dim1] - *wr;
+/* L20: */
+ }
+
+ if (*wi == 0.f) {
+
+/* Real eigenvalue. */
+
+ if (*noinit) {
+
+/* Set initial vector. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ vr[i__] = *eps3;
+/* L30: */
+ }
+ } else {
+
+/* Scale supplied initial vector. */
+
+ vnorm = snrm2_(n, &vr[1], &c__1);
+ r__1 = *eps3 * rootn / dmax(vnorm,nrmsml);
+ sscal_(n, &r__1, &vr[1], &c__1);
+ }
+
+ if (*rightv) {
+
+/* LU decomposition with partial pivoting of B, replacing zero */
+/* pivots by EPS3. */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ei = h__[i__ + 1 + i__ * h_dim1];
+ if ((r__1 = b[i__ + i__ * b_dim1], dabs(r__1)) < dabs(ei)) {
+
+/* Interchange rows and eliminate. */
+
+ x = b[i__ + i__ * b_dim1] / ei;
+ b[i__ + i__ * b_dim1] = ei;
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ temp = b[i__ + 1 + j * b_dim1];
+ b[i__ + 1 + j * b_dim1] = b[i__ + j * b_dim1] - x *
+ temp;
+ b[i__ + j * b_dim1] = temp;
+/* L40: */
+ }
+ } else {
+
+/* Eliminate without interchange. */
+
+ if (b[i__ + i__ * b_dim1] == 0.f) {
+ b[i__ + i__ * b_dim1] = *eps3;
+ }
+ x = ei / b[i__ + i__ * b_dim1];
+ if (x != 0.f) {
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ b[i__ + 1 + j * b_dim1] -= x * b[i__ + j * b_dim1]
+ ;
+/* L50: */
+ }
+ }
+ }
+/* L60: */
+ }
+ if (b[*n + *n * b_dim1] == 0.f) {
+ b[*n + *n * b_dim1] = *eps3;
+ }
+
+ *(unsigned char *)trans = 'N';
+
+ } else {
+
+/* UL decomposition with partial pivoting of B, replacing zero */
+/* pivots by EPS3. */
+
+ for (j = *n; j >= 2; --j) {
+ ej = h__[j + (j - 1) * h_dim1];
+ if ((r__1 = b[j + j * b_dim1], dabs(r__1)) < dabs(ej)) {
+
+/* Interchange columns and eliminate. */
+
+ x = b[j + j * b_dim1] / ej;
+ b[j + j * b_dim1] = ej;
+ i__1 = j - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = b[i__ + (j - 1) * b_dim1];
+ b[i__ + (j - 1) * b_dim1] = b[i__ + j * b_dim1] - x *
+ temp;
+ b[i__ + j * b_dim1] = temp;
+/* L70: */
+ }
+ } else {
+
+/* Eliminate without interchange. */
+
+ if (b[j + j * b_dim1] == 0.f) {
+ b[j + j * b_dim1] = *eps3;
+ }
+ x = ej / b[j + j * b_dim1];
+ if (x != 0.f) {
+ i__1 = j - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + (j - 1) * b_dim1] -= x * b[i__ + j *
+ b_dim1];
+/* L80: */
+ }
+ }
+ }
+/* L90: */
+ }
+ if (b[b_dim1 + 1] == 0.f) {
+ b[b_dim1 + 1] = *eps3;
+ }
+
+ *(unsigned char *)trans = 'T';
+
+ }
+
+ *(unsigned char *)normin = 'N';
+ i__1 = *n;
+ for (its = 1; its <= i__1; ++its) {
+
+/* Solve U*x = scale*v for a right eigenvector */
+/* or U'*x = scale*v for a left eigenvector, */
+/* overwriting x on v. */
+
+ slatrs_("Upper", trans, "Nonunit", normin, n, &b[b_offset], ldb, &
+ vr[1], &scale, &work[1], &ierr);
+ *(unsigned char *)normin = 'Y';
+
+/* Test for sufficient growth in the norm of v. */
+
+ vnorm = sasum_(n, &vr[1], &c__1);
+ if (vnorm >= growto * scale) {
+ goto L120;
+ }
+
+/* Choose new orthogonal starting vector and try again. */
+
+ temp = *eps3 / (rootn + 1.f);
+ vr[1] = *eps3;
+ i__2 = *n;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ vr[i__] = temp;
+/* L100: */
+ }
+ vr[*n - its + 1] -= *eps3 * rootn;
+/* L110: */
+ }
+
+/* Failure to find eigenvector in N iterations. */
+
+ *info = 1;
+
+L120:
+
+/* Normalize eigenvector. */
+
+ i__ = isamax_(n, &vr[1], &c__1);
+ r__2 = 1.f / (r__1 = vr[i__], dabs(r__1));
+ sscal_(n, &r__2, &vr[1], &c__1);
+ } else {
+
+/* Complex eigenvalue. */
+
+ if (*noinit) {
+
+/* Set initial vector. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ vr[i__] = *eps3;
+ vi[i__] = 0.f;
+/* L130: */
+ }
+ } else {
+
+/* Scale supplied initial vector. */
+
+ r__1 = snrm2_(n, &vr[1], &c__1);
+ r__2 = snrm2_(n, &vi[1], &c__1);
+ norm = slapy2_(&r__1, &r__2);
+ rec = *eps3 * rootn / dmax(norm,nrmsml);
+ sscal_(n, &rec, &vr[1], &c__1);
+ sscal_(n, &rec, &vi[1], &c__1);
+ }
+
+ if (*rightv) {
+
+/* LU decomposition with partial pivoting of B, replacing zero */
+/* pivots by EPS3. */
+
+/* The imaginary part of the (i,j)-th element of U is stored in */
+/* B(j+1,i). */
+
+ b[b_dim1 + 2] = -(*wi);
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ b[i__ + 1 + b_dim1] = 0.f;
+/* L140: */
+ }
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ absbii = slapy2_(&b[i__ + i__ * b_dim1], &b[i__ + 1 + i__ *
+ b_dim1]);
+ ei = h__[i__ + 1 + i__ * h_dim1];
+ if (absbii < dabs(ei)) {
+
+/* Interchange rows and eliminate. */
+
+ xr = b[i__ + i__ * b_dim1] / ei;
+ xi = b[i__ + 1 + i__ * b_dim1] / ei;
+ b[i__ + i__ * b_dim1] = ei;
+ b[i__ + 1 + i__ * b_dim1] = 0.f;
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ temp = b[i__ + 1 + j * b_dim1];
+ b[i__ + 1 + j * b_dim1] = b[i__ + j * b_dim1] - xr *
+ temp;
+ b[j + 1 + (i__ + 1) * b_dim1] = b[j + 1 + i__ *
+ b_dim1] - xi * temp;
+ b[i__ + j * b_dim1] = temp;
+ b[j + 1 + i__ * b_dim1] = 0.f;
+/* L150: */
+ }
+ b[i__ + 2 + i__ * b_dim1] = -(*wi);
+ b[i__ + 1 + (i__ + 1) * b_dim1] -= xi * *wi;
+ b[i__ + 2 + (i__ + 1) * b_dim1] += xr * *wi;
+ } else {
+
+/* Eliminate without interchanging rows. */
+
+ if (absbii == 0.f) {
+ b[i__ + i__ * b_dim1] = *eps3;
+ b[i__ + 1 + i__ * b_dim1] = 0.f;
+ absbii = *eps3;
+ }
+ ei = ei / absbii / absbii;
+ xr = b[i__ + i__ * b_dim1] * ei;
+ xi = -b[i__ + 1 + i__ * b_dim1] * ei;
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ b[i__ + 1 + j * b_dim1] = b[i__ + 1 + j * b_dim1] -
+ xr * b[i__ + j * b_dim1] + xi * b[j + 1 + i__
+ * b_dim1];
+ b[j + 1 + (i__ + 1) * b_dim1] = -xr * b[j + 1 + i__ *
+ b_dim1] - xi * b[i__ + j * b_dim1];
+/* L160: */
+ }
+ b[i__ + 2 + (i__ + 1) * b_dim1] -= *wi;
+ }
+
+/* Compute 1-norm of offdiagonal elements of i-th row. */
+
+ i__2 = *n - i__;
+ i__3 = *n - i__;
+ work[i__] = sasum_(&i__2, &b[i__ + (i__ + 1) * b_dim1], ldb)
+ + sasum_(&i__3, &b[i__ + 2 + i__ * b_dim1], &c__1);
+/* L170: */
+ }
+ if (b[*n + *n * b_dim1] == 0.f && b[*n + 1 + *n * b_dim1] == 0.f)
+ {
+ b[*n + *n * b_dim1] = *eps3;
+ }
+ work[*n] = 0.f;
+
+ i1 = *n;
+ i2 = 1;
+ i3 = -1;
+ } else {
+
+/* UL decomposition with partial pivoting of conjg(B), */
+/* replacing zero pivots by EPS3. */
+
+/* The imaginary part of the (i,j)-th element of U is stored in */
+/* B(j+1,i). */
+
+ b[*n + 1 + *n * b_dim1] = *wi;
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ b[*n + 1 + j * b_dim1] = 0.f;
+/* L180: */
+ }
+
+ for (j = *n; j >= 2; --j) {
+ ej = h__[j + (j - 1) * h_dim1];
+ absbjj = slapy2_(&b[j + j * b_dim1], &b[j + 1 + j * b_dim1]);
+ if (absbjj < dabs(ej)) {
+
+/* Interchange columns and eliminate */
+
+ xr = b[j + j * b_dim1] / ej;
+ xi = b[j + 1 + j * b_dim1] / ej;
+ b[j + j * b_dim1] = ej;
+ b[j + 1 + j * b_dim1] = 0.f;
+ i__1 = j - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = b[i__ + (j - 1) * b_dim1];
+ b[i__ + (j - 1) * b_dim1] = b[i__ + j * b_dim1] - xr *
+ temp;
+ b[j + i__ * b_dim1] = b[j + 1 + i__ * b_dim1] - xi *
+ temp;
+ b[i__ + j * b_dim1] = temp;
+ b[j + 1 + i__ * b_dim1] = 0.f;
+/* L190: */
+ }
+ b[j + 1 + (j - 1) * b_dim1] = *wi;
+ b[j - 1 + (j - 1) * b_dim1] += xi * *wi;
+ b[j + (j - 1) * b_dim1] -= xr * *wi;
+ } else {
+
+/* Eliminate without interchange. */
+
+ if (absbjj == 0.f) {
+ b[j + j * b_dim1] = *eps3;
+ b[j + 1 + j * b_dim1] = 0.f;
+ absbjj = *eps3;
+ }
+ ej = ej / absbjj / absbjj;
+ xr = b[j + j * b_dim1] * ej;
+ xi = -b[j + 1 + j * b_dim1] * ej;
+ i__1 = j - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + (j - 1) * b_dim1] = b[i__ + (j - 1) * b_dim1]
+ - xr * b[i__ + j * b_dim1] + xi * b[j + 1 +
+ i__ * b_dim1];
+ b[j + i__ * b_dim1] = -xr * b[j + 1 + i__ * b_dim1] -
+ xi * b[i__ + j * b_dim1];
+/* L200: */
+ }
+ b[j + (j - 1) * b_dim1] += *wi;
+ }
+
+/* Compute 1-norm of offdiagonal elements of j-th column. */
+
+ i__1 = j - 1;
+ i__2 = j - 1;
+ work[j] = sasum_(&i__1, &b[j * b_dim1 + 1], &c__1) + sasum_(&
+ i__2, &b[j + 1 + b_dim1], ldb);
+/* L210: */
+ }
+ if (b[b_dim1 + 1] == 0.f && b[b_dim1 + 2] == 0.f) {
+ b[b_dim1 + 1] = *eps3;
+ }
+ work[1] = 0.f;
+
+ i1 = 1;
+ i2 = *n;
+ i3 = 1;
+ }
+
+ i__1 = *n;
+ for (its = 1; its <= i__1; ++its) {
+ scale = 1.f;
+ vmax = 1.f;
+ vcrit = *bignum;
+
+/* Solve U*(xr,xi) = scale*(vr,vi) for a right eigenvector, */
+/* or U'*(xr,xi) = scale*(vr,vi) for a left eigenvector, */
+/* overwriting (xr,xi) on (vr,vi). */
+
+ i__2 = i2;
+ i__3 = i3;
+ for (i__ = i1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3)
+ {
+
+ if (work[i__] > vcrit) {
+ rec = 1.f / vmax;
+ sscal_(n, &rec, &vr[1], &c__1);
+ sscal_(n, &rec, &vi[1], &c__1);
+ scale *= rec;
+ vmax = 1.f;
+ vcrit = *bignum;
+ }
+
+ xr = vr[i__];
+ xi = vi[i__];
+ if (*rightv) {
+ i__4 = *n;
+ for (j = i__ + 1; j <= i__4; ++j) {
+ xr = xr - b[i__ + j * b_dim1] * vr[j] + b[j + 1 + i__
+ * b_dim1] * vi[j];
+ xi = xi - b[i__ + j * b_dim1] * vi[j] - b[j + 1 + i__
+ * b_dim1] * vr[j];
+/* L220: */
+ }
+ } else {
+ i__4 = i__ - 1;
+ for (j = 1; j <= i__4; ++j) {
+ xr = xr - b[j + i__ * b_dim1] * vr[j] + b[i__ + 1 + j
+ * b_dim1] * vi[j];
+ xi = xi - b[j + i__ * b_dim1] * vi[j] - b[i__ + 1 + j
+ * b_dim1] * vr[j];
+/* L230: */
+ }
+ }
+
+ w = (r__1 = b[i__ + i__ * b_dim1], dabs(r__1)) + (r__2 = b[
+ i__ + 1 + i__ * b_dim1], dabs(r__2));
+ if (w > *smlnum) {
+ if (w < 1.f) {
+ w1 = dabs(xr) + dabs(xi);
+ if (w1 > w * *bignum) {
+ rec = 1.f / w1;
+ sscal_(n, &rec, &vr[1], &c__1);
+ sscal_(n, &rec, &vi[1], &c__1);
+ xr = vr[i__];
+ xi = vi[i__];
+ scale *= rec;
+ vmax *= rec;
+ }
+ }
+
+/* Divide by diagonal element of B. */
+
+ sladiv_(&xr, &xi, &b[i__ + i__ * b_dim1], &b[i__ + 1 +
+ i__ * b_dim1], &vr[i__], &vi[i__]);
+/* Computing MAX */
+ r__3 = (r__1 = vr[i__], dabs(r__1)) + (r__2 = vi[i__],
+ dabs(r__2));
+ vmax = dmax(r__3,vmax);
+ vcrit = *bignum / vmax;
+ } else {
+ i__4 = *n;
+ for (j = 1; j <= i__4; ++j) {
+ vr[j] = 0.f;
+ vi[j] = 0.f;
+/* L240: */
+ }
+ vr[i__] = 1.f;
+ vi[i__] = 1.f;
+ scale = 0.f;
+ vmax = 1.f;
+ vcrit = *bignum;
+ }
+/* L250: */
+ }
+
+/* Test for sufficient growth in the norm of (VR,VI). */
+
+ vnorm = sasum_(n, &vr[1], &c__1) + sasum_(n, &vi[1], &c__1);
+ if (vnorm >= growto * scale) {
+ goto L280;
+ }
+
+/* Choose a new orthogonal starting vector and try again. */
+
+ y = *eps3 / (rootn + 1.f);
+ vr[1] = *eps3;
+ vi[1] = 0.f;
+
+ i__3 = *n;
+ for (i__ = 2; i__ <= i__3; ++i__) {
+ vr[i__] = y;
+ vi[i__] = 0.f;
+/* L260: */
+ }
+ vr[*n - its + 1] -= *eps3 * rootn;
+/* L270: */
+ }
+
+/* Failure to find eigenvector in N iterations */
+
+ *info = 1;
+
+L280:
+
+/* Normalize eigenvector. */
+
+ vnorm = 0.f;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__3 = vnorm, r__4 = (r__1 = vr[i__], dabs(r__1)) + (r__2 = vi[
+ i__], dabs(r__2));
+ vnorm = dmax(r__3,r__4);
+/* L290: */
+ }
+ r__1 = 1.f / vnorm;
+ sscal_(n, &r__1, &vr[1], &c__1);
+ r__1 = 1.f / vnorm;
+ sscal_(n, &r__1, &vi[1], &c__1);
+
+ }
+
+ return 0;
+
+/* End of SLAEIN */
+
+} /* slaein_ */
diff --git a/contrib/libs/clapack/slaev2.c b/contrib/libs/clapack/slaev2.c
new file mode 100644
index 0000000000..290fc6e490
--- /dev/null
+++ b/contrib/libs/clapack/slaev2.c
@@ -0,0 +1,188 @@
+/* slaev2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slaev2_(real *a, real *b, real *c__, real *rt1, real *
+ rt2, real *cs1, real *sn1)
+{
+ /* System generated locals */
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ real ab, df, cs, ct, tb, sm, tn, rt, adf, acs;
+ integer sgn1, sgn2;
+ real acmn, acmx;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAEV2 computes the eigendecomposition of a 2-by-2 symmetric matrix */
+/* [ A B ] */
+/* [ B C ]. */
+/* On return, RT1 is the eigenvalue of larger absolute value, RT2 is the */
+/* eigenvalue of smaller absolute value, and (CS1,SN1) is the unit right */
+/* eigenvector for RT1, giving the decomposition */
+
+/* [ CS1 SN1 ] [ A B ] [ CS1 -SN1 ] = [ RT1 0 ] */
+/* [-SN1 CS1 ] [ B C ] [ SN1 CS1 ] [ 0 RT2 ]. */
+
+/* Arguments */
+/* ========= */
+
+/* A (input) REAL */
+/* The (1,1) element of the 2-by-2 matrix. */
+
+/* B (input) REAL */
+/* The (1,2) element and the conjugate of the (2,1) element of */
+/* the 2-by-2 matrix. */
+
+/* C (input) REAL */
+/* The (2,2) element of the 2-by-2 matrix. */
+
+/* RT1 (output) REAL */
+/* The eigenvalue of larger absolute value. */
+
+/* RT2 (output) REAL */
+/* The eigenvalue of smaller absolute value. */
+
+/* CS1 (output) REAL */
+/* SN1 (output) REAL */
+/* The vector (CS1, SN1) is a unit right eigenvector for RT1. */
+
+/* Further Details */
+/* =============== */
+
+/* RT1 is accurate to a few ulps barring over/underflow. */
+
+/* RT2 may be inaccurate if there is massive cancellation in the */
+/* determinant A*C-B*B; higher precision or correctly rounded or */
+/* correctly truncated arithmetic would be needed to compute RT2 */
+/* accurately in all cases. */
+
+/* CS1 and SN1 are accurate to a few ulps barring over/underflow. */
+
+/* Overflow is possible only if RT1 is within a factor of 5 of overflow. */
+/* Underflow is harmless if the input data is 0 or exceeds */
+/* underflow_threshold / macheps. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Compute the eigenvalues */
+
+ sm = *a + *c__;
+ df = *a - *c__;
+ adf = dabs(df);
+ tb = *b + *b;
+ ab = dabs(tb);
+ if (dabs(*a) > dabs(*c__)) {
+ acmx = *a;
+ acmn = *c__;
+ } else {
+ acmx = *c__;
+ acmn = *a;
+ }
+ if (adf > ab) {
+/* Computing 2nd power */
+ r__1 = ab / adf;
+ rt = adf * sqrt(r__1 * r__1 + 1.f);
+ } else if (adf < ab) {
+/* Computing 2nd power */
+ r__1 = adf / ab;
+ rt = ab * sqrt(r__1 * r__1 + 1.f);
+ } else {
+
+/* Includes case AB=ADF=0 */
+
+ rt = ab * sqrt(2.f);
+ }
+ if (sm < 0.f) {
+ *rt1 = (sm - rt) * .5f;
+ sgn1 = -1;
+
+/* Order of execution important. */
+/* To get fully accurate smaller eigenvalue, */
+/* next line needs to be executed in higher precision. */
+
+ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
+ } else if (sm > 0.f) {
+ *rt1 = (sm + rt) * .5f;
+ sgn1 = 1;
+
+/* Order of execution important. */
+/* To get fully accurate smaller eigenvalue, */
+/* next line needs to be executed in higher precision. */
+
+ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
+ } else {
+
+/* Includes case RT1 = RT2 = 0 */
+
+ *rt1 = rt * .5f;
+ *rt2 = rt * -.5f;
+ sgn1 = 1;
+ }
+
+/* Compute the eigenvector */
+
+ if (df >= 0.f) {
+ cs = df + rt;
+ sgn2 = 1;
+ } else {
+ cs = df - rt;
+ sgn2 = -1;
+ }
+ acs = dabs(cs);
+ if (acs > ab) {
+ ct = -tb / cs;
+ *sn1 = 1.f / sqrt(ct * ct + 1.f);
+ *cs1 = ct * *sn1;
+ } else {
+ if (ab == 0.f) {
+ *cs1 = 1.f;
+ *sn1 = 0.f;
+ } else {
+ tn = -cs / tb;
+ *cs1 = 1.f / sqrt(tn * tn + 1.f);
+ *sn1 = tn * *cs1;
+ }
+ }
+ if (sgn1 == sgn2) {
+ tn = *cs1;
+ *cs1 = -(*sn1);
+ *sn1 = tn;
+ }
+ return 0;
+
+/* End of SLAEV2 */
+
+} /* slaev2_ */
diff --git a/contrib/libs/clapack/slaexc.c b/contrib/libs/clapack/slaexc.c
new file mode 100644
index 0000000000..99f6d359fe
--- /dev/null
+++ b/contrib/libs/clapack/slaexc.c
@@ -0,0 +1,458 @@
+/* slaexc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__4 = 4;
+static logical c_false = FALSE_;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__3 = 3;
+
+/* Subroutine */ int slaexc_(logical *wantq, integer *n, real *t, integer *
+ ldt, real *q, integer *ldq, integer *j1, integer *n1, integer *n2,
+ real *work, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, t_dim1, t_offset, i__1;
+ real r__1, r__2, r__3;
+
+ /* Local variables */
+ real d__[16] /* was [4][4] */;
+ integer k;
+ real u[3], x[4] /* was [2][2] */;
+ integer j2, j3, j4;
+ real u1[3], u2[3];
+ integer nd;
+ real cs, t11, t22, t33, sn, wi1, wi2, wr1, wr2, eps, tau, tau1, tau2;
+ integer ierr;
+ real temp;
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *);
+ real scale, dnorm, xnorm;
+ extern /* Subroutine */ int slanv2_(real *, real *, real *, real *, real *
+, real *, real *, real *, real *, real *), slasy2_(logical *,
+ logical *, integer *, integer *, integer *, real *, integer *,
+ real *, integer *, real *, integer *, real *, real *, integer *,
+ real *, integer *);
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ extern /* Subroutine */ int slarfg_(integer *, real *, real *, integer *,
+ real *), slacpy_(char *, integer *, integer *, real *, integer *,
+ real *, integer *), slartg_(real *, real *, real *, real *
+, real *);
+ real thresh;
+ extern /* Subroutine */ int slarfx_(char *, integer *, integer *, real *,
+ real *, real *, integer *, real *);
+ real smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAEXC swaps adjacent diagonal blocks T11 and T22 of order 1 or 2 in */
+/* an upper quasi-triangular matrix T by an orthogonal similarity */
+/* transformation. */
+
+/* T must be in Schur canonical form, that is, block upper triangular */
+/* with 1-by-1 and 2-by-2 diagonal blocks; each 2-by-2 diagonal block */
+/* has its diagonal elements equal and its off-diagonal elements of */
+/* opposite sign. */
+
+/* Arguments */
+/* ========= */
+
+/* WANTQ (input) LOGICAL */
+/* = .TRUE. : accumulate the transformation in the matrix Q; */
+/* = .FALSE.: do not accumulate the transformation. */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. N >= 0. */
+
+/* T (input/output) REAL array, dimension (LDT,N) */
+/* On entry, the upper quasi-triangular matrix T, in Schur */
+/* canonical form. */
+/* On exit, the updated matrix T, again in Schur canonical form. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max(1,N). */
+
+/* Q (input/output) REAL array, dimension (LDQ,N) */
+/* On entry, if WANTQ is .TRUE., the orthogonal matrix Q. */
+/* On exit, if WANTQ is .TRUE., the updated matrix Q. */
+/* If WANTQ is .FALSE., Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. */
+/* LDQ >= 1; and if WANTQ is .TRUE., LDQ >= N. */
+
+/* J1 (input) INTEGER */
+/* The index of the first row of the first block T11. */
+
+/* N1 (input) INTEGER */
+/* The order of the first block T11. N1 = 0, 1 or 2. */
+
+/* N2 (input) INTEGER */
+/* The order of the second block T22. N2 = 0, 1 or 2. */
+
+/* WORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* = 1: the transformed matrix T would be too far from Schur */
+/* form; the blocks are not swapped and T and Q are */
+/* unchanged. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+/* Quick return if possible */
+
+ if (*n == 0 || *n1 == 0 || *n2 == 0) {
+ return 0;
+ }
+ if (*j1 + *n1 > *n) {
+ return 0;
+ }
+
+ j2 = *j1 + 1;
+ j3 = *j1 + 2;
+ j4 = *j1 + 3;
+
+ if (*n1 == 1 && *n2 == 1) {
+
+/* Swap two 1-by-1 blocks. */
+
+ t11 = t[*j1 + *j1 * t_dim1];
+ t22 = t[j2 + j2 * t_dim1];
+
+/* Determine the transformation to perform the interchange. */
+
+ r__1 = t22 - t11;
+ slartg_(&t[*j1 + j2 * t_dim1], &r__1, &cs, &sn, &temp);
+
+/* Apply transformation to the matrix T. */
+
+ if (j3 <= *n) {
+ i__1 = *n - *j1 - 1;
+ srot_(&i__1, &t[*j1 + j3 * t_dim1], ldt, &t[j2 + j3 * t_dim1],
+ ldt, &cs, &sn);
+ }
+ i__1 = *j1 - 1;
+ srot_(&i__1, &t[*j1 * t_dim1 + 1], &c__1, &t[j2 * t_dim1 + 1], &c__1,
+ &cs, &sn);
+
+ t[*j1 + *j1 * t_dim1] = t22;
+ t[j2 + j2 * t_dim1] = t11;
+
+ if (*wantq) {
+
+/* Accumulate transformation in the matrix Q. */
+
+ srot_(n, &q[*j1 * q_dim1 + 1], &c__1, &q[j2 * q_dim1 + 1], &c__1,
+ &cs, &sn);
+ }
+
+ } else {
+
+/* Swapping involves at least one 2-by-2 block. */
+
+/* Copy the diagonal block of order N1+N2 to the local array D */
+/* and compute its norm. */
+
+ nd = *n1 + *n2;
+ slacpy_("Full", &nd, &nd, &t[*j1 + *j1 * t_dim1], ldt, d__, &c__4);
+ dnorm = slange_("Max", &nd, &nd, d__, &c__4, &work[1]);
+
+/* Compute machine-dependent threshold for test for accepting */
+/* swap. */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S") / eps;
+/* Computing MAX */
+ r__1 = eps * 10.f * dnorm;
+ thresh = dmax(r__1,smlnum);
+
+/* Solve T11*X - X*T22 = scale*T12 for X. */
+
+ slasy2_(&c_false, &c_false, &c_n1, n1, n2, d__, &c__4, &d__[*n1 + 1 +
+ (*n1 + 1 << 2) - 5], &c__4, &d__[(*n1 + 1 << 2) - 4], &c__4, &
+ scale, x, &c__2, &xnorm, &ierr);
+
+/* Swap the adjacent diagonal blocks. */
+
+ k = *n1 + *n1 + *n2 - 3;
+ switch (k) {
+ case 1: goto L10;
+ case 2: goto L20;
+ case 3: goto L30;
+ }
+
+L10:
+
+/* N1 = 1, N2 = 2: generate elementary reflector H so that: */
+
+/* ( scale, X11, X12 ) H = ( 0, 0, * ) */
+
+ u[0] = scale;
+ u[1] = x[0];
+ u[2] = x[2];
+ slarfg_(&c__3, &u[2], u, &c__1, &tau);
+ u[2] = 1.f;
+ t11 = t[*j1 + *j1 * t_dim1];
+
+/* Perform swap provisionally on diagonal block in D. */
+
+ slarfx_("L", &c__3, &c__3, u, &tau, d__, &c__4, &work[1]);
+ slarfx_("R", &c__3, &c__3, u, &tau, d__, &c__4, &work[1]);
+
+/* Test whether to reject swap. */
+
+/* Computing MAX */
+ r__2 = dabs(d__[2]), r__3 = dabs(d__[6]), r__2 = max(r__2,r__3), r__3
+ = (r__1 = d__[10] - t11, dabs(r__1));
+ if (dmax(r__2,r__3) > thresh) {
+ goto L50;
+ }
+
+/* Accept swap: apply transformation to the entire matrix T. */
+
+ i__1 = *n - *j1 + 1;
+ slarfx_("L", &c__3, &i__1, u, &tau, &t[*j1 + *j1 * t_dim1], ldt, &
+ work[1]);
+ slarfx_("R", &j2, &c__3, u, &tau, &t[*j1 * t_dim1 + 1], ldt, &work[1]);
+
+ t[j3 + *j1 * t_dim1] = 0.f;
+ t[j3 + j2 * t_dim1] = 0.f;
+ t[j3 + j3 * t_dim1] = t11;
+
+ if (*wantq) {
+
+/* Accumulate transformation in the matrix Q. */
+
+ slarfx_("R", n, &c__3, u, &tau, &q[*j1 * q_dim1 + 1], ldq, &work[
+ 1]);
+ }
+ goto L40;
+
+L20:
+
+/* N1 = 2, N2 = 1: generate elementary reflector H so that: */
+
+/* H ( -X11 ) = ( * ) */
+/* ( -X21 ) = ( 0 ) */
+/* ( scale ) = ( 0 ) */
+
+ u[0] = -x[0];
+ u[1] = -x[1];
+ u[2] = scale;
+ slarfg_(&c__3, u, &u[1], &c__1, &tau);
+ u[0] = 1.f;
+ t33 = t[j3 + j3 * t_dim1];
+
+/* Perform swap provisionally on diagonal block in D. */
+
+ slarfx_("L", &c__3, &c__3, u, &tau, d__, &c__4, &work[1]);
+ slarfx_("R", &c__3, &c__3, u, &tau, d__, &c__4, &work[1]);
+
+/* Test whether to reject swap. */
+
+/* Computing MAX */
+ r__2 = dabs(d__[1]), r__3 = dabs(d__[2]), r__2 = max(r__2,r__3), r__3
+ = (r__1 = d__[0] - t33, dabs(r__1));
+ if (dmax(r__2,r__3) > thresh) {
+ goto L50;
+ }
+
+/* Accept swap: apply transformation to the entire matrix T. */
+
+ slarfx_("R", &j3, &c__3, u, &tau, &t[*j1 * t_dim1 + 1], ldt, &work[1]);
+ i__1 = *n - *j1;
+ slarfx_("L", &c__3, &i__1, u, &tau, &t[*j1 + j2 * t_dim1], ldt, &work[
+ 1]);
+
+ t[*j1 + *j1 * t_dim1] = t33;
+ t[j2 + *j1 * t_dim1] = 0.f;
+ t[j3 + *j1 * t_dim1] = 0.f;
+
+ if (*wantq) {
+
+/* Accumulate transformation in the matrix Q. */
+
+ slarfx_("R", n, &c__3, u, &tau, &q[*j1 * q_dim1 + 1], ldq, &work[
+ 1]);
+ }
+ goto L40;
+
+L30:
+
+/* N1 = 2, N2 = 2: generate elementary reflectors H(1) and H(2) so */
+/* that: */
+
+/* H(2) H(1) ( -X11 -X12 ) = ( * * ) */
+/* ( -X21 -X22 ) ( 0 * ) */
+/* ( scale 0 ) ( 0 0 ) */
+/* ( 0 scale ) ( 0 0 ) */
+
+ u1[0] = -x[0];
+ u1[1] = -x[1];
+ u1[2] = scale;
+ slarfg_(&c__3, u1, &u1[1], &c__1, &tau1);
+ u1[0] = 1.f;
+
+ temp = -tau1 * (x[2] + u1[1] * x[3]);
+ u2[0] = -temp * u1[1] - x[3];
+ u2[1] = -temp * u1[2];
+ u2[2] = scale;
+ slarfg_(&c__3, u2, &u2[1], &c__1, &tau2);
+ u2[0] = 1.f;
+
+/* Perform swap provisionally on diagonal block in D. */
+
+ slarfx_("L", &c__3, &c__4, u1, &tau1, d__, &c__4, &work[1])
+ ;
+ slarfx_("R", &c__4, &c__3, u1, &tau1, d__, &c__4, &work[1])
+ ;
+ slarfx_("L", &c__3, &c__4, u2, &tau2, &d__[1], &c__4, &work[1]);
+ slarfx_("R", &c__4, &c__3, u2, &tau2, &d__[4], &c__4, &work[1]);
+
+/* Test whether to reject swap. */
+
+/* Computing MAX */
+ r__1 = dabs(d__[2]), r__2 = dabs(d__[6]), r__1 = max(r__1,r__2), r__2
+ = dabs(d__[3]), r__1 = max(r__1,r__2), r__2 = dabs(d__[7]);
+ if (dmax(r__1,r__2) > thresh) {
+ goto L50;
+ }
+
+/* Accept swap: apply transformation to the entire matrix T. */
+
+ i__1 = *n - *j1 + 1;
+ slarfx_("L", &c__3, &i__1, u1, &tau1, &t[*j1 + *j1 * t_dim1], ldt, &
+ work[1]);
+ slarfx_("R", &j4, &c__3, u1, &tau1, &t[*j1 * t_dim1 + 1], ldt, &work[
+ 1]);
+ i__1 = *n - *j1 + 1;
+ slarfx_("L", &c__3, &i__1, u2, &tau2, &t[j2 + *j1 * t_dim1], ldt, &
+ work[1]);
+ slarfx_("R", &j4, &c__3, u2, &tau2, &t[j2 * t_dim1 + 1], ldt, &work[1]
+);
+
+ t[j3 + *j1 * t_dim1] = 0.f;
+ t[j3 + j2 * t_dim1] = 0.f;
+ t[j4 + *j1 * t_dim1] = 0.f;
+ t[j4 + j2 * t_dim1] = 0.f;
+
+ if (*wantq) {
+
+/* Accumulate transformation in the matrix Q. */
+
+ slarfx_("R", n, &c__3, u1, &tau1, &q[*j1 * q_dim1 + 1], ldq, &
+ work[1]);
+ slarfx_("R", n, &c__3, u2, &tau2, &q[j2 * q_dim1 + 1], ldq, &work[
+ 1]);
+ }
+
+L40:
+
+ if (*n2 == 2) {
+
+/* Standardize new 2-by-2 block T11 */
+
+ slanv2_(&t[*j1 + *j1 * t_dim1], &t[*j1 + j2 * t_dim1], &t[j2 + *
+ j1 * t_dim1], &t[j2 + j2 * t_dim1], &wr1, &wi1, &wr2, &
+ wi2, &cs, &sn);
+ i__1 = *n - *j1 - 1;
+ srot_(&i__1, &t[*j1 + (*j1 + 2) * t_dim1], ldt, &t[j2 + (*j1 + 2)
+ * t_dim1], ldt, &cs, &sn);
+ i__1 = *j1 - 1;
+ srot_(&i__1, &t[*j1 * t_dim1 + 1], &c__1, &t[j2 * t_dim1 + 1], &
+ c__1, &cs, &sn);
+ if (*wantq) {
+ srot_(n, &q[*j1 * q_dim1 + 1], &c__1, &q[j2 * q_dim1 + 1], &
+ c__1, &cs, &sn);
+ }
+ }
+
+ if (*n1 == 2) {
+
+/* Standardize new 2-by-2 block T22 */
+
+ j3 = *j1 + *n2;
+ j4 = j3 + 1;
+ slanv2_(&t[j3 + j3 * t_dim1], &t[j3 + j4 * t_dim1], &t[j4 + j3 *
+ t_dim1], &t[j4 + j4 * t_dim1], &wr1, &wi1, &wr2, &wi2, &
+ cs, &sn);
+ if (j3 + 2 <= *n) {
+ i__1 = *n - j3 - 1;
+ srot_(&i__1, &t[j3 + (j3 + 2) * t_dim1], ldt, &t[j4 + (j3 + 2)
+ * t_dim1], ldt, &cs, &sn);
+ }
+ i__1 = j3 - 1;
+ srot_(&i__1, &t[j3 * t_dim1 + 1], &c__1, &t[j4 * t_dim1 + 1], &
+ c__1, &cs, &sn);
+ if (*wantq) {
+ srot_(n, &q[j3 * q_dim1 + 1], &c__1, &q[j4 * q_dim1 + 1], &
+ c__1, &cs, &sn);
+ }
+ }
+
+ }
+ return 0;
+
+/* Exit with INFO = 1 if swap was rejected. */
+
+L50:
+ *info = 1;
+ return 0;
+
+/* End of SLAEXC */
+
+} /* slaexc_ */
diff --git a/contrib/libs/clapack/slag2.c b/contrib/libs/clapack/slag2.c
new file mode 100644
index 0000000000..02ce782f24
--- /dev/null
+++ b/contrib/libs/clapack/slag2.c
@@ -0,0 +1,356 @@
+/* slag2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slag2_(real *a, integer *lda, real *b, integer *ldb,
+ real *safmin, real *scale1, real *scale2, real *wr1, real *wr2, real *
+ wi)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset;
+ real r__1, r__2, r__3, r__4, r__5, r__6;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ real r__, c1, c2, c3, c4, c5, s1, s2, a11, a12, a21, a22, b11, b12, b22,
+ pp, qq, ss, as11, as12, as22, sum, abi22, diff, bmin, wbig, wabs,
+ wdet, binv11, binv22, discr, anorm, bnorm, bsize, shift, rtmin,
+ rtmax, wsize, ascale, bscale, wscale, safmax, wsmall;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAG2 computes the eigenvalues of a 2 x 2 generalized eigenvalue */
+/* problem A - w B, with scaling as necessary to avoid over-/underflow. */
+
+/* The scaling factor "s" results in a modified eigenvalue equation */
+
+/* s A - w B */
+
+/* where s is a non-negative scaling factor chosen so that w, w B, */
+/* and s A do not overflow and, if possible, do not underflow, either. */
+
+/* Arguments */
+/* ========= */
+
+/* A (input) REAL array, dimension (LDA, 2) */
+/* On entry, the 2 x 2 matrix A. It is assumed that its 1-norm */
+/* is less than 1/SAFMIN. Entries less than */
+/* sqrt(SAFMIN)*norm(A) are subject to being treated as zero. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= 2. */
+
+/* B (input) REAL array, dimension (LDB, 2) */
+/* On entry, the 2 x 2 upper triangular matrix B. It is */
+/* assumed that the one-norm of B is less than 1/SAFMIN. The */
+/* diagonals should be at least sqrt(SAFMIN) times the largest */
+/* element of B (in absolute value); if a diagonal is smaller */
+/* than that, then +/- sqrt(SAFMIN) will be used instead of */
+/* that diagonal. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= 2. */
+
+/* SAFMIN (input) REAL */
+/* The smallest positive number s.t. 1/SAFMIN does not */
+/* overflow. (This should always be SLAMCH('S') -- it is an */
+/* argument in order to avoid having to call SLAMCH frequently.) */
+
+/* SCALE1 (output) REAL */
+/* A scaling factor used to avoid over-/underflow in the */
+/* eigenvalue equation which defines the first eigenvalue. If */
+/* the eigenvalues are complex, then the eigenvalues are */
+/* ( WR1 +/- WI i ) / SCALE1 (which may lie outside the */
+/* exponent range of the machine), SCALE1=SCALE2, and SCALE1 */
+/* will always be positive. If the eigenvalues are real, then */
+/* the first (real) eigenvalue is WR1 / SCALE1 , but this may */
+/* overflow or underflow, and in fact, SCALE1 may be zero or */
+/* less than the underflow threshold if the exact eigenvalue */
+/* is sufficiently large. */
+
+/* SCALE2 (output) REAL */
+/* A scaling factor used to avoid over-/underflow in the */
+/* eigenvalue equation which defines the second eigenvalue. If */
+/* the eigenvalues are complex, then SCALE2=SCALE1. If the */
+/* eigenvalues are real, then the second (real) eigenvalue is */
+/* WR2 / SCALE2 , but this may overflow or underflow, and in */
+/* fact, SCALE2 may be zero or less than the underflow */
+/* threshold if the exact eigenvalue is sufficiently large. */
+
+/* WR1 (output) REAL */
+/* If the eigenvalue is real, then WR1 is SCALE1 times the */
+/* eigenvalue closest to the (2,2) element of A B**(-1). If the */
+/* eigenvalue is complex, then WR1=WR2 is SCALE1 times the real */
+/* part of the eigenvalues. */
+
+/* WR2 (output) REAL */
+/* If the eigenvalue is real, then WR2 is SCALE2 times the */
+/* other eigenvalue. If the eigenvalue is complex, then */
+/* WR1=WR2 is SCALE1 times the real part of the eigenvalues. */
+
+/* WI (output) REAL */
+/* If the eigenvalue is real, then WI is zero. If the */
+/* eigenvalue is complex, then WI is SCALE1 times the imaginary */
+/* part of the eigenvalues. WI will always be non-negative. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ rtmin = sqrt(*safmin);
+ rtmax = 1.f / rtmin;
+ safmax = 1.f / *safmin;
+
+/* Scale A */
+
+/* Computing MAX */
+ r__5 = (r__1 = a[a_dim1 + 1], dabs(r__1)) + (r__2 = a[a_dim1 + 2], dabs(
+ r__2)), r__6 = (r__3 = a[(a_dim1 << 1) + 1], dabs(r__3)) + (r__4 =
+ a[(a_dim1 << 1) + 2], dabs(r__4)), r__5 = max(r__5,r__6);
+ anorm = dmax(r__5,*safmin);
+ ascale = 1.f / anorm;
+ a11 = ascale * a[a_dim1 + 1];
+ a21 = ascale * a[a_dim1 + 2];
+ a12 = ascale * a[(a_dim1 << 1) + 1];
+ a22 = ascale * a[(a_dim1 << 1) + 2];
+
+/* Perturb B if necessary to insure non-singularity */
+
+ b11 = b[b_dim1 + 1];
+ b12 = b[(b_dim1 << 1) + 1];
+ b22 = b[(b_dim1 << 1) + 2];
+/* Computing MAX */
+ r__1 = dabs(b11), r__2 = dabs(b12), r__1 = max(r__1,r__2), r__2 = dabs(
+ b22), r__1 = max(r__1,r__2);
+ bmin = rtmin * dmax(r__1,rtmin);
+ if (dabs(b11) < bmin) {
+ b11 = r_sign(&bmin, &b11);
+ }
+ if (dabs(b22) < bmin) {
+ b22 = r_sign(&bmin, &b22);
+ }
+
+/* Scale B */
+
+/* Computing MAX */
+ r__1 = dabs(b11), r__2 = dabs(b12) + dabs(b22), r__1 = max(r__1,r__2);
+ bnorm = dmax(r__1,*safmin);
+/* Computing MAX */
+ r__1 = dabs(b11), r__2 = dabs(b22);
+ bsize = dmax(r__1,r__2);
+ bscale = 1.f / bsize;
+ b11 *= bscale;
+ b12 *= bscale;
+ b22 *= bscale;
+
+/* Compute larger eigenvalue by method described by C. van Loan */
+
+/* ( AS is A shifted by -SHIFT*B ) */
+
+ binv11 = 1.f / b11;
+ binv22 = 1.f / b22;
+ s1 = a11 * binv11;
+ s2 = a22 * binv22;
+ if (dabs(s1) <= dabs(s2)) {
+ as12 = a12 - s1 * b12;
+ as22 = a22 - s1 * b22;
+ ss = a21 * (binv11 * binv22);
+ abi22 = as22 * binv22 - ss * b12;
+ pp = abi22 * .5f;
+ shift = s1;
+ } else {
+ as12 = a12 - s2 * b12;
+ as11 = a11 - s2 * b11;
+ ss = a21 * (binv11 * binv22);
+ abi22 = -ss * b12;
+ pp = (as11 * binv11 + abi22) * .5f;
+ shift = s2;
+ }
+ qq = ss * as12;
+ if ((r__1 = pp * rtmin, dabs(r__1)) >= 1.f) {
+/* Computing 2nd power */
+ r__1 = rtmin * pp;
+ discr = r__1 * r__1 + qq * *safmin;
+ r__ = sqrt((dabs(discr))) * rtmax;
+ } else {
+/* Computing 2nd power */
+ r__1 = pp;
+ if (r__1 * r__1 + dabs(qq) <= *safmin) {
+/* Computing 2nd power */
+ r__1 = rtmax * pp;
+ discr = r__1 * r__1 + qq * safmax;
+ r__ = sqrt((dabs(discr))) * rtmin;
+ } else {
+/* Computing 2nd power */
+ r__1 = pp;
+ discr = r__1 * r__1 + qq;
+ r__ = sqrt((dabs(discr)));
+ }
+ }
+
+/* Note: the test of R in the following IF is to cover the case when */
+/* DISCR is small and negative and is flushed to zero during */
+/* the calculation of R. On machines which have a consistent */
+/* flush-to-zero threshold and handle numbers above that */
+/* threshold correctly, it would not be necessary. */
+
+ if (discr >= 0.f || r__ == 0.f) {
+ sum = pp + r_sign(&r__, &pp);
+ diff = pp - r_sign(&r__, &pp);
+ wbig = shift + sum;
+
+/* Compute smaller eigenvalue */
+
+ wsmall = shift + diff;
+/* Computing MAX */
+ r__1 = dabs(wsmall);
+ if (dabs(wbig) * .5f > dmax(r__1,*safmin)) {
+ wdet = (a11 * a22 - a12 * a21) * (binv11 * binv22);
+ wsmall = wdet / wbig;
+ }
+
+/* Choose (real) eigenvalue closest to 2,2 element of A*B**(-1) */
+/* for WR1. */
+
+ if (pp > abi22) {
+ *wr1 = dmin(wbig,wsmall);
+ *wr2 = dmax(wbig,wsmall);
+ } else {
+ *wr1 = dmax(wbig,wsmall);
+ *wr2 = dmin(wbig,wsmall);
+ }
+ *wi = 0.f;
+ } else {
+
+/* Complex eigenvalues */
+
+ *wr1 = shift + pp;
+ *wr2 = *wr1;
+ *wi = r__;
+ }
+
+/* Further scaling to avoid underflow and overflow in computing */
+/* SCALE1 and overflow in computing w*B. */
+
+/* This scale factor (WSCALE) is bounded from above using C1 and C2, */
+/* and from below using C3 and C4. */
+/* C1 implements the condition s A must never overflow. */
+/* C2 implements the condition w B must never overflow. */
+/* C3, with C2, */
+/* implement the condition that s A - w B must never overflow. */
+/* C4 implements the condition s should not underflow. */
+/* C5 implements the condition max(s,|w|) should be at least 2. */
+
+ c1 = bsize * (*safmin * dmax(1.f,ascale));
+ c2 = *safmin * dmax(1.f,bnorm);
+ c3 = bsize * *safmin;
+ if (ascale <= 1.f && bsize <= 1.f) {
+/* Computing MIN */
+ r__1 = 1.f, r__2 = ascale / *safmin * bsize;
+ c4 = dmin(r__1,r__2);
+ } else {
+ c4 = 1.f;
+ }
+ if (ascale <= 1.f || bsize <= 1.f) {
+/* Computing MIN */
+ r__1 = 1.f, r__2 = ascale * bsize;
+ c5 = dmin(r__1,r__2);
+ } else {
+ c5 = 1.f;
+ }
+
+/* Scale first eigenvalue */
+
+ wabs = dabs(*wr1) + dabs(*wi);
+/* Computing MAX */
+/* Computing MIN */
+ r__3 = c4, r__4 = dmax(wabs,c5) * .5f;
+ r__1 = max(*safmin,c1), r__2 = (wabs * c2 + c3) * 1.0000100000000001f,
+ r__1 = max(r__1,r__2), r__2 = dmin(r__3,r__4);
+ wsize = dmax(r__1,r__2);
+ if (wsize != 1.f) {
+ wscale = 1.f / wsize;
+ if (wsize > 1.f) {
+ *scale1 = dmax(ascale,bsize) * wscale * dmin(ascale,bsize);
+ } else {
+ *scale1 = dmin(ascale,bsize) * wscale * dmax(ascale,bsize);
+ }
+ *wr1 *= wscale;
+ if (*wi != 0.f) {
+ *wi *= wscale;
+ *wr2 = *wr1;
+ *scale2 = *scale1;
+ }
+ } else {
+ *scale1 = ascale * bsize;
+ *scale2 = *scale1;
+ }
+
+/* Scale second eigenvalue (if real) */
+
+ if (*wi == 0.f) {
+/* Computing MAX */
+/* Computing MIN */
+/* Computing MAX */
+ r__5 = dabs(*wr2);
+ r__3 = c4, r__4 = dmax(r__5,c5) * .5f;
+ r__1 = max(*safmin,c1), r__2 = (dabs(*wr2) * c2 + c3) *
+ 1.0000100000000001f, r__1 = max(r__1,r__2), r__2 = dmin(r__3,
+ r__4);
+ wsize = dmax(r__1,r__2);
+ if (wsize != 1.f) {
+ wscale = 1.f / wsize;
+ if (wsize > 1.f) {
+ *scale2 = dmax(ascale,bsize) * wscale * dmin(ascale,bsize);
+ } else {
+ *scale2 = dmin(ascale,bsize) * wscale * dmax(ascale,bsize);
+ }
+ *wr2 *= wscale;
+ } else {
+ *scale2 = ascale * bsize;
+ }
+ }
+
+/* End of SLAG2 */
+
+ return 0;
+} /* slag2_ */
diff --git a/contrib/libs/clapack/slag2d.c b/contrib/libs/clapack/slag2d.c
new file mode 100644
index 0000000000..81ae9c3a23
--- /dev/null
+++ b/contrib/libs/clapack/slag2d.c
@@ -0,0 +1,100 @@
+/* slag2d.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slag2d_(integer *m, integer *n, real *sa, integer *ldsa,
+ doublereal *a, integer *lda, integer *info)
+{
+ /* System generated locals */
+ integer sa_dim1, sa_offset, a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j;
+
+
+/* -- LAPACK PROTOTYPE auxiliary routine (version 3.1.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* August 2007 */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAG2D converts a SINGLE PRECISION matrix, SA, to a DOUBLE */
+/* PRECISION matrix, A. */
+
+/* Note that while it is possible to overflow while converting */
+/* from double to single, it is not possible to overflow when */
+/* converting from single to double. */
+
+/* This is an auxiliary routine so there is no argument checking. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* SA (input) REAL array, dimension (LDSA,N) */
+/* On entry, the M-by-N coefficient matrix SA. */
+
+/* LDSA (input) INTEGER */
+/* The leading dimension of the array SA. LDSA >= max(1,M). */
+
+/* A (output) DOUBLE PRECISION array, dimension (LDA,N) */
+/* On exit, the M-by-N coefficient matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* ========= */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ sa_dim1 = *ldsa;
+ sa_offset = 1 + sa_dim1;
+ sa -= sa_offset;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = sa[i__ + j * sa_dim1];
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+
+/* End of SLAG2D */
+
+} /* slag2d_ */
diff --git a/contrib/libs/clapack/slags2.c b/contrib/libs/clapack/slags2.c
new file mode 100644
index 0000000000..bcd0c91702
--- /dev/null
+++ b/contrib/libs/clapack/slags2.c
@@ -0,0 +1,290 @@
+/* slags2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slags2_(logical *upper, real *a1, real *a2, real *a3,
+ real *b1, real *b2, real *b3, real *csu, real *snu, real *csv, real *
+ snv, real *csq, real *snq)
+{
+ /* System generated locals */
+ real r__1;
+
+ /* Local variables */
+ real a, b, c__, d__, r__, s1, s2, ua11, ua12, ua21, ua22, vb11, vb12,
+ vb21, vb22, csl, csr, snl, snr, aua11, aua12, aua21, aua22, avb11,
+ avb12, avb21, avb22, ua11r, ua22r, vb11r, vb22r;
+ extern /* Subroutine */ int slasv2_(real *, real *, real *, real *, real *
+, real *, real *, real *, real *), slartg_(real *, real *, real *,
+ real *, real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAGS2 computes 2-by-2 orthogonal matrices U, V and Q, such */
+/* that if ( UPPER ) then */
+
+/* U'*A*Q = U'*( A1 A2 )*Q = ( x 0 ) */
+/* ( 0 A3 ) ( x x ) */
+/* and */
+/* V'*B*Q = V'*( B1 B2 )*Q = ( x 0 ) */
+/* ( 0 B3 ) ( x x ) */
+
+/* or if ( .NOT.UPPER ) then */
+
+/* U'*A*Q = U'*( A1 0 )*Q = ( x x ) */
+/* ( A2 A3 ) ( 0 x ) */
+/* and */
+/* V'*B*Q = V'*( B1 0 )*Q = ( x x ) */
+/* ( B2 B3 ) ( 0 x ) */
+
+/* The rows of the transformed A and B are parallel, where */
+
+/* U = ( CSU SNU ), V = ( CSV SNV ), Q = ( CSQ SNQ ) */
+/* ( -SNU CSU ) ( -SNV CSV ) ( -SNQ CSQ ) */
+
+/* Z' denotes the transpose of Z. */
+
+
+/* Arguments */
+/* ========= */
+
+/* UPPER (input) LOGICAL */
+/* = .TRUE.: the input matrices A and B are upper triangular. */
+/* = .FALSE.: the input matrices A and B are lower triangular. */
+
+/* A1 (input) REAL */
+/* A2 (input) REAL */
+/* A3 (input) REAL */
+/* On entry, A1, A2 and A3 are elements of the input 2-by-2 */
+/* upper (lower) triangular matrix A. */
+
+/* B1 (input) REAL */
+/* B2 (input) REAL */
+/* B3 (input) REAL */
+/* On entry, B1, B2 and B3 are elements of the input 2-by-2 */
+/* upper (lower) triangular matrix B. */
+
+/* CSU (output) REAL */
+/* SNU (output) REAL */
+/* The desired orthogonal matrix U. */
+
+/* CSV (output) REAL */
+/* SNV (output) REAL */
+/* The desired orthogonal matrix V. */
+
+/* CSQ (output) REAL */
+/* SNQ (output) REAL */
+/* The desired orthogonal matrix Q. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ if (*upper) {
+
+/* Input matrices A and B are upper triangular matrices */
+
+/* Form matrix C = A*adj(B) = ( a b ) */
+/* ( 0 d ) */
+
+ a = *a1 * *b3;
+ d__ = *a3 * *b1;
+ b = *a2 * *b1 - *a1 * *b2;
+
+/* The SVD of real 2-by-2 triangular C */
+
+/* ( CSL -SNL )*( A B )*( CSR SNR ) = ( R 0 ) */
+/* ( SNL CSL ) ( 0 D ) ( -SNR CSR ) ( 0 T ) */
+
+ slasv2_(&a, &b, &d__, &s1, &s2, &snr, &csr, &snl, &csl);
+
+ if (dabs(csl) >= dabs(snl) || dabs(csr) >= dabs(snr)) {
+
+/* Compute the (1,1) and (1,2) elements of U'*A and V'*B, */
+/* and (1,2) element of |U|'*|A| and |V|'*|B|. */
+
+ ua11r = csl * *a1;
+ ua12 = csl * *a2 + snl * *a3;
+
+ vb11r = csr * *b1;
+ vb12 = csr * *b2 + snr * *b3;
+
+ aua12 = dabs(csl) * dabs(*a2) + dabs(snl) * dabs(*a3);
+ avb12 = dabs(csr) * dabs(*b2) + dabs(snr) * dabs(*b3);
+
+/* zero (1,2) elements of U'*A and V'*B */
+
+ if (dabs(ua11r) + dabs(ua12) != 0.f) {
+ if (aua12 / (dabs(ua11r) + dabs(ua12)) <= avb12 / (dabs(vb11r)
+ + dabs(vb12))) {
+ r__1 = -ua11r;
+ slartg_(&r__1, &ua12, csq, snq, &r__);
+ } else {
+ r__1 = -vb11r;
+ slartg_(&r__1, &vb12, csq, snq, &r__);
+ }
+ } else {
+ r__1 = -vb11r;
+ slartg_(&r__1, &vb12, csq, snq, &r__);
+ }
+
+ *csu = csl;
+ *snu = -snl;
+ *csv = csr;
+ *snv = -snr;
+
+ } else {
+
+/* Compute the (2,1) and (2,2) elements of U'*A and V'*B, */
+/* and (2,2) element of |U|'*|A| and |V|'*|B|. */
+
+ ua21 = -snl * *a1;
+ ua22 = -snl * *a2 + csl * *a3;
+
+ vb21 = -snr * *b1;
+ vb22 = -snr * *b2 + csr * *b3;
+
+ aua22 = dabs(snl) * dabs(*a2) + dabs(csl) * dabs(*a3);
+ avb22 = dabs(snr) * dabs(*b2) + dabs(csr) * dabs(*b3);
+
+/* zero (2,2) elements of U'*A and V'*B, and then swap. */
+
+ if (dabs(ua21) + dabs(ua22) != 0.f) {
+ if (aua22 / (dabs(ua21) + dabs(ua22)) <= avb22 / (dabs(vb21)
+ + dabs(vb22))) {
+ r__1 = -ua21;
+ slartg_(&r__1, &ua22, csq, snq, &r__);
+ } else {
+ r__1 = -vb21;
+ slartg_(&r__1, &vb22, csq, snq, &r__);
+ }
+ } else {
+ r__1 = -vb21;
+ slartg_(&r__1, &vb22, csq, snq, &r__);
+ }
+
+ *csu = snl;
+ *snu = csl;
+ *csv = snr;
+ *snv = csr;
+
+ }
+
+ } else {
+
+/* Input matrices A and B are lower triangular matrices */
+
+/* Form matrix C = A*adj(B) = ( a 0 ) */
+/* ( c d ) */
+
+ a = *a1 * *b3;
+ d__ = *a3 * *b1;
+ c__ = *a2 * *b3 - *a3 * *b2;
+
+/* The SVD of real 2-by-2 triangular C */
+
+/* ( CSL -SNL )*( A 0 )*( CSR SNR ) = ( R 0 ) */
+/* ( SNL CSL ) ( C D ) ( -SNR CSR ) ( 0 T ) */
+
+ slasv2_(&a, &c__, &d__, &s1, &s2, &snr, &csr, &snl, &csl);
+
+ if (dabs(csr) >= dabs(snr) || dabs(csl) >= dabs(snl)) {
+
+/* Compute the (2,1) and (2,2) elements of U'*A and V'*B, */
+/* and (2,1) element of |U|'*|A| and |V|'*|B|. */
+
+ ua21 = -snr * *a1 + csr * *a2;
+ ua22r = csr * *a3;
+
+ vb21 = -snl * *b1 + csl * *b2;
+ vb22r = csl * *b3;
+
+ aua21 = dabs(snr) * dabs(*a1) + dabs(csr) * dabs(*a2);
+ avb21 = dabs(snl) * dabs(*b1) + dabs(csl) * dabs(*b2);
+
+/* zero (2,1) elements of U'*A and V'*B. */
+
+ if (dabs(ua21) + dabs(ua22r) != 0.f) {
+ if (aua21 / (dabs(ua21) + dabs(ua22r)) <= avb21 / (dabs(vb21)
+ + dabs(vb22r))) {
+ slartg_(&ua22r, &ua21, csq, snq, &r__);
+ } else {
+ slartg_(&vb22r, &vb21, csq, snq, &r__);
+ }
+ } else {
+ slartg_(&vb22r, &vb21, csq, snq, &r__);
+ }
+
+ *csu = csr;
+ *snu = -snr;
+ *csv = csl;
+ *snv = -snl;
+
+ } else {
+
+/* Compute the (1,1) and (1,2) elements of U'*A and V'*B, */
+/* and (1,1) element of |U|'*|A| and |V|'*|B|. */
+
+ ua11 = csr * *a1 + snr * *a2;
+ ua12 = snr * *a3;
+
+ vb11 = csl * *b1 + snl * *b2;
+ vb12 = snl * *b3;
+
+ aua11 = dabs(csr) * dabs(*a1) + dabs(snr) * dabs(*a2);
+ avb11 = dabs(csl) * dabs(*b1) + dabs(snl) * dabs(*b2);
+
+/* zero (1,1) elements of U'*A and V'*B, and then swap. */
+
+ if (dabs(ua11) + dabs(ua12) != 0.f) {
+ if (aua11 / (dabs(ua11) + dabs(ua12)) <= avb11 / (dabs(vb11)
+ + dabs(vb12))) {
+ slartg_(&ua12, &ua11, csq, snq, &r__);
+ } else {
+ slartg_(&vb12, &vb11, csq, snq, &r__);
+ }
+ } else {
+ slartg_(&vb12, &vb11, csq, snq, &r__);
+ }
+
+ *csu = snr;
+ *snu = csr;
+ *csv = snl;
+ *snv = csl;
+
+ }
+
+ }
+
+ return 0;
+
+/* End of SLAGS2 */
+
+} /* slags2_ */
diff --git a/contrib/libs/clapack/slagtf.c b/contrib/libs/clapack/slagtf.c
new file mode 100644
index 0000000000..9046c23e55
--- /dev/null
+++ b/contrib/libs/clapack/slagtf.c
@@ -0,0 +1,223 @@
+/* slagtf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slagtf_(integer *n, real *a, real *lambda, real *b, real
+ *c__, real *tol, real *d__, integer *in, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2;
+
+ /* Local variables */
+ integer k;
+ real tl, eps, piv1, piv2, temp, mult, scale1, scale2;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAGTF factorizes the matrix (T - lambda*I), where T is an n by n */
+/* tridiagonal matrix and lambda is a scalar, as */
+
+/* T - lambda*I = PLU, */
+
+/* where P is a permutation matrix, L is a unit lower tridiagonal matrix */
+/* with at most one non-zero sub-diagonal elements per column and U is */
+/* an upper triangular matrix with at most two non-zero super-diagonal */
+/* elements per column. */
+
+/* The factorization is obtained by Gaussian elimination with partial */
+/* pivoting and implicit row scaling. */
+
+/* The parameter LAMBDA is included in the routine so that SLAGTF may */
+/* be used, in conjunction with SLAGTS, to obtain eigenvectors of T by */
+/* inverse iteration. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. */
+
+/* A (input/output) REAL array, dimension (N) */
+/* On entry, A must contain the diagonal elements of T. */
+
+/* On exit, A is overwritten by the n diagonal elements of the */
+/* upper triangular matrix U of the factorization of T. */
+
+/* LAMBDA (input) REAL */
+/* On entry, the scalar lambda. */
+
+/* B (input/output) REAL array, dimension (N-1) */
+/* On entry, B must contain the (n-1) super-diagonal elements of */
+/* T. */
+
+/* On exit, B is overwritten by the (n-1) super-diagonal */
+/* elements of the matrix U of the factorization of T. */
+
+/* C (input/output) REAL array, dimension (N-1) */
+/* On entry, C must contain the (n-1) sub-diagonal elements of */
+/* T. */
+
+/* On exit, C is overwritten by the (n-1) sub-diagonal elements */
+/* of the matrix L of the factorization of T. */
+
+/* TOL (input) REAL */
+/* On entry, a relative tolerance used to indicate whether or */
+/* not the matrix (T - lambda*I) is nearly singular. TOL should */
+/* normally be chosen as approximately the largest relative error */
+/* in the elements of T. For example, if the elements of T are */
+/* correct to about 4 significant figures, then TOL should be */
+/* set to about 5*10**(-4). If TOL is supplied as less than eps, */
+/* where eps is the relative machine precision, then the value */
+/* eps is used in place of TOL. */
+
+/* D (output) REAL array, dimension (N-2) */
+/* On exit, D is overwritten by the (n-2) second super-diagonal */
+/* elements of the matrix U of the factorization of T. */
+
+/* IN (output) INTEGER array, dimension (N) */
+/* On exit, IN contains details of the permutation matrix P. If */
+/* an interchange occurred at the kth step of the elimination, */
+/* then IN(k) = 1, otherwise IN(k) = 0. The element IN(n) */
+/* returns the smallest positive integer j such that */
+
+/* abs( u(j,j) ).le. norm( (T - lambda*I)(j) )*TOL, */
+
+/* where norm( A(j) ) denotes the sum of the absolute values of */
+/* the jth row of the matrix A. If no such j exists then IN(n) */
+/* is returned as zero. If IN(n) is returned as positive, then a */
+/* diagonal element of U is small, indicating that */
+/* (T - lambda*I) is singular or nearly singular, */
+
+/* INFO (output) INTEGER */
+/* = 0 : successful exit */
+/* .lt. 0: if INFO = -k, the kth argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --in;
+ --d__;
+ --c__;
+ --b;
+ --a;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ i__1 = -(*info);
+ xerbla_("SLAGTF", &i__1);
+ return 0;
+ }
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ a[1] -= *lambda;
+ in[*n] = 0;
+ if (*n == 1) {
+ if (a[1] == 0.f) {
+ in[1] = 1;
+ }
+ return 0;
+ }
+
+ eps = slamch_("Epsilon");
+
+ tl = dmax(*tol,eps);
+ scale1 = dabs(a[1]) + dabs(b[1]);
+ i__1 = *n - 1;
+ for (k = 1; k <= i__1; ++k) {
+ a[k + 1] -= *lambda;
+ scale2 = (r__1 = c__[k], dabs(r__1)) + (r__2 = a[k + 1], dabs(r__2));
+ if (k < *n - 1) {
+ scale2 += (r__1 = b[k + 1], dabs(r__1));
+ }
+ if (a[k] == 0.f) {
+ piv1 = 0.f;
+ } else {
+ piv1 = (r__1 = a[k], dabs(r__1)) / scale1;
+ }
+ if (c__[k] == 0.f) {
+ in[k] = 0;
+ piv2 = 0.f;
+ scale1 = scale2;
+ if (k < *n - 1) {
+ d__[k] = 0.f;
+ }
+ } else {
+ piv2 = (r__1 = c__[k], dabs(r__1)) / scale2;
+ if (piv2 <= piv1) {
+ in[k] = 0;
+ scale1 = scale2;
+ c__[k] /= a[k];
+ a[k + 1] -= c__[k] * b[k];
+ if (k < *n - 1) {
+ d__[k] = 0.f;
+ }
+ } else {
+ in[k] = 1;
+ mult = a[k] / c__[k];
+ a[k] = c__[k];
+ temp = a[k + 1];
+ a[k + 1] = b[k] - mult * temp;
+ if (k < *n - 1) {
+ d__[k] = b[k + 1];
+ b[k + 1] = -mult * d__[k];
+ }
+ b[k] = temp;
+ c__[k] = mult;
+ }
+ }
+ if (dmax(piv1,piv2) <= tl && in[*n] == 0) {
+ in[*n] = k;
+ }
+/* L10: */
+ }
+ if ((r__1 = a[*n], dabs(r__1)) <= scale1 * tl && in[*n] == 0) {
+ in[*n] = *n;
+ }
+
+ return 0;
+
+/* End of SLAGTF */
+
+} /* slagtf_ */
diff --git a/contrib/libs/clapack/slagtm.c b/contrib/libs/clapack/slagtm.c
new file mode 100644
index 0000000000..8018ebcfd0
--- /dev/null
+++ b/contrib/libs/clapack/slagtm.c
@@ -0,0 +1,253 @@
+/* slagtm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slagtm_(char *trans, integer *n, integer *nrhs, real *
+ alpha, real *dl, real *d__, real *du, real *x, integer *ldx, real *
+ beta, real *b, integer *ldb)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAGTM performs a matrix-vector product of the form */
+
+/* B := alpha * A * X + beta * B */
+
+/* where A is a tridiagonal matrix of order N, B and X are N by NRHS */
+/* matrices, and alpha and beta are real scalars, each of which may be */
+/* 0., 1., or -1. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the operation applied to A. */
+/* = 'N': No transpose, B := alpha * A * X + beta * B */
+/* = 'T': Transpose, B := alpha * A'* X + beta * B */
+/* = 'C': Conjugate transpose = Transpose */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices X and B. */
+
+/* ALPHA (input) REAL */
+/* The scalar alpha. ALPHA must be 0., 1., or -1.; otherwise, */
+/* it is assumed to be 0. */
+
+/* DL (input) REAL array, dimension (N-1) */
+/* The (n-1) sub-diagonal elements of T. */
+
+/* D (input) REAL array, dimension (N) */
+/* The diagonal elements of T. */
+
+/* DU (input) REAL array, dimension (N-1) */
+/* The (n-1) super-diagonal elements of T. */
+
+/* X (input) REAL array, dimension (LDX,NRHS) */
+/* The N by NRHS matrix X. */
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(N,1). */
+
+/* BETA (input) REAL */
+/* The scalar beta. BETA must be 0., 1., or -1.; otherwise, */
+/* it is assumed to be 1. */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the N by NRHS matrix B. */
+/* On exit, B is overwritten by the matrix expression */
+/* B := alpha * A * X + beta * B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(N,1). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --dl;
+ --d__;
+ --du;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Multiply B by BETA if BETA.NE.1. */
+
+ if (*beta == 0.f) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if (*beta == -1.f) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = -b[i__ + j * b_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+
+ if (*alpha == 1.f) {
+ if (lsame_(trans, "N")) {
+
+/* Compute B := B + A*X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ b[j * b_dim1 + 1] += d__[1] * x[j * x_dim1 + 1];
+ } else {
+ b[j * b_dim1 + 1] = b[j * b_dim1 + 1] + d__[1] * x[j *
+ x_dim1 + 1] + du[1] * x[j * x_dim1 + 2];
+ b[*n + j * b_dim1] = b[*n + j * b_dim1] + dl[*n - 1] * x[*
+ n - 1 + j * x_dim1] + d__[*n] * x[*n + j * x_dim1]
+ ;
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = b[i__ + j * b_dim1] + dl[i__ -
+ 1] * x[i__ - 1 + j * x_dim1] + d__[i__] * x[
+ i__ + j * x_dim1] + du[i__] * x[i__ + 1 + j *
+ x_dim1];
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else {
+
+/* Compute B := B + A'*X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ b[j * b_dim1 + 1] += d__[1] * x[j * x_dim1 + 1];
+ } else {
+ b[j * b_dim1 + 1] = b[j * b_dim1 + 1] + d__[1] * x[j *
+ x_dim1 + 1] + dl[1] * x[j * x_dim1 + 2];
+ b[*n + j * b_dim1] = b[*n + j * b_dim1] + du[*n - 1] * x[*
+ n - 1 + j * x_dim1] + d__[*n] * x[*n + j * x_dim1]
+ ;
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = b[i__ + j * b_dim1] + du[i__ -
+ 1] * x[i__ - 1 + j * x_dim1] + d__[i__] * x[
+ i__ + j * x_dim1] + dl[i__] * x[i__ + 1 + j *
+ x_dim1];
+/* L70: */
+ }
+ }
+/* L80: */
+ }
+ }
+ } else if (*alpha == -1.f) {
+ if (lsame_(trans, "N")) {
+
+/* Compute B := B - A*X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ b[j * b_dim1 + 1] -= d__[1] * x[j * x_dim1 + 1];
+ } else {
+ b[j * b_dim1 + 1] = b[j * b_dim1 + 1] - d__[1] * x[j *
+ x_dim1 + 1] - du[1] * x[j * x_dim1 + 2];
+ b[*n + j * b_dim1] = b[*n + j * b_dim1] - dl[*n - 1] * x[*
+ n - 1 + j * x_dim1] - d__[*n] * x[*n + j * x_dim1]
+ ;
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = b[i__ + j * b_dim1] - dl[i__ -
+ 1] * x[i__ - 1 + j * x_dim1] - d__[i__] * x[
+ i__ + j * x_dim1] - du[i__] * x[i__ + 1 + j *
+ x_dim1];
+/* L90: */
+ }
+ }
+/* L100: */
+ }
+ } else {
+
+/* Compute B := B - A'*X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ b[j * b_dim1 + 1] -= d__[1] * x[j * x_dim1 + 1];
+ } else {
+ b[j * b_dim1 + 1] = b[j * b_dim1 + 1] - d__[1] * x[j *
+ x_dim1 + 1] - dl[1] * x[j * x_dim1 + 2];
+ b[*n + j * b_dim1] = b[*n + j * b_dim1] - du[*n - 1] * x[*
+ n - 1 + j * x_dim1] - d__[*n] * x[*n + j * x_dim1]
+ ;
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = b[i__ + j * b_dim1] - du[i__ -
+ 1] * x[i__ - 1 + j * x_dim1] - d__[i__] * x[
+ i__ + j * x_dim1] - dl[i__] * x[i__ + 1 + j *
+ x_dim1];
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+ }
+ }
+ return 0;
+
+/* End of SLAGTM */
+
+} /* slagtm_ */
diff --git a/contrib/libs/clapack/slagts.c b/contrib/libs/clapack/slagts.c
new file mode 100644
index 0000000000..dc828f8aeb
--- /dev/null
+++ b/contrib/libs/clapack/slagts.c
@@ -0,0 +1,351 @@
+/* slagts.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slagts_(integer *job, integer *n, real *a, real *b, real
+        *c__, real *d__, integer *in, real *y, real *tol, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    real r__1, r__2, r__3, r__4, r__5;
+
+    /* Builtin functions */
+    double r_sign(real *, real *);
+
+    /* Local variables */
+    integer k;
+    real ak, eps, temp, pert, absak, sfmin;
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real bignum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAGTS may be used to solve one of the systems of equations */
+
+/* (T - lambda*I)*x = y or (T - lambda*I)'*x = y, */
+
+/* where T is an n by n tridiagonal matrix, for x, following the */
+/* factorization of (T - lambda*I) as */
+
+/* (T - lambda*I) = P*L*U , */
+
+/* by routine SLAGTF. The choice of equation to be solved is */
+/* controlled by the argument JOB, and in each case there is an option */
+/* to perturb zero or very small diagonal elements of U, this option */
+/* being intended for use in applications such as inverse iteration. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) INTEGER */
+/* Specifies the job to be performed by SLAGTS as follows: */
+/* = 1: The equations (T - lambda*I)x = y are to be solved, */
+/* but diagonal elements of U are not to be perturbed. */
+/* = -1: The equations (T - lambda*I)x = y are to be solved */
+/* and, if overflow would otherwise occur, the diagonal */
+/* elements of U are to be perturbed. See argument TOL */
+/* below. */
+/* = 2: The equations (T - lambda*I)'x = y are to be solved, */
+/* but diagonal elements of U are not to be perturbed. */
+/* = -2: The equations (T - lambda*I)'x = y are to be solved */
+/* and, if overflow would otherwise occur, the diagonal */
+/* elements of U are to be perturbed. See argument TOL */
+/* below. */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. */
+
+/* A (input) REAL array, dimension (N) */
+/* On entry, A must contain the diagonal elements of U as */
+/* returned from SLAGTF. */
+
+/* B (input) REAL array, dimension (N-1) */
+/* On entry, B must contain the first super-diagonal elements of */
+/* U as returned from SLAGTF. */
+
+/* C (input) REAL array, dimension (N-1) */
+/* On entry, C must contain the sub-diagonal elements of L as */
+/* returned from SLAGTF. */
+
+/* D (input) REAL array, dimension (N-2) */
+/* On entry, D must contain the second super-diagonal elements */
+/* of U as returned from SLAGTF. */
+
+/* IN (input) INTEGER array, dimension (N) */
+/* On entry, IN must contain details of the matrix P as returned */
+/* from SLAGTF. */
+
+/* Y (input/output) REAL array, dimension (N) */
+/* On entry, the right hand side vector y. */
+/* On exit, Y is overwritten by the solution vector x. */
+
+/* TOL (input/output) REAL */
+/* On entry, with JOB .lt. 0, TOL should be the minimum */
+/* perturbation to be made to very small diagonal elements of U. */
+/* TOL should normally be chosen as about eps*norm(U), where eps */
+/* is the relative machine precision, but if TOL is supplied as */
+/* non-positive, then it is reset to eps*max( abs( u(i,j) ) ). */
+/* If JOB .gt. 0 then TOL is not referenced. */
+
+/* On exit, TOL is changed as described above, only if TOL is */
+/* non-positive on entry. Otherwise TOL is unchanged. */
+
+/* INFO (output) INTEGER */
+/* = 0 : successful exit */
+/* .lt. 0: if INFO = -i, the i-th argument had an illegal value */
+/* .gt. 0: overflow would occur when computing the INFO(th) */
+/* element of the solution vector x. This can only occur */
+/* when JOB is supplied as positive and either means */
+/* that a diagonal element of U is very small, or that */
+/* the elements of the right-hand side vector y are very */
+/* large. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    /* (f2c idiom: shift each pointer by one so the arrays can be */
+    /* indexed 1..N exactly as in the original Fortran.) */
+    --y;
+    --in;
+    --d__;
+    --c__;
+    --b;
+    --a;
+
+    /* Function Body */
+
+/*     Check arguments: JOB must be one of 1, -1, 2, -2 and N >= 0. */
+
+    *info = 0;
+    if (abs(*job) > 2 || *job == 0) {
+        *info = -1;
+    } else if (*n < 0) {
+        *info = -2;
+    }
+    if (*info != 0) {
+        i__1 = -(*info);
+        xerbla_("SLAGTS", &i__1);
+        return 0;
+    }
+
+    if (*n == 0) {
+        return 0;
+    }
+
+    eps = slamch_("Epsilon");
+    sfmin = slamch_("Safe minimum");
+    bignum = 1.f / sfmin;
+
+/*     For the perturbing variants (JOB < 0): if TOL was supplied as */
+/*     non-positive, reset it to eps*max( abs( u(i,j) ) ) taken over */
+/*     the stored factors A (diagonal of U) and B, D (superdiagonals). */
+
+    if (*job < 0) {
+        if (*tol <= 0.f) {
+            *tol = dabs(a[1]);
+            if (*n > 1) {
+/* Computing MAX */
+                r__1 = *tol, r__2 = dabs(a[2]), r__1 = max(r__1,r__2), r__2 =
+                        dabs(b[1]);
+                *tol = dmax(r__1,r__2);
+            }
+            i__1 = *n;
+            for (k = 3; k <= i__1; ++k) {
+/* Computing MAX */
+                r__4 = *tol, r__5 = (r__1 = a[k], dabs(r__1)), r__4 = max(
+                        r__4,r__5), r__5 = (r__2 = b[k - 1], dabs(r__2)),
+                        r__4 = max(r__4,r__5), r__5 = (r__3 = d__[k - 2],
+                        dabs(r__3));
+                *tol = dmax(r__4,r__5);
+/* L10: */
+            }
+            *tol *= eps;
+            if (*tol == 0.f) {
+                *tol = eps;
+            }
+        }
+    }
+
+/*     JOB = 1 or -1: solve (T - lambda*I)*x = y. First apply the */
+/*     interchanges P (flagged by IN(k-1) != 0) and the multipliers */
+/*     C of L to the right-hand side y. */
+
+    if (abs(*job) == 1) {
+        i__1 = *n;
+        for (k = 2; k <= i__1; ++k) {
+            if (in[k - 1] == 0) {
+                y[k] -= c__[k - 1] * y[k - 1];
+            } else {
+                temp = y[k - 1];
+                y[k - 1] = y[k];
+                y[k] = temp - c__[k - 1] * y[k];
+            }
+/* L20: */
+        }
+
+/*     Back substitution with U (diagonal A, first and second */
+/*     superdiagonals B and D). For JOB = 1 a division y(k)/a(k) */
+/*     that would overflow is reported via INFO = k; scaling both */
+/*     numerator and denominator by BIGNUM guards against spurious */
+/*     failures when |a(k)| is merely subnormal-small. */
+
+        if (*job == 1) {
+            for (k = *n; k >= 1; --k) {
+                if (k <= *n - 2) {
+                    temp = y[k] - b[k] * y[k + 1] - d__[k] * y[k + 2];
+                } else if (k == *n - 1) {
+                    temp = y[k] - b[k] * y[k + 1];
+                } else {
+                    temp = y[k];
+                }
+                ak = a[k];
+                absak = dabs(ak);
+                if (absak < 1.f) {
+                    if (absak < sfmin) {
+                        if (absak == 0.f || dabs(temp) * sfmin > absak) {
+                            *info = k;
+                            return 0;
+                        } else {
+                            temp *= bignum;
+                            ak *= bignum;
+                        }
+                    } else if (dabs(temp) > absak * bignum) {
+                        *info = k;
+                        return 0;
+                    }
+                }
+                y[k] = temp / ak;
+/* L30: */
+            }
+        } else {
+
+/*     JOB = -1: as above, but instead of failing, perturb a(k) by */
+/*     PERT (sign-matched to a(k), doubled on each retry via the */
+/*     loop back to L40) until the division is safe. */
+
+            for (k = *n; k >= 1; --k) {
+                if (k <= *n - 2) {
+                    temp = y[k] - b[k] * y[k + 1] - d__[k] * y[k + 2];
+                } else if (k == *n - 1) {
+                    temp = y[k] - b[k] * y[k + 1];
+                } else {
+                    temp = y[k];
+                }
+                ak = a[k];
+                pert = r_sign(tol, &ak);
+L40:
+                absak = dabs(ak);
+                if (absak < 1.f) {
+                    if (absak < sfmin) {
+                        if (absak == 0.f || dabs(temp) * sfmin > absak) {
+                            ak += pert;
+                            pert *= 2;
+                            goto L40;
+                        } else {
+                            temp *= bignum;
+                            ak *= bignum;
+                        }
+                    } else if (dabs(temp) > absak * bignum) {
+                        ak += pert;
+                        pert *= 2;
+                        goto L40;
+                    }
+                }
+                y[k] = temp / ak;
+/* L50: */
+            }
+        }
+    } else {
+
+/* Come to here if JOB = 2 or -2 */
+
+/*     Solve (T - lambda*I)'*x = y: forward substitution with U' */
+/*     first, then apply the transposed L and P factors in the */
+/*     final loop below. */
+
+        if (*job == 2) {
+            i__1 = *n;
+            for (k = 1; k <= i__1; ++k) {
+                if (k >= 3) {
+                    temp = y[k] - b[k - 1] * y[k - 1] - d__[k - 2] * y[k - 2];
+                } else if (k == 2) {
+                    temp = y[k] - b[k - 1] * y[k - 1];
+                } else {
+                    temp = y[k];
+                }
+                ak = a[k];
+                absak = dabs(ak);
+                if (absak < 1.f) {
+                    if (absak < sfmin) {
+                        if (absak == 0.f || dabs(temp) * sfmin > absak) {
+                            *info = k;
+                            return 0;
+                        } else {
+                            temp *= bignum;
+                            ak *= bignum;
+                        }
+                    } else if (dabs(temp) > absak * bignum) {
+                        *info = k;
+                        return 0;
+                    }
+                }
+                y[k] = temp / ak;
+/* L60: */
+            }
+        } else {
+
+/*     JOB = -2: same solve with the perturb-and-retry scheme used */
+/*     for JOB = -1 (loop back to L70 with a doubled perturbation). */
+
+            i__1 = *n;
+            for (k = 1; k <= i__1; ++k) {
+                if (k >= 3) {
+                    temp = y[k] - b[k - 1] * y[k - 1] - d__[k - 2] * y[k - 2];
+                } else if (k == 2) {
+                    temp = y[k] - b[k - 1] * y[k - 1];
+                } else {
+                    temp = y[k];
+                }
+                ak = a[k];
+                pert = r_sign(tol, &ak);
+L70:
+                absak = dabs(ak);
+                if (absak < 1.f) {
+                    if (absak < sfmin) {
+                        if (absak == 0.f || dabs(temp) * sfmin > absak) {
+                            ak += pert;
+                            pert *= 2;
+                            goto L70;
+                        } else {
+                            temp *= bignum;
+                            ak *= bignum;
+                        }
+                    } else if (dabs(temp) > absak * bignum) {
+                        ak += pert;
+                        pert *= 2;
+                        goto L70;
+                    }
+                }
+                y[k] = temp / ak;
+/* L80: */
+            }
+        }
+
+/*     Apply the multipliers of L and the interchanges P in reverse */
+/*     order to complete the transposed solve. */
+
+        for (k = *n; k >= 2; --k) {
+            if (in[k - 1] == 0) {
+                y[k - 1] -= c__[k - 1] * y[k];
+            } else {
+                temp = y[k - 1];
+                y[k - 1] = y[k];
+                y[k] = temp - c__[k - 1] * y[k];
+            }
+/* L90: */
+        }
+    }
+
+/* End of SLAGTS */
+
+    return 0;
+} /* slagts_ */
diff --git a/contrib/libs/clapack/slagv2.c b/contrib/libs/clapack/slagv2.c
new file mode 100644
index 0000000000..03efb8c0b4
--- /dev/null
+++ b/contrib/libs/clapack/slagv2.c
@@ -0,0 +1,347 @@
+/* slagv2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__2 = 2;
+static integer c__1 = 1;
+
+/* Subroutine */ int slagv2_(real *a, integer *lda, real *b, integer *ldb,
+        real *alphar, real *alphai, real *beta, real *csl, real *snl, real *
+        csr, real *snr)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset;
+    real r__1, r__2, r__3, r__4, r__5, r__6;
+
+    /* Local variables */
+    real r__, t, h1, h2, h3, wi, qq, rr, wr1, wr2, ulp;
+    extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+            integer *, real *, real *), slag2_(real *, integer *, real *,
+            integer *, real *, real *, real *, real *, real *, real *);
+    real anorm, bnorm, scale1, scale2;
+    extern /* Subroutine */ int slasv2_(real *, real *, real *, real *, real *
+, real *, real *, real *, real *);
+    extern doublereal slapy2_(real *, real *);
+    real ascale, bscale;
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern /* Subroutine */ int slartg_(real *, real *, real *, real *, real *
+);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAGV2 computes the Generalized Schur factorization of a real 2-by-2 */
+/* matrix pencil (A,B) where B is upper triangular. This routine */
+/* computes orthogonal (rotation) matrices given by CSL, SNL and CSR, */
+/* SNR such that */
+
+/* 1) if the pencil (A,B) has two real eigenvalues (include 0/0 or 1/0 */
+/* types), then */
+
+/* [ a11 a12 ] := [ CSL SNL ] [ a11 a12 ] [ CSR -SNR ] */
+/* [ 0 a22 ] [ -SNL CSL ] [ a21 a22 ] [ SNR CSR ] */
+
+/* [ b11 b12 ] := [ CSL SNL ] [ b11 b12 ] [ CSR -SNR ] */
+/* [ 0 b22 ] [ -SNL CSL ] [ 0 b22 ] [ SNR CSR ], */
+
+/* 2) if the pencil (A,B) has a pair of complex conjugate eigenvalues, */
+/* then */
+
+/* [ a11 a12 ] := [ CSL SNL ] [ a11 a12 ] [ CSR -SNR ] */
+/* [ a21 a22 ] [ -SNL CSL ] [ a21 a22 ] [ SNR CSR ] */
+
+/* [ b11 0 ] := [ CSL SNL ] [ b11 b12 ] [ CSR -SNR ] */
+/* [ 0 b22 ] [ -SNL CSL ] [ 0 b22 ] [ SNR CSR ] */
+
+/* where b11 >= b22 > 0. */
+
+
+/* Arguments */
+/* ========= */
+
+/* A (input/output) REAL array, dimension (LDA, 2) */
+/* On entry, the 2 x 2 matrix A. */
+/* On exit, A is overwritten by the ``A-part'' of the */
+/* generalized Schur form. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= 2. */
+
+/* B (input/output) REAL array, dimension (LDB, 2) */
+/* On entry, the upper triangular 2 x 2 matrix B. */
+/* On exit, B is overwritten by the ``B-part'' of the */
+/* generalized Schur form. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= 2. */
+
+/* ALPHAR (output) REAL array, dimension (2) */
+/* ALPHAI (output) REAL array, dimension (2) */
+/* BETA (output) REAL array, dimension (2) */
+/* (ALPHAR(k)+i*ALPHAI(k))/BETA(k) are the eigenvalues of the */
+/* pencil (A,B), k=1,2, i = sqrt(-1). Note that BETA(k) may */
+/* be zero. */
+
+/* CSL (output) REAL */
+/* The cosine of the left rotation matrix. */
+
+/* SNL (output) REAL */
+/* The sine of the left rotation matrix. */
+
+/* CSR (output) REAL */
+/* The cosine of the right rotation matrix. */
+
+/* SNR (output) REAL */
+/* The sine of the right rotation matrix. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    /* (f2c idiom: shift pointers so A, B, ALPHAR, ALPHAI, BETA use */
+    /* 1-based Fortran-style indexing.) */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --alphar;
+    --alphai;
+    --beta;
+
+    /* Function Body */
+    safmin = slamch_("S");
+    ulp = slamch_("P");
+
+/* Scale A */
+
+/*     ANORM is the 1-norm of A (floored at SAFMIN); A is scaled by */
+/*     its reciprocal, and unscaled again at the end. */
+
+/* Computing MAX */
+    r__5 = (r__1 = a[a_dim1 + 1], dabs(r__1)) + (r__2 = a[a_dim1 + 2], dabs(
+            r__2)), r__6 = (r__3 = a[(a_dim1 << 1) + 1], dabs(r__3)) + (r__4 =
+            a[(a_dim1 << 1) + 2], dabs(r__4)), r__5 = max(r__5,r__6);
+    anorm = dmax(r__5,safmin);
+    ascale = 1.f / anorm;
+    a[a_dim1 + 1] = ascale * a[a_dim1 + 1];
+    a[(a_dim1 << 1) + 1] = ascale * a[(a_dim1 << 1) + 1];
+    a[a_dim1 + 2] = ascale * a[a_dim1 + 2];
+    a[(a_dim1 << 1) + 2] = ascale * a[(a_dim1 << 1) + 2];
+
+/* Scale B */
+
+/*     Same treatment for B; B(2,1) is zero on entry (B is upper */
+/*     triangular) so it is not touched. */
+
+/* Computing MAX */
+    r__4 = (r__3 = b[b_dim1 + 1], dabs(r__3)), r__5 = (r__1 = b[(b_dim1 << 1)
+            + 1], dabs(r__1)) + (r__2 = b[(b_dim1 << 1) + 2], dabs(r__2)),
+            r__4 = max(r__4,r__5);
+    bnorm = dmax(r__4,safmin);
+    bscale = 1.f / bnorm;
+    b[b_dim1 + 1] = bscale * b[b_dim1 + 1];
+    b[(b_dim1 << 1) + 1] = bscale * b[(b_dim1 << 1) + 1];
+    b[(b_dim1 << 1) + 2] = bscale * b[(b_dim1 << 1) + 2];
+
+/* Check if A can be deflated */
+
+/*     A(2,1) already negligible: identity rotations suffice; just */
+/*     zero out the subdiagonal entries. */
+
+    if ((r__1 = a[a_dim1 + 2], dabs(r__1)) <= ulp) {
+        *csl = 1.f;
+        *snl = 0.f;
+        *csr = 1.f;
+        *snr = 0.f;
+        a[a_dim1 + 2] = 0.f;
+        b[b_dim1 + 2] = 0.f;
+
+/* Check if B is singular */
+
+/*     B(1,1) negligible: one left rotation zeroing A(2,1) makes the */
+/*     pencil upper triangular. */
+
+    } else if ((r__1 = b[b_dim1 + 1], dabs(r__1)) <= ulp) {
+        slartg_(&a[a_dim1 + 1], &a[a_dim1 + 2], csl, snl, &r__);
+        *csr = 1.f;
+        *snr = 0.f;
+        srot_(&c__2, &a[a_dim1 + 1], lda, &a[a_dim1 + 2], lda, csl, snl);
+        srot_(&c__2, &b[b_dim1 + 1], ldb, &b[b_dim1 + 2], ldb, csl, snl);
+        a[a_dim1 + 2] = 0.f;
+        b[b_dim1 + 1] = 0.f;
+        b[b_dim1 + 2] = 0.f;
+
+    } else if ((r__1 = b[(b_dim1 << 1) + 2], dabs(r__1)) <= ulp) {
+
+/*     B(2,2) negligible: one right rotation zeroing A(2,1) suffices. */
+
+        slartg_(&a[(a_dim1 << 1) + 2], &a[a_dim1 + 2], csr, snr, &t);
+        *snr = -(*snr);
+        srot_(&c__2, &a[a_dim1 + 1], &c__1, &a[(a_dim1 << 1) + 1], &c__1, csr,
+                 snr);
+        srot_(&c__2, &b[b_dim1 + 1], &c__1, &b[(b_dim1 << 1) + 1], &c__1, csr,
+                 snr);
+        *csl = 1.f;
+        *snl = 0.f;
+        a[a_dim1 + 2] = 0.f;
+        b[b_dim1 + 2] = 0.f;
+        b[(b_dim1 << 1) + 2] = 0.f;
+
+    } else {
+
+/* B is nonsingular, first compute the eigenvalues of (A,B) */
+
+        slag2_(&a[a_offset], lda, &b[b_offset], ldb, &safmin, &scale1, &
+                scale2, &wr1, &wr2, &wi);
+/*     (wr2 is returned by SLAG2 but not needed on the paths below.) */
+
+        if (wi == 0.f) {
+
+/* two real eigenvalues, compute s*A-w*B */
+
+            h1 = scale1 * a[a_dim1 + 1] - wr1 * b[b_dim1 + 1];
+            h2 = scale1 * a[(a_dim1 << 1) + 1] - wr1 * b[(b_dim1 << 1) + 1];
+            h3 = scale1 * a[(a_dim1 << 1) + 2] - wr1 * b[(b_dim1 << 1) + 2];
+
+            rr = slapy2_(&h1, &h2);
+            r__1 = scale1 * a[a_dim1 + 2];
+            qq = slapy2_(&r__1, &h3);
+
+            if (rr > qq) {
+
+/* find right rotation matrix to zero 1,1 element of */
+/* (sA - wB) */
+
+                slartg_(&h2, &h1, csr, snr, &t);
+
+            } else {
+
+/* find right rotation matrix to zero 2,1 element of */
+/* (sA - wB) */
+
+                r__1 = scale1 * a[a_dim1 + 2];
+                slartg_(&h3, &r__1, csr, snr, &t);
+
+            }
+
+            *snr = -(*snr);
+            srot_(&c__2, &a[a_dim1 + 1], &c__1, &a[(a_dim1 << 1) + 1], &c__1,
+                    csr, snr);
+            srot_(&c__2, &b[b_dim1 + 1], &c__1, &b[(b_dim1 << 1) + 1], &c__1,
+                    csr, snr);
+
+/* compute inf norms of A and B */
+
+/* Computing MAX */
+            r__5 = (r__1 = a[a_dim1 + 1], dabs(r__1)) + (r__2 = a[(a_dim1 <<
+                    1) + 1], dabs(r__2)), r__6 = (r__3 = a[a_dim1 + 2], dabs(
+                    r__3)) + (r__4 = a[(a_dim1 << 1) + 2], dabs(r__4));
+            h1 = dmax(r__5,r__6);
+/* Computing MAX */
+            r__5 = (r__1 = b[b_dim1 + 1], dabs(r__1)) + (r__2 = b[(b_dim1 <<
+                    1) + 1], dabs(r__2)), r__6 = (r__3 = b[b_dim1 + 2], dabs(
+                    r__3)) + (r__4 = b[(b_dim1 << 1) + 2], dabs(r__4));
+            h2 = dmax(r__5,r__6);
+
+/*     Choose the better-conditioned target for the left rotation. */
+
+            if (scale1 * h1 >= dabs(wr1) * h2) {
+
+/* find left rotation matrix Q to zero out B(2,1) */
+
+                slartg_(&b[b_dim1 + 1], &b[b_dim1 + 2], csl, snl, &r__);
+
+            } else {
+
+/* find left rotation matrix Q to zero out A(2,1) */
+
+                slartg_(&a[a_dim1 + 1], &a[a_dim1 + 2], csl, snl, &r__);
+
+            }
+
+            srot_(&c__2, &a[a_dim1 + 1], lda, &a[a_dim1 + 2], lda, csl, snl);
+            srot_(&c__2, &b[b_dim1 + 1], ldb, &b[b_dim1 + 2], ldb, csl, snl);
+
+            a[a_dim1 + 2] = 0.f;
+            b[b_dim1 + 2] = 0.f;
+
+        } else {
+
+/* a pair of complex conjugate eigenvalues */
+/* first compute the SVD of the matrix B */
+
+            slasv2_(&b[b_dim1 + 1], &b[(b_dim1 << 1) + 1], &b[(b_dim1 << 1) +
+                    2], &r__, &t, snr, csr, snl, csl);
+
+/* Form (A,B) := Q(A,B)Z' where Q is left rotation matrix and */
+/* Z is right rotation matrix computed from SLASV2 */
+
+            srot_(&c__2, &a[a_dim1 + 1], lda, &a[a_dim1 + 2], lda, csl, snl);
+            srot_(&c__2, &b[b_dim1 + 1], ldb, &b[b_dim1 + 2], ldb, csl, snl);
+            srot_(&c__2, &a[a_dim1 + 1], &c__1, &a[(a_dim1 << 1) + 1], &c__1,
+                    csr, snr);
+            srot_(&c__2, &b[b_dim1 + 1], &c__1, &b[(b_dim1 << 1) + 1], &c__1,
+                    csr, snr);
+
+            b[b_dim1 + 2] = 0.f;
+            b[(b_dim1 << 1) + 1] = 0.f;
+
+        }
+
+    }
+
+/* Unscaling */
+
+/*     Undo the initial 1/ANORM and 1/BNORM scalings. */
+
+    a[a_dim1 + 1] = anorm * a[a_dim1 + 1];
+    a[a_dim1 + 2] = anorm * a[a_dim1 + 2];
+    a[(a_dim1 << 1) + 1] = anorm * a[(a_dim1 << 1) + 1];
+    a[(a_dim1 << 1) + 2] = anorm * a[(a_dim1 << 1) + 2];
+    b[b_dim1 + 1] = bnorm * b[b_dim1 + 1];
+    b[b_dim1 + 2] = bnorm * b[b_dim1 + 2];
+    b[(b_dim1 << 1) + 1] = bnorm * b[(b_dim1 << 1) + 1];
+    b[(b_dim1 << 1) + 2] = bnorm * b[(b_dim1 << 1) + 2];
+
+/*     Eigenvalue output: real case reads the (unscaled) diagonals; */
+/*     complex case reconstructs alpha from SLAG2's wr1, wi, scale1 */
+/*     with beta = 1. */
+
+    if (wi == 0.f) {
+        alphar[1] = a[a_dim1 + 1];
+        alphar[2] = a[(a_dim1 << 1) + 2];
+        alphai[1] = 0.f;
+        alphai[2] = 0.f;
+        beta[1] = b[b_dim1 + 1];
+        beta[2] = b[(b_dim1 << 1) + 2];
+    } else {
+        alphar[1] = anorm * wr1 / scale1 / bnorm;
+        alphai[1] = anorm * wi / scale1 / bnorm;
+        alphar[2] = alphar[1];
+        alphai[2] = -alphai[1];
+        beta[1] = 1.f;
+        beta[2] = 1.f;
+    }
+
+    return 0;
+
+/* End of SLAGV2 */
+
+} /* slagv2_ */
diff --git a/contrib/libs/clapack/slahqr.c b/contrib/libs/clapack/slahqr.c
new file mode 100644
index 0000000000..fa5a29893c
--- /dev/null
+++ b/contrib/libs/clapack/slahqr.c
@@ -0,0 +1,631 @@
+/* slahqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int slahqr_(logical *wantt, logical *wantz, integer *n,
+ integer *ilo, integer *ihi, real *h__, integer *ldh, real *wr, real *
+ wi, integer *iloz, integer *ihiz, real *z__, integer *ldz, integer *
+ info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3;
+ real r__1, r__2, r__3, r__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k, l, m;
+ real s, v[3];
+ integer i1, i2;
+ real t1, t2, t3, v2, v3, aa, ab, ba, bb, h11, h12, h21, h22, cs;
+ integer nh;
+ real sn;
+ integer nr;
+ real tr;
+ integer nz;
+ real det, h21s;
+ integer its;
+ real ulp, sum, tst, rt1i, rt2i, rt1r, rt2r;
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *), scopy_(integer *, real *, integer *,
+ real *, integer *), slanv2_(real *, real *, real *, real *, real *
+, real *, real *, real *, real *, real *), slabad_(real *, real *)
+ ;
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int slarfg_(integer *, real *, real *, integer *,
+ real *);
+ real safmax, rtdisc, smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAHQR is an auxiliary routine called by SHSEQR to update the */
+/* eigenvalues and Schur decomposition already computed by SHSEQR, by */
+/* dealing with the Hessenberg submatrix in rows and columns ILO to */
+/* IHI. */
+
+/* Arguments */
+/* ========= */
+
+/* WANTT (input) LOGICAL */
+/* = .TRUE. : the full Schur form T is required; */
+/* = .FALSE.: only eigenvalues are required. */
+
+/* WANTZ (input) LOGICAL */
+/* = .TRUE. : the matrix of Schur vectors Z is required; */
+/* = .FALSE.: Schur vectors are not required. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that H is already upper quasi-triangular in */
+/* rows and columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless */
+/* ILO = 1). SLAHQR works primarily with the Hessenberg */
+/* submatrix in rows and columns ILO to IHI, but applies */
+/* transformations to all of H if WANTT is .TRUE.. */
+/* 1 <= ILO <= max(1,IHI); IHI <= N. */
+
+/* H (input/output) REAL array, dimension (LDH,N) */
+/* On entry, the upper Hessenberg matrix H. */
+/* On exit, if INFO is zero and if WANTT is .TRUE., H is upper */
+/* quasi-triangular in rows and columns ILO:IHI, with any */
+/* 2-by-2 diagonal blocks in standard form. If INFO is zero */
+/* and WANTT is .FALSE., the contents of H are unspecified on */
+/* exit. The output state of H if INFO is nonzero is given */
+/* below under the description of INFO. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH >= max(1,N). */
+
+/* WR (output) REAL array, dimension (N) */
+/* WI (output) REAL array, dimension (N) */
+/* The real and imaginary parts, respectively, of the computed */
+/* eigenvalues ILO to IHI are stored in the corresponding */
+/* elements of WR and WI. If two eigenvalues are computed as a */
+/* complex conjugate pair, they are stored in consecutive */
+/* elements of WR and WI, say the i-th and (i+1)th, with */
+/* WI(i) > 0 and WI(i+1) < 0. If WANTT is .TRUE., the */
+/* eigenvalues are stored in the same order as on the diagonal */
+/* of the Schur form returned in H, with WR(i) = H(i,i), and, if */
+/* H(i:i+1,i:i+1) is a 2-by-2 diagonal block, */
+/* WI(i) = sqrt(H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i). */
+
+/* ILOZ (input) INTEGER */
+/* IHIZ (input) INTEGER */
+/* Specify the rows of Z to which transformations must be */
+/* applied if WANTZ is .TRUE.. */
+/* 1 <= ILOZ <= ILO; IHI <= IHIZ <= N. */
+
+/* Z (input/output) REAL array, dimension (LDZ,N) */
+/* If WANTZ is .TRUE., on entry Z must contain the current */
+/* matrix Z of transformations accumulated by SHSEQR, and on */
+/* exit Z has been updated; transformations are applied only to */
+/* the submatrix Z(ILOZ:IHIZ,ILO:IHI). */
+/* If WANTZ is .FALSE., Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* .GT. 0: If INFO = i, SLAHQR failed to compute all the */
+/* eigenvalues ILO to IHI in a total of 30 iterations */
+/* per eigenvalue; elements i+1:ihi of WR and WI */
+/* contain those eigenvalues which have been */
+/* successfully computed. */
+
+/* If INFO .GT. 0 and WANTT is .FALSE., then on exit, */
+/* the remaining unconverged eigenvalues are the */
+/* eigenvalues of the upper Hessenberg matrix rows */
+/* and columns ILO through INFO of the final, output */
+/* value of H. */
+
+/* If INFO .GT. 0 and WANTT is .TRUE., then on exit */
+/* (*) (initial value of H)*U = U*(final value of H) */
+/* where U is an orthogonal matrix. The final */
+/* value of H is upper Hessenberg and triangular in */
+/* rows and columns INFO+1 through IHI. */
+
+/* If INFO .GT. 0 and WANTZ is .TRUE., then on exit */
+/* (final value of Z) = (initial value of Z)*U */
+/* where U is the orthogonal matrix in (*) */
+/* (regardless of the value of WANTT.) */
+
+/* Further Details */
+/* =============== */
+
+/* 02-96 Based on modifications by */
+/* David Day, Sandia National Laboratory, USA */
+
+/* 12-04 Further modifications by */
+/* Ralph Byers, University of Kansas, USA */
+/* This is a modified version of SLAHQR from LAPACK version 3.0. */
+/* It is (1) more robust against overflow and underflow and */
+/* (2) adopts the more conservative Ahues & Tisseur stopping */
+/* criterion (LAWN 122, 1997). */
+
+/* ========================================================= */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --wr;
+ --wi;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+
+ /* Function Body */
+ *info = 0;
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*ilo == *ihi) {
+ wr[*ilo] = h__[*ilo + *ilo * h_dim1];
+ wi[*ilo] = 0.f;
+ return 0;
+ }
+
+/* ==== clear out the trash ==== */
+ i__1 = *ihi - 3;
+ for (j = *ilo; j <= i__1; ++j) {
+ h__[j + 2 + j * h_dim1] = 0.f;
+ h__[j + 3 + j * h_dim1] = 0.f;
+/* L10: */
+ }
+ if (*ilo <= *ihi - 2) {
+ h__[*ihi + (*ihi - 2) * h_dim1] = 0.f;
+ }
+
+ nh = *ihi - *ilo + 1;
+ nz = *ihiz - *iloz + 1;
+
+/* Set machine-dependent constants for the stopping criterion. */
+
+ safmin = slamch_("SAFE MINIMUM");
+ safmax = 1.f / safmin;
+ slabad_(&safmin, &safmax);
+ ulp = slamch_("PRECISION");
+ smlnum = safmin * ((real) nh / ulp);
+
+/* I1 and I2 are the indices of the first row and last column of H */
+/* to which transformations must be applied. If eigenvalues only are */
+/* being computed, I1 and I2 are set inside the main loop. */
+
+ if (*wantt) {
+ i1 = 1;
+ i2 = *n;
+ }
+
+/* The main loop begins here. I is the loop index and decreases from */
+/* IHI to ILO in steps of 1 or 2. Each iteration of the loop works */
+/* with the active submatrix in rows and columns L to I. */
+/* Eigenvalues I+1 to IHI have already converged. Either L = ILO or */
+/* H(L,L-1) is negligible so that the matrix splits. */
+
+ i__ = *ihi;
+L20:
+ l = *ilo;
+ if (i__ < *ilo) {
+ goto L160;
+ }
+
+/* Perform QR iterations on rows and columns ILO to I until a */
+/* submatrix of order 1 or 2 splits off at the bottom because a */
+/* subdiagonal element has become negligible. */
+
+ for (its = 0; its <= 30; ++its) {
+
+/* Look for a single small subdiagonal element. */
+
+ i__1 = l + 1;
+ for (k = i__; k >= i__1; --k) {
+ if ((r__1 = h__[k + (k - 1) * h_dim1], dabs(r__1)) <= smlnum) {
+ goto L40;
+ }
+ tst = (r__1 = h__[k - 1 + (k - 1) * h_dim1], dabs(r__1)) + (r__2 =
+ h__[k + k * h_dim1], dabs(r__2));
+ if (tst == 0.f) {
+ if (k - 2 >= *ilo) {
+ tst += (r__1 = h__[k - 1 + (k - 2) * h_dim1], dabs(r__1));
+ }
+ if (k + 1 <= *ihi) {
+ tst += (r__1 = h__[k + 1 + k * h_dim1], dabs(r__1));
+ }
+ }
+/* ==== The following is a conservative small subdiagonal */
+/* . deflation criterion due to Ahues & Tisseur (LAWN 122, */
+/* . 1997). It has better mathematical foundation and */
+/* . improves accuracy in some cases. ==== */
+ if ((r__1 = h__[k + (k - 1) * h_dim1], dabs(r__1)) <= ulp * tst) {
+/* Computing MAX */
+ r__3 = (r__1 = h__[k + (k - 1) * h_dim1], dabs(r__1)), r__4 =
+ (r__2 = h__[k - 1 + k * h_dim1], dabs(r__2));
+ ab = dmax(r__3,r__4);
+/* Computing MIN */
+ r__3 = (r__1 = h__[k + (k - 1) * h_dim1], dabs(r__1)), r__4 =
+ (r__2 = h__[k - 1 + k * h_dim1], dabs(r__2));
+ ba = dmin(r__3,r__4);
+/* Computing MAX */
+ r__3 = (r__1 = h__[k + k * h_dim1], dabs(r__1)), r__4 = (r__2
+ = h__[k - 1 + (k - 1) * h_dim1] - h__[k + k * h_dim1],
+ dabs(r__2));
+ aa = dmax(r__3,r__4);
+/* Computing MIN */
+ r__3 = (r__1 = h__[k + k * h_dim1], dabs(r__1)), r__4 = (r__2
+ = h__[k - 1 + (k - 1) * h_dim1] - h__[k + k * h_dim1],
+ dabs(r__2));
+ bb = dmin(r__3,r__4);
+ s = aa + ab;
+/* Computing MAX */
+ r__1 = smlnum, r__2 = ulp * (bb * (aa / s));
+ if (ba * (ab / s) <= dmax(r__1,r__2)) {
+ goto L40;
+ }
+ }
+/* L30: */
+ }
+L40:
+ l = k;
+ if (l > *ilo) {
+
+/* H(L,L-1) is negligible */
+
+ h__[l + (l - 1) * h_dim1] = 0.f;
+ }
+
+/* Exit from loop if a submatrix of order 1 or 2 has split off. */
+
+ if (l >= i__ - 1) {
+ goto L150;
+ }
+
+/* Now the active submatrix is in rows and columns L to I. If */
+/* eigenvalues only are being computed, only the active submatrix */
+/* need be transformed. */
+
+ if (! (*wantt)) {
+ i1 = l;
+ i2 = i__;
+ }
+
+ if (its == 10) {
+
+/* Exceptional shift. */
+
+ s = (r__1 = h__[l + 1 + l * h_dim1], dabs(r__1)) + (r__2 = h__[l
+ + 2 + (l + 1) * h_dim1], dabs(r__2));
+ h11 = s * .75f + h__[l + l * h_dim1];
+ h12 = s * -.4375f;
+ h21 = s;
+ h22 = h11;
+ } else if (its == 20) {
+
+/* Exceptional shift. */
+
+ s = (r__1 = h__[i__ + (i__ - 1) * h_dim1], dabs(r__1)) + (r__2 =
+ h__[i__ - 1 + (i__ - 2) * h_dim1], dabs(r__2));
+ h11 = s * .75f + h__[i__ + i__ * h_dim1];
+ h12 = s * -.4375f;
+ h21 = s;
+ h22 = h11;
+ } else {
+
+/* Prepare to use Francis' double shift */
+/* (i.e. 2nd degree generalized Rayleigh quotient) */
+
+ h11 = h__[i__ - 1 + (i__ - 1) * h_dim1];
+ h21 = h__[i__ + (i__ - 1) * h_dim1];
+ h12 = h__[i__ - 1 + i__ * h_dim1];
+ h22 = h__[i__ + i__ * h_dim1];
+ }
+ s = dabs(h11) + dabs(h12) + dabs(h21) + dabs(h22);
+ if (s == 0.f) {
+ rt1r = 0.f;
+ rt1i = 0.f;
+ rt2r = 0.f;
+ rt2i = 0.f;
+ } else {
+ h11 /= s;
+ h21 /= s;
+ h12 /= s;
+ h22 /= s;
+ tr = (h11 + h22) / 2.f;
+ det = (h11 - tr) * (h22 - tr) - h12 * h21;
+ rtdisc = sqrt((dabs(det)));
+ if (det >= 0.f) {
+
+/* ==== complex conjugate shifts ==== */
+
+ rt1r = tr * s;
+ rt2r = rt1r;
+ rt1i = rtdisc * s;
+ rt2i = -rt1i;
+ } else {
+
+/* ==== real shifts (use only one of them) ==== */
+
+ rt1r = tr + rtdisc;
+ rt2r = tr - rtdisc;
+ if ((r__1 = rt1r - h22, dabs(r__1)) <= (r__2 = rt2r - h22,
+ dabs(r__2))) {
+ rt1r *= s;
+ rt2r = rt1r;
+ } else {
+ rt2r *= s;
+ rt1r = rt2r;
+ }
+ rt1i = 0.f;
+ rt2i = 0.f;
+ }
+ }
+
+/* Look for two consecutive small subdiagonal elements. */
+
+ i__1 = l;
+ for (m = i__ - 2; m >= i__1; --m) {
+/* Determine the effect of starting the double-shift QR */
+/* iteration at row M, and see if this would make H(M,M-1) */
+/* negligible. (The following uses scaling to avoid */
+/* overflows and most underflows.) */
+
+ h21s = h__[m + 1 + m * h_dim1];
+ s = (r__1 = h__[m + m * h_dim1] - rt2r, dabs(r__1)) + dabs(rt2i)
+ + dabs(h21s);
+ h21s = h__[m + 1 + m * h_dim1] / s;
+ v[0] = h21s * h__[m + (m + 1) * h_dim1] + (h__[m + m * h_dim1] -
+ rt1r) * ((h__[m + m * h_dim1] - rt2r) / s) - rt1i * (rt2i
+ / s);
+ v[1] = h21s * (h__[m + m * h_dim1] + h__[m + 1 + (m + 1) * h_dim1]
+ - rt1r - rt2r);
+ v[2] = h21s * h__[m + 2 + (m + 1) * h_dim1];
+ s = dabs(v[0]) + dabs(v[1]) + dabs(v[2]);
+ v[0] /= s;
+ v[1] /= s;
+ v[2] /= s;
+ if (m == l) {
+ goto L60;
+ }
+ if ((r__1 = h__[m + (m - 1) * h_dim1], dabs(r__1)) * (dabs(v[1])
+ + dabs(v[2])) <= ulp * dabs(v[0]) * ((r__2 = h__[m - 1 + (
+ m - 1) * h_dim1], dabs(r__2)) + (r__3 = h__[m + m *
+ h_dim1], dabs(r__3)) + (r__4 = h__[m + 1 + (m + 1) *
+ h_dim1], dabs(r__4)))) {
+ goto L60;
+ }
+/* L50: */
+ }
+L60:
+
+/* Double-shift QR step */
+
+ i__1 = i__ - 1;
+ for (k = m; k <= i__1; ++k) {
+
+/* The first iteration of this loop determines a reflection G */
+/* from the vector V and applies it from left and right to H, */
+/* thus creating a nonzero bulge below the subdiagonal. */
+
+/* Each subsequent iteration determines a reflection G to */
+/* restore the Hessenberg form in the (K-1)th column, and thus */
+/* chases the bulge one step toward the bottom of the active */
+/* submatrix. NR is the order of G. */
+
+/* Computing MIN */
+ i__2 = 3, i__3 = i__ - k + 1;
+ nr = min(i__2,i__3);
+ if (k > m) {
+ scopy_(&nr, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1);
+ }
+ slarfg_(&nr, v, &v[1], &c__1, &t1);
+ if (k > m) {
+ h__[k + (k - 1) * h_dim1] = v[0];
+ h__[k + 1 + (k - 1) * h_dim1] = 0.f;
+ if (k < i__ - 1) {
+ h__[k + 2 + (k - 1) * h_dim1] = 0.f;
+ }
+ } else if (m > l) {
+/* ==== Use the following instead of */
+/* . H( K, K-1 ) = -H( K, K-1 ) to */
+/* . avoid a bug when v(2) and v(3) */
+/* . underflow. ==== */
+ h__[k + (k - 1) * h_dim1] *= 1.f - t1;
+ }
+ v2 = v[1];
+ t2 = t1 * v2;
+ if (nr == 3) {
+ v3 = v[2];
+ t3 = t1 * v3;
+
+/* Apply G from the left to transform the rows of the matrix */
+/* in columns K to I2. */
+
+ i__2 = i2;
+ for (j = k; j <= i__2; ++j) {
+ sum = h__[k + j * h_dim1] + v2 * h__[k + 1 + j * h_dim1]
+ + v3 * h__[k + 2 + j * h_dim1];
+ h__[k + j * h_dim1] -= sum * t1;
+ h__[k + 1 + j * h_dim1] -= sum * t2;
+ h__[k + 2 + j * h_dim1] -= sum * t3;
+/* L70: */
+ }
+
+/* Apply G from the right to transform the columns of the */
+/* matrix in rows I1 to min(K+3,I). */
+
+/* Computing MIN */
+ i__3 = k + 3;
+ i__2 = min(i__3,i__);
+ for (j = i1; j <= i__2; ++j) {
+ sum = h__[j + k * h_dim1] + v2 * h__[j + (k + 1) * h_dim1]
+ + v3 * h__[j + (k + 2) * h_dim1];
+ h__[j + k * h_dim1] -= sum * t1;
+ h__[j + (k + 1) * h_dim1] -= sum * t2;
+ h__[j + (k + 2) * h_dim1] -= sum * t3;
+/* L80: */
+ }
+
+ if (*wantz) {
+
+/* Accumulate transformations in the matrix Z */
+
+ i__2 = *ihiz;
+ for (j = *iloz; j <= i__2; ++j) {
+ sum = z__[j + k * z_dim1] + v2 * z__[j + (k + 1) *
+ z_dim1] + v3 * z__[j + (k + 2) * z_dim1];
+ z__[j + k * z_dim1] -= sum * t1;
+ z__[j + (k + 1) * z_dim1] -= sum * t2;
+ z__[j + (k + 2) * z_dim1] -= sum * t3;
+/* L90: */
+ }
+ }
+ } else if (nr == 2) {
+
+/* Apply G from the left to transform the rows of the matrix */
+/* in columns K to I2. */
+
+ i__2 = i2;
+ for (j = k; j <= i__2; ++j) {
+ sum = h__[k + j * h_dim1] + v2 * h__[k + 1 + j * h_dim1];
+ h__[k + j * h_dim1] -= sum * t1;
+ h__[k + 1 + j * h_dim1] -= sum * t2;
+/* L100: */
+ }
+
+/* Apply G from the right to transform the columns of the */
+/* matrix in rows I1 to min(K+3,I). */
+
+ i__2 = i__;
+ for (j = i1; j <= i__2; ++j) {
+ sum = h__[j + k * h_dim1] + v2 * h__[j + (k + 1) * h_dim1]
+ ;
+ h__[j + k * h_dim1] -= sum * t1;
+ h__[j + (k + 1) * h_dim1] -= sum * t2;
+/* L110: */
+ }
+
+ if (*wantz) {
+
+/* Accumulate transformations in the matrix Z */
+
+ i__2 = *ihiz;
+ for (j = *iloz; j <= i__2; ++j) {
+ sum = z__[j + k * z_dim1] + v2 * z__[j + (k + 1) *
+ z_dim1];
+ z__[j + k * z_dim1] -= sum * t1;
+ z__[j + (k + 1) * z_dim1] -= sum * t2;
+/* L120: */
+ }
+ }
+ }
+/* L130: */
+ }
+
+/* L140: */
+ }
+
+/* Failure to converge in remaining number of iterations */
+
+ *info = i__;
+ return 0;
+
+L150:
+
+ if (l == i__) {
+
+/* H(I,I-1) is negligible: one eigenvalue has converged. */
+
+ wr[i__] = h__[i__ + i__ * h_dim1];
+ wi[i__] = 0.f;
+ } else if (l == i__ - 1) {
+
+/* H(I-1,I-2) is negligible: a pair of eigenvalues have converged. */
+
+/* Transform the 2-by-2 submatrix to standard Schur form, */
+/* and compute and store the eigenvalues. */
+
+ slanv2_(&h__[i__ - 1 + (i__ - 1) * h_dim1], &h__[i__ - 1 + i__ *
+ h_dim1], &h__[i__ + (i__ - 1) * h_dim1], &h__[i__ + i__ *
+ h_dim1], &wr[i__ - 1], &wi[i__ - 1], &wr[i__], &wi[i__], &cs,
+ &sn);
+
+ if (*wantt) {
+
+/* Apply the transformation to the rest of H. */
+
+ if (i2 > i__) {
+ i__1 = i2 - i__;
+ srot_(&i__1, &h__[i__ - 1 + (i__ + 1) * h_dim1], ldh, &h__[
+ i__ + (i__ + 1) * h_dim1], ldh, &cs, &sn);
+ }
+ i__1 = i__ - i1 - 1;
+ srot_(&i__1, &h__[i1 + (i__ - 1) * h_dim1], &c__1, &h__[i1 + i__ *
+ h_dim1], &c__1, &cs, &sn);
+ }
+ if (*wantz) {
+
+/* Apply the transformation to Z. */
+
+ srot_(&nz, &z__[*iloz + (i__ - 1) * z_dim1], &c__1, &z__[*iloz +
+ i__ * z_dim1], &c__1, &cs, &sn);
+ }
+ }
+
+/* return to start of the main loop with new value of I. */
+
+ i__ = l - 1;
+ goto L20;
+
+L160:
+ return 0;
+
+/* End of SLAHQR */
+
+} /* slahqr_ */
diff --git a/contrib/libs/clapack/slahr2.c b/contrib/libs/clapack/slahr2.c
new file mode 100644
index 0000000000..d5df1ceec6
--- /dev/null
+++ b/contrib/libs/clapack/slahr2.c
@@ -0,0 +1,309 @@
+/* slahr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b4 = -1.f;
+static real c_b5 = 1.f;
+static integer c__1 = 1;
+static real c_b38 = 0.f;
+
+/* Subroutine */ int slahr2_(integer *n, integer *k, integer *nb, real *a,
+ integer *lda, real *tau, real *t, integer *ldt, real *y, integer *ldy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2,
+ i__3;
+ real r__1;
+
+ /* Local variables */
+ integer i__;
+ real ei;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ sgemm_(char *, char *, integer *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *), sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *), scopy_(integer *, real *, integer *, real *, integer *),
+ strmm_(char *, char *, char *, char *, integer *, integer *, real
+ *, real *, integer *, real *, integer *), saxpy_(integer *, real *, real *, integer *, real *,
+ integer *), strmv_(char *, char *, char *, integer *, real *,
+ integer *, real *, integer *), slarfg_(
+ integer *, real *, real *, integer *, real *), slacpy_(char *,
+ integer *, integer *, real *, integer *, real *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAHR2 reduces the first NB columns of a real general n-BY-(n-k+1) */
+/* matrix A so that elements below the k-th subdiagonal are zero. The */
+/* reduction is performed by an orthogonal similarity transformation */
+/* Q' * A * Q. The routine returns the matrices V and T which determine */
+/* Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T. */
+
+/* This is an auxiliary routine called by SGEHRD. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* K (input) INTEGER */
+/* The offset for the reduction. Elements below the k-th */
+/* subdiagonal in the first NB columns are reduced to zero. */
+/* K < N. */
+
+/* NB (input) INTEGER */
+/* The number of columns to be reduced. */
+
+/* A (input/output) REAL array, dimension (LDA,N-K+1) */
+/* On entry, the n-by-(n-k+1) general matrix A. */
+/* On exit, the elements on and above the k-th subdiagonal in */
+/* the first NB columns are overwritten with the corresponding */
+/* elements of the reduced matrix; the elements below the k-th */
+/* subdiagonal, with the array TAU, represent the matrix Q as a */
+/* product of elementary reflectors. The other columns of A are */
+/* unchanged. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (output) REAL array, dimension (NB) */
+/* The scalar factors of the elementary reflectors. See Further */
+/* Details. */
+
+/* T (output) REAL array, dimension (LDT,NB) */
+/* The upper triangular matrix T. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= NB. */
+
+/* Y (output) REAL array, dimension (LDY,NB) */
+/* The n-by-nb matrix Y. */
+
+/* LDY (input) INTEGER */
+/* The leading dimension of the array Y. LDY >= N. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of nb elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(nb). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in */
+/* A(i+k+1:n,i), and tau in TAU(i). */
+
+/* The elements of the vectors v together form the (n-k+1)-by-nb matrix */
+/* V which is needed, with T and Y, to apply the transformation to the */
+/* unreduced part of the matrix, using an update of the form: */
+/* A := (I - V*T*V') * (A - Y*V'). */
+
+/* The contents of A on exit are illustrated by the following example */
+/* with n = 7, k = 3 and nb = 2: */
+
+/* ( a a a a a ) */
+/* ( a a a a a ) */
+/* ( a a a a a ) */
+/* ( h h a a a ) */
+/* ( v1 h a a a ) */
+/* ( v1 v2 a a a ) */
+/* ( v1 v2 a a a ) */
+
+/* where a denotes an element of the original matrix A, h denotes a */
+/* modified element of the upper Hessenberg matrix H, and vi denotes an */
+/* element of the vector defining H(i). */
+
+/* This file is a slight modification of LAPACK-3.0's SLAHRD */
+/* incorporating improvements proposed by Quintana-Orti and Van de */
+/* Geijn. Note that the entries of A(1:K,2:NB) differ from those */
+/* returned by the original LAPACK routine. This function is */
+/* not backward compatible with LAPACK3.0. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ --tau;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ y_dim1 = *ldy;
+ y_offset = 1 + y_dim1;
+ y -= y_offset;
+
+ /* Function Body */
+ if (*n <= 1) {
+ return 0;
+ }
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (i__ > 1) {
+
+/* Update A(K+1:N,I) */
+
+/* Update I-th column of A - Y * V' */
+
+ i__2 = *n - *k;
+ i__3 = i__ - 1;
+ sgemv_("NO TRANSPOSE", &i__2, &i__3, &c_b4, &y[*k + 1 + y_dim1],
+ ldy, &a[*k + i__ - 1 + a_dim1], lda, &c_b5, &a[*k + 1 +
+ i__ * a_dim1], &c__1);
+
+/* Apply I - V * T' * V' to this column (call it b) from the */
+/* left, using the last column of T as workspace */
+
+/* Let V = ( V1 ) and b = ( b1 ) (first I-1 rows) */
+/* ( V2 ) ( b2 ) */
+
+/* where V1 is unit lower triangular */
+
+/* w := V1' * b1 */
+
+ i__2 = i__ - 1;
+ scopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 +
+ 1], &c__1);
+ i__2 = i__ - 1;
+ strmv_("Lower", "Transpose", "UNIT", &i__2, &a[*k + 1 + a_dim1],
+ lda, &t[*nb * t_dim1 + 1], &c__1);
+
+/* w := w + V2'*b2 */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b5, &a[*k + i__ + a_dim1],
+ lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b5, &t[*nb *
+ t_dim1 + 1], &c__1);
+
+/* w := T'*w */
+
+ i__2 = i__ - 1;
+ strmv_("Upper", "Transpose", "NON-UNIT", &i__2, &t[t_offset], ldt,
+ &t[*nb * t_dim1 + 1], &c__1);
+
+/* b2 := b2 - V2*w */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("NO TRANSPOSE", &i__2, &i__3, &c_b4, &a[*k + i__ + a_dim1],
+ lda, &t[*nb * t_dim1 + 1], &c__1, &c_b5, &a[*k + i__ +
+ i__ * a_dim1], &c__1);
+
+/* b1 := b1 - V1*w */
+
+ i__2 = i__ - 1;
+ strmv_("Lower", "NO TRANSPOSE", "UNIT", &i__2, &a[*k + 1 + a_dim1]
+, lda, &t[*nb * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ saxpy_(&i__2, &c_b4, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 + i__
+ * a_dim1], &c__1);
+
+ a[*k + i__ - 1 + (i__ - 1) * a_dim1] = ei;
+ }
+
+/* Generate the elementary reflector H(I) to annihilate */
+/* A(K+I+1:N,I) */
+
+ i__2 = *n - *k - i__ + 1;
+/* Computing MIN */
+ i__3 = *k + i__ + 1;
+ slarfg_(&i__2, &a[*k + i__ + i__ * a_dim1], &a[min(i__3, *n)+ i__ *
+ a_dim1], &c__1, &tau[i__]);
+ ei = a[*k + i__ + i__ * a_dim1];
+ a[*k + i__ + i__ * a_dim1] = 1.f;
+
+/* Compute Y(K+1:N,I) */
+
+ i__2 = *n - *k;
+ i__3 = *n - *k - i__ + 1;
+ sgemv_("NO TRANSPOSE", &i__2, &i__3, &c_b5, &a[*k + 1 + (i__ + 1) *
+ a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b38, &y[*
+ k + 1 + i__ * y_dim1], &c__1);
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b5, &a[*k + i__ + a_dim1], lda, &
+ a[*k + i__ + i__ * a_dim1], &c__1, &c_b38, &t[i__ * t_dim1 +
+ 1], &c__1);
+ i__2 = *n - *k;
+ i__3 = i__ - 1;
+ sgemv_("NO TRANSPOSE", &i__2, &i__3, &c_b4, &y[*k + 1 + y_dim1], ldy,
+ &t[i__ * t_dim1 + 1], &c__1, &c_b5, &y[*k + 1 + i__ * y_dim1],
+ &c__1);
+ i__2 = *n - *k;
+ sscal_(&i__2, &tau[i__], &y[*k + 1 + i__ * y_dim1], &c__1);
+
+/* Compute T(1:I,I) */
+
+ i__2 = i__ - 1;
+ r__1 = -tau[i__];
+ sscal_(&i__2, &r__1, &t[i__ * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ strmv_("Upper", "No Transpose", "NON-UNIT", &i__2, &t[t_offset], ldt,
+ &t[i__ * t_dim1 + 1], &c__1)
+ ;
+ t[i__ + i__ * t_dim1] = tau[i__];
+
+/* L10: */
+ }
+ a[*k + *nb + *nb * a_dim1] = ei;
+
+/* Compute Y(1:K,1:NB) */
+
+ slacpy_("ALL", k, nb, &a[(a_dim1 << 1) + 1], lda, &y[y_offset], ldy);
+ strmm_("RIGHT", "Lower", "NO TRANSPOSE", "UNIT", k, nb, &c_b5, &a[*k + 1
+ + a_dim1], lda, &y[y_offset], ldy);
+ if (*n > *k + *nb) {
+ i__1 = *n - *k - *nb;
+ sgemm_("NO TRANSPOSE", "NO TRANSPOSE", k, nb, &i__1, &c_b5, &a[(*nb +
+ 2) * a_dim1 + 1], lda, &a[*k + 1 + *nb + a_dim1], lda, &c_b5,
+ &y[y_offset], ldy);
+ }
+ strmm_("RIGHT", "Upper", "NO TRANSPOSE", "NON-UNIT", k, nb, &c_b5, &t[
+ t_offset], ldt, &y[y_offset], ldy);
+
+ return 0;
+
+/* End of SLAHR2 */
+
+} /* slahr2_ */
diff --git a/contrib/libs/clapack/slahrd.c b/contrib/libs/clapack/slahrd.c
new file mode 100644
index 0000000000..d73e4ee5b9
--- /dev/null
+++ b/contrib/libs/clapack/slahrd.c
@@ -0,0 +1,282 @@
+/* slahrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b4 = -1.f;
+static real c_b5 = 1.f;
+static integer c__1 = 1;
+static real c_b38 = 0.f;
+
+/* Subroutine */ int slahrd_(integer *n, integer *k, integer *nb, real *a,
+ integer *lda, real *tau, real *t, integer *ldt, real *y, integer *ldy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2,
+ i__3;
+ real r__1;
+
+ /* Local variables */
+ integer i__;
+ real ei;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ sgemv_(char *, integer *, integer *, real *, real *, integer *,
+ real *, integer *, real *, real *, integer *), scopy_(
+ integer *, real *, integer *, real *, integer *), saxpy_(integer *
+, real *, real *, integer *, real *, integer *), strmv_(char *,
+ char *, char *, integer *, real *, integer *, real *, integer *), slarfg_(integer *, real *, real *,
+ integer *, real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAHRD reduces the first NB columns of a real general n-by-(n-k+1) */
+/* matrix A so that elements below the k-th subdiagonal are zero. The */
+/* reduction is performed by an orthogonal similarity transformation */
+/* Q' * A * Q. The routine returns the matrices V and T which determine */
+/* Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T. */
+
+/* This is an OBSOLETE auxiliary routine. */
+/* This routine will be 'deprecated' in a future release. */
+/* Please use the new routine SLAHR2 instead. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* K (input) INTEGER */
+/* The offset for the reduction. Elements below the k-th */
+/* subdiagonal in the first NB columns are reduced to zero. */
+
+/* NB (input) INTEGER */
+/* The number of columns to be reduced. */
+
+/* A (input/output) REAL array, dimension (LDA,N-K+1) */
+/* On entry, the n-by-(n-k+1) general matrix A. */
+/* On exit, the elements on and above the k-th subdiagonal in */
+/* the first NB columns are overwritten with the corresponding */
+/* elements of the reduced matrix; the elements below the k-th */
+/* subdiagonal, with the array TAU, represent the matrix Q as a */
+/* product of elementary reflectors. The other columns of A are */
+/* unchanged. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (output) REAL array, dimension (NB) */
+/* The scalar factors of the elementary reflectors. See Further */
+/* Details. */
+
+/* T (output) REAL array, dimension (LDT,NB) */
+/* The upper triangular matrix T. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= NB. */
+
+/* Y (output) REAL array, dimension (LDY,NB) */
+/* The n-by-nb matrix Y. */
+
+/* LDY (input) INTEGER */
+/* The leading dimension of the array Y. LDY >= N. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of nb elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(nb). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in */
+/* A(i+k+1:n,i), and tau in TAU(i). */
+
+/* The elements of the vectors v together form the (n-k+1)-by-nb matrix */
+/* V which is needed, with T and Y, to apply the transformation to the */
+/* unreduced part of the matrix, using an update of the form: */
+/* A := (I - V*T*V') * (A - Y*V'). */
+
+/* The contents of A on exit are illustrated by the following example */
+/* with n = 7, k = 3 and nb = 2: */
+
+/* ( a h a a a ) */
+/* ( a h a a a ) */
+/* ( a h a a a ) */
+/* ( h h a a a ) */
+/* ( v1 h a a a ) */
+/* ( v1 v2 a a a ) */
+/* ( v1 v2 a a a ) */
+
+/* where a denotes an element of the original matrix A, h denotes a */
+/* modified element of the upper Hessenberg matrix H, and vi denotes an */
+/* element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ --tau;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ y_dim1 = *ldy;
+ y_offset = 1 + y_dim1;
+ y -= y_offset;
+
+ /* Function Body */
+ if (*n <= 1) {
+ return 0;
+ }
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (i__ > 1) {
+
+/* Update A(1:n,i) */
+
+/* Compute i-th column of A - Y * V' */
+
+ i__2 = i__ - 1;
+ sgemv_("No transpose", n, &i__2, &c_b4, &y[y_offset], ldy, &a[*k
+ + i__ - 1 + a_dim1], lda, &c_b5, &a[i__ * a_dim1 + 1], &
+ c__1);
+
+/* Apply I - V * T' * V' to this column (call it b) from the */
+/* left, using the last column of T as workspace */
+
+/* Let V = ( V1 ) and b = ( b1 ) (first I-1 rows) */
+/* ( V2 ) ( b2 ) */
+
+/* where V1 is unit lower triangular */
+
+/* w := V1' * b1 */
+
+ i__2 = i__ - 1;
+ scopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 +
+ 1], &c__1);
+ i__2 = i__ - 1;
+ strmv_("Lower", "Transpose", "Unit", &i__2, &a[*k + 1 + a_dim1],
+ lda, &t[*nb * t_dim1 + 1], &c__1);
+
+/* w := w + V2'*b2 */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b5, &a[*k + i__ + a_dim1],
+ lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b5, &t[*nb *
+ t_dim1 + 1], &c__1);
+
+/* w := T'*w */
+
+ i__2 = i__ - 1;
+ strmv_("Upper", "Transpose", "Non-unit", &i__2, &t[t_offset], ldt,
+ &t[*nb * t_dim1 + 1], &c__1);
+
+/* b2 := b2 - V2*w */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b4, &a[*k + i__ + a_dim1],
+ lda, &t[*nb * t_dim1 + 1], &c__1, &c_b5, &a[*k + i__ +
+ i__ * a_dim1], &c__1);
+
+/* b1 := b1 - V1*w */
+
+ i__2 = i__ - 1;
+ strmv_("Lower", "No transpose", "Unit", &i__2, &a[*k + 1 + a_dim1]
+, lda, &t[*nb * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ saxpy_(&i__2, &c_b4, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 + i__
+ * a_dim1], &c__1);
+
+ a[*k + i__ - 1 + (i__ - 1) * a_dim1] = ei;
+ }
+
+/* Generate the elementary reflector H(i) to annihilate */
+/* A(k+i+1:n,i) */
+
+ i__2 = *n - *k - i__ + 1;
+/* Computing MIN */
+ i__3 = *k + i__ + 1;
+ slarfg_(&i__2, &a[*k + i__ + i__ * a_dim1], &a[min(i__3, *n)+ i__ *
+ a_dim1], &c__1, &tau[i__]);
+ ei = a[*k + i__ + i__ * a_dim1];
+ a[*k + i__ + i__ * a_dim1] = 1.f;
+
+/* Compute Y(1:n,i) */
+
+ i__2 = *n - *k - i__ + 1;
+ sgemv_("No transpose", n, &i__2, &c_b5, &a[(i__ + 1) * a_dim1 + 1],
+ lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b38, &y[i__ *
+ y_dim1 + 1], &c__1);
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b5, &a[*k + i__ + a_dim1], lda, &
+ a[*k + i__ + i__ * a_dim1], &c__1, &c_b38, &t[i__ * t_dim1 +
+ 1], &c__1);
+ i__2 = i__ - 1;
+ sgemv_("No transpose", n, &i__2, &c_b4, &y[y_offset], ldy, &t[i__ *
+ t_dim1 + 1], &c__1, &c_b5, &y[i__ * y_dim1 + 1], &c__1);
+ sscal_(n, &tau[i__], &y[i__ * y_dim1 + 1], &c__1);
+
+/* Compute T(1:i,i) */
+
+ i__2 = i__ - 1;
+ r__1 = -tau[i__];
+ sscal_(&i__2, &r__1, &t[i__ * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ strmv_("Upper", "No transpose", "Non-unit", &i__2, &t[t_offset], ldt,
+ &t[i__ * t_dim1 + 1], &c__1)
+ ;
+ t[i__ + i__ * t_dim1] = tau[i__];
+
+/* L10: */
+ }
+ a[*k + *nb + *nb * a_dim1] = ei;
+
+ return 0;
+
+/* End of SLAHRD */
+
+} /* slahrd_ */
diff --git a/contrib/libs/clapack/slaic1.c b/contrib/libs/clapack/slaic1.c
new file mode 100644
index 0000000000..1ef8a6de19
--- /dev/null
+++ b/contrib/libs/clapack/slaic1.c
@@ -0,0 +1,324 @@
+/* slaic1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b5 = 1.f;
+
+/* Subroutine */ int slaic1_(integer *job, integer *j, real *x, real *sest,
+ real *w, real *gamma, real *sestpr, real *s, real *c__)
+{
+ /* System generated locals */
+ real r__1, r__2, r__3, r__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ real b, t, s1, s2, eps, tmp, sine;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ real test, zeta1, zeta2, alpha, norma, absgam, absalp;
+ extern doublereal slamch_(char *);
+ real cosine, absest;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAIC1 applies one step of incremental condition estimation in */
+/* its simplest version: */
+
+/* Let x, twonorm(x) = 1, be an approximate singular vector of a j-by-j */
+/* lower triangular matrix L, such that */
+/* twonorm(L*x) = sest */
+/* Then SLAIC1 computes sestpr, s, c such that */
+/* the vector */
+/* [ s*x ] */
+/* xhat = [ c ] */
+/* is an approximate singular vector of */
+/* [ L 0 ] */
+/* Lhat = [ w' gamma ] */
+/* in the sense that */
+/* twonorm(Lhat*xhat) = sestpr. */
+
+/* Depending on JOB, an estimate for the largest or smallest singular */
+/* value is computed. */
+
+/* Note that [s c]' and sestpr**2 is an eigenpair of the system */
+
+/* diag(sest*sest, 0) + [alpha gamma] * [ alpha ] */
+/* [ gamma ] */
+
+/* where alpha = x'*w. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) INTEGER */
+/* = 1: an estimate for the largest singular value is computed. */
+/* = 2: an estimate for the smallest singular value is computed. */
+
+/* J (input) INTEGER */
+/* Length of X and W */
+
+/* X (input) REAL array, dimension (J) */
+/* The j-vector x. */
+
+/* SEST (input) REAL */
+/* Estimated singular value of j by j matrix L */
+
+/* W (input) REAL array, dimension (J) */
+/* The j-vector w. */
+
+/* GAMMA (input) REAL */
+/* The diagonal element gamma. */
+
+/* SESTPR (output) REAL */
+/* Estimated singular value of (j+1) by (j+1) matrix Lhat. */
+
+/* S (output) REAL */
+/* Sine needed in forming xhat. */
+
+/* C (output) REAL */
+/* Cosine needed in forming xhat. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --w;
+ --x;
+
+ /* Function Body */
+ eps = slamch_("Epsilon");
+ alpha = sdot_(j, &x[1], &c__1, &w[1], &c__1);
+
+ absalp = dabs(alpha);
+ absgam = dabs(*gamma);
+ absest = dabs(*sest);
+
+ if (*job == 1) {
+
+/* Estimating largest singular value */
+
+/* special cases */
+
+ if (*sest == 0.f) {
+ s1 = dmax(absgam,absalp);
+ if (s1 == 0.f) {
+ *s = 0.f;
+ *c__ = 1.f;
+ *sestpr = 0.f;
+ } else {
+ *s = alpha / s1;
+ *c__ = *gamma / s1;
+ tmp = sqrt(*s * *s + *c__ * *c__);
+ *s /= tmp;
+ *c__ /= tmp;
+ *sestpr = s1 * tmp;
+ }
+ return 0;
+ } else if (absgam <= eps * absest) {
+ *s = 1.f;
+ *c__ = 0.f;
+ tmp = dmax(absest,absalp);
+ s1 = absest / tmp;
+ s2 = absalp / tmp;
+ *sestpr = tmp * sqrt(s1 * s1 + s2 * s2);
+ return 0;
+ } else if (absalp <= eps * absest) {
+ s1 = absgam;
+ s2 = absest;
+ if (s1 <= s2) {
+ *s = 1.f;
+ *c__ = 0.f;
+ *sestpr = s2;
+ } else {
+ *s = 0.f;
+ *c__ = 1.f;
+ *sestpr = s1;
+ }
+ return 0;
+ } else if (absest <= eps * absalp || absest <= eps * absgam) {
+ s1 = absgam;
+ s2 = absalp;
+ if (s1 <= s2) {
+ tmp = s1 / s2;
+ *s = sqrt(tmp * tmp + 1.f);
+ *sestpr = s2 * *s;
+ *c__ = *gamma / s2 / *s;
+ *s = r_sign(&c_b5, &alpha) / *s;
+ } else {
+ tmp = s2 / s1;
+ *c__ = sqrt(tmp * tmp + 1.f);
+ *sestpr = s1 * *c__;
+ *s = alpha / s1 / *c__;
+ *c__ = r_sign(&c_b5, gamma) / *c__;
+ }
+ return 0;
+ } else {
+
+/* normal case */
+
+ zeta1 = alpha / absest;
+ zeta2 = *gamma / absest;
+
+ b = (1.f - zeta1 * zeta1 - zeta2 * zeta2) * .5f;
+ *c__ = zeta1 * zeta1;
+ if (b > 0.f) {
+ t = *c__ / (b + sqrt(b * b + *c__));
+ } else {
+ t = sqrt(b * b + *c__) - b;
+ }
+
+ sine = -zeta1 / t;
+ cosine = -zeta2 / (t + 1.f);
+ tmp = sqrt(sine * sine + cosine * cosine);
+ *s = sine / tmp;
+ *c__ = cosine / tmp;
+ *sestpr = sqrt(t + 1.f) * absest;
+ return 0;
+ }
+
+ } else if (*job == 2) {
+
+/* Estimating smallest singular value */
+
+/* special cases */
+
+ if (*sest == 0.f) {
+ *sestpr = 0.f;
+ if (dmax(absgam,absalp) == 0.f) {
+ sine = 1.f;
+ cosine = 0.f;
+ } else {
+ sine = -(*gamma);
+ cosine = alpha;
+ }
+/* Computing MAX */
+ r__1 = dabs(sine), r__2 = dabs(cosine);
+ s1 = dmax(r__1,r__2);
+ *s = sine / s1;
+ *c__ = cosine / s1;
+ tmp = sqrt(*s * *s + *c__ * *c__);
+ *s /= tmp;
+ *c__ /= tmp;
+ return 0;
+ } else if (absgam <= eps * absest) {
+ *s = 0.f;
+ *c__ = 1.f;
+ *sestpr = absgam;
+ return 0;
+ } else if (absalp <= eps * absest) {
+ s1 = absgam;
+ s2 = absest;
+ if (s1 <= s2) {
+ *s = 0.f;
+ *c__ = 1.f;
+ *sestpr = s1;
+ } else {
+ *s = 1.f;
+ *c__ = 0.f;
+ *sestpr = s2;
+ }
+ return 0;
+ } else if (absest <= eps * absalp || absest <= eps * absgam) {
+ s1 = absgam;
+ s2 = absalp;
+ if (s1 <= s2) {
+ tmp = s1 / s2;
+ *c__ = sqrt(tmp * tmp + 1.f);
+ *sestpr = absest * (tmp / *c__);
+ *s = -(*gamma / s2) / *c__;
+ *c__ = r_sign(&c_b5, &alpha) / *c__;
+ } else {
+ tmp = s2 / s1;
+ *s = sqrt(tmp * tmp + 1.f);
+ *sestpr = absest / *s;
+ *c__ = alpha / s1 / *s;
+ *s = -r_sign(&c_b5, gamma) / *s;
+ }
+ return 0;
+ } else {
+
+/* normal case */
+
+ zeta1 = alpha / absest;
+ zeta2 = *gamma / absest;
+
+/* Computing MAX */
+ r__3 = zeta1 * zeta1 + 1.f + (r__1 = zeta1 * zeta2, dabs(r__1)),
+ r__4 = (r__2 = zeta1 * zeta2, dabs(r__2)) + zeta2 * zeta2;
+ norma = dmax(r__3,r__4);
+
+/* See if root is closer to zero or to ONE */
+
+ test = (zeta1 - zeta2) * 2.f * (zeta1 + zeta2) + 1.f;
+ if (test >= 0.f) {
+
+/* root is close to zero, compute directly */
+
+ b = (zeta1 * zeta1 + zeta2 * zeta2 + 1.f) * .5f;
+ *c__ = zeta2 * zeta2;
+ t = *c__ / (b + sqrt((r__1 = b * b - *c__, dabs(r__1))));
+ sine = zeta1 / (1.f - t);
+ cosine = -zeta2 / t;
+ *sestpr = sqrt(t + eps * 4.f * eps * norma) * absest;
+ } else {
+
+/* root is closer to ONE, shift by that amount */
+
+ b = (zeta2 * zeta2 + zeta1 * zeta1 - 1.f) * .5f;
+ *c__ = zeta1 * zeta1;
+ if (b >= 0.f) {
+ t = -(*c__) / (b + sqrt(b * b + *c__));
+ } else {
+ t = b - sqrt(b * b + *c__);
+ }
+ sine = -zeta1 / t;
+ cosine = -zeta2 / (t + 1.f);
+ *sestpr = sqrt(t + 1.f + eps * 4.f * eps * norma) * absest;
+ }
+ tmp = sqrt(sine * sine + cosine * cosine);
+ *s = sine / tmp;
+ *c__ = cosine / tmp;
+ return 0;
+
+ }
+ }
+ return 0;
+
+/* End of SLAIC1 */
+
+} /* slaic1_ */
diff --git a/contrib/libs/clapack/slaisnan.c b/contrib/libs/clapack/slaisnan.c
new file mode 100644
index 0000000000..cb2b191a11
--- /dev/null
+++ b/contrib/libs/clapack/slaisnan.c
@@ -0,0 +1,58 @@
+/* slaisnan.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+logical slaisnan_(real *sin1, real *sin2)
+{
+    /* System generated locals */
+    logical ret_val;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is not for general use. It exists solely to avoid */
+/* over-optimization in SISNAN. */
+
+/* SLAISNAN checks for NaNs by comparing its two arguments for */
+/* inequality. NaN is the only floating-point value where NaN != NaN */
+/* returns .TRUE. To check for NaNs, pass the same variable as both */
+/* arguments. */
+
+/* A compiler must assume that the two arguments are */
+/* not the same variable, and the test will not be optimized away. */
+/* Interprocedural or whole-program optimization may delete this */
+/* test. The ISNAN functions will be replaced by the correct */
+/* Fortran 03 intrinsic once the intrinsic is widely available. */
+
+/* Arguments */
+/* ========= */
+
+/* SIN1 (input) REAL */
+/* SIN2 (input) REAL */
+/* Two numbers to compare for inequality. */
+
+/* ===================================================================== */
+
+/* .. Executable Statements .. */
+    ret_val = *sin1 != *sin2; /* x != x is TRUE only when x is NaN (IEEE-754) */
+    return ret_val; /* NOTE(review): fast-math style flags may still optimize this away — confirm build flags */
+} /* slaisnan_ */
diff --git a/contrib/libs/clapack/slaln2.c b/contrib/libs/clapack/slaln2.c
new file mode 100644
index 0000000000..e1ebc2e20c
--- /dev/null
+++ b/contrib/libs/clapack/slaln2.c
@@ -0,0 +1,577 @@
+/* slaln2.f -- translated by f2c (version 20061008).
+   You must link the resulting object file with libf2c:
+   on Microsoft Windows system, link with libf2c.lib;
+   on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+   or, if you install libf2c.a in a standard place, with -lf2c -lm
+   -- in that order, at the end of the command line, as in
+      cc *.o -lf2c -lm
+   Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+      http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slaln2_(logical *ltrans, integer *na, integer *nw, real *
+	smin, real *ca, real *a, integer *lda, real *d1, real *d2, real *b, 
+	integer *ldb, real *wr, real *wi, real *x, integer *ldx, real *scale, 
+	real *xnorm, integer *info)
+{
+    /* Initialized data */
+
+/* For each of the 4 possible complete-pivot positions in the 2x2 case: */
+/* cswap says whether the two solution components must be swapped back, */
+/* rswap whether the two right-hand-side components are swapped, and */
+/* ipivot (a 4x4 table stored column-major, as in the Fortran source) */
+/* gives the permuted positions of all four matrix entries. */
+
+    static logical cswap[4] = { FALSE_,FALSE_,TRUE_,TRUE_ };
+    static logical rswap[4] = { FALSE_,TRUE_,FALSE_,TRUE_ };
+    static integer ipivot[16]	/* was [4][4] */ = { 1,2,3,4,2,1,4,3,3,4,1,2,
+	    4,3,2,1 };
+
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, x_dim1, x_offset;
+    real r__1, r__2, r__3, r__4, r__5, r__6;
+    static real equiv_0[4], equiv_1[4];
+
+    /* Local variables */
+    integer j;
+/* ci/cr and civ/crv are, respectively, element-wise and flat-vector */
+/* views of the same 4-element storage: they emulate the Fortran */
+/* EQUIVALENCE of the 2x2 matrix C = ca*A - w*D (real part cr, */
+/* imaginary part ci) with its column-major vector form (crv, civ). */
+#define ci (equiv_0)
+#define cr (equiv_1)
+    real bi1, bi2, br1, br2, xi1, xi2, xr1, xr2, ci21, ci22, cr21, cr22, li21, 
+	    csi, ui11, lr21, ui12, ui22;
+#define civ (equiv_0)
+    real csr, ur11, ur12, ur22;
+#define crv (equiv_1)
+    real bbnd, cmax, ui11r, ui12s, temp, ur11r, ur12s, u22abs;
+    integer icmax;
+    real bnorm, cnorm, smini;
+    extern doublereal slamch_(char *);
+    real bignum;
+    extern /* Subroutine */ int sladiv_(real *, real *, real *, real *, real *
+, real *);
+    real smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLALN2 solves a system of the form (ca A - w D ) X = s B */
+/* or (ca A' - w D) X = s B with possible scaling ("s") and */
+/* perturbation of A. (A' means A-transpose.) */
+
+/* A is an NA x NA real matrix, ca is a real scalar, D is an NA x NA */
+/* real diagonal matrix, w is a real or complex value, and X and B are */
+/* NA x 1 matrices -- real if w is real, complex if w is complex. NA */
+/* may be 1 or 2. */
+
+/* If w is complex, X and B are represented as NA x 2 matrices, */
+/* the first column of each being the real part and the second */
+/* being the imaginary part. */
+
+/* "s" is a scaling factor (.LE. 1), computed by SLALN2, which is */
+/* so chosen that X can be computed without overflow. X is further */
+/* scaled if necessary to assure that norm(ca A - w D)*norm(X) is less */
+/* than overflow. */
+
+/* If both singular values of (ca A - w D) are less than SMIN, */
+/* SMIN*identity will be used instead of (ca A - w D). If only one */
+/* singular value is less than SMIN, one element of (ca A - w D) will be */
+/* perturbed enough to make the smallest singular value roughly SMIN. */
+/* If both singular values are at least SMIN, (ca A - w D) will not be */
+/* perturbed. In any case, the perturbation will be at most some small */
+/* multiple of max( SMIN, ulp*norm(ca A - w D) ). The singular values */
+/* are computed by infinity-norm approximations, and thus will only be */
+/* correct to a factor of 2 or so. */
+
+/* Note: all input quantities are assumed to be smaller than overflow */
+/* by a reasonable factor. (See BIGNUM.) */
+
+/* Arguments */
+/* ========== */
+
+/* LTRANS (input) LOGICAL */
+/* =.TRUE.: A-transpose will be used. */
+/* =.FALSE.: A will be used (not transposed.) */
+
+/* NA (input) INTEGER */
+/* The size of the matrix A. It may (only) be 1 or 2. */
+
+/* NW (input) INTEGER */
+/* 1 if "w" is real, 2 if "w" is complex. It may only be 1 */
+/* or 2. */
+
+/* SMIN (input) REAL */
+/* The desired lower bound on the singular values of A. This */
+/* should be a safe distance away from underflow or overflow, */
+/* say, between (underflow/machine precision) and (machine */
+/* precision * overflow ). (See BIGNUM and ULP.) */
+
+/* CA (input) REAL */
+/* The coefficient c, which A is multiplied by. */
+
+/* A (input) REAL array, dimension (LDA,NA) */
+/* The NA x NA matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. It must be at least NA. */
+
+/* D1 (input) REAL */
+/* The 1,1 element in the diagonal matrix D. */
+
+/* D2 (input) REAL */
+/* The 2,2 element in the diagonal matrix D. Not used if NW=1. */
+
+/* B (input) REAL array, dimension (LDB,NW) */
+/* The NA x NW matrix B (right-hand side). If NW=2 ("w" is */
+/* complex), column 1 contains the real part of B and column 2 */
+/* contains the imaginary part. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. It must be at least NA. */
+
+/* WR (input) REAL */
+/* The real part of the scalar "w". */
+
+/* WI (input) REAL */
+/* The imaginary part of the scalar "w". Not used if NW=1. */
+
+/* X (output) REAL array, dimension (LDX,NW) */
+/* The NA x NW matrix X (unknowns), as computed by SLALN2. */
+/* If NW=2 ("w" is complex), on exit, column 1 will contain */
+/* the real part of X and column 2 will contain the imaginary */
+/* part. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of X. It must be at least NA. */
+
+/* SCALE (output) REAL */
+/* The scale factor that B must be multiplied by to insure */
+/* that overflow does not occur when computing X. Thus, */
+/* (ca A - w D) X will be SCALE*B, not B (ignoring */
+/* perturbations of A.) It will be at most 1. */
+
+/* XNORM (output) REAL */
+/* The infinity-norm of X, when X is regarded as an NA x NW */
+/* real matrix. */
+
+/* INFO (output) INTEGER */
+/* An error flag. It will be set to zero if no error occurs, */
+/* a negative number if an argument is in error, or a positive */
+/* number if ca A - w D had to be perturbed. */
+/* The possible values are: */
+/* = 0: No error occurred, and (ca A - w D) did not have to be */
+/* perturbed. */
+/* = 1: (ca A - w D) had to be perturbed to make its smallest */
+/* (or only) singular value greater than SMIN. */
+/* NOTE: In the interests of speed, this routine does not */
+/* check the inputs for errors. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Equivalences .. */
+/* .. */
+/* .. Data statements .. */
+    /* Parameter adjustments */
+    /* Shift the array bases so that 1-based Fortran-style indexing */
+    /* (a[i + j*dim], columns first) can be used below. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+
+    /* Function Body */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Compute BIGNUM */
+
+    smlnum = 2.f * slamch_("Safe minimum");
+    bignum = 1.f / smlnum;
+    smini = dmax(*smin,smlnum);
+
+/* Don't check for input errors */
+
+    *info = 0;
+
+/* Standard Initializations */
+
+    *scale = 1.f;
+
+    if (*na == 1) {
+
+/* 1 x 1 (i.e., scalar) system C X = B */
+
+	if (*nw == 1) {
+
+/* Real 1x1 system. */
+
+/* C = ca A - w D */
+
+	    csr = *ca * a[a_dim1 + 1] - *wr * *d1;
+	    cnorm = dabs(csr);
+
+/* If | C | < SMINI, use C = SMINI */
+
+	    if (cnorm < smini) {
+		csr = smini;
+		cnorm = smini;
+		*info = 1;
+	    }
+
+/* Check scaling for X = B / C */
+
+	    bnorm = (r__1 = b[b_dim1 + 1], dabs(r__1));
+	    if (cnorm < 1.f && bnorm > 1.f) {
+		if (bnorm > bignum * cnorm) {
+		    *scale = 1.f / bnorm;
+		}
+	    }
+
+/* Compute X */
+
+	    x[x_dim1 + 1] = b[b_dim1 + 1] * *scale / csr;
+	    *xnorm = (r__1 = x[x_dim1 + 1], dabs(r__1));
+	} else {
+
+/* Complex 1x1 system (w is complex) */
+
+/* C = ca A - w D */
+
+	    csr = *ca * a[a_dim1 + 1] - *wr * *d1;
+	    csi = -(*wi) * *d1;
+	    cnorm = dabs(csr) + dabs(csi);
+
+/* If | C | < SMINI, use C = SMINI */
+
+	    if (cnorm < smini) {
+		csr = smini;
+		csi = 0.f;
+		cnorm = smini;
+		*info = 1;
+	    }
+
+/* Check scaling for X = B / C */
+
+	    bnorm = (r__1 = b[b_dim1 + 1], dabs(r__1)) + (r__2 = b[(b_dim1 << 
+		    1) + 1], dabs(r__2));
+	    if (cnorm < 1.f && bnorm > 1.f) {
+		if (bnorm > bignum * cnorm) {
+		    *scale = 1.f / bnorm;
+		}
+	    }
+
+/* Compute X */
+
+	    r__1 = *scale * b[b_dim1 + 1];
+	    r__2 = *scale * b[(b_dim1 << 1) + 1];
+	    sladiv_(&r__1, &r__2, &csr, &csi, &x[x_dim1 + 1], &x[(x_dim1 << 1)
+		     + 1]);
+	    *xnorm = (r__1 = x[x_dim1 + 1], dabs(r__1)) + (r__2 = x[(x_dim1 <<
+		     1) + 1], dabs(r__2));
+	}
+
+    } else {
+
+/* 2x2 System */
+
+/* Compute the real part of C = ca A - w D (or ca A' - w D ) */
+
+	cr[0] = *ca * a[a_dim1 + 1] - *wr * *d1;
+	cr[3] = *ca * a[(a_dim1 << 1) + 2] - *wr * *d2;
+	if (*ltrans) {
+	    cr[2] = *ca * a[a_dim1 + 2];
+	    cr[1] = *ca * a[(a_dim1 << 1) + 1];
+	} else {
+	    cr[1] = *ca * a[a_dim1 + 2];
+	    cr[2] = *ca * a[(a_dim1 << 1) + 1];
+	}
+
+	if (*nw == 1) {
+
+/* Real 2x2 system (w is real) */
+
+/* Find the largest element in C */
+
+	    cmax = 0.f;
+	    icmax = 0;
+
+	    for (j = 1; j <= 4; ++j) {
+		if ((r__1 = crv[j - 1], dabs(r__1)) > cmax) {
+		    cmax = (r__1 = crv[j - 1], dabs(r__1));
+		    icmax = j;
+		}
+/* L10: */
+	    }
+
+/* If norm(C) < SMINI, use SMINI*identity. */
+
+	    if (cmax < smini) {
+/* Computing MAX */
+		r__3 = (r__1 = b[b_dim1 + 1], dabs(r__1)), r__4 = (r__2 = b[
+			b_dim1 + 2], dabs(r__2));
+		bnorm = dmax(r__3,r__4);
+		if (smini < 1.f && bnorm > 1.f) {
+		    if (bnorm > bignum * smini) {
+			*scale = 1.f / bnorm;
+		    }
+		}
+		temp = *scale / smini;
+		x[x_dim1 + 1] = temp * b[b_dim1 + 1];
+		x[x_dim1 + 2] = temp * b[b_dim1 + 2];
+		*xnorm = temp * bnorm;
+		*info = 1;
+		return 0;
+	    }
+
+/* Gaussian elimination with complete pivoting. */
+
+	    ur11 = crv[icmax - 1];
+	    cr21 = crv[ipivot[(icmax << 2) - 3] - 1];
+	    ur12 = crv[ipivot[(icmax << 2) - 2] - 1];
+	    cr22 = crv[ipivot[(icmax << 2) - 1] - 1];
+	    ur11r = 1.f / ur11;
+	    lr21 = ur11r * cr21;
+	    ur22 = cr22 - ur12 * lr21;
+
+/* If smaller pivot < SMINI, use SMINI */
+
+	    if (dabs(ur22) < smini) {
+		ur22 = smini;
+		*info = 1;
+	    }
+	    if (rswap[icmax - 1]) {
+		br1 = b[b_dim1 + 2];
+		br2 = b[b_dim1 + 1];
+	    } else {
+		br1 = b[b_dim1 + 1];
+		br2 = b[b_dim1 + 2];
+	    }
+	    br2 -= lr21 * br1;
+/* Computing MAX */
+	    r__2 = (r__1 = br1 * (ur22 * ur11r), dabs(r__1)), r__3 = dabs(br2)
+		    ;
+	    bbnd = dmax(r__2,r__3);
+	    if (bbnd > 1.f && dabs(ur22) < 1.f) {
+		if (bbnd >= bignum * dabs(ur22)) {
+		    *scale = 1.f / bbnd;
+		}
+	    }
+
+	    xr2 = br2 * *scale / ur22;
+	    xr1 = *scale * br1 * ur11r - xr2 * (ur11r * ur12);
+	    if (cswap[icmax - 1]) {
+		x[x_dim1 + 1] = xr2;
+		x[x_dim1 + 2] = xr1;
+	    } else {
+		x[x_dim1 + 1] = xr1;
+		x[x_dim1 + 2] = xr2;
+	    }
+/* Computing MAX */
+	    r__1 = dabs(xr1), r__2 = dabs(xr2);
+	    *xnorm = dmax(r__1,r__2);
+
+/* Further scaling if norm(A) norm(X) > overflow */
+
+	    if (*xnorm > 1.f && cmax > 1.f) {
+		if (*xnorm > bignum / cmax) {
+		    temp = cmax / bignum;
+		    x[x_dim1 + 1] = temp * x[x_dim1 + 1];
+		    x[x_dim1 + 2] = temp * x[x_dim1 + 2];
+		    *xnorm = temp * *xnorm;
+		    *scale = temp * *scale;
+		}
+	    }
+	} else {
+
+/* Complex 2x2 system (w is complex) */
+
+/* Find the largest element in C */
+
+	    ci[0] = -(*wi) * *d1;
+	    ci[1] = 0.f;
+	    ci[2] = 0.f;
+	    ci[3] = -(*wi) * *d2;
+	    cmax = 0.f;
+	    icmax = 0;
+
+	    for (j = 1; j <= 4; ++j) {
+		if ((r__1 = crv[j - 1], dabs(r__1)) + (r__2 = civ[j - 1], 
+			dabs(r__2)) > cmax) {
+		    cmax = (r__1 = crv[j - 1], dabs(r__1)) + (r__2 = civ[j - 
+			    1], dabs(r__2));
+		    icmax = j;
+		}
+/* L20: */
+	    }
+
+/* If norm(C) < SMINI, use SMINI*identity. */
+
+	    if (cmax < smini) {
+/* Computing MAX */
+		r__5 = (r__1 = b[b_dim1 + 1], dabs(r__1)) + (r__2 = b[(b_dim1 
+			<< 1) + 1], dabs(r__2)), r__6 = (r__3 = b[b_dim1 + 2],
+			 dabs(r__3)) + (r__4 = b[(b_dim1 << 1) + 2], dabs(
+			r__4));
+		bnorm = dmax(r__5,r__6);
+		if (smini < 1.f && bnorm > 1.f) {
+		    if (bnorm > bignum * smini) {
+			*scale = 1.f / bnorm;
+		    }
+		}
+		temp = *scale / smini;
+		x[x_dim1 + 1] = temp * b[b_dim1 + 1];
+		x[x_dim1 + 2] = temp * b[b_dim1 + 2];
+		x[(x_dim1 << 1) + 1] = temp * b[(b_dim1 << 1) + 1];
+		x[(x_dim1 << 1) + 2] = temp * b[(b_dim1 << 1) + 2];
+		*xnorm = temp * bnorm;
+		*info = 1;
+		return 0;
+	    }
+
+/* Gaussian elimination with complete pivoting. */
+
+	    ur11 = crv[icmax - 1];
+	    ui11 = civ[icmax - 1];
+	    cr21 = crv[ipivot[(icmax << 2) - 3] - 1];
+	    ci21 = civ[ipivot[(icmax << 2) - 3] - 1];
+	    ur12 = crv[ipivot[(icmax << 2) - 2] - 1];
+	    ui12 = civ[ipivot[(icmax << 2) - 2] - 1];
+	    cr22 = crv[ipivot[(icmax << 2) - 1] - 1];
+	    ci22 = civ[ipivot[(icmax << 2) - 1] - 1];
+	    if (icmax == 1 || icmax == 4) {
+
+/* Code when off-diagonals of pivoted C are real */
+
+		if (dabs(ur11) > dabs(ui11)) {
+		    temp = ui11 / ur11;
+/* Computing 2nd power */
+		    r__1 = temp;
+		    ur11r = 1.f / (ur11 * (r__1 * r__1 + 1.f));
+		    ui11r = -temp * ur11r;
+		} else {
+		    temp = ur11 / ui11;
+/* Computing 2nd power */
+		    r__1 = temp;
+		    ui11r = -1.f / (ui11 * (r__1 * r__1 + 1.f));
+		    ur11r = -temp * ui11r;
+		}
+		lr21 = cr21 * ur11r;
+		li21 = cr21 * ui11r;
+		ur12s = ur12 * ur11r;
+		ui12s = ur12 * ui11r;
+		ur22 = cr22 - ur12 * lr21;
+		ui22 = ci22 - ur12 * li21;
+	    } else {
+
+/* Code when diagonals of pivoted C are real */
+
+		ur11r = 1.f / ur11;
+		ui11r = 0.f;
+		lr21 = cr21 * ur11r;
+		li21 = ci21 * ur11r;
+		ur12s = ur12 * ur11r;
+		ui12s = ui12 * ur11r;
+		ur22 = cr22 - ur12 * lr21 + ui12 * li21;
+		ui22 = -ur12 * li21 - ui12 * lr21;
+	    }
+	    u22abs = dabs(ur22) + dabs(ui22);
+
+/* If smaller pivot < SMINI, use SMINI */
+
+	    if (u22abs < smini) {
+		ur22 = smini;
+		ui22 = 0.f;
+		*info = 1;
+	    }
+	    if (rswap[icmax - 1]) {
+		br2 = b[b_dim1 + 1];
+		br1 = b[b_dim1 + 2];
+		bi2 = b[(b_dim1 << 1) + 1];
+		bi1 = b[(b_dim1 << 1) + 2];
+	    } else {
+		br1 = b[b_dim1 + 1];
+		br2 = b[b_dim1 + 2];
+		bi1 = b[(b_dim1 << 1) + 1];
+		bi2 = b[(b_dim1 << 1) + 2];
+	    }
+	    br2 = br2 - lr21 * br1 + li21 * bi1;
+	    bi2 = bi2 - li21 * br1 - lr21 * bi1;
+/* Computing MAX */
+	    r__1 = (dabs(br1) + dabs(bi1)) * (u22abs * (dabs(ur11r) + dabs(
+		    ui11r))), r__2 = dabs(br2) + dabs(bi2);
+	    bbnd = dmax(r__1,r__2);
+	    if (bbnd > 1.f && u22abs < 1.f) {
+		if (bbnd >= bignum * u22abs) {
+		    *scale = 1.f / bbnd;
+		    br1 = *scale * br1;
+		    bi1 = *scale * bi1;
+		    br2 = *scale * br2;
+		    bi2 = *scale * bi2;
+		}
+	    }
+
+	    sladiv_(&br2, &bi2, &ur22, &ui22, &xr2, &xi2);
+	    xr1 = ur11r * br1 - ui11r * bi1 - ur12s * xr2 + ui12s * xi2;
+	    xi1 = ui11r * br1 + ur11r * bi1 - ui12s * xr2 - ur12s * xi2;
+	    if (cswap[icmax - 1]) {
+		x[x_dim1 + 1] = xr2;
+		x[x_dim1 + 2] = xr1;
+		x[(x_dim1 << 1) + 1] = xi2;
+		x[(x_dim1 << 1) + 2] = xi1;
+	    } else {
+		x[x_dim1 + 1] = xr1;
+		x[x_dim1 + 2] = xr2;
+		x[(x_dim1 << 1) + 1] = xi1;
+		x[(x_dim1 << 1) + 2] = xi2;
+	    }
+/* Computing MAX */
+	    r__1 = dabs(xr1) + dabs(xi1), r__2 = dabs(xr2) + dabs(xi2);
+	    *xnorm = dmax(r__1,r__2);
+
+/* Further scaling if norm(A) norm(X) > overflow */
+
+	    if (*xnorm > 1.f && cmax > 1.f) {
+		if (*xnorm > bignum / cmax) {
+		    temp = cmax / bignum;
+		    x[x_dim1 + 1] = temp * x[x_dim1 + 1];
+		    x[x_dim1 + 2] = temp * x[x_dim1 + 2];
+		    x[(x_dim1 << 1) + 1] = temp * x[(x_dim1 << 1) + 1];
+		    x[(x_dim1 << 1) + 2] = temp * x[(x_dim1 << 1) + 2];
+		    *xnorm = temp * *xnorm;
+		    *scale = temp * *scale;
+		}
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of SLALN2 */
+
+} /* slaln2_ */
+
+#undef crv
+#undef civ
+#undef cr
+#undef ci
diff --git a/contrib/libs/clapack/slals0.c b/contrib/libs/clapack/slals0.c
new file mode 100644
index 0000000000..5d29eaa687
--- /dev/null
+++ b/contrib/libs/clapack/slals0.c
@@ -0,0 +1,470 @@
+/* slals0.f -- translated by f2c (version 20061008).
+   You must link the resulting object file with libf2c:
+   on Microsoft Windows system, link with libf2c.lib;
+   on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+   or, if you install libf2c.a in a standard place, with -lf2c -lm
+   -- in that order, at the end of the command line, as in
+      cc *.o -lf2c -lm
+   Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+      http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+/* (passed by address below, as required by the f2c/Fortran calling */
+/* convention) */
+
+static real c_b5 = -1.f;
+static integer c__1 = 1;
+static real c_b11 = 1.f;
+static real c_b13 = 0.f;
+static integer c__0 = 0;
+
+/* Subroutine */ int slals0_(integer *icompq, integer *nl, integer *nr, 
+	integer *sqre, integer *nrhs, real *b, integer *ldb, real *bx, 
+	integer *ldbx, integer *perm, integer *givptr, integer *givcol, 
+	integer *ldgcol, real *givnum, integer *ldgnum, real *poles, real *
+	difl, real *difr, real *z__, integer *k, real *c__, real *s, real *
+	work, integer *info)
+{
+    /* System generated locals */
+    integer givcol_dim1, givcol_offset, b_dim1, b_offset, bx_dim1, bx_offset, 
+	    difr_dim1, difr_offset, givnum_dim1, givnum_offset, poles_dim1, 
+	    poles_offset, i__1, i__2;
+    real r__1;
+
+    /* Local variables */
+    integer i__, j, m, n;
+    real dj;
+    integer nlp1;
+    real temp;
+    extern /* Subroutine */ int srot_(integer *, real *, integer *, real *, 
+	    integer *, real *, real *);
+    extern doublereal snrm2_(integer *, real *, integer *);
+    real diflj, difrj, dsigj;
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *), 
+	    sgemv_(char *, integer *, integer *, real *, real *, integer *, 
+	    real *, integer *, real *, real *, integer *), scopy_(
+	    integer *, real *, integer *, real *, integer *);
+    extern doublereal slamc3_(real *, real *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real dsigjp;
+    extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *, 
+	    real *, integer *, integer *, real *, integer *, integer *), slacpy_(char *, integer *, integer *, real *, integer *, 
+	    real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLALS0 applies back the multiplying factors of either the left or the */
+/* right singular vector matrix of a diagonal matrix appended by a row */
+/* to the right hand side matrix B in solving the least squares problem */
+/* using the divide-and-conquer SVD approach. */
+
+/* For the left singular vector matrix, three types of orthogonal */
+/* matrices are involved: */
+
+/* (1L) Givens rotations: the number of such rotations is GIVPTR; the */
+/* pairs of columns/rows they were applied to are stored in GIVCOL; */
+/* and the C- and S-values of these rotations are stored in GIVNUM. */
+
+/* (2L) Permutation. The (NL+1)-st row of B is to be moved to the first */
+/* row, and for J=2:N, PERM(J)-th row of B is to be moved to the */
+/* J-th row. */
+
+/* (3L) The left singular vector matrix of the remaining matrix. */
+
+/* For the right singular vector matrix, four types of orthogonal */
+/* matrices are involved: */
+
+/* (1R) The right singular vector matrix of the remaining matrix. */
+
+/* (2R) If SQRE = 1, one extra Givens rotation to generate the right */
+/* null space. */
+
+/* (3R) The inverse transformation of (2L). */
+
+/* (4R) The inverse transformation of (1L). */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* Specifies whether singular vectors are to be computed in */
+/* factored form: */
+/* = 0: Left singular vector matrix. */
+/* = 1: Right singular vector matrix. */
+
+/* NL (input) INTEGER */
+/* The row dimension of the upper block. NL >= 1. */
+
+/* NR (input) INTEGER */
+/* The row dimension of the lower block. NR >= 1. */
+
+/* SQRE (input) INTEGER */
+/* = 0: the lower block is an NR-by-NR square matrix. */
+/* = 1: the lower block is an NR-by-(NR+1) rectangular matrix. */
+
+/* The bidiagonal matrix has row dimension N = NL + NR + 1, */
+/* and column dimension M = N + SQRE. */
+
+/* NRHS (input) INTEGER */
+/* The number of columns of B and BX. NRHS must be at least 1. */
+
+/* B (input/output) REAL array, dimension ( LDB, NRHS ) */
+/* On input, B contains the right hand sides of the least */
+/* squares problem in rows 1 through M. On output, B contains */
+/* the solution X in rows 1 through N. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB must be at least */
+/* max(1,MAX( M, N ) ). */
+
+/* BX (workspace) REAL array, dimension ( LDBX, NRHS ) */
+
+/* LDBX (input) INTEGER */
+/* The leading dimension of BX. */
+
+/* PERM (input) INTEGER array, dimension ( N ) */
+/* The permutations (from deflation and sorting) applied */
+/* to the two blocks. */
+
+/* GIVPTR (input) INTEGER */
+/* The number of Givens rotations which took place in this */
+/* subproblem. */
+
+/* GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 ) */
+/* Each pair of numbers indicates a pair of rows/columns */
+/* involved in a Givens rotation. */
+
+/* LDGCOL (input) INTEGER */
+/* The leading dimension of GIVCOL, must be at least N. */
+
+/* GIVNUM (input) REAL array, dimension ( LDGNUM, 2 ) */
+/* Each number indicates the C or S value used in the */
+/* corresponding Givens rotation. */
+
+/* LDGNUM (input) INTEGER */
+/* The leading dimension of arrays DIFR, POLES and */
+/* GIVNUM, must be at least K. */
+
+/* POLES (input) REAL array, dimension ( LDGNUM, 2 ) */
+/* On entry, POLES(1:K, 1) contains the new singular */
+/* values obtained from solving the secular equation, and */
+/* POLES(1:K, 2) is an array containing the poles in the secular */
+/* equation. */
+
+/* DIFL (input) REAL array, dimension ( K ). */
+/* On entry, DIFL(I) is the distance between I-th updated */
+/* (undeflated) singular value and the I-th (undeflated) old */
+/* singular value. */
+
+/* DIFR (input) REAL array, dimension ( LDGNUM, 2 ). */
+/* On entry, DIFR(I, 1) contains the distances between I-th */
+/* updated (undeflated) singular value and the I+1-th */
+/* (undeflated) old singular value. And DIFR(I, 2) is the */
+/* normalizing factor for the I-th right singular vector. */
+
+/* Z (input) REAL array, dimension ( K ) */
+/* Contain the components of the deflation-adjusted updating row */
+/* vector. */
+
+/* K (input) INTEGER */
+/* Contains the dimension of the non-deflated matrix, */
+/* This is the order of the related secular equation. 1 <= K <=N. */
+
+/* C (input) REAL */
+/* C contains garbage if SQRE =0 and the C-value of a Givens */
+/* rotation related to the right null space if SQRE = 1. */
+
+/* S (input) REAL */
+/* S contains garbage if SQRE =0 and the S-value of a Givens */
+/* rotation related to the right null space if SQRE = 1. */
+
+/* WORK (workspace) REAL array, dimension ( K ) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Ren-Cang Li, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* Shift array bases for 1-based Fortran-style (column-major) */
+    /* indexing throughout the routine. */
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    bx_dim1 = *ldbx;
+    bx_offset = 1 + bx_dim1;
+    bx -= bx_offset;
+    --perm;
+    givcol_dim1 = *ldgcol;
+    givcol_offset = 1 + givcol_dim1;
+    givcol -= givcol_offset;
+    difr_dim1 = *ldgnum;
+    difr_offset = 1 + difr_dim1;
+    difr -= difr_offset;
+    poles_dim1 = *ldgnum;
+    poles_offset = 1 + poles_dim1;
+    poles -= poles_offset;
+    givnum_dim1 = *ldgnum;
+    givnum_offset = 1 + givnum_dim1;
+    givnum -= givnum_offset;
+    --difl;
+    --z__;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+
+    if (*icompq < 0 || *icompq > 1) {
+	*info = -1;
+    } else if (*nl < 1) {
+	*info = -2;
+    } else if (*nr < 1) {
+	*info = -3;
+    } else if (*sqre < 0 || *sqre > 1) {
+	*info = -4;
+    }
+
+    n = *nl + *nr + 1;
+
+    if (*nrhs < 1) {
+	*info = -5;
+    } else if (*ldb < n) {
+	*info = -7;
+    } else if (*ldbx < n) {
+	*info = -9;
+    } else if (*givptr < 0) {
+	*info = -11;
+    } else if (*ldgcol < n) {
+	*info = -13;
+    } else if (*ldgnum < n) {
+	*info = -15;
+    } else if (*k < 1) {
+	*info = -20;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SLALS0", &i__1);
+	return 0;
+    }
+
+    m = n + *sqre;
+    nlp1 = *nl + 1;
+
+    if (*icompq == 0) {
+
+/* Apply back orthogonal transformations from the left. */
+
+/* Step (1L): apply back the Givens rotations performed. */
+
+	i__1 = *givptr;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    srot_(nrhs, &b[givcol[i__ + (givcol_dim1 << 1)] + b_dim1], ldb, &
+		    b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[i__ + 
+		    (givnum_dim1 << 1)], &givnum[i__ + givnum_dim1]);
+/* L10: */
+	}
+
+/* Step (2L): permute rows of B. */
+
+	scopy_(nrhs, &b[nlp1 + b_dim1], ldb, &bx[bx_dim1 + 1], ldbx);
+	i__1 = n;
+	for (i__ = 2; i__ <= i__1; ++i__) {
+	    scopy_(nrhs, &b[perm[i__] + b_dim1], ldb, &bx[i__ + bx_dim1], 
+		    ldbx);
+/* L20: */
+	}
+
+/* Step (3L): apply the inverse of the left singular vector */
+/* matrix to BX. */
+
+	if (*k == 1) {
+	    scopy_(nrhs, &bx[bx_offset], ldbx, &b[b_offset], ldb);
+	    if (z__[1] < 0.f) {
+		sscal_(nrhs, &c_b5, &b[b_offset], ldb);
+	    }
+	} else {
+	    i__1 = *k;
+	    for (j = 1; j <= i__1; ++j) {
+		diflj = difl[j];
+		dj = poles[j + poles_dim1];
+		dsigj = -poles[j + (poles_dim1 << 1)];
+		if (j < *k) {
+		    difrj = -difr[j + difr_dim1];
+		    dsigjp = -poles[j + 1 + (poles_dim1 << 1)];
+		}
+		if (z__[j] == 0.f || poles[j + (poles_dim1 << 1)] == 0.f) {
+		    work[j] = 0.f;
+		} else {
+		    work[j] = -poles[j + (poles_dim1 << 1)] * z__[j] / diflj / 
+			    (poles[j + (poles_dim1 << 1)] + dj);
+		}
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    if (z__[i__] == 0.f || poles[i__ + (poles_dim1 << 1)] == 
+			    0.f) {
+			work[i__] = 0.f;
+		    } else {
+/* SLAMC3 forces the subtraction through memory, guarding the */
+/* difference against over-aggressive compiler optimization. */
+			work[i__] = poles[i__ + (poles_dim1 << 1)] * z__[i__] 
+				/ (slamc3_(&poles[i__ + (poles_dim1 << 1)], &
+				dsigj) - diflj) / (poles[i__ + (poles_dim1 << 
+				1)] + dj);
+		    }
+/* L30: */
+		}
+		i__2 = *k;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    if (z__[i__] == 0.f || poles[i__ + (poles_dim1 << 1)] == 
+			    0.f) {
+			work[i__] = 0.f;
+		    } else {
+			work[i__] = poles[i__ + (poles_dim1 << 1)] * z__[i__] 
+				/ (slamc3_(&poles[i__ + (poles_dim1 << 1)], &
+				dsigjp) + difrj) / (poles[i__ + (poles_dim1 << 
+				1)] + dj);
+		    }
+/* L40: */
+		}
+/* Fix the first component at -1, take the 2-norm of WORK, apply */
+/* WORK to BX via SGEMV into row J of B, then rescale that row by */
+/* 1/TEMP with SLASCL (scale factor C_B11/TEMP = 1/TEMP). */
+		work[1] = -1.f;
+		temp = snrm2_(k, &work[1], &c__1);
+		sgemv_("T", k, nrhs, &c_b11, &bx[bx_offset], ldbx, &work[1], &
+			c__1, &c_b13, &b[j + b_dim1], ldb);
+		slascl_("G", &c__0, &c__0, &temp, &c_b11, &c__1, nrhs, &b[j + 
+			b_dim1], ldb, info);
+/* L50: */
+	    }
+	}
+
+/* Move the deflated rows of BX to B also. */
+
+	if (*k < max(m,n)) {
+	    i__1 = n - *k;
+	    slacpy_("A", &i__1, nrhs, &bx[*k + 1 + bx_dim1], ldbx, &b[*k + 1 
+		    + b_dim1], ldb);
+	}
+    } else {
+
+/* Apply back the right orthogonal transformations. */
+
+/* Step (1R): apply back the new right singular vector matrix */
+/* to B. */
+
+	if (*k == 1) {
+	    scopy_(nrhs, &b[b_offset], ldb, &bx[bx_offset], ldbx);
+	} else {
+	    i__1 = *k;
+	    for (j = 1; j <= i__1; ++j) {
+		dsigj = poles[j + (poles_dim1 << 1)];
+		if (z__[j] == 0.f) {
+		    work[j] = 0.f;
+		} else {
+		    work[j] = -z__[j] / difl[j] / (dsigj + poles[j + 
+			    poles_dim1]) / difr[j + (difr_dim1 << 1)];
+		}
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    if (z__[j] == 0.f) {
+			work[i__] = 0.f;
+		    } else {
+			r__1 = -poles[i__ + 1 + (poles_dim1 << 1)];
+			work[i__] = z__[j] / (slamc3_(&dsigj, &r__1) - difr[
+				i__ + difr_dim1]) / (dsigj + poles[i__ + 
+				poles_dim1]) / difr[i__ + (difr_dim1 << 1)];
+		    }
+/* L60: */
+		}
+		i__2 = *k;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    if (z__[j] == 0.f) {
+			work[i__] = 0.f;
+		    } else {
+			r__1 = -poles[i__ + (poles_dim1 << 1)];
+			work[i__] = z__[j] / (slamc3_(&dsigj, &r__1) - difl[
+				i__]) / (dsigj + poles[i__ + poles_dim1]) / 
+				difr[i__ + (difr_dim1 << 1)];
+		    }
+/* L70: */
+		}
+		sgemv_("T", k, nrhs, &c_b11, &b[b_offset], ldb, &work[1], &
+			c__1, &c_b13, &bx[j + bx_dim1], ldbx);
+/* L80: */
+	    }
+	}
+
+/* Step (2R): if SQRE = 1, apply back the rotation that is */
+/* related to the right null space of the subproblem. */
+
+	if (*sqre == 1) {
+	    scopy_(nrhs, &b[m + b_dim1], ldb, &bx[m + bx_dim1], ldbx);
+	    srot_(nrhs, &bx[bx_dim1 + 1], ldbx, &bx[m + bx_dim1], ldbx, c__, 
+		    s);
+	}
+	if (*k < max(m,n)) {
+	    i__1 = n - *k;
+	    slacpy_("A", &i__1, nrhs, &b[*k + 1 + b_dim1], ldb, &bx[*k + 1 + 
+		    bx_dim1], ldbx);
+	}
+
+/* Step (3R): permute rows of B. */
+
+	scopy_(nrhs, &bx[bx_dim1 + 1], ldbx, &b[nlp1 + b_dim1], ldb);
+	if (*sqre == 1) {
+	    scopy_(nrhs, &bx[m + bx_dim1], ldbx, &b[m + b_dim1], ldb);
+	}
+	i__1 = n;
+	for (i__ = 2; i__ <= i__1; ++i__) {
+	    scopy_(nrhs, &bx[i__ + bx_dim1], ldbx, &b[perm[i__] + b_dim1], 
+		    ldb);
+/* L90: */
+	}
+
+/* Step (4R): apply back the Givens rotations performed. */
+/* (in reverse order and with the S-value negated, i.e. the */
+/* inverse of each rotation applied in Step (1L)) */
+
+	for (i__ = *givptr; i__ >= 1; --i__) {
+	    r__1 = -givnum[i__ + givnum_dim1];
+	    srot_(nrhs, &b[givcol[i__ + (givcol_dim1 << 1)] + b_dim1], ldb, &
+		    b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[i__ + 
+		    (givnum_dim1 << 1)], &r__1);
+/* L100: */
+	}
+    }
+
+    return 0;
+
+/* End of SLALS0 */
+
+} /* slals0_ */
diff --git a/contrib/libs/clapack/slalsa.c b/contrib/libs/clapack/slalsa.c
new file mode 100644
index 0000000000..d3495764a4
--- /dev/null
+++ b/contrib/libs/clapack/slalsa.c
@@ -0,0 +1,454 @@
+/* slalsa.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b7 = 1.f;
+static real c_b8 = 0.f;
+static integer c__2 = 2;
+
+/* Subroutine */ int slalsa_(integer *icompq, integer *smlsiz, integer *n,
+ integer *nrhs, real *b, integer *ldb, real *bx, integer *ldbx, real *
+ u, integer *ldu, real *vt, integer *k, real *difl, real *difr, real *
+ z__, real *poles, integer *givptr, integer *givcol, integer *ldgcol,
+ integer *perm, real *givnum, real *c__, real *s, real *work, integer *
+ iwork, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, b_dim1,
+ b_offset, bx_dim1, bx_offset, difl_dim1, difl_offset, difr_dim1,
+ difr_offset, givnum_dim1, givnum_offset, poles_dim1, poles_offset,
+ u_dim1, u_offset, vt_dim1, vt_offset, z_dim1, z_offset, i__1,
+ i__2;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ integer i__, j, i1, ic, lf, nd, ll, nl, nr, im1, nlf, nrf, lvl, ndb1,
+ nlp1, lvl2, nrp1, nlvl, sqre, inode, ndiml;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ integer ndimr;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), slals0_(integer *, integer *, integer *, integer *,
+ integer *, real *, integer *, real *, integer *, integer *,
+ integer *, integer *, integer *, real *, integer *, real *, real *
+, real *, real *, integer *, real *, real *, real *, integer *),
+ xerbla_(char *, integer *), slasdt_(integer *, integer *,
+ integer *, integer *, integer *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLALSA is an itermediate step in solving the least squares problem */
+/* by computing the SVD of the coefficient matrix in compact form (The */
+/* singular vectors are computed as products of simple orthorgonal */
+/* matrices.). */
+
+/* If ICOMPQ = 0, SLALSA applies the inverse of the left singular vector */
+/* matrix of an upper bidiagonal matrix to the right hand side; and if */
+/* ICOMPQ = 1, SLALSA applies the right singular vector matrix to the */
+/* right hand side. The singular vector matrices were generated in */
+/* compact form by SLALSA. */
+
+/* Arguments */
+/* ========= */
+
+
+/* ICOMPQ (input) INTEGER */
+/* Specifies whether the left or the right singular vector */
+/* matrix is involved. */
+/* = 0: Left singular vector matrix */
+/* = 1: Right singular vector matrix */
+
+/* SMLSIZ (input) INTEGER */
+/* The maximum size of the subproblems at the bottom of the */
+/* computation tree. */
+
+/* N (input) INTEGER */
+/* The row and column dimensions of the upper bidiagonal matrix. */
+
+/* NRHS (input) INTEGER */
+/* The number of columns of B and BX. NRHS must be at least 1. */
+
+/* B (input/output) REAL array, dimension ( LDB, NRHS ) */
+/* On input, B contains the right hand sides of the least */
+/* squares problem in rows 1 through M. */
+/* On output, B contains the solution X in rows 1 through N. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B in the calling subprogram. */
+/* LDB must be at least max(1,MAX( M, N ) ). */
+
+/* BX (output) REAL array, dimension ( LDBX, NRHS ) */
+/* On exit, the result of applying the left or right singular */
+/* vector matrix to B. */
+
+/* LDBX (input) INTEGER */
+/* The leading dimension of BX. */
+
+/* U (input) REAL array, dimension ( LDU, SMLSIZ ). */
+/* On entry, U contains the left singular vector matrices of all */
+/* subproblems at the bottom level. */
+
+/* LDU (input) INTEGER, LDU = > N. */
+/* The leading dimension of arrays U, VT, DIFL, DIFR, */
+/* POLES, GIVNUM, and Z. */
+
+/* VT (input) REAL array, dimension ( LDU, SMLSIZ+1 ). */
+/* On entry, VT' contains the right singular vector matrices of */
+/* all subproblems at the bottom level. */
+
+/* K (input) INTEGER array, dimension ( N ). */
+
+/* DIFL (input) REAL array, dimension ( LDU, NLVL ). */
+/* where NLVL = INT(log_2 (N/(SMLSIZ+1))) + 1. */
+
+/* DIFR (input) REAL array, dimension ( LDU, 2 * NLVL ). */
+/* On entry, DIFL(*, I) and DIFR(*, 2 * I -1) record */
+/* distances between singular values on the I-th level and */
+/* singular values on the (I -1)-th level, and DIFR(*, 2 * I) */
+/* record the normalizing factors of the right singular vectors */
+/* matrices of subproblems on I-th level. */
+
+/* Z (input) REAL array, dimension ( LDU, NLVL ). */
+/* On entry, Z(1, I) contains the components of the deflation- */
+/* adjusted updating row vector for subproblems on the I-th */
+/* level. */
+
+/* POLES (input) REAL array, dimension ( LDU, 2 * NLVL ). */
+/* On entry, POLES(*, 2 * I -1: 2 * I) contains the new and old */
+/* singular values involved in the secular equations on the I-th */
+/* level. */
+
+/* GIVPTR (input) INTEGER array, dimension ( N ). */
+/* On entry, GIVPTR( I ) records the number of Givens */
+/* rotations performed on the I-th problem on the computation */
+/* tree. */
+
+/* GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 * NLVL ). */
+/* On entry, for each I, GIVCOL(*, 2 * I - 1: 2 * I) records the */
+/* locations of Givens rotations performed on the I-th level on */
+/* the computation tree. */
+
+/* LDGCOL (input) INTEGER, LDGCOL = > N. */
+/* The leading dimension of arrays GIVCOL and PERM. */
+
+/* PERM (input) INTEGER array, dimension ( LDGCOL, NLVL ). */
+/* On entry, PERM(*, I) records permutations done on the I-th */
+/* level of the computation tree. */
+
+/* GIVNUM (input) REAL array, dimension ( LDU, 2 * NLVL ). */
+/* On entry, GIVNUM(*, 2 *I -1 : 2 * I) records the C- and S- */
+/* values of Givens rotations performed on the I-th level on the */
+/* computation tree. */
+
+/* C (input) REAL array, dimension ( N ). */
+/* On entry, if the I-th subproblem is not square, */
+/* C( I ) contains the C-value of a Givens rotation related to */
+/* the right null space of the I-th subproblem. */
+
+/* S (input) REAL array, dimension ( N ). */
+/* On entry, if the I-th subproblem is not square, */
+/* S( I ) contains the S-value of a Givens rotation related to */
+/* the right null space of the I-th subproblem. */
+
+/* WORK (workspace) REAL array. */
+/* The dimension must be at least N. */
+
+/* IWORK (workspace) INTEGER array. */
+/* The dimension must be at least 3 * N */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Ren-Cang Li, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ bx_dim1 = *ldbx;
+ bx_offset = 1 + bx_dim1;
+ bx -= bx_offset;
+ givnum_dim1 = *ldu;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ poles_dim1 = *ldu;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ z_dim1 = *ldu;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ difr_dim1 = *ldu;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ difl_dim1 = *ldu;
+ difl_offset = 1 + difl_dim1;
+ difl -= difl_offset;
+ vt_dim1 = *ldu;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ --k;
+ --givptr;
+ perm_dim1 = *ldgcol;
+ perm_offset = 1 + perm_dim1;
+ perm -= perm_offset;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ --c__;
+ --s;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*smlsiz < 3) {
+ *info = -2;
+ } else if (*n < *smlsiz) {
+ *info = -3;
+ } else if (*nrhs < 1) {
+ *info = -4;
+ } else if (*ldb < *n) {
+ *info = -6;
+ } else if (*ldbx < *n) {
+ *info = -8;
+ } else if (*ldu < *n) {
+ *info = -10;
+ } else if (*ldgcol < *n) {
+ *info = -19;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLALSA", &i__1);
+ return 0;
+ }
+
+/* Book-keeping and setting up the computation tree. */
+
+ inode = 1;
+ ndiml = inode + *n;
+ ndimr = ndiml + *n;
+
+ slasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+ smlsiz);
+
+/* The following code applies back the left singular vector factors. */
+/* For applying back the right singular vector factors, go to 50. */
+
+ if (*icompq == 1) {
+ goto L50;
+ }
+
+/* The nodes on the bottom level of the tree were solved */
+/* by SLASDQ. The corresponding left and right singular vector */
+/* matrices are in explicit form. First apply back the left */
+/* singular vector matrices. */
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/* IC : center row of each node */
+/* NL : number of rows of left subproblem */
+/* NR : number of rows of right subproblem */
+/* NLF: starting row of the left subproblem */
+/* NRF: starting row of the right subproblem */
+
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nr = iwork[ndimr + i1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ sgemm_("T", "N", &nl, nrhs, &nl, &c_b7, &u[nlf + u_dim1], ldu, &b[nlf
+ + b_dim1], ldb, &c_b8, &bx[nlf + bx_dim1], ldbx);
+ sgemm_("T", "N", &nr, nrhs, &nr, &c_b7, &u[nrf + u_dim1], ldu, &b[nrf
+ + b_dim1], ldb, &c_b8, &bx[nrf + bx_dim1], ldbx);
+/* L10: */
+ }
+
+/* Next copy the rows of B that correspond to unchanged rows */
+/* in the bidiagonal matrix to BX. */
+
+ i__1 = nd;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ic = iwork[inode + i__ - 1];
+ scopy_(nrhs, &b[ic + b_dim1], ldb, &bx[ic + bx_dim1], ldbx);
+/* L20: */
+ }
+
+/* Finally go through the left singular vector matrices of all */
+/* the other subproblems bottom-up on the tree. */
+
+ j = pow_ii(&c__2, &nlvl);
+ sqre = 0;
+
+ for (lvl = nlvl; lvl >= 1; --lvl) {
+ lvl2 = (lvl << 1) - 1;
+
+/* find the first node LF and last node LL on */
+/* the current level LVL */
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__1 = lvl - 1;
+ lf = pow_ii(&c__2, &i__1);
+ ll = (lf << 1) - 1;
+ }
+ i__1 = ll;
+ for (i__ = lf; i__ <= i__1; ++i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ --j;
+ slals0_(icompq, &nl, &nr, &sqre, nrhs, &bx[nlf + bx_dim1], ldbx, &
+ b[nlf + b_dim1], ldb, &perm[nlf + lvl * perm_dim1], &
+ givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+ givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+ poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+ lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+ j], &s[j], &work[1], info);
+/* L30: */
+ }
+/* L40: */
+ }
+ goto L90;
+
+/* ICOMPQ = 1: applying back the right singular vector factors. */
+
+L50:
+
+/* First now go through the right singular vector matrices of all */
+/* the tree nodes top-down. */
+
+ j = 0;
+ i__1 = nlvl;
+ for (lvl = 1; lvl <= i__1; ++lvl) {
+ lvl2 = (lvl << 1) - 1;
+
+/* Find the first node LF and last node LL on */
+/* the current level LVL. */
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__2 = lvl - 1;
+ lf = pow_ii(&c__2, &i__2);
+ ll = (lf << 1) - 1;
+ }
+ i__2 = lf;
+ for (i__ = ll; i__ >= i__2; --i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ if (i__ == ll) {
+ sqre = 0;
+ } else {
+ sqre = 1;
+ }
+ ++j;
+ slals0_(icompq, &nl, &nr, &sqre, nrhs, &b[nlf + b_dim1], ldb, &bx[
+ nlf + bx_dim1], ldbx, &perm[nlf + lvl * perm_dim1], &
+ givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+ givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+ poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+ lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+ j], &s[j], &work[1], info);
+/* L60: */
+ }
+/* L70: */
+ }
+
+/* The nodes on the bottom level of the tree were solved */
+/* by SLASDQ. The corresponding right singular vector */
+/* matrices are in explicit form. Apply them back. */
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nr = iwork[ndimr + i1];
+ nlp1 = nl + 1;
+ if (i__ == nd) {
+ nrp1 = nr;
+ } else {
+ nrp1 = nr + 1;
+ }
+ nlf = ic - nl;
+ nrf = ic + 1;
+ sgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b7, &vt[nlf + vt_dim1], ldu, &
+ b[nlf + b_dim1], ldb, &c_b8, &bx[nlf + bx_dim1], ldbx);
+ sgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b7, &vt[nrf + vt_dim1], ldu, &
+ b[nrf + b_dim1], ldb, &c_b8, &bx[nrf + bx_dim1], ldbx);
+/* L80: */
+ }
+
+L90:
+
+ return 0;
+
+/* End of SLALSA */
+
+} /* slalsa_ */
diff --git a/contrib/libs/clapack/slalsd.c b/contrib/libs/clapack/slalsd.c
new file mode 100644
index 0000000000..b90ddbf0e9
--- /dev/null
+++ b/contrib/libs/clapack/slalsd.c
@@ -0,0 +1,523 @@
+/* slalsd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b6 = 0.f;
+static integer c__0 = 0;
+static real c_b11 = 1.f;
+
+/* Subroutine */ int slalsd_(char *uplo, integer *smlsiz, integer *n, integer
+ *nrhs, real *d__, real *e, real *b, integer *ldb, real *rcond,
+ integer *rank, real *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2;
+ real r__1;
+
+ /* Builtin functions */
+ double log(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ integer c__, i__, j, k;
+ real r__;
+ integer s, u, z__;
+ real cs;
+ integer bx;
+ real sn;
+ integer st, vt, nm1, st1;
+ real eps;
+ integer iwk;
+ real tol;
+ integer difl, difr;
+ real rcnd;
+ integer perm, nsub, nlvl, sqre, bxst;
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *), sgemm_(char *, char *, integer *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+, real *, real *, integer *);
+ integer poles, sizei, nsize;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ integer nwork, icmpq1, icmpq2;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int slasda_(integer *, integer *, integer *,
+ integer *, real *, real *, real *, integer *, real *, integer *,
+ real *, real *, real *, real *, integer *, integer *, integer *,
+ integer *, real *, real *, real *, real *, integer *, integer *),
+ xerbla_(char *, integer *), slalsa_(integer *, integer *,
+ integer *, integer *, real *, integer *, real *, integer *, real *
+, integer *, real *, integer *, real *, real *, real *, real *,
+ integer *, integer *, integer *, integer *, real *, real *, real *
+, real *, integer *, integer *), slascl_(char *, integer *,
+ integer *, real *, real *, integer *, integer *, real *, integer *
+, integer *);
+ integer givcol;
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int slasdq_(char *, integer *, integer *, integer
+ *, integer *, integer *, real *, real *, real *, integer *, real *
+, integer *, real *, integer *, real *, integer *),
+ slacpy_(char *, integer *, integer *, real *, integer *, real *,
+ integer *), slartg_(real *, real *, real *, real *, real *
+), slaset_(char *, integer *, integer *, real *, real *, real *,
+ integer *);
+ real orgnrm;
+ integer givnum;
+ extern doublereal slanst_(char *, integer *, real *, real *);
+ extern /* Subroutine */ int slasrt_(char *, integer *, real *, integer *);
+ integer givptr, smlszp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLALSD uses the singular value decomposition of A to solve the least */
+/* squares problem of finding X to minimize the Euclidean norm of each */
+/* column of A*X-B, where A is N-by-N upper bidiagonal, and X and B */
+/* are N-by-NRHS. The solution X overwrites B. */
+
+/* The singular values of A smaller than RCOND times the largest */
+/* singular value are treated as zero in solving the least squares */
+/* problem; in this case a minimum norm solution is returned. */
+/* The actual singular values are returned in D in ascending order. */
+
+/* This code makes very mild assumptions about floating point */
+/* arithmetic. It will work on machines with a guard digit in */
+/* add/subtract, or on those binary machines without guard digits */
+/* which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2. */
+/* It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': D and E define an upper bidiagonal matrix. */
+/* = 'L': D and E define a lower bidiagonal matrix. */
+
+/* SMLSIZ (input) INTEGER */
+/* The maximum size of the subproblems at the bottom of the */
+/* computation tree. */
+
+/* N (input) INTEGER */
+/* The dimension of the bidiagonal matrix. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of columns of B. NRHS must be at least 1. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry D contains the main diagonal of the bidiagonal */
+/* matrix. On exit, if INFO = 0, D contains its singular values. */
+
+/* E (input/output) REAL array, dimension (N-1) */
+/* Contains the super-diagonal entries of the bidiagonal matrix. */
+/* On exit, E has been destroyed. */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On input, B contains the right hand sides of the least */
+/* squares problem. On output, B contains the solution X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B in the calling subprogram. */
+/* LDB must be at least max(1,N). */
+
+/* RCOND (input) REAL */
+/* The singular values of A less than or equal to RCOND times */
+/* the largest singular value are treated as zero in solving */
+/* the least squares problem. If RCOND is negative, */
+/* machine precision is used instead. */
+/* For example, if diag(S)*X=B were the least squares problem, */
+/* where diag(S) is a diagonal matrix of singular values, the */
+/* solution would be X(i) = B(i) / S(i) if S(i) is greater than */
+/* RCOND*max(S), and X(i) = 0 if S(i) is less than or equal to */
+/* RCOND*max(S). */
+
+/* RANK (output) INTEGER */
+/* The number of singular values of A greater than RCOND times */
+/* the largest singular value. */
+
+/* WORK (workspace) REAL array, dimension at least */
+/* (9*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2), */
+/* where NLVL = max(0, INT(log_2 (N/(SMLSIZ+1))) + 1). */
+
+/* IWORK (workspace) INTEGER array, dimension at least */
+/* (3*N*NLVL + 11*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: The algorithm failed to compute an singular value while */
+/* working on the submatrix lying in rows and columns */
+/* INFO/(N+1) through MOD(INFO,N+1). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Ren-Cang Li, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 1) {
+ *info = -4;
+ } else if (*ldb < 1 || *ldb < *n) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLALSD", &i__1);
+ return 0;
+ }
+
+ eps = slamch_("Epsilon");
+
+/* Set up the tolerance. */
+
+ if (*rcond <= 0.f || *rcond >= 1.f) {
+ rcnd = eps;
+ } else {
+ rcnd = *rcond;
+ }
+
+ *rank = 0;
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ } else if (*n == 1) {
+ if (d__[1] == 0.f) {
+ slaset_("A", &c__1, nrhs, &c_b6, &c_b6, &b[b_offset], ldb);
+ } else {
+ *rank = 1;
+ slascl_("G", &c__0, &c__0, &d__[1], &c_b11, &c__1, nrhs, &b[
+ b_offset], ldb, info);
+ d__[1] = dabs(d__[1]);
+ }
+ return 0;
+ }
+
+/* Rotate the matrix if it is lower bidiagonal. */
+
+ if (*(unsigned char *)uplo == 'L') {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ slartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ if (*nrhs == 1) {
+ srot_(&c__1, &b[i__ + b_dim1], &c__1, &b[i__ + 1 + b_dim1], &
+ c__1, &cs, &sn);
+ } else {
+ work[(i__ << 1) - 1] = cs;
+ work[i__ * 2] = sn;
+ }
+/* L10: */
+ }
+ if (*nrhs > 1) {
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *n - 1;
+ for (j = 1; j <= i__2; ++j) {
+ cs = work[(j << 1) - 1];
+ sn = work[j * 2];
+ srot_(&c__1, &b[j + i__ * b_dim1], &c__1, &b[j + 1 + i__ *
+ b_dim1], &c__1, &cs, &sn);
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+ }
+
+/* Scale. */
+
+ nm1 = *n - 1;
+ orgnrm = slanst_("M", n, &d__[1], &e[1]);
+ if (orgnrm == 0.f) {
+ slaset_("A", n, nrhs, &c_b6, &c_b6, &b[b_offset], ldb);
+ return 0;
+ }
+
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b11, n, &c__1, &d__[1], n, info);
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b11, &nm1, &c__1, &e[1], &nm1,
+ info);
+
+/* If N is smaller than the minimum divide size SMLSIZ, then solve */
+/* the problem with another solver. */
+
+ if (*n <= *smlsiz) {
+ nwork = *n * *n + 1;
+ slaset_("A", n, n, &c_b6, &c_b11, &work[1], n);
+ slasdq_("U", &c__0, n, n, &c__0, nrhs, &d__[1], &e[1], &work[1], n, &
+ work[1], n, &b[b_offset], ldb, &work[nwork], info);
+ if (*info != 0) {
+ return 0;
+ }
+ tol = rcnd * (r__1 = d__[isamax_(n, &d__[1], &c__1)], dabs(r__1));
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] <= tol) {
+ slaset_("A", &c__1, nrhs, &c_b6, &c_b6, &b[i__ + b_dim1], ldb);
+ } else {
+ slascl_("G", &c__0, &c__0, &d__[i__], &c_b11, &c__1, nrhs, &b[
+ i__ + b_dim1], ldb, info);
+ ++(*rank);
+ }
+/* L40: */
+ }
+ sgemm_("T", "N", n, nrhs, n, &c_b11, &work[1], n, &b[b_offset], ldb, &
+ c_b6, &work[nwork], n);
+ slacpy_("A", n, nrhs, &work[nwork], n, &b[b_offset], ldb);
+
+/* Unscale. */
+
+ slascl_("G", &c__0, &c__0, &c_b11, &orgnrm, n, &c__1, &d__[1], n,
+ info);
+ slasrt_("D", n, &d__[1], info);
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b11, n, nrhs, &b[b_offset],
+ ldb, info);
+
+ return 0;
+ }
+
+/* Book-keeping and setting up some constants. */
+
+ nlvl = (integer) (log((real) (*n) / (real) (*smlsiz + 1)) / log(2.f)) + 1;
+
+ smlszp = *smlsiz + 1;
+
+ u = 1;
+ vt = *smlsiz * *n + 1;
+ difl = vt + smlszp * *n;
+ difr = difl + nlvl * *n;
+ z__ = difr + (nlvl * *n << 1);
+ c__ = z__ + nlvl * *n;
+ s = c__ + *n;
+ poles = s + *n;
+ givnum = poles + (nlvl << 1) * *n;
+ bx = givnum + (nlvl << 1) * *n;
+ nwork = bx + *n * *nrhs;
+
+ sizei = *n + 1;
+ k = sizei + *n;
+ givptr = k + *n;
+ perm = givptr + *n;
+ givcol = perm + nlvl * *n;
+ iwk = givcol + (nlvl * *n << 1);
+
+ st = 1;
+ sqre = 0;
+ icmpq1 = 1;
+ icmpq2 = 0;
+ nsub = 0;
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((r__1 = d__[i__], dabs(r__1)) < eps) {
+ d__[i__] = r_sign(&eps, &d__[i__]);
+ }
+/* L50: */
+ }
+
+ i__1 = nm1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((r__1 = e[i__], dabs(r__1)) < eps || i__ == nm1) {
+ ++nsub;
+ iwork[nsub] = st;
+
+/* Subproblem found. First determine its size and then */
+/* apply divide and conquer on it. */
+
+ if (i__ < nm1) {
+
+/* A subproblem with E(I) small for I < NM1. */
+
+ nsize = i__ - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ } else if ((r__1 = e[i__], dabs(r__1)) >= eps) {
+
+/* A subproblem with E(NM1) not too small but I = NM1. */
+
+ nsize = *n - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ } else {
+
+/* A subproblem with E(NM1) small. This implies an */
+/* 1-by-1 subproblem at D(N), which is not solved */
+/* explicitly. */
+
+ nsize = i__ - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ ++nsub;
+ iwork[nsub] = *n;
+ iwork[sizei + nsub - 1] = 1;
+ scopy_(nrhs, &b[*n + b_dim1], ldb, &work[bx + nm1], n);
+ }
+ st1 = st - 1;
+ if (nsize == 1) {
+
+/* This is a 1-by-1 subproblem and is not solved */
+/* explicitly. */
+
+ scopy_(nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n);
+ } else if (nsize <= *smlsiz) {
+
+/* This is a small subproblem and is solved by SLASDQ. */
+
+ slaset_("A", &nsize, &nsize, &c_b6, &c_b11, &work[vt + st1],
+ n);
+ slasdq_("U", &c__0, &nsize, &nsize, &c__0, nrhs, &d__[st], &e[
+ st], &work[vt + st1], n, &work[nwork], n, &b[st +
+ b_dim1], ldb, &work[nwork], info);
+ if (*info != 0) {
+ return 0;
+ }
+ slacpy_("A", &nsize, nrhs, &b[st + b_dim1], ldb, &work[bx +
+ st1], n);
+ } else {
+
+/* A large problem. Solve it using divide and conquer. */
+
+ slasda_(&icmpq1, smlsiz, &nsize, &sqre, &d__[st], &e[st], &
+ work[u + st1], n, &work[vt + st1], &iwork[k + st1], &
+ work[difl + st1], &work[difr + st1], &work[z__ + st1],
+ &work[poles + st1], &iwork[givptr + st1], &iwork[
+ givcol + st1], n, &iwork[perm + st1], &work[givnum +
+ st1], &work[c__ + st1], &work[s + st1], &work[nwork],
+ &iwork[iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ bxst = bx + st1;
+ slalsa_(&icmpq2, smlsiz, &nsize, nrhs, &b[st + b_dim1], ldb, &
+ work[bxst], n, &work[u + st1], n, &work[vt + st1], &
+ iwork[k + st1], &work[difl + st1], &work[difr + st1],
+ &work[z__ + st1], &work[poles + st1], &iwork[givptr +
+ st1], &iwork[givcol + st1], n, &iwork[perm + st1], &
+ work[givnum + st1], &work[c__ + st1], &work[s + st1],
+ &work[nwork], &iwork[iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ }
+ st = i__ + 1;
+ }
+/* L60: */
+ }
+
+/* Apply the singular values and treat the tiny ones as zero. */
+
+ tol = rcnd * (r__1 = d__[isamax_(n, &d__[1], &c__1)], dabs(r__1));
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Some of the elements in D can be negative because 1-by-1 */
+/* subproblems were not solved explicitly. */
+
+ if ((r__1 = d__[i__], dabs(r__1)) <= tol) {
+ slaset_("A", &c__1, nrhs, &c_b6, &c_b6, &work[bx + i__ - 1], n);
+ } else {
+ ++(*rank);
+ slascl_("G", &c__0, &c__0, &d__[i__], &c_b11, &c__1, nrhs, &work[
+ bx + i__ - 1], n, info);
+ }
+ d__[i__] = (r__1 = d__[i__], dabs(r__1));
+/* L70: */
+ }
+
+/* Now apply back the right singular vectors. */
+
+ icmpq2 = 1;
+ i__1 = nsub;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ st = iwork[i__];
+ st1 = st - 1;
+ nsize = iwork[sizei + i__ - 1];
+ bxst = bx + st1;
+ if (nsize == 1) {
+ scopy_(nrhs, &work[bxst], n, &b[st + b_dim1], ldb);
+ } else if (nsize <= *smlsiz) {
+ sgemm_("T", "N", &nsize, nrhs, &nsize, &c_b11, &work[vt + st1], n,
+ &work[bxst], n, &c_b6, &b[st + b_dim1], ldb);
+ } else {
+ slalsa_(&icmpq2, smlsiz, &nsize, nrhs, &work[bxst], n, &b[st +
+ b_dim1], ldb, &work[u + st1], n, &work[vt + st1], &iwork[
+ k + st1], &work[difl + st1], &work[difr + st1], &work[z__
+ + st1], &work[poles + st1], &iwork[givptr + st1], &iwork[
+ givcol + st1], n, &iwork[perm + st1], &work[givnum + st1],
+ &work[c__ + st1], &work[s + st1], &work[nwork], &iwork[
+ iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ }
+/* L80: */
+ }
+
+/* Unscale and sort the singular values. */
+
+ slascl_("G", &c__0, &c__0, &c_b11, &orgnrm, n, &c__1, &d__[1], n, info);
+ slasrt_("D", n, &d__[1], info);
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b11, n, nrhs, &b[b_offset], ldb,
+ info);
+
+ return 0;
+
+/* End of SLALSD */
+
+} /* slalsd_ */
diff --git a/contrib/libs/clapack/slamch.c b/contrib/libs/clapack/slamch.c
new file mode 100644
index 0000000000..afd17fd491
--- /dev/null
+++ b/contrib/libs/clapack/slamch.c
@@ -0,0 +1,1000 @@
+/* slamch.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b32 = 0.f;
+
+/* NOTE(review): f2c-generated code. Machine parameters are computed once */
+/* and cached in function-local statics, so the first invocation is not */
+/* thread-safe; subsequent calls only read the cached values. */
+doublereal slamch_(char *cmach)
+{
+    /* Initialized data */
+
+    static logical first = TRUE_;
+
+    /* System generated locals */
+    integer i__1;
+    real ret_val;
+
+    /* Builtin functions */
+    double pow_ri(real *, integer *);
+
+    /* Local variables */
+    static real t;
+    integer it;
+    static real rnd, eps, base;
+    integer beta;
+    static real emin, prec, emax;
+    integer imin, imax;
+    logical lrnd;
+    static real rmin, rmax;
+    real rmach;
+    extern logical lsame_(char *, char *);
+    real small;
+    static real sfmin;
+    extern /* Subroutine */ int slamc2_(integer *, integer *, logical *, real
+            *, integer *, real *, integer *, real *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLAMCH determines single precision machine parameters. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  CMACH   (input) CHARACTER*1 */
+/*          Specifies the value to be returned by SLAMCH: */
+/*          = 'E' or 'e',   SLAMCH := eps */
+/*          = 'S' or 's',   SLAMCH := sfmin */
+/*          = 'B' or 'b',   SLAMCH := base */
+/*          = 'P' or 'p',   SLAMCH := eps*base */
+/*          = 'N' or 'n',   SLAMCH := t */
+/*          = 'R' or 'r',   SLAMCH := rnd */
+/*          = 'M' or 'm',   SLAMCH := emin */
+/*          = 'U' or 'u',   SLAMCH := rmin */
+/*          = 'L' or 'l',   SLAMCH := emax */
+/*          = 'O' or 'o',   SLAMCH := rmax */
+
+/*          where */
+
+/*          eps   = relative machine precision */
+/*          sfmin = safe minimum, such that 1/sfmin does not overflow */
+/*          base  = base of the machine */
+/*          prec  = eps*base */
+/*          t     = number of (base) digits in the mantissa */
+/*          rnd   = 1.0 when rounding occurs in addition, 0.0 otherwise */
+/*          emin  = minimum exponent before (gradual) underflow */
+/*          rmin  = underflow threshold - base**(emin-1) */
+/*          emax  = largest exponent before overflow */
+/*          rmax  = overflow threshold  - (base**emax)*(1-eps) */
+
+/* ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Save statement .. */
+/*     .. */
+/*     .. Data statements .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     First call: probe the arithmetic via SLAMC2 and cache everything */
+/*     in the static locals above. */
+    if (first) {
+        slamc2_(&beta, &it, &lrnd, &eps, &imin, &rmin, &imax, &rmax);
+        base = (real) beta;
+        t = (real) it;
+        if (lrnd) {
+            rnd = 1.f;
+            i__1 = 1 - it;
+            eps = pow_ri(&base, &i__1) / 2;
+        } else {
+            rnd = 0.f;
+            i__1 = 1 - it;
+            eps = pow_ri(&base, &i__1);
+        }
+        prec = eps * base;
+        emin = (real) imin;
+        emax = (real) imax;
+        sfmin = rmin;
+        small = 1.f / rmax;
+        if (small >= sfmin) {
+
+/*           Use SMALL plus a bit, to avoid the possibility of rounding */
+/*           causing overflow when computing  1/sfmin. */
+
+            sfmin = small * (eps + 1.f);
+        }
+    }
+
+    if (lsame_(cmach, "E")) {
+        rmach = eps;
+    } else if (lsame_(cmach, "S")) {
+        rmach = sfmin;
+    } else if (lsame_(cmach, "B")) {
+        rmach = base;
+    } else if (lsame_(cmach, "P")) {
+        rmach = prec;
+    } else if (lsame_(cmach, "N")) {
+        rmach = t;
+    } else if (lsame_(cmach, "R")) {
+        rmach = rnd;
+    } else if (lsame_(cmach, "M")) {
+        rmach = emin;
+    } else if (lsame_(cmach, "U")) {
+        rmach = rmin;
+    } else if (lsame_(cmach, "L")) {
+        rmach = emax;
+    } else if (lsame_(cmach, "O")) {
+        rmach = rmax;
+    }
+
+/*     NOTE(review): there is no final else -- if CMACH matches none of the */
+/*     codes above, RMACH is returned uninitialized. Callers must pass a */
+/*     valid character code. */
+    ret_val = rmach;
+    first = FALSE_;
+    return ret_val;
+
+/*     End of SLAMCH */
+
+} /* slamch_ */
+
+
+/* *********************************************************************** */
+
+/* NOTE(review): f2c-generated probing code. It discovers BETA, T, RND and */
+/* IEEE1 by performing carefully ordered floating-point operations through */
+/* SLAMC3 (which forces operands to memory); do not reorder or simplify */
+/* these expressions. Results are cached in statics, so the first call is */
+/* not thread-safe. */
+/* Subroutine */ int slamc1_(integer *beta, integer *t, logical *rnd, logical
+        *ieee1)
+{
+    /* Initialized data */
+
+    static logical first = TRUE_;
+
+    /* System generated locals */
+    real r__1, r__2;
+
+    /* Local variables */
+    real a, b, c__, f, t1, t2;
+    static integer lt;
+    real one, qtr;
+    static logical lrnd;
+    static integer lbeta;
+    real savec;
+    static logical lieee1;
+    extern doublereal slamc3_(real *, real *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLAMC1 determines the machine parameters given by BETA, T, RND, and */
+/*  IEEE1. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  BETA    (output) INTEGER */
+/*          The base of the machine. */
+
+/*  T       (output) INTEGER */
+/*          The number of ( BETA ) digits in the mantissa. */
+
+/*  RND     (output) LOGICAL */
+/*          Specifies whether proper rounding  ( RND = .TRUE. )  or */
+/*          chopping  ( RND = .FALSE. )  occurs in addition. This may not */
+/*          be a reliable guide to the way in which the machine performs */
+/*          its arithmetic. */
+
+/*  IEEE1   (output) LOGICAL */
+/*          Specifies whether rounding appears to be done in the IEEE */
+/*          'round to nearest' style. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  The routine is based on the routine  ENVRON  by Malcolm and */
+/*  incorporates suggestions by Gentleman and Marovich. See */
+
+/*     Malcolm M. A. (1972) Algorithms to reveal properties of */
+/*        floating-point arithmetic. Comms. of the ACM, 15, 949-951. */
+
+/*     Gentleman W. M. and Marovich S. B. (1974) More on algorithms */
+/*        that reveal properties of floating point arithmetic units. */
+/*        Comms. of the ACM, 17, 276-277. */
+
+/* ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Save statement .. */
+/*     .. */
+/*     .. Data statements .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    if (first) {
+        one = 1.f;
+
+/*        LBETA, LIEEE1, LT and LRND are the local values of BETA, */
+/*        IEEE1, T and RND. */
+
+/*        Throughout this routine we use the function SLAMC3 to ensure */
+/*        that relevant values are stored and not held in registers, or */
+/*        are not affected by optimizers. */
+
+/*        Compute  a = 2.0**m  with the smallest positive integer m such */
+/*        that */
+
+/*           fl( a + 1.0 ) = a. */
+
+        a = 1.f;
+        c__ = 1.f;
+
+/* +       WHILE( C.EQ.ONE )LOOP */
+L10:
+        if (c__ == one) {
+            a *= 2;
+            c__ = slamc3_(&a, &one);
+            r__1 = -a;
+            c__ = slamc3_(&c__, &r__1);
+            goto L10;
+        }
+/* +       END WHILE */
+
+/*        Now compute  b = 2.0**m  with the smallest positive integer m */
+/*        such that */
+
+/*           fl( a + b ) .gt. a. */
+
+        b = 1.f;
+        c__ = slamc3_(&a, &b);
+
+/* +       WHILE( C.EQ.A )LOOP */
+L20:
+        if (c__ == a) {
+            b *= 2;
+            c__ = slamc3_(&a, &b);
+            goto L20;
+        }
+/* +       END WHILE */
+
+/*        Now compute the base.  a and c  are neighbouring floating point */
+/*        numbers  in the  interval  ( beta**t, beta**( t + 1 ) )  and so */
+/*        their difference is beta. Adding 0.25 to c is to ensure that it */
+/*        is truncated to beta and not ( beta - 1 ). */
+
+        qtr = one / 4;
+        savec = c__;
+        r__1 = -a;
+        c__ = slamc3_(&c__, &r__1);
+        lbeta = c__ + qtr;
+
+/*        Now determine whether rounding or chopping occurs,  by adding a */
+/*        bit  less  than  beta/2  and a  bit  more  than  beta/2  to  a. */
+
+        b = (real) lbeta;
+        r__1 = b / 2;
+        r__2 = -b / 100;
+        f = slamc3_(&r__1, &r__2);
+        c__ = slamc3_(&f, &a);
+        if (c__ == a) {
+            lrnd = TRUE_;
+        } else {
+            lrnd = FALSE_;
+        }
+        r__1 = b / 2;
+        r__2 = b / 100;
+        f = slamc3_(&r__1, &r__2);
+        c__ = slamc3_(&f, &a);
+        if (lrnd && c__ == a) {
+            lrnd = FALSE_;
+        }
+
+/*        Try and decide whether rounding is done in the  IEEE  'round to */
+/*        nearest' style. B/2 is half a unit in the last place of the two */
+/*        numbers A and SAVEC. Furthermore, A is even, i.e. has last  bit */
+/*        zero, and SAVEC is odd. Thus adding B/2 to A should not  change */
+/*        A, but adding B/2 to SAVEC should change SAVEC. */
+
+        r__1 = b / 2;
+        t1 = slamc3_(&r__1, &a);
+        r__1 = b / 2;
+        t2 = slamc3_(&r__1, &savec);
+        lieee1 = t1 == a && t2 > savec && lrnd;
+
+/*        Now find  the  mantissa, t.  It should  be the  integer part of */
+/*        log to the base beta of a,  however it is safer  to determine t */
+/*        by powering.  So we find t as the smallest positive integer for */
+/*        which */
+
+/*           fl( beta**t + 1.0 ) = 1.0. */
+
+        lt = 0;
+        a = 1.f;
+        c__ = 1.f;
+
+/* +       WHILE( C.EQ.ONE )LOOP */
+L30:
+        if (c__ == one) {
+            ++lt;
+            a *= lbeta;
+            c__ = slamc3_(&a, &one);
+            r__1 = -a;
+            c__ = slamc3_(&c__, &r__1);
+            goto L30;
+        }
+/* +       END WHILE */
+
+    }
+
+/*     Publish the cached results on every call. */
+    *beta = lbeta;
+    *t = lt;
+    *rnd = lrnd;
+    *ieee1 = lieee1;
+    first = FALSE_;
+    return 0;
+
+/*     End of SLAMC1 */
+
+} /* slamc1_ */
+
+
+/* *********************************************************************** */
+
+/* NOTE(review): f2c-generated probing code (after PARANOIA by W. Kahan). */
+/* The exact sequence of SLAMC3 calls is what defeats optimizer register */
+/* caching; do not reorder. Results are cached in statics -- the first */
+/* call is not thread-safe. */
+/* Subroutine */ int slamc2_(integer *beta, integer *t, logical *rnd, real *
+        eps, integer *emin, real *rmin, integer *emax, real *rmax)
+{
+    /* Initialized data */
+
+    static logical first = TRUE_;
+    static logical iwarn = FALSE_;
+
+    /* Format strings */
+    static char fmt_9999[] = "(//\002 WARNING. The value EMIN may be incorre"
+            "ct:-\002,\002  EMIN = \002,i8,/\002 If, after inspection, the va"
+            "lue EMIN looks\002,\002 acceptable please comment out \002,/\002"
+            " the IF block as marked within the code of routine\002,\002 SLAM"
+            "C2,\002,/\002 otherwise supply EMIN explicitly.\002,/)";
+
+    /* System generated locals */
+    integer i__1;
+    real r__1, r__2, r__3, r__4, r__5;
+
+    /* Builtin functions */
+    double pow_ri(real *, integer *);
+    integer s_wsfe(cilist *), do_fio(integer *, char *, ftnlen), e_wsfe(void);
+
+    /* Local variables */
+    real a, b, c__;
+    integer i__;
+    static integer lt;
+    real one, two;
+    logical ieee;
+    real half;
+    logical lrnd;
+    static real leps;
+    real zero;
+    static integer lbeta;
+    real rbase;
+    static integer lemin, lemax;
+    integer gnmin;
+    real small;
+    integer gpmin;
+    real third;
+    static real lrmin, lrmax;
+    real sixth;
+    logical lieee1;
+    extern /* Subroutine */ int slamc1_(integer *, integer *, logical *,
+            logical *);
+    extern doublereal slamc3_(real *, real *);
+    extern /* Subroutine */ int slamc4_(integer *, real *, integer *),
+            slamc5_(integer *, integer *, integer *, logical *, integer *,
+            real *);
+    integer ngnmin, ngpmin;
+
+    /* Fortran I/O blocks */
+    static cilist io___58 = { 0, 6, 0, fmt_9999, 0 };
+
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLAMC2 determines the machine parameters specified in its argument */
+/*  list. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  BETA    (output) INTEGER */
+/*          The base of the machine. */
+
+/*  T       (output) INTEGER */
+/*          The number of ( BETA ) digits in the mantissa. */
+
+/*  RND     (output) LOGICAL */
+/*          Specifies whether proper rounding  ( RND = .TRUE. )  or */
+/*          chopping  ( RND = .FALSE. )  occurs in addition. This may not */
+/*          be a reliable guide to the way in which the machine performs */
+/*          its arithmetic. */
+
+/*  EPS     (output) REAL */
+/*          The smallest positive number such that */
+
+/*             fl( 1.0 - EPS ) .LT. 1.0, */
+
+/*          where fl denotes the computed value. */
+
+/*  EMIN    (output) INTEGER */
+/*          The minimum exponent before (gradual) underflow occurs. */
+
+/*  RMIN    (output) REAL */
+/*          The smallest normalized number for the machine, given by */
+/*          BASE**( EMIN - 1 ), where  BASE  is the floating point value */
+/*          of BETA. */
+
+/*  EMAX    (output) INTEGER */
+/*          The maximum exponent before overflow occurs. */
+
+/*  RMAX    (output) REAL */
+/*          The largest positive number for the machine, given by */
+/*          BASE**EMAX * ( 1 - EPS ), where  BASE  is the floating point */
+/*          value of BETA. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  The computation of  EPS  is based on a routine PARANOIA by */
+/*  W. Kahan of the University of California at Berkeley. */
+
+/* ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Save statement .. */
+/*     .. */
+/*     .. Data statements .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    if (first) {
+        zero = 0.f;
+        one = 1.f;
+        two = 2.f;
+
+/*        LBETA, LT, LRND, LEPS, LEMIN and LRMIN  are the local values of */
+/*        BETA, T, RND, EPS, EMIN and RMIN. */
+
+/*        Throughout this routine  we use the function  SLAMC3  to ensure */
+/*        that relevant values are stored  and not held in registers,  or */
+/*        are not affected by optimizers. */
+
+/*        SLAMC1 returns the parameters  LBETA, LT, LRND and LIEEE1. */
+
+        slamc1_(&lbeta, &lt, &lrnd, &lieee1);
+
+/*        Start to find EPS. */
+
+        b = (real) lbeta;
+        i__1 = -lt;
+        a = pow_ri(&b, &i__1);
+        leps = a;
+
+/*        Try some tricks to see whether or not this is the correct  EPS. */
+
+        b = two / 3;
+        half = one / 2;
+        r__1 = -half;
+        sixth = slamc3_(&b, &r__1);
+        third = slamc3_(&sixth, &sixth);
+        r__1 = -half;
+        b = slamc3_(&third, &r__1);
+        b = slamc3_(&b, &sixth);
+        b = dabs(b);
+        if (b < leps) {
+            b = leps;
+        }
+
+        leps = 1.f;
+
+/* +       WHILE( ( LEPS.GT.B ).AND.( B.GT.ZERO ) )LOOP */
+L10:
+        if (leps > b && b > zero) {
+            leps = b;
+            r__1 = half * leps;
+/* Computing 5th power */
+            r__3 = two, r__4 = r__3, r__3 *= r__3;
+/* Computing 2nd power */
+            r__5 = leps;
+            r__2 = r__4 * (r__3 * r__3) * (r__5 * r__5);
+            c__ = slamc3_(&r__1, &r__2);
+            r__1 = -c__;
+            c__ = slamc3_(&half, &r__1);
+            b = slamc3_(&half, &c__);
+            r__1 = -b;
+            c__ = slamc3_(&half, &r__1);
+            b = slamc3_(&half, &c__);
+            goto L10;
+        }
+/* +       END WHILE */
+
+        if (a < leps) {
+            leps = a;
+        }
+
+/*        Computation of EPS complete. */
+
+/*        Now find  EMIN.  Let A = + or - 1, and + or - (1 + BASE**(-3)). */
+/*        Keep dividing  A by BETA until (gradual) underflow occurs. This */
+/*        is detected when we cannot recover the previous A. */
+
+        rbase = one / lbeta;
+        small = one;
+        for (i__ = 1; i__ <= 3; ++i__) {
+            r__1 = small * rbase;
+            small = slamc3_(&r__1, &zero);
+/* L20: */
+        }
+        a = slamc3_(&one, &small);
+        slamc4_(&ngpmin, &one, &lbeta);
+        r__1 = -one;
+        slamc4_(&ngnmin, &r__1, &lbeta);
+        slamc4_(&gpmin, &a, &lbeta);
+        r__1 = -a;
+        slamc4_(&gnmin, &r__1, &lbeta);
+        ieee = FALSE_;
+
+        if (ngpmin == ngnmin && gpmin == gnmin) {
+            if (ngpmin == gpmin) {
+                lemin = ngpmin;
+/*            ( Non twos-complement machines, no gradual underflow; */
+/*              e.g.,  VAX ) */
+            } else if (gpmin - ngpmin == 3) {
+                lemin = ngpmin - 1 + lt;
+                ieee = TRUE_;
+/*            ( Non twos-complement machines, with gradual underflow; */
+/*              e.g., IEEE standard followers ) */
+            } else {
+                lemin = min(ngpmin,gpmin);
+/*            ( A guess; no known machine ) */
+                iwarn = TRUE_;
+            }
+
+        } else if (ngpmin == gpmin && ngnmin == gnmin) {
+            if ((i__1 = ngpmin - ngnmin, abs(i__1)) == 1) {
+                lemin = max(ngpmin,ngnmin);
+/*            ( Twos-complement machines, no gradual underflow; */
+/*              e.g., CYBER 205 ) */
+            } else {
+                lemin = min(ngpmin,ngnmin);
+/*            ( A guess; no known machine ) */
+                iwarn = TRUE_;
+            }
+
+        } else if ((i__1 = ngpmin - ngnmin, abs(i__1)) == 1 && gpmin == gnmin)
+                 {
+            if (gpmin - min(ngpmin,ngnmin) == 3) {
+                lemin = max(ngpmin,ngnmin) - 1 + lt;
+/*            ( Twos-complement machines with gradual underflow; */
+/*              no known machine ) */
+            } else {
+                lemin = min(ngpmin,ngnmin);
+/*            ( A guess; no known machine ) */
+                iwarn = TRUE_;
+            }
+
+        } else {
+/* Computing MIN */
+            i__1 = min(ngpmin,ngnmin), i__1 = min(i__1,gpmin);
+            lemin = min(i__1,gnmin);
+/*         ( A guess; no known machine ) */
+            iwarn = TRUE_;
+        }
+        first = FALSE_;
+/* ** */
+/* Comment out this if block if EMIN is ok */
+        if (iwarn) {
+/*           NOTE(review): FIRST is reset to TRUE_ here, so while the EMIN */
+/*           warning applies every call recomputes (and re-prints). */
+            first = TRUE_;
+            s_wsfe(&io___58);
+            do_fio(&c__1, (char *)&lemin, (ftnlen)sizeof(integer));
+            e_wsfe();
+        }
+/* ** */
+
+/*        Assume IEEE arithmetic if we found denormalised  numbers above, */
+/*        or if arithmetic seems to round in the  IEEE style,  determined */
+/*        in routine SLAMC1. A true IEEE machine should have both  things */
+/*        true; however, faulty machines may have one or the other. */
+
+        ieee = ieee || lieee1;
+
+/*        Compute  RMIN by successive division by  BETA. We could compute */
+/*        RMIN as BASE**( EMIN - 1 ),  but some machines underflow during */
+/*        this computation. */
+
+        lrmin = 1.f;
+        i__1 = 1 - lemin;
+        for (i__ = 1; i__ <= i__1; ++i__) {
+            r__1 = lrmin * rbase;
+            lrmin = slamc3_(&r__1, &zero);
+/* L30: */
+        }
+
+/*        Finally, call SLAMC5 to compute EMAX and RMAX. */
+
+        slamc5_(&lbeta, &lt, &lemin, &ieee, &lemax, &lrmax);
+    }
+
+    *beta = lbeta;
+    *t = lt;
+    *rnd = lrnd;
+    *eps = leps;
+    *emin = lemin;
+    *rmin = lrmin;
+    *emax = lemax;
+    *rmax = lrmax;
+
+    return 0;
+
+
+/*     End of SLAMC2 */
+
+} /* slamc2_ */
+
+
+/* *********************************************************************** */
+
+/* NOTE(review): this function exists solely to force *a and *b through */
+/* memory so the probing code in SLAMC1/2/4/5 observes stored (rounded) */
+/* values rather than extended-precision registers. It must stay a real */
+/* out-of-line call -- do not inline it or enable cross-TU optimization */
+/* that would fold the addition into the caller. */
+doublereal slamc3_(real *a, real *b)
+{
+    /* System generated locals */
+    real ret_val;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLAMC3  is intended to force  A  and  B  to be stored prior to doing */
+/*  the addition of  A  and  B ,  for use in situations where optimizers */
+/*  might hold one of these in a register. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  A       (input) REAL */
+/*  B       (input) REAL */
+/*          The values A and B. */
+
+/* ===================================================================== */
+
+/*     .. Executable Statements .. */
+
+    ret_val = *a + *b;
+
+    return ret_val;
+
+/*     End of SLAMC3 */
+
+} /* slamc3_ */
+
+
+/* *********************************************************************** */
+
+/* NOTE(review): underflow-probing service routine for SLAMC2. It divides */
+/* A by BASE until the previous value can no longer be recovered by four */
+/* independent round trips (multiply back and repeated addition); the */
+/* SLAMC3 calls force every intermediate to memory, so the statement */
+/* order is load-bearing. */
+/* Subroutine */ int slamc4_(integer *emin, real *start, integer *base)
+{
+    /* System generated locals */
+    integer i__1;
+    real r__1;
+
+    /* Local variables */
+    real a;
+    integer i__;
+    real b1, b2, c1, c2, d1, d2, one, zero, rbase;
+    extern doublereal slamc3_(real *, real *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLAMC4 is a service routine for SLAMC2. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  EMIN    (output) INTEGER */
+/*          The minimum exponent before (gradual) underflow, computed by */
+/*          setting A = START and dividing by BASE until the previous A */
+/*          can not be recovered. */
+
+/*  START   (input) REAL */
+/*          The starting point for determining EMIN. */
+
+/*  BASE    (input) INTEGER */
+/*          The base of the machine. */
+
+/* ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    a = *start;
+    one = 1.f;
+    rbase = one / *base;
+    zero = 0.f;
+    *emin = 1;
+    r__1 = a * rbase;
+    b1 = slamc3_(&r__1, &zero);
+    c1 = a;
+    c2 = a;
+    d1 = a;
+    d2 = a;
+/* +    WHILE( ( C1.EQ.A ).AND.( C2.EQ.A ).AND. */
+/*    $       ( D1.EQ.A ).AND.( D2.EQ.A )      )LOOP */
+L10:
+    if (c1 == a && c2 == a && d1 == a && d2 == a) {
+        --(*emin);
+        a = b1;
+        r__1 = a / *base;
+        b1 = slamc3_(&r__1, &zero);
+        r__1 = b1 * *base;
+        c1 = slamc3_(&r__1, &zero);
+        d1 = zero;
+        i__1 = *base;
+        for (i__ = 1; i__ <= i__1; ++i__) {
+            d1 += b1;
+/* L20: */
+        }
+        r__1 = a * rbase;
+        b2 = slamc3_(&r__1, &zero);
+        r__1 = b2 / rbase;
+        c2 = slamc3_(&r__1, &zero);
+        d2 = zero;
+        i__1 = *base;
+        for (i__ = 1; i__ <= i__1; ++i__) {
+            d2 += b2;
+/* L30: */
+        }
+        goto L10;
+    }
+/* +    END WHILE */
+
+    return 0;
+
+/*     End of SLAMC4 */
+
+} /* slamc4_ */
+
+
+/* *********************************************************************** */
+
+/* NOTE(review): computes EMAX and RMAX without overflowing, assuming */
+/* EMAX + abs(EMIN) is approximately a power of 2 (see Purpose below for */
+/* the machines on which that assumption fails). The final loops build */
+/* RMAX digit-by-digit via SLAMC3 to keep every intermediate in memory. */
+/* Subroutine */ int slamc5_(integer *beta, integer *p, integer *emin,
+        logical *ieee, integer *emax, real *rmax)
+{
+    /* System generated locals */
+    integer i__1;
+    real r__1;
+
+    /* Local variables */
+    integer i__;
+    real y, z__;
+    integer try__, lexp;
+    real oldy;
+    integer uexp, nbits;
+    extern doublereal slamc3_(real *, real *);
+    real recbas;
+    integer exbits, expsum;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLAMC5 attempts to compute RMAX, the largest machine floating-point */
+/*  number, without overflow.  It assumes that EMAX + abs(EMIN) sum */
+/*  approximately to a power of 2.  It will fail on machines where this */
+/*  assumption does not hold, for example, the Cyber 205 (EMIN = -28625, */
+/*  EMAX = 28718).  It will also fail if the value supplied for EMIN is */
+/*  too large (i.e. too close to zero), probably with overflow. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  BETA    (input) INTEGER */
+/*          The base of floating-point arithmetic. */
+
+/*  P       (input) INTEGER */
+/*          The number of base BETA digits in the mantissa of a */
+/*          floating-point value. */
+
+/*  EMIN    (input) INTEGER */
+/*          The minimum exponent before (gradual) underflow. */
+
+/*  IEEE    (input) LOGICAL */
+/*          A logical flag specifying whether or not the arithmetic */
+/*          system is thought to comply with the IEEE standard. */
+
+/*  EMAX    (output) INTEGER */
+/*          The largest exponent before overflow */
+
+/*  RMAX    (output) REAL */
+/*          The largest machine floating-point number. */
+
+/* ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     First compute LEXP and UEXP, two powers of 2 that bound */
+/*     abs(EMIN). We then assume that EMAX + abs(EMIN) will sum */
+/*     approximately to the bound that is closest to abs(EMIN). */
+/*     (EMAX is the exponent of the required number RMAX). */
+
+    lexp = 1;
+    exbits = 1;
+L10:
+    try__ = lexp << 1;
+    if (try__ <= -(*emin)) {
+        lexp = try__;
+        ++exbits;
+        goto L10;
+    }
+    if (lexp == -(*emin)) {
+        uexp = lexp;
+    } else {
+        uexp = try__;
+        ++exbits;
+    }
+
+/*     Now -LEXP is less than or equal to EMIN, and -UEXP is greater */
+/*     than or equal to EMIN. EXBITS is the number of bits needed to */
+/*     store the exponent. */
+
+    if (uexp + *emin > -lexp - *emin) {
+        expsum = lexp << 1;
+    } else {
+        expsum = uexp << 1;
+    }
+
+/*     EXPSUM is the exponent range, approximately equal to */
+/*     EMAX - EMIN + 1 . */
+
+    *emax = expsum + *emin - 1;
+    nbits = exbits + 1 + *p;
+
+/*     NBITS is the total number of bits needed to store a */
+/*     floating-point number. */
+
+    if (nbits % 2 == 1 && *beta == 2) {
+
+/*        Either there are an odd number of bits used to store a */
+/*        floating-point number, which is unlikely, or some bits are */
+/*        not used in the representation of numbers, which is possible, */
+/*        (e.g. Cray machines) or the mantissa has an implicit bit, */
+/*        (e.g. IEEE machines, Dec Vax machines), which is perhaps the */
+/*        most likely. We have to assume the last alternative. */
+/*        If this is true, then we need to reduce EMAX by one because */
+/*        there must be some way of representing zero in an implicit-bit */
+/*        system. On machines like Cray, we are reducing EMAX by one */
+/*        unnecessarily. */
+
+        --(*emax);
+    }
+
+    if (*ieee) {
+
+/*        Assume we are on an IEEE machine which reserves one exponent */
+/*        for infinity and NaN. */
+
+        --(*emax);
+    }
+
+/*     Now create RMAX, the largest machine number, which should */
+/*     be equal to (1.0 - BETA**(-P)) * BETA**EMAX . */
+
+/*     First compute 1.0 - BETA**(-P), being careful that the */
+/*     result is less than 1.0 . */
+
+    recbas = 1.f / *beta;
+    z__ = *beta - 1.f;
+    y = 0.f;
+    i__1 = *p;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+        z__ *= recbas;
+        if (y < 1.f) {
+            oldy = y;
+        }
+        y = slamc3_(&y, &z__);
+/* L20: */
+    }
+/*     Y starts at 0, so OLDY is always set on the first iteration; if the */
+/*     sum reached 1.0 we back off to the last partial sum below it. */
+    if (y >= 1.f) {
+        y = oldy;
+    }
+
+/*     Now multiply by BETA**EMAX to get RMAX. */
+
+    i__1 = *emax;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+        r__1 = y * *beta;
+        y = slamc3_(&r__1, &c_b32);
+/* L30: */
+    }
+
+    *rmax = y;
+    return 0;
+
+/*     End of SLAMC5 */
+
+} /* slamc5_ */
diff --git a/contrib/libs/clapack/slamrg.c b/contrib/libs/clapack/slamrg.c
new file mode 100644
index 0000000000..4dfffc5361
--- /dev/null
+++ b/contrib/libs/clapack/slamrg.c
@@ -0,0 +1,131 @@
+/* slamrg.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* NOTE(review): f2c-generated two-way merge. A and INDEX are shifted to */
+/* 1-based addressing below (--index; --a), so the permutation written to */
+/* INDEX is 1-based, matching the Fortran caller's expectations. */
+/* Subroutine */ int slamrg_(integer *n1, integer *n2, real *a, integer *
+        strd1, integer *strd2, integer *index)
+{
+    /* System generated locals */
+    integer i__1;
+
+    /* Local variables */
+    integer i__, ind1, ind2, n1sv, n2sv;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLAMRG will create a permutation list which will merge the elements */
+/*  of A (which is composed of two independently sorted sets) into a */
+/*  single set which is sorted in ascending order. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  N1     (input) INTEGER */
+/*  N2     (input) INTEGER */
+/*         These arguments contain the respective lengths of the two */
+/*         sorted lists to be merged. */
+
+/*  A      (input) REAL array, dimension (N1+N2) */
+/*         The first N1 elements of A contain a list of numbers which */
+/*         are sorted in either ascending or descending order.  Likewise */
+/*         for the final N2 elements. */
+
+/*  STRD1  (input) INTEGER */
+/*  STRD2  (input) INTEGER */
+/*         These are the strides to be taken through the array A. */
+/*         Allowable strides are 1 and -1.  They indicate whether a */
+/*         subset of A is sorted in ascending (STRDx = 1) or descending */
+/*         (STRDx = -1) order. */
+
+/*  INDEX  (output) INTEGER array, dimension (N1+N2) */
+/*         On exit this array will contain a permutation such that */
+/*         if B( I ) = A( INDEX( I ) ) for I=1,N1+N2, then B will be */
+/*         sorted in ascending order. */
+
+/* ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --index;
+    --a;
+
+    /* Function Body */
+    n1sv = *n1;
+    n2sv = *n2;
+/*     Start each cursor at the ascending end of its sublist: the front */
+/*     for stride +1, the back for stride -1. */
+    if (*strd1 > 0) {
+        ind1 = 1;
+    } else {
+        ind1 = *n1;
+    }
+    if (*strd2 > 0) {
+        ind2 = *n1 + 1;
+    } else {
+        ind2 = *n1 + *n2;
+    }
+    i__ = 1;
+/*     while ( (N1SV > 0) & (N2SV > 0) ) */
+L10:
+    if (n1sv > 0 && n2sv > 0) {
+        if (a[ind1] <= a[ind2]) {
+            index[i__] = ind1;
+            ++i__;
+            ind1 += *strd1;
+            --n1sv;
+        } else {
+            index[i__] = ind2;
+            ++i__;
+            ind2 += *strd2;
+            --n2sv;
+        }
+        goto L10;
+    }
+/*     end while */
+/*     One sublist is exhausted; copy the remainder of the other. */
+    if (n1sv == 0) {
+        i__1 = n2sv;
+        for (n1sv = 1; n1sv <= i__1; ++n1sv) {
+            index[i__] = ind2;
+            ++i__;
+            ind2 += *strd2;
+/* L20: */
+        }
+    } else {
+/*        N2SV .EQ. 0 */
+        i__1 = n1sv;
+        for (n2sv = 1; n2sv <= i__1; ++n2sv) {
+            index[i__] = ind1;
+            ++i__;
+            ind1 += *strd1;
+/* L30: */
+        }
+    }
+
+    return 0;
+
+/*     End of SLAMRG */
+
+} /* slamrg_ */
diff --git a/contrib/libs/clapack/slaneg.c b/contrib/libs/clapack/slaneg.c
new file mode 100644
index 0000000000..2b1d242542
--- /dev/null
+++ b/contrib/libs/clapack/slaneg.c
@@ -0,0 +1,218 @@
+/* slaneg.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* NOTE(review): relies on IEEE-754 default exception handling (x/0 -> Inf, */
+/* Inf/Inf -> NaN) instead of PIVMIN; the fast loops below are re-run with */
+/* an explicit NaN substitution only when a NaN is detected. The literal */
+/* 128 / 127 offsets implement the BLKLEN chunking described in the */
+/* Parameters comment. */
+integer slaneg_(integer *n, real *d__, real *lld, real *sigma, real *pivmin,
+        integer *r__)
+{
+    /* System generated locals */
+    integer ret_val, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer j;
+    real p, t;
+    integer bj;
+    real tmp;
+    integer neg1, neg2;
+    real bsav, gamma, dplus;
+    integer negcnt;
+    logical sawnan;
+    extern logical sisnan_(real *);
+    real dminus;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLANEG computes the Sturm count, the number of negative pivots */
+/*  encountered while factoring tridiagonal  T - sigma I = L D L^T. */
+/*  This implementation works directly on the factors without forming */
+/*  the tridiagonal matrix T.  The Sturm count is also the number of */
+/*  eigenvalues of T less than sigma. */
+
+/*  This routine is called from SLARRB. */
+
+/*  The current routine does not use the PIVMIN parameter but rather */
+/*  requires IEEE-754 propagation of Infinities and NaNs.  This */
+/*  routine also has no input range restrictions but does require */
+/*  default exception handling such that x/0 produces Inf when x is */
+/*  non-zero, and Inf/Inf produces NaN.  For more information, see: */
+
+/*    Marques, Riedy, and Voemel, "Benefits of IEEE-754 Features in */
+/*    Modern Symmetric Tridiagonal Eigensolvers," SIAM Journal on */
+/*    Scientific Computing, v28, n5, 2006.  DOI 10.1137/050641624 */
+/*    (Tech report version in LAWN 172 with the same title.) */
+
+/*  Arguments */
+/*  ========= */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix. */
+
+/*  D       (input) REAL array, dimension (N) */
+/*          The N diagonal elements of the diagonal matrix D. */
+
+/*  LLD     (input) REAL array, dimension (N-1) */
+/*          The (N-1) elements L(i)*L(i)*D(i). */
+
+/*  SIGMA   (input) REAL */
+/*          Shift amount in T - sigma I = L D L^T. */
+
+/*  PIVMIN  (input) REAL */
+/*          The minimum pivot in the Sturm sequence.  May be used */
+/*          when zero pivots are encountered on non-IEEE-754 */
+/*          architectures. */
+
+/*  R       (input) INTEGER */
+/*          The twist index for the twisted factorization that is used */
+/*          for the negcount. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*     Osni Marques, LBNL/NERSC, USA */
+/*     Christof Voemel, University of California, Berkeley, USA */
+/*     Jason Riedy, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/*     .. Parameters .. */
+/*     Some architectures propagate Infinities and NaNs very slowly, so */
+/*     the code computes counts in BLKLEN chunks.  Then a NaN can */
+/*     propagate at most BLKLEN columns before being detected.  This is */
+/*     not a general tuning parameter; it needs only to be just large */
+/*     enough that the overhead is tiny in common cases. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+    /* Parameter adjustments */
+    --lld;
+    --d__;
+
+    /* Function Body */
+    negcnt = 0;
+/*     I) upper part: L D L^T - SIGMA I = L+ D+ L+^T */
+    t = -(*sigma);
+    i__1 = *r__ - 1;
+    for (bj = 1; bj <= i__1; bj += 128) {
+        neg1 = 0;
+/*        BSAV saves the running pivot so the block can be replayed if a */
+/*        NaN is produced inside it. */
+        bsav = t;
+/* Computing MIN */
+        i__3 = bj + 127, i__4 = *r__ - 1;
+        i__2 = min(i__3,i__4);
+        for (j = bj; j <= i__2; ++j) {
+            dplus = d__[j] + t;
+            if (dplus < 0.f) {
+                ++neg1;
+            }
+            tmp = t / dplus;
+            t = tmp * lld[j] - *sigma;
+/* L21: */
+        }
+        sawnan = sisnan_(&t);
+/*        Run a slower version of the above loop if a NaN is detected. */
+/*        A NaN should occur only with a zero pivot after an infinite */
+/*        pivot.  In that case, substituting 1 for T/DPLUS is the */
+/*        correct limit. */
+        if (sawnan) {
+            neg1 = 0;
+            t = bsav;
+/* Computing MIN */
+            i__3 = bj + 127, i__4 = *r__ - 1;
+            i__2 = min(i__3,i__4);
+            for (j = bj; j <= i__2; ++j) {
+                dplus = d__[j] + t;
+                if (dplus < 0.f) {
+                    ++neg1;
+                }
+                tmp = t / dplus;
+                if (sisnan_(&tmp)) {
+                    tmp = 1.f;
+                }
+                t = tmp * lld[j] - *sigma;
+/* L22: */
+            }
+        }
+        negcnt += neg1;
+/* L210: */
+    }
+
+/*     II) lower part: L D L^T - SIGMA I = U- D- U-^T */
+    p = d__[*n] - *sigma;
+    i__1 = *r__;
+    for (bj = *n - 1; bj >= i__1; bj += -128) {
+        neg2 = 0;
+        bsav = p;
+/* Computing MAX */
+        i__3 = bj - 127;
+        i__2 = max(i__3,*r__);
+        for (j = bj; j >= i__2; --j) {
+            dminus = lld[j] + p;
+            if (dminus < 0.f) {
+                ++neg2;
+            }
+            tmp = p / dminus;
+            p = tmp * d__[j] - *sigma;
+/* L23: */
+        }
+        sawnan = sisnan_(&p);
+/*        As above, run a slower version that substitutes 1 for Inf/Inf. */
+
+        if (sawnan) {
+            neg2 = 0;
+            p = bsav;
+/* Computing MAX */
+            i__3 = bj - 127;
+            i__2 = max(i__3,*r__);
+            for (j = bj; j >= i__2; --j) {
+                dminus = lld[j] + p;
+                if (dminus < 0.f) {
+                    ++neg2;
+                }
+                tmp = p / dminus;
+                if (sisnan_(&tmp)) {
+                    tmp = 1.f;
+                }
+                p = tmp * d__[j] - *sigma;
+/* L24: */
+            }
+        }
+        negcnt += neg2;
+/* L230: */
+    }
+
+/*     III) Twist index */
+/*       T was shifted by SIGMA initially. */
+    gamma = t + *sigma + p;
+    if (gamma < 0.f) {
+        ++negcnt;
+    }
+    ret_val = negcnt;
+    return ret_val;
+} /* slaneg_ */
diff --git a/contrib/libs/clapack/slangb.c b/contrib/libs/clapack/slangb.c
new file mode 100644
index 0000000000..eaa27b82cd
--- /dev/null
+++ b/contrib/libs/clapack/slangb.c
@@ -0,0 +1,226 @@
+/* slangb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal slangb_(char *norm, integer *n, integer *kl, integer *ku, real *ab,
+ integer *ldab, real *work)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+ real ret_val, r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k, l;
+ real sum, scale;
+ extern logical lsame_(char *, char *);
+ real value;
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLANGB returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of an */
+/* n by n band matrix A, with kl sub-diagonals and ku super-diagonals. */
+
+/* Description */
+/* =========== */
+
+/* SLANGB returns the value */
+
+/* SLANGB = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in SLANGB as described */
+/* above. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, SLANGB is */
+/* set to zero. */
+
+/* KL (input) INTEGER */
+/* The number of sub-diagonals of the matrix A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of super-diagonals of the matrix A. KU >= 0. */
+
+/* AB (input) REAL array, dimension (LDAB,N) */
+/* The band matrix A, stored in rows 1 to KL+KU+1. The j-th */
+/* column of A is stored in the j-th column of the array AB as */
+/* follows: */
+/* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(n,j+kl). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KL+KU+1. */
+
+/* WORK (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.f;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = *ku + 2 - j;
+/* Computing MIN */
+ i__4 = *n + *ku + 1 - j, i__5 = *kl + *ku + 1;
+ i__3 = min(i__4,i__5);
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = ab[i__ + j * ab_dim1], dabs(r__1)
+ );
+ value = dmax(r__2,r__3);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if (lsame_(norm, "O") || *(unsigned char *)
+ norm == '1') {
+
+/* Find norm1(A). */
+
+ value = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.f;
+/* Computing MAX */
+ i__3 = *ku + 2 - j;
+/* Computing MIN */
+ i__4 = *n + *ku + 1 - j, i__5 = *kl + *ku + 1;
+ i__2 = min(i__4,i__5);
+ for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+ sum += (r__1 = ab[i__ + j * ab_dim1], dabs(r__1));
+/* L30: */
+ }
+ value = dmax(value,sum);
+/* L40: */
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L50: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ k = *ku + 1 - j;
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *ku;
+/* Computing MIN */
+ i__5 = *n, i__6 = j + *kl;
+ i__4 = min(i__5,i__6);
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ work[i__] += (r__1 = ab[k + i__ + j * ab_dim1], dabs(r__1));
+/* L60: */
+ }
+/* L70: */
+ }
+ value = 0.f;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = work[i__];
+ value = dmax(r__1,r__2);
+/* L80: */
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.f;
+ sum = 1.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *ku;
+ l = max(i__4,i__2);
+ k = *ku + 1 - j + l;
+/* Computing MIN */
+ i__2 = *n, i__3 = j + *kl;
+ i__4 = min(i__2,i__3) - l + 1;
+ slassq_(&i__4, &ab[k + j * ab_dim1], &c__1, &scale, &sum);
+/* L90: */
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of SLANGB */
+
+} /* slangb_ */
diff --git a/contrib/libs/clapack/slange.c b/contrib/libs/clapack/slange.c
new file mode 100644
index 0000000000..621cb2afc6
--- /dev/null
+++ b/contrib/libs/clapack/slange.c
@@ -0,0 +1,199 @@
+/* slange.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal slange_(char *norm, integer *m, integer *n, real *a, integer *lda,
+ real *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ real ret_val, r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j;
+ real sum, scale;
+ extern logical lsame_(char *, char *);
+ real value;
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLANGE returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* real matrix A. */
+
+/* Description */
+/* =========== */
+
+/* SLANGE returns the value */
+
+/* SLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in SLANGE as described */
+/* above. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. When M = 0, */
+/* SLANGE is set to zero. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. When N = 0, */
+/* SLANGE is set to zero. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The m by n matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(M,1). */
+
+/* WORK (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= M when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+
+ /* Function Body */
+ if (min(*m,*n) == 0) {
+ value = 0.f;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if (lsame_(norm, "O") || *(unsigned char *)
+ norm == '1') {
+
+/* Find norm1(A). */
+
+ value = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.f;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum += (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+/* L30: */
+ }
+ value = dmax(value,sum);
+/* L40: */
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L50: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] += (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+/* L60: */
+ }
+/* L70: */
+ }
+ value = 0.f;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = work[i__];
+ value = dmax(r__1,r__2);
+/* L80: */
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.f;
+ sum = 1.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ slassq_(m, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L90: */
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of SLANGE */
+
+} /* slange_ */
diff --git a/contrib/libs/clapack/slangt.c b/contrib/libs/clapack/slangt.c
new file mode 100644
index 0000000000..1d225b5de4
--- /dev/null
+++ b/contrib/libs/clapack/slangt.c
@@ -0,0 +1,196 @@
+/* slangt.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal slangt_(char *norm, integer *n, real *dl, real *d__, real *du)
+{
+ /* System generated locals */
+ integer i__1;
+ real ret_val, r__1, r__2, r__3, r__4, r__5;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ real sum, scale;
+ extern logical lsame_(char *, char *);
+ real anorm;
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLANGT returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* real tridiagonal matrix A. */
+
+/* Description */
+/* =========== */
+
+/* SLANGT returns the value */
+
+/* SLANGT = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in SLANGT as described */
+/* above. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, SLANGT is */
+/* set to zero. */
+
+/* DL (input) REAL array, dimension (N-1) */
+/* The (n-1) sub-diagonal elements of A. */
+
+/* D (input) REAL array, dimension (N) */
+/* The diagonal elements of A. */
+
+/* DU (input) REAL array, dimension (N-1) */
+/* The (n-1) super-diagonal elements of A. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --du;
+ --d__;
+ --dl;
+
+ /* Function Body */
+ if (*n <= 0) {
+ anorm = 0.f;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ anorm = (r__1 = d__[*n], dabs(r__1));
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__2 = anorm, r__3 = (r__1 = dl[i__], dabs(r__1));
+ anorm = dmax(r__2,r__3);
+/* Computing MAX */
+ r__2 = anorm, r__3 = (r__1 = d__[i__], dabs(r__1));
+ anorm = dmax(r__2,r__3);
+/* Computing MAX */
+ r__2 = anorm, r__3 = (r__1 = du[i__], dabs(r__1));
+ anorm = dmax(r__2,r__3);
+/* L10: */
+ }
+ } else if (lsame_(norm, "O") || *(unsigned char *)
+ norm == '1') {
+
+/* Find norm1(A). */
+
+ if (*n == 1) {
+ anorm = dabs(d__[1]);
+ } else {
+/* Computing MAX */
+ r__3 = dabs(d__[1]) + dabs(dl[1]), r__4 = (r__1 = d__[*n], dabs(
+ r__1)) + (r__2 = du[*n - 1], dabs(r__2));
+ anorm = dmax(r__3,r__4);
+ i__1 = *n - 1;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__4 = anorm, r__5 = (r__1 = d__[i__], dabs(r__1)) + (r__2 =
+ dl[i__], dabs(r__2)) + (r__3 = du[i__ - 1], dabs(r__3)
+ );
+ anorm = dmax(r__4,r__5);
+/* L20: */
+ }
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ if (*n == 1) {
+ anorm = dabs(d__[1]);
+ } else {
+/* Computing MAX */
+ r__3 = dabs(d__[1]) + dabs(du[1]), r__4 = (r__1 = d__[*n], dabs(
+ r__1)) + (r__2 = dl[*n - 1], dabs(r__2));
+ anorm = dmax(r__3,r__4);
+ i__1 = *n - 1;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__4 = anorm, r__5 = (r__1 = d__[i__], dabs(r__1)) + (r__2 =
+ du[i__], dabs(r__2)) + (r__3 = dl[i__ - 1], dabs(r__3)
+ );
+ anorm = dmax(r__4,r__5);
+/* L30: */
+ }
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.f;
+ sum = 1.f;
+ slassq_(n, &d__[1], &c__1, &scale, &sum);
+ if (*n > 1) {
+ i__1 = *n - 1;
+ slassq_(&i__1, &dl[1], &c__1, &scale, &sum);
+ i__1 = *n - 1;
+ slassq_(&i__1, &du[1], &c__1, &scale, &sum);
+ }
+ anorm = scale * sqrt(sum);
+ }
+
+ ret_val = anorm;
+ return ret_val;
+
+/* End of SLANGT */
+
+} /* slangt_ */
diff --git a/contrib/libs/clapack/slanhs.c b/contrib/libs/clapack/slanhs.c
new file mode 100644
index 0000000000..a09cf142b1
--- /dev/null
+++ b/contrib/libs/clapack/slanhs.c
@@ -0,0 +1,204 @@
+/* slanhs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal slanhs_(char *norm, integer *n, real *a, integer *lda, real *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ real ret_val, r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j;
+ real sum, scale;
+ extern logical lsame_(char *, char *);
+ real value;
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLANHS returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* Hessenberg matrix A. */
+
+/* Description */
+/* =========== */
+
+/* SLANHS returns the value */
+
+/* SLANHS = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in SLANHS as described */
+/* above. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, SLANHS is */
+/* set to zero. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The n by n upper Hessenberg matrix A; the part of A below the */
+/* first sub-diagonal is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(N,1). */
+
+/* WORK (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.f;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if (lsame_(norm, "O") || *(unsigned char *)
+ norm == '1') {
+
+/* Find norm1(A). */
+
+ value = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.f;
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum += (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+/* L30: */
+ }
+ value = dmax(value,sum);
+/* L40: */
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L50: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] += (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+/* L60: */
+ }
+/* L70: */
+ }
+ value = 0.f;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = work[i__];
+ value = dmax(r__1,r__2);
+/* L80: */
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.f;
+ sum = 1.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ slassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L90: */
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of SLANHS */
+
+} /* slanhs_ */
diff --git a/contrib/libs/clapack/slansb.c b/contrib/libs/clapack/slansb.c
new file mode 100644
index 0000000000..b0718507cf
--- /dev/null
+++ b/contrib/libs/clapack/slansb.c
@@ -0,0 +1,263 @@
+/* slansb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal slansb_(char *norm, char *uplo, integer *n, integer *k, real *ab,
+ integer *ldab, real *work)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+ real ret_val, r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, l;
+ real sum, absa, scale;
+ extern logical lsame_(char *, char *);
+ real value;
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLANSB returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of an */
+/* n by n symmetric band matrix A, with k super-diagonals. */
+
+/* Description */
+/* =========== */
+
+/* SLANSB returns the value */
+
+/* SLANSB = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in SLANSB as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* band matrix A is supplied. */
+/* = 'U': Upper triangular part is supplied */
+/* = 'L': Lower triangular part is supplied */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, SLANSB is */
+/* set to zero. */
+
+/* K (input) INTEGER */
+/* The number of super-diagonals or sub-diagonals of the */
+/* band matrix A. K >= 0. */
+
+/* AB (input) REAL array, dimension (LDAB,N) */
+/* The upper or lower triangle of the symmetric band matrix A, */
+/* stored in the first K+1 rows of AB. The j-th column of A is */
+/* stored in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(k+1+i-j,j) = A(i,j) for max(1,j-k)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+k). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= K+1. */
+
+/* WORK (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/* WORK is not referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.f;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.f;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = *k + 2 - j;
+ i__3 = *k + 1;
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = ab[i__ + j * ab_dim1], dabs(
+ r__1));
+ value = dmax(r__2,r__3);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *n + 1 - j, i__4 = *k + 1;
+ i__3 = min(i__2,i__4);
+ for (i__ = 1; i__ <= i__3; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = ab[i__ + j * ab_dim1], dabs(
+ r__1));
+ value = dmax(r__2,r__3);
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/* Find normI(A) ( = norm1(A), since A is symmetric). */
+
+ value = 0.f;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.f;
+ l = *k + 1 - j;
+/* Computing MAX */
+ i__3 = 1, i__2 = j - *k;
+ i__4 = j - 1;
+ for (i__ = max(i__3,i__2); i__ <= i__4; ++i__) {
+ absa = (r__1 = ab[l + i__ + j * ab_dim1], dabs(r__1));
+ sum += absa;
+ work[i__] += absa;
+/* L50: */
+ }
+ work[j] = sum + (r__1 = ab[*k + 1 + j * ab_dim1], dabs(r__1));
+/* L60: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = work[i__];
+ value = dmax(r__1,r__2);
+/* L70: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L80: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = work[j] + (r__1 = ab[j * ab_dim1 + 1], dabs(r__1));
+ l = 1 - j;
+/* Computing MIN */
+ i__3 = *n, i__2 = j + *k;
+ i__4 = min(i__3,i__2);
+ for (i__ = j + 1; i__ <= i__4; ++i__) {
+ absa = (r__1 = ab[l + i__ + j * ab_dim1], dabs(r__1));
+ sum += absa;
+ work[i__] += absa;
+/* L90: */
+ }
+ value = dmax(value,sum);
+/* L100: */
+ }
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.f;
+ sum = 1.f;
+ if (*k > 0) {
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = j - 1;
+ i__4 = min(i__3,*k);
+/* Computing MAX */
+ i__2 = *k + 2 - j;
+ slassq_(&i__4, &ab[max(i__2, 1)+ j * ab_dim1], &c__1, &
+ scale, &sum);
+/* L110: */
+ }
+ l = *k + 1;
+ } else {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n - j;
+ i__4 = min(i__3,*k);
+ slassq_(&i__4, &ab[j * ab_dim1 + 2], &c__1, &scale, &sum);
+/* L120: */
+ }
+ l = 1;
+ }
+ sum *= 2;
+ } else {
+ l = 1;
+ }
+ slassq_(n, &ab[l + ab_dim1], ldab, &scale, &sum);
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of SLANSB */
+
+} /* slansb_ */
diff --git a/contrib/libs/clapack/slansf.c b/contrib/libs/clapack/slansf.c
new file mode 100644
index 0000000000..52243996ff
--- /dev/null
+++ b/contrib/libs/clapack/slansf.c
@@ -0,0 +1,1013 @@
+/* slansf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal slansf_(char *norm, char *transr, char *uplo, integer *n, real *a,
+ real *work)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ real ret_val, r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k, l;
+ real s;
+ integer n1;
+ real aa;
+ integer lda, ifm, noe, ilu;
+ real scale;
+ extern logical lsame_(char *, char *);
+ real value;
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLANSF returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* real symmetric matrix A in RFP format. */
+
+/* Description */
+/* =========== */
+
+/* SLANSF returns the value */
+
+/* SLANSF = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER */
+/* Specifies the value to be returned in SLANSF as described */
+/* above. */
+
+/* TRANSR (input) CHARACTER */
+/* Specifies whether the RFP format of A is normal or */
+/* transposed format. */
+/* = 'N': RFP format is Normal; */
+/* = 'T': RFP format is Transpose. */
+
+/* UPLO (input) CHARACTER */
+/* On entry, UPLO specifies whether the RFP matrix A came from */
+/* an upper or lower triangular matrix as follows: */
+/* = 'U': RFP A came from an upper triangular matrix; */
+/* = 'L': RFP A came from a lower triangular matrix. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, SLANSF is */
+/* set to zero. */
+
+/* A (input) REAL array, dimension ( N*(N+1)/2 ); */
+/* On entry, the upper (if UPLO = 'U') or lower (if UPLO = 'L') */
+/* part of the symmetric matrix A stored in RFP format. See the */
+/* "Notes" below for more details. */
+/* Unchanged on exit. */
+
+/* WORK (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/* WORK is not referenced. */
+
+/* Notes */
+/* ===== */
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* even. We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* the transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* the transpose of the last three columns of AP lower. */
+/* This covers the case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 03 04 05 33 43 53 */
+/* 13 14 15 00 44 54 */
+/* 23 24 25 10 11 55 */
+/* 33 34 35 20 21 22 */
+/* 00 44 45 30 31 32 */
+/* 01 11 55 40 41 42 */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* odd. We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* the transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* the transpose of the last two columns of AP lower. */
+/* This covers the case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 02 03 04 00 33 43 */
+/* 12 13 14 10 11 44 */
+/* 22 23 24 20 21 22 */
+/* 00 33 34 30 31 32 */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+/* RFP A RFP A */
+
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* Reference */
+/* ========= */
+
+/* ===================================================================== */
+
+/* .. */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ if (*n == 0) {
+ ret_val = 0.f;
+ return ret_val;
+ }
+
+/* set noe = 1 if n is odd. if n is even set noe=0 */
+
+ noe = 1;
+ if (*n % 2 == 0) {
+ noe = 0;
+ }
+
+/* set ifm = 0 when form='T' or 't' and 1 otherwise */
+
+ ifm = 1;
+ if (lsame_(transr, "T")) {
+ ifm = 0;
+ }
+
+/* set ilu = 0 when uplo='U' or 'u' and 1 otherwise */
+
+ ilu = 1;
+ if (lsame_(uplo, "U")) {
+ ilu = 0;
+ }
+
+/* set lda = (n+1)/2 when ifm = 0 */
+/* set lda = n when ifm = 1 and noe = 1 */
+/* set lda = n+1 when ifm = 1 and noe = 0 */
+
+ if (ifm == 1) {
+ if (noe == 1) {
+ lda = *n;
+ } else {
+/* noe=0 */
+ lda = *n + 1;
+ }
+ } else {
+/* ifm=0 */
+ lda = (*n + 1) / 2;
+ }
+
+ if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ k = (*n + 1) / 2;
+ value = 0.f;
+ if (noe == 1) {
+/* n is odd */
+ if (ifm == 1) {
+/* A is n by k */
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = a[i__ + j * lda], dabs(
+ r__1));
+ value = dmax(r__2,r__3);
+ }
+ }
+ } else {
+/* xpose case; A is k by n */
+ i__1 = *n - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = a[i__ + j * lda], dabs(
+ r__1));
+ value = dmax(r__2,r__3);
+ }
+ }
+ }
+ } else {
+/* n is even */
+ if (ifm == 1) {
+/* A is n+1 by k */
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = a[i__ + j * lda], dabs(
+ r__1));
+ value = dmax(r__2,r__3);
+ }
+ }
+ } else {
+/* xpose case; A is k by n+1 */
+ i__1 = *n;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = a[i__ + j * lda], dabs(
+ r__1));
+ value = dmax(r__2,r__3);
+ }
+ }
+ }
+ }
+ } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/* Find normI(A) ( = norm1(A), since A is symmetric). */
+
+ if (ifm == 1) {
+ k = *n / 2;
+ if (noe == 1) {
+/* n is odd */
+ if (ilu == 0) {
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+ }
+ i__1 = k;
+ for (j = 0; j <= i__1; ++j) {
+ s = 0.f;
+ i__2 = k + j - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* -> A(i,j+k) */
+ s += aa;
+ work[i__] += aa;
+ }
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* -> A(j+k,j+k) */
+ work[j + k] = s + aa;
+ if (i__ == k + k) {
+ goto L10;
+ }
+ ++i__;
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* -> A(j,j) */
+ work[j] += aa;
+ s = 0.f;
+ i__2 = k - 1;
+ for (l = j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* -> A(l,j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[j] += s;
+ }
+L10:
+ i__ = isamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ } else {
+/* ilu = 1 */
+ ++k;
+/* k=(n+1)/2 for n odd and ilu=1 */
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+ }
+ for (j = k - 1; j >= 0; --j) {
+ s = 0.f;
+ i__1 = j - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* -> A(j+k,i+k) */
+ s += aa;
+ work[i__ + k] += aa;
+ }
+ if (j > 0) {
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* -> A(j+k,j+k) */
+ s += aa;
+ work[i__ + k] += s;
+/* i=j */
+ ++i__;
+ }
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* -> A(j,j) */
+ work[j] = aa;
+ s = 0.f;
+ i__1 = *n - 1;
+ for (l = j + 1; l <= i__1; ++l) {
+ ++i__;
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* -> A(l,j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[j] += s;
+ }
+ i__ = isamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ }
+ } else {
+/* n is even */
+ if (ilu == 0) {
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ s = 0.f;
+ i__2 = k + j - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* -> A(i,j+k) */
+ s += aa;
+ work[i__] += aa;
+ }
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* -> A(j+k,j+k) */
+ work[j + k] = s + aa;
+ ++i__;
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* -> A(j,j) */
+ work[j] += aa;
+ s = 0.f;
+ i__2 = k - 1;
+ for (l = j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* -> A(l,j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[j] += s;
+ }
+ i__ = isamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ } else {
+/* ilu = 1 */
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+ }
+ for (j = k - 1; j >= 0; --j) {
+ s = 0.f;
+ i__1 = j - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* -> A(j+k,i+k) */
+ s += aa;
+ work[i__ + k] += aa;
+ }
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* -> A(j+k,j+k) */
+ s += aa;
+ work[i__ + k] += s;
+/* i=j */
+ ++i__;
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* -> A(j,j) */
+ work[j] = aa;
+ s = 0.f;
+ i__1 = *n - 1;
+ for (l = j + 1; l <= i__1; ++l) {
+ ++i__;
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* -> A(l,j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[j] += s;
+ }
+ i__ = isamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ }
+ }
+ } else {
+/* ifm=0 */
+ k = *n / 2;
+ if (noe == 1) {
+/* n is odd */
+ if (ilu == 0) {
+ n1 = k;
+/* n/2 */
+ ++k;
+/* k is the row size and lda */
+ i__1 = *n - 1;
+ for (i__ = n1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+ }
+ i__1 = n1 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ s = 0.f;
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(j,n1+i) */
+ work[i__ + n1] += aa;
+ s += aa;
+ }
+ work[j] = s;
+ }
+/* j=n1=k-1 is special */
+ s = (r__1 = a[j * lda], dabs(r__1));
+/* A(k-1,k-1) */
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(k-1,i+n1) */
+ work[i__ + n1] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+ s = 0.f;
+ i__2 = j - k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(i,j-k) */
+ work[i__] += aa;
+ s += aa;
+ }
+/* i=j-k */
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(j-k,j-k) */
+ s += aa;
+ work[j - k] += s;
+ ++i__;
+ s = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(j,j) */
+ i__2 = *n - 1;
+ for (l = j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(j,l) */
+ work[l] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ }
+ i__ = isamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ } else {
+/* ilu=1 */
+ ++k;
+/* k=(n+1)/2 for n odd and ilu=1 */
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+/* process */
+ s = 0.f;
+ i__2 = j - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(j,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* i=j so process of A(j,j) */
+ s += aa;
+ work[j] = s;
+/* is initialised here */
+ ++i__;
+/* i=j process A(j+k,j+k) */
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+ s = aa;
+ i__2 = *n - 1;
+ for (l = k + j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(l,k+j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[k + j] += s;
+ }
+/* j=k-1 is special :process col A(k-1,0:k-1) */
+ s = 0.f;
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(k,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+/* i=k-1 */
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(k-1,k-1) */
+ s += aa;
+ work[i__] = s;
+/* done with col j=k+1 */
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+/* process col j of A = A(j,0:k-1) */
+ s = 0.f;
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(j,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ }
+ i__ = isamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ }
+ } else {
+/* n is even */
+ if (ilu == 0) {
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ s = 0.f;
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(j,i+k) */
+ work[i__ + k] += aa;
+ s += aa;
+ }
+ work[j] = s;
+ }
+/* j=k */
+ aa = (r__1 = a[j * lda], dabs(r__1));
+/* A(k,k) */
+ s = aa;
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(k,k+i) */
+ work[i__ + k] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ i__1 = *n - 1;
+ for (j = k + 1; j <= i__1; ++j) {
+ s = 0.f;
+ i__2 = j - 2 - k;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(i,j-k-1) */
+ work[i__] += aa;
+ s += aa;
+ }
+/* i=j-1-k */
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(j-k-1,j-k-1) */
+ s += aa;
+ work[j - k - 1] += s;
+ ++i__;
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(j,j) */
+ s = aa;
+ i__2 = *n - 1;
+ for (l = j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(j,l) */
+ work[l] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ }
+/* j=n */
+ s = 0.f;
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(i,k-1) */
+ work[i__] += aa;
+ s += aa;
+ }
+/* i=k-1 */
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(k-1,k-1) */
+ s += aa;
+ work[i__] += s;
+ i__ = isamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ } else {
+/* ilu=1 */
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+ }
+/* j=0 is special :process col A(k:n-1,k) */
+ s = dabs(a[0]);
+/* A(k,k) */
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ aa = (r__1 = a[i__], dabs(r__1));
+/* A(k+i,k) */
+ work[i__ + k] += aa;
+ s += aa;
+ }
+ work[k] += s;
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+/* process */
+ s = 0.f;
+ i__2 = j - 2;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(j-1,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* i=j-1 so process of A(j-1,j-1) */
+ s += aa;
+ work[j - 1] = s;
+/* is initialised here */
+ ++i__;
+/* i=j process A(j+k,j+k) */
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+ s = aa;
+ i__2 = *n - 1;
+ for (l = k + j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(l,k+j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[k + j] += s;
+ }
+/* j=k is special :process col A(k,0:k-1) */
+ s = 0.f;
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(k,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+/* i=k-1 */
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(k-1,k-1) */
+ s += aa;
+ work[i__] = s;
+/* done with col j=k+1 */
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+/* process col j-1 of A = A(j-1,0:k-1) */
+ s = 0.f;
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = (r__1 = a[i__ + j * lda], dabs(r__1));
+/* A(j-1,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+ work[j - 1] += s;
+ }
+ i__ = isamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ }
+ }
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ k = (*n + 1) / 2;
+ scale = 0.f;
+ s = 1.f;
+ if (noe == 1) {
+/* n is odd */
+ if (ifm == 1) {
+/* A is normal */
+ if (ilu == 0) {
+/* A is upper */
+ i__1 = k - 3;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 2;
+ slassq_(&i__2, &a[k + j + 1 + j * lda], &c__1, &scale,
+ &s);
+/* L at A(k,0) */
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k + j - 1;
+ slassq_(&i__2, &a[j * lda], &c__1, &scale, &s);
+/* trap U at A(0,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ i__1 = k - 1;
+ i__2 = lda + 1;
+ slassq_(&i__1, &a[k], &i__2, &scale, &s);
+/* tri L at A(k,0) */
+ i__1 = lda + 1;
+ slassq_(&k, &a[k - 1], &i__1, &scale, &s);
+/* tri U at A(k-1,0) */
+ } else {
+/* ilu=1 & A is lower */
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - j - 1;
+ slassq_(&i__2, &a[j + 1 + j * lda], &c__1, &scale, &s)
+ ;
+/* trap L at A(0,0) */
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ slassq_(&j, &a[(j + 1) * lda], &c__1, &scale, &s);
+/* U at A(0,1) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ i__1 = lda + 1;
+ slassq_(&k, a, &i__1, &scale, &s);
+/* tri L at A(0,0) */
+ i__1 = k - 1;
+ i__2 = lda + 1;
+ slassq_(&i__1, &a[lda], &i__2, &scale, &s);
+/* tri U at A(0,1) */
+ }
+ } else {
+/* A is xpose */
+ if (ilu == 0) {
+/* A' is upper */
+ i__1 = k - 2;
+ for (j = 1; j <= i__1; ++j) {
+ slassq_(&j, &a[(k + j) * lda], &c__1, &scale, &s);
+/* U at A(0,k) */
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ slassq_(&k, &a[j * lda], &c__1, &scale, &s);
+/* k by k-1 rect. at A(0,0) */
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 1;
+ slassq_(&i__2, &a[j + 1 + (j + k - 1) * lda], &c__1, &
+ scale, &s);
+/* L at A(0,k-1) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ i__1 = k - 1;
+ i__2 = lda + 1;
+ slassq_(&i__1, &a[k * lda], &i__2, &scale, &s);
+/* tri U at A(0,k) */
+ i__1 = lda + 1;
+ slassq_(&k, &a[(k - 1) * lda], &i__1, &scale, &s);
+/* tri L at A(0,k-1) */
+ } else {
+/* A' is lower */
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ slassq_(&j, &a[j * lda], &c__1, &scale, &s);
+/* U at A(0,0) */
+ }
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+ slassq_(&k, &a[j * lda], &c__1, &scale, &s);
+/* k by k-1 rect. at A(0,k) */
+ }
+ i__1 = k - 3;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 2;
+ slassq_(&i__2, &a[j + 2 + j * lda], &c__1, &scale, &s)
+ ;
+/* L at A(1,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ i__1 = lda + 1;
+ slassq_(&k, a, &i__1, &scale, &s);
+/* tri U at A(0,0) */
+ i__1 = k - 1;
+ i__2 = lda + 1;
+ slassq_(&i__1, &a[1], &i__2, &scale, &s);
+/* tri L at A(1,0) */
+ }
+ }
+ } else {
+/* n is even */
+ if (ifm == 1) {
+/* A is normal */
+ if (ilu == 0) {
+/* A is upper */
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 1;
+ slassq_(&i__2, &a[k + j + 2 + j * lda], &c__1, &scale,
+ &s);
+/* L at A(k+1,0) */
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k + j;
+ slassq_(&i__2, &a[j * lda], &c__1, &scale, &s);
+/* trap U at A(0,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ i__1 = lda + 1;
+ slassq_(&k, &a[k + 1], &i__1, &scale, &s);
+/* tri L at A(k+1,0) */
+ i__1 = lda + 1;
+ slassq_(&k, &a[k], &i__1, &scale, &s);
+/* tri U at A(k,0) */
+ } else {
+/* ilu=1 & A is lower */
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - j - 1;
+ slassq_(&i__2, &a[j + 2 + j * lda], &c__1, &scale, &s)
+ ;
+/* trap L at A(1,0) */
+ }
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ slassq_(&j, &a[j * lda], &c__1, &scale, &s);
+/* U at A(0,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ i__1 = lda + 1;
+ slassq_(&k, &a[1], &i__1, &scale, &s);
+/* tri L at A(1,0) */
+ i__1 = lda + 1;
+ slassq_(&k, a, &i__1, &scale, &s);
+/* tri U at A(0,0) */
+ }
+ } else {
+/* A is xpose */
+ if (ilu == 0) {
+/* A' is upper */
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ slassq_(&j, &a[(k + 1 + j) * lda], &c__1, &scale, &s);
+/* U at A(0,k+1) */
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ slassq_(&k, &a[j * lda], &c__1, &scale, &s);
+/* k by k rect. at A(0,0) */
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 1;
+ slassq_(&i__2, &a[j + 1 + (j + k) * lda], &c__1, &
+ scale, &s);
+/* L at A(0,k) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ i__1 = lda + 1;
+ slassq_(&k, &a[(k + 1) * lda], &i__1, &scale, &s);
+/* tri U at A(0,k+1) */
+ i__1 = lda + 1;
+ slassq_(&k, &a[k * lda], &i__1, &scale, &s);
+/* tri L at A(0,k) */
+ } else {
+/* A' is lower */
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ slassq_(&j, &a[(j + 1) * lda], &c__1, &scale, &s);
+/* U at A(0,1) */
+ }
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+ slassq_(&k, &a[j * lda], &c__1, &scale, &s);
+/* k by k rect. at A(0,k+1) */
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 1;
+ slassq_(&i__2, &a[j + 1 + j * lda], &c__1, &scale, &s)
+ ;
+/* L at A(0,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ i__1 = lda + 1;
+ slassq_(&k, &a[lda], &i__1, &scale, &s);
+/* tri L at A(0,1) */
+ i__1 = lda + 1;
+ slassq_(&k, a, &i__1, &scale, &s);
+/* tri U at A(0,0) */
+ }
+ }
+ }
+ value = scale * sqrt(s);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of SLANSF */
+
+} /* slansf_ */
diff --git a/contrib/libs/clapack/slansp.c b/contrib/libs/clapack/slansp.c
new file mode 100644
index 0000000000..822c5eda8a
--- /dev/null
+++ b/contrib/libs/clapack/slansp.c
@@ -0,0 +1,262 @@
+/* slansp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal slansp_(char *norm, char *uplo, integer *n, real *ap, real *work)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ real ret_val, r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k;
+ real sum, absa, scale;
+ extern logical lsame_(char *, char *);
+ real value;
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLANSP returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* real symmetric matrix A, supplied in packed form. */
+
+/* Description */
+/* =========== */
+
+/* SLANSP returns the value */
+
+/* SLANSP = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in SLANSP as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is supplied. */
+/* = 'U': Upper triangular part of A is supplied */
+/* = 'L': Lower triangular part of A is supplied */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, SLANSP is */
+/* set to zero. */
+
+/* AP (input) REAL array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the symmetric matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* WORK (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/* WORK is not referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --work;
+ --ap;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.f;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.f;
+ if (lsame_(uplo, "U")) {
+ k = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = k + j - 1;
+ for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = ap[i__], dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L10: */
+ }
+ k += j;
+/* L20: */
+ }
+ } else {
+ k = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = k + *n - j;
+ for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = ap[i__], dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L30: */
+ }
+ k = k + *n - j + 1;
+/* L40: */
+ }
+ }
+ } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/* Find normI(A) ( = norm1(A), since A is symmetric). */
+
+ value = 0.f;
+ k = 1;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.f;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ absa = (r__1 = ap[k], dabs(r__1));
+ sum += absa;
+ work[i__] += absa;
+ ++k;
+/* L50: */
+ }
+ work[j] = sum + (r__1 = ap[k], dabs(r__1));
+ ++k;
+/* L60: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = work[i__];
+ value = dmax(r__1,r__2);
+/* L70: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L80: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = work[j] + (r__1 = ap[k], dabs(r__1));
+ ++k;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ absa = (r__1 = ap[k], dabs(r__1));
+ sum += absa;
+ work[i__] += absa;
+ ++k;
+/* L90: */
+ }
+ value = dmax(value,sum);
+/* L100: */
+ }
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.f;
+ sum = 1.f;
+ k = 2;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j - 1;
+ slassq_(&i__2, &ap[k], &c__1, &scale, &sum);
+ k += j;
+/* L110: */
+ }
+ } else {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ slassq_(&i__2, &ap[k], &c__1, &scale, &sum);
+ k = k + *n - j + 1;
+/* L120: */
+ }
+ }
+ sum *= 2;
+ k = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (ap[k] != 0.f) {
+ absa = (r__1 = ap[k], dabs(r__1));
+ if (scale < absa) {
+/* Computing 2nd power */
+ r__1 = scale / absa;
+ sum = sum * (r__1 * r__1) + 1.f;
+ scale = absa;
+ } else {
+/* Computing 2nd power */
+ r__1 = absa / scale;
+ sum += r__1 * r__1;
+ }
+ }
+ if (lsame_(uplo, "U")) {
+ k = k + i__ + 1;
+ } else {
+ k = k + *n - i__ + 1;
+ }
+/* L130: */
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of SLANSP */
+
+} /* slansp_ */
diff --git a/contrib/libs/clapack/slanst.c b/contrib/libs/clapack/slanst.c
new file mode 100644
index 0000000000..dd240cad48
--- /dev/null
+++ b/contrib/libs/clapack/slanst.c
@@ -0,0 +1,166 @@
+/* slanst.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal slanst_(char *norm, integer *n, real *d__, real *e)
+{
+ /* System generated locals */
+ integer i__1;
+ real ret_val, r__1, r__2, r__3, r__4, r__5;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ real sum, scale;
+ extern logical lsame_(char *, char *);
+ real anorm;
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLANST returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* real symmetric tridiagonal matrix A. */
+
+/* Description */
+/* =========== */
+
+/* SLANST returns the value */
+
+/* SLANST = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in SLANST as described */
+/* above. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, SLANST is */
+/* set to zero. */
+
+/* D (input) REAL array, dimension (N) */
+/* The diagonal elements of A. */
+
+/* E (input) REAL array, dimension (N-1) */
+/* The (n-1) sub-diagonal or super-diagonal elements of A. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --e;
+ --d__;
+
+ /* Function Body */
+ if (*n <= 0) {
+ anorm = 0.f;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ anorm = (r__1 = d__[*n], dabs(r__1));
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__2 = anorm, r__3 = (r__1 = d__[i__], dabs(r__1));
+ anorm = dmax(r__2,r__3);
+/* Computing MAX */
+ r__2 = anorm, r__3 = (r__1 = e[i__], dabs(r__1));
+ anorm = dmax(r__2,r__3);
+/* L10: */
+ }
+ } else if (lsame_(norm, "O") || *(unsigned char *)
+ norm == '1' || lsame_(norm, "I")) {
+
+/* Find norm1(A). */
+
+ if (*n == 1) {
+ anorm = dabs(d__[1]);
+ } else {
+/* Computing MAX */
+ r__3 = dabs(d__[1]) + dabs(e[1]), r__4 = (r__1 = e[*n - 1], dabs(
+ r__1)) + (r__2 = d__[*n], dabs(r__2));
+ anorm = dmax(r__3,r__4);
+ i__1 = *n - 1;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__4 = anorm, r__5 = (r__1 = d__[i__], dabs(r__1)) + (r__2 =
+ e[i__], dabs(r__2)) + (r__3 = e[i__ - 1], dabs(r__3));
+ anorm = dmax(r__4,r__5);
+/* L20: */
+ }
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.f;
+ sum = 1.f;
+ if (*n > 1) {
+ i__1 = *n - 1;
+ slassq_(&i__1, &e[1], &c__1, &scale, &sum);
+ sum *= 2;
+ }
+ slassq_(n, &d__[1], &c__1, &scale, &sum);
+ anorm = scale * sqrt(sum);
+ }
+
+ ret_val = anorm;
+ return ret_val;
+
+/* End of SLANST */
+
+} /* slanst_ */
diff --git a/contrib/libs/clapack/slansy.c b/contrib/libs/clapack/slansy.c
new file mode 100644
index 0000000000..1064b19009
--- /dev/null
+++ b/contrib/libs/clapack/slansy.c
@@ -0,0 +1,239 @@
+/* slansy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal slansy_(char *norm, char *uplo, integer *n, real *a, integer *lda,
+ real *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ real ret_val, r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j;
+ real sum, absa, scale;
+ extern logical lsame_(char *, char *);
+ real value;
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLANSY returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* real symmetric matrix A. */
+
+/* Description */
+/* =========== */
+
+/* SLANSY returns the value */
+
+/* SLANSY = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in SLANSY as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is to be referenced. */
+/* = 'U': Upper triangular part of A is referenced */
+/* = 'L': Lower triangular part of A is referenced */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, SLANSY is */
+/* set to zero. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The symmetric matrix A. If UPLO = 'U', the leading n by n */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced. If UPLO = 'L', the leading n by n lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(N,1). */
+
+/* WORK (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/* WORK is not referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.f;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.f;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = a[i__ + j * a_dim1], dabs(
+ r__1));
+ value = dmax(r__2,r__3);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = a[i__ + j * a_dim1], dabs(
+ r__1));
+ value = dmax(r__2,r__3);
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/* Find normI(A) ( = norm1(A), since A is symmetric). */
+
+ value = 0.f;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.f;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ absa = (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+ sum += absa;
+ work[i__] += absa;
+/* L50: */
+ }
+ work[j] = sum + (r__1 = a[j + j * a_dim1], dabs(r__1));
+/* L60: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = work[i__];
+ value = dmax(r__1,r__2);
+/* L70: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L80: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = work[j] + (r__1 = a[j + j * a_dim1], dabs(r__1));
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ absa = (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+ sum += absa;
+ work[i__] += absa;
+/* L90: */
+ }
+ value = dmax(value,sum);
+/* L100: */
+ }
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.f;
+ sum = 1.f;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j - 1;
+ slassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L110: */
+ }
+ } else {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ slassq_(&i__2, &a[j + 1 + j * a_dim1], &c__1, &scale, &sum);
+/* L120: */
+ }
+ }
+ sum *= 2;
+ i__1 = *lda + 1;
+ slassq_(n, &a[a_offset], &i__1, &scale, &sum);
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of SLANSY */
+
+} /* slansy_ */
diff --git a/contrib/libs/clapack/slantb.c b/contrib/libs/clapack/slantb.c
new file mode 100644
index 0000000000..22bdf10a1f
--- /dev/null
+++ b/contrib/libs/clapack/slantb.c
@@ -0,0 +1,434 @@
+/* slantb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal slantb_(char *norm, char *uplo, char *diag, integer *n, integer *k,
+ real *ab, integer *ldab, real *work)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5;
+ real ret_val, r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, l;
+ real sum, scale;
+ logical udiag;
+ extern logical lsame_(char *, char *);
+ real value;
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLANTB returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of an */
+/* n by n triangular band matrix A, with ( k + 1 ) diagonals. */
+
+/* Description */
+/* =========== */
+
+/* SLANTB returns the value */
+
+/* SLANTB = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in SLANTB as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, SLANTB is */
+/* set to zero. */
+
+/* K (input) INTEGER */
+/* The number of super-diagonals of the matrix A if UPLO = 'U', */
+/* or the number of sub-diagonals of the matrix A if UPLO = 'L'. */
+/* K >= 0. */
+
+/* AB (input) REAL array, dimension (LDAB,N) */
+/* The upper or lower triangular band matrix A, stored in the */
+/* first k+1 rows of AB. The j-th column of A is stored */
+/* in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(k+1+i-j,j) = A(i,j) for max(1,j-k)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+k). */
+/* Note that when DIAG = 'U', the elements of the array AB */
+/* corresponding to the diagonal elements of the matrix A are */
+/* not referenced, but are assumed to be one. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= K+1. */
+
+/* WORK (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.f;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ if (lsame_(diag, "U")) {
+ value = 1.f;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = *k + 2 - j;
+ i__3 = *k;
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = ab[i__ + j * ab_dim1],
+ dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *n + 1 - j, i__4 = *k + 1;
+ i__3 = min(i__2,i__4);
+ for (i__ = 2; i__ <= i__3; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = ab[i__ + j * ab_dim1],
+ dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else {
+ value = 0.f;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__3 = *k + 2 - j;
+ i__2 = *k + 1;
+ for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = ab[i__ + j * ab_dim1],
+ dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n + 1 - j, i__4 = *k + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = ab[i__ + j * ab_dim1],
+ dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ } else if (lsame_(norm, "O") || *(unsigned char *)
+ norm == '1') {
+
+/* Find norm1(A). */
+
+ value = 0.f;
+ udiag = lsame_(diag, "U");
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (udiag) {
+ sum = 1.f;
+/* Computing MAX */
+ i__2 = *k + 2 - j;
+ i__3 = *k;
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+ sum += (r__1 = ab[i__ + j * ab_dim1], dabs(r__1));
+/* L90: */
+ }
+ } else {
+ sum = 0.f;
+/* Computing MAX */
+ i__3 = *k + 2 - j;
+ i__2 = *k + 1;
+ for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+ sum += (r__1 = ab[i__ + j * ab_dim1], dabs(r__1));
+/* L100: */
+ }
+ }
+ value = dmax(value,sum);
+/* L110: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (udiag) {
+ sum = 1.f;
+/* Computing MIN */
+ i__3 = *n + 1 - j, i__4 = *k + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ sum += (r__1 = ab[i__ + j * ab_dim1], dabs(r__1));
+/* L120: */
+ }
+ } else {
+ sum = 0.f;
+/* Computing MIN */
+ i__3 = *n + 1 - j, i__4 = *k + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum += (r__1 = ab[i__ + j * ab_dim1], dabs(r__1));
+/* L130: */
+ }
+ }
+ value = dmax(value,sum);
+/* L140: */
+ }
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ value = 0.f;
+ if (lsame_(uplo, "U")) {
+ if (lsame_(diag, "U")) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 1.f;
+/* L150: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ l = *k + 1 - j;
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__4 = j - 1;
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ work[i__] += (r__1 = ab[l + i__ + j * ab_dim1], dabs(
+ r__1));
+/* L160: */
+ }
+/* L170: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L180: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ l = *k + 1 - j;
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *k;
+ i__3 = j;
+ for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+ work[i__] += (r__1 = ab[l + i__ + j * ab_dim1], dabs(
+ r__1));
+/* L190: */
+ }
+/* L200: */
+ }
+ }
+ } else {
+ if (lsame_(diag, "U")) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 1.f;
+/* L210: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ l = 1 - j;
+/* Computing MIN */
+ i__4 = *n, i__2 = j + *k;
+ i__3 = min(i__4,i__2);
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ work[i__] += (r__1 = ab[l + i__ + j * ab_dim1], dabs(
+ r__1));
+/* L220: */
+ }
+/* L230: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L240: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ l = 1 - j;
+/* Computing MIN */
+ i__4 = *n, i__2 = j + *k;
+ i__3 = min(i__4,i__2);
+ for (i__ = j; i__ <= i__3; ++i__) {
+ work[i__] += (r__1 = ab[l + i__ + j * ab_dim1], dabs(
+ r__1));
+/* L250: */
+ }
+/* L260: */
+ }
+ }
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = work[i__];
+ value = dmax(r__1,r__2);
+/* L270: */
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ if (lsame_(uplo, "U")) {
+ if (lsame_(diag, "U")) {
+ scale = 1.f;
+ sum = (real) (*n);
+ if (*k > 0) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+ i__4 = j - 1;
+ i__3 = min(i__4,*k);
+/* Computing MAX */
+ i__2 = *k + 2 - j;
+ slassq_(&i__3, &ab[max(i__2, 1)+ j * ab_dim1], &c__1,
+ &scale, &sum);
+/* L280: */
+ }
+ }
+ } else {
+ scale = 0.f;
+ sum = 1.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__4 = j, i__2 = *k + 1;
+ i__3 = min(i__4,i__2);
+/* Computing MAX */
+ i__5 = *k + 2 - j;
+ slassq_(&i__3, &ab[max(i__5, 1)+ j * ab_dim1], &c__1, &
+ scale, &sum);
+/* L290: */
+ }
+ }
+ } else {
+ if (lsame_(diag, "U")) {
+ scale = 1.f;
+ sum = (real) (*n);
+ if (*k > 0) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__4 = *n - j;
+ i__3 = min(i__4,*k);
+ slassq_(&i__3, &ab[j * ab_dim1 + 2], &c__1, &scale, &
+ sum);
+/* L300: */
+ }
+ }
+ } else {
+ scale = 0.f;
+ sum = 1.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__4 = *n - j + 1, i__2 = *k + 1;
+ i__3 = min(i__4,i__2);
+ slassq_(&i__3, &ab[j * ab_dim1 + 1], &c__1, &scale, &sum);
+/* L310: */
+ }
+ }
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of SLANTB */
+
+} /* slantb_ */
diff --git a/contrib/libs/clapack/slantp.c b/contrib/libs/clapack/slantp.c
new file mode 100644
index 0000000000..f004758bf3
--- /dev/null
+++ b/contrib/libs/clapack/slantp.c
@@ -0,0 +1,391 @@
+/* slantp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal slantp_(char *norm, char *uplo, char *diag, integer *n, real *ap,
+ real *work)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ real ret_val, r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k;
+ real sum, scale;
+ logical udiag;
+ extern logical lsame_(char *, char *);
+ real value;
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLANTP returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* triangular matrix A, supplied in packed form. */
+
+/* Description */
+/* =========== */
+
+/* SLANTP returns the value */
+
+/* SLANTP = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in SLANTP as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, SLANTP is */
+/* set to zero. */
+
+/* AP (input) REAL array, dimension (N*(N+1)/2) */
+/* The upper or lower triangular matrix A, packed columnwise in */
+/* a linear array. The j-th column of A is stored in the array */
+/* AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* Note that when DIAG = 'U', the elements of the array AP */
+/* corresponding to the diagonal elements of the matrix A are */
+/* not referenced, but are assumed to be one. */
+
+/* WORK (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --work;
+ --ap;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.f;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ k = 1;
+ if (lsame_(diag, "U")) {
+ value = 1.f;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = k + j - 2;
+ for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = ap[i__], dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L10: */
+ }
+ k += j;
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = k + *n - j;
+ for (i__ = k + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = ap[i__], dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L30: */
+ }
+ k = k + *n - j + 1;
+/* L40: */
+ }
+ }
+ } else {
+ value = 0.f;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = k + j - 1;
+ for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = ap[i__], dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L50: */
+ }
+ k += j;
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = k + *n - j;
+ for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = ap[i__], dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L70: */
+ }
+ k = k + *n - j + 1;
+/* L80: */
+ }
+ }
+ }
+ } else if (lsame_(norm, "O") || *(unsigned char *)
+ norm == '1') {
+
+/* Find norm1(A). */
+
+ value = 0.f;
+ k = 1;
+ udiag = lsame_(diag, "U");
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (udiag) {
+ sum = 1.f;
+ i__2 = k + j - 2;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ sum += (r__1 = ap[i__], dabs(r__1));
+/* L90: */
+ }
+ } else {
+ sum = 0.f;
+ i__2 = k + j - 1;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ sum += (r__1 = ap[i__], dabs(r__1));
+/* L100: */
+ }
+ }
+ k += j;
+ value = dmax(value,sum);
+/* L110: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (udiag) {
+ sum = 1.f;
+ i__2 = k + *n - j;
+ for (i__ = k + 1; i__ <= i__2; ++i__) {
+ sum += (r__1 = ap[i__], dabs(r__1));
+/* L120: */
+ }
+ } else {
+ sum = 0.f;
+ i__2 = k + *n - j;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ sum += (r__1 = ap[i__], dabs(r__1));
+/* L130: */
+ }
+ }
+ k = k + *n - j + 1;
+ value = dmax(value,sum);
+/* L140: */
+ }
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ k = 1;
+ if (lsame_(uplo, "U")) {
+ if (lsame_(diag, "U")) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 1.f;
+/* L150: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] += (r__1 = ap[k], dabs(r__1));
+ ++k;
+/* L160: */
+ }
+ ++k;
+/* L170: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L180: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] += (r__1 = ap[k], dabs(r__1));
+ ++k;
+/* L190: */
+ }
+/* L200: */
+ }
+ }
+ } else {
+ if (lsame_(diag, "U")) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 1.f;
+/* L210: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ ++k;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ work[i__] += (r__1 = ap[k], dabs(r__1));
+ ++k;
+/* L220: */
+ }
+/* L230: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L240: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ work[i__] += (r__1 = ap[k], dabs(r__1));
+ ++k;
+/* L250: */
+ }
+/* L260: */
+ }
+ }
+ }
+ value = 0.f;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = work[i__];
+ value = dmax(r__1,r__2);
+/* L270: */
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ if (lsame_(uplo, "U")) {
+ if (lsame_(diag, "U")) {
+ scale = 1.f;
+ sum = (real) (*n);
+ k = 2;
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j - 1;
+ slassq_(&i__2, &ap[k], &c__1, &scale, &sum);
+ k += j;
+/* L280: */
+ }
+ } else {
+ scale = 0.f;
+ sum = 1.f;
+ k = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ slassq_(&j, &ap[k], &c__1, &scale, &sum);
+ k += j;
+/* L290: */
+ }
+ }
+ } else {
+ if (lsame_(diag, "U")) {
+ scale = 1.f;
+ sum = (real) (*n);
+ k = 2;
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ slassq_(&i__2, &ap[k], &c__1, &scale, &sum);
+ k = k + *n - j + 1;
+/* L300: */
+ }
+ } else {
+ scale = 0.f;
+ sum = 1.f;
+ k = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j + 1;
+ slassq_(&i__2, &ap[k], &c__1, &scale, &sum);
+ k = k + *n - j + 1;
+/* L310: */
+ }
+ }
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of SLANTP */
+
+} /* slantp_ */
diff --git a/contrib/libs/clapack/slantr.c b/contrib/libs/clapack/slantr.c
new file mode 100644
index 0000000000..b62c8d9d62
--- /dev/null
+++ b/contrib/libs/clapack/slantr.c
@@ -0,0 +1,398 @@
+/* slantr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal slantr_(char *norm, char *uplo, char *diag, integer *m, integer *n,
+ real *a, integer *lda, real *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ real ret_val, r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j;
+ real sum, scale;
+ logical udiag;
+ extern logical lsame_(char *, char *);
+ real value;
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLANTR returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* trapezoidal or triangular matrix A. */
+
+/* Description */
+/* =========== */
+
+/* SLANTR returns the value */
+
+/* SLANTR = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in SLANTR as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower trapezoidal. */
+/* = 'U': Upper trapezoidal */
+/* = 'L': Lower trapezoidal */
+/* Note that A is triangular instead of trapezoidal if M = N. */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A has unit diagonal. */
+/* = 'N': Non-unit diagonal */
+/* = 'U': Unit diagonal */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0, and if */
+/* UPLO = 'U', M <= N. When M = 0, SLANTR is set to zero. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0, and if */
+/* UPLO = 'L', N <= M. When N = 0, SLANTR is set to zero. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The trapezoidal matrix A (A is triangular if M = N). */
+/* If UPLO = 'U', the leading m by n upper trapezoidal part of */
+/* the array A contains the upper trapezoidal matrix, and the */
+/* strictly lower triangular part of A is not referenced. */
+/* If UPLO = 'L', the leading m by n lower trapezoidal part of */
+/* the array A contains the lower trapezoidal matrix, and the */
+/* strictly upper triangular part of A is not referenced. Note */
+/* that when DIAG = 'U', the diagonal elements of A are not */
+/* referenced and are assumed to be one. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(M,1). */
+
+/* WORK (workspace) REAL array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= M when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+
+ /* Function Body */
+ if (min(*m,*n) == 0) {
+ value = 0.f;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ if (lsame_(diag, "U")) {
+ value = 1.f;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *m, i__4 = j - 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = a[i__ + j * a_dim1],
+ dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = a[i__ + j * a_dim1],
+ dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else {
+ value = 0.f;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(*m,j);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = a[i__ + j * a_dim1],
+ dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = a[i__ + j * a_dim1],
+ dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ } else if (lsame_(norm, "O") || *(unsigned char *)
+ norm == '1') {
+
+/* Find norm1(A). */
+
+ value = 0.f;
+ udiag = lsame_(diag, "U");
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (udiag && j <= *m) {
+ sum = 1.f;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum += (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+/* L90: */
+ }
+ } else {
+ sum = 0.f;
+ i__2 = min(*m,j);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum += (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+/* L100: */
+ }
+ }
+ value = dmax(value,sum);
+/* L110: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (udiag) {
+ sum = 1.f;
+ i__2 = *m;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ sum += (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+/* L120: */
+ }
+ } else {
+ sum = 0.f;
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ sum += (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+/* L130: */
+ }
+ }
+ value = dmax(value,sum);
+/* L140: */
+ }
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ if (lsame_(uplo, "U")) {
+ if (lsame_(diag, "U")) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 1.f;
+/* L150: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *m, i__4 = j - 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] += (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+/* L160: */
+ }
+/* L170: */
+ }
+ } else {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L180: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(*m,j);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] += (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+/* L190: */
+ }
+/* L200: */
+ }
+ }
+ } else {
+ if (lsame_(diag, "U")) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 1.f;
+/* L210: */
+ }
+ i__1 = *m;
+ for (i__ = *n + 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L220: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ work[i__] += (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+/* L230: */
+ }
+/* L240: */
+ }
+ } else {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L250: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ work[i__] += (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+/* L260: */
+ }
+/* L270: */
+ }
+ }
+ }
+ value = 0.f;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = work[i__];
+ value = dmax(r__1,r__2);
+/* L280: */
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ if (lsame_(uplo, "U")) {
+ if (lsame_(diag, "U")) {
+ scale = 1.f;
+ sum = (real) min(*m,*n);
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *m, i__4 = j - 1;
+ i__2 = min(i__3,i__4);
+ slassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L290: */
+ }
+ } else {
+ scale = 0.f;
+ sum = 1.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(*m,j);
+ slassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L300: */
+ }
+ }
+ } else {
+ if (lsame_(diag, "U")) {
+ scale = 1.f;
+ sum = (real) min(*m,*n);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m - j;
+/* Computing MIN */
+ i__3 = *m, i__4 = j + 1;
+ slassq_(&i__2, &a[min(i__3, i__4)+ j * a_dim1], &c__1, &
+ scale, &sum);
+/* L310: */
+ }
+ } else {
+ scale = 0.f;
+ sum = 1.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m - j + 1;
+ slassq_(&i__2, &a[j + j * a_dim1], &c__1, &scale, &sum);
+/* L320: */
+ }
+ }
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of SLANTR */
+
+} /* slantr_ */
diff --git a/contrib/libs/clapack/slanv2.c b/contrib/libs/clapack/slanv2.c
new file mode 100644
index 0000000000..95a593433d
--- /dev/null
+++ b/contrib/libs/clapack/slanv2.c
@@ -0,0 +1,234 @@
+/* slanv2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b4 = 1.f;
+
+/* Subroutine */ int slanv2_(real *a, real *b, real *c__, real *d__, real *
+ rt1r, real *rt1i, real *rt2r, real *rt2i, real *cs, real *sn)
+{
+ /* System generated locals */
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double r_sign(real *, real *), sqrt(doublereal);
+
+ /* Local variables */
+ real p, z__, aa, bb, cc, dd, cs1, sn1, sab, sac, eps, tau, temp, scale,
+ bcmax, bcmis, sigma;
+ extern doublereal slapy2_(real *, real *), slamch_(char *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLANV2 computes the Schur factorization of a real 2-by-2 nonsymmetric */
+/* matrix in standard form: */
+
+/* [ A B ] = [ CS -SN ] [ AA BB ] [ CS SN ] */
+/* [ C D ] [ SN CS ] [ CC DD ] [-SN CS ] */
+
+/* where either */
+/* 1) CC = 0 so that AA and DD are real eigenvalues of the matrix, or */
+/* 2) AA = DD and BB*CC < 0, so that AA + or - sqrt(BB*CC) are complex */
+/* conjugate eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* A (input/output) REAL */
+/* B (input/output) REAL */
+/* C (input/output) REAL */
+/* D (input/output) REAL */
+/* On entry, the elements of the input matrix. */
+/* On exit, they are overwritten by the elements of the */
+/* standardised Schur form. */
+
+/* RT1R (output) REAL */
+/* RT1I (output) REAL */
+/* RT2R (output) REAL */
+/* RT2I (output) REAL */
+/* The real and imaginary parts of the eigenvalues. If the */
+/* eigenvalues are a complex conjugate pair, RT1I > 0. */
+
+/* CS (output) REAL */
+/* SN (output) REAL */
+/* Parameters of the rotation matrix. */
+
+/* Further Details */
+/* =============== */
+
+/* Modified by V. Sima, Research Institute for Informatics, Bucharest, */
+/* Romania, to reduce the risk of cancellation errors, */
+/* when computing real eigenvalues, and to ensure, if possible, that */
+/* abs(RT1R) >= abs(RT2R). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ eps = slamch_("P");
+ if (*c__ == 0.f) {
+ *cs = 1.f;
+ *sn = 0.f;
+ goto L10;
+
+ } else if (*b == 0.f) {
+
+/* Swap rows and columns */
+
+ *cs = 0.f;
+ *sn = 1.f;
+ temp = *d__;
+ *d__ = *a;
+ *a = temp;
+ *b = -(*c__);
+ *c__ = 0.f;
+ goto L10;
+ } else if (*a - *d__ == 0.f && r_sign(&c_b4, b) != r_sign(&c_b4, c__)) {
+ *cs = 1.f;
+ *sn = 0.f;
+ goto L10;
+ } else {
+
+ temp = *a - *d__;
+ p = temp * .5f;
+/* Computing MAX */
+ r__1 = dabs(*b), r__2 = dabs(*c__);
+ bcmax = dmax(r__1,r__2);
+/* Computing MIN */
+ r__1 = dabs(*b), r__2 = dabs(*c__);
+ bcmis = dmin(r__1,r__2) * r_sign(&c_b4, b) * r_sign(&c_b4, c__);
+/* Computing MAX */
+ r__1 = dabs(p);
+ scale = dmax(r__1,bcmax);
+ z__ = p / scale * p + bcmax / scale * bcmis;
+
+/* If Z is of the order of the machine accuracy, postpone the */
+/* decision on the nature of eigenvalues */
+
+ if (z__ >= eps * 4.f) {
+
+/* Real eigenvalues. Compute A and D. */
+
+ r__1 = sqrt(scale) * sqrt(z__);
+ z__ = p + r_sign(&r__1, &p);
+ *a = *d__ + z__;
+ *d__ -= bcmax / z__ * bcmis;
+
+/* Compute B and the rotation matrix */
+
+ tau = slapy2_(c__, &z__);
+ *cs = z__ / tau;
+ *sn = *c__ / tau;
+ *b -= *c__;
+ *c__ = 0.f;
+ } else {
+
+/* Complex eigenvalues, or real (almost) equal eigenvalues. */
+/* Make diagonal elements equal. */
+
+ sigma = *b + *c__;
+ tau = slapy2_(&sigma, &temp);
+ *cs = sqrt((dabs(sigma) / tau + 1.f) * .5f);
+ *sn = -(p / (tau * *cs)) * r_sign(&c_b4, &sigma);
+
+/* Compute [ AA BB ] = [ A B ] [ CS -SN ] */
+/* [ CC DD ] [ C D ] [ SN CS ] */
+
+ aa = *a * *cs + *b * *sn;
+ bb = -(*a) * *sn + *b * *cs;
+ cc = *c__ * *cs + *d__ * *sn;
+ dd = -(*c__) * *sn + *d__ * *cs;
+
+/* Compute [ A B ] = [ CS SN ] [ AA BB ] */
+/* [ C D ] [-SN CS ] [ CC DD ] */
+
+ *a = aa * *cs + cc * *sn;
+ *b = bb * *cs + dd * *sn;
+ *c__ = -aa * *sn + cc * *cs;
+ *d__ = -bb * *sn + dd * *cs;
+
+ temp = (*a + *d__) * .5f;
+ *a = temp;
+ *d__ = temp;
+
+ if (*c__ != 0.f) {
+ if (*b != 0.f) {
+ if (r_sign(&c_b4, b) == r_sign(&c_b4, c__)) {
+
+/* Real eigenvalues: reduce to upper triangular form */
+
+ sab = sqrt((dabs(*b)));
+ sac = sqrt((dabs(*c__)));
+ r__1 = sab * sac;
+ p = r_sign(&r__1, c__);
+ tau = 1.f / sqrt((r__1 = *b + *c__, dabs(r__1)));
+ *a = temp + p;
+ *d__ = temp - p;
+ *b -= *c__;
+ *c__ = 0.f;
+ cs1 = sab * tau;
+ sn1 = sac * tau;
+ temp = *cs * cs1 - *sn * sn1;
+ *sn = *cs * sn1 + *sn * cs1;
+ *cs = temp;
+ }
+ } else {
+ *b = -(*c__);
+ *c__ = 0.f;
+ temp = *cs;
+ *cs = -(*sn);
+ *sn = temp;
+ }
+ }
+ }
+
+ }
+
+L10:
+
+/* Store eigenvalues in (RT1R,RT1I) and (RT2R,RT2I). */
+
+ *rt1r = *a;
+ *rt2r = *d__;
+ if (*c__ == 0.f) {
+ *rt1i = 0.f;
+ *rt2i = 0.f;
+ } else {
+ *rt1i = sqrt((dabs(*b))) * sqrt((dabs(*c__)));
+ *rt2i = -(*rt1i);
+ }
+ return 0;
+
+/* End of SLANV2 */
+
+} /* slanv2_ */
diff --git a/contrib/libs/clapack/slapll.c b/contrib/libs/clapack/slapll.c
new file mode 100644
index 0000000000..45c30f1ebd
--- /dev/null
+++ b/contrib/libs/clapack/slapll.c
@@ -0,0 +1,126 @@
+/* slapll.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slapll_(integer *n, real *x, integer *incx, real *y,
+ integer *incy, real *ssmin)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ real c__, a11, a12, a22, tau;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ extern /* Subroutine */ int slas2_(real *, real *, real *, real *, real *)
+ ;
+ real ssmax;
+ extern /* Subroutine */ int saxpy_(integer *, real *, real *, integer *,
+ real *, integer *), slarfg_(integer *, real *, real *, integer *,
+ real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Given two column vectors X and Y, let */
+
+/* A = ( X Y ). */
+
+/* The subroutine first computes the QR factorization of A = Q*R, */
+/* and then computes the SVD of the 2-by-2 upper triangular matrix R. */
+/* The smaller singular value of R is returned in SSMIN, which is used */
+/* as the measurement of the linear dependency of the vectors X and Y. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The length of the vectors X and Y. */
+
+/* X (input/output) REAL array, */
+/* dimension (1+(N-1)*INCX) */
+/* On entry, X contains the N-vector X. */
+/* On exit, X is overwritten. */
+
+/* INCX (input) INTEGER */
+/* The increment between successive elements of X. INCX > 0. */
+
+/* Y (input/output) REAL array, */
+/* dimension (1+(N-1)*INCY) */
+/* On entry, Y contains the N-vector Y. */
+/* On exit, Y is overwritten. */
+
+/* INCY (input) INTEGER */
+/* The increment between successive elements of Y. INCY > 0. */
+
+/* SSMIN (output) REAL */
+/* The smallest singular value of the N-by-2 matrix A = ( X Y ). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ --y;
+ --x;
+
+ /* Function Body */
+ if (*n <= 1) {
+ *ssmin = 0.f;
+ return 0;
+ }
+
+/* Compute the QR factorization of the N-by-2 matrix ( X Y ) */
+
+ slarfg_(n, &x[1], &x[*incx + 1], incx, &tau);
+ a11 = x[1];
+ x[1] = 1.f;
+
+ c__ = -tau * sdot_(n, &x[1], incx, &y[1], incy);
+ saxpy_(n, &c__, &x[1], incx, &y[1], incy);
+
+ i__1 = *n - 1;
+ slarfg_(&i__1, &y[*incy + 1], &y[(*incy << 1) + 1], incy, &tau);
+
+ a12 = y[1];
+ a22 = y[*incy + 1];
+
+/* Compute the SVD of 2-by-2 Upper triangular matrix. */
+
+ slas2_(&a11, &a12, &a22, ssmin, &ssmax);
+
+ return 0;
+
+/* End of SLAPLL */
+
+} /* slapll_ */
diff --git a/contrib/libs/clapack/slapmt.c b/contrib/libs/clapack/slapmt.c
new file mode 100644
index 0000000000..b77282e8cb
--- /dev/null
+++ b/contrib/libs/clapack/slapmt.c
@@ -0,0 +1,177 @@
+/* slapmt.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slapmt_(logical *forwrd, integer *m, integer *n, real *x,
+ integer *ldx, integer *k)
+{
+ /* System generated locals */
+ integer x_dim1, x_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, ii, in;
+ real temp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAPMT rearranges the columns of the M by N matrix X as specified */
+/* by the permutation K(1),K(2),...,K(N) of the integers 1,...,N. */
+/* If FORWRD = .TRUE., forward permutation: */
+
+/* X(*,K(J)) is moved X(*,J) for J = 1,2,...,N. */
+
+/* If FORWRD = .FALSE., backward permutation: */
+
+/* X(*,J) is moved to X(*,K(J)) for J = 1,2,...,N. */
+
+/* Arguments */
+/* ========= */
+
+/* FORWRD (input) LOGICAL */
+/* = .TRUE., forward permutation */
+/* = .FALSE., backward permutation */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix X. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix X. N >= 0. */
+
+/* X (input/output) REAL array, dimension (LDX,N) */
+/* On entry, the M by N matrix X. */
+/* On exit, X contains the permuted matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X, LDX >= MAX(1,M). */
+
+/* K (input/output) INTEGER array, dimension (N) */
+/* On entry, K contains the permutation vector. K is used as */
+/* internal workspace, but reset to its original value on */
+/* output. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --k;
+
+ /* Function Body */
+ if (*n <= 1) {
+ return 0;
+ }
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ k[i__] = -k[i__];
+/* L10: */
+ }
+
+ if (*forwrd) {
+
+/* Forward permutation */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+ if (k[i__] > 0) {
+ goto L40;
+ }
+
+ j = i__;
+ k[j] = -k[j];
+ in = k[j];
+
+L20:
+ if (k[in] > 0) {
+ goto L40;
+ }
+
+ i__2 = *m;
+ for (ii = 1; ii <= i__2; ++ii) {
+ temp = x[ii + j * x_dim1];
+ x[ii + j * x_dim1] = x[ii + in * x_dim1];
+ x[ii + in * x_dim1] = temp;
+/* L30: */
+ }
+
+ k[in] = -k[in];
+ j = in;
+ in = k[in];
+ goto L20;
+
+L40:
+
+/* L60: */
+ ;
+ }
+
+ } else {
+
+/* Backward permutation */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+ if (k[i__] > 0) {
+ goto L100;
+ }
+
+ k[i__] = -k[i__];
+ j = k[i__];
+L80:
+ if (j == i__) {
+ goto L100;
+ }
+
+ i__2 = *m;
+ for (ii = 1; ii <= i__2; ++ii) {
+ temp = x[ii + i__ * x_dim1];
+ x[ii + i__ * x_dim1] = x[ii + j * x_dim1];
+ x[ii + j * x_dim1] = temp;
+/* L90: */
+ }
+
+ k[j] = -k[j];
+ j = k[j];
+ goto L80;
+
+L100:
+/* L110: */
+ ;
+ }
+
+ }
+
+ return 0;
+
+/* End of SLAPMT */
+
+} /* slapmt_ */
diff --git a/contrib/libs/clapack/slapy2.c b/contrib/libs/clapack/slapy2.c
new file mode 100644
index 0000000000..e048cac762
--- /dev/null
+++ b/contrib/libs/clapack/slapy2.c
@@ -0,0 +1,73 @@
+/* slapy2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+doublereal slapy2_(real *x, real *y)
+{
+ /* System generated locals */
+ real ret_val, r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ real w, z__, xabs, yabs;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAPY2 returns sqrt(x**2+y**2), taking care not to cause unnecessary */
+/* overflow. */
+
+/* Arguments */
+/* ========= */
+
+/* X (input) REAL */
+/* Y (input) REAL */
+/* X and Y specify the values x and y. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ xabs = dabs(*x);
+ yabs = dabs(*y);
+ w = dmax(xabs,yabs);
+ z__ = dmin(xabs,yabs);
+ if (z__ == 0.f) {
+ ret_val = w;
+ } else {
+/* Computing 2nd power */
+ r__1 = z__ / w;
+ ret_val = w * sqrt(r__1 * r__1 + 1.f);
+ }
+ return ret_val;
+
+/* End of SLAPY2 */
+
+} /* slapy2_ */
diff --git a/contrib/libs/clapack/slapy3.c b/contrib/libs/clapack/slapy3.c
new file mode 100644
index 0000000000..921a2c4192
--- /dev/null
+++ b/contrib/libs/clapack/slapy3.c
@@ -0,0 +1,83 @@
+/* slapy3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+doublereal slapy3_(real *x, real *y, real *z__)
+{
+ /* System generated locals */
+ real ret_val, r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ real w, xabs, yabs, zabs;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAPY3 returns sqrt(x**2+y**2+z**2), taking care not to cause */
+/* unnecessary overflow. */
+
+/* Arguments */
+/* ========= */
+
+/* X (input) REAL */
+/* Y (input) REAL */
+/* Z (input) REAL */
+/* X, Y and Z specify the values x, y and z. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ xabs = dabs(*x);
+ yabs = dabs(*y);
+ zabs = dabs(*z__);
+/* Computing MAX */
+ r__1 = max(xabs,yabs);
+ w = dmax(r__1,zabs);
+ if (w == 0.f) {
+/* W can be zero for max(0,nan,0) */
+/* adding all three entries together will make sure */
+/* NaN will not disappear. */
+ ret_val = xabs + yabs + zabs;
+ } else {
+/* Computing 2nd power */
+ r__1 = xabs / w;
+/* Computing 2nd power */
+ r__2 = yabs / w;
+/* Computing 2nd power */
+ r__3 = zabs / w;
+ ret_val = w * sqrt(r__1 * r__1 + r__2 * r__2 + r__3 * r__3);
+ }
+ return ret_val;
+
+/* End of SLAPY3 */
+
+} /* slapy3_ */
diff --git a/contrib/libs/clapack/slaqgb.c b/contrib/libs/clapack/slaqgb.c
new file mode 100644
index 0000000000..1a4a6b0af6
--- /dev/null
+++ b/contrib/libs/clapack/slaqgb.c
@@ -0,0 +1,216 @@
+/* slaqgb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slaqgb_(integer *m, integer *n, integer *kl, integer *ku,
+ real *ab, integer *ldab, real *r__, real *c__, real *rowcnd, real *
+ colcnd, real *amax, char *equed)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+
+ /* Local variables */
+ integer i__, j;
+ real cj, large, small;
+ extern doublereal slamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAQGB equilibrates a general M by N band matrix A with KL */
+/* subdiagonals and KU superdiagonals using the row and scaling factors */
+/* in the vectors R and C. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input/output) REAL array, dimension (LDAB,N) */
+/* On entry, the matrix A in band storage, in rows 1 to KL+KU+1. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl) */
+
+/* On exit, the equilibrated matrix, in the same storage format */
+/* as A. See EQUED for the form of the equilibrated matrix. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDA >= KL+KU+1. */
+
+/* R (input) REAL array, dimension (M) */
+/* The row scale factors for A. */
+
+/* C (input) REAL array, dimension (N) */
+/* The column scale factors for A. */
+
+/* ROWCND (input) REAL */
+/* Ratio of the smallest R(i) to the largest R(i). */
+
+/* COLCND (input) REAL */
+/* Ratio of the smallest C(i) to the largest C(i). */
+
+/* AMAX (input) REAL */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration */
+/* = 'R': Row equilibration, i.e., A has been premultiplied by */
+/* diag(R). */
+/* = 'C': Column equilibration, i.e., A has been postmultiplied */
+/* by diag(C). */
+/* = 'B': Both row and column equilibration, i.e., A has been */
+/* replaced by diag(R) * A * diag(C). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if row or column scaling */
+/* should be done based on the ratio of the row or column scaling */
+/* factors. If ROWCND < THRESH, row scaling is done, and if */
+/* COLCND < THRESH, column scaling is done. */
+
+/* LARGE and SMALL are threshold values used to decide if row scaling */
+/* should be done based on the absolute size of the largest matrix */
+/* element. If AMAX > LARGE or AMAX < SMALL, row scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --r__;
+ --c__;
+
+ /* Function Body */
+ if (*m <= 0 || *n <= 0) {
+ *(unsigned char *)equed = 'N';
+ return 0;
+ }
+
+/* Initialize LARGE and SMALL. */
+
+ small = slamch_("Safe minimum") / slamch_("Precision");
+ large = 1.f / small;
+
+ if (*rowcnd >= .1f && *amax >= small && *amax <= large) {
+
+/* No row scaling */
+
+ if (*colcnd >= .1f) {
+
+/* No column scaling */
+
+ *(unsigned char *)equed = 'N';
+ } else {
+
+/* Column scaling */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = c__[j];
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *ku;
+/* Computing MIN */
+ i__5 = *m, i__6 = j + *kl;
+ i__4 = min(i__5,i__6);
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ ab[*ku + 1 + i__ - j + j * ab_dim1] = cj * ab[*ku + 1 +
+ i__ - j + j * ab_dim1];
+/* L10: */
+ }
+/* L20: */
+ }
+ *(unsigned char *)equed = 'C';
+ }
+ } else if (*colcnd >= .1f) {
+
+/* Row scaling, no column scaling */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *ku;
+/* Computing MIN */
+ i__5 = *m, i__6 = j + *kl;
+ i__3 = min(i__5,i__6);
+ for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+ ab[*ku + 1 + i__ - j + j * ab_dim1] = r__[i__] * ab[*ku + 1 +
+ i__ - j + j * ab_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ *(unsigned char *)equed = 'R';
+ } else {
+
+/* Row and column scaling */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = c__[j];
+/* Computing MAX */
+ i__3 = 1, i__4 = j - *ku;
+/* Computing MIN */
+ i__5 = *m, i__6 = j + *kl;
+ i__2 = min(i__5,i__6);
+ for (i__ = max(i__3,i__4); i__ <= i__2; ++i__) {
+ ab[*ku + 1 + i__ - j + j * ab_dim1] = cj * r__[i__] * ab[*ku
+ + 1 + i__ - j + j * ab_dim1];
+/* L50: */
+ }
+/* L60: */
+ }
+ *(unsigned char *)equed = 'B';
+ }
+
+ return 0;
+
+/* End of SLAQGB */
+
+} /* slaqgb_ */
diff --git a/contrib/libs/clapack/slaqge.c b/contrib/libs/clapack/slaqge.c
new file mode 100644
index 0000000000..54e8f1b7f1
--- /dev/null
+++ b/contrib/libs/clapack/slaqge.c
@@ -0,0 +1,188 @@
+/* slaqge.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slaqge_(integer *m, integer *n, real *a, integer *lda,
+ real *r__, real *c__, real *rowcnd, real *colcnd, real *amax, char *
+ equed)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j;
+ real cj, large, small;
+ extern doublereal slamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAQGE equilibrates a general M by N matrix A using the row and */
+/* column scaling factors in the vectors R and C. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M by N matrix A. */
+/* On exit, the equilibrated matrix. See EQUED for the form of */
+/* the equilibrated matrix. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(M,1). */
+
+/* R (input) REAL array, dimension (M) */
+/* The row scale factors for A. */
+
+/* C (input) REAL array, dimension (N) */
+/* The column scale factors for A. */
+
+/* ROWCND (input) REAL */
+/* Ratio of the smallest R(i) to the largest R(i). */
+
+/* COLCND (input) REAL */
+/* Ratio of the smallest C(i) to the largest C(i). */
+
+/* AMAX (input) REAL */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration */
+/* = 'R': Row equilibration, i.e., A has been premultiplied by */
+/* diag(R). */
+/* = 'C': Column equilibration, i.e., A has been postmultiplied */
+/* by diag(C). */
+/* = 'B': Both row and column equilibration, i.e., A has been */
+/* replaced by diag(R) * A * diag(C). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if row or column scaling */
+/* should be done based on the ratio of the row or column scaling */
+/* factors. If ROWCND < THRESH, row scaling is done, and if */
+/* COLCND < THRESH, column scaling is done. */
+
+/* LARGE and SMALL are threshold values used to decide if row scaling */
+/* should be done based on the absolute size of the largest matrix */
+/* element. If AMAX > LARGE or AMAX < SMALL, row scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --r__;
+ --c__;
+
+ /* Function Body */
+ if (*m <= 0 || *n <= 0) {
+ *(unsigned char *)equed = 'N';
+ return 0;
+ }
+
+/* Initialize LARGE and SMALL. */
+
+ small = slamch_("Safe minimum") / slamch_("Precision");
+ large = 1.f / small;
+
+ if (*rowcnd >= .1f && *amax >= small && *amax <= large) {
+
+/* No row scaling */
+
+ if (*colcnd >= .1f) {
+
+/* No column scaling */
+
+ *(unsigned char *)equed = 'N';
+ } else {
+
+/* Column scaling */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = c__[j];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = cj * a[i__ + j * a_dim1];
+/* L10: */
+ }
+/* L20: */
+ }
+ *(unsigned char *)equed = 'C';
+ }
+ } else if (*colcnd >= .1f) {
+
+/* Row scaling, no column scaling */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = r__[i__] * a[i__ + j * a_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ *(unsigned char *)equed = 'R';
+ } else {
+
+/* Row and column scaling */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = c__[j];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = cj * r__[i__] * a[i__ + j * a_dim1];
+/* L50: */
+ }
+/* L60: */
+ }
+ *(unsigned char *)equed = 'B';
+ }
+
+ return 0;
+
+/* End of SLAQGE */
+
+} /* slaqge_ */
diff --git a/contrib/libs/clapack/slaqp2.c b/contrib/libs/clapack/slaqp2.c
new file mode 100644
index 0000000000..2b97dffecb
--- /dev/null
+++ b/contrib/libs/clapack/slaqp2.c
@@ -0,0 +1,238 @@
+/* slaqp2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int slaqp2_(integer *m, integer *n, integer *offset, real *a,
+ integer *lda, integer *jpvt, real *tau, real *vn1, real *vn2, real *
+ work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, mn;
+ real aii;
+ integer pvt;
+ real temp, temp2;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ real tol3z;
+ integer offpi;
+ extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *);
+ integer itemp;
+ extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+ integer *);
+ extern doublereal slamch_(char *);
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int slarfp_(integer *, real *, real *, integer *,
+ real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAQP2 computes a QR factorization with column pivoting of */
+/* the block A(OFFSET+1:M,1:N). */
+/* The block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* OFFSET (input) INTEGER */
+/* The number of rows of the matrix A that must be pivoted */
+/* but not factorized. OFFSET >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the upper triangle of block A(OFFSET+1:M,1:N) is */
+/* the triangular factor obtained; the elements in block */
+/* A(OFFSET+1:M,1:N) below the diagonal, together with the */
+/* array TAU, represent the orthogonal matrix Q as a product of */
+/* elementary reflectors. Block A(1:OFFSET,1:N) has been */
+/* accordingly pivoted, but not factorized. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(i) .ne. 0, the i-th column of A is permuted */
+/* to the front of A*P (a leading column); if JPVT(i) = 0, */
+/* the i-th column of A is a free column. */
+/* On exit, if JPVT(i) = k, then the i-th column of A*P */
+/* was the k-th column of A. */
+
+/* TAU (output) REAL array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors. */
+
+/* VN1 (input/output) REAL array, dimension (N) */
+/* The vector with the partial column norms. */
+
+/* VN2 (input/output) REAL array, dimension (N) */
+/* The vector with the exact column norms. */
+
+/* WORK (workspace) REAL array, dimension (N) */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+/* X. Sun, Computer Science Dept., Duke University, USA */
+
+/* Partial column norm updating strategy modified by */
+/* Z. Drmac and Z. Bujanovic, Dept. of Mathematics, */
+/* University of Zagreb, Croatia. */
+/* June 2006. */
+/* For more details see LAPACK Working Note 176. */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --jpvt;
+ --tau;
+ --vn1;
+ --vn2;
+ --work;
+
+ /* Function Body */
+/* Computing MIN */
+ i__1 = *m - *offset;
+ mn = min(i__1,*n);
+ tol3z = sqrt(slamch_("Epsilon"));
+
+/* Compute factorization. */
+
+ i__1 = mn;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+ offpi = *offset + i__;
+
+/* Determine ith pivot column and swap if necessary. */
+
+ i__2 = *n - i__ + 1;
+ pvt = i__ - 1 + isamax_(&i__2, &vn1[i__], &c__1);
+
+ if (pvt != i__) {
+ sswap_(m, &a[pvt * a_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &
+ c__1);
+ itemp = jpvt[pvt];
+ jpvt[pvt] = jpvt[i__];
+ jpvt[i__] = itemp;
+ vn1[pvt] = vn1[i__];
+ vn2[pvt] = vn2[i__];
+ }
+
+/* Generate elementary reflector H(i). */
+
+ if (offpi < *m) {
+ i__2 = *m - offpi + 1;
+ slarfp_(&i__2, &a[offpi + i__ * a_dim1], &a[offpi + 1 + i__ *
+ a_dim1], &c__1, &tau[i__]);
+ } else {
+ slarfp_(&c__1, &a[*m + i__ * a_dim1], &a[*m + i__ * a_dim1], &
+ c__1, &tau[i__]);
+ }
+
+ if (i__ < *n) {
+
+/* Apply H(i)' to A(offset+i:m,i+1:n) from the left. */
+
+ aii = a[offpi + i__ * a_dim1];
+ a[offpi + i__ * a_dim1] = 1.f;
+ i__2 = *m - offpi + 1;
+ i__3 = *n - i__;
+ slarf_("Left", &i__2, &i__3, &a[offpi + i__ * a_dim1], &c__1, &
+ tau[i__], &a[offpi + (i__ + 1) * a_dim1], lda, &work[1]);
+ a[offpi + i__ * a_dim1] = aii;
+ }
+
+/* Update partial column norms. */
+
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ if (vn1[j] != 0.f) {
+
+/* NOTE: The following 4 lines follow from the analysis in */
+/* Lapack Working Note 176. */
+
+/* Computing 2nd power */
+ r__2 = (r__1 = a[offpi + j * a_dim1], dabs(r__1)) / vn1[j];
+ temp = 1.f - r__2 * r__2;
+ temp = dmax(temp,0.f);
+/* Computing 2nd power */
+ r__1 = vn1[j] / vn2[j];
+ temp2 = temp * (r__1 * r__1);
+ if (temp2 <= tol3z) {
+ if (offpi < *m) {
+ i__3 = *m - offpi;
+ vn1[j] = snrm2_(&i__3, &a[offpi + 1 + j * a_dim1], &
+ c__1);
+ vn2[j] = vn1[j];
+ } else {
+ vn1[j] = 0.f;
+ vn2[j] = 0.f;
+ }
+ } else {
+ vn1[j] *= sqrt(temp);
+ }
+ }
+/* L10: */
+ }
+
+/* L20: */
+ }
+
+ return 0;
+
+/* End of SLAQP2 */
+
+} /* slaqp2_ */
diff --git a/contrib/libs/clapack/slaqps.c b/contrib/libs/clapack/slaqps.c
new file mode 100644
index 0000000000..dc26102a5b
--- /dev/null
+++ b/contrib/libs/clapack/slaqps.c
@@ -0,0 +1,342 @@
+/* slaqps.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b8 = -1.f;
+static real c_b9 = 1.f;
+static real c_b16 = 0.f;
+
+/* Subroutine */ int slaqps_(integer *m, integer *n, integer *offset, integer
+ *nb, integer *kb, real *a, integer *lda, integer *jpvt, real *tau,
+ real *vn1, real *vn2, real *auxv, real *f, integer *ldf)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, f_dim1, f_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+ integer i_nint(real *);
+
+ /* Local variables */
+ integer j, k, rk;
+ real akk;
+ integer pvt;
+ real temp, temp2;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ real tol3z;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ integer itemp;
+ extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *), sswap_(integer *, real *, integer *, real *, integer *);
+ extern doublereal slamch_(char *);
+ integer lsticc;
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int slarfp_(integer *, real *, real *, integer *,
+ real *);
+ integer lastrk;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAQPS computes a step of QR factorization with column pivoting */
+/* of a real M-by-N matrix A by using Blas-3. It tries to factorize */
+/* NB columns from A starting from the row OFFSET+1, and updates all */
+/* of the matrix with Blas-3 xGEMM. */
+
+/* In some cases, due to catastrophic cancellations, it cannot */
+/* factorize NB columns. Hence, the actual number of factorized */
+/* columns is returned in KB. */
+
+/* Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0 */
+
+/* OFFSET (input) INTEGER */
+/* The number of rows of A that have been factorized in */
+/* previous steps. */
+
+/* NB (input) INTEGER */
+/* The number of columns to factorize. */
+
+/* KB (output) INTEGER */
+/* The number of columns actually factorized. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, block A(OFFSET+1:M,1:KB) is the triangular */
+/* factor obtained and block A(1:OFFSET,1:N) has been */
+/* accordingly pivoted, but not factorized. */
+/* The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has */
+/* been updated. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* JPVT(I) = K <==> Column K of the full matrix A has been */
+/* permuted into position I in AP. */
+
+/* TAU (output) REAL array, dimension (KB) */
+/* The scalar factors of the elementary reflectors. */
+
+/* VN1 (input/output) REAL array, dimension (N) */
+/* The vector with the partial column norms. */
+
+/* VN2 (input/output) REAL array, dimension (N) */
+/* The vector with the exact column norms. */
+
+/* AUXV (input/output) REAL array, dimension (NB) */
+/* Auxiliary vector. */
+
+/* F (input/output) REAL array, dimension (LDF,NB) */
+/* Matrix F' = L*Y'*A. */
+
+/* LDF (input) INTEGER */
+/* The leading dimension of the array F. LDF >= max(1,N). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+/* X. Sun, Computer Science Dept., Duke University, USA */
+
+/* Partial column norm updating strategy modified by */
+/* Z. Drmac and Z. Bujanovic, Dept. of Mathematics, */
+/* University of Zagreb, Croatia. */
+/* June 2006. */
+/* For more details see LAPACK Working Note 176. */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --jpvt;
+ --tau;
+ --vn1;
+ --vn2;
+ --auxv;
+ f_dim1 = *ldf;
+ f_offset = 1 + f_dim1;
+ f -= f_offset;
+
+ /* Function Body */
+/* Computing MIN */
+ i__1 = *m, i__2 = *n + *offset;
+ lastrk = min(i__1,i__2);
+ lsticc = 0;
+ k = 0;
+ tol3z = sqrt(slamch_("Epsilon"));
+
+/* Beginning of while loop. */
+
+L10:
+ if (k < *nb && lsticc == 0) {
+ ++k;
+ rk = *offset + k;
+
+/* Determine ith pivot column and swap if necessary */
+
+ i__1 = *n - k + 1;
+ pvt = k - 1 + isamax_(&i__1, &vn1[k], &c__1);
+ if (pvt != k) {
+ sswap_(m, &a[pvt * a_dim1 + 1], &c__1, &a[k * a_dim1 + 1], &c__1);
+ i__1 = k - 1;
+ sswap_(&i__1, &f[pvt + f_dim1], ldf, &f[k + f_dim1], ldf);
+ itemp = jpvt[pvt];
+ jpvt[pvt] = jpvt[k];
+ jpvt[k] = itemp;
+ vn1[pvt] = vn1[k];
+ vn2[pvt] = vn2[k];
+ }
+
+/* Apply previous Householder reflectors to column K: */
+/* A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'. */
+
+ if (k > 1) {
+ i__1 = *m - rk + 1;
+ i__2 = k - 1;
+ sgemv_("No transpose", &i__1, &i__2, &c_b8, &a[rk + a_dim1], lda,
+ &f[k + f_dim1], ldf, &c_b9, &a[rk + k * a_dim1], &c__1);
+ }
+
+/* Generate elementary reflector H(k). */
+
+ if (rk < *m) {
+ i__1 = *m - rk + 1;
+ slarfp_(&i__1, &a[rk + k * a_dim1], &a[rk + 1 + k * a_dim1], &
+ c__1, &tau[k]);
+ } else {
+ slarfp_(&c__1, &a[rk + k * a_dim1], &a[rk + k * a_dim1], &c__1, &
+ tau[k]);
+ }
+
+ akk = a[rk + k * a_dim1];
+ a[rk + k * a_dim1] = 1.f;
+
+/* Compute Kth column of F: */
+
+/* Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K). */
+
+ if (k < *n) {
+ i__1 = *m - rk + 1;
+ i__2 = *n - k;
+ sgemv_("Transpose", &i__1, &i__2, &tau[k], &a[rk + (k + 1) *
+ a_dim1], lda, &a[rk + k * a_dim1], &c__1, &c_b16, &f[k +
+ 1 + k * f_dim1], &c__1);
+ }
+
+/* Padding F(1:K,K) with zeros. */
+
+ i__1 = k;
+ for (j = 1; j <= i__1; ++j) {
+ f[j + k * f_dim1] = 0.f;
+/* L20: */
+ }
+
+/* Incremental updating of F: */
+/* F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)' */
+/* *A(RK:M,K). */
+
+ if (k > 1) {
+ i__1 = *m - rk + 1;
+ i__2 = k - 1;
+ r__1 = -tau[k];
+ sgemv_("Transpose", &i__1, &i__2, &r__1, &a[rk + a_dim1], lda, &a[
+ rk + k * a_dim1], &c__1, &c_b16, &auxv[1], &c__1);
+
+ i__1 = k - 1;
+ sgemv_("No transpose", n, &i__1, &c_b9, &f[f_dim1 + 1], ldf, &
+ auxv[1], &c__1, &c_b9, &f[k * f_dim1 + 1], &c__1);
+ }
+
+/* Update the current row of A: */
+/* A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ sgemv_("No transpose", &i__1, &k, &c_b8, &f[k + 1 + f_dim1], ldf,
+ &a[rk + a_dim1], lda, &c_b9, &a[rk + (k + 1) * a_dim1],
+ lda);
+ }
+
+/* Update partial column norms. */
+
+ if (rk < lastrk) {
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+ if (vn1[j] != 0.f) {
+
+/* NOTE: The following 4 lines follow from the analysis in */
+/* Lapack Working Note 176. */
+
+ temp = (r__1 = a[rk + j * a_dim1], dabs(r__1)) / vn1[j];
+/* Computing MAX */
+ r__1 = 0.f, r__2 = (temp + 1.f) * (1.f - temp);
+ temp = dmax(r__1,r__2);
+/* Computing 2nd power */
+ r__1 = vn1[j] / vn2[j];
+ temp2 = temp * (r__1 * r__1);
+ if (temp2 <= tol3z) {
+ vn2[j] = (real) lsticc;
+ lsticc = j;
+ } else {
+ vn1[j] *= sqrt(temp);
+ }
+ }
+/* L30: */
+ }
+ }
+
+ a[rk + k * a_dim1] = akk;
+
+/* End of while loop. */
+
+ goto L10;
+ }
+ *kb = k;
+ rk = *offset + *kb;
+
+/* Apply the block reflector to the rest of the matrix: */
+/* A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) - */
+/* A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)'. */
+
+/* Computing MIN */
+ i__1 = *n, i__2 = *m - *offset;
+ if (*kb < min(i__1,i__2)) {
+ i__1 = *m - rk;
+ i__2 = *n - *kb;
+ sgemm_("No transpose", "Transpose", &i__1, &i__2, kb, &c_b8, &a[rk +
+ 1 + a_dim1], lda, &f[*kb + 1 + f_dim1], ldf, &c_b9, &a[rk + 1
+ + (*kb + 1) * a_dim1], lda);
+ }
+
+/* Recomputation of difficult columns. */
+
+L40:
+ if (lsticc > 0) {
+ itemp = i_nint(&vn2[lsticc]);
+ i__1 = *m - rk;
+ vn1[lsticc] = snrm2_(&i__1, &a[rk + 1 + lsticc * a_dim1], &c__1);
+
+/* NOTE: The computation of VN1( LSTICC ) relies on the fact that */
+/* SNRM2 does not fail on vectors with norm below the value of */
+/* SQRT(DLAMCH('S')) */
+
+ vn2[lsticc] = vn1[lsticc];
+ lsticc = itemp;
+ goto L40;
+ }
+
+ return 0;
+
+/* End of SLAQPS */
+
+} /* slaqps_ */
diff --git a/contrib/libs/clapack/slaqr0.c b/contrib/libs/clapack/slaqr0.c
new file mode 100644
index 0000000000..d2dab03045
--- /dev/null
+++ b/contrib/libs/clapack/slaqr0.c
@@ -0,0 +1,753 @@
+/* slaqr0.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__13 = 13;
+static integer c__15 = 15;
+static integer c_n1 = -1;
+static integer c__12 = 12;
+static integer c__14 = 14;
+static integer c__16 = 16;
+static logical c_false = FALSE_;
+static integer c__1 = 1;
+static integer c__3 = 3;
+
+/* Subroutine */ int slaqr0_(logical *wantt, logical *wantz, integer *n,
+ integer *ilo, integer *ihi, real *h__, integer *ldh, real *wr, real *
+ wi, integer *iloz, integer *ihiz, real *z__, integer *ldz, real *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4;
+
+ /* Local variables */
+ integer i__, k;
+ real aa, bb, cc, dd;
+ integer ld;
+ real cs;
+ integer nh, it, ks, kt;
+ real sn;
+ integer ku, kv, ls, ns;
+ real ss;
+ integer nw, inf, kdu, nho, nve, kwh, nsr, nwr, kwv, ndec, ndfl, kbot,
+ nmin;
+ real swap;
+ integer ktop;
+ real zdum[1] /* was [1][1] */;
+ integer kacc22, itmax, nsmax, nwmax, kwtop;
+ extern /* Subroutine */ int slanv2_(real *, real *, real *, real *, real *
+, real *, real *, real *, real *, real *), slaqr3_(logical *,
+ logical *, integer *, integer *, integer *, integer *, real *,
+ integer *, integer *, integer *, real *, integer *, integer *,
+ integer *, real *, real *, real *, integer *, integer *, real *,
+ integer *, integer *, real *, integer *, real *, integer *),
+ slaqr4_(logical *, logical *, integer *, integer *, integer *,
+ real *, integer *, real *, real *, integer *, integer *, real *,
+ integer *, real *, integer *, integer *), slaqr5_(logical *,
+ logical *, integer *, integer *, integer *, integer *, integer *,
+ real *, real *, real *, integer *, integer *, integer *, real *,
+ integer *, real *, integer *, real *, integer *, integer *, real *
+, integer *, integer *, real *, integer *);
+ integer nibble;
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ char jbcmpz[1];
+ extern /* Subroutine */ int slahqr_(logical *, logical *, integer *,
+ integer *, integer *, real *, integer *, real *, real *, integer *
+, integer *, real *, integer *, integer *), slacpy_(char *,
+ integer *, integer *, real *, integer *, real *, integer *);
+ integer nwupbd;
+ logical sorted;
+ integer lwkopt;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAQR0 computes the eigenvalues of a Hessenberg matrix H */
+/* and, optionally, the matrices T and Z from the Schur decomposition */
+/* H = Z T Z**T, where T is an upper quasi-triangular matrix (the */
+/* Schur form), and Z is the orthogonal matrix of Schur vectors. */
+
+/* Optionally Z may be postmultiplied into an input orthogonal */
+/* matrix Q so that this routine can give the Schur factorization */
+/* of a matrix A which has been reduced to the Hessenberg form H */
+/* by the orthogonal matrix Q: A = Q*H*Q**T = (QZ)*T*(QZ)**T. */
+
+/* Arguments */
+/* ========= */
+
+/* WANTT (input) LOGICAL */
+/* = .TRUE. : the full Schur form T is required; */
+/* = .FALSE.: only eigenvalues are required. */
+
+/* WANTZ (input) LOGICAL */
+/* = .TRUE. : the matrix of Schur vectors Z is required; */
+/* = .FALSE.: Schur vectors are not required. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N .GE. 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that H is already upper triangular in rows */
+/* and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, */
+/* H(ILO,ILO-1) is zero. ILO and IHI are normally set by a */
+/* previous call to SGEBAL, and then passed to SGEHRD when the */
+/* matrix output by SGEBAL is reduced to Hessenberg form. */
+/* Otherwise, ILO and IHI should be set to 1 and N, */
+/* respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. */
+/* If N = 0, then ILO = 1 and IHI = 0. */
+
+/* H (input/output) REAL array, dimension (LDH,N) */
+/* On entry, the upper Hessenberg matrix H. */
+/* On exit, if INFO = 0 and WANTT is .TRUE., then H contains */
+/* the upper quasi-triangular matrix T from the Schur */
+/* decomposition (the Schur form); 2-by-2 diagonal blocks */
+/* (corresponding to complex conjugate pairs of eigenvalues) */
+/* are returned in standard form, with H(i,i) = H(i+1,i+1) */
+/* and H(i+1,i)*H(i,i+1).LT.0. If INFO = 0 and WANTT is */
+/* .FALSE., then the contents of H are unspecified on exit. */
+/* (The output value of H when INFO.GT.0 is given under the */
+/* description of INFO below.) */
+
+/* This subroutine may explicitly set H(i,j) = 0 for i.GT.j and */
+/* j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH .GE. max(1,N). */
+
+/* WR (output) REAL array, dimension (IHI) */
+/* WI (output) REAL array, dimension (IHI) */
+/* The real and imaginary parts, respectively, of the computed */
+/* eigenvalues of H(ILO:IHI,ILO:IHI) are stored in WR(ILO:IHI) */
+/* and WI(ILO:IHI). If two eigenvalues are computed as a */
+/* complex conjugate pair, they are stored in consecutive */
+/* elements of WR and WI, say the i-th and (i+1)th, with */
+/* WI(i) .GT. 0 and WI(i+1) .LT. 0. If WANTT is .TRUE., then */
+/* the eigenvalues are stored in the same order as on the */
+/* diagonal of the Schur form returned in H, with */
+/* WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2 diagonal */
+/* block, WI(i) = sqrt(-H(i+1,i)*H(i,i+1)) and */
+/* WI(i+1) = -WI(i). */
+
+/* ILOZ (input) INTEGER */
+/* IHIZ (input) INTEGER */
+/* Specify the rows of Z to which transformations must be */
+/* applied if WANTZ is .TRUE.. */
+/* 1 .LE. ILOZ .LE. ILO; IHI .LE. IHIZ .LE. N. */
+
+/* Z (input/output) REAL array, dimension (LDZ,IHI) */
+/* If WANTZ is .FALSE., then Z is not referenced. */
+/* If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is */
+/* replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the */
+/* orthogonal Schur factor of H(ILO:IHI,ILO:IHI). */
+/* (The output value of Z when INFO.GT.0 is given under */
+/* the description of INFO below.) */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. if WANTZ is .TRUE. */
+/* then LDZ.GE.MAX(1,IHIZ). Otherwise, LDZ.GE.1. */
+
+/* WORK (workspace/output) REAL array, dimension LWORK */
+/* On exit, if LWORK = -1, WORK(1) returns an estimate of */
+/* the optimal value for LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK .GE. max(1,N) */
+/* is sufficient, but LWORK typically as large as 6*N may */
+/* be required for optimal performance. A workspace query */
+/* to determine the optimal workspace size is recommended. */
+
+/* If LWORK = -1, then SLAQR0 does a workspace query. */
+/* In this case, SLAQR0 checks the input parameters and */
+/* estimates the optimal workspace size for the given */
+/* values of N, ILO and IHI. The estimate is returned */
+/* in WORK(1). No error message related to LWORK is */
+/* issued by XERBLA. Neither H nor Z are accessed. */
+
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* .GT. 0: if INFO = i, SLAQR0 failed to compute all of */
+/* the eigenvalues. Elements 1:ilo-1 and i+1:n of WR */
+/* and WI contain those eigenvalues which have been */
+/* successfully computed. (Failures are rare.) */
+
+/* If INFO .GT. 0 and WANTT is .FALSE., then on exit, */
+/* the remaining unconverged eigenvalues are the eigen- */
+/* values of the upper Hessenberg matrix rows and */
+/* columns ILO through INFO of the final, output */
+/* value of H. */
+
+/* If INFO .GT. 0 and WANTT is .TRUE., then on exit */
+
+/* (*) (initial value of H)*U = U*(final value of H) */
+
+/* where U is an orthogonal matrix. The final */
+/* value of H is upper Hessenberg and quasi-triangular */
+/* in rows and columns INFO+1 through IHI. */
+
+/* If INFO .GT. 0 and WANTZ is .TRUE., then on exit */
+
+/* (final value of Z(ILO:IHI,ILOZ:IHIZ) */
+/* = (initial value of Z(ILO:IHI,ILOZ:IHIZ)*U */
+
+/* where U is the orthogonal matrix in (*) (regard- */
+/* less of the value of WANTT.) */
+
+/* If INFO .GT. 0 and WANTZ is .FALSE., then Z is not */
+/* accessed. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* References: */
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 */
+/* Performance, SIAM Journal of Matrix Analysis, volume 23, pages */
+/* 929--947, 2002. */
+
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part II: Aggressive Early Deflation, SIAM Journal */
+/* of Matrix Analysis, volume 23, pages 948--973, 2002. */
+
+/* ================================================================ */
+/* .. Parameters .. */
+
+/* ==== Matrices of order NTINY or smaller must be processed by */
+/* . SLAHQR because of insufficient subdiagonal scratch space. */
+/* . (This is a hard limit.) ==== */
+
+/* ==== Exceptional deflation windows: try to cure rare */
+/* . slow convergence by varying the size of the */
+/* . deflation window after KEXNW iterations. ==== */
+
+/* ==== Exceptional shifts: try to cure rare slow convergence */
+/* . with ad-hoc exceptional shifts every KEXSH iterations. */
+/* . ==== */
+
+/* ==== The constants WILK1 and WILK2 are used to form the */
+/* . exceptional shifts. ==== */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --wr;
+ --wi;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+/* ==== Quick return for N = 0: nothing to do. ==== */
+
+ if (*n == 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ if (*n <= 11) {
+
+/* ==== Tiny matrices must use SLAHQR. ==== */
+
+ lwkopt = 1;
+ if (*lwork != -1) {
+ slahqr_(wantt, wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], &
+ wi[1], iloz, ihiz, &z__[z_offset], ldz, info);
+ }
+ } else {
+
+/* ==== Use small bulge multi-shift QR with aggressive early */
+/* . deflation on larger-than-tiny matrices. ==== */
+
+/* ==== Hope for the best. ==== */
+
+ *info = 0;
+
+/* ==== Set up job flags for ILAENV. ==== */
+
+ if (*wantt) {
+ *(unsigned char *)jbcmpz = 'S';
+ } else {
+ *(unsigned char *)jbcmpz = 'E';
+ }
+ if (*wantz) {
+ *(unsigned char *)&jbcmpz[1] = 'V';
+ } else {
+ *(unsigned char *)&jbcmpz[1] = 'N';
+ }
+
+/* ==== NWR = recommended deflation window size. At this */
+/* . point, N .GT. NTINY = 11, so there is enough */
+/* . subdiagonal workspace for NWR.GE.2 as required. */
+/* . (In fact, there is enough subdiagonal space for */
+/* . NWR.GE.3.) ==== */
+
+ nwr = ilaenv_(&c__13, "SLAQR0", jbcmpz, n, ilo, ihi, lwork);
+ nwr = max(2,nwr);
+/* Computing MIN */
+ i__1 = *ihi - *ilo + 1, i__2 = (*n - 1) / 3, i__1 = min(i__1,i__2);
+ nwr = min(i__1,nwr);
+
+/* ==== NSR = recommended number of simultaneous shifts. */
+/* . At this point N .GT. NTINY = 11, so there is at */
+/* . enough subdiagonal workspace for NSR to be even */
+/* . and greater than or equal to two as required. ==== */
+
+ nsr = ilaenv_(&c__15, "SLAQR0", jbcmpz, n, ilo, ihi, lwork);
+/* Computing MIN */
+ i__1 = nsr, i__2 = (*n + 6) / 9, i__1 = min(i__1,i__2), i__2 = *ihi -
+ *ilo;
+ nsr = min(i__1,i__2);
+/* Computing MAX */
+ i__1 = 2, i__2 = nsr - nsr % 2;
+ nsr = max(i__1,i__2);
+
+/* ==== Estimate optimal workspace ==== */
+
+/* ==== Workspace query call to SLAQR3 ==== */
+
+ i__1 = nwr + 1;
+ slaqr3_(wantt, wantz, n, ilo, ihi, &i__1, &h__[h_offset], ldh, iloz,
+ ihiz, &z__[z_offset], ldz, &ls, &ld, &wr[1], &wi[1], &h__[
+ h_offset], ldh, n, &h__[h_offset], ldh, n, &h__[h_offset],
+ ldh, &work[1], &c_n1);
+
+/* ==== Optimal workspace = MAX(SLAQR5, SLAQR3) ==== */
+
+/* Computing MAX */
+ i__1 = nsr * 3 / 2, i__2 = (integer) work[1];
+ lwkopt = max(i__1,i__2);
+
+/* ==== Quick return in case of workspace query. ==== */
+
+ if (*lwork == -1) {
+ work[1] = (real) lwkopt;
+ return 0;
+ }
+
+/* ==== SLAHQR/SLAQR0 crossover point ==== */
+
+ nmin = ilaenv_(&c__12, "SLAQR0", jbcmpz, n, ilo, ihi, lwork);
+ nmin = max(11,nmin);
+
+/* ==== Nibble crossover point ==== */
+
+ nibble = ilaenv_(&c__14, "SLAQR0", jbcmpz, n, ilo, ihi, lwork);
+ nibble = max(0,nibble);
+
+/* ==== Accumulate reflections during ttswp? Use block */
+/* . 2-by-2 structure during matrix-matrix multiply? ==== */
+
+ kacc22 = ilaenv_(&c__16, "SLAQR0", jbcmpz, n, ilo, ihi, lwork);
+ kacc22 = max(0,kacc22);
+ kacc22 = min(2,kacc22);
+
+/* ==== NWMAX = the largest possible deflation window for */
+/* . which there is sufficient workspace. ==== */
+
+/* Computing MIN */
+ i__1 = (*n - 1) / 3, i__2 = *lwork / 2;
+ nwmax = min(i__1,i__2);
+ nw = nwmax;
+
+/* ==== NSMAX = the Largest number of simultaneous shifts */
+/* . for which there is sufficient workspace. ==== */
+
+/* Computing MIN */
+ i__1 = (*n + 6) / 9, i__2 = (*lwork << 1) / 3;
+ nsmax = min(i__1,i__2);
+ nsmax -= nsmax % 2;
+
+/* ==== NDFL: an iteration count restarted at deflation. ==== */
+
+ ndfl = 1;
+
+/* ==== ITMAX = iteration limit ==== */
+
+/* Computing MAX */
+ i__1 = 10, i__2 = *ihi - *ilo + 1;
+ itmax = max(i__1,i__2) * 30;
+
+/* ==== Last row and column in the active block ==== */
+
+ kbot = *ihi;
+
+/* ==== Main Loop ==== */
+
+ i__1 = itmax;
+ for (it = 1; it <= i__1; ++it) {
+
+/* ==== Done when KBOT falls below ILO ==== */
+
+ if (kbot < *ilo) {
+ goto L90;
+ }
+
+/* ==== Locate active block ==== */
+
+ i__2 = *ilo + 1;
+ for (k = kbot; k >= i__2; --k) {
+ if (h__[k + (k - 1) * h_dim1] == 0.f) {
+ goto L20;
+ }
+/* L10: */
+ }
+ k = *ilo;
+L20:
+ ktop = k;
+
+/* ==== Select deflation window size: */
+/* . Typical Case: */
+/* . If possible and advisable, nibble the entire */
+/* . active block. If not, use size MIN(NWR,NWMAX) */
+/* . or MIN(NWR+1,NWMAX) depending upon which has */
+/* . the smaller corresponding subdiagonal entry */
+/* . (a heuristic). */
+/* . */
+/* . Exceptional Case: */
+/* . If there have been no deflations in KEXNW or */
+/* . more iterations, then vary the deflation window */
+/* . size. At first, because, larger windows are, */
+/* . in general, more powerful than smaller ones, */
+/* . rapidly increase the window to the maximum possible. */
+/* . Then, gradually reduce the window size. ==== */
+
+ nh = kbot - ktop + 1;
+ nwupbd = min(nh,nwmax);
+ if (ndfl < 5) {
+ nw = min(nwupbd,nwr);
+ } else {
+/* Computing MIN */
+ i__2 = nwupbd, i__3 = nw << 1;
+ nw = min(i__2,i__3);
+ }
+ if (nw < nwmax) {
+ if (nw >= nh - 1) {
+ nw = nh;
+ } else {
+ kwtop = kbot - nw + 1;
+ if ((r__1 = h__[kwtop + (kwtop - 1) * h_dim1], dabs(r__1))
+ > (r__2 = h__[kwtop - 1 + (kwtop - 2) * h_dim1],
+ dabs(r__2))) {
+ ++nw;
+ }
+ }
+ }
+ if (ndfl < 5) {
+ ndec = -1;
+ } else if (ndec >= 0 || nw >= nwupbd) {
+ ++ndec;
+ if (nw - ndec < 2) {
+ ndec = 0;
+ }
+ nw -= ndec;
+ }
+
+/* ==== Aggressive early deflation: */
+/* . split workspace under the subdiagonal into */
+/* . - an nw-by-nw work array V in the lower */
+/* . left-hand-corner, */
+/* . - an NW-by-at-least-NW-but-more-is-better */
+/* . (NW-by-NHO) horizontal work array along */
+/* . the bottom edge, */
+/* . - an at-least-NW-but-more-is-better (NHV-by-NW) */
+/* . vertical work array along the left-hand-edge. */
+/* . ==== */
+
+ kv = *n - nw + 1;
+ kt = nw + 1;
+ nho = *n - nw - 1 - kt + 1;
+ kwv = nw + 2;
+ nve = *n - nw - kwv + 1;
+
+/* ==== Aggressive early deflation ==== */
+
+ slaqr3_(wantt, wantz, n, &ktop, &kbot, &nw, &h__[h_offset], ldh,
+ iloz, ihiz, &z__[z_offset], ldz, &ls, &ld, &wr[1], &wi[1],
+ &h__[kv + h_dim1], ldh, &nho, &h__[kv + kt * h_dim1],
+ ldh, &nve, &h__[kwv + h_dim1], ldh, &work[1], lwork);
+
+/* ==== Adjust KBOT accounting for new deflations. ==== */
+
+ kbot -= ld;
+
+/* ==== KS points to the shifts. ==== */
+
+ ks = kbot - ls + 1;
+
+/* ==== Skip an expensive QR sweep if there is a (partly */
+/* . heuristic) reason to expect that many eigenvalues */
+/* . will deflate without it. Here, the QR sweep is */
+/* . skipped if many eigenvalues have just been deflated */
+/* . or if the remaining active block is small. */
+
+ if (ld == 0 || ld * 100 <= nw * nibble && kbot - ktop + 1 > min(
+ nmin,nwmax)) {
+
+/* ==== NS = nominal number of simultaneous shifts. */
+/* . This may be lowered (slightly) if SLAQR3 */
+/* . did not provide that many shifts. ==== */
+
+/* Computing MIN */
+/* Computing MAX */
+ i__4 = 2, i__5 = kbot - ktop;
+ i__2 = min(nsmax,nsr), i__3 = max(i__4,i__5);
+ ns = min(i__2,i__3);
+ ns -= ns % 2;
+
+/* ==== If there have been no deflations */
+/* . in a multiple of KEXSH iterations, */
+/* . then try exceptional shifts. */
+/* . Otherwise use shifts provided by */
+/* . SLAQR3 above or from the eigenvalues */
+/* . of a trailing principal submatrix. ==== */
+
+ if (ndfl % 6 == 0) {
+ ks = kbot - ns + 1;
+/* Computing MAX */
+ i__3 = ks + 1, i__4 = ktop + 2;
+ i__2 = max(i__3,i__4);
+ for (i__ = kbot; i__ >= i__2; i__ += -2) {
+ ss = (r__1 = h__[i__ + (i__ - 1) * h_dim1], dabs(r__1)
+ ) + (r__2 = h__[i__ - 1 + (i__ - 2) * h_dim1],
+ dabs(r__2));
+ aa = ss * .75f + h__[i__ + i__ * h_dim1];
+ bb = ss;
+ cc = ss * -.4375f;
+ dd = aa;
+ slanv2_(&aa, &bb, &cc, &dd, &wr[i__ - 1], &wi[i__ - 1]
+, &wr[i__], &wi[i__], &cs, &sn);
+/* L30: */
+ }
+ if (ks == ktop) {
+ wr[ks + 1] = h__[ks + 1 + (ks + 1) * h_dim1];
+ wi[ks + 1] = 0.f;
+ wr[ks] = wr[ks + 1];
+ wi[ks] = wi[ks + 1];
+ }
+ } else {
+
+/* ==== Got NS/2 or fewer shifts? Use SLAQR4 or */
+/* . SLAHQR on a trailing principal submatrix to */
+/* . get more. (Since NS.LE.NSMAX.LE.(N+6)/9, */
+/* . there is enough space below the subdiagonal */
+/* . to fit an NS-by-NS scratch array.) ==== */
+
+ if (kbot - ks + 1 <= ns / 2) {
+ ks = kbot - ns + 1;
+ kt = *n - ns + 1;
+ slacpy_("A", &ns, &ns, &h__[ks + ks * h_dim1], ldh, &
+ h__[kt + h_dim1], ldh);
+ if (ns > nmin) {
+ slaqr4_(&c_false, &c_false, &ns, &c__1, &ns, &h__[
+ kt + h_dim1], ldh, &wr[ks], &wi[ks], &
+ c__1, &c__1, zdum, &c__1, &work[1], lwork,
+ &inf);
+ } else {
+ slahqr_(&c_false, &c_false, &ns, &c__1, &ns, &h__[
+ kt + h_dim1], ldh, &wr[ks], &wi[ks], &
+ c__1, &c__1, zdum, &c__1, &inf);
+ }
+ ks += inf;
+
+/* ==== In case of a rare QR failure use */
+/* . eigenvalues of the trailing 2-by-2 */
+/* . principal submatrix. ==== */
+
+ if (ks >= kbot) {
+ aa = h__[kbot - 1 + (kbot - 1) * h_dim1];
+ cc = h__[kbot + (kbot - 1) * h_dim1];
+ bb = h__[kbot - 1 + kbot * h_dim1];
+ dd = h__[kbot + kbot * h_dim1];
+ slanv2_(&aa, &bb, &cc, &dd, &wr[kbot - 1], &wi[
+ kbot - 1], &wr[kbot], &wi[kbot], &cs, &sn)
+ ;
+ ks = kbot - 1;
+ }
+ }
+
+ if (kbot - ks + 1 > ns) {
+
+/* ==== Sort the shifts (Helps a little) */
+/* . Bubble sort keeps complex conjugate */
+/* . pairs together. ==== */
+
+ sorted = FALSE_;
+ i__2 = ks + 1;
+ for (k = kbot; k >= i__2; --k) {
+ if (sorted) {
+ goto L60;
+ }
+ sorted = TRUE_;
+ i__3 = k - 1;
+ for (i__ = ks; i__ <= i__3; ++i__) {
+ if ((r__1 = wr[i__], dabs(r__1)) + (r__2 = wi[
+ i__], dabs(r__2)) < (r__3 = wr[i__ +
+ 1], dabs(r__3)) + (r__4 = wi[i__ + 1],
+ dabs(r__4))) {
+ sorted = FALSE_;
+
+ swap = wr[i__];
+ wr[i__] = wr[i__ + 1];
+ wr[i__ + 1] = swap;
+
+ swap = wi[i__];
+ wi[i__] = wi[i__ + 1];
+ wi[i__ + 1] = swap;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+L60:
+ ;
+ }
+
+/* ==== Shuffle shifts into pairs of real shifts */
+/* . and pairs of complex conjugate shifts */
+/* . assuming complex conjugate shifts are */
+/* . already adjacent to one another. (Yes, */
+/* . they are.) ==== */
+
+ i__2 = ks + 2;
+ for (i__ = kbot; i__ >= i__2; i__ += -2) {
+ if (wi[i__] != -wi[i__ - 1]) {
+
+ swap = wr[i__];
+ wr[i__] = wr[i__ - 1];
+ wr[i__ - 1] = wr[i__ - 2];
+ wr[i__ - 2] = swap;
+
+ swap = wi[i__];
+ wi[i__] = wi[i__ - 1];
+ wi[i__ - 1] = wi[i__ - 2];
+ wi[i__ - 2] = swap;
+ }
+/* L70: */
+ }
+ }
+
+/* ==== If there are only two shifts and both are */
+/* . real, then use only one. ==== */
+
+ if (kbot - ks + 1 == 2) {
+ if (wi[kbot] == 0.f) {
+ if ((r__1 = wr[kbot] - h__[kbot + kbot * h_dim1],
+ dabs(r__1)) < (r__2 = wr[kbot - 1] - h__[kbot
+ + kbot * h_dim1], dabs(r__2))) {
+ wr[kbot - 1] = wr[kbot];
+ } else {
+ wr[kbot] = wr[kbot - 1];
+ }
+ }
+ }
+
+/* ==== Use up to NS of the the smallest magnatiude */
+/* . shifts. If there aren't NS shifts available, */
+/* . then use them all, possibly dropping one to */
+/* . make the number of shifts even. ==== */
+
+/* Computing MIN */
+ i__2 = ns, i__3 = kbot - ks + 1;
+ ns = min(i__2,i__3);
+ ns -= ns % 2;
+ ks = kbot - ns + 1;
+
+/* ==== Small-bulge multi-shift QR sweep: */
+/* . split workspace under the subdiagonal into */
+/* . - a KDU-by-KDU work array U in the lower */
+/* . left-hand-corner, */
+/* . - a KDU-by-at-least-KDU-but-more-is-better */
+/* . (KDU-by-NHo) horizontal work array WH along */
+/* . the bottom edge, */
+/* . - and an at-least-KDU-but-more-is-better-by-KDU */
+/* . (NVE-by-KDU) vertical work WV arrow along */
+/* . the left-hand-edge. ==== */
+
+ kdu = ns * 3 - 3;
+ ku = *n - kdu + 1;
+ kwh = kdu + 1;
+ nho = *n - kdu - 3 - (kdu + 1) + 1;
+ kwv = kdu + 4;
+ nve = *n - kdu - kwv + 1;
+
+/* ==== Small-bulge multi-shift QR sweep ==== */
+
+ slaqr5_(wantt, wantz, &kacc22, n, &ktop, &kbot, &ns, &wr[ks],
+ &wi[ks], &h__[h_offset], ldh, iloz, ihiz, &z__[
+ z_offset], ldz, &work[1], &c__3, &h__[ku + h_dim1],
+ ldh, &nve, &h__[kwv + h_dim1], ldh, &nho, &h__[ku +
+ kwh * h_dim1], ldh);
+ }
+
+/* ==== Note progress (or the lack of it). ==== */
+
+ if (ld > 0) {
+ ndfl = 1;
+ } else {
+ ++ndfl;
+ }
+
+/* ==== End of main loop ==== */
+/* L80: */
+ }
+
+/* ==== Iteration limit exceeded. Set INFO to show where */
+/* . the problem occurred and exit. ==== */
+
+ *info = kbot;
+L90:
+ ;
+ }
+
+/* ==== Return the optimal value of LWORK. ==== */
+
+ work[1] = (real) lwkopt;
+
+/* ==== End of SLAQR0 ==== */
+
+ return 0;
+} /* slaqr0_ */
diff --git a/contrib/libs/clapack/slaqr1.c b/contrib/libs/clapack/slaqr1.c
new file mode 100644
index 0000000000..4726190d9d
--- /dev/null
+++ b/contrib/libs/clapack/slaqr1.c
@@ -0,0 +1,126 @@
+/* slaqr1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slaqr1_(integer *n, real *h__, integer *ldh, real *sr1,
+ real *si1, real *sr2, real *si2, real *v)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset;
+ real r__1, r__2, r__3;
+
+ /* Local variables */
+ real s, h21s, h31s;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Given a 2-by-2 or 3-by-3 matrix H, SLAQR1 sets v to a */
+/* scalar multiple of the first column of the product */
+
+/* (*) K = (H - (sr1 + i*si1)*I)*(H - (sr2 + i*si2)*I) */
+
+/* scaling to avoid overflows and most underflows. It */
+/* is assumed that either */
+
+/* 1) sr1 = sr2 and si1 = -si2 */
+/* or */
+/* 2) si1 = si2 = 0. */
+
+/* This is useful for starting double implicit shift bulges */
+/* in the QR algorithm. */
+
+
+/* N (input) integer */
+/* Order of the matrix H. N must be either 2 or 3. */
+
+/* H (input) REAL array of dimension (LDH,N) */
+/* The 2-by-2 or 3-by-3 matrix H in (*). */
+
+/* LDH (input) integer */
+/* The leading dimension of H as declared in */
+/* the calling procedure. LDH.GE.N */
+
+/* SR1 (input) REAL */
+/* SI1 The shifts in (*). */
+/* SR2 */
+/* SI2 */
+
+/* V (output) REAL array of dimension N */
+/* A scalar multiple of the first column of the */
+/* matrix K in (*). */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --v;
+
+ /* Function Body */
+ if (*n == 2) {
+ s = (r__1 = h__[h_dim1 + 1] - *sr2, dabs(r__1)) + dabs(*si2) + (r__2 =
+ h__[h_dim1 + 2], dabs(r__2));
+ if (s == 0.f) {
+ v[1] = 0.f;
+ v[2] = 0.f;
+ } else {
+ h21s = h__[h_dim1 + 2] / s;
+ v[1] = h21s * h__[(h_dim1 << 1) + 1] + (h__[h_dim1 + 1] - *sr1) *
+ ((h__[h_dim1 + 1] - *sr2) / s) - *si1 * (*si2 / s);
+ v[2] = h21s * (h__[h_dim1 + 1] + h__[(h_dim1 << 1) + 2] - *sr1 - *
+ sr2);
+ }
+ } else {
+ s = (r__1 = h__[h_dim1 + 1] - *sr2, dabs(r__1)) + dabs(*si2) + (r__2 =
+ h__[h_dim1 + 2], dabs(r__2)) + (r__3 = h__[h_dim1 + 3], dabs(
+ r__3));
+ if (s == 0.f) {
+ v[1] = 0.f;
+ v[2] = 0.f;
+ v[3] = 0.f;
+ } else {
+ h21s = h__[h_dim1 + 2] / s;
+ h31s = h__[h_dim1 + 3] / s;
+ v[1] = (h__[h_dim1 + 1] - *sr1) * ((h__[h_dim1 + 1] - *sr2) / s)
+ - *si1 * (*si2 / s) + h__[(h_dim1 << 1) + 1] * h21s + h__[
+ h_dim1 * 3 + 1] * h31s;
+ v[2] = h21s * (h__[h_dim1 + 1] + h__[(h_dim1 << 1) + 2] - *sr1 - *
+ sr2) + h__[h_dim1 * 3 + 2] * h31s;
+ v[3] = h31s * (h__[h_dim1 + 1] + h__[h_dim1 * 3 + 3] - *sr1 - *
+ sr2) + h21s * h__[(h_dim1 << 1) + 3];
+ }
+ }
+ return 0;
+} /* slaqr1_ */
diff --git a/contrib/libs/clapack/slaqr2.c b/contrib/libs/clapack/slaqr2.c
new file mode 100644
index 0000000000..977d45381b
--- /dev/null
+++ b/contrib/libs/clapack/slaqr2.c
@@ -0,0 +1,694 @@
+/* slaqr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b12 = 0.f;
+static real c_b13 = 1.f;
+static logical c_true = TRUE_;
+
+/* Subroutine */ int slaqr2_(logical *wantt, logical *wantz, integer *n,
+ integer *ktop, integer *kbot, integer *nw, real *h__, integer *ldh,
+ integer *iloz, integer *ihiz, real *z__, integer *ldz, integer *ns,
+ integer *nd, real *sr, real *si, real *v, integer *ldv, integer *nh,
+ real *t, integer *ldt, integer *nv, real *wv, integer *ldwv, real *
+ work, integer *lwork)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, t_dim1, t_offset, v_dim1, v_offset, wv_dim1,
+ wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2, r__3, r__4, r__5, r__6;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k;
+ real s, aa, bb, cc, dd, cs, sn;
+ integer jw;
+ real evi, evk, foo;
+ integer kln;
+ real tau, ulp;
+ integer lwk1, lwk2;
+ real beta;
+ integer kend, kcol, info, ifst, ilst, ltop, krow;
+ logical bulge;
+ extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *), sgemm_(
+ char *, char *, integer *, integer *, integer *, real *, real *,
+ integer *, real *, integer *, real *, real *, integer *);
+ integer infqr;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ integer kwtop;
+ extern /* Subroutine */ int slanv2_(real *, real *, real *, real *, real *
+, real *, real *, real *, real *, real *), slabad_(real *, real *)
+ ;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int sgehrd_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *);
+ real safmin;
+ extern /* Subroutine */ int slarfg_(integer *, real *, real *, integer *,
+ real *);
+ real safmax;
+ extern /* Subroutine */ int slahqr_(logical *, logical *, integer *,
+ integer *, integer *, real *, integer *, real *, real *, integer *
+, integer *, real *, integer *, integer *), slacpy_(char *,
+ integer *, integer *, real *, integer *, real *, integer *), slaset_(char *, integer *, integer *, real *, real *,
+ real *, integer *);
+ logical sorted;
+ extern /* Subroutine */ int strexc_(char *, integer *, real *, integer *,
+ real *, integer *, integer *, integer *, real *, integer *), sormhr_(char *, char *, integer *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+ real smlnum;
+ integer lwkopt;
+
+
+/* -- LAPACK auxiliary routine (version 3.2.1) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* -- April 2009 -- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* This subroutine is identical to SLAQR3 except that it avoids */
+/* recursion by calling SLAHQR instead of SLAQR4. */
+
+
+/* ****************************************************************** */
+/* Aggressive early deflation: */
+
+/* This subroutine accepts as input an upper Hessenberg matrix */
+/* H and performs an orthogonal similarity transformation */
+/* designed to detect and deflate fully converged eigenvalues from */
+/* a trailing principal submatrix. On output H has been over- */
+/* written by a new Hessenberg matrix that is a perturbation of */
+/* an orthogonal similarity transformation of H. It is to be */
+/* hoped that the final version of H has many zero subdiagonal */
+/* entries. */
+
+/* ****************************************************************** */
+/* WANTT (input) LOGICAL */
+/* If .TRUE., then the Hessenberg matrix H is fully updated */
+/* so that the quasi-triangular Schur factor may be */
+/* computed (in cooperation with the calling subroutine). */
+/* If .FALSE., then only enough of H is updated to preserve */
+/* the eigenvalues. */
+
+/* WANTZ (input) LOGICAL */
+/* If .TRUE., then the orthogonal matrix Z is updated so */
+/* so that the orthogonal Schur factor may be computed */
+/* (in cooperation with the calling subroutine). */
+/* If .FALSE., then Z is not referenced. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H and (if WANTZ is .TRUE.) the */
+/* order of the orthogonal matrix Z. */
+
+/* KTOP (input) INTEGER */
+/* It is assumed that either KTOP = 1 or H(KTOP,KTOP-1)=0. */
+/* KBOT and KTOP together determine an isolated block */
+/* along the diagonal of the Hessenberg matrix. */
+
+/* KBOT (input) INTEGER */
+/* It is assumed without a check that either */
+/* KBOT = N or H(KBOT+1,KBOT)=0. KBOT and KTOP together */
+/* determine an isolated block along the diagonal of the */
+/* Hessenberg matrix. */
+
+/* NW (input) INTEGER */
+/* Deflation window size. 1 .LE. NW .LE. (KBOT-KTOP+1). */
+
+/* H (input/output) REAL array, dimension (LDH,N) */
+/* On input the initial N-by-N section of H stores the */
+/* Hessenberg matrix undergoing aggressive early deflation. */
+/* On output H has been transformed by an orthogonal */
+/* similarity transformation, perturbed, and the returned */
+/* to Hessenberg form that (it is to be hoped) has some */
+/* zero subdiagonal entries. */
+
+/* LDH (input) integer */
+/* Leading dimension of H just as declared in the calling */
+/* subroutine. N .LE. LDH */
+
+/* ILOZ (input) INTEGER */
+/* IHIZ (input) INTEGER */
+/* Specify the rows of Z to which transformations must be */
+/* applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. */
+
+/* Z (input/output) REAL array, dimension (LDZ,N) */
+/* IF WANTZ is .TRUE., then on output, the orthogonal */
+/* similarity transformation mentioned above has been */
+/* accumulated into Z(ILOZ:IHIZ,ILO:IHI) from the right. */
+/* If WANTZ is .FALSE., then Z is unreferenced. */
+
+/* LDZ (input) integer */
+/* The leading dimension of Z just as declared in the */
+/* calling subroutine. 1 .LE. LDZ. */
+
+/* NS (output) integer */
+/* The number of unconverged (ie approximate) eigenvalues */
+/* returned in SR and SI that may be used as shifts by the */
+/* calling subroutine. */
+
+/* ND (output) integer */
+/* The number of converged eigenvalues uncovered by this */
+/* subroutine. */
+
+/* SR (output) REAL array, dimension KBOT */
+/* SI (output) REAL array, dimension KBOT */
+/* On output, the real and imaginary parts of approximate */
+/* eigenvalues that may be used for shifts are stored in */
+/* SR(KBOT-ND-NS+1) through SR(KBOT-ND) and */
+/* SI(KBOT-ND-NS+1) through SI(KBOT-ND), respectively. */
+/* The real and imaginary parts of converged eigenvalues */
+/* are stored in SR(KBOT-ND+1) through SR(KBOT) and */
+/* SI(KBOT-ND+1) through SI(KBOT), respectively. */
+
+/* V (workspace) REAL array, dimension (LDV,NW) */
+/* An NW-by-NW work array. */
+
+/* LDV (input) integer scalar */
+/* The leading dimension of V just as declared in the */
+/* calling subroutine. NW .LE. LDV */
+
+/* NH (input) integer scalar */
+/* The number of columns of T. NH.GE.NW. */
+
+/* T (workspace) REAL array, dimension (LDT,NW) */
+
+/* LDT (input) integer */
+/* The leading dimension of T just as declared in the */
+/* calling subroutine. NW .LE. LDT */
+
+/* NV (input) integer */
+/* The number of rows of work array WV available for */
+/* workspace. NV.GE.NW. */
+
+/* WV (workspace) REAL array, dimension (LDWV,NW) */
+
+/* LDWV (input) integer */
+/* The leading dimension of W just as declared in the */
+/* calling subroutine. NW .LE. LDV */
+
+/* WORK (workspace) REAL array, dimension LWORK. */
+/* On exit, WORK(1) is set to an estimate of the optimal value */
+/* of LWORK for the given values of N, NW, KTOP and KBOT. */
+
+/* LWORK (input) integer */
+/* The dimension of the work array WORK. LWORK = 2*NW */
+/* suffices, but greater efficiency may result from larger */
+/* values of LWORK. */
+
+/* If LWORK = -1, then a workspace query is assumed; SLAQR2 */
+/* only estimates the optimal workspace size for the given */
+/* values of N, NW, KTOP and KBOT. The estimate is returned */
+/* in WORK(1). No error message related to LWORK is issued */
+/* by XERBLA. Neither H nor Z are accessed. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* ==== Estimate optimal workspace. ==== */
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --sr;
+ --si;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ wv_dim1 = *ldwv;
+ wv_offset = 1 + wv_dim1;
+ wv -= wv_offset;
+ --work;
+
+ /* Function Body */
+/* Computing MIN */
+ i__1 = *nw, i__2 = *kbot - *ktop + 1;
+ jw = min(i__1,i__2);
+ if (jw <= 2) {
+ lwkopt = 1;
+ } else {
+
+/* ==== Workspace query call to SGEHRD ==== */
+
+ i__1 = jw - 1;
+ sgehrd_(&jw, &c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], &
+ c_n1, &info);
+ lwk1 = (integer) work[1];
+
+/* ==== Workspace query call to SORMHR ==== */
+
+ i__1 = jw - 1;
+ sormhr_("R", "N", &jw, &jw, &c__1, &i__1, &t[t_offset], ldt, &work[1],
+ &v[v_offset], ldv, &work[1], &c_n1, &info);
+ lwk2 = (integer) work[1];
+
+/* ==== Optimal workspace ==== */
+
+ lwkopt = jw + max(lwk1,lwk2);
+ }
+
+/* ==== Quick return in case of workspace query. ==== */
+
+ if (*lwork == -1) {
+ work[1] = (real) lwkopt;
+ return 0;
+ }
+
+/* ==== Nothing to do ... */
+/* ... for an empty active block ... ==== */
+ *ns = 0;
+ *nd = 0;
+ work[1] = 1.f;
+ if (*ktop > *kbot) {
+ return 0;
+ }
+/* ... nor for an empty deflation window. ==== */
+ if (*nw < 1) {
+ return 0;
+ }
+
+/* ==== Machine constants ==== */
+
+ safmin = slamch_("SAFE MINIMUM");
+ safmax = 1.f / safmin;
+ slabad_(&safmin, &safmax);
+ ulp = slamch_("PRECISION");
+ smlnum = safmin * ((real) (*n) / ulp);
+
+/* ==== Setup deflation window ==== */
+
+/* Computing MIN */
+ i__1 = *nw, i__2 = *kbot - *ktop + 1;
+ jw = min(i__1,i__2);
+ kwtop = *kbot - jw + 1;
+ if (kwtop == *ktop) {
+ s = 0.f;
+ } else {
+ s = h__[kwtop + (kwtop - 1) * h_dim1];
+ }
+
+ if (*kbot == kwtop) {
+
+/* ==== 1-by-1 deflation window: not much to do ==== */
+
+ sr[kwtop] = h__[kwtop + kwtop * h_dim1];
+ si[kwtop] = 0.f;
+ *ns = 1;
+ *nd = 0;
+/* Computing MAX */
+ r__2 = smlnum, r__3 = ulp * (r__1 = h__[kwtop + kwtop * h_dim1], dabs(
+ r__1));
+ if (dabs(s) <= dmax(r__2,r__3)) {
+ *ns = 0;
+ *nd = 1;
+ if (kwtop > *ktop) {
+ h__[kwtop + (kwtop - 1) * h_dim1] = 0.f;
+ }
+ }
+ work[1] = 1.f;
+ return 0;
+ }
+
+/* ==== Convert to spike-triangular form. (In case of a */
+/* . rare QR failure, this routine continues to do */
+/* . aggressive early deflation using that part of */
+/* . the deflation window that converged using INFQR */
+/* . here and there to keep track.) ==== */
+
+ slacpy_("U", &jw, &jw, &h__[kwtop + kwtop * h_dim1], ldh, &t[t_offset],
+ ldt);
+ i__1 = jw - 1;
+ i__2 = *ldh + 1;
+ i__3 = *ldt + 1;
+ scopy_(&i__1, &h__[kwtop + 1 + kwtop * h_dim1], &i__2, &t[t_dim1 + 2], &
+ i__3);
+
+ slaset_("A", &jw, &jw, &c_b12, &c_b13, &v[v_offset], ldv);
+ slahqr_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sr[kwtop],
+ &si[kwtop], &c__1, &jw, &v[v_offset], ldv, &infqr);
+
+/* ==== STREXC needs a clean margin near the diagonal ==== */
+
+ i__1 = jw - 3;
+ for (j = 1; j <= i__1; ++j) {
+ t[j + 2 + j * t_dim1] = 0.f;
+ t[j + 3 + j * t_dim1] = 0.f;
+/* L10: */
+ }
+ if (jw > 2) {
+ t[jw + (jw - 2) * t_dim1] = 0.f;
+ }
+
+/* ==== Deflation detection loop ==== */
+
+ *ns = jw;
+ ilst = infqr + 1;
+L20:
+ if (ilst <= *ns) {
+ if (*ns == 1) {
+ bulge = FALSE_;
+ } else {
+ bulge = t[*ns + (*ns - 1) * t_dim1] != 0.f;
+ }
+
+/* ==== Small spike tip test for deflation ==== */
+
+ if (! bulge) {
+
+/* ==== Real eigenvalue ==== */
+
+ foo = (r__1 = t[*ns + *ns * t_dim1], dabs(r__1));
+ if (foo == 0.f) {
+ foo = dabs(s);
+ }
+/* Computing MAX */
+ r__2 = smlnum, r__3 = ulp * foo;
+ if ((r__1 = s * v[*ns * v_dim1 + 1], dabs(r__1)) <= dmax(r__2,
+ r__3)) {
+
+/* ==== Deflatable ==== */
+
+ --(*ns);
+ } else {
+
+/* ==== Undeflatable. Move it up out of the way. */
+/* . (STREXC can not fail in this case.) ==== */
+
+ ifst = *ns;
+ strexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst,
+ &ilst, &work[1], &info);
+ ++ilst;
+ }
+ } else {
+
+/* ==== Complex conjugate pair ==== */
+
+ foo = (r__3 = t[*ns + *ns * t_dim1], dabs(r__3)) + sqrt((r__1 = t[
+ *ns + (*ns - 1) * t_dim1], dabs(r__1))) * sqrt((r__2 = t[*
+ ns - 1 + *ns * t_dim1], dabs(r__2)));
+ if (foo == 0.f) {
+ foo = dabs(s);
+ }
+/* Computing MAX */
+ r__3 = (r__1 = s * v[*ns * v_dim1 + 1], dabs(r__1)), r__4 = (r__2
+ = s * v[(*ns - 1) * v_dim1 + 1], dabs(r__2));
+/* Computing MAX */
+ r__5 = smlnum, r__6 = ulp * foo;
+ if (dmax(r__3,r__4) <= dmax(r__5,r__6)) {
+
+/* ==== Deflatable ==== */
+
+ *ns += -2;
+ } else {
+
+/* ==== Undeflatable. Move them up out of the way. */
+/* . Fortunately, STREXC does the right thing with */
+/* . ILST in case of a rare exchange failure. ==== */
+
+ ifst = *ns;
+ strexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst,
+ &ilst, &work[1], &info);
+ ilst += 2;
+ }
+ }
+
+/* ==== End deflation detection loop ==== */
+
+ goto L20;
+ }
+
+/* ==== Return to Hessenberg form ==== */
+
+ if (*ns == 0) {
+ s = 0.f;
+ }
+
+ if (*ns < jw) {
+
+/* ==== sorting diagonal blocks of T improves accuracy for */
+/* . graded matrices. Bubble sort deals well with */
+/* . exchange failures. ==== */
+
+ sorted = FALSE_;
+ i__ = *ns + 1;
+L30:
+ if (sorted) {
+ goto L50;
+ }
+ sorted = TRUE_;
+
+ kend = i__ - 1;
+ i__ = infqr + 1;
+ if (i__ == *ns) {
+ k = i__ + 1;
+ } else if (t[i__ + 1 + i__ * t_dim1] == 0.f) {
+ k = i__ + 1;
+ } else {
+ k = i__ + 2;
+ }
+L40:
+ if (k <= kend) {
+ if (k == i__ + 1) {
+ evi = (r__1 = t[i__ + i__ * t_dim1], dabs(r__1));
+ } else {
+ evi = (r__3 = t[i__ + i__ * t_dim1], dabs(r__3)) + sqrt((r__1
+ = t[i__ + 1 + i__ * t_dim1], dabs(r__1))) * sqrt((
+ r__2 = t[i__ + (i__ + 1) * t_dim1], dabs(r__2)));
+ }
+
+ if (k == kend) {
+ evk = (r__1 = t[k + k * t_dim1], dabs(r__1));
+ } else if (t[k + 1 + k * t_dim1] == 0.f) {
+ evk = (r__1 = t[k + k * t_dim1], dabs(r__1));
+ } else {
+ evk = (r__3 = t[k + k * t_dim1], dabs(r__3)) + sqrt((r__1 = t[
+ k + 1 + k * t_dim1], dabs(r__1))) * sqrt((r__2 = t[k
+ + (k + 1) * t_dim1], dabs(r__2)));
+ }
+
+ if (evi >= evk) {
+ i__ = k;
+ } else {
+ sorted = FALSE_;
+ ifst = i__;
+ ilst = k;
+ strexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst,
+ &ilst, &work[1], &info);
+ if (info == 0) {
+ i__ = ilst;
+ } else {
+ i__ = k;
+ }
+ }
+ if (i__ == kend) {
+ k = i__ + 1;
+ } else if (t[i__ + 1 + i__ * t_dim1] == 0.f) {
+ k = i__ + 1;
+ } else {
+ k = i__ + 2;
+ }
+ goto L40;
+ }
+ goto L30;
+L50:
+ ;
+ }
+
+/* ==== Restore shift/eigenvalue array from T ==== */
+
+ i__ = jw;
+L60:
+ if (i__ >= infqr + 1) {
+ if (i__ == infqr + 1) {
+ sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1];
+ si[kwtop + i__ - 1] = 0.f;
+ --i__;
+ } else if (t[i__ + (i__ - 1) * t_dim1] == 0.f) {
+ sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1];
+ si[kwtop + i__ - 1] = 0.f;
+ --i__;
+ } else {
+ aa = t[i__ - 1 + (i__ - 1) * t_dim1];
+ cc = t[i__ + (i__ - 1) * t_dim1];
+ bb = t[i__ - 1 + i__ * t_dim1];
+ dd = t[i__ + i__ * t_dim1];
+ slanv2_(&aa, &bb, &cc, &dd, &sr[kwtop + i__ - 2], &si[kwtop + i__
+ - 2], &sr[kwtop + i__ - 1], &si[kwtop + i__ - 1], &cs, &
+ sn);
+ i__ += -2;
+ }
+ goto L60;
+ }
+
+ if (*ns < jw || s == 0.f) {
+ if (*ns > 1 && s != 0.f) {
+
+/* ==== Reflect spike back into lower triangle ==== */
+
+ scopy_(ns, &v[v_offset], ldv, &work[1], &c__1);
+ beta = work[1];
+ slarfg_(ns, &beta, &work[2], &c__1, &tau);
+ work[1] = 1.f;
+
+ i__1 = jw - 2;
+ i__2 = jw - 2;
+ slaset_("L", &i__1, &i__2, &c_b12, &c_b12, &t[t_dim1 + 3], ldt);
+
+ slarf_("L", ns, &jw, &work[1], &c__1, &tau, &t[t_offset], ldt, &
+ work[jw + 1]);
+ slarf_("R", ns, ns, &work[1], &c__1, &tau, &t[t_offset], ldt, &
+ work[jw + 1]);
+ slarf_("R", &jw, ns, &work[1], &c__1, &tau, &v[v_offset], ldv, &
+ work[jw + 1]);
+
+ i__1 = *lwork - jw;
+ sgehrd_(&jw, &c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1]
+, &i__1, &info);
+ }
+
+/* ==== Copy updated reduced window into place ==== */
+
+ if (kwtop > 1) {
+ h__[kwtop + (kwtop - 1) * h_dim1] = s * v[v_dim1 + 1];
+ }
+ slacpy_("U", &jw, &jw, &t[t_offset], ldt, &h__[kwtop + kwtop * h_dim1]
+, ldh);
+ i__1 = jw - 1;
+ i__2 = *ldt + 1;
+ i__3 = *ldh + 1;
+ scopy_(&i__1, &t[t_dim1 + 2], &i__2, &h__[kwtop + 1 + kwtop * h_dim1],
+ &i__3);
+
+/* ==== Accumulate orthogonal matrix in order update */
+/* . H and Z, if requested. ==== */
+
+ if (*ns > 1 && s != 0.f) {
+ i__1 = *lwork - jw;
+ sormhr_("R", "N", &jw, ns, &c__1, ns, &t[t_offset], ldt, &work[1],
+ &v[v_offset], ldv, &work[jw + 1], &i__1, &info);
+ }
+
+/* ==== Update vertical slab in H ==== */
+
+ if (*wantt) {
+ ltop = 1;
+ } else {
+ ltop = *ktop;
+ }
+ i__1 = kwtop - 1;
+ i__2 = *nv;
+ for (krow = ltop; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *nv, i__4 = kwtop - krow;
+ kln = min(i__3,i__4);
+ sgemm_("N", "N", &kln, &jw, &jw, &c_b13, &h__[krow + kwtop *
+ h_dim1], ldh, &v[v_offset], ldv, &c_b12, &wv[wv_offset],
+ ldwv);
+ slacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &h__[krow + kwtop *
+ h_dim1], ldh);
+/* L70: */
+ }
+
+/* ==== Update horizontal slab in H ==== */
+
+ if (*wantt) {
+ i__2 = *n;
+ i__1 = *nh;
+ for (kcol = *kbot + 1; i__1 < 0 ? kcol >= i__2 : kcol <= i__2;
+ kcol += i__1) {
+/* Computing MIN */
+ i__3 = *nh, i__4 = *n - kcol + 1;
+ kln = min(i__3,i__4);
+ sgemm_("C", "N", &jw, &kln, &jw, &c_b13, &v[v_offset], ldv, &
+ h__[kwtop + kcol * h_dim1], ldh, &c_b12, &t[t_offset],
+ ldt);
+ slacpy_("A", &jw, &kln, &t[t_offset], ldt, &h__[kwtop + kcol *
+ h_dim1], ldh);
+/* L80: */
+ }
+ }
+
+/* ==== Update vertical slab in Z ==== */
+
+ if (*wantz) {
+ i__1 = *ihiz;
+ i__2 = *nv;
+ for (krow = *iloz; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *nv, i__4 = *ihiz - krow + 1;
+ kln = min(i__3,i__4);
+ sgemm_("N", "N", &kln, &jw, &jw, &c_b13, &z__[krow + kwtop *
+ z_dim1], ldz, &v[v_offset], ldv, &c_b12, &wv[
+ wv_offset], ldwv);
+ slacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &z__[krow +
+ kwtop * z_dim1], ldz);
+/* L90: */
+ }
+ }
+ }
+
+/* ==== Return the number of deflations ... ==== */
+
+ *nd = jw - *ns;
+
+/* ==== ... and the number of shifts. (Subtracting */
+/* . INFQR from the spike length takes care */
+/* . of the case of a rare QR failure while */
+/* . calculating eigenvalues of the deflation */
+/* . window.) ==== */
+
+ *ns -= infqr;
+
+/* ==== Return optimal workspace. ==== */
+
+ work[1] = (real) lwkopt;
+
+/* ==== End of SLAQR2 ==== */
+
+ return 0;
+} /* slaqr2_ */
diff --git a/contrib/libs/clapack/slaqr3.c b/contrib/libs/clapack/slaqr3.c
new file mode 100644
index 0000000000..b3b828af98
--- /dev/null
+++ b/contrib/libs/clapack/slaqr3.c
@@ -0,0 +1,710 @@
+/* slaqr3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static logical c_true = TRUE_;
+static real c_b17 = 0.f;
+static real c_b18 = 1.f;
+static integer c__12 = 12;
+
+/* Subroutine */ int slaqr3_(logical *wantt, logical *wantz, integer *n,
+ integer *ktop, integer *kbot, integer *nw, real *h__, integer *ldh,
+ integer *iloz, integer *ihiz, real *z__, integer *ldz, integer *ns,
+ integer *nd, real *sr, real *si, real *v, integer *ldv, integer *nh,
+ real *t, integer *ldt, integer *nv, real *wv, integer *ldwv, real *
+ work, integer *lwork)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, t_dim1, t_offset, v_dim1, v_offset, wv_dim1,
+ wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2, r__3, r__4, r__5, r__6;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k;
+ real s, aa, bb, cc, dd, cs, sn;
+ integer jw;
+ real evi, evk, foo;
+ integer kln;
+ real tau, ulp;
+ integer lwk1, lwk2, lwk3;
+ real beta;
+ integer kend, kcol, info, nmin, ifst, ilst, ltop, krow;
+ logical bulge;
+ extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *), sgemm_(
+ char *, char *, integer *, integer *, integer *, real *, real *,
+ integer *, real *, integer *, real *, real *, integer *);
+ integer infqr;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ integer kwtop;
+ extern /* Subroutine */ int slanv2_(real *, real *, real *, real *, real *
+, real *, real *, real *, real *, real *), slaqr4_(logical *,
+ logical *, integer *, integer *, integer *, real *, integer *,
+ real *, real *, integer *, integer *, real *, integer *, real *,
+ integer *, integer *), slabad_(real *, real *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int sgehrd_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *);
+ real safmin;
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ real safmax;
+ extern /* Subroutine */ int slarfg_(integer *, real *, real *, integer *,
+ real *), slahqr_(logical *, logical *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, integer *
+, real *, integer *, integer *), slacpy_(char *, integer *,
+ integer *, real *, integer *, real *, integer *), slaset_(
+ char *, integer *, integer *, real *, real *, real *, integer *);
+ logical sorted;
+ extern /* Subroutine */ int strexc_(char *, integer *, real *, integer *,
+ real *, integer *, integer *, integer *, real *, integer *), sormhr_(char *, char *, integer *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+ real smlnum;
+ integer lwkopt;
+
+
+/* -- LAPACK auxiliary routine (version 3.2.1) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* -- April 2009 -- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* ****************************************************************** */
+/* Aggressive early deflation: */
+
+/* This subroutine accepts as input an upper Hessenberg matrix */
+/* H and performs an orthogonal similarity transformation */
+/* designed to detect and deflate fully converged eigenvalues from */
+/* a trailing principal submatrix. On output H has been over- */
+/* written by a new Hessenberg matrix that is a perturbation of */
+/* an orthogonal similarity transformation of H. It is to be */
+/* hoped that the final version of H has many zero subdiagonal */
+/* entries. */
+
+/* ****************************************************************** */
+/* WANTT (input) LOGICAL */
+/* If .TRUE., then the Hessenberg matrix H is fully updated */
+/* so that the quasi-triangular Schur factor may be */
+/* computed (in cooperation with the calling subroutine). */
+/* If .FALSE., then only enough of H is updated to preserve */
+/* the eigenvalues. */
+
+/* WANTZ (input) LOGICAL */
+/*             If .TRUE., then the orthogonal matrix Z is updated */
+/*             so that the orthogonal Schur factor may be computed */
+/* (in cooperation with the calling subroutine). */
+/* If .FALSE., then Z is not referenced. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H and (if WANTZ is .TRUE.) the */
+/* order of the orthogonal matrix Z. */
+
+/* KTOP (input) INTEGER */
+/* It is assumed that either KTOP = 1 or H(KTOP,KTOP-1)=0. */
+/* KBOT and KTOP together determine an isolated block */
+/* along the diagonal of the Hessenberg matrix. */
+
+/* KBOT (input) INTEGER */
+/* It is assumed without a check that either */
+/* KBOT = N or H(KBOT+1,KBOT)=0. KBOT and KTOP together */
+/* determine an isolated block along the diagonal of the */
+/* Hessenberg matrix. */
+
+/* NW (input) INTEGER */
+/* Deflation window size. 1 .LE. NW .LE. (KBOT-KTOP+1). */
+
+/* H (input/output) REAL array, dimension (LDH,N) */
+/* On input the initial N-by-N section of H stores the */
+/* Hessenberg matrix undergoing aggressive early deflation. */
+/* On output H has been transformed by an orthogonal */
+/*          similarity transformation, perturbed, and then returned */
+/* to Hessenberg form that (it is to be hoped) has some */
+/* zero subdiagonal entries. */
+
+/* LDH (input) integer */
+/* Leading dimension of H just as declared in the calling */
+/* subroutine. N .LE. LDH */
+
+/* ILOZ (input) INTEGER */
+/* IHIZ (input) INTEGER */
+/* Specify the rows of Z to which transformations must be */
+/* applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. */
+
+/* Z (input/output) REAL array, dimension (LDZ,N) */
+/* IF WANTZ is .TRUE., then on output, the orthogonal */
+/* similarity transformation mentioned above has been */
+/* accumulated into Z(ILOZ:IHIZ,ILO:IHI) from the right. */
+/* If WANTZ is .FALSE., then Z is unreferenced. */
+
+/* LDZ (input) integer */
+/* The leading dimension of Z just as declared in the */
+/* calling subroutine. 1 .LE. LDZ. */
+
+/* NS (output) integer */
+/* The number of unconverged (ie approximate) eigenvalues */
+/* returned in SR and SI that may be used as shifts by the */
+/* calling subroutine. */
+
+/* ND (output) integer */
+/* The number of converged eigenvalues uncovered by this */
+/* subroutine. */
+
+/* SR (output) REAL array, dimension KBOT */
+/* SI (output) REAL array, dimension KBOT */
+/* On output, the real and imaginary parts of approximate */
+/* eigenvalues that may be used for shifts are stored in */
+/* SR(KBOT-ND-NS+1) through SR(KBOT-ND) and */
+/* SI(KBOT-ND-NS+1) through SI(KBOT-ND), respectively. */
+/* The real and imaginary parts of converged eigenvalues */
+/* are stored in SR(KBOT-ND+1) through SR(KBOT) and */
+/* SI(KBOT-ND+1) through SI(KBOT), respectively. */
+
+/* V (workspace) REAL array, dimension (LDV,NW) */
+/* An NW-by-NW work array. */
+
+/* LDV (input) integer scalar */
+/* The leading dimension of V just as declared in the */
+/* calling subroutine. NW .LE. LDV */
+
+/* NH (input) integer scalar */
+/* The number of columns of T. NH.GE.NW. */
+
+/* T (workspace) REAL array, dimension (LDT,NW) */
+
+/* LDT (input) integer */
+/* The leading dimension of T just as declared in the */
+/* calling subroutine. NW .LE. LDT */
+
+/* NV (input) integer */
+/* The number of rows of work array WV available for */
+/* workspace. NV.GE.NW. */
+
+/* WV (workspace) REAL array, dimension (LDWV,NW) */
+
+/* LDWV (input) integer */
+/*          The leading dimension of WV just as declared in the */
+/*          calling subroutine. NW .LE. LDWV */
+
+/* WORK (workspace) REAL array, dimension LWORK. */
+/* On exit, WORK(1) is set to an estimate of the optimal value */
+/* of LWORK for the given values of N, NW, KTOP and KBOT. */
+
+/* LWORK (input) integer */
+/* The dimension of the work array WORK. LWORK = 2*NW */
+/* suffices, but greater efficiency may result from larger */
+/* values of LWORK. */
+
+/* If LWORK = -1, then a workspace query is assumed; SLAQR3 */
+/* only estimates the optimal workspace size for the given */
+/* values of N, NW, KTOP and KBOT. The estimate is returned */
+/* in WORK(1). No error message related to LWORK is issued */
+/* by XERBLA. Neither H nor Z are accessed. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* ==== Estimate optimal workspace. ==== */
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --sr;
+ --si;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ wv_dim1 = *ldwv;
+ wv_offset = 1 + wv_dim1;
+ wv -= wv_offset;
+ --work;
+
+ /* Function Body */
+/* Computing MIN */
+ i__1 = *nw, i__2 = *kbot - *ktop + 1;
+ jw = min(i__1,i__2);
+ if (jw <= 2) {
+ lwkopt = 1;
+ } else {
+
+/* ==== Workspace query call to SGEHRD ==== */
+
+ i__1 = jw - 1;
+ sgehrd_(&jw, &c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], &
+ c_n1, &info);
+ lwk1 = (integer) work[1];
+
+/* ==== Workspace query call to SORMHR ==== */
+
+ i__1 = jw - 1;
+ sormhr_("R", "N", &jw, &jw, &c__1, &i__1, &t[t_offset], ldt, &work[1],
+ &v[v_offset], ldv, &work[1], &c_n1, &info);
+ lwk2 = (integer) work[1];
+
+/* ==== Workspace query call to SLAQR4 ==== */
+
+ slaqr4_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sr[1],
+ &si[1], &c__1, &jw, &v[v_offset], ldv, &work[1], &c_n1, &
+ infqr);
+ lwk3 = (integer) work[1];
+
+/* ==== Optimal workspace ==== */
+
+/* Computing MAX */
+ i__1 = jw + max(lwk1,lwk2);
+ lwkopt = max(i__1,lwk3);
+ }
+
+/* ==== Quick return in case of workspace query. ==== */
+
+ if (*lwork == -1) {
+ work[1] = (real) lwkopt;
+ return 0;
+ }
+
+/* ==== Nothing to do ... */
+/* ... for an empty active block ... ==== */
+ *ns = 0;
+ *nd = 0;
+ work[1] = 1.f;
+ if (*ktop > *kbot) {
+ return 0;
+ }
+/* ... nor for an empty deflation window. ==== */
+ if (*nw < 1) {
+ return 0;
+ }
+
+/* ==== Machine constants ==== */
+
+ safmin = slamch_("SAFE MINIMUM");
+ safmax = 1.f / safmin;
+ slabad_(&safmin, &safmax);
+ ulp = slamch_("PRECISION");
+ smlnum = safmin * ((real) (*n) / ulp);
+
+/* ==== Setup deflation window ==== */
+
+/* Computing MIN */
+ i__1 = *nw, i__2 = *kbot - *ktop + 1;
+ jw = min(i__1,i__2);
+ kwtop = *kbot - jw + 1;
+ if (kwtop == *ktop) {
+ s = 0.f;
+ } else {
+ s = h__[kwtop + (kwtop - 1) * h_dim1];
+ }
+
+ if (*kbot == kwtop) {
+
+/* ==== 1-by-1 deflation window: not much to do ==== */
+
+ sr[kwtop] = h__[kwtop + kwtop * h_dim1];
+ si[kwtop] = 0.f;
+ *ns = 1;
+ *nd = 0;
+/* Computing MAX */
+ r__2 = smlnum, r__3 = ulp * (r__1 = h__[kwtop + kwtop * h_dim1], dabs(
+ r__1));
+ if (dabs(s) <= dmax(r__2,r__3)) {
+ *ns = 0;
+ *nd = 1;
+ if (kwtop > *ktop) {
+ h__[kwtop + (kwtop - 1) * h_dim1] = 0.f;
+ }
+ }
+ work[1] = 1.f;
+ return 0;
+ }
+
+/* ==== Convert to spike-triangular form. (In case of a */
+/* . rare QR failure, this routine continues to do */
+/* . aggressive early deflation using that part of */
+/* . the deflation window that converged using INFQR */
+/* . here and there to keep track.) ==== */
+
+ slacpy_("U", &jw, &jw, &h__[kwtop + kwtop * h_dim1], ldh, &t[t_offset],
+ ldt);
+ i__1 = jw - 1;
+ i__2 = *ldh + 1;
+ i__3 = *ldt + 1;
+ scopy_(&i__1, &h__[kwtop + 1 + kwtop * h_dim1], &i__2, &t[t_dim1 + 2], &
+ i__3);
+
+ slaset_("A", &jw, &jw, &c_b17, &c_b18, &v[v_offset], ldv);
+ nmin = ilaenv_(&c__12, "SLAQR3", "SV", &jw, &c__1, &jw, lwork);
+ if (jw > nmin) {
+ slaqr4_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sr[
+ kwtop], &si[kwtop], &c__1, &jw, &v[v_offset], ldv, &work[1],
+ lwork, &infqr);
+ } else {
+ slahqr_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sr[
+ kwtop], &si[kwtop], &c__1, &jw, &v[v_offset], ldv, &infqr);
+ }
+
+/* ==== STREXC needs a clean margin near the diagonal ==== */
+
+ i__1 = jw - 3;
+ for (j = 1; j <= i__1; ++j) {
+ t[j + 2 + j * t_dim1] = 0.f;
+ t[j + 3 + j * t_dim1] = 0.f;
+/* L10: */
+ }
+ if (jw > 2) {
+ t[jw + (jw - 2) * t_dim1] = 0.f;
+ }
+
+/* ==== Deflation detection loop ==== */
+
+ *ns = jw;
+ ilst = infqr + 1;
+L20:
+ if (ilst <= *ns) {
+ if (*ns == 1) {
+ bulge = FALSE_;
+ } else {
+ bulge = t[*ns + (*ns - 1) * t_dim1] != 0.f;
+ }
+
+/* ==== Small spike tip test for deflation ==== */
+
+ if (! bulge) {
+
+/* ==== Real eigenvalue ==== */
+
+ foo = (r__1 = t[*ns + *ns * t_dim1], dabs(r__1));
+ if (foo == 0.f) {
+ foo = dabs(s);
+ }
+/* Computing MAX */
+ r__2 = smlnum, r__3 = ulp * foo;
+ if ((r__1 = s * v[*ns * v_dim1 + 1], dabs(r__1)) <= dmax(r__2,
+ r__3)) {
+
+/* ==== Deflatable ==== */
+
+ --(*ns);
+ } else {
+
+/* ==== Undeflatable. Move it up out of the way. */
+/* . (STREXC can not fail in this case.) ==== */
+
+ ifst = *ns;
+ strexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst,
+ &ilst, &work[1], &info);
+ ++ilst;
+ }
+ } else {
+
+/* ==== Complex conjugate pair ==== */
+
+ foo = (r__3 = t[*ns + *ns * t_dim1], dabs(r__3)) + sqrt((r__1 = t[
+ *ns + (*ns - 1) * t_dim1], dabs(r__1))) * sqrt((r__2 = t[*
+ ns - 1 + *ns * t_dim1], dabs(r__2)));
+ if (foo == 0.f) {
+ foo = dabs(s);
+ }
+/* Computing MAX */
+ r__3 = (r__1 = s * v[*ns * v_dim1 + 1], dabs(r__1)), r__4 = (r__2
+ = s * v[(*ns - 1) * v_dim1 + 1], dabs(r__2));
+/* Computing MAX */
+ r__5 = smlnum, r__6 = ulp * foo;
+ if (dmax(r__3,r__4) <= dmax(r__5,r__6)) {
+
+/* ==== Deflatable ==== */
+
+ *ns += -2;
+ } else {
+
+/* ==== Undeflatable. Move them up out of the way. */
+/* . Fortunately, STREXC does the right thing with */
+/* . ILST in case of a rare exchange failure. ==== */
+
+ ifst = *ns;
+ strexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst,
+ &ilst, &work[1], &info);
+ ilst += 2;
+ }
+ }
+
+/* ==== End deflation detection loop ==== */
+
+ goto L20;
+ }
+
+/* ==== Return to Hessenberg form ==== */
+
+ if (*ns == 0) {
+ s = 0.f;
+ }
+
+ if (*ns < jw) {
+
+/* ==== sorting diagonal blocks of T improves accuracy for */
+/* . graded matrices. Bubble sort deals well with */
+/* . exchange failures. ==== */
+
+ sorted = FALSE_;
+ i__ = *ns + 1;
+L30:
+ if (sorted) {
+ goto L50;
+ }
+ sorted = TRUE_;
+
+ kend = i__ - 1;
+ i__ = infqr + 1;
+ if (i__ == *ns) {
+ k = i__ + 1;
+ } else if (t[i__ + 1 + i__ * t_dim1] == 0.f) {
+ k = i__ + 1;
+ } else {
+ k = i__ + 2;
+ }
+L40:
+ if (k <= kend) {
+ if (k == i__ + 1) {
+ evi = (r__1 = t[i__ + i__ * t_dim1], dabs(r__1));
+ } else {
+ evi = (r__3 = t[i__ + i__ * t_dim1], dabs(r__3)) + sqrt((r__1
+ = t[i__ + 1 + i__ * t_dim1], dabs(r__1))) * sqrt((
+ r__2 = t[i__ + (i__ + 1) * t_dim1], dabs(r__2)));
+ }
+
+ if (k == kend) {
+ evk = (r__1 = t[k + k * t_dim1], dabs(r__1));
+ } else if (t[k + 1 + k * t_dim1] == 0.f) {
+ evk = (r__1 = t[k + k * t_dim1], dabs(r__1));
+ } else {
+ evk = (r__3 = t[k + k * t_dim1], dabs(r__3)) + sqrt((r__1 = t[
+ k + 1 + k * t_dim1], dabs(r__1))) * sqrt((r__2 = t[k
+ + (k + 1) * t_dim1], dabs(r__2)));
+ }
+
+ if (evi >= evk) {
+ i__ = k;
+ } else {
+ sorted = FALSE_;
+ ifst = i__;
+ ilst = k;
+ strexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst,
+ &ilst, &work[1], &info);
+ if (info == 0) {
+ i__ = ilst;
+ } else {
+ i__ = k;
+ }
+ }
+ if (i__ == kend) {
+ k = i__ + 1;
+ } else if (t[i__ + 1 + i__ * t_dim1] == 0.f) {
+ k = i__ + 1;
+ } else {
+ k = i__ + 2;
+ }
+ goto L40;
+ }
+ goto L30;
+L50:
+ ;
+ }
+
+/* ==== Restore shift/eigenvalue array from T ==== */
+
+ i__ = jw;
+L60:
+ if (i__ >= infqr + 1) {
+ if (i__ == infqr + 1) {
+ sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1];
+ si[kwtop + i__ - 1] = 0.f;
+ --i__;
+ } else if (t[i__ + (i__ - 1) * t_dim1] == 0.f) {
+ sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1];
+ si[kwtop + i__ - 1] = 0.f;
+ --i__;
+ } else {
+ aa = t[i__ - 1 + (i__ - 1) * t_dim1];
+ cc = t[i__ + (i__ - 1) * t_dim1];
+ bb = t[i__ - 1 + i__ * t_dim1];
+ dd = t[i__ + i__ * t_dim1];
+ slanv2_(&aa, &bb, &cc, &dd, &sr[kwtop + i__ - 2], &si[kwtop + i__
+ - 2], &sr[kwtop + i__ - 1], &si[kwtop + i__ - 1], &cs, &
+ sn);
+ i__ += -2;
+ }
+ goto L60;
+ }
+
+ if (*ns < jw || s == 0.f) {
+ if (*ns > 1 && s != 0.f) {
+
+/* ==== Reflect spike back into lower triangle ==== */
+
+ scopy_(ns, &v[v_offset], ldv, &work[1], &c__1);
+ beta = work[1];
+ slarfg_(ns, &beta, &work[2], &c__1, &tau);
+ work[1] = 1.f;
+
+ i__1 = jw - 2;
+ i__2 = jw - 2;
+ slaset_("L", &i__1, &i__2, &c_b17, &c_b17, &t[t_dim1 + 3], ldt);
+
+ slarf_("L", ns, &jw, &work[1], &c__1, &tau, &t[t_offset], ldt, &
+ work[jw + 1]);
+ slarf_("R", ns, ns, &work[1], &c__1, &tau, &t[t_offset], ldt, &
+ work[jw + 1]);
+ slarf_("R", &jw, ns, &work[1], &c__1, &tau, &v[v_offset], ldv, &
+ work[jw + 1]);
+
+ i__1 = *lwork - jw;
+ sgehrd_(&jw, &c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1]
+, &i__1, &info);
+ }
+
+/* ==== Copy updated reduced window into place ==== */
+
+ if (kwtop > 1) {
+ h__[kwtop + (kwtop - 1) * h_dim1] = s * v[v_dim1 + 1];
+ }
+ slacpy_("U", &jw, &jw, &t[t_offset], ldt, &h__[kwtop + kwtop * h_dim1]
+, ldh);
+ i__1 = jw - 1;
+ i__2 = *ldt + 1;
+ i__3 = *ldh + 1;
+ scopy_(&i__1, &t[t_dim1 + 2], &i__2, &h__[kwtop + 1 + kwtop * h_dim1],
+ &i__3);
+
+/* ==== Accumulate orthogonal matrix in order to update */
+/* . H and Z, if requested. ==== */
+
+ if (*ns > 1 && s != 0.f) {
+ i__1 = *lwork - jw;
+ sormhr_("R", "N", &jw, ns, &c__1, ns, &t[t_offset], ldt, &work[1],
+ &v[v_offset], ldv, &work[jw + 1], &i__1, &info);
+ }
+
+/* ==== Update vertical slab in H ==== */
+
+ if (*wantt) {
+ ltop = 1;
+ } else {
+ ltop = *ktop;
+ }
+ i__1 = kwtop - 1;
+ i__2 = *nv;
+ for (krow = ltop; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *nv, i__4 = kwtop - krow;
+ kln = min(i__3,i__4);
+ sgemm_("N", "N", &kln, &jw, &jw, &c_b18, &h__[krow + kwtop *
+ h_dim1], ldh, &v[v_offset], ldv, &c_b17, &wv[wv_offset],
+ ldwv);
+ slacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &h__[krow + kwtop *
+ h_dim1], ldh);
+/* L70: */
+ }
+
+/* ==== Update horizontal slab in H ==== */
+
+ if (*wantt) {
+ i__2 = *n;
+ i__1 = *nh;
+ for (kcol = *kbot + 1; i__1 < 0 ? kcol >= i__2 : kcol <= i__2;
+ kcol += i__1) {
+/* Computing MIN */
+ i__3 = *nh, i__4 = *n - kcol + 1;
+ kln = min(i__3,i__4);
+ sgemm_("C", "N", &jw, &kln, &jw, &c_b18, &v[v_offset], ldv, &
+ h__[kwtop + kcol * h_dim1], ldh, &c_b17, &t[t_offset],
+ ldt);
+ slacpy_("A", &jw, &kln, &t[t_offset], ldt, &h__[kwtop + kcol *
+ h_dim1], ldh);
+/* L80: */
+ }
+ }
+
+/* ==== Update vertical slab in Z ==== */
+
+ if (*wantz) {
+ i__1 = *ihiz;
+ i__2 = *nv;
+ for (krow = *iloz; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *nv, i__4 = *ihiz - krow + 1;
+ kln = min(i__3,i__4);
+ sgemm_("N", "N", &kln, &jw, &jw, &c_b18, &z__[krow + kwtop *
+ z_dim1], ldz, &v[v_offset], ldv, &c_b17, &wv[
+ wv_offset], ldwv);
+ slacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &z__[krow +
+ kwtop * z_dim1], ldz);
+/* L90: */
+ }
+ }
+ }
+
+/* ==== Return the number of deflations ... ==== */
+
+ *nd = jw - *ns;
+
+/* ==== ... and the number of shifts. (Subtracting */
+/* . INFQR from the spike length takes care */
+/* . of the case of a rare QR failure while */
+/* . calculating eigenvalues of the deflation */
+/* . window.) ==== */
+
+ *ns -= infqr;
+
+/* ==== Return optimal workspace. ==== */
+
+ work[1] = (real) lwkopt;
+
+/* ==== End of SLAQR3 ==== */
+
+ return 0;
+} /* slaqr3_ */
diff --git a/contrib/libs/clapack/slaqr4.c b/contrib/libs/clapack/slaqr4.c
new file mode 100644
index 0000000000..c0d039cbef
--- /dev/null
+++ b/contrib/libs/clapack/slaqr4.c
@@ -0,0 +1,751 @@
+/* slaqr4.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__13 = 13;
+static integer c__15 = 15;
+static integer c_n1 = -1;
+static integer c__12 = 12;
+static integer c__14 = 14;
+static integer c__16 = 16;
+static logical c_false = FALSE_;
+static integer c__1 = 1;
+static integer c__3 = 3;
+
+/* Subroutine */ int slaqr4_(logical *wantt, logical *wantz, integer *n,
+ integer *ilo, integer *ihi, real *h__, integer *ldh, real *wr, real *
+ wi, integer *iloz, integer *ihiz, real *z__, integer *ldz, real *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4;
+
+ /* Local variables */
+ integer i__, k;
+ real aa, bb, cc, dd;
+ integer ld;
+ real cs;
+ integer nh, it, ks, kt;
+ real sn;
+ integer ku, kv, ls, ns;
+ real ss;
+ integer nw, inf, kdu, nho, nve, kwh, nsr, nwr, kwv, ndec, ndfl, kbot,
+ nmin;
+ real swap;
+ integer ktop;
+ real zdum[1] /* was [1][1] */;
+ integer kacc22, itmax, nsmax, nwmax, kwtop;
+ extern /* Subroutine */ int slaqr2_(logical *, logical *, integer *,
+ integer *, integer *, integer *, real *, integer *, integer *,
+ integer *, real *, integer *, integer *, integer *, real *, real *
+, real *, integer *, integer *, real *, integer *, integer *,
+ real *, integer *, real *, integer *), slanv2_(real *, real *,
+ real *, real *, real *, real *, real *, real *, real *, real *),
+ slaqr5_(logical *, logical *, integer *, integer *, integer *,
+ integer *, integer *, real *, real *, real *, integer *, integer *
+, integer *, real *, integer *, real *, integer *, real *,
+ integer *, integer *, real *, integer *, integer *, real *,
+ integer *);
+ integer nibble;
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ char jbcmpz[1];
+ extern /* Subroutine */ int slahqr_(logical *, logical *, integer *,
+ integer *, integer *, real *, integer *, real *, real *, integer *
+, integer *, real *, integer *, integer *), slacpy_(char *,
+ integer *, integer *, real *, integer *, real *, integer *);
+ integer nwupbd;
+ logical sorted;
+ integer lwkopt;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* This subroutine implements one level of recursion for SLAQR0. */
+/* It is a complete implementation of the small bulge multi-shift */
+/* QR algorithm. It may be called by SLAQR0 and, for large enough */
+/* deflation window size, it may be called by SLAQR3. This */
+/* subroutine is identical to SLAQR0 except that it calls SLAQR2 */
+/* instead of SLAQR3. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAQR4 computes the eigenvalues of a Hessenberg matrix H */
+/* and, optionally, the matrices T and Z from the Schur decomposition */
+/* H = Z T Z**T, where T is an upper quasi-triangular matrix (the */
+/* Schur form), and Z is the orthogonal matrix of Schur vectors. */
+
+/* Optionally Z may be postmultiplied into an input orthogonal */
+/* matrix Q so that this routine can give the Schur factorization */
+/* of a matrix A which has been reduced to the Hessenberg form H */
+/* by the orthogonal matrix Q: A = Q*H*Q**T = (QZ)*T*(QZ)**T. */
+
+/* Arguments */
+/* ========= */
+
+/* WANTT (input) LOGICAL */
+/* = .TRUE. : the full Schur form T is required; */
+/* = .FALSE.: only eigenvalues are required. */
+
+/* WANTZ (input) LOGICAL */
+/* = .TRUE. : the matrix of Schur vectors Z is required; */
+/* = .FALSE.: Schur vectors are not required. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N .GE. 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that H is already upper triangular in rows */
+/* and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, */
+/* H(ILO,ILO-1) is zero. ILO and IHI are normally set by a */
+/* previous call to SGEBAL, and then passed to SGEHRD when the */
+/* matrix output by SGEBAL is reduced to Hessenberg form. */
+/* Otherwise, ILO and IHI should be set to 1 and N, */
+/* respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. */
+/* If N = 0, then ILO = 1 and IHI = 0. */
+
+/* H (input/output) REAL array, dimension (LDH,N) */
+/* On entry, the upper Hessenberg matrix H. */
+/* On exit, if INFO = 0 and WANTT is .TRUE., then H contains */
+/* the upper quasi-triangular matrix T from the Schur */
+/* decomposition (the Schur form); 2-by-2 diagonal blocks */
+/* (corresponding to complex conjugate pairs of eigenvalues) */
+/* are returned in standard form, with H(i,i) = H(i+1,i+1) */
+/* and H(i+1,i)*H(i,i+1).LT.0. If INFO = 0 and WANTT is */
+/* .FALSE., then the contents of H are unspecified on exit. */
+/* (The output value of H when INFO.GT.0 is given under the */
+/* description of INFO below.) */
+
+/* This subroutine may explicitly set H(i,j) = 0 for i.GT.j and */
+/* j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH .GE. max(1,N). */
+
+/* WR (output) REAL array, dimension (IHI) */
+/* WI (output) REAL array, dimension (IHI) */
+/* The real and imaginary parts, respectively, of the computed */
+/* eigenvalues of H(ILO:IHI,ILO:IHI) are stored in WR(ILO:IHI) */
+/* and WI(ILO:IHI). If two eigenvalues are computed as a */
+/* complex conjugate pair, they are stored in consecutive */
+/* elements of WR and WI, say the i-th and (i+1)th, with */
+/* WI(i) .GT. 0 and WI(i+1) .LT. 0. If WANTT is .TRUE., then */
+/* the eigenvalues are stored in the same order as on the */
+/* diagonal of the Schur form returned in H, with */
+/* WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2 diagonal */
+/* block, WI(i) = sqrt(-H(i+1,i)*H(i,i+1)) and */
+/* WI(i+1) = -WI(i). */
+
+/* ILOZ (input) INTEGER */
+/* IHIZ (input) INTEGER */
+/* Specify the rows of Z to which transformations must be */
+/* applied if WANTZ is .TRUE.. */
+/* 1 .LE. ILOZ .LE. ILO; IHI .LE. IHIZ .LE. N. */
+
+/* Z (input/output) REAL array, dimension (LDZ,IHI) */
+/* If WANTZ is .FALSE., then Z is not referenced. */
+/* If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is */
+/* replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the */
+/* orthogonal Schur factor of H(ILO:IHI,ILO:IHI). */
+/* (The output value of Z when INFO.GT.0 is given under */
+/* the description of INFO below.) */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. if WANTZ is .TRUE. */
+/*          then LDZ.GE.MAX(1,IHIZ). Otherwise, LDZ.GE.1. */
+
+/* WORK (workspace/output) REAL array, dimension LWORK */
+/* On exit, if LWORK = -1, WORK(1) returns an estimate of */
+/* the optimal value for LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK .GE. max(1,N) */
+/* is sufficient, but LWORK typically as large as 6*N may */
+/* be required for optimal performance. A workspace query */
+/* to determine the optimal workspace size is recommended. */
+
+/* If LWORK = -1, then SLAQR4 does a workspace query. */
+/* In this case, SLAQR4 checks the input parameters and */
+/* estimates the optimal workspace size for the given */
+/* values of N, ILO and IHI. The estimate is returned */
+/* in WORK(1). No error message related to LWORK is */
+/* issued by XERBLA. Neither H nor Z are accessed. */
+
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* .GT. 0: if INFO = i, SLAQR4 failed to compute all of */
+/* the eigenvalues. Elements 1:ilo-1 and i+1:n of WR */
+/* and WI contain those eigenvalues which have been */
+/* successfully computed. (Failures are rare.) */
+
+/*          If INFO .GT. 0 and WANTT is .FALSE., then on exit, */
+/* the remaining unconverged eigenvalues are the eigen- */
+/* values of the upper Hessenberg matrix rows and */
+/* columns ILO through INFO of the final, output */
+/* value of H. */
+
+/* If INFO .GT. 0 and WANTT is .TRUE., then on exit */
+
+/* (*) (initial value of H)*U = U*(final value of H) */
+
+/* where U is an orthogonal matrix. The final */
+/* value of H is upper Hessenberg and quasi-triangular */
+/* in rows and columns INFO+1 through IHI. */
+
+/* If INFO .GT. 0 and WANTZ is .TRUE., then on exit */
+
+/* (final value of Z(ILO:IHI,ILOZ:IHIZ) */
+/* = (initial value of Z(ILO:IHI,ILOZ:IHIZ)*U */
+
+/* where U is the orthogonal matrix in (*) (regard- */
+/* less of the value of WANTT.) */
+
+/* If INFO .GT. 0 and WANTZ is .FALSE., then Z is not */
+/* accessed. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* References: */
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 */
+/* Performance, SIAM Journal of Matrix Analysis, volume 23, pages */
+/* 929--947, 2002. */
+
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part II: Aggressive Early Deflation, SIAM Journal */
+/* of Matrix Analysis, volume 23, pages 948--973, 2002. */
+
+/* ================================================================ */
+/* .. Parameters .. */
+
+/* ==== Matrices of order NTINY or smaller must be processed by */
+/* . SLAHQR because of insufficient subdiagonal scratch space. */
+/* . (This is a hard limit.) ==== */
+
+/* ==== Exceptional deflation windows: try to cure rare */
+/* . slow convergence by varying the size of the */
+/* . deflation window after KEXNW iterations. ==== */
+
+/* ==== Exceptional shifts: try to cure rare slow convergence */
+/* . with ad-hoc exceptional shifts every KEXSH iterations. */
+/* . ==== */
+
+/* ==== The constants WILK1 and WILK2 are used to form the */
+/* . exceptional shifts. ==== */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --wr;
+ --wi;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+/* ==== Quick return for N = 0: nothing to do. ==== */
+
+ if (*n == 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ if (*n <= 11) {
+
+/* ==== Tiny matrices must use SLAHQR. ==== */
+
+ lwkopt = 1;
+ if (*lwork != -1) {
+ slahqr_(wantt, wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], &
+ wi[1], iloz, ihiz, &z__[z_offset], ldz, info);
+ }
+ } else {
+
+/* ==== Use small bulge multi-shift QR with aggressive early */
+/* . deflation on larger-than-tiny matrices. ==== */
+
+/* ==== Hope for the best. ==== */
+
+ *info = 0;
+
+/* ==== Set up job flags for ILAENV. ==== */
+
+ if (*wantt) {
+ *(unsigned char *)jbcmpz = 'S';
+ } else {
+ *(unsigned char *)jbcmpz = 'E';
+ }
+ if (*wantz) {
+ *(unsigned char *)&jbcmpz[1] = 'V';
+ } else {
+ *(unsigned char *)&jbcmpz[1] = 'N';
+ }
+
+/* ==== NWR = recommended deflation window size. At this */
+/* . point, N .GT. NTINY = 11, so there is enough */
+/* . subdiagonal workspace for NWR.GE.2 as required. */
+/* . (In fact, there is enough subdiagonal space for */
+/* . NWR.GE.3.) ==== */
+
+ nwr = ilaenv_(&c__13, "SLAQR4", jbcmpz, n, ilo, ihi, lwork);
+ nwr = max(2,nwr);
+/* Computing MIN */
+ i__1 = *ihi - *ilo + 1, i__2 = (*n - 1) / 3, i__1 = min(i__1,i__2);
+ nwr = min(i__1,nwr);
+
+/* ==== NSR = recommended number of simultaneous shifts. */
+/* . At this point N .GT. NTINY = 11, so there is at */
+/* . enough subdiagonal workspace for NSR to be even */
+/* . and greater than or equal to two as required. ==== */
+
+ nsr = ilaenv_(&c__15, "SLAQR4", jbcmpz, n, ilo, ihi, lwork);
+/* Computing MIN */
+ i__1 = nsr, i__2 = (*n + 6) / 9, i__1 = min(i__1,i__2), i__2 = *ihi -
+ *ilo;
+ nsr = min(i__1,i__2);
+/* Computing MAX */
+ i__1 = 2, i__2 = nsr - nsr % 2;
+ nsr = max(i__1,i__2);
+
+/* ==== Estimate optimal workspace ==== */
+
+/* ==== Workspace query call to SLAQR2 ==== */
+
+ i__1 = nwr + 1;
+ slaqr2_(wantt, wantz, n, ilo, ihi, &i__1, &h__[h_offset], ldh, iloz,
+ ihiz, &z__[z_offset], ldz, &ls, &ld, &wr[1], &wi[1], &h__[
+ h_offset], ldh, n, &h__[h_offset], ldh, n, &h__[h_offset],
+ ldh, &work[1], &c_n1);
+
+/* ==== Optimal workspace = MAX(SLAQR5, SLAQR2) ==== */
+
+/* Computing MAX */
+ i__1 = nsr * 3 / 2, i__2 = (integer) work[1];
+ lwkopt = max(i__1,i__2);
+
+/* ==== Quick return in case of workspace query. ==== */
+
+ if (*lwork == -1) {
+ work[1] = (real) lwkopt;
+ return 0;
+ }
+
+/* ==== SLAHQR/SLAQR0 crossover point ==== */
+
+ nmin = ilaenv_(&c__12, "SLAQR4", jbcmpz, n, ilo, ihi, lwork);
+ nmin = max(11,nmin);
+
+/* ==== Nibble crossover point ==== */
+
+ nibble = ilaenv_(&c__14, "SLAQR4", jbcmpz, n, ilo, ihi, lwork);
+ nibble = max(0,nibble);
+
+/* ==== Accumulate reflections during ttswp? Use block */
+/* . 2-by-2 structure during matrix-matrix multiply? ==== */
+
+ kacc22 = ilaenv_(&c__16, "SLAQR4", jbcmpz, n, ilo, ihi, lwork);
+ kacc22 = max(0,kacc22);
+ kacc22 = min(2,kacc22);
+
+/* ==== NWMAX = the largest possible deflation window for */
+/* . which there is sufficient workspace. ==== */
+
+/* Computing MIN */
+ i__1 = (*n - 1) / 3, i__2 = *lwork / 2;
+ nwmax = min(i__1,i__2);
+ nw = nwmax;
+
+/* ==== NSMAX = the Largest number of simultaneous shifts */
+/* . for which there is sufficient workspace. ==== */
+
+/* Computing MIN */
+ i__1 = (*n + 6) / 9, i__2 = (*lwork << 1) / 3;
+ nsmax = min(i__1,i__2);
+ nsmax -= nsmax % 2;
+
+/* ==== NDFL: an iteration count restarted at deflation. ==== */
+
+ ndfl = 1;
+
+/* ==== ITMAX = iteration limit ==== */
+
+/* Computing MAX */
+ i__1 = 10, i__2 = *ihi - *ilo + 1;
+ itmax = max(i__1,i__2) * 30;
+
+/* ==== Last row and column in the active block ==== */
+
+ kbot = *ihi;
+
+/* ==== Main Loop ==== */
+
+ i__1 = itmax;
+ for (it = 1; it <= i__1; ++it) {
+
+/* ==== Done when KBOT falls below ILO ==== */
+
+ if (kbot < *ilo) {
+ goto L90;
+ }
+
+/* ==== Locate active block ==== */
+
+ i__2 = *ilo + 1;
+ for (k = kbot; k >= i__2; --k) {
+ if (h__[k + (k - 1) * h_dim1] == 0.f) {
+ goto L20;
+ }
+/* L10: */
+ }
+ k = *ilo;
+L20:
+ ktop = k;
+
+/* ==== Select deflation window size: */
+/* . Typical Case: */
+/* . If possible and advisable, nibble the entire */
+/* . active block. If not, use size MIN(NWR,NWMAX) */
+/* . or MIN(NWR+1,NWMAX) depending upon which has */
+/* . the smaller corresponding subdiagonal entry */
+/* . (a heuristic). */
+/* . */
+/* . Exceptional Case: */
+/* . If there have been no deflations in KEXNW or */
+/* . more iterations, then vary the deflation window */
+/* . size. At first, because, larger windows are, */
+/* . in general, more powerful than smaller ones, */
+/* . rapidly increase the window to the maximum possible. */
+/* . Then, gradually reduce the window size. ==== */
+
+ nh = kbot - ktop + 1;
+ nwupbd = min(nh,nwmax);
+ if (ndfl < 5) {
+ nw = min(nwupbd,nwr);
+ } else {
+/* Computing MIN */
+ i__2 = nwupbd, i__3 = nw << 1;
+ nw = min(i__2,i__3);
+ }
+ if (nw < nwmax) {
+ if (nw >= nh - 1) {
+ nw = nh;
+ } else {
+ kwtop = kbot - nw + 1;
+ if ((r__1 = h__[kwtop + (kwtop - 1) * h_dim1], dabs(r__1))
+ > (r__2 = h__[kwtop - 1 + (kwtop - 2) * h_dim1],
+ dabs(r__2))) {
+ ++nw;
+ }
+ }
+ }
+ if (ndfl < 5) {
+ ndec = -1;
+ } else if (ndec >= 0 || nw >= nwupbd) {
+ ++ndec;
+ if (nw - ndec < 2) {
+ ndec = 0;
+ }
+ nw -= ndec;
+ }
+
+/* ==== Aggressive early deflation: */
+/* . split workspace under the subdiagonal into */
+/* . - an nw-by-nw work array V in the lower */
+/* . left-hand-corner, */
+/* . - an NW-by-at-least-NW-but-more-is-better */
+/* . (NW-by-NHO) horizontal work array along */
+/* . the bottom edge, */
+/* . - an at-least-NW-but-more-is-better (NHV-by-NW) */
+/* . vertical work array along the left-hand-edge. */
+/* . ==== */
+
+ kv = *n - nw + 1;
+ kt = nw + 1;
+ nho = *n - nw - 1 - kt + 1;
+ kwv = nw + 2;
+ nve = *n - nw - kwv + 1;
+
+/* ==== Aggressive early deflation ==== */
+
+ slaqr2_(wantt, wantz, n, &ktop, &kbot, &nw, &h__[h_offset], ldh,
+ iloz, ihiz, &z__[z_offset], ldz, &ls, &ld, &wr[1], &wi[1],
+ &h__[kv + h_dim1], ldh, &nho, &h__[kv + kt * h_dim1],
+ ldh, &nve, &h__[kwv + h_dim1], ldh, &work[1], lwork);
+
+/* ==== Adjust KBOT accounting for new deflations. ==== */
+
+ kbot -= ld;
+
+/* ==== KS points to the shifts. ==== */
+
+ ks = kbot - ls + 1;
+
+/* ==== Skip an expensive QR sweep if there is a (partly */
+/* . heuristic) reason to expect that many eigenvalues */
+/* . will deflate without it. Here, the QR sweep is */
+/* . skipped if many eigenvalues have just been deflated */
+/* . or if the remaining active block is small. */
+
+ if (ld == 0 || ld * 100 <= nw * nibble && kbot - ktop + 1 > min(
+ nmin,nwmax)) {
+
+/* ==== NS = nominal number of simultaneous shifts. */
+/* . This may be lowered (slightly) if SLAQR2 */
+/* . did not provide that many shifts. ==== */
+
+/* Computing MIN */
+/* Computing MAX */
+ i__4 = 2, i__5 = kbot - ktop;
+ i__2 = min(nsmax,nsr), i__3 = max(i__4,i__5);
+ ns = min(i__2,i__3);
+ ns -= ns % 2;
+
+/* ==== If there have been no deflations */
+/* . in a multiple of KEXSH iterations, */
+/* . then try exceptional shifts. */
+/* . Otherwise use shifts provided by */
+/* . SLAQR2 above or from the eigenvalues */
+/* . of a trailing principal submatrix. ==== */
+
+ if (ndfl % 6 == 0) {
+ ks = kbot - ns + 1;
+/* Computing MAX */
+ i__3 = ks + 1, i__4 = ktop + 2;
+ i__2 = max(i__3,i__4);
+ for (i__ = kbot; i__ >= i__2; i__ += -2) {
+ ss = (r__1 = h__[i__ + (i__ - 1) * h_dim1], dabs(r__1)
+ ) + (r__2 = h__[i__ - 1 + (i__ - 2) * h_dim1],
+ dabs(r__2));
+ aa = ss * .75f + h__[i__ + i__ * h_dim1];
+ bb = ss;
+ cc = ss * -.4375f;
+ dd = aa;
+ slanv2_(&aa, &bb, &cc, &dd, &wr[i__ - 1], &wi[i__ - 1]
+, &wr[i__], &wi[i__], &cs, &sn);
+/* L30: */
+ }
+ if (ks == ktop) {
+ wr[ks + 1] = h__[ks + 1 + (ks + 1) * h_dim1];
+ wi[ks + 1] = 0.f;
+ wr[ks] = wr[ks + 1];
+ wi[ks] = wi[ks + 1];
+ }
+ } else {
+
+/* ==== Got NS/2 or fewer shifts? Use SLAHQR */
+/* . on a trailing principal submatrix to */
+/* . get more. (Since NS.LE.NSMAX.LE.(N+6)/9, */
+/* . there is enough space below the subdiagonal */
+/* . to fit an NS-by-NS scratch array.) ==== */
+
+ if (kbot - ks + 1 <= ns / 2) {
+ ks = kbot - ns + 1;
+ kt = *n - ns + 1;
+ slacpy_("A", &ns, &ns, &h__[ks + ks * h_dim1], ldh, &
+ h__[kt + h_dim1], ldh);
+ slahqr_(&c_false, &c_false, &ns, &c__1, &ns, &h__[kt
+ + h_dim1], ldh, &wr[ks], &wi[ks], &c__1, &
+ c__1, zdum, &c__1, &inf);
+ ks += inf;
+
+/* ==== In case of a rare QR failure use */
+/* . eigenvalues of the trailing 2-by-2 */
+/* . principal submatrix. ==== */
+
+ if (ks >= kbot) {
+ aa = h__[kbot - 1 + (kbot - 1) * h_dim1];
+ cc = h__[kbot + (kbot - 1) * h_dim1];
+ bb = h__[kbot - 1 + kbot * h_dim1];
+ dd = h__[kbot + kbot * h_dim1];
+ slanv2_(&aa, &bb, &cc, &dd, &wr[kbot - 1], &wi[
+ kbot - 1], &wr[kbot], &wi[kbot], &cs, &sn)
+ ;
+ ks = kbot - 1;
+ }
+ }
+
+ if (kbot - ks + 1 > ns) {
+
+/* ==== Sort the shifts (Helps a little) */
+/* . Bubble sort keeps complex conjugate */
+/* . pairs together. ==== */
+
+ sorted = FALSE_;
+ i__2 = ks + 1;
+ for (k = kbot; k >= i__2; --k) {
+ if (sorted) {
+ goto L60;
+ }
+ sorted = TRUE_;
+ i__3 = k - 1;
+ for (i__ = ks; i__ <= i__3; ++i__) {
+ if ((r__1 = wr[i__], dabs(r__1)) + (r__2 = wi[
+ i__], dabs(r__2)) < (r__3 = wr[i__ +
+ 1], dabs(r__3)) + (r__4 = wi[i__ + 1],
+ dabs(r__4))) {
+ sorted = FALSE_;
+
+ swap = wr[i__];
+ wr[i__] = wr[i__ + 1];
+ wr[i__ + 1] = swap;
+
+ swap = wi[i__];
+ wi[i__] = wi[i__ + 1];
+ wi[i__ + 1] = swap;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+L60:
+ ;
+ }
+
+/* ==== Shuffle shifts into pairs of real shifts */
+/* . and pairs of complex conjugate shifts */
+/* . assuming complex conjugate shifts are */
+/* . already adjacent to one another. (Yes, */
+/* . they are.) ==== */
+
+ i__2 = ks + 2;
+ for (i__ = kbot; i__ >= i__2; i__ += -2) {
+ if (wi[i__] != -wi[i__ - 1]) {
+
+ swap = wr[i__];
+ wr[i__] = wr[i__ - 1];
+ wr[i__ - 1] = wr[i__ - 2];
+ wr[i__ - 2] = swap;
+
+ swap = wi[i__];
+ wi[i__] = wi[i__ - 1];
+ wi[i__ - 1] = wi[i__ - 2];
+ wi[i__ - 2] = swap;
+ }
+/* L70: */
+ }
+ }
+
+/* ==== If there are only two shifts and both are */
+/* . real, then use only one. ==== */
+
+ if (kbot - ks + 1 == 2) {
+ if (wi[kbot] == 0.f) {
+ if ((r__1 = wr[kbot] - h__[kbot + kbot * h_dim1],
+ dabs(r__1)) < (r__2 = wr[kbot - 1] - h__[kbot
+ + kbot * h_dim1], dabs(r__2))) {
+ wr[kbot - 1] = wr[kbot];
+ } else {
+ wr[kbot] = wr[kbot - 1];
+ }
+ }
+ }
+
+/* ==== Use up to NS of the smallest magnitude */
+/* . shifts. If there aren't NS shifts available, */
+/* . then use them all, possibly dropping one to */
+/* . make the number of shifts even. ==== */
+
+/* Computing MIN */
+ i__2 = ns, i__3 = kbot - ks + 1;
+ ns = min(i__2,i__3);
+ ns -= ns % 2;
+ ks = kbot - ns + 1;
+
+/* ==== Small-bulge multi-shift QR sweep: */
+/* . split workspace under the subdiagonal into */
+/* . - a KDU-by-KDU work array U in the lower */
+/* . left-hand-corner, */
+/* . - a KDU-by-at-least-KDU-but-more-is-better */
+/* . (KDU-by-NHo) horizontal work array WH along */
+/* . the bottom edge, */
+/* . - and an at-least-KDU-but-more-is-better-by-KDU */
+/* .    (NVE-by-KDU) vertical work WV array along */
+/* . the left-hand-edge. ==== */
+
+ kdu = ns * 3 - 3;
+ ku = *n - kdu + 1;
+ kwh = kdu + 1;
+ nho = *n - kdu - 3 - (kdu + 1) + 1;
+ kwv = kdu + 4;
+ nve = *n - kdu - kwv + 1;
+
+/* ==== Small-bulge multi-shift QR sweep ==== */
+
+ slaqr5_(wantt, wantz, &kacc22, n, &ktop, &kbot, &ns, &wr[ks],
+ &wi[ks], &h__[h_offset], ldh, iloz, ihiz, &z__[
+ z_offset], ldz, &work[1], &c__3, &h__[ku + h_dim1],
+ ldh, &nve, &h__[kwv + h_dim1], ldh, &nho, &h__[ku +
+ kwh * h_dim1], ldh);
+ }
+
+/* ==== Note progress (or the lack of it). ==== */
+
+ if (ld > 0) {
+ ndfl = 1;
+ } else {
+ ++ndfl;
+ }
+
+/* ==== End of main loop ==== */
+/* L80: */
+ }
+
+/* ==== Iteration limit exceeded. Set INFO to show where */
+/* . the problem occurred and exit. ==== */
+
+ *info = kbot;
+L90:
+ ;
+ }
+
+/* ==== Return the optimal value of LWORK. ==== */
+
+ work[1] = (real) lwkopt;
+
+/* ==== End of SLAQR4 ==== */
+
+ return 0;
+} /* slaqr4_ */
diff --git a/contrib/libs/clapack/slaqr5.c b/contrib/libs/clapack/slaqr5.c
new file mode 100644
index 0000000000..0f32351c16
--- /dev/null
+++ b/contrib/libs/clapack/slaqr5.c
@@ -0,0 +1,1026 @@
+/* slaqr5.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b7 = 0.f;
+static real c_b8 = 1.f;
+static integer c__3 = 3;
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int slaqr5_(logical *wantt, logical *wantz, integer *kacc22,
+ integer *n, integer *ktop, integer *kbot, integer *nshfts, real *sr,
+ real *si, real *h__, integer *ldh, integer *iloz, integer *ihiz, real
+ *z__, integer *ldz, real *v, integer *ldv, real *u, integer *ldu,
+ integer *nv, real *wv, integer *ldwv, integer *nh, real *wh, integer *
+ ldwh)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, u_dim1, u_offset, v_dim1, v_offset, wh_dim1,
+ wh_offset, wv_dim1, wv_offset, z_dim1, z_offset, i__1, i__2, i__3,
+ i__4, i__5, i__6, i__7;
+ real r__1, r__2, r__3, r__4, r__5;
+
+ /* Local variables */
+ integer i__, j, k, m, i2, j2, i4, j4, k1;
+ real h11, h12, h21, h22;
+ integer m22, ns, nu;
+ real vt[3], scl;
+ integer kdu, kms;
+ real ulp;
+ integer knz, kzs;
+ real tst1, tst2, beta;
+ logical blk22, bmp22;
+ integer mend, jcol, jlen, jbot, mbot;
+ real swap;
+ integer jtop, jrow, mtop;
+ real alpha;
+ logical accum;
+ integer ndcol, incol;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ integer krcol, nbmps;
+ extern /* Subroutine */ int strmm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+), slaqr1_(integer *, real *,
+ integer *, real *, real *, real *, real *, real *), slabad_(real *
+, real *);
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int slarfg_(integer *, real *, real *, integer *,
+ real *);
+ real safmax;
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *), slaset_(char *, integer *,
+ integer *, real *, real *, real *, integer *);
+ real refsum;
+ integer mstart;
+ real smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* This auxiliary subroutine called by SLAQR0 performs a */
+/* single small-bulge multi-shift QR sweep. */
+
+/* WANTT (input) logical scalar */
+/* WANTT = .true. if the quasi-triangular Schur factor */
+/* is being computed. WANTT is set to .false. otherwise. */
+
+/* WANTZ (input) logical scalar */
+/* WANTZ = .true. if the orthogonal Schur factor is being */
+/* computed. WANTZ is set to .false. otherwise. */
+
+/* KACC22 (input) integer with value 0, 1, or 2. */
+/* Specifies the computation mode of far-from-diagonal */
+/* orthogonal updates. */
+/* = 0: SLAQR5 does not accumulate reflections and does not */
+/* use matrix-matrix multiply to update far-from-diagonal */
+/* matrix entries. */
+/* = 1: SLAQR5 accumulates reflections and uses matrix-matrix */
+/* multiply to update the far-from-diagonal matrix entries. */
+/* = 2: SLAQR5 accumulates reflections, uses matrix-matrix */
+/* multiply to update the far-from-diagonal matrix entries, */
+/* and takes advantage of 2-by-2 block structure during */
+/* matrix multiplies. */
+
+/* N (input) integer scalar */
+/* N is the order of the Hessenberg matrix H upon which this */
+/* subroutine operates. */
+
+/* KTOP (input) integer scalar */
+/* KBOT (input) integer scalar */
+/* These are the first and last rows and columns of an */
+/* isolated diagonal block upon which the QR sweep is to be */
+/* applied. It is assumed without a check that */
+/* either KTOP = 1 or H(KTOP,KTOP-1) = 0 */
+/* and */
+/* either KBOT = N or H(KBOT+1,KBOT) = 0. */
+
+/* NSHFTS (input) integer scalar */
+/* NSHFTS gives the number of simultaneous shifts. NSHFTS */
+/* must be positive and even. */
+
+/* SR (input/output) REAL array of size (NSHFTS) */
+/* SI (input/output) REAL array of size (NSHFTS) */
+/* SR contains the real parts and SI contains the imaginary */
+/* parts of the NSHFTS shifts of origin that define the */
+/* multi-shift QR sweep. On output SR and SI may be */
+/* reordered. */
+
+/* H (input/output) REAL array of size (LDH,N) */
+/* On input H contains a Hessenberg matrix. On output a */
+/* multi-shift QR sweep with shifts SR(J)+i*SI(J) is applied */
+/* to the isolated diagonal block in rows and columns KTOP */
+/* through KBOT. */
+
+/* LDH (input) integer scalar */
+/* LDH is the leading dimension of H just as declared in the */
+/* calling procedure. LDH.GE.MAX(1,N). */
+
+/* ILOZ (input) INTEGER */
+/* IHIZ (input) INTEGER */
+/* Specify the rows of Z to which transformations must be */
+/* applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N */
+
+/* Z (input/output) REAL array of size (LDZ,IHI) */
+/* If WANTZ = .TRUE., then the QR Sweep orthogonal */
+/* similarity transformation is accumulated into */
+/* Z(ILOZ:IHIZ,ILO:IHI) from the right. */
+/* If WANTZ = .FALSE., then Z is unreferenced. */
+
+/* LDZ (input) integer scalar */
+/* LDA is the leading dimension of Z just as declared in */
+/* the calling procedure. LDZ.GE.N. */
+
+/* V (workspace) REAL array of size (LDV,NSHFTS/2) */
+
+/* LDV (input) integer scalar */
+/* LDV is the leading dimension of V as declared in the */
+/* calling procedure. LDV.GE.3. */
+
+/* U (workspace) REAL array of size */
+/* (LDU,3*NSHFTS-3) */
+
+/* LDU (input) integer scalar */
+/* LDU is the leading dimension of U just as declared in the */
+/* in the calling subroutine. LDU.GE.3*NSHFTS-3. */
+
+/* NH (input) integer scalar */
+/* NH is the number of columns in array WH available for */
+/* workspace. NH.GE.1. */
+
+/* WH (workspace) REAL array of size (LDWH,NH) */
+
+/* LDWH (input) integer scalar */
+/* Leading dimension of WH just as declared in the */
+/* calling procedure. LDWH.GE.3*NSHFTS-3. */
+
+/* NV (input) integer scalar */
+/*          NV is the number of rows in WV available for workspace. */
+/* NV.GE.1. */
+
+/* WV (workspace) REAL array of size */
+/* (LDWV,3*NSHFTS-3) */
+
+/* LDWV (input) integer scalar */
+/* LDWV is the leading dimension of WV as declared in the */
+/* in the calling subroutine. LDWV.GE.NV. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* Reference: */
+
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part I: Maintaining Well Focused Shifts, and */
+/* Level 3 Performance, SIAM Journal of Matrix Analysis, */
+/* volume 23, pages 929--947, 2002. */
+
+/* ================================================================ */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* ==== If there are no shifts, then there is nothing to do. ==== */
+
+ /* Parameter adjustments */
+ --sr;
+ --si;
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ wv_dim1 = *ldwv;
+ wv_offset = 1 + wv_dim1;
+ wv -= wv_offset;
+ wh_dim1 = *ldwh;
+ wh_offset = 1 + wh_dim1;
+ wh -= wh_offset;
+
+ /* Function Body */
+ if (*nshfts < 2) {
+ return 0;
+ }
+
+/* ==== If the active block is empty or 1-by-1, then there */
+/* . is nothing to do. ==== */
+
+ if (*ktop >= *kbot) {
+ return 0;
+ }
+
+/* ==== Shuffle shifts into pairs of real shifts and pairs */
+/* . of complex conjugate shifts assuming complex */
+/* . conjugate shifts are already adjacent to one */
+/* . another. ==== */
+
+ i__1 = *nshfts - 2;
+ for (i__ = 1; i__ <= i__1; i__ += 2) {
+ if (si[i__] != -si[i__ + 1]) {
+
+ swap = sr[i__];
+ sr[i__] = sr[i__ + 1];
+ sr[i__ + 1] = sr[i__ + 2];
+ sr[i__ + 2] = swap;
+
+ swap = si[i__];
+ si[i__] = si[i__ + 1];
+ si[i__ + 1] = si[i__ + 2];
+ si[i__ + 2] = swap;
+ }
+/* L10: */
+ }
+
+/* ==== NSHFTS is supposed to be even, but if it is odd, */
+/* . then simply reduce it by one. The shuffle above */
+/* . ensures that the dropped shift is real and that */
+/* . the remaining shifts are paired. ==== */
+
+ ns = *nshfts - *nshfts % 2;
+
+/* ==== Machine constants for deflation ==== */
+
+ safmin = slamch_("SAFE MINIMUM");
+ safmax = 1.f / safmin;
+ slabad_(&safmin, &safmax);
+ ulp = slamch_("PRECISION");
+ smlnum = safmin * ((real) (*n) / ulp);
+
+/* ==== Use accumulated reflections to update far-from-diagonal */
+/* . entries ? ==== */
+
+ accum = *kacc22 == 1 || *kacc22 == 2;
+
+/* ==== If so, exploit the 2-by-2 block structure? ==== */
+
+ blk22 = ns > 2 && *kacc22 == 2;
+
+/* ==== clear trash ==== */
+
+ if (*ktop + 2 <= *kbot) {
+ h__[*ktop + 2 + *ktop * h_dim1] = 0.f;
+ }
+
+/* ==== NBMPS = number of 2-shift bulges in the chain ==== */
+
+ nbmps = ns / 2;
+
+/* ==== KDU = width of slab ==== */
+
+ kdu = nbmps * 6 - 3;
+
+/* ==== Create and chase chains of NBMPS bulges ==== */
+
+ i__1 = *kbot - 2;
+ i__2 = nbmps * 3 - 2;
+ for (incol = (1 - nbmps) * 3 + *ktop - 1; i__2 < 0 ? incol >= i__1 :
+ incol <= i__1; incol += i__2) {
+ ndcol = incol + kdu;
+ if (accum) {
+ slaset_("ALL", &kdu, &kdu, &c_b7, &c_b8, &u[u_offset], ldu);
+ }
+
+/* ==== Near-the-diagonal bulge chase. The following loop */
+/* . performs the near-the-diagonal part of a small bulge */
+/* . multi-shift QR sweep. Each 6*NBMPS-2 column diagonal */
+/* . chunk extends from column INCOL to column NDCOL */
+/* . (including both column INCOL and column NDCOL). The */
+/* . following loop chases a 3*NBMPS column long chain of */
+/* . NBMPS bulges 3*NBMPS-2 columns to the right. (INCOL */
+/* .    may be less than KTOP and NDCOL may be greater than */
+/* . KBOT indicating phantom columns from which to chase */
+/* . bulges before they are actually introduced or to which */
+/* . to chase bulges beyond column KBOT.) ==== */
+
+/* Computing MIN */
+ i__4 = incol + nbmps * 3 - 3, i__5 = *kbot - 2;
+ i__3 = min(i__4,i__5);
+ for (krcol = incol; krcol <= i__3; ++krcol) {
+
+/* ==== Bulges number MTOP to MBOT are active double implicit */
+/* . shift bulges. There may or may not also be small */
+/* . 2-by-2 bulge, if there is room. The inactive bulges */
+/* . (if any) must wait until the active bulges have moved */
+/* . down the diagonal to make room. The phantom matrix */
+/* . paradigm described above helps keep track. ==== */
+
+/* Computing MAX */
+ i__4 = 1, i__5 = (*ktop - 1 - krcol + 2) / 3 + 1;
+ mtop = max(i__4,i__5);
+/* Computing MIN */
+ i__4 = nbmps, i__5 = (*kbot - krcol) / 3;
+ mbot = min(i__4,i__5);
+ m22 = mbot + 1;
+ bmp22 = mbot < nbmps && krcol + (m22 - 1) * 3 == *kbot - 2;
+
+/* ==== Generate reflections to chase the chain right */
+/* . one column. (The minimum value of K is KTOP-1.) ==== */
+
+ i__4 = mbot;
+ for (m = mtop; m <= i__4; ++m) {
+ k = krcol + (m - 1) * 3;
+ if (k == *ktop - 1) {
+ slaqr1_(&c__3, &h__[*ktop + *ktop * h_dim1], ldh, &sr[(m
+ << 1) - 1], &si[(m << 1) - 1], &sr[m * 2], &si[m *
+ 2], &v[m * v_dim1 + 1]);
+ alpha = v[m * v_dim1 + 1];
+ slarfg_(&c__3, &alpha, &v[m * v_dim1 + 2], &c__1, &v[m *
+ v_dim1 + 1]);
+ } else {
+ beta = h__[k + 1 + k * h_dim1];
+ v[m * v_dim1 + 2] = h__[k + 2 + k * h_dim1];
+ v[m * v_dim1 + 3] = h__[k + 3 + k * h_dim1];
+ slarfg_(&c__3, &beta, &v[m * v_dim1 + 2], &c__1, &v[m *
+ v_dim1 + 1]);
+
+/* ==== A Bulge may collapse because of vigilant */
+/* . deflation or destructive underflow. In the */
+/* . underflow case, try the two-small-subdiagonals */
+/* . trick to try to reinflate the bulge. ==== */
+
+ if (h__[k + 3 + k * h_dim1] != 0.f || h__[k + 3 + (k + 1)
+ * h_dim1] != 0.f || h__[k + 3 + (k + 2) * h_dim1]
+ == 0.f) {
+
+/* ==== Typical case: not collapsed (yet). ==== */
+
+ h__[k + 1 + k * h_dim1] = beta;
+ h__[k + 2 + k * h_dim1] = 0.f;
+ h__[k + 3 + k * h_dim1] = 0.f;
+ } else {
+
+/* ==== Atypical case: collapsed. Attempt to */
+/* . reintroduce ignoring H(K+1,K) and H(K+2,K). */
+/* . If the fill resulting from the new */
+/* . reflector is too large, then abandon it. */
+/* . Otherwise, use the new one. ==== */
+
+ slaqr1_(&c__3, &h__[k + 1 + (k + 1) * h_dim1], ldh, &
+ sr[(m << 1) - 1], &si[(m << 1) - 1], &sr[m *
+ 2], &si[m * 2], vt);
+ alpha = vt[0];
+ slarfg_(&c__3, &alpha, &vt[1], &c__1, vt);
+ refsum = vt[0] * (h__[k + 1 + k * h_dim1] + vt[1] *
+ h__[k + 2 + k * h_dim1]);
+
+ if ((r__1 = h__[k + 2 + k * h_dim1] - refsum * vt[1],
+ dabs(r__1)) + (r__2 = refsum * vt[2], dabs(
+ r__2)) > ulp * ((r__3 = h__[k + k * h_dim1],
+ dabs(r__3)) + (r__4 = h__[k + 1 + (k + 1) *
+ h_dim1], dabs(r__4)) + (r__5 = h__[k + 2 + (k
+ + 2) * h_dim1], dabs(r__5)))) {
+
+/* ==== Starting a new bulge here would */
+/* . create non-negligible fill. Use */
+/* . the old one with trepidation. ==== */
+
+ h__[k + 1 + k * h_dim1] = beta;
+ h__[k + 2 + k * h_dim1] = 0.f;
+ h__[k + 3 + k * h_dim1] = 0.f;
+ } else {
+
+/* ==== Starting a new bulge here would */
+/* . create only negligible fill. */
+/* . Replace the old reflector with */
+/* . the new one. ==== */
+
+ h__[k + 1 + k * h_dim1] -= refsum;
+ h__[k + 2 + k * h_dim1] = 0.f;
+ h__[k + 3 + k * h_dim1] = 0.f;
+ v[m * v_dim1 + 1] = vt[0];
+ v[m * v_dim1 + 2] = vt[1];
+ v[m * v_dim1 + 3] = vt[2];
+ }
+ }
+ }
+/* L20: */
+ }
+
+/* ==== Generate a 2-by-2 reflection, if needed. ==== */
+
+ k = krcol + (m22 - 1) * 3;
+ if (bmp22) {
+ if (k == *ktop - 1) {
+ slaqr1_(&c__2, &h__[k + 1 + (k + 1) * h_dim1], ldh, &sr[(
+ m22 << 1) - 1], &si[(m22 << 1) - 1], &sr[m22 * 2],
+ &si[m22 * 2], &v[m22 * v_dim1 + 1]);
+ beta = v[m22 * v_dim1 + 1];
+ slarfg_(&c__2, &beta, &v[m22 * v_dim1 + 2], &c__1, &v[m22
+ * v_dim1 + 1]);
+ } else {
+ beta = h__[k + 1 + k * h_dim1];
+ v[m22 * v_dim1 + 2] = h__[k + 2 + k * h_dim1];
+ slarfg_(&c__2, &beta, &v[m22 * v_dim1 + 2], &c__1, &v[m22
+ * v_dim1 + 1]);
+ h__[k + 1 + k * h_dim1] = beta;
+ h__[k + 2 + k * h_dim1] = 0.f;
+ }
+ }
+
+/* ==== Multiply H by reflections from the left ==== */
+
+ if (accum) {
+ jbot = min(ndcol,*kbot);
+ } else if (*wantt) {
+ jbot = *n;
+ } else {
+ jbot = *kbot;
+ }
+ i__4 = jbot;
+ for (j = max(*ktop,krcol); j <= i__4; ++j) {
+/* Computing MIN */
+ i__5 = mbot, i__6 = (j - krcol + 2) / 3;
+ mend = min(i__5,i__6);
+ i__5 = mend;
+ for (m = mtop; m <= i__5; ++m) {
+ k = krcol + (m - 1) * 3;
+ refsum = v[m * v_dim1 + 1] * (h__[k + 1 + j * h_dim1] + v[
+ m * v_dim1 + 2] * h__[k + 2 + j * h_dim1] + v[m *
+ v_dim1 + 3] * h__[k + 3 + j * h_dim1]);
+ h__[k + 1 + j * h_dim1] -= refsum;
+ h__[k + 2 + j * h_dim1] -= refsum * v[m * v_dim1 + 2];
+ h__[k + 3 + j * h_dim1] -= refsum * v[m * v_dim1 + 3];
+/* L30: */
+ }
+/* L40: */
+ }
+ if (bmp22) {
+ k = krcol + (m22 - 1) * 3;
+/* Computing MAX */
+ i__4 = k + 1;
+ i__5 = jbot;
+ for (j = max(i__4,*ktop); j <= i__5; ++j) {
+ refsum = v[m22 * v_dim1 + 1] * (h__[k + 1 + j * h_dim1] +
+ v[m22 * v_dim1 + 2] * h__[k + 2 + j * h_dim1]);
+ h__[k + 1 + j * h_dim1] -= refsum;
+ h__[k + 2 + j * h_dim1] -= refsum * v[m22 * v_dim1 + 2];
+/* L50: */
+ }
+ }
+
+/* ==== Multiply H by reflections from the right. */
+/* . Delay filling in the last row until the */
+/* . vigilant deflation check is complete. ==== */
+
+ if (accum) {
+ jtop = max(*ktop,incol);
+ } else if (*wantt) {
+ jtop = 1;
+ } else {
+ jtop = *ktop;
+ }
+ i__5 = mbot;
+ for (m = mtop; m <= i__5; ++m) {
+ if (v[m * v_dim1 + 1] != 0.f) {
+ k = krcol + (m - 1) * 3;
+/* Computing MIN */
+ i__6 = *kbot, i__7 = k + 3;
+ i__4 = min(i__6,i__7);
+ for (j = jtop; j <= i__4; ++j) {
+ refsum = v[m * v_dim1 + 1] * (h__[j + (k + 1) *
+ h_dim1] + v[m * v_dim1 + 2] * h__[j + (k + 2)
+ * h_dim1] + v[m * v_dim1 + 3] * h__[j + (k +
+ 3) * h_dim1]);
+ h__[j + (k + 1) * h_dim1] -= refsum;
+ h__[j + (k + 2) * h_dim1] -= refsum * v[m * v_dim1 +
+ 2];
+ h__[j + (k + 3) * h_dim1] -= refsum * v[m * v_dim1 +
+ 3];
+/* L60: */
+ }
+
+ if (accum) {
+
+/* ==== Accumulate U. (If necessary, update Z later */
+/* . with with an efficient matrix-matrix */
+/* . multiply.) ==== */
+
+ kms = k - incol;
+/* Computing MAX */
+ i__4 = 1, i__6 = *ktop - incol;
+ i__7 = kdu;
+ for (j = max(i__4,i__6); j <= i__7; ++j) {
+ refsum = v[m * v_dim1 + 1] * (u[j + (kms + 1) *
+ u_dim1] + v[m * v_dim1 + 2] * u[j + (kms
+ + 2) * u_dim1] + v[m * v_dim1 + 3] * u[j
+ + (kms + 3) * u_dim1]);
+ u[j + (kms + 1) * u_dim1] -= refsum;
+ u[j + (kms + 2) * u_dim1] -= refsum * v[m *
+ v_dim1 + 2];
+ u[j + (kms + 3) * u_dim1] -= refsum * v[m *
+ v_dim1 + 3];
+/* L70: */
+ }
+ } else if (*wantz) {
+
+/* ==== U is not accumulated, so update Z */
+/* . now by multiplying by reflections */
+/* . from the right. ==== */
+
+ i__7 = *ihiz;
+ for (j = *iloz; j <= i__7; ++j) {
+ refsum = v[m * v_dim1 + 1] * (z__[j + (k + 1) *
+ z_dim1] + v[m * v_dim1 + 2] * z__[j + (k
+ + 2) * z_dim1] + v[m * v_dim1 + 3] * z__[
+ j + (k + 3) * z_dim1]);
+ z__[j + (k + 1) * z_dim1] -= refsum;
+ z__[j + (k + 2) * z_dim1] -= refsum * v[m *
+ v_dim1 + 2];
+ z__[j + (k + 3) * z_dim1] -= refsum * v[m *
+ v_dim1 + 3];
+/* L80: */
+ }
+ }
+ }
+/* L90: */
+ }
+
+/* ==== Special case: 2-by-2 reflection (if needed) ==== */
+
+ k = krcol + (m22 - 1) * 3;
+ if (bmp22 && v[m22 * v_dim1 + 1] != 0.f) {
+/* Computing MIN */
+ i__7 = *kbot, i__4 = k + 3;
+ i__5 = min(i__7,i__4);
+ for (j = jtop; j <= i__5; ++j) {
+ refsum = v[m22 * v_dim1 + 1] * (h__[j + (k + 1) * h_dim1]
+ + v[m22 * v_dim1 + 2] * h__[j + (k + 2) * h_dim1])
+ ;
+ h__[j + (k + 1) * h_dim1] -= refsum;
+ h__[j + (k + 2) * h_dim1] -= refsum * v[m22 * v_dim1 + 2];
+/* L100: */
+ }
+
+ if (accum) {
+ kms = k - incol;
+/* Computing MAX */
+ i__5 = 1, i__7 = *ktop - incol;
+ i__4 = kdu;
+ for (j = max(i__5,i__7); j <= i__4; ++j) {
+ refsum = v[m22 * v_dim1 + 1] * (u[j + (kms + 1) *
+ u_dim1] + v[m22 * v_dim1 + 2] * u[j + (kms +
+ 2) * u_dim1]);
+ u[j + (kms + 1) * u_dim1] -= refsum;
+ u[j + (kms + 2) * u_dim1] -= refsum * v[m22 * v_dim1
+ + 2];
+/* L110: */
+ }
+ } else if (*wantz) {
+ i__4 = *ihiz;
+ for (j = *iloz; j <= i__4; ++j) {
+ refsum = v[m22 * v_dim1 + 1] * (z__[j + (k + 1) *
+ z_dim1] + v[m22 * v_dim1 + 2] * z__[j + (k +
+ 2) * z_dim1]);
+ z__[j + (k + 1) * z_dim1] -= refsum;
+ z__[j + (k + 2) * z_dim1] -= refsum * v[m22 * v_dim1
+ + 2];
+/* L120: */
+ }
+ }
+ }
+
+/* ==== Vigilant deflation check ==== */
+
+ mstart = mtop;
+ if (krcol + (mstart - 1) * 3 < *ktop) {
+ ++mstart;
+ }
+ mend = mbot;
+ if (bmp22) {
+ ++mend;
+ }
+ if (krcol == *kbot - 2) {
+ ++mend;
+ }
+ i__4 = mend;
+ for (m = mstart; m <= i__4; ++m) {
+/* Computing MIN */
+ i__5 = *kbot - 1, i__7 = krcol + (m - 1) * 3;
+ k = min(i__5,i__7);
+
+/* ==== The following convergence test requires that */
+/* .    the traditional small-compared-to-nearby-diagonals */
+/* . criterion and the Ahues & Tisseur (LAWN 122, 1997) */
+/* . criteria both be satisfied. The latter improves */
+/* . accuracy in some examples. Falling back on an */
+/* . alternate convergence criterion when TST1 or TST2 */
+/* . is zero (as done here) is traditional but probably */
+/* . unnecessary. ==== */
+
+ if (h__[k + 1 + k * h_dim1] != 0.f) {
+ tst1 = (r__1 = h__[k + k * h_dim1], dabs(r__1)) + (r__2 =
+ h__[k + 1 + (k + 1) * h_dim1], dabs(r__2));
+ if (tst1 == 0.f) {
+ if (k >= *ktop + 1) {
+ tst1 += (r__1 = h__[k + (k - 1) * h_dim1], dabs(
+ r__1));
+ }
+ if (k >= *ktop + 2) {
+ tst1 += (r__1 = h__[k + (k - 2) * h_dim1], dabs(
+ r__1));
+ }
+ if (k >= *ktop + 3) {
+ tst1 += (r__1 = h__[k + (k - 3) * h_dim1], dabs(
+ r__1));
+ }
+ if (k <= *kbot - 2) {
+ tst1 += (r__1 = h__[k + 2 + (k + 1) * h_dim1],
+ dabs(r__1));
+ }
+ if (k <= *kbot - 3) {
+ tst1 += (r__1 = h__[k + 3 + (k + 1) * h_dim1],
+ dabs(r__1));
+ }
+ if (k <= *kbot - 4) {
+ tst1 += (r__1 = h__[k + 4 + (k + 1) * h_dim1],
+ dabs(r__1));
+ }
+ }
+/* Computing MAX */
+ r__2 = smlnum, r__3 = ulp * tst1;
+ if ((r__1 = h__[k + 1 + k * h_dim1], dabs(r__1)) <= dmax(
+ r__2,r__3)) {
+/* Computing MAX */
+ r__3 = (r__1 = h__[k + 1 + k * h_dim1], dabs(r__1)),
+ r__4 = (r__2 = h__[k + (k + 1) * h_dim1],
+ dabs(r__2));
+ h12 = dmax(r__3,r__4);
+/* Computing MIN */
+ r__3 = (r__1 = h__[k + 1 + k * h_dim1], dabs(r__1)),
+ r__4 = (r__2 = h__[k + (k + 1) * h_dim1],
+ dabs(r__2));
+ h21 = dmin(r__3,r__4);
+/* Computing MAX */
+ r__3 = (r__1 = h__[k + 1 + (k + 1) * h_dim1], dabs(
+ r__1)), r__4 = (r__2 = h__[k + k * h_dim1] -
+ h__[k + 1 + (k + 1) * h_dim1], dabs(r__2));
+ h11 = dmax(r__3,r__4);
+/* Computing MIN */
+ r__3 = (r__1 = h__[k + 1 + (k + 1) * h_dim1], dabs(
+ r__1)), r__4 = (r__2 = h__[k + k * h_dim1] -
+ h__[k + 1 + (k + 1) * h_dim1], dabs(r__2));
+ h22 = dmin(r__3,r__4);
+ scl = h11 + h12;
+ tst2 = h22 * (h11 / scl);
+
+/* Computing MAX */
+ r__1 = smlnum, r__2 = ulp * tst2;
+ if (tst2 == 0.f || h21 * (h12 / scl) <= dmax(r__1,
+ r__2)) {
+ h__[k + 1 + k * h_dim1] = 0.f;
+ }
+ }
+ }
+/* L130: */
+ }
+
+/* ==== Fill in the last row of each bulge. ==== */
+
+/* Computing MIN */
+ i__4 = nbmps, i__5 = (*kbot - krcol - 1) / 3;
+ mend = min(i__4,i__5);
+ i__4 = mend;
+ for (m = mtop; m <= i__4; ++m) {
+ k = krcol + (m - 1) * 3;
+ refsum = v[m * v_dim1 + 1] * v[m * v_dim1 + 3] * h__[k + 4 + (
+ k + 3) * h_dim1];
+ h__[k + 4 + (k + 1) * h_dim1] = -refsum;
+ h__[k + 4 + (k + 2) * h_dim1] = -refsum * v[m * v_dim1 + 2];
+ h__[k + 4 + (k + 3) * h_dim1] -= refsum * v[m * v_dim1 + 3];
+/* L140: */
+ }
+
+/* ==== End of near-the-diagonal bulge chase. ==== */
+
+/* L150: */
+ }
+
+/* ==== Use U (if accumulated) to update far-from-diagonal */
+/* . entries in H. If required, use U to update Z as */
+/* . well. ==== */
+
+ if (accum) {
+ if (*wantt) {
+ jtop = 1;
+ jbot = *n;
+ } else {
+ jtop = *ktop;
+ jbot = *kbot;
+ }
+ if (! blk22 || incol < *ktop || ndcol > *kbot || ns <= 2) {
+
+/* ==== Updates not exploiting the 2-by-2 block */
+/* . structure of U. K1 and NU keep track of */
+/* . the location and size of U in the special */
+/* . cases of introducing bulges and chasing */
+/* . bulges off the bottom. In these special */
+/* . cases and in case the number of shifts */
+/* . is NS = 2, there is no 2-by-2 block */
+/* . structure to exploit. ==== */
+
+/* Computing MAX */
+ i__3 = 1, i__4 = *ktop - incol;
+ k1 = max(i__3,i__4);
+/* Computing MAX */
+ i__3 = 0, i__4 = ndcol - *kbot;
+ nu = kdu - max(i__3,i__4) - k1 + 1;
+
+/* ==== Horizontal Multiply ==== */
+
+ i__3 = jbot;
+ i__4 = *nh;
+ for (jcol = min(ndcol,*kbot) + 1; i__4 < 0 ? jcol >= i__3 :
+ jcol <= i__3; jcol += i__4) {
+/* Computing MIN */
+ i__5 = *nh, i__7 = jbot - jcol + 1;
+ jlen = min(i__5,i__7);
+ sgemm_("C", "N", &nu, &jlen, &nu, &c_b8, &u[k1 + k1 *
+ u_dim1], ldu, &h__[incol + k1 + jcol * h_dim1],
+ ldh, &c_b7, &wh[wh_offset], ldwh);
+ slacpy_("ALL", &nu, &jlen, &wh[wh_offset], ldwh, &h__[
+ incol + k1 + jcol * h_dim1], ldh);
+/* L160: */
+ }
+
+/* ==== Vertical multiply ==== */
+
+ i__4 = max(*ktop,incol) - 1;
+ i__3 = *nv;
+ for (jrow = jtop; i__3 < 0 ? jrow >= i__4 : jrow <= i__4;
+ jrow += i__3) {
+/* Computing MIN */
+ i__5 = *nv, i__7 = max(*ktop,incol) - jrow;
+ jlen = min(i__5,i__7);
+ sgemm_("N", "N", &jlen, &nu, &nu, &c_b8, &h__[jrow + (
+ incol + k1) * h_dim1], ldh, &u[k1 + k1 * u_dim1],
+ ldu, &c_b7, &wv[wv_offset], ldwv);
+ slacpy_("ALL", &jlen, &nu, &wv[wv_offset], ldwv, &h__[
+ jrow + (incol + k1) * h_dim1], ldh);
+/* L170: */
+ }
+
+/* ==== Z multiply (also vertical) ==== */
+
+ if (*wantz) {
+ i__3 = *ihiz;
+ i__4 = *nv;
+ for (jrow = *iloz; i__4 < 0 ? jrow >= i__3 : jrow <= i__3;
+ jrow += i__4) {
+/* Computing MIN */
+ i__5 = *nv, i__7 = *ihiz - jrow + 1;
+ jlen = min(i__5,i__7);
+ sgemm_("N", "N", &jlen, &nu, &nu, &c_b8, &z__[jrow + (
+ incol + k1) * z_dim1], ldz, &u[k1 + k1 *
+ u_dim1], ldu, &c_b7, &wv[wv_offset], ldwv);
+ slacpy_("ALL", &jlen, &nu, &wv[wv_offset], ldwv, &z__[
+ jrow + (incol + k1) * z_dim1], ldz)
+ ;
+/* L180: */
+ }
+ }
+ } else {
+
+/* ==== Updates exploiting U's 2-by-2 block structure. */
+/* . (I2, I4, J2, J4 are the last rows and columns */
+/* . of the blocks.) ==== */
+
+ i2 = (kdu + 1) / 2;
+ i4 = kdu;
+ j2 = i4 - i2;
+ j4 = kdu;
+
+/* ==== KZS and KNZ deal with the band of zeros */
+/* . along the diagonal of one of the triangular */
+/* . blocks. ==== */
+
+ kzs = j4 - j2 - (ns + 1);
+ knz = ns + 1;
+
+/* ==== Horizontal multiply ==== */
+
+ i__4 = jbot;
+ i__3 = *nh;
+ for (jcol = min(ndcol,*kbot) + 1; i__3 < 0 ? jcol >= i__4 :
+ jcol <= i__4; jcol += i__3) {
+/* Computing MIN */
+ i__5 = *nh, i__7 = jbot - jcol + 1;
+ jlen = min(i__5,i__7);
+
+/* ==== Copy bottom of H to top+KZS of scratch ==== */
+/* (The first KZS rows get multiplied by zero.) ==== */
+
+ slacpy_("ALL", &knz, &jlen, &h__[incol + 1 + j2 + jcol *
+ h_dim1], ldh, &wh[kzs + 1 + wh_dim1], ldwh);
+
+/* ==== Multiply by U21' ==== */
+
+ slaset_("ALL", &kzs, &jlen, &c_b7, &c_b7, &wh[wh_offset],
+ ldwh);
+ strmm_("L", "U", "C", "N", &knz, &jlen, &c_b8, &u[j2 + 1
+ + (kzs + 1) * u_dim1], ldu, &wh[kzs + 1 + wh_dim1]
+, ldwh);
+
+/* ==== Multiply top of H by U11' ==== */
+
+ sgemm_("C", "N", &i2, &jlen, &j2, &c_b8, &u[u_offset],
+ ldu, &h__[incol + 1 + jcol * h_dim1], ldh, &c_b8,
+ &wh[wh_offset], ldwh);
+
+/* ==== Copy top of H to bottom of WH ==== */
+
+ slacpy_("ALL", &j2, &jlen, &h__[incol + 1 + jcol * h_dim1]
+, ldh, &wh[i2 + 1 + wh_dim1], ldwh);
+
+/* ==== Multiply by U21' ==== */
+
+ strmm_("L", "L", "C", "N", &j2, &jlen, &c_b8, &u[(i2 + 1)
+ * u_dim1 + 1], ldu, &wh[i2 + 1 + wh_dim1], ldwh);
+
+/* ==== Multiply by U22 ==== */
+
+ i__5 = i4 - i2;
+ i__7 = j4 - j2;
+ sgemm_("C", "N", &i__5, &jlen, &i__7, &c_b8, &u[j2 + 1 + (
+ i2 + 1) * u_dim1], ldu, &h__[incol + 1 + j2 +
+ jcol * h_dim1], ldh, &c_b8, &wh[i2 + 1 + wh_dim1],
+ ldwh);
+
+/* ==== Copy it back ==== */
+
+ slacpy_("ALL", &kdu, &jlen, &wh[wh_offset], ldwh, &h__[
+ incol + 1 + jcol * h_dim1], ldh);
+/* L190: */
+ }
+
+/* ==== Vertical multiply ==== */
+
+ i__3 = max(incol,*ktop) - 1;
+ i__4 = *nv;
+ for (jrow = jtop; i__4 < 0 ? jrow >= i__3 : jrow <= i__3;
+ jrow += i__4) {
+/* Computing MIN */
+ i__5 = *nv, i__7 = max(incol,*ktop) - jrow;
+ jlen = min(i__5,i__7);
+
+/* ==== Copy right of H to scratch (the first KZS */
+/* . columns get multiplied by zero) ==== */
+
+ slacpy_("ALL", &jlen, &knz, &h__[jrow + (incol + 1 + j2) *
+ h_dim1], ldh, &wv[(kzs + 1) * wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U21 ==== */
+
+ slaset_("ALL", &jlen, &kzs, &c_b7, &c_b7, &wv[wv_offset],
+ ldwv);
+ strmm_("R", "U", "N", "N", &jlen, &knz, &c_b8, &u[j2 + 1
+ + (kzs + 1) * u_dim1], ldu, &wv[(kzs + 1) *
+ wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U11 ==== */
+
+ sgemm_("N", "N", &jlen, &i2, &j2, &c_b8, &h__[jrow + (
+ incol + 1) * h_dim1], ldh, &u[u_offset], ldu, &
+ c_b8, &wv[wv_offset], ldwv);
+
+/* ==== Copy left of H to right of scratch ==== */
+
+ slacpy_("ALL", &jlen, &j2, &h__[jrow + (incol + 1) *
+ h_dim1], ldh, &wv[(i2 + 1) * wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U21 ==== */
+
+ i__5 = i4 - i2;
+ strmm_("R", "L", "N", "N", &jlen, &i__5, &c_b8, &u[(i2 +
+ 1) * u_dim1 + 1], ldu, &wv[(i2 + 1) * wv_dim1 + 1]
+, ldwv);
+
+/* ==== Multiply by U22 ==== */
+
+ i__5 = i4 - i2;
+ i__7 = j4 - j2;
+ sgemm_("N", "N", &jlen, &i__5, &i__7, &c_b8, &h__[jrow + (
+ incol + 1 + j2) * h_dim1], ldh, &u[j2 + 1 + (i2 +
+ 1) * u_dim1], ldu, &c_b8, &wv[(i2 + 1) * wv_dim1
+ + 1], ldwv);
+
+/* ==== Copy it back ==== */
+
+ slacpy_("ALL", &jlen, &kdu, &wv[wv_offset], ldwv, &h__[
+ jrow + (incol + 1) * h_dim1], ldh);
+/* L200: */
+ }
+
+/* ==== Multiply Z (also vertical) ==== */
+
+ if (*wantz) {
+ i__4 = *ihiz;
+ i__3 = *nv;
+ for (jrow = *iloz; i__3 < 0 ? jrow >= i__4 : jrow <= i__4;
+ jrow += i__3) {
+/* Computing MIN */
+ i__5 = *nv, i__7 = *ihiz - jrow + 1;
+ jlen = min(i__5,i__7);
+
+/* ==== Copy right of Z to left of scratch (first */
+/* . KZS columns get multiplied by zero) ==== */
+
+ slacpy_("ALL", &jlen, &knz, &z__[jrow + (incol + 1 +
+ j2) * z_dim1], ldz, &wv[(kzs + 1) * wv_dim1 +
+ 1], ldwv);
+
+/* ==== Multiply by U12 ==== */
+
+ slaset_("ALL", &jlen, &kzs, &c_b7, &c_b7, &wv[
+ wv_offset], ldwv);
+ strmm_("R", "U", "N", "N", &jlen, &knz, &c_b8, &u[j2
+ + 1 + (kzs + 1) * u_dim1], ldu, &wv[(kzs + 1)
+ * wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U11 ==== */
+
+ sgemm_("N", "N", &jlen, &i2, &j2, &c_b8, &z__[jrow + (
+ incol + 1) * z_dim1], ldz, &u[u_offset], ldu,
+ &c_b8, &wv[wv_offset], ldwv);
+
+/* ==== Copy left of Z to right of scratch ==== */
+
+ slacpy_("ALL", &jlen, &j2, &z__[jrow + (incol + 1) *
+ z_dim1], ldz, &wv[(i2 + 1) * wv_dim1 + 1],
+ ldwv);
+
+/* ==== Multiply by U21 ==== */
+
+ i__5 = i4 - i2;
+ strmm_("R", "L", "N", "N", &jlen, &i__5, &c_b8, &u[(
+ i2 + 1) * u_dim1 + 1], ldu, &wv[(i2 + 1) *
+ wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U22 ==== */
+
+ i__5 = i4 - i2;
+ i__7 = j4 - j2;
+ sgemm_("N", "N", &jlen, &i__5, &i__7, &c_b8, &z__[
+ jrow + (incol + 1 + j2) * z_dim1], ldz, &u[j2
+ + 1 + (i2 + 1) * u_dim1], ldu, &c_b8, &wv[(i2
+ + 1) * wv_dim1 + 1], ldwv);
+
+/* ==== Copy the result back to Z ==== */
+
+ slacpy_("ALL", &jlen, &kdu, &wv[wv_offset], ldwv, &
+ z__[jrow + (incol + 1) * z_dim1], ldz);
+/* L210: */
+ }
+ }
+ }
+ }
+/* L220: */
+ }
+
+/* ==== End of SLAQR5 ==== */
+
+ return 0;
+} /* slaqr5_ */
diff --git a/contrib/libs/clapack/slaqsb.c b/contrib/libs/clapack/slaqsb.c
new file mode 100644
index 0000000000..772a959841
--- /dev/null
+++ b/contrib/libs/clapack/slaqsb.c
@@ -0,0 +1,184 @@
+/* slaqsb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slaqsb_(char *uplo, integer *n, integer *kd, real *ab, 
+	integer *ldab, real *s, real *scond, real *amax, char *equed)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, j;
+    real cj, large;
+    extern logical lsame_(char *, char *);
+    real small;
+    extern doublereal slamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAQSB equilibrates a symmetric band matrix A using the scaling */
+/* factors in the vector S. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of super-diagonals of the matrix A if UPLO = 'U', */
+/* or the number of sub-diagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) REAL array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, if INFO = 0, the triangular factor U or L from the */
+/* Cholesky factorization A = U'*U or A = L*L' of the band */
+/* matrix A, in the same storage format as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* S (input) REAL array, dimension (N) */
+/* The scale factors for A. */
+
+/* SCOND (input) REAL */
+/* Ratio of the smallest S(i) to the largest S(i). */
+
+/* AMAX (input) REAL */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies whether or not equilibration was done. */
+/* = 'N': No equilibration. */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if scaling should be done */
+/* based on the ratio of the scaling factors. If SCOND < THRESH, */
+/* scaling is done. */
+
+/* LARGE and SMALL are threshold values used to decide if scaling should */
+/* be done based on the absolute size of the largest matrix element. */
+/* If AMAX > LARGE or AMAX < SMALL, scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+/* f2c translation: shift the array pointers so the 1-based Fortran */
+/* indexing AB(i,j) maps onto ab[i + j*ab_dim1] and S(i) onto s[i]. */
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --s;
+
+    /* Function Body */
+    if (*n <= 0) {
+	*(unsigned char *)equed = 'N';
+	return 0;
+    }
+
+/* Initialize LARGE and SMALL. */
+
+/* SMALL = (safe minimum)/(machine precision), LARGE = 1/SMALL: entries */
+/* with magnitude outside [SMALL, LARGE] make scaling worthwhile. */
+    small = slamch_("Safe minimum") / slamch_("Precision");
+    large = 1.f / small;
+
+/* THRESH = 0.1: when SCOND >= 0.1 and AMAX is within [SMALL, LARGE], */
+/* A is considered well scaled already and is left untouched. */
+    if (*scond >= .1f && *amax >= small && *amax <= large) {
+
+/* No equilibration */
+
+	*(unsigned char *)equed = 'N';
+    } else {
+
+/* Replace A by diag(S) * A * diag(S). */
+
+	if (lsame_(uplo, "U")) {
+
+/* Upper triangle of A is stored in band format. */
+
+/* A(i,j) lives at AB(kd+1+i-j,j); rows run max(1,j-kd)..j. */
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = s[j];
+/* Computing MAX */
+		i__2 = 1, i__3 = j - *kd;
+		i__4 = j;
+		for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+		    ab[*kd + 1 + i__ - j + j * ab_dim1] = cj * s[i__] * ab[*
+			    kd + 1 + i__ - j + j * ab_dim1];
+/* L10: */
+		}
+/* L20: */
+	    }
+	} else {
+
+/* Lower triangle of A is stored. */
+
+/* A(i,j) lives at AB(1+i-j,j); rows run j..min(n,j+kd). */
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = s[j];
+/* Computing MIN */
+		i__2 = *n, i__3 = j + *kd;
+		i__4 = min(i__2,i__3);
+		for (i__ = j; i__ <= i__4; ++i__) {
+		    ab[i__ + 1 - j + j * ab_dim1] = cj * s[i__] * ab[i__ + 1 
+			    - j + j * ab_dim1];
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+	*(unsigned char *)equed = 'Y';
+    }
+
+    return 0;
+
+/* End of SLAQSB */
+
+} /* slaqsb_ */
diff --git a/contrib/libs/clapack/slaqsp.c b/contrib/libs/clapack/slaqsp.c
new file mode 100644
index 0000000000..3aed8f77e7
--- /dev/null
+++ b/contrib/libs/clapack/slaqsp.c
@@ -0,0 +1,169 @@
+/* slaqsp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slaqsp_(char *uplo, integer *n, real *ap, real *s, real *
+	scond, real *amax, char *equed)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, jc;
+    real cj, large;
+    extern logical lsame_(char *, char *);
+    real small;
+    extern doublereal slamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAQSP equilibrates a symmetric matrix A using the scaling factors */
+/* in the vector S. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) REAL array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the equilibrated matrix: diag(S) * A * diag(S), in */
+/* the same storage format as A. */
+
+/* S (input) REAL array, dimension (N) */
+/* The scale factors for A. */
+
+/* SCOND (input) REAL */
+/* Ratio of the smallest S(i) to the largest S(i). */
+
+/* AMAX (input) REAL */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies whether or not equilibration was done. */
+/* = 'N': No equilibration. */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if scaling should be done */
+/* based on the ratio of the scaling factors. If SCOND < THRESH, */
+/* scaling is done. */
+
+/* LARGE and SMALL are threshold values used to decide if scaling should */
+/* be done based on the absolute size of the largest matrix element. */
+/* If AMAX > LARGE or AMAX < SMALL, scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+/* f2c translation: shift pointers so 1-based Fortran indexing works, */
+/* i.e. S(i) maps to s[i] and AP(k) to ap[k]. */
+    /* Parameter adjustments */
+    --s;
+    --ap;
+
+    /* Function Body */
+    if (*n <= 0) {
+	*(unsigned char *)equed = 'N';
+	return 0;
+    }
+
+/* Initialize LARGE and SMALL. */
+
+/* SMALL = (safe minimum)/(machine precision), LARGE = 1/SMALL: entries */
+/* with magnitude outside [SMALL, LARGE] make scaling worthwhile. */
+    small = slamch_("Safe minimum") / slamch_("Precision");
+    large = 1.f / small;
+
+/* THRESH = 0.1: when SCOND >= 0.1 and AMAX is within [SMALL, LARGE], */
+/* A is considered well scaled already and is left untouched. */
+    if (*scond >= .1f && *amax >= small && *amax <= large) {
+
+/* No equilibration */
+
+	*(unsigned char *)equed = 'N';
+    } else {
+
+/* Replace A by diag(S) * A * diag(S). */
+
+	if (lsame_(uplo, "U")) {
+
+/* Upper triangle of A is stored. */
+
+/* Column j of the packed upper triangle occupies AP(jc..jc+j-1), */
+/* so A(i,j) = AP(jc+i-1) and jc advances by j after each column. */
+	    jc = 1;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = s[j];
+		i__2 = j;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    ap[jc + i__ - 1] = cj * s[i__] * ap[jc + i__ - 1];
+/* L10: */
+		}
+		jc += j;
+/* L20: */
+	    }
+	} else {
+
+/* Lower triangle of A is stored. */
+
+/* Column j of the packed lower triangle occupies AP(jc..jc+n-j), */
+/* so A(i,j) = AP(jc+i-j) and jc advances by n-j+1 after each column. */
+	    jc = 1;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = s[j];
+		i__2 = *n;
+		for (i__ = j; i__ <= i__2; ++i__) {
+		    ap[jc + i__ - j] = cj * s[i__] * ap[jc + i__ - j];
+/* L30: */
+		}
+		jc = jc + *n - j + 1;
+/* L40: */
+	    }
+	}
+	*(unsigned char *)equed = 'Y';
+    }
+
+    return 0;
+
+/* End of SLAQSP */
+
+} /* slaqsp_ */
diff --git a/contrib/libs/clapack/slaqsy.c b/contrib/libs/clapack/slaqsy.c
new file mode 100644
index 0000000000..6ae44938c5
--- /dev/null
+++ b/contrib/libs/clapack/slaqsy.c
@@ -0,0 +1,172 @@
+/* slaqsy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slaqsy_(char *uplo, integer *n, real *a, integer *lda, 
+	real *s, real *scond, real *amax, char *equed)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j;
+    real cj, large;
+    extern logical lsame_(char *, char *);
+    real small;
+    extern doublereal slamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAQSY equilibrates a symmetric matrix A using the scaling factors */
+/* in the vector S. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if EQUED = 'Y', the equilibrated matrix: */
+/* diag(S) * A * diag(S). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(N,1). */
+
+/* S (input) REAL array, dimension (N) */
+/* The scale factors for A. */
+
+/* SCOND (input) REAL */
+/* Ratio of the smallest S(i) to the largest S(i). */
+
+/* AMAX (input) REAL */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies whether or not equilibration was done. */
+/* = 'N': No equilibration. */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if scaling should be done */
+/* based on the ratio of the scaling factors. If SCOND < THRESH, */
+/* scaling is done. */
+
+/* LARGE and SMALL are threshold values used to decide if scaling should */
+/* be done based on the absolute size of the largest matrix element. */
+/* If AMAX > LARGE or AMAX < SMALL, scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+/* f2c translation: shift the array pointers so the 1-based Fortran */
+/* indexing A(i,j) maps onto a[i + j*a_dim1] and S(i) onto s[i]. */
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --s;
+
+    /* Function Body */
+    if (*n <= 0) {
+	*(unsigned char *)equed = 'N';
+	return 0;
+    }
+
+/* Initialize LARGE and SMALL. */
+
+/* SMALL = (safe minimum)/(machine precision), LARGE = 1/SMALL: entries */
+/* with magnitude outside [SMALL, LARGE] make scaling worthwhile. */
+    small = slamch_("Safe minimum") / slamch_("Precision");
+    large = 1.f / small;
+
+/* THRESH = 0.1: when SCOND >= 0.1 and AMAX is within [SMALL, LARGE], */
+/* A is considered well scaled already and is left untouched. */
+    if (*scond >= .1f && *amax >= small && *amax <= large) {
+
+/* No equilibration */
+
+	*(unsigned char *)equed = 'N';
+    } else {
+
+/* Replace A by diag(S) * A * diag(S). */
+
+/* Only the stored triangle is scaled; the other triangle is never */
+/* referenced, preserving the UPLO storage contract. */
+	if (lsame_(uplo, "U")) {
+
+/* Upper triangle of A is stored. */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = s[j];
+		i__2 = j;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    a[i__ + j * a_dim1] = cj * s[i__] * a[i__ + j * a_dim1];
+/* L10: */
+		}
+/* L20: */
+	    }
+	} else {
+
+/* Lower triangle of A is stored. */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = s[j];
+		i__2 = *n;
+		for (i__ = j; i__ <= i__2; ++i__) {
+		    a[i__ + j * a_dim1] = cj * s[i__] * a[i__ + j * a_dim1];
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+	*(unsigned char *)equed = 'Y';
+    }
+
+    return 0;
+
+/* End of SLAQSY */
+
+} /* slaqsy_ */
diff --git a/contrib/libs/clapack/slaqtr.c b/contrib/libs/clapack/slaqtr.c
new file mode 100644
index 0000000000..dd32a081b3
--- /dev/null
+++ b/contrib/libs/clapack/slaqtr.c
@@ -0,0 +1,831 @@
+/* slaqtr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c passes every scalar argument by reference, so the literal */
+/* constants used in calls below are materialized as file-scope statics */
+/* whose addresses can be taken. */
+static integer c__1 = 1;
+static logical c_false = FALSE_;
+static integer c__2 = 2;
+static real c_b21 = 1.f;
+static real c_b25 = 0.f;
+static logical c_true = TRUE_;
+
+/* Subroutine */ int slaqtr_(logical *ltran, logical *lreal, integer *n, real
+ *t, integer *ldt, real *b, real *w, real *scale, real *x, real *work,
+ integer *info)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, i__1, i__2;
+ real r__1, r__2, r__3, r__4, r__5, r__6;
+
+ /* Local variables */
+ real d__[4] /* was [2][2] */;
+ integer i__, j, k;
+ real v[4] /* was [2][2] */, z__;
+ integer j1, j2, n1, n2;
+ real si, xj, sr, rec, eps, tjj, tmp;
+ integer ierr;
+ real smin;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ real xmax;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ integer jnext;
+ extern doublereal sasum_(integer *, real *, integer *);
+ real sminw, xnorm;
+ extern /* Subroutine */ int saxpy_(integer *, real *, real *, integer *,
+ real *, integer *), slaln2_(logical *, integer *, integer *, real
+ *, real *, real *, integer *, real *, real *, real *, integer *,
+ real *, real *, real *, integer *, real *, real *, integer *);
+ real scaloc;
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ real bignum;
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int sladiv_(real *, real *, real *, real *, real *
+, real *);
+ logical notran;
+ real smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAQTR solves the real quasi-triangular system */
+
+/* op(T)*p = scale*c, if LREAL = .TRUE. */
+
+/* or the complex quasi-triangular systems */
+
+/* op(T + iB)*(p+iq) = scale*(c+id), if LREAL = .FALSE. */
+
+/* in real arithmetic, where T is upper quasi-triangular. */
+/* If LREAL = .FALSE., then the first diagonal block of T must be */
+/* 1 by 1, B is the specially structured matrix */
+
+/* B = [ b(1) b(2) ... b(n) ] */
+/* [ w ] */
+/* [ w ] */
+/* [ . ] */
+/* [ w ] */
+
+/* op(A) = A or A', A' denotes the conjugate transpose of */
+/* matrix A. */
+
+/* On input, X = [ c ]. On output, X = [ p ]. */
+/* [ d ] [ q ] */
+
+/* This subroutine is designed for the condition number estimation */
+/* in routine STRSNA. */
+
+/* Arguments */
+/* ========= */
+
+/* LTRAN (input) LOGICAL */
+/* On entry, LTRAN specifies the option of conjugate transpose: */
+/* = .FALSE., op(T+i*B) = T+i*B, */
+/* = .TRUE., op(T+i*B) = (T+i*B)'. */
+
+/* LREAL (input) LOGICAL */
+/* On entry, LREAL specifies the input matrix structure: */
+/* = .FALSE., the input is complex */
+/* = .TRUE., the input is real */
+
+/* N (input) INTEGER */
+/* On entry, N specifies the order of T+i*B. N >= 0. */
+
+/* T (input) REAL array, dimension (LDT,N) */
+/* On entry, T contains a matrix in Schur canonical form. */
+/* If LREAL = .FALSE., then the first diagonal block of T must */
+/* be 1 by 1. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the matrix T. LDT >= max(1,N). */
+
+/* B (input) REAL array, dimension (N) */
+/* On entry, B contains the elements to form the matrix */
+/* B as described above. */
+/* If LREAL = .TRUE., B is not referenced. */
+
+/* W (input) REAL */
+/* On entry, W is the diagonal element of the matrix B. */
+/* If LREAL = .TRUE., W is not referenced. */
+
+/* SCALE (output) REAL */
+/* On exit, SCALE is the scale factor. */
+
+/* X (input/output) REAL array, dimension (2*N) */
+/* On entry, X contains the right hand side of the system. */
+/* On exit, X is overwritten by the solution. */
+
+/* WORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* On exit, INFO is set to */
+/* 0: successful exit. */
+/* 1: the some diagonal 1 by 1 block has been perturbed by */
+/* a small number SMIN to keep nonsingularity. */
+/* 2: the some diagonal 2 by 2 block has been perturbed by */
+/* a small number in SLALN2 to keep nonsingularity. */
+/* NOTE: In the interests of speed, this routine does not */
+/* check the inputs for errors. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Do not test the input parameters for errors */
+
+ /* Parameter adjustments */
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ --b;
+ --x;
+ --work;
+
+ /* Function Body */
+ notran = ! (*ltran);
+ *info = 0;
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Set constants to control overflow */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S") / eps;
+ bignum = 1.f / smlnum;
+
+ xnorm = slange_("M", n, n, &t[t_offset], ldt, d__);
+ if (! (*lreal)) {
+/* Computing MAX */
+ r__1 = xnorm, r__2 = dabs(*w), r__1 = max(r__1,r__2), r__2 = slange_(
+ "M", n, &c__1, &b[1], n, d__);
+ xnorm = dmax(r__1,r__2);
+ }
+/* Computing MAX */
+ r__1 = smlnum, r__2 = eps * xnorm;
+ smin = dmax(r__1,r__2);
+
+/* Compute 1-norm of each column of strictly upper triangular */
+/* part of T to control overflow in triangular solver. */
+
+ work[1] = 0.f;
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j - 1;
+ work[j] = sasum_(&i__2, &t[j * t_dim1 + 1], &c__1);
+/* L10: */
+ }
+
+ if (! (*lreal)) {
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ work[i__] += (r__1 = b[i__], dabs(r__1));
+/* L20: */
+ }
+ }
+
+ n2 = *n << 1;
+ n1 = *n;
+ if (! (*lreal)) {
+ n1 = n2;
+ }
+ k = isamax_(&n1, &x[1], &c__1);
+ xmax = (r__1 = x[k], dabs(r__1));
+ *scale = 1.f;
+
+ if (xmax > bignum) {
+ *scale = bignum / xmax;
+ sscal_(&n1, scale, &x[1], &c__1);
+ xmax = bignum;
+ }
+
+ if (*lreal) {
+
+ if (notran) {
+
+/* Solve T*p = scale*c */
+
+ jnext = *n;
+ for (j = *n; j >= 1; --j) {
+ if (j > jnext) {
+ goto L30;
+ }
+ j1 = j;
+ j2 = j;
+ jnext = j - 1;
+ if (j > 1) {
+ if (t[j + (j - 1) * t_dim1] != 0.f) {
+ j1 = j - 1;
+ jnext = j - 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* Meet 1 by 1 diagonal block */
+
+/* Scale to avoid overflow when computing */
+/* x(j) = b(j)/T(j,j) */
+
+ xj = (r__1 = x[j1], dabs(r__1));
+ tjj = (r__1 = t[j1 + j1 * t_dim1], dabs(r__1));
+ tmp = t[j1 + j1 * t_dim1];
+ if (tjj < smin) {
+ tmp = smin;
+ tjj = smin;
+ *info = 1;
+ }
+
+ if (xj == 0.f) {
+ goto L30;
+ }
+
+ if (tjj < 1.f) {
+ if (xj > bignum * tjj) {
+ rec = 1.f / xj;
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ x[j1] /= tmp;
+ xj = (r__1 = x[j1], dabs(r__1));
+
+/* Scale x if necessary to avoid overflow when adding a */
+/* multiple of column j1 of T. */
+
+ if (xj > 1.f) {
+ rec = 1.f / xj;
+ if (work[j1] > (bignum - xmax) * rec) {
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ }
+ if (j1 > 1) {
+ i__1 = j1 - 1;
+ r__1 = -x[j1];
+ saxpy_(&i__1, &r__1, &t[j1 * t_dim1 + 1], &c__1, &x[1]
+, &c__1);
+ i__1 = j1 - 1;
+ k = isamax_(&i__1, &x[1], &c__1);
+ xmax = (r__1 = x[k], dabs(r__1));
+ }
+
+ } else {
+
+/* Meet 2 by 2 diagonal block */
+
+/* Call 2 by 2 linear system solve, to take */
+/* care of possible overflow by scaling factor. */
+
+ d__[0] = x[j1];
+ d__[1] = x[j2];
+ slaln2_(&c_false, &c__2, &c__1, &smin, &c_b21, &t[j1 + j1
+ * t_dim1], ldt, &c_b21, &c_b21, d__, &c__2, &
+ c_b25, &c_b25, v, &c__2, &scaloc, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 2;
+ }
+
+ if (scaloc != 1.f) {
+ sscal_(n, &scaloc, &x[1], &c__1);
+ *scale *= scaloc;
+ }
+ x[j1] = v[0];
+ x[j2] = v[1];
+
+/* Scale V(1,1) (= X(J1)) and/or V(2,1) (=X(J2)) */
+/* to avoid overflow in updating right-hand side. */
+
+/* Computing MAX */
+ r__1 = dabs(v[0]), r__2 = dabs(v[1]);
+ xj = dmax(r__1,r__2);
+ if (xj > 1.f) {
+ rec = 1.f / xj;
+/* Computing MAX */
+ r__1 = work[j1], r__2 = work[j2];
+ if (dmax(r__1,r__2) > (bignum - xmax) * rec) {
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ }
+
+/* Update right-hand side */
+
+ if (j1 > 1) {
+ i__1 = j1 - 1;
+ r__1 = -x[j1];
+ saxpy_(&i__1, &r__1, &t[j1 * t_dim1 + 1], &c__1, &x[1]
+, &c__1);
+ i__1 = j1 - 1;
+ r__1 = -x[j2];
+ saxpy_(&i__1, &r__1, &t[j2 * t_dim1 + 1], &c__1, &x[1]
+, &c__1);
+ i__1 = j1 - 1;
+ k = isamax_(&i__1, &x[1], &c__1);
+ xmax = (r__1 = x[k], dabs(r__1));
+ }
+
+ }
+
+L30:
+ ;
+ }
+
+ } else {
+
+/* Solve T'*p = scale*c */
+
+ jnext = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (j < jnext) {
+ goto L40;
+ }
+ j1 = j;
+ j2 = j;
+ jnext = j + 1;
+ if (j < *n) {
+ if (t[j + 1 + j * t_dim1] != 0.f) {
+ j2 = j + 1;
+ jnext = j + 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1 by 1 diagonal block */
+
+/* Scale if necessary to avoid overflow in forming the */
+/* right-hand side element by inner product. */
+
+ xj = (r__1 = x[j1], dabs(r__1));
+ if (xmax > 1.f) {
+ rec = 1.f / xmax;
+ if (work[j1] > (bignum - xj) * rec) {
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ i__2 = j1 - 1;
+ x[j1] -= sdot_(&i__2, &t[j1 * t_dim1 + 1], &c__1, &x[1], &
+ c__1);
+
+ xj = (r__1 = x[j1], dabs(r__1));
+ tjj = (r__1 = t[j1 + j1 * t_dim1], dabs(r__1));
+ tmp = t[j1 + j1 * t_dim1];
+ if (tjj < smin) {
+ tmp = smin;
+ tjj = smin;
+ *info = 1;
+ }
+
+ if (tjj < 1.f) {
+ if (xj > bignum * tjj) {
+ rec = 1.f / xj;
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ x[j1] /= tmp;
+/* Computing MAX */
+ r__2 = xmax, r__3 = (r__1 = x[j1], dabs(r__1));
+ xmax = dmax(r__2,r__3);
+
+ } else {
+
+/* 2 by 2 diagonal block */
+
+/* Scale if necessary to avoid overflow in forming the */
+/* right-hand side elements by inner product. */
+
+/* Computing MAX */
+ r__3 = (r__1 = x[j1], dabs(r__1)), r__4 = (r__2 = x[j2],
+ dabs(r__2));
+ xj = dmax(r__3,r__4);
+ if (xmax > 1.f) {
+ rec = 1.f / xmax;
+/* Computing MAX */
+ r__1 = work[j2], r__2 = work[j1];
+ if (dmax(r__1,r__2) > (bignum - xj) * rec) {
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ i__2 = j1 - 1;
+ d__[0] = x[j1] - sdot_(&i__2, &t[j1 * t_dim1 + 1], &c__1,
+ &x[1], &c__1);
+ i__2 = j1 - 1;
+ d__[1] = x[j2] - sdot_(&i__2, &t[j2 * t_dim1 + 1], &c__1,
+ &x[1], &c__1);
+
+ slaln2_(&c_true, &c__2, &c__1, &smin, &c_b21, &t[j1 + j1 *
+ t_dim1], ldt, &c_b21, &c_b21, d__, &c__2, &c_b25,
+ &c_b25, v, &c__2, &scaloc, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 2;
+ }
+
+ if (scaloc != 1.f) {
+ sscal_(n, &scaloc, &x[1], &c__1);
+ *scale *= scaloc;
+ }
+ x[j1] = v[0];
+ x[j2] = v[1];
+/* Computing MAX */
+ r__3 = (r__1 = x[j1], dabs(r__1)), r__4 = (r__2 = x[j2],
+ dabs(r__2)), r__3 = max(r__3,r__4);
+ xmax = dmax(r__3,xmax);
+
+ }
+L40:
+ ;
+ }
+ }
+
+ } else {
+
+/* Computing MAX */
+ r__1 = eps * dabs(*w);
+ sminw = dmax(r__1,smin);
+ if (notran) {
+
+/* Solve (T + iB)*(p+iq) = c+id */
+
+ jnext = *n;
+ for (j = *n; j >= 1; --j) {
+ if (j > jnext) {
+ goto L70;
+ }
+ j1 = j;
+ j2 = j;
+ jnext = j - 1;
+ if (j > 1) {
+ if (t[j + (j - 1) * t_dim1] != 0.f) {
+ j1 = j - 1;
+ jnext = j - 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1 by 1 diagonal block */
+
+/* Scale if necessary to avoid overflow in division */
+
+ z__ = *w;
+ if (j1 == 1) {
+ z__ = b[1];
+ }
+ xj = (r__1 = x[j1], dabs(r__1)) + (r__2 = x[*n + j1],
+ dabs(r__2));
+ tjj = (r__1 = t[j1 + j1 * t_dim1], dabs(r__1)) + dabs(z__)
+ ;
+ tmp = t[j1 + j1 * t_dim1];
+ if (tjj < sminw) {
+ tmp = sminw;
+ tjj = sminw;
+ *info = 1;
+ }
+
+ if (xj == 0.f) {
+ goto L70;
+ }
+
+ if (tjj < 1.f) {
+ if (xj > bignum * tjj) {
+ rec = 1.f / xj;
+ sscal_(&n2, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ sladiv_(&x[j1], &x[*n + j1], &tmp, &z__, &sr, &si);
+ x[j1] = sr;
+ x[*n + j1] = si;
+ xj = (r__1 = x[j1], dabs(r__1)) + (r__2 = x[*n + j1],
+ dabs(r__2));
+
+/* Scale x if necessary to avoid overflow when adding a */
+/* multiple of column j1 of T. */
+
+ if (xj > 1.f) {
+ rec = 1.f / xj;
+ if (work[j1] > (bignum - xmax) * rec) {
+ sscal_(&n2, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ }
+
+ if (j1 > 1) {
+ i__1 = j1 - 1;
+ r__1 = -x[j1];
+ saxpy_(&i__1, &r__1, &t[j1 * t_dim1 + 1], &c__1, &x[1]
+, &c__1);
+ i__1 = j1 - 1;
+ r__1 = -x[*n + j1];
+ saxpy_(&i__1, &r__1, &t[j1 * t_dim1 + 1], &c__1, &x[*
+ n + 1], &c__1);
+
+ x[1] += b[j1] * x[*n + j1];
+ x[*n + 1] -= b[j1] * x[j1];
+
+ xmax = 0.f;
+ i__1 = j1 - 1;
+ for (k = 1; k <= i__1; ++k) {
+/* Computing MAX */
+ r__3 = xmax, r__4 = (r__1 = x[k], dabs(r__1)) + (
+ r__2 = x[k + *n], dabs(r__2));
+ xmax = dmax(r__3,r__4);
+/* L50: */
+ }
+ }
+
+ } else {
+
+/* Meet 2 by 2 diagonal block */
+
+ d__[0] = x[j1];
+ d__[1] = x[j2];
+ d__[2] = x[*n + j1];
+ d__[3] = x[*n + j2];
+ r__1 = -(*w);
+ slaln2_(&c_false, &c__2, &c__2, &sminw, &c_b21, &t[j1 +
+ j1 * t_dim1], ldt, &c_b21, &c_b21, d__, &c__2, &
+ c_b25, &r__1, v, &c__2, &scaloc, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 2;
+ }
+
+ if (scaloc != 1.f) {
+ i__1 = *n << 1;
+ sscal_(&i__1, &scaloc, &x[1], &c__1);
+ *scale = scaloc * *scale;
+ }
+ x[j1] = v[0];
+ x[j2] = v[1];
+ x[*n + j1] = v[2];
+ x[*n + j2] = v[3];
+
+/* Scale X(J1), .... to avoid overflow in */
+/* updating right hand side. */
+
+/* Computing MAX */
+ r__1 = dabs(v[0]) + dabs(v[2]), r__2 = dabs(v[1]) + dabs(
+ v[3]);
+ xj = dmax(r__1,r__2);
+ if (xj > 1.f) {
+ rec = 1.f / xj;
+/* Computing MAX */
+ r__1 = work[j1], r__2 = work[j2];
+ if (dmax(r__1,r__2) > (bignum - xmax) * rec) {
+ sscal_(&n2, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ }
+
+/* Update the right-hand side. */
+
+ if (j1 > 1) {
+ i__1 = j1 - 1;
+ r__1 = -x[j1];
+ saxpy_(&i__1, &r__1, &t[j1 * t_dim1 + 1], &c__1, &x[1]
+, &c__1);
+ i__1 = j1 - 1;
+ r__1 = -x[j2];
+ saxpy_(&i__1, &r__1, &t[j2 * t_dim1 + 1], &c__1, &x[1]
+, &c__1);
+
+ i__1 = j1 - 1;
+ r__1 = -x[*n + j1];
+ saxpy_(&i__1, &r__1, &t[j1 * t_dim1 + 1], &c__1, &x[*
+ n + 1], &c__1);
+ i__1 = j1 - 1;
+ r__1 = -x[*n + j2];
+ saxpy_(&i__1, &r__1, &t[j2 * t_dim1 + 1], &c__1, &x[*
+ n + 1], &c__1);
+
+ x[1] = x[1] + b[j1] * x[*n + j1] + b[j2] * x[*n + j2];
+ x[*n + 1] = x[*n + 1] - b[j1] * x[j1] - b[j2] * x[j2];
+
+ xmax = 0.f;
+ i__1 = j1 - 1;
+ for (k = 1; k <= i__1; ++k) {
+/* Computing MAX */
+ r__3 = (r__1 = x[k], dabs(r__1)) + (r__2 = x[k + *
+ n], dabs(r__2));
+ xmax = dmax(r__3,xmax);
+/* L60: */
+ }
+ }
+
+ }
+L70:
+ ;
+ }
+
+ } else {
+
+/* Solve (T + iB)'*(p+iq) = c+id */
+
+ jnext = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (j < jnext) {
+ goto L80;
+ }
+ j1 = j;
+ j2 = j;
+ jnext = j + 1;
+ if (j < *n) {
+ if (t[j + 1 + j * t_dim1] != 0.f) {
+ j2 = j + 1;
+ jnext = j + 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1 by 1 diagonal block */
+
+/* Scale if necessary to avoid overflow in forming the */
+/* right-hand side element by inner product. */
+
+ xj = (r__1 = x[j1], dabs(r__1)) + (r__2 = x[j1 + *n],
+ dabs(r__2));
+ if (xmax > 1.f) {
+ rec = 1.f / xmax;
+ if (work[j1] > (bignum - xj) * rec) {
+ sscal_(&n2, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ i__2 = j1 - 1;
+ x[j1] -= sdot_(&i__2, &t[j1 * t_dim1 + 1], &c__1, &x[1], &
+ c__1);
+ i__2 = j1 - 1;
+ x[*n + j1] -= sdot_(&i__2, &t[j1 * t_dim1 + 1], &c__1, &x[
+ *n + 1], &c__1);
+ if (j1 > 1) {
+ x[j1] -= b[j1] * x[*n + 1];
+ x[*n + j1] += b[j1] * x[1];
+ }
+ xj = (r__1 = x[j1], dabs(r__1)) + (r__2 = x[j1 + *n],
+ dabs(r__2));
+
+ z__ = *w;
+ if (j1 == 1) {
+ z__ = b[1];
+ }
+
+/* Scale if necessary to avoid overflow in */
+/* complex division */
+
+ tjj = (r__1 = t[j1 + j1 * t_dim1], dabs(r__1)) + dabs(z__)
+ ;
+ tmp = t[j1 + j1 * t_dim1];
+ if (tjj < sminw) {
+ tmp = sminw;
+ tjj = sminw;
+ *info = 1;
+ }
+
+ if (tjj < 1.f) {
+ if (xj > bignum * tjj) {
+ rec = 1.f / xj;
+ sscal_(&n2, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ r__1 = -z__;
+ sladiv_(&x[j1], &x[*n + j1], &tmp, &r__1, &sr, &si);
+ x[j1] = sr;
+ x[j1 + *n] = si;
+/* Computing MAX */
+ r__3 = (r__1 = x[j1], dabs(r__1)) + (r__2 = x[j1 + *n],
+ dabs(r__2));
+ xmax = dmax(r__3,xmax);
+
+ } else {
+
+/* 2 by 2 diagonal block */
+
+/* Scale if necessary to avoid overflow in forming the */
+/* right-hand side element by inner product. */
+
+/* Computing MAX */
+ r__5 = (r__1 = x[j1], dabs(r__1)) + (r__2 = x[*n + j1],
+ dabs(r__2)), r__6 = (r__3 = x[j2], dabs(r__3)) + (
+ r__4 = x[*n + j2], dabs(r__4));
+ xj = dmax(r__5,r__6);
+ if (xmax > 1.f) {
+ rec = 1.f / xmax;
+/* Computing MAX */
+ r__1 = work[j1], r__2 = work[j2];
+ if (dmax(r__1,r__2) > (bignum - xj) / xmax) {
+ sscal_(&n2, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ i__2 = j1 - 1;
+ d__[0] = x[j1] - sdot_(&i__2, &t[j1 * t_dim1 + 1], &c__1,
+ &x[1], &c__1);
+ i__2 = j1 - 1;
+ d__[1] = x[j2] - sdot_(&i__2, &t[j2 * t_dim1 + 1], &c__1,
+ &x[1], &c__1);
+ i__2 = j1 - 1;
+ d__[2] = x[*n + j1] - sdot_(&i__2, &t[j1 * t_dim1 + 1], &
+ c__1, &x[*n + 1], &c__1);
+ i__2 = j1 - 1;
+ d__[3] = x[*n + j2] - sdot_(&i__2, &t[j2 * t_dim1 + 1], &
+ c__1, &x[*n + 1], &c__1);
+ d__[0] -= b[j1] * x[*n + 1];
+ d__[1] -= b[j2] * x[*n + 1];
+ d__[2] += b[j1] * x[1];
+ d__[3] += b[j2] * x[1];
+
+ slaln2_(&c_true, &c__2, &c__2, &sminw, &c_b21, &t[j1 + j1
+ * t_dim1], ldt, &c_b21, &c_b21, d__, &c__2, &
+ c_b25, w, v, &c__2, &scaloc, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 2;
+ }
+
+ if (scaloc != 1.f) {
+ sscal_(&n2, &scaloc, &x[1], &c__1);
+ *scale = scaloc * *scale;
+ }
+ x[j1] = v[0];
+ x[j2] = v[1];
+ x[*n + j1] = v[2];
+ x[*n + j2] = v[3];
+/* Computing MAX */
+ r__5 = (r__1 = x[j1], dabs(r__1)) + (r__2 = x[*n + j1],
+ dabs(r__2)), r__6 = (r__3 = x[j2], dabs(r__3)) + (
+ r__4 = x[*n + j2], dabs(r__4)), r__5 = max(r__5,
+ r__6);
+ xmax = dmax(r__5,xmax);
+
+ }
+
+L80:
+ ;
+ }
+
+ }
+
+ }
+
+ return 0;
+
+/* End of SLAQTR */
+
+} /* slaqtr_ */
diff --git a/contrib/libs/clapack/slar1v.c b/contrib/libs/clapack/slar1v.c
new file mode 100644
index 0000000000..59de90199a
--- /dev/null
+++ b/contrib/libs/clapack/slar1v.c
@@ -0,0 +1,440 @@
+/* slar1v.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slar1v_(integer *n, integer *b1, integer *bn, real *
+ lambda, real *d__, real *l, real *ld, real *lld, real *pivmin, real *
+ gaptol, real *z__, logical *wantnc, integer *negcnt, real *ztz, real *
+ mingma, integer *r__, integer *isuppz, real *nrminv, real *resid,
+ real *rqcorr, real *work)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ real s;
+ integer r1, r2;
+ real eps, tmp;
+ integer neg1, neg2, indp, inds;
+ real dplus;
+ extern doublereal slamch_(char *);
+ integer indlpl, indumn;
+ extern logical sisnan_(real *);
+ real dminus;
+ logical sawnan1, sawnan2;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAR1V computes the (scaled) r-th column of the inverse of */
+/* the sumbmatrix in rows B1 through BN of the tridiagonal matrix */
+/* L D L^T - sigma I. When sigma is close to an eigenvalue, the */
+/* computed vector is an accurate eigenvector. Usually, r corresponds */
+/* to the index where the eigenvector is largest in magnitude. */
+/* The following steps accomplish this computation : */
+/* (a) Stationary qd transform, L D L^T - sigma I = L(+) D(+) L(+)^T, */
+/* (b) Progressive qd transform, L D L^T - sigma I = U(-) D(-) U(-)^T, */
+/* (c) Computation of the diagonal elements of the inverse of */
+/* L D L^T - sigma I by combining the above transforms, and choosing */
+/* r as the index where the diagonal of the inverse is (one of the) */
+/* largest in magnitude. */
+/* (d) Computation of the (scaled) r-th column of the inverse using the */
+/* twisted factorization obtained by combining the top part of the */
+/* the stationary and the bottom part of the progressive transform. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix L D L^T. */
+
+/* B1 (input) INTEGER */
+/* First index of the submatrix of L D L^T. */
+
+/* BN (input) INTEGER */
+/* Last index of the submatrix of L D L^T. */
+
+/* LAMBDA (input) REAL */
+/* The shift. In order to compute an accurate eigenvector, */
+/* LAMBDA should be a good approximation to an eigenvalue */
+/* of L D L^T. */
+
+/* L (input) REAL array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of the unit bidiagonal matrix */
+/* L, in elements 1 to N-1. */
+
+/* D (input) REAL array, dimension (N) */
+/* The n diagonal elements of the diagonal matrix D. */
+
+/* LD (input) REAL array, dimension (N-1) */
+/* The n-1 elements L(i)*D(i). */
+
+/* LLD (input) REAL array, dimension (N-1) */
+/* The n-1 elements L(i)*L(i)*D(i). */
+
+/* PIVMIN (input) REAL */
+/* The minimum pivot in the Sturm sequence. */
+
+/* GAPTOL (input) REAL */
+/* Tolerance that indicates when eigenvector entries are negligible */
+/* w.r.t. their contribution to the residual. */
+
+/* Z (input/output) REAL array, dimension (N) */
+/* On input, all entries of Z must be set to 0. */
+/* On output, Z contains the (scaled) r-th column of the */
+/* inverse. The scaling is such that Z(R) equals 1. */
+
+/* WANTNC (input) LOGICAL */
+/* Specifies whether NEGCNT has to be computed. */
+
+/* NEGCNT (output) INTEGER */
+/* If WANTNC is .TRUE. then NEGCNT = the number of pivots < pivmin */
+/* in the matrix factorization L D L^T, and NEGCNT = -1 otherwise. */
+
+/* ZTZ (output) REAL */
+/* The square of the 2-norm of Z. */
+
+/* MINGMA (output) REAL */
+/* The reciprocal of the largest (in magnitude) diagonal */
+/* element of the inverse of L D L^T - sigma I. */
+
+/* R (input/output) INTEGER */
+/* The twist index for the twisted factorization used to */
+/* compute Z. */
+/* On input, 0 <= R <= N. If R is input as 0, R is set to */
+/* the index where (L D L^T - sigma I)^{-1} is largest */
+/* in magnitude. If 1 <= R <= N, R is unchanged. */
+/* On output, R contains the twist index used to compute Z. */
+/* Ideally, R designates the position of the maximum entry in the */
+/* eigenvector. */
+
+/* ISUPPZ (output) INTEGER array, dimension (2) */
+/* The support of the vector in Z, i.e., the vector Z is */
+/* nonzero only in elements ISUPPZ(1) through ISUPPZ( 2 ). */
+
+/* NRMINV (output) REAL */
+/* NRMINV = 1/SQRT( ZTZ ) */
+
+/* RESID (output) REAL */
+/* The residual of the FP vector. */
+/* RESID = ABS( MINGMA )/SQRT( ZTZ ) */
+
+/* RQCORR (output) REAL */
+/* The Rayleigh Quotient correction to LAMBDA. */
+/* RQCORR = MINGMA*TMP */
+
+/* WORK (workspace) REAL array, dimension (4*N) */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --work;
+ --isuppz;
+ --z__;
+ --lld;
+ --ld;
+ --l;
+ --d__;
+
+ /* Function Body */
+ eps = slamch_("Precision");
+/* If no twist index was supplied (R == 0), search the whole */
+/* submatrix B1..BN for it below; otherwise pin both qd transforms */
+/* to meet exactly at the caller-supplied index R. */
+ if (*r__ == 0) {
+ r1 = *b1;
+ r2 = *bn;
+ } else {
+ r1 = *r__;
+ r2 = *r__;
+ }
+/* WORK layout (0-based offsets): [INDLPL+i] L(+) multipliers, */
+/* [INDUMN+i] U(-) multipliers, [INDS+i] stationary pivots S, */
+/* [INDP+i] progressive pivots P -- four length-N slices of WORK(4N). */
+/* Storage for LPLUS */
+ indlpl = 0;
+/* Storage for UMINUS */
+ indumn = *n;
+ inds = (*n << 1) + 1;
+ indp = *n * 3 + 1;
+ if (*b1 == 1) {
+ work[inds] = 0.f;
+ } else {
+ work[inds + *b1 - 1] = lld[*b1 - 1];
+ }
+
+/* Compute the stationary transform (using the differential form) */
+/* until the index R2. */
+
+ sawnan1 = FALSE_;
+ neg1 = 0;
+ s = work[inds + *b1 - 1] - *lambda;
+ i__1 = r1 - 1;
+ for (i__ = *b1; i__ <= i__1; ++i__) {
+ dplus = d__[i__] + s;
+ work[indlpl + i__] = ld[i__] / dplus;
+ if (dplus < 0.f) {
+ ++neg1;
+ }
+ work[inds + i__] = s * work[indlpl + i__] * l[i__];
+ s = work[inds + i__] - *lambda;
+/* L50: */
+ }
+ sawnan1 = sisnan_(&s);
+ if (sawnan1) {
+ goto L60;
+ }
+ i__1 = r2 - 1;
+ for (i__ = r1; i__ <= i__1; ++i__) {
+ dplus = d__[i__] + s;
+ work[indlpl + i__] = ld[i__] / dplus;
+ work[inds + i__] = s * work[indlpl + i__] * l[i__];
+ s = work[inds + i__] - *lambda;
+/* L51: */
+ }
+ sawnan1 = sisnan_(&s);
+
+L60:
+ if (sawnan1) {
+/* Runs a slower version of the above loop if a NaN is detected */
+/* (pivots clamped to -PIVMIN and zero multipliers patched with */
+/* LLD so the recurrence cannot propagate NaN/Inf). */
+ neg1 = 0;
+ s = work[inds + *b1 - 1] - *lambda;
+ i__1 = r1 - 1;
+ for (i__ = *b1; i__ <= i__1; ++i__) {
+ dplus = d__[i__] + s;
+ if (dabs(dplus) < *pivmin) {
+ dplus = -(*pivmin);
+ }
+ work[indlpl + i__] = ld[i__] / dplus;
+ if (dplus < 0.f) {
+ ++neg1;
+ }
+ work[inds + i__] = s * work[indlpl + i__] * l[i__];
+ if (work[indlpl + i__] == 0.f) {
+ work[inds + i__] = lld[i__];
+ }
+ s = work[inds + i__] - *lambda;
+/* L70: */
+ }
+ i__1 = r2 - 1;
+ for (i__ = r1; i__ <= i__1; ++i__) {
+ dplus = d__[i__] + s;
+ if (dabs(dplus) < *pivmin) {
+ dplus = -(*pivmin);
+ }
+ work[indlpl + i__] = ld[i__] / dplus;
+ work[inds + i__] = s * work[indlpl + i__] * l[i__];
+ if (work[indlpl + i__] == 0.f) {
+ work[inds + i__] = lld[i__];
+ }
+ s = work[inds + i__] - *lambda;
+/* L71: */
+ }
+ }
+
+/* Compute the progressive transform (using the differential form) */
+/* until the index R1 */
+
+ sawnan2 = FALSE_;
+ neg2 = 0;
+ work[indp + *bn - 1] = d__[*bn] - *lambda;
+ i__1 = r1;
+ for (i__ = *bn - 1; i__ >= i__1; --i__) {
+ dminus = lld[i__] + work[indp + i__];
+ tmp = d__[i__] / dminus;
+ if (dminus < 0.f) {
+ ++neg2;
+ }
+ work[indumn + i__] = l[i__] * tmp;
+ work[indp + i__ - 1] = work[indp + i__] * tmp - *lambda;
+/* L80: */
+ }
+ tmp = work[indp + r1 - 1];
+ sawnan2 = sisnan_(&tmp);
+ if (sawnan2) {
+/* Runs a slower version of the above loop if a NaN is detected */
+ neg2 = 0;
+ i__1 = r1;
+ for (i__ = *bn - 1; i__ >= i__1; --i__) {
+ dminus = lld[i__] + work[indp + i__];
+ if (dabs(dminus) < *pivmin) {
+ dminus = -(*pivmin);
+ }
+ tmp = d__[i__] / dminus;
+ if (dminus < 0.f) {
+ ++neg2;
+ }
+ work[indumn + i__] = l[i__] * tmp;
+ work[indp + i__ - 1] = work[indp + i__] * tmp - *lambda;
+ if (tmp == 0.f) {
+ work[indp + i__ - 1] = d__[i__] - *lambda;
+ }
+/* L100: */
+ }
+ }
+
+/* Find the index (from R1 to R2) of the largest (in magnitude) */
+/* diagonal element of the inverse */
+
+ *mingma = work[inds + r1 - 1] + work[indp + r1 - 1];
+ if (*mingma < 0.f) {
+ ++neg1;
+ }
+ if (*wantnc) {
+ *negcnt = neg1 + neg2;
+ } else {
+ *negcnt = -1;
+ }
+ if (dabs(*mingma) == 0.f) {
+ *mingma = eps * work[inds + r1 - 1];
+ }
+ *r__ = r1;
+ i__1 = r2 - 1;
+ for (i__ = r1; i__ <= i__1; ++i__) {
+ tmp = work[inds + i__] + work[indp + i__];
+ if (tmp == 0.f) {
+ tmp = eps * work[inds + i__];
+ }
+/* "<=" resolves ties in favour of the larger index. */
+ if (dabs(tmp) <= dabs(*mingma)) {
+ *mingma = tmp;
+ *r__ = i__ + 1;
+ }
+/* L110: */
+ }
+
+/* Compute the FP vector: solve N^T v = e_r */
+
+ isuppz[1] = *b1;
+ isuppz[2] = *bn;
+ z__[*r__] = 1.f;
+ *ztz = 1.f;
+
+/* Compute the FP vector upwards from R */
+
+ if (! sawnan1 && ! sawnan2) {
+ i__1 = *b1;
+ for (i__ = *r__ - 1; i__ >= i__1; --i__) {
+ z__[i__] = -(work[indlpl + i__] * z__[i__ + 1]);
+/* Truncate the vector (and shrink the support) once the next */
+/* component's contribution to the residual drops below GAPTOL. */
+ if (((r__1 = z__[i__], dabs(r__1)) + (r__2 = z__[i__ + 1], dabs(
+ r__2))) * (r__3 = ld[i__], dabs(r__3)) < *gaptol) {
+ z__[i__] = 0.f;
+ isuppz[1] = i__ + 1;
+ goto L220;
+ }
+ *ztz += z__[i__] * z__[i__];
+/* L210: */
+ }
+L220:
+ ;
+ } else {
+/* Run slower loop if NaN occurred. */
+/* A zero neighbour is stepped over with the two-term recurrence */
+/* based on the ratio LD(i+1)/LD(i). */
+ i__1 = *b1;
+ for (i__ = *r__ - 1; i__ >= i__1; --i__) {
+ if (z__[i__ + 1] == 0.f) {
+ z__[i__] = -(ld[i__ + 1] / ld[i__]) * z__[i__ + 2];
+ } else {
+ z__[i__] = -(work[indlpl + i__] * z__[i__ + 1]);
+ }
+ if (((r__1 = z__[i__], dabs(r__1)) + (r__2 = z__[i__ + 1], dabs(
+ r__2))) * (r__3 = ld[i__], dabs(r__3)) < *gaptol) {
+ z__[i__] = 0.f;
+ isuppz[1] = i__ + 1;
+ goto L240;
+ }
+ *ztz += z__[i__] * z__[i__];
+/* L230: */
+ }
+L240:
+ ;
+ }
+/* Compute the FP vector downwards from R in blocks of size BLKSIZ */
+ if (! sawnan1 && ! sawnan2) {
+ i__1 = *bn - 1;
+ for (i__ = *r__; i__ <= i__1; ++i__) {
+ z__[i__ + 1] = -(work[indumn + i__] * z__[i__]);
+ if (((r__1 = z__[i__], dabs(r__1)) + (r__2 = z__[i__ + 1], dabs(
+ r__2))) * (r__3 = ld[i__], dabs(r__3)) < *gaptol) {
+ z__[i__ + 1] = 0.f;
+ isuppz[2] = i__;
+ goto L260;
+ }
+ *ztz += z__[i__ + 1] * z__[i__ + 1];
+/* L250: */
+ }
+L260:
+ ;
+ } else {
+/* Run slower loop if NaN occurred. */
+ i__1 = *bn - 1;
+ for (i__ = *r__; i__ <= i__1; ++i__) {
+ if (z__[i__] == 0.f) {
+ z__[i__ + 1] = -(ld[i__ - 1] / ld[i__]) * z__[i__ - 1];
+ } else {
+ z__[i__ + 1] = -(work[indumn + i__] * z__[i__]);
+ }
+ if (((r__1 = z__[i__], dabs(r__1)) + (r__2 = z__[i__ + 1], dabs(
+ r__2))) * (r__3 = ld[i__], dabs(r__3)) < *gaptol) {
+ z__[i__ + 1] = 0.f;
+ isuppz[2] = i__;
+ goto L280;
+ }
+ *ztz += z__[i__ + 1] * z__[i__ + 1];
+/* L270: */
+ }
+L280:
+ ;
+ }
+
+/* Compute quantities for convergence test */
+
+ tmp = 1.f / *ztz;
+ *nrminv = sqrt(tmp);
+ *resid = dabs(*mingma) * *nrminv;
+ *rqcorr = *mingma * tmp;
+
+
+ return 0;
+
+/* End of SLAR1V */
+
+} /* slar1v_ */
diff --git a/contrib/libs/clapack/slar2v.c b/contrib/libs/clapack/slar2v.c
new file mode 100644
index 0000000000..ab6a104e3c
--- /dev/null
+++ b/contrib/libs/clapack/slar2v.c
@@ -0,0 +1,120 @@
+/* slar2v.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slar2v_(integer *n, real *x, real *y, real *z__, integer
+ *incx, real *c__, real *s, integer *incc)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ integer i__;
+ real t1, t2, t3, t4, t5, t6;
+ integer ic;
+ real ci, si;
+ integer ix;
+ real xi, yi, zi;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAR2V applies a vector of real plane rotations from both sides to */
+/* a sequence of 2-by-2 real symmetric matrices, defined by the elements */
+/* of the vectors x, y and z. For i = 1,2,...,n */
+
+/* ( x(i) z(i) ) := ( c(i) s(i) ) ( x(i) z(i) ) ( c(i) -s(i) ) */
+/* ( z(i) y(i) ) ( -s(i) c(i) ) ( z(i) y(i) ) ( s(i) c(i) ) */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of plane rotations to be applied. */
+
+/* X (input/output) REAL array, */
+/* dimension (1+(N-1)*INCX) */
+/* The vector x. */
+
+/* Y (input/output) REAL array, */
+/* dimension (1+(N-1)*INCX) */
+/* The vector y. */
+
+/* Z (input/output) REAL array, */
+/* dimension (1+(N-1)*INCX) */
+/* The vector z. */
+
+/* INCX (input) INTEGER */
+/* The increment between elements of X, Y and Z. INCX > 0. */
+
+/* C (input) REAL array, dimension (1+(N-1)*INCC) */
+/* The cosines of the plane rotations. */
+
+/* S (input) REAL array, dimension (1+(N-1)*INCC) */
+/* The sines of the plane rotations. */
+
+/* INCC (input) INTEGER */
+/* The increment between elements of C and S. INCC > 0. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --s;
+ --c__;
+ --z__;
+ --y;
+ --x;
+
+ /* Function Body */
+ ix = 1;
+ ic = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ xi = x[ix];
+ yi = y[ix];
+ zi = z__[ix];
+ ci = c__[ic];
+ si = s[ic];
+/* Form G' * A * G for the symmetric A = [xi zi; zi yi] using */
+/* the intermediate products t1..t6; only three of the four */
+/* entries are stored since the result stays symmetric. */
+ t1 = si * zi;
+ t2 = ci * zi;
+ t3 = t2 - si * xi;
+ t4 = t2 + si * yi;
+ t5 = ci * xi + t1;
+ t6 = ci * yi - t1;
+ x[ix] = ci * t5 + si * t4;
+ y[ix] = ci * t6 - si * t3;
+ z__[ix] = ci * t4 - si * t5;
+ ix += *incx;
+ ic += *incc;
+/* L10: */
+ }
+
+/* End of SLAR2V */
+
+ return 0;
+} /* slar2v_ */
diff --git a/contrib/libs/clapack/slarf.c b/contrib/libs/clapack/slarf.c
new file mode 100644
index 0000000000..dbef0828ef
--- /dev/null
+++ b/contrib/libs/clapack/slarf.c
@@ -0,0 +1,191 @@
+/* slarf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b4 = 1.f;
+static real c_b5 = 0.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int slarf_(char *side, integer *m, integer *n, real *v,
+ integer *incv, real *tau, real *c__, integer *ldc, real *work)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset;
+ real r__1;
+
+ /* Local variables */
+ integer i__;
+ logical applyleft;
+ extern /* Subroutine */ int sger_(integer *, integer *, real *, real *,
+ integer *, real *, integer *, real *, integer *);
+ extern logical lsame_(char *, char *);
+ integer lastc;
+ extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *);
+ integer lastv;
+ extern integer ilaslc_(integer *, integer *, real *, integer *), ilaslr_(
+ integer *, integer *, real *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLARF applies a real elementary reflector H to a real m by n matrix */
+/* C, from either the left or the right. H is represented in the form */
+
+/* H = I - tau * v * v' */
+
+/* where tau is a real scalar and v is a real vector. */
+
+/* If tau = 0, then H is taken to be the unit matrix. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': form H * C */
+/* = 'R': form C * H */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* V (input) REAL array, dimension */
+/* (1 + (M-1)*abs(INCV)) if SIDE = 'L' */
+/* or (1 + (N-1)*abs(INCV)) if SIDE = 'R' */
+/* The vector v in the representation of H. V is not used if */
+/* TAU = 0. */
+
+/* INCV (input) INTEGER */
+/* The increment between elements of v. INCV <> 0. */
+
+/* TAU (input) REAL */
+/* The value tau in the representation of H. */
+
+/* C (input/output) REAL array, dimension (LDC,N) */
+/* On entry, the m by n matrix C. */
+/* On exit, C is overwritten by the matrix H * C if SIDE = 'L', */
+/* or C * H if SIDE = 'R'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) REAL array, dimension */
+/* (N) if SIDE = 'L' */
+/* or (M) if SIDE = 'R' */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --v;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ applyleft = lsame_(side, "L");
+/* When TAU = 0, H is the identity: LASTV = LASTC = 0 below makes */
+/* the GEMV/GER calls no-ops and C is returned unchanged. */
+ lastv = 0;
+ lastc = 0;
+ if (*tau != 0.f) {
+/* Set up variables for scanning V. LASTV begins pointing to the end */
+/* of V. */
+ if (applyleft) {
+ lastv = *m;
+ } else {
+ lastv = *n;
+ }
+ if (*incv > 0) {
+ i__ = (lastv - 1) * *incv + 1;
+ } else {
+ i__ = 1;
+ }
+/* Look for the last non-zero row in V. */
+/* Trimming trailing zeros of V and trailing zero rows/columns of C */
+/* shrinks the submatrix the BLAS calls below operate on. */
+ while(lastv > 0 && v[i__] == 0.f) {
+ --lastv;
+ i__ -= *incv;
+ }
+ if (applyleft) {
+/* Scan for the last non-zero column in C(1:lastv,:). */
+ lastc = ilaslc_(&lastv, n, &c__[c_offset], ldc);
+ } else {
+/* Scan for the last non-zero row in C(:,1:lastv). */
+ lastc = ilaslr_(m, &lastv, &c__[c_offset], ldc);
+ }
+ }
+/* Note that lastc.eq.0 renders the BLAS operations null; no special */
+/* case is needed at this level. */
+ if (applyleft) {
+
+/* Form H * C */
+
+ if (lastv > 0) {
+
+/* w(1:lastc,1) := C(1:lastv,1:lastc)' * v(1:lastv,1) */
+
+ sgemv_("Transpose", &lastv, &lastc, &c_b4, &c__[c_offset], ldc, &
+ v[1], incv, &c_b5, &work[1], &c__1);
+
+/* C(1:lastv,1:lastc) := C(...) - v(1:lastv,1) * w(1:lastc,1)' */
+
+ r__1 = -(*tau);
+ sger_(&lastv, &lastc, &r__1, &v[1], incv, &work[1], &c__1, &c__[
+ c_offset], ldc);
+ }
+ } else {
+
+/* Form C * H */
+
+ if (lastv > 0) {
+
+/* w(1:lastc,1) := C(1:lastc,1:lastv) * v(1:lastv,1) */
+
+ sgemv_("No transpose", &lastc, &lastv, &c_b4, &c__[c_offset], ldc,
+ &v[1], incv, &c_b5, &work[1], &c__1);
+
+/* C(1:lastc,1:lastv) := C(...) - w(1:lastc,1) * v(1:lastv,1)' */
+
+ r__1 = -(*tau);
+ sger_(&lastc, &lastv, &r__1, &work[1], &c__1, &v[1], incv, &c__[
+ c_offset], ldc);
+ }
+ }
+ return 0;
+
+/* End of SLARF */
+
+} /* slarf_ */
diff --git a/contrib/libs/clapack/slarfb.c b/contrib/libs/clapack/slarfb.c
new file mode 100644
index 0000000000..3c8030a62a
--- /dev/null
+++ b/contrib/libs/clapack/slarfb.c
@@ -0,0 +1,773 @@
+/* slarfb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b14 = 1.f;
+static real c_b25 = -1.f;
+
+/* Subroutine */ int slarfb_(char *side, char *trans, char *direct, char *
+	storev, integer *m, integer *n, integer *k, real *v, integer *ldv, 
+	real *t, integer *ldt, real *c__, integer *ldc, real *work, integer *
+	ldwork)
+{
+    /* System generated locals */
+    integer c_dim1, c_offset, t_dim1, t_offset, v_dim1, v_offset, work_dim1, 
+	    work_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j;
+    extern logical lsame_(char *, char *);
+    integer lastc;
+    extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *, 
+	    integer *, real *, real *, integer *, real *, integer *, real *, 
+	    real *, integer *);
+    integer lastv;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *, 
+	    integer *), strmm_(char *, char *, char *, char *, integer *, 
+	    integer *, real *, real *, integer *, real *, integer *);
+    extern integer ilaslc_(integer *, integer *, real *, integer *), ilaslr_(
+	    integer *, integer *, real *, integer *);
+    char transt[1];
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLARFB applies a real block reflector H or its transpose H' to a */
+/* real m by n matrix C, from either the left or the right. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply H or H' from the Left */
+/* = 'R': apply H or H' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply H (No transpose) */
+/* = 'T': apply H' (Transpose) */
+
+/* DIRECT (input) CHARACTER*1 */
+/* Indicates how H is formed from a product of elementary */
+/* reflectors */
+/* = 'F': H = H(1) H(2) . . . H(k) (Forward) */
+/* = 'B': H = H(k) . . . H(2) H(1) (Backward) */
+
+/* STOREV (input) CHARACTER*1 */
+/* Indicates how the vectors which define the elementary */
+/* reflectors are stored: */
+/* = 'C': Columnwise */
+/* = 'R': Rowwise */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* K (input) INTEGER */
+/* The order of the matrix T (= the number of elementary */
+/* reflectors whose product defines the block reflector). */
+
+/* V (input) REAL array, dimension */
+/* (LDV,K) if STOREV = 'C' */
+/* (LDV,M) if STOREV = 'R' and SIDE = 'L' */
+/* (LDV,N) if STOREV = 'R' and SIDE = 'R' */
+/* The matrix V. See further details. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. */
+/* If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M); */
+/* if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N); */
+/* if STOREV = 'R', LDV >= K. */
+
+/* T (input) REAL array, dimension (LDT,K) */
+/* The triangular k by k matrix T in the representation of the */
+/* block reflector. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= K. */
+
+/* C (input/output) REAL array, dimension (LDC,N) */
+/* On entry, the m by n matrix C. */
+/* On exit, C is overwritten by H*C or H'*C or C*H or C*H'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) REAL array, dimension (LDWORK,K) */
+
+/* LDWORK (input) INTEGER */
+/* The leading dimension of the array WORK. */
+/* If SIDE = 'L', LDWORK >= max(1,N); */
+/* if SIDE = 'R', LDWORK >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+    /* Parameter adjustments */
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+    t_dim1 = *ldt;
+    t_offset = 1 + t_dim1;
+    t -= t_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+    work_dim1 = *ldwork;
+    work_offset = 1 + work_dim1;
+    work -= work_offset;
+
+    /* Function Body */
+    if (*m <= 0 || *n <= 0) {
+	return 0;
+    }
+
+    if (lsame_(trans, "N")) {
+	*(unsigned char *)transt = 'T';
+    } else {
+	*(unsigned char *)transt = 'N';
+    }
+
+    if (lsame_(storev, "C")) {
+
+	if (lsame_(direct, "F")) {
+
+/* Let V = ( V1 ) (first K rows) */
+/* ( V2 ) */
+/* where V1 is unit lower triangular. */
+
+	    if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C where C = ( C1 ) */
+/* ( C2 ) */
+
+/* Computing MAX */
+		i__1 = *k, i__2 = ilaslr_(m, k, &v[v_offset], ldv);
+		lastv = max(i__1,i__2);
+		lastc = ilaslc_(&lastv, n, &c__[c_offset], ldc);
+
+/* W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK) */
+
+/* W := C1' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    scopy_(&lastc, &c__[j + c_dim1], ldc, &work[j * work_dim1 
+			    + 1], &c__1);
+/* L10: */
+		}
+
+/* W := W * V1 */
+
+		strmm_("Right", "Lower", "No transpose", "Unit", &lastc, k, &
+			c_b14, &v[v_offset], ldv, &work[work_offset], ldwork);
+		if (lastv > *k) {
+
+/* W := W + C2'*V2 */
+
+		    i__1 = lastv - *k;
+		    sgemm_("Transpose", "No transpose", &lastc, k, &i__1, &
+			    c_b14, &c__[*k + 1 + c_dim1], ldc, &v[*k + 1 + 
+			    v_dim1], ldv, &c_b14, &work[work_offset], ldwork);
+		}
+
+/* W := W * T' or W * T */
+
+		strmm_("Right", "Upper", transt, "Non-unit", &lastc, k, &
+			c_b14, &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V * W' */
+
+		if (lastv > *k) {
+
+/* C2 := C2 - V2 * W' */
+
+		    i__1 = lastv - *k;
+		    sgemm_("No transpose", "Transpose", &i__1, &lastc, k, &
+			    c_b25, &v[*k + 1 + v_dim1], ldv, &work[
+			    work_offset], ldwork, &c_b14, &c__[*k + 1 + 
+			    c_dim1], ldc);
+		}
+
+/* W := W * V1' */
+
+		strmm_("Right", "Lower", "Transpose", "Unit", &lastc, k, &
+			c_b14, &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = lastc;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[j + i__ * c_dim1] -= work[i__ + j * work_dim1];
+/* L20: */
+		    }
+/* L30: */
+		}
+
+	    } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' where C = ( C1 C2 ) */
+
+/* Computing MAX */
+		i__1 = *k, i__2 = ilaslr_(n, k, &v[v_offset], ldv);
+		lastv = max(i__1,i__2);
+		lastc = ilaslr_(m, &lastv, &c__[c_offset], ldc);
+
+/* W := C * V = (C1*V1 + C2*V2) (stored in WORK) */
+
+/* W := C1 */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    scopy_(&lastc, &c__[j * c_dim1 + 1], &c__1, &work[j * 
+			    work_dim1 + 1], &c__1);
+/* L40: */
+		}
+
+/* W := W * V1 */
+
+		strmm_("Right", "Lower", "No transpose", "Unit", &lastc, k, &
+			c_b14, &v[v_offset], ldv, &work[work_offset], ldwork);
+		if (lastv > *k) {
+
+/* W := W + C2 * V2 */
+
+		    i__1 = lastv - *k;
+		    sgemm_("No transpose", "No transpose", &lastc, k, &i__1, &
+			    c_b14, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[*k + 
+			    1 + v_dim1], ldv, &c_b14, &work[work_offset], 
+			    ldwork);
+		}
+
+/* W := W * T or W * T' */
+
+		strmm_("Right", "Upper", trans, "Non-unit", &lastc, k, &c_b14, 
+			&t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V' */
+
+		if (lastv > *k) {
+
+/* C2 := C2 - W * V2' */
+
+		    i__1 = lastv - *k;
+		    sgemm_("No transpose", "Transpose", &lastc, &i__1, k, &
+			    c_b25, &work[work_offset], ldwork, &v[*k + 1 + 
+			    v_dim1], ldv, &c_b14, &c__[(*k + 1) * c_dim1 + 1], 
+			    ldc);
+		}
+
+/* W := W * V1' */
+
+		strmm_("Right", "Lower", "Transpose", "Unit", &lastc, k, &
+			c_b14, &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = lastc;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1];
+/* L50: */
+		    }
+/* L60: */
+		}
+	    }
+
+	} else {
+
+/* Let V = ( V1 ) */
+/* ( V2 ) (last K rows) */
+/* where V2 is unit upper triangular. */
+
+	    if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C where C = ( C1 ) */
+/* ( C2 ) */
+
+/* Computing MAX */
+		i__1 = *k, i__2 = ilaslr_(m, k, &v[v_offset], ldv);
+		lastv = max(i__1,i__2);
+		lastc = ilaslc_(&lastv, n, &c__[c_offset], ldc);
+
+/* W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK) */
+
+/* W := C2' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    scopy_(&lastc, &c__[lastv - *k + j + c_dim1], ldc, &work[
+			    j * work_dim1 + 1], &c__1);
+/* L70: */
+		}
+
+/* W := W * V2 */
+
+		strmm_("Right", "Upper", "No transpose", "Unit", &lastc, k, &
+			c_b14, &v[lastv - *k + 1 + v_dim1], ldv, &work[
+			work_offset], ldwork);
+		if (lastv > *k) {
+
+/* W := W + C1'*V1 */
+
+		    i__1 = lastv - *k;
+		    sgemm_("Transpose", "No transpose", &lastc, k, &i__1, &
+			    c_b14, &c__[c_offset], ldc, &v[v_offset], ldv, &
+			    c_b14, &work[work_offset], ldwork);
+		}
+
+/* W := W * T' or W * T */
+
+		strmm_("Right", "Lower", transt, "Non-unit", &lastc, k, &
+			c_b14, &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V * W' */
+
+		if (lastv > *k) {
+
+/* C1 := C1 - V1 * W' */
+
+		    i__1 = lastv - *k;
+		    sgemm_("No transpose", "Transpose", &i__1, &lastc, k, &
+			    c_b25, &v[v_offset], ldv, &work[work_offset], 
+			    ldwork, &c_b14, &c__[c_offset], ldc);
+		}
+
+/* W := W * V2' */
+
+		strmm_("Right", "Upper", "Transpose", "Unit", &lastc, k, &
+			c_b14, &v[lastv - *k + 1 + v_dim1], ldv, &work[
+			work_offset], ldwork);
+
+/* C2 := C2 - W' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = lastc;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[lastv - *k + j + i__ * c_dim1] -= work[i__ + j * 
+				work_dim1];
+/* L80: */
+		    }
+/* L90: */
+		}
+
+	    } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' where C = ( C1 C2 ) */
+
+/* Computing MAX */
+		i__1 = *k, i__2 = ilaslr_(n, k, &v[v_offset], ldv);
+		lastv = max(i__1,i__2);
+		lastc = ilaslr_(m, &lastv, &c__[c_offset], ldc);
+
+/* W := C * V = (C1*V1 + C2*V2) (stored in WORK) */
+
+/* W := C2 */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    scopy_(&lastc, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &
+			    work[j * work_dim1 + 1], &c__1);
+/* L100: */
+		}
+
+/* W := W * V2 */
+
+		strmm_("Right", "Upper", "No transpose", "Unit", &lastc, k, &
+			c_b14, &v[lastv - *k + 1 + v_dim1], ldv, &work[
+			work_offset], ldwork);
+		if (lastv > *k) {
+
+/* W := W + C1 * V1 */
+
+		    i__1 = lastv - *k;
+		    sgemm_("No transpose", "No transpose", &lastc, k, &i__1, &
+			    c_b14, &c__[c_offset], ldc, &v[v_offset], ldv, &
+			    c_b14, &work[work_offset], ldwork);
+		}
+
+/* W := W * T or W * T' */
+
+		strmm_("Right", "Lower", trans, "Non-unit", &lastc, k, &c_b14, 
+			&t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V' */
+
+		if (lastv > *k) {
+
+/* C1 := C1 - W * V1' */
+
+		    i__1 = lastv - *k;
+		    sgemm_("No transpose", "Transpose", &lastc, &i__1, k, &
+			    c_b25, &work[work_offset], ldwork, &v[v_offset], 
+			    ldv, &c_b14, &c__[c_offset], ldc);
+		}
+
+/* W := W * V2' */
+
+		strmm_("Right", "Upper", "Transpose", "Unit", &lastc, k, &
+			c_b14, &v[lastv - *k + 1 + v_dim1], ldv, &work[
+			work_offset], ldwork);
+
+/* C2 := C2 - W */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = lastc;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + (lastv - *k + j) * c_dim1] -= work[i__ + j *
+				 work_dim1];
+/* L110: */
+		    }
+/* L120: */
+		}
+	    }
+	}
+
+    } else if (lsame_(storev, "R")) {
+
+	if (lsame_(direct, "F")) {
+
+/* Let V = ( V1 V2 ) (V1: first K columns) */
+/* where V1 is unit upper triangular. */
+
+	    if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C where C = ( C1 ) */
+/* ( C2 ) */
+
+/* Computing MAX */
+		i__1 = *k, i__2 = ilaslc_(k, m, &v[v_offset], ldv);
+		lastv = max(i__1,i__2);
+		lastc = ilaslc_(&lastv, n, &c__[c_offset], ldc);
+
+/* W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK) */
+
+/* W := C1' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    scopy_(&lastc, &c__[j + c_dim1], ldc, &work[j * work_dim1 
+			    + 1], &c__1);
+/* L130: */
+		}
+
+/* W := W * V1' */
+
+		strmm_("Right", "Upper", "Transpose", "Unit", &lastc, k, &
+			c_b14, &v[v_offset], ldv, &work[work_offset], ldwork);
+		if (lastv > *k) {
+
+/* W := W + C2'*V2' */
+
+		    i__1 = lastv - *k;
+		    sgemm_("Transpose", "Transpose", &lastc, k, &i__1, &c_b14, 
+			    &c__[*k + 1 + c_dim1], ldc, &v[(*k + 1) * v_dim1 
+			    + 1], ldv, &c_b14, &work[work_offset], ldwork);
+		}
+
+/* W := W * T' or W * T */
+
+		strmm_("Right", "Upper", transt, "Non-unit", &lastc, k, &
+			c_b14, &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V' * W' */
+
+		if (lastv > *k) {
+
+/* C2 := C2 - V2' * W' */
+
+		    i__1 = lastv - *k;
+		    sgemm_("Transpose", "Transpose", &i__1, &lastc, k, &c_b25, 
+			    &v[(*k + 1) * v_dim1 + 1], ldv, &work[
+			    work_offset], ldwork, &c_b14, &c__[*k + 1 + 
+			    c_dim1], ldc);
+		}
+
+/* W := W * V1 */
+
+		strmm_("Right", "Upper", "No transpose", "Unit", &lastc, k, &
+			c_b14, &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = lastc;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[j + i__ * c_dim1] -= work[i__ + j * work_dim1];
+/* L140: */
+		    }
+/* L150: */
+		}
+
+	    } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' where C = ( C1 C2 ) */
+
+/* Computing MAX */
+		i__1 = *k, i__2 = ilaslc_(k, n, &v[v_offset], ldv);
+		lastv = max(i__1,i__2);
+		lastc = ilaslr_(m, &lastv, &c__[c_offset], ldc);
+
+/* W := C * V' = (C1*V1' + C2*V2') (stored in WORK) */
+
+/* W := C1 */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    scopy_(&lastc, &c__[j * c_dim1 + 1], &c__1, &work[j * 
+			    work_dim1 + 1], &c__1);
+/* L160: */
+		}
+
+/* W := W * V1' */
+
+		strmm_("Right", "Upper", "Transpose", "Unit", &lastc, k, &
+			c_b14, &v[v_offset], ldv, &work[work_offset], ldwork);
+		if (lastv > *k) {
+
+/* W := W + C2 * V2' */
+
+		    i__1 = lastv - *k;
+		    sgemm_("No transpose", "Transpose", &lastc, k, &i__1, &
+			    c_b14, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[(*k + 
+			    1) * v_dim1 + 1], ldv, &c_b14, &work[work_offset], 
+			    ldwork);
+		}
+
+/* W := W * T or W * T' */
+
+		strmm_("Right", "Upper", trans, "Non-unit", &lastc, k, &c_b14, 
+			&t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V */
+
+		if (lastv > *k) {
+
+/* C2 := C2 - W * V2 */
+
+		    i__1 = lastv - *k;
+		    sgemm_("No transpose", "No transpose", &lastc, &i__1, k, &
+			    c_b25, &work[work_offset], ldwork, &v[(*k + 1) * 
+			    v_dim1 + 1], ldv, &c_b14, &c__[(*k + 1) * c_dim1 
+			    + 1], ldc);
+		}
+
+/* W := W * V1 */
+
+		strmm_("Right", "Upper", "No transpose", "Unit", &lastc, k, &
+			c_b14, &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = lastc;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1];
+/* L170: */
+		    }
+/* L180: */
+		}
+
+	    }
+
+	} else {
+
+/* Let V = ( V1 V2 ) (V2: last K columns) */
+/* where V2 is unit lower triangular. */
+
+	    if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C where C = ( C1 ) */
+/* ( C2 ) */
+
+/* Computing MAX */
+		i__1 = *k, i__2 = ilaslc_(k, m, &v[v_offset], ldv);
+		lastv = max(i__1,i__2);
+		lastc = ilaslc_(&lastv, n, &c__[c_offset], ldc);
+
+/* W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK) */
+
+/* W := C2' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    scopy_(&lastc, &c__[lastv - *k + j + c_dim1], ldc, &work[
+			    j * work_dim1 + 1], &c__1);
+/* L190: */
+		}
+
+/* W := W * V2' */
+
+		strmm_("Right", "Lower", "Transpose", "Unit", &lastc, k, &
+			c_b14, &v[(lastv - *k + 1) * v_dim1 + 1], ldv, &work[
+			work_offset], ldwork);
+		if (lastv > *k) {
+
+/* W := W + C1'*V1' */
+
+		    i__1 = lastv - *k;
+		    sgemm_("Transpose", "Transpose", &lastc, k, &i__1, &c_b14, 
+			    &c__[c_offset], ldc, &v[v_offset], ldv, &c_b14, &
+			    work[work_offset], ldwork);
+		}
+
+/* W := W * T' or W * T */
+
+		strmm_("Right", "Lower", transt, "Non-unit", &lastc, k, &
+			c_b14, &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V' * W' */
+
+		if (lastv > *k) {
+
+/* C1 := C1 - V1' * W' */
+
+		    i__1 = lastv - *k;
+		    sgemm_("Transpose", "Transpose", &i__1, &lastc, k, &c_b25, 
+			    &v[v_offset], ldv, &work[work_offset], ldwork, &
+			    c_b14, &c__[c_offset], ldc);
+		}
+
+/* W := W * V2 */
+
+		strmm_("Right", "Lower", "No transpose", "Unit", &lastc, k, &
+			c_b14, &v[(lastv - *k + 1) * v_dim1 + 1], ldv, &work[
+			work_offset], ldwork);
+
+/* C2 := C2 - W' */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = lastc;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[lastv - *k + j + i__ * c_dim1] -= work[i__ + j * 
+				work_dim1];
+/* L200: */
+		    }
+/* L210: */
+		}
+
+	    } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' where C = ( C1 C2 ) */
+
+/* Computing MAX */
+		i__1 = *k, i__2 = ilaslc_(k, n, &v[v_offset], ldv);
+		lastv = max(i__1,i__2);
+		lastc = ilaslr_(m, &lastv, &c__[c_offset], ldc);
+
+/* W := C * V' = (C1*V1' + C2*V2') (stored in WORK) */
+
+/* W := C2 */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    scopy_(&lastc, &c__[(lastv - *k + j) * c_dim1 + 1], &c__1, 
+			    &work[j * work_dim1 + 1], &c__1);
+/* L220: */
+		}
+
+/* W := W * V2' */
+
+		strmm_("Right", "Lower", "Transpose", "Unit", &lastc, k, &
+			c_b14, &v[(lastv - *k + 1) * v_dim1 + 1], ldv, &work[
+			work_offset], ldwork);
+		if (lastv > *k) {
+
+/* W := W + C1 * V1' */
+
+		    i__1 = lastv - *k;
+		    sgemm_("No transpose", "Transpose", &lastc, k, &i__1, &
+			    c_b14, &c__[c_offset], ldc, &v[v_offset], ldv, &
+			    c_b14, &work[work_offset], ldwork);
+		}
+
+/* W := W * T or W * T' */
+
+		strmm_("Right", "Lower", trans, "Non-unit", &lastc, k, &c_b14, 
+			&t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V */
+
+		if (lastv > *k) {
+
+/* C1 := C1 - W * V1 */
+
+		    i__1 = lastv - *k;
+		    sgemm_("No transpose", "No transpose", &lastc, &i__1, k, &
+			    c_b25, &work[work_offset], ldwork, &v[v_offset], 
+			    ldv, &c_b14, &c__[c_offset], ldc);
+		}
+
+/* W := W * V2 */
+
+		strmm_("Right", "Lower", "No transpose", "Unit", &lastc, k, &
+			c_b14, &v[(lastv - *k + 1) * v_dim1 + 1], ldv, &work[
+			work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+		i__1 = *k;
+		for (j = 1; j <= i__1; ++j) {
+		    i__2 = lastc;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			c__[i__ + (lastv - *k + j) * c_dim1] -= work[i__ + j *
+				 work_dim1];
+/* L230: */
+		    }
+/* L240: */
+		}
+
+	    }
+
+	}
+    }
+
+    return 0;
+
+/* End of SLARFB */
+
+} /* slarfb_ */
diff --git a/contrib/libs/clapack/slarfg.c b/contrib/libs/clapack/slarfg.c
new file mode 100644
index 0000000000..0d31251015
--- /dev/null
+++ b/contrib/libs/clapack/slarfg.c
@@ -0,0 +1,169 @@
+/* slarfg.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slarfg_(integer *n, real *alpha, real *x, integer *incx, 
+	real *tau)
+{
+    /* System generated locals */
+    integer i__1;
+    real r__1;
+
+    /* Builtin functions */
+    double r_sign(real *, real *);
+
+    /* Local variables */
+    integer j, knt;
+    real beta;
+    extern doublereal snrm2_(integer *, real *, integer *);
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+    real xnorm;
+    extern doublereal slapy2_(real *, real *), slamch_(char *);
+    real safmin, rsafmn;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLARFG generates a real elementary reflector H of order n, such */
+/* that */
+
+/* H * ( alpha ) = ( beta ), H' * H = I. */
+/* ( x ) ( 0 ) */
+
+/* where alpha and beta are scalars, and x is an (n-1)-element real */
+/* vector. H is represented in the form */
+
+/* H = I - tau * ( 1 ) * ( 1 v' ) , */
+/* ( v ) */
+
+/* where tau is a real scalar and v is a real (n-1)-element */
+/* vector. */
+
+/* If the elements of x are all zero, then tau = 0 and H is taken to be */
+/* the unit matrix. */
+
+/* Otherwise 1 <= tau <= 2. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the elementary reflector. */
+
+/* ALPHA (input/output) REAL */
+/* On entry, the value alpha. */
+/* On exit, it is overwritten with the value beta. */
+
+/* X (input/output) REAL array, dimension */
+/* (1+(N-2)*abs(INCX)) */
+/* On entry, the vector x. */
+/* On exit, it is overwritten with the vector v. */
+
+/* INCX (input) INTEGER */
+/* The increment between elements of X. INCX > 0. */
+
+/* TAU (output) REAL */
+/* The value tau. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --x;
+
+    /* Function Body */
+    if (*n <= 1) {
+	*tau = 0.f;
+	return 0;
+    }
+
+    i__1 = *n - 1;
+    xnorm = snrm2_(&i__1, &x[1], incx);
+
+    if (xnorm == 0.f) {
+
+/* H = I */
+
+	*tau = 0.f;
+    } else {
+
+/* general case */
+
+	r__1 = slapy2_(alpha, &xnorm);
+	beta = -r_sign(&r__1, alpha);
+	safmin = slamch_("S") / slamch_("E");
+	knt = 0;
+	if (dabs(beta) < safmin) {
+
+/* XNORM, BETA may be inaccurate; scale X and recompute them */
+
+	    rsafmn = 1.f / safmin;
+L10:
+	    ++knt;
+	    i__1 = *n - 1;
+	    sscal_(&i__1, &rsafmn, &x[1], incx);
+	    beta *= rsafmn;
+	    *alpha *= rsafmn;
+	    if (dabs(beta) < safmin) {
+		goto L10;
+	    }
+
+/* New BETA is at most 1, at least SAFMIN */
+
+	    i__1 = *n - 1;
+	    xnorm = snrm2_(&i__1, &x[1], incx);
+	    r__1 = slapy2_(alpha, &xnorm);
+	    beta = -r_sign(&r__1, alpha);
+	}
+	*tau = (beta - *alpha) / beta;
+	i__1 = *n - 1;
+	r__1 = 1.f / (*alpha - beta);
+	sscal_(&i__1, &r__1, &x[1], incx);
+
+/* Undo the scaling above: rescale BETA by SAFMIN**KNT (the resulting */
+
+	i__1 = knt;
+	for (j = 1; j <= i__1; ++j) {
+	    beta *= safmin;
+/* L20: */
+	}
+	*alpha = beta;
+    }
+
+    return 0;
+
+/* End of SLARFG */
+
+} /* slarfg_ */
diff --git a/contrib/libs/clapack/slarfp.c b/contrib/libs/clapack/slarfp.c
new file mode 100644
index 0000000000..5db647ab0a
--- /dev/null
+++ b/contrib/libs/clapack/slarfp.c
@@ -0,0 +1,191 @@
+/* slarfp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slarfp_(integer *n, real *alpha, real *x, integer *incx, 
+	real *tau)
+{
+    /* System generated locals */
+    integer i__1;
+    real r__1;
+
+    /* Builtin functions */
+    double r_sign(real *, real *);
+
+    /* Local variables */
+    integer j, knt;
+    real beta;
+    extern doublereal snrm2_(integer *, real *, integer *);
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+    real xnorm;
+    extern doublereal slapy2_(real *, real *), slamch_(char *);
+    real safmin, rsafmn;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLARFP generates a real elementary reflector H of order n, such */
+/* that */
+
+/* H * ( alpha ) = ( beta ), H' * H = I. */
+/* ( x ) ( 0 ) */
+
+/* where alpha and beta are scalars, beta is non-negative, and x is */
+/* an (n-1)-element real vector. H is represented in the form */
+
+/* H = I - tau * ( 1 ) * ( 1 v' ) , */
+/* ( v ) */
+
+/* where tau is a real scalar and v is a real (n-1)-element */
+/* vector. */
+
+/* If the elements of x are all zero and alpha >= 0, then tau = 0 and H */
+/* is taken to be the unit matrix (if alpha < 0, tau = 2 and alpha := -alpha). */
+
+/* Otherwise 1 <= tau <= 2. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the elementary reflector. */
+
+/* ALPHA (input/output) REAL */
+/* On entry, the value alpha. */
+/* On exit, it is overwritten with the value beta. */
+
+/* X (input/output) REAL array, dimension */
+/* (1+(N-2)*abs(INCX)) */
+/* On entry, the vector x. */
+/* On exit, it is overwritten with the vector v. */
+
+/* INCX (input) INTEGER */
+/* The increment between elements of X. INCX > 0. */
+
+/* TAU (output) REAL */
+/* The value tau. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --x;
+
+    /* Function Body */
+    if (*n <= 0) {
+	*tau = 0.f;
+	return 0;
+    }
+
+    i__1 = *n - 1;
+    xnorm = snrm2_(&i__1, &x[1], incx);
+
+    if (xnorm == 0.f) {
+
+/* H = [+/-1, 0; I], sign chosen so ALPHA >= 0. */
+
+	if (*alpha >= 0.f) {
+/* When TAU.eq.ZERO, the vector is special-cased to be */
+/* all zeros in the application routines. We do not need */
+/* to clear it. */
+	    *tau = 0.f;
+	} else {
+/* However, the application routines rely on explicit */
+/* zero checks when TAU.ne.ZERO, and we must clear X. */
+	    *tau = 2.f;
+	    i__1 = *n - 1;
+	    for (j = 1; j <= i__1; ++j) {
+		x[(j - 1) * *incx + 1] = 0.f;
+	    }
+	    *alpha = -(*alpha);
+	}
+    } else {
+
+/* general case */
+
+	r__1 = slapy2_(alpha, &xnorm);
+	beta = r_sign(&r__1, alpha);
+	safmin = slamch_("S") / slamch_("E");
+	knt = 0;
+	if (dabs(beta) < safmin) {
+
+/* XNORM, BETA may be inaccurate; scale X and recompute them */
+
+	    rsafmn = 1.f / safmin;
+L10:
+	    ++knt;
+	    i__1 = *n - 1;
+	    sscal_(&i__1, &rsafmn, &x[1], incx);
+	    beta *= rsafmn;
+	    *alpha *= rsafmn;
+	    if (dabs(beta) < safmin) {
+		goto L10;
+	    }
+
+/* New BETA is at most 1, at least SAFMIN */
+
+	    i__1 = *n - 1;
+	    xnorm = snrm2_(&i__1, &x[1], incx);
+	    r__1 = slapy2_(alpha, &xnorm);
+	    beta = r_sign(&r__1, alpha);
+	}
+	*alpha += beta;
+	if (beta < 0.f) {
+	    beta = -beta;
+	    *tau = -(*alpha) / beta;
+	} else {
+	    *alpha = xnorm * (xnorm / *alpha);
+	    *tau = *alpha / beta;
+	    *alpha = -(*alpha);
+	}
+	i__1 = *n - 1;
+	r__1 = 1.f / *alpha;
+	sscal_(&i__1, &r__1, &x[1], incx);
+
+/* If BETA is subnormal, it may lose relative accuracy */
+
+	i__1 = knt;
+	for (j = 1; j <= i__1; ++j) {
+	    beta *= safmin;
+/* L20: */
+	}
+	*alpha = beta;
+    }
+
+    return 0;
+
+/* End of SLARFP */
+
+} /* slarfp_ */
diff --git a/contrib/libs/clapack/slarft.c b/contrib/libs/clapack/slarft.c
new file mode 100644
index 0000000000..4143ce4df4
--- /dev/null
+++ b/contrib/libs/clapack/slarft.c
@@ -0,0 +1,323 @@
+/* slarft.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b8 = 0.f;
+
+/* Subroutine */ int slarft_(char *direct, char *storev, integer *n, integer *
+	k, real *v, integer *ldv, real *tau, real *t, integer *ldt)
+{
+    /* System generated locals */
+    integer t_dim1, t_offset, v_dim1, v_offset, i__1, i__2, i__3;
+    real r__1;
+
+    /* Local variables */
+    integer i__, j, prevlastv;
+    real vii;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+	    real *, integer *, real *, integer *, real *, real *, integer *);
+    integer lastv;
+    extern /* Subroutine */ int strmv_(char *, char *, char *, integer *,
+	    real *, integer *, real *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLARFT forms the triangular factor T of a real block reflector H */
+/*  of order n, which is defined as a product of k elementary reflectors. */
+
+/*  If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular; */
+
+/*  If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. */
+
+/*  If STOREV = 'C', the vector which defines the elementary reflector */
+/*  H(i) is stored in the i-th column of the array V, and */
+
+/*     H  =  I - V * T * V' */
+
+/*  If STOREV = 'R', the vector which defines the elementary reflector */
+/*  H(i) is stored in the i-th row of the array V, and */
+
+/*     H  =  I - V' * T * V */
+
+/*  Arguments */
+/*  ========= */
+
+/*  DIRECT  (input) CHARACTER*1 */
+/*          Specifies the order in which the elementary reflectors are */
+/*          multiplied to form the block reflector: */
+/*          = 'F': H = H(1) H(2) . . . H(k) (Forward) */
+/*          = 'B': H = H(k) . . . H(2) H(1) (Backward) */
+
+/*  STOREV  (input) CHARACTER*1 */
+/*          Specifies how the vectors which define the elementary */
+/*          reflectors are stored (see also Further Details): */
+/*          = 'C': columnwise */
+/*          = 'R': rowwise */
+
+/*  N       (input) INTEGER */
+/*          The order of the block reflector H. N >= 0. */
+
+/*  K       (input) INTEGER */
+/*          The order of the triangular factor T (= the number of */
+/*          elementary reflectors). K >= 1. */
+
+/*  V       (input/output) REAL array, dimension */
+/*                               (LDV,K) if STOREV = 'C' */
+/*                               (LDV,N) if STOREV = 'R' */
+/*          The matrix V. See further details. */
+
+/*  LDV     (input) INTEGER */
+/*          The leading dimension of the array V. */
+/*          If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. */
+
+/*  TAU     (input) REAL array, dimension (K) */
+/*          TAU(i) must contain the scalar factor of the elementary */
+/*          reflector H(i). */
+
+/*  T       (output) REAL array, dimension (LDT,K) */
+/*          The k by k triangular factor T of the block reflector. */
+/*          If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is */
+/*          lower triangular. The rest of the array is not used. */
+
+/*  LDT     (input) INTEGER */
+/*          The leading dimension of the array T. LDT >= K. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  The shape of the matrix V and the storage of the vectors which define */
+/*  the H(i) is best illustrated by the following example with n = 5 and */
+/*  k = 3. The elements equal to 1 are not stored; the corresponding */
+/*  array elements are modified but restored on exit. The rest of the */
+/*  array is not used. */
+
+/*  DIRECT = 'F' and STOREV = 'C':         DIRECT = 'F' and STOREV = 'R': */
+
+/*               V = (  1       )                 V = (  1 v1 v1 v1 v1 ) */
+/*                   ( v1  1    )                     (     1 v2 v2 v2 ) */
+/*                   ( v1 v2  1 )                     (        1 v3 v3 ) */
+/*                   ( v1 v2 v3 ) */
+/*                   ( v1 v2 v3 ) */
+
+/*  DIRECT = 'B' and STOREV = 'C':         DIRECT = 'B' and STOREV = 'R': */
+
+/*               V = ( v1 v2 v3 )                 V = ( v1 v1  1       ) */
+/*                   ( v1 v2 v3 )                     ( v2 v2 v2  1    ) */
+/*                   (  1 v2 v3 )                     ( v3 v3 v3 v3  1 ) */
+/*                   (     1 v3 ) */
+/*                   (        1 ) */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Quick return if possible */
+
+    /* Parameter adjustments */
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+    --tau;
+    t_dim1 = *ldt;
+    t_offset = 1 + t_dim1;
+    t -= t_offset;
+
+    /* Function Body */
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (lsame_(direct, "F")) {
+	prevlastv = *n;
+	i__1 = *k;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    prevlastv = max(i__,prevlastv);
+	    if (tau[i__] == 0.f) {
+
+/*              H(i)  =  I */
+
+		i__2 = i__;
+		for (j = 1; j <= i__2; ++j) {
+		    t[j + i__ * t_dim1] = 0.f;
+/* L10: */
+		}
+	    } else {
+
+/*              general case */
+
+		vii = v[i__ + i__ * v_dim1];
+		v[i__ + i__ * v_dim1] = 1.f;
+		if (lsame_(storev, "C")) {
+/*                 Locate the last nonzero of V(i+1:n,i) so the GEMV below can stop early. */
+		    i__2 = i__ + 1;
+		    for (lastv = *n; lastv >= i__2; --lastv) {
+			if (v[lastv + i__ * v_dim1] != 0.f) {
+			    break;
+			}
+		    }
+		    j = min(lastv,prevlastv);
+
+/*                 T(1:i-1,i) := - tau(i) * V(i:j,1:i-1)' * V(i:j,i) */
+
+		    i__2 = j - i__ + 1;
+		    i__3 = i__ - 1;
+		    r__1 = -tau[i__];
+		    sgemv_("Transpose", &i__2, &i__3, &r__1, &v[i__ + v_dim1],
+			     ldv, &v[i__ + i__ * v_dim1], &c__1, &c_b8, &t[
+			    i__ * t_dim1 + 1], &c__1);
+		} else {
+/*                 Locate the last nonzero of V(i,i+1:n) so the GEMV below can stop early. */
+		    i__2 = i__ + 1;
+		    for (lastv = *n; lastv >= i__2; --lastv) {
+			if (v[i__ + lastv * v_dim1] != 0.f) {
+			    break;
+			}
+		    }
+		    j = min(lastv,prevlastv);
+
+/*                 T(1:i-1,i) := - tau(i) * V(1:i-1,i:j) * V(i,i:j)' */
+
+		    i__2 = i__ - 1;
+		    i__3 = j - i__ + 1;
+		    r__1 = -tau[i__];
+		    sgemv_("No transpose", &i__2, &i__3, &r__1, &v[i__ *
+			    v_dim1 + 1], ldv, &v[i__ + i__ * v_dim1], ldv, &
+			    c_b8, &t[i__ * t_dim1 + 1], &c__1);
+		}
+		v[i__ + i__ * v_dim1] = vii;
+
+/*              T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) */
+
+		i__2 = i__ - 1;
+		strmv_("Upper", "No transpose", "Non-unit", &i__2, &t[
+			t_offset], ldt, &t[i__ * t_dim1 + 1], &c__1);
+		t[i__ + i__ * t_dim1] = tau[i__];
+		if (i__ > 1) {
+		    prevlastv = max(prevlastv,lastv);
+		} else {
+		    prevlastv = lastv;
+		}
+	    }
+/* L20: */
+	}
+    } else {
+	prevlastv = 1;
+	for (i__ = *k; i__ >= 1; --i__) {
+	    if (tau[i__] == 0.f) {
+
+/*              H(i)  =  I */
+
+		i__1 = *k;
+		for (j = i__; j <= i__1; ++j) {
+		    t[j + i__ * t_dim1] = 0.f;
+/* L30: */
+		}
+	    } else {
+
+/*              general case */
+
+		if (i__ < *k) {
+		    if (lsame_(storev, "C")) {
+			vii = v[*n - *k + i__ + i__ * v_dim1];
+			v[*n - *k + i__ + i__ * v_dim1] = 1.f;
+/*                    Locate the first nonzero of V(1:n-k+i,i) so the GEMV below can start late. */
+			i__1 = i__ - 1;
+			for (lastv = 1; lastv <= i__1; ++lastv) {
+			    if (v[lastv + i__ * v_dim1] != 0.f) {
+				break;
+			    }
+			}
+			j = max(lastv,prevlastv);
+
+/*                    T(i+1:k,i) := */
+/*                            - tau(i) * V(j:n-k+i,i+1:k)' * V(j:n-k+i,i) */
+
+			i__1 = *n - *k + i__ - j + 1;
+			i__2 = *k - i__;
+			r__1 = -tau[i__];
+			sgemv_("Transpose", &i__1, &i__2, &r__1, &v[j + (i__
+				+ 1) * v_dim1], ldv, &v[j + i__ * v_dim1], &
+				c__1, &c_b8, &t[i__ + 1 + i__ * t_dim1], &
+				c__1);
+			v[*n - *k + i__ + i__ * v_dim1] = vii;
+		    } else {
+			vii = v[i__ + (*n - *k + i__) * v_dim1];
+			v[i__ + (*n - *k + i__) * v_dim1] = 1.f;
+/*                    Locate the first nonzero of V(i,1:n-k+i) so the GEMV below can start late. */
+			i__1 = i__ - 1;
+			for (lastv = 1; lastv <= i__1; ++lastv) {
+			    if (v[i__ + lastv * v_dim1] != 0.f) {
+				break;
+			    }
+			}
+			j = max(lastv,prevlastv);
+
+/*                    T(i+1:k,i) := */
+/*                            - tau(i) * V(i+1:k,j:n-k+i) * V(i,j:n-k+i)' */
+
+			i__1 = *k - i__;
+			i__2 = *n - *k + i__ - j + 1;
+			r__1 = -tau[i__];
+			sgemv_("No transpose", &i__1, &i__2, &r__1, &v[i__ +
+				1 + j * v_dim1], ldv, &v[i__ + j * v_dim1],
+				ldv, &c_b8, &t[i__ + 1 + i__ * t_dim1], &c__1);
+			v[i__ + (*n - *k + i__) * v_dim1] = vii;
+		    }
+
+/*                 T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) */
+
+		    i__1 = *k - i__;
+		    strmv_("Lower", "No transpose", "Non-unit", &i__1, &t[i__
+			    + 1 + (i__ + 1) * t_dim1], ldt, &t[i__ + 1 + i__ *
+			     t_dim1], &c__1)
+			    ;
+		    if (i__ > 1) {
+			prevlastv = min(prevlastv,lastv);
+		    } else {
+			prevlastv = lastv;
+		    }
+		}
+		t[i__ + i__ * t_dim1] = tau[i__];
+	    }
+/* L40: */
+	}
+    }
+    return 0;
+
+/*     End of SLARFT */
+
+} /* slarft_ */
diff --git a/contrib/libs/clapack/slarfx.c b/contrib/libs/clapack/slarfx.c
new file mode 100644
index 0000000000..59c5d0a004
--- /dev/null
+++ b/contrib/libs/clapack/slarfx.c
@@ -0,0 +1,729 @@
+/* slarfx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int slarfx_(char *side, integer *m, integer *n, real *v,
+	real *tau, real *c__, integer *ldc, real *work)
+{
+    /* System generated locals */
+    integer c_dim1, c_offset, i__1;
+
+    /* Local variables */
+    integer j;
+    real t1, t2, t3, t4, t5, t6, t7, t8, t9, v1, v2, v3, v4, v5, v6, v7, v8,
+	    v9, t10, v10, sum;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+	    integer *, real *, real *, integer *, real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLARFX applies a real elementary reflector H to a real m by n */
+/*  matrix C, from either the left or the right. H is represented in the */
+/*  form */
+
+/*        H = I - tau * v * v' */
+
+/*  where tau is a real scalar and v is a real vector. */
+
+/*  If tau = 0, then H is taken to be the unit matrix */
+
+/*  This version uses inline code if H has order < 11. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  SIDE    (input) CHARACTER*1 */
+/*          = 'L': form  H * C */
+/*          = 'R': form  C * H */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix C. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix C. */
+
+/*  V       (input) REAL array, dimension (M) if SIDE = 'L' */
+/*                                     or (N) if SIDE = 'R' */
+/*          The vector v in the representation of H. */
+
+/*  TAU     (input) REAL */
+/*          The value tau in the representation of H. */
+
+/*  C       (input/output) REAL array, dimension (LDC,N) */
+/*          On entry, the m by n matrix C. */
+/*          On exit, C is overwritten by the matrix H * C if SIDE = 'L', */
+/*          or C * H if SIDE = 'R'. */
+
+/*  LDC     (input) INTEGER */
+/*          The leading dimension of the array C. LDC >= max(1,M). */
+
+/*  WORK    (workspace) REAL array, dimension */
+/*                      (N) if SIDE = 'L' */
+/*                      or (M) if SIDE = 'R' */
+/*          WORK is not referenced if H has order < 11. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --v;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+    --work;
+
+    /* Function Body */
+    if (*tau == 0.f) {
+	return 0;
+    }
+    if (lsame_(side, "L")) {
+
+/*        Form  H * C, where H has order m. */
+
+	switch (*m) {
+	    case 1:  goto L10;
+	    case 2:  goto L30;
+	    case 3:  goto L50;
+	    case 4:  goto L70;
+	    case 5:  goto L90;
+	    case 6:  goto L110;
+	    case 7:  goto L130;
+	    case 8:  goto L150;
+	    case 9:  goto L170;
+	    case 10:  goto L190;
+	}
+
+/*        Code for general M */
+
+	slarf_(side, m, n, &v[1], &c__1, tau, &c__[c_offset], ldc, &work[1]);
+	goto L410;
+L10:
+
+/*        Special code for 1 x 1 Householder */
+
+	t1 = 1.f - *tau * v[1] * v[1];
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    c__[j * c_dim1 + 1] = t1 * c__[j * c_dim1 + 1];
+/* L20: */
+	}
+	goto L410;
+L30:
+
+/*        Special code for 2 x 2 Householder */
+
+	v1 = v[1];
+	t1 = *tau * v1;
+	v2 = v[2];
+	t2 = *tau * v2;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2];
+	    c__[j * c_dim1 + 1] -= sum * t1;
+	    c__[j * c_dim1 + 2] -= sum * t2;
+/* L40: */
+	}
+	goto L410;
+L50:
+
+/*        Special code for 3 x 3 Householder */
+
+	v1 = v[1];
+	t1 = *tau * v1;
+	v2 = v[2];
+	t2 = *tau * v2;
+	v3 = v[3];
+	t3 = *tau * v3;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+		    c__[j * c_dim1 + 3];
+	    c__[j * c_dim1 + 1] -= sum * t1;
+	    c__[j * c_dim1 + 2] -= sum * t2;
+	    c__[j * c_dim1 + 3] -= sum * t3;
+/* L60: */
+	}
+	goto L410;
+L70:
+
+/*        Special code for 4 x 4 Householder */
+
+	v1 = v[1];
+	t1 = *tau * v1;
+	v2 = v[2];
+	t2 = *tau * v2;
+	v3 = v[3];
+	t3 = *tau * v3;
+	v4 = v[4];
+	t4 = *tau * v4;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+		    c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4];
+	    c__[j * c_dim1 + 1] -= sum * t1;
+	    c__[j * c_dim1 + 2] -= sum * t2;
+	    c__[j * c_dim1 + 3] -= sum * t3;
+	    c__[j * c_dim1 + 4] -= sum * t4;
+/* L80: */
+	}
+	goto L410;
+L90:
+
+/*        Special code for 5 x 5 Householder */
+
+	v1 = v[1];
+	t1 = *tau * v1;
+	v2 = v[2];
+	t2 = *tau * v2;
+	v3 = v[3];
+	t3 = *tau * v3;
+	v4 = v[4];
+	t4 = *tau * v4;
+	v5 = v[5];
+	t5 = *tau * v5;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+		    c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+		    j * c_dim1 + 5];
+	    c__[j * c_dim1 + 1] -= sum * t1;
+	    c__[j * c_dim1 + 2] -= sum * t2;
+	    c__[j * c_dim1 + 3] -= sum * t3;
+	    c__[j * c_dim1 + 4] -= sum * t4;
+	    c__[j * c_dim1 + 5] -= sum * t5;
+/* L100: */
+	}
+	goto L410;
+L110:
+
+/*        Special code for 6 x 6 Householder */
+
+	v1 = v[1];
+	t1 = *tau * v1;
+	v2 = v[2];
+	t2 = *tau * v2;
+	v3 = v[3];
+	t3 = *tau * v3;
+	v4 = v[4];
+	t4 = *tau * v4;
+	v5 = v[5];
+	t5 = *tau * v5;
+	v6 = v[6];
+	t6 = *tau * v6;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+		    c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+		    j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6];
+	    c__[j * c_dim1 + 1] -= sum * t1;
+	    c__[j * c_dim1 + 2] -= sum * t2;
+	    c__[j * c_dim1 + 3] -= sum * t3;
+	    c__[j * c_dim1 + 4] -= sum * t4;
+	    c__[j * c_dim1 + 5] -= sum * t5;
+	    c__[j * c_dim1 + 6] -= sum * t6;
+/* L120: */
+	}
+	goto L410;
+L130:
+
+/*        Special code for 7 x 7 Householder */
+
+	v1 = v[1];
+	t1 = *tau * v1;
+	v2 = v[2];
+	t2 = *tau * v2;
+	v3 = v[3];
+	t3 = *tau * v3;
+	v4 = v[4];
+	t4 = *tau * v4;
+	v5 = v[5];
+	t5 = *tau * v5;
+	v6 = v[6];
+	t6 = *tau * v6;
+	v7 = v[7];
+	t7 = *tau * v7;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+		    c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+		    j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+		    c_dim1 + 7];
+	    c__[j * c_dim1 + 1] -= sum * t1;
+	    c__[j * c_dim1 + 2] -= sum * t2;
+	    c__[j * c_dim1 + 3] -= sum * t3;
+	    c__[j * c_dim1 + 4] -= sum * t4;
+	    c__[j * c_dim1 + 5] -= sum * t5;
+	    c__[j * c_dim1 + 6] -= sum * t6;
+	    c__[j * c_dim1 + 7] -= sum * t7;
+/* L140: */
+	}
+	goto L410;
+L150:
+
+/*        Special code for 8 x 8 Householder */
+
+	v1 = v[1];
+	t1 = *tau * v1;
+	v2 = v[2];
+	t2 = *tau * v2;
+	v3 = v[3];
+	t3 = *tau * v3;
+	v4 = v[4];
+	t4 = *tau * v4;
+	v5 = v[5];
+	t5 = *tau * v5;
+	v6 = v[6];
+	t6 = *tau * v6;
+	v7 = v[7];
+	t7 = *tau * v7;
+	v8 = v[8];
+	t8 = *tau * v8;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+		    c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+		    j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+		    c_dim1 + 7] + v8 * c__[j * c_dim1 + 8];
+	    c__[j * c_dim1 + 1] -= sum * t1;
+	    c__[j * c_dim1 + 2] -= sum * t2;
+	    c__[j * c_dim1 + 3] -= sum * t3;
+	    c__[j * c_dim1 + 4] -= sum * t4;
+	    c__[j * c_dim1 + 5] -= sum * t5;
+	    c__[j * c_dim1 + 6] -= sum * t6;
+	    c__[j * c_dim1 + 7] -= sum * t7;
+	    c__[j * c_dim1 + 8] -= sum * t8;
+/* L160: */
+	}
+	goto L410;
+L170:
+
+/*        Special code for 9 x 9 Householder */
+
+	v1 = v[1];
+	t1 = *tau * v1;
+	v2 = v[2];
+	t2 = *tau * v2;
+	v3 = v[3];
+	t3 = *tau * v3;
+	v4 = v[4];
+	t4 = *tau * v4;
+	v5 = v[5];
+	t5 = *tau * v5;
+	v6 = v[6];
+	t6 = *tau * v6;
+	v7 = v[7];
+	t7 = *tau * v7;
+	v8 = v[8];
+	t8 = *tau * v8;
+	v9 = v[9];
+	t9 = *tau * v9;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+		    c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+		    j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+		    c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j *
+		    c_dim1 + 9];
+	    c__[j * c_dim1 + 1] -= sum * t1;
+	    c__[j * c_dim1 + 2] -= sum * t2;
+	    c__[j * c_dim1 + 3] -= sum * t3;
+	    c__[j * c_dim1 + 4] -= sum * t4;
+	    c__[j * c_dim1 + 5] -= sum * t5;
+	    c__[j * c_dim1 + 6] -= sum * t6;
+	    c__[j * c_dim1 + 7] -= sum * t7;
+	    c__[j * c_dim1 + 8] -= sum * t8;
+	    c__[j * c_dim1 + 9] -= sum * t9;
+/* L180: */
+	}
+	goto L410;
+L190:
+
+/*        Special code for 10 x 10 Householder */
+
+	v1 = v[1];
+	t1 = *tau * v1;
+	v2 = v[2];
+	t2 = *tau * v2;
+	v3 = v[3];
+	t3 = *tau * v3;
+	v4 = v[4];
+	t4 = *tau * v4;
+	v5 = v[5];
+	t5 = *tau * v5;
+	v6 = v[6];
+	t6 = *tau * v6;
+	v7 = v[7];
+	t7 = *tau * v7;
+	v8 = v[8];
+	t8 = *tau * v8;
+	v9 = v[9];
+	t9 = *tau * v9;
+	v10 = v[10];
+	t10 = *tau * v10;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+		    c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+		    j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+		    c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j *
+		    c_dim1 + 9] + v10 * c__[j * c_dim1 + 10];
+	    c__[j * c_dim1 + 1] -= sum * t1;
+	    c__[j * c_dim1 + 2] -= sum * t2;
+	    c__[j * c_dim1 + 3] -= sum * t3;
+	    c__[j * c_dim1 + 4] -= sum * t4;
+	    c__[j * c_dim1 + 5] -= sum * t5;
+	    c__[j * c_dim1 + 6] -= sum * t6;
+	    c__[j * c_dim1 + 7] -= sum * t7;
+	    c__[j * c_dim1 + 8] -= sum * t8;
+	    c__[j * c_dim1 + 9] -= sum * t9;
+	    c__[j * c_dim1 + 10] -= sum * t10;
+/* L200: */
+	}
+	goto L410;
+    } else {
+
+/*        Form  C * H, where H has order n. */
+
+	switch (*n) {
+	    case 1:  goto L210;
+	    case 2:  goto L230;
+	    case 3:  goto L250;
+	    case 4:  goto L270;
+	    case 5:  goto L290;
+	    case 6:  goto L310;
+	    case 7:  goto L330;
+	    case 8:  goto L350;
+	    case 9:  goto L370;
+	    case 10:  goto L390;
+	}
+
+/*        Code for general N */
+
+	slarf_(side, m, n, &v[1], &c__1, tau, &c__[c_offset], ldc, &work[1]);
+	goto L410;
+L210:
+
+/*        Special code for 1 x 1 Householder */
+
+	t1 = 1.f - *tau * v[1] * v[1];
+	i__1 = *m;
+	for (j = 1; j <= i__1; ++j) {
+	    c__[j + c_dim1] = t1 * c__[j + c_dim1];
+/* L220: */
+	}
+	goto L410;
+L230:
+
+/*        Special code for 2 x 2 Householder */
+
+	v1 = v[1];
+	t1 = *tau * v1;
+	v2 = v[2];
+	t2 = *tau * v2;
+	i__1 = *m;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)];
+	    c__[j + c_dim1] -= sum * t1;
+	    c__[j + (c_dim1 << 1)] -= sum * t2;
+/* L240: */
+	}
+	goto L410;
+L250:
+
+/*        Special code for 3 x 3 Householder */
+
+	v1 = v[1];
+	t1 = *tau * v1;
+	v2 = v[2];
+	t2 = *tau * v2;
+	v3 = v[3];
+	t3 = *tau * v3;
+	i__1 = *m;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 *
+		    c__[j + c_dim1 * 3];
+	    c__[j + c_dim1] -= sum * t1;
+	    c__[j + (c_dim1 << 1)] -= sum * t2;
+	    c__[j + c_dim1 * 3] -= sum * t3;
+/* L260: */
+	}
+	goto L410;
+L270:
+
+/*        Special code for 4 x 4 Householder */
+
+	v1 = v[1];
+	t1 = *tau * v1;
+	v2 = v[2];
+	t2 = *tau * v2;
+	v3 = v[3];
+	t3 = *tau * v3;
+	v4 = v[4];
+	t4 = *tau * v4;
+	i__1 = *m;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 *
+		    c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)];
+	    c__[j + c_dim1] -= sum * t1;
+	    c__[j + (c_dim1 << 1)] -= sum * t2;
+	    c__[j + c_dim1 * 3] -= sum * t3;
+	    c__[j + (c_dim1 << 2)] -= sum * t4;
+/* L280: */
+	}
+	goto L410;
+L290:
+
+/*        Special code for 5 x 5 Householder */
+
+	v1 = v[1];
+	t1 = *tau * v1;
+	v2 = v[2];
+	t2 = *tau * v2;
+	v3 = v[3];
+	t3 = *tau * v3;
+	v4 = v[4];
+	t4 = *tau * v4;
+	v5 = v[5];
+	t5 = *tau * v5;
+	i__1 = *m;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 *
+		    c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 *
+		    c__[j + c_dim1 * 5];
+	    c__[j + c_dim1] -= sum * t1;
+	    c__[j + (c_dim1 << 1)] -= sum * t2;
+	    c__[j + c_dim1 * 3] -= sum * t3;
+	    c__[j + (c_dim1 << 2)] -= sum * t4;
+	    c__[j + c_dim1 * 5] -= sum * t5;
+/* L300: */
+	}
+	goto L410;
+L310:
+
+/*        Special code for 6 x 6 Householder */
+
+	v1 = v[1];
+	t1 = *tau * v1;
+	v2 = v[2];
+	t2 = *tau * v2;
+	v3 = v[3];
+	t3 = *tau * v3;
+	v4 = v[4];
+	t4 = *tau * v4;
+	v5 = v[5];
+	t5 = *tau * v5;
+	v6 = v[6];
+	t6 = *tau * v6;
+	i__1 = *m;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 *
+		    c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 *
+		    c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6];
+	    c__[j + c_dim1] -= sum * t1;
+	    c__[j + (c_dim1 << 1)] -= sum * t2;
+	    c__[j + c_dim1 * 3] -= sum * t3;
+	    c__[j + (c_dim1 << 2)] -= sum * t4;
+	    c__[j + c_dim1 * 5] -= sum * t5;
+	    c__[j + c_dim1 * 6] -= sum * t6;
+/* L320: */
+	}
+	goto L410;
+L330:
+
+/*        Special code for 7 x 7 Householder */
+
+	v1 = v[1];
+	t1 = *tau * v1;
+	v2 = v[2];
+	t2 = *tau * v2;
+	v3 = v[3];
+	t3 = *tau * v3;
+	v4 = v[4];
+	t4 = *tau * v4;
+	v5 = v[5];
+	t5 = *tau * v5;
+	v6 = v[6];
+	t6 = *tau * v6;
+	v7 = v[7];
+	t7 = *tau * v7;
+	i__1 = *m;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 *
+		    c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 *
+		    c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[
+		    j + c_dim1 * 7];
+	    c__[j + c_dim1] -= sum * t1;
+	    c__[j + (c_dim1 << 1)] -= sum * t2;
+	    c__[j + c_dim1 * 3] -= sum * t3;
+	    c__[j + (c_dim1 << 2)] -= sum * t4;
+	    c__[j + c_dim1 * 5] -= sum * t5;
+	    c__[j + c_dim1 * 6] -= sum * t6;
+	    c__[j + c_dim1 * 7] -= sum * t7;
+/* L340: */
+	}
+	goto L410;
+L350:
+
+/*        Special code for 8 x 8 Householder */
+
+	v1 = v[1];
+	t1 = *tau * v1;
+	v2 = v[2];
+	t2 = *tau * v2;
+	v3 = v[3];
+	t3 = *tau * v3;
+	v4 = v[4];
+	t4 = *tau * v4;
+	v5 = v[5];
+	t5 = *tau * v5;
+	v6 = v[6];
+	t6 = *tau * v6;
+	v7 = v[7];
+	t7 = *tau * v7;
+	v8 = v[8];
+	t8 = *tau * v8;
+	i__1 = *m;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 *
+		    c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 *
+		    c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[
+		    j + c_dim1 * 7] + v8 * c__[j + (c_dim1 << 3)];
+	    c__[j + c_dim1] -= sum * t1;
+	    c__[j + (c_dim1 << 1)] -= sum * t2;
+	    c__[j + c_dim1 * 3] -= sum * t3;
+	    c__[j + (c_dim1 << 2)] -= sum * t4;
+	    c__[j + c_dim1 * 5] -= sum * t5;
+	    c__[j + c_dim1 * 6] -= sum * t6;
+	    c__[j + c_dim1 * 7] -= sum * t7;
+	    c__[j + (c_dim1 << 3)] -= sum * t8;
+/* L360: */
+	}
+	goto L410;
+L370:
+
+/*        Special code for 9 x 9 Householder */
+
+	v1 = v[1];
+	t1 = *tau * v1;
+	v2 = v[2];
+	t2 = *tau * v2;
+	v3 = v[3];
+	t3 = *tau * v3;
+	v4 = v[4];
+	t4 = *tau * v4;
+	v5 = v[5];
+	t5 = *tau * v5;
+	v6 = v[6];
+	t6 = *tau * v6;
+	v7 = v[7];
+	t7 = *tau * v7;
+	v8 = v[8];
+	t8 = *tau * v8;
+	v9 = v[9];
+	t9 = *tau * v9;
+	i__1 = *m;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 *
+		    c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 *
+		    c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[
+		    j + c_dim1 * 7] + v8 * c__[j + (c_dim1 << 3)] + v9 * c__[
+		    j + c_dim1 * 9];
+	    c__[j + c_dim1] -= sum * t1;
+	    c__[j + (c_dim1 << 1)] -= sum * t2;
+	    c__[j + c_dim1 * 3] -= sum * t3;
+	    c__[j + (c_dim1 << 2)] -= sum * t4;
+	    c__[j + c_dim1 * 5] -= sum * t5;
+	    c__[j + c_dim1 * 6] -= sum * t6;
+	    c__[j + c_dim1 * 7] -= sum * t7;
+	    c__[j + (c_dim1 << 3)] -= sum * t8;
+	    c__[j + c_dim1 * 9] -= sum * t9;
+/* L380: */
+	}
+	goto L410;
+L390:
+
+/*        Special code for 10 x 10 Householder */
+
+	v1 = v[1];
+	t1 = *tau * v1;
+	v2 = v[2];
+	t2 = *tau * v2;
+	v3 = v[3];
+	t3 = *tau * v3;
+	v4 = v[4];
+	t4 = *tau * v4;
+	v5 = v[5];
+	t5 = *tau * v5;
+	v6 = v[6];
+	t6 = *tau * v6;
+	v7 = v[7];
+	t7 = *tau * v7;
+	v8 = v[8];
+	t8 = *tau * v8;
+	v9 = v[9];
+	t9 = *tau * v9;
+	v10 = v[10];
+	t10 = *tau * v10;
+	i__1 = *m;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 *
+		    c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 *
+		    c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[
+		    j + c_dim1 * 7] + v8 * c__[j + (c_dim1 << 3)] + v9 * c__[
+		    j + c_dim1 * 9] + v10 * c__[j + c_dim1 * 10];
+	    c__[j + c_dim1] -= sum * t1;
+	    c__[j + (c_dim1 << 1)] -= sum * t2;
+	    c__[j + c_dim1 * 3] -= sum * t3;
+	    c__[j + (c_dim1 << 2)] -= sum * t4;
+	    c__[j + c_dim1 * 5] -= sum * t5;
+	    c__[j + c_dim1 * 6] -= sum * t6;
+	    c__[j + c_dim1 * 7] -= sum * t7;
+	    c__[j + (c_dim1 << 3)] -= sum * t8;
+	    c__[j + c_dim1 * 9] -= sum * t9;
+	    c__[j + c_dim1 * 10] -= sum * t10;
+/* L400: */
+	}
+	goto L410;
+    }
+L410:
+    return 0;
+
+/*     End of SLARFX */
+
+} /* slarfx_ */
diff --git a/contrib/libs/clapack/slargv.c b/contrib/libs/clapack/slargv.c
new file mode 100644
index 0000000000..32e0e332cc
--- /dev/null
+++ b/contrib/libs/clapack/slargv.c
@@ -0,0 +1,130 @@
+/* slargv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slargv_(integer *n, real *x, integer *incx, real *y,
+	integer *incy, real *c__, integer *incc)
+{
+    /* System generated locals */
+    integer i__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    real f, g;
+    integer i__;
+    real t;
+    integer ic, ix, iy;
+    real tt;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLARGV generates a vector of real plane rotations, determined by */
+/*  elements of the real vectors x and y. For i = 1,2,...,n */
+
+/*     (  c(i)  s(i) ) ( x(i) ) = ( a(i) ) */
+/*     ( -s(i)  c(i) ) ( y(i) ) = (   0  ) */
+
+/*  Arguments */
+/*  ========= */
+
+/*  N       (input) INTEGER */
+/*          The number of plane rotations to be generated. */
+
+/*  X       (input/output) REAL array, */
+/*                         dimension (1+(N-1)*INCX) */
+/*          On entry, the vector x. */
+/*          On exit, x(i) is overwritten by a(i), for i = 1,...,n. */
+
+/*  INCX    (input) INTEGER */
+/*          The increment between elements of X. INCX > 0. */
+
+/*  Y       (input/output) REAL array, */
+/*                         dimension (1+(N-1)*INCY) */
+/*          On entry, the vector y. */
+/*          On exit, the sines of the plane rotations. */
+
+/*  INCY    (input) INTEGER */
+/*          The increment between elements of Y. INCY > 0. */
+
+/*  C       (output) REAL array, dimension (1+(N-1)*INCC) */
+/*          The cosines of the plane rotations. */
+
+/*  INCC    (input) INTEGER */
+/*          The increment between elements of C. INCC > 0. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --c__;
+    --y;
+    --x;
+
+    /* Function Body: build each rotation, branching on |f| vs |g| so the */
+    /* ratio t has magnitude <= 1 (avoids overflow in t*t + 1) */
+    ix = 1;
+    iy = 1;
+    ic = 1;
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	f = x[ix];
+	g = y[iy];
+	if (g == 0.f) {
+	    c__[ic] = 1.f;
+	} else if (f == 0.f) {
+	    c__[ic] = 0.f;
+	    y[iy] = 1.f;
+	    x[ix] = g;
+	} else if (dabs(f) > dabs(g)) {
+	    t = g / f;
+	    tt = sqrt(t * t + 1.f);
+	    c__[ic] = 1.f / tt;
+	    y[iy] = t * c__[ic];
+	    x[ix] = f * tt;
+	} else {
+	    t = f / g;
+	    tt = sqrt(t * t + 1.f);
+	    y[iy] = 1.f / tt;
+	    c__[ic] = t * y[iy];
+	    x[ix] = g * tt;
+	}
+	ic += *incc;
+	iy += *incy;
+	ix += *incx;
+/* L10: end of loop over the n rotations */
+    }
+    return 0;
+
+/*     End of SLARGV */
+
+} /* slargv_ */
diff --git a/contrib/libs/clapack/slarnv.c b/contrib/libs/clapack/slarnv.c
new file mode 100644
index 0000000000..9ea4b19033
--- /dev/null
+++ b/contrib/libs/clapack/slarnv.c
@@ -0,0 +1,146 @@
+/* slarnv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slarnv_(integer *idist, integer *iseed, integer *n, real
+	*x)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3;
+
+    /* Builtin functions */
+    double log(doublereal), sqrt(doublereal), cos(doublereal);
+
+    /* Local variables */
+    integer i__;
+    real u[128];
+    integer il, iv, il2;
+    extern /* Subroutine */ int slaruv_(integer *, integer *, real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLARNV returns a vector of n random real numbers from a uniform or */
+/*  normal distribution. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  IDIST   (input) INTEGER */
+/*          Specifies the distribution of the random numbers: */
+/*          = 1:  uniform (0,1) */
+/*          = 2:  uniform (-1,1) */
+/*          = 3:  normal (0,1) */
+
+/*  ISEED   (input/output) INTEGER array, dimension (4) */
+/*          On entry, the seed of the random number generator; the array */
+/*          elements must be between 0 and 4095, and ISEED(4) must be */
+/*          odd. */
+/*          On exit, the seed is updated. */
+
+/*  N       (input) INTEGER */
+/*          The number of random numbers to be generated. */
+
+/*  X       (output) REAL array, dimension (N) */
+/*          The generated random numbers. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  This routine calls the auxiliary routine SLARUV to generate random */
+/*  real numbers from a uniform (0,1) distribution, in batches of up to */
+/*  128 using vectorisable code. The Box-Muller method is used to */
+/*  transform numbers from a uniform to a normal distribution. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --x;
+    --iseed;
+
+    /* Function Body */
+    i__1 = *n;
+    for (iv = 1; iv <= i__1; iv += 64) {
+/* Computing MIN */
+	i__2 = 64, i__3 = *n - iv + 1;
+	il = min(i__2,i__3);
+	if (*idist == 3) {
+	    il2 = il << 1;
+	} else {
+	    il2 = il;
+	}
+
+/*        Call SLARUV to generate IL2 numbers from a uniform (0,1) */
+/*        distribution (IL2 <= LV) */
+
+	slaruv_(&iseed[1], &il2, u);
+
+	if (*idist == 1) {
+
+/*           Copy generated numbers */
+
+	    i__2 = il;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		x[iv + i__ - 1] = u[i__ - 1];
+/* L10: */
+	    }
+	} else if (*idist == 2) {
+
+/*           Convert generated numbers to uniform (-1,1) distribution */
+
+	    i__2 = il;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		x[iv + i__ - 1] = u[i__ - 1] * 2.f - 1.f;
+/* L20: */
+	    }
+	} else if (*idist == 3) {
+
+/*           Convert generated numbers to normal (0,1) via the Box-Muller */
+/*           transform, consuming two uniform variates per output value */
+
+	    i__2 = il;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		x[iv + i__ - 1] = sqrt(log(u[(i__ << 1) - 2]) * -2.f) * cos(u[
+			(i__ << 1) - 1] * 6.2831853071795864769252867663f);
+/* L30: */
+	    }
+	}
+/* L40: next batch of up to 64 output values */
+    }
+    return 0;
+
+/*     End of SLARNV */
+
+} /* slarnv_ */
diff --git a/contrib/libs/clapack/slarra.c b/contrib/libs/clapack/slarra.c
new file mode 100644
index 0000000000..ecff65ac3b
--- /dev/null
+++ b/contrib/libs/clapack/slarra.c
@@ -0,0 +1,155 @@
+/* slarra.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slarra_(integer *n, real *d__, real *e, real *e2, real *
+	spltol, real *tnrm, integer *nsplit, integer *isplit, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__;
+    real tmp1, eabs;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Compute the splitting points with threshold SPLTOL. */
+/* SLARRA sets any "small" off-diagonal elements to zero. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N > 0. */
+
+/* D (input) REAL array, dimension (N) */
+/* On entry, the N diagonal elements of the tridiagonal */
+/* matrix T. */
+
+/* E (input/output) REAL array, dimension (N) */
+/* On entry, the first (N-1) entries contain the subdiagonal */
+/* elements of the tridiagonal matrix T; E(N) need not be set. */
+/* On exit, the entries E( ISPLIT( I ) ), 1 <= I <= NSPLIT, */
+/* are set to zero, the other entries of E are untouched. */
+
+/* E2 (input/output) REAL array, dimension (N) */
+/* On entry, the first (N-1) entries contain the SQUARES of the */
+/* subdiagonal elements of the tridiagonal matrix T; */
+/* E2(N) need not be set. */
+/* On exit, the entries E2( ISPLIT( I ) ), */
+/* 1 <= I <= NSPLIT, have been set to zero */
+
+/* SPLTOL (input) REAL */
+/* The threshold for splitting. Two criteria can be used: */
+/* SPLTOL<0 : criterion based on absolute off-diagonal value */
+/* SPLTOL>0 : criterion that preserves relative accuracy */
+
+/* TNRM (input) REAL */
+/* The norm of the matrix. */
+
+/* NSPLIT (output) INTEGER */
+/* The number of blocks T splits into. 1 <= NSPLIT <= N. */
+
+/* ISPLIT (output) INTEGER array, dimension (N) */
+/* The splitting points, at which T breaks up into blocks. */
+/* The first block consists of rows/columns 1 to ISPLIT(1), */
+/* the second of rows/columns ISPLIT(1)+1 through ISPLIT(2), */
+/* etc., and the NSPLIT-th consists of rows/columns */
+/* ISPLIT(NSPLIT-1)+1 through ISPLIT(NSPLIT)=N. */
+
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments (shift to Fortran-style 1-based indexing) */
+    --isplit;
+    --e2;
+    --e;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+/* Compute splitting points */
+    *nsplit = 1; /* there is always at least one block */
+    if (*spltol < 0.f) {
+/* Criterion based on absolute off-diagonal value */
+	tmp1 = dabs(*spltol) * *tnrm; /* absolute threshold |SPLTOL| * TNRM */
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    eabs = (r__1 = e[i__], dabs(r__1));
+	    if (eabs <= tmp1) {
+		e[i__] = 0.f;
+		e2[i__] = 0.f;
+		isplit[*nsplit] = i__; /* record end of current block */
+		++(*nsplit);
+	    }
+/* L9: */
+	}
+    } else {
+/* Criterion that guarantees relative accuracy */
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    eabs = (r__1 = e[i__], dabs(r__1));
+	    if (eabs <= *spltol * sqrt((r__1 = d__[i__], dabs(r__1))) * sqrt((
+		    r__2 = d__[i__ + 1], dabs(r__2)))) {
+		e[i__] = 0.f;
+		e2[i__] = 0.f;
+		isplit[*nsplit] = i__; /* record end of current block */
+		++(*nsplit);
+	    }
+/* L10: */
+	}
+    }
+    isplit[*nsplit] = *n; /* the last block always ends at row/column N */
+    return 0;
+
+/* End of SLARRA */
+
+} /* slarra_ */
diff --git a/contrib/libs/clapack/slarrb.c b/contrib/libs/clapack/slarrb.c
new file mode 100644
index 0000000000..f1606a0387
--- /dev/null
+++ b/contrib/libs/clapack/slarrb.c
@@ -0,0 +1,349 @@
+/* slarrb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slarrb_(integer *n, real *d__, real *lld, integer *
+	ifirst, integer *ilast, real *rtol1, real *rtol2, integer *offset,
+	real *w, real *wgap, real *werr, real *work, integer *iwork, real *
+	pivmin, real *spdiam, integer *twist, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double log(doublereal);
+
+    /* Local variables */
+    integer i__, k, r__, i1, ii, ip;
+    real gap, mid, tmp, back, lgap, rgap, left;
+    integer iter, nint, prev, next;
+    real cvrgd, right, width;
+    extern integer slaneg_(integer *, real *, real *, real *, real *, integer
+	    *);
+    integer negcnt;
+    real mnwdth;
+    integer olnint, maxitr;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Given the relatively robust representation(RRR) L D L^T, SLARRB */
+/* does "limited" bisection to refine the eigenvalues of L D L^T, */
+/* W( IFIRST-OFFSET ) through W( ILAST-OFFSET ), to more accuracy. Initial */
+/* guesses for these eigenvalues are input in W, the corresponding estimate */
+/* of the error in these guesses and their gaps are input in WERR */
+/* and WGAP, respectively. During bisection, intervals */
+/* [left, right] are maintained by storing their mid-points and */
+/* semi-widths in the arrays W and WERR respectively. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. */
+
+/* D (input) REAL array, dimension (N) */
+/* The N diagonal elements of the diagonal matrix D. */
+
+/* LLD (input) REAL array, dimension (N-1) */
+/* The (N-1) elements L(i)*L(i)*D(i). */
+
+/* IFIRST (input) INTEGER */
+/* The index of the first eigenvalue to be computed. */
+
+/* ILAST (input) INTEGER */
+/* The index of the last eigenvalue to be computed. */
+
+/* RTOL1 (input) REAL */
+/* RTOL2 (input) REAL */
+/* Tolerance for the convergence of the bisection intervals. */
+/* An interval [LEFT,RIGHT] has converged if */
+/* RIGHT-LEFT.LT.MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) */
+/* where GAP is the (estimated) distance to the nearest */
+/* eigenvalue. */
+
+/* OFFSET (input) INTEGER */
+/* Offset for the arrays W, WGAP and WERR, i.e., the IFIRST-OFFSET */
+/* through ILAST-OFFSET elements of these arrays are to be used. */
+
+/* W (input/output) REAL array, dimension (N) */
+/* On input, W( IFIRST-OFFSET ) through W( ILAST-OFFSET ) are */
+/* estimates of the eigenvalues of L D L^T indexed IFIRST through */
+/* ILAST. */
+/* On output, these estimates are refined. */
+
+/* WGAP (input/output) REAL array, dimension (N-1) */
+/* On input, the (estimated) gaps between consecutive */
+/* eigenvalues of L D L^T, i.e., WGAP(I-OFFSET) is the gap between */
+/* eigenvalues I and I+1. Note that if IFIRST.EQ.ILAST */
+/* then WGAP(IFIRST-OFFSET) must be set to ZERO. */
+/* On output, these gaps are refined. */
+
+/* WERR (input/output) REAL array, dimension (N) */
+/* On input, WERR( IFIRST-OFFSET ) through WERR( ILAST-OFFSET ) are */
+/* the errors in the estimates of the corresponding elements in W. */
+/* On output, these errors are refined. */
+
+/* WORK (workspace) REAL array, dimension (2*N) */
+/* Workspace. */
+
+/* IWORK (workspace) INTEGER array, dimension (2*N) */
+/* Workspace. */
+
+/* PIVMIN (input) REAL */
+/* The minimum pivot in the Sturm sequence. */
+
+/* SPDIAM (input) REAL */
+/* The spectral diameter of the matrix. */
+
+/* TWIST (input) INTEGER */
+/* The twist index for the twisted factorization that is used */
+/* for the negcount. */
+/* TWIST = N: Compute negcount from L D L^T - LAMBDA I = L+ D+ L+^T */
+/* TWIST = 1: Compute negcount from L D L^T - LAMBDA I = U- D- U-^T */
+/* TWIST = R: Compute negcount from L D L^T - LAMBDA I = N(r) D(r) N(r) */
+
+/* INFO (output) INTEGER */
+/* Error flag. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments (shift to Fortran-style 1-based indexing) */
+    --iwork;
+    --work;
+    --werr;
+    --wgap;
+    --w;
+    --lld;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+
+    maxitr = (integer) ((log(*spdiam + *pivmin) - log(*pivmin)) / log(2.f)) +
+	     2;
+    mnwdth = *pivmin * 2.f; /* minimum admissible interval width */
+
+    r__ = *twist; /* twist index; fall back to N when out of [1,N] */
+    if (r__ < 1 || r__ > *n) {
+	r__ = *n;
+    }
+
+/* Initialize unconverged intervals in [ WORK(2*I-1), WORK(2*I) ]. */
+/* The Sturm Count, Count( WORK(2*I-1) ) is arranged to be I-1, while */
+/* Count( WORK(2*I) ) is stored in IWORK( 2*I ). The integer IWORK( 2*I-1 ) */
+/* for an unconverged interval is set to the index of the next unconverged */
+/* interval, and is -1 or 0 for a converged interval. Thus a linked */
+/* list of unconverged intervals is set up. */
+
+    i1 = *ifirst;
+/* The number of unconverged intervals */
+    nint = 0;
+/* The last unconverged interval found */
+    prev = 0;
+    rgap = wgap[i1 - *offset];
+    i__1 = *ilast;
+    for (i__ = i1; i__ <= i__1; ++i__) {
+	k = i__ << 1;
+	ii = i__ - *offset;
+	left = w[ii] - werr[ii];
+	right = w[ii] + werr[ii];
+	lgap = rgap;
+	rgap = wgap[ii];
+	gap = dmin(lgap,rgap);
+/* Make sure that [LEFT,RIGHT] contains the desired eigenvalue */
+/* Compute negcount from dstqds facto L+D+L+^T = L D L^T - LEFT */
+
+/* Do while( NEGCNT(LEFT).GT.I-1 ) */
+
+	back = werr[ii];
+L20:
+	negcnt = slaneg_(n, &d__[1], &lld[1], &left, pivmin, &r__);
+	if (negcnt > i__ - 1) {
+	    left -= back;
+	    back *= 2.f;
+	    goto L20;
+	}
+
+/* Do while( NEGCNT(RIGHT).LT.I ) */
+/* Compute negcount from dstqds facto L+D+L+^T = L D L^T - RIGHT */
+
+	back = werr[ii];
+L50:
+	negcnt = slaneg_(n, &d__[1], &lld[1], &right, pivmin, &r__);
+	if (negcnt < i__) {
+	    right += back;
+	    back *= 2.f;
+	    goto L50;
+	}
+	width = (r__1 = left - right, dabs(r__1)) * .5f;
+/* Computing MAX */
+	r__1 = dabs(left), r__2 = dabs(right);
+	tmp = dmax(r__1,r__2);
+/* Computing MAX */
+	r__1 = *rtol1 * gap, r__2 = *rtol2 * tmp;
+	cvrgd = dmax(r__1,r__2);
+	if (width <= cvrgd || width <= mnwdth) {
+/* This interval has already converged and does not need refinement. */
+/* (Note that the gaps might change through refining the */
+/* eigenvalues, however, they can only get bigger.) */
+/* Remove it from the list. */
+	    iwork[k - 1] = -1;
+/* Make sure that I1 always points to the first unconverged interval */
+	    if (i__ == i1 && i__ < *ilast) {
+		i1 = i__ + 1;
+	    }
+	    if (prev >= i1 && i__ <= *ilast) {
+		iwork[(prev << 1) - 1] = i__ + 1;
+	    }
+	} else {
+/* unconverged interval found */
+	    prev = i__;
+	    ++nint;
+	    iwork[k - 1] = i__ + 1;
+	    iwork[k] = negcnt;
+	}
+	work[k - 1] = left;
+	work[k] = right;
+/* L75: */
+    }
+
+/* Do while( NINT.GT.0 ), i.e. there are still unconverged intervals */
+/* and while (ITER.LT.MAXITR) */
+
+    iter = 0;
+L80:
+    prev = i1 - 1;
+    i__ = i1;
+    olnint = nint;
+    i__1 = olnint;
+    for (ip = 1; ip <= i__1; ++ip) {
+	k = i__ << 1;
+	ii = i__ - *offset;
+	rgap = wgap[ii];
+	lgap = rgap;
+	if (ii > 1) {
+	    lgap = wgap[ii - 1];
+	}
+	gap = dmin(lgap,rgap);
+	next = iwork[k - 1];
+	left = work[k - 1];
+	right = work[k];
+	mid = (left + right) * .5f;
+/* semiwidth of interval */
+	width = right - mid;
+/* Computing MAX */
+	r__1 = dabs(left), r__2 = dabs(right);
+	tmp = dmax(r__1,r__2);
+/* Computing MAX */
+	r__1 = *rtol1 * gap, r__2 = *rtol2 * tmp;
+	cvrgd = dmax(r__1,r__2);
+	if (width <= cvrgd || width <= mnwdth || iter == maxitr) {
+/* reduce number of unconverged intervals */
+	    --nint;
+/* Mark interval as converged. */
+	    iwork[k - 1] = 0;
+	    if (i1 == i__) {
+		i1 = next;
+	    } else {
+/* Prev holds the last unconverged interval previously examined */
+		if (prev >= i1) {
+		    iwork[(prev << 1) - 1] = next;
+		}
+	    }
+	    i__ = next;
+	    goto L100;
+	}
+	prev = i__;
+
+/* Perform one bisection step */
+
+	negcnt = slaneg_(n, &d__[1], &lld[1], &mid, pivmin, &r__);
+	if (negcnt <= i__ - 1) {
+	    work[k - 1] = mid;
+	} else {
+	    work[k] = mid;
+	}
+	i__ = next;
+L100:
+	;
+    }
+    ++iter;
+/* do another loop if there are still unconverged intervals */
+/* However, in the last iteration, all intervals are accepted */
+/* since this is the best we can do. */
+    if (nint > 0 && iter <= maxitr) {
+	goto L80;
+    }
+
+
+/* At this point, all the intervals have converged */
+    i__1 = *ilast;
+    for (i__ = *ifirst; i__ <= i__1; ++i__) {
+	k = i__ << 1;
+	ii = i__ - *offset;
+/* All intervals marked by '0' have been refined. */
+	if (iwork[k - 1] == 0) {
+	    w[ii] = (work[k - 1] + work[k]) * .5f;
+	    werr[ii] = work[k] - w[ii];
+	}
+/* L110: */
+    }
+
+    i__1 = *ilast;
+    for (i__ = *ifirst + 1; i__ <= i__1; ++i__) {
+	k = i__ << 1;
+	ii = i__ - *offset;
+/* Computing MAX */
+	r__1 = 0.f, r__2 = w[ii] - werr[ii] - w[ii - 1] - werr[ii - 1];
+	wgap[ii - 1] = dmax(r__1,r__2);
+/* L111: */
+    }
+    return 0;
+
+/* End of SLARRB */
+
+} /* slarrb_ */
diff --git a/contrib/libs/clapack/slarrc.c b/contrib/libs/clapack/slarrc.c
new file mode 100644
index 0000000000..d2b7bec0a0
--- /dev/null
+++ b/contrib/libs/clapack/slarrc.c
@@ -0,0 +1,183 @@
+/* slarrc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slarrc_(char *jobt, integer *n, real *vl, real *vu, real
+	 *d__, real *e, real *pivmin, integer *eigcnt, integer *lcnt, integer *
+	rcnt, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    real r__1;
+
+    /* Local variables */
+    integer i__;
+    real sl, su, tmp, tmp2;
+    logical matt;
+    extern logical lsame_(char *, char *);
+    real lpivot, rpivot;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Find the number of eigenvalues of the symmetric tridiagonal matrix T */
+/* that are in the interval (VL,VU] if JOBT = 'T', and of L D L^T */
+/* if JOBT = 'L'. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBT (input) CHARACTER*1 */
+/* = 'T': Compute Sturm count for matrix T. */
+/* = 'L': Compute Sturm count for matrix L D L^T. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N > 0. */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* The lower and upper bounds for the eigenvalues. */
+
+/* D (input) REAL array, dimension (N) */
+/* JOBT = 'T': The N diagonal elements of the tridiagonal matrix T. */
+/* JOBT = 'L': The N diagonal elements of the diagonal matrix D. */
+
+/* E (input) REAL array, dimension (N) */
+/* JOBT = 'T': The N-1 offdiagonal elements of the matrix T. */
+/* JOBT = 'L': The N-1 offdiagonal elements of the matrix L. */
+
+/* PIVMIN (input) REAL */
+/* The minimum pivot in the Sturm sequence for T. */
+
+/* EIGCNT (output) INTEGER */
+/* The number of eigenvalues of the symmetric tridiagonal matrix T */
+/* that are in the interval (VL,VU] */
+
+/* LCNT (output) INTEGER */
+/* RCNT (output) INTEGER */
+/* The left and right negcounts of the interval. */
+
+/* INFO (output) INTEGER */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments (shift to Fortran-style 1-based indexing) */
+    --e;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+    *lcnt = 0;
+    *rcnt = 0;
+    *eigcnt = 0;
+    matt = lsame_(jobt, "T");
+    if (matt) {
+/* Sturm sequence count on T */
+	lpivot = d__[1] - *vl;
+	rpivot = d__[1] - *vu;
+	if (lpivot <= 0.f) {
+	    ++(*lcnt);
+	}
+	if (rpivot <= 0.f) {
+	    ++(*rcnt);
+	}
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing 2nd power */
+	    r__1 = e[i__];
+	    tmp = r__1 * r__1;
+	    lpivot = d__[i__ + 1] - *vl - tmp / lpivot;
+	    rpivot = d__[i__ + 1] - *vu - tmp / rpivot;
+	    if (lpivot <= 0.f) {
+		++(*lcnt);
+	    }
+	    if (rpivot <= 0.f) {
+		++(*rcnt);
+	    }
+/* L10: */
+	}
+    } else {
+/* Sturm sequence count on L D L^T */
+	sl = -(*vl);
+	su = -(*vu);
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    lpivot = d__[i__] + sl;
+	    rpivot = d__[i__] + su;
+	    if (lpivot <= 0.f) {
+		++(*lcnt);
+	    }
+	    if (rpivot <= 0.f) {
+		++(*rcnt);
+	    }
+	    tmp = e[i__] * d__[i__] * e[i__];
+
+	    tmp2 = tmp / lpivot;
+	    if (tmp2 == 0.f) {
+		sl = tmp - *vl;
+	    } else {
+		sl = sl * tmp2 - *vl;
+	    }
+
+	    tmp2 = tmp / rpivot;
+	    if (tmp2 == 0.f) {
+		su = tmp - *vu;
+	    } else {
+		su = su * tmp2 - *vu;
+	    }
+/* L20: */
+	}
+	lpivot = d__[*n] + sl;
+	rpivot = d__[*n] + su;
+	if (lpivot <= 0.f) {
+	    ++(*lcnt);
+	}
+	if (rpivot <= 0.f) {
+	    ++(*rcnt);
+	}
+    }
+    *eigcnt = *rcnt - *lcnt; /* count in (VL,VU] = negcount(VU) - negcount(VL) */
+    return 0;
+
+/* End of SLARRC */
+
+} /* slarrc_ */
diff --git a/contrib/libs/clapack/slarrd.c b/contrib/libs/clapack/slarrd.c
new file mode 100644
index 0000000000..7670ff9edf
--- /dev/null
+++ b/contrib/libs/clapack/slarrd.c
@@ -0,0 +1,790 @@
+/* slarrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+static integer c__0 = 0;
+
+/* Subroutine */ int slarrd_(char *range, char *order, integer *n, real *vl,
+ real *vu, integer *il, integer *iu, real *gers, real *reltol, real *
+ d__, real *e, real *e2, real *pivmin, integer *nsplit, integer *
+ isplit, integer *m, real *w, real *werr, real *wl, real *wu, integer *
+ iblock, integer *indexw, real *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double log(doublereal);
+
+ /* Local variables */
+ integer i__, j, ib, ie, je, nb;
+ real gl;
+ integer im, in;
+ real gu;
+ integer iw, jee;
+ real eps;
+ integer nwl;
+ real wlu, wul;
+ integer nwu;
+ real tmp1, tmp2;
+ integer iend, jblk, ioff, iout, itmp1, itmp2, jdisc;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ real atoli;
+ integer iwoff, itmax;
+ real wkill, rtoli, uflow, tnorm;
+ integer ibegin, irange, idiscl;
+ extern doublereal slamch_(char *);
+ integer idumma[1];
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer idiscu;
+ extern /* Subroutine */ int slaebz_(integer *, integer *, integer *,
+ integer *, integer *, integer *, real *, real *, real *, real *,
+ real *, real *, integer *, real *, real *, integer *, integer *,
+ real *, integer *, integer *);
+ logical ncnvrg, toofew;
+
+
+/* -- LAPACK auxiliary routine (version 3.2.1) -- */
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/* -- April 2009 -- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLARRD computes the eigenvalues of a symmetric tridiagonal */
+/* matrix T to suitable accuracy. This is an auxiliary code to be */
+/* called from SSTEMR. */
+/* The user may ask for all eigenvalues, all eigenvalues */
+/* in the half-open interval (VL, VU], or the IL-th through IU-th */
+/* eigenvalues. */
+
+/* To avoid overflow, the matrix must be scaled so that its */
+/* largest element is no greater than overflow**(1/2) * */
+/* underflow**(1/4) in absolute value, and for greatest */
+/* accuracy, it should not be much smaller than that. */
+
+/* See W. Kahan "Accurate Eigenvalues of a Symmetric Tridiagonal */
+/* Matrix", Report CS41, Computer Science Dept., Stanford */
+/* University, July 21, 1966. */
+
+/* Arguments */
+/* ========= */
+
+/* RANGE (input) CHARACTER */
+/* = 'A': ("All") all eigenvalues will be found. */
+/* = 'V': ("Value") all eigenvalues in the half-open interval */
+/* (VL, VU] will be found. */
+/* = 'I': ("Index") the IL-th through IU-th eigenvalues (of the */
+/* entire matrix) will be found. */
+
+/* ORDER (input) CHARACTER */
+/* = 'B': ("By Block") the eigenvalues will be grouped by */
+/* split-off block (see IBLOCK, ISPLIT) and */
+/* ordered from smallest to largest within */
+/* the block. */
+/* = 'E': ("Entire matrix") */
+/* the eigenvalues for the entire matrix */
+/* will be ordered from smallest to */
+/* largest. */
+
+/* N (input) INTEGER */
+/* The order of the tridiagonal matrix T. N >= 0. */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. Eigenvalues less than or equal */
+/* to VL, or greater than VU, will not be returned. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* GERS (input) REAL array, dimension (2*N) */
+/* The N Gerschgorin intervals (the i-th Gerschgorin interval */
+/* is (GERS(2*i-1), GERS(2*i)). */
+
+/* RELTOL (input) REAL */
+/* The minimum relative width of an interval. When an interval */
+/* is narrower than RELTOL times the larger (in */
+/* magnitude) endpoint, then it is considered to be */
+/* sufficiently small, i.e., converged. Note: this should */
+/* always be at least radix*machine epsilon. */
+
+/* D (input) REAL array, dimension (N) */
+/* The n diagonal elements of the tridiagonal matrix T. */
+
+/* E (input) REAL array, dimension (N-1) */
+/* The (n-1) off-diagonal elements of the tridiagonal matrix T. */
+
+/* E2 (input) REAL array, dimension (N-1) */
+/* The (n-1) squared off-diagonal elements of the tridiagonal matrix T. */
+
+/* PIVMIN (input) REAL */
+/* The minimum pivot allowed in the Sturm sequence for T. */
+
+/* NSPLIT (input) INTEGER */
+/* The number of diagonal blocks in the matrix T. */
+/* 1 <= NSPLIT <= N. */
+
+/* ISPLIT (input) INTEGER array, dimension (N) */
+/* The splitting points, at which T breaks up into submatrices. */
+/* The first submatrix consists of rows/columns 1 to ISPLIT(1), */
+/* the second of rows/columns ISPLIT(1)+1 through ISPLIT(2), */
+/* etc., and the NSPLIT-th consists of rows/columns */
+/* ISPLIT(NSPLIT-1)+1 through ISPLIT(NSPLIT)=N. */
+/* (Only the first NSPLIT elements will actually be used, but */
+/* since the user cannot know a priori what value NSPLIT will */
+/* have, N words must be reserved for ISPLIT.) */
+
+/* M (output) INTEGER */
+/* The actual number of eigenvalues found. 0 <= M <= N. */
+/* (See also the description of INFO=2,3.) */
+
+/* W (output) REAL array, dimension (N) */
+/* On exit, the first M elements of W will contain the */
+/* eigenvalue approximations. SLARRD computes an interval */
+/* I_j = (a_j, b_j] that includes eigenvalue j. The eigenvalue */
+/* approximation is given as the interval midpoint */
+/* W(j)= ( a_j + b_j)/2. The corresponding error is bounded by */
+/* WERR(j) = abs( a_j - b_j)/2 */
+
+/* WERR (output) REAL array, dimension (N) */
+/* The error bound on the corresponding eigenvalue approximation */
+/* in W. */
+
+/* WL (output) REAL */
+/* WU (output) REAL */
+/* The interval (WL, WU] contains all the wanted eigenvalues. */
+/* If RANGE='V', then WL=VL and WU=VU. */
+/* If RANGE='A', then WL and WU are the global Gerschgorin bounds */
+/* on the spectrum. */
+/* If RANGE='I', then WL and WU are computed by SLAEBZ from the */
+/* index range specified. */
+
+/* IBLOCK (output) INTEGER array, dimension (N) */
+/* At each row/column j where E(j) is zero or small, the */
+/* matrix T is considered to split into a block diagonal */
+/* matrix. On exit, if INFO = 0, IBLOCK(i) specifies to which */
+/* block (from 1 to the number of blocks) the eigenvalue W(i) */
+/* belongs. (SLARRD may use the remaining N-M elements as */
+/* workspace.) */
+
+/* INDEXW (output) INTEGER array, dimension (N) */
+/* The indices of the eigenvalues within each block (submatrix); */
+/* for example, INDEXW(i)= j and IBLOCK(i)=k imply that the */
+/* i-th eigenvalue W(i) is the j-th eigenvalue in block k. */
+
+/* WORK (workspace) REAL array, dimension (4*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (3*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: some or all of the eigenvalues failed to converge or */
+/* were not computed: */
+/* =1 or 3: Bisection failed to converge for some */
+/* eigenvalues; these eigenvalues are flagged by a */
+/* negative block number. The effect is that the */
+/* eigenvalues may not be as accurate as the */
+/* absolute and relative tolerances. This is */
+/* generally caused by unexpectedly inaccurate */
+/* arithmetic. */
+/* =2 or 3: RANGE='I' only: Not all of the eigenvalues */
+/* IL:IU were found. */
+/* Effect: M < IU+1-IL */
+/* Cause: non-monotonic arithmetic, causing the */
+/* Sturm sequence to be non-monotonic. */
+/* Cure: recalculate, using RANGE='A', and pick */
+/* out eigenvalues IL:IU. In some cases, */
+/* increasing the PARAMETER "FUDGE" may */
+/* make things work. */
+/* = 4: RANGE='I', and the Gershgorin interval */
+/* initially used was too small. No eigenvalues */
+/* were computed. */
+/* Probable cause: your machine has sloppy */
+/* floating-point arithmetic. */
+/* Cure: Increase the PARAMETER "FUDGE", */
+/* recompile, and try again. */
+
+/* Internal Parameters */
+/* =================== */
+
+/* FUDGE REAL , default = 2 */
+/* A "fudge factor" to widen the Gershgorin intervals. Ideally, */
+/* a value of 1 should work, but on machines with sloppy */
+/* arithmetic, this needs to be larger. The default for */
+/* publicly released versions should be large enough to handle */
+/* the worst machine around. Note that this has no effect */
+/* on accuracy of the solution. */
+
+/* Based on contributions by */
+/* W. Kahan, University of California, Berkeley, USA */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --iwork;
+ --work;
+ --indexw;
+ --iblock;
+ --werr;
+ --w;
+ --isplit;
+ --e2;
+ --e;
+ --d__;
+ --gers;
+
+ /* Function Body */
+ *info = 0;
+
+/* Decode RANGE */
+
+ if (lsame_(range, "A")) {
+ irange = 1;
+ } else if (lsame_(range, "V")) {
+ irange = 2;
+ } else if (lsame_(range, "I")) {
+ irange = 3;
+ } else {
+ irange = 0;
+ }
+
+/* Check for Errors */
+
+ if (irange <= 0) {
+ *info = -1;
+ } else if (! (lsame_(order, "B") || lsame_(order,
+ "E"))) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (irange == 2) {
+ if (*vl >= *vu) {
+ *info = -5;
+ }
+ } else if (irange == 3 && (*il < 1 || *il > max(1,*n))) {
+ *info = -6;
+ } else if (irange == 3 && (*iu < min(*n,*il) || *iu > *n)) {
+ *info = -7;
+ }
+
+ if (*info != 0) {
+ return 0;
+ }
+/* Initialize error flags */
+ *info = 0;
+ ncnvrg = FALSE_;
+ toofew = FALSE_;
+/* Quick return if possible */
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+/* Simplification: */
+ if (irange == 3 && *il == 1 && *iu == *n) {
+ irange = 1;
+ }
+/* Get machine constants */
+ eps = slamch_("P");
+ uflow = slamch_("U");
+/* Special Case when N=1 */
+/* Treat case of 1x1 matrix for quick return */
+ if (*n == 1) {
+ if (irange == 1 || irange == 2 && d__[1] > *vl && d__[1] <= *vu ||
+ irange == 3 && *il == 1 && *iu == 1) {
+ *m = 1;
+ w[1] = d__[1];
+/* The computation error of the eigenvalue is zero */
+ werr[1] = 0.f;
+ iblock[1] = 1;
+ indexw[1] = 1;
+ }
+ return 0;
+ }
+/* NB is the minimum vector length for vector bisection, or 0 */
+/* if only scalar is to be done. */
+ nb = ilaenv_(&c__1, "SSTEBZ", " ", n, &c_n1, &c_n1, &c_n1);
+ if (nb <= 1) {
+ nb = 0;
+ }
+/* Find global spectral radius */
+ gl = d__[1];
+ gu = d__[1];
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MIN */
+ r__1 = gl, r__2 = gers[(i__ << 1) - 1];
+ gl = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = gu, r__2 = gers[i__ * 2];
+ gu = dmax(r__1,r__2);
+/* L5: */
+ }
+/* Compute global Gerschgorin bounds and spectral diameter */
+/* Computing MAX */
+ r__1 = dabs(gl), r__2 = dabs(gu);
+ tnorm = dmax(r__1,r__2);
+ gl = gl - tnorm * 2.f * eps * *n - *pivmin * 4.f;
+ gu = gu + tnorm * 2.f * eps * *n + *pivmin * 4.f;
+/* [JAN/28/2009] remove the line below since SPDIAM variable not use */
+/* SPDIAM = GU - GL */
+/* Input arguments for SLAEBZ: */
+/* The relative tolerance. An interval (a,b] lies within */
+/* "relative tolerance" if b-a < RELTOL*max(|a|,|b|), */
+ rtoli = *reltol;
+/* Set the absolute tolerance for interval convergence to zero to force */
+/* interval convergence based on relative size of the interval. */
+/* This is dangerous because intervals might not converge when RELTOL is */
+/* small. But at least a very small number should be selected so that for */
+/* strongly graded matrices, the code can get relatively accurate */
+/* eigenvalues. */
+ atoli = uflow * 4.f + *pivmin * 4.f;
+ if (irange == 3) {
+/* RANGE='I': Compute an interval containing eigenvalues */
+/* IL through IU. The initial interval [GL,GU] from the global */
+/* Gerschgorin bounds GL and GU is refined by SLAEBZ. */
+ itmax = (integer) ((log(tnorm + *pivmin) - log(*pivmin)) / log(2.f))
+ + 2;
+ work[*n + 1] = gl;
+ work[*n + 2] = gl;
+ work[*n + 3] = gu;
+ work[*n + 4] = gu;
+ work[*n + 5] = gl;
+ work[*n + 6] = gu;
+ iwork[1] = -1;
+ iwork[2] = -1;
+ iwork[3] = *n + 1;
+ iwork[4] = *n + 1;
+ iwork[5] = *il - 1;
+ iwork[6] = *iu;
+
+ slaebz_(&c__3, &itmax, n, &c__2, &c__2, &nb, &atoli, &rtoli, pivmin, &
+ d__[1], &e[1], &e2[1], &iwork[5], &work[*n + 1], &work[*n + 5]
+, &iout, &iwork[1], &w[1], &iblock[1], &iinfo);
+ if (iinfo != 0) {
+ *info = iinfo;
+ return 0;
+ }
+/* On exit, output intervals may not be ordered by ascending negcount */
+ if (iwork[6] == *iu) {
+ *wl = work[*n + 1];
+ wlu = work[*n + 3];
+ nwl = iwork[1];
+ *wu = work[*n + 4];
+ wul = work[*n + 2];
+ nwu = iwork[4];
+ } else {
+ *wl = work[*n + 2];
+ wlu = work[*n + 4];
+ nwl = iwork[2];
+ *wu = work[*n + 3];
+ wul = work[*n + 1];
+ nwu = iwork[3];
+ }
+/* On exit, the interval [WL, WLU] contains a value with negcount NWL, */
+/* and [WUL, WU] contains a value with negcount NWU. */
+ if (nwl < 0 || nwl >= *n || nwu < 1 || nwu > *n) {
+ *info = 4;
+ return 0;
+ }
+ } else if (irange == 2) {
+ *wl = *vl;
+ *wu = *vu;
+ } else if (irange == 1) {
+ *wl = gl;
+ *wu = gu;
+ }
+/* Find Eigenvalues -- Loop Over blocks and recompute NWL and NWU. */
+/* NWL accumulates the number of eigenvalues .le. WL, */
+/* NWU accumulates the number of eigenvalues .le. WU */
+ *m = 0;
+ iend = 0;
+ *info = 0;
+ nwl = 0;
+ nwu = 0;
+
+ i__1 = *nsplit;
+ for (jblk = 1; jblk <= i__1; ++jblk) {
+ ioff = iend;
+ ibegin = ioff + 1;
+ iend = isplit[jblk];
+ in = iend - ioff;
+
+ if (in == 1) {
+/* 1x1 block */
+ if (*wl >= d__[ibegin] - *pivmin) {
+ ++nwl;
+ }
+ if (*wu >= d__[ibegin] - *pivmin) {
+ ++nwu;
+ }
+ if (irange == 1 || *wl < d__[ibegin] - *pivmin && *wu >= d__[
+ ibegin] - *pivmin) {
+ ++(*m);
+ w[*m] = d__[ibegin];
+ werr[*m] = 0.f;
+/* The gap for a single block doesn't matter for the later */
+/* algorithm and is assigned an arbitrary large value */
+ iblock[*m] = jblk;
+ indexw[*m] = 1;
+ }
+/* Disabled 2x2 case because of a failure on the following matrix */
+/* RANGE = 'I', IL = IU = 4 */
+/* Original Tridiagonal, d = [ */
+/* -0.150102010615740E+00 */
+/* -0.849897989384260E+00 */
+/* -0.128208148052635E-15 */
+/* 0.128257718286320E-15 */
+/* ]; */
+/* e = [ */
+/* -0.357171383266986E+00 */
+/* -0.180411241501588E-15 */
+/* -0.175152352710251E-15 */
+/* ]; */
+
+/* ELSE IF( IN.EQ.2 ) THEN */
+/* * 2x2 block */
+/* DISC = SQRT( (HALF*(D(IBEGIN)-D(IEND)))**2 + E(IBEGIN)**2 ) */
+/* TMP1 = HALF*(D(IBEGIN)+D(IEND)) */
+/* L1 = TMP1 - DISC */
+/* IF( WL.GE. L1-PIVMIN ) */
+/* $ NWL = NWL + 1 */
+/* IF( WU.GE. L1-PIVMIN ) */
+/* $ NWU = NWU + 1 */
+/* IF( IRANGE.EQ.ALLRNG .OR. ( WL.LT.L1-PIVMIN .AND. WU.GE. */
+/* $ L1-PIVMIN ) ) THEN */
+/* M = M + 1 */
+/* W( M ) = L1 */
+/* * The uncertainty of eigenvalues of a 2x2 matrix is very small */
+/* WERR( M ) = EPS * ABS( W( M ) ) * TWO */
+/* IBLOCK( M ) = JBLK */
+/* INDEXW( M ) = 1 */
+/* ENDIF */
+/* L2 = TMP1 + DISC */
+/* IF( WL.GE. L2-PIVMIN ) */
+/* $ NWL = NWL + 1 */
+/* IF( WU.GE. L2-PIVMIN ) */
+/* $ NWU = NWU + 1 */
+/* IF( IRANGE.EQ.ALLRNG .OR. ( WL.LT.L2-PIVMIN .AND. WU.GE. */
+/* $ L2-PIVMIN ) ) THEN */
+/* M = M + 1 */
+/* W( M ) = L2 */
+/* * The uncertainty of eigenvalues of a 2x2 matrix is very small */
+/* WERR( M ) = EPS * ABS( W( M ) ) * TWO */
+/* IBLOCK( M ) = JBLK */
+/* INDEXW( M ) = 2 */
+/* ENDIF */
+ } else {
+/* General Case - block of size IN >= 2 */
+/* Compute local Gerschgorin interval and use it as the initial */
+/* interval for SLAEBZ */
+ gu = d__[ibegin];
+ gl = d__[ibegin];
+ tmp1 = 0.f;
+ i__2 = iend;
+ for (j = ibegin; j <= i__2; ++j) {
+/* Computing MIN */
+ r__1 = gl, r__2 = gers[(j << 1) - 1];
+ gl = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = gu, r__2 = gers[j * 2];
+ gu = dmax(r__1,r__2);
+/* L40: */
+ }
+/* [JAN/28/2009] */
+/* change SPDIAM by TNORM in lines 2 and 3 thereafter */
+/* line 1: remove computation of SPDIAM (not useful anymore) */
+/* SPDIAM = GU - GL */
+/* GL = GL - FUDGE*SPDIAM*EPS*IN - FUDGE*PIVMIN */
+/* GU = GU + FUDGE*SPDIAM*EPS*IN + FUDGE*PIVMIN */
+ gl = gl - tnorm * 2.f * eps * in - *pivmin * 2.f;
+ gu = gu + tnorm * 2.f * eps * in + *pivmin * 2.f;
+
+ if (irange > 1) {
+ if (gu < *wl) {
+/* the local block contains none of the wanted eigenvalues */
+ nwl += in;
+ nwu += in;
+ goto L70;
+ }
+/* refine search interval if possible, only range (WL,WU] matters */
+ gl = dmax(gl,*wl);
+ gu = dmin(gu,*wu);
+ if (gl >= gu) {
+ goto L70;
+ }
+ }
+/* Find negcount of initial interval boundaries GL and GU */
+ work[*n + 1] = gl;
+ work[*n + in + 1] = gu;
+ slaebz_(&c__1, &c__0, &in, &in, &c__1, &nb, &atoli, &rtoli,
+ pivmin, &d__[ibegin], &e[ibegin], &e2[ibegin], idumma, &
+ work[*n + 1], &work[*n + (in << 1) + 1], &im, &iwork[1], &
+ w[*m + 1], &iblock[*m + 1], &iinfo);
+ if (iinfo != 0) {
+ *info = iinfo;
+ return 0;
+ }
+
+ nwl += iwork[1];
+ nwu += iwork[in + 1];
+ iwoff = *m - iwork[1];
+/* Compute Eigenvalues */
+ itmax = (integer) ((log(gu - gl + *pivmin) - log(*pivmin)) / log(
+ 2.f)) + 2;
+ slaebz_(&c__2, &itmax, &in, &in, &c__1, &nb, &atoli, &rtoli,
+ pivmin, &d__[ibegin], &e[ibegin], &e2[ibegin], idumma, &
+ work[*n + 1], &work[*n + (in << 1) + 1], &iout, &iwork[1],
+ &w[*m + 1], &iblock[*m + 1], &iinfo);
+ if (iinfo != 0) {
+ *info = iinfo;
+ return 0;
+ }
+
+/* Copy eigenvalues into W and IBLOCK */
+/* Use -JBLK for block number for unconverged eigenvalues. */
+/* Loop over the number of output intervals from SLAEBZ */
+ i__2 = iout;
+ for (j = 1; j <= i__2; ++j) {
+/* eigenvalue approximation is middle point of interval */
+ tmp1 = (work[j + *n] + work[j + in + *n]) * .5f;
+/* semi length of error interval */
+ tmp2 = (r__1 = work[j + *n] - work[j + in + *n], dabs(r__1)) *
+ .5f;
+ if (j > iout - iinfo) {
+/* Flag non-convergence. */
+ ncnvrg = TRUE_;
+ ib = -jblk;
+ } else {
+ ib = jblk;
+ }
+ i__3 = iwork[j + in] + iwoff;
+ for (je = iwork[j] + 1 + iwoff; je <= i__3; ++je) {
+ w[je] = tmp1;
+ werr[je] = tmp2;
+ indexw[je] = je - iwoff;
+ iblock[je] = ib;
+/* L50: */
+ }
+/* L60: */
+ }
+
+ *m += im;
+ }
+L70:
+ ;
+ }
+/* If RANGE='I', then (WL,WU) contains eigenvalues NWL+1,...,NWU */
+/* If NWL+1 < IL or NWU > IU, discard extra eigenvalues. */
+ if (irange == 3) {
+ idiscl = *il - 1 - nwl;
+ idiscu = nwu - *iu;
+
+ if (idiscl > 0) {
+ im = 0;
+ i__1 = *m;
+ for (je = 1; je <= i__1; ++je) {
+/* Remove some of the smallest eigenvalues from the left so that */
+/* at the end IDISCL =0. Move all eigenvalues up to the left. */
+ if (w[je] <= wlu && idiscl > 0) {
+ --idiscl;
+ } else {
+ ++im;
+ w[im] = w[je];
+ werr[im] = werr[je];
+ indexw[im] = indexw[je];
+ iblock[im] = iblock[je];
+ }
+/* L80: */
+ }
+ *m = im;
+ }
+ if (idiscu > 0) {
+/* Remove some of the largest eigenvalues from the right so that */
+/* at the end IDISCU =0. Move all eigenvalues up to the left. */
+ im = *m + 1;
+ for (je = *m; je >= 1; --je) {
+ if (w[je] >= wul && idiscu > 0) {
+ --idiscu;
+ } else {
+ --im;
+ w[im] = w[je];
+ werr[im] = werr[je];
+ indexw[im] = indexw[je];
+ iblock[im] = iblock[je];
+ }
+/* L81: */
+ }
+ jee = 0;
+ i__1 = *m;
+ for (je = im; je <= i__1; ++je) {
+ ++jee;
+ w[jee] = w[je];
+ werr[jee] = werr[je];
+ indexw[jee] = indexw[je];
+ iblock[jee] = iblock[je];
+/* L82: */
+ }
+ *m = *m - im + 1;
+ }
+ if (idiscl > 0 || idiscu > 0) {
+/* Code to deal with effects of bad arithmetic. (If N(w) is */
+/* monotone non-decreasing, this should never happen.) */
+/* Some low eigenvalues to be discarded are not in (WL,WLU], */
+/* or high eigenvalues to be discarded are not in (WUL,WU] */
+/* so just kill off the smallest IDISCL/largest IDISCU */
+/* eigenvalues, by marking the corresponding IBLOCK = 0 */
+ if (idiscl > 0) {
+ wkill = *wu;
+ i__1 = idiscl;
+ for (jdisc = 1; jdisc <= i__1; ++jdisc) {
+ iw = 0;
+ i__2 = *m;
+ for (je = 1; je <= i__2; ++je) {
+ if (iblock[je] != 0 && (w[je] < wkill || iw == 0)) {
+ iw = je;
+ wkill = w[je];
+ }
+/* L90: */
+ }
+ iblock[iw] = 0;
+/* L100: */
+ }
+ }
+ if (idiscu > 0) {
+ wkill = *wl;
+ i__1 = idiscu;
+ for (jdisc = 1; jdisc <= i__1; ++jdisc) {
+ iw = 0;
+ i__2 = *m;
+ for (je = 1; je <= i__2; ++je) {
+ if (iblock[je] != 0 && (w[je] >= wkill || iw == 0)) {
+ iw = je;
+ wkill = w[je];
+ }
+/* L110: */
+ }
+ iblock[iw] = 0;
+/* L120: */
+ }
+ }
+/* Now erase all eigenvalues with IBLOCK set to zero */
+ im = 0;
+ i__1 = *m;
+ for (je = 1; je <= i__1; ++je) {
+ if (iblock[je] != 0) {
+ ++im;
+ w[im] = w[je];
+ werr[im] = werr[je];
+ indexw[im] = indexw[je];
+ iblock[im] = iblock[je];
+ }
+/* L130: */
+ }
+ *m = im;
+ }
+ if (idiscl < 0 || idiscu < 0) {
+ toofew = TRUE_;
+ }
+ }
+
+ if (irange == 1 && *m != *n || irange == 3 && *m != *iu - *il + 1) {
+ toofew = TRUE_;
+ }
+/* If ORDER='B', do nothing the eigenvalues are already sorted by */
+/* block. */
+/* If ORDER='E', sort the eigenvalues from smallest to largest */
+ if (lsame_(order, "E") && *nsplit > 1) {
+ i__1 = *m - 1;
+ for (je = 1; je <= i__1; ++je) {
+ ie = 0;
+ tmp1 = w[je];
+ i__2 = *m;
+ for (j = je + 1; j <= i__2; ++j) {
+ if (w[j] < tmp1) {
+ ie = j;
+ tmp1 = w[j];
+ }
+/* L140: */
+ }
+ if (ie != 0) {
+ tmp2 = werr[ie];
+ itmp1 = iblock[ie];
+ itmp2 = indexw[ie];
+ w[ie] = w[je];
+ werr[ie] = werr[je];
+ iblock[ie] = iblock[je];
+ indexw[ie] = indexw[je];
+ w[je] = tmp1;
+ werr[je] = tmp2;
+ iblock[je] = itmp1;
+ indexw[je] = itmp2;
+ }
+/* L150: */
+ }
+ }
+
+ *info = 0;
+ if (ncnvrg) {
+ ++(*info);
+ }
+ if (toofew) {
+ *info += 2;
+ }
+ return 0;
+
+/* End of SLARRD */
+
+} /* slarrd_ */
diff --git a/contrib/libs/clapack/slarre.c b/contrib/libs/clapack/slarre.c
new file mode 100644
index 0000000000..0fd7a29192
--- /dev/null
+++ b/contrib/libs/clapack/slarre.c
@@ -0,0 +1,857 @@
+/* slarre.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int slarre_(char *range, integer *n, real *vl, real *vu,
+ integer *il, integer *iu, real *d__, real *e, real *e2, real *rtol1,
+ real *rtol2, real *spltol, integer *nsplit, integer *isplit, integer *
+ m, real *w, real *werr, real *wgap, integer *iblock, integer *indexw,
+ real *gers, real *pivmin, real *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ real r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal), log(doublereal);
+
+ /* Local variables */
+ integer i__, j;
+ real s1, s2;
+ integer mb;
+ real gl;
+ integer in, mm;
+ real gu;
+ integer cnt;
+ real eps, tau, tmp, rtl;
+ integer cnt1, cnt2;
+ real tmp1, eabs;
+ integer iend, jblk;
+ real eold;
+ integer indl;
+ real dmax__, emax;
+ integer wend, idum, indu;
+ real rtol;
+ integer iseed[4];
+ real avgap, sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical norep;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), slasq2_(integer *, real *, integer *);
+ integer ibegin;
+ logical forceb;
+ integer irange;
+ real sgndef;
+ extern doublereal slamch_(char *);
+ integer wbegin;
+ real safmin, spdiam;
+ extern /* Subroutine */ int slarra_(integer *, real *, real *, real *,
+ real *, real *, integer *, integer *, integer *);
+ logical usedqd;
+ real clwdth, isleft;
+ extern /* Subroutine */ int slarrb_(integer *, real *, real *, integer *,
+ integer *, real *, real *, integer *, real *, real *, real *,
+ real *, integer *, real *, real *, integer *, integer *), slarrc_(
+ char *, integer *, real *, real *, real *, real *, real *,
+ integer *, integer *, integer *, integer *), slarrd_(char
+ *, char *, integer *, real *, real *, integer *, integer *, real *
+, real *, real *, real *, real *, real *, integer *, integer *,
+ integer *, real *, real *, real *, real *, integer *, integer *,
+ real *, integer *, integer *), slarrk_(integer *,
+ integer *, real *, real *, real *, real *, real *, real *, real *,
+ real *, integer *);
+ real isrght, bsrtol, dpivot;
+ extern /* Subroutine */ int slarnv_(integer *, integer *, integer *, real
+ *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* To find the desired eigenvalues of a given real symmetric */
+/* tridiagonal matrix T, SLARRE sets any "small" off-diagonal */
+/* elements to zero, and for each unreduced block T_i, it finds */
+/* (a) a suitable shift at one end of the block's spectrum, */
+/* (b) the base representation, T_i - sigma_i I = L_i D_i L_i^T, and */
+/* (c) eigenvalues of each L_i D_i L_i^T. */
+/* The representations and eigenvalues found are then used by */
+/* SSTEMR to compute the eigenvectors of T. */
+/* The accuracy varies depending on whether bisection is used to */
+/* find a few eigenvalues or the dqds algorithm (subroutine SLASQ2) to */
+/* compute all and then discard any unwanted one. */
+/* As an added benefit, SLARRE also outputs the n */
+/* Gerschgorin intervals for the matrices L_i D_i L_i^T. */
+
+/* Arguments */
+/* ========= */
+
+/* RANGE (input) CHARACTER */
+/* = 'A': ("All") all eigenvalues will be found. */
+/* = 'V': ("Value") all eigenvalues in the half-open interval */
+/* (VL, VU] will be found. */
+/* = 'I': ("Index") the IL-th through IU-th eigenvalues (of the */
+/* entire matrix) will be found. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N > 0. */
+
+/* VL (input/output) REAL */
+/* VU (input/output) REAL */
+/* If RANGE='V', the lower and upper bounds for the eigenvalues. */
+/* Eigenvalues less than or equal to VL, or greater than VU, */
+/* will not be returned. VL < VU. */
+/* If RANGE='I' or ='A', SLARRE computes bounds on the desired */
+/* part of the spectrum. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the N diagonal elements of the tridiagonal */
+/* matrix T. */
+/* On exit, the N diagonal elements of the diagonal */
+/* matrices D_i. */
+
+/* E (input/output) REAL array, dimension (N) */
+/* On entry, the first (N-1) entries contain the subdiagonal */
+/* elements of the tridiagonal matrix T; E(N) need not be set. */
+/* On exit, E contains the subdiagonal elements of the unit */
+/* bidiagonal matrices L_i. The entries E( ISPLIT( I ) ), */
+/* 1 <= I <= NSPLIT, contain the base points sigma_i on output. */
+
+/* E2 (input/output) REAL array, dimension (N) */
+/* On entry, the first (N-1) entries contain the SQUARES of the */
+/* subdiagonal elements of the tridiagonal matrix T; */
+/* E2(N) need not be set. */
+/* On exit, the entries E2( ISPLIT( I ) ), */
+/* 1 <= I <= NSPLIT, have been set to zero */
+
+/* RTOL1 (input) REAL */
+/* RTOL2 (input) REAL */
+/* Parameters for bisection. */
+/* An interval [LEFT,RIGHT] has converged if */
+/* RIGHT-LEFT.LT.MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) */
+
+/* SPLTOL (input) REAL */
+/* The threshold for splitting. */
+
+/* NSPLIT (output) INTEGER */
+/* The number of blocks T splits into. 1 <= NSPLIT <= N. */
+
+/* ISPLIT (output) INTEGER array, dimension (N) */
+/* The splitting points, at which T breaks up into blocks. */
+/* The first block consists of rows/columns 1 to ISPLIT(1), */
+/* the second of rows/columns ISPLIT(1)+1 through ISPLIT(2), */
+/* etc., and the NSPLIT-th consists of rows/columns */
+/* ISPLIT(NSPLIT-1)+1 through ISPLIT(NSPLIT)=N. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues (of all L_i D_i L_i^T) */
+/* found. */
+
+/* W (output) REAL array, dimension (N) */
+/* The first M elements contain the eigenvalues. The */
+/* eigenvalues of each of the blocks, L_i D_i L_i^T, are */
+/* sorted in ascending order ( SLARRE may use the */
+/* remaining N-M elements as workspace). */
+
+/* WERR (output) REAL array, dimension (N) */
+/* The error bound on the corresponding eigenvalue in W. */
+
+/* WGAP (output) REAL array, dimension (N) */
+/* The separation from the right neighbor eigenvalue in W. */
+/* The gap is only with respect to the eigenvalues of the same block */
+/* as each block has its own representation tree. */
+/* Exception: at the right end of a block we store the left gap */
+
+/* IBLOCK (output) INTEGER array, dimension (N) */
+/* The indices of the blocks (submatrices) associated with the */
+/* corresponding eigenvalues in W; IBLOCK(i)=1 if eigenvalue */
+/* W(i) belongs to the first block from the top, =2 if W(i) */
+/* belongs to the second block, etc. */
+
+/* INDEXW (output) INTEGER array, dimension (N) */
+/* The indices of the eigenvalues within each block (submatrix); */
+/* for example, INDEXW(i)= 10 and IBLOCK(i)=2 imply that the */
+/* i-th eigenvalue W(i) is the 10-th eigenvalue in block 2 */
+
+/* GERS (output) REAL array, dimension (2*N) */
+/* The N Gerschgorin intervals (the i-th Gerschgorin interval */
+/* is (GERS(2*i-1), GERS(2*i)). */
+
+/* PIVMIN (output) DOUBLE PRECISION */
+/* The minimum pivot in the Sturm sequence for T. */
+
+/* WORK (workspace) REAL array, dimension (6*N) */
+/* Workspace. */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+/* Workspace. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* > 0:  A problem occurred in SLARRE. */
+/* < 0: One of the called subroutines signaled an internal problem. */
+/* Needs inspection of the corresponding parameter IINFO */
+/* for further information. */
+
+/* =-1: Problem in SLARRD. */
+/* = 2: No base representation could be found in MAXTRY iterations. */
+/* Increasing MAXTRY and recompilation might be a remedy. */
+/* =-3: Problem in SLARRB when computing the refined root */
+/* representation for SLASQ2. */
+/* =-4:  Problem in SLARRB when performing bisection on the */
+/* desired part of the spectrum. */
+/* =-5: Problem in SLASQ2. */
+/* =-6: Problem in SLASQ2. */
+
+/* Further Details */
+/* The base representations are required to suffer very little */
+/* element growth and consequently define all their eigenvalues to */
+/* high relative accuracy. */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --iwork;
+ --work;
+ --gers;
+ --indexw;
+ --iblock;
+ --wgap;
+ --werr;
+ --w;
+ --isplit;
+ --e2;
+ --e;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+
+/* Decode RANGE */
+
+ if (lsame_(range, "A")) {
+ irange = 1;
+ } else if (lsame_(range, "V")) {
+ irange = 3;
+ } else if (lsame_(range, "I")) {
+ irange = 2;
+ }
+ *m = 0;
+/* Get machine constants */
+ safmin = slamch_("S");
+ eps = slamch_("P");
+/* Set parameters */
+ rtl = eps * 100.f;
+/* If one were ever to ask for less initial precision in BSRTOL, */
+/* one should keep in mind that for the subset case, the extremal */
+/* eigenvalues must be at least as accurate as the current setting */
+/* (eigenvalues in the middle need not as much accuracy) */
+ bsrtol = sqrt(eps) * 5e-4f;
+/* Treat case of 1x1 matrix for quick return */
+ if (*n == 1) {
+ if (irange == 1 || irange == 3 && d__[1] > *vl && d__[1] <= *vu ||
+ irange == 2 && *il == 1 && *iu == 1) {
+ *m = 1;
+ w[1] = d__[1];
+/* The computation error of the eigenvalue is zero */
+ werr[1] = 0.f;
+ wgap[1] = 0.f;
+ iblock[1] = 1;
+ indexw[1] = 1;
+ gers[1] = d__[1];
+ gers[2] = d__[1];
+ }
+/* store the shift for the initial RRR, which is zero in this case */
+ e[1] = 0.f;
+ return 0;
+ }
+/* General case: tridiagonal matrix of order > 1 */
+
+/* Init WERR, WGAP. Compute Gerschgorin intervals and spectral diameter. */
+/* Compute maximum off-diagonal entry and pivmin. */
+ gl = d__[1];
+ gu = d__[1];
+ eold = 0.f;
+ emax = 0.f;
+ e[*n] = 0.f;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ werr[i__] = 0.f;
+ wgap[i__] = 0.f;
+ eabs = (r__1 = e[i__], dabs(r__1));
+ if (eabs >= emax) {
+ emax = eabs;
+ }
+ tmp1 = eabs + eold;
+ gers[(i__ << 1) - 1] = d__[i__] - tmp1;
+/* Computing MIN */
+ r__1 = gl, r__2 = gers[(i__ << 1) - 1];
+ gl = dmin(r__1,r__2);
+ gers[i__ * 2] = d__[i__] + tmp1;
+/* Computing MAX */
+ r__1 = gu, r__2 = gers[i__ * 2];
+ gu = dmax(r__1,r__2);
+ eold = eabs;
+/* L5: */
+ }
+/* The minimum pivot allowed in the Sturm sequence for T */
+/* Computing MAX */
+/* Computing 2nd power */
+ r__3 = emax;
+ r__1 = 1.f, r__2 = r__3 * r__3;
+ *pivmin = safmin * dmax(r__1,r__2);
+/* Compute spectral diameter. The Gerschgorin bounds give an */
+/* estimate that is wrong by at most a factor of SQRT(2) */
+ spdiam = gu - gl;
+/* Compute splitting points */
+ slarra_(n, &d__[1], &e[1], &e2[1], spltol, &spdiam, nsplit, &isplit[1], &
+ iinfo);
+/* Can force use of bisection instead of faster DQDS. */
+/* Option left in the code for future multisection work. */
+ forceb = FALSE_;
+/* Initialize USEDQD, DQDS should be used for ALLRNG unless someone */
+/* explicitly wants bisection. */
+ usedqd = irange == 1 && ! forceb;
+ if (irange == 1 && ! forceb) {
+/* Set interval [VL,VU] that contains all eigenvalues */
+ *vl = gl;
+ *vu = gu;
+ } else {
+/* We call SLARRD to find crude approximations to the eigenvalues */
+/* in the desired range. In case IRANGE = INDRNG, we also obtain the */
+/* interval (VL,VU] that contains all the wanted eigenvalues. */
+/* An interval [LEFT,RIGHT] has converged if */
+/* RIGHT-LEFT.LT.RTOL*MAX(ABS(LEFT),ABS(RIGHT)) */
+/* SLARRD needs a WORK of size 4*N, IWORK of size 3*N */
+ slarrd_(range, "B", n, vl, vu, il, iu, &gers[1], &bsrtol, &d__[1], &e[
+ 1], &e2[1], pivmin, nsplit, &isplit[1], &mm, &w[1], &werr[1],
+ vl, vu, &iblock[1], &indexw[1], &work[1], &iwork[1], &iinfo);
+ if (iinfo != 0) {
+ *info = -1;
+ return 0;
+ }
+/* Make sure that the entries M+1 to N in W, WERR, IBLOCK, INDEXW are 0 */
+ i__1 = *n;
+ for (i__ = mm + 1; i__ <= i__1; ++i__) {
+ w[i__] = 0.f;
+ werr[i__] = 0.f;
+ iblock[i__] = 0;
+ indexw[i__] = 0;
+/* L14: */
+ }
+ }
+/* ** */
+/* Loop over unreduced blocks */
+ ibegin = 1;
+ wbegin = 1;
+ i__1 = *nsplit;
+ for (jblk = 1; jblk <= i__1; ++jblk) {
+ iend = isplit[jblk];
+ in = iend - ibegin + 1;
+/* 1 X 1 block */
+ if (in == 1) {
+ if (irange == 1 || irange == 3 && d__[ibegin] > *vl && d__[ibegin]
+ <= *vu || irange == 2 && iblock[wbegin] == jblk) {
+ ++(*m);
+ w[*m] = d__[ibegin];
+ werr[*m] = 0.f;
+/* The gap for a single block doesn't matter for the later */
+/* algorithm and is assigned an arbitrary large value */
+ wgap[*m] = 0.f;
+ iblock[*m] = jblk;
+ indexw[*m] = 1;
+ ++wbegin;
+ }
+/* E( IEND ) holds the shift for the initial RRR */
+ e[iend] = 0.f;
+ ibegin = iend + 1;
+ goto L170;
+ }
+
+/* Blocks of size larger than 1x1 */
+
+/* E( IEND ) will hold the shift for the initial RRR, for now set it =0 */
+ e[iend] = 0.f;
+
+/* Find local outer bounds GL,GU for the block */
+ gl = d__[ibegin];
+ gu = d__[ibegin];
+ i__2 = iend;
+ for (i__ = ibegin; i__ <= i__2; ++i__) {
+/* Computing MIN */
+ r__1 = gers[(i__ << 1) - 1];
+ gl = dmin(r__1,gl);
+/* Computing MAX */
+ r__1 = gers[i__ * 2];
+ gu = dmax(r__1,gu);
+/* L15: */
+ }
+ spdiam = gu - gl;
+ if (! (irange == 1 && ! forceb)) {
+/* Count the number of eigenvalues in the current block. */
+ mb = 0;
+ i__2 = mm;
+ for (i__ = wbegin; i__ <= i__2; ++i__) {
+ if (iblock[i__] == jblk) {
+ ++mb;
+ } else {
+ goto L21;
+ }
+/* L20: */
+ }
+L21:
+ if (mb == 0) {
+/* No eigenvalue in the current block lies in the desired range */
+/* E( IEND ) holds the shift for the initial RRR */
+ e[iend] = 0.f;
+ ibegin = iend + 1;
+ goto L170;
+ } else {
+/* Decide whether dqds or bisection is more efficient */
+ usedqd = (real) mb > in * .5f && ! forceb;
+ wend = wbegin + mb - 1;
+/* Calculate gaps for the current block */
+/* In later stages, when representations for individual */
+/* eigenvalues are different, we use SIGMA = E( IEND ). */
+ sigma = 0.f;
+ i__2 = wend - 1;
+ for (i__ = wbegin; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = 0.f, r__2 = w[i__ + 1] - werr[i__ + 1] - (w[i__] +
+ werr[i__]);
+ wgap[i__] = dmax(r__1,r__2);
+/* L30: */
+ }
+/* Computing MAX */
+ r__1 = 0.f, r__2 = *vu - sigma - (w[wend] + werr[wend]);
+ wgap[wend] = dmax(r__1,r__2);
+/* Find local index of the first and last desired evalue. */
+ indl = indexw[wbegin];
+ indu = indexw[wend];
+ }
+ }
+ if (irange == 1 && ! forceb || usedqd) {
+/* Case of DQDS */
+/* Find approximations to the extremal eigenvalues of the block */
+ slarrk_(&in, &c__1, &gl, &gu, &d__[ibegin], &e2[ibegin], pivmin, &
+ rtl, &tmp, &tmp1, &iinfo);
+ if (iinfo != 0) {
+ *info = -1;
+ return 0;
+ }
+/* Computing MAX */
+ r__2 = gl, r__3 = tmp - tmp1 - eps * 100.f * (r__1 = tmp - tmp1,
+ dabs(r__1));
+ isleft = dmax(r__2,r__3);
+ slarrk_(&in, &in, &gl, &gu, &d__[ibegin], &e2[ibegin], pivmin, &
+ rtl, &tmp, &tmp1, &iinfo);
+ if (iinfo != 0) {
+ *info = -1;
+ return 0;
+ }
+/* Computing MIN */
+ r__2 = gu, r__3 = tmp + tmp1 + eps * 100.f * (r__1 = tmp + tmp1,
+ dabs(r__1));
+ isrght = dmin(r__2,r__3);
+/* Improve the estimate of the spectral diameter */
+ spdiam = isrght - isleft;
+ } else {
+/* Case of bisection */
+/* Find approximations to the wanted extremal eigenvalues */
+/* Computing MAX */
+ r__2 = gl, r__3 = w[wbegin] - werr[wbegin] - eps * 100.f * (r__1 =
+ w[wbegin] - werr[wbegin], dabs(r__1));
+ isleft = dmax(r__2,r__3);
+/* Computing MIN */
+ r__2 = gu, r__3 = w[wend] + werr[wend] + eps * 100.f * (r__1 = w[
+ wend] + werr[wend], dabs(r__1));
+ isrght = dmin(r__2,r__3);
+ }
+/* Decide whether the base representation for the current block */
+/* L_JBLK D_JBLK L_JBLK^T = T_JBLK - sigma_JBLK I */
+/* should be on the left or the right end of the current block. */
+/* The strategy is to shift to the end which is "more populated" */
+/* Furthermore, decide whether to use DQDS for the computation of */
+/* the eigenvalue approximations at the end of SLARRE or bisection. */
+/* dqds is chosen if all eigenvalues are desired or the number of */
+/* eigenvalues to be computed is large compared to the blocksize. */
+ if (irange == 1 && ! forceb) {
+/* If all the eigenvalues have to be computed, we use dqd */
+ usedqd = TRUE_;
+/* INDL is the local index of the first eigenvalue to compute */
+ indl = 1;
+ indu = in;
+/* MB = number of eigenvalues to compute */
+ mb = in;
+ wend = wbegin + mb - 1;
+/* Define 1/4 and 3/4 points of the spectrum */
+ s1 = isleft + spdiam * .25f;
+ s2 = isrght - spdiam * .25f;
+ } else {
+/* SLARRD has computed IBLOCK and INDEXW for each eigenvalue */
+/* approximation. */
+/* choose sigma */
+ if (usedqd) {
+ s1 = isleft + spdiam * .25f;
+ s2 = isrght - spdiam * .25f;
+ } else {
+ tmp = dmin(isrght,*vu) - dmax(isleft,*vl);
+ s1 = dmax(isleft,*vl) + tmp * .25f;
+ s2 = dmin(isrght,*vu) - tmp * .25f;
+ }
+ }
+/* Compute the negcount at the 1/4 and 3/4 points */
+ if (mb > 1) {
+ slarrc_("T", &in, &s1, &s2, &d__[ibegin], &e[ibegin], pivmin, &
+ cnt, &cnt1, &cnt2, &iinfo);
+ }
+ if (mb == 1) {
+ sigma = gl;
+ sgndef = 1.f;
+ } else if (cnt1 - indl >= indu - cnt2) {
+ if (irange == 1 && ! forceb) {
+ sigma = dmax(isleft,gl);
+ } else if (usedqd) {
+/* use Gerschgorin bound as shift to get pos def matrix */
+/* for dqds */
+ sigma = isleft;
+ } else {
+/* use approximation of the first desired eigenvalue of the */
+/* block as shift */
+ sigma = dmax(isleft,*vl);
+ }
+ sgndef = 1.f;
+ } else {
+ if (irange == 1 && ! forceb) {
+ sigma = dmin(isrght,gu);
+ } else if (usedqd) {
+/* use Gerschgorin bound as shift to get neg def matrix */
+/* for dqds */
+ sigma = isrght;
+ } else {
+/* use approximation of the first desired eigenvalue of the */
+/* block as shift */
+ sigma = dmin(isrght,*vu);
+ }
+ sgndef = -1.f;
+ }
+/* An initial SIGMA has been chosen that will be used for computing */
+/* T - SIGMA I = L D L^T */
+/* Define the increment TAU of the shift in case the initial shift */
+/* needs to be refined to obtain a factorization with not too much */
+/* element growth. */
+ if (usedqd) {
+/* The initial SIGMA was to the outer end of the spectrum */
+/* the matrix is definite and we need not retreat. */
+ tau = spdiam * eps * *n + *pivmin * 2.f;
+ } else {
+ if (mb > 1) {
+ clwdth = w[wend] + werr[wend] - w[wbegin] - werr[wbegin];
+ avgap = (r__1 = clwdth / (real) (wend - wbegin), dabs(r__1));
+ if (sgndef == 1.f) {
+/* Computing MAX */
+ r__1 = wgap[wbegin];
+ tau = dmax(r__1,avgap) * .5f;
+/* Computing MAX */
+ r__1 = tau, r__2 = werr[wbegin];
+ tau = dmax(r__1,r__2);
+ } else {
+/* Computing MAX */
+ r__1 = wgap[wend - 1];
+ tau = dmax(r__1,avgap) * .5f;
+/* Computing MAX */
+ r__1 = tau, r__2 = werr[wend];
+ tau = dmax(r__1,r__2);
+ }
+ } else {
+ tau = werr[wbegin];
+ }
+ }
+
+ for (idum = 1; idum <= 6; ++idum) {
+/* Compute L D L^T factorization of tridiagonal matrix T - sigma I. */
+/* Store D in WORK(1:IN), L in WORK(IN+1:2*IN), and reciprocals of */
+/* pivots in WORK(2*IN+1:3*IN) */
+ dpivot = d__[ibegin] - sigma;
+ work[1] = dpivot;
+ dmax__ = dabs(work[1]);
+ j = ibegin;
+ i__2 = in - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[(in << 1) + i__] = 1.f / work[i__];
+ tmp = e[j] * work[(in << 1) + i__];
+ work[in + i__] = tmp;
+ dpivot = d__[j + 1] - sigma - tmp * e[j];
+ work[i__ + 1] = dpivot;
+/* Computing MAX */
+ r__1 = dmax__, r__2 = dabs(dpivot);
+ dmax__ = dmax(r__1,r__2);
+ ++j;
+/* L70: */
+ }
+/* check for element growth */
+ if (dmax__ > spdiam * 64.f) {
+ norep = TRUE_;
+ } else {
+ norep = FALSE_;
+ }
+ if (usedqd && ! norep) {
+/* Ensure the definiteness of the representation */
+/* All entries of D (of L D L^T) must have the same sign */
+ i__2 = in;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ tmp = sgndef * work[i__];
+ if (tmp < 0.f) {
+ norep = TRUE_;
+ }
+/* L71: */
+ }
+ }
+ if (norep) {
+/* Note that in the case of IRANGE=ALLRNG, we use the Gerschgorin */
+/* shift which makes the matrix definite. So we should end up */
+/* here really only in the case of IRANGE = VALRNG or INDRNG. */
+ if (idum == 5) {
+ if (sgndef == 1.f) {
+/* The fudged Gerschgorin shift should succeed */
+ sigma = gl - spdiam * 2.f * eps * *n - *pivmin * 4.f;
+ } else {
+ sigma = gu + spdiam * 2.f * eps * *n + *pivmin * 4.f;
+ }
+ } else {
+ sigma -= sgndef * tau;
+ tau *= 2.f;
+ }
+ } else {
+/* an initial RRR is found */
+ goto L83;
+ }
+/* L80: */
+ }
+/* if the program reaches this point, no base representation could be */
+/* found in MAXTRY iterations. */
+ *info = 2;
+ return 0;
+L83:
+/* At this point, we have found an initial base representation */
+/* T - SIGMA I = L D L^T with not too much element growth. */
+/* Store the shift. */
+ e[iend] = sigma;
+/* Store D and L. */
+ scopy_(&in, &work[1], &c__1, &d__[ibegin], &c__1);
+ i__2 = in - 1;
+ scopy_(&i__2, &work[in + 1], &c__1, &e[ibegin], &c__1);
+ if (mb > 1) {
+
+/* Perturb each entry of the base representation by a small */
+/* (but random) relative amount to overcome difficulties with */
+/* glued matrices. */
+
+ for (i__ = 1; i__ <= 4; ++i__) {
+ iseed[i__ - 1] = 1;
+/* L122: */
+ }
+ i__2 = (in << 1) - 1;
+ slarnv_(&c__2, iseed, &i__2, &work[1]);
+ i__2 = in - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ d__[ibegin + i__ - 1] *= eps * 4.f * work[i__] + 1.f;
+ e[ibegin + i__ - 1] *= eps * 4.f * work[in + i__] + 1.f;
+/* L125: */
+ }
+ d__[iend] *= eps * 4.f * work[in] + 1.f;
+
+ }
+
+/* Don't update the Gerschgorin intervals because keeping track */
+/* of the updates would be too much work in SLARRV. */
+/* We update W instead and use it to locate the proper Gerschgorin */
+/* intervals. */
+/* Compute the required eigenvalues of L D L' by bisection or dqds */
+ if (! usedqd) {
+/* If SLARRD has been used, shift the eigenvalue approximations */
+/* according to their representation. This is necessary for */
+/* a uniform SLARRV since dqds computes eigenvalues of the */
+/* shifted representation. In SLARRV, W will always hold the */
+/* UNshifted eigenvalue approximation. */
+ i__2 = wend;
+ for (j = wbegin; j <= i__2; ++j) {
+ w[j] -= sigma;
+ werr[j] += (r__1 = w[j], dabs(r__1)) * eps;
+/* L134: */
+ }
+/* call SLARRB to reduce eigenvalue error of the approximations */
+/* from SLARRD */
+ i__2 = iend - 1;
+ for (i__ = ibegin; i__ <= i__2; ++i__) {
+/* Computing 2nd power */
+ r__1 = e[i__];
+ work[i__] = d__[i__] * (r__1 * r__1);
+/* L135: */
+ }
+/* use bisection to find EV from INDL to INDU */
+ i__2 = indl - 1;
+ slarrb_(&in, &d__[ibegin], &work[ibegin], &indl, &indu, rtol1,
+ rtol2, &i__2, &w[wbegin], &wgap[wbegin], &werr[wbegin], &
+ work[(*n << 1) + 1], &iwork[1], pivmin, &spdiam, &in, &
+ iinfo);
+ if (iinfo != 0) {
+ *info = -4;
+ return 0;
+ }
+/* SLARRB computes all gaps correctly except for the last one */
+/* Record distance to VU/GU */
+/* Computing MAX */
+ r__1 = 0.f, r__2 = *vu - sigma - (w[wend] + werr[wend]);
+ wgap[wend] = dmax(r__1,r__2);
+ i__2 = indu;
+ for (i__ = indl; i__ <= i__2; ++i__) {
+ ++(*m);
+ iblock[*m] = jblk;
+ indexw[*m] = i__;
+/* L138: */
+ }
+ } else {
+/* Call dqds to get all eigs (and then possibly delete unwanted */
+/* eigenvalues). */
+/* Note that dqds finds the eigenvalues of the L D L^T representation */
+/* of T to high relative accuracy. High relative accuracy */
+/* might be lost when the shift of the RRR is subtracted to obtain */
+/* the eigenvalues of T. However, T is not guaranteed to define its */
+/* eigenvalues to high relative accuracy anyway. */
+/* Set RTOL to the order of the tolerance used in SLASQ2 */
+/* This is an ESTIMATED error, the worst case bound is 4*N*EPS */
+/* which is usually too large and requires unnecessary work to be */
+/* done by bisection when computing the eigenvectors */
+ rtol = log((real) in) * 4.f * eps;
+ j = ibegin;
+ i__2 = in - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[(i__ << 1) - 1] = (r__1 = d__[j], dabs(r__1));
+ work[i__ * 2] = e[j] * e[j] * work[(i__ << 1) - 1];
+ ++j;
+/* L140: */
+ }
+ work[(in << 1) - 1] = (r__1 = d__[iend], dabs(r__1));
+ work[in * 2] = 0.f;
+ slasq2_(&in, &work[1], &iinfo);
+ if (iinfo != 0) {
+/* If IINFO = -5 then an index is part of a tight cluster */
+/* and should be changed. The index is in IWORK(1) and the */
+/* gap is in WORK(N+1) */
+ *info = -5;
+ return 0;
+ } else {
+/* Test that all eigenvalues are positive as expected */
+ i__2 = in;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] < 0.f) {
+ *info = -6;
+ return 0;
+ }
+/* L149: */
+ }
+ }
+ if (sgndef > 0.f) {
+ i__2 = indu;
+ for (i__ = indl; i__ <= i__2; ++i__) {
+ ++(*m);
+ w[*m] = work[in - i__ + 1];
+ iblock[*m] = jblk;
+ indexw[*m] = i__;
+/* L150: */
+ }
+ } else {
+ i__2 = indu;
+ for (i__ = indl; i__ <= i__2; ++i__) {
+ ++(*m);
+ w[*m] = -work[i__];
+ iblock[*m] = jblk;
+ indexw[*m] = i__;
+/* L160: */
+ }
+ }
+ i__2 = *m;
+ for (i__ = *m - mb + 1; i__ <= i__2; ++i__) {
+/* the value of RTOL below should be the tolerance in SLASQ2 */
+ werr[i__] = rtol * (r__1 = w[i__], dabs(r__1));
+/* L165: */
+ }
+ i__2 = *m - 1;
+ for (i__ = *m - mb + 1; i__ <= i__2; ++i__) {
+/* compute the right gap between the intervals */
+/* Computing MAX */
+ r__1 = 0.f, r__2 = w[i__ + 1] - werr[i__ + 1] - (w[i__] +
+ werr[i__]);
+ wgap[i__] = dmax(r__1,r__2);
+/* L166: */
+ }
+/* Computing MAX */
+ r__1 = 0.f, r__2 = *vu - sigma - (w[*m] + werr[*m]);
+ wgap[*m] = dmax(r__1,r__2);
+ }
+/* proceed with next block */
+ ibegin = iend + 1;
+ wbegin = wend + 1;
+L170:
+ ;
+ }
+
+ return 0;
+
+/* end of SLARRE */
+
+} /* slarre_ */
diff --git a/contrib/libs/clapack/slarrf.c b/contrib/libs/clapack/slarrf.c
new file mode 100644
index 0000000000..372a339edd
--- /dev/null
+++ b/contrib/libs/clapack/slarrf.c
@@ -0,0 +1,422 @@
+/* slarrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int slarrf_(integer *n, real *d__, real *l, real *ld,
+ integer *clstrt, integer *clend, real *w, real *wgap, real *werr,
+ real *spdiam, real *clgapl, real *clgapr, real *pivmin, real *sigma,
+ real *dplus, real *lplus, real *work, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ real s, bestshift, smlgrowth, eps, tmp, max1, max2, rrr1, rrr2, znm2,
+ growthbound, fail, fact, oldp;
+ integer indx;
+ real prod;
+ integer ktry;
+ real fail2, avgap, ldmax, rdmax;
+ integer shift;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ logical dorrr1;
+ real ldelta;
+ extern doublereal slamch_(char *);
+ logical nofail;
+ real mingap, lsigma, rdelta;
+ logical forcer;
+ real rsigma, clwdth;
+ extern logical sisnan_(real *);
+ logical sawnan1, sawnan2, tryrrr1;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+/* * */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Given the initial representation L D L^T and its cluster of close */
+/* eigenvalues (in a relative measure), W( CLSTRT ), W( CLSTRT+1 ), ... */
+/* W( CLEND ), SLARRF finds a new relatively robust representation */
+/* L D L^T - SIGMA I = L(+) D(+) L(+)^T such that at least one of the */
+/* eigenvalues of L(+) D(+) L(+)^T is relatively isolated. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix (subblock, if the matrix is split). */
+
+/* D (input) REAL array, dimension (N) */
+/* The N diagonal elements of the diagonal matrix D. */
+
+/* L (input) REAL array, dimension (N-1) */
+/* The (N-1) subdiagonal elements of the unit bidiagonal */
+/* matrix L. */
+
+/* LD (input) REAL array, dimension (N-1) */
+/* The (N-1) elements L(i)*D(i). */
+
+/* CLSTRT (input) INTEGER */
+/* The index of the first eigenvalue in the cluster. */
+
+/* CLEND (input) INTEGER */
+/* The index of the last eigenvalue in the cluster. */
+
+/* W (input) REAL array, dimension >= (CLEND-CLSTRT+1) */
+/* The eigenvalue APPROXIMATIONS of L D L^T in ascending order. */
+/* W( CLSTRT ) through W( CLEND ) form the cluster of relatively */
+/* close eigenvalues. */
+
+/* WGAP (input/output) REAL array, dimension >= (CLEND-CLSTRT+1) */
+/* The separation from the right neighbor eigenvalue in W. */
+
+/* WERR (input) REAL array, dimension >= (CLEND-CLSTRT+1) */
+/* WERR contains the semiwidth of the uncertainty */
+/* interval of the corresponding eigenvalue APPROXIMATION in W */
+
+/* SPDIAM (input) estimate of the spectral diameter obtained from the */
+/* Gerschgorin intervals */
+
+/* CLGAPL, CLGAPR (input) absolute gap on each end of the cluster. */
+/* Set by the calling routine to protect against shifts too close */
+/* to eigenvalues outside the cluster. */
+
+/* PIVMIN (input) REAL */
+/* The minimum pivot allowed in the Sturm sequence. */
+
+/* SIGMA (output) REAL */
+/* The shift used to form L(+) D(+) L(+)^T. */
+
+/* DPLUS (output) REAL array, dimension (N) */
+/* The N diagonal elements of the diagonal matrix D(+). */
+
+/* LPLUS (output) REAL array, dimension (N-1) */
+/* The first (N-1) elements of LPLUS contain the subdiagonal */
+/* elements of the unit bidiagonal matrix L(+). */
+
+/* WORK (workspace) REAL array, dimension (2*N) */
+/* Workspace. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --work;
+ --lplus;
+ --dplus;
+ --werr;
+ --wgap;
+ --w;
+ --ld;
+ --l;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+/* FACT is the factor by which the cluster gaps are divided below to */
+/* obtain the initial back-off steps LDELTA and RDELTA for the shifts. */
+ fact = 2.f;
+ eps = slamch_("Precision");
+/* SHIFT records which end was accepted: 1 = left (LSIGMA), 2 = right (RSIGMA) */
+ shift = 0;
+ forcer = FALSE_;
+/* Note that we cannot guarantee that for any of the shifts tried, */
+/* the factorization has a small or even moderate element growth. */
+/* There could be Ritz values at both ends of the cluster and despite */
+/* backing off, there are examples where all factorizations tried */
+/* (in IEEE mode, allowing zero pivots & infinities) have INFINITE */
+/* element growth. */
+/* For this reason, we should use PIVMIN in this subroutine so that at */
+/* least the L D L^T factorization exists. It can be checked afterwards */
+/* whether the element growth caused bad residuals/orthogonality. */
+/* Decide whether the code should accept the best among all */
+/* representations despite large element growth or signal INFO=1 */
+ nofail = TRUE_;
+
+/* Compute the average gap length of the cluster */
+ clwdth = (r__1 = w[*clend] - w[*clstrt], dabs(r__1)) + werr[*clend] +
+ werr[*clstrt];
+ avgap = clwdth / (real) (*clend - *clstrt);
+ mingap = dmin(*clgapl,*clgapr);
+/* Initial values for shifts to both ends of cluster */
+/* Computing MIN */
+ r__1 = w[*clstrt], r__2 = w[*clend];
+ lsigma = dmin(r__1,r__2) - werr[*clstrt];
+/* Computing MAX */
+ r__1 = w[*clstrt], r__2 = w[*clend];
+ rsigma = dmax(r__1,r__2) + werr[*clend];
+/* Use a small fudge to make sure that we really shift to the outside */
+ lsigma -= dabs(lsigma) * 2.f * eps;
+ rsigma += dabs(rsigma) * 2.f * eps;
+/* Compute upper bounds for how much to back off the initial shifts */
+ ldmax = mingap * .25f + *pivmin * 2.f;
+ rdmax = mingap * .25f + *pivmin * 2.f;
+/* Computing MAX */
+ r__1 = avgap, r__2 = wgap[*clstrt];
+ ldelta = dmax(r__1,r__2) / fact;
+/* Computing MAX */
+ r__1 = avgap, r__2 = wgap[*clend - 1];
+ rdelta = dmax(r__1,r__2) / fact;
+
+/* Initialize the record of the best representation found */
+
+ s = slamch_("S");
+ smlgrowth = 1.f / s;
+ fail = (real) (*n - 1) * mingap / (*spdiam * eps);
+ fail2 = (real) (*n - 1) * mingap / (*spdiam * sqrt(eps));
+ bestshift = lsigma;
+
+/* while (KTRY <= KTRYMAX) */
+ ktry = 0;
+ growthbound = *spdiam * 8.f;
+L5:
+ sawnan1 = FALSE_;
+ sawnan2 = FALSE_;
+/* Ensure that we do not back off too much of the initial shifts */
+ ldelta = dmin(ldmax,ldelta);
+ rdelta = dmin(rdmax,rdelta);
+/* Compute the element growth when shifting to both ends of the cluster */
+/* accept the shift if there is no element growth at one of the two ends */
+/* Left end */
+/* Factor L D L^T - LSIGMA I = L(+) D(+) L(+)^T, storing the diagonal in */
+/* DPLUS and the subdiagonal in LPLUS; MAX1 tracks the element growth, */
+/* i.e. the largest |DPLUS(i)| encountered. */
+ s = -lsigma;
+ dplus[1] = d__[1] + s;
+ if (dabs(dplus[1]) < *pivmin) {
+ dplus[1] = -(*pivmin);
+/* Need to set SAWNAN1 because refined RRR test should not be used */
+/* in this case */
+ sawnan1 = TRUE_;
+ }
+ max1 = dabs(dplus[1]);
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ lplus[i__] = ld[i__] / dplus[i__];
+ s = s * lplus[i__] * l[i__] - lsigma;
+ dplus[i__ + 1] = d__[i__ + 1] + s;
+ if ((r__1 = dplus[i__ + 1], dabs(r__1)) < *pivmin) {
+ dplus[i__ + 1] = -(*pivmin);
+/* Need to set SAWNAN1 because refined RRR test should not be used */
+/* in this case */
+ sawnan1 = TRUE_;
+ }
+/* Computing MAX */
+ r__2 = max1, r__3 = (r__1 = dplus[i__ + 1], dabs(r__1));
+ max1 = dmax(r__2,r__3);
+/* L6: */
+ }
+ sawnan1 = sawnan1 || sisnan_(&max1);
+/* Accept the left shift if forced, or if the element growth stayed */
+/* within bound and no NaN/tiny pivot occurred (&& binds tighter than ||). */
+ if (forcer || max1 <= growthbound && ! sawnan1) {
+ *sigma = lsigma;
+ shift = 1;
+ goto L100;
+ }
+/* Right end */
+/* Same factorization for the right shift RSIGMA; the factors go to */
+/* WORK(1:N) (diagonal) and WORK(N+1:2N-1) (subdiagonal), growth in MAX2. */
+ s = -rsigma;
+ work[1] = d__[1] + s;
+ if (dabs(work[1]) < *pivmin) {
+ work[1] = -(*pivmin);
+/* Need to set SAWNAN2 because refined RRR test should not be used */
+/* in this case */
+ sawnan2 = TRUE_;
+ }
+ max2 = dabs(work[1]);
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[*n + i__] = ld[i__] / work[i__];
+ s = s * work[*n + i__] * l[i__] - rsigma;
+ work[i__ + 1] = d__[i__ + 1] + s;
+ if ((r__1 = work[i__ + 1], dabs(r__1)) < *pivmin) {
+ work[i__ + 1] = -(*pivmin);
+/* Need to set SAWNAN2 because refined RRR test should not be used */
+/* in this case */
+ sawnan2 = TRUE_;
+ }
+/* Computing MAX */
+ r__2 = max2, r__3 = (r__1 = work[i__ + 1], dabs(r__1));
+ max2 = dmax(r__2,r__3);
+/* L7: */
+ }
+ sawnan2 = sawnan2 || sisnan_(&max2);
+ if (forcer || max2 <= growthbound && ! sawnan2) {
+ *sigma = rsigma;
+ shift = 2;
+ goto L100;
+ }
+/* If we are at this point, both shifts led to too much element growth */
+/* Record the better of the two shifts (provided it didn't lead to NaN) */
+ if (sawnan1 && sawnan2) {
+/* both MAX1 and MAX2 are NaN */
+ goto L50;
+ } else {
+/* INDX remembers which side grew less: 1 = left shift, 2 = right shift */
+ if (! sawnan1) {
+ indx = 1;
+ if (max1 <= smlgrowth) {
+ smlgrowth = max1;
+ bestshift = lsigma;
+ }
+ }
+ if (! sawnan2) {
+ if (sawnan1 || max2 <= max1) {
+ indx = 2;
+ }
+ if (max2 <= smlgrowth) {
+ smlgrowth = max2;
+ bestshift = rsigma;
+ }
+ }
+ }
+/* If we are here, both the left and the right shift led to */
+/* element growth. If the element growth is moderate, then */
+/* we may still accept the representation, if it passes a */
+/* refined test for RRR. This test supposes that no NaN occurred. */
+/* Moreover, we use the refined RRR test only for isolated clusters. */
+ if (clwdth < mingap / 128.f && dmin(max1,max2) < fail2 && ! sawnan1 && !
+ sawnan2) {
+ dorrr1 = TRUE_;
+ } else {
+ dorrr1 = FALSE_;
+ }
+ tryrrr1 = TRUE_;
+ if (tryrrr1 && dorrr1) {
+ if (indx == 1) {
+/* Refined RRR test for the left shift: PROD accumulates products of the */
+/* right-end factorization's off-diagonals, ZNM2 = 1 + sum of PROD^2, and */
+/* TMP the largest |DPLUS(i)*PROD|; the shift is accepted when */
+/* RRR1 = TMP / (SPDIAM*sqrt(ZNM2)) <= 8. */
+ tmp = (r__1 = dplus[*n], dabs(r__1));
+ znm2 = 1.f;
+ prod = 1.f;
+ oldp = 1.f;
+ for (i__ = *n - 1; i__ >= 1; --i__) {
+ if (prod <= eps) {
+ prod = dplus[i__ + 1] * work[*n + i__ + 1] / (dplus[i__] *
+ work[*n + i__]) * oldp;
+ } else {
+ prod *= (r__1 = work[*n + i__], dabs(r__1));
+ }
+ oldp = prod;
+/* Computing 2nd power */
+ r__1 = prod;
+ znm2 += r__1 * r__1;
+/* Computing MAX */
+ r__2 = tmp, r__3 = (r__1 = dplus[i__] * prod, dabs(r__1));
+ tmp = dmax(r__2,r__3);
+/* L15: */
+ }
+ rrr1 = tmp / (*spdiam * sqrt(znm2));
+ if (rrr1 <= 8.f) {
+ *sigma = lsigma;
+ shift = 1;
+ goto L100;
+ }
+ } else if (indx == 2) {
+/* Analogous refined RRR test for the right shift. */
+ tmp = (r__1 = work[*n], dabs(r__1));
+ znm2 = 1.f;
+ prod = 1.f;
+ oldp = 1.f;
+ for (i__ = *n - 1; i__ >= 1; --i__) {
+ if (prod <= eps) {
+ prod = work[i__ + 1] * lplus[i__ + 1] / (work[i__] *
+ lplus[i__]) * oldp;
+ } else {
+ prod *= (r__1 = lplus[i__], dabs(r__1));
+ }
+ oldp = prod;
+/* Computing 2nd power */
+ r__1 = prod;
+ znm2 += r__1 * r__1;
+/* Computing MAX */
+ r__2 = tmp, r__3 = (r__1 = work[i__] * prod, dabs(r__1));
+ tmp = dmax(r__2,r__3);
+/* L16: */
+ }
+ rrr2 = tmp / (*spdiam * sqrt(znm2));
+ if (rrr2 <= 8.f) {
+ *sigma = rsigma;
+ shift = 2;
+ goto L100;
+ }
+ }
+ }
+L50:
+/* Only one round of backing off is performed (the KTRY < 1 test below */
+/* hard-codes KTRYMAX = 1). */
+ if (ktry < 1) {
+/* If we are here, both shifts failed also the RRR test. */
+/* Back off to the outside */
+/* Computing MAX */
+ r__1 = lsigma - ldelta, r__2 = lsigma - ldmax;
+ lsigma = dmax(r__1,r__2);
+/* Computing MIN */
+ r__1 = rsigma + rdelta, r__2 = rsigma + rdmax;
+ rsigma = dmin(r__1,r__2);
+ ldelta *= 2.f;
+ rdelta *= 2.f;
+ ++ktry;
+ goto L5;
+ } else {
+/* None of the representations investigated satisfied our */
+/* criteria. Take the best one we found. */
+ if (smlgrowth < fail || nofail) {
+ lsigma = bestshift;
+ rsigma = bestshift;
+ forcer = TRUE_;
+ goto L5;
+ } else {
+ *info = 1;
+ return 0;
+ }
+ }
+L100:
+ if (shift == 1) {
+/* DPLUS and LPLUS already hold the accepted left-shift factorization */
+ } else if (shift == 2) {
+/* store new L and D back into DPLUS, LPLUS */
+ scopy_(n, &work[1], &c__1, &dplus[1], &c__1);
+ i__1 = *n - 1;
+ scopy_(&i__1, &work[*n + 1], &c__1, &lplus[1], &c__1);
+ }
+ return 0;
+
+/* End of SLARRF */
+
+} /* slarrf_ */
diff --git a/contrib/libs/clapack/slarrj.c b/contrib/libs/clapack/slarrj.c
new file mode 100644
index 0000000000..4091bb6e32
--- /dev/null
+++ b/contrib/libs/clapack/slarrj.c
@@ -0,0 +1,337 @@
+/* slarrj.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slarrj_(integer *n, real *d__, real *e2, integer *ifirst,
+ integer *ilast, real *rtol, integer *offset, real *w, real *werr,
+ real *work, integer *iwork, real *pivmin, real *spdiam, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double log(doublereal);
+
+ /* Local variables */
+ integer i__, j, k, p;
+ real s;
+ integer i1, i2, ii;
+ real fac, mid;
+ integer cnt;
+ real tmp, left;
+ integer iter, nint, prev, next, savi1;
+ real right, width, dplus;
+ integer olnint, maxitr;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Given the initial eigenvalue approximations of T, SLARRJ */
+/* does bisection to refine the eigenvalues of T, */
+/* W( IFIRST-OFFSET ) through W( ILAST-OFFSET ), to more accuracy. Initial */
+/* guesses for these eigenvalues are input in W, the corresponding estimate */
+/* of the error in these guesses in WERR. During bisection, intervals */
+/* [left, right] are maintained by storing their mid-points and */
+/* semi-widths in the arrays W and WERR respectively. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. */
+
+/* D (input) REAL array, dimension (N) */
+/* The N diagonal elements of T. */
+
+/* E2 (input) REAL array, dimension (N-1) */
+/* The Squares of the (N-1) subdiagonal elements of T. */
+
+/* IFIRST (input) INTEGER */
+/* The index of the first eigenvalue to be computed. */
+
+/* ILAST (input) INTEGER */
+/* The index of the last eigenvalue to be computed. */
+
+/* RTOL (input) REAL */
+/* Tolerance for the convergence of the bisection intervals. */
+/* An interval [LEFT,RIGHT] has converged if */
+/* RIGHT-LEFT.LT.RTOL*MAX(|LEFT|,|RIGHT|). */
+
+/* OFFSET (input) INTEGER */
+/* Offset for the arrays W and WERR, i.e., the IFIRST-OFFSET */
+/* through ILAST-OFFSET elements of these arrays are to be used. */
+
+/* W (input/output) REAL array, dimension (N) */
+/* On input, W( IFIRST-OFFSET ) through W( ILAST-OFFSET ) are */
+/* estimates of the eigenvalues of L D L^T indexed IFIRST through */
+/* ILAST. */
+/* On output, these estimates are refined. */
+
+/* WERR (input/output) REAL array, dimension (N) */
+/* On input, WERR( IFIRST-OFFSET ) through WERR( ILAST-OFFSET ) are */
+/* the errors in the estimates of the corresponding elements in W. */
+/* On output, these errors are refined. */
+
+/* WORK (workspace) REAL array, dimension (2*N) */
+/* Workspace. */
+
+/* IWORK (workspace) INTEGER array, dimension (2*N) */
+/* Workspace. */
+
+/* PIVMIN (input) REAL */
+/* The minimum pivot in the Sturm sequence for T. */
+
+/* SPDIAM (input) REAL */
+/* The spectral diameter of T. */
+
+/* INFO (output) INTEGER */
+/* Error flag. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --iwork;
+ --work;
+ --werr;
+ --w;
+ --e2;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+
+/* MAXITR bisection steps are enough to reduce an interval of width */
+/* SPDIAM down to roughly PIVMIN. */
+ maxitr = (integer) ((log(*spdiam + *pivmin) - log(*pivmin)) / log(2.f)) +
+ 2;
+
+/* Initialize unconverged intervals in [ WORK(2*I-1), WORK(2*I) ]. */
+/* The Sturm Count, Count( WORK(2*I-1) ) is arranged to be I-1, while */
+/* Count( WORK(2*I) ) is stored in IWORK( 2*I ). The integer IWORK( 2*I-1 ) */
+/* for an unconverged interval is set to the index of the next unconverged */
+/* interval, and is -1 or 0 for a converged interval. Thus a linked */
+/* list of unconverged intervals is set up. */
+
+ i1 = *ifirst;
+ i2 = *ilast;
+/* The number of unconverged intervals */
+ nint = 0;
+/* The last unconverged interval found */
+ prev = 0;
+ i__1 = i2;
+ for (i__ = i1; i__ <= i__1; ++i__) {
+ k = i__ << 1;
+ ii = i__ - *offset;
+ left = w[ii] - werr[ii];
+ mid = w[ii];
+ right = w[ii] + werr[ii];
+ width = right - mid;
+/* Computing MAX */
+ r__1 = dabs(left), r__2 = dabs(right);
+ tmp = dmax(r__1,r__2);
+/* The following test prevents the test of converged intervals */
+ if (width < *rtol * tmp) {
+/* This interval has already converged and does not need refinement. */
+/* (Note that the gaps might change through refining the */
+/* eigenvalues, however, they can only get bigger.) */
+/* Remove it from the list. */
+ iwork[k - 1] = -1;
+/* Make sure that I1 always points to the first unconverged interval */
+ if (i__ == i1 && i__ < i2) {
+ i1 = i__ + 1;
+ }
+ if (prev >= i1 && i__ <= i2) {
+ iwork[(prev << 1) - 1] = i__ + 1;
+ }
+ } else {
+/* unconverged interval found */
+ prev = i__;
+/* Make sure that [LEFT,RIGHT] contains the desired eigenvalue */
+
+/* Do while( CNT(LEFT).GT.I-1 ) */
+
+ fac = 1.f;
+L20:
+/* Sturm count at LEFT: CNT = number of negative pivots in the */
+/* factorization of T - LEFT*I (E2 holds the squared off-diagonals). */
+ cnt = 0;
+ s = left;
+ dplus = d__[1] - s;
+ if (dplus < 0.f) {
+ ++cnt;
+ }
+ i__2 = *n;
+ for (j = 2; j <= i__2; ++j) {
+ dplus = d__[j] - s - e2[j - 1] / dplus;
+ if (dplus < 0.f) {
+ ++cnt;
+ }
+/* L30: */
+ }
+/* LEFT still lies above eigenvalue I: move it further down, */
+/* doubling the step on each retry. */
+ if (cnt > i__ - 1) {
+ left -= werr[ii] * fac;
+ fac *= 2.f;
+ goto L20;
+ }
+
+/* Do while( CNT(RIGHT).LT.I ) */
+
+ fac = 1.f;
+L50:
+/* Analogous Sturm count at RIGHT */
+ cnt = 0;
+ s = right;
+ dplus = d__[1] - s;
+ if (dplus < 0.f) {
+ ++cnt;
+ }
+ i__2 = *n;
+ for (j = 2; j <= i__2; ++j) {
+ dplus = d__[j] - s - e2[j - 1] / dplus;
+ if (dplus < 0.f) {
+ ++cnt;
+ }
+/* L60: */
+ }
+ if (cnt < i__) {
+ right += werr[ii] * fac;
+ fac *= 2.f;
+ goto L50;
+ }
+ ++nint;
+ iwork[k - 1] = i__ + 1;
+ iwork[k] = cnt;
+ }
+ work[k - 1] = left;
+ work[k] = right;
+/* L75: */
+ }
+ savi1 = i1;
+
+/* Do while( NINT.GT.0 ), i.e. there are still unconverged intervals */
+/* and while (ITER.LT.MAXITR) */
+
+ iter = 0;
+L80:
+ prev = i1 - 1;
+ i__ = i1;
+ olnint = nint;
+ i__1 = olnint;
+ for (p = 1; p <= i__1; ++p) {
+ k = i__ << 1;
+ ii = i__ - *offset;
+ next = iwork[k - 1];
+ left = work[k - 1];
+ right = work[k];
+ mid = (left + right) * .5f;
+/* semiwidth of interval */
+ width = right - mid;
+/* Computing MAX */
+ r__1 = dabs(left), r__2 = dabs(right);
+ tmp = dmax(r__1,r__2);
+ if (width < *rtol * tmp || iter == maxitr) {
+/* reduce number of unconverged intervals */
+ --nint;
+/* Mark interval as converged. */
+ iwork[k - 1] = 0;
+ if (i1 == i__) {
+ i1 = next;
+ } else {
+/* Prev holds the last unconverged interval previously examined */
+ if (prev >= i1) {
+ iwork[(prev << 1) - 1] = next;
+ }
+ }
+ i__ = next;
+ goto L100;
+ }
+ prev = i__;
+
+/* Perform one bisection step */
+
+ cnt = 0;
+ s = mid;
+ dplus = d__[1] - s;
+ if (dplus < 0.f) {
+ ++cnt;
+ }
+ i__2 = *n;
+ for (j = 2; j <= i__2; ++j) {
+ dplus = d__[j] - s - e2[j - 1] / dplus;
+ if (dplus < 0.f) {
+ ++cnt;
+ }
+/* L90: */
+ }
+/* Keep the half of [LEFT,RIGHT] that still contains eigenvalue I */
+ if (cnt <= i__ - 1) {
+ work[k - 1] = mid;
+ } else {
+ work[k] = mid;
+ }
+ i__ = next;
+L100:
+ ;
+ }
+ ++iter;
+/* do another loop if there are still unconverged intervals */
+/* However, in the last iteration, all intervals are accepted */
+/* since this is the best we can do. */
+ if (nint > 0 && iter <= maxitr) {
+ goto L80;
+ }
+
+
+/* At this point, all the intervals have converged */
+ i__1 = *ilast;
+ for (i__ = savi1; i__ <= i__1; ++i__) {
+ k = i__ << 1;
+ ii = i__ - *offset;
+/* All intervals marked by '0' have been refined. */
+ if (iwork[k - 1] == 0) {
+ w[ii] = (work[k - 1] + work[k]) * .5f;
+ werr[ii] = work[k] - w[ii];
+ }
+/* L110: */
+ }
+
+ return 0;
+
+/* End of SLARRJ */
+
+} /* slarrj_ */
diff --git a/contrib/libs/clapack/slarrk.c b/contrib/libs/clapack/slarrk.c
new file mode 100644
index 0000000000..b525a9f5ac
--- /dev/null
+++ b/contrib/libs/clapack/slarrk.c
@@ -0,0 +1,193 @@
+/* slarrk.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slarrk_(integer *n, integer *iw, real *gl, real *gu,
+ real *d__, real *e2, real *pivmin, real *reltol, real *w, real *werr,
+ integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double log(doublereal);
+
+ /* Local variables */
+ integer i__, it;
+ real mid, eps, tmp1, tmp2, left, atoli, right;
+ integer itmax;
+ real rtoli, tnorm;
+ extern doublereal slamch_(char *);
+ integer negcnt;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLARRK computes one eigenvalue of a symmetric tridiagonal */
+/* matrix T to suitable accuracy. This is an auxiliary code to be */
+/* called from SSTEMR. */
+
+/* To avoid overflow, the matrix must be scaled so that its */
+/* largest element is no greater than overflow**(1/2) * */
+/* underflow**(1/4) in absolute value, and for greatest */
+/* accuracy, it should not be much smaller than that. */
+
+/* See W. Kahan "Accurate Eigenvalues of a Symmetric Tridiagonal */
+/* Matrix", Report CS41, Computer Science Dept., Stanford */
+/* University, July 21, 1966. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the tridiagonal matrix T. N >= 0. */
+
+/* IW (input) INTEGER */
+/* The index of the eigenvalue to be returned. */
+
+/* GL (input) REAL */
+/* GU (input) REAL */
+/* A lower (GL) and an upper (GU) bound on the eigenvalue. */
+
+/* D (input) REAL array, dimension (N) */
+/* The n diagonal elements of the tridiagonal matrix T. */
+
+/* E2 (input) REAL array, dimension (N-1) */
+/* The (n-1) squared off-diagonal elements of the tridiagonal matrix T. */
+
+/* PIVMIN (input) REAL */
+/* The minimum pivot allowed in the Sturm sequence for T. */
+
+/* RELTOL (input) REAL */
+/* The minimum relative width of an interval. When an interval */
+/* is narrower than RELTOL times the larger (in */
+/* magnitude) endpoint, then it is considered to be */
+/* sufficiently small, i.e., converged. Note: this should */
+/* always be at least radix*machine epsilon. */
+
+/* W (output) REAL */
+
+/* WERR (output) REAL */
+/* The error bound on the corresponding eigenvalue approximation */
+/* in W. */
+
+/* INFO (output) INTEGER */
+/* = 0: Eigenvalue converged */
+/* = -1: Eigenvalue did NOT converge */
+
+/* Internal Parameters */
+/* =================== */
+
+/* FUDGE REAL , default = 2 */
+/* A "fudge factor" to widen the Gershgorin intervals. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Get machine constants */
+ /* Parameter adjustments */
+ --e2;
+ --d__;
+
+ /* Function Body */
+ eps = slamch_("P");
+/* Computing MAX */
+ r__1 = dabs(*gl), r__2 = dabs(*gu);
+ tnorm = dmax(r__1,r__2);
+ rtoli = *reltol;
+ atoli = *pivmin * 4.f;
+/* ITMAX bisection steps suffice to shrink the interval down to about PIVMIN */
+ itmax = (integer) ((log(tnorm + *pivmin) - log(*pivmin)) / log(2.f)) + 2;
+/* Assume non-convergence until the interval test below succeeds */
+ *info = -1;
+/* Widen [GL,GU] by the FUDGE factor (2*EPS*N) plus a multiple of PIVMIN */
+ left = *gl - tnorm * 2.f * eps * *n - *pivmin * 4.f;
+ right = *gu + tnorm * 2.f * eps * *n + *pivmin * 4.f;
+ it = 0;
+L10:
+
+/* Check if interval converged or maximum number of iterations reached */
+
+ tmp1 = (r__1 = right - left, dabs(r__1));
+/* Computing MAX */
+ r__1 = dabs(right), r__2 = dabs(left);
+ tmp2 = dmax(r__1,r__2);
+/* Computing MAX */
+ r__1 = max(atoli,*pivmin), r__2 = rtoli * tmp2;
+/* Converged when width < max(ATOLI, PIVMIN, RTOLI*|larger endpoint|) */
+ if (tmp1 < dmax(r__1,r__2)) {
+ *info = 0;
+ goto L30;
+ }
+ if (it > itmax) {
+ goto L30;
+ }
+
+/* Count number of negative pivots for mid-point */
+
+ ++it;
+ mid = (left + right) * .5f;
+ negcnt = 0;
+/* Sturm sequence on T - MID*I; pivots smaller than PIVMIN in magnitude */
+/* are clamped to -PIVMIN to avoid division by (near) zero. */
+ tmp1 = d__[1] - mid;
+ if (dabs(tmp1) < *pivmin) {
+ tmp1 = -(*pivmin);
+ }
+ if (tmp1 <= 0.f) {
+ ++negcnt;
+ }
+
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ tmp1 = d__[i__] - e2[i__ - 1] / tmp1 - mid;
+ if (dabs(tmp1) < *pivmin) {
+ tmp1 = -(*pivmin);
+ }
+ if (tmp1 <= 0.f) {
+ ++negcnt;
+ }
+/* L20: */
+ }
+/* Bisect toward the half of the interval containing the IW-th eigenvalue */
+ if (negcnt >= *iw) {
+ right = mid;
+ } else {
+ left = mid;
+ }
+ goto L10;
+L30:
+
+/* Converged or maximum number of iterations reached */
+
+ *w = (left + right) * .5f;
+ *werr = (r__1 = right - left, dabs(r__1)) * .5f;
+ return 0;
+
+/* End of SLARRK */
+
+} /* slarrk_ */
diff --git a/contrib/libs/clapack/slarrr.c b/contrib/libs/clapack/slarrr.c
new file mode 100644
index 0000000000..f7f1577ea1
--- /dev/null
+++ b/contrib/libs/clapack/slarrr.c
@@ -0,0 +1,175 @@
+/* slarrr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slarrr_(integer *n, real *d__, real *e, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    real r__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__;
+    real eps, tmp, tmp2, rmin, offdig;
+    extern doublereal slamch_(char *);
+    real safmin;
+    logical yesrel;
+    real smlnum, offdig2;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+
+/* Purpose */
+/* ======= */
+
+/* Perform tests to decide whether the symmetric tridiagonal matrix T */
+/* warrants expensive computations which guarantee high relative accuracy */
+/* in the eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N > 0. */
+
+/* D (input) REAL array, dimension (N) */
+/* The N diagonal elements of the tridiagonal matrix T. */
+
+/* E (input/output) REAL array, dimension (N) */
+/* On entry, the first (N-1) entries contain the subdiagonal */
+/* elements of the tridiagonal matrix T; E(N) is set to ZERO. */
+
+/* INFO (output) INTEGER */
+/* INFO = 0(default) : the matrix warrants computations preserving */
+/* relative accuracy. */
+/* INFO = 1 : the matrix warrants computations guaranteeing */
+/* only absolute accuracy. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* As a default, do NOT go for relative-accuracy preserving computations. */
+    /* Parameter adjustments */
+    --e;
+    --d__;
+
+    /* Function Body */
+    *info = 1;
+    safmin = slamch_("Safe minimum");
+    eps = slamch_("Precision");
+    smlnum = safmin / eps;
+    rmin = sqrt(smlnum);
+/* Tests for relative accuracy */
+
+/* Test for scaled diagonal dominance */
+/* Scale the diagonal entries to one and check whether the sum of the */
+/* off-diagonals is less than one */
+
+/* The sdd relative error bounds have a 1/(1- 2*x) factor in them, */
+/* x = max(OFFDIG + OFFDIG2), so when x is close to 1/2, no relative */
+/* accuracy is promised. In the notation of the code fragment below, */
+/* 1/(1 - (OFFDIG + OFFDIG2)) is the condition number. */
+/* We don't think it is worth going into "sdd mode" unless the relative */
+/* condition number is reasonable, not 1/macheps. */
+/* The threshold should be compatible with other thresholds used in the */
+/* code. We set OFFDIG + OFFDIG2 <= .999 =: RELCOND, it corresponds */
+/* to losing at most 3 decimal digits: 1 / (1 - (OFFDIG + OFFDIG2)) <= 1000 */
+/* instead of the current OFFDIG + OFFDIG2 < 1 */
+
+    yesrel = TRUE_;
+    offdig = 0.f;
+    tmp = sqrt((dabs(d__[1])));
+    if (tmp < rmin) {
+	yesrel = FALSE_;
+    }
+    if (! yesrel) {
+	goto L11;
+    }
+    i__1 = *n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+	tmp2 = sqrt((r__1 = d__[i__], dabs(r__1)));
+	if (tmp2 < rmin) {
+	    yesrel = FALSE_;
+	}
+	if (! yesrel) {
+	    goto L11;
+	}
+	offdig2 = (r__1 = e[i__ - 1], dabs(r__1)) / (tmp * tmp2);
+	if (offdig + offdig2 >= .999f) {
+	    yesrel = FALSE_;
+	}
+	if (! yesrel) {
+	    goto L11;
+	}
+	tmp = tmp2;
+	offdig = offdig2;
+/* L10: */
+    }
+L11:
+    if (yesrel) {
+	*info = 0;
+	return 0;
+    } else {
+    }
+
+
+/* *** MORE TO BE IMPLEMENTED *** */
+
+
+/* Test if the lower bidiagonal matrix L from T = L D L^T */
+/* (zero shift factorization) is well conditioned */
+
+
+/* Test if the upper bidiagonal matrix U from T = U D U^T */
+/* (zero shift factorization) is well conditioned. */
+/* In this case, the matrix needs to be flipped and, at the end */
+/* of the eigenvector computation, the flip needs to be applied */
+/* to the computed eigenvectors (and the support) */
+
+
+    return 0;
+
+/* END OF SLARRR */
+
+} /* slarrr_ */
diff --git a/contrib/libs/clapack/slarrv.c b/contrib/libs/clapack/slarrv.c
new file mode 100644
index 0000000000..c1dc8592cd
--- /dev/null
+++ b/contrib/libs/clapack/slarrv.c
@@ -0,0 +1,980 @@
+/* slarrv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b5 = 0.f;
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int slarrv_(integer *n, real *vl, real *vu, real *d__, real *
+ l, real *pivmin, integer *isplit, integer *m, integer *dol, integer *
+ dou, real *minrgp, real *rtol1, real *rtol2, real *w, real *werr,
+ real *wgap, integer *iblock, integer *indexw, real *gers, real *z__,
+ integer *ldz, integer *isuppz, real *work, integer *iwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2;
+ logical L__1;
+
+ /* Builtin functions */
+ double log(doublereal);
+
+ /* Local variables */
+ integer minwsize, i__, j, k, p, q, miniwsize, ii;
+ real gl;
+ integer im, in;
+ real gu, gap, eps, tau, tol, tmp;
+ integer zto;
+ real ztz;
+ integer iend, jblk;
+ real lgap;
+ integer done;
+ real rgap, left;
+ integer wend, iter;
+ real bstw;
+ integer itmp1, indld;
+ real fudge;
+ integer idone;
+ real sigma;
+ integer iinfo, iindr;
+ real resid;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ logical eskip;
+ real right;
+ integer nclus, zfrom;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ real rqtol;
+ integer iindc1, iindc2;
+ extern /* Subroutine */ int slar1v_(integer *, integer *, integer *, real
+ *, real *, real *, real *, real *, real *, real *, real *,
+ logical *, integer *, real *, real *, integer *, integer *, real *
+, real *, real *, real *);
+ logical stp2ii;
+ real lambda;
+ integer ibegin, indeig;
+ logical needbs;
+ integer indlld;
+ real sgndef, mingma;
+ extern doublereal slamch_(char *);
+ integer oldien, oldncl, wbegin;
+ real spdiam;
+ integer negcnt, oldcls;
+ real savgap;
+ integer ndepth;
+ real ssigma;
+ logical usedbs;
+ integer iindwk, offset;
+ real gaptol;
+ extern /* Subroutine */ int slarrb_(integer *, real *, real *, integer *,
+ integer *, real *, real *, integer *, real *, real *, real *,
+ real *, integer *, real *, real *, integer *, integer *), slarrf_(
+ integer *, real *, real *, real *, integer *, integer *, real *,
+ real *, real *, real *, real *, real *, real *, real *, real *,
+ real *, real *, integer *);
+ integer newcls, oldfst, indwrk, windex, oldlst;
+ logical usedrq;
+ integer newfst, newftt, parity, windmn, isupmn, newlst, windpl, zusedl,
+ newsiz, zusedu, zusedw;
+ real bstres, nrminv;
+ logical tryrqc;
+ integer isupmx;
+ real rqcorr;
+ extern /* Subroutine */ int slaset_(char *, integer *, integer *, real *,
+ real *, real *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLARRV computes the eigenvectors of the tridiagonal matrix */
+/* T = L D L^T given L, D and APPROXIMATIONS to the eigenvalues of L D L^T. */
+/* The input eigenvalues should have been computed by SLARRE. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* Lower and upper bounds of the interval that contains the desired */
+/* eigenvalues. VL < VU. Needed to compute gaps on the left or right */
+/* end of the extremal eigenvalues in the desired RANGE. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the N diagonal elements of the diagonal matrix D. */
+/* On exit, D may be overwritten. */
+
+/* L (input/output) REAL array, dimension (N) */
+/* On entry, the (N-1) subdiagonal elements of the unit */
+/* bidiagonal matrix L are in elements 1 to N-1 of L */
+/* (if the matrix is not split.) At the end of each block */
+/* is stored the corresponding shift as given by SLARRE. */
+/* On exit, L is overwritten. */
+
+/* PIVMIN (in) DOUBLE PRECISION */
+/* The minimum pivot allowed in the Sturm sequence. */
+
+/* ISPLIT (input) INTEGER array, dimension (N) */
+/* The splitting points, at which T breaks up into blocks. */
+/* The first block consists of rows/columns 1 to */
+/* ISPLIT( 1 ), the second of rows/columns ISPLIT( 1 )+1 */
+/* through ISPLIT( 2 ), etc. */
+
+/* M (input) INTEGER */
+/* The total number of input eigenvalues. 0 <= M <= N. */
+
+/* DOL (input) INTEGER */
+/* DOU (input) INTEGER */
+/* If the user wants to compute only selected eigenvectors from all */
+/* the eigenvalues supplied, he can specify an index range DOL:DOU. */
+/* Or else the setting DOL=1, DOU=M should be applied. */
+/* Note that DOL and DOU refer to the order in which the eigenvalues */
+/* are stored in W. */
+/* If the user wants to compute only selected eigenpairs, then */
+/* the columns DOL-1 to DOU+1 of the eigenvector space Z contain the */
+/* computed eigenvectors. All other columns of Z are set to zero. */
+
+/* MINRGP (input) REAL */
+
+/* RTOL1 (input) REAL */
+/* RTOL2 (input) REAL */
+/* Parameters for bisection. */
+/* An interval [LEFT,RIGHT] has converged if */
+/* RIGHT-LEFT.LT.MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) */
+
+/* W (input/output) REAL array, dimension (N) */
+/* The first M elements of W contain the APPROXIMATE eigenvalues for */
+/* which eigenvectors are to be computed. The eigenvalues */
+/* should be grouped by split-off block and ordered from */
+/* smallest to largest within the block ( The output array */
+/* W from SLARRE is expected here ). Furthermore, they are with */
+/* respect to the shift of the corresponding root representation */
+/* for their block. On exit, W holds the eigenvalues of the */
+/* UNshifted matrix. */
+
+/* WERR (input/output) REAL array, dimension (N) */
+/* The first M elements contain the semiwidth of the uncertainty */
+/* interval of the corresponding eigenvalue in W */
+
+/* WGAP (input/output) REAL array, dimension (N) */
+/* The separation from the right neighbor eigenvalue in W. */
+
+/* IBLOCK (input) INTEGER array, dimension (N) */
+/* The indices of the blocks (submatrices) associated with the */
+/* corresponding eigenvalues in W; IBLOCK(i)=1 if eigenvalue */
+/* W(i) belongs to the first block from the top, =2 if W(i) */
+/* belongs to the second block, etc. */
+
+/* INDEXW (input) INTEGER array, dimension (N) */
+/* The indices of the eigenvalues within each block (submatrix); */
+/* for example, INDEXW(i)= 10 and IBLOCK(i)=2 imply that the */
+/* i-th eigenvalue W(i) is the 10-th eigenvalue in the second block. */
+
+/* GERS (input) REAL array, dimension (2*N) */
+/* The N Gerschgorin intervals (the i-th Gerschgorin interval */
+/* is (GERS(2*i-1), GERS(2*i)). The Gerschgorin intervals should */
+/* be computed from the original UNshifted matrix. */
+
+/* Z (output) REAL array, dimension (LDZ, max(1,M) ) */
+/* If INFO = 0, the first M columns of Z contain the */
+/* orthonormal eigenvectors of the matrix T */
+/* corresponding to the input eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* ISUPPZ (output) INTEGER array, dimension ( 2*max(1,M) ) */
+/* The support of the eigenvectors in Z, i.e., the indices */
+/* indicating the nonzero elements in Z. The I-th eigenvector */
+/* is nonzero only in elements ISUPPZ( 2*I-1 ) through */
+/* ISUPPZ( 2*I ). */
+
+/* WORK (workspace) REAL array, dimension (12*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (7*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+
+/* > 0: A problem occurred in SLARRV. */
+/* < 0: One of the called subroutines signaled an internal problem. */
+/* Needs inspection of the corresponding parameter IINFO */
+/* for further information. */
+
+/* =-1: Problem in SLARRB when refining a child's eigenvalues. */
+/* =-2: Problem in SLARRF when computing the RRR of a child. */
+/* When a child is inside a tight cluster, it can be difficult */
+/* to find an RRR. A partial remedy from the user's point of */
+/* view is to make the parameter MINRGP smaller and recompile. */
+/* However, as the orthogonality of the computed vectors is */
+/* proportional to 1/MINRGP, the user should be aware that */
+/* he might be trading in precision when he decreases MINRGP. */
+/* =-3: Problem in SLARRB when refining a single eigenvalue */
+/* after the Rayleigh correction was rejected. */
+/* = 5: The Rayleigh Quotient Iteration failed to converge to */
+/* full accuracy in MAXITR steps. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+/* .. */
+/* The first N entries of WORK are reserved for the eigenvalues */
+ /* Parameter adjustments */
+ --d__;
+ --l;
+ --isplit;
+ --w;
+ --werr;
+ --wgap;
+ --iblock;
+ --indexw;
+ --gers;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --isuppz;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ indld = *n + 1;
+ indlld = (*n << 1) + 1;
+ indwrk = *n * 3 + 1;
+ minwsize = *n * 12;
+ i__1 = minwsize;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L5: */
+ }
+/* IWORK(IINDR+1:IINDR+N) hold the twist indices R for the */
+/* factorization used to compute the FP vector */
+ iindr = 0;
+/* IWORK(IINDC1+1:IINC2+N) are used to store the clusters of the current */
+/* layer and the one above. */
+ iindc1 = *n;
+ iindc2 = *n << 1;
+ iindwk = *n * 3 + 1;
+ miniwsize = *n * 7;
+ i__1 = miniwsize;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ iwork[i__] = 0;
+/* L10: */
+ }
+ zusedl = 1;
+ if (*dol > 1) {
+/* Set lower bound for use of Z */
+ zusedl = *dol - 1;
+ }
+ zusedu = *m;
+ if (*dou < *m) {
+/* Set lower bound for use of Z */
+ zusedu = *dou + 1;
+ }
+/* The width of the part of Z that is used */
+ zusedw = zusedu - zusedl + 1;
+ slaset_("Full", n, &zusedw, &c_b5, &c_b5, &z__[zusedl * z_dim1 + 1], ldz);
+ eps = slamch_("Precision");
+ rqtol = eps * 2.f;
+
+/* Set expert flags for standard code. */
+ tryrqc = TRUE_;
+ if (*dol == 1 && *dou == *m) {
+ } else {
+/* Only selected eigenpairs are computed. Since the other evalues */
+/* are not refined by RQ iteration, bisection has to compute to full */
+/* accuracy. */
+ *rtol1 = eps * 4.f;
+ *rtol2 = eps * 4.f;
+ }
+/* The entries WBEGIN:WEND in W, WERR, WGAP correspond to the */
+/* desired eigenvalues. The support of the nonzero eigenvector */
+/* entries is contained in the interval IBEGIN:IEND. */
+/* Remark that if k eigenpairs are desired, then the eigenvectors */
+/* are stored in k contiguous columns of Z. */
+/* DONE is the number of eigenvectors already computed */
+ done = 0;
+ ibegin = 1;
+ wbegin = 1;
+ i__1 = iblock[*m];
+ for (jblk = 1; jblk <= i__1; ++jblk) {
+ iend = isplit[jblk];
+ sigma = l[iend];
+/* Find the eigenvectors of the submatrix indexed IBEGIN */
+/* through IEND. */
+ wend = wbegin - 1;
+L15:
+ if (wend < *m) {
+ if (iblock[wend + 1] == jblk) {
+ ++wend;
+ goto L15;
+ }
+ }
+ if (wend < wbegin) {
+ ibegin = iend + 1;
+ goto L170;
+ } else if (wend < *dol || wbegin > *dou) {
+ ibegin = iend + 1;
+ wbegin = wend + 1;
+ goto L170;
+ }
+/* Find local spectral diameter of the block */
+ gl = gers[(ibegin << 1) - 1];
+ gu = gers[ibegin * 2];
+ i__2 = iend;
+ for (i__ = ibegin + 1; i__ <= i__2; ++i__) {
+/* Computing MIN */
+ r__1 = gers[(i__ << 1) - 1];
+ gl = dmin(r__1,gl);
+/* Computing MAX */
+ r__1 = gers[i__ * 2];
+ gu = dmax(r__1,gu);
+/* L20: */
+ }
+ spdiam = gu - gl;
+/* OLDIEN is the last index of the previous block */
+ oldien = ibegin - 1;
+/* Calculate the size of the current block */
+ in = iend - ibegin + 1;
+/* The number of eigenvalues in the current block */
+ im = wend - wbegin + 1;
+/* This is for a 1x1 block */
+ if (ibegin == iend) {
+ ++done;
+ z__[ibegin + wbegin * z_dim1] = 1.f;
+ isuppz[(wbegin << 1) - 1] = ibegin;
+ isuppz[wbegin * 2] = ibegin;
+ w[wbegin] += sigma;
+ work[wbegin] = w[wbegin];
+ ibegin = iend + 1;
+ ++wbegin;
+ goto L170;
+ }
+/* The desired (shifted) eigenvalues are stored in W(WBEGIN:WEND) */
+/* Note that these can be approximations, in this case, the corresp. */
+/* entries of WERR give the size of the uncertainty interval. */
+/* The eigenvalue approximations will be refined when necessary as */
+/* high relative accuracy is required for the computation of the */
+/* corresponding eigenvectors. */
+ scopy_(&im, &w[wbegin], &c__1, &work[wbegin], &c__1);
+/* We store in W the eigenvalue approximations w.r.t. the original */
+/* matrix T. */
+ i__2 = im;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ w[wbegin + i__ - 1] += sigma;
+/* L30: */
+ }
+/* NDEPTH is the current depth of the representation tree */
+ ndepth = 0;
+/* PARITY is either 1 or 0 */
+ parity = 1;
+/* NCLUS is the number of clusters for the next level of the */
+/* representation tree, we start with NCLUS = 1 for the root */
+ nclus = 1;
+ iwork[iindc1 + 1] = 1;
+ iwork[iindc1 + 2] = im;
+/* IDONE is the number of eigenvectors already computed in the current */
+/* block */
+ idone = 0;
+/* loop while( IDONE.LT.IM ) */
+/* generate the representation tree for the current block and */
+/* compute the eigenvectors */
+L40:
+ if (idone < im) {
+/* This is a crude protection against infinitely deep trees */
+ if (ndepth > *m) {
+ *info = -2;
+ return 0;
+ }
+/* breadth first processing of the current level of the representation */
+/* tree: OLDNCL = number of clusters on current level */
+ oldncl = nclus;
+/* reset NCLUS to count the number of child clusters */
+ nclus = 0;
+
+ parity = 1 - parity;
+ if (parity == 0) {
+ oldcls = iindc1;
+ newcls = iindc2;
+ } else {
+ oldcls = iindc2;
+ newcls = iindc1;
+ }
+/* Process the clusters on the current level */
+ i__2 = oldncl;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ j = oldcls + (i__ << 1);
+/* OLDFST, OLDLST = first, last index of current cluster. */
+/* cluster indices start with 1 and are relative */
+/* to WBEGIN when accessing W, WGAP, WERR, Z */
+ oldfst = iwork[j - 1];
+ oldlst = iwork[j];
+ if (ndepth > 0) {
+/* Retrieve relatively robust representation (RRR) of cluster */
+/* that has been computed at the previous level */
+/* The RRR is stored in Z and overwritten once the eigenvectors */
+/* have been computed or when the cluster is refined */
+ if (*dol == 1 && *dou == *m) {
+/* Get representation from location of the leftmost evalue */
+/* of the cluster */
+ j = wbegin + oldfst - 1;
+ } else {
+ if (wbegin + oldfst - 1 < *dol) {
+/* Get representation from the left end of Z array */
+ j = *dol - 1;
+ } else if (wbegin + oldfst - 1 > *dou) {
+/* Get representation from the right end of Z array */
+ j = *dou;
+ } else {
+ j = wbegin + oldfst - 1;
+ }
+ }
+ scopy_(&in, &z__[ibegin + j * z_dim1], &c__1, &d__[ibegin]
+, &c__1);
+ i__3 = in - 1;
+ scopy_(&i__3, &z__[ibegin + (j + 1) * z_dim1], &c__1, &l[
+ ibegin], &c__1);
+ sigma = z__[iend + (j + 1) * z_dim1];
+/* Set the corresponding entries in Z to zero */
+ slaset_("Full", &in, &c__2, &c_b5, &c_b5, &z__[ibegin + j
+ * z_dim1], ldz);
+ }
+/* Compute DL and DLL of current RRR */
+ i__3 = iend - 1;
+ for (j = ibegin; j <= i__3; ++j) {
+ tmp = d__[j] * l[j];
+ work[indld - 1 + j] = tmp;
+ work[indlld - 1 + j] = tmp * l[j];
+/* L50: */
+ }
+ if (ndepth > 0) {
+/* P and Q are index of the first and last eigenvalue to compute */
+/* within the current block */
+ p = indexw[wbegin - 1 + oldfst];
+ q = indexw[wbegin - 1 + oldlst];
+/* Offset for the arrays WORK, WGAP and WERR, i.e., th P-OFFSET */
+/* thru' Q-OFFSET elements of these arrays are to be used. */
+/* OFFSET = P-OLDFST */
+ offset = indexw[wbegin] - 1;
+/* perform limited bisection (if necessary) to get approximate */
+/* eigenvalues to the precision needed. */
+ slarrb_(&in, &d__[ibegin], &work[indlld + ibegin - 1], &p,
+ &q, rtol1, rtol2, &offset, &work[wbegin], &wgap[
+ wbegin], &werr[wbegin], &work[indwrk], &iwork[
+ iindwk], pivmin, &spdiam, &in, &iinfo);
+ if (iinfo != 0) {
+ *info = -1;
+ return 0;
+ }
+/* We also recompute the extremal gaps. W holds all eigenvalues */
+/* of the unshifted matrix and must be used for computation */
+/* of WGAP, the entries of WORK might stem from RRRs with */
+/* different shifts. The gaps from WBEGIN-1+OLDFST to */
+/* WBEGIN-1+OLDLST are correctly computed in SLARRB. */
+/* However, we only allow the gaps to become greater since */
+/* this is what should happen when we decrease WERR */
+ if (oldfst > 1) {
+/* Computing MAX */
+ r__1 = wgap[wbegin + oldfst - 2], r__2 = w[wbegin +
+ oldfst - 1] - werr[wbegin + oldfst - 1] - w[
+ wbegin + oldfst - 2] - werr[wbegin + oldfst -
+ 2];
+ wgap[wbegin + oldfst - 2] = dmax(r__1,r__2);
+ }
+ if (wbegin + oldlst - 1 < wend) {
+/* Computing MAX */
+ r__1 = wgap[wbegin + oldlst - 1], r__2 = w[wbegin +
+ oldlst] - werr[wbegin + oldlst] - w[wbegin +
+ oldlst - 1] - werr[wbegin + oldlst - 1];
+ wgap[wbegin + oldlst - 1] = dmax(r__1,r__2);
+ }
+/* Each time the eigenvalues in WORK get refined, we store */
+/* the newly found approximation with all shifts applied in W */
+ i__3 = oldlst;
+ for (j = oldfst; j <= i__3; ++j) {
+ w[wbegin + j - 1] = work[wbegin + j - 1] + sigma;
+/* L53: */
+ }
+ }
+/* Process the current node. */
+ newfst = oldfst;
+ i__3 = oldlst;
+ for (j = oldfst; j <= i__3; ++j) {
+ if (j == oldlst) {
+/* we are at the right end of the cluster, this is also the */
+/* boundary of the child cluster */
+ newlst = j;
+ } else if (wgap[wbegin + j - 1] >= *minrgp * (r__1 = work[
+ wbegin + j - 1], dabs(r__1))) {
+/* the right relative gap is big enough, the child cluster */
+/* (NEWFST,..,NEWLST) is well separated from the following */
+ newlst = j;
+ } else {
+/* inside a child cluster, the relative gap is not */
+/* big enough. */
+ goto L140;
+ }
+/* Compute size of child cluster found */
+ newsiz = newlst - newfst + 1;
+/* NEWFTT is the place in Z where the new RRR or the computed */
+/* eigenvector is to be stored */
+ if (*dol == 1 && *dou == *m) {
+/* Store representation at location of the leftmost evalue */
+/* of the cluster */
+ newftt = wbegin + newfst - 1;
+ } else {
+ if (wbegin + newfst - 1 < *dol) {
+/* Store representation at the left end of Z array */
+ newftt = *dol - 1;
+ } else if (wbegin + newfst - 1 > *dou) {
+/* Store representation at the right end of Z array */
+ newftt = *dou;
+ } else {
+ newftt = wbegin + newfst - 1;
+ }
+ }
+ if (newsiz > 1) {
+
+/* Current child is not a singleton but a cluster. */
+/* Compute and store new representation of child. */
+
+
+/* Compute left and right cluster gap. */
+
+/* LGAP and RGAP are not computed from WORK because */
+/* the eigenvalue approximations may stem from RRRs */
+/* with different shifts. However, W holds all eigenvalues */
+/* of the unshifted matrix. Still, the entries in WGAP */
+/* have to be computed from WORK since the entries */
+/* in W might be of the same order so that gaps are not */
+/* exhibited correctly for very close eigenvalues. */
+ if (newfst == 1) {
+/* Computing MAX */
+ r__1 = 0.f, r__2 = w[wbegin] - werr[wbegin] - *vl;
+ lgap = dmax(r__1,r__2);
+ } else {
+ lgap = wgap[wbegin + newfst - 2];
+ }
+ rgap = wgap[wbegin + newlst - 1];
+
+/* Compute left- and rightmost eigenvalue of child */
+/* to high precision in order to shift as close */
+/* as possible and obtain as large relative gaps */
+/* as possible */
+
+ for (k = 1; k <= 2; ++k) {
+ if (k == 1) {
+ p = indexw[wbegin - 1 + newfst];
+ } else {
+ p = indexw[wbegin - 1 + newlst];
+ }
+ offset = indexw[wbegin] - 1;
+ slarrb_(&in, &d__[ibegin], &work[indlld + ibegin
+ - 1], &p, &p, &rqtol, &rqtol, &offset, &
+ work[wbegin], &wgap[wbegin], &werr[wbegin]
+, &work[indwrk], &iwork[iindwk], pivmin, &
+ spdiam, &in, &iinfo);
+/* L55: */
+ }
+
+ if (wbegin + newlst - 1 < *dol || wbegin + newfst - 1
+ > *dou) {
+/* if the cluster contains no desired eigenvalues */
+/* skip the computation of that branch of the rep. tree */
+
+/* We could skip before the refinement of the extremal */
+/* eigenvalues of the child, but then the representation */
+/* tree could be different from the one when nothing is */
+/* skipped. For this reason we skip at this place. */
+ idone = idone + newlst - newfst + 1;
+ goto L139;
+ }
+
+/* Compute RRR of child cluster. */
+/* Note that the new RRR is stored in Z */
+
+/* SLARRF needs LWORK = 2*N */
+ slarrf_(&in, &d__[ibegin], &l[ibegin], &work[indld +
+ ibegin - 1], &newfst, &newlst, &work[wbegin],
+ &wgap[wbegin], &werr[wbegin], &spdiam, &lgap,
+ &rgap, pivmin, &tau, &z__[ibegin + newftt *
+ z_dim1], &z__[ibegin + (newftt + 1) * z_dim1],
+ &work[indwrk], &iinfo);
+ if (iinfo == 0) {
+/* a new RRR for the cluster was found by SLARRF */
+/* update shift and store it */
+ ssigma = sigma + tau;
+ z__[iend + (newftt + 1) * z_dim1] = ssigma;
+/* WORK() are the midpoints and WERR() the semi-width */
+/* Note that the entries in W are unchanged. */
+ i__4 = newlst;
+ for (k = newfst; k <= i__4; ++k) {
+ fudge = eps * 3.f * (r__1 = work[wbegin + k -
+ 1], dabs(r__1));
+ work[wbegin + k - 1] -= tau;
+ fudge += eps * 4.f * (r__1 = work[wbegin + k
+ - 1], dabs(r__1));
+/* Fudge errors */
+ werr[wbegin + k - 1] += fudge;
+/* Gaps are not fudged. Provided that WERR is small */
+/* when eigenvalues are close, a zero gap indicates */
+/* that a new representation is needed for resolving */
+/* the cluster. A fudge could lead to a wrong decision */
+/* of judging eigenvalues 'separated' which in */
+/* reality are not. This could have a negative impact */
+/* on the orthogonality of the computed eigenvectors. */
+/* L116: */
+ }
+ ++nclus;
+ k = newcls + (nclus << 1);
+ iwork[k - 1] = newfst;
+ iwork[k] = newlst;
+ } else {
+ *info = -2;
+ return 0;
+ }
+ } else {
+
+/* Compute eigenvector of singleton */
+
+ iter = 0;
+
+ tol = log((real) in) * 4.f * eps;
+
+ k = newfst;
+ windex = wbegin + k - 1;
+/* Computing MAX */
+ i__4 = windex - 1;
+ windmn = max(i__4,1);
+/* Computing MIN */
+ i__4 = windex + 1;
+ windpl = min(i__4,*m);
+ lambda = work[windex];
+ ++done;
+/* Check if eigenvector computation is to be skipped */
+ if (windex < *dol || windex > *dou) {
+ eskip = TRUE_;
+ goto L125;
+ } else {
+ eskip = FALSE_;
+ }
+ left = work[windex] - werr[windex];
+ right = work[windex] + werr[windex];
+ indeig = indexw[windex];
+/* Note that since we compute the eigenpairs for a child, */
+/* all eigenvalue approximations are w.r.t the same shift. */
+/* In this case, the entries in WORK should be used for */
+/* computing the gaps since they exhibit even very small */
+/* differences in the eigenvalues, as opposed to the */
+/* entries in W which might "look" the same. */
+ if (k == 1) {
+/* In the case RANGE='I' and with not much initial */
+/* accuracy in LAMBDA and VL, the formula */
+/* LGAP = MAX( ZERO, (SIGMA - VL) + LAMBDA ) */
+/* can lead to an overestimation of the left gap and */
+/* thus to inadequately early RQI 'convergence'. */
+/* Prevent this by forcing a small left gap. */
+/* Computing MAX */
+ r__1 = dabs(left), r__2 = dabs(right);
+ lgap = eps * dmax(r__1,r__2);
+ } else {
+ lgap = wgap[windmn];
+ }
+ if (k == im) {
+/* In the case RANGE='I' and with not much initial */
+/* accuracy in LAMBDA and VU, the formula */
+/* can lead to an overestimation of the right gap and */
+/* thus to inadequately early RQI 'convergence'. */
+/* Prevent this by forcing a small right gap. */
+/* Computing MAX */
+ r__1 = dabs(left), r__2 = dabs(right);
+ rgap = eps * dmax(r__1,r__2);
+ } else {
+ rgap = wgap[windex];
+ }
+ gap = dmin(lgap,rgap);
+ if (k == 1 || k == im) {
+/* The eigenvector support can become wrong */
+/* because significant entries could be cut off due to a */
+/* large GAPTOL parameter in LAR1V. Prevent this. */
+ gaptol = 0.f;
+ } else {
+ gaptol = gap * eps;
+ }
+ isupmn = in;
+ isupmx = 1;
+/* Update WGAP so that it holds the minimum gap */
+/* to the left or the right. This is crucial in the */
+/* case where bisection is used to ensure that the */
+/* eigenvalue is refined up to the required precision. */
+/* The correct value is restored afterwards. */
+ savgap = wgap[windex];
+ wgap[windex] = gap;
+/* We want to use the Rayleigh Quotient Correction */
+/* as often as possible since it converges quadratically */
+/* when we are close enough to the desired eigenvalue. */
+/* However, the Rayleigh Quotient can have the wrong sign */
+/* and lead us away from the desired eigenvalue. In this */
+/* case, the best we can do is to use bisection. */
+ usedbs = FALSE_;
+ usedrq = FALSE_;
+/* Bisection is initially turned off unless it is forced */
+ needbs = ! tryrqc;
+L120:
+/* Check if bisection should be used to refine eigenvalue */
+ if (needbs) {
+/* Take the bisection as new iterate */
+ usedbs = TRUE_;
+ itmp1 = iwork[iindr + windex];
+ offset = indexw[wbegin] - 1;
+ r__1 = eps * 2.f;
+ slarrb_(&in, &d__[ibegin], &work[indlld + ibegin
+ - 1], &indeig, &indeig, &c_b5, &r__1, &
+ offset, &work[wbegin], &wgap[wbegin], &
+ werr[wbegin], &work[indwrk], &iwork[
+ iindwk], pivmin, &spdiam, &itmp1, &iinfo);
+ if (iinfo != 0) {
+ *info = -3;
+ return 0;
+ }
+ lambda = work[windex];
+/* Reset twist index from inaccurate LAMBDA to */
+/* force computation of true MINGMA */
+ iwork[iindr + windex] = 0;
+ }
+/* Given LAMBDA, compute the eigenvector. */
+ L__1 = ! usedbs;
+ slar1v_(&in, &c__1, &in, &lambda, &d__[ibegin], &l[
+ ibegin], &work[indld + ibegin - 1], &work[
+ indlld + ibegin - 1], pivmin, &gaptol, &z__[
+ ibegin + windex * z_dim1], &L__1, &negcnt, &
+ ztz, &mingma, &iwork[iindr + windex], &isuppz[
+ (windex << 1) - 1], &nrminv, &resid, &rqcorr,
+ &work[indwrk]);
+ if (iter == 0) {
+ bstres = resid;
+ bstw = lambda;
+ } else if (resid < bstres) {
+ bstres = resid;
+ bstw = lambda;
+ }
+/* Computing MIN */
+ i__4 = isupmn, i__5 = isuppz[(windex << 1) - 1];
+ isupmn = min(i__4,i__5);
+/* Computing MAX */
+ i__4 = isupmx, i__5 = isuppz[windex * 2];
+ isupmx = max(i__4,i__5);
+ ++iter;
+/* sin alpha <= |resid|/gap */
+/* Note that both the residual and the gap are */
+/* proportional to the matrix, so ||T|| doesn't play */
+/* a role in the quotient */
+
+/* Convergence test for Rayleigh-Quotient iteration */
+/* (omitted when Bisection has been used) */
+
+ if (resid > tol * gap && dabs(rqcorr) > rqtol * dabs(
+ lambda) && ! usedbs) {
+/* We need to check that the RQCORR update doesn't */
+/* move the eigenvalue away from the desired one and */
+/* towards a neighbor. -> protection with bisection */
+ if (indeig <= negcnt) {
+/* The wanted eigenvalue lies to the left */
+ sgndef = -1.f;
+ } else {
+/* The wanted eigenvalue lies to the right */
+ sgndef = 1.f;
+ }
+/* We only use the RQCORR if it improves the */
+/* the iterate reasonably. */
+ if (rqcorr * sgndef >= 0.f && lambda + rqcorr <=
+ right && lambda + rqcorr >= left) {
+ usedrq = TRUE_;
+/* Store new midpoint of bisection interval in WORK */
+ if (sgndef == 1.f) {
+/* The current LAMBDA is on the left of the true */
+/* eigenvalue */
+ left = lambda;
+/* We prefer to assume that the error estimate */
+/* is correct. We could make the interval not */
+/* as a bracket but to be modified if the RQCORR */
+/* chooses to. In this case, the RIGHT side should */
+/* be modified as follows: */
+/* RIGHT = MAX(RIGHT, LAMBDA + RQCORR) */
+ } else {
+/* The current LAMBDA is on the right of the true */
+/* eigenvalue */
+ right = lambda;
+/* See comment about assuming the error estimate is */
+/* correct above. */
+/* LEFT = MIN(LEFT, LAMBDA + RQCORR) */
+ }
+ work[windex] = (right + left) * .5f;
+/* Take RQCORR since it has the correct sign and */
+/* improves the iterate reasonably */
+ lambda += rqcorr;
+/* Update width of error interval */
+ werr[windex] = (right - left) * .5f;
+ } else {
+ needbs = TRUE_;
+ }
+ if (right - left < rqtol * dabs(lambda)) {
+/* The eigenvalue is computed to bisection accuracy */
+/* compute eigenvector and stop */
+ usedbs = TRUE_;
+ goto L120;
+ } else if (iter < 10) {
+ goto L120;
+ } else if (iter == 10) {
+ needbs = TRUE_;
+ goto L120;
+ } else {
+ *info = 5;
+ return 0;
+ }
+ } else {
+ stp2ii = FALSE_;
+ if (usedrq && usedbs && bstres <= resid) {
+ lambda = bstw;
+ stp2ii = TRUE_;
+ }
+ if (stp2ii) {
+/* improve error angle by second step */
+ L__1 = ! usedbs;
+ slar1v_(&in, &c__1, &in, &lambda, &d__[ibegin]
+, &l[ibegin], &work[indld + ibegin -
+ 1], &work[indlld + ibegin - 1],
+ pivmin, &gaptol, &z__[ibegin + windex
+ * z_dim1], &L__1, &negcnt, &ztz, &
+ mingma, &iwork[iindr + windex], &
+ isuppz[(windex << 1) - 1], &nrminv, &
+ resid, &rqcorr, &work[indwrk]);
+ }
+ work[windex] = lambda;
+ }
+
+/* Compute FP-vector support w.r.t. whole matrix */
+
+ isuppz[(windex << 1) - 1] += oldien;
+ isuppz[windex * 2] += oldien;
+ zfrom = isuppz[(windex << 1) - 1];
+ zto = isuppz[windex * 2];
+ isupmn += oldien;
+ isupmx += oldien;
+/* Ensure vector is ok if support in the RQI has changed */
+ if (isupmn < zfrom) {
+ i__4 = zfrom - 1;
+ for (ii = isupmn; ii <= i__4; ++ii) {
+ z__[ii + windex * z_dim1] = 0.f;
+/* L122: */
+ }
+ }
+ if (isupmx > zto) {
+ i__4 = isupmx;
+ for (ii = zto + 1; ii <= i__4; ++ii) {
+ z__[ii + windex * z_dim1] = 0.f;
+/* L123: */
+ }
+ }
+ i__4 = zto - zfrom + 1;
+ sscal_(&i__4, &nrminv, &z__[zfrom + windex * z_dim1],
+ &c__1);
+L125:
+/* Update W */
+ w[windex] = lambda + sigma;
+/* Recompute the gaps on the left and right */
+/* But only allow them to become larger and not */
+/* smaller (which can only happen through "bad" */
+/* cancellation and doesn't reflect the theory */
+/* where the initial gaps are underestimated due */
+/* to WERR being too crude.) */
+ if (! eskip) {
+ if (k > 1) {
+/* Computing MAX */
+ r__1 = wgap[windmn], r__2 = w[windex] - werr[
+ windex] - w[windmn] - werr[windmn];
+ wgap[windmn] = dmax(r__1,r__2);
+ }
+ if (windex < wend) {
+/* Computing MAX */
+ r__1 = savgap, r__2 = w[windpl] - werr[windpl]
+ - w[windex] - werr[windex];
+ wgap[windex] = dmax(r__1,r__2);
+ }
+ }
+ ++idone;
+ }
+/* here ends the code for the current child */
+
+L139:
+/* Proceed to any remaining child nodes */
+ newfst = j + 1;
+L140:
+ ;
+ }
+/* L150: */
+ }
+ ++ndepth;
+ goto L40;
+ }
+ ibegin = iend + 1;
+ wbegin = wend + 1;
+L170:
+ ;
+ }
+
+ return 0;
+
+/* End of SLARRV */
+
+} /* slarrv_ */
diff --git a/contrib/libs/clapack/slartg.c b/contrib/libs/clapack/slartg.c
new file mode 100644
index 0000000000..d1d5281f3a
--- /dev/null
+++ b/contrib/libs/clapack/slartg.c
@@ -0,0 +1,189 @@
+/* slartg.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slartg_(real *f, real *g, real *cs, real *sn, real *r__)
+{
+    /* System generated locals */
+    integer i__1;
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double log(doublereal), pow_ri(real *, integer *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__;
+    real f1, g1, eps, scale;
+    integer count;
+    real safmn2, safmx2;
+    extern doublereal slamch_(char *);
+    real safmin;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLARTG generate a plane rotation so that */
+
+/* [ CS SN ] . [ F ] = [ R ] where CS**2 + SN**2 = 1. */
+/* [ -SN CS ] [ G ] [ 0 ] */
+
+/* This is a slower, more accurate version of the BLAS1 routine SROTG, */
+/* with the following other differences: */
+/* F and G are unchanged on return. */
+/* If G=0, then CS=1 and SN=0. */
+/* If F=0 and (G .ne. 0), then CS=0 and SN=1 without doing any */
+/* floating point operations (saves work in SBDSQR when */
+/* there are zeros on the diagonal). */
+
+/* If F exceeds G in magnitude, CS will be positive. */
+
+/* Arguments */
+/* ========= */
+
+/* F (input) REAL */
+/* The first component of vector to be rotated. */
+
+/* G (input) REAL */
+/* The second component of vector to be rotated. */
+
+/* CS (output) REAL */
+/* The cosine of the rotation. */
+
+/* SN (output) REAL */
+/* The sine of the rotation. */
+
+/* R (output) REAL */
+/* The nonzero component of the rotated vector. */
+
+/* This version has a few statements commented out for thread safety */
+/* (machine parameters are computed on each entry). 10 feb 03, SJH. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* LOGICAL FIRST */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Save statement .. */
+/* SAVE FIRST, SAFMX2, SAFMIN, SAFMN2 */
+/* .. */
+/* .. Data statements .. */
+/* DATA FIRST / .TRUE. / */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Machine constants are recomputed on every call (the original */
+/* FIRST/SAVE caching is commented out above for thread safety). */
+/* SAFMN2 = base**int(log(safmin/eps)/log(base)/2) and SAFMX2 = 1/SAFMN2 */
+/* are the thresholds used below to scale F,G away from over/underflow. */
+/* IF( FIRST ) THEN */
+    safmin = slamch_("S");
+    eps = slamch_("E");
+    r__1 = slamch_("B");
+    i__1 = (integer) (log(safmin / eps) / log(slamch_("B")) / 2.f);
+    safmn2 = pow_ri(&r__1, &i__1);
+    safmx2 = 1.f / safmn2;
+/* FIRST = .FALSE. */
+/* END IF */
+    if (*g == 0.f) {
+	*cs = 1.f;
+	*sn = 0.f;
+	*r__ = *f;
+    } else if (*f == 0.f) {
+	*cs = 0.f;
+	*sn = 1.f;
+	*r__ = *g;
+    } else {
+	f1 = *f;
+	g1 = *g;
+/* Computing MAX */
+	r__1 = dabs(f1), r__2 = dabs(g1);
+	scale = dmax(r__1,r__2);
+/* Large inputs: scale F1,G1 down by SAFMN2 until max(|f1|,|g1|) */
+/* drops below SAFMX2; COUNT records how often, so R is scaled back up. */
+	if (scale >= safmx2) {
+	    count = 0;
+L10:
+	    ++count;
+	    f1 *= safmn2;
+	    g1 *= safmn2;
+/* Computing MAX */
+	    r__1 = dabs(f1), r__2 = dabs(g1);
+	    scale = dmax(r__1,r__2);
+	    if (scale >= safmx2) {
+		goto L10;
+	    }
+/* Computing 2nd power */
+	    r__1 = f1;
+/* Computing 2nd power */
+	    r__2 = g1;
+	    *r__ = sqrt(r__1 * r__1 + r__2 * r__2);
+	    *cs = f1 / *r__;
+	    *sn = g1 / *r__;
+	    i__1 = count;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		*r__ *= safmx2;
+/* L20: */
+	    }
+	} else if (scale <= safmn2) {
+/* Tiny inputs: scale F1,G1 up by SAFMX2 until max(|f1|,|g1|) */
+/* exceeds SAFMN2, then undo the scaling on R afterwards. */
+	    count = 0;
+L30:
+	    ++count;
+	    f1 *= safmx2;
+	    g1 *= safmx2;
+/* Computing MAX */
+	    r__1 = dabs(f1), r__2 = dabs(g1);
+	    scale = dmax(r__1,r__2);
+	    if (scale <= safmn2) {
+		goto L30;
+	    }
+/* Computing 2nd power */
+	    r__1 = f1;
+/* Computing 2nd power */
+	    r__2 = g1;
+	    *r__ = sqrt(r__1 * r__1 + r__2 * r__2);
+	    *cs = f1 / *r__;
+	    *sn = g1 / *r__;
+	    i__1 = count;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		*r__ *= safmn2;
+/* L40: */
+	    }
+	} else {
+/* Computing 2nd power */
+	    r__1 = f1;
+/* Computing 2nd power */
+	    r__2 = g1;
+	    *r__ = sqrt(r__1 * r__1 + r__2 * r__2);
+	    *cs = f1 / *r__;
+	    *sn = g1 / *r__;
+	}
+/* Enforce the documented sign convention: CS is positive when */
+/* |F| > |G|; flip the whole rotation if needed. */
+	if (dabs(*f) > dabs(*g) && *cs < 0.f) {
+	    *cs = -(*cs);
+	    *sn = -(*sn);
+	    *r__ = -(*r__);
+	}
+    }
+    return 0;
+
+/* End of SLARTG */
+
+} /* slartg_ */
diff --git a/contrib/libs/clapack/slartv.c b/contrib/libs/clapack/slartv.c
new file mode 100644
index 0000000000..58d967b397
--- /dev/null
+++ b/contrib/libs/clapack/slartv.c
@@ -0,0 +1,105 @@
+/* slartv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slartv_(integer *n, real *x, integer *incx, real *y,
+	integer *incy, real *c__, real *s, integer *incc)
+{
+    /* System generated locals */
+    integer i__1;
+
+    /* Local variables */
+    integer i__, ic, ix, iy;
+    real xi, yi;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLARTV applies a vector of real plane rotations to elements of the */
+/* real vectors x and y. For i = 1,2,...,n */
+
+/* ( x(i) ) := ( c(i) s(i) ) ( x(i) ) */
+/* ( y(i) ) ( -s(i) c(i) ) ( y(i) ) */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of plane rotations to be applied. */
+
+/* X (input/output) REAL array, */
+/* dimension (1+(N-1)*INCX) */
+/* The vector x. */
+
+/* INCX (input) INTEGER */
+/* The increment between elements of X. INCX > 0. */
+
+/* Y (input/output) REAL array, */
+/* dimension (1+(N-1)*INCY) */
+/* The vector y. */
+
+/* INCY (input) INTEGER */
+/* The increment between elements of Y. INCY > 0. */
+
+/* C (input) REAL array, dimension (1+(N-1)*INCC) */
+/* The cosines of the plane rotations. */
+
+/* S (input) REAL array, dimension (1+(N-1)*INCC) */
+/* The sines of the plane rotations. */
+
+/* INCC (input) INTEGER */
+/* The increment between elements of C and S. INCC > 0. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* f2c-style pointer shift so the arrays can be indexed 1-based */
+/* as in the original Fortran. */
+    /* Parameter adjustments */
+    --s;
+    --c__;
+    --y;
+    --x;
+
+    /* Function Body */
+    ix = 1;
+    iy = 1;
+    ic = 1;
+    i__1 = *n;
+/* Apply the i-th rotation to (x(ix), y(iy)); XI/YI hold the old */
+/* values so both updates use the pre-rotation components. */
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	xi = x[ix];
+	yi = y[iy];
+	x[ix] = c__[ic] * xi + s[ic] * yi;
+	y[iy] = c__[ic] * yi - s[ic] * xi;
+	ix += *incx;
+	iy += *incy;
+	ic += *incc;
+/* L10: */
+    }
+    return 0;
+
+/* End of SLARTV */
+
+} /* slartv_ */
diff --git a/contrib/libs/clapack/slaruv.c b/contrib/libs/clapack/slaruv.c
new file mode 100644
index 0000000000..68d67fe841
--- /dev/null
+++ b/contrib/libs/clapack/slaruv.c
@@ -0,0 +1,193 @@
+/* slaruv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slaruv_(integer *iseed, integer *n, real *x)
+{
+    /* Initialized data */
+
+/* Multiplier table, stored column-major as a flat array: */
+/* mm[i-1] = MM(i,1), mm[i+127] = MM(i,2), mm[i+255] = MM(i,3), */
+/* mm[i+383] = MM(i,4) -- the four 12-bit pieces per row i. */
+    static integer mm[512]	/* was [128][4] */ = { 494,2637,255,2008,1253,
+	    3344,4084,1739,3143,3468,688,1657,1238,3166,1292,3422,1270,2016,
+	    154,2862,697,1706,491,931,1444,444,3577,3944,2184,1661,3482,657,
+	    3023,3618,1267,1828,164,3798,3087,2400,2870,3876,1905,1593,1797,
+	    1234,3460,328,2861,1950,617,2070,3331,769,1558,2412,2800,189,287,
+	    2045,1227,2838,209,2770,3654,3993,192,2253,3491,2889,2857,2094,
+	    1818,688,1407,634,3231,815,3524,1914,516,164,303,2144,3480,119,
+	    3357,837,2826,2332,2089,3780,1700,3712,150,2000,3375,1621,3090,
+	    3765,1149,3146,33,3082,2741,359,3316,1749,185,2784,2202,2199,1364,
+	    1244,2020,3160,2785,2772,1217,1822,1245,2252,3904,2774,997,2573,
+	    1148,545,322,789,1440,752,2859,123,1848,643,2405,2638,2344,46,
+	    3814,913,3649,339,3808,822,2832,3078,3633,2970,637,2249,2081,4019,
+	    1478,242,481,2075,4058,622,3376,812,234,641,4005,1122,3135,2640,
+	    2302,40,1832,2247,2034,2637,1287,1691,496,1597,2394,2584,1843,336,
+	    1472,2407,433,2096,1761,2810,566,442,41,1238,1086,603,840,3168,
+	    1499,1084,3438,2408,1589,2391,288,26,512,1456,171,1677,2657,2270,
+	    2587,2961,1970,1817,676,1410,3723,2803,3185,184,663,499,3784,1631,
+	    1925,3912,1398,1349,1441,2224,2411,1907,3192,2786,382,37,759,2948,
+	    1862,3802,2423,2051,2295,1332,1832,2405,3638,3661,327,3660,716,
+	    1842,3987,1368,1848,2366,2508,3754,1766,3572,2893,307,1297,3966,
+	    758,2598,3406,2922,1038,2934,2091,2451,1580,1958,2055,1507,1078,
+	    3273,17,854,2916,3971,2889,3831,2621,1541,893,736,3992,787,2125,
+	    2364,2460,257,1574,3912,1216,3248,3401,2124,2762,149,2245,166,466,
+	    4018,1399,190,2879,153,2320,18,712,2159,2318,2091,3443,1510,449,
+	    1956,2201,3137,3399,1321,2271,3667,2703,629,2365,2431,1113,3922,
+	    2554,184,2099,3228,4012,1921,3452,3901,572,3309,3171,817,3039,
+	    1696,1256,3715,2077,3019,1497,1101,717,51,981,1978,1813,3881,76,
+	    3846,3694,1682,124,1660,3997,479,1141,886,3514,1301,3604,1888,
+	    1836,1990,2058,692,1194,20,3285,2046,2107,3508,3525,3801,2549,
+	    1145,2253,305,3301,1065,3133,2913,3285,1241,1197,3729,2501,1673,
+	    541,2753,949,2361,1165,4081,2725,3305,3069,3617,3733,409,2157,
+	    1361,3973,1865,2525,1409,3445,3577,77,3761,2149,1449,3005,225,85,
+	    3673,3117,3089,1349,2057,413,65,1845,697,3085,3441,1573,3689,2941,
+	    929,533,2841,4077,721,2821,2249,2397,2817,245,1913,1997,3121,997,
+	    1833,2877,1633,981,2009,941,2449,197,2441,285,1473,2741,3129,909,
+	    2801,421,4073,2813,2337,1429,1177,1901,81,1669,2633,2269,129,1141,
+	    249,3917,2481,3941,2217,2749,3041,1877,345,2861,1809,3141,2825,
+	    157,2881,3637,1465,2829,2161,3365,361,2685,3745,2325,3609,3821,
+	    3537,517,3017,2141,1537 };
+
+    /* System generated locals */
+    integer i__1;
+
+    /* Local variables */
+    integer i__, i1, i2, i3, i4, it1, it2, it3, it4;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLARUV returns a vector of n random real numbers from a uniform (0,1) */
+/* distribution (n <= 128). */
+
+/* This is an auxiliary routine called by SLARNV and CLARNV. */
+
+/* Arguments */
+/* ========= */
+
+/* ISEED (input/output) INTEGER array, dimension (4) */
+/* On entry, the seed of the random number generator; the array */
+/* elements must be between 0 and 4095, and ISEED(4) must be */
+/* odd. */
+/* On exit, the seed is updated. */
+
+/* N (input) INTEGER */
+/* The number of random numbers to be generated. N <= 128. */
+
+/* X (output) REAL array, dimension (N) */
+/* The generated random numbers. */
+
+/* Further Details */
+/* =============== */
+
+/* This routine uses a multiplicative congruential method with modulus */
+/* 2**48 and multiplier 33952834046453 (see G.S.Fishman, */
+/* 'Multiplicative congruential random number generators with modulus */
+/* 2**b: an exhaustive analysis for b = 32 and a partial analysis for */
+/* b = 48', Math. Comp. 189, pp 331-344, 1990). */
+
+/* 48-bit integers are stored in 4 integer array elements with 12 bits */
+/* per element. Hence the routine is portable across machines with */
+/* integers of 32 bits or more. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Data statements .. */
+    /* Parameter adjustments */
+    --iseed;
+    --x;
+
+    /* Function Body */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Unpack the 48-bit seed into four 12-bit pieces I1..I4 */
+    i1 = iseed[1];
+    i2 = iseed[2];
+    i3 = iseed[3];
+    i4 = iseed[4];
+
+    i__1 = min(*n,128);
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+L20:
+
+/* Multiply the seed by i-th power of the multiplier modulo 2**48 */
+
+/* Schoolbook multi-precision multiply in base 4096: IT4..IT1 are */
+/* the product's 12-bit digits, carries propagated via /4096 and <<12. */
+	it4 = i4 * mm[i__ + 383];
+	it3 = it4 / 4096;
+	it4 -= it3 << 12;
+	it3 = it3 + i3 * mm[i__ + 383] + i4 * mm[i__ + 255];
+	it2 = it3 / 4096;
+	it3 -= it2 << 12;
+	it2 = it2 + i2 * mm[i__ + 383] + i3 * mm[i__ + 255] + i4 * mm[i__ +
+		127];
+	it1 = it2 / 4096;
+	it2 -= it1 << 12;
+	it1 = it1 + i1 * mm[i__ + 383] + i2 * mm[i__ + 255] + i3 * mm[i__ +
+		127] + i4 * mm[i__ - 1];
+	it1 %= 4096;
+
+/* Convert 48-bit integer to a real number in the interval (0,1) */
+
+/* 2.44140625e-4f = 1/4096; Horner evaluation of the 4-digit fraction. */
+	x[i__] = ((real) it1 + ((real) it2 + ((real) it3 + (real) it4 *
+		2.44140625e-4f) * 2.44140625e-4f) * 2.44140625e-4f) *
+		2.44140625e-4f;
+
+	if (x[i__] == 1.f) {
+/* If a real number has n bits of precision, and the first */
+/* n bits of the 48-bit integer above happen to be all 1 (which */
+/* will occur about once every 2**n calls), then X( I ) will */
+/* be rounded to exactly 1.0. In IEEE single precision arithmetic, */
+/* this will happen relatively often since n = 24. */
+/* Since X( I ) is not supposed to return exactly 0.0 or 1.0, */
+/* the statistically correct thing to do in this situation is */
+/* simply to iterate again. */
+/* N.B. the case X( I ) = 0.0 should not be possible. */
+	    i1 += 2;
+	    i2 += 2;
+	    i3 += 2;
+	    i4 += 2;
+	    goto L20;
+	}
+
+/* L10: */
+    }
+
+/* Return final value of seed */
+
+    iseed[1] = it1;
+    iseed[2] = it2;
+    iseed[3] = it3;
+    iseed[4] = it4;
+    return 0;
+
+/* End of SLARUV */
+
+} /* slaruv_ */
diff --git a/contrib/libs/clapack/slarz.c b/contrib/libs/clapack/slarz.c
new file mode 100644
index 0000000000..05e816d232
--- /dev/null
+++ b/contrib/libs/clapack/slarz.c
@@ -0,0 +1,190 @@
+/* slarz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b5 = 1.f;
+
+/* Subroutine */ int slarz_(char *side, integer *m, integer *n, integer *l,
+	real *v, integer *incv, real *tau, real *c__, integer *ldc, real *
+	work)
+{
+    /* System generated locals */
+    integer c_dim1, c_offset;
+    real r__1;
+
+    /* Local variables */
+    extern /* Subroutine */ int sger_(integer *, integer *, real *, real *,
+	    integer *, real *, integer *, real *, integer *);
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+	    real *, integer *, real *, integer *, real *, real *, integer *), scopy_(integer *, real *, integer *, real *, integer *),
+	    saxpy_(integer *, real *, real *, integer *, real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLARZ applies a real elementary reflector H to a real M-by-N */
+/* matrix C, from either the left or the right. H is represented in the */
+/* form */
+
+/* H = I - tau * v * v' */
+
+/* where tau is a real scalar and v is a real vector. */
+
+/* If tau = 0, then H is taken to be the unit matrix. */
+
+
+/* H is a product of k elementary reflectors as returned by STZRZF. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': form H * C */
+/* = 'R': form C * H */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* L (input) INTEGER */
+/* The number of entries of the vector V containing */
+/* the meaningful part of the Householder vectors. */
+/* If SIDE = 'L', M >= L >= 0, if SIDE = 'R', N >= L >= 0. */
+
+/* V (input) REAL array, dimension (1+(L-1)*abs(INCV)) */
+/* The vector v in the representation of H as returned by */
+/* STZRZF. V is not used if TAU = 0. */
+
+/* INCV (input) INTEGER */
+/* The increment between elements of v. INCV <> 0. */
+
+/* TAU (input) REAL */
+/* The value tau in the representation of H. */
+
+/* C (input/output) REAL array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by the matrix H * C if SIDE = 'L', */
+/* or C * H if SIDE = 'R'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) REAL array, dimension */
+/* (N) if SIDE = 'L' */
+/* or (M) if SIDE = 'R' */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* f2c-style pointer shifts so arrays can be addressed with */
+/* Fortran 1-based (row, column*ld) indexing. */
+    /* Parameter adjustments */
+    --v;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+    --work;
+
+    /* Function Body */
+    if (lsame_(side, "L")) {
+
+/* Form H * C */
+
+/* TAU = 0 means H = I (see Purpose), so C is left untouched. */
+	if (*tau != 0.f) {
+
+/* w( 1:n ) = C( 1, 1:n ) */
+
+	    scopy_(n, &c__[c_offset], ldc, &work[1], &c__1);
+
+/* w( 1:n ) = w( 1:n ) + C( m-l+1:m, 1:n )' * v( 1:l ) */
+
+	    sgemv_("Transpose", l, n, &c_b5, &c__[*m - *l + 1 + c_dim1], ldc,
+		    &v[1], incv, &c_b5, &work[1], &c__1);
+
+/* C( 1, 1:n ) = C( 1, 1:n ) - tau * w( 1:n ) */
+
+	    r__1 = -(*tau);
+	    saxpy_(n, &r__1, &work[1], &c__1, &c__[c_offset], ldc);
+
+/* C( m-l+1:m, 1:n ) = C( m-l+1:m, 1:n ) - ... */
+/* tau * v( 1:l ) * w( 1:n )' */
+
+	    r__1 = -(*tau);
+	    sger_(l, n, &r__1, &v[1], incv, &work[1], &c__1, &c__[*m - *l + 1
+		    + c_dim1], ldc);
+	}
+
+    } else {
+
+/* Form C * H */
+
+	if (*tau != 0.f) {
+
+/* w( 1:m ) = C( 1:m, 1 ) */
+
+	    scopy_(m, &c__[c_offset], &c__1, &work[1], &c__1);
+
+/* w( 1:m ) = w( 1:m ) + C( 1:m, n-l+1:n, 1:n ) * v( 1:l ) */
+
+	    sgemv_("No transpose", m, l, &c_b5, &c__[(*n - *l + 1) * c_dim1 +
+		    1], ldc, &v[1], incv, &c_b5, &work[1], &c__1);
+
+/* C( 1:m, 1 ) = C( 1:m, 1 ) - tau * w( 1:m ) */
+
+	    r__1 = -(*tau);
+	    saxpy_(m, &r__1, &work[1], &c__1, &c__[c_offset], &c__1);
+
+/* C( 1:m, n-l+1:n ) = C( 1:m, n-l+1:n ) - ... */
+/* tau * w( 1:m ) * v( 1:l )' */
+
+	    r__1 = -(*tau);
+	    sger_(m, l, &r__1, &work[1], &c__1, &v[1], incv, &c__[(*n - *l +
+		    1) * c_dim1 + 1], ldc);
+
+	}
+
+    }
+
+    return 0;
+
+/* End of SLARZ */
+
+} /* slarz_ */
diff --git a/contrib/libs/clapack/slarzb.c b/contrib/libs/clapack/slarzb.c
new file mode 100644
index 0000000000..b47222440f
--- /dev/null
+++ b/contrib/libs/clapack/slarzb.c
@@ -0,0 +1,287 @@
+/* slarzb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b13 = 1.f;
+static real c_b23 = -1.f;
+
+/* Subroutine */ int slarzb_(char *side, char *trans, char *direct, char *
+	storev, integer *m, integer *n, integer *k, integer *l, real *v,
+	integer *ldv, real *t, integer *ldt, real *c__, integer *ldc, real *
+	work, integer *ldwork)
+{
+    /* System generated locals */
+    integer c_dim1, c_offset, t_dim1, t_offset, v_dim1, v_offset, work_dim1,
+	    work_offset, i__1, i__2;
+
+    /* Local variables */
+    integer i__, j, info;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+	    integer *, real *, real *, integer *, real *, integer *, real *,
+	    real *, integer *), scopy_(integer *, real *,
+	    integer *, real *, integer *), strmm_(char *, char *, char *,
+	    char *, integer *, integer *, real *, real *, integer *, real *,
+	    integer *), xerbla_(char *,
+	    integer *);
+    char transt[1];
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLARZB applies a real block reflector H or its transpose H**T to */
+/* a real distributed M-by-N C from the left or the right. */
+
+/* Currently, only STOREV = 'R' and DIRECT = 'B' are supported. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply H or H' from the Left */
+/* = 'R': apply H or H' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply H (No transpose) */
+/* = 'C': apply H' (Transpose) */
+
+/* DIRECT (input) CHARACTER*1 */
+/* Indicates how H is formed from a product of elementary */
+/* reflectors */
+/* = 'F': H = H(1) H(2) . . . H(k) (Forward, not supported yet) */
+/* = 'B': H = H(k) . . . H(2) H(1) (Backward) */
+
+/* STOREV (input) CHARACTER*1 */
+/* Indicates how the vectors which define the elementary */
+/* reflectors are stored: */
+/* = 'C': Columnwise (not supported yet) */
+/* = 'R': Rowwise */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* K (input) INTEGER */
+/* The order of the matrix T (= the number of elementary */
+/* reflectors whose product defines the block reflector). */
+
+/* L (input) INTEGER */
+/* The number of columns of the matrix V containing the */
+/* meaningful part of the Householder reflectors. */
+/* If SIDE = 'L', M >= L >= 0, if SIDE = 'R', N >= L >= 0. */
+
+/* V (input) REAL array, dimension (LDV,NV). */
+/* If STOREV = 'C', NV = K; if STOREV = 'R', NV = L. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. */
+/* If STOREV = 'C', LDV >= L; if STOREV = 'R', LDV >= K. */
+
+/* T (input) REAL array, dimension (LDT,K) */
+/* The triangular K-by-K matrix T in the representation of the */
+/* block reflector. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= K. */
+
+/* C (input/output) REAL array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by H*C or H'*C or C*H or C*H'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) REAL array, dimension (LDWORK,K) */
+
+/* LDWORK (input) INTEGER */
+/* The leading dimension of the array WORK. */
+/* If SIDE = 'L', LDWORK >= max(1,N); */
+/* if SIDE = 'R', LDWORK >= max(1,M). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+/* f2c-style pointer shifts for Fortran 1-based (row, column*ld) */
+/* indexing of V, T, C and WORK. */
+    /* Parameter adjustments */
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+    t_dim1 = *ldt;
+    t_offset = 1 + t_dim1;
+    t -= t_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+    work_dim1 = *ldwork;
+    work_offset = 1 + work_dim1;
+    work -= work_offset;
+
+    /* Function Body */
+    if (*m <= 0 || *n <= 0) {
+	return 0;
+    }
+
+/* Check for currently supported options */
+
+    info = 0;
+    if (! lsame_(direct, "B")) {
+	info = -3;
+    } else if (! lsame_(storev, "R")) {
+	info = -4;
+    }
+    if (info != 0) {
+	i__1 = -info;
+	xerbla_("SLARZB", &i__1);
+	return 0;
+    }
+
+/* TRANST is the opposite of TRANS: the left-side update below */
+/* multiplies by T (or T') from the right of the transposed workspace. */
+    if (lsame_(trans, "N")) {
+	*(unsigned char *)transt = 'T';
+    } else {
+	*(unsigned char *)transt = 'N';
+    }
+
+    if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C */
+
+/* W( 1:n, 1:k ) = C( 1:k, 1:n )' */
+
+	i__1 = *k;
+	for (j = 1; j <= i__1; ++j) {
+	    scopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1], &c__1);
+/* L10: */
+	}
+
+/* W( 1:n, 1:k ) = W( 1:n, 1:k ) + ... */
+/* C( m-l+1:m, 1:n )' * V( 1:k, 1:l )' */
+
+	if (*l > 0) {
+	    sgemm_("Transpose", "Transpose", n, k, l, &c_b13, &c__[*m - *l +
+		    1 + c_dim1], ldc, &v[v_offset], ldv, &c_b13, &work[
+		    work_offset], ldwork);
+	}
+
+/* W( 1:n, 1:k ) = W( 1:n, 1:k ) * T' or W( 1:m, 1:k ) * T */
+
+	strmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b13, &t[
+		t_offset], ldt, &work[work_offset], ldwork);
+
+/* C( 1:k, 1:n ) = C( 1:k, 1:n ) - W( 1:n, 1:k )' */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *k;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		c__[i__ + j * c_dim1] -= work[j + i__ * work_dim1];
+/* L20: */
+	    }
+/* L30: */
+	}
+
+/* C( m-l+1:m, 1:n ) = C( m-l+1:m, 1:n ) - ... */
+/* V( 1:k, 1:l )' * W( 1:n, 1:k )' */
+
+	if (*l > 0) {
+	    sgemm_("Transpose", "Transpose", l, n, k, &c_b23, &v[v_offset],
+		    ldv, &work[work_offset], ldwork, &c_b13, &c__[*m - *l + 1
+		    + c_dim1], ldc);
+	}
+
+    } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' */
+
+/* W( 1:m, 1:k ) = C( 1:m, 1:k ) */
+
+	i__1 = *k;
+	for (j = 1; j <= i__1; ++j) {
+	    scopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j * work_dim1 + 1], &
+		    c__1);
+/* L40: */
+	}
+
+/* W( 1:m, 1:k ) = W( 1:m, 1:k ) + ... */
+/* C( 1:m, n-l+1:n ) * V( 1:k, 1:l )' */
+
+	if (*l > 0) {
+	    sgemm_("No transpose", "Transpose", m, k, l, &c_b13, &c__[(*n - *
+		    l + 1) * c_dim1 + 1], ldc, &v[v_offset], ldv, &c_b13, &
+		    work[work_offset], ldwork);
+	}
+
+/* W( 1:m, 1:k ) = W( 1:m, 1:k ) * T or W( 1:m, 1:k ) * T' */
+
+	strmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b13, &t[t_offset]
+, ldt, &work[work_offset], ldwork);
+
+/* C( 1:m, 1:k ) = C( 1:m, 1:k ) - W( 1:m, 1:k ) */
+
+	i__1 = *k;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1];
+/* L50: */
+	    }
+/* L60: */
+	}
+
+/* C( 1:m, n-l+1:n ) = C( 1:m, n-l+1:n ) - ... */
+/* W( 1:m, 1:k ) * V( 1:k, 1:l ) */
+
+	if (*l > 0) {
+	    sgemm_("No transpose", "No transpose", m, l, k, &c_b23, &work[
+		    work_offset], ldwork, &v[v_offset], ldv, &c_b13, &c__[(*n
+		    - *l + 1) * c_dim1 + 1], ldc);
+	}
+
+    }
+
+    return 0;
+
+/* End of SLARZB */
+
+} /* slarzb_ */
diff --git a/contrib/libs/clapack/slarzt.c b/contrib/libs/clapack/slarzt.c
new file mode 100644
index 0000000000..ed5c92a193
--- /dev/null
+++ b/contrib/libs/clapack/slarzt.c
@@ -0,0 +1,227 @@
+/* slarzt.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b8 = 0.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int slarzt_(char *direct, char *storev, integer *n, integer *
+ k, real *v, integer *ldv, real *tau, real *t, integer *ldt)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, v_dim1, v_offset, i__1;
+ real r__1;
+
+ /* Local variables */
+ integer i__, j, info;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *), strmv_(char *, char *, char *, integer *, real *,
+ integer *, real *, integer *), xerbla_(
+ char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLARZT forms the triangular factor T of a real block reflector */
+/* H of order > n, which is defined as a product of k elementary */
+/* reflectors. */
+
+/* If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular; */
+
+/* If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. */
+
+/* If STOREV = 'C', the vector which defines the elementary reflector */
+/* H(i) is stored in the i-th column of the array V, and */
+
+/* H = I - V * T * V' */
+
+/* If STOREV = 'R', the vector which defines the elementary reflector */
+/* H(i) is stored in the i-th row of the array V, and */
+
+/* H = I - V' * T * V */
+
+/* Currently, only STOREV = 'R' and DIRECT = 'B' are supported. */
+
+/* Arguments */
+/* ========= */
+
+/* DIRECT (input) CHARACTER*1 */
+/* Specifies the order in which the elementary reflectors are */
+/* multiplied to form the block reflector: */
+/* = 'F': H = H(1) H(2) . . . H(k) (Forward, not supported yet) */
+/* = 'B': H = H(k) . . . H(2) H(1) (Backward) */
+
+/* STOREV (input) CHARACTER*1 */
+/* Specifies how the vectors which define the elementary */
+/* reflectors are stored (see also Further Details): */
+/* = 'C': columnwise (not supported yet) */
+/* = 'R': rowwise */
+
+/* N (input) INTEGER */
+/* The order of the block reflector H. N >= 0. */
+
+/* K (input) INTEGER */
+/* The order of the triangular factor T (= the number of */
+/* elementary reflectors). K >= 1. */
+
+/* V (input/output) REAL array, dimension */
+/* (LDV,K) if STOREV = 'C' */
+/* (LDV,N) if STOREV = 'R' */
+/* The matrix V. See further details. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. */
+/* If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. */
+
+/* TAU (input) REAL array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i). */
+
+/* T (output) REAL array, dimension (LDT,K) */
+/* The k by k triangular factor T of the block reflector. */
+/* If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is */
+/* lower triangular. The rest of the array is not used. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= K. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* The shape of the matrix V and the storage of the vectors which define */
+/* the H(i) is best illustrated by the following example with n = 5 and */
+/* k = 3. The elements equal to 1 are not stored; the corresponding */
+/* array elements are modified but restored on exit. The rest of the */
+/* array is not used. */
+
+/* DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': */
+
+/* ______V_____ */
+/* ( v1 v2 v3 ) / \ */
+/* ( v1 v2 v3 ) ( v1 v1 v1 v1 v1 . . . . 1 ) */
+/* V = ( v1 v2 v3 ) ( v2 v2 v2 v2 v2 . . . 1 ) */
+/* ( v1 v2 v3 ) ( v3 v3 v3 v3 v3 . . 1 ) */
+/* ( v1 v2 v3 ) */
+/* . . . */
+/* . . . */
+/* 1 . . */
+/* 1 . */
+/* 1 */
+
+/* DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': */
+
+/* ______V_____ */
+/* 1 / \ */
+/* . 1 ( 1 . . . . v1 v1 v1 v1 v1 ) */
+/* . . 1 ( . 1 . . . v2 v2 v2 v2 v2 ) */
+/* . . . ( . . 1 . . v3 v3 v3 v3 v3 ) */
+/* . . . */
+/* ( v1 v2 v3 ) */
+/* ( v1 v2 v3 ) */
+/* V = ( v1 v2 v3 ) */
+/* ( v1 v2 v3 ) */
+/* ( v1 v2 v3 ) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Check for currently supported options */
+
+ /* Parameter adjustments */
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ --tau;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(direct, "B")) {
+ info = -1;
+ } else if (! lsame_(storev, "R")) {
+ info = -2;
+ }
+ if (info != 0) {
+ i__1 = -info;
+ xerbla_("SLARZT", &i__1);
+ return 0;
+ }
+
+ for (i__ = *k; i__ >= 1; --i__) {
+ if (tau[i__] == 0.f) {
+
+/* H(i) = I */
+
+ i__1 = *k;
+ for (j = i__; j <= i__1; ++j) {
+ t[j + i__ * t_dim1] = 0.f;
+/* L10: */
+ }
+ } else {
+
+/* general case */
+
+ if (i__ < *k) {
+
+/* T(i+1:k,i) = - tau(i) * V(i+1:k,1:n) * V(i,1:n)' */
+
+ i__1 = *k - i__;
+ r__1 = -tau[i__];
+ sgemv_("No transpose", &i__1, n, &r__1, &v[i__ + 1 + v_dim1],
+ ldv, &v[i__ + v_dim1], ldv, &c_b8, &t[i__ + 1 + i__ *
+ t_dim1], &c__1);
+
+/* T(i+1:k,i) = T(i+1:k,i+1:k) * T(i+1:k,i) */
+
+ i__1 = *k - i__;
+ strmv_("Lower", "No transpose", "Non-unit", &i__1, &t[i__ + 1
+ + (i__ + 1) * t_dim1], ldt, &t[i__ + 1 + i__ * t_dim1]
+, &c__1);
+ }
+ t[i__ + i__ * t_dim1] = tau[i__];
+ }
+/* L20: */
+ }
+ return 0;
+
+/* End of SLARZT */
+
+} /* slarzt_ */
diff --git a/contrib/libs/clapack/slas2.c b/contrib/libs/clapack/slas2.c
new file mode 100644
index 0000000000..4269a70ffa
--- /dev/null
+++ b/contrib/libs/clapack/slas2.c
@@ -0,0 +1,145 @@
+/* slas2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slas2_(real *f, real *g, real *h__, real *ssmin, real *
+ ssmax)
+{
+ /* System generated locals */
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ real c__, fa, ga, ha, as, at, au, fhmn, fhmx;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAS2 computes the singular values of the 2-by-2 matrix */
+/* [ F G ] */
+/* [ 0 H ]. */
+/* On return, SSMIN is the smaller singular value and SSMAX is the */
+/* larger singular value. */
+
+/* Arguments */
+/* ========= */
+
+/* F (input) REAL */
+/* The (1,1) element of the 2-by-2 matrix. */
+
+/* G (input) REAL */
+/* The (1,2) element of the 2-by-2 matrix. */
+
+/* H (input) REAL */
+/* The (2,2) element of the 2-by-2 matrix. */
+
+/* SSMIN (output) REAL */
+/* The smaller singular value. */
+
+/* SSMAX (output) REAL */
+/* The larger singular value. */
+
+/* Further Details */
+/* =============== */
+
+/* Barring over/underflow, all output quantities are correct to within */
+/* a few units in the last place (ulps), even in the absence of a guard */
+/* digit in addition/subtraction. */
+
+/* In IEEE arithmetic, the code works correctly if one matrix element is */
+/* infinite. */
+
+/* Overflow will not occur unless the largest singular value itself */
+/* overflows, or is within a few ulps of overflow. (On machines with */
+/* partial overflow, like the Cray, overflow may occur if the largest */
+/* singular value is within a factor of 2 of overflow.) */
+
+/* Underflow is harmless if underflow is gradual. Otherwise, results */
+/* may correspond to a matrix modified by perturbations of size near */
+/* the underflow threshold. */
+
+/* ==================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ fa = dabs(*f);
+ ga = dabs(*g);
+ ha = dabs(*h__);
+ fhmn = dmin(fa,ha);
+ fhmx = dmax(fa,ha);
+ if (fhmn == 0.f) {
+ *ssmin = 0.f;
+ if (fhmx == 0.f) {
+ *ssmax = ga;
+ } else {
+/* Computing 2nd power */
+ r__1 = dmin(fhmx,ga) / dmax(fhmx,ga);
+ *ssmax = dmax(fhmx,ga) * sqrt(r__1 * r__1 + 1.f);
+ }
+ } else {
+ if (ga < fhmx) {
+ as = fhmn / fhmx + 1.f;
+ at = (fhmx - fhmn) / fhmx;
+/* Computing 2nd power */
+ r__1 = ga / fhmx;
+ au = r__1 * r__1;
+ c__ = 2.f / (sqrt(as * as + au) + sqrt(at * at + au));
+ *ssmin = fhmn * c__;
+ *ssmax = fhmx / c__;
+ } else {
+ au = fhmx / ga;
+ if (au == 0.f) {
+
+/* Avoid possible harmful underflow if exponent range */
+/* asymmetric (true SSMIN may not underflow even if */
+/* AU underflows) */
+
+ *ssmin = fhmn * fhmx / ga;
+ *ssmax = ga;
+ } else {
+ as = fhmn / fhmx + 1.f;
+ at = (fhmx - fhmn) / fhmx;
+/* Computing 2nd power */
+ r__1 = as * au;
+/* Computing 2nd power */
+ r__2 = at * au;
+ c__ = 1.f / (sqrt(r__1 * r__1 + 1.f) + sqrt(r__2 * r__2 + 1.f)
+ );
+ *ssmin = fhmn * c__ * au;
+ *ssmin += *ssmin;
+ *ssmax = ga / (c__ + c__);
+ }
+ }
+ }
+ return 0;
+
+/* End of SLAS2 */
+
+} /* slas2_ */
diff --git a/contrib/libs/clapack/slascl.c b/contrib/libs/clapack/slascl.c
new file mode 100644
index 0000000000..6afa7ef3e4
--- /dev/null
+++ b/contrib/libs/clapack/slascl.c
@@ -0,0 +1,355 @@
+/* slascl.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slascl_(char *type__, integer *kl, integer *ku, real *
+ cfrom, real *cto, integer *m, integer *n, real *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+
+ /* Local variables */
+ integer i__, j, k1, k2, k3, k4;
+ real mul, cto1;
+ logical done;
+ real ctoc;
+ extern logical lsame_(char *, char *);
+ integer itype;
+ real cfrom1;
+ extern doublereal slamch_(char *);
+ real cfromc;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ extern logical sisnan_(real *);
+ real smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLASCL multiplies the M by N real matrix A by the real scalar */
+/* CTO/CFROM. This is done without over/underflow as long as the final */
+/* result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that */
+/* A may be full, upper triangular, lower triangular, upper Hessenberg, */
+/* or banded. */
+
+/* Arguments */
+/* ========= */
+
+/* TYPE (input) CHARACTER*1 */
+/* TYPE indices the storage type of the input matrix. */
+/* = 'G': A is a full matrix. */
+/* = 'L': A is a lower triangular matrix. */
+/* = 'U': A is an upper triangular matrix. */
+/* = 'H': A is an upper Hessenberg matrix. */
+/* = 'B': A is a symmetric band matrix with lower bandwidth KL */
+/* and upper bandwidth KU and with the only the lower */
+/* half stored. */
+/* = 'Q': A is a symmetric band matrix with lower bandwidth KL */
+/* and upper bandwidth KU and with the only the upper */
+/* half stored. */
+/* = 'Z': A is a band matrix with lower bandwidth KL and upper */
+/* bandwidth KU. */
+
+/* KL (input) INTEGER */
+/* The lower bandwidth of A. Referenced only if TYPE = 'B', */
+/* 'Q' or 'Z'. */
+
+/* KU (input) INTEGER */
+/* The upper bandwidth of A. Referenced only if TYPE = 'B', */
+/* 'Q' or 'Z'. */
+
+/* CFROM (input) REAL */
+/* CTO (input) REAL */
+/* The matrix A is multiplied by CTO/CFROM. A(I,J) is computed */
+/* without over/underflow if the final result CTO*A(I,J)/CFROM */
+/* can be represented without over/underflow. CFROM must be */
+/* nonzero. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* The matrix to be multiplied by CTO/CFROM. See TYPE for the */
+/* storage type. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* INFO (output) INTEGER */
+/* 0 - successful exit */
+/* <0 - if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+
+ if (lsame_(type__, "G")) {
+ itype = 0;
+ } else if (lsame_(type__, "L")) {
+ itype = 1;
+ } else if (lsame_(type__, "U")) {
+ itype = 2;
+ } else if (lsame_(type__, "H")) {
+ itype = 3;
+ } else if (lsame_(type__, "B")) {
+ itype = 4;
+ } else if (lsame_(type__, "Q")) {
+ itype = 5;
+ } else if (lsame_(type__, "Z")) {
+ itype = 6;
+ } else {
+ itype = -1;
+ }
+
+ if (itype == -1) {
+ *info = -1;
+ } else if (*cfrom == 0.f || sisnan_(cfrom)) {
+ *info = -4;
+ } else if (sisnan_(cto)) {
+ *info = -5;
+ } else if (*m < 0) {
+ *info = -6;
+ } else if (*n < 0 || itype == 4 && *n != *m || itype == 5 && *n != *m) {
+ *info = -7;
+ } else if (itype <= 3 && *lda < max(1,*m)) {
+ *info = -9;
+ } else if (itype >= 4) {
+/* Computing MAX */
+ i__1 = *m - 1;
+ if (*kl < 0 || *kl > max(i__1,0)) {
+ *info = -2;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = *n - 1;
+ if (*ku < 0 || *ku > max(i__1,0) || (itype == 4 || itype == 5) &&
+ *kl != *ku) {
+ *info = -3;
+ } else if (itype == 4 && *lda < *kl + 1 || itype == 5 && *lda < *
+ ku + 1 || itype == 6 && *lda < (*kl << 1) + *ku + 1) {
+ *info = -9;
+ }
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASCL", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *m == 0) {
+ return 0;
+ }
+
+/* Get machine parameters */
+
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+
+ cfromc = *cfrom;
+ ctoc = *cto;
+
+L10:
+ cfrom1 = cfromc * smlnum;
+ if (cfrom1 == cfromc) {
+/* CFROMC is an inf. Multiply by a correctly signed zero for */
+/* finite CTOC, or a NaN if CTOC is infinite. */
+ mul = ctoc / cfromc;
+ done = TRUE_;
+ cto1 = ctoc;
+ } else {
+ cto1 = ctoc / bignum;
+ if (cto1 == ctoc) {
+/* CTOC is either 0 or an inf. In both cases, CTOC itself */
+/* serves as the correct multiplication factor. */
+ mul = ctoc;
+ done = TRUE_;
+ cfromc = 1.f;
+ } else if (dabs(cfrom1) > dabs(ctoc) && ctoc != 0.f) {
+ mul = smlnum;
+ done = FALSE_;
+ cfromc = cfrom1;
+ } else if (dabs(cto1) > dabs(cfromc)) {
+ mul = bignum;
+ done = FALSE_;
+ ctoc = cto1;
+ } else {
+ mul = ctoc / cfromc;
+ done = TRUE_;
+ }
+ }
+
+ if (itype == 0) {
+
+/* Full matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L20: */
+ }
+/* L30: */
+ }
+
+ } else if (itype == 1) {
+
+/* Lower triangular matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L40: */
+ }
+/* L50: */
+ }
+
+ } else if (itype == 2) {
+
+/* Upper triangular matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(j,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L60: */
+ }
+/* L70: */
+ }
+
+ } else if (itype == 3) {
+
+/* Upper Hessenberg matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = j + 1;
+ i__2 = min(i__3,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L80: */
+ }
+/* L90: */
+ }
+
+ } else if (itype == 4) {
+
+/* Lower half of a symmetric band matrix */
+
+ k3 = *kl + 1;
+ k4 = *n + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = k3, i__4 = k4 - j;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L100: */
+ }
+/* L110: */
+ }
+
+ } else if (itype == 5) {
+
+/* Upper half of a symmetric band matrix */
+
+ k1 = *ku + 2;
+ k3 = *ku + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = k1 - j;
+ i__3 = k3;
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L120: */
+ }
+/* L130: */
+ }
+
+ } else if (itype == 6) {
+
+/* Band matrix */
+
+ k1 = *kl + *ku + 2;
+ k2 = *kl + 1;
+ k3 = (*kl << 1) + *ku + 1;
+ k4 = *kl + *ku + 1 + *m;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__3 = k1 - j;
+/* Computing MIN */
+ i__4 = k3, i__5 = k4 - j;
+ i__2 = min(i__4,i__5);
+ for (i__ = max(i__3,k2); i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L140: */
+ }
+/* L150: */
+ }
+
+ }
+
+ if (! done) {
+ goto L10;
+ }
+
+ return 0;
+
+/* End of SLASCL */
+
+} /* slascl_ */
diff --git a/contrib/libs/clapack/slasd0.c b/contrib/libs/clapack/slasd0.c
new file mode 100644
index 0000000000..41f3ee72b1
--- /dev/null
+++ b/contrib/libs/clapack/slasd0.c
@@ -0,0 +1,286 @@
+/* slasd0.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__0 = 0;
+static integer c__2 = 2;
+
+/* Subroutine */ int slasd0_(integer *n, integer *sqre, real *d__, real *e,
+ real *u, integer *ldu, real *vt, integer *ldvt, integer *smlsiz,
+ integer *iwork, real *work, integer *info)
+{
+ /* System generated locals */
+ integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ integer i__, j, m, i1, ic, lf, nd, ll, nl, nr, im1, ncc, nlf, nrf, iwk,
+ lvl, ndb1, nlp1, nrp1;
+ real beta;
+ integer idxq, nlvl;
+ real alpha;
+ integer inode, ndiml, idxqc, ndimr, itemp, sqrei;
+ extern /* Subroutine */ int slasd1_(integer *, integer *, integer *, real
+ *, real *, real *, real *, integer *, real *, integer *, integer *
+, integer *, real *, integer *), xerbla_(char *, integer *), slasdq_(char *, integer *, integer *, integer *, integer
+ *, integer *, real *, real *, real *, integer *, real *, integer *
+, real *, integer *, real *, integer *), slasdt_(integer *
+, integer *, integer *, integer *, integer *, integer *, integer *
+);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Using a divide and conquer approach, SLASD0 computes the singular */
+/* value decomposition (SVD) of a real upper bidiagonal N-by-M */
+/* matrix B with diagonal D and offdiagonal E, where M = N + SQRE. */
+/* The algorithm computes orthogonal matrices U and VT such that */
+/* B = U * S * VT. The singular values S are overwritten on D. */
+
+/* A related subroutine, SLASDA, computes only the singular values, */
+/* and optionally, the singular vectors in compact form. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* On entry, the row dimension of the upper bidiagonal matrix. */
+/* This is also the dimension of the main diagonal array D. */
+
+/* SQRE (input) INTEGER */
+/* Specifies the column dimension of the bidiagonal matrix. */
+/* = 0: The bidiagonal matrix has column dimension M = N; */
+/* = 1: The bidiagonal matrix has column dimension M = N+1; */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry D contains the main diagonal of the bidiagonal */
+/* matrix. */
+/* On exit D, if INFO = 0, contains its singular values. */
+
+/* E (input) REAL array, dimension (M-1) */
+/* Contains the subdiagonal entries of the bidiagonal matrix. */
+/* On exit, E has been destroyed. */
+
+/* U (output) REAL array, dimension at least (LDQ, N) */
+/* On exit, U contains the left singular vectors. */
+
+/* LDU (input) INTEGER */
+/* On entry, leading dimension of U. */
+
+/* VT (output) REAL array, dimension at least (LDVT, M) */
+/* On exit, VT' contains the right singular vectors. */
+
+/* LDVT (input) INTEGER */
+/* On entry, leading dimension of VT. */
+
+/* SMLSIZ (input) INTEGER */
+/* On entry, maximum size of the subproblems at the */
+/* bottom of the computation tree. */
+
+/* IWORK (workspace) INTEGER array, dimension (8*N) */
+
+/* WORK (workspace) REAL array, dimension (3*M**2+2*M) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, an singular value did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --iwork;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -1;
+ } else if (*sqre < 0 || *sqre > 1) {
+ *info = -2;
+ }
+
+ m = *n + *sqre;
+
+ if (*ldu < *n) {
+ *info = -6;
+ } else if (*ldvt < m) {
+ *info = -8;
+ } else if (*smlsiz < 3) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASD0", &i__1);
+ return 0;
+ }
+
+/* If the input matrix is too small, call SLASDQ to find the SVD. */
+
+ if (*n <= *smlsiz) {
+ slasdq_("U", sqre, n, &m, n, &c__0, &d__[1], &e[1], &vt[vt_offset],
+ ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[1], info);
+ return 0;
+ }
+
+/* Set up the computation tree. */
+
+ inode = 1;
+ ndiml = inode + *n;
+ ndimr = ndiml + *n;
+ idxq = ndimr + *n;
+ iwk = idxq + *n;
+ slasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+ smlsiz);
+
+/* For the nodes on bottom level of the tree, solve */
+/* their subproblems by SLASDQ. */
+
+ ndb1 = (nd + 1) / 2;
+ ncc = 0;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/* IC : center row of each node */
+/* NL : number of rows of left subproblem */
+/* NR : number of rows of right subproblem */
+/* NLF: starting row of the left subproblem */
+/* NRF: starting row of the right subproblem */
+
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nlp1 = nl + 1;
+ nr = iwork[ndimr + i1];
+ nrp1 = nr + 1;
+ nlf = ic - nl;
+ nrf = ic + 1;
+ sqrei = 1;
+ slasdq_("U", &sqrei, &nl, &nlp1, &nl, &ncc, &d__[nlf], &e[nlf], &vt[
+ nlf + nlf * vt_dim1], ldvt, &u[nlf + nlf * u_dim1], ldu, &u[
+ nlf + nlf * u_dim1], ldu, &work[1], info);
+ if (*info != 0) {
+ return 0;
+ }
+ itemp = idxq + nlf - 2;
+ i__2 = nl;
+ for (j = 1; j <= i__2; ++j) {
+ iwork[itemp + j] = j;
+/* L10: */
+ }
+ if (i__ == nd) {
+ sqrei = *sqre;
+ } else {
+ sqrei = 1;
+ }
+ nrp1 = nr + sqrei;
+ slasdq_("U", &sqrei, &nr, &nrp1, &nr, &ncc, &d__[nrf], &e[nrf], &vt[
+ nrf + nrf * vt_dim1], ldvt, &u[nrf + nrf * u_dim1], ldu, &u[
+ nrf + nrf * u_dim1], ldu, &work[1], info);
+ if (*info != 0) {
+ return 0;
+ }
+ itemp = idxq + ic;
+ i__2 = nr;
+ for (j = 1; j <= i__2; ++j) {
+ iwork[itemp + j - 1] = j;
+/* L20: */
+ }
+/* L30: */
+ }
+
+/* Now conquer each subproblem bottom-up. */
+
+ for (lvl = nlvl; lvl >= 1; --lvl) {
+
+/* Find the first node LF and last node LL on the */
+/* current level LVL. */
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__1 = lvl - 1;
+ lf = pow_ii(&c__2, &i__1);
+ ll = (lf << 1) - 1;
+ }
+ i__1 = ll;
+ for (i__ = lf; i__ <= i__1; ++i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ if (*sqre == 0 && i__ == ll) {
+ sqrei = *sqre;
+ } else {
+ sqrei = 1;
+ }
+ idxqc = idxq + nlf - 1;
+ alpha = d__[ic];
+ beta = e[ic];
+ slasd1_(&nl, &nr, &sqrei, &d__[nlf], &alpha, &beta, &u[nlf + nlf *
+ u_dim1], ldu, &vt[nlf + nlf * vt_dim1], ldvt, &iwork[
+ idxqc], &iwork[iwk], &work[1], info);
+ if (*info != 0) {
+ return 0;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+
+ return 0;
+
+/* End of SLASD0 */
+
+} /* slasd0_ */
diff --git a/contrib/libs/clapack/slasd1.c b/contrib/libs/clapack/slasd1.c
new file mode 100644
index 0000000000..5341356bdb
--- /dev/null
+++ b/contrib/libs/clapack/slasd1.c
@@ -0,0 +1,286 @@
+/* slasd1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__0 = 0;
+static real c_b7 = 1.f;
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int slasd1_(integer *nl, integer *nr, integer *sqre, real *
+ d__, real *alpha, real *beta, real *u, integer *ldu, real *vt,
+ integer *ldvt, integer *idxq, integer *iwork, real *work, integer *
+ info)
+{
+ /* System generated locals */
+ integer u_dim1, u_offset, vt_dim1, vt_offset, i__1;
+ real r__1, r__2;
+
+ /* Local variables */
+ integer i__, k, m, n, n1, n2, iq, iz, iu2, ldq, idx, ldu2, ivt2, idxc,
+ idxp, ldvt2;
+ extern /* Subroutine */ int slasd2_(integer *, integer *, integer *,
+ integer *, real *, real *, real *, real *, real *, integer *,
+ real *, integer *, real *, real *, integer *, real *, integer *,
+ integer *, integer *, integer *, integer *, integer *, integer *),
+ slasd3_(integer *, integer *, integer *, integer *, real *, real
+ *, integer *, real *, real *, integer *, real *, integer *, real *
+, integer *, real *, integer *, integer *, integer *, real *,
+ integer *);
+ integer isigma;
+ extern /* Subroutine */ int xerbla_(char *, integer *), slascl_(
+ char *, integer *, integer *, real *, real *, integer *, integer *
+, real *, integer *, integer *), slamrg_(integer *,
+ integer *, real *, integer *, integer *, integer *);
+ real orgnrm;
+ integer coltyp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLASD1 computes the SVD of an upper bidiagonal N-by-M matrix B, */
+/* where N = NL + NR + 1 and M = N + SQRE. SLASD1 is called from SLASD0. */
+
+/* A related subroutine SLASD7 handles the case in which the singular */
+/* values (and the singular vectors in factored form) are desired. */
+
+/* SLASD1 computes the SVD as follows: */
+
+/* ( D1(in) 0 0 0 ) */
+/* B = U(in) * ( Z1' a Z2' b ) * VT(in) */
+/* ( 0 0 D2(in) 0 ) */
+
+/* = U(out) * ( D(out) 0) * VT(out) */
+
+/* where Z' = (Z1' a Z2' b) = u' VT', and u is a vector of dimension M */
+/* with ALPHA and BETA in the NL+1 and NL+2 th entries and zeros */
+/* elsewhere; and the entry b is empty if SQRE = 0. */
+
+/* The left singular vectors of the original matrix are stored in U, and */
+/* the transpose of the right singular vectors are stored in VT, and the */
+/* singular values are in D. The algorithm consists of three stages: */
+
+/* The first stage consists of deflating the size of the problem */
+/* when there are multiple singular values or when there are zeros in */
+/* the Z vector. For each such occurrence the dimension of the */
+/* secular equation problem is reduced by one. This stage is */
+/* performed by the routine SLASD2. */
+
+/* The second stage consists of calculating the updated */
+/* singular values. This is done by finding the square roots of the */
+/* roots of the secular equation via the routine SLASD4 (as called */
+/* by SLASD3). This routine also calculates the singular vectors of */
+/* the current problem. */
+
+/* The final stage consists of computing the updated singular vectors */
+/* directly using the updated singular values. The singular vectors */
+/* for the current problem are multiplied with the singular vectors */
+/* from the overall problem. */
+
+/* Arguments */
+/* ========= */
+
+/* NL (input) INTEGER */
+/* The row dimension of the upper block. NL >= 1. */
+
+/* NR (input) INTEGER */
+/* The row dimension of the lower block. NR >= 1. */
+
+/* SQRE (input) INTEGER */
+/* = 0: the lower block is an NR-by-NR square matrix. */
+/* = 1: the lower block is an NR-by-(NR+1) rectangular matrix. */
+
+/* The bidiagonal matrix has row dimension N = NL + NR + 1, */
+/* and column dimension M = N + SQRE. */
+
+/* D (input/output) REAL array, dimension (NL+NR+1). */
+/* N = NL+NR+1 */
+/* On entry D(1:NL,1:NL) contains the singular values of the */
+/* upper block; and D(NL+2:N) contains the singular values of */
+/* the lower block. On exit D(1:N) contains the singular values */
+/* of the modified matrix. */
+
+/* ALPHA (input/output) REAL */
+/* Contains the diagonal element associated with the added row. */
+
+/* BETA (input/output) REAL */
+/* Contains the off-diagonal element associated with the added */
+/* row. */
+
+/* U (input/output) REAL array, dimension (LDU,N) */
+/* On entry U(1:NL, 1:NL) contains the left singular vectors of */
+/* the upper block; U(NL+2:N, NL+2:N) contains the left singular */
+/* vectors of the lower block. On exit U contains the left */
+/* singular vectors of the bidiagonal matrix. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= max( 1, N ). */
+
+/* VT (input/output) REAL array, dimension (LDVT,M) */
+/* where M = N + SQRE. */
+/* On entry VT(1:NL+1, 1:NL+1)' contains the right singular */
+/* vectors of the upper block; VT(NL+2:M, NL+2:M)' contains */
+/* the right singular vectors of the lower block. On exit */
+/* VT' contains the right singular vectors of the */
+/* bidiagonal matrix. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. LDVT >= max( 1, M ). */
+
+/* IDXQ (output) INTEGER array, dimension (N) */
+/* This contains the permutation which will reintegrate the */
+/* subproblem just solved back into sorted order, i.e. */
+/* D( IDXQ( I = 1, N ) ) will be in ascending order. */
+
+/* IWORK (workspace) INTEGER array, dimension (4*N) */
+
+/* WORK (workspace) REAL array, dimension (3*M**2+2*M) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, a singular value did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --idxq;
+ --iwork;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*nl < 1) {
+ *info = -1;
+ } else if (*nr < 1) {
+ *info = -2;
+ } else if (*sqre < 0 || *sqre > 1) {
+ *info = -3;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASD1", &i__1);
+ return 0;
+ }
+
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+
+/* The following values are for bookkeeping purposes only. They are */
+/* integer pointers which indicate the portion of the workspace */
+/* used by a particular array in SLASD2 and SLASD3. */
+
+ ldu2 = n;
+ ldvt2 = m;
+
+ iz = 1;
+ isigma = iz + m;
+ iu2 = isigma + n;
+ ivt2 = iu2 + ldu2 * n;
+ iq = ivt2 + ldvt2 * m;
+
+ idx = 1;
+ idxc = idx + n;
+ coltyp = idxc + n;
+ idxp = coltyp + n;
+
+/* Scale. */
+
+/* Computing MAX */
+ r__1 = dabs(*alpha), r__2 = dabs(*beta);
+ orgnrm = dmax(r__1,r__2);
+ d__[*nl + 1] = 0.f;
+ i__1 = n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((r__1 = d__[i__], dabs(r__1)) > orgnrm) {
+ orgnrm = (r__1 = d__[i__], dabs(r__1));
+ }
+/* L10: */
+ }
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b7, &n, &c__1, &d__[1], &n, info);
+ *alpha /= orgnrm;
+ *beta /= orgnrm;
+
+/* Deflate singular values. */
+
+ slasd2_(nl, nr, sqre, &k, &d__[1], &work[iz], alpha, beta, &u[u_offset],
+ ldu, &vt[vt_offset], ldvt, &work[isigma], &work[iu2], &ldu2, &
+ work[ivt2], &ldvt2, &iwork[idxp], &iwork[idx], &iwork[idxc], &
+ idxq[1], &iwork[coltyp], info);
+
+/* Solve Secular Equation and update singular vectors. */
+
+ ldq = k;
+ slasd3_(nl, nr, sqre, &k, &d__[1], &work[iq], &ldq, &work[isigma], &u[
+ u_offset], ldu, &work[iu2], &ldu2, &vt[vt_offset], ldvt, &work[
+ ivt2], &ldvt2, &iwork[idxc], &iwork[coltyp], &work[iz], info);
+ if (*info != 0) {
+ return 0;
+ }
+
+/* Unscale. */
+
+ slascl_("G", &c__0, &c__0, &c_b7, &orgnrm, &n, &c__1, &d__[1], &n, info);
+
+/* Prepare the IDXQ sorting permutation. */
+
+ n1 = k;
+ n2 = n - k;
+ slamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &idxq[1]);
+
+ return 0;
+
+/* End of SLASD1 */
+
+} /* slasd1_ */
diff --git a/contrib/libs/clapack/slasd2.c b/contrib/libs/clapack/slasd2.c
new file mode 100644
index 0000000000..15e3e47fe5
--- /dev/null
+++ b/contrib/libs/clapack/slasd2.c
@@ -0,0 +1,607 @@
+/* slasd2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b30 = 0.f;
+
+/* Subroutine */ int slasd2_(integer *nl, integer *nr, integer *sqre, integer
+ *k, real *d__, real *z__, real *alpha, real *beta, real *u, integer *
+ ldu, real *vt, integer *ldvt, real *dsigma, real *u2, integer *ldu2,
+ real *vt2, integer *ldvt2, integer *idxp, integer *idx, integer *idxc,
+ integer *idxq, integer *coltyp, integer *info)
+{
+ /* System generated locals */
+ integer u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1, vt_offset,
+ vt2_dim1, vt2_offset, i__1;
+ real r__1, r__2;
+
+ /* Local variables */
+ real c__;
+ integer i__, j, m, n;
+ real s;
+ integer k2;
+ real z1;
+ integer ct, jp;
+ real eps, tau, tol;
+ integer psm[4], nlp1, nlp2, idxi, idxj, ctot[4];
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *);
+ integer idxjp, jprev;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ extern doublereal slapy2_(real *, real *), slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), slamrg_(
+ integer *, integer *, real *, integer *, integer *, integer *);
+ real hlftol;
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *), slaset_(char *, integer *,
+ integer *, real *, real *, real *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLASD2 merges the two sets of singular values together into a single */
+/* sorted set. Then it tries to deflate the size of the problem. */
+/* There are two ways in which deflation can occur: when two or more */
+/* singular values are close together or if there is a tiny entry in the */
+/* Z vector. For each such occurrence the order of the related secular */
+/* equation problem is reduced by one. */
+
+/* SLASD2 is called from SLASD1. */
+
+/* Arguments */
+/* ========= */
+
+/* NL (input) INTEGER */
+/* The row dimension of the upper block. NL >= 1. */
+
+/* NR (input) INTEGER */
+/* The row dimension of the lower block. NR >= 1. */
+
+/* SQRE (input) INTEGER */
+/* = 0: the lower block is an NR-by-NR square matrix. */
+/* = 1: the lower block is an NR-by-(NR+1) rectangular matrix. */
+
+/* The bidiagonal matrix has N = NL + NR + 1 rows and */
+/* M = N + SQRE >= N columns. */
+
+/* K (output) INTEGER */
+/* Contains the dimension of the non-deflated matrix, */
+/* This is the order of the related secular equation. 1 <= K <=N. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry D contains the singular values of the two submatrices */
+/* to be combined. On exit D contains the trailing (N-K) updated */
+/* singular values (those which were deflated) sorted into */
+/* increasing order. */
+
+/* Z (output) REAL array, dimension (N) */
+/* On exit Z contains the updating row vector in the secular */
+/* equation. */
+
+/* ALPHA (input) REAL */
+/* Contains the diagonal element associated with the added row. */
+
+/* BETA (input) REAL */
+/* Contains the off-diagonal element associated with the added */
+/* row. */
+
+/* U (input/output) REAL array, dimension (LDU,N) */
+/* On entry U contains the left singular vectors of two */
+/* submatrices in the two square blocks with corners at (1,1), */
+/* (NL, NL), and (NL+2, NL+2), (N,N). */
+/* On exit U contains the trailing (N-K) updated left singular */
+/* vectors (those which were deflated) in its last N-K columns. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= N. */
+
+/* VT (input/output) REAL array, dimension (LDVT,M) */
+/* On entry VT' contains the right singular vectors of two */
+/* submatrices in the two square blocks with corners at (1,1), */
+/* (NL+1, NL+1), and (NL+2, NL+2), (M,M). */
+/* On exit VT' contains the trailing (N-K) updated right singular */
+/* vectors (those which were deflated) in its last N-K columns. */
+/* In case SQRE =1, the last row of VT spans the right null */
+/* space. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. LDVT >= M. */
+
+/* DSIGMA (output) REAL array, dimension (N) */
+/* Contains a copy of the diagonal elements (K-1 singular values */
+/* and one zero) in the secular equation. */
+
+/* U2 (output) REAL array, dimension (LDU2,N) */
+/* Contains a copy of the first K-1 left singular vectors which */
+/* will be used by SLASD3 in a matrix multiply (SGEMM) to solve */
+/* for the new left singular vectors. U2 is arranged into four */
+/* blocks. The first block contains a column with 1 at NL+1 and */
+/* zero everywhere else; the second block contains non-zero */
+/* entries only at and above NL; the third contains non-zero */
+/* entries only below NL+1; and the fourth is dense. */
+
+/* LDU2 (input) INTEGER */
+/* The leading dimension of the array U2. LDU2 >= N. */
+
+/* VT2 (output) REAL array, dimension (LDVT2,N) */
+/* VT2' contains a copy of the first K right singular vectors */
+/* which will be used by SLASD3 in a matrix multiply (SGEMM) to */
+/* solve for the new right singular vectors. VT2 is arranged into */
+/* three blocks. The first block contains a row that corresponds */
+/* to the special 0 diagonal element in SIGMA; the second block */
+/* contains non-zeros only at and before NL +1; the third block */
+/* contains non-zeros only at and after NL +2. */
+
+/* LDVT2 (input) INTEGER */
+/* The leading dimension of the array VT2. LDVT2 >= M. */
+
+/* IDXP (workspace) INTEGER array, dimension (N) */
+/* This will contain the permutation used to place deflated */
+/* values of D at the end of the array. On output IDXP(2:K) */
+/* points to the nondeflated D-values and IDXP(K+1:N) */
+/* points to the deflated singular values. */
+
+/* IDX (workspace) INTEGER array, dimension (N) */
+/* This will contain the permutation used to sort the contents of */
+/* D into ascending order. */
+
+/* IDXC (output) INTEGER array, dimension (N) */
+/* This will contain the permutation used to arrange the columns */
+/* of the deflated U matrix into three groups: the first group */
+/* contains non-zero entries only at and above NL, the second */
+/* contains non-zero entries only below NL+2, and the third is */
+/* dense. */
+
+/* IDXQ (input/output) INTEGER array, dimension (N) */
+/* This contains the permutation which separately sorts the two */
+/* sub-problems in D into ascending order. Note that entries in */
+/* the first half of this permutation must first be moved one */
+/* position backward; and entries in the second half */
+/* must first have NL+1 added to their values. */
+
+/* COLTYP (workspace/output) INTEGER array, dimension (N) */
+/* As workspace, this will contain a label which will indicate */
+/* which of the following types a column in the U2 matrix or a */
+/* row in the VT2 matrix is: */
+/* 1 : non-zero in the upper half only */
+/* 2 : non-zero in the lower half only */
+/* 3 : dense */
+/* 4 : deflated */
+
+/* On exit, it is an array of dimension 4, with COLTYP(I) being */
+/* the dimension of the I-th type columns. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --z__;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --dsigma;
+ u2_dim1 = *ldu2;
+ u2_offset = 1 + u2_dim1;
+ u2 -= u2_offset;
+ vt2_dim1 = *ldvt2;
+ vt2_offset = 1 + vt2_dim1;
+ vt2 -= vt2_offset;
+ --idxp;
+ --idx;
+ --idxc;
+ --idxq;
+ --coltyp;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*nl < 1) {
+ *info = -1;
+ } else if (*nr < 1) {
+ *info = -2;
+ } else if (*sqre != 1 && *sqre != 0) {
+ *info = -3;
+ }
+
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+
+ if (*ldu < n) {
+ *info = -10;
+ } else if (*ldvt < m) {
+ *info = -12;
+ } else if (*ldu2 < n) {
+ *info = -15;
+ } else if (*ldvt2 < m) {
+ *info = -17;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASD2", &i__1);
+ return 0;
+ }
+
+ nlp1 = *nl + 1;
+ nlp2 = *nl + 2;
+
+/* Generate the first part of the vector Z; and move the singular */
+/* values in the first part of D one position backward. */
+
+ z1 = *alpha * vt[nlp1 + nlp1 * vt_dim1];
+ z__[1] = z1;
+ for (i__ = *nl; i__ >= 1; --i__) {
+ z__[i__ + 1] = *alpha * vt[i__ + nlp1 * vt_dim1];
+ d__[i__ + 1] = d__[i__];
+ idxq[i__ + 1] = idxq[i__] + 1;
+/* L10: */
+ }
+
+/* Generate the second part of the vector Z. */
+
+ i__1 = m;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ z__[i__] = *beta * vt[i__ + nlp2 * vt_dim1];
+/* L20: */
+ }
+
+/* Initialize some reference arrays. */
+
+ i__1 = nlp1;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ coltyp[i__] = 1;
+/* L30: */
+ }
+ i__1 = n;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ coltyp[i__] = 2;
+/* L40: */
+ }
+
+/* Sort the singular values into increasing order */
+
+ i__1 = n;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ idxq[i__] += nlp1;
+/* L50: */
+ }
+
+/* DSIGMA, IDXC, and the first column of U2 */
+/* are used as storage space. */
+
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ dsigma[i__] = d__[idxq[i__]];
+ u2[i__ + u2_dim1] = z__[idxq[i__]];
+ idxc[i__] = coltyp[idxq[i__]];
+/* L60: */
+ }
+
+ slamrg_(nl, nr, &dsigma[2], &c__1, &c__1, &idx[2]);
+
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ idxi = idx[i__] + 1;
+ d__[i__] = dsigma[idxi];
+ z__[i__] = u2[idxi + u2_dim1];
+ coltyp[i__] = idxc[idxi];
+/* L70: */
+ }
+
+/* Calculate the allowable deflation tolerance */
+
+ eps = slamch_("Epsilon");
+/* Computing MAX */
+ r__1 = dabs(*alpha), r__2 = dabs(*beta);
+ tol = dmax(r__1,r__2);
+/* Computing MAX */
+ r__2 = (r__1 = d__[n], dabs(r__1));
+ tol = eps * 8.f * dmax(r__2,tol);
+
+/* There are 2 kinds of deflation -- first a value in the z-vector */
+/* is small, second two (or more) singular values are very close */
+/* together (their difference is small). */
+
+/* If the value in the z-vector is small, we simply permute the */
+/* array so that the corresponding singular value is moved to the */
+/* end. */
+
+/* If two values in the D-vector are close, we perform a two-sided */
+/* rotation designed to make one of the corresponding z-vector */
+/* entries zero, and then permute the array so that the deflated */
+/* singular value is moved to the end. */
+
+/* If there are multiple singular values then the problem deflates. */
+/* Here the number of equal singular values are found. As each equal */
+/* singular value is found, an elementary reflector is computed to */
+/* rotate the corresponding singular subspace so that the */
+/* corresponding components of Z are zero in this new basis. */
+
+ *k = 1;
+ k2 = n + 1;
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ if ((r__1 = z__[j], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ idxp[k2] = j;
+ coltyp[j] = 4;
+ if (j == n) {
+ goto L120;
+ }
+ } else {
+ jprev = j;
+ goto L90;
+ }
+/* L80: */
+ }
+L90:
+ j = jprev;
+L100:
+ ++j;
+ if (j > n) {
+ goto L110;
+ }
+ if ((r__1 = z__[j], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ idxp[k2] = j;
+ coltyp[j] = 4;
+ } else {
+
+/* Check if singular values are close enough to allow deflation. */
+
+ if ((r__1 = d__[j] - d__[jprev], dabs(r__1)) <= tol) {
+
+/* Deflation is possible. */
+
+ s = z__[jprev];
+ c__ = z__[j];
+
+/* Find sqrt(a**2+b**2) without overflow or */
+/* destructive underflow. */
+
+ tau = slapy2_(&c__, &s);
+ c__ /= tau;
+ s = -s / tau;
+ z__[j] = tau;
+ z__[jprev] = 0.f;
+
+/* Apply back the Givens rotation to the left and right */
+/* singular vector matrices. */
+
+ idxjp = idxq[idx[jprev] + 1];
+ idxj = idxq[idx[j] + 1];
+ if (idxjp <= nlp1) {
+ --idxjp;
+ }
+ if (idxj <= nlp1) {
+ --idxj;
+ }
+ srot_(&n, &u[idxjp * u_dim1 + 1], &c__1, &u[idxj * u_dim1 + 1], &
+ c__1, &c__, &s);
+ srot_(&m, &vt[idxjp + vt_dim1], ldvt, &vt[idxj + vt_dim1], ldvt, &
+ c__, &s);
+ if (coltyp[j] != coltyp[jprev]) {
+ coltyp[j] = 3;
+ }
+ coltyp[jprev] = 4;
+ --k2;
+ idxp[k2] = jprev;
+ jprev = j;
+ } else {
+ ++(*k);
+ u2[*k + u2_dim1] = z__[jprev];
+ dsigma[*k] = d__[jprev];
+ idxp[*k] = jprev;
+ jprev = j;
+ }
+ }
+ goto L100;
+L110:
+
+/* Record the last singular value. */
+
+ ++(*k);
+ u2[*k + u2_dim1] = z__[jprev];
+ dsigma[*k] = d__[jprev];
+ idxp[*k] = jprev;
+
+L120:
+
+/* Count up the total number of the various types of columns, then */
+/* form a permutation which positions the four column types into */
+/* four groups of uniform structure (although one or more of these */
+/* groups may be empty). */
+
+ for (j = 1; j <= 4; ++j) {
+ ctot[j - 1] = 0;
+/* L130: */
+ }
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ ct = coltyp[j];
+ ++ctot[ct - 1];
+/* L140: */
+ }
+
+/* PSM(*) = Position in SubMatrix (of types 1 through 4) */
+
+ psm[0] = 2;
+ psm[1] = ctot[0] + 2;
+ psm[2] = psm[1] + ctot[1];
+ psm[3] = psm[2] + ctot[2];
+
+/* Fill out the IDXC array so that the permutation which it induces */
+/* will place all type-1 columns first, all type-2 columns next, */
+/* then all type-3's, and finally all type-4's, starting from the */
+/* second column. This applies similarly to the rows of VT. */
+
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ jp = idxp[j];
+ ct = coltyp[jp];
+ idxc[psm[ct - 1]] = j;
+ ++psm[ct - 1];
+/* L150: */
+ }
+
+/* Sort the singular values and corresponding singular vectors into */
+/* DSIGMA, U2, and VT2 respectively. The singular values/vectors */
+/* which were not deflated go into the first K slots of DSIGMA, U2, */
+/* and VT2 respectively, while those which were deflated go into the */
+/* last N - K slots, except that the first column/row will be treated */
+/* separately. */
+
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ jp = idxp[j];
+ dsigma[j] = d__[jp];
+ idxj = idxq[idx[idxp[idxc[j]]] + 1];
+ if (idxj <= nlp1) {
+ --idxj;
+ }
+ scopy_(&n, &u[idxj * u_dim1 + 1], &c__1, &u2[j * u2_dim1 + 1], &c__1);
+ scopy_(&m, &vt[idxj + vt_dim1], ldvt, &vt2[j + vt2_dim1], ldvt2);
+/* L160: */
+ }
+
+/* Determine DSIGMA(1), DSIGMA(2) and Z(1) */
+
+ dsigma[1] = 0.f;
+ hlftol = tol / 2.f;
+ if (dabs(dsigma[2]) <= hlftol) {
+ dsigma[2] = hlftol;
+ }
+ if (m > n) {
+ z__[1] = slapy2_(&z1, &z__[m]);
+ if (z__[1] <= tol) {
+ c__ = 1.f;
+ s = 0.f;
+ z__[1] = tol;
+ } else {
+ c__ = z1 / z__[1];
+ s = z__[m] / z__[1];
+ }
+ } else {
+ if (dabs(z1) <= tol) {
+ z__[1] = tol;
+ } else {
+ z__[1] = z1;
+ }
+ }
+
+/* Move the rest of the updating row to Z. */
+
+ i__1 = *k - 1;
+ scopy_(&i__1, &u2[u2_dim1 + 2], &c__1, &z__[2], &c__1);
+
+/* Determine the first column of U2, the first row of VT2 and the */
+/* last row of VT. */
+
+ slaset_("A", &n, &c__1, &c_b30, &c_b30, &u2[u2_offset], ldu2);
+ u2[nlp1 + u2_dim1] = 1.f;
+ if (m > n) {
+ i__1 = nlp1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ vt[m + i__ * vt_dim1] = -s * vt[nlp1 + i__ * vt_dim1];
+ vt2[i__ * vt2_dim1 + 1] = c__ * vt[nlp1 + i__ * vt_dim1];
+/* L170: */
+ }
+ i__1 = m;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ vt2[i__ * vt2_dim1 + 1] = s * vt[m + i__ * vt_dim1];
+ vt[m + i__ * vt_dim1] = c__ * vt[m + i__ * vt_dim1];
+/* L180: */
+ }
+ } else {
+ scopy_(&m, &vt[nlp1 + vt_dim1], ldvt, &vt2[vt2_dim1 + 1], ldvt2);
+ }
+ if (m > n) {
+ scopy_(&m, &vt[m + vt_dim1], ldvt, &vt2[m + vt2_dim1], ldvt2);
+ }
+
+/* The deflated singular values and their corresponding vectors go */
+/* into the back of D, U, and V respectively. */
+
+ if (n > *k) {
+ i__1 = n - *k;
+ scopy_(&i__1, &dsigma[*k + 1], &c__1, &d__[*k + 1], &c__1);
+ i__1 = n - *k;
+ slacpy_("A", &n, &i__1, &u2[(*k + 1) * u2_dim1 + 1], ldu2, &u[(*k + 1)
+ * u_dim1 + 1], ldu);
+ i__1 = n - *k;
+ slacpy_("A", &i__1, &m, &vt2[*k + 1 + vt2_dim1], ldvt2, &vt[*k + 1 +
+ vt_dim1], ldvt);
+ }
+
+/* Copy CTOT into COLTYP for referencing in SLASD3. */
+
+ for (j = 1; j <= 4; ++j) {
+ coltyp[j] = ctot[j - 1];
+/* L190: */
+ }
+
+ return 0;
+
+/* End of SLASD2 */
+
+} /* slasd2_ */
diff --git a/contrib/libs/clapack/slasd3.c b/contrib/libs/clapack/slasd3.c
new file mode 100644
index 0000000000..d4b3ab0781
--- /dev/null
+++ b/contrib/libs/clapack/slasd3.c
@@ -0,0 +1,450 @@
+/* slasd3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values (file-scope so their addresses can be passed */
+/* to the Fortran-style BLAS/LAPACK calls below, which take all */
+/* arguments by pointer). */
+
+/* Subroutine */ int slasd3_(integer *nl, integer *nr, integer *sqre, integer
+ *k, real *d__, real *q, integer *ldq, real *dsigma, real *u, integer *
+ ldu, real *u2, integer *ldu2, real *vt, integer *ldvt, real *vt2,
+ integer *ldvt2, integer *idxc, integer *ctot, real *z__, integer *
+ info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1,
+ vt_offset, vt2_dim1, vt2_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ integer i__, j, m, n, jc;
+ real rho;
+ integer nlp1, nlp2, nrp1;
+ real temp;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ integer ctemp;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ integer ktemp;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ extern doublereal slamc3_(real *, real *);
+ extern /* Subroutine */ int slasd4_(integer *, integer *, real *, real *,
+ real *, real *, real *, real *, integer *), xerbla_(char *,
+ integer *), slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), slacpy_(char *, integer *, integer *, real *, integer *,
+ real *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLASD3 finds all the square roots of the roots of the secular */
+/* equation, as defined by the values in D and Z. It makes the */
+/* appropriate calls to SLASD4 and then updates the singular */
+/* vectors by matrix multiplication. */
+
+/* This code makes very mild assumptions about floating point */
+/* arithmetic. It will work on machines with a guard digit in */
+/* add/subtract, or on those binary machines without guard digits */
+/* which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2. */
+/* It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* SLASD3 is called from SLASD1. */
+
+/* Arguments */
+/* ========= */
+
+/* NL (input) INTEGER */
+/* The row dimension of the upper block. NL >= 1. */
+
+/* NR (input) INTEGER */
+/* The row dimension of the lower block. NR >= 1. */
+
+/* SQRE (input) INTEGER */
+/* = 0: the lower block is an NR-by-NR square matrix. */
+/* = 1: the lower block is an NR-by-(NR+1) rectangular matrix. */
+
+/* The bidiagonal matrix has N = NL + NR + 1 rows and */
+/* M = N + SQRE >= N columns. */
+
+/* K (input) INTEGER */
+/* The size of the secular equation, 1 =< K = < N. */
+
+/* D (output) REAL array, dimension(K) */
+/* On exit the square roots of the roots of the secular equation, */
+/* in ascending order. */
+
+/* Q (workspace) REAL array, */
+/* dimension at least (LDQ,K). */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= K. */
+
+/* DSIGMA (input/output) REAL array, dimension(K) */
+/* The first K elements of this array contain the old roots */
+/* of the deflated updating problem. These are the poles */
+/* of the secular equation. */
+
+/* U (output) REAL array, dimension (LDU, N) */
+/* The last N - K columns of this matrix contain the deflated */
+/* left singular vectors. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= N. */
+
+/* U2 (input) REAL array, dimension (LDU2, N) */
+/* The first K columns of this matrix contain the non-deflated */
+/* left singular vectors for the split problem. */
+
+/* LDU2 (input) INTEGER */
+/* The leading dimension of the array U2. LDU2 >= N. */
+
+/* VT (output) REAL array, dimension (LDVT, M) */
+/* The last M - K columns of VT' contain the deflated */
+/* right singular vectors. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. LDVT >= N. */
+
+/* VT2 (input/output) REAL array, dimension (LDVT2, N) */
+/* The first K columns of VT2' contain the non-deflated */
+/* right singular vectors for the split problem. */
+
+/* LDVT2 (input) INTEGER */
+/* The leading dimension of the array VT2. LDVT2 >= N. */
+
+/* IDXC (input) INTEGER array, dimension (N) */
+/* The permutation used to arrange the columns of U (and rows of */
+/* VT) into three groups: the first group contains non-zero */
+/* entries only at and above (or before) NL +1; the second */
+/* contains non-zero entries only at and below (or after) NL+2; */
+/* and the third is dense. The first column of U and the row of */
+/* VT are treated separately, however. */
+
+/* The rows of the singular vectors found by SLASD4 */
+/* must be likewise permuted before the matrix multiplies can */
+/* take place. */
+
+/* CTOT (input) INTEGER array, dimension (4) */
+/* A count of the total number of the various types of columns */
+/* in U (or rows in VT), as described in IDXC. The fourth column */
+/* type is any column which has been deflated. */
+
+/* Z (input/output) REAL array, dimension (K) */
+/* The first K elements of this array contain the components */
+/* of the deflation-adjusted updating row vector. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, an singular value did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --dsigma;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ u2_dim1 = *ldu2;
+ u2_offset = 1 + u2_dim1;
+ u2 -= u2_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ vt2_dim1 = *ldvt2;
+ vt2_offset = 1 + vt2_dim1;
+ vt2 -= vt2_offset;
+ --idxc;
+ --ctot;
+ --z__;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*nl < 1) {
+ *info = -1;
+ } else if (*nr < 1) {
+ *info = -2;
+ } else if (*sqre != 1 && *sqre != 0) {
+ *info = -3;
+ }
+
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+ nlp1 = *nl + 1;
+ nlp2 = *nl + 2;
+
+ if (*k < 1 || *k > n) {
+ *info = -4;
+ } else if (*ldq < *k) {
+ *info = -7;
+ } else if (*ldu < n) {
+ *info = -10;
+ } else if (*ldu2 < n) {
+ *info = -12;
+ } else if (*ldvt < m) {
+ *info = -14;
+ } else if (*ldvt2 < m) {
+ *info = -16;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASD3", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*k == 1) {
+ d__[1] = dabs(z__[1]);
+ scopy_(&m, &vt2[vt2_dim1 + 1], ldvt2, &vt[vt_dim1 + 1], ldvt);
+ if (z__[1] > 0.f) {
+ scopy_(&n, &u2[u2_dim1 + 1], &c__1, &u[u_dim1 + 1], &c__1);
+ } else {
+ i__1 = n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ u[i__ + u_dim1] = -u2[i__ + u2_dim1];
+/* L10: */
+ }
+ }
+ return 0;
+ }
+
+/* Modify values DSIGMA(i) to make sure all DSIGMA(i)-DSIGMA(j) can */
+/* be computed with high relative accuracy (barring over/underflow). */
+/* This is a problem on machines without a guard digit in */
+/* add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2). */
+/* The following code replaces DSIGMA(I) by 2*DSIGMA(I)-DSIGMA(I), */
+/* which on any of these machines zeros out the bottommost */
+/* bit of DSIGMA(I) if it is 1; this makes the subsequent */
+/* subtractions DSIGMA(I)-DSIGMA(J) unproblematic when cancellation */
+/* occurs. On binary machines with a guard digit (almost all */
+/* machines) it does not change DSIGMA(I) at all. On hexadecimal */
+/* and decimal machines with a guard digit, it slightly */
+/* changes the bottommost bits of DSIGMA(I). It does not account */
+/* for hexadecimal or decimal machines without guard digits */
+/* (we know of none). We use a subroutine call to compute */
+/* 2*DSIGMA(I) to prevent optimizing compilers from eliminating */
+/* this code. */
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dsigma[i__] = slamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__];
+/* L20: */
+ }
+
+/* Keep a copy of Z. */
+
+ scopy_(k, &z__[1], &c__1, &q[q_offset], &c__1);
+
+/* Normalize Z. */
+
+ rho = snrm2_(k, &z__[1], &c__1);
+ slascl_("G", &c__0, &c__0, &rho, &c_b13, k, &c__1, &z__[1], k, info);
+ rho *= rho;
+
+/* Find the new singular values. */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ slasd4_(k, &j, &dsigma[1], &z__[1], &u[j * u_dim1 + 1], &rho, &d__[j],
+ &vt[j * vt_dim1 + 1], info);
+
+/* If the zero finder fails, the computation is terminated. */
+
+ if (*info != 0) {
+ return 0;
+ }
+/* L30: */
+ }
+
+/* Compute updated Z. */
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ z__[i__] = u[i__ + *k * u_dim1] * vt[i__ + *k * vt_dim1];
+ i__2 = i__ - 1;
+ for (j = 1; j <= i__2; ++j) {
+ z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[
+ i__] - dsigma[j]) / (dsigma[i__] + dsigma[j]);
+/* L40: */
+ }
+ i__2 = *k - 1;
+ for (j = i__; j <= i__2; ++j) {
+ z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[
+ i__] - dsigma[j + 1]) / (dsigma[i__] + dsigma[j + 1]);
+/* L50: */
+ }
+ r__2 = sqrt((r__1 = z__[i__], dabs(r__1)));
+ z__[i__] = r_sign(&r__2, &q[i__ + q_dim1]);
+/* L60: */
+ }
+
+/* Compute left singular vectors of the modified diagonal matrix, */
+/* and store related information for the right singular vectors. */
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ vt[i__ * vt_dim1 + 1] = z__[1] / u[i__ * u_dim1 + 1] / vt[i__ *
+ vt_dim1 + 1];
+ u[i__ * u_dim1 + 1] = -1.f;
+ i__2 = *k;
+ for (j = 2; j <= i__2; ++j) {
+ vt[j + i__ * vt_dim1] = z__[j] / u[j + i__ * u_dim1] / vt[j + i__
+ * vt_dim1];
+ u[j + i__ * u_dim1] = dsigma[j] * vt[j + i__ * vt_dim1];
+/* L70: */
+ }
+ temp = snrm2_(k, &u[i__ * u_dim1 + 1], &c__1);
+ q[i__ * q_dim1 + 1] = u[i__ * u_dim1 + 1] / temp;
+ i__2 = *k;
+ for (j = 2; j <= i__2; ++j) {
+ jc = idxc[j];
+ q[j + i__ * q_dim1] = u[jc + i__ * u_dim1] / temp;
+/* L80: */
+ }
+/* L90: */
+ }
+
+/* Update the left singular vector matrix. */
+
+ if (*k == 2) {
+ sgemm_("N", "N", &n, k, k, &c_b13, &u2[u2_offset], ldu2, &q[q_offset],
+ ldq, &c_b26, &u[u_offset], ldu);
+ goto L100;
+ }
+ if (ctot[1] > 0) {
+ sgemm_("N", "N", nl, k, &ctot[1], &c_b13, &u2[(u2_dim1 << 1) + 1],
+ ldu2, &q[q_dim1 + 2], ldq, &c_b26, &u[u_dim1 + 1], ldu);
+ if (ctot[3] > 0) {
+ ktemp = ctot[1] + 2 + ctot[2];
+ sgemm_("N", "N", nl, k, &ctot[3], &c_b13, &u2[ktemp * u2_dim1 + 1]
+, ldu2, &q[ktemp + q_dim1], ldq, &c_b13, &u[u_dim1 + 1],
+ ldu);
+ }
+ } else if (ctot[3] > 0) {
+ ktemp = ctot[1] + 2 + ctot[2];
+ sgemm_("N", "N", nl, k, &ctot[3], &c_b13, &u2[ktemp * u2_dim1 + 1],
+ ldu2, &q[ktemp + q_dim1], ldq, &c_b26, &u[u_dim1 + 1], ldu);
+ } else {
+ slacpy_("F", nl, k, &u2[u2_offset], ldu2, &u[u_offset], ldu);
+ }
+ scopy_(k, &q[q_dim1 + 1], ldq, &u[nlp1 + u_dim1], ldu);
+ ktemp = ctot[1] + 2;
+ ctemp = ctot[2] + ctot[3];
+ sgemm_("N", "N", nr, k, &ctemp, &c_b13, &u2[nlp2 + ktemp * u2_dim1], ldu2,
+ &q[ktemp + q_dim1], ldq, &c_b26, &u[nlp2 + u_dim1], ldu);
+
+/* Generate the right singular vectors. */
+
+L100:
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = snrm2_(k, &vt[i__ * vt_dim1 + 1], &c__1);
+ q[i__ + q_dim1] = vt[i__ * vt_dim1 + 1] / temp;
+ i__2 = *k;
+ for (j = 2; j <= i__2; ++j) {
+ jc = idxc[j];
+ q[i__ + j * q_dim1] = vt[jc + i__ * vt_dim1] / temp;
+/* L110: */
+ }
+/* L120: */
+ }
+
+/* Update the right singular vector matrix. */
+
+ if (*k == 2) {
+ sgemm_("N", "N", k, &m, k, &c_b13, &q[q_offset], ldq, &vt2[vt2_offset]
+, ldvt2, &c_b26, &vt[vt_offset], ldvt);
+ return 0;
+ }
+ ktemp = ctot[1] + 1;
+ sgemm_("N", "N", k, &nlp1, &ktemp, &c_b13, &q[q_dim1 + 1], ldq, &vt2[
+ vt2_dim1 + 1], ldvt2, &c_b26, &vt[vt_dim1 + 1], ldvt);
+ ktemp = ctot[1] + 2 + ctot[2];
+ if (ktemp <= *ldvt2) {
+ sgemm_("N", "N", k, &nlp1, &ctot[3], &c_b13, &q[ktemp * q_dim1 + 1],
+ ldq, &vt2[ktemp + vt2_dim1], ldvt2, &c_b13, &vt[vt_dim1 + 1],
+ ldvt);
+ }
+
+ ktemp = ctot[1] + 1;
+ nrp1 = *nr + *sqre;
+ if (ktemp > 1) {
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ q[i__ + ktemp * q_dim1] = q[i__ + q_dim1];
+/* L130: */
+ }
+ i__1 = m;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ vt2[ktemp + i__ * vt2_dim1] = vt2[i__ * vt2_dim1 + 1];
+/* L140: */
+ }
+ }
+ ctemp = ctot[2] + 1 + ctot[3];
+ sgemm_("N", "N", k, &nrp1, &ctemp, &c_b13, &q[ktemp * q_dim1 + 1], ldq, &
+ vt2[ktemp + nlp2 * vt2_dim1], ldvt2, &c_b26, &vt[nlp2 * vt_dim1 +
+ 1], ldvt);
+
+ return 0;
+
+/* End of SLASD3 */
+
+} /* slasd3_ */
diff --git a/contrib/libs/clapack/slasd4.c b/contrib/libs/clapack/slasd4.c
new file mode 100644
index 0000000000..11ba6d23c6
--- /dev/null
+++ b/contrib/libs/clapack/slasd4.c
@@ -0,0 +1,1010 @@
+/* slasd4.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slasd4_(integer *n, integer *i__, real *d__, real *z__,
+ real *delta, real *rho, real *sigma, real *work, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ real a, b, c__;
+ integer j;
+ real w, dd[3];
+ integer ii;
+ real dw, zz[3];
+ integer ip1;
+ real eta, phi, eps, tau, psi;
+ integer iim1, iip1;
+ real dphi, dpsi;
+ integer iter;
+ real temp, prew, sg2lb, sg2ub, temp1, temp2, dtiim, delsq, dtiip;
+ integer niter;
+ real dtisq;
+ logical swtch;
+ real dtnsq;
+ extern /* Subroutine */ int slaed6_(integer *, logical *, real *, real *,
+ real *, real *, real *, integer *);
+ real delsq2;
+ extern /* Subroutine */ int slasd5_(integer *, real *, real *, real *,
+ real *, real *, real *);
+ real dtnsq1;
+ logical swtch3;
+ extern doublereal slamch_(char *);
+ logical orgati;
+ real erretm, dtipsq, rhoinv;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This subroutine computes the square root of the I-th updated */
+/* eigenvalue of a positive symmetric rank-one modification to */
+/* a positive diagonal matrix whose entries are given as the squares */
+/* of the corresponding entries in the array d, and that */
+
+/* 0 <= D(i) < D(j) for i < j */
+
+/* and that RHO > 0. This is arranged by the calling routine, and is */
+/* no loss in generality. The rank-one modified system is thus */
+
+/* diag( D ) * diag( D ) + RHO * Z * Z_transpose. */
+
+/* where we assume the Euclidean norm of Z is 1. */
+
+/* The method consists of approximating the rational functions in the */
+/* secular equation by simpler interpolating rational functions. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The length of all arrays. */
+
+/* I (input) INTEGER */
+/* The index of the eigenvalue to be computed. 1 <= I <= N. */
+
+/* D (input) REAL array, dimension ( N ) */
+/* The original eigenvalues. It is assumed that they are in */
+/* order, 0 <= D(I) < D(J) for I < J. */
+
+/* Z (input) REAL array, dimension (N) */
+/* The components of the updating vector. */
+
+/* DELTA (output) REAL array, dimension (N) */
+/* If N .ne. 1, DELTA contains (D(j) - sigma_I) in its j-th */
+/* component. If N = 1, then DELTA(1) = 1. The vector DELTA */
+/* contains the information necessary to construct the */
+/* (singular) eigenvectors. */
+
+/* RHO (input) REAL */
+/* The scalar in the symmetric updating formula. */
+
+/* SIGMA (output) REAL */
+/* The computed sigma_I, the I-th updated eigenvalue. */
+
+/* WORK (workspace) REAL array, dimension (N) */
+/* If N .ne. 1, WORK contains (D(j) + sigma_I) in its j-th */
+/* component. If N = 1, then WORK( 1 ) = 1. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* > 0: if INFO = 1, the updating process failed. */
+
+/* Internal Parameters */
+/* =================== */
+
+/* Logical variable ORGATI (origin-at-i?) is used for distinguishing */
+/* whether D(i) or D(i+1) is treated as the origin. */
+
+/* ORGATI = .true. origin at i */
+/* ORGATI = .false. origin at i+1 */
+
+/* Logical variable SWTCH3 (switch-for-3-poles?) is for noting */
+/* if we are working with THREE poles! */
+
+/* MAXIT is the maximum number of iterations allowed for each */
+/* eigenvalue. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ren-Cang Li, Computer Science Division, University of California */
+/* at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Since this routine is called in an inner loop, we do no argument */
+/* checking. */
+
+/* Quick return for N=1 and 2. */
+
+ /* Parameter adjustments */
+ --work;
+ --delta;
+ --z__;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+ if (*n == 1) {
+
+/* Presumably, I=1 upon entry */
+
+ *sigma = sqrt(d__[1] * d__[1] + *rho * z__[1] * z__[1]);
+ delta[1] = 1.f;
+ work[1] = 1.f;
+ return 0;
+ }
+ if (*n == 2) {
+ slasd5_(i__, &d__[1], &z__[1], &delta[1], rho, sigma, &work[1]);
+ return 0;
+ }
+
+/* Compute machine epsilon */
+
+ eps = slamch_("Epsilon");
+ rhoinv = 1.f / *rho;
+
+/* The case I = N */
+
+ if (*i__ == *n) {
+
+/* Initialize some basic variables */
+
+ ii = *n - 1;
+ niter = 1;
+
+/* Calculate initial guess */
+
+ temp = *rho / 2.f;
+
+/* If ||Z||_2 is not one, then TEMP should be set to */
+/* RHO * ||Z||_2^2 / TWO */
+
+ temp1 = temp / (d__[*n] + sqrt(d__[*n] * d__[*n] + temp));
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ work[j] = d__[j] + d__[*n] + temp1;
+ delta[j] = d__[j] - d__[*n] - temp1;
+/* L10: */
+ }
+
+ psi = 0.f;
+ i__1 = *n - 2;
+ for (j = 1; j <= i__1; ++j) {
+ psi += z__[j] * z__[j] / (delta[j] * work[j]);
+/* L20: */
+ }
+
+ c__ = rhoinv + psi;
+ w = c__ + z__[ii] * z__[ii] / (delta[ii] * work[ii]) + z__[*n] * z__[*
+ n] / (delta[*n] * work[*n]);
+
+ if (w <= 0.f) {
+ temp1 = sqrt(d__[*n] * d__[*n] + *rho);
+ temp = z__[*n - 1] * z__[*n - 1] / ((d__[*n - 1] + temp1) * (d__[*
+ n] - d__[*n - 1] + *rho / (d__[*n] + temp1))) + z__[*n] *
+ z__[*n] / *rho;
+
+/* The following TAU is to approximate */
+/* SIGMA_n^2 - D( N )*D( N ) */
+
+ if (c__ <= temp) {
+ tau = *rho;
+ } else {
+ delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]);
+ a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*
+ n];
+ b = z__[*n] * z__[*n] * delsq;
+ if (a < 0.f) {
+ tau = b * 2.f / (sqrt(a * a + b * 4.f * c__) - a);
+ } else {
+ tau = (a + sqrt(a * a + b * 4.f * c__)) / (c__ * 2.f);
+ }
+ }
+
+/* It can be proved that */
+/* D(N)^2+RHO/2 <= SIGMA_n^2 < D(N)^2+TAU <= D(N)^2+RHO */
+
+ } else {
+ delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]);
+ a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n];
+ b = z__[*n] * z__[*n] * delsq;
+
+/* The following TAU is to approximate */
+/* SIGMA_n^2 - D( N )*D( N ) */
+
+ if (a < 0.f) {
+ tau = b * 2.f / (sqrt(a * a + b * 4.f * c__) - a);
+ } else {
+ tau = (a + sqrt(a * a + b * 4.f * c__)) / (c__ * 2.f);
+ }
+
+/* It can be proved that */
+/* D(N)^2 < D(N)^2+TAU < SIGMA(N)^2 < D(N)^2+RHO/2 */
+
+ }
+
+/* The following ETA is to approximate SIGMA_n - D( N ) */
+
+ eta = tau / (d__[*n] + sqrt(d__[*n] * d__[*n] + tau));
+
+ *sigma = d__[*n] + eta;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[*i__] - eta;
+ work[j] = d__[j] + d__[*i__] + eta;
+/* L30: */
+ }
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / (delta[j] * work[j]);
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L40: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / (delta[*n] * work[*n]);
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8.f + erretm - phi + rhoinv + dabs(tau) * (
+ dpsi + dphi);
+
+ w = rhoinv + phi + psi;
+
+/* Test for convergence */
+
+ if (dabs(w) <= eps * erretm) {
+ goto L240;
+ }
+
+/* Calculate the new step */
+
+ ++niter;
+ dtnsq1 = work[*n - 1] * delta[*n - 1];
+ dtnsq = work[*n] * delta[*n];
+ c__ = w - dtnsq1 * dpsi - dtnsq * dphi;
+ a = (dtnsq + dtnsq1) * w - dtnsq * dtnsq1 * (dpsi + dphi);
+ b = dtnsq * dtnsq1 * w;
+ if (c__ < 0.f) {
+ c__ = dabs(c__);
+ }
+ if (c__ == 0.f) {
+ eta = *rho - *sigma * *sigma;
+ } else if (a >= 0.f) {
+ eta = (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) / (
+ c__ * 2.f);
+ } else {
+ eta = b * 2.f / (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ }
+
+/* Note, eta should be positive if w is negative, and */
+/* eta should be negative otherwise. However, */
+/* if for some reason caused by roundoff, eta*w > 0, */
+/* we simply use one Newton step instead. This way */
+/* will guarantee eta*w < 0. */
+
+ if (w * eta > 0.f) {
+ eta = -w / (dpsi + dphi);
+ }
+ temp = eta - dtnsq;
+ if (temp > *rho) {
+ eta = *rho + dtnsq;
+ }
+
+ tau += eta;
+ eta /= *sigma + sqrt(eta + *sigma * *sigma);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+ work[j] += eta;
+/* L50: */
+ }
+
+ *sigma += eta;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L60: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / (work[*n] * delta[*n]);
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8.f + erretm - phi + rhoinv + dabs(tau) * (
+ dpsi + dphi);
+
+ w = rhoinv + phi + psi;
+
+/* Main loop to update the values of the array DELTA */
+
+ iter = niter + 1;
+
+ for (niter = iter; niter <= 20; ++niter) {
+
+/* Test for convergence */
+
+ if (dabs(w) <= eps * erretm) {
+ goto L240;
+ }
+
+/* Calculate the new step */
+
+ dtnsq1 = work[*n - 1] * delta[*n - 1];
+ dtnsq = work[*n] * delta[*n];
+ c__ = w - dtnsq1 * dpsi - dtnsq * dphi;
+ a = (dtnsq + dtnsq1) * w - dtnsq1 * dtnsq * (dpsi + dphi);
+ b = dtnsq1 * dtnsq * w;
+ if (a >= 0.f) {
+ eta = (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) /
+ (c__ * 2.f);
+ } else {
+ eta = b * 2.f / (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ }
+
+/* Note, eta should be positive if w is negative, and */
+/* eta should be negative otherwise. However, */
+/* if for some reason caused by roundoff, eta*w > 0, */
+/* we simply use one Newton step instead. This way */
+/* will guarantee eta*w < 0. */
+
+ if (w * eta > 0.f) {
+ eta = -w / (dpsi + dphi);
+ }
+ temp = eta - dtnsq;
+ if (temp <= 0.f) {
+ eta /= 2.f;
+ }
+
+ tau += eta;
+ eta /= *sigma + sqrt(eta + *sigma * *sigma);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+ work[j] += eta;
+/* L70: */
+ }
+
+ *sigma += eta;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L80: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / (work[*n] * delta[*n]);
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8.f + erretm - phi + rhoinv + dabs(tau) *
+ (dpsi + dphi);
+
+ w = rhoinv + phi + psi;
+/* L90: */
+ }
+
+/* Return with INFO = 1, NITER = MAXIT and not converged */
+
+ *info = 1;
+ goto L240;
+
+/* End for the case I = N */
+
+ } else {
+
+/* The case for I < N */
+
+ niter = 1;
+ ip1 = *i__ + 1;
+
+/* Calculate initial guess */
+
+ delsq = (d__[ip1] - d__[*i__]) * (d__[ip1] + d__[*i__]);
+ delsq2 = delsq / 2.f;
+ temp = delsq2 / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + delsq2));
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ work[j] = d__[j] + d__[*i__] + temp;
+ delta[j] = d__[j] - d__[*i__] - temp;
+/* L100: */
+ }
+
+ psi = 0.f;
+ i__1 = *i__ - 1;
+ for (j = 1; j <= i__1; ++j) {
+ psi += z__[j] * z__[j] / (work[j] * delta[j]);
+/* L110: */
+ }
+
+ phi = 0.f;
+ i__1 = *i__ + 2;
+ for (j = *n; j >= i__1; --j) {
+ phi += z__[j] * z__[j] / (work[j] * delta[j]);
+/* L120: */
+ }
+ c__ = rhoinv + psi + phi;
+ w = c__ + z__[*i__] * z__[*i__] / (work[*i__] * delta[*i__]) + z__[
+ ip1] * z__[ip1] / (work[ip1] * delta[ip1]);
+
+ if (w > 0.f) {
+
+/* d(i)^2 < the ith sigma^2 < (d(i)^2+d(i+1)^2)/2 */
+
+/* We choose d(i) as origin. */
+
+ orgati = TRUE_;
+ sg2lb = 0.f;
+ sg2ub = delsq2;
+ a = c__ * delsq + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1];
+ b = z__[*i__] * z__[*i__] * delsq;
+ if (a > 0.f) {
+ tau = b * 2.f / (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ } else {
+ tau = (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) /
+ (c__ * 2.f);
+ }
+
+/* TAU now is an estimation of SIGMA^2 - D( I )^2. The */
+/* following, however, is the corresponding estimation of */
+/* SIGMA - D( I ). */
+
+ eta = tau / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + tau));
+ } else {
+
+/* (d(i)^2+d(i+1)^2)/2 <= the ith sigma^2 < d(i+1)^2/2 */
+
+/* We choose d(i+1) as origin. */
+
+ orgati = FALSE_;
+ sg2lb = -delsq2;
+ sg2ub = 0.f;
+ a = c__ * delsq - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1];
+ b = z__[ip1] * z__[ip1] * delsq;
+ if (a < 0.f) {
+ tau = b * 2.f / (a - sqrt((r__1 = a * a + b * 4.f * c__, dabs(
+ r__1))));
+ } else {
+ tau = -(a + sqrt((r__1 = a * a + b * 4.f * c__, dabs(r__1))))
+ / (c__ * 2.f);
+ }
+
+/* TAU now is an estimation of SIGMA^2 - D( IP1 )^2. The */
+/* following, however, is the corresponding estimation of */
+/* SIGMA - D( IP1 ). */
+
+ eta = tau / (d__[ip1] + sqrt((r__1 = d__[ip1] * d__[ip1] + tau,
+ dabs(r__1))));
+ }
+
+ if (orgati) {
+ ii = *i__;
+ *sigma = d__[*i__] + eta;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ work[j] = d__[j] + d__[*i__] + eta;
+ delta[j] = d__[j] - d__[*i__] - eta;
+/* L130: */
+ }
+ } else {
+ ii = *i__ + 1;
+ *sigma = d__[ip1] + eta;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ work[j] = d__[j] + d__[ip1] + eta;
+ delta[j] = d__[j] - d__[ip1] - eta;
+/* L140: */
+ }
+ }
+ iim1 = ii - 1;
+ iip1 = ii + 1;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L150: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.f;
+ phi = 0.f;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L160: */
+ }
+
+ w = rhoinv + phi + psi;
+
+/* W is the value of the secular function with */
+/* its ii-th element removed. */
+
+ swtch3 = FALSE_;
+ if (orgati) {
+ if (w < 0.f) {
+ swtch3 = TRUE_;
+ }
+ } else {
+ if (w > 0.f) {
+ swtch3 = TRUE_;
+ }
+ }
+ if (ii == 1 || ii == *n) {
+ swtch3 = FALSE_;
+ }
+
+ temp = z__[ii] / (work[ii] * delta[ii]);
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w += temp;
+ erretm = (phi - psi) * 8.f + erretm + rhoinv * 2.f + dabs(temp) * 3.f
+ + dabs(tau) * dw;
+
+/* Test for convergence */
+
+ if (dabs(w) <= eps * erretm) {
+ goto L240;
+ }
+
+ if (w <= 0.f) {
+ sg2lb = dmax(sg2lb,tau);
+ } else {
+ sg2ub = dmin(sg2ub,tau);
+ }
+
+/* Calculate the new step */
+
+ ++niter;
+ if (! swtch3) {
+ dtipsq = work[ip1] * delta[ip1];
+ dtisq = work[*i__] * delta[*i__];
+ if (orgati) {
+/* Computing 2nd power */
+ r__1 = z__[*i__] / dtisq;
+ c__ = w - dtipsq * dw + delsq * (r__1 * r__1);
+ } else {
+/* Computing 2nd power */
+ r__1 = z__[ip1] / dtipsq;
+ c__ = w - dtisq * dw - delsq * (r__1 * r__1);
+ }
+ a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw;
+ b = dtipsq * dtisq * w;
+ if (c__ == 0.f) {
+ if (a == 0.f) {
+ if (orgati) {
+ a = z__[*i__] * z__[*i__] + dtipsq * dtipsq * (dpsi +
+ dphi);
+ } else {
+ a = z__[ip1] * z__[ip1] + dtisq * dtisq * (dpsi +
+ dphi);
+ }
+ }
+ eta = b / a;
+ } else if (a <= 0.f) {
+ eta = (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) /
+ (c__ * 2.f);
+ } else {
+ eta = b * 2.f / (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ }
+ } else {
+
+/* Interpolation using THREE most relevant poles */
+
+ dtiim = work[iim1] * delta[iim1];
+ dtiip = work[iip1] * delta[iip1];
+ temp = rhoinv + psi + phi;
+ if (orgati) {
+ temp1 = z__[iim1] / dtiim;
+ temp1 *= temp1;
+ c__ = temp - dtiip * (dpsi + dphi) - (d__[iim1] - d__[iip1]) *
+ (d__[iim1] + d__[iip1]) * temp1;
+ zz[0] = z__[iim1] * z__[iim1];
+ if (dpsi < temp1) {
+ zz[2] = dtiip * dtiip * dphi;
+ } else {
+ zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi);
+ }
+ } else {
+ temp1 = z__[iip1] / dtiip;
+ temp1 *= temp1;
+ c__ = temp - dtiim * (dpsi + dphi) - (d__[iip1] - d__[iim1]) *
+ (d__[iim1] + d__[iip1]) * temp1;
+ if (dphi < temp1) {
+ zz[0] = dtiim * dtiim * dpsi;
+ } else {
+ zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1));
+ }
+ zz[2] = z__[iip1] * z__[iip1];
+ }
+ zz[1] = z__[ii] * z__[ii];
+ dd[0] = dtiim;
+ dd[1] = delta[ii] * work[ii];
+ dd[2] = dtiip;
+ slaed6_(&niter, &orgati, &c__, dd, zz, &w, &eta, info);
+ if (*info != 0) {
+ goto L240;
+ }
+ }
+
+/* Note, eta should be positive if w is negative, and */
+/* eta should be negative otherwise. However, */
+/* if for some reason caused by roundoff, eta*w > 0, */
+/* we simply use one Newton step instead. This way */
+/* will guarantee eta*w < 0. */
+
+ if (w * eta >= 0.f) {
+ eta = -w / dw;
+ }
+ if (orgati) {
+ temp1 = work[*i__] * delta[*i__];
+ temp = eta - temp1;
+ } else {
+ temp1 = work[ip1] * delta[ip1];
+ temp = eta - temp1;
+ }
+ if (temp > sg2ub || temp < sg2lb) {
+ if (w < 0.f) {
+ eta = (sg2ub - tau) / 2.f;
+ } else {
+ eta = (sg2lb - tau) / 2.f;
+ }
+ }
+
+ tau += eta;
+ eta /= *sigma + sqrt(*sigma * *sigma + eta);
+
+ prew = w;
+
+ *sigma += eta;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ work[j] += eta;
+ delta[j] -= eta;
+/* L170: */
+ }
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L180: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.f;
+ phi = 0.f;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L190: */
+ }
+
+ temp = z__[ii] / (work[ii] * delta[ii]);
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w = rhoinv + phi + psi + temp;
+ erretm = (phi - psi) * 8.f + erretm + rhoinv * 2.f + dabs(temp) * 3.f
+ + dabs(tau) * dw;
+
+ if (w <= 0.f) {
+ sg2lb = dmax(sg2lb,tau);
+ } else {
+ sg2ub = dmin(sg2ub,tau);
+ }
+
+ swtch = FALSE_;
+ if (orgati) {
+ if (-w > dabs(prew) / 10.f) {
+ swtch = TRUE_;
+ }
+ } else {
+ if (w > dabs(prew) / 10.f) {
+ swtch = TRUE_;
+ }
+ }
+
+/* Main loop to update the values of the array DELTA and WORK */
+
+ iter = niter + 1;
+
+ for (niter = iter; niter <= 20; ++niter) {
+
+/* Test for convergence */
+
+ if (dabs(w) <= eps * erretm) {
+ goto L240;
+ }
+
+/* Calculate the new step */
+
+ if (! swtch3) {
+ dtipsq = work[ip1] * delta[ip1];
+ dtisq = work[*i__] * delta[*i__];
+ if (! swtch) {
+ if (orgati) {
+/* Computing 2nd power */
+ r__1 = z__[*i__] / dtisq;
+ c__ = w - dtipsq * dw + delsq * (r__1 * r__1);
+ } else {
+/* Computing 2nd power */
+ r__1 = z__[ip1] / dtipsq;
+ c__ = w - dtisq * dw - delsq * (r__1 * r__1);
+ }
+ } else {
+ temp = z__[ii] / (work[ii] * delta[ii]);
+ if (orgati) {
+ dpsi += temp * temp;
+ } else {
+ dphi += temp * temp;
+ }
+ c__ = w - dtisq * dpsi - dtipsq * dphi;
+ }
+ a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw;
+ b = dtipsq * dtisq * w;
+ if (c__ == 0.f) {
+ if (a == 0.f) {
+ if (! swtch) {
+ if (orgati) {
+ a = z__[*i__] * z__[*i__] + dtipsq * dtipsq *
+ (dpsi + dphi);
+ } else {
+ a = z__[ip1] * z__[ip1] + dtisq * dtisq * (
+ dpsi + dphi);
+ }
+ } else {
+ a = dtisq * dtisq * dpsi + dtipsq * dtipsq * dphi;
+ }
+ }
+ eta = b / a;
+ } else if (a <= 0.f) {
+ eta = (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1))
+ )) / (c__ * 2.f);
+ } else {
+ eta = b * 2.f / (a + sqrt((r__1 = a * a - b * 4.f * c__,
+ dabs(r__1))));
+ }
+ } else {
+
+/* Interpolation using THREE most relevant poles */
+
+ dtiim = work[iim1] * delta[iim1];
+ dtiip = work[iip1] * delta[iip1];
+ temp = rhoinv + psi + phi;
+ if (swtch) {
+ c__ = temp - dtiim * dpsi - dtiip * dphi;
+ zz[0] = dtiim * dtiim * dpsi;
+ zz[2] = dtiip * dtiip * dphi;
+ } else {
+ if (orgati) {
+ temp1 = z__[iim1] / dtiim;
+ temp1 *= temp1;
+ temp2 = (d__[iim1] - d__[iip1]) * (d__[iim1] + d__[
+ iip1]) * temp1;
+ c__ = temp - dtiip * (dpsi + dphi) - temp2;
+ zz[0] = z__[iim1] * z__[iim1];
+ if (dpsi < temp1) {
+ zz[2] = dtiip * dtiip * dphi;
+ } else {
+ zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi);
+ }
+ } else {
+ temp1 = z__[iip1] / dtiip;
+ temp1 *= temp1;
+ temp2 = (d__[iip1] - d__[iim1]) * (d__[iim1] + d__[
+ iip1]) * temp1;
+ c__ = temp - dtiim * (dpsi + dphi) - temp2;
+ if (dphi < temp1) {
+ zz[0] = dtiim * dtiim * dpsi;
+ } else {
+ zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1));
+ }
+ zz[2] = z__[iip1] * z__[iip1];
+ }
+ }
+ dd[0] = dtiim;
+ dd[1] = delta[ii] * work[ii];
+ dd[2] = dtiip;
+ slaed6_(&niter, &orgati, &c__, dd, zz, &w, &eta, info);
+ if (*info != 0) {
+ goto L240;
+ }
+ }
+
+/* Note, eta should be positive if w is negative, and */
+/* eta should be negative otherwise. However, */
+/* if for some reason caused by roundoff, eta*w > 0, */
+/* we simply use one Newton step instead. This way */
+/* will guarantee eta*w < 0. */
+
+ if (w * eta >= 0.f) {
+ eta = -w / dw;
+ }
+ if (orgati) {
+ temp1 = work[*i__] * delta[*i__];
+ temp = eta - temp1;
+ } else {
+ temp1 = work[ip1] * delta[ip1];
+ temp = eta - temp1;
+ }
+ if (temp > sg2ub || temp < sg2lb) {
+ if (w < 0.f) {
+ eta = (sg2ub - tau) / 2.f;
+ } else {
+ eta = (sg2lb - tau) / 2.f;
+ }
+ }
+
+ tau += eta;
+ eta /= *sigma + sqrt(*sigma * *sigma + eta);
+
+ *sigma += eta;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ work[j] += eta;
+ delta[j] -= eta;
+/* L200: */
+ }
+
+ prew = w;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L210: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.f;
+ phi = 0.f;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L220: */
+ }
+
+ temp = z__[ii] / (work[ii] * delta[ii]);
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w = rhoinv + phi + psi + temp;
+ erretm = (phi - psi) * 8.f + erretm + rhoinv * 2.f + dabs(temp) *
+ 3.f + dabs(tau) * dw;
+ if (w * prew > 0.f && dabs(w) > dabs(prew) / 10.f) {
+ swtch = ! swtch;
+ }
+
+ if (w <= 0.f) {
+ sg2lb = dmax(sg2lb,tau);
+ } else {
+ sg2ub = dmin(sg2ub,tau);
+ }
+
+/* L230: */
+ }
+
+/* Return with INFO = 1, NITER = MAXIT and not converged */
+
+ *info = 1;
+
+ }
+
+L240:
+ return 0;
+
+/* End of SLASD4 */
+
+} /* slasd4_ */
diff --git a/contrib/libs/clapack/slasd5.c b/contrib/libs/clapack/slasd5.c
new file mode 100644
index 0000000000..6e2e3d2c1a
--- /dev/null
+++ b/contrib/libs/clapack/slasd5.c
@@ -0,0 +1,189 @@
+/* slasd5.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slasd5_(integer *i__, real *d__, real *z__, real *delta,
+	real *rho, real *dsigma, real *work)
+{
+    /* System generated locals */
+    real r__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    real b, c__, w, del, tau, delsq;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This subroutine computes the square root of the I-th eigenvalue */
+/* of a positive symmetric rank-one modification of a 2-by-2 diagonal */
+/* matrix */
+
+/* diag( D ) * diag( D ) + RHO * Z * transpose(Z) . */
+
+/* The diagonal entries in the array D are assumed to satisfy */
+
+/* 0 <= D(i) < D(j) for i < j . */
+
+/* We also assume RHO > 0 and that the Euclidean norm of the vector */
+/* Z is one. */
+
+/* Arguments */
+/* ========= */
+
+/* I (input) INTEGER */
+/* The index of the eigenvalue to be computed. I = 1 or I = 2. */
+
+/* D (input) REAL array, dimension (2) */
+/* The original eigenvalues. We assume 0 <= D(1) < D(2). */
+
+/* Z (input) REAL array, dimension (2) */
+/* The components of the updating vector. */
+
+/* DELTA (output) REAL array, dimension (2) */
+/* Contains (D(j) - sigma_I) in its j-th component. */
+/* The vector DELTA contains the information necessary */
+/* to construct the eigenvectors. */
+
+/* RHO (input) REAL */
+/* The scalar in the symmetric updating formula. */
+
+/* DSIGMA (output) REAL */
+/* The computed sigma_I, the I-th updated eigenvalue. */
+
+/* WORK (workspace) REAL array, dimension (2) */
+/* WORK contains (D(j) + sigma_I) in its j-th component. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ren-Cang Li, Computer Science Division, University of California */
+/* at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --work;
+    --delta;
+    --z__;
+    --d__;
+
+    /* Function Body */
+
+/* DEL = D(2)-D(1) and DELSQ = DEL*(D(2)+D(1)) = D(2)**2 - D(1)**2; */
+/* both are nonnegative under the assumption 0 <= D(1) < D(2). */
+
+    del = d__[2] - d__[1];
+    delsq = del * (d__[2] + d__[1]);
+    if (*i__ == 1) {
+
+/* The sign of W selects the bracket for the root SIGMA**2: the root */
+/* is treated as closer to D(1)**2 when W > 0 (TAU computed relative */
+/* to D(1)) and closer to D(2)**2 otherwise (TAU relative to D(2)). */
+
+	w = *rho * 4.f * (z__[2] * z__[2] / (d__[1] + d__[2] * 3.f) - z__[1] *
+		 z__[1] / (d__[1] * 3.f + d__[2])) / del + 1.f;
+	if (w > 0.f) {
+	    b = delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+	    c__ = *rho * z__[1] * z__[1] * delsq;
+
+/* B > ZERO, always */
+
+/* The following TAU is DSIGMA * DSIGMA - D( 1 ) * D( 1 ) */
+
+/* (2C)/(B + sqrt(B**2-4C)) is the cancellation-free form of the */
+/* smaller quadratic root; DABS guards the radicand against a tiny */
+/* negative value from roundoff. */
+
+	    tau = c__ * 2.f / (b + sqrt((r__1 = b * b - c__ * 4.f, dabs(r__1))
+		    ));
+
+/* The following TAU is DSIGMA - D( 1 ) */
+
+	    tau /= d__[1] + sqrt(d__[1] * d__[1] + tau);
+	    *dsigma = d__[1] + tau;
+	    delta[1] = -tau;
+	    delta[2] = del - tau;
+	    work[1] = d__[1] * 2.f + tau;
+	    work[2] = d__[1] + tau + d__[2];
+/* DELTA( 1 ) = -Z( 1 ) / TAU */
+/* DELTA( 2 ) = Z( 2 ) / ( DEL-TAU ) */
+	} else {
+	    b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+	    c__ = *rho * z__[2] * z__[2] * delsq;
+
+/* The following TAU is DSIGMA * DSIGMA - D( 2 ) * D( 2 ) */
+
+/* Pick whichever algebraically equivalent form of the quadratic */
+/* root avoids cancellation between B and the square root. */
+
+	    if (b > 0.f) {
+		tau = c__ * -2.f / (b + sqrt(b * b + c__ * 4.f));
+	    } else {
+		tau = (b - sqrt(b * b + c__ * 4.f)) / 2.f;
+	    }
+
+/* The following TAU is DSIGMA - D( 2 ) */
+
+	    tau /= d__[2] + sqrt((r__1 = d__[2] * d__[2] + tau, dabs(r__1)));
+	    *dsigma = d__[2] + tau;
+	    delta[1] = -(del + tau);
+	    delta[2] = -tau;
+	    work[1] = d__[1] + tau + d__[2];
+	    work[2] = d__[2] * 2.f + tau;
+/* DELTA( 1 ) = -Z( 1 ) / ( DEL+TAU ) */
+/* DELTA( 2 ) = -Z( 2 ) / TAU */
+	}
+/* TEMP = SQRT( DELTA( 1 )*DELTA( 1 )+DELTA( 2 )*DELTA( 2 ) ) */
+/* DELTA( 1 ) = DELTA( 1 ) / TEMP */
+/* DELTA( 2 ) = DELTA( 2 ) / TEMP */
+    } else {
+
+/* Now I=2 */
+
+/* For I = 2 the root always lies above D(2)**2, so TAU is computed */
+/* relative to D(2) only. */
+
+	b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+	c__ = *rho * z__[2] * z__[2] * delsq;
+
+/* The following TAU is DSIGMA * DSIGMA - D( 2 ) * D( 2 ) */
+
+/* Again choose the cancellation-free form of the larger quadratic */
+/* root depending on the sign of B. */
+
+	if (b > 0.f) {
+	    tau = (b + sqrt(b * b + c__ * 4.f)) / 2.f;
+	} else {
+	    tau = c__ * 2.f / (-b + sqrt(b * b + c__ * 4.f));
+	}
+
+/* The following TAU is DSIGMA - D( 2 ) */
+
+	tau /= d__[2] + sqrt(d__[2] * d__[2] + tau);
+	*dsigma = d__[2] + tau;
+	delta[1] = -(del + tau);
+	delta[2] = -tau;
+	work[1] = d__[1] + tau + d__[2];
+	work[2] = d__[2] * 2.f + tau;
+/* DELTA( 1 ) = -Z( 1 ) / ( DEL+TAU ) */
+/* DELTA( 2 ) = -Z( 2 ) / TAU */
+/* TEMP = SQRT( DELTA( 1 )*DELTA( 1 )+DELTA( 2 )*DELTA( 2 ) ) */
+/* DELTA( 1 ) = DELTA( 1 ) / TEMP */
+/* DELTA( 2 ) = DELTA( 2 ) / TEMP */
+    }
+    return 0;
+
+/* End of SLASD5 */
+
+} /* slasd5_ */
diff --git a/contrib/libs/clapack/slasd6.c b/contrib/libs/clapack/slasd6.c
new file mode 100644
index 0000000000..3be8d74036
--- /dev/null
+++ b/contrib/libs/clapack/slasd6.c
@@ -0,0 +1,364 @@
+/* slasd6.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__0 = 0;
+static real c_b7 = 1.f;
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int slasd6_(integer *icompq, integer *nl, integer *nr,
+	integer *sqre, real *d__, real *vf, real *vl, real *alpha, real *beta,
+	integer *idxq, integer *perm, integer *givptr, integer *givcol,
+	integer *ldgcol, real *givnum, integer *ldgnum, real *poles, real *
+	difl, real *difr, real *z__, integer *k, real *c__, real *s, real *
+	work, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset,
+	    poles_dim1, poles_offset, i__1;
+    real r__1, r__2;
+
+    /* Local variables */
+    integer i__, m, n, n1, n2, iw, idx, idxc, idxp, ivfw, ivlw;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+	    integer *), slasd7_(integer *, integer *, integer *, integer *,
+	    integer *, real *, real *, real *, real *, real *, real *, real *,
+	    real *, real *, real *, integer *, integer *, integer *, integer
+	    *, integer *, integer *, integer *, real *, integer *, real *,
+	    real *, integer *), slasd8_(integer *, integer *, real *, real *,
+	    real *, real *, real *, real *, integer *, real *, real *,
+	    integer *);
+    integer isigma;
+    extern /* Subroutine */ int xerbla_(char *, integer *), slascl_(
+	    char *, integer *, integer *, real *, real *, integer *, integer *
+, real *, integer *, integer *), slamrg_(integer *,
+	    integer *, real *, integer *, integer *, integer *);
+    real orgnrm;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLASD6 computes the SVD of an updated upper bidiagonal matrix B */
+/* obtained by merging two smaller ones by appending a row. This */
+/* routine is used only for the problem which requires all singular */
+/* values and optionally singular vector matrices in factored form. */
+/* B is an N-by-M matrix with N = NL + NR + 1 and M = N + SQRE. */
+/* A related subroutine, SLASD1, handles the case in which all singular */
+/* values and singular vectors of the bidiagonal matrix are desired. */
+
+/* SLASD6 computes the SVD as follows: */
+
+/* ( D1(in) 0 0 0 ) */
+/* B = U(in) * ( Z1' a Z2' b ) * VT(in) */
+/* ( 0 0 D2(in) 0 ) */
+
+/* = U(out) * ( D(out) 0) * VT(out) */
+
+/* where Z' = (Z1' a Z2' b) = u' VT', and u is a vector of dimension M */
+/* with ALPHA and BETA in the NL+1 and NL+2 th entries and zeros */
+/* elsewhere; and the entry b is empty if SQRE = 0. */
+
+/* The singular values of B can be computed using D1, D2, the first */
+/* components of all the right singular vectors of the lower block, and */
+/* the last components of all the right singular vectors of the upper */
+/* block. These components are stored and updated in VF and VL, */
+/* respectively, in SLASD6. Hence U and VT are not explicitly */
+/* referenced. */
+
+/* The singular values are stored in D. The algorithm consists of two */
+/* stages: */
+
+/* The first stage consists of deflating the size of the problem */
+/* when there are multiple singular values or if there is a zero */
+/* in the Z vector. For each such occurrence the dimension of the */
+/* secular equation problem is reduced by one. This stage is */
+/* performed by the routine SLASD7. */
+
+/* The second stage consists of calculating the updated */
+/* singular values. This is done by finding the roots of the */
+/* secular equation via the routine SLASD4 (as called by SLASD8). */
+/* This routine also updates VF and VL and computes the distances */
+/* between the updated singular values and the old singular */
+/* values. */
+
+/* SLASD6 is called from SLASDA. */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* Specifies whether singular vectors are to be computed in */
+/* factored form: */
+/* = 0: Compute singular values only. */
+/* = 1: Compute singular vectors in factored form as well. */
+
+/* NL (input) INTEGER */
+/* The row dimension of the upper block. NL >= 1. */
+
+/* NR (input) INTEGER */
+/* The row dimension of the lower block. NR >= 1. */
+
+/* SQRE (input) INTEGER */
+/* = 0: the lower block is an NR-by-NR square matrix. */
+/* = 1: the lower block is an NR-by-(NR+1) rectangular matrix. */
+
+/* The bidiagonal matrix has row dimension N = NL + NR + 1, */
+/* and column dimension M = N + SQRE. */
+
+/* D (input/output) REAL array, dimension (NL+NR+1). */
+/* On entry D(1:NL,1:NL) contains the singular values of the */
+/* upper block, and D(NL+2:N) contains the singular values */
+/* of the lower block. On exit D(1:N) contains the singular */
+/* values of the modified matrix. */
+
+/* VF (input/output) REAL array, dimension (M) */
+/* On entry, VF(1:NL+1) contains the first components of all */
+/* right singular vectors of the upper block; and VF(NL+2:M) */
+/* contains the first components of all right singular vectors */
+/* of the lower block. On exit, VF contains the first components */
+/* of all right singular vectors of the bidiagonal matrix. */
+
+/* VL (input/output) REAL array, dimension (M) */
+/* On entry, VL(1:NL+1) contains the last components of all */
+/* right singular vectors of the upper block; and VL(NL+2:M) */
+/* contains the last components of all right singular vectors of */
+/* the lower block. On exit, VL contains the last components of */
+/* all right singular vectors of the bidiagonal matrix. */
+
+/* ALPHA (input/output) REAL */
+/* Contains the diagonal element associated with the added row. */
+
+/* BETA (input/output) REAL */
+/* Contains the off-diagonal element associated with the added */
+/* row. */
+
+/* IDXQ (output) INTEGER array, dimension (N) */
+/* This contains the permutation which will reintegrate the */
+/* subproblem just solved back into sorted order, i.e. */
+/* D( IDXQ( I = 1, N ) ) will be in ascending order. */
+
+/* PERM (output) INTEGER array, dimension ( N ) */
+/* The permutations (from deflation and sorting) to be applied */
+/* to each block. Not referenced if ICOMPQ = 0. */
+
+/* GIVPTR (output) INTEGER */
+/* The number of Givens rotations which took place in this */
+/* subproblem. Not referenced if ICOMPQ = 0. */
+
+/* GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 ) */
+/* Each pair of numbers indicates a pair of columns to take place */
+/* in a Givens rotation. Not referenced if ICOMPQ = 0. */
+
+/* LDGCOL (input) INTEGER */
+/* leading dimension of GIVCOL, must be at least N. */
+
+/* GIVNUM (output) REAL array, dimension ( LDGNUM, 2 ) */
+/* Each number indicates the C or S value to be used in the */
+/* corresponding Givens rotation. Not referenced if ICOMPQ = 0. */
+
+/* LDGNUM (input) INTEGER */
+/* The leading dimension of GIVNUM and POLES, must be at least N. */
+
+/* POLES (output) REAL array, dimension ( LDGNUM, 2 ) */
+/* On exit, POLES(1,*) is an array containing the new singular */
+/* values obtained from solving the secular equation, and */
+/* POLES(2,*) is an array containing the poles in the secular */
+/* equation. Not referenced if ICOMPQ = 0. */
+
+/* DIFL (output) REAL array, dimension ( N ) */
+/* On exit, DIFL(I) is the distance between I-th updated */
+/* (undeflated) singular value and the I-th (undeflated) old */
+/* singular value. */
+
+/* DIFR (output) REAL array, */
+/* dimension ( LDGNUM, 2 ) if ICOMPQ = 1 and */
+/* dimension ( N ) if ICOMPQ = 0. */
+/* On exit, DIFR(I, 1) is the distance between I-th updated */
+/* (undeflated) singular value and the I+1-th (undeflated) old */
+/* singular value. */
+
+/* If ICOMPQ = 1, DIFR(1:K,2) is an array containing the */
+/* normalizing factors for the right singular vector matrix. */
+
+/* See SLASD8 for details on DIFL and DIFR. */
+
+/* Z (output) REAL array, dimension ( M ) */
+/* The first elements of this array contain the components */
+/* of the deflation-adjusted updating row vector. */
+
+/* K (output) INTEGER */
+/* Contains the dimension of the non-deflated matrix, */
+/* This is the order of the related secular equation. 1 <= K <=N. */
+
+/* C (output) REAL */
+/* C contains garbage if SQRE =0 and the C-value of a Givens */
+/* rotation related to the right null space if SQRE = 1. */
+
+/* S (output) REAL */
+/* S contains garbage if SQRE =0 and the S-value of a Givens */
+/* rotation related to the right null space if SQRE = 1. */
+
+/* WORK (workspace) REAL array, dimension ( 4 * M ) */
+
+/* IWORK (workspace) INTEGER array, dimension ( 3 * N ) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, a singular value did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --vf;
+    --vl;
+    --idxq;
+    --perm;
+    givcol_dim1 = *ldgcol;
+    givcol_offset = 1 + givcol_dim1;
+    givcol -= givcol_offset;
+    poles_dim1 = *ldgnum;
+    poles_offset = 1 + poles_dim1;
+    poles -= poles_offset;
+    givnum_dim1 = *ldgnum;
+    givnum_offset = 1 + givnum_dim1;
+    givnum -= givnum_offset;
+    --difl;
+    --difr;
+    --z__;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    n = *nl + *nr + 1;
+    m = n + *sqre;
+
+    if (*icompq < 0 || *icompq > 1) {
+	*info = -1;
+    } else if (*nl < 1) {
+	*info = -2;
+    } else if (*nr < 1) {
+	*info = -3;
+    } else if (*sqre < 0 || *sqre > 1) {
+	*info = -4;
+    } else if (*ldgcol < n) {
+	*info = -14;
+    } else if (*ldgnum < n) {
+	*info = -16;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SLASD6", &i__1);
+	return 0;
+    }
+
+/* The following values are for bookkeeping purposes only. They are */
+/* integer pointers which indicate the portion of the workspace */
+/* used by a particular array in SLASD7 and SLASD8. */
+
+    isigma = 1;
+    iw = isigma + n;
+    ivfw = iw + m;
+    ivlw = ivfw + m;
+
+    idx = 1;
+    idxc = idx + n;
+    idxp = idxc + n;
+
+/* Scale. */
+
+/* ORGNRM = max( |ALPHA|, |BETA|, max_i |D(i)| ); D, ALPHA and BETA */
+/* are divided by ORGNRM so the largest magnitude becomes one. */
+/* D(NL+1) is not part of either subproblem's singular values on */
+/* entry, so it is cleared before the scan. */
+
+/* Computing MAX */
+    r__1 = dabs(*alpha), r__2 = dabs(*beta);
+    orgnrm = dmax(r__1,r__2);
+    d__[*nl + 1] = 0.f;
+    i__1 = n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((r__1 = d__[i__], dabs(r__1)) > orgnrm) {
+	    orgnrm = (r__1 = d__[i__], dabs(r__1));
+	}
+/* L10: */
+    }
+    slascl_("G", &c__0, &c__0, &orgnrm, &c_b7, &n, &c__1, &d__[1], &n, info);
+    *alpha /= orgnrm;
+    *beta /= orgnrm;
+
+/* Sort and Deflate singular values. */
+
+    slasd7_(icompq, nl, nr, sqre, k, &d__[1], &z__[1], &work[iw], &vf[1], &
+	    work[ivfw], &vl[1], &work[ivlw], alpha, beta, &work[isigma], &
+	    iwork[idx], &iwork[idxp], &idxq[1], &perm[1], givptr, &givcol[
+	    givcol_offset], ldgcol, &givnum[givnum_offset], ldgnum, c__, s,
+	    info);
+
+/* Solve Secular Equation, compute DIFL, DIFR, and update VF, VL. */
+
+    slasd8_(icompq, k, &d__[1], &z__[1], &vf[1], &vl[1], &difl[1], &difr[1],
+	    ldgnum, &work[isigma], &work[iw], info);
+
+/* Save the poles if ICOMPQ = 1. */
+
+    if (*icompq == 1) {
+	scopy_(k, &d__[1], &c__1, &poles[poles_dim1 + 1], &c__1);
+	scopy_(k, &work[isigma], &c__1, &poles[(poles_dim1 << 1) + 1], &c__1);
+    }
+
+/* Unscale. */
+
+    slascl_("G", &c__0, &c__0, &c_b7, &orgnrm, &n, &c__1, &d__[1], &n, info);
+
+/* Prepare the IDXQ sorting permutation. */
+
+/* After SLASD8, D(1:K) holds the K undeflated (updated) singular */
+/* values and D(K+1:N) the deflated ones; SLAMRG merges the two */
+/* sorted runs into a single ascending permutation in IDXQ. */
+
+    n1 = *k;
+    n2 = n - *k;
+    slamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &idxq[1]);
+
+    return 0;
+
+/* End of SLASD6 */
+
+} /* slasd6_ */
diff --git a/contrib/libs/clapack/slasd7.c b/contrib/libs/clapack/slasd7.c
new file mode 100644
index 0000000000..01a0542df3
--- /dev/null
+++ b/contrib/libs/clapack/slasd7.c
@@ -0,0 +1,516 @@
+/* slasd7.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int slasd7_(integer *icompq, integer *nl, integer *nr,
+ integer *sqre, integer *k, real *d__, real *z__, real *zw, real *vf,
+ real *vfw, real *vl, real *vlw, real *alpha, real *beta, real *dsigma,
+ integer *idx, integer *idxp, integer *idxq, integer *perm, integer *
+ givptr, integer *givcol, integer *ldgcol, real *givnum, integer *
+ ldgnum, real *c__, real *s, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset, i__1;
+ real r__1, r__2;
+
+ /* Local variables */
+ integer i__, j, m, n, k2;
+ real z1;
+ integer jp;
+ real eps, tau, tol;
+ integer nlp1, nlp2, idxi, idxj;
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *);
+ integer idxjp, jprev;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ extern doublereal slapy2_(real *, real *), slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), slamrg_(
+ integer *, integer *, real *, integer *, integer *, integer *);
+ real hlftol;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLASD7 merges the two sets of singular values together into a single */
+/* sorted set. Then it tries to deflate the size of the problem. There */
+/* are two ways in which deflation can occur: when two or more singular */
+/* values are close together or if there is a tiny entry in the Z */
+/* vector. For each such occurrence the order of the related */
+/* secular equation problem is reduced by one. */
+
+/* SLASD7 is called from SLASD6. */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* Specifies whether singular vectors are to be computed */
+/* in compact form, as follows: */
+/* = 0: Compute singular values only. */
+/* = 1: Compute singular vectors of upper */
+/* bidiagonal matrix in compact form. */
+
+/* NL (input) INTEGER */
+/* The row dimension of the upper block. NL >= 1. */
+
+/* NR (input) INTEGER */
+/* The row dimension of the lower block. NR >= 1. */
+
+/* SQRE (input) INTEGER */
+/* = 0: the lower block is an NR-by-NR square matrix. */
+/* = 1: the lower block is an NR-by-(NR+1) rectangular matrix. */
+
+/* The bidiagonal matrix has */
+/* N = NL + NR + 1 rows and */
+/* M = N + SQRE >= N columns. */
+
+/* K (output) INTEGER */
+/* Contains the dimension of the non-deflated matrix, this is */
+/* the order of the related secular equation. 1 <= K <=N. */
+
+/* D (input/output) REAL array, dimension ( N ) */
+/* On entry D contains the singular values of the two submatrices */
+/* to be combined. On exit D contains the trailing (N-K) updated */
+/* singular values (those which were deflated) sorted into */
+/* increasing order. */
+
+/* Z (output) REAL array, dimension ( M ) */
+/* On exit Z contains the updating row vector in the secular */
+/* equation. */
+
+/* ZW (workspace) REAL array, dimension ( M ) */
+/* Workspace for Z. */
+
+/* VF (input/output) REAL array, dimension ( M ) */
+/* On entry, VF(1:NL+1) contains the first components of all */
+/* right singular vectors of the upper block; and VF(NL+2:M) */
+/* contains the first components of all right singular vectors */
+/* of the lower block. On exit, VF contains the first components */
+/* of all right singular vectors of the bidiagonal matrix. */
+
+/* VFW (workspace) REAL array, dimension ( M ) */
+/* Workspace for VF. */
+
+/* VL (input/output) REAL array, dimension ( M ) */
+/* On entry, VL(1:NL+1) contains the last components of all */
+/* right singular vectors of the upper block; and VL(NL+2:M) */
+/* contains the last components of all right singular vectors */
+/* of the lower block. On exit, VL contains the last components */
+/* of all right singular vectors of the bidiagonal matrix. */
+
+/* VLW (workspace) REAL array, dimension ( M ) */
+/* Workspace for VL. */
+
+/* ALPHA (input) REAL */
+/* Contains the diagonal element associated with the added row. */
+
+/* BETA (input) REAL */
+/* Contains the off-diagonal element associated with the added */
+/* row. */
+
+/* DSIGMA (output) REAL array, dimension ( N ) */
+/* Contains a copy of the diagonal elements (K-1 singular values */
+/* and one zero) in the secular equation. */
+
+/* IDX (workspace) INTEGER array, dimension ( N ) */
+/* This will contain the permutation used to sort the contents of */
+/* D into ascending order. */
+
+/* IDXP (workspace) INTEGER array, dimension ( N ) */
+/* This will contain the permutation used to place deflated */
+/* values of D at the end of the array. On output IDXP(2:K) */
+/* points to the nondeflated D-values and IDXP(K+1:N) */
+/* points to the deflated singular values. */
+
+/* IDXQ (input) INTEGER array, dimension ( N ) */
+/* This contains the permutation which separately sorts the two */
+/* sub-problems in D into ascending order. Note that entries in */
+/* the first half of this permutation must first be moved one */
+/* position backward; and entries in the second half */
+/* must first have NL+1 added to their values. */
+
+/* PERM (output) INTEGER array, dimension ( N ) */
+/* The permutations (from deflation and sorting) to be applied */
+/* to each singular block. Not referenced if ICOMPQ = 0. */
+
+/* GIVPTR (output) INTEGER */
+/* The number of Givens rotations which took place in this */
+/* subproblem. Not referenced if ICOMPQ = 0. */
+
+/* GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 ) */
+/* Each pair of numbers indicates a pair of columns to take place */
+/* in a Givens rotation. Not referenced if ICOMPQ = 0. */
+
+/* LDGCOL (input) INTEGER */
+/* The leading dimension of GIVCOL, must be at least N. */
+
+/* GIVNUM (output) REAL array, dimension ( LDGNUM, 2 ) */
+/* Each number indicates the C or S value to be used in the */
+/* corresponding Givens rotation. Not referenced if ICOMPQ = 0. */
+
+/* LDGNUM (input) INTEGER */
+/* The leading dimension of GIVNUM, must be at least N. */
+
+/* C (output) REAL */
+/* C contains garbage if SQRE =0 and the C-value of a Givens */
+/* rotation related to the right null space if SQRE = 1. */
+
+/* S (output) REAL */
+/* S contains garbage if SQRE =0 and the S-value of a Givens */
+/* rotation related to the right null space if SQRE = 1. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --z__;
+ --zw;
+ --vf;
+ --vfw;
+ --vl;
+ --vlw;
+ --dsigma;
+ --idx;
+ --idxp;
+ --idxq;
+ --perm;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ givnum_dim1 = *ldgnum;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+
+ /* Function Body */
+ *info = 0;
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*nl < 1) {
+ *info = -2;
+ } else if (*nr < 1) {
+ *info = -3;
+ } else if (*sqre < 0 || *sqre > 1) {
+ *info = -4;
+ } else if (*ldgcol < n) {
+ *info = -22;
+ } else if (*ldgnum < n) {
+ *info = -24;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASD7", &i__1);
+ return 0;
+ }
+
+ nlp1 = *nl + 1;
+ nlp2 = *nl + 2;
+ if (*icompq == 1) {
+ *givptr = 0;
+ }
+
+/* Generate the first part of the vector Z and move the singular */
+/* values in the first part of D one position backward. */
+
+ z1 = *alpha * vl[nlp1];
+ vl[nlp1] = 0.f;
+ tau = vf[nlp1];
+ for (i__ = *nl; i__ >= 1; --i__) {
+ z__[i__ + 1] = *alpha * vl[i__];
+ vl[i__] = 0.f;
+ vf[i__ + 1] = vf[i__];
+ d__[i__ + 1] = d__[i__];
+ idxq[i__ + 1] = idxq[i__] + 1;
+/* L10: */
+ }
+ vf[1] = tau;
+
+/* Generate the second part of the vector Z. */
+
+ i__1 = m;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ z__[i__] = *beta * vf[i__];
+ vf[i__] = 0.f;
+/* L20: */
+ }
+
+/* Sort the singular values into increasing order */
+
+ i__1 = n;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ idxq[i__] += nlp1;
+/* L30: */
+ }
+
+/* DSIGMA, IDXC, IDXC, and ZW are used as storage space. */
+
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ dsigma[i__] = d__[idxq[i__]];
+ zw[i__] = z__[idxq[i__]];
+ vfw[i__] = vf[idxq[i__]];
+ vlw[i__] = vl[idxq[i__]];
+/* L40: */
+ }
+
+ slamrg_(nl, nr, &dsigma[2], &c__1, &c__1, &idx[2]);
+
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ idxi = idx[i__] + 1;
+ d__[i__] = dsigma[idxi];
+ z__[i__] = zw[idxi];
+ vf[i__] = vfw[idxi];
+ vl[i__] = vlw[idxi];
+/* L50: */
+ }
+
+/* Calculate the allowable deflation tolerence */
+
+ eps = slamch_("Epsilon");
+/* Computing MAX */
+ r__1 = dabs(*alpha), r__2 = dabs(*beta);
+ tol = dmax(r__1,r__2);
+/* Computing MAX */
+ r__2 = (r__1 = d__[n], dabs(r__1));
+ tol = eps * 64.f * dmax(r__2,tol);
+
+/* There are 2 kinds of deflation -- first a value in the z-vector */
+/* is small, second two (or more) singular values are very close */
+/* together (their difference is small). */
+
+/* If the value in the z-vector is small, we simply permute the */
+/* array so that the corresponding singular value is moved to the */
+/* end. */
+
+/* If two values in the D-vector are close, we perform a two-sided */
+/* rotation designed to make one of the corresponding z-vector */
+/* entries zero, and then permute the array so that the deflated */
+/* singular value is moved to the end. */
+
+/* If there are multiple singular values then the problem deflates. */
+/* Here the number of equal singular values are found. As each equal */
+/* singular value is found, an elementary reflector is computed to */
+/* rotate the corresponding singular subspace so that the */
+/* corresponding components of Z are zero in this new basis. */
+
+ *k = 1;
+ k2 = n + 1;
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ if ((r__1 = z__[j], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ idxp[k2] = j;
+ if (j == n) {
+ goto L100;
+ }
+ } else {
+ jprev = j;
+ goto L70;
+ }
+/* L60: */
+ }
+L70:
+ j = jprev;
+L80:
+ ++j;
+ if (j > n) {
+ goto L90;
+ }
+ if ((r__1 = z__[j], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ idxp[k2] = j;
+ } else {
+
+/* Check if singular values are close enough to allow deflation. */
+
+ if ((r__1 = d__[j] - d__[jprev], dabs(r__1)) <= tol) {
+
+/* Deflation is possible. */
+
+ *s = z__[jprev];
+ *c__ = z__[j];
+
+/* Find sqrt(a**2+b**2) without overflow or */
+/* destructive underflow. */
+
+ tau = slapy2_(c__, s);
+ z__[j] = tau;
+ z__[jprev] = 0.f;
+ *c__ /= tau;
+ *s = -(*s) / tau;
+
+/* Record the appropriate Givens rotation */
+
+ if (*icompq == 1) {
+ ++(*givptr);
+ idxjp = idxq[idx[jprev] + 1];
+ idxj = idxq[idx[j] + 1];
+ if (idxjp <= nlp1) {
+ --idxjp;
+ }
+ if (idxj <= nlp1) {
+ --idxj;
+ }
+ givcol[*givptr + (givcol_dim1 << 1)] = idxjp;
+ givcol[*givptr + givcol_dim1] = idxj;
+ givnum[*givptr + (givnum_dim1 << 1)] = *c__;
+ givnum[*givptr + givnum_dim1] = *s;
+ }
+ srot_(&c__1, &vf[jprev], &c__1, &vf[j], &c__1, c__, s);
+ srot_(&c__1, &vl[jprev], &c__1, &vl[j], &c__1, c__, s);
+ --k2;
+ idxp[k2] = jprev;
+ jprev = j;
+ } else {
+ ++(*k);
+ zw[*k] = z__[jprev];
+ dsigma[*k] = d__[jprev];
+ idxp[*k] = jprev;
+ jprev = j;
+ }
+ }
+ goto L80;
+L90:
+
+/* Record the last singular value. */
+
+ ++(*k);
+ zw[*k] = z__[jprev];
+ dsigma[*k] = d__[jprev];
+ idxp[*k] = jprev;
+
+L100:
+
+/* Sort the singular values into DSIGMA. The singular values which */
+/* were not deflated go into the first K slots of DSIGMA, except */
+/* that DSIGMA(1) is treated separately. */
+
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ jp = idxp[j];
+ dsigma[j] = d__[jp];
+ vfw[j] = vf[jp];
+ vlw[j] = vl[jp];
+/* L110: */
+ }
+ if (*icompq == 1) {
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ jp = idxp[j];
+ perm[j] = idxq[idx[jp] + 1];
+ if (perm[j] <= nlp1) {
+ --perm[j];
+ }
+/* L120: */
+ }
+ }
+
+/* The deflated singular values go back into the last N - K slots of */
+/* D. */
+
+ i__1 = n - *k;
+ scopy_(&i__1, &dsigma[*k + 1], &c__1, &d__[*k + 1], &c__1);
+
+/* Determine DSIGMA(1), DSIGMA(2), Z(1), VF(1), VL(1), VF(M), and */
+/* VL(M). */
+
+ dsigma[1] = 0.f;
+ hlftol = tol / 2.f;
+ if (dabs(dsigma[2]) <= hlftol) {
+ dsigma[2] = hlftol;
+ }
+ if (m > n) {
+ z__[1] = slapy2_(&z1, &z__[m]);
+ if (z__[1] <= tol) {
+ *c__ = 1.f;
+ *s = 0.f;
+ z__[1] = tol;
+ } else {
+ *c__ = z1 / z__[1];
+ *s = -z__[m] / z__[1];
+ }
+ srot_(&c__1, &vf[m], &c__1, &vf[1], &c__1, c__, s);
+ srot_(&c__1, &vl[m], &c__1, &vl[1], &c__1, c__, s);
+ } else {
+ if (dabs(z1) <= tol) {
+ z__[1] = tol;
+ } else {
+ z__[1] = z1;
+ }
+ }
+
+/* Restore Z, VF, and VL. */
+
+ i__1 = *k - 1;
+ scopy_(&i__1, &zw[2], &c__1, &z__[2], &c__1);
+ i__1 = n - 1;
+ scopy_(&i__1, &vfw[2], &c__1, &vf[2], &c__1);
+ i__1 = n - 1;
+ scopy_(&i__1, &vlw[2], &c__1, &vl[2], &c__1);
+
+ return 0;
+
+/* End of SLASD7 */
+
+} /* slasd7_ */
diff --git a/contrib/libs/clapack/slasd8.c b/contrib/libs/clapack/slasd8.c
new file mode 100644
index 0000000000..b0079d51c1
--- /dev/null
+++ b/contrib/libs/clapack/slasd8.c
@@ -0,0 +1,323 @@
+/* slasd8.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static real c_b8 = 1.f;
+
+/* Subroutine */ int slasd8_(integer *icompq, integer *k, real *d__, real *
+ z__, real *vf, real *vl, real *difl, real *difr, integer *lddifr,
+ real *dsigma, real *work, integer *info)
+{
+ /* System generated locals */
+ integer difr_dim1, difr_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ integer i__, j;
+ real dj, rho;
+ integer iwk1, iwk2, iwk3;
+ real temp;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ integer iwk2i, iwk3i;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ real diflj, difrj, dsigj;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ extern doublereal slamc3_(real *, real *);
+ extern /* Subroutine */ int slasd4_(integer *, integer *, real *, real *,
+ real *, real *, real *, real *, integer *), xerbla_(char *,
+ integer *);
+ real dsigjp;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), slaset_(char *, integer *, integer *, real *, real *,
+ real *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* October 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLASD8 finds the square roots of the roots of the secular equation, */
+/* as defined by the values in DSIGMA and Z. It makes the appropriate */
+/* calls to SLASD4, and stores, for each element in D, the distance */
+/* to its two nearest poles (elements in DSIGMA). It also updates */
+/* the arrays VF and VL, the first and last components of all the */
+/* right singular vectors of the original bidiagonal matrix. */
+
+/* SLASD8 is called from SLASD6. */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* Specifies whether singular vectors are to be computed in */
+/* factored form in the calling routine: */
+/* = 0: Compute singular values only. */
+/* = 1: Compute singular vectors in factored form as well. */
+
+/* K (input) INTEGER */
+/* The number of terms in the rational function to be solved */
+/* by SLASD4. K >= 1. */
+
+/* D (output) REAL array, dimension ( K ) */
+/* On output, D contains the updated singular values. */
+
+/* Z (input/output) REAL array, dimension ( K ) */
+/* On entry, the first K elements of this array contain the */
+/* components of the deflation-adjusted updating row vector. */
+/* On exit, Z is updated. */
+
+/* VF (input/output) REAL array, dimension ( K ) */
+/* On entry, VF contains information passed through DBEDE8. */
+/* On exit, VF contains the first K components of the first */
+/* components of all right singular vectors of the bidiagonal */
+/* matrix. */
+
+/* VL (input/output) REAL array, dimension ( K ) */
+/* On entry, VL contains information passed through DBEDE8. */
+/* On exit, VL contains the first K components of the last */
+/* components of all right singular vectors of the bidiagonal */
+/* matrix. */
+
+/* DIFL (output) REAL array, dimension ( K ) */
+/* On exit, DIFL(I) = D(I) - DSIGMA(I). */
+
+/* DIFR (output) REAL array, */
+/* dimension ( LDDIFR, 2 ) if ICOMPQ = 1 and */
+/* dimension ( K ) if ICOMPQ = 0. */
+/* On exit, DIFR(I,1) = D(I) - DSIGMA(I+1), DIFR(K,1) is not */
+/* defined and will not be referenced. */
+
+/* If ICOMPQ = 1, DIFR(1:K,2) is an array containing the */
+/* normalizing factors for the right singular vector matrix. */
+
+/* LDDIFR (input) INTEGER */
+/* The leading dimension of DIFR, must be at least K. */
+
+/* DSIGMA (input/output) REAL array, dimension ( K ) */
+/* On entry, the first K elements of this array contain the old */
+/* roots of the deflated updating problem. These are the poles */
+/* of the secular equation. */
+/* On exit, the elements of DSIGMA may be very slightly altered */
+/* in value. */
+
+/* WORK (workspace) REAL array, dimension at least 3 * K */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, an singular value did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --z__;
+ --vf;
+ --vl;
+ --difl;
+ difr_dim1 = *lddifr;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ --dsigma;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*k < 1) {
+ *info = -2;
+ } else if (*lddifr < *k) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASD8", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*k == 1) {
+ d__[1] = dabs(z__[1]);
+ difl[1] = d__[1];
+ if (*icompq == 1) {
+ difl[2] = 1.f;
+ difr[(difr_dim1 << 1) + 1] = 1.f;
+ }
+ return 0;
+ }
+
+/* Modify values DSIGMA(i) to make sure all DSIGMA(i)-DSIGMA(j) can */
+/* be computed with high relative accuracy (barring over/underflow). */
+/* This is a problem on machines without a guard digit in */
+/* add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2). */
+/* The following code replaces DSIGMA(I) by 2*DSIGMA(I)-DSIGMA(I), */
+/* which on any of these machines zeros out the bottommost */
+/* bit of DSIGMA(I) if it is 1; this makes the subsequent */
+/* subtractions DSIGMA(I)-DSIGMA(J) unproblematic when cancellation */
+/* occurs. On binary machines with a guard digit (almost all */
+/* machines) it does not change DSIGMA(I) at all. On hexadecimal */
+/* and decimal machines with a guard digit, it slightly */
+/* changes the bottommost bits of DSIGMA(I). It does not account */
+/* for hexadecimal or decimal machines without guard digits */
+/* (we know of none). We use a subroutine call to compute */
+/* 2*DLAMBDA(I) to prevent optimizing compilers from eliminating */
+/* this code. */
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dsigma[i__] = slamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__];
+/* L10: */
+ }
+
+/* Book keeping. */
+
+ iwk1 = 1;
+ iwk2 = iwk1 + *k;
+ iwk3 = iwk2 + *k;
+ iwk2i = iwk2 - 1;
+ iwk3i = iwk3 - 1;
+
+/* Normalize Z. */
+
+ rho = snrm2_(k, &z__[1], &c__1);
+ slascl_("G", &c__0, &c__0, &rho, &c_b8, k, &c__1, &z__[1], k, info);
+ rho *= rho;
+
+/* Initialize WORK(IWK3). */
+
+ slaset_("A", k, &c__1, &c_b8, &c_b8, &work[iwk3], k);
+
+/* Compute the updated singular values, the arrays DIFL, DIFR, */
+/* and the updated Z. */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ slasd4_(k, &j, &dsigma[1], &z__[1], &work[iwk1], &rho, &d__[j], &work[
+ iwk2], info);
+
+/* If the root finder fails, the computation is terminated. */
+
+ if (*info != 0) {
+ return 0;
+ }
+ work[iwk3i + j] = work[iwk3i + j] * work[j] * work[iwk2i + j];
+ difl[j] = -work[j];
+ difr[j + difr_dim1] = -work[j + 1];
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i +
+ i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[
+ j]);
+/* L20: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i +
+ i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[
+ j]);
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Compute updated Z. */
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__2 = sqrt((r__1 = work[iwk3i + i__], dabs(r__1)));
+ z__[i__] = r_sign(&r__2, &z__[i__]);
+/* L50: */
+ }
+
+/* Update VF and VL. */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ diflj = difl[j];
+ dj = d__[j];
+ dsigj = -dsigma[j];
+ if (j < *k) {
+ difrj = -difr[j + difr_dim1];
+ dsigjp = -dsigma[j + 1];
+ }
+ work[j] = -z__[j] / diflj / (dsigma[j] + dj);
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] = z__[i__] / (slamc3_(&dsigma[i__], &dsigj) - diflj) / (
+ dsigma[i__] + dj);
+/* L60: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ work[i__] = z__[i__] / (slamc3_(&dsigma[i__], &dsigjp) + difrj) /
+ (dsigma[i__] + dj);
+/* L70: */
+ }
+ temp = snrm2_(k, &work[1], &c__1);
+ work[iwk2i + j] = sdot_(k, &work[1], &c__1, &vf[1], &c__1) / temp;
+ work[iwk3i + j] = sdot_(k, &work[1], &c__1, &vl[1], &c__1) / temp;
+ if (*icompq == 1) {
+ difr[j + (difr_dim1 << 1)] = temp;
+ }
+/* L80: */
+ }
+
+ scopy_(k, &work[iwk2], &c__1, &vf[1], &c__1);
+ scopy_(k, &work[iwk3], &c__1, &vl[1], &c__1);
+
+ return 0;
+
+/* End of SLASD8 */
+
+} /* slasd8_ */
diff --git a/contrib/libs/clapack/slasda.c b/contrib/libs/clapack/slasda.c
new file mode 100644
index 0000000000..694a4e21b1
--- /dev/null
+++ b/contrib/libs/clapack/slasda.c
@@ -0,0 +1,483 @@
+/* slasda.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__0 = 0;
+static real c_b11 = 0.f;
+static real c_b12 = 1.f;
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int slasda_(integer *icompq, integer *smlsiz, integer *n,
+ integer *sqre, real *d__, real *e, real *u, integer *ldu, real *vt,
+ integer *k, real *difl, real *difr, real *z__, real *poles, integer *
+ givptr, integer *givcol, integer *ldgcol, integer *perm, real *givnum,
+ real *c__, real *s, real *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, difl_dim1,
+ difl_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset,
+ poles_dim1, poles_offset, u_dim1, u_offset, vt_dim1, vt_offset,
+ z_dim1, z_offset, i__1, i__2;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ integer i__, j, m, i1, ic, lf, nd, ll, nl, vf, nr, vl, im1, ncc, nlf, nrf,
+ vfi, iwk, vli, lvl, nru, ndb1, nlp1, lvl2, nrp1;
+ real beta;
+ integer idxq, nlvl;
+ real alpha;
+ integer inode, ndiml, ndimr, idxqi, itemp, sqrei;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), slasd6_(integer *, integer *, integer *, integer *,
+ real *, real *, real *, real *, real *, integer *, integer *,
+ integer *, integer *, integer *, real *, integer *, real *, real *
+, real *, real *, integer *, real *, real *, real *, integer *,
+ integer *);
+ integer nwork1, nwork2;
+ extern /* Subroutine */ int xerbla_(char *, integer *), slasdq_(
+ char *, integer *, integer *, integer *, integer *, integer *,
+ real *, real *, real *, integer *, real *, integer *, real *,
+ integer *, real *, integer *), slasdt_(integer *, integer
+ *, integer *, integer *, integer *, integer *, integer *),
+ slaset_(char *, integer *, integer *, real *, real *, real *,
+ integer *);
+ integer smlszp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Using a divide and conquer approach, SLASDA computes the singular */
+/* value decomposition (SVD) of a real upper bidiagonal N-by-M matrix */
+/* B with diagonal D and offdiagonal E, where M = N + SQRE. The */
+/* algorithm computes the singular values in the SVD B = U * S * VT. */
+/* The orthogonal matrices U and VT are optionally computed in */
+/* compact form. */
+
+/* A related subroutine, SLASD0, computes the singular values and */
+/* the singular vectors in explicit form. */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* Specifies whether singular vectors are to be computed */
+/* in compact form, as follows */
+/* = 0: Compute singular values only. */
+/* = 1: Compute singular vectors of upper bidiagonal */
+/* matrix in compact form. */
+
+/* SMLSIZ (input) INTEGER */
+/* The maximum size of the subproblems at the bottom of the */
+/* computation tree. */
+
+/* N (input) INTEGER */
+/* The row dimension of the upper bidiagonal matrix. This is */
+/* also the dimension of the main diagonal array D. */
+
+/* SQRE (input) INTEGER */
+/* Specifies the column dimension of the bidiagonal matrix. */
+/* = 0: The bidiagonal matrix has column dimension M = N; */
+/* = 1: The bidiagonal matrix has column dimension M = N + 1. */
+
+/* D (input/output) REAL array, dimension ( N ) */
+/* On entry D contains the main diagonal of the bidiagonal */
+/* matrix. On exit D, if INFO = 0, contains its singular values. */
+
+/* E (input) REAL array, dimension ( M-1 ) */
+/* Contains the subdiagonal entries of the bidiagonal matrix. */
+/* On exit, E has been destroyed. */
+
+/* U (output) REAL array, */
+/* dimension ( LDU, SMLSIZ ) if ICOMPQ = 1, and not referenced */
+/* if ICOMPQ = 0. If ICOMPQ = 1, on exit, U contains the left */
+/* singular vector matrices of all subproblems at the bottom */
+/* level. */
+
+/* LDU (input) INTEGER, LDU = > N. */
+/* The leading dimension of arrays U, VT, DIFL, DIFR, POLES, */
+/* GIVNUM, and Z. */
+
+/* VT (output) REAL array, */
+/* dimension ( LDU, SMLSIZ+1 ) if ICOMPQ = 1, and not referenced */
+/* if ICOMPQ = 0. If ICOMPQ = 1, on exit, VT' contains the right */
+/* singular vector matrices of all subproblems at the bottom */
+/* level. */
+
+/* K (output) INTEGER array, dimension ( N ) */
+/* if ICOMPQ = 1 and dimension 1 if ICOMPQ = 0. */
+/* If ICOMPQ = 1, on exit, K(I) is the dimension of the I-th */
+/* secular equation on the computation tree. */
+
+/* DIFL (output) REAL array, dimension ( LDU, NLVL ), */
+/* where NLVL = floor(log_2 (N/SMLSIZ))). */
+
+/* DIFR (output) REAL array, */
+/* dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1 and */
+/* dimension ( N ) if ICOMPQ = 0. */
+/* If ICOMPQ = 1, on exit, DIFL(1:N, I) and DIFR(1:N, 2 * I - 1) */
+/* record distances between singular values on the I-th */
+/* level and singular values on the (I -1)-th level, and */
+/* DIFR(1:N, 2 * I ) contains the normalizing factors for */
+/* the right singular vector matrix. See SLASD8 for details. */
+
+/* Z (output) REAL array, */
+/* dimension ( LDU, NLVL ) if ICOMPQ = 1 and */
+/* dimension ( N ) if ICOMPQ = 0. */
+/* The first K elements of Z(1, I) contain the components of */
+/* the deflation-adjusted updating row vector for subproblems */
+/* on the I-th level. */
+
+/* POLES (output) REAL array, */
+/* dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1, and not referenced */
+/* if ICOMPQ = 0. If ICOMPQ = 1, on exit, POLES(1, 2*I - 1) and */
+/* POLES(1, 2*I) contain the new and old singular values */
+/* involved in the secular equations on the I-th level. */
+
+/* GIVPTR (output) INTEGER array, */
+/* dimension ( N ) if ICOMPQ = 1, and not referenced if */
+/* ICOMPQ = 0. If ICOMPQ = 1, on exit, GIVPTR( I ) records */
+/* the number of Givens rotations performed on the I-th */
+/* problem on the computation tree. */
+
+/* GIVCOL (output) INTEGER array, */
+/* dimension ( LDGCOL, 2 * NLVL ) if ICOMPQ = 1, and not */
+/* referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, for each I, */
+/* GIVCOL(1, 2 *I - 1) and GIVCOL(1, 2 *I) record the locations */
+/* of Givens rotations performed on the I-th level on the */
+/* computation tree. */
+
+/* LDGCOL (input) INTEGER, LDGCOL = > N. */
+/* The leading dimension of arrays GIVCOL and PERM. */
+
+/* PERM (output) INTEGER array, dimension ( LDGCOL, NLVL ) */
+/* if ICOMPQ = 1, and not referenced */
+/* if ICOMPQ = 0. If ICOMPQ = 1, on exit, PERM(1, I) records */
+/* permutations done on the I-th level of the computation tree. */
+
+/* GIVNUM (output) REAL array, */
+/* dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1, and not */
+/* referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, for each I, */
+/* GIVNUM(1, 2 *I - 1) and GIVNUM(1, 2 *I) record the C- and S- */
+/* values of Givens rotations performed on the I-th level on */
+/* the computation tree. */
+
+/* C (output) REAL array, */
+/* dimension ( N ) if ICOMPQ = 1, and dimension 1 if ICOMPQ = 0. */
+/* If ICOMPQ = 1 and the I-th subproblem is not square, on exit, */
+/* C( I ) contains the C-value of a Givens rotation related to */
+/* the right null space of the I-th subproblem. */
+
+/* S (output) REAL array, dimension ( N ) if */
+/* ICOMPQ = 1, and dimension 1 if ICOMPQ = 0. If ICOMPQ = 1 */
+/* and the I-th subproblem is not square, on exit, S( I ) */
+/* contains the S-value of a Givens rotation related to */
+/* the right null space of the I-th subproblem. */
+
+/* WORK (workspace) REAL array, dimension */
+/* (6 * N + (SMLSIZ + 1)*(SMLSIZ + 1)). */
+
+/* IWORK (workspace) INTEGER array, dimension (7*N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, an singular value did not converge */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ givnum_dim1 = *ldu;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ poles_dim1 = *ldu;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ z_dim1 = *ldu;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ difr_dim1 = *ldu;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ difl_dim1 = *ldu;
+ difl_offset = 1 + difl_dim1;
+ difl -= difl_offset;
+ vt_dim1 = *ldu;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ --k;
+ --givptr;
+ perm_dim1 = *ldgcol;
+ perm_offset = 1 + perm_dim1;
+ perm -= perm_offset;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ --c__;
+ --s;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*smlsiz < 3) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*sqre < 0 || *sqre > 1) {
+ *info = -4;
+ } else if (*ldu < *n + *sqre) {
+ *info = -8;
+ } else if (*ldgcol < *n) {
+ *info = -17;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASDA", &i__1);
+ return 0;
+ }
+
+ m = *n + *sqre;
+
+/* If the input matrix is too small, call SLASDQ to find the SVD. */
+
+ if (*n <= *smlsiz) {
+ if (*icompq == 0) {
+ slasdq_("U", sqre, n, &c__0, &c__0, &c__0, &d__[1], &e[1], &vt[
+ vt_offset], ldu, &u[u_offset], ldu, &u[u_offset], ldu, &
+ work[1], info);
+ } else {
+ slasdq_("U", sqre, n, &m, n, &c__0, &d__[1], &e[1], &vt[vt_offset]
+, ldu, &u[u_offset], ldu, &u[u_offset], ldu, &work[1],
+ info);
+ }
+ return 0;
+ }
+
+/* Book-keeping and set up the computation tree. */
+
+ inode = 1;
+ ndiml = inode + *n;
+ ndimr = ndiml + *n;
+ idxq = ndimr + *n;
+ iwk = idxq + *n;
+
+ ncc = 0;
+ nru = 0;
+
+ smlszp = *smlsiz + 1;
+ vf = 1;
+ vl = vf + m;
+ nwork1 = vl + m;
+ nwork2 = nwork1 + smlszp * smlszp;
+
+ slasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+ smlsiz);
+
+/* for the nodes on bottom level of the tree, solve */
+/* their subproblems by SLASDQ. */
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/* IC : center row of each node */
+/* NL : number of rows of left subproblem */
+/* NR : number of rows of right subproblem */
+/* NLF: starting row of the left subproblem */
+/* NRF: starting row of the right subproblem */
+
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nlp1 = nl + 1;
+ nr = iwork[ndimr + i1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ idxqi = idxq + nlf - 2;
+ vfi = vf + nlf - 1;
+ vli = vl + nlf - 1;
+ sqrei = 1;
+ if (*icompq == 0) {
+ slaset_("A", &nlp1, &nlp1, &c_b11, &c_b12, &work[nwork1], &smlszp);
+ slasdq_("U", &sqrei, &nl, &nlp1, &nru, &ncc, &d__[nlf], &e[nlf], &
+ work[nwork1], &smlszp, &work[nwork2], &nl, &work[nwork2],
+ &nl, &work[nwork2], info);
+ itemp = nwork1 + nl * smlszp;
+ scopy_(&nlp1, &work[nwork1], &c__1, &work[vfi], &c__1);
+ scopy_(&nlp1, &work[itemp], &c__1, &work[vli], &c__1);
+ } else {
+ slaset_("A", &nl, &nl, &c_b11, &c_b12, &u[nlf + u_dim1], ldu);
+ slaset_("A", &nlp1, &nlp1, &c_b11, &c_b12, &vt[nlf + vt_dim1],
+ ldu);
+ slasdq_("U", &sqrei, &nl, &nlp1, &nl, &ncc, &d__[nlf], &e[nlf], &
+ vt[nlf + vt_dim1], ldu, &u[nlf + u_dim1], ldu, &u[nlf +
+ u_dim1], ldu, &work[nwork1], info);
+ scopy_(&nlp1, &vt[nlf + vt_dim1], &c__1, &work[vfi], &c__1);
+ scopy_(&nlp1, &vt[nlf + nlp1 * vt_dim1], &c__1, &work[vli], &c__1)
+ ;
+ }
+ if (*info != 0) {
+ return 0;
+ }
+ i__2 = nl;
+ for (j = 1; j <= i__2; ++j) {
+ iwork[idxqi + j] = j;
+/* L10: */
+ }
+ if (i__ == nd && *sqre == 0) {
+ sqrei = 0;
+ } else {
+ sqrei = 1;
+ }
+ idxqi += nlp1;
+ vfi += nlp1;
+ vli += nlp1;
+ nrp1 = nr + sqrei;
+ if (*icompq == 0) {
+ slaset_("A", &nrp1, &nrp1, &c_b11, &c_b12, &work[nwork1], &smlszp);
+ slasdq_("U", &sqrei, &nr, &nrp1, &nru, &ncc, &d__[nrf], &e[nrf], &
+ work[nwork1], &smlszp, &work[nwork2], &nr, &work[nwork2],
+ &nr, &work[nwork2], info);
+ itemp = nwork1 + (nrp1 - 1) * smlszp;
+ scopy_(&nrp1, &work[nwork1], &c__1, &work[vfi], &c__1);
+ scopy_(&nrp1, &work[itemp], &c__1, &work[vli], &c__1);
+ } else {
+ slaset_("A", &nr, &nr, &c_b11, &c_b12, &u[nrf + u_dim1], ldu);
+ slaset_("A", &nrp1, &nrp1, &c_b11, &c_b12, &vt[nrf + vt_dim1],
+ ldu);
+ slasdq_("U", &sqrei, &nr, &nrp1, &nr, &ncc, &d__[nrf], &e[nrf], &
+ vt[nrf + vt_dim1], ldu, &u[nrf + u_dim1], ldu, &u[nrf +
+ u_dim1], ldu, &work[nwork1], info);
+ scopy_(&nrp1, &vt[nrf + vt_dim1], &c__1, &work[vfi], &c__1);
+ scopy_(&nrp1, &vt[nrf + nrp1 * vt_dim1], &c__1, &work[vli], &c__1)
+ ;
+ }
+ if (*info != 0) {
+ return 0;
+ }
+ i__2 = nr;
+ for (j = 1; j <= i__2; ++j) {
+ iwork[idxqi + j] = j;
+/* L20: */
+ }
+/* L30: */
+ }
+
+/* Now conquer each subproblem bottom-up. */
+
+ j = pow_ii(&c__2, &nlvl);
+ for (lvl = nlvl; lvl >= 1; --lvl) {
+ lvl2 = (lvl << 1) - 1;
+
+/* Find the first node LF and last node LL on */
+/* the current level LVL. */
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__1 = lvl - 1;
+ lf = pow_ii(&c__2, &i__1);
+ ll = (lf << 1) - 1;
+ }
+ i__1 = ll;
+ for (i__ = lf; i__ <= i__1; ++i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ if (i__ == ll) {
+ sqrei = *sqre;
+ } else {
+ sqrei = 1;
+ }
+ vfi = vf + nlf - 1;
+ vli = vl + nlf - 1;
+ idxqi = idxq + nlf - 1;
+ alpha = d__[ic];
+ beta = e[ic];
+ if (*icompq == 0) {
+ slasd6_(icompq, &nl, &nr, &sqrei, &d__[nlf], &work[vfi], &
+ work[vli], &alpha, &beta, &iwork[idxqi], &perm[
+ perm_offset], &givptr[1], &givcol[givcol_offset],
+ ldgcol, &givnum[givnum_offset], ldu, &poles[
+ poles_offset], &difl[difl_offset], &difr[difr_offset],
+ &z__[z_offset], &k[1], &c__[1], &s[1], &work[nwork1],
+ &iwork[iwk], info);
+ } else {
+ --j;
+ slasd6_(icompq, &nl, &nr, &sqrei, &d__[nlf], &work[vfi], &
+ work[vli], &alpha, &beta, &iwork[idxqi], &perm[nlf +
+ lvl * perm_dim1], &givptr[j], &givcol[nlf + lvl2 *
+ givcol_dim1], ldgcol, &givnum[nlf + lvl2 *
+ givnum_dim1], ldu, &poles[nlf + lvl2 * poles_dim1], &
+ difl[nlf + lvl * difl_dim1], &difr[nlf + lvl2 *
+ difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[j],
+ &s[j], &work[nwork1], &iwork[iwk], info);
+ }
+ if (*info != 0) {
+ return 0;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+
+ return 0;
+
+/* End of SLASDA */
+
+} /* slasda_ */
diff --git a/contrib/libs/clapack/slasdq.c b/contrib/libs/clapack/slasdq.c
new file mode 100644
index 0000000000..fae6a16b6a
--- /dev/null
+++ b/contrib/libs/clapack/slasdq.c
@@ -0,0 +1,379 @@
+/* slasdq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int slasdq_(char *uplo, integer *sqre, integer *n, integer *
+	ncvt, integer *nru, integer *ncc, real *d__, real *e, real *vt, 
+	integer *ldvt, real *u, integer *ldu, real *c__, integer *ldc, real *
+	work, integer *info)
+{
+    /* System generated locals */
+    integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1, 
+	    i__2;
+
+    /* Local variables */
+    integer i__, j;
+    real r__, cs, sn;
+    integer np1, isub;
+    real smin;
+    integer sqre1;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int slasr_(char *, char *, char *, integer *, 
+	    integer *, real *, real *, real *, integer *);
+    integer iuplo;
+    extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *, 
+	    integer *), xerbla_(char *, integer *), slartg_(real *, 
+	    real *, real *, real *, real *);
+    logical rotate;
+    extern /* Subroutine */ int sbdsqr_(char *, integer *, integer *, integer 
+	    *, integer *, real *, real *, real *, integer *, real *, integer *
+, real *, integer *, real *, integer *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLASDQ computes the singular value decomposition (SVD) of a real */
+/*  (upper or lower) bidiagonal matrix with diagonal D and offdiagonal */
+/*  E, accumulating the transformations if desired. Letting B denote */
+/*  the input bidiagonal matrix, the algorithm computes orthogonal */
+/*  matrices Q and P such that B = Q * S * P' (P' denotes the transpose */
+/*  of P). The singular values S are overwritten on D. */
+
+/*  The input matrix U  is changed to U  * Q  if desired. */
+/*  The input matrix VT is changed to P' * VT if desired. */
+/*  The input matrix C  is changed to Q' * C  if desired. */
+
+/*  See "Computing  Small Singular Values of Bidiagonal Matrices With */
+/*  Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, */
+/*  LAPACK Working Note #3, for a detailed description of the algorithm. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO  (input) CHARACTER*1 */
+/*        On entry, UPLO specifies whether the input bidiagonal matrix */
+/*        is upper or lower bidiagonal, and whether it is square or */
+/*        not. */
+/*           UPLO = 'U' or 'u'   B is upper bidiagonal. */
+/*           UPLO = 'L' or 'l'   B is lower bidiagonal. */
+
+/*  SQRE  (input) INTEGER */
+/*        = 0: then the input matrix is N-by-N. */
+/*        = 1: then the input matrix is N-by-(N+1) if UPLO = 'U' and */
+/*             (N+1)-by-N if UPLO = 'L'. */
+
+/*        The bidiagonal matrix has */
+/*        N = NL + NR + 1 rows and */
+/*        M = N + SQRE >= N columns. */
+
+/*  N     (input) INTEGER */
+/*        On entry, N specifies the number of rows and columns */
+/*        in the matrix. N must be at least 0. */
+
+/*  NCVT  (input) INTEGER */
+/*        On entry, NCVT specifies the number of columns of */
+/*        the matrix VT. NCVT must be at least 0. */
+
+/*  NRU   (input) INTEGER */
+/*        On entry, NRU specifies the number of rows of */
+/*        the matrix U. NRU must be at least 0. */
+
+/*  NCC   (input) INTEGER */
+/*        On entry, NCC specifies the number of columns of */
+/*        the matrix C. NCC must be at least 0. */
+
+/*  D     (input/output) REAL array, dimension (N) */
+/*        On entry, D contains the diagonal entries of the */
+/*        bidiagonal matrix whose SVD is desired. On normal exit, */
+/*        D contains the singular values in ascending order. */
+
+/*  E     (input/output) REAL array. */
+/*        dimension is (N-1) if SQRE = 0 and N if SQRE = 1. */
+/*        On entry, the entries of E contain the offdiagonal entries */
+/*        of the bidiagonal matrix whose SVD is desired. On normal */
+/*        exit, E will contain 0. If the algorithm does not converge, */
+/*        D and E will contain the diagonal and superdiagonal entries */
+/*        of a bidiagonal matrix orthogonally equivalent to the one */
+/*        given as input. */
+
+/*  VT    (input/output) REAL array, dimension (LDVT, NCVT) */
+/*        On entry, contains a matrix which on exit has been */
+/*        premultiplied by P', dimension N-by-NCVT if SQRE = 0 */
+/*        and (N+1)-by-NCVT if SQRE = 1 (not referenced if NCVT=0). */
+
+/*  LDVT  (input) INTEGER */
+/*        On entry, LDVT specifies the leading dimension of VT as */
+/*        declared in the calling (sub) program. LDVT must be at */
+/*        least 1. If NCVT is nonzero LDVT must also be at least N. */
+
+/*  U     (input/output) REAL array, dimension (LDU, N) */
+/*        On entry, contains a  matrix which on exit has been */
+/*        postmultiplied by Q, dimension NRU-by-N if SQRE = 0 */
+/*        and NRU-by-(N+1) if SQRE = 1 (not referenced if NRU=0). */
+
+/*  LDU   (input) INTEGER */
+/*        On entry, LDU  specifies the leading dimension of U as */
+/*        declared in the calling (sub) program. LDU must be at */
+/*        least max( 1, NRU ) . */
+
+/*  C     (input/output) REAL array, dimension (LDC, NCC) */
+/*        On entry, contains an N-by-NCC matrix which on exit */
+/*        has been premultiplied by Q'  dimension N-by-NCC if SQRE = 0 */
+/*        and (N+1)-by-NCC if SQRE = 1 (not referenced if NCC=0). */
+
+/*  LDC   (input) INTEGER */
+/*        On entry, LDC  specifies the leading dimension of C as */
+/*        declared in the calling (sub) program. LDC must be at */
+/*        least 1. If NCC is nonzero, LDC must also be at least N. */
+
+/*  WORK  (workspace) REAL array, dimension (4*N) */
+/*        Workspace. Only referenced if one of NCVT, NRU, or NCC is */
+/*        nonzero, and if N is at least 2. */
+
+/*  INFO  (output) INTEGER */
+/*        On exit, a value of 0 indicates a successful exit. */
+/*        If INFO < 0, argument number -INFO is illegal. */
+/*        If INFO > 0, the algorithm did not converge, and INFO */
+/*        specifies how many superdiagonals did not converge. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*     Ming Gu and Huan Ren, Computer Science Division, University of */
+/*     California at Berkeley, USA */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    vt_dim1 = *ldvt;
+    vt_offset = 1 + vt_dim1;
+    vt -= vt_offset;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1;
+    u -= u_offset;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    iuplo = 0;
+    if (lsame_(uplo, "U")) {
+	iuplo = 1;
+    }
+    if (lsame_(uplo, "L")) {
+	iuplo = 2;
+    }
+    if (iuplo == 0) {
+	*info = -1;
+    } else if (*sqre < 0 || *sqre > 1) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ncvt < 0) {
+	*info = -4;
+    } else if (*nru < 0) {
+	*info = -5;
+    } else if (*ncc < 0) {
+	*info = -6;
+    } else if (*ncvt == 0 && *ldvt < 1 || *ncvt > 0 && *ldvt < max(1,*n)) {
+	*info = -10;
+    } else if (*ldu < max(1,*nru)) {
+	*info = -12;
+    } else if (*ncc == 0 && *ldc < 1 || *ncc > 0 && *ldc < max(1,*n)) {
+	*info = -14;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SLASDQ", &i__1);
+	return 0;
+    }
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     ROTATE is true if any singular vectors desired, false otherwise */
+
+    rotate = *ncvt > 0 || *nru > 0 || *ncc > 0;
+    np1 = *n + 1;
+    sqre1 = *sqre;
+
+/*     If matrix non-square upper bidiagonal, rotate to be lower */
+/*     bidiagonal.  The rotations are on the right. */
+
+    if (iuplo == 1 && sqre1 == 1) {
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    slartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+	    d__[i__] = r__;
+	    e[i__] = sn * d__[i__ + 1];
+	    d__[i__ + 1] = cs * d__[i__ + 1];
+	    if (rotate) {
+		work[i__] = cs;
+		work[*n + i__] = sn;
+	    }
+/* L10: */
+	}
+	slartg_(&d__[*n], &e[*n], &cs, &sn, &r__);
+	d__[*n] = r__;
+	e[*n] = 0.f;
+	if (rotate) {
+	    work[*n] = cs;
+	    work[*n + *n] = sn;
+	}
+	iuplo = 2;
+	sqre1 = 0;
+
+/*        Update singular vectors if desired. */
+
+	if (*ncvt > 0) {
+	    slasr_("L", "V", "F", &np1, ncvt, &work[1], &work[np1], &vt[
+		    vt_offset], ldvt);
+	}
+    }
+
+/*     If matrix lower bidiagonal, rotate to be upper bidiagonal */
+/*     by applying Givens rotations on the left. */
+
+    if (iuplo == 2) {
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    slartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+	    d__[i__] = r__;
+	    e[i__] = sn * d__[i__ + 1];
+	    d__[i__ + 1] = cs * d__[i__ + 1];
+	    if (rotate) {
+		work[i__] = cs;
+		work[*n + i__] = sn;
+	    }
+/* L20: */
+	}
+
+/*        If matrix (N+1)-by-N lower bidiagonal, one additional */
+/*        rotation is needed. */
+
+	if (sqre1 == 1) {
+	    slartg_(&d__[*n], &e[*n], &cs, &sn, &r__);
+	    d__[*n] = r__;
+	    if (rotate) {
+		work[*n] = cs;
+		work[*n + *n] = sn;
+	    }
+	}
+
+/*        Update singular vectors if desired. */
+
+	if (*nru > 0) {
+	    if (sqre1 == 0) {
+		slasr_("R", "V", "F", nru, n, &work[1], &work[np1], &u[
+			u_offset], ldu);
+	    } else {
+		slasr_("R", "V", "F", nru, &np1, &work[1], &work[np1], &u[
+			u_offset], ldu);
+	    }
+	}
+	if (*ncc > 0) {
+	    if (sqre1 == 0) {
+		slasr_("L", "V", "F", n, ncc, &work[1], &work[np1], &c__[
+			c_offset], ldc);
+	    } else {
+		slasr_("L", "V", "F", &np1, ncc, &work[1], &work[np1], &c__[
+			c_offset], ldc);
+	    }
+	}
+    }
+
+/*     Call SBDSQR to compute the SVD of the reduced real */
+/*     N-by-N upper bidiagonal matrix. */
+
+    sbdsqr_("U", n, ncvt, nru, ncc, &d__[1], &e[1], &vt[vt_offset], ldvt, &u[
+	    u_offset], ldu, &c__[c_offset], ldc, &work[1], info);
+
+/*     Sort the singular values into ascending order (insertion sort on */
+/*     singular values, but only one transposition per singular vector) */
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*        Scan for smallest D(I). */
+
+	isub = i__;
+	smin = d__[i__];
+	i__2 = *n;
+	for (j = i__ + 1; j <= i__2; ++j) {
+	    if (d__[j] < smin) {
+		isub = j;
+		smin = d__[j];
+	    }
+/* L30: */
+	}
+	if (isub != i__) {
+
+/*           Swap singular values and vectors. */
+
+	    d__[isub] = d__[i__];
+	    d__[i__] = smin;
+	    if (*ncvt > 0) {
+		sswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[i__ + vt_dim1], 
+			ldvt);
+	    }
+	    if (*nru > 0) {
+		sswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[i__ * u_dim1 + 1]
+, &c__1);
+	    }
+	    if (*ncc > 0) {
+		sswap_(ncc, &c__[isub + c_dim1], ldc, &c__[i__ + c_dim1], ldc)
+			;
+	    }
+	}
+/* L40: */
+    }
+
+    return 0;
+
+/*     End of SLASDQ */
+
+} /* slasdq_ */
diff --git a/contrib/libs/clapack/slasdt.c b/contrib/libs/clapack/slasdt.c
new file mode 100644
index 0000000000..aac6d27e0c
--- /dev/null
+++ b/contrib/libs/clapack/slasdt.c
@@ -0,0 +1,136 @@
+/* slasdt.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slasdt_(integer *n, integer *lvl, integer *nd, integer *
+	inode, integer *ndiml, integer *ndimr, integer *msub)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Builtin functions */
+    double log(doublereal);
+
+    /* Local variables */
+    integer i__, il, ir, maxn;
+    real temp;
+    integer nlvl, llst, ncrnt;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLASDT creates a tree of subproblems for bidiagonal divide and */
+/*  conquer. */
+
+/*  Arguments */
+/*  ========= */
+
+/*   N      (input) INTEGER */
+/*          On entry, the number of diagonal elements of the */
+/*          bidiagonal matrix. */
+
+/*   LVL    (output) INTEGER */
+/*          On exit, the number of levels on the computation tree. */
+
+/*   ND     (output) INTEGER */
+/*          On exit, the number of nodes on the tree. */
+
+/*   INODE  (output) INTEGER array, dimension ( N ) */
+/*          On exit, centers of subproblems. */
+
+/*   NDIML  (output) INTEGER array, dimension ( N ) */
+/*          On exit, row dimensions of left children. */
+
+/*   NDIMR  (output) INTEGER array, dimension ( N ) */
+/*          On exit, row dimensions of right children. */
+
+/*   MSUB   (input) INTEGER. */
+/*          On entry, the maximum row dimension a subproblem at the */
+/*          bottom of the tree is allowed to have. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*     Ming Gu and Huan Ren, Computer Science Division, University of */
+/*     California at Berkeley, USA */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Find the number of levels on the tree. */
+
+    /* Parameter adjustments */
+    --ndimr;
+    --ndiml;
+    --inode;
+
+    /* Function Body */
+    maxn = max(1,*n);
+    temp = log((real) maxn / (real) (*msub + 1)) / log(2.f);
+    *lvl = (integer) temp + 1;
+
+    i__ = *n / 2;
+    inode[1] = i__ + 1;
+    ndiml[1] = i__;
+    ndimr[1] = *n - i__ - 1;
+    il = 0;
+    ir = 1;
+    llst = 1;
+    i__1 = *lvl - 1;
+    for (nlvl = 1; nlvl <= i__1; ++nlvl) {
+
+/*        Constructing the tree at (NLVL+1)-st level. The number of */
+/*        nodes created on this level is LLST * 2. */
+
+	i__2 = llst - 1;
+	for (i__ = 0; i__ <= i__2; ++i__) {
+	    il += 2;
+	    ir += 2;
+	    ncrnt = llst + i__;
+	    ndiml[il] = ndiml[ncrnt] / 2;
+	    ndimr[il] = ndiml[ncrnt] - ndiml[il] - 1;
+	    inode[il] = inode[ncrnt] - ndimr[il] - 1;
+	    ndiml[ir] = ndimr[ncrnt] / 2;
+	    ndimr[ir] = ndimr[ncrnt] - ndiml[ir] - 1;
+	    inode[ir] = inode[ncrnt] + ndiml[ir] + 1;
+/* L10: */
+	}
+	llst <<= 1;
+/* L20: */
+    }
+    *nd = (llst << 1) - 1;
+
+    return 0;
+
+/*     End of SLASDT */
+
+} /* slasdt_ */
diff --git a/contrib/libs/clapack/slaset.c b/contrib/libs/clapack/slaset.c
new file mode 100644
index 0000000000..1566db7056
--- /dev/null
+++ b/contrib/libs/clapack/slaset.c
@@ -0,0 +1,152 @@
+/* slaset.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slaset_(char *uplo, integer *m, integer *n, real *alpha, 
+	real *beta, real *a, integer *lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__, j;
+    extern logical lsame_(char *, char *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLASET initializes an m-by-n matrix A to BETA on the diagonal and */
+/*  ALPHA on the offdiagonals; only the part selected by UPLO is written. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies the part of the matrix A to be set. */
+/*          = 'U':      Upper triangular part is set; the strictly lower */
+/*                      triangular part of A is not changed. */
+/*          = 'L':      Lower triangular part is set; the strictly upper */
+/*                      triangular part of A is not changed. */
+/*          Otherwise:  All of the matrix A is set. */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix A.  M >= 0. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix A.  N >= 0. */
+
+/*  ALPHA   (input) REAL */
+/*          The constant to which the offdiagonal elements are to be set. */
+
+/*  BETA    (input) REAL */
+/*          The constant to which the diagonal elements are to be set. */
+
+/*  A       (input/output) REAL array, dimension (LDA,N) */
+/*          On exit, the leading m-by-n submatrix of A is set as follows: */
+
+/*          if UPLO = 'U', A(i,j) = ALPHA, 1<=i<=j-1, 1<=j<=n, */
+/*          if UPLO = 'L', A(i,j) = ALPHA, j+1<=i<=m, 1<=j<=n, */
+/*          otherwise,     A(i,j) = ALPHA, 1<=i<=m, 1<=j<=n, i.ne.j, */
+
+/*          and, for all UPLO, A(i,i) = BETA, 1<=i<=min(m,n). */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,M). */
+
+/* ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    if (lsame_(uplo, "U")) {
+
+/*        Set the strictly upper triangular or trapezoidal part of the */
+/*        array to ALPHA. */
+
+	i__1 = *n;
+	for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+	    i__3 = j - 1;
+	    i__2 = min(i__3,*m);
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		a[i__ + j * a_dim1] = *alpha;
+/* L10: */
+	    }
+/* L20: */
+	}
+
+    } else if (lsame_(uplo, "L")) {
+
+/*        Set the strictly lower triangular or trapezoidal part of the */
+/*        array to ALPHA. */
+
+	i__1 = min(*m,*n);
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = j + 1; i__ <= i__2; ++i__) {
+		a[i__ + j * a_dim1] = *alpha;
+/* L30: */
+	    }
+/* L40: */
+	}
+
+    } else {
+
+/*        Set the leading m-by-n submatrix to ALPHA. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		a[i__ + j * a_dim1] = *alpha;
+/* L50: */
+	    }
+/* L60: */
+	}
+    }
+
+/*     Set the first min(M,N) diagonal elements to BETA. */
+
+    i__1 = min(*m,*n);
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	a[i__ + i__ * a_dim1] = *beta;
+/* L70: */
+    }
+
+    return 0;
+
+/*     End of SLASET */
+
+} /* slaset_ */
diff --git a/contrib/libs/clapack/slasq1.c b/contrib/libs/clapack/slasq1.c
new file mode 100644
index 0000000000..b812d23956
--- /dev/null
+++ b/contrib/libs/clapack/slasq1.c
@@ -0,0 +1,216 @@
+/* slasq1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__2 = 2;
+static integer c__0 = 0;
+
+/* Subroutine */ int slasq1_(integer *n, real *d__, real *e, real *work, 
+	integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    real r__1, r__2, r__3;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__;
+    real eps;
+    extern /* Subroutine */ int slas2_(real *, real *, real *, real *, real *)
+	    ;
+    real scale;
+    integer iinfo;
+    real sigmn, sigmx;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *, 
+	    integer *), slasq2_(integer *, real *, integer *);
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *), slascl_(
+	    char *, integer *, integer *, real *, real *, integer *, integer *
+, real *, integer *, integer *), slasrt_(char *, integer *
+, real *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2)                                    -- */
+
+/*  -- Contributed by Osni Marques of the Lawrence Berkeley National   -- */
+/*  -- Laboratory and Beresford Parlett of the Univ. of California at  -- */
+/*  -- Berkeley                                                        -- */
+/*  -- November 2008                                                   -- */
+
+/*  -- LAPACK is a software package provided by Univ. of Tennessee,    -- */
+/*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLASQ1 computes the singular values of a real N-by-N bidiagonal */
+/*  matrix with diagonal D and off-diagonal E. The singular values */
+/*  are computed to high relative accuracy, in the absence of */
+/*  denormalization, underflow and overflow. The algorithm was first */
+/*  presented in */
+
+/*  "Accurate singular values and differential qd algorithms" by K. V. */
+/*  Fernando and B. N. Parlett, Numer. Math., Vol-67, No. 2, pp. 191-230, */
+/*  1994, */
+
+/*  and the present implementation is described in "An implementation of */
+/*  the dqds Algorithm (Positive Case)", LAPACK Working Note. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  N     (input) INTEGER */
+/*        The number of rows and columns in the matrix. N >= 0. */
+
+/*  D     (input/output) REAL array, dimension (N) */
+/*        On entry, D contains the diagonal elements of the */
+/*        bidiagonal matrix whose SVD is desired. On normal exit, */
+/*        D contains the singular values in decreasing order. */
+
+/*  E     (input/output) REAL array, dimension (N) */
+/*        On entry, elements E(1:N-1) contain the off-diagonal elements */
+/*        of the bidiagonal matrix whose SVD is desired. */
+/*        On exit, E is overwritten. */
+
+/*  WORK  (workspace) REAL array, dimension (4*N) */
+
+/*  INFO  (output) INTEGER */
+/*        = 0: successful exit */
+/*        < 0: if INFO = -i, the i-th argument had an illegal value */
+/*        > 0: the algorithm failed */
+/*             = 1, a split was marked by a positive value in E */
+/*             = 2, current block of Z not diagonalized after 30*N */
+/*                  iterations (in inner while loop) */
+/*             = 3, termination criterion of outer while loop not met */
+/*                  (program created more than N unreduced blocks) */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --work;
+    --e;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+/*     Note: a negative N is reported as INFO = -2, as in the Fortran source. */
+    if (*n < 0) {
+	*info = -2;
+	i__1 = -(*info);
+	xerbla_("SLASQ1", &i__1);
+	return 0;
+    } else if (*n == 0) {
+	return 0;
+    } else if (*n == 1) {
+	d__[1] = dabs(d__[1]);
+	return 0;
+    } else if (*n == 2) {
+	slas2_(&d__[1], &e[1], &d__[2], &sigmn, &sigmx);
+	d__[1] = sigmx;
+	d__[2] = sigmn;
+	return 0;
+    }
+
+/*     Estimate the largest singular value. */
+
+    sigmx = 0.f;
+    i__1 = *n - 1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	d__[i__] = (r__1 = d__[i__], dabs(r__1));
+/* Computing MAX */
+	r__2 = sigmx, r__3 = (r__1 = e[i__], dabs(r__1));
+	sigmx = dmax(r__2,r__3);
+/* L10: */
+    }
+    d__[*n] = (r__1 = d__[*n], dabs(r__1));
+
+/*     Early return if SIGMX is zero (matrix is already diagonal). */
+
+    if (sigmx == 0.f) {
+	slasrt_("D", n, &d__[1], &iinfo);
+	return 0;
+    }
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	r__1 = sigmx, r__2 = d__[i__];
+	sigmx = dmax(r__1,r__2);
+/* L20: */
+    }
+
+/*     Copy D and E into WORK (in the Z format) and scale (squaring the */
+/*     input data makes scaling by a power of the radix pointless). */
+
+    eps = slamch_("Precision");
+    safmin = slamch_("Safe minimum");
+    scale = sqrt(eps / safmin);
+    scopy_(n, &d__[1], &c__1, &work[1], &c__2);
+    i__1 = *n - 1;
+    scopy_(&i__1, &e[1], &c__1, &work[2], &c__2);
+    i__1 = (*n << 1) - 1;
+    i__2 = (*n << 1) - 1;
+    slascl_("G", &c__0, &c__0, &sigmx, &scale, &i__1, &c__1, &work[1], &i__2, 
+	     &iinfo);
+
+/*     Compute the q's and e's. */
+
+    i__1 = (*n << 1) - 1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing 2nd power */
+	r__1 = work[i__];
+	work[i__] = r__1 * r__1;
+/* L30: */
+    }
+    work[*n * 2] = 0.f;
+
+    slasq2_(n, &work[1], info);
+
+    if (*info == 0) {
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    d__[i__] = sqrt(work[i__]);
+/* L40: */
+	}
+	slascl_("G", &c__0, &c__0, &scale, &sigmx, n, &c__1, &d__[1], n, &
+		iinfo);
+    }
+
+    return 0;
+
+/*     End of SLASQ1 */
+
+} /* slasq1_ */
diff --git a/contrib/libs/clapack/slasq2.c b/contrib/libs/clapack/slasq2.c
new file mode 100644
index 0000000000..00d11ccbfc
--- /dev/null
+++ b/contrib/libs/clapack/slasq2.c
@@ -0,0 +1,599 @@
+/* slasq2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int slasq2_(integer *n, real *z__, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ real d__, e, g;
+ integer k;
+ real s, t;
+ integer i0, i4, n0;
+ real dn;
+ integer pp;
+ real dn1, dn2, dee, eps, tau, tol;
+ integer ipn4;
+ real tol2;
+ logical ieee;
+ integer nbig;
+ real dmin__, emin, emax;
+ integer kmin, ndiv, iter;
+ real qmin, temp, qmax, zmax;
+ integer splt;
+ real dmin1, dmin2;
+ integer nfail;
+ real desig, trace, sigma;
+ integer iinfo, ttype;
+ extern /* Subroutine */ int slasq3_(integer *, integer *, real *, integer
+ *, real *, real *, real *, real *, integer *, integer *, integer *
+, logical *, integer *, real *, real *, real *, real *, real *,
+ real *, real *);
+ real deemin;
+ extern doublereal slamch_(char *);
+ integer iwhila, iwhilb;
+ real oldemn, safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *), slasrt_(
+ char *, integer *, real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Osni Marques of the Lawrence Berkeley National -- */
+/* -- Laboratory and Beresford Parlett of the Univ. of California at -- */
+/* -- Berkeley -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLASQ2 computes all the eigenvalues of the symmetric positive */
+/* definite tridiagonal matrix associated with the qd array Z to high */
+/* relative accuracy are computed to high relative accuracy, in the */
+/* absence of denormalization, underflow and overflow. */
+
+/* To see the relation of Z to the tridiagonal matrix, let L be a */
+/* unit lower bidiagonal matrix with subdiagonals Z(2,4,6,,..) and */
+/* let U be an upper bidiagonal matrix with 1's above and diagonal */
+/* Z(1,3,5,,..). The tridiagonal is L*U or, if you prefer, the */
+/* symmetric tridiagonal to which it is similar. */
+
+/* Note : SLASQ2 defines a logical variable, IEEE, which is true */
+/* on machines which follow ieee-754 floating-point standard in their */
+/* handling of infinities and NaNs, and false otherwise. This variable */
+/* is passed to SLASQ3. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of rows and columns in the matrix. N >= 0. */
+
+/* Z (input/output) REAL array, dimension ( 4*N ) */
+/* On entry Z holds the qd array. On exit, entries 1 to N hold */
+/* the eigenvalues in decreasing order, Z( 2*N+1 ) holds the */
+/* trace, and Z( 2*N+2 ) holds the sum of the eigenvalues. If */
+/* N > 2, then Z( 2*N+3 ) holds the iteration count, Z( 2*N+4 ) */
+/* holds NDIVS/NIN^2, and Z( 2*N+5 ) holds the percentage of */
+/* shifts that failed. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if the i-th argument is a scalar and had an illegal */
+/* value, then INFO = -i, if the i-th argument is an */
+/* array and the j-entry had an illegal value, then */
+/* INFO = -(i*100+j) */
+/* > 0: the algorithm failed */
+/* = 1, a split was marked by a positive value in E */
+/* = 2, current block of Z not diagonalized after 30*N */
+/* iterations (in inner while loop) */
+/* = 3, termination criterion of outer while loop not met */
+/* (program created more than N unreduced blocks) */
+
+/* Further Details */
+/* =============== */
+/* Local Variables: I0:N0 defines a current unreduced segment of Z. */
+/* The shifts are accumulated in SIGMA. Iteration count is in ITER. */
+/* Ping-pong is controlled by PP (alternates between 0 and 1). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments. */
+/* (in case SLASQ2 is not called by SLASQ1) */
+
+ /* Parameter adjustments */
+ --z__;
+
+ /* Function Body */
+ *info = 0;
+ eps = slamch_("Precision");
+ safmin = slamch_("Safe minimum");
+ tol = eps * 100.f;
+/* Computing 2nd power */
+ r__1 = tol;
+ tol2 = r__1 * r__1;
+
+ if (*n < 0) {
+ *info = -1;
+ xerbla_("SLASQ2", &c__1);
+ return 0;
+ } else if (*n == 0) {
+ return 0;
+ } else if (*n == 1) {
+
+/* 1-by-1 case. */
+
+ if (z__[1] < 0.f) {
+ *info = -201;
+ xerbla_("SLASQ2", &c__2);
+ }
+ return 0;
+ } else if (*n == 2) {
+
+/* 2-by-2 case. */
+
+ if (z__[2] < 0.f || z__[3] < 0.f) {
+ *info = -2;
+ xerbla_("SLASQ2", &c__2);
+ return 0;
+ } else if (z__[3] > z__[1]) {
+ d__ = z__[3];
+ z__[3] = z__[1];
+ z__[1] = d__;
+ }
+ z__[5] = z__[1] + z__[2] + z__[3];
+ if (z__[2] > z__[3] * tol2) {
+ t = (z__[1] - z__[3] + z__[2]) * .5f;
+ s = z__[3] * (z__[2] / t);
+ if (s <= t) {
+ s = z__[3] * (z__[2] / (t * (sqrt(s / t + 1.f) + 1.f)));
+ } else {
+ s = z__[3] * (z__[2] / (t + sqrt(t) * sqrt(t + s)));
+ }
+ t = z__[1] + (s + z__[2]);
+ z__[3] *= z__[1] / t;
+ z__[1] = t;
+ }
+ z__[2] = z__[3];
+ z__[6] = z__[2] + z__[1];
+ return 0;
+ }
+
+/* Check for negative data and compute sums of q's and e's. */
+
+ z__[*n * 2] = 0.f;
+ emin = z__[2];
+ qmax = 0.f;
+ zmax = 0.f;
+ d__ = 0.f;
+ e = 0.f;
+
+ i__1 = *n - 1 << 1;
+ for (k = 1; k <= i__1; k += 2) {
+ if (z__[k] < 0.f) {
+ *info = -(k + 200);
+ xerbla_("SLASQ2", &c__2);
+ return 0;
+ } else if (z__[k + 1] < 0.f) {
+ *info = -(k + 201);
+ xerbla_("SLASQ2", &c__2);
+ return 0;
+ }
+ d__ += z__[k];
+ e += z__[k + 1];
+/* Computing MAX */
+ r__1 = qmax, r__2 = z__[k];
+ qmax = dmax(r__1,r__2);
+/* Computing MIN */
+ r__1 = emin, r__2 = z__[k + 1];
+ emin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = max(qmax,zmax), r__2 = z__[k + 1];
+ zmax = dmax(r__1,r__2);
+/* L10: */
+ }
+ if (z__[(*n << 1) - 1] < 0.f) {
+ *info = -((*n << 1) + 199);
+ xerbla_("SLASQ2", &c__2);
+ return 0;
+ }
+ d__ += z__[(*n << 1) - 1];
+/* Computing MAX */
+ r__1 = qmax, r__2 = z__[(*n << 1) - 1];
+ qmax = dmax(r__1,r__2);
+ zmax = dmax(qmax,zmax);
+
+/* Check for diagonality. */
+
+ if (e == 0.f) {
+ i__1 = *n;
+ for (k = 2; k <= i__1; ++k) {
+ z__[k] = z__[(k << 1) - 1];
+/* L20: */
+ }
+ slasrt_("D", n, &z__[1], &iinfo);
+ z__[(*n << 1) - 1] = d__;
+ return 0;
+ }
+
+ trace = d__ + e;
+
+/* Check for zero data. */
+
+ if (trace == 0.f) {
+ z__[(*n << 1) - 1] = 0.f;
+ return 0;
+ }
+
+/* Check whether the machine is IEEE conformable. */
+
+/* IEEE = ILAENV( 10, 'SLASQ2', 'N', 1, 2, 3, 4 ).EQ.1 .AND. */
+/* $ ILAENV( 11, 'SLASQ2', 'N', 1, 2, 3, 4 ).EQ.1 */
+
+/* [11/15/2008] The case IEEE=.TRUE. has a problem in single precision with */
+/* some of the test matrices of type 16. The double precision code is fine. */
+
+ ieee = FALSE_;
+
+/* Rearrange data for locality: Z=(q1,qq1,e1,ee1,q2,qq2,e2,ee2,...). */
+
+ for (k = *n << 1; k >= 2; k += -2) {
+ z__[k * 2] = 0.f;
+ z__[(k << 1) - 1] = z__[k];
+ z__[(k << 1) - 2] = 0.f;
+ z__[(k << 1) - 3] = z__[k - 1];
+/* L30: */
+ }
+
+ i0 = 1;
+ n0 = *n;
+
+/* Reverse the qd-array, if warranted. */
+
+ if (z__[(i0 << 2) - 3] * 1.5f < z__[(n0 << 2) - 3]) {
+ ipn4 = i0 + n0 << 2;
+ i__1 = i0 + n0 - 1 << 1;
+ for (i4 = i0 << 2; i4 <= i__1; i4 += 4) {
+ temp = z__[i4 - 3];
+ z__[i4 - 3] = z__[ipn4 - i4 - 3];
+ z__[ipn4 - i4 - 3] = temp;
+ temp = z__[i4 - 1];
+ z__[i4 - 1] = z__[ipn4 - i4 - 5];
+ z__[ipn4 - i4 - 5] = temp;
+/* L40: */
+ }
+ }
+
+/* Initial split checking via dqd and Li's test. */
+
+ pp = 0;
+
+ for (k = 1; k <= 2; ++k) {
+
+ d__ = z__[(n0 << 2) + pp - 3];
+ i__1 = (i0 << 2) + pp;
+ for (i4 = (n0 - 1 << 2) + pp; i4 >= i__1; i4 += -4) {
+ if (z__[i4 - 1] <= tol2 * d__) {
+ z__[i4 - 1] = -0.f;
+ d__ = z__[i4 - 3];
+ } else {
+ d__ = z__[i4 - 3] * (d__ / (d__ + z__[i4 - 1]));
+ }
+/* L50: */
+ }
+
+/* dqd maps Z to ZZ plus Li's test. */
+
+ emin = z__[(i0 << 2) + pp + 1];
+ d__ = z__[(i0 << 2) + pp - 3];
+ i__1 = (n0 - 1 << 2) + pp;
+ for (i4 = (i0 << 2) + pp; i4 <= i__1; i4 += 4) {
+ z__[i4 - (pp << 1) - 2] = d__ + z__[i4 - 1];
+ if (z__[i4 - 1] <= tol2 * d__) {
+ z__[i4 - 1] = -0.f;
+ z__[i4 - (pp << 1) - 2] = d__;
+ z__[i4 - (pp << 1)] = 0.f;
+ d__ = z__[i4 + 1];
+ } else if (safmin * z__[i4 + 1] < z__[i4 - (pp << 1) - 2] &&
+ safmin * z__[i4 - (pp << 1) - 2] < z__[i4 + 1]) {
+ temp = z__[i4 + 1] / z__[i4 - (pp << 1) - 2];
+ z__[i4 - (pp << 1)] = z__[i4 - 1] * temp;
+ d__ *= temp;
+ } else {
+ z__[i4 - (pp << 1)] = z__[i4 + 1] * (z__[i4 - 1] / z__[i4 - (
+ pp << 1) - 2]);
+ d__ = z__[i4 + 1] * (d__ / z__[i4 - (pp << 1) - 2]);
+ }
+/* Computing MIN */
+ r__1 = emin, r__2 = z__[i4 - (pp << 1)];
+ emin = dmin(r__1,r__2);
+/* L60: */
+ }
+ z__[(n0 << 2) - pp - 2] = d__;
+
+/* Now find qmax. */
+
+ qmax = z__[(i0 << 2) - pp - 2];
+ i__1 = (n0 << 2) - pp - 2;
+ for (i4 = (i0 << 2) - pp + 2; i4 <= i__1; i4 += 4) {
+/* Computing MAX */
+ r__1 = qmax, r__2 = z__[i4];
+ qmax = dmax(r__1,r__2);
+/* L70: */
+ }
+
+/* Prepare for the next iteration on K. */
+
+ pp = 1 - pp;
+/* L80: */
+ }
+
+/* Initialise variables to pass to SLASQ3. */
+
+ ttype = 0;
+ dmin1 = 0.f;
+ dmin2 = 0.f;
+ dn = 0.f;
+ dn1 = 0.f;
+ dn2 = 0.f;
+ g = 0.f;
+ tau = 0.f;
+
+ iter = 2;
+ nfail = 0;
+ ndiv = n0 - i0 << 1;
+
+ i__1 = *n + 1;
+ for (iwhila = 1; iwhila <= i__1; ++iwhila) {
+ if (n0 < 1) {
+ goto L170;
+ }
+
+/* While array unfinished do */
+
+/* E(N0) holds the value of SIGMA when submatrix in I0:N0 */
+/* splits from the rest of the array, but is negated. */
+
+ desig = 0.f;
+ if (n0 == *n) {
+ sigma = 0.f;
+ } else {
+ sigma = -z__[(n0 << 2) - 1];
+ }
+ if (sigma < 0.f) {
+ *info = 1;
+ return 0;
+ }
+
+/* Find last unreduced submatrix's top index I0, find QMAX and */
+/* EMIN. Find Gershgorin-type bound if Q's much greater than E's. */
+
+ emax = 0.f;
+ if (n0 > i0) {
+ emin = (r__1 = z__[(n0 << 2) - 5], dabs(r__1));
+ } else {
+ emin = 0.f;
+ }
+ qmin = z__[(n0 << 2) - 3];
+ qmax = qmin;
+ for (i4 = n0 << 2; i4 >= 8; i4 += -4) {
+ if (z__[i4 - 5] <= 0.f) {
+ goto L100;
+ }
+ if (qmin >= emax * 4.f) {
+/* Computing MIN */
+ r__1 = qmin, r__2 = z__[i4 - 3];
+ qmin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = emax, r__2 = z__[i4 - 5];
+ emax = dmax(r__1,r__2);
+ }
+/* Computing MAX */
+ r__1 = qmax, r__2 = z__[i4 - 7] + z__[i4 - 5];
+ qmax = dmax(r__1,r__2);
+/* Computing MIN */
+ r__1 = emin, r__2 = z__[i4 - 5];
+ emin = dmin(r__1,r__2);
+/* L90: */
+ }
+ i4 = 4;
+
+L100:
+ i0 = i4 / 4;
+ pp = 0;
+
+ if (n0 - i0 > 1) {
+ dee = z__[(i0 << 2) - 3];
+ deemin = dee;
+ kmin = i0;
+ i__2 = (n0 << 2) - 3;
+ for (i4 = (i0 << 2) + 1; i4 <= i__2; i4 += 4) {
+ dee = z__[i4] * (dee / (dee + z__[i4 - 2]));
+ if (dee <= deemin) {
+ deemin = dee;
+ kmin = (i4 + 3) / 4;
+ }
+/* L110: */
+ }
+ if (kmin - i0 << 1 < n0 - kmin && deemin <= z__[(n0 << 2) - 3] *
+ .5f) {
+ ipn4 = i0 + n0 << 2;
+ pp = 2;
+ i__2 = i0 + n0 - 1 << 1;
+ for (i4 = i0 << 2; i4 <= i__2; i4 += 4) {
+ temp = z__[i4 - 3];
+ z__[i4 - 3] = z__[ipn4 - i4 - 3];
+ z__[ipn4 - i4 - 3] = temp;
+ temp = z__[i4 - 2];
+ z__[i4 - 2] = z__[ipn4 - i4 - 2];
+ z__[ipn4 - i4 - 2] = temp;
+ temp = z__[i4 - 1];
+ z__[i4 - 1] = z__[ipn4 - i4 - 5];
+ z__[ipn4 - i4 - 5] = temp;
+ temp = z__[i4];
+ z__[i4] = z__[ipn4 - i4 - 4];
+ z__[ipn4 - i4 - 4] = temp;
+/* L120: */
+ }
+ }
+ }
+
+/* Put -(initial shift) into DMIN. */
+
+/* Computing MAX */
+ r__1 = 0.f, r__2 = qmin - sqrt(qmin) * 2.f * sqrt(emax);
+ dmin__ = -dmax(r__1,r__2);
+
+/* Now I0:N0 is unreduced. */
+/* PP = 0 for ping, PP = 1 for pong. */
+/* PP = 2 indicates that flipping was applied to the Z array and */
+/* and that the tests for deflation upon entry in SLASQ3 */
+/* should not be performed. */
+
+ nbig = (n0 - i0 + 1) * 30;
+ i__2 = nbig;
+ for (iwhilb = 1; iwhilb <= i__2; ++iwhilb) {
+ if (i0 > n0) {
+ goto L150;
+ }
+
+/* While submatrix unfinished take a good dqds step. */
+
+ slasq3_(&i0, &n0, &z__[1], &pp, &dmin__, &sigma, &desig, &qmax, &
+ nfail, &iter, &ndiv, &ieee, &ttype, &dmin1, &dmin2, &dn, &
+ dn1, &dn2, &g, &tau);
+
+ pp = 1 - pp;
+
+/* When EMIN is very small check for splits. */
+
+ if (pp == 0 && n0 - i0 >= 3) {
+ if (z__[n0 * 4] <= tol2 * qmax || z__[(n0 << 2) - 1] <= tol2 *
+ sigma) {
+ splt = i0 - 1;
+ qmax = z__[(i0 << 2) - 3];
+ emin = z__[(i0 << 2) - 1];
+ oldemn = z__[i0 * 4];
+ i__3 = n0 - 3 << 2;
+ for (i4 = i0 << 2; i4 <= i__3; i4 += 4) {
+ if (z__[i4] <= tol2 * z__[i4 - 3] || z__[i4 - 1] <=
+ tol2 * sigma) {
+ z__[i4 - 1] = -sigma;
+ splt = i4 / 4;
+ qmax = 0.f;
+ emin = z__[i4 + 3];
+ oldemn = z__[i4 + 4];
+ } else {
+/* Computing MAX */
+ r__1 = qmax, r__2 = z__[i4 + 1];
+ qmax = dmax(r__1,r__2);
+/* Computing MIN */
+ r__1 = emin, r__2 = z__[i4 - 1];
+ emin = dmin(r__1,r__2);
+/* Computing MIN */
+ r__1 = oldemn, r__2 = z__[i4];
+ oldemn = dmin(r__1,r__2);
+ }
+/* L130: */
+ }
+ z__[(n0 << 2) - 1] = emin;
+ z__[n0 * 4] = oldemn;
+ i0 = splt + 1;
+ }
+ }
+
+/* L140: */
+ }
+
+ *info = 2;
+ return 0;
+
+/* end IWHILB */
+
+L150:
+
+/* L160: */
+ ;
+ }
+
+ *info = 3;
+ return 0;
+
+/* end IWHILA */
+
+L170:
+
+/* Move q's to the front. */
+
+ i__1 = *n;
+ for (k = 2; k <= i__1; ++k) {
+ z__[k] = z__[(k << 2) - 3];
+/* L180: */
+ }
+
+/* Sort and compute sum of eigenvalues. */
+
+ slasrt_("D", n, &z__[1], &iinfo);
+
+ e = 0.f;
+ for (k = *n; k >= 1; --k) {
+ e += z__[k];
+/* L190: */
+ }
+
+/* Store trace, sum(eigenvalues) and information on performance. */
+
+ z__[(*n << 1) + 1] = trace;
+ z__[(*n << 1) + 2] = e;
+ z__[(*n << 1) + 3] = (real) iter;
+/* Computing 2nd power */
+ i__1 = *n;
+ z__[(*n << 1) + 4] = (real) ndiv / (real) (i__1 * i__1);
+ z__[(*n << 1) + 5] = nfail * 100.f / (real) iter;
+ return 0;
+
+/* End of SLASQ2 */
+
+} /* slasq2_ */
diff --git a/contrib/libs/clapack/slasq3.c b/contrib/libs/clapack/slasq3.c
new file mode 100644
index 0000000000..409ab3f49d
--- /dev/null
+++ b/contrib/libs/clapack/slasq3.c
@@ -0,0 +1,346 @@
+/* slasq3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slasq3_(integer *i0, integer *n0, real *z__, integer *pp,
+ real *dmin__, real *sigma, real *desig, real *qmax, integer *nfail,
+ integer *iter, integer *ndiv, logical *ieee, integer *ttype, real *
+ dmin1, real *dmin2, real *dn, real *dn1, real *dn2, real *g, real *
+ tau)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ real s, t;
+ integer j4, nn;
+ real eps, tol;
+ integer n0in, ipn4;
+ real tol2, temp;
+ extern /* Subroutine */ int slasq4_(integer *, integer *, real *, integer
+ *, integer *, real *, real *, real *, real *, real *, real *,
+ real *, integer *, real *), slasq5_(integer *, integer *, real *,
+ integer *, real *, real *, real *, real *, real *, real *, real *,
+ logical *), slasq6_(integer *, integer *, real *, integer *,
+ real *, real *, real *, real *, real *, real *);
+ extern doublereal slamch_(char *);
+ extern logical sisnan_(real *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Osni Marques of the Lawrence Berkeley National -- */
+/* -- Laboratory and Beresford Parlett of the Univ. of California at -- */
+/* -- Berkeley -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLASQ3 checks for deflation, computes a shift (TAU) and calls dqds. */
+/* In case of failure it changes shifts, and tries again until output */
+/* is positive. */
+
+/* Arguments */
+/* ========= */
+
+/* I0 (input) INTEGER */
+/* First index. */
+
+/* N0 (input) INTEGER */
+/* Last index. */
+
+/* Z (input) REAL array, dimension ( 4*N ) */
+/* Z holds the qd array. */
+
+/* PP (input/output) INTEGER */
+/* PP=0 for ping, PP=1 for pong. */
+/* PP=2 indicates that flipping was applied to the Z array */
+/* and that the initial tests for deflation should not be */
+/* performed. */
+
+/* DMIN (output) REAL */
+/* Minimum value of d. */
+
+/* SIGMA (output) REAL */
+/* Sum of shifts used in current segment. */
+
+/* DESIG (input/output) REAL */
+/* Lower order part of SIGMA */
+
+/* QMAX (input) REAL */
+/* Maximum value of q. */
+
+/* NFAIL (output) INTEGER */
+/* Number of times shift was too big. */
+
+/* ITER (output) INTEGER */
+/* Number of iterations. */
+
+/* NDIV (output) INTEGER */
+/* Number of divisions. */
+
+/* IEEE (input) LOGICAL */
+/* Flag for IEEE or non IEEE arithmetic (passed to SLASQ5). */
+
+/* TTYPE (input/output) INTEGER */
+/* Shift type. */
+
+/* DMIN1, DMIN2, DN, DN1, DN2, G, TAU (input/output) REAL */
+/* These are passed as arguments in order to save their values */
+/* between calls to SLASQ3. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Function .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --z__;
+
+ /* Function Body */
+ n0in = *n0;
+ eps = slamch_("Precision");
+ tol = eps * 100.f;
+/* Computing 2nd power */
+ r__1 = tol;
+ tol2 = r__1 * r__1;
+
+/* Check for deflation. */
+
+L10:
+
+ if (*n0 < *i0) {
+ return 0;
+ }
+ if (*n0 == *i0) {
+ goto L20;
+ }
+ nn = (*n0 << 2) + *pp;
+ if (*n0 == *i0 + 1) {
+ goto L40;
+ }
+
+/* Check whether E(N0-1) is negligible, 1 eigenvalue. */
+
+ if (z__[nn - 5] > tol2 * (*sigma + z__[nn - 3]) && z__[nn - (*pp << 1) -
+ 4] > tol2 * z__[nn - 7]) {
+ goto L30;
+ }
+
+L20:
+
+ z__[(*n0 << 2) - 3] = z__[(*n0 << 2) + *pp - 3] + *sigma;
+ --(*n0);
+ goto L10;
+
+/* Check whether E(N0-2) is negligible, 2 eigenvalues. */
+
+L30:
+
+ if (z__[nn - 9] > tol2 * *sigma && z__[nn - (*pp << 1) - 8] > tol2 * z__[
+ nn - 11]) {
+ goto L50;
+ }
+
+L40:
+
+ if (z__[nn - 3] > z__[nn - 7]) {
+ s = z__[nn - 3];
+ z__[nn - 3] = z__[nn - 7];
+ z__[nn - 7] = s;
+ }
+ if (z__[nn - 5] > z__[nn - 3] * tol2) {
+ t = (z__[nn - 7] - z__[nn - 3] + z__[nn - 5]) * .5f;
+ s = z__[nn - 3] * (z__[nn - 5] / t);
+ if (s <= t) {
+ s = z__[nn - 3] * (z__[nn - 5] / (t * (sqrt(s / t + 1.f) + 1.f)));
+ } else {
+ s = z__[nn - 3] * (z__[nn - 5] / (t + sqrt(t) * sqrt(t + s)));
+ }
+ t = z__[nn - 7] + (s + z__[nn - 5]);
+ z__[nn - 3] *= z__[nn - 7] / t;
+ z__[nn - 7] = t;
+ }
+ z__[(*n0 << 2) - 7] = z__[nn - 7] + *sigma;
+ z__[(*n0 << 2) - 3] = z__[nn - 3] + *sigma;
+ *n0 += -2;
+ goto L10;
+
+L50:
+ if (*pp == 2) {
+ *pp = 0;
+ }
+
+/* Reverse the qd-array, if warranted. */
+
+ if (*dmin__ <= 0.f || *n0 < n0in) {
+ if (z__[(*i0 << 2) + *pp - 3] * 1.5f < z__[(*n0 << 2) + *pp - 3]) {
+ ipn4 = *i0 + *n0 << 2;
+ i__1 = *i0 + *n0 - 1 << 1;
+ for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
+ temp = z__[j4 - 3];
+ z__[j4 - 3] = z__[ipn4 - j4 - 3];
+ z__[ipn4 - j4 - 3] = temp;
+ temp = z__[j4 - 2];
+ z__[j4 - 2] = z__[ipn4 - j4 - 2];
+ z__[ipn4 - j4 - 2] = temp;
+ temp = z__[j4 - 1];
+ z__[j4 - 1] = z__[ipn4 - j4 - 5];
+ z__[ipn4 - j4 - 5] = temp;
+ temp = z__[j4];
+ z__[j4] = z__[ipn4 - j4 - 4];
+ z__[ipn4 - j4 - 4] = temp;
+/* L60: */
+ }
+ if (*n0 - *i0 <= 4) {
+ z__[(*n0 << 2) + *pp - 1] = z__[(*i0 << 2) + *pp - 1];
+ z__[(*n0 << 2) - *pp] = z__[(*i0 << 2) - *pp];
+ }
+/* Computing MIN */
+ r__1 = *dmin2, r__2 = z__[(*n0 << 2) + *pp - 1];
+ *dmin2 = dmin(r__1,r__2);
+/* Computing MIN */
+ r__1 = z__[(*n0 << 2) + *pp - 1], r__2 = z__[(*i0 << 2) + *pp - 1]
+ , r__1 = min(r__1,r__2), r__2 = z__[(*i0 << 2) + *pp + 3];
+ z__[(*n0 << 2) + *pp - 1] = dmin(r__1,r__2);
+/* Computing MIN */
+ r__1 = z__[(*n0 << 2) - *pp], r__2 = z__[(*i0 << 2) - *pp], r__1 =
+ min(r__1,r__2), r__2 = z__[(*i0 << 2) - *pp + 4];
+ z__[(*n0 << 2) - *pp] = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = *qmax, r__2 = z__[(*i0 << 2) + *pp - 3], r__1 = max(r__1,
+ r__2), r__2 = z__[(*i0 << 2) + *pp + 1];
+ *qmax = dmax(r__1,r__2);
+ *dmin__ = -0.f;
+ }
+ }
+
+/* Choose a shift. */
+
+ slasq4_(i0, n0, &z__[1], pp, &n0in, dmin__, dmin1, dmin2, dn, dn1, dn2,
+ tau, ttype, g);
+
+/* Call dqds until DMIN > 0. */
+
+L70:
+
+ slasq5_(i0, n0, &z__[1], pp, tau, dmin__, dmin1, dmin2, dn, dn1, dn2,
+ ieee);
+
+ *ndiv += *n0 - *i0 + 2;
+ ++(*iter);
+
+/* Check status. */
+
+ if (*dmin__ >= 0.f && *dmin1 > 0.f) {
+
+/* Success. */
+
+ goto L90;
+
+ } else if (*dmin__ < 0.f && *dmin1 > 0.f && z__[(*n0 - 1 << 2) - *pp] <
+ tol * (*sigma + *dn1) && dabs(*dn) < tol * *sigma) {
+
+/* Convergence hidden by negative DN. */
+
+ z__[(*n0 - 1 << 2) - *pp + 2] = 0.f;
+ *dmin__ = 0.f;
+ goto L90;
+ } else if (*dmin__ < 0.f) {
+
+/* TAU too big. Select new TAU and try again. */
+
+ ++(*nfail);
+ if (*ttype < -22) {
+
+/* Failed twice. Play it safe. */
+
+ *tau = 0.f;
+ } else if (*dmin1 > 0.f) {
+
+/* Late failure. Gives excellent shift. */
+
+ *tau = (*tau + *dmin__) * (1.f - eps * 2.f);
+ *ttype += -11;
+ } else {
+
+/* Early failure. Divide by 4. */
+
+ *tau *= .25f;
+ *ttype += -12;
+ }
+ goto L70;
+ } else if (sisnan_(dmin__)) {
+
+/* NaN. */
+
+ if (*tau == 0.f) {
+ goto L80;
+ } else {
+ *tau = 0.f;
+ goto L70;
+ }
+ } else {
+
+/* Possible underflow. Play it safe. */
+
+ goto L80;
+ }
+
+/* Risk of underflow. */
+
+L80:
+ slasq6_(i0, n0, &z__[1], pp, dmin__, dmin1, dmin2, dn, dn1, dn2);
+ *ndiv += *n0 - *i0 + 2;
+ ++(*iter);
+ *tau = 0.f;
+
+L90:
+ if (*tau < *sigma) {
+ *desig += *tau;
+ t = *sigma + *desig;
+ *desig -= t - *sigma;
+ } else {
+ t = *sigma + *tau;
+ *desig = *sigma - (t - *tau) + *desig;
+ }
+ *sigma = t;
+
+ return 0;
+
+/* End of SLASQ3 */
+
+} /* slasq3_ */
diff --git a/contrib/libs/clapack/slasq4.c b/contrib/libs/clapack/slasq4.c
new file mode 100644
index 0000000000..8f3d36d1b8
--- /dev/null
+++ b/contrib/libs/clapack/slasq4.c
@@ -0,0 +1,402 @@
+/* slasq4.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slasq4_(integer *i0, integer *n0, real *z__, integer *pp,
+	 integer *n0in, real *dmin__, real *dmin1, real *dmin2, real *dn,
+	real *dn1, real *dn2, real *tau, integer *ttype, real *g)
+{
+    /* System generated locals */
+    integer i__1;
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    real s, a2, b1, b2;
+    integer i4, nn, np;
+    real gam, gap1, gap2;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Osni Marques of the Lawrence Berkeley National -- */
+/* -- Laboratory and Beresford Parlett of the Univ. of California at -- */
+/* -- Berkeley -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLASQ4 computes an approximation TAU to the smallest eigenvalue */
+/* using values of d from the previous transform. */
+
+/* I0 (input) INTEGER */
+/* First index. */
+
+/* N0 (input) INTEGER */
+/* Last index. */
+
+/* Z (input) REAL array, dimension ( 4*N ) */
+/* Z holds the qd array. */
+
+/* PP (input) INTEGER */
+/* PP=0 for ping, PP=1 for pong. */
+
+/* N0IN (input) INTEGER */
+/* The value of N0 at start of EIGTEST. */
+
+/* DMIN (input) REAL */
+/* Minimum value of d. */
+
+/* DMIN1 (input) REAL */
+/* Minimum value of d, excluding D( N0 ). */
+
+/* DMIN2 (input) REAL */
+/* Minimum value of d, excluding D( N0 ) and D( N0-1 ). */
+
+/* DN (input) REAL */
+/* d(N) */
+
+/* DN1 (input) REAL */
+/* d(N-1) */
+
+/* DN2 (input) REAL */
+/* d(N-2) */
+
+/* TAU (output) REAL */
+/* This is the shift. */
+
+/* TTYPE (output) INTEGER */
+/* Shift type. */
+
+/* G (input/output) REAL */
+/* G is passed as an argument in order to save its value between */
+/* calls to SLASQ4. */
+
+/* Further Details */
+/* =============== */
+/* CNST1 = 9/16 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* A negative DMIN forces the shift to take that absolute value */
+/* TTYPE records the type of shift. */
+
+    /* Parameter adjustments */
+    --z__;
+
+    /* Function Body */
+    if (*dmin__ <= 0.f) {
+	*tau = -(*dmin__);
+	*ttype = -1;
+	return 0;
+    }
+
+/* NN indexes the last entry of the active window I0:N0 in the */
+/* interleaved ping-pong layout of Z. */
+
+    nn = (*n0 << 2) + *pp;
+    if (*n0in == *n0) {
+
+/* No eigenvalues deflated. */
+
+	if (*dmin__ == *dn || *dmin__ == *dn1) {
+
+	    b1 = sqrt(z__[nn - 3]) * sqrt(z__[nn - 5]);
+	    b2 = sqrt(z__[nn - 7]) * sqrt(z__[nn - 9]);
+	    a2 = z__[nn - 7] + z__[nn - 5];
+
+/* Cases 2 and 3. */
+
+	    if (*dmin__ == *dn && *dmin1 == *dn1) {
+		gap2 = *dmin2 - a2 - *dmin2 * .25f;
+		if (gap2 > 0.f && gap2 > b2) {
+		    gap1 = a2 - *dn - b2 / gap2 * b2;
+		} else {
+		    gap1 = a2 - *dn - (b1 + b2);
+		}
+		if (gap1 > 0.f && gap1 > b1) {
+/* Computing MAX */
+		    r__1 = *dn - b1 / gap1 * b1, r__2 = *dmin__ * .5f;
+		    s = dmax(r__1,r__2);
+		    *ttype = -2;
+		} else {
+		    s = 0.f;
+		    if (*dn > b1) {
+			s = *dn - b1;
+		    }
+		    if (a2 > b1 + b2) {
+/* Computing MIN */
+			r__1 = s, r__2 = a2 - (b1 + b2);
+			s = dmin(r__1,r__2);
+		    }
+/* Computing MAX */
+		    r__1 = s, r__2 = *dmin__ * .333f;
+		    s = dmax(r__1,r__2);
+		    *ttype = -3;
+		}
+	    } else {
+
+/* Case 4. */
+
+		*ttype = -4;
+		s = *dmin__ * .25f;
+		if (*dmin__ == *dn) {
+		    gam = *dn;
+		    a2 = 0.f;
+		    if (z__[nn - 5] > z__[nn - 7]) {
+			return 0;
+		    }
+		    b2 = z__[nn - 5] / z__[nn - 7];
+		    np = nn - 9;
+		} else {
+		    np = nn - (*pp << 1);
+		    b2 = z__[np - 2];
+		    gam = *dn1;
+		    if (z__[np - 4] > z__[np - 2]) {
+			return 0;
+		    }
+		    a2 = z__[np - 4] / z__[np - 2];
+		    if (z__[nn - 9] > z__[nn - 11]) {
+			return 0;
+		    }
+/* NOTE(review): B2 (set above from Z(NP-2)) is overwritten here; this */
+/* matches the Fortran source this file was generated from. */
+		    b2 = z__[nn - 9] / z__[nn - 11];
+		    np = nn - 13;
+		}
+
+/* Approximate contribution to norm squared from I < NN-1. */
+
+		a2 += b2;
+		i__1 = (*i0 << 2) - 1 + *pp;
+		for (i4 = np; i4 >= i__1; i4 += -4) {
+		    if (b2 == 0.f) {
+			goto L20;
+		    }
+		    b1 = b2;
+		    if (z__[i4] > z__[i4 - 2]) {
+			return 0;
+		    }
+		    b2 *= z__[i4] / z__[i4 - 2];
+		    a2 += b2;
+		    if (dmax(b2,b1) * 100.f < a2 || .563f < a2) {
+			goto L20;
+		    }
+/* L10: */
+		}
+L20:
+		a2 *= 1.05f;
+
+/* Rayleigh quotient residual bound. */
+
+		if (a2 < .563f) {
+		    s = gam * (1.f - sqrt(a2)) / (a2 + 1.f);
+		}
+	    }
+	} else if (*dmin__ == *dn2) {
+
+/* Case 5. */
+
+	    *ttype = -5;
+	    s = *dmin__ * .25f;
+
+/* Compute contribution to norm squared from I > NN-2. */
+
+	    np = nn - (*pp << 1);
+	    b1 = z__[np - 2];
+	    b2 = z__[np - 6];
+	    gam = *dn2;
+	    if (z__[np - 8] > b2 || z__[np - 4] > b1) {
+		return 0;
+	    }
+	    a2 = z__[np - 8] / b2 * (z__[np - 4] / b1 + 1.f);
+
+/* Approximate contribution to norm squared from I < NN-2. */
+
+	    if (*n0 - *i0 > 2) {
+		b2 = z__[nn - 13] / z__[nn - 15];
+		a2 += b2;
+		i__1 = (*i0 << 2) - 1 + *pp;
+		for (i4 = nn - 17; i4 >= i__1; i4 += -4) {
+		    if (b2 == 0.f) {
+			goto L40;
+		    }
+		    b1 = b2;
+		    if (z__[i4] > z__[i4 - 2]) {
+			return 0;
+		    }
+		    b2 *= z__[i4] / z__[i4 - 2];
+		    a2 += b2;
+		    if (dmax(b2,b1) * 100.f < a2 || .563f < a2) {
+			goto L40;
+		    }
+/* L30: */
+		}
+L40:
+		a2 *= 1.05f;
+	    }
+
+	    if (a2 < .563f) {
+		s = gam * (1.f - sqrt(a2)) / (a2 + 1.f);
+	    }
+	} else {
+
+/* Case 6, no information to guide us. */
+
+	    if (*ttype == -6) {
+		*g += (1.f - *g) * .333f;
+	    } else if (*ttype == -18) {
+		*g = .083250000000000005f;
+	    } else {
+		*g = .25f;
+	    }
+	    s = *g * *dmin__;
+	    *ttype = -6;
+	}
+
+    } else if (*n0in == *n0 + 1) {
+
+/* One eigenvalue just deflated. Use DMIN1, DN1 for DMIN and DN. */
+
+	if (*dmin1 == *dn1 && *dmin2 == *dn2) {
+
+/* Cases 7 and 8. */
+
+	    *ttype = -7;
+	    s = *dmin1 * .333f;
+	    if (z__[nn - 5] > z__[nn - 7]) {
+		return 0;
+	    }
+	    b1 = z__[nn - 5] / z__[nn - 7];
+	    b2 = b1;
+	    if (b2 == 0.f) {
+		goto L60;
+	    }
+	    i__1 = (*i0 << 2) - 1 + *pp;
+	    for (i4 = (*n0 << 2) - 9 + *pp; i4 >= i__1; i4 += -4) {
+		a2 = b1;
+		if (z__[i4] > z__[i4 - 2]) {
+		    return 0;
+		}
+		b1 *= z__[i4] / z__[i4 - 2];
+		b2 += b1;
+		if (dmax(b1,a2) * 100.f < b2) {
+		    goto L60;
+		}
+/* L50: */
+	    }
+L60:
+	    b2 = sqrt(b2 * 1.05f);
+/* Computing 2nd power */
+	    r__1 = b2;
+	    a2 = *dmin1 / (r__1 * r__1 + 1.f);
+	    gap2 = *dmin2 * .5f - a2;
+	    if (gap2 > 0.f && gap2 > b2 * a2) {
+/* Computing MAX */
+		r__1 = s, r__2 = a2 * (1.f - a2 * 1.01f * (b2 / gap2) * b2);
+		s = dmax(r__1,r__2);
+	    } else {
+/* Computing MAX */
+		r__1 = s, r__2 = a2 * (1.f - b2 * 1.01f);
+		s = dmax(r__1,r__2);
+		*ttype = -8;
+	    }
+	} else {
+
+/* Case 9. */
+
+	    s = *dmin1 * .25f;
+	    if (*dmin1 == *dn1) {
+		s = *dmin1 * .5f;
+	    }
+	    *ttype = -9;
+	}
+
+    } else if (*n0in == *n0 + 2) {
+
+/* Two eigenvalues deflated. Use DMIN2, DN2 for DMIN and DN. */
+
+/* Cases 10 and 11. */
+
+	if (*dmin2 == *dn2 && z__[nn - 5] * 2.f < z__[nn - 7]) {
+	    *ttype = -10;
+	    s = *dmin2 * .333f;
+	    if (z__[nn - 5] > z__[nn - 7]) {
+		return 0;
+	    }
+	    b1 = z__[nn - 5] / z__[nn - 7];
+	    b2 = b1;
+	    if (b2 == 0.f) {
+		goto L80;
+	    }
+	    i__1 = (*i0 << 2) - 1 + *pp;
+	    for (i4 = (*n0 << 2) - 9 + *pp; i4 >= i__1; i4 += -4) {
+		if (z__[i4] > z__[i4 - 2]) {
+		    return 0;
+		}
+		b1 *= z__[i4] / z__[i4 - 2];
+		b2 += b1;
+		if (b1 * 100.f < b2) {
+		    goto L80;
+		}
+/* L70: */
+	    }
+L80:
+	    b2 = sqrt(b2 * 1.05f);
+/* Computing 2nd power */
+	    r__1 = b2;
+	    a2 = *dmin2 / (r__1 * r__1 + 1.f);
+	    gap2 = z__[nn - 7] + z__[nn - 9] - sqrt(z__[nn - 11]) * sqrt(z__[
+		    nn - 9]) - a2;
+	    if (gap2 > 0.f && gap2 > b2 * a2) {
+/* Computing MAX */
+		r__1 = s, r__2 = a2 * (1.f - a2 * 1.01f * (b2 / gap2) * b2);
+		s = dmax(r__1,r__2);
+	    } else {
+/* Computing MAX */
+		r__1 = s, r__2 = a2 * (1.f - b2 * 1.01f);
+		s = dmax(r__1,r__2);
+	    }
+	} else {
+	    s = *dmin2 * .25f;
+	    *ttype = -11;
+	}
+    } else if (*n0in > *n0 + 2) {
+
+/* Case 12, more than two eigenvalues deflated. No information. */
+
+	s = 0.f;
+	*ttype = -12;
+    }
+
+    *tau = s;
+    return 0;
+
+/* End of SLASQ4 */
+
+} /* slasq4_ */
diff --git a/contrib/libs/clapack/slasq5.c b/contrib/libs/clapack/slasq5.c
new file mode 100644
index 0000000000..4faff01127
--- /dev/null
+++ b/contrib/libs/clapack/slasq5.c
@@ -0,0 +1,239 @@
+/* slasq5.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slasq5_(integer *i0, integer *n0, real *z__, integer *pp,
+	 real *tau, real *dmin__, real *dmin1, real *dmin2, real *dn, real *
+	dnm1, real *dnm2, logical *ieee)
+{
+    /* System generated locals */
+    integer i__1;
+    real r__1, r__2;
+
+    /* Local variables */
+    real d__;
+    integer j4, j4p2;
+    real emin, temp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Osni Marques of the Lawrence Berkeley National -- */
+/* -- Laboratory and Beresford Parlett of the Univ. of California at -- */
+/* -- Berkeley -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLASQ5 computes one dqds transform in ping-pong form, one */
+/* version for IEEE machines another for non IEEE machines. */
+
+/* Arguments */
+/* ========= */
+
+/* I0 (input) INTEGER */
+/* First index. */
+
+/* N0 (input) INTEGER */
+/* Last index. */
+
+/* Z (input) REAL array, dimension ( 4*N ) */
+/* Z holds the qd array. EMIN is stored in Z(4*N0) to avoid */
+/* an extra argument. */
+
+/* PP (input) INTEGER */
+/* PP=0 for ping, PP=1 for pong. */
+
+/* TAU (input) REAL */
+/* This is the shift. */
+
+/* DMIN (output) REAL */
+/* Minimum value of d. */
+
+/* DMIN1 (output) REAL */
+/* Minimum value of d, excluding D( N0 ). */
+
+/* DMIN2 (output) REAL */
+/* Minimum value of d, excluding D( N0 ) and D( N0-1 ). */
+
+/* DN (output) REAL */
+/* d(N0), the last value of d. */
+
+/* DNM1 (output) REAL */
+/* d(N0-1). */
+
+/* DNM2 (output) REAL */
+/* d(N0-2). */
+
+/* IEEE (input) LOGICAL */
+/* Flag for IEEE or non IEEE arithmetic. */
+
+/* ===================================================================== */
+
+/* .. Parameter .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --z__;
+
+    /* Function Body */
+
+/* Quick return: subproblems with fewer than 3 elements are removed */
+/* by deflation in the caller (SLASQ3) -- nothing to do here. */
+
+    if (*n0 - *i0 - 1 <= 0) {
+	return 0;
+    }
+
+    j4 = (*i0 << 2) + *pp - 3;
+    emin = z__[j4 + 4];
+    d__ = z__[j4] - *tau;
+    *dmin__ = d__;
+    *dmin1 = -z__[j4];
+
+    if (*ieee) {
+
+/* Code for IEEE arithmetic. */
+
+	if (*pp == 0) {
+	    i__1 = *n0 - 3 << 2;
+	    for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
+		z__[j4 - 2] = d__ + z__[j4 - 1];
+		temp = z__[j4 + 1] / z__[j4 - 2];
+		d__ = d__ * temp - *tau;
+		*dmin__ = dmin(*dmin__,d__);
+		z__[j4] = z__[j4 - 1] * temp;
+/* Computing MIN */
+		r__1 = z__[j4];
+		emin = dmin(r__1,emin);
+/* L10: */
+	    }
+	} else {
+	    i__1 = *n0 - 3 << 2;
+	    for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
+		z__[j4 - 3] = d__ + z__[j4];
+		temp = z__[j4 + 2] / z__[j4 - 3];
+		d__ = d__ * temp - *tau;
+		*dmin__ = dmin(*dmin__,d__);
+		z__[j4 - 1] = z__[j4] * temp;
+/* Computing MIN */
+		r__1 = z__[j4 - 1];
+		emin = dmin(r__1,emin);
+/* L20: */
+	    }
+	}
+
+/* Unroll last two steps. */
+
+	*dnm2 = d__;
+	*dmin2 = *dmin__;
+	j4 = (*n0 - 2 << 2) - *pp;
+	j4p2 = j4 + (*pp << 1) - 1;
+	z__[j4 - 2] = *dnm2 + z__[j4p2];
+	z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+	*dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau;
+	*dmin__ = dmin(*dmin__,*dnm1);
+
+	*dmin1 = *dmin__;
+	j4 += 4;
+	j4p2 = j4 + (*pp << 1) - 1;
+	z__[j4 - 2] = *dnm1 + z__[j4p2];
+	z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+	*dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau;
+	*dmin__ = dmin(*dmin__,*dn);
+
+    } else {
+
+/* Code for non IEEE arithmetic. */
+
+/* Every division is guarded: D < 0 means the shift TAU was too */
+/* large, so return early and let the caller choose a new TAU. */
+
+	if (*pp == 0) {
+	    i__1 = *n0 - 3 << 2;
+	    for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
+		z__[j4 - 2] = d__ + z__[j4 - 1];
+		if (d__ < 0.f) {
+		    return 0;
+		} else {
+		    z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]);
+		    d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]) - *tau;
+		}
+		*dmin__ = dmin(*dmin__,d__);
+/* Computing MIN */
+		r__1 = emin, r__2 = z__[j4];
+		emin = dmin(r__1,r__2);
+/* L30: */
+	    }
+	} else {
+	    i__1 = *n0 - 3 << 2;
+	    for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
+		z__[j4 - 3] = d__ + z__[j4];
+		if (d__ < 0.f) {
+		    return 0;
+		} else {
+		    z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]);
+		    d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]) - *tau;
+		}
+		*dmin__ = dmin(*dmin__,d__);
+/* Computing MIN */
+		r__1 = emin, r__2 = z__[j4 - 1];
+		emin = dmin(r__1,r__2);
+/* L40: */
+	    }
+	}
+
+/* Unroll last two steps. */
+
+	*dnm2 = d__;
+	*dmin2 = *dmin__;
+	j4 = (*n0 - 2 << 2) - *pp;
+	j4p2 = j4 + (*pp << 1) - 1;
+	z__[j4 - 2] = *dnm2 + z__[j4p2];
+	if (*dnm2 < 0.f) {
+	    return 0;
+	} else {
+	    z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+	    *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau;
+	}
+	*dmin__ = dmin(*dmin__,*dnm1);
+
+	*dmin1 = *dmin__;
+	j4 += 4;
+	j4p2 = j4 + (*pp << 1) - 1;
+	z__[j4 - 2] = *dnm1 + z__[j4p2];
+	if (*dnm1 < 0.f) {
+	    return 0;
+	} else {
+	    z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+	    *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau;
+	}
+	*dmin__ = dmin(*dmin__,*dn);
+
+    }
+
+    z__[j4 + 2] = *dn;
+    z__[(*n0 << 2) - *pp] = emin;
+    return 0;
+
+/* End of SLASQ5 */
+
+} /* slasq5_ */
diff --git a/contrib/libs/clapack/slasq6.c b/contrib/libs/clapack/slasq6.c
new file mode 100644
index 0000000000..b856800e38
--- /dev/null
+++ b/contrib/libs/clapack/slasq6.c
@@ -0,0 +1,212 @@
+/* slasq6.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slasq6_(integer *i0, integer *n0, real *z__, integer *pp,
+ real *dmin__, real *dmin1, real *dmin2, real *dn, real *dnm1, real *
+ dnm2)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2;
+
+ /* Local variables */
+ real d__;
+ integer j4, j4p2;
+ real emin, temp;
+ extern doublereal slamch_(char *);
+ real safmin;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Osni Marques of the Lawrence Berkeley National -- */
+/* -- Laboratory and Beresford Parlett of the Univ. of California at -- */
+/* -- Berkeley -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLASQ6 computes one dqd (shift equal to zero) transform in */
+/* ping-pong form, with protection against underflow and overflow. */
+
+/* Arguments */
+/* ========= */
+
+/* I0 (input) INTEGER */
+/* First index. */
+
+/* N0 (input) INTEGER */
+/* Last index. */
+
+/* Z (input) REAL array, dimension ( 4*N ) */
+/* Z holds the qd array. EMIN is stored in Z(4*N0) to avoid */
+/* an extra argument. */
+
+/* PP (input) INTEGER */
+/* PP=0 for ping, PP=1 for pong. */
+
+/* DMIN (output) REAL */
+/* Minimum value of d. */
+
+/* DMIN1 (output) REAL */
+/* Minimum value of d, excluding D( N0 ). */
+
+/* DMIN2 (output) REAL */
+/* Minimum value of d, excluding D( N0 ) and D( N0-1 ). */
+
+/* DN (output) REAL */
+/* d(N0), the last value of d. */
+
+/* DNM1 (output) REAL */
+/* d(N0-1). */
+
+/* DNM2 (output) REAL */
+/* d(N0-2). */
+
+/* ===================================================================== */
+
+/* .. Parameter .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Function .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --z__;
+
+ /* Function Body */
+ if (*n0 - *i0 - 1 <= 0) {
+ return 0;
+ }
+
+ safmin = slamch_("Safe minimum");
+ j4 = (*i0 << 2) + *pp - 3;
+ emin = z__[j4 + 4];
+ d__ = z__[j4];
+ *dmin__ = d__;
+
+ if (*pp == 0) {
+ i__1 = *n0 - 3 << 2;
+ for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
+ z__[j4 - 2] = d__ + z__[j4 - 1];
+ if (z__[j4 - 2] == 0.f) {
+ z__[j4] = 0.f;
+ d__ = z__[j4 + 1];
+ *dmin__ = d__;
+ emin = 0.f;
+ } else if (safmin * z__[j4 + 1] < z__[j4 - 2] && safmin * z__[j4
+ - 2] < z__[j4 + 1]) {
+ temp = z__[j4 + 1] / z__[j4 - 2];
+ z__[j4] = z__[j4 - 1] * temp;
+ d__ *= temp;
+ } else {
+ z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]);
+ d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]);
+ }
+ *dmin__ = dmin(*dmin__,d__);
+/* Computing MIN */
+ r__1 = emin, r__2 = z__[j4];
+ emin = dmin(r__1,r__2);
+/* L10: */
+ }
+ } else {
+ i__1 = *n0 - 3 << 2;
+ for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
+ z__[j4 - 3] = d__ + z__[j4];
+ if (z__[j4 - 3] == 0.f) {
+ z__[j4 - 1] = 0.f;
+ d__ = z__[j4 + 2];
+ *dmin__ = d__;
+ emin = 0.f;
+ } else if (safmin * z__[j4 + 2] < z__[j4 - 3] && safmin * z__[j4
+ - 3] < z__[j4 + 2]) {
+ temp = z__[j4 + 2] / z__[j4 - 3];
+ z__[j4 - 1] = z__[j4] * temp;
+ d__ *= temp;
+ } else {
+ z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]);
+ d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]);
+ }
+ *dmin__ = dmin(*dmin__,d__);
+/* Computing MIN */
+ r__1 = emin, r__2 = z__[j4 - 1];
+ emin = dmin(r__1,r__2);
+/* L20: */
+ }
+ }
+
+/* Unroll last two steps. */
+
+ *dnm2 = d__;
+ *dmin2 = *dmin__;
+ j4 = (*n0 - 2 << 2) - *pp;
+ j4p2 = j4 + (*pp << 1) - 1;
+ z__[j4 - 2] = *dnm2 + z__[j4p2];
+ if (z__[j4 - 2] == 0.f) {
+ z__[j4] = 0.f;
+ *dnm1 = z__[j4p2 + 2];
+ *dmin__ = *dnm1;
+ emin = 0.f;
+ } else if (safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] <
+ z__[j4p2 + 2]) {
+ temp = z__[j4p2 + 2] / z__[j4 - 2];
+ z__[j4] = z__[j4p2] * temp;
+ *dnm1 = *dnm2 * temp;
+ } else {
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]);
+ }
+ *dmin__ = dmin(*dmin__,*dnm1);
+
+ *dmin1 = *dmin__;
+ j4 += 4;
+ j4p2 = j4 + (*pp << 1) - 1;
+ z__[j4 - 2] = *dnm1 + z__[j4p2];
+ if (z__[j4 - 2] == 0.f) {
+ z__[j4] = 0.f;
+ *dn = z__[j4p2 + 2];
+ *dmin__ = *dn;
+ emin = 0.f;
+ } else if (safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] <
+ z__[j4p2 + 2]) {
+ temp = z__[j4p2 + 2] / z__[j4 - 2];
+ z__[j4] = z__[j4p2] * temp;
+ *dn = *dnm1 * temp;
+ } else {
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]);
+ }
+ *dmin__ = dmin(*dmin__,*dn);
+
+ z__[j4 + 2] = *dn;
+ z__[(*n0 << 2) - *pp] = emin;
+ return 0;
+
+/* End of SLASQ6 */
+
+} /* slasq6_ */
diff --git a/contrib/libs/clapack/slasr.c b/contrib/libs/clapack/slasr.c
new file mode 100644
index 0000000000..2b22242616
--- /dev/null
+++ b/contrib/libs/clapack/slasr.c
@@ -0,0 +1,452 @@
+/* slasr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slasr_(char *side, char *pivot, char *direct, integer *m,
+ integer *n, real *c__, real *s, real *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, info;
+ real temp;
+ extern logical lsame_(char *, char *);
+ real ctemp, stemp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLASR applies a sequence of plane rotations to a real matrix A, */
+/* from either the left or the right. */
+
+/* When SIDE = 'L', the transformation takes the form */
+
+/* A := P*A */
+
+/* and when SIDE = 'R', the transformation takes the form */
+
+/* A := A*P**T */
+
+/* where P is an orthogonal matrix consisting of a sequence of z plane */
+/* rotations, with z = M when SIDE = 'L' and z = N when SIDE = 'R', */
+/* and P**T is the transpose of P. */
+
+/* When DIRECT = 'F' (Forward sequence), then */
+
+/* P = P(z-1) * ... * P(2) * P(1) */
+
+/* and when DIRECT = 'B' (Backward sequence), then */
+
+/* P = P(1) * P(2) * ... * P(z-1) */
+
+/* where P(k) is a plane rotation matrix defined by the 2-by-2 rotation */
+
+/* R(k) = ( c(k) s(k) ) */
+/* = ( -s(k) c(k) ). */
+
+/* When PIVOT = 'V' (Variable pivot), the rotation is performed */
+/* for the plane (k,k+1), i.e., P(k) has the form */
+
+/* P(k) = ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+/* ( c(k) s(k) ) */
+/* ( -s(k) c(k) ) */
+/* ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+
+/* where R(k) appears as a rank-2 modification to the identity matrix in */
+/* rows and columns k and k+1. */
+
+/* When PIVOT = 'T' (Top pivot), the rotation is performed for the */
+/* plane (1,k+1), so P(k) has the form */
+
+/* P(k) = ( c(k) s(k) ) */
+/* ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+/* ( -s(k) c(k) ) */
+/* ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+
+/* where R(k) appears in rows and columns 1 and k+1. */
+
+/* Similarly, when PIVOT = 'B' (Bottom pivot), the rotation is */
+/* performed for the plane (k,z), giving P(k) the form */
+
+/* P(k) = ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+/* ( c(k) s(k) ) */
+/* ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+/* ( -s(k) c(k) ) */
+
+/* where R(k) appears in rows and columns k and z. The rotations are */
+/* performed without ever forming P(k) explicitly. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* Specifies whether the plane rotation matrix P is applied to */
+/* A on the left or the right. */
+/* = 'L': Left, compute A := P*A */
+/* = 'R': Right, compute A:= A*P**T */
+
+/* PIVOT (input) CHARACTER*1 */
+/* Specifies the plane for which P(k) is a plane rotation */
+/* matrix. */
+/* = 'V': Variable pivot, the plane (k,k+1) */
+/* = 'T': Top pivot, the plane (1,k+1) */
+/* = 'B': Bottom pivot, the plane (k,z) */
+
+/* DIRECT (input) CHARACTER*1 */
+/* Specifies whether P is a forward or backward sequence of */
+/* plane rotations. */
+/* = 'F': Forward, P = P(z-1)*...*P(2)*P(1) */
+/* = 'B': Backward, P = P(1)*P(2)*...*P(z-1) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. If m <= 1, an immediate */
+/* return is effected. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. If n <= 1, an */
+/* immediate return is effected. */
+
+/* C (input) REAL array, dimension */
+/* (M-1) if SIDE = 'L' */
+/* (N-1) if SIDE = 'R' */
+/* The cosines c(k) of the plane rotations. */
+
+/* S (input) REAL array, dimension */
+/* (M-1) if SIDE = 'L' */
+/* (N-1) if SIDE = 'R' */
+/* The sines s(k) of the plane rotations. The 2-by-2 plane */
+/* rotation part of the matrix P(k), R(k), has the form */
+/* R(k) = ( c(k) s(k) ) */
+/* ( -s(k) c(k) ). */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* The M-by-N matrix A. On exit, A is overwritten by P*A if */
+/* SIDE = 'R' or by A*P**T if SIDE = 'L'. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ --c__;
+ --s;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! (lsame_(side, "L") || lsame_(side, "R"))) {
+ info = 1;
+ } else if (! (lsame_(pivot, "V") || lsame_(pivot,
+ "T") || lsame_(pivot, "B"))) {
+ info = 2;
+ } else if (! (lsame_(direct, "F") || lsame_(direct,
+ "B"))) {
+ info = 3;
+ } else if (*m < 0) {
+ info = 4;
+ } else if (*n < 0) {
+ info = 5;
+ } else if (*lda < max(1,*m)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("SLASR ", &info);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+ if (lsame_(side, "L")) {
+
+/* Form P * A */
+
+ if (lsame_(pivot, "V")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[j + 1 + i__ * a_dim1];
+ a[j + 1 + i__ * a_dim1] = ctemp * temp - stemp *
+ a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = stemp * temp + ctemp * a[j
+ + i__ * a_dim1];
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[j + 1 + i__ * a_dim1];
+ a[j + 1 + i__ * a_dim1] = ctemp * temp - stemp *
+ a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = stemp * temp + ctemp * a[j
+ + i__ * a_dim1];
+/* L30: */
+ }
+ }
+/* L40: */
+ }
+ }
+ } else if (lsame_(pivot, "T")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m;
+ for (j = 2; j <= i__1; ++j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = ctemp * temp - stemp * a[
+ i__ * a_dim1 + 1];
+ a[i__ * a_dim1 + 1] = stemp * temp + ctemp * a[
+ i__ * a_dim1 + 1];
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m; j >= 2; --j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = ctemp * temp - stemp * a[
+ i__ * a_dim1 + 1];
+ a[i__ * a_dim1 + 1] = stemp * temp + ctemp * a[
+ i__ * a_dim1 + 1];
+/* L70: */
+ }
+ }
+/* L80: */
+ }
+ }
+ } else if (lsame_(pivot, "B")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = stemp * a[*m + i__ * a_dim1]
+ + ctemp * temp;
+ a[*m + i__ * a_dim1] = ctemp * a[*m + i__ *
+ a_dim1] - stemp * temp;
+/* L90: */
+ }
+ }
+/* L100: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = stemp * a[*m + i__ * a_dim1]
+ + ctemp * temp;
+ a[*m + i__ * a_dim1] = ctemp * a[*m + i__ *
+ a_dim1] - stemp * temp;
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+ }
+ }
+ } else if (lsame_(side, "R")) {
+
+/* Form A * P' */
+
+ if (lsame_(pivot, "V")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[i__ + (j + 1) * a_dim1];
+ a[i__ + (j + 1) * a_dim1] = ctemp * temp - stemp *
+ a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = stemp * temp + ctemp * a[
+ i__ + j * a_dim1];
+/* L130: */
+ }
+ }
+/* L140: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[i__ + (j + 1) * a_dim1];
+ a[i__ + (j + 1) * a_dim1] = ctemp * temp - stemp *
+ a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = stemp * temp + ctemp * a[
+ i__ + j * a_dim1];
+/* L150: */
+ }
+ }
+/* L160: */
+ }
+ }
+ } else if (lsame_(pivot, "T")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = ctemp * temp - stemp * a[
+ i__ + a_dim1];
+ a[i__ + a_dim1] = stemp * temp + ctemp * a[i__ +
+ a_dim1];
+/* L170: */
+ }
+ }
+/* L180: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n; j >= 2; --j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = ctemp * temp - stemp * a[
+ i__ + a_dim1];
+ a[i__ + a_dim1] = stemp * temp + ctemp * a[i__ +
+ a_dim1];
+/* L190: */
+ }
+ }
+/* L200: */
+ }
+ }
+ } else if (lsame_(pivot, "B")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = stemp * a[i__ + *n * a_dim1]
+ + ctemp * temp;
+ a[i__ + *n * a_dim1] = ctemp * a[i__ + *n *
+ a_dim1] - stemp * temp;
+/* L210: */
+ }
+ }
+/* L220: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1.f || stemp != 0.f) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = stemp * a[i__ + *n * a_dim1]
+ + ctemp * temp;
+ a[i__ + *n * a_dim1] = ctemp * a[i__ + *n *
+ a_dim1] - stemp * temp;
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of SLASR */
+
+} /* slasr_ */
diff --git a/contrib/libs/clapack/slasrt.c b/contrib/libs/clapack/slasrt.c
new file mode 100644
index 0000000000..d844db55b1
--- /dev/null
+++ b/contrib/libs/clapack/slasrt.c
@@ -0,0 +1,285 @@
+/* slasrt.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slasrt_(char *id, integer *n, real *d__, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ integer i__, j;
+ real d1, d2, d3;
+ integer dir;
+ real tmp;
+ integer endd;
+ extern logical lsame_(char *, char *);
+ integer stack[64] /* was [2][32] */;
+ real dmnmx;
+ integer start;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ integer stkpnt;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Sort the numbers in D in increasing order (if ID = 'I') or */
+/* in decreasing order (if ID = 'D' ). */
+
+/* Use Quick Sort, reverting to Insertion sort on arrays of */
+/* size <= 20. Dimension of STACK limits N to about 2**32. */
+
+/* Arguments */
+/* ========= */
+
+/* ID (input) CHARACTER*1 */
+/* = 'I': sort D in increasing order; */
+/* = 'D': sort D in decreasing order. */
+
+/* N (input) INTEGER */
+/* The length of the array D. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the array to be sorted. */
+/* On exit, D has been sorted into increasing order */
+/* (D(1) <= ... <= D(N) ) or into decreasing order */
+/* (D(1) >= ... >= D(N) ), depending on ID. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input paramters. */
+
+ /* Parameter adjustments */
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+ dir = -1;
+ if (lsame_(id, "D")) {
+ dir = 0;
+ } else if (lsame_(id, "I")) {
+ dir = 1;
+ }
+ if (dir == -1) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASRT", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 1) {
+ return 0;
+ }
+
+ stkpnt = 1;
+ stack[0] = 1;
+ stack[1] = *n;
+L10:
+ start = stack[(stkpnt << 1) - 2];
+ endd = stack[(stkpnt << 1) - 1];
+ --stkpnt;
+ if (endd - start <= 20 && endd - start > 0) {
+
+/* Do Insertion sort on D( START:ENDD ) */
+
+ if (dir == 0) {
+
+/* Sort into decreasing order */
+
+ i__1 = endd;
+ for (i__ = start + 1; i__ <= i__1; ++i__) {
+ i__2 = start + 1;
+ for (j = i__; j >= i__2; --j) {
+ if (d__[j] > d__[j - 1]) {
+ dmnmx = d__[j];
+ d__[j] = d__[j - 1];
+ d__[j - 1] = dmnmx;
+ } else {
+ goto L30;
+ }
+/* L20: */
+ }
+L30:
+ ;
+ }
+
+ } else {
+
+/* Sort into increasing order */
+
+ i__1 = endd;
+ for (i__ = start + 1; i__ <= i__1; ++i__) {
+ i__2 = start + 1;
+ for (j = i__; j >= i__2; --j) {
+ if (d__[j] < d__[j - 1]) {
+ dmnmx = d__[j];
+ d__[j] = d__[j - 1];
+ d__[j - 1] = dmnmx;
+ } else {
+ goto L50;
+ }
+/* L40: */
+ }
+L50:
+ ;
+ }
+
+ }
+
+ } else if (endd - start > 20) {
+
+/* Partition D( START:ENDD ) and stack parts, largest one first */
+
+/* Choose partition entry as median of 3 */
+
+ d1 = d__[start];
+ d2 = d__[endd];
+ i__ = (start + endd) / 2;
+ d3 = d__[i__];
+ if (d1 < d2) {
+ if (d3 < d1) {
+ dmnmx = d1;
+ } else if (d3 < d2) {
+ dmnmx = d3;
+ } else {
+ dmnmx = d2;
+ }
+ } else {
+ if (d3 < d2) {
+ dmnmx = d2;
+ } else if (d3 < d1) {
+ dmnmx = d3;
+ } else {
+ dmnmx = d1;
+ }
+ }
+
+ if (dir == 0) {
+
+/* Sort into decreasing order */
+
+ i__ = start - 1;
+ j = endd + 1;
+L60:
+L70:
+ --j;
+ if (d__[j] < dmnmx) {
+ goto L70;
+ }
+L80:
+ ++i__;
+ if (d__[i__] > dmnmx) {
+ goto L80;
+ }
+ if (i__ < j) {
+ tmp = d__[i__];
+ d__[i__] = d__[j];
+ d__[j] = tmp;
+ goto L60;
+ }
+ if (j - start > endd - j - 1) {
+ ++stkpnt;
+ stack[(stkpnt << 1) - 2] = start;
+ stack[(stkpnt << 1) - 1] = j;
+ ++stkpnt;
+ stack[(stkpnt << 1) - 2] = j + 1;
+ stack[(stkpnt << 1) - 1] = endd;
+ } else {
+ ++stkpnt;
+ stack[(stkpnt << 1) - 2] = j + 1;
+ stack[(stkpnt << 1) - 1] = endd;
+ ++stkpnt;
+ stack[(stkpnt << 1) - 2] = start;
+ stack[(stkpnt << 1) - 1] = j;
+ }
+ } else {
+
+/* Sort into increasing order */
+
+ i__ = start - 1;
+ j = endd + 1;
+L90:
+L100:
+ --j;
+ if (d__[j] > dmnmx) {
+ goto L100;
+ }
+L110:
+ ++i__;
+ if (d__[i__] < dmnmx) {
+ goto L110;
+ }
+ if (i__ < j) {
+ tmp = d__[i__];
+ d__[i__] = d__[j];
+ d__[j] = tmp;
+ goto L90;
+ }
+ if (j - start > endd - j - 1) {
+ ++stkpnt;
+ stack[(stkpnt << 1) - 2] = start;
+ stack[(stkpnt << 1) - 1] = j;
+ ++stkpnt;
+ stack[(stkpnt << 1) - 2] = j + 1;
+ stack[(stkpnt << 1) - 1] = endd;
+ } else {
+ ++stkpnt;
+ stack[(stkpnt << 1) - 2] = j + 1;
+ stack[(stkpnt << 1) - 1] = endd;
+ ++stkpnt;
+ stack[(stkpnt << 1) - 2] = start;
+ stack[(stkpnt << 1) - 1] = j;
+ }
+ }
+ }
+ if (stkpnt > 0) {
+ goto L10;
+ }
+ return 0;
+
+/* End of SLASRT */
+
+} /* slasrt_ */
diff --git a/contrib/libs/clapack/slassq.c b/contrib/libs/clapack/slassq.c
new file mode 100644
index 0000000000..56f3f9393b
--- /dev/null
+++ b/contrib/libs/clapack/slassq.c
@@ -0,0 +1,116 @@
+/* slassq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slassq_(integer *n, real *x, integer *incx, real *scale,
+ real *sumsq)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ real r__1;
+
+ /* Local variables */
+ integer ix;
+ real absxi;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLASSQ returns the values scl and smsq such that */
+
+/* ( scl**2 )*smsq = x( 1 )**2 +...+ x( n )**2 + ( scale**2 )*sumsq, */
+
+/* where x( i ) = X( 1 + ( i - 1 )*INCX ). The value of sumsq is */
+/* assumed to be non-negative and scl returns the value */
+
+/* scl = max( scale, abs( x( i ) ) ). */
+
+/* scale and sumsq must be supplied in SCALE and SUMSQ and */
+/* scl and smsq are overwritten on SCALE and SUMSQ respectively. */
+
+/* The routine makes only one pass through the vector x. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of elements to be used from the vector X. */
+
+/* X (input) REAL array, dimension (N) */
+/* The vector for which a scaled sum of squares is computed. */
+/* x( i ) = X( 1 + ( i - 1 )*INCX ), 1 <= i <= n. */
+
+/* INCX (input) INTEGER */
+/* The increment between successive values of the vector X. */
+/* INCX > 0. */
+
+/* SCALE (input/output) REAL */
+/* On entry, the value scale in the equation above. */
+/* On exit, SCALE is overwritten with scl , the scaling factor */
+/* for the sum of squares. */
+
+/* SUMSQ (input/output) REAL */
+/* On entry, the value sumsq in the equation above. */
+/* On exit, SUMSQ is overwritten with smsq , the basic sum of */
+/* squares from which scl has been factored out. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*n > 0) {
+ i__1 = (*n - 1) * *incx + 1;
+ i__2 = *incx;
+ for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
+ if (x[ix] != 0.f) {
+ absxi = (r__1 = x[ix], dabs(r__1));
+ if (*scale < absxi) {
+/* Computing 2nd power */
+ r__1 = *scale / absxi;
+ *sumsq = *sumsq * (r__1 * r__1) + 1;
+ *scale = absxi;
+ } else {
+/* Computing 2nd power */
+ r__1 = absxi / *scale;
+ *sumsq += r__1 * r__1;
+ }
+ }
+/* L10: */
+ }
+ }
+ return 0;
+
+/* End of SLASSQ */
+
+} /* slassq_ */
diff --git a/contrib/libs/clapack/slasv2.c b/contrib/libs/clapack/slasv2.c
new file mode 100644
index 0000000000..9fdca48cfa
--- /dev/null
+++ b/contrib/libs/clapack/slasv2.c
@@ -0,0 +1,273 @@
+/* slasv2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b3 = 2.f;
+static real c_b4 = 1.f;
+
+/* Subroutine */ int slasv2_(real *f, real *g, real *h__, real *ssmin, real *
+ ssmax, real *snr, real *csr, real *snl, real *csl)
+{
+ /* System generated locals */
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ real a, d__, l, m, r__, s, t, fa, ga, ha, ft, gt, ht, mm, tt, clt, crt,
+ slt, srt;
+ integer pmax;
+ real temp;
+ logical swap;
+ real tsign;
+ logical gasmal;
+ extern doublereal slamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLASV2 computes the singular value decomposition of a 2-by-2 */
+/* triangular matrix */
+/* [ F G ] */
+/* [ 0 H ]. */
+/* On return, abs(SSMAX) is the larger singular value, abs(SSMIN) is the */
+/* smaller singular value, and (CSL,SNL) and (CSR,SNR) are the left and */
+/* right singular vectors for abs(SSMAX), giving the decomposition */
+
+/* [ CSL SNL ] [ F G ] [ CSR -SNR ] = [ SSMAX 0 ] */
+/* [-SNL CSL ] [ 0 H ] [ SNR CSR ] [ 0 SSMIN ]. */
+
+/* Arguments */
+/* ========= */
+
+/* F (input) REAL */
+/* The (1,1) element of the 2-by-2 matrix. */
+
+/* G (input) REAL */
+/* The (1,2) element of the 2-by-2 matrix. */
+
+/* H (input) REAL */
+/* The (2,2) element of the 2-by-2 matrix. */
+
+/* SSMIN (output) REAL */
+/* abs(SSMIN) is the smaller singular value. */
+
+/* SSMAX (output) REAL */
+/* abs(SSMAX) is the larger singular value. */
+
+/* SNL (output) REAL */
+/* CSL (output) REAL */
+/* The vector (CSL, SNL) is a unit left singular vector for the */
+/* singular value abs(SSMAX). */
+
+/* SNR (output) REAL */
+/* CSR (output) REAL */
+/* The vector (CSR, SNR) is a unit right singular vector for the */
+/* singular value abs(SSMAX). */
+
+/* Further Details */
+/* =============== */
+
+/* Any input parameter may be aliased with any output parameter. */
+
+/* Barring over/underflow and assuming a guard digit in subtraction, all */
+/* output quantities are correct to within a few units in the last */
+/* place (ulps). */
+
+/* In IEEE arithmetic, the code works correctly if one matrix element is */
+/* infinite. */
+
+/* Overflow will not occur unless the largest singular value itself */
+/* overflows or is within a few ulps of overflow. (On machines with */
+/* partial overflow, like the Cray, overflow may occur if the largest */
+/* singular value is within a factor of 2 of overflow.) */
+
+/* Underflow is harmless if underflow is gradual. Otherwise, results */
+/* may correspond to a matrix modified by perturbations of size near */
+/* the underflow threshold. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ ft = *f;
+ fa = dabs(ft);
+ ht = *h__;
+ ha = dabs(*h__);
+
+/* PMAX points to the maximum absolute element of matrix */
+/* PMAX = 1 if F largest in absolute values */
+/* PMAX = 2 if G largest in absolute values */
+/* PMAX = 3 if H largest in absolute values */
+
+ pmax = 1;
+ swap = ha > fa;
+ if (swap) {
+ pmax = 3;
+ temp = ft;
+ ft = ht;
+ ht = temp;
+ temp = fa;
+ fa = ha;
+ ha = temp;
+
+/* Now FA .ge. HA */
+
+ }
+ gt = *g;
+ ga = dabs(gt);
+ if (ga == 0.f) {
+
+/* Diagonal matrix */
+
+ *ssmin = ha;
+ *ssmax = fa;
+ clt = 1.f;
+ crt = 1.f;
+ slt = 0.f;
+ srt = 0.f;
+ } else {
+ gasmal = TRUE_;
+ if (ga > fa) {
+ pmax = 2;
+ if (fa / ga < slamch_("EPS")) {
+
+/* Case of very large GA */
+
+ gasmal = FALSE_;
+ *ssmax = ga;
+ if (ha > 1.f) {
+ *ssmin = fa / (ga / ha);
+ } else {
+ *ssmin = fa / ga * ha;
+ }
+ clt = 1.f;
+ slt = ht / gt;
+ srt = 1.f;
+ crt = ft / gt;
+ }
+ }
+ if (gasmal) {
+
+/* Normal case */
+
+ d__ = fa - ha;
+ if (d__ == fa) {
+
+/* Copes with infinite F or H */
+
+ l = 1.f;
+ } else {
+ l = d__ / fa;
+ }
+
+/* Note that 0 .le. L .le. 1 */
+
+ m = gt / ft;
+
+/* Note that abs(M) .le. 1/macheps */
+
+ t = 2.f - l;
+
+/* Note that T .ge. 1 */
+
+ mm = m * m;
+ tt = t * t;
+ s = sqrt(tt + mm);
+
+/* Note that 1 .le. S .le. 1 + 1/macheps */
+
+ if (l == 0.f) {
+ r__ = dabs(m);
+ } else {
+ r__ = sqrt(l * l + mm);
+ }
+
+/* Note that 0 .le. R .le. 1 + 1/macheps */
+
+ a = (s + r__) * .5f;
+
+/* Note that 1 .le. A .le. 1 + abs(M) */
+
+ *ssmin = ha / a;
+ *ssmax = fa * a;
+ if (mm == 0.f) {
+
+/* Note that M is very tiny */
+
+ if (l == 0.f) {
+ t = r_sign(&c_b3, &ft) * r_sign(&c_b4, &gt);
+ } else {
+ t = gt / r_sign(&d__, &ft) + m / t;
+ }
+ } else {
+ t = (m / (s + t) + m / (r__ + l)) * (a + 1.f);
+ }
+ l = sqrt(t * t + 4.f);
+ crt = 2.f / l;
+ srt = t / l;
+ clt = (crt + srt * m) / a;
+ slt = ht / ft * srt / a;
+ }
+ }
+ if (swap) {
+ *csl = srt;
+ *snl = crt;
+ *csr = slt;
+ *snr = clt;
+ } else {
+ *csl = clt;
+ *snl = slt;
+ *csr = crt;
+ *snr = srt;
+ }
+
+/* Correct signs of SSMAX and SSMIN */
+
+ if (pmax == 1) {
+ tsign = r_sign(&c_b4, csr) * r_sign(&c_b4, csl) * r_sign(&c_b4, f);
+ }
+ if (pmax == 2) {
+ tsign = r_sign(&c_b4, snr) * r_sign(&c_b4, csl) * r_sign(&c_b4, g);
+ }
+ if (pmax == 3) {
+ tsign = r_sign(&c_b4, snr) * r_sign(&c_b4, snl) * r_sign(&c_b4, h__);
+ }
+ *ssmax = r_sign(ssmax, &tsign);
+ r__1 = tsign * r_sign(&c_b4, f) * r_sign(&c_b4, h__);
+ *ssmin = r_sign(ssmin, &r__1);
+ return 0;
+
+/* End of SLASV2 */
+
+} /* slasv2_ */
diff --git a/contrib/libs/clapack/slaswp.c b/contrib/libs/clapack/slaswp.c
new file mode 100644
index 0000000000..d89ee039ce
--- /dev/null
+++ b/contrib/libs/clapack/slaswp.c
@@ -0,0 +1,158 @@
+/* slaswp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slaswp_(integer *n, real *a, integer *lda, integer *k1,
+	integer *k2, integer *ipiv, integer *incx)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, j, k, i1, i2, n32, ip, ix, ix0, inc;
+    real temp;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLASWP performs a series of row interchanges on the matrix A. */
+/*  One row interchange is initiated for each of rows K1 through K2 of A. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix A. */
+
+/*  A       (input/output) REAL array, dimension (LDA,N) */
+/*          On entry, the matrix of column dimension N to which the row */
+/*          interchanges will be applied. */
+/*          On exit, the permuted matrix. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A. */
+
+/*  K1      (input) INTEGER */
+/*          The first element of IPIV for which a row interchange will */
+/*          be done. */
+
+/*  K2      (input) INTEGER */
+/*          The last element of IPIV for which a row interchange will */
+/*          be done. */
+
+/*  IPIV    (input) INTEGER array, dimension (K2*abs(INCX)) */
+/*          The vector of pivot indices.  Only the elements in positions */
+/*          K1 through K2 of IPIV are accessed. */
+/*          IPIV(K) = L implies rows K and L are to be interchanged. */
+
+/*  INCX    (input) INTEGER */
+/*          The increment between successive values of IPIV.  If IPIV */
+/*          is negative, the pivots are applied in reverse order. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Modified by */
+/*   R. C. Whaley, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Interchange row I with row IPIV(I) for each of rows K1 through K2. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;  /* shift to 1-based indexing, matching the Fortran original */
+
+    /* Function Body */
+    if (*incx > 0) {
+	ix0 = *k1;
+	i1 = *k1;
+	i2 = *k2;
+	inc = 1;
+    } else if (*incx < 0) {
+	ix0 = (1 - *k2) * *incx + 1;  /* first IPIV index when pivots are applied in reverse order */
+	i1 = *k2;
+	i2 = *k1;
+	inc = -1;
+    } else {
+	return 0;  /* INCX = 0: no pivots to apply */
+    }
+
+    n32 = *n / 32 << 5;  /* n32 = 32*(n/32): columns are swapped in blocks of 32 for locality */
+    if (n32 != 0) {
+	i__1 = n32;
+	for (j = 1; j <= i__1; j += 32) {
+	    ix = ix0;
+	    i__2 = i2;
+	    i__3 = inc;
+	    for (i__ = i1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3)
+		    {
+		ip = ipiv[ix];
+		if (ip != i__) {  /* swap rows i and ip across this 32-column block */
+		    i__4 = j + 31;
+		    for (k = j; k <= i__4; ++k) {
+			temp = a[i__ + k * a_dim1];
+			a[i__ + k * a_dim1] = a[ip + k * a_dim1];
+			a[ip + k * a_dim1] = temp;
+/* L10: */
+		    }
+		}
+		ix += *incx;
+/* L20: */
+	    }
+/* L30: */
+	}
+    }
+    if (n32 != *n) {  /* handle the remaining n - n32 trailing columns */
+	++n32;
+	ix = ix0;
+	i__1 = i2;
+	i__3 = inc;
+	for (i__ = i1; i__3 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__3) {
+	    ip = ipiv[ix];
+	    if (ip != i__) {
+		i__2 = *n;
+		for (k = n32; k <= i__2; ++k) {
+		    temp = a[i__ + k * a_dim1];
+		    a[i__ + k * a_dim1] = a[ip + k * a_dim1];
+		    a[ip + k * a_dim1] = temp;
+/* L40: */
+		}
+	    }
+	    ix += *incx;
+/* L50: */
+	}
+    }
+
+    return 0;
+
+/*     End of SLASWP */
+
+} /* slaswp_ */
diff --git a/contrib/libs/clapack/slasy2.c b/contrib/libs/clapack/slasy2.c
new file mode 100644
index 0000000000..1f8e1c2b7e
--- /dev/null
+++ b/contrib/libs/clapack/slasy2.c
@@ -0,0 +1,479 @@
+/* slasy2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__4 = 4;
+static integer c__1 = 1;
+static integer c__16 = 16;
+static integer c__0 = 0;
+
+/* Subroutine */ int slasy2_(logical *ltranl, logical *ltranr, integer *isgn,
+	integer *n1, integer *n2, real *tl, integer *ldtl, real *tr, integer *
+	ldtr, real *b, integer *ldb, real *scale, real *x, integer *ldx, real
+	*xnorm, integer *info)
+{
+    /* Initialized data */
+
+    static integer locu12[4] = { 3,4,1,2 };
+    static integer locl21[4] = { 2,1,4,3 };
+    static integer locu22[4] = { 4,3,2,1 };
+    static logical xswpiv[4] = { FALSE_,FALSE_,TRUE_,TRUE_ };
+    static logical bswpiv[4] = { FALSE_,TRUE_,FALSE_,TRUE_ };
+
+    /* System generated locals */
+    integer b_dim1, b_offset, tl_dim1, tl_offset, tr_dim1, tr_offset, x_dim1,
+	    x_offset;
+    real r__1, r__2, r__3, r__4, r__5, r__6, r__7, r__8;
+
+    /* Local variables */
+    integer i__, j, k;
+    real x2[2], l21, u11, u12;
+    integer ip, jp;
+    real u22, t16[16]	/* was [4][4] */, gam, bet, eps, sgn, tmp[4], tau1,
+	    btmp[4], smin;
+    integer ipiv;
+    real temp;
+    integer jpiv[4];
+    real xmax;
+    integer ipsv, jpsv;
+    logical bswap;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+	    integer *), sswap_(integer *, real *, integer *, real *, integer *
+);
+    logical xswap;
+    extern doublereal slamch_(char *);
+    extern integer isamax_(integer *, real *, integer *);
+    real smlnum;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SLASY2 solves for the N1 by N2 matrix X, 1 <= N1,N2 <= 2, in */
+
+/*         op(TL)*X + ISGN*X*op(TR) = SCALE*B, */
+
+/*  where TL is N1 by N1, TR is N2 by N2, B is N1 by N2, and ISGN = 1 or */
+/*  -1.  op(T) = T or T', where T' denotes the transpose of T. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  LTRANL  (input) LOGICAL */
+/*          On entry, LTRANL specifies the op(TL): */
+/*             = .FALSE., op(TL) = TL, */
+/*             = .TRUE., op(TL) = TL'. */
+
+/*  LTRANR  (input) LOGICAL */
+/*          On entry, LTRANR specifies the op(TR): */
+/*            = .FALSE., op(TR) = TR, */
+/*            = .TRUE., op(TR) = TR'. */
+
+/*  ISGN    (input) INTEGER */
+/*          On entry, ISGN specifies the sign of the equation */
+/*          as described before. ISGN may only be 1 or -1. */
+
+/*  N1      (input) INTEGER */
+/*          On entry, N1 specifies the order of matrix TL. */
+/*          N1 may only be 0, 1 or 2. */
+
+/*  N2      (input) INTEGER */
+/*          On entry, N2 specifies the order of matrix TR. */
+/*          N2 may only be 0, 1 or 2. */
+
+/*  TL      (input) REAL array, dimension (LDTL,2) */
+/*          On entry, TL contains an N1 by N1 matrix. */
+
+/*  LDTL    (input) INTEGER */
+/*          The leading dimension of the matrix TL. LDTL >= max(1,N1). */
+
+/*  TR      (input) REAL array, dimension (LDTR,2) */
+/*          On entry, TR contains an N2 by N2 matrix. */
+
+/*  LDTR    (input) INTEGER */
+/*          The leading dimension of the matrix TR. LDTR >= max(1,N2). */
+
+/*  B       (input) REAL array, dimension (LDB,2) */
+/*          On entry, the N1 by N2 matrix B contains the right-hand */
+/*          side of the equation. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the matrix B. LDB >= max(1,N1). */
+
+/*  SCALE   (output) REAL */
+/*          On exit, SCALE contains the scale factor. SCALE is chosen */
+/*          less than or equal to 1 to prevent the solution overflowing. */
+
+/*  X       (output) REAL array, dimension (LDX,2) */
+/*          On exit, X contains the N1 by N2 solution. */
+
+/*  LDX     (input) INTEGER */
+/*          The leading dimension of the matrix X. LDX >= max(1,N1). */
+
+/*  XNORM   (output) REAL */
+/*          On exit, XNORM is the infinity-norm of the solution. */
+
+/*  INFO    (output) INTEGER */
+/*          On exit, INFO is set to */
+/*             0: successful exit. */
+/*             1: TL and TR have too close eigenvalues, so TL or */
+/*                TR is perturbed to get a nonsingular equation. */
+/*          NOTE: In the interests of speed, this routine does not */
+/*                check the inputs for errors. */
+
+/* ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Data statements .. */
+    /* Parameter adjustments */
+    tl_dim1 = *ldtl;
+    tl_offset = 1 + tl_dim1;
+    tl -= tl_offset;
+    tr_dim1 = *ldtr;
+    tr_offset = 1 + tr_dim1;
+    tr -= tr_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+
+    /* Function Body */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Do not check the input parameters for errors */
+
+    *info = 0;
+
+/*     Quick return if possible */
+
+    if (*n1 == 0 || *n2 == 0) {
+	return 0;
+    }
+
+/*     Set constants to control overflow */
+
+    eps = slamch_("P");
+    smlnum = slamch_("S") / eps;
+    sgn = (real) (*isgn);
+
+    k = *n1 + *n1 + *n2 - 2;  /* dispatch: 1 -> 1x1, 2 -> 1x2, 3 -> 2x1, 4 -> 2x2 system */
+    switch (k) {
+	case 1:  goto L10;
+	case 2:  goto L20;
+	case 3:  goto L30;
+	case 4:  goto L50;
+    }
+
+/*     1 by 1: TL11*X + SGN*X*TR11 = B11 */
+
+L10:
+    tau1 = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1];
+    bet = dabs(tau1);
+    if (bet <= smlnum) {
+	tau1 = smlnum;  /* perturb a near-singular pivot; flagged via INFO = 1 */
+	bet = smlnum;
+	*info = 1;
+    }
+
+    *scale = 1.f;
+    gam = (r__1 = b[b_dim1 + 1], dabs(r__1));
+    if (smlnum * gam > bet) {
+	*scale = 1.f / gam;  /* scale down RHS so the solve cannot overflow */
+    }
+
+    x[x_dim1 + 1] = b[b_dim1 + 1] * *scale / tau1;
+    *xnorm = (r__1 = x[x_dim1 + 1], dabs(r__1));
+    return 0;
+
+/*     1 by 2: */
+/*     TL11*[X11 X12] + ISGN*[X11 X12]*op[TR11 TR12]  = [B11 B12] */
+/*                                       [TR21 TR22] */
+
+L20:
+
+/* Computing MAX */
+/* Computing MAX */
+    r__7 = (r__1 = tl[tl_dim1 + 1], dabs(r__1)), r__8 = (r__2 = tr[tr_dim1 +
+	    1], dabs(r__2)), r__7 = max(r__7,r__8), r__8 = (r__3 = tr[(
+	    tr_dim1 << 1) + 1], dabs(r__3)), r__7 = max(r__7,r__8), r__8 = (
+	    r__4 = tr[tr_dim1 + 2], dabs(r__4)), r__7 = max(r__7,r__8), r__8 =
+	     (r__5 = tr[(tr_dim1 << 1) + 2], dabs(r__5));
+    r__6 = eps * dmax(r__7,r__8);
+    smin = dmax(r__6,smlnum);  /* smallest pivot magnitude we will accept */
+    tmp[0] = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1];
+    tmp[3] = tl[tl_dim1 + 1] + sgn * tr[(tr_dim1 << 1) + 2];
+    if (*ltranr) {
+	tmp[1] = sgn * tr[tr_dim1 + 2];
+	tmp[2] = sgn * tr[(tr_dim1 << 1) + 1];
+    } else {
+	tmp[1] = sgn * tr[(tr_dim1 << 1) + 1];
+	tmp[2] = sgn * tr[tr_dim1 + 2];
+    }
+    btmp[0] = b[b_dim1 + 1];
+    btmp[1] = b[(b_dim1 << 1) + 1];
+    goto L40;
+
+/*     2 by 1: */
+/*          op[TL11 TL12]*[X11] + ISGN* [X11]*TR11  = [B11] */
+/*            [TL21 TL22] [X21]         [X21]         [B21] */
+
+L30:
+/* Computing MAX */
+/* Computing MAX */
+    r__7 = (r__1 = tr[tr_dim1 + 1], dabs(r__1)), r__8 = (r__2 = tl[tl_dim1 +
+	    1], dabs(r__2)), r__7 = max(r__7,r__8), r__8 = (r__3 = tl[(
+	    tl_dim1 << 1) + 1], dabs(r__3)), r__7 = max(r__7,r__8), r__8 = (
+	    r__4 = tl[tl_dim1 + 2], dabs(r__4)), r__7 = max(r__7,r__8), r__8 =
+	     (r__5 = tl[(tl_dim1 << 1) + 2], dabs(r__5));
+    r__6 = eps * dmax(r__7,r__8);
+    smin = dmax(r__6,smlnum);
+    tmp[0] = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1];
+    tmp[3] = tl[(tl_dim1 << 1) + 2] + sgn * tr[tr_dim1 + 1];
+    if (*ltranl) {
+	tmp[1] = tl[(tl_dim1 << 1) + 1];
+	tmp[2] = tl[tl_dim1 + 2];
+    } else {
+	tmp[1] = tl[tl_dim1 + 2];
+	tmp[2] = tl[(tl_dim1 << 1) + 1];
+    }
+    btmp[0] = b[b_dim1 + 1];
+    btmp[1] = b[b_dim1 + 2];
+L40:
+
+/*     Solve 2 by 2 system using complete pivoting. */
+/*     Set pivots less than SMIN to SMIN. */
+
+    ipiv = isamax_(&c__4, tmp, &c__1);  /* complete pivoting: largest of the four entries */
+    u11 = tmp[ipiv - 1];
+    if (dabs(u11) <= smin) {
+	*info = 1;
+	u11 = smin;
+    }
+    u12 = tmp[locu12[ipiv - 1] - 1];
+    l21 = tmp[locl21[ipiv - 1] - 1] / u11;
+    u22 = tmp[locu22[ipiv - 1] - 1] - u12 * l21;
+    xswap = xswpiv[ipiv - 1];
+    bswap = bswpiv[ipiv - 1];
+    if (dabs(u22) <= smin) {
+	*info = 1;
+	u22 = smin;
+    }
+    if (bswap) {
+	temp = btmp[1];
+	btmp[1] = btmp[0] - l21 * temp;
+	btmp[0] = temp;
+    } else {
+	btmp[1] -= l21 * btmp[0];
+    }
+    *scale = 1.f;
+    if (smlnum * 2.f * dabs(btmp[1]) > dabs(u22) || smlnum * 2.f * dabs(btmp[
+	    0]) > dabs(u11)) {
+/* Computing MAX */
+	r__1 = dabs(btmp[0]), r__2 = dabs(btmp[1]);
+	*scale = .5f / dmax(r__1,r__2);  /* rescale RHS to keep the back-substitution from overflowing */
+	btmp[0] *= *scale;
+	btmp[1] *= *scale;
+    }
+    x2[1] = btmp[1] / u22;
+    x2[0] = btmp[0] / u11 - u12 / u11 * x2[1];
+    if (xswap) {
+	temp = x2[1];
+	x2[1] = x2[0];
+	x2[0] = temp;
+    }
+    x[x_dim1 + 1] = x2[0];
+    if (*n1 == 1) {
+	x[(x_dim1 << 1) + 1] = x2[1];
+	*xnorm = (r__1 = x[x_dim1 + 1], dabs(r__1)) + (r__2 = x[(x_dim1 << 1)
+		+ 1], dabs(r__2));
+    } else {
+	x[x_dim1 + 2] = x2[1];
+/* Computing MAX */
+	r__3 = (r__1 = x[x_dim1 + 1], dabs(r__1)), r__4 = (r__2 = x[x_dim1 +
+		2], dabs(r__2));
+	*xnorm = dmax(r__3,r__4);
+    }
+    return 0;
+
+/*     2 by 2: */
+/*     op[TL11 TL12]*[X11 X12] +ISGN* [X11 X12]*op[TR11 TR12] = [B11 B12] */
+/*       [TL21 TL22] [X21 X22]        [X21 X22]   [TR21 TR22]   [B21 B22] */
+
+/*     Solve equivalent 4 by 4 system using complete pivoting. */
+/*     Set pivots less than SMIN to SMIN. */
+
+L50:
+/* Computing MAX */
+    r__5 = (r__1 = tr[tr_dim1 + 1], dabs(r__1)), r__6 = (r__2 = tr[(tr_dim1 <<
+	     1) + 1], dabs(r__2)), r__5 = max(r__5,r__6), r__6 = (r__3 = tr[
+	    tr_dim1 + 2], dabs(r__3)), r__5 = max(r__5,r__6), r__6 = (r__4 =
+	    tr[(tr_dim1 << 1) + 2], dabs(r__4));
+    smin = dmax(r__5,r__6);
+/* Computing MAX */
+    r__5 = smin, r__6 = (r__1 = tl[tl_dim1 + 1], dabs(r__1)), r__5 = max(r__5,
+	    r__6), r__6 = (r__2 = tl[(tl_dim1 << 1) + 1], dabs(r__2)), r__5 =
+	    max(r__5,r__6), r__6 = (r__3 = tl[tl_dim1 + 2], dabs(r__3)), r__5
+	    = max(r__5,r__6), r__6 = (r__4 = tl[(tl_dim1 << 1) + 2], dabs(
+	    r__4));
+    smin = dmax(r__5,r__6);
+/* Computing MAX */
+    r__1 = eps * smin;
+    smin = dmax(r__1,smlnum);
+    btmp[0] = 0.f;
+    scopy_(&c__16, btmp, &c__0, t16, &c__1);  /* incx = 0 source broadcasts btmp[0] = 0: zero-fills t16 */
+    t16[0] = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1];
+    t16[5] = tl[(tl_dim1 << 1) + 2] + sgn * tr[tr_dim1 + 1];
+    t16[10] = tl[tl_dim1 + 1] + sgn * tr[(tr_dim1 << 1) + 2];
+    t16[15] = tl[(tl_dim1 << 1) + 2] + sgn * tr[(tr_dim1 << 1) + 2];
+    if (*ltranl) {
+	t16[4] = tl[tl_dim1 + 2];
+	t16[1] = tl[(tl_dim1 << 1) + 1];
+	t16[14] = tl[tl_dim1 + 2];
+	t16[11] = tl[(tl_dim1 << 1) + 1];
+    } else {
+	t16[4] = tl[(tl_dim1 << 1) + 1];
+	t16[1] = tl[tl_dim1 + 2];
+	t16[14] = tl[(tl_dim1 << 1) + 1];
+	t16[11] = tl[tl_dim1 + 2];
+    }
+    if (*ltranr) {
+	t16[8] = sgn * tr[(tr_dim1 << 1) + 1];
+	t16[13] = sgn * tr[(tr_dim1 << 1) + 1];
+	t16[2] = sgn * tr[tr_dim1 + 2];
+	t16[7] = sgn * tr[tr_dim1 + 2];
+    } else {
+	t16[8] = sgn * tr[tr_dim1 + 2];
+	t16[13] = sgn * tr[tr_dim1 + 2];
+	t16[2] = sgn * tr[(tr_dim1 << 1) + 1];
+	t16[7] = sgn * tr[(tr_dim1 << 1) + 1];
+    }
+    btmp[0] = b[b_dim1 + 1];
+    btmp[1] = b[b_dim1 + 2];
+    btmp[2] = b[(b_dim1 << 1) + 1];
+    btmp[3] = b[(b_dim1 << 1) + 2];
+
+/*     Perform elimination */
+
+    for (i__ = 1; i__ <= 3; ++i__) {
+	xmax = 0.f;
+	for (ip = i__; ip <= 4; ++ip) {
+	    for (jp = i__; jp <= 4; ++jp) {
+		if ((r__1 = t16[ip + (jp << 2) - 5], dabs(r__1)) >= xmax) {
+		    xmax = (r__1 = t16[ip + (jp << 2) - 5], dabs(r__1));
+		    ipsv = ip;
+		    jpsv = jp;
+		}
+/* L60: */
+	    }
+/* L70: */
+	}
+	if (ipsv != i__) {
+	    sswap_(&c__4, &t16[ipsv - 1], &c__4, &t16[i__ - 1], &c__4);  /* row interchange */
+	    temp = btmp[i__ - 1];
+	    btmp[i__ - 1] = btmp[ipsv - 1];
+	    btmp[ipsv - 1] = temp;
+	}
+	if (jpsv != i__) {
+	    sswap_(&c__4, &t16[(jpsv << 2) - 4], &c__1, &t16[(i__ << 2) - 4],
+		    &c__1);  /* column interchange; recorded in jpiv for the final unscramble */
+	}
+	jpiv[i__ - 1] = jpsv;
+	if ((r__1 = t16[i__ + (i__ << 2) - 5], dabs(r__1)) < smin) {
+	    *info = 1;
+	    t16[i__ + (i__ << 2) - 5] = smin;
+	}
+	for (j = i__ + 1; j <= 4; ++j) {
+	    t16[j + (i__ << 2) - 5] /= t16[i__ + (i__ << 2) - 5];
+	    btmp[j - 1] -= t16[j + (i__ << 2) - 5] * btmp[i__ - 1];
+	    for (k = i__ + 1; k <= 4; ++k) {
+		t16[j + (k << 2) - 5] -= t16[j + (i__ << 2) - 5] * t16[i__ + (
+			k << 2) - 5];
+/* L80: */
+	    }
+/* L90: */
+	}
+/* L100: */
+    }
+    if (dabs(t16[15]) < smin) {
+	t16[15] = smin;
+    }
+    *scale = 1.f;
+    if (smlnum * 8.f * dabs(btmp[0]) > dabs(t16[0]) || smlnum * 8.f * dabs(
+	    btmp[1]) > dabs(t16[5]) || smlnum * 8.f * dabs(btmp[2]) > dabs(
+	    t16[10]) || smlnum * 8.f * dabs(btmp[3]) > dabs(t16[15])) {
+/* Computing MAX */
+	r__1 = dabs(btmp[0]), r__2 = dabs(btmp[1]), r__1 = max(r__1,r__2),
+		r__2 = dabs(btmp[2]), r__1 = max(r__1,r__2), r__2 = dabs(btmp[
+		3]);
+	*scale = .125f / dmax(r__1,r__2);  /* rescale RHS so back-substitution cannot overflow */
+	btmp[0] *= *scale;
+	btmp[1] *= *scale;
+	btmp[2] *= *scale;
+	btmp[3] *= *scale;
+    }
+    for (i__ = 1; i__ <= 4; ++i__) {
+	k = 5 - i__;
+	temp = 1.f / t16[k + (k << 2) - 5];
+	tmp[k - 1] = btmp[k - 1] * temp;
+	for (j = k + 1; j <= 4; ++j) {
+	    tmp[k - 1] -= temp * t16[k + (j << 2) - 5] * tmp[j - 1];
+/* L110: */
+	}
+/* L120: */
+    }
+    for (i__ = 1; i__ <= 3; ++i__) {
+	if (jpiv[4 - i__ - 1] != 4 - i__) {
+	    temp = tmp[4 - i__ - 1];
+	    tmp[4 - i__ - 1] = tmp[jpiv[4 - i__ - 1] - 1];
+	    tmp[jpiv[4 - i__ - 1] - 1] = temp;
+	}
+/* L130: */
+    }
+    x[x_dim1 + 1] = tmp[0];
+    x[x_dim1 + 2] = tmp[1];
+    x[(x_dim1 << 1) + 1] = tmp[2];
+    x[(x_dim1 << 1) + 2] = tmp[3];
+/* Computing MAX */
+    r__1 = dabs(tmp[0]) + dabs(tmp[2]), r__2 = dabs(tmp[1]) + dabs(tmp[3]);
+    *xnorm = dmax(r__1,r__2);
+    return 0;
+
+/*     End of SLASY2 */
+
+} /* slasy2_ */
diff --git a/contrib/libs/clapack/slasyf.c b/contrib/libs/clapack/slasyf.c
new file mode 100644
index 0000000000..07215404f7
--- /dev/null
+++ b/contrib/libs/clapack/slasyf.c
@@ -0,0 +1,719 @@
+/* slasyf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b8 = -1.f;
+static real c_b9 = 1.f;
+
+/* Subroutine */ int slasyf_(char *uplo, integer *n, integer *nb, integer *kb,
+ real *a, integer *lda, integer *ipiv, real *w, integer *ldw, integer
+ *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer j, k;
+ real t, r1, d11, d21, d22;
+ integer jb, jj, kk, jp, kp, kw, kkw, imax, jmax;
+ real alpha;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ sgemm_(char *, char *, integer *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *), sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *);
+ integer kstep;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), sswap_(integer *, real *, integer *, real *, integer *
+);
+ real absakk;
+ extern integer isamax_(integer *, real *, integer *);
+ real colmax, rowmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLASYF computes a partial factorization of a real symmetric matrix A */
+/* using the Bunch-Kaufman diagonal pivoting method. The partial */
+/* factorization has the form: */
+
+/* A = ( I U12 ) ( A11 0 ) ( I 0 ) if UPLO = 'U', or: */
+/* ( 0 U22 ) ( 0 D ) ( U12' U22' ) */
+
+/* A = ( L11 0 ) ( D 0 ) ( L11' L21' ) if UPLO = 'L' */
+/* ( L21 I ) ( 0 A22 ) ( 0 I ) */
+
+/* where the order of D is at most NB. The actual order is returned in */
+/* the argument KB, and is either NB or NB-1, or N if N <= NB. */
+
+/* SLASYF is an auxiliary routine called by SSYTRF. It uses blocked code */
+/* (calling Level 3 BLAS) to update the submatrix A11 (if UPLO = 'U') or */
+/* A22 (if UPLO = 'L'). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NB (input) INTEGER */
+/* The maximum number of columns of the matrix A that should be */
+/* factored. NB should be at least 2 to allow for 2-by-2 pivot */
+/* blocks. */
+
+/* KB (output) INTEGER */
+/* The number of columns of A that were actually factored. */
+/* KB is either NB-1 or NB, or N if N <= NB. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+/* On exit, A contains details of the partial factorization. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If UPLO = 'U', only the last KB elements of IPIV are set; */
+/* if UPLO = 'L', only the first KB elements are set. */
+
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* W (workspace) REAL array, dimension (LDW,NB) */
+
+/* LDW (input) INTEGER */
+/* The leading dimension of the array W. LDW >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* > 0: if INFO = k, D(k,k) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ w_dim1 = *ldw;
+ w_offset = 1 + w_dim1;
+ w -= w_offset;
+
+ /* Function Body */
+ *info = 0;
+
+/* Initialize ALPHA for use in choosing pivot block size. */
+
+ alpha = (sqrt(17.f) + 1.f) / 8.f;
+
+ if (lsame_(uplo, "U")) {
+
+/* Factorize the trailing columns of A using the upper triangle */
+/* of A and working backwards, and compute the matrix W = U12*D */
+/* for use in updating A11 */
+
+/* K is the main loop index, decreasing from N in steps of 1 or 2 */
+
+/* KW is the column of W which corresponds to column K of A */
+
+ k = *n;
+L10:
+ kw = *nb + k - *n;
+
+/* Exit from loop */
+
+ if (k <= *n - *nb + 1 && *nb < *n || k < 1) {
+ goto L30;
+ }
+
+/* Copy column K of A to column KW of W and update it */
+
+ scopy_(&k, &a[k * a_dim1 + 1], &c__1, &w[kw * w_dim1 + 1], &c__1);
+ if (k < *n) {
+ i__1 = *n - k;
+ sgemv_("No transpose", &k, &i__1, &c_b8, &a[(k + 1) * a_dim1 + 1],
+ lda, &w[k + (kw + 1) * w_dim1], ldw, &c_b9, &w[kw *
+ w_dim1 + 1], &c__1);
+ }
+
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ absakk = (r__1 = w[k + kw * w_dim1], dabs(r__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ imax = isamax_(&i__1, &w[kw * w_dim1 + 1], &c__1);
+ colmax = (r__1 = w[imax + kw * w_dim1], dabs(r__1));
+ } else {
+ colmax = 0.f;
+ }
+
+ if (dmax(absakk,colmax) == 0.f) {
+
+/* Column K is zero: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* Copy column IMAX to column KW-1 of W and update it */
+
+ scopy_(&imax, &a[imax * a_dim1 + 1], &c__1, &w[(kw - 1) *
+ w_dim1 + 1], &c__1);
+ i__1 = k - imax;
+ scopy_(&i__1, &a[imax + (imax + 1) * a_dim1], lda, &w[imax +
+ 1 + (kw - 1) * w_dim1], &c__1);
+ if (k < *n) {
+ i__1 = *n - k;
+ sgemv_("No transpose", &k, &i__1, &c_b8, &a[(k + 1) *
+ a_dim1 + 1], lda, &w[imax + (kw + 1) * w_dim1],
+ ldw, &c_b9, &w[(kw - 1) * w_dim1 + 1], &c__1);
+ }
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = k - imax;
+ jmax = imax + isamax_(&i__1, &w[imax + 1 + (kw - 1) * w_dim1],
+ &c__1);
+ rowmax = (r__1 = w[jmax + (kw - 1) * w_dim1], dabs(r__1));
+ if (imax > 1) {
+ i__1 = imax - 1;
+ jmax = isamax_(&i__1, &w[(kw - 1) * w_dim1 + 1], &c__1);
+/* Computing MAX */
+ r__2 = rowmax, r__3 = (r__1 = w[jmax + (kw - 1) * w_dim1],
+ dabs(r__1));
+ rowmax = dmax(r__2,r__3);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else if ((r__1 = w[imax + (kw - 1) * w_dim1], dabs(r__1)) >=
+ alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+
+/* copy column KW-1 of W to column KW */
+
+ scopy_(&k, &w[(kw - 1) * w_dim1 + 1], &c__1, &w[kw *
+ w_dim1 + 1], &c__1);
+ } else {
+
+/* interchange rows and columns K-1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+
+ kk = k - kstep + 1;
+ kkw = *nb + kk - *n;
+
+/* Updated column KP is already stored in column KKW of W */
+
+ if (kp != kk) {
+
+/* Copy non-updated column KK to column KP */
+
+ a[kp + k * a_dim1] = a[kk + k * a_dim1];
+ i__1 = k - 1 - kp;
+ scopy_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + (kp +
+ 1) * a_dim1], lda);
+ scopy_(&kp, &a[kk * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1], &
+ c__1);
+
+/* Interchange rows KK and KP in last KK columns of A and W */
+
+ i__1 = *n - kk + 1;
+ sswap_(&i__1, &a[kk + kk * a_dim1], lda, &a[kp + kk * a_dim1],
+ lda);
+ i__1 = *n - kk + 1;
+ sswap_(&i__1, &w[kk + kkw * w_dim1], ldw, &w[kp + kkw *
+ w_dim1], ldw);
+ }
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column KW of W now holds */
+
+/* W(k) = U(k)*D(k) */
+
+/* where U(k) is the k-th column of U */
+
+/* Store U(k) in column k of A */
+
+ scopy_(&k, &w[kw * w_dim1 + 1], &c__1, &a[k * a_dim1 + 1], &
+ c__1);
+ r1 = 1.f / a[k + k * a_dim1];
+ i__1 = k - 1;
+ sscal_(&i__1, &r1, &a[k * a_dim1 + 1], &c__1);
+ } else {
+
+/* 2-by-2 pivot block D(k): columns KW and KW-1 of W now */
+/* hold */
+
+/* ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */
+
+/* where U(k) and U(k-1) are the k-th and (k-1)-th columns */
+/* of U */
+
+ if (k > 2) {
+
+/* Store U(k) and U(k-1) in columns k and k-1 of A */
+
+ d21 = w[k - 1 + kw * w_dim1];
+ d11 = w[k + kw * w_dim1] / d21;
+ d22 = w[k - 1 + (kw - 1) * w_dim1] / d21;
+ t = 1.f / (d11 * d22 - 1.f);
+ d21 = t / d21;
+ i__1 = k - 2;
+ for (j = 1; j <= i__1; ++j) {
+ a[j + (k - 1) * a_dim1] = d21 * (d11 * w[j + (kw - 1)
+ * w_dim1] - w[j + kw * w_dim1]);
+ a[j + k * a_dim1] = d21 * (d22 * w[j + kw * w_dim1] -
+ w[j + (kw - 1) * w_dim1]);
+/* L20: */
+ }
+ }
+
+/* Copy D(k) to A */
+
+ a[k - 1 + (k - 1) * a_dim1] = w[k - 1 + (kw - 1) * w_dim1];
+ a[k - 1 + k * a_dim1] = w[k - 1 + kw * w_dim1];
+ a[k + k * a_dim1] = w[k + kw * w_dim1];
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k - 1] = -kp;
+ }
+
+/* Decrease K and return to the start of the main loop */
+
+ k -= kstep;
+ goto L10;
+
+L30:
+
+/* Update the upper triangle of A11 (= A(1:k,1:k)) as */
+
+/* A11 := A11 - U12*D*U12' = A11 - U12*W' */
+
+/* computing blocks of NB columns at a time */
+
+ i__1 = -(*nb);
+ for (j = (k - 1) / *nb * *nb + 1; i__1 < 0 ? j >= 1 : j <= 1; j +=
+ i__1) {
+/* Computing MIN */
+ i__2 = *nb, i__3 = k - j + 1;
+ jb = min(i__2,i__3);
+
+/* Update the upper triangle of the diagonal block */
+
+ i__2 = j + jb - 1;
+ for (jj = j; jj <= i__2; ++jj) {
+ i__3 = jj - j + 1;
+ i__4 = *n - k;
+ sgemv_("No transpose", &i__3, &i__4, &c_b8, &a[j + (k + 1) *
+ a_dim1], lda, &w[jj + (kw + 1) * w_dim1], ldw, &c_b9,
+ &a[j + jj * a_dim1], &c__1);
+/* L40: */
+ }
+
+/* Update the rectangular superdiagonal block */
+
+ i__2 = j - 1;
+ i__3 = *n - k;
+ sgemm_("No transpose", "Transpose", &i__2, &jb, &i__3, &c_b8, &a[(
+ k + 1) * a_dim1 + 1], lda, &w[j + (kw + 1) * w_dim1], ldw,
+ &c_b9, &a[j * a_dim1 + 1], lda);
+/* L50: */
+ }
+
+/* Put U12 in standard form by partially undoing the interchanges */
+/* in columns k+1:n */
+
+ j = k + 1;
+L60:
+ jj = j;
+ jp = ipiv[j];
+ if (jp < 0) {
+ jp = -jp;
+ ++j;
+ }
+ ++j;
+ if (jp != jj && j <= *n) {
+ i__1 = *n - j + 1;
+ sswap_(&i__1, &a[jp + j * a_dim1], lda, &a[jj + j * a_dim1], lda);
+ }
+ if (j <= *n) {
+ goto L60;
+ }
+
+/* Set KB to the number of columns factorized */
+
+ *kb = *n - k;
+
+ } else {
+
+/* Factorize the leading columns of A using the lower triangle */
+/* of A and working forwards, and compute the matrix W = L21*D */
+/* for use in updating A22 */
+
+/* K is the main loop index, increasing from 1 in steps of 1 or 2 */
+
+ k = 1;
+L70:
+
+/* Exit from loop */
+
+ if (k >= *nb && *nb < *n || k > *n) {
+ goto L90;
+ }
+
+/* Copy column K of A to column K of W and update it */
+
+ i__1 = *n - k + 1;
+ scopy_(&i__1, &a[k + k * a_dim1], &c__1, &w[k + k * w_dim1], &c__1);
+ i__1 = *n - k + 1;
+ i__2 = k - 1;
+ sgemv_("No transpose", &i__1, &i__2, &c_b8, &a[k + a_dim1], lda, &w[k
+ + w_dim1], ldw, &c_b9, &w[k + k * w_dim1], &c__1);
+
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ absakk = (r__1 = w[k + k * w_dim1], dabs(r__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ imax = k + isamax_(&i__1, &w[k + 1 + k * w_dim1], &c__1);
+ colmax = (r__1 = w[imax + k * w_dim1], dabs(r__1));
+ } else {
+ colmax = 0.f;
+ }
+
+ if (dmax(absakk,colmax) == 0.f) {
+
+/* Column K is zero: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* Copy column IMAX to column K+1 of W and update it */
+
+ i__1 = imax - k;
+ scopy_(&i__1, &a[imax + k * a_dim1], lda, &w[k + (k + 1) *
+ w_dim1], &c__1);
+ i__1 = *n - imax + 1;
+ scopy_(&i__1, &a[imax + imax * a_dim1], &c__1, &w[imax + (k +
+ 1) * w_dim1], &c__1);
+ i__1 = *n - k + 1;
+ i__2 = k - 1;
+ sgemv_("No transpose", &i__1, &i__2, &c_b8, &a[k + a_dim1],
+ lda, &w[imax + w_dim1], ldw, &c_b9, &w[k + (k + 1) *
+ w_dim1], &c__1);
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = imax - k;
+ jmax = k - 1 + isamax_(&i__1, &w[k + (k + 1) * w_dim1], &c__1)
+ ;
+ rowmax = (r__1 = w[jmax + (k + 1) * w_dim1], dabs(r__1));
+ if (imax < *n) {
+ i__1 = *n - imax;
+ jmax = imax + isamax_(&i__1, &w[imax + 1 + (k + 1) *
+ w_dim1], &c__1);
+/* Computing MAX */
+ r__2 = rowmax, r__3 = (r__1 = w[jmax + (k + 1) * w_dim1],
+ dabs(r__1));
+ rowmax = dmax(r__2,r__3);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else if ((r__1 = w[imax + (k + 1) * w_dim1], dabs(r__1)) >=
+ alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+
+/* copy column K+1 of W to column K */
+
+ i__1 = *n - k + 1;
+ scopy_(&i__1, &w[k + (k + 1) * w_dim1], &c__1, &w[k + k *
+ w_dim1], &c__1);
+ } else {
+
+/* interchange rows and columns K+1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+
+ kk = k + kstep - 1;
+
+/* Updated column KP is already stored in column KK of W */
+
+ if (kp != kk) {
+
+/* Copy non-updated column KK to column KP */
+
+ a[kp + k * a_dim1] = a[kk + k * a_dim1];
+ i__1 = kp - k - 1;
+ scopy_(&i__1, &a[k + 1 + kk * a_dim1], &c__1, &a[kp + (k + 1)
+ * a_dim1], lda);
+ i__1 = *n - kp + 1;
+ scopy_(&i__1, &a[kp + kk * a_dim1], &c__1, &a[kp + kp *
+ a_dim1], &c__1);
+
+/* Interchange rows KK and KP in first KK columns of A and W */
+
+ sswap_(&kk, &a[kk + a_dim1], lda, &a[kp + a_dim1], lda);
+ sswap_(&kk, &w[kk + w_dim1], ldw, &w[kp + w_dim1], ldw);
+ }
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k of W now holds */
+
+/* W(k) = L(k)*D(k) */
+
+/* where L(k) is the k-th column of L */
+
+/* Store L(k) in column k of A */
+
+ i__1 = *n - k + 1;
+ scopy_(&i__1, &w[k + k * w_dim1], &c__1, &a[k + k * a_dim1], &
+ c__1);
+ if (k < *n) {
+ r1 = 1.f / a[k + k * a_dim1];
+ i__1 = *n - k;
+ sscal_(&i__1, &r1, &a[k + 1 + k * a_dim1], &c__1);
+ }
+ } else {
+
+/* 2-by-2 pivot block D(k): columns k and k+1 of W now hold */
+
+/* ( W(k) W(k+1) ) = ( L(k) L(k+1) )*D(k) */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th columns */
+/* of L */
+
+ if (k < *n - 1) {
+
+/* Store L(k) and L(k+1) in columns k and k+1 of A */
+
+ d21 = w[k + 1 + k * w_dim1];
+ d11 = w[k + 1 + (k + 1) * w_dim1] / d21;
+ d22 = w[k + k * w_dim1] / d21;
+ t = 1.f / (d11 * d22 - 1.f);
+ d21 = t / d21;
+ i__1 = *n;
+ for (j = k + 2; j <= i__1; ++j) {
+ a[j + k * a_dim1] = d21 * (d11 * w[j + k * w_dim1] -
+ w[j + (k + 1) * w_dim1]);
+ a[j + (k + 1) * a_dim1] = d21 * (d22 * w[j + (k + 1) *
+ w_dim1] - w[j + k * w_dim1]);
+/* L80: */
+ }
+ }
+
+/* Copy D(k) to A */
+
+ a[k + k * a_dim1] = w[k + k * w_dim1];
+ a[k + 1 + k * a_dim1] = w[k + 1 + k * w_dim1];
+ a[k + 1 + (k + 1) * a_dim1] = w[k + 1 + (k + 1) * w_dim1];
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k + 1] = -kp;
+ }
+
+/* Increase K and return to the start of the main loop */
+
+ k += kstep;
+ goto L70;
+
+L90:
+
+/* Update the lower triangle of A22 (= A(k:n,k:n)) as */
+
+/* A22 := A22 - L21*D*L21' = A22 - L21*W' */
+
+/* computing blocks of NB columns at a time */
+
+ i__1 = *n;
+ i__2 = *nb;
+ for (j = k; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__3 = *nb, i__4 = *n - j + 1;
+ jb = min(i__3,i__4);
+
+/* Update the lower triangle of the diagonal block */
+
+ i__3 = j + jb - 1;
+ for (jj = j; jj <= i__3; ++jj) {
+ i__4 = j + jb - jj;
+ i__5 = k - 1;
+ sgemv_("No transpose", &i__4, &i__5, &c_b8, &a[jj + a_dim1],
+ lda, &w[jj + w_dim1], ldw, &c_b9, &a[jj + jj * a_dim1]
+, &c__1);
+/* L100: */
+ }
+
+/* Update the rectangular subdiagonal block */
+
+ if (j + jb <= *n) {
+ i__3 = *n - j - jb + 1;
+ i__4 = k - 1;
+ sgemm_("No transpose", "Transpose", &i__3, &jb, &i__4, &c_b8,
+ &a[j + jb + a_dim1], lda, &w[j + w_dim1], ldw, &c_b9,
+ &a[j + jb + j * a_dim1], lda);
+ }
+/* L110: */
+ }
+
+/* Put L21 in standard form by partially undoing the interchanges */
+/* in columns 1:k-1 */
+
+ j = k - 1;
+L120:
+ jj = j;
+ jp = ipiv[j];
+ if (jp < 0) {
+ jp = -jp;
+ --j;
+ }
+ --j;
+ if (jp != jj && j >= 1) {
+ sswap_(&j, &a[jp + a_dim1], lda, &a[jj + a_dim1], lda);
+ }
+ if (j >= 1) {
+ goto L120;
+ }
+
+/* Set KB to the number of columns factorized */
+
+ *kb = k - 1;
+
+ }
+ return 0;
+
+/* End of SLASYF */
+
+} /* slasyf_ */
diff --git a/contrib/libs/clapack/slatbs.c b/contrib/libs/clapack/slatbs.c
new file mode 100644
index 0000000000..9a3e867a71
--- /dev/null
+++ b/contrib/libs/clapack/slatbs.c
@@ -0,0 +1,849 @@
+/* slatbs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b36 = .5f;
+
+/* Subroutine */ int slatbs_(char *uplo, char *trans, char *diag, char *
+ normin, integer *n, integer *kd, real *ab, integer *ldab, real *x,
+ real *scale, real *cnorm, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2, r__3;
+
+ /* Local variables */
+ integer i__, j;
+ real xj, rec, tjj;
+ integer jinc, jlen;
+ real xbnd;
+ integer imax;
+ real tmax, tjjs;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ real xmax, grow, sumj;
+ integer maind;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ real tscal, uscal;
+ integer jlast;
+ extern doublereal sasum_(integer *, real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int stbsv_(char *, char *, char *, integer *,
+ integer *, real *, integer *, real *, integer *), saxpy_(integer *, real *, real *, integer *, real *,
+ integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ extern integer isamax_(integer *, real *, integer *);
+ logical notran;
+ integer jfirst;
+ real smlnum;
+ logical nounit;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLATBS solves one of the triangular systems */
+
+/* A *x = s*b or A'*x = s*b */
+
+/* with scaling to prevent overflow, where A is an upper or lower */
+/* triangular band matrix. Here A' denotes the transpose of A, x and b */
+/* are n-element vectors, and s is a scaling factor, usually less than */
+/* or equal to 1, chosen so that the components of x will be less than */
+/* the overflow threshold. If the unscaled problem will not cause */
+/* overflow, the Level 2 BLAS routine STBSV is called. If the matrix A */
+/* is singular (A(j,j) = 0 for some j), then s is set to 0 and a */
+/* non-trivial solution to A*x = 0 is returned. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the operation applied to A. */
+/* = 'N': Solve A * x = s*b (No transpose) */
+/* = 'T': Solve A'* x = s*b (Transpose) */
+/* = 'C': Solve A'* x = s*b (Conjugate transpose = Transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* NORMIN (input) CHARACTER*1 */
+/* Specifies whether CNORM has been set or not. */
+/* = 'Y': CNORM contains the column norms on entry */
+/* = 'N': CNORM is not set on entry. On exit, the norms will */
+/* be computed and stored in CNORM. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of subdiagonals or superdiagonals in the */
+/* triangular matrix A. KD >= 0. */
+
+/* AB (input) REAL array, dimension (LDAB,N) */
+/* The upper or lower triangular band matrix A, stored in the */
+/* first KD+1 rows of the array. The j-th column of A is stored */
+/* in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* X (input/output) REAL array, dimension (N) */
+/* On entry, the right hand side b of the triangular system. */
+/* On exit, X is overwritten by the solution vector x. */
+
+/* SCALE (output) REAL */
+/* The scaling factor s for the triangular system */
+/* A * x = s*b or A'* x = s*b. */
+/* If SCALE = 0, the matrix A is singular or badly scaled, and */
+/* the vector x is an exact or approximate solution to A*x = 0. */
+
+/* CNORM (input or output) REAL array, dimension (N) */
+
+/* If NORMIN = 'Y', CNORM is an input argument and CNORM(j) */
+/* contains the norm of the off-diagonal part of the j-th column */
+/* of A. If TRANS = 'N', CNORM(j) must be greater than or equal */
+/* to the infinity-norm, and if TRANS = 'T' or 'C', CNORM(j) */
+/* must be greater than or equal to the 1-norm. */
+
+/* If NORMIN = 'N', CNORM is an output argument and CNORM(j) */
+/* returns the 1-norm of the offdiagonal part of the j-th column */
+/* of A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* Further Details */
+/* ======= ======= */
+
+/* A rough bound on x is computed; if that is less than overflow, STBSV */
+/* is called, otherwise, specific code is used which checks for possible */
+/* overflow or divide-by-zero at every operation. */
+
+/* A columnwise scheme is used for solving A*x = b. The basic algorithm */
+/* if A is lower triangular is */
+
+/* x[1:n] := b[1:n] */
+/* for j = 1, ..., n */
+/* x(j) := x(j) / A(j,j) */
+/* x[j+1:n] := x[j+1:n] - x(j) * A[j+1:n,j] */
+/* end */
+
+/* Define bounds on the components of x after j iterations of the loop: */
+/* M(j) = bound on x[1:j] */
+/* G(j) = bound on x[j+1:n] */
+/* Initially, let M(0) = 0 and G(0) = max{x(i), i=1,...,n}. */
+
+/* Then for iteration j+1 we have */
+/* M(j+1) <= G(j) / | A(j+1,j+1) | */
+/* G(j+1) <= G(j) + M(j+1) * | A[j+2:n,j+1] | */
+/* <= G(j) ( 1 + CNORM(j+1) / | A(j+1,j+1) | ) */
+
+/* where CNORM(j+1) is greater than or equal to the infinity-norm of */
+/* column j+1 of A, not counting the diagonal. Hence */
+
+/* G(j) <= G(0) product ( 1 + CNORM(i) / | A(i,i) | ) */
+/* 1<=i<=j */
+/* and */
+
+/* |x(j)| <= ( G(0) / |A(j,j)| ) product ( 1 + CNORM(i) / |A(i,i)| ) */
+/* 1<=i< j */
+
+/* Since |x(j)| <= M(j), we use the Level 2 BLAS routine STBSV if the */
+/* reciprocal of the largest M(j), j=1,..,n, is larger than */
+/* max(underflow, 1/overflow). */
+
+/* The bound on x(j) is also used to determine when a step in the */
+/* columnwise method can be performed without fear of overflow. If */
+/* the computed bound is greater than a large constant, x is scaled to */
+/* prevent overflow, but if the bound overflows, x is set to 0, x(j) to */
+/* 1, and scale to 0, and a non-trivial solution to A*x = 0 is found. */
+
+/* Similarly, a row-wise scheme is used to solve A'*x = b. The basic */
+/* algorithm for A upper triangular is */
+
+/* for j = 1, ..., n */
+/* x(j) := ( b(j) - A[1:j-1,j]' * x[1:j-1] ) / A(j,j) */
+/* end */
+
+/* We simultaneously compute two bounds */
+/* G(j) = bound on ( b(i) - A[1:i-1,i]' * x[1:i-1] ), 1<=i<=j */
+/* M(j) = bound on x(i), 1<=i<=j */
+
+/* The initial values are G(0) = 0, M(0) = max{b(i), i=1,..,n}, and we */
+/* add the constraint G(j) >= G(j-1) and M(j) >= M(j-1) for j >= 1. */
+/* Then the bound on x(j) is */
+
+/* M(j) <= M(j-1) * ( 1 + CNORM(j) ) / | A(j,j) | */
+
+/* <= M(0) * product ( ( 1 + CNORM(i) ) / |A(i,i)| ) */
+/* 1<=i<=j */
+
+/* and we can safely call STBSV if 1/M(n) and 1/G(n) are both greater */
+/* than max(underflow, 1/overflow). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --x;
+ --cnorm;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+/* Test the input parameters. */
+
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (! lsame_(normin, "Y") && ! lsame_(normin,
+ "N")) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*kd < 0) {
+ *info = -6;
+ } else if (*ldab < *kd + 1) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLATBS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine machine dependent parameters to control overflow. */
+
+ smlnum = slamch_("Safe minimum") / slamch_("Precision");
+ bignum = 1.f / smlnum;
+ *scale = 1.f;
+
+ if (lsame_(normin, "N")) {
+
+/* Compute the 1-norm of each column, not including the diagonal. */
+
+ if (upper) {
+
+/* A is upper triangular. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *kd, i__3 = j - 1;
+ jlen = min(i__2,i__3);
+ cnorm[j] = sasum_(&jlen, &ab[*kd + 1 - jlen + j * ab_dim1], &
+ c__1);
+/* L10: */
+ }
+ } else {
+
+/* A is lower triangular. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *kd, i__3 = *n - j;
+ jlen = min(i__2,i__3);
+ if (jlen > 0) {
+ cnorm[j] = sasum_(&jlen, &ab[j * ab_dim1 + 2], &c__1);
+ } else {
+ cnorm[j] = 0.f;
+ }
+/* L20: */
+ }
+ }
+ }
+
+/* Scale the column norms by TSCAL if the maximum element in CNORM is */
+/* greater than BIGNUM. */
+
+ imax = isamax_(n, &cnorm[1], &c__1);
+ tmax = cnorm[imax];
+ if (tmax <= bignum) {
+ tscal = 1.f;
+ } else {
+ tscal = 1.f / (smlnum * tmax);
+ sscal_(n, &tscal, &cnorm[1], &c__1);
+ }
+
+/* Compute a bound on the computed solution vector to see if the */
+/* Level 2 BLAS routine STBSV can be used. */
+
+ j = isamax_(n, &x[1], &c__1);
+ xmax = (r__1 = x[j], dabs(r__1));
+ xbnd = xmax;
+ if (notran) {
+
+/* Compute the growth in A * x = b. */
+
+ if (upper) {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ maind = *kd + 1;
+ } else {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ maind = 1;
+ }
+
+ if (tscal != 1.f) {
+ grow = 0.f;
+ goto L50;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, G(0) = max{x(i), i=1,...,n}. */
+
+ grow = 1.f / dmax(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L50;
+ }
+
+/* M(j) = G(j-1) / abs(A(j,j)) */
+
+ tjj = (r__1 = ab[maind + j * ab_dim1], dabs(r__1));
+/* Computing MIN */
+ r__1 = xbnd, r__2 = dmin(1.f,tjj) * grow;
+ xbnd = dmin(r__1,r__2);
+ if (tjj + cnorm[j] >= smlnum) {
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) / abs(A(j,j)) ) */
+
+ grow *= tjj / (tjj + cnorm[j]);
+ } else {
+
+/* G(j) could overflow, set GROW to 0. */
+
+ grow = 0.f;
+ }
+/* L30: */
+ }
+ grow = xbnd;
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = 1.f / dmax(xbnd,smlnum);
+ grow = dmin(r__1,r__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L50;
+ }
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) ) */
+
+ grow *= 1.f / (cnorm[j] + 1.f);
+/* L40: */
+ }
+ }
+L50:
+
+ ;
+ } else {
+
+/* Compute the growth in A' * x = b. */
+
+ if (upper) {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ maind = *kd + 1;
+ } else {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ maind = 1;
+ }
+
+ if (tscal != 1.f) {
+ grow = 0.f;
+ goto L80;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, M(0) = max{x(i), i=1,...,n}. */
+
+ grow = 1.f / dmax(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L80;
+ }
+
+/* G(j) = max( G(j-1), M(j-1)*( 1 + CNORM(j) ) ) */
+
+ xj = cnorm[j] + 1.f;
+/* Computing MIN */
+ r__1 = grow, r__2 = xbnd / xj;
+ grow = dmin(r__1,r__2);
+
+/* M(j) = M(j-1)*( 1 + CNORM(j) ) / abs(A(j,j)) */
+
+ tjj = (r__1 = ab[maind + j * ab_dim1], dabs(r__1));
+ if (xj > tjj) {
+ xbnd *= tjj / xj;
+ }
+/* L60: */
+ }
+ grow = dmin(grow,xbnd);
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = 1.f / dmax(xbnd,smlnum);
+ grow = dmin(r__1,r__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L80;
+ }
+
+/* G(j) = ( 1 + CNORM(j) )*G(j-1) */
+
+ xj = cnorm[j] + 1.f;
+ grow /= xj;
+/* L70: */
+ }
+ }
+L80:
+ ;
+ }
+
+ if (grow * tscal > smlnum) {
+
+/* Use the Level 2 BLAS solve if the reciprocal of the bound on */
+/* elements of X is not too small. */
+
+ stbsv_(uplo, trans, diag, n, kd, &ab[ab_offset], ldab, &x[1], &c__1);
+ } else {
+
+/* Use a Level 1 BLAS solve, scaling intermediate results. */
+
+ if (xmax > bignum) {
+
+/* Scale X so that its components are less than or equal to */
+/* BIGNUM in absolute value. */
+
+ *scale = bignum / xmax;
+ sscal_(n, scale, &x[1], &c__1);
+ xmax = bignum;
+ }
+
+ if (notran) {
+
+/* Solve A * x = b */
+
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) / A(j,j), scaling x if necessary. */
+
+ xj = (r__1 = x[j], dabs(r__1));
+ if (nounit) {
+ tjjs = ab[maind + j * ab_dim1] * tscal;
+ } else {
+ tjjs = tscal;
+ if (tscal == 1.f) {
+ goto L95;
+ }
+ }
+ tjj = dabs(tjjs);
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.f) {
+ if (xj > tjj * bignum) {
+
+/* Scale x by 1/b(j). */
+
+ rec = 1.f / xj;
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ x[j] /= tjjs;
+ xj = (r__1 = x[j], dabs(r__1));
+ } else if (tjj > 0.f) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM */
+/* to avoid overflow when dividing by A(j,j). */
+
+ rec = tjj * bignum / xj;
+ if (cnorm[j] > 1.f) {
+
+/* Scale by 1/CNORM(j) to avoid overflow when */
+/* multiplying x(j) times column j. */
+
+ rec /= cnorm[j];
+ }
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ x[j] /= tjjs;
+ xj = (r__1 = x[j], dabs(r__1));
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0, and compute a solution to A*x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ x[i__] = 0.f;
+/* L90: */
+ }
+ x[j] = 1.f;
+ xj = 1.f;
+ *scale = 0.f;
+ xmax = 0.f;
+ }
+L95:
+
+/* Scale x if necessary to avoid overflow when adding a */
+/* multiple of column j of A. */
+
+ if (xj > 1.f) {
+ rec = 1.f / xj;
+ if (cnorm[j] > (bignum - xmax) * rec) {
+
+/* Scale x by 1/(2*abs(x(j))). */
+
+ rec *= .5f;
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ } else if (xj * cnorm[j] > bignum - xmax) {
+
+/* Scale x by 1/2. */
+
+ sscal_(n, &c_b36, &x[1], &c__1);
+ *scale *= .5f;
+ }
+
+ if (upper) {
+ if (j > 1) {
+
+/* Compute the update */
+/* x(max(1,j-kd):j-1) := x(max(1,j-kd):j-1) - */
+/* x(j)* A(max(1,j-kd):j-1,j) */
+
+/* Computing MIN */
+ i__3 = *kd, i__4 = j - 1;
+ jlen = min(i__3,i__4);
+ r__1 = -x[j] * tscal;
+ saxpy_(&jlen, &r__1, &ab[*kd + 1 - jlen + j * ab_dim1]
+, &c__1, &x[j - jlen], &c__1);
+ i__3 = j - 1;
+ i__ = isamax_(&i__3, &x[1], &c__1);
+ xmax = (r__1 = x[i__], dabs(r__1));
+ }
+ } else if (j < *n) {
+
+/* Compute the update */
+/* x(j+1:min(j+kd,n)) := x(j+1:min(j+kd,n)) - */
+/* x(j) * A(j+1:min(j+kd,n),j) */
+
+/* Computing MIN */
+ i__3 = *kd, i__4 = *n - j;
+ jlen = min(i__3,i__4);
+ if (jlen > 0) {
+ r__1 = -x[j] * tscal;
+ saxpy_(&jlen, &r__1, &ab[j * ab_dim1 + 2], &c__1, &x[
+ j + 1], &c__1);
+ }
+ i__3 = *n - j;
+ i__ = j + isamax_(&i__3, &x[j + 1], &c__1);
+ xmax = (r__1 = x[i__], dabs(r__1));
+ }
+/* L100: */
+ }
+
+ } else {
+
+/* Solve A' * x = b */
+
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Compute x(j) = b(j) - sum A(k,j)*x(k). */
+/* k<>j */
+
+ xj = (r__1 = x[j], dabs(r__1));
+ uscal = tscal;
+ rec = 1.f / dmax(xmax,1.f);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5f;
+ if (nounit) {
+ tjjs = ab[maind + j * ab_dim1] * tscal;
+ } else {
+ tjjs = tscal;
+ }
+ tjj = dabs(tjjs);
+ if (tjj > 1.f) {
+
+/* Divide by A(j,j) when scaling x if A(j,j) > 1. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = rec * tjj;
+ rec = dmin(r__1,r__2);
+ uscal /= tjjs;
+ }
+ if (rec < 1.f) {
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ sumj = 0.f;
+ if (uscal == 1.f) {
+
+/* If the scaling needed for A in the dot product is 1, */
+/* call SDOT to perform the dot product. */
+
+ if (upper) {
+/* Computing MIN */
+ i__3 = *kd, i__4 = j - 1;
+ jlen = min(i__3,i__4);
+ sumj = sdot_(&jlen, &ab[*kd + 1 - jlen + j * ab_dim1],
+ &c__1, &x[j - jlen], &c__1);
+ } else {
+/* Computing MIN */
+ i__3 = *kd, i__4 = *n - j;
+ jlen = min(i__3,i__4);
+ if (jlen > 0) {
+ sumj = sdot_(&jlen, &ab[j * ab_dim1 + 2], &c__1, &
+ x[j + 1], &c__1);
+ }
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+/* Computing MIN */
+ i__3 = *kd, i__4 = j - 1;
+ jlen = min(i__3,i__4);
+ i__3 = jlen;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ sumj += ab[*kd + i__ - jlen + j * ab_dim1] *
+ uscal * x[j - jlen - 1 + i__];
+/* L110: */
+ }
+ } else {
+/* Computing MIN */
+ i__3 = *kd, i__4 = *n - j;
+ jlen = min(i__3,i__4);
+ i__3 = jlen;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ sumj += ab[i__ + 1 + j * ab_dim1] * uscal * x[j +
+ i__];
+/* L120: */
+ }
+ }
+ }
+
+ if (uscal == tscal) {
+
+/* Compute x(j) := ( x(j) - sumj ) / A(j,j) if 1/A(j,j) */
+/* was not used to scale the dotproduct. */
+
+ x[j] -= sumj;
+ xj = (r__1 = x[j], dabs(r__1));
+ if (nounit) {
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ tjjs = ab[maind + j * ab_dim1] * tscal;
+ } else {
+ tjjs = tscal;
+ if (tscal == 1.f) {
+ goto L135;
+ }
+ }
+ tjj = dabs(tjjs);
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.f) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1.f / xj;
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ x[j] /= tjjs;
+ } else if (tjj > 0.f) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ x[j] /= tjjs;
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0, and compute a solution to A'*x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ x[i__] = 0.f;
+/* L130: */
+ }
+ x[j] = 1.f;
+ *scale = 0.f;
+ xmax = 0.f;
+ }
+L135:
+ ;
+ } else {
+
+/* Compute x(j) := x(j) / A(j,j) - sumj if the dot */
+/* product has already been divided by 1/A(j,j). */
+
+ x[j] = x[j] / tjjs - sumj;
+ }
+/* Computing MAX */
+ r__2 = xmax, r__3 = (r__1 = x[j], dabs(r__1));
+ xmax = dmax(r__2,r__3);
+/* L140: */
+ }
+ }
+ *scale /= tscal;
+ }
+
+/* Scale the column norms by 1/TSCAL for return. */
+
+ if (tscal != 1.f) {
+ r__1 = 1.f / tscal;
+ sscal_(n, &r__1, &cnorm[1], &c__1);
+ }
+
+ return 0;
+
+/* End of SLATBS */
+
+} /* slatbs_ */
diff --git a/contrib/libs/clapack/slatdf.c b/contrib/libs/clapack/slatdf.c
new file mode 100644
index 0000000000..3f9bbb062c
--- /dev/null
+++ b/contrib/libs/clapack/slatdf.c
@@ -0,0 +1,301 @@
+/* slatdf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b23 = 1.f;
+static real c_b37 = -1.f;
+
+/* Subroutine */ int slatdf_(integer *ijob, integer *n, real *z__, integer *
+ ldz, real *rhs, real *rdsum, real *rdscal, integer *ipiv, integer *
+ jpiv)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k;
+ real bm, bp, xm[8], xp[8];
+ integer info;
+ real temp;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ real work[32];
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ real pmone;
+ extern doublereal sasum_(integer *, real *, integer *);
+ real sminu;
+ integer iwork[8];
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), saxpy_(integer *, real *, real *, integer *, real *,
+ integer *);
+ real splus;
+ extern /* Subroutine */ int sgesc2_(integer *, real *, integer *, real *,
+ integer *, integer *, real *), sgecon_(char *, integer *, real *,
+ integer *, real *, real *, real *, integer *, integer *),
+ slassq_(integer *, real *, integer *, real *, real *), slaswp_(
+ integer *, real *, integer *, integer *, integer *, integer *,
+ integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLATDF uses the LU factorization of the n-by-n matrix Z computed by */
+/* SGETC2 and computes a contribution to the reciprocal Dif-estimate */
+/* by solving Z * x = b for x, and choosing the r.h.s. b such that */
+/* the norm of x is as large as possible. On entry RHS = b holds the */
+/* contribution from earlier solved sub-systems, and on return RHS = x. */
+
+/* The factorization of Z returned by SGETC2 has the form Z = P*L*U*Q, */
+/* where P and Q are permutation matrices. L is lower triangular with */
+/* unit diagonal elements and U is upper triangular. */
+
+/* Arguments */
+/* ========= */
+
+/* IJOB (input) INTEGER */
+/* IJOB = 2: First compute an approximative null-vector e */
+/* of Z using SGECON, e is normalized and solve for */
+/* Zx = +-e - f with the sign giving the greater value */
+/* of 2-norm(x). About 5 times as expensive as Default. */
+/* IJOB .ne. 2: Local look ahead strategy where all entries of */
+/* the r.h.s. b is choosen as either +1 or -1 (Default). */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Z. */
+
+/* Z (input) REAL array, dimension (LDZ, N) */
+/* On entry, the LU part of the factorization of the n-by-n */
+/* matrix Z computed by SGETC2: Z = P * L * U * Q */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDA >= max(1, N). */
+
+/* RHS (input/output) REAL array, dimension N. */
+/* On entry, RHS contains contributions from other subsystems. */
+/* On exit, RHS contains the solution of the subsystem with */
+/* entries acoording to the value of IJOB (see above). */
+
+/* RDSUM (input/output) REAL */
+/* On entry, the sum of squares of computed contributions to */
+/* the Dif-estimate under computation by STGSYL, where the */
+/* scaling factor RDSCAL (see below) has been factored out. */
+/* On exit, the corresponding sum of squares updated with the */
+/* contributions from the current sub-system. */
+/* If TRANS = 'T' RDSUM is not touched. */
+/* NOTE: RDSUM only makes sense when STGSY2 is called by STGSYL. */
+
+/* RDSCAL (input/output) REAL */
+/* On entry, scaling factor used to prevent overflow in RDSUM. */
+/* On exit, RDSCAL is updated w.r.t. the current contributions */
+/* in RDSUM. */
+/* If TRANS = 'T', RDSCAL is not touched. */
+/* NOTE: RDSCAL only makes sense when STGSY2 is called by */
+/* STGSYL. */
+
+/* IPIV (input) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= i <= N, row i of the */
+/* matrix has been interchanged with row IPIV(i). */
+
+/* JPIV (input) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= j <= N, column j of the */
+/* matrix has been interchanged with column JPIV(j). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* This routine is a further developed implementation of algorithm */
+/* BSOLVE in [1] using complete pivoting in the LU factorization. */
+
+/* [1] Bo Kagstrom and Lars Westin, */
+/* Generalized Schur Methods with Condition Estimators for */
+/* Solving the Generalized Sylvester Equation, IEEE Transactions */
+/* on Automatic Control, Vol. 34, No. 7, July 1989, pp 745-751. */
+
+/* [2] Peter Poromaa, */
+/* On Efficient and Robust Estimators for the Separation */
+/* between two Regular Matrix Pairs with Applications in */
+/* Condition Estimation. Report IMINF-95.05, Departement of */
+/* Computing Science, Umea University, S-901 87 Umea, Sweden, 1995. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --rhs;
+ --ipiv;
+ --jpiv;
+
+ /* Function Body */
+ if (*ijob != 2) {
+
+/* Apply permutations IPIV to RHS */
+
+ i__1 = *n - 1;
+ slaswp_(&c__1, &rhs[1], ldz, &c__1, &i__1, &ipiv[1], &c__1);
+
+/* Solve for L-part choosing RHS either to +1 or -1. */
+
+ pmone = -1.f;
+
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ bp = rhs[j] + 1.f;
+ bm = rhs[j] - 1.f;
+ splus = 1.f;
+
+/* Look-ahead for L-part RHS(1:N-1) = + or -1, SPLUS and */
+/* SMIN computed more efficiently than in BSOLVE [1]. */
+
+ i__2 = *n - j;
+ splus += sdot_(&i__2, &z__[j + 1 + j * z_dim1], &c__1, &z__[j + 1
+ + j * z_dim1], &c__1);
+ i__2 = *n - j;
+ sminu = sdot_(&i__2, &z__[j + 1 + j * z_dim1], &c__1, &rhs[j + 1],
+ &c__1);
+ splus *= rhs[j];
+ if (splus > sminu) {
+ rhs[j] = bp;
+ } else if (sminu > splus) {
+ rhs[j] = bm;
+ } else {
+
+/* In this case the updating sums are equal and we can */
+/* choose RHS(J) +1 or -1. The first time this happens */
+/* we choose -1, thereafter +1. This is a simple way to */
+/* get good estimates of matrices like Byers well-known */
+/* example (see [1]). (Not done in BSOLVE.) */
+
+ rhs[j] += pmone;
+ pmone = 1.f;
+ }
+
+/* Compute the remaining r.h.s. */
+
+ temp = -rhs[j];
+ i__2 = *n - j;
+ saxpy_(&i__2, &temp, &z__[j + 1 + j * z_dim1], &c__1, &rhs[j + 1],
+ &c__1);
+
+/* L10: */
+ }
+
+/* Solve for U-part, look-ahead for RHS(N) = +-1. This is not done */
+/* in BSOLVE and will hopefully give us a better estimate because */
+/* any ill-conditioning of the original matrix is transfered to U */
+/* and not to L. U(N, N) is an approximation to sigma_min(LU). */
+
+ i__1 = *n - 1;
+ scopy_(&i__1, &rhs[1], &c__1, xp, &c__1);
+ xp[*n - 1] = rhs[*n] + 1.f;
+ rhs[*n] += -1.f;
+ splus = 0.f;
+ sminu = 0.f;
+ for (i__ = *n; i__ >= 1; --i__) {
+ temp = 1.f / z__[i__ + i__ * z_dim1];
+ xp[i__ - 1] *= temp;
+ rhs[i__] *= temp;
+ i__1 = *n;
+ for (k = i__ + 1; k <= i__1; ++k) {
+ xp[i__ - 1] -= xp[k - 1] * (z__[i__ + k * z_dim1] * temp);
+ rhs[i__] -= rhs[k] * (z__[i__ + k * z_dim1] * temp);
+/* L20: */
+ }
+ splus += (r__1 = xp[i__ - 1], dabs(r__1));
+ sminu += (r__1 = rhs[i__], dabs(r__1));
+/* L30: */
+ }
+ if (splus > sminu) {
+ scopy_(n, xp, &c__1, &rhs[1], &c__1);
+ }
+
+/* Apply the permutations JPIV to the computed solution (RHS) */
+
+ i__1 = *n - 1;
+ slaswp_(&c__1, &rhs[1], ldz, &c__1, &i__1, &jpiv[1], &c_n1);
+
+/* Compute the sum of squares */
+
+ slassq_(n, &rhs[1], &c__1, rdscal, rdsum);
+
+ } else {
+
+/* IJOB = 2, Compute approximate nullvector XM of Z */
+
+ sgecon_("I", n, &z__[z_offset], ldz, &c_b23, &temp, work, iwork, &
+ info);
+ scopy_(n, &work[*n], &c__1, xm, &c__1);
+
+/* Compute RHS */
+
+ i__1 = *n - 1;
+ slaswp_(&c__1, xm, ldz, &c__1, &i__1, &ipiv[1], &c_n1);
+ temp = 1.f / sqrt(sdot_(n, xm, &c__1, xm, &c__1));
+ sscal_(n, &temp, xm, &c__1);
+ scopy_(n, xm, &c__1, xp, &c__1);
+ saxpy_(n, &c_b23, &rhs[1], &c__1, xp, &c__1);
+ saxpy_(n, &c_b37, xm, &c__1, &rhs[1], &c__1);
+ sgesc2_(n, &z__[z_offset], ldz, &rhs[1], &ipiv[1], &jpiv[1], &temp);
+ sgesc2_(n, &z__[z_offset], ldz, xp, &ipiv[1], &jpiv[1], &temp);
+ if (sasum_(n, xp, &c__1) > sasum_(n, &rhs[1], &c__1)) {
+ scopy_(n, xp, &c__1, &rhs[1], &c__1);
+ }
+
+/* Compute the sum of squares */
+
+ slassq_(n, &rhs[1], &c__1, rdscal, rdsum);
+
+ }
+
+ return 0;
+
+/* End of SLATDF */
+
+} /* slatdf_ */
diff --git a/contrib/libs/clapack/slatps.c b/contrib/libs/clapack/slatps.c
new file mode 100644
index 0000000000..6ce1742fe3
--- /dev/null
+++ b/contrib/libs/clapack/slatps.c
@@ -0,0 +1,822 @@
+/* slatps.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b36 = .5f;
+
+/* Subroutine */ int slatps_(char *uplo, char *trans, char *diag, char *
+ normin, integer *n, real *ap, real *x, real *scale, real *cnorm,
+ integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ real r__1, r__2, r__3;
+
+ /* Local variables */
+ integer i__, j, ip;
+ real xj, rec, tjj;
+ integer jinc, jlen;
+ real xbnd;
+ integer imax;
+ real tmax, tjjs;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ real xmax, grow, sumj;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ real tscal, uscal;
+ integer jlast;
+ extern doublereal sasum_(integer *, real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int saxpy_(integer *, real *, real *, integer *,
+ real *, integer *), stpsv_(char *, char *, char *, integer *,
+ real *, real *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ extern integer isamax_(integer *, real *, integer *);
+ logical notran;
+ integer jfirst;
+ real smlnum;
+ logical nounit;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLATPS solves one of the triangular systems */
+
+/* A *x = s*b or A'*x = s*b */
+
+/* with scaling to prevent overflow, where A is an upper or lower */
+/* triangular matrix stored in packed form. Here A' denotes the */
+/* transpose of A, x and b are n-element vectors, and s is a scaling */
+/* factor, usually less than or equal to 1, chosen so that the */
+/* components of x will be less than the overflow threshold. If the */
+/* unscaled problem will not cause overflow, the Level 2 BLAS routine */
+/* STPSV is called. If the matrix A is singular (A(j,j) = 0 for some j), */
+/* then s is set to 0 and a non-trivial solution to A*x = 0 is returned. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the operation applied to A. */
+/* = 'N': Solve A * x = s*b (No transpose) */
+/* = 'T': Solve A'* x = s*b (Transpose) */
+/* = 'C': Solve A'* x = s*b (Conjugate transpose = Transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* NORMIN (input) CHARACTER*1 */
+/* Specifies whether CNORM has been set or not. */
+/* = 'Y': CNORM contains the column norms on entry */
+/* = 'N': CNORM is not set on entry. On exit, the norms will */
+/* be computed and stored in CNORM. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) REAL array, dimension (N*(N+1)/2) */
+/* The upper or lower triangular matrix A, packed columnwise in */
+/* a linear array. The j-th column of A is stored in the array */
+/* AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* X (input/output) REAL array, dimension (N) */
+/* On entry, the right hand side b of the triangular system. */
+/* On exit, X is overwritten by the solution vector x. */
+
+/* SCALE (output) REAL */
+/* The scaling factor s for the triangular system */
+/* A * x = s*b or A'* x = s*b. */
+/* If SCALE = 0, the matrix A is singular or badly scaled, and */
+/* the vector x is an exact or approximate solution to A*x = 0. */
+
+/* CNORM (input or output) REAL array, dimension (N) */
+
+/* If NORMIN = 'Y', CNORM is an input argument and CNORM(j) */
+/* contains the norm of the off-diagonal part of the j-th column */
+/* of A. If TRANS = 'N', CNORM(j) must be greater than or equal */
+/* to the infinity-norm, and if TRANS = 'T' or 'C', CNORM(j) */
+/* must be greater than or equal to the 1-norm. */
+
+/* If NORMIN = 'N', CNORM is an output argument and CNORM(j) */
+/* returns the 1-norm of the offdiagonal part of the j-th column */
+/* of A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* Further Details */
+/* ======= ======= */
+
+/* A rough bound on x is computed; if that is less than overflow, STPSV */
+/* is called, otherwise, specific code is used which checks for possible */
+/* overflow or divide-by-zero at every operation. */
+
+/* A columnwise scheme is used for solving A*x = b. The basic algorithm */
+/* if A is lower triangular is */
+
+/* x[1:n] := b[1:n] */
+/* for j = 1, ..., n */
+/* x(j) := x(j) / A(j,j) */
+/* x[j+1:n] := x[j+1:n] - x(j) * A[j+1:n,j] */
+/* end */
+
+/* Define bounds on the components of x after j iterations of the loop: */
+/* M(j) = bound on x[1:j] */
+/* G(j) = bound on x[j+1:n] */
+/* Initially, let M(0) = 0 and G(0) = max{x(i), i=1,...,n}. */
+
+/* Then for iteration j+1 we have */
+/* M(j+1) <= G(j) / | A(j+1,j+1) | */
+/* G(j+1) <= G(j) + M(j+1) * | A[j+2:n,j+1] | */
+/* <= G(j) ( 1 + CNORM(j+1) / | A(j+1,j+1) | ) */
+
+/* where CNORM(j+1) is greater than or equal to the infinity-norm of */
+/* column j+1 of A, not counting the diagonal. Hence */
+
+/* G(j) <= G(0) product ( 1 + CNORM(i) / | A(i,i) | ) */
+/* 1<=i<=j */
+/* and */
+
+/* |x(j)| <= ( G(0) / |A(j,j)| ) product ( 1 + CNORM(i) / |A(i,i)| ) */
+/* 1<=i< j */
+
+/* Since |x(j)| <= M(j), we use the Level 2 BLAS routine STPSV if the */
+/* reciprocal of the largest M(j), j=1,..,n, is larger than */
+/* max(underflow, 1/overflow). */
+
+/* The bound on x(j) is also used to determine when a step in the */
+/* columnwise method can be performed without fear of overflow. If */
+/* the computed bound is greater than a large constant, x is scaled to */
+/* prevent overflow, but if the bound overflows, x is set to 0, x(j) to */
+/* 1, and scale to 0, and a non-trivial solution to A*x = 0 is found. */
+
+/* Similarly, a row-wise scheme is used to solve A'*x = b. The basic */
+/* algorithm for A upper triangular is */
+
+/* for j = 1, ..., n */
+/* x(j) := ( b(j) - A[1:j-1,j]' * x[1:j-1] ) / A(j,j) */
+/* end */
+
+/* We simultaneously compute two bounds */
+/* G(j) = bound on ( b(i) - A[1:i-1,i]' * x[1:i-1] ), 1<=i<=j */
+/* M(j) = bound on x(i), 1<=i<=j */
+
+/* The initial values are G(0) = 0, M(0) = max{b(i), i=1,..,n}, and we */
+/* add the constraint G(j) >= G(j-1) and M(j) >= M(j-1) for j >= 1. */
+/* Then the bound on x(j) is */
+
+/* M(j) <= M(j-1) * ( 1 + CNORM(j) ) / | A(j,j) | */
+
+/* <= M(0) * product ( ( 1 + CNORM(i) ) / |A(i,i)| ) */
+/* 1<=i<=j */
+
+/* and we can safely call STPSV if 1/M(n) and 1/G(n) are both greater */
+/* than max(underflow, 1/overflow). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --cnorm;
+ --x;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+/* Test the input parameters. */
+
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (! lsame_(normin, "Y") && ! lsame_(normin,
+ "N")) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLATPS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine machine dependent parameters to control overflow. */
+
+ smlnum = slamch_("Safe minimum") / slamch_("Precision");
+ bignum = 1.f / smlnum;
+ *scale = 1.f;
+
+ if (lsame_(normin, "N")) {
+
+/* Compute the 1-norm of each column, not including the diagonal. */
+
+ if (upper) {
+
+/* A is upper triangular. */
+
+ ip = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ cnorm[j] = sasum_(&i__2, &ap[ip], &c__1);
+ ip += j;
+/* L10: */
+ }
+ } else {
+
+/* A is lower triangular. */
+
+ ip = 1;
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ cnorm[j] = sasum_(&i__2, &ap[ip + 1], &c__1);
+ ip = ip + *n - j + 1;
+/* L20: */
+ }
+ cnorm[*n] = 0.f;
+ }
+ }
+
+/* Scale the column norms by TSCAL if the maximum element in CNORM is */
+/* greater than BIGNUM. */
+
+ imax = isamax_(n, &cnorm[1], &c__1);
+ tmax = cnorm[imax];
+ if (tmax <= bignum) {
+ tscal = 1.f;
+ } else {
+ tscal = 1.f / (smlnum * tmax);
+ sscal_(n, &tscal, &cnorm[1], &c__1);
+ }
+
+/* Compute a bound on the computed solution vector to see if the */
+/* Level 2 BLAS routine STPSV can be used. */
+
+ j = isamax_(n, &x[1], &c__1);
+ xmax = (r__1 = x[j], dabs(r__1));
+ xbnd = xmax;
+ if (notran) {
+
+/* Compute the growth in A * x = b. */
+
+ if (upper) {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ } else {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ }
+
+ if (tscal != 1.f) {
+ grow = 0.f;
+ goto L50;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, G(0) = max{x(i), i=1,...,n}. */
+
+ grow = 1.f / dmax(xbnd,smlnum);
+ xbnd = grow;
+ ip = jfirst * (jfirst + 1) / 2;
+ jlen = *n;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L50;
+ }
+
+/* M(j) = G(j-1) / abs(A(j,j)) */
+
+ tjj = (r__1 = ap[ip], dabs(r__1));
+/* Computing MIN */
+ r__1 = xbnd, r__2 = dmin(1.f,tjj) * grow;
+ xbnd = dmin(r__1,r__2);
+ if (tjj + cnorm[j] >= smlnum) {
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) / abs(A(j,j)) ) */
+
+ grow *= tjj / (tjj + cnorm[j]);
+ } else {
+
+/* G(j) could overflow, set GROW to 0. */
+
+ grow = 0.f;
+ }
+ ip += jinc * jlen;
+ --jlen;
+/* L30: */
+ }
+ grow = xbnd;
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = 1.f / dmax(xbnd,smlnum);
+ grow = dmin(r__1,r__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L50;
+ }
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) ) */
+
+ grow *= 1.f / (cnorm[j] + 1.f);
+/* L40: */
+ }
+ }
+L50:
+
+ ;
+ } else {
+
+/* Compute the growth in A' * x = b. */
+
+ if (upper) {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ } else {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ }
+
+ if (tscal != 1.f) {
+ grow = 0.f;
+ goto L80;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, M(0) = max{x(i), i=1,...,n}. */
+
+ grow = 1.f / dmax(xbnd,smlnum);
+ xbnd = grow;
+ ip = jfirst * (jfirst + 1) / 2;
+ jlen = 1;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L80;
+ }
+
+/* G(j) = max( G(j-1), M(j-1)*( 1 + CNORM(j) ) ) */
+
+ xj = cnorm[j] + 1.f;
+/* Computing MIN */
+ r__1 = grow, r__2 = xbnd / xj;
+ grow = dmin(r__1,r__2);
+
+/* M(j) = M(j-1)*( 1 + CNORM(j) ) / abs(A(j,j)) */
+
+ tjj = (r__1 = ap[ip], dabs(r__1));
+ if (xj > tjj) {
+ xbnd *= tjj / xj;
+ }
+ ++jlen;
+ ip += jinc * jlen;
+/* L60: */
+ }
+ grow = dmin(grow,xbnd);
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = 1.f / dmax(xbnd,smlnum);
+ grow = dmin(r__1,r__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L80;
+ }
+
+/* G(j) = ( 1 + CNORM(j) )*G(j-1) */
+
+ xj = cnorm[j] + 1.f;
+ grow /= xj;
+/* L70: */
+ }
+ }
+L80:
+ ;
+ }
+
+ if (grow * tscal > smlnum) {
+
+/* Use the Level 2 BLAS solve if the reciprocal of the bound on */
+/* elements of X is not too small. */
+
+ stpsv_(uplo, trans, diag, n, &ap[1], &x[1], &c__1);
+ } else {
+
+/* Use a Level 1 BLAS solve, scaling intermediate results. */
+
+ if (xmax > bignum) {
+
+/* Scale X so that its components are less than or equal to */
+/* BIGNUM in absolute value. */
+
+ *scale = bignum / xmax;
+ sscal_(n, scale, &x[1], &c__1);
+ xmax = bignum;
+ }
+
+ if (notran) {
+
+/* Solve A * x = b */
+
+ ip = jfirst * (jfirst + 1) / 2;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) / A(j,j), scaling x if necessary. */
+
+ xj = (r__1 = x[j], dabs(r__1));
+ if (nounit) {
+ tjjs = ap[ip] * tscal;
+ } else {
+ tjjs = tscal;
+ if (tscal == 1.f) {
+ goto L95;
+ }
+ }
+ tjj = dabs(tjjs);
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.f) {
+ if (xj > tjj * bignum) {
+
+/* Scale x by 1/b(j). */
+
+ rec = 1.f / xj;
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ x[j] /= tjjs;
+ xj = (r__1 = x[j], dabs(r__1));
+ } else if (tjj > 0.f) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM */
+/* to avoid overflow when dividing by A(j,j). */
+
+ rec = tjj * bignum / xj;
+ if (cnorm[j] > 1.f) {
+
+/* Scale by 1/CNORM(j) to avoid overflow when */
+/* multiplying x(j) times column j. */
+
+ rec /= cnorm[j];
+ }
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ x[j] /= tjjs;
+ xj = (r__1 = x[j], dabs(r__1));
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0, and compute a solution to A*x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ x[i__] = 0.f;
+/* L90: */
+ }
+ x[j] = 1.f;
+ xj = 1.f;
+ *scale = 0.f;
+ xmax = 0.f;
+ }
+L95:
+
+/* Scale x if necessary to avoid overflow when adding a */
+/* multiple of column j of A. */
+
+ if (xj > 1.f) {
+ rec = 1.f / xj;
+ if (cnorm[j] > (bignum - xmax) * rec) {
+
+/* Scale x by 1/(2*abs(x(j))). */
+
+ rec *= .5f;
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ } else if (xj * cnorm[j] > bignum - xmax) {
+
+/* Scale x by 1/2. */
+
+ sscal_(n, &c_b36, &x[1], &c__1);
+ *scale *= .5f;
+ }
+
+ if (upper) {
+ if (j > 1) {
+
+/* Compute the update */
+/* x(1:j-1) := x(1:j-1) - x(j) * A(1:j-1,j) */
+
+ i__3 = j - 1;
+ r__1 = -x[j] * tscal;
+ saxpy_(&i__3, &r__1, &ap[ip - j + 1], &c__1, &x[1], &
+ c__1);
+ i__3 = j - 1;
+ i__ = isamax_(&i__3, &x[1], &c__1);
+ xmax = (r__1 = x[i__], dabs(r__1));
+ }
+ ip -= j;
+ } else {
+ if (j < *n) {
+
+/* Compute the update */
+/* x(j+1:n) := x(j+1:n) - x(j) * A(j+1:n,j) */
+
+ i__3 = *n - j;
+ r__1 = -x[j] * tscal;
+ saxpy_(&i__3, &r__1, &ap[ip + 1], &c__1, &x[j + 1], &
+ c__1);
+ i__3 = *n - j;
+ i__ = j + isamax_(&i__3, &x[j + 1], &c__1);
+ xmax = (r__1 = x[i__], dabs(r__1));
+ }
+ ip = ip + *n - j + 1;
+ }
+/* L100: */
+ }
+
+ } else {
+
+/* Solve A' * x = b */
+
+ ip = jfirst * (jfirst + 1) / 2;
+ jlen = 1;
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Compute x(j) = b(j) - sum A(k,j)*x(k). */
+/* k<>j */
+
+ xj = (r__1 = x[j], dabs(r__1));
+ uscal = tscal;
+ rec = 1.f / dmax(xmax,1.f);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5f;
+ if (nounit) {
+ tjjs = ap[ip] * tscal;
+ } else {
+ tjjs = tscal;
+ }
+ tjj = dabs(tjjs);
+ if (tjj > 1.f) {
+
+/* Divide by A(j,j) when scaling x if A(j,j) > 1. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = rec * tjj;
+ rec = dmin(r__1,r__2);
+ uscal /= tjjs;
+ }
+ if (rec < 1.f) {
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ sumj = 0.f;
+ if (uscal == 1.f) {
+
+/* If the scaling needed for A in the dot product is 1, */
+/* call SDOT to perform the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ sumj = sdot_(&i__3, &ap[ip - j + 1], &c__1, &x[1], &
+ c__1);
+ } else if (j < *n) {
+ i__3 = *n - j;
+ sumj = sdot_(&i__3, &ap[ip + 1], &c__1, &x[j + 1], &
+ c__1);
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ sumj += ap[ip - j + i__] * uscal * x[i__];
+/* L110: */
+ }
+ } else if (j < *n) {
+ i__3 = *n - j;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ sumj += ap[ip + i__] * uscal * x[j + i__];
+/* L120: */
+ }
+ }
+ }
+
+ if (uscal == tscal) {
+
+/* Compute x(j) := ( x(j) - sumj ) / A(j,j) if 1/A(j,j) */
+/* was not used to scale the dotproduct. */
+
+ x[j] -= sumj;
+ xj = (r__1 = x[j], dabs(r__1));
+ if (nounit) {
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ tjjs = ap[ip] * tscal;
+ } else {
+ tjjs = tscal;
+ if (tscal == 1.f) {
+ goto L135;
+ }
+ }
+ tjj = dabs(tjjs);
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.f) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1.f / xj;
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ x[j] /= tjjs;
+ } else if (tjj > 0.f) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ x[j] /= tjjs;
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0, and compute a solution to A'*x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ x[i__] = 0.f;
+/* L130: */
+ }
+ x[j] = 1.f;
+ *scale = 0.f;
+ xmax = 0.f;
+ }
+L135:
+ ;
+ } else {
+
+/* Compute x(j) := x(j) / A(j,j) - sumj if the dot */
+/* product has already been divided by 1/A(j,j). */
+
+ x[j] = x[j] / tjjs - sumj;
+ }
+/* Computing MAX */
+ r__2 = xmax, r__3 = (r__1 = x[j], dabs(r__1));
+ xmax = dmax(r__2,r__3);
+ ++jlen;
+ ip += jinc * jlen;
+/* L140: */
+ }
+ }
+ *scale /= tscal;
+ }
+
+/* Scale the column norms by 1/TSCAL for return. */
+
+ if (tscal != 1.f) {
+ r__1 = 1.f / tscal;
+ sscal_(n, &r__1, &cnorm[1], &c__1);
+ }
+
+ return 0;
+
+/* End of SLATPS */
+
+} /* slatps_ */
diff --git a/contrib/libs/clapack/slatrd.c b/contrib/libs/clapack/slatrd.c
new file mode 100644
index 0000000000..6be05d9707
--- /dev/null
+++ b/contrib/libs/clapack/slatrd.c
@@ -0,0 +1,351 @@
+/* slatrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b5 = -1.f;
+static real c_b6 = 1.f;
+static integer c__1 = 1;
+static real c_b16 = 0.f;
+
+/* Subroutine */ int slatrd_(char *uplo, integer *n, integer *nb, real *a,
+ integer *lda, real *e, real *tau, real *w, integer *ldw)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, iw;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ real alpha;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ sgemv_(char *, integer *, integer *, real *, real *, integer *,
+ real *, integer *, real *, real *, integer *), saxpy_(
+ integer *, real *, real *, integer *, real *, integer *), ssymv_(
+ char *, integer *, real *, real *, integer *, real *, integer *,
+ real *, real *, integer *), slarfg_(integer *, real *,
+ real *, integer *, real *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLATRD reduces NB rows and columns of a real symmetric matrix A to */
+/* symmetric tridiagonal form by an orthogonal similarity */
+/* transformation Q' * A * Q, and returns the matrices V and W which are */
+/* needed to apply the transformation to the unreduced part of A. */
+
+/* If UPLO = 'U', SLATRD reduces the last NB rows and columns of a */
+/* matrix, of which the upper triangle is supplied; */
+/* if UPLO = 'L', SLATRD reduces the first NB rows and columns of a */
+/* matrix, of which the lower triangle is supplied. */
+
+/* This is an auxiliary routine called by SSYTRD. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* NB (input) INTEGER */
+/* The number of rows and columns to be reduced. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+/* On exit: */
+/* if UPLO = 'U', the last NB columns have been reduced to */
+/* tridiagonal form, with the diagonal elements overwriting */
+/* the diagonal elements of A; the elements above the diagonal */
+/* with the array TAU, represent the orthogonal matrix Q as a */
+/* product of elementary reflectors; */
+/* if UPLO = 'L', the first NB columns have been reduced to */
+/* tridiagonal form, with the diagonal elements overwriting */
+/* the diagonal elements of A; the elements below the diagonal */
+/* with the array TAU, represent the orthogonal matrix Q as a */
+/* product of elementary reflectors. */
+/* See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= (1,N). */
+
+/* E (output) REAL array, dimension (N-1) */
+/* If UPLO = 'U', E(n-nb:n-1) contains the superdiagonal */
+/* elements of the last NB columns of the reduced matrix; */
+/* if UPLO = 'L', E(1:nb) contains the subdiagonal elements of */
+/* the first NB columns of the reduced matrix. */
+
+/* TAU (output) REAL array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors, stored in */
+/* TAU(n-nb:n-1) if UPLO = 'U', and in TAU(1:nb) if UPLO = 'L'. */
+/* See Further Details. */
+
+/* W (output) REAL array, dimension (LDW,NB) */
+/* The n-by-nb matrix W required to update the unreduced part */
+/* of A. */
+
+/* LDW (input) INTEGER */
+/* The leading dimension of the array W. LDW >= max(1,N). */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(n) H(n-1) . . . H(n-nb+1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i), */
+/* and tau in TAU(i-1). */
+
+/* If UPLO = 'L', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(1) H(2) . . . H(nb). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i), */
+/* and tau in TAU(i). */
+
+/* The elements of the vectors v together form the n-by-nb matrix V */
+/* which is needed, with W, to apply the transformation to the unreduced */
+/* part of the matrix, using a symmetric rank-2k update of the form: */
+/* A := A - V*W' - W*V'. */
+
+/* The contents of A on exit are illustrated by the following examples */
+/* with n = 5 and nb = 2: */
+
+/* if UPLO = 'U': if UPLO = 'L': */
+
+/* ( a a a v4 v5 ) ( d ) */
+/* ( a a v4 v5 ) ( 1 d ) */
+/* ( a 1 v5 ) ( v1 1 a ) */
+/* ( d 1 ) ( v1 v2 a a ) */
+/* ( d ) ( v1 v2 a a a ) */
+
+/* where d denotes a diagonal element of the reduced matrix, a denotes */
+/* an element of the original matrix that is unchanged, and vi denotes */
+/* an element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --e;
+ --tau;
+ w_dim1 = *ldw;
+ w_offset = 1 + w_dim1;
+ w -= w_offset;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+
+ if (lsame_(uplo, "U")) {
+
+/* Reduce last NB columns of upper triangle */
+
+ i__1 = *n - *nb + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ iw = i__ - *n + *nb;
+ if (i__ < *n) {
+
+/* Update A(1:i,i) */
+
+ i__2 = *n - i__;
+ sgemv_("No transpose", &i__, &i__2, &c_b5, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &w[i__ + (iw + 1) * w_dim1], ldw, &
+ c_b6, &a[i__ * a_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ sgemv_("No transpose", &i__, &i__2, &c_b5, &w[(iw + 1) *
+ w_dim1 + 1], ldw, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ c_b6, &a[i__ * a_dim1 + 1], &c__1);
+ }
+ if (i__ > 1) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(1:i-2,i) */
+
+ i__2 = i__ - 1;
+ slarfg_(&i__2, &a[i__ - 1 + i__ * a_dim1], &a[i__ * a_dim1 +
+ 1], &c__1, &tau[i__ - 1]);
+ e[i__ - 1] = a[i__ - 1 + i__ * a_dim1];
+ a[i__ - 1 + i__ * a_dim1] = 1.f;
+
+/* Compute W(1:i-1,i) */
+
+ i__2 = i__ - 1;
+ ssymv_("Upper", &i__2, &c_b6, &a[a_offset], lda, &a[i__ *
+ a_dim1 + 1], &c__1, &c_b16, &w[iw * w_dim1 + 1], &
+ c__1);
+ if (i__ < *n) {
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ sgemv_("Transpose", &i__2, &i__3, &c_b6, &w[(iw + 1) *
+ w_dim1 + 1], ldw, &a[i__ * a_dim1 + 1], &c__1, &
+ c_b16, &w[i__ + 1 + iw * w_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ sgemv_("No transpose", &i__2, &i__3, &c_b5, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &w[i__ + 1 + iw * w_dim1], &
+ c__1, &c_b6, &w[iw * w_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ sgemv_("Transpose", &i__2, &i__3, &c_b6, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &a[i__ * a_dim1 + 1], &c__1, &
+ c_b16, &w[i__ + 1 + iw * w_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ sgemv_("No transpose", &i__2, &i__3, &c_b5, &w[(iw + 1) *
+ w_dim1 + 1], ldw, &w[i__ + 1 + iw * w_dim1], &
+ c__1, &c_b6, &w[iw * w_dim1 + 1], &c__1);
+ }
+ i__2 = i__ - 1;
+ sscal_(&i__2, &tau[i__ - 1], &w[iw * w_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ alpha = tau[i__ - 1] * -.5f * sdot_(&i__2, &w[iw * w_dim1 + 1]
+, &c__1, &a[i__ * a_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ saxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &w[iw *
+ w_dim1 + 1], &c__1);
+ }
+
+/* L10: */
+ }
+ } else {
+
+/* Reduce first NB columns of lower triangle */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i:n,i) */
+
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b5, &a[i__ + a_dim1], lda,
+ &w[i__ + w_dim1], ldw, &c_b6, &a[i__ + i__ * a_dim1], &
+ c__1);
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b5, &w[i__ + w_dim1], ldw,
+ &a[i__ + a_dim1], lda, &c_b6, &a[i__ + i__ * a_dim1], &
+ c__1);
+ if (i__ < *n) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(i+2:n,i) */
+
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ slarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3, *n)+
+ i__ * a_dim1], &c__1, &tau[i__]);
+ e[i__] = a[i__ + 1 + i__ * a_dim1];
+ a[i__ + 1 + i__ * a_dim1] = 1.f;
+
+/* Compute W(i+1:n,i) */
+
+ i__2 = *n - i__;
+ ssymv_("Lower", &i__2, &c_b6, &a[i__ + 1 + (i__ + 1) * a_dim1]
+, lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b16, &w[
+ i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b6, &w[i__ + 1 + w_dim1],
+ ldw, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b16, &w[
+ i__ * w_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b5, &a[i__ + 1 +
+ a_dim1], lda, &w[i__ * w_dim1 + 1], &c__1, &c_b6, &w[
+ i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b6, &a[i__ + 1 + a_dim1],
+ lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b16, &w[
+ i__ * w_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b5, &w[i__ + 1 +
+ w_dim1], ldw, &w[i__ * w_dim1 + 1], &c__1, &c_b6, &w[
+ i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ sscal_(&i__2, &tau[i__], &w[i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ alpha = tau[i__] * -.5f * sdot_(&i__2, &w[i__ + 1 + i__ *
+ w_dim1], &c__1, &a[i__ + 1 + i__ * a_dim1], &c__1);
+ i__2 = *n - i__;
+ saxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &w[
+ i__ + 1 + i__ * w_dim1], &c__1);
+ }
+
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of SLATRD */
+
+} /* slatrd_ */
diff --git a/contrib/libs/clapack/slatrs.c b/contrib/libs/clapack/slatrs.c
new file mode 100644
index 0000000000..f9962940c5
--- /dev/null
+++ b/contrib/libs/clapack/slatrs.c
@@ -0,0 +1,813 @@
+/* slatrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b36 = .5f;
+
+/* Subroutine */ int slatrs_(char *uplo, char *trans, char *diag, char *
+ normin, integer *n, real *a, integer *lda, real *x, real *scale, real
+ *cnorm, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1, r__2, r__3;
+
+ /* Local variables */
+ integer i__, j;
+ real xj, rec, tjj;
+ integer jinc;
+ real xbnd;
+ integer imax;
+ real tmax, tjjs;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ real xmax, grow, sumj;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ real tscal, uscal;
+ integer jlast;
+ extern doublereal sasum_(integer *, real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int saxpy_(integer *, real *, real *, integer *,
+ real *, integer *), strsv_(char *, char *, char *, integer *,
+ real *, integer *, real *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ extern integer isamax_(integer *, real *, integer *);
+ logical notran;
+ integer jfirst;
+ real smlnum;
+ logical nounit;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLATRS solves one of the triangular systems */
+
+/* A *x = s*b or A'*x = s*b */
+
+/* with scaling to prevent overflow. Here A is an upper or lower */
+/* triangular matrix, A' denotes the transpose of A, x and b are */
+/* n-element vectors, and s is a scaling factor, usually less than */
+/* or equal to 1, chosen so that the components of x will be less than */
+/* the overflow threshold. If the unscaled problem will not cause */
+/* overflow, the Level 2 BLAS routine STRSV is called. If the matrix A */
+/* is singular (A(j,j) = 0 for some j), then s is set to 0 and a */
+/* non-trivial solution to A*x = 0 is returned. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the operation applied to A. */
+/* = 'N': Solve A * x = s*b (No transpose) */
+/* = 'T': Solve A'* x = s*b (Transpose) */
+/* = 'C': Solve A'* x = s*b (Conjugate transpose = Transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* NORMIN (input) CHARACTER*1 */
+/* Specifies whether CNORM has been set or not. */
+/* = 'Y': CNORM contains the column norms on entry */
+/* = 'N': CNORM is not set on entry. On exit, the norms will */
+/* be computed and stored in CNORM. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The triangular matrix A. If UPLO = 'U', the leading n by n */
+/* upper triangular part of the array A contains the upper */
+/* triangular matrix, and the strictly lower triangular part of */
+/* A is not referenced. If UPLO = 'L', the leading n by n lower */
+/* triangular part of the array A contains the lower triangular */
+/* matrix, and the strictly upper triangular part of A is not */
+/* referenced. If DIAG = 'U', the diagonal elements of A are */
+/* also not referenced and are assumed to be 1. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max (1,N). */
+
+/* X (input/output) REAL array, dimension (N) */
+/* On entry, the right hand side b of the triangular system. */
+/* On exit, X is overwritten by the solution vector x. */
+
+/* SCALE (output) REAL */
+/* The scaling factor s for the triangular system */
+/* A * x = s*b or A'* x = s*b. */
+/* If SCALE = 0, the matrix A is singular or badly scaled, and */
+/* the vector x is an exact or approximate solution to A*x = 0. */
+
+/* CNORM (input or output) REAL array, dimension (N) */
+
+/* If NORMIN = 'Y', CNORM is an input argument and CNORM(j) */
+/* contains the norm of the off-diagonal part of the j-th column */
+/* of A. If TRANS = 'N', CNORM(j) must be greater than or equal */
+/* to the infinity-norm, and if TRANS = 'T' or 'C', CNORM(j) */
+/* must be greater than or equal to the 1-norm. */
+
+/* If NORMIN = 'N', CNORM is an output argument and CNORM(j) */
+/* returns the 1-norm of the offdiagonal part of the j-th column */
+/* of A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* Further Details */
+/* ======= ======= */
+
+/* A rough bound on x is computed; if that is less than overflow, STRSV */
+/* is called, otherwise, specific code is used which checks for possible */
+/* overflow or divide-by-zero at every operation. */
+
+/* A columnwise scheme is used for solving A*x = b. The basic algorithm */
+/* if A is lower triangular is */
+
+/* x[1:n] := b[1:n] */
+/* for j = 1, ..., n */
+/* x(j) := x(j) / A(j,j) */
+/* x[j+1:n] := x[j+1:n] - x(j) * A[j+1:n,j] */
+/* end */
+
+/* Define bounds on the components of x after j iterations of the loop: */
+/* M(j) = bound on x[1:j] */
+/* G(j) = bound on x[j+1:n] */
+/* Initially, let M(0) = 0 and G(0) = max{x(i), i=1,...,n}. */
+
+/* Then for iteration j+1 we have */
+/* M(j+1) <= G(j) / | A(j+1,j+1) | */
+/* G(j+1) <= G(j) + M(j+1) * | A[j+2:n,j+1] | */
+/* <= G(j) ( 1 + CNORM(j+1) / | A(j+1,j+1) | ) */
+
+/* where CNORM(j+1) is greater than or equal to the infinity-norm of */
+/* column j+1 of A, not counting the diagonal. Hence */
+
+/* G(j) <= G(0) product ( 1 + CNORM(i) / | A(i,i) | ) */
+/* 1<=i<=j */
+/* and */
+
+/* |x(j)| <= ( G(0) / |A(j,j)| ) product ( 1 + CNORM(i) / |A(i,i)| ) */
+/* 1<=i< j */
+
+/* Since |x(j)| <= M(j), we use the Level 2 BLAS routine STRSV if the */
+/* reciprocal of the largest M(j), j=1,..,n, is larger than */
+/* max(underflow, 1/overflow). */
+
+/* The bound on x(j) is also used to determine when a step in the */
+/* columnwise method can be performed without fear of overflow. If */
+/* the computed bound is greater than a large constant, x is scaled to */
+/* prevent overflow, but if the bound overflows, x is set to 0, x(j) to */
+/* 1, and scale to 0, and a non-trivial solution to A*x = 0 is found. */
+
+/* Similarly, a row-wise scheme is used to solve A'*x = b. The basic */
+/* algorithm for A upper triangular is */
+
+/* for j = 1, ..., n */
+/* x(j) := ( b(j) - A[1:j-1,j]' * x[1:j-1] ) / A(j,j) */
+/* end */
+
+/* We simultaneously compute two bounds */
+/* G(j) = bound on ( b(i) - A[1:i-1,i]' * x[1:i-1] ), 1<=i<=j */
+/* M(j) = bound on x(i), 1<=i<=j */
+
+/* The initial values are G(0) = 0, M(0) = max{b(i), i=1,..,n}, and we */
+/* add the constraint G(j) >= G(j-1) and M(j) >= M(j-1) for j >= 1. */
+/* Then the bound on x(j) is */
+
+/* M(j) <= M(j-1) * ( 1 + CNORM(j) ) / | A(j,j) | */
+
+/* <= M(0) * product ( ( 1 + CNORM(i) ) / |A(i,i)| ) */
+/* 1<=i<=j */
+
+/* and we can safely call STRSV if 1/M(n) and 1/G(n) are both greater */
+/* than max(underflow, 1/overflow). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --cnorm;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+/* Test the input parameters. */
+
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (! lsame_(normin, "Y") && ! lsame_(normin,
+ "N")) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLATRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine machine dependent parameters to control overflow. */
+
+ smlnum = slamch_("Safe minimum") / slamch_("Precision");
+ bignum = 1.f / smlnum;
+ *scale = 1.f;
+
+ if (lsame_(normin, "N")) {
+
+/* Compute the 1-norm of each column, not including the diagonal. */
+
+ if (upper) {
+
+/* A is upper triangular. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ cnorm[j] = sasum_(&i__2, &a[j * a_dim1 + 1], &c__1);
+/* L10: */
+ }
+ } else {
+
+/* A is lower triangular. */
+
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ cnorm[j] = sasum_(&i__2, &a[j + 1 + j * a_dim1], &c__1);
+/* L20: */
+ }
+ cnorm[*n] = 0.f;
+ }
+ }
+
+/* Scale the column norms by TSCAL if the maximum element in CNORM is */
+/* greater than BIGNUM. */
+
+ imax = isamax_(n, &cnorm[1], &c__1);
+ tmax = cnorm[imax];
+ if (tmax <= bignum) {
+ tscal = 1.f;
+ } else {
+ tscal = 1.f / (smlnum * tmax);
+ sscal_(n, &tscal, &cnorm[1], &c__1);
+ }
+
+/* Compute a bound on the computed solution vector to see if the */
+/* Level 2 BLAS routine STRSV can be used. */
+
+ j = isamax_(n, &x[1], &c__1);
+ xmax = (r__1 = x[j], dabs(r__1));
+ xbnd = xmax;
+ if (notran) {
+
+/* Compute the growth in A * x = b. */
+
+ if (upper) {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ } else {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ }
+
+ if (tscal != 1.f) {
+ grow = 0.f;
+ goto L50;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, G(0) = max{x(i), i=1,...,n}. */
+
+ grow = 1.f / dmax(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L50;
+ }
+
+/* M(j) = G(j-1) / abs(A(j,j)) */
+
+ tjj = (r__1 = a[j + j * a_dim1], dabs(r__1));
+/* Computing MIN */
+ r__1 = xbnd, r__2 = dmin(1.f,tjj) * grow;
+ xbnd = dmin(r__1,r__2);
+ if (tjj + cnorm[j] >= smlnum) {
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) / abs(A(j,j)) ) */
+
+ grow *= tjj / (tjj + cnorm[j]);
+ } else {
+
+/* G(j) could overflow, set GROW to 0. */
+
+ grow = 0.f;
+ }
+/* L30: */
+ }
+ grow = xbnd;
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = 1.f / dmax(xbnd,smlnum);
+ grow = dmin(r__1,r__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L50;
+ }
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) ) */
+
+ grow *= 1.f / (cnorm[j] + 1.f);
+/* L40: */
+ }
+ }
+L50:
+
+ ;
+ } else {
+
+/* Compute the growth in A' * x = b. */
+
+ if (upper) {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ } else {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ }
+
+ if (tscal != 1.f) {
+ grow = 0.f;
+ goto L80;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, M(0) = max{x(i), i=1,...,n}. */
+
+ grow = 1.f / dmax(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L80;
+ }
+
+/* G(j) = max( G(j-1), M(j-1)*( 1 + CNORM(j) ) ) */
+
+ xj = cnorm[j] + 1.f;
+/* Computing MIN */
+ r__1 = grow, r__2 = xbnd / xj;
+ grow = dmin(r__1,r__2);
+
+/* M(j) = M(j-1)*( 1 + CNORM(j) ) / abs(A(j,j)) */
+
+ tjj = (r__1 = a[j + j * a_dim1], dabs(r__1));
+ if (xj > tjj) {
+ xbnd *= tjj / xj;
+ }
+/* L60: */
+ }
+ grow = dmin(grow,xbnd);
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = 1.f / dmax(xbnd,smlnum);
+ grow = dmin(r__1,r__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L80;
+ }
+
+/* G(j) = ( 1 + CNORM(j) )*G(j-1) */
+
+ xj = cnorm[j] + 1.f;
+ grow /= xj;
+/* L70: */
+ }
+ }
+L80:
+ ;
+ }
+
+ if (grow * tscal > smlnum) {
+
+/* Use the Level 2 BLAS solve if the reciprocal of the bound on */
+/* elements of X is not too small. */
+
+ strsv_(uplo, trans, diag, n, &a[a_offset], lda, &x[1], &c__1);
+ } else {
+
+/* Use a Level 1 BLAS solve, scaling intermediate results. */
+
+ if (xmax > bignum) {
+
+/* Scale X so that its components are less than or equal to */
+/* BIGNUM in absolute value. */
+
+ *scale = bignum / xmax;
+ sscal_(n, scale, &x[1], &c__1);
+ xmax = bignum;
+ }
+
+ if (notran) {
+
+/* Solve A * x = b */
+
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) / A(j,j), scaling x if necessary. */
+
+ xj = (r__1 = x[j], dabs(r__1));
+ if (nounit) {
+ tjjs = a[j + j * a_dim1] * tscal;
+ } else {
+ tjjs = tscal;
+ if (tscal == 1.f) {
+ goto L95;
+ }
+ }
+ tjj = dabs(tjjs);
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.f) {
+ if (xj > tjj * bignum) {
+
+/* Scale x by 1/b(j). */
+
+ rec = 1.f / xj;
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ x[j] /= tjjs;
+ xj = (r__1 = x[j], dabs(r__1));
+ } else if (tjj > 0.f) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM */
+/* to avoid overflow when dividing by A(j,j). */
+
+ rec = tjj * bignum / xj;
+ if (cnorm[j] > 1.f) {
+
+/* Scale by 1/CNORM(j) to avoid overflow when */
+/* multiplying x(j) times column j. */
+
+ rec /= cnorm[j];
+ }
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ x[j] /= tjjs;
+ xj = (r__1 = x[j], dabs(r__1));
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0, and compute a solution to A*x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ x[i__] = 0.f;
+/* L90: */
+ }
+ x[j] = 1.f;
+ xj = 1.f;
+ *scale = 0.f;
+ xmax = 0.f;
+ }
+L95:
+
+/* Scale x if necessary to avoid overflow when adding a */
+/* multiple of column j of A. */
+
+ if (xj > 1.f) {
+ rec = 1.f / xj;
+ if (cnorm[j] > (bignum - xmax) * rec) {
+
+/* Scale x by 1/(2*abs(x(j))). */
+
+ rec *= .5f;
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ } else if (xj * cnorm[j] > bignum - xmax) {
+
+/* Scale x by 1/2. */
+
+ sscal_(n, &c_b36, &x[1], &c__1);
+ *scale *= .5f;
+ }
+
+ if (upper) {
+ if (j > 1) {
+
+/* Compute the update */
+/* x(1:j-1) := x(1:j-1) - x(j) * A(1:j-1,j) */
+
+ i__3 = j - 1;
+ r__1 = -x[j] * tscal;
+ saxpy_(&i__3, &r__1, &a[j * a_dim1 + 1], &c__1, &x[1],
+ &c__1);
+ i__3 = j - 1;
+ i__ = isamax_(&i__3, &x[1], &c__1);
+ xmax = (r__1 = x[i__], dabs(r__1));
+ }
+ } else {
+ if (j < *n) {
+
+/* Compute the update */
+/* x(j+1:n) := x(j+1:n) - x(j) * A(j+1:n,j) */
+
+ i__3 = *n - j;
+ r__1 = -x[j] * tscal;
+ saxpy_(&i__3, &r__1, &a[j + 1 + j * a_dim1], &c__1, &
+ x[j + 1], &c__1);
+ i__3 = *n - j;
+ i__ = j + isamax_(&i__3, &x[j + 1], &c__1);
+ xmax = (r__1 = x[i__], dabs(r__1));
+ }
+ }
+/* L100: */
+ }
+
+ } else {
+
+/* Solve A' * x = b */
+
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Compute x(j) = b(j) - sum A(k,j)*x(k). */
+/* k<>j */
+
+ xj = (r__1 = x[j], dabs(r__1));
+ uscal = tscal;
+ rec = 1.f / dmax(xmax,1.f);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5f;
+ if (nounit) {
+ tjjs = a[j + j * a_dim1] * tscal;
+ } else {
+ tjjs = tscal;
+ }
+ tjj = dabs(tjjs);
+ if (tjj > 1.f) {
+
+/* Divide by A(j,j) when scaling x if A(j,j) > 1. */
+
+/* Computing MIN */
+ r__1 = 1.f, r__2 = rec * tjj;
+ rec = dmin(r__1,r__2);
+ uscal /= tjjs;
+ }
+ if (rec < 1.f) {
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ sumj = 0.f;
+ if (uscal == 1.f) {
+
+/* If the scaling needed for A in the dot product is 1, */
+/* call SDOT to perform the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ sumj = sdot_(&i__3, &a[j * a_dim1 + 1], &c__1, &x[1],
+ &c__1);
+ } else if (j < *n) {
+ i__3 = *n - j;
+ sumj = sdot_(&i__3, &a[j + 1 + j * a_dim1], &c__1, &x[
+ j + 1], &c__1);
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ sumj += a[i__ + j * a_dim1] * uscal * x[i__];
+/* L110: */
+ }
+ } else if (j < *n) {
+ i__3 = *n;
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ sumj += a[i__ + j * a_dim1] * uscal * x[i__];
+/* L120: */
+ }
+ }
+ }
+
+ if (uscal == tscal) {
+
+/* Compute x(j) := ( x(j) - sumj ) / A(j,j) if 1/A(j,j) */
+/* was not used to scale the dotproduct. */
+
+ x[j] -= sumj;
+ xj = (r__1 = x[j], dabs(r__1));
+ if (nounit) {
+ tjjs = a[j + j * a_dim1] * tscal;
+ } else {
+ tjjs = tscal;
+ if (tscal == 1.f) {
+ goto L135;
+ }
+ }
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ tjj = dabs(tjjs);
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.f) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1.f / xj;
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ x[j] /= tjjs;
+ } else if (tjj > 0.f) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ sscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ x[j] /= tjjs;
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0, and compute a solution to A'*x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ x[i__] = 0.f;
+/* L130: */
+ }
+ x[j] = 1.f;
+ *scale = 0.f;
+ xmax = 0.f;
+ }
+L135:
+ ;
+ } else {
+
+/* Compute x(j) := x(j) / A(j,j) - sumj if the dot */
+/* product has already been divided by 1/A(j,j). */
+
+ x[j] = x[j] / tjjs - sumj;
+ }
+/* Computing MAX */
+ r__2 = xmax, r__3 = (r__1 = x[j], dabs(r__1));
+ xmax = dmax(r__2,r__3);
+/* L140: */
+ }
+ }
+ *scale /= tscal;
+ }
+
+/* Scale the column norms by 1/TSCAL for return. */
+
+ if (tscal != 1.f) {
+ r__1 = 1.f / tscal;
+ sscal_(n, &r__1, &cnorm[1], &c__1);
+ }
+
+ return 0;
+
+/* End of SLATRS */
+
+} /* slatrs_ */
diff --git a/contrib/libs/clapack/slatrz.c b/contrib/libs/clapack/slatrz.c
new file mode 100644
index 0000000000..c6cd038317
--- /dev/null
+++ b/contrib/libs/clapack/slatrz.c
@@ -0,0 +1,162 @@
+/* slatrz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int slatrz_(integer *m, integer *n, integer *l, real *a,
+ integer *lda, real *tau, real *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__;
+ extern /* Subroutine */ int slarz_(char *, integer *, integer *, integer *
+, real *, integer *, real *, real *, integer *, real *),
+ slarfp_(integer *, real *, real *, integer *, real *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLATRZ factors the M-by-(M+L) real upper trapezoidal matrix */
+/* [ A1 A2 ] = [ A(1:M,1:M) A(1:M,N-L+1:N) ] as ( R 0 ) * Z, by means */
+/* of orthogonal transformations. Z is an (M+L)-by-(M+L) orthogonal */
+/* matrix, and R and A1 are M-by-M upper triangular matrices. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* L (input) INTEGER */
+/* The number of columns of the matrix A containing the */
+/* meaningful part of the Householder vectors. N-M >= L >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the leading M-by-N upper trapezoidal part of the */
+/* array A must contain the matrix to be factorized. */
+/* On exit, the leading M-by-M upper triangular part of A */
+/* contains the upper triangular matrix R, and elements N-L+1 to */
+/* N of the first M rows of A, with the array TAU, represent the */
+/* orthogonal matrix Z as a product of M elementary reflectors. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) REAL array, dimension (M) */
+/* The scalar factors of the elementary reflectors. */
+
+/* WORK (workspace) REAL array, dimension (M) */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* The factorization is obtained by Householder's method. The kth */
+/* transformation matrix, Z( k ), which is used to introduce zeros into */
+/* the ( m - k + 1 )th row of A, is given in the form */
+
+/* Z( k ) = ( I 0 ), */
+/* ( 0 T( k ) ) */
+
+/* where */
+
+/* T( k ) = I - tau*u( k )*u( k )', u( k ) = ( 1 ), */
+/* ( 0 ) */
+/* ( z( k ) ) */
+
+/* tau is a scalar and z( k ) is an l element vector. tau and z( k ) */
+/* are chosen to annihilate the elements of the kth row of A2. */
+
+/* The scalar tau is returned in the kth element of TAU and the vector */
+/* u( k ) in the kth row of A2, such that the elements of z( k ) are */
+/* in a( k, l + 1 ), ..., a( k, n ). The elements of R are returned in */
+/* the upper triangular part of A1. */
+
+/* Z is given by */
+
+/* Z = Z( 1 ) * Z( 2 ) * ... * Z( m ). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ if (*m == 0) {
+ return 0;
+ } else if (*m == *n) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ tau[i__] = 0.f;
+/* L10: */
+ }
+ return 0;
+ }
+
+ for (i__ = *m; i__ >= 1; --i__) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* [ A(i,i) A(i,n-l+1:n) ] */
+
+ i__1 = *l + 1;
+ slarfp_(&i__1, &a[i__ + i__ * a_dim1], &a[i__ + (*n - *l + 1) *
+ a_dim1], lda, &tau[i__]);
+
+/* Apply H(i) to A(1:i-1,i:n) from the right */
+
+ i__1 = i__ - 1;
+ i__2 = *n - i__ + 1;
+ slarz_("Right", &i__1, &i__2, l, &a[i__ + (*n - *l + 1) * a_dim1],
+ lda, &tau[i__], &a[i__ * a_dim1 + 1], lda, &work[1]);
+
+/* L20: */
+ }
+
+ return 0;
+
+/* End of SLATRZ */
+
+} /* slatrz_ */
diff --git a/contrib/libs/clapack/slatzm.c b/contrib/libs/clapack/slatzm.c
new file mode 100644
index 0000000000..48279d72bc
--- /dev/null
+++ b/contrib/libs/clapack/slatzm.c
@@ -0,0 +1,189 @@
+/* slatzm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b5 = 1.f;
+
+/* Subroutine */ int slatzm_(char *side, integer *m, integer *n, real *v,
+ integer *incv, real *tau, real *c1, real *c2, integer *ldc, real *
+ work)
+{
+ /* System generated locals */
+ integer c1_dim1, c1_offset, c2_dim1, c2_offset, i__1;
+ real r__1;
+
+ /* Local variables */
+ extern /* Subroutine */ int sger_(integer *, integer *, real *, real *,
+ integer *, real *, integer *, real *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *), scopy_(integer *, real *, integer *, real *, integer *),
+ saxpy_(integer *, real *, real *, integer *, real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine SORMRZ. */
+
+/* SLATZM applies a Householder matrix generated by STZRQF to a matrix. */
+
+/* Let P = I - tau*u*u', u = ( 1 ), */
+/* ( v ) */
+/* where v is an (m-1) vector if SIDE = 'L', or a (n-1) vector if */
+/* SIDE = 'R'. */
+
+/* If SIDE equals 'L', let */
+/* C = [ C1 ] 1 */
+/* [ C2 ] m-1 */
+/* n */
+/* Then C is overwritten by P*C. */
+
+/* If SIDE equals 'R', let */
+/* C = [ C1, C2 ] m */
+/* 1 n-1 */
+/* Then C is overwritten by C*P. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': form P * C */
+/* = 'R': form C * P */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* V (input) REAL array, dimension */
+/* (1 + (M-1)*abs(INCV)) if SIDE = 'L' */
+/* (1 + (N-1)*abs(INCV)) if SIDE = 'R' */
+/* The vector v in the representation of P. V is not used */
+/* if TAU = 0. */
+
+/* INCV (input) INTEGER */
+/* The increment between elements of v. INCV <> 0 */
+
+/* TAU (input) REAL */
+/* The value tau in the representation of P. */
+
+/* C1 (input/output) REAL array, dimension */
+/* (LDC,N) if SIDE = 'L' */
+/* (M,1) if SIDE = 'R' */
+/* On entry, the n-vector C1 if SIDE = 'L', or the m-vector C1 */
+/* if SIDE = 'R'. */
+
+/* On exit, the first row of P*C if SIDE = 'L', or the first */
+/* column of C*P if SIDE = 'R'. */
+
+/* C2 (input/output) REAL array, dimension */
+/* (LDC, N) if SIDE = 'L' */
+/* (LDC, N-1) if SIDE = 'R' */
+/* On entry, the (m - 1) x n matrix C2 if SIDE = 'L', or the */
+/* m x (n - 1) matrix C2 if SIDE = 'R'. */
+
+/* On exit, rows 2:m of P*C if SIDE = 'L', or columns 2:n of C*P */
+/* if SIDE = 'R'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the arrays C1 and C2. LDC >= max(1,M). */
+
+/* WORK (workspace) REAL array, dimension */
+/* (N) if SIDE = 'L' */
+/* (M) if SIDE = 'R' */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --v;
+ c2_dim1 = *ldc;
+ c2_offset = 1 + c2_dim1;
+ c2 -= c2_offset;
+ c1_dim1 = *ldc;
+ c1_offset = 1 + c1_dim1;
+ c1 -= c1_offset;
+ --work;
+
+ /* Function Body */
+ if (min(*m,*n) == 0 || *tau == 0.f) {
+ return 0;
+ }
+
+ if (lsame_(side, "L")) {
+
+/* w := C1 + v' * C2 */
+
+ scopy_(n, &c1[c1_offset], ldc, &work[1], &c__1);
+ i__1 = *m - 1;
+ sgemv_("Transpose", &i__1, n, &c_b5, &c2[c2_offset], ldc, &v[1], incv,
+ &c_b5, &work[1], &c__1);
+
+/* [ C1 ] := [ C1 ] - tau* [ 1 ] * w' */
+/* [ C2 ] [ C2 ] [ v ] */
+
+ r__1 = -(*tau);
+ saxpy_(n, &r__1, &work[1], &c__1, &c1[c1_offset], ldc);
+ i__1 = *m - 1;
+ r__1 = -(*tau);
+ sger_(&i__1, n, &r__1, &v[1], incv, &work[1], &c__1, &c2[c2_offset],
+ ldc);
+
+ } else if (lsame_(side, "R")) {
+
+/* w := C1 + C2 * v */
+
+ scopy_(m, &c1[c1_offset], &c__1, &work[1], &c__1);
+ i__1 = *n - 1;
+ sgemv_("No transpose", m, &i__1, &c_b5, &c2[c2_offset], ldc, &v[1],
+ incv, &c_b5, &work[1], &c__1);
+
+/* [ C1, C2 ] := [ C1, C2 ] - tau* w * [ 1 , v'] */
+
+ r__1 = -(*tau);
+ saxpy_(m, &r__1, &work[1], &c__1, &c1[c1_offset], &c__1);
+ i__1 = *n - 1;
+ r__1 = -(*tau);
+ sger_(m, &i__1, &r__1, &work[1], &c__1, &v[1], incv, &c2[c2_offset],
+ ldc);
+ }
+
+ return 0;
+
+/* End of SLATZM */
+
+} /* slatzm_ */
diff --git a/contrib/libs/clapack/slauu2.c b/contrib/libs/clapack/slauu2.c
new file mode 100644
index 0000000000..ad80d23e9c
--- /dev/null
+++ b/contrib/libs/clapack/slauu2.c
@@ -0,0 +1,180 @@
+/* slauu2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b7 = 1.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int slauu2_(char *uplo, integer *n, real *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__;
+ real aii;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ sgemv_(char *, integer *, integer *, real *, real *, integer *,
+ real *, integer *, real *, real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAUU2 computes the product U * U' or L' * L, where the triangular */
+/* factor U or L is stored in the upper or lower triangular part of */
+/* the array A. */
+
+/* If UPLO = 'U' or 'u' then the upper triangle of the result is stored, */
+/* overwriting the factor U in A. */
+/* If UPLO = 'L' or 'l' then the lower triangle of the result is stored, */
+/* overwriting the factor L in A. */
+
+/* This is the unblocked form of the algorithm, calling Level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the triangular factor stored in the array A */
+/* is upper or lower triangular: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the triangular factor U or L. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the triangular factor U or L. */
+/* On exit, if UPLO = 'U', the upper triangle of A is */
+/* overwritten with the upper triangle of the product U * U'; */
+/* if UPLO = 'L', the lower triangle of A is overwritten with */
+/* the lower triangle of the product L' * L. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLAUU2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute the product U * U'. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ aii = a[i__ + i__ * a_dim1];
+ if (i__ < *n) {
+ i__2 = *n - i__ + 1;
+ a[i__ + i__ * a_dim1] = sdot_(&i__2, &a[i__ + i__ * a_dim1],
+ lda, &a[i__ + i__ * a_dim1], lda);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ sgemv_("No transpose", &i__2, &i__3, &c_b7, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ aii, &a[i__ * a_dim1 + 1], &c__1);
+ } else {
+ sscal_(&i__, &aii, &a[i__ * a_dim1 + 1], &c__1);
+ }
+/* L10: */
+ }
+
+ } else {
+
+/* Compute the product L' * L. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ aii = a[i__ + i__ * a_dim1];
+ if (i__ < *n) {
+ i__2 = *n - i__ + 1;
+ a[i__ + i__ * a_dim1] = sdot_(&i__2, &a[i__ + i__ * a_dim1], &
+ c__1, &a[i__ + i__ * a_dim1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b7, &a[i__ + 1 + a_dim1],
+ lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &aii, &a[i__
+ + a_dim1], lda);
+ } else {
+ sscal_(&i__, &aii, &a[i__ + a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of SLAUU2 */
+
+} /* slauu2_ */
diff --git a/contrib/libs/clapack/slauum.c b/contrib/libs/clapack/slauum.c
new file mode 100644
index 0000000000..d65e5bc78c
--- /dev/null
+++ b/contrib/libs/clapack/slauum.c
@@ -0,0 +1,215 @@
+/* slauum.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b15 = 1.f;
+
+/* Subroutine */ int slauum_(char *uplo, integer *n, real *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, ib, nb;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int strmm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+), ssyrk_(char *, char *, integer
+ *, integer *, real *, real *, integer *, real *, real *, integer *
+), slauu2_(char *, integer *, real *, integer *,
+ integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SLAUUM computes the product U * U' or L' * L, where the triangular */
+/* factor U or L is stored in the upper or lower triangular part of */
+/* the array A. */
+
+/* If UPLO = 'U' or 'u' then the upper triangle of the result is stored, */
+/* overwriting the factor U in A. */
+/* If UPLO = 'L' or 'l' then the lower triangle of the result is stored, */
+/* overwriting the factor L in A. */
+
+/* This is the blocked form of the algorithm, calling Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the triangular factor stored in the array A */
+/* is upper or lower triangular: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the triangular factor U or L. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the triangular factor U or L. */
+/* On exit, if UPLO = 'U', the upper triangle of A is */
+/* overwritten with the upper triangle of the product U * U'; */
+/* if UPLO = 'L', the lower triangle of A is overwritten with */
+/* the lower triangle of the product L' * L. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLAUUM", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "SLAUUM", uplo, n, &c_n1, &c_n1, &c_n1);
+
+ if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code */
+
+ slauu2_(uplo, n, &a[a_offset], lda, info);
+ } else {
+
+/* Use blocked code */
+
+ if (upper) {
+
+/* Compute the product U * U'. */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - i__ + 1;
+ ib = min(i__3,i__4);
+ i__3 = i__ - 1;
+ strmm_("Right", "Upper", "Transpose", "Non-unit", &i__3, &ib,
+ &c_b15, &a[i__ + i__ * a_dim1], lda, &a[i__ * a_dim1
+ + 1], lda)
+ ;
+ slauu2_("Upper", &ib, &a[i__ + i__ * a_dim1], lda, info);
+ if (i__ + ib <= *n) {
+ i__3 = i__ - 1;
+ i__4 = *n - i__ - ib + 1;
+ sgemm_("No transpose", "Transpose", &i__3, &ib, &i__4, &
+ c_b15, &a[(i__ + ib) * a_dim1 + 1], lda, &a[i__ +
+ (i__ + ib) * a_dim1], lda, &c_b15, &a[i__ *
+ a_dim1 + 1], lda);
+ i__3 = *n - i__ - ib + 1;
+ ssyrk_("Upper", "No transpose", &ib, &i__3, &c_b15, &a[
+ i__ + (i__ + ib) * a_dim1], lda, &c_b15, &a[i__ +
+ i__ * a_dim1], lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute the product L' * L. */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - i__ + 1;
+ ib = min(i__3,i__4);
+ i__3 = i__ - 1;
+ strmm_("Left", "Lower", "Transpose", "Non-unit", &ib, &i__3, &
+ c_b15, &a[i__ + i__ * a_dim1], lda, &a[i__ + a_dim1],
+ lda);
+ slauu2_("Lower", &ib, &a[i__ + i__ * a_dim1], lda, info);
+ if (i__ + ib <= *n) {
+ i__3 = i__ - 1;
+ i__4 = *n - i__ - ib + 1;
+ sgemm_("Transpose", "No transpose", &ib, &i__3, &i__4, &
+ c_b15, &a[i__ + ib + i__ * a_dim1], lda, &a[i__ +
+ ib + a_dim1], lda, &c_b15, &a[i__ + a_dim1], lda);
+ i__3 = *n - i__ - ib + 1;
+ ssyrk_("Lower", "Transpose", &ib, &i__3, &c_b15, &a[i__ +
+ ib + i__ * a_dim1], lda, &c_b15, &a[i__ + i__ *
+ a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of SLAUUM */
+
+} /* slauum_ */
diff --git a/contrib/libs/clapack/sopgtr.c b/contrib/libs/clapack/sopgtr.c
new file mode 100644
index 0000000000..7296c2e9e5
--- /dev/null
+++ b/contrib/libs/clapack/sopgtr.c
@@ -0,0 +1,209 @@
+/* sopgtr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sopgtr_(char *uplo, integer *n, real *ap, real *tau,
+ real *q, integer *ldq, real *work, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, ij;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical upper;
+ extern /* Subroutine */ int sorg2l_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *), sorg2r_(integer *,
+ integer *, integer *, real *, integer *, real *, real *, integer *
+), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SOPGTR generates a real orthogonal matrix Q which is defined as the */
+/* product of n-1 elementary reflectors H(i) of order n, as returned by */
+/* SSPTRD using packed storage: */
+
+/* if UPLO = 'U', Q = H(n-1) . . . H(2) H(1), */
+
+/* if UPLO = 'L', Q = H(1) H(2) . . . H(n-1). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangular packed storage used in previous */
+/* call to SSPTRD; */
+/* = 'L': Lower triangular packed storage used in previous */
+/* call to SSPTRD. */
+
+/* N (input) INTEGER */
+/* The order of the matrix Q. N >= 0. */
+
+/* AP (input) REAL array, dimension (N*(N+1)/2) */
+/* The vectors which define the elementary reflectors, as */
+/* returned by SSPTRD. */
+
+/* TAU (input) REAL array, dimension (N-1) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SSPTRD. */
+
+/* Q (output) REAL array, dimension (LDQ,N) */
+/* The N-by-N orthogonal matrix Q. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* WORK (workspace) REAL array, dimension (N-1) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ --ap;
+ --tau;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldq < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SOPGTR", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Q was determined by a call to SSPTRD with UPLO = 'U' */
+
+/* Unpack the vectors which define the elementary reflectors and */
+/* set the last row and column of Q equal to those of the unit */
+/* matrix */
+
+ ij = 2;
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ q[i__ + j * q_dim1] = ap[ij];
+ ++ij;
+/* L10: */
+ }
+ ij += 2;
+ q[*n + j * q_dim1] = 0.f;
+/* L20: */
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ q[i__ + *n * q_dim1] = 0.f;
+/* L30: */
+ }
+ q[*n + *n * q_dim1] = 1.f;
+
+/* Generate Q(1:n-1,1:n-1) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ sorg2l_(&i__1, &i__2, &i__3, &q[q_offset], ldq, &tau[1], &work[1], &
+ iinfo);
+
+ } else {
+
+/* Q was determined by a call to SSPTRD with UPLO = 'L'. */
+
+/* Unpack the vectors which define the elementary reflectors and */
+/* set the first row and column of Q equal to those of the unit */
+/* matrix */
+
+ q[q_dim1 + 1] = 1.f;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ q[i__ + q_dim1] = 0.f;
+/* L40: */
+ }
+ ij = 3;
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ q[j * q_dim1 + 1] = 0.f;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ q[i__ + j * q_dim1] = ap[ij];
+ ++ij;
+/* L50: */
+ }
+ ij += 2;
+/* L60: */
+ }
+ if (*n > 1) {
+
+/* Generate Q(2:n,2:n) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ sorg2r_(&i__1, &i__2, &i__3, &q[(q_dim1 << 1) + 2], ldq, &tau[1],
+ &work[1], &iinfo);
+ }
+ }
+ return 0;
+
+/* End of SOPGTR */
+
+} /* sopgtr_ */
diff --git a/contrib/libs/clapack/sopmtr.c b/contrib/libs/clapack/sopmtr.c
new file mode 100644
index 0000000000..f85bfd7ef4
--- /dev/null
+++ b/contrib/libs/clapack/sopmtr.c
@@ -0,0 +1,295 @@
+/* sopmtr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sopmtr_(char *side, char *uplo, char *trans, integer *m,
+ integer *n, real *ap, real *tau, real *c__, integer *ldc, real *work,
+ integer *info)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, i1, i2, i3, ic, jc, ii, mi, ni, nq;
+ real aii;
+ logical left;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *);
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran, forwrd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SOPMTR overwrites the general real M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'T': Q**T * C C * Q**T */
+
+/* where Q is a real orthogonal matrix of order nq, with nq = m if */
+/* SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of */
+/* nq-1 elementary reflectors, as returned by SSPTRD using packed */
+/* storage: */
+
+/* if UPLO = 'U', Q = H(nq-1) . . . H(2) H(1); */
+
+/* if UPLO = 'L', Q = H(1) H(2) . . . H(nq-1). */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**T from the Left; */
+/* = 'R': apply Q or Q**T from the Right. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangular packed storage used in previous */
+/* call to SSPTRD; */
+/* = 'L': Lower triangular packed storage used in previous */
+/* call to SSPTRD. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'T': Transpose, apply Q**T. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* AP (input) REAL array, dimension */
+/* (M*(M+1)/2) if SIDE = 'L' */
+/* (N*(N+1)/2) if SIDE = 'R' */
+/* The vectors which define the elementary reflectors, as */
+/* returned by SSPTRD. AP is modified by the routine but */
+/* restored on exit. */
+
+/* TAU (input) REAL array, dimension (M-1) if SIDE = 'L' */
+/* or (N-1) if SIDE = 'R' */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SSPTRD. */
+
+/* C (input/output) REAL array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) REAL array, dimension */
+/* (N) if SIDE = 'L' */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ --ap;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ upper = lsame_(uplo, "U");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*ldc < max(1,*m)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SOPMTR", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Q was determined by a call to SSPTRD with UPLO = 'U' */
+
+ forwrd = left && notran || ! left && ! notran;
+
+ if (forwrd) {
+ i1 = 1;
+ i2 = nq - 1;
+ i3 = 1;
+ ii = 2;
+ } else {
+ i1 = nq - 1;
+ i2 = 1;
+ i3 = -1;
+ ii = nq * (nq + 1) / 2 - 1;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) is applied to C(1:i,1:n) */
+
+ mi = i__;
+ } else {
+
+/* H(i) is applied to C(1:m,1:i) */
+
+ ni = i__;
+ }
+
+/* Apply H(i) */
+
+ aii = ap[ii];
+ ap[ii] = 1.f;
+ slarf_(side, &mi, &ni, &ap[ii - i__ + 1], &c__1, &tau[i__], &c__[
+ c_offset], ldc, &work[1]);
+ ap[ii] = aii;
+
+ if (forwrd) {
+ ii = ii + i__ + 2;
+ } else {
+ ii = ii - i__ - 1;
+ }
+/* L10: */
+ }
+ } else {
+
+/* Q was determined by a call to SSPTRD with UPLO = 'L'. */
+
+ forwrd = left && ! notran || ! left && notran;
+
+ if (forwrd) {
+ i1 = 1;
+ i2 = nq - 1;
+ i3 = 1;
+ ii = 2;
+ } else {
+ i1 = nq - 1;
+ i2 = 1;
+ i3 = -1;
+ ii = nq * (nq + 1) / 2 - 1;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__2 = i2;
+ i__1 = i3;
+ for (i__ = i1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+ aii = ap[ii];
+ ap[ii] = 1.f;
+ if (left) {
+
+/* H(i) is applied to C(i+1:m,1:n) */
+
+ mi = *m - i__;
+ ic = i__ + 1;
+ } else {
+
+/* H(i) is applied to C(1:m,i+1:n) */
+
+ ni = *n - i__;
+ jc = i__ + 1;
+ }
+
+/* Apply H(i) */
+
+ slarf_(side, &mi, &ni, &ap[ii], &c__1, &tau[i__], &c__[ic + jc *
+ c_dim1], ldc, &work[1]);
+ ap[ii] = aii;
+
+ if (forwrd) {
+ ii = ii + nq - i__ + 1;
+ } else {
+ ii = ii - nq + i__ - 2;
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of SOPMTR */
+
+} /* sopmtr_ */
diff --git a/contrib/libs/clapack/sorg2l.c b/contrib/libs/clapack/sorg2l.c
new file mode 100644
index 0000000000..a668c6a81c
--- /dev/null
+++ b/contrib/libs/clapack/sorg2l.c
@@ -0,0 +1,173 @@
+/* sorg2l.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sorg2l_(integer *m, integer *n, integer *k, real *a,
+ integer *lda, real *tau, real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1;
+
+ /* Local variables */
+ integer i__, j, l, ii;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ slarf_(char *, integer *, integer *, real *, integer *, real *,
+ real *, integer *, real *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORG2L generates an m by n real matrix Q with orthonormal columns, */
+/* which is defined as the last n columns of a product of k elementary */
+/* reflectors of order m */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by SGEQLF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. M >= N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. N >= K >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the (n-k+i)-th column must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by SGEQLF in the last k columns of its array */
+/* argument A. */
+/* On exit, the m by n matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) REAL array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SGEQLF. */
+
+/* WORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0 || *n > *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *n) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORG2L", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ return 0;
+ }
+
+/* Initialise columns 1:n-k to columns of the unit matrix */
+
+ i__1 = *n - *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (l = 1; l <= i__2; ++l) {
+ a[l + j * a_dim1] = 0.f;
+/* L10: */
+ }
+ a[*m - *n + j + j * a_dim1] = 1.f;
+/* L20: */
+ }
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ii = *n - *k + i__;
+
+/* Apply H(i) to A(1:m-k+i,1:n-k+i) from the left */
+
+ a[*m - *n + ii + ii * a_dim1] = 1.f;
+ i__2 = *m - *n + ii;
+ i__3 = ii - 1;
+ slarf_("Left", &i__2, &i__3, &a[ii * a_dim1 + 1], &c__1, &tau[i__], &
+ a[a_offset], lda, &work[1]);
+ i__2 = *m - *n + ii - 1;
+ r__1 = -tau[i__];
+ sscal_(&i__2, &r__1, &a[ii * a_dim1 + 1], &c__1);
+ a[*m - *n + ii + ii * a_dim1] = 1.f - tau[i__];
+
+/* Set A(m-k+i+1:m,n-k+i) to zero */
+
+ i__2 = *m;
+ for (l = *m - *n + ii + 1; l <= i__2; ++l) {
+ a[l + ii * a_dim1] = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of SORG2L */
+
+} /* sorg2l_ */
diff --git a/contrib/libs/clapack/sorg2r.c b/contrib/libs/clapack/sorg2r.c
new file mode 100644
index 0000000000..4fbbded379
--- /dev/null
+++ b/contrib/libs/clapack/sorg2r.c
@@ -0,0 +1,175 @@
+/* sorg2r.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sorg2r_(integer *m, integer *n, integer *k, real *a,
+ integer *lda, real *tau, real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ real r__1;
+
+ /* Local variables */
+ integer i__, j, l;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ slarf_(char *, integer *, integer *, real *, integer *, real *,
+ real *, integer *, real *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORG2R generates an m by n real matrix Q with orthonormal columns, */
+/* which is defined as the first n columns of a product of k elementary */
+/* reflectors of order m */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by SGEQRF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. M >= N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. N >= K >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the i-th column must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by SGEQRF in the first k columns of its array */
+/* argument A. */
+/* On exit, the m-by-n matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) REAL array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SGEQRF. */
+
+/* WORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0 || *n > *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *n) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORG2R", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ return 0;
+ }
+
+/* Initialise columns k+1:n to columns of the unit matrix */
+
+ i__1 = *n;
+ for (j = *k + 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (l = 1; l <= i__2; ++l) {
+ a[l + j * a_dim1] = 0.f;
+/* L10: */
+ }
+ a[j + j * a_dim1] = 1.f;
+/* L20: */
+ }
+
+ for (i__ = *k; i__ >= 1; --i__) {
+
+/* Apply H(i) to A(i:m,i:n) from the left */
+
+ if (i__ < *n) {
+ a[i__ + i__ * a_dim1] = 1.f;
+ i__1 = *m - i__ + 1;
+ i__2 = *n - i__;
+ slarf_("Left", &i__1, &i__2, &a[i__ + i__ * a_dim1], &c__1, &tau[
+ i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ }
+ if (i__ < *m) {
+ i__1 = *m - i__;
+ r__1 = -tau[i__];
+ sscal_(&i__1, &r__1, &a[i__ + 1 + i__ * a_dim1], &c__1);
+ }
+ a[i__ + i__ * a_dim1] = 1.f - tau[i__];
+
+/* Set A(1:i-1,i) to zero */
+
+ i__1 = i__ - 1;
+ for (l = 1; l <= i__1; ++l) {
+ a[l + i__ * a_dim1] = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of SORG2R */
+
+} /* sorg2r_ */
diff --git a/contrib/libs/clapack/sorgbr.c b/contrib/libs/clapack/sorgbr.c
new file mode 100644
index 0000000000..8274482ea3
--- /dev/null
+++ b/contrib/libs/clapack/sorgbr.c
@@ -0,0 +1,299 @@
+/* sorgbr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int sorgbr_(char *vect, integer *m, integer *n, integer *k,
+ real *a, integer *lda, real *tau, real *work, integer *lwork, integer
+ *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, nb, mn;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical wantq;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int sorglq_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *), sorgqr_(
+ integer *, integer *, integer *, real *, integer *, real *, real *
+, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORGBR generates one of the real orthogonal matrices Q or P**T */
+/* determined by SGEBRD when reducing a real matrix A to bidiagonal */
+/* form: A = Q * B * P**T. Q and P**T are defined as products of */
+/* elementary reflectors H(i) or G(i) respectively. */
+
+/* If VECT = 'Q', A is assumed to have been an M-by-K matrix, and Q */
+/* is of order M: */
+/* if m >= k, Q = H(1) H(2) . . . H(k) and SORGBR returns the first n */
+/* columns of Q, where m >= n >= k; */
+/* if m < k, Q = H(1) H(2) . . . H(m-1) and SORGBR returns Q as an */
+/* M-by-M matrix. */
+
+/* If VECT = 'P', A is assumed to have been a K-by-N matrix, and P**T */
+/* is of order N: */
+/* if k < n, P**T = G(k) . . . G(2) G(1) and SORGBR returns the first m */
+/* rows of P**T, where n >= m >= k; */
+/* if k >= n, P**T = G(n-1) . . . G(2) G(1) and SORGBR returns P**T as */
+/* an N-by-N matrix. */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* Specifies whether the matrix Q or the matrix P**T is */
+/* required, as defined in the transformation applied by SGEBRD: */
+/* = 'Q': generate Q; */
+/* = 'P': generate P**T. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q or P**T to be returned. */
+/* M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q or P**T to be returned. */
+/* N >= 0. */
+/* If VECT = 'Q', M >= N >= min(M,K); */
+/* if VECT = 'P', N >= M >= min(N,K). */
+
+/* K (input) INTEGER */
+/* If VECT = 'Q', the number of columns in the original M-by-K */
+/* matrix reduced by SGEBRD. */
+/* If VECT = 'P', the number of rows in the original K-by-N */
+/* matrix reduced by SGEBRD. */
+/* K >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the vectors which define the elementary reflectors, */
+/* as returned by SGEBRD. */
+/* On exit, the M-by-N matrix Q or P**T. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) REAL array, dimension */
+/* (min(M,K)) if VECT = 'Q' */
+/* (min(N,K)) if VECT = 'P' */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i) or G(i), which determines Q or P**T, as */
+/* returned by SGEBRD in its array argument TAUQ or TAUP. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,min(M,N)). */
+/* For optimum performance LWORK >= min(M,N)*NB, where NB */
+/* is the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ wantq = lsame_(vect, "Q");
+ mn = min(*m,*n);
+ lquery = *lwork == -1;
+ if (! wantq && ! lsame_(vect, "P")) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0 || wantq && (*n > *m || *n < min(*m,*k)) || ! wantq && (
+ *m > *n || *m < min(*n,*k))) {
+ *info = -3;
+ } else if (*k < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*m)) {
+ *info = -6;
+ } else if (*lwork < max(1,mn) && ! lquery) {
+ *info = -9;
+ }
+
+ if (*info == 0) {
+ if (wantq) {
+ nb = ilaenv_(&c__1, "SORGQR", " ", m, n, k, &c_n1);
+ } else {
+ nb = ilaenv_(&c__1, "SORGLQ", " ", m, n, k, &c_n1);
+ }
+ lwkopt = max(1,mn) * nb;
+ work[1] = (real) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORGBR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ if (wantq) {
+
+/* Form Q, determined by a call to SGEBRD to reduce an m-by-k */
+/* matrix */
+
+ if (*m >= *k) {
+
+/* If m >= k, assume m >= n >= k */
+
+ sorgqr_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
+ iinfo);
+
+ } else {
+
+/* If m < k, assume m = n */
+
+/* Shift the vectors which define the elementary reflectors one */
+/* column to the right, and set the first row and column of Q */
+/* to those of the unit matrix */
+
+ for (j = *m; j >= 2; --j) {
+ a[j * a_dim1 + 1] = 0.f;
+ i__1 = *m;
+ for (i__ = j + 1; i__ <= i__1; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1];
+/* L10: */
+ }
+/* L20: */
+ }
+ a[a_dim1 + 1] = 1.f;
+ i__1 = *m;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ a[i__ + a_dim1] = 0.f;
+/* L30: */
+ }
+ if (*m > 1) {
+
+/* Form Q(2:m,2:m) */
+
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ sorgqr_(&i__1, &i__2, &i__3, &a[(a_dim1 << 1) + 2], lda, &tau[
+ 1], &work[1], lwork, &iinfo);
+ }
+ }
+ } else {
+
+/* Form P', determined by a call to SGEBRD to reduce a k-by-n */
+/* matrix */
+
+ if (*k < *n) {
+
+/* If k < n, assume k <= m <= n */
+
+ sorglq_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
+ iinfo);
+
+ } else {
+
+/* If k >= n, assume m = n */
+
+/* Shift the vectors which define the elementary reflectors one */
+/* row downward, and set the first row and column of P' to */
+/* those of the unit matrix */
+
+ a[a_dim1 + 1] = 1.f;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ a[i__ + a_dim1] = 0.f;
+/* L40: */
+ }
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ for (i__ = j - 1; i__ >= 2; --i__) {
+ a[i__ + j * a_dim1] = a[i__ - 1 + j * a_dim1];
+/* L50: */
+ }
+ a[j * a_dim1 + 1] = 0.f;
+/* L60: */
+ }
+ if (*n > 1) {
+
+/* Form P'(2:n,2:n) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ sorglq_(&i__1, &i__2, &i__3, &a[(a_dim1 << 1) + 2], lda, &tau[
+ 1], &work[1], lwork, &iinfo);
+ }
+ }
+ }
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SORGBR */
+
+} /* sorgbr_ */
diff --git a/contrib/libs/clapack/sorghr.c b/contrib/libs/clapack/sorghr.c
new file mode 100644
index 0000000000..3e5186c9dd
--- /dev/null
+++ b/contrib/libs/clapack/sorghr.c
@@ -0,0 +1,214 @@
+/* sorghr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int sorghr_(integer *n, integer *ilo, integer *ihi, real *a,
+ integer *lda, real *tau, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, nb, nh, iinfo;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int sorgqr_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORGHR generates a real orthogonal matrix Q which is defined as the */
+/* product of IHI-ILO elementary reflectors of order N, as returned by */
+/* SGEHRD: */
+
+/* Q = H(ilo) H(ilo+1) . . . H(ihi-1). */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix Q. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* ILO and IHI must have the same values as in the previous call */
+/* of SGEHRD. Q is equal to the unit matrix except in the */
+/* submatrix Q(ilo+1:ihi,ilo+1:ihi). */
+/* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the vectors which define the elementary reflectors, */
+/* as returned by SGEHRD. */
+/* On exit, the N-by-N orthogonal matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (input) REAL array, dimension (N-1) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SGEHRD. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= IHI-ILO. */
+/* For optimum performance LWORK >= (IHI-ILO)*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nh = *ihi - *ilo;
+ lquery = *lwork == -1;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*ilo < 1 || *ilo > max(1,*n)) {
+ *info = -2;
+ } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*lwork < max(1,nh) && ! lquery) {
+ *info = -8;
+ }
+
+ if (*info == 0) {
+ nb = ilaenv_(&c__1, "SORGQR", " ", &nh, &nh, &nh, &c_n1);
+ lwkopt = max(1,nh) * nb;
+ work[1] = (real) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORGHR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+/* Shift the vectors which define the elementary reflectors one */
+/* column to the right, and set the first ilo and the last n-ihi */
+/* rows and columns to those of the unit matrix */
+
+ i__1 = *ilo + 1;
+ for (j = *ihi; j >= i__1; --j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.f;
+/* L10: */
+ }
+ i__2 = *ihi;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1];
+/* L20: */
+ }
+ i__2 = *n;
+ for (i__ = *ihi + 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+ i__1 = *ilo;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.f;
+/* L50: */
+ }
+ a[j + j * a_dim1] = 1.f;
+/* L60: */
+ }
+ i__1 = *n;
+ for (j = *ihi + 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.f;
+/* L70: */
+ }
+ a[j + j * a_dim1] = 1.f;
+/* L80: */
+ }
+
+ if (nh > 0) {
+
+/* Generate Q(ilo+1:ihi,ilo+1:ihi) */
+
+ sorgqr_(&nh, &nh, &nh, &a[*ilo + 1 + (*ilo + 1) * a_dim1], lda, &tau[*
+ ilo], &work[1], lwork, &iinfo);
+ }
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SORGHR */
+
+} /* sorghr_ */
diff --git a/contrib/libs/clapack/sorgl2.c b/contrib/libs/clapack/sorgl2.c
new file mode 100644
index 0000000000..8b0ac9f7f4
--- /dev/null
+++ b/contrib/libs/clapack/sorgl2.c
@@ -0,0 +1,175 @@
+/* sorgl2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sorgl2_(integer *m, integer *n, integer *k, real *a,
+ integer *lda, real *tau, real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ real r__1;
+
+ /* Local variables */
+ integer i__, j, l;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ slarf_(char *, integer *, integer *, real *, integer *, real *,
+ real *, integer *, real *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORGL2 generates an m by n real matrix Q with orthonormal rows, */
+/* which is defined as the first m rows of a product of k elementary */
+/* reflectors of order n */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by SGELQF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. N >= M. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. M >= K >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the i-th row must contain the vector which defines */
+/* the elementary reflector H(i), for i = 1,2,...,k, as returned */
+/* by SGELQF in the first k rows of its array argument A. */
+/* On exit, the m-by-n matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) REAL array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SGELQF. */
+
+/* WORK (workspace) REAL array, dimension (M) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *m) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORGL2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m <= 0) {
+ return 0;
+ }
+
+ if (*k < *m) {
+
+/* Initialise rows k+1:m to rows of the unit matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (l = *k + 1; l <= i__2; ++l) {
+ a[l + j * a_dim1] = 0.f;
+/* L10: */
+ }
+ if (j > *k && j <= *m) {
+ a[j + j * a_dim1] = 1.f;
+ }
+/* L20: */
+ }
+ }
+
+ for (i__ = *k; i__ >= 1; --i__) {
+
+/* Apply H(i) to A(i:m,i:n) from the right */
+
+ if (i__ < *n) {
+ if (i__ < *m) {
+ a[i__ + i__ * a_dim1] = 1.f;
+ i__1 = *m - i__;
+ i__2 = *n - i__ + 1;
+ slarf_("Right", &i__1, &i__2, &a[i__ + i__ * a_dim1], lda, &
+ tau[i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+ }
+ i__1 = *n - i__;
+ r__1 = -tau[i__];
+ sscal_(&i__1, &r__1, &a[i__ + (i__ + 1) * a_dim1], lda);
+ }
+ a[i__ + i__ * a_dim1] = 1.f - tau[i__];
+
+/* Set A(i,1:i-1) to zero */
+
+ i__1 = i__ - 1;
+ for (l = 1; l <= i__1; ++l) {
+ a[i__ + l * a_dim1] = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of SORGL2 */
+
+} /* sorgl2_ */
diff --git a/contrib/libs/clapack/sorglq.c b/contrib/libs/clapack/sorglq.c
new file mode 100644
index 0000000000..c72fb8f81c
--- /dev/null
+++ b/contrib/libs/clapack/sorglq.c
@@ -0,0 +1,279 @@
+/* sorglq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int sorglq_(integer *m, integer *n, integer *k, real *a,
+ integer *lda, real *tau, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int sorgl2_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *), slarfb_(char *, char *,
+ char *, char *, integer *, integer *, integer *, real *, integer *
+, real *, integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORGLQ generates an M-by-N real matrix Q with orthonormal rows, */
+/* which is defined as the first M rows of a product of K elementary */
+/* reflectors of order N */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by SGELQF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. N >= M. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. M >= K >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the i-th row must contain the vector which defines */
+/* the elementary reflector H(i), for i = 1,2,...,k, as returned */
+/* by SGELQF in the first k rows of its array argument A. */
+/* On exit, the M-by-N matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) REAL array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SGELQF. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "SORGLQ", " ", m, n, k, &c_n1);
+ lwkopt = max(1,*m) * nb;
+ work[1] = (real) lwkopt;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *m) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*lwork < max(1,*m) && ! lquery) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORGLQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m <= 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *m;
+ if (nb > 1 && nb < *k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "SORGLQ", " ", m, n, k, &c_n1);
+ nx = max(i__1,i__2);
+ if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *m;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SORGLQ", " ", m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < *k && nx < *k) {
+
+/* Use blocked code after the last block. */
+/* The first kk rows are handled by the block method. */
+
+ ki = (*k - nx - 1) / nb * nb;
+/* Computing MIN */
+ i__1 = *k, i__2 = ki + nb;
+ kk = min(i__1,i__2);
+
+/* Set A(kk+1:m,1:kk) to zero. */
+
+ i__1 = kk;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = kk + 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ kk = 0;
+ }
+
+/* Use unblocked code for the last or only block. */
+
+ if (kk < *m) {
+ i__1 = *m - kk;
+ i__2 = *n - kk;
+ i__3 = *k - kk;
+ sorgl2_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
+ tau[kk + 1], &work[1], &iinfo);
+ }
+
+ if (kk > 0) {
+
+/* Use blocked code */
+
+ i__1 = -nb;
+ for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
+/* Computing MIN */
+ i__2 = nb, i__3 = *k - i__ + 1;
+ ib = min(i__2,i__3);
+ if (i__ + ib <= *m) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+ i__2 = *n - i__ + 1;
+ slarft_("Forward", "Rowwise", &i__2, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(i+ib:m,i:n) from the right */
+
+ i__2 = *m - i__ - ib + 1;
+ i__3 = *n - i__ + 1;
+ slarfb_("Right", "Transpose", "Forward", "Rowwise", &i__2, &
+ i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
+ ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib +
+ 1], &ldwork);
+ }
+
+/* Apply H' to columns i:n of current block */
+
+ i__2 = *n - i__ + 1;
+ sorgl2_(&ib, &i__2, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
+ work[1], &iinfo);
+
+/* Set columns 1:i-1 of current block to zero */
+
+ i__2 = i__ - 1;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = i__ + ib - 1;
+ for (l = i__; l <= i__3; ++l) {
+ a[l + j * a_dim1] = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ }
+
+ work[1] = (real) iws;
+ return 0;
+
+/* End of SORGLQ */
+
+} /* sorglq_ */
diff --git a/contrib/libs/clapack/sorgql.c b/contrib/libs/clapack/sorgql.c
new file mode 100644
index 0000000000..46fb3eaca9
--- /dev/null
+++ b/contrib/libs/clapack/sorgql.c
@@ -0,0 +1,288 @@
+/* sorgql.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int sorgql_(integer *m, integer *n, integer *k, real *a,
+ integer *lda, real *tau, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j, l, ib, nb, kk, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int sorg2l_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *), slarfb_(char *, char *,
+ char *, char *, integer *, integer *, integer *, real *, integer *
+, real *, integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORGQL generates an M-by-N real matrix Q with orthonormal columns, */
+/* which is defined as the last N columns of a product of K elementary */
+/* reflectors of order M */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by SGEQLF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. M >= N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. N >= K >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the (n-k+i)-th column must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by SGEQLF in the last k columns of its array */
+/* argument A. */
+/* On exit, the M-by-N matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) REAL array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SGEQLF. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0 || *n > *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *n) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+
+ if (*info == 0) {
+ if (*n == 0) {
+ lwkopt = 1;
+ } else {
+ nb = ilaenv_(&c__1, "SORGQL", " ", m, n, k, &c_n1);
+ lwkopt = *n * nb;
+ }
+ work[1] = (real) lwkopt;
+
+ if (*lwork < max(1,*n) && ! lquery) {
+ *info = -8;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORGQL", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *n;
+ if (nb > 1 && nb < *k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "SORGQL", " ", m, n, k, &c_n1);
+ nx = max(i__1,i__2);
+ if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SORGQL", " ", m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < *k && nx < *k) {
+
+/* Use blocked code after the first block. */
+/* The last kk columns are handled by the block method. */
+
+/* Computing MIN */
+ i__1 = *k, i__2 = (*k - nx + nb - 1) / nb * nb;
+ kk = min(i__1,i__2);
+
+/* Set A(m-kk+1:m,1:n-kk) to zero. */
+
+ i__1 = *n - kk;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = *m - kk + 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ kk = 0;
+ }
+
+/* Use unblocked code for the first or only block. */
+
+ i__1 = *m - kk;
+ i__2 = *n - kk;
+ i__3 = *k - kk;
+ sorg2l_(&i__1, &i__2, &i__3, &a[a_offset], lda, &tau[1], &work[1], &iinfo)
+ ;
+
+ if (kk > 0) {
+
+/* Use blocked code */
+
+ i__1 = *k;
+ i__2 = nb;
+ for (i__ = *k - kk + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = nb, i__4 = *k - i__ + 1;
+ ib = min(i__3,i__4);
+ if (*n - *k + i__ > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ i__3 = *m - *k + i__ + ib - 1;
+ slarft_("Backward", "Columnwise", &i__3, &ib, &a[(*n - *k +
+ i__) * a_dim1 + 1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(1:m-k+i+ib-1,1:n-k+i-1) from the left */
+
+ i__3 = *m - *k + i__ + ib - 1;
+ i__4 = *n - *k + i__ - 1;
+ slarfb_("Left", "No transpose", "Backward", "Columnwise", &
+ i__3, &i__4, &ib, &a[(*n - *k + i__) * a_dim1 + 1],
+ lda, &work[1], &ldwork, &a[a_offset], lda, &work[ib +
+ 1], &ldwork);
+ }
+
+/* Apply H to rows 1:m-k+i+ib-1 of current block */
+
+ i__3 = *m - *k + i__ + ib - 1;
+ sorg2l_(&i__3, &ib, &ib, &a[(*n - *k + i__) * a_dim1 + 1], lda, &
+ tau[i__], &work[1], &iinfo);
+
+/* Set rows m-k+i+ib:m of current block to zero */
+
+ i__3 = *n - *k + i__ + ib - 1;
+ for (j = *n - *k + i__; j <= i__3; ++j) {
+ i__4 = *m;
+ for (l = *m - *k + i__ + ib; l <= i__4; ++l) {
+ a[l + j * a_dim1] = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ }
+
+ work[1] = (real) iws;
+ return 0;
+
+/* End of SORGQL */
+
+} /* sorgql_ */
diff --git a/contrib/libs/clapack/sorgqr.c b/contrib/libs/clapack/sorgqr.c
new file mode 100644
index 0000000000..667255d759
--- /dev/null
+++ b/contrib/libs/clapack/sorgqr.c
@@ -0,0 +1,280 @@
+/* sorgqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int sorgqr_(integer *m, integer *n, integer *k, real *a,
+ integer *lda, real *tau, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int sorg2r_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *), slarfb_(char *, char *,
+ char *, char *, integer *, integer *, integer *, real *, integer *
+, real *, integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORGQR generates an M-by-N real matrix Q with orthonormal columns, */
+/* which is defined as the first N columns of a product of K elementary */
+/* reflectors of order M */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by SGEQRF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. M >= N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. N >= K >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the i-th column must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by SGEQRF in the first k columns of its array */
+/* argument A. */
+/* On exit, the M-by-N matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) REAL array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SGEQRF. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "SORGQR", " ", m, n, k, &c_n1);
+ lwkopt = max(1,*n) * nb;
+ work[1] = (real) lwkopt;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0 || *n > *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *n) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORGQR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *n;
+ if (nb > 1 && nb < *k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "SORGQR", " ", m, n, k, &c_n1);
+ nx = max(i__1,i__2);
+ if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SORGQR", " ", m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < *k && nx < *k) {
+
+/* Use blocked code after the last block. */
+/* The first kk columns are handled by the block method. */
+
+ ki = (*k - nx - 1) / nb * nb;
+/* Computing MIN */
+ i__1 = *k, i__2 = ki + nb;
+ kk = min(i__1,i__2);
+
+/* Set A(1:kk,kk+1:n) to zero. */
+
+ i__1 = *n;
+ for (j = kk + 1; j <= i__1; ++j) {
+ i__2 = kk;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ kk = 0;
+ }
+
+/* Use unblocked code for the last or only block. */
+
+ if (kk < *n) {
+ i__1 = *m - kk;
+ i__2 = *n - kk;
+ i__3 = *k - kk;
+ sorg2r_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
+ tau[kk + 1], &work[1], &iinfo);
+ }
+
+ if (kk > 0) {
+
+/* Use blocked code */
+
+ i__1 = -nb;
+ for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
+/* Computing MIN */
+ i__2 = nb, i__3 = *k - i__ + 1;
+ ib = min(i__2,i__3);
+ if (i__ + ib <= *n) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+ i__2 = *m - i__ + 1;
+ slarft_("Forward", "Columnwise", &i__2, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(i:m,i+ib:n) from the left */
+
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__ - ib + 1;
+ slarfb_("Left", "No transpose", "Forward", "Columnwise", &
+ i__2, &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[
+ 1], &ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, &
+ work[ib + 1], &ldwork);
+ }
+
+/* Apply H to rows i:m of current block */
+
+ i__2 = *m - i__ + 1;
+ sorg2r_(&i__2, &ib, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
+ work[1], &iinfo);
+
+/* Set rows 1:i-1 of current block to zero */
+
+ i__2 = i__ + ib - 1;
+ for (j = i__; j <= i__2; ++j) {
+ i__3 = i__ - 1;
+ for (l = 1; l <= i__3; ++l) {
+ a[l + j * a_dim1] = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ }
+
+ work[1] = (real) iws;
+ return 0;
+
+/* End of SORGQR */
+
+} /* sorgqr_ */
diff --git a/contrib/libs/clapack/sorgr2.c b/contrib/libs/clapack/sorgr2.c
new file mode 100644
index 0000000000..c47f5b731b
--- /dev/null
+++ b/contrib/libs/clapack/sorgr2.c
@@ -0,0 +1,174 @@
+/* sorgr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sorgr2_(integer *m, integer *n, integer *k, real *a,
+ integer *lda, real *tau, real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1;
+
+ /* Local variables */
+ integer i__, j, l, ii;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ slarf_(char *, integer *, integer *, real *, integer *, real *,
+ real *, integer *, real *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORGR2 generates an m by n real matrix Q with orthonormal rows, */
+/* which is defined as the last m rows of a product of k elementary */
+/* reflectors of order n */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by SGERQF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. N >= M. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. M >= K >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the (m-k+i)-th row must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by SGERQF in the last k rows of its array argument */
+/* A. */
+/* On exit, the m by n matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) REAL array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SGERQF. */
+
+/* WORK (workspace) REAL array, dimension (M) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *m) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORGR2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m <= 0) {
+ return 0;
+ }
+
+ if (*k < *m) {
+
+/* Initialise rows 1:m-k to rows of the unit matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m - *k;
+ for (l = 1; l <= i__2; ++l) {
+ a[l + j * a_dim1] = 0.f;
+/* L10: */
+ }
+ if (j > *n - *m && j <= *n - *k) {
+ a[*m - *n + j + j * a_dim1] = 1.f;
+ }
+/* L20: */
+ }
+ }
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ii = *m - *k + i__;
+
+/* Apply H(i) to A(1:m-k+i,1:n-k+i) from the right */
+
+ a[ii + (*n - *m + ii) * a_dim1] = 1.f;
+ i__2 = ii - 1;
+ i__3 = *n - *m + ii;
+ slarf_("Right", &i__2, &i__3, &a[ii + a_dim1], lda, &tau[i__], &a[
+ a_offset], lda, &work[1]);
+ i__2 = *n - *m + ii - 1;
+ r__1 = -tau[i__];
+ sscal_(&i__2, &r__1, &a[ii + a_dim1], lda);
+ a[ii + (*n - *m + ii) * a_dim1] = 1.f - tau[i__];
+
+/* Set A(m-k+i,n-k+i+1:n) to zero */
+
+ i__2 = *n;
+ for (l = *n - *m + ii + 1; l <= i__2; ++l) {
+ a[ii + l * a_dim1] = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of SORGR2 */
+
+} /* sorgr2_ */
diff --git a/contrib/libs/clapack/sorgrq.c b/contrib/libs/clapack/sorgrq.c
new file mode 100644
index 0000000000..36d853c071
--- /dev/null
+++ b/contrib/libs/clapack/sorgrq.c
@@ -0,0 +1,288 @@
+/* sorgrq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int sorgrq_(integer *m, integer *n, integer *k, real *a,
+ integer *lda, real *tau, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j, l, ib, nb, ii, kk, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int sorgr2_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *), slarfb_(char *, char *,
+ char *, char *, integer *, integer *, integer *, real *, integer *
+, real *, integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORGRQ generates an M-by-N real matrix Q with orthonormal rows, */
+/* which is defined as the last M rows of a product of K elementary */
+/* reflectors of order N */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by SGERQF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. N >= M. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. M >= K >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the (m-k+i)-th row must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by SGERQF in the last k rows of its array argument */
+/* A. */
+/* On exit, the M-by-N matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) REAL array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SGERQF. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *m) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+
+ if (*info == 0) {
+ if (*m <= 0) {
+ lwkopt = 1;
+ } else {
+ nb = ilaenv_(&c__1, "SORGRQ", " ", m, n, k, &c_n1);
+ lwkopt = *m * nb;
+ }
+ work[1] = (real) lwkopt;
+
+ if (*lwork < max(1,*m) && ! lquery) {
+ *info = -8;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORGRQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m <= 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *m;
+ if (nb > 1 && nb < *k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "SORGRQ", " ", m, n, k, &c_n1);
+ nx = max(i__1,i__2);
+ if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *m;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SORGRQ", " ", m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < *k && nx < *k) {
+
+/* Use blocked code after the first block. */
+/* The last kk rows are handled by the block method. */
+
+/* Computing MIN */
+ i__1 = *k, i__2 = (*k - nx + nb - 1) / nb * nb;
+ kk = min(i__1,i__2);
+
+/* Set A(1:m-kk,n-kk+1:n) to zero. */
+
+ i__1 = *n;
+ for (j = *n - kk + 1; j <= i__1; ++j) {
+ i__2 = *m - kk;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ kk = 0;
+ }
+
+/* Use unblocked code for the first or only block. */
+
+ i__1 = *m - kk;
+ i__2 = *n - kk;
+ i__3 = *k - kk;
+ sorgr2_(&i__1, &i__2, &i__3, &a[a_offset], lda, &tau[1], &work[1], &iinfo)
+ ;
+
+ if (kk > 0) {
+
+/* Use blocked code */
+
+ i__1 = *k;
+ i__2 = nb;
+ for (i__ = *k - kk + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = nb, i__4 = *k - i__ + 1;
+ ib = min(i__3,i__4);
+ ii = *m - *k + i__;
+ if (ii > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ i__3 = *n - *k + i__ + ib - 1;
+ slarft_("Backward", "Rowwise", &i__3, &ib, &a[ii + a_dim1],
+ lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(1:m-k+i-1,1:n-k+i+ib-1) from the right */
+
+ i__3 = ii - 1;
+ i__4 = *n - *k + i__ + ib - 1;
+ slarfb_("Right", "Transpose", "Backward", "Rowwise", &i__3, &
+ i__4, &ib, &a[ii + a_dim1], lda, &work[1], &ldwork, &
+ a[a_offset], lda, &work[ib + 1], &ldwork);
+ }
+
+/* Apply H' to columns 1:n-k+i+ib-1 of current block */
+
+ i__3 = *n - *k + i__ + ib - 1;
+ sorgr2_(&ib, &i__3, &ib, &a[ii + a_dim1], lda, &tau[i__], &work[1]
+, &iinfo);
+
+/* Set columns n-k+i+ib:n of current block to zero */
+
+ i__3 = *n;
+ for (l = *n - *k + i__ + ib; l <= i__3; ++l) {
+ i__4 = ii + ib - 1;
+ for (j = ii; j <= i__4; ++j) {
+ a[j + l * a_dim1] = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ }
+
+ work[1] = (real) iws;
+ return 0;
+
+/* End of SORGRQ */
+
+} /* sorgrq_ */
diff --git a/contrib/libs/clapack/sorgtr.c b/contrib/libs/clapack/sorgtr.c
new file mode 100644
index 0000000000..47e521e4d7
--- /dev/null
+++ b/contrib/libs/clapack/sorgtr.c
@@ -0,0 +1,250 @@
+/* sorgtr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int sorgtr_(char *uplo, integer *n, real *a, integer *lda,
+ real *tau, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, nb;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int sorgql_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *), sorgqr_(
+ integer *, integer *, integer *, real *, integer *, real *, real *
+, integer *, integer *);
+ logical lquery;
+ integer lwkopt;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORGTR generates a real orthogonal matrix Q which is defined as the */
+/* product of n-1 elementary reflectors of order N, as returned by */
+/* SSYTRD: */
+
+/* if UPLO = 'U', Q = H(n-1) . . . H(2) H(1), */
+
+/* if UPLO = 'L', Q = H(1) H(2) . . . H(n-1). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A contains elementary reflectors */
+/* from SSYTRD; */
+/* = 'L': Lower triangle of A contains elementary reflectors */
+/* from SSYTRD. */
+
+/* N (input) INTEGER */
+/* The order of the matrix Q. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the vectors which define the elementary reflectors, */
+/* as returned by SSYTRD. */
+/* On exit, the N-by-N orthogonal matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (input) REAL array, dimension (N-1) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SSYTRD. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N-1). */
+/* For optimum performance LWORK >= (N-1)*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n - 1;
+ if (*lwork < max(i__1,i__2) && ! lquery) {
+ *info = -7;
+ }
+ }
+
+ if (*info == 0) {
+ if (upper) {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "SORGQL", " ", &i__1, &i__2, &i__3, &c_n1);
+ } else {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "SORGQR", " ", &i__1, &i__2, &i__3, &c_n1);
+ }
+/* Computing MAX */
+ i__1 = 1, i__2 = *n - 1;
+ lwkopt = max(i__1,i__2) * nb;
+ work[1] = (real) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORGTR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ if (upper) {
+
+/* Q was determined by a call to SSYTRD with UPLO = 'U' */
+
+/* Shift the vectors which define the elementary reflectors one */
+/* column to the left, and set the last row and column of Q to */
+/* those of the unit matrix */
+
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + (j + 1) * a_dim1];
+/* L10: */
+ }
+ a[*n + j * a_dim1] = 0.f;
+/* L20: */
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ a[i__ + *n * a_dim1] = 0.f;
+/* L30: */
+ }
+ a[*n + *n * a_dim1] = 1.f;
+
+/* Generate Q(1:n-1,1:n-1) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ sorgql_(&i__1, &i__2, &i__3, &a[a_offset], lda, &tau[1], &work[1],
+ lwork, &iinfo);
+
+ } else {
+
+/* Q was determined by a call to SSYTRD with UPLO = 'L'. */
+
+/* Shift the vectors which define the elementary reflectors one */
+/* column to the right, and set the first row and column of Q to */
+/* those of the unit matrix */
+
+ for (j = *n; j >= 2; --j) {
+ a[j * a_dim1 + 1] = 0.f;
+ i__1 = *n;
+ for (i__ = j + 1; i__ <= i__1; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1];
+/* L40: */
+ }
+/* L50: */
+ }
+ a[a_dim1 + 1] = 1.f;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ a[i__ + a_dim1] = 0.f;
+/* L60: */
+ }
+ if (*n > 1) {
+
+/* Generate Q(2:n,2:n) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ sorgqr_(&i__1, &i__2, &i__3, &a[(a_dim1 << 1) + 2], lda, &tau[1],
+ &work[1], lwork, &iinfo);
+ }
+ }
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SORGTR */
+
+} /* sorgtr_ */
diff --git a/contrib/libs/clapack/sorm2l.c b/contrib/libs/clapack/sorm2l.c
new file mode 100644
index 0000000000..2a727ad6b2
--- /dev/null
+++ b/contrib/libs/clapack/sorm2l.c
@@ -0,0 +1,230 @@
+/* sorm2l.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sorm2l_(char *side, char *trans, integer *m, integer *n,
+ integer *k, real *a, integer *lda, real *tau, real *c__, integer *ldc,
+ real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, i1, i2, i3, mi, ni, nq;
+ real aii;
+ logical left;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *), xerbla_(
+ char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORM2L overwrites the general real m by n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'T', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'T', */
+
+/* where Q is a real orthogonal matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by SGEQLF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'T': apply Q' (Transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) REAL array, dimension (LDA,K) */
+/* The i-th column must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* SGEQLF in the last k columns of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If SIDE = 'L', LDA >= max(1,M); */
+/* if SIDE = 'R', LDA >= max(1,N). */
+
+/* TAU (input) REAL array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SGEQLF. */
+
+/* C (input/output) REAL array, dimension (LDC,N) */
+/* On entry, the m by n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) REAL array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORM2L", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if (left && notran || ! left && ! notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) is applied to C(1:m-k+i,1:n) */
+
+ mi = *m - *k + i__;
+ } else {
+
+/* H(i) is applied to C(1:m,1:n-k+i) */
+
+ ni = *n - *k + i__;
+ }
+
+/* Apply H(i) */
+
+ aii = a[nq - *k + i__ + i__ * a_dim1];
+ a[nq - *k + i__ + i__ * a_dim1] = 1.f;
+ slarf_(side, &mi, &ni, &a[i__ * a_dim1 + 1], &c__1, &tau[i__], &c__[
+ c_offset], ldc, &work[1]);
+ a[nq - *k + i__ + i__ * a_dim1] = aii;
+/* L10: */
+ }
+ return 0;
+
+/* End of SORM2L */
+
+} /* sorm2l_ */
diff --git a/contrib/libs/clapack/sorm2r.c b/contrib/libs/clapack/sorm2r.c
new file mode 100644
index 0000000000..f12cbbba30
--- /dev/null
+++ b/contrib/libs/clapack/sorm2r.c
@@ -0,0 +1,234 @@
+/* sorm2r.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sorm2r_(char *side, char *trans, integer *m, integer *n,
+ integer *k, real *a, integer *lda, real *tau, real *c__, integer *ldc,
+ real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
+ real aii;
+ logical left;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *), xerbla_(
+ char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORM2R overwrites the general real m by n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'T', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'T', */
+
+/* where Q is a real orthogonal matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by SGEQRF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'T': apply Q' (Transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) REAL array, dimension (LDA,K) */
+/* The i-th column must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* SGEQRF in the first k columns of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If SIDE = 'L', LDA >= max(1,M); */
+/* if SIDE = 'R', LDA >= max(1,N). */
+
+/* TAU (input) REAL array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SGEQRF. */
+
+/* C (input/output) REAL array, dimension (LDC,N) */
+/* On entry, the m by n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) REAL array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORM2R", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) */
+
+ aii = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.f;
+ slarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], &c__1, &tau[i__], &c__[
+ ic + jc * c_dim1], ldc, &work[1]);
+ a[i__ + i__ * a_dim1] = aii;
+/* L10: */
+ }
+ return 0;
+
+/* End of SORM2R */
+
+} /* sorm2r_ */
diff --git a/contrib/libs/clapack/sormbr.c b/contrib/libs/clapack/sormbr.c
new file mode 100644
index 0000000000..d91930cab2
--- /dev/null
+++ b/contrib/libs/clapack/sormbr.c
@@ -0,0 +1,359 @@
+/* sormbr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int sormbr_(char *vect, char *side, char *trans, integer *m,
+ integer *n, integer *k, real *a, integer *lda, real *tau, real *c__,
+ integer *ldc, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2];
+ char ch__1[3];
+ ch__1[2] = 0;
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i1, i2, nb, mi, ni, nq, nw;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ logical notran, applyq;
+ char transt[1];
+ extern /* Subroutine */ int sormlq_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* If VECT = 'Q', SORMBR overwrites the general real M-by-N matrix C */
+/* with */
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'T': Q**T * C C * Q**T */
+
+/* If VECT = 'P', SORMBR overwrites the general real M-by-N matrix C */
+/* with */
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': P * C C * P */
+/* TRANS = 'T': P**T * C C * P**T */
+
+/* Here Q and P**T are the orthogonal matrices determined by SGEBRD when */
+/* reducing a real matrix A to bidiagonal form: A = Q * B * P**T. Q and */
+/* P**T are defined as products of elementary reflectors H(i) and G(i) */
+/* respectively. */
+
+/* Let nq = m if SIDE = 'L' and nq = n if SIDE = 'R'. Thus nq is the */
+/* order of the orthogonal matrix Q or P**T that is applied. */
+
+/* If VECT = 'Q', A is assumed to have been an NQ-by-K matrix: */
+/* if nq >= k, Q = H(1) H(2) . . . H(k); */
+/* if nq < k, Q = H(1) H(2) . . . H(nq-1). */
+
+/* If VECT = 'P', A is assumed to have been a K-by-NQ matrix: */
+/* if k < nq, P = G(1) G(2) . . . G(k); */
+/* if k >= nq, P = G(1) G(2) . . . G(nq-1). */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* = 'Q': apply Q or Q**T; */
+/* = 'P': apply P or P**T. */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q, Q**T, P or P**T from the Left; */
+/* = 'R': apply Q, Q**T, P or P**T from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q or P; */
+/* = 'T': Transpose, apply Q**T or P**T. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* If VECT = 'Q', the number of columns in the original */
+/* matrix reduced by SGEBRD. */
+/* If VECT = 'P', the number of rows in the original */
+/* matrix reduced by SGEBRD. */
+/* K >= 0. */
+
+/* A (input) REAL array, dimension */
+/* (LDA,min(nq,K)) if VECT = 'Q' */
+/* (LDA,nq) if VECT = 'P' */
+/* The vectors which define the elementary reflectors H(i) and */
+/* G(i), whose products determine the matrices Q and P, as */
+/* returned by SGEBRD. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If VECT = 'Q', LDA >= max(1,nq); */
+/* if VECT = 'P', LDA >= max(1,min(nq,K)). */
+
+/* TAU (input) REAL array, dimension (min(nq,K)) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i) or G(i) which determines Q or P, as returned */
+/* by SGEBRD in the array argument TAUQ or TAUP. */
+
+/* C (input/output) REAL array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q */
+/* or P*C or P**T*C or C*P or C*P**T. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ applyq = lsame_(vect, "Q");
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q or P and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! applyq && ! lsame_(vect, "P")) {
+ *info = -1;
+ } else if (! left && ! lsame_(side, "R")) {
+ *info = -2;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*k < 0) {
+ *info = -6;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = 1, i__2 = min(nq,*k);
+ if (applyq && *lda < max(1,nq) || ! applyq && *lda < max(i__1,i__2)) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -13;
+ }
+ }
+
+ if (*info == 0) {
+ if (applyq) {
+ if (left) {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ nb = ilaenv_(&c__1, "SORMQR", ch__1, &i__1, n, &i__2, &c_n1);
+ } else {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ nb = ilaenv_(&c__1, "SORMQR", ch__1, m, &i__1, &i__2, &c_n1);
+ }
+ } else {
+ if (left) {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ nb = ilaenv_(&c__1, "SORMLQ", ch__1, &i__1, n, &i__2, &c_n1);
+ } else {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ nb = ilaenv_(&c__1, "SORMLQ", ch__1, m, &i__1, &i__2, &c_n1);
+ }
+ }
+ lwkopt = max(1,nw) * nb;
+ work[1] = (real) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORMBR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ work[1] = 1.f;
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ if (applyq) {
+
+/* Apply Q */
+
+ if (nq >= *k) {
+
+/* Q was determined by a call to SGEBRD with nq >= k */
+
+ sormqr_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], lwork, &iinfo);
+ } else if (nq > 1) {
+
+/* Q was determined by a call to SGEBRD with nq < k */
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ i1 = 2;
+ i2 = 1;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ i1 = 1;
+ i2 = 2;
+ }
+ i__1 = nq - 1;
+ sormqr_(side, trans, &mi, &ni, &i__1, &a[a_dim1 + 2], lda, &tau[1]
+, &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+ }
+ } else {
+
+/* Apply P */
+
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+ if (nq > *k) {
+
+/* P was determined by a call to SGEBRD with nq > k */
+
+ sormlq_(side, transt, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], lwork, &iinfo);
+ } else if (nq > 1) {
+
+/* P was determined by a call to SGEBRD with nq <= k */
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ i1 = 2;
+ i2 = 1;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ i1 = 1;
+ i2 = 2;
+ }
+ i__1 = nq - 1;
+ sormlq_(side, transt, &mi, &ni, &i__1, &a[(a_dim1 << 1) + 1], lda,
+ &tau[1], &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &
+ iinfo);
+ }
+ }
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SORMBR */
+
+} /* sormbr_ */
diff --git a/contrib/libs/clapack/sormhr.c b/contrib/libs/clapack/sormhr.c
new file mode 100644
index 0000000000..7fd17f731b
--- /dev/null
+++ b/contrib/libs/clapack/sormhr.c
@@ -0,0 +1,256 @@
+/* sormhr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int sormhr_(char *side, char *trans, integer *m, integer *n,
+ integer *ilo, integer *ihi, real *a, integer *lda, real *tau, real *
+ c__, integer *ldc, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i1, i2, nb, mi, nh, ni, nq, nw;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORMHR overwrites the general real M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'T': Q**T * C C * Q**T */
+
+/* where Q is a real orthogonal matrix of order nq, with nq = m if */
+/* SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of */
+/* IHI-ILO elementary reflectors, as returned by SGEHRD: */
+
+/* Q = H(ilo) H(ilo+1) . . . H(ihi-1). */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**T from the Left; */
+/* = 'R': apply Q or Q**T from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'T': Transpose, apply Q**T. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* ILO and IHI must have the same values as in the previous call */
+/* of SGEHRD. Q is equal to the unit matrix except in the */
+/* submatrix Q(ilo+1:ihi,ilo+1:ihi). */
+/* If SIDE = 'L', then 1 <= ILO <= IHI <= M, if M > 0, and */
+/* ILO = 1 and IHI = 0, if M = 0; */
+/* if SIDE = 'R', then 1 <= ILO <= IHI <= N, if N > 0, and */
+/* ILO = 1 and IHI = 0, if N = 0. */
+
+/* A (input) REAL array, dimension */
+/* (LDA,M) if SIDE = 'L' */
+/* (LDA,N) if SIDE = 'R' */
+/* The vectors which define the elementary reflectors, as */
+/* returned by SGEHRD. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* LDA >= max(1,M) if SIDE = 'L'; LDA >= max(1,N) if SIDE = 'R'. */
+
+/* TAU (input) REAL array, dimension */
+/* (M-1) if SIDE = 'L' */
+/* (N-1) if SIDE = 'R' */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SGEHRD. */
+
+/* C (input/output) REAL array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nh = *ihi - *ilo;
+ left = lsame_(side, "L");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ilo < 1 || *ilo > max(1,nq)) {
+ *info = -5;
+ } else if (*ihi < min(*ilo,nq) || *ihi > nq) {
+ *info = -6;
+ } else if (*lda < max(1,nq)) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -13;
+ }
+
+ if (*info == 0) {
+ if (left) {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ nb = ilaenv_(&c__1, "SORMQR", ch__1, &nh, n, &nh, &c_n1);
+ } else {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ nb = ilaenv_(&c__1, "SORMQR", ch__1, m, &nh, &nh, &c_n1);
+ }
+ lwkopt = max(1,nw) * nb;
+ work[1] = (real) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__2 = -(*info);
+ xerbla_("SORMHR", &i__2);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || nh == 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ if (left) {
+ mi = nh;
+ ni = *n;
+ i1 = *ilo + 1;
+ i2 = 1;
+ } else {
+ mi = *m;
+ ni = nh;
+ i1 = 1;
+ i2 = *ilo + 1;
+ }
+
+ sormqr_(side, trans, &mi, &ni, &nh, &a[*ilo + 1 + *ilo * a_dim1], lda, &
+ tau[*ilo], &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SORMHR */
+
+} /* sormhr_ */
diff --git a/contrib/libs/clapack/sorml2.c b/contrib/libs/clapack/sorml2.c
new file mode 100644
index 0000000000..c2a603852e
--- /dev/null
+++ b/contrib/libs/clapack/sorml2.c
@@ -0,0 +1,230 @@
+/* sorml2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sorml2_(char *side, char *trans, integer *m, integer *n,
+ integer *k, real *a, integer *lda, real *tau, real *c__, integer *ldc,
+ real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
+ real aii;
+ logical left;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *), xerbla_(
+ char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORML2 overwrites the general real m by n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'T', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'T', */
+
+/* where Q is a real orthogonal matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by SGELQF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'T': apply Q' (Transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) REAL array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* SGELQF in the first k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) REAL array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SGELQF. */
+
+/* C (input/output) REAL array, dimension (LDC,N) */
+/* On entry, the m by n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) REAL array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORML2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if (left && notran || ! left && ! notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) */
+
+ aii = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.f;
+ slarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], lda, &tau[i__], &c__[
+ ic + jc * c_dim1], ldc, &work[1]);
+ a[i__ + i__ * a_dim1] = aii;
+/* L10: */
+ }
+ return 0;
+
+/* End of SORML2 */
+
+} /* sorml2_ */
diff --git a/contrib/libs/clapack/sormlq.c b/contrib/libs/clapack/sormlq.c
new file mode 100644
index 0000000000..219eb7871c
--- /dev/null
+++ b/contrib/libs/clapack/sormlq.c
@@ -0,0 +1,335 @@
+/* sormlq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int sormlq_(char *side, char *trans, integer *m, integer *n,
+ integer *k, real *a, integer *lda, real *tau, real *c__, integer *ldc,
+ real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[3];
+ ch__1[2] = 0;
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ real t[4160] /* was [65][64] */;
+ integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ extern /* Subroutine */ int sorml2_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *), slarfb_(char *, char *, char *, char *
+, integer *, integer *, integer *, real *, integer *, real *,
+ integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ logical notran;
+ integer ldwork;
+ char transt[1];
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORMLQ overwrites the general real M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'T': Q**T * C C * Q**T */
+
+/* where Q is a real orthogonal matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by SGELQF. Q is of order M if SIDE = 'L' and of order N */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**T from the Left; */
+/* = 'R': apply Q or Q**T from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'T': Transpose, apply Q**T. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) REAL array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* SGELQF in the first k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) REAL array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SGELQF. */
+
+/* C (input/output) REAL array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/* Determine the block size. NB may be at most NBMAX, where NBMAX */
+/* is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "SORMLQ", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1] = (real) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORMLQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SORMLQ", ch__1, m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ sorml2_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if (left && notran || ! left && ! notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+ i__4 = nq - i__ + 1;
+ slarft_("Forward", "Rowwise", &i__4, &ib, &a[i__ + i__ * a_dim1],
+ lda, &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H or H' */
+
+ slarfb_(side, transt, "Forward", "Rowwise", &mi, &ni, &ib, &a[i__
+ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1],
+ ldc, &work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SORMLQ */
+
+} /* sormlq_ */
diff --git a/contrib/libs/clapack/sormql.c b/contrib/libs/clapack/sormql.c
new file mode 100644
index 0000000000..713b929b89
--- /dev/null
+++ b/contrib/libs/clapack/sormql.c
@@ -0,0 +1,329 @@
+/* sormql.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int sormql_(char *side, char *trans, integer *m, integer *n,
+ integer *k, real *a, integer *lda, real *tau, real *c__, integer *ldc,
+ real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[3];
+ ch__1[2] = 0;
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ real t[4160] /* was [65][64] */;
+ integer i1, i2, i3, ib, nb, mi, ni, nq, nw, iws;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ extern /* Subroutine */ int sorm2l_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *), slarfb_(char *, char *, char *, char *
+, integer *, integer *, integer *, real *, integer *, real *,
+ integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ logical notran;
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORMQL overwrites the general real M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'T': Q**T * C C * Q**T */
+
+/* where Q is a real orthogonal matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by SGEQLF. Q is of order M if SIDE = 'L' and of order N */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**T from the Left; */
+/* = 'R': apply Q or Q**T from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'T': Transpose, apply Q**T. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) REAL array, dimension (LDA,K) */
+/* The i-th column must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* SGEQLF in the last k columns of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If SIDE = 'L', LDA >= max(1,M); */
+/* if SIDE = 'R', LDA >= max(1,N). */
+
+/* TAU (input) REAL array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SGEQLF. */
+
+/* C (input/output) REAL array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = max(1,*n);
+ } else {
+ nq = *n;
+ nw = max(1,*m);
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+
+ if (*info == 0) {
+ if (*m == 0 || *n == 0) {
+ lwkopt = 1;
+ } else {
+
+/* Determine the block size. NB may be at most NBMAX, where */
+/* NBMAX is used to define the local array T. */
+
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "SORMQL", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ lwkopt = nw * nb;
+ }
+ work[1] = (real) lwkopt;
+
+ if (*lwork < nw && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORMQL", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SORMQL", ch__1, m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ sorm2l_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if (left && notran || ! left && ! notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ i__4 = nq - *k + i__ + ib - 1;
+ slarft_("Backward", "Columnwise", &i__4, &ib, &a[i__ * a_dim1 + 1]
+, lda, &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(1:m-k+i+ib-1,1:n) */
+
+ mi = *m - *k + i__ + ib - 1;
+ } else {
+
+/* H or H' is applied to C(1:m,1:n-k+i+ib-1) */
+
+ ni = *n - *k + i__ + ib - 1;
+ }
+
+/* Apply H or H' */
+
+ slarfb_(side, trans, "Backward", "Columnwise", &mi, &ni, &ib, &a[
+ i__ * a_dim1 + 1], lda, t, &c__65, &c__[c_offset], ldc, &
+ work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SORMQL */
+
+} /* sormql_ */
diff --git a/contrib/libs/clapack/sormqr.c b/contrib/libs/clapack/sormqr.c
new file mode 100644
index 0000000000..89d15071f0
--- /dev/null
+++ b/contrib/libs/clapack/sormqr.c
@@ -0,0 +1,328 @@
+/* sormqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int sormqr_(char *side, char *trans, integer *m, integer *n,
+ integer *k, real *a, integer *lda, real *tau, real *c__, integer *ldc,
+ real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[3];
+ ch__1[2] = 0;
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ real t[4160] /* was [65][64] */;
+ integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ extern /* Subroutine */ int sorm2r_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *), slarfb_(char *, char *, char *, char *
+, integer *, integer *, integer *, real *, integer *, real *,
+ integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ logical notran;
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORMQR overwrites the general real M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'T': Q**T * C C * Q**T */
+
+/* where Q is a real orthogonal matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by SGEQRF. Q is of order M if SIDE = 'L' and of order N */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**T from the Left; */
+/* = 'R': apply Q or Q**T from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'T': Transpose, apply Q**T. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) REAL array, dimension (LDA,K) */
+/* The i-th column must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* SGEQRF in the first k columns of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If SIDE = 'L', LDA >= max(1,M); */
+/* if SIDE = 'R', LDA >= max(1,N). */
+
+/* TAU (input) REAL array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SGEQRF. */
+
+/* C (input/output) REAL array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/* Determine the block size. NB may be at most NBMAX, where NBMAX */
+/* is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "SORMQR", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1] = (real) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORMQR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SORMQR", ch__1, m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ sorm2r_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+ i__4 = nq - i__ + 1;
+ slarft_("Forward", "Columnwise", &i__4, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], t, &c__65)
+ ;
+ if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H or H' */
+
+ slarfb_(side, trans, "Forward", "Columnwise", &mi, &ni, &ib, &a[
+ i__ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc *
+ c_dim1], ldc, &work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SORMQR */
+
+} /* sormqr_ */
diff --git a/contrib/libs/clapack/sormr2.c b/contrib/libs/clapack/sormr2.c
new file mode 100644
index 0000000000..b3bc353852
--- /dev/null
+++ b/contrib/libs/clapack/sormr2.c
@@ -0,0 +1,226 @@
+/* sormr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sormr2_(char *side, char *trans, integer *m, integer *n,
+ integer *k, real *a, integer *lda, real *tau, real *c__, integer *ldc,
+ real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, i1, i2, i3, mi, ni, nq;
+ real aii;
+ logical left;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *), xerbla_(
+ char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORMR2 overwrites the general real m by n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'T', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'T', */
+
+/* where Q is a real orthogonal matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by SGERQF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'T': apply Q' (Transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) REAL array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* SGERQF in the last k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) REAL array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SGERQF. */
+
+/* C (input/output) REAL array, dimension (LDC,N) */
+/* On entry, the m by n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) REAL array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORMR2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) is applied to C(1:m-k+i,1:n) */
+
+ mi = *m - *k + i__;
+ } else {
+
+/* H(i) is applied to C(1:m,1:n-k+i) */
+
+ ni = *n - *k + i__;
+ }
+
+/* Apply H(i) */
+
+ aii = a[i__ + (nq - *k + i__) * a_dim1];
+ a[i__ + (nq - *k + i__) * a_dim1] = 1.f;
+ slarf_(side, &mi, &ni, &a[i__ + a_dim1], lda, &tau[i__], &c__[
+ c_offset], ldc, &work[1]);
+ a[i__ + (nq - *k + i__) * a_dim1] = aii;
+/* L10: */
+ }
+ return 0;
+
+/* End of SORMR2 */
+
+} /* sormr2_ */
diff --git a/contrib/libs/clapack/sormr3.c b/contrib/libs/clapack/sormr3.c
new file mode 100644
index 0000000000..33ad1e817c
--- /dev/null
+++ b/contrib/libs/clapack/sormr3.c
@@ -0,0 +1,241 @@
+/* sormr3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sormr3_(char *side, char *trans, integer *m, integer *n,
+ integer *k, integer *l, real *a, integer *lda, real *tau, real *c__,
+ integer *ldc, real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, i1, i2, i3, ja, ic, jc, mi, ni, nq;
+ logical left;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int slarz_(char *, integer *, integer *, integer *
+, real *, integer *, real *, real *, integer *, real *),
+ xerbla_(char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORMR3 overwrites the general real m by n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'T', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'T', */
+
+/* where Q is a real orthogonal matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by STZRZF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'T': apply Q' (Transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* L (input) INTEGER */
+/* The number of columns of the matrix A containing */
+/* the meaningful part of the Householder reflectors. */
+/* If SIDE = 'L', M >= L >= 0, if SIDE = 'R', N >= L >= 0. */
+
+/* A (input) REAL array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* STZRZF in the last k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) REAL array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by STZRZF. */
+
+/* C (input/output) REAL array, dimension (LDC,N) */
+/* On entry, the m-by-n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) REAL array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*l < 0 || left && *l > *m || ! left && *l > *n) {
+ *info = -6;
+ } else if (*lda < max(1,*k)) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORMR3", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ ja = *m - *l + 1;
+ jc = 1;
+ } else {
+ mi = *m;
+ ja = *n - *l + 1;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) or H(i)' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) or H(i)' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) or H(i)' */
+
+ slarz_(side, &mi, &ni, l, &a[i__ + ja * a_dim1], lda, &tau[i__], &c__[
+ ic + jc * c_dim1], ldc, &work[1]);
+
+/* L10: */
+ }
+
+ return 0;
+
+/* End of SORMR3 */
+
+} /* sormr3_ */
diff --git a/contrib/libs/clapack/sormrq.c b/contrib/libs/clapack/sormrq.c
new file mode 100644
index 0000000000..aa97bd52d3
--- /dev/null
+++ b/contrib/libs/clapack/sormrq.c
@@ -0,0 +1,335 @@
+/* sormrq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int sormrq_(char *side, char *trans, integer *m, integer *n,
+ integer *k, real *a, integer *lda, real *tau, real *c__, integer *ldc,
+ real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ real t[4160] /* was [65][64] */;
+ integer i1, i2, i3, ib, nb, mi, ni, nq, nw, iws;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ extern /* Subroutine */ int sormr2_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *), slarfb_(char *, char *, char *, char *
+, integer *, integer *, integer *, real *, integer *, real *,
+ integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ logical notran;
+ integer ldwork;
+ char transt[1];
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORMRQ overwrites the general real M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'T': Q**T * C C * Q**T */
+
+/* where Q is a real orthogonal matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by SGERQF. Q is of order M if SIDE = 'L' and of order N */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**T from the Left; */
+/* = 'R': apply Q or Q**T from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'T': Transpose, apply Q**T. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) REAL array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* SGERQF in the last k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) REAL array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SGERQF. */
+
+/* C (input/output) REAL array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = max(1,*n);
+ } else {
+ nq = *n;
+ nw = max(1,*m);
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+
+ if (*info == 0) {
+ if (*m == 0 || *n == 0) {
+ lwkopt = 1;
+ } else {
+
+/* Determine the block size. NB may be at most NBMAX, where */
+/* NBMAX is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "SORMRQ", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ lwkopt = nw * nb;
+ }
+ work[1] = (real) lwkopt;
+
+ if (*lwork < nw && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORMRQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SORMRQ", ch__1, m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ sormr2_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ i__4 = nq - *k + i__ + ib - 1;
+ slarft_("Backward", "Rowwise", &i__4, &ib, &a[i__ + a_dim1], lda,
+ &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(1:m-k+i+ib-1,1:n) */
+
+ mi = *m - *k + i__ + ib - 1;
+ } else {
+
+/* H or H' is applied to C(1:m,1:n-k+i+ib-1) */
+
+ ni = *n - *k + i__ + ib - 1;
+ }
+
+/* Apply H or H' */
+
+ slarfb_(side, transt, "Backward", "Rowwise", &mi, &ni, &ib, &a[
+ i__ + a_dim1], lda, t, &c__65, &c__[c_offset], ldc, &work[
+ 1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SORMRQ */
+
+} /* sormrq_ */
diff --git a/contrib/libs/clapack/sormrz.c b/contrib/libs/clapack/sormrz.c
new file mode 100644
index 0000000000..db1aed0eeb
--- /dev/null
+++ b/contrib/libs/clapack/sormrz.c
@@ -0,0 +1,358 @@
+/* sormrz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int sormrz_(char *side, char *trans, integer *m, integer *n,
+ integer *k, integer *l, real *a, integer *lda, real *tau, real *c__,
+ integer *ldc, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ real t[4160] /* was [65][64] */;
+ integer i1, i2, i3, ib, ic, ja, jc, nb, mi, ni, nq, nw, iws;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ extern /* Subroutine */ int sormr3_(char *, char *, integer *, integer *,
+ integer *, integer *, real *, integer *, real *, real *, integer *
+, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int slarzb_(char *, char *, char *, char *,
+ integer *, integer *, integer *, integer *, real *, integer *,
+ real *, integer *, real *, integer *, real *, integer *);
+ logical notran;
+ integer ldwork;
+ char transt[1];
+ extern /* Subroutine */ int slarzt_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* January 2007 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORMRZ overwrites the general real M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'T': Q**T * C C * Q**T */
+
+/* where Q is a real orthogonal matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by STZRZF. Q is of order M if SIDE = 'L' and of order N */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**T from the Left; */
+/* = 'R': apply Q or Q**T from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'T': Transpose, apply Q**T. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* L (input) INTEGER */
+/* The number of columns of the matrix A containing */
+/* the meaningful part of the Householder reflectors. */
+/* If SIDE = 'L', M >= L >= 0, if SIDE = 'R', N >= L >= 0. */
+
+/* A (input) REAL array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* STZRZF in the last k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) REAL array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by STZRZF. */
+
+/* C (input/output) REAL array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = max(1,*n);
+ } else {
+ nq = *n;
+ nw = max(1,*m);
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*l < 0 || left && *l > *m || ! left && *l > *n) {
+ *info = -6;
+ } else if (*lda < max(1,*k)) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ }
+
+ if (*info == 0) {
+ if (*m == 0 || *n == 0) {
+ lwkopt = 1;
+ } else {
+
+/* Determine the block size. NB may be at most NBMAX, where */
+/* NBMAX is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "SORMRQ", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ lwkopt = nw * nb;
+ }
+ work[1] = (real) lwkopt;
+
+ if (*lwork < max(1,nw) && ! lquery) {
+ *info = -13;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORMRZ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SORMRQ", ch__1, m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ sormr3_(side, trans, m, n, k, l, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ ja = *m - *l + 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ ja = *n - *l + 1;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ slarzt_("Backward", "Rowwise", l, &ib, &a[i__ + ja * a_dim1], lda,
+ &tau[i__], t, &c__65);
+
+ if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H or H' */
+
+ slarzb_(side, transt, "Backward", "Rowwise", &mi, &ni, &ib, l, &a[
+ i__ + ja * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1]
+, ldc, &work[1], &ldwork);
+/* L10: */
+ }
+
+ }
+
+ work[1] = (real) lwkopt;
+
+ return 0;
+
+/* End of SORMRZ */
+
+} /* sormrz_ */
diff --git a/contrib/libs/clapack/sormtr.c b/contrib/libs/clapack/sormtr.c
new file mode 100644
index 0000000000..bb23e1842e
--- /dev/null
+++ b/contrib/libs/clapack/sormtr.c
@@ -0,0 +1,296 @@
+/* sormtr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int sormtr_(char *side, char *uplo, char *trans, integer *m,
+ integer *n, real *a, integer *lda, real *tau, real *c__, integer *ldc,
+ real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2, i__3;
+ char ch__1[3];
+ ch__1[2] = 0;
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i1, i2, nb, mi, ni, nq, nw;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int sormql_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SORMTR overwrites the general real M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'T': Q**T * C C * Q**T */
+
+/* where Q is a real orthogonal matrix of order nq, with nq = m if */
+/* SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of */
+/* nq-1 elementary reflectors, as returned by SSYTRD: */
+
+/* if UPLO = 'U', Q = H(nq-1) . . . H(2) H(1); */
+
+/* if UPLO = 'L', Q = H(1) H(2) . . . H(nq-1). */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**T from the Left; */
+/* = 'R': apply Q or Q**T from the Right. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A contains elementary reflectors */
+/* from SSYTRD; */
+/* = 'L': Lower triangle of A contains elementary reflectors */
+/* from SSYTRD. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'T': Transpose, apply Q**T. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* A (input) REAL array, dimension */
+/* (LDA,M) if SIDE = 'L' */
+/* (LDA,N) if SIDE = 'R' */
+/* The vectors which define the elementary reflectors, as */
+/* returned by SSYTRD. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* LDA >= max(1,M) if SIDE = 'L'; LDA >= max(1,N) if SIDE = 'R'. */
+
+/* TAU (input) REAL array, dimension */
+/* (M-1) if SIDE = 'L' */
+/* (N-1) if SIDE = 'R' */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by SSYTRD. */
+
+/* C (input/output) REAL array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T")) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+ if (upper) {
+ if (left) {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ nb = ilaenv_(&c__1, "SORMQL", ch__1, &i__2, n, &i__3, &c_n1);
+ } else {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "SORMQL", ch__1, m, &i__2, &i__3, &c_n1);
+ }
+ } else {
+ if (left) {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ nb = ilaenv_(&c__1, "SORMQR", ch__1, &i__2, n, &i__3, &c_n1);
+ } else {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "SORMQR", ch__1, m, &i__2, &i__3, &c_n1);
+ }
+ }
+ lwkopt = max(1,nw) * nb;
+ work[1] = (real) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__2 = -(*info);
+ xerbla_("SORMTR", &i__2);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || nq == 1) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ }
+
+ if (upper) {
+
+/* Q was determined by a call to SSYTRD with UPLO = 'U' */
+
+ i__2 = nq - 1;
+ sormql_(side, trans, &mi, &ni, &i__2, &a[(a_dim1 << 1) + 1], lda, &
+ tau[1], &c__[c_offset], ldc, &work[1], lwork, &iinfo);
+ } else {
+
+/* Q was determined by a call to SSYTRD with UPLO = 'L' */
+
+ if (left) {
+ i1 = 2;
+ i2 = 1;
+ } else {
+ i1 = 1;
+ i2 = 2;
+ }
+ i__2 = nq - 1;
+ sormqr_(side, trans, &mi, &ni, &i__2, &a[a_dim1 + 2], lda, &tau[1], &
+ c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+ }
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SORMTR */
+
+} /* sormtr_ */
diff --git a/contrib/libs/clapack/spbcon.c b/contrib/libs/clapack/spbcon.c
new file mode 100644
index 0000000000..620291ee00
--- /dev/null
+++ b/contrib/libs/clapack/spbcon.c
@@ -0,0 +1,232 @@
+/* spbcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int spbcon_(char *uplo, integer *n, integer *kd, real *ab,
+ integer *ldab, real *anorm, real *rcond, real *work, integer *iwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1;
+ real r__1;
+
+ /* Local variables */
+ integer ix, kase;
+ real scale;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int srscl_(integer *, real *, real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int slacn2_(integer *, real *, real *, integer *,
+ real *, integer *, integer *);
+ real scalel;
+ extern doublereal slamch_(char *);
+ real scaleu;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ real ainvnm;
+ extern /* Subroutine */ int slatbs_(char *, char *, char *, char *,
+ integer *, integer *, real *, integer *, real *, real *, real *,
+ integer *);
+ char normin[1];
+ real smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPBCON estimates the reciprocal of the condition number (in the */
+/* 1-norm) of a real symmetric positive definite band matrix using the */
+/* Cholesky factorization A = U**T*U or A = L*L**T computed by SPBTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangular factor stored in AB; */
+/* = 'L': Lower triangular factor stored in AB. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input) REAL array, dimension (LDAB,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**T*U or A = L*L**T of the band matrix A, stored in the */
+/* first KD+1 rows of the array. The j-th column of U or L is */
+/* stored in the j-th column of the array AB as follows: */
+/* if UPLO ='U', AB(kd+1+i-j,j) = U(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO ='L', AB(1+i-j,j) = L(i,j) for j<=i<=min(n,j+kd). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* ANORM (input) REAL */
+/* The 1-norm (or infinity-norm) of the symmetric band matrix A. */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kd < 0) {
+ *info = -3;
+ } else if (*ldab < *kd + 1) {
+ *info = -5;
+ } else if (*anorm < 0.f) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPBCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.f;
+ if (*n == 0) {
+ *rcond = 1.f;
+ return 0;
+ } else if (*anorm == 0.f) {
+ return 0;
+ }
+
+ smlnum = slamch_("Safe minimum");
+
+/* Estimate the 1-norm of the inverse. */
+
+ kase = 0;
+ *(unsigned char *)normin = 'N';
+L10:
+ slacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+ if (upper) {
+
+/* Multiply by inv(U'). */
+
+ slatbs_("Upper", "Transpose", "Non-unit", normin, n, kd, &ab[
+ ab_offset], ldab, &work[1], &scalel, &work[(*n << 1) + 1],
+ info);
+ *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(U). */
+
+ slatbs_("Upper", "No transpose", "Non-unit", normin, n, kd, &ab[
+ ab_offset], ldab, &work[1], &scaleu, &work[(*n << 1) + 1],
+ info);
+ } else {
+
+/* Multiply by inv(L). */
+
+ slatbs_("Lower", "No transpose", "Non-unit", normin, n, kd, &ab[
+ ab_offset], ldab, &work[1], &scalel, &work[(*n << 1) + 1],
+ info);
+ *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(L'). */
+
+ slatbs_("Lower", "Transpose", "Non-unit", normin, n, kd, &ab[
+ ab_offset], ldab, &work[1], &scaleu, &work[(*n << 1) + 1],
+ info);
+ }
+
+/* Multiply by 1/SCALE if doing so will not cause overflow. */
+
+ scale = scalel * scaleu;
+ if (scale != 1.f) {
+ ix = isamax_(n, &work[1], &c__1);
+ if (scale < (r__1 = work[ix], dabs(r__1)) * smlnum || scale ==
+ 0.f) {
+ goto L20;
+ }
+ srscl_(n, &scale, &work[1], &c__1);
+ }
+ goto L10;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.f) {
+ *rcond = 1.f / ainvnm / *anorm;
+ }
+
+L20:
+
+ return 0;
+
+/* End of SPBCON */
+
+} /* spbcon_ */
diff --git a/contrib/libs/clapack/spbequ.c b/contrib/libs/clapack/spbequ.c
new file mode 100644
index 0000000000..215af5268b
--- /dev/null
+++ b/contrib/libs/clapack/spbequ.c
@@ -0,0 +1,202 @@
+/* spbequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int spbequ_(char *uplo, integer *n, integer *kd, real *ab,
+ integer *ldab, real *s, real *scond, real *amax, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j;
+ real smin;
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPBEQU computes row and column scalings intended to equilibrate a */
+/* symmetric positive definite band matrix A and reduce its condition */
+/* number (with respect to the two-norm). S contains the scale factors, */
+/* S(i) = 1/sqrt(A(i,i)), chosen so that the scaled matrix B with */
+/* elements B(i,j) = S(i)*A(i,j)*S(j) has ones on the diagonal. This */
+/* choice of S puts the condition number of B within a factor N of the */
+/* smallest possible condition number over all possible diagonal */
+/* scalings. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangular of A is stored; */
+/* = 'L': Lower triangular of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input) REAL array, dimension (LDAB,N) */
+/* The upper or lower triangle of the symmetric band matrix A, */
+/* stored in the first KD+1 rows of the array. The j-th column */
+/* of A is stored in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array A. LDAB >= KD+1. */
+
+/* S (output) REAL array, dimension (N) */
+/* If INFO = 0, S contains the scale factors for A. */
+
+/* SCOND (output) REAL */
+/* If INFO = 0, S contains the ratio of the smallest S(i) to */
+/* the largest S(i). If SCOND >= 0.1 and AMAX is neither too */
+/* large nor too small, it is not worth scaling by S. */
+
+/* AMAX (output) REAL */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, the i-th diagonal element is nonpositive. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --s;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kd < 0) {
+ *info = -3;
+ } else if (*ldab < *kd + 1) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPBEQU", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *scond = 1.f;
+ *amax = 0.f;
+ return 0;
+ }
+
+ if (upper) {
+ j = *kd + 1;
+ } else {
+ j = 1;
+ }
+
+/* Initialize SMIN and AMAX. */
+
+ s[1] = ab[j + ab_dim1];
+ smin = s[1];
+ *amax = s[1];
+
+/* Find the minimum and maximum diagonal elements. */
+
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ s[i__] = ab[j + i__ * ab_dim1];
+/* Computing MIN */
+ r__1 = smin, r__2 = s[i__];
+ smin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = *amax, r__2 = s[i__];
+ *amax = dmax(r__1,r__2);
+/* L10: */
+ }
+
+ if (smin <= 0.f) {
+
+/* Find the first non-positive diagonal element and return. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (s[i__] <= 0.f) {
+ *info = i__;
+ return 0;
+ }
+/* L20: */
+ }
+ } else {
+
+/* Set the scale factors to the reciprocals */
+/* of the diagonal elements. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ s[i__] = 1.f / sqrt(s[i__]);
+/* L30: */
+ }
+
+/* Compute SCOND = min(S(I)) / max(S(I)) */
+
+ *scond = sqrt(smin) / sqrt(*amax);
+ }
+ return 0;
+
+/* End of SPBEQU */
+
+} /* spbequ_ */
diff --git a/contrib/libs/clapack/spbrfs.c b/contrib/libs/clapack/spbrfs.c
new file mode 100644
index 0000000000..bbcab5a4a0
--- /dev/null
+++ b/contrib/libs/clapack/spbrfs.c
@@ -0,0 +1,434 @@
+/* spbrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b12 = -1.f;
+static real c_b14 = 1.f;
+
+/* Subroutine */ int spbrfs_(char *uplo, integer *n, integer *kd, integer *
+ nrhs, real *ab, integer *ldab, real *afb, integer *ldafb, real *b,
+ integer *ldb, real *x, integer *ldx, real *ferr, real *berr, real *
+ work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, afb_dim1, afb_offset, b_dim1, b_offset,
+ x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3;
+
+ /* Local variables */
+ integer i__, j, k, l;
+ real s, xk;
+ integer nz;
+ real eps;
+ integer kase;
+ real safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3], count;
+ extern /* Subroutine */ int ssbmv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), saxpy_(integer *, real *, real *, integer *, real *,
+ integer *), slacn2_(integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real lstres;
+ extern /* Subroutine */ int spbtrs_(char *, integer *, integer *, integer
+ *, real *, integer *, real *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPBRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is symmetric positive definite */
+/* and banded, and provides error bounds and backward error estimates */
+/* for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AB (input) REAL array, dimension (LDAB,N) */
+/* The upper or lower triangle of the symmetric band matrix A, */
+/* stored in the first KD+1 rows of the array. The j-th column */
+/* of A is stored in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* AFB (input) REAL array, dimension (LDAFB,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**T*U or A = L*L**T of the band matrix A as computed by */
+/* SPBTRF, in the same storage format as A (see AB). */
+
+/* LDAFB (input) INTEGER */
+/* The leading dimension of the array AFB. LDAFB >= KD+1. */
+
+/* B (input) REAL array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) REAL array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by SPBTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ afb_dim1 = *ldafb;
+ afb_offset = 1 + afb_dim1;
+ afb -= afb_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kd < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*ldab < *kd + 1) {
+ *info = -6;
+ } else if (*ldafb < *kd + 1) {
+ *info = -8;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldx < max(1,*n)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPBRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.f;
+ berr[j] = 0.f;
+/* L10: */
+ }
+ return 0;
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+/* Computing MIN */
+ i__1 = *n + 1, i__2 = (*kd << 1) + 2;
+ nz = min(i__1,i__2);
+ eps = slamch_("Epsilon");
+ safmin = slamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+ scopy_(n, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+ ssbmv_(uplo, n, kd, &c_b12, &ab[ab_offset], ldab, &x[j * x_dim1 + 1],
+ &c__1, &c_b14, &work[*n + 1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] = (r__1 = b[i__ + j * b_dim1], dabs(r__1));
+/* L30: */
+ }
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+ if (upper) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+ l = *kd + 1 - k;
+/* Computing MAX */
+ i__3 = 1, i__4 = k - *kd;
+ i__5 = k - 1;
+ for (i__ = max(i__3,i__4); i__ <= i__5; ++i__) {
+ work[i__] += (r__1 = ab[l + i__ + k * ab_dim1], dabs(r__1)
+ ) * xk;
+ s += (r__1 = ab[l + i__ + k * ab_dim1], dabs(r__1)) * (
+ r__2 = x[i__ + j * x_dim1], dabs(r__2));
+/* L40: */
+ }
+ work[k] = work[k] + (r__1 = ab[*kd + 1 + k * ab_dim1], dabs(
+ r__1)) * xk + s;
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+ work[k] += (r__1 = ab[k * ab_dim1 + 1], dabs(r__1)) * xk;
+ l = 1 - k;
+/* Computing MIN */
+ i__3 = *n, i__4 = k + *kd;
+ i__5 = min(i__3,i__4);
+ for (i__ = k + 1; i__ <= i__5; ++i__) {
+ work[i__] += (r__1 = ab[l + i__ + k * ab_dim1], dabs(r__1)
+ ) * xk;
+ s += (r__1 = ab[l + i__ + k * ab_dim1], dabs(r__1)) * (
+ r__2 = x[i__ + j * x_dim1], dabs(r__2));
+/* L60: */
+ }
+ work[k] += s;
+/* L70: */
+ }
+ }
+ s = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+/* Computing MAX */
+ r__2 = s, r__3 = (r__1 = work[*n + i__], dabs(r__1)) / work[
+ i__];
+ s = dmax(r__2,r__3);
+ } else {
+/* Computing MAX */
+ r__2 = s, r__3 = ((r__1 = work[*n + i__], dabs(r__1)) + safe1)
+ / (work[i__] + safe1);
+ s = dmax(r__2,r__3);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ spbtrs_(uplo, n, kd, &c__1, &afb[afb_offset], ldafb, &work[*n + 1]
+, n, info);
+ saxpy_(n, &c_b14, &work[*n + 1], &c__1, &x[j * x_dim1 + 1], &c__1)
+ ;
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use SLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+ work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+ work[i__];
+ } else {
+ work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+ work[i__] + safe1;
+ }
+/* L90: */
+ }
+
+ kase = 0;
+L100:
+ slacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+ kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+ spbtrs_(uplo, n, kd, &c__1, &afb[afb_offset], ldafb, &work[*n
+ + 1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] *= work[i__];
+/* L110: */
+ }
+ } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] *= work[i__];
+/* L120: */
+ }
+ spbtrs_(uplo, n, kd, &c__1, &afb[afb_offset], ldafb, &work[*n
+ + 1], n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = lstres, r__3 = (r__1 = x[i__ + j * x_dim1], dabs(r__1));
+ lstres = dmax(r__2,r__3);
+/* L130: */
+ }
+ if (lstres != 0.f) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of SPBRFS */
+
+} /* spbrfs_ */
diff --git a/contrib/libs/clapack/spbstf.c b/contrib/libs/clapack/spbstf.c
new file mode 100644
index 0000000000..6c79ba8011
--- /dev/null
+++ b/contrib/libs/clapack/spbstf.c
@@ -0,0 +1,312 @@
+/* spbstf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b9 = -1.f;
+
+/* Subroutine */ int spbstf_(char *uplo, integer *n, integer *kd, real *ab,
+ integer *ldab, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer j, m, km;
+ real ajj;
+ integer kld;
+ extern /* Subroutine */ int ssyr_(char *, integer *, real *, real *,
+ integer *, real *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPBSTF computes a split Cholesky factorization of a real */
+/* symmetric positive definite band matrix A. */
+
+/* This routine is designed to be used in conjunction with SSBGST. */
+
+/* The factorization has the form A = S**T*S where S is a band matrix */
+/* of the same bandwidth as A and the following structure: */
+
+/* S = ( U ) */
+/* ( M L ) */
+
+/* where U is upper triangular of order m = (n+kd)/2, and L is lower */
+/* triangular of order n-m. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) REAL array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first kd+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, if INFO = 0, the factor S from the split Cholesky */
+/* factorization A = S**T*S. See Further Details. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the factorization could not be completed, */
+/* because the updated element a(i,i) was negative; the */
+/* matrix A is not positive definite. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* N = 7, KD = 2: */
+
+/* S = ( s11 s12 s13 ) */
+/* ( s22 s23 s24 ) */
+/* ( s33 s34 ) */
+/* ( s44 ) */
+/* ( s53 s54 s55 ) */
+/* ( s64 s65 s66 ) */
+/* ( s75 s76 s77 ) */
+
+/* If UPLO = 'U', the array AB holds: */
+
+/* on entry: on exit: */
+
+/* * * a13 a24 a35 a46 a57 * * s13 s24 s53 s64 s75 */
+/* * a12 a23 a34 a45 a56 a67 * s12 s23 s34 s54 s65 s76 */
+/* a11 a22 a33 a44 a55 a66 a77 s11 s22 s33 s44 s55 s66 s77 */
+
+/* If UPLO = 'L', the array AB holds: */
+
+/* on entry: on exit: */
+
+/* a11 a22 a33 a44 a55 a66 a77 s11 s22 s33 s44 s55 s66 s77 */
+/* a21 a32 a43 a54 a65 a76 * s12 s23 s34 s54 s65 s76 * */
+/* a31 a42 a53 a64 a64 * * s13 s24 s53 s64 s75 * * */
+
+/* Array elements marked * are not used by the routine. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kd < 0) {
+ *info = -3;
+ } else if (*ldab < *kd + 1) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPBSTF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Computing MAX */
+ i__1 = 1, i__2 = *ldab - 1;
+ kld = max(i__1,i__2);
+
+/* Set the splitting point m. */
+
+ m = (*n + *kd) / 2;
+
+ if (upper) {
+
+/* Factorize A(m+1:n,m+1:n) as L**T*L, and update A(1:m,1:m). */
+
+ i__1 = m + 1;
+ for (j = *n; j >= i__1; --j) {
+
+/* Compute s(j,j) and test for non-positive-definiteness. */
+
+ ajj = ab[*kd + 1 + j * ab_dim1];
+ if (ajj <= 0.f) {
+ goto L50;
+ }
+ ajj = sqrt(ajj);
+ ab[*kd + 1 + j * ab_dim1] = ajj;
+/* Computing MIN */
+ i__2 = j - 1;
+ km = min(i__2,*kd);
+
+/* Compute elements j-km:j-1 of the j-th column and update the */
+/* the leading submatrix within the band. */
+
+ r__1 = 1.f / ajj;
+ sscal_(&km, &r__1, &ab[*kd + 1 - km + j * ab_dim1], &c__1);
+ ssyr_("Upper", &km, &c_b9, &ab[*kd + 1 - km + j * ab_dim1], &c__1,
+ &ab[*kd + 1 + (j - km) * ab_dim1], &kld);
+/* L10: */
+ }
+
+/* Factorize the updated submatrix A(1:m,1:m) as U**T*U. */
+
+ i__1 = m;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute s(j,j) and test for non-positive-definiteness. */
+
+ ajj = ab[*kd + 1 + j * ab_dim1];
+ if (ajj <= 0.f) {
+ goto L50;
+ }
+ ajj = sqrt(ajj);
+ ab[*kd + 1 + j * ab_dim1] = ajj;
+/* Computing MIN */
+ i__2 = *kd, i__3 = m - j;
+ km = min(i__2,i__3);
+
+/* Compute elements j+1:j+km of the j-th row and update the */
+/* trailing submatrix within the band. */
+
+ if (km > 0) {
+ r__1 = 1.f / ajj;
+ sscal_(&km, &r__1, &ab[*kd + (j + 1) * ab_dim1], &kld);
+ ssyr_("Upper", &km, &c_b9, &ab[*kd + (j + 1) * ab_dim1], &kld,
+ &ab[*kd + 1 + (j + 1) * ab_dim1], &kld);
+ }
+/* L20: */
+ }
+ } else {
+
+/* Factorize A(m+1:n,m+1:n) as L**T*L, and update A(1:m,1:m). */
+
+ i__1 = m + 1;
+ for (j = *n; j >= i__1; --j) {
+
+/* Compute s(j,j) and test for non-positive-definiteness. */
+
+ ajj = ab[j * ab_dim1 + 1];
+ if (ajj <= 0.f) {
+ goto L50;
+ }
+ ajj = sqrt(ajj);
+ ab[j * ab_dim1 + 1] = ajj;
+/* Computing MIN */
+ i__2 = j - 1;
+ km = min(i__2,*kd);
+
+/* Compute elements j-km:j-1 of the j-th row and update the */
+/* trailing submatrix within the band. */
+
+ r__1 = 1.f / ajj;
+ sscal_(&km, &r__1, &ab[km + 1 + (j - km) * ab_dim1], &kld);
+ ssyr_("Lower", &km, &c_b9, &ab[km + 1 + (j - km) * ab_dim1], &kld,
+ &ab[(j - km) * ab_dim1 + 1], &kld);
+/* L30: */
+ }
+
+/* Factorize the updated submatrix A(1:m,1:m) as U**T*U. */
+
+ i__1 = m;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute s(j,j) and test for non-positive-definiteness. */
+
+ ajj = ab[j * ab_dim1 + 1];
+ if (ajj <= 0.f) {
+ goto L50;
+ }
+ ajj = sqrt(ajj);
+ ab[j * ab_dim1 + 1] = ajj;
+/* Computing MIN */
+ i__2 = *kd, i__3 = m - j;
+ km = min(i__2,i__3);
+
+/* Compute elements j+1:j+km of the j-th column and update the */
+/* trailing submatrix within the band. */
+
+ if (km > 0) {
+ r__1 = 1.f / ajj;
+ sscal_(&km, &r__1, &ab[j * ab_dim1 + 2], &c__1);
+ ssyr_("Lower", &km, &c_b9, &ab[j * ab_dim1 + 2], &c__1, &ab[(
+ j + 1) * ab_dim1 + 1], &kld);
+ }
+/* L40: */
+ }
+ }
+ return 0;
+
+L50:
+ *info = j;
+ return 0;
+
+/* End of SPBSTF */
+
+} /* spbstf_ */
diff --git a/contrib/libs/clapack/spbsv.c b/contrib/libs/clapack/spbsv.c
new file mode 100644
index 0000000000..804dd541d2
--- /dev/null
+++ b/contrib/libs/clapack/spbsv.c
@@ -0,0 +1,181 @@
+/* spbsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int spbsv_(char *uplo, integer *n, integer *kd, integer *
+ nrhs, real *ab, integer *ldab, real *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), spbtrf_(
+ char *, integer *, integer *, real *, integer *, integer *), spbtrs_(char *, integer *, integer *, integer *, real *,
+ integer *, real *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPBSV computes the solution to a real system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric positive definite band matrix and X */
+/* and B are N-by-NRHS matrices. */
+
+/* The Cholesky decomposition is used to factor A as */
+/* A = U**T * U, if UPLO = 'U', or */
+/* A = L * L**T, if UPLO = 'L', */
+/* where U is an upper triangular band matrix, and L is a lower */
+/* triangular band matrix, with the same number of superdiagonals or */
+/* subdiagonals as A. The factored form of A is then used to solve the */
+/* system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AB (input/output) REAL array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(KD+1+i-j,j) = A(i,j) for max(1,j-KD)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(N,j+KD). */
+/* See below for further details. */
+
+/* On exit, if INFO = 0, the triangular factor U or L from the */
+/* Cholesky factorization A = U**T*U or A = L*L**T of the band */
+/* matrix A, in the same storage format as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i of A is not */
+/* positive definite, so the factorization could not be */
+/* completed, and the solution has not been computed. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* N = 6, KD = 2, and UPLO = 'U': */
+
+/* On entry: On exit: */
+
+/* * * a13 a24 a35 a46 * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+
+/* Similarly, if UPLO = 'L' the format of A is as follows: */
+
+/* On entry: On exit: */
+
+/* a11 a22 a33 a44 a55 a66 l11 l22 l33 l44 l55 l66 */
+/* a21 a32 a43 a54 a65 * l21 l32 l43 l54 l65 * */
+/* a31 a42 a53 a64 * * l31 l42 l53 l64 * * */
+
+/* Array elements marked * are not used by the routine. */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kd < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*ldab < *kd + 1) {
+ *info = -6;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPBSV ", &i__1);
+ return 0;
+ }
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+ spbtrf_(uplo, n, kd, &ab[ab_offset], ldab, info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ spbtrs_(uplo, n, kd, nrhs, &ab[ab_offset], ldab, &b[b_offset], ldb,
+ info);
+
+ }
+ return 0;
+
+/* End of SPBSV */
+
+} /* spbsv_ */
diff --git a/contrib/libs/clapack/spbsvx.c b/contrib/libs/clapack/spbsvx.c
new file mode 100644
index 0000000000..d61997d415
--- /dev/null
+++ b/contrib/libs/clapack/spbsvx.c
@@ -0,0 +1,512 @@
+/* spbsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int spbsvx_(char *fact, char *uplo, integer *n, integer *kd,
+ integer *nrhs, real *ab, integer *ldab, real *afb, integer *ldafb,
+ char *equed, real *s, real *b, integer *ldb, real *x, integer *ldx,
+ real *rcond, real *ferr, real *berr, real *work, integer *iwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, afb_dim1, afb_offset, b_dim1, b_offset,
+ x_dim1, x_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Local variables */
+ integer i__, j, j1, j2;
+ real amax, smin, smax;
+ extern logical lsame_(char *, char *);
+ real scond, anorm;
+ logical equil, rcequ, upper;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ extern doublereal slamch_(char *);
+ logical nofact;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ extern doublereal slansb_(char *, char *, integer *, integer *, real *,
+ integer *, real *);
+ extern /* Subroutine */ int spbcon_(char *, integer *, integer *, real *,
+ integer *, real *, real *, real *, integer *, integer *),
+ slaqsb_(char *, integer *, integer *, real *, integer *, real *,
+ real *, real *, char *);
+ integer infequ;
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *), spbequ_(char *, integer *,
+ integer *, real *, integer *, real *, real *, real *, integer *), spbrfs_(char *, integer *, integer *, integer *, real *,
+ integer *, real *, integer *, real *, integer *, real *, integer *
+, real *, real *, real *, integer *, integer *), spbtrf_(
+ char *, integer *, integer *, real *, integer *, integer *);
+ real smlnum;
+ extern /* Subroutine */ int spbtrs_(char *, integer *, integer *, integer
+ *, real *, integer *, real *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPBSVX uses the Cholesky factorization A = U**T*U or A = L*L**T to */
+/* compute the solution to a real system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric positive definite band matrix and X */
+/* and B are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/* the system: */
+/* diag(S) * A * diag(S) * inv(diag(S)) * X = diag(S) * B */
+/* Whether or not the system will be equilibrated depends on the */
+/* scaling of the matrix A, but if equilibration is used, A is */
+/* overwritten by diag(S)*A*diag(S) and B by diag(S)*B. */
+
+/* 2. If FACT = 'N' or 'E', the Cholesky decomposition is used to */
+/* factor the matrix A (after equilibration if FACT = 'E') as */
+/* A = U**T * U, if UPLO = 'U', or */
+/* A = L * L**T, if UPLO = 'L', */
+/* where U is an upper triangular band matrix, and L is a lower */
+/* triangular band matrix. */
+
+/* 3. If the leading i-by-i principal minor is not positive definite, */
+/* then the routine returns with INFO = i. Otherwise, the factored */
+/* form of A is used to estimate the condition number of the matrix */
+/* A. If the reciprocal of the condition number is less than machine */
+/* precision, INFO = N+1 is returned as a warning, but the routine */
+/* still goes on to solve for X and compute error bounds as */
+/* described below. */
+
+/* 4. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 5. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* 6. If equilibration was used, the matrix X is premultiplied by */
+/* diag(S) so that it solves the original system before */
+/* equilibration. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of the matrix A is */
+/* supplied on entry, and if not, whether the matrix A should be */
+/* equilibrated before it is factored. */
+/* = 'F': On entry, AFB contains the factored form of A. */
+/* If EQUED = 'Y', the matrix A has been equilibrated */
+/* with scaling factors given by S. AB and AFB will not */
+/* be modified. */
+/* = 'N': The matrix A will be copied to AFB and factored. */
+/* = 'E': The matrix A will be equilibrated if necessary, then */
+/* copied to AFB and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right-hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AB (input/output) REAL array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first KD+1 rows of the array, except */
+/* if FACT = 'F' and EQUED = 'Y', then A must contain the */
+/* equilibrated matrix diag(S)*A*diag(S). The j-th column of A */
+/* is stored in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(KD+1+i-j,j) = A(i,j) for max(1,j-KD)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(N,j+KD). */
+/* See below for further details. */
+
+/* On exit, if FACT = 'E' and EQUED = 'Y', A is overwritten by */
+/* diag(S)*A*diag(S). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array A. LDAB >= KD+1. */
+
+/* AFB (input or output) REAL array, dimension (LDAFB,N) */
+/* If FACT = 'F', then AFB is an input argument and on entry */
+/* contains the triangular factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T of the band matrix */
+/* A, in the same storage format as A (see AB). If EQUED = 'Y', */
+/* then AFB is the factored form of the equilibrated matrix A. */
+
+/* If FACT = 'N', then AFB is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T. */
+
+/* If FACT = 'E', then AFB is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T of the equilibrated */
+/* matrix A (see the description of A for the form of the */
+/* equilibrated matrix). */
+
+/* LDAFB (input) INTEGER */
+/* The leading dimension of the array AFB. LDAFB >= KD+1. */
+
+/* EQUED (input or output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration (always true if FACT = 'N'). */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+/* EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/* output argument. */
+
+/* S (input or output) REAL array, dimension (N) */
+/* The scale factors for A; not accessed if EQUED = 'N'. S is */
+/* an input argument if FACT = 'F'; otherwise, S is an output */
+/* argument. If FACT = 'F' and EQUED = 'Y', each element of S */
+/* must be positive. */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if EQUED = 'N', B is not modified; if EQUED = 'Y', */
+/* B is overwritten by diag(S) * B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) REAL array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X to */
+/* the original system of equations. Note that if EQUED = 'Y', */
+/* A and B are modified on exit, and the solution to the */
+/* equilibrated system is inv(diag(S))*X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) REAL */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A after equilibration (if done). If RCOND is less than the */
+/* machine precision (in particular, if RCOND = 0), the matrix */
+/* is singular to working precision. This condition is */
+/* indicated by a return code of INFO > 0. */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: the leading minor of order i of A is */
+/* not positive definite, so the factorization */
+/* could not be completed, and the solution has not */
+/* been computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* N = 6, KD = 2, and UPLO = 'U': */
+
+/* Two-dimensional storage of the symmetric matrix A: */
+
+/* a11 a12 a13 */
+/* a22 a23 a24 */
+/* a33 a34 a35 */
+/* a44 a45 a46 */
+/* a55 a56 */
+/* (aij=conjg(aji)) a66 */
+
+/* Band storage of the upper triangle of A: */
+
+/* * * a13 a24 a35 a46 */
+/* * a12 a23 a34 a45 a56 */
+/* a11 a22 a33 a44 a55 a66 */
+
+/* Similarly, if UPLO = 'L' the format of A is as follows: */
+
+/* a11 a22 a33 a44 a55 a66 */
+/* a21 a32 a43 a54 a65 * */
+/* a31 a42 a53 a64 * * */
+
+/* Array elements marked * are not used by the routine. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ afb_dim1 = *ldafb;
+ afb_offset = 1 + afb_dim1;
+ afb -= afb_offset;
+ --s;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ equil = lsame_(fact, "E");
+ upper = lsame_(uplo, "U");
+ if (nofact || equil) {
+ *(unsigned char *)equed = 'N';
+ rcequ = FALSE_;
+ } else {
+ rcequ = lsame_(equed, "Y");
+ smlnum = slamch_("Safe minimum");
+ bignum = 1.f / smlnum;
+ }
+
+/* Test the input parameters. */
+
+ if (! nofact && ! equil && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*kd < 0) {
+ *info = -4;
+ } else if (*nrhs < 0) {
+ *info = -5;
+ } else if (*ldab < *kd + 1) {
+ *info = -7;
+ } else if (*ldafb < *kd + 1) {
+ *info = -9;
+ } else if (lsame_(fact, "F") && ! (rcequ || lsame_(
+ equed, "N"))) {
+ *info = -10;
+ } else {
+ if (rcequ) {
+ smin = bignum;
+ smax = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ r__1 = smin, r__2 = s[j];
+ smin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = smax, r__2 = s[j];
+ smax = dmax(r__1,r__2);
+/* L10: */
+ }
+ if (smin <= 0.f) {
+ *info = -11;
+ } else if (*n > 0) {
+ scond = dmax(smin,smlnum) / dmin(smax,bignum);
+ } else {
+ scond = 1.f;
+ }
+ }
+ if (*info == 0) {
+ if (*ldb < max(1,*n)) {
+ *info = -13;
+ } else if (*ldx < max(1,*n)) {
+ *info = -15;
+ }
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPBSVX", &i__1);
+ return 0;
+ }
+
+ if (equil) {
+
+/* Compute row and column scalings to equilibrate the matrix A. */
+
+ spbequ_(uplo, n, kd, &ab[ab_offset], ldab, &s[1], &scond, &amax, &
+ infequ);
+ if (infequ == 0) {
+
+/* Equilibrate the matrix. */
+
+ slaqsb_(uplo, n, kd, &ab[ab_offset], ldab, &s[1], &scond, &amax,
+ equed);
+ rcequ = lsame_(equed, "Y");
+ }
+ }
+
+/* Scale the right-hand side. */
+
+ if (rcequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = s[i__] * b[i__ + j * b_dim1];
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+
+ if (nofact || equil) {
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j - *kd;
+ j1 = max(i__2,1);
+ i__2 = j - j1 + 1;
+ scopy_(&i__2, &ab[*kd + 1 - j + j1 + j * ab_dim1], &c__1, &
+ afb[*kd + 1 - j + j1 + j * afb_dim1], &c__1);
+/* L40: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = j + *kd;
+ j2 = min(i__2,*n);
+ i__2 = j2 - j + 1;
+ scopy_(&i__2, &ab[j * ab_dim1 + 1], &c__1, &afb[j * afb_dim1
+ + 1], &c__1);
+/* L50: */
+ }
+ }
+
+ spbtrf_(uplo, n, kd, &afb[afb_offset], ldafb, info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.f;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ anorm = slansb_("1", uplo, n, kd, &ab[ab_offset], ldab, &work[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ spbcon_(uplo, n, kd, &afb[afb_offset], ldafb, &anorm, rcond, &work[1], &
+ iwork[1], info);
+
+/* Compute the solution matrix X. */
+
+ slacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ spbtrs_(uplo, n, kd, nrhs, &afb[afb_offset], ldafb, &x[x_offset], ldx,
+ info);
+
+/* Use iterative refinement to improve the computed solution and */
+/* compute error bounds and backward error estimates for it. */
+
+ spbrfs_(uplo, n, kd, nrhs, &ab[ab_offset], ldab, &afb[afb_offset], ldafb,
+ &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[1]
+, &iwork[1], info);
+
+/* Transform the solution matrix X to a solution of the original */
+/* system. */
+
+ if (rcequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[i__ + j * x_dim1] = s[i__] * x[i__ + j * x_dim1];
+/* L60: */
+ }
+/* L70: */
+ }
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] /= scond;
+/* L80: */
+ }
+ }
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < slamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ return 0;
+
+/* End of SPBSVX */
+
+} /* spbsvx_ */
diff --git a/contrib/libs/clapack/spbtf2.c b/contrib/libs/clapack/spbtf2.c
new file mode 100644
index 0000000000..e3d2dd2996
--- /dev/null
+++ b/contrib/libs/clapack/spbtf2.c
@@ -0,0 +1,244 @@
+/* spbtf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b8 = -1.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int spbtf2_(char *uplo, integer *n, integer *kd, real *ab,
+ integer *ldab, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer j, kn;
+ real ajj;
+ integer kld;
+ extern /* Subroutine */ int ssyr_(char *, integer *, real *, real *,
+ integer *, real *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPBTF2 computes the Cholesky factorization of a real symmetric */
+/* positive definite band matrix A. */
+
+/* The factorization has the form */
+/* A = U' * U , if UPLO = 'U', or */
+/* A = L * L', if UPLO = 'L', */
+/* where U is an upper triangular matrix, U' is the transpose of U, and */
+/* L is lower triangular. */
+
+/* This is the unblocked version of the algorithm, calling Level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of super-diagonals of the matrix A if UPLO = 'U', */
+/* or the number of sub-diagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) REAL array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, if INFO = 0, the triangular factor U or L from the */
+/* Cholesky factorization A = U'*U or A = L*L' of the band */
+/* matrix A, in the same storage format as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, the leading minor of order k is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* N = 6, KD = 2, and UPLO = 'U': */
+
+/* On entry: On exit: */
+
+/* * * a13 a24 a35 a46 * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+
+/* Similarly, if UPLO = 'L' the format of A is as follows: */
+
+/* On entry: On exit: */
+
+/* a11 a22 a33 a44 a55 a66 l11 l22 l33 l44 l55 l66 */
+/* a21 a32 a43 a54 a65 * l21 l32 l43 l54 l65 * */
+/* a31 a42 a53 a64 * * l31 l42 l53 l64 * * */
+
+/* Array elements marked * are not used by the routine. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kd < 0) {
+ *info = -3;
+ } else if (*ldab < *kd + 1) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPBTF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Computing MAX */
+ i__1 = 1, i__2 = *ldab - 1;
+ kld = max(i__1,i__2);
+
+ if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute U(J,J) and test for non-positive-definiteness. */
+
+ ajj = ab[*kd + 1 + j * ab_dim1];
+ if (ajj <= 0.f) {
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ ab[*kd + 1 + j * ab_dim1] = ajj;
+
+/* Compute elements J+1:J+KN of row J and update the */
+/* trailing submatrix within the band. */
+
+/* Computing MIN */
+ i__2 = *kd, i__3 = *n - j;
+ kn = min(i__2,i__3);
+ if (kn > 0) {
+ r__1 = 1.f / ajj;
+ sscal_(&kn, &r__1, &ab[*kd + (j + 1) * ab_dim1], &kld);
+ ssyr_("Upper", &kn, &c_b8, &ab[*kd + (j + 1) * ab_dim1], &kld,
+ &ab[*kd + 1 + (j + 1) * ab_dim1], &kld);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute L(J,J) and test for non-positive-definiteness. */
+
+ ajj = ab[j * ab_dim1 + 1];
+ if (ajj <= 0.f) {
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ ab[j * ab_dim1 + 1] = ajj;
+
+/* Compute elements J+1:J+KN of column J and update the */
+/* trailing submatrix within the band. */
+
+/* Computing MIN */
+ i__2 = *kd, i__3 = *n - j;
+ kn = min(i__2,i__3);
+ if (kn > 0) {
+ r__1 = 1.f / ajj;
+ sscal_(&kn, &r__1, &ab[j * ab_dim1 + 2], &c__1);
+ ssyr_("Lower", &kn, &c_b8, &ab[j * ab_dim1 + 2], &c__1, &ab[(
+ j + 1) * ab_dim1 + 1], &kld);
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+L30:
+ *info = j;
+ return 0;
+
+/* End of SPBTF2 */
+
+} /* spbtf2_ */
diff --git a/contrib/libs/clapack/spbtrf.c b/contrib/libs/clapack/spbtrf.c
new file mode 100644
index 0000000000..8f8ccca045
--- /dev/null
+++ b/contrib/libs/clapack/spbtrf.c
@@ -0,0 +1,469 @@
+/* spbtrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b18 = 1.f;
+static real c_b21 = -1.f;
+static integer c__33 = 33;
+
+/* Subroutine */ int spbtrf_(char *uplo, integer *n, integer *kd, real *ab,
+	integer *ldab, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, j, i2, i3, ib, nb, ii, jj;
+/*  WORK is a fixed 33 x 32 scratch block (leading dimension LDWORK = 33); */
+/*  its size is what caps the block size NB at 32 further below. */
+    real work[1056]	/* was [33][32] */;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+	    integer *, real *, real *, integer *, real *, integer *, real *,
+	    real *, integer *), strsm_(char *, char *, char *,
+	    char *, integer *, integer *, real *, real *, integer *, real *,
+	    integer *), ssyrk_(char *, char *,
+	    integer *, integer *, real *, real *, integer *, real *, real *,
+	    integer *), spbtf2_(char *, integer *, integer *,
+	    real *, integer *, integer *), spotf2_(char *, integer *,
+	    real *, integer *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SPBTRF computes the Cholesky factorization of a real symmetric */
+/*  positive definite band matrix A. */
+
+/*  The factorization has the form */
+/*     A = U**T * U,  if UPLO = 'U', or */
+/*     A = L  * L**T,  if UPLO = 'L', */
+/*  where U is an upper triangular matrix and L is lower triangular. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangle of A is stored; */
+/*          = 'L':  Lower triangle of A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  KD      (input) INTEGER */
+/*          The number of superdiagonals of the matrix A if UPLO = 'U', */
+/*          or the number of subdiagonals if UPLO = 'L'.  KD >= 0. */
+
+/*  AB      (input/output) REAL array, dimension (LDAB,N) */
+/*          On entry, the upper or lower triangle of the symmetric band */
+/*          matrix A, stored in the first KD+1 rows of the array.  The */
+/*          j-th column of A is stored in the j-th column of the array AB */
+/*          as follows: */
+/*          if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/*          if UPLO = 'L', AB(1+i-j,j)    = A(i,j) for j<=i<=min(n,j+kd). */
+
+/*          On exit, if INFO = 0, the triangular factor U or L from the */
+/*          Cholesky factorization A = U**T*U or A = L*L**T of the band */
+/*          matrix A, in the same storage format as A. */
+
+/*  LDAB    (input) INTEGER */
+/*          The leading dimension of the array AB.  LDAB >= KD+1. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, the leading minor of order i is not */
+/*                positive definite, and the factorization could not be */
+/*                completed. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  The band storage scheme is illustrated by the following example, when */
+/*  N = 6, KD = 2, and UPLO = 'U': */
+
+/*  On entry:                       On exit: */
+
+/*      *    *   a13  a24  a35  a46      *    *   u13  u24  u35  u46 */
+/*      *   a12  a23  a34  a45  a56      *   u12  u23  u34  u45  u56 */
+/*     a11  a22  a33  a44  a55  a66     u11  u22  u33  u44  u55  u66 */
+
+/*  Similarly, if UPLO = 'L' the format of A is as follows: */
+
+/*  On entry:                       On exit: */
+
+/*     a11  a22  a33  a44  a55  a66     l11  l22  l33  l44  l55  l66 */
+/*     a21  a32  a43  a54  a65   *      l21  l32  l43  l54  l65   * */
+/*     a31  a42  a53  a64   *    *      l31  l42  l53  l64   *    * */
+
+/*  Array elements marked * are not used by the routine. */
+
+/*  Contributed by */
+/*  Peter Mayes and Giuseppe Radicati, IBM ECSEC, Rome, March 23, 1989 */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+/*     Shift AB so that the 1-based Fortran-style reference AB(i,j) maps to */
+/*     ab[i + j * ab_dim1] in the code below (standard f2c adjustment). */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*ldab < *kd + 1) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SPBTRF", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Determine the block size for this environment */
+
+    nb = ilaenv_(&c__1, "SPBTRF", uplo, n, kd, &c_n1, &c_n1);
+
+/*     The block size must not exceed the semi-bandwidth KD, and must not */
+/*     exceed the limit set by the size of the local array WORK. */
+
+    nb = min(nb,32);
+
+    if (nb <= 1 || nb > *kd) {
+
+/*        Use unblocked code */
+
+	spbtf2_(uplo, n, kd, &ab[ab_offset], ldab, info);
+    } else {
+
+/*        Use blocked code */
+
+	if (lsame_(uplo, "U")) {
+
+/*           Compute the Cholesky factorization of a symmetric band */
+/*           matrix, given the upper triangle of the matrix in band */
+/*           storage. */
+
+/*           Zero the upper triangle of the work array. */
+
+	    i__1 = nb;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+/*                 WORK(i,j) with LDWORK = 33 (hence the "- 34" bias). */
+		    work[i__ + j * 33 - 34] = 0.f;
+/* L10: */
+		}
+/* L20: */
+	    }
+
+/*           Process the band matrix one diagonal block at a time. */
+
+/*           f2c translation of "DO I = 1, N, NB"; the negative-stride arm */
+/*           of the conditional is dead here since NB >= 2 on this path. */
+	    i__1 = *n;
+	    i__2 = nb;
+	    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+		i__3 = nb, i__4 = *n - i__ + 1;
+		ib = min(i__3,i__4);
+
+/*              Factorize the diagonal block */
+
+/*              A diagonal block of the band matrix is addressed as a full */
+/*              submatrix with effective leading dimension LDAB-1. */
+		i__3 = *ldab - 1;
+		spotf2_(uplo, &ib, &ab[*kd + 1 + i__ * ab_dim1], &i__3, &ii);
+		if (ii != 0) {
+		    *info = i__ + ii - 1;
+		    goto L150;
+		}
+		if (i__ + ib <= *n) {
+
+/*                 Update the relevant part of the trailing submatrix. */
+/*                 If A11 denotes the diagonal block which has just been */
+/*                 factorized, then we need to update the remaining */
+/*                 blocks in the diagram: */
+
+/*                    A11   A12   A13 */
+/*                          A22   A23 */
+/*                                A33 */
+
+/*                 The numbers of rows and columns in the partitioning */
+/*                 are IB, I2, I3 respectively.  The blocks A12, A22 and */
+/*                 A23 are empty if IB = KD.  The upper triangle of A13 */
+/*                 lies outside the band. */
+
+/* Computing MIN */
+		    i__3 = *kd - ib, i__4 = *n - i__ - ib + 1;
+		    i2 = min(i__3,i__4);
+/* Computing MIN */
+		    i__3 = ib, i__4 = *n - i__ - *kd + 1;
+		    i3 = min(i__3,i__4);
+
+		    if (i2 > 0) {
+
+/*                    Update A12 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			strsm_("Left", "Upper", "Transpose", "Non-unit", &ib,
+				&i2, &c_b18, &ab[*kd + 1 + i__ * ab_dim1], &
+				i__3, &ab[*kd + 1 - ib + (i__ + ib) * ab_dim1]
+, &i__4);
+
+/*                    Update A22 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			ssyrk_("Upper", "Transpose", &i2, &ib, &c_b21, &ab[*
+				kd + 1 - ib + (i__ + ib) * ab_dim1], &i__3, &
+				c_b18, &ab[*kd + 1 + (i__ + ib) * ab_dim1], &
+				i__4);
+		    }
+
+		    if (i3 > 0) {
+
+/*                    Copy the lower triangle of A13 into the work array. */
+
+			i__3 = i3;
+			for (jj = 1; jj <= i__3; ++jj) {
+			    i__4 = ib;
+			    for (ii = jj; ii <= i__4; ++ii) {
+				work[ii + jj * 33 - 34] = ab[ii - jj + 1 + (
+					jj + i__ + *kd - 1) * ab_dim1];
+/* L30: */
+			    }
+/* L40: */
+			}
+
+/*                    Update A13 (in the work array). */
+
+			i__3 = *ldab - 1;
+			strsm_("Left", "Upper", "Transpose", "Non-unit", &ib,
+				&i3, &c_b18, &ab[*kd + 1 + i__ * ab_dim1], &
+				i__3, work, &c__33);
+
+/*                    Update A23 */
+
+			if (i2 > 0) {
+			    i__3 = *ldab - 1;
+			    i__4 = *ldab - 1;
+			    sgemm_("Transpose", "No Transpose", &i2, &i3, &ib,
+				     &c_b21, &ab[*kd + 1 - ib + (i__ + ib) *
+				    ab_dim1], &i__3, work, &c__33, &c_b18, &
+				    ab[ib + 1 + (i__ + *kd) * ab_dim1], &i__4);
+			}
+
+/*                    Update A33 */
+
+			i__3 = *ldab - 1;
+			ssyrk_("Upper", "Transpose", &i3, &ib, &c_b21, work, &
+				c__33, &c_b18, &ab[*kd + 1 + (i__ + *kd) *
+				ab_dim1], &i__3);
+
+/*                    Copy the lower triangle of A13 back into place. */
+
+			i__3 = i3;
+			for (jj = 1; jj <= i__3; ++jj) {
+			    i__4 = ib;
+			    for (ii = jj; ii <= i__4; ++ii) {
+				ab[ii - jj + 1 + (jj + i__ + *kd - 1) *
+					ab_dim1] = work[ii + jj * 33 - 34];
+/* L50: */
+			    }
+/* L60: */
+			}
+		    }
+		}
+/* L70: */
+	    }
+	} else {
+
+/*           Compute the Cholesky factorization of a symmetric band */
+/*           matrix, given the lower triangle of the matrix in band */
+/*           storage. */
+
+/*           Zero the lower triangle of the work array. */
+
+	    i__2 = nb;
+	    for (j = 1; j <= i__2; ++j) {
+		i__1 = nb;
+		for (i__ = j + 1; i__ <= i__1; ++i__) {
+		    work[i__ + j * 33 - 34] = 0.f;
+/* L80: */
+		}
+/* L90: */
+	    }
+
+/*           Process the band matrix one diagonal block at a time. */
+
+	    i__2 = *n;
+	    i__1 = nb;
+	    for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+/* Computing MIN */
+		i__3 = nb, i__4 = *n - i__ + 1;
+		ib = min(i__3,i__4);
+
+/*              Factorize the diagonal block */
+
+		i__3 = *ldab - 1;
+		spotf2_(uplo, &ib, &ab[i__ * ab_dim1 + 1], &i__3, &ii);
+		if (ii != 0) {
+		    *info = i__ + ii - 1;
+		    goto L150;
+		}
+		if (i__ + ib <= *n) {
+
+/*                 Update the relevant part of the trailing submatrix. */
+/*                 If A11 denotes the diagonal block which has just been */
+/*                 factorized, then we need to update the remaining */
+/*                 blocks in the diagram: */
+
+/*                    A11 */
+/*                    A21   A22 */
+/*                    A31   A32   A33 */
+
+/*                 The numbers of rows and columns in the partitioning */
+/*                 are IB, I2, I3 respectively.  The blocks A21, A22 and */
+/*                 A32 are empty if IB = KD.  The lower triangle of A31 */
+/*                 lies outside the band. */
+
+/* Computing MIN */
+		    i__3 = *kd - ib, i__4 = *n - i__ - ib + 1;
+		    i2 = min(i__3,i__4);
+/* Computing MIN */
+		    i__3 = ib, i__4 = *n - i__ - *kd + 1;
+		    i3 = min(i__3,i__4);
+
+		    if (i2 > 0) {
+
+/*                    Update A21 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			strsm_("Right", "Lower", "Transpose", "Non-unit", &i2,
+				 &ib, &c_b18, &ab[i__ * ab_dim1 + 1], &i__3, &
+				ab[ib + 1 + i__ * ab_dim1], &i__4);
+
+/*                    Update A22 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			ssyrk_("Lower", "No Transpose", &i2, &ib, &c_b21, &ab[
+				ib + 1 + i__ * ab_dim1], &i__3, &c_b18, &ab[(
+				i__ + ib) * ab_dim1 + 1], &i__4);
+		    }
+
+		    if (i3 > 0) {
+
+/*                    Copy the upper triangle of A31 into the work array. */
+
+			i__3 = ib;
+			for (jj = 1; jj <= i__3; ++jj) {
+			    i__4 = min(jj,i3);
+			    for (ii = 1; ii <= i__4; ++ii) {
+				work[ii + jj * 33 - 34] = ab[*kd + 1 - jj +
+					ii + (jj + i__ - 1) * ab_dim1];
+/* L100: */
+			    }
+/* L110: */
+			}
+
+/*                    Update A31 (in the work array). */
+
+			i__3 = *ldab - 1;
+			strsm_("Right", "Lower", "Transpose", "Non-unit", &i3,
+				 &ib, &c_b18, &ab[i__ * ab_dim1 + 1], &i__3,
+				work, &c__33);
+
+/*                    Update A32 */
+
+			if (i2 > 0) {
+			    i__3 = *ldab - 1;
+			    i__4 = *ldab - 1;
+			    sgemm_("No transpose", "Transpose", &i3, &i2, &ib,
+				     &c_b21, work, &c__33, &ab[ib + 1 + i__ *
+				    ab_dim1], &i__3, &c_b18, &ab[*kd + 1 - ib
+				    + (i__ + ib) * ab_dim1], &i__4);
+			}
+
+/*                    Update A33 */
+
+			i__3 = *ldab - 1;
+			ssyrk_("Lower", "No Transpose", &i3, &ib, &c_b21,
+				work, &c__33, &c_b18, &ab[(i__ + *kd) *
+				ab_dim1 + 1], &i__3);
+
+/*                    Copy the upper triangle of A31 back into place. */
+
+			i__3 = ib;
+			for (jj = 1; jj <= i__3; ++jj) {
+			    i__4 = min(jj,i3);
+			    for (ii = 1; ii <= i__4; ++ii) {
+				ab[*kd + 1 - jj + ii + (jj + i__ - 1) *
+					ab_dim1] = work[ii + jj * 33 - 34];
+/* L120: */
+			    }
+/* L130: */
+			}
+		    }
+		}
+/* L140: */
+	    }
+	}
+    }
+    return 0;
+
+/*     Early exit: INFO was already set to the index of the first */
+/*     non-positive-definite leading minor before jumping here. */
+L150:
+    return 0;
+
+/*     End of SPBTRF */
+
+} /* spbtrf_ */
diff --git a/contrib/libs/clapack/spbtrs.c b/contrib/libs/clapack/spbtrs.c
new file mode 100644
index 0000000000..dd15ab8325
--- /dev/null
+++ b/contrib/libs/clapack/spbtrs.c
@@ -0,0 +1,182 @@
+/* spbtrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int spbtrs_(char *uplo, integer *n, integer *kd, integer *
+	nrhs, real *ab, integer *ldab, real *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    integer j;
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int stbsv_(char *, char *, char *, integer *,
+	    integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SPBTRS solves a system of linear equations A*X = B with a symmetric */
+/*  positive definite band matrix A using the Cholesky factorization */
+/*  A = U**T*U or A = L*L**T computed by SPBTRF. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangular factor stored in AB; */
+/*          = 'L':  Lower triangular factor stored in AB. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  KD      (input) INTEGER */
+/*          The number of superdiagonals of the matrix A if UPLO = 'U', */
+/*          or the number of subdiagonals if UPLO = 'L'.  KD >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrix B.  NRHS >= 0. */
+
+/*  AB      (input) REAL array, dimension (LDAB,N) */
+/*          The triangular factor U or L from the Cholesky factorization */
+/*          A = U**T*U or A = L*L**T of the band matrix A, stored in the */
+/*          first KD+1 rows of the array.  The j-th column of U or L is */
+/*          stored in the j-th column of the array AB as follows: */
+/*          if UPLO ='U', AB(kd+1+i-j,j) = U(i,j) for max(1,j-kd)<=i<=j; */
+/*          if UPLO ='L', AB(1+i-j,j)    = L(i,j) for j<=i<=min(n,j+kd). */
+
+/*  LDAB    (input) INTEGER */
+/*          The leading dimension of the array AB.  LDAB >= KD+1. */
+
+/*  B       (input/output) REAL array, dimension (LDB,NRHS) */
+/*          On entry, the right hand side matrix B. */
+/*          On exit, the solution matrix X. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+/*     Standard f2c pointer shifts so AB(i,j) and B(i,j) can be addressed */
+/*     with 1-based Fortran-style indices below. */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (*ldab < *kd + 1) {
+	*info = -6;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SPBTRS", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+/*     Each right-hand-side column is processed with two banded triangular */
+/*     solves (STBSV); increment c__1 selects the contiguous column of B. */
+    if (upper) {
+
+/*        Solve A*X = B where A = U'*U. */
+
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+
+/*           Solve U'*X = B, overwriting B with X. */
+
+	    stbsv_("Upper", "Transpose", "Non-unit", n, kd, &ab[ab_offset],
+		    ldab, &b[j * b_dim1 + 1], &c__1);
+
+/*           Solve U*X = B, overwriting B with X. */
+
+	    stbsv_("Upper", "No transpose", "Non-unit", n, kd, &ab[ab_offset],
+		    ldab, &b[j * b_dim1 + 1], &c__1);
+/* L10: */
+	}
+    } else {
+
+/*        Solve A*X = B where A = L*L'. */
+
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+
+/*           Solve L*X = B, overwriting B with X. */
+
+	    stbsv_("Lower", "No transpose", "Non-unit", n, kd, &ab[ab_offset],
+		    ldab, &b[j * b_dim1 + 1], &c__1);
+
+/*           Solve L'*X = B, overwriting B with X. */
+
+	    stbsv_("Lower", "Transpose", "Non-unit", n, kd, &ab[ab_offset],
+		    ldab, &b[j * b_dim1 + 1], &c__1);
+/* L20: */
+	}
+    }
+
+    return 0;
+
+/*     End of SPBTRS */
+
+} /* spbtrs_ */
diff --git a/contrib/libs/clapack/spftrf.c b/contrib/libs/clapack/spftrf.c
new file mode 100644
index 0000000000..64a23587a6
--- /dev/null
+++ b/contrib/libs/clapack/spftrf.c
@@ -0,0 +1,451 @@
+/* spftrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b12 = 1.f;
+static real c_b15 = -1.f;
+
+/* Subroutine */ int spftrf_(char *transr, char *uplo, integer *n, real *a,
+	integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer k, n1, n2;
+    logical normaltransr;
+    extern logical lsame_(char *, char *);
+    logical lower;
+    extern /* Subroutine */ int strsm_(char *, char *, char *, char *,
+	    integer *, integer *, real *, real *, integer *, real *, integer *
+), ssyrk_(char *, char *, integer
+	    *, integer *, real *, real *, integer *, real *, real *, integer *
+), xerbla_(char *, integer *);
+    logical nisodd;
+    extern /* Subroutine */ int spotrf_(char *, integer *, real *, integer *,
+	    integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+
+/*  -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/*  -- November 2008 -- */
+
+/*  -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/*     .. */
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SPFTRF computes the Cholesky factorization of a real symmetric */
+/*  positive definite matrix A. */
+
+/*  The factorization has the form */
+/*     A = U**T * U,  if UPLO = 'U', or */
+/*     A = L  * L**T,  if UPLO = 'L', */
+/*  where U is an upper triangular matrix and L is lower triangular. */
+
+/*  This is the block version of the algorithm, calling Level 3 BLAS. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  TRANSR  (input) CHARACTER */
+/*          = 'N':  The Normal TRANSR of RFP A is stored; */
+/*          = 'T':  The Transpose TRANSR of RFP A is stored. */
+
+/*  UPLO    (input) CHARACTER */
+/*          = 'U':  Upper triangle of RFP A is stored; */
+/*          = 'L':  Lower triangle of RFP A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  A       (input/output) REAL array, dimension ( N*(N+1)/2 ); */
+/*          On entry, the symmetric matrix A in RFP format.  RFP format is */
+/*          described by TRANSR, UPLO, and N as follows: If TRANSR = 'N' */
+/*          then RFP A is (0:N,0:k-1) when N is even; k=N/2.  RFP A is */
+/*          (0:N-1,0:k) when N is odd; k=N/2.  IF TRANSR = 'T' then RFP is */
+/*          the transpose of RFP A as defined when */
+/*          TRANSR = 'N'.  The contents of RFP A are defined by UPLO as */
+/*          follows: If UPLO = 'U' the RFP A contains the NT elements of */
+/*          upper packed A.  If UPLO = 'L' the RFP A contains the elements */
+/*          of lower packed A.  The LDA of RFP A is (N+1)/2 when TRANSR = */
+/*          'T'.  When TRANSR is 'N' the LDA is N+1 when N is even and N */
+/*          is odd.  See the Note below for more details. */
+
+/*          On exit, if INFO = 0, the factor U or L from the Cholesky */
+/*          factorization RFP A = U**T*U or RFP A = L*L**T. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, the leading minor of order i is not */
+/*                positive definite, and the factorization could not be */
+/*                completed. */
+
+/*  Notes */
+/*  ===== */
+
+/*  We first consider Rectangular Full Packed (RFP) Format when N is */
+/*  even. We give an example where N = 6. */
+
+/*      AP is Upper             AP is Lower */
+
+/*   00 01 02 03 04 05       00 */
+/*      11 12 13 14 15       10 11 */
+/*         22 23 24 25       20 21 22 */
+/*            33 34 35       30 31 32 33 */
+/*               44 45       40 41 42 43 44 */
+/*                  55       50 51 52 53 54 55 */
+
+
+/*  Let TRANSR = 'N'. RFP holds AP as follows: */
+/*  For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/*  three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/*  the transpose of the first three columns of AP upper. */
+/*  For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/*  three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/*  the transpose of the last three columns of AP lower. */
+/*  This covers the case N even and TRANSR = 'N'. */
+
+/*         RFP A                   RFP A */
+
+/*        03 04 05                33 43 53 */
+/*        13 14 15                00 44 54 */
+/*        23 24 25                10 11 55 */
+/*        33 34 35                20 21 22 */
+/*        00 44 45                30 31 32 */
+/*        01 11 55                40 41 42 */
+/*        02 12 22                50 51 52 */
+
+/*  Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/*  transpose of RFP A above. One therefore gets: */
+
+
+/*           RFP A                   RFP A */
+
+/*     03 13 23 33 00 01 02    33 00 10 20 30 40 50 */
+/*     04 14 24 34 44 11 12    43 44 11 21 31 41 51 */
+/*     05 15 25 35 45 55 22    53 54 55 22 32 42 52 */
+
+
+/*  We first consider Rectangular Full Packed (RFP) Format when N is */
+/*  odd. We give an example where N = 5. */
+
+/*     AP is Upper                 AP is Lower */
+
+/*   00 01 02 03 04              00 */
+/*      11 12 13 14              10 11 */
+/*         22 23 24              20 21 22 */
+/*            33 34              30 31 32 33 */
+/*               44              40 41 42 43 44 */
+
+
+/*  Let TRANSR = 'N'. RFP holds AP as follows: */
+/*  For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/*  three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/*  the transpose of the first two columns of AP upper. */
+/*  For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/*  three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/*  the transpose of the last two columns of AP lower. */
+/*  This covers the case N odd and TRANSR = 'N'. */
+
+/*         RFP A                   RFP A */
+
+/*        02 03 04                00 33 43 */
+/*        12 13 14                10 11 44 */
+/*        22 23 24                20 21 22 */
+/*        00 33 34                30 31 32 */
+/*        01 11 44                40 41 42 */
+
+/*  Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/*  transpose of RFP A above. One therefore gets: */
+
+/*           RFP A                   RFP A */
+
+/*     02 12 22 00 01             00 10 20 30 40 50 */
+/*     03 13 23 33 11             33 11 21 31 41 51 */
+/*     04 14 24 34 44             43 44 22 32 42 52 */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+/*     Note: unlike most f2c-translated routines, A carries no pointer */
+/*     offset adjustment here -- it is indexed 0-based throughout. */
+    *info = 0;
+    normaltransr = lsame_(transr, "N");
+    lower = lsame_(uplo, "L");
+    if (! normaltransr && ! lsame_(transr, "T")) {
+	*info = -1;
+    } else if (! lower && ! lsame_(uplo, "U")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SPFTRF", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     If N is odd, set NISODD = .TRUE. */
+/*     If N is even, set K = N/2 and NISODD = .FALSE. */
+
+/*     (K is only assigned -- and only used -- on the even-N paths.) */
+    if (*n % 2 == 0) {
+	k = *n / 2;
+	nisodd = FALSE_;
+    } else {
+	nisodd = TRUE_;
+    }
+
+/*     Set N1 and N2 depending on LOWER */
+
+    if (lower) {
+	n2 = *n / 2;
+	n1 = *n - n2;
+    } else {
+	n1 = *n / 2;
+	n2 = *n - n1;
+    }
+
+/*     start execution: there are eight cases */
+
+    if (nisodd) {
+
+/*        N is odd */
+
+	if (normaltransr) {
+
+/*           N is odd and TRANSR = 'N' */
+
+	    if (lower) {
+
+/*             SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:n1-1) ) */
+/*             T1 -> a(0,0), T2 -> a(0,1), S -> a(n1,0) */
+/*             T1 -> a(0), T2 -> a(n), S -> a(n1) */
+
+		spotrf_("L", &n1, a, n, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		strsm_("R", "L", "T", "N", &n2, &n1, &c_b12, a, n, &a[n1], n);
+		ssyrk_("U", "N", &n2, &n1, &c_b15, &a[n1], n, &c_b12, &a[*n],
+			n);
+		spotrf_("U", &n2, &a[*n], n, info);
+		if (*info > 0) {
+/*                 Offset the failure index into the second block. */
+		    *info += n1;
+		}
+
+	    } else {
+
+/*             SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:n2-1) */
+/*             T1 -> a(n1+1,0), T2 -> a(n1,0), S -> a(0,0) */
+/*             T1 -> a(n2), T2 -> a(n1), S -> a(0) */
+
+		spotrf_("L", &n1, &a[n2], n, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		strsm_("L", "L", "N", "N", &n1, &n2, &c_b12, &a[n2], n, a, n);
+		ssyrk_("U", "T", &n2, &n1, &c_b15, a, n, &c_b12, &a[n1], n);
+		spotrf_("U", &n2, &a[n1], n, info);
+		if (*info > 0) {
+		    *info += n1;
+		}
+
+	    }
+
+	} else {
+
+/*           N is odd and TRANSR = 'T' */
+
+	    if (lower) {
+
+/*              SRPA for LOWER, TRANSPOSE and N is odd */
+/*              T1 -> A(0,0) , T2 -> A(1,0) , S -> A(0,n1) */
+/*              T1 -> a(0+0) , T2 -> a(1+0) , S -> a(0+n1*n1); lda=n1 */
+
+		spotrf_("U", &n1, a, &n1, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		strsm_("L", "U", "T", "N", &n1, &n2, &c_b12, a, &n1, &a[n1 *
+			n1], &n1);
+		ssyrk_("L", "T", &n2, &n1, &c_b15, &a[n1 * n1], &n1, &c_b12, &
+			a[1], &n1);
+		spotrf_("L", &n2, &a[1], &n1, info);
+		if (*info > 0) {
+		    *info += n1;
+		}
+
+	    } else {
+
+/*              SRPA for UPPER, TRANSPOSE and N is odd */
+/*              T1 -> A(0,n1+1), T2 -> A(0,n1), S -> A(0,0) */
+/*              T1 -> a(n2*n2), T2 -> a(n1*n2), S -> a(0); lda = n2 */
+
+		spotrf_("U", &n1, &a[n2 * n2], &n2, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		strsm_("R", "U", "N", "N", &n2, &n1, &c_b12, &a[n2 * n2], &n2,
+			 a, &n2);
+		ssyrk_("L", "N", &n2, &n1, &c_b15, a, &n2, &c_b12, &a[n1 * n2]
+, &n2);
+		spotrf_("L", &n2, &a[n1 * n2], &n2, info);
+		if (*info > 0) {
+		    *info += n1;
+		}
+
+	    }
+
+	}
+
+    } else {
+
+/*        N is even */
+
+	if (normaltransr) {
+
+/*           N is even and TRANSR = 'N' */
+
+	    if (lower) {
+
+/*              SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/*              T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/*              T1 -> a(1), T2 -> a(0), S -> a(k+1) */
+
+		i__1 = *n + 1;
+		spotrf_("L", &k, &a[1], &i__1, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		strsm_("R", "L", "T", "N", &k, &k, &c_b12, &a[1], &i__1, &a[k
+			+ 1], &i__2);
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		ssyrk_("U", "N", &k, &k, &c_b15, &a[k + 1], &i__1, &c_b12, a,
+			&i__2);
+		i__1 = *n + 1;
+		spotrf_("U", &k, a, &i__1, info);
+		if (*info > 0) {
+		    *info += k;
+		}
+
+	    } else {
+
+/*              SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/*              T1 -> a(k+1,0) ,  T2 -> a(k,0),   S -> a(0,0) */
+/*              T1 -> a(k+1), T2 -> a(k), S -> a(0) */
+
+		i__1 = *n + 1;
+		spotrf_("L", &k, &a[k + 1], &i__1, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		strsm_("L", "L", "N", "N", &k, &k, &c_b12, &a[k + 1], &i__1,
+			a, &i__2);
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		ssyrk_("U", "T", &k, &k, &c_b15, a, &i__1, &c_b12, &a[k], &
+			i__2);
+		i__1 = *n + 1;
+		spotrf_("U", &k, &a[k], &i__1, info);
+		if (*info > 0) {
+		    *info += k;
+		}
+
+	    }
+
+	} else {
+
+/*           N is even and TRANSR = 'T' */
+
+	    if (lower) {
+
+/*              SRPA for LOWER, TRANSPOSE and N is even (see paper) */
+/*              T1 -> B(0,1), T2 -> B(0,0), S -> B(0,k+1) */
+/*              T1 -> a(0+k), T2 -> a(0+0), S -> a(0+k*(k+1)); lda=k */
+
+		spotrf_("U", &k, &a[k], &k, info);
+		if (*info > 0) {
+		    return 0;
+		}
+/*              NOTE(review): lda is passed as N1 here while the layout */
+/*              comment above says lda=k; for even N in the LOWER branch */
+/*              N1 = N - N/2 = K, so the two are equal and this is */
+/*              consistent -- merely an inconsistent spelling. */
+		strsm_("L", "U", "T", "N", &k, &k, &c_b12, &a[k], &n1, &a[k *
+			(k + 1)], &k);
+		ssyrk_("L", "T", &k, &k, &c_b15, &a[k * (k + 1)], &k, &c_b12,
+			a, &k);
+		spotrf_("L", &k, a, &k, info);
+		if (*info > 0) {
+		    *info += k;
+		}
+
+	    } else {
+
+/*              SRPA for UPPER, TRANSPOSE and N is even (see paper) */
+/*              T1 -> B(0,k+1),     T2 -> B(0,k),   S -> B(0,0) */
+/*              T1 -> a(0+k*(k+1)), T2 -> a(0+k*k), S -> a(0+0)); lda=k */
+
+		spotrf_("U", &k, &a[k * (k + 1)], &k, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		strsm_("R", "U", "N", "N", &k, &k, &c_b12, &a[k * (k + 1)], &
+			k, a, &k);
+		ssyrk_("L", "N", &k, &k, &c_b15, a, &k, &c_b12, &a[k * k], &k);
+		spotrf_("L", &k, &a[k * k], &k, info);
+		if (*info > 0) {
+		    *info += k;
+		}
+
+	    }
+
+	}
+
+    }
+
+    return 0;
+
+/*     End of SPFTRF */
+
+} /* spftrf_ */
diff --git a/contrib/libs/clapack/spftri.c b/contrib/libs/clapack/spftri.c
new file mode 100644
index 0000000000..fde795a5bf
--- /dev/null
+++ b/contrib/libs/clapack/spftri.c
@@ -0,0 +1,402 @@
+/* spftri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b11 = 1.f;
+
+/* Subroutine */ int spftri_(char *transr, char *uplo, integer *n, real *a,
+ integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ integer k, n1, n2;
+ logical normaltransr;
+ extern logical lsame_(char *, char *);
+ logical lower;
+ extern /* Subroutine */ int strmm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+), ssyrk_(char *, char *, integer
+ *, integer *, real *, real *, integer *, real *, real *, integer *
+), xerbla_(char *, integer *);
+ logical nisodd;
+ extern /* Subroutine */ int slauum_(char *, integer *, real *, integer *,
+ integer *), stftri_(char *, char *, char *, integer *,
+ real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPFTRI computes the inverse of a real (symmetric) positive definite */
+/* matrix A using the Cholesky factorization A = U**T*U or A = L*L**T */
+/* computed by SPFTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': The Normal TRANSR of RFP A is stored; */
+/* = 'T': The Transpose TRANSR of RFP A is stored. */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension ( N*(N+1)/2 ) */
+/* On entry, the symmetric matrix A in RFP format. RFP format is */
+/* described by TRANSR, UPLO, and N as follows: If TRANSR = 'N' */
+/* then RFP A is (0:N,0:k-1) when N is even; k=N/2. RFP A is */
+/* (0:N-1,0:k) when N is odd; k=N/2. IF TRANSR = 'T' then RFP is */
+/* the transpose of RFP A as defined when */
+/* TRANSR = 'N'. The contents of RFP A are defined by UPLO as */
+/* follows: If UPLO = 'U' the RFP A contains the nt elements of */
+/* upper packed A. If UPLO = 'L' the RFP A contains the elements */
+/* of lower packed A. The LDA of RFP A is (N+1)/2 when TRANSR = */
+/* 'T'. When TRANSR is 'N' the LDA is N+1 when N is even and N */
+/* is odd. See the Note below for more details. */
+
+/* On exit, the symmetric inverse of the original matrix, in the */
+/* same storage format. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the (i,i) element of the factor U or L is */
+/* zero, and the inverse could not be computed. */
+
+/* Notes */
+/* ===== */
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* even. We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* the transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* the transpose of the last three columns of AP lower. */
+/* This covers the case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 03 04 05 33 43 53 */
+/* 13 14 15 00 44 54 */
+/* 23 24 25 10 11 55 */
+/* 33 34 35 20 21 22 */
+/* 00 44 45 30 31 32 */
+/* 01 11 55 40 41 42 */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* odd. We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* the transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* the transpose of the last two columns of AP lower. */
+/* This covers the case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 02 03 04 00 33 43 */
+/* 12 13 14 10 11 44 */
+/* 22 23 24 20 21 22 */
+/* 00 33 34 30 31 32 */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+/* RFP A RFP A */
+
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ *info = 0;
+ normaltransr = lsame_(transr, "N");
+ lower = lsame_(uplo, "L");
+ if (! normaltransr && ! lsame_(transr, "T")) {
+ *info = -1;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPFTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Invert the triangular Cholesky factor U or L. */
+
+ stftri_(transr, uplo, "N", n, a, info);
+ if (*info > 0) {
+ return 0;
+ }
+
+/* If N is odd, set NISODD = .TRUE. */
+/* If N is even, set K = N/2 and NISODD = .FALSE. */
+
+ if (*n % 2 == 0) {
+ k = *n / 2;
+ nisodd = FALSE_;
+ } else {
+ nisodd = TRUE_;
+ }
+
+/* Set N1 and N2 depending on LOWER */
+
+ if (lower) {
+ n2 = *n / 2;
+ n1 = *n - n2;
+ } else {
+ n1 = *n / 2;
+ n2 = *n - n1;
+ }
+
+/* Start execution of triangular matrix multiply: inv(U)*inv(U)^C or */
+/* inv(L)^C*inv(L). There are eight cases. */
+
+ if (nisodd) {
+
+/* N is odd */
+
+ if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:N1-1) ) */
+/* T1 -> a(0,0), T2 -> a(0,1), S -> a(N1,0) */
+/* T1 -> a(0), T2 -> a(n), S -> a(N1) */
+
+ slauum_("L", &n1, a, n, info);
+ ssyrk_("L", "T", &n1, &n2, &c_b11, &a[n1], n, &c_b11, a, n);
+ strmm_("L", "U", "N", "N", &n2, &n1, &c_b11, &a[*n], n, &a[n1]
+, n);
+ slauum_("U", &n2, &a[*n], n, info);
+
+ } else {
+
+/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:N2-1) */
+/* T1 -> a(N1+1,0), T2 -> a(N1,0), S -> a(0,0) */
+/* T1 -> a(N2), T2 -> a(N1), S -> a(0) */
+
+ slauum_("L", &n1, &a[n2], n, info);
+ ssyrk_("L", "N", &n1, &n2, &c_b11, a, n, &c_b11, &a[n2], n);
+ strmm_("R", "U", "T", "N", &n1, &n2, &c_b11, &a[n1], n, a, n);
+ slauum_("U", &n2, &a[n1], n, info);
+
+ }
+
+ } else {
+
+/* N is odd and TRANSR = 'T' */
+
+ if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE, and N is odd */
+/* T1 -> a(0), T2 -> a(1), S -> a(0+N1*N1) */
+
+ slauum_("U", &n1, a, &n1, info);
+ ssyrk_("U", "N", &n1, &n2, &c_b11, &a[n1 * n1], &n1, &c_b11,
+ a, &n1);
+ strmm_("R", "L", "N", "N", &n1, &n2, &c_b11, &a[1], &n1, &a[
+ n1 * n1], &n1);
+ slauum_("L", &n2, &a[1], &n1, info);
+
+ } else {
+
+/* SRPA for UPPER, TRANSPOSE, and N is odd */
+/* T1 -> a(0+N2*N2), T2 -> a(0+N1*N2), S -> a(0) */
+
+ slauum_("U", &n1, &a[n2 * n2], &n2, info);
+ ssyrk_("U", "T", &n1, &n2, &c_b11, a, &n2, &c_b11, &a[n2 * n2]
+, &n2);
+ strmm_("L", "L", "T", "N", &n2, &n1, &c_b11, &a[n1 * n2], &n2,
+ a, &n2);
+ slauum_("L", &n2, &a[n1 * n2], &n2, info);
+
+ }
+
+ }
+
+ } else {
+
+/* N is even */
+
+ if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/* T1 -> a(1), T2 -> a(0), S -> a(k+1) */
+
+ i__1 = *n + 1;
+ slauum_("L", &k, &a[1], &i__1, info);
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ ssyrk_("L", "T", &k, &k, &c_b11, &a[k + 1], &i__1, &c_b11, &a[
+ 1], &i__2);
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ strmm_("L", "U", "N", "N", &k, &k, &c_b11, a, &i__1, &a[k + 1]
+, &i__2);
+ i__1 = *n + 1;
+ slauum_("U", &k, a, &i__1, info);
+
+ } else {
+
+/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(k+1,0) , T2 -> a(k,0), S -> a(0,0) */
+/* T1 -> a(k+1), T2 -> a(k), S -> a(0) */
+
+ i__1 = *n + 1;
+ slauum_("L", &k, &a[k + 1], &i__1, info);
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ ssyrk_("L", "N", &k, &k, &c_b11, a, &i__1, &c_b11, &a[k + 1],
+ &i__2);
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ strmm_("R", "U", "T", "N", &k, &k, &c_b11, &a[k], &i__1, a, &
+ i__2);
+ i__1 = *n + 1;
+ slauum_("U", &k, &a[k], &i__1, info);
+
+ }
+
+ } else {
+
+/* N is even and TRANSR = 'T' */
+
+ if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE, and N is even (see paper) */
+/* T1 -> B(0,1), T2 -> B(0,0), S -> B(0,k+1), */
+/* T1 -> a(0+k), T2 -> a(0+0), S -> a(0+k*(k+1)); lda=k */
+
+ slauum_("U", &k, &a[k], &k, info);
+ ssyrk_("U", "N", &k, &k, &c_b11, &a[k * (k + 1)], &k, &c_b11,
+ &a[k], &k);
+ strmm_("R", "L", "N", "N", &k, &k, &c_b11, a, &k, &a[k * (k +
+ 1)], &k);
+ slauum_("L", &k, a, &k, info);
+
+ } else {
+
+/* SRPA for UPPER, TRANSPOSE, and N is even (see paper) */
+/* T1 -> B(0,k+1), T2 -> B(0,k), S -> B(0,0), */
+/* T1 -> a(0+k*(k+1)), T2 -> a(0+k*k), S -> a(0+0)); lda=k */
+
+ slauum_("U", &k, &a[k * (k + 1)], &k, info);
+ ssyrk_("U", "T", &k, &k, &c_b11, a, &k, &c_b11, &a[k * (k + 1)
+ ], &k);
+ strmm_("L", "L", "T", "N", &k, &k, &c_b11, &a[k * k], &k, a, &
+ k);
+ slauum_("L", &k, &a[k * k], &k, info);
+
+ }
+
+ }
+
+ }
+
+ return 0;
+
+/* End of SPFTRI */
+
+} /* spftri_ */
diff --git a/contrib/libs/clapack/spftrs.c b/contrib/libs/clapack/spftrs.c
new file mode 100644
index 0000000000..d8b3f6a668
--- /dev/null
+++ b/contrib/libs/clapack/spftrs.c
@@ -0,0 +1,238 @@
+/* spftrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b10 = 1.f;
+
+/* Subroutine */ int spftrs_(char *transr, char *uplo, integer *n, integer *
+ nrhs, real *a, real *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ logical normaltransr;
+ extern logical lsame_(char *, char *);
+ logical lower;
+ extern /* Subroutine */ int stfsm_(char *, char *, char *, char *, char *,
+ integer *, integer *, real *, real *, real *, integer *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPFTRS solves a system of linear equations A*X = B with a symmetric */
+/* positive definite matrix A using the Cholesky factorization */
+/* A = U**T*U or A = L*L**T computed by SPFTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': The Normal TRANSR of RFP A is stored; */
+/* = 'T': The Transpose TRANSR of RFP A is stored. */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': Upper triangle of RFP A is stored; */
+/* = 'L': Lower triangle of RFP A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input) REAL array, dimension ( N*(N+1)/2 ) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* of RFP A = U**H*U or RFP A = L*L**T, as computed by SPFTRF. */
+/* See note below for more details about RFP A. */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Notes */
+/* ===== */
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* even. We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* the transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* the transpose of the last three columns of AP lower. */
+/* This covers the case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 03 04 05 33 43 53 */
+/* 13 14 15 00 44 54 */
+/* 23 24 25 10 11 55 */
+/* 33 34 35 20 21 22 */
+/* 00 44 45 30 31 32 */
+/* 01 11 55 40 41 42 */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* odd. We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* the transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* the transpose of the last two columns of AP lower. */
+/* This covers the case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 02 03 04 00 33 43 */
+/* 12 13 14 10 11 44 */
+/* 22 23 24 20 21 22 */
+/* 00 33 34 30 31 32 */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+/* RFP A RFP A */
+
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ normaltransr = lsame_(transr, "N");
+ lower = lsame_(uplo, "L");
+ if (! normaltransr && ! lsame_(transr, "T")) {
+ *info = -1;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPFTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+/* start execution: there are two triangular solves */
+
+ if (lower) {
+ stfsm_(transr, "L", uplo, "N", "N", n, nrhs, &c_b10, a, &b[b_offset],
+ ldb);
+ stfsm_(transr, "L", uplo, "T", "N", n, nrhs, &c_b10, a, &b[b_offset],
+ ldb);
+ } else {
+ stfsm_(transr, "L", uplo, "T", "N", n, nrhs, &c_b10, a, &b[b_offset],
+ ldb);
+ stfsm_(transr, "L", uplo, "N", "N", n, nrhs, &c_b10, a, &b[b_offset],
+ ldb);
+ }
+
+ return 0;
+
+/* End of SPFTRS */
+
+} /* spftrs_ */
diff --git a/contrib/libs/clapack/spocon.c b/contrib/libs/clapack/spocon.c
new file mode 100644
index 0000000000..6d1804d6e1
--- /dev/null
+++ b/contrib/libs/clapack/spocon.c
@@ -0,0 +1,217 @@
+/* spocon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int spocon_(char *uplo, integer *n, real *a, integer *lda,
+ real *anorm, real *rcond, real *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1;
+ real r__1;
+
+ /* Local variables */
+ integer ix, kase;
+ real scale;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int srscl_(integer *, real *, real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int slacn2_(integer *, real *, real *, integer *,
+ real *, integer *, integer *);
+ real scalel;
+ extern doublereal slamch_(char *);
+ real scaleu;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ real ainvnm;
+ char normin[1];
+ extern /* Subroutine */ int slatrs_(char *, char *, char *, char *,
+ integer *, real *, integer *, real *, real *, real *, integer *);
+ real smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPOCON estimates the reciprocal of the condition number (in the */
+/* 1-norm) of a real symmetric positive definite matrix using the */
+/* Cholesky factorization A = U**T*U or A = L*L**T computed by SPOTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**T*U or A = L*L**T, as computed by SPOTRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* ANORM (input) REAL */
+/* The 1-norm (or infinity-norm) of the symmetric matrix A. */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else if (*anorm < 0.f) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPOCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.f;
+ if (*n == 0) {
+ *rcond = 1.f;
+ return 0;
+ } else if (*anorm == 0.f) {
+ return 0;
+ }
+
+ smlnum = slamch_("Safe minimum");
+
+/* Estimate the 1-norm of inv(A). */
+
+ kase = 0;
+ *(unsigned char *)normin = 'N';
+L10:
+ slacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+ if (upper) {
+
+/* Multiply by inv(U'). */
+
+ slatrs_("Upper", "Transpose", "Non-unit", normin, n, &a[a_offset],
+ lda, &work[1], &scalel, &work[(*n << 1) + 1], info);
+ *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(U). */
+
+ slatrs_("Upper", "No transpose", "Non-unit", normin, n, &a[
+ a_offset], lda, &work[1], &scaleu, &work[(*n << 1) + 1],
+ info);
+ } else {
+
+/* Multiply by inv(L). */
+
+ slatrs_("Lower", "No transpose", "Non-unit", normin, n, &a[
+ a_offset], lda, &work[1], &scalel, &work[(*n << 1) + 1],
+ info);
+ *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(L'). */
+
+ slatrs_("Lower", "Transpose", "Non-unit", normin, n, &a[a_offset],
+ lda, &work[1], &scaleu, &work[(*n << 1) + 1], info);
+ }
+
+/* Multiply by 1/SCALE if doing so will not cause overflow. */
+
+ scale = scalel * scaleu;
+ if (scale != 1.f) {
+ ix = isamax_(n, &work[1], &c__1);
+ if (scale < (r__1 = work[ix], dabs(r__1)) * smlnum || scale ==
+ 0.f) {
+ goto L20;
+ }
+ srscl_(n, &scale, &work[1], &c__1);
+ }
+ goto L10;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.f) {
+ *rcond = 1.f / ainvnm / *anorm;
+ }
+
+L20:
+ return 0;
+
+/* End of SPOCON */
+
+} /* spocon_ */
diff --git a/contrib/libs/clapack/spoequ.c b/contrib/libs/clapack/spoequ.c
new file mode 100644
index 0000000000..9b23897125
--- /dev/null
+++ b/contrib/libs/clapack/spoequ.c
@@ -0,0 +1,174 @@
+/* spoequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int spoequ_(integer *n, real *a, integer *lda, real *s, real
+ *scond, real *amax, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ real smin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPOEQU computes row and column scalings intended to equilibrate a */
+/* symmetric positive definite matrix A and reduce its condition number */
+/* (with respect to the two-norm). S contains the scale factors, */
+/* S(i) = 1/sqrt(A(i,i)), chosen so that the scaled matrix B with */
+/* elements B(i,j) = S(i)*A(i,j)*S(j) has ones on the diagonal. This */
+/* choice of S puts the condition number of B within a factor N of the */
+/* smallest possible condition number over all possible diagonal */
+/* scalings. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The N-by-N symmetric positive definite matrix whose scaling */
+/* factors are to be computed. Only the diagonal elements of A */
+/* are referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* S (output) REAL array, dimension (N) */
+/* If INFO = 0, S contains the scale factors for A. */
+
+/* SCOND (output) REAL */
+/* If INFO = 0, S contains the ratio of the smallest S(i) to */
+/* the largest S(i). If SCOND >= 0.1 and AMAX is neither too */
+/* large nor too small, it is not worth scaling by S. */
+
+/* AMAX (output) REAL */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element is nonpositive. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*lda < max(1,*n)) {
+ *info = -3;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPOEQU", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *scond = 1.f;
+ *amax = 0.f;
+ return 0;
+ }
+
+/* Find the minimum and maximum diagonal elements. */
+
+ s[1] = a[a_dim1 + 1];
+ smin = s[1];
+ *amax = s[1];
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ s[i__] = a[i__ + i__ * a_dim1];
+/* Computing MIN */
+ r__1 = smin, r__2 = s[i__];
+ smin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = *amax, r__2 = s[i__];
+ *amax = dmax(r__1,r__2);
+/* L10: */
+ }
+
+ if (smin <= 0.f) {
+
+/* Find the first non-positive diagonal element and return. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (s[i__] <= 0.f) {
+ *info = i__;
+ return 0;
+ }
+/* L20: */
+ }
+ } else {
+
+/* Set the scale factors to the reciprocals */
+/* of the diagonal elements. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ s[i__] = 1.f / sqrt(s[i__]);
+/* L30: */
+ }
+
+/* Compute SCOND = min(S(I)) / max(S(I)) */
+
+ *scond = sqrt(smin) / sqrt(*amax);
+ }
+ return 0;
+
+/* End of SPOEQU */
+
+} /* spoequ_ */
diff --git a/contrib/libs/clapack/spoequb.c b/contrib/libs/clapack/spoequb.c
new file mode 100644
index 0000000000..cd6865a861
--- /dev/null
+++ b/contrib/libs/clapack/spoequb.c
@@ -0,0 +1,188 @@
+/* spoequb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int spoequb_(integer *n, real *a, integer *lda, real *s,
+ real *scond, real *amax, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double log(doublereal), pow_ri(real *, integer *), sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ real tmp, base, smin;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- Contributed by James Demmel, Deaglan Halligan, Yozo Hida and -- */
+/* -- Jason Riedy of Univ. of California Berkeley. -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley and NAG Ltd. -- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPOEQU computes row and column scalings intended to equilibrate a */
+/* symmetric positive definite matrix A and reduce its condition number */
+/* (with respect to the two-norm). S contains the scale factors, */
+/* S(i) = 1/sqrt(A(i,i)), chosen so that the scaled matrix B with */
+/* elements B(i,j) = S(i)*A(i,j)*S(j) has ones on the diagonal. This */
+/* choice of S puts the condition number of B within a factor N of the */
+/* smallest possible condition number over all possible diagonal */
+/* scalings. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The N-by-N symmetric positive definite matrix whose scaling */
+/* factors are to be computed. Only the diagonal elements of A */
+/* are referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* S (output) REAL array, dimension (N) */
+/* If INFO = 0, S contains the scale factors for A. */
+
+/* SCOND (output) REAL */
+/* If INFO = 0, S contains the ratio of the smallest S(i) to */
+/* the largest S(i). If SCOND >= 0.1 and AMAX is neither too */
+/* large nor too small, it is not worth scaling by S. */
+
+/* AMAX (output) REAL */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element is nonpositive. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* Positive definite only performs 1 pass of equilibration. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*lda < max(1,*n)) {
+ *info = -3;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPOEQUB", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ *scond = 1.f;
+ *amax = 0.f;
+ return 0;
+ }
+ base = slamch_("B");
+ tmp = -.5f / log(base);
+
+/* Find the minimum and maximum diagonal elements. */
+
+ s[1] = a[a_dim1 + 1];
+ smin = s[1];
+ *amax = s[1];
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ s[i__] = a[i__ + i__ * a_dim1];
+/* Computing MIN */
+ r__1 = smin, r__2 = s[i__];
+ smin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = *amax, r__2 = s[i__];
+ *amax = dmax(r__1,r__2);
+/* L10: */
+ }
+
+ if (smin <= 0.f) {
+
+/* Find the first non-positive diagonal element and return. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (s[i__] <= 0.f) {
+ *info = i__;
+ return 0;
+ }
+/* L20: */
+ }
+ } else {
+
+/* Set the scale factors to the reciprocals */
+/* of the diagonal elements. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = (integer) (tmp * log(s[i__]));
+ s[i__] = pow_ri(&base, &i__2);
+/* L30: */
+ }
+
+/* Compute SCOND = min(S(I)) / max(S(I)). */
+
+ *scond = sqrt(smin) / sqrt(*amax);
+ }
+
+ return 0;
+
+/* End of SPOEQUB */
+
+} /* spoequb_ */
diff --git a/contrib/libs/clapack/sporfs.c b/contrib/libs/clapack/sporfs.c
new file mode 100644
index 0000000000..d0ef95e03e
--- /dev/null
+++ b/contrib/libs/clapack/sporfs.c
@@ -0,0 +1,421 @@
+/* sporfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b12 = -1.f;
+static real c_b14 = 1.f;
+
+/* Subroutine */ int sporfs_(char *uplo, integer *n, integer *nrhs, real *a,
+	integer *lda, real *af, integer *ldaf, real *b, integer *ldb, real *x,
+	integer *ldx, real *ferr, real *berr, real *work, integer *iwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+	    x_offset, i__1, i__2, i__3;
+    real r__1, r__2, r__3;
+
+    /* Local variables */
+    integer i__, j, k;
+    real s, xk;
+    integer nz;
+    real eps;
+    integer kase;
+    real safe1, safe2;
+    extern logical lsame_(char *, char *);
+    integer isave[3], count;
+    logical upper;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+	    integer *), saxpy_(integer *, real *, real *, integer *, real *,
+	    integer *), ssymv_(char *, integer *, real *, real *, integer *,
+	    real *, integer *, real *, real *, integer *), slacn2_(
+	    integer *, real *, real *, integer *, real *, integer *, integer *
+);
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real lstres;
+    extern /* Subroutine */ int spotrs_(char *, integer *, integer *, real *,
+	    integer *, real *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPORFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is symmetric positive definite, */
+/* and provides error bounds and backward error estimates for the */
+/* solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO    (input) CHARACTER*1 */
+/* = 'U':  Upper triangle of A is stored; */
+/* = 'L':  Lower triangle of A is stored. */
+
+/* N       (input) INTEGER */
+/* The order of the matrix A.  N >= 0. */
+
+/* NRHS    (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X.  NRHS >= 0. */
+
+/* A       (input) REAL array, dimension (LDA,N) */
+/* The symmetric matrix A.  If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced.  If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. */
+
+/* LDA     (input) INTEGER */
+/* The leading dimension of the array A.  LDA >= max(1,N). */
+
+/* AF      (input) REAL array, dimension (LDAF,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**T*U or A = L*L**T, as computed by SPOTRF. */
+
+/* LDAF    (input) INTEGER */
+/* The leading dimension of the array AF.  LDAF >= max(1,N). */
+
+/* B       (input) REAL array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB     (input) INTEGER */
+/* The leading dimension of the array B.  LDB >= max(1,N). */
+
+/* X       (input/output) REAL array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by SPOTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX     (input) INTEGER */
+/* The leading dimension of the array X.  LDX >= max(1,N). */
+
+/* FERR    (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j).  The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR    (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK    (workspace) REAL array, dimension (3*N) */
+
+/* IWORK   (workspace) INTEGER array, dimension (N) */
+
+/* INFO    (output) INTEGER */
+/* = 0:  successful exit */
+/* < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    af_dim1 = *ldaf;
+    af_offset = 1 + af_dim1;
+    af -= af_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldaf < max(1,*n)) {
+	*info = -7;
+    } else if (*ldb < max(1,*n)) {
+	*info = -9;
+    } else if (*ldx < max(1,*n)) {
+	*info = -11;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SPORFS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] = 0.f;
+	    berr[j] = 0.f;
+/* L10: */
+	}
+	return 0;
+    }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+    nz = *n + 1;
+    eps = slamch_("Epsilon");
+    safmin = slamch_("Safe minimum");
+    safe1 = nz * safmin;
+    safe2 = safe1 / eps;
+
+/* Workspace layout (1-based after the pointer adjustment above): */
+/* WORK(1:N) accumulates abs(A)*abs(X) + abs(B), WORK(N+1:2*N) holds */
+/* the current residual, and WORK(2*N+1:3*N) is scratch for SLACN2. */
+
+/* Do for each right hand side */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+	count = 1;
+	lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+	scopy_(n, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+	ssymv_(uplo, n, &c_b12, &a[a_offset], lda, &x[j * x_dim1 + 1], &c__1,
+		&c_b14, &work[*n + 1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z.  If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    work[i__] = (r__1 = b[i__ + j * b_dim1], dabs(r__1));
+/* L30: */
+	}
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+	if (upper) {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		s = 0.f;
+		xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+		i__3 = k - 1;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    work[i__] += (r__1 = a[i__ + k * a_dim1], dabs(r__1)) *
+			    xk;
+		    s += (r__1 = a[i__ + k * a_dim1], dabs(r__1)) * (r__2 = x[
+			    i__ + j * x_dim1], dabs(r__2));
+/* L40: */
+		}
+		work[k] = work[k] + (r__1 = a[k + k * a_dim1], dabs(r__1)) *
+			xk + s;
+/* L50: */
+	    }
+	} else {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		s = 0.f;
+		xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+		work[k] += (r__1 = a[k + k * a_dim1], dabs(r__1)) * xk;
+		i__3 = *n;
+		for (i__ = k + 1; i__ <= i__3; ++i__) {
+		    work[i__] += (r__1 = a[i__ + k * a_dim1], dabs(r__1)) *
+			    xk;
+		    s += (r__1 = a[i__ + k * a_dim1], dabs(r__1)) * (r__2 = x[
+			    i__ + j * x_dim1], dabs(r__2));
+/* L60: */
+		}
+		work[k] += s;
+/* L70: */
+	    }
+	}
+	s = 0.f;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (work[i__] > safe2) {
+/* Computing MAX */
+		r__2 = s, r__3 = (r__1 = work[*n + i__], dabs(r__1)) / work[
+			i__];
+		s = dmax(r__2,r__3);
+	    } else {
+/* Computing MAX */
+		r__2 = s, r__3 = ((r__1 = work[*n + i__], dabs(r__1)) + safe1)
+			 / (work[i__] + safe1);
+		s = dmax(r__2,r__3);
+	    }
+/* L80: */
+	}
+	berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+/* (ITMAX = 5; f2c inlined the constant into the test below.) */
+
+	if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+	    spotrs_(uplo, n, &c__1, &af[af_offset], ldaf, &work[*n + 1], n,
+		    info);
+	    saxpy_(n, &c_b14, &work[*n + 1], &c__1, &x[j * x_dim1 + 1], &c__1)
+		    ;
+	    lstres = berr[j];
+	    ++count;
+	    goto L20;
+	}
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use SLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (work[i__] > safe2) {
+		work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+			work[i__];
+	    } else {
+		work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+			work[i__] + safe1;
+	    }
+/* L90: */
+	}
+
+/* SLACN2 uses reverse communication: each nonzero KASE returned */
+/* asks us to apply inv(A') or inv(A) (identical here, since A is */
+/* symmetric) combined with diag(W) to the vector in WORK(N+1:2*N). */
+
+	kase = 0;
+L100:
+	slacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+		kase, isave);
+	if (kase != 0) {
+	    if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+		spotrs_(uplo, n, &c__1, &af[af_offset], ldaf, &work[*n + 1],
+			n, info);
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    work[*n + i__] = work[i__] * work[*n + i__];
+/* L110: */
+		}
+	    } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    work[*n + i__] = work[i__] * work[*n + i__];
+/* L120: */
+		}
+		spotrs_(uplo, n, &c__1, &af[af_offset], ldaf, &work[*n + 1],
+			n, info);
+	    }
+	    goto L100;
+	}
+
+/* Normalize error. */
+
+	lstres = 0.f;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    r__2 = lstres, r__3 = (r__1 = x[i__ + j * x_dim1], dabs(r__1));
+	    lstres = dmax(r__2,r__3);
+/* L130: */
+	}
+	if (lstres != 0.f) {
+	    ferr[j] /= lstres;
+	}
+
+/* L140: */
+    }
+
+    return 0;
+
+/* End of SPORFS */
+
+} /* sporfs_ */
diff --git a/contrib/libs/clapack/sposv.c b/contrib/libs/clapack/sposv.c
new file mode 100644
index 0000000000..02d37eeb33
--- /dev/null
+++ b/contrib/libs/clapack/sposv.c
@@ -0,0 +1,151 @@
+/* sposv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sposv_(char *uplo, integer *n, integer *nrhs, real *a,
+	integer *lda, real *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *), spotrf_(
+	    char *, integer *, real *, integer *, integer *), spotrs_(
+	    char *, integer *, integer *, real *, integer *, real *, integer *
+, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPOSV computes the solution to a real system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric positive definite matrix and X and B */
+/* are N-by-NRHS matrices. */
+
+/* The Cholesky decomposition is used to factor A as */
+/* A = U**T* U,  if UPLO = 'U', or */
+/* A = L * L**T,  if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is a lower triangular */
+/* matrix.  The factored form of A is then used to solve the system of */
+/* equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO    (input) CHARACTER*1 */
+/* = 'U':  Upper triangle of A is stored; */
+/* = 'L':  Lower triangle of A is stored. */
+
+/* N       (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A.  N >= 0. */
+
+/* NRHS    (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B.  NRHS >= 0. */
+
+/* A       (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A.  If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced.  If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T. */
+
+/* LDA     (input) INTEGER */
+/* The leading dimension of the array A.  LDA >= max(1,N). */
+
+/* B       (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB     (input) INTEGER */
+/* The leading dimension of the array B.  LDB >= max(1,N). */
+
+/* INFO    (output) INTEGER */
+/* = 0:  successful exit */
+/* < 0:  if INFO = -i, the i-th argument had an illegal value */
+/* > 0:  if INFO = i, the leading minor of order i of A is not */
+/* positive definite, so the factorization could not be */
+/* completed, and the solution has not been computed. */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+/* The routine name is deliberately blank-padded to six characters */
+/* ("SPOSV ") for XERBLA; do not "fix" the trailing space. */
+	xerbla_("SPOSV ", &i__1);
+	return 0;
+    }
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+    spotrf_(uplo, n, &a[a_offset], lda, info);
+    if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+	spotrs_(uplo, n, nrhs, &a[a_offset], lda, &b[b_offset], ldb, info);
+
+    }
+    return 0;
+
+/* End of SPOSV */
+
+} /* sposv_ */
diff --git a/contrib/libs/clapack/sposvx.c b/contrib/libs/clapack/sposvx.c
new file mode 100644
index 0000000000..16ca8115d1
--- /dev/null
+++ b/contrib/libs/clapack/sposvx.c
@@ -0,0 +1,446 @@
+/* sposvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sposvx_(char *fact, char *uplo, integer *n, integer *
+	nrhs, real *a, integer *lda, real *af, integer *ldaf, char *equed,
+	real *s, real *b, integer *ldb, real *x, integer *ldx, real *rcond,
+	real *ferr, real *berr, real *work, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+	    x_offset, i__1, i__2;
+    real r__1, r__2;
+
+    /* Local variables */
+    integer i__, j;
+    real amax, smin, smax;
+    extern logical lsame_(char *, char *);
+    real scond, anorm;
+    logical equil, rcequ;
+    extern doublereal slamch_(char *);
+    logical nofact;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real bignum;
+    integer infequ;
+    extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+	    integer *, real *, integer *), spocon_(char *, integer *,
+	    real *, integer *, real *, real *, real *, integer *, integer *);
+    extern doublereal slansy_(char *, char *, integer *, real *, integer *,
+	    real *);
+    real smlnum;
+    extern /* Subroutine */ int slaqsy_(char *, integer *, real *, integer *,
+	    real *, real *, real *, char *), spoequ_(integer *
+, real *, integer *, real *, real *, real *, integer *), sporfs_(
+	    char *, integer *, integer *, real *, integer *, real *, integer *
+, real *, integer *, real *, integer *, real *, real *, real *,
+	    integer *, integer *), spotrf_(char *, integer *, real *,
+	    integer *, integer *), spotrs_(char *, integer *, integer
+	    *, real *, integer *, real *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPOSVX uses the Cholesky factorization A = U**T*U or A = L*L**T to */
+/* compute the solution to a real system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric positive definite matrix and X and B */
+/* are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/* the system: */
+/* diag(S) * A * diag(S) * inv(diag(S)) * X = diag(S) * B */
+/* Whether or not the system will be equilibrated depends on the */
+/* scaling of the matrix A, but if equilibration is used, A is */
+/* overwritten by diag(S)*A*diag(S) and B by diag(S)*B. */
+
+/* 2. If FACT = 'N' or 'E', the Cholesky decomposition is used to */
+/* factor the matrix A (after equilibration if FACT = 'E') as */
+/* A = U**T* U,  if UPLO = 'U', or */
+/* A = L * L**T,  if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is a lower triangular */
+/* matrix. */
+
+/* 3. If the leading i-by-i principal minor is not positive definite, */
+/* then the routine returns with INFO = i. Otherwise, the factored */
+/* form of A is used to estimate the condition number of the matrix */
+/* A.  If the reciprocal of the condition number is less than machine */
+/* precision, INFO = N+1 is returned as a warning, but the routine */
+/* still goes on to solve for X and compute error bounds as */
+/* described below. */
+
+/* 4. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 5. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* 6. If equilibration was used, the matrix X is premultiplied by */
+/* diag(S) so that it solves the original system before */
+/* equilibration. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT    (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of the matrix A is */
+/* supplied on entry, and if not, whether the matrix A should be */
+/* equilibrated before it is factored. */
+/* = 'F':  On entry, AF contains the factored form of A. */
+/* If EQUED = 'Y', the matrix A has been equilibrated */
+/* with scaling factors given by S.  A and AF will not */
+/* be modified. */
+/* = 'N':  The matrix A will be copied to AF and factored. */
+/* = 'E':  The matrix A will be equilibrated if necessary, then */
+/* copied to AF and factored. */
+
+/* UPLO    (input) CHARACTER*1 */
+/* = 'U':  Upper triangle of A is stored; */
+/* = 'L':  Lower triangle of A is stored. */
+
+/* N       (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A.  N >= 0. */
+
+/* NRHS    (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X.  NRHS >= 0. */
+
+/* A       (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A, except if FACT = 'F' and */
+/* EQUED = 'Y', then A must contain the equilibrated matrix */
+/* diag(S)*A*diag(S).  If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced.  If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced.  A is not modified if */
+/* FACT = 'F' or 'N', or if FACT = 'E' and EQUED = 'N' on exit. */
+
+/* On exit, if FACT = 'E' and EQUED = 'Y', A is overwritten by */
+/* diag(S)*A*diag(S). */
+
+/* LDA     (input) INTEGER */
+/* The leading dimension of the array A.  LDA >= max(1,N). */
+
+/* AF      (input or output) REAL array, dimension (LDAF,N) */
+/* If FACT = 'F', then AF is an input argument and on entry */
+/* contains the triangular factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T, in the same storage */
+/* format as A.  If EQUED .ne. 'N', then AF is the factored form */
+/* of the equilibrated matrix diag(S)*A*diag(S). */
+
+/* If FACT = 'N', then AF is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T of the original */
+/* matrix A. */
+
+/* If FACT = 'E', then AF is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T of the equilibrated */
+/* matrix A (see the description of A for the form of the */
+/* equilibrated matrix). */
+
+/* LDAF    (input) INTEGER */
+/* The leading dimension of the array AF.  LDAF >= max(1,N). */
+
+/* EQUED   (input or output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N':  No equilibration (always true if FACT = 'N'). */
+/* = 'Y':  Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+/* EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/* output argument. */
+
+/* S       (input or output) REAL array, dimension (N) */
+/* The scale factors for A; not accessed if EQUED = 'N'.  S is */
+/* an input argument if FACT = 'F'; otherwise, S is an output */
+/* argument.  If FACT = 'F' and EQUED = 'Y', each element of S */
+/* must be positive. */
+
+/* B       (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if EQUED = 'N', B is not modified; if EQUED = 'Y', */
+/* B is overwritten by diag(S) * B. */
+
+/* LDB     (input) INTEGER */
+/* The leading dimension of the array B.  LDB >= max(1,N). */
+
+/* X       (output) REAL array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X to */
+/* the original system of equations.  Note that if EQUED = 'Y', */
+/* A and B are modified on exit, and the solution to the */
+/* equilibrated system is inv(diag(S))*X. */
+
+/* LDX     (input) INTEGER */
+/* The leading dimension of the array X.  LDX >= max(1,N). */
+
+/* RCOND   (output) REAL */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A after equilibration (if done).  If RCOND is less than the */
+/* machine precision (in particular, if RCOND = 0), the matrix */
+/* is singular to working precision.  This condition is */
+/* indicated by a return code of INFO > 0. */
+
+/* FERR    (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j).  The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR    (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK    (workspace) REAL array, dimension (3*N) */
+
+/* IWORK   (workspace) INTEGER array, dimension (N) */
+
+/* INFO    (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N:  the leading minor of order i of A is */
+/* not positive definite, so the factorization */
+/* could not be completed, and the solution has not */
+/* been computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision.  Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    af_dim1 = *ldaf;
+    af_offset = 1 + af_dim1;
+    af -= af_offset;
+    --s;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    nofact = lsame_(fact, "N");
+    equil = lsame_(fact, "E");
+    if (nofact || equil) {
+	*(unsigned char *)equed = 'N';
+	rcequ = FALSE_;
+    } else {
+	rcequ = lsame_(equed, "Y");
+/* SMLNUM/BIGNUM are computed only on this FACT='F' path; they are */
+/* used below solely to validate the caller-supplied scale factors S. */
+	smlnum = slamch_("Safe minimum");
+	bignum = 1.f / smlnum;
+    }
+
+/* Test the input parameters. */
+
+    if (! nofact && ! equil && ! lsame_(fact, "F")) {
+	*info = -1;
+    } else if (! lsame_(uplo, "U") && ! lsame_(uplo,
+	    "L")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*n)) {
+	*info = -6;
+    } else if (*ldaf < max(1,*n)) {
+	*info = -8;
+    } else if (lsame_(fact, "F") && ! (rcequ || lsame_(
+	    equed, "N"))) {
+	*info = -9;
+    } else {
+	if (rcequ) {
+	    smin = bignum;
+	    smax = 0.f;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		r__1 = smin, r__2 = s[j];
+		smin = dmin(r__1,r__2);
+/* Computing MAX */
+		r__1 = smax, r__2 = s[j];
+		smax = dmax(r__1,r__2);
+/* L10: */
+	    }
+	    if (smin <= 0.f) {
+		*info = -10;
+	    } else if (*n > 0) {
+		scond = dmax(smin,smlnum) / dmin(smax,bignum);
+	    } else {
+		scond = 1.f;
+	    }
+	}
+	if (*info == 0) {
+	    if (*ldb < max(1,*n)) {
+		*info = -12;
+	    } else if (*ldx < max(1,*n)) {
+		*info = -14;
+	    }
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SPOSVX", &i__1);
+	return 0;
+    }
+
+    if (equil) {
+
+/* Compute row and column scalings to equilibrate the matrix A. */
+
+	spoequ_(n, &a[a_offset], lda, &s[1], &scond, &amax, &infequ);
+	if (infequ == 0) {
+
+/* Equilibrate the matrix. */
+
+	    slaqsy_(uplo, n, &a[a_offset], lda, &s[1], &scond, &amax, equed);
+	    rcequ = lsame_(equed, "Y");
+	}
+    }
+
+/* Scale the right hand side. */
+
+    if (rcequ) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *n;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		b[i__ + j * b_dim1] = s[i__] * b[i__ + j * b_dim1];
+/* L20: */
+	    }
+/* L30: */
+	}
+    }
+
+    if (nofact || equil) {
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+	slacpy_(uplo, n, n, &a[a_offset], lda, &af[af_offset], ldaf);
+	spotrf_(uplo, n, &af[af_offset], ldaf, info);
+
+/* Return if INFO is non-zero. */
+
+	if (*info > 0) {
+	    *rcond = 0.f;
+	    return 0;
+	}
+    }
+
+/* Compute the norm of the matrix A. */
+
+    anorm = slansy_("1", uplo, n, &a[a_offset], lda, &work[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+    spocon_(uplo, n, &af[af_offset], ldaf, &anorm, rcond, &work[1], &iwork[1],
+	     info);
+
+/* Compute the solution matrix X. */
+
+    slacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    spotrs_(uplo, n, nrhs, &af[af_offset], ldaf, &x[x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solution and */
+/* compute error bounds and backward error estimates for it. */
+
+    sporfs_(uplo, n, nrhs, &a[a_offset], lda, &af[af_offset], ldaf, &b[
+	    b_offset], ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[1], &
+	    iwork[1], info);
+
+/* Transform the solution matrix X to a solution of the original */
+/* system. */
+
+    if (rcequ) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *n;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		x[i__ + j * x_dim1] = s[i__] * x[i__ + j * x_dim1];
+/* L40: */
+	    }
+/* L50: */
+	}
+/* SCOND was set either when validating the user-supplied S */
+/* (FACT='F') or by SPOEQU/SLAQSY during equilibration; FERR must be */
+/* rescaled accordingly for the original (unequilibrated) system. */
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] /= scond;
+/* L60: */
+	}
+    }
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+    if (*rcond < slamch_("Epsilon")) {
+	*info = *n + 1;
+    }
+
+    return 0;
+
+/* End of SPOSVX */
+
+} /* sposvx_ */
diff --git a/contrib/libs/clapack/spotf2.c b/contrib/libs/clapack/spotf2.c
new file mode 100644
index 0000000000..234977421b
--- /dev/null
+++ b/contrib/libs/clapack/spotf2.c
@@ -0,0 +1,221 @@
+/* spotf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b10 = -1.f;
+static real c_b12 = 1.f;
+
+/* Subroutine */ int spotf2_(char *uplo, integer *n, real *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer j;
+ real ajj;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ sgemv_(char *, integer *, integer *, real *, real *, integer *,
+ real *, integer *, real *, real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern logical sisnan_(real *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPOTF2 computes the Cholesky factorization of a real symmetric */
+/* positive definite matrix A. */
+
+/* The factorization has the form */
+/* A = U' * U , if UPLO = 'U', or */
+/* A = L * L', if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular. */
+
+/* This is the unblocked version of the algorithm, calling Level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization A = U'*U or A = L*L'. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, the leading minor of order k is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPOTF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute U(J,J) and test for non-positive-definiteness. */
+
+ i__2 = j - 1;
+ ajj = a[j + j * a_dim1] - sdot_(&i__2, &a[j * a_dim1 + 1], &c__1,
+ &a[j * a_dim1 + 1], &c__1);
+ if (ajj <= 0.f || sisnan_(&ajj)) {
+ a[j + j * a_dim1] = ajj;
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ a[j + j * a_dim1] = ajj;
+
+/* Compute elements J+1:N of row J. */
+
+ if (j < *n) {
+ i__2 = j - 1;
+ i__3 = *n - j;
+ sgemv_("Transpose", &i__2, &i__3, &c_b10, &a[(j + 1) * a_dim1
+ + 1], lda, &a[j * a_dim1 + 1], &c__1, &c_b12, &a[j + (
+ j + 1) * a_dim1], lda);
+ i__2 = *n - j;
+ r__1 = 1.f / ajj;
+ sscal_(&i__2, &r__1, &a[j + (j + 1) * a_dim1], lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute L(J,J) and test for non-positive-definiteness. */
+
+ i__2 = j - 1;
+ ajj = a[j + j * a_dim1] - sdot_(&i__2, &a[j + a_dim1], lda, &a[j
+ + a_dim1], lda);
+ if (ajj <= 0.f || sisnan_(&ajj)) {
+ a[j + j * a_dim1] = ajj;
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ a[j + j * a_dim1] = ajj;
+
+/* Compute elements J+1:N of column J. */
+
+ if (j < *n) {
+ i__2 = *n - j;
+ i__3 = j - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b10, &a[j + 1 +
+ a_dim1], lda, &a[j + a_dim1], lda, &c_b12, &a[j + 1 +
+ j * a_dim1], &c__1);
+ i__2 = *n - j;
+ r__1 = 1.f / ajj;
+ sscal_(&i__2, &r__1, &a[j + 1 + j * a_dim1], &c__1);
+ }
+/* L20: */
+ }
+ }
+ goto L40;
+
+L30:
+ *info = j;
+
+L40:
+ return 0;
+
+/* End of SPOTF2 */
+
+} /* spotf2_ */
diff --git a/contrib/libs/clapack/spotrf.c b/contrib/libs/clapack/spotrf.c
new file mode 100644
index 0000000000..ff33e9e07b
--- /dev/null
+++ b/contrib/libs/clapack/spotrf.c
@@ -0,0 +1,243 @@
+/* spotrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b13 = -1.f;
+static real c_b14 = 1.f;
+
+/* Subroutine */ int spotrf_(char *uplo, integer *n, real *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer j, jb, nb;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int strsm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+), ssyrk_(char *, char *, integer
+ *, integer *, real *, real *, integer *, real *, real *, integer *
+), spotf2_(char *, integer *, real *, integer *,
+ integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPOTRF computes the Cholesky factorization of a real symmetric */
+/* positive definite matrix A. */
+
+/* The factorization has the form */
+/* A = U**T * U, if UPLO = 'U', or */
+/* A = L * L**T, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular. */
+
+/* This is the block version of the algorithm, calling Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPOTRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "SPOTRF", uplo, n, &c_n1, &c_n1, &c_n1);
+ if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code. */
+
+ spotf2_(uplo, n, &a[a_offset], lda, info);
+ } else {
+
+/* Use blocked code. */
+
+ if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Update and factorize the current diagonal block and test */
+/* for non-positive-definiteness. */
+
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - j + 1;
+ jb = min(i__3,i__4);
+ i__3 = j - 1;
+ ssyrk_("Upper", "Transpose", &jb, &i__3, &c_b13, &a[j *
+ a_dim1 + 1], lda, &c_b14, &a[j + j * a_dim1], lda);
+ spotf2_("Upper", &jb, &a[j + j * a_dim1], lda, info);
+ if (*info != 0) {
+ goto L30;
+ }
+ if (j + jb <= *n) {
+
+/* Compute the current block row. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j - 1;
+ sgemm_("Transpose", "No transpose", &jb, &i__3, &i__4, &
+ c_b13, &a[j * a_dim1 + 1], lda, &a[(j + jb) *
+ a_dim1 + 1], lda, &c_b14, &a[j + (j + jb) *
+ a_dim1], lda);
+ i__3 = *n - j - jb + 1;
+ strsm_("Left", "Upper", "Transpose", "Non-unit", &jb, &
+ i__3, &c_b14, &a[j + j * a_dim1], lda, &a[j + (j
+ + jb) * a_dim1], lda);
+ }
+/* L10: */
+ }
+
+ } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (j = 1; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Update and factorize the current diagonal block and test */
+/* for non-positive-definiteness. */
+
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - j + 1;
+ jb = min(i__3,i__4);
+ i__3 = j - 1;
+ ssyrk_("Lower", "No transpose", &jb, &i__3, &c_b13, &a[j +
+ a_dim1], lda, &c_b14, &a[j + j * a_dim1], lda);
+ spotf2_("Lower", &jb, &a[j + j * a_dim1], lda, info);
+ if (*info != 0) {
+ goto L30;
+ }
+ if (j + jb <= *n) {
+
+/* Compute the current block column. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j - 1;
+ sgemm_("No transpose", "Transpose", &i__3, &jb, &i__4, &
+ c_b13, &a[j + jb + a_dim1], lda, &a[j + a_dim1],
+ lda, &c_b14, &a[j + jb + j * a_dim1], lda);
+ i__3 = *n - j - jb + 1;
+ strsm_("Right", "Lower", "Transpose", "Non-unit", &i__3, &
+ jb, &c_b14, &a[j + j * a_dim1], lda, &a[j + jb +
+ j * a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+ }
+ goto L40;
+
+L30:
+ *info = *info + j - 1;
+
+L40:
+ return 0;
+
+/* End of SPOTRF */
+
+} /* spotrf_ */
diff --git a/contrib/libs/clapack/spotri.c b/contrib/libs/clapack/spotri.c
new file mode 100644
index 0000000000..26698387a5
--- /dev/null
+++ b/contrib/libs/clapack/spotri.c
@@ -0,0 +1,124 @@
+/* spotri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int spotri_(char *uplo, integer *n, real *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), slauum_(
+ char *, integer *, real *, integer *, integer *), strtri_(
+ char *, char *, integer *, real *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPOTRI computes the inverse of a real symmetric positive definite */
+/* matrix A using the Cholesky factorization A = U**T*U or A = L*L**T */
+/* computed by SPOTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the triangular factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T, as computed by */
+/* SPOTRF. */
+/* On exit, the upper or lower triangle of the (symmetric) */
+/* inverse of A, overwriting the input factor U or L. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the (i,i) element of the factor U or L is */
+/* zero, and the inverse could not be computed. */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPOTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Invert the triangular Cholesky factor U or L. */
+
+ strtri_(uplo, "Non-unit", n, &a[a_offset], lda, info);
+ if (*info > 0) {
+ return 0;
+ }
+
+/* Form inv(U)*inv(U)' or inv(L)'*inv(L). */
+
+ slauum_(uplo, n, &a[a_offset], lda, info);
+
+ return 0;
+
+/* End of SPOTRI */
+
+} /* spotri_ */
diff --git a/contrib/libs/clapack/spotrs.c b/contrib/libs/clapack/spotrs.c
new file mode 100644
index 0000000000..ba0d324ed9
--- /dev/null
+++ b/contrib/libs/clapack/spotrs.c
@@ -0,0 +1,164 @@
+/* spotrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b9 = 1.f;
+
+/* Subroutine */ int spotrs_(char *uplo, integer *n, integer *nrhs, real *a,
+ integer *lda, real *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern /* Subroutine */ int strsm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPOTRS solves a system of linear equations A*X = B with a symmetric */
+/* positive definite matrix A using the Cholesky factorization */
+/* A = U**T*U or A = L*L**T computed by SPOTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**T*U or A = L*L**T, as computed by SPOTRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPOTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Solve A*X = B where A = U'*U. */
+
+/* Solve U'*X = B, overwriting B with X. */
+
+ strsm_("Left", "Upper", "Transpose", "Non-unit", n, nrhs, &c_b9, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+/* Solve U*X = B, overwriting B with X. */
+
+ strsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b9, &
+ a[a_offset], lda, &b[b_offset], ldb);
+ } else {
+
+/* Solve A*X = B where A = L*L'. */
+
+/* Solve L*X = B, overwriting B with X. */
+
+ strsm_("Left", "Lower", "No transpose", "Non-unit", n, nrhs, &c_b9, &
+ a[a_offset], lda, &b[b_offset], ldb);
+
+/* Solve L'*X = B, overwriting B with X. */
+
+ strsm_("Left", "Lower", "Transpose", "Non-unit", n, nrhs, &c_b9, &a[
+ a_offset], lda, &b[b_offset], ldb);
+ }
+
+ return 0;
+
+/* End of SPOTRS */
+
+} /* spotrs_ */
diff --git a/contrib/libs/clapack/sppcon.c b/contrib/libs/clapack/sppcon.c
new file mode 100644
index 0000000000..8e3d0c1b0c
--- /dev/null
+++ b/contrib/libs/clapack/sppcon.c
@@ -0,0 +1,213 @@
+/* sppcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sppcon_(char *uplo, integer *n, real *ap, real *anorm,
+ real *rcond, real *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1;
+
+ /* Local variables */
+ integer ix, kase;
+ real scale;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int srscl_(integer *, real *, real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int slacn2_(integer *, real *, real *, integer *,
+ real *, integer *, integer *);
+ real scalel;
+ extern doublereal slamch_(char *);
+ real scaleu;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ real ainvnm;
+ char normin[1];
+ extern /* Subroutine */ int slatps_(char *, char *, char *, char *,
+ integer *, real *, real *, real *, real *, integer *);
+ real smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPPCON estimates the reciprocal of the condition number (in the */
+/* 1-norm) of a real symmetric positive definite packed matrix using */
+/* the Cholesky factorization A = U**T*U or A = L*L**T computed by */
+/* SPPTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) REAL array, dimension (N*(N+1)/2) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**T*U or A = L*L**T, packed columnwise in a linear */
+/* array. The j-th column of U or L is stored in the array AP */
+/* as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = U(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = L(i,j) for j<=i<=n. */
+
+/* ANORM (input) REAL */
+/* The 1-norm (or infinity-norm) of the symmetric matrix A. */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --iwork;
+ --work;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*anorm < 0.f) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPPCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.f;
+ if (*n == 0) {
+ *rcond = 1.f;
+ return 0;
+ } else if (*anorm == 0.f) {
+ return 0;
+ }
+
+ smlnum = slamch_("Safe minimum");
+
+/* Estimate the 1-norm of the inverse. */
+
+ kase = 0;
+ *(unsigned char *)normin = 'N';
+L10:
+ slacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+ if (upper) {
+
+/* Multiply by inv(U'). */
+
+ slatps_("Upper", "Transpose", "Non-unit", normin, n, &ap[1], &
+ work[1], &scalel, &work[(*n << 1) + 1], info);
+ *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(U). */
+
+ slatps_("Upper", "No transpose", "Non-unit", normin, n, &ap[1], &
+ work[1], &scaleu, &work[(*n << 1) + 1], info);
+ } else {
+
+/* Multiply by inv(L). */
+
+ slatps_("Lower", "No transpose", "Non-unit", normin, n, &ap[1], &
+ work[1], &scalel, &work[(*n << 1) + 1], info);
+ *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(L'). */
+
+ slatps_("Lower", "Transpose", "Non-unit", normin, n, &ap[1], &
+ work[1], &scaleu, &work[(*n << 1) + 1], info);
+ }
+
+/* Multiply by 1/SCALE if doing so will not cause overflow. */
+
+ scale = scalel * scaleu;
+ if (scale != 1.f) {
+ ix = isamax_(n, &work[1], &c__1);
+ if (scale < (r__1 = work[ix], dabs(r__1)) * smlnum || scale ==
+ 0.f) {
+ goto L20;
+ }
+ srscl_(n, &scale, &work[1], &c__1);
+ }
+ goto L10;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.f) {
+ *rcond = 1.f / ainvnm / *anorm;
+ }
+
+L20:
+ return 0;
+
+/* End of SPPCON */
+
+} /* sppcon_ */
diff --git a/contrib/libs/clapack/sppequ.c b/contrib/libs/clapack/sppequ.c
new file mode 100644
index 0000000000..44142dd91d
--- /dev/null
+++ b/contrib/libs/clapack/sppequ.c
@@ -0,0 +1,208 @@
+/* sppequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sppequ_(char *uplo, integer *n, real *ap, real *s, real *
+ scond, real *amax, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, jj;
+ real smin;
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPPEQU computes row and column scalings intended to equilibrate a */
+/* symmetric positive definite matrix A in packed storage and reduce */
+/* its condition number (with respect to the two-norm). S contains the */
+/* scale factors, S(i)=1/sqrt(A(i,i)), chosen so that the scaled matrix */
+/* B with elements B(i,j)=S(i)*A(i,j)*S(j) has ones on the diagonal. */
+/* This choice of S puts the condition number of B within a factor N of */
+/* the smallest possible condition number over all possible diagonal */
+/* scalings. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) REAL array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the symmetric matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* S (output) REAL array, dimension (N) */
+/* If INFO = 0, S contains the scale factors for A. */
+
+/* SCOND (output) REAL */
+/* If INFO = 0, S contains the ratio of the smallest S(i) to */
+/* the largest S(i). If SCOND >= 0.1 and AMAX is neither too */
+/* large nor too small, it is not worth scaling by S. */
+
+/* AMAX (output) REAL */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element is nonpositive. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --s;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPPEQU", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *scond = 1.f;
+ *amax = 0.f;
+ return 0;
+ }
+
+/* Initialize SMIN and AMAX. */
+
+ s[1] = ap[1];
+ smin = s[1];
+ *amax = s[1];
+
+ if (upper) {
+
+/* UPLO = 'U': Upper triangle of A is stored. */
+/* Find the minimum and maximum diagonal elements. */
+
+ jj = 1;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ jj += i__;
+ s[i__] = ap[jj];
+/* Computing MIN */
+ r__1 = smin, r__2 = s[i__];
+ smin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = *amax, r__2 = s[i__];
+ *amax = dmax(r__1,r__2);
+/* L10: */
+ }
+
+ } else {
+
+/* UPLO = 'L': Lower triangle of A is stored. */
+/* Find the minimum and maximum diagonal elements. */
+
+ jj = 1;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ jj = jj + *n - i__ + 2;
+ s[i__] = ap[jj];
+/* Computing MIN */
+ r__1 = smin, r__2 = s[i__];
+ smin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = *amax, r__2 = s[i__];
+ *amax = dmax(r__1,r__2);
+/* L20: */
+ }
+ }
+
+ if (smin <= 0.f) {
+
+/* Find the first non-positive diagonal element and return. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (s[i__] <= 0.f) {
+ *info = i__;
+ return 0;
+ }
+/* L30: */
+ }
+ } else {
+
+/* Set the scale factors to the reciprocals */
+/* of the diagonal elements. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ s[i__] = 1.f / sqrt(s[i__]);
+/* L40: */
+ }
+
+/* Compute SCOND = min(S(I)) / max(S(I)) */
+
+ *scond = sqrt(smin) / sqrt(*amax);
+ }
+ return 0;
+
+/* End of SPPEQU */
+
+} /* sppequ_ */
diff --git a/contrib/libs/clapack/spprfs.c b/contrib/libs/clapack/spprfs.c
new file mode 100644
index 0000000000..b12391f2b9
--- /dev/null
+++ b/contrib/libs/clapack/spprfs.c
@@ -0,0 +1,408 @@
+/* spprfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b12 = -1.f;
+static real c_b14 = 1.f;
+
+/* Subroutine */ int spprfs_(char *uplo, integer *n, integer *nrhs, real *ap,
+ real *afp, real *b, integer *ldb, real *x, integer *ldx, real *ferr,
+ real *berr, real *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3;
+ real r__1, r__2, r__3;
+
+ /* Local variables */
+ integer i__, j, k;
+ real s;
+ integer ik, kk;
+ real xk;
+ integer nz;
+ real eps;
+ integer kase;
+ real safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3], count;
+ logical upper;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), saxpy_(integer *, real *, real *, integer *, real *,
+ integer *), sspmv_(char *, integer *, real *, real *, real *,
+ integer *, real *, real *, integer *), slacn2_(integer *,
+ real *, real *, integer *, real *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real lstres;
+ extern /* Subroutine */ int spptrs_(char *, integer *, integer *, real *,
+ real *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPPRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is symmetric positive definite */
+/* and packed, and provides error bounds and backward error estimates */
+/* for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AP (input) REAL array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the symmetric matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* AFP (input) REAL array, dimension (N*(N+1)/2) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**T*U or A = L*L**T, as computed by SPPTRF/CPPTRF, */
+/* packed columnwise in a linear array in the same format as A */
+/* (see AP). */
+
+/* B (input) REAL array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) REAL array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by SPPTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --afp;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ } else if (*ldx < max(1,*n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPPRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.f;
+ berr[j] = 0.f;
+/* L10: */
+ }
+ return 0;
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = *n + 1;
+ eps = slamch_("Epsilon");
+ safmin = slamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+ scopy_(n, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+ sspmv_(uplo, n, &c_b12, &ap[1], &x[j * x_dim1 + 1], &c__1, &c_b14, &
+ work[*n + 1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] = (r__1 = b[i__ + j * b_dim1], dabs(r__1));
+/* L30: */
+ }
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+ kk = 1;
+ if (upper) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+ ik = kk;
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ work[i__] += (r__1 = ap[ik], dabs(r__1)) * xk;
+ s += (r__1 = ap[ik], dabs(r__1)) * (r__2 = x[i__ + j *
+ x_dim1], dabs(r__2));
+ ++ik;
+/* L40: */
+ }
+ work[k] = work[k] + (r__1 = ap[kk + k - 1], dabs(r__1)) * xk
+ + s;
+ kk += k;
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+ work[k] += (r__1 = ap[kk], dabs(r__1)) * xk;
+ ik = kk + 1;
+ i__3 = *n;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ work[i__] += (r__1 = ap[ik], dabs(r__1)) * xk;
+ s += (r__1 = ap[ik], dabs(r__1)) * (r__2 = x[i__ + j *
+ x_dim1], dabs(r__2));
+ ++ik;
+/* L60: */
+ }
+ work[k] += s;
+ kk += *n - k + 1;
+/* L70: */
+ }
+ }
+ s = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+/* Computing MAX */
+ r__2 = s, r__3 = (r__1 = work[*n + i__], dabs(r__1)) / work[
+ i__];
+ s = dmax(r__2,r__3);
+ } else {
+/* Computing MAX */
+ r__2 = s, r__3 = ((r__1 = work[*n + i__], dabs(r__1)) + safe1)
+ / (work[i__] + safe1);
+ s = dmax(r__2,r__3);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ spptrs_(uplo, n, &c__1, &afp[1], &work[*n + 1], n, info);
+ saxpy_(n, &c_b14, &work[*n + 1], &c__1, &x[j * x_dim1 + 1], &c__1)
+ ;
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use SLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+ work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+ work[i__];
+ } else {
+ work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+ work[i__] + safe1;
+ }
+/* L90: */
+ }
+
+ kase = 0;
+L100:
+ slacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+ kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+ spptrs_(uplo, n, &c__1, &afp[1], &work[*n + 1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] = work[i__] * work[*n + i__];
+/* L110: */
+ }
+ } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] = work[i__] * work[*n + i__];
+/* L120: */
+ }
+ spptrs_(uplo, n, &c__1, &afp[1], &work[*n + 1], n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = lstres, r__3 = (r__1 = x[i__ + j * x_dim1], dabs(r__1));
+ lstres = dmax(r__2,r__3);
+/* L130: */
+ }
+ if (lstres != 0.f) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of SPPRFS */
+
+} /* spprfs_ */
diff --git a/contrib/libs/clapack/sppsv.c b/contrib/libs/clapack/sppsv.c
new file mode 100644
index 0000000000..cbeb78b0d6
--- /dev/null
+++ b/contrib/libs/clapack/sppsv.c
@@ -0,0 +1,160 @@
+/* sppsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sppsv_(char *uplo, integer *n, integer *nrhs, real *ap,
+ real *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), spptrf_(
+ char *, integer *, real *, integer *), spptrs_(char *,
+ integer *, integer *, real *, real *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPPSV computes the solution to a real system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric positive definite matrix stored in */
+/* packed format and X and B are N-by-NRHS matrices. */
+
+/* The Cholesky decomposition is used to factor A as */
+/* A = U**T* U, if UPLO = 'U', or */
+/* A = L * L**T, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is a lower triangular */
+/* matrix. The factored form of A is then used to solve the system of */
+/* equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input/output) REAL array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T, in the same storage */
+/* format as A. */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i of A is not */
+/* positive definite, so the factorization could not be */
+/* completed, and the solution has not been computed. */
+
+/* Further Details */
+/* =============== */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the symmetric matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = conjg(aji)) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPPSV ", &i__1);
+ return 0;
+ }
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+ spptrf_(uplo, n, &ap[1], info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ spptrs_(uplo, n, nrhs, &ap[1], &b[b_offset], ldb, info);
+
+ }
+ return 0;
+
+/* End of SPPSV */
+
+} /* sppsv_ */
diff --git a/contrib/libs/clapack/sppsvx.c b/contrib/libs/clapack/sppsvx.c
new file mode 100644
index 0000000000..15f5d6ed84
--- /dev/null
+++ b/contrib/libs/clapack/sppsvx.c
@@ -0,0 +1,452 @@
+/* sppsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sppsvx_(char *fact, char *uplo, integer *n, integer *
+ nrhs, real *ap, real *afp, char *equed, real *s, real *b, integer *
+ ldb, real *x, integer *ldx, real *rcond, real *ferr, real *berr, real
+ *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Local variables */
+ integer i__, j;
+ real amax, smin, smax;
+ extern logical lsame_(char *, char *);
+ real scond, anorm;
+ logical equil, rcequ;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ extern doublereal slamch_(char *);
+ logical nofact;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ integer infequ;
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *);
+ extern doublereal slansp_(char *, char *, integer *, real *, real *);
+ extern /* Subroutine */ int sppcon_(char *, integer *, real *, real *,
+ real *, real *, integer *, integer *), slaqsp_(char *,
+ integer *, real *, real *, real *, real *, char *)
+ ;
+ real smlnum;
+ extern /* Subroutine */ int sppequ_(char *, integer *, real *, real *,
+ real *, real *, integer *), spprfs_(char *, integer *,
+ integer *, real *, real *, real *, integer *, real *, integer *,
+ real *, real *, real *, integer *, integer *), spptrf_(
+ char *, integer *, real *, integer *), spptrs_(char *,
+ integer *, integer *, real *, real *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPPSVX uses the Cholesky factorization A = U**T*U or A = L*L**T to */
+/* compute the solution to a real system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric positive definite matrix stored in */
+/* packed format and X and B are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/* the system: */
+/* diag(S) * A * diag(S) * inv(diag(S)) * X = diag(S) * B */
+/* Whether or not the system will be equilibrated depends on the */
+/* scaling of the matrix A, but if equilibration is used, A is */
+/* overwritten by diag(S)*A*diag(S) and B by diag(S)*B. */
+
+/* 2. If FACT = 'N' or 'E', the Cholesky decomposition is used to */
+/* factor the matrix A (after equilibration if FACT = 'E') as */
+/* A = U**T* U, if UPLO = 'U', or */
+/* A = L * L**T, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is a lower triangular */
+/* matrix. */
+
+/* 3. If the leading i-by-i principal minor is not positive definite, */
+/* then the routine returns with INFO = i. Otherwise, the factored */
+/* form of A is used to estimate the condition number of the matrix */
+/* A. If the reciprocal of the condition number is less than machine */
+/* precision, INFO = N+1 is returned as a warning, but the routine */
+/* still goes on to solve for X and compute error bounds as */
+/* described below. */
+
+/* 4. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 5. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* 6. If equilibration was used, the matrix X is premultiplied by */
+/* diag(S) so that it solves the original system before */
+/* equilibration. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of the matrix A is */
+/* supplied on entry, and if not, whether the matrix A should be */
+/* equilibrated before it is factored. */
+/* = 'F': On entry, AFP contains the factored form of A. */
+/* If EQUED = 'Y', the matrix A has been equilibrated */
+/* with scaling factors given by S. AP and AFP will not */
+/* be modified. */
+/* = 'N': The matrix A will be copied to AFP and factored. */
+/* = 'E': The matrix A will be equilibrated if necessary, then */
+/* copied to AFP and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AP (input/output) REAL array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array, except if FACT = 'F' */
+/* and EQUED = 'Y', then A must contain the equilibrated matrix */
+/* diag(S)*A*diag(S). The j-th column of A is stored in the */
+/* array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. A is not modified if */
+/* FACT = 'F' or 'N', or if FACT = 'E' and EQUED = 'N' on exit. */
+
+/* On exit, if FACT = 'E' and EQUED = 'Y', A is overwritten by */
+/* diag(S)*A*diag(S). */
+
+/* AFP (input or output) REAL array, dimension */
+/* (N*(N+1)/2) */
+/* If FACT = 'F', then AFP is an input argument and on entry */
+/* contains the triangular factor U or L from the Cholesky */
+/* factorization A = U'*U or A = L*L', in the same storage */
+/* format as A. If EQUED .ne. 'N', then AFP is the factored */
+/* form of the equilibrated matrix A. */
+
+/* If FACT = 'N', then AFP is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U'*U or A = L*L' of the original matrix A. */
+
+/* If FACT = 'E', then AFP is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U'*U or A = L*L' of the equilibrated */
+/* matrix A (see the description of AP for the form of the */
+/* equilibrated matrix). */
+
+/* EQUED (input or output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration (always true if FACT = 'N'). */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+/* EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/* output argument. */
+
+/* S (input or output) REAL array, dimension (N) */
+/* The scale factors for A; not accessed if EQUED = 'N'. S is */
+/* an input argument if FACT = 'F'; otherwise, S is an output */
+/* argument. If FACT = 'F' and EQUED = 'Y', each element of S */
+/* must be positive. */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if EQUED = 'N', B is not modified; if EQUED = 'Y', */
+/* B is overwritten by diag(S) * B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) REAL array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X to */
+/* the original system of equations. Note that if EQUED = 'Y', */
+/* A and B are modified on exit, and the solution to the */
+/* equilibrated system is inv(diag(S))*X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) REAL */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A after equilibration (if done). If RCOND is less than the */
+/* machine precision (in particular, if RCOND = 0), the matrix */
+/* is singular to working precision. This condition is */
+/* indicated by a return code of INFO > 0. */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: the leading minor of order i of A is */
+/* not positive definite, so the factorization */
+/* could not be completed, and the solution has not */
+/* been computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* Further Details */
+/* =============== */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the symmetric matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = conjg(aji)) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --ap;
+ --afp;
+ --s;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ equil = lsame_(fact, "E");
+ if (nofact || equil) {
+ *(unsigned char *)equed = 'N';
+ rcequ = FALSE_;
+ } else {
+ rcequ = lsame_(equed, "Y");
+ smlnum = slamch_("Safe minimum");
+ bignum = 1.f / smlnum;
+ }
+
+/* Test the input parameters. */
+
+ if (! nofact && ! equil && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! lsame_(uplo, "U") && ! lsame_(uplo,
+ "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (lsame_(fact, "F") && ! (rcequ || lsame_(
+ equed, "N"))) {
+ *info = -7;
+ } else {
+ if (rcequ) {
+ smin = bignum;
+ smax = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ r__1 = smin, r__2 = s[j];
+ smin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = smax, r__2 = s[j];
+ smax = dmax(r__1,r__2);
+/* L10: */
+ }
+ if (smin <= 0.f) {
+ *info = -8;
+ } else if (*n > 0) {
+ scond = dmax(smin,smlnum) / dmin(smax,bignum);
+ } else {
+ scond = 1.f;
+ }
+ }
+ if (*info == 0) {
+ if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldx < max(1,*n)) {
+ *info = -12;
+ }
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPPSVX", &i__1);
+ return 0;
+ }
+
+ if (equil) {
+
+/* Compute row and column scalings to equilibrate the matrix A. */
+
+ sppequ_(uplo, n, &ap[1], &s[1], &scond, &amax, &infequ);
+ if (infequ == 0) {
+
+/* Equilibrate the matrix. */
+
+ slaqsp_(uplo, n, &ap[1], &s[1], &scond, &amax, equed);
+ rcequ = lsame_(equed, "Y");
+ }
+ }
+
+/* Scale the right-hand side. */
+
+ if (rcequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = s[i__] * b[i__ + j * b_dim1];
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+
+ if (nofact || equil) {
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+ i__1 = *n * (*n + 1) / 2;
+ scopy_(&i__1, &ap[1], &c__1, &afp[1], &c__1);
+ spptrf_(uplo, n, &afp[1], info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.f;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ anorm = slansp_("I", uplo, n, &ap[1], &work[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ sppcon_(uplo, n, &afp[1], &anorm, rcond, &work[1], &iwork[1], info);
+
+/* Compute the solution matrix X. */
+
+ slacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ spptrs_(uplo, n, nrhs, &afp[1], &x[x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solution and */
+/* compute error bounds and backward error estimates for it. */
+
+ spprfs_(uplo, n, nrhs, &ap[1], &afp[1], &b[b_offset], ldb, &x[x_offset],
+ ldx, &ferr[1], &berr[1], &work[1], &iwork[1], info);
+
+/* Transform the solution matrix X to a solution of the original */
+/* system. */
+
+ if (rcequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[i__ + j * x_dim1] = s[i__] * x[i__ + j * x_dim1];
+/* L40: */
+ }
+/* L50: */
+ }
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] /= scond;
+/* L60: */
+ }
+ }
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < slamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ return 0;
+
+/* End of SPPSVX */
+
+} /* sppsvx_ */
diff --git a/contrib/libs/clapack/spptrf.c b/contrib/libs/clapack/spptrf.c
new file mode 100644
index 0000000000..997337b26c
--- /dev/null
+++ b/contrib/libs/clapack/spptrf.c
@@ -0,0 +1,221 @@
+/* spptrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b16 = -1.f;
+
+/* Subroutine */ int spptrf_(char *uplo, integer *n, real *ap, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer j, jc, jj;
+ real ajj;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ extern /* Subroutine */ int sspr_(char *, integer *, real *, real *,
+ integer *, real *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int stpsv_(char *, char *, char *, integer *,
+ real *, real *, integer *), xerbla_(char *
+, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPPTRF computes the Cholesky factorization of a real symmetric */
+/* positive definite matrix A stored in packed format. */
+
+/* The factorization has the form */
+/* A = U**T * U, if UPLO = 'U', or */
+/* A = L * L**T, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) REAL array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. */
+
+/* On exit, if INFO = 0, the triangular factor U or L from the */
+/* Cholesky factorization A = U**T*U or A = L*L**T, in the same */
+/* storage format as A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* Further Details */
+/* ======= ======= */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the symmetric matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = aji) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPPTRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+ jj = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* JC indexes A(1,J) and JJ indexes A(J,J) in packed storage. */
+
+ jc = jj + 1;
+ jj += j;
+
+/* Compute elements 1:J-1 of column J. */
+
+ if (j > 1) {
+ i__2 = j - 1;
+ stpsv_("Upper", "Transpose", "Non-unit", &i__2, &ap[1], &ap[
+ jc], &c__1);
+ }
+
+/* Compute U(J,J) and test for non-positive-definiteness. */
+
+ i__2 = j - 1;
+ ajj = ap[jj] - sdot_(&i__2, &ap[jc], &c__1, &ap[jc], &c__1);
+ if (ajj <= 0.f) {
+ ap[jj] = ajj;
+ goto L30;
+ }
+ ap[jj] = sqrt(ajj);
+/* L10: */
+ }
+ } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+ jj = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute L(J,J) and test for non-positive-definiteness. */
+
+ ajj = ap[jj];
+ if (ajj <= 0.f) {
+ ap[jj] = ajj;
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ ap[jj] = ajj;
+
+/* Compute elements J+1:N of column J and update the trailing */
+/* submatrix. */
+
+ if (j < *n) {
+ i__2 = *n - j;
+ r__1 = 1.f / ajj;
+ sscal_(&i__2, &r__1, &ap[jj + 1], &c__1);
+ i__2 = *n - j;
+ sspr_("Lower", &i__2, &c_b16, &ap[jj + 1], &c__1, &ap[jj + *n
+ - j + 1]);
+
+/* Advance JJ to the diagonal entry of the next column. */
+
+ jj = jj + *n - j + 1;
+ }
+/* L20: */
+ }
+ }
+ goto L40;
+
+/* The leading minor of order J is not positive definite. */
+
+L30:
+ *info = j;
+
+L40:
+ return 0;
+
+/* End of SPPTRF */
+
+} /* spptrf_ */
diff --git a/contrib/libs/clapack/spptri.c b/contrib/libs/clapack/spptri.c
new file mode 100644
index 0000000000..c4e7e95c97
--- /dev/null
+++ b/contrib/libs/clapack/spptri.c
@@ -0,0 +1,171 @@
+/* spptri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b8 = 1.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int spptri_(char *uplo, integer *n, real *ap, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ integer j, jc, jj;
+ real ajj;
+ integer jjn;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ extern /* Subroutine */ int sspr_(char *, integer *, real *, real *,
+ integer *, real *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int stpmv_(char *, char *, char *, integer *,
+ real *, real *, integer *), xerbla_(char *
+, integer *), stptri_(char *, char *, integer *, real *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPPTRI computes the inverse of a real symmetric positive definite */
+/* matrix A using the Cholesky factorization A = U**T*U or A = L*L**T */
+/* computed by SPPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangular factor is stored in AP; */
+/* = 'L': Lower triangular factor is stored in AP. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) REAL array, dimension (N*(N+1)/2) */
+/* On entry, the triangular factor U or L from the Cholesky */
+/* factorization A = U**T*U or A = L*L**T, packed columnwise as */
+/* a linear array. The j-th column of U or L is stored in the */
+/* array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = U(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = L(i,j) for j<=i<=n. */
+
+/* On exit, the upper or lower triangle of the (symmetric) */
+/* inverse of A, overwriting the input factor U or L. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the (i,i) element of the factor U or L is */
+/* zero, and the inverse could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPPTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Invert the triangular Cholesky factor U or L. */
+
+ stptri_(uplo, "Non-unit", n, &ap[1], info);
+
+/* If INFO > 0, a diagonal element of the factor is zero and the */
+/* inverse cannot be computed; propagate INFO to the caller. */
+
+ if (*info > 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute the product inv(U) * inv(U)'. */
+
+ jj = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* JC indexes U(1,J) and JJ indexes U(J,J) in packed storage. */
+
+ jc = jj + 1;
+ jj += j;
+ if (j > 1) {
+ i__2 = j - 1;
+ sspr_("Upper", &i__2, &c_b8, &ap[jc], &c__1, &ap[1]);
+ }
+ ajj = ap[jj];
+ sscal_(&j, &ajj, &ap[jc], &c__1);
+/* L10: */
+ }
+
+ } else {
+
+/* Compute the product inv(L)' * inv(L). */
+
+ jj = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* JJN indexes the diagonal entry of column J+1. */
+
+ jjn = jj + *n - j + 1;
+ i__2 = *n - j + 1;
+ ap[jj] = sdot_(&i__2, &ap[jj], &c__1, &ap[jj], &c__1);
+ if (j < *n) {
+ i__2 = *n - j;
+ stpmv_("Lower", "Transpose", "Non-unit", &i__2, &ap[jjn], &ap[
+ jj + 1], &c__1);
+ }
+ jj = jjn;
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of SPPTRI */
+
+} /* spptri_ */
diff --git a/contrib/libs/clapack/spptrs.c b/contrib/libs/clapack/spptrs.c
new file mode 100644
index 0000000000..6d0d30e872
--- /dev/null
+++ b/contrib/libs/clapack/spptrs.c
@@ -0,0 +1,170 @@
+/* spptrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int spptrs_(char *uplo, integer *n, integer *nrhs, real *ap,
+ real *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ integer i__;
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern /* Subroutine */ int stpsv_(char *, char *, char *, integer *,
+ real *, real *, integer *), xerbla_(char *
+, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPPTRS solves a system of linear equations A*X = B with a symmetric */
+/* positive definite matrix A in packed storage using the Cholesky */
+/* factorization A = U**T*U or A = L*L**T computed by SPPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input) REAL array, dimension (N*(N+1)/2) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**T*U or A = L*L**T, packed columnwise in a linear */
+/* array. The j-th column of U or L is stored in the array AP */
+/* as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = U(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = L(i,j) for j<=i<=n. */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPPTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Solve A*X = B where A = U'*U. */
+/* Each right-hand-side column is solved independently by two */
+/* packed triangular solves. */
+
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Solve U'*X = B, overwriting B with X. */
+
+ stpsv_("Upper", "Transpose", "Non-unit", n, &ap[1], &b[i__ *
+ b_dim1 + 1], &c__1);
+
+/* Solve U*X = B, overwriting B with X. */
+
+ stpsv_("Upper", "No transpose", "Non-unit", n, &ap[1], &b[i__ *
+ b_dim1 + 1], &c__1);
+/* L10: */
+ }
+ } else {
+
+/* Solve A*X = B where A = L*L'. */
+/* Each right-hand-side column is solved independently by two */
+/* packed triangular solves. */
+
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Solve L*Y = B, overwriting B with X. */
+
+ stpsv_("Lower", "No transpose", "Non-unit", n, &ap[1], &b[i__ *
+ b_dim1 + 1], &c__1);
+
+/* Solve L'*X = Y, overwriting B with X. */
+
+ stpsv_("Lower", "Transpose", "Non-unit", n, &ap[1], &b[i__ *
+ b_dim1 + 1], &c__1);
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of SPPTRS */
+
+} /* spptrs_ */
diff --git a/contrib/libs/clapack/spstf2.c b/contrib/libs/clapack/spstf2.c
new file mode 100644
index 0000000000..0eab03ee9b
--- /dev/null
+++ b/contrib/libs/clapack/spstf2.c
@@ -0,0 +1,392 @@
+/* spstf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b16 = -1.f;
+static real c_b18 = 1.f;
+
+/* Subroutine */ int spstf2_(char *uplo, integer *n, real *a, integer *lda,
+ integer *piv, integer *rank, real *tol, real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, maxlocval;
+ real ajj;
+ integer pvt;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ integer itemp;
+ extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *);
+ real stemp;
+ logical upper;
+ extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+ integer *);
+ real sstop;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern logical sisnan_(real *);
+ extern integer smaxloc_(real *, integer *);
+
+
+/* -- LAPACK PROTOTYPE routine (version 3.2) -- */
+/* Craig Lucas, University of Manchester / NAG Ltd. */
+/* October, 2008 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPSTF2 computes the Cholesky factorization with complete */
+/* pivoting of a real symmetric positive semidefinite matrix A. */
+
+/* The factorization has the form */
+/* P' * A * P = U' * U , if UPLO = 'U', */
+/* P' * A * P = L * L', if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular, and */
+/* P is stored as vector PIV. */
+
+/* This algorithm does not attempt to check that A is positive */
+/* semidefinite. This version of the algorithm calls level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization as above. */
+
+/* PIV (output) INTEGER array, dimension (N) */
+/* PIV is such that the nonzero entries are P( PIV(K), K ) = 1. */
+
+/* RANK (output) INTEGER */
+/* The rank of A given by the number of steps the algorithm */
+/* completed. */
+
+/* TOL (input) REAL */
+/* User defined tolerance. If TOL < 0, then N*U*MAX( A( K,K ) ) */
+/* will be used. The algorithm terminates at the (K-1)st step */
+/* if the pivot <= TOL. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* WORK REAL array, dimension (2*N) */
+/* Work space. */
+
+/* INFO (output) INTEGER */
+/* < 0: If INFO = -K, the K-th argument had an illegal value, */
+/* = 0: algorithm completed successfully, and */
+/* > 0: the matrix A is either rank deficient with computed rank */
+/* as returned in RANK, or is indefinite. See Section 7 of */
+/* LAPACK Working Note #161 for further information. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ --work;
+ --piv;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPSTF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Initialize PIV */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ piv[i__] = i__;
+/* L100: */
+ }
+
+/* Find the largest diagonal element (the first pivot); fail */
+/* immediately if it is zero or NaN. */
+
+ pvt = 1;
+ ajj = a[pvt + pvt * a_dim1];
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ if (a[i__ + i__ * a_dim1] > ajj) {
+ pvt = i__;
+ ajj = a[pvt + pvt * a_dim1];
+ }
+ }
+ if (ajj == 0.f || sisnan_(&ajj)) {
+ *rank = 0;
+ *info = 1;
+ goto L170;
+ }
+
+/* Compute stopping value if not supplied */
+
+ if (*tol < 0.f) {
+ sstop = *n * slamch_("Epsilon") * ajj;
+ } else {
+ sstop = *tol;
+ }
+
+/* Set first half of WORK to zero, holds dot products */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L110: */
+ }
+
+ if (upper) {
+
+/* Compute the Cholesky factorization P' * A * P = U' * U */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Find pivot, test for exit, else swap rows and columns */
+/* Update dot products, compute possible pivots which are */
+/* stored in the second half of WORK */
+
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+
+ if (j > 1) {
+/* Computing 2nd power */
+ r__1 = a[j - 1 + i__ * a_dim1];
+ work[i__] += r__1 * r__1;
+ }
+ work[*n + i__] = a[i__ + i__ * a_dim1] - work[i__];
+
+/* L120: */
+ }
+
+ if (j > 1) {
+
+/* MAXLOCVAL = N - J + 1, the number of candidate pivots */
+/* stored in WORK(N+J:2N). */
+
+ maxlocval = (*n << 1) - (*n + j) + 1;
+ itemp = smaxloc_(&work[*n + j], &maxlocval);
+ pvt = itemp + j - 1;
+ ajj = work[*n + pvt];
+ if (ajj <= sstop || sisnan_(&ajj)) {
+ a[j + j * a_dim1] = ajj;
+ goto L160;
+ }
+ }
+
+ if (j != pvt) {
+
+/* Pivot OK, so can now swap pivot rows and columns */
+
+ a[pvt + pvt * a_dim1] = a[j + j * a_dim1];
+ i__2 = j - 1;
+ sswap_(&i__2, &a[j * a_dim1 + 1], &c__1, &a[pvt * a_dim1 + 1],
+ &c__1);
+ if (pvt < *n) {
+ i__2 = *n - pvt;
+ sswap_(&i__2, &a[j + (pvt + 1) * a_dim1], lda, &a[pvt + (
+ pvt + 1) * a_dim1], lda);
+ }
+ i__2 = pvt - j - 1;
+ sswap_(&i__2, &a[j + (j + 1) * a_dim1], lda, &a[j + 1 + pvt *
+ a_dim1], &c__1);
+
+/* Swap dot products and PIV */
+
+ stemp = work[j];
+ work[j] = work[pvt];
+ work[pvt] = stemp;
+ itemp = piv[pvt];
+ piv[pvt] = piv[j];
+ piv[j] = itemp;
+ }
+
+ ajj = sqrt(ajj);
+ a[j + j * a_dim1] = ajj;
+
+/* Compute elements J+1:N of row J */
+
+ if (j < *n) {
+ i__2 = j - 1;
+ i__3 = *n - j;
+ sgemv_("Trans", &i__2, &i__3, &c_b16, &a[(j + 1) * a_dim1 + 1]
+, lda, &a[j * a_dim1 + 1], &c__1, &c_b18, &a[j + (j +
+ 1) * a_dim1], lda);
+ i__2 = *n - j;
+ r__1 = 1.f / ajj;
+ sscal_(&i__2, &r__1, &a[j + (j + 1) * a_dim1], lda);
+ }
+
+/* L130: */
+ }
+
+ } else {
+
+/* Compute the Cholesky factorization P' * A * P = L * L' */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Find pivot, test for exit, else swap rows and columns */
+/* Update dot products, compute possible pivots which are */
+/* stored in the second half of WORK */
+
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+
+ if (j > 1) {
+/* Computing 2nd power */
+ r__1 = a[i__ + (j - 1) * a_dim1];
+ work[i__] += r__1 * r__1;
+ }
+ work[*n + i__] = a[i__ + i__ * a_dim1] - work[i__];
+
+/* L140: */
+ }
+
+ if (j > 1) {
+
+/* MAXLOCVAL = N - J + 1, the number of candidate pivots */
+/* stored in WORK(N+J:2N). */
+
+ maxlocval = (*n << 1) - (*n + j) + 1;
+ itemp = smaxloc_(&work[*n + j], &maxlocval);
+ pvt = itemp + j - 1;
+ ajj = work[*n + pvt];
+ if (ajj <= sstop || sisnan_(&ajj)) {
+ a[j + j * a_dim1] = ajj;
+ goto L160;
+ }
+ }
+
+ if (j != pvt) {
+
+/* Pivot OK, so can now swap pivot rows and columns */
+
+ a[pvt + pvt * a_dim1] = a[j + j * a_dim1];
+ i__2 = j - 1;
+ sswap_(&i__2, &a[j + a_dim1], lda, &a[pvt + a_dim1], lda);
+ if (pvt < *n) {
+ i__2 = *n - pvt;
+ sswap_(&i__2, &a[pvt + 1 + j * a_dim1], &c__1, &a[pvt + 1
+ + pvt * a_dim1], &c__1);
+ }
+ i__2 = pvt - j - 1;
+ sswap_(&i__2, &a[j + 1 + j * a_dim1], &c__1, &a[pvt + (j + 1)
+ * a_dim1], lda);
+
+/* Swap dot products and PIV */
+
+ stemp = work[j];
+ work[j] = work[pvt];
+ work[pvt] = stemp;
+ itemp = piv[pvt];
+ piv[pvt] = piv[j];
+ piv[j] = itemp;
+ }
+
+ ajj = sqrt(ajj);
+ a[j + j * a_dim1] = ajj;
+
+/* Compute elements J+1:N of column J */
+
+ if (j < *n) {
+ i__2 = *n - j;
+ i__3 = j - 1;
+ sgemv_("No Trans", &i__2, &i__3, &c_b16, &a[j + 1 + a_dim1],
+ lda, &a[j + a_dim1], lda, &c_b18, &a[j + 1 + j *
+ a_dim1], &c__1);
+ i__2 = *n - j;
+ r__1 = 1.f / ajj;
+ sscal_(&i__2, &r__1, &a[j + 1 + j * a_dim1], &c__1);
+ }
+
+/* L150: */
+ }
+
+ }
+
+/* Ran to completion, A has full rank */
+
+ *rank = *n;
+
+ goto L170;
+L160:
+
+/* Rank is number of steps completed. Set INFO = 1 to signal */
+/* that the factorization cannot be used to solve a system. */
+
+ *rank = j - 1;
+ *info = 1;
+
+L170:
+ return 0;
+
+/* End of SPSTF2 */
+
+} /* spstf2_ */
diff --git a/contrib/libs/clapack/spstrf.c b/contrib/libs/clapack/spstrf.c
new file mode 100644
index 0000000000..0f46f2ff2c
--- /dev/null
+++ b/contrib/libs/clapack/spstrf.c
@@ -0,0 +1,466 @@
+/* spstrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b22 = -1.f;
+static real c_b24 = 1.f;
+
+/* Subroutine */ int spstrf_(char *uplo, integer *n, real *a, integer *lda,
+ integer *piv, integer *rank, real *tol, real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k, maxlocval, jb, nb;
+ real ajj;
+ integer pvt;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ integer itemp;
+ extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *);
+ real stemp;
+ logical upper;
+ extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+ integer *);
+ real sstop;
+ extern /* Subroutine */ int ssyrk_(char *, char *, integer *, integer *,
+ real *, real *, integer *, real *, real *, integer *), spstf2_(char *, integer *, real *, integer *, integer *,
+ integer *, real *, real *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern logical sisnan_(real *);
+ extern integer smaxloc_(real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Craig Lucas, University of Manchester / NAG Ltd. */
+/* October, 2008 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPSTRF computes the Cholesky factorization with complete */
+/* pivoting of a real symmetric positive semidefinite matrix A. */
+
+/* The factorization has the form */
+/* P' * A * P = U' * U , if UPLO = 'U', */
+/* P' * A * P = L * L', if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular, and */
+/* P is stored as vector PIV. */
+
+/* This algorithm does not attempt to check that A is positive */
+/* semidefinite. This version of the algorithm calls level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization as above. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* PIV (output) INTEGER array, dimension (N) */
+/* PIV is such that the nonzero entries are P( PIV(K), K ) = 1. */
+
+/* RANK (output) INTEGER */
+/* The rank of A given by the number of steps the algorithm */
+/* completed. */
+
+/* TOL (input) REAL */
+/* User defined tolerance. If TOL < 0, then N*U*MAX( A(K,K) ) */
+/* will be used. The algorithm terminates at the (K-1)st step */
+/* if the pivot <= TOL. */
+
+/* WORK REAL array, dimension (2*N) */
+/* Work space. */
+
+/* INFO (output) INTEGER */
+/* < 0: If INFO = -K, the K-th argument had an illegal value, */
+/* = 0: algorithm completed successfully, and */
+/* > 0: the matrix A is either rank deficient with computed rank */
+/* as returned in RANK, or is indefinite. See Section 7 of */
+/* LAPACK Working Note #161 for further information. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --work;
+ --piv;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPSTRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Get block size */
+
+ nb = ilaenv_(&c__1, "SPOTRF", uplo, n, &c_n1, &c_n1, &c_n1);
+ if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code */
+
+ spstf2_(uplo, n, &a[a_dim1 + 1], lda, &piv[1], rank, tol, &work[1],
+ info);
+ goto L200;
+
+ } else {
+
+/* Initialize PIV */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ piv[i__] = i__;
+/* L100: */
+ }
+
+/* Find the largest diagonal element (the first pivot); fail */
+/* immediately if it is zero or NaN. */
+
+ pvt = 1;
+ ajj = a[pvt + pvt * a_dim1];
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ if (a[i__ + i__ * a_dim1] > ajj) {
+ pvt = i__;
+ ajj = a[pvt + pvt * a_dim1];
+ }
+ }
+ if (ajj == 0.f || sisnan_(&ajj)) {
+ *rank = 0;
+ *info = 1;
+ goto L200;
+ }
+
+/* Compute stopping value if not supplied */
+
+ if (*tol < 0.f) {
+ sstop = *n * slamch_("Epsilon") * ajj;
+ } else {
+ sstop = *tol;
+ }
+
+
+ if (upper) {
+
+/* Compute the Cholesky factorization P' * A * P = U' * U */
+
+/* K steps through the column blocks, NB columns at a time. */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (k = 1; i__2 < 0 ? k >= i__1 : k <= i__1; k += i__2) {
+
+/* Account for last block not being NB wide */
+
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - k + 1;
+ jb = min(i__3,i__4);
+
+/* Set relevant part of first half of WORK to zero, */
+/* holds dot products */
+
+ i__3 = *n;
+ for (i__ = k; i__ <= i__3; ++i__) {
+ work[i__] = 0.f;
+/* L110: */
+ }
+
+ i__3 = k + jb - 1;
+ for (j = k; j <= i__3; ++j) {
+
+/* Find pivot, test for exit, else swap rows and columns */
+/* Update dot products, compute possible pivots which are */
+/* stored in the second half of WORK */
+
+ i__4 = *n;
+ for (i__ = j; i__ <= i__4; ++i__) {
+
+ if (j > k) {
+/* Computing 2nd power */
+ r__1 = a[j - 1 + i__ * a_dim1];
+ work[i__] += r__1 * r__1;
+ }
+ work[*n + i__] = a[i__ + i__ * a_dim1] - work[i__];
+
+/* L120: */
+ }
+
+ if (j > 1) {
+
+/* MAXLOCVAL = N - J + 1, the number of candidate */
+/* pivots stored in WORK(N+J:2N). */
+
+ maxlocval = (*n << 1) - (*n + j) + 1;
+ itemp = smaxloc_(&work[*n + j], &maxlocval);
+ pvt = itemp + j - 1;
+ ajj = work[*n + pvt];
+ if (ajj <= sstop || sisnan_(&ajj)) {
+ a[j + j * a_dim1] = ajj;
+ goto L190;
+ }
+ }
+
+ if (j != pvt) {
+
+/* Pivot OK, so can now swap pivot rows and columns */
+
+ a[pvt + pvt * a_dim1] = a[j + j * a_dim1];
+ i__4 = j - 1;
+ sswap_(&i__4, &a[j * a_dim1 + 1], &c__1, &a[pvt *
+ a_dim1 + 1], &c__1);
+ if (pvt < *n) {
+ i__4 = *n - pvt;
+ sswap_(&i__4, &a[j + (pvt + 1) * a_dim1], lda, &a[
+ pvt + (pvt + 1) * a_dim1], lda);
+ }
+ i__4 = pvt - j - 1;
+ sswap_(&i__4, &a[j + (j + 1) * a_dim1], lda, &a[j + 1
+ + pvt * a_dim1], &c__1);
+
+/* Swap dot products and PIV */
+
+ stemp = work[j];
+ work[j] = work[pvt];
+ work[pvt] = stemp;
+ itemp = piv[pvt];
+ piv[pvt] = piv[j];
+ piv[j] = itemp;
+ }
+
+ ajj = sqrt(ajj);
+ a[j + j * a_dim1] = ajj;
+
+/* Compute elements J+1:N of row J. */
+
+ if (j < *n) {
+ i__4 = j - k;
+ i__5 = *n - j;
+ sgemv_("Trans", &i__4, &i__5, &c_b22, &a[k + (j + 1) *
+ a_dim1], lda, &a[k + j * a_dim1], &c__1, &
+ c_b24, &a[j + (j + 1) * a_dim1], lda);
+ i__4 = *n - j;
+ r__1 = 1.f / ajj;
+ sscal_(&i__4, &r__1, &a[j + (j + 1) * a_dim1], lda);
+ }
+
+/* L130: */
+ }
+
+/* Update trailing matrix, J already incremented */
+
+ if (k + jb <= *n) {
+ i__3 = *n - j + 1;
+ ssyrk_("Upper", "Trans", &i__3, &jb, &c_b22, &a[k + j *
+ a_dim1], lda, &c_b24, &a[j + j * a_dim1], lda);
+ }
+
+/* L140: */
+ }
+
+ } else {
+
+/* Compute the Cholesky factorization P' * A * P = L * L' */
+
+/* K steps through the column blocks, NB columns at a time. */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (k = 1; i__1 < 0 ? k >= i__2 : k <= i__2; k += i__1) {
+
+/* Account for last block not being NB wide */
+
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - k + 1;
+ jb = min(i__3,i__4);
+
+/* Set relevant part of first half of WORK to zero, */
+/* holds dot products */
+
+ i__3 = *n;
+ for (i__ = k; i__ <= i__3; ++i__) {
+ work[i__] = 0.f;
+/* L150: */
+ }
+
+ i__3 = k + jb - 1;
+ for (j = k; j <= i__3; ++j) {
+
+/* Find pivot, test for exit, else swap rows and columns */
+/* Update dot products, compute possible pivots which are */
+/* stored in the second half of WORK */
+
+ i__4 = *n;
+ for (i__ = j; i__ <= i__4; ++i__) {
+
+ if (j > k) {
+/* Computing 2nd power */
+ r__1 = a[i__ + (j - 1) * a_dim1];
+ work[i__] += r__1 * r__1;
+ }
+ work[*n + i__] = a[i__ + i__ * a_dim1] - work[i__];
+
+/* L160: */
+ }
+
+ if (j > 1) {
+
+/* MAXLOCVAL = N - J + 1, the number of candidate */
+/* pivots stored in WORK(N+J:2N). */
+
+ maxlocval = (*n << 1) - (*n + j) + 1;
+ itemp = smaxloc_(&work[*n + j], &maxlocval);
+ pvt = itemp + j - 1;
+ ajj = work[*n + pvt];
+ if (ajj <= sstop || sisnan_(&ajj)) {
+ a[j + j * a_dim1] = ajj;
+ goto L190;
+ }
+ }
+
+ if (j != pvt) {
+
+/* Pivot OK, so can now swap pivot rows and columns */
+
+ a[pvt + pvt * a_dim1] = a[j + j * a_dim1];
+ i__4 = j - 1;
+ sswap_(&i__4, &a[j + a_dim1], lda, &a[pvt + a_dim1],
+ lda);
+ if (pvt < *n) {
+ i__4 = *n - pvt;
+ sswap_(&i__4, &a[pvt + 1 + j * a_dim1], &c__1, &a[
+ pvt + 1 + pvt * a_dim1], &c__1);
+ }
+ i__4 = pvt - j - 1;
+ sswap_(&i__4, &a[j + 1 + j * a_dim1], &c__1, &a[pvt +
+ (j + 1) * a_dim1], lda);
+
+/* Swap dot products and PIV */
+
+ stemp = work[j];
+ work[j] = work[pvt];
+ work[pvt] = stemp;
+ itemp = piv[pvt];
+ piv[pvt] = piv[j];
+ piv[j] = itemp;
+ }
+
+ ajj = sqrt(ajj);
+ a[j + j * a_dim1] = ajj;
+
+/* Compute elements J+1:N of column J. */
+
+ if (j < *n) {
+ i__4 = *n - j;
+ i__5 = j - k;
+ sgemv_("No Trans", &i__4, &i__5, &c_b22, &a[j + 1 + k
+ * a_dim1], lda, &a[j + k * a_dim1], lda, &
+ c_b24, &a[j + 1 + j * a_dim1], &c__1);
+ i__4 = *n - j;
+ r__1 = 1.f / ajj;
+ sscal_(&i__4, &r__1, &a[j + 1 + j * a_dim1], &c__1);
+ }
+
+/* L170: */
+ }
+
+/* Update trailing matrix, J already incremented */
+
+ if (k + jb <= *n) {
+ i__3 = *n - j + 1;
+ ssyrk_("Lower", "No Trans", &i__3, &jb, &c_b22, &a[j + k *
+ a_dim1], lda, &c_b24, &a[j + j * a_dim1], lda);
+ }
+
+/* L180: */
+ }
+
+ }
+ }
+
+/* Ran to completion, A has full rank */
+
+ *rank = *n;
+
+ goto L200;
+L190:
+
+/* Rank is the number of steps completed. Set INFO = 1 to signal */
+/* that the factorization cannot be used to solve a system. */
+
+ *rank = j - 1;
+ *info = 1;
+
+L200:
+ return 0;
+
+/* End of SPSTRF */
+
+} /* spstrf_ */
diff --git a/contrib/libs/clapack/sptcon.c b/contrib/libs/clapack/sptcon.c
new file mode 100644
index 0000000000..1861ebeee7
--- /dev/null
+++ b/contrib/libs/clapack/sptcon.c
@@ -0,0 +1,184 @@
+/* sptcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sptcon_(integer *n, real *d__, real *e, real *anorm,
+ real *rcond, real *work, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1;
+
+ /* Local variables */
+ integer i__, ix;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ real ainvnm;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPTCON computes the reciprocal of the condition number (in the */
+/* 1-norm) of a real symmetric positive definite tridiagonal matrix */
+/* using the factorization A = L*D*L**T or A = U**T*D*U computed by */
+/* SPTTRF. */
+
+/* Norm(inv(A)) is computed by a direct method, and the reciprocal of */
+/* the condition number is computed as */
+/* RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* D (input) REAL array, dimension (N) */
+/* The n diagonal elements of the diagonal matrix D from the */
+/* factorization of A, as computed by SPTTRF. */
+
+/* E (input) REAL array, dimension (N-1) */
+/* The (n-1) off-diagonal elements of the unit bidiagonal factor */
+/* U or L from the factorization of A, as computed by SPTTRF. */
+
+/* ANORM (input) REAL */
+/* The 1-norm of the original matrix A. */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is the */
+/* 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The method used is described in Nicholas J. Higham, "Efficient */
+/* Algorithms for Computing the Condition Number of a Tridiagonal */
+/* Matrix", SIAM J. Sci. Stat. Comput., Vol. 7, No. 1, January 1986. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments. */
+
+ /* Parameter adjustments */
+ --work;
+ --e;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*anorm < 0.f) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPTCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.f;
+ if (*n == 0) {
+ *rcond = 1.f;
+ return 0;
+ } else if (*anorm == 0.f) {
+ return 0;
+ }
+
+/* Check that D(1:N) is positive. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] <= 0.f) {
+ return 0;
+ }
+/* L10: */
+ }
+
+/* Solve M(A) * x = e, where M(A) = (m(i,j)) is given by */
+
+/* m(i,j) = abs(A(i,j)), i = j, */
+/* m(i,j) = -abs(A(i,j)), i .ne. j, */
+
+/* and e = [ 1, 1, ..., 1 ]'. Note M(A) = M(L)*D*M(L)'. */
+
+/* Solve M(L) * x = e. */
+
+ work[1] = 1.f;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ work[i__] = work[i__ - 1] * (r__1 = e[i__ - 1], dabs(r__1)) + 1.f;
+/* L20: */
+ }
+
+/* Solve D * M(L)' * x = b. */
+
+ work[*n] /= d__[*n];
+ for (i__ = *n - 1; i__ >= 1; --i__) {
+ work[i__] = work[i__] / d__[i__] + work[i__ + 1] * (r__1 = e[i__],
+ dabs(r__1));
+/* L30: */
+ }
+
+/* Compute AINVNM = max(x(i)), 1<=i<=n. */
+
+ ix = isamax_(n, &work[1], &c__1);
+ ainvnm = (r__1 = work[ix], dabs(r__1));
+
+/* Compute the reciprocal condition number. */
+
+ if (ainvnm != 0.f) {
+ *rcond = 1.f / ainvnm / *anorm;
+ }
+
+ return 0;
+
+/* End of SPTCON */
+
+} /* sptcon_ */
diff --git a/contrib/libs/clapack/spteqr.c b/contrib/libs/clapack/spteqr.c
new file mode 100644
index 0000000000..90fd206bd7
--- /dev/null
+++ b/contrib/libs/clapack/spteqr.c
@@ -0,0 +1,240 @@
+/* spteqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b7 = 0.f;
+static real c_b8 = 1.f;
+static integer c__0 = 0;
+static integer c__1 = 1;
+
+/* Subroutine */ int spteqr_(char *compz, integer *n, real *d__, real *e,
+ real *z__, integer *ldz, real *work, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ real c__[1] /* was [1][1] */;
+ integer i__;
+ real vt[1] /* was [1][1] */;
+ integer nru;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), slaset_(
+ char *, integer *, integer *, real *, real *, real *, integer *), sbdsqr_(char *, integer *, integer *, integer *, integer
+ *, real *, real *, real *, integer *, real *, integer *, real *,
+ integer *, real *, integer *);
+ integer icompz;
+ extern /* Subroutine */ int spttrf_(integer *, real *, real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPTEQR computes all eigenvalues and, optionally, eigenvectors of a */
+/* symmetric positive definite tridiagonal matrix by first factoring the */
+/* matrix using SPTTRF, and then calling SBDSQR to compute the singular */
+/* values of the bidiagonal factor. */
+
+/* This routine computes the eigenvalues of the positive definite */
+/* tridiagonal matrix to high relative accuracy. This means that if the */
+/* eigenvalues range over many orders of magnitude in size, then the */
+/* small eigenvalues and corresponding eigenvectors will be computed */
+/* more accurately than, for example, with the standard QR method. */
+
+/* The eigenvectors of a full or band symmetric positive definite matrix */
+/* can also be found if SSYTRD, SSPTRD, or SSBTRD has been used to */
+/* reduce this matrix to tridiagonal form. (The reduction to tridiagonal */
+/* form, however, may preclude the possibility of obtaining high */
+/* relative accuracy in the small eigenvalues of the original matrix, if */
+/* these eigenvalues range over many orders of magnitude.) */
+
+/* Arguments */
+/* ========= */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only. */
+/* = 'V': Compute eigenvectors of original symmetric */
+/* matrix also. Array Z contains the orthogonal */
+/* matrix used to reduce the original matrix to */
+/* tridiagonal form. */
+/* = 'I': Compute eigenvectors of tridiagonal matrix also. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the n diagonal elements of the tridiagonal */
+/* matrix. */
+/* On normal exit, D contains the eigenvalues, in descending */
+/* order. */
+
+/* E (input/output) REAL array, dimension (N-1) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix. */
+/* On exit, E has been destroyed. */
+
+/* Z (input/output) REAL array, dimension (LDZ, N) */
+/* On entry, if COMPZ = 'V', the orthogonal matrix used in the */
+/* reduction to tridiagonal form. */
+/* On exit, if COMPZ = 'V', the orthonormal eigenvectors of the */
+/* original symmetric matrix; */
+/* if COMPZ = 'I', the orthonormal eigenvectors of the */
+/* tridiagonal matrix. */
+/* If INFO > 0 on exit, Z contains the eigenvectors associated */
+/* with only the stored eigenvalues. */
+/* If COMPZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* COMPZ = 'V' or 'I', LDZ >= max(1,N). */
+
+/* WORK (workspace) REAL array, dimension (4*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, and i is: */
+/* <= N the Cholesky factorization of the matrix could */
+/* not be performed because the i-th principal minor */
+/* was not positive definite. */
+/* > N the SVD algorithm failed to converge; */
+/* if INFO = N+i, i off-diagonal elements of the */
+/* bidiagonal factor did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (lsame_(compz, "N")) {
+ icompz = 0;
+ } else if (lsame_(compz, "V")) {
+ icompz = 1;
+ } else if (lsame_(compz, "I")) {
+ icompz = 2;
+ } else {
+ icompz = -1;
+ }
+ if (icompz < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldz < 1 || icompz > 0 && *ldz < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPTEQR", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (icompz > 0) {
+ z__[z_dim1 + 1] = 1.f;
+ }
+ return 0;
+ }
+ if (icompz == 2) {
+ slaset_("Full", n, n, &c_b7, &c_b8, &z__[z_offset], ldz);
+ }
+
+/* Call SPTTRF to factor the matrix. */
+
+ spttrf_(n, &d__[1], &e[1], info);
+ if (*info != 0) {
+ return 0;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = sqrt(d__[i__]);
+/* L10: */
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ e[i__] *= d__[i__];
+/* L20: */
+ }
+
+/* Call SBDSQR to compute the singular values/vectors of the */
+/* bidiagonal factor. */
+
+ if (icompz > 0) {
+ nru = *n;
+ } else {
+ nru = 0;
+ }
+ sbdsqr_("Lower", n, &c__0, &nru, &c__0, &d__[1], &e[1], vt, &c__1, &z__[
+ z_offset], ldz, c__, &c__1, &work[1], info);
+
+/* Square the singular values. */
+
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] *= d__[i__];
+/* L30: */
+ }
+ } else {
+ *info = *n + *info;
+ }
+
+ return 0;
+
+/* End of SPTEQR */
+
+} /* spteqr_ */
diff --git a/contrib/libs/clapack/sptrfs.c b/contrib/libs/clapack/sptrfs.c
new file mode 100644
index 0000000000..f6d9adb7e1
--- /dev/null
+++ b/contrib/libs/clapack/sptrfs.c
@@ -0,0 +1,365 @@
+/* sptrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b11 = 1.f;
+
+/* Subroutine */ int sptrfs_(integer *n, integer *nrhs, real *d__, real *e,
+ real *df, real *ef, real *b, integer *ldb, real *x, integer *ldx,
+ real *ferr, real *berr, real *work, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2;
+ real r__1, r__2, r__3;
+
+ /* Local variables */
+ integer i__, j;
+ real s, bi, cx, dx, ex;
+ integer ix, nz;
+ real eps, safe1, safe2;
+ integer count;
+ extern /* Subroutine */ int saxpy_(integer *, real *, real *, integer *,
+ real *, integer *);
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ real lstres;
+ extern /* Subroutine */ int spttrs_(integer *, integer *, real *, real *,
+ real *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPTRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is symmetric positive definite */
+/* and tridiagonal, and provides error bounds and backward error */
+/* estimates for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* D (input) REAL array, dimension (N) */
+/* The n diagonal elements of the tridiagonal matrix A. */
+
+/* E (input) REAL array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of the tridiagonal matrix A. */
+
+/* DF (input) REAL array, dimension (N) */
+/* The n diagonal elements of the diagonal matrix D from the */
+/* factorization computed by SPTTRF. */
+
+/* EF (input) REAL array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of the unit bidiagonal factor */
+/* L from the factorization computed by SPTTRF. */
+
+/* B (input) REAL array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) REAL array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by SPTTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) REAL array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ --df;
+ --ef;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*nrhs < 0) {
+ *info = -2;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ } else if (*ldx < max(1,*n)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPTRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.f;
+ berr[j] = 0.f;
+/* L10: */
+ }
+ return 0;
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = 4;
+ eps = slamch_("Epsilon");
+ safmin = slamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X. Also compute */
+/* abs(A)*abs(x) + abs(b) for use in the backward error bound. */
+
+ if (*n == 1) {
+ bi = b[j * b_dim1 + 1];
+ dx = d__[1] * x[j * x_dim1 + 1];
+ work[*n + 1] = bi - dx;
+ work[1] = dabs(bi) + dabs(dx);
+ } else {
+ bi = b[j * b_dim1 + 1];
+ dx = d__[1] * x[j * x_dim1 + 1];
+ ex = e[1] * x[j * x_dim1 + 2];
+ work[*n + 1] = bi - dx - ex;
+ work[1] = dabs(bi) + dabs(dx) + dabs(ex);
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ bi = b[i__ + j * b_dim1];
+ cx = e[i__ - 1] * x[i__ - 1 + j * x_dim1];
+ dx = d__[i__] * x[i__ + j * x_dim1];
+ ex = e[i__] * x[i__ + 1 + j * x_dim1];
+ work[*n + i__] = bi - cx - dx - ex;
+ work[i__] = dabs(bi) + dabs(cx) + dabs(dx) + dabs(ex);
+/* L30: */
+ }
+ bi = b[*n + j * b_dim1];
+ cx = e[*n - 1] * x[*n - 1 + j * x_dim1];
+ dx = d__[*n] * x[*n + j * x_dim1];
+ work[*n + *n] = bi - cx - dx;
+ work[*n] = dabs(bi) + dabs(cx) + dabs(dx);
+ }
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ s = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+/* Computing MAX */
+ r__2 = s, r__3 = (r__1 = work[*n + i__], dabs(r__1)) / work[
+ i__];
+ s = dmax(r__2,r__3);
+ } else {
+/* Computing MAX */
+ r__2 = s, r__3 = ((r__1 = work[*n + i__], dabs(r__1)) + safe1)
+ / (work[i__] + safe1);
+ s = dmax(r__2,r__3);
+ }
+/* L40: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ spttrs_(n, &c__1, &df[1], &ef[1], &work[*n + 1], n, info);
+ saxpy_(n, &c_b11, &work[*n + 1], &c__1, &x[j * x_dim1 + 1], &c__1)
+ ;
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+ work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+ work[i__];
+ } else {
+ work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+ work[i__] + safe1;
+ }
+/* L50: */
+ }
+ ix = isamax_(n, &work[1], &c__1);
+ ferr[j] = work[ix];
+
+/* Estimate the norm of inv(A). */
+
+/* Solve M(A) * x = e, where M(A) = (m(i,j)) is given by */
+
+/* m(i,j) = abs(A(i,j)), i = j, */
+/* m(i,j) = -abs(A(i,j)), i .ne. j, */
+
+/* and e = [ 1, 1, ..., 1 ]'. Note M(A) = M(L)*D*M(L)'. */
+
+/* Solve M(L) * x = e. */
+
+ work[1] = 1.f;
+ i__2 = *n;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ work[i__] = work[i__ - 1] * (r__1 = ef[i__ - 1], dabs(r__1)) +
+ 1.f;
+/* L60: */
+ }
+
+/* Solve D * M(L)' * x = b. */
+
+ work[*n] /= df[*n];
+ for (i__ = *n - 1; i__ >= 1; --i__) {
+ work[i__] = work[i__] / df[i__] + work[i__ + 1] * (r__1 = ef[i__],
+ dabs(r__1));
+/* L70: */
+ }
+
+/* Compute norm(inv(A)) = max(x(i)), 1<=i<=n. */
+
+ ix = isamax_(n, &work[1], &c__1);
+ ferr[j] *= (r__1 = work[ix], dabs(r__1));
+
+/* Normalize error. */
+
+ lstres = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = lstres, r__3 = (r__1 = x[i__ + j * x_dim1], dabs(r__1));
+ lstres = dmax(r__2,r__3);
+/* L80: */
+ }
+ if (lstres != 0.f) {
+ ferr[j] /= lstres;
+ }
+
+/* L90: */
+ }
+
+ return 0;
+
+/* End of SPTRFS */
+
+} /* sptrfs_ */
diff --git a/contrib/libs/clapack/sptsv.c b/contrib/libs/clapack/sptsv.c
new file mode 100644
index 0000000000..f39fe1e390
--- /dev/null
+++ b/contrib/libs/clapack/sptsv.c
@@ -0,0 +1,129 @@
+/* sptsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sptsv_(integer *n, integer *nrhs, real *d__, real *e,
+ real *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern /* Subroutine */ int xerbla_(char *, integer *), spttrf_(
+ integer *, real *, real *, integer *), spttrs_(integer *, integer
+ *, real *, real *, real *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPTSV computes the solution to a real system of linear equations */
+/* A*X = B, where A is an N-by-N symmetric positive definite tridiagonal */
+/* matrix, and X and B are N-by-NRHS matrices. */
+
+/* A is factored as A = L*D*L**T, and the factored form of A is then */
+/* used to solve the system of equations. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the n diagonal elements of the tridiagonal matrix */
+/* A. On exit, the n diagonal elements of the diagonal matrix */
+/* D from the factorization A = L*D*L**T. */
+
+/* E (input/output) REAL array, dimension (N-1) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix A. On exit, the (n-1) subdiagonal elements of the */
+/* unit bidiagonal factor L from the L*D*L**T factorization of */
+/* A. (E can also be regarded as the superdiagonal of the unit */
+/* bidiagonal factor U from the U**T*D*U factorization of A.) */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i is not */
+/* positive definite, and the solution has not been */
+/* computed. The factorization has not been completed */
+/* unless i = N. */
+
+/* ===================================================================== */
+
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*nrhs < 0) {
+ *info = -2;
+ } else if (*ldb < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPTSV ", &i__1);
+ return 0;
+ }
+
+/* Compute the L*D*L' (or U'*D*U) factorization of A. */
+
+ spttrf_(n, &d__[1], &e[1], info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ spttrs_(n, nrhs, &d__[1], &e[1], &b[b_offset], ldb, info);
+ }
+ return 0;
+
+/* End of SPTSV */
+
+} /* sptsv_ */
diff --git a/contrib/libs/clapack/sptsvx.c b/contrib/libs/clapack/sptsvx.c
new file mode 100644
index 0000000000..d8f52037ee
--- /dev/null
+++ b/contrib/libs/clapack/sptsvx.c
@@ -0,0 +1,279 @@
+/* sptsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sptsvx_(char *fact, integer *n, integer *nrhs, real *d__,
+ real *e, real *df, real *ef, real *b, integer *ldb, real *x, integer
+ *ldx, real *rcond, real *ferr, real *berr, real *work, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ real anorm;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ extern doublereal slamch_(char *);
+ logical nofact;
+ extern /* Subroutine */ int xerbla_(char *, integer *), slacpy_(
+ char *, integer *, integer *, real *, integer *, real *, integer *
+);
+ extern doublereal slanst_(char *, integer *, real *, real *);
+ extern /* Subroutine */ int sptcon_(integer *, real *, real *, real *,
+ real *, real *, integer *), sptrfs_(integer *, integer *, real *,
+ real *, real *, real *, real *, integer *, real *, integer *,
+ real *, real *, real *, integer *), spttrf_(integer *, real *,
+ real *, integer *), spttrs_(integer *, integer *, real *, real *,
+ real *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SPTSVX uses the factorization A = L*D*L**T to compute the solution */
+/* to a real system of linear equations A*X = B, where A is an N-by-N */
+/* symmetric positive definite tridiagonal matrix and X and B are */
+/* N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the matrix A is factored as A = L*D*L**T, where L */
+/* is a unit lower bidiagonal matrix and D is diagonal. The */
+/* factorization can also be regarded as having the form */
+/* A = U**T*D*U. */
+
+/* 2. If the leading i-by-i principal minor is not positive definite, */
+/* then the routine returns with INFO = i. Otherwise, the factored */
+/* form of A is used to estimate the condition number of the matrix */
+/* A. If the reciprocal of the condition number is less than machine */
+/* precision, INFO = N+1 is returned as a warning, but the routine */
+/* still goes on to solve for X and compute error bounds as */
+/* described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of A has been */
+/* supplied on entry. */
+/* = 'F': On entry, DF and EF contain the factored form of A. */
+/* D, E, DF, and EF will not be modified. */
+/* = 'N': The matrix A will be copied to DF and EF and */
+/* factored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* D (input) REAL array, dimension (N) */
+/* The n diagonal elements of the tridiagonal matrix A. */
+
+/* E (input) REAL array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of the tridiagonal matrix A. */
+
+/* DF (input or output) REAL array, dimension (N) */
+/* If FACT = 'F', then DF is an input argument and on entry */
+/* contains the n diagonal elements of the diagonal matrix D */
+/* from the L*D*L**T factorization of A. */
+/* If FACT = 'N', then DF is an output argument and on exit */
+/* contains the n diagonal elements of the diagonal matrix D */
+/* from the L*D*L**T factorization of A. */
+
+/* EF (input or output) REAL array, dimension (N-1) */
+/* If FACT = 'F', then EF is an input argument and on entry */
+/* contains the (n-1) subdiagonal elements of the unit */
+/* bidiagonal factor L from the L*D*L**T factorization of A. */
+/* If FACT = 'N', then EF is an output argument and on exit */
+/* contains the (n-1) subdiagonal elements of the unit */
+/* bidiagonal factor L from the L*D*L**T factorization of A. */
+
+/* B (input) REAL array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) REAL array, dimension (LDX,NRHS) */
+/* If INFO = 0 of INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) REAL */
+/* The reciprocal condition number of the matrix A. If RCOND */
+/* is less than the machine precision (in particular, if */
+/* RCOND = 0), the matrix is singular to working precision. */
+/* This condition is indicated by a return code of INFO > 0. */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in any */
+/* element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) REAL array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: the leading minor of order i of A is */
+/* not positive definite, so the factorization */
+/* could not be completed, and the solution has not */
+/* been computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ --df;
+ --ef;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ if (! nofact && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldx < max(1,*n)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPTSVX", &i__1);
+ return 0;
+ }
+
+ if (nofact) {
+
+/* Compute the L*D*L' (or U'*D*U) factorization of A. */
+
+ scopy_(n, &d__[1], &c__1, &df[1], &c__1);
+ if (*n > 1) {
+ i__1 = *n - 1;
+ scopy_(&i__1, &e[1], &c__1, &ef[1], &c__1);
+ }
+ spttrf_(n, &df[1], &ef[1], info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.f;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ anorm = slanst_("1", n, &d__[1], &e[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ sptcon_(n, &df[1], &ef[1], &anorm, rcond, &work[1], info);
+
+/* Compute the solution vectors X. */
+
+ slacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ spttrs_(n, nrhs, &df[1], &ef[1], &x[x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+ sptrfs_(n, nrhs, &d__[1], &e[1], &df[1], &ef[1], &b[b_offset], ldb, &x[
+ x_offset], ldx, &ferr[1], &berr[1], &work[1], info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < slamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ return 0;
+
+/* End of SPTSVX */
+
+} /* sptsvx_ */
diff --git a/contrib/libs/clapack/spttrf.c b/contrib/libs/clapack/spttrf.c
new file mode 100644
index 0000000000..f3cda23b4d
--- /dev/null
+++ b/contrib/libs/clapack/spttrf.c
@@ -0,0 +1,180 @@
+/* spttrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int spttrf_(integer *n, real *d__, real *e, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+
+    /* Local variables */
+    integer i__, i4;
+    real ei;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SPTTRF computes the L*D*L' factorization of a real symmetric */
+/*  positive definite tridiagonal matrix A.  The factorization may also */
+/*  be regarded as having the form A = U'*D*U. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  D       (input/output) REAL array, dimension (N) */
+/*          On entry, the n diagonal elements of the tridiagonal matrix */
+/*          A.  On exit, the n diagonal elements of the diagonal matrix */
+/*          D from the L*D*L' factorization of A. */
+
+/*  E       (input/output) REAL array, dimension (N-1) */
+/*          On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/*          matrix A.  On exit, the (n-1) subdiagonal elements of the */
+/*          unit bidiagonal factor L from the L*D*L' factorization of A. */
+/*          E can also be regarded as the superdiagonal of the unit */
+/*          bidiagonal factor U from the U'*D*U factorization of A. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0: successful exit */
+/*          < 0: if INFO = -k, the k-th argument had an illegal value */
+/*          > 0: if INFO = k, the leading minor of order k is not */
+/*               positive definite; if k < N, the factorization could not */
+/*               be completed, while if k = N, the factorization was */
+/*               completed, but D(N) <= 0. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --e;
+    --d__;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+	i__1 = -(*info);
+	xerbla_("SPTTRF", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Compute the L*D*L' (or U'*D*U) factorization of A. */
+
+/*     The elimination loop below is unrolled by hand in strides of four. */
+/*     Handle the (n-1) mod 4 leftover iterations first so that the main */
+/*     loop can always take full strides of four. */
+
+    i4 = (*n - 1) % 4;
+    i__1 = i4;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if (d__[i__] <= 0.f) {
+	    *info = i__;
+	    goto L30;
+	}
+	ei = e[i__];
+	e[i__] = ei / d__[i__];
+	d__[i__ + 1] -= e[i__] * ei;
+/* L10: */
+    }
+
+    i__1 = *n - 4;
+    for (i__ = i4 + 1; i__ <= i__1; i__ += 4) {
+
+/*        Drop out of the loop if d(i) <= 0: the matrix is not positive */
+/*        definite. */
+
+	if (d__[i__] <= 0.f) {
+	    *info = i__;
+	    goto L30;
+	}
+
+/*        Solve for e(i) and d(i+1). */
+
+	ei = e[i__];
+	e[i__] = ei / d__[i__];
+	d__[i__ + 1] -= e[i__] * ei;
+
+	if (d__[i__ + 1] <= 0.f) {
+	    *info = i__ + 1;
+	    goto L30;
+	}
+
+/*        Solve for e(i+1) and d(i+2). */
+
+	ei = e[i__ + 1];
+	e[i__ + 1] = ei / d__[i__ + 1];
+	d__[i__ + 2] -= e[i__ + 1] * ei;
+
+	if (d__[i__ + 2] <= 0.f) {
+	    *info = i__ + 2;
+	    goto L30;
+	}
+
+/*        Solve for e(i+2) and d(i+3). */
+
+	ei = e[i__ + 2];
+	e[i__ + 2] = ei / d__[i__ + 2];
+	d__[i__ + 3] -= e[i__ + 2] * ei;
+
+	if (d__[i__ + 3] <= 0.f) {
+	    *info = i__ + 3;
+	    goto L30;
+	}
+
+/*        Solve for e(i+3) and d(i+4). */
+
+	ei = e[i__ + 3];
+	e[i__ + 3] = ei / d__[i__ + 3];
+	d__[i__ + 4] -= e[i__ + 3] * ei;
+/* L20: */
+    }
+
+/*     Check d(n) for positive definiteness. */
+
+    if (d__[*n] <= 0.f) {
+	*info = *n;
+    }
+
+/*     Early-exit target: reached as soon as a non-positive pivot d(i) */
+/*     is found; INFO already records the offending index. */
+
+L30:
+    return 0;
+
+/*     End of SPTTRF */
+
+} /* spttrf_ */
diff --git a/contrib/libs/clapack/spttrs.c b/contrib/libs/clapack/spttrs.c
new file mode 100644
index 0000000000..bac3da4bec
--- /dev/null
+++ b/contrib/libs/clapack/spttrs.c
@@ -0,0 +1,156 @@
+/* spttrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int spttrs_(integer *n, integer *nrhs, real *d__, real *e,
+	real *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer j, jb, nb;
+    extern /* Subroutine */ int sptts2_(integer *, integer *, real *, real *,
+	    real *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SPTTRS solves a tridiagonal system of the form */
+/*     A * X = B */
+/*  using the L*D*L' factorization of A computed by SPTTRF.  D is a */
+/*  diagonal matrix specified in the vector D, L is a unit bidiagonal */
+/*  matrix whose subdiagonal is specified in the vector E, and X and B */
+/*  are N by NRHS matrices. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  N       (input) INTEGER */
+/*          The order of the tridiagonal matrix A.  N >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrix B.  NRHS >= 0. */
+
+/*  D       (input) REAL array, dimension (N) */
+/*          The n diagonal elements of the diagonal matrix D from the */
+/*          L*D*L' factorization of A. */
+
+/*  E       (input) REAL array, dimension (N-1) */
+/*          The (n-1) subdiagonal elements of the unit bidiagonal factor */
+/*          L from the L*D*L' factorization of A.  E can also be regarded */
+/*          as the superdiagonal of the unit bidiagonal factor U from the */
+/*          factorization A = U'*D*U. */
+
+/*  B       (input/output) REAL array, dimension (LDB,NRHS) */
+/*          On entry, the right hand side vectors B for the system of */
+/*          linear equations. */
+/*          On exit, the solution vectors, X. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0: successful exit */
+/*          < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input arguments. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*nrhs < 0) {
+	*info = -2;
+    } else if (*ldb < max(1,*n)) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SPTTRS", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+/*     Determine the number of right-hand sides to solve at a time. */
+/*     ILAENV supplies a tuned blocking factor NB; for a single RHS no */
+/*     blocking is needed. */
+
+    if (*nrhs == 1) {
+	nb = 1;
+    } else {
+/* Computing MAX */
+	i__1 = 1, i__2 = ilaenv_(&c__1, "SPTTRS", " ", n, nrhs, &c_n1, &c_n1);
+	nb = max(i__1,i__2);
+    }
+
+/*     Either solve all right-hand sides at once, or sweep over B in */
+/*     panels of at most NB columns, each handled by SPTTS2. */
+
+    if (nb >= *nrhs) {
+	sptts2_(n, nrhs, &d__[1], &e[1], &b[b_offset], ldb);
+    } else {
+	i__1 = *nrhs;
+	i__2 = nb;
+	for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+	    i__3 = *nrhs - j + 1;
+	    jb = min(i__3,nb);
+	    sptts2_(n, &jb, &d__[1], &e[1], &b[j * b_dim1 + 1], ldb);
+/* L10: */
+	}
+    }
+
+    return 0;
+
+/*     End of SPTTRS */
+
+} /* spttrs_ */
diff --git a/contrib/libs/clapack/sptts2.c b/contrib/libs/clapack/sptts2.c
new file mode 100644
index 0000000000..f67ca9f488
--- /dev/null
+++ b/contrib/libs/clapack/sptts2.c
@@ -0,0 +1,130 @@
+/* sptts2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sptts2_(integer *n, integer *nrhs, real *d__, real *e,
+	real *b, integer *ldb)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1, i__2;
+    real r__1;
+
+    /* Local variables */
+    integer i__, j;
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SPTTS2 solves a tridiagonal system of the form */
+/*     A * X = B */
+/*  using the L*D*L' factorization of A computed by SPTTRF.  D is a */
+/*  diagonal matrix specified in the vector D, L is a unit bidiagonal */
+/*  matrix whose subdiagonal is specified in the vector E, and X and B */
+/*  are N by NRHS matrices. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  N       (input) INTEGER */
+/*          The order of the tridiagonal matrix A.  N >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrix B.  NRHS >= 0. */
+
+/*  D       (input) REAL array, dimension (N) */
+/*          The n diagonal elements of the diagonal matrix D from the */
+/*          L*D*L' factorization of A. */
+
+/*  E       (input) REAL array, dimension (N-1) */
+/*          The (n-1) subdiagonal elements of the unit bidiagonal factor */
+/*          L from the L*D*L' factorization of A.  E can also be regarded */
+/*          as the superdiagonal of the unit bidiagonal factor U from the */
+/*          factorization A = U'*D*U. */
+
+/*  B       (input/output) REAL array, dimension (LDB,NRHS) */
+/*          On entry, the right hand side vectors B for the system of */
+/*          linear equations. */
+/*          On exit, the solution vectors, X. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Quick return if possible */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+
+/*     For n = 1 the system is diagonal: scale the single row of B by */
+/*     1/d(1) across all right-hand sides.  For n = 0 nothing to do. */
+
+    if (*n <= 1) {
+	if (*n == 1) {
+	    r__1 = 1.f / d__[1];
+	    sscal_(nrhs, &r__1, &b[b_offset], ldb);
+	}
+	return 0;
+    }
+
+/*     Solve A * X = B using the factorization A = L*D*L', */
+/*     overwriting each right hand side vector with its solution. */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+/*           Solve L * x = b.  (forward substitution down column j) */
+
+	i__2 = *n;
+	for (i__ = 2; i__ <= i__2; ++i__) {
+	    b[i__ + j * b_dim1] -= b[i__ - 1 + j * b_dim1] * e[i__ - 1];
+/* L10: */
+	}
+
+/*           Solve D * L' * x = b.  (diagonal scale, then backward */
+/*           substitution up column j) */
+
+	b[*n + j * b_dim1] /= d__[*n];
+	for (i__ = *n - 1; i__ >= 1; --i__) {
+	    b[i__ + j * b_dim1] = b[i__ + j * b_dim1] / d__[i__] - b[i__ + 1
+		    + j * b_dim1] * e[i__];
+/* L20: */
+	}
+/* L30: */
+    }
+
+    return 0;
+
+/*     End of SPTTS2 */
+
+} /* sptts2_ */
diff --git a/contrib/libs/clapack/srscl.c b/contrib/libs/clapack/srscl.c
new file mode 100644
index 0000000000..462c809242
--- /dev/null
+++ b/contrib/libs/clapack/srscl.c
@@ -0,0 +1,133 @@
+/* srscl.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int srscl_(integer *n, real *sa, real *sx, integer *incx)
+{
+    real mul, cden;
+    logical done;
+    real cnum, cden1, cnum1;
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+	    slabad_(real *, real *);
+    extern doublereal slamch_(char *);
+    real bignum, smlnum;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SRSCL multiplies an n-element real vector x by the real scalar 1/a. */
+/*  This is done without overflow or underflow as long as */
+/*  the final result x/a does not overflow or underflow. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  N       (input) INTEGER */
+/*          The number of components of the vector x. */
+
+/*  SA      (input) REAL */
+/*          The scalar a which is used to divide each component of x. */
+/*          SA must be >= 0, or the subroutine will divide by zero. */
+
+/*  SX      (input/output) REAL array, dimension */
+/*                         (1+(N-1)*abs(INCX)) */
+/*          The n-element vector x. */
+
+/*  INCX    (input) INTEGER */
+/*          The increment between successive values of the vector SX. */
+/*          > 0:  SX(1) = X(1) and SX(1+(i-1)*INCX) = x(i),     1< i<= n */
+
+/* ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Quick return if possible */
+
+    /* Parameter adjustments */
+    --sx;
+
+    /* Function Body */
+    if (*n <= 0) {
+	return 0;
+    }
+
+/*     Get machine parameters */
+
+    smlnum = slamch_("S");
+    bignum = 1.f / smlnum;
+    slabad_(&smlnum, &bignum);
+
+/*     Initialize the denominator to SA and the numerator to 1. */
+
+    cden = *sa;
+    cnum = 1.f;
+
+/*     Scale X in stages: each pass multiplies X by SMLNUM or BIGNUM to */
+/*     pull CNUM and CDEN toward each other, repeating until the final */
+/*     multiplier CNUM/CDEN can be formed without over- or underflow. */
+
+L10:
+    cden1 = cden * smlnum;
+    cnum1 = cnum / bignum;
+    if (dabs(cden1) > dabs(cnum) && cnum != 0.f) {
+
+/*        Pre-multiply X by SMLNUM if CDEN is large compared to CNUM. */
+
+	mul = smlnum;
+	done = FALSE_;
+	cden = cden1;
+    } else if (dabs(cnum1) > dabs(cden)) {
+
+/*        Pre-multiply X by BIGNUM if CDEN is small compared to CNUM. */
+
+	mul = bignum;
+	done = FALSE_;
+	cnum = cnum1;
+    } else {
+
+/*        Multiply X by CNUM / CDEN and return. */
+
+	mul = cnum / cden;
+	done = TRUE_;
+    }
+
+/*     Scale the vector X by MUL */
+
+    sscal_(n, &mul, &sx[1], incx);
+
+    if (! done) {
+	goto L10;
+    }
+
+    return 0;
+
+/*     End of SRSCL */
+
+} /* srscl_ */
diff --git a/contrib/libs/clapack/ssbev.c b/contrib/libs/clapack/ssbev.c
new file mode 100644
index 0000000000..bb0af95059
--- /dev/null
+++ b/contrib/libs/clapack/ssbev.c
@@ -0,0 +1,265 @@
+/* ssbev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b11 = 1.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int ssbev_(char *jobz, char *uplo, integer *n, integer *kd,
+	real *ab, integer *ldab, real *w, real *z__, integer *ldz, real *work,
+	 integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, z_dim1, z_offset, i__1;
+    real r__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    real eps;
+    integer inde;
+    real anrm;
+    integer imax;
+    real rmin, rmax, sigma;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+    logical lower, wantz;
+    integer iscale;
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real bignum;
+    extern doublereal slansb_(char *, char *, integer *, integer *, real *,
+	    integer *, real *);
+    extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+	    real *, integer *, integer *, real *, integer *, integer *);
+    integer indwrk;
+    extern /* Subroutine */ int ssbtrd_(char *, char *, integer *, integer *,
+	    real *, integer *, real *, real *, real *, integer *, real *,
+	    integer *), ssterf_(integer *, real *, real *,
+	    integer *);
+    real smlnum;
+    extern /* Subroutine */ int ssteqr_(char *, integer *, real *, real *,
+	    real *, integer *, real *, integer *);
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SSBEV computes all the eigenvalues and, optionally, eigenvectors of */
+/*  a real symmetric band matrix A. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOBZ    (input) CHARACTER*1 */
+/*          = 'N':  Compute eigenvalues only; */
+/*          = 'V':  Compute eigenvalues and eigenvectors. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangle of A is stored; */
+/*          = 'L':  Lower triangle of A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  KD      (input) INTEGER */
+/*          The number of superdiagonals of the matrix A if UPLO = 'U', */
+/*          or the number of subdiagonals if UPLO = 'L'.  KD >= 0. */
+
+/*  AB      (input/output) REAL array, dimension (LDAB, N) */
+/*          On entry, the upper or lower triangle of the symmetric band */
+/*          matrix A, stored in the first KD+1 rows of the array.  The */
+/*          j-th column of A is stored in the j-th column of the array AB */
+/*          as follows: */
+/*          if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/*          if UPLO = 'L', AB(1+i-j,j)    = A(i,j) for j<=i<=min(n,j+kd). */
+
+/*          On exit, AB is overwritten by values generated during the */
+/*          reduction to tridiagonal form.  If UPLO = 'U', the first */
+/*          superdiagonal and the diagonal of the tridiagonal matrix T */
+/*          are returned in rows KD and KD+1 of AB, and if UPLO = 'L', */
+/*          the diagonal and first subdiagonal of T are returned in the */
+/*          first two rows of AB. */
+
+/*  LDAB    (input) INTEGER */
+/*          The leading dimension of the array AB.  LDAB >= KD + 1. */
+
+/*  W       (output) REAL array, dimension (N) */
+/*          If INFO = 0, the eigenvalues in ascending order. */
+
+/*  Z       (output) REAL array, dimension (LDZ, N) */
+/*          If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/*          eigenvectors of the matrix A, with the i-th column of Z */
+/*          holding the eigenvector associated with W(i). */
+/*          If JOBZ = 'N', then Z is not referenced. */
+
+/*  LDZ     (input) INTEGER */
+/*          The leading dimension of the array Z.  LDZ >= 1, and if */
+/*          JOBZ = 'V', LDZ >= max(1,N). */
+
+/*  WORK    (workspace) REAL array, dimension (max(1,3*N-2)) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, the algorithm failed to converge; i */
+/*                off-diagonal elements of an intermediate tridiagonal */
+/*                form did not converge to zero. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    lower = lsame_(uplo, "L");
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (lower || lsame_(uplo, "U"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*kd < 0) {
+	*info = -4;
+    } else if (*ldab < *kd + 1) {
+	*info = -6;
+    } else if (*ldz < 1 || wantz && *ldz < *n) {
+	*info = -9;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SSBEV ", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     For a 1-by-1 matrix the single diagonal entry is the eigenvalue; */
+/*     its storage row in AB depends on UPLO. */
+
+    if (*n == 1) {
+	if (lower) {
+	    w[1] = ab[ab_dim1 + 1];
+	} else {
+	    w[1] = ab[*kd + 1 + ab_dim1];
+	}
+	if (wantz) {
+	    z__[z_dim1 + 1] = 1.f;
+	}
+	return 0;
+    }
+
+/*     Get machine constants. */
+
+    safmin = slamch_("Safe minimum");
+    eps = slamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1.f / smlnum;
+    rmin = sqrt(smlnum);
+    rmax = sqrt(bignum);
+
+/*     Scale matrix to allowable range, if necessary. */
+
+    anrm = slansb_("M", uplo, n, kd, &ab[ab_offset], ldab, &work[1]);
+    iscale = 0;
+    if (anrm > 0.f && anrm < rmin) {
+	iscale = 1;
+	sigma = rmin / anrm;
+    } else if (anrm > rmax) {
+	iscale = 1;
+	sigma = rmax / anrm;
+    }
+    if (iscale == 1) {
+	if (lower) {
+	    slascl_("B", kd, kd, &c_b11, &sigma, n, n, &ab[ab_offset], ldab,
+		    info);
+	} else {
+	    slascl_("Q", kd, kd, &c_b11, &sigma, n, n, &ab[ab_offset], ldab,
+		    info);
+	}
+    }
+
+/*     Call SSBTRD to reduce symmetric band matrix to tridiagonal form. */
+
+    inde = 1;
+    indwrk = inde + *n;
+    ssbtrd_(jobz, uplo, n, kd, &ab[ab_offset], ldab, &w[1], &work[inde], &z__[
+	    z_offset], ldz, &work[indwrk], &iinfo);
+
+/*     For eigenvalues only, call SSTERF.  For eigenvectors, call SSTEQR. */
+
+    if (! wantz) {
+	ssterf_(n, &w[1], &work[inde], info);
+    } else {
+	ssteqr_(jobz, n, &w[1], &work[inde], &z__[z_offset], ldz, &work[
+		indwrk], info);
+    }
+
+/*     If matrix was scaled, then rescale eigenvalues appropriately. */
+/*     On failure (INFO > 0) only the first INFO-1 eigenvalues are */
+/*     rescaled; the rest were not computed. */
+
+    if (iscale == 1) {
+	if (*info == 0) {
+	    imax = *n;
+	} else {
+	    imax = *info - 1;
+	}
+	r__1 = 1.f / sigma;
+	sscal_(&imax, &r__1, &w[1], &c__1);
+    }
+
+    return 0;
+
+/*     End of SSBEV */
+
+} /* ssbev_ */
diff --git a/contrib/libs/clapack/ssbevd.c b/contrib/libs/clapack/ssbevd.c
new file mode 100644
index 0000000000..7d58eaf197
--- /dev/null
+++ b/contrib/libs/clapack/ssbevd.c
@@ -0,0 +1,332 @@
+/* ssbevd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b11 = 1.f;
+static real c_b18 = 0.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int ssbevd_(char *jobz, char *uplo, integer *n, integer *kd,
+	real *ab, integer *ldab, real *w, real *z__, integer *ldz, real *work,
+	 integer *lwork, integer *iwork, integer *liwork, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, z_dim1, z_offset, i__1;
+    real r__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    real eps;
+    integer inde;
+    real anrm, rmin, rmax, sigma;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+	    sgemm_(char *, char *, integer *, integer *, integer *, real *,
+	    real *, integer *, real *, integer *, real *, real *, integer *);
+    integer lwmin;
+    logical lower, wantz;
+    integer indwk2, llwrk2, iscale;
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real bignum;
+    extern doublereal slansb_(char *, char *, integer *, integer *, real *,
+	    integer *, real *);
+    extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+	    real *, integer *, integer *, real *, integer *, integer *), sstedc_(char *, integer *, real *, real *, real *,
+	    integer *, real *, integer *, integer *, integer *, integer *), slacpy_(char *, integer *, integer *, real *, integer *,
+	    real *, integer *);
+    integer indwrk, liwmin;
+    extern /* Subroutine */ int ssbtrd_(char *, char *, integer *, integer *,
+	    real *, integer *, real *, real *, real *, integer *, real *,
+	    integer *), ssterf_(integer *, real *, real *,
+	    integer *);
+    real smlnum;
+    logical lquery;
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SSBEVD computes all the eigenvalues and, optionally, eigenvectors of */
+/*  a real symmetric band matrix A.  If eigenvectors are desired, it uses */
+/*  a divide and conquer algorithm. */
+
+/*  The divide and conquer algorithm makes very mild assumptions about */
+/*  floating point arithmetic.  It will work on machines with a guard */
+/*  digit in add/subtract, or on those binary machines without guard */
+/*  digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/*  Cray-2.  It could conceivably fail on hexadecimal or decimal machines */
+/*  without guard digits, but we know of none. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOBZ    (input) CHARACTER*1 */
+/*          = 'N':  Compute eigenvalues only; */
+/*          = 'V':  Compute eigenvalues and eigenvectors. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangle of A is stored; */
+/*          = 'L':  Lower triangle of A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  KD      (input) INTEGER */
+/*          The number of superdiagonals of the matrix A if UPLO = 'U', */
+/*          or the number of subdiagonals if UPLO = 'L'.  KD >= 0. */
+
+/*  AB      (input/output) REAL array, dimension (LDAB, N) */
+/*          On entry, the upper or lower triangle of the symmetric band */
+/*          matrix A, stored in the first KD+1 rows of the array.  The */
+/*          j-th column of A is stored in the j-th column of the array AB */
+/*          as follows: */
+/*          if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/*          if UPLO = 'L', AB(1+i-j,j)    = A(i,j) for j<=i<=min(n,j+kd). */
+
+/*          On exit, AB is overwritten by values generated during the */
+/*          reduction to tridiagonal form.  If UPLO = 'U', the first */
+/*          superdiagonal and the diagonal of the tridiagonal matrix T */
+/*          are returned in rows KD and KD+1 of AB, and if UPLO = 'L', */
+/*          the diagonal and first subdiagonal of T are returned in the */
+/*          first two rows of AB. */
+
+/*  LDAB    (input) INTEGER */
+/*          The leading dimension of the array AB.  LDAB >= KD + 1. */
+
+/*  W       (output) REAL array, dimension (N) */
+/*          If INFO = 0, the eigenvalues in ascending order. */
+
+/*  Z       (output) REAL array, dimension (LDZ, N) */
+/*          If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/*          eigenvectors of the matrix A, with the i-th column of Z */
+/*          holding the eigenvector associated with W(i). */
+/*          If JOBZ = 'N', then Z is not referenced. */
+
+/*  LDZ     (input) INTEGER */
+/*          The leading dimension of the array Z.  LDZ >= 1, and if */
+/*          JOBZ = 'V', LDZ >= max(1,N). */
+
+/*  WORK    (workspace/output) REAL array, */
+/*                                         dimension (LWORK) */
+/*          On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/*  LWORK   (input) INTEGER */
+/*          The dimension of the array WORK. */
+/*          IF N <= 1,                LWORK must be at least 1. */
+/*          If JOBZ  = 'N' and N > 2, LWORK must be at least 2*N. */
+/*          If JOBZ  = 'V' and N > 2, LWORK must be at least */
+/*                         ( 1 + 5*N + 2*N**2 ). */
+
+/*          If LWORK = -1, then a workspace query is assumed; the routine */
+/*          only calculates the optimal sizes of the WORK and IWORK */
+/*          arrays, returns these values as the first entries of the WORK */
+/*          and IWORK arrays, and no error message related to LWORK or */
+/*          LIWORK is issued by XERBLA. */
+
+/*  IWORK   (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/*          On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/*  LIWORK  (input) INTEGER */
+/*          The dimension of the array LIWORK. */
+/*          If JOBZ  = 'N' or N <= 1, LIWORK must be at least 1. */
+/*          If JOBZ  = 'V' and N > 2, LIWORK must be at least 3 + 5*N. */
+
+/*          If LIWORK = -1, then a workspace query is assumed; the */
+/*          routine only calculates the optimal sizes of the WORK and */
+/*          IWORK arrays, returns these values as the first entries of */
+/*          the WORK and IWORK arrays, and no error message related to */
+/*          LWORK or LIWORK is issued by XERBLA. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, the algorithm failed to converge; i */
+/*                off-diagonal elements of an intermediate tridiagonal */
+/*                form did not converge to zero. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    lower = lsame_(uplo, "L");
+    lquery = *lwork == -1 || *liwork == -1;
+
+/*     Minimum workspace sizes depend on whether eigenvectors are */
+/*     wanted: the divide-and-conquer path needs O(N**2) real storage. */
+
+    *info = 0;
+    if (*n <= 1) {
+	liwmin = 1;
+	lwmin = 1;
+    } else {
+	if (wantz) {
+	    liwmin = *n * 5 + 3;
+/* Computing 2nd power */
+	    i__1 = *n;
+	    lwmin = *n * 5 + 1 + (i__1 * i__1 << 1);
+	} else {
+	    liwmin = 1;
+	    lwmin = *n << 1;
+	}
+    }
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (lower || lsame_(uplo, "U"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*kd < 0) {
+	*info = -4;
+    } else if (*ldab < *kd + 1) {
+	*info = -6;
+    } else if (*ldz < 1 || wantz && *ldz < *n) {
+	*info = -9;
+    }
+
+    if (*info == 0) {
+	work[1] = (real) lwmin;
+	iwork[1] = liwmin;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -11;
+	} else if (*liwork < liwmin && ! lquery) {
+	    *info = -13;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SSBEVD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (*n == 1) {
+	w[1] = ab[ab_dim1 + 1];
+	if (wantz) {
+	    z__[z_dim1 + 1] = 1.f;
+	}
+	return 0;
+    }
+
+/*     Get machine constants. */
+
+    safmin = slamch_("Safe minimum");
+    eps = slamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1.f / smlnum;
+    rmin = sqrt(smlnum);
+    rmax = sqrt(bignum);
+
+/*     Scale matrix to allowable range, if necessary. */
+
+    anrm = slansb_("M", uplo, n, kd, &ab[ab_offset], ldab, &work[1]);
+    iscale = 0;
+    if (anrm > 0.f && anrm < rmin) {
+	iscale = 1;
+	sigma = rmin / anrm;
+    } else if (anrm > rmax) {
+	iscale = 1;
+	sigma = rmax / anrm;
+    }
+    if (iscale == 1) {
+	if (lower) {
+	    slascl_("B", kd, kd, &c_b11, &sigma, n, n, &ab[ab_offset], ldab,
+		    info);
+	} else {
+	    slascl_("Q", kd, kd, &c_b11, &sigma, n, n, &ab[ab_offset], ldab,
+		    info);
+	}
+    }
+
+/*     Call SSBTRD to reduce symmetric band matrix to tridiagonal form. */
+
+/*     Workspace layout: WORK(INDE..INDE+N-1) receives the tridiagonal */
+/*     off-diagonal from SSBTRD, WORK(INDWRK) is an N-by-N matrix used */
+/*     by SSTEDC, and WORK(INDWK2) is the remaining scratch of length */
+/*     LLWRK2. */
+
+    inde = 1;
+    indwrk = inde + *n;
+    indwk2 = indwrk + *n * *n;
+    llwrk2 = *lwork - indwk2 + 1;
+    ssbtrd_(jobz, uplo, n, kd, &ab[ab_offset], ldab, &w[1], &work[inde], &z__[
+	    z_offset], ldz, &work[indwrk], &iinfo);
+
+/*     For eigenvalues only, call SSTERF.  For eigenvectors, call SSTEDC. */
+/*     In the eigenvector case, SGEMM multiplies the band-reduction */
+/*     transformation held in Z by the tridiagonal eigenvectors produced */
+/*     by SSTEDC, and SLACPY copies the product back into Z. */
+
+    if (! wantz) {
+	ssterf_(n, &w[1], &work[inde], info);
+    } else {
+	sstedc_("I", n, &w[1], &work[inde], &work[indwrk], n, &work[indwk2], &
+		llwrk2, &iwork[1], liwork, info);
+	sgemm_("N", "N", n, n, n, &c_b11, &z__[z_offset], ldz, &work[indwrk],
+		n, &c_b18, &work[indwk2], n);
+	slacpy_("A", n, n, &work[indwk2], n, &z__[z_offset], ldz);
+    }
+
+/*     If matrix was scaled, then rescale eigenvalues appropriately. */
+
+    if (iscale == 1) {
+	r__1 = 1.f / sigma;
+	sscal_(n, &r__1, &w[1], &c__1);
+    }
+
+    work[1] = (real) lwmin;
+    iwork[1] = liwmin;
+    return 0;
+
+/*     End of SSBEVD */
+
+} /* ssbevd_ */
diff --git a/contrib/libs/clapack/ssbevx.c b/contrib/libs/clapack/ssbevx.c
new file mode 100644
index 0000000000..d9d1ce4234
--- /dev/null
+++ b/contrib/libs/clapack/ssbevx.c
@@ -0,0 +1,513 @@
+/* ssbevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b14 = 1.f;
+static integer c__1 = 1;
+static real c_b34 = 0.f;
+
+/* Subroutine */ int ssbevx_(char *jobz, char *range, char *uplo, integer *n,
+ integer *kd, real *ab, integer *ldab, real *q, integer *ldq, real *vl,
+ real *vu, integer *il, integer *iu, real *abstol, integer *m, real *
+ w, real *z__, integer *ldz, real *work, integer *iwork, integer *
+ ifail, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, q_dim1, q_offset, z_dim1, z_offset, i__1,
+ i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, jj;
+ real eps, vll, vuu, tmp1;
+ integer indd, inde;
+ real anrm;
+ integer imax;
+ real rmin, rmax;
+ logical test;
+ integer itmp1, indee;
+ real sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ char order[1];
+ extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *);
+ logical lower;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), sswap_(integer *, real *, integer *, real *, integer *
+);
+ logical wantz, alleig, indeig;
+ integer iscale, indibl;
+ logical valeig;
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real abstll, bignum;
+ extern doublereal slansb_(char *, char *, integer *, integer *, real *,
+ integer *, real *);
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ integer indisp, indiwo;
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *);
+ integer indwrk;
+ extern /* Subroutine */ int ssbtrd_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, real *, integer *, real *,
+ integer *), sstein_(integer *, real *, real *,
+ integer *, real *, integer *, integer *, real *, integer *, real *
+, integer *, integer *, integer *), ssterf_(integer *, real *,
+ real *, integer *);
+ integer nsplit;
+ real smlnum;
+ extern /* Subroutine */ int sstebz_(char *, char *, integer *, real *,
+ real *, integer *, integer *, real *, real *, real *, integer *,
+ integer *, real *, integer *, integer *, real *, integer *,
+ integer *), ssteqr_(char *, integer *, real *,
+ real *, real *, integer *, real *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSBEVX computes selected eigenvalues and, optionally, eigenvectors */
+/* of a real symmetric band matrix A. Eigenvalues and eigenvectors can */
+/* be selected by specifying either a range of values or a range of */
+/* indices for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found; */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found; */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) REAL array, dimension (LDAB, N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, AB is overwritten by values generated during the */
+/* reduction to tridiagonal form. If UPLO = 'U', the first */
+/* superdiagonal and the diagonal of the tridiagonal matrix T */
+/* are returned in rows KD and KD+1 of AB, and if UPLO = 'L', */
+/* the diagonal and first subdiagonal of T are returned in the */
+/* first two rows of AB. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD + 1. */
+
+/* Q (output) REAL array, dimension (LDQ, N) */
+/* If JOBZ = 'V', the N-by-N orthogonal matrix used in the */
+/* reduction to tridiagonal form. */
+/* If JOBZ = 'N', the array Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. If JOBZ = 'V', then */
+/* LDQ >= max(1,N). */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) REAL */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing AB to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*SLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*SLAMCH('S'). */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices */
+/* with Guaranteed High Relative Accuracy," by Demmel and */
+/* Kahan, LAPACK Working Note #3. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) REAL array, dimension (N) */
+/* The first M elements contain the selected eigenvalues in */
+/* ascending order. */
+
+/* Z (output) REAL array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and the */
+/* index of the eigenvector is returned in IFAIL. */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) REAL array, dimension (7*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, then i eigenvectors failed to converge. */
+/* Their indices are stored in array IFAIL. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+ lower = lsame_(uplo, "L");
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -2;
+ } else if (! (lower || lsame_(uplo, "U"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*kd < 0) {
+ *info = -5;
+ } else if (*ldab < *kd + 1) {
+ *info = -7;
+ } else if (wantz && *ldq < max(1,*n)) {
+ *info = -9;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -11;
+ }
+ } else if (indeig) {
+ if (*il < 1 || *il > max(1,*n)) {
+ *info = -12;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -13;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -18;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSBEVX", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ *m = 1;
+ if (lower) {
+ tmp1 = ab[ab_dim1 + 1];
+ } else {
+ tmp1 = ab[*kd + 1 + ab_dim1];
+ }
+ if (valeig) {
+ if (! (*vl < tmp1 && *vu >= tmp1)) {
+ *m = 0;
+ }
+ }
+ if (*m == 1) {
+ w[1] = tmp1;
+ if (wantz) {
+ z__[z_dim1 + 1] = 1.f;
+ }
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = slamch_("Safe minimum");
+ eps = slamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1.f / smlnum;
+ rmin = sqrt(smlnum);
+/* Computing MIN */
+ r__1 = sqrt(bignum), r__2 = 1.f / sqrt(sqrt(safmin));
+ rmax = dmin(r__1,r__2);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ iscale = 0;
+ abstll = *abstol;
+ if (valeig) {
+ vll = *vl;
+ vuu = *vu;
+ } else {
+ vll = 0.f;
+ vuu = 0.f;
+ }
+ anrm = slansb_("M", uplo, n, kd, &ab[ab_offset], ldab, &work[1]);
+ if (anrm > 0.f && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ if (lower) {
+ slascl_("B", kd, kd, &c_b14, &sigma, n, n, &ab[ab_offset], ldab,
+ info);
+ } else {
+ slascl_("Q", kd, kd, &c_b14, &sigma, n, n, &ab[ab_offset], ldab,
+ info);
+ }
+ if (*abstol > 0.f) {
+ abstll = *abstol * sigma;
+ }
+ if (valeig) {
+ vll = *vl * sigma;
+ vuu = *vu * sigma;
+ }
+ }
+
+/* Call SSBTRD to reduce symmetric band matrix to tridiagonal form. */
+
+ indd = 1;
+ inde = indd + *n;
+ indwrk = inde + *n;
+ ssbtrd_(jobz, uplo, n, kd, &ab[ab_offset], ldab, &work[indd], &work[inde],
+ &q[q_offset], ldq, &work[indwrk], &iinfo);
+
+/* If all eigenvalues are desired and ABSTOL is less than or equal */
+/* to zero, then call SSTERF or SSTEQR. If this fails for some */
+/* eigenvalue, then try SSTEBZ. */
+
+ test = FALSE_;
+ if (indeig) {
+ if (*il == 1 && *iu == *n) {
+ test = TRUE_;
+ }
+ }
+ if ((alleig || test) && *abstol <= 0.f) {
+ scopy_(n, &work[indd], &c__1, &w[1], &c__1);
+ indee = indwrk + (*n << 1);
+ if (! wantz) {
+ i__1 = *n - 1;
+ scopy_(&i__1, &work[inde], &c__1, &work[indee], &c__1);
+ ssterf_(n, &w[1], &work[indee], info);
+ } else {
+ slacpy_("A", n, n, &q[q_offset], ldq, &z__[z_offset], ldz);
+ i__1 = *n - 1;
+ scopy_(&i__1, &work[inde], &c__1, &work[indee], &c__1);
+ ssteqr_(jobz, n, &w[1], &work[indee], &z__[z_offset], ldz, &work[
+ indwrk], info);
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ifail[i__] = 0;
+/* L10: */
+ }
+ }
+ }
+ if (*info == 0) {
+ *m = *n;
+ goto L30;
+ }
+ *info = 0;
+ }
+
+/* Otherwise, call SSTEBZ and, if eigenvectors are desired, SSTEIN. */
+
+ if (wantz) {
+ *(unsigned char *)order = 'B';
+ } else {
+ *(unsigned char *)order = 'E';
+ }
+ indibl = 1;
+ indisp = indibl + *n;
+ indiwo = indisp + *n;
+ sstebz_(range, order, n, &vll, &vuu, il, iu, &abstll, &work[indd], &work[
+ inde], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &work[
+ indwrk], &iwork[indiwo], info);
+
+ if (wantz) {
+ sstein_(n, &work[indd], &work[inde], m, &w[1], &iwork[indibl], &iwork[
+ indisp], &z__[z_offset], ldz, &work[indwrk], &iwork[indiwo], &
+ ifail[1], info);
+
+/* Apply orthogonal matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by SSTEIN. */
+
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ scopy_(n, &z__[j * z_dim1 + 1], &c__1, &work[1], &c__1);
+ sgemv_("N", n, n, &c_b14, &q[q_offset], ldq, &work[1], &c__1, &
+ c_b34, &z__[j * z_dim1 + 1], &c__1);
+/* L20: */
+ }
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+L30:
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *m;
+ } else {
+ imax = *info - 1;
+ }
+ r__1 = 1.f / sigma;
+ sscal_(&imax, &r__1, &w[1], &c__1);
+ }
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+ if (wantz) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = 0;
+ tmp1 = w[j];
+ i__2 = *m;
+ for (jj = j + 1; jj <= i__2; ++jj) {
+ if (w[jj] < tmp1) {
+ i__ = jj;
+ tmp1 = w[jj];
+ }
+/* L40: */
+ }
+
+ if (i__ != 0) {
+ itmp1 = iwork[indibl + i__ - 1];
+ w[i__] = w[j];
+ iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+ w[j] = tmp1;
+ iwork[indibl + j - 1] = itmp1;
+ sswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+ &c__1);
+ if (*info != 0) {
+ itmp1 = ifail[i__];
+ ifail[i__] = ifail[j];
+ ifail[j] = itmp1;
+ }
+ }
+/* L50: */
+ }
+ }
+
+ return 0;
+
+/* End of SSBEVX */
+
+} /* ssbevx_ */
diff --git a/contrib/libs/clapack/ssbgst.c b/contrib/libs/clapack/ssbgst.c
new file mode 100644
index 0000000000..1698683ef4
--- /dev/null
+++ b/contrib/libs/clapack/ssbgst.c
@@ -0,0 +1,1752 @@
+/* ssbgst.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b8 = 0.f;
+static real c_b9 = 1.f;
+static integer c__1 = 1;
+static real c_b20 = -1.f;
+
+/* Subroutine */ int ssbgst_(char *vect, char *uplo, integer *n, integer *ka,
+ integer *kb, real *ab, integer *ldab, real *bb, integer *ldbb, real *
+ x, integer *ldx, real *work, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, bb_dim1, bb_offset, x_dim1, x_offset, i__1,
+ i__2, i__3, i__4;
+ real r__1;
+
+ /* Local variables */
+ integer i__, j, k, l, m;
+ real t;
+ integer i0, i1, i2, j1, j2;
+ real ra;
+ integer nr, nx, ka1, kb1;
+ real ra1;
+ integer j1t, j2t;
+ real bii;
+ integer kbt, nrt, inca;
+ extern /* Subroutine */ int sger_(integer *, integer *, real *, real *,
+ integer *, real *, integer *, real *, integer *), srot_(integer *,
+ real *, integer *, real *, integer *, real *, real *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ logical upper, wantx;
+ extern /* Subroutine */ int slar2v_(integer *, real *, real *, real *,
+ integer *, real *, real *, integer *), xerbla_(char *, integer *);
+ logical update;
+ extern /* Subroutine */ int slaset_(char *, integer *, integer *, real *,
+ real *, real *, integer *), slartg_(real *, real *, real *
+, real *, real *), slargv_(integer *, real *, integer *, real *,
+ integer *, real *, integer *), slartv_(integer *, real *, integer
+ *, real *, integer *, real *, real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSBGST reduces a real symmetric-definite banded generalized */
+/* eigenproblem A*x = lambda*B*x to standard form C*y = lambda*y, */
+/* such that C has the same bandwidth as A. */
+
+/* B must have been previously factorized as S**T*S by SPBSTF, using a */
+/* split Cholesky factorization. A is overwritten by C = X**T*A*X, where */
+/* X = S**(-1)*Q and Q is an orthogonal matrix chosen to preserve the */
+/* bandwidth of A. */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* = 'N': do not form the transformation matrix X; */
+/* = 'V': form X. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* KA (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KA >= 0. */
+
+/* KB (input) INTEGER */
+/* The number of superdiagonals of the matrix B if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KA >= KB >= 0. */
+
+/* AB (input/output) REAL array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first ka+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(ka+1+i-j,j) = A(i,j) for max(1,j-ka)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+ka). */
+
+/* On exit, the transformed matrix X**T*A*X, stored in the same */
+/* format as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KA+1. */
+
+/* BB (input) REAL array, dimension (LDBB,N) */
+/* The banded factor S from the split Cholesky factorization of */
+/* B, as returned by SPBSTF, stored in the first KB+1 rows of */
+/* the array. */
+
+/* LDBB (input) INTEGER */
+/* The leading dimension of the array BB. LDBB >= KB+1. */
+
+/* X (output) REAL array, dimension (LDX,N) */
+/* If VECT = 'V', the n-by-n matrix X. */
+/* If VECT = 'N', the array X is not referenced. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. */
+/* LDX >= max(1,N) if VECT = 'V'; LDX >= 1 otherwise. */
+
+/* WORK (workspace) REAL array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ bb_dim1 = *ldbb;
+ bb_offset = 1 + bb_dim1;
+ bb -= bb_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --work;
+
+ /* Function Body */
+ wantx = lsame_(vect, "V");
+ upper = lsame_(uplo, "U");
+ ka1 = *ka + 1;
+ kb1 = *kb + 1;
+ *info = 0;
+ if (! wantx && ! lsame_(vect, "N")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ka < 0) {
+ *info = -4;
+ } else if (*kb < 0 || *kb > *ka) {
+ *info = -5;
+ } else if (*ldab < *ka + 1) {
+ *info = -7;
+ } else if (*ldbb < *kb + 1) {
+ *info = -9;
+ } else if (*ldx < 1 || wantx && *ldx < max(1,*n)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSBGST", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ inca = *ldab * ka1;
+
+/* Initialize X to the unit matrix, if needed */
+
+ if (wantx) {
+ slaset_("Full", n, n, &c_b8, &c_b9, &x[x_offset], ldx);
+ }
+
+/* Set M to the splitting point m. It must be the same value as is */
+/* used in SPBSTF. The chosen value allows the arrays WORK and RWORK */
+/* to be of dimension (N). */
+
+ m = (*n + *kb) / 2;
+
+/* The routine works in two phases, corresponding to the two halves */
+/* of the split Cholesky factorization of B as S**T*S where */
+
+/* S = ( U ) */
+/* ( M L ) */
+
+/* with U upper triangular of order m, and L lower triangular of */
+/* order n-m. S has the same bandwidth as B. */
+
+/* S is treated as a product of elementary matrices: */
+
+/* S = S(m)*S(m-1)*...*S(2)*S(1)*S(m+1)*S(m+2)*...*S(n-1)*S(n) */
+
+/* where S(i) is determined by the i-th row of S. */
+
+/* In phase 1, the index i takes the values n, n-1, ... , m+1; */
+/* in phase 2, it takes the values 1, 2, ... , m. */
+
+/* For each value of i, the current matrix A is updated by forming */
+/* inv(S(i))**T*A*inv(S(i)). This creates a triangular bulge outside */
+/* the band of A. The bulge is then pushed down toward the bottom of */
+/* A in phase 1, and up toward the top of A in phase 2, by applying */
+/* plane rotations. */
+
+/* There are kb*(kb+1)/2 elements in the bulge, but at most 2*kb-1 */
+/* of them are linearly independent, so annihilating a bulge requires */
+/* only 2*kb-1 plane rotations. The rotations are divided into a 1st */
+/* set of kb-1 rotations, and a 2nd set of kb rotations. */
+
+/* Wherever possible, rotations are generated and applied in vector */
+/* operations of length NR between the indices J1 and J2 (sometimes */
+/* replaced by modified values NRT, J1T or J2T). */
+
+/* The cosines and sines of the rotations are stored in the array */
+/* WORK. The cosines of the 1st set of rotations are stored in */
+/* elements n+2:n+m-kb-1 and the sines of the 1st set in elements */
+/* 2:m-kb-1; the cosines of the 2nd set are stored in elements */
+/* n+m-kb+1:2*n and the sines of the second set in elements m-kb+1:n. */
+
+/* The bulges are not formed explicitly; nonzero elements outside the */
+/* band are created only when they are required for generating new */
+/* rotations; they are stored in the array WORK, in positions where */
+/* they are later overwritten by the sines of the rotations which */
+/* annihilate them. */
+
+/* **************************** Phase 1 ***************************** */
+
+/* The logical structure of this phase is: */
+
+/* UPDATE = .TRUE. */
+/* DO I = N, M + 1, -1 */
+/* use S(i) to update A and create a new bulge */
+/* apply rotations to push all bulges KA positions downward */
+/* END DO */
+/* UPDATE = .FALSE. */
+/* DO I = M + KA + 1, N - 1 */
+/* apply rotations to push all bulges KA positions downward */
+/* END DO */
+
+/* To avoid duplicating code, the two loops are merged. */
+
+ update = TRUE_;
+ i__ = *n + 1;
+L10:
+ if (update) {
+ --i__;
+/* Computing MIN */
+ i__1 = *kb, i__2 = i__ - 1;
+ kbt = min(i__1,i__2);
+ i0 = i__ - 1;
+/* Computing MIN */
+ i__1 = *n, i__2 = i__ + *ka;
+ i1 = min(i__1,i__2);
+ i2 = i__ - kbt + ka1;
+ if (i__ < m + 1) {
+ update = FALSE_;
+ ++i__;
+ i0 = m;
+ if (*ka == 0) {
+ goto L480;
+ }
+ goto L10;
+ }
+ } else {
+ i__ += *ka;
+ if (i__ > *n - 1) {
+ goto L480;
+ }
+ }
+
+ if (upper) {
+
+/* Transform A, working with the upper triangle */
+
+ if (update) {
+
+/* Form inv(S(i))**T * A * inv(S(i)) */
+
+ bii = bb[kb1 + i__ * bb_dim1];
+ i__1 = i1;
+ for (j = i__; j <= i__1; ++j) {
+ ab[i__ - j + ka1 + j * ab_dim1] /= bii;
+/* L20: */
+ }
+/* Computing MAX */
+ i__1 = 1, i__2 = i__ - *ka;
+ i__3 = i__;
+ for (j = max(i__1,i__2); j <= i__3; ++j) {
+ ab[j - i__ + ka1 + i__ * ab_dim1] /= bii;
+/* L30: */
+ }
+ i__3 = i__ - 1;
+ for (k = i__ - kbt; k <= i__3; ++k) {
+ i__1 = k;
+ for (j = i__ - kbt; j <= i__1; ++j) {
+ ab[j - k + ka1 + k * ab_dim1] = ab[j - k + ka1 + k *
+ ab_dim1] - bb[j - i__ + kb1 + i__ * bb_dim1] * ab[
+ k - i__ + ka1 + i__ * ab_dim1] - bb[k - i__ + kb1
+ + i__ * bb_dim1] * ab[j - i__ + ka1 + i__ *
+ ab_dim1] + ab[ka1 + i__ * ab_dim1] * bb[j - i__ +
+ kb1 + i__ * bb_dim1] * bb[k - i__ + kb1 + i__ *
+ bb_dim1];
+/* L40: */
+ }
+/* Computing MAX */
+ i__1 = 1, i__2 = i__ - *ka;
+ i__4 = i__ - kbt - 1;
+ for (j = max(i__1,i__2); j <= i__4; ++j) {
+ ab[j - k + ka1 + k * ab_dim1] -= bb[k - i__ + kb1 + i__ *
+ bb_dim1] * ab[j - i__ + ka1 + i__ * ab_dim1];
+/* L50: */
+ }
+/* L60: */
+ }
+ i__3 = i1;
+ for (j = i__; j <= i__3; ++j) {
+/* Computing MAX */
+ i__4 = j - *ka, i__1 = i__ - kbt;
+ i__2 = i__ - 1;
+ for (k = max(i__4,i__1); k <= i__2; ++k) {
+ ab[k - j + ka1 + j * ab_dim1] -= bb[k - i__ + kb1 + i__ *
+ bb_dim1] * ab[i__ - j + ka1 + j * ab_dim1];
+/* L70: */
+ }
+/* L80: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by inv(S(i)) */
+
+ i__3 = *n - m;
+ r__1 = 1.f / bii;
+ sscal_(&i__3, &r__1, &x[m + 1 + i__ * x_dim1], &c__1);
+ if (kbt > 0) {
+ i__3 = *n - m;
+ sger_(&i__3, &kbt, &c_b20, &x[m + 1 + i__ * x_dim1], &
+ c__1, &bb[kb1 - kbt + i__ * bb_dim1], &c__1, &x[m
+ + 1 + (i__ - kbt) * x_dim1], ldx);
+ }
+ }
+
+/* store a(i,i1) in RA1 for use in next loop over K */
+
+ ra1 = ab[i__ - i1 + ka1 + i1 * ab_dim1];
+ }
+
+/* Generate and apply vectors of rotations to chase all the */
+/* existing bulges KA positions down toward the bottom of the */
+/* band */
+
+ i__3 = *kb - 1;
+ for (k = 1; k <= i__3; ++k) {
+ if (update) {
+
+/* Determine the rotations which would annihilate the bulge */
+/* which has in theory just been created */
+
+ if (i__ - k + *ka < *n && i__ - k > 1) {
+
+/* generate rotation to annihilate a(i,i-k+ka+1) */
+
+ slartg_(&ab[k + 1 + (i__ - k + *ka) * ab_dim1], &ra1, &
+ work[*n + i__ - k + *ka - m], &work[i__ - k + *ka
+ - m], &ra);
+
+/* create nonzero element a(i-k,i-k+ka+1) outside the */
+/* band and store it in WORK(i-k) */
+
+ t = -bb[kb1 - k + i__ * bb_dim1] * ra1;
+ work[i__ - k] = work[*n + i__ - k + *ka - m] * t - work[
+ i__ - k + *ka - m] * ab[(i__ - k + *ka) * ab_dim1
+ + 1];
+ ab[(i__ - k + *ka) * ab_dim1 + 1] = work[i__ - k + *ka -
+ m] * t + work[*n + i__ - k + *ka - m] * ab[(i__ -
+ k + *ka) * ab_dim1 + 1];
+ ra1 = ra;
+ }
+ }
+/* Computing MAX */
+ i__2 = 1, i__4 = k - i0 + 2;
+ j2 = i__ - k - 1 + max(i__2,i__4) * ka1;
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ if (update) {
+/* Computing MAX */
+ i__2 = j2, i__4 = i__ + (*ka << 1) - k + 1;
+ j2t = max(i__2,i__4);
+ } else {
+ j2t = j2;
+ }
+ nrt = (*n - j2t + *ka) / ka1;
+ i__2 = j1;
+ i__4 = ka1;
+ for (j = j2t; i__4 < 0 ? j >= i__2 : j <= i__2; j += i__4) {
+
+/* create nonzero element a(j-ka,j+1) outside the band */
+/* and store it in WORK(j-m) */
+
+ work[j - m] *= ab[(j + 1) * ab_dim1 + 1];
+ ab[(j + 1) * ab_dim1 + 1] = work[*n + j - m] * ab[(j + 1) *
+ ab_dim1 + 1];
+/* L90: */
+ }
+
+/* generate rotations in 1st set to annihilate elements which */
+/* have been created outside the band */
+
+ if (nrt > 0) {
+ slargv_(&nrt, &ab[j2t * ab_dim1 + 1], &inca, &work[j2t - m], &
+ ka1, &work[*n + j2t - m], &ka1);
+ }
+ if (nr > 0) {
+
+/* apply rotations in 1st set from the right */
+
+ i__4 = *ka - 1;
+ for (l = 1; l <= i__4; ++l) {
+ slartv_(&nr, &ab[ka1 - l + j2 * ab_dim1], &inca, &ab[*ka
+ - l + (j2 + 1) * ab_dim1], &inca, &work[*n + j2 -
+ m], &work[j2 - m], &ka1);
+/* L100: */
+ }
+
+/* apply rotations in 1st set from both sides to diagonal */
+/* blocks */
+
+ slar2v_(&nr, &ab[ka1 + j2 * ab_dim1], &ab[ka1 + (j2 + 1) *
+ ab_dim1], &ab[*ka + (j2 + 1) * ab_dim1], &inca, &work[
+ *n + j2 - m], &work[j2 - m], &ka1);
+
+ }
+
+/* start applying rotations in 1st set from the left */
+
+ i__4 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__4; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ slartv_(&nrt, &ab[l + (j2 + ka1 - l) * ab_dim1], &inca, &
+ ab[l + 1 + (j2 + ka1 - l) * ab_dim1], &inca, &
+ work[*n + j2 - m], &work[j2 - m], &ka1);
+ }
+/* L110: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 1st set */
+
+ i__4 = j1;
+ i__2 = ka1;
+ for (j = j2; i__2 < 0 ? j >= i__4 : j <= i__4; j += i__2) {
+ i__1 = *n - m;
+ srot_(&i__1, &x[m + 1 + j * x_dim1], &c__1, &x[m + 1 + (j
+ + 1) * x_dim1], &c__1, &work[*n + j - m], &work[j
+ - m]);
+/* L120: */
+ }
+ }
+/* L130: */
+ }
+
+ if (update) {
+ if (i2 <= *n && kbt > 0) {
+
+/* create nonzero element a(i-kbt,i-kbt+ka+1) outside the */
+/* band and store it in WORK(i-kbt) */
+
+ work[i__ - kbt] = -bb[kb1 - kbt + i__ * bb_dim1] * ra1;
+ }
+ }
+
+ for (k = *kb; k >= 1; --k) {
+ if (update) {
+/* Computing MAX */
+ i__3 = 2, i__2 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__3,i__2) * ka1;
+ } else {
+/* Computing MAX */
+ i__3 = 1, i__2 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__3,i__2) * ka1;
+ }
+
+/* finish applying rotations in 2nd set from the left */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (*n - j2 + *ka + l) / ka1;
+ if (nrt > 0) {
+ slartv_(&nrt, &ab[l + (j2 - l + 1) * ab_dim1], &inca, &ab[
+ l + 1 + (j2 - l + 1) * ab_dim1], &inca, &work[*n
+ + j2 - *ka], &work[j2 - *ka], &ka1);
+ }
+/* L140: */
+ }
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ i__3 = j2;
+ i__2 = -ka1;
+ for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j += i__2) {
+ work[j] = work[j - *ka];
+ work[*n + j] = work[*n + j - *ka];
+/* L150: */
+ }
+ i__2 = j1;
+ i__3 = ka1;
+ for (j = j2; i__3 < 0 ? j >= i__2 : j <= i__2; j += i__3) {
+
+/* create nonzero element a(j-ka,j+1) outside the band */
+/* and store it in WORK(j) */
+
+ work[j] *= ab[(j + 1) * ab_dim1 + 1];
+ ab[(j + 1) * ab_dim1 + 1] = work[*n + j] * ab[(j + 1) *
+ ab_dim1 + 1];
+/* L160: */
+ }
+ if (update) {
+ if (i__ - k < *n - *ka && k <= kbt) {
+ work[i__ - k + *ka] = work[i__ - k];
+ }
+ }
+/* L170: */
+ }
+
+ for (k = *kb; k >= 1; --k) {
+/* Computing MAX */
+ i__3 = 1, i__2 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__3,i__2) * ka1;
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ if (nr > 0) {
+
+/* generate rotations in 2nd set to annihilate elements */
+/* which have been created outside the band */
+
+ slargv_(&nr, &ab[j2 * ab_dim1 + 1], &inca, &work[j2], &ka1, &
+ work[*n + j2], &ka1);
+
+/* apply rotations in 2nd set from the right */
+
+ i__3 = *ka - 1;
+ for (l = 1; l <= i__3; ++l) {
+ slartv_(&nr, &ab[ka1 - l + j2 * ab_dim1], &inca, &ab[*ka
+ - l + (j2 + 1) * ab_dim1], &inca, &work[*n + j2],
+ &work[j2], &ka1);
+/* L180: */
+ }
+
+/* apply rotations in 2nd set from both sides to diagonal */
+/* blocks */
+
+ slar2v_(&nr, &ab[ka1 + j2 * ab_dim1], &ab[ka1 + (j2 + 1) *
+ ab_dim1], &ab[*ka + (j2 + 1) * ab_dim1], &inca, &work[
+ *n + j2], &work[j2], &ka1);
+
+ }
+
+/* start applying rotations in 2nd set from the left */
+
+ i__3 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__3; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ slartv_(&nrt, &ab[l + (j2 + ka1 - l) * ab_dim1], &inca, &
+ ab[l + 1 + (j2 + ka1 - l) * ab_dim1], &inca, &
+ work[*n + j2], &work[j2], &ka1);
+ }
+/* L190: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 2nd set */
+
+ i__3 = j1;
+ i__2 = ka1;
+ for (j = j2; i__2 < 0 ? j >= i__3 : j <= i__3; j += i__2) {
+ i__4 = *n - m;
+ srot_(&i__4, &x[m + 1 + j * x_dim1], &c__1, &x[m + 1 + (j
+ + 1) * x_dim1], &c__1, &work[*n + j], &work[j]);
+/* L200: */
+ }
+ }
+/* L210: */
+ }
+
+ i__2 = *kb - 1;
+ for (k = 1; k <= i__2; ++k) {
+/* Computing MAX */
+ i__3 = 1, i__4 = k - i0 + 2;
+ j2 = i__ - k - 1 + max(i__3,i__4) * ka1;
+
+/* finish applying rotations in 1st set from the left */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ slartv_(&nrt, &ab[l + (j2 + ka1 - l) * ab_dim1], &inca, &
+ ab[l + 1 + (j2 + ka1 - l) * ab_dim1], &inca, &
+ work[*n + j2 - m], &work[j2 - m], &ka1);
+ }
+/* L220: */
+ }
+/* L230: */
+ }
+
+ if (*kb > 1) {
+ i__2 = i__ - *kb + (*ka << 1) + 1;
+ for (j = *n - 1; j >= i__2; --j) {
+ work[*n + j - m] = work[*n + j - *ka - m];
+ work[j - m] = work[j - *ka - m];
+/* L240: */
+ }
+ }
+
+ } else {
+
+/* Transform A, working with the lower triangle */
+
+ if (update) {
+
+/* Form inv(S(i))**T * A * inv(S(i)) */
+
+ bii = bb[i__ * bb_dim1 + 1];
+ i__2 = i1;
+ for (j = i__; j <= i__2; ++j) {
+ ab[j - i__ + 1 + i__ * ab_dim1] /= bii;
+/* L250: */
+ }
+/* Computing MAX */
+ i__2 = 1, i__3 = i__ - *ka;
+ i__4 = i__;
+ for (j = max(i__2,i__3); j <= i__4; ++j) {
+ ab[i__ - j + 1 + j * ab_dim1] /= bii;
+/* L260: */
+ }
+ i__4 = i__ - 1;
+ for (k = i__ - kbt; k <= i__4; ++k) {
+ i__2 = k;
+ for (j = i__ - kbt; j <= i__2; ++j) {
+ ab[k - j + 1 + j * ab_dim1] = ab[k - j + 1 + j * ab_dim1]
+ - bb[i__ - j + 1 + j * bb_dim1] * ab[i__ - k + 1
+ + k * ab_dim1] - bb[i__ - k + 1 + k * bb_dim1] *
+ ab[i__ - j + 1 + j * ab_dim1] + ab[i__ * ab_dim1
+ + 1] * bb[i__ - j + 1 + j * bb_dim1] * bb[i__ - k
+ + 1 + k * bb_dim1];
+/* L270: */
+ }
+/* Computing MAX */
+ i__2 = 1, i__3 = i__ - *ka;
+ i__1 = i__ - kbt - 1;
+ for (j = max(i__2,i__3); j <= i__1; ++j) {
+ ab[k - j + 1 + j * ab_dim1] -= bb[i__ - k + 1 + k *
+ bb_dim1] * ab[i__ - j + 1 + j * ab_dim1];
+/* L280: */
+ }
+/* L290: */
+ }
+ i__4 = i1;
+ for (j = i__; j <= i__4; ++j) {
+/* Computing MAX */
+ i__1 = j - *ka, i__2 = i__ - kbt;
+ i__3 = i__ - 1;
+ for (k = max(i__1,i__2); k <= i__3; ++k) {
+ ab[j - k + 1 + k * ab_dim1] -= bb[i__ - k + 1 + k *
+ bb_dim1] * ab[j - i__ + 1 + i__ * ab_dim1];
+/* L300: */
+ }
+/* L310: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by inv(S(i)) */
+
+ i__4 = *n - m;
+ r__1 = 1.f / bii;
+ sscal_(&i__4, &r__1, &x[m + 1 + i__ * x_dim1], &c__1);
+ if (kbt > 0) {
+ i__4 = *n - m;
+ i__3 = *ldbb - 1;
+ sger_(&i__4, &kbt, &c_b20, &x[m + 1 + i__ * x_dim1], &
+ c__1, &bb[kbt + 1 + (i__ - kbt) * bb_dim1], &i__3,
+ &x[m + 1 + (i__ - kbt) * x_dim1], ldx);
+ }
+ }
+
+/* store a(i1,i) in RA1 for use in next loop over K */
+
+ ra1 = ab[i1 - i__ + 1 + i__ * ab_dim1];
+ }
+
+/* Generate and apply vectors of rotations to chase all the */
+/* existing bulges KA positions down toward the bottom of the */
+/* band */
+
+ i__4 = *kb - 1;
+ for (k = 1; k <= i__4; ++k) {
+ if (update) {
+
+/* Determine the rotations which would annihilate the bulge */
+/* which has in theory just been created */
+
+ if (i__ - k + *ka < *n && i__ - k > 1) {
+
+/* generate rotation to annihilate a(i-k+ka+1,i) */
+
+ slartg_(&ab[ka1 - k + i__ * ab_dim1], &ra1, &work[*n +
+ i__ - k + *ka - m], &work[i__ - k + *ka - m], &ra)
+ ;
+
+/* create nonzero element a(i-k+ka+1,i-k) outside the */
+/* band and store it in WORK(i-k) */
+
+ t = -bb[k + 1 + (i__ - k) * bb_dim1] * ra1;
+ work[i__ - k] = work[*n + i__ - k + *ka - m] * t - work[
+ i__ - k + *ka - m] * ab[ka1 + (i__ - k) * ab_dim1]
+ ;
+ ab[ka1 + (i__ - k) * ab_dim1] = work[i__ - k + *ka - m] *
+ t + work[*n + i__ - k + *ka - m] * ab[ka1 + (i__
+ - k) * ab_dim1];
+ ra1 = ra;
+ }
+ }
+/* Computing MAX */
+ i__3 = 1, i__1 = k - i0 + 2;
+ j2 = i__ - k - 1 + max(i__3,i__1) * ka1;
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ if (update) {
+/* Computing MAX */
+ i__3 = j2, i__1 = i__ + (*ka << 1) - k + 1;
+ j2t = max(i__3,i__1);
+ } else {
+ j2t = j2;
+ }
+ nrt = (*n - j2t + *ka) / ka1;
+ i__3 = j1;
+ i__1 = ka1;
+ for (j = j2t; i__1 < 0 ? j >= i__3 : j <= i__3; j += i__1) {
+
+/* create nonzero element a(j+1,j-ka) outside the band */
+/* and store it in WORK(j-m) */
+
+ work[j - m] *= ab[ka1 + (j - *ka + 1) * ab_dim1];
+ ab[ka1 + (j - *ka + 1) * ab_dim1] = work[*n + j - m] * ab[ka1
+ + (j - *ka + 1) * ab_dim1];
+/* L320: */
+ }
+
+/* generate rotations in 1st set to annihilate elements which */
+/* have been created outside the band */
+
+ if (nrt > 0) {
+ slargv_(&nrt, &ab[ka1 + (j2t - *ka) * ab_dim1], &inca, &work[
+ j2t - m], &ka1, &work[*n + j2t - m], &ka1);
+ }
+ if (nr > 0) {
+
+/* apply rotations in 1st set from the left */
+
+ i__1 = *ka - 1;
+ for (l = 1; l <= i__1; ++l) {
+ slartv_(&nr, &ab[l + 1 + (j2 - l) * ab_dim1], &inca, &ab[
+ l + 2 + (j2 - l) * ab_dim1], &inca, &work[*n + j2
+ - m], &work[j2 - m], &ka1);
+/* L330: */
+ }
+
+/* apply rotations in 1st set from both sides to diagonal */
+/* blocks */
+
+ slar2v_(&nr, &ab[j2 * ab_dim1 + 1], &ab[(j2 + 1) * ab_dim1 +
+ 1], &ab[j2 * ab_dim1 + 2], &inca, &work[*n + j2 - m],
+ &work[j2 - m], &ka1);
+
+ }
+
+/* start applying rotations in 1st set from the right */
+
+ i__1 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__1; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ slartv_(&nrt, &ab[ka1 - l + 1 + j2 * ab_dim1], &inca, &ab[
+ ka1 - l + (j2 + 1) * ab_dim1], &inca, &work[*n +
+ j2 - m], &work[j2 - m], &ka1);
+ }
+/* L340: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 1st set */
+
+ i__1 = j1;
+ i__3 = ka1;
+ for (j = j2; i__3 < 0 ? j >= i__1 : j <= i__1; j += i__3) {
+ i__2 = *n - m;
+ srot_(&i__2, &x[m + 1 + j * x_dim1], &c__1, &x[m + 1 + (j
+ + 1) * x_dim1], &c__1, &work[*n + j - m], &work[j
+ - m]);
+/* L350: */
+ }
+ }
+/* L360: */
+ }
+
+ if (update) {
+ if (i2 <= *n && kbt > 0) {
+
+/* create nonzero element a(i-kbt+ka+1,i-kbt) outside the */
+/* band and store it in WORK(i-kbt) */
+
+ work[i__ - kbt] = -bb[kbt + 1 + (i__ - kbt) * bb_dim1] * ra1;
+ }
+ }
+
+ for (k = *kb; k >= 1; --k) {
+ if (update) {
+/* Computing MAX */
+ i__4 = 2, i__3 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__4,i__3) * ka1;
+ } else {
+/* Computing MAX */
+ i__4 = 1, i__3 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__4,i__3) * ka1;
+ }
+
+/* finish applying rotations in 2nd set from the right */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (*n - j2 + *ka + l) / ka1;
+ if (nrt > 0) {
+ slartv_(&nrt, &ab[ka1 - l + 1 + (j2 - *ka) * ab_dim1], &
+ inca, &ab[ka1 - l + (j2 - *ka + 1) * ab_dim1], &
+ inca, &work[*n + j2 - *ka], &work[j2 - *ka], &ka1)
+ ;
+ }
+/* L370: */
+ }
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ i__4 = j2;
+ i__3 = -ka1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+ work[j] = work[j - *ka];
+ work[*n + j] = work[*n + j - *ka];
+/* L380: */
+ }
+ i__3 = j1;
+ i__4 = ka1;
+ for (j = j2; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+
+/* create nonzero element a(j+1,j-ka) outside the band */
+/* and store it in WORK(j) */
+
+ work[j] *= ab[ka1 + (j - *ka + 1) * ab_dim1];
+ ab[ka1 + (j - *ka + 1) * ab_dim1] = work[*n + j] * ab[ka1 + (
+ j - *ka + 1) * ab_dim1];
+/* L390: */
+ }
+ if (update) {
+ if (i__ - k < *n - *ka && k <= kbt) {
+ work[i__ - k + *ka] = work[i__ - k];
+ }
+ }
+/* L400: */
+ }
+
+ for (k = *kb; k >= 1; --k) {
+/* Computing MAX */
+ i__4 = 1, i__3 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__4,i__3) * ka1;
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ if (nr > 0) {
+
+/* generate rotations in 2nd set to annihilate elements */
+/* which have been created outside the band */
+
+ slargv_(&nr, &ab[ka1 + (j2 - *ka) * ab_dim1], &inca, &work[j2]
+, &ka1, &work[*n + j2], &ka1);
+
+/* apply rotations in 2nd set from the left */
+
+ i__4 = *ka - 1;
+ for (l = 1; l <= i__4; ++l) {
+ slartv_(&nr, &ab[l + 1 + (j2 - l) * ab_dim1], &inca, &ab[
+ l + 2 + (j2 - l) * ab_dim1], &inca, &work[*n + j2]
+, &work[j2], &ka1);
+/* L410: */
+ }
+
+/* apply rotations in 2nd set from both sides to diagonal */
+/* blocks */
+
+ slar2v_(&nr, &ab[j2 * ab_dim1 + 1], &ab[(j2 + 1) * ab_dim1 +
+ 1], &ab[j2 * ab_dim1 + 2], &inca, &work[*n + j2], &
+ work[j2], &ka1);
+
+ }
+
+/* start applying rotations in 2nd set from the right */
+
+ i__4 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__4; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ slartv_(&nrt, &ab[ka1 - l + 1 + j2 * ab_dim1], &inca, &ab[
+ ka1 - l + (j2 + 1) * ab_dim1], &inca, &work[*n +
+ j2], &work[j2], &ka1);
+ }
+/* L420: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 2nd set */
+
+ i__4 = j1;
+ i__3 = ka1;
+ for (j = j2; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+ i__1 = *n - m;
+ srot_(&i__1, &x[m + 1 + j * x_dim1], &c__1, &x[m + 1 + (j
+ + 1) * x_dim1], &c__1, &work[*n + j], &work[j]);
+/* L430: */
+ }
+ }
+/* L440: */
+ }
+
+ i__3 = *kb - 1;
+ for (k = 1; k <= i__3; ++k) {
+/* Computing MAX */
+ i__4 = 1, i__1 = k - i0 + 2;
+ j2 = i__ - k - 1 + max(i__4,i__1) * ka1;
+
+/* finish applying rotations in 1st set from the right */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ slartv_(&nrt, &ab[ka1 - l + 1 + j2 * ab_dim1], &inca, &ab[
+ ka1 - l + (j2 + 1) * ab_dim1], &inca, &work[*n +
+ j2 - m], &work[j2 - m], &ka1);
+ }
+/* L450: */
+ }
+/* L460: */
+ }
+
+ if (*kb > 1) {
+ i__3 = i__ - *kb + (*ka << 1) + 1;
+ for (j = *n - 1; j >= i__3; --j) {
+ work[*n + j - m] = work[*n + j - *ka - m];
+ work[j - m] = work[j - *ka - m];
+/* L470: */
+ }
+ }
+
+ }
+
+ goto L10;
+
+L480:
+
+/* **************************** Phase 2 ***************************** */
+
+/* The logical structure of this phase is: */
+
+/* UPDATE = .TRUE. */
+/* DO I = 1, M */
+/* use S(i) to update A and create a new bulge */
+/* apply rotations to push all bulges KA positions upward */
+/* END DO */
+/* UPDATE = .FALSE. */
+/* DO I = M - KA - 1, 2, -1 */
+/* apply rotations to push all bulges KA positions upward */
+/* END DO */
+
+/* To avoid duplicating code, the two loops are merged. */
+
+ update = TRUE_;
+ i__ = 0;
+L490:
+ if (update) {
+ ++i__;
+/* Computing MIN */
+ i__3 = *kb, i__4 = m - i__;
+ kbt = min(i__3,i__4);
+ i0 = i__ + 1;
+/* Computing MAX */
+ i__3 = 1, i__4 = i__ - *ka;
+ i1 = max(i__3,i__4);
+ i2 = i__ + kbt - ka1;
+ if (i__ > m) {
+ update = FALSE_;
+ --i__;
+ i0 = m + 1;
+ if (*ka == 0) {
+ return 0;
+ }
+ goto L490;
+ }
+ } else {
+ i__ -= *ka;
+ if (i__ < 2) {
+ return 0;
+ }
+ }
+
+ if (i__ < m - kbt) {
+ nx = m;
+ } else {
+ nx = *n;
+ }
+
+ if (upper) {
+
+/* Transform A, working with the upper triangle */
+
+ if (update) {
+
+/* Form inv(S(i))**T * A * inv(S(i)) */
+
+ bii = bb[kb1 + i__ * bb_dim1];
+ i__3 = i__;
+ for (j = i1; j <= i__3; ++j) {
+ ab[j - i__ + ka1 + i__ * ab_dim1] /= bii;
+/* L500: */
+ }
+/* Computing MIN */
+ i__4 = *n, i__1 = i__ + *ka;
+ i__3 = min(i__4,i__1);
+ for (j = i__; j <= i__3; ++j) {
+ ab[i__ - j + ka1 + j * ab_dim1] /= bii;
+/* L510: */
+ }
+ i__3 = i__ + kbt;
+ for (k = i__ + 1; k <= i__3; ++k) {
+ i__4 = i__ + kbt;
+ for (j = k; j <= i__4; ++j) {
+ ab[k - j + ka1 + j * ab_dim1] = ab[k - j + ka1 + j *
+ ab_dim1] - bb[i__ - j + kb1 + j * bb_dim1] * ab[
+ i__ - k + ka1 + k * ab_dim1] - bb[i__ - k + kb1 +
+ k * bb_dim1] * ab[i__ - j + ka1 + j * ab_dim1] +
+ ab[ka1 + i__ * ab_dim1] * bb[i__ - j + kb1 + j *
+ bb_dim1] * bb[i__ - k + kb1 + k * bb_dim1];
+/* L520: */
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = i__ + *ka;
+ i__4 = min(i__1,i__2);
+ for (j = i__ + kbt + 1; j <= i__4; ++j) {
+ ab[k - j + ka1 + j * ab_dim1] -= bb[i__ - k + kb1 + k *
+ bb_dim1] * ab[i__ - j + ka1 + j * ab_dim1];
+/* L530: */
+ }
+/* L540: */
+ }
+ i__3 = i__;
+ for (j = i1; j <= i__3; ++j) {
+/* Computing MIN */
+ i__1 = j + *ka, i__2 = i__ + kbt;
+ i__4 = min(i__1,i__2);
+ for (k = i__ + 1; k <= i__4; ++k) {
+ ab[j - k + ka1 + k * ab_dim1] -= bb[i__ - k + kb1 + k *
+ bb_dim1] * ab[j - i__ + ka1 + i__ * ab_dim1];
+/* L550: */
+ }
+/* L560: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by inv(S(i)) */
+
+ r__1 = 1.f / bii;
+ sscal_(&nx, &r__1, &x[i__ * x_dim1 + 1], &c__1);
+ if (kbt > 0) {
+ i__3 = *ldbb - 1;
+ sger_(&nx, &kbt, &c_b20, &x[i__ * x_dim1 + 1], &c__1, &bb[
+ *kb + (i__ + 1) * bb_dim1], &i__3, &x[(i__ + 1) *
+ x_dim1 + 1], ldx);
+ }
+ }
+
+/* store a(i1,i) in RA1 for use in next loop over K */
+
+ ra1 = ab[i1 - i__ + ka1 + i__ * ab_dim1];
+ }
+
+/* Generate and apply vectors of rotations to chase all the */
+/* existing bulges KA positions up toward the top of the band */
+
+ i__3 = *kb - 1;
+ for (k = 1; k <= i__3; ++k) {
+ if (update) {
+
+/* Determine the rotations which would annihilate the bulge */
+/* which has in theory just been created */
+
+ if (i__ + k - ka1 > 0 && i__ + k < m) {
+
+/* generate rotation to annihilate a(i+k-ka-1,i) */
+
+ slartg_(&ab[k + 1 + i__ * ab_dim1], &ra1, &work[*n + i__
+ + k - *ka], &work[i__ + k - *ka], &ra);
+
+/* create nonzero element a(i+k-ka-1,i+k) outside the */
+/* band and store it in WORK(m-kb+i+k) */
+
+ t = -bb[kb1 - k + (i__ + k) * bb_dim1] * ra1;
+ work[m - *kb + i__ + k] = work[*n + i__ + k - *ka] * t -
+ work[i__ + k - *ka] * ab[(i__ + k) * ab_dim1 + 1];
+ ab[(i__ + k) * ab_dim1 + 1] = work[i__ + k - *ka] * t +
+ work[*n + i__ + k - *ka] * ab[(i__ + k) * ab_dim1
+ + 1];
+ ra1 = ra;
+ }
+ }
+/* Computing MAX */
+ i__4 = 1, i__1 = k + i0 - m + 1;
+ j2 = i__ + k + 1 - max(i__4,i__1) * ka1;
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ if (update) {
+/* Computing MIN */
+ i__4 = j2, i__1 = i__ - (*ka << 1) + k - 1;
+ j2t = min(i__4,i__1);
+ } else {
+ j2t = j2;
+ }
+ nrt = (j2t + *ka - 1) / ka1;
+ i__4 = j2t;
+ i__1 = ka1;
+ for (j = j1; i__1 < 0 ? j >= i__4 : j <= i__4; j += i__1) {
+
+/* create nonzero element a(j-1,j+ka) outside the band */
+/* and store it in WORK(j) */
+
+ work[j] *= ab[(j + *ka - 1) * ab_dim1 + 1];
+ ab[(j + *ka - 1) * ab_dim1 + 1] = work[*n + j] * ab[(j + *ka
+ - 1) * ab_dim1 + 1];
+/* L570: */
+ }
+
+/* generate rotations in 1st set to annihilate elements which */
+/* have been created outside the band */
+
+ if (nrt > 0) {
+ slargv_(&nrt, &ab[(j1 + *ka) * ab_dim1 + 1], &inca, &work[j1],
+ &ka1, &work[*n + j1], &ka1);
+ }
+ if (nr > 0) {
+
+/* apply rotations in 1st set from the left */
+
+ i__1 = *ka - 1;
+ for (l = 1; l <= i__1; ++l) {
+ slartv_(&nr, &ab[ka1 - l + (j1 + l) * ab_dim1], &inca, &
+ ab[*ka - l + (j1 + l) * ab_dim1], &inca, &work[*n
+ + j1], &work[j1], &ka1);
+/* L580: */
+ }
+
+/* apply rotations in 1st set from both sides to diagonal */
+/* blocks */
+
+ slar2v_(&nr, &ab[ka1 + j1 * ab_dim1], &ab[ka1 + (j1 - 1) *
+ ab_dim1], &ab[*ka + j1 * ab_dim1], &inca, &work[*n +
+ j1], &work[j1], &ka1);
+
+ }
+
+/* start applying rotations in 1st set from the right */
+
+ i__1 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__1; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ slartv_(&nrt, &ab[l + j1t * ab_dim1], &inca, &ab[l + 1 + (
+ j1t - 1) * ab_dim1], &inca, &work[*n + j1t], &
+ work[j1t], &ka1);
+ }
+/* L590: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 1st set */
+
+ i__1 = j2;
+ i__4 = ka1;
+ for (j = j1; i__4 < 0 ? j >= i__1 : j <= i__1; j += i__4) {
+ srot_(&nx, &x[j * x_dim1 + 1], &c__1, &x[(j - 1) * x_dim1
+ + 1], &c__1, &work[*n + j], &work[j]);
+/* L600: */
+ }
+ }
+/* L610: */
+ }
+
+ if (update) {
+ if (i2 > 0 && kbt > 0) {
+
+/* create nonzero element a(i+kbt-ka-1,i+kbt) outside the */
+/* band and store it in WORK(m-kb+i+kbt) */
+
+ work[m - *kb + i__ + kbt] = -bb[kb1 - kbt + (i__ + kbt) *
+ bb_dim1] * ra1;
+ }
+ }
+
+ for (k = *kb; k >= 1; --k) {
+ if (update) {
+/* Computing MAX */
+ i__3 = 2, i__4 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__3,i__4) * ka1;
+ } else {
+/* Computing MAX */
+ i__3 = 1, i__4 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__3,i__4) * ka1;
+ }
+
+/* finish applying rotations in 2nd set from the right */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (j2 + *ka + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ slartv_(&nrt, &ab[l + (j1t + *ka) * ab_dim1], &inca, &ab[
+ l + 1 + (j1t + *ka - 1) * ab_dim1], &inca, &work[*
+ n + m - *kb + j1t + *ka], &work[m - *kb + j1t + *
+ ka], &ka1);
+ }
+/* L620: */
+ }
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ i__3 = j2;
+ i__4 = ka1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+ work[m - *kb + j] = work[m - *kb + j + *ka];
+ work[*n + m - *kb + j] = work[*n + m - *kb + j + *ka];
+/* L630: */
+ }
+ i__4 = j2;
+ i__3 = ka1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+
+/* create nonzero element a(j-1,j+ka) outside the band */
+/* and store it in WORK(m-kb+j) */
+
+ work[m - *kb + j] *= ab[(j + *ka - 1) * ab_dim1 + 1];
+ ab[(j + *ka - 1) * ab_dim1 + 1] = work[*n + m - *kb + j] * ab[
+ (j + *ka - 1) * ab_dim1 + 1];
+/* L640: */
+ }
+ if (update) {
+ if (i__ + k > ka1 && k <= kbt) {
+ work[m - *kb + i__ + k - *ka] = work[m - *kb + i__ + k];
+ }
+ }
+/* L650: */
+ }
+
+ for (k = *kb; k >= 1; --k) {
+/* Computing MAX */
+ i__3 = 1, i__4 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__3,i__4) * ka1;
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ if (nr > 0) {
+
+/* generate rotations in 2nd set to annihilate elements */
+/* which have been created outside the band */
+
+ slargv_(&nr, &ab[(j1 + *ka) * ab_dim1 + 1], &inca, &work[m - *
+ kb + j1], &ka1, &work[*n + m - *kb + j1], &ka1);
+
+/* apply rotations in 2nd set from the left */
+
+ i__3 = *ka - 1;
+ for (l = 1; l <= i__3; ++l) {
+ slartv_(&nr, &ab[ka1 - l + (j1 + l) * ab_dim1], &inca, &
+ ab[*ka - l + (j1 + l) * ab_dim1], &inca, &work[*n
+ + m - *kb + j1], &work[m - *kb + j1], &ka1);
+/* L660: */
+ }
+
+/* apply rotations in 2nd set from both sides to diagonal */
+/* blocks */
+
+ slar2v_(&nr, &ab[ka1 + j1 * ab_dim1], &ab[ka1 + (j1 - 1) *
+ ab_dim1], &ab[*ka + j1 * ab_dim1], &inca, &work[*n +
+ m - *kb + j1], &work[m - *kb + j1], &ka1);
+
+ }
+
+/* start applying rotations in 2nd set from the right */
+
+ i__3 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__3; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ slartv_(&nrt, &ab[l + j1t * ab_dim1], &inca, &ab[l + 1 + (
+ j1t - 1) * ab_dim1], &inca, &work[*n + m - *kb +
+ j1t], &work[m - *kb + j1t], &ka1);
+ }
+/* L670: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 2nd set */
+
+ i__3 = j2;
+ i__4 = ka1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+ srot_(&nx, &x[j * x_dim1 + 1], &c__1, &x[(j - 1) * x_dim1
+ + 1], &c__1, &work[*n + m - *kb + j], &work[m - *
+ kb + j]);
+/* L680: */
+ }
+ }
+/* L690: */
+ }
+
+ i__4 = *kb - 1;
+ for (k = 1; k <= i__4; ++k) {
+/* Computing MAX */
+ i__3 = 1, i__1 = k + i0 - m + 1;
+ j2 = i__ + k + 1 - max(i__3,i__1) * ka1;
+
+/* finish applying rotations in 1st set from the right */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ slartv_(&nrt, &ab[l + j1t * ab_dim1], &inca, &ab[l + 1 + (
+ j1t - 1) * ab_dim1], &inca, &work[*n + j1t], &
+ work[j1t], &ka1);
+ }
+/* L700: */
+ }
+/* L710: */
+ }
+
+ if (*kb > 1) {
+/* Computing MIN */
+ i__3 = i__ + *kb;
+ i__4 = min(i__3,m) - (*ka << 1) - 1;
+ for (j = 2; j <= i__4; ++j) {
+ work[*n + j] = work[*n + j + *ka];
+ work[j] = work[j + *ka];
+/* L720: */
+ }
+ }
+
+ } else {
+
+/* Transform A, working with the lower triangle */
+
+ if (update) {
+
+/* Form inv(S(i))**T * A * inv(S(i)) */
+
+ bii = bb[i__ * bb_dim1 + 1];
+ i__4 = i__;
+ for (j = i1; j <= i__4; ++j) {
+ ab[i__ - j + 1 + j * ab_dim1] /= bii;
+/* L730: */
+ }
+/* Computing MIN */
+ i__3 = *n, i__1 = i__ + *ka;
+ i__4 = min(i__3,i__1);
+ for (j = i__; j <= i__4; ++j) {
+ ab[j - i__ + 1 + i__ * ab_dim1] /= bii;
+/* L740: */
+ }
+ i__4 = i__ + kbt;
+ for (k = i__ + 1; k <= i__4; ++k) {
+ i__3 = i__ + kbt;
+ for (j = k; j <= i__3; ++j) {
+ ab[j - k + 1 + k * ab_dim1] = ab[j - k + 1 + k * ab_dim1]
+ - bb[j - i__ + 1 + i__ * bb_dim1] * ab[k - i__ +
+ 1 + i__ * ab_dim1] - bb[k - i__ + 1 + i__ *
+ bb_dim1] * ab[j - i__ + 1 + i__ * ab_dim1] + ab[
+ i__ * ab_dim1 + 1] * bb[j - i__ + 1 + i__ *
+ bb_dim1] * bb[k - i__ + 1 + i__ * bb_dim1];
+/* L750: */
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = i__ + *ka;
+ i__3 = min(i__1,i__2);
+ for (j = i__ + kbt + 1; j <= i__3; ++j) {
+ ab[j - k + 1 + k * ab_dim1] -= bb[k - i__ + 1 + i__ *
+ bb_dim1] * ab[j - i__ + 1 + i__ * ab_dim1];
+/* L760: */
+ }
+/* L770: */
+ }
+ i__4 = i__;
+ for (j = i1; j <= i__4; ++j) {
+/* Computing MIN */
+ i__1 = j + *ka, i__2 = i__ + kbt;
+ i__3 = min(i__1,i__2);
+ for (k = i__ + 1; k <= i__3; ++k) {
+ ab[k - j + 1 + j * ab_dim1] -= bb[k - i__ + 1 + i__ *
+ bb_dim1] * ab[i__ - j + 1 + j * ab_dim1];
+/* L780: */
+ }
+/* L790: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by inv(S(i)) */
+
+ r__1 = 1.f / bii;
+ sscal_(&nx, &r__1, &x[i__ * x_dim1 + 1], &c__1);
+ if (kbt > 0) {
+ sger_(&nx, &kbt, &c_b20, &x[i__ * x_dim1 + 1], &c__1, &bb[
+ i__ * bb_dim1 + 2], &c__1, &x[(i__ + 1) * x_dim1
+ + 1], ldx);
+ }
+ }
+
+/* store a(i,i1) in RA1 for use in next loop over K */
+
+ ra1 = ab[i__ - i1 + 1 + i1 * ab_dim1];
+ }
+
+/* Generate and apply vectors of rotations to chase all the */
+/* existing bulges KA positions up toward the top of the band */
+
+ i__4 = *kb - 1;
+ for (k = 1; k <= i__4; ++k) {
+ if (update) {
+
+/* Determine the rotations which would annihilate the bulge */
+/* which has in theory just been created */
+
+ if (i__ + k - ka1 > 0 && i__ + k < m) {
+
+/* generate rotation to annihilate a(i,i+k-ka-1) */
+
+ slartg_(&ab[ka1 - k + (i__ + k - *ka) * ab_dim1], &ra1, &
+ work[*n + i__ + k - *ka], &work[i__ + k - *ka], &
+ ra);
+
+/* create nonzero element a(i+k,i+k-ka-1) outside the */
+/* band and store it in WORK(m-kb+i+k) */
+
+ t = -bb[k + 1 + i__ * bb_dim1] * ra1;
+ work[m - *kb + i__ + k] = work[*n + i__ + k - *ka] * t -
+ work[i__ + k - *ka] * ab[ka1 + (i__ + k - *ka) *
+ ab_dim1];
+ ab[ka1 + (i__ + k - *ka) * ab_dim1] = work[i__ + k - *ka]
+ * t + work[*n + i__ + k - *ka] * ab[ka1 + (i__ +
+ k - *ka) * ab_dim1];
+ ra1 = ra;
+ }
+ }
+/* Computing MAX */
+ i__3 = 1, i__1 = k + i0 - m + 1;
+ j2 = i__ + k + 1 - max(i__3,i__1) * ka1;
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ if (update) {
+/* Computing MIN */
+ i__3 = j2, i__1 = i__ - (*ka << 1) + k - 1;
+ j2t = min(i__3,i__1);
+ } else {
+ j2t = j2;
+ }
+ nrt = (j2t + *ka - 1) / ka1;
+ i__3 = j2t;
+ i__1 = ka1;
+ for (j = j1; i__1 < 0 ? j >= i__3 : j <= i__3; j += i__1) {
+
+/* create nonzero element a(j+ka,j-1) outside the band */
+/* and store it in WORK(j) */
+
+ work[j] *= ab[ka1 + (j - 1) * ab_dim1];
+ ab[ka1 + (j - 1) * ab_dim1] = work[*n + j] * ab[ka1 + (j - 1)
+ * ab_dim1];
+/* L800: */
+ }
+
+/* generate rotations in 1st set to annihilate elements which */
+/* have been created outside the band */
+
+ if (nrt > 0) {
+ slargv_(&nrt, &ab[ka1 + j1 * ab_dim1], &inca, &work[j1], &ka1,
+ &work[*n + j1], &ka1);
+ }
+ if (nr > 0) {
+
+/* apply rotations in 1st set from the right */
+
+ i__1 = *ka - 1;
+ for (l = 1; l <= i__1; ++l) {
+ slartv_(&nr, &ab[l + 1 + j1 * ab_dim1], &inca, &ab[l + 2
+ + (j1 - 1) * ab_dim1], &inca, &work[*n + j1], &
+ work[j1], &ka1);
+/* L810: */
+ }
+
+/* apply rotations in 1st set from both sides to diagonal */
+/* blocks */
+
+ slar2v_(&nr, &ab[j1 * ab_dim1 + 1], &ab[(j1 - 1) * ab_dim1 +
+ 1], &ab[(j1 - 1) * ab_dim1 + 2], &inca, &work[*n + j1]
+, &work[j1], &ka1);
+
+ }
+
+/* start applying rotations in 1st set from the left */
+
+ i__1 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__1; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ slartv_(&nrt, &ab[ka1 - l + 1 + (j1t - ka1 + l) * ab_dim1]
+, &inca, &ab[ka1 - l + (j1t - ka1 + l) * ab_dim1],
+ &inca, &work[*n + j1t], &work[j1t], &ka1);
+ }
+/* L820: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 1st set */
+
+ i__1 = j2;
+ i__3 = ka1;
+ for (j = j1; i__3 < 0 ? j >= i__1 : j <= i__1; j += i__3) {
+ srot_(&nx, &x[j * x_dim1 + 1], &c__1, &x[(j - 1) * x_dim1
+ + 1], &c__1, &work[*n + j], &work[j]);
+/* L830: */
+ }
+ }
+/* L840: */
+ }
+
+ if (update) {
+ if (i2 > 0 && kbt > 0) {
+
+/* create nonzero element a(i+kbt,i+kbt-ka-1) outside the */
+/* band and store it in WORK(m-kb+i+kbt) */
+
+ work[m - *kb + i__ + kbt] = -bb[kbt + 1 + i__ * bb_dim1] *
+ ra1;
+ }
+ }
+
+ for (k = *kb; k >= 1; --k) {
+ if (update) {
+/* Computing MAX */
+ i__4 = 2, i__3 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__4,i__3) * ka1;
+ } else {
+/* Computing MAX */
+ i__4 = 1, i__3 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__4,i__3) * ka1;
+ }
+
+/* finish applying rotations in 2nd set from the left */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (j2 + *ka + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ slartv_(&nrt, &ab[ka1 - l + 1 + (j1t + l - 1) * ab_dim1],
+ &inca, &ab[ka1 - l + (j1t + l - 1) * ab_dim1], &
+ inca, &work[*n + m - *kb + j1t + *ka], &work[m - *
+ kb + j1t + *ka], &ka1);
+ }
+/* L850: */
+ }
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ i__4 = j2;
+ i__3 = ka1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+ work[m - *kb + j] = work[m - *kb + j + *ka];
+ work[*n + m - *kb + j] = work[*n + m - *kb + j + *ka];
+/* L860: */
+ }
+ i__3 = j2;
+ i__4 = ka1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+
+/* create nonzero element a(j+ka,j-1) outside the band */
+/* and store it in WORK(m-kb+j) */
+
+ work[m - *kb + j] *= ab[ka1 + (j - 1) * ab_dim1];
+ ab[ka1 + (j - 1) * ab_dim1] = work[*n + m - *kb + j] * ab[ka1
+ + (j - 1) * ab_dim1];
+/* L870: */
+ }
+ if (update) {
+ if (i__ + k > ka1 && k <= kbt) {
+ work[m - *kb + i__ + k - *ka] = work[m - *kb + i__ + k];
+ }
+ }
+/* L880: */
+ }
+
+ for (k = *kb; k >= 1; --k) {
+/* Computing MAX */
+ i__4 = 1, i__3 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__4,i__3) * ka1;
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ if (nr > 0) {
+
+/* generate rotations in 2nd set to annihilate elements */
+/* which have been created outside the band */
+
+ slargv_(&nr, &ab[ka1 + j1 * ab_dim1], &inca, &work[m - *kb +
+ j1], &ka1, &work[*n + m - *kb + j1], &ka1);
+
+/* apply rotations in 2nd set from the right */
+
+ i__4 = *ka - 1;
+ for (l = 1; l <= i__4; ++l) {
+ slartv_(&nr, &ab[l + 1 + j1 * ab_dim1], &inca, &ab[l + 2
+ + (j1 - 1) * ab_dim1], &inca, &work[*n + m - *kb
+ + j1], &work[m - *kb + j1], &ka1);
+/* L890: */
+ }
+
+/* apply rotations in 2nd set from both sides to diagonal */
+/* blocks */
+
+ slar2v_(&nr, &ab[j1 * ab_dim1 + 1], &ab[(j1 - 1) * ab_dim1 +
+ 1], &ab[(j1 - 1) * ab_dim1 + 2], &inca, &work[*n + m
+ - *kb + j1], &work[m - *kb + j1], &ka1);
+
+ }
+
+/* start applying rotations in 2nd set from the left */
+
+ i__4 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__4; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ slartv_(&nrt, &ab[ka1 - l + 1 + (j1t - ka1 + l) * ab_dim1]
+, &inca, &ab[ka1 - l + (j1t - ka1 + l) * ab_dim1],
+ &inca, &work[*n + m - *kb + j1t], &work[m - *kb
+ + j1t], &ka1);
+ }
+/* L900: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 2nd set */
+
+ i__4 = j2;
+ i__3 = ka1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+ srot_(&nx, &x[j * x_dim1 + 1], &c__1, &x[(j - 1) * x_dim1
+ + 1], &c__1, &work[*n + m - *kb + j], &work[m - *
+ kb + j]);
+/* L910: */
+ }
+ }
+/* L920: */
+ }
+
+ i__3 = *kb - 1;
+ for (k = 1; k <= i__3; ++k) {
+/* Computing MAX */
+ i__4 = 1, i__1 = k + i0 - m + 1;
+ j2 = i__ + k + 1 - max(i__4,i__1) * ka1;
+
+/* finish applying rotations in 1st set from the left */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ slartv_(&nrt, &ab[ka1 - l + 1 + (j1t - ka1 + l) * ab_dim1]
+, &inca, &ab[ka1 - l + (j1t - ka1 + l) * ab_dim1],
+ &inca, &work[*n + j1t], &work[j1t], &ka1);
+ }
+/* L930: */
+ }
+/* L940: */
+ }
+
+ if (*kb > 1) {
+/* Computing MIN */
+ i__4 = i__ + *kb;
+ i__3 = min(i__4,m) - (*ka << 1) - 1;
+ for (j = 2; j <= i__3; ++j) {
+ work[*n + j] = work[*n + j + *ka];
+ work[j] = work[j + *ka];
+/* L950: */
+ }
+ }
+
+ }
+
+ goto L490;
+
+/* End of SSBGST */
+
+} /* ssbgst_ */
diff --git a/contrib/libs/clapack/ssbgv.c b/contrib/libs/clapack/ssbgv.c
new file mode 100644
index 0000000000..bdd9525433
--- /dev/null
+++ b/contrib/libs/clapack/ssbgv.c
@@ -0,0 +1,232 @@
+/* ssbgv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ssbgv_(char *jobz, char *uplo, integer *n, integer *ka,
+ integer *kb, real *ab, integer *ldab, real *bb, integer *ldbb, real *
+ w, real *z__, integer *ldz, real *work, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, bb_dim1, bb_offset, z_dim1, z_offset, i__1;
+
+ /* Local variables */
+ integer inde;
+ char vect[1];
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical upper, wantz;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ integer indwrk;
+ extern /* Subroutine */ int spbstf_(char *, integer *, integer *, real *,
+ integer *, integer *), ssbtrd_(char *, char *, integer *,
+ integer *, real *, integer *, real *, real *, real *, integer *,
+ real *, integer *), ssbgst_(char *, char *,
+ integer *, integer *, integer *, real *, integer *, real *,
+ integer *, real *, integer *, real *, integer *),
+ ssterf_(integer *, real *, real *, integer *), ssteqr_(char *,
+ integer *, real *, real *, real *, integer *, real *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSBGV computes all the eigenvalues, and optionally, the eigenvectors */
+/* of a real generalized symmetric-definite banded eigenproblem, of */
+/* the form A*x=(lambda)*B*x. Here A and B are assumed to be symmetric */
+/* and banded, and B is also positive definite. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* KA (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KA >= 0. */
+
+/* KB (input) INTEGER */
+/* The number of superdiagonals of the matrix B if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KB >= 0. */
+
+/* AB (input/output) REAL array, dimension (LDAB, N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first ka+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(ka+1+i-j,j) = A(i,j) for max(1,j-ka)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+ka). */
+
+/* On exit, the contents of AB are destroyed. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KA+1. */
+
+/* BB (input/output) REAL array, dimension (LDBB, N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix B, stored in the first kb+1 rows of the array. The */
+/* j-th column of B is stored in the j-th column of the array BB */
+/* as follows: */
+/* if UPLO = 'U', BB(kb+1+i-j,j) = B(i,j) for max(1,j-kb)<=i<=j; */
+/* if UPLO = 'L', BB(1+i-j,j) = B(i,j) for j<=i<=min(n,j+kb). */
+
+/* On exit, the factor S from the split Cholesky factorization */
+/* B = S**T*S, as returned by SPBSTF. */
+
+/* LDBB (input) INTEGER */
+/* The leading dimension of the array BB. LDBB >= KB+1. */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) REAL array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/* eigenvectors, with the i-th column of Z holding the */
+/* eigenvector associated with W(i). The eigenvectors are */
+/* normalized so that Z**T*B*Z = I. */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= N. */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is: */
+/* <= N: the algorithm failed to converge: */
+/* i off-diagonal elements of an intermediate */
+/* tridiagonal form did not converge to zero; */
+/* > N: if INFO = N + i, for 1 <= i <= N, then SPBSTF */
+/* returned INFO = i: B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ bb_dim1 = *ldbb;
+ bb_offset = 1 + bb_dim1;
+ bb -= bb_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ka < 0) {
+ *info = -4;
+ } else if (*kb < 0 || *kb > *ka) {
+ *info = -5;
+ } else if (*ldab < *ka + 1) {
+ *info = -7;
+ } else if (*ldbb < *kb + 1) {
+ *info = -9;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSBGV ", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a split Cholesky factorization of B. */
+
+ spbstf_(uplo, n, kb, &bb[bb_offset], ldbb, info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem. */
+
+ inde = 1;
+ indwrk = inde + *n;
+ ssbgst_(jobz, uplo, n, ka, kb, &ab[ab_offset], ldab, &bb[bb_offset], ldbb,
+ &z__[z_offset], ldz, &work[indwrk], &iinfo)
+ ;
+
+/* Reduce to tridiagonal form. */
+
+ if (wantz) {
+ *(unsigned char *)vect = 'U';
+ } else {
+ *(unsigned char *)vect = 'N';
+ }
+ ssbtrd_(vect, uplo, n, ka, &ab[ab_offset], ldab, &w[1], &work[inde], &z__[
+ z_offset], ldz, &work[indwrk], &iinfo);
+
+/* For eigenvalues only, call SSTERF. For eigenvectors, call SSTEQR. */
+
+ if (! wantz) {
+ ssterf_(n, &w[1], &work[inde], info);
+ } else {
+ ssteqr_(jobz, n, &w[1], &work[inde], &z__[z_offset], ldz, &work[
+ indwrk], info);
+ }
+ return 0;
+
+/* End of SSBGV */
+
+} /* ssbgv_ */
diff --git a/contrib/libs/clapack/ssbgvd.c b/contrib/libs/clapack/ssbgvd.c
new file mode 100644
index 0000000000..a3cebb694a
--- /dev/null
+++ b/contrib/libs/clapack/ssbgvd.c
@@ -0,0 +1,327 @@
+/* ssbgvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b12 = 1.f;
+static real c_b13 = 0.f;
+
+/* Subroutine */ int ssbgvd_(char *jobz, char *uplo, integer *n, integer *ka,
+ integer *kb, real *ab, integer *ldab, real *bb, integer *ldbb, real *
+ w, real *z__, integer *ldz, real *work, integer *lwork, integer *
+ iwork, integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, bb_dim1, bb_offset, z_dim1, z_offset, i__1;
+
+ /* Local variables */
+ integer inde;
+ char vect[1];
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ integer lwmin;
+ logical upper, wantz;
+ integer indwk2, llwrk2;
+ extern /* Subroutine */ int xerbla_(char *, integer *), sstedc_(
+ char *, integer *, real *, real *, real *, integer *, real *,
+ integer *, integer *, integer *, integer *), slacpy_(char
+ *, integer *, integer *, real *, integer *, real *, integer *);
+ integer indwrk, liwmin;
+ extern /* Subroutine */ int spbstf_(char *, integer *, integer *, real *,
+ integer *, integer *), ssbtrd_(char *, char *, integer *,
+ integer *, real *, integer *, real *, real *, real *, integer *,
+ real *, integer *), ssbgst_(char *, char *,
+ integer *, integer *, integer *, real *, integer *, real *,
+ integer *, real *, integer *, real *, integer *),
+ ssterf_(integer *, real *, real *, integer *);
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSBGVD computes all the eigenvalues, and optionally, the eigenvectors */
+/* of a real generalized symmetric-definite banded eigenproblem, of the */
+/* form A*x=(lambda)*B*x. Here A and B are assumed to be symmetric and */
+/* banded, and B is also positive definite. If eigenvectors are */
+/* desired, it uses a divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* KA (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KA >= 0. */
+
+/* KB (input) INTEGER */
+/* The number of superdiagonals of the matrix B if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KB >= 0. */
+
+/* AB (input/output) REAL array, dimension (LDAB, N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first ka+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(ka+1+i-j,j) = A(i,j) for max(1,j-ka)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+ka). */
+
+/* On exit, the contents of AB are destroyed. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KA+1. */
+
+/* BB (input/output) REAL array, dimension (LDBB, N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix B, stored in the first kb+1 rows of the array. The */
+/* j-th column of B is stored in the j-th column of the array BB */
+/* as follows: */
+/* if UPLO = 'U', BB(ka+1+i-j,j) = B(i,j) for max(1,j-kb)<=i<=j; */
+/* if UPLO = 'L', BB(1+i-j,j) = B(i,j) for j<=i<=min(n,j+kb). */
+
+/* On exit, the factor S from the split Cholesky factorization */
+/* B = S**T*S, as returned by SPBSTF. */
+
+/* LDBB (input) INTEGER */
+/* The leading dimension of the array BB. LDBB >= KB+1. */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) REAL array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/* eigenvectors, with the i-th column of Z holding the */
+/* eigenvector associated with W(i). The eigenvectors are */
+/* normalized so Z**T*B*Z = I. */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If N <= 1, LWORK >= 1. */
+/* If JOBZ = 'N' and N > 1, LWORK >= 3*N. */
+/* If JOBZ = 'V' and N > 1, LWORK >= 1 + 5*N + 2*N**2. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK and IWORK */
+/* arrays, returns these values as the first entries of the WORK */
+/* and IWORK arrays, and no error message related to LWORK or */
+/* LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if LIWORK > 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If JOBZ = 'N' or N <= 1, LIWORK >= 1. */
+/* If JOBZ = 'V' and N > 1, LIWORK >= 3 + 5*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK and IWORK arrays, and no error message related to */
+/* LWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is: */
+/* <= N: the algorithm failed to converge: */
+/* i off-diagonal elements of an intermediate */
+/* tridiagonal form did not converge to zero; */
+/* > N: if INFO = N + i, for 1 <= i <= N, then SPBSTF */
+/* returned INFO = i: B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ bb_dim1 = *ldbb;
+ bb_offset = 1 + bb_dim1;
+ bb -= bb_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1 || *liwork == -1;
+
+ *info = 0;
+ if (*n <= 1) {
+ liwmin = 1;
+ lwmin = 1;
+ } else if (wantz) {
+ liwmin = *n * 5 + 3;
+/* Computing 2nd power */
+ i__1 = *n;
+ lwmin = *n * 5 + 1 + (i__1 * i__1 << 1);
+ } else {
+ liwmin = 1;
+ lwmin = *n << 1;
+ }
+
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ka < 0) {
+ *info = -4;
+ } else if (*kb < 0 || *kb > *ka) {
+ *info = -5;
+ } else if (*ldab < *ka + 1) {
+ *info = -7;
+ } else if (*ldbb < *kb + 1) {
+ *info = -9;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+ work[1] = (real) lwmin;
+ iwork[1] = liwmin;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -14;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -16;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSBGVD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a split Cholesky factorization of B. */
+
+ spbstf_(uplo, n, kb, &bb[bb_offset], ldbb, info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem. */
+
+ inde = 1;
+ indwrk = inde + *n;
+ indwk2 = indwrk + *n * *n;
+ llwrk2 = *lwork - indwk2 + 1;
+ ssbgst_(jobz, uplo, n, ka, kb, &ab[ab_offset], ldab, &bb[bb_offset], ldbb,
+ &z__[z_offset], ldz, &work[indwrk], &iinfo)
+ ;
+
+/* Reduce to tridiagonal form. */
+
+ if (wantz) {
+ *(unsigned char *)vect = 'U';
+ } else {
+ *(unsigned char *)vect = 'N';
+ }
+ ssbtrd_(vect, uplo, n, ka, &ab[ab_offset], ldab, &w[1], &work[inde], &z__[
+ z_offset], ldz, &work[indwrk], &iinfo);
+
+/* For eigenvalues only, call SSTERF. For eigenvectors, call SSTEDC. */
+
+ if (! wantz) {
+ ssterf_(n, &w[1], &work[inde], info);
+ } else {
+ sstedc_("I", n, &w[1], &work[inde], &work[indwrk], n, &work[indwk2], &
+ llwrk2, &iwork[1], liwork, info);
+ sgemm_("N", "N", n, n, n, &c_b12, &z__[z_offset], ldz, &work[indwrk],
+ n, &c_b13, &work[indwk2], n);
+ slacpy_("A", n, n, &work[indwk2], n, &z__[z_offset], ldz);
+ }
+
+ work[1] = (real) lwmin;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of SSBGVD */
+
+} /* ssbgvd_ */
diff --git a/contrib/libs/clapack/ssbgvx.c b/contrib/libs/clapack/ssbgvx.c
new file mode 100644
index 0000000000..39873e5c5f
--- /dev/null
+++ b/contrib/libs/clapack/ssbgvx.c
@@ -0,0 +1,461 @@
+/* ssbgvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b25 = 1.f;
+static real c_b27 = 0.f;
+
+/* Subroutine */ int ssbgvx_(char *jobz, char *range, char *uplo, integer *n,
+ integer *ka, integer *kb, real *ab, integer *ldab, real *bb, integer *
+ ldbb, real *q, integer *ldq, real *vl, real *vu, integer *il, integer
+ *iu, real *abstol, integer *m, real *w, real *z__, integer *ldz, real
+ *work, integer *iwork, integer *ifail, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, bb_dim1, bb_offset, q_dim1, q_offset, z_dim1,
+ z_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, jj;
+ real tmp1;
+ integer indd, inde;
+ char vect[1];
+ logical test;
+ integer itmp1, indee;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ char order[1];
+ extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), sswap_(integer *, real *, integer *, real *, integer *
+);
+ logical wantz, alleig, indeig;
+ integer indibl;
+ logical valeig;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ integer indisp, indiwo;
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *);
+ integer indwrk;
+ extern /* Subroutine */ int spbstf_(char *, integer *, integer *, real *,
+ integer *, integer *), ssbtrd_(char *, char *, integer *,
+ integer *, real *, integer *, real *, real *, real *, integer *,
+ real *, integer *), ssbgst_(char *, char *,
+ integer *, integer *, integer *, real *, integer *, real *,
+ integer *, real *, integer *, real *, integer *),
+ sstein_(integer *, real *, real *, integer *, real *, integer *,
+ integer *, real *, integer *, real *, integer *, integer *,
+ integer *), ssterf_(integer *, real *, real *, integer *);
+ integer nsplit;
+ extern /* Subroutine */ int sstebz_(char *, char *, integer *, real *,
+ real *, integer *, integer *, real *, real *, real *, integer *,
+ integer *, real *, integer *, integer *, real *, integer *,
+ integer *), ssteqr_(char *, integer *, real *,
+ real *, real *, integer *, real *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSBGVX computes selected eigenvalues, and optionally, eigenvectors */
+/* of a real generalized symmetric-definite banded eigenproblem, of */
+/* the form A*x=(lambda)*B*x. Here A and B are assumed to be symmetric */
+/* and banded, and B is also positive definite. Eigenvalues and */
+/* eigenvectors can be selected by specifying either all eigenvalues, */
+/* a range of values or a range of indices for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* KA (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KA >= 0. */
+
+/* KB (input) INTEGER */
+/* The number of superdiagonals of the matrix B if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KB >= 0. */
+
+/* AB (input/output) REAL array, dimension (LDAB, N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first ka+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(ka+1+i-j,j) = A(i,j) for max(1,j-ka)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+ka). */
+
+/* On exit, the contents of AB are destroyed. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KA+1. */
+
+/* BB (input/output) REAL array, dimension (LDBB, N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix B, stored in the first kb+1 rows of the array. The */
+/* j-th column of B is stored in the j-th column of the array BB */
+/* as follows: */
+/* if UPLO = 'U', BB(ka+1+i-j,j) = B(i,j) for max(1,j-kb)<=i<=j; */
+/* if UPLO = 'L', BB(1+i-j,j) = B(i,j) for j<=i<=min(n,j+kb). */
+
+/* On exit, the factor S from the split Cholesky factorization */
+/* B = S**T*S, as returned by SPBSTF. */
+
+/* LDBB (input) INTEGER */
+/* The leading dimension of the array BB. LDBB >= KB+1. */
+
+/* Q (output) REAL array, dimension (LDQ, N) */
+/* If JOBZ = 'V', the n-by-n matrix used in the reduction of */
+/* A*x = (lambda)*B*x to standard form, i.e. C*x = (lambda)*x, */
+/* and consequently C to tridiagonal form. */
+/* If JOBZ = 'N', the array Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. If JOBZ = 'N', */
+/* LDQ >= 1. If JOBZ = 'V', LDQ >= max(1,N). */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) REAL */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing A to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*SLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*SLAMCH('S'). */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) REAL array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/* eigenvectors, with the i-th column of Z holding the */
+/* eigenvector associated with W(i). The eigenvectors are */
+/* normalized so Z**T*B*Z = I. */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) REAL array, dimension (7N) */
+
+/* IWORK (workspace/output) INTEGER array, dimension (5N) */
+
+/* IFAIL (output) INTEGER array, dimension (M) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvalues that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0 : successful exit */
+/* < 0 : if INFO = -i, the i-th argument had an illegal value */
+/* <= N: if INFO = i, then i eigenvectors failed to converge. */
+/* Their indices are stored in IFAIL. */
+/* > N : SPBSTF returned an error code; i.e., */
+/* if INFO = N + i, for 1 <= i <= N, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ bb_dim1 = *ldbb;
+ bb_offset = 1 + bb_dim1;
+ bb -= bb_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -2;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ka < 0) {
+ *info = -5;
+ } else if (*kb < 0 || *kb > *ka) {
+ *info = -6;
+ } else if (*ldab < *ka + 1) {
+ *info = -8;
+ } else if (*ldbb < *kb + 1) {
+ *info = -10;
+ } else if (*ldq < 1 || wantz && *ldq < *n) {
+ *info = -12;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -14;
+ }
+ } else if (indeig) {
+ if (*il < 1 || *il > max(1,*n)) {
+ *info = -15;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -16;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -21;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSBGVX", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a split Cholesky factorization of B. */
+
+ spbstf_(uplo, n, kb, &bb[bb_offset], ldbb, info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem. */
+
+ ssbgst_(jobz, uplo, n, ka, kb, &ab[ab_offset], ldab, &bb[bb_offset], ldbb,
+ &q[q_offset], ldq, &work[1], &iinfo);
+
+/* Reduce symmetric band matrix to tridiagonal form. */
+
+ indd = 1;
+ inde = indd + *n;
+ indwrk = inde + *n;
+ if (wantz) {
+ *(unsigned char *)vect = 'U';
+ } else {
+ *(unsigned char *)vect = 'N';
+ }
+ ssbtrd_(vect, uplo, n, ka, &ab[ab_offset], ldab, &work[indd], &work[inde],
+ &q[q_offset], ldq, &work[indwrk], &iinfo);
+
+/* If all eigenvalues are desired and ABSTOL is less than or equal */
+/* to zero, then call SSTERF or SSTEQR. If this fails for some */
+/* eigenvalue, then try SSTEBZ. */
+
+ test = FALSE_;
+ if (indeig) {
+ if (*il == 1 && *iu == *n) {
+ test = TRUE_;
+ }
+ }
+ if ((alleig || test) && *abstol <= 0.f) {
+ scopy_(n, &work[indd], &c__1, &w[1], &c__1);
+ indee = indwrk + (*n << 1);
+ i__1 = *n - 1;
+ scopy_(&i__1, &work[inde], &c__1, &work[indee], &c__1);
+ if (! wantz) {
+ ssterf_(n, &w[1], &work[indee], info);
+ } else {
+ slacpy_("A", n, n, &q[q_offset], ldq, &z__[z_offset], ldz);
+ ssteqr_(jobz, n, &w[1], &work[indee], &z__[z_offset], ldz, &work[
+ indwrk], info);
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ifail[i__] = 0;
+/* L10: */
+ }
+ }
+ }
+ if (*info == 0) {
+ *m = *n;
+ goto L30;
+ }
+ *info = 0;
+ }
+
+/* Otherwise, call SSTEBZ and, if eigenvectors are desired, */
+/* call SSTEIN. */
+
+ if (wantz) {
+ *(unsigned char *)order = 'B';
+ } else {
+ *(unsigned char *)order = 'E';
+ }
+ indibl = 1;
+ indisp = indibl + *n;
+ indiwo = indisp + *n;
+ sstebz_(range, order, n, vl, vu, il, iu, abstol, &work[indd], &work[inde],
+ m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &work[indwrk],
+ &iwork[indiwo], info);
+
+ if (wantz) {
+ sstein_(n, &work[indd], &work[inde], m, &w[1], &iwork[indibl], &iwork[
+ indisp], &z__[z_offset], ldz, &work[indwrk], &iwork[indiwo], &
+ ifail[1], info);
+
+/* Apply transformation matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by SSTEIN. */
+
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ scopy_(n, &z__[j * z_dim1 + 1], &c__1, &work[1], &c__1);
+ sgemv_("N", n, n, &c_b25, &q[q_offset], ldq, &work[1], &c__1, &
+ c_b27, &z__[j * z_dim1 + 1], &c__1);
+/* L20: */
+ }
+ }
+
+L30:
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+ if (wantz) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = 0;
+ tmp1 = w[j];
+ i__2 = *m;
+ for (jj = j + 1; jj <= i__2; ++jj) {
+ if (w[jj] < tmp1) {
+ i__ = jj;
+ tmp1 = w[jj];
+ }
+/* L40: */
+ }
+
+ if (i__ != 0) {
+ itmp1 = iwork[indibl + i__ - 1];
+ w[i__] = w[j];
+ iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+ w[j] = tmp1;
+ iwork[indibl + j - 1] = itmp1;
+ sswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+ &c__1);
+ if (*info != 0) {
+ itmp1 = ifail[i__];
+ ifail[i__] = ifail[j];
+ ifail[j] = itmp1;
+ }
+ }
+/* L50: */
+ }
+ }
+
+ return 0;
+
+/* End of SSBGVX */
+
+} /* ssbgvx_ */
diff --git a/contrib/libs/clapack/ssbtrd.c b/contrib/libs/clapack/ssbtrd.c
new file mode 100644
index 0000000000..32b9bcd2aa
--- /dev/null
+++ b/contrib/libs/clapack/ssbtrd.c
@@ -0,0 +1,710 @@
+/* ssbtrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b9 = 0.f;
+static real c_b10 = 1.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int ssbtrd_(char *vect, char *uplo, integer *n, integer *kd,
+ real *ab, integer *ldab, real *d__, real *e, real *q, integer *ldq,
+ real *work, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, q_dim1, q_offset, i__1, i__2, i__3, i__4,
+ i__5;
+
+ /* Local variables */
+ integer i__, j, k, l, i2, j1, j2, nq, nr, kd1, ibl, iqb, kdn, jin, nrt,
+ kdm1, inca, jend, lend, jinc, incx, last;
+ real temp;
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *);
+ integer j1end, j1inc, iqend;
+ extern logical lsame_(char *, char *);
+ logical initq, wantq, upper;
+ extern /* Subroutine */ int slar2v_(integer *, real *, real *, real *,
+ integer *, real *, real *, integer *);
+ integer iqaend;
+ extern /* Subroutine */ int xerbla_(char *, integer *), slaset_(
+ char *, integer *, integer *, real *, real *, real *, integer *), slartg_(real *, real *, real *, real *, real *), slargv_(
+ integer *, real *, integer *, real *, integer *, real *, integer *
+), slartv_(integer *, real *, integer *, real *, integer *, real *
+, real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSBTRD reduces a real symmetric band matrix A to symmetric */
+/* tridiagonal form T by an orthogonal similarity transformation: */
+/* Q**T * A * Q = T. */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* = 'N': do not form Q; */
+/* = 'V': form Q; */
+/* = 'U': update a matrix X, by forming X*Q. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) REAL array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+/* On exit, the diagonal elements of AB are overwritten by the */
+/* diagonal elements of the tridiagonal matrix T; if KD > 0, the */
+/* elements on the first superdiagonal (if UPLO = 'U') or the */
+/* first subdiagonal (if UPLO = 'L') are overwritten by the */
+/* off-diagonal elements of T; the rest of AB is overwritten by */
+/* values generated during the reduction. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* D (output) REAL array, dimension (N) */
+/* The diagonal elements of the tridiagonal matrix T. */
+
+/* E (output) REAL array, dimension (N-1) */
+/* The off-diagonal elements of the tridiagonal matrix T: */
+/* E(i) = T(i,i+1) if UPLO = 'U'; E(i) = T(i+1,i) if UPLO = 'L'. */
+
+/* Q (input/output) REAL array, dimension (LDQ,N) */
+/* On entry, if VECT = 'U', then Q must contain an N-by-N */
+/* matrix X; if VECT = 'N' or 'V', then Q need not be set. */
+
+/* On exit: */
+/* if VECT = 'V', Q contains the N-by-N orthogonal matrix Q; */
+/* if VECT = 'U', Q contains the product X*Q; */
+/* if VECT = 'N', the array Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. */
+/* LDQ >= 1, and LDQ >= N if VECT = 'V' or 'U'. */
+
+/* WORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* Modified by Linda Kaufman, Bell Labs. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --d__;
+ --e;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --work;
+
+ /* Function Body */
+ initq = lsame_(vect, "V");
+ wantq = initq || lsame_(vect, "U");
+ upper = lsame_(uplo, "U");
+ kd1 = *kd + 1;
+ kdm1 = *kd - 1;
+ incx = *ldab - 1;
+ iqend = 1;
+
+ *info = 0;
+ if (! wantq && ! lsame_(vect, "N")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*kd < 0) {
+ *info = -4;
+ } else if (*ldab < kd1) {
+ *info = -6;
+ } else if (*ldq < max(1,*n) && wantq) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSBTRD", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Initialize Q to the unit matrix, if needed */
+
+ if (initq) {
+ slaset_("Full", n, n, &c_b9, &c_b10, &q[q_offset], ldq);
+ }
+
+/* Wherever possible, plane rotations are generated and applied in */
+/* vector operations of length NR over the index set J1:J2:KD1. */
+
+/* The cosines and sines of the plane rotations are stored in the */
+/* arrays D and WORK. */
+
+ inca = kd1 * *ldab;
+/* Computing MIN */
+ i__1 = *n - 1;
+ kdn = min(i__1,*kd);
+ if (upper) {
+
+ if (*kd > 1) {
+
+/* Reduce to tridiagonal form, working with upper triangle */
+
+ nr = 0;
+ j1 = kdn + 2;
+ j2 = 1;
+
+ i__1 = *n - 2;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Reduce i-th row of matrix to tridiagonal form */
+
+ for (k = kdn + 1; k >= 2; --k) {
+ j1 += kdn;
+ j2 += kdn;
+
+ if (nr > 0) {
+
+/* generate plane rotations to annihilate nonzero */
+/* elements which have been created outside the band */
+
+ slargv_(&nr, &ab[(j1 - 1) * ab_dim1 + 1], &inca, &
+ work[j1], &kd1, &d__[j1], &kd1);
+
+/* apply rotations from the right */
+
+
+/* Dependent on the number of diagonals either */
+/* SLARTV or SROT is used */
+
+ if (nr >= (*kd << 1) - 1) {
+ i__2 = *kd - 1;
+ for (l = 1; l <= i__2; ++l) {
+ slartv_(&nr, &ab[l + 1 + (j1 - 1) * ab_dim1],
+ &inca, &ab[l + j1 * ab_dim1], &inca, &
+ d__[j1], &work[j1], &kd1);
+/* L10: */
+ }
+
+ } else {
+ jend = j1 + (nr - 1) * kd1;
+ i__2 = jend;
+ i__3 = kd1;
+ for (jinc = j1; i__3 < 0 ? jinc >= i__2 : jinc <=
+ i__2; jinc += i__3) {
+ srot_(&kdm1, &ab[(jinc - 1) * ab_dim1 + 2], &
+ c__1, &ab[jinc * ab_dim1 + 1], &c__1,
+ &d__[jinc], &work[jinc]);
+/* L20: */
+ }
+ }
+ }
+
+
+ if (k > 2) {
+ if (k <= *n - i__ + 1) {
+
+/* generate plane rotation to annihilate a(i,i+k-1) */
+/* within the band */
+
+ slartg_(&ab[*kd - k + 3 + (i__ + k - 2) * ab_dim1]
+, &ab[*kd - k + 2 + (i__ + k - 1) *
+ ab_dim1], &d__[i__ + k - 1], &work[i__ +
+ k - 1], &temp);
+ ab[*kd - k + 3 + (i__ + k - 2) * ab_dim1] = temp;
+
+/* apply rotation from the right */
+
+ i__3 = k - 3;
+ srot_(&i__3, &ab[*kd - k + 4 + (i__ + k - 2) *
+ ab_dim1], &c__1, &ab[*kd - k + 3 + (i__ +
+ k - 1) * ab_dim1], &c__1, &d__[i__ + k -
+ 1], &work[i__ + k - 1]);
+ }
+ ++nr;
+ j1 = j1 - kdn - 1;
+ }
+
+/* apply plane rotations from both sides to diagonal */
+/* blocks */
+
+ if (nr > 0) {
+ slar2v_(&nr, &ab[kd1 + (j1 - 1) * ab_dim1], &ab[kd1 +
+ j1 * ab_dim1], &ab[*kd + j1 * ab_dim1], &inca,
+ &d__[j1], &work[j1], &kd1);
+ }
+
+/* apply plane rotations from the left */
+
+ if (nr > 0) {
+ if ((*kd << 1) - 1 < nr) {
+
+/* Dependent on the number of diagonals either */
+/* SLARTV or SROT is used */
+
+ i__3 = *kd - 1;
+ for (l = 1; l <= i__3; ++l) {
+ if (j2 + l > *n) {
+ nrt = nr - 1;
+ } else {
+ nrt = nr;
+ }
+ if (nrt > 0) {
+ slartv_(&nrt, &ab[*kd - l + (j1 + l) *
+ ab_dim1], &inca, &ab[*kd - l + 1
+ + (j1 + l) * ab_dim1], &inca, &
+ d__[j1], &work[j1], &kd1);
+ }
+/* L30: */
+ }
+ } else {
+ j1end = j1 + kd1 * (nr - 2);
+ if (j1end >= j1) {
+ i__3 = j1end;
+ i__2 = kd1;
+ for (jin = j1; i__2 < 0 ? jin >= i__3 : jin <=
+ i__3; jin += i__2) {
+ i__4 = *kd - 1;
+ srot_(&i__4, &ab[*kd - 1 + (jin + 1) *
+ ab_dim1], &incx, &ab[*kd + (jin +
+ 1) * ab_dim1], &incx, &d__[jin], &
+ work[jin]);
+/* L40: */
+ }
+ }
+/* Computing MIN */
+ i__2 = kdm1, i__3 = *n - j2;
+ lend = min(i__2,i__3);
+ last = j1end + kd1;
+ if (lend > 0) {
+ srot_(&lend, &ab[*kd - 1 + (last + 1) *
+ ab_dim1], &incx, &ab[*kd + (last + 1)
+ * ab_dim1], &incx, &d__[last], &work[
+ last]);
+ }
+ }
+ }
+
+ if (wantq) {
+
+/* accumulate product of plane rotations in Q */
+
+ if (initq) {
+
+/* take advantage of the fact that Q was */
+/* initially the Identity matrix */
+
+ iqend = max(iqend,j2);
+/* Computing MAX */
+ i__2 = 0, i__3 = k - 3;
+ i2 = max(i__2,i__3);
+ iqaend = i__ * *kd + 1;
+ if (k == 2) {
+ iqaend += *kd;
+ }
+ iqaend = min(iqaend,iqend);
+ i__2 = j2;
+ i__3 = kd1;
+ for (j = j1; i__3 < 0 ? j >= i__2 : j <= i__2; j
+ += i__3) {
+ ibl = i__ - i2 / kdm1;
+ ++i2;
+/* Computing MAX */
+ i__4 = 1, i__5 = j - ibl;
+ iqb = max(i__4,i__5);
+ nq = iqaend + 1 - iqb;
+/* Computing MIN */
+ i__4 = iqaend + *kd;
+ iqaend = min(i__4,iqend);
+ srot_(&nq, &q[iqb + (j - 1) * q_dim1], &c__1,
+ &q[iqb + j * q_dim1], &c__1, &d__[j],
+ &work[j]);
+/* L50: */
+ }
+ } else {
+
+ i__3 = j2;
+ i__2 = kd1;
+ for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j
+ += i__2) {
+ srot_(n, &q[(j - 1) * q_dim1 + 1], &c__1, &q[
+ j * q_dim1 + 1], &c__1, &d__[j], &
+ work[j]);
+/* L60: */
+ }
+ }
+
+ }
+
+ if (j2 + kdn > *n) {
+
+/* adjust J2 to keep within the bounds of the matrix */
+
+ --nr;
+ j2 = j2 - kdn - 1;
+ }
+
+ i__2 = j2;
+ i__3 = kd1;
+ for (j = j1; i__3 < 0 ? j >= i__2 : j <= i__2; j += i__3)
+ {
+
+/* create nonzero element a(j-1,j+kd) outside the band */
+/* and store it in WORK */
+
+ work[j + *kd] = work[j] * ab[(j + *kd) * ab_dim1 + 1];
+ ab[(j + *kd) * ab_dim1 + 1] = d__[j] * ab[(j + *kd) *
+ ab_dim1 + 1];
+/* L70: */
+ }
+/* L80: */
+ }
+/* L90: */
+ }
+ }
+
+ if (*kd > 0) {
+
+/* copy off-diagonal elements to E */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ e[i__] = ab[*kd + (i__ + 1) * ab_dim1];
+/* L100: */
+ }
+ } else {
+
+/* set E to zero if original matrix was diagonal */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ e[i__] = 0.f;
+/* L110: */
+ }
+ }
+
+/* copy diagonal elements to D */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = ab[kd1 + i__ * ab_dim1];
+/* L120: */
+ }
+
+ } else {
+
+ if (*kd > 1) {
+
+/* Reduce to tridiagonal form, working with lower triangle */
+
+ nr = 0;
+ j1 = kdn + 2;
+ j2 = 1;
+
+ i__1 = *n - 2;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Reduce i-th column of matrix to tridiagonal form */
+
+ for (k = kdn + 1; k >= 2; --k) {
+ j1 += kdn;
+ j2 += kdn;
+
+ if (nr > 0) {
+
+/* generate plane rotations to annihilate nonzero */
+/* elements which have been created outside the band */
+
+ slargv_(&nr, &ab[kd1 + (j1 - kd1) * ab_dim1], &inca, &
+ work[j1], &kd1, &d__[j1], &kd1);
+
+/* apply plane rotations from one side */
+
+
+/* Dependent on the number of diagonals either */
+/* SLARTV or SROT is used */
+
+ if (nr > (*kd << 1) - 1) {
+ i__3 = *kd - 1;
+ for (l = 1; l <= i__3; ++l) {
+ slartv_(&nr, &ab[kd1 - l + (j1 - kd1 + l) *
+ ab_dim1], &inca, &ab[kd1 - l + 1 + (
+ j1 - kd1 + l) * ab_dim1], &inca, &d__[
+ j1], &work[j1], &kd1);
+/* L130: */
+ }
+ } else {
+ jend = j1 + kd1 * (nr - 1);
+ i__3 = jend;
+ i__2 = kd1;
+ for (jinc = j1; i__2 < 0 ? jinc >= i__3 : jinc <=
+ i__3; jinc += i__2) {
+ srot_(&kdm1, &ab[*kd + (jinc - *kd) * ab_dim1]
+, &incx, &ab[kd1 + (jinc - *kd) *
+ ab_dim1], &incx, &d__[jinc], &work[
+ jinc]);
+/* L140: */
+ }
+ }
+
+ }
+
+ if (k > 2) {
+ if (k <= *n - i__ + 1) {
+
+/* generate plane rotation to annihilate a(i+k-1,i) */
+/* within the band */
+
+ slartg_(&ab[k - 1 + i__ * ab_dim1], &ab[k + i__ *
+ ab_dim1], &d__[i__ + k - 1], &work[i__ +
+ k - 1], &temp);
+ ab[k - 1 + i__ * ab_dim1] = temp;
+
+/* apply rotation from the left */
+
+ i__2 = k - 3;
+ i__3 = *ldab - 1;
+ i__4 = *ldab - 1;
+ srot_(&i__2, &ab[k - 2 + (i__ + 1) * ab_dim1], &
+ i__3, &ab[k - 1 + (i__ + 1) * ab_dim1], &
+ i__4, &d__[i__ + k - 1], &work[i__ + k -
+ 1]);
+ }
+ ++nr;
+ j1 = j1 - kdn - 1;
+ }
+
+/* apply plane rotations from both sides to diagonal */
+/* blocks */
+
+ if (nr > 0) {
+ slar2v_(&nr, &ab[(j1 - 1) * ab_dim1 + 1], &ab[j1 *
+ ab_dim1 + 1], &ab[(j1 - 1) * ab_dim1 + 2], &
+ inca, &d__[j1], &work[j1], &kd1);
+ }
+
+/* apply plane rotations from the right */
+
+
+/* Dependent on the number of diagonals either */
+/* SLARTV or SROT is used */
+
+ if (nr > 0) {
+ if (nr > (*kd << 1) - 1) {
+ i__2 = *kd - 1;
+ for (l = 1; l <= i__2; ++l) {
+ if (j2 + l > *n) {
+ nrt = nr - 1;
+ } else {
+ nrt = nr;
+ }
+ if (nrt > 0) {
+ slartv_(&nrt, &ab[l + 2 + (j1 - 1) *
+ ab_dim1], &inca, &ab[l + 1 + j1 *
+ ab_dim1], &inca, &d__[j1], &work[
+ j1], &kd1);
+ }
+/* L150: */
+ }
+ } else {
+ j1end = j1 + kd1 * (nr - 2);
+ if (j1end >= j1) {
+ i__2 = j1end;
+ i__3 = kd1;
+ for (j1inc = j1; i__3 < 0 ? j1inc >= i__2 :
+ j1inc <= i__2; j1inc += i__3) {
+ srot_(&kdm1, &ab[(j1inc - 1) * ab_dim1 +
+ 3], &c__1, &ab[j1inc * ab_dim1 +
+ 2], &c__1, &d__[j1inc], &work[
+ j1inc]);
+/* L160: */
+ }
+ }
+/* Computing MIN */
+ i__3 = kdm1, i__2 = *n - j2;
+ lend = min(i__3,i__2);
+ last = j1end + kd1;
+ if (lend > 0) {
+ srot_(&lend, &ab[(last - 1) * ab_dim1 + 3], &
+ c__1, &ab[last * ab_dim1 + 2], &c__1,
+ &d__[last], &work[last]);
+ }
+ }
+ }
+
+
+
+ if (wantq) {
+
+/* accumulate product of plane rotations in Q */
+
+ if (initq) {
+
+/* take advantage of the fact that Q was */
+/* initially the Identity matrix */
+
+ iqend = max(iqend,j2);
+/* Computing MAX */
+ i__3 = 0, i__2 = k - 3;
+ i2 = max(i__3,i__2);
+ iqaend = i__ * *kd + 1;
+ if (k == 2) {
+ iqaend += *kd;
+ }
+ iqaend = min(iqaend,iqend);
+ i__3 = j2;
+ i__2 = kd1;
+ for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j
+ += i__2) {
+ ibl = i__ - i2 / kdm1;
+ ++i2;
+/* Computing MAX */
+ i__4 = 1, i__5 = j - ibl;
+ iqb = max(i__4,i__5);
+ nq = iqaend + 1 - iqb;
+/* Computing MIN */
+ i__4 = iqaend + *kd;
+ iqaend = min(i__4,iqend);
+ srot_(&nq, &q[iqb + (j - 1) * q_dim1], &c__1,
+ &q[iqb + j * q_dim1], &c__1, &d__[j],
+ &work[j]);
+/* L170: */
+ }
+ } else {
+
+ i__2 = j2;
+ i__3 = kd1;
+ for (j = j1; i__3 < 0 ? j >= i__2 : j <= i__2; j
+ += i__3) {
+ srot_(n, &q[(j - 1) * q_dim1 + 1], &c__1, &q[
+ j * q_dim1 + 1], &c__1, &d__[j], &
+ work[j]);
+/* L180: */
+ }
+ }
+ }
+
+ if (j2 + kdn > *n) {
+
+/* adjust J2 to keep within the bounds of the matrix */
+
+ --nr;
+ j2 = j2 - kdn - 1;
+ }
+
+ i__3 = j2;
+ i__2 = kd1;
+ for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j += i__2)
+ {
+
+/* create nonzero element a(j+kd,j-1) outside the */
+/* band and store it in WORK */
+
+ work[j + *kd] = work[j] * ab[kd1 + j * ab_dim1];
+ ab[kd1 + j * ab_dim1] = d__[j] * ab[kd1 + j * ab_dim1]
+ ;
+/* L190: */
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ }
+
+ if (*kd > 0) {
+
+/* copy off-diagonal elements to E */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ e[i__] = ab[i__ * ab_dim1 + 2];
+/* L220: */
+ }
+ } else {
+
+/* set E to zero if original matrix was diagonal */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ e[i__] = 0.f;
+/* L230: */
+ }
+ }
+
+/* copy diagonal elements to D */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = ab[i__ * ab_dim1 + 1];
+/* L240: */
+ }
+ }
+
+ return 0;
+
+/* End of SSBTRD */
+
+} /* ssbtrd_ */
diff --git a/contrib/libs/clapack/ssfrk.c b/contrib/libs/clapack/ssfrk.c
new file mode 100644
index 0000000000..0b841597e3
--- /dev/null
+++ b/contrib/libs/clapack/ssfrk.c
@@ -0,0 +1,516 @@
+/* ssfrk.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ssfrk_(char *transr, char *uplo, char *trans, integer *n,
+ integer *k, real *alpha, real *a, integer *lda, real *beta, real *
+ c__)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1;
+
+ /* Local variables */
+ integer j, n1, n2, nk, info;
+ logical normaltransr;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ integer nrowa;
+ logical lower;
+ extern /* Subroutine */ int ssyrk_(char *, char *, integer *, integer *,
+ real *, real *, integer *, real *, real *, integer *), xerbla_(char *, integer *);
+ logical nisodd, notrans;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Julien Langou of the Univ. of Colorado Denver -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Level 3 BLAS like routine for C in RFP Format. */
+
+/* SSFRK performs one of the symmetric rank--k operations */
+
+/* C := alpha*A*A' + beta*C, */
+
+/* or */
+
+/* C := alpha*A'*A + beta*C, */
+
+/* where alpha and beta are real scalars, C is an n--by--n symmetric */
+/* matrix and A is an n--by--k matrix in the first case and a k--by--n */
+/* matrix in the second case. */
+
+/* Arguments */
+/* ========== */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': The Normal Form of RFP A is stored; */
+/* = 'T': The Transpose Form of RFP A is stored. */
+
+/* UPLO - (input) CHARACTER */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array C is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of C */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of C */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* TRANS - (input) CHARACTER */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' C := alpha*A*A' + beta*C. */
+
+/* TRANS = 'T' or 't' C := alpha*A'*A + beta*C. */
+
+/* Unchanged on exit. */
+
+/* N - (input) INTEGER. */
+/* On entry, N specifies the order of the matrix C. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* K - (input) INTEGER. */
+/* On entry with TRANS = 'N' or 'n', K specifies the number */
+/* of columns of the matrix A, and on entry with TRANS = 'T' */
+/* or 't', K specifies the number of rows of the matrix A. K */
+/* must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - (input) REAL. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - (input) REAL array of DIMENSION ( LDA, ka ), where KA */
+/* is K when TRANS = 'N' or 'n', and is N otherwise. Before */
+/* entry with TRANS = 'N' or 'n', the leading N--by--K part of */
+/* the array A must contain the matrix A, otherwise the leading */
+/* K--by--N part of the array A must contain the matrix A. */
+/* Unchanged on exit. */
+
+/* LDA - (input) INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDA must be at least max( 1, n ), otherwise LDA must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* BETA - (input) REAL. */
+/* On entry, BETA specifies the scalar beta. */
+/* Unchanged on exit. */
+
+
+/* C - (input/output) REAL array, dimension ( NT ); */
+/* NT = N*(N+1)/2. On entry, the symmetric matrix C in RFP */
+/* Format. RFP Format is described by TRANSR, UPLO and N. */
+
+/* Arguments */
+/* ========== */
+
+/* .. */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --c__;
+
+ /* Function Body */
+ info = 0;
+ normaltransr = lsame_(transr, "N");
+ lower = lsame_(uplo, "L");
+ notrans = lsame_(trans, "N");
+
+ if (notrans) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+
+ if (! normaltransr && ! lsame_(transr, "T")) {
+ info = -1;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ info = -2;
+ } else if (! notrans && ! lsame_(trans, "T")) {
+ info = -3;
+ } else if (*n < 0) {
+ info = -4;
+ } else if (*k < 0) {
+ info = -5;
+ } else if (*lda < max(1,nrowa)) {
+ info = -8;
+ }
+ if (info != 0) {
+ i__1 = -info;
+ xerbla_("SSFRK ", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+/* The quick return case: ((ALPHA.EQ.0).AND.(BETA.NE.ZERO)) is not */
+/* done (it is in SSYRK for example) and left in the general case. */
+
+ if (*n == 0 || (*alpha == 0.f || *k == 0) && *beta == 1.f) {
+ return 0;
+ }
+
+ if (*alpha == 0.f && *beta == 0.f) {
+ i__1 = *n * (*n + 1) / 2;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j] = 0.f;
+ }
+ return 0;
+ }
+
+/* C is N-by-N. */
+/* If N is odd, set NISODD = .TRUE., and N1 and N2. */
+/* If N is even, NISODD = .FALSE., and NK. */
+
+ if (*n % 2 == 0) {
+ nisodd = FALSE_;
+ nk = *n / 2;
+ } else {
+ nisodd = TRUE_;
+ if (lower) {
+ n2 = *n / 2;
+ n1 = *n - n2;
+ } else {
+ n1 = *n / 2;
+ n2 = *n - n1;
+ }
+ }
+
+ if (nisodd) {
+
+/* N is odd */
+
+ if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+ if (lower) {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* N is odd, TRANSR = 'N', UPLO = 'L', and TRANS = 'N' */
+
+ ssyrk_("L", "N", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[1], n);
+ ssyrk_("U", "N", &n2, k, alpha, &a[n1 + 1 + a_dim1], lda,
+ beta, &c__[*n + 1], n);
+ sgemm_("N", "T", &n2, &n1, k, alpha, &a[n1 + 1 + a_dim1],
+ lda, &a[a_dim1 + 1], lda, beta, &c__[n1 + 1], n);
+
+ } else {
+
+/* N is odd, TRANSR = 'N', UPLO = 'L', and TRANS = 'T' */
+
+ ssyrk_("L", "T", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[1], n);
+ ssyrk_("U", "T", &n2, k, alpha, &a[(n1 + 1) * a_dim1 + 1],
+ lda, beta, &c__[*n + 1], n)
+ ;
+ sgemm_("T", "N", &n2, &n1, k, alpha, &a[(n1 + 1) * a_dim1
+ + 1], lda, &a[a_dim1 + 1], lda, beta, &c__[n1 + 1]
+, n);
+
+ }
+
+ } else {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* N is odd, TRANSR = 'N', UPLO = 'U', and TRANS = 'N' */
+
+ ssyrk_("L", "N", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[n2 + 1], n);
+ ssyrk_("U", "N", &n2, k, alpha, &a[n2 + a_dim1], lda,
+ beta, &c__[n1 + 1], n);
+ sgemm_("N", "T", &n1, &n2, k, alpha, &a[a_dim1 + 1], lda,
+ &a[n2 + a_dim1], lda, beta, &c__[1], n);
+
+ } else {
+
+/* N is odd, TRANSR = 'N', UPLO = 'U', and TRANS = 'T' */
+
+ ssyrk_("L", "T", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[n2 + 1], n);
+ ssyrk_("U", "T", &n2, k, alpha, &a[n2 * a_dim1 + 1], lda,
+ beta, &c__[n1 + 1], n);
+ sgemm_("T", "N", &n1, &n2, k, alpha, &a[a_dim1 + 1], lda,
+ &a[n2 * a_dim1 + 1], lda, beta, &c__[1], n);
+
+ }
+
+ }
+
+ } else {
+
+/* N is odd, and TRANSR = 'T' */
+
+ if (lower) {
+
+/* N is odd, TRANSR = 'T', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* N is odd, TRANSR = 'T', UPLO = 'L', and TRANS = 'N' */
+
+ ssyrk_("U", "N", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[1], &n1);
+ ssyrk_("L", "N", &n2, k, alpha, &a[n1 + 1 + a_dim1], lda,
+ beta, &c__[2], &n1);
+ sgemm_("N", "T", &n1, &n2, k, alpha, &a[a_dim1 + 1], lda,
+ &a[n1 + 1 + a_dim1], lda, beta, &c__[n1 * n1 + 1],
+ &n1);
+
+ } else {
+
+/* N is odd, TRANSR = 'T', UPLO = 'L', and TRANS = 'T' */
+
+ ssyrk_("U", "T", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[1], &n1);
+ ssyrk_("L", "T", &n2, k, alpha, &a[(n1 + 1) * a_dim1 + 1],
+ lda, beta, &c__[2], &n1);
+ sgemm_("T", "N", &n1, &n2, k, alpha, &a[a_dim1 + 1], lda,
+ &a[(n1 + 1) * a_dim1 + 1], lda, beta, &c__[n1 *
+ n1 + 1], &n1);
+
+ }
+
+ } else {
+
+/* N is odd, TRANSR = 'T', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* N is odd, TRANSR = 'T', UPLO = 'U', and TRANS = 'N' */
+
+ ssyrk_("U", "N", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[n2 * n2 + 1], &n2);
+ ssyrk_("L", "N", &n2, k, alpha, &a[n1 + 1 + a_dim1], lda,
+ beta, &c__[n1 * n2 + 1], &n2);
+ sgemm_("N", "T", &n2, &n1, k, alpha, &a[n1 + 1 + a_dim1],
+ lda, &a[a_dim1 + 1], lda, beta, &c__[1], &n2);
+
+ } else {
+
+/* N is odd, TRANSR = 'T', UPLO = 'U', and TRANS = 'T' */
+
+ ssyrk_("U", "T", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[n2 * n2 + 1], &n2);
+ ssyrk_("L", "T", &n2, k, alpha, &a[(n1 + 1) * a_dim1 + 1],
+ lda, beta, &c__[n1 * n2 + 1], &n2);
+ sgemm_("T", "N", &n2, &n1, k, alpha, &a[(n1 + 1) * a_dim1
+ + 1], lda, &a[a_dim1 + 1], lda, beta, &c__[1], &
+ n2);
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* N is even */
+
+ if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+ if (lower) {
+
+/* N is even, TRANSR = 'N', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* N is even, TRANSR = 'N', UPLO = 'L', and TRANS = 'N' */
+
+ i__1 = *n + 1;
+ ssyrk_("L", "N", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[2], &i__1);
+ i__1 = *n + 1;
+ ssyrk_("U", "N", &nk, k, alpha, &a[nk + 1 + a_dim1], lda,
+ beta, &c__[1], &i__1);
+ i__1 = *n + 1;
+ sgemm_("N", "T", &nk, &nk, k, alpha, &a[nk + 1 + a_dim1],
+ lda, &a[a_dim1 + 1], lda, beta, &c__[nk + 2], &
+ i__1);
+
+ } else {
+
+/* N is even, TRANSR = 'N', UPLO = 'L', and TRANS = 'T' */
+
+ i__1 = *n + 1;
+ ssyrk_("L", "T", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[2], &i__1);
+ i__1 = *n + 1;
+ ssyrk_("U", "T", &nk, k, alpha, &a[(nk + 1) * a_dim1 + 1],
+ lda, beta, &c__[1], &i__1);
+ i__1 = *n + 1;
+ sgemm_("T", "N", &nk, &nk, k, alpha, &a[(nk + 1) * a_dim1
+ + 1], lda, &a[a_dim1 + 1], lda, beta, &c__[nk + 2]
+, &i__1);
+
+ }
+
+ } else {
+
+/* N is even, TRANSR = 'N', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* N is even, TRANSR = 'N', UPLO = 'U', and TRANS = 'N' */
+
+ i__1 = *n + 1;
+ ssyrk_("L", "N", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[nk + 2], &i__1);
+ i__1 = *n + 1;
+ ssyrk_("U", "N", &nk, k, alpha, &a[nk + 1 + a_dim1], lda,
+ beta, &c__[nk + 1], &i__1);
+ i__1 = *n + 1;
+ sgemm_("N", "T", &nk, &nk, k, alpha, &a[a_dim1 + 1], lda,
+ &a[nk + 1 + a_dim1], lda, beta, &c__[1], &i__1);
+
+ } else {
+
+/* N is even, TRANSR = 'N', UPLO = 'U', and TRANS = 'T' */
+
+ i__1 = *n + 1;
+ ssyrk_("L", "T", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[nk + 2], &i__1);
+ i__1 = *n + 1;
+ ssyrk_("U", "T", &nk, k, alpha, &a[(nk + 1) * a_dim1 + 1],
+ lda, beta, &c__[nk + 1], &i__1);
+ i__1 = *n + 1;
+ sgemm_("T", "N", &nk, &nk, k, alpha, &a[a_dim1 + 1], lda,
+ &a[(nk + 1) * a_dim1 + 1], lda, beta, &c__[1], &
+ i__1);
+
+ }
+
+ }
+
+ } else {
+
+/* N is even, and TRANSR = 'T' */
+
+ if (lower) {
+
+/* N is even, TRANSR = 'T', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* N is even, TRANSR = 'T', UPLO = 'L', and TRANS = 'N' */
+
+ ssyrk_("U", "N", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[nk + 1], &nk);
+ ssyrk_("L", "N", &nk, k, alpha, &a[nk + 1 + a_dim1], lda,
+ beta, &c__[1], &nk);
+ sgemm_("N", "T", &nk, &nk, k, alpha, &a[a_dim1 + 1], lda,
+ &a[nk + 1 + a_dim1], lda, beta, &c__[(nk + 1) *
+ nk + 1], &nk);
+
+ } else {
+
+/* N is even, TRANSR = 'T', UPLO = 'L', and TRANS = 'T' */
+
+ ssyrk_("U", "T", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[nk + 1], &nk);
+ ssyrk_("L", "T", &nk, k, alpha, &a[(nk + 1) * a_dim1 + 1],
+ lda, beta, &c__[1], &nk);
+ sgemm_("T", "N", &nk, &nk, k, alpha, &a[a_dim1 + 1], lda,
+ &a[(nk + 1) * a_dim1 + 1], lda, beta, &c__[(nk +
+ 1) * nk + 1], &nk);
+
+ }
+
+ } else {
+
+/* N is even, TRANSR = 'T', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* N is even, TRANSR = 'T', UPLO = 'U', and TRANS = 'N' */
+
+ ssyrk_("U", "N", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[nk * (nk + 1) + 1], &nk);
+ ssyrk_("L", "N", &nk, k, alpha, &a[nk + 1 + a_dim1], lda,
+ beta, &c__[nk * nk + 1], &nk);
+ sgemm_("N", "T", &nk, &nk, k, alpha, &a[nk + 1 + a_dim1],
+ lda, &a[a_dim1 + 1], lda, beta, &c__[1], &nk);
+
+ } else {
+
+/* N is even, TRANSR = 'T', UPLO = 'U', and TRANS = 'T' */
+
+ ssyrk_("U", "T", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[nk * (nk + 1) + 1], &nk);
+ ssyrk_("L", "T", &nk, k, alpha, &a[(nk + 1) * a_dim1 + 1],
+ lda, beta, &c__[nk * nk + 1], &nk);
+ sgemm_("T", "N", &nk, &nk, k, alpha, &a[(nk + 1) * a_dim1
+ + 1], lda, &a[a_dim1 + 1], lda, beta, &c__[1], &
+ nk);
+
+ }
+
+ }
+
+ }
+
+ }
+
+ return 0;
+
+/* End of SSFRK */
+
+} /* ssfrk_ */
diff --git a/contrib/libs/clapack/sspcon.c b/contrib/libs/clapack/sspcon.c
new file mode 100644
index 0000000000..efbc487c8f
--- /dev/null
+++ b/contrib/libs/clapack/sspcon.c
@@ -0,0 +1,196 @@
+/* sspcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sspcon_(char *uplo, integer *n, real *ap, integer *ipiv,
+ real *anorm, real *rcond, real *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ integer i__, ip, kase;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ logical upper;
+ extern /* Subroutine */ int slacn2_(integer *, real *, real *, integer *,
+ real *, integer *, integer *), xerbla_(char *, integer *);
+ real ainvnm;
+ extern /* Subroutine */ int ssptrs_(char *, integer *, integer *, real *,
+ integer *, real *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSPCON estimates the reciprocal of the condition number (in the */
+/* 1-norm) of a real symmetric packed matrix A using the factorization */
+/* A = U*D*U**T or A = L*D*L**T computed by SSPTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**T; */
+/* = 'L': Lower triangular, form is A = L*D*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) REAL array, dimension (N*(N+1)/2) */
+/* The block diagonal matrix D and the multipliers used to */
+/* obtain the factor U or L as computed by SSPTRF, stored as a */
+/* packed triangular matrix. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by SSPTRF. */
+
+/* ANORM (input) REAL */
+/* The 1-norm of the original matrix A. */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) REAL array, dimension (2*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --iwork;
+ --work;
+ --ipiv;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*anorm < 0.f) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSPCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.f;
+ if (*n == 0) {
+ *rcond = 1.f;
+ return 0;
+ } else if (*anorm <= 0.f) {
+ return 0;
+ }
+
+/* Check that the diagonal matrix D is nonsingular. */
+
+ if (upper) {
+
+/* Upper triangular storage: examine D from bottom to top */
+
+ ip = *n * (*n + 1) / 2;
+ for (i__ = *n; i__ >= 1; --i__) {
+ if (ipiv[i__] > 0 && ap[ip] == 0.f) {
+ return 0;
+ }
+ ip -= i__;
+/* L10: */
+ }
+ } else {
+
+/* Lower triangular storage: examine D from top to bottom. */
+
+ ip = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (ipiv[i__] > 0 && ap[ip] == 0.f) {
+ return 0;
+ }
+ ip = ip + *n - i__ + 1;
+/* L20: */
+ }
+ }
+
+/* Estimate the 1-norm of the inverse. */
+
+ kase = 0;
+L30:
+ slacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+
+/* Multiply by inv(L*D*L') or inv(U*D*U'). */
+
+ ssptrs_(uplo, n, &c__1, &ap[1], &ipiv[1], &work[1], n, info);
+ goto L30;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.f) {
+ *rcond = 1.f / ainvnm / *anorm;
+ }
+
+ return 0;
+
+/* End of SSPCON */
+
+} /* sspcon_ */
diff --git a/contrib/libs/clapack/sspev.c b/contrib/libs/clapack/sspev.c
new file mode 100644
index 0000000000..7b6176f0d1
--- /dev/null
+++ b/contrib/libs/clapack/sspev.c
@@ -0,0 +1,240 @@
+/* sspev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sspev_(char *jobz, char *uplo, integer *n, real *ap,
+ real *w, real *z__, integer *ldz, real *work, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ real eps;
+ integer inde;
+ real anrm;
+ integer imax;
+ real rmin, rmax, sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ logical wantz;
+ integer iscale;
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ integer indtau, indwrk;
+ extern doublereal slansp_(char *, char *, integer *, real *, real *);
+ extern /* Subroutine */ int ssterf_(integer *, real *, real *, integer *);
+ real smlnum;
+ extern /* Subroutine */ int sopgtr_(char *, integer *, real *, real *,
+ real *, integer *, real *, integer *), ssptrd_(char *,
+ integer *, real *, real *, real *, real *, integer *),
+ ssteqr_(char *, integer *, real *, real *, real *, integer *,
+ real *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSPEV computes all the eigenvalues and, optionally, eigenvectors of a */
+/* real symmetric matrix A in packed storage. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) REAL array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, AP is overwritten by values generated during the */
+/* reduction to tridiagonal form. If UPLO = 'U', the diagonal */
+/* and first superdiagonal of the tridiagonal matrix T overwrite */
+/* the corresponding elements of A, and if UPLO = 'L', the */
+/* diagonal and first subdiagonal of T overwrite the */
+/* corresponding elements of A. */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) REAL array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/* eigenvectors of the matrix A, with the i-th column of Z */
+/* holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, the algorithm failed to converge; i */
+/* off-diagonal elements of an intermediate tridiagonal */
+/* form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (lsame_(uplo, "U") || lsame_(uplo,
+ "L"))) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -7;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSPEV ", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ w[1] = ap[1];
+ if (wantz) {
+ z__[z_dim1 + 1] = 1.f;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = slamch_("Safe minimum");
+ eps = slamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1.f / smlnum;
+ rmin = sqrt(smlnum);
+ rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ anrm = slansp_("M", uplo, n, &ap[1], &work[1]);
+ iscale = 0;
+ if (anrm > 0.f && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ i__1 = *n * (*n + 1) / 2;
+ sscal_(&i__1, &sigma, &ap[1], &c__1);
+ }
+
+/* Call SSPTRD to reduce symmetric packed matrix to tridiagonal form. */
+
+ inde = 1;
+ indtau = inde + *n;
+ ssptrd_(uplo, n, &ap[1], &w[1], &work[inde], &work[indtau], &iinfo);
+
+/* For eigenvalues only, call SSTERF. For eigenvectors, first call */
+/* SOPGTR to generate the orthogonal matrix, then call SSTEQR. */
+
+ if (! wantz) {
+ ssterf_(n, &w[1], &work[inde], info);
+ } else {
+ indwrk = indtau + *n;
+ sopgtr_(uplo, n, &ap[1], &work[indtau], &z__[z_offset], ldz, &work[
+ indwrk], &iinfo);
+ ssteqr_(jobz, n, &w[1], &work[inde], &z__[z_offset], ldz, &work[
+ indtau], info);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *n;
+ } else {
+ imax = *info - 1;
+ }
+ r__1 = 1.f / sigma;
+ sscal_(&imax, &r__1, &w[1], &c__1);
+ }
+
+ return 0;
+
+/* End of SSPEV */
+
+} /* sspev_ */
diff --git a/contrib/libs/clapack/sspevd.c b/contrib/libs/clapack/sspevd.c
new file mode 100644
index 0000000000..7abae02d77
--- /dev/null
+++ b/contrib/libs/clapack/sspevd.c
@@ -0,0 +1,310 @@
+/* sspevd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sspevd_(char *jobz, char *uplo, integer *n, real *ap,
+ real *w, real *z__, integer *ldz, real *work, integer *lwork, integer
+ *iwork, integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ real eps;
+ integer inde;
+ real anrm, rmin, rmax, sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ integer lwmin;
+ logical wantz;
+ integer iscale;
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ integer indtau;
+ extern /* Subroutine */ int sstedc_(char *, integer *, real *, real *,
+ real *, integer *, real *, integer *, integer *, integer *,
+ integer *);
+ integer indwrk, liwmin;
+ extern doublereal slansp_(char *, char *, integer *, real *, real *);
+ extern /* Subroutine */ int ssterf_(integer *, real *, real *, integer *);
+ integer llwork;
+ real smlnum;
+ extern /* Subroutine */ int ssptrd_(char *, integer *, real *, real *,
+ real *, real *, integer *);
+ logical lquery;
+ extern /* Subroutine */ int sopmtr_(char *, char *, char *, integer *,
+ integer *, real *, real *, real *, integer *, real *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSPEVD computes all the eigenvalues and, optionally, eigenvectors */
+/* of a real symmetric matrix A in packed storage. If eigenvectors are */
+/* desired, it uses a divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) REAL array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, AP is overwritten by values generated during the */
+/* reduction to tridiagonal form. If UPLO = 'U', the diagonal */
+/* and first superdiagonal of the tridiagonal matrix T overwrite */
+/* the corresponding elements of A, and if UPLO = 'L', the */
+/* diagonal and first subdiagonal of T overwrite the */
+/* corresponding elements of A. */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) REAL array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/* eigenvectors of the matrix A, with the i-th column of Z */
+/* holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the required LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If N <= 1, LWORK must be at least 1. */
+/* If JOBZ = 'N' and N > 1, LWORK must be at least 2*N. */
+/* If JOBZ = 'V' and N > 1, LWORK must be at least */
+/* 1 + 6*N + N**2. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the required sizes of the WORK and IWORK */
+/* arrays, returns these values as the first entries of the WORK */
+/* and IWORK arrays, and no error message related to LWORK or */
+/* LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the required LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If JOBZ = 'N' or N <= 1, LIWORK must be at least 1. */
+/* If JOBZ = 'V' and N > 1, LIWORK must be at least 3 + 5*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the required sizes of the WORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK and IWORK arrays, and no error message related to */
+/* LWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, the algorithm failed to converge; i */
+/* off-diagonal elements of an intermediate tridiagonal */
+/* form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ lquery = *lwork == -1 || *liwork == -1;
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (lsame_(uplo, "U") || lsame_(uplo,
+ "L"))) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -7;
+ }
+
+ if (*info == 0) {
+ if (*n <= 1) {
+ liwmin = 1;
+ lwmin = 1;
+ } else {
+ if (wantz) {
+ liwmin = *n * 5 + 3;
+/* Computing 2nd power */
+ i__1 = *n;
+ lwmin = *n * 6 + 1 + i__1 * i__1;
+ } else {
+ liwmin = 1;
+ lwmin = *n << 1;
+ }
+ }
+ iwork[1] = liwmin;
+ work[1] = (real) lwmin;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -9;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -11;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSPEVD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ w[1] = ap[1];
+ if (wantz) {
+ z__[z_dim1 + 1] = 1.f;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = slamch_("Safe minimum");
+ eps = slamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1.f / smlnum;
+ rmin = sqrt(smlnum);
+ rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ anrm = slansp_("M", uplo, n, &ap[1], &work[1]);
+ iscale = 0;
+ if (anrm > 0.f && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ i__1 = *n * (*n + 1) / 2;
+ sscal_(&i__1, &sigma, &ap[1], &c__1);
+ }
+
+/* Call SSPTRD to reduce symmetric packed matrix to tridiagonal form. */
+
+ inde = 1;
+ indtau = inde + *n;
+ ssptrd_(uplo, n, &ap[1], &w[1], &work[inde], &work[indtau], &iinfo);
+
+/* For eigenvalues only, call SSTERF. For eigenvectors, first call */
+/* SSTEDC to generate the eigenvector matrix, WORK(INDWRK), of the */
+/* tridiagonal matrix, then call SOPMTR to multiply it by the */
+/* Householder transformations represented in AP. */
+
+ if (! wantz) {
+ ssterf_(n, &w[1], &work[inde], info);
+ } else {
+ indwrk = indtau + *n;
+ llwork = *lwork - indwrk + 1;
+ sstedc_("I", n, &w[1], &work[inde], &z__[z_offset], ldz, &work[indwrk]
+, &llwork, &iwork[1], liwork, info);
+ sopmtr_("L", uplo, "N", n, n, &ap[1], &work[indtau], &z__[z_offset],
+ ldz, &work[indwrk], &iinfo);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+ if (iscale == 1) {
+ r__1 = 1.f / sigma;
+ sscal_(n, &r__1, &w[1], &c__1);
+ }
+
+ work[1] = (real) lwmin;
+ iwork[1] = liwmin;
+ return 0;
+
+/* End of SSPEVD */
+
+} /* sspevd_ */
diff --git a/contrib/libs/clapack/sspevx.c b/contrib/libs/clapack/sspevx.c
new file mode 100644
index 0000000000..172aa32152
--- /dev/null
+++ b/contrib/libs/clapack/sspevx.c
@@ -0,0 +1,461 @@
+/* sspevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sspevx_(char *jobz, char *range, char *uplo, integer *n,
+ real *ap, real *vl, real *vu, integer *il, integer *iu, real *abstol,
+ integer *m, real *w, real *z__, integer *ldz, real *work, integer *
+ iwork, integer *ifail, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, jj;
+ real eps, vll, vuu, tmp1;
+ integer indd, inde;
+ real anrm;
+ integer imax;
+ real rmin, rmax;
+ logical test;
+ integer itmp1, indee;
+ real sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ char order[1];
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), sswap_(integer *, real *, integer *, real *, integer *
+);
+ logical wantz, alleig, indeig;
+ integer iscale, indibl;
+ logical valeig;
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real abstll, bignum;
+ integer indtau, indisp, indiwo, indwrk;
+ extern doublereal slansp_(char *, char *, integer *, real *, real *);
+ extern /* Subroutine */ int sstein_(integer *, real *, real *, integer *,
+ real *, integer *, integer *, real *, integer *, real *, integer *
+, integer *, integer *), ssterf_(integer *, real *, real *,
+ integer *);
+ integer nsplit;
+ extern /* Subroutine */ int sstebz_(char *, char *, integer *, real *,
+ real *, integer *, integer *, real *, real *, real *, integer *,
+ integer *, real *, integer *, integer *, real *, integer *,
+ integer *);
+ real smlnum;
+ extern /* Subroutine */ int sopgtr_(char *, integer *, real *, real *,
+ real *, integer *, real *, integer *), ssptrd_(char *,
+ integer *, real *, real *, real *, real *, integer *),
+ ssteqr_(char *, integer *, real *, real *, real *, integer *,
+ real *, integer *), sopmtr_(char *, char *, char *,
+ integer *, integer *, real *, real *, real *, integer *, real *,
+ integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSPEVX computes selected eigenvalues and, optionally, eigenvectors */
+/* of a real symmetric matrix A in packed storage. Eigenvalues/vectors */
+/* can be selected by specifying either a range of values or a range of */
+/* indices for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found; */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found; */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) REAL array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, AP is overwritten by values generated during the */
+/* reduction to tridiagonal form. If UPLO = 'U', the diagonal */
+/* and first superdiagonal of the tridiagonal matrix T overwrite */
+/* the corresponding elements of A, and if UPLO = 'L', the */
+/* diagonal and first subdiagonal of T overwrite the */
+/* corresponding elements of A. */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) REAL */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing AP to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*SLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*SLAMCH('S'). */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices */
+/* with Guaranteed High Relative Accuracy," by Demmel and */
+/* Kahan, LAPACK Working Note #3. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the selected eigenvalues in ascending order. */
+
+/* Z (output) REAL array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and the */
+/* index of the eigenvector is returned in IFAIL. */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) REAL array, dimension (8*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, then i eigenvectors failed to converge. */
+/* Their indices are stored in array IFAIL. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -2;
+ } else if (! (lsame_(uplo, "L") || lsame_(uplo,
+ "U"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -7;
+ }
+ } else if (indeig) {
+ if (*il < 1 || *il > max(1,*n)) {
+ *info = -8;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -9;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -14;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSPEVX", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (alleig || indeig) {
+ *m = 1;
+ w[1] = ap[1];
+ } else {
+ if (*vl < ap[1] && *vu >= ap[1]) {
+ *m = 1;
+ w[1] = ap[1];
+ }
+ }
+ if (wantz) {
+ z__[z_dim1 + 1] = 1.f;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = slamch_("Safe minimum");
+ eps = slamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1.f / smlnum;
+ rmin = sqrt(smlnum);
+/* Computing MIN */
+ r__1 = sqrt(bignum), r__2 = 1.f / sqrt(sqrt(safmin));
+ rmax = dmin(r__1,r__2);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ iscale = 0;
+ abstll = *abstol;
+ if (valeig) {
+ vll = *vl;
+ vuu = *vu;
+ } else {
+ vll = 0.f;
+ vuu = 0.f;
+ }
+ anrm = slansp_("M", uplo, n, &ap[1], &work[1]);
+ if (anrm > 0.f && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ i__1 = *n * (*n + 1) / 2;
+ sscal_(&i__1, &sigma, &ap[1], &c__1);
+ if (*abstol > 0.f) {
+ abstll = *abstol * sigma;
+ }
+ if (valeig) {
+ vll = *vl * sigma;
+ vuu = *vu * sigma;
+ }
+ }
+
+/* Call SSPTRD to reduce symmetric packed matrix to tridiagonal form. */
+
+ indtau = 1;
+ inde = indtau + *n;
+ indd = inde + *n;
+ indwrk = indd + *n;
+ ssptrd_(uplo, n, &ap[1], &work[indd], &work[inde], &work[indtau], &iinfo);
+
+/* If all eigenvalues are desired and ABSTOL is less than or equal */
+/* to zero, then call SSTERF or SOPGTR and SSTEQR. If this fails */
+/* for some eigenvalue, then try SSTEBZ. */
+
+ test = FALSE_;
+ if (indeig) {
+ if (*il == 1 && *iu == *n) {
+ test = TRUE_;
+ }
+ }
+ if ((alleig || test) && *abstol <= 0.f) {
+ scopy_(n, &work[indd], &c__1, &w[1], &c__1);
+ indee = indwrk + (*n << 1);
+ if (! wantz) {
+ i__1 = *n - 1;
+ scopy_(&i__1, &work[inde], &c__1, &work[indee], &c__1);
+ ssterf_(n, &w[1], &work[indee], info);
+ } else {
+ sopgtr_(uplo, n, &ap[1], &work[indtau], &z__[z_offset], ldz, &
+ work[indwrk], &iinfo);
+ i__1 = *n - 1;
+ scopy_(&i__1, &work[inde], &c__1, &work[indee], &c__1);
+ ssteqr_(jobz, n, &w[1], &work[indee], &z__[z_offset], ldz, &work[
+ indwrk], info);
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ifail[i__] = 0;
+/* L10: */
+ }
+ }
+ }
+ if (*info == 0) {
+ *m = *n;
+ goto L20;
+ }
+ *info = 0;
+ }
+
+/* Otherwise, call SSTEBZ and, if eigenvectors are desired, SSTEIN. */
+
+ if (wantz) {
+ *(unsigned char *)order = 'B';
+ } else {
+ *(unsigned char *)order = 'E';
+ }
+ indibl = 1;
+ indisp = indibl + *n;
+ indiwo = indisp + *n;
+ sstebz_(range, order, n, &vll, &vuu, il, iu, &abstll, &work[indd], &work[
+ inde], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &work[
+ indwrk], &iwork[indiwo], info);
+
+ if (wantz) {
+ sstein_(n, &work[indd], &work[inde], m, &w[1], &iwork[indibl], &iwork[
+ indisp], &z__[z_offset], ldz, &work[indwrk], &iwork[indiwo], &
+ ifail[1], info);
+
+/* Apply orthogonal matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by SSTEIN. */
+
+ sopmtr_("L", uplo, "N", n, m, &ap[1], &work[indtau], &z__[z_offset],
+ ldz, &work[indwrk], &iinfo);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+L20:
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *m;
+ } else {
+ imax = *info - 1;
+ }
+ r__1 = 1.f / sigma;
+ sscal_(&imax, &r__1, &w[1], &c__1);
+ }
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+ if (wantz) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = 0;
+ tmp1 = w[j];
+ i__2 = *m;
+ for (jj = j + 1; jj <= i__2; ++jj) {
+ if (w[jj] < tmp1) {
+ i__ = jj;
+ tmp1 = w[jj];
+ }
+/* L30: */
+ }
+
+ if (i__ != 0) {
+ itmp1 = iwork[indibl + i__ - 1];
+ w[i__] = w[j];
+ iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+ w[j] = tmp1;
+ iwork[indibl + j - 1] = itmp1;
+ sswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+ &c__1);
+ if (*info != 0) {
+ itmp1 = ifail[i__];
+ ifail[i__] = ifail[j];
+ ifail[j] = itmp1;
+ }
+ }
+/* L40: */
+ }
+ }
+
+ return 0;
+
+/* End of SSPEVX */
+
+} /* sspevx_ */
diff --git a/contrib/libs/clapack/sspgst.c b/contrib/libs/clapack/sspgst.c
new file mode 100644
index 0000000000..a86622ab23
--- /dev/null
+++ b/contrib/libs/clapack/sspgst.c
@@ -0,0 +1,281 @@
+/* sspgst.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b9 = -1.f;
+static real c_b11 = 1.f;
+
+/* Subroutine */ int sspgst_(integer *itype, char *uplo, integer *n, real *ap,
+ real *bp, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ real r__1;
+
+ /* Local variables */
+ integer j, k, j1, k1, jj, kk;
+ real ct, ajj;
+ integer j1j1;
+ real akk;
+ integer k1k1;
+ real bjj, bkk;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ extern /* Subroutine */ int sspr2_(char *, integer *, real *, real *,
+ integer *, real *, integer *, real *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int saxpy_(integer *, real *, real *, integer *,
+ real *, integer *), sspmv_(char *, integer *, real *, real *,
+ real *, integer *, real *, real *, integer *), stpmv_(
+ char *, char *, char *, integer *, real *, real *, integer *), stpsv_(char *, char *, char *, integer *,
+ real *, real *, integer *), xerbla_(char
+ *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSPGST reduces a real symmetric-definite generalized eigenproblem */
+/* to standard form, using packed storage. */
+
+/* If ITYPE = 1, the problem is A*x = lambda*B*x, */
+/* and A is overwritten by inv(U**T)*A*inv(U) or inv(L)*A*inv(L**T) */
+
+/* If ITYPE = 2 or 3, the problem is A*B*x = lambda*x or */
+/* B*A*x = lambda*x, and A is overwritten by U*A*U**T or L**T*A*L. */
+
+/* B must have been previously factorized as U**T*U or L*L**T by SPPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* = 1: compute inv(U**T)*A*inv(U) or inv(L)*A*inv(L**T); */
+/* = 2 or 3: compute U*A*U**T or L**T*A*L. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored and B is factored as */
+/* U**T*U; */
+/* = 'L': Lower triangle of A is stored and B is factored as */
+/* L*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* AP (input/output) REAL array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, if INFO = 0, the transformed matrix, stored in the */
+/* same format as A. */
+
+/* BP (input) REAL array, dimension (N*(N+1)/2) */
+/* The triangular factor from the Cholesky factorization of B, */
+/* stored in the same format as A, as returned by SPPTRF. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --bp;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSPGST", &i__1);
+ return 0;
+ }
+
+ if (*itype == 1) {
+ if (upper) {
+
+/* Compute inv(U')*A*inv(U) */
+
+/* J1 and JJ are the indices of A(1,j) and A(j,j) */
+
+ jj = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ j1 = jj + 1;
+ jj += j;
+
+/* Compute the j-th column of the upper triangle of A */
+
+ bjj = bp[jj];
+ stpsv_(uplo, "Transpose", "Nonunit", &j, &bp[1], &ap[j1], &
+ c__1);
+ i__2 = j - 1;
+ sspmv_(uplo, &i__2, &c_b9, &ap[1], &bp[j1], &c__1, &c_b11, &
+ ap[j1], &c__1);
+ i__2 = j - 1;
+ r__1 = 1.f / bjj;
+ sscal_(&i__2, &r__1, &ap[j1], &c__1);
+ i__2 = j - 1;
+ ap[jj] = (ap[jj] - sdot_(&i__2, &ap[j1], &c__1, &bp[j1], &
+ c__1)) / bjj;
+/* L10: */
+ }
+ } else {
+
+/* Compute inv(L)*A*inv(L') */
+
+/* KK and K1K1 are the indices of A(k,k) and A(k+1,k+1) */
+
+ kk = 1;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ k1k1 = kk + *n - k + 1;
+
+/* Update the lower triangle of A(k:n,k:n) */
+
+ akk = ap[kk];
+ bkk = bp[kk];
+/* Computing 2nd power */
+ r__1 = bkk;
+ akk /= r__1 * r__1;
+ ap[kk] = akk;
+ if (k < *n) {
+ i__2 = *n - k;
+ r__1 = 1.f / bkk;
+ sscal_(&i__2, &r__1, &ap[kk + 1], &c__1);
+ ct = akk * -.5f;
+ i__2 = *n - k;
+ saxpy_(&i__2, &ct, &bp[kk + 1], &c__1, &ap[kk + 1], &c__1)
+ ;
+ i__2 = *n - k;
+ sspr2_(uplo, &i__2, &c_b9, &ap[kk + 1], &c__1, &bp[kk + 1]
+, &c__1, &ap[k1k1]);
+ i__2 = *n - k;
+ saxpy_(&i__2, &ct, &bp[kk + 1], &c__1, &ap[kk + 1], &c__1)
+ ;
+ i__2 = *n - k;
+ stpsv_(uplo, "No transpose", "Non-unit", &i__2, &bp[k1k1],
+ &ap[kk + 1], &c__1);
+ }
+ kk = k1k1;
+/* L20: */
+ }
+ }
+ } else {
+ if (upper) {
+
+/* Compute U*A*U' */
+
+/* K1 and KK are the indices of A(1,k) and A(k,k) */
+
+ kk = 0;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ k1 = kk + 1;
+ kk += k;
+
+/* Update the upper triangle of A(1:k,1:k) */
+
+ akk = ap[kk];
+ bkk = bp[kk];
+ i__2 = k - 1;
+ stpmv_(uplo, "No transpose", "Non-unit", &i__2, &bp[1], &ap[
+ k1], &c__1);
+ ct = akk * .5f;
+ i__2 = k - 1;
+ saxpy_(&i__2, &ct, &bp[k1], &c__1, &ap[k1], &c__1);
+ i__2 = k - 1;
+ sspr2_(uplo, &i__2, &c_b11, &ap[k1], &c__1, &bp[k1], &c__1, &
+ ap[1]);
+ i__2 = k - 1;
+ saxpy_(&i__2, &ct, &bp[k1], &c__1, &ap[k1], &c__1);
+ i__2 = k - 1;
+ sscal_(&i__2, &bkk, &ap[k1], &c__1);
+/* Computing 2nd power */
+ r__1 = bkk;
+ ap[kk] = akk * (r__1 * r__1);
+/* L30: */
+ }
+ } else {
+
+/* Compute L'*A*L */
+
+/* JJ and J1J1 are the indices of A(j,j) and A(j+1,j+1) */
+
+ jj = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ j1j1 = jj + *n - j + 1;
+
+/* Compute the j-th column of the lower triangle of A */
+
+ ajj = ap[jj];
+ bjj = bp[jj];
+ i__2 = *n - j;
+ ap[jj] = ajj * bjj + sdot_(&i__2, &ap[jj + 1], &c__1, &bp[jj
+ + 1], &c__1);
+ i__2 = *n - j;
+ sscal_(&i__2, &bjj, &ap[jj + 1], &c__1);
+ i__2 = *n - j;
+ sspmv_(uplo, &i__2, &c_b11, &ap[j1j1], &bp[jj + 1], &c__1, &
+ c_b11, &ap[jj + 1], &c__1);
+ i__2 = *n - j + 1;
+ stpmv_(uplo, "Transpose", "Non-unit", &i__2, &bp[jj], &ap[jj],
+ &c__1);
+ jj = j1j1;
+/* L40: */
+ }
+ }
+ }
+ return 0;
+
+/* End of SSPGST */
+
+} /* sspgst_ */
diff --git a/contrib/libs/clapack/sspgv.c b/contrib/libs/clapack/sspgv.c
new file mode 100644
index 0000000000..1433001caf
--- /dev/null
+++ b/contrib/libs/clapack/sspgv.c
@@ -0,0 +1,240 @@
+/* sspgv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sspgv_(integer *itype, char *jobz, char *uplo, integer *
+ n, real *ap, real *bp, real *w, real *z__, integer *ldz, real *work,
+ integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1;
+
+ /* Local variables */
+ integer j, neig;
+ extern logical lsame_(char *, char *);
+ char trans[1];
+ logical upper;
+ extern /* Subroutine */ int sspev_(char *, char *, integer *, real *,
+ real *, real *, integer *, real *, integer *);
+ logical wantz;
+ extern /* Subroutine */ int stpmv_(char *, char *, char *, integer *,
+ real *, real *, integer *), stpsv_(char *,
+ char *, char *, integer *, real *, real *, integer *), xerbla_(char *, integer *), spptrf_(char
+ *, integer *, real *, integer *), sspgst_(integer *, char
+ *, integer *, real *, real *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSPGV computes all the eigenvalues and, optionally, the eigenvectors */
+/* of a real generalized symmetric-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. */
+/* Here A and B are assumed to be symmetric, stored in packed format, */
+/* and B is also positive definite. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* AP (input/output) REAL array, dimension */
+/* (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the contents of AP are destroyed. */
+
+/* BP (input/output) REAL array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* B, packed columnwise in a linear array. The j-th column of B */
+/* is stored in the array BP as follows: */
+/* if UPLO = 'U', BP(i + (j-1)*j/2) = B(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', BP(i + (j-1)*(2*n-j)/2) = B(i,j) for j<=i<=n. */
+
+/* On exit, the triangular factor U or L from the Cholesky */
+/* factorization B = U**T*U or B = L*L**T, in the same storage */
+/* format as B. */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) REAL array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/* eigenvectors. The eigenvectors are normalized as follows: */
+/* if ITYPE = 1 or 2, Z**T*B*Z = I; */
+/* if ITYPE = 3, Z**T*inv(B)*Z = I. */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: SPPTRF or SSPEV returned an error code: */
+/* <= N: if INFO = i, SSPEV failed to converge; */
+/* i off-diagonal elements of an intermediate */
+/* tridiagonal form did not converge to zero. */
+/* > N: if INFO = n + i, for 1 <= i <= n, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --bp;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+
+ *info = 0;
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -2;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSPGV ", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a Cholesky factorization of B. */
+
+ spptrf_(uplo, n, &bp[1], info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+ sspgst_(itype, uplo, n, &ap[1], &bp[1], info);
+ sspev_(jobz, uplo, n, &ap[1], &w[1], &z__[z_offset], ldz, &work[1], info);
+
+ if (wantz) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+ neig = *n;
+ if (*info > 0) {
+ neig = *info - 1;
+ }
+ if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'N';
+ } else {
+ *(unsigned char *)trans = 'T';
+ }
+
+ i__1 = neig;
+ for (j = 1; j <= i__1; ++j) {
+ stpsv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L10: */
+ }
+
+ } else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'T';
+ } else {
+ *(unsigned char *)trans = 'N';
+ }
+
+ i__1 = neig;
+ for (j = 1; j <= i__1; ++j) {
+ stpmv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L20: */
+ }
+ }
+ }
+ return 0;
+
+/* End of SSPGV */
+
+} /* sspgv_ */
diff --git a/contrib/libs/clapack/sspgvd.c b/contrib/libs/clapack/sspgvd.c
new file mode 100644
index 0000000000..0ad98eb8bb
--- /dev/null
+++ b/contrib/libs/clapack/sspgvd.c
@@ -0,0 +1,330 @@
+/* sspgvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sspgvd_(integer *itype, char *jobz, char *uplo, integer *
+ n, real *ap, real *bp, real *w, real *z__, integer *ldz, real *work,
+ integer *lwork, integer *iwork, integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1;
+ real r__1, r__2;
+
+ /* Local variables */
+ integer j, neig;
+ extern logical lsame_(char *, char *);
+ integer lwmin;
+ char trans[1];
+ logical upper, wantz;
+ extern /* Subroutine */ int stpmv_(char *, char *, char *, integer *,
+ real *, real *, integer *), stpsv_(char *,
+ char *, char *, integer *, real *, real *, integer *), xerbla_(char *, integer *);
+ integer liwmin;
+ extern /* Subroutine */ int sspevd_(char *, char *, integer *, real *,
+ real *, real *, integer *, real *, integer *, integer *, integer *
+, integer *), spptrf_(char *, integer *, real *,
+ integer *);
+ logical lquery;
+ extern /* Subroutine */ int sspgst_(integer *, char *, integer *, real *,
+ real *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSPGVD computes all the eigenvalues, and optionally, the eigenvectors */
+/* of a real generalized symmetric-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. Here A and */
+/* B are assumed to be symmetric, stored in packed format, and B is also */
+/* positive definite. */
+/* If eigenvectors are desired, it uses a divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* AP (input/output) REAL array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the contents of AP are destroyed. */
+
+/* BP (input/output) REAL array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* B, packed columnwise in a linear array. The j-th column of B */
+/* is stored in the array BP as follows: */
+/* if UPLO = 'U', BP(i + (j-1)*j/2) = B(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', BP(i + (j-1)*(2*n-j)/2) = B(i,j) for j<=i<=n. */
+
+/* On exit, the triangular factor U or L from the Cholesky */
+/* factorization B = U**T*U or B = L*L**T, in the same storage */
+/* format as B. */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) REAL array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/* eigenvectors. The eigenvectors are normalized as follows: */
+/* if ITYPE = 1 or 2, Z**T*B*Z = I; */
+/* if ITYPE = 3, Z**T*inv(B)*Z = I. */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the required LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If N <= 1, LWORK >= 1. */
+/* If JOBZ = 'N' and N > 1, LWORK >= 2*N. */
+/* If JOBZ = 'V' and N > 1, LWORK >= 1 + 6*N + 2*N**2. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the required sizes of the WORK and IWORK */
+/* arrays, returns these values as the first entries of the WORK */
+/* and IWORK arrays, and no error message related to LWORK or */
+/* LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the required LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If JOBZ = 'N' or N <= 1, LIWORK >= 1. */
+/* If JOBZ = 'V' and N > 1, LIWORK >= 3 + 5*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the required sizes of the WORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK and IWORK arrays, and no error message related to */
+/* LWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: SPPTRF or SSPEVD returned an error code: */
+/* <= N: if INFO = i, SSPEVD failed to converge; */
+/* i off-diagonal elements of an intermediate */
+/* tridiagonal form did not converge to zero; */
+/* > N: if INFO = N + i, for 1 <= i <= N, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --bp;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1 || *liwork == -1;
+
+ *info = 0;
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -2;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -9;
+ }
+
+ if (*info == 0) {
+ if (*n <= 1) {
+ liwmin = 1;
+ lwmin = 1;
+ } else {
+ if (wantz) {
+ liwmin = *n * 5 + 3;
+/* Computing 2nd power */
+ i__1 = *n;
+ lwmin = *n * 6 + 1 + (i__1 * i__1 << 1);
+ } else {
+ liwmin = 1;
+ lwmin = *n << 1;
+ }
+ }
+ work[1] = (real) lwmin;
+ iwork[1] = liwmin;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -11;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -13;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSPGVD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a Cholesky factorization of BP. */
+
+ spptrf_(uplo, n, &bp[1], info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+ sspgst_(itype, uplo, n, &ap[1], &bp[1], info);
+ sspevd_(jobz, uplo, n, &ap[1], &w[1], &z__[z_offset], ldz, &work[1],
+ lwork, &iwork[1], liwork, info);
+/* Computing MAX */
+ r__1 = (real) lwmin;
+ lwmin = dmax(r__1,work[1]);
+/* Computing MAX */
+ r__1 = (real) liwmin, r__2 = (real) iwork[1];
+ liwmin = dmax(r__1,r__2);
+
+ if (wantz) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+ neig = *n;
+ if (*info > 0) {
+ neig = *info - 1;
+ }
+ if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'N';
+ } else {
+ *(unsigned char *)trans = 'T';
+ }
+
+ i__1 = neig;
+ for (j = 1; j <= i__1; ++j) {
+ stpsv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L10: */
+ }
+
+ } else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'T';
+ } else {
+ *(unsigned char *)trans = 'N';
+ }
+
+ i__1 = neig;
+ for (j = 1; j <= i__1; ++j) {
+ stpmv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L20: */
+ }
+ }
+ }
+
+ work[1] = (real) lwmin;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of SSPGVD */
+
+} /* sspgvd_ */
diff --git a/contrib/libs/clapack/sspgvx.c b/contrib/libs/clapack/sspgvx.c
new file mode 100644
index 0000000000..30271be901
--- /dev/null
+++ b/contrib/libs/clapack/sspgvx.c
@@ -0,0 +1,339 @@
+/* sspgvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sspgvx_(integer *itype, char *jobz, char *range, char *
+ uplo, integer *n, real *ap, real *bp, real *vl, real *vu, integer *il,
+ integer *iu, real *abstol, integer *m, real *w, real *z__, integer *
+ ldz, real *work, integer *iwork, integer *ifail, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1;
+
+ /* Local variables */
+ integer j;
+ extern logical lsame_(char *, char *);
+ char trans[1];
+ logical upper, wantz;
+ extern /* Subroutine */ int stpmv_(char *, char *, char *, integer *,
+ real *, real *, integer *), stpsv_(char *,
+ char *, char *, integer *, real *, real *, integer *);
+ logical alleig, indeig, valeig;
+ extern /* Subroutine */ int xerbla_(char *, integer *), spptrf_(
+ char *, integer *, real *, integer *), sspgst_(integer *,
+ char *, integer *, real *, real *, integer *), sspevx_(
+ char *, char *, char *, integer *, real *, real *, real *,
+ integer *, integer *, real *, integer *, real *, real *, integer *
+, real *, integer *, integer *, integer *)
+ ;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSPGVX computes selected eigenvalues, and optionally, eigenvectors */
+/* of a real generalized symmetric-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. Here A */
+/* and B are assumed to be symmetric, stored in packed storage, and B */
+/* is also positive definite. Eigenvalues and eigenvectors can be */
+/* selected by specifying either a range of values or a range of indices */
+/* for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A and B are stored; */
+/* = 'L': Lower triangle of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix pencil (A,B). N >= 0. */
+
+/* AP (input/output) REAL array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the contents of AP are destroyed. */
+
+/* BP (input/output) REAL array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* B, packed columnwise in a linear array. The j-th column of B */
+/* is stored in the array BP as follows: */
+/* if UPLO = 'U', BP(i + (j-1)*j/2) = B(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', BP(i + (j-1)*(2*n-j)/2) = B(i,j) for j<=i<=n. */
+
+/* On exit, the triangular factor U or L from the Cholesky */
+/* factorization B = U**T*U or B = L*L**T, in the same storage */
+/* format as B. */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) REAL */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing A to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*SLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*SLAMCH('S'). */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) REAL array, dimension (N) */
+/* On normal exit, the first M elements contain the selected */
+/* eigenvalues in ascending order. */
+
+/* Z (output) REAL array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* The eigenvectors are normalized as follows: */
+/* if ITYPE = 1 or 2, Z**T*B*Z = I; */
+/* if ITYPE = 3, Z**T*inv(B)*Z = I. */
+
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and the */
+/* index of the eigenvector is returned in IFAIL. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) REAL array, dimension (8*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: SPPTRF or SSPEVX returned an error code: */
+/* <= N: if INFO = i, SSPEVX failed to converge; */
+/* i eigenvectors failed to converge. Their indices */
+/* are stored in array IFAIL. */
+/* > N: if INFO = N + i, for 1 <= i <= N, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --bp;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ upper = lsame_(uplo, "U");
+ wantz = lsame_(jobz, "V");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+
+ *info = 0;
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -2;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -3;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -9;
+ }
+ } else if (indeig) {
+ if (*il < 1) {
+ *info = -10;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -11;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -16;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSPGVX", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a Cholesky factorization of B. */
+
+ spptrf_(uplo, n, &bp[1], info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+ sspgst_(itype, uplo, n, &ap[1], &bp[1], info);
+ sspevx_(jobz, range, uplo, n, &ap[1], vl, vu, il, iu, abstol, m, &w[1], &
+ z__[z_offset], ldz, &work[1], &iwork[1], &ifail[1], info);
+
+ if (wantz) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+ if (*info > 0) {
+ *m = *info - 1;
+ }
+ if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'N';
+ } else {
+ *(unsigned char *)trans = 'T';
+ }
+
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ stpsv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L10: */
+ }
+
+ } else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'T';
+ } else {
+ *(unsigned char *)trans = 'N';
+ }
+
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ stpmv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L20: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of SSPGVX */
+
+} /* sspgvx_ */
diff --git a/contrib/libs/clapack/ssprfs.c b/contrib/libs/clapack/ssprfs.c
new file mode 100644
index 0000000000..9e02bea0d6
--- /dev/null
+++ b/contrib/libs/clapack/ssprfs.c
@@ -0,0 +1,417 @@
+/* ssprfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b12 = -1.f;
+static real c_b14 = 1.f;
+
+/* Subroutine */ int ssprfs_(char *uplo, integer *n, integer *nrhs, real *ap,
+        real *afp, integer *ipiv, real *b, integer *ldb, real *x, integer *
+        ldx, real *ferr, real *berr, real *work, integer *iwork, integer *
+        info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3;
+    real r__1, r__2, r__3;
+
+    /* Local variables */
+    integer i__, j, k;
+    real s;
+    integer ik, kk;
+    real xk;
+    integer nz;
+    real eps;
+    integer kase;
+    real safe1, safe2;
+    extern logical lsame_(char *, char *);
+    integer isave[3], count;
+    logical upper;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+            integer *), saxpy_(integer *, real *, real *, integer *, real *,
+            integer *), sspmv_(char *, integer *, real *, real *, real *,
+            integer *, real *, real *, integer *), slacn2_(integer *,
+            real *, real *, integer *, real *, integer *, integer *);
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real lstres;
+    extern /* Subroutine */ int ssptrs_(char *, integer *, integer *, real *,
+            integer *, real *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 5 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSPRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is symmetric indefinite */
+/* and packed, and provides error bounds and backward error estimates */
+/* for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AP (input) REAL array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the symmetric matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* AFP (input) REAL array, dimension (N*(N+1)/2) */
+/* The factored form of the matrix A. AFP contains the block */
+/* diagonal matrix D and the multipliers used to obtain the */
+/* factor U or L from the factorization A = U*D*U**T or */
+/* A = L*D*L**T as computed by SSPTRF, stored as a packed */
+/* triangular matrix. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by SSPTRF. */
+
+/* B (input) REAL array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) REAL array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by SSPTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    --afp;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+        *info = -1;
+    } else if (*n < 0) {
+        *info = -2;
+    } else if (*nrhs < 0) {
+        *info = -3;
+    } else if (*ldb < max(1,*n)) {
+        *info = -8;
+    } else if (*ldx < max(1,*n)) {
+        *info = -10;
+    }
+    if (*info != 0) {
+        i__1 = -(*info);
+        xerbla_("SSPRFS", &i__1);
+        return 0;
+    }
+
+/* Quick return if possible: nothing to refine, so all error bounds are zero */
+
+    if (*n == 0 || *nrhs == 0) {
+        i__1 = *nrhs;
+        for (j = 1; j <= i__1; ++j) {
+            ferr[j] = 0.f;
+            berr[j] = 0.f;
+/* L10: */
+        }
+        return 0;
+    }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+    nz = *n + 1;
+    eps = slamch_("Epsilon");
+    safmin = slamch_("Safe minimum");
+    safe1 = nz * safmin;
+    safe2 = safe1 / eps;
+
+/* Do for each right hand side (column J of B and X) */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+        count = 1;
+        lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X, held in WORK(N+1 .. 2*N) */
+
+        scopy_(n, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+        sspmv_(uplo, n, &c_b12, &ap[1], &x[j * x_dim1 + 1], &c__1, &c_b14, &
+                work[*n + 1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+        i__2 = *n;
+        for (i__ = 1; i__ <= i__2; ++i__) {
+            work[i__] = (r__1 = b[i__ + j * b_dim1], dabs(r__1));
+/* L30: */
+        }
+
+/* Compute abs(A)*abs(X) + abs(B), accumulated into WORK(1..N). */
+
+        kk = 1;
+        if (upper) {
+            i__2 = *n;
+            for (k = 1; k <= i__2; ++k) {
+                s = 0.f;
+                xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+                ik = kk;
+                i__3 = k - 1;
+                for (i__ = 1; i__ <= i__3; ++i__) {
+                    work[i__] += (r__1 = ap[ik], dabs(r__1)) * xk;
+                    s += (r__1 = ap[ik], dabs(r__1)) * (r__2 = x[i__ + j *
+                            x_dim1], dabs(r__2));
+                    ++ik;
+/* L40: */
+                }
+                work[k] = work[k] + (r__1 = ap[kk + k - 1], dabs(r__1)) * xk
+                        + s;
+                kk += k;
+/* L50: */
+            }
+        } else {
+            i__2 = *n;
+            for (k = 1; k <= i__2; ++k) {
+                s = 0.f;
+                xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+                work[k] += (r__1 = ap[kk], dabs(r__1)) * xk;
+                ik = kk + 1;
+                i__3 = *n;
+                for (i__ = k + 1; i__ <= i__3; ++i__) {
+                    work[i__] += (r__1 = ap[ik], dabs(r__1)) * xk;
+                    s += (r__1 = ap[ik], dabs(r__1)) * (r__2 = x[i__ + j *
+                            x_dim1], dabs(r__2));
+                    ++ik;
+/* L60: */
+                }
+                work[k] += s;
+                kk += *n - k + 1;
+/* L70: */
+            }
+        }
+        s = 0.f;
+        i__2 = *n;
+        for (i__ = 1; i__ <= i__2; ++i__) {
+            if (work[i__] > safe2) {
+/* Computing MAX */
+                r__2 = s, r__3 = (r__1 = work[*n + i__], dabs(r__1)) / work[
+                        i__];
+                s = dmax(r__2,r__3);
+            } else {
+/* Computing MAX */
+                r__2 = s, r__3 = ((r__1 = work[*n + i__], dabs(r__1)) + safe1)
+                        / (work[i__] + safe1);
+                s = dmax(r__2,r__3);
+            }
+/* L80: */
+        }
+        berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+        if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) {
+
+/* Update solution and try again: X := X + inv(A)*R. */
+
+            ssptrs_(uplo, n, &c__1, &afp[1], &ipiv[1], &work[*n + 1], n, info);
+            saxpy_(n, &c_b14, &work[*n + 1], &c__1, &x[j * x_dim1 + 1], &c__1)
+                    ;
+            lstres = berr[j];
+            ++count;
+            goto L20;
+        }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use SLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+        i__2 = *n;
+        for (i__ = 1; i__ <= i__2; ++i__) {
+            if (work[i__] > safe2) {
+                work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+                        work[i__];
+            } else {
+                work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+                        work[i__] + safe1;
+            }
+/* L90: */
+        }
+
+        kase = 0;
+L100:
+        slacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+                kase, isave);
+        if (kase != 0) {
+            if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+                ssptrs_(uplo, n, &c__1, &afp[1], &ipiv[1], &work[*n + 1], n,
+                        info);
+                i__2 = *n;
+                for (i__ = 1; i__ <= i__2; ++i__) {
+                    work[*n + i__] = work[i__] * work[*n + i__];
+/* L110: */
+                }
+            } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+                i__2 = *n;
+                for (i__ = 1; i__ <= i__2; ++i__) {
+                    work[*n + i__] = work[i__] * work[*n + i__];
+/* L120: */
+                }
+                ssptrs_(uplo, n, &c__1, &afp[1], &ipiv[1], &work[*n + 1], n,
+                        info);
+            }
+            goto L100;
+        }
+
+/* Normalize error by the max-norm of X(J). */
+
+        lstres = 0.f;
+        i__2 = *n;
+        for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+            r__2 = lstres, r__3 = (r__1 = x[i__ + j * x_dim1], dabs(r__1));
+            lstres = dmax(r__2,r__3);
+/* L130: */
+        }
+        if (lstres != 0.f) {
+            ferr[j] /= lstres;
+        }
+
+/* L140: */
+    }
+
+    return 0;
+
+/* End of SSPRFS */
+
+} /* ssprfs_ */
diff --git a/contrib/libs/clapack/sspsv.c b/contrib/libs/clapack/sspsv.c
new file mode 100644
index 0000000000..e902d618a4
--- /dev/null
+++ b/contrib/libs/clapack/sspsv.c
@@ -0,0 +1,176 @@
+/* sspsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sspsv_(char *uplo, integer *n, integer *nrhs, real *ap,
+        integer *ipiv, real *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *), ssptrf_(
+            char *, integer *, real *, integer *, integer *), ssptrs_(
+            char *, integer *, integer *, real *, integer *, real *, integer *
+, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSPSV computes the solution to a real system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric matrix stored in packed format and X */
+/* and B are N-by-NRHS matrices. */
+
+/* The diagonal pivoting method is used to factor A as */
+/* A = U * D * U**T, if UPLO = 'U', or */
+/* A = L * D * L**T, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, D is symmetric and block diagonal with 1-by-1 */
+/* and 2-by-2 diagonal blocks. The factored form of A is then used to */
+/* solve the system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input/output) REAL array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**T or A = L*D*L**T as computed by SSPTRF, stored as */
+/* a packed triangular matrix in the same storage format as A. */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D, as */
+/* determined by SSPTRF. If IPIV(k) > 0, then rows and columns */
+/* k and IPIV(k) were interchanged, and D(k,k) is a 1-by-1 */
+/* diagonal block. If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, */
+/* then rows and columns k-1 and -IPIV(k) were interchanged and */
+/* D(k-1:k,k-1:k) is a 2-by-2 diagonal block. If UPLO = 'L' and */
+/* IPIV(k) = IPIV(k+1) < 0, then rows and columns k+1 and */
+/* -IPIV(k) were interchanged and D(k:k+1,k:k+1) is a 2-by-2 */
+/* diagonal block. */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, so the solution could not be */
+/* computed. */
+
+/* Further Details */
+/* =============== */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the symmetric matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = aji) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+        *info = -1;
+    } else if (*n < 0) {
+        *info = -2;
+    } else if (*nrhs < 0) {
+        *info = -3;
+    } else if (*ldb < max(1,*n)) {
+        *info = -7;
+    }
+    if (*info != 0) {
+        i__1 = -(*info);
+        xerbla_("SSPSV ", &i__1);
+        return 0;
+    }
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+    ssptrf_(uplo, n, &ap[1], &ipiv[1], info);
+    if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+        ssptrs_(uplo, n, nrhs, &ap[1], &ipiv[1], &b[b_offset], ldb, info);
+
+    }
+    return 0;
+
+/* End of SSPSV */
+
+} /* sspsv_ */
diff --git a/contrib/libs/clapack/sspsvx.c b/contrib/libs/clapack/sspsvx.c
new file mode 100644
index 0000000000..96b88fe649
--- /dev/null
+++ b/contrib/libs/clapack/sspsvx.c
@@ -0,0 +1,325 @@
+/* sspsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sspsvx_(char *fact, char *uplo, integer *n, integer *
+        nrhs, real *ap, real *afp, integer *ipiv, real *b, integer *ldb, real
+        *x, integer *ldx, real *rcond, real *ferr, real *berr, real *work,
+        integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, x_dim1, x_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    real anorm;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+            integer *);
+    extern doublereal slamch_(char *);
+    logical nofact;
+    extern /* Subroutine */ int xerbla_(char *, integer *), slacpy_(
+            char *, integer *, integer *, real *, integer *, real *, integer *
+);
+    extern doublereal slansp_(char *, char *, integer *, real *, real *);
+    extern /* Subroutine */ int sspcon_(char *, integer *, real *, integer *,
+            real *, real *, real *, integer *, integer *), ssprfs_(
+            char *, integer *, integer *, real *, real *, integer *, real *,
+            integer *, real *, integer *, real *, real *, real *, integer *,
+            integer *), ssptrf_(char *, integer *, real *, integer *,
+            integer *), ssptrs_(char *, integer *, integer *, real *,
+            integer *, real *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSPSVX uses the diagonal pivoting factorization A = U*D*U**T or */
+/* A = L*D*L**T to compute the solution to a real system of linear */
+/* equations A * X = B, where A is an N-by-N symmetric matrix stored */
+/* in packed format and X and B are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the diagonal pivoting method is used to factor A as */
+/* A = U * D * U**T, if UPLO = 'U', or */
+/* A = L * D * L**T, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices and D is symmetric and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* 2. If some D(i,i)=0, so that D is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of A has been */
+/* supplied on entry. */
+/* = 'F': On entry, AFP and IPIV contain the factored form of */
+/* A. AP, AFP and IPIV will not be modified. */
+/* = 'N': The matrix A will be copied to AFP and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AP (input) REAL array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the symmetric matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. */
+
+/* AFP (input or output) REAL array, dimension */
+/* (N*(N+1)/2) */
+/* If FACT = 'F', then AFP is an input argument and on entry */
+/* contains the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**T or A = L*D*L**T as computed by SSPTRF, stored as */
+/* a packed triangular matrix in the same storage format as A. */
+
+/* If FACT = 'N', then AFP is an output argument and on exit */
+/* contains the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**T or A = L*D*L**T as computed by SSPTRF, stored as */
+/* a packed triangular matrix in the same storage format as A. */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by SSPTRF. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by SSPTRF. */
+
+/* B (input) REAL array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) REAL array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) REAL */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A. If RCOND is less than the machine precision (in */
+/* particular, if RCOND = 0), the matrix is singular to working */
+/* precision. This condition is indicated by a return code of */
+/* INFO > 0. */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: D(i,i) is exactly zero. The factorization */
+/* has been completed but the factor D is exactly */
+/* singular, so the solution and error bounds could */
+/* not be computed. RCOND = 0 is returned. */
+/* = N+1: D is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* Further Details */
+/* =============== */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the symmetric matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = aji) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    --afp;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    nofact = lsame_(fact, "N");
+    if (! nofact && ! lsame_(fact, "F")) {
+        *info = -1;
+    } else if (! lsame_(uplo, "U") && ! lsame_(uplo,
+            "L")) {
+        *info = -2;
+    } else if (*n < 0) {
+        *info = -3;
+    } else if (*nrhs < 0) {
+        *info = -4;
+    } else if (*ldb < max(1,*n)) {
+        *info = -9;
+    } else if (*ldx < max(1,*n)) {
+        *info = -11;
+    }
+    if (*info != 0) {
+        i__1 = -(*info);
+        xerbla_("SSPSVX", &i__1);
+        return 0;
+    }
+
+    if (nofact) {
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+        i__1 = *n * (*n + 1) / 2;
+        scopy_(&i__1, &ap[1], &c__1, &afp[1], &c__1);
+        ssptrf_(uplo, n, &afp[1], &ipiv[1], info);
+
+/* Return early if INFO > 0: D has an exactly zero pivot, A is singular. */
+
+        if (*info > 0) {
+            *rcond = 0.f;
+            return 0;
+        }
+    }
+
+/* Compute the infinity-norm of the original matrix A. */
+
+    anorm = slansp_("I", uplo, n, &ap[1], &work[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+    sspcon_(uplo, n, &afp[1], &ipiv[1], &anorm, rcond, &work[1], &iwork[1],
+            info);
+
+/* Compute the solution vectors X. */
+
+    slacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    ssptrs_(uplo, n, nrhs, &afp[1], &ipiv[1], &x[x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+    ssprfs_(uplo, n, nrhs, &ap[1], &afp[1], &ipiv[1], &b[b_offset], ldb, &x[
+            x_offset], ldx, &ferr[1], &berr[1], &work[1], &iwork[1], info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+    if (*rcond < slamch_("Epsilon")) {
+        *info = *n + 1;
+    }
+
+    return 0;
+
+/* End of SSPSVX */
+
+} /* sspsvx_ */
diff --git a/contrib/libs/clapack/ssptrd.c b/contrib/libs/clapack/ssptrd.c
new file mode 100644
index 0000000000..db5eb88f2a
--- /dev/null
+++ b/contrib/libs/clapack/ssptrd.c
@@ -0,0 +1,275 @@
+/* ssptrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b8 = 0.f;
+static real c_b14 = -1.f;
+
+/* Subroutine */ int ssptrd_(char *uplo, integer *n, real *ap, real *d__,
+ real *e, real *tau, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ integer i__, i1, ii, i1i1;
+ real taui;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ extern /* Subroutine */ int sspr2_(char *, integer *, real *, real *,
+ integer *, real *, integer *, real *);
+ real alpha;
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern /* Subroutine */ int saxpy_(integer *, real *, real *, integer *,
+ real *, integer *), sspmv_(char *, integer *, real *, real *,
+ real *, integer *, real *, real *, integer *), xerbla_(
+ char *, integer *), slarfg_(integer *, real *, real *,
+ integer *, real *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSPTRD reduces a real symmetric matrix A stored in packed form to */
+/* symmetric tridiagonal form T by an orthogonal similarity */
+/* transformation: Q**T * A * Q = T. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) REAL array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+/* On exit, if UPLO = 'U', the diagonal and first superdiagonal */
+/* of A are overwritten by the corresponding elements of the */
+/* tridiagonal matrix T, and the elements above the first */
+/* superdiagonal, with the array TAU, represent the orthogonal */
+/* matrix Q as a product of elementary reflectors; if UPLO */
+/* = 'L', the diagonal and first subdiagonal of A are over- */
+/* written by the corresponding elements of the tridiagonal */
+/* matrix T, and the elements below the first subdiagonal, with */
+/* the array TAU, represent the orthogonal matrix Q as a product */
+/* of elementary reflectors. See Further Details. */
+
+/* D (output) REAL array, dimension (N) */
+/* The diagonal elements of the tridiagonal matrix T: */
+/* D(i) = A(i,i). */
+
+/* E (output) REAL array, dimension (N-1) */
+/* The off-diagonal elements of the tridiagonal matrix T: */
+/* E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. */
+
+/* TAU (output) REAL array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(n-1) . . . H(2) H(1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in AP, */
+/* overwriting A(1:i-1,i+1), and tau is stored in TAU(i). */
+
+/* If UPLO = 'L', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(1) H(2) . . . H(n-1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in AP, */
+/* overwriting A(i+2:n,i), and tau is stored in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ --tau;
+ --e;
+ --d__;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSPTRD", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Reduce the upper triangle of A. */
+/* I1 is the index in AP of A(1,I+1). */
+
+ i1 = *n * (*n - 1) / 2 + 1;
+ for (i__ = *n - 1; i__ >= 1; --i__) {
+
+/* Generate elementary reflector H(i) = I - tau * v * v' */
+/* to annihilate A(1:i-1,i+1) */
+
+ slarfg_(&i__, &ap[i1 + i__ - 1], &ap[i1], &c__1, &taui);
+ e[i__] = ap[i1 + i__ - 1];
+
+ if (taui != 0.f) {
+
+/* Apply H(i) from both sides to A(1:i,1:i) */
+
+ ap[i1 + i__ - 1] = 1.f;
+
+/* Compute y := tau * A * v storing y in TAU(1:i) */
+
+ sspmv_(uplo, &i__, &taui, &ap[1], &ap[i1], &c__1, &c_b8, &tau[
+ 1], &c__1);
+
+/* Compute w := y - 1/2 * tau * (y'*v) * v */
+
+ alpha = taui * -.5f * sdot_(&i__, &tau[1], &c__1, &ap[i1], &
+ c__1);
+ saxpy_(&i__, &alpha, &ap[i1], &c__1, &tau[1], &c__1);
+
+/* Apply the transformation as a rank-2 update: */
+/* A := A - v * w' - w * v' */
+
+ sspr2_(uplo, &i__, &c_b14, &ap[i1], &c__1, &tau[1], &c__1, &
+ ap[1]);
+
+ ap[i1 + i__ - 1] = e[i__];
+ }
+ d__[i__ + 1] = ap[i1 + i__];
+ tau[i__] = taui;
+ i1 -= i__;
+/* L10: */
+ }
+ d__[1] = ap[1];
+ } else {
+
+/* Reduce the lower triangle of A. II is the index in AP of */
+/* A(i,i) and I1I1 is the index of A(i+1,i+1). */
+
+ ii = 1;
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i1i1 = ii + *n - i__ + 1;
+
+/* Generate elementary reflector H(i) = I - tau * v * v' */
+/* to annihilate A(i+2:n,i) */
+
+ i__2 = *n - i__;
+ slarfg_(&i__2, &ap[ii + 1], &ap[ii + 2], &c__1, &taui);
+ e[i__] = ap[ii + 1];
+
+ if (taui != 0.f) {
+
+/* Apply H(i) from both sides to A(i+1:n,i+1:n) */
+
+ ap[ii + 1] = 1.f;
+
+/* Compute y := tau * A * v storing y in TAU(i:n-1) */
+
+ i__2 = *n - i__;
+ sspmv_(uplo, &i__2, &taui, &ap[i1i1], &ap[ii + 1], &c__1, &
+ c_b8, &tau[i__], &c__1);
+
+/* Compute w := y - 1/2 * tau * (y'*v) * v */
+
+ i__2 = *n - i__;
+ alpha = taui * -.5f * sdot_(&i__2, &tau[i__], &c__1, &ap[ii +
+ 1], &c__1);
+ i__2 = *n - i__;
+ saxpy_(&i__2, &alpha, &ap[ii + 1], &c__1, &tau[i__], &c__1);
+
+/* Apply the transformation as a rank-2 update: */
+/* A := A - v * w' - w * v' */
+
+ i__2 = *n - i__;
+ sspr2_(uplo, &i__2, &c_b14, &ap[ii + 1], &c__1, &tau[i__], &
+ c__1, &ap[i1i1]);
+
+ ap[ii + 1] = e[i__];
+ }
+ d__[i__] = ap[ii];
+ tau[i__] = taui;
+ ii = i1i1;
+/* L20: */
+ }
+ d__[*n] = ap[ii];
+ }
+
+ return 0;
+
+/* End of SSPTRD */
+
+} /* ssptrd_ */
diff --git a/contrib/libs/clapack/ssptrf.c b/contrib/libs/clapack/ssptrf.c
new file mode 100644
index 0000000000..df4bb436e4
--- /dev/null
+++ b/contrib/libs/clapack/ssptrf.c
@@ -0,0 +1,627 @@
+/* ssptrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ssptrf_(char *uplo, integer *n, real *ap, integer *ipiv,
+ integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ real r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k;
+ real t, r1, d11, d12, d21, d22;
+ integer kc, kk, kp;
+ real wk;
+ integer kx, knc, kpc, npp;
+ real wkm1, wkp1;
+ integer imax, jmax;
+ extern /* Subroutine */ int sspr_(char *, integer *, real *, real *,
+ integer *, real *);
+ real alpha;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ integer kstep;
+ logical upper;
+ extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+ integer *);
+ real absakk;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ real colmax, rowmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSPTRF computes the factorization of a real symmetric matrix A stored */
+/* in packed format using the Bunch-Kaufman diagonal pivoting method: */
+
+/* A = U*D*U**T or A = L*D*L**T */
+
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is symmetric and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) REAL array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L, stored as a packed triangular */
+/* matrix overwriting A (see below for further details). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, and division by zero will occur if it */
+/* is used to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* 5-96 - Based on modifications by J. Lewis, Boeing Computer Services */
+/* Company */
+
+/* If UPLO = 'U', then A = U*D*U', where */
+/* U = P(n)*U(n)* ... *P(k)U(k)* ..., */
+/* i.e., U is a product of terms P(k)*U(k), where k decreases from n to */
+/* 1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I v 0 ) k-s */
+/* U(k) = ( 0 I 0 ) s */
+/* ( 0 0 I ) n-k */
+/* k-s s n-k */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */
+/* If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */
+/* and A(k,k), and v overwrites A(1:k-2,k-1:k). */
+
+/* If UPLO = 'L', then A = L*D*L', where */
+/* L = P(1)*L(1)* ... *P(k)*L(k)* ..., */
+/* i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */
+/* n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I 0 0 ) k-1 */
+/* L(k) = ( 0 I 0 ) s */
+/* ( 0 v I ) n-k-s+1 */
+/* k-1 s n-k-s+1 */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */
+/* If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */
+/* and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ipiv;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSPTRF", &i__1);
+ return 0;
+ }
+
+/* Initialize ALPHA for use in choosing pivot block size. */
+
+ alpha = (sqrt(17.f) + 1.f) / 8.f;
+
+ if (upper) {
+
+/* Factorize A as U*D*U' using the upper triangle of A */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2 */
+
+ k = *n;
+ kc = (*n - 1) * *n / 2 + 1;
+L10:
+ knc = kc;
+
+/* If K < 1, exit from loop */
+
+ if (k < 1) {
+ goto L110;
+ }
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ absakk = (r__1 = ap[kc + k - 1], dabs(r__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ imax = isamax_(&i__1, &ap[kc], &c__1);
+ colmax = (r__1 = ap[kc + imax - 1], dabs(r__1));
+ } else {
+ colmax = 0.f;
+ }
+
+ if (dmax(absakk,colmax) == 0.f) {
+
+/* Column K is zero: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ rowmax = 0.f;
+ jmax = imax;
+ kx = imax * (imax + 1) / 2 + imax;
+ i__1 = k;
+ for (j = imax + 1; j <= i__1; ++j) {
+ if ((r__1 = ap[kx], dabs(r__1)) > rowmax) {
+ rowmax = (r__1 = ap[kx], dabs(r__1));
+ jmax = j;
+ }
+ kx += j;
+/* L20: */
+ }
+ kpc = (imax - 1) * imax / 2 + 1;
+ if (imax > 1) {
+ i__1 = imax - 1;
+ jmax = isamax_(&i__1, &ap[kpc], &c__1);
+/* Computing MAX */
+ r__2 = rowmax, r__3 = (r__1 = ap[kpc + jmax - 1], dabs(
+ r__1));
+ rowmax = dmax(r__2,r__3);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else if ((r__1 = ap[kpc + imax - 1], dabs(r__1)) >= alpha *
+ rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+ } else {
+
+/* interchange rows and columns K-1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+
+ kk = k - kstep + 1;
+ if (kstep == 2) {
+ knc = knc - k + 1;
+ }
+ if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the leading */
+/* submatrix A(1:k,1:k) */
+
+ i__1 = kp - 1;
+ sswap_(&i__1, &ap[knc], &c__1, &ap[kpc], &c__1);
+ kx = kpc + kp - 1;
+ i__1 = kk - 1;
+ for (j = kp + 1; j <= i__1; ++j) {
+ kx = kx + j - 1;
+ t = ap[knc + j - 1];
+ ap[knc + j - 1] = ap[kx];
+ ap[kx] = t;
+/* L30: */
+ }
+ t = ap[knc + kk - 1];
+ ap[knc + kk - 1] = ap[kpc + kp - 1];
+ ap[kpc + kp - 1] = t;
+ if (kstep == 2) {
+ t = ap[kc + k - 2];
+ ap[kc + k - 2] = ap[kc + kp - 1];
+ ap[kc + kp - 1] = t;
+ }
+ }
+
+/* Update the leading submatrix */
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = U(k)*D(k) */
+
+/* where U(k) is the k-th column of U */
+
+/* Perform a rank-1 update of A(1:k-1,1:k-1) as */
+
+/* A := A - U(k)*D(k)*U(k)' = A - W(k)*1/D(k)*W(k)' */
+
+ r1 = 1.f / ap[kc + k - 1];
+ i__1 = k - 1;
+ r__1 = -r1;
+ sspr_(uplo, &i__1, &r__1, &ap[kc], &c__1, &ap[1]);
+
+/* Store U(k) in column k */
+
+ i__1 = k - 1;
+ sscal_(&i__1, &r1, &ap[kc], &c__1);
+ } else {
+
+/* 2-by-2 pivot block D(k): columns k and k-1 now hold */
+
+/* ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */
+
+/* where U(k) and U(k-1) are the k-th and (k-1)-th columns */
+/* of U */
+
+/* Perform a rank-2 update of A(1:k-2,1:k-2) as */
+
+/* A := A - ( U(k-1) U(k) )*D(k)*( U(k-1) U(k) )' */
+/* = A - ( W(k-1) W(k) )*inv(D(k))*( W(k-1) W(k) )' */
+
+ if (k > 2) {
+
+ d12 = ap[k - 1 + (k - 1) * k / 2];
+ d22 = ap[k - 1 + (k - 2) * (k - 1) / 2] / d12;
+ d11 = ap[k + (k - 1) * k / 2] / d12;
+ t = 1.f / (d11 * d22 - 1.f);
+ d12 = t / d12;
+
+ for (j = k - 2; j >= 1; --j) {
+ wkm1 = d12 * (d11 * ap[j + (k - 2) * (k - 1) / 2] -
+ ap[j + (k - 1) * k / 2]);
+ wk = d12 * (d22 * ap[j + (k - 1) * k / 2] - ap[j + (k
+ - 2) * (k - 1) / 2]);
+ for (i__ = j; i__ >= 1; --i__) {
+ ap[i__ + (j - 1) * j / 2] = ap[i__ + (j - 1) * j /
+ 2] - ap[i__ + (k - 1) * k / 2] * wk - ap[
+ i__ + (k - 2) * (k - 1) / 2] * wkm1;
+/* L40: */
+ }
+ ap[j + (k - 1) * k / 2] = wk;
+ ap[j + (k - 2) * (k - 1) / 2] = wkm1;
+/* L50: */
+ }
+
+ }
+
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k - 1] = -kp;
+ }
+
+/* Decrease K and return to the start of the main loop */
+
+ k -= kstep;
+ kc = knc - k;
+ goto L10;
+
+ } else {
+
+/* Factorize A as L*D*L' using the lower triangle of A */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2 */
+
+ k = 1;
+ kc = 1;
+ npp = *n * (*n + 1) / 2;
+L60:
+ knc = kc;
+
+/* If K > N, exit from loop */
+
+ if (k > *n) {
+ goto L110;
+ }
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ absakk = (r__1 = ap[kc], dabs(r__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ imax = k + isamax_(&i__1, &ap[kc + 1], &c__1);
+ colmax = (r__1 = ap[kc + imax - k], dabs(r__1));
+ } else {
+ colmax = 0.f;
+ }
+
+ if (dmax(absakk,colmax) == 0.f) {
+
+/* Column K is zero: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ rowmax = 0.f;
+ kx = kc + imax - k;
+ i__1 = imax - 1;
+ for (j = k; j <= i__1; ++j) {
+ if ((r__1 = ap[kx], dabs(r__1)) > rowmax) {
+ rowmax = (r__1 = ap[kx], dabs(r__1));
+ jmax = j;
+ }
+ kx = kx + *n - j;
+/* L70: */
+ }
+ kpc = npp - (*n - imax + 1) * (*n - imax + 2) / 2 + 1;
+ if (imax < *n) {
+ i__1 = *n - imax;
+ jmax = imax + isamax_(&i__1, &ap[kpc + 1], &c__1);
+/* Computing MAX */
+ r__2 = rowmax, r__3 = (r__1 = ap[kpc + jmax - imax], dabs(
+ r__1));
+ rowmax = dmax(r__2,r__3);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else if ((r__1 = ap[kpc], dabs(r__1)) >= alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+ } else {
+
+/* interchange rows and columns K+1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+
+ kk = k + kstep - 1;
+ if (kstep == 2) {
+ knc = knc + *n - k + 1;
+ }
+ if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the trailing */
+/* submatrix A(k:n,k:n) */
+
+ if (kp < *n) {
+ i__1 = *n - kp;
+ sswap_(&i__1, &ap[knc + kp - kk + 1], &c__1, &ap[kpc + 1],
+ &c__1);
+ }
+ kx = knc + kp - kk;
+ i__1 = kp - 1;
+ for (j = kk + 1; j <= i__1; ++j) {
+ kx = kx + *n - j + 1;
+ t = ap[knc + j - kk];
+ ap[knc + j - kk] = ap[kx];
+ ap[kx] = t;
+/* L80: */
+ }
+ t = ap[knc];
+ ap[knc] = ap[kpc];
+ ap[kpc] = t;
+ if (kstep == 2) {
+ t = ap[kc + 1];
+ ap[kc + 1] = ap[kc + kp - k];
+ ap[kc + kp - k] = t;
+ }
+ }
+
+/* Update the trailing submatrix */
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = L(k)*D(k) */
+
+/* where L(k) is the k-th column of L */
+
+ if (k < *n) {
+
+/* Perform a rank-1 update of A(k+1:n,k+1:n) as */
+
+/* A := A - L(k)*D(k)*L(k)' = A - W(k)*(1/D(k))*W(k)' */
+
+ r1 = 1.f / ap[kc];
+ i__1 = *n - k;
+ r__1 = -r1;
+ sspr_(uplo, &i__1, &r__1, &ap[kc + 1], &c__1, &ap[kc + *n
+ - k + 1]);
+
+/* Store L(k) in column K */
+
+ i__1 = *n - k;
+ sscal_(&i__1, &r1, &ap[kc + 1], &c__1);
+ }
+ } else {
+
+/* 2-by-2 pivot block D(k): columns K and K+1 now hold */
+
+/* ( W(k) W(k+1) ) = ( L(k) L(k+1) )*D(k) */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th columns */
+/* of L */
+
+ if (k < *n - 1) {
+
+/* Perform a rank-2 update of A(k+2:n,k+2:n) as */
+
+/* A := A - ( L(k) L(k+1) )*D(k)*( L(k) L(k+1) )' */
+/* = A - ( W(k) W(k+1) )*inv(D(k))*( W(k) W(k+1) )' */
+
+ d21 = ap[k + 1 + (k - 1) * ((*n << 1) - k) / 2];
+ d11 = ap[k + 1 + k * ((*n << 1) - k - 1) / 2] / d21;
+ d22 = ap[k + (k - 1) * ((*n << 1) - k) / 2] / d21;
+ t = 1.f / (d11 * d22 - 1.f);
+ d21 = t / d21;
+
+ i__1 = *n;
+ for (j = k + 2; j <= i__1; ++j) {
+ wk = d21 * (d11 * ap[j + (k - 1) * ((*n << 1) - k) /
+ 2] - ap[j + k * ((*n << 1) - k - 1) / 2]);
+ wkp1 = d21 * (d22 * ap[j + k * ((*n << 1) - k - 1) /
+ 2] - ap[j + (k - 1) * ((*n << 1) - k) / 2]);
+
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ ap[i__ + (j - 1) * ((*n << 1) - j) / 2] = ap[i__
+ + (j - 1) * ((*n << 1) - j) / 2] - ap[i__
+ + (k - 1) * ((*n << 1) - k) / 2] * wk -
+ ap[i__ + k * ((*n << 1) - k - 1) / 2] *
+ wkp1;
+/* L90: */
+ }
+
+ ap[j + (k - 1) * ((*n << 1) - k) / 2] = wk;
+ ap[j + k * ((*n << 1) - k - 1) / 2] = wkp1;
+
+/* L100: */
+ }
+ }
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k + 1] = -kp;
+ }
+
+/* Increase K and return to the start of the main loop */
+
+ k += kstep;
+ kc = knc + *n - k + 2;
+ goto L60;
+
+ }
+
+L110:
+ return 0;
+
+/* End of SSPTRF */
+
+} /* ssptrf_ */
diff --git a/contrib/libs/clapack/ssptri.c b/contrib/libs/clapack/ssptri.c
new file mode 100644
index 0000000000..bdaa88404e
--- /dev/null
+++ b/contrib/libs/clapack/ssptri.c
@@ -0,0 +1,407 @@
+/* ssptri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b11 = -1.f;
+static real c_b13 = 0.f;
+
+/* Subroutine */ int ssptri_(char *uplo, integer *n, real *ap, integer *ipiv,
+ real *work, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1;
+
+ /* Local variables */
+ real d__;
+ integer j, k;
+ real t, ak;
+ integer kc, kp, kx, kpc, npp;
+ real akp1, temp;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ real akkp1;
+ extern logical lsame_(char *, char *);
+ integer kstep;
+ logical upper;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), sswap_(integer *, real *, integer *, real *, integer *
+), sspmv_(char *, integer *, real *, real *, real *, integer *,
+ real *, real *, integer *), xerbla_(char *, integer *);
+ integer kcnext;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSPTRI computes the inverse of a real symmetric indefinite matrix */
+/* A in packed storage using the factorization A = U*D*U**T or */
+/* A = L*D*L**T computed by SSPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**T; */
+/* = 'L': Lower triangular, form is A = L*D*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) REAL array, dimension (N*(N+1)/2) */
+/* On entry, the block diagonal matrix D and the multipliers */
+/* used to obtain the factor U or L as computed by SSPTRF, */
+/* stored as a packed triangular matrix. */
+
+/* On exit, if INFO = 0, the (symmetric) inverse of the original */
+/* matrix, stored as a packed triangular matrix. The j-th column */
+/* of inv(A) is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = inv(A)(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', */
+/* AP(i + (j-1)*(2n-j)/2) = inv(A)(i,j) for j<=i<=n. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by SSPTRF. */
+
+/* WORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) = 0; the matrix is singular and its */
+/* inverse could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --work;
+ --ipiv;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSPTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check that the diagonal matrix D is nonsingular. */
+
+ if (upper) {
+
+/* Upper triangular storage: examine D from bottom to top */
+
+ kp = *n * (*n + 1) / 2;
+ for (*info = *n; *info >= 1; --(*info)) {
+ if (ipiv[*info] > 0 && ap[kp] == 0.f) {
+ return 0;
+ }
+ kp -= *info;
+/* L10: */
+ }
+ } else {
+
+/* Lower triangular storage: examine D from top to bottom. */
+
+ kp = 1;
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ if (ipiv[*info] > 0 && ap[kp] == 0.f) {
+ return 0;
+ }
+ kp = kp + *n - *info + 1;
+/* L20: */
+ }
+ }
+ *info = 0;
+
+ if (upper) {
+
+/* Compute inv(A) from the factorization A = U*D*U'. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+ kc = 1;
+L30:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L50;
+ }
+
+ kcnext = kc + k;
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Invert the diagonal block. */
+
+ ap[kc + k - 1] = 1.f / ap[kc + k - 1];
+
+/* Compute column K of the inverse. */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ scopy_(&i__1, &ap[kc], &c__1, &work[1], &c__1);
+ i__1 = k - 1;
+ sspmv_(uplo, &i__1, &c_b11, &ap[1], &work[1], &c__1, &c_b13, &
+ ap[kc], &c__1);
+ i__1 = k - 1;
+ ap[kc + k - 1] -= sdot_(&i__1, &work[1], &c__1, &ap[kc], &
+ c__1);
+ }
+ kstep = 1;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Invert the diagonal block. */
+
+ t = (r__1 = ap[kcnext + k - 1], dabs(r__1));
+ ak = ap[kc + k - 1] / t;
+ akp1 = ap[kcnext + k] / t;
+ akkp1 = ap[kcnext + k - 1] / t;
+ d__ = t * (ak * akp1 - 1.f);
+ ap[kc + k - 1] = akp1 / d__;
+ ap[kcnext + k] = ak / d__;
+ ap[kcnext + k - 1] = -akkp1 / d__;
+
+/* Compute columns K and K+1 of the inverse. */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ scopy_(&i__1, &ap[kc], &c__1, &work[1], &c__1);
+ i__1 = k - 1;
+ sspmv_(uplo, &i__1, &c_b11, &ap[1], &work[1], &c__1, &c_b13, &
+ ap[kc], &c__1);
+ i__1 = k - 1;
+ ap[kc + k - 1] -= sdot_(&i__1, &work[1], &c__1, &ap[kc], &
+ c__1);
+ i__1 = k - 1;
+ ap[kcnext + k - 1] -= sdot_(&i__1, &ap[kc], &c__1, &ap[kcnext]
+, &c__1);
+ i__1 = k - 1;
+ scopy_(&i__1, &ap[kcnext], &c__1, &work[1], &c__1);
+ i__1 = k - 1;
+ sspmv_(uplo, &i__1, &c_b11, &ap[1], &work[1], &c__1, &c_b13, &
+ ap[kcnext], &c__1);
+ i__1 = k - 1;
+ ap[kcnext + k] -= sdot_(&i__1, &work[1], &c__1, &ap[kcnext], &
+ c__1);
+ }
+ kstep = 2;
+ kcnext = kcnext + k + 1;
+ }
+
+ kp = (i__1 = ipiv[k], abs(i__1));
+ if (kp != k) {
+
+/* Interchange rows and columns K and KP in the leading */
+/* submatrix A(1:k+1,1:k+1) */
+
+ kpc = (kp - 1) * kp / 2 + 1;
+ i__1 = kp - 1;
+ sswap_(&i__1, &ap[kc], &c__1, &ap[kpc], &c__1);
+ kx = kpc + kp - 1;
+ i__1 = k - 1;
+ for (j = kp + 1; j <= i__1; ++j) {
+ kx = kx + j - 1;
+ temp = ap[kc + j - 1];
+ ap[kc + j - 1] = ap[kx];
+ ap[kx] = temp;
+/* L40: */
+ }
+ temp = ap[kc + k - 1];
+ ap[kc + k - 1] = ap[kpc + kp - 1];
+ ap[kpc + kp - 1] = temp;
+ if (kstep == 2) {
+ temp = ap[kc + k + k - 1];
+ ap[kc + k + k - 1] = ap[kc + k + kp - 1];
+ ap[kc + k + kp - 1] = temp;
+ }
+ }
+
+ k += kstep;
+ kc = kcnext;
+ goto L30;
+L50:
+
+ ;
+ } else {
+
+/* Compute inv(A) from the factorization A = L*D*L'. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ npp = *n * (*n + 1) / 2;
+ k = *n;
+ kc = npp;
+L60:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L80;
+ }
+
+ kcnext = kc - (*n - k + 2);
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Invert the diagonal block. */
+
+ ap[kc] = 1.f / ap[kc];
+
+/* Compute column K of the inverse. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ scopy_(&i__1, &ap[kc + 1], &c__1, &work[1], &c__1);
+ i__1 = *n - k;
+ sspmv_(uplo, &i__1, &c_b11, &ap[kc + *n - k + 1], &work[1], &
+ c__1, &c_b13, &ap[kc + 1], &c__1);
+ i__1 = *n - k;
+ ap[kc] -= sdot_(&i__1, &work[1], &c__1, &ap[kc + 1], &c__1);
+ }
+ kstep = 1;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Invert the diagonal block. */
+
+ t = (r__1 = ap[kcnext + 1], dabs(r__1));
+ ak = ap[kcnext] / t;
+ akp1 = ap[kc] / t;
+ akkp1 = ap[kcnext + 1] / t;
+ d__ = t * (ak * akp1 - 1.f);
+ ap[kcnext] = akp1 / d__;
+ ap[kc] = ak / d__;
+ ap[kcnext + 1] = -akkp1 / d__;
+
+/* Compute columns K-1 and K of the inverse. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ scopy_(&i__1, &ap[kc + 1], &c__1, &work[1], &c__1);
+ i__1 = *n - k;
+ sspmv_(uplo, &i__1, &c_b11, &ap[kc + (*n - k + 1)], &work[1],
+ &c__1, &c_b13, &ap[kc + 1], &c__1);
+ i__1 = *n - k;
+ ap[kc] -= sdot_(&i__1, &work[1], &c__1, &ap[kc + 1], &c__1);
+ i__1 = *n - k;
+ ap[kcnext + 1] -= sdot_(&i__1, &ap[kc + 1], &c__1, &ap[kcnext
+ + 2], &c__1);
+ i__1 = *n - k;
+ scopy_(&i__1, &ap[kcnext + 2], &c__1, &work[1], &c__1);
+ i__1 = *n - k;
+ sspmv_(uplo, &i__1, &c_b11, &ap[kc + (*n - k + 1)], &work[1],
+ &c__1, &c_b13, &ap[kcnext + 2], &c__1);
+ i__1 = *n - k;
+ ap[kcnext] -= sdot_(&i__1, &work[1], &c__1, &ap[kcnext + 2], &
+ c__1);
+ }
+ kstep = 2;
+ kcnext -= *n - k + 3;
+ }
+
+ kp = (i__1 = ipiv[k], abs(i__1));
+ if (kp != k) {
+
+/* Interchange rows and columns K and KP in the trailing */
+/* submatrix A(k-1:n,k-1:n) */
+
+ kpc = npp - (*n - kp + 1) * (*n - kp + 2) / 2 + 1;
+ if (kp < *n) {
+ i__1 = *n - kp;
+ sswap_(&i__1, &ap[kc + kp - k + 1], &c__1, &ap[kpc + 1], &
+ c__1);
+ }
+ kx = kc + kp - k;
+ i__1 = kp - 1;
+ for (j = k + 1; j <= i__1; ++j) {
+ kx = kx + *n - j + 1;
+ temp = ap[kc + j - k];
+ ap[kc + j - k] = ap[kx];
+ ap[kx] = temp;
+/* L70: */
+ }
+ temp = ap[kc];
+ ap[kc] = ap[kpc];
+ ap[kpc] = temp;
+ if (kstep == 2) {
+ temp = ap[kc - *n + k - 1];
+ ap[kc - *n + k - 1] = ap[kc - *n + kp - 1];
+ ap[kc - *n + kp - 1] = temp;
+ }
+ }
+
+ k -= kstep;
+ kc = kcnext;
+ goto L60;
+L80:
+ ;
+ }
+
+ return 0;
+
+/* End of SSPTRI */
+
+} /* ssptri_ */
diff --git a/contrib/libs/clapack/ssptrs.c b/contrib/libs/clapack/ssptrs.c
new file mode 100644
index 0000000000..c83efd61bf
--- /dev/null
+++ b/contrib/libs/clapack/ssptrs.c
@@ -0,0 +1,452 @@
+/* ssptrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b7 = -1.f;
+static integer c__1 = 1;
+static real c_b19 = 1.f;
+
+/* Subroutine */ int ssptrs_(char *uplo, integer *n, integer *nrhs, real *ap,
+ integer *ipiv, real *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1;
+ real r__1;
+
+ /* Local variables */
+ integer j, k;
+ real ak, bk;
+ integer kc, kp;
+ real akm1, bkm1;
+ extern /* Subroutine */ int sger_(integer *, integer *, real *, real *,
+ integer *, real *, integer *, real *, integer *);
+ real akm1k;
+ extern logical lsame_(char *, char *);
+ real denom;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ sgemv_(char *, integer *, integer *, real *, real *, integer *,
+ real *, integer *, real *, real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+ integer *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSPTRS solves a system of linear equations A*X = B with a real */
+/* symmetric matrix A stored in packed format using the factorization */
+/* A = U*D*U**T or A = L*D*L**T computed by SSPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**T; */
+/* = 'L': Lower triangular, form is A = L*D*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input) REAL array, dimension (N*(N+1)/2) */
+/* The block diagonal matrix D and the multipliers used to */
+/* obtain the factor U or L as computed by SSPTRF, stored as a */
+/* packed triangular matrix. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by SSPTRF. */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --ap;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSPTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Solve A*X = B, where A = U*D*U'. */
+
+/* First solve U*D*X = B, overwriting B with X. */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = *n;
+ kc = *n * (*n + 1) / 2 + 1;
+L10:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L30;
+ }
+
+ kc -= k;
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ sswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(U(K)), where U(K) is the transformation */
+/* stored in column K of A. */
+
+ i__1 = k - 1;
+ sger_(&i__1, nrhs, &c_b7, &ap[kc], &c__1, &b[k + b_dim1], ldb, &b[
+ b_dim1 + 1], ldb);
+
+/* Multiply by the inverse of the diagonal block. */
+
+ r__1 = 1.f / ap[kc + k - 1];
+ sscal_(nrhs, &r__1, &b[k + b_dim1], ldb);
+ --k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Interchange rows K-1 and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k - 1) {
+ sswap_(nrhs, &b[k - 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(U(K)), where U(K) is the transformation */
+/* stored in columns K-1 and K of A. */
+
+ i__1 = k - 2;
+ sger_(&i__1, nrhs, &c_b7, &ap[kc], &c__1, &b[k + b_dim1], ldb, &b[
+ b_dim1 + 1], ldb);
+ i__1 = k - 2;
+ sger_(&i__1, nrhs, &c_b7, &ap[kc - (k - 1)], &c__1, &b[k - 1 +
+ b_dim1], ldb, &b[b_dim1 + 1], ldb);
+
+/* Multiply by the inverse of the diagonal block. */
+
+ akm1k = ap[kc + k - 2];
+ akm1 = ap[kc - 1] / akm1k;
+ ak = ap[kc + k - 1] / akm1k;
+ denom = akm1 * ak - 1.f;
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ bkm1 = b[k - 1 + j * b_dim1] / akm1k;
+ bk = b[k + j * b_dim1] / akm1k;
+ b[k - 1 + j * b_dim1] = (ak * bkm1 - bk) / denom;
+ b[k + j * b_dim1] = (akm1 * bk - bkm1) / denom;
+/* L20: */
+ }
+ kc = kc - k + 1;
+ k += -2;
+ }
+
+ goto L10;
+L30:
+
+/* Next solve U'*X = B, overwriting B with X. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+ kc = 1;
+L40:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L50;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Multiply by inv(U'(K)), where U(K) is the transformation */
+/* stored in column K of A. */
+
+ i__1 = k - 1;
+ sgemv_("Transpose", &i__1, nrhs, &c_b7, &b[b_offset], ldb, &ap[kc]
+, &c__1, &c_b19, &b[k + b_dim1], ldb);
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ sswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ kc += k;
+ ++k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Multiply by inv(U'(K+1)), where U(K+1) is the transformation */
+/* stored in columns K and K+1 of A. */
+
+ i__1 = k - 1;
+ sgemv_("Transpose", &i__1, nrhs, &c_b7, &b[b_offset], ldb, &ap[kc]
+, &c__1, &c_b19, &b[k + b_dim1], ldb);
+ i__1 = k - 1;
+ sgemv_("Transpose", &i__1, nrhs, &c_b7, &b[b_offset], ldb, &ap[kc
+ + k], &c__1, &c_b19, &b[k + 1 + b_dim1], ldb);
+
+/* Interchange rows K and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k) {
+ sswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ kc = kc + (k << 1) + 1;
+ k += 2;
+ }
+
+ goto L40;
+L50:
+
+ ;
+ } else {
+
+/* Solve A*X = B, where A = L*D*L'. */
+
+/* First solve L*D*X = B, overwriting B with X. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+ kc = 1;
+L60:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L80;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ sswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(L(K)), where L(K) is the transformation */
+/* stored in column K of A. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ sger_(&i__1, nrhs, &c_b7, &ap[kc + 1], &c__1, &b[k + b_dim1],
+ ldb, &b[k + 1 + b_dim1], ldb);
+ }
+
+/* Multiply by the inverse of the diagonal block. */
+
+ r__1 = 1.f / ap[kc];
+ sscal_(nrhs, &r__1, &b[k + b_dim1], ldb);
+ kc = kc + *n - k + 1;
+ ++k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Interchange rows K+1 and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k + 1) {
+ sswap_(nrhs, &b[k + 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(L(K)), where L(K) is the transformation */
+/* stored in columns K and K+1 of A. */
+
+ if (k < *n - 1) {
+ i__1 = *n - k - 1;
+ sger_(&i__1, nrhs, &c_b7, &ap[kc + 2], &c__1, &b[k + b_dim1],
+ ldb, &b[k + 2 + b_dim1], ldb);
+ i__1 = *n - k - 1;
+ sger_(&i__1, nrhs, &c_b7, &ap[kc + *n - k + 2], &c__1, &b[k +
+ 1 + b_dim1], ldb, &b[k + 2 + b_dim1], ldb);
+ }
+
+/* Multiply by the inverse of the diagonal block. */
+
+ akm1k = ap[kc + 1];
+ akm1 = ap[kc] / akm1k;
+ ak = ap[kc + *n - k + 1] / akm1k;
+ denom = akm1 * ak - 1.f;
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ bkm1 = b[k + j * b_dim1] / akm1k;
+ bk = b[k + 1 + j * b_dim1] / akm1k;
+ b[k + j * b_dim1] = (ak * bkm1 - bk) / denom;
+ b[k + 1 + j * b_dim1] = (akm1 * bk - bkm1) / denom;
+/* L70: */
+ }
+ kc = kc + (*n - k << 1) + 1;
+ k += 2;
+ }
+
+ goto L60;
+L80:
+
+/* Next solve L'*X = B, overwriting B with X. */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = *n;
+ kc = *n * (*n + 1) / 2 + 1;
+L90:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L100;
+ }
+
+ kc -= *n - k + 1;
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Multiply by inv(L'(K)), where L(K) is the transformation */
+/* stored in column K of A. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ sgemv_("Transpose", &i__1, nrhs, &c_b7, &b[k + 1 + b_dim1],
+ ldb, &ap[kc + 1], &c__1, &c_b19, &b[k + b_dim1], ldb);
+ }
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ sswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ --k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Multiply by inv(L'(K-1)), where L(K-1) is the transformation */
+/* stored in columns K-1 and K of A. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ sgemv_("Transpose", &i__1, nrhs, &c_b7, &b[k + 1 + b_dim1],
+ ldb, &ap[kc + 1], &c__1, &c_b19, &b[k + b_dim1], ldb);
+ i__1 = *n - k;
+ sgemv_("Transpose", &i__1, nrhs, &c_b7, &b[k + 1 + b_dim1],
+ ldb, &ap[kc - (*n - k)], &c__1, &c_b19, &b[k - 1 +
+ b_dim1], ldb);
+ }
+
+/* Interchange rows K and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k) {
+ sswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ kc -= *n - k + 2;
+ k += -2;
+ }
+
+ goto L90;
+L100:
+ ;
+ }
+
+ return 0;
+
+/* End of SSPTRS */
+
+} /* ssptrs_ */
diff --git a/contrib/libs/clapack/sstebz.c b/contrib/libs/clapack/sstebz.c
new file mode 100644
index 0000000000..c2457fd8d5
--- /dev/null
+++ b/contrib/libs/clapack/sstebz.c
@@ -0,0 +1,773 @@
+/* sstebz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+static integer c__0 = 0;
+
+/* Subroutine */ int sstebz_(char *range, char *order, integer *n, real *vl,
+ real *vu, integer *il, integer *iu, real *abstol, real *d__, real *e,
+ integer *m, integer *nsplit, real *w, integer *iblock, integer *
+ isplit, real *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ real r__1, r__2, r__3, r__4, r__5;
+
+ /* Builtin functions */
+ double sqrt(doublereal), log(doublereal);
+
+ /* Local variables */
+ integer j, ib, jb, ie, je, nb;
+ real gl;
+ integer im, in;
+ real gu;
+ integer iw;
+ real wl, wu;
+ integer nwl;
+ real ulp, wlu, wul;
+ integer nwu;
+ real tmp1, tmp2;
+ integer iend, ioff, iout, itmp1, jdisc;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ real atoli;
+ integer iwoff;
+ real bnorm;
+ integer itmax;
+ real wkill, rtoli, tnorm;
+ integer ibegin, irange, idiscl;
+ extern doublereal slamch_(char *);
+ real safemn;
+ integer idumma[1];
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer idiscu;
+ extern /* Subroutine */ int slaebz_(integer *, integer *, integer *,
+ integer *, integer *, integer *, real *, real *, real *, real *,
+ real *, real *, integer *, real *, real *, integer *, integer *,
+ real *, integer *, integer *);
+ integer iorder;
+ logical ncnvrg;
+ real pivmin;
+ logical toofew;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+/* 8-18-00: Increase FUDGE factor for T3E (eca) */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSTEBZ computes the eigenvalues of a symmetric tridiagonal */
+/* matrix T. The user may ask for all eigenvalues, all eigenvalues */
+/* in the half-open interval (VL, VU], or the IL-th through IU-th */
+/* eigenvalues. */
+
+/* To avoid overflow, the matrix must be scaled so that its */
+/* largest element is no greater than overflow**(1/2) * */
+/* underflow**(1/4) in absolute value, and for greatest */
+/* accuracy, it should not be much smaller than that. */
+
+/* See W. Kahan "Accurate Eigenvalues of a Symmetric Tridiagonal */
+/* Matrix", Report CS41, Computer Science Dept., Stanford */
+/* University, July 21, 1966. */
+
+/* Arguments */
+/* ========= */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': ("All") all eigenvalues will be found. */
+/* = 'V': ("Value") all eigenvalues in the half-open interval */
+/* (VL, VU] will be found. */
+/* = 'I': ("Index") the IL-th through IU-th eigenvalues (of the */
+/* entire matrix) will be found. */
+
+/* ORDER (input) CHARACTER*1 */
+/* = 'B': ("By Block") the eigenvalues will be grouped by */
+/* split-off block (see IBLOCK, ISPLIT) and */
+/* ordered from smallest to largest within */
+/* the block. */
+/* = 'E': ("Entire matrix") */
+/* the eigenvalues for the entire matrix */
+/* will be ordered from smallest to */
+/* largest. */
+
+/* N (input) INTEGER */
+/* The order of the tridiagonal matrix T. N >= 0. */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. Eigenvalues less than or equal */
+/* to VL, or greater than VU, will not be returned. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) REAL */
+/* The absolute tolerance for the eigenvalues. An eigenvalue */
+/* (or cluster) is considered to be located if it has been */
+/* determined to lie in an interval whose width is ABSTOL or */
+/* less. If ABSTOL is less than or equal to zero, then ULP*|T| */
+/* will be used, where |T| means the 1-norm of T. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*SLAMCH('S'), not zero. */
+
+/* D (input) REAL array, dimension (N) */
+/* The n diagonal elements of the tridiagonal matrix T. */
+
+/* E (input) REAL array, dimension (N-1) */
+/* The (n-1) off-diagonal elements of the tridiagonal matrix T. */
+
+/* M (output) INTEGER */
+/* The actual number of eigenvalues found. 0 <= M <= N. */
+/* (See also the description of INFO=2,3.) */
+
+/* NSPLIT (output) INTEGER */
+/* The number of diagonal blocks in the matrix T. */
+/* 1 <= NSPLIT <= N. */
+
+/* W (output) REAL array, dimension (N) */
+/* On exit, the first M elements of W will contain the */
+/* eigenvalues. (SSTEBZ may use the remaining N-M elements as */
+/* workspace.) */
+
+/* IBLOCK (output) INTEGER array, dimension (N) */
+/* At each row/column j where E(j) is zero or small, the */
+/* matrix T is considered to split into a block diagonal */
+/* matrix. On exit, if INFO = 0, IBLOCK(i) specifies to which */
+/* block (from 1 to the number of blocks) the eigenvalue W(i) */
+/* belongs. (SSTEBZ may use the remaining N-M elements as */
+/* workspace.) */
+
+/* ISPLIT (output) INTEGER array, dimension (N) */
+/* The splitting points, at which T breaks up into submatrices. */
+/* The first submatrix consists of rows/columns 1 to ISPLIT(1), */
+/* the second of rows/columns ISPLIT(1)+1 through ISPLIT(2), */
+/* etc., and the NSPLIT-th consists of rows/columns */
+/* ISPLIT(NSPLIT-1)+1 through ISPLIT(NSPLIT)=N. */
+/* (Only the first NSPLIT elements will actually be used, but */
+/* since the user cannot know a priori what value NSPLIT will */
+/* have, N words must be reserved for ISPLIT.) */
+
+/* WORK (workspace) REAL array, dimension (4*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (3*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: some or all of the eigenvalues failed to converge or */
+/* were not computed: */
+/* =1 or 3: Bisection failed to converge for some */
+/* eigenvalues; these eigenvalues are flagged by a */
+/* negative block number. The effect is that the */
+/* eigenvalues may not be as accurate as the */
+/* absolute and relative tolerances. This is */
+/* generally caused by unexpectedly inaccurate */
+/* arithmetic. */
+/* =2 or 3: RANGE='I' only: Not all of the eigenvalues */
+/* IL:IU were found. */
+/* Effect: M < IU+1-IL */
+/* Cause: non-monotonic arithmetic, causing the */
+/* Sturm sequence to be non-monotonic. */
+/* Cure: recalculate, using RANGE='A', and pick */
+/* out eigenvalues IL:IU. In some cases, */
+/* increasing the PARAMETER "FUDGE" may */
+/* make things work. */
+/* = 4: RANGE='I', and the Gershgorin interval */
+/* initially used was too small. No eigenvalues */
+/* were computed. */
+/* Probable cause: your machine has sloppy */
+/* floating-point arithmetic. */
+/* Cure: Increase the PARAMETER "FUDGE", */
+/* recompile, and try again. */
+
+/* Internal Parameters */
+/* =================== */
+
+/* RELFAC REAL, default = 2.0e0 */
+/* The relative tolerance. An interval (a,b] lies within */
+/* "relative tolerance" if b-a < RELFAC*ulp*max(|a|,|b|), */
+/* where "ulp" is the machine precision (distance from 1 to */
+/* the next larger floating point number.) */
+
+/* FUDGE REAL, default = 2 */
+/* A "fudge factor" to widen the Gershgorin intervals. Ideally, */
+/* a value of 1 should work, but on machines with sloppy */
+/* arithmetic, this needs to be larger. The default for */
+/* publicly released versions should be large enough to handle */
+/* the worst machine around. Note that this has no effect */
+/* on accuracy of the solution. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --iwork;
+ --work;
+ --isplit;
+ --iblock;
+ --w;
+ --e;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+
+/* Decode RANGE */
+
+ if (lsame_(range, "A")) {
+ irange = 1;
+ } else if (lsame_(range, "V")) {
+ irange = 2;
+ } else if (lsame_(range, "I")) {
+ irange = 3;
+ } else {
+ irange = 0;
+ }
+
+/* Decode ORDER */
+
+ if (lsame_(order, "B")) {
+ iorder = 2;
+ } else if (lsame_(order, "E")) {
+ iorder = 1;
+ } else {
+ iorder = 0;
+ }
+
+/* Check for Errors */
+
+ if (irange <= 0) {
+ *info = -1;
+ } else if (iorder <= 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (irange == 2) {
+ if (*vl >= *vu) {
+ *info = -5;
+ }
+ } else if (irange == 3 && (*il < 1 || *il > max(1,*n))) {
+ *info = -6;
+ } else if (irange == 3 && (*iu < min(*n,*il) || *iu > *n)) {
+ *info = -7;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSTEBZ", &i__1);
+ return 0;
+ }
+
+/* Initialize error flags */
+
+ *info = 0;
+ ncnvrg = FALSE_;
+ toofew = FALSE_;
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Simplifications: */
+
+ if (irange == 3 && *il == 1 && *iu == *n) {
+ irange = 1;
+ }
+
+/* Get machine constants */
+/* NB is the minimum vector length for vector bisection, or 0 */
+/* if only scalar is to be done. */
+
+ safemn = slamch_("S");
+ ulp = slamch_("P");
+ rtoli = ulp * 2.f;
+ nb = ilaenv_(&c__1, "SSTEBZ", " ", n, &c_n1, &c_n1, &c_n1);
+ if (nb <= 1) {
+ nb = 0;
+ }
+
+/* Special Case when N=1 */
+
+ if (*n == 1) {
+ *nsplit = 1;
+ isplit[1] = 1;
+ if (irange == 2 && (*vl >= d__[1] || *vu < d__[1])) {
+ *m = 0;
+ } else {
+ w[1] = d__[1];
+ iblock[1] = 1;
+ *m = 1;
+ }
+ return 0;
+ }
+
+/* Compute Splitting Points */
+
+ *nsplit = 1;
+ work[*n] = 0.f;
+ pivmin = 1.f;
+
+/* DIR$ NOVECTOR */
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+/* Computing 2nd power */
+ r__1 = e[j - 1];
+ tmp1 = r__1 * r__1;
+/* Computing 2nd power */
+ r__2 = ulp;
+ if ((r__1 = d__[j] * d__[j - 1], dabs(r__1)) * (r__2 * r__2) + safemn
+ > tmp1) {
+ isplit[*nsplit] = j - 1;
+ ++(*nsplit);
+ work[j - 1] = 0.f;
+ } else {
+ work[j - 1] = tmp1;
+ pivmin = dmax(pivmin,tmp1);
+ }
+/* L10: */
+ }
+ isplit[*nsplit] = *n;
+ pivmin *= safemn;
+
+/* Compute Interval and ATOLI */
+
+ if (irange == 3) {
+
+/* RANGE='I': Compute the interval containing eigenvalues */
+/* IL through IU. */
+
+/* Compute Gershgorin interval for entire (split) matrix */
+/* and use it as the initial interval */
+
+ gu = d__[1];
+ gl = d__[1];
+ tmp1 = 0.f;
+
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ tmp2 = sqrt(work[j]);
+/* Computing MAX */
+ r__1 = gu, r__2 = d__[j] + tmp1 + tmp2;
+ gu = dmax(r__1,r__2);
+/* Computing MIN */
+ r__1 = gl, r__2 = d__[j] - tmp1 - tmp2;
+ gl = dmin(r__1,r__2);
+ tmp1 = tmp2;
+/* L20: */
+ }
+
+/* Computing MAX */
+ r__1 = gu, r__2 = d__[*n] + tmp1;
+ gu = dmax(r__1,r__2);
+/* Computing MIN */
+ r__1 = gl, r__2 = d__[*n] - tmp1;
+ gl = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = dabs(gl), r__2 = dabs(gu);
+ tnorm = dmax(r__1,r__2);
+ gl = gl - tnorm * 2.1f * ulp * *n - pivmin * 4.2000000000000002f;
+ gu = gu + tnorm * 2.1f * ulp * *n + pivmin * 2.1f;
+
+/* Compute Iteration parameters */
+
+ itmax = (integer) ((log(tnorm + pivmin) - log(pivmin)) / log(2.f)) +
+ 2;
+ if (*abstol <= 0.f) {
+ atoli = ulp * tnorm;
+ } else {
+ atoli = *abstol;
+ }
+
+ work[*n + 1] = gl;
+ work[*n + 2] = gl;
+ work[*n + 3] = gu;
+ work[*n + 4] = gu;
+ work[*n + 5] = gl;
+ work[*n + 6] = gu;
+ iwork[1] = -1;
+ iwork[2] = -1;
+ iwork[3] = *n + 1;
+ iwork[4] = *n + 1;
+ iwork[5] = *il - 1;
+ iwork[6] = *iu;
+
+ slaebz_(&c__3, &itmax, n, &c__2, &c__2, &nb, &atoli, &rtoli, &pivmin,
+ &d__[1], &e[1], &work[1], &iwork[5], &work[*n + 1], &work[*n
+ + 5], &iout, &iwork[1], &w[1], &iblock[1], &iinfo);
+
+ if (iwork[6] == *iu) {
+ wl = work[*n + 1];
+ wlu = work[*n + 3];
+ nwl = iwork[1];
+ wu = work[*n + 4];
+ wul = work[*n + 2];
+ nwu = iwork[4];
+ } else {
+ wl = work[*n + 2];
+ wlu = work[*n + 4];
+ nwl = iwork[2];
+ wu = work[*n + 3];
+ wul = work[*n + 1];
+ nwu = iwork[3];
+ }
+
+ if (nwl < 0 || nwl >= *n || nwu < 1 || nwu > *n) {
+ *info = 4;
+ return 0;
+ }
+ } else {
+
+/* RANGE='A' or 'V' -- Set ATOLI */
+
+/* Computing MAX */
+ r__3 = dabs(d__[1]) + dabs(e[1]), r__4 = (r__1 = d__[*n], dabs(r__1))
+ + (r__2 = e[*n - 1], dabs(r__2));
+ tnorm = dmax(r__3,r__4);
+
+ i__1 = *n - 1;
+ for (j = 2; j <= i__1; ++j) {
+/* Computing MAX */
+ r__4 = tnorm, r__5 = (r__1 = d__[j], dabs(r__1)) + (r__2 = e[j -
+ 1], dabs(r__2)) + (r__3 = e[j], dabs(r__3));
+ tnorm = dmax(r__4,r__5);
+/* L30: */
+ }
+
+ if (*abstol <= 0.f) {
+ atoli = ulp * tnorm;
+ } else {
+ atoli = *abstol;
+ }
+
+ if (irange == 2) {
+ wl = *vl;
+ wu = *vu;
+ } else {
+ wl = 0.f;
+ wu = 0.f;
+ }
+ }
+
+/* Find Eigenvalues -- Loop Over Blocks and recompute NWL and NWU. */
+/* NWL accumulates the number of eigenvalues .le. WL, */
+/* NWU accumulates the number of eigenvalues .le. WU */
+
+ *m = 0;
+ iend = 0;
+ *info = 0;
+ nwl = 0;
+ nwu = 0;
+
+ i__1 = *nsplit;
+ for (jb = 1; jb <= i__1; ++jb) {
+ ioff = iend;
+ ibegin = ioff + 1;
+ iend = isplit[jb];
+ in = iend - ioff;
+
+ if (in == 1) {
+
+/* Special Case -- IN=1 */
+
+ if (irange == 1 || wl >= d__[ibegin] - pivmin) {
+ ++nwl;
+ }
+ if (irange == 1 || wu >= d__[ibegin] - pivmin) {
+ ++nwu;
+ }
+ if (irange == 1 || wl < d__[ibegin] - pivmin && wu >= d__[ibegin]
+ - pivmin) {
+ ++(*m);
+ w[*m] = d__[ibegin];
+ iblock[*m] = jb;
+ }
+ } else {
+
+/* General Case -- IN > 1 */
+
+/* Compute Gershgorin Interval */
+/* and use it as the initial interval */
+
+ gu = d__[ibegin];
+ gl = d__[ibegin];
+ tmp1 = 0.f;
+
+ i__2 = iend - 1;
+ for (j = ibegin; j <= i__2; ++j) {
+ tmp2 = (r__1 = e[j], dabs(r__1));
+/* Computing MAX */
+ r__1 = gu, r__2 = d__[j] + tmp1 + tmp2;
+ gu = dmax(r__1,r__2);
+/* Computing MIN */
+ r__1 = gl, r__2 = d__[j] - tmp1 - tmp2;
+ gl = dmin(r__1,r__2);
+ tmp1 = tmp2;
+/* L40: */
+ }
+
+/* Computing MAX */
+ r__1 = gu, r__2 = d__[iend] + tmp1;
+ gu = dmax(r__1,r__2);
+/* Computing MIN */
+ r__1 = gl, r__2 = d__[iend] - tmp1;
+ gl = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = dabs(gl), r__2 = dabs(gu);
+ bnorm = dmax(r__1,r__2);
+ gl = gl - bnorm * 2.1f * ulp * in - pivmin * 2.1f;
+ gu = gu + bnorm * 2.1f * ulp * in + pivmin * 2.1f;
+
+/* Compute ATOLI for the current submatrix */
+
+ if (*abstol <= 0.f) {
+/* Computing MAX */
+ r__1 = dabs(gl), r__2 = dabs(gu);
+ atoli = ulp * dmax(r__1,r__2);
+ } else {
+ atoli = *abstol;
+ }
+
+ if (irange > 1) {
+ if (gu < wl) {
+ nwl += in;
+ nwu += in;
+ goto L70;
+ }
+ gl = dmax(gl,wl);
+ gu = dmin(gu,wu);
+ if (gl >= gu) {
+ goto L70;
+ }
+ }
+
+/* Set Up Initial Interval */
+
+ work[*n + 1] = gl;
+ work[*n + in + 1] = gu;
+ slaebz_(&c__1, &c__0, &in, &in, &c__1, &nb, &atoli, &rtoli, &
+ pivmin, &d__[ibegin], &e[ibegin], &work[ibegin], idumma, &
+ work[*n + 1], &work[*n + (in << 1) + 1], &im, &iwork[1], &
+ w[*m + 1], &iblock[*m + 1], &iinfo);
+
+ nwl += iwork[1];
+ nwu += iwork[in + 1];
+ iwoff = *m - iwork[1];
+
+/* Compute Eigenvalues */
+
+ itmax = (integer) ((log(gu - gl + pivmin) - log(pivmin)) / log(
+ 2.f)) + 2;
+ slaebz_(&c__2, &itmax, &in, &in, &c__1, &nb, &atoli, &rtoli, &
+ pivmin, &d__[ibegin], &e[ibegin], &work[ibegin], idumma, &
+ work[*n + 1], &work[*n + (in << 1) + 1], &iout, &iwork[1],
+ &w[*m + 1], &iblock[*m + 1], &iinfo);
+
+/* Copy Eigenvalues Into W and IBLOCK */
+/* Use -JB for block number for unconverged eigenvalues. */
+
+ i__2 = iout;
+ for (j = 1; j <= i__2; ++j) {
+ tmp1 = (work[j + *n] + work[j + in + *n]) * .5f;
+
+/* Flag non-convergence. */
+
+ if (j > iout - iinfo) {
+ ncnvrg = TRUE_;
+ ib = -jb;
+ } else {
+ ib = jb;
+ }
+ i__3 = iwork[j + in] + iwoff;
+ for (je = iwork[j] + 1 + iwoff; je <= i__3; ++je) {
+ w[je] = tmp1;
+ iblock[je] = ib;
+/* L50: */
+ }
+/* L60: */
+ }
+
+ *m += im;
+ }
+L70:
+ ;
+ }
+
+/* If RANGE='I', then (WL,WU) contains eigenvalues NWL+1,...,NWU */
+/* If NWL+1 < IL or NWU > IU, discard extra eigenvalues. */
+
+ if (irange == 3) {
+ im = 0;
+ idiscl = *il - 1 - nwl;
+ idiscu = nwu - *iu;
+
+ if (idiscl > 0 || idiscu > 0) {
+ i__1 = *m;
+ for (je = 1; je <= i__1; ++je) {
+ if (w[je] <= wlu && idiscl > 0) {
+ --idiscl;
+ } else if (w[je] >= wul && idiscu > 0) {
+ --idiscu;
+ } else {
+ ++im;
+ w[im] = w[je];
+ iblock[im] = iblock[je];
+ }
+/* L80: */
+ }
+ *m = im;
+ }
+ if (idiscl > 0 || idiscu > 0) {
+
+/* Code to deal with effects of bad arithmetic: */
+/* Some low eigenvalues to be discarded are not in (WL,WLU], */
+/* or high eigenvalues to be discarded are not in (WUL,WU] */
+/* so just kill off the smallest IDISCL/largest IDISCU */
+/* eigenvalues, by simply finding the smallest/largest */
+/* eigenvalue(s). */
+
+/* (If N(w) is monotone non-decreasing, this should never */
+/* happen.) */
+
+ if (idiscl > 0) {
+ wkill = wu;
+ i__1 = idiscl;
+ for (jdisc = 1; jdisc <= i__1; ++jdisc) {
+ iw = 0;
+ i__2 = *m;
+ for (je = 1; je <= i__2; ++je) {
+ if (iblock[je] != 0 && (w[je] < wkill || iw == 0)) {
+ iw = je;
+ wkill = w[je];
+ }
+/* L90: */
+ }
+ iblock[iw] = 0;
+/* L100: */
+ }
+ }
+ if (idiscu > 0) {
+
+ wkill = wl;
+ i__1 = idiscu;
+ for (jdisc = 1; jdisc <= i__1; ++jdisc) {
+ iw = 0;
+ i__2 = *m;
+ for (je = 1; je <= i__2; ++je) {
+ if (iblock[je] != 0 && (w[je] > wkill || iw == 0)) {
+ iw = je;
+ wkill = w[je];
+ }
+/* L110: */
+ }
+ iblock[iw] = 0;
+/* L120: */
+ }
+ }
+ im = 0;
+ i__1 = *m;
+ for (je = 1; je <= i__1; ++je) {
+ if (iblock[je] != 0) {
+ ++im;
+ w[im] = w[je];
+ iblock[im] = iblock[je];
+ }
+/* L130: */
+ }
+ *m = im;
+ }
+ if (idiscl < 0 || idiscu < 0) {
+ toofew = TRUE_;
+ }
+ }
+
+/* If ORDER='B', do nothing -- the eigenvalues are already sorted */
+/* by block. */
+/* If ORDER='E', sort the eigenvalues from smallest to largest */
+
+ if (iorder == 1 && *nsplit > 1) {
+ i__1 = *m - 1;
+ for (je = 1; je <= i__1; ++je) {
+ ie = 0;
+ tmp1 = w[je];
+ i__2 = *m;
+ for (j = je + 1; j <= i__2; ++j) {
+ if (w[j] < tmp1) {
+ ie = j;
+ tmp1 = w[j];
+ }
+/* L140: */
+ }
+
+ if (ie != 0) {
+ itmp1 = iblock[ie];
+ w[ie] = w[je];
+ iblock[ie] = iblock[je];
+ w[je] = tmp1;
+ iblock[je] = itmp1;
+ }
+/* L150: */
+ }
+ }
+
+ *info = 0;
+ if (ncnvrg) {
+ ++(*info);
+ }
+ if (toofew) {
+ *info += 2;
+ }
+ return 0;
+
+/* End of SSTEBZ */
+
+} /* sstebz_ */
diff --git a/contrib/libs/clapack/sstedc.c b/contrib/libs/clapack/sstedc.c
new file mode 100644
index 0000000000..950f318c8a
--- /dev/null
+++ b/contrib/libs/clapack/sstedc.c
@@ -0,0 +1,484 @@
+/* sstedc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__9 = 9;
+static integer c__0 = 0;
+static integer c__2 = 2;
+static real c_b17 = 0.f;
+static real c_b18 = 1.f;
+static integer c__1 = 1;
+
+/* Subroutine */ int sstedc_(char *compz, integer *n, real *d__, real *e,
+ real *z__, integer *ldz, real *work, integer *lwork, integer *iwork,
+ integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double log(doublereal);
+ integer pow_ii(integer *, integer *);
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k, m;
+ real p;
+ integer ii, lgn;
+ real eps, tiny;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ integer lwmin, start;
+ extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+ integer *), slaed0_(integer *, integer *, integer *, real *, real
+ *, real *, integer *, real *, integer *, real *, integer *,
+ integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer finish;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), slacpy_(char *, integer *, integer *, real *, integer *,
+ real *, integer *), slaset_(char *, integer *, integer *,
+ real *, real *, real *, integer *);
+ integer liwmin, icompz;
+ real orgnrm;
+ extern doublereal slanst_(char *, integer *, real *, real *);
+ extern /* Subroutine */ int ssterf_(integer *, real *, real *, integer *),
+ slasrt_(char *, integer *, real *, integer *);
+ logical lquery;
+ integer smlsiz;
+ extern /* Subroutine */ int ssteqr_(char *, integer *, real *, real *,
+ real *, integer *, real *, integer *);
+ integer storez, strtrw;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSTEDC computes all eigenvalues and, optionally, eigenvectors of a */
+/* symmetric tridiagonal matrix using the divide and conquer method. */
+/* The eigenvectors of a full or band real symmetric matrix can also be */
+/* found if SSYTRD or SSPTRD or SSBTRD has been used to reduce this */
+/* matrix to tridiagonal form. */
+
+/* This code makes very mild assumptions about floating point */
+/* arithmetic. It will work on machines with a guard digit in */
+/* add/subtract, or on those binary machines without guard digits */
+/* which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. */
+/* It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. See SLAED3 for details. */
+
+/* Arguments */
+/* ========= */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only. */
+/* = 'I': Compute eigenvectors of tridiagonal matrix also. */
+/* = 'V': Compute eigenvectors of original dense symmetric */
+/* matrix also. On entry, Z contains the orthogonal */
+/* matrix used to reduce the original matrix to */
+/* tridiagonal form. */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the diagonal elements of the tridiagonal matrix. */
+/* On exit, if INFO = 0, the eigenvalues in ascending order. */
+
+/* E (input/output) REAL array, dimension (N-1) */
+/* On entry, the subdiagonal elements of the tridiagonal matrix. */
+/* On exit, E has been destroyed. */
+
+/* Z (input/output) REAL array, dimension (LDZ,N) */
+/* On entry, if COMPZ = 'V', then Z contains the orthogonal */
+/* matrix used in the reduction to tridiagonal form. */
+/* On exit, if INFO = 0, then if COMPZ = 'V', Z contains the */
+/* orthonormal eigenvectors of the original symmetric matrix, */
+/* and if COMPZ = 'I', Z contains the orthonormal eigenvectors */
+/* of the symmetric tridiagonal matrix. */
+/* If COMPZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1. */
+/* If eigenvectors are desired, then LDZ >= max(1,N). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If COMPZ = 'N' or N <= 1 then LWORK must be at least 1. */
+/* If COMPZ = 'V' and N > 1 then LWORK must be at least */
+/* ( 1 + 3*N + 2*N*lg N + 3*N**2 ), */
+/* where lg( N ) = smallest integer k such */
+/* that 2**k >= N. */
+/* If COMPZ = 'I' and N > 1 then LWORK must be at least */
+/* ( 1 + 4*N + N**2 ). */
+/* Note that for COMPZ = 'I' or 'V', then if N is less than or */
+/* equal to the minimum divide size, usually 25, then LWORK need */
+/* only be max(1,2*(N-1)). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If COMPZ = 'N' or N <= 1 then LIWORK must be at least 1. */
+/* If COMPZ = 'V' and N > 1 then LIWORK must be at least */
+/* ( 6 + 6*N + 5*N*lg N ). */
+/* If COMPZ = 'I' and N > 1 then LIWORK must be at least */
+/* ( 3 + 5*N ). */
+/* Note that for COMPZ = 'I' or 'V', then if N is less than or */
+/* equal to the minimum divide size, usually 25, then LIWORK */
+/* need only be 1. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal size of the IWORK array, */
+/* returns this value as the first entry of the IWORK array, and */
+/* no error message related to LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: The algorithm failed to compute an eigenvalue while */
+/* working on the submatrix lying in rows and columns */
+/* INFO/(N+1) through mod(INFO,N+1). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+/* Modified by Francoise Tisseur, University of Tennessee. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1 || *liwork == -1;
+
+ if (lsame_(compz, "N")) {
+ icompz = 0;
+ } else if (lsame_(compz, "V")) {
+ icompz = 1;
+ } else if (lsame_(compz, "I")) {
+ icompz = 2;
+ } else {
+ icompz = -1;
+ }
+ if (icompz < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldz < 1 || icompz > 0 && *ldz < max(1,*n)) {
+ *info = -6;
+ }
+
+ if (*info == 0) {
+
+/* Compute the workspace requirements */
+
+ smlsiz = ilaenv_(&c__9, "SSTEDC", " ", &c__0, &c__0, &c__0, &c__0);
+ if (*n <= 1 || icompz == 0) {
+ liwmin = 1;
+ lwmin = 1;
+ } else if (*n <= smlsiz) {
+ liwmin = 1;
+ lwmin = *n - 1 << 1;
+ } else {
+ lgn = (integer) (log((real) (*n)) / log(2.f));
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ if (icompz == 1) {
+/* Computing 2nd power */
+ i__1 = *n;
+ lwmin = *n * 3 + 1 + (*n << 1) * lgn + i__1 * i__1 * 3;
+ liwmin = *n * 6 + 6 + *n * 5 * lgn;
+ } else if (icompz == 2) {
+/* Computing 2nd power */
+ i__1 = *n;
+ lwmin = (*n << 2) + 1 + i__1 * i__1;
+ liwmin = *n * 5 + 3;
+ }
+ }
+ work[1] = (real) lwmin;
+ iwork[1] = liwmin;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -8;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -10;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSTEDC", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*n == 1) {
+ if (icompz != 0) {
+ z__[z_dim1 + 1] = 1.f;
+ }
+ return 0;
+ }
+
+/* If the following conditional clause is removed, then the routine */
+/* will use the Divide and Conquer routine to compute only the */
+/* eigenvalues, which requires (3N + 3N**2) real workspace and */
+/* (2 + 5N + 2N lg(N)) integer workspace. */
+/* Since on many architectures SSTERF is much faster than any other */
+/* algorithm for finding eigenvalues only, it is used here */
+/* as the default. If the conditional clause is removed, then */
+/* information on the size of workspace needs to be changed. */
+
+/* If COMPZ = 'N', use SSTERF to compute the eigenvalues. */
+
+ if (icompz == 0) {
+ ssterf_(n, &d__[1], &e[1], info);
+ goto L50;
+ }
+
+/* If N is smaller than the minimum divide size (SMLSIZ+1), then */
+/* solve the problem with another solver. */
+
+ if (*n <= smlsiz) {
+
+ ssteqr_(compz, n, &d__[1], &e[1], &z__[z_offset], ldz, &work[1], info);
+
+ } else {
+
+/* If COMPZ = 'V', the Z matrix must be stored elsewhere for later */
+/* use. */
+
+ if (icompz == 1) {
+ storez = *n * *n + 1;
+ } else {
+ storez = 1;
+ }
+
+ if (icompz == 2) {
+ slaset_("Full", n, n, &c_b17, &c_b18, &z__[z_offset], ldz);
+ }
+
+/* Scale. */
+
+ orgnrm = slanst_("M", n, &d__[1], &e[1]);
+ if (orgnrm == 0.f) {
+ goto L50;
+ }
+
+ eps = slamch_("Epsilon");
+
+ start = 1;
+
+/* while ( START <= N ) */
+
+L10:
+ if (start <= *n) {
+
+/* Let FINISH be the position of the next subdiagonal entry */
+/* such that E( FINISH ) <= TINY or FINISH = N if no such */
+/* subdiagonal exists. The matrix identified by the elements */
+/* between START and FINISH constitutes an independent */
+/* sub-problem. */
+
+ finish = start;
+L20:
+ if (finish < *n) {
+ tiny = eps * sqrt((r__1 = d__[finish], dabs(r__1))) * sqrt((
+ r__2 = d__[finish + 1], dabs(r__2)));
+ if ((r__1 = e[finish], dabs(r__1)) > tiny) {
+ ++finish;
+ goto L20;
+ }
+ }
+
+/* (Sub) Problem determined. Compute its size and solve it. */
+
+ m = finish - start + 1;
+ if (m == 1) {
+ start = finish + 1;
+ goto L10;
+ }
+ if (m > smlsiz) {
+
+/* Scale. */
+
+ orgnrm = slanst_("M", &m, &d__[start], &e[start]);
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b18, &m, &c__1, &d__[
+ start], &m, info);
+ i__1 = m - 1;
+ i__2 = m - 1;
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b18, &i__1, &c__1, &e[
+ start], &i__2, info);
+
+ if (icompz == 1) {
+ strtrw = 1;
+ } else {
+ strtrw = start;
+ }
+ slaed0_(&icompz, n, &m, &d__[start], &e[start], &z__[strtrw +
+ start * z_dim1], ldz, &work[1], n, &work[storez], &
+ iwork[1], info);
+ if (*info != 0) {
+ *info = (*info / (m + 1) + start - 1) * (*n + 1) + *info %
+ (m + 1) + start - 1;
+ goto L50;
+ }
+
+/* Scale back. */
+
+ slascl_("G", &c__0, &c__0, &c_b18, &orgnrm, &m, &c__1, &d__[
+ start], &m, info);
+
+ } else {
+ if (icompz == 1) {
+
+/* Since QR won't update a Z matrix which is larger than */
+/* the length of D, we must solve the sub-problem in a */
+/* workspace and then multiply back into Z. */
+
+ ssteqr_("I", &m, &d__[start], &e[start], &work[1], &m, &
+ work[m * m + 1], info);
+ slacpy_("A", n, &m, &z__[start * z_dim1 + 1], ldz, &work[
+ storez], n);
+ sgemm_("N", "N", n, &m, &m, &c_b18, &work[storez], n, &
+ work[1], &m, &c_b17, &z__[start * z_dim1 + 1],
+ ldz);
+ } else if (icompz == 2) {
+ ssteqr_("I", &m, &d__[start], &e[start], &z__[start +
+ start * z_dim1], ldz, &work[1], info);
+ } else {
+ ssterf_(&m, &d__[start], &e[start], info);
+ }
+ if (*info != 0) {
+ *info = start * (*n + 1) + finish;
+ goto L50;
+ }
+ }
+
+ start = finish + 1;
+ goto L10;
+ }
+
+/* endwhile */
+
+/* If the problem split any number of times, then the eigenvalues */
+/* will not be properly ordered. Here we permute the eigenvalues */
+/* (and the associated eigenvectors) into ascending order. */
+
+ if (m != *n) {
+ if (icompz == 0) {
+
+/* Use Quick Sort */
+
+ slasrt_("I", n, &d__[1], info);
+
+ } else {
+
+/* Use Selection Sort to minimize swaps of eigenvectors */
+
+ i__1 = *n;
+ for (ii = 2; ii <= i__1; ++ii) {
+ i__ = ii - 1;
+ k = i__;
+ p = d__[i__];
+ i__2 = *n;
+ for (j = ii; j <= i__2; ++j) {
+ if (d__[j] < p) {
+ k = j;
+ p = d__[j];
+ }
+/* L30: */
+ }
+ if (k != i__) {
+ d__[k] = d__[i__];
+ d__[i__] = p;
+ sswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k *
+ z_dim1 + 1], &c__1);
+ }
+/* L40: */
+ }
+ }
+ }
+ }
+
+L50:
+ work[1] = (real) lwmin;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of SSTEDC */
+
+} /* sstedc_ */
diff --git a/contrib/libs/clapack/sstegr.c b/contrib/libs/clapack/sstegr.c
new file mode 100644
index 0000000000..77714cf305
--- /dev/null
+++ b/contrib/libs/clapack/sstegr.c
@@ -0,0 +1,209 @@
+/* sstegr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int sstegr_(char *jobz, char *range, integer *n, real *d__,
+ real *e, real *vl, real *vu, integer *il, integer *iu, real *abstol,
+ integer *m, real *w, real *z__, integer *ldz, integer *isuppz, real *
+ work, integer *lwork, integer *iwork, integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset;
+
+ /* Local variables */
+ logical tryrac;
+ extern /* Subroutine */ int sstemr_(char *, char *, integer *, real *,
+ real *, real *, real *, integer *, integer *, integer *, real *,
+ real *, integer *, integer *, integer *, logical *, real *,
+ integer *, integer *, integer *, integer *);
+
+
+
+/* -- LAPACK computational routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSTEGR computes selected eigenvalues and, optionally, eigenvectors */
+/* of a real symmetric tridiagonal matrix T. Any such unreduced matrix has */
+/* a well-defined set of pairwise different real eigenvalues; the corresponding */
+/* real eigenvectors are pairwise orthogonal. */
+
+/* The spectrum may be computed either completely or partially by specifying */
+/* either an interval (VL,VU] or a range of indices IL:IU for the desired */
+/* eigenvalues. */
+
+/* SSTEGR is a compatibility wrapper around the improved SSTEMR routine. */
+/* See SSTEMR for further details. */
+
+/* One important change is that the ABSTOL parameter no longer provides any */
+/* benefit and hence is no longer used. */
+
+/* Note : SSTEGR and SSTEMR work only on machines which follow */
+/* IEEE-754 floating-point standard in their handling of infinities and */
+/* NaNs. Normal execution may create these exceptional values and hence */
+/* may abort due to a floating point exception in environments which */
+/* do not conform to the IEEE-754 standard. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the N diagonal elements of the tridiagonal matrix */
+/* T. On exit, D is overwritten. */
+
+/* E (input/output) REAL array, dimension (N) */
+/* On entry, the (N-1) subdiagonal elements of the tridiagonal */
+/* matrix T in elements 1 to N-1 of E. E(N) need not be set on */
+/* input, but is used internally as workspace. */
+/* On exit, E is overwritten. */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) REAL */
+/* Unused. Was the absolute error tolerance for the */
+/* eigenvalues/eigenvectors in previous versions. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) REAL array, dimension (N) */
+/* The first M elements contain the selected eigenvalues in */
+/* ascending order. */
+
+/* Z (output) REAL array, dimension (LDZ, max(1,M) ) */
+/* If JOBZ = 'V', and if INFO = 0, then the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix T */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+/* Supplying N columns is always safe. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', then LDZ >= max(1,N). */
+
+/* ISUPPZ (output) INTEGER ARRAY, dimension ( 2*max(1,M) ) */
+/* The support of the eigenvectors in Z, i.e., the indices */
+/* indicating the nonzero elements in Z. The i-th computed eigenvector */
+/* is nonzero only in elements ISUPPZ( 2*i-1 ) through */
+/* ISUPPZ( 2*i ). This is relevant in the case when the matrix */
+/* is split. ISUPPZ is only accessed when JOBZ is 'V' and N > 0. */
+
+/* WORK (workspace/output) REAL array, dimension (LWORK) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal */
+/* (and minimal) LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,18*N) */
+/* if JOBZ = 'V', and LWORK >= max(1,12*N) if JOBZ = 'N'. */
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (LIWORK) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. LIWORK >= max(1,10*N) */
+/* if the eigenvectors are desired, and LIWORK >= max(1,8*N) */
+/* if only the eigenvalues are to be computed. */
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal size of the IWORK array, */
+/* returns this value as the first entry of the IWORK array, and */
+/* no error message related to LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* On exit, INFO */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = 1X, internal error in SLARRE, */
+/* if INFO = 2X, internal error in SLARRV. */
+/* Here, the digit X = ABS( IINFO ) < 10, where IINFO is */
+/* the nonzero error code returned by SLARRE or */
+/* SLARRV, respectively. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Inderjit Dhillon, IBM Almaden, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, LBNL/NERSC, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --isuppz;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ tryrac = FALSE_;
+ sstemr_(jobz, range, n, &d__[1], &e[1], vl, vu, il, iu, m, &w[1], &z__[
+ z_offset], ldz, n, &isuppz[1], &tryrac, &work[1], lwork, &iwork[1]
+, liwork, info);
+
+/* End of SSTEGR */
+
+ return 0;
+} /* sstegr_ */
diff --git a/contrib/libs/clapack/sstein.c b/contrib/libs/clapack/sstein.c
new file mode 100644
index 0000000000..cd9ae7681e
--- /dev/null
+++ b/contrib/libs/clapack/sstein.c
@@ -0,0 +1,449 @@
+/* sstein.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__2 = 2;
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int sstein_(integer *n, real *d__, real *e, integer *m, real
+ *w, integer *iblock, integer *isplit, real *z__, integer *ldz, real *
+ work, integer *iwork, integer *ifail, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2, i__3;
+ real r__1, r__2, r__3, r__4, r__5;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, b1, j1, bn;
+ real xj, scl, eps, ctr, sep, nrm, tol;
+ integer its;
+ real xjm, eps1;
+ integer jblk, nblk, jmax;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *),
+ snrm2_(integer *, real *, integer *);
+ integer iseed[4], gpind, iinfo;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ extern doublereal sasum_(integer *, real *, integer *);
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ real ortol;
+ extern /* Subroutine */ int saxpy_(integer *, real *, real *, integer *,
+ real *, integer *);
+ integer indrv1, indrv2, indrv3, indrv4, indrv5;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), slagtf_(
+ integer *, real *, real *, real *, real *, real *, real *,
+ integer *, integer *);
+ integer nrmchk;
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int slagts_(integer *, integer *, real *, real *,
+ real *, real *, integer *, real *, real *, integer *);
+ integer blksiz;
+ real onenrm, pertol;
+ extern /* Subroutine */ int slarnv_(integer *, integer *, integer *, real
+ *);
+ real stpcrt;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSTEIN computes the eigenvectors of a real symmetric tridiagonal */
+/* matrix T corresponding to specified eigenvalues, using inverse */
+/* iteration. */
+
+/* The maximum number of iterations allowed for each eigenvector is */
+/* specified by an internal parameter MAXITS (currently set to 5). */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input) REAL array, dimension (N) */
+/* The n diagonal elements of the tridiagonal matrix T. */
+
+/* E (input) REAL array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of the tridiagonal matrix */
+/* T, in elements 1 to N-1. */
+
+/* M (input) INTEGER */
+/* The number of eigenvectors to be found. 0 <= M <= N. */
+
+/* W (input) REAL array, dimension (N) */
+/* The first M elements of W contain the eigenvalues for */
+/* which eigenvectors are to be computed. The eigenvalues */
+/* should be grouped by split-off block and ordered from */
+/* smallest to largest within the block. ( The output array */
+/* W from SSTEBZ with ORDER = 'B' is expected here. ) */
+
+/* IBLOCK (input) INTEGER array, dimension (N) */
+/* The submatrix indices associated with the corresponding */
+/* eigenvalues in W; IBLOCK(i)=1 if eigenvalue W(i) belongs to */
+/* the first submatrix from the top, =2 if W(i) belongs to */
+/* the second submatrix, etc. ( The output array IBLOCK */
+/* from SSTEBZ is expected here. ) */
+
+/* ISPLIT (input) INTEGER array, dimension (N) */
+/* The splitting points, at which T breaks up into submatrices. */
+/* The first submatrix consists of rows/columns 1 to */
+/* ISPLIT( 1 ), the second of rows/columns ISPLIT( 1 )+1 */
+/* through ISPLIT( 2 ), etc. */
+/* ( The output array ISPLIT from SSTEBZ is expected here. ) */
+
+/* Z (output) REAL array, dimension (LDZ, M) */
+/* The computed eigenvectors. The eigenvector associated */
+/* with the eigenvalue W(i) is stored in the i-th column of */
+/* Z. Any vector which fails to converge is set to its current */
+/* iterate after MAXITS iterations. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= max(1,N). */
+
+/* WORK (workspace) REAL array, dimension (5*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* IFAIL (output) INTEGER array, dimension (M) */
+/* On normal exit, all elements of IFAIL are zero. */
+/* If one or more eigenvectors fail to converge after */
+/* MAXITS iterations, then their indices are stored in */
+/* array IFAIL. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, then i eigenvectors failed to converge */
+/* in MAXITS iterations. Their indices are stored in */
+/* array IFAIL. */
+
+/* Internal Parameters */
+/* =================== */
+
+/* MAXITS INTEGER, default = 5 */
+/* The maximum number of iterations performed. */
+
+/* EXTRA INTEGER, default = 2 */
+/* The number of iterations performed after norm growth */
+/* criterion is satisfied, should be at least 1. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ --w;
+ --iblock;
+ --isplit;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ *info = 0;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ifail[i__] = 0;
+/* L10: */
+ }
+
+ if (*n < 0) {
+ *info = -1;
+ } else if (*m < 0 || *m > *n) {
+ *info = -4;
+ } else if (*ldz < max(1,*n)) {
+ *info = -9;
+ } else {
+ i__1 = *m;
+ for (j = 2; j <= i__1; ++j) {
+ if (iblock[j] < iblock[j - 1]) {
+ *info = -6;
+ goto L30;
+ }
+ if (iblock[j] == iblock[j - 1] && w[j] < w[j - 1]) {
+ *info = -5;
+ goto L30;
+ }
+/* L20: */
+ }
+L30:
+ ;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSTEIN", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *m == 0) {
+ return 0;
+ } else if (*n == 1) {
+ z__[z_dim1 + 1] = 1.f;
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ eps = slamch_("Precision");
+
+/* Initialize seed for random number generator SLARNV. */
+
+ for (i__ = 1; i__ <= 4; ++i__) {
+ iseed[i__ - 1] = 1;
+/* L40: */
+ }
+
+/* Initialize pointers. */
+
+ indrv1 = 0;
+ indrv2 = indrv1 + *n;
+ indrv3 = indrv2 + *n;
+ indrv4 = indrv3 + *n;
+ indrv5 = indrv4 + *n;
+
+/* Compute eigenvectors of matrix blocks. */
+
+ j1 = 1;
+ i__1 = iblock[*m];
+ for (nblk = 1; nblk <= i__1; ++nblk) {
+
+/* Find starting and ending indices of block nblk. */
+
+ if (nblk == 1) {
+ b1 = 1;
+ } else {
+ b1 = isplit[nblk - 1] + 1;
+ }
+ bn = isplit[nblk];
+ blksiz = bn - b1 + 1;
+ if (blksiz == 1) {
+ goto L60;
+ }
+ gpind = b1;
+
+/* Compute reorthogonalization criterion and stopping criterion. */
+
+ onenrm = (r__1 = d__[b1], dabs(r__1)) + (r__2 = e[b1], dabs(r__2));
+/* Computing MAX */
+ r__3 = onenrm, r__4 = (r__1 = d__[bn], dabs(r__1)) + (r__2 = e[bn - 1]
+ , dabs(r__2));
+ onenrm = dmax(r__3,r__4);
+ i__2 = bn - 1;
+ for (i__ = b1 + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__4 = onenrm, r__5 = (r__1 = d__[i__], dabs(r__1)) + (r__2 = e[
+ i__ - 1], dabs(r__2)) + (r__3 = e[i__], dabs(r__3));
+ onenrm = dmax(r__4,r__5);
+/* L50: */
+ }
+ ortol = onenrm * .001f;
+
+ stpcrt = sqrt(.1f / blksiz);
+
+/* Loop through eigenvalues of block nblk. */
+
+L60:
+ jblk = 0;
+ i__2 = *m;
+ for (j = j1; j <= i__2; ++j) {
+ if (iblock[j] != nblk) {
+ j1 = j;
+ goto L160;
+ }
+ ++jblk;
+ xj = w[j];
+
+/* Skip all the work if the block size is one. */
+
+ if (blksiz == 1) {
+ work[indrv1 + 1] = 1.f;
+ goto L120;
+ }
+
+/* If eigenvalues j and j-1 are too close, add a relatively */
+/* small perturbation. */
+
+ if (jblk > 1) {
+ eps1 = (r__1 = eps * xj, dabs(r__1));
+ pertol = eps1 * 10.f;
+ sep = xj - xjm;
+ if (sep < pertol) {
+ xj = xjm + pertol;
+ }
+ }
+
+ its = 0;
+ nrmchk = 0;
+
+/* Get random starting vector. */
+
+ slarnv_(&c__2, iseed, &blksiz, &work[indrv1 + 1]);
+
+/* Copy the matrix T so it won't be destroyed in factorization. */
+
+ scopy_(&blksiz, &d__[b1], &c__1, &work[indrv4 + 1], &c__1);
+ i__3 = blksiz - 1;
+ scopy_(&i__3, &e[b1], &c__1, &work[indrv2 + 2], &c__1);
+ i__3 = blksiz - 1;
+ scopy_(&i__3, &e[b1], &c__1, &work[indrv3 + 1], &c__1);
+
+/* Compute LU factors with partial pivoting ( PT = LU ) */
+
+ tol = 0.f;
+ slagtf_(&blksiz, &work[indrv4 + 1], &xj, &work[indrv2 + 2], &work[
+ indrv3 + 1], &tol, &work[indrv5 + 1], &iwork[1], &iinfo);
+
+/* Update iteration count. */
+
+L70:
+ ++its;
+ if (its > 5) {
+ goto L100;
+ }
+
+/* Normalize and scale the right-hand side vector Pb. */
+
+/* Computing MAX */
+ r__2 = eps, r__3 = (r__1 = work[indrv4 + blksiz], dabs(r__1));
+ scl = blksiz * onenrm * dmax(r__2,r__3) / sasum_(&blksiz, &work[
+ indrv1 + 1], &c__1);
+ sscal_(&blksiz, &scl, &work[indrv1 + 1], &c__1);
+
+/* Solve the system LU = Pb. */
+
+ slagts_(&c_n1, &blksiz, &work[indrv4 + 1], &work[indrv2 + 2], &
+ work[indrv3 + 1], &work[indrv5 + 1], &iwork[1], &work[
+ indrv1 + 1], &tol, &iinfo);
+
+/* Reorthogonalize by modified Gram-Schmidt if eigenvalues are */
+/* close enough. */
+
+ if (jblk == 1) {
+ goto L90;
+ }
+ if ((r__1 = xj - xjm, dabs(r__1)) > ortol) {
+ gpind = j;
+ }
+ if (gpind != j) {
+ i__3 = j - 1;
+ for (i__ = gpind; i__ <= i__3; ++i__) {
+ ctr = -sdot_(&blksiz, &work[indrv1 + 1], &c__1, &z__[b1 +
+ i__ * z_dim1], &c__1);
+ saxpy_(&blksiz, &ctr, &z__[b1 + i__ * z_dim1], &c__1, &
+ work[indrv1 + 1], &c__1);
+/* L80: */
+ }
+ }
+
+/* Check the infinity norm of the iterate. */
+
+L90:
+ jmax = isamax_(&blksiz, &work[indrv1 + 1], &c__1);
+ nrm = (r__1 = work[indrv1 + jmax], dabs(r__1));
+
+/* Continue for additional iterations after norm reaches */
+/* stopping criterion. */
+
+ if (nrm < stpcrt) {
+ goto L70;
+ }
+ ++nrmchk;
+ if (nrmchk < 3) {
+ goto L70;
+ }
+
+ goto L110;
+
+/* If stopping criterion was not satisfied, update info and */
+/* store eigenvector number in array ifail. */
+
+L100:
+ ++(*info);
+ ifail[*info] = j;
+
+/* Accept iterate as jth eigenvector. */
+
+L110:
+ scl = 1.f / snrm2_(&blksiz, &work[indrv1 + 1], &c__1);
+ jmax = isamax_(&blksiz, &work[indrv1 + 1], &c__1);
+ if (work[indrv1 + jmax] < 0.f) {
+ scl = -scl;
+ }
+ sscal_(&blksiz, &scl, &work[indrv1 + 1], &c__1);
+L120:
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ z__[i__ + j * z_dim1] = 0.f;
+/* L130: */
+ }
+ i__3 = blksiz;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ z__[b1 + i__ - 1 + j * z_dim1] = work[indrv1 + i__];
+/* L140: */
+ }
+
+/* Save the shift to check eigenvalue spacing at next */
+/* iteration. */
+
+ xjm = xj;
+
+/* L150: */
+ }
+L160:
+ ;
+ }
+
+ return 0;
+
+/* End of SSTEIN */
+
+} /* sstein_ */
diff --git a/contrib/libs/clapack/sstemr.c b/contrib/libs/clapack/sstemr.c
new file mode 100644
index 0000000000..64a646b9f5
--- /dev/null
+++ b/contrib/libs/clapack/sstemr.c
@@ -0,0 +1,726 @@
+/* sstemr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b18 = .003f;
+
+/* Subroutine */ int sstemr_(char *jobz, char *range, integer *n, real *d__,
+	real *e, real *vl, real *vu, integer *il, integer *iu, integer *m,
+	real *w, real *z__, integer *ldz, integer *nzc, integer *isuppz,
+	logical *tryrac, real *work, integer *lwork, integer *iwork, integer *
+	liwork, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1, i__2;
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j;
+    real r1, r2;
+    integer jj;
+    real cs;
+    integer in;
+    real sn, wl, wu;
+    integer iil, iiu;
+    real eps, tmp;
+    integer indd, iend, jblk, wend;
+    real rmin, rmax;
+    integer itmp;
+    real tnrm;
+    integer inde2;
+    extern /* Subroutine */ int slae2_(real *, real *, real *, real *, real *)
+	    ;
+    integer itmp2;
+    real rtol1, rtol2, scale;
+    integer indgp;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+    integer iindw, ilast, lwmin;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+	    integer *), sswap_(integer *, real *, integer *, real *, integer *
+);
+    logical wantz;
+    extern /* Subroutine */ int slaev2_(real *, real *, real *, real *, real *
+, real *, real *);
+    logical alleig;
+    integer ibegin;
+    logical indeig;
+    integer iindbl;
+    logical valeig;
+    extern doublereal slamch_(char *);
+    integer wbegin;
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real bignum;
+    integer inderr, iindwk, indgrs, offset;
+    extern /* Subroutine */ int slarrc_(char *, integer *, real *, real *,
+	    real *, real *, real *, integer *, integer *, integer *, integer *
+), slarre_(char *, integer *, real *, real *, integer *,
+	    integer *, real *, real *, real *, real *, real *, real *,
+	    integer *, integer *, integer *, real *, real *, real *, integer *
+, integer *, real *, real *, real *, integer *, integer *)
+	    ;
+    real thresh;
+    integer iinspl, indwrk, ifirst, liwmin, nzcmin;
+    real pivmin;
+    extern doublereal slanst_(char *, integer *, real *, real *);
+    extern /* Subroutine */ int slarrj_(integer *, real *, real *, integer *,
+	    integer *, real *, integer *, real *, real *, real *, integer *,
+	    real *, real *, integer *), slarrr_(integer *, real *, real *,
+	    integer *);
+    integer nsplit;
+    extern /* Subroutine */ int slarrv_(integer *, real *, real *, real *,
+	    real *, real *, integer *, integer *, integer *, integer *, real *
+, real *, real *, real *, real *, real *, integer *, integer *,
+	    real *, real *, integer *, integer *, real *, integer *, integer *
+);
+    real smlnum;
+    extern /* Subroutine */ int slasrt_(char *, integer *, real *, integer *);
+    logical lquery, zquery;
+
+
+/* -- LAPACK computational routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SSTEMR computes selected eigenvalues and, optionally, eigenvectors */
+/*  of a real symmetric tridiagonal matrix T. Any such unreduced matrix has */
+/*  a well defined set of pairwise different real eigenvalues, the corresponding */
+/*  real eigenvectors are pairwise orthogonal. */
+
+/*  The spectrum may be computed either completely or partially by specifying */
+/*  either an interval (VL,VU] or a range of indices IL:IU for the desired */
+/*  eigenvalues. */
+
+/*  Depending on the number of desired eigenvalues, these are computed either */
+/*  by bisection or the dqds algorithm. Numerically orthogonal eigenvectors are */
+/*  computed by the use of various suitable L D L^T factorizations near clusters */
+/*  of close eigenvalues (referred to as RRRs, Relatively Robust */
+/*  Representations). An informal sketch of the algorithm follows. */
+
+/*  For each unreduced block (submatrix) of T, */
+/*     (a) Compute T - sigma I  = L D L^T, so that L and D */
+/*         define all the wanted eigenvalues to high relative accuracy. */
+/*         This means that small relative changes in the entries of D and L */
+/*         cause only small relative changes in the eigenvalues and */
+/*         eigenvectors. The standard (unfactored) representation of the */
+/*         tridiagonal matrix T does not have this property in general. */
+/*     (b) Compute the eigenvalues to suitable accuracy. */
+/*         If the eigenvectors are desired, the algorithm attains full */
+/*         accuracy of the computed eigenvalues only right before */
+/*         the corresponding vectors have to be computed, see steps c) and d). */
+/*     (c) For each cluster of close eigenvalues, select a new */
+/*         shift close to the cluster, find a new factorization, and refine */
+/*         the shifted eigenvalues to suitable accuracy. */
+/*     (d) For each eigenvalue with a large enough relative separation compute */
+/*         the corresponding eigenvector by forming a rank revealing twisted */
+/*         factorization. Go back to (c) for any clusters that remain. */
+
+/*  For more details, see: */
+/*  - Inderjit S. Dhillon and Beresford N. Parlett: "Multiple representations */
+/*    to compute orthogonal eigenvectors of symmetric tridiagonal matrices," */
+/*    Linear Algebra and its Applications, 387(1), pp. 1-28, August 2004. */
+/*  - Inderjit Dhillon and Beresford Parlett: "Orthogonal Eigenvectors and */
+/*    Relative Gaps," SIAM Journal on Matrix Analysis and Applications, Vol. 25, */
+/*    2004.  Also LAPACK Working Note 154. */
+/*  - Inderjit Dhillon: "A new O(n^2) algorithm for the symmetric */
+/*    tridiagonal eigenvalue/eigenvector problem", */
+/*    Computer Science Division Technical Report No. UCB/CSD-97-971, */
+/*    UC Berkeley, May 1997. */
+
+/*  Notes: */
+/*  1.SSTEMR works only on machines which follow IEEE-754 */
+/*  floating-point standard in their handling of infinities and NaNs. */
+/*  This permits the use of efficient inner loops avoiding a check for */
+/*  zero divisors. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOBZ    (input) CHARACTER*1 */
+/*          = 'N':  Compute eigenvalues only; */
+/*          = 'V':  Compute eigenvalues and eigenvectors. */
+
+/*  RANGE   (input) CHARACTER*1 */
+/*          = 'A': all eigenvalues will be found. */
+/*          = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/*                 will be found. */
+/*          = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix.  N >= 0. */
+
+/*  D       (input/output) REAL array, dimension (N) */
+/*          On entry, the N diagonal elements of the tridiagonal matrix */
+/*          T. On exit, D is overwritten. */
+
+/*  E       (input/output) REAL array, dimension (N) */
+/*          On entry, the (N-1) subdiagonal elements of the tridiagonal */
+/*          matrix T in elements 1 to N-1 of E. E(N) need not be set on */
+/*          input, but is used internally as workspace. */
+/*          On exit, E is overwritten. */
+
+/*  VL      (input) REAL */
+/*  VU      (input) REAL */
+/*          If RANGE='V', the lower and upper bounds of the interval to */
+/*          be searched for eigenvalues. VL < VU. */
+/*          Not referenced if RANGE = 'A' or 'I'. */
+
+/*  IL      (input) INTEGER */
+/*  IU      (input) INTEGER */
+/*          If RANGE='I', the indices (in ascending order) of the */
+/*          smallest and largest eigenvalues to be returned. */
+/*          1 <= IL <= IU <= N, if N > 0. */
+/*          Not referenced if RANGE = 'A' or 'V'. */
+
+/*  M       (output) INTEGER */
+/*          The total number of eigenvalues found.  0 <= M <= N. */
+/*          If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/*  W       (output) REAL array, dimension (N) */
+/*          The first M elements contain the selected eigenvalues in */
+/*          ascending order. */
+
+/*  Z       (output) REAL array, dimension (LDZ, max(1,M) ) */
+/*          If JOBZ = 'V', and if INFO = 0, then the first M columns of Z */
+/*          contain the orthonormal eigenvectors of the matrix T */
+/*          corresponding to the selected eigenvalues, with the i-th */
+/*          column of Z holding the eigenvector associated with W(i). */
+/*          If JOBZ = 'N', then Z is not referenced. */
+/*          Note: the user must ensure that at least max(1,M) columns are */
+/*          supplied in the array Z; if RANGE = 'V', the exact value of M */
+/*          is not known in advance and can be computed with a workspace */
+/*          query by setting NZC = -1, see below. */
+
+/*  LDZ     (input) INTEGER */
+/*          The leading dimension of the array Z.  LDZ >= 1, and if */
+/*          JOBZ = 'V', then LDZ >= max(1,N). */
+
+/*  NZC     (input) INTEGER */
+/*          The number of eigenvectors to be held in the array Z. */
+/*          If RANGE = 'A', then NZC >= max(1,N). */
+/*          If RANGE = 'V', then NZC >= the number of eigenvalues in (VL,VU]. */
+/*          If RANGE = 'I', then NZC >= IU-IL+1. */
+/*          If NZC = -1, then a workspace query is assumed; the */
+/*          routine calculates the number of columns of the array Z that */
+/*          are needed to hold the eigenvectors. */
+/*          This value is returned as the first entry of the Z array, and */
+/*          no error message related to NZC is issued by XERBLA. */
+
+/*  ISUPPZ  (output) INTEGER ARRAY, dimension ( 2*max(1,M) ) */
+/*          The support of the eigenvectors in Z, i.e., the indices */
+/*          indicating the nonzero elements in Z. The i-th computed eigenvector */
+/*          is nonzero only in elements ISUPPZ( 2*i-1 ) through */
+/*          ISUPPZ( 2*i ). This is relevant in the case when the matrix */
+/*          is split. ISUPPZ is only accessed when JOBZ is 'V' and N > 0. */
+
+/*  TRYRAC  (input/output) LOGICAL */
+/*          If TRYRAC.EQ..TRUE., indicates that the code should check whether */
+/*          the tridiagonal matrix defines its eigenvalues to high relative */
+/*          accuracy.  If so, the code uses relative-accuracy preserving */
+/*          algorithms that might be (a bit) slower depending on the matrix. */
+/*          If the matrix does not define its eigenvalues to high relative */
+/*          accuracy, the code can uses possibly faster algorithms. */
+/*          If TRYRAC.EQ..FALSE., the code is not required to guarantee */
+/*          relatively accurate eigenvalues and can use the fastest possible */
+/*          techniques. */
+/*          On exit, a .TRUE. TRYRAC will be set to .FALSE. if the matrix */
+/*          does not define its eigenvalues to high relative accuracy. */
+
+/*  WORK    (workspace/output) REAL array, dimension (LWORK) */
+/*          On exit, if INFO = 0, WORK(1) returns the optimal */
+/*          (and minimal) LWORK. */
+
+/*  LWORK   (input) INTEGER */
+/*          The dimension of the array WORK. LWORK >= max(1,18*N) */
+/*          if JOBZ = 'V', and LWORK >= max(1,12*N) if JOBZ = 'N'. */
+/*          If LWORK = -1, then a workspace query is assumed; the routine */
+/*          only calculates the optimal size of the WORK array, returns */
+/*          this value as the first entry of the WORK array, and no error */
+/*          message related to LWORK is issued by XERBLA. */
+
+/*  IWORK   (workspace/output) INTEGER array, dimension (LIWORK) */
+/*          On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/*  LIWORK  (input) INTEGER */
+/*          The dimension of the array IWORK.  LIWORK >= max(1,10*N) */
+/*          if the eigenvectors are desired, and LIWORK >= max(1,8*N) */
+/*          if only the eigenvalues are to be computed. */
+/*          If LIWORK = -1, then a workspace query is assumed; the */
+/*          routine only calculates the optimal size of the IWORK array, */
+/*          returns this value as the first entry of the IWORK array, and */
+/*          no error message related to LIWORK is issued by XERBLA. */
+
+/*  INFO    (output) INTEGER */
+/*          On exit, INFO */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = 1X, internal error in SLARRE, */
+/*                if INFO = 2X, internal error in SLARRV. */
+/*                Here, the digit X = ABS( IINFO ) < 10, where IINFO is */
+/*                the nonzero error code returned by SLARRE or */
+/*                SLARRV, respectively. */
+
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*     Beresford Parlett, University of California, Berkeley, USA */
+/*     Jim Demmel, University of California, Berkeley, USA */
+/*     Inderjit Dhillon, University of Texas, Austin, USA */
+/*     Osni Marques, LBNL/NERSC, USA */
+/*     Christof Voemel, University of California, Berkeley, USA */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --isuppz;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    alleig = lsame_(range, "A");
+    valeig = lsame_(range, "V");
+    indeig = lsame_(range, "I");
+
+    lquery = *lwork == -1 || *liwork == -1;
+    zquery = *nzc == -1;
+/*     SSTEMR needs WORK of size 6*N, IWORK of size 3*N. */
+/*     In addition, SLARRE needs WORK of size 6*N, IWORK of size 5*N. */
+/*     Furthermore, SLARRV needs WORK of size 12*N, IWORK of size 7*N. */
+    if (wantz) {
+	lwmin = *n * 18;
+	liwmin = *n * 10;
+    } else {
+/*        need less workspace if only the eigenvalues are wanted */
+	lwmin = *n * 12;
+	liwmin = *n << 3;
+    }
+    wl = 0.f;
+    wu = 0.f;
+    iil = 0;
+    iiu = 0;
+    if (valeig) {
+/*        We do not reference VL, VU in the cases RANGE = 'I','A' */
+/*        The interval (WL, WU] contains all the wanted eigenvalues. */
+/*        It is either given by the user or computed in SLARRE. */
+	wl = *vl;
+	wu = *vu;
+    } else if (indeig) {
+/*        We do not reference IL, IU in the cases RANGE = 'V','A' */
+	iil = *il;
+	iiu = *iu;
+    }
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (alleig || valeig || indeig)) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (valeig && *n > 0 && wu <= wl) {
+	*info = -7;
+    } else if (indeig && (iil < 1 || iil > *n)) {
+	*info = -8;
+    } else if (indeig && (iiu < iil || iiu > *n)) {
+	*info = -9;
+    } else if (*ldz < 1 || wantz && *ldz < *n) {
+	*info = -13;
+    } else if (*lwork < lwmin && ! lquery) {
+	*info = -17;
+    } else if (*liwork < liwmin && ! lquery) {
+	*info = -19;
+    }
+
+/*     Get machine constants. */
+
+    safmin = slamch_("Safe minimum");
+    eps = slamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1.f / smlnum;
+    rmin = sqrt(smlnum);
+/* Computing MIN */
+    r__1 = sqrt(bignum), r__2 = 1.f / sqrt(sqrt(safmin));
+    rmax = dmin(r__1,r__2);
+
+    if (*info == 0) {
+	work[1] = (real) lwmin;
+	iwork[1] = liwmin;
+
+	if (wantz && alleig) {
+	    nzcmin = *n;
+	} else if (wantz && valeig) {
+	    slarrc_("T", n, vl, vu, &d__[1], &e[1], &safmin, &nzcmin, &itmp, &
+		    itmp2, info);
+	} else if (wantz && indeig) {
+	    nzcmin = iiu - iil + 1;
+	} else {
+/*           WANTZ .EQ. FALSE. */
+	    nzcmin = 0;
+	}
+	if (zquery && *info == 0) {
+	    z__[z_dim1 + 1] = (real) nzcmin;
+	} else if (*nzc < nzcmin && ! zquery) {
+	    *info = -14;
+	}
+    }
+    if (*info != 0) {
+
+	i__1 = -(*info);
+	xerbla_("SSTEMR", &i__1);
+
+	return 0;
+    } else if (lquery || zquery) {
+	return 0;
+    }
+
+/*     Handle N = 0, 1, and 2 cases immediately */
+
+    *m = 0;
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (*n == 1) {
+	if (alleig || indeig) {
+	    *m = 1;
+	    w[1] = d__[1];
+	} else {
+	    if (wl < d__[1] && wu >= d__[1]) {
+		*m = 1;
+		w[1] = d__[1];
+	    }
+	}
+	if (wantz && ! zquery) {
+	    z__[z_dim1 + 1] = 1.f;
+	    isuppz[1] = 1;
+	    isuppz[2] = 1;
+	}
+	return 0;
+    }
+
+    if (*n == 2) {
+	if (! wantz) {
+	    slae2_(&d__[1], &e[1], &d__[2], &r1, &r2);
+	} else if (wantz && ! zquery) {
+	    slaev2_(&d__[1], &e[1], &d__[2], &r1, &r2, &cs, &sn);
+	}
+	if (alleig || valeig && r2 > wl && r2 <= wu || indeig && iil == 1) {
+	    ++(*m);
+	    w[*m] = r2;
+	    if (wantz && ! zquery) {
+		z__[*m * z_dim1 + 1] = -sn;
+		z__[*m * z_dim1 + 2] = cs;
+/*              Note: At most one of SN and CS can be zero. */
+/*              Eigenvector is (-SN, CS); the support bounds below were */
+/*              both stored into ISUPPZ(2*M-1) upstream, leaving */
+/*              ISUPPZ(2*M) unset (known LAPACK STEMR 2x2 bug; fixed */
+/*              here as in later LAPACK releases). */
+		if (sn != 0.f) {
+		    if (cs != 0.f) {
+			isuppz[(*m << 1) - 1] = 1;
+			isuppz[*m * 2] = 2; /* was isuppz[2*m-1] = 2 */
+		    } else {
+/*                      CS is zero: vector is (-SN, 0), support (1,1) */
+			isuppz[(*m << 1) - 1] = 1;
+			isuppz[*m * 2] = 1; /* was isuppz[2*m-1] = 1 */
+		    }
+		} else {
+/*                  SN is zero: vector is (0, CS), support (2,2) */
+		    isuppz[(*m << 1) - 1] = 2;
+		    isuppz[*m * 2] = 2;
+		}
+	    }
+	}
+	if (alleig || valeig && r1 > wl && r1 <= wu || indeig && iiu == 2) {
+	    ++(*m);
+	    w[*m] = r1;
+	    if (wantz && ! zquery) {
+		z__[*m * z_dim1 + 1] = cs;
+		z__[*m * z_dim1 + 2] = sn;
+/*              Note: At most one of SN and CS can be zero. */
+/*              Eigenvector is (CS, SN); note the CS/SN roles are */
+/*              swapped relative to the first vector, so the zero-CS */
+/*              and zero-SN supports swap too (upstream code repeated */
+/*              the first vector's pattern and double-stored into */
+/*              ISUPPZ(2*M-1); fixed as in later LAPACK releases). */
+		if (sn != 0.f) {
+		    if (cs != 0.f) {
+			isuppz[(*m << 1) - 1] = 1;
+			isuppz[*m * 2] = 2; /* was isuppz[2*m-1] = 2 */
+		    } else {
+/*                      CS is zero: vector is (0, SN), support (2,2) */
+			isuppz[(*m << 1) - 1] = 2; /* was 1 */
+			isuppz[*m * 2] = 2; /* was isuppz[2*m-1] = 1 */
+		    }
+		} else {
+/*                  SN is zero: vector is (CS, 0), support (1,1) */
+		    isuppz[(*m << 1) - 1] = 1; /* was 2 */
+		    isuppz[*m * 2] = 1; /* was 2 */
+		}
+	    }
+	}
+	return 0;
+    }
+/*     Continue with general N */
+    indgrs = 1;
+    inderr = (*n << 1) + 1;
+    indgp = *n * 3 + 1;
+    indd = (*n << 2) + 1;
+    inde2 = *n * 5 + 1;
+    indwrk = *n * 6 + 1;
+
+    iinspl = 1;
+    iindbl = *n + 1;
+    iindw = (*n << 1) + 1;
+    iindwk = *n * 3 + 1;
+
+/*     Scale matrix to allowable range, if necessary. */
+/*     The allowable range is related to the PIVMIN parameter; see the */
+/*     comments in SLARRD.  The preference for scaling small values */
+/*     up is heuristic; we expect users' matrices not to be close to the */
+/*     RMAX threshold. */
+
+    scale = 1.f;
+    tnrm = slanst_("M", n, &d__[1], &e[1]);
+    if (tnrm > 0.f && tnrm < rmin) {
+	scale = rmin / tnrm;
+    } else if (tnrm > rmax) {
+	scale = rmax / tnrm;
+    }
+    if (scale != 1.f) {
+	sscal_(n, &scale, &d__[1], &c__1);
+	i__1 = *n - 1;
+	sscal_(&i__1, &scale, &e[1], &c__1);
+	tnrm *= scale;
+	if (valeig) {
+/*           If eigenvalues in interval have to be found, */
+/*           scale (WL, WU] accordingly */
+	    wl *= scale;
+	    wu *= scale;
+	}
+    }
+
+/*     Compute the desired eigenvalues of the tridiagonal after splitting */
+/*     into smaller subblocks if the corresponding off-diagonal elements */
+/*     are small */
+/*     THRESH is the splitting parameter for SLARRE */
+/*     A negative THRESH forces the old splitting criterion based on the */
+/*     size of the off-diagonal. A positive THRESH switches to splitting */
+/*     which preserves relative accuracy. */
+
+    if (*tryrac) {
+/*        Test whether the matrix warrants the more expensive relative approach. */
+	slarrr_(n, &d__[1], &e[1], &iinfo);
+    } else {
+/*        The user does not care about relative accurately eigenvalues */
+	iinfo = -1;
+    }
+/*     Set the splitting criterion */
+    if (iinfo == 0) {
+	thresh = eps;
+    } else {
+	thresh = -eps;
+/*        relative accuracy is desired but T does not guarantee it */
+	*tryrac = FALSE_;
+    }
+
+    if (*tryrac) {
+/*        Copy original diagonal, needed to guarantee relative accuracy */
+	scopy_(n, &d__[1], &c__1, &work[indd], &c__1);
+    }
+/*     Store the squares of the offdiagonal values of T */
+    i__1 = *n - 1;
+    for (j = 1; j <= i__1; ++j) {
+/* Computing 2nd power */
+	r__1 = e[j];
+	work[inde2 + j - 1] = r__1 * r__1;
+/* L5: */
+    }
+/*     Set the tolerance parameters for bisection */
+    if (! wantz) {
+/*        SLARRE computes the eigenvalues to full precision. */
+	rtol1 = eps * 4.f;
+	rtol2 = eps * 4.f;
+    } else {
+/*        SLARRE computes the eigenvalues to less than full precision. */
+/*        SLARRV will refine the eigenvalue approximations, and we can */
+/*        need less accurate initial bisection in SLARRE. */
+/*        Note: these settings do only affect the subset case and SLARRE */
+/* Computing MAX */
+	r__1 = sqrt(eps) * .05f, r__2 = eps * 4.f;
+	rtol1 = dmax(r__1,r__2);
+/* Computing MAX */
+	r__1 = sqrt(eps) * .005f, r__2 = eps * 4.f;
+	rtol2 = dmax(r__1,r__2);
+    }
+    slarre_(range, n, &wl, &wu, &iil, &iiu, &d__[1], &e[1], &work[inde2], &
+	    rtol1, &rtol2, &thresh, &nsplit, &iwork[iinspl], m, &w[1], &work[
+	    inderr], &work[indgp], &iwork[iindbl], &iwork[iindw], &work[
+	    indgrs], &pivmin, &work[indwrk], &iwork[iindwk], &iinfo);
+    if (iinfo != 0) {
+	*info = abs(iinfo) + 10;
+	return 0;
+    }
+/*     Note that if RANGE .NE. 'V', SLARRE computes bounds on the desired */
+/*     part of the spectrum. All desired eigenvalues are contained in */
+/*     (WL,WU] */
+    if (wantz) {
+
+/*        Compute the desired eigenvectors corresponding to the computed */
+/*        eigenvalues */
+
+	slarrv_(n, &wl, &wu, &d__[1], &e[1], &pivmin, &iwork[iinspl], m, &
+		c__1, m, &c_b18, &rtol1, &rtol2, &w[1], &work[inderr], &work[
+		indgp], &iwork[iindbl], &iwork[iindw], &work[indgrs], &z__[
+		z_offset], ldz, &isuppz[1], &work[indwrk], &iwork[iindwk], &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = abs(iinfo) + 20;
+	    return 0;
+	}
+    } else {
+/*        SLARRE computes eigenvalues of the (shifted) root representation */
+/*        SLARRV returns the eigenvalues of the unshifted matrix. */
+/*        However, if the eigenvectors are not desired by the user, we need */
+/*        to apply the corresponding shifts from SLARRE to obtain the */
+/*        eigenvalues of the original matrix. */
+	i__1 = *m;
+	for (j = 1; j <= i__1; ++j) {
+	    itmp = iwork[iindbl + j - 1];
+	    w[j] += e[iwork[iinspl + itmp - 1]];
+/* L20: */
+	}
+    }
+
+    if (*tryrac) {
+/*        Refine computed eigenvalues so that they are relatively accurate */
+/*        with respect to the original matrix T. */
+	ibegin = 1;
+	wbegin = 1;
+	i__1 = iwork[iindbl + *m - 1];
+	for (jblk = 1; jblk <= i__1; ++jblk) {
+	    iend = iwork[iinspl + jblk - 1];
+	    in = iend - ibegin + 1;
+	    wend = wbegin - 1;
+/*           check if any eigenvalues have to be refined in this block */
+L36:
+	    if (wend < *m) {
+		if (iwork[iindbl + wend] == jblk) {
+		    ++wend;
+		    goto L36;
+		}
+	    }
+	    if (wend < wbegin) {
+		ibegin = iend + 1;
+		goto L39;
+	    }
+	    offset = iwork[iindw + wbegin - 1] - 1;
+	    ifirst = iwork[iindw + wbegin - 1];
+	    ilast = iwork[iindw + wend - 1];
+	    rtol2 = eps * 4.f;
+	    slarrj_(&in, &work[indd + ibegin - 1], &work[inde2 + ibegin - 1],
+		    &ifirst, &ilast, &rtol2, &offset, &w[wbegin], &work[
+		    inderr + wbegin - 1], &work[indwrk], &iwork[iindwk], &
+		    pivmin, &tnrm, &iinfo);
+	    ibegin = iend + 1;
+	    wbegin = wend + 1;
+L39:
+	    ;
+	}
+    }
+
+/*     If matrix was scaled, then rescale eigenvalues appropriately. */
+
+    if (scale != 1.f) {
+	r__1 = 1.f / scale;
+	sscal_(m, &r__1, &w[1], &c__1);
+    }
+
+/*     If eigenvalues are not in increasing order, then sort them, */
+/*     possibly along with eigenvectors. */
+
+    if (nsplit > 1) {
+	if (! wantz) {
+	    slasrt_("I", m, &w[1], &iinfo);
+	    if (iinfo != 0) {
+		*info = 3;
+		return 0;
+	    }
+	} else {
+	    i__1 = *m - 1;
+	    for (j = 1; j <= i__1; ++j) {
+		i__ = 0;
+		tmp = w[j];
+		i__2 = *m;
+		for (jj = j + 1; jj <= i__2; ++jj) {
+		    if (w[jj] < tmp) {
+			i__ = jj;
+			tmp = w[jj];
+		    }
+/* L50: */
+		}
+		if (i__ != 0) {
+		    w[i__] = w[j];
+		    w[j] = tmp;
+		    if (wantz) {
+			sswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j *
+				z_dim1 + 1], &c__1);
+			itmp = isuppz[(i__ << 1) - 1];
+			isuppz[(i__ << 1) - 1] = isuppz[(j << 1) - 1];
+			isuppz[(j << 1) - 1] = itmp;
+			itmp = isuppz[i__ * 2];
+			isuppz[i__ * 2] = isuppz[j * 2];
+			isuppz[j * 2] = itmp;
+		    }
+		}
+/* L60: */
+	    }
+	}
+    }
+
+
+    work[1] = (real) lwmin;
+    iwork[1] = liwmin;
+    return 0;
+
+/*  End of SSTEMR */
+
+} /* sstemr_ */
diff --git a/contrib/libs/clapack/ssteqr.c b/contrib/libs/clapack/ssteqr.c
new file mode 100644
index 0000000000..a4740fe9a3
--- /dev/null
+++ b/contrib/libs/clapack/ssteqr.c
@@ -0,0 +1,617 @@
+/* ssteqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b9 = 0.f;
+static real c_b10 = 1.f;
+static integer c__0 = 0;
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int ssteqr_(char *compz, integer *n, real *d__, real *e,
+ real *z__, integer *ldz, real *work, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ real b, c__, f, g;
+ integer i__, j, k, l, m;
+ real p, r__, s;
+ integer l1, ii, mm, lm1, mm1, nm1;
+ real rt1, rt2, eps;
+ integer lsv;
+ real tst, eps2;
+ integer lend, jtot;
+ extern /* Subroutine */ int slae2_(real *, real *, real *, real *, real *)
+ ;
+ extern logical lsame_(char *, char *);
+ real anorm;
+ extern /* Subroutine */ int slasr_(char *, char *, char *, integer *,
+ integer *, real *, real *, real *, integer *), sswap_(integer *, real *, integer *, real *, integer *);
+ integer lendm1, lendp1;
+ extern /* Subroutine */ int slaev2_(real *, real *, real *, real *, real *
+, real *, real *);
+ extern doublereal slapy2_(real *, real *);
+ integer iscale;
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real safmax;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ integer lendsv;
+ extern /* Subroutine */ int slartg_(real *, real *, real *, real *, real *
+), slaset_(char *, integer *, integer *, real *, real *, real *,
+ integer *);
+ real ssfmin;
+ integer nmaxit, icompz;
+ real ssfmax;
+ extern doublereal slanst_(char *, integer *, real *, real *);
+ extern /* Subroutine */ int slasrt_(char *, integer *, real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSTEQR computes all eigenvalues and, optionally, eigenvectors of a */
+/* symmetric tridiagonal matrix using the implicit QL or QR method. */
+/* The eigenvectors of a full or band symmetric matrix can also be found */
+/* if SSYTRD or SSPTRD or SSBTRD has been used to reduce this matrix to */
+/* tridiagonal form. */
+
+/* Arguments */
+/* ========= */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only. */
+/* = 'V': Compute eigenvalues and eigenvectors of the original */
+/* symmetric matrix. On entry, Z must contain the */
+/* orthogonal matrix used to reduce the original matrix */
+/* to tridiagonal form. */
+/* = 'I': Compute eigenvalues and eigenvectors of the */
+/* tridiagonal matrix. Z is initialized to the identity */
+/* matrix. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the diagonal elements of the tridiagonal matrix. */
+/* On exit, if INFO = 0, the eigenvalues in ascending order. */
+
+/* E (input/output) REAL array, dimension (N-1) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix. */
+/* On exit, E has been destroyed. */
+
+/* Z (input/output) REAL array, dimension (LDZ, N) */
+/* On entry, if COMPZ = 'V', then Z contains the orthogonal */
+/* matrix used in the reduction to tridiagonal form. */
+/* On exit, if INFO = 0, then if COMPZ = 'V', Z contains the */
+/* orthonormal eigenvectors of the original symmetric matrix, */
+/* and if COMPZ = 'I', Z contains the orthonormal eigenvectors */
+/* of the symmetric tridiagonal matrix. */
+/* If COMPZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* eigenvectors are desired, then LDZ >= max(1,N). */
+
+/* WORK (workspace) REAL array, dimension (max(1,2*N-2)) */
+/* If COMPZ = 'N', then WORK is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: the algorithm has failed to find all the eigenvalues in */
+/* a total of 30*N iterations; if INFO = i, then i */
+/* elements of E have not converged to zero; on exit, D */
+/* and E contain the elements of a symmetric tridiagonal */
+/* matrix which is orthogonally similar to the original */
+/* matrix. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (lsame_(compz, "N")) {
+ icompz = 0;
+ } else if (lsame_(compz, "V")) {
+ icompz = 1;
+ } else if (lsame_(compz, "I")) {
+ icompz = 2;
+ } else {
+ icompz = -1;
+ }
+ if (icompz < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldz < 1 || icompz > 0 && *ldz < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSTEQR", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (icompz == 2) {
+ z__[z_dim1 + 1] = 1.f;
+ }
+ return 0;
+ }
+
+/* Determine the unit roundoff and over/underflow thresholds. */
+
+ eps = slamch_("E");
+/* Computing 2nd power */
+ r__1 = eps;
+ eps2 = r__1 * r__1;
+ safmin = slamch_("S");
+ safmax = 1.f / safmin;
+ ssfmax = sqrt(safmax) / 3.f;
+ ssfmin = sqrt(safmin) / eps2;
+
+/* Compute the eigenvalues and eigenvectors of the tridiagonal */
+/* matrix. */
+
+ if (icompz == 2) {
+ slaset_("Full", n, n, &c_b9, &c_b10, &z__[z_offset], ldz);
+ }
+
+ nmaxit = *n * 30;
+ jtot = 0;
+
+/* Determine where the matrix splits and choose QL or QR iteration */
+/* for each block, according to whether top or bottom diagonal */
+/* element is smaller. */
+
+ l1 = 1;
+ nm1 = *n - 1;
+
+L10:
+ if (l1 > *n) {
+ goto L160;
+ }
+ if (l1 > 1) {
+ e[l1 - 1] = 0.f;
+ }
+ if (l1 <= nm1) {
+ i__1 = nm1;
+ for (m = l1; m <= i__1; ++m) {
+ tst = (r__1 = e[m], dabs(r__1));
+ if (tst == 0.f) {
+ goto L30;
+ }
+ if (tst <= sqrt((r__1 = d__[m], dabs(r__1))) * sqrt((r__2 = d__[m
+ + 1], dabs(r__2))) * eps) {
+ e[m] = 0.f;
+ goto L30;
+ }
+/* L20: */
+ }
+ }
+ m = *n;
+
+L30:
+ l = l1;
+ lsv = l;
+ lend = m;
+ lendsv = lend;
+ l1 = m + 1;
+ if (lend == l) {
+ goto L10;
+ }
+
+/* Scale submatrix in rows and columns L to LEND */
+
+ i__1 = lend - l + 1;
+ anorm = slanst_("I", &i__1, &d__[l], &e[l]);
+ iscale = 0;
+ if (anorm == 0.f) {
+ goto L10;
+ }
+ if (anorm > ssfmax) {
+ iscale = 1;
+ i__1 = lend - l + 1;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n,
+ info);
+ } else if (anorm < ssfmin) {
+ iscale = 2;
+ i__1 = lend - l + 1;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n,
+ info);
+ }
+
+/* Choose between QL and QR iteration */
+
+ if ((r__1 = d__[lend], dabs(r__1)) < (r__2 = d__[l], dabs(r__2))) {
+ lend = lsv;
+ l = lendsv;
+ }
+
+ if (lend > l) {
+
+/* QL Iteration */
+
+/* Look for small subdiagonal element. */
+
+L40:
+ if (l != lend) {
+ lendm1 = lend - 1;
+ i__1 = lendm1;
+ for (m = l; m <= i__1; ++m) {
+/* Computing 2nd power */
+ r__2 = (r__1 = e[m], dabs(r__1));
+ tst = r__2 * r__2;
+ if (tst <= eps2 * (r__1 = d__[m], dabs(r__1)) * (r__2 = d__[m
+ + 1], dabs(r__2)) + safmin) {
+ goto L60;
+ }
+/* L50: */
+ }
+ }
+
+ m = lend;
+
+L60:
+ if (m < lend) {
+ e[m] = 0.f;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L80;
+ }
+
+/* If remaining matrix is 2-by-2, use SLAE2 or SLAEV2 */
+/* to compute its eigensystem. */
+
+ if (m == l + 1) {
+ if (icompz > 0) {
+ slaev2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2, &c__, &s);
+ work[l] = c__;
+ work[*n - 1 + l] = s;
+ slasr_("R", "V", "B", n, &c__2, &work[l], &work[*n - 1 + l], &
+ z__[l * z_dim1 + 1], ldz);
+ } else {
+ slae2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2);
+ }
+ d__[l] = rt1;
+ d__[l + 1] = rt2;
+ e[l] = 0.f;
+ l += 2;
+ if (l <= lend) {
+ goto L40;
+ }
+ goto L140;
+ }
+
+ if (jtot == nmaxit) {
+ goto L140;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ g = (d__[l + 1] - p) / (e[l] * 2.f);
+ r__ = slapy2_(&g, &c_b10);
+ g = d__[m] - p + e[l] / (g + r_sign(&r__, &g));
+
+ s = 1.f;
+ c__ = 1.f;
+ p = 0.f;
+
+/* Inner loop */
+
+ mm1 = m - 1;
+ i__1 = l;
+ for (i__ = mm1; i__ >= i__1; --i__) {
+ f = s * e[i__];
+ b = c__ * e[i__];
+ slartg_(&g, &f, &c__, &s, &r__);
+ if (i__ != m - 1) {
+ e[i__ + 1] = r__;
+ }
+ g = d__[i__ + 1] - p;
+ r__ = (d__[i__] - g) * s + c__ * 2.f * b;
+ p = s * r__;
+ d__[i__ + 1] = g + p;
+ g = c__ * r__ - b;
+
+/* If eigenvectors are desired, then save rotations. */
+
+ if (icompz > 0) {
+ work[i__] = c__;
+ work[*n - 1 + i__] = -s;
+ }
+
+/* L70: */
+ }
+
+/* If eigenvectors are desired, then apply saved rotations. */
+
+ if (icompz > 0) {
+ mm = m - l + 1;
+ slasr_("R", "V", "B", n, &mm, &work[l], &work[*n - 1 + l], &z__[l
+ * z_dim1 + 1], ldz);
+ }
+
+ d__[l] -= p;
+ e[l] = g;
+ goto L40;
+
+/* Eigenvalue found. */
+
+L80:
+ d__[l] = p;
+
+ ++l;
+ if (l <= lend) {
+ goto L40;
+ }
+ goto L140;
+
+ } else {
+
+/* QR Iteration */
+
+/* Look for small superdiagonal element. */
+
+L90:
+ if (l != lend) {
+ lendp1 = lend + 1;
+ i__1 = lendp1;
+ for (m = l; m >= i__1; --m) {
+/* Computing 2nd power */
+ r__2 = (r__1 = e[m - 1], dabs(r__1));
+ tst = r__2 * r__2;
+ if (tst <= eps2 * (r__1 = d__[m], dabs(r__1)) * (r__2 = d__[m
+ - 1], dabs(r__2)) + safmin) {
+ goto L110;
+ }
+/* L100: */
+ }
+ }
+
+ m = lend;
+
+L110:
+ if (m > lend) {
+ e[m - 1] = 0.f;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L130;
+ }
+
+/* If remaining matrix is 2-by-2, use SLAE2 or SLAEV2 */
+/* to compute its eigensystem. */
+
+ if (m == l - 1) {
+ if (icompz > 0) {
+ slaev2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2, &c__, &s)
+ ;
+ work[m] = c__;
+ work[*n - 1 + m] = s;
+ slasr_("R", "V", "F", n, &c__2, &work[m], &work[*n - 1 + m], &
+ z__[(l - 1) * z_dim1 + 1], ldz);
+ } else {
+ slae2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2);
+ }
+ d__[l - 1] = rt1;
+ d__[l] = rt2;
+ e[l - 1] = 0.f;
+ l += -2;
+ if (l >= lend) {
+ goto L90;
+ }
+ goto L140;
+ }
+
+ if (jtot == nmaxit) {
+ goto L140;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ g = (d__[l - 1] - p) / (e[l - 1] * 2.f);
+ r__ = slapy2_(&g, &c_b10);
+ g = d__[m] - p + e[l - 1] / (g + r_sign(&r__, &g));
+
+ s = 1.f;
+ c__ = 1.f;
+ p = 0.f;
+
+/* Inner loop */
+
+ lm1 = l - 1;
+ i__1 = lm1;
+ for (i__ = m; i__ <= i__1; ++i__) {
+ f = s * e[i__];
+ b = c__ * e[i__];
+ slartg_(&g, &f, &c__, &s, &r__);
+ if (i__ != m) {
+ e[i__ - 1] = r__;
+ }
+ g = d__[i__] - p;
+ r__ = (d__[i__ + 1] - g) * s + c__ * 2.f * b;
+ p = s * r__;
+ d__[i__] = g + p;
+ g = c__ * r__ - b;
+
+/* If eigenvectors are desired, then save rotations. */
+
+ if (icompz > 0) {
+ work[i__] = c__;
+ work[*n - 1 + i__] = s;
+ }
+
+/* L120: */
+ }
+
+/* If eigenvectors are desired, then apply saved rotations. */
+
+ if (icompz > 0) {
+ mm = l - m + 1;
+ slasr_("R", "V", "F", n, &mm, &work[m], &work[*n - 1 + m], &z__[m
+ * z_dim1 + 1], ldz);
+ }
+
+ d__[l] -= p;
+ e[lm1] = g;
+ goto L90;
+
+/* Eigenvalue found. */
+
+L130:
+ d__[l] = p;
+
+ --l;
+ if (l >= lend) {
+ goto L90;
+ }
+ goto L140;
+
+ }
+
+/* Undo scaling if necessary */
+
+L140:
+ if (iscale == 1) {
+ i__1 = lendsv - lsv + 1;
+ slascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ i__1 = lendsv - lsv;
+ slascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &e[lsv], n,
+ info);
+ } else if (iscale == 2) {
+ i__1 = lendsv - lsv + 1;
+ slascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ i__1 = lendsv - lsv;
+ slascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &e[lsv], n,
+ info);
+ }
+
+/* Check for no convergence to an eigenvalue after a total */
+/* of N*MAXIT iterations. */
+
+ if (jtot < nmaxit) {
+ goto L10;
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (e[i__] != 0.f) {
+ ++(*info);
+ }
+/* L150: */
+ }
+ goto L190;
+
+/* Order eigenvalues and eigenvectors. */
+
+L160:
+ if (icompz == 0) {
+
+/* Use Quick Sort */
+
+ slasrt_("I", n, &d__[1], info);
+
+ } else {
+
+/* Use Selection Sort to minimize swaps of eigenvectors */
+
+ i__1 = *n;
+ for (ii = 2; ii <= i__1; ++ii) {
+ i__ = ii - 1;
+ k = i__;
+ p = d__[i__];
+ i__2 = *n;
+ for (j = ii; j <= i__2; ++j) {
+ if (d__[j] < p) {
+ k = j;
+ p = d__[j];
+ }
+/* L170: */
+ }
+ if (k != i__) {
+ d__[k] = d__[i__];
+ d__[i__] = p;
+ sswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1],
+ &c__1);
+ }
+/* L180: */
+ }
+ }
+
+L190:
+ return 0;
+
+/* End of SSTEQR */
+
+} /* ssteqr_ */
diff --git a/contrib/libs/clapack/ssterf.c b/contrib/libs/clapack/ssterf.c
new file mode 100644
index 0000000000..653dce04b3
--- /dev/null
+++ b/contrib/libs/clapack/ssterf.c
@@ -0,0 +1,460 @@
+/* ssterf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__0 = 0;
+static integer c__1 = 1;
+static real c_b32 = 1.f;
+
+/* Subroutine */ int ssterf_(integer *n, real *d__, real *e, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ real c__;
+ integer i__, l, m;
+ real p, r__, s;
+ integer l1;
+ real bb, rt1, rt2, eps, rte;
+ integer lsv;
+ real eps2, oldc;
+ integer lend, jtot;
+ extern /* Subroutine */ int slae2_(real *, real *, real *, real *, real *)
+ ;
+ real gamma, alpha, sigma, anorm;
+ extern doublereal slapy2_(real *, real *);
+ integer iscale;
+ real oldgam;
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real safmax;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ integer lendsv;
+ real ssfmin;
+ integer nmaxit;
+ real ssfmax;
+ extern doublereal slanst_(char *, integer *, real *, real *);
+ extern /* Subroutine */ int slasrt_(char *, integer *, real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSTERF computes all eigenvalues of a symmetric tridiagonal matrix */
+/* using the Pal-Walker-Kahan variant of the QL or QR algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the n diagonal elements of the tridiagonal matrix. */
+/* On exit, if INFO = 0, the eigenvalues in ascending order. */
+
+/* E (input/output) REAL array, dimension (N-1) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix. */
+/* On exit, E has been destroyed. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: the algorithm failed to find all of the eigenvalues in */
+/* a total of 30*N iterations; if INFO = i, then i */
+/* elements of E have not converged to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --e;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+
+/* Quick return if possible */
+
+ if (*n < 0) {
+ *info = -1;
+ i__1 = -(*info);
+ xerbla_("SSTERF", &i__1);
+ return 0;
+ }
+ if (*n <= 1) {
+ return 0;
+ }
+
+/* Determine the unit roundoff for this environment. */
+
+ eps = slamch_("E");
+/* Computing 2nd power */
+ r__1 = eps;
+ eps2 = r__1 * r__1;
+ safmin = slamch_("S");
+ safmax = 1.f / safmin;
+ ssfmax = sqrt(safmax) / 3.f;
+ ssfmin = sqrt(safmin) / eps2;
+
+/* Compute the eigenvalues of the tridiagonal matrix. */
+
+ nmaxit = *n * 30;
+ sigma = 0.f;
+ jtot = 0;
+
+/* Determine where the matrix splits and choose QL or QR iteration */
+/* for each block, according to whether top or bottom diagonal */
+/* element is smaller. */
+
+ l1 = 1;
+
+L10:
+ if (l1 > *n) {
+ goto L170;
+ }
+ if (l1 > 1) {
+ e[l1 - 1] = 0.f;
+ }
+ i__1 = *n - 1;
+ for (m = l1; m <= i__1; ++m) {
+ if ((r__3 = e[m], dabs(r__3)) <= sqrt((r__1 = d__[m], dabs(r__1))) *
+ sqrt((r__2 = d__[m + 1], dabs(r__2))) * eps) {
+ e[m] = 0.f;
+ goto L30;
+ }
+/* L20: */
+ }
+ m = *n;
+
+L30:
+ l = l1;
+ lsv = l;
+ lend = m;
+ lendsv = lend;
+ l1 = m + 1;
+ if (lend == l) {
+ goto L10;
+ }
+
+/* Scale submatrix in rows and columns L to LEND */
+
+ i__1 = lend - l + 1;
+ anorm = slanst_("I", &i__1, &d__[l], &e[l]);
+ iscale = 0;
+ if (anorm > ssfmax) {
+ iscale = 1;
+ i__1 = lend - l + 1;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n,
+ info);
+ } else if (anorm < ssfmin) {
+ iscale = 2;
+ i__1 = lend - l + 1;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n,
+ info);
+ }
+
+ i__1 = lend - 1;
+ for (i__ = l; i__ <= i__1; ++i__) {
+/* Computing 2nd power */
+ r__1 = e[i__];
+ e[i__] = r__1 * r__1;
+/* L40: */
+ }
+
+/* Choose between QL and QR iteration */
+
+ if ((r__1 = d__[lend], dabs(r__1)) < (r__2 = d__[l], dabs(r__2))) {
+ lend = lsv;
+ l = lendsv;
+ }
+
+ if (lend >= l) {
+
+/* QL Iteration */
+
+/* Look for small subdiagonal element. */
+
+L50:
+ if (l != lend) {
+ i__1 = lend - 1;
+ for (m = l; m <= i__1; ++m) {
+ if ((r__2 = e[m], dabs(r__2)) <= eps2 * (r__1 = d__[m] * d__[
+ m + 1], dabs(r__1))) {
+ goto L70;
+ }
+/* L60: */
+ }
+ }
+ m = lend;
+
+L70:
+ if (m < lend) {
+ e[m] = 0.f;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L90;
+ }
+
+/* If remaining matrix is 2 by 2, use SLAE2 to compute its */
+/* eigenvalues. */
+
+ if (m == l + 1) {
+ rte = sqrt(e[l]);
+ slae2_(&d__[l], &rte, &d__[l + 1], &rt1, &rt2);
+ d__[l] = rt1;
+ d__[l + 1] = rt2;
+ e[l] = 0.f;
+ l += 2;
+ if (l <= lend) {
+ goto L50;
+ }
+ goto L150;
+ }
+
+ if (jtot == nmaxit) {
+ goto L150;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ rte = sqrt(e[l]);
+ sigma = (d__[l + 1] - p) / (rte * 2.f);
+ r__ = slapy2_(&sigma, &c_b32);
+ sigma = p - rte / (sigma + r_sign(&r__, &sigma));
+
+ c__ = 1.f;
+ s = 0.f;
+ gamma = d__[m] - sigma;
+ p = gamma * gamma;
+
+/* Inner loop */
+
+ i__1 = l;
+ for (i__ = m - 1; i__ >= i__1; --i__) {
+ bb = e[i__];
+ r__ = p + bb;
+ if (i__ != m - 1) {
+ e[i__ + 1] = s * r__;
+ }
+ oldc = c__;
+ c__ = p / r__;
+ s = bb / r__;
+ oldgam = gamma;
+ alpha = d__[i__];
+ gamma = c__ * (alpha - sigma) - s * oldgam;
+ d__[i__ + 1] = oldgam + (alpha - gamma);
+ if (c__ != 0.f) {
+ p = gamma * gamma / c__;
+ } else {
+ p = oldc * bb;
+ }
+/* L80: */
+ }
+
+ e[l] = s * p;
+ d__[l] = sigma + gamma;
+ goto L50;
+
+/* Eigenvalue found. */
+
+L90:
+ d__[l] = p;
+
+ ++l;
+ if (l <= lend) {
+ goto L50;
+ }
+ goto L150;
+
+ } else {
+
+/* QR Iteration */
+
+/* Look for small superdiagonal element. */
+
+L100:
+ i__1 = lend + 1;
+ for (m = l; m >= i__1; --m) {
+ if ((r__2 = e[m - 1], dabs(r__2)) <= eps2 * (r__1 = d__[m] * d__[
+ m - 1], dabs(r__1))) {
+ goto L120;
+ }
+/* L110: */
+ }
+ m = lend;
+
+L120:
+ if (m > lend) {
+ e[m - 1] = 0.f;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L140;
+ }
+
+/* If remaining matrix is 2 by 2, use SLAE2 to compute its */
+/* eigenvalues. */
+
+ if (m == l - 1) {
+ rte = sqrt(e[l - 1]);
+ slae2_(&d__[l], &rte, &d__[l - 1], &rt1, &rt2);
+ d__[l] = rt1;
+ d__[l - 1] = rt2;
+ e[l - 1] = 0.f;
+ l += -2;
+ if (l >= lend) {
+ goto L100;
+ }
+ goto L150;
+ }
+
+ if (jtot == nmaxit) {
+ goto L150;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ rte = sqrt(e[l - 1]);
+ sigma = (d__[l - 1] - p) / (rte * 2.f);
+ r__ = slapy2_(&sigma, &c_b32);
+ sigma = p - rte / (sigma + r_sign(&r__, &sigma));
+
+ c__ = 1.f;
+ s = 0.f;
+ gamma = d__[m] - sigma;
+ p = gamma * gamma;
+
+/* Inner loop */
+
+ i__1 = l - 1;
+ for (i__ = m; i__ <= i__1; ++i__) {
+ bb = e[i__];
+ r__ = p + bb;
+ if (i__ != m) {
+ e[i__ - 1] = s * r__;
+ }
+ oldc = c__;
+ c__ = p / r__;
+ s = bb / r__;
+ oldgam = gamma;
+ alpha = d__[i__ + 1];
+ gamma = c__ * (alpha - sigma) - s * oldgam;
+ d__[i__] = oldgam + (alpha - gamma);
+ if (c__ != 0.f) {
+ p = gamma * gamma / c__;
+ } else {
+ p = oldc * bb;
+ }
+/* L130: */
+ }
+
+ e[l - 1] = s * p;
+ d__[l] = sigma + gamma;
+ goto L100;
+
+/* Eigenvalue found. */
+
+L140:
+ d__[l] = p;
+
+ --l;
+ if (l >= lend) {
+ goto L100;
+ }
+ goto L150;
+
+ }
+
+/* Undo scaling if necessary */
+
+L150:
+ if (iscale == 1) {
+ i__1 = lendsv - lsv + 1;
+ slascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ }
+ if (iscale == 2) {
+ i__1 = lendsv - lsv + 1;
+ slascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ }
+
+/* Check for no convergence to an eigenvalue after a total */
+/* of N*MAXIT iterations. */
+
+ if (jtot < nmaxit) {
+ goto L10;
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (e[i__] != 0.f) {
+ ++(*info);
+ }
+/* L160: */
+ }
+ goto L180;
+
+/* Sort eigenvalues in increasing order. */
+
+L170:
+ slasrt_("I", n, &d__[1], info);
+
+L180:
+ return 0;
+
+/* End of SSTERF */
+
+} /* ssterf_ */
diff --git a/contrib/libs/clapack/sstev.c b/contrib/libs/clapack/sstev.c
new file mode 100644
index 0000000000..5fe9ab2b80
--- /dev/null
+++ b/contrib/libs/clapack/sstev.c
@@ -0,0 +1,209 @@
+/* sstev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sstev_(char *jobz, integer *n, real *d__, real *e, real *
+ z__, integer *ldz, real *work, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ real eps;
+ integer imax;
+ real rmin, rmax, tnrm, sigma;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ logical wantz;
+ integer iscale;
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ extern doublereal slanst_(char *, integer *, real *, real *);
+ extern /* Subroutine */ int ssterf_(integer *, real *, real *, integer *);
+ real smlnum;
+ extern /* Subroutine */ int ssteqr_(char *, integer *, real *, real *,
+ real *, integer *, real *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSTEV computes all eigenvalues and, optionally, eigenvectors of a */
+/* real symmetric tridiagonal matrix A. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the n diagonal elements of the tridiagonal matrix */
+/* A. */
+/* On exit, if INFO = 0, the eigenvalues in ascending order. */
+
+/* E (input/output) REAL array, dimension (N-1) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix A, stored in elements 1 to N-1 of E. */
+/* On exit, the contents of E are destroyed. */
+
+/* Z (output) REAL array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/* eigenvectors of the matrix A, with the i-th column of Z */
+/* holding the eigenvector associated with D(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) REAL array, dimension (max(1,2*N-2)) */
+/* If JOBZ = 'N', WORK is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the algorithm failed to converge; i */
+/* off-diagonal elements of E did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -6;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSTEV ", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (wantz) {
+ z__[z_dim1 + 1] = 1.f;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = slamch_("Safe minimum");
+ eps = slamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1.f / smlnum;
+ rmin = sqrt(smlnum);
+ rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ iscale = 0;
+ tnrm = slanst_("M", n, &d__[1], &e[1]);
+ if (tnrm > 0.f && tnrm < rmin) {
+ iscale = 1;
+ sigma = rmin / tnrm;
+ } else if (tnrm > rmax) {
+ iscale = 1;
+ sigma = rmax / tnrm;
+ }
+ if (iscale == 1) {
+ sscal_(n, &sigma, &d__[1], &c__1);
+ i__1 = *n - 1;
+ sscal_(&i__1, &sigma, &e[1], &c__1);
+ }
+
+/* For eigenvalues only, call SSTERF. For eigenvalues and */
+/* eigenvectors, call SSTEQR. */
+
+ if (! wantz) {
+ ssterf_(n, &d__[1], &e[1], info);
+ } else {
+ ssteqr_("I", n, &d__[1], &e[1], &z__[z_offset], ldz, &work[1], info);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *n;
+ } else {
+ imax = *info - 1;
+ }
+ r__1 = 1.f / sigma;
+ sscal_(&imax, &r__1, &d__[1], &c__1);
+ }
+
+ return 0;
+
+/* End of SSTEV */
+
+} /* sstev_ */
diff --git a/contrib/libs/clapack/sstevd.c b/contrib/libs/clapack/sstevd.c
new file mode 100644
index 0000000000..d7e6ce2430
--- /dev/null
+++ b/contrib/libs/clapack/sstevd.c
@@ -0,0 +1,270 @@
+/* sstevd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sstevd_(char *jobz, integer *n, real *d__, real *e, real
+ *z__, integer *ldz, real *work, integer *lwork, integer *iwork,
+ integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ real eps, rmin, rmax, tnrm, sigma;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ integer lwmin;
+ logical wantz;
+ integer iscale;
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ extern /* Subroutine */ int sstedc_(char *, integer *, real *, real *,
+ real *, integer *, real *, integer *, integer *, integer *,
+ integer *);
+ integer liwmin;
+ extern doublereal slanst_(char *, integer *, real *, real *);
+ extern /* Subroutine */ int ssterf_(integer *, real *, real *, integer *);
+ real smlnum;
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSTEVD computes all eigenvalues and, optionally, eigenvectors of a */
+/* real symmetric tridiagonal matrix. If eigenvectors are desired, it */
+/* uses a divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the n diagonal elements of the tridiagonal matrix */
+/* A. */
+/* On exit, if INFO = 0, the eigenvalues in ascending order. */
+
+/* E (input/output) REAL array, dimension (N-1) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix A, stored in elements 1 to N-1 of E. */
+/* On exit, the contents of E are destroyed. */
+
+/* Z (output) REAL array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/* eigenvectors of the matrix A, with the i-th column of Z */
+/* holding the eigenvector associated with D(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) REAL array, */
+/* dimension (LWORK) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If JOBZ = 'N' or N <= 1 then LWORK must be at least 1. */
+/* If JOBZ = 'V' and N > 1 then LWORK must be at least */
+/* ( 1 + 4*N + N**2 ). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK and IWORK */
+/* arrays, returns these values as the first entries of the WORK */
+/* and IWORK arrays, and no error message related to LWORK or */
+/* LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If JOBZ = 'N' or N <= 1 then LIWORK must be at least 1. */
+/* If JOBZ = 'V' and N > 1 then LIWORK must be at least 3+5*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK and IWORK arrays, and no error message related to */
+/* LWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the algorithm failed to converge; i */
+/* off-diagonal elements of E did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ lquery = *lwork == -1 || *liwork == -1;
+
+ *info = 0;
+ liwmin = 1;
+ lwmin = 1;
+ if (*n > 1 && wantz) {
+/* Computing 2nd power */
+ i__1 = *n;
+ lwmin = (*n << 2) + 1 + i__1 * i__1;
+ liwmin = *n * 5 + 3;
+ }
+
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -6;
+ }
+
+ if (*info == 0) {
+ work[1] = (real) lwmin;
+ iwork[1] = liwmin;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -8;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -10;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSTEVD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (wantz) {
+ z__[z_dim1 + 1] = 1.f;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = slamch_("Safe minimum");
+ eps = slamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1.f / smlnum;
+ rmin = sqrt(smlnum);
+ rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ iscale = 0;
+ tnrm = slanst_("M", n, &d__[1], &e[1]);
+ if (tnrm > 0.f && tnrm < rmin) {
+ iscale = 1;
+ sigma = rmin / tnrm;
+ } else if (tnrm > rmax) {
+ iscale = 1;
+ sigma = rmax / tnrm;
+ }
+ if (iscale == 1) {
+ sscal_(n, &sigma, &d__[1], &c__1);
+ i__1 = *n - 1;
+ sscal_(&i__1, &sigma, &e[1], &c__1);
+ }
+
+/* For eigenvalues only, call SSTERF. For eigenvalues and */
+/* eigenvectors, call SSTEDC. */
+
+ if (! wantz) {
+ ssterf_(n, &d__[1], &e[1], info);
+ } else {
+ sstedc_("I", n, &d__[1], &e[1], &z__[z_offset], ldz, &work[1], lwork,
+ &iwork[1], liwork, info);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+ if (iscale == 1) {
+ r__1 = 1.f / sigma;
+ sscal_(n, &r__1, &d__[1], &c__1);
+ }
+
+ work[1] = (real) lwmin;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of SSTEVD */
+
+} /* sstevd_ */
diff --git a/contrib/libs/clapack/sstevr.c b/contrib/libs/clapack/sstevr.c
new file mode 100644
index 0000000000..279446ae0a
--- /dev/null
+++ b/contrib/libs/clapack/sstevr.c
@@ -0,0 +1,541 @@
+/* sstevr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__10 = 10;
+static integer c__1 = 1;
+static integer c__2 = 2;
+static integer c__3 = 3;
+static integer c__4 = 4;
+
+/* Subroutine */ int sstevr_(char *jobz, char *range, integer *n, real *d__,
+	 real *e, real *vl, real *vu, integer *il, integer *iu, real *abstol,
+	 integer *m, real *w, real *z__, integer *ldz, integer *isuppz, real *
+	 work, integer *lwork, integer *iwork, integer *liwork, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1, i__2;
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, jj;
+    real eps, vll, vuu, tmp1;
+    integer imax;
+    real rmin, rmax;
+    logical test;
+    real tnrm, sigma;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+    char order[1];
+    integer lwmin;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+	     integer *), sswap_(integer *, real *, integer *, real *, integer *
+);
+    logical wantz, alleig, indeig;
+    integer iscale, ieeeok, indibl, indifl;
+    logical valeig;
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real bignum;
+    integer indisp, indiwo, liwmin;
+    logical tryrac;
+    extern doublereal slanst_(char *, integer *, real *, real *);
+    extern /* Subroutine */ int sstein_(integer *, real *, real *, integer *,
+	    real *, integer *, integer *, real *, integer *, real *, integer *
+, integer *, integer *), ssterf_(integer *, real *, real *,
+	    integer *);
+    integer nsplit;
+    extern /* Subroutine */ int sstebz_(char *, char *, integer *, real *,
+	    real *, integer *, integer *, real *, real *, real *, integer *,
+	    integer *, real *, integer *, integer *, real *, integer *,
+	    integer *);
+    real smlnum;
+    extern /* Subroutine */ int sstemr_(char *, char *, integer *, real *,
+	    real *, real *, real *, integer *, integer *, integer *, real *,
+	    real *, integer *, integer *, integer *, logical *, real *,
+	    integer *, integer *, integer *, integer *);
+    logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSTEVR computes selected eigenvalues and, optionally, eigenvectors */
+/* of a real symmetric tridiagonal matrix T. Eigenvalues and */
+/* eigenvectors can be selected by specifying either a range of values */
+/* or a range of indices for the desired eigenvalues. */
+
+/* Whenever possible, SSTEVR calls SSTEMR to compute the */
+/* eigenspectrum using Relatively Robust Representations. SSTEMR */
+/* computes eigenvalues by the dqds algorithm, while orthogonal */
+/* eigenvectors are computed from various "good" L D L^T representations */
+/* (also known as Relatively Robust Representations). Gram-Schmidt */
+/* orthogonalization is avoided as far as possible. More specifically, */
+/* the various steps of the algorithm are as follows. For the i-th */
+/* unreduced block of T, */
+/* (a) Compute T - sigma_i = L_i D_i L_i^T, such that L_i D_i L_i^T */
+/* is a relatively robust representation, */
+/* (b) Compute the eigenvalues, lambda_j, of L_i D_i L_i^T to high */
+/* relative accuracy by the dqds algorithm, */
+/* (c) If there is a cluster of close eigenvalues, "choose" sigma_i */
+/* close to the cluster, and go to step (a), */
+/* (d) Given the approximate eigenvalue lambda_j of L_i D_i L_i^T, */
+/* compute the corresponding eigenvector by forming a */
+/* rank-revealing twisted factorization. */
+/* The desired accuracy of the output can be specified by the input */
+/* parameter ABSTOL. */
+
+/* For more details, see "A new O(n^2) algorithm for the symmetric */
+/* tridiagonal eigenvalue/eigenvector problem", by Inderjit Dhillon, */
+/* Computer Science Division Technical Report No. UCB//CSD-97-971, */
+/* UC Berkeley, May 1997. */
+
+
+/* Note 1 : SSTEVR calls SSTEMR when the full spectrum is requested */
+/* on machines which conform to the ieee-754 floating point standard. */
+/* SSTEVR calls SSTEBZ and SSTEIN on non-ieee machines and */
+/* when partial spectrum requests are made. */
+
+/* Normal execution of SSTEMR may create NaNs and infinities and */
+/* hence may abort due to a floating point exception in environments */
+/* which do not handle NaNs and infinities in the ieee standard default */
+/* manner. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+/* ********* For RANGE = 'V' or 'I' and IU - IL < N - 1, SSTEBZ and */
+/* ********* SSTEIN are called */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the n diagonal elements of the tridiagonal matrix */
+/* A. */
+/* On exit, D may be multiplied by a constant factor chosen */
+/* to avoid over/underflow in computing the eigenvalues. */
+
+/* E (input/output) REAL array, dimension (max(1,N-1)) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix A in elements 1 to N-1 of E. */
+/* On exit, E may be multiplied by a constant factor chosen */
+/* to avoid over/underflow in computing the eigenvalues. */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) REAL */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing A to tridiagonal form. */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices */
+/* with Guaranteed High Relative Accuracy," by Demmel and */
+/* Kahan, LAPACK Working Note #3. */
+
+/* If high relative accuracy is important, set ABSTOL to */
+/* SLAMCH( 'Safe minimum' ). Doing so will guarantee that */
+/* eigenvalues are computed to high relative accuracy when */
+/* possible in future releases. The current code does not */
+/* make any guarantees about high relative accuracy, but */
+/* future releases will. See J. Barlow and J. Demmel, */
+/* "Computing Accurate Eigensystems of Scaled Diagonally */
+/* Dominant Matrices", LAPACK Working Note #7, for a discussion */
+/* of which matrices define their eigenvalues to high relative */
+/* accuracy. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) REAL array, dimension (N) */
+/* The first M elements contain the selected eigenvalues in */
+/* ascending order. */
+
+/* Z (output) REAL array, dimension (LDZ, max(1,M) ) */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* ISUPPZ (output) INTEGER array, dimension ( 2*max(1,M) ) */
+/* The support of the eigenvectors in Z, i.e., the indices */
+/* indicating the nonzero elements in Z. The i-th eigenvector */
+/* is nonzero only in elements ISUPPZ( 2*i-1 ) through */
+/* ISUPPZ( 2*i ). */
+/* ********* Implemented only for RANGE = 'A' or 'I' and IU - IL = N - 1 */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal (and */
+/* minimal) LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= 20*N. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK and IWORK */
+/* arrays, returns these values as the first entries of the WORK */
+/* and IWORK arrays, and no error message related to LWORK or */
+/* LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal (and */
+/* minimal) LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. LIWORK >= 10*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK and IWORK arrays, and no error message related to */
+/* LWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: Internal error */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Inderjit Dhillon, IBM Almaden, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Ken Stanley, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Jason Riedy, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --isuppz;
+    --work;
+    --iwork;
+
+    /* Function Body */
+/* ISPEC = 10 asks ILAENV whether IEEE-754 NaN/Inf arithmetic is in effect; */
+/* the SSTEMR fast path below is taken only when ieeeok == 1 (see Note 1). */
+    ieeeok = ilaenv_(&c__10, "SSTEVR", "N", &c__1, &c__2, &c__3, &c__4);
+
+    wantz = lsame_(jobz, "V");
+    alleig = lsame_(range, "A");
+    valeig = lsame_(range, "V");
+    indeig = lsame_(range, "I");
+
+    lquery = *lwork == -1 || *liwork == -1;
+/* Computing MAX */
+    i__1 = 1, i__2 = *n * 20;
+    lwmin = max(i__1,i__2);
+/* Computing MAX */
+    i__1 = 1, i__2 = *n * 10;
+    liwmin = max(i__1,i__2);
+
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (alleig || valeig || indeig)) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else {
+	if (valeig) {
+	    if (*n > 0 && *vu <= *vl) {
+		*info = -7;
+	    }
+	} else if (indeig) {
+	    if (*il < 1 || *il > max(1,*n)) {
+		*info = -8;
+	    } else if (*iu < min(*n,*il) || *iu > *n) {
+		*info = -9;
+	    }
+	}
+    }
+    if (*info == 0) {
+	if (*ldz < 1 || wantz && *ldz < *n) {
+	    *info = -14;
+	}
+    }
+
+    if (*info == 0) {
+	work[1] = (real) lwmin;
+	iwork[1] = liwmin;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -17;
+	} else if (*liwork < liwmin && ! lquery) {
+	    *info = -19;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SSTEVR", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *m = 0;
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (*n == 1) {
+	if (alleig || indeig) {
+	    *m = 1;
+	    w[1] = d__[1];
+	} else {
+	    if (*vl < d__[1] && *vu >= d__[1]) {
+		*m = 1;
+		w[1] = d__[1];
+	    }
+	}
+	if (wantz) {
+	    z__[z_dim1 + 1] = 1.f;
+	}
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    safmin = slamch_("Safe minimum");
+    eps = slamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1.f / smlnum;
+    rmin = sqrt(smlnum);
+/* Computing MIN */
+    r__1 = sqrt(bignum), r__2 = 1.f / sqrt(sqrt(safmin));
+    rmax = dmin(r__1,r__2);
+
+
+/* Scale matrix to allowable range, if necessary. */
+
+    iscale = 0;
+    vll = *vl;
+    vuu = *vu;
+
+    tnrm = slanst_("M", n, &d__[1], &e[1]);
+    if (tnrm > 0.f && tnrm < rmin) {
+	iscale = 1;
+	sigma = rmin / tnrm;
+    } else if (tnrm > rmax) {
+	iscale = 1;
+	sigma = rmax / tnrm;
+    }
+    if (iscale == 1) {
+	sscal_(n, &sigma, &d__[1], &c__1);
+	i__1 = *n - 1;
+	sscal_(&i__1, &sigma, &e[1], &c__1);
+	if (valeig) {
+/* If the matrix is scaled, the search interval must be scaled too. */
+	    vll = *vl * sigma;
+	    vuu = *vu * sigma;
+	}
+    }
+/* Initialize indices into workspaces. Note: These indices are used only */
+/* if SSTERF or SSTEMR fail. */
+/* IWORK(INDIBL:INDIBL+M-1) corresponds to IBLOCK in SSTEBZ and */
+/* stores the block indices of each of the M<=N eigenvalues. */
+    indibl = 1;
+/* IWORK(INDISP:INDISP+NSPLIT-1) corresponds to ISPLIT in SSTEBZ and */
+/* stores the starting and finishing indices of each block. */
+    indisp = indibl + *n;
+/* IWORK(INDIFL:INDIFL+N-1) stores the indices of eigenvectors */
+/* that corresponding to eigenvectors that fail to converge in */
+/* SSTEIN. This information is discarded; if any fail, the driver */
+/* returns INFO > 0. */
+    indifl = indisp + *n;
+/* INDIWO is the offset of the remaining integer workspace. */
+/* NOTE(review): INDIWO equals INDIFL here (both are INDISP + N), so the */
+/* SSTEIN call below passes aliased IWORK and IFAIL ranges; later LAPACK */
+/* releases use INDIFL + N instead -- verify before relying on IFAIL data. */
+    indiwo = indisp + *n;
+
+/* If all eigenvalues are desired, then */
+/* call SSTERF or SSTEMR. If this fails for some eigenvalue, then */
+/* try SSTEBZ. */
+
+
+    test = FALSE_;
+    if (indeig) {
+	if (*il == 1 && *iu == *n) {
+	    test = TRUE_;
+	}
+    }
+    if ((alleig || test) && ieeeok == 1) {
+	i__1 = *n - 1;
+	scopy_(&i__1, &e[1], &c__1, &work[1], &c__1);
+	if (! wantz) {
+	    scopy_(n, &d__[1], &c__1, &w[1], &c__1);
+	    ssterf_(n, &w[1], &work[1], info);
+	} else {
+/* WORK(1:N-1) holds the copy of E, WORK(N+1:2N) the copy of D; */
+/* WORK(2N+1:LWORK) is handed to SSTEMR as scratch of size LWORK-2N. */
+	    scopy_(n, &d__[1], &c__1, &work[*n + 1], &c__1);
+	    if (*abstol <= *n * 2.f * eps) {
+		tryrac = TRUE_;
+	    } else {
+		tryrac = FALSE_;
+	    }
+	    i__1 = *lwork - (*n << 1);
+	    sstemr_(jobz, "A", n, &work[*n + 1], &work[1], vl, vu, il, iu, m,
+		    &w[1], &z__[z_offset], ldz, n, &isuppz[1], &tryrac, &work[
+		    (*n << 1) + 1], &i__1, &iwork[1], liwork, info);
+
+	}
+	if (*info == 0) {
+	    *m = *n;
+	    goto L10;
+	}
+/* Fast path failed: clear INFO and fall through to SSTEBZ/SSTEIN. */
+	*info = 0;
+    }
+
+/* Otherwise, call SSTEBZ and, if eigenvectors are desired, SSTEIN. */
+
+    if (wantz) {
+	*(unsigned char *)order = 'B';
+    } else {
+	*(unsigned char *)order = 'E';
+    }
+    sstebz_(range, order, n, &vll, &vuu, il, iu, abstol, &d__[1], &e[1], m, &
+	    nsplit, &w[1], &iwork[indibl], &iwork[indisp], &work[1], &iwork[
+	    indiwo], info);
+
+    if (wantz) {
+	sstein_(n, &d__[1], &e[1], m, &w[1], &iwork[indibl], &iwork[indisp], &
+		z__[z_offset], ldz, &work[1], &iwork[indiwo], &iwork[indifl],
+		info);
+    }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+L10:
+    if (iscale == 1) {
+	if (*info == 0) {
+	    imax = *m;
+	} else {
+/* On failure only the first INFO-1 eigenvalues are reliable. */
+	    imax = *info - 1;
+	}
+	r__1 = 1.f / sigma;
+	sscal_(&imax, &r__1, &w[1], &c__1);
+    }
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+/* Selection sort over the M found eigenvalues, swapping Z columns along */
+/* with W entries so eigenpairs stay matched. */
+    if (wantz) {
+	i__1 = *m - 1;
+	for (j = 1; j <= i__1; ++j) {
+	    i__ = 0;
+	    tmp1 = w[j];
+	    i__2 = *m;
+	    for (jj = j + 1; jj <= i__2; ++jj) {
+		if (w[jj] < tmp1) {
+		    i__ = jj;
+		    tmp1 = w[jj];
+		}
+/* L20: */
+	    }
+
+	    if (i__ != 0) {
+		w[i__] = w[j];
+		w[j] = tmp1;
+		sswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+			 &c__1);
+	    }
+/* L30: */
+	}
+    }
+
+/* Causes problems with tests 19 & 20: */
+/* IF (wantz .and. INDEIG ) Z( 1,1) = Z(1,1) / 1.002 + .002 */
+
+
+    work[1] = (real) lwmin;
+    iwork[1] = liwmin;
+    return 0;
+
+/* End of SSTEVR */
+
+} /* sstevr_ */
diff --git a/contrib/libs/clapack/sstevx.c b/contrib/libs/clapack/sstevx.c
new file mode 100644
index 0000000000..5c3df453ca
--- /dev/null
+++ b/contrib/libs/clapack/sstevx.c
@@ -0,0 +1,427 @@
+/* sstevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int sstevx_(char *jobz, char *range, integer *n, real *d__,
+	 real *e, real *vl, real *vu, integer *il, integer *iu, real *abstol,
+	 integer *m, real *w, real *z__, integer *ldz, real *work, integer *
+	iwork, integer *ifail, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1, i__2;
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, jj;
+    real eps, vll, vuu, tmp1;
+    integer imax;
+    real rmin, rmax;
+    logical test;
+    real tnrm;
+    integer itmp1;
+    real sigma;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+    char order[1];
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+	     integer *), sswap_(integer *, real *, integer *, real *, integer *
+);
+    logical wantz, alleig, indeig;
+    integer iscale, indibl;
+    logical valeig;
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real bignum;
+    integer indisp, indiwo, indwrk;
+    extern doublereal slanst_(char *, integer *, real *, real *);
+    extern /* Subroutine */ int sstein_(integer *, real *, real *, integer *,
+	    real *, integer *, integer *, real *, integer *, real *, integer *
+, integer *, integer *), ssterf_(integer *, real *, real *,
+	    integer *);
+    integer nsplit;
+    extern /* Subroutine */ int sstebz_(char *, char *, integer *, real *,
+	    real *, integer *, integer *, real *, real *, real *, integer *,
+	    integer *, real *, integer *, integer *, real *, integer *,
+	    integer *);
+    real smlnum;
+    extern /* Subroutine */ int ssteqr_(char *, integer *, real *, real *,
+	    real *, integer *, real *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSTEVX computes selected eigenvalues and, optionally, eigenvectors */
+/* of a real symmetric tridiagonal matrix A. Eigenvalues and */
+/* eigenvectors can be selected by specifying either a range of values */
+/* or a range of indices for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) REAL array, dimension (N) */
+/* On entry, the n diagonal elements of the tridiagonal matrix */
+/* A. */
+/* On exit, D may be multiplied by a constant factor chosen */
+/* to avoid over/underflow in computing the eigenvalues. */
+
+/* E (input/output) REAL array, dimension (max(1,N-1)) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix A in elements 1 to N-1 of E. */
+/* On exit, E may be multiplied by a constant factor chosen */
+/* to avoid over/underflow in computing the eigenvalues. */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) REAL */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less */
+/* than or equal to zero, then EPS*|T| will be used in */
+/* its place, where |T| is the 1-norm of the tridiagonal */
+/* matrix. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*SLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*SLAMCH('S'). */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices */
+/* with Guaranteed High Relative Accuracy," by Demmel and */
+/* Kahan, LAPACK Working Note #3. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) REAL array, dimension (N) */
+/* The first M elements contain the selected eigenvalues in */
+/* ascending order. */
+
+/* Z (output) REAL array, dimension (LDZ, max(1,M) ) */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If an eigenvector fails to converge (INFO > 0), then that */
+/* column of Z contains the latest approximation to the */
+/* eigenvector, and the index of the eigenvector is returned */
+/* in IFAIL. If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) REAL array, dimension (5*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, then i eigenvectors failed to converge. */
+/* Their indices are stored in array IFAIL. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --iwork;
+    --ifail;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    alleig = lsame_(range, "A");
+    valeig = lsame_(range, "V");
+    indeig = lsame_(range, "I");
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (alleig || valeig || indeig)) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else {
+	if (valeig) {
+	    if (*n > 0 && *vu <= *vl) {
+		*info = -7;
+	    }
+	} else if (indeig) {
+	    if (*il < 1 || *il > max(1,*n)) {
+		*info = -8;
+	    } else if (*iu < min(*n,*il) || *iu > *n) {
+		*info = -9;
+	    }
+	}
+    }
+    if (*info == 0) {
+	if (*ldz < 1 || wantz && *ldz < *n) {
+	    *info = -14;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SSTEVX", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *m = 0;
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (*n == 1) {
+	if (alleig || indeig) {
+	    *m = 1;
+	    w[1] = d__[1];
+	} else {
+/* RANGE = 'V': the single eigenvalue D(1) counts only if it lies in (VL,VU]. */
+	    if (*vl < d__[1] && *vu >= d__[1]) {
+		*m = 1;
+		w[1] = d__[1];
+	    }
+	}
+	if (wantz) {
+	    z__[z_dim1 + 1] = 1.f;
+	}
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    safmin = slamch_("Safe minimum");
+    eps = slamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1.f / smlnum;
+    rmin = sqrt(smlnum);
+/* Computing MIN */
+    r__1 = sqrt(bignum), r__2 = 1.f / sqrt(sqrt(safmin));
+    rmax = dmin(r__1,r__2);
+
+/* Scale matrix to allowable range, if necessary. */
+
+    iscale = 0;
+    if (valeig) {
+	vll = *vl;
+	vuu = *vu;
+    } else {
+	vll = 0.f;
+	vuu = 0.f;
+    }
+    tnrm = slanst_("M", n, &d__[1], &e[1]);
+    if (tnrm > 0.f && tnrm < rmin) {
+	iscale = 1;
+	sigma = rmin / tnrm;
+    } else if (tnrm > rmax) {
+	iscale = 1;
+	sigma = rmax / tnrm;
+    }
+    if (iscale == 1) {
+	sscal_(n, &sigma, &d__[1], &c__1);
+	i__1 = *n - 1;
+	sscal_(&i__1, &sigma, &e[1], &c__1);
+	if (valeig) {
+/* Keep the search interval consistent with the scaled matrix. */
+	    vll = *vl * sigma;
+	    vuu = *vu * sigma;
+	}
+    }
+
+/* If all eigenvalues are desired and ABSTOL is less than zero, then */
+/* call SSTERF or SSTEQR. If this fails for some eigenvalue, then */
+/* try SSTEBZ. */
+
+    test = FALSE_;
+    if (indeig) {
+	if (*il == 1 && *iu == *n) {
+	    test = TRUE_;
+	}
+    }
+    if ((alleig || test) && *abstol <= 0.f) {
+/* Work on copies: W gets D, WORK(1:N-1) gets E, so D/E keep the scaled */
+/* matrix for the SSTEBZ fallback below if this fast path fails. */
+	scopy_(n, &d__[1], &c__1, &w[1], &c__1);
+	i__1 = *n - 1;
+	scopy_(&i__1, &e[1], &c__1, &work[1], &c__1);
+	indwrk = *n + 1;
+	if (! wantz) {
+	    ssterf_(n, &w[1], &work[1], info);
+	} else {
+	    ssteqr_("I", n, &w[1], &work[1], &z__[z_offset], ldz, &work[
+		    indwrk], info);
+	    if (*info == 0) {
+/* SSTEQR converged for every eigenvector: mark all IFAIL entries clean. */
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    ifail[i__] = 0;
+/* L10: */
+		}
+	    }
+	}
+	if (*info == 0) {
+	    *m = *n;
+	    goto L20;
+	}
+/* Fast path failed: clear INFO and fall through to SSTEBZ/SSTEIN. */
+	*info = 0;
+    }
+
+/* Otherwise, call SSTEBZ and, if eigenvectors are desired, SSTEIN. */
+
+    if (wantz) {
+	*(unsigned char *)order = 'B';
+    } else {
+	*(unsigned char *)order = 'E';
+    }
+/* Workspace layout: WORK(INDWRK..) real scratch; IWORK(INDIBL..) block */
+/* indices, IWORK(INDISP..) split points, IWORK(INDIWO..) integer scratch. */
+    indwrk = 1;
+    indibl = 1;
+    indisp = indibl + *n;
+    indiwo = indisp + *n;
+    sstebz_(range, order, n, &vll, &vuu, il, iu, abstol, &d__[1], &e[1], m, &
+	    nsplit, &w[1], &iwork[indibl], &iwork[indisp], &work[indwrk], &
+	    iwork[indiwo], info);
+
+    if (wantz) {
+	sstein_(n, &d__[1], &e[1], m, &w[1], &iwork[indibl], &iwork[indisp], &
+		z__[z_offset], ldz, &work[indwrk], &iwork[indiwo], &ifail[1],
+		info);
+    }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+L20:
+    if (iscale == 1) {
+	if (*info == 0) {
+	    imax = *m;
+	} else {
+/* On failure only the first INFO-1 eigenvalues are reliable. */
+	    imax = *info - 1;
+	}
+	r__1 = 1.f / sigma;
+	sscal_(&imax, &r__1, &w[1], &c__1);
+    }
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+/* Selection sort over the M found eigenvalues; the matching Z columns, */
+/* IBLOCK entries and (when INFO != 0) IFAIL entries are permuted in step. */
+    if (wantz) {
+	i__1 = *m - 1;
+	for (j = 1; j <= i__1; ++j) {
+	    i__ = 0;
+	    tmp1 = w[j];
+	    i__2 = *m;
+	    for (jj = j + 1; jj <= i__2; ++jj) {
+		if (w[jj] < tmp1) {
+		    i__ = jj;
+		    tmp1 = w[jj];
+		}
+/* L30: */
+	    }
+
+	    if (i__ != 0) {
+		itmp1 = iwork[indibl + i__ - 1];
+		w[i__] = w[j];
+		iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+		w[j] = tmp1;
+		iwork[indibl + j - 1] = itmp1;
+		sswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+			 &c__1);
+		if (*info != 0) {
+		    itmp1 = ifail[i__];
+		    ifail[i__] = ifail[j];
+		    ifail[j] = itmp1;
+		}
+	    }
+/* L40: */
+	}
+    }
+
+    return 0;
+
+/* End of SSTEVX */
+
+} /* sstevx_ */
diff --git a/contrib/libs/clapack/ssycon.c b/contrib/libs/clapack/ssycon.c
new file mode 100644
index 0000000000..e5531089b4
--- /dev/null
+++ b/contrib/libs/clapack/ssycon.c
@@ -0,0 +1,202 @@
+/* ssycon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ssycon_(char *uplo, integer *n, real *a, integer *lda,
+	integer *ipiv, real *anorm, real *rcond, real *work, integer *iwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1;
+
+    /* Local variables */
+    integer i__, kase;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    logical upper;
+    extern /* Subroutine */ int slacn2_(integer *, real *, real *, integer *,
+	    real *, integer *, integer *), xerbla_(char *, integer *);
+    real ainvnm;
+    extern /* Subroutine */ int ssytrs_(char *, integer *, integer *, real *,
+	    integer *, integer *, real *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYCON estimates the reciprocal of the condition number (in the */
+/* 1-norm) of a real symmetric matrix A using the factorization */
+/* A = U*D*U**T or A = L*D*L**T computed by SSYTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**T; */
+/* = 'L': Lower triangular, form is A = L*D*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The block diagonal matrix D and the multipliers used to */
+/* obtain the factor U or L as computed by SSYTRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by SSYTRF. */
+
+/* ANORM (input) REAL */
+/* The 1-norm of the original matrix A. */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) REAL array, dimension (2*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*anorm < 0.f) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SSYCON", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *rcond = 0.f;
+    if (*n == 0) {
+	*rcond = 1.f;
+	return 0;
+    } else if (*anorm <= 0.f) {
+	return 0;
+    }
+
+/* Check that the diagonal matrix D is nonsingular. */
+
+/* A zero diagonal entry in a 1-by-1 pivot block of D means A is */
+/* exactly singular; RCOND keeps its initial value of zero. */
+
+    if (upper) {
+
+/* Upper triangular storage: examine D from bottom to top */
+
+	for (i__ = *n; i__ >= 1; --i__) {
+	    if (ipiv[i__] > 0 && a[i__ + i__ * a_dim1] == 0.f) {
+		return 0;
+	    }
+/* L10: */
+	}
+    } else {
+
+/* Lower triangular storage: examine D from top to bottom. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (ipiv[i__] > 0 && a[i__ + i__ * a_dim1] == 0.f) {
+		return 0;
+	    }
+/* L20: */
+	}
+    }
+
+/* Estimate the 1-norm of the inverse. */
+
+/* Reverse-communication loop: SLACN2 returns KASE != 0 each time it */
+/* needs the caller to apply inv(A) to the vector in WORK(1:N); the */
+/* loop ends (KASE == 0) once the norm estimate AINVNM has converged. */
+
+    kase = 0;
+L30:
+    slacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+    if (kase != 0) {
+
+/* Multiply by inv(L*D*L') or inv(U*D*U'). */
+
+	ssytrs_(uplo, n, &c__1, &a[a_offset], lda, &ipiv[1], &work[1], n,
+		info);
+	goto L30;
+    }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+    if (ainvnm != 0.f) {
+	*rcond = 1.f / ainvnm / *anorm;
+    }
+
+    return 0;
+
+/* End of SSYCON */
+
+} /* ssycon_ */
diff --git a/contrib/libs/clapack/ssyequb.c b/contrib/libs/clapack/ssyequb.c
new file mode 100644
index 0000000000..0ed411fca5
--- /dev/null
+++ b/contrib/libs/clapack/ssyequb.c
@@ -0,0 +1,334 @@
+/* ssyequb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ssyequb_(char *uplo, integer *n, real *a, integer *lda,
+	real *s, real *scond, real *amax, real *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    real r__1, r__2, r__3;
+
+    /* Builtin functions */
+    double sqrt(doublereal), log(doublereal), pow_ri(real *, integer *);
+
+    /* Local variables */
+    real d__;
+    integer i__, j;
+    real t, u, c0, c1, c2, si;
+    logical up;
+    real avg, std, tol, base;
+    integer iter;
+    real smin, smax, scale;
+    extern logical lsame_(char *, char *);
+    real sumsq;
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real bignum;
+    extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+	    real *);
+    real smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- Contributed by James Demmel, Deaglan Halligan, Yozo Hida and -- */
+/* -- Jason Riedy of Univ. of California Berkeley. -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley and NAG Ltd. -- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYEQUB computes row and column scalings intended to equilibrate a */
+/* symmetric matrix A and reduce its condition number */
+/* (with respect to the two-norm). S contains the scale factors, */
+/* S(i) = 1/sqrt(A(i,i)), chosen so that the scaled matrix B with */
+/* elements B(i,j) = S(i)*A(i,j)*S(j) has ones on the diagonal. This */
+/* choice of S puts the condition number of B within a factor N of the */
+/* smallest possible condition number over all possible diagonal */
+/* scalings. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The N-by-N symmetric matrix whose scaling */
+/* factors are to be computed. Only the diagonal elements of A */
+/* are referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* S (output) REAL array, dimension (N) */
+/* If INFO = 0, S contains the scale factors for A. */
+
+/* SCOND (output) REAL */
+/* If INFO = 0, S contains the ratio of the smallest S(i) to */
+/* the largest S(i). If SCOND >= 0.1 and AMAX is neither too */
+/* large nor too small, it is not worth scaling by S. */
+
+/* AMAX (output) REAL */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element is nonpositive. */
+
+/* Further Details */
+/* ======= ======= */
+
+/* Reference: Livne, O.E. and Golub, G.H., "Scaling by Binormalization", */
+/* Numerical Algorithms, vol. 35, no. 1, pp. 97-120, January 2004. */
+/* DOI 10.1023/B:NUMA.0000016606.32820.69 */
+/* Tech report version: http://ruready.utah.edu/archive/papers/bin.pdf */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --s;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (! (lsame_(uplo, "U") || lsame_(uplo, "L"))) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SSYEQUB", &i__1);
+	return 0;
+    }
+    up = lsame_(uplo, "U");
+    *amax = 0.f;
+
+/* Quick return if possible. */
+
+    if (*n == 0) {
+	*scond = 1.f;
+	return 0;
+    }
+
+/* Initial guess: S(i) = 1 / max_j |A(i,j)|, computed from the stored */
+/* triangle only (each off-diagonal element updates both its row and */
+/* its column maximum); AMAX tracks the overall largest |A(i,j)|. */
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	s[i__] = 0.f;
+    }
+    *amax = 0.f;
+    if (up) {
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = j - 1;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		r__2 = s[i__], r__3 = (r__1 = a[i__ + j * a_dim1], dabs(r__1))
+			;
+		s[i__] = dmax(r__2,r__3);
+/* Computing MAX */
+		r__2 = s[j], r__3 = (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+		s[j] = dmax(r__2,r__3);
+/* Computing MAX */
+		r__2 = *amax, r__3 = (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+		*amax = dmax(r__2,r__3);
+	    }
+/* Computing MAX */
+	    r__2 = s[j], r__3 = (r__1 = a[j + j * a_dim1], dabs(r__1));
+	    s[j] = dmax(r__2,r__3);
+/* Computing MAX */
+	    r__2 = *amax, r__3 = (r__1 = a[j + j * a_dim1], dabs(r__1));
+	    *amax = dmax(r__2,r__3);
+	}
+    } else {
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+	    r__2 = s[j], r__3 = (r__1 = a[j + j * a_dim1], dabs(r__1));
+	    s[j] = dmax(r__2,r__3);
+/* Computing MAX */
+	    r__2 = *amax, r__3 = (r__1 = a[j + j * a_dim1], dabs(r__1));
+	    *amax = dmax(r__2,r__3);
+	    i__2 = *n;
+	    for (i__ = j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		r__2 = s[i__], r__3 = (r__1 = a[i__ + j * a_dim1], dabs(r__1))
+			;
+		s[i__] = dmax(r__2,r__3);
+/* Computing MAX */
+		r__2 = s[j], r__3 = (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+		s[j] = dmax(r__2,r__3);
+/* Computing MAX */
+		r__2 = *amax, r__3 = (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+		*amax = dmax(r__2,r__3);
+	    }
+	}
+    }
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	s[j] = 1.f / s[j];
+    }
+
+/* Binormalization sweeps (Livne & Golub): refine S until the scaled */
+/* row sums are nearly uniform, with a hard cap of 100 iterations. */
+
+    tol = 1.f / sqrt(*n * 2.f);
+    for (iter = 1; iter <= 100; ++iter) {
+	scale = 0.f;
+	sumsq = 0.f;
+/* BETA = |A|S */
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    work[i__] = 0.f;
+	}
+	if (up) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    t = (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+		    work[i__] += (r__1 = a[i__ + j * a_dim1], dabs(r__1)) * s[
+			    j];
+		    work[j] += (r__1 = a[i__ + j * a_dim1], dabs(r__1)) * s[
+			    i__];
+		}
+		work[j] += (r__1 = a[j + j * a_dim1], dabs(r__1)) * s[j];
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		work[j] += (r__1 = a[j + j * a_dim1], dabs(r__1)) * s[j];
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    t = (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+		    work[i__] += (r__1 = a[i__ + j * a_dim1], dabs(r__1)) * s[
+			    j];
+		    work[j] += (r__1 = a[i__ + j * a_dim1], dabs(r__1)) * s[
+			    i__];
+		}
+	    }
+	}
+/* avg = s^T beta / n */
+	avg = 0.f;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    avg += s[i__] * work[i__];
+	}
+	avg /= *n;
+
+/* Convergence test: WORK(2N+1:3N) holds the deviations s(i)*beta(i) */
+/* minus AVG; their scaled 2-norm (via SLASSQ) gives STD, and the */
+/* sweeps stop once STD < TOL * AVG. */
+
+	std = 0.f;
+	i__1 = *n * 3;
+	for (i__ = (*n << 1) + 1; i__ <= i__1; ++i__) {
+	    work[i__] = s[i__ - (*n << 1)] * work[i__ - (*n << 1)] - avg;
+	}
+	slassq_(n, &work[(*n << 1) + 1], &c__1, &scale, &sumsq);
+	std = scale * sqrt(sumsq / *n);
+	if (std < tol * avg) {
+	    goto L999;
+	}
+
+/* One Gauss-Seidel-style pass: for each i, the new scale SI is the */
+/* smaller root of the quadratic c2*x^2 + c1*x + c0 = 0 derived from */
+/* keeping the average s^T beta / n stationary; BETA and AVG are then */
+/* updated incrementally by D__ = SI - S(i). */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    t = (r__1 = a[i__ + i__ * a_dim1], dabs(r__1));
+	    si = s[i__];
+	    c2 = (*n - 1) * t;
+	    c1 = (*n - 2) * (work[i__] - t * si);
+	    c0 = -(t * si) * si + work[i__] * 2 * si - *n * avg;
+	    d__ = c1 * c1 - c0 * 4 * c2;
+	    if (d__ <= 0.f) {
+		*info = -1;
+		return 0;
+	    }
+	    si = c0 * -2 / (c1 + sqrt(d__));
+	    d__ = si - s[i__];
+	    u = 0.f;
+	    if (up) {
+		i__2 = i__;
+		for (j = 1; j <= i__2; ++j) {
+		    t = (r__1 = a[j + i__ * a_dim1], dabs(r__1));
+		    u += s[j] * t;
+		    work[j] += d__ * t;
+		}
+		i__2 = *n;
+		for (j = i__ + 1; j <= i__2; ++j) {
+		    t = (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+		    u += s[j] * t;
+		    work[j] += d__ * t;
+		}
+	    } else {
+		i__2 = i__;
+		for (j = 1; j <= i__2; ++j) {
+		    t = (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+		    u += s[j] * t;
+		    work[j] += d__ * t;
+		}
+		i__2 = *n;
+		for (j = i__ + 1; j <= i__2; ++j) {
+		    t = (r__1 = a[j + i__ * a_dim1], dabs(r__1));
+		    u += s[j] * t;
+		    work[j] += d__ * t;
+		}
+	    }
+	    avg += (u + work[i__]) * d__ / *n;
+	    s[i__] = si;
+	}
+    }
+
+/* Post-processing: normalize by sqrt(AVG) and round each scale factor */
+/* down to a power of the floating-point radix, so multiplying by S is */
+/* exact; SCOND is the (safeguarded) ratio of smallest to largest S(i). */
+
+L999:
+    smlnum = slamch_("SAFEMIN");
+    bignum = 1.f / smlnum;
+    smin = bignum;
+    smax = 0.f;
+    t = 1.f / sqrt(avg);
+    base = slamch_("B");
+    u = 1.f / log(base);
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = (integer) (u * log(s[i__] * t));
+	s[i__] = pow_ri(&base, &i__2);
+/* Computing MIN */
+	r__1 = smin, r__2 = s[i__];
+	smin = dmin(r__1,r__2);
+/* Computing MAX */
+	r__1 = smax, r__2 = s[i__];
+	smax = dmax(r__1,r__2);
+    }
+    *scond = dmax(smin,smlnum) / dmin(smax,bignum);
+
+    return 0;
+} /* ssyequb_ */
diff --git a/contrib/libs/clapack/ssyev.c b/contrib/libs/clapack/ssyev.c
new file mode 100644
index 0000000000..49319d02c1
--- /dev/null
+++ b/contrib/libs/clapack/ssyev.c
@@ -0,0 +1,276 @@
+/* ssyev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__0 = 0;
+static real c_b17 = 1.f;
+
+/* Subroutine */ int ssyev_(char *jobz, char *uplo, integer *n, real *a,
+	integer *lda, real *w, real *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    real r__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer nb;
+    real eps;
+    integer inde;
+    real anrm;
+    integer imax;
+    real rmin, rmax, sigma;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+    logical lower, wantz;
+    integer iscale;
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real bignum;
+    extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+	    real *, integer *, integer *, real *, integer *, integer *);
+    integer indtau, indwrk;
+    extern /* Subroutine */ int ssterf_(integer *, real *, real *, integer *);
+    extern doublereal slansy_(char *, char *, integer *, real *, integer *,
+	    real *);
+    integer llwork;
+    real smlnum;
+    integer lwkopt;
+    logical lquery;
+    extern /* Subroutine */ int sorgtr_(char *, integer *, real *, integer *,
+	    real *, real *, integer *, integer *), ssteqr_(char *,
+	    integer *, real *, real *, real *, integer *, real *, integer *), ssytrd_(char *, integer *, real *, integer *, real *,
+	    real *, real *, real *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYEV computes all eigenvalues and, optionally, eigenvectors of a */
+/* real symmetric matrix A. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA, N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+/* On exit, if JOBZ = 'V', then if INFO = 0, A contains the */
+/* orthonormal eigenvectors of the matrix A. */
+/* If JOBZ = 'N', then on exit the lower triangle (if UPLO='L') */
+/* or the upper triangle (if UPLO='U') of A, including the */
+/* diagonal, is destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,3*N-1). */
+/* For optimal efficiency, LWORK >= (NB+2)*N, */
+/* where NB is the blocksize for SSYTRD returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the algorithm failed to converge; i */
+/* off-diagonal elements of an intermediate tridiagonal */
+/* form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --w;
+    --work;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    lower = lsame_(uplo, "L");
+    lquery = *lwork == -1;
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (lower || lsame_(uplo, "U"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    }
+
+    if (*info == 0) {
+	nb = ilaenv_(&c__1, "SSYTRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+	i__1 = 1, i__2 = (nb + 2) * *n;
+	lwkopt = max(i__1,i__2);
+	work[1] = (real) lwkopt;
+
+/* Computing MAX */
+	i__1 = 1, i__2 = *n * 3 - 1;
+	if (*lwork < max(i__1,i__2) && ! lquery) {
+	    *info = -8;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SSYEV ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* N = 1: the eigenvalue is A(1,1) itself; WORK(1) still reports the */
+/* optimal workspace size for this case. */
+
+    if (*n == 1) {
+	w[1] = a[a_dim1 + 1];
+	work[1] = 2.f;
+	if (wantz) {
+	    a[a_dim1 + 1] = 1.f;
+	}
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    safmin = slamch_("Safe minimum");
+    eps = slamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1.f / smlnum;
+    rmin = sqrt(smlnum);
+    rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+    anrm = slansy_("M", uplo, n, &a[a_offset], lda, &work[1]);
+    iscale = 0;
+    if (anrm > 0.f && anrm < rmin) {
+	iscale = 1;
+	sigma = rmin / anrm;
+    } else if (anrm > rmax) {
+	iscale = 1;
+	sigma = rmax / anrm;
+    }
+    if (iscale == 1) {
+	slascl_(uplo, &c__0, &c__0, &c_b17, &sigma, n, n, &a[a_offset], lda,
+		info);
+    }
+
+/* Call SSYTRD to reduce symmetric matrix to tridiagonal form. */
+
+/* Workspace layout: WORK(INDE:INDE+N-2) holds the off-diagonal E, */
+/* WORK(INDTAU:INDTAU+N-2) the Householder scalars TAU, and the */
+/* remaining LLWORK entries from WORK(INDWRK) are blocked workspace. */
+
+    inde = 1;
+    indtau = inde + *n;
+    indwrk = indtau + *n;
+    llwork = *lwork - indwrk + 1;
+    ssytrd_(uplo, n, &a[a_offset], lda, &w[1], &work[inde], &work[indtau], &
+	    work[indwrk], &llwork, &iinfo);
+
+/* For eigenvalues only, call SSTERF. For eigenvectors, first call */
+/* SORGTR to generate the orthogonal matrix, then call SSTEQR. */
+
+    if (! wantz) {
+	ssterf_(n, &w[1], &work[inde], info);
+    } else {
+	sorgtr_(uplo, n, &a[a_offset], lda, &work[indtau], &work[indwrk], &
+		llwork, &iinfo);
+/* TAU is no longer needed, so WORK(INDTAU) is reused as SSTEQR's */
+/* 2*N-2 real workspace. */
+	ssteqr_(jobz, n, &w[1], &work[inde], &a[a_offset], lda, &work[indtau],
+		 info);
+    }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+/* On failure (INFO > 0) only the first INFO-1 eigenvalues are valid, */
+/* so only those are rescaled. */
+
+    if (iscale == 1) {
+	if (*info == 0) {
+	    imax = *n;
+	} else {
+	    imax = *info - 1;
+	}
+	r__1 = 1.f / sigma;
+	sscal_(&imax, &r__1, &w[1], &c__1);
+    }
+
+/* Set WORK(1) to optimal workspace size. */
+
+    work[1] = (real) lwkopt;
+
+    return 0;
+
+/* End of SSYEV */
+
+} /* ssyev_ */
diff --git a/contrib/libs/clapack/ssyevd.c b/contrib/libs/clapack/ssyevd.c
new file mode 100644
index 0000000000..430de7dec0
--- /dev/null
+++ b/contrib/libs/clapack/ssyevd.c
@@ -0,0 +1,344 @@
+/* ssyevd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__0 = 0;
+static real c_b17 = 1.f;
+
+/* Subroutine */ int ssyevd_(char *jobz, char *uplo, integer *n, real *a,
+	integer *lda, real *w, real *work, integer *lwork, integer *iwork,
+	integer *liwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    real r__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    real eps;
+    integer inde;
+    real anrm, rmin, rmax;
+    integer lopt;
+    real sigma;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+    integer lwmin, liopt;
+    logical lower, wantz;
+    integer indwk2, llwrk2, iscale;
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real bignum;
+    extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+	    real *, integer *, integer *, real *, integer *, integer *);
+    integer indtau;
+    extern /* Subroutine */ int sstedc_(char *, integer *, real *, real *,
+	    real *, integer *, real *, integer *, integer *, integer *,
+	    integer *), slacpy_(char *, integer *, integer *, real *,
+	    integer *, real *, integer *);
+    integer indwrk, liwmin;
+    extern /* Subroutine */ int ssterf_(integer *, real *, real *, integer *);
+    extern doublereal slansy_(char *, char *, integer *, real *, integer *,
+	    real *);
+    integer llwork;
+    real smlnum;
+    logical lquery;
+    extern /* Subroutine */ int sormtr_(char *, char *, char *, integer *,
+	    integer *, real *, integer *, real *, real *, integer *, real *,
+	    integer *, integer *), ssytrd_(char *,
+	    integer *, real *, integer *, real *, real *, real *, real *,
+	    integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYEVD computes all eigenvalues and, optionally, eigenvectors of a */
+/* real symmetric matrix A. If eigenvectors are desired, it uses a */
+/* divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Because of large use of BLAS of level 3, SSYEVD needs N**2 more */
+/* workspace than SSYEVX. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA, N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+/* On exit, if JOBZ = 'V', then if INFO = 0, A contains the */
+/* orthonormal eigenvectors of the matrix A. */
+/* If JOBZ = 'N', then on exit the lower triangle (if UPLO='L') */
+/* or the upper triangle (if UPLO='U') of A, including the */
+/* diagonal, is destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* WORK (workspace/output) REAL array, */
+/* dimension (LWORK) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If N <= 1, LWORK must be at least 1. */
+/* If JOBZ = 'N' and N > 1, LWORK must be at least 2*N+1. */
+/* If JOBZ = 'V' and N > 1, LWORK must be at least */
+/* 1 + 6*N + 2*N**2. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK and IWORK */
+/* arrays, returns these values as the first entries of the WORK */
+/* and IWORK arrays, and no error message related to LWORK or */
+/* LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If N <= 1, LIWORK must be at least 1. */
+/* If JOBZ = 'N' and N > 1, LIWORK must be at least 1. */
+/* If JOBZ = 'V' and N > 1, LIWORK must be at least 3 + 5*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK and IWORK arrays, and no error message related to */
+/* LWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i and JOBZ = 'N', then the algorithm failed */
+/* to converge; i off-diagonal elements of an intermediate */
+/* tridiagonal form did not converge to zero; */
+/* if INFO = i and JOBZ = 'V', then the algorithm failed */
+/* to compute an eigenvalue while working on the submatrix */
+/* lying in rows and columns INFO/(N+1) through */
+/* mod(INFO,N+1). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+/* Modified by Francoise Tisseur, University of Tennessee. */
+
+/* Modified description of INFO. Sven, 16 Feb 05. */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --w;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    lower = lsame_(uplo, "L");
+    lquery = *lwork == -1 || *liwork == -1;
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (lower || lsame_(uplo, "U"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    }
+
+/* Compute the minimal and optimal WORK/IWORK sizes for this JOBZ and */
+/* N, report them in WORK(1)/IWORK(1), and validate LWORK/LIWORK */
+/* (skipped when this is a workspace query). */
+
+    if (*info == 0) {
+	if (*n <= 1) {
+	    liwmin = 1;
+	    lwmin = 1;
+	    lopt = lwmin;
+	    liopt = liwmin;
+	} else {
+	    if (wantz) {
+		liwmin = *n * 5 + 3;
+/* Computing 2nd power */
+		i__1 = *n;
+		lwmin = *n * 6 + 1 + (i__1 * i__1 << 1);
+	    } else {
+		liwmin = 1;
+		lwmin = (*n << 1) + 1;
+	    }
+/* Computing MAX */
+	    i__1 = lwmin, i__2 = (*n << 1) + ilaenv_(&c__1, "SSYTRD", uplo, n,
+		     &c_n1, &c_n1, &c_n1);
+	    lopt = max(i__1,i__2);
+	    liopt = liwmin;
+	}
+	work[1] = (real) lopt;
+	iwork[1] = liopt;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -8;
+	} else if (*liwork < liwmin && ! lquery) {
+	    *info = -10;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SSYEVD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (*n == 1) {
+	w[1] = a[a_dim1 + 1];
+	if (wantz) {
+	    a[a_dim1 + 1] = 1.f;
+	}
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    safmin = slamch_("Safe minimum");
+    eps = slamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1.f / smlnum;
+    rmin = sqrt(smlnum);
+    rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+    anrm = slansy_("M", uplo, n, &a[a_offset], lda, &work[1]);
+    iscale = 0;
+    if (anrm > 0.f && anrm < rmin) {
+	iscale = 1;
+	sigma = rmin / anrm;
+    } else if (anrm > rmax) {
+	iscale = 1;
+	sigma = rmax / anrm;
+    }
+    if (iscale == 1) {
+	slascl_(uplo, &c__0, &c__0, &c_b17, &sigma, n, n, &a[a_offset], lda,
+		info);
+    }
+
+/* Call SSYTRD to reduce symmetric matrix to tridiagonal form. */
+
+/* Workspace layout: WORK(INDE:) holds the off-diagonal E, */
+/* WORK(INDTAU:) the Householder scalars TAU, WORK(INDWRK:) an N-by-N */
+/* eigenvector matrix for SSTEDC, and WORK(INDWK2:) the remaining */
+/* LLWRK2 entries of scratch for SSTEDC/SORMTR. */
+
+    inde = 1;
+    indtau = inde + *n;
+    indwrk = indtau + *n;
+    llwork = *lwork - indwrk + 1;
+    indwk2 = indwrk + *n * *n;
+    llwrk2 = *lwork - indwk2 + 1;
+
+    ssytrd_(uplo, n, &a[a_offset], lda, &w[1], &work[inde], &work[indtau], &
+	    work[indwrk], &llwork, &iinfo);
+
+/* For eigenvalues only, call SSTERF. For eigenvectors, first call */
+/* SSTEDC to generate the eigenvector matrix, WORK(INDWRK), of the */
+/* tridiagonal matrix, then call SORMTR to multiply it by the */
+/* Householder transformations stored in A. */
+
+    if (! wantz) {
+	ssterf_(n, &w[1], &work[inde], info);
+    } else {
+	sstedc_("I", n, &w[1], &work[inde], &work[indwrk], n, &work[indwk2], &
+		llwrk2, &iwork[1], liwork, info);
+	sormtr_("L", uplo, "N", n, n, &a[a_offset], lda, &work[indtau], &work[
+		indwrk], n, &work[indwk2], &llwrk2, &iinfo);
+/* Copy the final eigenvectors from workspace back into A. */
+	slacpy_("A", n, n, &work[indwrk], n, &a[a_offset], lda);
+    }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+    if (iscale == 1) {
+	r__1 = 1.f / sigma;
+	sscal_(n, &r__1, &w[1], &c__1);
+    }
+
+    work[1] = (real) lopt;
+    iwork[1] = liopt;
+
+    return 0;
+
+/* End of SSYEVD */
+
+} /* ssyevd_ */
diff --git a/contrib/libs/clapack/ssyevr.c b/contrib/libs/clapack/ssyevr.c
new file mode 100644
index 0000000000..af26cb0bb5
--- /dev/null
+++ b/contrib/libs/clapack/ssyevr.c
@@ -0,0 +1,658 @@
+/* ssyevr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__10 = 10;
+static integer c__1 = 1;
+static integer c__2 = 2;
+static integer c__3 = 3;
+static integer c__4 = 4;
+static integer c_n1 = -1;
+
+/* Subroutine */ int ssyevr_(char *jobz, char *range, char *uplo, integer *n,
+	 real *a, integer *lda, real *vl, real *vu, integer *il, integer *iu,
+	real *abstol, integer *m, real *w, real *z__, integer *ldz, integer *
+	isuppz, real *work, integer *lwork, integer *iwork, integer *liwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, z_dim1, z_offset, i__1, i__2;
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, nb, jj;
+    real eps, vll, vuu, tmp1;
+    integer indd, inde;
+    real anrm;
+    integer imax;
+    real rmin, rmax;
+    logical test;
+    integer inddd, indee;
+    real sigma;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+    char order[1];
+    integer indwk, lwmin;
+    logical lower;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+	    integer *), sswap_(integer *, real *, integer *, real *, integer *
+);
+    logical wantz, alleig, indeig;
+    integer iscale, ieeeok, indibl, indifl;
+    logical valeig;
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real abstll, bignum;
+    integer indtau, indisp, indiwo, indwkn, liwmin;
+    logical tryrac;
+    extern /* Subroutine */ int sstein_(integer *, real *, real *, integer *,
+	    real *, integer *, integer *, real *, integer *, real *, integer *
+, integer *, integer *), ssterf_(integer *, real *, real *,
+	    integer *);
+    integer llwrkn, llwork, nsplit;
+    real smlnum;
+    extern doublereal slansy_(char *, char *, integer *, real *, integer *,
+	    real *);
+    extern /* Subroutine */ int sstebz_(char *, char *, integer *, real *,
+	    real *, integer *, integer *, real *, real *, real *, integer *,
+	    integer *, real *, integer *, integer *, real *, integer *,
+	    integer *), sstemr_(char *, char *, integer *,
+	    real *, real *, real *, real *, integer *, integer *, integer *,
+	    real *, real *, integer *, integer *, integer *, logical *, real *
+, integer *, integer *, integer *, integer *);
+    integer lwkopt;
+    logical lquery;
+    extern /* Subroutine */ int sormtr_(char *, char *, char *, integer *,
+	    integer *, real *, integer *, real *, real *, integer *, real *,
+	    integer *, integer *), ssytrd_(char *,
+	    integer *, real *, integer *, real *, real *, real *, real *,
+	    integer *, integer *);
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SSYEVR computes selected eigenvalues and, optionally, eigenvectors */
+/*  of a real symmetric matrix A.  Eigenvalues and eigenvectors can be */
+/*  selected by specifying either a range of values or a range of */
+/*  indices for the desired eigenvalues. */
+
+/*  SSYEVR first reduces the matrix A to tridiagonal form T with a call */
+/*  to SSYTRD.  Then, whenever possible, SSYEVR calls SSTEMR to compute */
+/*  the eigenspectrum using Relatively Robust Representations.  SSTEMR */
+/*  computes eigenvalues by the dqds algorithm, while orthogonal */
+/*  eigenvectors are computed from various "good" L D L^T representations */
+/*  (also known as Relatively Robust Representations). Gram-Schmidt */
+/*  orthogonalization is avoided as far as possible. More specifically, */
+/*  the various steps of the algorithm are as follows. */
+
+/*  For each unreduced block (submatrix) of T, */
+/*     (a) Compute T - sigma I  = L D L^T, so that L and D */
+/*         define all the wanted eigenvalues to high relative accuracy. */
+/*         This means that small relative changes in the entries of D and L */
+/*         cause only small relative changes in the eigenvalues and */
+/*         eigenvectors. The standard (unfactored) representation of the */
+/*         tridiagonal matrix T does not have this property in general. */
+/*     (b) Compute the eigenvalues to suitable accuracy. */
+/*         If the eigenvectors are desired, the algorithm attains full */
+/*         accuracy of the computed eigenvalues only right before */
+/*         the corresponding vectors have to be computed, see steps c) and d). */
+/*     (c) For each cluster of close eigenvalues, select a new */
+/*         shift close to the cluster, find a new factorization, and refine */
+/*         the shifted eigenvalues to suitable accuracy. */
+/*     (d) For each eigenvalue with a large enough relative separation compute */
+/*         the corresponding eigenvector by forming a rank revealing twisted */
+/*         factorization. Go back to (c) for any clusters that remain. */
+
+/*  The desired accuracy of the output can be specified by the input */
+/*  parameter ABSTOL. */
+
+/*  For more details, see SSTEMR's documentation and: */
+/*  - Inderjit S. Dhillon and Beresford N. Parlett: "Multiple representations */
+/*    to compute orthogonal eigenvectors of symmetric tridiagonal matrices," */
+/*    Linear Algebra and its Applications, 387(1), pp. 1-28, August 2004. */
+/*  - Inderjit Dhillon and Beresford Parlett: "Orthogonal Eigenvectors and */
+/*    Relative Gaps," SIAM Journal on Matrix Analysis and Applications, Vol. 25, */
+/*    2004.  Also LAPACK Working Note 154. */
+/*  - Inderjit Dhillon: "A new O(n^2) algorithm for the symmetric */
+/*    tridiagonal eigenvalue/eigenvector problem", */
+/*    Computer Science Division Technical Report No. UCB/CSD-97-971, */
+/*    UC Berkeley, May 1997. */
+
+
+/*  Note 1 : SSYEVR calls SSTEMR when the full spectrum is requested */
+/*  on machines which conform to the ieee-754 floating point standard. */
+/*  SSYEVR calls SSTEBZ and SSTEIN on non-ieee machines and */
+/*  when partial spectrum requests are made. */
+
+/*  Normal execution of SSTEMR may create NaNs and infinities and */
+/*  hence may abort due to a floating point exception in environments */
+/*  which do not handle NaNs and infinities in the ieee standard default */
+/*  manner. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOBZ    (input) CHARACTER*1 */
+/*          = 'N':  Compute eigenvalues only; */
+/*          = 'V':  Compute eigenvalues and eigenvectors. */
+
+/*  RANGE   (input) CHARACTER*1 */
+/*          = 'A': all eigenvalues will be found. */
+/*          = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/*                 will be found. */
+/*          = 'I': the IL-th through IU-th eigenvalues will be found. */
+/*  ********* For RANGE = 'V' or 'I' and IU - IL < N - 1, SSTEBZ and */
+/*  ********* SSTEIN are called */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangle of A is stored; */
+/*          = 'L':  Lower triangle of A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  A       (input/output) REAL array, dimension (LDA, N) */
+/*          On entry, the symmetric matrix A.  If UPLO = 'U', the */
+/*          leading N-by-N upper triangular part of A contains the */
+/*          upper triangular part of the matrix A.  If UPLO = 'L', */
+/*          the leading N-by-N lower triangular part of A contains */
+/*          the lower triangular part of the matrix A. */
+/*          On exit, the lower triangle (if UPLO='L') or the upper */
+/*          triangle (if UPLO='U') of A, including the diagonal, is */
+/*          destroyed. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  VL      (input) REAL */
+/*  VU      (input) REAL */
+/*          If RANGE='V', the lower and upper bounds of the interval to */
+/*          be searched for eigenvalues. VL < VU. */
+/*          Not referenced if RANGE = 'A' or 'I'. */
+
+/*  IL      (input) INTEGER */
+/*  IU      (input) INTEGER */
+/*          If RANGE='I', the indices (in ascending order) of the */
+/*          smallest and largest eigenvalues to be returned. */
+/*          1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/*          Not referenced if RANGE = 'A' or 'V'. */
+
+/*  ABSTOL  (input) REAL */
+/*          The absolute error tolerance for the eigenvalues. */
+/*          An approximate eigenvalue is accepted as converged */
+/*          when it is determined to lie in an interval [a,b] */
+/*          of width less than or equal to */
+
+/*                  ABSTOL + EPS *   max( |a|,|b| ) , */
+
+/*          where EPS is the machine precision.  If ABSTOL is less than */
+/*          or equal to zero, then  EPS*|T|  will be used in its place, */
+/*          where |T| is the 1-norm of the tridiagonal matrix obtained */
+/*          by reducing A to tridiagonal form. */
+
+/*          See "Computing Small Singular Values of Bidiagonal Matrices */
+/*          with Guaranteed High Relative Accuracy," by Demmel and */
+/*          Kahan, LAPACK Working Note #3. */
+
+/*          If high relative accuracy is important, set ABSTOL to */
+/*          SLAMCH( 'Safe minimum' ).  Doing so will guarantee that */
+/*          eigenvalues are computed to high relative accuracy when */
+/*          possible in future releases.  The current code does not */
+/*          make any guarantees about high relative accuracy, but */
+/*          future releases will. See J. Barlow and J. Demmel, */
+/*          "Computing Accurate Eigensystems of Scaled Diagonally */
+/*          Dominant Matrices", LAPACK Working Note #7, for a discussion */
+/*          of which matrices define their eigenvalues to high relative */
+/*          accuracy. */
+
+/*  M       (output) INTEGER */
+/*          The total number of eigenvalues found.  0 <= M <= N. */
+/*          If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/*  W       (output) REAL array, dimension (N) */
+/*          The first M elements contain the selected eigenvalues in */
+/*          ascending order. */
+
+/*  Z       (output) REAL array, dimension (LDZ, max(1,M)) */
+/*          If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/*          contain the orthonormal eigenvectors of the matrix A */
+/*          corresponding to the selected eigenvalues, with the i-th */
+/*          column of Z holding the eigenvector associated with W(i). */
+/*          If JOBZ = 'N', then Z is not referenced. */
+/*          Note: the user must ensure that at least max(1,M) columns are */
+/*          supplied in the array Z; if RANGE = 'V', the exact value of M */
+/*          is not known in advance and an upper bound must be used. */
+/*          Supplying N columns is always safe. */
+
+/*  LDZ     (input) INTEGER */
+/*          The leading dimension of the array Z.  LDZ >= 1, and if */
+/*          JOBZ = 'V', LDZ >= max(1,N). */
+
+/*  ISUPPZ  (output) INTEGER array, dimension ( 2*max(1,M) ) */
+/*          The support of the eigenvectors in Z, i.e., the indices */
+/*          indicating the nonzero elements in Z. The i-th eigenvector */
+/*          is nonzero only in elements ISUPPZ( 2*i-1 ) through */
+/*          ISUPPZ( 2*i ). */
+/*  ********* Implemented only for RANGE = 'A' or 'I' and IU - IL = N - 1 */
+
+/*  WORK    (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/*          On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/*  LWORK   (input) INTEGER */
+/*          The dimension of the array WORK.  LWORK >= max(1,26*N). */
+/*          For optimal efficiency, LWORK >= (NB+6)*N, */
+/*          where NB is the max of the blocksize for SSYTRD and SORMTR */
+/*          returned by ILAENV. */
+
+/*          If LWORK = -1, then a workspace query is assumed; the routine */
+/*          only calculates the optimal sizes of the WORK and IWORK */
+/*          arrays, returns these values as the first entries of the WORK */
+/*          and IWORK arrays, and no error message related to LWORK or */
+/*          LIWORK is issued by XERBLA. */
+
+/*  IWORK   (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/*          On exit, if INFO = 0, IWORK(1) returns the optimal LWORK. */
+
+/*  LIWORK  (input) INTEGER */
+/*          The dimension of the array IWORK.  LIWORK >= max(1,10*N). */
+
+/*          If LIWORK = -1, then a workspace query is assumed; the */
+/*          routine only calculates the optimal sizes of the WORK and */
+/*          IWORK arrays, returns these values as the first entries of */
+/*          the WORK and IWORK arrays, and no error message related to */
+/*          LWORK or LIWORK is issued by XERBLA. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  Internal error */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*     Inderjit Dhillon, IBM Almaden, USA */
+/*     Osni Marques, LBNL/NERSC, USA */
+/*     Ken Stanley, Computer Science Division, University of */
+/*       California at Berkeley, USA */
+/*     Jason Riedy, Computer Science Division, University of */
+/*       California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --isuppz;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    ieeeok = ilaenv_(&c__10, "SSYEVR", "N", &c__1, &c__2, &c__3, &c__4);
+
+    lower = lsame_(uplo, "L");
+    wantz = lsame_(jobz, "V");
+    alleig = lsame_(range, "A");
+    valeig = lsame_(range, "V");
+    indeig = lsame_(range, "I");
+
+    lquery = *lwork == -1 || *liwork == -1;
+
+/* Computing MAX */
+    i__1 = 1, i__2 = *n * 26;
+    lwmin = max(i__1,i__2);
+/* Computing MAX */
+    i__1 = 1, i__2 = *n * 10;
+    liwmin = max(i__1,i__2);
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (alleig || valeig || indeig)) {
+	*info = -2;
+    } else if (! (lower || lsame_(uplo, "U"))) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*n)) {
+	*info = -6;
+    } else {
+	if (valeig) {
+	    if (*n > 0 && *vu <= *vl) {
+		*info = -8;
+	    }
+	} else if (indeig) {
+	    if (*il < 1 || *il > max(1,*n)) {
+		*info = -9;
+	    } else if (*iu < min(*n,*il) || *iu > *n) {
+		*info = -10;
+	    }
+	}
+    }
+    if (*info == 0) {
+	if (*ldz < 1 || wantz && *ldz < *n) {
+	    *info = -15;
+	}
+    }
+
+    if (*info == 0) {
+	nb = ilaenv_(&c__1, "SSYTRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+	i__1 = nb, i__2 = ilaenv_(&c__1, "SORMTR", uplo, n, &c_n1, &c_n1, &
+		c_n1);
+	nb = max(i__1,i__2);
+/* Computing MAX */
+	i__1 = (nb + 1) * *n;
+	lwkopt = max(i__1,lwmin);
+	work[1] = (real) lwkopt;
+	iwork[1] = liwmin;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -18;
+	} else if (*liwork < liwmin && ! lquery) {
+	    *info = -20;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SSYEVR", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    *m = 0;
+    if (*n == 0) {
+	work[1] = 1.f;
+	return 0;
+    }
+
+    if (*n == 1) {
+	work[1] = 26.f;
+	if (alleig || indeig) {
+	    *m = 1;
+	    w[1] = a[a_dim1 + 1];
+	} else {
+	    if (*vl < a[a_dim1 + 1] && *vu >= a[a_dim1 + 1]) {
+		*m = 1;
+		w[1] = a[a_dim1 + 1];
+	    }
+	}
+	if (wantz) {
+	    z__[z_dim1 + 1] = 1.f;
+	}
+	return 0;
+    }
+
+/*     Get machine constants. */
+
+    safmin = slamch_("Safe minimum");
+    eps = slamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1.f / smlnum;
+    rmin = sqrt(smlnum);
+/* Computing MIN */
+    r__1 = sqrt(bignum), r__2 = 1.f / sqrt(sqrt(safmin));
+    rmax = dmin(r__1,r__2);
+
+/*     Scale matrix to allowable range, if necessary. */
+
+    iscale = 0;
+    abstll = *abstol;
+    if (valeig) {
+	vll = *vl;
+	vuu = *vu;
+    }
+    anrm = slansy_("M", uplo, n, &a[a_offset], lda, &work[1]);
+    if (anrm > 0.f && anrm < rmin) {
+	iscale = 1;
+	sigma = rmin / anrm;
+    } else if (anrm > rmax) {
+	iscale = 1;
+	sigma = rmax / anrm;
+    }
+    if (iscale == 1) {
+	if (lower) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n - j + 1;
+		sscal_(&i__2, &sigma, &a[j + j * a_dim1], &c__1);
+/* L10: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		sscal_(&j, &sigma, &a[j * a_dim1 + 1], &c__1);
+/* L20: */
+	    }
+	}
+	if (*abstol > 0.f) {
+	    abstll = *abstol * sigma;
+	}
+	if (valeig) {
+	    vll = *vl * sigma;
+	    vuu = *vu * sigma;
+	}
+    }
+/*     Initialize indices into workspaces.  Note: The IWORK indices are */
+/*     used only if SSTERF or SSTEMR fail. */
+/*     WORK(INDTAU:INDTAU+N-1) stores the scalar factors of the */
+/*     elementary reflectors used in SSYTRD. */
+    indtau = 1;
+/*     WORK(INDD:INDD+N-1) stores the tridiagonal's diagonal entries. */
+    indd = indtau + *n;
+/*     WORK(INDE:INDE+N-1) stores the off-diagonal entries of the */
+/*     tridiagonal matrix from SSYTRD. */
+    inde = indd + *n;
+/*     WORK(INDDD:INDDD+N-1) is a copy of the diagonal entries over */
+/*     -written by SSTEMR (the SSTERF path copies the diagonal to W). */
+    inddd = inde + *n;
+/*     WORK(INDEE:INDEE+N-1) is a copy of the off-diagonal entries over */
+/*     -written while computing the eigenvalues in SSTERF and SSTEMR. */
+    indee = inddd + *n;
+/*     INDWK is the starting offset of the left-over workspace, and */
+/*     LLWORK is the remaining workspace size. */
+    indwk = indee + *n;
+    llwork = *lwork - indwk + 1;
+/*     IWORK(INDIBL:INDIBL+M-1) corresponds to IBLOCK in SSTEBZ and */
+/*     stores the block indices of each of the M<=N eigenvalues. */
+    indibl = 1;
+/*     IWORK(INDISP:INDISP+NSPLIT-1) corresponds to ISPLIT in SSTEBZ and */
+/*     stores the starting and finishing indices of each block. */
+    indisp = indibl + *n;
+/*     IWORK(INDIFL:INDIFL+N-1) stores the indices of eigenvectors */
+/*     that corresponding to eigenvectors that fail to converge in */
+/*     SSTEIN.  This information is discarded; if any fail, the driver */
+/*     returns INFO > 0. */
+    indifl = indisp + *n;
+/*     INDIWO is the offset of the remaining integer workspace. NOTE(review): indiwo equals indifl here (both indisp + n), so SSTEIN's integer workspace overlaps the discarded IFAIL slots -- confirm against upstream LAPACK, which sets INDIWO = INDIFL + N. */
+    indiwo = indisp + *n;
+
+/*     Call SSYTRD to reduce symmetric matrix to tridiagonal form. */
+
+    ssytrd_(uplo, n, &a[a_offset], lda, &work[indd], &work[inde], &work[
+	    indtau], &work[indwk], &llwork, &iinfo);
+
+/*     If all eigenvalues are desired */
+/*     then call SSTERF or SSTEMR and SORMTR. */
+
+    test = FALSE_;
+    if (indeig) {
+	if (*il == 1 && *iu == *n) {
+	    test = TRUE_;
+	}
+    }
+    if ((alleig || test) && ieeeok == 1) {
+	if (! wantz) {
+	    scopy_(n, &work[indd], &c__1, &w[1], &c__1);
+	    i__1 = *n - 1;
+	    scopy_(&i__1, &work[inde], &c__1, &work[indee], &c__1);
+	    ssterf_(n, &w[1], &work[indee], info);
+	} else {
+	    i__1 = *n - 1;
+	    scopy_(&i__1, &work[inde], &c__1, &work[indee], &c__1);
+	    scopy_(n, &work[indd], &c__1, &work[inddd], &c__1);
+
+	    if (*abstol <= *n * 2.f * eps) {
+		tryrac = TRUE_;
+	    } else {
+		tryrac = FALSE_;
+	    }
+	    sstemr_(jobz, "A", n, &work[inddd], &work[indee], vl, vu, il, iu,
+		    m, &w[1], &z__[z_offset], ldz, n, &isuppz[1], &tryrac, &
+		    work[indwk], lwork, &iwork[1], liwork, info);
+
+
+
+/*           Apply orthogonal matrix used in reduction to tridiagonal */
+/*           form to eigenvectors returned by SSTEMR. */
+
+	    if (wantz && *info == 0) {
+		indwkn = inde;
+		llwrkn = *lwork - indwkn + 1;
+		sormtr_("L", uplo, "N", n, m, &a[a_offset], lda, &work[indtau]
+, &z__[z_offset], ldz, &work[indwkn], &llwrkn, &iinfo);
+	    }
+	}
+
+
+	if (*info == 0) {
+/*           Everything worked.  Skip SSTEBZ/SSTEIN.  IWORK(:) are */
+/*           undefined. */
+	    *m = *n;
+	    goto L30;
+	}
+	*info = 0;
+    }
+
+/*     Otherwise, call SSTEBZ and, if eigenvectors are desired, SSTEIN. */
+/*     Also call SSTEBZ and SSTEIN if SSTEMR fails. */
+
+    if (wantz) {
+	*(unsigned char *)order = 'B';
+    } else {
+	*(unsigned char *)order = 'E';
+    }
+    sstebz_(range, order, n, &vll, &vuu, il, iu, &abstll, &work[indd], &work[
+	    inde], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &work[
+	    indwk], &iwork[indiwo], info);
+
+    if (wantz) {
+	sstein_(n, &work[indd], &work[inde], m, &w[1], &iwork[indibl], &iwork[
+		indisp], &z__[z_offset], ldz, &work[indwk], &iwork[indiwo], &
+		iwork[indifl], info);
+
+/*        Apply orthogonal matrix used in reduction to tridiagonal */
+/*        form to eigenvectors returned by SSTEIN. */
+
+	indwkn = inde;
+	llwrkn = *lwork - indwkn + 1;
+	sormtr_("L", uplo, "N", n, m, &a[a_offset], lda, &work[indtau], &z__[
+		z_offset], ldz, &work[indwkn], &llwrkn, &iinfo);
+    }
+
+/*     If matrix was scaled, then rescale eigenvalues appropriately. */
+
+/*  Jump here if SSTEMR/SSTEIN succeeded. */
+L30:
+    if (iscale == 1) {
+	if (*info == 0) {
+	    imax = *m;
+	} else {
+	    imax = *info - 1;
+	}
+	r__1 = 1.f / sigma;
+	sscal_(&imax, &r__1, &w[1], &c__1);
+    }
+
+/*     If eigenvalues are not in order, then sort them, along with */
+/*     eigenvectors.  Note: We do not sort the IFAIL portion of IWORK. */
+/*     It may not be initialized (if SSTEMR/SSTEIN succeeded), and we do */
+/*     not return this detailed information to the user. */
+
+    if (wantz) {
+	i__1 = *m - 1;
+	for (j = 1; j <= i__1; ++j) {
+	    i__ = 0;
+	    tmp1 = w[j];
+	    i__2 = *m;
+	    for (jj = j + 1; jj <= i__2; ++jj) {
+		if (w[jj] < tmp1) {
+		    i__ = jj;
+		    tmp1 = w[jj];
+		}
+/* L40: */
+	    }
+
+	    if (i__ != 0) {
+		w[i__] = w[j];
+		w[j] = tmp1;
+		sswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+			 &c__1);
+	    }
+/* L50: */
+	}
+    }
+
+/*     Set WORK(1) to optimal workspace size. */
+
+    work[1] = (real) lwkopt;
+    iwork[1] = liwmin;
+
+    return 0;
+
+/*     End of SSYEVR */
+
+} /* ssyevr_ */
diff --git a/contrib/libs/clapack/ssyevx.c b/contrib/libs/clapack/ssyevx.c
new file mode 100644
index 0000000000..8b6679c7d1
--- /dev/null
+++ b/contrib/libs/clapack/ssyevx.c
@@ -0,0 +1,531 @@
+/* ssyevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int ssyevx_(char *jobz, char *range, char *uplo, integer *n,
+ real *a, integer *lda, real *vl, real *vu, integer *il, integer *iu,
+ real *abstol, integer *m, real *w, real *z__, integer *ldz, real *
+ work, integer *lwork, integer *iwork, integer *ifail, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, z_dim1, z_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, nb, jj;
+ real eps, vll, vuu, tmp1;
+ integer indd, inde;
+ real anrm;
+ integer imax;
+ real rmin, rmax;
+ logical test;
+ integer itmp1, indee;
+ real sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ char order[1];
+ logical lower;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), sswap_(integer *, real *, integer *, real *, integer *
+);
+ logical wantz, alleig, indeig;
+ integer iscale, indibl;
+ logical valeig;
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real abstll, bignum;
+ integer indtau, indisp, indiwo, indwkn;
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *);
+ integer indwrk, lwkmin;
+ extern /* Subroutine */ int sstein_(integer *, real *, real *, integer *,
+ real *, integer *, integer *, real *, integer *, real *, integer *
+, integer *, integer *), ssterf_(integer *, real *, real *,
+ integer *);
+ integer llwrkn, llwork, nsplit;
+ real smlnum;
+ extern doublereal slansy_(char *, char *, integer *, real *, integer *,
+ real *);
+ extern /* Subroutine */ int sstebz_(char *, char *, integer *, real *,
+ real *, integer *, integer *, real *, real *, real *, integer *,
+ integer *, real *, integer *, integer *, real *, integer *,
+ integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int sorgtr_(char *, integer *, real *, integer *,
+ real *, real *, integer *, integer *), ssteqr_(char *,
+ integer *, real *, real *, real *, integer *, real *, integer *), sormtr_(char *, char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *, real *, integer *,
+ integer *), ssytrd_(char *, integer *,
+ real *, integer *, real *, real *, real *, real *, integer *,
+ integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYEVX computes selected eigenvalues and, optionally, eigenvectors */
+/* of a real symmetric matrix A. Eigenvalues and eigenvectors can be */
+/* selected by specifying either a range of values or a range of indices */
+/* for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA, N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+/* On exit, the lower triangle (if UPLO='L') or the upper */
+/* triangle (if UPLO='U') of A, including the diagonal, is */
+/* destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) REAL */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing A to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*SLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*SLAMCH('S'). */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices */
+/* with Guaranteed High Relative Accuracy," by Demmel and */
+/* Kahan, LAPACK Working Note #3. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) REAL array, dimension (N) */
+/* On normal exit, the first M elements contain the selected */
+/* eigenvalues in ascending order. */
+
+/* Z (output) REAL array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and the */
+/* index of the eigenvector is returned in IFAIL. */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= 1, when N <= 1; */
+/* otherwise 8*N. */
+/* For optimal efficiency, LWORK >= (NB+3)*N, */
+/* where NB is the max of the blocksize for SSYTRD and SORMTR */
+/* returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, then i eigenvectors failed to converge. */
+/* Their indices are stored in array IFAIL. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ lower = lsame_(uplo, "L");
+ wantz = lsame_(jobz, "V");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+ lquery = *lwork == -1;
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -2;
+ } else if (! (lower || lsame_(uplo, "U"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -8;
+ }
+ } else if (indeig) {
+ if (*il < 1 || *il > max(1,*n)) {
+ *info = -9;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -10;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -15;
+ }
+ }
+
+ if (*info == 0) {
+ if (*n <= 1) {
+ lwkmin = 1;
+ work[1] = (real) lwkmin;
+ } else {
+ lwkmin = *n << 3;
+ nb = ilaenv_(&c__1, "SSYTRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = nb, i__2 = ilaenv_(&c__1, "SORMTR", uplo, n, &c_n1, &c_n1,
+ &c_n1);
+ nb = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = lwkmin, i__2 = (nb + 3) * *n;
+ lwkopt = max(i__1,i__2);
+ work[1] = (real) lwkopt;
+ }
+
+ if (*lwork < lwkmin && ! lquery) {
+ *info = -17;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSYEVX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (alleig || indeig) {
+ *m = 1;
+ w[1] = a[a_dim1 + 1];
+ } else {
+ if (*vl < a[a_dim1 + 1] && *vu >= a[a_dim1 + 1]) {
+ *m = 1;
+ w[1] = a[a_dim1 + 1];
+ }
+ }
+ if (wantz) {
+ z__[z_dim1 + 1] = 1.f;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = slamch_("Safe minimum");
+ eps = slamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1.f / smlnum;
+ rmin = sqrt(smlnum);
+/* Computing MIN */
+ r__1 = sqrt(bignum), r__2 = 1.f / sqrt(sqrt(safmin));
+ rmax = dmin(r__1,r__2);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ iscale = 0;
+ abstll = *abstol;
+ if (valeig) {
+ vll = *vl;
+ vuu = *vu;
+ }
+ anrm = slansy_("M", uplo, n, &a[a_offset], lda, &work[1]);
+ if (anrm > 0.f && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ if (lower) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j + 1;
+ sscal_(&i__2, &sigma, &a[j + j * a_dim1], &c__1);
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sscal_(&j, &sigma, &a[j * a_dim1 + 1], &c__1);
+/* L20: */
+ }
+ }
+ if (*abstol > 0.f) {
+ abstll = *abstol * sigma;
+ }
+ if (valeig) {
+ vll = *vl * sigma;
+ vuu = *vu * sigma;
+ }
+ }
+
+/* Call SSYTRD to reduce symmetric matrix to tridiagonal form. */
+
+ indtau = 1;
+ inde = indtau + *n;
+ indd = inde + *n;
+ indwrk = indd + *n;
+ llwork = *lwork - indwrk + 1;
+ ssytrd_(uplo, n, &a[a_offset], lda, &work[indd], &work[inde], &work[
+ indtau], &work[indwrk], &llwork, &iinfo);
+
+/* If all eigenvalues are desired and ABSTOL is less than or equal to */
+/* zero, then call SSTERF or SORGTR and SSTEQR. If this fails for */
+/* some eigenvalue, then try SSTEBZ. */
+
+ test = FALSE_;
+ if (indeig) {
+ if (*il == 1 && *iu == *n) {
+ test = TRUE_;
+ }
+ }
+ if ((alleig || test) && *abstol <= 0.f) {
+ scopy_(n, &work[indd], &c__1, &w[1], &c__1);
+ indee = indwrk + (*n << 1);
+ if (! wantz) {
+ i__1 = *n - 1;
+ scopy_(&i__1, &work[inde], &c__1, &work[indee], &c__1);
+ ssterf_(n, &w[1], &work[indee], info);
+ } else {
+ slacpy_("A", n, n, &a[a_offset], lda, &z__[z_offset], ldz);
+ sorgtr_(uplo, n, &z__[z_offset], ldz, &work[indtau], &work[indwrk]
+, &llwork, &iinfo);
+ i__1 = *n - 1;
+ scopy_(&i__1, &work[inde], &c__1, &work[indee], &c__1);
+ ssteqr_(jobz, n, &w[1], &work[indee], &z__[z_offset], ldz, &work[
+ indwrk], info);
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ifail[i__] = 0;
+/* L30: */
+ }
+ }
+ }
+ if (*info == 0) {
+ *m = *n;
+ goto L40;
+ }
+ *info = 0;
+ }
+
+/* Otherwise, call SSTEBZ and, if eigenvectors are desired, SSTEIN. */
+
+ if (wantz) {
+ *(unsigned char *)order = 'B';
+ } else {
+ *(unsigned char *)order = 'E';
+ }
+ indibl = 1;
+ indisp = indibl + *n;
+ indiwo = indisp + *n;
+ sstebz_(range, order, n, &vll, &vuu, il, iu, &abstll, &work[indd], &work[
+ inde], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &work[
+ indwrk], &iwork[indiwo], info);
+
+ if (wantz) {
+ sstein_(n, &work[indd], &work[inde], m, &w[1], &iwork[indibl], &iwork[
+ indisp], &z__[z_offset], ldz, &work[indwrk], &iwork[indiwo], &
+ ifail[1], info);
+
+/* Apply orthogonal matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by SSTEIN. */
+
+ indwkn = inde;
+ llwrkn = *lwork - indwkn + 1;
+ sormtr_("L", uplo, "N", n, m, &a[a_offset], lda, &work[indtau], &z__[
+ z_offset], ldz, &work[indwkn], &llwrkn, &iinfo);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+L40:
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *m;
+ } else {
+ imax = *info - 1;
+ }
+ r__1 = 1.f / sigma;
+ sscal_(&imax, &r__1, &w[1], &c__1);
+ }
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+ if (wantz) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = 0;
+ tmp1 = w[j];
+ i__2 = *m;
+ for (jj = j + 1; jj <= i__2; ++jj) {
+ if (w[jj] < tmp1) {
+ i__ = jj;
+ tmp1 = w[jj];
+ }
+/* L50: */
+ }
+
+ if (i__ != 0) {
+ itmp1 = iwork[indibl + i__ - 1];
+ w[i__] = w[j];
+ iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+ w[j] = tmp1;
+ iwork[indibl + j - 1] = itmp1;
+ sswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+ &c__1);
+ if (*info != 0) {
+ itmp1 = ifail[i__];
+ ifail[i__] = ifail[j];
+ ifail[j] = itmp1;
+ }
+ }
+/* L60: */
+ }
+ }
+
+/* Set WORK(1) to optimal workspace size. */
+
+ work[1] = (real) lwkopt;
+
+ return 0;
+
+/* End of SSYEVX */
+
+} /* ssyevx_ */
diff --git a/contrib/libs/clapack/ssygs2.c b/contrib/libs/clapack/ssygs2.c
new file mode 100644
index 0000000000..bb683e1944
--- /dev/null
+++ b/contrib/libs/clapack/ssygs2.c
@@ -0,0 +1,296 @@
+/* ssygs2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b6 = -1.f;
+static integer c__1 = 1;
+static real c_b27 = 1.f;
+
+/* Subroutine */ int ssygs2_(integer *itype, char *uplo, integer *n, real *a,
+ integer *lda, real *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+ real r__1;
+
+ /* Local variables */
+ integer k;
+ real ct, akk, bkk;
+ extern /* Subroutine */ int ssyr2_(char *, integer *, real *, real *,
+ integer *, real *, integer *, real *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int saxpy_(integer *, real *, real *, integer *,
+ real *, integer *), strmv_(char *, char *, char *, integer *,
+ real *, integer *, real *, integer *),
+ strsv_(char *, char *, char *, integer *, real *, integer *, real
+ *, integer *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYGS2 reduces a real symmetric-definite generalized eigenproblem */
+/* to standard form. */
+
+/* If ITYPE = 1, the problem is A*x = lambda*B*x, */
+/* and A is overwritten by inv(U')*A*inv(U) or inv(L)*A*inv(L') */
+
+/* If ITYPE = 2 or 3, the problem is A*B*x = lambda*x or */
+/* B*A*x = lambda*x, and A is overwritten by U*A*U` or L'*A*L. */
+
+/* B must have been previously factorized as U'*U or L*L' by SPOTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* = 1: compute inv(U')*A*inv(U) or inv(L)*A*inv(L'); */
+/* = 2 or 3: compute U*A*U' or L'*A*L. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored, and how B has been factorized. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the transformed matrix, stored in the */
+/* same format as A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input) REAL array, dimension (LDB,N) */
+/* The triangular factor from the Cholesky factorization of B, */
+/* as returned by SPOTRF. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSYGS2", &i__1);
+ return 0;
+ }
+
+ if (*itype == 1) {
+ if (upper) {
+
+/* Compute inv(U')*A*inv(U) */
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+
+/* Update the upper triangle of A(k:n,k:n) */
+
+ akk = a[k + k * a_dim1];
+ bkk = b[k + k * b_dim1];
+/* Computing 2nd power */
+ r__1 = bkk;
+ akk /= r__1 * r__1;
+ a[k + k * a_dim1] = akk;
+ if (k < *n) {
+ i__2 = *n - k;
+ r__1 = 1.f / bkk;
+ sscal_(&i__2, &r__1, &a[k + (k + 1) * a_dim1], lda);
+ ct = akk * -.5f;
+ i__2 = *n - k;
+ saxpy_(&i__2, &ct, &b[k + (k + 1) * b_dim1], ldb, &a[k + (
+ k + 1) * a_dim1], lda);
+ i__2 = *n - k;
+ ssyr2_(uplo, &i__2, &c_b6, &a[k + (k + 1) * a_dim1], lda,
+ &b[k + (k + 1) * b_dim1], ldb, &a[k + 1 + (k + 1)
+ * a_dim1], lda);
+ i__2 = *n - k;
+ saxpy_(&i__2, &ct, &b[k + (k + 1) * b_dim1], ldb, &a[k + (
+ k + 1) * a_dim1], lda);
+ i__2 = *n - k;
+ strsv_(uplo, "Transpose", "Non-unit", &i__2, &b[k + 1 + (
+ k + 1) * b_dim1], ldb, &a[k + (k + 1) * a_dim1],
+ lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute inv(L)*A*inv(L') */
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+
+/* Update the lower triangle of A(k:n,k:n) */
+
+ akk = a[k + k * a_dim1];
+ bkk = b[k + k * b_dim1];
+/* Computing 2nd power */
+ r__1 = bkk;
+ akk /= r__1 * r__1;
+ a[k + k * a_dim1] = akk;
+ if (k < *n) {
+ i__2 = *n - k;
+ r__1 = 1.f / bkk;
+ sscal_(&i__2, &r__1, &a[k + 1 + k * a_dim1], &c__1);
+ ct = akk * -.5f;
+ i__2 = *n - k;
+ saxpy_(&i__2, &ct, &b[k + 1 + k * b_dim1], &c__1, &a[k +
+ 1 + k * a_dim1], &c__1);
+ i__2 = *n - k;
+ ssyr2_(uplo, &i__2, &c_b6, &a[k + 1 + k * a_dim1], &c__1,
+ &b[k + 1 + k * b_dim1], &c__1, &a[k + 1 + (k + 1)
+ * a_dim1], lda);
+ i__2 = *n - k;
+ saxpy_(&i__2, &ct, &b[k + 1 + k * b_dim1], &c__1, &a[k +
+ 1 + k * a_dim1], &c__1);
+ i__2 = *n - k;
+ strsv_(uplo, "No transpose", "Non-unit", &i__2, &b[k + 1
+ + (k + 1) * b_dim1], ldb, &a[k + 1 + k * a_dim1],
+ &c__1);
+ }
+/* L20: */
+ }
+ }
+ } else {
+ if (upper) {
+
+/* Compute U*A*U' */
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+
+/* Update the upper triangle of A(1:k,1:k) */
+
+ akk = a[k + k * a_dim1];
+ bkk = b[k + k * b_dim1];
+ i__2 = k - 1;
+ strmv_(uplo, "No transpose", "Non-unit", &i__2, &b[b_offset],
+ ldb, &a[k * a_dim1 + 1], &c__1);
+ ct = akk * .5f;
+ i__2 = k - 1;
+ saxpy_(&i__2, &ct, &b[k * b_dim1 + 1], &c__1, &a[k * a_dim1 +
+ 1], &c__1);
+ i__2 = k - 1;
+ ssyr2_(uplo, &i__2, &c_b27, &a[k * a_dim1 + 1], &c__1, &b[k *
+ b_dim1 + 1], &c__1, &a[a_offset], lda);
+ i__2 = k - 1;
+ saxpy_(&i__2, &ct, &b[k * b_dim1 + 1], &c__1, &a[k * a_dim1 +
+ 1], &c__1);
+ i__2 = k - 1;
+ sscal_(&i__2, &bkk, &a[k * a_dim1 + 1], &c__1);
+/* Computing 2nd power */
+ r__1 = bkk;
+ a[k + k * a_dim1] = akk * (r__1 * r__1);
+/* L30: */
+ }
+ } else {
+
+/* Compute L'*A*L */
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+
+/* Update the lower triangle of A(1:k,1:k) */
+
+ akk = a[k + k * a_dim1];
+ bkk = b[k + k * b_dim1];
+ i__2 = k - 1;
+ strmv_(uplo, "Transpose", "Non-unit", &i__2, &b[b_offset],
+ ldb, &a[k + a_dim1], lda);
+ ct = akk * .5f;
+ i__2 = k - 1;
+ saxpy_(&i__2, &ct, &b[k + b_dim1], ldb, &a[k + a_dim1], lda);
+ i__2 = k - 1;
+ ssyr2_(uplo, &i__2, &c_b27, &a[k + a_dim1], lda, &b[k +
+ b_dim1], ldb, &a[a_offset], lda);
+ i__2 = k - 1;
+ saxpy_(&i__2, &ct, &b[k + b_dim1], ldb, &a[k + a_dim1], lda);
+ i__2 = k - 1;
+ sscal_(&i__2, &bkk, &a[k + a_dim1], lda);
+/* Computing 2nd power */
+ r__1 = bkk;
+ a[k + k * a_dim1] = akk * (r__1 * r__1);
+/* L40: */
+ }
+ }
+ }
+ return 0;
+
+/* End of SSYGS2 */
+
+} /* ssygs2_ */
diff --git a/contrib/libs/clapack/ssygst.c b/contrib/libs/clapack/ssygst.c
new file mode 100644
index 0000000000..4dffa59771
--- /dev/null
+++ b/contrib/libs/clapack/ssygst.c
@@ -0,0 +1,342 @@
+/* ssygst.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b14 = 1.f;
+static real c_b16 = -.5f;
+static real c_b19 = -1.f;
+static real c_b52 = .5f;
+
+/* Subroutine */ int ssygst_(integer *itype, char *uplo, integer *n, real *a,
+ integer *lda, real *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer k, kb, nb;
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern /* Subroutine */ int strmm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+), ssymm_(char *, char *, integer
+ *, integer *, real *, real *, integer *, real *, integer *, real *
+, real *, integer *), strsm_(char *, char *, char
+ *, char *, integer *, integer *, real *, real *, integer *, real *
+, integer *), ssygs2_(integer *,
+ char *, integer *, real *, integer *, real *, integer *, integer *
+), ssyr2k_(char *, char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYGST reduces a real symmetric-definite generalized eigenproblem */
+/* to standard form. */
+
+/* If ITYPE = 1, the problem is A*x = lambda*B*x, */
+/* and A is overwritten by inv(U**T)*A*inv(U) or inv(L)*A*inv(L**T) */
+
+/* If ITYPE = 2 or 3, the problem is A*B*x = lambda*x or */
+/* B*A*x = lambda*x, and A is overwritten by U*A*U**T or L**T*A*L. */
+
+/* B must have been previously factorized as U**T*U or L*L**T by SPOTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* = 1: compute inv(U**T)*A*inv(U) or inv(L)*A*inv(L**T); */
+/* = 2 or 3: compute U*A*U**T or L**T*A*L. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored and B is factored as */
+/* U**T*U; */
+/* = 'L': Lower triangle of A is stored and B is factored as */
+/* L*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the transformed matrix, stored in the */
+/* same format as A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input) REAL array, dimension (LDB,N) */
+/* The triangular factor from the Cholesky factorization of B, */
+/* as returned by SPOTRF. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSYGST", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "SSYGST", uplo, n, &c_n1, &c_n1, &c_n1);
+
+ if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code */
+
+ ssygs2_(itype, uplo, n, &a[a_offset], lda, &b[b_offset], ldb, info);
+ } else {
+
+/* Use blocked code */
+
+ if (*itype == 1) {
+ if (upper) {
+
+/* Compute inv(U')*A*inv(U) */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (k = 1; i__2 < 0 ? k >= i__1 : k <= i__1; k += i__2) {
+/* Computing MIN */
+ i__3 = *n - k + 1;
+ kb = min(i__3,nb);
+
+/* Update the upper triangle of A(k:n,k:n) */
+
+ ssygs2_(itype, uplo, &kb, &a[k + k * a_dim1], lda, &b[k +
+ k * b_dim1], ldb, info);
+ if (k + kb <= *n) {
+ i__3 = *n - k - kb + 1;
+ strsm_("Left", uplo, "Transpose", "Non-unit", &kb, &
+ i__3, &c_b14, &b[k + k * b_dim1], ldb, &a[k +
+ (k + kb) * a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ ssymm_("Left", uplo, &kb, &i__3, &c_b16, &a[k + k *
+ a_dim1], lda, &b[k + (k + kb) * b_dim1], ldb,
+ &c_b14, &a[k + (k + kb) * a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ ssyr2k_(uplo, "Transpose", &i__3, &kb, &c_b19, &a[k +
+ (k + kb) * a_dim1], lda, &b[k + (k + kb) *
+ b_dim1], ldb, &c_b14, &a[k + kb + (k + kb) *
+ a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ ssymm_("Left", uplo, &kb, &i__3, &c_b16, &a[k + k *
+ a_dim1], lda, &b[k + (k + kb) * b_dim1], ldb,
+ &c_b14, &a[k + (k + kb) * a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ strsm_("Right", uplo, "No transpose", "Non-unit", &kb,
+ &i__3, &c_b14, &b[k + kb + (k + kb) * b_dim1]
+, ldb, &a[k + (k + kb) * a_dim1], lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute inv(L)*A*inv(L') */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (k = 1; i__1 < 0 ? k >= i__2 : k <= i__2; k += i__1) {
+/* Computing MIN */
+ i__3 = *n - k + 1;
+ kb = min(i__3,nb);
+
+/* Update the lower triangle of A(k:n,k:n) */
+
+ ssygs2_(itype, uplo, &kb, &a[k + k * a_dim1], lda, &b[k +
+ k * b_dim1], ldb, info);
+ if (k + kb <= *n) {
+ i__3 = *n - k - kb + 1;
+ strsm_("Right", uplo, "Transpose", "Non-unit", &i__3,
+ &kb, &c_b14, &b[k + k * b_dim1], ldb, &a[k +
+ kb + k * a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ ssymm_("Right", uplo, &i__3, &kb, &c_b16, &a[k + k *
+ a_dim1], lda, &b[k + kb + k * b_dim1], ldb, &
+ c_b14, &a[k + kb + k * a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ ssyr2k_(uplo, "No transpose", &i__3, &kb, &c_b19, &a[
+ k + kb + k * a_dim1], lda, &b[k + kb + k *
+ b_dim1], ldb, &c_b14, &a[k + kb + (k + kb) *
+ a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ ssymm_("Right", uplo, &i__3, &kb, &c_b16, &a[k + k *
+ a_dim1], lda, &b[k + kb + k * b_dim1], ldb, &
+ c_b14, &a[k + kb + k * a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ strsm_("Left", uplo, "No transpose", "Non-unit", &
+ i__3, &kb, &c_b14, &b[k + kb + (k + kb) *
+ b_dim1], ldb, &a[k + kb + k * a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+ } else {
+ if (upper) {
+
+/* Compute U*A*U' */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (k = 1; i__2 < 0 ? k >= i__1 : k <= i__1; k += i__2) {
+/* Computing MIN */
+ i__3 = *n - k + 1;
+ kb = min(i__3,nb);
+
+/* Update the upper triangle of A(1:k+kb-1,1:k+kb-1) */
+
+ i__3 = k - 1;
+ strmm_("Left", uplo, "No transpose", "Non-unit", &i__3, &
+ kb, &c_b14, &b[b_offset], ldb, &a[k * a_dim1 + 1],
+ lda)
+ ;
+ i__3 = k - 1;
+ ssymm_("Right", uplo, &i__3, &kb, &c_b52, &a[k + k *
+ a_dim1], lda, &b[k * b_dim1 + 1], ldb, &c_b14, &a[
+ k * a_dim1 + 1], lda);
+ i__3 = k - 1;
+ ssyr2k_(uplo, "No transpose", &i__3, &kb, &c_b14, &a[k *
+ a_dim1 + 1], lda, &b[k * b_dim1 + 1], ldb, &c_b14,
+ &a[a_offset], lda);
+ i__3 = k - 1;
+ ssymm_("Right", uplo, &i__3, &kb, &c_b52, &a[k + k *
+ a_dim1], lda, &b[k * b_dim1 + 1], ldb, &c_b14, &a[
+ k * a_dim1 + 1], lda);
+ i__3 = k - 1;
+ strmm_("Right", uplo, "Transpose", "Non-unit", &i__3, &kb,
+ &c_b14, &b[k + k * b_dim1], ldb, &a[k * a_dim1 +
+ 1], lda);
+ ssygs2_(itype, uplo, &kb, &a[k + k * a_dim1], lda, &b[k +
+ k * b_dim1], ldb, info);
+/* L30: */
+ }
+ } else {
+
+/* Compute L'*A*L */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (k = 1; i__1 < 0 ? k >= i__2 : k <= i__2; k += i__1) {
+/* Computing MIN */
+ i__3 = *n - k + 1;
+ kb = min(i__3,nb);
+
+/* Update the lower triangle of A(1:k+kb-1,1:k+kb-1) */
+
+ i__3 = k - 1;
+ strmm_("Right", uplo, "No transpose", "Non-unit", &kb, &
+ i__3, &c_b14, &b[b_offset], ldb, &a[k + a_dim1],
+ lda);
+ i__3 = k - 1;
+ ssymm_("Left", uplo, &kb, &i__3, &c_b52, &a[k + k *
+ a_dim1], lda, &b[k + b_dim1], ldb, &c_b14, &a[k +
+ a_dim1], lda);
+ i__3 = k - 1;
+ ssyr2k_(uplo, "Transpose", &i__3, &kb, &c_b14, &a[k +
+ a_dim1], lda, &b[k + b_dim1], ldb, &c_b14, &a[
+ a_offset], lda);
+ i__3 = k - 1;
+ ssymm_("Left", uplo, &kb, &i__3, &c_b52, &a[k + k *
+ a_dim1], lda, &b[k + b_dim1], ldb, &c_b14, &a[k +
+ a_dim1], lda);
+ i__3 = k - 1;
+ strmm_("Left", uplo, "Transpose", "Non-unit", &kb, &i__3,
+ &c_b14, &b[k + k * b_dim1], ldb, &a[k + a_dim1],
+ lda);
+ ssygs2_(itype, uplo, &kb, &a[k + k * a_dim1], lda, &b[k +
+ k * b_dim1], ldb, info);
+/* L40: */
+ }
+ }
+ }
+ }
+ return 0;
+
+/* End of SSYGST */
+
+} /* ssygst_ */
diff --git a/contrib/libs/clapack/ssygv.c b/contrib/libs/clapack/ssygv.c
new file mode 100644
index 0000000000..b27e8d56fb
--- /dev/null
+++ b/contrib/libs/clapack/ssygv.c
@@ -0,0 +1,283 @@
+/* ssygv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b16 = 1.f;
+
+/* Subroutine */ int ssygv_(integer *itype, char *jobz, char *uplo, integer *
+ n, real *a, integer *lda, real *b, integer *ldb, real *w, real *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+
+ /* Local variables */
+ integer nb, neig;
+ extern logical lsame_(char *, char *);
+ char trans[1];
+ logical upper;
+ extern /* Subroutine */ int strmm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+);
+ logical wantz;
+ extern /* Subroutine */ int strsm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+), ssyev_(char *, char *, integer
+ *, real *, integer *, real *, real *, integer *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer lwkmin;
+ extern /* Subroutine */ int spotrf_(char *, integer *, real *, integer *,
+ integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int ssygst_(integer *, char *, integer *, real *,
+ integer *, real *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYGV computes all the eigenvalues, and optionally, the eigenvectors */
+/* of a real generalized symmetric-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. */
+/* Here A and B are assumed to be symmetric and B is also */
+/* positive definite. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA, N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+
+/* On exit, if JOBZ = 'V', then if INFO = 0, A contains the */
+/* matrix Z of eigenvectors. The eigenvectors are normalized */
+/* as follows: */
+/* if ITYPE = 1 or 2, Z**T*B*Z = I; */
+/* if ITYPE = 3, Z**T*inv(B)*Z = I. */
+/* If JOBZ = 'N', then on exit the upper triangle (if UPLO='U') */
+/* or the lower triangle (if UPLO='L') of A, including the */
+/* diagonal, is destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) REAL array, dimension (LDB, N) */
+/* On entry, the symmetric positive definite matrix B. */
+/* If UPLO = 'U', the leading N-by-N upper triangular part of B */
+/* contains the upper triangular part of the matrix B. */
+/* If UPLO = 'L', the leading N-by-N lower triangular part of B */
+/* contains the lower triangular part of the matrix B. */
+
+/* On exit, if INFO <= N, the part of B containing the matrix is */
+/* overwritten by the triangular factor U or L from the Cholesky */
+/* factorization B = U**T*U or B = L*L**T. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,3*N-1). */
+/* For optimal efficiency, LWORK >= (NB+2)*N, */
+/* where NB is the blocksize for SSYTRD returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: SPOTRF or SSYEV returned an error code: */
+/* <= N: if INFO = i, SSYEV failed to converge; */
+/* i off-diagonal elements of an intermediate */
+/* tridiagonal form did not converge to zero; */
+/* > N: if INFO = N + i, for 1 <= i <= N, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --w;
+ --work;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1;
+
+ *info = 0;
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -2;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ }
+
+ if (*info == 0) {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n * 3 - 1;
+ lwkmin = max(i__1,i__2);
+ nb = ilaenv_(&c__1, "SSYTRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = lwkmin, i__2 = (nb + 2) * *n;
+ lwkopt = max(i__1,i__2);
+ work[1] = (real) lwkopt;
+
+ if (*lwork < lwkmin && ! lquery) {
+ *info = -11;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSYGV ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a Cholesky factorization of B. */
+
+ spotrf_(uplo, n, &b[b_offset], ldb, info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+ ssygst_(itype, uplo, n, &a[a_offset], lda, &b[b_offset], ldb, info);
+ ssyev_(jobz, uplo, n, &a[a_offset], lda, &w[1], &work[1], lwork, info);
+
+ if (wantz) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+ neig = *n;
+ if (*info > 0) {
+ neig = *info - 1;
+ }
+ if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'N';
+ } else {
+ *(unsigned char *)trans = 'T';
+ }
+
+ strsm_("Left", uplo, trans, "Non-unit", n, &neig, &c_b16, &b[
+ b_offset], ldb, &a[a_offset], lda);
+
+ } else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'T';
+ } else {
+ *(unsigned char *)trans = 'N';
+ }
+
+ strmm_("Left", uplo, trans, "Non-unit", n, &neig, &c_b16, &b[
+ b_offset], ldb, &a[a_offset], lda);
+ }
+ }
+
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SSYGV */
+
+} /* ssygv_ */
diff --git a/contrib/libs/clapack/ssygvd.c b/contrib/libs/clapack/ssygvd.c
new file mode 100644
index 0000000000..fb1a2d097d
--- /dev/null
+++ b/contrib/libs/clapack/ssygvd.c
@@ -0,0 +1,337 @@
+/* ssygvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b11 = 1.f;
+
+/* Subroutine */ int ssygvd_(integer *itype, char *jobz, char *uplo, integer *
+	n, real *a, integer *lda, real *b, integer *ldb, real *w, real *work,
+	integer *lwork, integer *iwork, integer *liwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+    real r__1, r__2;
+
+    /* Local variables */
+    integer lopt;
+    extern logical lsame_(char *, char *);
+    integer lwmin;
+    char trans[1];
+    integer liopt;
+    logical upper;
+    extern /* Subroutine */ int strmm_(char *, char *, char *, char *,
+	    integer *, integer *, real *, real *, integer *, real *, integer *
+);
+    logical wantz;
+    extern /* Subroutine */ int strsm_(char *, char *, char *, char *,
+	    integer *, integer *, real *, real *, integer *, real *, integer *
+), xerbla_(char *, integer *);
+    integer liwmin;
+    extern /* Subroutine */ int spotrf_(char *, integer *, real *, integer *,
+	    integer *), ssyevd_(char *, char *, integer *, real *,
+	    integer *, real *, real *, integer *, integer *, integer *,
+	    integer *);
+    logical lquery;
+    extern /* Subroutine */ int ssygst_(integer *, char *, integer *, real *,
+	    integer *, real *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYGVD computes all the eigenvalues, and optionally, the eigenvectors */
+/* of a real generalized symmetric-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. Here A and */
+/* B are assumed to be symmetric and B is also positive definite. */
+/* If eigenvectors are desired, it uses a divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA, N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+
+/* On exit, if JOBZ = 'V', then if INFO = 0, A contains the */
+/* matrix Z of eigenvectors. The eigenvectors are normalized */
+/* as follows: */
+/* if ITYPE = 1 or 2, Z**T*B*Z = I; */
+/* if ITYPE = 3, Z**T*inv(B)*Z = I. */
+/* If JOBZ = 'N', then on exit the upper triangle (if UPLO='U') */
+/* or the lower triangle (if UPLO='L') of A, including the */
+/* diagonal, is destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) REAL array, dimension (LDB, N) */
+/* On entry, the symmetric matrix B. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of B contains the */
+/* upper triangular part of the matrix B. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of B contains */
+/* the lower triangular part of the matrix B. */
+
+/* On exit, if INFO <= N, the part of B containing the matrix is */
+/* overwritten by the triangular factor U or L from the Cholesky */
+/* factorization B = U**T*U or B = L*L**T. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* W (output) REAL array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If N <= 1, LWORK >= 1. */
+/* If JOBZ = 'N' and N > 1, LWORK >= 2*N+1. */
+/* If JOBZ = 'V' and N > 1, LWORK >= 1 + 6*N + 2*N**2. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK and IWORK */
+/* arrays, returns these values as the first entries of the WORK */
+/* and IWORK arrays, and no error message related to LWORK or */
+/* LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If N <= 1, LIWORK >= 1. */
+/* If JOBZ = 'N' and N > 1, LIWORK >= 1. */
+/* If JOBZ = 'V' and N > 1, LIWORK >= 3 + 5*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK and IWORK arrays, and no error message related to */
+/* LWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: SPOTRF or SSYEVD returned an error code: */
+/* <= N: if INFO = i and JOBZ = 'N', then the algorithm */
+/* failed to converge; i off-diagonal elements of an */
+/* intermediate tridiagonal form did not converge to */
+/* zero; */
+/* if INFO = i and JOBZ = 'V', then the algorithm */
+/* failed to compute an eigenvalue while working on */
+/* the submatrix lying in rows and columns INFO/(N+1) */
+/* through mod(INFO,N+1); */
+/* > N: if INFO = N + i, for 1 <= i <= N, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* Modified so that no backsubstitution is performed if SSYEVD fails to */
+/* converge (NEIG in old code could be greater than N causing out of */
+/* bounds reference to A - reported by Ralf Meyer). Also corrected the */
+/* description of INFO and the test on ITYPE. Sven, 16 Feb 05. */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* NOTE(review): f2c idiom -- each array pointer is shifted back by one */
+/* element (and one leading dimension for 2-D arrays) so that the */
+/* 1-based Fortran reference A(i,j) maps onto a[i + j*a_dim1] in C. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --w;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    upper = lsame_(uplo, "U");
+    lquery = *lwork == -1 || *liwork == -1;
+
+    *info = 0;
+    if (*n <= 1) {
+	liwmin = 1;
+	lwmin = 1;
+    } else if (wantz) {
+	liwmin = *n * 5 + 3;
+/* Computing 2nd power */
+	i__1 = *n;
+	lwmin = *n * 6 + 1 + (i__1 * i__1 << 1);
+    } else {
+	liwmin = 1;
+	lwmin = (*n << 1) + 1;
+    }
+    lopt = lwmin;
+    liopt = liwmin;
+    if (*itype < 1 || *itype > 3) {
+	*info = -1;
+    } else if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -2;
+    } else if (! (upper || lsame_(uplo, "L"))) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*n)) {
+	*info = -6;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    }
+
+    if (*info == 0) {
+	work[1] = (real) lopt;
+	iwork[1] = liopt;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -11;
+	} else if (*liwork < liwmin && ! lquery) {
+	    *info = -13;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SSYGVD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Form a Cholesky factorization of B. */
+
+    spotrf_(uplo, n, &b[b_offset], ldb, info);
+    if (*info != 0) {
+/* NOTE(review): SPOTRF failed at leading minor i; re-encode as */
+/* INFO = N + i per the INFO contract documented above. */
+	*info = *n + *info;
+	return 0;
+    }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+    ssygst_(itype, uplo, n, &a[a_offset], lda, &b[b_offset], ldb, info);
+    ssyevd_(jobz, uplo, n, &a[a_offset], lda, &w[1], &work[1], lwork, &iwork[
+	    1], liwork, info);
+/* Computing MAX */
+    r__1 = (real) lopt;
+    lopt = dmax(r__1,work[1]);
+/* Computing MAX */
+    r__1 = (real) liopt, r__2 = (real) iwork[1];
+    liopt = dmax(r__1,r__2);
+
+    if (wantz && *info == 0) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+	if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+	    if (upper) {
+/* NOTE(review): writing through unsigned char is f2c's idiom for */
+/* assigning a Fortran CHARACTER*1 local variable. */
+		*(unsigned char *)trans = 'N';
+	    } else {
+		*(unsigned char *)trans = 'T';
+	    }
+
+	    strsm_("Left", uplo, trans, "Non-unit", n, n, &c_b11, &b[b_offset]
+, ldb, &a[a_offset], lda);
+
+	} else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+	    if (upper) {
+		*(unsigned char *)trans = 'T';
+	    } else {
+		*(unsigned char *)trans = 'N';
+	    }
+
+	    strmm_("Left", uplo, trans, "Non-unit", n, n, &c_b11, &b[b_offset]
+, ldb, &a[a_offset], lda);
+	}
+    }
+
+    work[1] = (real) lopt;
+    iwork[1] = liopt;
+
+    return 0;
+
+/* End of SSYGVD */
+
+} /* ssygvd_ */
diff --git a/contrib/libs/clapack/ssygvx.c b/contrib/libs/clapack/ssygvx.c
new file mode 100644
index 0000000000..28c8c3a3f6
--- /dev/null
+++ b/contrib/libs/clapack/ssygvx.c
@@ -0,0 +1,395 @@
+/* ssygvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static real c_b19 = 1.f;
+
+/* Subroutine */ int ssygvx_(integer *itype, char *jobz, char *range, char *
+	uplo, integer *n, real *a, integer *lda, real *b, integer *ldb, real *
+	vl, real *vu, integer *il, integer *iu, real *abstol, integer *m,
+	real *w, real *z__, integer *ldz, real *work, integer *lwork, integer
+	*iwork, integer *ifail, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, z_dim1, z_offset, i__1, i__2;
+
+    /* Local variables */
+    integer nb;
+    extern logical lsame_(char *, char *);
+    char trans[1];
+    logical upper;
+    extern /* Subroutine */ int strmm_(char *, char *, char *, char *,
+	    integer *, integer *, real *, real *, integer *, real *, integer *
+);
+    logical wantz;
+    extern /* Subroutine */ int strsm_(char *, char *, char *, char *,
+	    integer *, integer *, real *, real *, integer *, real *, integer *
+);
+    logical alleig, indeig, valeig;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer lwkmin;
+    extern /* Subroutine */ int spotrf_(char *, integer *, real *, integer *,
+	    integer *);
+    integer lwkopt;
+    logical lquery;
+    extern /* Subroutine */ int ssygst_(integer *, char *, integer *, real *,
+	    integer *, real *, integer *, integer *), ssyevx_(char *,
+	    char *, char *, integer *, real *, integer *, real *, real *,
+	    integer *, integer *, real *, integer *, real *, real *, integer *
+, real *, integer *, integer *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYGVX computes selected eigenvalues, and optionally, eigenvectors */
+/* of a real generalized symmetric-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. Here A */
+/* and B are assumed to be symmetric and B is also positive definite. */
+/* Eigenvalues and eigenvectors can be selected by specifying either a */
+/* range of values or a range of indices for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A and B are stored; */
+/* = 'L': Lower triangle of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix pencil (A,B). N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA, N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+
+/* On exit, the lower triangle (if UPLO='L') or the upper */
+/* triangle (if UPLO='U') of A, including the diagonal, is */
+/* destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) REAL array, dimension (LDA, N) */
+/* On entry, the symmetric matrix B. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of B contains the */
+/* upper triangular part of the matrix B. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of B contains */
+/* the lower triangular part of the matrix B. */
+
+/* On exit, if INFO <= N, the part of B containing the matrix is */
+/* overwritten by the triangular factor U or L from the Cholesky */
+/* factorization B = U**T*U or B = L*L**T. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* VL (input) REAL */
+/* VU (input) REAL */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) REAL */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing A to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*DLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*SLAMCH('S'). */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) REAL array, dimension (N) */
+/* On normal exit, the first M elements contain the selected */
+/* eigenvalues in ascending order. */
+
+/* Z (output) REAL array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* The eigenvectors are normalized as follows: */
+/* if ITYPE = 1 or 2, Z**T*B*Z = I; */
+/* if ITYPE = 3, Z**T*inv(B)*Z = I. */
+
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and the */
+/* index of the eigenvector is returned in IFAIL. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,8*N). */
+/* For optimal efficiency, LWORK >= (NB+3)*N, */
+/* where NB is the blocksize for SSYTRD returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: SPOTRF or SSYEVX returned an error code: */
+/* <= N: if INFO = i, SSYEVX failed to converge; */
+/* i eigenvectors failed to converge. Their indices */
+/* are stored in array IFAIL. */
+/* > N: if INFO = N + i, for 1 <= i <= N, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* NOTE(review): f2c idiom -- shift each array pointer back by one */
+/* element (one leading dimension for 2-D arrays) so 1-based Fortran */
+/* indexing A(i,j) maps onto a[i + j*a_dim1] in C. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --iwork;
+    --ifail;
+
+    /* Function Body */
+    upper = lsame_(uplo, "U");
+    wantz = lsame_(jobz, "V");
+    alleig = lsame_(range, "A");
+    valeig = lsame_(range, "V");
+    indeig = lsame_(range, "I");
+    lquery = *lwork == -1;
+
+    *info = 0;
+    if (*itype < 1 || *itype > 3) {
+	*info = -1;
+    } else if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -2;
+    } else if (! (alleig || valeig || indeig)) {
+	*info = -3;
+    } else if (! (upper || lsame_(uplo, "L"))) {
+	*info = -4;
+    } else if (*n < 0) {
+	*info = -5;
+    } else if (*lda < max(1,*n)) {
+	*info = -7;
+    } else if (*ldb < max(1,*n)) {
+	*info = -9;
+    } else {
+	if (valeig) {
+	    if (*n > 0 && *vu <= *vl) {
+		*info = -11;
+	    }
+	} else if (indeig) {
+	    if (*il < 1 || *il > max(1,*n)) {
+		*info = -12;
+	    } else if (*iu < min(*n,*il) || *iu > *n) {
+		*info = -13;
+	    }
+	}
+    }
+    if (*info == 0) {
+	if (*ldz < 1 || wantz && *ldz < *n) {
+	    *info = -18;
+	}
+    }
+
+    if (*info == 0) {
+/* NOTE(review): workspace sizing -- minimum is 8*N, optimal is */
+/* (NB+3)*N with NB the SSYTRD blocksize from ILAENV (see LWORK doc). */
+/* Computing MAX */
+	i__1 = 1, i__2 = *n << 3;
+	lwkmin = max(i__1,i__2);
+	nb = ilaenv_(&c__1, "SSYTRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+	i__1 = lwkmin, i__2 = (nb + 3) * *n;
+	lwkopt = max(i__1,i__2);
+	work[1] = (real) lwkopt;
+
+	if (*lwork < lwkmin && ! lquery) {
+	    *info = -20;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SSYGVX", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *m = 0;
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Form a Cholesky factorization of B. */
+
+    spotrf_(uplo, n, &b[b_offset], ldb, info);
+    if (*info != 0) {
+/* NOTE(review): SPOTRF failed at leading minor i; re-encode as */
+/* INFO = N + i per the INFO contract documented above. */
+	*info = *n + *info;
+	return 0;
+    }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+    ssygst_(itype, uplo, n, &a[a_offset], lda, &b[b_offset], ldb, info);
+    ssyevx_(jobz, range, uplo, n, &a[a_offset], lda, vl, vu, il, iu, abstol,
+	    m, &w[1], &z__[z_offset], ldz, &work[1], lwork, &iwork[1], &ifail[
+	    1], info);
+
+    if (wantz) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+	if (*info > 0) {
+/* NOTE(review): on SSYEVX failure, presumably only the first INFO-1 */
+/* columns of Z are converged eigenvectors, so the backtransform is */
+/* restricted to them -- verify against the SSYEVX INFO contract. */
+	    *m = *info - 1;
+	}
+	if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+	    if (upper) {
+/* NOTE(review): writing through unsigned char is f2c's idiom for */
+/* assigning a Fortran CHARACTER*1 local variable. */
+		*(unsigned char *)trans = 'N';
+	    } else {
+		*(unsigned char *)trans = 'T';
+	    }
+
+	    strsm_("Left", uplo, trans, "Non-unit", n, m, &c_b19, &b[b_offset]
+, ldb, &z__[z_offset], ldz);
+
+	} else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+	    if (upper) {
+		*(unsigned char *)trans = 'T';
+	    } else {
+		*(unsigned char *)trans = 'N';
+	    }
+
+	    strmm_("Left", uplo, trans, "Non-unit", n, m, &c_b19, &b[b_offset]
+, ldb, &z__[z_offset], ldz);
+	}
+    }
+
+/* Set WORK(1) to optimal workspace size. */
+
+    work[1] = (real) lwkopt;
+
+    return 0;
+
+/* End of SSYGVX */
+
+} /* ssygvx_ */
diff --git a/contrib/libs/clapack/ssyrfs.c b/contrib/libs/clapack/ssyrfs.c
new file mode 100644
index 0000000000..09a9585fbf
--- /dev/null
+++ b/contrib/libs/clapack/ssyrfs.c
@@ -0,0 +1,427 @@
+/* ssyrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b12 = -1.f;
+static real c_b14 = 1.f;
+
+/* Subroutine */ int ssyrfs_(char *uplo, integer *n, integer *nrhs, real *a,
+	integer *lda, real *af, integer *ldaf, integer *ipiv, real *b,
+	integer *ldb, real *x, integer *ldx, real *ferr, real *berr, real *
+	work, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+	    x_offset, i__1, i__2, i__3;
+    real r__1, r__2, r__3;
+
+    /* Local variables */
+    integer i__, j, k;
+    real s, xk;
+    integer nz;
+    real eps;
+    integer kase;
+    real safe1, safe2;
+    extern logical lsame_(char *, char *);
+    integer isave[3], count;
+    logical upper;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+	    integer *), saxpy_(integer *, real *, real *, integer *, real *,
+	    integer *), ssymv_(char *, integer *, real *, real *, integer *,
+	    real *, integer *, real *, real *, integer *), slacn2_(
+	    integer *, real *, real *, integer *, real *, integer *, integer *
+);
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    real lstres;
+    extern /* Subroutine */ int ssytrs_(char *, integer *, integer *, real *,
+	    integer *, integer *, real *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is symmetric indefinite, and */
+/* provides error bounds and backward error estimates for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The symmetric matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input) REAL array, dimension (LDAF,N) */
+/* The factored form of the matrix A. AF contains the block */
+/* diagonal matrix D and the multipliers used to obtain the */
+/* factor U or L from the factorization A = U*D*U**T or */
+/* A = L*D*L**T as computed by SSYTRF. */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by SSYTRF. */
+
+/* B (input) REAL array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) REAL array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by SSYTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* NOTE(review): f2c idiom -- shift each array pointer back by one */
+/* element (one leading dimension for 2-D arrays) so 1-based Fortran */
+/* indexing A(i,j) maps onto a[i + j*a_dim1] in C. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    af_dim1 = *ldaf;
+    af_offset = 1 + af_dim1;
+    af -= af_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldaf < max(1,*n)) {
+	*info = -7;
+    } else if (*ldb < max(1,*n)) {
+	*info = -10;
+    } else if (*ldx < max(1,*n)) {
+	*info = -12;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SSYRFS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] = 0.f;
+	    berr[j] = 0.f;
+/* L10: */
+	}
+	return 0;
+    }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+    nz = *n + 1;
+    eps = slamch_("Epsilon");
+    safmin = slamch_("Safe minimum");
+    safe1 = nz * safmin;
+    safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+	count = 1;
+	lstres = 3.f;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+/* NOTE(review): WORK is split in thirds: WORK(1..N) holds |A||X|+|B|, */
+/* WORK(N+1..2N) the residual, WORK(2N+1..3N) is SLACN2 scratch. */
+	scopy_(n, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+	ssymv_(uplo, n, &c_b12, &a[a_offset], lda, &x[j * x_dim1 + 1], &c__1,
+		&c_b14, &work[*n + 1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    work[i__] = (r__1 = b[i__ + j * b_dim1], dabs(r__1));
+/* L30: */
+	}
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+	if (upper) {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		s = 0.f;
+		xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+		i__3 = k - 1;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    work[i__] += (r__1 = a[i__ + k * a_dim1], dabs(r__1)) *
+			    xk;
+		    s += (r__1 = a[i__ + k * a_dim1], dabs(r__1)) * (r__2 = x[
+			    i__ + j * x_dim1], dabs(r__2));
+/* L40: */
+		}
+		work[k] = work[k] + (r__1 = a[k + k * a_dim1], dabs(r__1)) *
+			xk + s;
+/* L50: */
+	    }
+	} else {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		s = 0.f;
+		xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+		work[k] += (r__1 = a[k + k * a_dim1], dabs(r__1)) * xk;
+		i__3 = *n;
+		for (i__ = k + 1; i__ <= i__3; ++i__) {
+		    work[i__] += (r__1 = a[i__ + k * a_dim1], dabs(r__1)) *
+			    xk;
+		    s += (r__1 = a[i__ + k * a_dim1], dabs(r__1)) * (r__2 = x[
+			    i__ + j * x_dim1], dabs(r__2));
+/* L60: */
+		}
+		work[k] += s;
+/* L70: */
+	    }
+	}
+	s = 0.f;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (work[i__] > safe2) {
+/* Computing MAX */
+		r__2 = s, r__3 = (r__1 = work[*n + i__], dabs(r__1)) / work[
+			i__];
+		s = dmax(r__2,r__3);
+	    } else {
+/* Computing MAX */
+		r__2 = s, r__3 = ((r__1 = work[*n + i__], dabs(r__1)) + safe1)
+			 / (work[i__] + safe1);
+		s = dmax(r__2,r__3);
+	    }
+/* L80: */
+	}
+	berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+/* NOTE(review): the literal 5 below is ITMAX (see Internal Parameters). */
+	if (berr[j] > eps && berr[j] * 2.f <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+	    ssytrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[*n
+		    + 1], n, info);
+	    saxpy_(n, &c_b14, &work[*n + 1], &c__1, &x[j * x_dim1 + 1], &c__1)
+		    ;
+	    lstres = berr[j];
+	    ++count;
+	    goto L20;
+	}
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use SLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (work[i__] > safe2) {
+		work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+			work[i__];
+	    } else {
+		work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+			work[i__] + safe1;
+	    }
+/* L90: */
+	}
+
+/* NOTE(review): reverse-communication loop -- SLACN2 sets KASE to tell */
+/* the caller which product to apply next (1: diag(W)*inv(A'), */
+/* 2: inv(A)*diag(W)); ISAVE carries its state between calls, and */
+/* KASE = 0 signals the estimate in FERR(J) is final. */
+	kase = 0;
+L100:
+	slacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+		kase, isave);
+	if (kase != 0) {
+	    if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+		ssytrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[
+			*n + 1], n, info);
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    work[*n + i__] = work[i__] * work[*n + i__];
+/* L110: */
+		}
+	    } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    work[*n + i__] = work[i__] * work[*n + i__];
+/* L120: */
+		}
+		ssytrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[
+			*n + 1], n, info);
+	    }
+	    goto L100;
+	}
+
+/* Normalize error. */
+
+	lstres = 0.f;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    r__2 = lstres, r__3 = (r__1 = x[i__ + j * x_dim1], dabs(r__1));
+	    lstres = dmax(r__2,r__3);
+/* L130: */
+	}
+	if (lstres != 0.f) {
+	    ferr[j] /= lstres;
+	}
+
+/* L140: */
+    }
+
+    return 0;
+
+/* End of SSYRFS */
+
+} /* ssyrfs_ */
diff --git a/contrib/libs/clapack/ssysv.c b/contrib/libs/clapack/ssysv.c
new file mode 100644
index 0000000000..15b4ef7ce3
--- /dev/null
+++ b/contrib/libs/clapack/ssysv.c
@@ -0,0 +1,214 @@
+/* ssysv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int ssysv_(char *uplo, integer *n, integer *nrhs, real *a, 
+	integer *lda, integer *ipiv, real *b, integer *ldb, real *work, 
+	integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    integer nb;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    integer lwkopt;
+    logical lquery;
+    extern /* Subroutine */ int ssytrf_(char *, integer *, real *, integer *, 
+	    integer *, real *, integer *, integer *), ssytrs_(char *, 
+	    integer *, integer *, real *, integer *, integer *, real *, 
+	    integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYSV computes the solution to a real system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric matrix and X and B are N-by-NRHS */
+/* matrices. */
+
+/* The diagonal pivoting method is used to factor A as */
+/* A = U * D * U**T, if UPLO = 'U', or */
+/* A = L * D * L**T, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is symmetric and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. The factored form of A is then */
+/* used to solve the system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the block diagonal matrix D and the */
+/* multipliers used to obtain the factor U or L from the */
+/* factorization A = U*D*U**T or A = L*D*L**T as computed by */
+/* SSYTRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D, as */
+/* determined by SSYTRF. If IPIV(k) > 0, then rows and columns */
+/* k and IPIV(k) were interchanged, and D(k,k) is a 1-by-1 */
+/* diagonal block. If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, */
+/* then rows and columns k-1 and -IPIV(k) were interchanged and */
+/* D(k-1:k,k-1:k) is a 2-by-2 diagonal block. If UPLO = 'L' and */
+/* IPIV(k) = IPIV(k+1) < 0, then rows and columns k+1 and */
+/* -IPIV(k) were interchanged and D(k:k+1,k:k+1) is a 2-by-2 */
+/* diagonal block. */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of WORK. LWORK >= 1, and for best performance */
+/* LWORK >= max(1,N*NB), where NB is the optimal blocksize for */
+/* SSYTRF. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, so the solution could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+/* LWORK = -1 flags a workspace-size query only (see LWORK above): no */
+/* factorization or solve is performed in that case. */
+    lquery = *lwork == -1;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    } else if (*lwork < 1 && ! lquery) {
+	*info = -10;
+    }
+
+/* Arguments are valid: compute the optimal workspace (N*NB reals for */
+/* the blocked SSYTRF, with NB from ILAENV) and report it in WORK(1). */
+    if (*info == 0) {
+	if (*n == 0) {
+	    lwkopt = 1;
+	} else {
+	    nb = ilaenv_(&c__1, "SSYTRF", uplo, n, &c_n1, &c_n1, &c_n1);
+	    lwkopt = *n * nb;
+	}
+	work[1] = (real) lwkopt;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SSYSV ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+    ssytrf_(uplo, n, &a[a_offset], lda, &ipiv[1], &work[1], lwork, info);
+    if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+	ssytrs_(uplo, n, nrhs, &a[a_offset], lda, &ipiv[1], &b[b_offset], ldb, 
+		info);
+
+    }
+
+/* SSYTRF used WORK as scratch; restore the optimal LWORK in WORK(1). */
+    work[1] = (real) lwkopt;
+
+    return 0;
+
+/* End of SSYSV */
+
+} /* ssysv_ */
diff --git a/contrib/libs/clapack/ssysvx.c b/contrib/libs/clapack/ssysvx.c
new file mode 100644
index 0000000000..df5e8014a7
--- /dev/null
+++ b/contrib/libs/clapack/ssysvx.c
@@ -0,0 +1,368 @@
+/* ssysvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int ssysvx_(char *fact, char *uplo, integer *n, integer *
+	nrhs, real *a, integer *lda, real *af, integer *ldaf, integer *ipiv, 
+	real *b, integer *ldb, real *x, integer *ldx, real *rcond, real *ferr, 
+	real *berr, real *work, integer *lwork, integer *iwork, integer *
+	info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1, 
+	    x_offset, i__1, i__2;
+
+    /* Local variables */
+    integer nb;
+    extern logical lsame_(char *, char *);
+    real anorm;
+    extern doublereal slamch_(char *);
+    logical nofact;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *, 
+	    integer *, real *, integer *);
+    extern doublereal slansy_(char *, char *, integer *, real *, integer *, 
+	    real *);
+    extern /* Subroutine */ int ssycon_(char *, integer *, real *, integer *, 
+	    integer *, real *, real *, real *, integer *, integer *);
+    integer lwkopt;
+    logical lquery;
+    extern /* Subroutine */ int ssyrfs_(char *, integer *, integer *, real *, 
+	    integer *, real *, integer *, integer *, real *, integer *, real *
+, integer *, real *, real *, real *, integer *, integer *)
+	    , ssytrf_(char *, integer *, real *, integer *, integer *, real *, 
+	    integer *, integer *), ssytrs_(char *, integer *, 
+	    integer *, real *, integer *, integer *, real *, integer *, 
+	    integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYSVX uses the diagonal pivoting factorization to compute the */
+/* solution to a real system of linear equations A * X = B, */
+/* where A is an N-by-N symmetric matrix and X and B are N-by-NRHS */
+/* matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the diagonal pivoting method is used to factor A. */
+/* The form of the factorization is */
+/* A = U * D * U**T, if UPLO = 'U', or */
+/* A = L * D * L**T, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is symmetric and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* 2. If some D(i,i)=0, so that D is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of A has been */
+/* supplied on entry. */
+/* = 'F': On entry, AF and IPIV contain the factored form of */
+/* A. AF and IPIV will not be modified. */
+/* = 'N': The matrix A will be copied to AF and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The symmetric matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input or output) REAL array, dimension (LDAF,N) */
+/* If FACT = 'F', then AF is an input argument and on entry */
+/* contains the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**T or A = L*D*L**T as computed by SSYTRF. */
+
+/* If FACT = 'N', then AF is an output argument and on exit */
+/* returns the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**T or A = L*D*L**T. */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by SSYTRF. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by SSYTRF. */
+
+/* B (input) REAL array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) REAL array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) REAL */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A. If RCOND is less than the machine precision (in */
+/* particular, if RCOND = 0), the matrix is singular to working */
+/* precision. This condition is indicated by a return code of */
+/* INFO > 0. */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of WORK. LWORK >= max(1,3*N), and for best */
+/* performance, when FACT = 'N', LWORK >= max(1,3*N,N*NB), where */
+/* NB is the optimal blocksize for SSYTRF. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: D(i,i) is exactly zero. The factorization */
+/* has been completed but the factor D is exactly */
+/* singular, so the solution and error bounds could */
+/* not be computed. RCOND = 0 is returned. */
+/* = N+1: D is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    af_dim1 = *ldaf;
+    af_offset = 1 + af_dim1;
+    af -= af_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    nofact = lsame_(fact, "N");
+/* LWORK = -1 flags a workspace-size query only (see LWORK above). */
+    lquery = *lwork == -1;
+    if (! nofact && ! lsame_(fact, "F")) {
+	*info = -1;
+    } else if (! lsame_(uplo, "U") && ! lsame_(uplo, 
+	    "L")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*n)) {
+	*info = -6;
+    } else if (*ldaf < max(1,*n)) {
+	*info = -8;
+    } else if (*ldb < max(1,*n)) {
+	*info = -11;
+    } else if (*ldx < max(1,*n)) {
+	*info = -13;
+    } else /* if(complicated condition) */ {
+/* Computing MAX */
+	i__1 = 1, i__2 = *n * 3;
+	if (*lwork < max(i__1,i__2) && ! lquery) {
+	    *info = -18;
+	}
+    }
+
+/* Optimal workspace: at least max(1,3*N) (needed by SSYRFS); when a */
+/* factorization will be done (FACT = 'N'), also N*NB for SSYTRF. */
+    if (*info == 0) {
+/* Computing MAX */
+	i__1 = 1, i__2 = *n * 3;
+	lwkopt = max(i__1,i__2);
+	if (nofact) {
+	    nb = ilaenv_(&c__1, "SSYTRF", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+	    i__1 = lwkopt, i__2 = *n * nb;
+	    lwkopt = max(i__1,i__2);
+	}
+	work[1] = (real) lwkopt;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SSYSVX", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+    if (nofact) {
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+	slacpy_(uplo, n, n, &a[a_offset], lda, &af[af_offset], ldaf);
+	ssytrf_(uplo, n, &af[af_offset], ldaf, &ipiv[1], &work[1], lwork, 
+		info);
+
+/* Return if INFO is non-zero. */
+
+	if (*info > 0) {
+	    *rcond = 0.f;
+	    return 0;
+	}
+    }
+
+/* Compute the norm of the matrix A. */
+
+    anorm = slansy_("I", uplo, n, &a[a_offset], lda, &work[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+    ssycon_(uplo, n, &af[af_offset], ldaf, &ipiv[1], &anorm, rcond, &work[1], 
+	     &iwork[1], info);
+
+/* Compute the solution vectors X. */
+
+    slacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    ssytrs_(uplo, n, nrhs, &af[af_offset], ldaf, &ipiv[1], &x[x_offset], ldx, 
+	     info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+    ssyrfs_(uplo, n, nrhs, &a[a_offset], lda, &af[af_offset], ldaf, &ipiv[1], 
+	    &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[1]
+, &iwork[1], info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+    if (*rcond < slamch_("Epsilon")) {
+	*info = *n + 1;
+    }
+
+/* The calls above used WORK as scratch; restore the optimal LWORK. */
+    work[1] = (real) lwkopt;
+
+    return 0;
+
+/* End of SSYSVX */
+
+} /* ssysvx_ */
diff --git a/contrib/libs/clapack/ssytd2.c b/contrib/libs/clapack/ssytd2.c
new file mode 100644
index 0000000000..f2026353f6
--- /dev/null
+++ b/contrib/libs/clapack/ssytd2.c
@@ -0,0 +1,302 @@
+/* ssytd2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b8 = 0.f;
+static real c_b14 = -1.f;
+
+/* Subroutine */ int ssytd2_(char *uplo, integer *n, real *a, integer *lda, 
+	real *d__, real *e, real *tau, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__;
+    real taui;
+    extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+    extern /* Subroutine */ int ssyr2_(char *, integer *, real *, real *, 
+	    integer *, real *, integer *, real *, integer *);
+    real alpha;
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int saxpy_(integer *, real *, real *, integer *, 
+	    real *, integer *), ssymv_(char *, integer *, real *, real *, 
+	    integer *, real *, integer *, real *, real *, integer *), 
+	    xerbla_(char *, integer *), slarfg_(integer *, real *, 
+	    real *, integer *, real *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYTD2 reduces a real symmetric matrix A to symmetric tridiagonal */
+/* form T by an orthogonal similarity transformation: Q' * A * Q = T. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+/* On exit, if UPLO = 'U', the diagonal and first superdiagonal */
+/* of A are overwritten by the corresponding elements of the */
+/* tridiagonal matrix T, and the elements above the first */
+/* superdiagonal, with the array TAU, represent the orthogonal */
+/* matrix Q as a product of elementary reflectors; if UPLO */
+/* = 'L', the diagonal and first subdiagonal of A are over- */
+/* written by the corresponding elements of the tridiagonal */
+/* matrix T, and the elements below the first subdiagonal, with */
+/* the array TAU, represent the orthogonal matrix Q as a product */
+/* of elementary reflectors. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* D (output) REAL array, dimension (N) */
+/* The diagonal elements of the tridiagonal matrix T: */
+/* D(i) = A(i,i). */
+
+/* E (output) REAL array, dimension (N-1) */
+/* The off-diagonal elements of the tridiagonal matrix T: */
+/* E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. */
+
+/* TAU (output) REAL array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(n-1) . . . H(2) H(1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in */
+/* A(1:i-1,i+1), and tau in TAU(i). */
+
+/* If UPLO = 'L', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(1) H(2) . . . H(n-1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i), */
+/* and tau in TAU(i). */
+
+/* The contents of A on exit are illustrated by the following examples */
+/* with n = 5: */
+
+/* if UPLO = 'U': if UPLO = 'L': */
+
+/* ( d e v2 v3 v4 ) ( d ) */
+/* ( d e v3 v4 ) ( e d ) */
+/* ( d e v4 ) ( v1 e d ) */
+/* ( d e ) ( v1 v2 e d ) */
+/* ( d ) ( v1 v2 v3 e d ) */
+
+/* where d and e denote diagonal and off-diagonal elements of T, and vi */
+/* denotes an element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --d__;
+    --e;
+    --tau;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SSYTD2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n <= 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/* Reduce the upper triangle of A */
+
+	for (i__ = *n - 1; i__ >= 1; --i__) {
+
+/* Generate elementary reflector H(i) = I - tau * v * v' */
+/* to annihilate A(1:i-1,i+1) */
+
+	    slarfg_(&i__, &a[i__ + (i__ + 1) * a_dim1], &a[(i__ + 1) * a_dim1 
+		    + 1], &c__1, &taui);
+/* Save the new superdiagonal entry T(i,i+1): A(i,i+1) is reused */
+/* below to hold v(i) = 1 while the reflector is applied. */
+	    e[i__] = a[i__ + (i__ + 1) * a_dim1];
+
+	    if (taui != 0.f) {
+
+/* Apply H(i) from both sides to A(1:i,1:i) */
+
+		a[i__ + (i__ + 1) * a_dim1] = 1.f;
+
+/* Compute x := tau * A * v storing x in TAU(1:i) */
+
+		ssymv_(uplo, &i__, &taui, &a[a_offset], lda, &a[(i__ + 1) * 
+			a_dim1 + 1], &c__1, &c_b8, &tau[1], &c__1);
+
+/* Compute w := x - 1/2 * tau * (x'*v) * v */
+
+		alpha = taui * -.5f * sdot_(&i__, &tau[1], &c__1, &a[(i__ + 1)
+			 * a_dim1 + 1], &c__1);
+		saxpy_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &tau[
+			1], &c__1);
+
+/* Apply the transformation as a rank-2 update: */
+/* A := A - v * w' - w * v' */
+
+		ssyr2_(uplo, &i__, &c_b14, &a[(i__ + 1) * a_dim1 + 1], &c__1, 
+			&tau[1], &c__1, &a[a_offset], lda);
+
+/* Restore the saved superdiagonal entry over the v(i) = 1 slot. */
+		a[i__ + (i__ + 1) * a_dim1] = e[i__];
+	    }
+	    d__[i__ + 1] = a[i__ + 1 + (i__ + 1) * a_dim1];
+	    tau[i__] = taui;
+/* L10: */
+	}
+/* The remaining 1-by-1 leading block supplies D(1). */
+	d__[1] = a[a_dim1 + 1];
+    } else {
+
+/* Reduce the lower triangle of A */
+
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) = I - tau * v * v' */
+/* to annihilate A(i+2:n,i) */
+
+	    i__2 = *n - i__;
+/* Computing MIN */
+	    i__3 = i__ + 2;
+	    slarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3, *n)+ i__ * 
+		    a_dim1], &c__1, &taui);
+/* Save the new subdiagonal entry T(i+1,i): A(i+1,i) is reused */
+/* below to hold v(i+1) = 1 while the reflector is applied. */
+	    e[i__] = a[i__ + 1 + i__ * a_dim1];
+
+	    if (taui != 0.f) {
+
+/* Apply H(i) from both sides to A(i+1:n,i+1:n) */
+
+		a[i__ + 1 + i__ * a_dim1] = 1.f;
+
+/* Compute x := tau * A * v storing y in TAU(i:n-1) */
+
+		i__2 = *n - i__;
+		ssymv_(uplo, &i__2, &taui, &a[i__ + 1 + (i__ + 1) * a_dim1], 
+			lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b8, &tau[
+			i__], &c__1);
+
+/* Compute w := x - 1/2 * tau * (x'*v) * v */
+
+		i__2 = *n - i__;
+		alpha = taui * -.5f * sdot_(&i__2, &tau[i__], &c__1, &a[i__ + 
+			1 + i__ * a_dim1], &c__1);
+		i__2 = *n - i__;
+		saxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+			i__], &c__1);
+
+/* Apply the transformation as a rank-2 update: */
+/* A := A - v * w' - w * v' */
+
+		i__2 = *n - i__;
+		ssyr2_(uplo, &i__2, &c_b14, &a[i__ + 1 + i__ * a_dim1], &c__1, 
+			&tau[i__], &c__1, &a[i__ + 1 + (i__ + 1) * a_dim1], 
+			lda);
+
+/* Restore the saved subdiagonal entry over the v(i+1) = 1 slot. */
+		a[i__ + 1 + i__ * a_dim1] = e[i__];
+	    }
+	    d__[i__] = a[i__ + i__ * a_dim1];
+	    tau[i__] = taui;
+/* L20: */
+	}
+/* The final 1-by-1 trailing block supplies D(n). */
+	d__[*n] = a[*n + *n * a_dim1];
+    }
+
+    return 0;
+
+/* End of SSYTD2 */
+
+} /* ssytd2_ */
diff --git a/contrib/libs/clapack/ssytf2.c b/contrib/libs/clapack/ssytf2.c
new file mode 100644
index 0000000000..245137d00e
--- /dev/null
+++ b/contrib/libs/clapack/ssytf2.c
@@ -0,0 +1,608 @@
+/* ssytf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ssytf2_(char *uplo, integer *n, real *a, integer *lda,
+ integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ real r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k;
+ real t, r1, d11, d12, d21, d22;
+ integer kk, kp;
+ real wk, wkm1, wkp1;
+ integer imax, jmax;
+ extern /* Subroutine */ int ssyr_(char *, integer *, real *, real *,
+ integer *, real *, integer *);
+ real alpha;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ integer kstep;
+ logical upper;
+ extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+ integer *);
+ real absakk;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ real colmax;
+ extern logical sisnan_(real *);
+ real rowmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYTF2 computes the factorization of a real symmetric matrix A using */
+/* the Bunch-Kaufman diagonal pivoting method: */
+
+/* A = U*D*U' or A = L*D*L' */
+
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, U' is the transpose of U, and D is symmetric and */
+/* block diagonal with 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* This is the unblocked version of the algorithm, calling Level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L (see below for further details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, D(k,k) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, and division by zero will occur if it */
+/* is used to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* 09-29-06 - patch from */
+/* Bobby Cheng, MathWorks */
+
+/* Replace l.204 and l.372 */
+/* IF( MAX( ABSAKK, COLMAX ).EQ.ZERO ) THEN */
+/* by */
+/* IF( (MAX( ABSAKK, COLMAX ).EQ.ZERO) .OR. SISNAN(ABSAKK) ) THEN */
+
+/* 01-01-96 - Based on modifications by */
+/* J. Lewis, Boeing Computer Services Company */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+/* 1-96 - Based on modifications by J. Lewis, Boeing Computer Services */
+/* Company */
+
+/* If UPLO = 'U', then A = U*D*U', where */
+/* U = P(n)*U(n)* ... *P(k)U(k)* ..., */
+/* i.e., U is a product of terms P(k)*U(k), where k decreases from n to */
+/* 1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I v 0 ) k-s */
+/* U(k) = ( 0 I 0 ) s */
+/* ( 0 0 I ) n-k */
+/* k-s s n-k */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */
+/* If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */
+/* and A(k,k), and v overwrites A(1:k-2,k-1:k). */
+
+/* If UPLO = 'L', then A = L*D*L', where */
+/* L = P(1)*L(1)* ... *P(k)*L(k)* ..., */
+/* i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */
+/* n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I 0 0 ) k-1 */
+/* L(k) = ( 0 I 0 ) s */
+/* ( 0 v I ) n-k-s+1 */
+/* k-1 s n-k-s+1 */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */
+/* If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */
+/* and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSYTF2", &i__1);
+ return 0;
+ }
+
+/* Initialize ALPHA for use in choosing pivot block size. */
+
+ alpha = (sqrt(17.f) + 1.f) / 8.f;
+
+ if (upper) {
+
+/* Factorize A as U*D*U' using the upper triangle of A */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2 */
+
+ k = *n;
+L10:
+
+/* If K < 1, exit from loop */
+
+ if (k < 1) {
+ goto L70;
+ }
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ absakk = (r__1 = a[k + k * a_dim1], dabs(r__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ imax = isamax_(&i__1, &a[k * a_dim1 + 1], &c__1);
+ colmax = (r__1 = a[imax + k * a_dim1], dabs(r__1));
+ } else {
+ colmax = 0.f;
+ }
+
+ if (dmax(absakk,colmax) == 0.f || sisnan_(&absakk)) {
+
+/* Column K is zero or contains a NaN: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = k - imax;
+ jmax = imax + isamax_(&i__1, &a[imax + (imax + 1) * a_dim1],
+ lda);
+ rowmax = (r__1 = a[imax + jmax * a_dim1], dabs(r__1));
+ if (imax > 1) {
+ i__1 = imax - 1;
+ jmax = isamax_(&i__1, &a[imax * a_dim1 + 1], &c__1);
+/* Computing MAX */
+ r__2 = rowmax, r__3 = (r__1 = a[jmax + imax * a_dim1],
+ dabs(r__1));
+ rowmax = dmax(r__2,r__3);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else if ((r__1 = a[imax + imax * a_dim1], dabs(r__1)) >=
+ alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+ } else {
+
+/* interchange rows and columns K-1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+
+ kk = k - kstep + 1;
+ if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the leading */
+/* submatrix A(1:k,1:k) */
+
+ i__1 = kp - 1;
+ sswap_(&i__1, &a[kk * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1],
+ &c__1);
+ i__1 = kk - kp - 1;
+ sswap_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + (kp +
+ 1) * a_dim1], lda);
+ t = a[kk + kk * a_dim1];
+ a[kk + kk * a_dim1] = a[kp + kp * a_dim1];
+ a[kp + kp * a_dim1] = t;
+ if (kstep == 2) {
+ t = a[k - 1 + k * a_dim1];
+ a[k - 1 + k * a_dim1] = a[kp + k * a_dim1];
+ a[kp + k * a_dim1] = t;
+ }
+ }
+
+/* Update the leading submatrix */
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = U(k)*D(k) */
+
+/* where U(k) is the k-th column of U */
+
+/* Perform a rank-1 update of A(1:k-1,1:k-1) as */
+
+/* A := A - U(k)*D(k)*U(k)' = A - W(k)*1/D(k)*W(k)' */
+
+ r1 = 1.f / a[k + k * a_dim1];
+ i__1 = k - 1;
+ r__1 = -r1;
+ ssyr_(uplo, &i__1, &r__1, &a[k * a_dim1 + 1], &c__1, &a[
+ a_offset], lda);
+
+/* Store U(k) in column k */
+
+ i__1 = k - 1;
+ sscal_(&i__1, &r1, &a[k * a_dim1 + 1], &c__1);
+ } else {
+
+/* 2-by-2 pivot block D(k): columns k and k-1 now hold */
+
+/* ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */
+
+/* where U(k) and U(k-1) are the k-th and (k-1)-th columns */
+/* of U */
+
+/* Perform a rank-2 update of A(1:k-2,1:k-2) as */
+
+/* A := A - ( U(k-1) U(k) )*D(k)*( U(k-1) U(k) )' */
+/* = A - ( W(k-1) W(k) )*inv(D(k))*( W(k-1) W(k) )' */
+
+ if (k > 2) {
+
+ d12 = a[k - 1 + k * a_dim1];
+ d22 = a[k - 1 + (k - 1) * a_dim1] / d12;
+ d11 = a[k + k * a_dim1] / d12;
+ t = 1.f / (d11 * d22 - 1.f);
+ d12 = t / d12;
+
+ for (j = k - 2; j >= 1; --j) {
+ wkm1 = d12 * (d11 * a[j + (k - 1) * a_dim1] - a[j + k
+ * a_dim1]);
+ wk = d12 * (d22 * a[j + k * a_dim1] - a[j + (k - 1) *
+ a_dim1]);
+ for (i__ = j; i__ >= 1; --i__) {
+ a[i__ + j * a_dim1] = a[i__ + j * a_dim1] - a[i__
+ + k * a_dim1] * wk - a[i__ + (k - 1) *
+ a_dim1] * wkm1;
+/* L20: */
+ }
+ a[j + k * a_dim1] = wk;
+ a[j + (k - 1) * a_dim1] = wkm1;
+/* L30: */
+ }
+
+ }
+
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k - 1] = -kp;
+ }
+
+/* Decrease K and return to the start of the main loop */
+
+ k -= kstep;
+ goto L10;
+
+ } else {
+
+/* Factorize A as L*D*L' using the lower triangle of A */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2 */
+
+ k = 1;
+L40:
+
+/* If K > N, exit from loop */
+
+ if (k > *n) {
+ goto L70;
+ }
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ absakk = (r__1 = a[k + k * a_dim1], dabs(r__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ imax = k + isamax_(&i__1, &a[k + 1 + k * a_dim1], &c__1);
+ colmax = (r__1 = a[imax + k * a_dim1], dabs(r__1));
+ } else {
+ colmax = 0.f;
+ }
+
+ if (dmax(absakk,colmax) == 0.f || sisnan_(&absakk)) {
+
+/* Column K is zero or contains a NaN: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = imax - k;
+ jmax = k - 1 + isamax_(&i__1, &a[imax + k * a_dim1], lda);
+ rowmax = (r__1 = a[imax + jmax * a_dim1], dabs(r__1));
+ if (imax < *n) {
+ i__1 = *n - imax;
+ jmax = imax + isamax_(&i__1, &a[imax + 1 + imax * a_dim1],
+ &c__1);
+/* Computing MAX */
+ r__2 = rowmax, r__3 = (r__1 = a[jmax + imax * a_dim1],
+ dabs(r__1));
+ rowmax = dmax(r__2,r__3);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else if ((r__1 = a[imax + imax * a_dim1], dabs(r__1)) >=
+ alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+ } else {
+
+/* interchange rows and columns K+1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+
+ kk = k + kstep - 1;
+ if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the trailing */
+/* submatrix A(k:n,k:n) */
+
+ if (kp < *n) {
+ i__1 = *n - kp;
+ sswap_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + 1
+ + kp * a_dim1], &c__1);
+ }
+ i__1 = kp - kk - 1;
+ sswap_(&i__1, &a[kk + 1 + kk * a_dim1], &c__1, &a[kp + (kk +
+ 1) * a_dim1], lda);
+ t = a[kk + kk * a_dim1];
+ a[kk + kk * a_dim1] = a[kp + kp * a_dim1];
+ a[kp + kp * a_dim1] = t;
+ if (kstep == 2) {
+ t = a[k + 1 + k * a_dim1];
+ a[k + 1 + k * a_dim1] = a[kp + k * a_dim1];
+ a[kp + k * a_dim1] = t;
+ }
+ }
+
+/* Update the trailing submatrix */
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = L(k)*D(k) */
+
+/* where L(k) is the k-th column of L */
+
+ if (k < *n) {
+
+/* Perform a rank-1 update of A(k+1:n,k+1:n) as */
+
+/* A := A - L(k)*D(k)*L(k)' = A - W(k)*(1/D(k))*W(k)' */
+
+ d11 = 1.f / a[k + k * a_dim1];
+ i__1 = *n - k;
+ r__1 = -d11;
+ ssyr_(uplo, &i__1, &r__1, &a[k + 1 + k * a_dim1], &c__1, &
+ a[k + 1 + (k + 1) * a_dim1], lda);
+
+/* Store L(k) in column K */
+
+ i__1 = *n - k;
+ sscal_(&i__1, &d11, &a[k + 1 + k * a_dim1], &c__1);
+ }
+ } else {
+
+/* 2-by-2 pivot block D(k) */
+
+ if (k < *n - 1) {
+
+/* Perform a rank-2 update of A(k+2:n,k+2:n) as */
+
+/* A := A - ( (A(k) A(k+1))*D(k)**(-1) ) * (A(k) A(k+1))' */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th */
+/* columns of L */
+
+ d21 = a[k + 1 + k * a_dim1];
+ d11 = a[k + 1 + (k + 1) * a_dim1] / d21;
+ d22 = a[k + k * a_dim1] / d21;
+ t = 1.f / (d11 * d22 - 1.f);
+ d21 = t / d21;
+
+ i__1 = *n;
+ for (j = k + 2; j <= i__1; ++j) {
+
+ wk = d21 * (d11 * a[j + k * a_dim1] - a[j + (k + 1) *
+ a_dim1]);
+ wkp1 = d21 * (d22 * a[j + (k + 1) * a_dim1] - a[j + k
+ * a_dim1]);
+
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + j * a_dim1] - a[i__
+ + k * a_dim1] * wk - a[i__ + (k + 1) *
+ a_dim1] * wkp1;
+/* L50: */
+ }
+
+ a[j + k * a_dim1] = wk;
+ a[j + (k + 1) * a_dim1] = wkp1;
+
+/* L60: */
+ }
+ }
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k + 1] = -kp;
+ }
+
+/* Increase K and return to the start of the main loop */
+
+ k += kstep;
+ goto L40;
+
+ }
+
+L70:
+
+ return 0;
+
+/* End of SSYTF2 */
+
+} /* ssytf2_ */
diff --git a/contrib/libs/clapack/ssytrd.c b/contrib/libs/clapack/ssytrd.c
new file mode 100644
index 0000000000..d4fe7b47ad
--- /dev/null
+++ b/contrib/libs/clapack/ssytrd.c
@@ -0,0 +1,360 @@
+/* ssytrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+static real c_b22 = -1.f;
+static real c_b23 = 1.f;
+
+/* Subroutine */ int ssytrd_(char *uplo, integer *n, real *a, integer *lda,
+ real *d__, real *e, real *tau, real *work, integer *lwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, nb, kk, nx, iws;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ logical upper;
+ extern /* Subroutine */ int ssytd2_(char *, integer *, real *, integer *,
+ real *, real *, real *, integer *), ssyr2k_(char *, char *
+, integer *, integer *, real *, real *, integer *, real *,
+ integer *, real *, real *, integer *), xerbla_(
+ char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int slatrd_(char *, integer *, integer *, real *,
+ integer *, real *, real *, real *, integer *);
+ integer ldwork, lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* SSYTRD reduces a real symmetric matrix A to real symmetric */
+/* tridiagonal form T by an orthogonal similarity transformation: */
+/* Q**T * A * Q = T. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+/* On exit, if UPLO = 'U', the diagonal and first superdiagonal */
+/* of A are overwritten by the corresponding elements of the */
+/* tridiagonal matrix T, and the elements above the first */
+/* superdiagonal, with the array TAU, represent the orthogonal */
+/* matrix Q as a product of elementary reflectors; if UPLO */
+/* = 'L', the diagonal and first subdiagonal of A are over- */
+/* written by the corresponding elements of the tridiagonal */
+/* matrix T, and the elements below the first subdiagonal, with */
+/* the array TAU, represent the orthogonal matrix Q as a product */
+/* of elementary reflectors. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* D (output) REAL array, dimension (N) */
+/* The diagonal elements of the tridiagonal matrix T: */
+/* D(i) = A(i,i). */
+
+/* E (output) REAL array, dimension (N-1) */
+/* The off-diagonal elements of the tridiagonal matrix T: */
+/* E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. */
+
+/* TAU (output) REAL array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= 1. */
+/* For optimum performance LWORK >= N*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(n-1) . . . H(2) H(1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in */
+/* A(1:i-1,i+1), and tau in TAU(i). */
+
+/* If UPLO = 'L', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(1) H(2) . . . H(n-1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real scalar, and v is a real vector with */
+/* v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i), */
+/* and tau in TAU(i). */
+
+/* The contents of A on exit are illustrated by the following examples */
+/* with n = 5: */
+
+/* if UPLO = 'U': if UPLO = 'L': */
+
+/* ( d e v2 v3 v4 ) ( d ) */
+/* ( d e v3 v4 ) ( e d ) */
+/* ( d e v4 ) ( v1 e d ) */
+/* ( d e ) ( v1 v2 e d ) */
+/* ( d ) ( v1 v2 v3 e d ) */
+
+/* where d and e denote diagonal and off-diagonal elements of T, and vi */
+/* denotes an element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1;
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else if (*lwork < 1 && ! lquery) {
+ *info = -9;
+ }
+
+ if (*info == 0) {
+
+/* Determine the block size. */
+
+ nb = ilaenv_(&c__1, "SSYTRD", uplo, n, &c_n1, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ work[1] = (real) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSYTRD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ nx = *n;
+ iws = 1;
+ if (nb > 1 && nb < *n) {
+
+/* Determine when to cross over from blocked to unblocked code */
+/* (last block is always handled by unblocked code). */
+
+/* Computing MAX */
+ i__1 = nb, i__2 = ilaenv_(&c__3, "SSYTRD", uplo, n, &c_n1, &c_n1, &
+ c_n1);
+ nx = max(i__1,i__2);
+ if (nx < *n) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: determine the */
+/* minimum value of NB, and reduce NB or force use of */
+/* unblocked code by setting NX = N. */
+
+/* Computing MAX */
+ i__1 = *lwork / ldwork;
+ nb = max(i__1,1);
+ nbmin = ilaenv_(&c__2, "SSYTRD", uplo, n, &c_n1, &c_n1, &c_n1);
+ if (nb < nbmin) {
+ nx = *n;
+ }
+ }
+ } else {
+ nx = *n;
+ }
+ } else {
+ nb = 1;
+ }
+
+ if (upper) {
+
+/* Reduce the upper triangle of A. */
+/* Columns 1:kk are handled by the unblocked method. */
+
+ kk = *n - (*n - nx + nb - 1) / nb * nb;
+ i__1 = kk + 1;
+ i__2 = -nb;
+ for (i__ = *n - nb + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+
+/* Reduce columns i:i+nb-1 to tridiagonal form and form the */
+/* matrix W which is needed to update the unreduced part of */
+/* the matrix */
+
+ i__3 = i__ + nb - 1;
+ slatrd_(uplo, &i__3, &nb, &a[a_offset], lda, &e[1], &tau[1], &
+ work[1], &ldwork);
+
+/* Update the unreduced submatrix A(1:i-1,1:i-1), using an */
+/* update of the form: A := A - V*W' - W*V' */
+
+ i__3 = i__ - 1;
+ ssyr2k_(uplo, "No transpose", &i__3, &nb, &c_b22, &a[i__ * a_dim1
+ + 1], lda, &work[1], &ldwork, &c_b23, &a[a_offset], lda);
+
+/* Copy superdiagonal elements back into A, and diagonal */
+/* elements into D */
+
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ a[j - 1 + j * a_dim1] = e[j - 1];
+ d__[j] = a[j + j * a_dim1];
+/* L10: */
+ }
+/* L20: */
+ }
+
+/* Use unblocked code to reduce the last or only block */
+
+ ssytd2_(uplo, &kk, &a[a_offset], lda, &d__[1], &e[1], &tau[1], &iinfo);
+ } else {
+
+/* Reduce the lower triangle of A */
+
+ i__2 = *n - nx;
+ i__1 = nb;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+
+/* Reduce columns i:i+nb-1 to tridiagonal form and form the */
+/* matrix W which is needed to update the unreduced part of */
+/* the matrix */
+
+ i__3 = *n - i__ + 1;
+ slatrd_(uplo, &i__3, &nb, &a[i__ + i__ * a_dim1], lda, &e[i__], &
+ tau[i__], &work[1], &ldwork);
+
+/* Update the unreduced submatrix A(i+ib:n,i+ib:n), using */
+/* an update of the form: A := A - V*W' - W*V' */
+
+ i__3 = *n - i__ - nb + 1;
+ ssyr2k_(uplo, "No transpose", &i__3, &nb, &c_b22, &a[i__ + nb +
+ i__ * a_dim1], lda, &work[nb + 1], &ldwork, &c_b23, &a[
+ i__ + nb + (i__ + nb) * a_dim1], lda);
+
+/* Copy subdiagonal elements back into A, and diagonal */
+/* elements into D */
+
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ a[j + 1 + j * a_dim1] = e[j];
+ d__[j] = a[j + j * a_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Use unblocked code to reduce the last or only block */
+
+ i__1 = *n - i__ + 1;
+ ssytd2_(uplo, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__],
+ &tau[i__], &iinfo);
+ }
+
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SSYTRD */
+
+} /* ssytrd_ */
diff --git a/contrib/libs/clapack/ssytrf.c b/contrib/libs/clapack/ssytrf.c
new file mode 100644
index 0000000000..bc70303183
--- /dev/null
+++ b/contrib/libs/clapack/ssytrf.c
@@ -0,0 +1,339 @@
+/* ssytrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int ssytrf_(char *uplo, integer *n, real *a, integer *lda,
+	integer *ipiv, real *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer j, k, kb, nb, iws;
+    extern logical lsame_(char *, char *);
+    integer nbmin, iinfo;
+    logical upper;
+    extern /* Subroutine */ int ssytf2_(char *, integer *, real *, integer *,
+	    integer *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int slasyf_(char *, integer *, integer *, integer
+	    *, real *, integer *, integer *, real *, integer *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SSYTRF computes the factorization of a real symmetric matrix A using */
+/*  the Bunch-Kaufman diagonal pivoting method.  The form of the */
+/*  factorization is */
+
+/*     A = U*D*U**T  or  A = L*D*L**T */
+
+/*  where U (or L) is a product of permutation and unit upper (lower) */
+/*  triangular matrices, and D is symmetric and block diagonal with */
+/*  1-by-1 and 2-by-2 diagonal blocks. */
+
+/*  This is the blocked version of the algorithm, calling Level 3 BLAS. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangle of A is stored; */
+/*          = 'L':  Lower triangle of A is stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  A       (input/output) REAL array, dimension (LDA,N) */
+/*          On entry, the symmetric matrix A.  If UPLO = 'U', the leading */
+/*          N-by-N upper triangular part of A contains the upper */
+/*          triangular part of the matrix A, and the strictly lower */
+/*          triangular part of A is not referenced.  If UPLO = 'L', the */
+/*          leading N-by-N lower triangular part of A contains the lower */
+/*          triangular part of the matrix A, and the strictly upper */
+/*          triangular part of A is not referenced. */
+
+/*          On exit, the block diagonal matrix D and the multipliers used */
+/*          to obtain the factor U or L (see below for further details). */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  IPIV    (output) INTEGER array, dimension (N) */
+/*          Details of the interchanges and the block structure of D. */
+/*          If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/*          interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/*          If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/*          columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/*          is a 2-by-2 diagonal block.  If UPLO = 'L' and IPIV(k) = */
+/*          IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/*          interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/*  WORK    (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/*          On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/*  LWORK   (input) INTEGER */
+/*          The length of WORK.  LWORK >=1.  For best performance */
+/*          LWORK >= N*NB, where NB is the block size returned by ILAENV. */
+
+/*          If LWORK = -1, then a workspace query is assumed; the routine */
+/*          only calculates the optimal size of the WORK array, returns */
+/*          this value as the first entry of the WORK array, and no error */
+/*          message related to LWORK is issued by XERBLA. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, D(i,i) is exactly zero.  The factorization */
+/*                has been completed, but the block diagonal matrix D is */
+/*                exactly singular, and division by zero will occur if it */
+/*                is used to solve a system of equations. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  If UPLO = 'U', then A = U*D*U', where */
+/*     U = P(n)*U(n)* ... *P(k)U(k)* ..., */
+/*  i.e., U is a product of terms P(k)*U(k), where k decreases from n to */
+/*  1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/*  and 2-by-2 diagonal blocks D(k).  P(k) is a permutation matrix as */
+/*  defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */
+/*  that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/*             (   I    v    0   )   k-s */
+/*     U(k) =  (   0    I    0   )   s */
+/*             (   0    0    I   )   n-k */
+/*                k-s   s   n-k */
+
+/*  If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */
+/*  If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */
+/*  and A(k,k), and v overwrites A(1:k-2,k-1:k). */
+
+/*  If UPLO = 'L', then A = L*D*L', where */
+/*     L = P(1)*L(1)* ... *P(k)*L(k)* ..., */
+/*  i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */
+/*  n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/*  and 2-by-2 diagonal blocks D(k).  P(k) is a permutation matrix as */
+/*  defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */
+/*  that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/*             (   I    0     0   )  k-1 */
+/*     L(k) =  (   0    I     0   )  s */
+/*             (   0    v     I   )  n-k-s+1 */
+/*                k-1   s  n-k-s+1 */
+
+/*  If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */
+/*  If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */
+/*  and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */
+
+/*  ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    lquery = *lwork == -1;
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*lwork < 1 && ! lquery) {
+	*info = -7;
+    }
+
+    if (*info == 0) {
+
+/*        Determine the block size */
+
+	nb = ilaenv_(&c__1, "SSYTRF", uplo, n, &c_n1, &c_n1, &c_n1);
+	lwkopt = *n * nb;
+	work[1] = (real) lwkopt;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SSYTRF", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/*     If the workspace cannot hold NB columns (LWORK < N*NB), shrink NB */
+/*     to what fits; should NB then fall below NBMIN, blocking is not */
+/*     worthwhile and the unblocked code is used on the whole matrix */
+/*     (NB is set to N below so the blocked branch is never entered). */
+
+    nbmin = 2;
+    ldwork = *n;
+    if (nb > 1 && nb < *n) {
+	iws = ldwork * nb;
+	if (*lwork < iws) {
+/* Computing MAX */
+	    i__1 = *lwork / ldwork;
+	    nb = max(i__1,1);
+/* Computing MAX */
+	    i__1 = 2, i__2 = ilaenv_(&c__2, "SSYTRF", uplo, n, &c_n1, &c_n1, &
+		    c_n1);
+	    nbmin = max(i__1,i__2);
+	}
+    } else {
+	iws = 1;
+    }
+    if (nb < nbmin) {
+	nb = *n;
+    }
+
+    if (upper) {
+
+/*        Factorize A as U*D*U' using the upper triangle of A */
+
+/*        K is the main loop index, decreasing from N to 1 in steps of */
+/*        KB, where KB is the number of columns factorized by SLASYF; */
+/*        KB is either NB or NB-1, or K for the last block */
+
+	k = *n;
+L10:
+
+/*        If K < 1, exit from loop */
+
+	if (k < 1) {
+	    goto L40;
+	}
+
+	if (k > nb) {
+
+/*           Factorize columns k-kb+1:k of A and use blocked code to */
+/*           update columns 1:k-kb */
+
+	    slasyf_(uplo, &k, &nb, &kb, &a[a_offset], lda, &ipiv[1], &work[1],
+		     &ldwork, &iinfo);
+	} else {
+
+/*           Use unblocked code to factorize columns 1:k of A */
+
+	    ssytf2_(uplo, &k, &a[a_offset], lda, &ipiv[1], &iinfo);
+	    kb = k;
+	}
+
+/*        Set INFO on the first occurrence of a zero pivot */
+
+	if (*info == 0 && iinfo > 0) {
+	    *info = iinfo;
+	}
+
+/*        Decrease K and return to the start of the main loop */
+
+	k -= kb;
+	goto L10;
+
+    } else {
+
+/*        Factorize A as L*D*L' using the lower triangle of A */
+
+/*        K is the main loop index, increasing from 1 to N in steps of */
+/*        KB, where KB is the number of columns factorized by SLASYF; */
+/*        KB is either NB or NB-1, or N-K+1 for the last block */
+
+	k = 1;
+L20:
+
+/*        If K > N, exit from loop */
+
+	if (k > *n) {
+	    goto L40;
+	}
+
+	if (k <= *n - nb) {
+
+/*           Factorize columns k:k+kb-1 of A and use blocked code to */
+/*           update columns k+kb:n */
+
+	    i__1 = *n - k + 1;
+	    slasyf_(uplo, &i__1, &nb, &kb, &a[k + k * a_dim1], lda, &ipiv[k],
+		    &work[1], &ldwork, &iinfo);
+	} else {
+
+/*           Use unblocked code to factorize columns k:n of A */
+
+	    i__1 = *n - k + 1;
+	    ssytf2_(uplo, &i__1, &a[k + k * a_dim1], lda, &ipiv[k], &iinfo);
+	    kb = *n - k + 1;
+	}
+
+/*        Set INFO on the first occurrence of a zero pivot */
+
+	if (*info == 0 && iinfo > 0) {
+	    *info = iinfo + k - 1;
+	}
+
+/*        Adjust IPIV */
+
+/*        (SLASYF returns pivot indices relative to the trailing */
+/*        submatrix A(k:n,k:n) it worked on; shift them here so they */
+/*        refer to rows/columns of the full matrix, preserving the sign */
+/*        convention that marks 2-by-2 blocks.) */
+
+	i__1 = k + kb - 1;
+	for (j = k; j <= i__1; ++j) {
+	    if (ipiv[j] > 0) {
+		ipiv[j] = ipiv[j] + k - 1;
+	    } else {
+		ipiv[j] = ipiv[j] - k + 1;
+	    }
+/* L30: */
+	}
+
+/*        Increase K and return to the start of the main loop */
+
+	k += kb;
+	goto L20;
+
+    }
+
+L40:
+    work[1] = (real) lwkopt;
+    return 0;
+
+/*     End of SSYTRF */
+
+} /* ssytrf_ */
diff --git a/contrib/libs/clapack/ssytri.c b/contrib/libs/clapack/ssytri.c
new file mode 100644
index 0000000000..24700baeec
--- /dev/null
+++ b/contrib/libs/clapack/ssytri.c
@@ -0,0 +1,394 @@
+/* ssytri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b11 = -1.f;
+static real c_b13 = 0.f;
+
+/* Subroutine */ int ssytri_(char *uplo, integer *n, real *a, integer *lda,
+	integer *ipiv, real *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1;
+    real r__1;
+
+    /* Local variables */
+    real d__;
+    integer k;
+    real t, ak;
+    integer kp;
+    real akp1, temp;
+    extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+    real akkp1;
+    extern logical lsame_(char *, char *);
+    integer kstep;
+    logical upper;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+	    integer *), sswap_(integer *, real *, integer *, real *, integer *
+), ssymv_(char *, integer *, real *, real *, integer *, real *,
+	    integer *, real *, real *, integer *), xerbla_(char *,
+	    integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SSYTRI computes the inverse of a real symmetric indefinite matrix */
+/*  A using the factorization A = U*D*U**T or A = L*D*L**T computed by */
+/*  SSYTRF. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the details of the factorization are stored */
+/*          as an upper or lower triangular matrix. */
+/*          = 'U':  Upper triangular, form is A = U*D*U**T; */
+/*          = 'L':  Lower triangular, form is A = L*D*L**T. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  A       (input/output) REAL array, dimension (LDA,N) */
+/*          On entry, the block diagonal matrix D and the multipliers */
+/*          used to obtain the factor U or L as computed by SSYTRF. */
+
+/*          On exit, if INFO = 0, the (symmetric) inverse of the original */
+/*          matrix.  If UPLO = 'U', the upper triangular part of the */
+/*          inverse is formed and the part of A below the diagonal is not */
+/*          referenced; if UPLO = 'L' the lower triangular part of the */
+/*          inverse is formed and the part of A above the diagonal is */
+/*          not referenced. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  IPIV    (input) INTEGER array, dimension (N) */
+/*          Details of the interchanges and the block structure of D */
+/*          as determined by SSYTRF. */
+
+/*  WORK    (workspace) REAL array, dimension (N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0: successful exit */
+/*          < 0: if INFO = -i, the i-th argument had an illegal value */
+/*          > 0: if INFO = i, D(i,i) = 0; the matrix is singular and its */
+/*               inverse could not be computed. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SSYTRI", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Check that the diagonal matrix D is nonsingular. */
+
+/*     (INFO itself is used as the loop index, so that on an early */
+/*     return it already holds the index of the zero 1-by-1 pivot, as */
+/*     documented for INFO > 0.  Only 1-by-1 blocks, IPIV > 0, can carry */
+/*     a zero diagonal here; 2-by-2 pivot blocks are nonsingular by */
+/*     construction in SSYTRF.) */
+
+    if (upper) {
+
+/*        Upper triangular storage: examine D from bottom to top */
+
+	for (*info = *n; *info >= 1; --(*info)) {
+	    if (ipiv[*info] > 0 && a[*info + *info * a_dim1] == 0.f) {
+		return 0;
+	    }
+/* L10: */
+	}
+    } else {
+
+/*        Lower triangular storage: examine D from top to bottom. */
+
+	i__1 = *n;
+	for (*info = 1; *info <= i__1; ++(*info)) {
+	    if (ipiv[*info] > 0 && a[*info + *info * a_dim1] == 0.f) {
+		return 0;
+	    }
+/* L20: */
+	}
+    }
+    *info = 0;
+
+    if (upper) {
+
+/*        Compute inv(A) from the factorization A = U*D*U'. */
+
+/*        K is the main loop index, increasing from 1 to N in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	k = 1;
+L30:
+
+/*        If K > N, exit from loop. */
+
+	if (k > *n) {
+	    goto L40;
+	}
+
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Invert the diagonal block. */
+
+	    a[k + k * a_dim1] = 1.f / a[k + k * a_dim1];
+
+/*           Compute column K of the inverse. */
+
+	    if (k > 1) {
+		i__1 = k - 1;
+		scopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &work[1], &c__1);
+		i__1 = k - 1;
+		ssymv_(uplo, &i__1, &c_b11, &a[a_offset], lda, &work[1], &
+			c__1, &c_b13, &a[k * a_dim1 + 1], &c__1);
+		i__1 = k - 1;
+		a[k + k * a_dim1] -= sdot_(&i__1, &work[1], &c__1, &a[k *
+			a_dim1 + 1], &c__1);
+	    }
+	    kstep = 1;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Invert the diagonal block. */
+
+/*           (The block entries are first scaled by t = |A(k,k+1)| so */
+/*           that the 2-by-2 inverse is formed without unnecessary */
+/*           overflow or underflow.) */
+
+	    t = (r__1 = a[k + (k + 1) * a_dim1], dabs(r__1));
+	    ak = a[k + k * a_dim1] / t;
+	    akp1 = a[k + 1 + (k + 1) * a_dim1] / t;
+	    akkp1 = a[k + (k + 1) * a_dim1] / t;
+	    d__ = t * (ak * akp1 - 1.f);
+	    a[k + k * a_dim1] = akp1 / d__;
+	    a[k + 1 + (k + 1) * a_dim1] = ak / d__;
+	    a[k + (k + 1) * a_dim1] = -akkp1 / d__;
+
+/*           Compute columns K and K+1 of the inverse. */
+
+	    if (k > 1) {
+		i__1 = k - 1;
+		scopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &work[1], &c__1);
+		i__1 = k - 1;
+		ssymv_(uplo, &i__1, &c_b11, &a[a_offset], lda, &work[1], &
+			c__1, &c_b13, &a[k * a_dim1 + 1], &c__1);
+		i__1 = k - 1;
+		a[k + k * a_dim1] -= sdot_(&i__1, &work[1], &c__1, &a[k *
+			a_dim1 + 1], &c__1);
+		i__1 = k - 1;
+		a[k + (k + 1) * a_dim1] -= sdot_(&i__1, &a[k * a_dim1 + 1], &
+			c__1, &a[(k + 1) * a_dim1 + 1], &c__1);
+		i__1 = k - 1;
+		scopy_(&i__1, &a[(k + 1) * a_dim1 + 1], &c__1, &work[1], &
+			c__1);
+		i__1 = k - 1;
+		ssymv_(uplo, &i__1, &c_b11, &a[a_offset], lda, &work[1], &
+			c__1, &c_b13, &a[(k + 1) * a_dim1 + 1], &c__1);
+		i__1 = k - 1;
+		a[k + 1 + (k + 1) * a_dim1] -= sdot_(&i__1, &work[1], &c__1, &
+			a[(k + 1) * a_dim1 + 1], &c__1);
+	    }
+	    kstep = 2;
+	}
+
+	kp = (i__1 = ipiv[k], abs(i__1));
+	if (kp != k) {
+
+/*           Interchange rows and columns K and KP in the leading */
+/*           submatrix A(1:k+1,1:k+1) */
+
+	    i__1 = kp - 1;
+	    sswap_(&i__1, &a[k * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1], &
+		    c__1);
+	    i__1 = k - kp - 1;
+	    sswap_(&i__1, &a[kp + 1 + k * a_dim1], &c__1, &a[kp + (kp + 1) *
+		    a_dim1], lda);
+	    temp = a[k + k * a_dim1];
+	    a[k + k * a_dim1] = a[kp + kp * a_dim1];
+	    a[kp + kp * a_dim1] = temp;
+	    if (kstep == 2) {
+		temp = a[k + (k + 1) * a_dim1];
+		a[k + (k + 1) * a_dim1] = a[kp + (k + 1) * a_dim1];
+		a[kp + (k + 1) * a_dim1] = temp;
+	    }
+	}
+
+	k += kstep;
+	goto L30;
+L40:
+
+	;
+    } else {
+
+/*        Compute inv(A) from the factorization A = L*D*L'. */
+
+/*        K is the main loop index, decreasing from N to 1 in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	k = *n;
+L50:
+
+/*        If K < 1, exit from loop. */
+
+	if (k < 1) {
+	    goto L60;
+	}
+
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Invert the diagonal block. */
+
+	    a[k + k * a_dim1] = 1.f / a[k + k * a_dim1];
+
+/*           Compute column K of the inverse. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		scopy_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &work[1], &c__1);
+		i__1 = *n - k;
+		ssymv_(uplo, &i__1, &c_b11, &a[k + 1 + (k + 1) * a_dim1], lda,
+			 &work[1], &c__1, &c_b13, &a[k + 1 + k * a_dim1], &
+			c__1);
+		i__1 = *n - k;
+		a[k + k * a_dim1] -= sdot_(&i__1, &work[1], &c__1, &a[k + 1 +
+			k * a_dim1], &c__1);
+	    }
+	    kstep = 1;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Invert the diagonal block. */
+
+/*           (As in the upper case, entries are scaled by the */
+/*           off-diagonal magnitude t before inversion.) */
+
+	    t = (r__1 = a[k + (k - 1) * a_dim1], dabs(r__1));
+	    ak = a[k - 1 + (k - 1) * a_dim1] / t;
+	    akp1 = a[k + k * a_dim1] / t;
+	    akkp1 = a[k + (k - 1) * a_dim1] / t;
+	    d__ = t * (ak * akp1 - 1.f);
+	    a[k - 1 + (k - 1) * a_dim1] = akp1 / d__;
+	    a[k + k * a_dim1] = ak / d__;
+	    a[k + (k - 1) * a_dim1] = -akkp1 / d__;
+
+/*           Compute columns K-1 and K of the inverse. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		scopy_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &work[1], &c__1);
+		i__1 = *n - k;
+		ssymv_(uplo, &i__1, &c_b11, &a[k + 1 + (k + 1) * a_dim1], lda,
+			 &work[1], &c__1, &c_b13, &a[k + 1 + k * a_dim1], &
+			c__1);
+		i__1 = *n - k;
+		a[k + k * a_dim1] -= sdot_(&i__1, &work[1], &c__1, &a[k + 1 +
+			k * a_dim1], &c__1);
+		i__1 = *n - k;
+		a[k + (k - 1) * a_dim1] -= sdot_(&i__1, &a[k + 1 + k * a_dim1]
+, &c__1, &a[k + 1 + (k - 1) * a_dim1], &c__1);
+		i__1 = *n - k;
+		scopy_(&i__1, &a[k + 1 + (k - 1) * a_dim1], &c__1, &work[1], &
+			c__1);
+		i__1 = *n - k;
+		ssymv_(uplo, &i__1, &c_b11, &a[k + 1 + (k + 1) * a_dim1], lda,
+			 &work[1], &c__1, &c_b13, &a[k + 1 + (k - 1) * a_dim1]
+, &c__1);
+		i__1 = *n - k;
+		a[k - 1 + (k - 1) * a_dim1] -= sdot_(&i__1, &work[1], &c__1, &
+			a[k + 1 + (k - 1) * a_dim1], &c__1);
+	    }
+	    kstep = 2;
+	}
+
+	kp = (i__1 = ipiv[k], abs(i__1));
+	if (kp != k) {
+
+/*           Interchange rows and columns K and KP in the trailing */
+/*           submatrix A(k-1:n,k-1:n) */
+
+	    if (kp < *n) {
+		i__1 = *n - kp;
+		sswap_(&i__1, &a[kp + 1 + k * a_dim1], &c__1, &a[kp + 1 + kp *
+			 a_dim1], &c__1);
+	    }
+	    i__1 = kp - k - 1;
+	    sswap_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &a[kp + (k + 1) *
+		    a_dim1], lda);
+	    temp = a[k + k * a_dim1];
+	    a[k + k * a_dim1] = a[kp + kp * a_dim1];
+	    a[kp + kp * a_dim1] = temp;
+	    if (kstep == 2) {
+		temp = a[k + (k - 1) * a_dim1];
+		a[k + (k - 1) * a_dim1] = a[kp + (k - 1) * a_dim1];
+		a[kp + (k - 1) * a_dim1] = temp;
+	    }
+	}
+
+	k -= kstep;
+	goto L50;
+L60:
+	;
+    }
+
+    return 0;
+
+/*     End of SSYTRI */
+
+} /* ssytri_ */
diff --git a/contrib/libs/clapack/ssytrs.c b/contrib/libs/clapack/ssytrs.c
new file mode 100644
index 0000000000..a27e5e1528
--- /dev/null
+++ b/contrib/libs/clapack/ssytrs.c
@@ -0,0 +1,449 @@
+/* ssytrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b7 = -1.f;
+static integer c__1 = 1;
+static real c_b19 = 1.f;
+
+/* Subroutine */ int ssytrs_(char *uplo, integer *n, integer *nrhs, real *a,
+	integer *lda, integer *ipiv, real *b, integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+    real r__1;
+
+    /* Local variables */
+    integer j, k;
+    real ak, bk;
+    integer kp;
+    real akm1, bkm1;
+    extern /* Subroutine */ int sger_(integer *, integer *, real *, real *,
+	    integer *, real *, integer *, real *, integer *);
+    real akm1k;
+    extern logical lsame_(char *, char *);
+    real denom;
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+	     sgemv_(char *, integer *, integer *, real *, real *, integer *,
+	    real *, integer *, real *, real *, integer *);
+    logical upper;
+    extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+	    integer *), xerbla_(char *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  SSYTRS solves a system of linear equations A*X = B with a real */
+/*  symmetric matrix A using the factorization A = U*D*U**T or */
+/*  A = L*D*L**T computed by SSYTRF. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the details of the factorization are stored */
+/*          as an upper or lower triangular matrix. */
+/*          = 'U':  Upper triangular, form is A = U*D*U**T; */
+/*          = 'L':  Lower triangular, form is A = L*D*L**T. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrix B.  NRHS >= 0. */
+
+/*  A       (input) REAL array, dimension (LDA,N) */
+/*          The block diagonal matrix D and the multipliers used to */
+/*          obtain the factor U or L as computed by SSYTRF. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  IPIV    (input) INTEGER array, dimension (N) */
+/*          Details of the interchanges and the block structure of D */
+/*          as determined by SSYTRF. */
+
+/*  B       (input/output) REAL array, dimension (LDB,NRHS) */
+/*          On entry, the right hand side matrix B. */
+/*          On exit, the solution matrix X. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("SSYTRS", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/*        Solve A*X = B, where A = U*D*U'. */
+
+/*        First solve U*D*X = B, overwriting B with X. */
+
+/*        K is the main loop index, decreasing from N to 1 in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	k = *n;
+L10:
+
+/*        If K < 1, exit from loop. */
+
+	if (k < 1) {
+	    goto L30;
+	}
+
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		sswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/*           Multiply by inv(U(K)), where U(K) is the transformation */
+/*           stored in column K of A. */
+
+	    i__1 = k - 1;
+	    sger_(&i__1, nrhs, &c_b7, &a[k * a_dim1 + 1], &c__1, &b[k +
+		    b_dim1], ldb, &b[b_dim1 + 1], ldb);
+
+/*           Multiply by the inverse of the diagonal block. */
+
+	    r__1 = 1.f / a[k + k * a_dim1];
+	    sscal_(nrhs, &r__1, &b[k + b_dim1], ldb);
+	    --k;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Interchange rows K-1 and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k - 1) {
+		sswap_(nrhs, &b[k - 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/*           Multiply by inv(U(K)), where U(K) is the transformation */
+/*           stored in columns K-1 and K of A. */
+
+	    i__1 = k - 2;
+	    sger_(&i__1, nrhs, &c_b7, &a[k * a_dim1 + 1], &c__1, &b[k +
+		    b_dim1], ldb, &b[b_dim1 + 1], ldb);
+	    i__1 = k - 2;
+	    sger_(&i__1, nrhs, &c_b7, &a[(k - 1) * a_dim1 + 1], &c__1, &b[k -
+		    1 + b_dim1], ldb, &b[b_dim1 + 1], ldb);
+
+/*           Multiply by the inverse of the diagonal block. */
+
+/*           (Cramer's rule on the 2-by-2 pivot block, with both the */
+/*           block entries and the right-hand sides scaled by the */
+/*           off-diagonal AKM1K to avoid overflow.) */
+
+	    akm1k = a[k - 1 + k * a_dim1];
+	    akm1 = a[k - 1 + (k - 1) * a_dim1] / akm1k;
+	    ak = a[k + k * a_dim1] / akm1k;
+	    denom = akm1 * ak - 1.f;
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		bkm1 = b[k - 1 + j * b_dim1] / akm1k;
+		bk = b[k + j * b_dim1] / akm1k;
+		b[k - 1 + j * b_dim1] = (ak * bkm1 - bk) / denom;
+		b[k + j * b_dim1] = (akm1 * bk - bkm1) / denom;
+/* L20: */
+	    }
+	    k += -2;
+	}
+
+	goto L10;
+L30:
+
+/*        Next solve U'*X = B, overwriting B with X. */
+
+/*        K is the main loop index, increasing from 1 to N in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	k = 1;
+L40:
+
+/*        If K > N, exit from loop. */
+
+	if (k > *n) {
+	    goto L50;
+	}
+
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Multiply by inv(U'(K)), where U(K) is the transformation */
+/*           stored in column K of A. */
+
+	    i__1 = k - 1;
+	    sgemv_("Transpose", &i__1, nrhs, &c_b7, &b[b_offset], ldb, &a[k *
+		    a_dim1 + 1], &c__1, &c_b19, &b[k + b_dim1], ldb);
+
+/*           Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		sswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    ++k;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Multiply by inv(U'(K+1)), where U(K+1) is the transformation */
+/*           stored in columns K and K+1 of A. */
+
+	    i__1 = k - 1;
+	    sgemv_("Transpose", &i__1, nrhs, &c_b7, &b[b_offset], ldb, &a[k *
+		    a_dim1 + 1], &c__1, &c_b19, &b[k + b_dim1], ldb);
+	    i__1 = k - 1;
+	    sgemv_("Transpose", &i__1, nrhs, &c_b7, &b[b_offset], ldb, &a[(k
+		    + 1) * a_dim1 + 1], &c__1, &c_b19, &b[k + 1 + b_dim1],
+		    ldb);
+
+/*           Interchange rows K and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k) {
+		sswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    k += 2;
+	}
+
+	goto L40;
+L50:
+
+	;
+    } else {
+
+/*        Solve A*X = B, where A = L*D*L'. */
+
+/*        First solve L*D*X = B, overwriting B with X. */
+
+/*        K is the main loop index, increasing from 1 to N in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	k = 1;
+L60:
+
+/*        If K > N, exit from loop. */
+
+	if (k > *n) {
+	    goto L80;
+	}
+
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		sswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/*           Multiply by inv(L(K)), where L(K) is the transformation */
+/*           stored in column K of A. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		sger_(&i__1, nrhs, &c_b7, &a[k + 1 + k * a_dim1], &c__1, &b[k
+			+ b_dim1], ldb, &b[k + 1 + b_dim1], ldb);
+	    }
+
+/*           Multiply by the inverse of the diagonal block. */
+
+	    r__1 = 1.f / a[k + k * a_dim1];
+	    sscal_(nrhs, &r__1, &b[k + b_dim1], ldb);
+	    ++k;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Interchange rows K+1 and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k + 1) {
+		sswap_(nrhs, &b[k + 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/*           Multiply by inv(L(K)), where L(K) is the transformation */
+/*           stored in columns K and K+1 of A. */
+
+	    if (k < *n - 1) {
+		i__1 = *n - k - 1;
+		sger_(&i__1, nrhs, &c_b7, &a[k + 2 + k * a_dim1], &c__1, &b[k
+			+ b_dim1], ldb, &b[k + 2 + b_dim1], ldb);
+		i__1 = *n - k - 1;
+		sger_(&i__1, nrhs, &c_b7, &a[k + 2 + (k + 1) * a_dim1], &c__1,
+			 &b[k + 1 + b_dim1], ldb, &b[k + 2 + b_dim1], ldb);
+	    }
+
+/*           Multiply by the inverse of the diagonal block. */
+
+/*           (Same scaled Cramer's-rule solve as in the upper branch, */
+/*           here on rows K and K+1 with AKM1K = A(k+1,k).) */
+
+	    akm1k = a[k + 1 + k * a_dim1];
+	    akm1 = a[k + k * a_dim1] / akm1k;
+	    ak = a[k + 1 + (k + 1) * a_dim1] / akm1k;
+	    denom = akm1 * ak - 1.f;
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		bkm1 = b[k + j * b_dim1] / akm1k;
+		bk = b[k + 1 + j * b_dim1] / akm1k;
+		b[k + j * b_dim1] = (ak * bkm1 - bk) / denom;
+		b[k + 1 + j * b_dim1] = (akm1 * bk - bkm1) / denom;
+/* L70: */
+	    }
+	    k += 2;
+	}
+
+	goto L60;
+L80:
+
+/*        Next solve L'*X = B, overwriting B with X. */
+
+/*        K is the main loop index, decreasing from N to 1 in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	k = *n;
+L90:
+
+/*        If K < 1, exit from loop. */
+
+	if (k < 1) {
+	    goto L100;
+	}
+
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Multiply by inv(L'(K)), where L(K) is the transformation */
+/*           stored in column K of A. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		sgemv_("Transpose", &i__1, nrhs, &c_b7, &b[k + 1 + b_dim1],
+			ldb, &a[k + 1 + k * a_dim1], &c__1, &c_b19, &b[k +
+			b_dim1], ldb);
+	    }
+
+/*           Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		sswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    --k;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Multiply by inv(L'(K-1)), where L(K-1) is the transformation */
+/*           stored in columns K-1 and K of A. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		sgemv_("Transpose", &i__1, nrhs, &c_b7, &b[k + 1 + b_dim1],
+			ldb, &a[k + 1 + k * a_dim1], &c__1, &c_b19, &b[k +
+			b_dim1], ldb);
+		i__1 = *n - k;
+		sgemv_("Transpose", &i__1, nrhs, &c_b7, &b[k + 1 + b_dim1],
+			ldb, &a[k + 1 + (k - 1) * a_dim1], &c__1, &c_b19, &b[
+			k - 1 + b_dim1], ldb);
+	    }
+
+/*           Interchange rows K and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k) {
+		sswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    k += -2;
+	}
+
+	goto L90;
+L100:
+	;
+    }
+
+    return 0;
+
+/*     End of SSYTRS */
+
+} /* ssytrs_ */
diff --git a/contrib/libs/clapack/stbcon.c b/contrib/libs/clapack/stbcon.c
new file mode 100644
index 0000000000..a567270c16
--- /dev/null
+++ b/contrib/libs/clapack/stbcon.c
@@ -0,0 +1,247 @@
+/* stbcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int stbcon_(char *norm, char *uplo, char *diag, integer *n,
+ integer *kd, real *ab, integer *ldab, real *rcond, real *work,
+ integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1;
+ real r__1;
+
+ /* Local variables */
+ integer ix, kase, kase1;
+ real scale;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ real anorm;
+ extern /* Subroutine */ int srscl_(integer *, real *, real *, integer *);
+ logical upper;
+ real xnorm;
+ extern /* Subroutine */ int slacn2_(integer *, real *, real *, integer *,
+ real *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ extern doublereal slantb_(char *, char *, char *, integer *, integer *,
+ real *, integer *, real *);
+ real ainvnm;
+ extern /* Subroutine */ int slatbs_(char *, char *, char *, char *,
+ integer *, integer *, real *, integer *, real *, real *, real *,
+ integer *);
+ logical onenrm;
+ char normin[1];
+ real smlnum;
+ logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STBCON estimates the reciprocal of the condition number of a */
+/* triangular band matrix A, in either the 1-norm or the infinity-norm. */
+
+/* The norm of A is computed and an estimate is obtained for */
+/* norm(inv(A)), then the reciprocal of the condition number is */
+/* computed as */
+/* RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals or subdiagonals of the */
+/* triangular band matrix A. KD >= 0. */
+
+/* AB (input) REAL array, dimension (LDAB,N) */
+/* The upper or lower triangular band matrix A, stored in the */
+/* first kd+1 rows of the array. The j-th column of A is stored */
+/* in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+/* If DIAG = 'U', the diagonal elements of A are not referenced */
+/* and are assumed to be 1. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+ nounit = lsame_(diag, "N");
+
+ if (! onenrm && ! lsame_(norm, "I")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*kd < 0) {
+ *info = -5;
+ } else if (*ldab < *kd + 1) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STBCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *rcond = 1.f;
+ return 0;
+ }
+
+ *rcond = 0.f;
+ smlnum = slamch_("Safe minimum") * (real) max(1,*n);
+
+/* Compute the norm of the triangular matrix A. */
+
+ anorm = slantb_(norm, uplo, diag, n, kd, &ab[ab_offset], ldab, &work[1]);
+
+/* Continue only if ANORM > 0. */
+
+ if (anorm > 0.f) {
+
+/* Estimate the norm of the inverse of A. */
+
+ ainvnm = 0.f;
+ *(unsigned char *)normin = 'N';
+ if (onenrm) {
+ kase1 = 1;
+ } else {
+ kase1 = 2;
+ }
+ kase = 0;
+L10:
+ slacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+ if (kase == kase1) {
+
+/* Multiply by inv(A). */
+
+ slatbs_(uplo, "No transpose", diag, normin, n, kd, &ab[
+ ab_offset], ldab, &work[1], &scale, &work[(*n << 1) +
+ 1], info)
+ ;
+ } else {
+
+/* Multiply by inv(A'). */
+
+ slatbs_(uplo, "Transpose", diag, normin, n, kd, &ab[ab_offset]
+, ldab, &work[1], &scale, &work[(*n << 1) + 1], info);
+ }
+ *(unsigned char *)normin = 'Y';
+
+/* Multiply by 1/SCALE if doing so will not cause overflow. */
+
+ if (scale != 1.f) {
+ ix = isamax_(n, &work[1], &c__1);
+ xnorm = (r__1 = work[ix], dabs(r__1));
+ if (scale < xnorm * smlnum || scale == 0.f) {
+ goto L20;
+ }
+ srscl_(n, &scale, &work[1], &c__1);
+ }
+ goto L10;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.f) {
+ *rcond = 1.f / anorm / ainvnm;
+ }
+ }
+
+L20:
+ return 0;
+
+/* End of STBCON */
+
+} /* stbcon_ */
diff --git a/contrib/libs/clapack/stbrfs.c b/contrib/libs/clapack/stbrfs.c
new file mode 100644
index 0000000000..3ef7749d2f
--- /dev/null
+++ b/contrib/libs/clapack/stbrfs.c
@@ -0,0 +1,519 @@
+/* stbrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b19 = -1.f;
+
+/* Subroutine */ int stbrfs_(char *uplo, char *trans, char *diag, integer *n,
+ integer *kd, integer *nrhs, real *ab, integer *ldab, real *b, integer
+ *ldb, real *x, integer *ldx, real *ferr, real *berr, real *work,
+ integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, b_dim1, b_offset, x_dim1, x_offset, i__1,
+ i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3;
+
+ /* Local variables */
+ integer i__, j, k;
+ real s, xk;
+ integer nz;
+ real eps;
+ integer kase;
+ real safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ logical upper;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), stbmv_(char *, char *, char *, integer *, integer *,
+ real *, integer *, real *, integer *),
+ stbsv_(char *, char *, char *, integer *, integer *, real *,
+ integer *, real *, integer *), saxpy_(
+ integer *, real *, real *, integer *, real *, integer *), slacn2_(
+ integer *, real *, real *, integer *, real *, integer *, integer *
+);
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran;
+ char transt[1];
+ logical nounit;
+ real lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STBRFS provides error bounds and backward error estimates for the */
+/* solution to a system of linear equations with a triangular band */
+/* coefficient matrix. */
+
+/* The solution matrix X must be computed by STBTRS or some other */
+/* means before entering this routine. STBRFS does not do iterative */
+/* refinement because doing so cannot improve the backward error. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose = Transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals or subdiagonals of the */
+/* triangular band matrix A. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AB (input) REAL array, dimension (LDAB,N) */
+/* The upper or lower triangular band matrix A, stored in the */
+/* first kd+1 rows of the array. The j-th column of A is stored */
+/* in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+/* If DIAG = 'U', the diagonal elements of A are not referenced */
+/* and are assumed to be 1. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* B (input) REAL array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input) REAL array, dimension (LDX,NRHS) */
+/* The solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*kd < 0) {
+ *info = -5;
+ } else if (*nrhs < 0) {
+ *info = -6;
+ } else if (*ldab < *kd + 1) {
+ *info = -8;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldx < max(1,*n)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STBRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.f;
+ berr[j] = 0.f;
+/* L10: */
+ }
+ return 0;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = *kd + 2;
+ eps = slamch_("Epsilon");
+ safmin = slamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A or A', depending on TRANS. */
+
+ scopy_(n, &x[j * x_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+ stbmv_(uplo, trans, diag, n, kd, &ab[ab_offset], ldab, &work[*n + 1],
+ &c__1);
+ saxpy_(n, &c_b19, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] = (r__1 = b[i__ + j * b_dim1], dabs(r__1));
+/* L20: */
+ }
+
+ if (notran) {
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+ if (upper) {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+/* Computing MAX */
+ i__3 = 1, i__4 = k - *kd;
+ i__5 = k;
+ for (i__ = max(i__3,i__4); i__ <= i__5; ++i__) {
+ work[i__] += (r__1 = ab[*kd + 1 + i__ - k + k *
+ ab_dim1], dabs(r__1)) * xk;
+/* L30: */
+ }
+/* L40: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+/* Computing MAX */
+ i__5 = 1, i__3 = k - *kd;
+ i__4 = k - 1;
+ for (i__ = max(i__5,i__3); i__ <= i__4; ++i__) {
+ work[i__] += (r__1 = ab[*kd + 1 + i__ - k + k *
+ ab_dim1], dabs(r__1)) * xk;
+/* L50: */
+ }
+ work[k] += xk;
+/* L60: */
+ }
+ }
+ } else {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+/* Computing MIN */
+ i__5 = *n, i__3 = k + *kd;
+ i__4 = min(i__5,i__3);
+ for (i__ = k; i__ <= i__4; ++i__) {
+ work[i__] += (r__1 = ab[i__ + 1 - k + k * ab_dim1]
+ , dabs(r__1)) * xk;
+/* L70: */
+ }
+/* L80: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+/* Computing MIN */
+ i__5 = *n, i__3 = k + *kd;
+ i__4 = min(i__5,i__3);
+ for (i__ = k + 1; i__ <= i__4; ++i__) {
+ work[i__] += (r__1 = ab[i__ + 1 - k + k * ab_dim1]
+ , dabs(r__1)) * xk;
+/* L90: */
+ }
+ work[k] += xk;
+/* L100: */
+ }
+ }
+ }
+ } else {
+
+/* Compute abs(A')*abs(X) + abs(B). */
+
+ if (upper) {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+/* Computing MAX */
+ i__4 = 1, i__5 = k - *kd;
+ i__3 = k;
+ for (i__ = max(i__4,i__5); i__ <= i__3; ++i__) {
+ s += (r__1 = ab[*kd + 1 + i__ - k + k * ab_dim1],
+ dabs(r__1)) * (r__2 = x[i__ + j * x_dim1],
+ dabs(r__2));
+/* L110: */
+ }
+ work[k] += s;
+/* L120: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = (r__1 = x[k + j * x_dim1], dabs(r__1));
+/* Computing MAX */
+ i__3 = 1, i__4 = k - *kd;
+ i__5 = k - 1;
+ for (i__ = max(i__3,i__4); i__ <= i__5; ++i__) {
+ s += (r__1 = ab[*kd + 1 + i__ - k + k * ab_dim1],
+ dabs(r__1)) * (r__2 = x[i__ + j * x_dim1],
+ dabs(r__2));
+/* L130: */
+ }
+ work[k] += s;
+/* L140: */
+ }
+ }
+ } else {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+/* Computing MIN */
+ i__3 = *n, i__4 = k + *kd;
+ i__5 = min(i__3,i__4);
+ for (i__ = k; i__ <= i__5; ++i__) {
+ s += (r__1 = ab[i__ + 1 - k + k * ab_dim1], dabs(
+ r__1)) * (r__2 = x[i__ + j * x_dim1],
+ dabs(r__2));
+/* L150: */
+ }
+ work[k] += s;
+/* L160: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = (r__1 = x[k + j * x_dim1], dabs(r__1));
+/* Computing MIN */
+ i__3 = *n, i__4 = k + *kd;
+ i__5 = min(i__3,i__4);
+ for (i__ = k + 1; i__ <= i__5; ++i__) {
+ s += (r__1 = ab[i__ + 1 - k + k * ab_dim1], dabs(
+ r__1)) * (r__2 = x[i__ + j * x_dim1],
+ dabs(r__2));
+/* L170: */
+ }
+ work[k] += s;
+/* L180: */
+ }
+ }
+ }
+ }
+ s = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+/* Computing MAX */
+ r__2 = s, r__3 = (r__1 = work[*n + i__], dabs(r__1)) / work[
+ i__];
+ s = dmax(r__2,r__3);
+ } else {
+/* Computing MAX */
+ r__2 = s, r__3 = ((r__1 = work[*n + i__], dabs(r__1)) + safe1)
+ / (work[i__] + safe1);
+ s = dmax(r__2,r__3);
+ }
+/* L190: */
+ }
+ berr[j] = s;
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use SLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+ work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+ work[i__];
+ } else {
+ work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+ work[i__] + safe1;
+ }
+/* L200: */
+ }
+
+ kase = 0;
+L210:
+ slacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+ kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)'). */
+
+ stbsv_(uplo, transt, diag, n, kd, &ab[ab_offset], ldab, &work[
+ *n + 1], &c__1);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] = work[i__] * work[*n + i__];
+/* L220: */
+ }
+ } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] = work[i__] * work[*n + i__];
+/* L230: */
+ }
+ stbsv_(uplo, trans, diag, n, kd, &ab[ab_offset], ldab, &work[*
+ n + 1], &c__1);
+ }
+ goto L210;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = lstres, r__3 = (r__1 = x[i__ + j * x_dim1], dabs(r__1));
+ lstres = dmax(r__2,r__3);
+/* L240: */
+ }
+ if (lstres != 0.f) {
+ ferr[j] /= lstres;
+ }
+
+/* L250: */
+ }
+
+ return 0;
+
+/* End of STBRFS */
+
+} /* stbrfs_ */
diff --git a/contrib/libs/clapack/stbtrs.c b/contrib/libs/clapack/stbtrs.c
new file mode 100644
index 0000000000..3fdcfa22be
--- /dev/null
+++ b/contrib/libs/clapack/stbtrs.c
@@ -0,0 +1,203 @@
+/* stbtrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int stbtrs_(char *uplo, char *trans, char *diag, integer *n,
+ integer *kd, integer *nrhs, real *ab, integer *ldab, real *b, integer
+ *ldb, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ integer j;
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern /* Subroutine */ int stbsv_(char *, char *, char *, integer *,
+ integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+ logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STBTRS solves a triangular system of the form */
+
+/* A * X = B or A**T * X = B, */
+
+/* where A is a triangular band matrix of order N, and B is an */
+/* N-by NRHS matrix. A check is made to verify that A is nonsingular. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose = Transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals or subdiagonals of the */
+/* triangular band matrix A. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AB (input) REAL array, dimension (LDAB,N) */
+/* The upper or lower triangular band matrix A, stored in the */
+/* first kd+1 rows of AB. The j-th column of A is stored */
+/* in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+/* If DIAG = 'U', the diagonal elements of A are not referenced */
+/* and are assumed to be 1. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, if INFO = 0, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element of A is zero, */
+/* indicating that the matrix is singular and the */
+/* solutions X have not been computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*kd < 0) {
+ *info = -5;
+ } else if (*nrhs < 0) {
+ *info = -6;
+ } else if (*ldab < *kd + 1) {
+ *info = -8;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STBTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check for singularity. */
+
+ if (nounit) {
+ if (upper) {
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ if (ab[*kd + 1 + *info * ab_dim1] == 0.f) {
+ return 0;
+ }
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ if (ab[*info * ab_dim1 + 1] == 0.f) {
+ return 0;
+ }
+/* L20: */
+ }
+ }
+ }
+ *info = 0;
+
+/* Solve A * X = B or A' * X = B. */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ stbsv_(uplo, trans, diag, n, kd, &ab[ab_offset], ldab, &b[j * b_dim1
+ + 1], &c__1);
+/* L30: */
+ }
+
+ return 0;
+
+/* End of STBTRS */
+
+} /* stbtrs_ */
diff --git a/contrib/libs/clapack/stfsm.c b/contrib/libs/clapack/stfsm.c
new file mode 100644
index 0000000000..caec1199bc
--- /dev/null
+++ b/contrib/libs/clapack/stfsm.c
@@ -0,0 +1,973 @@
+/* stfsm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b23 = -1.f;
+static real c_b27 = 1.f;
+
+/* Subroutine */ int stfsm_(char *transr, char *side, char *uplo, char *trans,
+ char *diag, integer *m, integer *n, real *alpha, real *a, real *b,
+ integer *ldb)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, k, m1, m2, n1, n2, info;
+ logical normaltransr, lside;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ logical lower;
+ extern /* Subroutine */ int strsm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+), xerbla_(char *, integer *);
+ logical misodd, nisodd, notrans;
+
+
+/* -- LAPACK routine (version 3.2.1) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- April 2009 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Level 3 BLAS like routine for A in RFP Format. */
+
+/* STFSM solves the matrix equation */
+
+/* op( A )*X = alpha*B or X*op( A ) = alpha*B */
+
+/* where alpha is a scalar, X and B are m by n matrices, A is a unit, or */
+/* non-unit, upper or lower triangular matrix and op( A ) is one of */
+
+/* op( A ) = A or op( A ) = A'. */
+
+/* A is in Rectangular Full Packed (RFP) Format. */
+
+/* The matrix X is overwritten on B. */
+
+/* Arguments */
+/* ========== */
+
+/* TRANSR - (input) CHARACTER */
+/* = 'N': The Normal Form of RFP A is stored; */
+/* = 'T': The Transpose Form of RFP A is stored. */
+
+/* SIDE - (input) CHARACTER */
+/* On entry, SIDE specifies whether op( A ) appears on the left */
+/* or right of X as follows: */
+
+/* SIDE = 'L' or 'l' op( A )*X = alpha*B. */
+
+/* SIDE = 'R' or 'r' X*op( A ) = alpha*B. */
+
+/* Unchanged on exit. */
+
+/* UPLO - (input) CHARACTER */
+/* On entry, UPLO specifies whether the RFP matrix A came from */
+/* an upper or lower triangular matrix as follows: */
+/* UPLO = 'U' or 'u' RFP A came from an upper triangular matrix */
+/* UPLO = 'L' or 'l' RFP A came from a lower triangular matrix */
+
+/* Unchanged on exit. */
+
+/* TRANS - (input) CHARACTER */
+/* On entry, TRANS specifies the form of op( A ) to be used */
+/* in the matrix multiplication as follows: */
+
+/* TRANS = 'N' or 'n' op( A ) = A. */
+
+/* TRANS = 'T' or 't' op( A ) = A'. */
+
+/* Unchanged on exit. */
+
+/* DIAG - (input) CHARACTER */
+/* On entry, DIAG specifies whether or not RFP A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* M - (input) INTEGER. */
+/* On entry, M specifies the number of rows of B. M must be at */
+/* least zero. */
+/* Unchanged on exit. */
+
+/* N - (input) INTEGER. */
+/* On entry, N specifies the number of columns of B. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - (input) REAL. */
+/* On entry, ALPHA specifies the scalar alpha. When alpha is */
+/* zero then A is not referenced and B need not be set before */
+/* entry. */
+/* Unchanged on exit. */
+
+/* A - (input) REAL array, dimension (NT); */
+/* NT = N*(N+1)/2. On entry, the matrix A in RFP Format. */
+/* RFP Format is described by TRANSR, UPLO and N as follows: */
+/* If TRANSR='N' then RFP A is (0:N,0:K-1) when N is even; */
+/* K=N/2. RFP A is (0:N-1,0:K) when N is odd; K=N/2. If */
+/* TRANSR = 'T' then RFP is the transpose of RFP A as */
+/* defined when TRANSR = 'N'. The contents of RFP A are defined */
+/* by UPLO as follows: If UPLO = 'U' the RFP A contains the NT */
+/* elements of upper packed A either in normal or */
+/* transpose Format. If UPLO = 'L' the RFP A contains */
+/* the NT elements of lower packed A either in normal or */
+/* transpose Format. The LDA of RFP A is (N+1)/2 when */
+/* TRANSR = 'T'. When TRANSR is 'N' the LDA is N+1 when N is */
+/*           even and is N when N is odd. */
+/* See the Note below for more details. Unchanged on exit. */
+
+/*           B       - (input/output) REAL array,  DIMENSION (LDB,N) */
+/* Before entry, the leading m by n part of the array B must */
+/* contain the right-hand side matrix B, and on exit is */
+/* overwritten by the solution matrix X. */
+
+/* LDB - (input) INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. LDB must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+/* Further Details */
+/* =============== */
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* even. We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* the transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* the transpose of the last three columns of AP lower. */
+/* This covers the case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 03 04 05 33 43 53 */
+/* 13 14 15 00 44 54 */
+/* 23 24 25 10 11 55 */
+/* 33 34 35 20 21 22 */
+/* 00 44 45 30 31 32 */
+/* 01 11 55 40 41 42 */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* odd. We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* the transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* the transpose of the last two columns of AP lower. */
+/* This covers the case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 02 03 04 00 33 43 */
+/* 12 13 14 10 11 44 */
+/* 22 23 24 20 21 22 */
+/* 00 33 34 30 31 32 */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+/* RFP A RFP A */
+
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* Reference */
+/* ========= */
+
+/* ===================================================================== */
+
+/* .. */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb - 1 - 0 + 1;
+ b_offset = 0 + b_dim1 * 0;
+ b -= b_offset;
+
+ /* Function Body */
+ info = 0;
+ normaltransr = lsame_(transr, "N");
+ lside = lsame_(side, "L");
+ lower = lsame_(uplo, "L");
+ notrans = lsame_(trans, "N");
+ if (! normaltransr && ! lsame_(transr, "T")) {
+ info = -1;
+ } else if (! lside && ! lsame_(side, "R")) {
+ info = -2;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ info = -3;
+ } else if (! notrans && ! lsame_(trans, "T")) {
+ info = -4;
+ } else if (! lsame_(diag, "N") && ! lsame_(diag,
+ "U")) {
+ info = -5;
+ } else if (*m < 0) {
+ info = -6;
+ } else if (*n < 0) {
+ info = -7;
+ } else if (*ldb < max(1,*m)) {
+ info = -11;
+ }
+ if (info != 0) {
+ i__1 = -info;
+ xerbla_("STFSM ", &i__1);
+ return 0;
+ }
+
+/* Quick return when ( (N.EQ.0).OR.(M.EQ.0) ) */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Quick return when ALPHA.EQ.(0D+0) */
+
+ if (*alpha == 0.f) {
+ i__1 = *n - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *m - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+ if (lside) {
+
+/* SIDE = 'L' */
+
+/* A is M-by-M. */
+/*        If M is odd, set MISODD = .TRUE., and M1 and M2. */
+/*        If M is even, MISODD = .FALSE., and K. */
+
+ if (*m % 2 == 0) {
+ misodd = FALSE_;
+ k = *m / 2;
+ } else {
+ misodd = TRUE_;
+ if (lower) {
+ m2 = *m / 2;
+ m1 = *m - m2;
+ } else {
+ m1 = *m / 2;
+ m2 = *m - m1;
+ }
+ }
+
+ if (misodd) {
+
+/* SIDE = 'L' and N is odd */
+
+ if (normaltransr) {
+
+/* SIDE = 'L', N is odd, and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', UPLO = 'L', and */
+/* TRANS = 'N' */
+
+ if (*m == 1) {
+ strsm_("L", "L", "N", diag, &m1, n, alpha, a, m, &
+ b[b_offset], ldb);
+ } else {
+ strsm_("L", "L", "N", diag, &m1, n, alpha, a, m, &
+ b[b_offset], ldb);
+ sgemm_("N", "N", &m2, n, &m1, &c_b23, &a[m1], m, &
+ b[b_offset], ldb, alpha, &b[m1], ldb);
+ strsm_("L", "U", "T", diag, &m2, n, &c_b27, &a[*m]
+, m, &b[m1], ldb);
+ }
+
+ } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', UPLO = 'L', and */
+/* TRANS = 'T' */
+
+ if (*m == 1) {
+ strsm_("L", "L", "T", diag, &m1, n, alpha, a, m, &
+ b[b_offset], ldb);
+ } else {
+ strsm_("L", "U", "N", diag, &m2, n, alpha, &a[*m],
+ m, &b[m1], ldb);
+ sgemm_("T", "N", &m1, n, &m2, &c_b23, &a[m1], m, &
+ b[m1], ldb, alpha, &b[b_offset], ldb);
+ strsm_("L", "L", "T", diag, &m1, n, &c_b27, a, m,
+ &b[b_offset], ldb);
+ }
+
+ }
+
+ } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', and UPLO = 'U' */
+
+ if (! notrans) {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', UPLO = 'U', and */
+/* TRANS = 'N' */
+
+ strsm_("L", "L", "N", diag, &m1, n, alpha, &a[m2], m,
+ &b[b_offset], ldb);
+ sgemm_("T", "N", &m2, n, &m1, &c_b23, a, m, &b[
+ b_offset], ldb, alpha, &b[m1], ldb);
+ strsm_("L", "U", "T", diag, &m2, n, &c_b27, &a[m1], m,
+ &b[m1], ldb);
+
+ } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', UPLO = 'U', and */
+/* TRANS = 'T' */
+
+ strsm_("L", "U", "N", diag, &m2, n, alpha, &a[m1], m,
+ &b[m1], ldb);
+ sgemm_("N", "N", &m1, n, &m2, &c_b23, a, m, &b[m1],
+ ldb, alpha, &b[b_offset], ldb);
+ strsm_("L", "L", "T", diag, &m1, n, &c_b27, &a[m2], m,
+ &b[b_offset], ldb);
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'L', N is odd, and TRANSR = 'T' */
+
+ if (lower) {
+
+/* SIDE ='L', N is odd, TRANSR = 'T', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='L', N is odd, TRANSR = 'T', UPLO = 'L', and */
+/* TRANS = 'N' */
+
+ if (*m == 1) {
+ strsm_("L", "U", "T", diag, &m1, n, alpha, a, &m1,
+ &b[b_offset], ldb);
+ } else {
+ strsm_("L", "U", "T", diag, &m1, n, alpha, a, &m1,
+ &b[b_offset], ldb);
+ sgemm_("T", "N", &m2, n, &m1, &c_b23, &a[m1 * m1],
+ &m1, &b[b_offset], ldb, alpha, &b[m1],
+ ldb);
+ strsm_("L", "L", "N", diag, &m2, n, &c_b27, &a[1],
+ &m1, &b[m1], ldb);
+ }
+
+ } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'T', UPLO = 'L', and */
+/* TRANS = 'T' */
+
+ if (*m == 1) {
+ strsm_("L", "U", "N", diag, &m1, n, alpha, a, &m1,
+ &b[b_offset], ldb);
+ } else {
+ strsm_("L", "L", "T", diag, &m2, n, alpha, &a[1],
+ &m1, &b[m1], ldb);
+ sgemm_("N", "N", &m1, n, &m2, &c_b23, &a[m1 * m1],
+ &m1, &b[m1], ldb, alpha, &b[b_offset],
+ ldb);
+ strsm_("L", "U", "N", diag, &m1, n, &c_b27, a, &
+ m1, &b[b_offset], ldb);
+ }
+
+ }
+
+ } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'T', and UPLO = 'U' */
+
+ if (! notrans) {
+
+/* SIDE ='L', N is odd, TRANSR = 'T', UPLO = 'U', and */
+/* TRANS = 'N' */
+
+ strsm_("L", "U", "T", diag, &m1, n, alpha, &a[m2 * m2]
+, &m2, &b[b_offset], ldb);
+ sgemm_("N", "N", &m2, n, &m1, &c_b23, a, &m2, &b[
+ b_offset], ldb, alpha, &b[m1], ldb);
+ strsm_("L", "L", "N", diag, &m2, n, &c_b27, &a[m1 *
+ m2], &m2, &b[m1], ldb);
+
+ } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'T', UPLO = 'U', and */
+/* TRANS = 'T' */
+
+ strsm_("L", "L", "T", diag, &m2, n, alpha, &a[m1 * m2]
+, &m2, &b[m1], ldb);
+ sgemm_("T", "N", &m1, n, &m2, &c_b23, a, &m2, &b[m1],
+ ldb, alpha, &b[b_offset], ldb);
+ strsm_("L", "U", "N", diag, &m1, n, &c_b27, &a[m2 *
+ m2], &m2, &b[b_offset], ldb);
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'L' and N is even */
+
+ if (normaltransr) {
+
+/* SIDE = 'L', N is even, and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SIDE ='L', N is even, TRANSR = 'N', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'L', */
+/* and TRANS = 'N' */
+
+ i__1 = *m + 1;
+ strsm_("L", "L", "N", diag, &k, n, alpha, &a[1], &
+ i__1, &b[b_offset], ldb);
+ i__1 = *m + 1;
+ sgemm_("N", "N", &k, n, &k, &c_b23, &a[k + 1], &i__1,
+ &b[b_offset], ldb, alpha, &b[k], ldb);
+ i__1 = *m + 1;
+ strsm_("L", "U", "T", diag, &k, n, &c_b27, a, &i__1, &
+ b[k], ldb);
+
+ } else {
+
+/* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'L', */
+/* and TRANS = 'T' */
+
+ i__1 = *m + 1;
+ strsm_("L", "U", "N", diag, &k, n, alpha, a, &i__1, &
+ b[k], ldb);
+ i__1 = *m + 1;
+ sgemm_("T", "N", &k, n, &k, &c_b23, &a[k + 1], &i__1,
+ &b[k], ldb, alpha, &b[b_offset], ldb);
+ i__1 = *m + 1;
+ strsm_("L", "L", "T", diag, &k, n, &c_b27, &a[1], &
+ i__1, &b[b_offset], ldb);
+
+ }
+
+ } else {
+
+/* SIDE ='L', N is even, TRANSR = 'N', and UPLO = 'U' */
+
+ if (! notrans) {
+
+/* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'U', */
+/* and TRANS = 'N' */
+
+ i__1 = *m + 1;
+ strsm_("L", "L", "N", diag, &k, n, alpha, &a[k + 1], &
+ i__1, &b[b_offset], ldb);
+ i__1 = *m + 1;
+ sgemm_("T", "N", &k, n, &k, &c_b23, a, &i__1, &b[
+ b_offset], ldb, alpha, &b[k], ldb);
+ i__1 = *m + 1;
+ strsm_("L", "U", "T", diag, &k, n, &c_b27, &a[k], &
+ i__1, &b[k], ldb);
+
+ } else {
+
+/* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'U', */
+/* and TRANS = 'T' */
+ i__1 = *m + 1;
+ strsm_("L", "U", "N", diag, &k, n, alpha, &a[k], &
+ i__1, &b[k], ldb);
+ i__1 = *m + 1;
+ sgemm_("N", "N", &k, n, &k, &c_b23, a, &i__1, &b[k],
+ ldb, alpha, &b[b_offset], ldb);
+ i__1 = *m + 1;
+ strsm_("L", "L", "T", diag, &k, n, &c_b27, &a[k + 1],
+ &i__1, &b[b_offset], ldb);
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'L', N is even, and TRANSR = 'T' */
+
+ if (lower) {
+
+/* SIDE ='L', N is even, TRANSR = 'T', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='L', N is even, TRANSR = 'T', UPLO = 'L', */
+/* and TRANS = 'N' */
+
+ strsm_("L", "U", "T", diag, &k, n, alpha, &a[k], &k, &
+ b[b_offset], ldb);
+ sgemm_("T", "N", &k, n, &k, &c_b23, &a[k * (k + 1)], &
+ k, &b[b_offset], ldb, alpha, &b[k], ldb);
+ strsm_("L", "L", "N", diag, &k, n, &c_b27, a, &k, &b[
+ k], ldb);
+
+ } else {
+
+/* SIDE ='L', N is even, TRANSR = 'T', UPLO = 'L', */
+/* and TRANS = 'T' */
+
+ strsm_("L", "L", "T", diag, &k, n, alpha, a, &k, &b[k]
+, ldb);
+ sgemm_("N", "N", &k, n, &k, &c_b23, &a[k * (k + 1)], &
+ k, &b[k], ldb, alpha, &b[b_offset], ldb);
+ strsm_("L", "U", "N", diag, &k, n, &c_b27, &a[k], &k,
+ &b[b_offset], ldb);
+
+ }
+
+ } else {
+
+/* SIDE ='L', N is even, TRANSR = 'T', and UPLO = 'U' */
+
+ if (! notrans) {
+
+/* SIDE ='L', N is even, TRANSR = 'T', UPLO = 'U', */
+/* and TRANS = 'N' */
+
+ strsm_("L", "U", "T", diag, &k, n, alpha, &a[k * (k +
+ 1)], &k, &b[b_offset], ldb);
+ sgemm_("N", "N", &k, n, &k, &c_b23, a, &k, &b[
+ b_offset], ldb, alpha, &b[k], ldb);
+ strsm_("L", "L", "N", diag, &k, n, &c_b27, &a[k * k],
+ &k, &b[k], ldb);
+
+ } else {
+
+/* SIDE ='L', N is even, TRANSR = 'T', UPLO = 'U', */
+/* and TRANS = 'T' */
+
+ strsm_("L", "L", "T", diag, &k, n, alpha, &a[k * k], &
+ k, &b[k], ldb);
+ sgemm_("T", "N", &k, n, &k, &c_b23, a, &k, &b[k], ldb,
+ alpha, &b[b_offset], ldb);
+ strsm_("L", "U", "N", diag, &k, n, &c_b27, &a[k * (k
+ + 1)], &k, &b[b_offset], ldb);
+
+ }
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'R' */
+
+/* A is N-by-N. */
+/* If N is odd, set NISODD = .TRUE., and N1 and N2. */
+/* If N is even, NISODD = .FALSE., and K. */
+
+ if (*n % 2 == 0) {
+ nisodd = FALSE_;
+ k = *n / 2;
+ } else {
+ nisodd = TRUE_;
+ if (lower) {
+ n2 = *n / 2;
+ n1 = *n - n2;
+ } else {
+ n1 = *n / 2;
+ n2 = *n - n1;
+ }
+ }
+
+ if (nisodd) {
+
+/* SIDE = 'R' and N is odd */
+
+ if (normaltransr) {
+
+/* SIDE = 'R', N is odd, and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', UPLO = 'L', and */
+/* TRANS = 'N' */
+
+ strsm_("R", "U", "T", diag, m, &n2, alpha, &a[*n], n,
+ &b[n1 * b_dim1], ldb);
+ sgemm_("N", "N", m, &n1, &n2, &c_b23, &b[n1 * b_dim1],
+ ldb, &a[n1], n, alpha, b, ldb);
+ strsm_("R", "L", "N", diag, m, &n1, &c_b27, a, n, b,
+ ldb);
+
+ } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', UPLO = 'L', and */
+/* TRANS = 'T' */
+
+ strsm_("R", "L", "T", diag, m, &n1, alpha, a, n, b,
+ ldb);
+ sgemm_("N", "T", m, &n2, &n1, &c_b23, b, ldb, &a[n1],
+ n, alpha, &b[n1 * b_dim1], ldb);
+ strsm_("R", "U", "N", diag, m, &n2, &c_b27, &a[*n], n,
+ &b[n1 * b_dim1], ldb);
+
+ }
+
+ } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', UPLO = 'U', and */
+/* TRANS = 'N' */
+
+ strsm_("R", "L", "T", diag, m, &n1, alpha, &a[n2], n,
+ b, ldb);
+ sgemm_("N", "N", m, &n2, &n1, &c_b23, b, ldb, a, n,
+ alpha, &b[n1 * b_dim1], ldb);
+ strsm_("R", "U", "N", diag, m, &n2, &c_b27, &a[n1], n,
+ &b[n1 * b_dim1], ldb);
+
+ } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', UPLO = 'U', and */
+/* TRANS = 'T' */
+
+ strsm_("R", "U", "T", diag, m, &n2, alpha, &a[n1], n,
+ &b[n1 * b_dim1], ldb);
+ sgemm_("N", "T", m, &n1, &n2, &c_b23, &b[n1 * b_dim1],
+ ldb, a, n, alpha, b, ldb);
+ strsm_("R", "L", "N", diag, m, &n1, &c_b27, &a[n2], n,
+ b, ldb);
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'R', N is odd, and TRANSR = 'T' */
+
+ if (lower) {
+
+/* SIDE ='R', N is odd, TRANSR = 'T', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is odd, TRANSR = 'T', UPLO = 'L', and */
+/* TRANS = 'N' */
+
+ strsm_("R", "L", "N", diag, m, &n2, alpha, &a[1], &n1,
+ &b[n1 * b_dim1], ldb);
+ sgemm_("N", "T", m, &n1, &n2, &c_b23, &b[n1 * b_dim1],
+ ldb, &a[n1 * n1], &n1, alpha, b, ldb);
+ strsm_("R", "U", "T", diag, m, &n1, &c_b27, a, &n1, b,
+ ldb);
+
+ } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'T', UPLO = 'L', and */
+/* TRANS = 'T' */
+
+ strsm_("R", "U", "N", diag, m, &n1, alpha, a, &n1, b,
+ ldb);
+ sgemm_("N", "N", m, &n2, &n1, &c_b23, b, ldb, &a[n1 *
+ n1], &n1, alpha, &b[n1 * b_dim1], ldb);
+ strsm_("R", "L", "T", diag, m, &n2, &c_b27, &a[1], &
+ n1, &b[n1 * b_dim1], ldb);
+
+ }
+
+ } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'T', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is odd, TRANSR = 'T', UPLO = 'U', and */
+/* TRANS = 'N' */
+
+ strsm_("R", "U", "N", diag, m, &n1, alpha, &a[n2 * n2]
+, &n2, b, ldb);
+ sgemm_("N", "T", m, &n2, &n1, &c_b23, b, ldb, a, &n2,
+ alpha, &b[n1 * b_dim1], ldb);
+ strsm_("R", "L", "T", diag, m, &n2, &c_b27, &a[n1 *
+ n2], &n2, &b[n1 * b_dim1], ldb);
+
+ } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'T', UPLO = 'U', and */
+/* TRANS = 'T' */
+
+ strsm_("R", "L", "N", diag, m, &n2, alpha, &a[n1 * n2]
+, &n2, &b[n1 * b_dim1], ldb);
+ sgemm_("N", "N", m, &n1, &n2, &c_b23, &b[n1 * b_dim1],
+ ldb, a, &n2, alpha, b, ldb);
+ strsm_("R", "U", "T", diag, m, &n1, &c_b27, &a[n2 *
+ n2], &n2, b, ldb);
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'R' and N is even */
+
+ if (normaltransr) {
+
+/* SIDE = 'R', N is even, and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SIDE ='R', N is even, TRANSR = 'N', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is even, TRANSR = 'N', UPLO = 'L', */
+/* and TRANS = 'N' */
+
+ i__1 = *n + 1;
+ strsm_("R", "U", "T", diag, m, &k, alpha, a, &i__1, &
+ b[k * b_dim1], ldb);
+ i__1 = *n + 1;
+ sgemm_("N", "N", m, &k, &k, &c_b23, &b[k * b_dim1],
+ ldb, &a[k + 1], &i__1, alpha, b, ldb);
+ i__1 = *n + 1;
+ strsm_("R", "L", "N", diag, m, &k, &c_b27, &a[1], &
+ i__1, b, ldb);
+
+ } else {
+
+/* SIDE ='R', N is even, TRANSR = 'N', UPLO = 'L', */
+/* and TRANS = 'T' */
+
+ i__1 = *n + 1;
+ strsm_("R", "L", "T", diag, m, &k, alpha, &a[1], &
+ i__1, b, ldb);
+ i__1 = *n + 1;
+ sgemm_("N", "T", m, &k, &k, &c_b23, b, ldb, &a[k + 1],
+ &i__1, alpha, &b[k * b_dim1], ldb);
+ i__1 = *n + 1;
+ strsm_("R", "U", "N", diag, m, &k, &c_b27, a, &i__1, &
+ b[k * b_dim1], ldb);
+
+ }
+
+ } else {
+
+/* SIDE ='R', N is even, TRANSR = 'N', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is even, TRANSR = 'N', UPLO = 'U', */
+/* and TRANS = 'N' */
+
+ i__1 = *n + 1;
+ strsm_("R", "L", "T", diag, m, &k, alpha, &a[k + 1], &
+ i__1, b, ldb);
+ i__1 = *n + 1;
+ sgemm_("N", "N", m, &k, &k, &c_b23, b, ldb, a, &i__1,
+ alpha, &b[k * b_dim1], ldb);
+ i__1 = *n + 1;
+ strsm_("R", "U", "N", diag, m, &k, &c_b27, &a[k], &
+ i__1, &b[k * b_dim1], ldb);
+
+ } else {
+
+/* SIDE ='R', N is even, TRANSR = 'N', UPLO = 'U', */
+/* and TRANS = 'T' */
+
+ i__1 = *n + 1;
+ strsm_("R", "U", "T", diag, m, &k, alpha, &a[k], &
+ i__1, &b[k * b_dim1], ldb);
+ i__1 = *n + 1;
+ sgemm_("N", "T", m, &k, &k, &c_b23, &b[k * b_dim1],
+ ldb, a, &i__1, alpha, b, ldb);
+ i__1 = *n + 1;
+ strsm_("R", "L", "N", diag, m, &k, &c_b27, &a[k + 1],
+ &i__1, b, ldb);
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'R', N is even, and TRANSR = 'T' */
+
+ if (lower) {
+
+/* SIDE ='R', N is even, TRANSR = 'T', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is even, TRANSR = 'T', UPLO = 'L', */
+/* and TRANS = 'N' */
+
+ strsm_("R", "L", "N", diag, m, &k, alpha, a, &k, &b[k
+ * b_dim1], ldb);
+ sgemm_("N", "T", m, &k, &k, &c_b23, &b[k * b_dim1],
+ ldb, &a[(k + 1) * k], &k, alpha, b, ldb);
+ strsm_("R", "U", "T", diag, m, &k, &c_b27, &a[k], &k,
+ b, ldb);
+
+ } else {
+
+/* SIDE ='R', N is even, TRANSR = 'T', UPLO = 'L', */
+/* and TRANS = 'T' */
+
+ strsm_("R", "U", "N", diag, m, &k, alpha, &a[k], &k,
+ b, ldb);
+ sgemm_("N", "N", m, &k, &k, &c_b23, b, ldb, &a[(k + 1)
+ * k], &k, alpha, &b[k * b_dim1], ldb);
+ strsm_("R", "L", "T", diag, m, &k, &c_b27, a, &k, &b[
+ k * b_dim1], ldb);
+
+ }
+
+ } else {
+
+/* SIDE ='R', N is even, TRANSR = 'T', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is even, TRANSR = 'T', UPLO = 'U', */
+/* and TRANS = 'N' */
+
+ strsm_("R", "U", "N", diag, m, &k, alpha, &a[(k + 1) *
+ k], &k, b, ldb);
+ sgemm_("N", "T", m, &k, &k, &c_b23, b, ldb, a, &k,
+ alpha, &b[k * b_dim1], ldb);
+ strsm_("R", "L", "T", diag, m, &k, &c_b27, &a[k * k],
+ &k, &b[k * b_dim1], ldb);
+
+ } else {
+
+/* SIDE ='R', N is even, TRANSR = 'T', UPLO = 'U', */
+/* and TRANS = 'T' */
+
+ strsm_("R", "L", "N", diag, m, &k, alpha, &a[k * k], &
+ k, &b[k * b_dim1], ldb);
+ sgemm_("N", "N", m, &k, &k, &c_b23, &b[k * b_dim1],
+ ldb, a, &k, alpha, b, ldb);
+ strsm_("R", "U", "T", diag, m, &k, &c_b27, &a[(k + 1)
+ * k], &k, b, ldb);
+
+ }
+
+ }
+
+ }
+
+ }
+ }
+
+ return 0;
+
+/* End of STFSM */
+
+} /* stfsm_ */
diff --git a/contrib/libs/clapack/stftri.c b/contrib/libs/clapack/stftri.c
new file mode 100644
index 0000000000..ac83c977d8
--- /dev/null
+++ b/contrib/libs/clapack/stftri.c
@@ -0,0 +1,473 @@
+/* stftri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b13 = -1.f;
+static real c_b18 = 1.f;
+
+/* Subroutine */ int stftri_(char *transr, char *uplo, char *diag, integer *n,
+ real *a, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ integer k, n1, n2;
+ logical normaltransr;
+ extern logical lsame_(char *, char *);
+ logical lower;
+ extern /* Subroutine */ int strmm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+), xerbla_(char *, integer *);
+ logical nisodd;
+ extern /* Subroutine */ int strtri_(char *, char *, integer *, real *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STFTRI computes the inverse of a triangular matrix A stored in RFP */
+/* format. */
+
+/* This is a Level 3 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': The Normal TRANSR of RFP A is stored; */
+/* = 'T': The Transpose TRANSR of RFP A is stored. */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* DIAG (input) CHARACTER */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (NT); */
+/* NT=N*(N+1)/2. On entry, the triangular factor of a Hermitian */
+/* Positive Definite matrix A in RFP format. RFP format is */
+/* described by TRANSR, UPLO, and N as follows: If TRANSR = 'N' */
+/* then RFP A is (0:N,0:k-1) when N is even; k=N/2. RFP A is */
+/* (0:N-1,0:k) when N is odd; k=N/2. IF TRANSR = 'T' then RFP is */
+/* the transpose of RFP A as defined when */
+/* TRANSR = 'N'. The contents of RFP A are defined by UPLO as */
+/* follows: If UPLO = 'U' the RFP A contains the nt elements of */
+/* upper packed A; If UPLO = 'L' the RFP A contains the nt */
+/* elements of lower packed A. The LDA of RFP A is (N+1)/2 when */
+/* TRANSR = 'T'. When TRANSR is 'N' the LDA is N+1 when N is */
+/*          even and N when N is odd. See the Note below for more details. */
+
+/* On exit, the (triangular) inverse of the original matrix, in */
+/* the same storage format. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, A(i,i) is exactly zero. The triangular */
+/* matrix is singular and its inverse can not be computed. */
+
+/* Notes */
+/* ===== */
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* even. We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* the transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* the transpose of the last three columns of AP lower. */
+/* This covers the case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 03 04 05 33 43 53 */
+/* 13 14 15 00 44 54 */
+/* 23 24 25 10 11 55 */
+/* 33 34 35 20 21 22 */
+/* 00 44 45 30 31 32 */
+/* 01 11 55 40 41 42 */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* odd. We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* the transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* the transpose of the last two columns of AP lower. */
+/* This covers the case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 02 03 04 00 33 43 */
+/* 12 13 14 10 11 44 */
+/* 22 23 24 20 21 22 */
+/* 00 33 34 30 31 32 */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+/* RFP A RFP A */
+
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ *info = 0;
+ normaltransr = lsame_(transr, "N");
+ lower = lsame_(uplo, "L");
+ if (! normaltransr && ! lsame_(transr, "T")) {
+ *info = -1;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ *info = -2;
+ } else if (! lsame_(diag, "N") && ! lsame_(diag,
+ "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STFTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* If N is odd, set NISODD = .TRUE. */
+/* If N is even, set K = N/2 and NISODD = .FALSE. */
+
+ if (*n % 2 == 0) {
+ k = *n / 2;
+ nisodd = FALSE_;
+ } else {
+ nisodd = TRUE_;
+ }
+
+/* Set N1 and N2 depending on LOWER */
+
+ if (lower) {
+ n2 = *n / 2;
+ n1 = *n - n2;
+ } else {
+ n1 = *n / 2;
+ n2 = *n - n1;
+ }
+
+
+/* start execution: there are eight cases */
+
+ if (nisodd) {
+
+/* N is odd */
+
+ if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:n1-1) ) */
+/* T1 -> a(0,0), T2 -> a(0,1), S -> a(n1,0) */
+/* T1 -> a(0), T2 -> a(n), S -> a(n1) */
+
+ strtri_("L", diag, &n1, a, n, info);
+ if (*info > 0) {
+ return 0;
+ }
+ strmm_("R", "L", "N", diag, &n2, &n1, &c_b13, a, n, &a[n1], n);
+ strtri_("U", diag, &n2, &a[*n], n, info)
+ ;
+ if (*info > 0) {
+ *info += n1;
+ }
+ if (*info > 0) {
+ return 0;
+ }
+ strmm_("L", "U", "T", diag, &n2, &n1, &c_b18, &a[*n], n, &a[
+ n1], n);
+
+ } else {
+
+/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:n2-1) */
+/* T1 -> a(n1+1,0), T2 -> a(n1,0), S -> a(0,0) */
+/* T1 -> a(n2), T2 -> a(n1), S -> a(0) */
+
+ strtri_("L", diag, &n1, &a[n2], n, info)
+ ;
+ if (*info > 0) {
+ return 0;
+ }
+ strmm_("L", "L", "T", diag, &n1, &n2, &c_b13, &a[n2], n, a, n);
+ strtri_("U", diag, &n2, &a[n1], n, info)
+ ;
+ if (*info > 0) {
+ *info += n1;
+ }
+ if (*info > 0) {
+ return 0;
+ }
+ strmm_("R", "U", "N", diag, &n1, &n2, &c_b18, &a[n1], n, a, n);
+
+ }
+
+ } else {
+
+/* N is odd and TRANSR = 'T' */
+
+ if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is odd */
+/* T1 -> a(0), T2 -> a(1), S -> a(0+n1*n1) */
+
+ strtri_("U", diag, &n1, a, &n1, info);
+ if (*info > 0) {
+ return 0;
+ }
+ strmm_("L", "U", "N", diag, &n1, &n2, &c_b13, a, &n1, &a[n1 *
+ n1], &n1);
+ strtri_("L", diag, &n2, &a[1], &n1, info);
+ if (*info > 0) {
+ *info += n1;
+ }
+ if (*info > 0) {
+ return 0;
+ }
+ strmm_("R", "L", "T", diag, &n1, &n2, &c_b18, &a[1], &n1, &a[
+ n1 * n1], &n1);
+
+ } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is odd */
+/* T1 -> a(0+n2*n2), T2 -> a(0+n1*n2), S -> a(0) */
+
+ strtri_("U", diag, &n1, &a[n2 * n2], &n2, info);
+ if (*info > 0) {
+ return 0;
+ }
+ strmm_("R", "U", "T", diag, &n2, &n1, &c_b13, &a[n2 * n2], &
+ n2, a, &n2);
+ strtri_("L", diag, &n2, &a[n1 * n2], &n2, info);
+ if (*info > 0) {
+ *info += n1;
+ }
+ if (*info > 0) {
+ return 0;
+ }
+ strmm_("L", "L", "N", diag, &n2, &n1, &c_b18, &a[n1 * n2], &
+ n2, a, &n2);
+ }
+
+ }
+
+ } else {
+
+/* N is even */
+
+ if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/* T1 -> a(1), T2 -> a(0), S -> a(k+1) */
+
+ i__1 = *n + 1;
+ strtri_("L", diag, &k, &a[1], &i__1, info);
+ if (*info > 0) {
+ return 0;
+ }
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ strmm_("R", "L", "N", diag, &k, &k, &c_b13, &a[1], &i__1, &a[
+ k + 1], &i__2);
+ i__1 = *n + 1;
+ strtri_("U", diag, &k, a, &i__1, info);
+ if (*info > 0) {
+ *info += k;
+ }
+ if (*info > 0) {
+ return 0;
+ }
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ strmm_("L", "U", "T", diag, &k, &k, &c_b18, a, &i__1, &a[k +
+ 1], &i__2)
+ ;
+
+ } else {
+
+/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(k+1,0) , T2 -> a(k,0), S -> a(0,0) */
+/* T1 -> a(k+1), T2 -> a(k), S -> a(0) */
+
+ i__1 = *n + 1;
+ strtri_("L", diag, &k, &a[k + 1], &i__1, info);
+ if (*info > 0) {
+ return 0;
+ }
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ strmm_("L", "L", "T", diag, &k, &k, &c_b13, &a[k + 1], &i__1,
+ a, &i__2);
+ i__1 = *n + 1;
+ strtri_("U", diag, &k, &a[k], &i__1, info);
+ if (*info > 0) {
+ *info += k;
+ }
+ if (*info > 0) {
+ return 0;
+ }
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ strmm_("R", "U", "N", diag, &k, &k, &c_b18, &a[k], &i__1, a, &
+ i__2);
+ }
+ } else {
+
+/* N is even and TRANSR = 'T' */
+
+ if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,1), T2 -> B(0,0), S -> B(0,k+1) */
+/* T1 -> a(0+k), T2 -> a(0+0), S -> a(0+k*(k+1)); lda=k */
+
+ strtri_("U", diag, &k, &a[k], &k, info);
+ if (*info > 0) {
+ return 0;
+ }
+ strmm_("L", "U", "N", diag, &k, &k, &c_b13, &a[k], &k, &a[k *
+ (k + 1)], &k);
+ strtri_("L", diag, &k, a, &k, info);
+ if (*info > 0) {
+ *info += k;
+ }
+ if (*info > 0) {
+ return 0;
+ }
+ strmm_("R", "L", "T", diag, &k, &k, &c_b18, a, &k, &a[k * (k
+ + 1)], &k)
+ ;
+ } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,k+1), T2 -> B(0,k), S -> B(0,0) */
+/* T1 -> a(0+k*(k+1)), T2 -> a(0+k*k), S -> a(0+0)); lda=k */
+
+ strtri_("U", diag, &k, &a[k * (k + 1)], &k, info);
+ if (*info > 0) {
+ return 0;
+ }
+ strmm_("R", "U", "T", diag, &k, &k, &c_b13, &a[k * (k + 1)], &
+ k, a, &k);
+ strtri_("L", diag, &k, &a[k * k], &k, info);
+ if (*info > 0) {
+ *info += k;
+ }
+ if (*info > 0) {
+ return 0;
+ }
+ strmm_("L", "L", "N", diag, &k, &k, &c_b18, &a[k * k], &k, a,
+ &k);
+ }
+ }
+ }
+
+ return 0;
+
+/* End of STFTRI */
+
+} /* stftri_ */
diff --git a/contrib/libs/clapack/stfttp.c b/contrib/libs/clapack/stfttp.c
new file mode 100644
index 0000000000..2c10db5c5e
--- /dev/null
+++ b/contrib/libs/clapack/stfttp.c
@@ -0,0 +1,514 @@
+/* stfttp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int stfttp_(char *transr, char *uplo, integer *n, real *arf,
+ real *ap, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, k, n1, n2, ij, jp, js, nt, lda, ijp;
+ logical normaltransr;
+ extern logical lsame_(char *, char *);
+ logical lower;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nisodd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STFTTP copies a triangular matrix A from rectangular full packed */
+/* format (TF) to standard packed format (TP). */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': ARF is in Normal format; */
+/* = 'T': ARF is in Transpose format; */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* ARF (input) REAL array, dimension ( N*(N+1)/2 ), */
+/* On entry, the upper or lower triangular matrix A stored in */
+/* RFP format. For a further discussion see Notes below. */
+
+/* AP (output) REAL array, dimension ( N*(N+1)/2 ), */
+/* On exit, the upper or lower triangular matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Notes */
+/* ===== */
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* even. We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* the transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* the transpose of the last three columns of AP lower. */
+/* This covers the case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 03 04 05 33 43 53 */
+/* 13 14 15 00 44 54 */
+/* 23 24 25 10 11 55 */
+/* 33 34 35 20 21 22 */
+/* 00 44 45 30 31 32 */
+/* 01 11 55 40 41 42 */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* odd. We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* the transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* the transpose of the last two columns of AP lower. */
+/* This covers the case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 02 03 04 00 33 43 */
+/* 12 13 14 10 11 44 */
+/* 22 23 24 20 21 22 */
+/* 00 33 34 30 31 32 */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+/* RFP A RFP A */
+
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ *info = 0;
+ normaltransr = lsame_(transr, "N");
+ lower = lsame_(uplo, "L");
+ if (! normaltransr && ! lsame_(transr, "T")) {
+ *info = -1;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STFTTP", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (normaltransr) {
+ ap[0] = arf[0];
+ } else {
+ ap[0] = arf[0];
+ }
+ return 0;
+ }
+
+/* Size of array ARF(0:NT-1) */
+
+ nt = *n * (*n + 1) / 2;
+
+/* Set N1 and N2 depending on LOWER */
+
+ if (lower) {
+ n2 = *n / 2;
+ n1 = *n - n2;
+ } else {
+ n1 = *n / 2;
+ n2 = *n - n1;
+ }
+
+/* If N is odd, set NISODD = .TRUE. */
+/* If N is even, set K = N/2 and NISODD = .FALSE. */
+
+/* set lda of ARF^C; ARF^C is (0:(N+1)/2-1,0:N-noe) */
+/* where noe = 0 if n is even, noe = 1 if n is odd */
+
+ if (*n % 2 == 0) {
+ k = *n / 2;
+ nisodd = FALSE_;
+ lda = *n + 1;
+ } else {
+ nisodd = TRUE_;
+ lda = *n;
+ }
+
+/* ARF^C has lda rows and n+1-noe cols */
+
+ if (! normaltransr) {
+ lda = (*n + 1) / 2;
+ }
+
+/* start execution: there are eight cases */
+
+ if (nisodd) {
+
+/* N is odd */
+
+ if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:n1-1) ) */
+/* T1 -> a(0,0), T2 -> a(0,1), S -> a(n1,0) */
+/* T1 -> a(0), T2 -> a(n), S -> a(n1); lda = n */
+
+ ijp = 0;
+ jp = 0;
+ i__1 = n2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - 1;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ ij = i__ + jp;
+ ap[ijp] = arf[ij];
+ ++ijp;
+ }
+ jp += lda;
+ }
+ i__1 = n2 - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = n2;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ ij = i__ + j * lda;
+ ap[ijp] = arf[ij];
+ ++ijp;
+ }
+ }
+
+ } else {
+
+/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:n2-1) */
+/* T1 -> a(n1+1,0), T2 -> a(n1,0), S -> a(0,0) */
+/* T1 -> a(n2), T2 -> a(n1), S -> a(0) */
+
+ ijp = 0;
+ i__1 = n1 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ ij = n2 + j;
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ ap[ijp] = arf[ij];
+ ++ijp;
+ ij += lda;
+ }
+ }
+ js = 0;
+ i__1 = *n - 1;
+ for (j = n1; j <= i__1; ++j) {
+ ij = js;
+ i__2 = js + j;
+ for (ij = js; ij <= i__2; ++ij) {
+ ap[ijp] = arf[ij];
+ ++ijp;
+ }
+ js += lda;
+ }
+
+ }
+
+ } else {
+
+/* N is odd and TRANSR = 'T' */
+
+ if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is odd */
+/* T1 -> A(0,0) , T2 -> A(1,0) , S -> A(0,n1) */
+/* T1 -> a(0+0) , T2 -> a(1+0) , S -> a(0+n1*n1); lda=n1 */
+
+ ijp = 0;
+ i__1 = n2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = *n * lda - 1;
+ i__3 = lda;
+ for (ij = i__ * (lda + 1); i__3 < 0 ? ij >= i__2 : ij <=
+ i__2; ij += i__3) {
+ ap[ijp] = arf[ij];
+ ++ijp;
+ }
+ }
+ js = 1;
+ i__1 = n2 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__3 = js + n2 - j - 1;
+ for (ij = js; ij <= i__3; ++ij) {
+ ap[ijp] = arf[ij];
+ ++ijp;
+ }
+ js = js + lda + 1;
+ }
+
+ } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is odd */
+/* T1 -> A(0,n1+1), T2 -> A(0,n1), S -> A(0,0) */
+/* T1 -> a(n2*n2), T2 -> a(n1*n2), S -> a(0); lda = n2 */
+
+ ijp = 0;
+ js = n2 * lda;
+ i__1 = n1 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__3 = js + j;
+ for (ij = js; ij <= i__3; ++ij) {
+ ap[ijp] = arf[ij];
+ ++ijp;
+ }
+ js += lda;
+ }
+ i__1 = n1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__3 = i__ + (n1 + i__) * lda;
+ i__2 = lda;
+ for (ij = i__; i__2 < 0 ? ij >= i__3 : ij <= i__3; ij +=
+ i__2) {
+ ap[ijp] = arf[ij];
+ ++ijp;
+ }
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* N is even */
+
+ if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/* T1 -> a(1), T2 -> a(0), S -> a(k+1) */
+
+ ijp = 0;
+ jp = 0;
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - 1;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ ij = i__ + 1 + jp;
+ ap[ijp] = arf[ij];
+ ++ijp;
+ }
+ jp += lda;
+ }
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = k - 1;
+ for (j = i__; j <= i__2; ++j) {
+ ij = i__ + j * lda;
+ ap[ijp] = arf[ij];
+ ++ijp;
+ }
+ }
+
+ } else {
+
+/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(k+1,0) , T2 -> a(k,0), S -> a(0,0) */
+/* T1 -> a(k+1), T2 -> a(k), S -> a(0) */
+
+ ijp = 0;
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ ij = k + 1 + j;
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ ap[ijp] = arf[ij];
+ ++ijp;
+ ij += lda;
+ }
+ }
+ js = 0;
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+ ij = js;
+ i__2 = js + j;
+ for (ij = js; ij <= i__2; ++ij) {
+ ap[ijp] = arf[ij];
+ ++ijp;
+ }
+ js += lda;
+ }
+
+ }
+
+ } else {
+
+/* N is even and TRANSR = 'T' */
+
+ if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,1), T2 -> B(0,0), S -> B(0,k+1) */
+/* T1 -> a(0+k), T2 -> a(0+0), S -> a(0+k*(k+1)); lda=k */
+
+ ijp = 0;
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = (*n + 1) * lda - 1;
+ i__3 = lda;
+ for (ij = i__ + (i__ + 1) * lda; i__3 < 0 ? ij >= i__2 :
+ ij <= i__2; ij += i__3) {
+ ap[ijp] = arf[ij];
+ ++ijp;
+ }
+ }
+ js = 0;
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__3 = js + k - j - 1;
+ for (ij = js; ij <= i__3; ++ij) {
+ ap[ijp] = arf[ij];
+ ++ijp;
+ }
+ js = js + lda + 1;
+ }
+
+ } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,k+1), T2 -> B(0,k), S -> B(0,0) */
+/* T1 -> a(0+k*(k+1)), T2 -> a(0+k*k), S -> a(0+0)); lda=k */
+
+ ijp = 0;
+ js = (k + 1) * lda;
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__3 = js + j;
+ for (ij = js; ij <= i__3; ++ij) {
+ ap[ijp] = arf[ij];
+ ++ijp;
+ }
+ js += lda;
+ }
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__3 = i__ + (k + i__) * lda;
+ i__2 = lda;
+ for (ij = i__; i__2 < 0 ? ij >= i__3 : ij <= i__3; ij +=
+ i__2) {
+ ap[ijp] = arf[ij];
+ ++ijp;
+ }
+ }
+
+ }
+
+ }
+
+ }
+
+ return 0;
+
+/* End of STFTTP */
+
+} /* stfttp_ */
diff --git a/contrib/libs/clapack/stfttr.c b/contrib/libs/clapack/stfttr.c
new file mode 100644
index 0000000000..c085b80d44
--- /dev/null
+++ b/contrib/libs/clapack/stfttr.c
@@ -0,0 +1,491 @@
+/* stfttr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int stfttr_(char *transr, char *uplo, integer *n, real *arf,
+ real *a, integer *lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, k, l, n1, n2, ij, nt, nx2, np1x2;
+ logical normaltransr;
+ extern logical lsame_(char *, char *);
+ logical lower;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nisodd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STFTTR copies a triangular matrix A from rectangular full packed */
+/* format (TF) to standard full format (TR). */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': ARF is in Normal format; */
+/* = 'T': ARF is in Transpose format. */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrices ARF and A. N >= 0. */
+
+/* ARF (input) REAL array, dimension (N*(N+1)/2). */
+/* On entry, the upper (if UPLO = 'U') or lower (if UPLO = 'L') */
+/* matrix A in RFP format. See the "Notes" below for more */
+/* details. */
+
+/* A (output) REAL array, dimension (LDA,N) */
+/* On exit, the triangular matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of the array A contains */
+/* the upper triangular matrix, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of the array A contains */
+/* the lower triangular matrix, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Notes */
+/* ===== */
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* even. We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* the transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* the transpose of the last three columns of AP lower. */
+/* This covers the case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 03 04 05 33 43 53 */
+/* 13 14 15 00 44 54 */
+/* 23 24 25 10 11 55 */
+/* 33 34 35 20 21 22 */
+/* 00 44 45 30 31 32 */
+/* 01 11 55 40 41 42 */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* odd. We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* the transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* the transpose of the last two columns of AP lower. */
+/* This covers the case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 02 03 04 00 33 43 */
+/* 12 13 14 10 11 44 */
+/* 22 23 24 20 21 22 */
+/* 00 33 34 30 31 32 */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+/* RFP A RFP A */
+
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* Reference */
+/* ========= */
+
+/* ===================================================================== */
+
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda - 1 - 0 + 1;
+ a_offset = 0 + a_dim1 * 0;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ normaltransr = lsame_(transr, "N");
+ lower = lsame_(uplo, "L");
+ if (! normaltransr && ! lsame_(transr, "T")) {
+ *info = -1;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STFTTR", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 1) {
+ if (*n == 1) {
+ a[0] = arf[0];
+ }
+ return 0;
+ }
+
+/* Size of array ARF(0:nt-1) */
+
+ nt = *n * (*n + 1) / 2;
+
+/* set N1 and N2 depending on LOWER: for N even N1=N2=K */
+
+ if (lower) {
+ n2 = *n / 2;
+ n1 = *n - n2;
+ } else {
+ n1 = *n / 2;
+ n2 = *n - n1;
+ }
+
+/* If N is odd, set NISODD = .TRUE., LDA=N+1 and A is (N+1)--by--K2. */
+/* If N is even, set K = N/2 and NISODD = .FALSE., LDA=N and A is */
+/* N--by--(N+1)/2. */
+
+ if (*n % 2 == 0) {
+ k = *n / 2;
+ nisodd = FALSE_;
+ if (! lower) {
+ np1x2 = *n + *n + 2;
+ }
+ } else {
+ nisodd = TRUE_;
+ if (! lower) {
+ nx2 = *n + *n;
+ }
+ }
+
+ if (nisodd) {
+
+/* N is odd */
+
+ if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+ if (lower) {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'L' */
+
+ ij = 0;
+ i__1 = n2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = n2 + j;
+ for (i__ = n1; i__ <= i__2; ++i__) {
+ a[n2 + j + i__ * a_dim1] = arf[ij];
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = arf[ij];
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'U' */
+
+ ij = nt - *n;
+ i__1 = n1;
+ for (j = *n - 1; j >= i__1; --j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = arf[ij];
+ ++ij;
+ }
+ i__2 = n1 - 1;
+ for (l = j - n1; l <= i__2; ++l) {
+ a[j - n1 + l * a_dim1] = arf[ij];
+ ++ij;
+ }
+ ij -= nx2;
+ }
+
+ }
+
+ } else {
+
+/* N is odd and TRANSR = 'T' */
+
+ if (lower) {
+
+/* N is odd, TRANSR = 'T', and UPLO = 'L' */
+
+ ij = 0;
+ i__1 = n2 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ a[j + i__ * a_dim1] = arf[ij];
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = n1 + j; i__ <= i__2; ++i__) {
+ a[i__ + (n1 + j) * a_dim1] = arf[ij];
+ ++ij;
+ }
+ }
+ i__1 = *n - 1;
+ for (j = n2; j <= i__1; ++j) {
+ i__2 = n1 - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ a[j + i__ * a_dim1] = arf[ij];
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* N is odd, TRANSR = 'T', and UPLO = 'U' */
+
+ ij = 0;
+ i__1 = n1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - 1;
+ for (i__ = n1; i__ <= i__2; ++i__) {
+ a[j + i__ * a_dim1] = arf[ij];
+ ++ij;
+ }
+ }
+ i__1 = n1 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = arf[ij];
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (l = n2 + j; l <= i__2; ++l) {
+ a[n2 + j + l * a_dim1] = arf[ij];
+ ++ij;
+ }
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* N is even */
+
+ if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+ if (lower) {
+
+/* N is even, TRANSR = 'N', and UPLO = 'L' */
+
+ ij = 0;
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k + j;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ a[k + j + i__ * a_dim1] = arf[ij];
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = arf[ij];
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* N is even, TRANSR = 'N', and UPLO = 'U' */
+
+ ij = nt - *n - 1;
+ i__1 = k;
+ for (j = *n - 1; j >= i__1; --j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = arf[ij];
+ ++ij;
+ }
+ i__2 = k - 1;
+ for (l = j - k; l <= i__2; ++l) {
+ a[j - k + l * a_dim1] = arf[ij];
+ ++ij;
+ }
+ ij -= np1x2;
+ }
+
+ }
+
+ } else {
+
+/* N is even and TRANSR = 'T' */
+
+ if (lower) {
+
+/* N is even, TRANSR = 'T', and UPLO = 'L' */
+
+ ij = 0;
+ j = k;
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ a[i__ + j * a_dim1] = arf[ij];
+ ++ij;
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ a[j + i__ * a_dim1] = arf[ij];
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = k + 1 + j; i__ <= i__2; ++i__) {
+ a[i__ + (k + 1 + j) * a_dim1] = arf[ij];
+ ++ij;
+ }
+ }
+ i__1 = *n - 1;
+ for (j = k - 1; j <= i__1; ++j) {
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ a[j + i__ * a_dim1] = arf[ij];
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* N is even, TRANSR = 'T', and UPLO = 'U' */
+
+ ij = 0;
+ i__1 = k;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - 1;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ a[j + i__ * a_dim1] = arf[ij];
+ ++ij;
+ }
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = arf[ij];
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (l = k + 1 + j; l <= i__2; ++l) {
+ a[k + 1 + j + l * a_dim1] = arf[ij];
+ ++ij;
+ }
+ }
+/* Note that here, on exit of the loop, J = K-1 */
+ i__1 = j;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ a[i__ + j * a_dim1] = arf[ij];
+ ++ij;
+ }
+
+ }
+
+ }
+
+ }
+
+ return 0;
+
+/* End of STFTTR */
+
+} /* stfttr_ */
diff --git a/contrib/libs/clapack/stgevc.c b/contrib/libs/clapack/stgevc.c
new file mode 100644
index 0000000000..aa7e3f4784
--- /dev/null
+++ b/contrib/libs/clapack/stgevc.c
@@ -0,0 +1,1415 @@
+/* stgevc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static logical c_true = TRUE_;
+static integer c__2 = 2;
+static real c_b34 = 1.f;
+static integer c__1 = 1;
+static real c_b36 = 0.f;
+static logical c_false = FALSE_;
+
+/* Subroutine */ int stgevc_(char *side, char *howmny, logical *select,
+ integer *n, real *s, integer *lds, real *p, integer *ldp, real *vl,
+ integer *ldvl, real *vr, integer *ldvr, integer *mm, integer *m, real
+ *work, integer *info)
+{
+ /* System generated locals */
+ integer p_dim1, p_offset, s_dim1, s_offset, vl_dim1, vl_offset, vr_dim1,
+ vr_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4, r__5, r__6;
+
+ /* Local variables */
+ integer i__, j, ja, jc, je, na, im, jr, jw, nw;
+ real big;
+ logical lsa, lsb;
+ real ulp, sum[4] /* was [2][2] */;
+ integer ibeg, ieig, iend;
+ real dmin__, temp, xmax, sump[4] /* was [2][2] */, sums[4] /*
+ was [2][2] */, cim2a, cim2b, cre2a, cre2b;
+ extern /* Subroutine */ int slag2_(real *, integer *, real *, integer *,
+ real *, real *, real *, real *, real *, real *);
+ real temp2, bdiag[2], acoef, scale;
+ logical ilall;
+ integer iside;
+ real sbeta;
+ extern logical lsame_(char *, char *);
+ logical il2by2;
+ integer iinfo;
+ real small;
+ logical compl;
+ real anorm, bnorm;
+ logical compr;
+ extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *), slaln2_(logical *, integer *, integer *, real *, real *,
+ real *, integer *, real *, real *, real *, integer *, real *,
+ real *, real *, integer *, real *, real *, integer *);
+ real temp2i, temp2r;
+ logical ilabad, ilbbad;
+ real acoefa, bcoefa, cimaga, cimagb;
+ logical ilback;
+ extern /* Subroutine */ int slabad_(real *, real *);
+ real bcoefi, ascale, bscale, creala, crealb, bcoefr;
+ extern doublereal slamch_(char *);
+ real salfar, safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real xscale, bignum;
+ logical ilcomp, ilcplx;
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *);
+ integer ihwmny;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+
+/* Purpose */
+/* ======= */
+
+/* STGEVC computes some or all of the right and/or left eigenvectors of */
+/* a pair of real matrices (S,P), where S is a quasi-triangular matrix */
+/* and P is upper triangular. Matrix pairs of this type are produced by */
+/* the generalized Schur factorization of a matrix pair (A,B): */
+
+/* A = Q*S*Z**T, B = Q*P*Z**T */
+
+/* as computed by SGGHRD + SHGEQZ. */
+
+/* The right eigenvector x and the left eigenvector y of (S,P) */
+/* corresponding to an eigenvalue w are defined by: */
+
+/* S*x = w*P*x, (y**H)*S = w*(y**H)*P, */
+
+/* where y**H denotes the conjugate transpose of y. */
+/* The eigenvalues are not input to this routine, but are computed */
+/* directly from the diagonal blocks of S and P. */
+
+/* This routine returns the matrices X and/or Y of right and left */
+/* eigenvectors of (S,P), or the products Z*X and/or Q*Y, */
+/* where Z and Q are input matrices. */
+/* If Q and Z are the orthogonal factors from the generalized Schur */
+/* factorization of a matrix pair (A,B), then Z*X and Q*Y */
+/* are the matrices of right and left eigenvectors of (A,B). */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'R': compute right eigenvectors only; */
+/* = 'L': compute left eigenvectors only; */
+/* = 'B': compute both right and left eigenvectors. */
+
+/* HOWMNY (input) CHARACTER*1 */
+/* = 'A': compute all right and/or left eigenvectors; */
+/* = 'B': compute all right and/or left eigenvectors, */
+/* backtransformed by the matrices in VR and/or VL; */
+/* = 'S': compute selected right and/or left eigenvectors, */
+/* specified by the logical array SELECT. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* If HOWMNY='S', SELECT specifies the eigenvectors to be */
+/* computed. If w(j) is a real eigenvalue, the corresponding */
+/* real eigenvector is computed if SELECT(j) is .TRUE.. */
+/* If w(j) and w(j+1) are the real and imaginary parts of a */
+/* complex eigenvalue, the corresponding complex eigenvector */
+/* is computed if either SELECT(j) or SELECT(j+1) is .TRUE., */
+/* and on exit SELECT(j) is set to .TRUE. and SELECT(j+1) is */
+/* set to .FALSE.. */
+/* Not referenced if HOWMNY = 'A' or 'B'. */
+
+/* N (input) INTEGER */
+/* The order of the matrices S and P. N >= 0. */
+
+/* S (input) REAL array, dimension (LDS,N) */
+/* The upper quasi-triangular matrix S from a generalized Schur */
+/* factorization, as computed by SHGEQZ. */
+
+/* LDS (input) INTEGER */
+/* The leading dimension of array S. LDS >= max(1,N). */
+
+/* P (input) REAL array, dimension (LDP,N) */
+/* The upper triangular matrix P from a generalized Schur */
+/* factorization, as computed by SHGEQZ. */
+/* 2-by-2 diagonal blocks of P corresponding to 2-by-2 blocks */
+/* of S must be in positive diagonal form. */
+
+/* LDP (input) INTEGER */
+/* The leading dimension of array P. LDP >= max(1,N). */
+
+/* VL (input/output) REAL array, dimension (LDVL,MM) */
+/* On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must */
+/* contain an N-by-N matrix Q (usually the orthogonal matrix Q */
+/* of left Schur vectors returned by SHGEQZ). */
+/* On exit, if SIDE = 'L' or 'B', VL contains: */
+/* if HOWMNY = 'A', the matrix Y of left eigenvectors of (S,P); */
+/* if HOWMNY = 'B', the matrix Q*Y; */
+/* if HOWMNY = 'S', the left eigenvectors of (S,P) specified by */
+/* SELECT, stored consecutively in the columns of */
+/* VL, in the same order as their eigenvalues. */
+
+/* A complex eigenvector corresponding to a complex eigenvalue */
+/* is stored in two consecutive columns, the first holding the */
+/* real part, and the second the imaginary part. */
+
+/* Not referenced if SIDE = 'R'. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of array VL. LDVL >= 1, and if */
+/* SIDE = 'L' or 'B', LDVL >= N. */
+
+/* VR (input/output) REAL array, dimension (LDVR,MM) */
+/* On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must */
+/* contain an N-by-N matrix Z (usually the orthogonal matrix Z */
+/* of right Schur vectors returned by SHGEQZ). */
+
+/* On exit, if SIDE = 'R' or 'B', VR contains: */
+/* if HOWMNY = 'A', the matrix X of right eigenvectors of (S,P); */
+/* if HOWMNY = 'B' or 'b', the matrix Z*X; */
+/* if HOWMNY = 'S' or 's', the right eigenvectors of (S,P) */
+/* specified by SELECT, stored consecutively in the */
+/* columns of VR, in the same order as their */
+/* eigenvalues. */
+
+/* A complex eigenvector corresponding to a complex eigenvalue */
+/* is stored in two consecutive columns, the first holding the */
+/* real part and the second the imaginary part. */
+
+/* Not referenced if SIDE = 'L'. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. LDVR >= 1, and if */
+/* SIDE = 'R' or 'B', LDVR >= N. */
+
+/* MM (input) INTEGER */
+/* The number of columns in the arrays VL and/or VR. MM >= M. */
+
+/* M (output) INTEGER */
+/* The number of columns in the arrays VL and/or VR actually */
+/* used to store the eigenvectors. If HOWMNY = 'A' or 'B', M */
+/* is set to N. Each selected real eigenvector occupies one */
+/* column and each selected complex eigenvector occupies two */
+/* columns. */
+
+/* WORK (workspace) REAL array, dimension (6*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: the 2-by-2 block (INFO:INFO+1) does not have a complex */
+/* eigenvalue. */
+
+/* Further Details */
+/* =============== */
+
+/* Allocation of workspace: */
+/* ---------- -- --------- */
+
+/* WORK( j ) = 1-norm of j-th column of A, above the diagonal */
+/* WORK( N+j ) = 1-norm of j-th column of B, above the diagonal */
+/* WORK( 2*N+1:3*N ) = real part of eigenvector */
+/* WORK( 3*N+1:4*N ) = imaginary part of eigenvector */
+/* WORK( 4*N+1:5*N ) = real part of back-transformed eigenvector */
+/* WORK( 5*N+1:6*N ) = imaginary part of back-transformed eigenvector */
+
+/* Rowwise vs. columnwise solution methods: */
+/* ------- -- ---------- -------- ------- */
+
+/* Finding a generalized eigenvector consists basically of solving the */
+/* singular triangular system */
+
+/* (A - w B) x = 0 (for right) or: (A - w B)**H y = 0 (for left) */
+
+/* Consider finding the i-th right eigenvector (assume all eigenvalues */
+/* are real). The equation to be solved is: */
+/* n i */
+/* 0 = sum C(j,k) v(k) = sum C(j,k) v(k) for j = i,. . .,1 */
+/* k=j k=j */
+
+/* where C = (A - w B) (The components v(i+1:n) are 0.) */
+
+/* The "rowwise" method is: */
+
+/* (1) v(i) := 1 */
+/* for j = i-1,. . .,1: */
+/* i */
+/* (2) compute s = - sum C(j,k) v(k) and */
+/* k=j+1 */
+
+/* (3) v(j) := s / C(j,j) */
+
+/* Step 2 is sometimes called the "dot product" step, since it is an */
+/* inner product between the j-th row and the portion of the eigenvector */
+/* that has been computed so far. */
+
+/* The "columnwise" method consists basically in doing the sums */
+/* for all the rows in parallel. As each v(j) is computed, the */
+/* contribution of v(j) times the j-th column of C is added to the */
+/* partial sums. Since FORTRAN arrays are stored columnwise, this has */
+/* the advantage that at each step, the elements of C that are accessed */
+/* are adjacent to one another, whereas with the rowwise method, the */
+/* elements accessed at a step are spaced LDS (and LDP) words apart. */
+
+/* When finding left eigenvectors, the matrix in question is the */
+/* transpose of the one in storage, so the rowwise method then */
+/* actually accesses columns of A and B at each step, and so is the */
+/* preferred method. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and Test the input parameters */
+
+ /* Parameter adjustments */
+ --select;
+ s_dim1 = *lds;
+ s_offset = 1 + s_dim1;
+ s -= s_offset;
+ p_dim1 = *ldp;
+ p_offset = 1 + p_dim1;
+ p -= p_offset;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+
+ /* Function Body */
+ if (lsame_(howmny, "A")) {
+ ihwmny = 1;
+ ilall = TRUE_;
+ ilback = FALSE_;
+ } else if (lsame_(howmny, "S")) {
+ ihwmny = 2;
+ ilall = FALSE_;
+ ilback = FALSE_;
+ } else if (lsame_(howmny, "B")) {
+ ihwmny = 3;
+ ilall = TRUE_;
+ ilback = TRUE_;
+ } else {
+ ihwmny = -1;
+ ilall = TRUE_;
+ }
+
+ if (lsame_(side, "R")) {
+ iside = 1;
+ compl = FALSE_;
+ compr = TRUE_;
+ } else if (lsame_(side, "L")) {
+ iside = 2;
+ compl = TRUE_;
+ compr = FALSE_;
+ } else if (lsame_(side, "B")) {
+ iside = 3;
+ compl = TRUE_;
+ compr = TRUE_;
+ } else {
+ iside = -1;
+ }
+
+ *info = 0;
+ if (iside < 0) {
+ *info = -1;
+ } else if (ihwmny < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lds < max(1,*n)) {
+ *info = -6;
+ } else if (*ldp < max(1,*n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STGEVC", &i__1);
+ return 0;
+ }
+
+/* Count the number of eigenvectors to be computed */
+
+ if (! ilall) {
+ im = 0;
+ ilcplx = FALSE_;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (ilcplx) {
+ ilcplx = FALSE_;
+ goto L10;
+ }
+ if (j < *n) {
+ if (s[j + 1 + j * s_dim1] != 0.f) {
+ ilcplx = TRUE_;
+ }
+ }
+ if (ilcplx) {
+ if (select[j] || select[j + 1]) {
+ im += 2;
+ }
+ } else {
+ if (select[j]) {
+ ++im;
+ }
+ }
+L10:
+ ;
+ }
+ } else {
+ im = *n;
+ }
+
+/* Check 2-by-2 diagonal blocks of A, B */
+
+ ilabad = FALSE_;
+ ilbbad = FALSE_;
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ if (s[j + 1 + j * s_dim1] != 0.f) {
+ if (p[j + j * p_dim1] == 0.f || p[j + 1 + (j + 1) * p_dim1] ==
+ 0.f || p[j + (j + 1) * p_dim1] != 0.f) {
+ ilbbad = TRUE_;
+ }
+ if (j < *n - 1) {
+ if (s[j + 2 + (j + 1) * s_dim1] != 0.f) {
+ ilabad = TRUE_;
+ }
+ }
+ }
+/* L20: */
+ }
+
+ if (ilabad) {
+ *info = -5;
+ } else if (ilbbad) {
+ *info = -7;
+ } else if (compl && *ldvl < *n || *ldvl < 1) {
+ *info = -10;
+ } else if (compr && *ldvr < *n || *ldvr < 1) {
+ *info = -12;
+ } else if (*mm < im) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STGEVC", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = im;
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Machine Constants */
+
+ safmin = slamch_("Safe minimum");
+ big = 1.f / safmin;
+ slabad_(&safmin, &big);
+ ulp = slamch_("Epsilon") * slamch_("Base");
+ small = safmin * *n / ulp;
+ big = 1.f / small;
+ bignum = 1.f / (safmin * *n);
+
+/* Compute the 1-norm of each column of the strictly upper triangular */
+/* part (i.e., excluding all elements belonging to the diagonal */
+/* blocks) of A and B to check for possible overflow in the */
+/* triangular solver. */
+
+ anorm = (r__1 = s[s_dim1 + 1], dabs(r__1));
+ if (*n > 1) {
+ anorm += (r__1 = s[s_dim1 + 2], dabs(r__1));
+ }
+ bnorm = (r__1 = p[p_dim1 + 1], dabs(r__1));
+ work[1] = 0.f;
+ work[*n + 1] = 0.f;
+
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ temp = 0.f;
+ temp2 = 0.f;
+ if (s[j + (j - 1) * s_dim1] == 0.f) {
+ iend = j - 1;
+ } else {
+ iend = j - 2;
+ }
+ i__2 = iend;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp += (r__1 = s[i__ + j * s_dim1], dabs(r__1));
+ temp2 += (r__1 = p[i__ + j * p_dim1], dabs(r__1));
+/* L30: */
+ }
+ work[j] = temp;
+ work[*n + j] = temp2;
+/* Computing MIN */
+ i__3 = j + 1;
+ i__2 = min(i__3,*n);
+ for (i__ = iend + 1; i__ <= i__2; ++i__) {
+ temp += (r__1 = s[i__ + j * s_dim1], dabs(r__1));
+ temp2 += (r__1 = p[i__ + j * p_dim1], dabs(r__1));
+/* L40: */
+ }
+ anorm = dmax(anorm,temp);
+ bnorm = dmax(bnorm,temp2);
+/* L50: */
+ }
+
+ ascale = 1.f / dmax(anorm,safmin);
+ bscale = 1.f / dmax(bnorm,safmin);
+
+/* Left eigenvectors */
+
+ if (compl) {
+ ieig = 0;
+
+/* Main loop over eigenvalues */
+
+ ilcplx = FALSE_;
+ i__1 = *n;
+ for (je = 1; je <= i__1; ++je) {
+
+/* Skip this iteration if (a) HOWMNY='S' and SELECT=.FALSE., or */
+/* (b) this would be the second of a complex pair. */
+/* Check for complex eigenvalue, so as to be sure of which */
+/* entry(-ies) of SELECT to look at. */
+
+ if (ilcplx) {
+ ilcplx = FALSE_;
+ goto L220;
+ }
+ nw = 1;
+ if (je < *n) {
+ if (s[je + 1 + je * s_dim1] != 0.f) {
+ ilcplx = TRUE_;
+ nw = 2;
+ }
+ }
+ if (ilall) {
+ ilcomp = TRUE_;
+ } else if (ilcplx) {
+ ilcomp = select[je] || select[je + 1];
+ } else {
+ ilcomp = select[je];
+ }
+ if (! ilcomp) {
+ goto L220;
+ }
+
+/* Decide if (a) singular pencil, (b) real eigenvalue, or */
+/* (c) complex eigenvalue. */
+
+ if (! ilcplx) {
+ if ((r__1 = s[je + je * s_dim1], dabs(r__1)) <= safmin && (
+ r__2 = p[je + je * p_dim1], dabs(r__2)) <= safmin) {
+
+/* Singular matrix pencil -- return unit eigenvector */
+
+ ++ieig;
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vl[jr + ieig * vl_dim1] = 0.f;
+/* L60: */
+ }
+ vl[ieig + ieig * vl_dim1] = 1.f;
+ goto L220;
+ }
+ }
+
+/* Clear vector */
+
+ i__2 = nw * *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ work[(*n << 1) + jr] = 0.f;
+/* L70: */
+ }
+/* T */
+/* Compute coefficients in ( a A - b B ) y = 0 */
+/* a is ACOEF */
+/* b is BCOEFR + i*BCOEFI */
+
+ if (! ilcplx) {
+
+/* Real eigenvalue */
+
+/* Computing MAX */
+ r__3 = (r__1 = s[je + je * s_dim1], dabs(r__1)) * ascale,
+ r__4 = (r__2 = p[je + je * p_dim1], dabs(r__2)) *
+ bscale, r__3 = max(r__3,r__4);
+ temp = 1.f / dmax(r__3,safmin);
+ salfar = temp * s[je + je * s_dim1] * ascale;
+ sbeta = temp * p[je + je * p_dim1] * bscale;
+ acoef = sbeta * ascale;
+ bcoefr = salfar * bscale;
+ bcoefi = 0.f;
+
+/* Scale to avoid underflow */
+
+ scale = 1.f;
+ lsa = dabs(sbeta) >= safmin && dabs(acoef) < small;
+ lsb = dabs(salfar) >= safmin && dabs(bcoefr) < small;
+ if (lsa) {
+ scale = small / dabs(sbeta) * dmin(anorm,big);
+ }
+ if (lsb) {
+/* Computing MAX */
+ r__1 = scale, r__2 = small / dabs(salfar) * dmin(bnorm,
+ big);
+ scale = dmax(r__1,r__2);
+ }
+ if (lsa || lsb) {
+/* Computing MIN */
+/* Computing MAX */
+ r__3 = 1.f, r__4 = dabs(acoef), r__3 = max(r__3,r__4),
+ r__4 = dabs(bcoefr);
+ r__1 = scale, r__2 = 1.f / (safmin * dmax(r__3,r__4));
+ scale = dmin(r__1,r__2);
+ if (lsa) {
+ acoef = ascale * (scale * sbeta);
+ } else {
+ acoef = scale * acoef;
+ }
+ if (lsb) {
+ bcoefr = bscale * (scale * salfar);
+ } else {
+ bcoefr = scale * bcoefr;
+ }
+ }
+ acoefa = dabs(acoef);
+ bcoefa = dabs(bcoefr);
+
+/* First component is 1 */
+
+ work[(*n << 1) + je] = 1.f;
+ xmax = 1.f;
+ } else {
+
+/* Complex eigenvalue */
+
+ r__1 = safmin * 100.f;
+ slag2_(&s[je + je * s_dim1], lds, &p[je + je * p_dim1], ldp, &
+ r__1, &acoef, &temp, &bcoefr, &temp2, &bcoefi);
+ bcoefi = -bcoefi;
+ if (bcoefi == 0.f) {
+ *info = je;
+ return 0;
+ }
+
+/* Scale to avoid over/underflow */
+
+ acoefa = dabs(acoef);
+ bcoefa = dabs(bcoefr) + dabs(bcoefi);
+ scale = 1.f;
+ if (acoefa * ulp < safmin && acoefa >= safmin) {
+ scale = safmin / ulp / acoefa;
+ }
+ if (bcoefa * ulp < safmin && bcoefa >= safmin) {
+/* Computing MAX */
+ r__1 = scale, r__2 = safmin / ulp / bcoefa;
+ scale = dmax(r__1,r__2);
+ }
+ if (safmin * acoefa > ascale) {
+ scale = ascale / (safmin * acoefa);
+ }
+ if (safmin * bcoefa > bscale) {
+/* Computing MIN */
+ r__1 = scale, r__2 = bscale / (safmin * bcoefa);
+ scale = dmin(r__1,r__2);
+ }
+ if (scale != 1.f) {
+ acoef = scale * acoef;
+ acoefa = dabs(acoef);
+ bcoefr = scale * bcoefr;
+ bcoefi = scale * bcoefi;
+ bcoefa = dabs(bcoefr) + dabs(bcoefi);
+ }
+
+/* Compute first two components of eigenvector */
+
+ temp = acoef * s[je + 1 + je * s_dim1];
+ temp2r = acoef * s[je + je * s_dim1] - bcoefr * p[je + je *
+ p_dim1];
+ temp2i = -bcoefi * p[je + je * p_dim1];
+ if (dabs(temp) > dabs(temp2r) + dabs(temp2i)) {
+ work[(*n << 1) + je] = 1.f;
+ work[*n * 3 + je] = 0.f;
+ work[(*n << 1) + je + 1] = -temp2r / temp;
+ work[*n * 3 + je + 1] = -temp2i / temp;
+ } else {
+ work[(*n << 1) + je + 1] = 1.f;
+ work[*n * 3 + je + 1] = 0.f;
+ temp = acoef * s[je + (je + 1) * s_dim1];
+ work[(*n << 1) + je] = (bcoefr * p[je + 1 + (je + 1) *
+ p_dim1] - acoef * s[je + 1 + (je + 1) * s_dim1]) /
+ temp;
+ work[*n * 3 + je] = bcoefi * p[je + 1 + (je + 1) * p_dim1]
+ / temp;
+ }
+/* Computing MAX */
+ r__5 = (r__1 = work[(*n << 1) + je], dabs(r__1)) + (r__2 =
+ work[*n * 3 + je], dabs(r__2)), r__6 = (r__3 = work[(*
+ n << 1) + je + 1], dabs(r__3)) + (r__4 = work[*n * 3
+ + je + 1], dabs(r__4));
+ xmax = dmax(r__5,r__6);
+ }
+
+/* Computing MAX */
+ r__1 = ulp * acoefa * anorm, r__2 = ulp * bcoefa * bnorm, r__1 =
+ max(r__1,r__2);
+ dmin__ = dmax(r__1,safmin);
+
+/* T */
+/* Triangular solve of (a A - b B) y = 0 */
+
+/* T */
+/* (rowwise in (a A - b B) , or columnwise in (a A - b B) ) */
+
+ il2by2 = FALSE_;
+
+ i__2 = *n;
+ for (j = je + nw; j <= i__2; ++j) {
+ if (il2by2) {
+ il2by2 = FALSE_;
+ goto L160;
+ }
+
+ na = 1;
+ bdiag[0] = p[j + j * p_dim1];
+ if (j < *n) {
+ if (s[j + 1 + j * s_dim1] != 0.f) {
+ il2by2 = TRUE_;
+ bdiag[1] = p[j + 1 + (j + 1) * p_dim1];
+ na = 2;
+ }
+ }
+
+/* Check whether scaling is necessary for dot products */
+
+ xscale = 1.f / dmax(1.f,xmax);
+/* Computing MAX */
+ r__1 = work[j], r__2 = work[*n + j], r__1 = max(r__1,r__2),
+ r__2 = acoefa * work[j] + bcoefa * work[*n + j];
+ temp = dmax(r__1,r__2);
+ if (il2by2) {
+/* Computing MAX */
+ r__1 = temp, r__2 = work[j + 1], r__1 = max(r__1,r__2),
+ r__2 = work[*n + j + 1], r__1 = max(r__1,r__2),
+ r__2 = acoefa * work[j + 1] + bcoefa * work[*n +
+ j + 1];
+ temp = dmax(r__1,r__2);
+ }
+ if (temp > bignum * xscale) {
+ i__3 = nw - 1;
+ for (jw = 0; jw <= i__3; ++jw) {
+ i__4 = j - 1;
+ for (jr = je; jr <= i__4; ++jr) {
+ work[(jw + 2) * *n + jr] = xscale * work[(jw + 2)
+ * *n + jr];
+/* L80: */
+ }
+/* L90: */
+ }
+ xmax *= xscale;
+ }
+
+/* Compute dot products */
+
+/* j-1 */
+/* SUM = sum conjg( a*S(k,j) - b*P(k,j) )*x(k) */
+/* k=je */
+
+/* To reduce the op count, this is done as */
+
+/* _ j-1 _ j-1 */
+/* a*conjg( sum S(k,j)*x(k) ) - b*conjg( sum P(k,j)*x(k) ) */
+/* k=je k=je */
+
+/* which may cause underflow problems if A or B are close */
+/* to underflow. (E.g., less than SMALL.) */
+
+
+/* A series of compiler directives to defeat vectorization */
+/* for the next loop */
+
+/* $PL$ CMCHAR=' ' */
+/* DIR$ NEXTSCALAR */
+/* $DIR SCALAR */
+/* DIR$ NEXT SCALAR */
+/* VD$L NOVECTOR */
+/* DEC$ NOVECTOR */
+/* VD$ NOVECTOR */
+/* VDIR NOVECTOR */
+/* VOCL LOOP,SCALAR */
+/* IBM PREFER SCALAR */
+/* $PL$ CMCHAR='*' */
+
+ i__3 = nw;
+ for (jw = 1; jw <= i__3; ++jw) {
+
+/* $PL$ CMCHAR=' ' */
+/* DIR$ NEXTSCALAR */
+/* $DIR SCALAR */
+/* DIR$ NEXT SCALAR */
+/* VD$L NOVECTOR */
+/* DEC$ NOVECTOR */
+/* VD$ NOVECTOR */
+/* VDIR NOVECTOR */
+/* VOCL LOOP,SCALAR */
+/* IBM PREFER SCALAR */
+/* $PL$ CMCHAR='*' */
+
+ i__4 = na;
+ for (ja = 1; ja <= i__4; ++ja) {
+ sums[ja + (jw << 1) - 3] = 0.f;
+ sump[ja + (jw << 1) - 3] = 0.f;
+
+ i__5 = j - 1;
+ for (jr = je; jr <= i__5; ++jr) {
+ sums[ja + (jw << 1) - 3] += s[jr + (j + ja - 1) *
+ s_dim1] * work[(jw + 1) * *n + jr];
+ sump[ja + (jw << 1) - 3] += p[jr + (j + ja - 1) *
+ p_dim1] * work[(jw + 1) * *n + jr];
+/* L100: */
+ }
+/* L110: */
+ }
+/* L120: */
+ }
+
+/* $PL$ CMCHAR=' ' */
+/* DIR$ NEXTSCALAR */
+/* $DIR SCALAR */
+/* DIR$ NEXT SCALAR */
+/* VD$L NOVECTOR */
+/* DEC$ NOVECTOR */
+/* VD$ NOVECTOR */
+/* VDIR NOVECTOR */
+/* VOCL LOOP,SCALAR */
+/* IBM PREFER SCALAR */
+/* $PL$ CMCHAR='*' */
+
+ i__3 = na;
+ for (ja = 1; ja <= i__3; ++ja) {
+ if (ilcplx) {
+ sum[ja - 1] = -acoef * sums[ja - 1] + bcoefr * sump[
+ ja - 1] - bcoefi * sump[ja + 1];
+ sum[ja + 1] = -acoef * sums[ja + 1] + bcoefr * sump[
+ ja + 1] + bcoefi * sump[ja - 1];
+ } else {
+ sum[ja - 1] = -acoef * sums[ja - 1] + bcoefr * sump[
+ ja - 1];
+ }
+/* L130: */
+ }
+
+/* T */
+/* Solve ( a A - b B ) y = SUM(,) */
+/* with scaling and perturbation of the denominator */
+
+ slaln2_(&c_true, &na, &nw, &dmin__, &acoef, &s[j + j * s_dim1]
+, lds, bdiag, &bdiag[1], sum, &c__2, &bcoefr, &bcoefi,
+ &work[(*n << 1) + j], n, &scale, &temp, &iinfo);
+ if (scale < 1.f) {
+ i__3 = nw - 1;
+ for (jw = 0; jw <= i__3; ++jw) {
+ i__4 = j - 1;
+ for (jr = je; jr <= i__4; ++jr) {
+ work[(jw + 2) * *n + jr] = scale * work[(jw + 2) *
+ *n + jr];
+/* L140: */
+ }
+/* L150: */
+ }
+ xmax = scale * xmax;
+ }
+ xmax = dmax(xmax,temp);
+L160:
+ ;
+ }
+
+/* Copy eigenvector to VL, back transforming if */
+/* HOWMNY='B'. */
+
+ ++ieig;
+ if (ilback) {
+ i__2 = nw - 1;
+ for (jw = 0; jw <= i__2; ++jw) {
+ i__3 = *n + 1 - je;
+ sgemv_("N", n, &i__3, &c_b34, &vl[je * vl_dim1 + 1], ldvl,
+ &work[(jw + 2) * *n + je], &c__1, &c_b36, &work[(
+ jw + 4) * *n + 1], &c__1);
+/* L170: */
+ }
+ slacpy_(" ", n, &nw, &work[(*n << 2) + 1], n, &vl[je *
+ vl_dim1 + 1], ldvl);
+ ibeg = 1;
+ } else {
+ slacpy_(" ", n, &nw, &work[(*n << 1) + 1], n, &vl[ieig *
+ vl_dim1 + 1], ldvl);
+ ibeg = je;
+ }
+
+/* Scale eigenvector */
+
+ xmax = 0.f;
+ if (ilcplx) {
+ i__2 = *n;
+ for (j = ibeg; j <= i__2; ++j) {
+/* Computing MAX */
+ r__3 = xmax, r__4 = (r__1 = vl[j + ieig * vl_dim1], dabs(
+ r__1)) + (r__2 = vl[j + (ieig + 1) * vl_dim1],
+ dabs(r__2));
+ xmax = dmax(r__3,r__4);
+/* L180: */
+ }
+ } else {
+ i__2 = *n;
+ for (j = ibeg; j <= i__2; ++j) {
+/* Computing MAX */
+ r__2 = xmax, r__3 = (r__1 = vl[j + ieig * vl_dim1], dabs(
+ r__1));
+ xmax = dmax(r__2,r__3);
+/* L190: */
+ }
+ }
+
+ if (xmax > safmin) {
+ xscale = 1.f / xmax;
+
+ i__2 = nw - 1;
+ for (jw = 0; jw <= i__2; ++jw) {
+ i__3 = *n;
+ for (jr = ibeg; jr <= i__3; ++jr) {
+ vl[jr + (ieig + jw) * vl_dim1] = xscale * vl[jr + (
+ ieig + jw) * vl_dim1];
+/* L200: */
+ }
+/* L210: */
+ }
+ }
+ ieig = ieig + nw - 1;
+
+L220:
+ ;
+ }
+ }
+
+/* Right eigenvectors */
+
+ if (compr) {
+ ieig = im + 1;
+
+/* Main loop over eigenvalues */
+
+ ilcplx = FALSE_;
+ for (je = *n; je >= 1; --je) {
+
+/* Skip this iteration if (a) HOWMNY='S' and SELECT=.FALSE., or */
+/* (b) this would be the second of a complex pair. */
+/* Check for complex eigenvalue, so as to be sure of which */
+/* entry(-ies) of SELECT to look at -- if complex, SELECT(JE) */
+/* or SELECT(JE-1). */
+/* If this is a complex pair, the 2-by-2 diagonal block */
+/* corresponding to the eigenvalue is in rows/columns JE-1:JE */
+
+ if (ilcplx) {
+ ilcplx = FALSE_;
+ goto L500;
+ }
+ nw = 1;
+ if (je > 1) {
+ if (s[je + (je - 1) * s_dim1] != 0.f) {
+ ilcplx = TRUE_;
+ nw = 2;
+ }
+ }
+ if (ilall) {
+ ilcomp = TRUE_;
+ } else if (ilcplx) {
+ ilcomp = select[je] || select[je - 1];
+ } else {
+ ilcomp = select[je];
+ }
+ if (! ilcomp) {
+ goto L500;
+ }
+
+/* Decide if (a) singular pencil, (b) real eigenvalue, or */
+/* (c) complex eigenvalue. */
+
+ if (! ilcplx) {
+ if ((r__1 = s[je + je * s_dim1], dabs(r__1)) <= safmin && (
+ r__2 = p[je + je * p_dim1], dabs(r__2)) <= safmin) {
+
+/* Singular matrix pencil -- unit eigenvector */
+
+ --ieig;
+ i__1 = *n;
+ for (jr = 1; jr <= i__1; ++jr) {
+ vr[jr + ieig * vr_dim1] = 0.f;
+/* L230: */
+ }
+ vr[ieig + ieig * vr_dim1] = 1.f;
+ goto L500;
+ }
+ }
+
+/* Clear vector */
+
+ i__1 = nw - 1;
+ for (jw = 0; jw <= i__1; ++jw) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ work[(jw + 2) * *n + jr] = 0.f;
+/* L240: */
+ }
+/* L250: */
+ }
+
+/* Compute coefficients in ( a A - b B ) x = 0 */
+/* a is ACOEF */
+/* b is BCOEFR + i*BCOEFI */
+
+ if (! ilcplx) {
+
+/* Real eigenvalue */
+
+/* Computing MAX */
+ r__3 = (r__1 = s[je + je * s_dim1], dabs(r__1)) * ascale,
+ r__4 = (r__2 = p[je + je * p_dim1], dabs(r__2)) *
+ bscale, r__3 = max(r__3,r__4);
+ temp = 1.f / dmax(r__3,safmin);
+ salfar = temp * s[je + je * s_dim1] * ascale;
+ sbeta = temp * p[je + je * p_dim1] * bscale;
+ acoef = sbeta * ascale;
+ bcoefr = salfar * bscale;
+ bcoefi = 0.f;
+
+/* Scale to avoid underflow */
+
+ scale = 1.f;
+ lsa = dabs(sbeta) >= safmin && dabs(acoef) < small;
+ lsb = dabs(salfar) >= safmin && dabs(bcoefr) < small;
+ if (lsa) {
+ scale = small / dabs(sbeta) * dmin(anorm,big);
+ }
+ if (lsb) {
+/* Computing MAX */
+ r__1 = scale, r__2 = small / dabs(salfar) * dmin(bnorm,
+ big);
+ scale = dmax(r__1,r__2);
+ }
+ if (lsa || lsb) {
+/* Computing MIN */
+/* Computing MAX */
+ r__3 = 1.f, r__4 = dabs(acoef), r__3 = max(r__3,r__4),
+ r__4 = dabs(bcoefr);
+ r__1 = scale, r__2 = 1.f / (safmin * dmax(r__3,r__4));
+ scale = dmin(r__1,r__2);
+ if (lsa) {
+ acoef = ascale * (scale * sbeta);
+ } else {
+ acoef = scale * acoef;
+ }
+ if (lsb) {
+ bcoefr = bscale * (scale * salfar);
+ } else {
+ bcoefr = scale * bcoefr;
+ }
+ }
+ acoefa = dabs(acoef);
+ bcoefa = dabs(bcoefr);
+
+/* First component is 1 */
+
+ work[(*n << 1) + je] = 1.f;
+ xmax = 1.f;
+
+/* Compute contribution from column JE of A and B to sum */
+/* (See "Further Details", above.) */
+
+ i__1 = je - 1;
+ for (jr = 1; jr <= i__1; ++jr) {
+ work[(*n << 1) + jr] = bcoefr * p[jr + je * p_dim1] -
+ acoef * s[jr + je * s_dim1];
+/* L260: */
+ }
+ } else {
+
+/* Complex eigenvalue */
+
+ r__1 = safmin * 100.f;
+ slag2_(&s[je - 1 + (je - 1) * s_dim1], lds, &p[je - 1 + (je -
+ 1) * p_dim1], ldp, &r__1, &acoef, &temp, &bcoefr, &
+ temp2, &bcoefi);
+ if (bcoefi == 0.f) {
+ *info = je - 1;
+ return 0;
+ }
+
+/* Scale to avoid over/underflow */
+
+ acoefa = dabs(acoef);
+ bcoefa = dabs(bcoefr) + dabs(bcoefi);
+ scale = 1.f;
+ if (acoefa * ulp < safmin && acoefa >= safmin) {
+ scale = safmin / ulp / acoefa;
+ }
+ if (bcoefa * ulp < safmin && bcoefa >= safmin) {
+/* Computing MAX */
+ r__1 = scale, r__2 = safmin / ulp / bcoefa;
+ scale = dmax(r__1,r__2);
+ }
+ if (safmin * acoefa > ascale) {
+ scale = ascale / (safmin * acoefa);
+ }
+ if (safmin * bcoefa > bscale) {
+/* Computing MIN */
+ r__1 = scale, r__2 = bscale / (safmin * bcoefa);
+ scale = dmin(r__1,r__2);
+ }
+ if (scale != 1.f) {
+ acoef = scale * acoef;
+ acoefa = dabs(acoef);
+ bcoefr = scale * bcoefr;
+ bcoefi = scale * bcoefi;
+ bcoefa = dabs(bcoefr) + dabs(bcoefi);
+ }
+
+/* Compute first two components of eigenvector */
+/* and contribution to sums */
+
+ temp = acoef * s[je + (je - 1) * s_dim1];
+ temp2r = acoef * s[je + je * s_dim1] - bcoefr * p[je + je *
+ p_dim1];
+ temp2i = -bcoefi * p[je + je * p_dim1];
+ if (dabs(temp) >= dabs(temp2r) + dabs(temp2i)) {
+ work[(*n << 1) + je] = 1.f;
+ work[*n * 3 + je] = 0.f;
+ work[(*n << 1) + je - 1] = -temp2r / temp;
+ work[*n * 3 + je - 1] = -temp2i / temp;
+ } else {
+ work[(*n << 1) + je - 1] = 1.f;
+ work[*n * 3 + je - 1] = 0.f;
+ temp = acoef * s[je - 1 + je * s_dim1];
+ work[(*n << 1) + je] = (bcoefr * p[je - 1 + (je - 1) *
+ p_dim1] - acoef * s[je - 1 + (je - 1) * s_dim1]) /
+ temp;
+ work[*n * 3 + je] = bcoefi * p[je - 1 + (je - 1) * p_dim1]
+ / temp;
+ }
+
+/* Computing MAX */
+ r__5 = (r__1 = work[(*n << 1) + je], dabs(r__1)) + (r__2 =
+ work[*n * 3 + je], dabs(r__2)), r__6 = (r__3 = work[(*
+ n << 1) + je - 1], dabs(r__3)) + (r__4 = work[*n * 3
+ + je - 1], dabs(r__4));
+ xmax = dmax(r__5,r__6);
+
+/* Compute contribution from columns JE and JE-1 */
+/* of A and B to the sums. */
+
+ creala = acoef * work[(*n << 1) + je - 1];
+ cimaga = acoef * work[*n * 3 + je - 1];
+ crealb = bcoefr * work[(*n << 1) + je - 1] - bcoefi * work[*n
+ * 3 + je - 1];
+ cimagb = bcoefi * work[(*n << 1) + je - 1] + bcoefr * work[*n
+ * 3 + je - 1];
+ cre2a = acoef * work[(*n << 1) + je];
+ cim2a = acoef * work[*n * 3 + je];
+ cre2b = bcoefr * work[(*n << 1) + je] - bcoefi * work[*n * 3
+ + je];
+ cim2b = bcoefi * work[(*n << 1) + je] + bcoefr * work[*n * 3
+ + je];
+ i__1 = je - 2;
+ for (jr = 1; jr <= i__1; ++jr) {
+ work[(*n << 1) + jr] = -creala * s[jr + (je - 1) * s_dim1]
+ + crealb * p[jr + (je - 1) * p_dim1] - cre2a * s[
+ jr + je * s_dim1] + cre2b * p[jr + je * p_dim1];
+ work[*n * 3 + jr] = -cimaga * s[jr + (je - 1) * s_dim1] +
+ cimagb * p[jr + (je - 1) * p_dim1] - cim2a * s[jr
+ + je * s_dim1] + cim2b * p[jr + je * p_dim1];
+/* L270: */
+ }
+ }
+
+/* Computing MAX */
+ r__1 = ulp * acoefa * anorm, r__2 = ulp * bcoefa * bnorm, r__1 =
+ max(r__1,r__2);
+ dmin__ = dmax(r__1,safmin);
+
+/* Columnwise triangular solve of (a A - b B) x = 0 */
+
+ il2by2 = FALSE_;
+ for (j = je - nw; j >= 1; --j) {
+
+/* If a 2-by-2 block, is in position j-1:j, wait until */
+/* next iteration to process it (when it will be j:j+1) */
+
+ if (! il2by2 && j > 1) {
+ if (s[j + (j - 1) * s_dim1] != 0.f) {
+ il2by2 = TRUE_;
+ goto L370;
+ }
+ }
+ bdiag[0] = p[j + j * p_dim1];
+ if (il2by2) {
+ na = 2;
+ bdiag[1] = p[j + 1 + (j + 1) * p_dim1];
+ } else {
+ na = 1;
+ }
+
+/* Compute x(j) (and x(j+1), if 2-by-2 block) */
+
+ slaln2_(&c_false, &na, &nw, &dmin__, &acoef, &s[j + j *
+ s_dim1], lds, bdiag, &bdiag[1], &work[(*n << 1) + j],
+ n, &bcoefr, &bcoefi, sum, &c__2, &scale, &temp, &
+ iinfo);
+ if (scale < 1.f) {
+
+ i__1 = nw - 1;
+ for (jw = 0; jw <= i__1; ++jw) {
+ i__2 = je;
+ for (jr = 1; jr <= i__2; ++jr) {
+ work[(jw + 2) * *n + jr] = scale * work[(jw + 2) *
+ *n + jr];
+/* L280: */
+ }
+/* L290: */
+ }
+ }
+/* Computing MAX */
+ r__1 = scale * xmax;
+ xmax = dmax(r__1,temp);
+
+ i__1 = nw;
+ for (jw = 1; jw <= i__1; ++jw) {
+ i__2 = na;
+ for (ja = 1; ja <= i__2; ++ja) {
+ work[(jw + 1) * *n + j + ja - 1] = sum[ja + (jw << 1)
+ - 3];
+/* L300: */
+ }
+/* L310: */
+ }
+
+/* w = w + x(j)*(a S(*,j) - b P(*,j) ) with scaling */
+
+ if (j > 1) {
+
+/* Check whether scaling is necessary for sum. */
+
+ xscale = 1.f / dmax(1.f,xmax);
+ temp = acoefa * work[j] + bcoefa * work[*n + j];
+ if (il2by2) {
+/* Computing MAX */
+ r__1 = temp, r__2 = acoefa * work[j + 1] + bcoefa *
+ work[*n + j + 1];
+ temp = dmax(r__1,r__2);
+ }
+/* Computing MAX */
+ r__1 = max(temp,acoefa);
+ temp = dmax(r__1,bcoefa);
+ if (temp > bignum * xscale) {
+
+ i__1 = nw - 1;
+ for (jw = 0; jw <= i__1; ++jw) {
+ i__2 = je;
+ for (jr = 1; jr <= i__2; ++jr) {
+ work[(jw + 2) * *n + jr] = xscale * work[(jw
+ + 2) * *n + jr];
+/* L320: */
+ }
+/* L330: */
+ }
+ xmax *= xscale;
+ }
+
+/* Compute the contributions of the off-diagonals of */
+/* column j (and j+1, if 2-by-2 block) of A and B to the */
+/* sums. */
+
+
+ i__1 = na;
+ for (ja = 1; ja <= i__1; ++ja) {
+ if (ilcplx) {
+ creala = acoef * work[(*n << 1) + j + ja - 1];
+ cimaga = acoef * work[*n * 3 + j + ja - 1];
+ crealb = bcoefr * work[(*n << 1) + j + ja - 1] -
+ bcoefi * work[*n * 3 + j + ja - 1];
+ cimagb = bcoefi * work[(*n << 1) + j + ja - 1] +
+ bcoefr * work[*n * 3 + j + ja - 1];
+ i__2 = j - 1;
+ for (jr = 1; jr <= i__2; ++jr) {
+ work[(*n << 1) + jr] = work[(*n << 1) + jr] -
+ creala * s[jr + (j + ja - 1) * s_dim1]
+ + crealb * p[jr + (j + ja - 1) *
+ p_dim1];
+ work[*n * 3 + jr] = work[*n * 3 + jr] -
+ cimaga * s[jr + (j + ja - 1) * s_dim1]
+ + cimagb * p[jr + (j + ja - 1) *
+ p_dim1];
+/* L340: */
+ }
+ } else {
+ creala = acoef * work[(*n << 1) + j + ja - 1];
+ crealb = bcoefr * work[(*n << 1) + j + ja - 1];
+ i__2 = j - 1;
+ for (jr = 1; jr <= i__2; ++jr) {
+ work[(*n << 1) + jr] = work[(*n << 1) + jr] -
+ creala * s[jr + (j + ja - 1) * s_dim1]
+ + crealb * p[jr + (j + ja - 1) *
+ p_dim1];
+/* L350: */
+ }
+ }
+/* L360: */
+ }
+ }
+
+ il2by2 = FALSE_;
+L370:
+ ;
+ }
+
+/* Copy eigenvector to VR, back transforming if */
+/* HOWMNY='B'. */
+
+ ieig -= nw;
+ if (ilback) {
+
+ i__1 = nw - 1;
+ for (jw = 0; jw <= i__1; ++jw) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ work[(jw + 4) * *n + jr] = work[(jw + 2) * *n + 1] *
+ vr[jr + vr_dim1];
+/* L380: */
+ }
+
+/* A series of compiler directives to defeat */
+/* vectorization for the next loop */
+
+
+ i__2 = je;
+ for (jc = 2; jc <= i__2; ++jc) {
+ i__3 = *n;
+ for (jr = 1; jr <= i__3; ++jr) {
+ work[(jw + 4) * *n + jr] += work[(jw + 2) * *n +
+ jc] * vr[jr + jc * vr_dim1];
+/* L390: */
+ }
+/* L400: */
+ }
+/* L410: */
+ }
+
+ i__1 = nw - 1;
+ for (jw = 0; jw <= i__1; ++jw) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vr[jr + (ieig + jw) * vr_dim1] = work[(jw + 4) * *n +
+ jr];
+/* L420: */
+ }
+/* L430: */
+ }
+
+ iend = *n;
+ } else {
+ i__1 = nw - 1;
+ for (jw = 0; jw <= i__1; ++jw) {
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vr[jr + (ieig + jw) * vr_dim1] = work[(jw + 2) * *n +
+ jr];
+/* L440: */
+ }
+/* L450: */
+ }
+
+ iend = je;
+ }
+
+/* Scale eigenvector */
+
+ xmax = 0.f;
+ if (ilcplx) {
+ i__1 = iend;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ r__3 = xmax, r__4 = (r__1 = vr[j + ieig * vr_dim1], dabs(
+ r__1)) + (r__2 = vr[j + (ieig + 1) * vr_dim1],
+ dabs(r__2));
+ xmax = dmax(r__3,r__4);
+/* L460: */
+ }
+ } else {
+ i__1 = iend;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ r__2 = xmax, r__3 = (r__1 = vr[j + ieig * vr_dim1], dabs(
+ r__1));
+ xmax = dmax(r__2,r__3);
+/* L470: */
+ }
+ }
+
+ if (xmax > safmin) {
+ xscale = 1.f / xmax;
+ i__1 = nw - 1;
+ for (jw = 0; jw <= i__1; ++jw) {
+ i__2 = iend;
+ for (jr = 1; jr <= i__2; ++jr) {
+ vr[jr + (ieig + jw) * vr_dim1] = xscale * vr[jr + (
+ ieig + jw) * vr_dim1];
+/* L480: */
+ }
+/* L490: */
+ }
+ }
+L500:
+ ;
+ }
+ }
+
+ return 0;
+
+/* End of STGEVC */
+
+} /* stgevc_ */
diff --git a/contrib/libs/clapack/stgex2.c b/contrib/libs/clapack/stgex2.c
new file mode 100644
index 0000000000..38d813d6b6
--- /dev/null
+++ b/contrib/libs/clapack/stgex2.c
@@ -0,0 +1,706 @@
+/* stgex2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__4 = 4; /* leading dimension of the local 4x4 scratch matrices s,t,li,ir */
+static real c_b5 = 0.f; /* zero: slaset fill value / sgemm beta */
+static integer c__1 = 1; /* unit stride/count for BLAS calls */
+static integer c__2 = 2; /* vector length 2 for srot on 2x2 blocks */
+static real c_b42 = 1.f; /* one: sgemm alpha/beta */
+static real c_b48 = -1.f; /* minus one: residual accumulation A - QL*S*QR' */
+static integer c__0 = 0; /* IJOB = 0 for stgsy2_ */
+
+/* Subroutine */ int stgex2_(logical *wantq, logical *wantz, integer *n, real
+ *a, integer *lda, real *b, integer *ldb, real *q, integer *ldq, real *
+ z__, integer *ldz, integer *j1, integer *n1, integer *n2, real *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1,
+ z_offset, i__1, i__2;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ real f, g;
+ integer i__, m;
+ real s[16] /* was [4][4] */, t[16] /* was [4][4] */, be[2], ai[2], ar[2],
+ sa, sb, li[16] /* was [4][4] */, ir[16] /* was [4][4]
+ */, ss, ws, eps;
+ logical weak;
+ real ddum;
+ integer idum;
+ real taul[4], dsum, taur[4], scpy[16] /* was [4][4] */, tcpy[16]
+ /* was [4][4] */;
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *);
+ real scale, bqra21, brqa21;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ real licop[16] /* was [4][4] */;
+ integer linfo;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ real ircop[16] /* was [4][4] */, dnorm;
+ integer iwork[4];
+ extern /* Subroutine */ int slagv2_(real *, integer *, real *, integer *,
+ real *, real *, real *, real *, real *, real *, real *), sgeqr2_(
+ integer *, integer *, real *, integer *, real *, real *, integer *
+), sgerq2_(integer *, integer *, real *, integer *, real *, real *
+, integer *), sorg2r_(integer *, integer *, integer *, real *,
+ integer *, real *, real *, integer *), sorgr2_(integer *, integer
+ *, integer *, real *, integer *, real *, real *, integer *),
+ sorm2r_(char *, char *, integer *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *, integer *), sormr2_(char *, char *, integer *, integer *, integer *,
+ real *, integer *, real *, real *, integer *, real *, integer *);
+ real dscale;
+ extern /* Subroutine */ int stgsy2_(char *, integer *, integer *, integer
+ *, real *, integer *, real *, integer *, real *, integer *, real *
+, integer *, real *, integer *, real *, integer *, real *, real *,
+ real *, integer *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *), slartg_(real *, real *,
+ real *, real *, real *);
+ real thresh;
+ extern /* Subroutine */ int slaset_(char *, integer *, integer *, real *,
+ real *, real *, integer *), slassq_(integer *, real *,
+ integer *, real *, real *);
+ real smlnum;
+ logical strong;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STGEX2 swaps adjacent diagonal blocks (A11, B11) and (A22, B22) */
+/* of size 1-by-1 or 2-by-2 in an upper (quasi) triangular matrix pair */
+/* (A, B) by an orthogonal equivalence transformation. */
+
+/* (A, B) must be in generalized real Schur canonical form (as returned */
+/* by SGGES), i.e. A is block upper triangular with 1-by-1 and 2-by-2 */
+/* diagonal blocks. B is upper triangular. */
+
+/* Optionally, the matrices Q and Z of generalized Schur vectors are */
+/* updated. */
+
+/* Q(in) * A(in) * Z(in)' = Q(out) * A(out) * Z(out)' */
+/* Q(in) * B(in) * Z(in)' = Q(out) * B(out) * Z(out)' */
+
+
+/* Arguments */
+/* ========= */
+
+/* WANTQ (input) LOGICAL */
+/* .TRUE. : update the left transformation matrix Q; */
+/* .FALSE.: do not update Q. */
+
+/* WANTZ (input) LOGICAL */
+/* .TRUE. : update the right transformation matrix Z; */
+/* .FALSE.: do not update Z. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) REAL arrays, dimensions (LDA,N) */
+/* On entry, the matrix A in the pair (A, B). */
+/* On exit, the updated matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) REAL arrays, dimensions (LDB,N) */
+/* On entry, the matrix B in the pair (A, B). */
+/* On exit, the updated matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* Q (input/output) REAL array, dimension (LDZ,N) */
+/* On entry, if WANTQ = .TRUE., the orthogonal matrix Q. */
+/* On exit, the updated matrix Q. */
+/* Not referenced if WANTQ = .FALSE.. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= 1. */
+/* If WANTQ = .TRUE., LDQ >= N. */
+
+/* Z (input/output) REAL array, dimension (LDZ,N) */
+/* On entry, if WANTZ =.TRUE., the orthogonal matrix Z. */
+/* On exit, the updated matrix Z. */
+/* Not referenced if WANTZ = .FALSE.. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1. */
+/* If WANTZ = .TRUE., LDZ >= N. */
+
+/* J1 (input) INTEGER */
+/* The index to the first block (A11, B11). 1 <= J1 <= N. */
+
+/* N1 (input) INTEGER */
+/* The order of the first block (A11, B11). N1 = 0, 1 or 2. */
+
+/* N2 (input) INTEGER */
+/* The order of the second block (A22, B22). N2 = 0, 1 or 2. */
+
+/* WORK (workspace) REAL array, dimension (MAX(1,LWORK)). */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* LWORK >= MAX( N*(N2+N1), (N2+N1)*(N2+N1)*2 ) */
+
+/* INFO (output) INTEGER */
+/* =0: Successful exit */
+/* >0: If INFO = 1, the transformed matrix (A, B) would be */
+/* too far from generalized Schur form; the blocks are */
+/* not swapped and (A, B) and (Q, Z) are unchanged. */
+/* The problem of swapping is too ill-conditioned. */
+/* <0: If INFO = -16: LWORK is too small. Appropriate value */
+/* for LWORK is returned in WORK(1). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* In the current code both weak and strong stability tests are */
+/* performed. The user can omit the strong stability test by changing */
+/* the internal logical parameter WANDS to .FALSE.. See ref. [2] for */
+/* details. */
+
+/* [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */
+/* Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */
+/* M.S. Moonen et al (eds), Linear Algebra for Large Scale and */
+/* Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */
+
+/* [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified */
+/* Eigenvalues of a Regular Matrix Pair (A, B) and Condition */
+/* Estimation: Theory, Algorithms and Software, */
+/* Report UMINF - 94.04, Department of Computing Science, Umea */
+/* University, S-901 87 Umea, Sweden, 1994. Also as LAPACK Working */
+/* Note 87. To appear in Numerical Algorithms, 1996. */
+
+/* ===================================================================== */
+/* Replaced various illegal calls to SCOPY by calls to SLASET, or by DO */
+/* loops. Sven Hammarling, 1/5/02. */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset; /* after the shift, a[i + j*a_dim1] addresses A(i,j) with 1-based Fortran indices */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work; /* work[1..lwork], 1-based */
+
+ /* Function Body */
+ *info = 0;
+
+/* Quick return if possible */
+
+ if (*n <= 1 || *n1 <= 0 || *n2 <= 0) { /* nothing to swap */
+ return 0;
+ }
+ if (*n1 > *n || *j1 + *n1 > *n) {
+ return 0;
+ }
+ m = *n1 + *n2; /* total order of the two adjacent blocks, 2 <= m <= 4 */
+/* Computing MAX */
+ i__1 = *n * m, i__2 = m * m << 1; /* workspace requirement: max(N*M, 2*M*M) */
+ if (*lwork < max(i__1,i__2)) {
+ *info = -16;
+/* Computing MAX */
+ i__1 = *n * m, i__2 = m * m << 1;
+ work[1] = (real) max(i__1,i__2);
+ return 0;
+ }
+
+ weak = FALSE_;
+ strong = FALSE_;
+
+/* Make a local copy of selected block */
+
+ slaset_("Full", &c__4, &c__4, &c_b5, &c_b5, li, &c__4); /* LI, IR: accumulated left/right 4x4 transformations, zeroed */
+ slaset_("Full", &c__4, &c__4, &c_b5, &c_b5, ir, &c__4);
+ slacpy_("Full", &m, &m, &a[*j1 + *j1 * a_dim1], lda, s, &c__4); /* S, T: local m-by-m copies of the diagonal block pair, ld = 4 */
+ slacpy_("Full", &m, &m, &b[*j1 + *j1 * b_dim1], ldb, t, &c__4);
+
+/* Compute threshold for testing acceptance of swapping. */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S") / eps;
+ dscale = 0.f;
+ dsum = 1.f;
+ slacpy_("Full", &m, &m, s, &c__4, &work[1], &m);
+ i__1 = m * m;
+ slassq_(&i__1, &work[1], &c__1, &dscale, &dsum);
+ slacpy_("Full", &m, &m, t, &c__4, &work[1], &m);
+ i__1 = m * m;
+ slassq_(&i__1, &work[1], &c__1, &dscale, &dsum);
+ dnorm = dscale * sqrt(dsum); /* F-norm of the stacked pair (S, T) */
+/* Computing MAX */
+ r__1 = eps * 10.f * dnorm; /* acceptance threshold ~ O(EPS * F-norm((S,T))) */
+ thresh = dmax(r__1,smlnum);
+
+ if (m == 2) {
+
+/* CASE 1: Swap 1-by-1 and 1-by-1 blocks. */
+
+/* Compute orthogonal QL and RQ that swap 1-by-1 and 1-by-1 blocks */
+/* using Givens rotations and perform the swap tentatively. */
+
+ f = s[5] * t[0] - t[5] * s[0]; /* flattened col-major, s[(i-1) + 4*(j-1)] = S(i,j): s[5]=S(2,2), s[4]=S(1,2) */
+ g = s[5] * t[4] - t[5] * s[4];
+ sb = dabs(t[5]);
+ sa = dabs(s[5]);
+ slartg_(&f, &g, &ir[4], ir, &ddum);
+ ir[1] = -ir[4];
+ ir[5] = ir[0];
+ srot_(&c__2, s, &c__1, &s[4], &c__1, ir, &ir[1]);
+ srot_(&c__2, t, &c__1, &t[4], &c__1, ir, &ir[1]);
+ if (sa >= sb) { /* pick the rotation source with the larger diagonal for stability */
+ slartg_(s, &s[1], li, &li[1], &ddum);
+ } else {
+ slartg_(t, &t[1], li, &li[1], &ddum);
+ }
+ srot_(&c__2, s, &c__4, &s[1], &c__4, li, &li[1]);
+ srot_(&c__2, t, &c__4, &t[1], &c__4, li, &li[1]);
+ li[5] = li[0];
+ li[4] = -li[1];
+
+/* Weak stability test: */
+/* |S21| + |T21| <= O(EPS * F-norm((S, T))) */
+
+ ws = dabs(s[1]) + dabs(t[1]);
+ weak = ws <= thresh;
+ if (! weak) {
+ goto L70;
+ }
+
+ if (TRUE_) { /* WANDS (strong-stability-test switch) folded to a constant by f2c */
+
+/* Strong stability test: */
+/* F-norm((A-QL'*S*QR, B-QL'*T*QR)) <= O(EPS*F-norm((A,B))) */
+
+ slacpy_("Full", &m, &m, &a[*j1 + *j1 * a_dim1], lda, &work[m * m
+ + 1], &m);
+ sgemm_("N", "N", &m, &m, &m, &c_b42, li, &c__4, s, &c__4, &c_b5, &
+ work[1], &m);
+ sgemm_("N", "T", &m, &m, &m, &c_b48, &work[1], &m, ir, &c__4, &
+ c_b42, &work[m * m + 1], &m); /* work[m*m+1..] = A - LI*S*IR' (residual) */
+ dscale = 0.f;
+ dsum = 1.f;
+ i__1 = m * m;
+ slassq_(&i__1, &work[m * m + 1], &c__1, &dscale, &dsum);
+
+ slacpy_("Full", &m, &m, &b[*j1 + *j1 * b_dim1], ldb, &work[m * m
+ + 1], &m);
+ sgemm_("N", "N", &m, &m, &m, &c_b42, li, &c__4, t, &c__4, &c_b5, &
+ work[1], &m);
+ sgemm_("N", "T", &m, &m, &m, &c_b48, &work[1], &m, ir, &c__4, &
+ c_b42, &work[m * m + 1], &m);
+ i__1 = m * m;
+ slassq_(&i__1, &work[m * m + 1], &c__1, &dscale, &dsum);
+ ss = dscale * sqrt(dsum);
+ strong = ss <= thresh;
+ if (! strong) {
+ goto L70;
+ }
+ }
+
+/* Update (A(J1:J1+M-1, M+J1:N), B(J1:J1+M-1, M+J1:N)) and */
+/* (A(1:J1-1, J1:J1+M), B(1:J1-1, J1:J1+M)). */
+
+ i__1 = *j1 + 1;
+ srot_(&i__1, &a[*j1 * a_dim1 + 1], &c__1, &a[(*j1 + 1) * a_dim1 + 1],
+ &c__1, ir, &ir[1]); /* apply right rotation to columns j1, j1+1 of A and B */
+ i__1 = *j1 + 1;
+ srot_(&i__1, &b[*j1 * b_dim1 + 1], &c__1, &b[(*j1 + 1) * b_dim1 + 1],
+ &c__1, ir, &ir[1]);
+ i__1 = *n - *j1 + 1;
+ srot_(&i__1, &a[*j1 + *j1 * a_dim1], lda, &a[*j1 + 1 + *j1 * a_dim1],
+ lda, li, &li[1]); /* apply left rotation to rows j1, j1+1 */
+ i__1 = *n - *j1 + 1;
+ srot_(&i__1, &b[*j1 + *j1 * b_dim1], ldb, &b[*j1 + 1 + *j1 * b_dim1],
+ ldb, li, &li[1]);
+
+/* Set N1-by-N2 (2,1) - blocks to ZERO. */
+
+ a[*j1 + 1 + *j1 * a_dim1] = 0.f;
+ b[*j1 + 1 + *j1 * b_dim1] = 0.f;
+
+/* Accumulate transformations into Q and Z if requested. */
+
+ if (*wantz) {
+ srot_(n, &z__[*j1 * z_dim1 + 1], &c__1, &z__[(*j1 + 1) * z_dim1 +
+ 1], &c__1, ir, &ir[1]);
+ }
+ if (*wantq) {
+ srot_(n, &q[*j1 * q_dim1 + 1], &c__1, &q[(*j1 + 1) * q_dim1 + 1],
+ &c__1, li, &li[1]);
+ }
+
+/* Exit with INFO = 0 if swap was successfully performed. */
+
+ return 0;
+
+ } else {
+
+/* CASE 2: Swap 1-by-1 and 2-by-2 blocks, or 2-by-2 */
+/* and 2-by-2 blocks. */
+
+/* Solve the generalized Sylvester equation */
+/* S11 * R - L * S22 = SCALE * S12 */
+/* T11 * R - L * T22 = SCALE * T12 */
+/* for R and L. Solutions in LI and IR. */
+
+ slacpy_("Full", n1, n2, &t[(*n1 + 1 << 2) - 4], &c__4, li, &c__4); /* (*n1+1 << 2) - 4 == 4*N1: start of column N1+1 in the 4x4 array */
+ slacpy_("Full", n1, n2, &s[(*n1 + 1 << 2) - 4], &c__4, &ir[*n2 + 1 + (
+ *n1 + 1 << 2) - 5], &c__4);
+ stgsy2_("N", &c__0, n1, n2, s, &c__4, &s[*n1 + 1 + (*n1 + 1 << 2) - 5]
+, &c__4, &ir[*n2 + 1 + (*n1 + 1 << 2) - 5], &c__4, t, &c__4, &
+ t[*n1 + 1 + (*n1 + 1 << 2) - 5], &c__4, li, &c__4, &scale, &
+ dsum, &dscale, iwork, &idum, &linfo); /* i + (j << 2) - 5 == (i-1) + 4*(j-1): element (i,j), 1-based */
+
+/* Compute orthogonal matrix QL: */
+
+/* QL' * LI = [ TL ] */
+/* [ 0 ] */
+/* where */
+/* LI = [ -L ] */
+/* [ SCALE * identity(N2) ] */
+
+ i__1 = *n2;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ sscal_(n1, &c_b48, &li[(i__ << 2) - 4], &c__1); /* negate column i of L */
+ li[*n1 + i__ + (i__ << 2) - 5] = scale; /* LI(N1+i, i) = SCALE */
+/* L10: */
+ }
+ sgeqr2_(&m, n2, li, &c__4, taul, &work[1], &linfo);
+ if (linfo != 0) {
+ goto L70;
+ }
+ sorg2r_(&m, &m, n2, li, &c__4, taul, &work[1], &linfo);
+ if (linfo != 0) {
+ goto L70;
+ }
+
+/* Compute orthogonal matrix RQ: */
+
+/* IR * RQ' = [ 0 TR], */
+
+/* where IR = [ SCALE * identity(N1), R ] */
+
+ i__1 = *n1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ir[*n2 + i__ + (i__ << 2) - 5] = scale;
+/* L20: */
+ }
+ sgerq2_(n1, &m, &ir[*n2], &c__4, taur, &work[1], &linfo);
+ if (linfo != 0) {
+ goto L70;
+ }
+ sorgr2_(&m, &m, n1, ir, &c__4, taur, &work[1], &linfo);
+ if (linfo != 0) {
+ goto L70;
+ }
+
+/* Perform the swapping tentatively: */
+
+ sgemm_("T", "N", &m, &m, &m, &c_b42, li, &c__4, s, &c__4, &c_b5, &
+ work[1], &m);
+ sgemm_("N", "T", &m, &m, &m, &c_b42, &work[1], &m, ir, &c__4, &c_b5,
+ s, &c__4); /* S := QL' * S * RQ' */
+ sgemm_("T", "N", &m, &m, &m, &c_b42, li, &c__4, t, &c__4, &c_b5, &
+ work[1], &m);
+ sgemm_("N", "T", &m, &m, &m, &c_b42, &work[1], &m, ir, &c__4, &c_b5,
+ t, &c__4); /* T := QL' * T * RQ' */
+ slacpy_("F", &m, &m, s, &c__4, scpy, &c__4); /* keep copies: two competing triangularizations are tried below */
+ slacpy_("F", &m, &m, t, &c__4, tcpy, &c__4);
+ slacpy_("F", &m, &m, ir, &c__4, ircop, &c__4);
+ slacpy_("F", &m, &m, li, &c__4, licop, &c__4);
+
+/* Triangularize the B-part by an RQ factorization. */
+/* Apply transformation (from left) to A-part, giving S. */
+
+ sgerq2_(&m, &m, t, &c__4, taur, &work[1], &linfo);
+ if (linfo != 0) {
+ goto L70;
+ }
+ sormr2_("R", "T", &m, &m, &m, t, &c__4, taur, s, &c__4, &work[1], &
+ linfo);
+ if (linfo != 0) {
+ goto L70;
+ }
+ sormr2_("L", "N", &m, &m, &m, t, &c__4, taur, ir, &c__4, &work[1], &
+ linfo);
+ if (linfo != 0) {
+ goto L70;
+ }
+
+/* Compute F-norm(S21) in BRQA21. (T21 is 0.) */
+
+ dscale = 0.f;
+ dsum = 1.f;
+ i__1 = *n2;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ slassq_(n1, &s[*n2 + 1 + (i__ << 2) - 5], &c__1, &dscale, &dsum);
+/* L30: */
+ }
+ brqa21 = dscale * sqrt(dsum);
+
+/* Triangularize the B-part by a QR factorization. */
+/* Apply transformation (from right) to A-part, giving S. */
+
+ sgeqr2_(&m, &m, tcpy, &c__4, taul, &work[1], &linfo);
+ if (linfo != 0) {
+ goto L70;
+ }
+ sorm2r_("L", "T", &m, &m, &m, tcpy, &c__4, taul, scpy, &c__4, &work[1]
+, info); /* NOTE(review): passes info, not &linfo; the linfo test below re-checks the (known-zero) sgeqr2_ status -- matches upstream f2c output, verify against stgex2.f */
+ sorm2r_("R", "N", &m, &m, &m, tcpy, &c__4, taul, licop, &c__4, &work[
+ 1], info);
+ if (linfo != 0) {
+ goto L70;
+ }
+
+/* Compute F-norm(S21) in BQRA21. (T21 is 0.) */
+
+ dscale = 0.f;
+ dsum = 1.f;
+ i__1 = *n2;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ slassq_(n1, &scpy[*n2 + 1 + (i__ << 2) - 5], &c__1, &dscale, &
+ dsum);
+/* L40: */
+ }
+ bqra21 = dscale * sqrt(dsum);
+
+/* Decide which method to use. */
+/* Weak stability test: */
+/* F-norm(S21) <= O(EPS * F-norm((S, T))) */
+
+ if (bqra21 <= brqa21 && bqra21 <= thresh) { /* QR variant won: adopt the copies */
+ slacpy_("F", &m, &m, scpy, &c__4, s, &c__4);
+ slacpy_("F", &m, &m, tcpy, &c__4, t, &c__4);
+ slacpy_("F", &m, &m, ircop, &c__4, ir, &c__4);
+ slacpy_("F", &m, &m, licop, &c__4, li, &c__4);
+ } else if (brqa21 >= thresh) { /* neither variant passes the weak test */
+ goto L70;
+ }
+
+/* Set lower triangle of B-part to zero */
+
+ i__1 = m - 1;
+ i__2 = m - 1;
+ slaset_("Lower", &i__1, &i__2, &c_b5, &c_b5, &t[1], &c__4);
+
+ if (TRUE_) { /* WANDS folded to a constant by f2c, as above */
+
+/* Strong stability test: */
+/* F-norm((A-QL*S*QR', B-QL*T*QR')) <= O(EPS*F-norm((A,B))) */
+
+ slacpy_("Full", &m, &m, &a[*j1 + *j1 * a_dim1], lda, &work[m * m
+ + 1], &m);
+ sgemm_("N", "N", &m, &m, &m, &c_b42, li, &c__4, s, &c__4, &c_b5, &
+ work[1], &m);
+ sgemm_("N", "N", &m, &m, &m, &c_b48, &work[1], &m, ir, &c__4, &
+ c_b42, &work[m * m + 1], &m);
+ dscale = 0.f;
+ dsum = 1.f;
+ i__1 = m * m;
+ slassq_(&i__1, &work[m * m + 1], &c__1, &dscale, &dsum);
+
+ slacpy_("Full", &m, &m, &b[*j1 + *j1 * b_dim1], ldb, &work[m * m
+ + 1], &m);
+ sgemm_("N", "N", &m, &m, &m, &c_b42, li, &c__4, t, &c__4, &c_b5, &
+ work[1], &m);
+ sgemm_("N", "N", &m, &m, &m, &c_b48, &work[1], &m, ir, &c__4, &
+ c_b42, &work[m * m + 1], &m);
+ i__1 = m * m;
+ slassq_(&i__1, &work[m * m + 1], &c__1, &dscale, &dsum);
+ ss = dscale * sqrt(dsum);
+ strong = ss <= thresh;
+ if (! strong) {
+ goto L70;
+ }
+
+ }
+
+/* If the swap is accepted ("weakly" and "strongly"), apply the */
+/* transformations and set N1-by-N2 (2,1)-block to zero. */
+
+ slaset_("Full", n1, n2, &c_b5, &c_b5, &s[*n2], &c__4); /* zero S(N2+1:M, 1:N1) */
+
+/* copy back M-by-M diagonal block starting at index J1 of (A, B) */
+
+ slacpy_("F", &m, &m, s, &c__4, &a[*j1 + *j1 * a_dim1], lda)
+ ;
+ slacpy_("F", &m, &m, t, &c__4, &b[*j1 + *j1 * b_dim1], ldb)
+ ;
+ slaset_("Full", &c__4, &c__4, &c_b5, &c_b5, t, &c__4);
+
+/* Standardize existing 2-by-2 blocks. */
+
+ i__1 = m * m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L50: */
+ }
+ work[1] = 1.f; /* work(1:m*m) and t now hold identity-seeded standardizing rotations */
+ t[0] = 1.f;
+ idum = *lwork - m * m - 2; /* dead store: idum is not read after this -- matches upstream */
+ if (*n2 > 1) {
+
+ slagv2_(&a[*j1 + *j1 * a_dim1], lda, &b[*j1 + *j1 * b_dim1], ldb,
+ ar, ai, be, &work[1], &work[2], t, &t[1]); /* standardize the leading 2x2 block; (work, t) get the 2x2 rotations */
+ work[m + 1] = -work[2];
+ work[m + 2] = work[1];
+ t[*n2 + (*n2 << 2) - 5] = t[0];
+ t[4] = -t[1];
+ }
+ work[m * m] = 1.f;
+ t[m + (m << 2) - 5] = 1.f;
+
+ if (*n1 > 1) {
+ slagv2_(&a[*j1 + *n2 + (*j1 + *n2) * a_dim1], lda, &b[*j1 + *n2 +
+ (*j1 + *n2) * b_dim1], ldb, taur, taul, &work[m * m + 1],
+ &work[*n2 * m + *n2 + 1], &work[*n2 * m + *n2 + 2], &t[*
+ n2 + 1 + (*n2 + 1 << 2) - 5], &t[m + (m - 1 << 2) - 5]); /* standardize the trailing 2x2 block */
+ work[m * m] = work[*n2 * m + *n2 + 1];
+ work[m * m - 1] = -work[*n2 * m + *n2 + 2];
+ t[m + (m << 2) - 5] = t[*n2 + 1 + (*n2 + 1 << 2) - 5];
+ t[m - 1 + (m << 2) - 5] = -t[m + (m - 1 << 2) - 5];
+ }
+ sgemm_("T", "N", n2, n1, n2, &c_b42, &work[1], &m, &a[*j1 + (*j1 + *
+ n2) * a_dim1], lda, &c_b5, &work[m * m + 1], n2); /* propagate the standardizing rotations into the off-diagonal block */
+ slacpy_("Full", n2, n1, &work[m * m + 1], n2, &a[*j1 + (*j1 + *n2) *
+ a_dim1], lda);
+ sgemm_("T", "N", n2, n1, n2, &c_b42, &work[1], &m, &b[*j1 + (*j1 + *
+ n2) * b_dim1], ldb, &c_b5, &work[m * m + 1], n2);
+ slacpy_("Full", n2, n1, &work[m * m + 1], n2, &b[*j1 + (*j1 + *n2) *
+ b_dim1], ldb);
+ sgemm_("N", "N", &m, &m, &m, &c_b42, li, &c__4, &work[1], &m, &c_b5, &
+ work[m * m + 1], &m); /* fold the rotations into the accumulated LI */
+ slacpy_("Full", &m, &m, &work[m * m + 1], &m, li, &c__4);
+ sgemm_("N", "N", n2, n1, n1, &c_b42, &a[*j1 + (*j1 + *n2) * a_dim1],
+ lda, &t[*n2 + 1 + (*n2 + 1 << 2) - 5], &c__4, &c_b5, &work[1],
+ n2);
+ slacpy_("Full", n2, n1, &work[1], n2, &a[*j1 + (*j1 + *n2) * a_dim1],
+ lda);
+ sgemm_("N", "N", n2, n1, n1, &c_b42, &b[*j1 + (*j1 + *n2) * b_dim1],
+ ldb, &t[*n2 + 1 + (*n2 + 1 << 2) - 5], &c__4, &c_b5, &work[1],
+ n2);
+ slacpy_("Full", n2, n1, &work[1], n2, &b[*j1 + (*j1 + *n2) * b_dim1],
+ ldb);
+ sgemm_("T", "N", &m, &m, &m, &c_b42, ir, &c__4, t, &c__4, &c_b5, &
+ work[1], &m); /* fold the rotations into the accumulated IR */
+ slacpy_("Full", &m, &m, &work[1], &m, ir, &c__4);
+
+/* Accumulate transformations into Q and Z if requested. */
+
+ if (*wantq) {
+ sgemm_("N", "N", n, &m, &m, &c_b42, &q[*j1 * q_dim1 + 1], ldq, li,
+ &c__4, &c_b5, &work[1], n);
+ slacpy_("Full", n, &m, &work[1], n, &q[*j1 * q_dim1 + 1], ldq);
+
+ }
+
+ if (*wantz) {
+ sgemm_("N", "N", n, &m, &m, &c_b42, &z__[*j1 * z_dim1 + 1], ldz,
+ ir, &c__4, &c_b5, &work[1], n);
+ slacpy_("Full", n, &m, &work[1], n, &z__[*j1 * z_dim1 + 1], ldz);
+
+ }
+
+/* Update (A(J1:J1+M-1, M+J1:N), B(J1:J1+M-1, M+J1:N)) and */
+/* (A(1:J1-1, J1:J1+M), B(1:J1-1, J1:J1+M)). */
+
+ i__ = *j1 + m;
+ if (i__ <= *n) { /* columns to the right of the swapped block */
+ i__1 = *n - i__ + 1;
+ sgemm_("T", "N", &m, &i__1, &m, &c_b42, li, &c__4, &a[*j1 + i__ *
+ a_dim1], lda, &c_b5, &work[1], &m);
+ i__1 = *n - i__ + 1;
+ slacpy_("Full", &m, &i__1, &work[1], &m, &a[*j1 + i__ * a_dim1],
+ lda);
+ i__1 = *n - i__ + 1;
+ sgemm_("T", "N", &m, &i__1, &m, &c_b42, li, &c__4, &b[*j1 + i__ *
+ b_dim1], ldb, &c_b5, &work[1], &m);
+ i__1 = *n - i__ + 1;
+ slacpy_("Full", &m, &i__1, &work[1], &m, &b[*j1 + i__ * b_dim1],
+ ldb);
+ }
+ i__ = *j1 - 1;
+ if (i__ > 0) { /* rows above the swapped block */
+ sgemm_("N", "N", &i__, &m, &m, &c_b42, &a[*j1 * a_dim1 + 1], lda,
+ ir, &c__4, &c_b5, &work[1], &i__);
+ slacpy_("Full", &i__, &m, &work[1], &i__, &a[*j1 * a_dim1 + 1],
+ lda);
+ sgemm_("N", "N", &i__, &m, &m, &c_b42, &b[*j1 * b_dim1 + 1], ldb,
+ ir, &c__4, &c_b5, &work[1], &i__);
+ slacpy_("Full", &i__, &m, &work[1], &i__, &b[*j1 * b_dim1 + 1],
+ ldb);
+ }
+
+/* Exit with INFO = 0 if swap was successfully performed. */
+
+ return 0;
+
+ }
+
+/* Exit with INFO = 1 if swap was rejected. */
+
+L70: /* rejection exit: (A, B) and (Q, Z) are left unchanged */
+
+ *info = 1;
+ return 0;
+
+/* End of STGEX2 */
+
+} /* stgex2_ */
diff --git a/contrib/libs/clapack/stgexc.c b/contrib/libs/clapack/stgexc.c
new file mode 100644
index 0000000000..1255ee097f
--- /dev/null
+++ b/contrib/libs/clapack/stgexc.c
@@ -0,0 +1,514 @@
+/* stgexc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int stgexc_(logical *wantq, logical *wantz, integer *n, real
+	*a, integer *lda, real *b, integer *ldb, real *q, integer *ldq, real *
+	z__, integer *ldz, integer *ifst, integer *ilst, real *work, integer *
+	lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1,
+	    z_offset, i__1;
+
+    /* Local variables */
+    integer nbf, nbl, here, lwmin;
+    extern /* Subroutine */ int stgex2_(logical *, logical *, integer *, real
+	    *, integer *, real *, integer *, real *, integer *, real *,
+	    integer *, integer *, integer *, integer *, real *, integer *,
+	    integer *), xerbla_(char *, integer *);
+    integer nbnext;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STGEXC reorders the generalized real Schur decomposition of a real */
+/* matrix pair (A,B) using an orthogonal equivalence transformation */
+
+/* (A, B) = Q * (A, B) * Z', */
+
+/* so that the diagonal block of (A, B) with row index IFST is moved */
+/* to row ILST. */
+
+/* (A, B) must be in generalized real Schur canonical form (as returned */
+/* by SGGES), i.e. A is block upper triangular with 1-by-1 and 2-by-2 */
+/* diagonal blocks. B is upper triangular. */
+
+/* Optionally, the matrices Q and Z of generalized Schur vectors are */
+/* updated. */
+
+/* Q(in) * A(in) * Z(in)' = Q(out) * A(out) * Z(out)' */
+/* Q(in) * B(in) * Z(in)' = Q(out) * B(out) * Z(out)' */
+
+
+/* Arguments */
+/* ========= */
+
+/* WANTQ (input) LOGICAL */
+/* .TRUE. : update the left transformation matrix Q; */
+/* .FALSE.: do not update Q. */
+
+/* WANTZ (input) LOGICAL */
+/* .TRUE. : update the right transformation matrix Z; */
+/* .FALSE.: do not update Z. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the matrix A in generalized real Schur canonical */
+/* form. */
+/* On exit, the updated matrix A, again in generalized */
+/* real Schur canonical form. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) REAL array, dimension (LDB,N) */
+/* On entry, the matrix B in generalized real Schur canonical */
+/* form (A,B). */
+/* On exit, the updated matrix B, again in generalized */
+/* real Schur canonical form (A,B). */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* Q (input/output) REAL array, dimension (LDZ,N) */
+/* On entry, if WANTQ = .TRUE., the orthogonal matrix Q. */
+/* On exit, the updated matrix Q. */
+/* If WANTQ = .FALSE., Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= 1. */
+/* If WANTQ = .TRUE., LDQ >= N. */
+
+/* Z (input/output) REAL array, dimension (LDZ,N) */
+/* On entry, if WANTZ = .TRUE., the orthogonal matrix Z. */
+/* On exit, the updated matrix Z. */
+/* If WANTZ = .FALSE., Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1. */
+/* If WANTZ = .TRUE., LDZ >= N. */
+
+/* IFST (input/output) INTEGER */
+/* ILST (input/output) INTEGER */
+/* Specify the reordering of the diagonal blocks of (A, B). */
+/* The block with row index IFST is moved to row ILST, by a */
+/* sequence of swapping between adjacent blocks. */
+/* On exit, if IFST pointed on entry to the second row of */
+/* a 2-by-2 block, it is changed to point to the first row; */
+/* ILST always points to the first row of the block in its */
+/* final position (which may differ from its input value by */
+/* +1 or -1). 1 <= IFST, ILST <= N. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* LWORK >= 1 when N <= 1, otherwise LWORK >= 4*N + 16. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* =0: successful exit. */
+/* <0: if INFO = -i, the i-th argument had an illegal value. */
+/* =1: The transformed matrix pair (A, B) would be too far */
+/* from generalized Schur form; the problem is ill- */
+/* conditioned. (A, B) may have been partially reordered, */
+/* and ILST points to the first row of the current */
+/* position of the block being moved. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */
+/* Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */
+/* M.S. Moonen et al (eds), Linear Algebra for Large Scale and */
+/* Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test input arguments. */
+
+    /* Parameter adjustments (f2c: shift pointers so 1-based Fortran */
+    /* indexing can be used on the C arrays below). */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1;
+    if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    } else if (*ldq < 1 || *wantq && *ldq < max(1,*n)) {
+	*info = -9;
+    } else if (*ldz < 1 || *wantz && *ldz < max(1,*n)) {
+	*info = -11;
+    } else if (*ifst < 1 || *ifst > *n) {
+	*info = -12;
+    } else if (*ilst < 1 || *ilst > *n) {
+	*info = -13;
+    }
+
+    if (*info == 0) {
+	if (*n <= 1) {
+	    lwmin = 1;
+	} else {
+/* Minimal workspace is 4*N + 16 (see LWORK documentation above). */
+	    lwmin = (*n << 2) + 16;
+	}
+	work[1] = (real) lwmin;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -15;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("STGEXC", &i__1);
+	return 0;
+    } else if (lquery) {
+/* Workspace query: optimal LWORK already stored in WORK(1). */
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n <= 1) {
+	return 0;
+    }
+
+/* Determine the first row of the specified block and find out */
+/* if it is 1-by-1 or 2-by-2. */
+
+    if (*ifst > 1) {
+/* A nonzero subdiagonal entry means IFST points at the second row */
+/* of a 2-by-2 block; move it to the first row. */
+	if (a[*ifst + (*ifst - 1) * a_dim1] != 0.f) {
+	    --(*ifst);
+	}
+    }
+/* NBF is the order (1 or 2) of the block starting at row IFST. */
+    nbf = 1;
+    if (*ifst < *n) {
+	if (a[*ifst + 1 + *ifst * a_dim1] != 0.f) {
+	    nbf = 2;
+	}
+    }
+
+/* Determine the first row of the final block */
+/* and find out if it is 1-by-1 or 2-by-2. */
+
+    if (*ilst > 1) {
+	if (a[*ilst + (*ilst - 1) * a_dim1] != 0.f) {
+	    --(*ilst);
+	}
+    }
+/* NBL is the order (1 or 2) of the block starting at row ILST. */
+    nbl = 1;
+    if (*ilst < *n) {
+	if (a[*ilst + 1 + *ilst * a_dim1] != 0.f) {
+	    nbl = 2;
+	}
+    }
+    if (*ifst == *ilst) {
+	return 0;
+    }
+
+    if (*ifst < *ilst) {
+
+/* Update ILST. */
+
+/* Moving a 2-by-2 past 1-by-1 blocks (or vice versa) shifts the */
+/* final resting row by one; adjust ILST accordingly. */
+	if (nbf == 2 && nbl == 1) {
+	    --(*ilst);
+	}
+	if (nbf == 1 && nbl == 2) {
+	    ++(*ilst);
+	}
+
+	here = *ifst;
+
+L10:
+
+/* Swap with next one below. */
+
+	if (nbf == 1 || nbf == 2) {
+
+/* Current block either 1-by-1 or 2-by-2. */
+
+	    nbnext = 1;
+	    if (here + nbf + 1 <= *n) {
+		if (a[here + nbf + 1 + (here + nbf) * a_dim1] != 0.f) {
+		    nbnext = 2;
+		}
+	    }
+	    stgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset], ldb, &q[
+		    q_offset], ldq, &z__[z_offset], ldz, &here, &nbf, &nbnext,
+		    &work[1], lwork, info);
+	    if (*info != 0) {
+/* Swap rejected by STGEX2: report current block position in ILST. */
+		*ilst = here;
+		return 0;
+	    }
+	    here += nbnext;
+
+/* Test if 2-by-2 block breaks into two 1-by-1 blocks. */
+/* (NBF = 3 is a sentinel meaning "two adjacent 1-by-1 blocks", */
+/* handled by the else-branch below.) */
+
+	    if (nbf == 2) {
+		if (a[here + 1 + here * a_dim1] == 0.f) {
+		    nbf = 3;
+		}
+	    }
+
+	} else {
+
+/* Current block consists of two 1-by-1 blocks, each of which */
+/* must be swapped individually. */
+
+	    nbnext = 1;
+	    if (here + 3 <= *n) {
+		if (a[here + 3 + (here + 2) * a_dim1] != 0.f) {
+		    nbnext = 2;
+		}
+	    }
+	    i__1 = here + 1;
+	    stgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset], ldb, &q[
+		    q_offset], ldq, &z__[z_offset], ldz, &i__1, &c__1, &
+		    nbnext, &work[1], lwork, info);
+	    if (*info != 0) {
+		*ilst = here;
+		return 0;
+	    }
+	    if (nbnext == 1) {
+
+/* Swap two 1-by-1 blocks. */
+
+		stgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset], ldb,
+			&q[q_offset], ldq, &z__[z_offset], ldz, &here, &c__1,
+			&c__1, &work[1], lwork, info);
+		if (*info != 0) {
+		    *ilst = here;
+		    return 0;
+		}
+		++here;
+
+	    } else {
+
+/* Recompute NBNEXT in case of 2-by-2 split. */
+
+		if (a[here + 2 + (here + 1) * a_dim1] == 0.f) {
+		    nbnext = 1;
+		}
+		if (nbnext == 2) {
+
+/* 2-by-2 block did not split. */
+
+		    stgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset],
+			    ldb, &q[q_offset], ldq, &z__[z_offset], ldz, &
+			    here, &c__1, &nbnext, &work[1], lwork, info);
+		    if (*info != 0) {
+			*ilst = here;
+			return 0;
+		    }
+		    here += 2;
+		} else {
+
+/* 2-by-2 block did split. */
+
+		    stgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset],
+			    ldb, &q[q_offset], ldq, &z__[z_offset], ldz, &
+			    here, &c__1, &c__1, &work[1], lwork, info);
+		    if (*info != 0) {
+			*ilst = here;
+			return 0;
+		    }
+		    ++here;
+		    stgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset],
+			    ldb, &q[q_offset], ldq, &z__[z_offset], ldz, &
+			    here, &c__1, &c__1, &work[1], lwork, info);
+		    if (*info != 0) {
+			*ilst = here;
+			return 0;
+		    }
+		    ++here;
+		}
+
+	    }
+	}
+	if (here < *ilst) {
+	    goto L10;
+	}
+    } else {
+	here = *ifst;
+
+L20:
+
+/* Swap with next one above (this branch moves the block upward: */
+/* HERE is decremented toward ILST on each pass). */
+
+	if (nbf == 1 || nbf == 2) {
+
+/* Current block either 1-by-1 or 2-by-2. */
+
+	    nbnext = 1;
+	    if (here >= 3) {
+		if (a[here - 1 + (here - 2) * a_dim1] != 0.f) {
+		    nbnext = 2;
+		}
+	    }
+	    i__1 = here - nbnext;
+	    stgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset], ldb, &q[
+		    q_offset], ldq, &z__[z_offset], ldz, &i__1, &nbnext, &nbf,
+		    &work[1], lwork, info);
+	    if (*info != 0) {
+		*ilst = here;
+		return 0;
+	    }
+	    here -= nbnext;
+
+/* Test if 2-by-2 block breaks into two 1-by-1 blocks. */
+/* (NBF = 3 again flags the two-1-by-1 case for the else-branch.) */
+
+	    if (nbf == 2) {
+		if (a[here + 1 + here * a_dim1] == 0.f) {
+		    nbf = 3;
+		}
+	    }
+
+	} else {
+
+/* Current block consists of two 1-by-1 blocks, each of which */
+/* must be swapped individually. */
+
+	    nbnext = 1;
+	    if (here >= 3) {
+		if (a[here - 1 + (here - 2) * a_dim1] != 0.f) {
+		    nbnext = 2;
+		}
+	    }
+	    i__1 = here - nbnext;
+	    stgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset], ldb, &q[
+		    q_offset], ldq, &z__[z_offset], ldz, &i__1, &nbnext, &
+		    c__1, &work[1], lwork, info);
+	    if (*info != 0) {
+		*ilst = here;
+		return 0;
+	    }
+	    if (nbnext == 1) {
+
+/* Swap two 1-by-1 blocks. */
+
+		stgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset], ldb,
+			&q[q_offset], ldq, &z__[z_offset], ldz, &here, &
+			nbnext, &c__1, &work[1], lwork, info);
+		if (*info != 0) {
+		    *ilst = here;
+		    return 0;
+		}
+		--here;
+	    } else {
+
+/* Recompute NBNEXT in case of 2-by-2 split. */
+
+		if (a[here + (here - 1) * a_dim1] == 0.f) {
+		    nbnext = 1;
+		}
+		if (nbnext == 2) {
+
+/* 2-by-2 block did not split. */
+
+		    i__1 = here - 1;
+		    stgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset],
+			    ldb, &q[q_offset], ldq, &z__[z_offset], ldz, &
+			    i__1, &c__2, &c__1, &work[1], lwork, info);
+		    if (*info != 0) {
+			*ilst = here;
+			return 0;
+		    }
+		    here += -2;
+		} else {
+
+/* 2-by-2 block did split. */
+
+		    stgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset],
+			    ldb, &q[q_offset], ldq, &z__[z_offset], ldz, &
+			    here, &c__1, &c__1, &work[1], lwork, info);
+		    if (*info != 0) {
+			*ilst = here;
+			return 0;
+		    }
+		    --here;
+		    stgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset],
+			    ldb, &q[q_offset], ldq, &z__[z_offset], ldz, &
+			    here, &c__1, &c__1, &work[1], lwork, info);
+		    if (*info != 0) {
+			*ilst = here;
+			return 0;
+		    }
+		    --here;
+		}
+	    }
+	}
+	if (here > *ilst) {
+	    goto L20;
+	}
+    }
+/* Report the final first row of the moved block and the optimal LWORK. */
+    *ilst = here;
+    work[1] = (real) lwmin;
+    return 0;
+
+/* End of STGEXC */
+
+} /* stgexc_ */
diff --git a/contrib/libs/clapack/stgsen.c b/contrib/libs/clapack/stgsen.c
new file mode 100644
index 0000000000..93d55c1706
--- /dev/null
+++ b/contrib/libs/clapack/stgsen.c
@@ -0,0 +1,832 @@
+/* stgsen.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__2 = 2;
+static real c_b28 = 1.f;
+
+/* Subroutine */ int stgsen_(integer *ijob, logical *wantq, logical *wantz,
+ logical *select, integer *n, real *a, integer *lda, real *b, integer *
+ ldb, real *alphar, real *alphai, real *beta, real *q, integer *ldq,
+ real *z__, integer *ldz, integer *m, real *pl, real *pr, real *dif,
+ real *work, integer *lwork, integer *iwork, integer *liwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1,
+ z_offset, i__1, i__2;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ integer i__, k, n1, n2, kk, ks, mn2, ijb;
+ real eps;
+ integer kase;
+ logical pair;
+ integer ierr;
+ real dsum;
+ logical swap;
+ extern /* Subroutine */ int slag2_(real *, integer *, real *, integer *,
+ real *, real *, real *, real *, real *, real *);
+ integer isave[3];
+ logical wantd;
+ integer lwmin;
+ logical wantp;
+ extern /* Subroutine */ int slacn2_(integer *, real *, real *, integer *,
+ real *, integer *, integer *);
+ logical wantd1, wantd2;
+ real dscale, rdscal;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), slacpy_(
+ char *, integer *, integer *, real *, integer *, real *, integer *
+), stgexc_(logical *, logical *, integer *, real *,
+ integer *, real *, integer *, real *, integer *, real *, integer *
+, integer *, integer *, real *, integer *, integer *);
+ integer liwmin;
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+ real smlnum;
+ logical lquery;
+ extern /* Subroutine */ int stgsyl_(char *, integer *, integer *, integer
+ *, real *, integer *, real *, integer *, real *, integer *, real *
+, integer *, real *, integer *, real *, integer *, real *, real *,
+ real *, integer *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/* January 2007 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STGSEN reorders the generalized real Schur decomposition of a real */
+/* matrix pair (A, B) (in terms of an orthonormal equivalence trans- */
+/* formation Q' * (A, B) * Z), so that a selected cluster of eigenvalues */
+/* appears in the leading diagonal blocks of the upper quasi-triangular */
+/* matrix A and the upper triangular B. The leading columns of Q and */
+/* Z form orthonormal bases of the corresponding left and right eigen- */
+/* spaces (deflating subspaces). (A, B) must be in generalized real */
+/* Schur canonical form (as returned by SGGES), i.e. A is block upper */
+/* triangular with 1-by-1 and 2-by-2 diagonal blocks. B is upper */
+/* triangular. */
+
+/* STGSEN also computes the generalized eigenvalues */
+
+/* w(j) = (ALPHAR(j) + i*ALPHAI(j))/BETA(j) */
+
+/* of the reordered matrix pair (A, B). */
+
+/* Optionally, STGSEN computes the estimates of reciprocal condition */
+/* numbers for eigenvalues and eigenspaces. These are Difu[(A11,B11), */
+/* (A22,B22)] and Difl[(A11,B11), (A22,B22)], i.e. the separation(s) */
+/* between the matrix pairs (A11, B11) and (A22,B22) that correspond to */
+/* the selected cluster and the eigenvalues outside the cluster, resp., */
+/* and norms of "projections" onto left and right eigenspaces w.r.t. */
+/* the selected cluster in the (1,1)-block. */
+
+/* Arguments */
+/* ========= */
+
+/* IJOB (input) INTEGER */
+/* Specifies whether condition numbers are required for the */
+/* cluster of eigenvalues (PL and PR) or the deflating subspaces */
+/* (Difu and Difl): */
+/* =0: Only reorder w.r.t. SELECT. No extras. */
+/* =1: Reciprocal of norms of "projections" onto left and right */
+/* eigenspaces w.r.t. the selected cluster (PL and PR). */
+/* =2: Upper bounds on Difu and Difl. F-norm-based estimate */
+/* (DIF(1:2)). */
+/* =3: Estimate of Difu and Difl. 1-norm-based estimate */
+/* (DIF(1:2)). */
+/* About 5 times as expensive as IJOB = 2. */
+/* =4: Compute PL, PR and DIF (i.e. 0, 1 and 2 above): Economic */
+/* version to get it all. */
+/* =5: Compute PL, PR and DIF (i.e. 0, 1 and 3 above) */
+
+/* WANTQ (input) LOGICAL */
+/* .TRUE. : update the left transformation matrix Q; */
+/* .FALSE.: do not update Q. */
+
+/* WANTZ (input) LOGICAL */
+/* .TRUE. : update the right transformation matrix Z; */
+/* .FALSE.: do not update Z. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* SELECT specifies the eigenvalues in the selected cluster. */
+/* To select a real eigenvalue w(j), SELECT(j) must be set to */
+/* .TRUE.. To select a complex conjugate pair of eigenvalues */
+/* w(j) and w(j+1), corresponding to a 2-by-2 diagonal block, */
+/* either SELECT(j) or SELECT(j+1) or both must be set to */
+/* .TRUE.; a complex conjugate pair of eigenvalues must be */
+/* either both included in the cluster or both excluded. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) REAL array, dimension(LDA,N) */
+/* On entry, the upper quasi-triangular matrix A, with (A, B) in */
+/* generalized real Schur canonical form. */
+/* On exit, A is overwritten by the reordered matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) REAL array, dimension(LDB,N) */
+/* On entry, the upper triangular matrix B, with (A, B) in */
+/* generalized real Schur canonical form. */
+/* On exit, B is overwritten by the reordered matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* ALPHAR (output) REAL array, dimension (N) */
+/* ALPHAI (output) REAL array, dimension (N) */
+/* BETA (output) REAL array, dimension (N) */
+/* On exit, (ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N, will */
+/* be the generalized eigenvalues. ALPHAR(j) + ALPHAI(j)*i */
+/* and BETA(j),j=1,...,N are the diagonals of the complex Schur */
+/* form (S,T) that would result if the 2-by-2 diagonal blocks of */
+/* the real generalized Schur form of (A,B) were further reduced */
+/* to triangular form using complex unitary transformations. */
+/* If ALPHAI(j) is zero, then the j-th eigenvalue is real; if */
+/* positive, then the j-th and (j+1)-st eigenvalues are a */
+/* complex conjugate pair, with ALPHAI(j+1) negative. */
+
+/* Q (input/output) REAL array, dimension (LDQ,N) */
+/* On entry, if WANTQ = .TRUE., Q is an N-by-N matrix. */
+/* On exit, Q has been postmultiplied by the left orthogonal */
+/* transformation matrix which reorder (A, B); The leading M */
+/* columns of Q form orthonormal bases for the specified pair of */
+/* left eigenspaces (deflating subspaces). */
+/* If WANTQ = .FALSE., Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= 1; */
+/* and if WANTQ = .TRUE., LDQ >= N. */
+
+/* Z (input/output) REAL array, dimension (LDZ,N) */
+/* On entry, if WANTZ = .TRUE., Z is an N-by-N matrix. */
+/* On exit, Z has been postmultiplied by the left orthogonal */
+/* transformation matrix which reorder (A, B); The leading M */
+/* columns of Z form orthonormal bases for the specified pair of */
+/* left eigenspaces (deflating subspaces). */
+/* If WANTZ = .FALSE., Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1; */
+/* If WANTZ = .TRUE., LDZ >= N. */
+
+/* M (output) INTEGER */
+/* The dimension of the specified pair of left and right eigen- */
+/* spaces (deflating subspaces). 0 <= M <= N. */
+
+/* PL (output) REAL */
+/* PR (output) REAL */
+/* If IJOB = 1, 4 or 5, PL, PR are lower bounds on the */
+/* reciprocal of the norm of "projections" onto left and right */
+/* eigenspaces with respect to the selected cluster. */
+/* 0 < PL, PR <= 1. */
+/* If M = 0 or M = N, PL = PR = 1. */
+/* If IJOB = 0, 2 or 3, PL and PR are not referenced. */
+
+/* DIF (output) REAL array, dimension (2). */
+/* If IJOB >= 2, DIF(1:2) store the estimates of Difu and Difl. */
+/* If IJOB = 2 or 4, DIF(1:2) are F-norm-based upper bounds on */
+/* Difu and Difl. If IJOB = 3 or 5, DIF(1:2) are 1-norm-based */
+/* estimates of Difu and Difl. */
+/* If M = 0 or N, DIF(1:2) = F-norm([A, B]). */
+/* If IJOB = 0 or 1, DIF is not referenced. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= 4*N+16. */
+/* If IJOB = 1, 2 or 4, LWORK >= MAX(4*N+16, 2*M*(N-M)). */
+/* If IJOB = 3 or 5, LWORK >= MAX(4*N+16, 4*M*(N-M)). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* IF IJOB = 0, IWORK is not referenced. Otherwise, */
+/* on exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. LIWORK >= 1. */
+/* If IJOB = 1, 2 or 4, LIWORK >= N+6. */
+/* If IJOB = 3 or 5, LIWORK >= MAX(2*M*(N-M), N+6). */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal size of the IWORK array, */
+/* returns this value as the first entry of the IWORK array, and */
+/* no error message related to LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* =0: Successful exit. */
+/* <0: If INFO = -i, the i-th argument had an illegal value. */
+/* =1: Reordering of (A, B) failed because the transformed */
+/* matrix pair (A, B) would be too far from generalized */
+/* Schur form; the problem is very ill-conditioned. */
+/* (A, B) may have been partially reordered. */
+/* If requested, 0 is returned in DIF(*), PL and PR. */
+
+/* Further Details */
+/* =============== */
+
+/* STGSEN first collects the selected eigenvalues by computing */
+/* orthogonal U and W that move them to the top left corner of (A, B). */
+/* In other words, the selected eigenvalues are the eigenvalues of */
+/* (A11, B11) in: */
+
+/* U'*(A, B)*W = (A11 A12) (B11 B12) n1 */
+/* ( 0 A22),( 0 B22) n2 */
+/* n1 n2 n1 n2 */
+
+/* where N = n1+n2 and U' means the transpose of U. The first n1 columns */
+/* of U and W span the specified pair of left and right eigenspaces */
+/* (deflating subspaces) of (A, B). */
+
+/* If (A, B) has been obtained from the generalized real Schur */
+/* decomposition of a matrix pair (C, D) = Q*(A, B)*Z', then the */
+/* reordered generalized real Schur form of (C, D) is given by */
+
+/* (C, D) = (Q*U)*(U'*(A, B)*W)*(Z*W)', */
+
+/* and the first n1 columns of Q*U and Z*W span the corresponding */
+/* deflating subspaces of (C, D) (Q and Z store Q*U and Z*W, resp.). */
+
+/* Note that if the selected eigenvalue is sufficiently ill-conditioned, */
+/* then its value may differ significantly from its value before */
+/* reordering. */
+
+/* The reciprocal condition numbers of the left and right eigenspaces */
+/* spanned by the first n1 columns of U and W (or Q*U and Z*W) may */
+/* be returned in DIF(1:2), corresponding to Difu and Difl, resp. */
+
+/* The Difu and Difl are defined as: */
+
+/* Difu[(A11, B11), (A22, B22)] = sigma-min( Zu ) */
+/* and */
+/* Difl[(A11, B11), (A22, B22)] = Difu[(A22, B22), (A11, B11)], */
+
+/* where sigma-min(Zu) is the smallest singular value of the */
+/* (2*n1*n2)-by-(2*n1*n2) matrix */
+
+/* Zu = [ kron(In2, A11) -kron(A22', In1) ] */
+/* [ kron(In2, B11) -kron(B22', In1) ]. */
+
+/* Here, Inx is the identity matrix of size nx and A22' is the */
+/* transpose of A22. kron(X, Y) is the Kronecker product between */
+/* the matrices X and Y. */
+
+/* When DIF(2) is small, small changes in (A, B) can cause large changes */
+/* in the deflating subspace. An approximate (asymptotic) bound on the */
+/* maximum angular error in the computed deflating subspaces is */
+
+/* EPS * norm((A, B)) / DIF(2), */
+
+/* where EPS is the machine precision. */
+
+/* The reciprocal norm of the projectors on the left and right */
+/* eigenspaces associated with (A11, B11) may be returned in PL and PR. */
+/* They are computed as follows. First we compute L and R so that */
+/* P*(A, B)*Q is block diagonal, where */
+
+/* P = ( I -L ) n1 Q = ( I R ) n1 */
+/* ( 0 I ) n2 and ( 0 I ) n2 */
+/* n1 n2 n1 n2 */
+
+/* and (L, R) is the solution to the generalized Sylvester equation */
+
+/* A11*R - L*A22 = -A12 */
+/* B11*R - L*B22 = -B12 */
+
+/* Then PL = (F-norm(L)**2+1)**(-1/2) and PR = (F-norm(R)**2+1)**(-1/2). */
+/* An approximate (asymptotic) bound on the average absolute error of */
+/* the selected eigenvalues is */
+
+/* EPS * norm((A, B)) / PL. */
+
+/* There are also global error bounds which valid for perturbations up */
+/* to a certain restriction: A lower bound (x) on the smallest */
+/* F-norm(E,F) for which an eigenvalue of (A11, B11) may move and */
+/* coalesce with an eigenvalue of (A22, B22) under perturbation (E,F), */
+/* (i.e. (A + E, B + F), is */
+
+/* x = min(Difu,Difl)/((1/(PL*PL)+1/(PR*PR))**(1/2)+2*max(1/PL,1/PR)). */
+
+/* An approximate bound on x can be computed from DIF(1:2), PL and PR. */
+
+/* If y = ( F-norm(E,F) / x) <= 1, the angles between the perturbed */
+/* (L', R') and unperturbed (L, R) left and right deflating subspaces */
+/* associated with the selected cluster in the (1,1)-blocks can be */
+/* bounded as */
+
+/* max-angle(L, L') <= arctan( y * PL / (1 - y * (1 - PL * PL)**(1/2)) */
+/* max-angle(R, R') <= arctan( y * PR / (1 - y * (1 - PR * PR)**(1/2)) */
+
+/* See LAPACK User's Guide section 4.11 or the following references */
+/* for more information. */
+
+/* Note that if the default method for computing the Frobenius-norm- */
+/* based estimate DIF is not wanted (see SLATDF), then the parameter */
+/* IDIFJB (see below) should be changed from 3 to 4 (routine SLATDF */
+/* (IJOB = 2 will be used)). See STGSYL for more details. */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* References */
+/* ========== */
+
+/* [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */
+/* Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */
+/* M.S. Moonen et al (eds), Linear Algebra for Large Scale and */
+/* Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */
+
+/* [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified */
+/* Eigenvalues of a Regular Matrix Pair (A, B) and Condition */
+/* Estimation: Theory, Algorithms and Software, */
+/* Report UMINF - 94.04, Department of Computing Science, Umea */
+/* University, S-901 87 Umea, Sweden, 1994. Also as LAPACK Working */
+/* Note 87. To appear in Numerical Algorithms, 1996. */
+
+/* [3] B. Kagstrom and P. Poromaa, LAPACK-Style Algorithms and Software */
+/* for Solving the Generalized Sylvester Equation and Estimating the */
+/* Separation between Regular Matrix Pairs, Report UMINF - 93.23, */
+/* Department of Computing Science, Umea University, S-901 87 Umea, */
+/* Sweden, December 1993, Revised April 1994, Also as LAPACK Working */
+/* Note 75. To appear in ACM Trans. on Math. Software, Vol 22, No 1, */
+/* 1996. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+ /* Parameter adjustments */
+ --select;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alphar;
+ --alphai;
+ --beta;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --dif;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1 || *liwork == -1;
+
+ if (*ijob < 0 || *ijob > 5) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldq < 1 || *wantq && *ldq < *n) {
+ *info = -14;
+ } else if (*ldz < 1 || *wantz && *ldz < *n) {
+ *info = -16;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STGSEN", &i__1);
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S") / eps;
+ ierr = 0;
+
+ wantp = *ijob == 1 || *ijob >= 4;
+ wantd1 = *ijob == 2 || *ijob == 4;
+ wantd2 = *ijob == 3 || *ijob == 5;
+ wantd = wantd1 || wantd2;
+
+/* Set M to the dimension of the specified pair of deflating */
+/* subspaces. */
+
+ *m = 0;
+ pair = FALSE_;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (pair) {
+ pair = FALSE_;
+ } else {
+ if (k < *n) {
+ if (a[k + 1 + k * a_dim1] == 0.f) {
+ if (select[k]) {
+ ++(*m);
+ }
+ } else {
+ pair = TRUE_;
+ if (select[k] || select[k + 1]) {
+ *m += 2;
+ }
+ }
+ } else {
+ if (select[*n]) {
+ ++(*m);
+ }
+ }
+ }
+/* L10: */
+ }
+
+ if (*ijob == 1 || *ijob == 2 || *ijob == 4) {
+/* Computing MAX */
+ i__1 = 1, i__2 = (*n << 2) + 16, i__1 = max(i__1,i__2), i__2 = (*m <<
+ 1) * (*n - *m);
+ lwmin = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = 1, i__2 = *n + 6;
+ liwmin = max(i__1,i__2);
+ } else if (*ijob == 3 || *ijob == 5) {
+/* Computing MAX */
+ i__1 = 1, i__2 = (*n << 2) + 16, i__1 = max(i__1,i__2), i__2 = (*m <<
+ 2) * (*n - *m);
+ lwmin = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = 1, i__2 = (*m << 1) * (*n - *m), i__1 = max(i__1,i__2), i__2 =
+ *n + 6;
+ liwmin = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = 1, i__2 = (*n << 2) + 16;
+ lwmin = max(i__1,i__2);
+ liwmin = 1;
+ }
+
+ work[1] = (real) lwmin;
+ iwork[1] = liwmin;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -22;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -24;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STGSEN", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == *n || *m == 0) {
+ if (wantp) {
+ *pl = 1.f;
+ *pr = 1.f;
+ }
+ if (wantd) {
+ dscale = 0.f;
+ dsum = 1.f;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ slassq_(n, &a[i__ * a_dim1 + 1], &c__1, &dscale, &dsum);
+ slassq_(n, &b[i__ * b_dim1 + 1], &c__1, &dscale, &dsum);
+/* L20: */
+ }
+ dif[1] = dscale * sqrt(dsum);
+ dif[2] = dif[1];
+ }
+ goto L60;
+ }
+
+/* Collect the selected blocks at the top-left corner of (A, B). */
+
+ ks = 0;
+ pair = FALSE_;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (pair) {
+ pair = FALSE_;
+ } else {
+
+ swap = select[k];
+ if (k < *n) {
+ if (a[k + 1 + k * a_dim1] != 0.f) {
+ pair = TRUE_;
+ swap = swap || select[k + 1];
+ }
+ }
+
+ if (swap) {
+ ++ks;
+
+/* Swap the K-th block to position KS. */
+/* Perform the reordering of diagonal blocks in (A, B) */
+/* by orthogonal transformation matrices and update */
+/* Q and Z accordingly (if requested): */
+
+ kk = k;
+ if (k != ks) {
+ stgexc_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset],
+ ldb, &q[q_offset], ldq, &z__[z_offset], ldz, &kk,
+ &ks, &work[1], lwork, &ierr);
+ }
+
+ if (ierr > 0) {
+
+/* Swap is rejected: exit. */
+
+ *info = 1;
+ if (wantp) {
+ *pl = 0.f;
+ *pr = 0.f;
+ }
+ if (wantd) {
+ dif[1] = 0.f;
+ dif[2] = 0.f;
+ }
+ goto L60;
+ }
+
+ if (pair) {
+ ++ks;
+ }
+ }
+ }
+/* L30: */
+ }
+ if (wantp) {
+
+/* Solve generalized Sylvester equation for R and L */
+/* and compute PL and PR. */
+
+ n1 = *m;
+ n2 = *n - *m;
+ i__ = n1 + 1;
+ ijb = 0;
+ slacpy_("Full", &n1, &n2, &a[i__ * a_dim1 + 1], lda, &work[1], &n1);
+ slacpy_("Full", &n1, &n2, &b[i__ * b_dim1 + 1], ldb, &work[n1 * n2 +
+ 1], &n1);
+ i__1 = *lwork - (n1 << 1) * n2;
+ stgsyl_("N", &ijb, &n1, &n2, &a[a_offset], lda, &a[i__ + i__ * a_dim1]
+, lda, &work[1], &n1, &b[b_offset], ldb, &b[i__ + i__ *
+ b_dim1], ldb, &work[n1 * n2 + 1], &n1, &dscale, &dif[1], &
+ work[(n1 * n2 << 1) + 1], &i__1, &iwork[1], &ierr);
+
+/* Estimate the reciprocal of norms of "projections" onto left */
+/* and right eigenspaces. */
+
+ rdscal = 0.f;
+ dsum = 1.f;
+ i__1 = n1 * n2;
+ slassq_(&i__1, &work[1], &c__1, &rdscal, &dsum);
+ *pl = rdscal * sqrt(dsum);
+ if (*pl == 0.f) {
+ *pl = 1.f;
+ } else {
+ *pl = dscale / (sqrt(dscale * dscale / *pl + *pl) * sqrt(*pl));
+ }
+ rdscal = 0.f;
+ dsum = 1.f;
+ i__1 = n1 * n2;
+ slassq_(&i__1, &work[n1 * n2 + 1], &c__1, &rdscal, &dsum);
+ *pr = rdscal * sqrt(dsum);
+ if (*pr == 0.f) {
+ *pr = 1.f;
+ } else {
+ *pr = dscale / (sqrt(dscale * dscale / *pr + *pr) * sqrt(*pr));
+ }
+ }
+
+ if (wantd) {
+
+/* Compute estimates of Difu and Difl. */
+
+ if (wantd1) {
+ n1 = *m;
+ n2 = *n - *m;
+ i__ = n1 + 1;
+ ijb = 3;
+
+/* Frobenius norm-based Difu-estimate. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ stgsyl_("N", &ijb, &n1, &n2, &a[a_offset], lda, &a[i__ + i__ *
+ a_dim1], lda, &work[1], &n1, &b[b_offset], ldb, &b[i__ +
+ i__ * b_dim1], ldb, &work[n1 * n2 + 1], &n1, &dscale, &
+ dif[1], &work[(n1 << 1) * n2 + 1], &i__1, &iwork[1], &
+ ierr);
+
+/* Frobenius norm-based Difl-estimate. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ stgsyl_("N", &ijb, &n2, &n1, &a[i__ + i__ * a_dim1], lda, &a[
+ a_offset], lda, &work[1], &n2, &b[i__ + i__ * b_dim1],
+ ldb, &b[b_offset], ldb, &work[n1 * n2 + 1], &n2, &dscale,
+ &dif[2], &work[(n1 << 1) * n2 + 1], &i__1, &iwork[1], &
+ ierr);
+ } else {
+
+
+/* Compute 1-norm-based estimates of Difu and Difl using */
+/* reversed communication with SLACN2. In each step a */
+/* generalized Sylvester equation or a transposed variant */
+/* is solved. */
+
+ kase = 0;
+ n1 = *m;
+ n2 = *n - *m;
+ i__ = n1 + 1;
+ ijb = 0;
+ mn2 = (n1 << 1) * n2;
+
+/* 1-norm-based estimate of Difu. */
+
+L40:
+ slacn2_(&mn2, &work[mn2 + 1], &work[1], &iwork[1], &dif[1], &kase,
+ isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Solve generalized Sylvester equation. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ stgsyl_("N", &ijb, &n1, &n2, &a[a_offset], lda, &a[i__ +
+ i__ * a_dim1], lda, &work[1], &n1, &b[b_offset],
+ ldb, &b[i__ + i__ * b_dim1], ldb, &work[n1 * n2 +
+ 1], &n1, &dscale, &dif[1], &work[(n1 << 1) * n2 +
+ 1], &i__1, &iwork[1], &ierr);
+ } else {
+
+/* Solve the transposed variant. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ stgsyl_("T", &ijb, &n1, &n2, &a[a_offset], lda, &a[i__ +
+ i__ * a_dim1], lda, &work[1], &n1, &b[b_offset],
+ ldb, &b[i__ + i__ * b_dim1], ldb, &work[n1 * n2 +
+ 1], &n1, &dscale, &dif[1], &work[(n1 << 1) * n2 +
+ 1], &i__1, &iwork[1], &ierr);
+ }
+ goto L40;
+ }
+ dif[1] = dscale / dif[1];
+
+/* 1-norm-based estimate of Difl. */
+
+L50:
+ slacn2_(&mn2, &work[mn2 + 1], &work[1], &iwork[1], &dif[2], &kase,
+ isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Solve generalized Sylvester equation. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ stgsyl_("N", &ijb, &n2, &n1, &a[i__ + i__ * a_dim1], lda,
+ &a[a_offset], lda, &work[1], &n2, &b[i__ + i__ *
+ b_dim1], ldb, &b[b_offset], ldb, &work[n1 * n2 +
+ 1], &n2, &dscale, &dif[2], &work[(n1 << 1) * n2 +
+ 1], &i__1, &iwork[1], &ierr);
+ } else {
+
+/* Solve the transposed variant. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ stgsyl_("T", &ijb, &n2, &n1, &a[i__ + i__ * a_dim1], lda,
+ &a[a_offset], lda, &work[1], &n2, &b[i__ + i__ *
+ b_dim1], ldb, &b[b_offset], ldb, &work[n1 * n2 +
+ 1], &n2, &dscale, &dif[2], &work[(n1 << 1) * n2 +
+ 1], &i__1, &iwork[1], &ierr);
+ }
+ goto L50;
+ }
+ dif[2] = dscale / dif[2];
+
+ }
+ }
+
+L60:
+
+/* Compute generalized eigenvalues of reordered pair (A, B) and */
+/* normalize the generalized Schur form. */
+
+ pair = FALSE_;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (pair) {
+ pair = FALSE_;
+ } else {
+
+ if (k < *n) {
+ if (a[k + 1 + k * a_dim1] != 0.f) {
+ pair = TRUE_;
+ }
+ }
+
+ if (pair) {
+
+/* Compute the eigenvalue(s) at position K. */
+
+ work[1] = a[k + k * a_dim1];
+ work[2] = a[k + 1 + k * a_dim1];
+ work[3] = a[k + (k + 1) * a_dim1];
+ work[4] = a[k + 1 + (k + 1) * a_dim1];
+ work[5] = b[k + k * b_dim1];
+ work[6] = b[k + 1 + k * b_dim1];
+ work[7] = b[k + (k + 1) * b_dim1];
+ work[8] = b[k + 1 + (k + 1) * b_dim1];
+ r__1 = smlnum * eps;
+ slag2_(&work[1], &c__2, &work[5], &c__2, &r__1, &beta[k], &
+ beta[k + 1], &alphar[k], &alphar[k + 1], &alphai[k]);
+ alphai[k + 1] = -alphai[k];
+
+ } else {
+
+ if (r_sign(&c_b28, &b[k + k * b_dim1]) < 0.f) {
+
+/* If B(K,K) is negative, make it positive */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[k + i__ * a_dim1] = -a[k + i__ * a_dim1];
+ b[k + i__ * b_dim1] = -b[k + i__ * b_dim1];
+ if (*wantq) {
+ q[i__ + k * q_dim1] = -q[i__ + k * q_dim1];
+ }
+/* L80: */
+ }
+ }
+
+ alphar[k] = a[k + k * a_dim1];
+ alphai[k] = 0.f;
+ beta[k] = b[k + k * b_dim1];
+
+ }
+ }
+/* L70: */
+ }
+
+ work[1] = (real) lwmin;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of STGSEN */
+
+} /* stgsen_ */
diff --git a/contrib/libs/clapack/stgsja.c b/contrib/libs/clapack/stgsja.c
new file mode 100644
index 0000000000..677fbe3cc1
--- /dev/null
+++ b/contrib/libs/clapack/stgsja.c
@@ -0,0 +1,619 @@
+/* stgsja.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b13 = 0.f;
+static real c_b14 = 1.f;
+static integer c__1 = 1;
+static real c_b43 = -1.f;
+
+/* Subroutine */ int stgsja_(char *jobu, char *jobv, char *jobq, integer *m,
+	integer *p, integer *n, integer *k, integer *l, real *a, integer *lda,
+	real *b, integer *ldb, real *tola, real *tolb, real *alpha, real *
+	beta, real *u, integer *ldu, real *v, integer *ldv, real *q, integer *
+	ldq, real *work, integer *ncycle, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, u_dim1,
+	    u_offset, v_dim1, v_offset, i__1, i__2, i__3, i__4;
+    real r__1;
+
+    /* Local variables */
+    integer i__, j;
+    real a1, a2, a3, b1, b2, b3, csq, csu, csv, snq, rwk, snu, snv;
+    extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+	    integer *, real *, real *);
+    real gamma;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+    logical initq, initu, initv, wantq, upper;
+    real error, ssmin;
+    logical wantu, wantv;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+	    integer *), slags2_(logical *, real *, real *, real *, real *,
+	    real *, real *, real *, real *, real *, real *, real *, real *);
+    integer kcycle;
+    extern /* Subroutine */ int xerbla_(char *, integer *), slapll_(
+	    integer *, real *, integer *, real *, integer *, real *), slartg_(
+	    real *, real *, real *, real *, real *), slaset_(char *, integer *
+, integer *, real *, real *, real *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  STGSJA computes the generalized singular value decomposition (GSVD) */
+/*  of two real upper triangular (or trapezoidal) matrices A and B. */
+
+/*  On entry, it is assumed that matrices A and B have the following */
+/*  forms, which may be obtained by the preprocessing subroutine SGGSVP */
+/*  from a general M-by-N matrix A and P-by-N matrix B: */
+
+/*               N-K-L  K    L */
+/*     A =    K ( 0    A12  A13 ) if M-K-L >= 0; */
+/*            L ( 0     0   A23 ) */
+/*        M-K-L ( 0     0    0  ) */
+
+/*             N-K-L  K    L */
+/*     A =  K ( 0    A12  A13 ) if M-K-L < 0; */
+/*        M-K ( 0     0   A23 ) */
+
+/*             N-K-L  K    L */
+/*     B =  L ( 0     0   B13 ) */
+/*        P-L ( 0     0    0  ) */
+
+/*  where the K-by-K matrix A12 and L-by-L matrix B13 are nonsingular */
+/*  upper triangular; A23 is L-by-L upper triangular if M-K-L >= 0, */
+/*  otherwise A23 is (M-K)-by-L upper trapezoidal. */
+
+/*  On exit, */
+
+/*              U'*A*Q = D1*( 0 R ),    V'*B*Q = D2*( 0 R ), */
+
+/*  where U, V and Q are orthogonal matrices, Z' denotes the transpose */
+/*  of Z, R is a nonsingular upper triangular matrix, and D1 and D2 are */
+/*  ``diagonal'' matrices, which are of the following structures: */
+
+/*  If M-K-L >= 0, */
+
+/*                      K  L */
+/*         D1 =     K ( I  0 ) */
+/*                  L ( 0  C ) */
+/*              M-K-L ( 0  0 ) */
+
+/*                     K  L */
+/*         D2 = L   ( 0  S ) */
+/*              P-L ( 0  0 ) */
+
+/*                 N-K-L  K    L */
+/*    ( 0 R ) = K (  0   R11  R12 ) K */
+/*              L (  0    0   R22 ) L */
+
+/*  where */
+
+/*    C = diag( ALPHA(K+1), ... , ALPHA(K+L) ), */
+/*    S = diag( BETA(K+1),  ... , BETA(K+L) ), */
+/*    C**2 + S**2 = I. */
+
+/*    R is stored in A(1:K+L,N-K-L+1:N) on exit. */
+
+/*  If M-K-L < 0, */
+
+/*                 K M-K K+L-M */
+/*      D1 =   K ( I  0    0  ) */
+/*           M-K ( 0  C    0  ) */
+
+/*                   K M-K K+L-M */
+/*      D2 =   M-K ( 0  S    0  ) */
+/*           K+L-M ( 0  0    I  ) */
+/*             P-L ( 0  0    0  ) */
+
+/*                 N-K-L  K   M-K  K+L-M */
+/*  ( 0 R ) =    K ( 0    R11  R12  R13  ) */
+/*             M-K ( 0     0   R22  R23  ) */
+/*           K+L-M ( 0     0    0   R33  ) */
+
+/*  where */
+/*  C = diag( ALPHA(K+1), ... , ALPHA(M) ), */
+/*  S = diag( BETA(K+1),  ... , BETA(M) ), */
+/*  C**2 + S**2 = I. */
+
+/*  R = ( R11 R12 R13 ) is stored in A(1:M, N-K-L+1:N) and R33 is stored */
+/*      (  0  R22 R23 ) */
+/*  in B(M-K+1:L,N+M-K-L+1:N) on exit. */
+
+/*  The computation of the orthogonal transformation matrices U, V or Q */
+/*  is optional.  These matrices may either be formed explicitly, or they */
+/*  may be postmultiplied into input matrices U1, V1, or Q1. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOBU    (input) CHARACTER*1 */
+/*          = 'U':  U must contain an orthogonal matrix U1 on entry, and */
+/*                  the product U1*U is returned; */
+/*          = 'I':  U is initialized to the unit matrix, and the */
+/*                  orthogonal matrix U is returned; */
+/*          = 'N':  U is not computed. */
+
+/*  JOBV    (input) CHARACTER*1 */
+/*          = 'V':  V must contain an orthogonal matrix V1 on entry, and */
+/*                  the product V1*V is returned; */
+/*          = 'I':  V is initialized to the unit matrix, and the */
+/*                  orthogonal matrix V is returned; */
+/*          = 'N':  V is not computed. */
+
+/*  JOBQ    (input) CHARACTER*1 */
+/*          = 'Q':  Q must contain an orthogonal matrix Q1 on entry, and */
+/*                  the product Q1*Q is returned; */
+/*          = 'I':  Q is initialized to the unit matrix, and the */
+/*                  orthogonal matrix Q is returned; */
+/*          = 'N':  Q is not computed. */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix A.  M >= 0. */
+
+/*  P       (input) INTEGER */
+/*          The number of rows of the matrix B.  P >= 0. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrices A and B.  N >= 0. */
+
+/*  K       (input) INTEGER */
+/*  L       (input) INTEGER */
+/*          K and L specify the subblocks in the input matrices A and B: */
+/*          A23 = A(K+1:MIN(K+L,M),N-L+1:N) and B13 = B(1:L,N-L+1:N) */
+/*          of A and B, whose GSVD is going to be computed by STGSJA. */
+/*          See Further details. */
+
+/*  A       (input/output) REAL array, dimension (LDA,N) */
+/*          On entry, the M-by-N matrix A. */
+/*          On exit, A(N-K+1:N,1:MIN(K+L,M) ) contains the triangular */
+/*          matrix R or part of R.  See Purpose for details. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A. LDA >= max(1,M). */
+
+/*  B       (input/output) REAL array, dimension (LDB,N) */
+/*          On entry, the P-by-N matrix B. */
+/*          On exit, if necessary, B(M-K+1:L,N+M-K-L+1:N) contains */
+/*          a part of R.  See Purpose for details. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B. LDB >= max(1,P). */
+
+/*  TOLA    (input) REAL */
+/*  TOLB    (input) REAL */
+/*          TOLA and TOLB are the convergence criteria for the Jacobi- */
+/*          Kogbetliantz iteration procedure. Generally, they are the */
+/*          same as used in the preprocessing step, say */
+/*              TOLA = max(M,N)*norm(A)*MACHEPS, */
+/*              TOLB = max(P,N)*norm(B)*MACHEPS. */
+
+/*  ALPHA   (output) REAL array, dimension (N) */
+/*  BETA    (output) REAL array, dimension (N) */
+/*          On exit, ALPHA and BETA contain the generalized singular */
+/*          value pairs of A and B; */
+/*            ALPHA(1:K) = 1, */
+/*            BETA(1:K)  = 0, */
+/*          and if M-K-L >= 0, */
+/*            ALPHA(K+1:K+L) = diag(C), */
+/*            BETA(K+1:K+L)  = diag(S), */
+/*          or if M-K-L < 0, */
+/*            ALPHA(K+1:M)= C, ALPHA(M+1:K+L)= 0 */
+/*            BETA(K+1:M) = S, BETA(M+1:K+L) = 1. */
+/*          Furthermore, if K+L < N, */
+/*            ALPHA(K+L+1:N) = 0 and */
+/*            BETA(K+L+1:N)  = 0. */
+
+/*  U       (input/output) REAL array, dimension (LDU,M) */
+/*          On entry, if JOBU = 'U', U must contain a matrix U1 (usually */
+/*          the orthogonal matrix returned by SGGSVP). */
+/*          On exit, */
+/*          if JOBU = 'I', U contains the orthogonal matrix U; */
+/*          if JOBU = 'U', U contains the product U1*U. */
+/*          If JOBU = 'N', U is not referenced. */
+
+/*  LDU     (input) INTEGER */
+/*          The leading dimension of the array U. LDU >= max(1,M) if */
+/*          JOBU = 'U'; LDU >= 1 otherwise. */
+
+/*  V       (input/output) REAL array, dimension (LDV,P) */
+/*          On entry, if JOBV = 'V', V must contain a matrix V1 (usually */
+/*          the orthogonal matrix returned by SGGSVP). */
+/*          On exit, */
+/*          if JOBV = 'I', V contains the orthogonal matrix V; */
+/*          if JOBV = 'V', V contains the product V1*V. */
+/*          If JOBV = 'N', V is not referenced. */
+
+/*  LDV     (input) INTEGER */
+/*          The leading dimension of the array V. LDV >= max(1,P) if */
+/*          JOBV = 'V'; LDV >= 1 otherwise. */
+
+/*  Q       (input/output) REAL array, dimension (LDQ,N) */
+/*          On entry, if JOBQ = 'Q', Q must contain a matrix Q1 (usually */
+/*          the orthogonal matrix returned by SGGSVP). */
+/*          On exit, */
+/*          if JOBQ = 'I', Q contains the orthogonal matrix Q; */
+/*          if JOBQ = 'Q', Q contains the product Q1*Q. */
+/*          If JOBQ = 'N', Q is not referenced. */
+
+/*  LDQ     (input) INTEGER */
+/*          The leading dimension of the array Q. LDQ >= max(1,N) if */
+/*          JOBQ = 'Q'; LDQ >= 1 otherwise. */
+
+/*  WORK    (workspace) REAL array, dimension (2*N) */
+
+/*  NCYCLE  (output) INTEGER */
+/*          The number of cycles required for convergence. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */
+/*          = 1:  the procedure does not converge after MAXIT cycles. */
+
+/*  Internal Parameters */
+/*  =================== */
+
+/*  MAXIT   INTEGER */
+/*          MAXIT specifies the total loops that the iterative procedure */
+/*          may take. If after MAXIT cycles, the routine fails to */
+/*          converge, we return INFO = 1. */
+/*          NOTE(review): in this f2c translation MAXIT is hard-coded as */
+/*          the literal 40 in the KCYCLE loop bound below. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  STGSJA essentially uses a variant of Kogbetliantz algorithm to reduce */
+/*  min(L,M-K)-by-L triangular (or trapezoidal) matrix A23 and L-by-L */
+/*  matrix B13 to the form: */
+
+/*           U1'*A13*Q1 = C1*R1; V1'*B13*Q1 = S1*R1, */
+
+/*  where U1, V1 and Q1 are orthogonal matrix, and Z' is the transpose */
+/*  of Z.  C1 and S1 are diagonal matrices satisfying */
+
+/*                C1**2 + S1**2 = I, */
+
+/*  and R1 is an L-by-L nonsingular upper triangular matrix. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Decode and test the input parameters */
+
+    /* Parameter adjustments */
+    /* Shift each array pointer back by one row/column so that the */
+    /* 1-based Fortran reference A(i,j) maps to a[i + j * a_dim1]. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --alpha;
+    --beta;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1;
+    u -= u_offset;
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    --work;
+
+    /* Function Body */
+    initu = lsame_(jobu, "I");
+    wantu = initu || lsame_(jobu, "U");
+
+    initv = lsame_(jobv, "I");
+    wantv = initv || lsame_(jobv, "V");
+
+    initq = lsame_(jobq, "I");
+    wantq = initq || lsame_(jobq, "Q");
+
+    *info = 0;
+    if (! (initu || wantu || lsame_(jobu, "N"))) {
+	*info = -1;
+    } else if (! (initv || wantv || lsame_(jobv, "N"))) {
+	*info = -2;
+    } else if (! (initq || wantq || lsame_(jobq, "N"))) {
+	*info = -3;
+    } else if (*m < 0) {
+	*info = -4;
+    } else if (*p < 0) {
+	*info = -5;
+    } else if (*n < 0) {
+	*info = -6;
+    } else if (*lda < max(1,*m)) {
+	*info = -10;
+    } else if (*ldb < max(1,*p)) {
+	*info = -12;
+    } else if (*ldu < 1 || wantu && *ldu < *m) {
+	*info = -18;
+    } else if (*ldv < 1 || wantv && *ldv < *p) {
+	*info = -20;
+    } else if (*ldq < 1 || wantq && *ldq < *n) {
+	*info = -22;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("STGSJA", &i__1);
+	return 0;
+    }
+
+/*     Initialize U, V and Q, if necessary */
+
+    if (initu) {
+	slaset_("Full", m, m, &c_b13, &c_b14, &u[u_offset], ldu);
+    }
+    if (initv) {
+	slaset_("Full", p, p, &c_b13, &c_b14, &v[v_offset], ldv);
+    }
+    if (initq) {
+	slaset_("Full", n, n, &c_b13, &c_b14, &q[q_offset], ldq);
+    }
+
+/*     Loop until convergence */
+
+/*     The loop bound 40 is the internal parameter MAXIT (see above). */
+/*     UPPER alternates each cycle: the sweep flips A23/B13 between */
+/*     upper- and lower-triangular form. */
+
+    upper = FALSE_;
+    for (kcycle = 1; kcycle <= 40; ++kcycle) {
+
+	upper = ! upper;
+
+	i__1 = *l - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = *l;
+	    for (j = i__ + 1; j <= i__2; ++j) {
+
+/*              Gather the entries of the 2-by-2 pivot pair (I,J). */
+/*              Rows K+I, K+J of A exist only while they lie within the */
+/*              first M rows; otherwise the entries stay zero. */
+
+		a1 = 0.f;
+		a2 = 0.f;
+		a3 = 0.f;
+		if (*k + i__ <= *m) {
+		    a1 = a[*k + i__ + (*n - *l + i__) * a_dim1];
+		}
+		if (*k + j <= *m) {
+		    a3 = a[*k + j + (*n - *l + j) * a_dim1];
+		}
+
+		b1 = b[i__ + (*n - *l + i__) * b_dim1];
+		b3 = b[j + (*n - *l + j) * b_dim1];
+
+		if (upper) {
+		    if (*k + i__ <= *m) {
+			a2 = a[*k + i__ + (*n - *l + j) * a_dim1];
+		    }
+		    b2 = b[i__ + (*n - *l + j) * b_dim1];
+		} else {
+		    if (*k + j <= *m) {
+			a2 = a[*k + j + (*n - *l + i__) * a_dim1];
+		    }
+		    b2 = b[j + (*n - *l + i__) * b_dim1];
+		}
+
+/*              SLAGS2 returns the rotation pairs (CSU,SNU), (CSV,SNV), */
+/*              (CSQ,SNQ) applied below; the rotated off-diagonal */
+/*              entries are explicitly zeroed after the updates. */
+
+		slags2_(&upper, &a1, &a2, &a3, &b1, &b2, &b3, &csu, &snu, &
+			csv, &snv, &csq, &snq);
+
+/*              Update (K+I)-th and (K+J)-th rows of matrix A: U'*A */
+
+		if (*k + j <= *m) {
+		    srot_(l, &a[*k + j + (*n - *l + 1) * a_dim1], lda, &a[*k
+			    + i__ + (*n - *l + 1) * a_dim1], lda, &csu, &snu);
+		}
+
+/*              Update I-th and J-th rows of matrix B: V'*B */
+
+		srot_(l, &b[j + (*n - *l + 1) * b_dim1], ldb, &b[i__ + (*n - *
+			l + 1) * b_dim1], ldb, &csv, &snv);
+
+/*              Update (N-L+I)-th and (N-L+J)-th columns of matrices */
+/*              A and B: A*Q and B*Q */
+
+/* Computing MIN */
+		i__4 = *k + *l;
+		i__3 = min(i__4,*m);
+		srot_(&i__3, &a[(*n - *l + j) * a_dim1 + 1], &c__1, &a[(*n - *
+			l + i__) * a_dim1 + 1], &c__1, &csq, &snq);
+
+		srot_(l, &b[(*n - *l + j) * b_dim1 + 1], &c__1, &b[(*n - *l +
+			i__) * b_dim1 + 1], &c__1, &csq, &snq);
+
+/*              Force the annihilated off-diagonal entry to exact zero. */
+
+		if (upper) {
+		    if (*k + i__ <= *m) {
+			a[*k + i__ + (*n - *l + j) * a_dim1] = 0.f;
+		    }
+		    b[i__ + (*n - *l + j) * b_dim1] = 0.f;
+		} else {
+		    if (*k + j <= *m) {
+			a[*k + j + (*n - *l + i__) * a_dim1] = 0.f;
+		    }
+		    b[j + (*n - *l + i__) * b_dim1] = 0.f;
+		}
+
+/*              Update orthogonal matrices U, V, Q, if desired. */
+
+		if (wantu && *k + j <= *m) {
+		    srot_(m, &u[(*k + j) * u_dim1 + 1], &c__1, &u[(*k + i__) *
+			     u_dim1 + 1], &c__1, &csu, &snu);
+		}
+
+		if (wantv) {
+		    srot_(p, &v[j * v_dim1 + 1], &c__1, &v[i__ * v_dim1 + 1],
+			    &c__1, &csv, &snv);
+		}
+
+		if (wantq) {
+		    srot_(n, &q[(*n - *l + j) * q_dim1 + 1], &c__1, &q[(*n - *
+			    l + i__) * q_dim1 + 1], &c__1, &csq, &snq);
+		}
+
+/* L10: */
+	    }
+/* L20: */
+	}
+
+	if (! upper) {
+
+/*           The matrices A13 and B13 were lower triangular at the start */
+/*           of the cycle, and are now upper triangular. */
+
+/*           Convergence test: test the parallelism of the corresponding */
+/*           rows of A and B. */
+
+	    error = 0.f;
+/* Computing MIN */
+	    i__2 = *l, i__3 = *m - *k;
+	    i__1 = min(i__2,i__3);
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		i__2 = *l - i__ + 1;
+		scopy_(&i__2, &a[*k + i__ + (*n - *l + i__) * a_dim1], lda, &
+			work[1], &c__1);
+		i__2 = *l - i__ + 1;
+		scopy_(&i__2, &b[i__ + (*n - *l + i__) * b_dim1], ldb, &work[*
+			l + 1], &c__1);
+		i__2 = *l - i__ + 1;
+		slapll_(&i__2, &work[1], &c__1, &work[*l + 1], &c__1, &ssmin);
+		error = dmax(error,ssmin);
+/* L30: */
+	    }
+
+	    if (dabs(error) <= dmin(*tola,*tolb)) {
+		goto L50;
+	    }
+	}
+
+/*        End of cycle loop */
+
+/* L40: */
+    }
+
+/*     The algorithm has not converged after MAXIT cycles. */
+
+    *info = 1;
+    goto L100;
+
+L50:
+
+/*     If ERROR <= MIN(TOLA,TOLB), then the algorithm has converged. */
+/*     Compute the generalized singular value pairs (ALPHA, BETA), and */
+/*     set the triangular matrix R to array A. */
+
+    i__1 = *k;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	alpha[i__] = 1.f;
+	beta[i__] = 0.f;
+/* L60: */
+    }
+
+/* Computing MIN */
+    i__2 = *l, i__3 = *m - *k;
+    i__1 = min(i__2,i__3);
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+	a1 = a[*k + i__ + (*n - *l + i__) * a_dim1];
+	b1 = b[i__ + (*n - *l + i__) * b_dim1];
+
+	if (a1 != 0.f) {
+	    gamma = b1 / a1;
+
+/*           change sign if necessary */
+
+	    if (gamma < 0.f) {
+		i__2 = *l - i__ + 1;
+		sscal_(&i__2, &c_b43, &b[i__ + (*n - *l + i__) * b_dim1], ldb)
+			;
+		if (wantv) {
+		    sscal_(p, &c_b43, &v[i__ * v_dim1 + 1], &c__1);
+		}
+	    }
+
+/*           SLARTG turns (|gamma|, 1) into (cos, sin) = (BETA, ALPHA); */
+/*           RWK receives the discarded radius. */
+
+	    r__1 = dabs(gamma);
+	    slartg_(&r__1, &c_b14, &beta[*k + i__], &alpha[*k + i__], &rwk);
+
+/*           Normalize row I of R by the larger of ALPHA/BETA for */
+/*           numerical stability, copying from B into A when BETA wins. */
+
+	    if (alpha[*k + i__] >= beta[*k + i__]) {
+		i__2 = *l - i__ + 1;
+		r__1 = 1.f / alpha[*k + i__];
+		sscal_(&i__2, &r__1, &a[*k + i__ + (*n - *l + i__) * a_dim1],
+			lda);
+	    } else {
+		i__2 = *l - i__ + 1;
+		r__1 = 1.f / beta[*k + i__];
+		sscal_(&i__2, &r__1, &b[i__ + (*n - *l + i__) * b_dim1], ldb);
+		i__2 = *l - i__ + 1;
+		scopy_(&i__2, &b[i__ + (*n - *l + i__) * b_dim1], ldb, &a[*k
+			+ i__ + (*n - *l + i__) * a_dim1], lda);
+	    }
+
+	} else {
+
+/*           A(K+I,N-L+I) is exactly zero: infinite generalized singular */
+/*           value pair (ALPHA,BETA) = (0,1); row of R comes from B. */
+
+	    alpha[*k + i__] = 0.f;
+	    beta[*k + i__] = 1.f;
+	    i__2 = *l - i__ + 1;
+	    scopy_(&i__2, &b[i__ + (*n - *l + i__) * b_dim1], ldb, &a[*k +
+		    i__ + (*n - *l + i__) * a_dim1], lda);
+
+	}
+
+/* L70: */
+    }
+
+/*     Post-assignment */
+
+    i__1 = *k + *l;
+    for (i__ = *m + 1; i__ <= i__1; ++i__) {
+	alpha[i__] = 0.f;
+	beta[i__] = 1.f;
+/* L80: */
+    }
+
+    if (*k + *l < *n) {
+	i__1 = *n;
+	for (i__ = *k + *l + 1; i__ <= i__1; ++i__) {
+	    alpha[i__] = 0.f;
+	    beta[i__] = 0.f;
+/* L90: */
+	}
+    }
+
+L100:
+    *ncycle = kcycle;
+    return 0;
+
+/*     End of STGSJA */
+
+} /* stgsja_ */
diff --git a/contrib/libs/clapack/stgsna.c b/contrib/libs/clapack/stgsna.c
new file mode 100644
index 0000000000..d469299e31
--- /dev/null
+++ b/contrib/libs/clapack/stgsna.c
@@ -0,0 +1,691 @@
+/* stgsna.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b19 = 1.f;
+static real c_b21 = 0.f;
+static integer c__2 = 2;
+static logical c_false = FALSE_;
+static integer c__3 = 3;
+
+/* Subroutine */ int stgsna_(char *job, char *howmny, logical *select,
+	integer *n, real *a, integer *lda, real *b, integer *ldb, real *vl,
+	integer *ldvl, real *vr, integer *ldvr, real *s, real *dif, integer *
+	mm, integer *m, real *work, integer *lwork, integer *iwork, integer *
+	info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, vl_dim1, vl_offset, vr_dim1,
+	    vr_offset, i__1, i__2;
+    real r__1, r__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, k;
+    real c1, c2;
+    integer n1, n2, ks, iz;
+    real eps, beta, cond;
+    logical pair;
+    integer ierr;
+    real uhav, uhbv;
+    integer ifst;
+    real lnrm;
+    extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+    integer ilst;
+    real rnrm;
+    extern /* Subroutine */ int slag2_(real *, integer *, real *, integer *,
+	    real *, real *, real *, real *, real *, real *);
+    extern doublereal snrm2_(integer *, real *, integer *);
+    real root1, root2, scale;
+    extern logical lsame_(char *, char *);
+    real uhavi, uhbvi;
+    extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+	    real *, integer *, real *, integer *, real *, real *, integer *);
+    real tmpii;
+    integer lwmin;
+    logical wants;
+    real tmpir, tmpri, dummy[1], tmprr;
+    extern doublereal slapy2_(real *, real *);
+    real dummy1[1], alphai, alphar;
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical wantbh, wantdf;
+    extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+	    integer *, real *, integer *), stgexc_(logical *, logical
+	    *, integer *, real *, integer *, real *, integer *, real *,
+	    integer *, real *, integer *, integer *, integer *, real *,
+	    integer *, integer *);
+    logical somcon;
+    real alprqt, smlnum;
+    logical lquery;
+    extern /* Subroutine */ int stgsyl_(char *, integer *, integer *, integer
+	    *, real *, integer *, real *, integer *, real *, integer *, real *
+, integer *, real *, integer *, real *, integer *, real *, real *,
+	     real *, integer *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STGSNA estimates reciprocal condition numbers for specified */
+/* eigenvalues and/or eigenvectors of a matrix pair (A, B) in */
+/* generalized real Schur canonical form (or of any matrix pair */
+/* (Q*A*Z', Q*B*Z') with orthogonal matrices Q and Z, where */
+/* Z' denotes the transpose of Z. */
+
+/* (A, B) must be in generalized real Schur form (as returned by SGGES), */
+/* i.e. A is block upper triangular with 1-by-1 and 2-by-2 diagonal */
+/* blocks. B is upper triangular. */
+
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies whether condition numbers are required for */
+/* eigenvalues (S) or eigenvectors (DIF): */
+/* = 'E': for eigenvalues only (S); */
+/* = 'V': for eigenvectors only (DIF); */
+/* = 'B': for both eigenvalues and eigenvectors (S and DIF). */
+
+/* HOWMNY (input) CHARACTER*1 */
+/* = 'A': compute condition numbers for all eigenpairs; */
+/* = 'S': compute condition numbers for selected eigenpairs */
+/* specified by the array SELECT. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* If HOWMNY = 'S', SELECT specifies the eigenpairs for which */
+/* condition numbers are required. To select condition numbers */
+/* for the eigenpair corresponding to a real eigenvalue w(j), */
+/* SELECT(j) must be set to .TRUE.. To select condition numbers */
+/* corresponding to a complex conjugate pair of eigenvalues w(j) */
+/* and w(j+1), either SELECT(j) or SELECT(j+1) or both, must be */
+/* set to .TRUE.. */
+/* If HOWMNY = 'A', SELECT is not referenced. */
+
+/* N (input) INTEGER */
+/* The order of the square matrix pair (A, B). N >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The upper quasi-triangular matrix A in the pair (A,B). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input) REAL array, dimension (LDB,N) */
+/* The upper triangular matrix B in the pair (A,B). */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* VL (input) REAL array, dimension (LDVL,M) */
+/* If JOB = 'E' or 'B', VL must contain left eigenvectors of */
+/* (A, B), corresponding to the eigenpairs specified by HOWMNY */
+/* and SELECT. The eigenvectors must be stored in consecutive */
+/* columns of VL, as returned by STGEVC. */
+/* If JOB = 'V', VL is not referenced. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the array VL. LDVL >= 1. */
+/* If JOB = 'E' or 'B', LDVL >= N. */
+
+/* VR (input) REAL array, dimension (LDVR,M) */
+/* If JOB = 'E' or 'B', VR must contain right eigenvectors of */
+/* (A, B), corresponding to the eigenpairs specified by HOWMNY */
+/* and SELECT. The eigenvectors must be stored in consecutive */
+/* columns of VR, as returned by STGEVC. */
+/* If JOB = 'V', VR is not referenced. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. LDVR >= 1. */
+/* If JOB = 'E' or 'B', LDVR >= N. */
+
+/* S (output) REAL array, dimension (MM) */
+/* If JOB = 'E' or 'B', the reciprocal condition numbers of the */
+/* selected eigenvalues, stored in consecutive elements of the */
+/* array. For a complex conjugate pair of eigenvalues two */
+/* consecutive elements of S are set to the same value. Thus */
+/* S(j), DIF(j), and the j-th columns of VL and VR all */
+/* correspond to the same eigenpair (but not in general the */
+/* j-th eigenpair, unless all eigenpairs are selected). */
+/* If JOB = 'V', S is not referenced. */
+
+/* DIF (output) REAL array, dimension (MM) */
+/* If JOB = 'V' or 'B', the estimated reciprocal condition */
+/* numbers of the selected eigenvectors, stored in consecutive */
+/* elements of the array. For a complex eigenvector two */
+/* consecutive elements of DIF are set to the same value. If */
+/* the eigenvalues cannot be reordered to compute DIF(j), DIF(j) */
+/* is set to 0; this can only occur when the true value would be */
+/* very small anyway. */
+/* If JOB = 'E', DIF is not referenced. */
+
+/* MM (input) INTEGER */
+/* The number of elements in the arrays S and DIF. MM >= M. */
+
+/* M (output) INTEGER */
+/* The number of elements of the arrays S and DIF used to store */
+/* the specified condition numbers; for each selected real */
+/* eigenvalue one element is used, and for each selected complex */
+/* conjugate pair of eigenvalues, two elements are used. */
+/* If HOWMNY = 'A', M is set to N. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* If JOB = 'V' or 'B' LWORK >= 2*N*(N+2)+16. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (N + 6) */
+/* If JOB = 'E', IWORK is not referenced. */
+
+/* INFO (output) INTEGER */
+/* =0: Successful exit */
+/* <0: If INFO = -i, the i-th argument had an illegal value */
+
+
+/* Further Details */
+/* =============== */
+
+/* The reciprocal of the condition number of a generalized eigenvalue */
+/* w = (a, b) is defined as */
+
+/* S(w) = (|u'Av|**2 + |u'Bv|**2)**(1/2) / (norm(u)*norm(v)) */
+
+/* where u and v are the left and right eigenvectors of (A, B) */
+/* corresponding to w; |z| denotes the absolute value of the complex */
+/* number, and norm(u) denotes the 2-norm of the vector u. */
+/* The pair (a, b) corresponds to an eigenvalue w = a/b (= u'Av/u'Bv) */
+/* of the matrix pair (A, B). If both a and b equal zero, then (A B) is */
+/* singular and S(I) = -1 is returned. */
+
+/* An approximate error bound on the chordal distance between the i-th */
+/* computed generalized eigenvalue w and the corresponding exact */
+/* eigenvalue lambda is */
+
+/* chord(w, lambda) <= EPS * norm(A, B) / S(I) */
+
+/* where EPS is the machine precision. */
+
+/* The reciprocal of the condition number DIF(i) of right eigenvector u */
+/* and left eigenvector v corresponding to the generalized eigenvalue w */
+/* is defined as follows: */
+
+/* a) If the i-th eigenvalue w = (a,b) is real */
+
+/* Suppose U and V are orthogonal transformations such that */
+
+/* U'*(A, B)*V = (S, T) = ( a * ) ( b * ) 1 */
+/* ( 0 S22 ),( 0 T22 ) n-1 */
+/* 1 n-1 1 n-1 */
+
+/* Then the reciprocal condition number DIF(i) is */
+
+/* Difl((a, b), (S22, T22)) = sigma-min( Zl ), */
+
+/* where sigma-min(Zl) denotes the smallest singular value of the */
+/* 2(n-1)-by-2(n-1) matrix */
+
+/* Zl = [ kron(a, In-1) -kron(1, S22) ] */
+/* [ kron(b, In-1) -kron(1, T22) ] . */
+
+/* Here In-1 is the identity matrix of size n-1. kron(X, Y) is the */
+/* Kronecker product between the matrices X and Y. */
+
+/* Note that if the default method for computing DIF(i) is wanted */
+/* (see SLATDF), then the parameter DIFDRI (see below) should be */
+/* changed from 3 to 4 (routine SLATDF(IJOB = 2 will be used)). */
+/* See STGSYL for more details. */
+
+/* b) If the i-th and (i+1)-th eigenvalues are complex conjugate pair, */
+
+/* Suppose U and V are orthogonal transformations such that */
+
+/* U'*(A, B)*V = (S, T) = ( S11 * ) ( T11 * ) 2 */
+/* ( 0 S22 ),( 0 T22) n-2 */
+/* 2 n-2 2 n-2 */
+
+/* and (S11, T11) corresponds to the complex conjugate eigenvalue */
+/* pair (w, conjg(w)). There exist unitary matrices U1 and V1 such */
+/* that */
+
+/* U1'*S11*V1 = ( s11 s12 ) and U1'*T11*V1 = ( t11 t12 ) */
+/* ( 0 s22 ) ( 0 t22 ) */
+
+/* where the generalized eigenvalues w = s11/t11 and */
+/* conjg(w) = s22/t22. */
+
+/* Then the reciprocal condition number DIF(i) is bounded by */
+
+/* min( d1, max( 1, |real(s11)/real(s22)| )*d2 ) */
+
+/* where, d1 = Difl((s11, t11), (s22, t22)) = sigma-min(Z1), where */
+/* Z1 is the complex 2-by-2 matrix */
+
+/* Z1 = [ s11 -s22 ] */
+/* [ t11 -t22 ], */
+
+/* This is done by computing (using real arithmetic) the */
+/* roots of the characteristical polynomial det(Z1' * Z1 - lambda I), */
+/* where Z1' denotes the conjugate transpose of Z1 and det(X) denotes */
+/* the determinant of X. */
+
+/* and d2 is an upper bound on Difl((S11, T11), (S22, T22)), i.e. an */
+/* upper bound on sigma-min(Z2), where Z2 is (2n-2)-by-(2n-2) */
+
+/* Z2 = [ kron(S11', In-2) -kron(I2, S22) ] */
+/* [ kron(T11', In-2) -kron(I2, T22) ] */
+
+/* Note that if the default method for computing DIF is wanted (see */
+/* SLATDF), then the parameter DIFDRI (see below) should be changed */
+/* from 3 to 4 (routine SLATDF(IJOB = 2 will be used)). See STGSYL */
+/* for more details. */
+
+/* For each eigenvalue/vector specified by SELECT, DIF stores a */
+/* Frobenius norm-based estimate of Difl. */
+
+/* An approximate error bound for the i-th computed eigenvector VL(i) or */
+/* VR(i) is given by */
+
+/* EPS * norm(A, B) / DIF(i). */
+
+/* See ref. [2-3] for more details and further references. */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* References */
+/* ========== */
+
+/* [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */
+/* Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */
+/* M.S. Moonen et al (eds), Linear Algebra for Large Scale and */
+/* Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */
+
+/* [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified */
+/* Eigenvalues of a Regular Matrix Pair (A, B) and Condition */
+/* Estimation: Theory, Algorithms and Software, */
+/* Report UMINF - 94.04, Department of Computing Science, Umea */
+/* University, S-901 87 Umea, Sweden, 1994. Also as LAPACK Working */
+/* Note 87. To appear in Numerical Algorithms, 1996. */
+
+/* [3] B. Kagstrom and P. Poromaa, LAPACK-Style Algorithms and Software */
+/* for Solving the Generalized Sylvester Equation and Estimating the */
+/* Separation between Regular Matrix Pairs, Report UMINF - 93.23, */
+/* Department of Computing Science, Umea University, S-901 87 Umea, */
+/* Sweden, December 1993, Revised April 1994, Also as LAPACK Working */
+/* Note 75. To appear in ACM Trans. on Math. Software, Vol 22, */
+/* No 1, 1996. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+/* f2c shifts every array pointer so 1-based Fortran indexing */
+/* (e.g. a[i + j*a_dim1]) works unchanged in C. */
+
+    /* Parameter adjustments */
+    --select;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    vl_dim1 = *ldvl;
+    vl_offset = 1 + vl_dim1;
+    vl -= vl_offset;
+    vr_dim1 = *ldvr;
+    vr_offset = 1 + vr_dim1;
+    vr -= vr_offset;
+    --s;
+    --dif;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    wantbh = lsame_(job, "B");
+    wants = lsame_(job, "E") || wantbh;
+    wantdf = lsame_(job, "V") || wantbh;
+
+    somcon = lsame_(howmny, "S");
+
+    *info = 0;
+    lquery = *lwork == -1;
+
+    if (! wants && ! wantdf) {
+	*info = -1;
+    } else if (! lsame_(howmny, "A") && ! somcon) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*n)) {
+	*info = -6;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    } else if (wants && *ldvl < *n) {
+	*info = -10;
+    } else if (wants && *ldvr < *n) {
+	*info = -12;
+    } else {
+
+/* Set M to the number of eigenpairs for which condition numbers */
+/* are required, and test MM. */
+
+/* A nonzero subdiagonal entry A(k+1,k) marks the start of a */
+/* 2-by-2 block, i.e. a complex conjugate eigenvalue pair. */
+
+	if (somcon) {
+	    *m = 0;
+	    pair = FALSE_;
+	    i__1 = *n;
+	    for (k = 1; k <= i__1; ++k) {
+		if (pair) {
+		    pair = FALSE_;
+		} else {
+		    if (k < *n) {
+			if (a[k + 1 + k * a_dim1] == 0.f) {
+			    if (select[k]) {
+				++(*m);
+			    }
+			} else {
+			    pair = TRUE_;
+			    if (select[k] || select[k + 1]) {
+				*m += 2;
+			    }
+			}
+		    } else {
+			if (select[*n]) {
+			    ++(*m);
+			}
+		    }
+		}
+	    }
+/* L10: */
+	}
+    } else {
+	*m = *n;
+    }
+
+    if (*n == 0) {
+	lwmin = 1;
+    } else if (lsame_(job, "V") || lsame_(job,
+	    "B")) {
+	lwmin = (*n << 1) * (*n + 2) + 16;
+    } else {
+	lwmin = *n;
+    }
+    work[1] = (real) lwmin;
+
+    if (*mm < *m) {
+	*info = -15;
+    } else if (*lwork < lwmin && ! lquery) {
+	*info = -18;
+    }
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("STGSNA", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Get machine constants */
+
+    eps = slamch_("P");
+    smlnum = slamch_("S") / eps;
+/* KS indexes the next slot of S/DIF to fill; PAIR is true while */
+/* processing the second column of a complex conjugate pair. */
+    ks = 0;
+    pair = FALSE_;
+
+    i__1 = *n;
+    for (k = 1; k <= i__1; ++k) {
+
+/* Determine whether A(k,k) begins a 1-by-1 or 2-by-2 block. */
+
+	if (pair) {
+	    pair = FALSE_;
+	    goto L20;
+	} else {
+	    if (k < *n) {
+		pair = a[k + 1 + k * a_dim1] != 0.f;
+	    }
+	}
+
+/* Determine whether condition numbers are required for the k-th */
+/* eigenpair. */
+
+	if (somcon) {
+	    if (pair) {
+		if (! select[k] && ! select[k + 1]) {
+		    goto L20;
+		}
+	    } else {
+		if (! select[k]) {
+		    goto L20;
+		}
+	    }
+	}
+
+	++ks;
+
+	if (wants) {
+
+/* Compute the reciprocal condition number of the k-th */
+/* eigenvalue. */
+
+	    if (pair) {
+
+/* Complex eigenvalue pair. */
+
+/* Real and imaginary parts live in columns KS and KS+1 of */
+/* VR/VL; combine them with SLAPY2 to get complex 2-norms */
+/* and the moduli of u'Av and u'Bv. */
+
+		r__1 = snrm2_(n, &vr[ks * vr_dim1 + 1], &c__1);
+		r__2 = snrm2_(n, &vr[(ks + 1) * vr_dim1 + 1], &c__1);
+		rnrm = slapy2_(&r__1, &r__2);
+		r__1 = snrm2_(n, &vl[ks * vl_dim1 + 1], &c__1);
+		r__2 = snrm2_(n, &vl[(ks + 1) * vl_dim1 + 1], &c__1);
+		lnrm = slapy2_(&r__1, &r__2);
+		sgemv_("N", n, n, &c_b19, &a[a_offset], lda, &vr[ks * vr_dim1
+			+ 1], &c__1, &c_b21, &work[1], &c__1);
+		tmprr = sdot_(n, &work[1], &c__1, &vl[ks * vl_dim1 + 1], &
+			c__1);
+		tmpri = sdot_(n, &work[1], &c__1, &vl[(ks + 1) * vl_dim1 + 1],
+			 &c__1);
+		sgemv_("N", n, n, &c_b19, &a[a_offset], lda, &vr[(ks + 1) *
+			vr_dim1 + 1], &c__1, &c_b21, &work[1], &c__1);
+		tmpii = sdot_(n, &work[1], &c__1, &vl[(ks + 1) * vl_dim1 + 1],
+			 &c__1);
+		tmpir = sdot_(n, &work[1], &c__1, &vl[ks * vl_dim1 + 1], &
+			c__1);
+		uhav = tmprr + tmpii;
+		uhavi = tmpir - tmpri;
+		sgemv_("N", n, n, &c_b19, &b[b_offset], ldb, &vr[ks * vr_dim1
+			+ 1], &c__1, &c_b21, &work[1], &c__1);
+		tmprr = sdot_(n, &work[1], &c__1, &vl[ks * vl_dim1 + 1], &
+			c__1);
+		tmpri = sdot_(n, &work[1], &c__1, &vl[(ks + 1) * vl_dim1 + 1],
+			 &c__1);
+		sgemv_("N", n, n, &c_b19, &b[b_offset], ldb, &vr[(ks + 1) *
+			vr_dim1 + 1], &c__1, &c_b21, &work[1], &c__1);
+		tmpii = sdot_(n, &work[1], &c__1, &vl[(ks + 1) * vl_dim1 + 1],
+			 &c__1);
+		tmpir = sdot_(n, &work[1], &c__1, &vl[ks * vl_dim1 + 1], &
+			c__1);
+		uhbv = tmprr + tmpii;
+		uhbvi = tmpir - tmpri;
+		uhav = slapy2_(&uhav, &uhavi);
+		uhbv = slapy2_(&uhbv, &uhbvi);
+		cond = slapy2_(&uhav, &uhbv);
+		s[ks] = cond / (rnrm * lnrm);
+		s[ks + 1] = s[ks];
+
+	    } else {
+
+/* Real eigenvalue. */
+
+		rnrm = snrm2_(n, &vr[ks * vr_dim1 + 1], &c__1);
+		lnrm = snrm2_(n, &vl[ks * vl_dim1 + 1], &c__1);
+		sgemv_("N", n, n, &c_b19, &a[a_offset], lda, &vr[ks * vr_dim1
+			+ 1], &c__1, &c_b21, &work[1], &c__1);
+		uhav = sdot_(n, &work[1], &c__1, &vl[ks * vl_dim1 + 1], &c__1)
+			;
+		sgemv_("N", n, n, &c_b19, &b[b_offset], ldb, &vr[ks * vr_dim1
+			+ 1], &c__1, &c_b21, &work[1], &c__1);
+		uhbv = sdot_(n, &work[1], &c__1, &vl[ks * vl_dim1 + 1], &c__1)
+			;
+		cond = slapy2_(&uhav, &uhbv);
+		if (cond == 0.f) {
+/* Both u'Av and u'Bv vanish: (A, B) is singular here. */
+		    s[ks] = -1.f;
+		} else {
+		    s[ks] = cond / (rnrm * lnrm);
+		}
+	    }
+	}
+
+	if (wantdf) {
+	    if (*n == 1) {
+		dif[ks] = slapy2_(&a[a_dim1 + 1], &b[b_dim1 + 1]);
+		goto L20;
+	    }
+
+/* Estimate the reciprocal condition number of the k-th */
+/* eigenvectors. */
+	    if (pair) {
+
+/* Copy the 2-by-2 pencil beginning at (A(k,k), B(k, k)). */
+/* Compute the eigenvalue(s) at position K. */
+
+		work[1] = a[k + k * a_dim1];
+		work[2] = a[k + 1 + k * a_dim1];
+		work[3] = a[k + (k + 1) * a_dim1];
+		work[4] = a[k + 1 + (k + 1) * a_dim1];
+		work[5] = b[k + k * b_dim1];
+		work[6] = b[k + 1 + k * b_dim1];
+		work[7] = b[k + (k + 1) * b_dim1];
+		work[8] = b[k + 1 + (k + 1) * b_dim1];
+		r__1 = smlnum * eps;
+		slag2_(&work[1], &c__2, &work[5], &c__2, &r__1, &beta, dummy1,
+			 &alphar, dummy, &alphai);
+		alprqt = 1.f;
+/* Roots of det(Z1'*Z1 - lambda*I) via the quadratic formula; */
+/* sqrt of the smaller root is sigma-min(Z1) (d1 in the */
+/* Further Details above). */
+		c1 = (alphar * alphar + alphai * alphai + beta * beta) * 2.f;
+		c2 = beta * 4.f * beta * alphai * alphai;
+		root1 = c1 + sqrt(c1 * c1 - c2 * 4.f);
+		root2 = c2 / root1;
+		root1 /= 2.f;
+/* Computing MIN */
+		r__1 = sqrt(root1), r__2 = sqrt(root2);
+		cond = dmin(r__1,r__2);
+	    }
+
+/* Copy the matrix (A, B) to the array WORK and swap the */
+/* diagonal block beginning at A(k,k) to the (1,1) position. */
+
+/* WORK layout: A copy in WORK(1..N*N), B copy in */
+/* WORK(N*N+1..2*N*N), remainder is STGEXC/STGSYL workspace. */
+
+	    slacpy_("Full", n, n, &a[a_offset], lda, &work[1], n);
+	    slacpy_("Full", n, n, &b[b_offset], ldb, &work[*n * *n + 1], n);
+	    ifst = k;
+	    ilst = 1;
+
+	    i__2 = *lwork - (*n << 1) * *n;
+	    stgexc_(&c_false, &c_false, n, &work[1], n, &work[*n * *n + 1], n,
+		     dummy, &c__1, dummy1, &c__1, &ifst, &ilst, &work[(*n * *
+		    n << 1) + 1], &i__2, &ierr);
+
+	    if (ierr > 0) {
+
+/* Ill-conditioned problem - swap rejected. */
+
+		dif[ks] = 0.f;
+	    } else {
+
+/* Reordering successful, solve generalized Sylvester */
+/* equation for R and L, */
+/* A22 * R - L * A11 = A12 */
+/* B22 * R - L * B11 = B12, */
+/* and compute estimate of Difl((A11,B11), (A22, B22)). */
+
+		n1 = 1;
+		if (work[2] != 0.f) {
+		    n1 = 2;
+		}
+		n2 = *n - n1;
+		if (n2 == 0) {
+		    dif[ks] = cond;
+		} else {
+		    i__ = *n * *n + 1;
+		    iz = (*n << 1) * *n + 1;
+		    i__2 = *lwork - (*n << 1) * *n;
+		    stgsyl_("N", &c__3, &n2, &n1, &work[*n * n1 + n1 + 1], n,
+			    &work[1], n, &work[n1 + 1], n, &work[*n * n1 + n1
+			    + i__], n, &work[i__], n, &work[n1 + i__], n, &
+			    scale, &dif[ks], &work[iz + 1], &i__2, &iwork[1],
+			    &ierr);
+
+		    if (pair) {
+/* Computing MIN */
+			r__1 = dmax(1.f,alprqt) * dif[ks];
+			dif[ks] = dmin(r__1,cond);
+		    }
+		}
+	    }
+	    if (pair) {
+		dif[ks + 1] = dif[ks];
+	    }
+	}
+	if (pair) {
+/* Second slot of the pair was filled above; advance KS past it. */
+	    ++ks;
+	}
+
+L20:
+	;
+    }
+    work[1] = (real) lwmin;
+    return 0;
+
+/* End of STGSNA */
+
+} /* stgsna_ */
diff --git a/contrib/libs/clapack/stgsy2.c b/contrib/libs/clapack/stgsy2.c
new file mode 100644
index 0000000000..4c3cd044a5
--- /dev/null
+++ b/contrib/libs/clapack/stgsy2.c
@@ -0,0 +1,1106 @@
+/* stgsy2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__8 = 8;
+static integer c__1 = 1;
+static real c_b27 = -1.f;
+static real c_b42 = 1.f;
+static real c_b56 = 0.f;
+
+/* Subroutine */ int stgsy2_(char *trans, integer *ijob, integer *m, integer *
+ n, real *a, integer *lda, real *b, integer *ldb, real *c__, integer *
+ ldc, real *d__, integer *ldd, real *e, integer *lde, real *f, integer
+ *ldf, real *scale, real *rdsum, real *rdscal, integer *iwork, integer
+ *pq, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, d_dim1,
+ d_offset, e_dim1, e_offset, f_dim1, f_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, k, p, q;
+ real z__[64] /* was [8][8] */;
+ integer ie, je, mb, nb, ii, jj, is, js;
+ real rhs[8];
+ integer isp1, jsp1;
+ extern /* Subroutine */ int sger_(integer *, integer *, real *, real *,
+ integer *, real *, integer *, real *, integer *);
+ integer ierr, zdim, ipiv[8], jpiv[8];
+ real alpha;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ sgemm_(char *, char *, integer *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *), sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *), scopy_(integer *, real *, integer *, real *, integer *),
+ saxpy_(integer *, real *, real *, integer *, real *, integer *),
+ sgesc2_(integer *, real *, integer *, real *, integer *, integer *
+, real *), sgetc2_(integer *, real *, integer *, integer *,
+ integer *, integer *);
+ real scaloc;
+ extern /* Subroutine */ int slatdf_(integer *, integer *, real *, integer
+ *, real *, real *, real *, integer *, integer *), xerbla_(char *,
+ integer *), slaset_(char *, integer *, integer *, real *,
+ real *, real *, integer *);
+ logical notran;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* January 2007 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STGSY2 solves the generalized Sylvester equation: */
+
+/* A * R - L * B = scale * C (1) */
+/* D * R - L * E = scale * F, */
+
+/* using Level 1 and 2 BLAS. where R and L are unknown M-by-N matrices, */
+/* (A, D), (B, E) and (C, F) are given matrix pairs of size M-by-M, */
+/* N-by-N and M-by-N, respectively, with real entries. (A, D) and (B, E) */
+/* must be in generalized Schur canonical form, i.e. A, B are upper */
+/* quasi triangular and D, E are upper triangular. The solution (R, L) */
+/* overwrites (C, F). 0 <= SCALE <= 1 is an output scaling factor */
+/* chosen to avoid overflow. */
+
+/* In matrix notation solving equation (1) corresponds to solve */
+/* Z*x = scale*b, where Z is defined as */
+
+/* Z = [ kron(In, A) -kron(B', Im) ] (2) */
+/* [ kron(In, D) -kron(E', Im) ], */
+
+/* Ik is the identity matrix of size k and X' is the transpose of X. */
+/* kron(X, Y) is the Kronecker product between the matrices X and Y. */
+/* In the process of solving (1), we solve a number of such systems */
+/* where Dim(In), Dim(In) = 1 or 2. */
+
+/* If TRANS = 'T', solve the transposed system Z'*y = scale*b for y, */
+/* which is equivalent to solve for R and L in */
+
+/* A' * R + D' * L = scale * C (3) */
+/* R * B' + L * E' = scale * -F */
+
+/* This case is used to compute an estimate of Dif[(A, D), (B, E)] = */
+/* sigma_min(Z) using reverse communicaton with SLACON. */
+
+/* STGSY2 also (IJOB >= 1) contributes to the computation in STGSYL */
+/* of an upper bound on the separation between to matrix pairs. Then */
+/* the input (A, D), (B, E) are sub-pencils of the matrix pair in */
+/* STGSYL. See STGSYL for details. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N', solve the generalized Sylvester equation (1). */
+/* = 'T': solve the 'transposed' system (3). */
+
+/* IJOB (input) INTEGER */
+/* Specifies what kind of functionality to be performed. */
+/* = 0: solve (1) only. */
+/* = 1: A contribution from this subsystem to a Frobenius */
+/* norm-based estimate of the separation between two matrix */
+/* pairs is computed. (look ahead strategy is used). */
+/* = 2: A contribution from this subsystem to a Frobenius */
+/* norm-based estimate of the separation between two matrix */
+/* pairs is computed. (SGECON on sub-systems is used.) */
+/* Not referenced if TRANS = 'T'. */
+
+/* M (input) INTEGER */
+/* On entry, M specifies the order of A and D, and the row */
+/* dimension of C, F, R and L. */
+
+/* N (input) INTEGER */
+/* On entry, N specifies the order of B and E, and the column */
+/* dimension of C, F, R and L. */
+
+/* A (input) REAL array, dimension (LDA, M) */
+/* On entry, A contains an upper quasi triangular matrix. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the matrix A. LDA >= max(1, M). */
+
+/* B (input) REAL array, dimension (LDB, N) */
+/* On entry, B contains an upper quasi triangular matrix. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the matrix B. LDB >= max(1, N). */
+
+/* C (input/output) REAL array, dimension (LDC, N) */
+/* On entry, C contains the right-hand-side of the first matrix */
+/* equation in (1). */
+/* On exit, if IJOB = 0, C has been overwritten by the */
+/* solution R. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the matrix C. LDC >= max(1, M). */
+
+/* D (input) REAL array, dimension (LDD, M) */
+/* On entry, D contains an upper triangular matrix. */
+
+/* LDD (input) INTEGER */
+/* The leading dimension of the matrix D. LDD >= max(1, M). */
+
+/* E (input) REAL array, dimension (LDE, N) */
+/* On entry, E contains an upper triangular matrix. */
+
+/* LDE (input) INTEGER */
+/* The leading dimension of the matrix E. LDE >= max(1, N). */
+
+/* F (input/output) REAL array, dimension (LDF, N) */
+/* On entry, F contains the right-hand-side of the second matrix */
+/* equation in (1). */
+/* On exit, if IJOB = 0, F has been overwritten by the */
+/* solution L. */
+
+/* LDF (input) INTEGER */
+/* The leading dimension of the matrix F. LDF >= max(1, M). */
+
+/* SCALE (output) REAL */
+/* On exit, 0 <= SCALE <= 1. If 0 < SCALE < 1, the solutions */
+/* R and L (C and F on entry) will hold the solutions to a */
+/* slightly perturbed system but the input matrices A, B, D and */
+/* E have not been changed. If SCALE = 0, R and L will hold the */
+/* solutions to the homogeneous system with C = F = 0. Normally, */
+/* SCALE = 1. */
+
+/* RDSUM (input/output) REAL */
+/* On entry, the sum of squares of computed contributions to */
+/* the Dif-estimate under computation by STGSYL, where the */
+/* scaling factor RDSCAL (see below) has been factored out. */
+/* On exit, the corresponding sum of squares updated with the */
+/* contributions from the current sub-system. */
+/* If TRANS = 'T' RDSUM is not touched. */
+/* NOTE: RDSUM only makes sense when STGSY2 is called by STGSYL. */
+
+/* RDSCAL (input/output) REAL */
+/* On entry, scaling factor used to prevent overflow in RDSUM. */
+/* On exit, RDSCAL is updated w.r.t. the current contributions */
+/* in RDSUM. */
+/* If TRANS = 'T', RDSCAL is not touched. */
+/* NOTE: RDSCAL only makes sense when STGSY2 is called by */
+/* STGSYL. */
+
+/* IWORK (workspace) INTEGER array, dimension (M+N+2) */
+
+/* PQ (output) INTEGER */
+/* On exit, the number of subsystems (of size 2-by-2, 4-by-4 and */
+/* 8-by-8) solved by this routine. */
+
+/* INFO (output) INTEGER */
+/* On exit, if INFO is set to */
+/* =0: Successful exit */
+/* <0: If INFO = -i, the i-th argument had an illegal value. */
+/* >0: The matrix pairs (A, D) and (B, E) have common or very */
+/* close eigenvalues. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* ===================================================================== */
+/* Replaced various illegal calls to SCOPY by calls to SLASET. */
+/* Sven Hammarling, 27/5/02. */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ d_dim1 = *ldd;
+ d_offset = 1 + d_dim1;
+ d__ -= d_offset;
+ e_dim1 = *lde;
+ e_offset = 1 + e_dim1;
+ e -= e_offset;
+ f_dim1 = *ldf;
+ f_offset = 1 + f_dim1;
+ f -= f_offset;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ ierr = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T")) {
+ *info = -1;
+ } else if (notran) {
+ if (*ijob < 0 || *ijob > 2) {
+ *info = -2;
+ }
+ }
+ if (*info == 0) {
+ if (*m <= 0) {
+ *info = -3;
+ } else if (*n <= 0) {
+ *info = -4;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*ldd < max(1,*m)) {
+ *info = -12;
+ } else if (*lde < max(1,*n)) {
+ *info = -14;
+ } else if (*ldf < max(1,*m)) {
+ *info = -16;
+ }
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STGSY2", &i__1);
+ return 0;
+ }
+
+/* Determine block structure of A */
+
+ *pq = 0;
+ p = 0;
+ i__ = 1;
+L10:
+ if (i__ > *m) {
+ goto L20;
+ }
+ ++p;
+ iwork[p] = i__;
+ if (i__ == *m) {
+ goto L20;
+ }
+ if (a[i__ + 1 + i__ * a_dim1] != 0.f) {
+ i__ += 2;
+ } else {
+ ++i__;
+ }
+ goto L10;
+L20:
+ iwork[p + 1] = *m + 1;
+
+/* Determine block structure of B */
+
+ q = p + 1;
+ j = 1;
+L30:
+ if (j > *n) {
+ goto L40;
+ }
+ ++q;
+ iwork[q] = j;
+ if (j == *n) {
+ goto L40;
+ }
+ if (b[j + 1 + j * b_dim1] != 0.f) {
+ j += 2;
+ } else {
+ ++j;
+ }
+ goto L30;
+L40:
+ iwork[q + 1] = *n + 1;
+ *pq = p * (q - p - 1);
+
+ if (notran) {
+
+/* Solve (I, J) - subsystem */
+/* A(I, I) * R(I, J) - L(I, J) * B(J, J) = C(I, J) */
+/* D(I, I) * R(I, J) - L(I, J) * E(J, J) = F(I, J) */
+/* for I = P, P - 1, ..., 1; J = 1, 2, ..., Q */
+
+ *scale = 1.f;
+ scaloc = 1.f;
+ i__1 = q;
+ for (j = p + 2; j <= i__1; ++j) {
+ js = iwork[j];
+ jsp1 = js + 1;
+ je = iwork[j + 1] - 1;
+ nb = je - js + 1;
+ for (i__ = p; i__ >= 1; --i__) {
+
+ is = iwork[i__];
+ isp1 = is + 1;
+ ie = iwork[i__ + 1] - 1;
+ mb = ie - is + 1;
+ zdim = mb * nb << 1;
+
+ if (mb == 1 && nb == 1) {
+
+/* Build a 2-by-2 system Z * x = RHS */
+
+ z__[0] = a[is + is * a_dim1];
+ z__[1] = d__[is + is * d_dim1];
+ z__[8] = -b[js + js * b_dim1];
+ z__[9] = -e[js + js * e_dim1];
+
+/* Set up right hand side(s) */
+
+ rhs[0] = c__[is + js * c_dim1];
+ rhs[1] = f[is + js * f_dim1];
+
+/* Solve Z * x = RHS */
+
+ sgetc2_(&zdim, z__, &c__8, ipiv, jpiv, &ierr);
+ if (ierr > 0) {
+ *info = ierr;
+ }
+
+ if (*ijob == 0) {
+ sgesc2_(&zdim, z__, &c__8, rhs, ipiv, jpiv, &scaloc);
+ if (scaloc != 1.f) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ sscal_(m, &scaloc, &c__[k * c_dim1 + 1], &
+ c__1);
+ sscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L50: */
+ }
+ *scale *= scaloc;
+ }
+ } else {
+ slatdf_(ijob, &zdim, z__, &c__8, rhs, rdsum, rdscal,
+ ipiv, jpiv);
+ }
+
+/* Unpack solution vector(s) */
+
+ c__[is + js * c_dim1] = rhs[0];
+ f[is + js * f_dim1] = rhs[1];
+
+/* Substitute R(I, J) and L(I, J) into remaining */
+/* equation. */
+
+ if (i__ > 1) {
+ alpha = -rhs[0];
+ i__2 = is - 1;
+ saxpy_(&i__2, &alpha, &a[is * a_dim1 + 1], &c__1, &
+ c__[js * c_dim1 + 1], &c__1);
+ i__2 = is - 1;
+ saxpy_(&i__2, &alpha, &d__[is * d_dim1 + 1], &c__1, &
+ f[js * f_dim1 + 1], &c__1);
+ }
+ if (j < q) {
+ i__2 = *n - je;
+ saxpy_(&i__2, &rhs[1], &b[js + (je + 1) * b_dim1],
+ ldb, &c__[is + (je + 1) * c_dim1], ldc);
+ i__2 = *n - je;
+ saxpy_(&i__2, &rhs[1], &e[js + (je + 1) * e_dim1],
+ lde, &f[is + (je + 1) * f_dim1], ldf);
+ }
+
+ } else if (mb == 1 && nb == 2) {
+
+/* Build a 4-by-4 system Z * x = RHS */
+
+ z__[0] = a[is + is * a_dim1];
+ z__[1] = 0.f;
+ z__[2] = d__[is + is * d_dim1];
+ z__[3] = 0.f;
+
+ z__[8] = 0.f;
+ z__[9] = a[is + is * a_dim1];
+ z__[10] = 0.f;
+ z__[11] = d__[is + is * d_dim1];
+
+ z__[16] = -b[js + js * b_dim1];
+ z__[17] = -b[js + jsp1 * b_dim1];
+ z__[18] = -e[js + js * e_dim1];
+ z__[19] = -e[js + jsp1 * e_dim1];
+
+ z__[24] = -b[jsp1 + js * b_dim1];
+ z__[25] = -b[jsp1 + jsp1 * b_dim1];
+ z__[26] = 0.f;
+ z__[27] = -e[jsp1 + jsp1 * e_dim1];
+
+/* Set up right hand side(s) */
+
+ rhs[0] = c__[is + js * c_dim1];
+ rhs[1] = c__[is + jsp1 * c_dim1];
+ rhs[2] = f[is + js * f_dim1];
+ rhs[3] = f[is + jsp1 * f_dim1];
+
+/* Solve Z * x = RHS */
+
+ sgetc2_(&zdim, z__, &c__8, ipiv, jpiv, &ierr);
+ if (ierr > 0) {
+ *info = ierr;
+ }
+
+ if (*ijob == 0) {
+ sgesc2_(&zdim, z__, &c__8, rhs, ipiv, jpiv, &scaloc);
+ if (scaloc != 1.f) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ sscal_(m, &scaloc, &c__[k * c_dim1 + 1], &
+ c__1);
+ sscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L60: */
+ }
+ *scale *= scaloc;
+ }
+ } else {
+ slatdf_(ijob, &zdim, z__, &c__8, rhs, rdsum, rdscal,
+ ipiv, jpiv);
+ }
+
+/* Unpack solution vector(s) */
+
+ c__[is + js * c_dim1] = rhs[0];
+ c__[is + jsp1 * c_dim1] = rhs[1];
+ f[is + js * f_dim1] = rhs[2];
+ f[is + jsp1 * f_dim1] = rhs[3];
+
+/* Substitute R(I, J) and L(I, J) into remaining */
+/* equation. */
+
+ if (i__ > 1) {
+ i__2 = is - 1;
+ sger_(&i__2, &nb, &c_b27, &a[is * a_dim1 + 1], &c__1,
+ rhs, &c__1, &c__[js * c_dim1 + 1], ldc);
+ i__2 = is - 1;
+ sger_(&i__2, &nb, &c_b27, &d__[is * d_dim1 + 1], &
+ c__1, rhs, &c__1, &f[js * f_dim1 + 1], ldf);
+ }
+ if (j < q) {
+ i__2 = *n - je;
+ saxpy_(&i__2, &rhs[2], &b[js + (je + 1) * b_dim1],
+ ldb, &c__[is + (je + 1) * c_dim1], ldc);
+ i__2 = *n - je;
+ saxpy_(&i__2, &rhs[2], &e[js + (je + 1) * e_dim1],
+ lde, &f[is + (je + 1) * f_dim1], ldf);
+ i__2 = *n - je;
+ saxpy_(&i__2, &rhs[3], &b[jsp1 + (je + 1) * b_dim1],
+ ldb, &c__[is + (je + 1) * c_dim1], ldc);
+ i__2 = *n - je;
+ saxpy_(&i__2, &rhs[3], &e[jsp1 + (je + 1) * e_dim1],
+ lde, &f[is + (je + 1) * f_dim1], ldf);
+ }
+
+ } else if (mb == 2 && nb == 1) {
+
+/* Build a 4-by-4 system Z * x = RHS */
+
+ z__[0] = a[is + is * a_dim1];
+ z__[1] = a[isp1 + is * a_dim1];
+ z__[2] = d__[is + is * d_dim1];
+ z__[3] = 0.f;
+
+ z__[8] = a[is + isp1 * a_dim1];
+ z__[9] = a[isp1 + isp1 * a_dim1];
+ z__[10] = d__[is + isp1 * d_dim1];
+ z__[11] = d__[isp1 + isp1 * d_dim1];
+
+ z__[16] = -b[js + js * b_dim1];
+ z__[17] = 0.f;
+ z__[18] = -e[js + js * e_dim1];
+ z__[19] = 0.f;
+
+ z__[24] = 0.f;
+ z__[25] = -b[js + js * b_dim1];
+ z__[26] = 0.f;
+ z__[27] = -e[js + js * e_dim1];
+
+/* Set up right hand side(s) */
+
+ rhs[0] = c__[is + js * c_dim1];
+ rhs[1] = c__[isp1 + js * c_dim1];
+ rhs[2] = f[is + js * f_dim1];
+ rhs[3] = f[isp1 + js * f_dim1];
+
+/* Solve Z * x = RHS */
+
+ sgetc2_(&zdim, z__, &c__8, ipiv, jpiv, &ierr);
+ if (ierr > 0) {
+ *info = ierr;
+ }
+ if (*ijob == 0) {
+ sgesc2_(&zdim, z__, &c__8, rhs, ipiv, jpiv, &scaloc);
+ if (scaloc != 1.f) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ sscal_(m, &scaloc, &c__[k * c_dim1 + 1], &
+ c__1);
+ sscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L70: */
+ }
+ *scale *= scaloc;
+ }
+ } else {
+ slatdf_(ijob, &zdim, z__, &c__8, rhs, rdsum, rdscal,
+ ipiv, jpiv);
+ }
+
+/* Unpack solution vector(s) */
+
+ c__[is + js * c_dim1] = rhs[0];
+ c__[isp1 + js * c_dim1] = rhs[1];
+ f[is + js * f_dim1] = rhs[2];
+ f[isp1 + js * f_dim1] = rhs[3];
+
+/* Substitute R(I, J) and L(I, J) into remaining */
+/* equation. */
+
+ if (i__ > 1) {
+ i__2 = is - 1;
+ sgemv_("N", &i__2, &mb, &c_b27, &a[is * a_dim1 + 1],
+ lda, rhs, &c__1, &c_b42, &c__[js * c_dim1 + 1]
+, &c__1);
+ i__2 = is - 1;
+ sgemv_("N", &i__2, &mb, &c_b27, &d__[is * d_dim1 + 1],
+ ldd, rhs, &c__1, &c_b42, &f[js * f_dim1 + 1],
+ &c__1);
+ }
+ if (j < q) {
+ i__2 = *n - je;
+ sger_(&mb, &i__2, &c_b42, &rhs[2], &c__1, &b[js + (je
+ + 1) * b_dim1], ldb, &c__[is + (je + 1) *
+ c_dim1], ldc);
+ i__2 = *n - je;
+ sger_(&mb, &i__2, &c_b42, &rhs[2], &c__1, &e[js + (je
+ + 1) * e_dim1], lde, &f[is + (je + 1) *
+ f_dim1], ldf);
+ }
+
+ } else if (mb == 2 && nb == 2) {
+
+/* Build an 8-by-8 system Z * x = RHS */
+
+ slaset_("F", &c__8, &c__8, &c_b56, &c_b56, z__, &c__8);
+
+ z__[0] = a[is + is * a_dim1];
+ z__[1] = a[isp1 + is * a_dim1];
+ z__[4] = d__[is + is * d_dim1];
+
+ z__[8] = a[is + isp1 * a_dim1];
+ z__[9] = a[isp1 + isp1 * a_dim1];
+ z__[12] = d__[is + isp1 * d_dim1];
+ z__[13] = d__[isp1 + isp1 * d_dim1];
+
+ z__[18] = a[is + is * a_dim1];
+ z__[19] = a[isp1 + is * a_dim1];
+ z__[22] = d__[is + is * d_dim1];
+
+ z__[26] = a[is + isp1 * a_dim1];
+ z__[27] = a[isp1 + isp1 * a_dim1];
+ z__[30] = d__[is + isp1 * d_dim1];
+ z__[31] = d__[isp1 + isp1 * d_dim1];
+
+ z__[32] = -b[js + js * b_dim1];
+ z__[34] = -b[js + jsp1 * b_dim1];
+ z__[36] = -e[js + js * e_dim1];
+ z__[38] = -e[js + jsp1 * e_dim1];
+
+ z__[41] = -b[js + js * b_dim1];
+ z__[43] = -b[js + jsp1 * b_dim1];
+ z__[45] = -e[js + js * e_dim1];
+ z__[47] = -e[js + jsp1 * e_dim1];
+
+ z__[48] = -b[jsp1 + js * b_dim1];
+ z__[50] = -b[jsp1 + jsp1 * b_dim1];
+ z__[54] = -e[jsp1 + jsp1 * e_dim1];
+
+ z__[57] = -b[jsp1 + js * b_dim1];
+ z__[59] = -b[jsp1 + jsp1 * b_dim1];
+ z__[63] = -e[jsp1 + jsp1 * e_dim1];
+
+/* Set up right hand side(s) */
+
+ k = 1;
+ ii = mb * nb + 1;
+ i__2 = nb - 1;
+ for (jj = 0; jj <= i__2; ++jj) {
+ scopy_(&mb, &c__[is + (js + jj) * c_dim1], &c__1, &
+ rhs[k - 1], &c__1);
+ scopy_(&mb, &f[is + (js + jj) * f_dim1], &c__1, &rhs[
+ ii - 1], &c__1);
+ k += mb;
+ ii += mb;
+/* L80: */
+ }
+
+/* Solve Z * x = RHS */
+
+ sgetc2_(&zdim, z__, &c__8, ipiv, jpiv, &ierr);
+ if (ierr > 0) {
+ *info = ierr;
+ }
+ if (*ijob == 0) {
+ sgesc2_(&zdim, z__, &c__8, rhs, ipiv, jpiv, &scaloc);
+ if (scaloc != 1.f) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ sscal_(m, &scaloc, &c__[k * c_dim1 + 1], &
+ c__1);
+ sscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L90: */
+ }
+ *scale *= scaloc;
+ }
+ } else {
+ slatdf_(ijob, &zdim, z__, &c__8, rhs, rdsum, rdscal,
+ ipiv, jpiv);
+ }
+
+/* Unpack solution vector(s) */
+
+ k = 1;
+ ii = mb * nb + 1;
+ i__2 = nb - 1;
+ for (jj = 0; jj <= i__2; ++jj) {
+ scopy_(&mb, &rhs[k - 1], &c__1, &c__[is + (js + jj) *
+ c_dim1], &c__1);
+ scopy_(&mb, &rhs[ii - 1], &c__1, &f[is + (js + jj) *
+ f_dim1], &c__1);
+ k += mb;
+ ii += mb;
+/* L100: */
+ }
+
+/* Substitute R(I, J) and L(I, J) into remaining */
+/* equation. */
+
+ if (i__ > 1) {
+ i__2 = is - 1;
+ sgemm_("N", "N", &i__2, &nb, &mb, &c_b27, &a[is *
+ a_dim1 + 1], lda, rhs, &mb, &c_b42, &c__[js *
+ c_dim1 + 1], ldc);
+ i__2 = is - 1;
+ sgemm_("N", "N", &i__2, &nb, &mb, &c_b27, &d__[is *
+ d_dim1 + 1], ldd, rhs, &mb, &c_b42, &f[js *
+ f_dim1 + 1], ldf);
+ }
+ if (j < q) {
+ k = mb * nb + 1;
+ i__2 = *n - je;
+ sgemm_("N", "N", &mb, &i__2, &nb, &c_b42, &rhs[k - 1],
+ &mb, &b[js + (je + 1) * b_dim1], ldb, &c_b42,
+ &c__[is + (je + 1) * c_dim1], ldc);
+ i__2 = *n - je;
+ sgemm_("N", "N", &mb, &i__2, &nb, &c_b42, &rhs[k - 1],
+ &mb, &e[js + (je + 1) * e_dim1], lde, &c_b42,
+ &f[is + (je + 1) * f_dim1], ldf);
+ }
+
+ }
+
+/* L110: */
+ }
+/* L120: */
+ }
+ } else {
+
+/* Solve (I, J) - subsystem */
+/* A(I, I)' * R(I, J) + D(I, I)' * L(J, J) = C(I, J) */
+/* R(I, I) * B(J, J) + L(I, J) * E(J, J) = -F(I, J) */
+/* for I = 1, 2, ..., P, J = Q, Q - 1, ..., 1 */
+
+ *scale = 1.f;
+ scaloc = 1.f;
+ i__1 = p;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+ is = iwork[i__];
+ isp1 = is + 1;
+ ie = iwork[i__ + 1] - 1;
+ mb = ie - is + 1;
+ i__2 = p + 2;
+ for (j = q; j >= i__2; --j) {
+
+ js = iwork[j];
+ jsp1 = js + 1;
+ je = iwork[j + 1] - 1;
+ nb = je - js + 1;
+ zdim = mb * nb << 1;
+ if (mb == 1 && nb == 1) {
+
+/* Build a 2-by-2 system Z' * x = RHS */
+
+ z__[0] = a[is + is * a_dim1];
+ z__[1] = -b[js + js * b_dim1];
+ z__[8] = d__[is + is * d_dim1];
+ z__[9] = -e[js + js * e_dim1];
+
+/* Set up right hand side(s) */
+
+ rhs[0] = c__[is + js * c_dim1];
+ rhs[1] = f[is + js * f_dim1];
+
+/* Solve Z' * x = RHS */
+
+ sgetc2_(&zdim, z__, &c__8, ipiv, jpiv, &ierr);
+ if (ierr > 0) {
+ *info = ierr;
+ }
+
+ sgesc2_(&zdim, z__, &c__8, rhs, ipiv, jpiv, &scaloc);
+ if (scaloc != 1.f) {
+ i__3 = *n;
+ for (k = 1; k <= i__3; ++k) {
+ sscal_(m, &scaloc, &c__[k * c_dim1 + 1], &c__1);
+ sscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L130: */
+ }
+ *scale *= scaloc;
+ }
+
+/* Unpack solution vector(s) */
+
+ c__[is + js * c_dim1] = rhs[0];
+ f[is + js * f_dim1] = rhs[1];
+
+/* Substitute R(I, J) and L(I, J) into remaining */
+/* equation. */
+
+ if (j > p + 2) {
+ alpha = rhs[0];
+ i__3 = js - 1;
+ saxpy_(&i__3, &alpha, &b[js * b_dim1 + 1], &c__1, &f[
+ is + f_dim1], ldf);
+ alpha = rhs[1];
+ i__3 = js - 1;
+ saxpy_(&i__3, &alpha, &e[js * e_dim1 + 1], &c__1, &f[
+ is + f_dim1], ldf);
+ }
+ if (i__ < p) {
+ alpha = -rhs[0];
+ i__3 = *m - ie;
+ saxpy_(&i__3, &alpha, &a[is + (ie + 1) * a_dim1], lda,
+ &c__[ie + 1 + js * c_dim1], &c__1);
+ alpha = -rhs[1];
+ i__3 = *m - ie;
+ saxpy_(&i__3, &alpha, &d__[is + (ie + 1) * d_dim1],
+ ldd, &c__[ie + 1 + js * c_dim1], &c__1);
+ }
+
+ } else if (mb == 1 && nb == 2) {
+
+/* Build a 4-by-4 system Z' * x = RHS */
+
+ z__[0] = a[is + is * a_dim1];
+ z__[1] = 0.f;
+ z__[2] = -b[js + js * b_dim1];
+ z__[3] = -b[jsp1 + js * b_dim1];
+
+ z__[8] = 0.f;
+ z__[9] = a[is + is * a_dim1];
+ z__[10] = -b[js + jsp1 * b_dim1];
+ z__[11] = -b[jsp1 + jsp1 * b_dim1];
+
+ z__[16] = d__[is + is * d_dim1];
+ z__[17] = 0.f;
+ z__[18] = -e[js + js * e_dim1];
+ z__[19] = 0.f;
+
+ z__[24] = 0.f;
+ z__[25] = d__[is + is * d_dim1];
+ z__[26] = -e[js + jsp1 * e_dim1];
+ z__[27] = -e[jsp1 + jsp1 * e_dim1];
+
+/* Set up right hand side(s) */
+
+ rhs[0] = c__[is + js * c_dim1];
+ rhs[1] = c__[is + jsp1 * c_dim1];
+ rhs[2] = f[is + js * f_dim1];
+ rhs[3] = f[is + jsp1 * f_dim1];
+
+/* Solve Z' * x = RHS */
+
+ sgetc2_(&zdim, z__, &c__8, ipiv, jpiv, &ierr);
+ if (ierr > 0) {
+ *info = ierr;
+ }
+ sgesc2_(&zdim, z__, &c__8, rhs, ipiv, jpiv, &scaloc);
+ if (scaloc != 1.f) {
+ i__3 = *n;
+ for (k = 1; k <= i__3; ++k) {
+ sscal_(m, &scaloc, &c__[k * c_dim1 + 1], &c__1);
+ sscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L140: */
+ }
+ *scale *= scaloc;
+ }
+
+/* Unpack solution vector(s) */
+
+ c__[is + js * c_dim1] = rhs[0];
+ c__[is + jsp1 * c_dim1] = rhs[1];
+ f[is + js * f_dim1] = rhs[2];
+ f[is + jsp1 * f_dim1] = rhs[3];
+
+/* Substitute R(I, J) and L(I, J) into remaining */
+/* equation. */
+
+ if (j > p + 2) {
+ i__3 = js - 1;
+ saxpy_(&i__3, rhs, &b[js * b_dim1 + 1], &c__1, &f[is
+ + f_dim1], ldf);
+ i__3 = js - 1;
+ saxpy_(&i__3, &rhs[1], &b[jsp1 * b_dim1 + 1], &c__1, &
+ f[is + f_dim1], ldf);
+ i__3 = js - 1;
+ saxpy_(&i__3, &rhs[2], &e[js * e_dim1 + 1], &c__1, &f[
+ is + f_dim1], ldf);
+ i__3 = js - 1;
+ saxpy_(&i__3, &rhs[3], &e[jsp1 * e_dim1 + 1], &c__1, &
+ f[is + f_dim1], ldf);
+ }
+ if (i__ < p) {
+ i__3 = *m - ie;
+ sger_(&i__3, &nb, &c_b27, &a[is + (ie + 1) * a_dim1],
+ lda, rhs, &c__1, &c__[ie + 1 + js * c_dim1],
+ ldc);
+ i__3 = *m - ie;
+ sger_(&i__3, &nb, &c_b27, &d__[is + (ie + 1) * d_dim1]
+, ldd, &rhs[2], &c__1, &c__[ie + 1 + js *
+ c_dim1], ldc);
+ }
+
+ } else if (mb == 2 && nb == 1) {
+
+/* Build a 4-by-4 system Z' * x = RHS */
+
+ z__[0] = a[is + is * a_dim1];
+ z__[1] = a[is + isp1 * a_dim1];
+ z__[2] = -b[js + js * b_dim1];
+ z__[3] = 0.f;
+
+ z__[8] = a[isp1 + is * a_dim1];
+ z__[9] = a[isp1 + isp1 * a_dim1];
+ z__[10] = 0.f;
+ z__[11] = -b[js + js * b_dim1];
+
+ z__[16] = d__[is + is * d_dim1];
+ z__[17] = d__[is + isp1 * d_dim1];
+ z__[18] = -e[js + js * e_dim1];
+ z__[19] = 0.f;
+
+ z__[24] = 0.f;
+ z__[25] = d__[isp1 + isp1 * d_dim1];
+ z__[26] = 0.f;
+ z__[27] = -e[js + js * e_dim1];
+
+/* Set up right hand side(s) */
+
+ rhs[0] = c__[is + js * c_dim1];
+ rhs[1] = c__[isp1 + js * c_dim1];
+ rhs[2] = f[is + js * f_dim1];
+ rhs[3] = f[isp1 + js * f_dim1];
+
+/* Solve Z' * x = RHS */
+
+ sgetc2_(&zdim, z__, &c__8, ipiv, jpiv, &ierr);
+ if (ierr > 0) {
+ *info = ierr;
+ }
+
+ sgesc2_(&zdim, z__, &c__8, rhs, ipiv, jpiv, &scaloc);
+ if (scaloc != 1.f) {
+ i__3 = *n;
+ for (k = 1; k <= i__3; ++k) {
+ sscal_(m, &scaloc, &c__[k * c_dim1 + 1], &c__1);
+ sscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L150: */
+ }
+ *scale *= scaloc;
+ }
+
+/* Unpack solution vector(s) */
+
+ c__[is + js * c_dim1] = rhs[0];
+ c__[isp1 + js * c_dim1] = rhs[1];
+ f[is + js * f_dim1] = rhs[2];
+ f[isp1 + js * f_dim1] = rhs[3];
+
+/* Substitute R(I, J) and L(I, J) into remaining */
+/* equation. */
+
+ if (j > p + 2) {
+ i__3 = js - 1;
+ sger_(&mb, &i__3, &c_b42, rhs, &c__1, &b[js * b_dim1
+ + 1], &c__1, &f[is + f_dim1], ldf);
+ i__3 = js - 1;
+ sger_(&mb, &i__3, &c_b42, &rhs[2], &c__1, &e[js *
+ e_dim1 + 1], &c__1, &f[is + f_dim1], ldf);
+ }
+ if (i__ < p) {
+ i__3 = *m - ie;
+ sgemv_("T", &mb, &i__3, &c_b27, &a[is + (ie + 1) *
+ a_dim1], lda, rhs, &c__1, &c_b42, &c__[ie + 1
+ + js * c_dim1], &c__1);
+ i__3 = *m - ie;
+ sgemv_("T", &mb, &i__3, &c_b27, &d__[is + (ie + 1) *
+ d_dim1], ldd, &rhs[2], &c__1, &c_b42, &c__[ie
+ + 1 + js * c_dim1], &c__1);
+ }
+
+ } else if (mb == 2 && nb == 2) {
+
+/* Build an 8-by-8 system Z' * x = RHS */
+
+ slaset_("F", &c__8, &c__8, &c_b56, &c_b56, z__, &c__8);
+
+ z__[0] = a[is + is * a_dim1];
+ z__[1] = a[is + isp1 * a_dim1];
+ z__[4] = -b[js + js * b_dim1];
+ z__[6] = -b[jsp1 + js * b_dim1];
+
+ z__[8] = a[isp1 + is * a_dim1];
+ z__[9] = a[isp1 + isp1 * a_dim1];
+ z__[13] = -b[js + js * b_dim1];
+ z__[15] = -b[jsp1 + js * b_dim1];
+
+ z__[18] = a[is + is * a_dim1];
+ z__[19] = a[is + isp1 * a_dim1];
+ z__[20] = -b[js + jsp1 * b_dim1];
+ z__[22] = -b[jsp1 + jsp1 * b_dim1];
+
+ z__[26] = a[isp1 + is * a_dim1];
+ z__[27] = a[isp1 + isp1 * a_dim1];
+ z__[29] = -b[js + jsp1 * b_dim1];
+ z__[31] = -b[jsp1 + jsp1 * b_dim1];
+
+ z__[32] = d__[is + is * d_dim1];
+ z__[33] = d__[is + isp1 * d_dim1];
+ z__[36] = -e[js + js * e_dim1];
+
+ z__[41] = d__[isp1 + isp1 * d_dim1];
+ z__[45] = -e[js + js * e_dim1];
+
+ z__[50] = d__[is + is * d_dim1];
+ z__[51] = d__[is + isp1 * d_dim1];
+ z__[52] = -e[js + jsp1 * e_dim1];
+ z__[54] = -e[jsp1 + jsp1 * e_dim1];
+
+ z__[59] = d__[isp1 + isp1 * d_dim1];
+ z__[61] = -e[js + jsp1 * e_dim1];
+ z__[63] = -e[jsp1 + jsp1 * e_dim1];
+
+/* Set up right hand side(s) */
+
+ k = 1;
+ ii = mb * nb + 1;
+ i__3 = nb - 1;
+ for (jj = 0; jj <= i__3; ++jj) {
+ scopy_(&mb, &c__[is + (js + jj) * c_dim1], &c__1, &
+ rhs[k - 1], &c__1);
+ scopy_(&mb, &f[is + (js + jj) * f_dim1], &c__1, &rhs[
+ ii - 1], &c__1);
+ k += mb;
+ ii += mb;
+/* L160: */
+ }
+
+
+/* Solve Z' * x = RHS */
+
+ sgetc2_(&zdim, z__, &c__8, ipiv, jpiv, &ierr);
+ if (ierr > 0) {
+ *info = ierr;
+ }
+
+ sgesc2_(&zdim, z__, &c__8, rhs, ipiv, jpiv, &scaloc);
+ if (scaloc != 1.f) {
+ i__3 = *n;
+ for (k = 1; k <= i__3; ++k) {
+ sscal_(m, &scaloc, &c__[k * c_dim1 + 1], &c__1);
+ sscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L170: */
+ }
+ *scale *= scaloc;
+ }
+
+/* Unpack solution vector(s) */
+
+ k = 1;
+ ii = mb * nb + 1;
+ i__3 = nb - 1;
+ for (jj = 0; jj <= i__3; ++jj) {
+ scopy_(&mb, &rhs[k - 1], &c__1, &c__[is + (js + jj) *
+ c_dim1], &c__1);
+ scopy_(&mb, &rhs[ii - 1], &c__1, &f[is + (js + jj) *
+ f_dim1], &c__1);
+ k += mb;
+ ii += mb;
+/* L180: */
+ }
+
+/* Substitute R(I, J) and L(I, J) into remaining */
+/* equation. */
+
+ if (j > p + 2) {
+ i__3 = js - 1;
+ sgemm_("N", "T", &mb, &i__3, &nb, &c_b42, &c__[is +
+ js * c_dim1], ldc, &b[js * b_dim1 + 1], ldb, &
+ c_b42, &f[is + f_dim1], ldf);
+ i__3 = js - 1;
+ sgemm_("N", "T", &mb, &i__3, &nb, &c_b42, &f[is + js *
+ f_dim1], ldf, &e[js * e_dim1 + 1], lde, &
+ c_b42, &f[is + f_dim1], ldf);
+ }
+ if (i__ < p) {
+ i__3 = *m - ie;
+ sgemm_("T", "N", &i__3, &nb, &mb, &c_b27, &a[is + (ie
+ + 1) * a_dim1], lda, &c__[is + js * c_dim1],
+ ldc, &c_b42, &c__[ie + 1 + js * c_dim1], ldc);
+ i__3 = *m - ie;
+ sgemm_("T", "N", &i__3, &nb, &mb, &c_b27, &d__[is + (
+ ie + 1) * d_dim1], ldd, &f[is + js * f_dim1],
+ ldf, &c_b42, &c__[ie + 1 + js * c_dim1], ldc);
+ }
+
+ }
+
+/* L190: */
+ }
+/* L200: */
+ }
+
+ }
+ return 0;
+
+/* End of STGSY2 */
+
+} /* stgsy2_ */
diff --git a/contrib/libs/clapack/stgsyl.c b/contrib/libs/clapack/stgsyl.c
new file mode 100644
index 0000000000..33b33c7582
--- /dev/null
+++ b/contrib/libs/clapack/stgsyl.c
@@ -0,0 +1,691 @@
+/* stgsyl.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__2 = 2;
+static integer c_n1 = -1;
+static integer c__5 = 5;
+static real c_b14 = 0.f;
+static integer c__1 = 1;
+static real c_b51 = -1.f;
+static real c_b52 = 1.f;
+
+/* Subroutine */ int stgsyl_(char *trans, integer *ijob, integer *m, integer *
+ n, real *a, integer *lda, real *b, integer *ldb, real *c__, integer *
+ ldc, real *d__, integer *ldd, real *e, integer *lde, real *f, integer
+ *ldf, real *scale, real *dif, real *work, integer *lwork, integer *
+ iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, d_dim1,
+ d_offset, e_dim1, e_offset, f_dim1, f_offset, i__1, i__2, i__3,
+ i__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k, p, q, ie, je, mb, nb, is, js, pq;
+ real dsum;
+ integer ppqq;
+ extern logical lsame_(char *, char *);
+ integer ifunc;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ integer linfo;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ integer lwmin;
+ real scale2, dscale;
+ extern /* Subroutine */ int stgsy2_(char *, integer *, integer *, integer
+ *, real *, integer *, real *, integer *, real *, integer *, real *
+, integer *, real *, integer *, real *, integer *, real *, real *,
+ real *, integer *, integer *, integer *);
+ real scaloc;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *), slaset_(char *, integer *,
+ integer *, real *, real *, real *, integer *);
+ integer iround;
+ logical notran;
+ integer isolve;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STGSYL solves the generalized Sylvester equation: */
+
+/* A * R - L * B = scale * C (1) */
+/* D * R - L * E = scale * F */
+
+/* where R and L are unknown m-by-n matrices, (A, D), (B, E) and */
+/* (C, F) are given matrix pairs of size m-by-m, n-by-n and m-by-n, */
+/* respectively, with real entries. (A, D) and (B, E) must be in */
+/* generalized (real) Schur canonical form, i.e. A, B are upper quasi */
+/* triangular and D, E are upper triangular. */
+
+/* The solution (R, L) overwrites (C, F). 0 <= SCALE <= 1 is an output */
+/* scaling factor chosen to avoid overflow. */
+
+/* In matrix notation (1) is equivalent to solve Zx = scale b, where */
+/* Z is defined as */
+
+/* Z = [ kron(In, A) -kron(B', Im) ] (2) */
+/* [ kron(In, D) -kron(E', Im) ]. */
+
+/* Here Ik is the identity matrix of size k and X' is the transpose of */
+/* X. kron(X, Y) is the Kronecker product between the matrices X and Y. */
+
+/* If TRANS = 'T', STGSYL solves the transposed system Z'*y = scale*b, */
+/* which is equivalent to solve for R and L in */
+
+/* A' * R + D' * L = scale * C (3) */
+/* R * B' + L * E' = scale * (-F) */
+
+/* This case (TRANS = 'T') is used to compute an one-norm-based estimate */
+/* of Dif[(A,D), (B,E)], the separation between the matrix pairs (A,D) */
+/* and (B,E), using SLACON. */
+
+/* If IJOB >= 1, STGSYL computes a Frobenius norm-based estimate */
+/* of Dif[(A,D),(B,E)]. That is, the reciprocal of a lower bound on the */
+/* reciprocal of the smallest singular value of Z. See [1-2] for more */
+/* information. */
+
+/* This is a level 3 BLAS algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N', solve the generalized Sylvester equation (1). */
+/* = 'T', solve the 'transposed' system (3). */
+
+/* IJOB (input) INTEGER */
+/* Specifies what kind of functionality to be performed. */
+/* =0: solve (1) only. */
+/* =1: The functionality of 0 and 3. */
+/* =2: The functionality of 0 and 4. */
+/* =3: Only an estimate of Dif[(A,D), (B,E)] is computed. */
+/* (look ahead strategy IJOB = 1 is used). */
+/* =4: Only an estimate of Dif[(A,D), (B,E)] is computed. */
+/* ( SGECON on sub-systems is used ). */
+/* Not referenced if TRANS = 'T'. */
+
+/* M (input) INTEGER */
+/* The order of the matrices A and D, and the row dimension of */
+/* the matrices C, F, R and L. */
+
+/* N (input) INTEGER */
+/* The order of the matrices B and E, and the column dimension */
+/* of the matrices C, F, R and L. */
+
+/* A (input) REAL array, dimension (LDA, M) */
+/* The upper quasi triangular matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1, M). */
+
+/* B (input) REAL array, dimension (LDB, N) */
+/* The upper quasi triangular matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1, N). */
+
+/* C (input/output) REAL array, dimension (LDC, N) */
+/* On entry, C contains the right-hand-side of the first matrix */
+/* equation in (1) or (3). */
+/* On exit, if IJOB = 0, 1 or 2, C has been overwritten by */
+/* the solution R. If IJOB = 3 or 4 and TRANS = 'N', C holds R, */
+/* the solution achieved during the computation of the */
+/* Dif-estimate. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1, M). */
+
+/* D (input) REAL array, dimension (LDD, M) */
+/* The upper triangular matrix D. */
+
+/* LDD (input) INTEGER */
+/* The leading dimension of the array D. LDD >= max(1, M). */
+
+/* E (input) REAL array, dimension (LDE, N) */
+/* The upper triangular matrix E. */
+
+/* LDE (input) INTEGER */
+/* The leading dimension of the array E. LDE >= max(1, N). */
+
+/* F (input/output) REAL array, dimension (LDF, N) */
+/* On entry, F contains the right-hand-side of the second matrix */
+/* equation in (1) or (3). */
+/* On exit, if IJOB = 0, 1 or 2, F has been overwritten by */
+/* the solution L. If IJOB = 3 or 4 and TRANS = 'N', F holds L, */
+/* the solution achieved during the computation of the */
+/* Dif-estimate. */
+
+/* LDF (input) INTEGER */
+/* The leading dimension of the array F. LDF >= max(1, M). */
+
+/* DIF (output) REAL */
+/* On exit DIF is the reciprocal of a lower bound of the */
+/* reciprocal of the Dif-function, i.e. DIF is an upper bound of */
+/* Dif[(A,D), (B,E)] = sigma_min(Z), where Z as in (2). */
+/* IF IJOB = 0 or TRANS = 'T', DIF is not touched. */
+
+/* SCALE (output) REAL */
+/* On exit SCALE is the scaling factor in (1) or (3). */
+/* If 0 < SCALE < 1, C and F hold the solutions R and L, resp., */
+/* to a slightly perturbed system but the input matrices A, B, D */
+/* and E have not been changed. If SCALE = 0, C and F hold the */
+/* solutions R and L, respectively, to the homogeneous system */
+/* with C = F = 0. Normally, SCALE = 1. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK > = 1. */
+/* If IJOB = 1 or 2 and TRANS = 'N', LWORK >= max(1,2*M*N). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (M+N+6) */
+
+/* INFO (output) INTEGER */
+/* =0: successful exit */
+/* <0: If INFO = -i, the i-th argument had an illegal value. */
+/* >0: (A, D) and (B, E) have common or close eigenvalues. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* [1] B. Kagstrom and P. Poromaa, LAPACK-Style Algorithms and Software */
+/* for Solving the Generalized Sylvester Equation and Estimating the */
+/* Separation between Regular Matrix Pairs, Report UMINF - 93.23, */
+/* Department of Computing Science, Umea University, S-901 87 Umea, */
+/* Sweden, December 1993, Revised April 1994, Also as LAPACK Working */
+/* Note 75. To appear in ACM Trans. on Math. Software, Vol 22, */
+/* No 1, 1996. */
+
+/* [2] B. Kagstrom, A Perturbation Analysis of the Generalized Sylvester */
+/* Equation (AR - LB, DR - LE ) = (C, F), SIAM J. Matrix Anal. */
+/* Appl., 15(4):1045-1060, 1994 */
+
+/* [3] B. Kagstrom and L. Westin, Generalized Schur Methods with */
+/* Condition Estimators for Solving the Generalized Sylvester */
+/* Equation, IEEE Transactions on Automatic Control, Vol. 34, No. 7, */
+/* July 1989, pp 745-751. */
+
+/* ===================================================================== */
+/* Replaced various illegal calls to SCOPY by calls to SLASET. */
+/* Sven Hammarling, 1/5/02. */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ d_dim1 = *ldd;
+ d_offset = 1 + d_dim1;
+ d__ -= d_offset;
+ e_dim1 = *lde;
+ e_offset = 1 + e_dim1;
+ e -= e_offset;
+ f_dim1 = *ldf;
+ f_offset = 1 + f_dim1;
+ f -= f_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+ if (! notran && ! lsame_(trans, "T")) {
+ *info = -1;
+ } else if (notran) {
+ if (*ijob < 0 || *ijob > 4) {
+ *info = -2;
+ }
+ }
+ if (*info == 0) {
+ if (*m <= 0) {
+ *info = -3;
+ } else if (*n <= 0) {
+ *info = -4;
+ } else if (*lda < max(1,*m)) {
+ *info = -6;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*ldd < max(1,*m)) {
+ *info = -12;
+ } else if (*lde < max(1,*n)) {
+ *info = -14;
+ } else if (*ldf < max(1,*m)) {
+ *info = -16;
+ }
+ }
+
+ if (*info == 0) {
+ if (notran) {
+ if (*ijob == 1 || *ijob == 2) {
+/* Computing MAX */
+ i__1 = 1, i__2 = (*m << 1) * *n;
+ lwmin = max(i__1,i__2);
+ } else {
+ lwmin = 1;
+ }
+ } else {
+ lwmin = 1;
+ }
+ work[1] = (real) lwmin;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -20;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STGSYL", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ *scale = 1.f;
+ if (notran) {
+ if (*ijob != 0) {
+ *dif = 0.f;
+ }
+ }
+ return 0;
+ }
+
+/* Determine optimal block sizes MB and NB */
+
+ mb = ilaenv_(&c__2, "STGSYL", trans, m, n, &c_n1, &c_n1);
+ nb = ilaenv_(&c__5, "STGSYL", trans, m, n, &c_n1, &c_n1);
+
+ isolve = 1;
+ ifunc = 0;
+ if (notran) {
+ if (*ijob >= 3) {
+ ifunc = *ijob - 2;
+ slaset_("F", m, n, &c_b14, &c_b14, &c__[c_offset], ldc)
+ ;
+ slaset_("F", m, n, &c_b14, &c_b14, &f[f_offset], ldf);
+ } else if (*ijob >= 1 && notran) {
+ isolve = 2;
+ }
+ }
+
+ if (mb <= 1 && nb <= 1 || mb >= *m && nb >= *n) {
+
+ i__1 = isolve;
+ for (iround = 1; iround <= i__1; ++iround) {
+
+/* Use unblocked Level 2 solver */
+
+ dscale = 0.f;
+ dsum = 1.f;
+ pq = 0;
+ stgsy2_(trans, &ifunc, m, n, &a[a_offset], lda, &b[b_offset], ldb,
+ &c__[c_offset], ldc, &d__[d_offset], ldd, &e[e_offset],
+ lde, &f[f_offset], ldf, scale, &dsum, &dscale, &iwork[1],
+ &pq, info);
+ if (dscale != 0.f) {
+ if (*ijob == 1 || *ijob == 3) {
+ *dif = sqrt((real) ((*m << 1) * *n)) / (dscale * sqrt(
+ dsum));
+ } else {
+ *dif = sqrt((real) pq) / (dscale * sqrt(dsum));
+ }
+ }
+
+ if (isolve == 2 && iround == 1) {
+ if (notran) {
+ ifunc = *ijob;
+ }
+ scale2 = *scale;
+ slacpy_("F", m, n, &c__[c_offset], ldc, &work[1], m);
+ slacpy_("F", m, n, &f[f_offset], ldf, &work[*m * *n + 1], m);
+ slaset_("F", m, n, &c_b14, &c_b14, &c__[c_offset], ldc);
+ slaset_("F", m, n, &c_b14, &c_b14, &f[f_offset], ldf);
+ } else if (isolve == 2 && iround == 2) {
+ slacpy_("F", m, n, &work[1], m, &c__[c_offset], ldc);
+ slacpy_("F", m, n, &work[*m * *n + 1], m, &f[f_offset], ldf);
+ *scale = scale2;
+ }
+/* L30: */
+ }
+
+ return 0;
+ }
+
+/* Determine block structure of A */
+
+ p = 0;
+ i__ = 1;
+L40:
+ if (i__ > *m) {
+ goto L50;
+ }
+ ++p;
+ iwork[p] = i__;
+ i__ += mb;
+ if (i__ >= *m) {
+ goto L50;
+ }
+ if (a[i__ + (i__ - 1) * a_dim1] != 0.f) {
+ ++i__;
+ }
+ goto L40;
+L50:
+
+ iwork[p + 1] = *m + 1;
+ if (iwork[p] == iwork[p + 1]) {
+ --p;
+ }
+
+/* Determine block structure of B */
+
+ q = p + 1;
+ j = 1;
+L60:
+ if (j > *n) {
+ goto L70;
+ }
+ ++q;
+ iwork[q] = j;
+ j += nb;
+ if (j >= *n) {
+ goto L70;
+ }
+ if (b[j + (j - 1) * b_dim1] != 0.f) {
+ ++j;
+ }
+ goto L60;
+L70:
+
+ iwork[q + 1] = *n + 1;
+ if (iwork[q] == iwork[q + 1]) {
+ --q;
+ }
+
+ if (notran) {
+
+ i__1 = isolve;
+ for (iround = 1; iround <= i__1; ++iround) {
+
+/* Solve (I, J)-subsystem */
+/* A(I, I) * R(I, J) - L(I, J) * B(J, J) = C(I, J) */
+/* D(I, I) * R(I, J) - L(I, J) * E(J, J) = F(I, J) */
+/* for I = P, P - 1,..., 1; J = 1, 2,..., Q */
+
+ dscale = 0.f;
+ dsum = 1.f;
+ pq = 0;
+ *scale = 1.f;
+ i__2 = q;
+ for (j = p + 2; j <= i__2; ++j) {
+ js = iwork[j];
+ je = iwork[j + 1] - 1;
+ nb = je - js + 1;
+ for (i__ = p; i__ >= 1; --i__) {
+ is = iwork[i__];
+ ie = iwork[i__ + 1] - 1;
+ mb = ie - is + 1;
+ ppqq = 0;
+ stgsy2_(trans, &ifunc, &mb, &nb, &a[is + is * a_dim1],
+ lda, &b[js + js * b_dim1], ldb, &c__[is + js *
+ c_dim1], ldc, &d__[is + is * d_dim1], ldd, &e[js
+ + js * e_dim1], lde, &f[is + js * f_dim1], ldf, &
+ scaloc, &dsum, &dscale, &iwork[q + 2], &ppqq, &
+ linfo);
+ if (linfo > 0) {
+ *info = linfo;
+ }
+
+ pq += ppqq;
+ if (scaloc != 1.f) {
+ i__3 = js - 1;
+ for (k = 1; k <= i__3; ++k) {
+ sscal_(m, &scaloc, &c__[k * c_dim1 + 1], &c__1);
+ sscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L80: */
+ }
+ i__3 = je;
+ for (k = js; k <= i__3; ++k) {
+ i__4 = is - 1;
+ sscal_(&i__4, &scaloc, &c__[k * c_dim1 + 1], &
+ c__1);
+ i__4 = is - 1;
+ sscal_(&i__4, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L90: */
+ }
+ i__3 = je;
+ for (k = js; k <= i__3; ++k) {
+ i__4 = *m - ie;
+ sscal_(&i__4, &scaloc, &c__[ie + 1 + k * c_dim1],
+ &c__1);
+ i__4 = *m - ie;
+ sscal_(&i__4, &scaloc, &f[ie + 1 + k * f_dim1], &
+ c__1);
+/* L100: */
+ }
+ i__3 = *n;
+ for (k = je + 1; k <= i__3; ++k) {
+ sscal_(m, &scaloc, &c__[k * c_dim1 + 1], &c__1);
+ sscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L110: */
+ }
+ *scale *= scaloc;
+ }
+
+/* Substitute R(I, J) and L(I, J) into remaining */
+/* equation. */
+
+ if (i__ > 1) {
+ i__3 = is - 1;
+ sgemm_("N", "N", &i__3, &nb, &mb, &c_b51, &a[is *
+ a_dim1 + 1], lda, &c__[is + js * c_dim1], ldc,
+ &c_b52, &c__[js * c_dim1 + 1], ldc);
+ i__3 = is - 1;
+ sgemm_("N", "N", &i__3, &nb, &mb, &c_b51, &d__[is *
+ d_dim1 + 1], ldd, &c__[is + js * c_dim1], ldc,
+ &c_b52, &f[js * f_dim1 + 1], ldf);
+ }
+ if (j < q) {
+ i__3 = *n - je;
+ sgemm_("N", "N", &mb, &i__3, &nb, &c_b52, &f[is + js *
+ f_dim1], ldf, &b[js + (je + 1) * b_dim1],
+ ldb, &c_b52, &c__[is + (je + 1) * c_dim1],
+ ldc);
+ i__3 = *n - je;
+ sgemm_("N", "N", &mb, &i__3, &nb, &c_b52, &f[is + js *
+ f_dim1], ldf, &e[js + (je + 1) * e_dim1],
+ lde, &c_b52, &f[is + (je + 1) * f_dim1], ldf);
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ if (dscale != 0.f) {
+ if (*ijob == 1 || *ijob == 3) {
+ *dif = sqrt((real) ((*m << 1) * *n)) / (dscale * sqrt(
+ dsum));
+ } else {
+ *dif = sqrt((real) pq) / (dscale * sqrt(dsum));
+ }
+ }
+ if (isolve == 2 && iround == 1) {
+ if (notran) {
+ ifunc = *ijob;
+ }
+ scale2 = *scale;
+ slacpy_("F", m, n, &c__[c_offset], ldc, &work[1], m);
+ slacpy_("F", m, n, &f[f_offset], ldf, &work[*m * *n + 1], m);
+ slaset_("F", m, n, &c_b14, &c_b14, &c__[c_offset], ldc);
+ slaset_("F", m, n, &c_b14, &c_b14, &f[f_offset], ldf);
+ } else if (isolve == 2 && iround == 2) {
+ slacpy_("F", m, n, &work[1], m, &c__[c_offset], ldc);
+ slacpy_("F", m, n, &work[*m * *n + 1], m, &f[f_offset], ldf);
+ *scale = scale2;
+ }
+/* L150: */
+ }
+
+ } else {
+
+/* Solve transposed (I, J)-subsystem */
+/* A(I, I)' * R(I, J) + D(I, I)' * L(I, J) = C(I, J) */
+/* R(I, J) * B(J, J)' + L(I, J) * E(J, J)' = -F(I, J) */
+/* for I = 1,2,..., P; J = Q, Q-1,..., 1 */
+
+ *scale = 1.f;
+ i__1 = p;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ is = iwork[i__];
+ ie = iwork[i__ + 1] - 1;
+ mb = ie - is + 1;
+ i__2 = p + 2;
+ for (j = q; j >= i__2; --j) {
+ js = iwork[j];
+ je = iwork[j + 1] - 1;
+ nb = je - js + 1;
+ stgsy2_(trans, &ifunc, &mb, &nb, &a[is + is * a_dim1], lda, &
+ b[js + js * b_dim1], ldb, &c__[is + js * c_dim1], ldc,
+ &d__[is + is * d_dim1], ldd, &e[js + js * e_dim1],
+ lde, &f[is + js * f_dim1], ldf, &scaloc, &dsum, &
+ dscale, &iwork[q + 2], &ppqq, &linfo);
+ if (linfo > 0) {
+ *info = linfo;
+ }
+ if (scaloc != 1.f) {
+ i__3 = js - 1;
+ for (k = 1; k <= i__3; ++k) {
+ sscal_(m, &scaloc, &c__[k * c_dim1 + 1], &c__1);
+ sscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L160: */
+ }
+ i__3 = je;
+ for (k = js; k <= i__3; ++k) {
+ i__4 = is - 1;
+ sscal_(&i__4, &scaloc, &c__[k * c_dim1 + 1], &c__1);
+ i__4 = is - 1;
+ sscal_(&i__4, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L170: */
+ }
+ i__3 = je;
+ for (k = js; k <= i__3; ++k) {
+ i__4 = *m - ie;
+ sscal_(&i__4, &scaloc, &c__[ie + 1 + k * c_dim1], &
+ c__1);
+ i__4 = *m - ie;
+ sscal_(&i__4, &scaloc, &f[ie + 1 + k * f_dim1], &c__1)
+ ;
+/* L180: */
+ }
+ i__3 = *n;
+ for (k = je + 1; k <= i__3; ++k) {
+ sscal_(m, &scaloc, &c__[k * c_dim1 + 1], &c__1);
+ sscal_(m, &scaloc, &f[k * f_dim1 + 1], &c__1);
+/* L190: */
+ }
+ *scale *= scaloc;
+ }
+
+/* Substitute R(I, J) and L(I, J) into remaining equation. */
+
+ if (j > p + 2) {
+ i__3 = js - 1;
+ sgemm_("N", "T", &mb, &i__3, &nb, &c_b52, &c__[is + js *
+ c_dim1], ldc, &b[js * b_dim1 + 1], ldb, &c_b52, &
+ f[is + f_dim1], ldf);
+ i__3 = js - 1;
+ sgemm_("N", "T", &mb, &i__3, &nb, &c_b52, &f[is + js *
+ f_dim1], ldf, &e[js * e_dim1 + 1], lde, &c_b52, &
+ f[is + f_dim1], ldf);
+ }
+ if (i__ < p) {
+ i__3 = *m - ie;
+ sgemm_("T", "N", &i__3, &nb, &mb, &c_b51, &a[is + (ie + 1)
+ * a_dim1], lda, &c__[is + js * c_dim1], ldc, &
+ c_b52, &c__[ie + 1 + js * c_dim1], ldc);
+ i__3 = *m - ie;
+ sgemm_("T", "N", &i__3, &nb, &mb, &c_b51, &d__[is + (ie +
+ 1) * d_dim1], ldd, &f[is + js * f_dim1], ldf, &
+ c_b52, &c__[ie + 1 + js * c_dim1], ldc);
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+
+ }
+
+ work[1] = (real) lwmin;
+
+ return 0;
+
+/* End of STGSYL */
+
+} /* stgsyl_ */
diff --git a/contrib/libs/clapack/stpcon.c b/contrib/libs/clapack/stpcon.c
new file mode 100644
index 0000000000..e16cac8636
--- /dev/null
+++ b/contrib/libs/clapack/stpcon.c
@@ -0,0 +1,230 @@
+/* stpcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int stpcon_(char *norm, char *uplo, char *diag, integer *n, 
+	real *ap, real *rcond, real *work, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    real r__1;
+
+    /* Local variables */
+    integer ix, kase, kase1;
+    real scale;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    real anorm;
+    extern /* Subroutine */ int srscl_(integer *, real *, real *, integer *);
+    logical upper;
+    real xnorm;
+    extern /* Subroutine */ int slacn2_(integer *, real *, real *, integer *, 
+	    real *, integer *, integer *);
+    extern doublereal slamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer isamax_(integer *, real *, integer *);
+    real ainvnm;
+    logical onenrm;
+    extern doublereal slantp_(char *, char *, char *, integer *, real *, real 
+	    *);
+    char normin[1];
+    extern /* Subroutine */ int slatps_(char *, char *, char *, char *, 
+	    integer *, real *, real *, real *, real *, integer *);
+    real smlnum;
+    logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  STPCON estimates the reciprocal of the condition number of a packed */
+/*  triangular matrix A, in either the 1-norm or the infinity-norm. */
+
+/*  The norm of A is computed and an estimate is obtained for */
+/*  norm(inv(A)), then the reciprocal of the condition number is */
+/*  computed as */
+/*     RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/*  Arguments */
+/*  ========= */
+
+/*  NORM    (input) CHARACTER*1 */
+/*          Specifies whether the 1-norm condition number or the */
+/*          infinity-norm condition number is required: */
+/*          = '1' or 'O':  1-norm; */
+/*          = 'I':         Infinity-norm. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  A is upper triangular; */
+/*          = 'L':  A is lower triangular. */
+
+/*  DIAG    (input) CHARACTER*1 */
+/*          = 'N':  A is non-unit triangular; */
+/*          = 'U':  A is unit triangular. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  AP      (input) REAL array, dimension (N*(N+1)/2) */
+/*          The upper or lower triangular matrix A, packed columnwise in */
+/*          a linear array.  The j-th column of A is stored in the array */
+/*          AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/*          If DIAG = 'U', the diagonal elements of A are not referenced */
+/*          and are assumed to be 1. */
+
+/*  RCOND   (output) REAL */
+/*          The reciprocal of the condition number of the matrix A, */
+/*          computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/*  WORK    (workspace) REAL array, dimension (3*N) */
+
+/*  IWORK   (workspace) INTEGER array, dimension (N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --iwork;
+    --work;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+    nounit = lsame_(diag, "N");
+
+    if (! onenrm && ! lsame_(norm, "I")) {
+	*info = -1;
+    } else if (! upper && ! lsame_(uplo, "L")) {
+	*info = -2;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("STPCON", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	*rcond = 1.f;
+	return 0;
+    }
+
+    *rcond = 0.f;
+    smlnum = slamch_("Safe minimum") * (real) max(1,*n);
+
+/*     Compute the norm of the triangular matrix A. */
+
+    anorm = slantp_(norm, uplo, diag, n, &ap[1], &work[1]);
+
+/*     Continue only if ANORM > 0. */
+
+    if (anorm > 0.f) {
+
+/*        Estimate the norm of the inverse of A. */
+
+	ainvnm = 0.f;
+	*(unsigned char *)normin = 'N';
+	if (onenrm) {
+	    kase1 = 1;
+	} else {
+	    kase1 = 2;
+	}
+	kase = 0;
+/*        Reverse-communication loop: SLACN2 returns KASE /= 0 to request */
+/*        one triangular solve per call (with A when KASE == KASE1, else */
+/*        with A').  The solves overwrite WORK(1:N); WORK(2N+1:3N) is */
+/*        passed to SLATPS as additional workspace. */
+L10:
+	slacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+	if (kase != 0) {
+	    if (kase == kase1) {
+
+/*              Multiply by inv(A). */
+
+		slatps_(uplo, "No transpose", diag, normin, n, &ap[1], &work[
+			1], &scale, &work[(*n << 1) + 1], info);
+	    } else {
+
+/*              Multiply by inv(A'). */
+
+		slatps_(uplo, "Transpose", diag, normin, n, &ap[1], &work[1], 
+			&scale, &work[(*n << 1) + 1], info);
+	    }
+	    *(unsigned char *)normin = 'Y';
+
+/*           Multiply by 1/SCALE if doing so will not cause overflow. */
+
+	    if (scale != 1.f) {
+		ix = isamax_(n, &work[1], &c__1);
+		xnorm = (r__1 = work[ix], dabs(r__1));
+		if (scale < xnorm * smlnum || scale == 0.f) {
+		    goto L20;
+		}
+		srscl_(n, &scale, &work[1], &c__1);
+	    }
+	    goto L10;
+	}
+
+/*        Compute the estimate of the reciprocal condition number. */
+
+	if (ainvnm != 0.f) {
+	    *rcond = 1.f / anorm / ainvnm;
+	}
+    }
+
+L20:
+    return 0;
+
+/*     End of STPCON */
+
+} /* stpcon_ */
diff --git a/contrib/libs/clapack/stprfs.c b/contrib/libs/clapack/stprfs.c
new file mode 100644
index 0000000000..f72c96b4c0
--- /dev/null
+++ b/contrib/libs/clapack/stprfs.c
@@ -0,0 +1,493 @@
+/* stprfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b19 = -1.f;
+
+/* Subroutine */ int stprfs_(char *uplo, char *trans, char *diag, integer *n, 
+	integer *nrhs, real *ap, real *b, integer *ldb, real *x, integer *ldx, 
+	real *ferr, real *berr, real *work, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3;
+    real r__1, r__2, r__3;
+
+    /* Local variables */
+    integer i__, j, k;
+    real s;
+    integer kc;
+    real xk;
+    integer nz;
+    real eps;
+    integer kase;
+    real safe1, safe2;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    logical upper;
+    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *, 
+	    integer *), saxpy_(integer *, real *, real *, integer *, real *, 
+	    integer *), stpmv_(char *, char *, char *, integer *, real *, 
+	    real *, integer *), stpsv_(char *, char *, 
+	    char *, integer *, real *, real *, integer *), slacn2_(integer *, real *, real *, integer *, real *, 
+	    integer *, integer *);
+    extern doublereal slamch_(char *);
+    real safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical notran;
+    char transt[1];
+    logical nounit;
+    real lstres;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  STPRFS provides error bounds and backward error estimates for the */
+/*  solution to a system of linear equations with a triangular packed */
+/*  coefficient matrix. */
+
+/*  The solution matrix X must be computed by STPTRS or some other */
+/*  means before entering this routine.  STPRFS does not do iterative */
+/*  refinement because doing so cannot improve the backward error. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  A is upper triangular; */
+/*          = 'L':  A is lower triangular. */
+
+/*  TRANS   (input) CHARACTER*1 */
+/*          Specifies the form of the system of equations: */
+/*          = 'N':  A * X = B     (No transpose) */
+/*          = 'T':  A**T * X = B  (Transpose) */
+/*          = 'C':  A**H * X = B  (Conjugate transpose = Transpose) */
+
+/*  DIAG    (input) CHARACTER*1 */
+/*          = 'N':  A is non-unit triangular; */
+/*          = 'U':  A is unit triangular. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrices B and X.  NRHS >= 0. */
+
+/*  AP      (input) REAL array, dimension (N*(N+1)/2) */
+/*          The upper or lower triangular matrix A, packed columnwise in */
+/*          a linear array.  The j-th column of A is stored in the array */
+/*          AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+/*          If DIAG = 'U', the diagonal elements of A are not referenced */
+/*          and are assumed to be 1. */
+
+/*  B       (input) REAL array, dimension (LDB,NRHS) */
+/*          The right hand side matrix B. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  X       (input) REAL array, dimension (LDX,NRHS) */
+/*          The solution matrix X. */
+
+/*  LDX     (input) INTEGER */
+/*          The leading dimension of the array X.  LDX >= max(1,N). */
+
+/*  FERR    (output) REAL array, dimension (NRHS) */
+/*          The estimated forward error bound for each solution vector */
+/*          X(j) (the j-th column of the solution matrix X). */
+/*          If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/*          is an estimated upper bound for the magnitude of the largest */
+/*          element in (X(j) - XTRUE) divided by the magnitude of the */
+/*          largest element in X(j).  The estimate is as reliable as */
+/*          the estimate for RCOND, and is almost always a slight */
+/*          overestimate of the true error. */
+
+/*  BERR    (output) REAL array, dimension (NRHS) */
+/*          The componentwise relative backward error of each solution */
+/*          vector X(j) (i.e., the smallest relative change in */
+/*          any element of A or B that makes X(j) an exact solution). */
+
+/*  WORK    (workspace) REAL array, dimension (3*N) */
+
+/*  IWORK   (workspace) INTEGER array, dimension (N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    notran = lsame_(trans, "N");
+    nounit = lsame_(diag, "N");
+
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (! notran && ! lsame_(trans, "T") && ! 
+	    lsame_(trans, "C")) {
+	*info = -2;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*nrhs < 0) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    } else if (*ldx < max(1,*n)) {
+	*info = -10;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("STPRFS", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] = 0.f;
+	    berr[j] = 0.f;
+/* L10: */
+	}
+	return 0;
+    }
+
+    if (notran) {
+	*(unsigned char *)transt = 'T';
+    } else {
+	*(unsigned char *)transt = 'N';
+    }
+
+/*     TRANST is the opposite operation to TRANS; it is used below when */
+/*     the SLACN2 estimation loop requests a multiply by inv(op(A)'). */
+
+/*     NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+    nz = *n + 1;
+    eps = slamch_("Epsilon");
+    safmin = slamch_("Safe minimum");
+    safe1 = nz * safmin;
+    safe2 = safe1 / eps;
+
+/*     Do for each right hand side */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+/*        Compute residual R = B - op(A) * X, */
+/*        where op(A) = A or A', depending on TRANS. */
+
+	scopy_(n, &x[j * x_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+	stpmv_(uplo, trans, diag, n, &ap[1], &work[*n + 1], &c__1);
+	saxpy_(n, &c_b19, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+
+/*        WORK(N+1:2N) now holds op(A)*X(j) - B(j); only its absolute */
+/*        values are used below, so the sign of the residual is */
+/*        immaterial. */
+
+/*        Compute componentwise relative backward error from formula */
+
+/*        max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/*        where abs(Z) is the componentwise absolute value of the matrix */
+/*        or vector Z.  If the i-th component of the denominator is less */
+/*        than SAFE2, then SAFE1 is added to the i-th components of the */
+/*        numerator and denominator before dividing. */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    work[i__] = (r__1 = b[i__ + j * b_dim1], dabs(r__1));
+/* L20: */
+	}
+
+	if (notran) {
+
+/*           Compute abs(A)*abs(X) + abs(B). */
+
+	    if (upper) {
+		kc = 1;
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+			i__3 = k;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    work[i__] += (r__1 = ap[kc + i__ - 1], dabs(r__1))
+				     * xk;
+/* L30: */
+			}
+			kc += k;
+/* L40: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+			i__3 = k - 1;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    work[i__] += (r__1 = ap[kc + i__ - 1], dabs(r__1))
+				     * xk;
+/* L50: */
+			}
+			work[k] += xk;
+			kc += k;
+/* L60: */
+		    }
+		}
+	    } else {
+		kc = 1;
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+			i__3 = *n;
+			for (i__ = k; i__ <= i__3; ++i__) {
+			    work[i__] += (r__1 = ap[kc + i__ - k], dabs(r__1))
+				     * xk;
+/* L70: */
+			}
+			kc = kc + *n - k + 1;
+/* L80: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+			i__3 = *n;
+			for (i__ = k + 1; i__ <= i__3; ++i__) {
+			    work[i__] += (r__1 = ap[kc + i__ - k], dabs(r__1))
+				     * xk;
+/* L90: */
+			}
+			work[k] += xk;
+			kc = kc + *n - k + 1;
+/* L100: */
+		    }
+		}
+	    }
+	} else {
+
+/*           Compute abs(A')*abs(X) + abs(B). */
+
+	    if (upper) {
+		kc = 1;
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			s = 0.f;
+			i__3 = k;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    s += (r__1 = ap[kc + i__ - 1], dabs(r__1)) * (
+				    r__2 = x[i__ + j * x_dim1], dabs(r__2));
+/* L110: */
+			}
+			work[k] += s;
+			kc += k;
+/* L120: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			s = (r__1 = x[k + j * x_dim1], dabs(r__1));
+			i__3 = k - 1;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    s += (r__1 = ap[kc + i__ - 1], dabs(r__1)) * (
+				    r__2 = x[i__ + j * x_dim1], dabs(r__2));
+/* L130: */
+			}
+			work[k] += s;
+			kc += k;
+/* L140: */
+		    }
+		}
+	    } else {
+		kc = 1;
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			s = 0.f;
+			i__3 = *n;
+			for (i__ = k; i__ <= i__3; ++i__) {
+			    s += (r__1 = ap[kc + i__ - k], dabs(r__1)) * (
+				    r__2 = x[i__ + j * x_dim1], dabs(r__2));
+/* L150: */
+			}
+			work[k] += s;
+			kc = kc + *n - k + 1;
+/* L160: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			s = (r__1 = x[k + j * x_dim1], dabs(r__1));
+			i__3 = *n;
+			for (i__ = k + 1; i__ <= i__3; ++i__) {
+			    s += (r__1 = ap[kc + i__ - k], dabs(r__1)) * (
+				    r__2 = x[i__ + j * x_dim1], dabs(r__2));
+/* L170: */
+			}
+			work[k] += s;
+			kc = kc + *n - k + 1;
+/* L180: */
+		    }
+		}
+	    }
+	}
+	s = 0.f;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (work[i__] > safe2) {
+/* Computing MAX */
+		r__2 = s, r__3 = (r__1 = work[*n + i__], dabs(r__1)) / work[
+			i__];
+		s = dmax(r__2,r__3);
+	    } else {
+/* Computing MAX */
+		r__2 = s, r__3 = ((r__1 = work[*n + i__], dabs(r__1)) + safe1)
+			 / (work[i__] + safe1);
+		s = dmax(r__2,r__3);
+	    }
+/* L190: */
+	}
+	berr[j] = s;
+
+/*        Bound error from formula */
+
+/*        norm(X - XTRUE) / norm(X) .le. FERR = */
+/*        norm( abs(inv(op(A)))* */
+/*           ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/*        where */
+/*          norm(Z) is the magnitude of the largest component of Z */
+/*          inv(op(A)) is the inverse of op(A) */
+/*          abs(Z) is the componentwise absolute value of the matrix or */
+/*             vector Z */
+/*          NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/*          EPS is machine epsilon */
+
+/*        The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/*        is incremented by SAFE1 if the i-th component of */
+/*        abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/*        Use SLACN2 to estimate the infinity-norm of the matrix */
+/*           inv(op(A)) * diag(W), */
+/*        where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (work[i__] > safe2) {
+		work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps * 
+			work[i__];
+	    } else {
+		work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps * 
+			work[i__] + safe1;
+	    }
+/* L200: */
+	}
+
+	kase = 0;
+L210:
+	slacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+		kase, isave);
+	if (kase != 0) {
+	    if (kase == 1) {
+
+/*              Multiply by diag(W)*inv(op(A)'). */
+
+		stpsv_(uplo, transt, diag, n, &ap[1], &work[*n + 1], &c__1);
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    work[*n + i__] = work[i__] * work[*n + i__];
+/* L220: */
+		}
+	    } else {
+
+/*              Multiply by inv(op(A))*diag(W). */
+
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    work[*n + i__] = work[i__] * work[*n + i__];
+/* L230: */
+		}
+		stpsv_(uplo, trans, diag, n, &ap[1], &work[*n + 1], &c__1);
+	    }
+	    goto L210;
+	}
+
+/*        Normalize error. */
+
+	lstres = 0.f;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    r__2 = lstres, r__3 = (r__1 = x[i__ + j * x_dim1], dabs(r__1));
+	    lstres = dmax(r__2,r__3);
+/* L240: */
+	}
+	if (lstres != 0.f) {
+	    ferr[j] /= lstres;
+	}
+
+/* L250: */
+    }
+
+    return 0;
+
+/*     End of STPRFS */
+
+} /* stprfs_ */
diff --git a/contrib/libs/clapack/stptri.c b/contrib/libs/clapack/stptri.c
new file mode 100644
index 0000000000..3f98bb21a0
--- /dev/null
+++ b/contrib/libs/clapack/stptri.c
@@ -0,0 +1,218 @@
+/* stptri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int stptri_(char *uplo, char *diag, integer *n, real *ap, 
+	integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer j, jc, jj;
+    real ajj;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+    logical upper;
+    extern /* Subroutine */ int stpmv_(char *, char *, char *, integer *, 
+	    real *, real *, integer *), xerbla_(char *
+, integer *);
+    integer jclast;
+    logical nounit;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  STPTRI computes the inverse of a real upper or lower triangular */
+/*  matrix A stored in packed format. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  A is upper triangular; */
+/*          = 'L':  A is lower triangular. */
+
+/*  DIAG    (input) CHARACTER*1 */
+/*          = 'N':  A is non-unit triangular; */
+/*          = 'U':  A is unit triangular. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  AP      (input/output) REAL array, dimension (N*(N+1)/2) */
+/*          On entry, the upper or lower triangular matrix A, stored */
+/*          columnwise in a linear array.  The j-th column of A is stored */
+/*          in the array AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*((2*n-j)/2) = A(i,j) for j<=i<=n. */
+/*          See below for further details. */
+/*          On exit, the (triangular) inverse of the original matrix, in */
+/*          the same packed storage format. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, A(i,i) is exactly zero.  The triangular */
+/*                matrix is singular and its inverse can not be computed. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  A triangular matrix A can be transferred to packed storage using one */
+/*  of the following program segments: */
+
+/*  UPLO = 'U':                      UPLO = 'L': */
+
+/*        JC = 1                           JC = 1 */
+/*        DO 2 J = 1, N                    DO 2 J = 1, N */
+/*           DO 1 I = 1, J                    DO 1 I = J, N */
+/*              AP(JC+I-1) = A(I,J)              AP(JC+I-J) = A(I,J) */
+/*      1    CONTINUE                    1    CONTINUE */
+/*           JC = JC + J                      JC = JC + N - J + 1 */
+/*      2 CONTINUE                      2 CONTINUE */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    nounit = lsame_(diag, "N");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("STPTRI", &i__1);
+	return 0;
+    }
+
+/*     Check for singularity if non-unit. */
+
+/*     Note: *INFO itself is used as the loop index (an f2c artifact of */
+/*     the Fortran DO INFO = 1, N), so an exactly zero diagonal entry */
+/*     returns immediately with INFO = index of that entry. */
+
+    if (nounit) {
+	if (upper) {
+	    jj = 0;
+	    i__1 = *n;
+	    for (*info = 1; *info <= i__1; ++(*info)) {
+		jj += *info;
+		if (ap[jj] == 0.f) {
+		    return 0;
+		}
+/* L10: */
+	    }
+	} else {
+	    jj = 1;
+	    i__1 = *n;
+	    for (*info = 1; *info <= i__1; ++(*info)) {
+		if (ap[jj] == 0.f) {
+		    return 0;
+		}
+		jj = jj + *n - *info + 1;
+/* L20: */
+	    }
+	}
+	*info = 0;
+    }
+
+    if (upper) {
+
+/*        Compute inverse of upper triangular matrix. */
+
+	jc = 1;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    if (nounit) {
+		ap[jc + j - 1] = 1.f / ap[jc + j - 1];
+		ajj = -ap[jc + j - 1];
+	    } else {
+		ajj = -1.f;
+	    }
+
+/*           Compute elements 1:j-1 of j-th column. */
+
+	    i__2 = j - 1;
+	    stpmv_("Upper", "No transpose", diag, &i__2, &ap[1], &ap[jc], &
+		    c__1);
+	    i__2 = j - 1;
+	    sscal_(&i__2, &ajj, &ap[jc], &c__1);
+	    jc += j;
+/* L30: */
+	}
+
+    } else {
+
+/*        Compute inverse of lower triangular matrix. */
+
+	jc = *n * (*n + 1) / 2;
+
+/*        JC indexes the diagonal element A(j,j) in packed storage. */
+/*        JCLAST (set at the end of each iteration) is read only when */
+/*        j < n, so its indeterminate first-iteration value is never */
+/*        used. */
+
+	for (j = *n; j >= 1; --j) {
+	    if (nounit) {
+		ap[jc] = 1.f / ap[jc];
+		ajj = -ap[jc];
+	    } else {
+		ajj = -1.f;
+	    }
+	    if (j < *n) {
+
+/*              Compute elements j+1:n of j-th column. */
+
+		i__1 = *n - j;
+		stpmv_("Lower", "No transpose", diag, &i__1, &ap[jclast], &ap[
+			jc + 1], &c__1);
+		i__1 = *n - j;
+		sscal_(&i__1, &ajj, &ap[jc + 1], &c__1);
+	    }
+	    jclast = jc;
+	    jc = jc - *n + j - 2;
+/* L40: */
+	}
+    }
+
+    return 0;
+
+/*     End of STPTRI */
+
+} /* stptri_ */
diff --git a/contrib/libs/clapack/stptrs.c b/contrib/libs/clapack/stptrs.c
new file mode 100644
index 0000000000..48353337f7
--- /dev/null
+++ b/contrib/libs/clapack/stptrs.c
@@ -0,0 +1,192 @@
+/* stptrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int stptrs_(char *uplo, char *trans, char *diag, integer *n,
+ integer *nrhs, real *ap, real *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ integer j, jc;
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern /* Subroutine */ int stpsv_(char *, char *, char *, integer *,
+ real *, real *, integer *), xerbla_(char *
+, integer *);
+ logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STPTRS solves a triangular system of the form */
+
+/* A * X = B or A**T * X = B, */
+
+/* where A is a triangular matrix of order N stored in packed format, */
+/* and B is an N-by-NRHS matrix. A check is made to verify that A is */
+/* nonsingular. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose = Transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input) REAL array, dimension (N*(N+1)/2) */
+/* The upper or lower triangular matrix A, packed columnwise in */
+/* a linear array. The j-th column of A is stored in the array */
+/* AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, if INFO = 0, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element of A is zero, */
+/* indicating that the matrix is singular and the */
+/* solutions X have not been computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ nounit = lsame_(diag, "N");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*nrhs < 0) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STPTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check for singularity. */
+
+ if (nounit) {
+ if (upper) {
+ jc = 1;
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ if (ap[jc + *info - 1] == 0.f) {
+ return 0;
+ }
+ jc += *info;
+/* L10: */
+ }
+ } else {
+ jc = 1;
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ if (ap[jc] == 0.f) {
+ return 0;
+ }
+ jc = jc + *n - *info + 1;
+/* L20: */
+ }
+ }
+ }
+ *info = 0;
+
+/* Solve A * x = b or A' * x = b. */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ stpsv_(uplo, trans, diag, n, &ap[1], &b[j * b_dim1 + 1], &c__1);
+/* L30: */
+ }
+
+ return 0;
+
+/* End of STPTRS */
+
+} /* stptrs_ */
diff --git a/contrib/libs/clapack/stpttf.c b/contrib/libs/clapack/stpttf.c
new file mode 100644
index 0000000000..298f266554
--- /dev/null
+++ b/contrib/libs/clapack/stpttf.c
@@ -0,0 +1,499 @@
+/* stpttf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int stpttf_(char *transr, char *uplo, integer *n, real *ap,
+ real *arf, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, k, n1, n2, ij, jp, js, nt, lda, ijp;
+ logical normaltransr;
+ extern logical lsame_(char *, char *);
+ logical lower;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nisodd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+
+/* Purpose */
+/* ======= */
+
+/* STPTTF copies a triangular matrix A from standard packed format (TP) */
+/* to rectangular full packed format (TF). */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': ARF in Normal format is wanted; */
+/* = 'T': ARF in Conjugate-transpose format is wanted. */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) REAL array, dimension ( N*(N+1)/2 ), */
+/* On entry, the upper or lower triangular matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* ARF (output) REAL array, dimension ( N*(N+1)/2 ), */
+/* On exit, the upper or lower triangular matrix A stored in */
+/* RFP format. For a further discussion see Notes below. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Notes */
+/* ===== */
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* even. We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* the transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* the transpose of the last three columns of AP lower. */
+/* This covers the case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 03 04 05 33 43 53 */
+/* 13 14 15 00 44 54 */
+/* 23 24 25 10 11 55 */
+/* 33 34 35 20 21 22 */
+/* 00 44 45 30 31 32 */
+/* 01 11 55 40 41 42 */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* odd. We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* the transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* the transpose of the last two columns of AP lower. */
+/* This covers the case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 02 03 04 00 33 43 */
+/* 12 13 14 10 11 44 */
+/* 22 23 24 20 21 22 */
+/* 00 33 34 30 31 32 */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+/* RFP A RFP A */
+
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ *info = 0;
+ normaltransr = lsame_(transr, "N");
+ lower = lsame_(uplo, "L");
+ if (! normaltransr && ! lsame_(transr, "T")) {
+ *info = -1;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STPTTF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (normaltransr) {
+ arf[0] = ap[0];
+ } else {
+ arf[0] = ap[0];
+ }
+ return 0;
+ }
+
+/* Size of array ARF(0:NT-1) */
+
+ nt = *n * (*n + 1) / 2;
+
+/* Set N1 and N2 depending on LOWER */
+
+ if (lower) {
+ n2 = *n / 2;
+ n1 = *n - n2;
+ } else {
+ n1 = *n / 2;
+ n2 = *n - n1;
+ }
+
+/* If N is odd, set NISODD = .TRUE. */
+/* If N is even, set K = N/2 and NISODD = .FALSE. */
+
+/* set lda of ARF^C; ARF^C is (0:(N+1)/2-1,0:N-noe) */
+/* where noe = 0 if n is even, noe = 1 if n is odd */
+
+ if (*n % 2 == 0) {
+ k = *n / 2;
+ nisodd = FALSE_;
+ lda = *n + 1;
+ } else {
+ nisodd = TRUE_;
+ lda = *n;
+ }
+
+/* ARF^C has lda rows and n+1-noe cols */
+
+ if (! normaltransr) {
+ lda = (*n + 1) / 2;
+ }
+
+/* start execution: there are eight cases */
+
+ if (nisodd) {
+
+/* N is odd */
+
+ if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+ if (lower) {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'L' */
+
+ ijp = 0;
+ jp = 0;
+ i__1 = n2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - 1;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ ij = i__ + jp;
+ arf[ij] = ap[ijp];
+ ++ijp;
+ }
+ jp += lda;
+ }
+ i__1 = n2 - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = n2;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ ij = i__ + j * lda;
+ arf[ij] = ap[ijp];
+ ++ijp;
+ }
+ }
+
+ } else {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'U' */
+
+ ijp = 0;
+ i__1 = n1 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ ij = n2 + j;
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ arf[ij] = ap[ijp];
+ ++ijp;
+ ij += lda;
+ }
+ }
+ js = 0;
+ i__1 = *n - 1;
+ for (j = n1; j <= i__1; ++j) {
+ ij = js;
+ i__2 = js + j;
+ for (ij = js; ij <= i__2; ++ij) {
+ arf[ij] = ap[ijp];
+ ++ijp;
+ }
+ js += lda;
+ }
+
+ }
+
+ } else {
+
+/* N is odd and TRANSR = 'T' */
+
+ if (lower) {
+
+/* N is odd, TRANSR = 'T', and UPLO = 'L' */
+
+ ijp = 0;
+ i__1 = n2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = *n * lda - 1;
+ i__3 = lda;
+ for (ij = i__ * (lda + 1); i__3 < 0 ? ij >= i__2 : ij <=
+ i__2; ij += i__3) {
+ arf[ij] = ap[ijp];
+ ++ijp;
+ }
+ }
+ js = 1;
+ i__1 = n2 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__3 = js + n2 - j - 1;
+ for (ij = js; ij <= i__3; ++ij) {
+ arf[ij] = ap[ijp];
+ ++ijp;
+ }
+ js = js + lda + 1;
+ }
+
+ } else {
+
+/* N is odd, TRANSR = 'T', and UPLO = 'U' */
+
+ ijp = 0;
+ js = n2 * lda;
+ i__1 = n1 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__3 = js + j;
+ for (ij = js; ij <= i__3; ++ij) {
+ arf[ij] = ap[ijp];
+ ++ijp;
+ }
+ js += lda;
+ }
+ i__1 = n1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__3 = i__ + (n1 + i__) * lda;
+ i__2 = lda;
+ for (ij = i__; i__2 < 0 ? ij >= i__3 : ij <= i__3; ij +=
+ i__2) {
+ arf[ij] = ap[ijp];
+ ++ijp;
+ }
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* N is even */
+
+ if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+ if (lower) {
+
+/* N is even, TRANSR = 'N', and UPLO = 'L' */
+
+ ijp = 0;
+ jp = 0;
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - 1;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ ij = i__ + 1 + jp;
+ arf[ij] = ap[ijp];
+ ++ijp;
+ }
+ jp += lda;
+ }
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = k - 1;
+ for (j = i__; j <= i__2; ++j) {
+ ij = i__ + j * lda;
+ arf[ij] = ap[ijp];
+ ++ijp;
+ }
+ }
+
+ } else {
+
+/* N is even, TRANSR = 'N', and UPLO = 'U' */
+
+ ijp = 0;
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ ij = k + 1 + j;
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ arf[ij] = ap[ijp];
+ ++ijp;
+ ij += lda;
+ }
+ }
+ js = 0;
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+ ij = js;
+ i__2 = js + j;
+ for (ij = js; ij <= i__2; ++ij) {
+ arf[ij] = ap[ijp];
+ ++ijp;
+ }
+ js += lda;
+ }
+
+ }
+
+ } else {
+
+/* N is even and TRANSR = 'T' */
+
+ if (lower) {
+
+/* N is even, TRANSR = 'T', and UPLO = 'L' */
+
+ ijp = 0;
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = (*n + 1) * lda - 1;
+ i__3 = lda;
+ for (ij = i__ + (i__ + 1) * lda; i__3 < 0 ? ij >= i__2 :
+ ij <= i__2; ij += i__3) {
+ arf[ij] = ap[ijp];
+ ++ijp;
+ }
+ }
+ js = 0;
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__3 = js + k - j - 1;
+ for (ij = js; ij <= i__3; ++ij) {
+ arf[ij] = ap[ijp];
+ ++ijp;
+ }
+ js = js + lda + 1;
+ }
+
+ } else {
+
+/* N is even, TRANSR = 'T', and UPLO = 'U' */
+
+ ijp = 0;
+ js = (k + 1) * lda;
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__3 = js + j;
+ for (ij = js; ij <= i__3; ++ij) {
+ arf[ij] = ap[ijp];
+ ++ijp;
+ }
+ js += lda;
+ }
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__3 = i__ + (k + i__) * lda;
+ i__2 = lda;
+ for (ij = i__; i__2 < 0 ? ij >= i__3 : ij <= i__3; ij +=
+ i__2) {
+ arf[ij] = ap[ijp];
+ ++ijp;
+ }
+ }
+
+ }
+
+ }
+
+ }
+
+ return 0;
+
+/* End of STPTTF */
+
+} /* stpttf_ */
diff --git a/contrib/libs/clapack/stpttr.c b/contrib/libs/clapack/stpttr.c
new file mode 100644
index 0000000000..123038f0da
--- /dev/null
+++ b/contrib/libs/clapack/stpttr.c
@@ -0,0 +1,144 @@
+/* stpttr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int stpttr_(char *uplo, integer *n, real *ap, real *a,
+ integer *lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, k;
+ extern logical lsame_(char *, char *);
+ logical lower;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Julien Langou of the Univ. of Colorado Denver -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STPTTR copies a triangular matrix A from standard packed format (TP) */
+/* to standard full format (TR). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular. */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) REAL array, dimension ( N*(N+1)/2 ), */
+/* On entry, the upper or lower triangular matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* A (output) REAL array, dimension ( LDA, N ) */
+/* On exit, the triangular matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ lower = lsame_(uplo, "L");
+ if (! lower && ! lsame_(uplo, "U")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STPTTR", &i__1);
+ return 0;
+ }
+
+ if (lower) {
+ k = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ ++k;
+ a[i__ + j * a_dim1] = ap[k];
+ }
+ }
+ } else {
+ k = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ ++k;
+ a[i__ + j * a_dim1] = ap[k];
+ }
+ }
+ }
+
+
+ return 0;
+
+/* End of STPTTR */
+
+} /* stpttr_ */
diff --git a/contrib/libs/clapack/strcon.c b/contrib/libs/clapack/strcon.c
new file mode 100644
index 0000000000..430ca58618
--- /dev/null
+++ b/contrib/libs/clapack/strcon.c
@@ -0,0 +1,239 @@
+/* strcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int strcon_(char *norm, char *uplo, char *diag, integer *n,
+ real *a, integer *lda, real *rcond, real *work, integer *iwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1;
+ real r__1;
+
+ /* Local variables */
+ integer ix, kase, kase1;
+ real scale;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ real anorm;
+ extern /* Subroutine */ int srscl_(integer *, real *, real *, integer *);
+ logical upper;
+ real xnorm;
+ extern /* Subroutine */ int slacn2_(integer *, real *, real *, integer *,
+ real *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ real ainvnm;
+ logical onenrm;
+ char normin[1];
+ extern doublereal slantr_(char *, char *, char *, integer *, integer *,
+ real *, integer *, real *);
+ extern /* Subroutine */ int slatrs_(char *, char *, char *, char *,
+ integer *, real *, integer *, real *, real *, real *, integer *);
+ real smlnum;
+ logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STRCON estimates the reciprocal of the condition number of a */
+/* triangular matrix A, in either the 1-norm or the infinity-norm. */
+
+/* The norm of A is computed and an estimate is obtained for */
+/* norm(inv(A)), then the reciprocal of the condition number is */
+/* computed as */
+/* RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The triangular matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of the array A contains the upper */
+/* triangular matrix, and the strictly lower triangular part of */
+/* A is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of the array A contains the lower triangular */
+/* matrix, and the strictly upper triangular part of A is not */
+/* referenced. If DIAG = 'U', the diagonal elements of A are */
+/* also not referenced and are assumed to be 1. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* RCOND (output) REAL */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+ nounit = lsame_(diag, "N");
+
+ if (! onenrm && ! lsame_(norm, "I")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STRCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *rcond = 1.f;
+ return 0;
+ }
+
+ *rcond = 0.f;
+ smlnum = slamch_("Safe minimum") * (real) max(1,*n);
+
+/* Compute the norm of the triangular matrix A. */
+
+ anorm = slantr_(norm, uplo, diag, n, n, &a[a_offset], lda, &work[1]);
+
+/* Continue only if ANORM > 0. */
+
+ if (anorm > 0.f) {
+
+/* Estimate the norm of the inverse of A. */
+
+ ainvnm = 0.f;
+ *(unsigned char *)normin = 'N';
+ if (onenrm) {
+ kase1 = 1;
+ } else {
+ kase1 = 2;
+ }
+ kase = 0;
+L10:
+ slacn2_(n, &work[*n + 1], &work[1], &iwork[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+ if (kase == kase1) {
+
+/* Multiply by inv(A). */
+
+ slatrs_(uplo, "No transpose", diag, normin, n, &a[a_offset],
+ lda, &work[1], &scale, &work[(*n << 1) + 1], info);
+ } else {
+
+/* Multiply by inv(A'). */
+
+ slatrs_(uplo, "Transpose", diag, normin, n, &a[a_offset], lda,
+ &work[1], &scale, &work[(*n << 1) + 1], info);
+ }
+ *(unsigned char *)normin = 'Y';
+
+/* Multiply by 1/SCALE if doing so will not cause overflow. */
+
+ if (scale != 1.f) {
+ ix = isamax_(n, &work[1], &c__1);
+ xnorm = (r__1 = work[ix], dabs(r__1));
+ if (scale < xnorm * smlnum || scale == 0.f) {
+ goto L20;
+ }
+ srscl_(n, &scale, &work[1], &c__1);
+ }
+ goto L10;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.f) {
+ *rcond = 1.f / anorm / ainvnm;
+ }
+ }
+
+L20:
+ return 0;
+
+/* End of STRCON */
+
+} /* strcon_ */
diff --git a/contrib/libs/clapack/strevc.c b/contrib/libs/clapack/strevc.c
new file mode 100644
index 0000000000..1c95a80d61
--- /dev/null
+++ b/contrib/libs/clapack/strevc.c
@@ -0,0 +1,1223 @@
+/* strevc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static logical c_false = FALSE_;
+static integer c__1 = 1;
+static real c_b22 = 1.f;
+static real c_b25 = 0.f;
+static integer c__2 = 2;
+static logical c_true = TRUE_;
+
+/* Subroutine */ int strevc_(char *side, char *howmny, logical *select,
+ integer *n, real *t, integer *ldt, real *vl, integer *ldvl, real *vr,
+ integer *ldvr, integer *mm, integer *m, real *work, integer *info)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+ i__2, i__3;
+ real r__1, r__2, r__3, r__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k;
+ real x[4] /* was [2][2] */;
+ integer j1, j2, n2, ii, ki, ip, is;
+ real wi, wr, rec, ulp, beta, emax;
+ logical pair, allv;
+ integer ierr;
+ real unfl, ovfl, smin;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ logical over;
+ real vmax;
+ integer jnxt;
+ real scale;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ real remax;
+ logical leftv;
+ extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *);
+ logical bothv;
+ real vcrit;
+ logical somev;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ real xnorm;
+ extern /* Subroutine */ int saxpy_(integer *, real *, real *, integer *,
+ real *, integer *), slaln2_(logical *, integer *, integer *, real
+ *, real *, real *, integer *, real *, real *, real *, integer *,
+ real *, real *, real *, integer *, real *, real *, integer *),
+ slabad_(real *, real *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ extern integer isamax_(integer *, real *, integer *);
+ logical rightv;
+ real smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STREVC computes some or all of the right and/or left eigenvectors of */
+/* a real upper quasi-triangular matrix T. */
+/* Matrices of this type are produced by the Schur factorization of */
+/* a real general matrix: A = Q*T*Q**T, as computed by SHSEQR. */
+
+/* The right eigenvector x and the left eigenvector y of T corresponding */
+/* to an eigenvalue w are defined by: */
+
+/* T*x = w*x, (y**H)*T = w*(y**H) */
+
+/* where y**H denotes the conjugate transpose of y. */
+/* The eigenvalues are not input to this routine, but are read directly */
+/* from the diagonal blocks of T. */
+
+/* This routine returns the matrices X and/or Y of right and left */
+/* eigenvectors of T, or the products Q*X and/or Q*Y, where Q is an */
+/* input matrix. If Q is the orthogonal factor that reduces a matrix */
+/* A to Schur form T, then Q*X and Q*Y are the matrices of right and */
+/* left eigenvectors of A. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'R': compute right eigenvectors only; */
+/* = 'L': compute left eigenvectors only; */
+/* = 'B': compute both right and left eigenvectors. */
+
+/* HOWMNY (input) CHARACTER*1 */
+/* = 'A': compute all right and/or left eigenvectors; */
+/* = 'B': compute all right and/or left eigenvectors, */
+/* backtransformed by the matrices in VR and/or VL; */
+/* = 'S': compute selected right and/or left eigenvectors, */
+/* as indicated by the logical array SELECT. */
+
+/* SELECT (input/output) LOGICAL array, dimension (N) */
+/* If HOWMNY = 'S', SELECT specifies the eigenvectors to be */
+/* computed. */
+/* If w(j) is a real eigenvalue, the corresponding real */
+/* eigenvector is computed if SELECT(j) is .TRUE.. */
+/* If w(j) and w(j+1) are the real and imaginary parts of a */
+/* complex eigenvalue, the corresponding complex eigenvector is */
+/* computed if either SELECT(j) or SELECT(j+1) is .TRUE., and */
+/* on exit SELECT(j) is set to .TRUE. and SELECT(j+1) is set to */
+/* .FALSE.. */
+/* Not referenced if HOWMNY = 'A' or 'B'. */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. N >= 0. */
+
+/* T (input) REAL array, dimension (LDT,N) */
+/* The upper quasi-triangular matrix T in Schur canonical form. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max(1,N). */
+
+/* VL (input/output) REAL array, dimension (LDVL,MM) */
+/* On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must */
+/* contain an N-by-N matrix Q (usually the orthogonal matrix Q */
+/* of Schur vectors returned by SHSEQR). */
+/* On exit, if SIDE = 'L' or 'B', VL contains: */
+/* if HOWMNY = 'A', the matrix Y of left eigenvectors of T; */
+/* if HOWMNY = 'B', the matrix Q*Y; */
+/* if HOWMNY = 'S', the left eigenvectors of T specified by */
+/* SELECT, stored consecutively in the columns */
+/* of VL, in the same order as their */
+/* eigenvalues. */
+/* A complex eigenvector corresponding to a complex eigenvalue */
+/* is stored in two consecutive columns, the first holding the */
+/* real part, and the second the imaginary part. */
+/* Not referenced if SIDE = 'R'. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the array VL. LDVL >= 1, and if */
+/* SIDE = 'L' or 'B', LDVL >= N. */
+
+/* VR (input/output) REAL array, dimension (LDVR,MM) */
+/* On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must */
+/* contain an N-by-N matrix Q (usually the orthogonal matrix Q */
+/* of Schur vectors returned by SHSEQR). */
+/* On exit, if SIDE = 'R' or 'B', VR contains: */
+/* if HOWMNY = 'A', the matrix X of right eigenvectors of T; */
+/* if HOWMNY = 'B', the matrix Q*X; */
+/* if HOWMNY = 'S', the right eigenvectors of T specified by */
+/* SELECT, stored consecutively in the columns */
+/* of VR, in the same order as their */
+/* eigenvalues. */
+/* A complex eigenvector corresponding to a complex eigenvalue */
+/* is stored in two consecutive columns, the first holding the */
+/* real part and the second the imaginary part. */
+/* Not referenced if SIDE = 'L'. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. LDVR >= 1, and if */
+/* SIDE = 'R' or 'B', LDVR >= N. */
+
+/* MM (input) INTEGER */
+/* The number of columns in the arrays VL and/or VR. MM >= M. */
+
+/* M (output) INTEGER */
+/* The number of columns in the arrays VL and/or VR actually */
+/* used to store the eigenvectors. */
+/* If HOWMNY = 'A' or 'B', M is set to N. */
+/* Each selected real eigenvector occupies one column and each */
+/* selected complex eigenvector occupies two columns. */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The algorithm used in this program is basically backward (forward) */
+/* substitution, with scaling to make the the code robust against */
+/* possible overflow. */
+
+/* Each eigenvector is normalized so that the element of largest */
+/* magnitude has magnitude 1; here the magnitude of a complex number */
+/* (x,y) is taken to be |x| + |y|. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+ /* Parameter adjustments */
+ --select;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+
+ /* Function Body */
+ bothv = lsame_(side, "B");
+ rightv = lsame_(side, "R") || bothv;
+ leftv = lsame_(side, "L") || bothv;
+
+ allv = lsame_(howmny, "A");
+ over = lsame_(howmny, "B");
+ somev = lsame_(howmny, "S");
+
+ *info = 0;
+ if (! rightv && ! leftv) {
+ *info = -1;
+ } else if (! allv && ! over && ! somev) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldt < max(1,*n)) {
+ *info = -6;
+ } else if (*ldvl < 1 || leftv && *ldvl < *n) {
+ *info = -8;
+ } else if (*ldvr < 1 || rightv && *ldvr < *n) {
+ *info = -10;
+ } else {
+
+/* Set M to the number of columns required to store the selected */
+/* eigenvectors, standardize the array SELECT if necessary, and */
+/* test MM. */
+
+ if (somev) {
+ *m = 0;
+ pair = FALSE_;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (pair) {
+ pair = FALSE_;
+ select[j] = FALSE_;
+ } else {
+ if (j < *n) {
+ if (t[j + 1 + j * t_dim1] == 0.f) {
+ if (select[j]) {
+ ++(*m);
+ }
+ } else {
+ pair = TRUE_;
+ if (select[j] || select[j + 1]) {
+ select[j] = TRUE_;
+ *m += 2;
+ }
+ }
+ } else {
+ if (select[*n]) {
+ ++(*m);
+ }
+ }
+ }
+/* L10: */
+ }
+ } else {
+ *m = *n;
+ }
+
+ if (*mm < *m) {
+ *info = -11;
+ }
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STREVC", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Set the constants to control overflow. */
+
+ unfl = slamch_("Safe minimum");
+ ovfl = 1.f / unfl;
+ slabad_(&unfl, &ovfl);
+ ulp = slamch_("Precision");
+ smlnum = unfl * (*n / ulp);
+ bignum = (1.f - ulp) / smlnum;
+
+/* Compute 1-norm of each column of strictly upper triangular */
+/* part of T to control overflow in triangular solver. */
+
+ work[1] = 0.f;
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ work[j] = 0.f;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[j] += (r__1 = t[i__ + j * t_dim1], dabs(r__1));
+/* L20: */
+ }
+/* L30: */
+ }
+
+/* Index IP is used to specify the real or complex eigenvalue: */
+/* IP = 0, real eigenvalue, */
+/* 1, first of conjugate complex pair: (wr,wi) */
+/* -1, second of conjugate complex pair: (wr,wi) */
+
+ n2 = *n << 1;
+
+ if (rightv) {
+
+/* Compute right eigenvectors. */
+
+ ip = 0;
+ is = *m;
+ for (ki = *n; ki >= 1; --ki) {
+
+ if (ip == 1) {
+ goto L130;
+ }
+ if (ki == 1) {
+ goto L40;
+ }
+ if (t[ki + (ki - 1) * t_dim1] == 0.f) {
+ goto L40;
+ }
+ ip = -1;
+
+L40:
+ if (somev) {
+ if (ip == 0) {
+ if (! select[ki]) {
+ goto L130;
+ }
+ } else {
+ if (! select[ki - 1]) {
+ goto L130;
+ }
+ }
+ }
+
+/* Compute the KI-th eigenvalue (WR,WI). */
+
+ wr = t[ki + ki * t_dim1];
+ wi = 0.f;
+ if (ip != 0) {
+ wi = sqrt((r__1 = t[ki + (ki - 1) * t_dim1], dabs(r__1))) *
+ sqrt((r__2 = t[ki - 1 + ki * t_dim1], dabs(r__2)));
+ }
+/* Computing MAX */
+ r__1 = ulp * (dabs(wr) + dabs(wi));
+ smin = dmax(r__1,smlnum);
+
+ if (ip == 0) {
+
+/* Real right eigenvector */
+
+ work[ki + *n] = 1.f;
+
+/* Form right-hand side */
+
+ i__1 = ki - 1;
+ for (k = 1; k <= i__1; ++k) {
+ work[k + *n] = -t[k + ki * t_dim1];
+/* L50: */
+ }
+
+/* Solve the upper quasi-triangular system: */
+/* (T(1:KI-1,1:KI-1) - WR)*X = SCALE*WORK. */
+
+ jnxt = ki - 1;
+ for (j = ki - 1; j >= 1; --j) {
+ if (j > jnxt) {
+ goto L60;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j - 1;
+ if (j > 1) {
+ if (t[j + (j - 1) * t_dim1] != 0.f) {
+ j1 = j - 1;
+ jnxt = j - 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1-by-1 diagonal block */
+
+ slaln2_(&c_false, &c__1, &c__1, &smin, &c_b22, &t[j +
+ j * t_dim1], ldt, &c_b22, &c_b22, &work[j + *
+ n], n, &wr, &c_b25, x, &c__2, &scale, &xnorm,
+ &ierr);
+
+/* Scale X(1,1) to avoid overflow when updating */
+/* the right-hand side. */
+
+ if (xnorm > 1.f) {
+ if (work[j] > bignum / xnorm) {
+ x[0] /= xnorm;
+ scale /= xnorm;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.f) {
+ sscal_(&ki, &scale, &work[*n + 1], &c__1);
+ }
+ work[j + *n] = x[0];
+
+/* Update right-hand side */
+
+ i__1 = j - 1;
+ r__1 = -x[0];
+ saxpy_(&i__1, &r__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+
+ } else {
+
+/* 2-by-2 diagonal block */
+
+ slaln2_(&c_false, &c__2, &c__1, &smin, &c_b22, &t[j -
+ 1 + (j - 1) * t_dim1], ldt, &c_b22, &c_b22, &
+ work[j - 1 + *n], n, &wr, &c_b25, x, &c__2, &
+ scale, &xnorm, &ierr);
+
+/* Scale X(1,1) and X(2,1) to avoid overflow when */
+/* updating the right-hand side. */
+
+ if (xnorm > 1.f) {
+/* Computing MAX */
+ r__1 = work[j - 1], r__2 = work[j];
+ beta = dmax(r__1,r__2);
+ if (beta > bignum / xnorm) {
+ x[0] /= xnorm;
+ x[1] /= xnorm;
+ scale /= xnorm;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.f) {
+ sscal_(&ki, &scale, &work[*n + 1], &c__1);
+ }
+ work[j - 1 + *n] = x[0];
+ work[j + *n] = x[1];
+
+/* Update right-hand side */
+
+ i__1 = j - 2;
+ r__1 = -x[0];
+ saxpy_(&i__1, &r__1, &t[(j - 1) * t_dim1 + 1], &c__1,
+ &work[*n + 1], &c__1);
+ i__1 = j - 2;
+ r__1 = -x[1];
+ saxpy_(&i__1, &r__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+ }
+L60:
+ ;
+ }
+
+/* Copy the vector x or Q*x to VR and normalize. */
+
+ if (! over) {
+ scopy_(&ki, &work[*n + 1], &c__1, &vr[is * vr_dim1 + 1], &
+ c__1);
+
+ ii = isamax_(&ki, &vr[is * vr_dim1 + 1], &c__1);
+ remax = 1.f / (r__1 = vr[ii + is * vr_dim1], dabs(r__1));
+ sscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1);
+
+ i__1 = *n;
+ for (k = ki + 1; k <= i__1; ++k) {
+ vr[k + is * vr_dim1] = 0.f;
+/* L70: */
+ }
+ } else {
+ if (ki > 1) {
+ i__1 = ki - 1;
+ sgemv_("N", n, &i__1, &c_b22, &vr[vr_offset], ldvr, &
+ work[*n + 1], &c__1, &work[ki + *n], &vr[ki *
+ vr_dim1 + 1], &c__1);
+ }
+
+ ii = isamax_(n, &vr[ki * vr_dim1 + 1], &c__1);
+ remax = 1.f / (r__1 = vr[ii + ki * vr_dim1], dabs(r__1));
+ sscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1);
+ }
+
+ } else {
+
+/* Complex right eigenvector. */
+
+/* Initial solve */
+/* [ (T(KI-1,KI-1) T(KI-1,KI) ) - (WR + I* WI)]*X = 0. */
+/* [ (T(KI,KI-1) T(KI,KI) ) ] */
+
+ if ((r__1 = t[ki - 1 + ki * t_dim1], dabs(r__1)) >= (r__2 = t[
+ ki + (ki - 1) * t_dim1], dabs(r__2))) {
+ work[ki - 1 + *n] = 1.f;
+ work[ki + n2] = wi / t[ki - 1 + ki * t_dim1];
+ } else {
+ work[ki - 1 + *n] = -wi / t[ki + (ki - 1) * t_dim1];
+ work[ki + n2] = 1.f;
+ }
+ work[ki + *n] = 0.f;
+ work[ki - 1 + n2] = 0.f;
+
+/* Form right-hand side */
+
+ i__1 = ki - 2;
+ for (k = 1; k <= i__1; ++k) {
+ work[k + *n] = -work[ki - 1 + *n] * t[k + (ki - 1) *
+ t_dim1];
+ work[k + n2] = -work[ki + n2] * t[k + ki * t_dim1];
+/* L80: */
+ }
+
+/* Solve upper quasi-triangular system: */
+/* (T(1:KI-2,1:KI-2) - (WR+i*WI))*X = SCALE*(WORK+i*WORK2) */
+
+ jnxt = ki - 2;
+ for (j = ki - 2; j >= 1; --j) {
+ if (j > jnxt) {
+ goto L90;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j - 1;
+ if (j > 1) {
+ if (t[j + (j - 1) * t_dim1] != 0.f) {
+ j1 = j - 1;
+ jnxt = j - 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1-by-1 diagonal block */
+
+ slaln2_(&c_false, &c__1, &c__2, &smin, &c_b22, &t[j +
+ j * t_dim1], ldt, &c_b22, &c_b22, &work[j + *
+ n], n, &wr, &wi, x, &c__2, &scale, &xnorm, &
+ ierr);
+
+/* Scale X(1,1) and X(1,2) to avoid overflow when */
+/* updating the right-hand side. */
+
+ if (xnorm > 1.f) {
+ if (work[j] > bignum / xnorm) {
+ x[0] /= xnorm;
+ x[2] /= xnorm;
+ scale /= xnorm;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.f) {
+ sscal_(&ki, &scale, &work[*n + 1], &c__1);
+ sscal_(&ki, &scale, &work[n2 + 1], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + n2] = x[2];
+
+/* Update the right-hand side */
+
+ i__1 = j - 1;
+ r__1 = -x[0];
+ saxpy_(&i__1, &r__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+ i__1 = j - 1;
+ r__1 = -x[2];
+ saxpy_(&i__1, &r__1, &t[j * t_dim1 + 1], &c__1, &work[
+ n2 + 1], &c__1);
+
+ } else {
+
+/* 2-by-2 diagonal block */
+
+ slaln2_(&c_false, &c__2, &c__2, &smin, &c_b22, &t[j -
+ 1 + (j - 1) * t_dim1], ldt, &c_b22, &c_b22, &
+ work[j - 1 + *n], n, &wr, &wi, x, &c__2, &
+ scale, &xnorm, &ierr);
+
+/* Scale X to avoid overflow when updating */
+/* the right-hand side. */
+
+ if (xnorm > 1.f) {
+/* Computing MAX */
+ r__1 = work[j - 1], r__2 = work[j];
+ beta = dmax(r__1,r__2);
+ if (beta > bignum / xnorm) {
+ rec = 1.f / xnorm;
+ x[0] *= rec;
+ x[2] *= rec;
+ x[1] *= rec;
+ x[3] *= rec;
+ scale *= rec;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.f) {
+ sscal_(&ki, &scale, &work[*n + 1], &c__1);
+ sscal_(&ki, &scale, &work[n2 + 1], &c__1);
+ }
+ work[j - 1 + *n] = x[0];
+ work[j + *n] = x[1];
+ work[j - 1 + n2] = x[2];
+ work[j + n2] = x[3];
+
+/* Update the right-hand side */
+
+ i__1 = j - 2;
+ r__1 = -x[0];
+ saxpy_(&i__1, &r__1, &t[(j - 1) * t_dim1 + 1], &c__1,
+ &work[*n + 1], &c__1);
+ i__1 = j - 2;
+ r__1 = -x[1];
+ saxpy_(&i__1, &r__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+ i__1 = j - 2;
+ r__1 = -x[2];
+ saxpy_(&i__1, &r__1, &t[(j - 1) * t_dim1 + 1], &c__1,
+ &work[n2 + 1], &c__1);
+ i__1 = j - 2;
+ r__1 = -x[3];
+ saxpy_(&i__1, &r__1, &t[j * t_dim1 + 1], &c__1, &work[
+ n2 + 1], &c__1);
+ }
+L90:
+ ;
+ }
+
+/* Copy the vector x or Q*x to VR and normalize. */
+
+ if (! over) {
+ scopy_(&ki, &work[*n + 1], &c__1, &vr[(is - 1) * vr_dim1
+ + 1], &c__1);
+ scopy_(&ki, &work[n2 + 1], &c__1, &vr[is * vr_dim1 + 1], &
+ c__1);
+
+ emax = 0.f;
+ i__1 = ki;
+ for (k = 1; k <= i__1; ++k) {
+/* Computing MAX */
+ r__3 = emax, r__4 = (r__1 = vr[k + (is - 1) * vr_dim1]
+ , dabs(r__1)) + (r__2 = vr[k + is * vr_dim1],
+ dabs(r__2));
+ emax = dmax(r__3,r__4);
+/* L100: */
+ }
+
+ remax = 1.f / emax;
+ sscal_(&ki, &remax, &vr[(is - 1) * vr_dim1 + 1], &c__1);
+ sscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1);
+
+ i__1 = *n;
+ for (k = ki + 1; k <= i__1; ++k) {
+ vr[k + (is - 1) * vr_dim1] = 0.f;
+ vr[k + is * vr_dim1] = 0.f;
+/* L110: */
+ }
+
+ } else {
+
+ if (ki > 2) {
+ i__1 = ki - 2;
+ sgemv_("N", n, &i__1, &c_b22, &vr[vr_offset], ldvr, &
+ work[*n + 1], &c__1, &work[ki - 1 + *n], &vr[(
+ ki - 1) * vr_dim1 + 1], &c__1);
+ i__1 = ki - 2;
+ sgemv_("N", n, &i__1, &c_b22, &vr[vr_offset], ldvr, &
+ work[n2 + 1], &c__1, &work[ki + n2], &vr[ki *
+ vr_dim1 + 1], &c__1);
+ } else {
+ sscal_(n, &work[ki - 1 + *n], &vr[(ki - 1) * vr_dim1
+ + 1], &c__1);
+ sscal_(n, &work[ki + n2], &vr[ki * vr_dim1 + 1], &
+ c__1);
+ }
+
+ emax = 0.f;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+/* Computing MAX */
+ r__3 = emax, r__4 = (r__1 = vr[k + (ki - 1) * vr_dim1]
+ , dabs(r__1)) + (r__2 = vr[k + ki * vr_dim1],
+ dabs(r__2));
+ emax = dmax(r__3,r__4);
+/* L120: */
+ }
+ remax = 1.f / emax;
+ sscal_(n, &remax, &vr[(ki - 1) * vr_dim1 + 1], &c__1);
+ sscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1);
+ }
+ }
+
+ --is;
+ if (ip != 0) {
+ --is;
+ }
+L130:
+ if (ip == 1) {
+ ip = 0;
+ }
+ if (ip == -1) {
+ ip = 1;
+ }
+/* L140: */
+ }
+ }
+
+ if (leftv) {
+
+/* Compute left eigenvectors. */
+
+ ip = 0;
+ is = 1;
+ i__1 = *n;
+ for (ki = 1; ki <= i__1; ++ki) {
+
+ if (ip == -1) {
+ goto L250;
+ }
+ if (ki == *n) {
+ goto L150;
+ }
+ if (t[ki + 1 + ki * t_dim1] == 0.f) {
+ goto L150;
+ }
+ ip = 1;
+
+L150:
+ if (somev) {
+ if (! select[ki]) {
+ goto L250;
+ }
+ }
+
+/* Compute the KI-th eigenvalue (WR,WI). */
+
+ wr = t[ki + ki * t_dim1];
+ wi = 0.f;
+ if (ip != 0) {
+ wi = sqrt((r__1 = t[ki + (ki + 1) * t_dim1], dabs(r__1))) *
+ sqrt((r__2 = t[ki + 1 + ki * t_dim1], dabs(r__2)));
+ }
+/* Computing MAX */
+ r__1 = ulp * (dabs(wr) + dabs(wi));
+ smin = dmax(r__1,smlnum);
+
+ if (ip == 0) {
+
+/* Real left eigenvector. */
+
+ work[ki + *n] = 1.f;
+
+/* Form right-hand side */
+
+ i__2 = *n;
+ for (k = ki + 1; k <= i__2; ++k) {
+ work[k + *n] = -t[ki + k * t_dim1];
+/* L160: */
+ }
+
+/* Solve the quasi-triangular system: */
+/* (T(KI+1:N,KI+1:N) - WR)'*X = SCALE*WORK */
+
+ vmax = 1.f;
+ vcrit = bignum;
+
+ jnxt = ki + 1;
+ i__2 = *n;
+ for (j = ki + 1; j <= i__2; ++j) {
+ if (j < jnxt) {
+ goto L170;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j + 1;
+ if (j < *n) {
+ if (t[j + 1 + j * t_dim1] != 0.f) {
+ j2 = j + 1;
+ jnxt = j + 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1-by-1 diagonal block */
+
+/* Scale if necessary to avoid overflow when forming */
+/* the right-hand side. */
+
+ if (work[j] > vcrit) {
+ rec = 1.f / vmax;
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ vmax = 1.f;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 1;
+ work[j + *n] -= sdot_(&i__3, &t[ki + 1 + j * t_dim1],
+ &c__1, &work[ki + 1 + *n], &c__1);
+
+/* Solve (T(J,J)-WR)'*X = WORK */
+
+ slaln2_(&c_false, &c__1, &c__1, &smin, &c_b22, &t[j +
+ j * t_dim1], ldt, &c_b22, &c_b22, &work[j + *
+ n], n, &wr, &c_b25, x, &c__2, &scale, &xnorm,
+ &ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.f) {
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ }
+ work[j + *n] = x[0];
+/* Computing MAX */
+ r__2 = (r__1 = work[j + *n], dabs(r__1));
+ vmax = dmax(r__2,vmax);
+ vcrit = bignum / vmax;
+
+ } else {
+
+/* 2-by-2 diagonal block */
+
+/* Scale if necessary to avoid overflow when forming */
+/* the right-hand side. */
+
+/* Computing MAX */
+ r__1 = work[j], r__2 = work[j + 1];
+ beta = dmax(r__1,r__2);
+ if (beta > vcrit) {
+ rec = 1.f / vmax;
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ vmax = 1.f;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 1;
+ work[j + *n] -= sdot_(&i__3, &t[ki + 1 + j * t_dim1],
+ &c__1, &work[ki + 1 + *n], &c__1);
+
+ i__3 = j - ki - 1;
+ work[j + 1 + *n] -= sdot_(&i__3, &t[ki + 1 + (j + 1) *
+ t_dim1], &c__1, &work[ki + 1 + *n], &c__1);
+
+/* Solve */
+/* [T(J,J)-WR T(J,J+1) ]'* X = SCALE*( WORK1 ) */
+/* [T(J+1,J) T(J+1,J+1)-WR] ( WORK2 ) */
+
+ slaln2_(&c_true, &c__2, &c__1, &smin, &c_b22, &t[j +
+ j * t_dim1], ldt, &c_b22, &c_b22, &work[j + *
+ n], n, &wr, &c_b25, x, &c__2, &scale, &xnorm,
+ &ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.f) {
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + 1 + *n] = x[1];
+
+/* Computing MAX */
+ r__3 = (r__1 = work[j + *n], dabs(r__1)), r__4 = (
+ r__2 = work[j + 1 + *n], dabs(r__2)), r__3 =
+ max(r__3,r__4);
+ vmax = dmax(r__3,vmax);
+ vcrit = bignum / vmax;
+
+ }
+L170:
+ ;
+ }
+
+/* Copy the vector x or Q*x to VL and normalize. */
+
+ if (! over) {
+ i__2 = *n - ki + 1;
+ scopy_(&i__2, &work[ki + *n], &c__1, &vl[ki + is *
+ vl_dim1], &c__1);
+
+ i__2 = *n - ki + 1;
+ ii = isamax_(&i__2, &vl[ki + is * vl_dim1], &c__1) + ki -
+ 1;
+ remax = 1.f / (r__1 = vl[ii + is * vl_dim1], dabs(r__1));
+ i__2 = *n - ki + 1;
+ sscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1);
+
+ i__2 = ki - 1;
+ for (k = 1; k <= i__2; ++k) {
+ vl[k + is * vl_dim1] = 0.f;
+/* L180: */
+ }
+
+ } else {
+
+ if (ki < *n) {
+ i__2 = *n - ki;
+ sgemv_("N", n, &i__2, &c_b22, &vl[(ki + 1) * vl_dim1
+ + 1], ldvl, &work[ki + 1 + *n], &c__1, &work[
+ ki + *n], &vl[ki * vl_dim1 + 1], &c__1);
+ }
+
+ ii = isamax_(n, &vl[ki * vl_dim1 + 1], &c__1);
+ remax = 1.f / (r__1 = vl[ii + ki * vl_dim1], dabs(r__1));
+ sscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1);
+
+ }
+
+ } else {
+
+/* Complex left eigenvector. */
+
+/* Initial solve: */
+/* ((T(KI,KI) T(KI,KI+1) )' - (WR - I* WI))*X = 0. */
+/* ((T(KI+1,KI) T(KI+1,KI+1)) ) */
+
+ if ((r__1 = t[ki + (ki + 1) * t_dim1], dabs(r__1)) >= (r__2 =
+ t[ki + 1 + ki * t_dim1], dabs(r__2))) {
+ work[ki + *n] = wi / t[ki + (ki + 1) * t_dim1];
+ work[ki + 1 + n2] = 1.f;
+ } else {
+ work[ki + *n] = 1.f;
+ work[ki + 1 + n2] = -wi / t[ki + 1 + ki * t_dim1];
+ }
+ work[ki + 1 + *n] = 0.f;
+ work[ki + n2] = 0.f;
+
+/* Form right-hand side */
+
+ i__2 = *n;
+ for (k = ki + 2; k <= i__2; ++k) {
+ work[k + *n] = -work[ki + *n] * t[ki + k * t_dim1];
+ work[k + n2] = -work[ki + 1 + n2] * t[ki + 1 + k * t_dim1]
+ ;
+/* L190: */
+ }
+
+/* Solve complex quasi-triangular system: */
+/* ( T(KI+2,N:KI+2,N) - (WR-i*WI) )*X = WORK1+i*WORK2 */
+
+ vmax = 1.f;
+ vcrit = bignum;
+
+ jnxt = ki + 2;
+ i__2 = *n;
+ for (j = ki + 2; j <= i__2; ++j) {
+ if (j < jnxt) {
+ goto L200;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j + 1;
+ if (j < *n) {
+ if (t[j + 1 + j * t_dim1] != 0.f) {
+ j2 = j + 1;
+ jnxt = j + 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1-by-1 diagonal block */
+
+/* Scale if necessary to avoid overflow when */
+/* forming the right-hand side elements. */
+
+ if (work[j] > vcrit) {
+ rec = 1.f / vmax;
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &rec, &work[ki + n2], &c__1);
+ vmax = 1.f;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 2;
+ work[j + *n] -= sdot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + *n], &c__1);
+ i__3 = j - ki - 2;
+ work[j + n2] -= sdot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + n2], &c__1);
+
+/* Solve (T(J,J)-(WR-i*WI))*(X11+i*X12)= WK+I*WK2 */
+
+ r__1 = -wi;
+ slaln2_(&c_false, &c__1, &c__2, &smin, &c_b22, &t[j +
+ j * t_dim1], ldt, &c_b22, &c_b22, &work[j + *
+ n], n, &wr, &r__1, x, &c__2, &scale, &xnorm, &
+ ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.f) {
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &scale, &work[ki + n2], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + n2] = x[2];
+/* Computing MAX */
+ r__3 = (r__1 = work[j + *n], dabs(r__1)), r__4 = (
+ r__2 = work[j + n2], dabs(r__2)), r__3 = max(
+ r__3,r__4);
+ vmax = dmax(r__3,vmax);
+ vcrit = bignum / vmax;
+
+ } else {
+
+/* 2-by-2 diagonal block */
+
+/* Scale if necessary to avoid overflow when forming */
+/* the right-hand side elements. */
+
+/* Computing MAX */
+ r__1 = work[j], r__2 = work[j + 1];
+ beta = dmax(r__1,r__2);
+ if (beta > vcrit) {
+ rec = 1.f / vmax;
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &rec, &work[ki + n2], &c__1);
+ vmax = 1.f;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 2;
+ work[j + *n] -= sdot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + *n], &c__1);
+
+ i__3 = j - ki - 2;
+ work[j + n2] -= sdot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + n2], &c__1);
+
+ i__3 = j - ki - 2;
+ work[j + 1 + *n] -= sdot_(&i__3, &t[ki + 2 + (j + 1) *
+ t_dim1], &c__1, &work[ki + 2 + *n], &c__1);
+
+ i__3 = j - ki - 2;
+ work[j + 1 + n2] -= sdot_(&i__3, &t[ki + 2 + (j + 1) *
+ t_dim1], &c__1, &work[ki + 2 + n2], &c__1);
+
+/* Solve 2-by-2 complex linear equation */
+/* ([T(j,j) T(j,j+1) ]'-(wr-i*wi)*I)*X = SCALE*B */
+/* ([T(j+1,j) T(j+1,j+1)] ) */
+
+ r__1 = -wi;
+ slaln2_(&c_true, &c__2, &c__2, &smin, &c_b22, &t[j +
+ j * t_dim1], ldt, &c_b22, &c_b22, &work[j + *
+ n], n, &wr, &r__1, x, &c__2, &scale, &xnorm, &
+ ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.f) {
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &scale, &work[ki + n2], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + n2] = x[2];
+ work[j + 1 + *n] = x[1];
+ work[j + 1 + n2] = x[3];
+/* Computing MAX */
+ r__1 = dabs(x[0]), r__2 = dabs(x[2]), r__1 = max(r__1,
+ r__2), r__2 = dabs(x[1]), r__1 = max(r__1,
+ r__2), r__2 = dabs(x[3]), r__1 = max(r__1,
+ r__2);
+ vmax = dmax(r__1,vmax);
+ vcrit = bignum / vmax;
+
+ }
+L200:
+ ;
+ }
+
+/* Copy the vector x or Q*x to VL and normalize. */
+
+ if (! over) {
+ i__2 = *n - ki + 1;
+ scopy_(&i__2, &work[ki + *n], &c__1, &vl[ki + is *
+ vl_dim1], &c__1);
+ i__2 = *n - ki + 1;
+ scopy_(&i__2, &work[ki + n2], &c__1, &vl[ki + (is + 1) *
+ vl_dim1], &c__1);
+
+ emax = 0.f;
+ i__2 = *n;
+ for (k = ki; k <= i__2; ++k) {
+/* Computing MAX */
+ r__3 = emax, r__4 = (r__1 = vl[k + is * vl_dim1],
+ dabs(r__1)) + (r__2 = vl[k + (is + 1) *
+ vl_dim1], dabs(r__2));
+ emax = dmax(r__3,r__4);
+/* L220: */
+ }
+ remax = 1.f / emax;
+ i__2 = *n - ki + 1;
+ sscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1);
+ i__2 = *n - ki + 1;
+ sscal_(&i__2, &remax, &vl[ki + (is + 1) * vl_dim1], &c__1)
+ ;
+
+ i__2 = ki - 1;
+ for (k = 1; k <= i__2; ++k) {
+ vl[k + is * vl_dim1] = 0.f;
+ vl[k + (is + 1) * vl_dim1] = 0.f;
+/* L230: */
+ }
+ } else {
+ if (ki < *n - 1) {
+ i__2 = *n - ki - 1;
+ sgemv_("N", n, &i__2, &c_b22, &vl[(ki + 2) * vl_dim1
+ + 1], ldvl, &work[ki + 2 + *n], &c__1, &work[
+ ki + *n], &vl[ki * vl_dim1 + 1], &c__1);
+ i__2 = *n - ki - 1;
+ sgemv_("N", n, &i__2, &c_b22, &vl[(ki + 2) * vl_dim1
+ + 1], ldvl, &work[ki + 2 + n2], &c__1, &work[
+ ki + 1 + n2], &vl[(ki + 1) * vl_dim1 + 1], &
+ c__1);
+ } else {
+ sscal_(n, &work[ki + *n], &vl[ki * vl_dim1 + 1], &
+ c__1);
+ sscal_(n, &work[ki + 1 + n2], &vl[(ki + 1) * vl_dim1
+ + 1], &c__1);
+ }
+
+ emax = 0.f;
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+/* Computing MAX */
+ r__3 = emax, r__4 = (r__1 = vl[k + ki * vl_dim1],
+ dabs(r__1)) + (r__2 = vl[k + (ki + 1) *
+ vl_dim1], dabs(r__2));
+ emax = dmax(r__3,r__4);
+/* L240: */
+ }
+ remax = 1.f / emax;
+ sscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1);
+ sscal_(n, &remax, &vl[(ki + 1) * vl_dim1 + 1], &c__1);
+
+ }
+
+ }
+
+ ++is;
+ if (ip != 0) {
+ ++is;
+ }
+L250:
+ if (ip == -1) {
+ ip = 0;
+ }
+ if (ip == 1) {
+ ip = -1;
+ }
+
+/* L260: */
+ }
+
+ }
+
+ return 0;
+
+/* End of STREVC */
+
+} /* strevc_ */
diff --git a/contrib/libs/clapack/strexc.c b/contrib/libs/clapack/strexc.c
new file mode 100644
index 0000000000..cc8c88f64b
--- /dev/null
+++ b/contrib/libs/clapack/strexc.c
@@ -0,0 +1,403 @@
+/* strexc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int strexc_(char *compq, integer *n, real *t, integer *ldt,
+ real *q, integer *ldq, integer *ifst, integer *ilst, real *work,
+ integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, t_dim1, t_offset, i__1;
+
+ /* Local variables */
+ integer nbf, nbl, here;
+ extern logical lsame_(char *, char *);
+ logical wantq;
+ extern /* Subroutine */ int xerbla_(char *, integer *), slaexc_(
+ logical *, integer *, real *, integer *, real *, integer *,
+ integer *, integer *, integer *, real *, integer *);
+ integer nbnext;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STREXC reorders the real Schur factorization of a real matrix */
+/* A = Q*T*Q**T, so that the diagonal block of T with row index IFST is */
+/* moved to row ILST. */
+
+/* The real Schur form T is reordered by an orthogonal similarity */
+/* transformation Z**T*T*Z, and optionally the matrix Q of Schur vectors */
+/* is updated by postmultiplying it with Z. */
+
+/* T must be in Schur canonical form (as returned by SHSEQR), that is, */
+/* block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; each */
+/* 2-by-2 diagonal block has its diagonal elements equal and its */
+/* off-diagonal elements of opposite sign. */
+
+/* Arguments */
+/* ========= */
+
+/* COMPQ (input) CHARACTER*1 */
+/* = 'V': update the matrix Q of Schur vectors; */
+/* = 'N': do not update Q. */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. N >= 0. */
+
+/* T (input/output) REAL array, dimension (LDT,N) */
+/* On entry, the upper quasi-triangular matrix T, in Schur */
+/* Schur canonical form. */
+/* On exit, the reordered upper quasi-triangular matrix, again */
+/* in Schur canonical form. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max(1,N). */
+
+/* Q (input/output) REAL array, dimension (LDQ,N) */
+/* On entry, if COMPQ = 'V', the matrix Q of Schur vectors. */
+/* On exit, if COMPQ = 'V', Q has been postmultiplied by the */
+/* orthogonal transformation matrix Z which reorders T. */
+/* If COMPQ = 'N', Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* IFST (input/output) INTEGER */
+/* ILST (input/output) INTEGER */
+/* Specify the reordering of the diagonal blocks of T. */
+/* The block with row index IFST is moved to row ILST, by a */
+/* sequence of transpositions between adjacent blocks. */
+/* On exit, if IFST pointed on entry to the second row of a */
+/* 2-by-2 block, it is changed to point to the first row; ILST */
+/* always points to the first row of the block in its final */
+/* position (which may differ from its input value by +1 or -1). */
+/* 1 <= IFST <= N; 1 <= ILST <= N. */
+
+/* WORK (workspace) REAL array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* = 1: two adjacent blocks were too close to swap (the problem */
+/* is very ill-conditioned); T may have been partially */
+/* reordered, and ILST points to the first row of the */
+/* current position of the block being moved. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input arguments. */
+
+ /* Parameter adjustments */
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ wantq = lsame_(compq, "V");
+ if (! wantq && ! lsame_(compq, "N")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldt < max(1,*n)) {
+ *info = -4;
+ } else if (*ldq < 1 || wantq && *ldq < max(1,*n)) {
+ *info = -6;
+ } else if (*ifst < 1 || *ifst > *n) {
+ *info = -7;
+ } else if (*ilst < 1 || *ilst > *n) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STREXC", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 1) {
+ return 0;
+ }
+
+/* Determine the first row of specified block */
+/* and find out it is 1 by 1 or 2 by 2. */
+
+ if (*ifst > 1) {
+ if (t[*ifst + (*ifst - 1) * t_dim1] != 0.f) {
+ --(*ifst);
+ }
+ }
+ nbf = 1;
+ if (*ifst < *n) {
+ if (t[*ifst + 1 + *ifst * t_dim1] != 0.f) {
+ nbf = 2;
+ }
+ }
+
+/* Determine the first row of the final block */
+/* and find out it is 1 by 1 or 2 by 2. */
+
+ if (*ilst > 1) {
+ if (t[*ilst + (*ilst - 1) * t_dim1] != 0.f) {
+ --(*ilst);
+ }
+ }
+ nbl = 1;
+ if (*ilst < *n) {
+ if (t[*ilst + 1 + *ilst * t_dim1] != 0.f) {
+ nbl = 2;
+ }
+ }
+
+ if (*ifst == *ilst) {
+ return 0;
+ }
+
+ if (*ifst < *ilst) {
+
+/* Update ILST */
+
+ if (nbf == 2 && nbl == 1) {
+ --(*ilst);
+ }
+ if (nbf == 1 && nbl == 2) {
+ ++(*ilst);
+ }
+
+ here = *ifst;
+
+L10:
+
+/* Swap block with next one below */
+
+ if (nbf == 1 || nbf == 2) {
+
+/* Current block either 1 by 1 or 2 by 2 */
+
+ nbnext = 1;
+ if (here + nbf + 1 <= *n) {
+ if (t[here + nbf + 1 + (here + nbf) * t_dim1] != 0.f) {
+ nbnext = 2;
+ }
+ }
+ slaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &here, &
+ nbf, &nbnext, &work[1], info);
+ if (*info != 0) {
+ *ilst = here;
+ return 0;
+ }
+ here += nbnext;
+
+/* Test if 2 by 2 block breaks into two 1 by 1 blocks */
+
+ if (nbf == 2) {
+ if (t[here + 1 + here * t_dim1] == 0.f) {
+ nbf = 3;
+ }
+ }
+
+ } else {
+
+/* Current block consists of two 1 by 1 blocks each of which */
+/* must be swapped individually */
+
+ nbnext = 1;
+ if (here + 3 <= *n) {
+ if (t[here + 3 + (here + 2) * t_dim1] != 0.f) {
+ nbnext = 2;
+ }
+ }
+ i__1 = here + 1;
+ slaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &i__1, &
+ c__1, &nbnext, &work[1], info);
+ if (*info != 0) {
+ *ilst = here;
+ return 0;
+ }
+ if (nbnext == 1) {
+
+/* Swap two 1 by 1 blocks, no problems possible */
+
+ slaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &
+ here, &c__1, &nbnext, &work[1], info);
+ ++here;
+ } else {
+
+/* Recompute NBNEXT in case 2 by 2 split */
+
+ if (t[here + 2 + (here + 1) * t_dim1] == 0.f) {
+ nbnext = 1;
+ }
+ if (nbnext == 2) {
+
+/* 2 by 2 Block did not split */
+
+ slaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &
+ here, &c__1, &nbnext, &work[1], info);
+ if (*info != 0) {
+ *ilst = here;
+ return 0;
+ }
+ here += 2;
+ } else {
+
+/* 2 by 2 Block did split */
+
+ slaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &
+ here, &c__1, &c__1, &work[1], info);
+ i__1 = here + 1;
+ slaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &
+ i__1, &c__1, &c__1, &work[1], info);
+ here += 2;
+ }
+ }
+ }
+ if (here < *ilst) {
+ goto L10;
+ }
+
+ } else {
+
+ here = *ifst;
+L20:
+
+/* Swap block with next one above */
+
+ if (nbf == 1 || nbf == 2) {
+
+/* Current block either 1 by 1 or 2 by 2 */
+
+ nbnext = 1;
+ if (here >= 3) {
+ if (t[here - 1 + (here - 2) * t_dim1] != 0.f) {
+ nbnext = 2;
+ }
+ }
+ i__1 = here - nbnext;
+ slaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &i__1, &
+ nbnext, &nbf, &work[1], info);
+ if (*info != 0) {
+ *ilst = here;
+ return 0;
+ }
+ here -= nbnext;
+
+/* Test if 2 by 2 block breaks into two 1 by 1 blocks */
+
+ if (nbf == 2) {
+ if (t[here + 1 + here * t_dim1] == 0.f) {
+ nbf = 3;
+ }
+ }
+
+ } else {
+
+/* Current block consists of two 1 by 1 blocks each of which */
+/* must be swapped individually */
+
+ nbnext = 1;
+ if (here >= 3) {
+ if (t[here - 1 + (here - 2) * t_dim1] != 0.f) {
+ nbnext = 2;
+ }
+ }
+ i__1 = here - nbnext;
+ slaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &i__1, &
+ nbnext, &c__1, &work[1], info);
+ if (*info != 0) {
+ *ilst = here;
+ return 0;
+ }
+ if (nbnext == 1) {
+
+/* Swap two 1 by 1 blocks, no problems possible */
+
+ slaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &
+ here, &nbnext, &c__1, &work[1], info);
+ --here;
+ } else {
+
+/* Recompute NBNEXT in case 2 by 2 split */
+
+ if (t[here + (here - 1) * t_dim1] == 0.f) {
+ nbnext = 1;
+ }
+ if (nbnext == 2) {
+
+/* 2 by 2 Block did not split */
+
+ i__1 = here - 1;
+ slaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &
+ i__1, &c__2, &c__1, &work[1], info);
+ if (*info != 0) {
+ *ilst = here;
+ return 0;
+ }
+ here += -2;
+ } else {
+
+/* 2 by 2 Block did split */
+
+ slaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &
+ here, &c__1, &c__1, &work[1], info);
+ i__1 = here - 1;
+ slaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &
+ i__1, &c__1, &c__1, &work[1], info);
+ here += -2;
+ }
+ }
+ }
+ if (here > *ilst) {
+ goto L20;
+ }
+ }
+ *ilst = here;
+
+ return 0;
+
+/* End of STREXC */
+
+} /* strexc_ */
diff --git a/contrib/libs/clapack/strrfs.c b/contrib/libs/clapack/strrfs.c
new file mode 100644
index 0000000000..b914b0ec17
--- /dev/null
+++ b/contrib/libs/clapack/strrfs.c
@@ -0,0 +1,492 @@
+/* strrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b19 = -1.f;
+
+/* Subroutine */ int strrfs_(char *uplo, char *trans, char *diag, integer *n,
+ integer *nrhs, real *a, integer *lda, real *b, integer *ldb, real *x,
+ integer *ldx, real *ferr, real *berr, real *work, integer *iwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, x_dim1, x_offset, i__1, i__2,
+ i__3;
+ real r__1, r__2, r__3;
+
+ /* Local variables */
+ integer i__, j, k;
+ real s, xk;
+ integer nz;
+ real eps;
+ integer kase;
+ real safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ logical upper;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), saxpy_(integer *, real *, real *, integer *, real *,
+ integer *), strmv_(char *, char *, char *, integer *, real *,
+ integer *, real *, integer *), strsv_(
+ char *, char *, char *, integer *, real *, integer *, real *,
+ integer *), slacn2_(integer *, real *,
+ real *, integer *, real *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran;
+ char transt[1];
+ logical nounit;
+ real lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STRRFS provides error bounds and backward error estimates for the */
+/* solution to a system of linear equations with a triangular */
+/* coefficient matrix. */
+
+/* The solution matrix X must be computed by STRTRS or some other */
+/* means before entering this routine. STRRFS does not do iterative */
+/* refinement because doing so cannot improve the backward error. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose = Transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The triangular matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of the array A contains the upper */
+/* triangular matrix, and the strictly lower triangular part of */
+/* A is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of the array A contains the lower triangular */
+/* matrix, and the strictly upper triangular part of A is not */
+/* referenced. If DIAG = 'U', the diagonal elements of A are */
+/* also not referenced and are assumed to be 1. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input) REAL array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input) REAL array, dimension (LDX,NRHS) */
+/* The solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) REAL array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) REAL array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) REAL array, dimension (3*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*nrhs < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldx < max(1,*n)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STRRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.f;
+ berr[j] = 0.f;
+/* L10: */
+ }
+ return 0;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = *n + 1;
+ eps = slamch_("Epsilon");
+ safmin = slamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A or A', depending on TRANS. */
+
+ scopy_(n, &x[j * x_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+ strmv_(uplo, trans, diag, n, &a[a_offset], lda, &work[*n + 1], &c__1);
+ saxpy_(n, &c_b19, &b[j * b_dim1 + 1], &c__1, &work[*n + 1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] = (r__1 = b[i__ + j * b_dim1], dabs(r__1));
+/* L20: */
+ }
+
+ if (notran) {
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+ if (upper) {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+ i__3 = k;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ work[i__] += (r__1 = a[i__ + k * a_dim1], dabs(
+ r__1)) * xk;
+/* L30: */
+ }
+/* L40: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ work[i__] += (r__1 = a[i__ + k * a_dim1], dabs(
+ r__1)) * xk;
+/* L50: */
+ }
+ work[k] += xk;
+/* L60: */
+ }
+ }
+ } else {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+ i__3 = *n;
+ for (i__ = k; i__ <= i__3; ++i__) {
+ work[i__] += (r__1 = a[i__ + k * a_dim1], dabs(
+ r__1)) * xk;
+/* L70: */
+ }
+/* L80: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ xk = (r__1 = x[k + j * x_dim1], dabs(r__1));
+ i__3 = *n;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ work[i__] += (r__1 = a[i__ + k * a_dim1], dabs(
+ r__1)) * xk;
+/* L90: */
+ }
+ work[k] += xk;
+/* L100: */
+ }
+ }
+ }
+ } else {
+
+/* Compute abs(A')*abs(X) + abs(B). */
+
+ if (upper) {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ i__3 = k;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ s += (r__1 = a[i__ + k * a_dim1], dabs(r__1)) * (
+ r__2 = x[i__ + j * x_dim1], dabs(r__2));
+/* L110: */
+ }
+ work[k] += s;
+/* L120: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = (r__1 = x[k + j * x_dim1], dabs(r__1));
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ s += (r__1 = a[i__ + k * a_dim1], dabs(r__1)) * (
+ r__2 = x[i__ + j * x_dim1], dabs(r__2));
+/* L130: */
+ }
+ work[k] += s;
+/* L140: */
+ }
+ }
+ } else {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.f;
+ i__3 = *n;
+ for (i__ = k; i__ <= i__3; ++i__) {
+ s += (r__1 = a[i__ + k * a_dim1], dabs(r__1)) * (
+ r__2 = x[i__ + j * x_dim1], dabs(r__2));
+/* L150: */
+ }
+ work[k] += s;
+/* L160: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = (r__1 = x[k + j * x_dim1], dabs(r__1));
+ i__3 = *n;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ s += (r__1 = a[i__ + k * a_dim1], dabs(r__1)) * (
+ r__2 = x[i__ + j * x_dim1], dabs(r__2));
+/* L170: */
+ }
+ work[k] += s;
+/* L180: */
+ }
+ }
+ }
+ }
+ s = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+/* Computing MAX */
+ r__2 = s, r__3 = (r__1 = work[*n + i__], dabs(r__1)) / work[
+ i__];
+ s = dmax(r__2,r__3);
+ } else {
+/* Computing MAX */
+ r__2 = s, r__3 = ((r__1 = work[*n + i__], dabs(r__1)) + safe1)
+ / (work[i__] + safe1);
+ s = dmax(r__2,r__3);
+ }
+/* L190: */
+ }
+ berr[j] = s;
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use SLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (work[i__] > safe2) {
+ work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+ work[i__];
+ } else {
+ work[i__] = (r__1 = work[*n + i__], dabs(r__1)) + nz * eps *
+ work[i__] + safe1;
+ }
+/* L200: */
+ }
+
+ kase = 0;
+L210:
+ slacn2_(n, &work[(*n << 1) + 1], &work[*n + 1], &iwork[1], &ferr[j], &
+ kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)'). */
+
+ strsv_(uplo, transt, diag, n, &a[a_offset], lda, &work[*n + 1]
+, &c__1);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] = work[i__] * work[*n + i__];
+/* L220: */
+ }
+ } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[*n + i__] = work[i__] * work[*n + i__];
+/* L230: */
+ }
+ strsv_(uplo, trans, diag, n, &a[a_offset], lda, &work[*n + 1],
+ &c__1);
+ }
+ goto L210;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.f;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = lstres, r__3 = (r__1 = x[i__ + j * x_dim1], dabs(r__1));
+ lstres = dmax(r__2,r__3);
+/* L240: */
+ }
+ if (lstres != 0.f) {
+ ferr[j] /= lstres;
+ }
+
+/* L250: */
+ }
+
+ return 0;
+
+/* End of STRRFS */
+
+} /* strrfs_ */
diff --git a/contrib/libs/clapack/strsen.c b/contrib/libs/clapack/strsen.c
new file mode 100644
index 0000000000..8698273051
--- /dev/null
+++ b/contrib/libs/clapack/strsen.c
@@ -0,0 +1,530 @@
+/* strsen.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c_n1 = -1;
+
+/* Subroutine */ int strsen_(char *job, char *compq, logical *select, integer
+ *n, real *t, integer *ldt, real *q, integer *ldq, real *wr, real *wi,
+ integer *m, real *s, real *sep, real *work, integer *lwork, integer *
+ iwork, integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, t_dim1, t_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer k, n1, n2, kk, nn, ks;
+ real est;
+ integer kase;
+ logical pair;
+ integer ierr;
+ logical swap;
+ real scale;
+ extern logical lsame_(char *, char *);
+ integer isave[3], lwmin;
+ logical wantq, wants;
+ real rnorm;
+ extern /* Subroutine */ int slacn2_(integer *, real *, real *, integer *,
+ real *, integer *, integer *);
+ extern doublereal slange_(char *, integer *, integer *, real *, integer *,
+ real *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical wantbh;
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *);
+ integer liwmin;
+ extern /* Subroutine */ int strexc_(char *, integer *, real *, integer *,
+ real *, integer *, integer *, integer *, real *, integer *);
+ logical wantsp, lquery;
+ extern /* Subroutine */ int strsyl_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, integer *, real *, integer *
+, real *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STRSEN reorders the real Schur factorization of a real matrix */
+/* A = Q*T*Q**T, so that a selected cluster of eigenvalues appears in */
+/* the leading diagonal blocks of the upper quasi-triangular matrix T, */
+/* and the leading columns of Q form an orthonormal basis of the */
+/* corresponding right invariant subspace. */
+
+/* Optionally the routine computes the reciprocal condition numbers of */
+/* the cluster of eigenvalues and/or the invariant subspace. */
+
+/* T must be in Schur canonical form (as returned by SHSEQR), that is, */
+/* block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; each */
+/* 2-by-2 diagonal block has its diagonal elemnts equal and its */
+/* off-diagonal elements of opposite sign. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies whether condition numbers are required for the */
+/* cluster of eigenvalues (S) or the invariant subspace (SEP): */
+/* = 'N': none; */
+/* = 'E': for eigenvalues only (S); */
+/* = 'V': for invariant subspace only (SEP); */
+/* = 'B': for both eigenvalues and invariant subspace (S and */
+/* SEP). */
+
+/* COMPQ (input) CHARACTER*1 */
+/* = 'V': update the matrix Q of Schur vectors; */
+/* = 'N': do not update Q. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* SELECT specifies the eigenvalues in the selected cluster. To */
+/* select a real eigenvalue w(j), SELECT(j) must be set to */
+/* .TRUE.. To select a complex conjugate pair of eigenvalues */
+/* w(j) and w(j+1), corresponding to a 2-by-2 diagonal block, */
+/* either SELECT(j) or SELECT(j+1) or both must be set to */
+/* .TRUE.; a complex conjugate pair of eigenvalues must be */
+/* either both included in the cluster or both excluded. */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. N >= 0. */
+
+/* T (input/output) REAL array, dimension (LDT,N) */
+/* On entry, the upper quasi-triangular matrix T, in Schur */
+/* canonical form. */
+/* On exit, T is overwritten by the reordered matrix T, again in */
+/* Schur canonical form, with the selected eigenvalues in the */
+/* leading diagonal blocks. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max(1,N). */
+
+/* Q (input/output) REAL array, dimension (LDQ,N) */
+/* On entry, if COMPQ = 'V', the matrix Q of Schur vectors. */
+/* On exit, if COMPQ = 'V', Q has been postmultiplied by the */
+/* orthogonal transformation matrix which reorders T; the */
+/* leading M columns of Q form an orthonormal basis for the */
+/* specified invariant subspace. */
+/* If COMPQ = 'N', Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. */
+/* LDQ >= 1; and if COMPQ = 'V', LDQ >= N. */
+
+/* WR (output) REAL array, dimension (N) */
+/* WI (output) REAL array, dimension (N) */
+/* The real and imaginary parts, respectively, of the reordered */
+/* eigenvalues of T. The eigenvalues are stored in the same */
+/* order as on the diagonal of T, with WR(i) = T(i,i) and, if */
+/* T(i:i+1,i:i+1) is a 2-by-2 diagonal block, WI(i) > 0 and */
+/* WI(i+1) = -WI(i). Note that if a complex eigenvalue is */
+/* sufficiently ill-conditioned, then its value may differ */
+/* significantly from its value before reordering. */
+
+/* M (output) INTEGER */
+/* The dimension of the specified invariant subspace. */
+/* 0 < = M <= N. */
+
+/* S (output) REAL */
+/* If JOB = 'E' or 'B', S is a lower bound on the reciprocal */
+/* condition number for the selected cluster of eigenvalues. */
+/* S cannot underestimate the true reciprocal condition number */
+/* by more than a factor of sqrt(N). If M = 0 or N, S = 1. */
+/* If JOB = 'N' or 'V', S is not referenced. */
+
+/* SEP (output) REAL */
+/* If JOB = 'V' or 'B', SEP is the estimated reciprocal */
+/* condition number of the specified invariant subspace. If */
+/* M = 0 or N, SEP = norm(T). */
+/* If JOB = 'N' or 'E', SEP is not referenced. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If JOB = 'N', LWORK >= max(1,N); */
+/* if JOB = 'E', LWORK >= max(1,M*(N-M)); */
+/* if JOB = 'V' or 'B', LWORK >= max(1,2*M*(N-M)). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If JOB = 'N' or 'E', LIWORK >= 1; */
+/* if JOB = 'V' or 'B', LIWORK >= max(1,M*(N-M)). */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal size of the IWORK array, */
+/* returns this value as the first entry of the IWORK array, and */
+/* no error message related to LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* = 1: reordering of T failed because some eigenvalues are too */
+/* close to separate (the problem is very ill-conditioned); */
+/* T may have been partially reordered, and WR and WI */
+/* contain the eigenvalues in the same order as in T; S and */
+/* SEP (if requested) are set to zero. */
+
+/* Further Details */
+/* =============== */
+
+/* STRSEN first collects the selected eigenvalues by computing an */
+/* orthogonal transformation Z to move them to the top left corner of T. */
+/* In other words, the selected eigenvalues are the eigenvalues of T11 */
+/* in: */
+
+/* Z'*T*Z = ( T11 T12 ) n1 */
+/* ( 0 T22 ) n2 */
+/* n1 n2 */
+
+/* where N = n1+n2 and Z' means the transpose of Z. The first n1 columns */
+/* of Z span the specified invariant subspace of T. */
+
+/* If T has been obtained from the real Schur factorization of a matrix */
+/* A = Q*T*Q', then the reordered real Schur factorization of A is given */
+/* by A = (Q*Z)*(Z'*T*Z)*(Q*Z)', and the first n1 columns of Q*Z span */
+/* the corresponding invariant subspace of A. */
+
+/* The reciprocal condition number of the average of the eigenvalues of */
+/* T11 may be returned in S. S lies between 0 (very badly conditioned) */
+/* and 1 (very well conditioned). It is computed as follows. First we */
+/* compute R so that */
+
+/* P = ( I R ) n1 */
+/* ( 0 0 ) n2 */
+/* n1 n2 */
+
+/* is the projector on the invariant subspace associated with T11. */
+/* R is the solution of the Sylvester equation: */
+
+/* T11*R - R*T22 = T12. */
+
+/* Let F-norm(M) denote the Frobenius-norm of M and 2-norm(M) denote */
+/* the two-norm of M. Then S is computed as the lower bound */
+
+/* (1 + F-norm(R)**2)**(-1/2) */
+
+/* on the reciprocal of 2-norm(P), the true reciprocal condition number. */
+/* S cannot underestimate 1 / 2-norm(P) by more than a factor of */
+/* sqrt(N). */
+
+/* An approximate error bound for the computed average of the */
+/* eigenvalues of T11 is */
+
+/* EPS * norm(T) / S */
+
+/* where EPS is the machine precision. */
+
+/* The reciprocal condition number of the right invariant subspace */
+/* spanned by the first n1 columns of Z (or of Q*Z) is returned in SEP. */
+/* SEP is defined as the separation of T11 and T22: */
+
+/* sep( T11, T22 ) = sigma-min( C ) */
+
+/* where sigma-min(C) is the smallest singular value of the */
+/* n1*n2-by-n1*n2 matrix */
+
+/* C = kprod( I(n2), T11 ) - kprod( transpose(T22), I(n1) ) */
+
+/* I(m) is an m by m identity matrix, and kprod denotes the Kronecker */
+/* product. We estimate sigma-min(C) by the reciprocal of an estimate of */
+/* the 1-norm of inverse(C). The true reciprocal 1-norm of inverse(C) */
+/* cannot differ from sigma-min(C) by more than a factor of sqrt(n1*n2). */
+
+/* When SEP is small, small changes in T can cause large changes in */
+/* the invariant subspace. An approximate bound on the maximum angular */
+/* error in the computed right invariant subspace is */
+
+/* EPS * norm(T) / SEP */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+ /* Parameter adjustments */
+ --select;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --wr;
+ --wi;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ wantbh = lsame_(job, "B");
+ wants = lsame_(job, "E") || wantbh;
+ wantsp = lsame_(job, "V") || wantbh;
+ wantq = lsame_(compq, "V");
+
+ *info = 0;
+ lquery = *lwork == -1;
+ if (! lsame_(job, "N") && ! wants && ! wantsp) {
+ *info = -1;
+ } else if (! lsame_(compq, "N") && ! wantq) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldt < max(1,*n)) {
+ *info = -6;
+ } else if (*ldq < 1 || wantq && *ldq < *n) {
+ *info = -8;
+ } else {
+
+/* Set M to the dimension of the specified invariant subspace, */
+/* and test LWORK and LIWORK. */
+
+ *m = 0;
+ pair = FALSE_;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (pair) {
+ pair = FALSE_;
+ } else {
+ if (k < *n) {
+ if (t[k + 1 + k * t_dim1] == 0.f) {
+ if (select[k]) {
+ ++(*m);
+ }
+ } else {
+ pair = TRUE_;
+ if (select[k] || select[k + 1]) {
+ *m += 2;
+ }
+ }
+ } else {
+ if (select[*n]) {
+ ++(*m);
+ }
+ }
+ }
+/* L10: */
+ }
+
+ n1 = *m;
+ n2 = *n - *m;
+ nn = n1 * n2;
+
+ if (wantsp) {
+/* Computing MAX */
+ i__1 = 1, i__2 = nn << 1;
+ lwmin = max(i__1,i__2);
+ liwmin = max(1,nn);
+ } else if (lsame_(job, "N")) {
+ lwmin = max(1,*n);
+ liwmin = 1;
+ } else if (lsame_(job, "E")) {
+ lwmin = max(1,nn);
+ liwmin = 1;
+ }
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -15;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -17;
+ }
+ }
+
+ if (*info == 0) {
+ work[1] = (real) lwmin;
+ iwork[1] = liwmin;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STRSEN", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == *n || *m == 0) {
+ if (wants) {
+ *s = 1.f;
+ }
+ if (wantsp) {
+ *sep = slange_("1", n, n, &t[t_offset], ldt, &work[1]);
+ }
+ goto L40;
+ }
+
+/* Collect the selected blocks at the top-left corner of T. */
+
+ ks = 0;
+ pair = FALSE_;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (pair) {
+ pair = FALSE_;
+ } else {
+ swap = select[k];
+ if (k < *n) {
+ if (t[k + 1 + k * t_dim1] != 0.f) {
+ pair = TRUE_;
+ swap = swap || select[k + 1];
+ }
+ }
+ if (swap) {
+ ++ks;
+
+/* Swap the K-th block to position KS. */
+
+ ierr = 0;
+ kk = k;
+ if (k != ks) {
+ strexc_(compq, n, &t[t_offset], ldt, &q[q_offset], ldq, &
+ kk, &ks, &work[1], &ierr);
+ }
+ if (ierr == 1 || ierr == 2) {
+
+/* Blocks too close to swap: exit. */
+
+ *info = 1;
+ if (wants) {
+ *s = 0.f;
+ }
+ if (wantsp) {
+ *sep = 0.f;
+ }
+ goto L40;
+ }
+ if (pair) {
+ ++ks;
+ }
+ }
+ }
+/* L20: */
+ }
+
+ if (wants) {
+
+/* Solve Sylvester equation for R: */
+
+/* T11*R - R*T22 = scale*T12 */
+
+ slacpy_("F", &n1, &n2, &t[(n1 + 1) * t_dim1 + 1], ldt, &work[1], &n1);
+ strsyl_("N", "N", &c_n1, &n1, &n2, &t[t_offset], ldt, &t[n1 + 1 + (n1
+ + 1) * t_dim1], ldt, &work[1], &n1, &scale, &ierr);
+
+/* Estimate the reciprocal of the condition number of the cluster */
+/* of eigenvalues. */
+
+ rnorm = slange_("F", &n1, &n2, &work[1], &n1, &work[1]);
+ if (rnorm == 0.f) {
+ *s = 1.f;
+ } else {
+ *s = scale / (sqrt(scale * scale / rnorm + rnorm) * sqrt(rnorm));
+ }
+ }
+
+ if (wantsp) {
+
+/* Estimate sep(T11,T22). */
+
+ est = 0.f;
+ kase = 0;
+L30:
+ slacn2_(&nn, &work[nn + 1], &work[1], &iwork[1], &est, &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Solve T11*R - R*T22 = scale*X. */
+
+ strsyl_("N", "N", &c_n1, &n1, &n2, &t[t_offset], ldt, &t[n1 +
+ 1 + (n1 + 1) * t_dim1], ldt, &work[1], &n1, &scale, &
+ ierr);
+ } else {
+
+/* Solve T11'*R - R*T22' = scale*X. */
+
+ strsyl_("T", "T", &c_n1, &n1, &n2, &t[t_offset], ldt, &t[n1 +
+ 1 + (n1 + 1) * t_dim1], ldt, &work[1], &n1, &scale, &
+ ierr);
+ }
+ goto L30;
+ }
+
+ *sep = scale / est;
+ }
+
+L40:
+
+/* Store the output eigenvalues in WR and WI. */
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ wr[k] = t[k + k * t_dim1];
+ wi[k] = 0.f;
+/* L50: */
+ }
+ i__1 = *n - 1;
+ for (k = 1; k <= i__1; ++k) {
+ if (t[k + 1 + k * t_dim1] != 0.f) {
+ wi[k] = sqrt((r__1 = t[k + (k + 1) * t_dim1], dabs(r__1))) * sqrt(
+ (r__2 = t[k + 1 + k * t_dim1], dabs(r__2)));
+ wi[k + 1] = -wi[k];
+ }
+/* L60: */
+ }
+
+ work[1] = (real) lwmin;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of STRSEN */
+
+} /* strsen_ */
diff --git a/contrib/libs/clapack/strsna.c b/contrib/libs/clapack/strsna.c
new file mode 100644
index 0000000000..9631f4c94a
--- /dev/null
+++ b/contrib/libs/clapack/strsna.c
@@ -0,0 +1,603 @@
+/* strsna.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static logical c_true = TRUE_;
+static logical c_false = FALSE_;
+
+/* Subroutine */ int strsna_(char *job, char *howmny, logical *select,
+ integer *n, real *t, integer *ldt, real *vl, integer *ldvl, real *vr,
+ integer *ldvr, real *s, real *sep, integer *mm, integer *m, real *
+ work, integer *ldwork, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset,
+ work_dim1, work_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k, n2;
+ real cs;
+ integer nn, ks;
+ real sn, mu, eps, est;
+ integer kase;
+ real cond;
+ logical pair;
+ integer ierr;
+ real dumm, prod;
+ integer ifst;
+ real lnrm;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ integer ilst;
+ real rnrm, prod1, prod2;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ real scale, delta;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ logical wants;
+ real dummy[1];
+ extern /* Subroutine */ int slacn2_(integer *, real *, real *, integer *,
+ real *, integer *, integer *);
+ extern doublereal slapy2_(real *, real *);
+ extern /* Subroutine */ int slabad_(real *, real *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ logical wantbh;
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *);
+ logical somcon;
+ extern /* Subroutine */ int slaqtr_(logical *, logical *, integer *, real
+ *, integer *, real *, real *, real *, real *, real *, integer *),
+ strexc_(char *, integer *, real *, integer *, real *, integer *,
+ integer *, integer *, real *, integer *);
+ real smlnum;
+ logical wantsp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call SLACN2 in place of SLACON, 7 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STRSNA estimates reciprocal condition numbers for specified */
+/* eigenvalues and/or right eigenvectors of a real upper */
+/* quasi-triangular matrix T (or of any matrix Q*T*Q**T with Q */
+/* orthogonal). */
+
+/* T must be in Schur canonical form (as returned by SHSEQR), that is, */
+/* block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; each */
+/* 2-by-2 diagonal block has its diagonal elements equal and its */
+/* off-diagonal elements of opposite sign. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies whether condition numbers are required for */
+/* eigenvalues (S) or eigenvectors (SEP): */
+/* = 'E': for eigenvalues only (S); */
+/* = 'V': for eigenvectors only (SEP); */
+/* = 'B': for both eigenvalues and eigenvectors (S and SEP). */
+
+/* HOWMNY (input) CHARACTER*1 */
+/* = 'A': compute condition numbers for all eigenpairs; */
+/* = 'S': compute condition numbers for selected eigenpairs */
+/* specified by the array SELECT. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* If HOWMNY = 'S', SELECT specifies the eigenpairs for which */
+/* condition numbers are required. To select condition numbers */
+/* for the eigenpair corresponding to a real eigenvalue w(j), */
+/* SELECT(j) must be set to .TRUE.. To select condition numbers */
+/* corresponding to a complex conjugate pair of eigenvalues w(j) */
+/* and w(j+1), either SELECT(j) or SELECT(j+1) or both, must be */
+/* set to .TRUE.. */
+/* If HOWMNY = 'A', SELECT is not referenced. */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. N >= 0. */
+
+/* T (input) REAL array, dimension (LDT,N) */
+/* The upper quasi-triangular matrix T, in Schur canonical form. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max(1,N). */
+
+/* VL (input) REAL array, dimension (LDVL,M) */
+/* If JOB = 'E' or 'B', VL must contain left eigenvectors of T */
+/* (or of any Q*T*Q**T with Q orthogonal), corresponding to the */
+/* eigenpairs specified by HOWMNY and SELECT. The eigenvectors */
+/* must be stored in consecutive columns of VL, as returned by */
+/* SHSEIN or STREVC. */
+/* If JOB = 'V', VL is not referenced. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the array VL. */
+/* LDVL >= 1; and if JOB = 'E' or 'B', LDVL >= N. */
+
+/* VR (input) REAL array, dimension (LDVR,M) */
+/* If JOB = 'E' or 'B', VR must contain right eigenvectors of T */
+/* (or of any Q*T*Q**T with Q orthogonal), corresponding to the */
+/* eigenpairs specified by HOWMNY and SELECT. The eigenvectors */
+/* must be stored in consecutive columns of VR, as returned by */
+/* SHSEIN or STREVC. */
+/* If JOB = 'V', VR is not referenced. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. */
+/* LDVR >= 1; and if JOB = 'E' or 'B', LDVR >= N. */
+
+/* S (output) REAL array, dimension (MM) */
+/* If JOB = 'E' or 'B', the reciprocal condition numbers of the */
+/* selected eigenvalues, stored in consecutive elements of the */
+/* array. For a complex conjugate pair of eigenvalues two */
+/* consecutive elements of S are set to the same value. Thus */
+/* S(j), SEP(j), and the j-th columns of VL and VR all */
+/* correspond to the same eigenpair (but not in general the */
+/* j-th eigenpair, unless all eigenpairs are selected). */
+/* If JOB = 'V', S is not referenced. */
+
+/* SEP (output) REAL array, dimension (MM) */
+/* If JOB = 'V' or 'B', the estimated reciprocal condition */
+/* numbers of the selected eigenvectors, stored in consecutive */
+/* elements of the array. For a complex eigenvector two */
+/* consecutive elements of SEP are set to the same value. If */
+/* the eigenvalues cannot be reordered to compute SEP(j), SEP(j) */
+/* is set to 0; this can only occur when the true value would be */
+/* very small anyway. */
+/* If JOB = 'E', SEP is not referenced. */
+
+/* MM (input) INTEGER */
+/* The number of elements in the arrays S (if JOB = 'E' or 'B') */
+/* and/or SEP (if JOB = 'V' or 'B'). MM >= M. */
+
+/* M (output) INTEGER */
+/* The number of elements of the arrays S and/or SEP actually */
+/* used to store the estimated condition numbers. */
+/* If HOWMNY = 'A', M is set to N. */
+
+/* WORK (workspace) REAL array, dimension (LDWORK,N+6) */
+/* If JOB = 'E', WORK is not referenced. */
+
+/* LDWORK (input) INTEGER */
+/* The leading dimension of the array WORK. */
+/* LDWORK >= 1; and if JOB = 'V' or 'B', LDWORK >= N. */
+
+/* IWORK (workspace) INTEGER array, dimension (2*(N-1)) */
+/* If JOB = 'E', IWORK is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The reciprocal of the condition number of an eigenvalue lambda is */
+/* defined as */
+
+/* S(lambda) = |v'*u| / (norm(u)*norm(v)) */
+
+/* where u and v are the right and left eigenvectors of T corresponding */
+/* to lambda; v' denotes the conjugate-transpose of v, and norm(u) */
+/* denotes the Euclidean norm. These reciprocal condition numbers always */
+/* lie between zero (very badly conditioned) and one (very well */
+/* conditioned). If n = 1, S(lambda) is defined to be 1. */
+
+/* An approximate error bound for a computed eigenvalue W(i) is given by */
+
+/* EPS * norm(T) / S(i) */
+
+/* where EPS is the machine precision. */
+
+/* The reciprocal of the condition number of the right eigenvector u */
+/* corresponding to lambda is defined as follows. Suppose */
+
+/* T = ( lambda c ) */
+/* ( 0 T22 ) */
+
+/* Then the reciprocal condition number is */
+
+/* SEP( lambda, T22 ) = sigma-min( T22 - lambda*I ) */
+
+/* where sigma-min denotes the smallest singular value. We approximate */
+/* the smallest singular value by the reciprocal of an estimate of the */
+/* one-norm of the inverse of T22 - lambda*I. If n = 1, SEP(1) is */
+/* defined to be abs(T(1,1)). */
+
+/* An approximate error bound for a computed right eigenvector VR(i) */
+/* is given by */
+
+/* EPS * norm(T) / SEP(i) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+ /* Parameter adjustments */
+ --select;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --s;
+ --sep;
+ work_dim1 = *ldwork;
+ work_offset = 1 + work_dim1;
+ work -= work_offset;
+ --iwork;
+
+ /* Function Body */
+ wantbh = lsame_(job, "B");
+ wants = lsame_(job, "E") || wantbh;
+ wantsp = lsame_(job, "V") || wantbh;
+
+ somcon = lsame_(howmny, "S");
+
+ *info = 0;
+ if (! wants && ! wantsp) {
+ *info = -1;
+ } else if (! lsame_(howmny, "A") && ! somcon) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldt < max(1,*n)) {
+ *info = -6;
+ } else if (*ldvl < 1 || wants && *ldvl < *n) {
+ *info = -8;
+ } else if (*ldvr < 1 || wants && *ldvr < *n) {
+ *info = -10;
+ } else {
+
+/* Set M to the number of eigenpairs for which condition numbers */
+/* are required, and test MM. */
+
+ if (somcon) {
+ *m = 0;
+ pair = FALSE_;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (pair) {
+ pair = FALSE_;
+ } else {
+ if (k < *n) {
+ if (t[k + 1 + k * t_dim1] == 0.f) {
+ if (select[k]) {
+ ++(*m);
+ }
+ } else {
+ pair = TRUE_;
+ if (select[k] || select[k + 1]) {
+ *m += 2;
+ }
+ }
+ } else {
+ if (select[*n]) {
+ ++(*m);
+ }
+ }
+ }
+/* L10: */
+ }
+ } else {
+ *m = *n;
+ }
+
+ if (*mm < *m) {
+ *info = -13;
+ } else if (*ldwork < 1 || wantsp && *ldwork < *n) {
+ *info = -16;
+ }
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STRSNA", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (somcon) {
+ if (! select[1]) {
+ return 0;
+ }
+ }
+ if (wants) {
+ s[1] = 1.f;
+ }
+ if (wantsp) {
+ sep[1] = (r__1 = t[t_dim1 + 1], dabs(r__1));
+ }
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S") / eps;
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+
+ ks = 0;
+ pair = FALSE_;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+
+/* Determine whether T(k,k) begins a 1-by-1 or 2-by-2 block. */
+
+ if (pair) {
+ pair = FALSE_;
+ goto L60;
+ } else {
+ if (k < *n) {
+ pair = t[k + 1 + k * t_dim1] != 0.f;
+ }
+ }
+
+/* Determine whether condition numbers are required for the k-th */
+/* eigenpair. */
+
+ if (somcon) {
+ if (pair) {
+ if (! select[k] && ! select[k + 1]) {
+ goto L60;
+ }
+ } else {
+ if (! select[k]) {
+ goto L60;
+ }
+ }
+ }
+
+ ++ks;
+
+ if (wants) {
+
+/* Compute the reciprocal condition number of the k-th */
+/* eigenvalue. */
+
+ if (! pair) {
+
+/* Real eigenvalue. */
+
+ prod = sdot_(n, &vr[ks * vr_dim1 + 1], &c__1, &vl[ks *
+ vl_dim1 + 1], &c__1);
+ rnrm = snrm2_(n, &vr[ks * vr_dim1 + 1], &c__1);
+ lnrm = snrm2_(n, &vl[ks * vl_dim1 + 1], &c__1);
+ s[ks] = dabs(prod) / (rnrm * lnrm);
+ } else {
+
+/* Complex eigenvalue. */
+
+ prod1 = sdot_(n, &vr[ks * vr_dim1 + 1], &c__1, &vl[ks *
+ vl_dim1 + 1], &c__1);
+ prod1 += sdot_(n, &vr[(ks + 1) * vr_dim1 + 1], &c__1, &vl[(ks
+ + 1) * vl_dim1 + 1], &c__1);
+ prod2 = sdot_(n, &vl[ks * vl_dim1 + 1], &c__1, &vr[(ks + 1) *
+ vr_dim1 + 1], &c__1);
+ prod2 -= sdot_(n, &vl[(ks + 1) * vl_dim1 + 1], &c__1, &vr[ks *
+ vr_dim1 + 1], &c__1);
+ r__1 = snrm2_(n, &vr[ks * vr_dim1 + 1], &c__1);
+ r__2 = snrm2_(n, &vr[(ks + 1) * vr_dim1 + 1], &c__1);
+ rnrm = slapy2_(&r__1, &r__2);
+ r__1 = snrm2_(n, &vl[ks * vl_dim1 + 1], &c__1);
+ r__2 = snrm2_(n, &vl[(ks + 1) * vl_dim1 + 1], &c__1);
+ lnrm = slapy2_(&r__1, &r__2);
+ cond = slapy2_(&prod1, &prod2) / (rnrm * lnrm);
+ s[ks] = cond;
+ s[ks + 1] = cond;
+ }
+ }
+
+ if (wantsp) {
+
+/* Estimate the reciprocal condition number of the k-th */
+/* eigenvector. */
+
+/* Copy the matrix T to the array WORK and swap the diagonal */
+/* block beginning at T(k,k) to the (1,1) position. */
+
+ slacpy_("Full", n, n, &t[t_offset], ldt, &work[work_offset],
+ ldwork);
+ ifst = k;
+ ilst = 1;
+ strexc_("No Q", n, &work[work_offset], ldwork, dummy, &c__1, &
+ ifst, &ilst, &work[(*n + 1) * work_dim1 + 1], &ierr);
+
+ if (ierr == 1 || ierr == 2) {
+
+/* Could not swap because blocks not well separated */
+
+ scale = 1.f;
+ est = bignum;
+ } else {
+
+/* Reordering successful */
+
+ if (work[work_dim1 + 2] == 0.f) {
+
+/* Form C = T22 - lambda*I in WORK(2:N,2:N). */
+
+ i__2 = *n;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ work[i__ + i__ * work_dim1] -= work[work_dim1 + 1];
+/* L20: */
+ }
+ n2 = 1;
+ nn = *n - 1;
+ } else {
+
+/* Triangularize the 2 by 2 block by unitary */
+/* transformation U = [ cs i*ss ] */
+/* [ i*ss cs ]. */
+/* such that the (1,1) position of WORK is complex */
+/* eigenvalue lambda with positive imaginary part. (2,2) */
+/* position of WORK is the complex eigenvalue lambda */
+/* with negative imaginary part. */
+
+ mu = sqrt((r__1 = work[(work_dim1 << 1) + 1], dabs(r__1)))
+ * sqrt((r__2 = work[work_dim1 + 2], dabs(r__2)));
+ delta = slapy2_(&mu, &work[work_dim1 + 2]);
+ cs = mu / delta;
+ sn = -work[work_dim1 + 2] / delta;
+
+/* Form */
+
+/* C' = WORK(2:N,2:N) + i*[rwork(1) ..... rwork(n-1) ] */
+/* [ mu ] */
+/* [ .. ] */
+/* [ .. ] */
+/* [ mu ] */
+/* where C' is conjugate transpose of complex matrix C, */
+/* and RWORK is stored starting in the N+1-st column of */
+/* WORK. */
+
+ i__2 = *n;
+ for (j = 3; j <= i__2; ++j) {
+ work[j * work_dim1 + 2] = cs * work[j * work_dim1 + 2]
+ ;
+ work[j + j * work_dim1] -= work[work_dim1 + 1];
+/* L30: */
+ }
+ work[(work_dim1 << 1) + 2] = 0.f;
+
+ work[(*n + 1) * work_dim1 + 1] = mu * 2.f;
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ work[i__ + (*n + 1) * work_dim1] = sn * work[(i__ + 1)
+ * work_dim1 + 1];
+/* L40: */
+ }
+ n2 = 2;
+ nn = *n - 1 << 1;
+ }
+
+/* Estimate norm(inv(C')) */
+
+ est = 0.f;
+ kase = 0;
+L50:
+ slacn2_(&nn, &work[(*n + 2) * work_dim1 + 1], &work[(*n + 4) *
+ work_dim1 + 1], &iwork[1], &est, &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+ if (n2 == 1) {
+
+/* Real eigenvalue: solve C'*x = scale*c. */
+
+ i__2 = *n - 1;
+ slaqtr_(&c_true, &c_true, &i__2, &work[(work_dim1
+ << 1) + 2], ldwork, dummy, &dumm, &scale,
+ &work[(*n + 4) * work_dim1 + 1], &work[(*
+ n + 6) * work_dim1 + 1], &ierr);
+ } else {
+
+/* Complex eigenvalue: solve */
+/* C'*(p+iq) = scale*(c+id) in real arithmetic. */
+
+ i__2 = *n - 1;
+ slaqtr_(&c_true, &c_false, &i__2, &work[(
+ work_dim1 << 1) + 2], ldwork, &work[(*n +
+ 1) * work_dim1 + 1], &mu, &scale, &work[(*
+ n + 4) * work_dim1 + 1], &work[(*n + 6) *
+ work_dim1 + 1], &ierr);
+ }
+ } else {
+ if (n2 == 1) {
+
+/* Real eigenvalue: solve C*x = scale*c. */
+
+ i__2 = *n - 1;
+ slaqtr_(&c_false, &c_true, &i__2, &work[(
+ work_dim1 << 1) + 2], ldwork, dummy, &
+ dumm, &scale, &work[(*n + 4) * work_dim1
+ + 1], &work[(*n + 6) * work_dim1 + 1], &
+ ierr);
+ } else {
+
+/* Complex eigenvalue: solve */
+/* C*(p+iq) = scale*(c+id) in real arithmetic. */
+
+ i__2 = *n - 1;
+ slaqtr_(&c_false, &c_false, &i__2, &work[(
+ work_dim1 << 1) + 2], ldwork, &work[(*n +
+ 1) * work_dim1 + 1], &mu, &scale, &work[(*
+ n + 4) * work_dim1 + 1], &work[(*n + 6) *
+ work_dim1 + 1], &ierr);
+
+ }
+ }
+
+ goto L50;
+ }
+ }
+
+ sep[ks] = scale / dmax(est,smlnum);
+ if (pair) {
+ sep[ks + 1] = sep[ks];
+ }
+ }
+
+ if (pair) {
+ ++ks;
+ }
+
+L60:
+ ;
+ }
+ return 0;
+
+/* End of STRSNA */
+
+} /* strsna_ */
diff --git a/contrib/libs/clapack/strsyl.c b/contrib/libs/clapack/strsyl.c
new file mode 100644
index 0000000000..42a1b4cb31
--- /dev/null
+++ b/contrib/libs/clapack/strsyl.c
@@ -0,0 +1,1316 @@
+/* strsyl.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static logical c_false = FALSE_;
+static integer c__2 = 2;
+static real c_b26 = 1.f;
+static real c_b30 = 0.f;
+static logical c_true = TRUE_;
+
+/* Subroutine */ int strsyl_(char *trana, char *tranb, integer *isgn, integer
+ *m, integer *n, real *a, integer *lda, real *b, integer *ldb, real *
+ c__, integer *ldc, real *scale, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4;
+ real r__1, r__2;
+
+ /* Local variables */
+ integer j, k, l;
+ real x[4] /* was [2][2] */;
+ integer k1, k2, l1, l2;
+ real a11, db, da11, vec[4] /* was [2][2] */, dum[1], eps, sgn;
+ integer ierr;
+ real smin;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ real suml, sumr;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ integer knext, lnext;
+ real xnorm;
+ extern /* Subroutine */ int slaln2_(logical *, integer *, integer *, real
+ *, real *, real *, integer *, real *, real *, real *, integer *,
+ real *, real *, real *, integer *, real *, real *, integer *),
+ slasy2_(logical *, logical *, integer *, integer *, integer *,
+ real *, integer *, real *, integer *, real *, integer *, real *,
+ real *, integer *, real *, integer *), slabad_(real *, real *);
+ real scaloc;
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ real bignum;
+ logical notrna, notrnb;
+ real smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STRSYL solves the real Sylvester matrix equation: */
+
+/* op(A)*X + X*op(B) = scale*C or */
+/* op(A)*X - X*op(B) = scale*C, */
+
+/* where op(A) = A or A**T, and A and B are both upper quasi- */
+/* triangular. A is M-by-M and B is N-by-N; the right hand side C and */
+/* the solution X are M-by-N; and scale is an output scale factor, set */
+/* <= 1 to avoid overflow in X. */
+
+/* A and B must be in Schur canonical form (as returned by SHSEQR), that */
+/* is, block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; */
+/* each 2-by-2 diagonal block has its diagonal elements equal and its */
+/* off-diagonal elements of opposite sign. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANA (input) CHARACTER*1 */
+/* Specifies the option op(A): */
+/* = 'N': op(A) = A (No transpose) */
+/* = 'T': op(A) = A**T (Transpose) */
+/* = 'C': op(A) = A**H (Conjugate transpose = Transpose) */
+
+/* TRANB (input) CHARACTER*1 */
+/* Specifies the option op(B): */
+/* = 'N': op(B) = B (No transpose) */
+/* = 'T': op(B) = B**T (Transpose) */
+/* = 'C': op(B) = B**H (Conjugate transpose = Transpose) */
+
+/* ISGN (input) INTEGER */
+/* Specifies the sign in the equation: */
+/* = +1: solve op(A)*X + X*op(B) = scale*C */
+/* = -1: solve op(A)*X - X*op(B) = scale*C */
+
+/* M (input) INTEGER */
+/* The order of the matrix A, and the number of rows in the */
+/* matrices X and C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The order of the matrix B, and the number of columns in the */
+/* matrices X and C. N >= 0. */
+
+/* A (input) REAL array, dimension (LDA,M) */
+/* The upper quasi-triangular matrix A, in Schur canonical form. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input) REAL array, dimension (LDB,N) */
+/* The upper quasi-triangular matrix B, in Schur canonical form. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* C (input/output) REAL array, dimension (LDC,N) */
+/* On entry, the M-by-N right hand side matrix C. */
+/* On exit, C is overwritten by the solution matrix X. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M) */
+
+/* SCALE (output) REAL */
+/* The scale factor, scale, set <= 1 to avoid overflow in X. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* = 1: A and B have common or very close eigenvalues; perturbed */
+/* values were used to solve the equation (but the matrices */
+/* A and B are unchanged). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and Test input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ notrna = lsame_(trana, "N");
+ notrnb = lsame_(tranb, "N");
+
+ *info = 0;
+ if (! notrna && ! lsame_(trana, "T") && ! lsame_(
+ trana, "C")) {
+ *info = -1;
+ } else if (! notrnb && ! lsame_(tranb, "T") && !
+ lsame_(tranb, "C")) {
+ *info = -2;
+ } else if (*isgn != 1 && *isgn != -1) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*m)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STRSYL", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *scale = 1.f;
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Set constants to control overflow */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum = smlnum * (real) (*m * *n) / eps;
+ bignum = 1.f / smlnum;
+
+/* Computing MAX */
+ r__1 = smlnum, r__2 = eps * slange_("M", m, m, &a[a_offset], lda, dum), r__1 = max(r__1,r__2), r__2 = eps * slange_("M", n, n,
+ &b[b_offset], ldb, dum);
+ smin = dmax(r__1,r__2);
+
+ sgn = (real) (*isgn);
+
+ if (notrna && notrnb) {
+
+/* Solve A*X + ISGN*X*B = scale*C. */
+
+/* The (K,L)th block of X is determined starting from */
+/* bottom-left corner column by column by */
+
+/* A(K,K)*X(K,L) + ISGN*X(K,L)*B(L,L) = C(K,L) - R(K,L) */
+
+/* Where */
+/* M L-1 */
+/* R(K,L) = SUM [A(K,I)*X(I,L)] + ISGN*SUM [X(K,J)*B(J,L)]. */
+/* I=K+1 J=1 */
+
+/* Start column loop (index = L) */
+/* L1 (L2) : column index of the first (first) row of X(K,L). */
+
+ lnext = 1;
+ i__1 = *n;
+ for (l = 1; l <= i__1; ++l) {
+ if (l < lnext) {
+ goto L70;
+ }
+ if (l == *n) {
+ l1 = l;
+ l2 = l;
+ } else {
+ if (b[l + 1 + l * b_dim1] != 0.f) {
+ l1 = l;
+ l2 = l + 1;
+ lnext = l + 2;
+ } else {
+ l1 = l;
+ l2 = l;
+ lnext = l + 1;
+ }
+ }
+
+/* Start row loop (index = K) */
+/* K1 (K2): row index of the first (last) row of X(K,L). */
+
+ knext = *m;
+ for (k = *m; k >= 1; --k) {
+ if (k > knext) {
+ goto L60;
+ }
+ if (k == 1) {
+ k1 = k;
+ k2 = k;
+ } else {
+ if (a[k + (k - 1) * a_dim1] != 0.f) {
+ k1 = k - 1;
+ k2 = k;
+ knext = k - 2;
+ } else {
+ k1 = k;
+ k2 = k;
+ knext = k - 1;
+ }
+ }
+
+ if (l1 == l2 && k1 == k2) {
+ i__2 = *m - k1;
+/* Computing MIN */
+ i__3 = k1 + 1;
+/* Computing MIN */
+ i__4 = k1 + 1;
+ suml = sdot_(&i__2, &a[k1 + min(i__3, *m)* a_dim1], lda, &
+ c__[min(i__4, *m)+ l1 * c_dim1], &c__1);
+ i__2 = l1 - 1;
+ sumr = sdot_(&i__2, &c__[k1 + c_dim1], ldc, &b[l1 *
+ b_dim1 + 1], &c__1);
+ vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+ scaloc = 1.f;
+
+ a11 = a[k1 + k1 * a_dim1] + sgn * b[l1 + l1 * b_dim1];
+ da11 = dabs(a11);
+ if (da11 <= smin) {
+ a11 = smin;
+ da11 = smin;
+ *info = 1;
+ }
+ db = dabs(vec[0]);
+ if (da11 < 1.f && db > 1.f) {
+ if (db > bignum * da11) {
+ scaloc = 1.f / db;
+ }
+ }
+ x[0] = vec[0] * scaloc / a11;
+
+ if (scaloc != 1.f) {
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ sscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L10: */
+ }
+ *scale *= scaloc;
+ }
+ c__[k1 + l1 * c_dim1] = x[0];
+
+ } else if (l1 == l2 && k1 != k2) {
+
+ i__2 = *m - k2;
+/* Computing MIN */
+ i__3 = k2 + 1;
+/* Computing MIN */
+ i__4 = k2 + 1;
+ suml = sdot_(&i__2, &a[k1 + min(i__3, *m)* a_dim1], lda, &
+ c__[min(i__4, *m)+ l1 * c_dim1], &c__1);
+ i__2 = l1 - 1;
+ sumr = sdot_(&i__2, &c__[k1 + c_dim1], ldc, &b[l1 *
+ b_dim1 + 1], &c__1);
+ vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+
+ i__2 = *m - k2;
+/* Computing MIN */
+ i__3 = k2 + 1;
+/* Computing MIN */
+ i__4 = k2 + 1;
+ suml = sdot_(&i__2, &a[k2 + min(i__3, *m)* a_dim1], lda, &
+ c__[min(i__4, *m)+ l1 * c_dim1], &c__1);
+ i__2 = l1 - 1;
+ sumr = sdot_(&i__2, &c__[k2 + c_dim1], ldc, &b[l1 *
+ b_dim1 + 1], &c__1);
+ vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr);
+
+ r__1 = -sgn * b[l1 + l1 * b_dim1];
+ slaln2_(&c_false, &c__2, &c__1, &smin, &c_b26, &a[k1 + k1
+ * a_dim1], lda, &c_b26, &c_b26, vec, &c__2, &r__1,
+ &c_b30, x, &c__2, &scaloc, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 1;
+ }
+
+ if (scaloc != 1.f) {
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ sscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L20: */
+ }
+ *scale *= scaloc;
+ }
+ c__[k1 + l1 * c_dim1] = x[0];
+ c__[k2 + l1 * c_dim1] = x[1];
+
+ } else if (l1 != l2 && k1 == k2) {
+
+ i__2 = *m - k1;
+/* Computing MIN */
+ i__3 = k1 + 1;
+/* Computing MIN */
+ i__4 = k1 + 1;
+ suml = sdot_(&i__2, &a[k1 + min(i__3, *m)* a_dim1], lda, &
+ c__[min(i__4, *m)+ l1 * c_dim1], &c__1);
+ i__2 = l1 - 1;
+ sumr = sdot_(&i__2, &c__[k1 + c_dim1], ldc, &b[l1 *
+ b_dim1 + 1], &c__1);
+ vec[0] = sgn * (c__[k1 + l1 * c_dim1] - (suml + sgn *
+ sumr));
+
+ i__2 = *m - k1;
+/* Computing MIN */
+ i__3 = k1 + 1;
+/* Computing MIN */
+ i__4 = k1 + 1;
+ suml = sdot_(&i__2, &a[k1 + min(i__3, *m)* a_dim1], lda, &
+ c__[min(i__4, *m)+ l2 * c_dim1], &c__1);
+ i__2 = l1 - 1;
+ sumr = sdot_(&i__2, &c__[k1 + c_dim1], ldc, &b[l2 *
+ b_dim1 + 1], &c__1);
+ vec[1] = sgn * (c__[k1 + l2 * c_dim1] - (suml + sgn *
+ sumr));
+
+ r__1 = -sgn * a[k1 + k1 * a_dim1];
+ slaln2_(&c_true, &c__2, &c__1, &smin, &c_b26, &b[l1 + l1 *
+ b_dim1], ldb, &c_b26, &c_b26, vec, &c__2, &r__1,
+ &c_b30, x, &c__2, &scaloc, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 1;
+ }
+
+ if (scaloc != 1.f) {
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ sscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L40: */
+ }
+ *scale *= scaloc;
+ }
+ c__[k1 + l1 * c_dim1] = x[0];
+ c__[k1 + l2 * c_dim1] = x[1];
+
+ } else if (l1 != l2 && k1 != k2) {
+
+ i__2 = *m - k2;
+/* Computing MIN */
+ i__3 = k2 + 1;
+/* Computing MIN */
+ i__4 = k2 + 1;
+ suml = sdot_(&i__2, &a[k1 + min(i__3, *m)* a_dim1], lda, &
+ c__[min(i__4, *m)+ l1 * c_dim1], &c__1);
+ i__2 = l1 - 1;
+ sumr = sdot_(&i__2, &c__[k1 + c_dim1], ldc, &b[l1 *
+ b_dim1 + 1], &c__1);
+ vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+
+ i__2 = *m - k2;
+/* Computing MIN */
+ i__3 = k2 + 1;
+/* Computing MIN */
+ i__4 = k2 + 1;
+ suml = sdot_(&i__2, &a[k1 + min(i__3, *m)* a_dim1], lda, &
+ c__[min(i__4, *m)+ l2 * c_dim1], &c__1);
+ i__2 = l1 - 1;
+ sumr = sdot_(&i__2, &c__[k1 + c_dim1], ldc, &b[l2 *
+ b_dim1 + 1], &c__1);
+ vec[2] = c__[k1 + l2 * c_dim1] - (suml + sgn * sumr);
+
+ i__2 = *m - k2;
+/* Computing MIN */
+ i__3 = k2 + 1;
+/* Computing MIN */
+ i__4 = k2 + 1;
+ suml = sdot_(&i__2, &a[k2 + min(i__3, *m)* a_dim1], lda, &
+ c__[min(i__4, *m)+ l1 * c_dim1], &c__1);
+ i__2 = l1 - 1;
+ sumr = sdot_(&i__2, &c__[k2 + c_dim1], ldc, &b[l1 *
+ b_dim1 + 1], &c__1);
+ vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr);
+
+ i__2 = *m - k2;
+/* Computing MIN */
+ i__3 = k2 + 1;
+/* Computing MIN */
+ i__4 = k2 + 1;
+ suml = sdot_(&i__2, &a[k2 + min(i__3, *m)* a_dim1], lda, &
+ c__[min(i__4, *m)+ l2 * c_dim1], &c__1);
+ i__2 = l1 - 1;
+ sumr = sdot_(&i__2, &c__[k2 + c_dim1], ldc, &b[l2 *
+ b_dim1 + 1], &c__1);
+ vec[3] = c__[k2 + l2 * c_dim1] - (suml + sgn * sumr);
+
+ slasy2_(&c_false, &c_false, isgn, &c__2, &c__2, &a[k1 +
+ k1 * a_dim1], lda, &b[l1 + l1 * b_dim1], ldb, vec,
+ &c__2, &scaloc, x, &c__2, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 1;
+ }
+
+ if (scaloc != 1.f) {
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ sscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L50: */
+ }
+ *scale *= scaloc;
+ }
+ c__[k1 + l1 * c_dim1] = x[0];
+ c__[k1 + l2 * c_dim1] = x[2];
+ c__[k2 + l1 * c_dim1] = x[1];
+ c__[k2 + l2 * c_dim1] = x[3];
+ }
+
+L60:
+ ;
+ }
+
+L70:
+ ;
+ }
+
+ } else if (! notrna && notrnb) {
+
+/* Solve A' *X + ISGN*X*B = scale*C. */
+
+/* The (K,L)th block of X is determined starting from */
+/* upper-left corner column by column by */
+
+/* A(K,K)'*X(K,L) + ISGN*X(K,L)*B(L,L) = C(K,L) - R(K,L) */
+
+/* Where */
+/* K-1 L-1 */
+/* R(K,L) = SUM [A(I,K)'*X(I,L)] +ISGN*SUM [X(K,J)*B(J,L)] */
+/* I=1 J=1 */
+
+/* Start column loop (index = L) */
+/* L1 (L2): column index of the first (last) row of X(K,L) */
+
+ lnext = 1;
+ i__1 = *n;
+ for (l = 1; l <= i__1; ++l) {
+ if (l < lnext) {
+ goto L130;
+ }
+ if (l == *n) {
+ l1 = l;
+ l2 = l;
+ } else {
+ if (b[l + 1 + l * b_dim1] != 0.f) {
+ l1 = l;
+ l2 = l + 1;
+ lnext = l + 2;
+ } else {
+ l1 = l;
+ l2 = l;
+ lnext = l + 1;
+ }
+ }
+
+/* Start row loop (index = K) */
+/* K1 (K2): row index of the first (last) row of X(K,L) */
+
+ knext = 1;
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ if (k < knext) {
+ goto L120;
+ }
+ if (k == *m) {
+ k1 = k;
+ k2 = k;
+ } else {
+ if (a[k + 1 + k * a_dim1] != 0.f) {
+ k1 = k;
+ k2 = k + 1;
+ knext = k + 2;
+ } else {
+ k1 = k;
+ k2 = k;
+ knext = k + 1;
+ }
+ }
+
+ if (l1 == l2 && k1 == k2) {
+ i__3 = k1 - 1;
+ suml = sdot_(&i__3, &a[k1 * a_dim1 + 1], &c__1, &c__[l1 *
+ c_dim1 + 1], &c__1);
+ i__3 = l1 - 1;
+ sumr = sdot_(&i__3, &c__[k1 + c_dim1], ldc, &b[l1 *
+ b_dim1 + 1], &c__1);
+ vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+ scaloc = 1.f;
+
+ a11 = a[k1 + k1 * a_dim1] + sgn * b[l1 + l1 * b_dim1];
+ da11 = dabs(a11);
+ if (da11 <= smin) {
+ a11 = smin;
+ da11 = smin;
+ *info = 1;
+ }
+ db = dabs(vec[0]);
+ if (da11 < 1.f && db > 1.f) {
+ if (db > bignum * da11) {
+ scaloc = 1.f / db;
+ }
+ }
+ x[0] = vec[0] * scaloc / a11;
+
+ if (scaloc != 1.f) {
+ i__3 = *n;
+ for (j = 1; j <= i__3; ++j) {
+ sscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L80: */
+ }
+ *scale *= scaloc;
+ }
+ c__[k1 + l1 * c_dim1] = x[0];
+
+ } else if (l1 == l2 && k1 != k2) {
+
+ i__3 = k1 - 1;
+ suml = sdot_(&i__3, &a[k1 * a_dim1 + 1], &c__1, &c__[l1 *
+ c_dim1 + 1], &c__1);
+ i__3 = l1 - 1;
+ sumr = sdot_(&i__3, &c__[k1 + c_dim1], ldc, &b[l1 *
+ b_dim1 + 1], &c__1);
+ vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+
+ i__3 = k1 - 1;
+ suml = sdot_(&i__3, &a[k2 * a_dim1 + 1], &c__1, &c__[l1 *
+ c_dim1 + 1], &c__1);
+ i__3 = l1 - 1;
+ sumr = sdot_(&i__3, &c__[k2 + c_dim1], ldc, &b[l1 *
+ b_dim1 + 1], &c__1);
+ vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr);
+
+ r__1 = -sgn * b[l1 + l1 * b_dim1];
+ slaln2_(&c_true, &c__2, &c__1, &smin, &c_b26, &a[k1 + k1 *
+ a_dim1], lda, &c_b26, &c_b26, vec, &c__2, &r__1,
+ &c_b30, x, &c__2, &scaloc, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 1;
+ }
+
+ if (scaloc != 1.f) {
+ i__3 = *n;
+ for (j = 1; j <= i__3; ++j) {
+ sscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L90: */
+ }
+ *scale *= scaloc;
+ }
+ c__[k1 + l1 * c_dim1] = x[0];
+ c__[k2 + l1 * c_dim1] = x[1];
+
+ } else if (l1 != l2 && k1 == k2) {
+
+ i__3 = k1 - 1;
+ suml = sdot_(&i__3, &a[k1 * a_dim1 + 1], &c__1, &c__[l1 *
+ c_dim1 + 1], &c__1);
+ i__3 = l1 - 1;
+ sumr = sdot_(&i__3, &c__[k1 + c_dim1], ldc, &b[l1 *
+ b_dim1 + 1], &c__1);
+ vec[0] = sgn * (c__[k1 + l1 * c_dim1] - (suml + sgn *
+ sumr));
+
+ i__3 = k1 - 1;
+ suml = sdot_(&i__3, &a[k1 * a_dim1 + 1], &c__1, &c__[l2 *
+ c_dim1 + 1], &c__1);
+ i__3 = l1 - 1;
+ sumr = sdot_(&i__3, &c__[k1 + c_dim1], ldc, &b[l2 *
+ b_dim1 + 1], &c__1);
+ vec[1] = sgn * (c__[k1 + l2 * c_dim1] - (suml + sgn *
+ sumr));
+
+ r__1 = -sgn * a[k1 + k1 * a_dim1];
+ slaln2_(&c_true, &c__2, &c__1, &smin, &c_b26, &b[l1 + l1 *
+ b_dim1], ldb, &c_b26, &c_b26, vec, &c__2, &r__1,
+ &c_b30, x, &c__2, &scaloc, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 1;
+ }
+
+ if (scaloc != 1.f) {
+ i__3 = *n;
+ for (j = 1; j <= i__3; ++j) {
+ sscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L100: */
+ }
+ *scale *= scaloc;
+ }
+ c__[k1 + l1 * c_dim1] = x[0];
+ c__[k1 + l2 * c_dim1] = x[1];
+
+ } else if (l1 != l2 && k1 != k2) {
+
+ i__3 = k1 - 1;
+ suml = sdot_(&i__3, &a[k1 * a_dim1 + 1], &c__1, &c__[l1 *
+ c_dim1 + 1], &c__1);
+ i__3 = l1 - 1;
+ sumr = sdot_(&i__3, &c__[k1 + c_dim1], ldc, &b[l1 *
+ b_dim1 + 1], &c__1);
+ vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+
+ i__3 = k1 - 1;
+ suml = sdot_(&i__3, &a[k1 * a_dim1 + 1], &c__1, &c__[l2 *
+ c_dim1 + 1], &c__1);
+ i__3 = l1 - 1;
+ sumr = sdot_(&i__3, &c__[k1 + c_dim1], ldc, &b[l2 *
+ b_dim1 + 1], &c__1);
+ vec[2] = c__[k1 + l2 * c_dim1] - (suml + sgn * sumr);
+
+ i__3 = k1 - 1;
+ suml = sdot_(&i__3, &a[k2 * a_dim1 + 1], &c__1, &c__[l1 *
+ c_dim1 + 1], &c__1);
+ i__3 = l1 - 1;
+ sumr = sdot_(&i__3, &c__[k2 + c_dim1], ldc, &b[l1 *
+ b_dim1 + 1], &c__1);
+ vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr);
+
+ i__3 = k1 - 1;
+ suml = sdot_(&i__3, &a[k2 * a_dim1 + 1], &c__1, &c__[l2 *
+ c_dim1 + 1], &c__1);
+ i__3 = l1 - 1;
+ sumr = sdot_(&i__3, &c__[k2 + c_dim1], ldc, &b[l2 *
+ b_dim1 + 1], &c__1);
+ vec[3] = c__[k2 + l2 * c_dim1] - (suml + sgn * sumr);
+
+ slasy2_(&c_true, &c_false, isgn, &c__2, &c__2, &a[k1 + k1
+ * a_dim1], lda, &b[l1 + l1 * b_dim1], ldb, vec, &
+ c__2, &scaloc, x, &c__2, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 1;
+ }
+
+ if (scaloc != 1.f) {
+ i__3 = *n;
+ for (j = 1; j <= i__3; ++j) {
+ sscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L110: */
+ }
+ *scale *= scaloc;
+ }
+ c__[k1 + l1 * c_dim1] = x[0];
+ c__[k1 + l2 * c_dim1] = x[2];
+ c__[k2 + l1 * c_dim1] = x[1];
+ c__[k2 + l2 * c_dim1] = x[3];
+ }
+
+L120:
+ ;
+ }
+L130:
+ ;
+ }
+
+ } else if (! notrna && ! notrnb) {
+
+/* Solve A'*X + ISGN*X*B' = scale*C. */
+
+/* The (K,L)th block of X is determined starting from */
+/* top-right corner column by column by */
+
+/* A(K,K)'*X(K,L) + ISGN*X(K,L)*B(L,L)' = C(K,L) - R(K,L) */
+
+/* Where */
+/* K-1 N */
+/* R(K,L) = SUM [A(I,K)'*X(I,L)] + ISGN*SUM [X(K,J)*B(L,J)']. */
+/* I=1 J=L+1 */
+
+/* Start column loop (index = L) */
+/* L1 (L2): column index of the first (last) row of X(K,L) */
+
+ lnext = *n;
+ for (l = *n; l >= 1; --l) {
+ if (l > lnext) {
+ goto L190;
+ }
+ if (l == 1) {
+ l1 = l;
+ l2 = l;
+ } else {
+ if (b[l + (l - 1) * b_dim1] != 0.f) {
+ l1 = l - 1;
+ l2 = l;
+ lnext = l - 2;
+ } else {
+ l1 = l;
+ l2 = l;
+ lnext = l - 1;
+ }
+ }
+
+/* Start row loop (index = K) */
+/* K1 (K2): row index of the first (last) row of X(K,L) */
+
+ knext = 1;
+ i__1 = *m;
+ for (k = 1; k <= i__1; ++k) {
+ if (k < knext) {
+ goto L180;
+ }
+ if (k == *m) {
+ k1 = k;
+ k2 = k;
+ } else {
+ if (a[k + 1 + k * a_dim1] != 0.f) {
+ k1 = k;
+ k2 = k + 1;
+ knext = k + 2;
+ } else {
+ k1 = k;
+ k2 = k;
+ knext = k + 1;
+ }
+ }
+
+ if (l1 == l2 && k1 == k2) {
+ i__2 = k1 - 1;
+ suml = sdot_(&i__2, &a[k1 * a_dim1 + 1], &c__1, &c__[l1 *
+ c_dim1 + 1], &c__1);
+ i__2 = *n - l1;
+/* Computing MIN */
+ i__3 = l1 + 1;
+/* Computing MIN */
+ i__4 = l1 + 1;
+ sumr = sdot_(&i__2, &c__[k1 + min(i__3, *n)* c_dim1], ldc,
+ &b[l1 + min(i__4, *n)* b_dim1], ldb);
+ vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+ scaloc = 1.f;
+
+ a11 = a[k1 + k1 * a_dim1] + sgn * b[l1 + l1 * b_dim1];
+ da11 = dabs(a11);
+ if (da11 <= smin) {
+ a11 = smin;
+ da11 = smin;
+ *info = 1;
+ }
+ db = dabs(vec[0]);
+ if (da11 < 1.f && db > 1.f) {
+ if (db > bignum * da11) {
+ scaloc = 1.f / db;
+ }
+ }
+ x[0] = vec[0] * scaloc / a11;
+
+ if (scaloc != 1.f) {
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ sscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L140: */
+ }
+ *scale *= scaloc;
+ }
+ c__[k1 + l1 * c_dim1] = x[0];
+
+ } else if (l1 == l2 && k1 != k2) {
+
+ i__2 = k1 - 1;
+ suml = sdot_(&i__2, &a[k1 * a_dim1 + 1], &c__1, &c__[l1 *
+ c_dim1 + 1], &c__1);
+ i__2 = *n - l2;
+/* Computing MIN */
+ i__3 = l2 + 1;
+/* Computing MIN */
+ i__4 = l2 + 1;
+ sumr = sdot_(&i__2, &c__[k1 + min(i__3, *n)* c_dim1], ldc,
+ &b[l1 + min(i__4, *n)* b_dim1], ldb);
+ vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+
+ i__2 = k1 - 1;
+ suml = sdot_(&i__2, &a[k2 * a_dim1 + 1], &c__1, &c__[l1 *
+ c_dim1 + 1], &c__1);
+ i__2 = *n - l2;
+/* Computing MIN */
+ i__3 = l2 + 1;
+/* Computing MIN */
+ i__4 = l2 + 1;
+ sumr = sdot_(&i__2, &c__[k2 + min(i__3, *n)* c_dim1], ldc,
+ &b[l1 + min(i__4, *n)* b_dim1], ldb);
+ vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr);
+
+ r__1 = -sgn * b[l1 + l1 * b_dim1];
+ slaln2_(&c_true, &c__2, &c__1, &smin, &c_b26, &a[k1 + k1 *
+ a_dim1], lda, &c_b26, &c_b26, vec, &c__2, &r__1,
+ &c_b30, x, &c__2, &scaloc, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 1;
+ }
+
+ if (scaloc != 1.f) {
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ sscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L150: */
+ }
+ *scale *= scaloc;
+ }
+ c__[k1 + l1 * c_dim1] = x[0];
+ c__[k2 + l1 * c_dim1] = x[1];
+
+ } else if (l1 != l2 && k1 == k2) {
+
+ i__2 = k1 - 1;
+ suml = sdot_(&i__2, &a[k1 * a_dim1 + 1], &c__1, &c__[l1 *
+ c_dim1 + 1], &c__1);
+ i__2 = *n - l2;
+/* Computing MIN */
+ i__3 = l2 + 1;
+/* Computing MIN */
+ i__4 = l2 + 1;
+ sumr = sdot_(&i__2, &c__[k1 + min(i__3, *n)* c_dim1], ldc,
+ &b[l1 + min(i__4, *n)* b_dim1], ldb);
+ vec[0] = sgn * (c__[k1 + l1 * c_dim1] - (suml + sgn *
+ sumr));
+
+ i__2 = k1 - 1;
+ suml = sdot_(&i__2, &a[k1 * a_dim1 + 1], &c__1, &c__[l2 *
+ c_dim1 + 1], &c__1);
+ i__2 = *n - l2;
+/* Computing MIN */
+ i__3 = l2 + 1;
+/* Computing MIN */
+ i__4 = l2 + 1;
+ sumr = sdot_(&i__2, &c__[k1 + min(i__3, *n)* c_dim1], ldc,
+ &b[l2 + min(i__4, *n)* b_dim1], ldb);
+ vec[1] = sgn * (c__[k1 + l2 * c_dim1] - (suml + sgn *
+ sumr));
+
+ r__1 = -sgn * a[k1 + k1 * a_dim1];
+ slaln2_(&c_false, &c__2, &c__1, &smin, &c_b26, &b[l1 + l1
+ * b_dim1], ldb, &c_b26, &c_b26, vec, &c__2, &r__1,
+ &c_b30, x, &c__2, &scaloc, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 1;
+ }
+
+ if (scaloc != 1.f) {
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ sscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L160: */
+ }
+ *scale *= scaloc;
+ }
+ c__[k1 + l1 * c_dim1] = x[0];
+ c__[k1 + l2 * c_dim1] = x[1];
+
+ } else if (l1 != l2 && k1 != k2) {
+
+ i__2 = k1 - 1;
+ suml = sdot_(&i__2, &a[k1 * a_dim1 + 1], &c__1, &c__[l1 *
+ c_dim1 + 1], &c__1);
+ i__2 = *n - l2;
+/* Computing MIN */
+ i__3 = l2 + 1;
+/* Computing MIN */
+ i__4 = l2 + 1;
+ sumr = sdot_(&i__2, &c__[k1 + min(i__3, *n)* c_dim1], ldc,
+ &b[l1 + min(i__4, *n)* b_dim1], ldb);
+ vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+
+ i__2 = k1 - 1;
+ suml = sdot_(&i__2, &a[k1 * a_dim1 + 1], &c__1, &c__[l2 *
+ c_dim1 + 1], &c__1);
+ i__2 = *n - l2;
+/* Computing MIN */
+ i__3 = l2 + 1;
+/* Computing MIN */
+ i__4 = l2 + 1;
+ sumr = sdot_(&i__2, &c__[k1 + min(i__3, *n)* c_dim1], ldc,
+ &b[l2 + min(i__4, *n)* b_dim1], ldb);
+ vec[2] = c__[k1 + l2 * c_dim1] - (suml + sgn * sumr);
+
+ i__2 = k1 - 1;
+ suml = sdot_(&i__2, &a[k2 * a_dim1 + 1], &c__1, &c__[l1 *
+ c_dim1 + 1], &c__1);
+ i__2 = *n - l2;
+/* Computing MIN */
+ i__3 = l2 + 1;
+/* Computing MIN */
+ i__4 = l2 + 1;
+ sumr = sdot_(&i__2, &c__[k2 + min(i__3, *n)* c_dim1], ldc,
+ &b[l1 + min(i__4, *n)* b_dim1], ldb);
+ vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr);
+
+ i__2 = k1 - 1;
+ suml = sdot_(&i__2, &a[k2 * a_dim1 + 1], &c__1, &c__[l2 *
+ c_dim1 + 1], &c__1);
+ i__2 = *n - l2;
+/* Computing MIN */
+ i__3 = l2 + 1;
+/* Computing MIN */
+ i__4 = l2 + 1;
+ sumr = sdot_(&i__2, &c__[k2 + min(i__3, *n)* c_dim1], ldc,
+ &b[l2 + min(i__4, *n)* b_dim1], ldb);
+ vec[3] = c__[k2 + l2 * c_dim1] - (suml + sgn * sumr);
+
+ slasy2_(&c_true, &c_true, isgn, &c__2, &c__2, &a[k1 + k1 *
+ a_dim1], lda, &b[l1 + l1 * b_dim1], ldb, vec, &
+ c__2, &scaloc, x, &c__2, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 1;
+ }
+
+ if (scaloc != 1.f) {
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ sscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L170: */
+ }
+ *scale *= scaloc;
+ }
+ c__[k1 + l1 * c_dim1] = x[0];
+ c__[k1 + l2 * c_dim1] = x[2];
+ c__[k2 + l1 * c_dim1] = x[1];
+ c__[k2 + l2 * c_dim1] = x[3];
+ }
+
+L180:
+ ;
+ }
+L190:
+ ;
+ }
+
+ } else if (notrna && ! notrnb) {
+
+/* Solve A*X + ISGN*X*B' = scale*C. */
+
+/* The (K,L)th block of X is determined starting from */
+/* bottom-right corner column by column by */
+
+/* A(K,K)*X(K,L) + ISGN*X(K,L)*B(L,L)' = C(K,L) - R(K,L) */
+
+/* Where */
+/* M N */
+/* R(K,L) = SUM [A(K,I)*X(I,L)] + ISGN*SUM [X(K,J)*B(L,J)']. */
+/* I=K+1 J=L+1 */
+
+/* Start column loop (index = L) */
+/* L1 (L2): column index of the first (last) row of X(K,L) */
+
+ lnext = *n;
+ for (l = *n; l >= 1; --l) {
+ if (l > lnext) {
+ goto L250;
+ }
+ if (l == 1) {
+ l1 = l;
+ l2 = l;
+ } else {
+ if (b[l + (l - 1) * b_dim1] != 0.f) {
+ l1 = l - 1;
+ l2 = l;
+ lnext = l - 2;
+ } else {
+ l1 = l;
+ l2 = l;
+ lnext = l - 1;
+ }
+ }
+
+/* Start row loop (index = K) */
+/* K1 (K2): row index of the first (last) row of X(K,L) */
+
+ knext = *m;
+ for (k = *m; k >= 1; --k) {
+ if (k > knext) {
+ goto L240;
+ }
+ if (k == 1) {
+ k1 = k;
+ k2 = k;
+ } else {
+ if (a[k + (k - 1) * a_dim1] != 0.f) {
+ k1 = k - 1;
+ k2 = k;
+ knext = k - 2;
+ } else {
+ k1 = k;
+ k2 = k;
+ knext = k - 1;
+ }
+ }
+
+ if (l1 == l2 && k1 == k2) {
+ i__1 = *m - k1;
+/* Computing MIN */
+ i__2 = k1 + 1;
+/* Computing MIN */
+ i__3 = k1 + 1;
+ suml = sdot_(&i__1, &a[k1 + min(i__2, *m)* a_dim1], lda, &
+ c__[min(i__3, *m)+ l1 * c_dim1], &c__1);
+ i__1 = *n - l1;
+/* Computing MIN */
+ i__2 = l1 + 1;
+/* Computing MIN */
+ i__3 = l1 + 1;
+ sumr = sdot_(&i__1, &c__[k1 + min(i__2, *n)* c_dim1], ldc,
+ &b[l1 + min(i__3, *n)* b_dim1], ldb);
+ vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+ scaloc = 1.f;
+
+ a11 = a[k1 + k1 * a_dim1] + sgn * b[l1 + l1 * b_dim1];
+ da11 = dabs(a11);
+ if (da11 <= smin) {
+ a11 = smin;
+ da11 = smin;
+ *info = 1;
+ }
+ db = dabs(vec[0]);
+ if (da11 < 1.f && db > 1.f) {
+ if (db > bignum * da11) {
+ scaloc = 1.f / db;
+ }
+ }
+ x[0] = vec[0] * scaloc / a11;
+
+ if (scaloc != 1.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L200: */
+ }
+ *scale *= scaloc;
+ }
+ c__[k1 + l1 * c_dim1] = x[0];
+
+ } else if (l1 == l2 && k1 != k2) {
+
+ i__1 = *m - k2;
+/* Computing MIN */
+ i__2 = k2 + 1;
+/* Computing MIN */
+ i__3 = k2 + 1;
+ suml = sdot_(&i__1, &a[k1 + min(i__2, *m)* a_dim1], lda, &
+ c__[min(i__3, *m)+ l1 * c_dim1], &c__1);
+ i__1 = *n - l2;
+/* Computing MIN */
+ i__2 = l2 + 1;
+/* Computing MIN */
+ i__3 = l2 + 1;
+ sumr = sdot_(&i__1, &c__[k1 + min(i__2, *n)* c_dim1], ldc,
+ &b[l1 + min(i__3, *n)* b_dim1], ldb);
+ vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+
+ i__1 = *m - k2;
+/* Computing MIN */
+ i__2 = k2 + 1;
+/* Computing MIN */
+ i__3 = k2 + 1;
+ suml = sdot_(&i__1, &a[k2 + min(i__2, *m)* a_dim1], lda, &
+ c__[min(i__3, *m)+ l1 * c_dim1], &c__1);
+ i__1 = *n - l2;
+/* Computing MIN */
+ i__2 = l2 + 1;
+/* Computing MIN */
+ i__3 = l2 + 1;
+ sumr = sdot_(&i__1, &c__[k2 + min(i__2, *n)* c_dim1], ldc,
+ &b[l1 + min(i__3, *n)* b_dim1], ldb);
+ vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr);
+
+ r__1 = -sgn * b[l1 + l1 * b_dim1];
+ slaln2_(&c_false, &c__2, &c__1, &smin, &c_b26, &a[k1 + k1
+ * a_dim1], lda, &c_b26, &c_b26, vec, &c__2, &r__1,
+ &c_b30, x, &c__2, &scaloc, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 1;
+ }
+
+ if (scaloc != 1.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L210: */
+ }
+ *scale *= scaloc;
+ }
+ c__[k1 + l1 * c_dim1] = x[0];
+ c__[k2 + l1 * c_dim1] = x[1];
+
+ } else if (l1 != l2 && k1 == k2) {
+
+ i__1 = *m - k1;
+/* Computing MIN */
+ i__2 = k1 + 1;
+/* Computing MIN */
+ i__3 = k1 + 1;
+ suml = sdot_(&i__1, &a[k1 + min(i__2, *m)* a_dim1], lda, &
+ c__[min(i__3, *m)+ l1 * c_dim1], &c__1);
+ i__1 = *n - l2;
+/* Computing MIN */
+ i__2 = l2 + 1;
+/* Computing MIN */
+ i__3 = l2 + 1;
+ sumr = sdot_(&i__1, &c__[k1 + min(i__2, *n)* c_dim1], ldc,
+ &b[l1 + min(i__3, *n)* b_dim1], ldb);
+ vec[0] = sgn * (c__[k1 + l1 * c_dim1] - (suml + sgn *
+ sumr));
+
+ i__1 = *m - k1;
+/* Computing MIN */
+ i__2 = k1 + 1;
+/* Computing MIN */
+ i__3 = k1 + 1;
+ suml = sdot_(&i__1, &a[k1 + min(i__2, *m)* a_dim1], lda, &
+ c__[min(i__3, *m)+ l2 * c_dim1], &c__1);
+ i__1 = *n - l2;
+/* Computing MIN */
+ i__2 = l2 + 1;
+/* Computing MIN */
+ i__3 = l2 + 1;
+ sumr = sdot_(&i__1, &c__[k1 + min(i__2, *n)* c_dim1], ldc,
+ &b[l2 + min(i__3, *n)* b_dim1], ldb);
+ vec[1] = sgn * (c__[k1 + l2 * c_dim1] - (suml + sgn *
+ sumr));
+
+ r__1 = -sgn * a[k1 + k1 * a_dim1];
+ slaln2_(&c_false, &c__2, &c__1, &smin, &c_b26, &b[l1 + l1
+ * b_dim1], ldb, &c_b26, &c_b26, vec, &c__2, &r__1,
+ &c_b30, x, &c__2, &scaloc, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 1;
+ }
+
+ if (scaloc != 1.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L220: */
+ }
+ *scale *= scaloc;
+ }
+ c__[k1 + l1 * c_dim1] = x[0];
+ c__[k1 + l2 * c_dim1] = x[1];
+
+ } else if (l1 != l2 && k1 != k2) {
+
+ i__1 = *m - k2;
+/* Computing MIN */
+ i__2 = k2 + 1;
+/* Computing MIN */
+ i__3 = k2 + 1;
+ suml = sdot_(&i__1, &a[k1 + min(i__2, *m)* a_dim1], lda, &
+ c__[min(i__3, *m)+ l1 * c_dim1], &c__1);
+ i__1 = *n - l2;
+/* Computing MIN */
+ i__2 = l2 + 1;
+/* Computing MIN */
+ i__3 = l2 + 1;
+ sumr = sdot_(&i__1, &c__[k1 + min(i__2, *n)* c_dim1], ldc,
+ &b[l1 + min(i__3, *n)* b_dim1], ldb);
+ vec[0] = c__[k1 + l1 * c_dim1] - (suml + sgn * sumr);
+
+ i__1 = *m - k2;
+/* Computing MIN */
+ i__2 = k2 + 1;
+/* Computing MIN */
+ i__3 = k2 + 1;
+ suml = sdot_(&i__1, &a[k1 + min(i__2, *m)* a_dim1], lda, &
+ c__[min(i__3, *m)+ l2 * c_dim1], &c__1);
+ i__1 = *n - l2;
+/* Computing MIN */
+ i__2 = l2 + 1;
+/* Computing MIN */
+ i__3 = l2 + 1;
+ sumr = sdot_(&i__1, &c__[k1 + min(i__2, *n)* c_dim1], ldc,
+ &b[l2 + min(i__3, *n)* b_dim1], ldb);
+ vec[2] = c__[k1 + l2 * c_dim1] - (suml + sgn * sumr);
+
+ i__1 = *m - k2;
+/* Computing MIN */
+ i__2 = k2 + 1;
+/* Computing MIN */
+ i__3 = k2 + 1;
+ suml = sdot_(&i__1, &a[k2 + min(i__2, *m)* a_dim1], lda, &
+ c__[min(i__3, *m)+ l1 * c_dim1], &c__1);
+ i__1 = *n - l2;
+/* Computing MIN */
+ i__2 = l2 + 1;
+/* Computing MIN */
+ i__3 = l2 + 1;
+ sumr = sdot_(&i__1, &c__[k2 + min(i__2, *n)* c_dim1], ldc,
+ &b[l1 + min(i__3, *n)* b_dim1], ldb);
+ vec[1] = c__[k2 + l1 * c_dim1] - (suml + sgn * sumr);
+
+ i__1 = *m - k2;
+/* Computing MIN */
+ i__2 = k2 + 1;
+/* Computing MIN */
+ i__3 = k2 + 1;
+ suml = sdot_(&i__1, &a[k2 + min(i__2, *m)* a_dim1], lda, &
+ c__[min(i__3, *m)+ l2 * c_dim1], &c__1);
+ i__1 = *n - l2;
+/* Computing MIN */
+ i__2 = l2 + 1;
+/* Computing MIN */
+ i__3 = l2 + 1;
+ sumr = sdot_(&i__1, &c__[k2 + min(i__2, *n)* c_dim1], ldc,
+ &b[l2 + min(i__3, *n)* b_dim1], ldb);
+ vec[3] = c__[k2 + l2 * c_dim1] - (suml + sgn * sumr);
+
+ slasy2_(&c_false, &c_true, isgn, &c__2, &c__2, &a[k1 + k1
+ * a_dim1], lda, &b[l1 + l1 * b_dim1], ldb, vec, &
+ c__2, &scaloc, x, &c__2, &xnorm, &ierr);
+ if (ierr != 0) {
+ *info = 1;
+ }
+
+ if (scaloc != 1.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L230: */
+ }
+ *scale *= scaloc;
+ }
+ c__[k1 + l1 * c_dim1] = x[0];
+ c__[k1 + l2 * c_dim1] = x[2];
+ c__[k2 + l1 * c_dim1] = x[1];
+ c__[k2 + l2 * c_dim1] = x[3];
+ }
+
+L240:
+ ;
+ }
+L250:
+ ;
+ }
+
+ }
+
+ return 0;
+
+/* End of STRSYL */
+
+} /* strsyl_ */
diff --git a/contrib/libs/clapack/strti2.c b/contrib/libs/clapack/strti2.c
new file mode 100644
index 0000000000..e8edd48b19
--- /dev/null
+++ b/contrib/libs/clapack/strti2.c
@@ -0,0 +1,183 @@
+/* strti2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int strti2_(char *uplo, char *diag, integer *n, real *a,
+ integer *lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer j;
+ real ajj;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ logical upper;
+ extern /* Subroutine */ int strmv_(char *, char *, char *, integer *,
+ real *, integer *, real *, integer *),
+ xerbla_(char *, integer *);
+ logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STRTI2 computes the inverse of a real upper or lower triangular */
+/* matrix. */
+
+/* This is the Level 2 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the triangular matrix A. If UPLO = 'U', the */
+/* leading n by n upper triangular part of the array A contains */
+/* the upper triangular matrix, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of the array A contains */
+/* the lower triangular matrix, and the strictly upper */
+/* triangular part of A is not referenced. If DIAG = 'U', the */
+/* diagonal elements of A are also not referenced and are */
+/* assumed to be 1. */
+
+/* On exit, the (triangular) inverse of the original matrix, in */
+/* the same storage format. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ nounit = lsame_(diag, "N");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STRTI2", &i__1);
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute inverse of upper triangular matrix. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (nounit) {
+ a[j + j * a_dim1] = 1.f / a[j + j * a_dim1];
+ ajj = -a[j + j * a_dim1];
+ } else {
+ ajj = -1.f;
+ }
+
+/* Compute elements 1:j-1 of j-th column. */
+
+ i__2 = j - 1;
+ strmv_("Upper", "No transpose", diag, &i__2, &a[a_offset], lda, &
+ a[j * a_dim1 + 1], &c__1);
+ i__2 = j - 1;
+ sscal_(&i__2, &ajj, &a[j * a_dim1 + 1], &c__1);
+/* L10: */
+ }
+ } else {
+
+/* Compute inverse of lower triangular matrix. */
+
+ for (j = *n; j >= 1; --j) {
+ if (nounit) {
+ a[j + j * a_dim1] = 1.f / a[j + j * a_dim1];
+ ajj = -a[j + j * a_dim1];
+ } else {
+ ajj = -1.f;
+ }
+ if (j < *n) {
+
+/* Compute elements j+1:n of j-th column. */
+
+ i__1 = *n - j;
+ strmv_("Lower", "No transpose", diag, &i__1, &a[j + 1 + (j +
+ 1) * a_dim1], lda, &a[j + 1 + j * a_dim1], &c__1);
+ i__1 = *n - j;
+ sscal_(&i__1, &ajj, &a[j + 1 + j * a_dim1], &c__1);
+ }
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of STRTI2 */
+
+} /* strti2_ */
diff --git a/contrib/libs/clapack/strtri.c b/contrib/libs/clapack/strtri.c
new file mode 100644
index 0000000000..32337204eb
--- /dev/null
+++ b/contrib/libs/clapack/strtri.c
@@ -0,0 +1,242 @@
+/* strtri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static real c_b18 = 1.f;
+static real c_b22 = -1.f;
+
+/* Subroutine */ int strtri_(char *uplo, char *diag, integer *n, real *a,
+ integer *lda, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, i__1, i__2[2], i__3, i__4, i__5;
+ char ch__1[3];
+ ch__1[2] = 0;
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer j, jb, nb, nn;
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern /* Subroutine */ int strmm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+), strsm_(char *, char *, char *,
+ char *, integer *, integer *, real *, real *, integer *, real *,
+ integer *), strti2_(char *, char *
+, integer *, real *, integer *, integer *),
+ xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STRTRI computes the inverse of a real upper or lower triangular */
+/* matrix A. */
+
+/* This is the Level 3 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the triangular matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of the array A contains */
+/* the upper triangular matrix, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of the array A contains */
+/* the lower triangular matrix, and the strictly upper */
+/* triangular part of A is not referenced. If DIAG = 'U', the */
+/* diagonal elements of A are also not referenced and are */
+/* assumed to be 1. */
+/* On exit, the (triangular) inverse of the original matrix, in */
+/* the same storage format. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, A(i,i) is exactly zero. The triangular */
+/* matrix is singular and its inverse can not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ nounit = lsame_(diag, "N");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STRTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check for singularity if non-unit. */
+
+ if (nounit) {
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ if (a[*info + *info * a_dim1] == 0.f) {
+ return 0;
+ }
+/* L10: */
+ }
+ *info = 0;
+ }
+
+/* Determine the block size for this environment. */
+
+/* Writing concatenation */
+ i__2[0] = 1, a__1[0] = uplo;
+ i__2[1] = 1, a__1[1] = diag;
+ s_cat(ch__1, a__1, i__2, &c__2, (ftnlen)2);
+ nb = ilaenv_(&c__1, "STRTRI", ch__1, n, &c_n1, &c_n1, &c_n1);
+ if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code */
+
+ strti2_(uplo, diag, n, &a[a_offset], lda, info);
+ } else {
+
+/* Use blocked code */
+
+ if (upper) {
+
+/* Compute inverse of upper triangular matrix */
+
+ i__1 = *n;
+ i__3 = nb;
+ for (j = 1; i__3 < 0 ? j >= i__1 : j <= i__1; j += i__3) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *n - j + 1;
+ jb = min(i__4,i__5);
+
+/* Compute rows 1:j-1 of current block column */
+
+ i__4 = j - 1;
+ strmm_("Left", "Upper", "No transpose", diag, &i__4, &jb, &
+ c_b18, &a[a_offset], lda, &a[j * a_dim1 + 1], lda);
+ i__4 = j - 1;
+ strsm_("Right", "Upper", "No transpose", diag, &i__4, &jb, &
+ c_b22, &a[j + j * a_dim1], lda, &a[j * a_dim1 + 1],
+ lda);
+
+/* Compute inverse of current diagonal block */
+
+ strti2_("Upper", diag, &jb, &a[j + j * a_dim1], lda, info);
+/* L20: */
+ }
+ } else {
+
+/* Compute inverse of lower triangular matrix */
+
+ nn = (*n - 1) / nb * nb + 1;
+ i__3 = -nb;
+ for (j = nn; i__3 < 0 ? j >= 1 : j <= 1; j += i__3) {
+/* Computing MIN */
+ i__1 = nb, i__4 = *n - j + 1;
+ jb = min(i__1,i__4);
+ if (j + jb <= *n) {
+
+/* Compute rows j+jb:n of current block column */
+
+ i__1 = *n - j - jb + 1;
+ strmm_("Left", "Lower", "No transpose", diag, &i__1, &jb,
+ &c_b18, &a[j + jb + (j + jb) * a_dim1], lda, &a[j
+ + jb + j * a_dim1], lda);
+ i__1 = *n - j - jb + 1;
+ strsm_("Right", "Lower", "No transpose", diag, &i__1, &jb,
+ &c_b22, &a[j + j * a_dim1], lda, &a[j + jb + j *
+ a_dim1], lda);
+ }
+
+/* Compute inverse of current diagonal block */
+
+ strti2_("Lower", diag, &jb, &a[j + j * a_dim1], lda, info);
+/* L30: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of STRTRI */
+
+} /* strtri_ */
diff --git a/contrib/libs/clapack/strtrs.c b/contrib/libs/clapack/strtrs.c
new file mode 100644
index 0000000000..d7f1baab6b
--- /dev/null
+++ b/contrib/libs/clapack/strtrs.c
@@ -0,0 +1,182 @@
+/* strtrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static real c_b12 = 1.f;
+
+/* Subroutine */ int strtrs_(char *uplo, char *trans, char *diag, integer *n,
+ integer *nrhs, real *a, integer *lda, real *b, integer *ldb, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int strsm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+), xerbla_(char *, integer *);
+ logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STRTRS solves a triangular system of the form */
+
+/* A * X = B or A**T * X = B, */
+
+/* where A is a triangular matrix of order N, and B is an N-by-NRHS */
+/* matrix. A check is made to verify that A is nonsingular. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose = Transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* The triangular matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of the array A contains the upper */
+/* triangular matrix, and the strictly lower triangular part of */
+/* A is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of the array A contains the lower triangular */
+/* matrix, and the strictly upper triangular part of A is not */
+/* referenced. If DIAG = 'U', the diagonal elements of A are */
+/* also not referenced and are assumed to be 1. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) REAL array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, if INFO = 0, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element of A is zero, */
+/* indicating that the matrix is singular and the solutions */
+/* X have not been computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ nounit = lsame_(diag, "N");
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*nrhs < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STRTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check for singularity. */
+
+ if (nounit) {
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ if (a[*info + *info * a_dim1] == 0.f) {
+ return 0;
+ }
+/* L10: */
+ }
+ }
+ *info = 0;
+
+/* Solve A * x = b or A' * x = b. */
+
+ strsm_("Left", uplo, trans, diag, n, nrhs, &c_b12, &a[a_offset], lda, &b[
+ b_offset], ldb);
+
+ return 0;
+
+/* End of STRTRS */
+
+} /* strtrs_ */
diff --git a/contrib/libs/clapack/strttf.c b/contrib/libs/clapack/strttf.c
new file mode 100644
index 0000000000..eb0993b3f5
--- /dev/null
+++ b/contrib/libs/clapack/strttf.c
@@ -0,0 +1,489 @@
+/* strttf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int strttf_(char *transr, char *uplo, integer *n, real *a,
+ integer *lda, real *arf, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, k, l, n1, n2, ij, nt, nx2, np1x2;
+ logical normaltransr;
+ extern logical lsame_(char *, char *);
+ logical lower;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nisodd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STRTTF copies a triangular matrix A from standard full format (TR) */
+/* to rectangular full packed format (TF) . */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': ARF in Normal form is wanted; */
+/* = 'T': ARF in Transpose form is wanted. */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N). */
+/* On entry, the triangular matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of the array A contains */
+/* the upper triangular matrix, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of the array A contains */
+/* the lower triangular matrix, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the matrix A. LDA >= max(1,N). */
+
+/* ARF (output) REAL array, dimension (NT). */
+/* NT=N*(N+1)/2. On exit, the triangular matrix A in RFP format. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Notes */
+/* ===== */
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* even. We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* the transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* the transpose of the last three columns of AP lower. */
+/* This covers the case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 03 04 05 33 43 53 */
+/* 13 14 15 00 44 54 */
+/* 23 24 25 10 11 55 */
+/* 33 34 35 20 21 22 */
+/* 00 44 45 30 31 32 */
+/* 01 11 55 40 41 42 */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We first consider Rectangular Full Packed (RFP) Format when N is */
+/* odd. We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* the transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* the transpose of the last two columns of AP lower. */
+/* This covers the case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* 02 03 04 00 33 43 */
+/* 12 13 14 10 11 44 */
+/* 22 23 24 20 21 22 */
+/* 00 33 34 30 31 32 */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'T'. RFP A in both UPLO cases is just the */
+/* transpose of RFP A above. One therefore gets: */
+
+/* RFP A RFP A */
+
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* Reference */
+/* ========= */
+
+/* ===================================================================== */
+
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda - 1 - 0 + 1;
+ a_offset = 0 + a_dim1 * 0;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ normaltransr = lsame_(transr, "N");
+ lower = lsame_(uplo, "L");
+ if (! normaltransr && ! lsame_(transr, "T")) {
+ *info = -1;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STRTTF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 1) {
+ if (*n == 1) {
+ arf[0] = a[0];
+ }
+ return 0;
+ }
+
+/* Size of array ARF(0:nt-1) */
+
+ nt = *n * (*n + 1) / 2;
+
+/* Set N1 and N2 depending on LOWER: for N even N1=N2=K */
+
+ if (lower) {
+ n2 = *n / 2;
+ n1 = *n - n2;
+ } else {
+ n1 = *n / 2;
+ n2 = *n - n1;
+ }
+
+/* If N is odd, set NISODD = .TRUE., LDA=N+1 and A is (N+1)--by--K2. */
+/* If N is even, set K = N/2 and NISODD = .FALSE., LDA=N and A is */
+/* N--by--(N+1)/2. */
+
+ if (*n % 2 == 0) {
+ k = *n / 2;
+ nisodd = FALSE_;
+ if (! lower) {
+ np1x2 = *n + *n + 2;
+ }
+ } else {
+ nisodd = TRUE_;
+ if (! lower) {
+ nx2 = *n + *n;
+ }
+ }
+
+ if (nisodd) {
+
+/* N is odd */
+
+ if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+ if (lower) {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'L' */
+
+ ij = 0;
+ i__1 = n2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = n2 + j;
+ for (i__ = n1; i__ <= i__2; ++i__) {
+ arf[ij] = a[n2 + j + i__ * a_dim1];
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ arf[ij] = a[i__ + j * a_dim1];
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'U' */
+
+ ij = nt - *n;
+ i__1 = n1;
+ for (j = *n - 1; j >= i__1; --j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ arf[ij] = a[i__ + j * a_dim1];
+ ++ij;
+ }
+ i__2 = n1 - 1;
+ for (l = j - n1; l <= i__2; ++l) {
+ arf[ij] = a[j - n1 + l * a_dim1];
+ ++ij;
+ }
+ ij -= nx2;
+ }
+
+ }
+
+ } else {
+
+/* N is odd and TRANSR = 'T' */
+
+ if (lower) {
+
+/* N is odd, TRANSR = 'T', and UPLO = 'L' */
+
+ ij = 0;
+ i__1 = n2 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ arf[ij] = a[j + i__ * a_dim1];
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = n1 + j; i__ <= i__2; ++i__) {
+ arf[ij] = a[i__ + (n1 + j) * a_dim1];
+ ++ij;
+ }
+ }
+ i__1 = *n - 1;
+ for (j = n2; j <= i__1; ++j) {
+ i__2 = n1 - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ arf[ij] = a[j + i__ * a_dim1];
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* N is odd, TRANSR = 'T', and UPLO = 'U' */
+
+ ij = 0;
+ i__1 = n1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - 1;
+ for (i__ = n1; i__ <= i__2; ++i__) {
+ arf[ij] = a[j + i__ * a_dim1];
+ ++ij;
+ }
+ }
+ i__1 = n1 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ arf[ij] = a[i__ + j * a_dim1];
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (l = n2 + j; l <= i__2; ++l) {
+ arf[ij] = a[n2 + j + l * a_dim1];
+ ++ij;
+ }
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* N is even */
+
+ if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+ if (lower) {
+
+/* N is even, TRANSR = 'N', and UPLO = 'L' */
+
+ ij = 0;
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k + j;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ arf[ij] = a[k + j + i__ * a_dim1];
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ arf[ij] = a[i__ + j * a_dim1];
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* N is even, TRANSR = 'N', and UPLO = 'U' */
+
+ ij = nt - *n - 1;
+ i__1 = k;
+ for (j = *n - 1; j >= i__1; --j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ arf[ij] = a[i__ + j * a_dim1];
+ ++ij;
+ }
+ i__2 = k - 1;
+ for (l = j - k; l <= i__2; ++l) {
+ arf[ij] = a[j - k + l * a_dim1];
+ ++ij;
+ }
+ ij -= np1x2;
+ }
+
+ }
+
+ } else {
+
+/* N is even and TRANSR = 'T' */
+
+ if (lower) {
+
+/* N is even, TRANSR = 'T', and UPLO = 'L' */
+
+ ij = 0;
+ j = k;
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ arf[ij] = a[i__ + j * a_dim1];
+ ++ij;
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ arf[ij] = a[j + i__ * a_dim1];
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (i__ = k + 1 + j; i__ <= i__2; ++i__) {
+ arf[ij] = a[i__ + (k + 1 + j) * a_dim1];
+ ++ij;
+ }
+ }
+ i__1 = *n - 1;
+ for (j = k - 1; j <= i__1; ++j) {
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ arf[ij] = a[j + i__ * a_dim1];
+ ++ij;
+ }
+ }
+
+ } else {
+
+/* N is even, TRANSR = 'T', and UPLO = 'U' */
+
+ ij = 0;
+ i__1 = k;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - 1;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ arf[ij] = a[j + i__ * a_dim1];
+ ++ij;
+ }
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ arf[ij] = a[i__ + j * a_dim1];
+ ++ij;
+ }
+ i__2 = *n - 1;
+ for (l = k + 1 + j; l <= i__2; ++l) {
+ arf[ij] = a[k + 1 + j + l * a_dim1];
+ ++ij;
+ }
+ }
+/* Note that here, on exit of the loop, J = K-1 */
+ i__1 = j;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ arf[ij] = a[i__ + j * a_dim1];
+ ++ij;
+ }
+
+ }
+
+ }
+
+ }
+
+ return 0;
+
+/* End of STRTTF */
+
+} /* strttf_ */
diff --git a/contrib/libs/clapack/strttp.c b/contrib/libs/clapack/strttp.c
new file mode 100644
index 0000000000..2d4f20347f
--- /dev/null
+++ b/contrib/libs/clapack/strttp.c
@@ -0,0 +1,143 @@
+/* strttp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int strttp_(char *uplo, integer *n, real *a, integer *lda,
+ real *ap, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, k;
+ extern logical lsame_(char *, char *);
+ logical lower;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- and Julien Langou of the Univ. of Colorado Denver -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STRTTP copies a triangular matrix A from full format (TR) to standard */
+/* packed format (TP). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular. */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrices AP and A. N >= 0. */
+
+/* A (input) REAL array, dimension (LDA,N) */
+/* On exit, the triangular matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AP (output) REAL array, dimension (N*(N+1)/2 */
+/* On exit, the upper or lower triangular matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ lower = lsame_(uplo, "L");
+ if (! lower && ! lsame_(uplo, "U")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STRTTP", &i__1);
+ return 0;
+ }
+
+ if (lower) {
+ k = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ ++k;
+ ap[k] = a[i__ + j * a_dim1];
+ }
+ }
+ } else {
+ k = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ ++k;
+ ap[k] = a[i__ + j * a_dim1];
+ }
+ }
+ }
+
+ return 0;
+
+/* End of STRTTP */
+
+} /* strttp_ */
diff --git a/contrib/libs/clapack/stzrqf.c b/contrib/libs/clapack/stzrqf.c
new file mode 100644
index 0000000000..1a31566aed
--- /dev/null
+++ b/contrib/libs/clapack/stzrqf.c
@@ -0,0 +1,219 @@
+/* stzrqf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static real c_b8 = 1.f;
+
+/* Subroutine */ int stzrqf_(integer *m, integer *n, real *a, integer *lda,
+ real *tau, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ real r__1;
+
+ /* Local variables */
+ integer i__, k, m1;
+ extern /* Subroutine */ int sger_(integer *, integer *, real *, real *,
+ integer *, real *, integer *, real *, integer *), sgemv_(char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+, real *, real *, integer *), scopy_(integer *, real *,
+ integer *, real *, integer *), saxpy_(integer *, real *, real *,
+ integer *, real *, integer *), xerbla_(char *, integer *),
+ slarfp_(integer *, real *, real *, integer *, real *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine STZRZF. */
+
+/* STZRQF reduces the M-by-N ( M<=N ) real upper trapezoidal matrix A */
+/* to upper triangular form by means of orthogonal transformations. */
+
+/* The upper trapezoidal matrix A is factored as */
+
+/* A = ( R 0 ) * Z, */
+
+/* where Z is an N-by-N orthogonal matrix and R is an M-by-M upper */
+/* triangular matrix. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= M. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the leading M-by-N upper trapezoidal part of the */
+/* array A must contain the matrix to be factorized. */
+/* On exit, the leading M-by-M upper triangular part of A */
+/* contains the upper triangular matrix R, and elements M+1 to */
+/* N of the first M rows of A, with the array TAU, represent the */
+/* orthogonal matrix Z as a product of M elementary reflectors. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) REAL array, dimension (M) */
+/* The scalar factors of the elementary reflectors. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The factorization is obtained by Householder's method. The kth */
+/* transformation matrix, Z( k ), which is used to introduce zeros into */
+/* the ( m - k + 1 )th row of A, is given in the form */
+
+/* Z( k ) = ( I 0 ), */
+/* ( 0 T( k ) ) */
+
+/* where */
+
+/* T( k ) = I - tau*u( k )*u( k )', u( k ) = ( 1 ), */
+/* ( 0 ) */
+/* ( z( k ) ) */
+
+/* tau is a scalar and z( k ) is an ( n - m ) element vector. */
+/* tau and z( k ) are chosen to annihilate the elements of the kth row */
+/* of X. */
+
+/* The scalar tau is returned in the kth element of TAU and the vector */
+/* u( k ) in the kth row of A, such that the elements of z( k ) are */
+/* in a( k, m + 1 ), ..., a( k, n ). The elements of R are returned in */
+/* the upper triangular part of A. */
+
+/* Z is given by */
+
+/* Z = Z( 1 ) * Z( 2 ) * ... * Z( m ). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STZRQF", &i__1);
+ return 0;
+ }
+
+/* Perform the factorization. */
+
+ if (*m == 0) {
+ return 0;
+ }
+ if (*m == *n) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ tau[i__] = 0.f;
+/* L10: */
+ }
+ } else {
+/* Computing MIN */
+ i__1 = *m + 1;
+ m1 = min(i__1,*n);
+ for (k = *m; k >= 1; --k) {
+
+/* Use a Householder reflection to zero the kth row of A. */
+/* First set up the reflection. */
+
+ i__1 = *n - *m + 1;
+ slarfp_(&i__1, &a[k + k * a_dim1], &a[k + m1 * a_dim1], lda, &tau[
+ k]);
+
+ if (tau[k] != 0.f && k > 1) {
+
+/* We now perform the operation A := A*P( k ). */
+
+/* Use the first ( k - 1 ) elements of TAU to store a( k ), */
+/* where a( k ) consists of the first ( k - 1 ) elements of */
+/* the kth column of A. Also let B denote the first */
+/* ( k - 1 ) rows of the last ( n - m ) columns of A. */
+
+ i__1 = k - 1;
+ scopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &tau[1], &c__1);
+
+/* Form w = a( k ) + B*z( k ) in TAU. */
+
+ i__1 = k - 1;
+ i__2 = *n - *m;
+ sgemv_("No transpose", &i__1, &i__2, &c_b8, &a[m1 * a_dim1 +
+ 1], lda, &a[k + m1 * a_dim1], lda, &c_b8, &tau[1], &
+ c__1);
+
+/* Now form a( k ) := a( k ) - tau*w */
+/* and B := B - tau*w*z( k )'. */
+
+ i__1 = k - 1;
+ r__1 = -tau[k];
+ saxpy_(&i__1, &r__1, &tau[1], &c__1, &a[k * a_dim1 + 1], &
+ c__1);
+ i__1 = k - 1;
+ i__2 = *n - *m;
+ r__1 = -tau[k];
+ sger_(&i__1, &i__2, &r__1, &tau[1], &c__1, &a[k + m1 * a_dim1]
+, lda, &a[m1 * a_dim1 + 1], lda);
+ }
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of STZRQF */
+
+} /* stzrqf_ */
diff --git a/contrib/libs/clapack/stzrzf.c b/contrib/libs/clapack/stzrzf.c
new file mode 100644
index 0000000000..c5559513b3
--- /dev/null
+++ b/contrib/libs/clapack/stzrzf.c
@@ -0,0 +1,310 @@
+/* stzrzf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int stzrzf_(integer *m, integer *n, real *a, integer *lda,
+ real *tau, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+
+ /* Local variables */
+ integer i__, m1, ib, nb, ki, kk, mu, nx, iws, nbmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int slarzb_(char *, char *, char *, char *,
+ integer *, integer *, integer *, integer *, real *, integer *,
+ real *, integer *, real *, integer *, real *, integer *);
+ integer ldwork;
+ extern /* Subroutine */ int slarzt_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int slatrz_(integer *, integer *, integer *, real
+ *, integer *, real *, real *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* STZRZF reduces the M-by-N ( M<=N ) real upper trapezoidal matrix A */
+/* to upper triangular form by means of orthogonal transformations. */
+
+/* The upper trapezoidal matrix A is factored as */
+
+/* A = ( R 0 ) * Z, */
+
+/* where Z is an N-by-N orthogonal matrix and R is an M-by-M upper */
+/* triangular matrix. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= M. */
+
+/* A (input/output) REAL array, dimension (LDA,N) */
+/* On entry, the leading M-by-N upper trapezoidal part of the */
+/* array A must contain the matrix to be factorized. */
+/* On exit, the leading M-by-M upper triangular part of A */
+/* contains the upper triangular matrix R, and elements M+1 to */
+/* N of the first M rows of A, with the array TAU, represent the */
+/* orthogonal matrix Z as a product of M elementary reflectors. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) REAL array, dimension (M) */
+/* The scalar factors of the elementary reflectors. */
+
+/* WORK (workspace/output) REAL array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* The factorization is obtained by Householder's method. The kth */
+/* transformation matrix, Z( k ), which is used to introduce zeros into */
+/* the ( m - k + 1 )th row of A, is given in the form */
+
+/* Z( k ) = ( I 0 ), */
+/* ( 0 T( k ) ) */
+
+/* where */
+
+/* T( k ) = I - tau*u( k )*u( k )', u( k ) = ( 1 ), */
+/* ( 0 ) */
+/* ( z( k ) ) */
+
+/* tau is a scalar and z( k ) is an ( n - m ) element vector. */
+/* tau and z( k ) are chosen to annihilate the elements of the kth row */
+/* of X. */
+
+/* The scalar tau is returned in the kth element of TAU and the vector */
+/* u( k ) in the kth row of A, such that the elements of z( k ) are */
+/* in a( k, m + 1 ), ..., a( k, n ). The elements of R are returned in */
+/* the upper triangular part of A. */
+
+/* Z is given by */
+
+/* Z = Z( 1 ) * Z( 2 ) * ... * Z( m ). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+
+ if (*info == 0) {
+ if (*m == 0 || *m == *n) {
+ lwkopt = 1;
+ } else {
+
+/* Determine the block size. */
+
+ nb = ilaenv_(&c__1, "SGERQF", " ", m, n, &c_n1, &c_n1);
+ lwkopt = *m * nb;
+ }
+ work[1] = (real) lwkopt;
+
+ if (*lwork < max(1,*m) && ! lquery) {
+ *info = -7;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STZRZF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0) {
+ return 0;
+ } else if (*m == *n) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ tau[i__] = 0.f;
+/* L10: (Fortran label) M = N: A is already triangular, all TAU zero */
+ }
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 1;
+ iws = *m;
+ if (nb > 1 && nb < *m) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "SGERQF", " ", m, n, &c_n1, &c_n1);
+ nx = max(i__1,i__2);
+ if (nx < *m) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *m;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SGERQF", " ", m, n, &c_n1, &
+ c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < *m && nx < *m) {
+
+/* Use blocked code initially. */
+/* The last kk rows are handled by the block method. */
+
+/* Computing MIN */
+ i__1 = *m + 1;
+ m1 = min(i__1,*n);
+ ki = (*m - nx - 1) / nb * nb;
+/* Computing MIN */
+ i__1 = *m, i__2 = ki + nb;
+ kk = min(i__1,i__2);
+
+ i__1 = *m - kk + 1;
+ i__2 = -nb;
+ for (i__ = *m - kk + ki + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1;
+ i__ += i__2) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ ib = min(i__3,nb);
+
+/* Compute the TZ factorization of the current block */
+/* A(i:i+ib-1,i:n) */
+
+ i__3 = *n - i__ + 1;
+ i__4 = *n - *m;
+ slatrz_(&ib, &i__3, &i__4, &a[i__ + i__ * a_dim1], lda, &tau[i__],
+ &work[1]);
+ if (i__ > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ i__3 = *n - *m;
+ slarzt_("Backward", "Rowwise", &i__3, &ib, &a[i__ + m1 *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(1:i-1,i:n) from the right */
+
+ i__3 = i__ - 1;
+ i__4 = *n - i__ + 1;
+ i__5 = *n - *m;
+ slarzb_("Right", "No transpose", "Backward", "Rowwise", &i__3,
+ &i__4, &ib, &i__5, &a[i__ + m1 * a_dim1], lda, &work[
+ 1], &ldwork, &a[i__ * a_dim1 + 1], lda, &work[ib + 1],
+ &ldwork)
+ ;
+ }
+/* L20: (Fortran label) end of blocked loop over row-block starts */
+ }
+ mu = i__ + nb - 1;
+ } else {
+ mu = *m;
+ }
+
+/* Use unblocked code to factor the last or only block */
+
+ if (mu > 0) {
+ i__2 = *n - *m;
+ slatrz_(&mu, n, &i__2, &a[a_offset], lda, &tau[1], &work[1]);
+ }
+
+ work[1] = (real) lwkopt;
+
+ return 0;
+
+/* End of STZRZF */
+
+} /* stzrzf_ */
diff --git a/contrib/libs/clapack/zbdsqr.c b/contrib/libs/clapack/zbdsqr.c
new file mode 100644
index 0000000000..7ab5c2da20
--- /dev/null
+++ b/contrib/libs/clapack/zbdsqr.c
@@ -0,0 +1,909 @@
+/* zbdsqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b15 = -.125;
+static integer c__1 = 1;
+static doublereal c_b49 = 1.;
+static doublereal c_b72 = -1.;
+
+/* Subroutine */ int zbdsqr_(char *uplo, integer *n, integer *ncvt, integer *
+ nru, integer *ncc, doublereal *d__, doublereal *e, doublecomplex *vt,
+ integer *ldvt, doublecomplex *u, integer *ldu, doublecomplex *c__,
+ integer *ldc, doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+ i__2;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double pow_dd(doublereal *, doublereal *), sqrt(doublereal), d_sign(
+ doublereal *, doublereal *);
+
+ /* Local variables */
+ doublereal f, g, h__;
+ integer i__, j, m;
+ doublereal r__, cs;
+ integer ll;
+ doublereal sn, mu;
+ integer nm1, nm12, nm13, lll;
+ doublereal eps, sll, tol, abse;
+ integer idir;
+ doublereal abss;
+ integer oldm;
+ doublereal cosl;
+ integer isub, iter;
+ doublereal unfl, sinl, cosr, smin, smax, sinr;
+ extern /* Subroutine */ int dlas2_(doublereal *, doublereal *, doublereal
+ *, doublereal *, doublereal *);
+ extern logical lsame_(char *, char *);
+ doublereal oldcs;
+ integer oldll;
+ doublereal shift, sigmn, oldsn;
+ integer maxit;
+ doublereal sminl, sigmx;
+ logical lower;
+ extern /* Subroutine */ int zlasr_(char *, char *, char *, integer *,
+ integer *, doublereal *, doublereal *, doublecomplex *, integer *), zdrot_(integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *, doublereal *, doublereal *)
+ , zswap_(integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *), dlasq1_(integer *, doublereal *, doublereal *,
+ doublereal *, integer *), dlasv2_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *), xerbla_(char *,
+ integer *), zdscal_(integer *, doublereal *,
+ doublecomplex *, integer *);
+ doublereal sminoa, thresh;
+ logical rotate;
+ doublereal tolmul;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZBDSQR computes the singular values and, optionally, the right and/or */
+/* left singular vectors from the singular value decomposition (SVD) of */
+/* a real N-by-N (upper or lower) bidiagonal matrix B using the implicit */
+/* zero-shift QR algorithm. The SVD of B has the form */
+
+/* B = Q * S * P**H */
+
+/* where S is the diagonal matrix of singular values, Q is an orthogonal */
+/* matrix of left singular vectors, and P is an orthogonal matrix of */
+/* right singular vectors. If left singular vectors are requested, this */
+/* subroutine actually returns U*Q instead of Q, and, if right singular */
+/* vectors are requested, this subroutine returns P**H*VT instead of */
+/* P**H, for given complex input matrices U and VT. When U and VT are */
+/* the unitary matrices that reduce a general matrix A to bidiagonal */
+/* form: A = U*B*VT, as computed by ZGEBRD, then */
+
+/* A = (U*Q) * S * (P**H*VT) */
+
+/* is the SVD of A. Optionally, the subroutine may also compute Q**H*C */
+/* for a given complex input matrix C. */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices With */
+/* Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, */
+/* LAPACK Working Note #3 (or SIAM J. Sci. Statist. Comput. vol. 11, */
+/* no. 5, pp. 873-912, Sept 1990) and */
+/* "Accurate singular values and differential qd algorithms," by */
+/* B. Parlett and V. Fernando, Technical Report CPAM-554, Mathematics */
+/* Department, University of California at Berkeley, July 1992 */
+/* for a detailed description of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': B is upper bidiagonal; */
+/* = 'L': B is lower bidiagonal. */
+
+/* N (input) INTEGER */
+/* The order of the matrix B. N >= 0. */
+
+/* NCVT (input) INTEGER */
+/* The number of columns of the matrix VT. NCVT >= 0. */
+
+/* NRU (input) INTEGER */
+/* The number of rows of the matrix U. NRU >= 0. */
+
+/* NCC (input) INTEGER */
+/* The number of columns of the matrix C. NCC >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the n diagonal elements of the bidiagonal matrix B. */
+/* On exit, if INFO=0, the singular values of B in decreasing */
+/* order. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, the N-1 offdiagonal elements of the bidiagonal */
+/* matrix B. */
+/* On exit, if INFO = 0, E is destroyed; if INFO > 0, D and E */
+/* will contain the diagonal and superdiagonal elements of a */
+/* bidiagonal matrix orthogonally equivalent to the one given */
+/* as input. */
+
+/* VT (input/output) COMPLEX*16 array, dimension (LDVT, NCVT) */
+/* On entry, an N-by-NCVT matrix VT. */
+/* On exit, VT is overwritten by P**H * VT. */
+/* Not referenced if NCVT = 0. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. */
+/* LDVT >= max(1,N) if NCVT > 0; LDVT >= 1 if NCVT = 0. */
+
+/* U (input/output) COMPLEX*16 array, dimension (LDU, N) */
+/* On entry, an NRU-by-N matrix U. */
+/* On exit, U is overwritten by U * Q. */
+/* Not referenced if NRU = 0. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= max(1,NRU). */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC, NCC) */
+/* On entry, an N-by-NCC matrix C. */
+/* On exit, C is overwritten by Q**H * C. */
+/* Not referenced if NCC = 0. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. */
+/* LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (2*N) */
+/* if NCVT = NRU = NCC = 0, (max(1, 4*N-4)) otherwise */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: If INFO = -i, the i-th argument had an illegal value */
+/* > 0: the algorithm did not converge; D and E contain the */
+/* elements of a bidiagonal matrix which is orthogonally */
+/* similar to the input matrix B; if INFO = i, i */
+/* elements of E have not converged to zero. */
+
+/* Internal Parameters */
+/* =================== */
+
+/* TOLMUL DOUBLE PRECISION, default = max(10,min(100,EPS**(-1/8))) */
+/* TOLMUL controls the convergence criterion of the QR loop. */
+/* If it is positive, TOLMUL*EPS is the desired relative */
+/* precision in the computed singular values. */
+/* If it is negative, abs(TOLMUL*EPS*sigma_max) is the */
+/* desired absolute accuracy in the computed singular */
+/* values (corresponds to relative accuracy */
+/* abs(TOLMUL*EPS) in the largest singular value. */
+/* abs(TOLMUL) should be between 1 and 1/EPS, and preferably */
+/* between 10 (for fast convergence) and .1/EPS */
+/* (for there to be some accuracy in the results). */
+/* Default is to lose at either one eighth or 2 of the */
+/* available decimal digits in each computed singular value */
+/* (whichever is smaller). */
+
+/* MAXITR INTEGER, default = 6 */
+/* MAXITR controls the maximum number of passes of the */
+/* algorithm through its inner loop. The algorithms stops */
+/* (and so fails to converge) if the number of passes */
+/* through the inner loop exceeds MAXITR*N**2. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ lower = lsame_(uplo, "L");
+ if (! lsame_(uplo, "U") && ! lower) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ncvt < 0) {
+ *info = -3;
+ } else if (*nru < 0) {
+ *info = -4;
+ } else if (*ncc < 0) {
+ *info = -5;
+ } else if (*ncvt == 0 && *ldvt < 1 || *ncvt > 0 && *ldvt < max(1,*n)) {
+ *info = -9;
+ } else if (*ldu < max(1,*nru)) {
+ *info = -11;
+ } else if (*ncc == 0 && *ldc < 1 || *ncc > 0 && *ldc < max(1,*n)) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZBDSQR", &i__1);
+ return 0;
+ }
+ if (*n == 0) {
+ return 0;
+ }
+ if (*n == 1) {
+ goto L160;
+ }
+
+/* ROTATE is true if any singular vectors desired, false otherwise */
+
+ rotate = *ncvt > 0 || *nru > 0 || *ncc > 0;
+
+/* If no singular vectors desired, use qd algorithm */
+
+ if (! rotate) {
+ dlasq1_(n, &d__[1], &e[1], &rwork[1], info);
+ return 0;
+ }
+
+ nm1 = *n - 1;
+ nm12 = nm1 + nm1;
+ nm13 = nm12 + nm1;
+ idir = 0;
+
+/* Get machine constants */
+
+ eps = dlamch_("Epsilon");
+ unfl = dlamch_("Safe minimum");
+
+/* If matrix lower bidiagonal, rotate to be upper bidiagonal */
+/* by applying Givens rotations on the left */
+
+ if (lower) {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ rwork[i__] = cs;
+ rwork[nm1 + i__] = sn;
+/* L10: (Fortran label) end of lower-to-upper Givens conversion loop */
+ }
+
+/* Update singular vectors if desired */
+
+ if (*nru > 0) {
+ zlasr_("R", "V", "F", nru, n, &rwork[1], &rwork[*n], &u[u_offset],
+ ldu);
+ }
+ if (*ncc > 0) {
+ zlasr_("L", "V", "F", n, ncc, &rwork[1], &rwork[*n], &c__[
+ c_offset], ldc);
+ }
+ }
+
+/* Compute singular values to relative accuracy TOL */
+/* (By setting TOL to be negative, algorithm will compute */
+/* singular values to absolute accuracy ABS(TOL)*norm(input matrix)) */
+
+/* Computing MAX */
+/* Computing MIN */
+ d__3 = 100., d__4 = pow_dd(&eps, &c_b15);
+ d__1 = 10., d__2 = min(d__3,d__4);
+ tolmul = max(d__1,d__2);
+ tol = tolmul * eps;
+
+/* Compute approximate maximum, minimum singular values */
+
+ smax = 0.;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__2 = smax, d__3 = (d__1 = d__[i__], abs(d__1));
+ smax = max(d__2,d__3);
+/* L20: (Fortran label) SMAX over the diagonal entries */
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__2 = smax, d__3 = (d__1 = e[i__], abs(d__1));
+ smax = max(d__2,d__3);
+/* L30: (Fortran label) SMAX over the off-diagonal entries */
+ }
+ sminl = 0.;
+ if (tol >= 0.) {
+
+/* Relative accuracy desired */
+
+ sminoa = abs(d__[1]);
+ if (sminoa == 0.) {
+ goto L50;
+ }
+ mu = sminoa;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ mu = (d__2 = d__[i__], abs(d__2)) * (mu / (mu + (d__1 = e[i__ - 1]
+ , abs(d__1))));
+ sminoa = min(sminoa,mu);
+ if (sminoa == 0.) {
+ goto L50;
+ }
+/* L40: (Fortran label) end of smallest-singular-value estimate loop */
+ }
+L50:
+ sminoa /= sqrt((doublereal) (*n));
+/* Computing MAX */
+ d__1 = tol * sminoa, d__2 = *n * 6 * *n * unfl;
+ thresh = max(d__1,d__2);
+ } else {
+
+/* Absolute accuracy desired */
+
+/* Computing MAX */
+ d__1 = abs(tol) * smax, d__2 = *n * 6 * *n * unfl;
+ thresh = max(d__1,d__2);
+ }
+
+/* Prepare for main iteration loop for the singular values */
+/* (MAXIT is the maximum number of passes through the inner */
+/* loop permitted before nonconvergence signalled.) */
+
+ maxit = *n * 6 * *n;
+ iter = 0;
+ oldll = -1;
+ oldm = -1;
+
+/* M points to last element of unconverged part of matrix */
+
+ m = *n;
+
+/* Begin main iteration loop */
+
+L60:
+
+/* Check for convergence or exceeding iteration count */
+
+ if (m <= 1) {
+ goto L160;
+ }
+ if (iter > maxit) {
+ goto L200;
+ }
+
+/* Find diagonal block of matrix to work on */
+
+ if (tol < 0. && (d__1 = d__[m], abs(d__1)) <= thresh) {
+ d__[m] = 0.;
+ }
+ smax = (d__1 = d__[m], abs(d__1));
+ smin = smax;
+ i__1 = m - 1;
+ for (lll = 1; lll <= i__1; ++lll) {
+ ll = m - lll;
+ abss = (d__1 = d__[ll], abs(d__1));
+ abse = (d__1 = e[ll], abs(d__1));
+ if (tol < 0. && abss <= thresh) {
+ d__[ll] = 0.;
+ }
+ if (abse <= thresh) {
+ goto L80;
+ }
+ smin = min(smin,abss);
+/* Computing MAX */
+ d__1 = max(smax,abss);
+ smax = max(d__1,abse);
+/* L70: (Fortran label) end of scan for a negligible off-diagonal */
+ }
+ ll = 0;
+ goto L90;
+L80:
+ e[ll] = 0.;
+
+/* Matrix splits since E(LL) = 0 */
+
+ if (ll == m - 1) {
+
+/* Convergence of bottom singular value, return to top of loop */
+
+ --m;
+ goto L60;
+ }
+L90:
+ ++ll;
+
+/* E(LL) through E(M-1) are nonzero, E(LL-1) is zero */
+
+ if (ll == m - 1) {
+
+/* 2 by 2 block, handle separately */
+
+ dlasv2_(&d__[m - 1], &e[m - 1], &d__[m], &sigmn, &sigmx, &sinr, &cosr,
+ &sinl, &cosl);
+ d__[m - 1] = sigmx;
+ e[m - 1] = 0.;
+ d__[m] = sigmn;
+
+/* Compute singular vectors, if desired */
+
+ if (*ncvt > 0) {
+ zdrot_(ncvt, &vt[m - 1 + vt_dim1], ldvt, &vt[m + vt_dim1], ldvt, &
+ cosr, &sinr);
+ }
+ if (*nru > 0) {
+ zdrot_(nru, &u[(m - 1) * u_dim1 + 1], &c__1, &u[m * u_dim1 + 1], &
+ c__1, &cosl, &sinl);
+ }
+ if (*ncc > 0) {
+ zdrot_(ncc, &c__[m - 1 + c_dim1], ldc, &c__[m + c_dim1], ldc, &
+ cosl, &sinl);
+ }
+ m += -2;
+ goto L60;
+ }
+
+/* If working on new submatrix, choose shift direction */
+/* (from larger end diagonal element towards smaller) */
+
+ if (ll > oldm || m < oldll) {
+ if ((d__1 = d__[ll], abs(d__1)) >= (d__2 = d__[m], abs(d__2))) {
+
+/* Chase bulge from top (big end) to bottom (small end) */
+
+ idir = 1;
+ } else {
+
+/* Chase bulge from bottom (big end) to top (small end) */
+
+ idir = 2;
+ }
+ }
+
+/* Apply convergence tests */
+
+ if (idir == 1) {
+
+/* Run convergence test in forward direction */
+/* First apply standard test to bottom of matrix */
+
+ if ((d__2 = e[m - 1], abs(d__2)) <= abs(tol) * (d__1 = d__[m], abs(
+ d__1)) || tol < 0. && (d__3 = e[m - 1], abs(d__3)) <= thresh)
+ {
+ e[m - 1] = 0.;
+ goto L60;
+ }
+
+ if (tol >= 0.) {
+
+/* If relative accuracy desired, */
+/* apply convergence criterion forward */
+
+ mu = (d__1 = d__[ll], abs(d__1));
+ sminl = mu;
+ i__1 = m - 1;
+ for (lll = ll; lll <= i__1; ++lll) {
+ if ((d__1 = e[lll], abs(d__1)) <= tol * mu) {
+ e[lll] = 0.;
+ goto L60;
+ }
+ mu = (d__2 = d__[lll + 1], abs(d__2)) * (mu / (mu + (d__1 = e[
+ lll], abs(d__1))));
+ sminl = min(sminl,mu);
+/* L100: (Fortran label) end of forward convergence scan */
+ }
+ }
+
+ } else {
+
+/* Run convergence test in backward direction */
+/* First apply standard test to top of matrix */
+
+ if ((d__2 = e[ll], abs(d__2)) <= abs(tol) * (d__1 = d__[ll], abs(d__1)
+ ) || tol < 0. && (d__3 = e[ll], abs(d__3)) <= thresh) {
+ e[ll] = 0.;
+ goto L60;
+ }
+
+ if (tol >= 0.) {
+
+/* If relative accuracy desired, */
+/* apply convergence criterion backward */
+
+ mu = (d__1 = d__[m], abs(d__1));
+ sminl = mu;
+ i__1 = ll;
+ for (lll = m - 1; lll >= i__1; --lll) {
+ if ((d__1 = e[lll], abs(d__1)) <= tol * mu) {
+ e[lll] = 0.;
+ goto L60;
+ }
+ mu = (d__2 = d__[lll], abs(d__2)) * (mu / (mu + (d__1 = e[lll]
+ , abs(d__1))));
+ sminl = min(sminl,mu);
+/* L110: (Fortran label) end of backward convergence scan */
+ }
+ }
+ }
+ oldll = ll;
+ oldm = m;
+
+/* Compute shift. First, test if shifting would ruin relative */
+/* accuracy, and if so set the shift to zero. */
+
+/* Computing MAX */
+ d__1 = eps, d__2 = tol * .01;
+ if (tol >= 0. && *n * tol * (sminl / smax) <= max(d__1,d__2)) {
+
+/* Use a zero shift to avoid loss of relative accuracy */
+
+ shift = 0.;
+ } else {
+
+/* Compute the shift from 2-by-2 block at end of matrix */
+
+ if (idir == 1) {
+ sll = (d__1 = d__[ll], abs(d__1));
+ dlas2_(&d__[m - 1], &e[m - 1], &d__[m], &shift, &r__);
+ } else {
+ sll = (d__1 = d__[m], abs(d__1));
+ dlas2_(&d__[ll], &e[ll], &d__[ll + 1], &shift, &r__);
+ }
+
+/* Test if shift negligible, and if so set to zero */
+
+ if (sll > 0.) {
+/* Computing 2nd power */
+ d__1 = shift / sll;
+ if (d__1 * d__1 < eps) {
+ shift = 0.;
+ }
+ }
+ }
+
+/* Increment iteration count */
+
+ iter = iter + m - ll;
+
+/* If SHIFT = 0, do simplified QR iteration */
+
+ if (shift == 0.) {
+ if (idir == 1) {
+
+/* Chase bulge from top to bottom */
+/* Save cosines and sines for later singular vector updates */
+
+ cs = 1.;
+ oldcs = 1.;
+ i__1 = m - 1;
+ for (i__ = ll; i__ <= i__1; ++i__) {
+ d__1 = d__[i__] * cs;
+ dlartg_(&d__1, &e[i__], &cs, &sn, &r__);
+ if (i__ > ll) {
+ e[i__ - 1] = oldsn * r__;
+ }
+ d__1 = oldcs * r__;
+ d__2 = d__[i__ + 1] * sn;
+ dlartg_(&d__1, &d__2, &oldcs, &oldsn, &d__[i__]);
+ rwork[i__ - ll + 1] = cs;
+ rwork[i__ - ll + 1 + nm1] = sn;
+ rwork[i__ - ll + 1 + nm12] = oldcs;
+ rwork[i__ - ll + 1 + nm13] = oldsn;
+/* L120: (Fortran label) end of zero-shift top-to-bottom chase */
+ }
+ h__ = d__[m] * cs;
+ d__[m] = h__ * oldcs;
+ e[m - 1] = h__ * oldsn;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ zlasr_("L", "V", "F", &i__1, ncvt, &rwork[1], &rwork[*n], &vt[
+ ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ zlasr_("R", "V", "F", nru, &i__1, &rwork[nm12 + 1], &rwork[
+ nm13 + 1], &u[ll * u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ zlasr_("L", "V", "F", &i__1, ncc, &rwork[nm12 + 1], &rwork[
+ nm13 + 1], &c__[ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((d__1 = e[m - 1], abs(d__1)) <= thresh) {
+ e[m - 1] = 0.;
+ }
+
+ } else {
+
+/* Chase bulge from bottom to top */
+/* Save cosines and sines for later singular vector updates */
+
+ cs = 1.;
+ oldcs = 1.;
+ i__1 = ll + 1;
+ for (i__ = m; i__ >= i__1; --i__) {
+ d__1 = d__[i__] * cs;
+ dlartg_(&d__1, &e[i__ - 1], &cs, &sn, &r__);
+ if (i__ < m) {
+ e[i__] = oldsn * r__;
+ }
+ d__1 = oldcs * r__;
+ d__2 = d__[i__ - 1] * sn;
+ dlartg_(&d__1, &d__2, &oldcs, &oldsn, &d__[i__]);
+ rwork[i__ - ll] = cs;
+ rwork[i__ - ll + nm1] = -sn;
+ rwork[i__ - ll + nm12] = oldcs;
+ rwork[i__ - ll + nm13] = -oldsn;
+/* L130: (Fortran label) end of zero-shift bottom-to-top chase */
+ }
+ h__ = d__[ll] * cs;
+ d__[ll] = h__ * oldcs;
+ e[ll] = h__ * oldsn;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ zlasr_("L", "V", "B", &i__1, ncvt, &rwork[nm12 + 1], &rwork[
+ nm13 + 1], &vt[ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ zlasr_("R", "V", "B", nru, &i__1, &rwork[1], &rwork[*n], &u[
+ ll * u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ zlasr_("L", "V", "B", &i__1, ncc, &rwork[1], &rwork[*n], &c__[
+ ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((d__1 = e[ll], abs(d__1)) <= thresh) {
+ e[ll] = 0.;
+ }
+ }
+ } else {
+
+/* Use nonzero shift */
+
+ if (idir == 1) {
+
+/* Chase bulge from top to bottom */
+/* Save cosines and sines for later singular vector updates */
+
+ f = ((d__1 = d__[ll], abs(d__1)) - shift) * (d_sign(&c_b49, &d__[
+ ll]) + shift / d__[ll]);
+ g = e[ll];
+ i__1 = m - 1;
+ for (i__ = ll; i__ <= i__1; ++i__) {
+ dlartg_(&f, &g, &cosr, &sinr, &r__);
+ if (i__ > ll) {
+ e[i__ - 1] = r__;
+ }
+ f = cosr * d__[i__] + sinr * e[i__];
+ e[i__] = cosr * e[i__] - sinr * d__[i__];
+ g = sinr * d__[i__ + 1];
+ d__[i__ + 1] = cosr * d__[i__ + 1];
+ dlartg_(&f, &g, &cosl, &sinl, &r__);
+ d__[i__] = r__;
+ f = cosl * e[i__] + sinl * d__[i__ + 1];
+ d__[i__ + 1] = cosl * d__[i__ + 1] - sinl * e[i__];
+ if (i__ < m - 1) {
+ g = sinl * e[i__ + 1];
+ e[i__ + 1] = cosl * e[i__ + 1];
+ }
+ rwork[i__ - ll + 1] = cosr;
+ rwork[i__ - ll + 1 + nm1] = sinr;
+ rwork[i__ - ll + 1 + nm12] = cosl;
+ rwork[i__ - ll + 1 + nm13] = sinl;
+/* L140: (Fortran label) end of shifted top-to-bottom chase */
+ }
+ e[m - 1] = f;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ zlasr_("L", "V", "F", &i__1, ncvt, &rwork[1], &rwork[*n], &vt[
+ ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ zlasr_("R", "V", "F", nru, &i__1, &rwork[nm12 + 1], &rwork[
+ nm13 + 1], &u[ll * u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ zlasr_("L", "V", "F", &i__1, ncc, &rwork[nm12 + 1], &rwork[
+ nm13 + 1], &c__[ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((d__1 = e[m - 1], abs(d__1)) <= thresh) {
+ e[m - 1] = 0.;
+ }
+
+ } else {
+
+/* Chase bulge from bottom to top */
+/* Save cosines and sines for later singular vector updates */
+
+ f = ((d__1 = d__[m], abs(d__1)) - shift) * (d_sign(&c_b49, &d__[m]
+ ) + shift / d__[m]);
+ g = e[m - 1];
+ i__1 = ll + 1;
+ for (i__ = m; i__ >= i__1; --i__) {
+ dlartg_(&f, &g, &cosr, &sinr, &r__);
+ if (i__ < m) {
+ e[i__] = r__;
+ }
+ f = cosr * d__[i__] + sinr * e[i__ - 1];
+ e[i__ - 1] = cosr * e[i__ - 1] - sinr * d__[i__];
+ g = sinr * d__[i__ - 1];
+ d__[i__ - 1] = cosr * d__[i__ - 1];
+ dlartg_(&f, &g, &cosl, &sinl, &r__);
+ d__[i__] = r__;
+ f = cosl * e[i__ - 1] + sinl * d__[i__ - 1];
+ d__[i__ - 1] = cosl * d__[i__ - 1] - sinl * e[i__ - 1];
+ if (i__ > ll + 1) {
+ g = sinl * e[i__ - 2];
+ e[i__ - 2] = cosl * e[i__ - 2];
+ }
+ rwork[i__ - ll] = cosr;
+ rwork[i__ - ll + nm1] = -sinr;
+ rwork[i__ - ll + nm12] = cosl;
+ rwork[i__ - ll + nm13] = -sinl;
+/* L150: (Fortran label) end of shifted bottom-to-top chase */
+ }
+ e[ll] = f;
+
+/* Test convergence */
+
+ if ((d__1 = e[ll], abs(d__1)) <= thresh) {
+ e[ll] = 0.;
+ }
+
+/* Update singular vectors if desired */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ zlasr_("L", "V", "B", &i__1, ncvt, &rwork[nm12 + 1], &rwork[
+ nm13 + 1], &vt[ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ zlasr_("R", "V", "B", nru, &i__1, &rwork[1], &rwork[*n], &u[
+ ll * u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ zlasr_("L", "V", "B", &i__1, ncc, &rwork[1], &rwork[*n], &c__[
+ ll + c_dim1], ldc);
+ }
+ }
+ }
+
+/* QR iteration finished, go back and check convergence */
+
+ goto L60;
+
+/* All singular values converged, so make them positive */
+
+L160:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] < 0.) {
+ d__[i__] = -d__[i__];
+
+/* Change sign of singular vectors, if desired */
+
+ if (*ncvt > 0) {
+ zdscal_(ncvt, &c_b72, &vt[i__ + vt_dim1], ldvt);
+ }
+ }
+/* L170: (Fortran label) end of sign-fixing loop over D */
+ }
+
+/* Sort the singular values into decreasing order (insertion sort on */
+/* singular values, but only one transposition per singular vector) */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Scan for smallest D(I) */
+
+ isub = 1;
+ smin = d__[1];
+ i__2 = *n + 1 - i__;
+ for (j = 2; j <= i__2; ++j) {
+ if (d__[j] <= smin) {
+ isub = j;
+ smin = d__[j];
+ }
+/* L180: (Fortran label) end of scan for smallest remaining D */
+ }
+ if (isub != *n + 1 - i__) {
+
+/* Swap singular values and vectors */
+
+ d__[isub] = d__[*n + 1 - i__];
+ d__[*n + 1 - i__] = smin;
+ if (*ncvt > 0) {
+ zswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[*n + 1 - i__ +
+ vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ zswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[(*n + 1 - i__) *
+ u_dim1 + 1], &c__1);
+ }
+ if (*ncc > 0) {
+ zswap_(ncc, &c__[isub + c_dim1], ldc, &c__[*n + 1 - i__ +
+ c_dim1], ldc);
+ }
+ }
+/* L190: (Fortran label) end of insertion-sort pass */
+ }
+ goto L220;
+
+/* Maximum number of iterations exceeded, failure to converge */
+
+L200:
+ *info = 0;
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (e[i__] != 0.) {
+ ++(*info);
+ }
+/* L210: (Fortran label) INFO counts off-diagonals not driven to zero */
+ }
+L220:
+ return 0;
+
+/* End of ZBDSQR */
+
+} /* zbdsqr_ */
diff --git a/contrib/libs/clapack/zcgesv.c b/contrib/libs/clapack/zcgesv.c
new file mode 100644
index 0000000000..796322cd6b
--- /dev/null
+++ b/contrib/libs/clapack/zcgesv.c
@@ -0,0 +1,432 @@
+/* zcgesv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {-1.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zcgesv_(integer *n, integer *nrhs, doublecomplex *a, 
+	integer *lda, integer *ipiv, doublecomplex *b, integer *ldb, 
+	doublecomplex *x, integer *ldx, doublecomplex *work, complex *swork, 
+	doublereal *rwork, integer *iter, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, work_dim1, work_offset, 
+	    x_dim1, x_offset, i__1, i__2;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal), d_imag(doublecomplex *);
+
+    /* Local variables */
+    integer i__;
+    doublereal cte, eps, anrm;
+    integer ptsa;
+    doublereal rnrm, xnrm;
+    integer ptsx, iiter;
+    extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *, 
+	    integer *, doublecomplex *, doublecomplex *, integer *, 
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *, 
+	    integer *), zaxpy_(integer *, doublecomplex *, 
+	    doublecomplex *, integer *, doublecomplex *, integer *), clag2z_(
+	    integer *, integer *, complex *, integer *, doublecomplex *, 
+	    integer *, integer *), zlag2c_(integer *, integer *, 
+	    doublecomplex *, integer *, complex *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    extern /* Subroutine */ int cgetrf_(integer *, integer *, complex *, 
+	    integer *, integer *, integer *), xerbla_(char *, integer *);
+    extern doublereal zlange_(char *, integer *, integer *, doublecomplex *, 
+	    integer *, doublereal *);
+    extern /* Subroutine */ int cgetrs_(char *, integer *, integer *, complex 
+	    *, integer *, integer *, complex *, integer *, integer *);
+    extern integer izamax_(integer *, doublecomplex *, integer *);
+    extern /* Subroutine */ int zlacpy_(char *, integer *, integer *, 
+	    doublecomplex *, integer *, doublecomplex *, integer *), 
+	    zgetrf_(integer *, integer *, doublecomplex *, integer *, integer 
+	    *, integer *), zgetrs_(char *, integer *, integer *, 
+	    doublecomplex *, integer *, integer *, doublecomplex *, integer *, 
+	    integer *);
+
+
+/* -- LAPACK PROTOTYPE driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* January 2007 */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZCGESV computes the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N matrix and X and B are N-by-NRHS matrices. */
+
+/* ZCGESV first attempts to factorize the matrix in COMPLEX and use this */
+/* factorization within an iterative refinement procedure to produce a */
+/* solution with COMPLEX*16 normwise backward error quality (see below). */
+/* If the approach fails the method switches to a COMPLEX*16 */
+/* factorization and solve. */
+
+/* The iterative refinement is not going to be a winning strategy if */
+/* the ratio COMPLEX performance over COMPLEX*16 performance is too */
+/* small. A reasonable strategy should take the number of right-hand */
+/* sides and the size of the matrix into account. This might be done */
+/* with a call to ILAENV in the future. Up to now, we always try */
+/* iterative refinement. */
+
+/* The iterative refinement process is stopped if */
+/* ITER > ITERMAX */
+/* or for all the RHS we have: */
+/* RNRM < SQRT(N)*XNRM*ANRM*EPS*BWDMAX */
+/* where */
+/* o ITER is the number of the current iteration in the iterative */
+/* refinement process */
+/* o RNRM is the infinity-norm of the residual */
+/* o XNRM is the infinity-norm of the solution */
+/* o ANRM is the infinity-operator-norm of the matrix A */
+/* o EPS is the machine epsilon returned by DLAMCH('Epsilon') */
+/* The value ITERMAX and BWDMAX are fixed to 30 and 1.0D+00 */
+/* respectively. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input or input/output) COMPLEX*16 array, */
+/* dimension (LDA,N) */
+/* On entry, the N-by-N coefficient matrix A. */
+/* On exit, if iterative refinement has been successfully used */
+/* (INFO.EQ.0 and ITER.GE.0, see description below), then A is */
+/* unchanged, if double precision factorization has been used */
+/* (INFO.EQ.0 and ITER.LT.0, see description below), then the */
+/* array A contains the factors L and U from the factorization */
+/* A = P*L*U; the unit diagonal elements of L are not stored. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* The pivot indices that define the permutation matrix P; */
+/* row i of the matrix was interchanged with row IPIV(i). */
+/* Corresponds either to the single precision factorization */
+/* (if INFO.EQ.0 and ITER.GE.0) or the double precision */
+/* factorization (if INFO.EQ.0 and ITER.LT.0). */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* If INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N*NRHS) */
+/* This array is used to hold the residual vectors. */
+
+/* SWORK (workspace) COMPLEX array, dimension (N*(N+NRHS)) */
+/* This array is used to use the single precision matrix and the */
+/* right-hand sides or solutions in single precision. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* ITER (output) INTEGER */
+/* < 0: iterative refinement has failed, COMPLEX*16 */
+/* factorization has been performed */
+/* -1 : the routine fell back to full precision for */
+/* implementation- or machine-specific reasons */
+/* -2 : narrowing the precision induced an overflow, */
+/* the routine fell back to full precision */
+/* -3 : failure of CGETRF */
+/* -31: stop the iterative refinement after the 30th */
+/* iterations */
+/* > 0: iterative refinement has been successfully used. */
+/* Returns the number of iterations */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) computed in COMPLEX*16 is exactly */
+/* zero. The factorization has been completed, but the */
+/* factor U is exactly singular, so the solution */
+/* could not be computed. */
+
+/* ========= */
+
+/* .. Parameters .. */
+
+
+
+
+/* .. Local Scalars .. */
+
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    /* Shift each array pointer so 1-based Fortran-style indexing works. */
+    work_dim1 = *n;
+    work_offset = 1 + work_dim1;
+    work -= work_offset;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --swork;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    *iter = 0;
+
+/* Test the input parameters. */
+
+    if (*n < 0) {
+	*info = -1;
+    } else if (*nrhs < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    } else if (*ldx < max(1,*n)) {
+	*info = -9;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZCGESV", &i__1);
+	return 0;
+    }
+
+/* Quick return if (N.EQ.0). */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Skip single precision iterative refinement if a priori slower */
+/* than double precision factorization. */
+/* (The crossover test below is hard-coded to FALSE_, so single */
+/* precision refinement is always attempted; see the ILAENV remark */
+/* in the Purpose section above.) */
+
+    if (FALSE_) {
+	*iter = -1;
+	goto L40;
+    }
+
+/* Compute some constants. */
+
+    anrm = zlange_("I", n, n, &a[a_offset], lda, &rwork[1]);
+    eps = dlamch_("Epsilon");
+/* CTE = ANRM * EPS * SQRT(N) * BWDMAX, with BWDMAX fixed at 1.0 */
+    cte = anrm * eps * sqrt((doublereal) (*n)) * 1.;
+
+/* Set the indices PTSA, PTSX for referencing SA and SX in SWORK. */
+
+    ptsa = 1;
+    ptsx = ptsa + *n * *n;
+
+/* Convert B from double precision to single precision and store the */
+/* result in SX. */
+
+    zlag2c_(n, nrhs, &b[b_offset], ldb, &swork[ptsx], n, info);
+
+    if (*info != 0) {
+	*iter = -2;
+	goto L40;
+    }
+
+/* Convert A from double precision to single precision and store the */
+/* result in SA. */
+
+    zlag2c_(n, n, &a[a_offset], lda, &swork[ptsa], n, info);
+
+    if (*info != 0) {
+	*iter = -2;
+	goto L40;
+    }
+
+/* Compute the LU factorization of SA. */
+
+    cgetrf_(n, n, &swork[ptsa], n, &ipiv[1], info);
+
+    if (*info != 0) {
+	*iter = -3;
+	goto L40;
+    }
+
+/* Solve the system SA*SX = SB. */
+
+    cgetrs_("No transpose", n, nrhs, &swork[ptsa], n, &ipiv[1], &swork[ptsx], 
+	    n, info);
+
+/* Convert SX back to double precision */
+
+    clag2z_(n, nrhs, &swork[ptsx], n, &x[x_offset], ldx, info);
+
+/* Compute R = B - AX (R is WORK). */
+
+    zlacpy_("All", n, nrhs, &b[b_offset], ldb, &work[work_offset], n);
+
+    zgemm_("No Transpose", "No Transpose", n, nrhs, n, &c_b1, &a[a_offset], 
+	    lda, &x[x_offset], ldx, &c_b2, &work[work_offset], n);
+
+/* Check whether the NRHS normwise backward errors satisfy the */
+/* stopping criterion. If yes, set ITER=0 and return. */
+/* (XNRM and RNRM take |Re|+|Im| of the column entry selected by */
+/* IZAMAX, i.e. the estimate of the column's infinity-norm.) */
+
+    i__1 = *nrhs;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = izamax_(n, &x[i__ * x_dim1 + 1], &c__1) + i__ * x_dim1;
+	xnrm = (d__1 = x[i__2].r, abs(d__1)) + (d__2 = d_imag(&x[izamax_(n, &
+		x[i__ * x_dim1 + 1], &c__1) + i__ * x_dim1]), abs(d__2));
+	i__2 = izamax_(n, &work[i__ * work_dim1 + 1], &c__1) + i__ * 
+		work_dim1;
+	rnrm = (d__1 = work[i__2].r, abs(d__1)) + (d__2 = d_imag(&work[
+		izamax_(n, &work[i__ * work_dim1 + 1], &c__1) + i__ * 
+		work_dim1]), abs(d__2));
+	if (rnrm > xnrm * cte) {
+	    goto L10;
+	}
+    }
+
+/* If we are here, the NRHS normwise backward errors satisfy the */
+/* stopping criterion. We are good to exit. */
+
+    *iter = 0;
+    return 0;
+
+L10:
+
+    for (iiter = 1; iiter <= 30; ++iiter) {
+
+/* Convert R (in WORK) from double precision to single precision */
+/* and store the result in SX. */
+
+	zlag2c_(n, nrhs, &work[work_offset], n, &swork[ptsx], n, info);
+
+	if (*info != 0) {
+	    *iter = -2;
+	    goto L40;
+	}
+
+/* Solve the system SA*SX = SR. */
+
+	cgetrs_("No transpose", n, nrhs, &swork[ptsa], n, &ipiv[1], &swork[
+		ptsx], n, info);
+
+/* Convert SX back to double precision and update the current */
+/* iterate. */
+
+	clag2z_(n, nrhs, &swork[ptsx], n, &work[work_offset], n, info);
+
+	i__1 = *nrhs;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    zaxpy_(n, &c_b2, &work[i__ * work_dim1 + 1], &c__1, &x[i__ * 
+		    x_dim1 + 1], &c__1);
+	}
+
+/* Compute R = B - AX (R is WORK). */
+
+	zlacpy_("All", n, nrhs, &b[b_offset], ldb, &work[work_offset], n);
+
+	zgemm_("No Transpose", "No Transpose", n, nrhs, n, &c_b1, &a[a_offset]
+, lda, &x[x_offset], ldx, &c_b2, &work[work_offset], n);
+
+/* Check whether the NRHS normwise backward errors satisfy the */
+/* stopping criterion. If yes, set ITER=IITER>0 and return. */
+
+	i__1 = *nrhs;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = izamax_(n, &x[i__ * x_dim1 + 1], &c__1) + i__ * x_dim1;
+	    xnrm = (d__1 = x[i__2].r, abs(d__1)) + (d__2 = d_imag(&x[izamax_(
+		    n, &x[i__ * x_dim1 + 1], &c__1) + i__ * x_dim1]), abs(
+		    d__2));
+	    i__2 = izamax_(n, &work[i__ * work_dim1 + 1], &c__1) + i__ * 
+		    work_dim1;
+	    rnrm = (d__1 = work[i__2].r, abs(d__1)) + (d__2 = d_imag(&work[
+		    izamax_(n, &work[i__ * work_dim1 + 1], &c__1) + i__ * 
+		    work_dim1]), abs(d__2));
+	    if (rnrm > xnrm * cte) {
+		goto L20;
+	    }
+	}
+
+/* If we are here, the NRHS normwise backward errors satisfy the */
+/* stopping criterion, we are good to exit. */
+
+	*iter = iiter;
+
+	return 0;
+
+L20:
+
+/* L30: */
+	;
+    }
+
+/* If we are at this place of the code, this is because we have */
+/* performed ITER=ITERMAX iterations and never satisfied the stopping */
+/* criterion, set up the ITER flag accordingly and follow up on double */
+/* precision routine. */
+
+    *iter = -31;
+
+L40:
+
+/* Single-precision iterative refinement failed to converge to a */
+/* satisfactory solution, so we resort to double precision. */
+
+    zgetrf_(n, n, &a[a_offset], lda, &ipiv[1], info);
+
+    if (*info != 0) {
+	return 0;
+    }
+
+    zlacpy_("All", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    zgetrs_("No transpose", n, nrhs, &a[a_offset], lda, &ipiv[1], &x[x_offset]
+, ldx, info);
+
+    return 0;
+
+/* End of ZCGESV. */
+
+} /* zcgesv_ */
diff --git a/contrib/libs/clapack/zcposv.c b/contrib/libs/clapack/zcposv.c
new file mode 100644
index 0000000000..5a2d9c93b2
--- /dev/null
+++ b/contrib/libs/clapack/zcposv.c
@@ -0,0 +1,440 @@
+/* zcposv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {-1.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zcposv_(char *uplo, integer *n, integer *nrhs, 
+	doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb, 
+	doublecomplex *x, integer *ldx, doublecomplex *work, complex *swork, 
+	doublereal *rwork, integer *iter, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, work_dim1, work_offset, 
+	    x_dim1, x_offset, i__1, i__2;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal), d_imag(doublecomplex *);
+
+    /* Local variables */
+    integer i__;
+    doublereal cte, eps, anrm;
+    integer ptsa;
+    doublereal rnrm, xnrm;
+    integer ptsx;
+    extern logical lsame_(char *, char *);
+    integer iiter;
+    extern /* Subroutine */ int zhemm_(char *, char *, integer *, integer *, 
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *, 
+	    integer *, doublecomplex *, doublecomplex *, integer *), zaxpy_(integer *, doublecomplex *, doublecomplex *, 
+	    integer *, doublecomplex *, integer *), zlag2c_(integer *, 
+	    integer *, doublecomplex *, integer *, complex *, integer *, 
+	    integer *), clag2z_(integer *, integer *, complex *, integer *, 
+	    doublecomplex *, integer *, integer *), zlat2c_(char *, integer *, 
+	    doublecomplex *, integer *, complex *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern doublereal zlanhe_(char *, char *, integer *, doublecomplex *, 
+	    integer *, doublereal *);
+    extern integer izamax_(integer *, doublecomplex *, integer *);
+    extern /* Subroutine */ int cpotrf_(char *, integer *, complex *, integer 
+	    *, integer *), zlacpy_(char *, integer *, integer *, 
+	    doublecomplex *, integer *, doublecomplex *, integer *), 
+	    cpotrs_(char *, integer *, integer *, complex *, integer *, 
+	    complex *, integer *, integer *), zpotrf_(char *, integer 
+	    *, doublecomplex *, integer *, integer *), zpotrs_(char *, 
+	    integer *, integer *, doublecomplex *, integer *, doublecomplex *
+, integer *, integer *);
+
+
+/* -- LAPACK PROTOTYPE driver routine (version 3.2.1) -- */
+
+/* -- April 2009 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZCPOSV computes the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N Hermitian positive definite matrix and X and B */
+/* are N-by-NRHS matrices. */
+
+/* ZCPOSV first attempts to factorize the matrix in COMPLEX and use this */
+/* factorization within an iterative refinement procedure to produce a */
+/* solution with COMPLEX*16 normwise backward error quality (see below). */
+/* If the approach fails the method switches to a COMPLEX*16 */
+/* factorization and solve. */
+
+/* The iterative refinement is not going to be a winning strategy if */
+/* the ratio COMPLEX performance over COMPLEX*16 performance is too */
+/* small. A reasonable strategy should take the number of right-hand */
+/* sides and the size of the matrix into account. This might be done */
+/* with a call to ILAENV in the future. Up to now, we always try */
+/* iterative refinement. */
+
+/* The iterative refinement process is stopped if */
+/* ITER > ITERMAX */
+/* or for all the RHS we have: */
+/* RNRM < SQRT(N)*XNRM*ANRM*EPS*BWDMAX */
+/* where */
+/* o ITER is the number of the current iteration in the iterative */
+/* refinement process */
+/* o RNRM is the infinity-norm of the residual */
+/* o XNRM is the infinity-norm of the solution */
+/* o ANRM is the infinity-operator-norm of the matrix A */
+/* o EPS is the machine epsilon returned by DLAMCH('Epsilon') */
+/* The value ITERMAX and BWDMAX are fixed to 30 and 1.0D+00 */
+/* respectively. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input or input/output) COMPLEX*16 array, */
+/* dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* Note that the imaginary parts of the diagonal */
+/* elements need not be set and are assumed to be zero. */
+
+/* On exit, if iterative refinement has been successfully used */
+/* (INFO.EQ.0 and ITER.GE.0, see description below), then A is */
+/* unchanged, if double precision factorization has been used */
+/* (INFO.EQ.0 and ITER.LT.0, see description below), then the */
+/* array A contains the factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* If INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N*NRHS) */
+/* This array is used to hold the residual vectors. */
+
+/* SWORK (workspace) COMPLEX array, dimension (N*(N+NRHS)) */
+/* This array is used to use the single precision matrix and the */
+/* right-hand sides or solutions in single precision. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* ITER (output) INTEGER */
+/* < 0: iterative refinement has failed, COMPLEX*16 */
+/* factorization has been performed */
+/* -1 : the routine fell back to full precision for */
+/* implementation- or machine-specific reasons */
+/* -2 : narrowing the precision induced an overflow, */
+/* the routine fell back to full precision */
+/* -3 : failure of CPOTRF */
+/* -31: stop the iterative refinement after the 30th */
+/* iterations */
+/* > 0: iterative refinement has been successfully used. */
+/* Returns the number of iterations */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i of */
+/* (COMPLEX*16) A is not positive definite, so the */
+/* factorization could not be completed, and the solution */
+/* has not been computed. */
+
+/* ========= */
+
+/* .. Parameters .. */
+
+
+
+
+/* .. Local Scalars .. */
+
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    /* Shift each array pointer so 1-based Fortran-style indexing works. */
+    work_dim1 = *n;
+    work_offset = 1 + work_dim1;
+    work -= work_offset;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --swork;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    *iter = 0;
+
+/* Test the input parameters. */
+
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    } else if (*ldx < max(1,*n)) {
+	*info = -9;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZCPOSV", &i__1);
+	return 0;
+    }
+
+/* Quick return if (N.EQ.0). */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Skip single precision iterative refinement if a priori slower */
+/* than double precision factorization. */
+/* (The crossover test below is hard-coded to FALSE_, so single */
+/* precision refinement is always attempted; see the ILAENV remark */
+/* in the Purpose section above.) */
+
+    if (FALSE_) {
+	*iter = -1;
+	goto L40;
+    }
+
+/* Compute some constants. */
+
+    anrm = zlanhe_("I", uplo, n, &a[a_offset], lda, &rwork[1]);
+    eps = dlamch_("Epsilon");
+/* CTE = ANRM * EPS * SQRT(N) * BWDMAX, with BWDMAX fixed at 1.0 */
+    cte = anrm * eps * sqrt((doublereal) (*n)) * 1.;
+
+/* Set the indices PTSA, PTSX for referencing SA and SX in SWORK. */
+
+    ptsa = 1;
+    ptsx = ptsa + *n * *n;
+
+/* Convert B from double precision to single precision and store the */
+/* result in SX. */
+
+    zlag2c_(n, nrhs, &b[b_offset], ldb, &swork[ptsx], n, info);
+
+    if (*info != 0) {
+	*iter = -2;
+	goto L40;
+    }
+
+/* Convert A from double precision to single precision and store the */
+/* result in SA. */
+
+    zlat2c_(uplo, n, &a[a_offset], lda, &swork[ptsa], n, info);
+
+    if (*info != 0) {
+	*iter = -2;
+	goto L40;
+    }
+
+/* Compute the Cholesky factorization of SA. */
+
+    cpotrf_(uplo, n, &swork[ptsa], n, info);
+
+    if (*info != 0) {
+	*iter = -3;
+	goto L40;
+    }
+
+/* Solve the system SA*SX = SB. */
+
+    cpotrs_(uplo, n, nrhs, &swork[ptsa], n, &swork[ptsx], n, info);
+
+/* Convert SX back to COMPLEX*16 */
+
+    clag2z_(n, nrhs, &swork[ptsx], n, &x[x_offset], ldx, info);
+
+/* Compute R = B - AX (R is WORK). */
+
+    zlacpy_("All", n, nrhs, &b[b_offset], ldb, &work[work_offset], n);
+
+    zhemm_("Left", uplo, n, nrhs, &c_b1, &a[a_offset], lda, &x[x_offset], ldx, 
+	     &c_b2, &work[work_offset], n);
+
+/* Check whether the NRHS normwise backward errors satisfy the */
+/* stopping criterion. If yes, set ITER=0 and return. */
+/* (XNRM and RNRM take |Re|+|Im| of the column entry selected by */
+/* IZAMAX, i.e. the estimate of the column's infinity-norm.) */
+
+    i__1 = *nrhs;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = izamax_(n, &x[i__ * x_dim1 + 1], &c__1) + i__ * x_dim1;
+	xnrm = (d__1 = x[i__2].r, abs(d__1)) + (d__2 = d_imag(&x[izamax_(n, &
+		x[i__ * x_dim1 + 1], &c__1) + i__ * x_dim1]), abs(d__2));
+	i__2 = izamax_(n, &work[i__ * work_dim1 + 1], &c__1) + i__ * 
+		work_dim1;
+	rnrm = (d__1 = work[i__2].r, abs(d__1)) + (d__2 = d_imag(&work[
+		izamax_(n, &work[i__ * work_dim1 + 1], &c__1) + i__ * 
+		work_dim1]), abs(d__2));
+	if (rnrm > xnrm * cte) {
+	    goto L10;
+	}
+    }
+
+/* If we are here, the NRHS normwise backward errors satisfy the */
+/* stopping criterion. We are good to exit. */
+
+    *iter = 0;
+    return 0;
+
+L10:
+
+    for (iiter = 1; iiter <= 30; ++iiter) {
+
+/* Convert R (in WORK) from double precision to single precision */
+/* and store the result in SX. */
+
+	zlag2c_(n, nrhs, &work[work_offset], n, &swork[ptsx], n, info);
+
+	if (*info != 0) {
+	    *iter = -2;
+	    goto L40;
+	}
+
+/* Solve the system SA*SX = SR. */
+
+	cpotrs_(uplo, n, nrhs, &swork[ptsa], n, &swork[ptsx], n, info);
+
+/* Convert SX back to double precision and update the current */
+/* iterate. */
+
+	clag2z_(n, nrhs, &swork[ptsx], n, &work[work_offset], n, info);
+
+	i__1 = *nrhs;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    zaxpy_(n, &c_b2, &work[i__ * work_dim1 + 1], &c__1, &x[i__ * 
+		    x_dim1 + 1], &c__1);
+	}
+
+/* Compute R = B - AX (R is WORK). */
+
+	zlacpy_("All", n, nrhs, &b[b_offset], ldb, &work[work_offset], n);
+
+	zhemm_("L", uplo, n, nrhs, &c_b1, &a[a_offset], lda, &x[x_offset], 
+		ldx, &c_b2, &work[work_offset], n);
+
+/* Check whether the NRHS normwise backward errors satisfy the */
+/* stopping criterion. If yes, set ITER=IITER>0 and return. */
+
+	i__1 = *nrhs;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = izamax_(n, &x[i__ * x_dim1 + 1], &c__1) + i__ * x_dim1;
+	    xnrm = (d__1 = x[i__2].r, abs(d__1)) + (d__2 = d_imag(&x[izamax_(
+		    n, &x[i__ * x_dim1 + 1], &c__1) + i__ * x_dim1]), abs(
+		    d__2));
+	    i__2 = izamax_(n, &work[i__ * work_dim1 + 1], &c__1) + i__ * 
+		    work_dim1;
+	    rnrm = (d__1 = work[i__2].r, abs(d__1)) + (d__2 = d_imag(&work[
+		    izamax_(n, &work[i__ * work_dim1 + 1], &c__1) + i__ * 
+		    work_dim1]), abs(d__2));
+	    if (rnrm > xnrm * cte) {
+		goto L20;
+	    }
+	}
+
+/* If we are here, the NRHS normwise backward errors satisfy the */
+/* stopping criterion, we are good to exit. */
+
+	*iter = iiter;
+
+	return 0;
+
+L20:
+
+/* L30: */
+	;
+    }
+
+/* If we are at this place of the code, this is because we have */
+/* performed ITER=ITERMAX iterations and never satisfied the */
+/* stopping criterion, set up the ITER flag accordingly and follow */
+/* up on double precision routine. */
+
+    *iter = -31;
+
+L40:
+
+/* Single-precision iterative refinement failed to converge to a */
+/* satisfactory solution, so we resort to double precision. */
+
+    zpotrf_(uplo, n, &a[a_offset], lda, info);
+
+    if (*info != 0) {
+	return 0;
+    }
+
+    zlacpy_("All", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    zpotrs_(uplo, n, nrhs, &a[a_offset], lda, &x[x_offset], ldx, info);
+
+    return 0;
+
+/* End of ZCPOSV. */
+
+} /* zcposv_ */
diff --git a/contrib/libs/clapack/zdrscl.c b/contrib/libs/clapack/zdrscl.c
new file mode 100644
index 0000000000..a21bd77d97
--- /dev/null
+++ b/contrib/libs/clapack/zdrscl.c
@@ -0,0 +1,135 @@
+/* zdrscl.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zdrscl_(integer *n, doublereal *sa, doublecomplex *sx, 
+	integer *incx)
+{
+    doublereal mul, cden;
+    logical done;
+    doublereal cnum, cden1, cnum1;
+    extern /* Subroutine */ int dlabad_(doublereal *, doublereal *);
+    extern doublereal dlamch_(char *);
+    extern /* Subroutine */ int zdscal_(integer *, doublereal *, 
+	    doublecomplex *, integer *);
+    doublereal bignum, smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZDRSCL multiplies an n-element complex vector x by the real scalar */
+/* 1/a. This is done without overflow or underflow as long as */
+/* the final result x/a does not overflow or underflow. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of components of the vector x. */
+
+/* SA (input) DOUBLE PRECISION */
+/* The scalar a which is used to divide each component of x. */
+/* SA must be >= 0, or the subroutine will divide by zero. */
+
+/* SX (input/output) COMPLEX*16 array, dimension */
+/* (1+(N-1)*abs(INCX)) */
+/* The n-element vector x. */
+
+/* INCX (input) INTEGER */
+/* The increment between successive values of the vector SX. */
+/* > 0: SX(1) = X(1) and SX(1+(i-1)*INCX) = x(i), 1< i<= n */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+    /* Parameter adjustments */
+    --sx;
+
+    /* Function Body */
+    if (*n <= 0) {
+	return 0;
+    }
+
+/* Get machine parameters */
+
+    smlnum = dlamch_("S");
+    bignum = 1. / smlnum;
+    dlabad_(&smlnum, &bignum);
+
+/* Initialize the denominator to SA and the numerator to 1. */
+
+    cden = *sa;
+    cnum = 1.;
+
+/* Repeatedly apply a safe partial scale factor MUL (SMLNUM or BIGNUM) */
+/* until the remaining factor CNUM/CDEN can be formed directly without */
+/* overflow or underflow; DONE flags the final pass. */
+
+L10:
+    cden1 = cden * smlnum;
+    cnum1 = cnum / bignum;
+    if (abs(cden1) > abs(cnum) && cnum != 0.) {
+
+/* Pre-multiply X by SMLNUM if CDEN is large compared to CNUM. */
+
+	mul = smlnum;
+	done = FALSE_;
+	cden = cden1;
+    } else if (abs(cnum1) > abs(cden)) {
+
+/* Pre-multiply X by BIGNUM if CDEN is small compared to CNUM. */
+
+	mul = bignum;
+	done = FALSE_;
+	cnum = cnum1;
+    } else {
+
+/* Multiply X by CNUM / CDEN and return. */
+
+	mul = cnum / cden;
+	done = TRUE_;
+    }
+
+/* Scale the vector X by MUL */
+
+    zdscal_(n, &mul, &sx[1], incx);
+
+    if (! done) {
+	goto L10;
+    }
+
+    return 0;
+
+/* End of ZDRSCL */
+
+} /* zdrscl_ */
diff --git a/contrib/libs/clapack/zgbbrd.c b/contrib/libs/clapack/zgbbrd.c
new file mode 100644
index 0000000000..64be234d0f
--- /dev/null
+++ b/contrib/libs/clapack/zgbbrd.c
@@ -0,0 +1,654 @@
+/* zgbbrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zgbbrd_(char *vect, integer *m, integer *n, integer *ncc,
+ integer *kl, integer *ku, doublecomplex *ab, integer *ldab,
+ doublereal *d__, doublereal *e, doublecomplex *q, integer *ldq,
+ doublecomplex *pt, integer *ldpt, doublecomplex *c__, integer *ldc,
+ doublecomplex *work, doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, c_dim1, c_offset, pt_dim1, pt_offset, q_dim1,
+ q_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+ double z_abs(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, l;
+ doublecomplex t;
+ integer j1, j2, kb;
+ doublecomplex ra, rb;
+ doublereal rc;
+ integer kk, ml, nr, mu;
+ doublecomplex rs;
+ integer kb1, ml0, mu0, klm, kun, nrt, klu1, inca;
+ doublereal abst;
+ extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublereal *, doublecomplex *);
+ extern logical lsame_(char *, char *);
+ logical wantb, wantc;
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *);
+ integer minmn;
+ logical wantq;
+ extern /* Subroutine */ int xerbla_(char *, integer *), zlaset_(
+ char *, integer *, integer *, doublecomplex *, doublecomplex *,
+ doublecomplex *, integer *), zlartg_(doublecomplex *,
+ doublecomplex *, doublereal *, doublecomplex *, doublecomplex *),
+ zlargv_(integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublereal *, integer *);
+ logical wantpt;
+ extern /* Subroutine */ int zlartv_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublereal *, doublecomplex *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGBBRD reduces a complex general m-by-n band matrix A to real upper */
+/* bidiagonal form B by a unitary transformation: Q' * A * P = B. */
+
+/* The routine computes B, and optionally forms Q or P', or computes */
+/* Q'*C for a given matrix C. */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* Specifies whether or not the matrices Q and P' are to be */
+/* formed. */
+/* = 'N': do not form Q or P'; */
+/* = 'Q': form Q only; */
+/* = 'P': form P' only; */
+/* = 'B': form both. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NCC (input) INTEGER */
+/* The number of columns of the matrix C. NCC >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals of the matrix A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals of the matrix A. KU >= 0. */
+
+/* AB (input/output) COMPLEX*16 array, dimension (LDAB,N) */
+/* On entry, the m-by-n band matrix A, stored in rows 1 to */
+/* KL+KU+1. The j-th column of A is stored in the j-th column of */
+/* the array AB as follows: */
+/* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl). */
+/* On exit, A is overwritten by values generated during the */
+/* reduction. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array A. LDAB >= KL+KU+1. */
+
+/* D (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The diagonal elements of the bidiagonal matrix B. */
+
+/* E (output) DOUBLE PRECISION array, dimension (min(M,N)-1) */
+/* The superdiagonal elements of the bidiagonal matrix B. */
+
+/* Q (output) COMPLEX*16 array, dimension (LDQ,M) */
+/* If VECT = 'Q' or 'B', the m-by-m unitary matrix Q. */
+/* If VECT = 'N' or 'P', the array Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. */
+/* LDQ >= max(1,M) if VECT = 'Q' or 'B'; LDQ >= 1 otherwise. */
+
+/* PT (output) COMPLEX*16 array, dimension (LDPT,N) */
+/* If VECT = 'P' or 'B', the n-by-n unitary matrix P'. */
+/* If VECT = 'N' or 'Q', the array PT is not referenced. */
+
+/* LDPT (input) INTEGER */
+/* The leading dimension of the array PT. */
+/* LDPT >= max(1,N) if VECT = 'P' or 'B'; LDPT >= 1 otherwise. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,NCC) */
+/* On entry, an m-by-ncc matrix C. */
+/* On exit, C is overwritten by Q'*C. */
+/* C is not referenced if NCC = 0. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. */
+/* LDC >= max(1,M) if NCC > 0; LDC >= 1 if NCC = 0. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (max(M,N)) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (max(M,N)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --d__;
+ --e;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ pt_dim1 = *ldpt;
+ pt_offset = 1 + pt_dim1;
+ pt -= pt_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ wantb = lsame_(vect, "B");
+ wantq = lsame_(vect, "Q") || wantb;
+ wantpt = lsame_(vect, "P") || wantb;
+ wantc = *ncc > 0;
+ klu1 = *kl + *ku + 1;
+ *info = 0;
+ if (! wantq && ! wantpt && ! lsame_(vect, "N")) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ncc < 0) {
+ *info = -4;
+ } else if (*kl < 0) {
+ *info = -5;
+ } else if (*ku < 0) {
+ *info = -6;
+ } else if (*ldab < klu1) {
+ *info = -8;
+ } else if (*ldq < 1 || wantq && *ldq < max(1,*m)) {
+ *info = -12;
+ } else if (*ldpt < 1 || wantpt && *ldpt < max(1,*n)) {
+ *info = -14;
+ } else if (*ldc < 1 || wantc && *ldc < max(1,*m)) {
+ *info = -16;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGBBRD", &i__1);
+ return 0;
+ }
+
+/* Initialize Q and P' to the unit matrix, if needed */
+
+ if (wantq) {
+ zlaset_("Full", m, m, &c_b1, &c_b2, &q[q_offset], ldq);
+ }
+ if (wantpt) {
+ zlaset_("Full", n, n, &c_b1, &c_b2, &pt[pt_offset], ldpt);
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ minmn = min(*m,*n);
+
+ if (*kl + *ku > 1) {
+
+/* Reduce to upper bidiagonal form if KU > 0; if KU = 0, reduce */
+/* first to lower bidiagonal form and then transform to upper */
+/* bidiagonal */
+
+ if (*ku > 0) {
+ ml0 = 1;
+ mu0 = 2;
+ } else {
+ ml0 = 2;
+ mu0 = 1;
+ }
+
+/* Wherever possible, plane rotations are generated and applied in */
+/* vector operations of length NR over the index set J1:J2:KLU1. */
+
+/* The complex sines of the plane rotations are stored in WORK, */
+/* and the real cosines in RWORK. */
+
+/* Computing MIN */
+ i__1 = *m - 1;
+ klm = min(i__1,*kl);
+/* Computing MIN */
+ i__1 = *n - 1;
+ kun = min(i__1,*ku);
+ kb = klm + kun;
+ kb1 = kb + 1;
+ inca = kb1 * *ldab;
+ nr = 0;
+ j1 = klm + 2;
+ j2 = 1 - kun;
+
+ i__1 = minmn;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Reduce i-th column and i-th row of matrix to bidiagonal form */
+
+ ml = klm + 1;
+ mu = kun + 1;
+ i__2 = kb;
+ for (kk = 1; kk <= i__2; ++kk) {
+ j1 += kb;
+ j2 += kb;
+
+/* generate plane rotations to annihilate nonzero elements */
+/* which have been created below the band */
+
+ if (nr > 0) {
+ zlargv_(&nr, &ab[klu1 + (j1 - klm - 1) * ab_dim1], &inca,
+ &work[j1], &kb1, &rwork[j1], &kb1);
+ }
+
+/* apply plane rotations from the left */
+
+ i__3 = kb;
+ for (l = 1; l <= i__3; ++l) {
+ if (j2 - klm + l - 1 > *n) {
+ nrt = nr - 1;
+ } else {
+ nrt = nr;
+ }
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[klu1 - l + (j1 - klm + l - 1) *
+ ab_dim1], &inca, &ab[klu1 - l + 1 + (j1 - klm
+ + l - 1) * ab_dim1], &inca, &rwork[j1], &work[
+ j1], &kb1);
+ }
+/* L10: */
+ }
+
+ if (ml > ml0) {
+ if (ml <= *m - i__ + 1) {
+
+/* generate plane rotation to annihilate a(i+ml-1,i) */
+/* within the band, and apply rotation from the left */
+
+ zlartg_(&ab[*ku + ml - 1 + i__ * ab_dim1], &ab[*ku +
+ ml + i__ * ab_dim1], &rwork[i__ + ml - 1], &
+ work[i__ + ml - 1], &ra);
+ i__3 = *ku + ml - 1 + i__ * ab_dim1;
+ ab[i__3].r = ra.r, ab[i__3].i = ra.i;
+ if (i__ < *n) {
+/* Computing MIN */
+ i__4 = *ku + ml - 2, i__5 = *n - i__;
+ i__3 = min(i__4,i__5);
+ i__6 = *ldab - 1;
+ i__7 = *ldab - 1;
+ zrot_(&i__3, &ab[*ku + ml - 2 + (i__ + 1) *
+ ab_dim1], &i__6, &ab[*ku + ml - 1 + (i__
+ + 1) * ab_dim1], &i__7, &rwork[i__ + ml -
+ 1], &work[i__ + ml - 1]);
+ }
+ }
+ ++nr;
+ j1 -= kb1;
+ }
+
+ if (wantq) {
+
+/* accumulate product of plane rotations in Q */
+
+ i__3 = j2;
+ i__4 = kb1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4)
+ {
+ d_cnjg(&z__1, &work[j]);
+ zrot_(m, &q[(j - 1) * q_dim1 + 1], &c__1, &q[j *
+ q_dim1 + 1], &c__1, &rwork[j], &z__1);
+/* L20: */
+ }
+ }
+
+ if (wantc) {
+
+/* apply plane rotations to C */
+
+ i__4 = j2;
+ i__3 = kb1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3)
+ {
+ zrot_(ncc, &c__[j - 1 + c_dim1], ldc, &c__[j + c_dim1]
+, ldc, &rwork[j], &work[j]);
+/* L30: */
+ }
+ }
+
+ if (j2 + kun > *n) {
+
+/* adjust J2 to keep within the bounds of the matrix */
+
+ --nr;
+ j2 -= kb1;
+ }
+
+ i__3 = j2;
+ i__4 = kb1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+
+/* create nonzero element a(j-1,j+ku) above the band */
+/* and store it in WORK(n+1:2*n) */
+
+ i__5 = j + kun;
+ i__6 = j;
+ i__7 = (j + kun) * ab_dim1 + 1;
+ z__1.r = work[i__6].r * ab[i__7].r - work[i__6].i * ab[
+ i__7].i, z__1.i = work[i__6].r * ab[i__7].i +
+ work[i__6].i * ab[i__7].r;
+ work[i__5].r = z__1.r, work[i__5].i = z__1.i;
+ i__5 = (j + kun) * ab_dim1 + 1;
+ i__6 = j;
+ i__7 = (j + kun) * ab_dim1 + 1;
+ z__1.r = rwork[i__6] * ab[i__7].r, z__1.i = rwork[i__6] *
+ ab[i__7].i;
+ ab[i__5].r = z__1.r, ab[i__5].i = z__1.i;
+/* L40: */
+ }
+
+/* generate plane rotations to annihilate nonzero elements */
+/* which have been generated above the band */
+
+ if (nr > 0) {
+ zlargv_(&nr, &ab[(j1 + kun - 1) * ab_dim1 + 1], &inca, &
+ work[j1 + kun], &kb1, &rwork[j1 + kun], &kb1);
+ }
+
+/* apply plane rotations from the right */
+
+ i__4 = kb;
+ for (l = 1; l <= i__4; ++l) {
+ if (j2 + l - 1 > *m) {
+ nrt = nr - 1;
+ } else {
+ nrt = nr;
+ }
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[l + 1 + (j1 + kun - 1) * ab_dim1], &
+ inca, &ab[l + (j1 + kun) * ab_dim1], &inca, &
+ rwork[j1 + kun], &work[j1 + kun], &kb1);
+ }
+/* L50: */
+ }
+
+ if (ml == ml0 && mu > mu0) {
+ if (mu <= *n - i__ + 1) {
+
+/* generate plane rotation to annihilate a(i,i+mu-1) */
+/* within the band, and apply rotation from the right */
+
+ zlartg_(&ab[*ku - mu + 3 + (i__ + mu - 2) * ab_dim1],
+ &ab[*ku - mu + 2 + (i__ + mu - 1) * ab_dim1],
+ &rwork[i__ + mu - 1], &work[i__ + mu - 1], &
+ ra);
+ i__4 = *ku - mu + 3 + (i__ + mu - 2) * ab_dim1;
+ ab[i__4].r = ra.r, ab[i__4].i = ra.i;
+/* Computing MIN */
+ i__3 = *kl + mu - 2, i__5 = *m - i__;
+ i__4 = min(i__3,i__5);
+ zrot_(&i__4, &ab[*ku - mu + 4 + (i__ + mu - 2) *
+ ab_dim1], &c__1, &ab[*ku - mu + 3 + (i__ + mu
+ - 1) * ab_dim1], &c__1, &rwork[i__ + mu - 1],
+ &work[i__ + mu - 1]);
+ }
+ ++nr;
+ j1 -= kb1;
+ }
+
+ if (wantpt) {
+
+/* accumulate product of plane rotations in P' */
+
+ i__4 = j2;
+ i__3 = kb1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3)
+ {
+ d_cnjg(&z__1, &work[j + kun]);
+ zrot_(n, &pt[j + kun - 1 + pt_dim1], ldpt, &pt[j +
+ kun + pt_dim1], ldpt, &rwork[j + kun], &z__1);
+/* L60: */
+ }
+ }
+
+ if (j2 + kb > *m) {
+
+/* adjust J2 to keep within the bounds of the matrix */
+
+ --nr;
+ j2 -= kb1;
+ }
+
+ i__3 = j2;
+ i__4 = kb1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+
+/* create nonzero element a(j+kl+ku,j+ku-1) below the */
+/* band and store it in WORK(1:n) */
+
+ i__5 = j + kb;
+ i__6 = j + kun;
+ i__7 = klu1 + (j + kun) * ab_dim1;
+ z__1.r = work[i__6].r * ab[i__7].r - work[i__6].i * ab[
+ i__7].i, z__1.i = work[i__6].r * ab[i__7].i +
+ work[i__6].i * ab[i__7].r;
+ work[i__5].r = z__1.r, work[i__5].i = z__1.i;
+ i__5 = klu1 + (j + kun) * ab_dim1;
+ i__6 = j + kun;
+ i__7 = klu1 + (j + kun) * ab_dim1;
+ z__1.r = rwork[i__6] * ab[i__7].r, z__1.i = rwork[i__6] *
+ ab[i__7].i;
+ ab[i__5].r = z__1.r, ab[i__5].i = z__1.i;
+/* L70: */
+ }
+
+ if (ml > ml0) {
+ --ml;
+ } else {
+ --mu;
+ }
+/* L80: */
+ }
+/* L90: */
+ }
+ }
+
+ if (*ku == 0 && *kl > 0) {
+
+/* A has been reduced to complex lower bidiagonal form */
+
+/* Transform lower bidiagonal form to upper bidiagonal by applying */
+/* plane rotations from the left, overwriting superdiagonal */
+/* elements on subdiagonal elements */
+
+/* Computing MIN */
+ i__2 = *m - 1;
+ i__1 = min(i__2,*n);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ zlartg_(&ab[i__ * ab_dim1 + 1], &ab[i__ * ab_dim1 + 2], &rc, &rs,
+ &ra);
+ i__2 = i__ * ab_dim1 + 1;
+ ab[i__2].r = ra.r, ab[i__2].i = ra.i;
+ if (i__ < *n) {
+ i__2 = i__ * ab_dim1 + 2;
+ i__4 = (i__ + 1) * ab_dim1 + 1;
+ z__1.r = rs.r * ab[i__4].r - rs.i * ab[i__4].i, z__1.i = rs.r
+ * ab[i__4].i + rs.i * ab[i__4].r;
+ ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
+ i__2 = (i__ + 1) * ab_dim1 + 1;
+ i__4 = (i__ + 1) * ab_dim1 + 1;
+ z__1.r = rc * ab[i__4].r, z__1.i = rc * ab[i__4].i;
+ ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
+ }
+ if (wantq) {
+ d_cnjg(&z__1, &rs);
+ zrot_(m, &q[i__ * q_dim1 + 1], &c__1, &q[(i__ + 1) * q_dim1 +
+ 1], &c__1, &rc, &z__1);
+ }
+ if (wantc) {
+ zrot_(ncc, &c__[i__ + c_dim1], ldc, &c__[i__ + 1 + c_dim1],
+ ldc, &rc, &rs);
+ }
+/* L100: */
+ }
+ } else {
+
+/* A has been reduced to complex upper bidiagonal form or is */
+/* diagonal */
+
+ if (*ku > 0 && *m < *n) {
+
+/* Annihilate a(m,m+1) by applying plane rotations from the */
+/* right */
+
+ i__1 = *ku + (*m + 1) * ab_dim1;
+ rb.r = ab[i__1].r, rb.i = ab[i__1].i;
+ for (i__ = *m; i__ >= 1; --i__) {
+ zlartg_(&ab[*ku + 1 + i__ * ab_dim1], &rb, &rc, &rs, &ra);
+ i__1 = *ku + 1 + i__ * ab_dim1;
+ ab[i__1].r = ra.r, ab[i__1].i = ra.i;
+ if (i__ > 1) {
+ d_cnjg(&z__3, &rs);
+ z__2.r = -z__3.r, z__2.i = -z__3.i;
+ i__1 = *ku + i__ * ab_dim1;
+ z__1.r = z__2.r * ab[i__1].r - z__2.i * ab[i__1].i,
+ z__1.i = z__2.r * ab[i__1].i + z__2.i * ab[i__1]
+ .r;
+ rb.r = z__1.r, rb.i = z__1.i;
+ i__1 = *ku + i__ * ab_dim1;
+ i__2 = *ku + i__ * ab_dim1;
+ z__1.r = rc * ab[i__2].r, z__1.i = rc * ab[i__2].i;
+ ab[i__1].r = z__1.r, ab[i__1].i = z__1.i;
+ }
+ if (wantpt) {
+ d_cnjg(&z__1, &rs);
+ zrot_(n, &pt[i__ + pt_dim1], ldpt, &pt[*m + 1 + pt_dim1],
+ ldpt, &rc, &z__1);
+ }
+/* L110: */
+ }
+ }
+ }
+
+/* Make diagonal and superdiagonal elements real, storing them in D */
+/* and E */
+
+ i__1 = *ku + 1 + ab_dim1;
+ t.r = ab[i__1].r, t.i = ab[i__1].i;
+ i__1 = minmn;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ abst = z_abs(&t);
+ d__[i__] = abst;
+ if (abst != 0.) {
+ z__1.r = t.r / abst, z__1.i = t.i / abst;
+ t.r = z__1.r, t.i = z__1.i;
+ } else {
+ t.r = 1., t.i = 0.;
+ }
+ if (wantq) {
+ zscal_(m, &t, &q[i__ * q_dim1 + 1], &c__1);
+ }
+ if (wantc) {
+ d_cnjg(&z__1, &t);
+ zscal_(ncc, &z__1, &c__[i__ + c_dim1], ldc);
+ }
+ if (i__ < minmn) {
+ if (*ku == 0 && *kl == 0) {
+ e[i__] = 0.;
+ i__2 = (i__ + 1) * ab_dim1 + 1;
+ t.r = ab[i__2].r, t.i = ab[i__2].i;
+ } else {
+ if (*ku == 0) {
+ i__2 = i__ * ab_dim1 + 2;
+ d_cnjg(&z__2, &t);
+ z__1.r = ab[i__2].r * z__2.r - ab[i__2].i * z__2.i,
+ z__1.i = ab[i__2].r * z__2.i + ab[i__2].i *
+ z__2.r;
+ t.r = z__1.r, t.i = z__1.i;
+ } else {
+ i__2 = *ku + (i__ + 1) * ab_dim1;
+ d_cnjg(&z__2, &t);
+ z__1.r = ab[i__2].r * z__2.r - ab[i__2].i * z__2.i,
+ z__1.i = ab[i__2].r * z__2.i + ab[i__2].i *
+ z__2.r;
+ t.r = z__1.r, t.i = z__1.i;
+ }
+ abst = z_abs(&t);
+ e[i__] = abst;
+ if (abst != 0.) {
+ z__1.r = t.r / abst, z__1.i = t.i / abst;
+ t.r = z__1.r, t.i = z__1.i;
+ } else {
+ t.r = 1., t.i = 0.;
+ }
+ if (wantpt) {
+ zscal_(n, &t, &pt[i__ + 1 + pt_dim1], ldpt);
+ }
+ i__2 = *ku + 1 + (i__ + 1) * ab_dim1;
+ d_cnjg(&z__2, &t);
+ z__1.r = ab[i__2].r * z__2.r - ab[i__2].i * z__2.i, z__1.i =
+ ab[i__2].r * z__2.i + ab[i__2].i * z__2.r;
+ t.r = z__1.r, t.i = z__1.i;
+ }
+ }
+/* L120: */
+ }
+ return 0;
+
+/* End of ZGBBRD */
+
+} /* zgbbrd_ */
diff --git a/contrib/libs/clapack/zgbcon.c b/contrib/libs/clapack/zgbcon.c
new file mode 100644
index 0000000000..8f72f3ff53
--- /dev/null
+++ b/contrib/libs/clapack/zgbcon.c
@@ -0,0 +1,307 @@
+/* zgbcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zgbcon_(char *norm, integer *n, integer *kl, integer *ku,
+ doublecomplex *ab, integer *ldab, integer *ipiv, doublereal *anorm,
+ doublereal *rcond, doublecomplex *work, doublereal *rwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3;
+ doublereal d__1, d__2;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer j;
+ doublecomplex t;
+ integer kd, lm, jp, ix, kase, kase1;
+ doublereal scale;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ logical lnoti;
+ extern /* Subroutine */ int zaxpy_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), zlacn2_(
+ integer *, doublecomplex *, doublecomplex *, doublereal *,
+ integer *, integer *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal ainvnm;
+ extern integer izamax_(integer *, doublecomplex *, integer *);
+ logical onenrm;
+ extern /* Subroutine */ int zlatbs_(char *, char *, char *, char *,
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ doublereal *, doublereal *, integer *), zdrscl_(integer *, doublereal *, doublecomplex *,
+ integer *);
+ char normin[1];
+ doublereal smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGBCON estimates the reciprocal of the condition number of a complex */
+/* general band matrix A, in either the 1-norm or the infinity-norm, */
+/* using the LU factorization computed by ZGBTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as */
+/* RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input) COMPLEX*16 array, dimension (LDAB,N) */
+/* Details of the LU factorization of the band matrix A, as */
+/* computed by ZGBTRF. U is stored as an upper triangular band */
+/* matrix with KL+KU superdiagonals in rows 1 to KL+KU+1, and */
+/* the multipliers used during the factorization are stored in */
+/* rows KL+KU+2 to 2*KL+KU+1. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= 2*KL+KU+1. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= N, row i of the matrix was */
+/* interchanged with row IPIV(i). */
+
+/* ANORM (input) DOUBLE PRECISION */
+/* If NORM = '1' or 'O', the 1-norm of the original matrix A. */
+/* If NORM = 'I', the infinity-norm of the original matrix A. */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --ipiv;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+ if (! onenrm && ! lsame_(norm, "I")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*ldab < (*kl << 1) + *ku + 1) {
+ *info = -6;
+ } else if (*anorm < 0.) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGBCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.;
+ if (*n == 0) {
+ *rcond = 1.;
+ return 0;
+ } else if (*anorm == 0.) {
+ return 0;
+ }
+
+ smlnum = dlamch_("Safe minimum");
+
+/* Estimate the norm of inv(A). */
+
+ ainvnm = 0.;
+ *(unsigned char *)normin = 'N';
+ if (onenrm) {
+ kase1 = 1;
+ } else {
+ kase1 = 2;
+ }
+ kd = *kl + *ku + 1;
+ lnoti = *kl > 0;
+ kase = 0;
+L10:
+ zlacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+ if (kase == kase1) {
+
+/* Multiply by inv(L). */
+
+ if (lnoti) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *kl, i__3 = *n - j;
+ lm = min(i__2,i__3);
+ jp = ipiv[j];
+ i__2 = jp;
+ t.r = work[i__2].r, t.i = work[i__2].i;
+ if (jp != j) {
+ i__2 = jp;
+ i__3 = j;
+ work[i__2].r = work[i__3].r, work[i__2].i = work[i__3]
+ .i;
+ i__2 = j;
+ work[i__2].r = t.r, work[i__2].i = t.i;
+ }
+ z__1.r = -t.r, z__1.i = -t.i;
+ zaxpy_(&lm, &z__1, &ab[kd + 1 + j * ab_dim1], &c__1, &
+ work[j + 1], &c__1);
+/* L20: */
+ }
+ }
+
+/* Multiply by inv(U). */
+
+ i__1 = *kl + *ku;
+ zlatbs_("Upper", "No transpose", "Non-unit", normin, n, &i__1, &
+ ab[ab_offset], ldab, &work[1], &scale, &rwork[1], info);
+ } else {
+
+/* Multiply by inv(U'). */
+
+ i__1 = *kl + *ku;
+ zlatbs_("Upper", "Conjugate transpose", "Non-unit", normin, n, &
+ i__1, &ab[ab_offset], ldab, &work[1], &scale, &rwork[1],
+ info);
+
+/* Multiply by inv(L'). */
+
+ if (lnoti) {
+ for (j = *n - 1; j >= 1; --j) {
+/* Computing MIN */
+ i__1 = *kl, i__2 = *n - j;
+ lm = min(i__1,i__2);
+ i__1 = j;
+ i__2 = j;
+ zdotc_(&z__2, &lm, &ab[kd + 1 + j * ab_dim1], &c__1, &
+ work[j + 1], &c__1);
+ z__1.r = work[i__2].r - z__2.r, z__1.i = work[i__2].i -
+ z__2.i;
+ work[i__1].r = z__1.r, work[i__1].i = z__1.i;
+ jp = ipiv[j];
+ if (jp != j) {
+ i__1 = jp;
+ t.r = work[i__1].r, t.i = work[i__1].i;
+ i__1 = jp;
+ i__2 = j;
+ work[i__1].r = work[i__2].r, work[i__1].i = work[i__2]
+ .i;
+ i__1 = j;
+ work[i__1].r = t.r, work[i__1].i = t.i;
+ }
+/* L30: */
+ }
+ }
+ }
+
+/* Divide X by 1/SCALE if doing so will not cause overflow. */
+
+ *(unsigned char *)normin = 'Y';
+ if (scale != 1.) {
+ ix = izamax_(n, &work[1], &c__1);
+ i__1 = ix;
+ if (scale < ((d__1 = work[i__1].r, abs(d__1)) + (d__2 = d_imag(&
+ work[ix]), abs(d__2))) * smlnum || scale == 0.) {
+ goto L40;
+ }
+ zdrscl_(n, &scale, &work[1], &c__1);
+ }
+ goto L10;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.) {
+ *rcond = 1. / ainvnm / *anorm;
+ }
+
+L40:
+ return 0;
+
+/* End of ZGBCON */
+
+} /* zgbcon_ */
diff --git a/contrib/libs/clapack/zgbequ.c b/contrib/libs/clapack/zgbequ.c
new file mode 100644
index 0000000000..c9e6d278b3
--- /dev/null
+++ b/contrib/libs/clapack/zgbequ.c
@@ -0,0 +1,330 @@
+/* zgbequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zgbequ_(integer *m, integer *n, integer *kl, integer *ku,
+ doublecomplex *ab, integer *ldab, doublereal *r__, doublereal *c__,
+ doublereal *rowcnd, doublereal *colcnd, doublereal *amax, integer *
+ info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, kd;
+ doublereal rcmin, rcmax;
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum, smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGBEQU computes row and column scalings intended to equilibrate an */
+/* M-by-N band matrix A and reduce its condition number. R returns the */
+/* row scale factors and C the column scale factors, chosen to try to */
+/* make the largest element in each row and column of the matrix B with */
+/* elements B(i,j)=R(i)*A(i,j)*C(j) have absolute value 1. */
+
+/* R(i) and C(j) are restricted to be between SMLNUM = smallest safe */
+/* number and BIGNUM = largest safe number. Use of these scaling */
+/* factors is not guaranteed to reduce the condition number of A but */
+/* works well in practice. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input) COMPLEX*16 array, dimension (LDAB,N) */
+/* The band matrix A, stored in rows 1 to KL+KU+1. The j-th */
+/* column of A is stored in the j-th column of the array AB as */
+/* follows: */
+/* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KL+KU+1. */
+
+/* R (output) DOUBLE PRECISION array, dimension (M) */
+/* If INFO = 0, or INFO > M, R contains the row scale factors */
+/* for A. */
+
+/* C (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, C contains the column scale factors for A. */
+
+/* ROWCND (output) DOUBLE PRECISION */
+/* If INFO = 0 or INFO > M, ROWCND contains the ratio of the */
+/* smallest R(i) to the largest R(i). If ROWCND >= 0.1 and */
+/* AMAX is neither too large nor too small, it is not worth */
+/* scaling by R. */
+
+/* COLCND (output) DOUBLE PRECISION */
+/* If INFO = 0, COLCND contains the ratio of the smallest */
+/* C(i) to the largest C(i). If COLCND >= 0.1, it is not */
+/* worth scaling by C. */
+
+/* AMAX (output) DOUBLE PRECISION */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= M: the i-th row of A is exactly zero */
+/* > M: the (i-M)-th column of A is exactly zero */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --r__;
+ --c__;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*ldab < *kl + *ku + 1) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGBEQU", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ *rowcnd = 1.;
+ *colcnd = 1.;
+ *amax = 0.;
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ smlnum = dlamch_("S");
+ bignum = 1. / smlnum;
+
+/* Compute row scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__[i__] = 0.;
+/* L10: */
+ }
+
+/* Find the maximum element in each row. */
+
+ kd = *ku + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j - *ku;
+/* Computing MIN */
+ i__4 = j + *kl;
+ i__3 = min(i__4,*m);
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+ i__2 = kd + i__ - j + j * ab_dim1;
+ d__3 = r__[i__], d__4 = (d__1 = ab[i__2].r, abs(d__1)) + (d__2 =
+ d_imag(&ab[kd + i__ - j + j * ab_dim1]), abs(d__2));
+ r__[i__] = max(d__3,d__4);
+/* L20: */
+ }
+/* L30: */
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = rcmax, d__2 = r__[i__];
+ rcmax = max(d__1,d__2);
+/* Computing MIN */
+ d__1 = rcmin, d__2 = r__[i__];
+ rcmin = min(d__1,d__2);
+/* L40: */
+ }
+ *amax = rcmax;
+
+ if (rcmin == 0.) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (r__[i__] == 0.) {
+ *info = i__;
+ return 0;
+ }
+/* L50: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MIN */
+/* Computing MAX */
+ d__2 = r__[i__];
+ d__1 = max(d__2,smlnum);
+ r__[i__] = 1. / min(d__1,bignum);
+/* L60: */
+ }
+
+/* Compute ROWCND = min(R(I)) / max(R(I)) */
+
+ *rowcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ }
+
+/* Compute column scale factors */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j] = 0.;
+/* L70: */
+ }
+
+/* Find the maximum element in each column, */
+/* assuming the row scaling computed above. */
+
+ kd = *ku + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__3 = j - *ku;
+/* Computing MIN */
+ i__4 = j + *kl;
+ i__2 = min(i__4,*m);
+ for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = kd + i__ - j + j * ab_dim1;
+ d__3 = c__[j], d__4 = ((d__1 = ab[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&ab[kd + i__ - j + j * ab_dim1]), abs(d__2))) *
+ r__[i__];
+ c__[j] = max(d__3,d__4);
+/* L80: */
+ }
+/* L90: */
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ d__1 = rcmin, d__2 = c__[j];
+ rcmin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = rcmax, d__2 = c__[j];
+ rcmax = max(d__1,d__2);
+/* L100: */
+ }
+
+ if (rcmin == 0.) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (c__[j] == 0.) {
+ *info = *m + j;
+ return 0;
+ }
+/* L110: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+/* Computing MAX */
+ d__2 = c__[j];
+ d__1 = max(d__2,smlnum);
+ c__[j] = 1. / min(d__1,bignum);
+/* L120: */
+ }
+
+/* Compute COLCND = min(C(J)) / max(C(J)) */
+
+ *colcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ }
+
+ return 0;
+
+/* End of ZGBEQU */
+
+} /* zgbequ_ */
diff --git a/contrib/libs/clapack/zgbequb.c b/contrib/libs/clapack/zgbequb.c
new file mode 100644
index 0000000000..80c59a93f0
--- /dev/null
+++ b/contrib/libs/clapack/zgbequb.c
@@ -0,0 +1,355 @@
+/* zgbequb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zgbequb_(integer *m, integer *n, integer *kl, integer *
+ ku, doublecomplex *ab, integer *ldab, doublereal *r__, doublereal *
+ c__, doublereal *rowcnd, doublereal *colcnd, doublereal *amax,
+ integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double log(doublereal), d_imag(doublecomplex *), pow_di(doublereal *,
+ integer *);
+
+ /* Local variables */
+ integer i__, j, kd;
+ doublereal radix, rcmin, rcmax;
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum, logrdx, smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- Contributed by James Demmel, Deaglan Halligan, Yozo Hida and -- */
+/* -- Jason Riedy of Univ. of California Berkeley. -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley and NAG Ltd. -- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGBEQUB computes row and column scalings intended to equilibrate an */
+/* M-by-N matrix A and reduce its condition number. R returns the row */
+/* scale factors and C the column scale factors, chosen to try to make */
+/* the largest element in each row and column of the matrix B with */
+/* elements B(i,j)=R(i)*A(i,j)*C(j) have an absolute value of at most */
+/* the radix. */
+
+/* R(i) and C(j) are restricted to be a power of the radix between */
+/* SMLNUM = smallest safe number and BIGNUM = largest safe number. Use */
+/* of these scaling factors is not guaranteed to reduce the condition */
+/* number of A but works well in practice. */
+
+/* This routine differs from ZGBEQU by restricting the scaling factors */
+/* to a power of the radix. Barring over- and underflow, scaling by */
+/* these factors introduces no additional rounding errors. However, the */
+/* scaled entries' magnitudes are no longer approximately 1 but lie */
+/* between sqrt(radix) and 1/sqrt(radix). */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* On entry, the matrix A in band storage, in rows 1 to KL+KU+1. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(KU+1+i-j,j) = A(i,j) for max(1,j-KU)<=i<=min(N,j+kl) */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KL+KU+1. */
+
+/* R (output) DOUBLE PRECISION array, dimension (M) */
+/* If INFO = 0 or INFO > M, R contains the row scale factors */
+/* for A. */
+
+/* C (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, C contains the column scale factors for A. */
+
+/* ROWCND (output) DOUBLE PRECISION */
+/* If INFO = 0 or INFO > M, ROWCND contains the ratio of the */
+/* smallest R(i) to the largest R(i). If ROWCND >= 0.1 and */
+/* AMAX is neither too large nor too small, it is not worth */
+/* scaling by R. */
+
+/* COLCND (output) DOUBLE PRECISION */
+/* If INFO = 0, COLCND contains the ratio of the smallest */
+/* C(i) to the largest C(i). If COLCND >= 0.1, it is not */
+/* worth scaling by C. */
+
+/* AMAX (output) DOUBLE PRECISION */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= M: the i-th row of A is exactly zero */
+/* > M: the (i-M)-th column of A is exactly zero */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ /* (f2c idiom: shift base pointers so Fortran-style 1-based indexing works) */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --r__;
+ --c__;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*ldab < *kl + *ku + 1) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGBEQUB", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ *rowcnd = 1.;
+ *colcnd = 1.;
+ *amax = 0.;
+ return 0;
+ }
+
+/* Get machine constants. Assume SMLNUM is a power of the radix. */
+
+ smlnum = dlamch_("S");
+ bignum = 1. / smlnum;
+ radix = dlamch_("B");
+ logrdx = log(radix);
+
+/* Compute row scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__[i__] = 0.;
+/* L10: */
+ }
+
+/* Find the maximum element in each row. */
+/* KD is the row index of the main diagonal within the band storage */
+/* (AB(KU+1+i-j,j) = A(i,j)). */
+
+ kd = *ku + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j - *ku;
+/* Computing MIN */
+ i__4 = j + *kl;
+ i__3 = min(i__4,*m);
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+ i__2 = kd + i__ - j + j * ab_dim1;
+ d__3 = r__[i__], d__4 = (d__1 = ab[i__2].r, abs(d__1)) + (d__2 =
+ d_imag(&ab[kd + i__ - j + j * ab_dim1]), abs(d__2));
+ r__[i__] = max(d__3,d__4);
+/* L20: */
+ }
+/* L30: */
+ }
+/* Restrict each row scale factor to a power of the radix */
+/* (truncate LOG(R(I))/LOG(RADIX) and exponentiate). */
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (r__[i__] > 0.) {
+ i__3 = (integer) (log(r__[i__]) / logrdx);
+ r__[i__] = pow_di(&radix, &i__3);
+ }
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = rcmax, d__2 = r__[i__];
+ rcmax = max(d__1,d__2);
+/* Computing MIN */
+ d__1 = rcmin, d__2 = r__[i__];
+ rcmin = min(d__1,d__2);
+/* L40: */
+ }
+ *amax = rcmax;
+
+ if (rcmin == 0.) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (r__[i__] == 0.) {
+ *info = i__;
+ return 0;
+ }
+/* L50: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+/* Each factor is clamped to [SMLNUM, BIGNUM] before inversion */
+/* so the reciprocal cannot over- or underflow. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MIN */
+/* Computing MAX */
+ d__2 = r__[i__];
+ d__1 = max(d__2,smlnum);
+ r__[i__] = 1. / min(d__1,bignum);
+/* L60: */
+ }
+
+/* Compute ROWCND = min(R(I)) / max(R(I)). */
+
+ *rowcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ }
+
+/* Compute column scale factors. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j] = 0.;
+/* L70: */
+ }
+
+/* Find the maximum element in each column, */
+/* assuming the row scaling computed above. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__3 = j - *ku;
+/* Computing MIN */
+ i__4 = j + *kl;
+ i__2 = min(i__4,*m);
+ for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = kd + i__ - j + j * ab_dim1;
+ d__3 = c__[j], d__4 = ((d__1 = ab[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&ab[kd + i__ - j + j * ab_dim1]), abs(d__2))) *
+ r__[i__];
+ c__[j] = max(d__3,d__4);
+/* L80: */
+ }
+/* Restrict the column scale factor to a power of the radix. */
+ if (c__[j] > 0.) {
+ i__2 = (integer) (log(c__[j]) / logrdx);
+ c__[j] = pow_di(&radix, &i__2);
+ }
+/* L90: */
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ d__1 = rcmin, d__2 = c__[j];
+ rcmin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = rcmax, d__2 = c__[j];
+ rcmax = max(d__1,d__2);
+/* L100: */
+ }
+
+ if (rcmin == 0.) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (c__[j] == 0.) {
+ *info = *m + j;
+ return 0;
+ }
+/* L110: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+/* Clamp to [SMLNUM, BIGNUM] before inversion, as for R above. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+/* Computing MAX */
+ d__2 = c__[j];
+ d__1 = max(d__2,smlnum);
+ c__[j] = 1. / min(d__1,bignum);
+/* L120: */
+ }
+
+/* Compute COLCND = min(C(J)) / max(C(J)). */
+
+ *colcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ }
+
+ return 0;
+
+/* End of ZGBEQUB */
+
+} /* zgbequb_ */
diff --git a/contrib/libs/clapack/zgbrfs.c b/contrib/libs/clapack/zgbrfs.c
new file mode 100644
index 0000000000..f3827606ad
--- /dev/null
+++ b/contrib/libs/clapack/zgbrfs.c
@@ -0,0 +1,494 @@
+/* zgbrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zgbrfs_(char *trans, integer *n, integer *kl, integer *
+ ku, integer *nrhs, doublecomplex *ab, integer *ldab, doublecomplex *
+ afb, integer *ldafb, integer *ipiv, doublecomplex *b, integer *ldb,
+ doublecomplex *x, integer *ldx, doublereal *ferr, doublereal *berr,
+ doublecomplex *work, doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, afb_dim1, afb_offset, b_dim1, b_offset,
+ x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal s;
+ integer kk;
+ doublereal xk;
+ integer nz;
+ doublereal eps;
+ integer kase;
+ doublereal safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int zgbmv_(char *, integer *, integer *, integer *
+, integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ integer count;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zaxpy_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), zlacn2_(
+ integer *, doublecomplex *, doublecomplex *, doublereal *,
+ integer *, integer *);
+ extern doublereal dlamch_(char *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran;
+ char transn[1], transt[1];
+ doublereal lstres;
+ extern /* Subroutine */ int zgbtrs_(char *, integer *, integer *, integer
+ *, integer *, doublecomplex *, integer *, integer *,
+ doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGBRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is banded, and provides */
+/* error bounds and backward error estimates for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AB (input) COMPLEX*16 array, dimension (LDAB,N) */
+/* The original band matrix A, stored in rows 1 to KL+KU+1. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(n,j+kl). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KL+KU+1. */
+
+/* AFB (input) COMPLEX*16 array, dimension (LDAFB,N) */
+/* Details of the LU factorization of the band matrix A, as */
+/* computed by ZGBTRF. U is stored as an upper triangular band */
+/* matrix with KL+KU superdiagonals in rows 1 to KL+KU+1, and */
+/* the multipliers used during the factorization are stored in */
+/* rows KL+KU+2 to 2*KL+KU+1. */
+
+/* LDAFB (input) INTEGER */
+/* The leading dimension of the array AFB. LDAFB >= 2*KL+KU+1. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices from ZGBTRF; for 1<=i<=N, row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by ZGBTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+/* (Hard-coded as the constant 5 in the COUNT test below.) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ /* (f2c idiom: shift base pointers so Fortran-style 1-based indexing works) */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ afb_dim1 = *ldafb;
+ afb_offset = 1 + afb_dim1;
+ afb -= afb_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*nrhs < 0) {
+ *info = -5;
+ } else if (*ldab < *kl + *ku + 1) {
+ *info = -7;
+ } else if (*ldafb < (*kl << 1) + *ku + 1) {
+ *info = -9;
+ } else if (*ldb < max(1,*n)) {
+ *info = -12;
+ } else if (*ldx < max(1,*n)) {
+ *info = -14;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGBRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.;
+ berr[j] = 0.;
+/* L10: */
+ }
+ return 0;
+ }
+
+/* TRANSN/TRANST select op(A) and its conjugate transpose for the */
+/* norm-estimation solves below. */
+
+ if (notran) {
+ *(unsigned char *)transn = 'N';
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transn = 'C';
+ *(unsigned char *)transt = 'N';
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+/* Computing MIN */
+ i__1 = *kl + *ku + 2, i__2 = *n + 1;
+ nz = min(i__1,i__2);
+ eps = dlamch_("Epsilon");
+ safmin = dlamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A, A**T, or A**H, depending on TRANS. */
+
+ zcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+ z__1.r = -1., z__1.i = -0.;
+ zgbmv_(trans, n, n, kl, ku, &z__1, &ab[ab_offset], ldab, &x[j *
+ x_dim1 + 1], &c__1, &c_b1, &work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[i__] = (d__1 = b[i__3].r, abs(d__1)) + (d__2 = d_imag(&b[
+ i__ + j * b_dim1]), abs(d__2));
+/* L30: */
+ }
+
+/* Compute abs(op(A))*abs(X) + abs(B). */
+
+ if (notran) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ kk = *ku + 1 - k;
+ i__3 = k + j * x_dim1;
+ xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[k + j *
+ x_dim1]), abs(d__2));
+/* Computing MAX */
+ i__3 = 1, i__4 = k - *ku;
+/* Computing MIN */
+ i__6 = *n, i__7 = k + *kl;
+ i__5 = min(i__6,i__7);
+ for (i__ = max(i__3,i__4); i__ <= i__5; ++i__) {
+ i__3 = kk + i__ + k * ab_dim1;
+ rwork[i__] += ((d__1 = ab[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&ab[kk + i__ + k * ab_dim1]), abs(d__2))) *
+ xk;
+/* L40: */
+ }
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ kk = *ku + 1 - k;
+/* Computing MAX */
+ i__5 = 1, i__3 = k - *ku;
+/* Computing MIN */
+ i__6 = *n, i__7 = k + *kl;
+ i__4 = min(i__6,i__7);
+ for (i__ = max(i__5,i__3); i__ <= i__4; ++i__) {
+ i__5 = kk + i__ + k * ab_dim1;
+ i__3 = i__ + j * x_dim1;
+ s += ((d__1 = ab[i__5].r, abs(d__1)) + (d__2 = d_imag(&ab[
+ kk + i__ + k * ab_dim1]), abs(d__2))) * ((d__3 =
+ x[i__3].r, abs(d__3)) + (d__4 = d_imag(&x[i__ + j
+ * x_dim1]), abs(d__4)));
+/* L60: */
+ }
+ rwork[k] += s;
+/* L70: */
+ }
+ }
+ s = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+/* Computing MAX */
+ i__4 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__4].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2))) / rwork[i__];
+ s = max(d__3,d__4);
+ } else {
+/* Computing MAX */
+ i__4 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__4].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + safe1) / (rwork[i__]
+ + safe1);
+ s = max(d__3,d__4);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX (= 5) iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ zgbtrs_(trans, n, kl, ku, &c__1, &afb[afb_offset], ldafb, &ipiv[1]
+, &work[1], n, info);
+ zaxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use ZLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+ i__4 = i__;
+ rwork[i__] = (d__1 = work[i__4].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ ;
+ } else {
+ i__4 = i__;
+ rwork[i__] = (d__1 = work[i__4].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ + safe1;
+ }
+/* L90: */
+ }
+
+/* Reverse communication with ZLACN2: repeat until it returns */
+/* KASE = 0; ISAVE carries its state between calls. */
+
+ kase = 0;
+L100:
+ zlacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)**H). */
+
+ zgbtrs_(transt, n, kl, ku, &c__1, &afb[afb_offset], ldafb, &
+ ipiv[1], &work[1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__4 = i__;
+ i__5 = i__;
+ i__3 = i__;
+ z__1.r = rwork[i__5] * work[i__3].r, z__1.i = rwork[i__5]
+ * work[i__3].i;
+ work[i__4].r = z__1.r, work[i__4].i = z__1.i;
+/* L110: */
+ }
+ } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__4 = i__;
+ i__5 = i__;
+ i__3 = i__;
+ z__1.r = rwork[i__5] * work[i__3].r, z__1.i = rwork[i__5]
+ * work[i__3].i;
+ work[i__4].r = z__1.r, work[i__4].i = z__1.i;
+/* L120: */
+ }
+ zgbtrs_(transn, n, kl, ku, &c__1, &afb[afb_offset], ldafb, &
+ ipiv[1], &work[1], n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__4 = i__ + j * x_dim1;
+ d__3 = lstres, d__4 = (d__1 = x[i__4].r, abs(d__1)) + (d__2 =
+ d_imag(&x[i__ + j * x_dim1]), abs(d__2));
+ lstres = max(d__3,d__4);
+/* L130: */
+ }
+ if (lstres != 0.) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of ZGBRFS */
+
+} /* zgbrfs_ */
diff --git a/contrib/libs/clapack/zgbsv.c b/contrib/libs/clapack/zgbsv.c
new file mode 100644
index 0000000000..7be165e6f6
--- /dev/null
+++ b/contrib/libs/clapack/zgbsv.c
@@ -0,0 +1,176 @@
+/* zgbsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zgbsv_(integer *n, integer *kl, integer *ku, integer *
+ nrhs, doublecomplex *ab, integer *ldab, integer *ipiv, doublecomplex *
+ b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern /* Subroutine */ int xerbla_(char *, integer *), zgbtrf_(
+ integer *, integer *, integer *, integer *, doublecomplex *,
+ integer *, integer *, integer *), zgbtrs_(char *, integer *,
+ integer *, integer *, integer *, doublecomplex *, integer *,
+ integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGBSV computes the solution to a complex system of linear equations */
+/* A * X = B, where A is a band matrix of order N with KL subdiagonals */
+/* and KU superdiagonals, and X and B are N-by-NRHS matrices. */
+
+/* The LU decomposition with partial pivoting and row interchanges is */
+/* used to factor A as A = L * U, where L is a product of permutation */
+/* and unit lower triangular matrices with KL subdiagonals, and U is */
+/* upper triangular with KL+KU superdiagonals. The factored form of A */
+/* is then used to solve the system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AB (input/output) COMPLEX*16 array, dimension (LDAB,N) */
+/* On entry, the matrix A in band storage, in rows KL+1 to */
+/* 2*KL+KU+1; rows 1 to KL of the array need not be set. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(KL+KU+1+i-j,j) = A(i,j) for max(1,j-KU)<=i<=min(N,j+KL) */
+/* On exit, details of the factorization: U is stored as an */
+/* upper triangular band matrix with KL+KU superdiagonals in */
+/* rows 1 to KL+KU+1, and the multipliers used during the */
+/* factorization are stored in rows KL+KU+2 to 2*KL+KU+1. */
+/* See below for further details. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= 2*KL+KU+1. */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* The pivot indices that define the permutation matrix P; */
+/* row i of the matrix was interchanged with row IPIV(i). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and the solution has not been computed. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* M = N = 6, KL = 2, KU = 1: */
+
+/* On entry: On exit: */
+
+/* * * * + + + * * * u14 u25 u36 */
+/* * * + + + + * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+/* a21 a32 a43 a54 a65 * m21 m32 m43 m54 m65 * */
+/* a31 a42 a53 a64 * * m31 m42 m53 m64 * * */
+
+/* Array elements marked * are not used by the routine; elements marked */
+/* + need not be set on entry, but are required by the routine to store */
+/* elements of U because of fill-in resulting from the row interchanges. */
+
+/* ===================================================================== */
+
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ /* (f2c idiom: shift base pointers so Fortran-style 1-based indexing works) */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*kl < 0) {
+ *info = -2;
+ } else if (*ku < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*ldab < (*kl << 1) + *ku + 1) {
+ *info = -6;
+ } else if (*ldb < max(*n,1)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGBSV ", &i__1);
+ return 0;
+ }
+
+/* Compute the LU factorization of the band matrix A. */
+
+ zgbtrf_(n, n, kl, ku, &ab[ab_offset], ldab, &ipiv[1], info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+/* (Skipped when ZGBTRF reported a singular U: INFO > 0.) */
+
+ zgbtrs_("No transpose", n, kl, ku, nrhs, &ab[ab_offset], ldab, &ipiv[
+ 1], &b[b_offset], ldb, info);
+ }
+ return 0;
+
+/* End of ZGBSV */
+
+} /* zgbsv_ */
diff --git a/contrib/libs/clapack/zgbsvx.c b/contrib/libs/clapack/zgbsvx.c
new file mode 100644
index 0000000000..860abf599a
--- /dev/null
+++ b/contrib/libs/clapack/zgbsvx.c
@@ -0,0 +1,678 @@
+/* zgbsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zgbsvx_(char *fact, char *trans, integer *n, integer *kl,
+	 integer *ku, integer *nrhs, doublecomplex *ab, integer *ldab,
+	doublecomplex *afb, integer *ldafb, integer *ipiv, char *equed,
+	doublereal *r__, doublereal *c__, doublecomplex *b, integer *ldb,
+	doublecomplex *x, integer *ldx, doublereal *rcond, doublereal *ferr,
+	doublereal *berr, doublecomplex *work, doublereal *rwork, integer *
+	info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, afb_dim1, afb_offset, b_dim1, b_offset,
+	    x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+    doublereal d__1, d__2;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    double z_abs(doublecomplex *);
+
+    /* Local variables */
+    integer i__, j, j1, j2;
+    doublereal amax;
+    char norm[1];
+    extern logical lsame_(char *, char *);
+    doublereal rcmin, rcmax, anorm;
+    logical equil;
+    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *);
+    extern doublereal dlamch_(char *);
+    doublereal colcnd;
+    logical nofact;
+    extern doublereal zlangb_(char *, integer *, integer *, integer *,
+	    doublecomplex *, integer *, doublereal *);
+    extern /* Subroutine */ int xerbla_(char *, integer *), zlaqgb_(
+	    integer *, integer *, integer *, integer *, doublecomplex *,
+	    integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, char *);
+    doublereal bignum;
+    extern /* Subroutine */ int zgbcon_(char *, integer *, integer *, integer
+	    *, doublecomplex *, integer *, integer *, doublereal *,
+	    doublereal *, doublecomplex *, doublereal *, integer *);
+    integer infequ;
+    logical colequ;
+    extern doublereal zlantb_(char *, char *, char *, integer *, integer *,
+	    doublecomplex *, integer *, doublereal *);
+    doublereal rowcnd;
+    extern /* Subroutine */ int zgbequ_(integer *, integer *, integer *,
+	    integer *, doublecomplex *, integer *, doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, integer *), zgbrfs_(
+	    char *, integer *, integer *, integer *, integer *, doublecomplex
+	    *, integer *, doublecomplex *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublereal *, doublereal *, doublecomplex *, doublereal *,
+	    integer *), zgbtrf_(integer *, integer *, integer *,
+	    integer *, doublecomplex *, integer *, integer *, integer *);
+    logical notran;
+    extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *);
+    doublereal smlnum;
+    extern /* Subroutine */ int zgbtrs_(char *, integer *, integer *, integer
+	    *, integer *, doublecomplex *, integer *, integer *,
+	    doublecomplex *, integer *, integer *);
+    logical rowequ;
+    doublereal rpvgrw;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZGBSVX uses the LU factorization to compute the solution to a complex */
+/*  system of linear equations A * X = B, A**T * X = B, or A**H * X = B, */
+/*  where A is a band matrix of order N with KL subdiagonals and KU */
+/*  superdiagonals, and X and B are N-by-NRHS matrices. */
+
+/*  Error bounds on the solution and a condition estimate are also */
+/*  provided. */
+
+/*  Description */
+/*  =========== */
+
+/*  The following steps are performed by this subroutine: */
+
+/*  1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/*     the system: */
+/*        TRANS = 'N':  diag(R)*A*diag(C)     *inv(diag(C))*X = diag(R)*B */
+/*        TRANS = 'T': (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B */
+/*        TRANS = 'C': (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B */
+/*     Whether or not the system will be equilibrated depends on the */
+/*     scaling of the matrix A, but if equilibration is used, A is */
+/*     overwritten by diag(R)*A*diag(C) and B by diag(R)*B (if TRANS='N') */
+/*     or diag(C)*B (if TRANS = 'T' or 'C'). */
+
+/*  2. If FACT = 'N' or 'E', the LU decomposition is used to factor the */
+/*     matrix A (after equilibration if FACT = 'E') as */
+/*        A = L * U, */
+/*     where L is a product of permutation and unit lower triangular */
+/*     matrices with KL subdiagonals, and U is upper triangular with */
+/*     KL+KU superdiagonals. */
+
+/*  3. If some U(i,i)=0, so that U is exactly singular, then the routine */
+/*     returns with INFO = i. Otherwise, the factored form of A is used */
+/*     to estimate the condition number of the matrix A.  If the */
+/*     reciprocal of the condition number is less than machine precision, */
+/*     INFO = N+1 is returned as a warning, but the routine still goes on */
+/*     to solve for X and compute error bounds as described below. */
+
+/*  4. The system of equations is solved for X using the factored form */
+/*     of A. */
+
+/*  5. Iterative refinement is applied to improve the computed solution */
+/*     matrix and calculate error bounds and backward error estimates */
+/*     for it. */
+
+/*  6. If equilibration was used, the matrix X is premultiplied by */
+/*     diag(C) (if TRANS = 'N') or diag(R) (if TRANS = 'T' or 'C') so */
+/*     that it solves the original system before equilibration. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  FACT    (input) CHARACTER*1 */
+/*          Specifies whether or not the factored form of the matrix A is */
+/*          supplied on entry, and if not, whether the matrix A should be */
+/*          equilibrated before it is factored. */
+/*          = 'F':  On entry, AFB and IPIV contain the factored form of */
+/*                  A.  If EQUED is not 'N', the matrix A has been */
+/*                  equilibrated with scaling factors given by R and C. */
+/*                  AB, AFB, and IPIV are not modified. */
+/*          = 'N':  The matrix A will be copied to AFB and factored. */
+/*          = 'E':  The matrix A will be equilibrated if necessary, then */
+/*                  copied to AFB and factored. */
+
+/*  TRANS   (input) CHARACTER*1 */
+/*          Specifies the form of the system of equations. */
+/*          = 'N':  A * X = B     (No transpose) */
+/*          = 'T':  A**T * X = B  (Transpose) */
+/*          = 'C':  A**H * X = B  (Conjugate transpose) */
+
+/*  N       (input) INTEGER */
+/*          The number of linear equations, i.e., the order of the */
+/*          matrix A.  N >= 0. */
+
+/*  KL      (input) INTEGER */
+/*          The number of subdiagonals within the band of A.  KL >= 0. */
+
+/*  KU      (input) INTEGER */
+/*          The number of superdiagonals within the band of A.  KU >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrices B and X.  NRHS >= 0. */
+
+/*  AB      (input/output) COMPLEX*16 array, dimension (LDAB,N) */
+/*          On entry, the matrix A in band storage, in rows 1 to KL+KU+1. */
+/*          The j-th column of A is stored in the j-th column of the */
+/*          array AB as follows: */
+/*          AB(KU+1+i-j,j) = A(i,j) for max(1,j-KU)<=i<=min(N,j+kl) */
+
+/*          If FACT = 'F' and EQUED is not 'N', then A must have been */
+/*          equilibrated by the scaling factors in R and/or C.  AB is not */
+/*          modified if FACT = 'F' or 'N', or if FACT = 'E' and */
+/*          EQUED = 'N' on exit. */
+
+/*          On exit, if EQUED .ne. 'N', A is scaled as follows: */
+/*          EQUED = 'R':  A := diag(R) * A */
+/*          EQUED = 'C':  A := A * diag(C) */
+/*          EQUED = 'B':  A := diag(R) * A * diag(C). */
+
+/*  LDAB    (input) INTEGER */
+/*          The leading dimension of the array AB.  LDAB >= KL+KU+1. */
+
+/*  AFB     (input or output) COMPLEX*16 array, dimension (LDAFB,N) */
+/*          If FACT = 'F', then AFB is an input argument and on entry */
+/*          contains details of the LU factorization of the band matrix */
+/*          A, as computed by ZGBTRF.  U is stored as an upper triangular */
+/*          band matrix with KL+KU superdiagonals in rows 1 to KL+KU+1, */
+/*          and the multipliers used during the factorization are stored */
+/*          in rows KL+KU+2 to 2*KL+KU+1.  If EQUED .ne. 'N', then AFB is */
+/*          the factored form of the equilibrated matrix A. */
+
+/*          If FACT = 'N', then AFB is an output argument and on exit */
+/*          returns details of the LU factorization of A. */
+
+/*          If FACT = 'E', then AFB is an output argument and on exit */
+/*          returns details of the LU factorization of the equilibrated */
+/*          matrix A (see the description of AB for the form of the */
+/*          equilibrated matrix). */
+
+/*  LDAFB   (input) INTEGER */
+/*          The leading dimension of the array AFB.  LDAFB >= 2*KL+KU+1. */
+
+/*  IPIV    (input or output) INTEGER array, dimension (N) */
+/*          If FACT = 'F', then IPIV is an input argument and on entry */
+/*          contains the pivot indices from the factorization A = L*U */
+/*          as computed by ZGBTRF; row i of the matrix was interchanged */
+/*          with row IPIV(i). */
+
+/*          If FACT = 'N', then IPIV is an output argument and on exit */
+/*          contains the pivot indices from the factorization A = L*U */
+/*          of the original matrix A. */
+
+/*          If FACT = 'E', then IPIV is an output argument and on exit */
+/*          contains the pivot indices from the factorization A = L*U */
+/*          of the equilibrated matrix A. */
+
+/*  EQUED   (input or output) CHARACTER*1 */
+/*          Specifies the form of equilibration that was done. */
+/*          = 'N':  No equilibration (always true if FACT = 'N'). */
+/*          = 'R':  Row equilibration, i.e., A has been premultiplied by */
+/*                  diag(R). */
+/*          = 'C':  Column equilibration, i.e., A has been postmultiplied */
+/*                  by diag(C). */
+/*          = 'B':  Both row and column equilibration, i.e., A has been */
+/*                  replaced by diag(R) * A * diag(C). */
+/*          EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/*          output argument. */
+
+/*  R       (input or output) DOUBLE PRECISION array, dimension (N) */
+/*          The row scale factors for A.  If EQUED = 'R' or 'B', A is */
+/*          multiplied on the left by diag(R); if EQUED = 'N' or 'C', R */
+/*          is not accessed.  R is an input argument if FACT = 'F'; */
+/*          otherwise, R is an output argument.  If FACT = 'F' and */
+/*          EQUED = 'R' or 'B', each element of R must be positive. */
+
+/*  C       (input or output) DOUBLE PRECISION array, dimension (N) */
+/*          The column scale factors for A.  If EQUED = 'C' or 'B', A is */
+/*          multiplied on the right by diag(C); if EQUED = 'N' or 'R', C */
+/*          is not accessed.  C is an input argument if FACT = 'F'; */
+/*          otherwise, C is an output argument.  If FACT = 'F' and */
+/*          EQUED = 'C' or 'B', each element of C must be positive. */
+
+/*  B       (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/*          On entry, the right hand side matrix B. */
+/*          On exit, */
+/*          if EQUED = 'N', B is not modified; */
+/*          if TRANS = 'N' and EQUED = 'R' or 'B', B is overwritten by */
+/*          diag(R)*B; */
+/*          if TRANS = 'T' or 'C' and EQUED = 'C' or 'B', B is */
+/*          overwritten by diag(C)*B. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  X       (output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/*          If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X */
+/*          to the original system of equations.  Note that A and B are */
+/*          modified on exit if EQUED .ne. 'N', and the solution to the */
+/*          equilibrated system is inv(diag(C))*X if TRANS = 'N' and */
+/*          EQUED = 'C' or 'B', or inv(diag(R))*X if TRANS = 'T' or 'C' */
+/*          and EQUED = 'R' or 'B'. */
+
+/*  LDX     (input) INTEGER */
+/*          The leading dimension of the array X.  LDX >= max(1,N). */
+
+/*  RCOND   (output) DOUBLE PRECISION */
+/*          The estimate of the reciprocal condition number of the matrix */
+/*          A after equilibration (if done).  If RCOND is less than the */
+/*          machine precision (in particular, if RCOND = 0), the matrix */
+/*          is singular to working precision.  This condition is */
+/*          indicated by a return code of INFO > 0. */
+
+/*  FERR    (output) DOUBLE PRECISION array, dimension (NRHS) */
+/*          The estimated forward error bound for each solution vector */
+/*          X(j) (the j-th column of the solution matrix X). */
+/*          If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/*          is an estimated upper bound for the magnitude of the largest */
+/*          element in (X(j) - XTRUE) divided by the magnitude of the */
+/*          largest element in X(j).  The estimate is as reliable as */
+/*          the estimate for RCOND, and is almost always a slight */
+/*          overestimate of the true error. */
+
+/*  BERR    (output) DOUBLE PRECISION array, dimension (NRHS) */
+/*          The componentwise relative backward error of each solution */
+/*          vector X(j) (i.e., the smallest relative change in */
+/*          any element of A or B that makes X(j) an exact solution). */
+
+/*  WORK    (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/*  RWORK   (workspace/output) DOUBLE PRECISION array, dimension (N) */
+/*          On exit, RWORK(1) contains the reciprocal pivot growth */
+/*          factor norm(A)/norm(U). The "max absolute element" norm is */
+/*          used. If RWORK(1) is much less than 1, then the stability */
+/*          of the LU factorization of the (equilibrated) matrix A */
+/*          could be poor. This also means that the solution X, condition */
+/*          estimator RCOND, and forward error bound FERR could be */
+/*          unreliable. If factorization fails with 0<INFO<=N, then */
+/*          RWORK(1) contains the reciprocal pivot growth factor for the */
+/*          leading INFO columns of A. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, and i is */
+/*                <= N:  U(i,i) is exactly zero.  The factorization */
+/*                       has been completed, but the factor U is exactly */
+/*                       singular, so the solution and error bounds */
+/*                       could not be computed. RCOND = 0 is returned. */
+/*                = N+1: U is nonsingular, but RCOND is less than machine */
+/*                       precision, meaning that the matrix is singular */
+/*                       to working precision.  Nevertheless, the */
+/*                       solution and error bounds are computed because */
+/*                       there are a number of situations where the */
+/*                       computed solution can be more accurate than the */
+/*                       value of RCOND would suggest. */
+
+/*  ===================================================================== */
+/*  Moved setting of INFO = N+1 so INFO does not subsequently get */
+/*  overwritten.  Sven, 17 Mar 05. */
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    afb_dim1 = *ldafb;
+    afb_offset = 1 + afb_dim1;
+    afb -= afb_offset;
+    --ipiv;
+    --r__;
+    --c__;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    nofact = lsame_(fact, "N");
+    equil = lsame_(fact, "E");
+    notran = lsame_(trans, "N");
+    if (nofact || equil) {
+	*(unsigned char *)equed = 'N';
+	rowequ = FALSE_;
+	colequ = FALSE_;
+    } else {
+	rowequ = lsame_(equed, "R") || lsame_(equed,
+		"B");
+	colequ = lsame_(equed, "C") || lsame_(equed,
+		"B");
+	smlnum = dlamch_("Safe minimum");
+	bignum = 1. / smlnum;
+    }
+/* (SMLNUM and BIGNUM above are set, and used below, only when FACT = 'F'.) */
+/*     Test the input parameters. */
+
+    if (! nofact && ! equil && ! lsame_(fact, "F")) {
+	*info = -1;
+    } else if (! notran && ! lsame_(trans, "T") && !
+	    lsame_(trans, "C")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*kl < 0) {
+	*info = -4;
+    } else if (*ku < 0) {
+	*info = -5;
+    } else if (*nrhs < 0) {
+	*info = -6;
+    } else if (*ldab < *kl + *ku + 1) {
+	*info = -8;
+    } else if (*ldafb < (*kl << 1) + *ku + 1) {
+	*info = -10;
+    } else if (lsame_(fact, "F") && ! (rowequ || colequ
+	    || lsame_(equed, "N"))) {
+	*info = -12;
+    } else {
+	if (rowequ) {
+	    rcmin = bignum;
+	    rcmax = 0.;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		d__1 = rcmin, d__2 = r__[j];
+		rcmin = min(d__1,d__2);
+/* Computing MAX */
+		d__1 = rcmax, d__2 = r__[j];
+		rcmax = max(d__1,d__2);
+/* L10: */
+	    }
+	    if (rcmin <= 0.) {
+		*info = -13;
+	    } else if (*n > 0) {
+		rowcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+	    } else {
+		rowcnd = 1.;
+	    }
+	}
+	if (colequ && *info == 0) {
+	    rcmin = bignum;
+	    rcmax = 0.;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		d__1 = rcmin, d__2 = c__[j];
+		rcmin = min(d__1,d__2);
+/* Computing MAX */
+		d__1 = rcmax, d__2 = c__[j];
+		rcmax = max(d__1,d__2);
+/* L20: */
+	    }
+	    if (rcmin <= 0.) {
+		*info = -14;
+	    } else if (*n > 0) {
+		colcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+	    } else {
+		colcnd = 1.;
+	    }
+	}
+	if (*info == 0) {
+	    if (*ldb < max(1,*n)) {
+		*info = -16;
+	    } else if (*ldx < max(1,*n)) {
+		*info = -18;
+	    }
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGBSVX", &i__1);
+	return 0;
+    }
+
+    if (equil) {
+
+/*        Compute row and column scalings to equilibrate the matrix A. */
+
+	zgbequ_(n, n, kl, ku, &ab[ab_offset], ldab, &r__[1], &c__[1], &rowcnd,
+		 &colcnd, &amax, &infequ);
+	if (infequ == 0) {
+
+/*           Equilibrate the matrix. */
+
+	    zlaqgb_(n, n, kl, ku, &ab[ab_offset], ldab, &r__[1], &c__[1], &
+		    rowcnd, &colcnd, &amax, equed);
+	    rowequ = lsame_(equed, "R") || lsame_(equed,
+		     "B");
+	    colequ = lsame_(equed, "C") || lsame_(equed,
+		     "B");
+	}
+    }
+
+/*     Scale the right hand side. */
+
+    if (notran) {
+	if (rowequ) {
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * b_dim1;
+		    i__4 = i__;
+		    i__5 = i__ + j * b_dim1;
+		    z__1.r = r__[i__4] * b[i__5].r, z__1.i = r__[i__4] * b[
+			    i__5].i;
+		    b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+    } else if (colequ) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *n;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * b_dim1;
+		i__4 = i__;
+		i__5 = i__ + j * b_dim1;
+		z__1.r = c__[i__4] * b[i__5].r, z__1.i = c__[i__4] * b[i__5]
+			.i;
+		b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L50: */
+	    }
+/* L60: */
+	}
+    }
+
+    if (nofact || equil) {
+
+/*        Compute the LU factorization of the band matrix A. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+	    i__2 = j - *ku;
+	    j1 = max(i__2,1);
+/* Computing MIN */
+	    i__2 = j + *kl;
+	    j2 = min(i__2,*n);
+	    i__2 = j2 - j1 + 1;
+	    zcopy_(&i__2, &ab[*ku + 1 - j + j1 + j * ab_dim1], &c__1, &afb[*
+		    kl + *ku + 1 - j + j1 + j * afb_dim1], &c__1);
+/* L70: */
+	}
+
+	zgbtrf_(n, n, kl, ku, &afb[afb_offset], ldafb, &ipiv[1], info);
+
+/*        Return if INFO is non-zero. */
+
+	if (*info > 0) {
+
+/*           Compute the reciprocal pivot growth factor of the */
+/*           leading rank-deficient INFO columns of A. */
+
+	    anorm = 0.;
+	    i__1 = *info;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+		i__2 = *ku + 2 - j;
+/* Computing MIN */
+		i__4 = *n + *ku + 1 - j, i__5 = *kl + *ku + 1;
+		i__3 = min(i__4,i__5);
+		for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+		    d__1 = anorm, d__2 = z_abs(&ab[i__ + j * ab_dim1]);
+		    anorm = max(d__1,d__2);
+/* L80: */
+		}
+/* L90: */
+	    }
+/* Computing MIN */
+	    i__3 = *info - 1, i__2 = *kl + *ku;
+	    i__1 = min(i__3,i__2);
+/* Computing MAX */
+	    i__4 = 1, i__5 = *kl + *ku + 2 - *info;
+	    rpvgrw = zlantb_("M", "U", "N", info, &i__1, &afb[max(i__4, i__5)
+		    + afb_dim1], ldafb, &rwork[1]);
+	    if (rpvgrw == 0.) {
+		rpvgrw = 1.;
+	    } else {
+		rpvgrw = anorm / rpvgrw;
+	    }
+	    rwork[1] = rpvgrw;
+	    *rcond = 0.;
+	    return 0;
+	}
+    }
+/* (Reaching this point means no zero pivot was detected above.) */
+/*     Compute the norm of the matrix A and the */
+/*     reciprocal pivot growth factor RPVGRW. */
+
+    if (notran) {
+	*(unsigned char *)norm = '1';
+    } else {
+	*(unsigned char *)norm = 'I';
+    }
+    anorm = zlangb_(norm, n, kl, ku, &ab[ab_offset], ldab, &rwork[1]);
+    i__1 = *kl + *ku;
+    rpvgrw = zlantb_("M", "U", "N", n, &i__1, &afb[afb_offset], ldafb, &rwork[
+	    1]);
+    if (rpvgrw == 0.) {
+	rpvgrw = 1.;
+    } else {
+	rpvgrw = zlangb_("M", n, kl, ku, &ab[ab_offset], ldab, &rwork[1]) / rpvgrw;
+    }
+
+/*     Compute the reciprocal of the condition number of A. */
+
+    zgbcon_(norm, n, kl, ku, &afb[afb_offset], ldafb, &ipiv[1], &anorm, rcond,
+	     &work[1], &rwork[1], info);
+
+/*     Compute the solution matrix X. */
+
+    zlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    zgbtrs_(trans, n, kl, ku, nrhs, &afb[afb_offset], ldafb, &ipiv[1], &x[
+	    x_offset], ldx, info);
+
+/*     Use iterative refinement to improve the computed solution and */
+/*     compute error bounds and backward error estimates for it. */
+
+    zgbrfs_(trans, n, kl, ku, nrhs, &ab[ab_offset], ldab, &afb[afb_offset],
+	    ldafb, &ipiv[1], &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1], &
+	    berr[1], &work[1], &rwork[1], info);
+
+/*     Transform the solution matrix X to a solution of the original */
+/*     system. */
+
+    if (notran) {
+	if (colequ) {
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		i__3 = *n;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    i__2 = i__ + j * x_dim1;
+		    i__4 = i__;
+		    i__5 = i__ + j * x_dim1;
+		    z__1.r = c__[i__4] * x[i__5].r, z__1.i = c__[i__4] * x[
+			    i__5].i;
+		    x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+/* L100: */
+		}
+/* L110: */
+	    }
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		ferr[j] /= colcnd;
+/* L120: */
+	    }
+	}
+    } else if (rowequ) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    i__3 = *n;
+	    for (i__ = 1; i__ <= i__3; ++i__) {
+		i__2 = i__ + j * x_dim1;
+		i__4 = i__;
+		i__5 = i__ + j * x_dim1;
+		z__1.r = r__[i__4] * x[i__5].r, z__1.i = r__[i__4] * x[i__5]
+			.i;
+		x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+/* L130: */
+	    }
+/* L140: */
+	}
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] /= rowcnd;
+/* L150: */
+	}
+    }
+
+/*     Set INFO = N+1 if the matrix is singular to working precision. */
+
+    if (*rcond < dlamch_("Epsilon")) {
+	*info = *n + 1;
+    }
+
+    rwork[1] = rpvgrw;
+    return 0;
+
+/*     End of ZGBSVX */
+
+} /* zgbsvx_ */
diff --git a/contrib/libs/clapack/zgbtf2.c b/contrib/libs/clapack/zgbtf2.c
new file mode 100644
index 0000000000..354252c862
--- /dev/null
+++ b/contrib/libs/clapack/zgbtf2.c
@@ -0,0 +1,268 @@
+/* zgbtf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zgbtf2_(integer *m, integer *n, integer *kl, integer *ku,
+	 doublecomplex *ab, integer *ldab, integer *ipiv, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__, j, km, jp, ju, kv;
+    extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+	    doublecomplex *, integer *), zgeru_(integer *, integer *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, integer *), zswap_(integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(
+	    char *, integer *);
+    extern integer izamax_(integer *, doublecomplex *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZGBTF2 computes an LU factorization of a complex m-by-n band matrix */
+/*  A using partial pivoting with row interchanges. */
+
+/*  This is the unblocked version of the algorithm, calling Level 2 BLAS. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix A.  M >= 0. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix A.  N >= 0. */
+
+/*  KL      (input) INTEGER */
+/*          The number of subdiagonals within the band of A.  KL >= 0. */
+
+/*  KU      (input) INTEGER */
+/*          The number of superdiagonals within the band of A.  KU >= 0. */
+
+/*  AB      (input/output) COMPLEX*16 array, dimension (LDAB,N) */
+/*          On entry, the matrix A in band storage, in rows KL+1 to */
+/*          2*KL+KU+1; rows 1 to KL of the array need not be set. */
+/*          The j-th column of A is stored in the j-th column of the */
+/*          array AB as follows: */
+/*          AB(kl+ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl) */
+
+/*          On exit, details of the factorization: U is stored as an */
+/*          upper triangular band matrix with KL+KU superdiagonals in */
+/*          rows 1 to KL+KU+1, and the multipliers used during the */
+/*          factorization are stored in rows KL+KU+2 to 2*KL+KU+1. */
+/*          See below for further details. */
+
+/*  LDAB    (input) INTEGER */
+/*          The leading dimension of the array AB.  LDAB >= 2*KL+KU+1. */
+
+/*  IPIV    (output) INTEGER array, dimension (min(M,N)) */
+/*          The pivot indices; for 1 <= i <= min(M,N), row i of the */
+/*          matrix was interchanged with row IPIV(i). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0: successful exit */
+/*          < 0: if INFO = -i, the i-th argument had an illegal value */
+/*          > 0: if INFO = +i, U(i,i) is exactly zero. The factorization */
+/*               has been completed, but the factor U is exactly */
+/*               singular, and division by zero will occur if it is used */
+/*               to solve a system of equations. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  The band storage scheme is illustrated by the following example, when */
+/*  M = N = 6, KL = 2, KU = 1: */
+
+/*     On entry:                       On exit: */
+
+/*      *    *    *    +    +    +       *    *    *   u14  u25  u36 */
+/*      *    *    +    +    +    +       *    *   u13  u24  u35  u46 */
+/*      *   a12  a23  a34  a45  a56      *   u12  u23  u34  u45  u56 */
+/*     a11  a22  a33  a44  a55  a66     u11  u22  u33  u44  u55  u66 */
+/*     a21  a32  a43  a54  a65   *      m21  m32  m43  m54  m65   * */
+/*     a31  a42  a53  a64   *    *      m31  m42  m53  m64   *    * */
+
+/*  Array elements marked * are not used by the routine; elements marked */
+/*  + need not be set on entry, but are required by the routine to store */
+/*  elements of U, because of fill-in resulting from the row */
+/*  interchanges. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     KV is the number of superdiagonals in the factor U, allowing for */
+/*     fill-in. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --ipiv;
+
+    /* Function Body */
+    kv = *ku + *kl;
+
+/*     Test the input parameters. */
+
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kl < 0) {
+	*info = -3;
+    } else if (*ku < 0) {
+	*info = -4;
+    } else if (*ldab < *kl + kv + 1) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGBTF2", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*m == 0 || *n == 0) {
+	return 0;
+    }
+
+/*     Gaussian elimination with partial pivoting */
+
+/*     Set fill-in elements in columns KU+2 to KV to zero. */
+
+    i__1 = min(kv,*n);
+    for (j = *ku + 2; j <= i__1; ++j) {
+	i__2 = *kl;
+	for (i__ = kv - j + 2; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * ab_dim1;
+	    ab[i__3].r = 0., ab[i__3].i = 0.;
+/* L10: */
+	}
+/* L20: */
+    }
+
+/*     JU is the index of the last column affected by the current stage */
+/*     of the factorization. */
+
+    ju = 1;
+/* Main loop: pivot, interchange, scale, then rank-1 update per column. */
+    i__1 = min(*m,*n);
+    for (j = 1; j <= i__1; ++j) {
+
+/*        Set fill-in elements in column J+KV to zero. */
+
+	if (j + kv <= *n) {
+	    i__2 = *kl;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + (j + kv) * ab_dim1;
+		ab[i__3].r = 0., ab[i__3].i = 0.;
+/* L30: */
+	    }
+	}
+
+/*        Find pivot and test for singularity. KM is the number of */
+/*        subdiagonal elements in the current column. */
+
+/* Computing MIN */
+	i__2 = *kl, i__3 = *m - j;
+	km = min(i__2,i__3);
+	i__2 = km + 1;
+	jp = izamax_(&i__2, &ab[kv + 1 + j * ab_dim1], &c__1);
+	ipiv[j] = jp + j - 1;
+	i__2 = kv + jp + j * ab_dim1;
+	if (ab[i__2].r != 0. || ab[i__2].i != 0.) {
+/* Computing MAX */
+/* Computing MIN */
+	    i__4 = j + *ku + jp - 1;
+	    i__2 = ju, i__3 = min(i__4,*n);
+	    ju = max(i__2,i__3);
+
+/*           Apply interchange to columns J to JU. */
+
+	    if (jp != 1) {
+		i__2 = ju - j + 1;
+		i__3 = *ldab - 1;
+		i__4 = *ldab - 1;
+		zswap_(&i__2, &ab[kv + jp + j * ab_dim1], &i__3, &ab[kv + 1 +
+			j * ab_dim1], &i__4);
+	    }
+	    if (km > 0) {
+
+/*              Compute multipliers. */
+
+		z_div(&z__1, &c_b1, &ab[kv + 1 + j * ab_dim1]);
+		zscal_(&km, &z__1, &ab[kv + 2 + j * ab_dim1], &c__1);
+
+/*              Update trailing submatrix within the band. */
+
+		if (ju > j) {
+		    i__2 = ju - j;
+		    z__1.r = -1., z__1.i = -0.;
+		    i__3 = *ldab - 1;
+		    i__4 = *ldab - 1;
+		    zgeru_(&km, &i__2, &z__1, &ab[kv + 2 + j * ab_dim1], &
+			    c__1, &ab[kv + (j + 1) * ab_dim1], &i__3, &ab[kv
+			    + 1 + (j + 1) * ab_dim1], &i__4);
+		}
+	    }
+	} else {
+
+/*           If pivot is zero, set INFO to the index of the pivot */
+/*           unless a zero pivot has already been found. */
+
+	    if (*info == 0) {
+		*info = j;
+	    }
+	}
+/* L40: */
+    }
+    return 0;
+
+/*     End of ZGBTF2 */
+
+} /* zgbtf2_ */
diff --git a/contrib/libs/clapack/zgbtrf.c b/contrib/libs/clapack/zgbtrf.c
new file mode 100644
index 0000000000..2e5c495bde
--- /dev/null
+++ b/contrib/libs/clapack/zgbtrf.c
@@ -0,0 +1,605 @@
+/* zgbtrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+static integer c__65 = 65;
+
+/* Subroutine */ int zgbtrf_(integer *m, integer *n, integer *kl, integer *ku,
+ doublecomplex *ab, integer *ldab, integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, i2, i3, j2, j3, k2, jb, nb, ii, jj, jm, ip, jp, km, ju,
+ kv, nw;
+ doublecomplex temp;
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *), zgemm_(char *, char *, integer *,
+ integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ doublecomplex work13[4160] /* was [65][64] */, work31[4160] /*
+ was [65][64] */;
+ extern /* Subroutine */ int zgeru_(integer *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zcopy_(integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *), zswap_(integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *), ztrsm_(
+ char *, char *, char *, char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *), zgbtf2_(integer *,
+ integer *, integer *, integer *, doublecomplex *, integer *,
+ integer *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *), izamax_(integer *,
+ doublecomplex *, integer *);
+ extern /* Subroutine */ int zlaswp_(integer *, doublecomplex *, integer *,
+ integer *, integer *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGBTRF computes an LU factorization of a complex m-by-n band matrix A */
+/* using partial pivoting with row interchanges. */
+
+/* This is the blocked version of the algorithm, calling Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* AB (input/output) COMPLEX*16 array, dimension (LDAB,N) */
+/* On entry, the matrix A in band storage, in rows KL+1 to */
+/* 2*KL+KU+1; rows 1 to KL of the array need not be set. */
+/* The j-th column of A is stored in the j-th column of the */
+/* array AB as follows: */
+/* AB(kl+ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl) */
+
+/* On exit, details of the factorization: U is stored as an */
+/* upper triangular band matrix with KL+KU superdiagonals in */
+/* rows 1 to KL+KU+1, and the multipliers used during the */
+/* factorization are stored in rows KL+KU+2 to 2*KL+KU+1. */
+/* See below for further details. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= 2*KL+KU+1. */
+
+/* IPIV (output) INTEGER array, dimension (min(M,N)) */
+/* The pivot indices; for 1 <= i <= min(M,N), row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = +i, U(i,i) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and division by zero will occur if it is used */
+/* to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* M = N = 6, KL = 2, KU = 1: */
+
+/* On entry: On exit: */
+
+/* * * * + + + * * * u14 u25 u36 */
+/* * * + + + + * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+/* a21 a32 a43 a54 a65 * m21 m32 m43 m54 m65 * */
+/* a31 a42 a53 a64 * * m31 m42 m53 m64 * * */
+
+/* Array elements marked * are not used by the routine; elements marked */
+/* + need not be set on entry, but are required by the routine to store */
+/* elements of U because of fill-in resulting from the row interchanges. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* KV is the number of superdiagonals in the factor U, allowing for */
+/* fill-in */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --ipiv;
+
+ /* Function Body */
+ kv = *ku + *kl;
+
+/* Test the input parameters. */
+
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*ldab < *kl + kv + 1) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGBTRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment */
+
+ nb = ilaenv_(&c__1, "ZGBTRF", " ", m, n, kl, ku);
+
+/* The block size must not exceed the limit set by the size of the */
+/* local arrays WORK13 and WORK31. */
+
+ nb = min(nb,64);
+
+ if (nb <= 1 || nb > *kl) {
+
+/* Use unblocked code */
+
+ zgbtf2_(m, n, kl, ku, &ab[ab_offset], ldab, &ipiv[1], info);
+ } else {
+
+/* Use blocked code */
+
+/* Zero the superdiagonal elements of the work array WORK13 */
+
+ i__1 = nb;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * 65 - 66;
+ work13[i__3].r = 0., work13[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+
+/* Zero the subdiagonal elements of the work array WORK31 */
+
+ i__1 = nb;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = nb;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * 65 - 66;
+ work31[i__3].r = 0., work31[i__3].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Gaussian elimination with partial pivoting */
+
+/* Set fill-in elements in columns KU+2 to KV to zero */
+
+ i__1 = min(kv,*n);
+ for (j = *ku + 2; j <= i__1; ++j) {
+ i__2 = *kl;
+ for (i__ = kv - j + 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * ab_dim1;
+ ab[i__3].r = 0., ab[i__3].i = 0.;
+/* L50: */
+ }
+/* L60: */
+ }
+
+/* JU is the index of the last column affected by the current */
+/* stage of the factorization */
+
+ ju = 1;
+
+ i__1 = min(*m,*n);
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__3 = nb, i__4 = min(*m,*n) - j + 1;
+ jb = min(i__3,i__4);
+
+/* The active part of the matrix is partitioned */
+
+/* A11 A12 A13 */
+/* A21 A22 A23 */
+/* A31 A32 A33 */
+
+/* Here A11, A21 and A31 denote the current block of JB columns */
+/* which is about to be factorized. The number of rows in the */
+/* partitioning are JB, I2, I3 respectively, and the numbers */
+/* of columns are JB, J2, J3. The superdiagonal elements of A13 */
+/* and the subdiagonal elements of A31 lie outside the band. */
+
+/* Computing MIN */
+ i__3 = *kl - jb, i__4 = *m - j - jb + 1;
+ i2 = min(i__3,i__4);
+/* Computing MIN */
+ i__3 = jb, i__4 = *m - j - *kl + 1;
+ i3 = min(i__3,i__4);
+
+/* J2 and J3 are computed after JU has been updated. */
+
+/* Factorize the current block of JB columns */
+
+ i__3 = j + jb - 1;
+ for (jj = j; jj <= i__3; ++jj) {
+
+/* Set fill-in elements in column JJ+KV to zero */
+
+ if (jj + kv <= *n) {
+ i__4 = *kl;
+ for (i__ = 1; i__ <= i__4; ++i__) {
+ i__5 = i__ + (jj + kv) * ab_dim1;
+ ab[i__5].r = 0., ab[i__5].i = 0.;
+/* L70: */
+ }
+ }
+
+/* Find pivot and test for singularity. KM is the number of */
+/* subdiagonal elements in the current column. */
+
+/* Computing MIN */
+ i__4 = *kl, i__5 = *m - jj;
+ km = min(i__4,i__5);
+ i__4 = km + 1;
+ jp = izamax_(&i__4, &ab[kv + 1 + jj * ab_dim1], &c__1);
+ ipiv[jj] = jp + jj - j;
+ i__4 = kv + jp + jj * ab_dim1;
+ if (ab[i__4].r != 0. || ab[i__4].i != 0.) {
+/* Computing MAX */
+/* Computing MIN */
+ i__6 = jj + *ku + jp - 1;
+ i__4 = ju, i__5 = min(i__6,*n);
+ ju = max(i__4,i__5);
+ if (jp != 1) {
+
+/* Apply interchange to columns J to J+JB-1 */
+
+ if (jp + jj - 1 < j + *kl) {
+
+ i__4 = *ldab - 1;
+ i__5 = *ldab - 1;
+ zswap_(&jb, &ab[kv + 1 + jj - j + j * ab_dim1], &
+ i__4, &ab[kv + jp + jj - j + j * ab_dim1],
+ &i__5);
+ } else {
+
+/* The interchange affects columns J to JJ-1 of A31 */
+/* which are stored in the work array WORK31 */
+
+ i__4 = jj - j;
+ i__5 = *ldab - 1;
+ zswap_(&i__4, &ab[kv + 1 + jj - j + j * ab_dim1],
+ &i__5, &work31[jp + jj - j - *kl - 1], &
+ c__65);
+ i__4 = j + jb - jj;
+ i__5 = *ldab - 1;
+ i__6 = *ldab - 1;
+ zswap_(&i__4, &ab[kv + 1 + jj * ab_dim1], &i__5, &
+ ab[kv + jp + jj * ab_dim1], &i__6);
+ }
+ }
+
+/* Compute multipliers */
+
+ z_div(&z__1, &c_b1, &ab[kv + 1 + jj * ab_dim1]);
+ zscal_(&km, &z__1, &ab[kv + 2 + jj * ab_dim1], &c__1);
+
+/* Update trailing submatrix within the band and within */
+/* the current block. JM is the index of the last column */
+/* which needs to be updated. */
+
+/* Computing MIN */
+ i__4 = ju, i__5 = j + jb - 1;
+ jm = min(i__4,i__5);
+ if (jm > jj) {
+ i__4 = jm - jj;
+ z__1.r = -1., z__1.i = -0.;
+ i__5 = *ldab - 1;
+ i__6 = *ldab - 1;
+ zgeru_(&km, &i__4, &z__1, &ab[kv + 2 + jj * ab_dim1],
+ &c__1, &ab[kv + (jj + 1) * ab_dim1], &i__5, &
+ ab[kv + 1 + (jj + 1) * ab_dim1], &i__6);
+ }
+ } else {
+
+/* If pivot is zero, set INFO to the index of the pivot */
+/* unless a zero pivot has already been found. */
+
+ if (*info == 0) {
+ *info = jj;
+ }
+ }
+
+/* Copy current column of A31 into the work array WORK31 */
+
+/* Computing MIN */
+ i__4 = jj - j + 1;
+ nw = min(i__4,i3);
+ if (nw > 0) {
+ zcopy_(&nw, &ab[kv + *kl + 1 - jj + j + jj * ab_dim1], &
+ c__1, &work31[(jj - j + 1) * 65 - 65], &c__1);
+ }
+/* L80: */
+ }
+ if (j + jb <= *n) {
+
+/* Apply the row interchanges to the other blocks. */
+
+/* Computing MIN */
+ i__3 = ju - j + 1;
+ j2 = min(i__3,kv) - jb;
+/* Computing MAX */
+ i__3 = 0, i__4 = ju - j - kv + 1;
+ j3 = max(i__3,i__4);
+
+/* Use ZLASWP to apply the row interchanges to A12, A22, and */
+/* A32. */
+
+ i__3 = *ldab - 1;
+ zlaswp_(&j2, &ab[kv + 1 - jb + (j + jb) * ab_dim1], &i__3, &
+ c__1, &jb, &ipiv[j], &c__1);
+
+/* Adjust the pivot indices. */
+
+ i__3 = j + jb - 1;
+ for (i__ = j; i__ <= i__3; ++i__) {
+ ipiv[i__] = ipiv[i__] + j - 1;
+/* L90: */
+ }
+
+/* Apply the row interchanges to A13, A23, and A33 */
+/* columnwise. */
+
+ k2 = j - 1 + jb + j2;
+ i__3 = j3;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ jj = k2 + i__;
+ i__4 = j + jb - 1;
+ for (ii = j + i__ - 1; ii <= i__4; ++ii) {
+ ip = ipiv[ii];
+ if (ip != ii) {
+ i__5 = kv + 1 + ii - jj + jj * ab_dim1;
+ temp.r = ab[i__5].r, temp.i = ab[i__5].i;
+ i__5 = kv + 1 + ii - jj + jj * ab_dim1;
+ i__6 = kv + 1 + ip - jj + jj * ab_dim1;
+ ab[i__5].r = ab[i__6].r, ab[i__5].i = ab[i__6].i;
+ i__5 = kv + 1 + ip - jj + jj * ab_dim1;
+ ab[i__5].r = temp.r, ab[i__5].i = temp.i;
+ }
+/* L100: */
+ }
+/* L110: */
+ }
+
+/* Update the relevant part of the trailing submatrix */
+
+ if (j2 > 0) {
+
+/* Update A12 */
+
+ i__3 = *ldab - 1;
+ i__4 = *ldab - 1;
+ ztrsm_("Left", "Lower", "No transpose", "Unit", &jb, &j2,
+ &c_b1, &ab[kv + 1 + j * ab_dim1], &i__3, &ab[kv +
+ 1 - jb + (j + jb) * ab_dim1], &i__4);
+
+ if (i2 > 0) {
+
+/* Update A22 */
+
+ z__1.r = -1., z__1.i = -0.;
+ i__3 = *ldab - 1;
+ i__4 = *ldab - 1;
+ i__5 = *ldab - 1;
+ zgemm_("No transpose", "No transpose", &i2, &j2, &jb,
+ &z__1, &ab[kv + 1 + jb + j * ab_dim1], &i__3,
+ &ab[kv + 1 - jb + (j + jb) * ab_dim1], &i__4,
+ &c_b1, &ab[kv + 1 + (j + jb) * ab_dim1], &
+ i__5);
+ }
+
+ if (i3 > 0) {
+
+/* Update A32 */
+
+ z__1.r = -1., z__1.i = -0.;
+ i__3 = *ldab - 1;
+ i__4 = *ldab - 1;
+ zgemm_("No transpose", "No transpose", &i3, &j2, &jb,
+ &z__1, work31, &c__65, &ab[kv + 1 - jb + (j +
+ jb) * ab_dim1], &i__3, &c_b1, &ab[kv + *kl +
+ 1 - jb + (j + jb) * ab_dim1], &i__4);
+ }
+ }
+
+ if (j3 > 0) {
+
+/* Copy the lower triangle of A13 into the work array */
+/* WORK13 */
+
+ i__3 = j3;
+ for (jj = 1; jj <= i__3; ++jj) {
+ i__4 = jb;
+ for (ii = jj; ii <= i__4; ++ii) {
+ i__5 = ii + jj * 65 - 66;
+ i__6 = ii - jj + 1 + (jj + j + kv - 1) * ab_dim1;
+ work13[i__5].r = ab[i__6].r, work13[i__5].i = ab[
+ i__6].i;
+/* L120: */
+ }
+/* L130: */
+ }
+
+/* Update A13 in the work array */
+
+ i__3 = *ldab - 1;
+ ztrsm_("Left", "Lower", "No transpose", "Unit", &jb, &j3,
+ &c_b1, &ab[kv + 1 + j * ab_dim1], &i__3, work13, &
+ c__65);
+
+ if (i2 > 0) {
+
+/* Update A23 */
+
+ z__1.r = -1., z__1.i = -0.;
+ i__3 = *ldab - 1;
+ i__4 = *ldab - 1;
+ zgemm_("No transpose", "No transpose", &i2, &j3, &jb,
+ &z__1, &ab[kv + 1 + jb + j * ab_dim1], &i__3,
+ work13, &c__65, &c_b1, &ab[jb + 1 + (j + kv) *
+ ab_dim1], &i__4);
+ }
+
+ if (i3 > 0) {
+
+/* Update A33 */
+
+ z__1.r = -1., z__1.i = -0.;
+ i__3 = *ldab - 1;
+ zgemm_("No transpose", "No transpose", &i3, &j3, &jb,
+ &z__1, work31, &c__65, work13, &c__65, &c_b1,
+ &ab[*kl + 1 + (j + kv) * ab_dim1], &i__3);
+ }
+
+/* Copy the lower triangle of A13 back into place */
+
+ i__3 = j3;
+ for (jj = 1; jj <= i__3; ++jj) {
+ i__4 = jb;
+ for (ii = jj; ii <= i__4; ++ii) {
+ i__5 = ii - jj + 1 + (jj + j + kv - 1) * ab_dim1;
+ i__6 = ii + jj * 65 - 66;
+ ab[i__5].r = work13[i__6].r, ab[i__5].i = work13[
+ i__6].i;
+/* L140: */
+ }
+/* L150: */
+ }
+ }
+ } else {
+
+/* Adjust the pivot indices. */
+
+ i__3 = j + jb - 1;
+ for (i__ = j; i__ <= i__3; ++i__) {
+ ipiv[i__] = ipiv[i__] + j - 1;
+/* L160: */
+ }
+ }
+
+/* Partially undo the interchanges in the current block to */
+/* restore the upper triangular form of A31 and copy the upper */
+/* triangle of A31 back into place */
+
+ i__3 = j;
+ for (jj = j + jb - 1; jj >= i__3; --jj) {
+ jp = ipiv[jj] - jj + 1;
+ if (jp != 1) {
+
+/* Apply interchange to columns J to JJ-1 */
+
+ if (jp + jj - 1 < j + *kl) {
+
+/* The interchange does not affect A31 */
+
+ i__4 = jj - j;
+ i__5 = *ldab - 1;
+ i__6 = *ldab - 1;
+ zswap_(&i__4, &ab[kv + 1 + jj - j + j * ab_dim1], &
+ i__5, &ab[kv + jp + jj - j + j * ab_dim1], &
+ i__6);
+ } else {
+
+/* The interchange does affect A31 */
+
+ i__4 = jj - j;
+ i__5 = *ldab - 1;
+ zswap_(&i__4, &ab[kv + 1 + jj - j + j * ab_dim1], &
+ i__5, &work31[jp + jj - j - *kl - 1], &c__65);
+ }
+ }
+
+/* Copy the current column of A31 back into place */
+
+/* Computing MIN */
+ i__4 = i3, i__5 = jj - j + 1;
+ nw = min(i__4,i__5);
+ if (nw > 0) {
+ zcopy_(&nw, &work31[(jj - j + 1) * 65 - 65], &c__1, &ab[
+ kv + *kl + 1 - jj + j + jj * ab_dim1], &c__1);
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+
+ return 0;
+
+/* End of ZGBTRF */
+
+} /* zgbtrf_ */
diff --git a/contrib/libs/clapack/zgbtrs.c b/contrib/libs/clapack/zgbtrs.c
new file mode 100644
index 0000000000..a4c641a65d
--- /dev/null
+++ b/contrib/libs/clapack/zgbtrs.c
@@ -0,0 +1,281 @@
+/* zgbtrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zgbtrs_(char *trans, integer *n, integer *kl, integer *
+ ku, integer *nrhs, doublecomplex *ab, integer *ldab, integer *ipiv,
+ doublecomplex *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, b_dim1, b_offset, i__1, i__2, i__3;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__, j, l, kd, lm;
+ extern logical lsame_(char *, char *);
+ logical lnoti;
+ extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *),
+ zgeru_(integer *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *)
+ , zswap_(integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *), ztbsv_(char *, char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(char *, integer *), zlacgv_(
+ integer *, doublecomplex *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGBTRS solves a system of linear equations */
+/* A * X = B, A**T * X = B, or A**H * X = B */
+/* with a general band matrix A using the LU factorization computed */
+/* by ZGBTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations. */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KL (input) INTEGER */
+/* The number of subdiagonals within the band of A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of superdiagonals within the band of A. KU >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AB (input) COMPLEX*16 array, dimension (LDAB,N) */
+/* Details of the LU factorization of the band matrix A, as */
+/* computed by ZGBTRF. U is stored as an upper triangular band */
+/* matrix with KL+KU superdiagonals in rows 1 to KL+KU+1, and */
+/* the multipliers used during the factorization are stored in */
+/* rows KL+KU+2 to 2*KL+KU+1. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= 2*KL+KU+1. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= N, row i of the matrix was */
+/* interchanged with row IPIV(i). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kl < 0) {
+ *info = -3;
+ } else if (*ku < 0) {
+ *info = -4;
+ } else if (*nrhs < 0) {
+ *info = -5;
+ } else if (*ldab < (*kl << 1) + *ku + 1) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGBTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+ kd = *ku + *kl + 1;
+ lnoti = *kl > 0;
+
+ if (notran) {
+
+/* Solve A*X = B. */
+
+/* Solve L*X = B, overwriting B with X. */
+
+/* L is represented as a product of permutations and unit lower */
+/* triangular matrices L = P(1) * L(1) * ... * P(n-1) * L(n-1), */
+/* where each transformation L(i) is a rank-one modification of */
+/* the identity matrix. */
+
+ if (lnoti) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *kl, i__3 = *n - j;
+ lm = min(i__2,i__3);
+ l = ipiv[j];
+ if (l != j) {
+ zswap_(nrhs, &b[l + b_dim1], ldb, &b[j + b_dim1], ldb);
+ }
+ z__1.r = -1., z__1.i = -0.;
+ zgeru_(&lm, nrhs, &z__1, &ab[kd + 1 + j * ab_dim1], &c__1, &b[
+ j + b_dim1], ldb, &b[j + 1 + b_dim1], ldb);
+/* L10: */
+ }
+ }
+
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Solve U*X = B, overwriting B with X. */
+
+ i__2 = *kl + *ku;
+ ztbsv_("Upper", "No transpose", "Non-unit", n, &i__2, &ab[
+ ab_offset], ldab, &b[i__ * b_dim1 + 1], &c__1);
+/* L20: */
+ }
+
+ } else if (lsame_(trans, "T")) {
+
+/* Solve A**T * X = B. */
+
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Solve U**T * X = B, overwriting B with X. */
+
+ i__2 = *kl + *ku;
+ ztbsv_("Upper", "Transpose", "Non-unit", n, &i__2, &ab[ab_offset],
+ ldab, &b[i__ * b_dim1 + 1], &c__1);
+/* L30: */
+ }
+
+/* Solve L**T * X = B, overwriting B with X. */
+
+ if (lnoti) {
+ for (j = *n - 1; j >= 1; --j) {
+/* Computing MIN */
+ i__1 = *kl, i__2 = *n - j;
+ lm = min(i__1,i__2);
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("Transpose", &lm, nrhs, &z__1, &b[j + 1 + b_dim1], ldb,
+ &ab[kd + 1 + j * ab_dim1], &c__1, &c_b1, &b[j +
+ b_dim1], ldb);
+ l = ipiv[j];
+ if (l != j) {
+ zswap_(nrhs, &b[l + b_dim1], ldb, &b[j + b_dim1], ldb);
+ }
+/* L40: */
+ }
+ }
+
+ } else {
+
+/* Solve A**H * X = B. */
+
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Solve U**H * X = B, overwriting B with X. */
+
+ i__2 = *kl + *ku;
+ ztbsv_("Upper", "Conjugate transpose", "Non-unit", n, &i__2, &ab[
+ ab_offset], ldab, &b[i__ * b_dim1 + 1], &c__1);
+/* L50: */
+ }
+
+/* Solve L**H * X = B, overwriting B with X. */
+
+ if (lnoti) {
+ for (j = *n - 1; j >= 1; --j) {
+/* Computing MIN */
+ i__1 = *kl, i__2 = *n - j;
+ lm = min(i__1,i__2);
+ zlacgv_(nrhs, &b[j + b_dim1], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("Conjugate transpose", &lm, nrhs, &z__1, &b[j + 1 +
+ b_dim1], ldb, &ab[kd + 1 + j * ab_dim1], &c__1, &c_b1,
+ &b[j + b_dim1], ldb);
+ zlacgv_(nrhs, &b[j + b_dim1], ldb);
+ l = ipiv[j];
+ if (l != j) {
+ zswap_(nrhs, &b[l + b_dim1], ldb, &b[j + b_dim1], ldb);
+ }
+/* L60: */
+ }
+ }
+ }
+ return 0;
+
+/* End of ZGBTRS */
+
+} /* zgbtrs_ */
diff --git a/contrib/libs/clapack/zgebak.c b/contrib/libs/clapack/zgebak.c
new file mode 100644
index 0000000000..2e65764e81
--- /dev/null
+++ b/contrib/libs/clapack/zgebak.c
@@ -0,0 +1,235 @@
+/* zgebak.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zgebak_(char *job, char *side, integer *n, integer *ilo,
+ integer *ihi, doublereal *scale, integer *m, doublecomplex *v,
+ integer *ldv, integer *info)
+{
+ /* System generated locals */
+ integer v_dim1, v_offset, i__1;
+
+ /* Local variables */
+ integer i__, k;
+ doublereal s;
+ integer ii;
+ extern logical lsame_(char *, char *);
+ logical leftv;
+ extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), xerbla_(char *, integer *),
+ zdscal_(integer *, doublereal *, doublecomplex *, integer *);
+ logical rightv;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGEBAK forms the right or left eigenvectors of a complex general */
+/* matrix by backward transformation on the computed eigenvectors of the */
+/* balanced matrix output by ZGEBAL. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies the type of backward transformation required: */
+/* = 'N', do nothing, return immediately; */
+/* = 'P', do backward transformation for permutation only; */
+/* = 'S', do backward transformation for scaling only; */
+/* = 'B', do backward transformations for both permutation and */
+/* scaling. */
+/* JOB must be the same as the argument JOB supplied to ZGEBAL. */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'R': V contains right eigenvectors; */
+/* = 'L': V contains left eigenvectors. */
+
+/* N (input) INTEGER */
+/* The number of rows of the matrix V. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* The integers ILO and IHI determined by ZGEBAL. */
+/* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/* SCALE (input) DOUBLE PRECISION array, dimension (N) */
+/* Details of the permutation and scaling factors, as returned */
+/* by ZGEBAL. */
+
+/* M (input) INTEGER */
+/* The number of columns of the matrix V. M >= 0. */
+
+/* V (input/output) COMPLEX*16 array, dimension (LDV,M) */
+/* On entry, the matrix of right or left eigenvectors to be */
+/* transformed, as returned by ZHSEIN or ZTREVC. */
+/* On exit, V is overwritten by the transformed eigenvectors. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. LDV >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and Test the input parameters */
+
+ /* Parameter adjustments */
+ --scale;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+
+ /* Function Body */
+ rightv = lsame_(side, "R");
+ leftv = lsame_(side, "L");
+
+ *info = 0;
+ if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+ && ! lsame_(job, "B")) {
+ *info = -1;
+ } else if (! rightv && ! leftv) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ilo < 1 || *ilo > max(1,*n)) {
+ *info = -4;
+ } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+ *info = -5;
+ } else if (*m < 0) {
+ *info = -7;
+ } else if (*ldv < max(1,*n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGEBAK", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*m == 0) {
+ return 0;
+ }
+ if (lsame_(job, "N")) {
+ return 0;
+ }
+
+ if (*ilo == *ihi) {
+ goto L30;
+ }
+
+/* Backward balance */
+
+ if (lsame_(job, "S") || lsame_(job, "B")) {
+
+ if (rightv) {
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ s = scale[i__];
+ zdscal_(m, &s, &v[i__ + v_dim1], ldv);
+/* L10: */
+ }
+ }
+
+ if (leftv) {
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ s = 1. / scale[i__];
+ zdscal_(m, &s, &v[i__ + v_dim1], ldv);
+/* L20: */
+ }
+ }
+
+ }
+
+/* Backward permutation */
+
+/* For I = ILO-1 step -1 until 1, */
+/* IHI+1 step 1 until N do -- */
+
+L30:
+ if (lsame_(job, "P") || lsame_(job, "B")) {
+ if (rightv) {
+ i__1 = *n;
+ for (ii = 1; ii <= i__1; ++ii) {
+ i__ = ii;
+ if (i__ >= *ilo && i__ <= *ihi) {
+ goto L40;
+ }
+ if (i__ < *ilo) {
+ i__ = *ilo - ii;
+ }
+ k = (integer) scale[i__];
+ if (k == i__) {
+ goto L40;
+ }
+ zswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L40:
+ ;
+ }
+ }
+
+ if (leftv) {
+ i__1 = *n;
+ for (ii = 1; ii <= i__1; ++ii) {
+ i__ = ii;
+ if (i__ >= *ilo && i__ <= *ihi) {
+ goto L50;
+ }
+ if (i__ < *ilo) {
+ i__ = *ilo - ii;
+ }
+ k = (integer) scale[i__];
+ if (k == i__) {
+ goto L50;
+ }
+ zswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L50:
+ ;
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZGEBAK */
+
+} /* zgebak_ */
diff --git a/contrib/libs/clapack/zgebal.c b/contrib/libs/clapack/zgebal.c
new file mode 100644
index 0000000000..a7bfca23cc
--- /dev/null
+++ b/contrib/libs/clapack/zgebal.c
@@ -0,0 +1,414 @@
+/* zgebal.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zgebal_(char *job, integer *n, doublecomplex *a, integer
+	*lda, integer *ilo, integer *ihi, doublereal *scale, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *), z_abs(doublecomplex *);
+
+    /* Local variables */
+    doublereal c__, f, g;
+    integer i__, j, k, l, m;
+    doublereal r__, s, ca, ra;
+    integer ica, ira, iexc;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *);
+    doublereal sfmin1, sfmin2, sfmax1, sfmax2;
+    extern doublereal dlamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+	    integer *, doublereal *, doublecomplex *, integer *);
+    extern integer izamax_(integer *, doublecomplex *, integer *);
+    logical noconv;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGEBAL balances a general complex matrix A. This involves, first, */
+/* permuting A by a similarity transformation to isolate eigenvalues */
+/* in the first 1 to ILO-1 and last IHI+1 to N elements on the */
+/* diagonal; and second, applying a diagonal similarity transformation */
+/* to rows and columns ILO to IHI to make the rows and columns as */
+/* close in norm as possible. Both steps are optional. */
+
+/* Balancing may reduce the 1-norm of the matrix, and improve the */
+/* accuracy of the computed eigenvalues and/or eigenvectors. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies the operations to be performed on A: */
+/* = 'N': none: simply set ILO = 1, IHI = N, SCALE(I) = 1.0 */
+/* for i = 1,...,N; */
+/* = 'P': permute only; */
+/* = 'S': scale only; */
+/* = 'B': both permute and scale. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the input matrix A. */
+/* On exit, A is overwritten by the balanced matrix. */
+/* If JOB = 'N', A is not referenced. */
+/* See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* ILO (output) INTEGER */
+/* IHI (output) INTEGER */
+/* ILO and IHI are set to integers such that on exit */
+/* A(i,j) = 0 if i > j and j = 1,...,ILO-1 or I = IHI+1,...,N. */
+/* If JOB = 'N' or 'S', ILO = 1 and IHI = N. */
+
+/* SCALE (output) DOUBLE PRECISION array, dimension (N) */
+/* Details of the permutations and scaling factors applied to */
+/* A. If P(j) is the index of the row and column interchanged */
+/* with row and column j and D(j) is the scaling factor */
+/* applied to row and column j, then */
+/* SCALE(j) = P(j) for j = 1,...,ILO-1 */
+/* = D(j) for j = ILO,...,IHI */
+/* = P(j) for j = IHI+1,...,N. */
+/* The order in which the interchanges are made is N to IHI+1, */
+/* then 1 to ILO-1. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The permutations consist of row and column interchanges which put */
+/* the matrix in the form */
+
+/* ( T1 X Y ) */
+/* P A P = ( 0 B Z ) */
+/* ( 0 0 T2 ) */
+
+/* where T1 and T2 are upper triangular matrices whose eigenvalues lie */
+/* along the diagonal. The column indices ILO and IHI mark the starting */
+/* and ending columns of the submatrix B. Balancing consists of applying */
+/* a diagonal similarity transformation inv(D) * B * D to make the */
+/* 1-norms of each row of B and its corresponding column nearly equal. */
+/* The output matrix is */
+
+/* ( T1 X*D Y ) */
+/* ( 0 inv(D)*B*D inv(D)*Z ). */
+/* ( 0 0 T2 ) */
+
+/* Information about the permutations P and the diagonal matrix D is */
+/* returned in the vector SCALE. */
+
+/* This subroutine is based on the EISPACK routine CBAL. */
+
+/* Modified by Tzu-Yi Chen, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --scale;
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+	    && ! lsame_(job, "B")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGEBAL", &i__1);
+	return 0;
+    }
+
+/* K..L delimits the active submatrix; columns 1..K-1 and L+1..N end up */
+/* holding isolated eigenvalues (K and L become ILO and IHI at L210). */
+
+    k = 1;
+    l = *n;
+
+    if (*n == 0) {
+	goto L210;
+    }
+
+    if (lsame_(job, "N")) {
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    scale[i__] = 1.;
+/* L10: */
+	}
+	goto L210;
+    }
+
+    if (lsame_(job, "S")) {
+	goto L120;
+    }
+
+/* Permutation to isolate eigenvalues if possible */
+
+    goto L50;
+
+/* Row and column exchange. */
+
+L20:
+    scale[m] = (doublereal) j;
+    if (j == m) {
+	goto L30;
+    }
+
+    zswap_(&l, &a[j * a_dim1 + 1], &c__1, &a[m * a_dim1 + 1], &c__1);
+    i__1 = *n - k + 1;
+    zswap_(&i__1, &a[j + k * a_dim1], lda, &a[m + k * a_dim1], lda);
+
+L30:
+/* IEXC records which search requested the exchange: */
+/* 1 = row search (resume at L40), 2 = column search (resume at L80). */
+    switch (iexc) {
+	case 1: goto L40;
+	case 2: goto L80;
+    }
+
+/* Search for rows isolating an eigenvalue and push them down. */
+
+L40:
+    if (l == 1) {
+	goto L210;
+    }
+    --l;
+
+L50:
+    for (j = l; j >= 1; --j) {
+
+	i__1 = l;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (i__ == j) {
+		goto L60;
+	    }
+	    i__2 = j + i__ * a_dim1;
+	    if (a[i__2].r != 0. || d_imag(&a[j + i__ * a_dim1]) != 0.) {
+		goto L70;
+	    }
+L60:
+	    ;
+	}
+
+	m = l;
+	iexc = 1;
+	goto L20;
+L70:
+	;
+    }
+
+    goto L90;
+
+/* Search for columns isolating an eigenvalue and push them left. */
+
+L80:
+    ++k;
+
+L90:
+    i__1 = l;
+    for (j = k; j <= i__1; ++j) {
+
+	i__2 = l;
+	for (i__ = k; i__ <= i__2; ++i__) {
+	    if (i__ == j) {
+		goto L100;
+	    }
+	    i__3 = i__ + j * a_dim1;
+	    if (a[i__3].r != 0. || d_imag(&a[i__ + j * a_dim1]) != 0.) {
+		goto L110;
+	    }
+L100:
+	    ;
+	}
+
+	m = k;
+	iexc = 2;
+	goto L20;
+L110:
+	;
+    }
+
+L120:
+    i__1 = l;
+    for (i__ = k; i__ <= i__1; ++i__) {
+	scale[i__] = 1.;
+/* L130: */
+    }
+
+    if (lsame_(job, "P")) {
+	goto L210;
+    }
+
+/* Balance the submatrix in rows K to L. */
+
+/* Iterative loop for norm reduction */
+
+/* SFMIN/SFMAX pairs bound the power-of-two scaling to avoid */
+/* overflow/underflow (limits derived from DLAMCH). */
+    sfmin1 = dlamch_("S") / dlamch_("P");
+    sfmax1 = 1. / sfmin1;
+    sfmin2 = sfmin1 * 2.;
+    sfmax2 = 1. / sfmin2;
+/* One sweep over the active rows/columns; repeated while NOCONV is set. */
+L140:
+    noconv = FALSE_;
+
+    i__1 = l;
+    for (i__ = k; i__ <= i__1; ++i__) {
+/* C = 1-norm (off-diagonal) of column I, R = 1-norm of row I. */
+	c__ = 0.;
+	r__ = 0.;
+
+	i__2 = l;
+	for (j = k; j <= i__2; ++j) {
+	    if (j == i__) {
+		goto L150;
+	    }
+	    i__3 = j + i__ * a_dim1;
+	    c__ += (d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a[j + i__ *
+		    a_dim1]), abs(d__2));
+	    i__3 = i__ + j * a_dim1;
+	    r__ += (d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a[i__ + j *
+		    a_dim1]), abs(d__2));
+L150:
+	    ;
+	}
+	ica = izamax_(&l, &a[i__ * a_dim1 + 1], &c__1);
+	ca = z_abs(&a[ica + i__ * a_dim1]);
+	i__2 = *n - k + 1;
+	ira = izamax_(&i__2, &a[i__ + k * a_dim1], lda);
+	ra = z_abs(&a[i__ + (ira + k - 1) * a_dim1]);
+
+/* Guard against zero C or R due to underflow. */
+
+	if (c__ == 0. || r__ == 0.) {
+	    goto L200;
+	}
+	g = r__ / 2.;
+	f = 1.;
+	s = c__ + r__;
+/* Grow F by powers of two while column norm C stays below G = R/2. */
+L160:
+/* Computing MAX */
+	d__1 = max(f,c__);
+/* Computing MIN */
+	d__2 = min(r__,g);
+	if (c__ >= g || max(d__1,ca) >= sfmax2 || min(d__2,ra) <= sfmin2) {
+	    goto L170;
+	}
+	f *= 2.;
+	c__ *= 2.;
+	ca *= 2.;
+	r__ /= 2.;
+	g /= 2.;
+	ra /= 2.;
+	goto L160;
+
+L170:
+	g = c__ / 2.;
+/* Shrink F by powers of two while G = C/2 stays at or above R. */
+L180:
+/* Computing MIN */
+	d__1 = min(f,c__), d__1 = min(d__1,g);
+	if (g < r__ || max(r__,ra) >= sfmax2 || min(d__1,ca) <= sfmin2) {
+	    goto L190;
+	}
+	f /= 2.;
+	c__ /= 2.;
+	g /= 2.;
+	ca /= 2.;
+	r__ *= 2.;
+	ra *= 2.;
+	goto L180;
+
+/* Now balance. */
+
+L190:
+/* Skip the scaling if it would not reduce C + R by at least 5%. */
+	if (c__ + r__ >= s * .95) {
+	    goto L200;
+	}
+	if (f < 1. && scale[i__] < 1.) {
+	    if (f * scale[i__] <= sfmin1) {
+		goto L200;
+	    }
+	}
+	if (f > 1. && scale[i__] > 1.) {
+	    if (scale[i__] >= sfmax1 / f) {
+		goto L200;
+	    }
+	}
+	g = 1. / f;
+	scale[i__] *= f;
+	noconv = TRUE_;
+
+/* Apply the similarity scaling: row I by 1/F, column I by F. */
+	i__2 = *n - k + 1;
+	zdscal_(&i__2, &g, &a[i__ + k * a_dim1], lda);
+	zdscal_(&l, &f, &a[i__ * a_dim1 + 1], &c__1);
+
+L200:
+	;
+    }
+
+    if (noconv) {
+	goto L140;
+    }
+
+L210:
+    *ilo = k;
+    *ihi = l;
+
+    return 0;
+
+/* End of ZGEBAL */
+
+} /* zgebal_ */
diff --git a/contrib/libs/clapack/zgebd2.c b/contrib/libs/clapack/zgebd2.c
new file mode 100644
index 0000000000..8522fa8451
--- /dev/null
+++ b/contrib/libs/clapack/zgebd2.c
@@ -0,0 +1,345 @@
+/* zgebd2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zgebd2_(integer *m, integer *n, doublecomplex *a,
+	integer *lda, doublereal *d__, doublereal *e, doublecomplex *tauq,
+	doublecomplex *taup, doublecomplex *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__;
+    doublecomplex alpha;
+    extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, doublecomplex *), xerbla_(char *, integer *), zlarfg_(integer *, doublecomplex *, doublecomplex *,
+	    integer *, doublecomplex *), zlacgv_(integer *, doublecomplex *,
+	    integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGEBD2 reduces a complex general m by n matrix A to upper or lower */
+/* real bidiagonal form B by a unitary transformation: Q' * A * P = B. */
+
+/* If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows in the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns in the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the m by n general matrix to be reduced. */
+/* On exit, */
+/* if m >= n, the diagonal and the first superdiagonal are */
+/* overwritten with the upper bidiagonal matrix B; the */
+/* elements below the diagonal, with the array TAUQ, represent */
+/* the unitary matrix Q as a product of elementary */
+/* reflectors, and the elements above the first superdiagonal, */
+/* with the array TAUP, represent the unitary matrix P as */
+/* a product of elementary reflectors; */
+/* if m < n, the diagonal and the first subdiagonal are */
+/* overwritten with the lower bidiagonal matrix B; the */
+/* elements below the first subdiagonal, with the array TAUQ, */
+/* represent the unitary matrix Q as a product of */
+/* elementary reflectors, and the elements above the diagonal, */
+/* with the array TAUP, represent the unitary matrix P as */
+/* a product of elementary reflectors. */
+/* See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* D (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The diagonal elements of the bidiagonal matrix B: */
+/* D(i) = A(i,i). */
+
+/* E (output) DOUBLE PRECISION array, dimension (min(M,N)-1) */
+/* The off-diagonal elements of the bidiagonal matrix B: */
+/* if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1; */
+/* if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1. */
+
+/* TAUQ (output) COMPLEX*16 array dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the unitary matrix Q. See Further Details. */
+
+/* TAUP (output) COMPLEX*16 array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the unitary matrix P. See Further Details. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (max(M,N)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrices Q and P are represented as products of elementary */
+/* reflectors: */
+
+/* If m >= n, */
+
+/* Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1) */
+
+/* Each H(i) and G(i) has the form: */
+
+/* H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' */
+
+/* where tauq and taup are complex scalars, and v and u are complex */
+/* vectors; v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in */
+/* A(i+1:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in */
+/* A(i,i+2:n); tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* If m < n, */
+
+/* Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m) */
+
+/* Each H(i) and G(i) has the form: */
+
+/* H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' */
+
+/* where tauq and taup are complex scalars, v and u are complex vectors; */
+/* v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i); */
+/* u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n); */
+/* tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* The contents of A on exit are illustrated by the following examples: */
+
+/* m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): */
+
+/* ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 ) */
+/* ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 ) */
+/* ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 ) */
+/* ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 ) */
+/* ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 ) */
+/* ( v1 v2 v3 v4 v5 ) */
+
+/* where d and e denote diagonal and off-diagonal elements of B, vi */
+/* denotes an element of the vector defining H(i), and ui an element of */
+/* the vector defining G(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --d__;
+    --e;
+    --tauq;
+    --taup;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info < 0) {
+	i__1 = -(*info);
+	xerbla_("ZGEBD2", &i__1);
+	return 0;
+    }
+
+    if (*m >= *n) {
+
+/* Reduce to upper bidiagonal form */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+
+	    i__2 = i__ + i__ * a_dim1;
+	    alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+	    i__2 = *m - i__ + 1;
+/* Computing MIN */
+	    i__3 = i__ + 1;
+	    zlarfg_(&i__2, &alpha, &a[min(i__3, *m)+ i__ * a_dim1], &c__1, &
+		    tauq[i__]);
+	    i__2 = i__;
+	    d__[i__2] = alpha.r;
+/* Save the real diagonal in D(i) and temporarily set A(i,i) = 1 so */
+/* column i can serve as the Householder vector; A(i,i) is restored */
+/* from D(i) after the reflector is applied. */
+	    i__2 = i__ + i__ * a_dim1;
+	    a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Apply H(i)' to A(i:m,i+1:n) from the left */
+
+	    if (i__ < *n) {
+		i__2 = *m - i__ + 1;
+		i__3 = *n - i__;
+		d_cnjg(&z__1, &tauq[i__]);
+		zlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &
+			z__1, &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+	    }
+	    i__2 = i__ + i__ * a_dim1;
+	    i__3 = i__;
+	    a[i__2].r = d__[i__3], a[i__2].i = 0.;
+
+	    if (i__ < *n) {
+
+/* Generate elementary reflector G(i) to annihilate */
+/* A(i,i+2:n) */
+
+/* The row is conjugated (ZLACGV) before and after ZLARFG because */
+/* the reflector acts on the conjugated row. */
+		i__2 = *n - i__;
+		zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+		i__2 = i__ + (i__ + 1) * a_dim1;
+		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+		i__2 = *n - i__;
+/* Computing MIN */
+		i__3 = i__ + 2;
+		zlarfg_(&i__2, &alpha, &a[i__ + min(i__3, *n)* a_dim1], lda, &
+			taup[i__]);
+		i__2 = i__;
+		e[i__2] = alpha.r;
+		i__2 = i__ + (i__ + 1) * a_dim1;
+		a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Apply G(i) to A(i+1:m,i+1:n) from the right */
+
+		i__2 = *m - i__;
+		i__3 = *n - i__;
+		zlarf_("Right", &i__2, &i__3, &a[i__ + (i__ + 1) * a_dim1],
+			lda, &taup[i__], &a[i__ + 1 + (i__ + 1) * a_dim1],
+			lda, &work[1]);
+		i__2 = *n - i__;
+		zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+		i__2 = i__ + (i__ + 1) * a_dim1;
+		i__3 = i__;
+		a[i__2].r = e[i__3], a[i__2].i = 0.;
+	    } else {
+		i__2 = i__;
+		taup[i__2].r = 0., taup[i__2].i = 0.;
+	    }
+/* L10: */
+	}
+    } else {
+
+/* Reduce to lower bidiagonal form */
+
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector G(i) to annihilate A(i,i+1:n) */
+
+	    i__2 = *n - i__ + 1;
+	    zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+	    i__2 = i__ + i__ * a_dim1;
+	    alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+	    i__2 = *n - i__ + 1;
+/* Computing MIN */
+	    i__3 = i__ + 1;
+	    zlarfg_(&i__2, &alpha, &a[i__ + min(i__3, *n)* a_dim1], lda, &
+		    taup[i__]);
+	    i__2 = i__;
+	    d__[i__2] = alpha.r;
+	    i__2 = i__ + i__ * a_dim1;
+	    a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Apply G(i) to A(i+1:m,i:n) from the right */
+
+	    if (i__ < *m) {
+		i__2 = *m - i__;
+		i__3 = *n - i__ + 1;
+		zlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &
+			taup[i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+	    }
+	    i__2 = *n - i__ + 1;
+	    zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+	    i__2 = i__ + i__ * a_dim1;
+	    i__3 = i__;
+	    a[i__2].r = d__[i__3], a[i__2].i = 0.;
+
+	    if (i__ < *m) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(i+2:m,i) */
+
+		i__2 = i__ + 1 + i__ * a_dim1;
+		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+		i__2 = *m - i__;
+/* Computing MIN */
+		i__3 = i__ + 2;
+		zlarfg_(&i__2, &alpha, &a[min(i__3, *m)+ i__ * a_dim1], &c__1,
+			&tauq[i__]);
+		i__2 = i__;
+		e[i__2] = alpha.r;
+		i__2 = i__ + 1 + i__ * a_dim1;
+		a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Apply H(i)' to A(i+1:m,i+1:n) from the left */
+
+		i__2 = *m - i__;
+		i__3 = *n - i__;
+		d_cnjg(&z__1, &tauq[i__]);
+		zlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &
+			c__1, &z__1, &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &
+			work[1]);
+		i__2 = i__ + 1 + i__ * a_dim1;
+		i__3 = i__;
+		a[i__2].r = e[i__3], a[i__2].i = 0.;
+	    } else {
+		i__2 = i__;
+		tauq[i__2].r = 0., tauq[i__2].i = 0.;
+	    }
+/* L20: */
+	}
+    }
+    return 0;
+
+/* End of ZGEBD2 */
+
+} /* zgebd2_ */
diff --git a/contrib/libs/clapack/zgebrd.c b/contrib/libs/clapack/zgebrd.c
new file mode 100644
index 0000000000..8ec2dffa6c
--- /dev/null
+++ b/contrib/libs/clapack/zgebrd.c
@@ -0,0 +1,351 @@
+/* zgebrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int zgebrd_(integer *m, integer *n, doublecomplex *a,
+	integer *lda, doublereal *d__, doublereal *e, doublecomplex *tauq,
+	doublecomplex *taup, doublecomplex *work, integer *lwork, integer *
+	info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    doublereal d__1;
+    doublecomplex z__1;
+
+    /* Local variables */
+    integer i__, j, nb, nx;
+    doublereal ws;
+    integer nbmin, iinfo, minmn;
+    extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *), zgebd2_(integer *, integer *,
+	    doublecomplex *, integer *, doublereal *, doublereal *,
+	    doublecomplex *, doublecomplex *, doublecomplex *, integer *),
+	    xerbla_(char *, integer *), zlabrd_(integer *, integer *,
+	    integer *, doublecomplex *, integer *, doublereal *, doublereal *,
+	    doublecomplex *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer ldwrkx, ldwrky, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGEBRD reduces a general complex M-by-N matrix A to upper or lower */
+/* bidiagonal form B by a unitary transformation: Q**H * A * P = B. */
+
+/* If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows in the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns in the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N general matrix to be reduced. */
+/* On exit, */
+/* if m >= n, the diagonal and the first superdiagonal are */
+/* overwritten with the upper bidiagonal matrix B; the */
+/* elements below the diagonal, with the array TAUQ, represent */
+/* the unitary matrix Q as a product of elementary */
+/* reflectors, and the elements above the first superdiagonal, */
+/* with the array TAUP, represent the unitary matrix P as */
+/* a product of elementary reflectors; */
+/* if m < n, the diagonal and the first subdiagonal are */
+/* overwritten with the lower bidiagonal matrix B; the */
+/* elements below the first subdiagonal, with the array TAUQ, */
+/* represent the unitary matrix Q as a product of */
+/* elementary reflectors, and the elements above the diagonal, */
+/* with the array TAUP, represent the unitary matrix P as */
+/* a product of elementary reflectors. */
+/* See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* D (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The diagonal elements of the bidiagonal matrix B: */
+/* D(i) = A(i,i). */
+
+/* E (output) DOUBLE PRECISION array, dimension (min(M,N)-1) */
+/* The off-diagonal elements of the bidiagonal matrix B: */
+/* if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1; */
+/* if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1. */
+
+/* TAUQ (output) COMPLEX*16 array dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the unitary matrix Q. See Further Details. */
+
+/* TAUP (output) COMPLEX*16 array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the unitary matrix P. See Further Details. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,M,N). */
+/* For optimum performance LWORK >= (M+N)*NB, where NB */
+/* is the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrices Q and P are represented as products of elementary */
+/* reflectors: */
+
+/* If m >= n, */
+
+/* Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1) */
+
+/* Each H(i) and G(i) has the form: */
+
+/* H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' */
+
+/* where tauq and taup are complex scalars, and v and u are complex */
+/* vectors; v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in */
+/* A(i+1:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in */
+/* A(i,i+2:n); tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* If m < n, */
+
+/* Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m) */
+
+/* Each H(i) and G(i) has the form: */
+
+/* H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' */
+
+/* where tauq and taup are complex scalars, and v and u are complex */
+/* vectors; v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in */
+/* A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in */
+/* A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* The contents of A on exit are illustrated by the following examples: */
+
+/* m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): */
+
+/* ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 ) */
+/* ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 ) */
+/* ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 ) */
+/* ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 ) */
+/* ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 ) */
+/* ( v1 v2 v3 v4 v5 ) */
+
+/* where d and e denote diagonal and off-diagonal elements of B, vi */
+/* denotes an element of the vector defining H(i), and ui an element of */
+/* the vector defining G(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --d__;
+    --e;
+    --tauq;
+    --taup;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+/* NB = optimal block size from ILAENV; the optimal workspace */
+/* (M+N)*NB is reported in WORK(1) for workspace queries. */
+/* Computing MAX */
+    i__1 = 1, i__2 = ilaenv_(&c__1, "ZGEBRD", " ", m, n, &c_n1, &c_n1);
+    nb = max(i__1,i__2);
+    lwkopt = (*m + *n) * nb;
+    d__1 = (doublereal) lwkopt;
+    work[1].r = d__1, work[1].i = 0.;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    } else /* if(complicated condition) */ {
+/* Computing MAX */
+	i__1 = max(1,*m);
+	if (*lwork < max(i__1,*n) && ! lquery) {
+	    *info = -10;
+	}
+    }
+    if (*info < 0) {
+	i__1 = -(*info);
+	xerbla_("ZGEBRD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    minmn = min(*m,*n);
+    if (minmn == 0) {
+	work[1].r = 1., work[1].i = 0.;
+	return 0;
+    }
+
+/* WS tracks the workspace actually used; it is returned in WORK(1). */
+    ws = (doublereal) max(*m,*n);
+    ldwrkx = *m;
+    ldwrky = *n;
+
+    if (nb > 1 && nb < minmn) {
+
+/* Set the crossover point NX. */
+
+/* Computing MAX */
+	i__1 = nb, i__2 = ilaenv_(&c__3, "ZGEBRD", " ", m, n, &c_n1, &c_n1);
+	nx = max(i__1,i__2);
+
+/* Determine when to switch from blocked to unblocked code. */
+
+	if (nx < minmn) {
+	    ws = (doublereal) ((*m + *n) * nb);
+	    if ((doublereal) (*lwork) < ws) {
+
+/* Not enough work space for the optimal NB, consider using */
+/* a smaller block size. */
+
+		nbmin = ilaenv_(&c__2, "ZGEBRD", " ", m, n, &c_n1, &c_n1);
+		if (*lwork >= (*m + *n) * nbmin) {
+		    nb = *lwork / (*m + *n);
+		} else {
+		    nb = 1;
+		    nx = minmn;
+		}
+	    }
+	}
+    } else {
+	nx = minmn;
+    }
+
+/* Blocked loop: reduce NB rows/columns per iteration with ZLABRD, */
+/* then apply the accumulated block update to the trailing submatrix. */
+    i__1 = minmn - nx;
+    i__2 = nb;
+    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+
+/* Reduce rows and columns i:i+ib-1 to bidiagonal form and return */
+/* the matrices X and Y which are needed to update the unreduced */
+/* part of the matrix */
+
+	i__3 = *m - i__ + 1;
+	i__4 = *n - i__ + 1;
+	zlabrd_(&i__3, &i__4, &nb, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[
+		i__], &tauq[i__], &taup[i__], &work[1], &ldwrkx, &work[ldwrkx
+		* nb + 1], &ldwrky);
+
+/* Update the trailing submatrix A(i+ib:m,i+ib:n), using */
+/* an update of the form A := A - V*Y' - X*U' */
+
+	i__3 = *m - i__ - nb + 1;
+	i__4 = *n - i__ - nb + 1;
+	z__1.r = -1., z__1.i = -0.;
+	zgemm_("No transpose", "Conjugate transpose", &i__3, &i__4, &nb, &
+		z__1, &a[i__ + nb + i__ * a_dim1], lda, &work[ldwrkx * nb +
+		nb + 1], &ldwrky, &c_b1, &a[i__ + nb + (i__ + nb) * a_dim1],
+		lda);
+	i__3 = *m - i__ - nb + 1;
+	i__4 = *n - i__ - nb + 1;
+	z__1.r = -1., z__1.i = -0.;
+	zgemm_("No transpose", "No transpose", &i__3, &i__4, &nb, &z__1, &
+		work[nb + 1], &ldwrkx, &a[i__ + (i__ + nb) * a_dim1], lda, &
+		c_b1, &a[i__ + nb + (i__ + nb) * a_dim1], lda);
+
+/* Copy diagonal and off-diagonal elements of B back into A */
+
+/* ZLABRD leaves the panel's diagonal/off-diagonal in D and E; restore */
+/* them into A so the unblocked finish sees a consistent matrix. */
+	if (*m >= *n) {
+	    i__3 = i__ + nb - 1;
+	    for (j = i__; j <= i__3; ++j) {
+		i__4 = j + j * a_dim1;
+		i__5 = j;
+		a[i__4].r = d__[i__5], a[i__4].i = 0.;
+		i__4 = j + (j + 1) * a_dim1;
+		i__5 = j;
+		a[i__4].r = e[i__5], a[i__4].i = 0.;
+/* L10: */
+	    }
+	} else {
+	    i__3 = i__ + nb - 1;
+	    for (j = i__; j <= i__3; ++j) {
+		i__4 = j + j * a_dim1;
+		i__5 = j;
+		a[i__4].r = d__[i__5], a[i__4].i = 0.;
+		i__4 = j + 1 + j * a_dim1;
+		i__5 = j;
+		a[i__4].r = e[i__5], a[i__4].i = 0.;
+/* L20: */
+	    }
+	}
+/* L30: */
+    }
+
+/* Use unblocked code to reduce the remainder of the matrix */
+
+    i__2 = *m - i__ + 1;
+    i__1 = *n - i__ + 1;
+    zgebd2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__], &
+	    tauq[i__], &taup[i__], &work[1], &iinfo);
+    work[1].r = ws, work[1].i = 0.;
+    return 0;
+
+/* End of ZGEBRD */
+
+} /* zgebrd_ */
diff --git a/contrib/libs/clapack/zgecon.c b/contrib/libs/clapack/zgecon.c
new file mode 100644
index 0000000000..894ab735cc
--- /dev/null
+++ b/contrib/libs/clapack/zgecon.c
@@ -0,0 +1,235 @@
+/* zgecon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zgecon_(char *norm, integer *n, doublecomplex *a,
+ integer *lda, doublereal *anorm, doublereal *rcond, doublecomplex *
+ work, doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ doublereal sl;
+ integer ix;
+ doublereal su;
+ integer kase, kase1;
+ doublereal scale;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int zlacn2_(integer *, doublecomplex *,
+ doublecomplex *, doublereal *, integer *, integer *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal ainvnm;
+ extern integer izamax_(integer *, doublecomplex *, integer *);
+ logical onenrm;
+ extern /* Subroutine */ int zdrscl_(integer *, doublereal *,
+ doublecomplex *, integer *);
+ char normin[1];
+ doublereal smlnum;
+ extern /* Subroutine */ int zlatrs_(char *, char *, char *, char *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublereal *, doublereal *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGECON estimates the reciprocal of the condition number of a general */
+/* complex matrix A, in either the 1-norm or the infinity-norm, using */
+/* the LU factorization computed by ZGETRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as */
+/* RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The factors L and U from the factorization A = P*L*U */
+/* as computed by ZGETRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* ANORM (input) DOUBLE PRECISION */
+/* If NORM = '1' or 'O', the 1-norm of the original matrix A. */
+/* If NORM = 'I', the infinity-norm of the original matrix A. */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+ if (! onenrm && ! lsame_(norm, "I")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else if (*anorm < 0.) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGECON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.;
+ if (*n == 0) {
+ *rcond = 1.;
+ return 0;
+ } else if (*anorm == 0.) {
+ return 0;
+ }
+
+ smlnum = dlamch_("Safe minimum");
+
+/* Estimate the norm of inv(A). */
+
+ ainvnm = 0.;
+ *(unsigned char *)normin = 'N';
+ if (onenrm) {
+ kase1 = 1;
+ } else {
+ kase1 = 2;
+ }
+ kase = 0;
+L10:
+ zlacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+ if (kase == kase1) {
+
+/* Multiply by inv(L). */
+
+ zlatrs_("Lower", "No transpose", "Unit", normin, n, &a[a_offset],
+ lda, &work[1], &sl, &rwork[1], info);
+
+/* Multiply by inv(U). */
+
+ zlatrs_("Upper", "No transpose", "Non-unit", normin, n, &a[
+ a_offset], lda, &work[1], &su, &rwork[*n + 1], info);
+ } else {
+
+/* Multiply by inv(U'). */
+
+ zlatrs_("Upper", "Conjugate transpose", "Non-unit", normin, n, &a[
+ a_offset], lda, &work[1], &su, &rwork[*n + 1], info);
+
+/* Multiply by inv(L'). */
+
+ zlatrs_("Lower", "Conjugate transpose", "Unit", normin, n, &a[
+ a_offset], lda, &work[1], &sl, &rwork[1], info);
+ }
+
+/* Divide X by 1/(SL*SU) if doing so will not cause overflow. */
+
+ scale = sl * su;
+ *(unsigned char *)normin = 'Y';
+ if (scale != 1.) {
+ ix = izamax_(n, &work[1], &c__1);
+ i__1 = ix;
+ if (scale < ((d__1 = work[i__1].r, abs(d__1)) + (d__2 = d_imag(&
+ work[ix]), abs(d__2))) * smlnum || scale == 0.) {
+ goto L20;
+ }
+ zdrscl_(n, &scale, &work[1], &c__1);
+ }
+ goto L10;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.) {
+ *rcond = 1. / ainvnm / *anorm;
+ }
+
+L20:
+ return 0;
+
+/* End of ZGECON */
+
+} /* zgecon_ */
diff --git a/contrib/libs/clapack/zgeequ.c b/contrib/libs/clapack/zgeequ.c
new file mode 100644
index 0000000000..cb53db4bcb
--- /dev/null
+++ b/contrib/libs/clapack/zgeequ.c
@@ -0,0 +1,306 @@
+/* zgeequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zgeequ_(integer *m, integer *n, doublecomplex *a,
+ integer *lda, doublereal *r__, doublereal *c__, doublereal *rowcnd,
+ doublereal *colcnd, doublereal *amax, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j;
+ doublereal rcmin, rcmax;
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum, smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGEEQU computes row and column scalings intended to equilibrate an */
+/* M-by-N matrix A and reduce its condition number. R returns the row */
+/* scale factors and C the column scale factors, chosen to try to make */
+/* the largest element in each row and column of the matrix B with */
+/* elements B(i,j)=R(i)*A(i,j)*C(j) have absolute value 1. */
+
+/* R(i) and C(j) are restricted to be between SMLNUM = smallest safe */
+/* number and BIGNUM = largest safe number. Use of these scaling */
+/* factors is not guaranteed to reduce the condition number of A but */
+/* works well in practice. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The M-by-N matrix whose equilibration factors are */
+/* to be computed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* R (output) DOUBLE PRECISION array, dimension (M) */
+/* If INFO = 0 or INFO > M, R contains the row scale factors */
+/* for A. */
+
+/* C (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, C contains the column scale factors for A. */
+
+/* ROWCND (output) DOUBLE PRECISION */
+/* If INFO = 0 or INFO > M, ROWCND contains the ratio of the */
+/* smallest R(i) to the largest R(i). If ROWCND >= 0.1 and */
+/* AMAX is neither too large nor too small, it is not worth */
+/* scaling by R. */
+
+/* COLCND (output) DOUBLE PRECISION */
+/* If INFO = 0, COLCND contains the ratio of the smallest */
+/* C(i) to the largest C(i). If COLCND >= 0.1, it is not */
+/* worth scaling by C. */
+
+/* AMAX (output) DOUBLE PRECISION */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= M: the i-th row of A is exactly zero */
+/* > M: the (i-M)-th column of A is exactly zero */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --r__;
+ --c__;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGEEQU", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ *rowcnd = 1.;
+ *colcnd = 1.;
+ *amax = 0.;
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ smlnum = dlamch_("S");
+ bignum = 1. / smlnum;
+
+/* Compute row scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__[i__] = 0.;
+/* L10: */
+ }
+
+/* Find the maximum element in each row. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ d__3 = r__[i__], d__4 = (d__1 = a[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&a[i__ + j * a_dim1]), abs(d__2));
+ r__[i__] = max(d__3,d__4);
+/* L20: */
+ }
+/* L30: */
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = rcmax, d__2 = r__[i__];
+ rcmax = max(d__1,d__2);
+/* Computing MIN */
+ d__1 = rcmin, d__2 = r__[i__];
+ rcmin = min(d__1,d__2);
+/* L40: */
+ }
+ *amax = rcmax;
+
+ if (rcmin == 0.) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (r__[i__] == 0.) {
+ *info = i__;
+ return 0;
+ }
+/* L50: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MIN */
+/* Computing MAX */
+ d__2 = r__[i__];
+ d__1 = max(d__2,smlnum);
+ r__[i__] = 1. / min(d__1,bignum);
+/* L60: */
+ }
+
+/* Compute ROWCND = min(R(I)) / max(R(I)) */
+
+ *rowcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ }
+
+/* Compute column scale factors */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j] = 0.;
+/* L70: */
+ }
+
+/* Find the maximum element in each column, */
+/* assuming the row scaling computed above. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ d__3 = c__[j], d__4 = ((d__1 = a[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&a[i__ + j * a_dim1]), abs(d__2))) * r__[i__];
+ c__[j] = max(d__3,d__4);
+/* L80: */
+ }
+/* L90: */
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ d__1 = rcmin, d__2 = c__[j];
+ rcmin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = rcmax, d__2 = c__[j];
+ rcmax = max(d__1,d__2);
+/* L100: */
+ }
+
+ if (rcmin == 0.) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (c__[j] == 0.) {
+ *info = *m + j;
+ return 0;
+ }
+/* L110: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+/* Computing MAX */
+ d__2 = c__[j];
+ d__1 = max(d__2,smlnum);
+ c__[j] = 1. / min(d__1,bignum);
+/* L120: */
+ }
+
+/* Compute COLCND = min(C(J)) / max(C(J)) */
+
+ *colcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ }
+
+ return 0;
+
+/* End of ZGEEQU */
+
+} /* zgeequ_ */
diff --git a/contrib/libs/clapack/zgeequb.c b/contrib/libs/clapack/zgeequb.c
new file mode 100644
index 0000000000..f284cc30fc
--- /dev/null
+++ b/contrib/libs/clapack/zgeequb.c
@@ -0,0 +1,332 @@
+/* zgeequb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zgeequb_(integer *m, integer *n, doublecomplex *a,
+ integer *lda, doublereal *r__, doublereal *c__, doublereal *rowcnd,
+ doublereal *colcnd, doublereal *amax, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double log(doublereal), d_imag(doublecomplex *), pow_di(doublereal *,
+ integer *);
+
+ /* Local variables */
+ integer i__, j;
+ doublereal radix, rcmin, rcmax;
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum, logrdx, smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- Contributed by James Demmel, Deaglan Halligan, Yozo Hida and -- */
+/* -- Jason Riedy of Univ. of California Berkeley. -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley and NAG Ltd. -- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGEEQUB computes row and column scalings intended to equilibrate an */
+/* M-by-N matrix A and reduce its condition number. R returns the row */
+/* scale factors and C the column scale factors, chosen to try to make */
+/* the largest element in each row and column of the matrix B with */
+/* elements B(i,j)=R(i)*A(i,j)*C(j) have an absolute value of at most */
+/* the radix. */
+
+/* R(i) and C(j) are restricted to be a power of the radix between */
+/* SMLNUM = smallest safe number and BIGNUM = largest safe number. Use */
+/* of these scaling factors is not guaranteed to reduce the condition */
+/* number of A but works well in practice. */
+
+/* This routine differs from ZGEEQU by restricting the scaling factors */
+/* to a power of the radix. Barring over- and underflow, scaling by */
+/* these factors introduces no additional rounding errors. However, the */
+/* scaled entries' magnitudes are no longer approximately 1 but lie */
+/* between sqrt(radix) and 1/sqrt(radix). */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The M-by-N matrix whose equilibration factors are */
+/* to be computed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* R (output) DOUBLE PRECISION array, dimension (M) */
+/* If INFO = 0 or INFO > M, R contains the row scale factors */
+/* for A. */
+
+/* C (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, C contains the column scale factors for A. */
+
+/* ROWCND (output) DOUBLE PRECISION */
+/* If INFO = 0 or INFO > M, ROWCND contains the ratio of the */
+/* smallest R(i) to the largest R(i). If ROWCND >= 0.1 and */
+/* AMAX is neither too large nor too small, it is not worth */
+/* scaling by R. */
+
+/* COLCND (output) DOUBLE PRECISION */
+/* If INFO = 0, COLCND contains the ratio of the smallest */
+/* C(i) to the largest C(i). If COLCND >= 0.1, it is not */
+/* worth scaling by C. */
+
+/* AMAX (output) DOUBLE PRECISION */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= M: the i-th row of A is exactly zero */
+/* > M: the (i-M)-th column of A is exactly zero */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --r__;
+ --c__;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGEEQUB", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ *rowcnd = 1.;
+ *colcnd = 1.;
+ *amax = 0.;
+ return 0;
+ }
+
+/* Get machine constants. Assume SMLNUM is a power of the radix. */
+
+ smlnum = dlamch_("S");
+ bignum = 1. / smlnum;
+ radix = dlamch_("B");
+ logrdx = log(radix);
+
+/* Compute row scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__[i__] = 0.;
+/* L10: */
+ }
+
+/* Find the maximum element in each row. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ d__3 = r__[i__], d__4 = (d__1 = a[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&a[i__ + j * a_dim1]), abs(d__2));
+ r__[i__] = max(d__3,d__4);
+/* L20: */
+ }
+/* L30: */
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (r__[i__] > 0.) {
+ i__2 = (integer) (log(r__[i__]) / logrdx);
+ r__[i__] = pow_di(&radix, &i__2);
+ }
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = rcmax, d__2 = r__[i__];
+ rcmax = max(d__1,d__2);
+/* Computing MIN */
+ d__1 = rcmin, d__2 = r__[i__];
+ rcmin = min(d__1,d__2);
+/* L40: */
+ }
+ *amax = rcmax;
+
+ if (rcmin == 0.) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (r__[i__] == 0.) {
+ *info = i__;
+ return 0;
+ }
+/* L50: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MIN */
+/* Computing MAX */
+ d__2 = r__[i__];
+ d__1 = max(d__2,smlnum);
+ r__[i__] = 1. / min(d__1,bignum);
+/* L60: */
+ }
+
+/* Compute ROWCND = min(R(I)) / max(R(I)). */
+
+ *rowcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ }
+
+/* Compute column scale factors. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j] = 0.;
+/* L70: */
+ }
+
+/* Find the maximum element in each column, */
+/* assuming the row scaling computed above. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ d__3 = c__[j], d__4 = ((d__1 = a[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&a[i__ + j * a_dim1]), abs(d__2))) * r__[i__];
+ c__[j] = max(d__3,d__4);
+/* L80: */
+ }
+ if (c__[j] > 0.) {
+ i__2 = (integer) (log(c__[j]) / logrdx);
+ c__[j] = pow_di(&radix, &i__2);
+ }
+/* L90: */
+ }
+
+/* Find the maximum and minimum scale factors. */
+
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ d__1 = rcmin, d__2 = c__[j];
+ rcmin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = rcmax, d__2 = c__[j];
+ rcmax = max(d__1,d__2);
+/* L100: */
+ }
+
+ if (rcmin == 0.) {
+
+/* Find the first zero scale factor and return an error code. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (c__[j] == 0.) {
+ *info = *m + j;
+ return 0;
+ }
+/* L110: */
+ }
+ } else {
+
+/* Invert the scale factors. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+/* Computing MAX */
+ d__2 = c__[j];
+ d__1 = max(d__2,smlnum);
+ c__[j] = 1. / min(d__1,bignum);
+/* L120: */
+ }
+
+/* Compute COLCND = min(C(J)) / max(C(J)). */
+
+ *colcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ }
+
+ return 0;
+
+/* End of ZGEEQUB */
+
+} /* zgeequb_ */
diff --git a/contrib/libs/clapack/zgees.c b/contrib/libs/clapack/zgees.c
new file mode 100644
index 0000000000..ab32998f59
--- /dev/null
+++ b/contrib/libs/clapack/zgees.c
@@ -0,0 +1,409 @@
+/* zgees.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zgees_(char *jobvs, char *sort, L_fp select, integer *n,
+ doublecomplex *a, integer *lda, integer *sdim, doublecomplex *w,
+ doublecomplex *vs, integer *ldvs, doublecomplex *work, integer *lwork,
+ doublereal *rwork, logical *bwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, vs_dim1, vs_offset, i__1, i__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ doublereal s;
+ integer ihi, ilo;
+ doublereal dum[1], eps, sep;
+ integer ibal;
+ doublereal anrm;
+ integer ierr, itau, iwrk, icond, ieval;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), dlabad_(doublereal *, doublereal *);
+ logical scalea;
+ extern doublereal dlamch_(char *);
+ doublereal cscale;
+ extern /* Subroutine */ int zgebak_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublecomplex *, integer *,
+ integer *), zgebal_(char *, integer *,
+ doublecomplex *, integer *, integer *, integer *, doublereal *,
+ integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ doublereal bignum;
+ extern /* Subroutine */ int zgehrd_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *), zlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublecomplex *,
+ integer *, integer *), zlacpy_(char *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *);
+ integer minwrk, maxwrk;
+ doublereal smlnum;
+ extern /* Subroutine */ int zhseqr_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+ integer hswork;
+ extern /* Subroutine */ int zunghr_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *);
+ logical wantst, lquery, wantvs;
+ extern /* Subroutine */ int ztrsen_(char *, char *, logical *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublereal *, doublereal *,
+ doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+/* .. Function Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGEES computes for an N-by-N complex nonsymmetric matrix A, the */
+/* eigenvalues, the Schur form T, and, optionally, the matrix of Schur */
+/* vectors Z. This gives the Schur factorization A = Z*T*(Z**H). */
+
+/* Optionally, it also orders the eigenvalues on the diagonal of the */
+/* Schur form so that selected eigenvalues are at the top left. */
+/* The leading columns of Z then form an orthonormal basis for the */
+/* invariant subspace corresponding to the selected eigenvalues. */
+
+/* A complex matrix is in Schur form if it is upper triangular. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVS (input) CHARACTER*1 */
+/* = 'N': Schur vectors are not computed; */
+/* = 'V': Schur vectors are computed. */
+
+/* SORT (input) CHARACTER*1 */
+/* Specifies whether or not to order the eigenvalues on the */
+/* diagonal of the Schur form. */
+/* = 'N': Eigenvalues are not ordered; */
+/* = 'S': Eigenvalues are ordered (see SELECT). */
+
+/* SELECT (external procedure) LOGICAL FUNCTION of one COMPLEX*16 argument */
+/* SELECT must be declared EXTERNAL in the calling subroutine. */
+/* If SORT = 'S', SELECT is used to select eigenvalues to order */
+/* to the top left of the Schur form. */
+/* If SORT = 'N', SELECT is not referenced. */
+/* The eigenvalue W(j) is selected if SELECT(W(j)) is true. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the N-by-N matrix A. */
+/* On exit, A has been overwritten by its Schur form T. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* SDIM (output) INTEGER */
+/* If SORT = 'N', SDIM = 0. */
+/* If SORT = 'S', SDIM = number of eigenvalues for which */
+/* SELECT is true. */
+
+/* W (output) COMPLEX*16 array, dimension (N) */
+/* W contains the computed eigenvalues, in the same order that */
+/* they appear on the diagonal of the output Schur form T. */
+
+/* VS (output) COMPLEX*16 array, dimension (LDVS,N) */
+/* If JOBVS = 'V', VS contains the unitary matrix Z of Schur */
+/* vectors. */
+/* If JOBVS = 'N', VS is not referenced. */
+
+/* LDVS (input) INTEGER */
+/* The leading dimension of the array VS. LDVS >= 1; if */
+/* JOBVS = 'V', LDVS >= N. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,2*N). */
+/* For good performance, LWORK must generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* BWORK (workspace) LOGICAL array, dimension (N) */
+/* Not referenced if SORT = 'N'. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, and i is */
+/* <= N: the QR algorithm failed to compute all the */
+/* eigenvalues; elements 1:ILO-1 and i+1:N of W */
+/* contain those eigenvalues which have converged; */
+/* if JOBVS = 'V', VS contains the matrix which */
+/* reduces A to its partially converged Schur form. */
+/* = N+1: the eigenvalues could not be reordered because */
+/* some eigenvalues were too close to separate (the */
+/* problem is very ill-conditioned); */
+/* = N+2: after reordering, roundoff changed values of */
+/* some complex eigenvalues so that leading */
+/* eigenvalues in the Schur form no longer satisfy */
+/* SELECT = .TRUE.. This could also be caused by */
+/* underflow due to scaling. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --w;
+ vs_dim1 = *ldvs;
+ vs_offset = 1 + vs_dim1;
+ vs -= vs_offset;
+ --work;
+ --rwork;
+ --bwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ wantvs = lsame_(jobvs, "V");
+ wantst = lsame_(sort, "S");
+ if (! wantvs && ! lsame_(jobvs, "N")) {
+ *info = -1;
+ } else if (! wantst && ! lsame_(sort, "N")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else if (*ldvs < 1 || wantvs && *ldvs < *n) {
+ *info = -10;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* CWorkspace refers to complex workspace, and RWorkspace to real */
+/* workspace. NB refers to the optimal block size for the */
+/* immediately following subroutine, as returned by ILAENV. */
+/* HSWORK refers to the workspace preferred by ZHSEQR, as */
+/* calculated below. HSWORK is computed assuming ILO=1 and IHI=N, */
+/* the worst case.) */
+
+ if (*info == 0) {
+ if (*n == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ } else {
+ maxwrk = *n + *n * ilaenv_(&c__1, "ZGEHRD", " ", n, &c__1, n, &
+ c__0);
+ minwrk = *n << 1;
+
+ zhseqr_("S", jobvs, n, &c__1, n, &a[a_offset], lda, &w[1], &vs[
+ vs_offset], ldvs, &work[1], &c_n1, &ieval);
+ hswork = (integer) work[1].r;
+
+ if (! wantvs) {
+ maxwrk = max(maxwrk,hswork);
+ } else {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + (*n - 1) * ilaenv_(&c__1, "ZUNGHR",
+ " ", n, &c__1, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ maxwrk = max(maxwrk,hswork);
+ }
+ }
+ work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGEES ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *sdim = 0;
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = dlamch_("P");
+ smlnum = dlamch_("S");
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = zlange_("M", n, n, &a[a_offset], lda, dum);
+ scalea = FALSE_;
+ if (anrm > 0. && anrm < smlnum) {
+ scalea = TRUE_;
+ cscale = smlnum;
+ } else if (anrm > bignum) {
+ scalea = TRUE_;
+ cscale = bignum;
+ }
+ if (scalea) {
+ zlascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Permute the matrix to make it more nearly triangular */
+/* (CWorkspace: none) */
+/* (RWorkspace: need N) */
+
+ ibal = 1;
+ zgebal_("P", n, &a[a_offset], lda, &ilo, &ihi, &rwork[ibal], &ierr);
+
+/* Reduce to upper Hessenberg form */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: none) */
+
+ itau = 1;
+ iwrk = *n + itau;
+ i__1 = *lwork - iwrk + 1;
+ zgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1,
+ &ierr);
+
+ if (wantvs) {
+
+/* Copy Householder vectors to VS */
+
+ zlacpy_("L", n, n, &a[a_offset], lda, &vs[vs_offset], ldvs)
+ ;
+
+/* Generate unitary matrix in VS */
+/* (CWorkspace: need 2*N-1, prefer N+(N-1)*NB) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwrk + 1;
+ zunghr_(n, &ilo, &ihi, &vs[vs_offset], ldvs, &work[itau], &work[iwrk],
+ &i__1, &ierr);
+ }
+
+ *sdim = 0;
+
+/* Perform QR iteration, accumulating Schur vectors in VS if desired */
+/* (CWorkspace: need 1, prefer HSWORK (see comments) ) */
+/* (RWorkspace: none) */
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ zhseqr_("S", jobvs, n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vs[
+ vs_offset], ldvs, &work[iwrk], &i__1, &ieval);
+ if (ieval > 0) {
+ *info = ieval;
+ }
+
+/* Sort eigenvalues if desired */
+
+ if (wantst && *info == 0) {
+ if (scalea) {
+ zlascl_("G", &c__0, &c__0, &cscale, &anrm, n, &c__1, &w[1], n, &
+ ierr);
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ bwork[i__] = (*select)(&w[i__]);
+/* L10: */
+ }
+
+/* Reorder eigenvalues and transform Schur vectors */
+/* (CWorkspace: none) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwrk + 1;
+ ztrsen_("N", jobvs, &bwork[1], n, &a[a_offset], lda, &vs[vs_offset],
+ ldvs, &w[1], sdim, &s, &sep, &work[iwrk], &i__1, &icond);
+ }
+
+ if (wantvs) {
+
+/* Undo balancing */
+/* (CWorkspace: none) */
+/* (RWorkspace: need N) */
+
+ zgebak_("P", "R", n, &ilo, &ihi, &rwork[ibal], n, &vs[vs_offset],
+ ldvs, &ierr);
+ }
+
+ if (scalea) {
+
+/* Undo scaling for the Schur form of A */
+
+ zlascl_("U", &c__0, &c__0, &cscale, &anrm, n, n, &a[a_offset], lda, &
+ ierr);
+ i__1 = *lda + 1;
+ zcopy_(n, &a[a_offset], &i__1, &w[1], &c__1);
+ }
+
+ work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+ return 0;
+
+/* End of ZGEES */
+
+} /* zgees_ */
diff --git a/contrib/libs/clapack/zgeesx.c b/contrib/libs/clapack/zgeesx.c
new file mode 100644
index 0000000000..c04109e6d7
--- /dev/null
+++ b/contrib/libs/clapack/zgeesx.c
@@ -0,0 +1,477 @@
+/* zgeesx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zgeesx_(char *jobvs, char *sort, L_fp select, char *
+	sense, integer *n, doublecomplex *a, integer *lda, integer *sdim,
+	doublecomplex *w, doublecomplex *vs, integer *ldvs, doublereal *
+	rconde, doublereal *rcondv, doublecomplex *work, integer *lwork,
+	doublereal *rwork, logical *bwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, vs_dim1, vs_offset, i__1, i__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, ihi, ilo;
+    doublereal dum[1], eps;
+    integer ibal;
+    doublereal anrm;
+    integer ierr, itau, iwrk, lwrk, icond, ieval;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *), dlabad_(doublereal *, doublereal *);
+    logical scalea;
+    extern doublereal dlamch_(char *);
+    doublereal cscale;
+    extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, integer *), zgebak_(char *, char *, integer *,
+	    integer *, integer *, doublereal *, integer *, doublecomplex *,
+	    integer *, integer *), zgebal_(char *, integer *,
+	    doublecomplex *, integer *, integer *, integer *, doublereal *,
+	    integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+	    integer *, doublereal *);
+    doublereal bignum;
+    extern /* Subroutine */ int zgehrd_(integer *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, integer *), zlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublecomplex *,
+	    integer *, integer *);
+    logical wantsb, wantse;
+    extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *);
+    integer minwrk, maxwrk;
+    logical wantsn;
+    doublereal smlnum;
+    extern /* Subroutine */ int zhseqr_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+    integer hswork;
+    extern /* Subroutine */ int zunghr_(integer *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, integer *);
+    logical wantst, wantsv, wantvs;
+    extern /* Subroutine */ int ztrsen_(char *, char *, logical *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublereal *, doublereal *,
+	    doublecomplex *, integer *, integer *);
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+/*     .. Function Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZGEESX computes for an N-by-N complex nonsymmetric matrix A, the */
+/*  eigenvalues, the Schur form T, and, optionally, the matrix of Schur */
+/*  vectors Z.  This gives the Schur factorization A = Z*T*(Z**H). */
+
+/*  Optionally, it also orders the eigenvalues on the diagonal of the */
+/*  Schur form so that selected eigenvalues are at the top left; */
+/*  computes a reciprocal condition number for the average of the */
+/*  selected eigenvalues (RCONDE); and computes a reciprocal condition */
+/*  number for the right invariant subspace corresponding to the */
+/*  selected eigenvalues (RCONDV).  The leading columns of Z form an */
+/*  orthonormal basis for this invariant subspace. */
+
+/*  For further explanation of the reciprocal condition numbers RCONDE */
+/*  and RCONDV, see Section 4.10 of the LAPACK Users' Guide (where */
+/*  these quantities are called s and sep respectively). */
+
+/*  A complex matrix is in Schur form if it is upper triangular. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOBVS   (input) CHARACTER*1 */
+/*          = 'N': Schur vectors are not computed; */
+/*          = 'V': Schur vectors are computed. */
+
+/*  SORT    (input) CHARACTER*1 */
+/*          Specifies whether or not to order the eigenvalues on the */
+/*          diagonal of the Schur form. */
+/*          = 'N': Eigenvalues are not ordered; */
+/*          = 'S': Eigenvalues are ordered (see SELECT). */
+
+/*  SELECT  (external procedure) LOGICAL FUNCTION of one COMPLEX*16 argument */
+/*          SELECT must be declared EXTERNAL in the calling subroutine. */
+/*          If SORT = 'S', SELECT is used to select eigenvalues to order */
+/*          to the top left of the Schur form. */
+/*          If SORT = 'N', SELECT is not referenced. */
+/*          An eigenvalue W(j) is selected if SELECT(W(j)) is true. */
+
+/*  SENSE   (input) CHARACTER*1 */
+/*          Determines which reciprocal condition numbers are computed. */
+/*          = 'N': None are computed; */
+/*          = 'E': Computed for average of selected eigenvalues only; */
+/*          = 'V': Computed for selected right invariant subspace only; */
+/*          = 'B': Computed for both. */
+/*          If SENSE = 'E', 'V' or 'B', SORT must equal 'S'. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A. N >= 0. */
+
+/*  A       (input/output) COMPLEX*16 array, dimension (LDA, N) */
+/*          On entry, the N-by-N matrix A. */
+/*          On exit, A is overwritten by its Schur form T. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  SDIM    (output) INTEGER */
+/*          If SORT = 'N', SDIM = 0. */
+/*          If SORT = 'S', SDIM = number of eigenvalues for which */
+/*                         SELECT is true. */
+
+/*  W       (output) COMPLEX*16 array, dimension (N) */
+/*          W contains the computed eigenvalues, in the same order */
+/*          that they appear on the diagonal of the output Schur form T. */
+
+/*  VS      (output) COMPLEX*16 array, dimension (LDVS,N) */
+/*          If JOBVS = 'V', VS contains the unitary matrix Z of Schur */
+/*          vectors. */
+/*          If JOBVS = 'N', VS is not referenced. */
+
+/*  LDVS    (input) INTEGER */
+/*          The leading dimension of the array VS.  LDVS >= 1, and if */
+/*          JOBVS = 'V', LDVS >= N. */
+
+/*  RCONDE  (output) DOUBLE PRECISION */
+/*          If SENSE = 'E' or 'B', RCONDE contains the reciprocal */
+/*          condition number for the average of the selected eigenvalues. */
+/*          Not referenced if SENSE = 'N' or 'V'. */
+
+/*  RCONDV  (output) DOUBLE PRECISION */
+/*          If SENSE = 'V' or 'B', RCONDV contains the reciprocal */
+/*          condition number for the selected right invariant subspace. */
+/*          Not referenced if SENSE = 'N' or 'E'. */
+
+/*  WORK    (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/*          On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/*  LWORK   (input) INTEGER */
+/*          The dimension of the array WORK.  LWORK >= max(1,2*N). */
+/*          Also, if SENSE = 'E' or 'V' or 'B', LWORK >= 2*SDIM*(N-SDIM), */
+/*          where SDIM is the number of selected eigenvalues computed by */
+/*          this routine.  Note that 2*SDIM*(N-SDIM) <= N*N/2.  Note also */
+/*          that an error is only returned if LWORK < max(1,2*N), but if */
+/*          SENSE = 'E' or 'V' or 'B' this may not be large enough. */
+/*          For good performance, LWORK must generally be larger. */
+
+/*          If LWORK = -1, then a workspace query is assumed; the routine */
+/*          only calculates upper bound on the optimal size of the */
+/*          array WORK, returns this value as the first entry of the WORK */
+/*          array, and no error message related to LWORK is issued by */
+/*          XERBLA. */
+
+/*  RWORK   (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/*  BWORK   (workspace) LOGICAL array, dimension (N) */
+/*          Not referenced if SORT = 'N'. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0: successful exit */
+/*          < 0: if INFO = -i, the i-th argument had an illegal value. */
+/*          > 0: if INFO = i, and i is */
+/*             <= N: the QR algorithm failed to compute all the */
+/*                   eigenvalues; elements 1:ILO-1 and i+1:N of W */
+/*                   contain those eigenvalues which have converged; if */
+/*                   JOBVS = 'V', VS contains the transformation which */
+/*                   reduces A to its partially converged Schur form. */
+/*             = N+1: the eigenvalues could not be reordered because some */
+/*                   eigenvalues were too close to separate (the problem */
+/*                   is very ill-conditioned); */
+/*             = N+2: after reordering, roundoff changed values of some */
+/*                   complex eigenvalues so that leading eigenvalues in */
+/*                   the Schur form no longer satisfy SELECT=.TRUE.  This */
+/*                   could also be caused by underflow due to scaling. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Decode and test the input arguments; INFO = -i flags argument i */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --w;
+    vs_dim1 = *ldvs;
+    vs_offset = 1 + vs_dim1;
+    vs -= vs_offset;
+    --work;
+    --rwork;
+    --bwork;
+
+    /* Function Body */
+    *info = 0;
+    wantvs = lsame_(jobvs, "V");
+    wantst = lsame_(sort, "S");
+    wantsn = lsame_(sense, "N");
+    wantse = lsame_(sense, "E");
+    wantsv = lsame_(sense, "V");
+    wantsb = lsame_(sense, "B");
+    if (! wantvs && ! lsame_(jobvs, "N")) {
+	*info = -1;
+    } else if (! wantst && ! lsame_(sort, "N")) {
+	*info = -2;
+    } else if (! (wantsn || wantse || wantsv || wantsb) || ! wantst && !
+	    wantsn) {
+	*info = -4;
+    } else if (*n < 0) {
+	*info = -5;
+    } else if (*lda < max(1,*n)) {
+	*info = -7;
+    } else if (*ldvs < 1 || wantvs && *ldvs < *n) {
+	*info = -11;
+    }
+
+/*     Compute workspace */
+/*      (Note: Comments in the code beginning "Workspace:" describe the */
+/*       minimal amount of real workspace needed at that point in the */
+/*       code, as well as the preferred amount for good performance. */
+/*       CWorkspace refers to complex workspace, and RWorkspace to real */
+/*       workspace. NB refers to the optimal block size for the */
+/*       immediately following subroutine, as returned by ILAENV. */
+/*       HSWORK refers to the workspace preferred by ZHSEQR, as */
+/*       calculated below. HSWORK is computed assuming ILO=1 and IHI=N, */
+/*       the worst case. */
+/*       If SENSE = 'E', 'V' or 'B', then the amount of workspace needed */
+/*       depends on SDIM, which is computed by the routine ZTRSEN later */
+/*       in the code.) */
+
+    if (*info == 0) {
+	if (*n == 0) {
+	    minwrk = 1;
+	    lwrk = 1;
+	} else {
+	    maxwrk = *n + *n * ilaenv_(&c__1, "ZGEHRD", " ", n, &c__1, n, &
+		    c__0);
+	    minwrk = *n << 1;
+
+	    zhseqr_("S", jobvs, n, &c__1, n, &a[a_offset], lda, &w[1], &vs[
+		    vs_offset], ldvs, &work[1], &c_n1, &ieval);
+	    hswork = (integer) work[1].r;
+
+	    if (! wantvs) {
+		maxwrk = max(maxwrk,hswork);
+	    } else {
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *n + (*n - 1) * ilaenv_(&c__1, "ZUNGHR",
+			 " ", n, &c__1, n, &c_n1);
+		maxwrk = max(i__1,i__2);
+		maxwrk = max(maxwrk,hswork);
+	    }
+	    lwrk = maxwrk;
+	    if (! wantsn) {
+/* Computing MAX */
+		i__1 = lwrk, i__2 = *n * *n / 2;
+		lwrk = max(i__1,i__2);
+	    }
+	}
+	work[1].r = (doublereal) lwrk, work[1].i = 0.;
+
+	if (*lwork < minwrk) {
+	    *info = -15;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGEESX", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible (empty matrix: only SDIM needs setting) */
+
+    if (*n == 0) {
+	*sdim = 0;
+	return 0;
+    }
+
+/*     Get machine constants used for the safe scaling thresholds below */
+
+    eps = dlamch_("P");
+    smlnum = dlamch_("S");
+    bignum = 1. / smlnum;
+    dlabad_(&smlnum, &bignum);
+    smlnum = sqrt(smlnum) / eps;
+    bignum = 1. / smlnum;
+
+/*     Scale A if max element outside range [SMLNUM,BIGNUM], to avoid over/underflow */
+
+    anrm = zlange_("M", n, n, &a[a_offset], lda, dum);
+    scalea = FALSE_;
+    if (anrm > 0. && anrm < smlnum) {
+	scalea = TRUE_;
+	cscale = smlnum;
+    } else if (anrm > bignum) {
+	scalea = TRUE_;
+	cscale = bignum;
+    }
+    if (scalea) {
+	zlascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+		ierr);
+    }
+
+
+/*     Balance (permute only) the matrix to make it more nearly triangular */
+/*     (CWorkspace: none) */
+/*     (RWorkspace: need N) */
+
+    ibal = 1;
+    zgebal_("P", n, &a[a_offset], lda, &ilo, &ihi, &rwork[ibal], &ierr);
+
+/*     Reduce to upper Hessenberg form */
+/*     (CWorkspace: need 2*N, prefer N+N*NB) */
+/*     (RWorkspace: none) */
+
+    itau = 1;
+    iwrk = *n + itau;
+    i__1 = *lwork - iwrk + 1;
+    zgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1,
+	     &ierr);
+
+    if (wantvs) {
+
+/*        Copy Householder vectors to VS */
+
+	zlacpy_("L", n, n, &a[a_offset], lda, &vs[vs_offset], ldvs)
+		;
+
+/*        Generate unitary matrix in VS */
+/*        (CWorkspace: need 2*N-1, prefer N+(N-1)*NB) */
+/*        (RWorkspace: none) */
+
+	i__1 = *lwork - iwrk + 1;
+	zunghr_(n, &ilo, &ihi, &vs[vs_offset], ldvs, &work[itau], &work[iwrk],
+		 &i__1, &ierr);
+    }
+
+    *sdim = 0;
+
+/*     Perform QR iteration, accumulating Schur vectors in VS if desired */
+/*     (CWorkspace: need 1, prefer HSWORK (see comments) ) */
+/*     (RWorkspace: none) */
+
+    iwrk = itau;
+    i__1 = *lwork - iwrk + 1;
+    zhseqr_("S", jobvs, n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vs[
+	    vs_offset], ldvs, &work[iwrk], &i__1, &ieval);
+    if (ieval > 0) {
+	*info = ieval;
+    }
+
+/*     Sort eigenvalues if desired (SORT = 'S'), unless QR iteration failed */
+
+    if (wantst && *info == 0) {
+	if (scalea) {
+	    zlascl_("G", &c__0, &c__0, &cscale, &anrm, n, &c__1, &w[1], n, &
+		    ierr);
+	}
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    bwork[i__] = (*select)(&w[i__]);
+/* L10: */
+	}
+
+/*        Reorder eigenvalues, transform Schur vectors, and compute */
+/*        reciprocal condition numbers */
+/*        (CWorkspace: if SENSE is not 'N', need 2*SDIM*(N-SDIM) */
+/*                     otherwise, need none ) */
+/*        (RWorkspace: none) */
+
+	i__1 = *lwork - iwrk + 1;
+	ztrsen_(sense, jobvs, &bwork[1], n, &a[a_offset], lda, &vs[vs_offset],
+		 ldvs, &w[1], sdim, rconde, rcondv, &work[iwrk], &i__1, &
+		icond);
+	if (! wantsn) {
+/* Computing MAX */
+	    i__1 = maxwrk, i__2 = (*sdim << 1) * (*n - *sdim);
+	    maxwrk = max(i__1,i__2);
+	}
+	if (icond == -14) {
+
+/*           ZTRSEN ran out of complex workspace; report as the LWORK error */
+
+	    *info = -15;
+	}
+    }
+
+    if (wantvs) {
+
+/*        Undo balancing */
+/*        (CWorkspace: none) */
+/*        (RWorkspace: need N) */
+
+	zgebak_("P", "R", n, &ilo, &ihi, &rwork[ibal], n, &vs[vs_offset],
+		ldvs, &ierr);
+    }
+
+    if (scalea) {
+
+/*        Undo scaling for the Schur form of A and copy its diagonal to W */
+
+	zlascl_("U", &c__0, &c__0, &cscale, &anrm, n, n, &a[a_offset], lda, &
+		ierr);
+	i__1 = *lda + 1;
+	zcopy_(n, &a[a_offset], &i__1, &w[1], &c__1);
+	if ((wantsv || wantsb) && *info == 0) {
+	    dum[0] = *rcondv;
+	    dlascl_("G", &c__0, &c__0, &cscale, &anrm, &c__1, &c__1, dum, &
+		    c__1, &ierr);
+	    *rcondv = dum[0];
+	}
+    }
+
+    work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+    return 0;
+
+/*     End of ZGEESX */
+
+} /* zgeesx_ */
diff --git a/contrib/libs/clapack/zgeev.c b/contrib/libs/clapack/zgeev.c
new file mode 100644
index 0000000000..e4690accbb
--- /dev/null
+++ b/contrib/libs/clapack/zgeev.c
@@ -0,0 +1,533 @@
+/* zgeev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zgeev_(char *jobvl, char *jobvr, integer *n,
+	doublecomplex *a, integer *lda, doublecomplex *w, doublecomplex *vl,
+	integer *ldvl, doublecomplex *vr, integer *ldvr, doublecomplex *work,
+	integer *lwork, doublereal *rwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+	    i__2, i__3;
+    doublereal d__1, d__2;
+    doublecomplex z__1, z__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal), d_imag(doublecomplex *);
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__, k, ihi;
+    doublereal scl;
+    integer ilo;
+    doublereal dum[1], eps;
+    doublecomplex tmp;
+    integer ibal;
+    char side[1];
+    doublereal anrm;
+    integer ierr, itau, iwrk, nout;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+	    doublecomplex *, integer *), dlabad_(doublereal *, doublereal *);
+    extern doublereal dznrm2_(integer *, doublecomplex *, integer *);
+    logical scalea;
+    extern doublereal dlamch_(char *);
+    doublereal cscale;
+    extern /* Subroutine */ int zgebak_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, integer *, doublecomplex *, integer *,
+	    integer *), zgebal_(char *, integer *,
+	    doublecomplex *, integer *, integer *, integer *, doublereal *,
+	    integer *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    logical select[1];
+    extern /* Subroutine */ int zdscal_(integer *, doublereal *,
+	    doublecomplex *, integer *);
+    doublereal bignum;
+    extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+	    integer *, doublereal *);
+    extern /* Subroutine */ int zgehrd_(integer *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, integer *), zlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublecomplex *,
+	    integer *, integer *), zlacpy_(char *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *, integer *);
+    integer minwrk, maxwrk;
+    logical wantvl;
+    doublereal smlnum;
+    integer hswork, irwork;
+    extern /* Subroutine */ int zhseqr_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *), ztrevc_(char *, char *, logical *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, integer *, integer *, doublecomplex *,
+	    doublereal *, integer *);
+    logical lquery, wantvr;
+    extern /* Subroutine */ int zunghr_(integer *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, integer *);
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZGEEV computes for an N-by-N complex nonsymmetric matrix A, the */
+/*  eigenvalues and, optionally, the left and/or right eigenvectors. */
+
+/*  The right eigenvector v(j) of A satisfies */
+/*                   A * v(j) = lambda(j) * v(j) */
+/*  where lambda(j) is its eigenvalue. */
+/*  The left eigenvector u(j) of A satisfies */
+/*                u(j)**H * A = lambda(j) * u(j)**H */
+/*  where u(j)**H denotes the conjugate transpose of u(j). */
+
+/*  The computed eigenvectors are normalized to have Euclidean norm */
+/*  equal to 1 and largest component real. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOBVL   (input) CHARACTER*1 */
+/*          = 'N': left eigenvectors of A are not computed; */
+/*          = 'V': left eigenvectors of are computed. */
+
+/*  JOBVR   (input) CHARACTER*1 */
+/*          = 'N': right eigenvectors of A are not computed; */
+/*          = 'V': right eigenvectors of A are computed. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A. N >= 0. */
+
+/*  A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/*          On entry, the N-by-N matrix A. */
+/*          On exit, A has been overwritten. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  W       (output) COMPLEX*16 array, dimension (N) */
+/*          W contains the computed eigenvalues. */
+
+/*  VL      (output) COMPLEX*16 array, dimension (LDVL,N) */
+/*          If JOBVL = 'V', the left eigenvectors u(j) are stored one */
+/*          after another in the columns of VL, in the same order */
+/*          as their eigenvalues. */
+/*          If JOBVL = 'N', VL is not referenced. */
+/*          u(j) = VL(:,j), the j-th column of VL. */
+
+/*  LDVL    (input) INTEGER */
+/*          The leading dimension of the array VL.  LDVL >= 1; if */
+/*          JOBVL = 'V', LDVL >= N. */
+
+/*  VR      (output) COMPLEX*16 array, dimension (LDVR,N) */
+/*          If JOBVR = 'V', the right eigenvectors v(j) are stored one */
+/*          after another in the columns of VR, in the same order */
+/*          as their eigenvalues. */
+/*          If JOBVR = 'N', VR is not referenced. */
+/*          v(j) = VR(:,j), the j-th column of VR. */
+
+/*  LDVR    (input) INTEGER */
+/*          The leading dimension of the array VR.  LDVR >= 1; if */
+/*          JOBVR = 'V', LDVR >= N. */
+
+/*  WORK    (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/*          On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/*  LWORK   (input) INTEGER */
+/*          The dimension of the array WORK.  LWORK >= max(1,2*N). */
+/*          For good performance, LWORK must generally be larger. */
+
+/*          If LWORK = -1, then a workspace query is assumed; the routine */
+/*          only calculates the optimal size of the WORK array, returns */
+/*          this value as the first entry of the WORK array, and no error */
+/*          message related to LWORK is issued by XERBLA. */
+
+/*  RWORK   (workspace) DOUBLE PRECISION array, dimension (2*N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */
+/*          > 0:  if INFO = i, the QR algorithm failed to compute all the */
+/*                eigenvalues, and no eigenvectors have been computed; */
+/*                elements and i+1:N of W contain eigenvalues which have */
+/*                converged. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Decode and test the input arguments; INFO = -i flags argument i */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --w;
+    vl_dim1 = *ldvl;
+    vl_offset = 1 + vl_dim1;
+    vl -= vl_offset;
+    vr_dim1 = *ldvr;
+    vr_offset = 1 + vr_dim1;
+    vr -= vr_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1;
+    wantvl = lsame_(jobvl, "V");
+    wantvr = lsame_(jobvr, "V");
+    if (! wantvl && ! lsame_(jobvl, "N")) {
+	*info = -1;
+    } else if (! wantvr && ! lsame_(jobvr, "N")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldvl < 1 || wantvl && *ldvl < *n) {
+	*info = -8;
+    } else if (*ldvr < 1 || wantvr && *ldvr < *n) {
+	*info = -10;
+    }
+
+/*     Compute workspace */
+/*      (Note: Comments in the code beginning "Workspace:" describe the */
+/*       minimal amount of workspace needed at that point in the code, */
+/*       as well as the preferred amount for good performance. */
+/*       CWorkspace refers to complex workspace, and RWorkspace to real */
+/*       workspace. NB refers to the optimal block size for the */
+/*       immediately following subroutine, as returned by ILAENV. */
+/*       HSWORK refers to the workspace preferred by ZHSEQR, as */
+/*       calculated below. HSWORK is computed assuming ILO=1 and IHI=N, */
+/*       the worst case.) */
+
+    if (*info == 0) {
+	if (*n == 0) {
+	    minwrk = 1;
+	    maxwrk = 1;
+	} else {
+	    maxwrk = *n + *n * ilaenv_(&c__1, "ZGEHRD", " ", n, &c__1, n, &
+		    c__0);
+	    minwrk = *n << 1;
+	    if (wantvl) {
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *n + (*n - 1) * ilaenv_(&c__1, "ZUNGHR",
+			 " ", n, &c__1, n, &c_n1);
+		maxwrk = max(i__1,i__2);
+		zhseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &w[1], &vl[
+			vl_offset], ldvl, &work[1], &c_n1, info);
+	    } else if (wantvr) {
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *n + (*n - 1) * ilaenv_(&c__1, "ZUNGHR",
+			 " ", n, &c__1, n, &c_n1);
+		maxwrk = max(i__1,i__2);
+		zhseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &w[1], &vr[
+			vr_offset], ldvr, &work[1], &c_n1, info);
+	    } else {
+		zhseqr_("E", "N", n, &c__1, n, &a[a_offset], lda, &w[1], &vr[
+			vr_offset], ldvr, &work[1], &c_n1, info);
+	    }
+	    hswork = (integer) work[1].r;
+/* Computing MAX */
+	    i__1 = max(maxwrk,hswork);
+	    maxwrk = max(i__1,minwrk);
+	}
+	work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+
+	if (*lwork < minwrk && ! lquery) {
+	    *info = -12;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGEEV ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/*     Quick return if possible (N = 0: nothing to compute) */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Get machine constants used for the safe scaling thresholds below */
+
+    eps = dlamch_("P");
+    smlnum = dlamch_("S");
+    bignum = 1. / smlnum;
+    dlabad_(&smlnum, &bignum);
+    smlnum = sqrt(smlnum) / eps;
+    bignum = 1. / smlnum;
+
+/*     Scale A if max element outside range [SMLNUM,BIGNUM], to avoid over/underflow */
+
+    anrm = zlange_("M", n, n, &a[a_offset], lda, dum);
+    scalea = FALSE_;
+    if (anrm > 0. && anrm < smlnum) {
+	scalea = TRUE_;
+	cscale = smlnum;
+    } else if (anrm > bignum) {
+	scalea = TRUE_;
+	cscale = bignum;
+    }
+    if (scalea) {
+	zlascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+		ierr);
+    }
+
+/*     Balance the matrix */
+/*     (CWorkspace: none) */
+/*     (RWorkspace: need N) */
+
+    ibal = 1;
+    zgebal_("B", n, &a[a_offset], lda, &ilo, &ihi, &rwork[ibal], &ierr);
+
+/*     Reduce to upper Hessenberg form */
+/*     (CWorkspace: need 2*N, prefer N+N*NB) */
+/*     (RWorkspace: none) */
+
+    itau = 1;
+    iwrk = itau + *n;
+    i__1 = *lwork - iwrk + 1;
+    zgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1,
+	     &ierr);
+
+    if (wantvl) {
+
+/*        Want left eigenvectors */
+/*        Copy Householder vectors to VL */
+
+	*(unsigned char *)side = 'L';
+	zlacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl)
+		;
+
+/*        Generate unitary matrix in VL */
+/*        (CWorkspace: need 2*N-1, prefer N+(N-1)*NB) */
+/*        (RWorkspace: none) */
+
+	i__1 = *lwork - iwrk + 1;
+	zunghr_(n, &ilo, &ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk],
+		 &i__1, &ierr);
+
+/*        Perform QR iteration, accumulating Schur vectors in VL */
+/*        (CWorkspace: need 1, prefer HSWORK (see comments) ) */
+/*        (RWorkspace: none) */
+
+	iwrk = itau;
+	i__1 = *lwork - iwrk + 1;
+	zhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vl[
+		vl_offset], ldvl, &work[iwrk], &i__1, info);
+
+	if (wantvr) {
+
+/*           Want left and right eigenvectors */
+/*           Copy Schur vectors to VR */
+
+	    *(unsigned char *)side = 'B';
+	    zlacpy_("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr);
+	}
+
+    } else if (wantvr) {
+
+/*        Want right eigenvectors */
+/*        Copy Householder vectors to VR */
+
+	*(unsigned char *)side = 'R';
+	zlacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr)
+		;
+
+/*        Generate unitary matrix in VR */
+/*        (CWorkspace: need 2*N-1, prefer N+(N-1)*NB) */
+/*        (RWorkspace: none) */
+
+	i__1 = *lwork - iwrk + 1;
+	zunghr_(n, &ilo, &ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk],
+		 &i__1, &ierr);
+
+/*        Perform QR iteration, accumulating Schur vectors in VR */
+/*        (CWorkspace: need 1, prefer HSWORK (see comments) ) */
+/*        (RWorkspace: none) */
+
+	iwrk = itau;
+	i__1 = *lwork - iwrk + 1;
+	zhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vr[
+		vr_offset], ldvr, &work[iwrk], &i__1, info);
+
+    } else {
+
+/*        Compute eigenvalues only */
+/*        (CWorkspace: need 1, prefer HSWORK (see comments) ) */
+/*        (RWorkspace: none) */
+
+	iwrk = itau;
+	i__1 = *lwork - iwrk + 1;
+	zhseqr_("E", "N", n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vr[
+		vr_offset], ldvr, &work[iwrk], &i__1, info);
+    }
+
+/*     If INFO > 0 from ZHSEQR, skip eigenvectors and just undo the scaling */
+
+    if (*info > 0) {
+	goto L50;
+    }
+
+    if (wantvl || wantvr) {
+
+/*        Compute left and/or right eigenvectors */
+/*        (CWorkspace: need 2*N) */
+/*        (RWorkspace: need 2*N) */
+
+	irwork = ibal + *n;
+	ztrevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl,
+		 &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &rwork[irwork],
+		&ierr);
+    }
+
+    if (wantvl) {
+
+/*        Undo balancing of left eigenvectors */
+/*        (CWorkspace: none) */
+/*        (RWorkspace: need N) */
+
+	zgebak_("B", "L", n, &ilo, &ihi, &rwork[ibal], n, &vl[vl_offset],
+		ldvl, &ierr);
+
+/*        Normalize left eigenvectors and make largest component real */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    scl = 1. / dznrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+	    zdscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		i__3 = k + i__ * vl_dim1;
+/* Computing 2nd power */
+		d__1 = vl[i__3].r;
+/* Computing 2nd power */
+		d__2 = d_imag(&vl[k + i__ * vl_dim1]);
+		rwork[irwork + k - 1] = d__1 * d__1 + d__2 * d__2;
+/* L10: */
+	    }
+	    k = idamax_(n, &rwork[irwork], &c__1);
+	    d_cnjg(&z__2, &vl[k + i__ * vl_dim1]);
+	    d__1 = sqrt(rwork[irwork + k - 1]);
+	    z__1.r = z__2.r / d__1, z__1.i = z__2.i / d__1;
+	    tmp.r = z__1.r, tmp.i = z__1.i;
+	    zscal_(n, &tmp, &vl[i__ * vl_dim1 + 1], &c__1);
+	    i__2 = k + i__ * vl_dim1;
+	    i__3 = k + i__ * vl_dim1;
+	    d__1 = vl[i__3].r;
+	    z__1.r = d__1, z__1.i = 0.;
+	    vl[i__2].r = z__1.r, vl[i__2].i = z__1.i;
+/* L20: */
+	}
+    }
+
+    if (wantvr) {
+
+/*        Undo balancing of right eigenvectors */
+/*        (CWorkspace: none) */
+/*        (RWorkspace: need N) */
+
+	zgebak_("B", "R", n, &ilo, &ihi, &rwork[ibal], n, &vr[vr_offset],
+		ldvr, &ierr);
+
+/*        Normalize right eigenvectors and make largest component real */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    scl = 1. / dznrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+	    zdscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		i__3 = k + i__ * vr_dim1;
+/* Computing 2nd power */
+		d__1 = vr[i__3].r;
+/* Computing 2nd power */
+		d__2 = d_imag(&vr[k + i__ * vr_dim1]);
+		rwork[irwork + k - 1] = d__1 * d__1 + d__2 * d__2;
+/* L30: */
+	    }
+	    k = idamax_(n, &rwork[irwork], &c__1);
+	    d_cnjg(&z__2, &vr[k + i__ * vr_dim1]);
+	    d__1 = sqrt(rwork[irwork + k - 1]);
+	    z__1.r = z__2.r / d__1, z__1.i = z__2.i / d__1;
+	    tmp.r = z__1.r, tmp.i = z__1.i;
+	    zscal_(n, &tmp, &vr[i__ * vr_dim1 + 1], &c__1);
+	    i__2 = k + i__ * vr_dim1;
+	    i__3 = k + i__ * vr_dim1;
+	    d__1 = vr[i__3].r;
+	    z__1.r = d__1, z__1.i = 0.;
+	    vr[i__2].r = z__1.r, vr[i__2].i = z__1.i;
+/* L40: */
+	}
+    }
+
+/*     Undo scaling of the converged eigenvalues if scaling was applied */
+
+L50:
+    if (scalea) {
+	i__1 = *n - *info;
+/* Computing MAX */
+	i__3 = *n - *info;
+	i__2 = max(i__3,1);
+	zlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &w[*info + 1]
+, &i__2, &ierr);
+	if (*info > 0) {
+	    i__1 = ilo - 1;
+	    zlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &w[1], n,
+		     &ierr);
+	}
+    }
+
+    work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+    return 0;
+
+/*     End of ZGEEV */
+
+} /* zgeev_ */
diff --git a/contrib/libs/clapack/zgeevx.c b/contrib/libs/clapack/zgeevx.c
new file mode 100644
index 0000000000..3b3fbdf505
--- /dev/null
+++ b/contrib/libs/clapack/zgeevx.c
@@ -0,0 +1,686 @@
+/* zgeevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zgeevx_(char *balanc, char *jobvl, char *jobvr, char *
+	sense, integer *n, doublecomplex *a, integer *lda, doublecomplex *w,
+	doublecomplex *vl, integer *ldvl, doublecomplex *vr, integer *ldvr,
+	integer *ilo, integer *ihi, doublereal *scale, doublereal *abnrm,
+	doublereal *rconde, doublereal *rcondv, doublecomplex *work, integer *
+	lwork, doublereal *rwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+	    i__2, i__3;
+    doublereal d__1, d__2;
+    doublecomplex z__1, z__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal), d_imag(doublecomplex *);
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__, k;
+    char job[1];
+    doublereal scl, dum[1], eps;
+    doublecomplex tmp;
+    char side[1];
+    doublereal anrm;
+    integer ierr, itau, iwrk, nout, icond;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+	    doublecomplex *, integer *), dlabad_(doublereal *, doublereal *);
+    extern doublereal dznrm2_(integer *, doublecomplex *, integer *);
+    logical scalea;
+    extern doublereal dlamch_(char *);
+    doublereal cscale;
+    extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, integer *), zgebak_(char *, char *, integer *,
+	    integer *, integer *, doublereal *, integer *, doublecomplex *,
+	    integer *, integer *), zgebal_(char *, integer *,
+	    doublecomplex *, integer *, integer *, integer *, doublereal *,
+	    integer *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    logical select[1];
+    extern /* Subroutine */ int zdscal_(integer *, doublereal *,
+	    doublecomplex *, integer *);
+    doublereal bignum;
+    extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+	    integer *, doublereal *);
+    extern /* Subroutine */ int zgehrd_(integer *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, integer *), zlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublecomplex *,
+	     integer *, integer *), zlacpy_(char *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *, integer *);
+    integer minwrk, maxwrk;
+    logical wantvl, wntsnb;
+    integer hswork;
+    logical wntsne;
+    doublereal smlnum;
+    extern /* Subroutine */ int zhseqr_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+    logical lquery, wantvr;
+    extern /* Subroutine */ int ztrevc_(char *, char *, logical *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, integer *, integer *, doublecomplex *,
+	     doublereal *, integer *), ztrsna_(char *, char *,
+	    logical *, integer *, doublecomplex *, integer *, doublecomplex *
+, integer *, doublecomplex *, integer *, doublereal *, doublereal
+	    *, integer *, integer *, doublecomplex *, integer *, doublereal *,
+	     integer *), zunghr_(integer *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, integer *);
+    logical wntsnn, wntsnv;
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZGEEVX computes for an N-by-N complex nonsymmetric matrix A, the */
+/*  eigenvalues and, optionally, the left and/or right eigenvectors. */
+
+/*  Optionally also, it computes a balancing transformation to improve */
+/*  the conditioning of the eigenvalues and eigenvectors (ILO, IHI, */
+/*  SCALE, and ABNRM), reciprocal condition numbers for the eigenvalues */
+/*  (RCONDE), and reciprocal condition numbers for the right */
+/*  eigenvectors (RCONDV). */
+
+/*  The right eigenvector v(j) of A satisfies */
+/*                   A * v(j) = lambda(j) * v(j) */
+/*  where lambda(j) is its eigenvalue. */
+/*  The left eigenvector u(j) of A satisfies */
+/*                u(j)**H * A = lambda(j) * u(j)**H */
+/*  where u(j)**H denotes the conjugate transpose of u(j). */
+
+/*  The computed eigenvectors are normalized to have Euclidean norm */
+/*  equal to 1 and largest component real. */
+
+/*  Balancing a matrix means permuting the rows and columns to make it */
+/*  more nearly upper triangular, and applying a diagonal similarity */
+/*  transformation D * A * D**(-1), where D is a diagonal matrix, to */
+/*  make its rows and columns closer in norm and the condition numbers */
+/*  of its eigenvalues and eigenvectors smaller.  The computed */
+/*  reciprocal condition numbers correspond to the balanced matrix. */
+/*  Permuting rows and columns will not change the condition numbers */
+/*  (in exact arithmetic) but diagonal scaling will.  For further */
+/*  explanation of balancing, see section 4.10.2 of the LAPACK */
+/*  Users' Guide. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  BALANC  (input) CHARACTER*1 */
+/*          Indicates how the input matrix should be diagonally scaled */
+/*          and/or permuted to improve the conditioning of its */
+/*          eigenvalues. */
+/*          = 'N': Do not diagonally scale or permute; */
+/*          = 'P': Perform permutations to make the matrix more nearly */
+/*                 upper triangular. Do not diagonally scale; */
+/*          = 'S': Diagonally scale the matrix, ie. replace A by */
+/*                 D*A*D**(-1), where D is a diagonal matrix chosen */
+/*                 to make the rows and columns of A more equal in */
+/*                 norm. Do not permute; */
+/*          = 'B': Both diagonally scale and permute A. */
+
+/*          Computed reciprocal condition numbers will be for the matrix */
+/*          after balancing and/or permuting. Permuting does not change */
+/*          condition numbers (in exact arithmetic), but balancing does. */
+
+/*  JOBVL   (input) CHARACTER*1 */
+/*          = 'N': left eigenvectors of A are not computed; */
+/*          = 'V': left eigenvectors of A are computed. */
+/*          If SENSE = 'E' or 'B', JOBVL must = 'V'. */
+
+/*  JOBVR   (input) CHARACTER*1 */
+/*          = 'N': right eigenvectors of A are not computed; */
+/*          = 'V': right eigenvectors of A are computed. */
+/*          If SENSE = 'E' or 'B', JOBVR must = 'V'. */
+
+/*  SENSE   (input) CHARACTER*1 */
+/*          Determines which reciprocal condition numbers are computed. */
+/*          = 'N': None are computed; */
+/*          = 'E': Computed for eigenvalues only; */
+/*          = 'V': Computed for right eigenvectors only; */
+/*          = 'B': Computed for eigenvalues and right eigenvectors. */
+
+/*          If SENSE = 'E' or 'B', both left and right eigenvectors */
+/*          must also be computed (JOBVL = 'V' and JOBVR = 'V'). */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A. N >= 0. */
+
+/*  A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/*          On entry, the N-by-N matrix A. */
+/*          On exit, A has been overwritten.  If JOBVL = 'V' or */
+/*          JOBVR = 'V', A contains the Schur form of the balanced */
+/*          version of the matrix A. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  W       (output) COMPLEX*16 array, dimension (N) */
+/*          W contains the computed eigenvalues. */
+
+/*  VL      (output) COMPLEX*16 array, dimension (LDVL,N) */
+/*          If JOBVL = 'V', the left eigenvectors u(j) are stored one */
+/*          after another in the columns of VL, in the same order */
+/*          as their eigenvalues. */
+/*          If JOBVL = 'N', VL is not referenced. */
+/*          u(j) = VL(:,j), the j-th column of VL. */
+
+/*  LDVL    (input) INTEGER */
+/*          The leading dimension of the array VL.  LDVL >= 1; if */
+/*          JOBVL = 'V', LDVL >= N. */
+
+/*  VR      (output) COMPLEX*16 array, dimension (LDVR,N) */
+/*          If JOBVR = 'V', the right eigenvectors v(j) are stored one */
+/*          after another in the columns of VR, in the same order */
+/*          as their eigenvalues. */
+/*          If JOBVR = 'N', VR is not referenced. */
+/*          v(j) = VR(:,j), the j-th column of VR. */
+
+/*  LDVR    (input) INTEGER */
+/*          The leading dimension of the array VR.  LDVR >= 1; if */
+/*          JOBVR = 'V', LDVR >= N. */
+
+/*  ILO     (output) INTEGER */
+/*  IHI     (output) INTEGER */
+/*          ILO and IHI are integer values determined when A was */
+/*          balanced.  The balanced A(i,j) = 0 if I > J and */
+/*          J = 1,...,ILO-1 or I = IHI+1,...,N. */
+
+/*  SCALE   (output) DOUBLE PRECISION array, dimension (N) */
+/*          Details of the permutations and scaling factors applied */
+/*          when balancing A.  If P(j) is the index of the row and column */
+/*          interchanged with row and column j, and D(j) is the scaling */
+/*          factor applied to row and column j, then */
+/*          SCALE(J) = P(J),    for J = 1,...,ILO-1 */
+/*                   = D(J),    for J = ILO,...,IHI */
+/*                   = P(J)     for J = IHI+1,...,N. */
+/*          The order in which the interchanges are made is N to IHI+1, */
+/*          then 1 to ILO-1. */
+
+/*  ABNRM   (output) DOUBLE PRECISION */
+/*          The one-norm of the balanced matrix (the maximum */
+/*          of the sum of absolute values of elements of any column). */
+
+/*  RCONDE  (output) DOUBLE PRECISION array, dimension (N) */
+/*          RCONDE(j) is the reciprocal condition number of the j-th */
+/*          eigenvalue. */
+
+/*  RCONDV  (output) DOUBLE PRECISION array, dimension (N) */
+/*          RCONDV(j) is the reciprocal condition number of the j-th */
+/*          right eigenvector. */
+
+/*  WORK    (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/*          On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/*  LWORK   (input) INTEGER */
+/*          The dimension of the array WORK.  If SENSE = 'N' or 'E', */
+/*          LWORK >= max(1,2*N), and if SENSE = 'V' or 'B', */
+/*          LWORK >= N*N+2*N. */
+/*          For good performance, LWORK must generally be larger. */
+
+/*          If LWORK = -1, then a workspace query is assumed; the routine */
+/*          only calculates the optimal size of the WORK array, returns */
+/*          this value as the first entry of the WORK array, and no error */
+/*          message related to LWORK is issued by XERBLA. */
+
+/*  RWORK   (workspace) DOUBLE PRECISION array, dimension (2*N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */
+/*          > 0:  if INFO = i, the QR algorithm failed to compute all the */
+/*                eigenvalues, and no eigenvectors or condition numbers */
+/*                have been computed; elements 1:ILO-1 and i+1:N of W */
+/*                contain eigenvalues which have converged. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input arguments for validity; INFO is set to -i when argument i is illegal */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --w;
+    vl_dim1 = *ldvl;
+    vl_offset = 1 + vl_dim1;
+    vl -= vl_offset;
+    vr_dim1 = *ldvr;
+    vr_offset = 1 + vr_dim1;
+    vr -= vr_offset;
+    --scale;
+    --rconde;
+    --rcondv;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1;
+    wantvl = lsame_(jobvl, "V");
+    wantvr = lsame_(jobvr, "V");
+    wntsnn = lsame_(sense, "N");
+    wntsne = lsame_(sense, "E");
+    wntsnv = lsame_(sense, "V");
+    wntsnb = lsame_(sense, "B");
+    if (! (lsame_(balanc, "N") || lsame_(balanc, "S") || lsame_(balanc, "P")
+	     || lsame_(balanc, "B"))) {
+	*info = -1;
+    } else if (! wantvl && ! lsame_(jobvl, "N")) {
+	*info = -2;
+    } else if (! wantvr && ! lsame_(jobvr, "N")) {
+	*info = -3;
+    } else if (! (wntsnn || wntsne || wntsnb || wntsnv) || (wntsne || wntsnb)
+	    && ! (wantvl && wantvr)) {
+	*info = -4;
+    } else if (*n < 0) {
+	*info = -5;
+    } else if (*lda < max(1,*n)) {
+	*info = -7;
+    } else if (*ldvl < 1 || wantvl && *ldvl < *n) {
+	*info = -10;
+    } else if (*ldvr < 1 || wantvr && *ldvr < *n) {
+	*info = -12;
+    }
+
+/*     Compute workspace */
+/*      (Note: Comments in the code beginning "Workspace:" describe the */
+/*       minimal amount of workspace needed at that point in the code, */
+/*       as well as the preferred amount for good performance. */
+/*       CWorkspace refers to complex workspace, and RWorkspace to real */
+/*       workspace. NB refers to the optimal block size for the */
+/*       immediately following subroutine, as returned by ILAENV. */
+/*       HSWORK refers to the workspace preferred by ZHSEQR, as */
+/*       calculated below. HSWORK is computed assuming ILO=1 and IHI=N, */
+/*       the worst case.) */
+
+    if (*info == 0) {
+	if (*n == 0) {
+	    minwrk = 1;
+	    maxwrk = 1;
+	} else {
+	    maxwrk = *n + *n * ilaenv_(&c__1, "ZGEHRD", " ", n, &c__1, n, &
+		    c__0);
+
+	    if (wantvl) {
+		zhseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &w[1], &vl[
+			vl_offset], ldvl, &work[1], &c_n1, info);
+	    } else if (wantvr) {
+		zhseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &w[1], &vr[
+			vr_offset], ldvr, &work[1], &c_n1, info);
+	    } else {
+		if (wntsnn) {
+		    zhseqr_("E", "N", n, &c__1, n, &a[a_offset], lda, &w[1], &
+			    vr[vr_offset], ldvr, &work[1], &c_n1, info);
+		} else {
+		    zhseqr_("S", "N", n, &c__1, n, &a[a_offset], lda, &w[1], &
+			    vr[vr_offset], ldvr, &work[1], &c_n1, info);
+		}
+	    }
+	    hswork = (integer) work[1].r;
+
+	    if (! wantvl && ! wantvr) {
+		minwrk = *n << 1;
+		if (! (wntsnn || wntsne)) {
+/* Computing MAX */
+		    i__1 = minwrk, i__2 = *n * *n + (*n << 1);
+		    minwrk = max(i__1,i__2);
+		}
+		maxwrk = max(maxwrk,hswork);
+		if (! (wntsnn || wntsne)) {
+/* Computing MAX */
+		    i__1 = maxwrk, i__2 = *n * *n + (*n << 1);
+		    maxwrk = max(i__1,i__2);
+		}
+	    } else {
+		minwrk = *n << 1;
+		if (! (wntsnn || wntsne)) {
+/* Computing MAX */
+		    i__1 = minwrk, i__2 = *n * *n + (*n << 1);
+		    minwrk = max(i__1,i__2);
+		}
+		maxwrk = max(maxwrk,hswork);
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *n + (*n - 1) * ilaenv_(&c__1, "ZUNGHR",
+			 " ", n, &c__1, n, &c_n1);
+		maxwrk = max(i__1,i__2);
+		if (! (wntsnn || wntsne)) {
+/* Computing MAX */
+		    i__1 = maxwrk, i__2 = *n * *n + (*n << 1);
+		    maxwrk = max(i__1,i__2);
+		}
+/* Computing MAX */
+		i__1 = maxwrk, i__2 = *n << 1;
+		maxwrk = max(i__1,i__2);
+	    }
+	    maxwrk = max(maxwrk,minwrk);
+	}
+	work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+
+	if (*lwork < minwrk && ! lquery) {
+	    *info = -20;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGEEVX", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/*     Quick return if possible (N = 0: nothing to compute) */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Get machine constants used as safe scaling thresholds below */
+
+    eps = dlamch_("P");
+    smlnum = dlamch_("S");
+    bignum = 1. / smlnum;
+    dlabad_(&smlnum, &bignum);
+    smlnum = sqrt(smlnum) / eps;
+    bignum = 1. / smlnum;
+
+/*     Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+    icond = 0;
+    anrm = zlange_("M", n, n, &a[a_offset], lda, dum);
+    scalea = FALSE_;
+    if (anrm > 0. && anrm < smlnum) {
+	scalea = TRUE_;
+	cscale = smlnum;
+    } else if (anrm > bignum) {
+	scalea = TRUE_;
+	cscale = bignum;
+    }
+    if (scalea) {
+	zlascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+		ierr);
+    }
+
+/*     Balance the matrix and compute ABNRM (rescaled to the original A if SCALEA) */
+
+    zgebal_(balanc, n, &a[a_offset], lda, ilo, ihi, &scale[1], &ierr);
+    *abnrm = zlange_("1", n, n, &a[a_offset], lda, dum);
+    if (scalea) {
+	dum[0] = *abnrm;
+	dlascl_("G", &c__0, &c__0, &cscale, &anrm, &c__1, &c__1, dum, &c__1, &
+		ierr);
+	*abnrm = dum[0];
+    }
+
+/*     Reduce to upper Hessenberg form */
+/*     (CWorkspace: need 2*N, prefer N+N*NB) */
+/*     (RWorkspace: none) */
+
+    itau = 1;
+    iwrk = itau + *n;
+    i__1 = *lwork - iwrk + 1;
+    zgehrd_(n, ilo, ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1, &
+	    ierr);
+
+    if (wantvl) {
+
+/*        Want left eigenvectors */
+/*        Copy Householder vectors to VL */
+
+	*(unsigned char *)side = 'L';
+	zlacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl)
+		;
+
+/*        Generate unitary matrix in VL */
+/*        (CWorkspace: need 2*N-1, prefer N+(N-1)*NB) */
+/*        (RWorkspace: none) */
+
+	i__1 = *lwork - iwrk + 1;
+	zunghr_(n, ilo, ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk], &
+		i__1, &ierr);
+
+/*        Perform QR iteration, accumulating Schur vectors in VL */
+/*        (CWorkspace: need 1, prefer HSWORK (see comments) ) */
+/*        (RWorkspace: none) */
+
+	iwrk = itau;
+	i__1 = *lwork - iwrk + 1;
+	zhseqr_("S", "V", n, ilo, ihi, &a[a_offset], lda, &w[1], &vl[
+		vl_offset], ldvl, &work[iwrk], &i__1, info);
+
+	if (wantvr) {
+
+/*           Want left and right eigenvectors */
+/*           Copy Schur vectors to VR */
+
+	    *(unsigned char *)side = 'B';
+	    zlacpy_("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr);
+	}
+
+    } else if (wantvr) {
+
+/*        Want right eigenvectors */
+/*        Copy Householder vectors to VR */
+
+	*(unsigned char *)side = 'R';
+	zlacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr)
+		;
+
+/*        Generate unitary matrix in VR */
+/*        (CWorkspace: need 2*N-1, prefer N+(N-1)*NB) */
+/*        (RWorkspace: none) */
+
+	i__1 = *lwork - iwrk + 1;
+	zunghr_(n, ilo, ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk], &
+		i__1, &ierr);
+
+/*        Perform QR iteration, accumulating Schur vectors in VR */
+/*        (CWorkspace: need 1, prefer HSWORK (see comments) ) */
+/*        (RWorkspace: none) */
+
+	iwrk = itau;
+	i__1 = *lwork - iwrk + 1;
+	zhseqr_("S", "V", n, ilo, ihi, &a[a_offset], lda, &w[1], &vr[
+		vr_offset], ldvr, &work[iwrk], &i__1, info);
+
+    } else {
+
+/*        Compute eigenvalues only */
+/*        If condition numbers desired, compute Schur form */
+
+	if (wntsnn) {
+	    *(unsigned char *)job = 'E';
+	} else {
+	    *(unsigned char *)job = 'S';
+	}
+
+/*        (CWorkspace: need 1, prefer HSWORK (see comments) ) */
+/*        (RWorkspace: none) */
+
+	iwrk = itau;
+	i__1 = *lwork - iwrk + 1;
+	zhseqr_(job, "N", n, ilo, ihi, &a[a_offset], lda, &w[1], &vr[
+		vr_offset], ldvr, &work[iwrk], &i__1, info);
+    }
+
+/*     If INFO > 0 from ZHSEQR, QR iteration failed: skip to rescaling of W at L50 */
+
+    if (*info > 0) {
+	goto L50;
+    }
+
+    if (wantvl || wantvr) {
+
+/*        Compute left and/or right eigenvectors */
+/*        (CWorkspace: need 2*N) */
+/*        (RWorkspace: need N) */
+
+	ztrevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl,
+		 &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &rwork[1], &
+		ierr);
+    }
+
+/*     Compute condition numbers if desired */
+/*     (CWorkspace: need N*N+2*N unless SENSE = 'E') */
+/*     (RWorkspace: need 2*N unless SENSE = 'E') */
+
+    if (! wntsnn) {
+	ztrsna_(sense, "A", select, n, &a[a_offset], lda, &vl[vl_offset],
+		ldvl, &vr[vr_offset], ldvr, &rconde[1], &rcondv[1], n, &nout,
+		&work[iwrk], n, &rwork[1], &icond);
+    }
+
+    if (wantvl) {
+
+/*        Undo balancing of left eigenvectors */
+
+	zgebak_(balanc, "L", n, ilo, ihi, &scale[1], n, &vl[vl_offset], ldvl,
+		&ierr);
+
+/*        Normalize left eigenvectors and make largest component real */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    scl = 1. / dznrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+	    zdscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		i__3 = k + i__ * vl_dim1;
+/* Computing 2nd power */
+		d__1 = vl[i__3].r;
+/* Computing 2nd power */
+		d__2 = d_imag(&vl[k + i__ * vl_dim1]);
+		rwork[k] = d__1 * d__1 + d__2 * d__2;
+/* L10: */
+	    }
+	    k = idamax_(n, &rwork[1], &c__1);
+	    d_cnjg(&z__2, &vl[k + i__ * vl_dim1]);
+	    d__1 = sqrt(rwork[k]);
+	    z__1.r = z__2.r / d__1, z__1.i = z__2.i / d__1;
+	    tmp.r = z__1.r, tmp.i = z__1.i;
+	    zscal_(n, &tmp, &vl[i__ * vl_dim1 + 1], &c__1);
+	    i__2 = k + i__ * vl_dim1;
+	    i__3 = k + i__ * vl_dim1;
+	    d__1 = vl[i__3].r;
+	    z__1.r = d__1, z__1.i = 0.;
+	    vl[i__2].r = z__1.r, vl[i__2].i = z__1.i;
+/* L20: */
+	}
+    }
+
+    if (wantvr) {
+
+/*        Undo balancing of right eigenvectors */
+
+	zgebak_(balanc, "R", n, ilo, ihi, &scale[1], n, &vr[vr_offset], ldvr,
+		&ierr);
+
+/*        Normalize right eigenvectors and make largest component real */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    scl = 1. / dznrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+	    zdscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		i__3 = k + i__ * vr_dim1;
+/* Computing 2nd power */
+		d__1 = vr[i__3].r;
+/* Computing 2nd power */
+		d__2 = d_imag(&vr[k + i__ * vr_dim1]);
+		rwork[k] = d__1 * d__1 + d__2 * d__2;
+/* L30: */
+	    }
+	    k = idamax_(n, &rwork[1], &c__1);
+	    d_cnjg(&z__2, &vr[k + i__ * vr_dim1]);
+	    d__1 = sqrt(rwork[k]);
+	    z__1.r = z__2.r / d__1, z__1.i = z__2.i / d__1;
+	    tmp.r = z__1.r, tmp.i = z__1.i;
+	    zscal_(n, &tmp, &vr[i__ * vr_dim1 + 1], &c__1);
+	    i__2 = k + i__ * vr_dim1;
+	    i__3 = k + i__ * vr_dim1;
+	    d__1 = vr[i__3].r;
+	    z__1.r = d__1, z__1.i = 0.;
+	    vr[i__2].r = z__1.r, vr[i__2].i = z__1.i;
+/* L40: */
+	}
+    }
+
+/*     Undo scaling if necessary (rescale W and, when computed, RCONDV) */
+
+L50:
+    if (scalea) {
+	i__1 = *n - *info;
+/* Computing MAX */
+	i__3 = *n - *info;
+	i__2 = max(i__3,1);
+	zlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &w[*info + 1]
+, &i__2, &ierr);
+	if (*info == 0) {
+	    if ((wntsnv || wntsnb) && icond == 0) {
+		dlascl_("G", &c__0, &c__0, &cscale, &anrm, n, &c__1, &rcondv[
+			1], n, &ierr);
+	    }
+	} else {
+	    i__1 = *ilo - 1;
+	    zlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &w[1], n,
+		     &ierr);
+	}
+    }
+
+    work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+    return 0;
+
+/*     End of ZGEEVX */
+
+} /* zgeevx_ */
diff --git a/contrib/libs/clapack/zgegs.c b/contrib/libs/clapack/zgegs.c
new file mode 100644
index 0000000000..027abc984a
--- /dev/null
+++ b/contrib/libs/clapack/zgegs.c
@@ -0,0 +1,543 @@
+/* zgegs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zgegs_(char *jobvsl, char *jobvsr, integer *n,
+ doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb,
+ doublecomplex *alpha, doublecomplex *beta, doublecomplex *vsl,
+ integer *ldvsl, doublecomplex *vsr, integer *ldvsr, doublecomplex *
+ work, integer *lwork, doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, vsl_dim1, vsl_offset,
+ vsr_dim1, vsr_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer nb, nb1, nb2, nb3, ihi, ilo;
+ doublereal eps, anrm, bnrm;
+ integer itau, lopt;
+ extern logical lsame_(char *, char *);
+ integer ileft, iinfo, icols;
+ logical ilvsl;
+ integer iwork;
+ logical ilvsr;
+ integer irows;
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int zggbak_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublecomplex *,
+ integer *, integer *), zggbal_(char *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *
+, integer *, doublereal *, doublereal *, doublereal *, integer *);
+ logical ilascl, ilbscl;
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ doublereal bignum;
+ integer ijobvl, iright;
+ extern /* Subroutine */ int zgghrd_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *
+), zlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublecomplex *,
+ integer *, integer *);
+ integer ijobvr;
+ extern /* Subroutine */ int zgeqrf_(integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *
+);
+ doublereal anrmto;
+ integer lwkmin;
+ doublereal bnrmto;
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *),
+ zlaset_(char *, integer *, integer *, doublecomplex *,
+ doublecomplex *, doublecomplex *, integer *), zhgeqz_(
+ char *, char *, char *, integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublereal *, integer *);
+ doublereal smlnum;
+ integer irwork, lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int zungqr_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *), zunmqr_(char *, char *, integer *, integer
+ *, integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine ZGGES. */
+
+/* ZGEGS computes the eigenvalues, Schur form, and, optionally, the */
+/* left and or/right Schur vectors of a complex matrix pair (A,B). */
+/* Given two square matrices A and B, the generalized Schur */
+/* factorization has the form */
+
+/* A = Q*S*Z**H, B = Q*T*Z**H */
+
+/* where Q and Z are unitary matrices and S and T are upper triangular. */
+/* The columns of Q are the left Schur vectors */
+/* and the columns of Z are the right Schur vectors. */
+
+/* If only the eigenvalues of (A,B) are needed, the driver routine */
+/* ZGEGV should be used instead. See ZGEGV for a description of the */
+/* eigenvalues of the generalized nonsymmetric eigenvalue problem */
+/* (GNEP). */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVSL (input) CHARACTER*1 */
+/* = 'N': do not compute the left Schur vectors; */
+/* = 'V': compute the left Schur vectors (returned in VSL). */
+
+/* JOBVSR (input) CHARACTER*1 */
+/* = 'N': do not compute the right Schur vectors; */
+/* = 'V': compute the right Schur vectors (returned in VSR). */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VSL, and VSR. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA, N) */
+/* On entry, the matrix A. */
+/* On exit, the upper triangular matrix S from the generalized */
+/* Schur factorization. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB, N) */
+/* On entry, the matrix B. */
+/* On exit, the upper triangular matrix T from the generalized */
+/* Schur factorization. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* ALPHA (output) COMPLEX*16 array, dimension (N) */
+/* The complex scalars alpha that define the eigenvalues of */
+/* GNEP. ALPHA(j) = S(j,j), the diagonal element of the Schur */
+/* form of A. */
+
+/* BETA (output) COMPLEX*16 array, dimension (N) */
+/* The non-negative real scalars beta that define the */
+/* eigenvalues of GNEP. BETA(j) = T(j,j), the diagonal element */
+/* of the triangular factor T. */
+
+/* Together, the quantities alpha = ALPHA(j) and beta = BETA(j) */
+/* represent the j-th eigenvalue of the matrix pair (A,B), in */
+/* one of the forms lambda = alpha/beta or mu = beta/alpha. */
+/* Since either lambda or mu may overflow, they should not, */
+/* in general, be computed. */
+
+
+/* VSL (output) COMPLEX*16 array, dimension (LDVSL,N) */
+/* If JOBVSL = 'V', the matrix of left Schur vectors Q. */
+/* Not referenced if JOBVSL = 'N'. */
+
+/* LDVSL (input) INTEGER */
+/* The leading dimension of the matrix VSL. LDVSL >= 1, and */
+/* if JOBVSL = 'V', LDVSL >= N. */
+
+/* VSR (output) COMPLEX*16 array, dimension (LDVSR,N) */
+/* If JOBVSR = 'V', the matrix of right Schur vectors Z. */
+/* Not referenced if JOBVSR = 'N'. */
+
+/* LDVSR (input) INTEGER */
+/* The leading dimension of the matrix VSR. LDVSR >= 1, and */
+/* if JOBVSR = 'V', LDVSR >= N. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,2*N). */
+/* For good performance, LWORK must generally be larger. */
+/* To compute the optimal value of LWORK, call ILAENV to get */
+/* blocksizes (for ZGEQRF, ZUNMQR, and CUNGQR.) Then compute: */
+/* NB -- MAX of the blocksizes for ZGEQRF, ZUNMQR, and CUNGQR; */
+/* the optimal LWORK is N*(NB+1). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* =1,...,N: */
+/* The QZ iteration failed. (A,B) are not in Schur */
+/* form, but ALPHA(j) and BETA(j) should be correct for */
+/* j=INFO+1,...,N. */
+/* > N: errors that usually indicate LAPACK problems: */
+/* =N+1: error return from ZGGBAL */
+/* =N+2: error return from ZGEQRF */
+/* =N+3: error return from ZUNMQR */
+/* =N+4: error return from ZUNGQR */
+/* =N+5: error return from ZGGHRD */
+/* =N+6: error return from ZHGEQZ (other than failed */
+/* iteration) */
+/* =N+7: error return from ZGGBAK (computing VSL) */
+/* =N+8: error return from ZGGBAK (computing VSR) */
+/* =N+9: error return from ZLASCL (various places) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alpha;
+ --beta;
+ vsl_dim1 = *ldvsl;
+ vsl_offset = 1 + vsl_dim1;
+ vsl -= vsl_offset;
+ vsr_dim1 = *ldvsr;
+ vsr_offset = 1 + vsr_dim1;
+ vsr -= vsr_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ if (lsame_(jobvsl, "N")) {
+ ijobvl = 1;
+ ilvsl = FALSE_;
+ } else if (lsame_(jobvsl, "V")) {
+ ijobvl = 2;
+ ilvsl = TRUE_;
+ } else {
+ ijobvl = -1;
+ ilvsl = FALSE_;
+ }
+
+ if (lsame_(jobvsr, "N")) {
+ ijobvr = 1;
+ ilvsr = FALSE_;
+ } else if (lsame_(jobvsr, "V")) {
+ ijobvr = 2;
+ ilvsr = TRUE_;
+ } else {
+ ijobvr = -1;
+ ilvsr = FALSE_;
+ }
+
+/* Test the input arguments */
+
+/* Computing MAX */
+ i__1 = *n << 1;
+ lwkmin = max(i__1,1);
+ lwkopt = lwkmin;
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ lquery = *lwork == -1;
+ *info = 0;
+ if (ijobvl <= 0) {
+ *info = -1;
+ } else if (ijobvr <= 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ } else if (*ldvsl < 1 || ilvsl && *ldvsl < *n) {
+ *info = -11;
+ } else if (*ldvsr < 1 || ilvsr && *ldvsr < *n) {
+ *info = -13;
+ } else if (*lwork < lwkmin && ! lquery) {
+ *info = -15;
+ }
+
+ if (*info == 0) {
+ nb1 = ilaenv_(&c__1, "ZGEQRF", " ", n, n, &c_n1, &c_n1);
+ nb2 = ilaenv_(&c__1, "ZUNMQR", " ", n, n, n, &c_n1);
+ nb3 = ilaenv_(&c__1, "ZUNGQR", " ", n, n, n, &c_n1);
+/* Computing MAX */
+ i__1 = max(nb1,nb2);
+ nb = max(i__1,nb3);
+ lopt = *n * (nb + 1);
+ work[1].r = (doublereal) lopt, work[1].i = 0.;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGEGS ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = dlamch_("E") * dlamch_("B");
+ safmin = dlamch_("S");
+ smlnum = *n * safmin / eps;
+ bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = zlange_("M", n, n, &a[a_offset], lda, &rwork[1]);
+ ilascl = FALSE_;
+ if (anrm > 0. && anrm < smlnum) {
+ anrmto = smlnum;
+ ilascl = TRUE_;
+ } else if (anrm > bignum) {
+ anrmto = bignum;
+ ilascl = TRUE_;
+ }
+
+ if (ilascl) {
+ zlascl_("G", &c_n1, &c_n1, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+ iinfo);
+ if (iinfo != 0) {
+ *info = *n + 9;
+ return 0;
+ }
+ }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+ bnrm = zlange_("M", n, n, &b[b_offset], ldb, &rwork[1]);
+ ilbscl = FALSE_;
+ if (bnrm > 0. && bnrm < smlnum) {
+ bnrmto = smlnum;
+ ilbscl = TRUE_;
+ } else if (bnrm > bignum) {
+ bnrmto = bignum;
+ ilbscl = TRUE_;
+ }
+
+ if (ilbscl) {
+ zlascl_("G", &c_n1, &c_n1, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+ iinfo);
+ if (iinfo != 0) {
+ *info = *n + 9;
+ return 0;
+ }
+ }
+
+/* Permute the matrix to make it more nearly triangular */
+
+ ileft = 1;
+ iright = *n + 1;
+ irwork = iright + *n;
+ iwork = 1;
+ zggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &rwork[
+ ileft], &rwork[iright], &rwork[irwork], &iinfo);
+ if (iinfo != 0) {
+ *info = *n + 1;
+ goto L10;
+ }
+
+/* Reduce B to triangular form, and initialize VSL and/or VSR */
+
+ irows = ihi + 1 - ilo;
+ icols = *n + 1 - ilo;
+ itau = iwork;
+ iwork = itau + irows;
+ i__1 = *lwork + 1 - iwork;
+ zgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+ iwork], &i__1, &iinfo);
+ if (iinfo >= 0) {
+/* Computing MAX */
+ i__3 = iwork;
+ i__1 = lwkopt, i__2 = (integer) work[i__3].r + iwork - 1;
+ lwkopt = max(i__1,i__2);
+ }
+ if (iinfo != 0) {
+ *info = *n + 2;
+ goto L10;
+ }
+
+ i__1 = *lwork + 1 - iwork;
+ zunmqr_("L", "C", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+ work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwork], &i__1, &
+ iinfo);
+ if (iinfo >= 0) {
+/* Computing MAX */
+ i__3 = iwork;
+ i__1 = lwkopt, i__2 = (integer) work[i__3].r + iwork - 1;
+ lwkopt = max(i__1,i__2);
+ }
+ if (iinfo != 0) {
+ *info = *n + 3;
+ goto L10;
+ }
+
+ if (ilvsl) {
+ zlaset_("Full", n, n, &c_b1, &c_b2, &vsl[vsl_offset], ldvsl);
+ i__1 = irows - 1;
+ i__2 = irows - 1;
+ zlacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vsl[ilo
+ + 1 + ilo * vsl_dim1], ldvsl);
+ i__1 = *lwork + 1 - iwork;
+ zungqr_(&irows, &irows, &irows, &vsl[ilo + ilo * vsl_dim1], ldvsl, &
+ work[itau], &work[iwork], &i__1, &iinfo);
+ if (iinfo >= 0) {
+/* Computing MAX */
+ i__3 = iwork;
+ i__1 = lwkopt, i__2 = (integer) work[i__3].r + iwork - 1;
+ lwkopt = max(i__1,i__2);
+ }
+ if (iinfo != 0) {
+ *info = *n + 4;
+ goto L10;
+ }
+ }
+
+ if (ilvsr) {
+ zlaset_("Full", n, n, &c_b1, &c_b2, &vsr[vsr_offset], ldvsr);
+ }
+
+/* Reduce to generalized Hessenberg form */
+
+ zgghrd_(jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset],
+ ldb, &vsl[vsl_offset], ldvsl, &vsr[vsr_offset], ldvsr, &iinfo);
+ if (iinfo != 0) {
+ *info = *n + 5;
+ goto L10;
+ }
+
+/* Perform QZ algorithm, computing Schur vectors if desired */
+
+ iwork = itau;
+ i__1 = *lwork + 1 - iwork;
+ zhgeqz_("S", jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+ b_offset], ldb, &alpha[1], &beta[1], &vsl[vsl_offset], ldvsl, &
+ vsr[vsr_offset], ldvsr, &work[iwork], &i__1, &rwork[irwork], &
+ iinfo);
+ if (iinfo >= 0) {
+/* Computing MAX */
+ i__3 = iwork;
+ i__1 = lwkopt, i__2 = (integer) work[i__3].r + iwork - 1;
+ lwkopt = max(i__1,i__2);
+ }
+ if (iinfo != 0) {
+ if (iinfo > 0 && iinfo <= *n) {
+ *info = iinfo;
+ } else if (iinfo > *n && iinfo <= *n << 1) {
+ *info = iinfo - *n;
+ } else {
+ *info = *n + 6;
+ }
+ goto L10;
+ }
+
+/* Apply permutation to VSL and VSR */
+
+ if (ilvsl) {
+ zggbak_("P", "L", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n, &
+ vsl[vsl_offset], ldvsl, &iinfo);
+ if (iinfo != 0) {
+ *info = *n + 7;
+ goto L10;
+ }
+ }
+ if (ilvsr) {
+ zggbak_("P", "R", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n, &
+ vsr[vsr_offset], ldvsr, &iinfo);
+ if (iinfo != 0) {
+ *info = *n + 8;
+ goto L10;
+ }
+ }
+
+/* Undo scaling */
+
+ if (ilascl) {
+ zlascl_("U", &c_n1, &c_n1, &anrmto, &anrm, n, n, &a[a_offset], lda, &
+ iinfo);
+ if (iinfo != 0) {
+ *info = *n + 9;
+ return 0;
+ }
+ zlascl_("G", &c_n1, &c_n1, &anrmto, &anrm, n, &c__1, &alpha[1], n, &
+ iinfo);
+ if (iinfo != 0) {
+ *info = *n + 9;
+ return 0;
+ }
+ }
+
+ if (ilbscl) {
+ zlascl_("U", &c_n1, &c_n1, &bnrmto, &bnrm, n, n, &b[b_offset], ldb, &
+ iinfo);
+ if (iinfo != 0) {
+ *info = *n + 9;
+ return 0;
+ }
+ zlascl_("G", &c_n1, &c_n1, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+ iinfo);
+ if (iinfo != 0) {
+ *info = *n + 9;
+ return 0;
+ }
+ }
+
+L10:
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZGEGS */
+
+} /* zgegs_ */
diff --git a/contrib/libs/clapack/zgegv.c b/contrib/libs/clapack/zgegv.c
new file mode 100644
index 0000000000..a244aa15c9
--- /dev/null
+++ b/contrib/libs/clapack/zgegv.c
@@ -0,0 +1,781 @@
+/* zgegv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c passes every argument by reference, so scalar constants used in
+   LAPACK/BLAS calls below are materialized as addressable statics:
+   c_b1/c_b2 are the complex 0 and 1 handed to zlaset_, c__1/c_n1 are
+   the integer +1/-1 used for increments and ILAENV queries, and c_b29
+   is the target norm (1.0) passed to zlascl_. */
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b29 = 1.;
+
+/* Subroutine */ int zgegv_(char *jobvl, char *jobvr, integer *n,
+	doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb,
+	doublecomplex *alpha, doublecomplex *beta, doublecomplex *vl, integer
+	*ldvl, doublecomplex *vr, integer *ldvr, doublecomplex *work, integer
+	*lwork, doublereal *rwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, vl_dim1, vl_offset, vr_dim1,
+	    vr_offset, i__1, i__2, i__3, i__4;
+    doublereal d__1, d__2, d__3, d__4;
+    doublecomplex z__1, z__2;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *);
+
+    /* Local variables */
+    integer jc, nb, in, jr, nb1, nb2, nb3, ihi, ilo;
+    doublereal eps;
+    logical ilv;
+    doublereal absb, anrm, bnrm;
+    integer itau;
+    doublereal temp;
+    logical ilvl, ilvr;
+    integer lopt;
+    doublereal anrm1, anrm2, bnrm1, bnrm2, absai, scale, absar, sbeta;
+    extern logical lsame_(char *, char *);
+    integer ileft, iinfo, icols, iwork, irows;
+    extern doublereal dlamch_(char *);
+    doublereal salfai;
+    extern /* Subroutine */ int zggbak_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublecomplex *,
+	    integer *, integer *), zggbal_(char *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *
+, integer *, doublereal *, doublereal *, doublereal *, integer *);
+    doublereal salfar, safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal safmax;
+    char chtemp[1];
+    logical ldumma[1];
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+	    integer *, doublereal *);
+    integer ijobvl, iright;
+    logical ilimit;
+    extern /* Subroutine */ int zgghrd_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *
+), zlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublecomplex *,
+	    integer *, integer *);
+    integer ijobvr;
+    extern /* Subroutine */ int zgeqrf_(integer *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, doublecomplex *, integer *, integer *
+);
+    integer lwkmin;
+    extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *),
+	    zlaset_(char *, integer *, integer *, doublecomplex *,
+	    doublecomplex *, doublecomplex *, integer *), ztgevc_(
+	    char *, char *, logical *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, integer *, integer *, doublecomplex *,
+	    doublereal *, integer *), zhgeqz_(char *, char *,
+	    char *, integer *, integer *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, integer *, doublereal *, integer *);
+    integer irwork, lwkopt;
+    logical lquery;
+    extern /* Subroutine */ int zungqr_(integer *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, integer *), zunmqr_(char *, char *, integer *, integer
+	    *, integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine ZGGEV. */
+
+/* ZGEGV computes the eigenvalues and, optionally, the left and/or right */
+/* eigenvectors of a complex matrix pair (A,B). */
+/* Given two square matrices A and B, */
+/* the generalized nonsymmetric eigenvalue problem (GNEP) is to find the */
+/* eigenvalues lambda and corresponding (non-zero) eigenvectors x such */
+/* that */
+/* A*x = lambda*B*x. */
+
+/* An alternate form is to find the eigenvalues mu and corresponding */
+/* eigenvectors y such that */
+/* mu*A*y = B*y. */
+
+/* These two forms are equivalent with mu = 1/lambda and x = y if */
+/* neither lambda nor mu is zero. In order to deal with the case that */
+/* lambda or mu is zero or small, two values alpha and beta are returned */
+/* for each eigenvalue, such that lambda = alpha/beta and */
+/* mu = beta/alpha. */
+
+/* The vectors x and y in the above equations are right eigenvectors of */
+/* the matrix pair (A,B). Vectors u and v satisfying */
+/* u**H*A = lambda*u**H*B or mu*v**H*A = v**H*B */
+/* are left eigenvectors of (A,B). */
+
+/* Note: this routine performs "full balancing" on A and B -- see */
+/* "Further Details", below. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVL (input) CHARACTER*1 */
+/* = 'N': do not compute the left generalized eigenvectors; */
+/* = 'V': compute the left generalized eigenvectors (returned */
+/* in VL). */
+
+/* JOBVR (input) CHARACTER*1 */
+/* = 'N': do not compute the right generalized eigenvectors; */
+/* = 'V': compute the right generalized eigenvectors (returned */
+/* in VR). */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VL, and VR. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA, N) */
+/* On entry, the matrix A. */
+/* If JOBVL = 'V' or JOBVR = 'V', then on exit A */
+/* contains the Schur form of A from the generalized Schur */
+/* factorization of the pair (A,B) after balancing. If no */
+/* eigenvectors were computed, then only the diagonal elements */
+/* of the Schur form will be correct. See ZGGHRD and ZHGEQZ */
+/* for details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB, N) */
+/* On entry, the matrix B. */
+/* If JOBVL = 'V' or JOBVR = 'V', then on exit B contains the */
+/* upper triangular matrix obtained from B in the generalized */
+/* Schur factorization of the pair (A,B) after balancing. */
+/* If no eigenvectors were computed, then only the diagonal */
+/* elements of B will be correct. See ZGGHRD and ZHGEQZ for */
+/* details. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* ALPHA (output) COMPLEX*16 array, dimension (N) */
+/* The complex scalars alpha that define the eigenvalues of */
+/* GNEP. */
+
+/* BETA (output) COMPLEX*16 array, dimension (N) */
+/* The complex scalars beta that define the eigenvalues of GNEP. */
+
+/* Together, the quantities alpha = ALPHA(j) and beta = BETA(j) */
+/* represent the j-th eigenvalue of the matrix pair (A,B), in */
+/* one of the forms lambda = alpha/beta or mu = beta/alpha. */
+/* Since either lambda or mu may overflow, they should not, */
+/* in general, be computed. */
+
+/* VL (output) COMPLEX*16 array, dimension (LDVL,N) */
+/* If JOBVL = 'V', the left eigenvectors u(j) are stored */
+/* in the columns of VL, in the same order as their eigenvalues. */
+/* Each eigenvector is scaled so that its largest component has */
+/* abs(real part) + abs(imag. part) = 1, except for eigenvectors */
+/* corresponding to an eigenvalue with alpha = beta = 0, which */
+/* are set to zero. */
+/* Not referenced if JOBVL = 'N'. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the matrix VL. LDVL >= 1, and */
+/* if JOBVL = 'V', LDVL >= N. */
+
+/* VR (output) COMPLEX*16 array, dimension (LDVR,N) */
+/* If JOBVR = 'V', the right eigenvectors x(j) are stored */
+/* in the columns of VR, in the same order as their eigenvalues. */
+/* Each eigenvector is scaled so that its largest component has */
+/* abs(real part) + abs(imag. part) = 1, except for eigenvectors */
+/* corresponding to an eigenvalue with alpha = beta = 0, which */
+/* are set to zero. */
+/* Not referenced if JOBVR = 'N'. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the matrix VR. LDVR >= 1, and */
+/* if JOBVR = 'V', LDVR >= N. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,2*N). */
+/* For good performance, LWORK must generally be larger. */
+/* To compute the optimal value of LWORK, call ILAENV to get */
+/* blocksizes (for ZGEQRF, ZUNMQR, and ZUNGQR.) Then compute: */
+/* NB -- MAX of the blocksizes for ZGEQRF, ZUNMQR, and ZUNGQR; */
+/* The optimal LWORK is MAX( 2*N, N*(NB+1) ). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace/output) DOUBLE PRECISION array, dimension (8*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* =1,...,N: */
+/* The QZ iteration failed. No eigenvectors have been */
+/* calculated, but ALPHA(j) and BETA(j) should be */
+/* correct for j=INFO+1,...,N. */
+/* > N: errors that usually indicate LAPACK problems: */
+/* =N+1: error return from ZGGBAL */
+/* =N+2: error return from ZGEQRF */
+/* =N+3: error return from ZUNMQR */
+/* =N+4: error return from ZUNGQR */
+/* =N+5: error return from ZGGHRD */
+/* =N+6: error return from ZHGEQZ (other than failed */
+/* iteration) */
+/* =N+7: error return from ZTGEVC */
+/* =N+8: error return from ZGGBAK (computing VL) */
+/* =N+9: error return from ZGGBAK (computing VR) */
+/* =N+10: error return from ZLASCL (various calls) */
+
+/* Further Details */
+/* =============== */
+
+/* Balancing */
+/* --------- */
+
+/* This driver calls ZGGBAL to both permute and scale rows and columns */
+/* of A and B. The permutations PL and PR are chosen so that PL*A*PR */
+/* and PL*B*R will be upper triangular except for the diagonal blocks */
+/* A(i:j,i:j) and B(i:j,i:j), with i and j as close together as */
+/* possible. The diagonal scaling matrices DL and DR are chosen so */
+/* that the pair DL*PL*A*PR*DR, DL*PL*B*PR*DR have elements close to */
+/* one (except for the elements that start out zero.) */
+
+/* After the eigenvalues and eigenvectors of the balanced matrices */
+/* have been computed, ZGGBAK transforms the eigenvectors back to what */
+/* they would have been (in perfect arithmetic) if they had not been */
+/* balanced. */
+
+/* Contents of A and B on Exit */
+/* -------- -- - --- - -- ---- */
+
+/* If any eigenvectors are computed (either JOBVL='V' or JOBVR='V' or */
+/* both), then on exit the arrays A and B will contain the complex Schur */
+/* form[*] of the "balanced" versions of A and B. If no eigenvectors */
+/* are computed, then only the diagonal blocks will be correct. */
+
+/* [*] In other words, upper triangular form. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --alpha;
+    --beta;
+    vl_dim1 = *ldvl;
+    vl_offset = 1 + vl_dim1;
+    vl -= vl_offset;
+    vr_dim1 = *ldvr;
+    vr_offset = 1 + vr_dim1;
+    vr -= vr_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    if (lsame_(jobvl, "N")) {
+	ijobvl = 1;
+	ilvl = FALSE_;
+    } else if (lsame_(jobvl, "V")) {
+	ijobvl = 2;
+	ilvl = TRUE_;
+    } else {
+	ijobvl = -1;
+	ilvl = FALSE_;
+    }
+
+    if (lsame_(jobvr, "N")) {
+	ijobvr = 1;
+	ilvr = FALSE_;
+    } else if (lsame_(jobvr, "V")) {
+	ijobvr = 2;
+	ilvr = TRUE_;
+    } else {
+	ijobvr = -1;
+	ilvr = FALSE_;
+    }
+    ilv = ilvl || ilvr;
+
+/* Test the input arguments */
+
+/* Computing MAX */
+    i__1 = *n << 1;
+    lwkmin = max(i__1,1);
+    lwkopt = lwkmin;
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    lquery = *lwork == -1;
+    *info = 0;
+    if (ijobvl <= 0) {
+	*info = -1;
+    } else if (ijobvr <= 0) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    } else if (*ldvl < 1 || ilvl && *ldvl < *n) {
+	*info = -11;
+    } else if (*ldvr < 1 || ilvr && *ldvr < *n) {
+	*info = -13;
+    } else if (*lwork < lwkmin && ! lquery) {
+	*info = -15;
+    }
+
+    if (*info == 0) {
+	nb1 = ilaenv_(&c__1, "ZGEQRF", " ", n, n, &c_n1, &c_n1);
+	nb2 = ilaenv_(&c__1, "ZUNMQR", " ", n, n, n, &c_n1);
+	nb3 = ilaenv_(&c__1, "ZUNGQR", " ", n, n, n, &c_n1);
+/* Computing MAX */
+	i__1 = max(nb1,nb2);
+	nb = max(i__1,nb3);
+/* Computing MAX */
+	i__1 = *n << 1, i__2 = *n * (nb + 1);
+	lopt = max(i__1,i__2);
+	work[1].r = (doublereal) lopt, work[1].i = 0.;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGEGV ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Get machine constants */
+
+    eps = dlamch_("E") * dlamch_("B");
+    safmin = dlamch_("S");
+/* NOTE(review): SAFMIN is doubled here, as in the reference Fortran */
+/* ZGEGV, so that SAFMAX = 1/SAFMIN and the underflow thresholds used */
+/* below have a margin above the true smallest normal number. */
+    safmin += safmin;
+    safmax = 1. / safmin;
+
+/* Scale A */
+
+    anrm = zlange_("M", n, n, &a[a_offset], lda, &rwork[1]);
+    anrm1 = anrm;
+    anrm2 = 1.;
+/* ANRM1 and ANRM2 are a two-factor representation of ANRM used by the */
+/* final alpha/beta un-scaling; splitting avoids overflow when dividing */
+/* by an ANRM far below one. */
+    if (anrm < 1.) {
+	if (safmax * anrm < 1.) {
+	    anrm1 = safmin;
+	    anrm2 = safmax * anrm;
+	}
+    }
+
+    if (anrm > 0.) {
+	zlascl_("G", &c_n1, &c_n1, &anrm, &c_b29, n, n, &a[a_offset], lda, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 10;
+	    return 0;
+	}
+    }
+
+/* Scale B */
+
+    bnrm = zlange_("M", n, n, &b[b_offset], ldb, &rwork[1]);
+    bnrm1 = bnrm;
+    bnrm2 = 1.;
+    if (bnrm < 1.) {
+	if (safmax * bnrm < 1.) {
+	    bnrm1 = safmin;
+	    bnrm2 = safmax * bnrm;
+	}
+    }
+
+    if (bnrm > 0.) {
+	zlascl_("G", &c_n1, &c_n1, &bnrm, &c_b29, n, n, &b[b_offset], ldb, &
+		iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 10;
+	    return 0;
+	}
+    }
+
+/* Permute the matrix to make it more nearly triangular */
+/* Also "balance" the matrix. */
+
+    ileft = 1;
+    iright = *n + 1;
+    irwork = iright + *n;
+    zggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &rwork[
+	    ileft], &rwork[iright], &rwork[irwork], &iinfo);
+    if (iinfo != 0) {
+	*info = *n + 1;
+	goto L80;
+    }
+
+/* Reduce B to triangular form, and initialize VL and/or VR */
+
+    irows = ihi + 1 - ilo;
+    if (ilv) {
+	icols = *n + 1 - ilo;
+    } else {
+	icols = irows;
+    }
+    itau = 1;
+    iwork = itau + irows;
+    i__1 = *lwork + 1 - iwork;
+    zgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+	    iwork], &i__1, &iinfo);
+/* On success WORK(IWORK) holds the optimal workspace reported by the */
+/* call just made; fold it into the running optimum LWKOPT. */
+    if (iinfo >= 0) {
+/* Computing MAX */
+	i__3 = iwork;
+	i__1 = lwkopt, i__2 = (integer) work[i__3].r + iwork - 1;
+	lwkopt = max(i__1,i__2);
+    }
+    if (iinfo != 0) {
+	*info = *n + 2;
+	goto L80;
+    }
+
+    i__1 = *lwork + 1 - iwork;
+    zunmqr_("L", "C", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+	    work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwork], &i__1, &
+	    iinfo);
+    if (iinfo >= 0) {
+/* Computing MAX */
+	i__3 = iwork;
+	i__1 = lwkopt, i__2 = (integer) work[i__3].r + iwork - 1;
+	lwkopt = max(i__1,i__2);
+    }
+    if (iinfo != 0) {
+	*info = *n + 3;
+	goto L80;
+    }
+
+    if (ilvl) {
+	zlaset_("Full", n, n, &c_b1, &c_b2, &vl[vl_offset], ldvl);
+	i__1 = irows - 1;
+	i__2 = irows - 1;
+	zlacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vl[ilo +
+		1 + ilo * vl_dim1], ldvl);
+	i__1 = *lwork + 1 - iwork;
+	zungqr_(&irows, &irows, &irows, &vl[ilo + ilo * vl_dim1], ldvl, &work[
+		itau], &work[iwork], &i__1, &iinfo);
+	if (iinfo >= 0) {
+/* Computing MAX */
+	    i__3 = iwork;
+	    i__1 = lwkopt, i__2 = (integer) work[i__3].r + iwork - 1;
+	    lwkopt = max(i__1,i__2);
+	}
+	if (iinfo != 0) {
+	    *info = *n + 4;
+	    goto L80;
+	}
+    }
+
+    if (ilvr) {
+	zlaset_("Full", n, n, &c_b1, &c_b2, &vr[vr_offset], ldvr);
+    }
+
+/* Reduce to generalized Hessenberg form */
+
+    if (ilv) {
+
+/* Eigenvectors requested -- work on whole matrix. */
+
+	zgghrd_(jobvl, jobvr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset],
+		ldb, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, &iinfo);
+    } else {
+	zgghrd_("N", "N", &irows, &c__1, &irows, &a[ilo + ilo * a_dim1], lda,
+		&b[ilo + ilo * b_dim1], ldb, &vl[vl_offset], ldvl, &vr[
+		vr_offset], ldvr, &iinfo);
+    }
+    if (iinfo != 0) {
+	*info = *n + 5;
+	goto L80;
+    }
+
+/* Perform QZ algorithm */
+
+    iwork = itau;
+/* CHTEMP selects the ZHGEQZ job: 'S' (Schur form, needed when any */
+/* eigenvectors are wanted) or 'E' (eigenvalues only). */
+    if (ilv) {
+	*(unsigned char *)chtemp = 'S';
+    } else {
+	*(unsigned char *)chtemp = 'E';
+    }
+    i__1 = *lwork + 1 - iwork;
+    zhgeqz_(chtemp, jobvl, jobvr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+	    b_offset], ldb, &alpha[1], &beta[1], &vl[vl_offset], ldvl, &vr[
+	    vr_offset], ldvr, &work[iwork], &i__1, &rwork[irwork], &iinfo);
+    if (iinfo >= 0) {
+/* Computing MAX */
+	i__3 = iwork;
+	i__1 = lwkopt, i__2 = (integer) work[i__3].r + iwork - 1;
+	lwkopt = max(i__1,i__2);
+    }
+    if (iinfo != 0) {
+	if (iinfo > 0 && iinfo <= *n) {
+	    *info = iinfo;
+	} else if (iinfo > *n && iinfo <= *n << 1) {
+	    *info = iinfo - *n;
+	} else {
+	    *info = *n + 6;
+	}
+	goto L80;
+    }
+
+    if (ilv) {
+
+/* Compute Eigenvectors */
+
+	if (ilvl) {
+	    if (ilvr) {
+		*(unsigned char *)chtemp = 'B';
+	    } else {
+		*(unsigned char *)chtemp = 'L';
+	    }
+	} else {
+	    *(unsigned char *)chtemp = 'R';
+	}
+
+	ztgevc_(chtemp, "B", ldumma, n, &a[a_offset], lda, &b[b_offset], ldb,
+		&vl[vl_offset], ldvl, &vr[vr_offset], ldvr, n, &in, &work[
+		iwork], &rwork[irwork], &iinfo);
+	if (iinfo != 0) {
+	    *info = *n + 7;
+	    goto L80;
+	}
+
+/* Undo balancing on VL and VR, rescale */
+
+	if (ilvl) {
+	    zggbak_("P", "L", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n,
+		    &vl[vl_offset], ldvl, &iinfo);
+	    if (iinfo != 0) {
+		*info = *n + 8;
+		goto L80;
+	    }
+/* Normalize each left eigenvector so that its largest |Re|+|Im| */
+/* component equals 1; columns whose largest component is below SAFMIN */
+/* are left untouched. */
+	    i__1 = *n;
+	    for (jc = 1; jc <= i__1; ++jc) {
+		temp = 0.;
+		i__2 = *n;
+		for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+		    i__3 = jr + jc * vl_dim1;
+		    d__3 = temp, d__4 = (d__1 = vl[i__3].r, abs(d__1)) + (
+			    d__2 = d_imag(&vl[jr + jc * vl_dim1]), abs(d__2));
+		    temp = max(d__3,d__4);
+/* L10: */
+		}
+		if (temp < safmin) {
+		    goto L30;
+		}
+		temp = 1. / temp;
+		i__2 = *n;
+		for (jr = 1; jr <= i__2; ++jr) {
+		    i__3 = jr + jc * vl_dim1;
+		    i__4 = jr + jc * vl_dim1;
+		    z__1.r = temp * vl[i__4].r, z__1.i = temp * vl[i__4].i;
+		    vl[i__3].r = z__1.r, vl[i__3].i = z__1.i;
+/* L20: */
+		}
+L30:
+		;
+	    }
+	}
+	if (ilvr) {
+	    zggbak_("P", "R", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n,
+		    &vr[vr_offset], ldvr, &iinfo);
+	    if (iinfo != 0) {
+		*info = *n + 9;
+		goto L80;
+	    }
+/* Same normalization for the right eigenvectors. */
+	    i__1 = *n;
+	    for (jc = 1; jc <= i__1; ++jc) {
+		temp = 0.;
+		i__2 = *n;
+		for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+		    i__3 = jr + jc * vr_dim1;
+		    d__3 = temp, d__4 = (d__1 = vr[i__3].r, abs(d__1)) + (
+			    d__2 = d_imag(&vr[jr + jc * vr_dim1]), abs(d__2));
+		    temp = max(d__3,d__4);
+/* L40: */
+		}
+		if (temp < safmin) {
+		    goto L60;
+		}
+		temp = 1. / temp;
+		i__2 = *n;
+		for (jr = 1; jr <= i__2; ++jr) {
+		    i__3 = jr + jc * vr_dim1;
+		    i__4 = jr + jc * vr_dim1;
+		    z__1.r = temp * vr[i__4].r, z__1.i = temp * vr[i__4].i;
+		    vr[i__3].r = z__1.r, vr[i__3].i = z__1.i;
+/* L50: */
+		}
+L60:
+		;
+	    }
+	}
+
+/* End of eigenvector calculation */
+
+    }
+
+/* Undo scaling in alpha, beta */
+
+/* Note: this does not give the alpha and beta for the unscaled */
+/* problem. */
+
+/* Un-scaling is limited to avoid underflow in alpha and beta */
+/* if they are significant. */
+
+    i__1 = *n;
+    for (jc = 1; jc <= i__1; ++jc) {
+	i__2 = jc;
+	absar = (d__1 = alpha[i__2].r, abs(d__1));
+	absai = (d__1 = d_imag(&alpha[jc]), abs(d__1));
+	i__2 = jc;
+	absb = (d__1 = beta[i__2].r, abs(d__1));
+	i__2 = jc;
+	salfar = anrm * alpha[i__2].r;
+	salfai = anrm * d_imag(&alpha[jc]);
+	i__2 = jc;
+	sbeta = bnrm * beta[i__2].r;
+	ilimit = FALSE_;
+	scale = 1.;
+
+/* Check for significant underflow in imaginary part of ALPHA */
+
+/* Computing MAX */
+	d__1 = safmin, d__2 = eps * absar, d__1 = max(d__1,d__2), d__2 = eps *
+		 absb;
+	if (abs(salfai) < safmin && absai >= max(d__1,d__2)) {
+	    ilimit = TRUE_;
+/* Computing MAX */
+	    d__1 = safmin, d__2 = anrm2 * absai;
+	    scale = safmin / anrm1 / max(d__1,d__2);
+	}
+
+/* Check for significant underflow in real part of ALPHA */
+
+/* Computing MAX */
+	d__1 = safmin, d__2 = eps * absai, d__1 = max(d__1,d__2), d__2 = eps *
+		 absb;
+	if (abs(salfar) < safmin && absar >= max(d__1,d__2)) {
+	    ilimit = TRUE_;
+/* Computing MAX */
+/* Computing MAX */
+	    d__3 = safmin, d__4 = anrm2 * absar;
+	    d__1 = scale, d__2 = safmin / anrm1 / max(d__3,d__4);
+	    scale = max(d__1,d__2);
+	}
+
+/* Check for significant underflow in BETA */
+
+/* Computing MAX */
+	d__1 = safmin, d__2 = eps * absar, d__1 = max(d__1,d__2), d__2 = eps *
+		 absai;
+	if (abs(sbeta) < safmin && absb >= max(d__1,d__2)) {
+	    ilimit = TRUE_;
+/* Computing MAX */
+/* Computing MAX */
+	    d__3 = safmin, d__4 = bnrm2 * absb;
+	    d__1 = scale, d__2 = safmin / bnrm1 / max(d__3,d__4);
+	    scale = max(d__1,d__2);
+	}
+
+/* Check for possible overflow when limiting scaling */
+
+	if (ilimit) {
+/* Computing MAX */
+	    d__1 = abs(salfar), d__2 = abs(salfai), d__1 = max(d__1,d__2),
+		    d__2 = abs(sbeta);
+	    temp = scale * safmin * max(d__1,d__2);
+	    if (temp > 1.) {
+		scale /= temp;
+	    }
+	    if (scale < 1.) {
+		ilimit = FALSE_;
+	    }
+	}
+
+/* Recompute un-scaled ALPHA, BETA if necessary. */
+
+	if (ilimit) {
+	    i__2 = jc;
+	    salfar = scale * alpha[i__2].r * anrm;
+	    salfai = scale * d_imag(&alpha[jc]) * anrm;
+	    i__2 = jc;
+	    z__2.r = scale * beta[i__2].r, z__2.i = scale * beta[i__2].i;
+	    z__1.r = bnrm * z__2.r, z__1.i = bnrm * z__2.i;
+	    sbeta = z__1.r;
+	}
+	i__2 = jc;
+	z__1.r = salfar, z__1.i = salfai;
+	alpha[i__2].r = z__1.r, alpha[i__2].i = z__1.i;
+	i__2 = jc;
+	beta[i__2].r = sbeta, beta[i__2].i = 0.;
+/* L70: */
+    }
+
+L80:
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+    return 0;
+
+/* End of ZGEGV */
+
+} /* zgegv_ */
diff --git a/contrib/libs/clapack/zgehd2.c b/contrib/libs/clapack/zgehd2.c
new file mode 100644
index 0000000000..af1ba28f34
--- /dev/null
+++ b/contrib/libs/clapack/zgehd2.c
@@ -0,0 +1,199 @@
+/* zgehd2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c passes all arguments by reference; c__1 is the unit increment
+   handed (by address) to zlarfg_ and zlarf_ below. */
+static integer c__1 = 1;
+
+/* Subroutine */ int zgehd2_(integer *n, integer *ilo, integer *ihi,
+	doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+	work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__;
+    doublecomplex alpha;
+    extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, doublecomplex *), xerbla_(char *, integer *), zlarfg_(integer *, doublecomplex *, doublecomplex *,
+	    integer *, doublecomplex *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGEHD2 reduces a complex general matrix A to upper Hessenberg form H */
+/* by a unitary similarity transformation: Q' * A * Q = H . */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that A is already upper triangular in rows */
+/* and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally */
+/* set by a previous call to ZGEBAL; otherwise they should be */
+/* set to 1 and N respectively. See Further Details. */
+/* 1 <= ILO <= IHI <= max(1,N). */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the n by n general matrix to be reduced. */
+/* On exit, the upper triangle and the first subdiagonal of A */
+/* are overwritten with the upper Hessenberg matrix H, and the */
+/* elements below the first subdiagonal, with the array TAU, */
+/* represent the unitary matrix Q as a product of elementary */
+/* reflectors. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (output) COMPLEX*16 array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of (ihi-ilo) elementary */
+/* reflectors */
+
+/* Q = H(ilo) H(ilo+1) . . . H(ihi-1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on */
+/* exit in A(i+2:ihi,i), and tau in TAU(i). */
+
+/* The contents of A are illustrated by the following example, with */
+/* n = 7, ilo = 2 and ihi = 6: */
+
+/* on entry, on exit, */
+
+/* ( a a a a a a a ) ( a a h h h h a ) */
+/* ( a a a a a a ) ( a h h h h a ) */
+/* ( a a a a a a ) ( h h h h h h ) */
+/* ( a a a a a a ) ( v2 h h h h h ) */
+/* ( a a a a a a ) ( v2 v3 h h h h ) */
+/* ( a a a a a a ) ( v2 v3 v4 h h h ) */
+/* ( a ) ( a ) */
+
+/* where a denotes an element of the original matrix A, h denotes a */
+/* modified element of the upper Hessenberg matrix H, and vi denotes an */
+/* element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*ilo < 1 || *ilo > max(1,*n)) {
+	*info = -2;
+    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGEHD2", &i__1);
+	return 0;
+    }
+
+    i__1 = *ihi - 1;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+
+/* Compute elementary reflector H(i) to annihilate A(i+2:ihi,i) */
+
+	i__2 = i__ + 1 + i__ * a_dim1;
+	alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+	i__2 = *ihi - i__;
+/* Computing MIN */
+/* min(i+2,n) keeps the reflector-source index in range when I+2 > N */
+/* (last column of the loop); the corresponding vector length is then */
+/* such that the extra element is never referenced. */
+	i__3 = i__ + 2;
+	zlarfg_(&i__2, &alpha, &a[min(i__3, *n)+ i__ * a_dim1], &c__1, &tau[
+		i__]);
+/* Temporarily set A(i+1,i) = 1 so the reflector vector v, stored in */
+/* A(i+2:ihi,i), can be applied directly from the matrix column. */
+	i__2 = i__ + 1 + i__ * a_dim1;
+	a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Apply H(i) to A(1:ihi,i+1:ihi) from the right */
+
+	i__2 = *ihi - i__;
+	zlarf_("Right", ihi, &i__2, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+		i__], &a[(i__ + 1) * a_dim1 + 1], lda, &work[1]);
+
+/* Apply H(i)' to A(i+1:ihi,i+1:n) from the left */
+
+	i__2 = *ihi - i__;
+	i__3 = *n - i__;
+	d_cnjg(&z__1, &tau[i__]);
+	zlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &c__1, &z__1,
+		 &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &work[1]);
+
+/* Restore A(i+1,i) to the value produced by ZLARFG (the subdiagonal */
+/* entry of H). */
+	i__2 = i__ + 1 + i__ * a_dim1;
+	a[i__2].r = alpha.r, a[i__2].i = alpha.i;
+/* L10: */
+    }
+
+    return 0;
+
+/* End of ZGEHD2 */
+
+} /* zgehd2_ */
diff --git a/contrib/libs/clapack/zgehrd.c b/contrib/libs/clapack/zgehrd.c
new file mode 100644
index 0000000000..0a84ef60a8
--- /dev/null
+++ b/contrib/libs/clapack/zgehrd.c
@@ -0,0 +1,353 @@
+/* zgehrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int zgehrd_(integer *n, integer *ilo, integer *ihi,
+	doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+	work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+    doublecomplex z__1;
+
+    /* Local variables */
+    integer i__, j;
+    doublecomplex t[4160] /* was [65][64] */;
+    integer ib;
+    doublecomplex ei;
+    integer nb, nh, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *), ztrmm_(char *, char *, char *, char *,
+	    integer *, integer *, doublecomplex *, doublecomplex *, integer *
+, doublecomplex *, integer *),
+	    zaxpy_(integer *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *, integer *), zgehd2_(integer *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublecomplex *, integer *), zlahr2_(integer *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(
+	    char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+	    integer *, integer *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGEHRD reduces a complex general matrix A to upper Hessenberg form H by */
+/* an unitary similarity transformation: Q' * A * Q = H . */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that A is already upper triangular in rows */
+/* and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally */
+/* set by a previous call to ZGEBAL; otherwise they should be */
+/* set to 1 and N respectively. See Further Details. */
+/* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the N-by-N general matrix to be reduced. */
+/* On exit, the upper triangle and the first subdiagonal of A */
+/* are overwritten with the upper Hessenberg matrix H, and the */
+/* elements below the first subdiagonal, with the array TAU, */
+/* represent the unitary matrix Q as a product of elementary */
+/* reflectors. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (output) COMPLEX*16 array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). Elements 1:ILO-1 and IHI:N-1 of TAU are set to */
+/* zero. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of (ihi-ilo) elementary */
+/* reflectors */
+
+/* Q = H(ilo) H(ilo+1) . . . H(ihi-1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on */
+/* exit in A(i+2:ihi,i), and tau in TAU(i). */
+
+/* The contents of A are illustrated by the following example, with */
+/* n = 7, ilo = 2 and ihi = 6: */
+
+/* on entry, on exit, */
+
+/* ( a a a a a a a ) ( a a h h h h a ) */
+/* ( a a a a a a ) ( a h h h h a ) */
+/* ( a a a a a a ) ( h h h h h h ) */
+/* ( a a a a a a ) ( v2 h h h h h ) */
+/* ( a a a a a a ) ( v2 v3 h h h h ) */
+/* ( a a a a a a ) ( v2 v3 v4 h h h ) */
+/* ( a ) ( a ) */
+
+/* where a denotes an element of the original matrix A, h denotes a */
+/* modified element of the upper Hessenberg matrix H, and vi denotes an */
+/* element of the vector defining H(i). */
+
+/* This file is a slight modification of LAPACK-3.0's ZGEHRD */
+/* subroutine incorporating improvements proposed by Quintana-Orti and */
+/* Van de Geijn (2005). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+/* NB is capped at 64 here because the local array T above is statically */
+/* dimensioned 65x64 (leading dimension c__65). */
+/* Computing MIN */
+    i__1 = 64, i__2 = ilaenv_(&c__1, "ZGEHRD", " ", n, ilo, ihi, &c_n1);
+    nb = min(i__1,i__2);
+    lwkopt = *n * nb;
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    lquery = *lwork == -1;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*ilo < 1 || *ilo > max(1,*n)) {
+	*info = -2;
+    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*lwork < max(1,*n) && ! lquery) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGEHRD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Set elements 1:ILO-1 and IHI:N-1 of TAU to zero */
+
+    i__1 = *ilo - 1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = i__;
+	tau[i__2].r = 0., tau[i__2].i = 0.;
+/* L10: */
+    }
+    i__1 = *n - 1;
+    for (i__ = max(1,*ihi); i__ <= i__1; ++i__) {
+	i__2 = i__;
+	tau[i__2].r = 0., tau[i__2].i = 0.;
+/* L20: */
+    }
+
+/* Quick return if possible */
+
+    nh = *ihi - *ilo + 1;
+    if (nh <= 1) {
+	work[1].r = 1., work[1].i = 0.;
+	return 0;
+    }
+
+/* Determine the block size */
+
+/* Computing MIN */
+    i__1 = 64, i__2 = ilaenv_(&c__1, "ZGEHRD", " ", n, ilo, ihi, &c_n1);
+    nb = min(i__1,i__2);
+    nbmin = 2;
+    iws = 1;
+    if (nb > 1 && nb < nh) {
+
+/* Determine when to cross over from blocked to unblocked code */
+/* (last block is always handled by unblocked code) */
+
+/* Computing MAX */
+	i__1 = nb, i__2 = ilaenv_(&c__3, "ZGEHRD", " ", n, ilo, ihi, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < nh) {
+
+/* Determine if workspace is large enough for blocked code */
+
+	    iws = *n * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: determine the */
+/* minimum value of NB, and reduce NB or force use of */
+/* unblocked code */
+
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "ZGEHRD", " ", n, ilo, ihi, &
+			c_n1);
+		nbmin = max(i__1,i__2);
+		if (*lwork >= *n * nbmin) {
+		    nb = *lwork / *n;
+		} else {
+		    nb = 1;
+		}
+	    }
+	}
+    }
+/* LDWORK is the leading dimension of the matrix Y = A*V*T returned in WORK */
+/* by ZLAHR2 below. */
+    ldwork = *n;
+
+    if (nb < nbmin || nb >= nh) {
+
+/* Use unblocked code below */
+
+	i__ = *ilo;
+
+    } else {
+
+/* Use blocked code */
+
+	i__1 = *ihi - 1 - nx;
+	i__2 = nb;
+	for (i__ = *ilo; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+	    i__3 = nb, i__4 = *ihi - i__;
+	    ib = min(i__3,i__4);
+
+/* Reduce columns i:i+ib-1 to Hessenberg form, returning the */
+/* matrices V and T of the block reflector H = I - V*T*V' */
+/* which performs the reduction, and also the matrix Y = A*V*T */
+
+	    zlahr2_(ihi, &i__, &ib, &a[i__ * a_dim1 + 1], lda, &tau[i__], t, &
+		    c__65, &work[1], &ldwork);
+
+/* Apply the block reflector H to A(1:ihi,i+ib:ihi) from the */
+/* right, computing A := A - Y * V'. V(i+ib,ib-1) must be set */
+/* to 1 */
+
+/* Save the subdiagonal entry overwritten by the unit element of V; */
+/* it is restored immediately after the ZGEMM update. */
+	    i__3 = i__ + ib + (i__ + ib - 1) * a_dim1;
+	    ei.r = a[i__3].r, ei.i = a[i__3].i;
+	    i__3 = i__ + ib + (i__ + ib - 1) * a_dim1;
+	    a[i__3].r = 1., a[i__3].i = 0.;
+	    i__3 = *ihi - i__ - ib + 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgemm_("No transpose", "Conjugate transpose", ihi, &i__3, &ib, &
+		    z__1, &work[1], &ldwork, &a[i__ + ib + i__ * a_dim1], lda,
+		    &c_b2, &a[(i__ + ib) * a_dim1 + 1], lda);
+	    i__3 = i__ + ib + (i__ + ib - 1) * a_dim1;
+	    a[i__3].r = ei.r, a[i__3].i = ei.i;
+
+/* Apply the block reflector H to A(1:i,i+1:i+ib-1) from the */
+/* right */
+
+	    i__3 = ib - 1;
+	    ztrmm_("Right", "Lower", "Conjugate transpose", "Unit", &i__, &
+		    i__3, &c_b2, &a[i__ + 1 + i__ * a_dim1], lda, &work[1], &
+		    ldwork);
+	    i__3 = ib - 2;
+	    for (j = 0; j <= i__3; ++j) {
+		z__1.r = -1., z__1.i = -0.;
+		zaxpy_(&i__, &z__1, &work[ldwork * j + 1], &c__1, &a[(i__ + j
+			+ 1) * a_dim1 + 1], &c__1);
+/* L30: */
+	    }
+
+/* Apply the block reflector H to A(i+1:ihi,i+ib:n) from the */
+/* left */
+
+	    i__3 = *ihi - i__;
+	    i__4 = *n - i__ - ib + 1;
+	    zlarfb_("Left", "Conjugate transpose", "Forward", "Columnwise", &
+		    i__3, &i__4, &ib, &a[i__ + 1 + i__ * a_dim1], lda, t, &
+		    c__65, &a[i__ + 1 + (i__ + ib) * a_dim1], lda, &work[1], &
+		    ldwork);
+/* L40: */
+	}
+    }
+
+/* Use unblocked code to reduce the rest of the matrix */
+
+    zgehd2_(n, &i__, ihi, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
+    work[1].r = (doublereal) iws, work[1].i = 0.;
+
+    return 0;
+
+/* End of ZGEHRD */
+
+} /* zgehrd_ */
diff --git a/contrib/libs/clapack/zgelq2.c b/contrib/libs/clapack/zgelq2.c
new file mode 100644
index 0000000000..30df244446
--- /dev/null
+++ b/contrib/libs/clapack/zgelq2.c
@@ -0,0 +1,165 @@
+/* zgelq2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zgelq2_(integer *m, integer *n, doublecomplex *a,
+	integer *lda, doublecomplex *tau, doublecomplex *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__, k;
+    doublecomplex alpha;
+    extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, doublecomplex *), xerbla_(char *, integer *), zlacgv_(integer *, doublecomplex *, integer *), zlarfp_(
+	    integer *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGELQ2 computes an LQ factorization of a complex m by n matrix A: */
+/* A = L * Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the m by n matrix A. */
+/* On exit, the elements on and below the diagonal of the array */
+/* contain the m by min(m,n) lower trapezoidal matrix L (L is */
+/* lower triangular if m <= n); the elements above the diagonal, */
+/* with the array TAU, represent the unitary matrix Q as a */
+/* product of elementary reflectors (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX*16 array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (M) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(k)' . . . H(2)' H(1)', where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i-1) = 0 and v(i) = 1; conjg(v(i+1:n)) is stored on exit in */
+/* A(i,i+1:n), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGELQ2", &i__1);
+	return 0;
+    }
+
+    k = min(*m,*n);
+
+    i__1 = k;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i,i+1:n) */
+
+/* Row i is conjugated by ZLACGV before the reflector is generated and */
+/* conjugated back after the update below, so that conjg(v(i+1:n)) ends */
+/* up stored in A(i,i+1:n) as described in Further Details. */
+	i__2 = *n - i__ + 1;
+	zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+	i__2 = i__ + i__ * a_dim1;
+	alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+	i__2 = *n - i__ + 1;
+/* Computing MIN */
+	i__3 = i__ + 1;
+	zlarfp_(&i__2, &alpha, &a[i__ + min(i__3, *n)* a_dim1], lda, &tau[i__]
+);
+	if (i__ < *m) {
+
+/* Apply H(i) to A(i+1:m,i:n) from the right */
+
+	    i__2 = i__ + i__ * a_dim1;
+	    a[i__2].r = 1., a[i__2].i = 0.;
+	    i__2 = *m - i__;
+	    i__3 = *n - i__ + 1;
+	    zlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[
+		    i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+	}
+/* Restore the diagonal entry (alpha) overwritten by the unit element of v */
+	i__2 = i__ + i__ * a_dim1;
+	a[i__2].r = alpha.r, a[i__2].i = alpha.i;
+	i__2 = *n - i__ + 1;
+	zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+/* L10: */
+    }
+    return 0;
+
+/* End of ZGELQ2 */
+
+} /* zgelq2_ */
diff --git a/contrib/libs/clapack/zgelqf.c b/contrib/libs/clapack/zgelqf.c
new file mode 100644
index 0000000000..36c16dad25
--- /dev/null
+++ b/contrib/libs/clapack/zgelqf.c
@@ -0,0 +1,257 @@
+/* zgelqf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int zgelqf_(integer *m, integer *n, doublecomplex *a,
+	integer *lda, doublecomplex *tau, doublecomplex *work, integer *lwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int zgelq2_(integer *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, doublecomplex *, integer *), xerbla_(
+	    char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+	    integer *, integer *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *);
+    integer ldwork;
+    extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *);
+    integer lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGELQF computes an LQ factorization of a complex M-by-N matrix A: */
+/* A = L * Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the elements on and below the diagonal of the array */
+/* contain the m-by-min(m,n) lower trapezoidal matrix L (L is */
+/* lower triangular if m <= n); the elements above the diagonal, */
+/* with the array TAU, represent the unitary matrix Q as a */
+/* product of elementary reflectors (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX*16 array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(k)' . . . H(2)' H(1)', where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i-1) = 0 and v(i) = 1; conjg(v(i+1:n)) is stored on exit in */
+/* A(i,i+1:n), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+/* Report the optimal workspace M*NB in WORK(1) before the argument checks, */
+/* so a workspace query (LWORK = -1) returns it. */
+    nb = ilaenv_(&c__1, "ZGELQF", " ", m, n, &c_n1, &c_n1);
+    lwkopt = *m * nb;
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    } else if (*lwork < max(1,*m) && ! lquery) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGELQF", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    k = min(*m,*n);
+    if (k == 0) {
+	work[1].r = 1., work[1].i = 0.;
+	return 0;
+    }
+
+    nbmin = 2;
+    nx = 0;
+    iws = *m;
+    if (nb > 1 && nb < k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+	i__1 = 0, i__2 = ilaenv_(&c__3, "ZGELQF", " ", m, n, &c_n1, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *m;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "ZGELQF", " ", m, n, &c_n1, &
+			c_n1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially */
+
+	i__1 = k - nx;
+	i__2 = nb;
+	for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+	    i__3 = k - i__ + 1;
+	    ib = min(i__3,nb);
+
+/* Compute the LQ factorization of the current block */
+/* A(i:i+ib-1,i:n) */
+
+	    i__3 = *n - i__ + 1;
+	    zgelq2_(&ib, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
+		    1], &iinfo);
+	    if (i__ + ib <= *m) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+		i__3 = *n - i__ + 1;
+		zlarft_("Forward", "Rowwise", &i__3, &ib, &a[i__ + i__ *
+			a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(i+ib:m,i:n) from the right */
+
+		i__3 = *m - i__ - ib + 1;
+		i__4 = *n - i__ + 1;
+		zlarfb_("Right", "No transpose", "Forward", "Rowwise", &i__3,
+			&i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
+			ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib +
+			1], &ldwork);
+	    }
+/* L10: */
+	}
+    } else {
+	i__ = 1;
+    }
+
+/* Use unblocked code to factor the last or only block. */
+
+    if (i__ <= k) {
+	i__2 = *m - i__ + 1;
+	i__1 = *n - i__ + 1;
+	zgelq2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
+, &iinfo);
+    }
+
+    work[1].r = (doublereal) iws, work[1].i = 0.;
+    return 0;
+
+/* End of ZGELQF */
+
+} /* zgelqf_ */
diff --git a/contrib/libs/clapack/zgels.c b/contrib/libs/clapack/zgels.c
new file mode 100644
index 0000000000..dac9e32265
--- /dev/null
+++ b/contrib/libs/clapack/zgels.c
@@ -0,0 +1,520 @@
+/* zgels.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__0 = 0;
+
+/* Subroutine */ int zgels_(char *trans, integer *m, integer *n, integer *
+ nrhs, doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb,
+ doublecomplex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+ doublereal d__1;
+
+ /* Local variables */
+ integer i__, j, nb, mn;
+ doublereal anrm, bnrm;
+ integer brow;
+ logical tpsd;
+ integer iascl, ibscl;
+ extern logical lsame_(char *, char *);
+ integer wsize;
+ doublereal rwork[1];
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer scllen;
+ doublereal bignum;
+ extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ extern /* Subroutine */ int zgelqf_(integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *
+), zlascl_(char *, integer *, integer *, doublereal *, doublereal
+ *, integer *, integer *, doublecomplex *, integer *, integer *), zgeqrf_(integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, doublecomplex *, integer *, integer *), zlaset_(
+ char *, integer *, integer *, doublecomplex *, doublecomplex *,
+ doublecomplex *, integer *);
+ doublereal smlnum;
+ logical lquery;
+ extern /* Subroutine */ int zunmlq_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *), zunmqr_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *), ztrtrs_(char *, char *, char *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+ integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGELS solves overdetermined or underdetermined complex linear systems */
+/* involving an M-by-N matrix A, or its conjugate-transpose, using a QR */
+/* or LQ factorization of A. It is assumed that A has full rank. */
+
+/* The following options are provided: */
+
+/* 1. If TRANS = 'N' and m >= n: find the least squares solution of */
+/* an overdetermined system, i.e., solve the least squares problem */
+/* minimize || B - A*X ||. */
+
+/* 2. If TRANS = 'N' and m < n: find the minimum norm solution of */
+/* an underdetermined system A * X = B. */
+
+/* 3. If TRANS = 'C' and m >= n: find the minimum norm solution of */
+/* an undetermined system A**H * X = B. */
+
+/* 4. If TRANS = 'C' and m < n: find the least squares solution of */
+/* an overdetermined system, i.e., solve the least squares problem */
+/* minimize || B - A**H * X ||. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution */
+/* matrix X. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': the linear system involves A; */
+/* = 'C': the linear system involves A**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of */
+/* columns of the matrices B and X. NRHS >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* if M >= N, A is overwritten by details of its QR */
+/* factorization as returned by ZGEQRF; */
+/* if M < N, A is overwritten by details of its LQ */
+/* factorization as returned by ZGELQF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the matrix B of right hand side vectors, stored */
+/* columnwise; B is M-by-NRHS if TRANS = 'N', or N-by-NRHS */
+/* if TRANS = 'C'. */
+/* On exit, if INFO = 0, B is overwritten by the solution */
+/* vectors, stored columnwise: */
+/* if TRANS = 'N' and m >= n, rows 1 to n of B contain the least */
+/* squares solution vectors; the residual sum of squares for the */
+/* solution in each column is given by the sum of squares of the */
+/* modulus of elements N+1 to M in that column; */
+/* if TRANS = 'N' and m < n, rows 1 to N of B contain the */
+/* minimum norm solution vectors; */
+/* if TRANS = 'C' and m >= n, rows 1 to M of B contain the */
+/* minimum norm solution vectors; */
+/* if TRANS = 'C' and m < n, rows 1 to M of B contain the */
+/* least squares solution vectors; the residual sum of squares */
+/* for the solution in each column is given by the sum of */
+/* squares of the modulus of elements M+1 to N in that column. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= MAX(1,M,N). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* LWORK >= max( 1, MN + max( MN, NRHS ) ). */
+/* For optimal performance, */
+/* LWORK >= max( 1, MN + max( MN, NRHS )*NB ). */
+/* where MN = min(M,N) and NB is the optimum block size. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element of the */
+/* triangular factor of A is zero, so that A does not have */
+/* full rank; the least squares solution could not be */
+/* computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ mn = min(*m,*n);
+ lquery = *lwork == -1;
+ if (! (lsame_(trans, "N") || lsame_(trans, "C"))) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*m)) {
+ *info = -6;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = max(1,*m);
+ if (*ldb < max(i__1,*n)) {
+ *info = -8;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = 1, i__2 = mn + max(mn,*nrhs);
+ if (*lwork < max(i__1,i__2) && ! lquery) {
+ *info = -10;
+ }
+ }
+ }
+
+/* Figure out optimal block size */
+
+ if (*info == 0 || *info == -10) {
+
+ tpsd = TRUE_;
+ if (lsame_(trans, "N")) {
+ tpsd = FALSE_;
+ }
+
+ if (*m >= *n) {
+ nb = ilaenv_(&c__1, "ZGEQRF", " ", m, n, &c_n1, &c_n1);
+ if (tpsd) {
+/* Computing MAX */
+ i__1 = nb, i__2 = ilaenv_(&c__1, "ZUNMQR", "LN", m, nrhs, n, &
+ c_n1);
+ nb = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = nb, i__2 = ilaenv_(&c__1, "ZUNMQR", "LC", m, nrhs, n, &
+ c_n1);
+ nb = max(i__1,i__2);
+ }
+ } else {
+ nb = ilaenv_(&c__1, "ZGELQF", " ", m, n, &c_n1, &c_n1);
+ if (tpsd) {
+/* Computing MAX */
+ i__1 = nb, i__2 = ilaenv_(&c__1, "ZUNMLQ", "LC", n, nrhs, m, &
+ c_n1);
+ nb = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = nb, i__2 = ilaenv_(&c__1, "ZUNMLQ", "LN", n, nrhs, m, &
+ c_n1);
+ nb = max(i__1,i__2);
+ }
+ }
+
+/* Computing MAX */
+ i__1 = 1, i__2 = mn + max(mn,*nrhs) * nb;
+ wsize = max(i__1,i__2);
+ d__1 = (doublereal) wsize;
+ work[1].r = d__1, work[1].i = 0.;
+
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGELS ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+/* Computing MIN */
+ i__1 = min(*m,*n);
+ if (min(i__1,*nrhs) == 0) {
+ i__1 = max(*m,*n);
+ zlaset_("Full", &i__1, nrhs, &c_b1, &c_b1, &b[b_offset], ldb);
+ return 0;
+ }
+
+/* Get machine parameters */
+
+ smlnum = dlamch_("S") / dlamch_("P");
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+
+/* Scale A, B if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = zlange_("M", m, n, &a[a_offset], lda, rwork);
+ iascl = 0;
+ if (anrm > 0. && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ zlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ zlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ zlaset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[b_offset], ldb);
+ goto L50;
+ }
+
+ brow = *m;
+ if (tpsd) {
+ brow = *n;
+ }
+ bnrm = zlange_("M", &brow, nrhs, &b[b_offset], ldb, rwork);
+ ibscl = 0;
+ if (bnrm > 0. && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ zlascl_("G", &c__0, &c__0, &bnrm, &smlnum, &brow, nrhs, &b[b_offset],
+ ldb, info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ zlascl_("G", &c__0, &c__0, &bnrm, &bignum, &brow, nrhs, &b[b_offset],
+ ldb, info);
+ ibscl = 2;
+ }
+
+ if (*m >= *n) {
+
+/* compute QR factorization of A */
+
+ i__1 = *lwork - mn;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[1], &work[mn + 1], &i__1, info)
+ ;
+
+/* workspace at least N, optimally N*NB */
+
+ if (! tpsd) {
+
+/* Least-Squares Problem min || A * X - B || */
+
+/* B(1:M,1:NRHS) := Q' * B(1:M,1:NRHS) */
+
+ i__1 = *lwork - mn;
+ zunmqr_("Left", "Conjugate transpose", m, nrhs, n, &a[a_offset],
+ lda, &work[1], &b[b_offset], ldb, &work[mn + 1], &i__1,
+ info);
+
+/* workspace at least NRHS, optimally NRHS*NB */
+
+/* B(1:N,1:NRHS) := inv(R) * B(1:N,1:NRHS) */
+
+ ztrtrs_("Upper", "No transpose", "Non-unit", n, nrhs, &a[a_offset]
+, lda, &b[b_offset], ldb, info);
+
+ if (*info > 0) {
+ return 0;
+ }
+
+ scllen = *n;
+
+ } else {
+
+/* Overdetermined system of equations A' * X = B */
+
+/* B(1:N,1:NRHS) := inv(R') * B(1:N,1:NRHS) */
+
+ ztrtrs_("Upper", "Conjugate transpose", "Non-unit", n, nrhs, &a[
+ a_offset], lda, &b[b_offset], ldb, info);
+
+ if (*info > 0) {
+ return 0;
+ }
+
+/* B(N+1:M,1:NRHS) = ZERO */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = *n + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0., b[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+
+/* B(1:M,1:NRHS) := Q(1:N,:) * B(1:N,1:NRHS) */
+
+ i__1 = *lwork - mn;
+ zunmqr_("Left", "No transpose", m, nrhs, n, &a[a_offset], lda, &
+ work[1], &b[b_offset], ldb, &work[mn + 1], &i__1, info);
+
+/* workspace at least NRHS, optimally NRHS*NB */
+
+ scllen = *m;
+
+ }
+
+ } else {
+
+/* Compute LQ factorization of A */
+
+ i__1 = *lwork - mn;
+ zgelqf_(m, n, &a[a_offset], lda, &work[1], &work[mn + 1], &i__1, info)
+ ;
+
+/* workspace at least M, optimally M*NB. */
+
+ if (! tpsd) {
+
+/* underdetermined system of equations A * X = B */
+
+/* B(1:M,1:NRHS) := inv(L) * B(1:M,1:NRHS) */
+
+ ztrtrs_("Lower", "No transpose", "Non-unit", m, nrhs, &a[a_offset]
+, lda, &b[b_offset], ldb, info);
+
+ if (*info > 0) {
+ return 0;
+ }
+
+/* B(M+1:N,1:NRHS) = 0 */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = *m + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0., b[i__3].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* B(1:N,1:NRHS) := Q(1:N,:)' * B(1:M,1:NRHS) */
+
+ i__1 = *lwork - mn;
+ zunmlq_("Left", "Conjugate transpose", n, nrhs, m, &a[a_offset],
+ lda, &work[1], &b[b_offset], ldb, &work[mn + 1], &i__1,
+ info);
+
+/* workspace at least NRHS, optimally NRHS*NB */
+
+ scllen = *n;
+
+ } else {
+
+/* overdetermined system min || A' * X - B || */
+
+/* B(1:N,1:NRHS) := Q * B(1:N,1:NRHS) */
+
+ i__1 = *lwork - mn;
+ zunmlq_("Left", "No transpose", n, nrhs, m, &a[a_offset], lda, &
+ work[1], &b[b_offset], ldb, &work[mn + 1], &i__1, info);
+
+/* workspace at least NRHS, optimally NRHS*NB */
+
+/* B(1:M,1:NRHS) := inv(L') * B(1:M,1:NRHS) */
+
+ ztrtrs_("Lower", "Conjugate transpose", "Non-unit", m, nrhs, &a[
+ a_offset], lda, &b[b_offset], ldb, info);
+
+ if (*info > 0) {
+ return 0;
+ }
+
+ scllen = *m;
+
+ }
+
+ }
+
+/* Undo scaling */
+
+ if (iascl == 1) {
+ zlascl_("G", &c__0, &c__0, &anrm, &smlnum, &scllen, nrhs, &b[b_offset]
+, ldb, info);
+ } else if (iascl == 2) {
+ zlascl_("G", &c__0, &c__0, &anrm, &bignum, &scllen, nrhs, &b[b_offset]
+, ldb, info);
+ }
+ if (ibscl == 1) {
+ zlascl_("G", &c__0, &c__0, &smlnum, &bnrm, &scllen, nrhs, &b[b_offset]
+, ldb, info);
+ } else if (ibscl == 2) {
+ zlascl_("G", &c__0, &c__0, &bignum, &bnrm, &scllen, nrhs, &b[b_offset]
+, ldb, info);
+ }
+
+L50:
+ d__1 = (doublereal) wsize;
+ work[1].r = d__1, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZGELS */
+
+} /* zgels_ */
diff --git a/contrib/libs/clapack/zgelsd.c b/contrib/libs/clapack/zgelsd.c
new file mode 100644
index 0000000000..9b35320bd3
--- /dev/null
+++ b/contrib/libs/clapack/zgelsd.c
@@ -0,0 +1,724 @@
+/* zgelsd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static integer c__9 = 9;
+static integer c__0 = 0;
+static integer c__6 = 6;
+static integer c_n1 = -1;
+static integer c__1 = 1;
+static doublereal c_b80 = 0.;
+
+/* Subroutine */ int zgelsd_(integer *m, integer *n, integer *nrhs,
+ doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb,
+ doublereal *s, doublereal *rcond, integer *rank, doublecomplex *work,
+ integer *lwork, doublereal *rwork, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+
+ /* Builtin functions */
+ double log(doublereal);
+
+ /* Local variables */
+ integer ie, il, mm;
+ doublereal eps, anrm, bnrm;
+ integer itau, nlvl, iascl, ibscl;
+ doublereal sfmin;
+ integer minmn, maxmn, itaup, itauq, mnthr, nwork;
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dlaset_(char *, integer *, integer
+ *, doublereal *, doublereal *, doublereal *, integer *),
+ xerbla_(char *, integer *), zgebrd_(integer *, integer *,
+ doublecomplex *, integer *, doublereal *, doublereal *,
+ doublecomplex *, doublecomplex *, doublecomplex *, integer *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ doublereal bignum;
+ extern /* Subroutine */ int zgelqf_(integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *
+), zlalsd_(char *, integer *, integer *, integer *, doublereal *,
+ doublereal *, doublecomplex *, integer *, doublereal *, integer *,
+ doublecomplex *, doublereal *, integer *, integer *),
+ zlascl_(char *, integer *, integer *, doublereal *, doublereal *,
+ integer *, integer *, doublecomplex *, integer *, integer *), zgeqrf_(integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, doublecomplex *, integer *, integer *);
+ integer ldwork;
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *),
+ zlaset_(char *, integer *, integer *, doublecomplex *,
+ doublecomplex *, doublecomplex *, integer *);
+ integer liwork, minwrk, maxwrk;
+ doublereal smlnum;
+ extern /* Subroutine */ int zunmbr_(char *, char *, char *, integer *,
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *
+);
+ integer lrwork;
+ logical lquery;
+ integer nrwork, smlsiz;
+ extern /* Subroutine */ int zunmlq_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *), zunmqr_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGELSD computes the minimum-norm solution to a real linear least */
+/* squares problem: */
+/* minimize 2-norm(| b - A*x |) */
+/* using the singular value decomposition (SVD) of A. A is an M-by-N */
+/* matrix which may be rank-deficient. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution */
+/* matrix X. */
+
+/* The problem is solved in three steps: */
+/* (1) Reduce the coefficient matrix A to bidiagonal form with */
+/* Householder tranformations, reducing the original problem */
+/* into a "bidiagonal least squares problem" (BLS) */
+/* (2) Solve the BLS using a divide and conquer approach. */
+/* (3) Apply back all the Householder tranformations to solve */
+/* the original least squares problem. */
+
+/* The effective rank of A is determined by treating as zero those */
+/* singular values which are less than RCOND times the largest singular */
+/* value. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A has been destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the M-by-NRHS right hand side matrix B. */
+/* On exit, B is overwritten by the N-by-NRHS solution matrix X. */
+/* If m >= n and RANK = n, the residual sum-of-squares for */
+/* the solution in the i-th column is given by the sum of */
+/* squares of the modulus of elements n+1:m in that column. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,M,N). */
+
+/* S (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The singular values of A in decreasing order. */
+/* The condition number of A in the 2-norm = S(1)/S(min(m,n)). */
+
+/* RCOND (input) DOUBLE PRECISION */
+/* RCOND is used to determine the effective rank of A. */
+/* Singular values S(i) <= RCOND*S(1) are treated as zero. */
+/* If RCOND < 0, machine precision is used instead. */
+
+/* RANK (output) INTEGER */
+/* The effective rank of A, i.e., the number of singular values */
+/* which are greater than RCOND*S(1). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK must be at least 1. */
+/* The exact minimum amount of workspace needed depends on M, */
+/* N and NRHS. As long as LWORK is at least */
+/* 2*N + N*NRHS */
+/* if M is greater than or equal to N or */
+/* 2*M + M*NRHS */
+/* if M is less than N, the code will execute correctly. */
+/* For good performance, LWORK should generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the array WORK and the */
+/* minimum sizes of the arrays RWORK and IWORK, and returns */
+/* these values as the first entries of the WORK, RWORK and */
+/* IWORK arrays, and no error message related to LWORK is issued */
+/* by XERBLA. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LRWORK)) */
+/* LRWORK >= */
+/* 10*N + 2*N*SMLSIZ + 8*N*NLVL + 3*SMLSIZ*NRHS + */
+/* (SMLSIZ+1)**2 */
+/* if M is greater than or equal to N or */
+/* 10*M + 2*M*SMLSIZ + 8*M*NLVL + 3*SMLSIZ*NRHS + */
+/* (SMLSIZ+1)**2 */
+/* if M is less than N, the code will execute correctly. */
+/* SMLSIZ is returned by ILAENV and is equal to the maximum */
+/* size of the subproblems at the bottom of the computation */
+/* tree (usually about 25), and */
+/* NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 ) */
+/* On exit, if INFO = 0, RWORK(1) returns the minimum LRWORK. */
+
+/* IWORK (workspace) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* LIWORK >= max(1, 3*MINMN*NLVL + 11*MINMN), */
+/* where MINMN = MIN( M,N ). */
+/* On exit, if INFO = 0, IWORK(1) returns the minimum LIWORK. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: the algorithm for computing the SVD failed to converge; */
+/* if INFO = i, i off-diagonal elements of an intermediate */
+/* bidiagonal form did not converge to zero. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Ren-Cang Li, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --s;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ maxmn = max(*m,*n);
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldb < max(1,maxmn)) {
+ *info = -7;
+ }
+
+/* Compute workspace. */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ liwork = 1;
+ lrwork = 1;
+ if (minmn > 0) {
+ smlsiz = ilaenv_(&c__9, "ZGELSD", " ", &c__0, &c__0, &c__0, &c__0);
+ mnthr = ilaenv_(&c__6, "ZGELSD", " ", m, n, nrhs, &c_n1);
+/* Computing MAX */
+ i__1 = (integer) (log((doublereal) minmn / (doublereal) (smlsiz +
+ 1)) / log(2.)) + 1;
+ nlvl = max(i__1,0);
+ liwork = minmn * 3 * nlvl + minmn * 11;
+ mm = *m;
+ if (*m >= *n && *m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than */
+/* columns. */
+
+ mm = *n;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n,
+ &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *nrhs * ilaenv_(&c__1, "ZUNMQR", "LC",
+ m, nrhs, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ }
+ if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined. */
+
+/* Computing 2nd power */
+ i__1 = smlsiz + 1;
+ lrwork = *n * 10 + (*n << 1) * smlsiz + (*n << 3) * nlvl +
+ smlsiz * 3 * *nrhs + i__1 * i__1;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + (mm + *n) * ilaenv_(&c__1,
+ "ZGEBRD", " ", &mm, n, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *nrhs * ilaenv_(&c__1,
+ "ZUNMBR", "QLC", &mm, nrhs, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "ZUNMBR", "PLN", n, nrhs, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * *nrhs;
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = (*n << 1) + mm, i__2 = (*n << 1) + *n * *nrhs;
+ minwrk = max(i__1,i__2);
+ }
+ if (*n > *m) {
+/* Computing 2nd power */
+ i__1 = smlsiz + 1;
+ lrwork = *m * 10 + (*m << 1) * smlsiz + (*m << 3) * nlvl +
+ smlsiz * 3 * *nrhs + i__1 * i__1;
+ if (*n >= mnthr) {
+
+/* Path 2a - underdetermined, with many more columns */
+/* than rows. */
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m << 1) *
+ ilaenv_(&c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + *nrhs *
+ ilaenv_(&c__1, "ZUNMBR", "QLC", m, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m - 1) *
+ ilaenv_(&c__1, "ZUNMLQ", "LC", n, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+ if (*nrhs > 1) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 1);
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+/* XXX: Ensure the Path 2a case below is triggered. The workspace */
+/* calculation should use queries for all routines eventually. */
+/* Computing MAX */
+/* Computing MAX */
+ i__3 = *m, i__4 = (*m << 1) - 4, i__3 = max(i__3,i__4),
+ i__3 = max(i__3,*nrhs), i__4 = *n - *m * 3;
+ i__1 = maxwrk, i__2 = (*m << 2) + *m * *m + max(i__3,i__4)
+ ;
+ maxwrk = max(i__1,i__2);
+ } else {
+
+/* Path 2 - underdetermined. */
+
+ maxwrk = (*m << 1) + (*n + *m) * ilaenv_(&c__1, "ZGEBRD",
+ " ", m, n, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *nrhs * ilaenv_(&c__1,
+ "ZUNMBR", "QLC", m, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNMBR", "PLN", n, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = (*m << 1) + *n, i__2 = (*m << 1) + *m * *nrhs;
+ minwrk = max(i__1,i__2);
+ }
+ }
+ minwrk = min(minwrk,maxwrk);
+ work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+ iwork[1] = liwork;
+ rwork[1] = (doublereal) lrwork;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGELSD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == 0 || *n == 0) {
+ *rank = 0;
+ return 0;
+ }
+
+/* Get machine parameters. */
+
+ eps = dlamch_("P");
+ sfmin = dlamch_("S");
+ smlnum = sfmin / eps;
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+
+/* Scale A if max entry outside range [SMLNUM,BIGNUM]. */
+
+ anrm = zlange_("M", m, n, &a[a_offset], lda, &rwork[1]);
+ iascl = 0;
+ if (anrm > 0. && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ zlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+ zlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ zlaset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[b_offset], ldb);
+ dlaset_("F", &minmn, &c__1, &c_b80, &c_b80, &s[1], &c__1);
+ *rank = 0;
+ goto L10;
+ }
+
+/* Scale B if max entry outside range [SMLNUM,BIGNUM]. */
+
+ bnrm = zlange_("M", m, nrhs, &b[b_offset], ldb, &rwork[1]);
+ ibscl = 0;
+ if (bnrm > 0. && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM. */
+
+ zlascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+ zlascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 2;
+ }
+
+/* If M < N make sure B(M+1:N,:) = 0 */
+
+ if (*m < *n) {
+ i__1 = *n - *m;
+ zlaset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[*m + 1 + b_dim1], ldb);
+ }
+
+/* Overdetermined case. */
+
+ if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined. */
+
+ mm = *m;
+ if (*m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than columns */
+
+ mm = *n;
+ itau = 1;
+ nwork = itau + *n;
+
+/* Compute A=Q*R. */
+/* (RWorkspace: need N) */
+/* (CWorkspace: need N, prefer N*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+ info);
+
+/* Multiply B by transpose(Q). */
+/* (RWorkspace: need N) */
+/* (CWorkspace: need NRHS, prefer NRHS*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ zunmqr_("L", "C", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below R. */
+
+ if (*n > 1) {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &a[a_dim1 + 2], lda);
+ }
+ }
+
+ itauq = 1;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+ ie = 1;
+ nrwork = ie + *n;
+
+/* Bidiagonalize R in A. */
+/* (RWorkspace: need N) */
+/* (CWorkspace: need 2*N+MM, prefer 2*N+(MM+N)*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ zgebrd_(&mm, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors of R. */
+/* (CWorkspace: need 2*N+NRHS, prefer 2*N+NRHS*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "C", &mm, nrhs, n, &a[a_offset], lda, &work[itauq],
+ &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ zlalsd_("U", &smlsiz, n, nrhs, &s[1], &rwork[ie], &b[b_offset], ldb,
+ rcond, rank, &work[nwork], &rwork[nrwork], &iwork[1], info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of R. */
+
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("P", "L", "N", n, nrhs, n, &a[a_offset], lda, &work[itaup], &
+ b[b_offset], ldb, &work[nwork], &i__1, info);
+
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = *m, i__2 = (*m << 1) - 4, i__1 = max(i__1,i__2), i__1 = max(
+ i__1,*nrhs), i__2 = *n - *m * 3;
+ if (*n >= mnthr && *lwork >= (*m << 2) + *m * *m + max(i__1,i__2)) {
+
+/* Path 2a - underdetermined, with many more columns than rows */
+/* and sufficient workspace for an efficient algorithm. */
+
+ ldwork = *m;
+/* Computing MAX */
+/* Computing MAX */
+ i__3 = *m, i__4 = (*m << 1) - 4, i__3 = max(i__3,i__4), i__3 =
+ max(i__3,*nrhs), i__4 = *n - *m * 3;
+ i__1 = (*m << 2) + *m * *lda + max(i__3,i__4), i__2 = *m * *lda +
+ *m + *m * *nrhs;
+ if (*lwork >= max(i__1,i__2)) {
+ ldwork = *lda;
+ }
+ itau = 1;
+ nwork = *m + 1;
+
+/* Compute A=L*Q. */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+ info);
+ il = nwork;
+
+/* Copy L to WORK(IL), zeroing out above its diagonal. */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &work[il + ldwork], &
+ ldwork);
+ itauq = il + ldwork * *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+ ie = 1;
+ nrwork = ie + *m;
+
+/* Bidiagonalize L in WORK(IL). */
+/* (RWorkspace: need M) */
+/* (CWorkspace: need M*M+4*M, prefer M*M+4*M+2*M*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ zgebrd_(m, m, &work[il], &ldwork, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors of L. */
+/* (CWorkspace: need M*M+4*M+NRHS, prefer M*M+4*M+NRHS*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "C", m, nrhs, m, &work[il], &ldwork, &work[
+ itauq], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ zlalsd_("U", &smlsiz, m, nrhs, &s[1], &rwork[ie], &b[b_offset],
+ ldb, rcond, rank, &work[nwork], &rwork[nrwork], &iwork[1],
+ info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of L. */
+
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("P", "L", "N", m, nrhs, m, &work[il], &ldwork, &work[
+ itaup], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below first M rows of B. */
+
+ i__1 = *n - *m;
+ zlaset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[*m + 1 + b_dim1], ldb);
+ nwork = itau + *m;
+
+/* Multiply transpose(Q) by B. */
+/* (CWorkspace: need NRHS, prefer NRHS*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ zunmlq_("L", "C", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[nwork], &i__1, info);
+
+ } else {
+
+/* Path 2 - remaining underdetermined cases. */
+
+ itauq = 1;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+ ie = 1;
+ nrwork = ie + *m;
+
+/* Bidiagonalize A. */
+/* (RWorkspace: need M) */
+/* (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors. */
+/* (CWorkspace: need 2*M+NRHS, prefer 2*M+NRHS*NB) */
+
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "C", m, nrhs, n, &a[a_offset], lda, &work[itauq]
+, &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ zlalsd_("L", &smlsiz, m, nrhs, &s[1], &rwork[ie], &b[b_offset],
+ ldb, rcond, rank, &work[nwork], &rwork[nrwork], &iwork[1],
+ info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of A. */
+
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("P", "L", "N", n, nrhs, m, &a[a_offset], lda, &work[itaup]
+, &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+ }
+ }
+
+/* Undo scaling. */
+
+ if (iascl == 1) {
+ zlascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+ info);
+ dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ } else if (iascl == 2) {
+ zlascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+ info);
+ dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ }
+ if (ibscl == 1) {
+ zlascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ } else if (ibscl == 2) {
+ zlascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ }
+
+L10:
+ work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+ iwork[1] = liwork;
+ rwork[1] = (doublereal) lrwork;
+ return 0;
+
+/* End of ZGELSD */
+
+} /* zgelsd_ */
diff --git a/contrib/libs/clapack/zgelss.c b/contrib/libs/clapack/zgelss.c
new file mode 100644
index 0000000000..29caf0ed7c
--- /dev/null
+++ b/contrib/libs/clapack/zgelss.c
@@ -0,0 +1,828 @@
+/* zgelss.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__6 = 6;
+static integer c_n1 = -1;
+static integer c__1 = 1;
+static integer c__0 = 0;
+static doublereal c_b78 = 0.;
+
+/* Subroutine */ int zgelss_(integer *m, integer *n, integer *nrhs,
+ doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb,
+ doublereal *s, doublereal *rcond, integer *rank, doublecomplex *work,
+ integer *lwork, doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+ doublereal d__1;
+
+ /* Local variables */
+ integer i__, bl, ie, il, mm;
+ doublereal eps, thr, anrm, bnrm;
+ integer itau;
+ doublecomplex vdum[1];
+ integer iascl, ibscl, chunk;
+ doublereal sfmin;
+ integer minmn;
+ extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ integer maxmn, itaup, itauq, mnthr;
+ extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *);
+ integer iwork;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), dlabad_(doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dlaset_(char *, integer *, integer
+ *, doublereal *, doublereal *, doublereal *, integer *),
+ xerbla_(char *, integer *), zgebrd_(integer *, integer *,
+ doublecomplex *, integer *, doublereal *, doublereal *,
+ doublecomplex *, doublecomplex *, doublecomplex *, integer *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ doublereal bignum;
+ extern /* Subroutine */ int zgelqf_(integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *
+), zlascl_(char *, integer *, integer *, doublereal *, doublereal
+ *, integer *, integer *, doublecomplex *, integer *, integer *), zgeqrf_(integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, doublecomplex *, integer *, integer *), zdrscl_(
+ integer *, doublereal *, doublecomplex *, integer *);
+ integer ldwork;
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *),
+ zlaset_(char *, integer *, integer *, doublecomplex *,
+ doublecomplex *, doublecomplex *, integer *), zbdsqr_(
+ char *, integer *, integer *, integer *, integer *, doublereal *,
+ doublereal *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *, doublereal *, integer *);
+ integer minwrk, maxwrk;
+ extern /* Subroutine */ int zungbr_(char *, integer *, integer *, integer
+ *, doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *);
+ doublereal smlnum;
+ integer irwork;
+ extern /* Subroutine */ int zunmbr_(char *, char *, char *, integer *,
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *
+);
+ logical lquery;
+ extern /* Subroutine */ int zunmlq_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *), zunmqr_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGELSS computes the minimum norm solution to a complex linear */
+/* least squares problem: */
+
+/* Minimize 2-norm(| b - A*x |). */
+
+/* using the singular value decomposition (SVD) of A. A is an M-by-N */
+/* matrix which may be rank-deficient. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution matrix */
+/* X. */
+
+/* The effective rank of A is determined by treating as zero those */
+/* singular values which are less than RCOND times the largest singular */
+/* value. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the first min(m,n) rows of A are overwritten with */
+/* its right singular vectors, stored rowwise. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the M-by-NRHS right hand side matrix B. */
+/* On exit, B is overwritten by the N-by-NRHS solution matrix X. */
+/* If m >= n and RANK = n, the residual sum-of-squares for */
+/* the solution in the i-th column is given by the sum of */
+/* squares of the modulus of elements n+1:m in that column. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,M,N). */
+
+/* S (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The singular values of A in decreasing order. */
+/* The condition number of A in the 2-norm = S(1)/S(min(m,n)). */
+
+/* RCOND (input) DOUBLE PRECISION */
+/* RCOND is used to determine the effective rank of A. */
+/* Singular values S(i) <= RCOND*S(1) are treated as zero. */
+/* If RCOND < 0, machine precision is used instead. */
+
+/* RANK (output) INTEGER */
+/* The effective rank of A, i.e., the number of singular values */
+/* which are greater than RCOND*S(1). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= 1, and also: */
+/* LWORK >= 2*min(M,N) + max(M,N,NRHS) */
+/* For good performance, LWORK should generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (5*min(M,N)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: the algorithm for computing the SVD failed to converge; */
+/* if INFO = i, i off-diagonal elements of an intermediate */
+/* bidiagonal form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --s;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ maxmn = max(*m,*n);
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldb < max(1,maxmn)) {
+ *info = -7;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* CWorkspace refers to complex workspace, and RWorkspace refers */
+/* to real workspace. NB refers to the optimal block size for the */
+/* immediately following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ if (minmn > 0) {
+ mm = *m;
+ mnthr = ilaenv_(&c__6, "ZGELSS", " ", m, n, nrhs, &c_n1);
+ if (*m >= *n && *m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than */
+/* columns */
+
+ mm = *n;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "ZGEQRF",
+ " ", m, n, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *nrhs * ilaenv_(&c__1, "ZUNMQR",
+ "LC", m, nrhs, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ }
+ if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined */
+
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + (mm + *n) * ilaenv_(&c__1,
+ "ZGEBRD", " ", &mm, n, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *nrhs * ilaenv_(&c__1,
+ "ZUNMBR", "QLC", &mm, nrhs, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "ZUNGBR", "P", n, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * *nrhs;
+ maxwrk = max(i__1,i__2);
+ minwrk = (*n << 1) + max(*nrhs,*m);
+ }
+ if (*n > *m) {
+ minwrk = (*m << 1) + max(*nrhs,*n);
+ if (*n >= mnthr) {
+
+/* Path 2a - underdetermined, with many more columns */
+/* than rows */
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + *m * *m + (*m << 1) *
+ ilaenv_(&c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + *m * *m + *nrhs * ilaenv_(&
+ c__1, "ZUNMBR", "QLC", m, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + *m * *m + (*m - 1) *
+ ilaenv_(&c__1, "ZUNGBR", "P", m, m, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+ if (*nrhs > 1) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 1);
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m + *nrhs * ilaenv_(&c__1, "ZUNMLQ"
+, "LC", n, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+ } else {
+
+/* Path 2 - underdetermined */
+
+ maxwrk = (*m << 1) + (*n + *m) * ilaenv_(&c__1, "ZGEBRD",
+ " ", m, n, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *nrhs * ilaenv_(&c__1,
+ "ZUNMBR", "QLC", m, nrhs, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "P", m, n, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * *nrhs;
+ maxwrk = max(i__1,i__2);
+ }
+ }
+ maxwrk = max(minwrk,maxwrk);
+ }
+ work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGELSS", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ *rank = 0;
+ return 0;
+ }
+
+/* Get machine parameters */
+
+ eps = dlamch_("P");
+ sfmin = dlamch_("S");
+ smlnum = sfmin / eps;
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = zlange_("M", m, n, &a[a_offset], lda, &rwork[1]);
+ iascl = 0;
+ if (anrm > 0. && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ zlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ zlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ zlaset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[b_offset], ldb);
+ dlaset_("F", &minmn, &c__1, &c_b78, &c_b78, &s[1], &minmn);
+ *rank = 0;
+ goto L70;
+ }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+ bnrm = zlange_("M", m, nrhs, &b[b_offset], ldb, &rwork[1]);
+ ibscl = 0;
+ if (bnrm > 0. && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ zlascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ zlascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 2;
+ }
+
+/* Overdetermined case */
+
+ if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined */
+
+ mm = *m;
+ if (*m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than columns */
+
+ mm = *n;
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__1,
+ info);
+
+/* Multiply B by transpose(Q) */
+/* (CWorkspace: need N+NRHS, prefer N+NRHS*NB) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwork + 1;
+ zunmqr_("L", "C", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[iwork], &i__1, info);
+
+/* Zero out below R */
+
+ if (*n > 1) {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &a[a_dim1 + 2], lda);
+ }
+ }
+
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in A */
+/* (CWorkspace: need 2*N+MM, prefer 2*N+(MM+N)*NB) */
+/* (RWorkspace: need N) */
+
+ i__1 = *lwork - iwork + 1;
+ zgebrd_(&mm, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq], &
+ work[itaup], &work[iwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors of R */
+/* (CWorkspace: need 2*N+NRHS, prefer 2*N+NRHS*NB) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwork + 1;
+ zunmbr_("Q", "L", "C", &mm, nrhs, n, &a[a_offset], lda, &work[itauq],
+ &b[b_offset], ldb, &work[iwork], &i__1, info);
+
+/* Generate right bidiagonalizing vectors of R in A */
+/* (CWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwork + 1;
+ zungbr_("P", n, n, n, &a[a_offset], lda, &work[itaup], &work[iwork], &
+ i__1, info);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration */
+/* multiply B by transpose of left singular vectors */
+/* compute right singular vectors in A */
+/* (CWorkspace: none) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, n, &c__0, nrhs, &s[1], &rwork[ie], &a[a_offset], lda,
+ vdum, &c__1, &b[b_offset], ldb, &rwork[irwork], info);
+ if (*info != 0) {
+ goto L70;
+ }
+
+/* Multiply B by reciprocals of singular values */
+
+/* Computing MAX */
+ d__1 = *rcond * s[1];
+ thr = max(d__1,sfmin);
+ if (*rcond < 0.) {
+/* Computing MAX */
+ d__1 = eps * s[1];
+ thr = max(d__1,sfmin);
+ }
+ *rank = 0;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (s[i__] > thr) {
+ zdrscl_(nrhs, &s[i__], &b[i__ + b_dim1], ldb);
+ ++(*rank);
+ } else {
+ zlaset_("F", &c__1, nrhs, &c_b1, &c_b1, &b[i__ + b_dim1], ldb);
+ }
+/* L10: */
+ }
+
+/* Multiply B by right singular vectors */
+/* (CWorkspace: need N, prefer N*NRHS) */
+/* (RWorkspace: none) */
+
+ if (*lwork >= *ldb * *nrhs && *nrhs > 1) {
+ zgemm_("C", "N", n, nrhs, n, &c_b2, &a[a_offset], lda, &b[
+ b_offset], ldb, &c_b1, &work[1], ldb);
+ zlacpy_("G", n, nrhs, &work[1], ldb, &b[b_offset], ldb)
+ ;
+ } else if (*nrhs > 1) {
+ chunk = *lwork / *n;
+ i__1 = *nrhs;
+ i__2 = chunk;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = *nrhs - i__ + 1;
+ bl = min(i__3,chunk);
+ zgemm_("C", "N", n, &bl, n, &c_b2, &a[a_offset], lda, &b[i__ *
+ b_dim1 + 1], ldb, &c_b1, &work[1], n);
+ zlacpy_("G", n, &bl, &work[1], n, &b[i__ * b_dim1 + 1], ldb);
+/* L20: */
+ }
+ } else {
+ zgemv_("C", n, n, &c_b2, &a[a_offset], lda, &b[b_offset], &c__1, &
+ c_b1, &work[1], &c__1);
+ zcopy_(n, &work[1], &c__1, &b[b_offset], &c__1);
+ }
+
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__2 = max(*m,*nrhs), i__1 = *n - (*m << 1);
+ if (*n >= mnthr && *lwork >= *m * 3 + *m * *m + max(i__2,i__1)) {
+
+/* Underdetermined case, M much less than N */
+
+/* Path 2a - underdetermined, with many more columns than rows */
+/* and sufficient workspace for an efficient algorithm */
+
+ ldwork = *m;
+/* Computing MAX */
+ i__2 = max(*m,*nrhs), i__1 = *n - (*m << 1);
+ if (*lwork >= *m * 3 + *m * *lda + max(i__2,i__1)) {
+ ldwork = *lda;
+ }
+ itau = 1;
+ iwork = *m + 1;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: none) */
+
+ i__2 = *lwork - iwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2,
+ info);
+ il = iwork;
+
+/* Copy L to WORK(IL), zeroing out above it */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork);
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ zlaset_("U", &i__2, &i__1, &c_b1, &c_b1, &work[il + ldwork], &
+ ldwork);
+ ie = 1;
+ itauq = il + ldwork * *m;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IL) */
+/* (CWorkspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(m, m, &work[il], &ldwork, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[iwork], &i__2, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors of L */
+/* (CWorkspace: need M*M+3*M+NRHS, prefer M*M+3*M+NRHS*NB) */
+/* (RWorkspace: none) */
+
+ i__2 = *lwork - iwork + 1;
+ zunmbr_("Q", "L", "C", m, nrhs, m, &work[il], &ldwork, &work[
+ itauq], &b[b_offset], ldb, &work[iwork], &i__2, info);
+
+/* Generate right bidiagonalizing vectors of R in WORK(IL) */
+/* (CWorkspace: need M*M+4*M-1, prefer M*M+3*M+(M-1)*NB) */
+/* (RWorkspace: none) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", m, m, m, &work[il], &ldwork, &work[itaup], &work[
+ iwork], &i__2, info);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right singular */
+/* vectors of L in WORK(IL) and multiplying B by transpose of */
+/* left singular vectors */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", m, m, &c__0, nrhs, &s[1], &rwork[ie], &work[il], &
+ ldwork, &a[a_offset], lda, &b[b_offset], ldb, &rwork[
+ irwork], info);
+ if (*info != 0) {
+ goto L70;
+ }
+
+/* Multiply B by reciprocals of singular values */
+
+/* Computing MAX */
+ d__1 = *rcond * s[1];
+ thr = max(d__1,sfmin);
+ if (*rcond < 0.) {
+/* Computing MAX */
+ d__1 = eps * s[1];
+ thr = max(d__1,sfmin);
+ }
+ *rank = 0;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (s[i__] > thr) {
+ zdrscl_(nrhs, &s[i__], &b[i__ + b_dim1], ldb);
+ ++(*rank);
+ } else {
+ zlaset_("F", &c__1, nrhs, &c_b1, &c_b1, &b[i__ + b_dim1],
+ ldb);
+ }
+/* L30: */
+ }
+ iwork = il + *m * ldwork;
+
+/* Multiply B by right singular vectors of L in WORK(IL) */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NRHS) */
+/* (RWorkspace: none) */
+
+ if (*lwork >= *ldb * *nrhs + iwork - 1 && *nrhs > 1) {
+ zgemm_("C", "N", m, nrhs, m, &c_b2, &work[il], &ldwork, &b[
+ b_offset], ldb, &c_b1, &work[iwork], ldb);
+ zlacpy_("G", m, nrhs, &work[iwork], ldb, &b[b_offset], ldb);
+ } else if (*nrhs > 1) {
+ chunk = (*lwork - iwork + 1) / *m;
+ i__2 = *nrhs;
+ i__1 = chunk;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *nrhs - i__ + 1;
+ bl = min(i__3,chunk);
+ zgemm_("C", "N", m, &bl, m, &c_b2, &work[il], &ldwork, &b[
+ i__ * b_dim1 + 1], ldb, &c_b1, &work[iwork], m);
+ zlacpy_("G", m, &bl, &work[iwork], m, &b[i__ * b_dim1 + 1]
+, ldb);
+/* L40: */
+ }
+ } else {
+ zgemv_("C", m, m, &c_b2, &work[il], &ldwork, &b[b_dim1 + 1], &
+ c__1, &c_b1, &work[iwork], &c__1);
+ zcopy_(m, &work[iwork], &c__1, &b[b_dim1 + 1], &c__1);
+ }
+
+/* Zero out below first M rows of B */
+
+ i__1 = *n - *m;
+ zlaset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[*m + 1 + b_dim1], ldb);
+ iwork = itau + *m;
+
+/* Multiply transpose(Q) by B */
+/* (CWorkspace: need M+NRHS, prefer M+NHRS*NB) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwork + 1;
+ zunmlq_("L", "C", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[iwork], &i__1, info);
+
+ } else {
+
+/* Path 2 - remaining underdetermined cases */
+
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize A */
+/* (CWorkspace: need 3*M, prefer 2*M+(M+N)*NB) */
+/* (RWorkspace: need N) */
+
+ i__1 = *lwork - iwork + 1;
+ zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[iwork], &i__1, info);
+
+/* Multiply B by transpose of left bidiagonalizing vectors */
+/* (CWorkspace: need 2*M+NRHS, prefer 2*M+NRHS*NB) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwork + 1;
+ zunmbr_("Q", "L", "C", m, nrhs, n, &a[a_offset], lda, &work[itauq]
+, &b[b_offset], ldb, &work[iwork], &i__1, info);
+
+/* Generate right bidiagonalizing vectors in A */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: none) */
+
+ i__1 = *lwork - iwork + 1;
+ zungbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &work[
+ iwork], &i__1, info);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, */
+/* computing right singular vectors of A in A and */
+/* multiplying B by transpose of left singular vectors */
+/* (CWorkspace: none) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("L", m, n, &c__0, nrhs, &s[1], &rwork[ie], &a[a_offset],
+ lda, vdum, &c__1, &b[b_offset], ldb, &rwork[irwork], info);
+ if (*info != 0) {
+ goto L70;
+ }
+
+/* Multiply B by reciprocals of singular values */
+
+/* Computing MAX */
+ d__1 = *rcond * s[1];
+ thr = max(d__1,sfmin);
+ if (*rcond < 0.) {
+/* Computing MAX */
+ d__1 = eps * s[1];
+ thr = max(d__1,sfmin);
+ }
+ *rank = 0;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (s[i__] > thr) {
+ zdrscl_(nrhs, &s[i__], &b[i__ + b_dim1], ldb);
+ ++(*rank);
+ } else {
+ zlaset_("F", &c__1, nrhs, &c_b1, &c_b1, &b[i__ + b_dim1],
+ ldb);
+ }
+/* L50: */
+ }
+
+/* Multiply B by right singular vectors of A */
+/* (CWorkspace: need N, prefer N*NRHS) */
+/* (RWorkspace: none) */
+
+ if (*lwork >= *ldb * *nrhs && *nrhs > 1) {
+ zgemm_("C", "N", n, nrhs, m, &c_b2, &a[a_offset], lda, &b[
+ b_offset], ldb, &c_b1, &work[1], ldb);
+ zlacpy_("G", n, nrhs, &work[1], ldb, &b[b_offset], ldb);
+ } else if (*nrhs > 1) {
+ chunk = *lwork / *n;
+ i__1 = *nrhs;
+ i__2 = chunk;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *nrhs - i__ + 1;
+ bl = min(i__3,chunk);
+ zgemm_("C", "N", n, &bl, m, &c_b2, &a[a_offset], lda, &b[
+ i__ * b_dim1 + 1], ldb, &c_b1, &work[1], n);
+ zlacpy_("F", n, &bl, &work[1], n, &b[i__ * b_dim1 + 1],
+ ldb);
+/* L60: */
+ }
+ } else {
+ zgemv_("C", m, n, &c_b2, &a[a_offset], lda, &b[b_offset], &
+ c__1, &c_b1, &work[1], &c__1);
+ zcopy_(n, &work[1], &c__1, &b[b_offset], &c__1);
+ }
+ }
+ }
+
+/* Undo scaling */
+
+ if (iascl == 1) {
+ zlascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+ info);
+ dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ } else if (iascl == 2) {
+ zlascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+ info);
+ dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ }
+ if (ibscl == 1) {
+ zlascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ } else if (ibscl == 2) {
+ zlascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ }
+L70:
+ work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+ return 0;
+
+/* End of ZGELSS */
+
+} /* zgelss_ */
diff --git a/contrib/libs/clapack/zgelsx.c b/contrib/libs/clapack/zgelsx.c
new file mode 100644
index 0000000000..556353afb8
--- /dev/null
+++ b/contrib/libs/clapack/zgelsx.c
@@ -0,0 +1,471 @@
+/* zgelsx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__0 = 0;
+static integer c__2 = 2;
+static integer c__1 = 1;
+
+/* Subroutine */ int zgelsx_(integer *m, integer *n, integer *nrhs,
+ doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb,
+ integer *jpvt, doublereal *rcond, integer *rank, doublecomplex *work,
+ doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k;
+ doublecomplex c1, c2, s1, s2, t1, t2;
+ integer mn;
+ doublereal anrm, bnrm, smin, smax;
+ integer iascl, ibscl, ismin, ismax;
+ extern /* Subroutine */ int ztrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *),
+ zlaic1_(integer *, integer *, doublecomplex *, doublereal *,
+ doublecomplex *, doublecomplex *, doublereal *, doublecomplex *,
+ doublecomplex *), dlabad_(doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int zunm2r_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(char *, integer *);
+ extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ doublereal bignum;
+ extern /* Subroutine */ int zlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublecomplex *,
+ integer *, integer *), zgeqpf_(integer *, integer *,
+ doublecomplex *, integer *, integer *, doublecomplex *,
+ doublecomplex *, doublereal *, integer *), zlaset_(char *,
+ integer *, integer *, doublecomplex *, doublecomplex *,
+ doublecomplex *, integer *);
+ doublereal sminpr, smaxpr, smlnum;
+ extern /* Subroutine */ int zlatzm_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *), ztzrqf_(
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine ZGELSY. */
+
+/* ZGELSX computes the minimum-norm solution to a complex linear least */
+/* squares problem: */
+/* minimize || A * X - B || */
+/* using a complete orthogonal factorization of A. A is an M-by-N */
+/* matrix which may be rank-deficient. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution */
+/* matrix X. */
+
+/* The routine first computes a QR factorization with column pivoting: */
+/* A * P = Q * [ R11 R12 ] */
+/* [ 0 R22 ] */
+/* with R11 defined as the largest leading submatrix whose estimated */
+/* condition number is less than 1/RCOND. The order of R11, RANK, */
+/* is the effective rank of A. */
+
+/* Then, R22 is considered to be negligible, and R12 is annihilated */
+/* by unitary transformations from the right, arriving at the */
+/* complete orthogonal factorization: */
+/* A * P = Q * [ T11 0 ] * Z */
+/* [ 0 0 ] */
+/* The minimum-norm solution is then */
+/* X = P * Z' [ inv(T11)*Q1'*B ] */
+/* [ 0 ] */
+/* where Q1 consists of the first RANK columns of Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of */
+/* columns of matrices B and X. NRHS >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A has been overwritten by details of its */
+/* complete orthogonal factorization. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the M-by-NRHS right hand side matrix B. */
+/* On exit, the N-by-NRHS solution matrix X. */
+/* If m >= n and RANK = n, the residual sum-of-squares for */
+/* the solution in the i-th column is given by the sum of */
+/* squares of elements N+1:M in that column. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,M,N). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(i) .ne. 0, the i-th column of A is an */
+/* initial column, otherwise it is a free column. Before */
+/* the QR factorization of A, all initial columns are */
+/* permuted to the leading positions; only the remaining */
+/* free columns are moved as a result of column pivoting */
+/* during the factorization. */
+/* On exit, if JPVT(i) = k, then the i-th column of A*P */
+/* was the k-th column of A. */
+
+/* RCOND (input) DOUBLE PRECISION */
+/* RCOND is used to determine the effective rank of A, which */
+/* is defined as the order of the largest leading triangular */
+/* submatrix R11 in the QR factorization with pivoting of A, */
+/* whose estimated condition number < 1/RCOND. */
+
+/* RANK (output) INTEGER */
+/* The effective rank of A, i.e., the order of the submatrix */
+/* R11. This is the same as the order of the submatrix T11 */
+/* in the complete orthogonal factorization of A. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension */
+/* (min(M,N) + max( N, 2*min(M,N)+NRHS )), */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --jpvt;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ mn = min(*m,*n);
+ ismin = mn + 1;
+ ismax = (mn << 1) + 1;
+
+/* Test the input arguments. */
+
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = max(1,*m);
+ if (*ldb < max(i__1,*n)) {
+ *info = -7;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGELSX", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+/* Computing MIN */
+ i__1 = min(*m,*n);
+ if (min(i__1,*nrhs) == 0) {
+ *rank = 0;
+ return 0;
+ }
+
+/* Get machine parameters */
+
+ smlnum = dlamch_("S") / dlamch_("P");
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+
+/* Scale A, B if max elements outside range [SMLNUM,BIGNUM] */
+
+ anrm = zlange_("M", m, n, &a[a_offset], lda, &rwork[1]);
+ iascl = 0;
+ if (anrm > 0. && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ zlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ zlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ zlaset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[b_offset], ldb);
+ *rank = 0;
+ goto L100;
+ }
+
+ bnrm = zlange_("M", m, nrhs, &b[b_offset], ldb, &rwork[1]);
+ ibscl = 0;
+ if (bnrm > 0. && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ zlascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ zlascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 2;
+ }
+
+/* Compute QR factorization with column pivoting of A: */
+/* A * P = Q * R */
+
+ zgeqpf_(m, n, &a[a_offset], lda, &jpvt[1], &work[1], &work[mn + 1], &
+ rwork[1], info);
+
+/* complex workspace MN+N. Real workspace 2*N. Details of Householder */
+/* rotations stored in WORK(1:MN). */
+
+/* Determine RANK using incremental condition estimation */
+
+ i__1 = ismin;
+ work[i__1].r = 1., work[i__1].i = 0.;
+ i__1 = ismax;
+ work[i__1].r = 1., work[i__1].i = 0.;
+ smax = z_abs(&a[a_dim1 + 1]);
+ smin = smax;
+ if (z_abs(&a[a_dim1 + 1]) == 0.) {
+ *rank = 0;
+ i__1 = max(*m,*n);
+ zlaset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[b_offset], ldb);
+ goto L100;
+ } else {
+ *rank = 1;
+ }
+
+L10:
+ if (*rank < mn) {
+ i__ = *rank + 1;
+ zlaic1_(&c__2, rank, &work[ismin], &smin, &a[i__ * a_dim1 + 1], &a[
+ i__ + i__ * a_dim1], &sminpr, &s1, &c1);
+ zlaic1_(&c__1, rank, &work[ismax], &smax, &a[i__ * a_dim1 + 1], &a[
+ i__ + i__ * a_dim1], &smaxpr, &s2, &c2);
+
+ if (smaxpr * *rcond <= sminpr) {
+ i__1 = *rank;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ismin + i__ - 1;
+ i__3 = ismin + i__ - 1;
+ z__1.r = s1.r * work[i__3].r - s1.i * work[i__3].i, z__1.i =
+ s1.r * work[i__3].i + s1.i * work[i__3].r;
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+ i__2 = ismax + i__ - 1;
+ i__3 = ismax + i__ - 1;
+ z__1.r = s2.r * work[i__3].r - s2.i * work[i__3].i, z__1.i =
+ s2.r * work[i__3].i + s2.i * work[i__3].r;
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+/* L20: */
+ }
+ i__1 = ismin + *rank;
+ work[i__1].r = c1.r, work[i__1].i = c1.i;
+ i__1 = ismax + *rank;
+ work[i__1].r = c2.r, work[i__1].i = c2.i;
+ smin = sminpr;
+ smax = smaxpr;
+ ++(*rank);
+ goto L10;
+ }
+ }
+
+/* Logically partition R = [ R11 R12 ] */
+/* [ 0 R22 ] */
+/* where R11 = R(1:RANK,1:RANK) */
+
+/* [R11,R12] = [ T11, 0 ] * Y */
+
+ if (*rank < *n) {
+ ztzrqf_(rank, n, &a[a_offset], lda, &work[mn + 1], info);
+ }
+
+/* Details of Householder rotations stored in WORK(MN+1:2*MN) */
+
+/* B(1:M,1:NRHS) := Q' * B(1:M,1:NRHS) */
+
+ zunm2r_("Left", "Conjugate transpose", m, nrhs, &mn, &a[a_offset], lda, &
+ work[1], &b[b_offset], ldb, &work[(mn << 1) + 1], info);
+
+/* workspace NRHS */
+
+/* B(1:RANK,1:NRHS) := inv(T11) * B(1:RANK,1:NRHS) */
+
+ ztrsm_("Left", "Upper", "No transpose", "Non-unit", rank, nrhs, &c_b2, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+ i__1 = *n;
+ for (i__ = *rank + 1; i__ <= i__1; ++i__) {
+ i__2 = *nrhs;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0., b[i__3].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* B(1:N,1:NRHS) := Y' * B(1:N,1:NRHS) */
+
+ if (*rank < *n) {
+ i__1 = *rank;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *n - *rank + 1;
+ d_cnjg(&z__1, &work[mn + i__]);
+ zlatzm_("Left", &i__2, nrhs, &a[i__ + (*rank + 1) * a_dim1], lda,
+ &z__1, &b[i__ + b_dim1], &b[*rank + 1 + b_dim1], ldb, &
+ work[(mn << 1) + 1]);
+/* L50: */
+ }
+ }
+
+/* workspace NRHS */
+
+/* B(1:N,1:NRHS) := P * B(1:N,1:NRHS) */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = (mn << 1) + i__;
+ work[i__3].r = 1., work[i__3].i = 0.;
+/* L60: */
+ }
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = (mn << 1) + i__;
+ if (work[i__3].r == 1. && work[i__3].i == 0.) {
+ if (jpvt[i__] != i__) {
+ k = i__;
+ i__3 = k + j * b_dim1;
+ t1.r = b[i__3].r, t1.i = b[i__3].i;
+ i__3 = jpvt[k] + j * b_dim1;
+ t2.r = b[i__3].r, t2.i = b[i__3].i;
+L70:
+ i__3 = jpvt[k] + j * b_dim1;
+ b[i__3].r = t1.r, b[i__3].i = t1.i;
+ i__3 = (mn << 1) + k;
+ work[i__3].r = 0., work[i__3].i = 0.;
+ t1.r = t2.r, t1.i = t2.i;
+ k = jpvt[k];
+ i__3 = jpvt[k] + j * b_dim1;
+ t2.r = b[i__3].r, t2.i = b[i__3].i;
+ if (jpvt[k] != i__) {
+ goto L70;
+ }
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = t1.r, b[i__3].i = t1.i;
+ i__3 = (mn << 1) + k;
+ work[i__3].r = 0., work[i__3].i = 0.;
+ }
+ }
+/* L80: */
+ }
+/* L90: */
+ }
+
+/* Undo scaling */
+
+ if (iascl == 1) {
+ zlascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+ info);
+ zlascl_("U", &c__0, &c__0, &smlnum, &anrm, rank, rank, &a[a_offset],
+ lda, info);
+ } else if (iascl == 2) {
+ zlascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+ info);
+ zlascl_("U", &c__0, &c__0, &bignum, &anrm, rank, rank, &a[a_offset],
+ lda, info);
+ }
+ if (ibscl == 1) {
+ zlascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ } else if (ibscl == 2) {
+ zlascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ }
+
+L100:
+
+ return 0;
+
+/* End of ZGELSX */
+
+} /* zgelsx_ */
diff --git a/contrib/libs/clapack/zgelsy.c b/contrib/libs/clapack/zgelsy.c
new file mode 100644
index 0000000000..355ac161d7
--- /dev/null
+++ b/contrib/libs/clapack/zgelsy.c
@@ -0,0 +1,512 @@
+/* zgelsy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__0 = 0;
+static integer c__2 = 2;
+
+/* Subroutine */ int zgelsy_(integer *m, integer *n, integer *nrhs,
+ doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb,
+ integer *jpvt, doublereal *rcond, integer *rank, doublecomplex *work,
+ integer *lwork, doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1, d__2;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j;
+ doublecomplex c1, c2, s1, s2;
+ integer nb, mn, nb1, nb2, nb3, nb4;
+ doublereal anrm, bnrm, smin, smax;
+ integer iascl, ibscl, ismin, ismax;
+ doublereal wsize;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), ztrsm_(char *, char *, char *, char *
+, integer *, integer *, doublecomplex *, doublecomplex *, integer
+ *, doublecomplex *, integer *),
+ zlaic1_(integer *, integer *, doublecomplex *, doublereal *,
+ doublecomplex *, doublecomplex *, doublereal *, doublecomplex *,
+ doublecomplex *), dlabad_(doublereal *, doublereal *), zgeqp3_(
+ integer *, integer *, doublecomplex *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublereal *,
+ integer *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ doublereal bignum;
+ extern /* Subroutine */ int zlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublecomplex *,
+ integer *, integer *), zlaset_(char *, integer *,
+ integer *, doublecomplex *, doublecomplex *, doublecomplex *,
+ integer *);
+ doublereal sminpr, smaxpr, smlnum;
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int zunmqr_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *), zunmrz_(char *, char *, integer *, integer *,
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *
+), ztzrzf_(integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *)
+ ;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGELSY computes the minimum-norm solution to a complex linear least */
+/* squares problem: */
+/* minimize || A * X - B || */
+/* using a complete orthogonal factorization of A. A is an M-by-N */
+/* matrix which may be rank-deficient. */
+
+/* Several right hand side vectors b and solution vectors x can be */
+/* handled in a single call; they are stored as the columns of the */
+/* M-by-NRHS right hand side matrix B and the N-by-NRHS solution */
+/* matrix X. */
+
+/* The routine first computes a QR factorization with column pivoting: */
+/* A * P = Q * [ R11 R12 ] */
+/* [ 0 R22 ] */
+/* with R11 defined as the largest leading submatrix whose estimated */
+/* condition number is less than 1/RCOND. The order of R11, RANK, */
+/* is the effective rank of A. */
+
+/* Then, R22 is considered to be negligible, and R12 is annihilated */
+/* by unitary transformations from the right, arriving at the */
+/* complete orthogonal factorization: */
+/* A * P = Q * [ T11 0 ] * Z */
+/* [ 0 0 ] */
+/* The minimum-norm solution is then */
+/* X = P * Z' [ inv(T11)*Q1'*B ] */
+/* [ 0 ] */
+/* where Q1 consists of the first RANK columns of Q. */
+
+/* This routine is basically identical to the original xGELSX except */
+/* three differences: */
+/* o The permutation of matrix B (the right hand side) is faster and */
+/* more simple. */
+/* o The call to the subroutine xGEQPF has been substituted by the */
+/* the call to the subroutine xGEQP3. This subroutine is a Blas-3 */
+/* version of the QR factorization with column pivoting. */
+/* o Matrix B (the right hand side) is updated with Blas-3. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of */
+/* columns of matrices B and X. NRHS >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A has been overwritten by details of its */
+/* complete orthogonal factorization. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the M-by-NRHS right hand side matrix B. */
+/* On exit, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,M,N). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(i) .ne. 0, the i-th column of A is permuted */
+/* to the front of AP, otherwise column i is a free column. */
+/* On exit, if JPVT(i) = k, then the i-th column of A*P */
+/* was the k-th column of A. */
+
+/* RCOND (input) DOUBLE PRECISION */
+/* RCOND is used to determine the effective rank of A, which */
+/* is defined as the order of the largest leading triangular */
+/* submatrix R11 in the QR factorization with pivoting of A, */
+/* whose estimated condition number < 1/RCOND. */
+
+/* RANK (output) INTEGER */
+/* The effective rank of A, i.e., the order of the submatrix */
+/* R11. This is the same as the order of the submatrix T11 */
+/* in the complete orthogonal factorization of A. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* The unblocked strategy requires that: */
+/* LWORK >= MN + MAX( 2*MN, N+1, MN+NRHS ) */
+/* where MN = min(M,N). */
+/* The block algorithm requires that: */
+/* LWORK >= MN + MAX( 2*MN, NB*(N+1), MN+MN*NB, MN+NB*NRHS ) */
+/* where NB is an upper bound on the blocksize returned */
+/* by ILAENV for the routines ZGEQP3, ZTZRZF, CTZRQF, ZUNMQR, */
+/* and ZUNMRZ. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+/* E. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+/* G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --jpvt;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ mn = min(*m,*n);
+ ismin = mn + 1;
+ ismax = (mn << 1) + 1;
+
+/* Test the input arguments. */
+
+ *info = 0;
+ nb1 = ilaenv_(&c__1, "ZGEQRF", " ", m, n, &c_n1, &c_n1);
+ nb2 = ilaenv_(&c__1, "ZGERQF", " ", m, n, &c_n1, &c_n1);
+ nb3 = ilaenv_(&c__1, "ZUNMQR", " ", m, n, nrhs, &c_n1);
+ nb4 = ilaenv_(&c__1, "ZUNMRQ", " ", m, n, nrhs, &c_n1);
+/* Computing MAX */
+ i__1 = max(nb1,nb2), i__1 = max(i__1,nb3);
+ nb = max(i__1,nb4);
+/* Computing MAX */
+ i__1 = 1, i__2 = mn + (*n << 1) + nb * (*n + 1), i__1 = max(i__1,i__2),
+ i__2 = (mn << 1) + nb * *nrhs;
+ lwkopt = max(i__1,i__2);
+ z__1.r = (doublereal) lwkopt, z__1.i = 0.;
+ work[1].r = z__1.r, work[1].i = z__1.i;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = max(1,*m);
+ if (*ldb < max(i__1,*n)) {
+ *info = -7;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = mn << 1, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = mn +
+ *nrhs;
+ if (*lwork < mn + max(i__1,i__2) && ! lquery) {
+ *info = -12;
+ }
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGELSY", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+/* Computing MIN */
+ i__1 = min(*m,*n);
+ if (min(i__1,*nrhs) == 0) {
+ *rank = 0;
+ return 0;
+ }
+
+/* Get machine parameters */
+
+ smlnum = dlamch_("S") / dlamch_("P");
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+
+/* Scale A, B if max entries outside range [SMLNUM,BIGNUM] */
+
+ anrm = zlange_("M", m, n, &a[a_offset], lda, &rwork[1]);
+ iascl = 0;
+ if (anrm > 0. && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ zlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ zlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ zlaset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[b_offset], ldb);
+ *rank = 0;
+ goto L70;
+ }
+
+ bnrm = zlange_("M", m, nrhs, &b[b_offset], ldb, &rwork[1]);
+ ibscl = 0;
+ if (bnrm > 0. && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ zlascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM */
+
+ zlascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 2;
+ }
+
+/* Compute QR factorization with column pivoting of A: */
+/* A * P = Q * R */
+
+ i__1 = *lwork - mn;
+ zgeqp3_(m, n, &a[a_offset], lda, &jpvt[1], &work[1], &work[mn + 1], &i__1,
+ &rwork[1], info);
+ i__1 = mn + 1;
+ wsize = mn + work[i__1].r;
+
+/* complex workspace: MN+NB*(N+1). real workspace 2*N. */
+/* Details of Householder rotations stored in WORK(1:MN). */
+
+/* Determine RANK using incremental condition estimation */
+
+ i__1 = ismin;
+ work[i__1].r = 1., work[i__1].i = 0.;
+ i__1 = ismax;
+ work[i__1].r = 1., work[i__1].i = 0.;
+ smax = z_abs(&a[a_dim1 + 1]);
+ smin = smax;
+ if (z_abs(&a[a_dim1 + 1]) == 0.) {
+ *rank = 0;
+ i__1 = max(*m,*n);
+ zlaset_("F", &i__1, nrhs, &c_b1, &c_b1, &b[b_offset], ldb);
+ goto L70;
+ } else {
+ *rank = 1;
+ }
+
+L10:
+ if (*rank < mn) {
+ i__ = *rank + 1;
+ zlaic1_(&c__2, rank, &work[ismin], &smin, &a[i__ * a_dim1 + 1], &a[
+ i__ + i__ * a_dim1], &sminpr, &s1, &c1);
+ zlaic1_(&c__1, rank, &work[ismax], &smax, &a[i__ * a_dim1 + 1], &a[
+ i__ + i__ * a_dim1], &smaxpr, &s2, &c2);
+
+ if (smaxpr * *rcond <= sminpr) {
+ i__1 = *rank;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ismin + i__ - 1;
+ i__3 = ismin + i__ - 1;
+ z__1.r = s1.r * work[i__3].r - s1.i * work[i__3].i, z__1.i =
+ s1.r * work[i__3].i + s1.i * work[i__3].r;
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+ i__2 = ismax + i__ - 1;
+ i__3 = ismax + i__ - 1;
+ z__1.r = s2.r * work[i__3].r - s2.i * work[i__3].i, z__1.i =
+ s2.r * work[i__3].i + s2.i * work[i__3].r;
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+/* L20: */
+ }
+ i__1 = ismin + *rank;
+ work[i__1].r = c1.r, work[i__1].i = c1.i;
+ i__1 = ismax + *rank;
+ work[i__1].r = c2.r, work[i__1].i = c2.i;
+ smin = sminpr;
+ smax = smaxpr;
+ ++(*rank);
+ goto L10;
+ }
+ }
+
+/* complex workspace: 3*MN. */
+
+/* Logically partition R = [ R11 R12 ] */
+/* [ 0 R22 ] */
+/* where R11 = R(1:RANK,1:RANK) */
+
+/* [R11,R12] = [ T11, 0 ] * Y */
+
+ if (*rank < *n) {
+ i__1 = *lwork - (mn << 1);
+ ztzrzf_(rank, n, &a[a_offset], lda, &work[mn + 1], &work[(mn << 1) +
+ 1], &i__1, info);
+ }
+
+/* complex workspace: 2*MN. */
+/* Details of Householder rotations stored in WORK(MN+1:2*MN) */
+
+/* B(1:M,1:NRHS) := Q' * B(1:M,1:NRHS) */
+
+ i__1 = *lwork - (mn << 1);
+ zunmqr_("Left", "Conjugate transpose", m, nrhs, &mn, &a[a_offset], lda, &
+ work[1], &b[b_offset], ldb, &work[(mn << 1) + 1], &i__1, info);
+/* Computing MAX */
+ i__1 = (mn << 1) + 1;
+ d__1 = wsize, d__2 = (mn << 1) + work[i__1].r;
+ wsize = max(d__1,d__2);
+
+/* complex workspace: 2*MN+NB*NRHS. */
+
+/* B(1:RANK,1:NRHS) := inv(T11) * B(1:RANK,1:NRHS) */
+
+ ztrsm_("Left", "Upper", "No transpose", "Non-unit", rank, nrhs, &c_b2, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = *rank + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0., b[i__3].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* B(1:N,1:NRHS) := Y' * B(1:N,1:NRHS) */
+
+ if (*rank < *n) {
+ i__1 = *n - *rank;
+ i__2 = *lwork - (mn << 1);
+ zunmrz_("Left", "Conjugate transpose", n, nrhs, rank, &i__1, &a[
+ a_offset], lda, &work[mn + 1], &b[b_offset], ldb, &work[(mn <<
+ 1) + 1], &i__2, info);
+ }
+
+/* complex workspace: 2*MN+NRHS. */
+
+/* B(1:N,1:NRHS) := P * B(1:N,1:NRHS) */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = jpvt[i__];
+ i__4 = i__ + j * b_dim1;
+ work[i__3].r = b[i__4].r, work[i__3].i = b[i__4].i;
+/* L50: */
+ }
+ zcopy_(n, &work[1], &c__1, &b[j * b_dim1 + 1], &c__1);
+/* L60: */
+ }
+
+/* complex workspace: N. */
+
+/* Undo scaling */
+
+ if (iascl == 1) {
+ zlascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+ info);
+ zlascl_("U", &c__0, &c__0, &smlnum, &anrm, rank, rank, &a[a_offset],
+ lda, info);
+ } else if (iascl == 2) {
+ zlascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+ info);
+ zlascl_("U", &c__0, &c__0, &bignum, &anrm, rank, rank, &a[a_offset],
+ lda, info);
+ }
+ if (ibscl == 1) {
+ zlascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ } else if (ibscl == 2) {
+ zlascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ }
+
+L70:
+ z__1.r = (doublereal) lwkopt, z__1.i = 0.;
+ work[1].r = z__1.r, work[1].i = z__1.i;
+
+ return 0;
+
+/* End of ZGELSY */
+
+} /* zgelsy_ */
diff --git a/contrib/libs/clapack/zgeql2.c b/contrib/libs/clapack/zgeql2.c
new file mode 100644
index 0000000000..28107f30b9
--- /dev/null
+++ b/contrib/libs/clapack/zgeql2.c
@@ -0,0 +1,167 @@
+/* zgeql2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zgeql2_(integer *m, integer *n, doublecomplex *a,
+ integer *lda, doublecomplex *tau, doublecomplex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, k;
+ doublecomplex alpha;
+ extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *), xerbla_(char *, integer *), zlarfp_(integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGEQL2 computes a QL factorization of a complex m by n matrix A: */
+/* A = Q * L. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the m by n matrix A. */
+/* On exit, if m >= n, the lower triangle of the subarray */
+/* A(m-n+1:m,1:n) contains the n by n lower triangular matrix L; */
+/* if m <= n, the elements on and below the (n-m)-th */
+/* superdiagonal contain the m by n lower trapezoidal matrix L; */
+/* the remaining elements, with the array TAU, represent the */
+/* unitary matrix Q as a product of elementary reflectors */
+/* (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX*16 array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(m-k+i+1:m) = 0 and v(m-k+i) = 1; v(1:m-k+i-1) is stored on exit in */
+/* A(1:m-k+i-1,n-k+i), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGEQL2", &i__1);
+ return 0;
+ }
+
+ k = min(*m,*n);
+
+ for (i__ = k; i__ >= 1; --i__) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(1:m-k+i-1,n-k+i) */
+
+ i__1 = *m - k + i__ + (*n - k + i__) * a_dim1;
+ alpha.r = a[i__1].r, alpha.i = a[i__1].i;
+ i__1 = *m - k + i__;
+ zlarfp_(&i__1, &alpha, &a[(*n - k + i__) * a_dim1 + 1], &c__1, &tau[
+ i__]);
+
+/* Apply H(i)' to A(1:m-k+i,1:n-k+i-1) from the left */
+
+ i__1 = *m - k + i__ + (*n - k + i__) * a_dim1;
+ a[i__1].r = 1., a[i__1].i = 0.;
+ i__1 = *m - k + i__;
+ i__2 = *n - k + i__ - 1;
+ d_cnjg(&z__1, &tau[i__]);
+ zlarf_("Left", &i__1, &i__2, &a[(*n - k + i__) * a_dim1 + 1], &c__1, &
+ z__1, &a[a_offset], lda, &work[1]);
+ i__1 = *m - k + i__ + (*n - k + i__) * a_dim1;
+ a[i__1].r = alpha.r, a[i__1].i = alpha.i;
+/* L10: */
+ }
+ return 0;
+
+/* End of ZGEQL2 */
+
+} /* zgeql2_ */
diff --git a/contrib/libs/clapack/zgeqlf.c b/contrib/libs/clapack/zgeqlf.c
new file mode 100644
index 0000000000..749283a280
--- /dev/null
+++ b/contrib/libs/clapack/zgeqlf.c
@@ -0,0 +1,276 @@
+/* zgeqlf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int zgeqlf_(integer *m, integer *n, doublecomplex *a,
+ integer *lda, doublecomplex *tau, doublecomplex *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, k, ib, nb, ki, kk, mu, nu, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int zgeql2_(integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *), xerbla_(
+ char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+ integer *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ integer ldwork;
+ extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGEQLF computes a QL factorization of a complex M-by-N matrix A: */
+/* A = Q * L. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* if m >= n, the lower triangle of the subarray */
+/* A(m-n+1:m,1:n) contains the N-by-N lower triangular matrix L; */
+/* if m <= n, the elements on and below the (n-m)-th */
+/* superdiagonal contain the M-by-N lower trapezoidal matrix L; */
+/* the remaining elements, with the array TAU, represent the */
+/* unitary matrix Q as a product of elementary reflectors */
+/* (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX*16 array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(m-k+i+1:m) = 0 and v(m-k+i) = 1; v(1:m-k+i-1) is stored on exit in */
+/* A(1:m-k+i-1,n-k+i), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+
+ if (*info == 0) {
+ k = min(*m,*n);
+ if (k == 0) {
+ lwkopt = 1;
+ } else {
+ nb = ilaenv_(&c__1, "ZGEQLF", " ", m, n, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+ if (*lwork < max(1,*n) && ! lquery) {
+ *info = -7;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGEQLF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (k == 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 1;
+ iws = *n;
+ if (nb > 1 && nb < k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "ZGEQLF", " ", m, n, &c_n1, &c_n1);
+ nx = max(i__1,i__2);
+ if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "ZGEQLF", " ", m, n, &c_n1, &
+ c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially. */
+/* The last kk columns are handled by the block method. */
+
+ ki = (k - nx - 1) / nb * nb;
+/* Computing MIN */
+ i__1 = k, i__2 = ki + nb;
+ kk = min(i__1,i__2);
+
+ i__1 = k - kk + 1;
+ i__2 = -nb;
+ for (i__ = k - kk + ki + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__
+ += i__2) {
+/* Computing MIN */
+ i__3 = k - i__ + 1;
+ ib = min(i__3,nb);
+
+/* Compute the QL factorization of the current block */
+/* A(1:m-k+i+ib-1,n-k+i:n-k+i+ib-1) */
+
+ i__3 = *m - k + i__ + ib - 1;
+ zgeql2_(&i__3, &ib, &a[(*n - k + i__) * a_dim1 + 1], lda, &tau[
+ i__], &work[1], &iinfo);
+ if (*n - k + i__ > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ i__3 = *m - k + i__ + ib - 1;
+ zlarft_("Backward", "Columnwise", &i__3, &ib, &a[(*n - k +
+ i__) * a_dim1 + 1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(1:m-k+i+ib-1,1:n-k+i-1) from the left */
+
+ i__3 = *m - k + i__ + ib - 1;
+ i__4 = *n - k + i__ - 1;
+ zlarfb_("Left", "Conjugate transpose", "Backward", "Columnwi"
+ "se", &i__3, &i__4, &ib, &a[(*n - k + i__) * a_dim1 +
+ 1], lda, &work[1], &ldwork, &a[a_offset], lda, &work[
+ ib + 1], &ldwork);
+ }
+/* L10: */
+ }
+ mu = *m - k + i__ + nb - 1;
+ nu = *n - k + i__ + nb - 1;
+ } else {
+ mu = *m;
+ nu = *n;
+ }
+
+/* Use unblocked code to factor the last or only block */
+
+ if (mu > 0 && nu > 0) {
+ zgeql2_(&mu, &nu, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
+ }
+
+ work[1].r = (doublereal) iws, work[1].i = 0.;
+ return 0;
+
+/* End of ZGEQLF */
+
+} /* zgeqlf_ */
diff --git a/contrib/libs/clapack/zgeqp3.c b/contrib/libs/clapack/zgeqp3.c
new file mode 100644
index 0000000000..9e95d757ac
--- /dev/null
+++ b/contrib/libs/clapack/zgeqp3.c
@@ -0,0 +1,361 @@
+/* zgeqp3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int zgeqp3_(integer *m, integer *n, doublecomplex *a,
+ integer *lda, integer *jpvt, doublecomplex *tau, doublecomplex *work,
+ integer *lwork, doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer j, jb, na, nb, sm, sn, nx, fjb, iws, nfxd, nbmin, minmn, minws;
+ extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zlaqp2_(integer *, integer *,
+ integer *, doublecomplex *, integer *, integer *, doublecomplex *,
+ doublereal *, doublereal *, doublecomplex *);
+ extern doublereal dznrm2_(integer *, doublecomplex *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int zgeqrf_(integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *
+);
+ integer topbmn, sminmn;
+ extern /* Subroutine */ int zlaqps_(integer *, integer *, integer *,
+ integer *, integer *, doublecomplex *, integer *, integer *,
+ doublecomplex *, doublereal *, doublereal *, doublecomplex *,
+ doublecomplex *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int zunmqr_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGEQP3 computes a QR factorization with column pivoting of a */
+/* matrix A: A*P = Q*R using Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the upper triangle of the array contains the */
+/* min(M,N)-by-N upper trapezoidal matrix R; the elements below */
+/* the diagonal, together with the array TAU, represent the */
+/* unitary matrix Q as a product of min(M,N) elementary */
+/* reflectors. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(J).ne.0, the J-th column of A is permuted */
+/* to the front of A*P (a leading column); if JPVT(J)=0, */
+/* the J-th column of A is a free column. */
+/* On exit, if JPVT(J)=K, then the J-th column of A*P was the */
+/* the K-th column of A. */
+
+/* TAU (output) COMPLEX*16 array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO=0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= N+1. */
+/* For optimal performance LWORK >= ( N+1 )*NB, where NB */
+/* is the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a real/complex scalar, and v is a real/complex vector */
+/* with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in */
+/* A(i+1:m,i), and tau in TAU(i). */
+
+/* Based on contributions by */
+/* G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+/* X. Sun, Computer Science Dept., Duke University, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test input arguments */
+/* ==================== */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --jpvt;
+ --tau;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+
+ if (*info == 0) {
+ minmn = min(*m,*n);
+ if (minmn == 0) {
+ iws = 1;
+ lwkopt = 1;
+ } else {
+ iws = *n + 1;
+ nb = ilaenv_(&c__1, "ZGEQRF", " ", m, n, &c_n1, &c_n1);
+ lwkopt = (*n + 1) * nb;
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+ if (*lwork < iws && ! lquery) {
+ *info = -8;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGEQP3", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (minmn == 0) {
+ return 0;
+ }
+
+/* Move initial columns up front. */
+
+ nfxd = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (jpvt[j] != 0) {
+ if (j != nfxd) {
+ zswap_(m, &a[j * a_dim1 + 1], &c__1, &a[nfxd * a_dim1 + 1], &
+ c__1);
+ jpvt[j] = jpvt[nfxd];
+ jpvt[nfxd] = j;
+ } else {
+ jpvt[j] = j;
+ }
+ ++nfxd;
+ } else {
+ jpvt[j] = j;
+ }
+/* L10: */
+ }
+ --nfxd;
+
+/* Factorize fixed columns */
+/* ======================= */
+
+/* Compute the QR factorization of fixed columns and update */
+/* remaining columns. */
+
+ if (nfxd > 0) {
+ na = min(*m,nfxd);
+/* CC CALL ZGEQR2( M, NA, A, LDA, TAU, WORK, INFO ) */
+ zgeqrf_(m, &na, &a[a_offset], lda, &tau[1], &work[1], lwork, info);
+/* Computing MAX */
+ i__1 = iws, i__2 = (integer) work[1].r;
+ iws = max(i__1,i__2);
+ if (na < *n) {
+/* CC CALL ZUNM2R( 'Left', 'Conjugate Transpose', M, N-NA, */
+/* CC $ NA, A, LDA, TAU, A( 1, NA+1 ), LDA, WORK, */
+/* CC $ INFO ) */
+ i__1 = *n - na;
+ zunmqr_("Left", "Conjugate Transpose", m, &i__1, &na, &a[a_offset]
+, lda, &tau[1], &a[(na + 1) * a_dim1 + 1], lda, &work[1],
+ lwork, info);
+/* Computing MAX */
+ i__1 = iws, i__2 = (integer) work[1].r;
+ iws = max(i__1,i__2);
+ }
+ }
+
+/* Factorize free columns */
+/* ====================== */
+
+ if (nfxd < minmn) {
+
+ sm = *m - nfxd;
+ sn = *n - nfxd;
+ sminmn = minmn - nfxd;
+
+/* Determine the block size. */
+
+ nb = ilaenv_(&c__1, "ZGEQRF", " ", &sm, &sn, &c_n1, &c_n1);
+ nbmin = 2;
+ nx = 0;
+
+ if (nb > 1 && nb < sminmn) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "ZGEQRF", " ", &sm, &sn, &c_n1, &
+ c_n1);
+ nx = max(i__1,i__2);
+
+
+ if (nx < sminmn) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ minws = (sn + 1) * nb;
+ iws = max(iws,minws);
+ if (*lwork < minws) {
+
+/* Not enough workspace to use optimal NB: Reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / (sn + 1);
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "ZGEQRF", " ", &sm, &sn, &
+ c_n1, &c_n1);
+ nbmin = max(i__1,i__2);
+
+
+ }
+ }
+ }
+
+/* Initialize partial column norms. The first N elements of work */
+/* store the exact column norms. */
+
+ i__1 = *n;
+ for (j = nfxd + 1; j <= i__1; ++j) {
+ rwork[j] = dznrm2_(&sm, &a[nfxd + 1 + j * a_dim1], &c__1);
+ rwork[*n + j] = rwork[j];
+/* L20: */
+ }
+
+ if (nb >= nbmin && nb < sminmn && nx < sminmn) {
+
+/* Use blocked code initially. */
+
+ j = nfxd + 1;
+
+/* Compute factorization: while loop. */
+
+
+ topbmn = minmn - nx;
+L30:
+ if (j <= topbmn) {
+/* Computing MIN */
+ i__1 = nb, i__2 = topbmn - j + 1;
+ jb = min(i__1,i__2);
+
+/* Factorize JB columns among columns J:N. */
+
+ i__1 = *n - j + 1;
+ i__2 = j - 1;
+ i__3 = *n - j + 1;
+ zlaqps_(m, &i__1, &i__2, &jb, &fjb, &a[j * a_dim1 + 1], lda, &
+ jpvt[j], &tau[j], &rwork[j], &rwork[*n + j], &work[1],
+ &work[jb + 1], &i__3);
+
+ j += fjb;
+ goto L30;
+ }
+ } else {
+ j = nfxd + 1;
+ }
+
+/* Use unblocked code to factor the last or only block. */
+
+
+ if (j <= minmn) {
+ i__1 = *n - j + 1;
+ i__2 = j - 1;
+ zlaqp2_(m, &i__1, &i__2, &a[j * a_dim1 + 1], lda, &jpvt[j], &tau[
+ j], &rwork[j], &rwork[*n + j], &work[1]);
+ }
+
+ }
+
+ work[1].r = (doublereal) iws, work[1].i = 0.;
+ return 0;
+
+/* End of ZGEQP3 */
+
+} /* zgeqp3_ */
diff --git a/contrib/libs/clapack/zgeqpf.c b/contrib/libs/clapack/zgeqpf.c
new file mode 100644
index 0000000000..725c83fc36
--- /dev/null
+++ b/contrib/libs/clapack/zgeqpf.c
@@ -0,0 +1,316 @@
+/* zgeqpf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zgeqpf_(integer *m, integer *n, doublecomplex *a,
+ integer *lda, integer *jpvt, doublecomplex *tau, doublecomplex *work,
+ doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublereal d__1, d__2;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+ double z_abs(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, ma, mn;
+ doublecomplex aii;
+ integer pvt;
+ doublereal temp, temp2, tol3z;
+ integer itemp;
+ extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *), zswap_(integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *), zgeqr2_(
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *);
+ extern doublereal dznrm2_(integer *, doublecomplex *, integer *), dlamch_(
+ char *);
+ extern /* Subroutine */ int zunm2r_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), zlarfp_(
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *);
+
+
+/* -- LAPACK deprecated driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine ZGEQP3. */
+
+/* ZGEQPF computes a QR factorization with column pivoting of a */
+/* complex M-by-N matrix A: A*P = Q*R. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0 */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the upper triangle of the array contains the */
+/* min(M,N)-by-N upper triangular matrix R; the elements */
+/* below the diagonal, together with the array TAU, */
+/* represent the unitary matrix Q as a product of */
+/* min(m,n) elementary reflectors. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(i) .ne. 0, the i-th column of A is permuted */
+/* to the front of A*P (a leading column); if JPVT(i) = 0, */
+/* the i-th column of A is a free column. */
+/* On exit, if JPVT(i) = k, then the i-th column of A*P */
+/* was the k-th column of A. */
+
+/* TAU (output) COMPLEX*16 array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(n) */
+
+/* Each H(i) has the form */
+
+/* H = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i). */
+
+/* The matrix P is represented in jpvt as follows: If */
+/* jpvt(j) = i */
+/* then the jth column of P is the ith canonical unit vector. */
+
+/* Partial column norm updating strategy modified by */
+/* Z. Drmac and Z. Bujanovic, Dept. of Mathematics, */
+/* University of Zagreb, Croatia. */
+/* June 2006. */
+/* For more details see LAPACK Working Note 176. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --jpvt;
+ --tau;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGEQPF", &i__1);
+ return 0;
+ }
+
+ mn = min(*m,*n);
+ tol3z = sqrt(dlamch_("Epsilon"));
+
+/* Move initial columns up front */
+
+ itemp = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (jpvt[i__] != 0) {
+ if (i__ != itemp) {
+ zswap_(m, &a[i__ * a_dim1 + 1], &c__1, &a[itemp * a_dim1 + 1],
+ &c__1);
+ jpvt[i__] = jpvt[itemp];
+ jpvt[itemp] = i__;
+ } else {
+ jpvt[i__] = i__;
+ }
+ ++itemp;
+ } else {
+ jpvt[i__] = i__;
+ }
+/* L10: */
+ }
+ --itemp;
+
+/* Compute the QR factorization and update remaining columns */
+
+ if (itemp > 0) {
+ ma = min(itemp,*m);
+ zgeqr2_(m, &ma, &a[a_offset], lda, &tau[1], &work[1], info);
+ if (ma < *n) {
+ i__1 = *n - ma;
+ zunm2r_("Left", "Conjugate transpose", m, &i__1, &ma, &a[a_offset]
+, lda, &tau[1], &a[(ma + 1) * a_dim1 + 1], lda, &work[1],
+ info);
+ }
+ }
+
+ if (itemp < mn) {
+
+/* Initialize partial column norms. The first n elements of */
+/* work store the exact column norms. */
+
+ i__1 = *n;
+ for (i__ = itemp + 1; i__ <= i__1; ++i__) {
+ i__2 = *m - itemp;
+ rwork[i__] = dznrm2_(&i__2, &a[itemp + 1 + i__ * a_dim1], &c__1);
+ rwork[*n + i__] = rwork[i__];
+/* L20: */
+ }
+
+/* Compute factorization */
+
+ i__1 = mn;
+ for (i__ = itemp + 1; i__ <= i__1; ++i__) {
+
+/* Determine ith pivot column and swap if necessary */
+
+ i__2 = *n - i__ + 1;
+ pvt = i__ - 1 + idamax_(&i__2, &rwork[i__], &c__1);
+
+ if (pvt != i__) {
+ zswap_(m, &a[pvt * a_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &
+ c__1);
+ itemp = jpvt[pvt];
+ jpvt[pvt] = jpvt[i__];
+ jpvt[i__] = itemp;
+ rwork[pvt] = rwork[i__];
+ rwork[*n + pvt] = rwork[*n + i__];
+ }
+
+/* Generate elementary reflector H(i) */
+
+ i__2 = i__ + i__ * a_dim1;
+ aii.r = a[i__2].r, aii.i = a[i__2].i;
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ zlarfp_(&i__2, &aii, &a[min(i__3, *m)+ i__ * a_dim1], &c__1, &tau[
+ i__]);
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = aii.r, a[i__2].i = aii.i;
+
+ if (i__ < *n) {
+
+/* Apply H(i) to A(i:m,i+1:n) from the left */
+
+ i__2 = i__ + i__ * a_dim1;
+ aii.r = a[i__2].r, aii.i = a[i__2].i;
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ d_cnjg(&z__1, &tau[i__]);
+ zlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &
+ z__1, &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = aii.r, a[i__2].i = aii.i;
+ }
+
+/* Update partial column norms */
+
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ if (rwork[j] != 0.) {
+
+/* NOTE: The following 4 lines follow from the analysis in */
+/* Lapack Working Note 176. */
+
+ temp = z_abs(&a[i__ + j * a_dim1]) / rwork[j];
+/* Computing MAX */
+ d__1 = 0., d__2 = (temp + 1.) * (1. - temp);
+ temp = max(d__1,d__2);
+/* Computing 2nd power */
+ d__1 = rwork[j] / rwork[*n + j];
+ temp2 = temp * (d__1 * d__1);
+ if (temp2 <= tol3z) {
+ if (*m - i__ > 0) {
+ i__3 = *m - i__;
+ rwork[j] = dznrm2_(&i__3, &a[i__ + 1 + j * a_dim1]
+, &c__1);
+ rwork[*n + j] = rwork[j];
+ } else {
+ rwork[j] = 0.;
+ rwork[*n + j] = 0.;
+ }
+ } else {
+ rwork[j] *= sqrt(temp);
+ }
+ }
+/* L30: */
+ }
+
+/* L40: */
+ }
+ }
+ return 0;
+
+/* End of ZGEQPF */
+
+} /* zgeqpf_ */
diff --git a/contrib/libs/clapack/zgeqr2.c b/contrib/libs/clapack/zgeqr2.c
new file mode 100644
index 0000000000..4bc74a23f7
--- /dev/null
+++ b/contrib/libs/clapack/zgeqr2.c
@@ -0,0 +1,169 @@
+/* zgeqr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zgeqr2_(integer *m, integer *n, doublecomplex *a,
+ integer *lda, doublecomplex *tau, doublecomplex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, k;
+ doublecomplex alpha;
+ extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *), xerbla_(char *, integer *), zlarfp_(integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGEQR2 computes a QR factorization of a complex m by n matrix A: */
+/* A = Q * R. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the m by n matrix A. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(m,n) by n upper trapezoidal matrix R (R is */
+/* upper triangular if m >= n); the elements below the diagonal, */
+/* with the array TAU, represent the unitary matrix Q as a */
+/* product of elementary reflectors (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX*16 array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), */
+/* and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGEQR2", &i__1);
+ return 0;
+ }
+
+ k = min(*m,*n);
+
+ i__1 = k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ zlarfp_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3, *m)+ i__ * a_dim1]
+, &c__1, &tau[i__]);
+ if (i__ < *n) {
+
+/* Apply H(i)' to A(i:m,i+1:n) from the left */
+
+ i__2 = i__ + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ d_cnjg(&z__1, &tau[i__]);
+ zlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &z__1,
+ &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = alpha.r, a[i__2].i = alpha.i;
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of ZGEQR2 */
+
+} /* zgeqr2_ */
diff --git a/contrib/libs/clapack/zgeqrf.c b/contrib/libs/clapack/zgeqrf.c
new file mode 100644
index 0000000000..a26e355f20
--- /dev/null
+++ b/contrib/libs/clapack/zgeqrf.c
@@ -0,0 +1,258 @@
+/* zgeqrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int zgeqrf_(integer *m, integer *n, doublecomplex *a,
+ integer *lda, doublecomplex *tau, doublecomplex *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int zgeqr2_(integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *), xerbla_(
+ char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+ integer *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ integer ldwork;
+ extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGEQRF computes a QR factorization of a complex M-by-N matrix A: */
+/* A = Q * R. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(M,N)-by-N upper trapezoidal matrix R (R is */
+/* upper triangular if m >= n); the elements below the diagonal, */
+/* with the array TAU, represent the unitary matrix Q as a */
+/* product of min(m,n) elementary reflectors (see Further */
+/* Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX*16 array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), */
+/* and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "ZGEQRF", " ", m, n, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGEQRF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ k = min(*m,*n);
+ if (k == 0) {
+ work[1].r = 1., work[1].i = 0.;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *n;
+ if (nb > 1 && nb < k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "ZGEQRF", " ", m, n, &c_n1, &c_n1);
+ nx = max(i__1,i__2);
+ if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "ZGEQRF", " ", m, n, &c_n1, &
+ c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially */
+
+ i__1 = k - nx;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = k - i__ + 1;
+ ib = min(i__3,nb);
+
+/* Compute the QR factorization of the current block */
+/* A(i:m,i:i+ib-1) */
+
+ i__3 = *m - i__ + 1;
+ zgeqr2_(&i__3, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
+ 1], &iinfo);
+ if (i__ + ib <= *n) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+ i__3 = *m - i__ + 1;
+ zlarft_("Forward", "Columnwise", &i__3, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(i:m,i+ib:n) from the left */
+
+ i__3 = *m - i__ + 1;
+ i__4 = *n - i__ - ib + 1;
+ zlarfb_("Left", "Conjugate transpose", "Forward", "Columnwise"
+, &i__3, &i__4, &ib, &a[i__ + i__ * a_dim1], lda, &
+ work[1], &ldwork, &a[i__ + (i__ + ib) * a_dim1], lda,
+ &work[ib + 1], &ldwork);
+ }
+/* L10: */
+ }
+ } else {
+ i__ = 1;
+ }
+
+/* Use unblocked code to factor the last or only block. */
+
+ if (i__ <= k) {
+ i__2 = *m - i__ + 1;
+ i__1 = *n - i__ + 1;
+ zgeqr2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
+, &iinfo);
+ }
+
+ work[1].r = (doublereal) iws, work[1].i = 0.;
+ return 0;
+
+/* End of ZGEQRF */
+
+} /* zgeqrf_ */
diff --git a/contrib/libs/clapack/zgerfs.c b/contrib/libs/clapack/zgerfs.c
new file mode 100644
index 0000000000..4fb472e0b1
--- /dev/null
+++ b/contrib/libs/clapack/zgerfs.c
@@ -0,0 +1,461 @@
+/* zgerfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zgerfs_(char *trans, integer *n, integer *nrhs,
+ doublecomplex *a, integer *lda, doublecomplex *af, integer *ldaf,
+ integer *ipiv, doublecomplex *b, integer *ldb, doublecomplex *x,
+ integer *ldx, doublereal *ferr, doublereal *berr, doublecomplex *work,
+ doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+ x_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal s, xk;
+ integer nz;
+ doublereal eps;
+ integer kase;
+ doublereal safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3], count;
+ extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *),
+ zcopy_(integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *), zaxpy_(integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *, integer *), zlacn2_(integer *,
+ doublecomplex *, doublecomplex *, doublereal *, integer *,
+ integer *);
+ extern doublereal dlamch_(char *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran;
+ char transn[1], transt[1];
+ doublereal lstres;
+ extern /* Subroutine */ int zgetrs_(char *, integer *, integer *,
+ doublecomplex *, integer *, integer *, doublecomplex *, integer *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGERFS improves the computed solution to a system of linear */
+/* equations and provides error bounds and backward error estimates for */
+/* the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The original N-by-N matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input) COMPLEX*16 array, dimension (LDAF,N) */
+/* The factors L and U from the factorization A = P*L*U */
+/* as computed by ZGETRF. */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices from ZGETRF; for 1<=i<=N, row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by ZGETRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ af_dim1 = *ldaf;
+ af_offset = 1 + af_dim1;
+ af -= af_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldaf < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldx < max(1,*n)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGERFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.;
+ berr[j] = 0.;
+/* L10: */
+ }
+ return 0;
+ }
+
+ if (notran) {
+ *(unsigned char *)transn = 'N';
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transn = 'C';
+ *(unsigned char *)transt = 'N';
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = *n + 1;
+ eps = dlamch_("Epsilon");
+ safmin = dlamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A, A**T, or A**H, depending on TRANS. */
+
+ zcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_(trans, n, n, &z__1, &a[a_offset], lda, &x[j * x_dim1 + 1], &
+ c__1, &c_b1, &work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[i__] = (d__1 = b[i__3].r, abs(d__1)) + (d__2 = d_imag(&b[
+ i__ + j * b_dim1]), abs(d__2));
+/* L30: */
+ }
+
+/* Compute abs(op(A))*abs(X) + abs(B). */
+
+ if (notran) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * x_dim1;
+ xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[k + j *
+ x_dim1]), abs(d__2));
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ rwork[i__] += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 =
+ d_imag(&a[i__ + k * a_dim1]), abs(d__2))) * xk;
+/* L40: */
+ }
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ i__5 = i__ + j * x_dim1;
+ s += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 = d_imag(&a[
+ i__ + k * a_dim1]), abs(d__2))) * ((d__3 = x[i__5]
+ .r, abs(d__3)) + (d__4 = d_imag(&x[i__ + j *
+ x_dim1]), abs(d__4)));
+/* L60: */
+ }
+ rwork[k] += s;
+/* L70: */
+ }
+ }
+ s = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+/* Computing MAX */
+ i__3 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2))) / rwork[i__];
+ s = max(d__3,d__4);
+ } else {
+/* Computing MAX */
+ i__3 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + safe1) / (rwork[i__]
+ + safe1);
+ s = max(d__3,d__4);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ zgetrs_(trans, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[1],
+ n, info);
+ zaxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use ZLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+ i__3 = i__;
+ rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ ;
+ } else {
+ i__3 = i__;
+ rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ + safe1;
+ }
+/* L90: */
+ }
+
+ kase = 0;
+L100:
+ zlacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)**H). */
+
+ zgetrs_(transt, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &
+ work[1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L110: */
+ }
+ } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L120: */
+ }
+ zgetrs_(transn, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &
+ work[1], n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * x_dim1;
+ d__3 = lstres, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&x[i__ + j * x_dim1]), abs(d__2));
+ lstres = max(d__3,d__4);
+/* L130: */
+ }
+ if (lstres != 0.) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of ZGERFS */
+
+} /* zgerfs_ */
diff --git a/contrib/libs/clapack/zgerq2.c b/contrib/libs/clapack/zgerq2.c
new file mode 100644
index 0000000000..a91d0cbe93
--- /dev/null
+++ b/contrib/libs/clapack/zgerq2.c
@@ -0,0 +1,162 @@
+/* zgerq2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zgerq2_(integer *m, integer *n, doublecomplex *a,
+ integer *lda, doublecomplex *tau, doublecomplex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, k;
+ doublecomplex alpha;
+ extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *), xerbla_(char *, integer *), zlacgv_(integer *, doublecomplex *, integer *), zlarfp_(
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGERQ2 computes an RQ factorization of a complex m by n matrix A: */
+/* A = R * Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the m by n matrix A. */
+/* On exit, if m <= n, the upper triangle of the subarray */
+/* A(1:m,n-m+1:n) contains the m by m upper triangular matrix R; */
+/* if m >= n, the elements on and above the (m-n)-th subdiagonal */
+/* contain the m by n upper trapezoidal matrix R; the remaining */
+/* elements, with the array TAU, represent the unitary matrix */
+/* Q as a product of elementary reflectors (see Further */
+/* Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX*16 array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (M) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1)' H(2)' . . . H(k)', where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(n-k+i+1:n) = 0 and v(n-k+i) = 1; conjg(v(1:n-k+i-1)) is stored on */
+/* exit in A(m-k+i,1:n-k+i-1), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGERQ2", &i__1);
+ return 0;
+ }
+
+ k = min(*m,*n);
+
+ for (i__ = k; i__ >= 1; --i__) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(m-k+i,1:n-k+i-1) */
+
+ i__1 = *n - k + i__;
+ zlacgv_(&i__1, &a[*m - k + i__ + a_dim1], lda);
+ i__1 = *m - k + i__ + (*n - k + i__) * a_dim1;
+ alpha.r = a[i__1].r, alpha.i = a[i__1].i;
+ i__1 = *n - k + i__;
+ zlarfp_(&i__1, &alpha, &a[*m - k + i__ + a_dim1], lda, &tau[i__]);
+
+/* Apply H(i) to A(1:m-k+i-1,1:n-k+i) from the right */
+
+ i__1 = *m - k + i__ + (*n - k + i__) * a_dim1;
+ a[i__1].r = 1., a[i__1].i = 0.;
+ i__1 = *m - k + i__ - 1;
+ i__2 = *n - k + i__;
+ zlarf_("Right", &i__1, &i__2, &a[*m - k + i__ + a_dim1], lda, &tau[
+ i__], &a[a_offset], lda, &work[1]);
+ i__1 = *m - k + i__ + (*n - k + i__) * a_dim1;
+ a[i__1].r = alpha.r, a[i__1].i = alpha.i;
+ i__1 = *n - k + i__ - 1;
+ zlacgv_(&i__1, &a[*m - k + i__ + a_dim1], lda);
+/* L10: */
+ }
+ return 0;
+
+/* End of ZGERQ2 */
+
+} /* zgerq2_ */
diff --git a/contrib/libs/clapack/zgerqf.c b/contrib/libs/clapack/zgerqf.c
new file mode 100644
index 0000000000..9510b77586
--- /dev/null
+++ b/contrib/libs/clapack/zgerqf.c
@@ -0,0 +1,275 @@
+/* zgerqf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int zgerqf_(integer *m, integer *n, doublecomplex *a, 
+	integer *lda, doublecomplex *tau, doublecomplex *work, integer *lwork, 
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, k, ib, nb, ki, kk, mu, nu, nx, iws, nbmin, iinfo;
+    extern /* Subroutine */ int zgerq2_(integer *, integer *, doublecomplex *, 
+	    integer *, doublecomplex *, doublecomplex *, integer *), xerbla_(
+	    char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *, 
+	    integer *, integer *, integer *, doublecomplex *, integer *, 
+	    doublecomplex *, integer *, doublecomplex *, integer *, 
+	    doublecomplex *, integer *);
+    integer ldwork;
+    extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *, 
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *, 
+	    integer *);
+    integer lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGERQF computes an RQ factorization of a complex M-by-N matrix A: */
+/* A = R * Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* if m <= n, the upper triangle of the subarray */
+/* A(1:m,n-m+1:n) contains the M-by-M upper triangular matrix R; */
+/* if m >= n, the elements on and above the (m-n)-th subdiagonal */
+/* contain the M-by-N upper trapezoidal matrix R; */
+/* the remaining elements, with the array TAU, represent the */
+/* unitary matrix Q as a product of min(m,n) elementary */
+/* reflectors (see Further Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX*16 array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1)' H(2)' . . . H(k)', where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(n-k+i+1:n) = 0 and v(n-k+i) = 1; conjg(v(1:n-k+i-1)) is stored on */
+/* exit in A(m-k+i,1:n-k+i-1), and tau in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+
+/* Workspace check: compute the optimal LWORK (M*NB) and verify the */
+/* caller supplied at least max(1,M); a query (LWORK = -1) skips the check. */
+    if (*info == 0) {
+	k = min(*m,*n);
+	if (k == 0) {
+	    lwkopt = 1;
+	} else {
+	    nb = ilaenv_(&c__1, "ZGERQF", " ", m, n, &c_n1, &c_n1);
+	    lwkopt = *m * nb;
+	}
+	work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+	if (*lwork < max(1,*m) && ! lquery) {
+	    *info = -7;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGERQF", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (k == 0) {
+	return 0;
+    }
+
+/* NBMIN: smallest block size for which blocking pays off; */
+/* NX: crossover dimension below which the unblocked code is used; */
+/* IWS: workspace actually required by the chosen strategy. */
+
+    nbmin = 2;
+    nx = 1;
+    iws = *m;
+    if (nb > 1 && nb < k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+	i__1 = 0, i__2 = ilaenv_(&c__3, "ZGERQF", " ", m, n, &c_n1, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *m;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "ZGERQF", " ", m, n, &c_n1, &
+			c_n1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially. */
+/* The last kk rows are handled by the block method. */
+
+	ki = (k - nx - 1) / nb * nb;
+/* Computing MIN */
+	i__1 = k, i__2 = ki + nb;
+	kk = min(i__1,i__2);
+
+/* Loop steps downward by NB: blocks are eliminated from the bottom */
+/* rows of A upward, as required by an RQ (right-to-left) factorization. */
+	i__1 = k - kk + 1;
+	i__2 = -nb;
+	for (i__ = k - kk + ki + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ 
+		+= i__2) {
+/* Computing MIN */
+	    i__3 = k - i__ + 1;
+	    ib = min(i__3,nb);
+
+/* Compute the RQ factorization of the current block */
+/* A(m-k+i:m-k+i+ib-1,1:n-k+i+ib-1) */
+
+	    i__3 = *n - k + i__ + ib - 1;
+	    zgerq2_(&ib, &i__3, &a[*m - k + i__ + a_dim1], lda, &tau[i__], &
+		    work[1], &iinfo);
+	    if (*m - k + i__ > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+		i__3 = *n - k + i__ + ib - 1;
+		zlarft_("Backward", "Rowwise", &i__3, &ib, &a[*m - k + i__ + 
+			a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(1:m-k+i-1,1:n-k+i+ib-1) from the right */
+
+		i__3 = *m - k + i__ - 1;
+		i__4 = *n - k + i__ + ib - 1;
+		zlarfb_("Right", "No transpose", "Backward", "Rowwise", &i__3, 
+			 &i__4, &ib, &a[*m - k + i__ + a_dim1], lda, &work[1], 
+			 &ldwork, &a[a_offset], lda, &work[ib + 1], &ldwork);
+	    }
+/* L10: */
+	}
+/* After the loop, I__ holds the first block start that was NOT processed */
+/* (f2c loop-exit value); the remaining leading MU-by-NU corner of A is */
+/* factored by the unblocked code below. */
+	mu = *m - k + i__ + nb - 1;
+	nu = *n - k + i__ + nb - 1;
+    } else {
+	mu = *m;
+	nu = *n;
+    }
+
+/* Use unblocked code to factor the last or only block */
+
+    if (mu > 0 && nu > 0) {
+	zgerq2_(&mu, &nu, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
+    }
+
+/* Report the workspace actually used (IWS), not the optimal LWKOPT. */
+    work[1].r = (doublereal) iws, work[1].i = 0.;
+    return 0;
+
+/* End of ZGERQF */
+
+} /* zgerqf_ */
diff --git a/contrib/libs/clapack/zgesc2.c b/contrib/libs/clapack/zgesc2.c
new file mode 100644
index 0000000000..90f667ce90
--- /dev/null
+++ b/contrib/libs/clapack/zgesc2.c
@@ -0,0 +1,206 @@
+/* zgesc2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublecomplex c_b13 = {1.,0.};
+static integer c_n1 = -1;
+
+/* Subroutine */ int zgesc2_(integer *n, doublecomplex *a, integer *lda, 
+	doublecomplex *rhs, integer *ipiv, integer *jpiv, doublereal *scale)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+    doublereal d__1;
+    doublecomplex z__1, z__2, z__3;
+
+    /* Builtin functions */
+    double z_abs(doublecomplex *);
+    void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__, j;
+    doublereal eps;
+    doublecomplex temp;
+    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
+	    doublecomplex *, integer *), dlabad_(doublereal *, doublereal *);
+    extern doublereal dlamch_(char *);
+    doublereal bignum;
+    extern integer izamax_(integer *, doublecomplex *, integer *);
+    doublereal smlnum;
+    extern /* Subroutine */ int zlaswp_(integer *, doublecomplex *, integer *, 
+	     integer *, integer *, integer *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGESC2 solves a system of linear equations */
+
+/* A * X = scale* RHS */
+
+/* with a general N-by-N matrix A using the LU factorization with */
+/* complete pivoting computed by ZGETC2. */
+
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA, N) */
+/* On entry, the LU part of the factorization of the n-by-n */
+/* matrix A computed by ZGETC2: A = P * L * U * Q */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1, N). */
+
+/* RHS (input/output) COMPLEX*16 array, dimension N. */
+/* On entry, the right hand side vector b. */
+/* On exit, the solution vector X. */
+
+/* IPIV (input) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= i <= N, row i of the */
+/* matrix has been interchanged with row IPIV(i). */
+
+/* JPIV (input) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= j <= N, column j of the */
+/* matrix has been interchanged with column JPIV(j). */
+
+/* SCALE (output) DOUBLE PRECISION */
+/* On exit, SCALE contains the scale factor. SCALE is chosen */
+/* 0 <= SCALE <= 1 to prevent overflow in the solution. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Set constant to control overflow */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --rhs;
+    --ipiv;
+    --jpiv;
+
+    /* Function Body */
+    eps = dlamch_("P");
+    smlnum = dlamch_("S") / eps;
+    bignum = 1. / smlnum;
+    dlabad_(&smlnum, &bignum);
+
+/* Apply permutations IPIV to RHS */
+
+/* NOTE(review): RHS is passed to ZLASWP as a single column with leading */
+/* dimension LDA, mirroring the original Fortran; the LDA stride is not */
+/* exercised for a one-column matrix -- confirm against the ZLASWP spec. */
+    i__1 = *n - 1;
+    zlaswp_(&c__1, &rhs[1], lda, &c__1, &i__1, &ipiv[1], &c__1);
+
+/* Solve for L part */
+
+/* Forward substitution with unit-lower-triangular L (diagonal implicit). */
+    i__1 = *n - 1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = *n;
+	for (j = i__ + 1; j <= i__2; ++j) {
+	    i__3 = j;
+	    i__4 = j;
+	    i__5 = j + i__ * a_dim1;
+	    i__6 = i__;
+	    z__2.r = a[i__5].r * rhs[i__6].r - a[i__5].i * rhs[i__6].i, 
+		    z__2.i = a[i__5].r * rhs[i__6].i + a[i__5].i * rhs[i__6]
+		    .r;
+	    z__1.r = rhs[i__4].r - z__2.r, z__1.i = rhs[i__4].i - z__2.i;
+	    rhs[i__3].r = z__1.r, rhs[i__3].i = z__1.i;
+/* L10: */
+	}
+/* L20: */
+    }
+
+/* Solve for U part */
+
+    *scale = 1.;
+
+/* Check for scaling */
+
+/* If the largest RHS entry could overflow when divided by the diagonal */
+/* entry U(n,n), rescale RHS so that its largest entry becomes 0.5 and */
+/* record the factor in SCALE. */
+    i__ = izamax_(n, &rhs[1], &c__1);
+    if (smlnum * 2. * z_abs(&rhs[i__]) > z_abs(&a[*n + *n * a_dim1])) {
+	d__1 = z_abs(&rhs[i__]);
+	z__1.r = .5 / d__1, z__1.i = 0. / d__1;
+	temp.r = z__1.r, temp.i = z__1.i;
+	zscal_(n, &temp, &rhs[1], &c__1);
+	*scale *= temp.r;
+    }
+/* Back substitution: divide by the diagonal U(i,i) and eliminate the */
+/* contribution of already-solved components above. */
+    for (i__ = *n; i__ >= 1; --i__) {
+	z_div(&z__1, &c_b13, &a[i__ + i__ * a_dim1]);
+	temp.r = z__1.r, temp.i = z__1.i;
+	i__1 = i__;
+	i__2 = i__;
+	z__1.r = rhs[i__2].r * temp.r - rhs[i__2].i * temp.i, z__1.i = rhs[
+		i__2].r * temp.i + rhs[i__2].i * temp.r;
+	rhs[i__1].r = z__1.r, rhs[i__1].i = z__1.i;
+	i__1 = *n;
+	for (j = i__ + 1; j <= i__1; ++j) {
+	    i__2 = i__;
+	    i__3 = i__;
+	    i__4 = j;
+	    i__5 = i__ + j * a_dim1;
+	    z__3.r = a[i__5].r * temp.r - a[i__5].i * temp.i, z__3.i = a[i__5]
+		    .r * temp.i + a[i__5].i * temp.r;
+	    z__2.r = rhs[i__4].r * z__3.r - rhs[i__4].i * z__3.i, z__2.i = 
+		    rhs[i__4].r * z__3.i + rhs[i__4].i * z__3.r;
+	    z__1.r = rhs[i__3].r - z__2.r, z__1.i = rhs[i__3].i - z__2.i;
+	    rhs[i__2].r = z__1.r, rhs[i__2].i = z__1.i;
+/* L30: */
+	}
+/* L40: */
+    }
+
+/* Apply permutations JPIV to the solution (RHS) */
+
+    i__1 = *n - 1;
+    zlaswp_(&c__1, &rhs[1], lda, &c__1, &i__1, &jpiv[1], &c_n1);
+    return 0;
+
+/* End of ZGESC2 */
+
+} /* zgesc2_ */
diff --git a/contrib/libs/clapack/zgesdd.c b/contrib/libs/clapack/zgesdd.c
new file mode 100644
index 0000000000..6bc435d3a5
--- /dev/null
+++ b/contrib/libs/clapack/zgesdd.c
@@ -0,0 +1,2252 @@
+/* zgesdd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__0 = 0;
+
+/* Subroutine */ int zgesdd_(char *jobz, integer *m, integer *n,
+ doublecomplex *a, integer *lda, doublereal *s, doublecomplex *u,
+ integer *ldu, doublecomplex *vt, integer *ldvt, doublecomplex *work,
+ integer *lwork, doublereal *rwork, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+ i__2, i__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, ie, il, ir, iu, blk;
+ doublereal dum[1], eps;
+ integer iru, ivt, iscl;
+ doublereal anrm;
+ integer idum[1], ierr, itau, irvt;
+ extern logical lsame_(char *, char *);
+ integer chunk, minmn;
+ extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ integer wrkbl, itaup, itauq;
+ logical wntqa;
+ integer nwork;
+ logical wntqn, wntqo, wntqs;
+ extern /* Subroutine */ int zlacp2_(char *, integer *, integer *,
+ doublereal *, integer *, doublecomplex *, integer *);
+ integer mnthr1, mnthr2;
+ extern /* Subroutine */ int dbdsdc_(char *, char *, integer *, doublereal
+ *, doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *, integer *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), xerbla_(char *, integer *),
+ zgebrd_(integer *, integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *, doublecomplex *, doublecomplex *,
+ doublecomplex *, integer *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ doublereal bignum;
+ extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ extern /* Subroutine */ int zgelqf_(integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *
+), zlacrm_(integer *, integer *, doublecomplex *, integer *,
+ doublereal *, integer *, doublecomplex *, integer *, doublereal *)
+ , zlarcm_(integer *, integer *, doublereal *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublereal *), zlascl_(char *, integer *, integer *, doublereal *,
+ doublereal *, integer *, integer *, doublecomplex *, integer *,
+ integer *), zgeqrf_(integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *
+);
+ integer ldwrkl;
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *),
+ zlaset_(char *, integer *, integer *, doublecomplex *,
+ doublecomplex *, doublecomplex *, integer *);
+ integer ldwrkr, minwrk, ldwrku, maxwrk;
+ extern /* Subroutine */ int zungbr_(char *, integer *, integer *, integer
+ *, doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *);
+ integer ldwkvt;
+ doublereal smlnum;
+ logical wntqas;
+ extern /* Subroutine */ int zunmbr_(char *, char *, char *, integer *,
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *
+), zunglq_(integer *, integer *, integer *
+, doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *);
+ integer nrwork;
+ extern /* Subroutine */ int zungqr_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+/* 8-15-00: Improve consistency of WS calculations (eca) */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGESDD computes the singular value decomposition (SVD) of a complex */
+/* M-by-N matrix A, optionally computing the left and/or right singular */
+/* vectors, by using divide-and-conquer method. The SVD is written */
+
+/* A = U * SIGMA * conjugate-transpose(V) */
+
+/* where SIGMA is an M-by-N matrix which is zero except for its */
+/* min(m,n) diagonal elements, U is an M-by-M unitary matrix, and */
+/* V is an N-by-N unitary matrix. The diagonal elements of SIGMA */
+/* are the singular values of A; they are real and non-negative, and */
+/* are returned in descending order. The first min(m,n) columns of */
+/* U and V are the left and right singular vectors of A. */
+
+/* Note that the routine returns VT = V**H, not V. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* Specifies options for computing all or part of the matrix U: */
+/* = 'A': all M columns of U and all N rows of V**H are */
+/* returned in the arrays U and VT; */
+/* = 'S': the first min(M,N) columns of U and the first */
+/* min(M,N) rows of V**H are returned in the arrays U */
+/* and VT; */
+/* = 'O': If M >= N, the first N columns of U are overwritten */
+/* in the array A and all rows of V**H are returned in */
+/* the array VT; */
+/* otherwise, all columns of U are returned in the */
+/* array U and the first M rows of V**H are overwritten */
+/* in the array A; */
+/* = 'N': no columns of U or rows of V**H are computed. */
+
+/* M (input) INTEGER */
+/* The number of rows of the input matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the input matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* if JOBZ = 'O', A is overwritten with the first N columns */
+/* of U (the left singular vectors, stored */
+/* columnwise) if M >= N; */
+/* A is overwritten with the first M rows */
+/* of V**H (the right singular vectors, stored */
+/* rowwise) otherwise. */
+/* if JOBZ .ne. 'O', the contents of A are destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* S (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The singular values of A, sorted so that S(i) >= S(i+1). */
+
+/* U (output) COMPLEX*16 array, dimension (LDU,UCOL) */
+/* UCOL = M if JOBZ = 'A' or JOBZ = 'O' and M < N; */
+/* UCOL = min(M,N) if JOBZ = 'S'. */
+/* If JOBZ = 'A' or JOBZ = 'O' and M < N, U contains the M-by-M */
+/* unitary matrix U; */
+/* if JOBZ = 'S', U contains the first min(M,N) columns of U */
+/* (the left singular vectors, stored columnwise); */
+/* if JOBZ = 'O' and M >= N, or JOBZ = 'N', U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= 1; if */
+/* JOBZ = 'S' or 'A' or JOBZ = 'O' and M < N, LDU >= M. */
+
+/* VT (output) COMPLEX*16 array, dimension (LDVT,N) */
+/* If JOBZ = 'A' or JOBZ = 'O' and M >= N, VT contains the */
+/* N-by-N unitary matrix V**H; */
+/* if JOBZ = 'S', VT contains the first min(M,N) rows of */
+/* V**H (the right singular vectors, stored rowwise); */
+/* if JOBZ = 'O' and M < N, or JOBZ = 'N', VT is not referenced. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. LDVT >= 1; if */
+/* JOBZ = 'A' or JOBZ = 'O' and M >= N, LDVT >= N; */
+/* if JOBZ = 'S', LDVT >= min(M,N). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= 1. */
+/* if JOBZ = 'N', LWORK >= 2*min(M,N)+max(M,N). */
+/* if JOBZ = 'O', */
+/* LWORK >= 2*min(M,N)*min(M,N)+2*min(M,N)+max(M,N). */
+/* if JOBZ = 'S' or 'A', */
+/* LWORK >= min(M,N)*min(M,N)+2*min(M,N)+max(M,N). */
+/* For good performance, LWORK should generally be larger. */
+
+/* If LWORK = -1, a workspace query is assumed. The optimal */
+/* size for the WORK array is calculated and stored in WORK(1), */
+/* and no other work except argument checking is performed. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LRWORK)) */
+/* If JOBZ = 'N', LRWORK >= 5*min(M,N). */
+/* Otherwise, LRWORK >= 5*min(M,N)*min(M,N) + 7*min(M,N) */
+
+/* IWORK (workspace) INTEGER array, dimension (8*min(M,N)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: The updating process of DBDSDC did not converge. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ mnthr1 = (integer) (minmn * 17. / 9.);
+ mnthr2 = (integer) (minmn * 5. / 3.);
+ wntqa = lsame_(jobz, "A");
+ wntqs = lsame_(jobz, "S");
+ wntqas = wntqa || wntqs;
+ wntqo = lsame_(jobz, "O");
+ wntqn = lsame_(jobz, "N");
+ minwrk = 1;
+ maxwrk = 1;
+
+ if (! (wntqa || wntqs || wntqo || wntqn)) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldu < 1 || wntqas && *ldu < *m || wntqo && *m < *n && *ldu < *
+ m) {
+ *info = -8;
+ } else if (*ldvt < 1 || wntqa && *ldvt < *n || wntqs && *ldvt < minmn ||
+ wntqo && *m >= *n && *ldvt < *n) {
+ *info = -10;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* CWorkspace refers to complex workspace, and RWorkspace to */
+/* real workspace. NB refers to the optimal block size for the */
+/* immediately following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0 && *m > 0 && *n > 0) {
+ if (*m >= *n) {
+
+/* There is no complex work space needed for bidiagonal SVD */
+/* The real work space needed for bidiagonal SVD is BDSPAC */
+/* for computing singular values and singular vectors; BDSPAN */
+/* for computing singular values only. */
+/* BDSPAC = 5*N*N + 7*N */
+/* BDSPAN = MAX(7*N+4, 3*N+2+SMLSIZ*(SMLSIZ+8)) */
+
+ if (*m >= mnthr1) {
+ if (wntqn) {
+
+/* Path 1 (M much larger than N, JOBZ='N') */
+
+ maxwrk = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+ minwrk = *n * 3;
+ } else if (wntqo) {
+
+/* Path 2 (M much larger than N, JOBZ='O') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "ZUNGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "QLN", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "PRC", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *m * *n + *n * *n + wrkbl;
+ minwrk = (*n << 1) * *n + *n * 3;
+ } else if (wntqs) {
+
+/* Path 3 (M much larger than N, JOBZ='S') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "ZUNGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "QLN", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "PRC", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *n * *n + wrkbl;
+ minwrk = *n * *n + *n * 3;
+ } else if (wntqa) {
+
+/* Path 4 (M much larger than N, JOBZ='A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *m * ilaenv_(&c__1, "ZUNGQR",
+ " ", m, m, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "QLN", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "PRC", n, n, n, &c_n1);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *n * *n + wrkbl;
+ minwrk = *n * *n + (*n << 1) + *m;
+ }
+ } else if (*m >= mnthr2) {
+
+/* Path 5 (M much larger than N, but not as much as MNTHR1) */
+
+ maxwrk = (*n << 1) + (*m + *n) * ilaenv_(&c__1, "ZGEBRD",
+ " ", m, n, &c_n1, &c_n1);
+ minwrk = (*n << 1) + *m;
+ if (wntqo) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "P", n, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ maxwrk += *m * *n;
+ minwrk += *n * *n;
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "P", n, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "P", n, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, m, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ }
+ } else {
+
+/* Path 6 (M at least N, but not much larger) */
+
+ maxwrk = (*n << 1) + (*m + *n) * ilaenv_(&c__1, "ZGEBRD",
+ " ", m, n, &c_n1, &c_n1);
+ minwrk = (*n << 1) + *m;
+ if (wntqo) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "PRC", n, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "QLN", m, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ maxwrk += *m * *n;
+ minwrk += *n * *n;
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "PRC", n, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNMBR", "QLN", m, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "PRC", n, n, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "QLN", m, m, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ }
+ }
+ } else {
+
+/* There is no complex work space needed for bidiagonal SVD */
+/* The real work space needed for bidiagonal SVD is BDSPAC */
+/* for computing singular values and singular vectors; BDSPAN */
+/* for computing singular values only. */
+/* BDSPAC = 5*M*M + 7*M */
+/* BDSPAN = MAX(7*M+4, 3*M+2+SMLSIZ*(SMLSIZ+8)) */
+
+ if (*n >= mnthr1) {
+ if (wntqn) {
+
+/* Path 1t (N much larger than M, JOBZ='N') */
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1);
+ maxwrk = max(i__1,i__2);
+ minwrk = *m * 3;
+ } else if (wntqo) {
+
+/* Path 2t (N much larger than M, JOBZ='O') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "ZUNGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNMBR", "PRC", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNMBR", "QLN", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *m * *n + *m * *m + wrkbl;
+ minwrk = (*m << 1) * *m + *m * 3;
+ } else if (wntqs) {
+
+/* Path 3t (N much larger than M, JOBZ='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "ZUNGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNMBR", "PRC", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNMBR", "QLN", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *m * *m + wrkbl;
+ minwrk = *m * *m + *m * 3;
+ } else if (wntqa) {
+
+/* Path 4t (N much larger than M, JOBZ='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *n * ilaenv_(&c__1, "ZUNGLQ",
+ " ", n, n, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNMBR", "PRC", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNMBR", "QLN", m, m, m, &c_n1);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *m * *m + wrkbl;
+ minwrk = *m * *m + (*m << 1) + *n;
+ }
+ } else if (*n >= mnthr2) {
+
+/* Path 5t (N much larger than M, but not as much as MNTHR1) */
+
+ maxwrk = (*m << 1) + (*m + *n) * ilaenv_(&c__1, "ZGEBRD",
+ " ", m, n, &c_n1, &c_n1);
+ minwrk = (*m << 1) + *n;
+ if (wntqo) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "P", m, n, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, m, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ maxwrk += *m * *n;
+ minwrk += *m * *m;
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "P", m, n, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, m, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "P", n, n, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, m, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ }
+ } else {
+
+/* Path 6t (N greater than M, but not much larger) */
+
+ maxwrk = (*m << 1) + (*m + *n) * ilaenv_(&c__1, "ZGEBRD",
+ " ", m, n, &c_n1, &c_n1);
+ minwrk = (*m << 1) + *n;
+ if (wntqo) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNMBR", "PRC", m, n, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNMBR", "QLN", m, m, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ maxwrk += *m * *n;
+ minwrk += *m * *m;
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "PRC", m, n, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "QLN", m, m, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "PRC", n, n, m, &c_n1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "QLN", m, m, n, &c_n1);
+ maxwrk = max(i__1,i__2);
+ }
+ }
+ }
+ maxwrk = max(maxwrk,minwrk);
+ }
+ if (*info == 0) {
+ work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+ if (*lwork < minwrk && *lwork != -1) {
+ *info = -13;
+ }
+ }
+
+/* Quick returns */
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGESDD", &i__1);
+ return 0;
+ }
+ if (*lwork == -1) {
+ return 0;
+ }
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = dlamch_("P");
+ smlnum = sqrt(dlamch_("S")) / eps;
+ bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = zlange_("M", m, n, &a[a_offset], lda, dum);
+ iscl = 0;
+ if (anrm > 0. && anrm < smlnum) {
+ iscl = 1;
+ zlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, &
+ ierr);
+ } else if (anrm > bignum) {
+ iscl = 1;
+ zlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+ if (*m >= *n) {
+
+/* A has at least as many rows as columns. If A has sufficiently */
+/* more rows than columns, first reduce using the QR */
+/* decomposition (if sufficient workspace available) */
+
+ if (*m >= mnthr1) {
+
+ if (wntqn) {
+
+/* Path 1 (M much larger than N, JOBZ='N') */
+/* No singular vectors to be computed */
+
+ itau = 1;
+ nwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: need 0) */
+
+ i__1 = *lwork - nwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Zero out below R */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &a[a_dim1 + 2], lda);
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize R in A */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__1 = *lwork - nwork + 1;
+ zgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+ nrwork = ie + *n;
+
+/* Perform bidiagonal SVD, compute singular values only */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAN) */
+
+ dbdsdc_("U", "N", n, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+
+ } else if (wntqo) {
+
+/* Path 2 (M much larger than N, JOBZ='O') */
+/* N left singular vectors to be overwritten on A and */
+/* N right singular vectors to be computed in VT */
+
+ iu = 1;
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ ir = iu + ldwrku * *n;
+ if (*lwork >= *m * *n + *n * *n + *n * 3) {
+
+/* WORK(IR) is M by N */
+
+ ldwrkr = *m;
+ } else {
+ ldwrkr = (*lwork - *n * *n - *n * 3) / *n;
+ }
+ itau = ir + ldwrkr * *n;
+ nwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need N*N+2*N, prefer M*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__1 = *lwork - nwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Copy R to WORK( IR ), zeroing out below it */
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &work[ir + 1], &
+ ldwrkr);
+
+/* Generate Q in A */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__1 = *lwork - nwork + 1;
+ zungqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__1, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IR) */
+/* (CWorkspace: need N*N+3*N, prefer M*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__1 = *lwork - nwork + 1;
+ zgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of R in WORK(IRU) and computing right singular vectors */
+/* of R in WORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ iru = ie + *n;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix WORK(IU) */
+/* Overwrite WORK(IU) by the left singular vectors of R */
+/* (CWorkspace: need 2*N*N+3*N, prefer M*N+N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ zlacp2_("F", n, n, &rwork[iru], n, &work[iu], &ldwrku);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__1, &
+ ierr);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix VT */
+/* Overwrite VT by the right singular vectors of R */
+/* (CWorkspace: need N*N+3*N, prefer M*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ zlacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", n, n, n, &work[ir], &ldwrkr, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IU), storing result in WORK(IR) and copying to A */
+/* (CWorkspace: need 2*N*N, prefer N*N+M*N) */
+/* (RWorkspace: 0) */
+
+ i__1 = *m;
+ i__2 = ldwrkr;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrkr);
+ zgemm_("N", "N", &chunk, n, n, &c_b2, &a[i__ + a_dim1],
+ lda, &work[iu], &ldwrku, &c_b1, &work[ir], &
+ ldwrkr);
+ zlacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ +
+ a_dim1], lda);
+/* L10: */
+ }
+
+ } else if (wntqs) {
+
+/* Path 3 (M much larger than N, JOBZ='S') */
+/* N left singular vectors to be computed in U and */
+/* N right singular vectors to be computed in VT */
+
+ ir = 1;
+
+/* WORK(IR) is N by N */
+
+ ldwrkr = *n;
+ itau = ir + ldwrkr * *n;
+ nwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - nwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__2 = *n - 1;
+ i__1 = *n - 1;
+ zlaset_("L", &i__2, &i__1, &c_b1, &c_b1, &work[ir + 1], &
+ ldwrkr);
+
+/* Generate Q in A */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - nwork + 1;
+ zungqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IR) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - nwork + 1;
+ zgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ iru = ie + *n;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix U */
+/* Overwrite U by left singular vectors of R */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ zlacp2_("F", n, n, &rwork[iru], n, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix VT */
+/* Overwrite VT by right singular vectors of R */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ zlacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", n, n, n, &work[ir], &ldwrkr, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IR), storing result in U */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: 0) */
+
+ zlacpy_("F", n, n, &u[u_offset], ldu, &work[ir], &ldwrkr);
+ zgemm_("N", "N", m, n, n, &c_b2, &a[a_offset], lda, &work[ir],
+ &ldwrkr, &c_b1, &u[u_offset], ldu);
+
+ } else if (wntqa) {
+
+/* Path 4 (M much larger than N, JOBZ='A') */
+/* M left singular vectors to be computed in U and */
+/* N right singular vectors to be computed in VT */
+
+ iu = 1;
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ itau = iu + ldwrku * *n;
+ nwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - nwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+ zlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+
+/* Generate Q in U */
+/* (CWorkspace: need N+M, prefer N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - nwork + 1;
+ zungqr_(m, m, n, &u[u_offset], ldu, &work[itau], &work[nwork],
+ &i__2, &ierr);
+
+/* Produce R in A, zeroing out below it */
+
+ i__2 = *n - 1;
+ i__1 = *n - 1;
+ zlaset_("L", &i__2, &i__1, &c_b1, &c_b1, &a[a_dim1 + 2], lda);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize R in A */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - nwork + 1;
+ zgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+ iru = ie + *n;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix WORK(IU) */
+/* Overwrite WORK(IU) by left singular vectors of R */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ zlacp2_("F", n, n, &rwork[iru], n, &work[iu], &ldwrku);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", n, n, n, &a[a_offset], lda, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__2, &
+ ierr);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix VT */
+/* Overwrite VT by right singular vectors of R */
+/* (CWorkspace: need 3*N, prefer 2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ zlacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/* Multiply Q in U by left singular vectors of R in */
+/* WORK(IU), storing result in A */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: 0) */
+
+ zgemm_("N", "N", m, n, n, &c_b2, &u[u_offset], ldu, &work[iu],
+ &ldwrku, &c_b1, &a[a_offset], lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ zlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+
+ }
+
+ } else if (*m >= mnthr2) {
+
+/* MNTHR2 <= M < MNTHR1 */
+
+/* Path 5 (M much larger than N, but not as much as MNTHR1) */
+/* Reduce to bidiagonal form without QR decomposition, use */
+/* ZUNGBR and matrix multiplication to compute singular vectors */
+
+ ie = 1;
+ nrwork = ie + *n;
+ itauq = 1;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize A */
+/* (CWorkspace: need 2*N+M, prefer 2*N+(M+N)*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - nwork + 1;
+ zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__2, &ierr);
+ if (wntqn) {
+
+/* Compute singular values only */
+/* (Cworkspace: 0) */
+/* (Rworkspace: need BDSPAN) */
+
+ dbdsdc_("U", "N", n, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+ } else if (wntqo) {
+ iu = nwork;
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+
+/* Copy A to VT, generate P**H */
+/* (Cworkspace: need 2*N, prefer N+N*NB) */
+/* (Rworkspace: 0) */
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ zungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/* Generate Q in A */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - nwork + 1;
+ zungbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &work[
+ nwork], &i__2, &ierr);
+
+ if (*lwork >= *m * *n + *n * 3) {
+
+/* WORK( IU ) is M by N */
+
+ ldwrku = *m;
+ } else {
+
+/* WORK(IU) is LDWRKU by N */
+
+ ldwrku = (*lwork - *n * 3) / *n;
+ }
+ nwork = iu + ldwrku * *n;
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Multiply real matrix RWORK(IRVT) by P**H in VT, */
+/* storing the result in WORK(IU), copying to VT */
+/* (Cworkspace: need 0) */
+/* (Rworkspace: need 3*N*N) */
+
+ zlarcm_(n, n, &rwork[irvt], n, &vt[vt_offset], ldvt, &work[iu]
+, &ldwrku, &rwork[nrwork]);
+ zlacpy_("F", n, n, &work[iu], &ldwrku, &vt[vt_offset], ldvt);
+
+/* Multiply Q in A by real matrix RWORK(IRU), storing the */
+/* result in WORK(IU), copying to A */
+/* (CWorkspace: need N*N, prefer M*N) */
+/* (Rworkspace: need 3*N*N, prefer N*N+2*M*N) */
+
+ nrwork = irvt;
+ i__2 = *m;
+ i__1 = ldwrku;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrku);
+ zlacrm_(&chunk, n, &a[i__ + a_dim1], lda, &rwork[iru], n,
+ &work[iu], &ldwrku, &rwork[nrwork]);
+ zlacpy_("F", &chunk, n, &work[iu], &ldwrku, &a[i__ +
+ a_dim1], lda);
+/* L20: */
+ }
+
+ } else if (wntqs) {
+
+/* Copy A to VT, generate P**H */
+/* (Cworkspace: need 2*N, prefer N+N*NB) */
+/* (Rworkspace: 0) */
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ zungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__1, &ierr);
+
+/* Copy A to U, generate Q */
+/* (Cworkspace: need 2*N, prefer N+N*NB) */
+/* (Rworkspace: 0) */
+
+ zlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ zungbr_("Q", m, n, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__1, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Multiply real matrix RWORK(IRVT) by P**H in VT, */
+/* storing the result in A, copying to VT */
+/* (Cworkspace: need 0) */
+/* (Rworkspace: need 3*N*N) */
+
+ zlarcm_(n, n, &rwork[irvt], n, &vt[vt_offset], ldvt, &a[
+ a_offset], lda, &rwork[nrwork]);
+ zlacpy_("F", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+/* Multiply Q in U by real matrix RWORK(IRU), storing the */
+/* result in A, copying to U */
+/* (CWorkspace: need 0) */
+/* (Rworkspace: need N*N+2*M*N) */
+
+ nrwork = irvt;
+ zlacrm_(m, n, &u[u_offset], ldu, &rwork[iru], n, &a[a_offset],
+ lda, &rwork[nrwork]);
+ zlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ } else {
+
+/* Copy A to VT, generate P**H */
+/* (Cworkspace: need 2*N, prefer N+N*NB) */
+/* (Rworkspace: 0) */
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ zungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__1, &ierr);
+
+/* Copy A to U, generate Q */
+/* (Cworkspace: need 2*N, prefer N+N*NB) */
+/* (Rworkspace: 0) */
+
+ zlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ zungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__1, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Multiply real matrix RWORK(IRVT) by P**H in VT, */
+/* storing the result in A, copying to VT */
+/* (Cworkspace: need 0) */
+/* (Rworkspace: need 3*N*N) */
+
+ zlarcm_(n, n, &rwork[irvt], n, &vt[vt_offset], ldvt, &a[
+ a_offset], lda, &rwork[nrwork]);
+ zlacpy_("F", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+/* Multiply Q in U by real matrix RWORK(IRU), storing the */
+/* result in A, copying to U */
+/* (CWorkspace: 0) */
+/* (Rworkspace: need 3*N*N) */
+
+ nrwork = irvt;
+ zlacrm_(m, n, &u[u_offset], ldu, &rwork[iru], n, &a[a_offset],
+ lda, &rwork[nrwork]);
+ zlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ }
+
+ } else {
+
+/* M .LT. MNTHR2 */
+
+/* Path 6 (M at least N, but not much larger) */
+/* Reduce to bidiagonal form without QR decomposition */
+/* Use ZUNMBR to compute singular vectors */
+
+ ie = 1;
+ nrwork = ie + *n;
+ itauq = 1;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/* Bidiagonalize A */
+/* (CWorkspace: need 2*N+M, prefer 2*N+(M+N)*NB) */
+/* (RWorkspace: need N) */
+
+ i__1 = *lwork - nwork + 1;
+ zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, &ierr);
+ if (wntqn) {
+
+/* Compute singular values only */
+/* (Cworkspace: 0) */
+/* (Rworkspace: need BDSPAN) */
+
+ dbdsdc_("U", "N", n, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+ } else if (wntqo) {
+ iu = nwork;
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ if (*lwork >= *m * *n + *n * 3) {
+
+/* WORK( IU ) is M by N */
+
+ ldwrku = *m;
+ } else {
+
+/* WORK( IU ) is LDWRKU by N */
+
+ ldwrku = (*lwork - *n * 3) / *n;
+ }
+ nwork = iu + ldwrku * *n;
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix VT */
+/* Overwrite VT by right singular vectors of A */
+/* (Cworkspace: need 2*N, prefer N+N*NB) */
+/* (Rworkspace: need 0) */
+
+ zlacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+
+ if (*lwork >= *m * *n + *n * 3) {
+
+/* Copy real matrix RWORK(IRU) to complex matrix WORK(IU) */
+/* Overwrite WORK(IU) by left singular vectors of A, copying */
+/* to A */
+/* (Cworkspace: need M*N+2*N, prefer M*N+N+N*NB) */
+/* (Rworkspace: need 0) */
+
+ zlaset_("F", m, n, &c_b1, &c_b1, &work[iu], &ldwrku);
+ zlacp2_("F", n, n, &rwork[iru], n, &work[iu], &ldwrku);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__1, &
+ ierr);
+ zlacpy_("F", m, n, &work[iu], &ldwrku, &a[a_offset], lda);
+ } else {
+
+/* Generate Q in A */
+/* (Cworkspace: need 2*N, prefer N+N*NB) */
+/* (Rworkspace: need 0) */
+
+ i__1 = *lwork - nwork + 1;
+ zungbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &
+ work[nwork], &i__1, &ierr);
+
+/* Multiply Q in A by real matrix RWORK(IRU), storing the */
+/* result in WORK(IU), copying to A */
+/* (CWorkspace: need N*N, prefer M*N) */
+/* (Rworkspace: need 3*N*N, prefer N*N+2*M*N) */
+
+ nrwork = irvt;
+ i__1 = *m;
+ i__2 = ldwrku;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrku);
+ zlacrm_(&chunk, n, &a[i__ + a_dim1], lda, &rwork[iru],
+ n, &work[iu], &ldwrku, &rwork[nrwork]);
+ zlacpy_("F", &chunk, n, &work[iu], &ldwrku, &a[i__ +
+ a_dim1], lda);
+/* L30: */
+ }
+ }
+
+ } else if (wntqs) {
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix U */
+/* Overwrite U by left singular vectors of A */
+/* (CWorkspace: need 3*N, prefer 2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ zlaset_("F", m, n, &c_b1, &c_b1, &u[u_offset], ldu)
+ ;
+ zlacp2_("F", n, n, &rwork[iru], n, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix VT */
+/* Overwrite VT by right singular vectors of A */
+/* (CWorkspace: need 3*N, prefer 2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ zlacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+ } else {
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ dbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Set the right corner of U to identity matrix */
+
+ zlaset_("F", m, m, &c_b1, &c_b1, &u[u_offset], ldu)
+ ;
+ if (*m > *n) {
+ i__2 = *m - *n;
+ i__1 = *m - *n;
+ zlaset_("F", &i__2, &i__1, &c_b1, &c_b2, &u[*n + 1 + (*n
+ + 1) * u_dim1], ldu);
+ }
+
+/* Copy real matrix RWORK(IRU) to complex matrix U */
+/* Overwrite U by left singular vectors of A */
+/* (CWorkspace: need 2*N+M, prefer 2*N+M*NB) */
+/* (RWorkspace: 0) */
+
+ zlacp2_("F", n, n, &rwork[iru], n, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix VT */
+/* Overwrite VT by right singular vectors of A */
+/* (CWorkspace: need 3*N, prefer 2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ zlacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+ }
+
+ }
+
+ } else {
+
+/* A has more columns than rows. If A has sufficiently more */
+/* columns than rows, first reduce using the LQ decomposition (if */
+/* sufficient workspace available) */
+
+ if (*n >= mnthr1) {
+
+ if (wntqn) {
+
+/* Path 1t (N much larger than M, JOBZ='N') */
+/* No singular vectors to be computed */
+
+ itau = 1;
+ nwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - nwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Zero out above L */
+
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ zlaset_("U", &i__2, &i__1, &c_b1, &c_b1, &a[(a_dim1 << 1) + 1]
+, lda);
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize L in A */
+/* (CWorkspace: need 3*M, prefer 2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - nwork + 1;
+ zgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+ nrwork = ie + *m;
+
+/* Perform bidiagonal SVD, compute singular values only */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAN) */
+
+ dbdsdc_("U", "N", m, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+
+ } else if (wntqo) {
+
+/* Path 2t (N much larger than M, JOBZ='O') */
+/* M right singular vectors to be overwritten on A and */
+/* M left singular vectors to be computed in U */
+
+ ivt = 1;
+ ldwkvt = *m;
+
+/* WORK(IVT) is M by M */
+
+ il = ivt + ldwkvt * *m;
+ if (*lwork >= *m * *n + *m * *m + *m * 3) {
+
+/* WORK(IL) M by N */
+
+ ldwrkl = *m;
+ chunk = *n;
+ } else {
+
+/* WORK(IL) is M by CHUNK */
+
+ ldwrkl = *m;
+ chunk = (*lwork - *m * *m - *m * 3) / *m;
+ }
+ itau = il + ldwrkl * chunk;
+ nwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - nwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Copy L to WORK(IL), zeroing about above it */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ zlaset_("U", &i__2, &i__1, &c_b1, &c_b1, &work[il + ldwrkl], &
+ ldwrkl);
+
+/* Generate Q in A */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - nwork + 1;
+ zunglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IL) */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - nwork + 1;
+ zgebrd_(m, m, &work[il], &ldwrkl, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ iru = ie + *m;
+ irvt = iru + *m * *m;
+ nrwork = irvt + *m * *m;
+ dbdsdc_("U", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix WORK(IU) */
+/* Overwrite WORK(IU) by the left singular vectors of L */
+/* (CWorkspace: need N*N+3*N, prefer M*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ zlacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix WORK(IVT) */
+/* Overwrite WORK(IVT) by the right singular vectors of L */
+/* (CWorkspace: need N*N+3*N, prefer M*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ zlacp2_("F", m, m, &rwork[irvt], m, &work[ivt], &ldwkvt);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", m, m, m, &work[il], &ldwrkl, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2, &
+ ierr);
+
+/* Multiply right singular vectors of L in WORK(IL) by Q */
+/* in A, storing result in WORK(IL) and copying to A */
+/* (CWorkspace: need 2*M*M, prefer M*M+M*N)) */
+/* (RWorkspace: 0) */
+
+ i__2 = *n;
+ i__1 = chunk;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ zgemm_("N", "N", m, &blk, m, &c_b2, &work[ivt], m, &a[i__
+ * a_dim1 + 1], lda, &c_b1, &work[il], &ldwrkl);
+ zlacpy_("F", m, &blk, &work[il], &ldwrkl, &a[i__ * a_dim1
+ + 1], lda);
+/* L40: */
+ }
+
+ } else if (wntqs) {
+
+/* Path 3t (N much larger than M, JOBZ='S') */
+/* M right singular vectors to be computed in VT and */
+/* M left singular vectors to be computed in U */
+
+ il = 1;
+
+/* WORK(IL) is M by M */
+
+ ldwrkl = *m;
+ itau = il + ldwrkl * *m;
+ nwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__1 = *lwork - nwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Copy L to WORK(IL), zeroing out above it */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &work[il + ldwrkl], &
+ ldwrkl);
+
+/* Generate Q in A */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__1 = *lwork - nwork + 1;
+ zunglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__1, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IL) */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__1 = *lwork - nwork + 1;
+ zgebrd_(m, m, &work[il], &ldwrkl, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ iru = ie + *m;
+ irvt = iru + *m * *m;
+ nrwork = irvt + *m * *m;
+ dbdsdc_("U", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix U */
+/* Overwrite U by left singular vectors of L */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ zlacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix VT */
+/* Overwrite VT by right singular vectors of L */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ zlacp2_("F", m, m, &rwork[irvt], m, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", m, m, m, &work[il], &ldwrkl, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+
+/* Copy VT to WORK(IL), multiply right singular vectors of L */
+/* in WORK(IL) by Q in A, storing result in VT */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: 0) */
+
+ zlacpy_("F", m, m, &vt[vt_offset], ldvt, &work[il], &ldwrkl);
+ zgemm_("N", "N", m, n, m, &c_b2, &work[il], &ldwrkl, &a[
+ a_offset], lda, &c_b1, &vt[vt_offset], ldvt);
+
+ } else if (wntqa) {
+
+/* Path 9t (N much larger than M, JOBZ='A') */
+/* N right singular vectors to be computed in VT and */
+/* M left singular vectors to be computed in U */
+
+ ivt = 1;
+
+/* WORK(IVT) is M by M */
+
+ ldwkvt = *m;
+ itau = ivt + ldwkvt * *m;
+ nwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__1 = *lwork - nwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+ zlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+/* Generate Q in VT */
+/* (CWorkspace: need M+N, prefer M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__1 = *lwork - nwork + 1;
+ zunglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[
+ nwork], &i__1, &ierr);
+
+/* Produce L in A, zeroing out above it */
+
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &a[(a_dim1 << 1) + 1]
+, lda);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize L in A */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__1 = *lwork - nwork + 1;
+ zgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ iru = ie + *m;
+ irvt = iru + *m * *m;
+ nrwork = irvt + *m * *m;
+ dbdsdc_("U", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix U */
+/* Overwrite U by left singular vectors of L */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ zlacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", m, m, m, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix WORK(IVT) */
+/* Overwrite WORK(IVT) by right singular vectors of L */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ zlacp2_("F", m, m, &rwork[irvt], m, &work[ivt], &ldwkvt);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", m, m, m, &a[a_offset], lda, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__1, &
+ ierr);
+
+/* Multiply right singular vectors of L in WORK(IVT) by */
+/* Q in VT, storing result in A */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: 0) */
+
+ zgemm_("N", "N", m, n, m, &c_b2, &work[ivt], &ldwkvt, &vt[
+ vt_offset], ldvt, &c_b1, &a[a_offset], lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ zlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+ }
+
+ } else if (*n >= mnthr2) {
+
+/* MNTHR2 <= N < MNTHR1 */
+
+/* Path 5t (N much larger than M, but not as much as MNTHR1) */
+/* Reduce to bidiagonal form without QR decomposition, use */
+/* ZUNGBR and matrix multiplication to compute singular vectors */
+
+
+ ie = 1;
+ nrwork = ie + *m;
+ itauq = 1;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize A */
+/* (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB) */
+/* (RWorkspace: M) */
+
+ i__1 = *lwork - nwork + 1;
+ zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, &ierr);
+
+ if (wntqn) {
+
+/* Compute singular values only */
+/* (Cworkspace: 0) */
+/* (Rworkspace: need BDSPAC) */
+
+ dbdsdc_("L", "N", m, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+ } else if (wntqo) {
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ ivt = nwork;
+
+/* Copy A to U, generate Q */
+/* (Cworkspace: need 2*M, prefer M+M*NB) */
+/* (Rworkspace: 0) */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ zungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__1, &ierr);
+
+/* Generate P**H in A */
+/* (Cworkspace: need 2*M, prefer M+M*NB) */
+/* (Rworkspace: 0) */
+
+ i__1 = *lwork - nwork + 1;
+ zungbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &work[
+ nwork], &i__1, &ierr);
+
+ ldwkvt = *m;
+ if (*lwork >= *m * *n + *m * 3) {
+
+/* WORK( IVT ) is M by N */
+
+ nwork = ivt + ldwkvt * *n;
+ chunk = *n;
+ } else {
+
+/* WORK( IVT ) is M by CHUNK */
+
+ chunk = (*lwork - *m * 3) / *m;
+ nwork = ivt + ldwkvt * chunk;
+ }
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ dbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Multiply Q in U by real matrix RWORK(IRVT) */
+/* storing the result in WORK(IVT), copying to U */
+/* (Cworkspace: need 0) */
+/* (Rworkspace: need 2*M*M) */
+
+ zlacrm_(m, m, &u[u_offset], ldu, &rwork[iru], m, &work[ivt], &
+ ldwkvt, &rwork[nrwork]);
+ zlacpy_("F", m, m, &work[ivt], &ldwkvt, &u[u_offset], ldu);
+
+/* Multiply RWORK(IRVT) by P**H in A, storing the */
+/* result in WORK(IVT), copying to A */
+/* (CWorkspace: need M*M, prefer M*N) */
+/* (Rworkspace: need 2*M*M, prefer 2*M*N) */
+
+ nrwork = iru;
+ i__1 = *n;
+ i__2 = chunk;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ zlarcm_(m, &blk, &rwork[irvt], m, &a[i__ * a_dim1 + 1],
+ lda, &work[ivt], &ldwkvt, &rwork[nrwork]);
+ zlacpy_("F", m, &blk, &work[ivt], &ldwkvt, &a[i__ *
+ a_dim1 + 1], lda);
+/* L50: */
+ }
+ } else if (wntqs) {
+
+/* Copy A to U, generate Q */
+/* (Cworkspace: need 2*M, prefer M+M*NB) */
+/* (Rworkspace: 0) */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ zungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__2, &ierr);
+
+/* Copy A to VT, generate P**H */
+/* (Cworkspace: need 2*M, prefer M+M*NB) */
+/* (Rworkspace: 0) */
+
+ zlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ zungbr_("P", m, n, m, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ dbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Multiply Q in U by real matrix RWORK(IRU), storing the */
+/* result in A, copying to U */
+/* (CWorkspace: need 0) */
+/* (Rworkspace: need 3*M*M) */
+
+ zlacrm_(m, m, &u[u_offset], ldu, &rwork[iru], m, &a[a_offset],
+ lda, &rwork[nrwork]);
+ zlacpy_("F", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+
+/* Multiply real matrix RWORK(IRVT) by P**H in VT, */
+/* storing the result in A, copying to VT */
+/* (Cworkspace: need 0) */
+/* (Rworkspace: need M*M+2*M*N) */
+
+ nrwork = iru;
+ zlarcm_(m, n, &rwork[irvt], m, &vt[vt_offset], ldvt, &a[
+ a_offset], lda, &rwork[nrwork]);
+ zlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ } else {
+
+/* Copy A to U, generate Q */
+/* (Cworkspace: need 2*M, prefer M+M*NB) */
+/* (Rworkspace: 0) */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ zungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__2, &ierr);
+
+/* Copy A to VT, generate P**H */
+/* (Cworkspace: need 2*M, prefer M+M*NB) */
+/* (Rworkspace: 0) */
+
+ zlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ zungbr_("P", n, n, m, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ dbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Multiply Q in U by real matrix RWORK(IRU), storing the */
+/* result in A, copying to U */
+/* (CWorkspace: need 0) */
+/* (Rworkspace: need 3*M*M) */
+
+ zlacrm_(m, m, &u[u_offset], ldu, &rwork[iru], m, &a[a_offset],
+ lda, &rwork[nrwork]);
+ zlacpy_("F", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+
+/* Multiply real matrix RWORK(IRVT) by P**H in VT, */
+/* storing the result in A, copying to VT */
+/* (Cworkspace: need 0) */
+/* (Rworkspace: need M*M+2*M*N) */
+
+ zlarcm_(m, n, &rwork[irvt], m, &vt[vt_offset], ldvt, &a[
+ a_offset], lda, &rwork[nrwork]);
+ zlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ }
+
+ } else {
+
+/* N .LT. MNTHR2 */
+
+/* Path 6t (N greater than M, but not much larger) */
+/* Reduce to bidiagonal form without LQ decomposition */
+/* Use ZUNMBR to compute singular vectors */
+
+ ie = 1;
+ nrwork = ie + *m;
+ itauq = 1;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/* Bidiagonalize A */
+/* (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB) */
+/* (RWorkspace: M) */
+
+ i__2 = *lwork - nwork + 1;
+ zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__2, &ierr);
+ if (wntqn) {
+
+/* Compute singular values only */
+/* (Cworkspace: 0) */
+/* (Rworkspace: need BDSPAC) */
+
+ dbdsdc_("L", "N", m, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+ } else if (wntqo) {
+ ldwkvt = *m;
+ ivt = nwork;
+ if (*lwork >= *m * *n + *m * 3) {
+
+/* WORK( IVT ) is M by N */
+
+ zlaset_("F", m, n, &c_b1, &c_b1, &work[ivt], &ldwkvt);
+ nwork = ivt + ldwkvt * *n;
+ } else {
+
+/* WORK( IVT ) is M by CHUNK */
+
+ chunk = (*lwork - *m * 3) / *m;
+ nwork = ivt + ldwkvt * chunk;
+ }
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ dbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix U */
+/* Overwrite U by left singular vectors of A */
+/* (Cworkspace: need 2*M, prefer M+M*NB) */
+/* (Rworkspace: need 0) */
+
+ zlacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+ if (*lwork >= *m * *n + *m * 3) {
+
+/* Copy real matrix RWORK(IRVT) to complex matrix WORK(IVT) */
+/* Overwrite WORK(IVT) by right singular vectors of A, */
+/* copying to A */
+/* (Cworkspace: need M*N+2*M, prefer M*N+M+M*NB) */
+/* (Rworkspace: need 0) */
+
+ zlacp2_("F", m, m, &rwork[irvt], m, &work[ivt], &ldwkvt);
+ i__2 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", m, n, m, &a[a_offset], lda, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2,
+ &ierr);
+ zlacpy_("F", m, n, &work[ivt], &ldwkvt, &a[a_offset], lda);
+ } else {
+
+/* Generate P**H in A */
+/* (Cworkspace: need 2*M, prefer M+M*NB) */
+/* (Rworkspace: need 0) */
+
+ i__2 = *lwork - nwork + 1;
+ zungbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/* Multiply Q in A by real matrix RWORK(IRU), storing the */
+/* result in WORK(IU), copying to A */
+/* (CWorkspace: need M*M, prefer M*N) */
+/* (Rworkspace: need 3*M*M, prefer M*M+2*M*N) */
+
+ nrwork = iru;
+ i__2 = *n;
+ i__1 = chunk;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ zlarcm_(m, &blk, &rwork[irvt], m, &a[i__ * a_dim1 + 1]
+, lda, &work[ivt], &ldwkvt, &rwork[nrwork]);
+ zlacpy_("F", m, &blk, &work[ivt], &ldwkvt, &a[i__ *
+ a_dim1 + 1], lda);
+/* L60: */
+ }
+ }
+ } else if (wntqs) {
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ dbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix U */
+/* Overwrite U by left singular vectors of A */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: M*M) */
+
+ zlacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix VT */
+/* Overwrite VT by right singular vectors of A */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: M*M) */
+
+ zlaset_("F", m, n, &c_b1, &c_b1, &vt[vt_offset], ldvt);
+ zlacp2_("F", m, m, &rwork[irvt], m, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", m, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ } else {
+
+/* Perform bidiagonal SVD, computing left singular vectors */
+/* of bidiagonal matrix in RWORK(IRU) and computing right */
+/* singular vectors of bidiagonal matrix in RWORK(IRVT) */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+
+ dbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Copy real matrix RWORK(IRU) to complex matrix U */
+/* Overwrite U by left singular vectors of A */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: M*M) */
+
+ zlacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+
+/* Set all of VT to identity matrix */
+
+ zlaset_("F", n, n, &c_b1, &c_b2, &vt[vt_offset], ldvt);
+
+/* Copy real matrix RWORK(IRVT) to complex matrix VT */
+/* Overwrite VT by right singular vectors of A */
+/* (CWorkspace: need 2*M+N, prefer 2*M+N*NB) */
+/* (RWorkspace: M*M) */
+
+ zlacp2_("F", m, m, &rwork[irvt], m, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ zunmbr_("P", "R", "C", n, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ }
+
+ }
+
+ }
+
+/* Undo scaling if necessary */
+
+ if (iscl == 1) {
+ if (anrm > bignum) {
+ dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ if (*info != 0 && anrm > bignum) {
+ i__1 = minmn - 1;
+ dlascl_("G", &c__0, &c__0, &bignum, &anrm, &i__1, &c__1, &rwork[
+ ie], &minmn, &ierr);
+ }
+ if (anrm < smlnum) {
+ dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ if (*info != 0 && anrm < smlnum) {
+ i__1 = minmn - 1;
+ dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &i__1, &c__1, &rwork[
+ ie], &minmn, &ierr);
+ }
+ }
+
+/* Return optimal workspace in WORK(1) */
+
+ work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZGESDD */
+
+} /* zgesdd_ */
diff --git a/contrib/libs/clapack/zgesv.c b/contrib/libs/clapack/zgesv.c
new file mode 100644
index 0000000000..ec74b35300
--- /dev/null
+++ b/contrib/libs/clapack/zgesv.c
@@ -0,0 +1,140 @@
+/* zgesv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zgesv_(integer *n, integer *nrhs, doublecomplex *a, 
+	integer *lda, integer *ipiv, doublecomplex *b, integer *ldb, integer *
+	info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern /* Subroutine */ int xerbla_(char *, integer *), zgetrf_(
+	    integer *, integer *, doublecomplex *, integer *, integer *, 
+	    integer *), zgetrs_(char *, integer *, integer *, doublecomplex *, 
+	    integer *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGESV computes the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N matrix and X and B are N-by-NRHS matrices. */
+
+/* The LU decomposition with partial pivoting and row interchanges is */
+/* used to factor A as */
+/* A = P * L * U, */
+/* where P is a permutation matrix, L is unit lower triangular, and U is */
+/* upper triangular. The factored form of A is then used to solve the */
+/* system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the N-by-N coefficient matrix A. */
+/* On exit, the factors L and U from the factorization */
+/* A = P*L*U; the unit diagonal elements of L are not stored. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* The pivot indices that define the permutation matrix P; */
+/* row i of the matrix was interchanged with row IPIV(i). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS matrix of right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, so the solution could not be computed. */
+
+/* ===================================================================== */
+
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* Shift the pointers so the 1-based, column-major (Fortran-style) */
+    /* subscripts used below, e.g. a[a_offset] and ipiv[1], address the */
+    /* caller's 0-based C arrays correctly (standard f2c translation). */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*nrhs < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	/* NOTE(review): routine name is blank-padded to 6 characters -- */
+	/* presumably to match xerbla_'s Fortran-style fixed-width name */
+	/* handling; confirm against xerbla_ before changing. */
+	xerbla_("ZGESV ", &i__1);
+	return 0;
+    }
+
+/* Compute the LU factorization of A. */
+
+    zgetrf_(n, n, &a[a_offset], lda, &ipiv[1], info);
+    if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+	zgetrs_("No transpose", n, nrhs, &a[a_offset], lda, &ipiv[1], &b[
+		b_offset], ldb, info);
+    }
+    /* If zgetrf_ set *info > 0 (exactly singular U), the solve is */
+    /* skipped and *info is passed through to the caller unchanged. */
+    return 0;
+
+/* End of ZGESV */
+
+} /* zgesv_ */
diff --git a/contrib/libs/clapack/zgesvd.c b/contrib/libs/clapack/zgesvd.c
new file mode 100644
index 0000000000..d8f217aefb
--- /dev/null
+++ b/contrib/libs/clapack/zgesvd.c
@@ -0,0 +1,4173 @@
+/* zgesvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__6 = 6;
+static integer c__0 = 0;
+static integer c__2 = 2;
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zgesvd_(char *jobu, char *jobvt, integer *m, integer *n,
+ doublecomplex *a, integer *lda, doublereal *s, doublecomplex *u,
+ integer *ldu, doublecomplex *vt, integer *ldvt, doublecomplex *work,
+ integer *lwork, doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1[2],
+ i__2, i__3, i__4;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, ie, ir, iu, blk, ncu;
+ doublereal dum[1], eps;
+ integer nru;
+ doublecomplex cdum[1];
+ integer iscl;
+ doublereal anrm;
+ integer ierr, itau, ncvt, nrvt;
+ extern logical lsame_(char *, char *);
+ integer chunk, minmn;
+ extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ integer wrkbl, itaup, itauq, mnthr, iwork;
+ logical wntua, wntva, wntun, wntuo, wntvn, wntvo, wntus, wntvs;
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), xerbla_(char *, integer *),
+ zgebrd_(integer *, integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *, doublecomplex *, doublecomplex *,
+ doublecomplex *, integer *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ doublereal bignum;
+ extern /* Subroutine */ int zgelqf_(integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *
+), zlascl_(char *, integer *, integer *, doublereal *, doublereal
+ *, integer *, integer *, doublecomplex *, integer *, integer *), zgeqrf_(integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, doublecomplex *, integer *, integer *), zlacpy_(
+ char *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zlaset_(char *, integer *,
+ integer *, doublecomplex *, doublecomplex *, doublecomplex *,
+ integer *);
+ integer ldwrkr;
+ extern /* Subroutine */ int zbdsqr_(char *, integer *, integer *, integer
+ *, integer *, doublereal *, doublereal *, doublecomplex *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublereal *, integer *);
+ integer minwrk, ldwrku, maxwrk;
+ extern /* Subroutine */ int zungbr_(char *, integer *, integer *, integer
+ *, doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *);
+ doublereal smlnum;
+ integer irwork;
+ extern /* Subroutine */ int zunmbr_(char *, char *, char *, integer *,
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *
+), zunglq_(integer *, integer *, integer *
+, doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *);
+ logical lquery, wntuas, wntvas;
+ extern /* Subroutine */ int zungqr_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGESVD computes the singular value decomposition (SVD) of a complex */
+/* M-by-N matrix A, optionally computing the left and/or right singular */
+/* vectors. The SVD is written */
+
+/* A = U * SIGMA * conjugate-transpose(V) */
+
+/* where SIGMA is an M-by-N matrix which is zero except for its */
+/* min(m,n) diagonal elements, U is an M-by-M unitary matrix, and */
+/* V is an N-by-N unitary matrix. The diagonal elements of SIGMA */
+/* are the singular values of A; they are real and non-negative, and */
+/* are returned in descending order. The first min(m,n) columns of */
+/* U and V are the left and right singular vectors of A. */
+
+/* Note that the routine returns V**H, not V. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBU (input) CHARACTER*1 */
+/* Specifies options for computing all or part of the matrix U: */
+/* = 'A': all M columns of U are returned in array U: */
+/* = 'S': the first min(m,n) columns of U (the left singular */
+/* vectors) are returned in the array U; */
+/* = 'O': the first min(m,n) columns of U (the left singular */
+/* vectors) are overwritten on the array A; */
+/* = 'N': no columns of U (no left singular vectors) are */
+/* computed. */
+
+/* JOBVT (input) CHARACTER*1 */
+/* Specifies options for computing all or part of the matrix */
+/* V**H: */
+/* = 'A': all N rows of V**H are returned in the array VT; */
+/* = 'S': the first min(m,n) rows of V**H (the right singular */
+/* vectors) are returned in the array VT; */
+/* = 'O': the first min(m,n) rows of V**H (the right singular */
+/* vectors) are overwritten on the array A; */
+/* = 'N': no rows of V**H (no right singular vectors) are */
+/* computed. */
+
+/* JOBVT and JOBU cannot both be 'O'. */
+
+/* M (input) INTEGER */
+/* The number of rows of the input matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the input matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, */
+/* if JOBU = 'O', A is overwritten with the first min(m,n) */
+/* columns of U (the left singular vectors, */
+/* stored columnwise); */
+/* if JOBVT = 'O', A is overwritten with the first min(m,n) */
+/* rows of V**H (the right singular vectors, */
+/* stored rowwise); */
+/* if JOBU .ne. 'O' and JOBVT .ne. 'O', the contents of A */
+/* are destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* S (output) DOUBLE PRECISION array, dimension (min(M,N)) */
+/* The singular values of A, sorted so that S(i) >= S(i+1). */
+
+/* U (output) COMPLEX*16 array, dimension (LDU,UCOL) */
+/* (LDU,M) if JOBU = 'A' or (LDU,min(M,N)) if JOBU = 'S'. */
+/* If JOBU = 'A', U contains the M-by-M unitary matrix U; */
+/* if JOBU = 'S', U contains the first min(m,n) columns of U */
+/* (the left singular vectors, stored columnwise); */
+/* if JOBU = 'N' or 'O', U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= 1; if */
+/* JOBU = 'S' or 'A', LDU >= M. */
+
+/* VT (output) COMPLEX*16 array, dimension (LDVT,N) */
+/* If JOBVT = 'A', VT contains the N-by-N unitary matrix */
+/* V**H; */
+/* if JOBVT = 'S', VT contains the first min(m,n) rows of */
+/* V**H (the right singular vectors, stored rowwise); */
+/* if JOBVT = 'N' or 'O', VT is not referenced. */
+
+/* LDVT (input) INTEGER */
+/* The leading dimension of the array VT. LDVT >= 1; if */
+/* JOBVT = 'A', LDVT >= N; if JOBVT = 'S', LDVT >= min(M,N). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* LWORK >= MAX(1,2*MIN(M,N)+MAX(M,N)). */
+/* For good performance, LWORK should generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (5*min(M,N)) */
+/* On exit, if INFO > 0, RWORK(1:MIN(M,N)-1) contains the */
+/* unconverged superdiagonal elements of an upper bidiagonal */
+/* matrix B whose diagonal is in S (not necessarily sorted). */
+/* B satisfies A = U * B * VT, so it has the same singular */
+/* values as A, and singular vectors related by U and VT. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if ZBDSQR did not converge, INFO specifies how many */
+/* superdiagonals of an intermediate bidiagonal form B */
+/* did not converge to zero. See the description of RWORK */
+/* above for details. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ wntua = lsame_(jobu, "A");
+ wntus = lsame_(jobu, "S");
+ wntuas = wntua || wntus;
+ wntuo = lsame_(jobu, "O");
+ wntun = lsame_(jobu, "N");
+ wntva = lsame_(jobvt, "A");
+ wntvs = lsame_(jobvt, "S");
+ wntvas = wntva || wntvs;
+ wntvo = lsame_(jobvt, "O");
+ wntvn = lsame_(jobvt, "N");
+ lquery = *lwork == -1;
+
+ if (! (wntua || wntus || wntuo || wntun)) {
+ *info = -1;
+ } else if (! (wntva || wntvs || wntvo || wntvn) || wntvo && wntuo) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*m)) {
+ *info = -6;
+ } else if (*ldu < 1 || wntuas && *ldu < *m) {
+ *info = -9;
+ } else if (*ldvt < 1 || wntva && *ldvt < *n || wntvs && *ldvt < minmn) {
+ *info = -11;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* CWorkspace refers to complex workspace, and RWorkspace to */
+/* real workspace. NB refers to the optimal block size for the */
+/* immediately following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ if (*m >= *n && minmn > 0) {
+
+/* Space needed for ZBDSQR is BDSPAC = 5*N */
+
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = jobu;
+ i__1[1] = 1, a__1[1] = jobvt;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ mnthr = ilaenv_(&c__6, "ZGESVD", ch__1, m, n, &c__0, &c__0);
+ if (*m >= mnthr) {
+ if (wntun) {
+
+/* Path 1 (M much larger than N, JOBU='N') */
+
+ maxwrk = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1);
+ maxwrk = max(i__2,i__3);
+ if (wntvo || wntvas) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*n << 1) + (*n - 1) * ilaenv_(&
+ c__1, "ZUNGBR", "P", n, n, n, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ minwrk = *n * 3;
+ } else if (wntuo && wntvn) {
+
+/* Path 2 (M much larger than N, JOBU='O', JOBVT='N') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "ZUNGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = *n * *n + wrkbl, i__3 = *n * *n + *m * *n;
+ maxwrk = max(i__2,i__3);
+ minwrk = (*n << 1) + *m;
+ } else if (wntuo && wntvas) {
+
+/* Path 3 (M much larger than N, JOBU='O', JOBVT='S' or */
+/* 'A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "ZUNGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "ZUNGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = *n * *n + wrkbl, i__3 = *n * *n + *m * *n;
+ maxwrk = max(i__2,i__3);
+ minwrk = (*n << 1) + *m;
+ } else if (wntus && wntvn) {
+
+/* Path 4 (M much larger than N, JOBU='S', JOBVT='N') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "ZUNGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = *n * *n + wrkbl;
+ minwrk = (*n << 1) + *m;
+ } else if (wntus && wntvo) {
+
+/* Path 5 (M much larger than N, JOBU='S', JOBVT='O') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "ZUNGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "ZUNGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = (*n << 1) * *n + wrkbl;
+ minwrk = (*n << 1) + *m;
+ } else if (wntus && wntvas) {
+
+/* Path 6 (M much larger than N, JOBU='S', JOBVT='S' or */
+/* 'A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *n * ilaenv_(&c__1, "ZUNGQR",
+ " ", m, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "ZUNGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = *n * *n + wrkbl;
+ minwrk = (*n << 1) + *m;
+ } else if (wntua && wntvn) {
+
+/* Path 7 (M much larger than N, JOBU='A', JOBVT='N') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *m * ilaenv_(&c__1, "ZUNGQR",
+ " ", m, m, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = *n * *n + wrkbl;
+ minwrk = (*n << 1) + *m;
+ } else if (wntua && wntvo) {
+
+/* Path 8 (M much larger than N, JOBU='A', JOBVT='O') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *m * ilaenv_(&c__1, "ZUNGQR",
+ " ", m, m, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "ZUNGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = (*n << 1) * *n + wrkbl;
+ minwrk = (*n << 1) + *m;
+ } else if (wntua && wntvas) {
+
+/* Path 9 (M much larger than N, JOBU='A', JOBVT='S' or */
+/* 'A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *n + *m * ilaenv_(&c__1, "ZUNGQR",
+ " ", m, m, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", n, n, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "Q", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "ZUNGBR", "P", n, n, n, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = *n * *n + wrkbl;
+ minwrk = (*n << 1) + *m;
+ }
+ } else {
+
+/* Path 10 (M at least N, but not much larger) */
+
+ maxwrk = (*n << 1) + (*m + *n) * ilaenv_(&c__1, "ZGEBRD",
+ " ", m, n, &c_n1, &c_n1);
+ if (wntus || wntuo) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*n << 1) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, n, n, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ if (wntua) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*n << 1) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, m, n, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ if (! wntvn) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*n << 1) + (*n - 1) * ilaenv_(&
+ c__1, "ZUNGBR", "P", n, n, n, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ minwrk = (*n << 1) + *m;
+ }
+ } else if (minmn > 0) {
+
+/* Space needed for ZBDSQR is BDSPAC = 5*M */
+
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = jobu;
+ i__1[1] = 1, a__1[1] = jobvt;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ mnthr = ilaenv_(&c__6, "ZGESVD", ch__1, m, n, &c__0, &c__0);
+ if (*n >= mnthr) {
+ if (wntvn) {
+
+/* Path 1t(N much larger than M, JOBVT='N') */
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1);
+ maxwrk = max(i__2,i__3);
+ if (wntuo || wntuas) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, m, m, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ minwrk = *m * 3;
+ } else if (wntvo && wntun) {
+
+/* Path 2t(N much larger than M, JOBU='N', JOBVT='O') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "ZUNGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m - 1) * ilaenv_(&c__1,
+ "ZUNGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = *m * *m + wrkbl, i__3 = *m * *m + *m * *n;
+ maxwrk = max(i__2,i__3);
+ minwrk = (*m << 1) + *n;
+ } else if (wntvo && wntuas) {
+
+/* Path 3t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='O') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "ZUNGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m - 1) * ilaenv_(&c__1,
+ "ZUNGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = *m * *m + wrkbl, i__3 = *m * *m + *m * *n;
+ maxwrk = max(i__2,i__3);
+ minwrk = (*m << 1) + *n;
+ } else if (wntvs && wntun) {
+
+/* Path 4t(N much larger than M, JOBU='N', JOBVT='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "ZUNGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m - 1) * ilaenv_(&c__1,
+ "ZUNGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = *m * *m + wrkbl;
+ minwrk = (*m << 1) + *n;
+ } else if (wntvs && wntuo) {
+
+/* Path 5t(N much larger than M, JOBU='O', JOBVT='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "ZUNGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m - 1) * ilaenv_(&c__1,
+ "ZUNGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = (*m << 1) * *m + wrkbl;
+ minwrk = (*m << 1) + *n;
+ } else if (wntvs && wntuas) {
+
+/* Path 6t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *m * ilaenv_(&c__1, "ZUNGLQ",
+ " ", m, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m - 1) * ilaenv_(&c__1,
+ "ZUNGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = *m * *m + wrkbl;
+ minwrk = (*m << 1) + *n;
+ } else if (wntva && wntun) {
+
+/* Path 7t(N much larger than M, JOBU='N', JOBVT='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *n * ilaenv_(&c__1, "ZUNGLQ",
+ " ", n, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m - 1) * ilaenv_(&c__1,
+ "ZUNGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = *m * *m + wrkbl;
+ minwrk = (*m << 1) + *n;
+ } else if (wntva && wntuo) {
+
+/* Path 8t(N much larger than M, JOBU='O', JOBVT='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *n * ilaenv_(&c__1, "ZUNGLQ",
+ " ", n, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m - 1) * ilaenv_(&c__1,
+ "ZUNGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = (*m << 1) * *m + wrkbl;
+ minwrk = (*m << 1) + *n;
+ } else if (wntva && wntuas) {
+
+/* Path 9t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "ZGELQF", " ", m, n, &
+ c_n1, &c_n1);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *m + *n * ilaenv_(&c__1, "ZUNGLQ",
+ " ", n, n, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m << 1) * ilaenv_(&
+ c__1, "ZGEBRD", " ", m, m, &c_n1, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + (*m - 1) * ilaenv_(&c__1,
+ "ZUNGBR", "P", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "Q", m, m, m, &c_n1);
+ wrkbl = max(i__2,i__3);
+ maxwrk = *m * *m + wrkbl;
+ minwrk = (*m << 1) + *n;
+ }
+ } else {
+
+/* Path 10t(N greater than M, but not much larger) */
+
+ maxwrk = (*m << 1) + (*m + *n) * ilaenv_(&c__1, "ZGEBRD",
+ " ", m, n, &c_n1, &c_n1);
+ if (wntvs || wntvo) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*m << 1) + *m * ilaenv_(&c__1,
+ "ZUNGBR", "P", m, n, m, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ if (wntva) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*m << 1) + *n * ilaenv_(&c__1,
+ "ZUNGBR", "P", n, n, m, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ if (! wntun) {
+/* Computing MAX */
+ i__2 = maxwrk, i__3 = (*m << 1) + (*m - 1) * ilaenv_(&
+ c__1, "ZUNGBR", "Q", m, m, m, &c_n1);
+ maxwrk = max(i__2,i__3);
+ }
+ minwrk = (*m << 1) + *n;
+ }
+ }
+ maxwrk = max(maxwrk,minwrk);
+ work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -13;
+ }
+ }
+
+ if (*info != 0) {
+ i__2 = -(*info);
+ xerbla_("ZGESVD", &i__2);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = dlamch_("P");
+ smlnum = sqrt(dlamch_("S")) / eps;
+ bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = zlange_("M", m, n, &a[a_offset], lda, dum);
+ iscl = 0;
+ if (anrm > 0. && anrm < smlnum) {
+ iscl = 1;
+ zlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, &
+ ierr);
+ } else if (anrm > bignum) {
+ iscl = 1;
+ zlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+ if (*m >= *n) {
+
+/* A has at least as many rows as columns. If A has sufficiently */
+/* more rows than columns, first reduce using the QR */
+/* decomposition (if sufficient workspace available) */
+
+ if (*m >= mnthr) {
+
+ if (wntun) {
+
+/* Path 1 (M much larger than N, JOBU='N') */
+/* No left singular vectors to be computed */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: need 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork], &
+ i__2, &ierr);
+
+/* Zero out below R */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zlaset_("L", &i__2, &i__3, &c_b1, &c_b1, &a[a_dim1 + 2], lda);
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in A */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__2, &ierr);
+ ncvt = 0;
+ if (wntvo || wntvas) {
+
+/* If right singular vectors desired, generate P'. */
+/* (CWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", n, n, n, &a[a_offset], lda, &work[itaup], &
+ work[iwork], &i__2, &ierr);
+ ncvt = *n;
+ }
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of A in A if desired */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, &ncvt, &c__0, &c__0, &s[1], &rwork[ie], &a[
+ a_offset], lda, cdum, &c__1, cdum, &c__1, &rwork[
+ irwork], info);
+
+/* If right singular vectors desired in VT, copy them there */
+
+ if (wntvas) {
+ zlacpy_("F", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ }
+
+ } else if (wntuo && wntvn) {
+
+/* Path 2 (M much larger than N, JOBU='O', JOBVT='N') */
+/* N left singular vectors to be overwritten on A and */
+/* no right singular vectors to be computed */
+
+ if (*lwork >= *n * *n + *n * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *lda * *n;
+ if (*lwork >= max(i__2,i__3) + *lda * *n) {
+
+/* WORK(IU) is LDA by N, WORK(IR) is LDA by N */
+
+ ldwrku = *lda;
+ ldwrkr = *lda;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *lda * *n;
+ if (*lwork >= max(i__2,i__3) + *n * *n) {
+
+/* WORK(IU) is LDA by N, WORK(IR) is N by N */
+
+ ldwrku = *lda;
+ ldwrkr = *n;
+ } else {
+
+/* WORK(IU) is LDWRKU by N, WORK(IR) is N by N */
+
+ ldwrku = (*lwork - *n * *n) / *n;
+ ldwrkr = *n;
+ }
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__2, &ierr);
+
+/* Copy R to WORK(IR) and zero out below it */
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zlaset_("L", &i__2, &i__3, &c_b1, &c_b1, &work[ir + 1], &
+ ldwrkr);
+
+/* Generate Q in A */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IR) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &i__2, &
+ ierr);
+
+/* Generate left vectors bidiagonalizing R */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+/* (RWorkspace: need 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq], &
+ work[iwork], &i__2, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IR) */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, &c__0, n, &c__0, &s[1], &rwork[ie], cdum,
+ &c__1, &work[ir], &ldwrkr, cdum, &c__1, &rwork[
+ irwork], info);
+ iu = itauq;
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IR), storing result in WORK(IU) and copying to A */
+/* (CWorkspace: need N*N+N, prefer N*N+M*N) */
+/* (RWorkspace: 0) */
+
+ i__2 = *m;
+ i__3 = ldwrku;
+ for (i__ = 1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__3) {
+/* Computing MIN */
+ i__4 = *m - i__ + 1;
+ chunk = min(i__4,ldwrku);
+ zgemm_("N", "N", &chunk, n, n, &c_b2, &a[i__ + a_dim1]
+, lda, &work[ir], &ldwrkr, &c_b1, &work[iu], &
+ ldwrku);
+ zlacpy_("F", &chunk, n, &work[iu], &ldwrku, &a[i__ +
+ a_dim1], lda);
+/* L10: */
+ }
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize A */
+/* (CWorkspace: need 2*N+M, prefer 2*N+(M+N)*NB) */
+/* (RWorkspace: N) */
+
+ i__3 = *lwork - iwork + 1;
+ zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__3, &ierr);
+
+/* Generate left vectors bidiagonalizing A */
+/* (CWorkspace: need 3*N, prefer 2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ zungbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &
+ work[iwork], &i__3, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in A */
+/* (CWorkspace: need 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, &c__0, m, &c__0, &s[1], &rwork[ie], cdum,
+ &c__1, &a[a_offset], lda, cdum, &c__1, &rwork[
+ irwork], info);
+
+ }
+
+ } else if (wntuo && wntvas) {
+
+/* Path 3 (M much larger than N, JOBU='O', JOBVT='S' or 'A') */
+/* N left singular vectors to be overwritten on A and */
+/* N right singular vectors to be computed in VT */
+
+ if (*lwork >= *n * *n + *n * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+/* Computing MAX */
+ i__3 = wrkbl, i__2 = *lda * *n;
+ if (*lwork >= max(i__3,i__2) + *lda * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by N */
+
+ ldwrku = *lda;
+ ldwrkr = *lda;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__3 = wrkbl, i__2 = *lda * *n;
+ if (*lwork >= max(i__3,i__2) + *n * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is N by N */
+
+ ldwrku = *lda;
+ ldwrkr = *n;
+ } else {
+
+/* WORK(IU) is LDWRKU by N and WORK(IR) is N by N */
+
+ ldwrku = (*lwork - *n * *n) / *n;
+ ldwrkr = *n;
+ }
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__3, &ierr);
+
+/* Copy R to VT, zeroing out below it */
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ if (*n > 1) {
+ i__3 = *n - 1;
+ i__2 = *n - 1;
+ zlaset_("L", &i__3, &i__2, &c_b1, &c_b1, &vt[vt_dim1
+ + 2], ldvt);
+ }
+
+/* Generate Q in A */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ zungqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__3, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in VT, copying result to WORK(IR) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__3 = *lwork - iwork + 1;
+ zgebrd_(n, n, &vt[vt_offset], ldvt, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &i__3, &
+ ierr);
+ zlacpy_("L", n, n, &vt[vt_offset], ldvt, &work[ir], &
+ ldwrkr);
+
+/* Generate left vectors bidiagonalizing R in WORK(IR) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ zungbr_("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq], &
+ work[iwork], &i__3, &ierr);
+
+/* Generate right vectors bidiagonalizing R in VT */
+/* (CWorkspace: need N*N+3*N-1, prefer N*N+2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ zungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup],
+ &work[iwork], &i__3, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IR) and computing right */
+/* singular vectors of R in VT */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, n, n, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &work[ir], &ldwrkr, cdum, &c__1,
+ &rwork[irwork], info);
+ iu = itauq;
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IR), storing result in WORK(IU) and copying to A */
+/* (CWorkspace: need N*N+N, prefer N*N+M*N) */
+/* (RWorkspace: 0) */
+
+ i__3 = *m;
+ i__2 = ldwrku;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__3 : i__ <= i__3; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__4 = *m - i__ + 1;
+ chunk = min(i__4,ldwrku);
+ zgemm_("N", "N", &chunk, n, n, &c_b2, &a[i__ + a_dim1]
+, lda, &work[ir], &ldwrkr, &c_b1, &work[iu], &
+ ldwrku);
+ zlacpy_("F", &chunk, n, &work[iu], &ldwrku, &a[i__ +
+ a_dim1], lda);
+/* L20: */
+ }
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__2, &ierr);
+
+/* Copy R to VT, zeroing out below it */
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ if (*n > 1) {
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zlaset_("L", &i__2, &i__3, &c_b1, &c_b1, &vt[vt_dim1
+ + 2], ldvt);
+ }
+
+/* Generate Q in A */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in VT */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: N) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(n, n, &vt[vt_offset], ldvt, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &i__2, &
+ ierr);
+
+/* Multiply Q in A by left vectors bidiagonalizing R */
+/* (CWorkspace: need 2*N+M, prefer 2*N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunmbr_("Q", "R", "N", m, n, n, &vt[vt_offset], ldvt, &
+ work[itauq], &a[a_offset], lda, &work[iwork], &
+ i__2, &ierr);
+
+/* Generate right vectors bidiagonalizing R in VT */
+/* (CWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup],
+ &work[iwork], &i__2, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in A and computing right */
+/* singular vectors of A in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, n, m, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, cdum, &c__1,
+ &rwork[irwork], info);
+
+ }
+
+ } else if (wntus) {
+
+ if (wntvn) {
+
+/* Path 4 (M much larger than N, JOBU='S', JOBVT='N') */
+/* N left singular vectors to be computed in U and */
+/* no right singular vectors to be computed */
+
+ if (*lwork >= *n * *n + *n * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+ if (*lwork >= wrkbl + *lda * *n) {
+
+/* WORK(IR) is LDA by N */
+
+ ldwrkr = *lda;
+ } else {
+
+/* WORK(IR) is N by N */
+
+ ldwrkr = *n;
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &
+ ldwrkr);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zlaset_("L", &i__2, &i__3, &c_b1, &c_b1, &work[ir + 1]
+, &ldwrkr);
+
+/* Generate Q in A */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungqr_(m, n, n, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IR) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Generate left vectors bidiagonalizing R in WORK(IR) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IR) */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, &c__0, n, &c__0, &s[1], &rwork[ie],
+ cdum, &c__1, &work[ir], &ldwrkr, cdum, &c__1,
+ &rwork[irwork], info);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IR), storing result in U */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: 0) */
+
+ zgemm_("N", "N", m, n, n, &c_b2, &a[a_offset], lda, &
+ work[ir], &ldwrkr, &c_b1, &u[u_offset], ldu);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ zlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungqr_(m, n, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Zero out below R in A */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zlaset_("L", &i__2, &i__3, &c_b1, &c_b1, &a[a_dim1 +
+ 2], lda);
+
+/* Bidiagonalize R in A */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left vectors bidiagonalizing R */
+/* (CWorkspace: need 2*N+M, prefer 2*N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunmbr_("Q", "R", "N", m, n, n, &a[a_offset], lda, &
+ work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr)
+ ;
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, &c__0, m, &c__0, &s[1], &rwork[ie],
+ cdum, &c__1, &u[u_offset], ldu, cdum, &c__1, &
+ rwork[irwork], info);
+
+ }
+
+ } else if (wntvo) {
+
+/* Path 5 (M much larger than N, JOBU='S', JOBVT='O') */
+/* N left singular vectors to be computed in U and */
+/* N right singular vectors to be overwritten on A */
+
+ if (*lwork >= (*n << 1) * *n + *n * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + (*lda << 1) * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by N */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *lda;
+ } else if (*lwork >= wrkbl + (*lda + *n) * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is N by N */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *n;
+ } else {
+
+/* WORK(IU) is N by N and WORK(IR) is N by N */
+
+ ldwrku = *n;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *n;
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need 2*N*N+2*N, prefer 2*N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IU), zeroing out below it */
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zlaset_("L", &i__2, &i__3, &c_b1, &c_b1, &work[iu + 1]
+, &ldwrku);
+
+/* Generate Q in A */
+/* (CWorkspace: need 2*N*N+2*N, prefer 2*N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungqr_(m, n, n, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IU), copying result to */
+/* WORK(IR) */
+/* (CWorkspace: need 2*N*N+3*N, */
+/* prefer 2*N*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(n, n, &work[iu], &ldwrku, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ zlacpy_("U", n, n, &work[iu], &ldwrku, &work[ir], &
+ ldwrkr);
+
+/* Generate left bidiagonalizing vectors in WORK(IU) */
+/* (CWorkspace: need 2*N*N+3*N, prefer 2*N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", n, n, n, &work[iu], &ldwrku, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in WORK(IR) */
+/* (CWorkspace: need 2*N*N+3*N-1, */
+/* prefer 2*N*N+2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", n, n, n, &work[ir], &ldwrkr, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IU) and computing */
+/* right singular vectors of R in WORK(IR) */
+/* (CWorkspace: need 2*N*N) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, n, n, &c__0, &s[1], &rwork[ie], &work[
+ ir], &ldwrkr, &work[iu], &ldwrku, cdum, &c__1,
+ &rwork[irwork], info);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IU), storing result in U */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: 0) */
+
+ zgemm_("N", "N", m, n, n, &c_b2, &a[a_offset], lda, &
+ work[iu], &ldwrku, &c_b1, &u[u_offset], ldu);
+
+/* Copy right singular vectors of R to A */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: 0) */
+
+ zlacpy_("F", n, n, &work[ir], &ldwrkr, &a[a_offset],
+ lda);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ zlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungqr_(m, n, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Zero out below R in A */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zlaset_("L", &i__2, &i__3, &c_b1, &c_b1, &a[a_dim1 +
+ 2], lda);
+
+/* Bidiagonalize R in A */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left vectors bidiagonalizing R */
+/* (CWorkspace: need 2*N+M, prefer 2*N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunmbr_("Q", "R", "N", m, n, n, &a[a_offset], lda, &
+ work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr)
+ ;
+
+/* Generate right vectors bidiagonalizing R in A */
+/* (CWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", n, n, n, &a[a_offset], lda, &work[itaup],
+ &work[iwork], &i__2, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in A */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, n, m, &c__0, &s[1], &rwork[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, cdum, &
+ c__1, &rwork[irwork], info);
+
+ }
+
+ } else if (wntvas) {
+
+/* Path 6 (M much larger than N, JOBU='S', JOBVT='S' */
+/* or 'A') */
+/* N left singular vectors to be computed in U and */
+/* N right singular vectors to be computed in VT */
+
+ if (*lwork >= *n * *n + *n * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + *lda * *n) {
+
+/* WORK(IU) is LDA by N */
+
+ ldwrku = *lda;
+ } else {
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ }
+ itau = iu + ldwrku * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IU), zeroing out below it */
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zlaset_("L", &i__2, &i__3, &c_b1, &c_b1, &work[iu + 1]
+, &ldwrku);
+
+/* Generate Q in A */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungqr_(m, n, n, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IU), copying result to VT */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(n, n, &work[iu], &ldwrku, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ zlacpy_("U", n, n, &work[iu], &ldwrku, &vt[vt_offset],
+ ldvt);
+
+/* Generate left bidiagonalizing vectors in WORK(IU) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", n, n, n, &work[iu], &ldwrku, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in VT */
+/* (CWorkspace: need N*N+3*N-1, */
+/* prefer N*N+2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[
+ itaup], &work[iwork], &i__2, &ierr)
+ ;
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IU) and computing */
+/* right singular vectors of R in VT */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, n, n, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &work[iu], &ldwrku, cdum, &
+ c__1, &rwork[irwork], info);
+
+/* Multiply Q in A by left singular vectors of R in */
+/* WORK(IU), storing result in U */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: 0) */
+
+ zgemm_("N", "N", m, n, n, &c_b2, &a[a_offset], lda, &
+ work[iu], &ldwrku, &c_b1, &u[u_offset], ldu);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ zlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungqr_(m, n, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy R to VT, zeroing out below it */
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ if (*n > 1) {
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zlaset_("L", &i__2, &i__3, &c_b1, &c_b1, &vt[
+ vt_dim1 + 2], ldvt);
+ }
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in VT */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(n, n, &vt[vt_offset], ldvt, &s[1], &rwork[ie],
+ &work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left bidiagonalizing vectors */
+/* in VT */
+/* (CWorkspace: need 2*N+M, prefer 2*N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunmbr_("Q", "R", "N", m, n, n, &vt[vt_offset], ldvt,
+ &work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in VT */
+/* (CWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[
+ itaup], &work[iwork], &i__2, &ierr)
+ ;
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, n, m, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, cdum, &
+ c__1, &rwork[irwork], info);
+
+ }
+
+ }
+
+ } else if (wntua) {
+
+ if (wntvn) {
+
+/* Path 7 (M much larger than N, JOBU='A', JOBVT='N') */
+/* M left singular vectors to be computed in U and */
+/* no right singular vectors to be computed */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *n * 3;
+ if (*lwork >= *n * *n + max(i__2,i__3)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+ if (*lwork >= wrkbl + *lda * *n) {
+
+/* WORK(IR) is LDA by N */
+
+ ldwrkr = *lda;
+ } else {
+
+/* WORK(IR) is N by N */
+
+ ldwrkr = *n;
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ zlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &
+ ldwrkr);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zlaset_("L", &i__2, &i__3, &c_b1, &c_b1, &work[ir + 1]
+, &ldwrkr);
+
+/* Generate Q in U */
+/* (CWorkspace: need N*N+N+M, prefer N*N+N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IR) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in WORK(IR) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IR) */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, &c__0, n, &c__0, &s[1], &rwork[ie],
+ cdum, &c__1, &work[ir], &ldwrkr, cdum, &c__1,
+ &rwork[irwork], info);
+
+/* Multiply Q in U by left singular vectors of R in */
+/* WORK(IR), storing result in A */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: 0) */
+
+ zgemm_("N", "N", m, n, n, &c_b2, &u[u_offset], ldu, &
+ work[ir], &ldwrkr, &c_b1, &a[a_offset], lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ zlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ zlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (CWorkspace: need N+M, prefer N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Zero out below R in A */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zlaset_("L", &i__2, &i__3, &c_b1, &c_b1, &a[a_dim1 +
+ 2], lda);
+
+/* Bidiagonalize R in A */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left bidiagonalizing vectors */
+/* in A */
+/* (CWorkspace: need 2*N+M, prefer 2*N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunmbr_("Q", "R", "N", m, n, n, &a[a_offset], lda, &
+ work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr)
+ ;
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, &c__0, m, &c__0, &s[1], &rwork[ie],
+ cdum, &c__1, &u[u_offset], ldu, cdum, &c__1, &
+ rwork[irwork], info);
+
+ }
+
+ } else if (wntvo) {
+
+/* Path 8 (M much larger than N, JOBU='A', JOBVT='O') */
+/* M left singular vectors to be computed in U and */
+/* N right singular vectors to be overwritten on A */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *n * 3;
+ if (*lwork >= (*n << 1) * *n + max(i__2,i__3)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + (*lda << 1) * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by N */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *lda;
+ } else if (*lwork >= wrkbl + (*lda + *n) * *n) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is N by N */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *n;
+ } else {
+
+/* WORK(IU) is N by N and WORK(IR) is N by N */
+
+ ldwrku = *n;
+ ir = iu + ldwrku * *n;
+ ldwrkr = *n;
+ }
+ itau = ir + ldwrkr * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need 2*N*N+2*N, prefer 2*N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ zlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (CWorkspace: need 2*N*N+N+M, prefer 2*N*N+N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IU), zeroing out below it */
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zlaset_("L", &i__2, &i__3, &c_b1, &c_b1, &work[iu + 1]
+, &ldwrku);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IU), copying result to */
+/* WORK(IR) */
+/* (CWorkspace: need 2*N*N+3*N, */
+/* prefer 2*N*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(n, n, &work[iu], &ldwrku, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ zlacpy_("U", n, n, &work[iu], &ldwrku, &work[ir], &
+ ldwrkr);
+
+/* Generate left bidiagonalizing vectors in WORK(IU) */
+/* (CWorkspace: need 2*N*N+3*N, prefer 2*N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", n, n, n, &work[iu], &ldwrku, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in WORK(IR) */
+/* (CWorkspace: need 2*N*N+3*N-1, */
+/* prefer 2*N*N+2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", n, n, n, &work[ir], &ldwrkr, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IU) and computing */
+/* right singular vectors of R in WORK(IR) */
+/* (CWorkspace: need 2*N*N) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, n, n, &c__0, &s[1], &rwork[ie], &work[
+ ir], &ldwrkr, &work[iu], &ldwrku, cdum, &c__1,
+ &rwork[irwork], info);
+
+/* Multiply Q in U by left singular vectors of R in */
+/* WORK(IU), storing result in A */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: 0) */
+
+ zgemm_("N", "N", m, n, n, &c_b2, &u[u_offset], ldu, &
+ work[iu], &ldwrku, &c_b1, &a[a_offset], lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ zlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Copy right singular vectors of R from WORK(IR) to A */
+
+ zlacpy_("F", n, n, &work[ir], &ldwrkr, &a[a_offset],
+ lda);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ zlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (CWorkspace: need N+M, prefer N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Zero out below R in A */
+
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zlaset_("L", &i__2, &i__3, &c_b1, &c_b1, &a[a_dim1 +
+ 2], lda);
+
+/* Bidiagonalize R in A */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left bidiagonalizing vectors */
+/* in A */
+/* (CWorkspace: need 2*N+M, prefer 2*N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunmbr_("Q", "R", "N", m, n, n, &a[a_offset], lda, &
+ work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr)
+ ;
+
+/* Generate right bidiagonalizing vectors in A */
+/* (CWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", n, n, n, &a[a_offset], lda, &work[itaup],
+ &work[iwork], &i__2, &ierr);
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in A */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, n, m, &c__0, &s[1], &rwork[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, cdum, &
+ c__1, &rwork[irwork], info);
+
+ }
+
+ } else if (wntvas) {
+
+/* Path 9 (M much larger than N, JOBU='A', JOBVT='S' */
+/* or 'A') */
+/* M left singular vectors to be computed in U and */
+/* N right singular vectors to be computed in VT */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *n * 3;
+ if (*lwork >= *n * *n + max(i__2,i__3)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + *lda * *n) {
+
+/* WORK(IU) is LDA by N */
+
+ ldwrku = *lda;
+ } else {
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ }
+ itau = iu + ldwrku * *n;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ zlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (CWorkspace: need N*N+N+M, prefer N*N+N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy R to WORK(IU), zeroing out below it */
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zlaset_("L", &i__2, &i__3, &c_b1, &c_b1, &work[iu + 1]
+, &ldwrku);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in WORK(IU), copying result to VT */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(n, n, &work[iu], &ldwrku, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ zlacpy_("U", n, n, &work[iu], &ldwrku, &vt[vt_offset],
+ ldvt);
+
+/* Generate left bidiagonalizing vectors in WORK(IU) */
+/* (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", n, n, n, &work[iu], &ldwrku, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in VT */
+/* (CWorkspace: need N*N+3*N-1, */
+/* prefer N*N+2*N+(N-1)*NB) */
+/* (RWorkspace: need 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[
+ itaup], &work[iwork], &i__2, &ierr)
+ ;
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of R in WORK(IU) and computing */
+/* right singular vectors of R in VT */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, n, n, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &work[iu], &ldwrku, cdum, &
+ c__1, &rwork[irwork], info);
+
+/* Multiply Q in U by left singular vectors of R in */
+/* WORK(IU), storing result in A */
+/* (CWorkspace: need N*N) */
+/* (RWorkspace: 0) */
+
+ zgemm_("N", "N", m, n, n, &c_b2, &u[u_offset], ldu, &
+ work[iu], &ldwrku, &c_b1, &a[a_offset], lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ zlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *n;
+
+/* Compute A=Q*R, copying result to U */
+/* (CWorkspace: need 2*N, prefer N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ zlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset],
+ ldu);
+
+/* Generate Q in U */
+/* (CWorkspace: need N+M, prefer N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungqr_(m, m, n, &u[u_offset], ldu, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy R from A to VT, zeroing out below it */
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+ if (*n > 1) {
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zlaset_("L", &i__2, &i__3, &c_b1, &c_b1, &vt[
+ vt_dim1 + 2], ldvt);
+ }
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize R in VT */
+/* (CWorkspace: need 3*N, prefer 2*N+2*N*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(n, n, &vt[vt_offset], ldvt, &s[1], &rwork[ie],
+ &work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply Q in U by left bidiagonalizing vectors */
+/* in VT */
+/* (CWorkspace: need 2*N+M, prefer 2*N+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunmbr_("Q", "R", "N", m, n, n, &vt[vt_offset], ldvt,
+ &work[itauq], &u[u_offset], ldu, &work[iwork],
+ &i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in VT */
+/* (CWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[
+ itaup], &work[iwork], &i__2, &ierr)
+ ;
+ irwork = ie + *n;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, n, m, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, cdum, &
+ c__1, &rwork[irwork], info);
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* M .LT. MNTHR */
+
+/* Path 10 (M at least N, but not much larger) */
+/* Reduce to bidiagonal form without QR decomposition */
+
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *n;
+ iwork = itaup + *n;
+
+/* Bidiagonalize A */
+/* (CWorkspace: need 2*N+M, prefer 2*N+(M+N)*NB) */
+/* (RWorkspace: need N) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[iwork], &i__2, &ierr);
+ if (wntuas) {
+
+/* If left singular vectors desired in U, copy result to U */
+/* and generate left bidiagonalizing vectors in U */
+/* (CWorkspace: need 2*N+NCU, prefer 2*N+NCU*NB) */
+/* (RWorkspace: 0) */
+
+ zlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ if (wntus) {
+ ncu = *n;
+ }
+ if (wntua) {
+ ncu = *m;
+ }
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", m, &ncu, n, &u[u_offset], ldu, &work[itauq], &
+ work[iwork], &i__2, &ierr);
+ }
+ if (wntvas) {
+
+/* If right singular vectors desired in VT, copy result to */
+/* VT and generate right bidiagonalizing vectors in VT */
+/* (CWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ zlacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &
+ work[iwork], &i__2, &ierr);
+ }
+ if (wntuo) {
+
+/* If left singular vectors desired in A, generate left */
+/* bidiagonalizing vectors in A */
+/* (CWorkspace: need 3*N, prefer 2*N+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &work[
+ iwork], &i__2, &ierr);
+ }
+ if (wntvo) {
+
+/* If right singular vectors desired in A, generate right */
+/* bidiagonalizing vectors in A */
+/* (CWorkspace: need 3*N-1, prefer 2*N+(N-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", n, n, n, &a[a_offset], lda, &work[itaup], &work[
+ iwork], &i__2, &ierr);
+ }
+ irwork = ie + *n;
+ if (wntuas || wntuo) {
+ nru = *m;
+ }
+ if (wntun) {
+ nru = 0;
+ }
+ if (wntvas || wntvo) {
+ ncvt = *n;
+ }
+ if (wntvn) {
+ ncvt = 0;
+ }
+ if (! wntuo && ! wntvo) {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in U and computing right singular */
+/* vectors in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, &ncvt, &nru, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, cdum, &c__1, &
+ rwork[irwork], info);
+ } else if (! wntuo && wntvo) {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in U and computing right singular */
+/* vectors in A */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, &ncvt, &nru, &c__0, &s[1], &rwork[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, cdum, &c__1, &
+ rwork[irwork], info);
+ } else {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in A and computing right singular */
+/* vectors in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", n, &ncvt, &nru, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, cdum, &c__1, &
+ rwork[irwork], info);
+ }
+
+ }
+
+ } else {
+
+/* A has more columns than rows. If A has sufficiently more */
+/* columns than rows, first reduce using the LQ decomposition (if */
+/* sufficient workspace available) */
+
+ if (*n >= mnthr) {
+
+ if (wntvn) {
+
+/* Path 1t(N much larger than M, JOBVT='N') */
+/* No right singular vectors to be computed */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork], &
+ i__2, &ierr);
+
+/* Zero out above L */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ zlaset_("U", &i__2, &i__3, &c_b1, &c_b1, &a[(a_dim1 << 1) + 1]
+, lda);
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in A */
+/* (CWorkspace: need 3*M, prefer 2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__2, &ierr);
+ if (wntuo || wntuas) {
+
+/* If left singular vectors desired, generate Q */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", m, m, m, &a[a_offset], lda, &work[itauq], &
+ work[iwork], &i__2, &ierr);
+ }
+ irwork = ie + *m;
+ nru = 0;
+ if (wntuo || wntuas) {
+ nru = *m;
+ }
+
+/* Perform bidiagonal QR iteration, computing left singular */
+/* vectors of A in A if desired */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", m, &c__0, &nru, &c__0, &s[1], &rwork[ie], cdum, &
+ c__1, &a[a_offset], lda, cdum, &c__1, &rwork[irwork],
+ info);
+
+/* If left singular vectors desired in U, copy them there */
+
+ if (wntuas) {
+ zlacpy_("F", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ }
+
+ } else if (wntvo && wntun) {
+
+/* Path 2t(N much larger than M, JOBU='N', JOBVT='O') */
+/* M right singular vectors to be overwritten on A and */
+/* no left singular vectors to be computed */
+
+ if (*lwork >= *m * *m + *m * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *lda * *n;
+ if (*lwork >= max(i__2,i__3) + *lda * *m) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by M */
+
+ ldwrku = *lda;
+ chunk = *n;
+ ldwrkr = *lda;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__2 = wrkbl, i__3 = *lda * *n;
+ if (*lwork >= max(i__2,i__3) + *m * *m) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is M by M */
+
+ ldwrku = *lda;
+ chunk = *n;
+ ldwrkr = *m;
+ } else {
+
+/* WORK(IU) is M by CHUNK and WORK(IR) is M by M */
+
+ ldwrku = *m;
+ chunk = (*lwork - *m * *m) / *m;
+ ldwrkr = *m;
+ }
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__2, &ierr);
+
+/* Copy L to WORK(IR) and zero out above it */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ zlaset_("U", &i__2, &i__3, &c_b1, &c_b1, &work[ir +
+ ldwrkr], &ldwrkr);
+
+/* Generate Q in A */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IR) */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(m, m, &work[ir], &ldwrkr, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &i__2, &
+ ierr);
+
+/* Generate right vectors bidiagonalizing L */
+/* (CWorkspace: need M*M+3*M-1, prefer M*M+2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", m, m, m, &work[ir], &ldwrkr, &work[itaup], &
+ work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of L in WORK(IR) */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", m, m, &c__0, &c__0, &s[1], &rwork[ie], &work[
+ ir], &ldwrkr, cdum, &c__1, cdum, &c__1, &rwork[
+ irwork], info);
+ iu = itauq;
+
+/* Multiply right singular vectors of L in WORK(IR) by Q */
+/* in A, storing result in WORK(IU) and copying to A */
+/* (CWorkspace: need M*M+M, prefer M*M+M*N) */
+/* (RWorkspace: 0) */
+
+ i__2 = *n;
+ i__3 = chunk;
+ for (i__ = 1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__3) {
+/* Computing MIN */
+ i__4 = *n - i__ + 1;
+ blk = min(i__4,chunk);
+ zgemm_("N", "N", m, &blk, m, &c_b2, &work[ir], &
+ ldwrkr, &a[i__ * a_dim1 + 1], lda, &c_b1, &
+ work[iu], &ldwrku);
+ zlacpy_("F", m, &blk, &work[iu], &ldwrku, &a[i__ *
+ a_dim1 + 1], lda);
+/* L30: */
+ }
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize A */
+/* (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB) */
+/* (RWorkspace: need M) */
+
+ i__3 = *lwork - iwork + 1;
+ zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__3, &ierr);
+
+/* Generate right vectors bidiagonalizing A */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ zungbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &
+ work[iwork], &i__3, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of A in A */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("L", m, n, &c__0, &c__0, &s[1], &rwork[ie], &a[
+ a_offset], lda, cdum, &c__1, cdum, &c__1, &rwork[
+ irwork], info);
+
+ }
+
+ } else if (wntvo && wntuas) {
+
+/* Path 3t(N much larger than M, JOBU='S' or 'A', JOBVT='O') */
+/* M right singular vectors to be overwritten on A and */
+/* M left singular vectors to be computed in U */
+
+ if (*lwork >= *m * *m + *m * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+/* Computing MAX */
+ i__3 = wrkbl, i__2 = *lda * *n;
+ if (*lwork >= max(i__3,i__2) + *lda * *m) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is LDA by M */
+
+ ldwrku = *lda;
+ chunk = *n;
+ ldwrkr = *lda;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__3 = wrkbl, i__2 = *lda * *n;
+ if (*lwork >= max(i__3,i__2) + *m * *m) {
+
+/* WORK(IU) is LDA by N and WORK(IR) is M by M */
+
+ ldwrku = *lda;
+ chunk = *n;
+ ldwrkr = *m;
+ } else {
+
+/* WORK(IU) is M by CHUNK and WORK(IR) is M by M */
+
+ ldwrku = *m;
+ chunk = (*lwork - *m * *m) / *m;
+ ldwrkr = *m;
+ }
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__3, &ierr);
+
+/* Copy L to U, zeroing about above it */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__3 = *m - 1;
+ i__2 = *m - 1;
+ zlaset_("U", &i__3, &i__2, &c_b1, &c_b1, &u[(u_dim1 << 1)
+ + 1], ldu);
+
+/* Generate Q in A */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ zunglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__3, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in U, copying result to WORK(IR) */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__3 = *lwork - iwork + 1;
+ zgebrd_(m, m, &u[u_offset], ldu, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__3, &ierr);
+ zlacpy_("U", m, m, &u[u_offset], ldu, &work[ir], &ldwrkr);
+
+/* Generate right vectors bidiagonalizing L in WORK(IR) */
+/* (CWorkspace: need M*M+3*M-1, prefer M*M+2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ zungbr_("P", m, m, m, &work[ir], &ldwrkr, &work[itaup], &
+ work[iwork], &i__3, &ierr);
+
+/* Generate left vectors bidiagonalizing L in U */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__3 = *lwork - iwork + 1;
+ zungbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq], &
+ work[iwork], &i__3, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in U, and computing right */
+/* singular vectors of L in WORK(IR) */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", m, m, m, &c__0, &s[1], &rwork[ie], &work[ir],
+ &ldwrkr, &u[u_offset], ldu, cdum, &c__1, &rwork[
+ irwork], info);
+ iu = itauq;
+
+/* Multiply right singular vectors of L in WORK(IR) by Q */
+/* in A, storing result in WORK(IU) and copying to A */
+/* (CWorkspace: need M*M+M, prefer M*M+M*N)) */
+/* (RWorkspace: 0) */
+
+ i__3 = *n;
+ i__2 = chunk;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__3 : i__ <= i__3; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__4 = *n - i__ + 1;
+ blk = min(i__4,chunk);
+ zgemm_("N", "N", m, &blk, m, &c_b2, &work[ir], &
+ ldwrkr, &a[i__ * a_dim1 + 1], lda, &c_b1, &
+ work[iu], &ldwrku);
+ zlacpy_("F", m, &blk, &work[iu], &ldwrku, &a[i__ *
+ a_dim1 + 1], lda);
+/* L40: */
+ }
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[iwork]
+, &i__2, &ierr);
+
+/* Copy L to U, zeroing out above it */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ zlaset_("U", &i__2, &i__3, &c_b1, &c_b1, &u[(u_dim1 << 1)
+ + 1], ldu);
+
+/* Generate Q in A */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in U */
+/* (CWorkspace: need 3*M, prefer 2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(m, m, &u[u_offset], ldu, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[iwork], &i__2, &ierr);
+
+/* Multiply right vectors bidiagonalizing L by Q in A */
+/* (CWorkspace: need 2*M+N, prefer 2*M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunmbr_("P", "L", "C", m, n, m, &u[u_offset], ldu, &work[
+ itaup], &a[a_offset], lda, &work[iwork], &i__2, &
+ ierr);
+
+/* Generate left vectors bidiagonalizing L in U */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq], &
+ work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in A */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", m, n, m, &c__0, &s[1], &rwork[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, cdum, &c__1, &
+ rwork[irwork], info);
+
+ }
+
+ } else if (wntvs) {
+
+ if (wntun) {
+
+/* Path 4t(N much larger than M, JOBU='N', JOBVT='S') */
+/* M right singular vectors to be computed in VT and */
+/* no left singular vectors to be computed */
+
+ if (*lwork >= *m * *m + *m * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+ if (*lwork >= wrkbl + *lda * *m) {
+
+/* WORK(IR) is LDA by M */
+
+ ldwrkr = *lda;
+ } else {
+
+/* WORK(IR) is M by M */
+
+ ldwrkr = *m;
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IR), zeroing out above it */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &work[ir], &
+ ldwrkr);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ zlaset_("U", &i__2, &i__3, &c_b1, &c_b1, &work[ir +
+ ldwrkr], &ldwrkr);
+
+/* Generate Q in A */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunglq_(m, n, m, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IR) */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(m, m, &work[ir], &ldwrkr, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Generate right vectors bidiagonalizing L in */
+/* WORK(IR) */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", m, m, m, &work[ir], &ldwrkr, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of L in WORK(IR) */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", m, m, &c__0, &c__0, &s[1], &rwork[ie], &
+ work[ir], &ldwrkr, cdum, &c__1, cdum, &c__1, &
+ rwork[irwork], info);
+
+/* Multiply right singular vectors of L in WORK(IR) by */
+/* Q in A, storing result in VT */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: 0) */
+
+ zgemm_("N", "N", m, n, m, &c_b2, &work[ir], &ldwrkr, &
+ a[a_offset], lda, &c_b1, &vt[vt_offset], ldvt);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy result to VT */
+
+ zlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunglq_(m, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Zero out above L in A */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ zlaset_("U", &i__2, &i__3, &c_b1, &c_b1, &a[(a_dim1 <<
+ 1) + 1], lda);
+
+/* Bidiagonalize L in A */
+/* (CWorkspace: need 3*M, prefer 2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right vectors bidiagonalizing L by Q in VT */
+/* (CWorkspace: need 2*M+N, prefer 2*M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunmbr_("P", "L", "C", m, n, m, &a[a_offset], lda, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of A in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", m, n, &c__0, &c__0, &s[1], &rwork[ie], &
+ vt[vt_offset], ldvt, cdum, &c__1, cdum, &c__1,
+ &rwork[irwork], info);
+
+ }
+
+ } else if (wntuo) {
+
+/* Path 5t(N much larger than M, JOBU='O', JOBVT='S') */
+/* M right singular vectors to be computed in VT and */
+/* M left singular vectors to be overwritten on A */
+
+ if (*lwork >= (*m << 1) * *m + *m * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + (*lda << 1) * *m) {
+
+/* WORK(IU) is LDA by M and WORK(IR) is LDA by M */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *lda;
+ } else if (*lwork >= wrkbl + (*lda + *m) * *m) {
+
+/* WORK(IU) is LDA by M and WORK(IR) is M by M */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *m;
+ } else {
+
+/* WORK(IU) is M by M and WORK(IR) is M by M */
+
+ ldwrku = *m;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *m;
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need 2*M*M+2*M, prefer 2*M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IU), zeroing out below it */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ zlaset_("U", &i__2, &i__3, &c_b1, &c_b1, &work[iu +
+ ldwrku], &ldwrku);
+
+/* Generate Q in A */
+/* (CWorkspace: need 2*M*M+2*M, prefer 2*M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunglq_(m, n, m, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IU), copying result to */
+/* WORK(IR) */
+/* (CWorkspace: need 2*M*M+3*M, */
+/* prefer 2*M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(m, m, &work[iu], &ldwrku, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ zlacpy_("L", m, m, &work[iu], &ldwrku, &work[ir], &
+ ldwrkr);
+
+/* Generate right bidiagonalizing vectors in WORK(IU) */
+/* (CWorkspace: need 2*M*M+3*M-1, */
+/* prefer 2*M*M+2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", m, m, m, &work[iu], &ldwrku, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in WORK(IR) */
+/* (CWorkspace: need 2*M*M+3*M, prefer 2*M*M+2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", m, m, m, &work[ir], &ldwrkr, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in WORK(IR) and computing */
+/* right singular vectors of L in WORK(IU) */
+/* (CWorkspace: need 2*M*M) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", m, m, m, &c__0, &s[1], &rwork[ie], &work[
+ iu], &ldwrku, &work[ir], &ldwrkr, cdum, &c__1,
+ &rwork[irwork], info);
+
+/* Multiply right singular vectors of L in WORK(IU) by */
+/* Q in A, storing result in VT */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: 0) */
+
+ zgemm_("N", "N", m, n, m, &c_b2, &work[iu], &ldwrku, &
+ a[a_offset], lda, &c_b1, &vt[vt_offset], ldvt);
+
+/* Copy left singular vectors of L to A */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: 0) */
+
+ zlacpy_("F", m, m, &work[ir], &ldwrkr, &a[a_offset],
+ lda);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ zlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunglq_(m, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Zero out above L in A */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ zlaset_("U", &i__2, &i__3, &c_b1, &c_b1, &a[(a_dim1 <<
+ 1) + 1], lda);
+
+/* Bidiagonalize L in A */
+/* (CWorkspace: need 3*M, prefer 2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right vectors bidiagonalizing L by Q in VT */
+/* (CWorkspace: need 2*M+N, prefer 2*M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunmbr_("P", "L", "C", m, n, m, &a[a_offset], lda, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors of L in A */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", m, m, m, &a[a_offset], lda, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in A and computing right */
+/* singular vectors of A in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", m, n, m, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, cdum, &
+ c__1, &rwork[irwork], info);
+
+ }
+
+ } else if (wntuas) {
+
+/* Path 6t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='S') */
+/* M right singular vectors to be computed in VT and */
+/* M left singular vectors to be computed in U */
+
+ if (*lwork >= *m * *m + *m * 3) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + *lda * *m) {
+
+/* WORK(IU) is LDA by N */
+
+ ldwrku = *lda;
+ } else {
+
+/* WORK(IU) is LDA by M */
+
+ ldwrku = *m;
+ }
+ itau = iu + ldwrku * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IU), zeroing out above it */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ zlaset_("U", &i__2, &i__3, &c_b1, &c_b1, &work[iu +
+ ldwrku], &ldwrku);
+
+/* Generate Q in A */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunglq_(m, n, m, &a[a_offset], lda, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IU), copying result to U */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(m, m, &work[iu], &ldwrku, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ zlacpy_("L", m, m, &work[iu], &ldwrku, &u[u_offset],
+ ldu);
+
+/* Generate right bidiagonalizing vectors in WORK(IU) */
+/* (CWorkspace: need M*M+3*M-1, */
+/* prefer M*M+2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", m, m, m, &work[iu], &ldwrku, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in U */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in U and computing right */
+/* singular vectors of L in WORK(IU) */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", m, m, m, &c__0, &s[1], &rwork[ie], &work[
+ iu], &ldwrku, &u[u_offset], ldu, cdum, &c__1,
+ &rwork[irwork], info);
+
+/* Multiply right singular vectors of L in WORK(IU) by */
+/* Q in A, storing result in VT */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: 0) */
+
+ zgemm_("N", "N", m, n, m, &c_b2, &work[iu], &ldwrku, &
+ a[a_offset], lda, &c_b1, &vt[vt_offset], ldvt);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ zlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunglq_(m, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy L to U, zeroing out above it */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &u[u_offset],
+ ldu);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ zlaset_("U", &i__2, &i__3, &c_b1, &c_b1, &u[(u_dim1 <<
+ 1) + 1], ldu);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in U */
+/* (CWorkspace: need 3*M, prefer 2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(m, m, &u[u_offset], ldu, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right bidiagonalizing vectors in U by Q */
+/* in VT */
+/* (CWorkspace: need 2*M+N, prefer 2*M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunmbr_("P", "L", "C", m, n, m, &u[u_offset], ldu, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in U */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", m, n, m, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, cdum, &
+ c__1, &rwork[irwork], info);
+
+ }
+
+ }
+
+ } else if (wntva) {
+
+ if (wntun) {
+
+/* Path 7t(N much larger than M, JOBU='N', JOBVT='A') */
+/* N right singular vectors to be computed in VT and */
+/* no left singular vectors to be computed */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *m * 3;
+ if (*lwork >= *m * *m + max(i__2,i__3)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ ir = 1;
+ if (*lwork >= wrkbl + *lda * *m) {
+
+/* WORK(IR) is LDA by M */
+
+ ldwrkr = *lda;
+ } else {
+
+/* WORK(IR) is M by M */
+
+ ldwrkr = *m;
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ zlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Copy L to WORK(IR), zeroing out above it */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &work[ir], &
+ ldwrkr);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ zlaset_("U", &i__2, &i__3, &c_b1, &c_b1, &work[ir +
+ ldwrkr], &ldwrkr);
+
+/* Generate Q in VT */
+/* (CWorkspace: need M*M+M+N, prefer M*M+M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IR) */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(m, m, &work[ir], &ldwrkr, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Generate right bidiagonalizing vectors in WORK(IR) */
+/* (CWorkspace: need M*M+3*M-1, */
+/* prefer M*M+2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", m, m, m, &work[ir], &ldwrkr, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of L in WORK(IR) */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", m, m, &c__0, &c__0, &s[1], &rwork[ie], &
+ work[ir], &ldwrkr, cdum, &c__1, cdum, &c__1, &
+ rwork[irwork], info);
+
+/* Multiply right singular vectors of L in WORK(IR) by */
+/* Q in VT, storing result in A */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: 0) */
+
+ zgemm_("N", "N", m, n, m, &c_b2, &work[ir], &ldwrkr, &
+ vt[vt_offset], ldvt, &c_b1, &a[a_offset], lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ zlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ zlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (CWorkspace: need M+N, prefer M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Zero out above L in A */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ zlaset_("U", &i__2, &i__3, &c_b1, &c_b1, &a[(a_dim1 <<
+ 1) + 1], lda);
+
+/* Bidiagonalize L in A */
+/* (CWorkspace: need 3*M, prefer 2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right bidiagonalizing vectors in A by Q */
+/* in VT */
+/* (CWorkspace: need 2*M+N, prefer 2*M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunmbr_("P", "L", "C", m, n, m, &a[a_offset], lda, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing right */
+/* singular vectors of A in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", m, n, &c__0, &c__0, &s[1], &rwork[ie], &
+ vt[vt_offset], ldvt, cdum, &c__1, cdum, &c__1,
+ &rwork[irwork], info);
+
+ }
+
+ } else if (wntuo) {
+
+/* Path 8t(N much larger than M, JOBU='O', JOBVT='A') */
+/* N right singular vectors to be computed in VT and */
+/* M left singular vectors to be overwritten on A */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *m * 3;
+ if (*lwork >= (*m << 1) * *m + max(i__2,i__3)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + (*lda << 1) * *m) {
+
+/* WORK(IU) is LDA by M and WORK(IR) is LDA by M */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *lda;
+ } else if (*lwork >= wrkbl + (*lda + *m) * *m) {
+
+/* WORK(IU) is LDA by M and WORK(IR) is M by M */
+
+ ldwrku = *lda;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *m;
+ } else {
+
+/* WORK(IU) is M by M and WORK(IR) is M by M */
+
+ ldwrku = *m;
+ ir = iu + ldwrku * *m;
+ ldwrkr = *m;
+ }
+ itau = ir + ldwrkr * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (CWorkspace: need 2*M*M+2*M, prefer 2*M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ zlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (CWorkspace: need 2*M*M+M+N, prefer 2*M*M+M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IU), zeroing out above it */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ zlaset_("U", &i__2, &i__3, &c_b1, &c_b1, &work[iu +
+ ldwrku], &ldwrku);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IU), copying result to */
+/* WORK(IR) */
+/* (CWorkspace: need 2*M*M+3*M, */
+/* prefer 2*M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(m, m, &work[iu], &ldwrku, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ zlacpy_("L", m, m, &work[iu], &ldwrku, &work[ir], &
+ ldwrkr);
+
+/* Generate right bidiagonalizing vectors in WORK(IU) */
+/* (CWorkspace: need 2*M*M+3*M-1, */
+/* prefer 2*M*M+2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", m, m, m, &work[iu], &ldwrku, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in WORK(IR) */
+/* (CWorkspace: need 2*M*M+3*M, prefer 2*M*M+2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", m, m, m, &work[ir], &ldwrkr, &work[itauq]
+, &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in WORK(IR) and computing */
+/* right singular vectors of L in WORK(IU) */
+/* (CWorkspace: need 2*M*M) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", m, m, m, &c__0, &s[1], &rwork[ie], &work[
+ iu], &ldwrku, &work[ir], &ldwrkr, cdum, &c__1,
+ &rwork[irwork], info);
+
+/* Multiply right singular vectors of L in WORK(IU) by */
+/* Q in VT, storing result in A */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: 0) */
+
+ zgemm_("N", "N", m, n, m, &c_b2, &work[iu], &ldwrku, &
+ vt[vt_offset], ldvt, &c_b1, &a[a_offset], lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ zlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Copy left singular vectors of A from WORK(IR) to A */
+
+ zlacpy_("F", m, m, &work[ir], &ldwrkr, &a[a_offset],
+ lda);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ zlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (CWorkspace: need M+N, prefer M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Zero out above L in A */
+
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ zlaset_("U", &i__2, &i__3, &c_b1, &c_b1, &a[(a_dim1 <<
+ 1) + 1], lda);
+
+/* Bidiagonalize L in A */
+/* (CWorkspace: need 3*M, prefer 2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right bidiagonalizing vectors in A by Q */
+/* in VT */
+/* (CWorkspace: need 2*M+N, prefer 2*M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunmbr_("P", "L", "C", m, n, m, &a[a_offset], lda, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in A */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", m, m, m, &a[a_offset], lda, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in A and computing right */
+/* singular vectors of A in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", m, n, m, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, cdum, &
+ c__1, &rwork[irwork], info);
+
+ }
+
+ } else if (wntuas) {
+
+/* Path 9t(N much larger than M, JOBU='S' or 'A', */
+/* JOBVT='A') */
+/* N right singular vectors to be computed in VT and */
+/* M left singular vectors to be computed in U */
+
+/* Computing MAX */
+ i__2 = *n + *m, i__3 = *m * 3;
+ if (*lwork >= *m * *m + max(i__2,i__3)) {
+
+/* Sufficient workspace for a fast algorithm */
+
+ iu = 1;
+ if (*lwork >= wrkbl + *lda * *m) {
+
+/* WORK(IU) is LDA by M */
+
+ ldwrku = *lda;
+ } else {
+
+/* WORK(IU) is M by M */
+
+ ldwrku = *m;
+ }
+ itau = iu + ldwrku * *m;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ zlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (CWorkspace: need M*M+M+N, prefer M*M+M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy L to WORK(IU), zeroing out above it */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &work[iu], &
+ ldwrku);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ zlaset_("U", &i__2, &i__3, &c_b1, &c_b1, &work[iu +
+ ldwrku], &ldwrku);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in WORK(IU), copying result to U */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(m, m, &work[iu], &ldwrku, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+ zlacpy_("L", m, m, &work[iu], &ldwrku, &u[u_offset],
+ ldu);
+
+/* Generate right bidiagonalizing vectors in WORK(IU) */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", m, m, m, &work[iu], &ldwrku, &work[itaup]
+, &work[iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in U */
+/* (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of L in U and computing right */
+/* singular vectors of L in WORK(IU) */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", m, m, m, &c__0, &s[1], &rwork[ie], &work[
+ iu], &ldwrku, &u[u_offset], ldu, cdum, &c__1,
+ &rwork[irwork], info);
+
+/* Multiply right singular vectors of L in WORK(IU) by */
+/* Q in VT, storing result in A */
+/* (CWorkspace: need M*M) */
+/* (RWorkspace: 0) */
+
+ zgemm_("N", "N", m, n, m, &c_b2, &work[iu], &ldwrku, &
+ vt[vt_offset], ldvt, &c_b1, &a[a_offset], lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ zlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+ } else {
+
+/* Insufficient workspace for a fast algorithm */
+
+ itau = 1;
+ iwork = itau + *m;
+
+/* Compute A=L*Q, copying result to VT */
+/* (CWorkspace: need 2*M, prefer M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[
+ iwork], &i__2, &ierr);
+ zlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset],
+ ldvt);
+
+/* Generate Q in VT */
+/* (CWorkspace: need M+N, prefer M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &
+ work[iwork], &i__2, &ierr);
+
+/* Copy L to U, zeroing out above it */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &u[u_offset],
+ ldu);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ zlaset_("U", &i__2, &i__3, &c_b1, &c_b1, &u[(u_dim1 <<
+ 1) + 1], ldu);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize L in U */
+/* (CWorkspace: need 3*M, prefer 2*M+2*M*NB) */
+/* (RWorkspace: need M) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(m, m, &u[u_offset], ldu, &s[1], &rwork[ie], &
+ work[itauq], &work[itaup], &work[iwork], &
+ i__2, &ierr);
+
+/* Multiply right bidiagonalizing vectors in U by Q */
+/* in VT */
+/* (CWorkspace: need 2*M+N, prefer 2*M+N*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zunmbr_("P", "L", "C", m, n, m, &u[u_offset], ldu, &
+ work[itaup], &vt[vt_offset], ldvt, &work[
+ iwork], &i__2, &ierr);
+
+/* Generate left bidiagonalizing vectors in U */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", m, m, m, &u[u_offset], ldu, &work[itauq],
+ &work[iwork], &i__2, &ierr);
+ irwork = ie + *m;
+
+/* Perform bidiagonal QR iteration, computing left */
+/* singular vectors of A in U and computing right */
+/* singular vectors of A in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("U", m, n, m, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, cdum, &
+ c__1, &rwork[irwork], info);
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* N .LT. MNTHR */
+
+/* Path 10t(N greater than M, but not much larger) */
+/* Reduce to bidiagonal form without LQ decomposition */
+
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *m;
+ iwork = itaup + *m;
+
+/* Bidiagonalize A */
+/* (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB) */
+/* (RWorkspace: M) */
+
+ i__2 = *lwork - iwork + 1;
+ zgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[iwork], &i__2, &ierr);
+ if (wntuas) {
+
+/* If left singular vectors desired in U, copy result to U */
+/* and generate left bidiagonalizing vectors in U */
+/* (CWorkspace: need 3*M-1, prefer 2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ zlacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ iwork], &i__2, &ierr);
+ }
+ if (wntvas) {
+
+/* If right singular vectors desired in VT, copy result to */
+/* VT and generate right bidiagonalizing vectors in VT */
+/* (CWorkspace: need 2*M+NRVT, prefer 2*M+NRVT*NB) */
+/* (RWorkspace: 0) */
+
+ zlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ if (wntva) {
+ nrvt = *n;
+ }
+ if (wntvs) {
+ nrvt = *m;
+ }
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", &nrvt, n, m, &vt[vt_offset], ldvt, &work[itaup],
+ &work[iwork], &i__2, &ierr);
+ }
+ if (wntuo) {
+
+/* If left singular vectors desired in A, generate left */
+/* bidiagonalizing vectors in A */
+/* (CWorkspace: need 3*M-1, prefer 2*M+(M-1)*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("Q", m, m, n, &a[a_offset], lda, &work[itauq], &work[
+ iwork], &i__2, &ierr);
+ }
+ if (wntvo) {
+
+/* If right singular vectors desired in A, generate right */
+/* bidiagonalizing vectors in A */
+/* (CWorkspace: need 3*M, prefer 2*M+M*NB) */
+/* (RWorkspace: 0) */
+
+ i__2 = *lwork - iwork + 1;
+ zungbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &work[
+ iwork], &i__2, &ierr);
+ }
+ irwork = ie + *m;
+ if (wntuas || wntuo) {
+ nru = *m;
+ }
+ if (wntun) {
+ nru = 0;
+ }
+ if (wntvas || wntvo) {
+ ncvt = *n;
+ }
+ if (wntvn) {
+ ncvt = 0;
+ }
+ if (! wntuo && ! wntvo) {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in U and computing right singular */
+/* vectors in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("L", m, &ncvt, &nru, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, cdum, &c__1, &
+ rwork[irwork], info);
+ } else if (! wntuo && wntvo) {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in U and computing right singular */
+/* vectors in A */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("L", m, &ncvt, &nru, &c__0, &s[1], &rwork[ie], &a[
+ a_offset], lda, &u[u_offset], ldu, cdum, &c__1, &
+ rwork[irwork], info);
+ } else {
+
+/* Perform bidiagonal QR iteration, if desired, computing */
+/* left singular vectors in A and computing right singular */
+/* vectors in VT */
+/* (CWorkspace: 0) */
+/* (RWorkspace: need BDSPAC) */
+
+ zbdsqr_("L", m, &ncvt, &nru, &c__0, &s[1], &rwork[ie], &vt[
+ vt_offset], ldvt, &a[a_offset], lda, cdum, &c__1, &
+ rwork[irwork], info);
+ }
+
+ }
+
+ }
+
+/* Undo scaling if necessary */
+
+ if (iscl == 1) {
+ if (anrm > bignum) {
+ dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ if (*info != 0 && anrm > bignum) {
+ i__2 = minmn - 1;
+ dlascl_("G", &c__0, &c__0, &bignum, &anrm, &i__2, &c__1, &rwork[
+ ie], &minmn, &ierr);
+ }
+ if (anrm < smlnum) {
+ dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ if (*info != 0 && anrm < smlnum) {
+ i__2 = minmn - 1;
+ dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &i__2, &c__1, &rwork[
+ ie], &minmn, &ierr);
+ }
+ }
+
+/* Return optimal workspace in WORK(1) */
+
+ work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZGESVD */
+
+} /* zgesvd_ */
diff --git a/contrib/libs/clapack/zgesvx.c b/contrib/libs/clapack/zgesvx.c
new file mode 100644
index 0000000000..44a3ff766b
--- /dev/null
+++ b/contrib/libs/clapack/zgesvx.c
@@ -0,0 +1,610 @@
+/* zgesvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zgesvx_(char *fact, char *trans, integer *n, integer *
+ nrhs, doublecomplex *a, integer *lda, doublecomplex *af, integer *
+ ldaf, integer *ipiv, char *equed, doublereal *r__, doublereal *c__,
+ doublecomplex *b, integer *ldb, doublecomplex *x, integer *ldx,
+ doublereal *rcond, doublereal *ferr, doublereal *berr, doublecomplex *
+ work, doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+ x_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal amax;
+ char norm[1];
+ extern logical lsame_(char *, char *);
+ doublereal rcmin, rcmax, anorm;
+ logical equil;
+ extern doublereal dlamch_(char *);
+ doublereal colcnd;
+ logical nofact;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ doublereal bignum;
+ extern /* Subroutine */ int zlaqge_(integer *, integer *, doublecomplex *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *
+, doublereal *, char *), zgecon_(char *, integer *,
+ doublecomplex *, integer *, doublereal *, doublereal *,
+ doublecomplex *, doublereal *, integer *);
+ integer infequ;
+ logical colequ;
+ doublereal rowcnd;
+ extern /* Subroutine */ int zgeequ_(integer *, integer *, doublecomplex *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *
+, doublereal *, integer *);
+ logical notran;
+ extern /* Subroutine */ int zgerfs_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *, doublecomplex *, doublereal *,
+ integer *), zgetrf_(integer *, integer *, doublecomplex *,
+ integer *, integer *, integer *), zlacpy_(char *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *);
+ extern doublereal zlantr_(char *, char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublereal *);
+ doublereal smlnum;
+ extern /* Subroutine */ int zgetrs_(char *, integer *, integer *,
+ doublecomplex *, integer *, integer *, doublecomplex *, integer *,
+ integer *);
+ logical rowequ;
+ doublereal rpvgrw;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGESVX uses the LU factorization to compute the solution to a complex */
+/* system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N matrix and X and B are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/* the system: */
+/* TRANS = 'N': diag(R)*A*diag(C) *inv(diag(C))*X = diag(R)*B */
+/* TRANS = 'T': (diag(R)*A*diag(C))**T *inv(diag(R))*X = diag(C)*B */
+/* TRANS = 'C': (diag(R)*A*diag(C))**H *inv(diag(R))*X = diag(C)*B */
+/* Whether or not the system will be equilibrated depends on the */
+/* scaling of the matrix A, but if equilibration is used, A is */
+/* overwritten by diag(R)*A*diag(C) and B by diag(R)*B (if TRANS='N') */
+/* or diag(C)*B (if TRANS = 'T' or 'C'). */
+
+/* 2. If FACT = 'N' or 'E', the LU decomposition is used to factor the */
+/* matrix A (after equilibration if FACT = 'E') as */
+/* A = P * L * U, */
+/* where P is a permutation matrix, L is a unit lower triangular */
+/* matrix, and U is upper triangular. */
+
+/* 3. If some U(i,i)=0, so that U is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 4. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 5. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* 6. If equilibration was used, the matrix X is premultiplied by */
+/* diag(C) (if TRANS = 'N') or diag(R) (if TRANS = 'T' or 'C') so */
+/* that it solves the original system before equilibration. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of the matrix A is */
+/* supplied on entry, and if not, whether the matrix A should be */
+/* equilibrated before it is factored. */
+/* = 'F': On entry, AF and IPIV contain the factored form of A. */
+/* If EQUED is not 'N', the matrix A has been */
+/* equilibrated with scaling factors given by R and C. */
+/* A, AF, and IPIV are not modified. */
+/* = 'N': The matrix A will be copied to AF and factored. */
+/* = 'E': The matrix A will be equilibrated if necessary, then */
+/* copied to AF and factored. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the N-by-N matrix A. If FACT = 'F' and EQUED is */
+/* not 'N', then A must have been equilibrated by the scaling */
+/* factors in R and/or C. A is not modified if FACT = 'F' or */
+/* 'N', or if FACT = 'E' and EQUED = 'N' on exit. */
+
+/* On exit, if EQUED .ne. 'N', A is scaled as follows: */
+/* EQUED = 'R': A := diag(R) * A */
+/* EQUED = 'C': A := A * diag(C) */
+/* EQUED = 'B': A := diag(R) * A * diag(C). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input or output) COMPLEX*16 array, dimension (LDAF,N) */
+/* If FACT = 'F', then AF is an input argument and on entry */
+/* contains the factors L and U from the factorization */
+/* A = P*L*U as computed by ZGETRF. If EQUED .ne. 'N', then */
+/* AF is the factored form of the equilibrated matrix A. */
+
+/* If FACT = 'N', then AF is an output argument and on exit */
+/* returns the factors L and U from the factorization A = P*L*U */
+/* of the original matrix A. */
+
+/* If FACT = 'E', then AF is an output argument and on exit */
+/* returns the factors L and U from the factorization A = P*L*U */
+/* of the equilibrated matrix A (see the description of A for */
+/* the form of the equilibrated matrix). */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains the pivot indices from the factorization A = P*L*U */
+/* as computed by ZGETRF; row i of the matrix was interchanged */
+/* with row IPIV(i). */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains the pivot indices from the factorization A = P*L*U */
+/* of the original matrix A. */
+
+/* If FACT = 'E', then IPIV is an output argument and on exit */
+/* contains the pivot indices from the factorization A = P*L*U */
+/* of the equilibrated matrix A. */
+
+/* EQUED (input or output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration (always true if FACT = 'N'). */
+/* = 'R': Row equilibration, i.e., A has been premultiplied by */
+/* diag(R). */
+/* = 'C': Column equilibration, i.e., A has been postmultiplied */
+/* by diag(C). */
+/* = 'B': Both row and column equilibration, i.e., A has been */
+/* replaced by diag(R) * A * diag(C). */
+/* EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/* output argument. */
+
+/* R (input or output) DOUBLE PRECISION array, dimension (N) */
+/* The row scale factors for A. If EQUED = 'R' or 'B', A is */
+/* multiplied on the left by diag(R); if EQUED = 'N' or 'C', R */
+/* is not accessed. R is an input argument if FACT = 'F'; */
+/* otherwise, R is an output argument. If FACT = 'F' and */
+/* EQUED = 'R' or 'B', each element of R must be positive. */
+
+/* C (input or output) DOUBLE PRECISION array, dimension (N) */
+/* The column scale factors for A. If EQUED = 'C' or 'B', A is */
+/* multiplied on the right by diag(C); if EQUED = 'N' or 'R', C */
+/* is not accessed. C is an input argument if FACT = 'F'; */
+/* otherwise, C is an output argument. If FACT = 'F' and */
+/* EQUED = 'C' or 'B', each element of C must be positive. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, */
+/* if EQUED = 'N', B is not modified; */
+/* if TRANS = 'N' and EQUED = 'R' or 'B', B is overwritten by */
+/* diag(R)*B; */
+/* if TRANS = 'T' or 'C' and EQUED = 'C' or 'B', B is */
+/* overwritten by diag(C)*B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X */
+/* to the original system of equations. Note that A and B are */
+/* modified on exit if EQUED .ne. 'N', and the solution to the */
+/* equilibrated system is inv(diag(C))*X if TRANS = 'N' and */
+/* EQUED = 'C' or 'B', or inv(diag(R))*X if TRANS = 'T' or 'C' */
+/* and EQUED = 'R' or 'B'. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A after equilibration (if done). If RCOND is less than the */
+/* machine precision (in particular, if RCOND = 0), the matrix */
+/* is singular to working precision. This condition is */
+/* indicated by a return code of INFO > 0. */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace/output) DOUBLE PRECISION array, dimension (2*N) */
+/* On exit, RWORK(1) contains the reciprocal pivot growth */
+/* factor norm(A)/norm(U). The "max absolute element" norm is */
+/* used. If RWORK(1) is much less than 1, then the stability */
+/* of the LU factorization of the (equilibrated) matrix A */
+/* could be poor. This also means that the solution X, condition */
+/* estimator RCOND, and forward error bound FERR could be */
+/* unreliable. If factorization fails with 0<INFO<=N, then */
+/* RWORK(1) contains the reciprocal pivot growth factor for the */
+/* leading INFO columns of A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: U(i,i) is exactly zero. The factorization has */
+/* been completed, but the factor U is exactly */
+/* singular, so the solution and error bounds */
+/* could not be computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ af_dim1 = *ldaf;
+ af_offset = 1 + af_dim1;
+ af -= af_offset;
+ --ipiv;
+ --r__;
+ --c__;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ equil = lsame_(fact, "E");
+ notran = lsame_(trans, "N");
+ if (nofact || equil) {
+ *(unsigned char *)equed = 'N';
+ rowequ = FALSE_;
+ colequ = FALSE_;
+ } else {
+ rowequ = lsame_(equed, "R") || lsame_(equed,
+ "B");
+ colequ = lsame_(equed, "C") || lsame_(equed,
+ "B");
+ smlnum = dlamch_("Safe minimum");
+ bignum = 1. / smlnum;
+ }
+
+/* Test the input parameters. */
+
+ if (! nofact && ! equil && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else if (*ldaf < max(1,*n)) {
+ *info = -8;
+ } else if (lsame_(fact, "F") && ! (rowequ || colequ
+ || lsame_(equed, "N"))) {
+ *info = -10;
+ } else {
+ if (rowequ) {
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ d__1 = rcmin, d__2 = r__[j];
+ rcmin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = rcmax, d__2 = r__[j];
+ rcmax = max(d__1,d__2);
+/* L10: */
+ }
+ if (rcmin <= 0.) {
+ *info = -11;
+ } else if (*n > 0) {
+ rowcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ } else {
+ rowcnd = 1.;
+ }
+ }
+ if (colequ && *info == 0) {
+ rcmin = bignum;
+ rcmax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ d__1 = rcmin, d__2 = c__[j];
+ rcmin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = rcmax, d__2 = c__[j];
+ rcmax = max(d__1,d__2);
+/* L20: */
+ }
+ if (rcmin <= 0.) {
+ *info = -12;
+ } else if (*n > 0) {
+ colcnd = max(rcmin,smlnum) / min(rcmax,bignum);
+ } else {
+ colcnd = 1.;
+ }
+ }
+ if (*info == 0) {
+ if (*ldb < max(1,*n)) {
+ *info = -14;
+ } else if (*ldx < max(1,*n)) {
+ *info = -16;
+ }
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGESVX", &i__1);
+ return 0;
+ }
+
+ if (equil) {
+
+/* Compute row and column scalings to equilibrate the matrix A. */
+
+ zgeequ_(n, n, &a[a_offset], lda, &r__[1], &c__[1], &rowcnd, &colcnd, &
+ amax, &infequ);
+ if (infequ == 0) {
+
+/* Equilibrate the matrix. */
+
+ zlaqge_(n, n, &a[a_offset], lda, &r__[1], &c__[1], &rowcnd, &
+ colcnd, &amax, equed);
+ rowequ = lsame_(equed, "R") || lsame_(equed,
+ "B");
+ colequ = lsame_(equed, "C") || lsame_(equed,
+ "B");
+ }
+ }
+
+/* Scale the right hand side. */
+
+ if (notran) {
+ if (rowequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__;
+ i__5 = i__ + j * b_dim1;
+ z__1.r = r__[i__4] * b[i__5].r, z__1.i = r__[i__4] * b[
+ i__5].i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else if (colequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__;
+ i__5 = i__ + j * b_dim1;
+ z__1.r = c__[i__4] * b[i__5].r, z__1.i = c__[i__4] * b[i__5]
+ .i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+ if (nofact || equil) {
+
+/* Compute the LU factorization of A. */
+
+ zlacpy_("Full", n, n, &a[a_offset], lda, &af[af_offset], ldaf);
+ zgetrf_(n, n, &af[af_offset], ldaf, &ipiv[1], info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+
+/* Compute the reciprocal pivot growth factor of the */
+/* leading rank-deficient INFO columns of A. */
+
+ rpvgrw = zlantr_("M", "U", "N", info, info, &af[af_offset], ldaf,
+ &rwork[1]);
+ if (rpvgrw == 0.) {
+ rpvgrw = 1.;
+ } else {
+ rpvgrw = zlange_("M", n, info, &a[a_offset], lda, &rwork[1]) / rpvgrw;
+ }
+ rwork[1] = rpvgrw;
+ *rcond = 0.;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A and the */
+/* reciprocal pivot growth factor RPVGRW. */
+
+ if (notran) {
+ *(unsigned char *)norm = '1';
+ } else {
+ *(unsigned char *)norm = 'I';
+ }
+ anorm = zlange_(norm, n, n, &a[a_offset], lda, &rwork[1]);
+ rpvgrw = zlantr_("M", "U", "N", n, n, &af[af_offset], ldaf, &rwork[1]);
+ if (rpvgrw == 0.) {
+ rpvgrw = 1.;
+ } else {
+ rpvgrw = zlange_("M", n, n, &a[a_offset], lda, &rwork[1]) /
+ rpvgrw;
+ }
+
+/* Compute the reciprocal of the condition number of A. */
+
+ zgecon_(norm, n, &af[af_offset], ldaf, &anorm, rcond, &work[1], &rwork[1],
+ info);
+
+/* Compute the solution matrix X. */
+
+ zlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ zgetrs_(trans, n, nrhs, &af[af_offset], ldaf, &ipiv[1], &x[x_offset], ldx,
+ info);
+
+/* Use iterative refinement to improve the computed solution and */
+/* compute error bounds and backward error estimates for it. */
+
+ zgerfs_(trans, n, nrhs, &a[a_offset], lda, &af[af_offset], ldaf, &ipiv[1],
+ &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[
+ 1], &rwork[1], info);
+
+/* Transform the solution matrix X to a solution of the original */
+/* system. */
+
+ if (notran) {
+ if (colequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * x_dim1;
+ i__4 = i__;
+ i__5 = i__ + j * x_dim1;
+ z__1.r = c__[i__4] * x[i__5].r, z__1.i = c__[i__4] * x[
+ i__5].i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] /= colcnd;
+/* L90: */
+ }
+ }
+ } else if (rowequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * x_dim1;
+ i__4 = i__;
+ i__5 = i__ + j * x_dim1;
+ z__1.r = r__[i__4] * x[i__5].r, z__1.i = r__[i__4] * x[i__5]
+ .i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L100: */
+ }
+/* L110: */
+ }
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] /= rowcnd;
+/* L120: */
+ }
+ }
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < dlamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ rwork[1] = rpvgrw;
+ return 0;
+
+/* End of ZGESVX */
+
+} /* zgesvx_ */
diff --git a/contrib/libs/clapack/zgetc2.c b/contrib/libs/clapack/zgetc2.c
new file mode 100644
index 0000000000..672180fd33
--- /dev/null
+++ b/contrib/libs/clapack/zgetc2.c
@@ -0,0 +1,209 @@
+/* zgetc2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublecomplex c_b10 = {-1.,-0.};
+
+/* Subroutine */ int zgetc2_(integer *n, doublecomplex *a, integer *lda,
+ integer *ipiv, integer *jpiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublereal d__1;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *);
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, ip, jp;
+ doublereal eps;
+ integer ipv, jpv;
+ doublereal smin, xmax;
+ extern /* Subroutine */ int zgeru_(integer *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zswap_(integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *), dlabad_(doublereal *,
+ doublereal *);
+ extern doublereal dlamch_(char *);
+ doublereal bignum, smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGETC2 computes an LU factorization, using complete pivoting, of the */
+/* n-by-n matrix A. The factorization has the form A = P * L * U * Q, */
+/* where P and Q are permutation matrices, L is lower triangular with */
+/* unit diagonal elements and U is upper triangular. */
+
+/* This is a level 1 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA, N) */
+/* On entry, the n-by-n matrix to be factored. */
+/* On exit, the factors L and U from the factorization */
+/* A = P*L*U*Q; the unit diagonal elements of L are not stored. */
+/* If U(k, k) appears to be less than SMIN, U(k, k) is given the */
+/* value of SMIN, giving a nonsingular perturbed system. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1, N). */
+
+/* IPIV (output) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= i <= N, row i of the */
+/* matrix has been interchanged with row IPIV(i). */
+
+/* JPIV (output) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= j <= N, column j of the */
+/* matrix has been interchanged with column JPIV(j). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* > 0: if INFO = k, U(k, k) is likely to produce overflow if */
+/* one tries to solve for x in Ax = b. So U is perturbed */
+/* to avoid the overflow. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Set constants to control overflow */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ --jpiv;
+
+ /* Function Body */
+ *info = 0;
+ eps = dlamch_("P");
+ smlnum = dlamch_("S") / eps;
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+
+/* Factorize A using complete pivoting. */
+/* Set pivots less than SMIN to SMIN */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Find max element in matrix A */
+
+ xmax = 0.;
+ i__2 = *n;
+ for (ip = i__; ip <= i__2; ++ip) {
+ i__3 = *n;
+ for (jp = i__; jp <= i__3; ++jp) {
+ if (z_abs(&a[ip + jp * a_dim1]) >= xmax) {
+ xmax = z_abs(&a[ip + jp * a_dim1]);
+ ipv = ip;
+ jpv = jp;
+ }
+/* L10: */
+ }
+/* L20: */
+ }
+ if (i__ == 1) {
+/* Computing MAX */
+ d__1 = eps * xmax;
+ smin = max(d__1,smlnum);
+ }
+
+/* Swap rows */
+
+ if (ipv != i__) {
+ zswap_(n, &a[ipv + a_dim1], lda, &a[i__ + a_dim1], lda);
+ }
+ ipiv[i__] = ipv;
+
+/* Swap columns */
+
+ if (jpv != i__) {
+ zswap_(n, &a[jpv * a_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &
+ c__1);
+ }
+ jpiv[i__] = jpv;
+
+/* Check for singularity */
+
+ if (z_abs(&a[i__ + i__ * a_dim1]) < smin) {
+ *info = i__;
+ i__2 = i__ + i__ * a_dim1;
+ z__1.r = smin, z__1.i = 0.;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ }
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ i__3 = j + i__ * a_dim1;
+ z_div(&z__1, &a[j + i__ * a_dim1], &a[i__ + i__ * a_dim1]);
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L30: */
+ }
+ i__2 = *n - i__;
+ i__3 = *n - i__;
+ zgeru_(&i__2, &i__3, &c_b10, &a[i__ + 1 + i__ * a_dim1], &c__1, &a[
+ i__ + (i__ + 1) * a_dim1], lda, &a[i__ + 1 + (i__ + 1) *
+ a_dim1], lda);
+/* L40: */
+ }
+
+ if (z_abs(&a[*n + *n * a_dim1]) < smin) {
+ *info = *n;
+ i__1 = *n + *n * a_dim1;
+ z__1.r = smin, z__1.i = 0.;
+ a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+ }
+ return 0;
+
+/* End of ZGETC2 */
+
+} /* zgetc2_ */
diff --git a/contrib/libs/clapack/zgetf2.c b/contrib/libs/clapack/zgetf2.c
new file mode 100644
index 0000000000..ce382e3e12
--- /dev/null
+++ b/contrib/libs/clapack/zgetf2.c
@@ -0,0 +1,202 @@
+/* zgetf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zgetf2_(integer *m, integer *n, doublecomplex *a,
+ integer *lda, integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *);
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, jp;
+ doublereal sfmin;
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *), zgeru_(integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *), zswap_(integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer izamax_(integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGETF2 computes an LU factorization of a general m-by-n matrix A */
+/* using partial pivoting with row interchanges. */
+
+/* The factorization has the form */
+/* A = P * L * U */
+/* where P is a permutation matrix, L is lower triangular with unit */
+/* diagonal elements (lower trapezoidal if m > n), and U is upper */
+/* triangular (upper trapezoidal if m < n). */
+
+/* This is the right-looking Level 2 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the m by n matrix to be factored. */
+/* On exit, the factors L and U from the factorization */
+/* A = P*L*U; the unit diagonal elements of L are not stored. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* IPIV (output) INTEGER array, dimension (min(M,N)) */
+/* The pivot indices; for 1 <= i <= min(M,N), row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, U(k,k) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and division by zero will occur if it is used */
+/* to solve a system of equations. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGETF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Compute machine safe minimum */
+
+ sfmin = dlamch_("S");
+
+ i__1 = min(*m,*n);
+ for (j = 1; j <= i__1; ++j) {
+
+/* Find pivot and test for singularity. */
+
+ i__2 = *m - j + 1;
+ jp = j - 1 + izamax_(&i__2, &a[j + j * a_dim1], &c__1);
+ ipiv[j] = jp;
+ i__2 = jp + j * a_dim1;
+ if (a[i__2].r != 0. || a[i__2].i != 0.) {
+
+/* Apply the interchange to columns 1:N. */
+
+ if (jp != j) {
+ zswap_(n, &a[j + a_dim1], lda, &a[jp + a_dim1], lda);
+ }
+
+/* Compute elements J+1:M of J-th column. */
+
+ if (j < *m) {
+ if (z_abs(&a[j + j * a_dim1]) >= sfmin) {
+ i__2 = *m - j;
+ z_div(&z__1, &c_b1, &a[j + j * a_dim1]);
+ zscal_(&i__2, &z__1, &a[j + 1 + j * a_dim1], &c__1);
+ } else {
+ i__2 = *m - j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + i__ + j * a_dim1;
+ z_div(&z__1, &a[j + i__ + j * a_dim1], &a[j + j *
+ a_dim1]);
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L20: */
+ }
+ }
+ }
+
+ } else if (*info == 0) {
+
+ *info = j;
+ }
+
+ if (j < min(*m,*n)) {
+
+/* Update trailing submatrix. */
+
+ i__2 = *m - j;
+ i__3 = *n - j;
+ z__1.r = -1., z__1.i = -0.;
+ zgeru_(&i__2, &i__3, &z__1, &a[j + 1 + j * a_dim1], &c__1, &a[j +
+ (j + 1) * a_dim1], lda, &a[j + 1 + (j + 1) * a_dim1], lda)
+ ;
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of ZGETF2 */
+
+} /* zgetf2_ */
diff --git a/contrib/libs/clapack/zgetrf.c b/contrib/libs/clapack/zgetrf.c
new file mode 100644
index 0000000000..101dcd4dae
--- /dev/null
+++ b/contrib/libs/clapack/zgetrf.c
@@ -0,0 +1,219 @@
+/* zgetrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zgetrf_(integer *m, integer *n, doublecomplex *a,
+ integer *lda, integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__, j, jb, nb, iinfo;
+ extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), ztrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublecomplex *, doublecomplex *, integer *
+, doublecomplex *, integer *),
+ zgetf2_(integer *, integer *, doublecomplex *, integer *, integer
+ *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int zlaswp_(integer *, doublecomplex *, integer *,
+ integer *, integer *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGETRF computes an LU factorization of a general M-by-N matrix A */
+/* using partial pivoting with row interchanges. */
+
+/* The factorization has the form */
+/* A = P * L * U */
+/* where P is a permutation matrix, L is lower triangular with unit */
+/* diagonal elements (lower trapezoidal if m > n), and U is upper */
+/* triangular (upper trapezoidal if m < n). */
+
+/* This is the right-looking Level 3 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix to be factored. */
+/* On exit, the factors L and U from the factorization */
+/* A = P*L*U; the unit diagonal elements of L are not stored. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* IPIV (output) INTEGER array, dimension (min(M,N)) */
+/* The pivot indices; for 1 <= i <= min(M,N), row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and division by zero will occur if it is used */
+/* to solve a system of equations. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGETRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "ZGETRF", " ", m, n, &c_n1, &c_n1);
+ if (nb <= 1 || nb >= min(*m,*n)) {
+
+/* Use unblocked code. */
+
+ zgetf2_(m, n, &a[a_offset], lda, &ipiv[1], info);
+ } else {
+
+/* Use blocked code. */
+
+ i__1 = min(*m,*n);
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__3 = min(*m,*n) - j + 1;
+ jb = min(i__3,nb);
+
+/* Factor diagonal and subdiagonal blocks and test for exact */
+/* singularity. */
+
+ i__3 = *m - j + 1;
+ zgetf2_(&i__3, &jb, &a[j + j * a_dim1], lda, &ipiv[j], &iinfo);
+
+/* Adjust INFO and the pivot indices. */
+
+ if (*info == 0 && iinfo > 0) {
+ *info = iinfo + j - 1;
+ }
+/* Computing MIN */
+ i__4 = *m, i__5 = j + jb - 1;
+ i__3 = min(i__4,i__5);
+ for (i__ = j; i__ <= i__3; ++i__) {
+ ipiv[i__] = j - 1 + ipiv[i__];
+/* L10: */
+ }
+
+/* Apply interchanges to columns 1:J-1. */
+
+ i__3 = j - 1;
+ i__4 = j + jb - 1;
+ zlaswp_(&i__3, &a[a_offset], lda, &j, &i__4, &ipiv[1], &c__1);
+
+ if (j + jb <= *n) {
+
+/* Apply interchanges to columns J+JB:N. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j + jb - 1;
+ zlaswp_(&i__3, &a[(j + jb) * a_dim1 + 1], lda, &j, &i__4, &
+ ipiv[1], &c__1);
+
+/* Compute block row of U. */
+
+ i__3 = *n - j - jb + 1;
+ ztrsm_("Left", "Lower", "No transpose", "Unit", &jb, &i__3, &
+ c_b1, &a[j + j * a_dim1], lda, &a[j + (j + jb) *
+ a_dim1], lda);
+ if (j + jb <= *m) {
+
+/* Update trailing submatrix. */
+
+ i__3 = *m - j - jb + 1;
+ i__4 = *n - j - jb + 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "No transpose", &i__3, &i__4, &jb,
+ &z__1, &a[j + jb + j * a_dim1], lda, &a[j + (j +
+ jb) * a_dim1], lda, &c_b1, &a[j + jb + (j + jb) *
+ a_dim1], lda);
+ }
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of ZGETRF */
+
+} /* zgetrf_ */
diff --git a/contrib/libs/clapack/zgetri.c b/contrib/libs/clapack/zgetri.c
new file mode 100644
index 0000000000..246b34ece8
--- /dev/null
+++ b/contrib/libs/clapack/zgetri.c
@@ -0,0 +1,270 @@
+/* zgetri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int zgetri_(integer *n, doublecomplex *a, integer *lda,
+ integer *ipiv, doublecomplex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__, j, jb, nb, jj, jp, nn, iws, nbmin;
+ extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *),
+ zswap_(integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *), ztrsm_(char *, char *, char *, char *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *),
+ xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer ldwork, lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int ztrtri_(char *, char *, integer *,
+ doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGETRI computes the inverse of a matrix using the LU factorization */
+/* computed by ZGETRF. */
+
+/* This method inverts U and then computes inv(A) by solving the system */
+/* inv(A)*L = inv(U) for inv(A). */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the factors L and U from the factorization */
+/* A = P*L*U as computed by ZGETRF. */
+/* On exit, if INFO = 0, the inverse of the original matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices from ZGETRF; for 1<=i<=N, row i of the */
+/* matrix was interchanged with row IPIV(i). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO=0, then WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimal performance LWORK >= N*NB, where NB is */
+/* the optimal blocksize returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) is exactly zero; the matrix is */
+/* singular and its inverse could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "ZGETRI", " ", n, &c_n1, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ lquery = *lwork == -1;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*lda < max(1,*n)) {
+ *info = -3;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGETRI", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form inv(U). If INFO > 0 from ZTRTRI, then U is singular, */
+/* and the inverse is not computed. */
+
+ ztrtri_("Upper", "Non-unit", n, &a[a_offset], lda, info);
+ if (*info > 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = *n;
+ if (nb > 1 && nb < *n) {
+/* Computing MAX */
+ i__1 = ldwork * nb;
+ iws = max(i__1,1);
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "ZGETRI", " ", n, &c_n1, &c_n1, &
+ c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = *n;
+ }
+
+/* Solve the equation inv(A)*L = inv(U) for inv(A). */
+
+ if (nb < nbmin || nb >= *n) {
+
+/* Use unblocked code. */
+
+ for (j = *n; j >= 1; --j) {
+
+/* Copy current column of L to WORK and replace with zeros. */
+
+ i__1 = *n;
+ for (i__ = j + 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__ + j * a_dim1;
+ work[i__2].r = a[i__3].r, work[i__2].i = a[i__3].i;
+ i__2 = i__ + j * a_dim1;
+ a[i__2].r = 0., a[i__2].i = 0.;
+/* L10: */
+ }
+
+/* Compute current column of inv(A). */
+
+ if (j < *n) {
+ i__1 = *n - j;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", n, &i__1, &z__1, &a[(j + 1) * a_dim1 +
+ 1], lda, &work[j + 1], &c__1, &c_b2, &a[j * a_dim1 +
+ 1], &c__1);
+ }
+/* L20: */
+ }
+ } else {
+
+/* Use blocked code. */
+
+ nn = (*n - 1) / nb * nb + 1;
+ i__1 = -nb;
+ for (j = nn; i__1 < 0 ? j >= 1 : j <= 1; j += i__1) {
+/* Computing MIN */
+ i__2 = nb, i__3 = *n - j + 1;
+ jb = min(i__2,i__3);
+
+/* Copy current block column of L to WORK and replace with */
+/* zeros. */
+
+ i__2 = j + jb - 1;
+ for (jj = j; jj <= i__2; ++jj) {
+ i__3 = *n;
+ for (i__ = jj + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + (jj - j) * ldwork;
+ i__5 = i__ + jj * a_dim1;
+ work[i__4].r = a[i__5].r, work[i__4].i = a[i__5].i;
+ i__4 = i__ + jj * a_dim1;
+ a[i__4].r = 0., a[i__4].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Compute current block column of inv(A). */
+
+ if (j + jb <= *n) {
+ i__2 = *n - j - jb + 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "No transpose", n, &jb, &i__2, &z__1, &
+ a[(j + jb) * a_dim1 + 1], lda, &work[j + jb], &ldwork,
+ &c_b2, &a[j * a_dim1 + 1], lda);
+ }
+ ztrsm_("Right", "Lower", "No transpose", "Unit", n, &jb, &c_b2, &
+ work[j], &ldwork, &a[j * a_dim1 + 1], lda);
+/* L50: */
+ }
+ }
+
+/* Apply column interchanges. */
+
+ for (j = *n - 1; j >= 1; --j) {
+ jp = ipiv[j];
+ if (jp != j) {
+ zswap_(n, &a[j * a_dim1 + 1], &c__1, &a[jp * a_dim1 + 1], &c__1);
+ }
+/* L60: */
+ }
+
+ work[1].r = (doublereal) iws, work[1].i = 0.;
+ return 0;
+
+/* End of ZGETRI */
+
+} /* zgetri_ */
diff --git a/contrib/libs/clapack/zgetrs.c b/contrib/libs/clapack/zgetrs.c
new file mode 100644
index 0000000000..2496939040
--- /dev/null
+++ b/contrib/libs/clapack/zgetrs.c
@@ -0,0 +1,187 @@
+/* zgetrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zgetrs_(char *trans, integer *n, integer *nrhs, 
+	doublecomplex *a, integer *lda, integer *ipiv, doublecomplex *b, 
+	integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int ztrsm_(char *, char *, char *, char *, 
+	    integer *, integer *, doublecomplex *, doublecomplex *, integer *, 
+	    doublecomplex *, integer *), 
+	    xerbla_(char *, integer *);
+    logical notran;
+    extern /* Subroutine */ int zlaswp_(integer *, doublecomplex *, integer *, 
+	     integer *, integer *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZGETRS solves a system of linear equations */
+/*     A * X = B,  A**T * X = B,  or  A**H * X = B */
+/*  with a general N-by-N matrix A using the LU factorization computed */
+/*  by ZGETRF. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  TRANS   (input) CHARACTER*1 */
+/*          Specifies the form of the system of equations: */
+/*          = 'N':  A * X = B     (No transpose) */
+/*          = 'T':  A**T * X = B  (Transpose) */
+/*          = 'C':  A**H * X = B  (Conjugate transpose) */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrix B.  NRHS >= 0. */
+
+/*  A       (input) COMPLEX*16 array, dimension (LDA,N) */
+/*          The factors L and U from the factorization A = P*L*U */
+/*          as computed by ZGETRF. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  IPIV    (input) INTEGER array, dimension (N) */
+/*          The pivot indices from ZGETRF; for 1<=i<=N, row i of the */
+/*          matrix was interchanged with row IPIV(i). */
+
+/*  B       (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/*          On entry, the right hand side matrix B. */
+/*          On exit, the solution matrix X. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    notran = lsame_(trans, "N");
+    if (! notran && ! lsame_(trans, "T") && ! lsame_(
+	    trans, "C")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGETRS", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+    if (notran) {
+
+/*        Solve A * X = B. */
+
+/*        Apply row interchanges to the right hand sides. */
+/*        (last argument INCX = +1 applies IPIV(1..N) in forward order, */
+/*        matching the order the interchanges were made by ZGETRF -- */
+/*        see ZLASWP.) */
+
+	zlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c__1);
+
+/*        Solve L*X = B, overwriting B with X. */
+
+	ztrsm_("Left", "Lower", "No transpose", "Unit", n, nrhs, &c_b1, &a[
+		a_offset], lda, &b[b_offset], ldb);
+
+/*        Solve U*X = B, overwriting B with X. */
+
+	ztrsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b1, &
+		a[a_offset], lda, &b[b_offset], ldb);
+    } else {
+
+/*        Solve A**T * X = B  or  A**H * X = B. */
+/*        TRANS ('T' or 'C') is forwarded unchanged to ZTRSM, which */
+/*        selects the transposed or conjugate-transposed triangular */
+/*        solves; U' below denotes U**T or U**H accordingly. */
+
+/*        Solve U'*X = B, overwriting B with X. */
+
+	ztrsm_("Left", "Upper", trans, "Non-unit", n, nrhs, &c_b1, &a[
+		a_offset], lda, &b[b_offset], ldb);
+
+/*        Solve L'*X = B, overwriting B with X. */
+
+	ztrsm_("Left", "Lower", trans, "Unit", n, nrhs, &c_b1, &a[a_offset], 
+		lda, &b[b_offset], ldb);
+
+/*        Apply row interchanges to the solution vectors. */
+/*        (INCX = -1 applies the pivots in reverse order, undoing the */
+/*        permutation after the triangular solves -- see ZLASWP.) */
+
+	zlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c_n1);
+    }
+
+    return 0;
+
+/*     End of ZGETRS */
+
+} /* zgetrs_ */
diff --git a/contrib/libs/clapack/zggbak.c b/contrib/libs/clapack/zggbak.c
new file mode 100644
index 0000000000..dd25ad9aed
--- /dev/null
+++ b/contrib/libs/clapack/zggbak.c
@@ -0,0 +1,273 @@
+/* zggbak.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zggbak_(char *job, char *side, integer *n, integer *ilo, 
+	integer *ihi, doublereal *lscale, doublereal *rscale, integer *m, 
+	doublecomplex *v, integer *ldv, integer *info)
+{
+    /* System generated locals */
+    integer v_dim1, v_offset, i__1;
+
+    /* Local variables */
+    integer i__, k;
+    extern logical lsame_(char *, char *);
+    logical leftv;
+    extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *, 
+	    doublecomplex *, integer *), xerbla_(char *, integer *), 
+	    zdscal_(integer *, doublereal *, doublecomplex *, integer *);
+    logical rightv;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZGGBAK forms the right or left eigenvectors of a complex generalized */
+/*  eigenvalue problem A*x = lambda*B*x, by backward transformation on */
+/*  the computed eigenvectors of the balanced pair of matrices output by */
+/*  ZGGBAL. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOB     (input) CHARACTER*1 */
+/*          Specifies the type of backward transformation required: */
+/*          = 'N':  do nothing, return immediately; */
+/*          = 'P':  do backward transformation for permutation only; */
+/*          = 'S':  do backward transformation for scaling only; */
+/*          = 'B':  do backward transformations for both permutation and */
+/*                  scaling. */
+/*          JOB must be the same as the argument JOB supplied to ZGGBAL. */
+
+/*  SIDE    (input) CHARACTER*1 */
+/*          = 'R':  V contains right eigenvectors; */
+/*          = 'L':  V contains left eigenvectors. */
+
+/*  N       (input) INTEGER */
+/*          The number of rows of the matrix V.  N >= 0. */
+
+/*  ILO     (input) INTEGER */
+/*  IHI     (input) INTEGER */
+/*          The integers ILO and IHI determined by ZGGBAL. */
+/*          1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/*  LSCALE  (input) DOUBLE PRECISION array, dimension (N) */
+/*          Details of the permutations and/or scaling factors applied */
+/*          to the left side of A and B, as returned by ZGGBAL. */
+
+/*  RSCALE  (input) DOUBLE PRECISION array, dimension (N) */
+/*          Details of the permutations and/or scaling factors applied */
+/*          to the right side of A and B, as returned by ZGGBAL. */
+
+/*  M       (input) INTEGER */
+/*          The number of columns of the matrix V.  M >= 0. */
+
+/*  V       (input/output) COMPLEX*16 array, dimension (LDV,M) */
+/*          On entry, the matrix of right or left eigenvectors to be */
+/*          transformed, as returned by ZTGEVC. */
+/*          On exit, V is overwritten by the transformed eigenvectors. */
+
+/*  LDV     (input) INTEGER */
+/*          The leading dimension of the matrix V. LDV >= max(1,N). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit. */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  See R.C. Ward, Balancing the generalized eigenvalue problem, */
+/*                 SIAM J. Sci. Stat. Comp. 2 (1981), 141-152. */
+
+/*  ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters */
+
+    /* Parameter adjustments */
+    --lscale;
+    --rscale;
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+
+    /* Function Body */
+    rightv = lsame_(side, "R");
+    leftv = lsame_(side, "L");
+
+    *info = 0;
+    if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S") 
+	    && ! lsame_(job, "B")) {
+	*info = -1;
+    } else if (! rightv && ! leftv) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ilo < 1) {
+	*info = -4;
+    } else if (*n == 0 && *ihi == 0 && *ilo != 1) {
+	*info = -4;
+    } else if (*n > 0 && (*ihi < *ilo || *ihi > max(1,*n))) {
+	*info = -5;
+    } else if (*n == 0 && *ilo == 1 && *ihi != 0) {
+	*info = -5;
+    } else if (*m < 0) {
+	*info = -8;
+    } else if (*ldv < max(1,*n)) {
+	*info = -10;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGGBAK", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+    if (*m == 0) {
+	return 0;
+    }
+    if (lsame_(job, "N")) {
+	return 0;
+    }
+
+/*     ILO == IHI means no rows/columns were left inside the balanced */
+/*     window, so there is no scaling to undo; skip straight to the */
+/*     permutation phase. */
+
+    if (*ilo == *ihi) {
+	goto L30;
+    }
+
+/*     Backward balance */
+
+    if (lsame_(job, "S") || lsame_(job, "B")) {
+
+/*        Backward transformation on right eigenvectors */
+/*        (each row i in ILO..IHI is multiplied by the diagonal scale */
+/*        factor ZGGBAL stored in RSCALE(i) / LSCALE(i)) */
+
+	if (rightv) {
+	    i__1 = *ihi;
+	    for (i__ = *ilo; i__ <= i__1; ++i__) {
+		zdscal_(m, &rscale[i__], &v[i__ + v_dim1], ldv);
+/* L10: */
+	    }
+	}
+
+/*        Backward transformation on left eigenvectors */
+
+	if (leftv) {
+	    i__1 = *ihi;
+	    for (i__ = *ilo; i__ <= i__1; ++i__) {
+		zdscal_(m, &lscale[i__], &v[i__ + v_dim1], ldv);
+/* L20: */
+	    }
+	}
+    }
+
+/*     Backward permutation */
+
+/*     Outside ILO..IHI the scale arrays hold pivot row indices (stored */
+/*     as doubles, hence the casts below).  ZGGBAL applied interchanges */
+/*     in the order N..IHI+1 then 1..ILO-1, so to undo them we traverse */
+/*     ILO-1..1 descending and then IHI+1..N ascending. */
+
+L30:
+    if (lsame_(job, "P") || lsame_(job, "B")) {
+
+/*        Backward permutation on right eigenvectors */
+
+	if (rightv) {
+	    if (*ilo == 1) {
+		goto L50;
+	    }
+	    for (i__ = *ilo - 1; i__ >= 1; --i__) {
+		k = (integer) rscale[i__];
+		if (k == i__) {
+		    goto L40;
+		}
+		zswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L40:
+		;
+	    }
+
+L50:
+	    if (*ihi == *n) {
+		goto L70;
+	    }
+	    i__1 = *n;
+	    for (i__ = *ihi + 1; i__ <= i__1; ++i__) {
+		k = (integer) rscale[i__];
+		if (k == i__) {
+		    goto L60;
+		}
+		zswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L60:
+		;
+	    }
+	}
+
+/*        Backward permutation on left eigenvectors */
+
+L70:
+	if (leftv) {
+	    if (*ilo == 1) {
+		goto L90;
+	    }
+	    for (i__ = *ilo - 1; i__ >= 1; --i__) {
+		k = (integer) lscale[i__];
+		if (k == i__) {
+		    goto L80;
+		}
+		zswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L80:
+		;
+	    }
+
+L90:
+	    if (*ihi == *n) {
+		goto L110;
+	    }
+	    i__1 = *n;
+	    for (i__ = *ihi + 1; i__ <= i__1; ++i__) {
+		k = (integer) lscale[i__];
+		if (k == i__) {
+		    goto L100;
+		}
+		zswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L100:
+		;
+	    }
+	}
+    }
+
+L110:
+
+    return 0;
+
+/*     End of ZGGBAK */
+
+} /* zggbak_ */
diff --git a/contrib/libs/clapack/zggbal.c b/contrib/libs/clapack/zggbal.c
new file mode 100644
index 0000000000..f875672154
--- /dev/null
+++ b/contrib/libs/clapack/zggbal.c
@@ -0,0 +1,657 @@
+/* zggbal.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b36 = 10.;
+static doublereal c_b72 = .5;
+
+/* Subroutine */ int zggbal_(char *job, integer *n, doublecomplex *a, integer 
+	*lda, doublecomplex *b, integer *ldb, integer *ilo, integer *ihi, 
+	doublereal *lscale, doublereal *rscale, doublereal *work, integer *
+	info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+    doublereal d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double d_lg10(doublereal *), d_imag(doublecomplex *), z_abs(doublecomplex 
+	    *), d_sign(doublereal *, doublereal *), pow_di(doublereal *, 
+	    integer *);
+
+    /* Local variables */
+    integer i__, j, k, l, m;
+    doublereal t;
+    integer jc;
+    doublereal ta, tb, tc;
+    integer ir;
+    doublereal ew;
+    integer it, nr, ip1, jp1, lm1;
+    doublereal cab, rab, ewc, cor, sum;
+    integer nrp2, icab, lcab;
+    doublereal beta, coef;
+    integer irab, lrab;
+    doublereal basl, cmax;
+    extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, 
+	    integer *);
+    doublereal coef2, coef5, gamma, alpha;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, 
+	    integer *);
+    extern logical lsame_(char *, char *);
+    doublereal sfmin, sfmax;
+    integer iflow;
+    extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *, 
+	    integer *, doublereal *, integer *);
+    integer kount;
+    extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *, 
+	    doublecomplex *, integer *);
+    extern doublereal dlamch_(char *);
+    doublereal pgamma;
+    extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+	    integer *, doublereal *, doublecomplex *, integer *);
+    integer lsfmin;
+    extern integer izamax_(integer *, doublecomplex *, integer *);
+    integer lsfmax;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZGGBAL balances a pair of general complex matrices (A,B).  This */
+/*  involves, first, permuting A and B by similarity transformations to */
+/*  isolate eigenvalues in the first 1 to ILO$-$1 and last IHI+1 to N */
+/*  elements on the diagonal; and second, applying a diagonal similarity */
+/*  transformation to rows and columns ILO to IHI to make the rows */
+/*  and columns as close in norm as possible. Both steps are optional. */
+
+/*  Balancing may reduce the 1-norm of the matrices, and improve the */
+/*  accuracy of the computed eigenvalues and/or eigenvectors in the */
+/*  generalized eigenvalue problem A*x = lambda*B*x. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  JOB     (input) CHARACTER*1 */
+/*          Specifies the operations to be performed on A and B: */
+/*          = 'N':  none:  simply set ILO = 1, IHI = N, LSCALE(I) = 1.0 */
+/*                  and RSCALE(I) = 1.0 for i=1,...,N; */
+/*          = 'P':  permute only; */
+/*          = 'S':  scale only; */
+/*          = 'B':  both permute and scale. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrices A and B.  N >= 0. */
+
+/*  A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/*          On entry, the input matrix A. */
+/*          On exit, A is overwritten by the balanced matrix. */
+/*          If JOB = 'N', A is not referenced. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A. LDA >= max(1,N). */
+
+/*  B       (input/output) COMPLEX*16 array, dimension (LDB,N) */
+/*          On entry, the input matrix B. */
+/*          On exit, B is overwritten by the balanced matrix. */
+/*          If JOB = 'N', B is not referenced. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B. LDB >= max(1,N). */
+
+/*  ILO     (output) INTEGER */
+/*  IHI     (output) INTEGER */
+/*          ILO and IHI are set to integers such that on exit */
+/*          A(i,j) = 0 and B(i,j) = 0 if i > j and */
+/*          j = 1,...,ILO-1 or i = IHI+1,...,N. */
+/*          If JOB = 'N' or 'S', ILO = 1 and IHI = N. */
+
+/*  LSCALE  (output) DOUBLE PRECISION array, dimension (N) */
+/*          Details of the permutations and scaling factors applied */
+/*          to the left side of A and B.  If P(j) is the index of the */
+/*          row interchanged with row j, and D(j) is the scaling factor */
+/*          applied to row j, then */
+/*            LSCALE(j) = P(j)    for J = 1,...,ILO-1 */
+/*                      = D(j)    for J = ILO,...,IHI */
+/*                      = P(j)    for J = IHI+1,...,N. */
+/*          The order in which the interchanges are made is N to IHI+1, */
+/*          then 1 to ILO-1. */
+
+/*  RSCALE  (output) DOUBLE PRECISION array, dimension (N) */
+/*          Details of the permutations and scaling factors applied */
+/*          to the right side of A and B.  If P(j) is the index of the */
+/*          column interchanged with column j, and D(j) is the scaling */
+/*          factor applied to column j, then */
+/*            RSCALE(j) = P(j)    for J = 1,...,ILO-1 */
+/*                      = D(j)    for J = ILO,...,IHI */
+/*                      = P(j)    for J = IHI+1,...,N. */
+/*          The order in which the interchanges are made is N to IHI+1, */
+/*          then 1 to ILO-1. */
+
+/*  WORK    (workspace) REAL array, dimension (lwork) */
+/*          lwork must be at least max(1,6*N) when JOB = 'S' or 'B', and */
+/*          at least 1 when JOB = 'N' or 'P'. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  See R.C. WARD, Balancing the generalized eigenvalue problem, */
+/*                 SIAM J. Sci. Stat. Comp. 2 (1981), 141-152. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Statement Functions .. */
+/*     .. */
+/*     .. Statement Function definitions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --lscale;
+    --rscale;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S") 
+	    && ! lsame_(job, "B")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*ldb < max(1,*n)) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGGBAL", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	*ilo = 1;
+	*ihi = *n;
+	return 0;
+    }
+
+    if (*n == 1) {
+	*ilo = 1;
+	*ihi = *n;
+	lscale[1] = 1.;
+	rscale[1] = 1.;
+	return 0;
+    }
+
+    if (lsame_(job, "N")) {
+	*ilo = 1;
+	*ihi = *n;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    lscale[i__] = 1.;
+	    rscale[i__] = 1.;
+/* L10: */
+	}
+	return 0;
+    }
+
+/*     K..L is the active window still subject to permutation; JOB='S' */
+/*     skips the permutation phase entirely. */
+
+    k = 1;
+    l = *n;
+    if (lsame_(job, "S")) {
+	goto L190;
+    }
+
+    goto L30;
+
+/*     Permute the matrices A and B to isolate the eigenvalues. */
+
+/*     Find row with one nonzero in columns 1 through L */
+
+/*     (L20 re-enters the row search after a row was pushed to the */
+/*     bottom border; when the window shrinks to a single row the */
+/*     permutation phase is done.) */
+
+L20:
+    l = lm1;
+    if (l != 1) {
+	goto L30;
+    }
+
+    rscale[1] = 1.;
+    lscale[1] = 1.;
+    goto L190;
+
+L30:
+    lm1 = l - 1;
+    for (i__ = l; i__ >= 1; --i__) {
+	i__1 = lm1;
+	for (j = 1; j <= i__1; ++j) {
+	    jp1 = j + 1;
+	    i__2 = i__ + j * a_dim1;
+	    i__3 = i__ + j * b_dim1;
+	    if (a[i__2].r != 0. || a[i__2].i != 0. || (b[i__3].r != 0. || b[
+		    i__3].i != 0.)) {
+		goto L50;
+	    }
+/* L40: */
+	}
+	j = l;
+	goto L70;
+
+L50:
+	i__1 = l;
+	for (j = jp1; j <= i__1; ++j) {
+	    i__2 = i__ + j * a_dim1;
+	    i__3 = i__ + j * b_dim1;
+	    if (a[i__2].r != 0. || a[i__2].i != 0. || (b[i__3].r != 0. || b[
+		    i__3].i != 0.)) {
+		goto L80;
+	    }
+/* L60: */
+	}
+	j = jp1 - 1;
+
+L70:
+	m = l;
+	iflow = 1;
+	goto L160;
+L80:
+	;
+    }
+    goto L100;
+
+/*     Find column with one nonzero in rows K through N */
+
+L90:
+    ++k;
+
+L100:
+    i__1 = l;
+    for (j = k; j <= i__1; ++j) {
+	i__2 = lm1;
+	for (i__ = k; i__ <= i__2; ++i__) {
+	    ip1 = i__ + 1;
+	    i__3 = i__ + j * a_dim1;
+	    i__4 = i__ + j * b_dim1;
+	    if (a[i__3].r != 0. || a[i__3].i != 0. || (b[i__4].r != 0. || b[
+		    i__4].i != 0.)) {
+		goto L120;
+	    }
+/* L110: */
+	}
+	i__ = l;
+	goto L140;
+L120:
+	i__2 = l;
+	for (i__ = ip1; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * a_dim1;
+	    i__4 = i__ + j * b_dim1;
+	    if (a[i__3].r != 0. || a[i__3].i != 0. || (b[i__4].r != 0. || b[
+		    i__4].i != 0.)) {
+		goto L150;
+	    }
+/* L130: */
+	}
+	i__ = ip1 - 1;
+L140:
+	m = k;
+	iflow = 2;
+	goto L160;
+L150:
+	;
+    }
+    goto L190;
+
+/*     Permute rows M and I */
+
+/*     The pivot index I is recorded in LSCALE(M) (as a double) so that */
+/*     ZGGBAK can undo the interchange later. */
+
+L160:
+    lscale[m] = (doublereal) i__;
+    if (i__ == m) {
+	goto L170;
+    }
+    i__1 = *n - k + 1;
+    zswap_(&i__1, &a[i__ + k * a_dim1], lda, &a[m + k * a_dim1], lda);
+    i__1 = *n - k + 1;
+    zswap_(&i__1, &b[i__ + k * b_dim1], ldb, &b[m + k * b_dim1], ldb);
+
+/*     Permute columns M and J */
+
+L170:
+    rscale[m] = (doublereal) j;
+    if (j == m) {
+	goto L180;
+    }
+    zswap_(&l, &a[j * a_dim1 + 1], &c__1, &a[m * a_dim1 + 1], &c__1);
+    zswap_(&l, &b[j * b_dim1 + 1], &c__1, &b[m * b_dim1 + 1], &c__1);
+
+/*     IFLOW routes back to whichever search (row or column) triggered */
+/*     this interchange. */
+
+L180:
+    switch (iflow) {
+	case 1:  goto L20;
+	case 2:  goto L90;
+    }
+
+L190:
+    *ilo = k;
+    *ihi = l;
+
+    if (lsame_(job, "P")) {
+	i__1 = *ihi;
+	for (i__ = *ilo; i__ <= i__1; ++i__) {
+	    lscale[i__] = 1.;
+	    rscale[i__] = 1.;
+/* L195: */
+	}
+	return 0;
+    }
+
+    if (*ilo == *ihi) {
+	return 0;
+    }
+
+/*     Balance the submatrix in rows ILO to IHI. */
+
+/*     WORK is treated as six consecutive length-N vectors: WORK(1:N), */
+/*     WORK(N+1:2N), WORK(2N+1:3N), WORK(3N+1:4N), WORK(4N+1:5N) and */
+/*     WORK(5N+1:6N), holding the conjugate-gradient iteration state */
+/*     and the right-hand-side vectors built below. */
+
+    nr = *ihi - *ilo + 1;
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	rscale[i__] = 0.;
+	lscale[i__] = 0.;
+
+	work[i__] = 0.;
+	work[i__ + *n] = 0.;
+	work[i__ + (*n << 1)] = 0.;
+	work[i__ + *n * 3] = 0.;
+	work[i__ + (*n << 2)] = 0.;
+	work[i__ + *n * 5] = 0.;
+/* L200: */
+    }
+
+/*     Compute right side vector in resulting linear equations */
+
+/*     (entries are accumulated in base-10 logarithms; BASL = log10(10) */
+/*     normalizes d_lg10 results) */
+
+    basl = d_lg10(&c_b36);
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	i__2 = *ihi;
+	for (j = *ilo; j <= i__2; ++j) {
+	    i__3 = i__ + j * a_dim1;
+	    if (a[i__3].r == 0. && a[i__3].i == 0.) {
+		ta = 0.;
+		goto L210;
+	    }
+	    i__3 = i__ + j * a_dim1;
+	    d__3 = (d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a[i__ + j *
+		     a_dim1]), abs(d__2));
+	    ta = d_lg10(&d__3) / basl;
+
+L210:
+	    i__3 = i__ + j * b_dim1;
+	    if (b[i__3].r == 0. && b[i__3].i == 0.) {
+		tb = 0.;
+		goto L220;
+	    }
+	    i__3 = i__ + j * b_dim1;
+	    d__3 = (d__1 = b[i__3].r, abs(d__1)) + (d__2 = d_imag(&b[i__ + j *
+		     b_dim1]), abs(d__2));
+	    tb = d_lg10(&d__3) / basl;
+
+L220:
+	    work[i__ + (*n << 2)] = work[i__ + (*n << 2)] - ta - tb;
+	    work[j + *n * 5] = work[j + *n * 5] - ta - tb;
+/* L230: */
+	}
+/* L240: */
+    }
+
+    coef = 1. / (doublereal) (nr << 1);
+    coef2 = coef * coef;
+    coef5 = coef2 * .5;
+    nrp2 = nr + 2;
+    beta = 0.;
+    it = 1;
+
+/*     Start generalized conjugate gradient iteration */
+
+L250:
+
+    gamma = ddot_(&nr, &work[*ilo + (*n << 2)], &c__1, &work[*ilo + (*n << 2)]
+, &c__1) + ddot_(&nr, &work[*ilo + *n * 5], &c__1, &work[*ilo + *
+	    n * 5], &c__1);
+
+    ew = 0.;
+    ewc = 0.;
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	ew += work[i__ + (*n << 2)];
+	ewc += work[i__ + *n * 5];
+/* L260: */
+    }
+
+/* Computing 2nd power */
+    d__1 = ew;
+/* Computing 2nd power */
+    d__2 = ewc;
+/* Computing 2nd power */
+    d__3 = ew - ewc;
+    gamma = coef * gamma - coef2 * (d__1 * d__1 + d__2 * d__2) - coef5 * (
+	    d__3 * d__3);
+    if (gamma == 0.) {
+	goto L350;
+    }
+    if (it != 1) {
+	beta = gamma / pgamma;
+    }
+    t = coef5 * (ewc - ew * 3.);
+    tc = coef5 * (ew - ewc * 3.);
+
+    dscal_(&nr, &beta, &work[*ilo], &c__1);
+    dscal_(&nr, &beta, &work[*ilo + *n], &c__1);
+
+    daxpy_(&nr, &coef, &work[*ilo + (*n << 2)], &c__1, &work[*ilo + *n], &
+	    c__1);
+    daxpy_(&nr, &coef, &work[*ilo + *n * 5], &c__1, &work[*ilo], &c__1);
+
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	work[i__] += tc;
+	work[i__ + *n] += t;
+/* L270: */
+    }
+
+/*     Apply matrix to vector */
+
+/*     (KOUNT counts nonzero entries of A and B in the row/column; the */
+/*     matrix of the linear system is applied implicitly from the */
+/*     sparsity pattern) */
+
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	kount = 0;
+	sum = 0.;
+	i__2 = *ihi;
+	for (j = *ilo; j <= i__2; ++j) {
+	    i__3 = i__ + j * a_dim1;
+	    if (a[i__3].r == 0. && a[i__3].i == 0.) {
+		goto L280;
+	    }
+	    ++kount;
+	    sum += work[j];
+L280:
+	    i__3 = i__ + j * b_dim1;
+	    if (b[i__3].r == 0. && b[i__3].i == 0.) {
+		goto L290;
+	    }
+	    ++kount;
+	    sum += work[j];
+L290:
+	    ;
+	}
+	work[i__ + (*n << 1)] = (doublereal) kount * work[i__ + *n] + sum;
+/* L300: */
+    }
+
+    i__1 = *ihi;
+    for (j = *ilo; j <= i__1; ++j) {
+	kount = 0;
+	sum = 0.;
+	i__2 = *ihi;
+	for (i__ = *ilo; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * a_dim1;
+	    if (a[i__3].r == 0. && a[i__3].i == 0.) {
+		goto L310;
+	    }
+	    ++kount;
+	    sum += work[i__ + *n];
+L310:
+	    i__3 = i__ + j * b_dim1;
+	    if (b[i__3].r == 0. && b[i__3].i == 0.) {
+		goto L320;
+	    }
+	    ++kount;
+	    sum += work[i__ + *n];
+L320:
+	    ;
+	}
+	work[j + *n * 3] = (doublereal) kount * work[j] + sum;
+/* L330: */
+    }
+
+    sum = ddot_(&nr, &work[*ilo + *n], &c__1, &work[*ilo + (*n << 1)], &c__1) 
+	    + ddot_(&nr, &work[*ilo], &c__1, &work[*ilo + *n * 3], &c__1);
+    alpha = gamma / sum;
+
+/*     Determine correction to current iteration */
+
+    cmax = 0.;
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	cor = alpha * work[i__ + *n];
+	if (abs(cor) > cmax) {
+	    cmax = abs(cor);
+	}
+	lscale[i__] += cor;
+	cor = alpha * work[i__];
+	if (abs(cor) > cmax) {
+	    cmax = abs(cor);
+	}
+	rscale[i__] += cor;
+/* L340: */
+    }
+    if (cmax < .5) {
+	goto L350;
+    }
+
+    d__1 = -alpha;
+    daxpy_(&nr, &d__1, &work[*ilo + (*n << 1)], &c__1, &work[*ilo + (*n << 2)]
+, &c__1);
+    d__1 = -alpha;
+    daxpy_(&nr, &d__1, &work[*ilo + *n * 3], &c__1, &work[*ilo + *n * 5], &
+	    c__1);
+
+    pgamma = gamma;
+    ++it;
+    if (it <= nrp2) {
+	goto L250;
+    }
+
+/*     End generalized conjugate gradient iteration */
+
+L350:
+
+/*     Round the accumulated log10 scale exponents to integer powers of */
+/*     10, clamping to the exponent range representable between the safe */
+/*     minimum and its reciprocal, then apply them to A and B. */
+
+    sfmin = dlamch_("S");
+    sfmax = 1. / sfmin;
+    lsfmin = (integer) (d_lg10(&sfmin) / basl + 1.);
+    lsfmax = (integer) (d_lg10(&sfmax) / basl);
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	i__2 = *n - *ilo + 1;
+	irab = izamax_(&i__2, &a[i__ + *ilo * a_dim1], lda);
+	rab = z_abs(&a[i__ + (irab + *ilo - 1) * a_dim1]);
+	i__2 = *n - *ilo + 1;
+	irab = izamax_(&i__2, &b[i__ + *ilo * b_dim1], ldb);
+/* Computing MAX */
+	d__1 = rab, d__2 = z_abs(&b[i__ + (irab + *ilo - 1) * b_dim1]);
+	rab = max(d__1,d__2);
+	d__1 = rab + sfmin;
+	lrab = (integer) (d_lg10(&d__1) / basl + 1.);
+	ir = (integer) (lscale[i__] + d_sign(&c_b72, &lscale[i__]));
+/* Computing MIN */
+	i__2 = max(ir,lsfmin), i__2 = min(i__2,lsfmax), i__3 = lsfmax - lrab;
+	ir = min(i__2,i__3);
+	lscale[i__] = pow_di(&c_b36, &ir);
+	icab = izamax_(ihi, &a[i__ * a_dim1 + 1], &c__1);
+	cab = z_abs(&a[icab + i__ * a_dim1]);
+	icab = izamax_(ihi, &b[i__ * b_dim1 + 1], &c__1);
+/* Computing MAX */
+	d__1 = cab, d__2 = z_abs(&b[icab + i__ * b_dim1]);
+	cab = max(d__1,d__2);
+	d__1 = cab + sfmin;
+	lcab = (integer) (d_lg10(&d__1) / basl + 1.);
+	jc = (integer) (rscale[i__] + d_sign(&c_b72, &rscale[i__]));
+/* Computing MIN */
+	i__2 = max(jc,lsfmin), i__2 = min(i__2,lsfmax), i__3 = lsfmax - lcab;
+	jc = min(i__2,i__3);
+	rscale[i__] = pow_di(&c_b36, &jc);
+/* L360: */
+    }
+
+/*     Row scaling of matrices A and B */
+
+    i__1 = *ihi;
+    for (i__ = *ilo; i__ <= i__1; ++i__) {
+	i__2 = *n - *ilo + 1;
+	zdscal_(&i__2, &lscale[i__], &a[i__ + *ilo * a_dim1], lda);
+	i__2 = *n - *ilo + 1;
+	zdscal_(&i__2, &lscale[i__], &b[i__ + *ilo * b_dim1], ldb);
+/* L370: */
+    }
+
+/*     Column scaling of matrices A and B */
+
+    i__1 = *ihi;
+    for (j = *ilo; j <= i__1; ++j) {
+	zdscal_(ihi, &rscale[j], &a[j * a_dim1 + 1], &c__1);
+	zdscal_(ihi, &rscale[j], &b[j * b_dim1 + 1], &c__1);
+/* L380: */
+    }
+
+    return 0;
+
+/*     End of ZGGBAL */
+
+} /* zggbal_ */
diff --git a/contrib/libs/clapack/zgges.c b/contrib/libs/clapack/zgges.c
new file mode 100644
index 0000000000..540ae2763f
--- /dev/null
+++ b/contrib/libs/clapack/zgges.c
@@ -0,0 +1,604 @@
+/* zgges.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zgges_(char *jobvsl, char *jobvsr, char *sort, L_fp
+ selctg, integer *n, doublecomplex *a, integer *lda, doublecomplex *b,
+ integer *ldb, integer *sdim, doublecomplex *alpha, doublecomplex *
+ beta, doublecomplex *vsl, integer *ldvsl, doublecomplex *vsr, integer
+ *ldvsr, doublecomplex *work, integer *lwork, doublereal *rwork,
+ logical *bwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, vsl_dim1, vsl_offset,
+ vsr_dim1, vsr_offset, i__1, i__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ doublereal dif[2];
+ integer ihi, ilo;
+ doublereal eps, anrm, bnrm;
+ integer idum[1], ierr, itau, iwrk;
+ doublereal pvsl, pvsr;
+ extern logical lsame_(char *, char *);
+ integer ileft, icols;
+ logical cursl, ilvsl, ilvsr;
+ integer irwrk, irows;
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int zggbak_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublecomplex *,
+ integer *, integer *), zggbal_(char *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *
+, integer *, doublereal *, doublereal *, doublereal *, integer *);
+ logical ilascl, ilbscl;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ doublereal bignum;
+ integer ijobvl, iright;
+ extern /* Subroutine */ int zgghrd_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *
+), zlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublecomplex *,
+ integer *, integer *);
+ integer ijobvr;
+ extern /* Subroutine */ int zgeqrf_(integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *
+);
+ doublereal anrmto;
+ integer lwkmin;
+ logical lastsl;
+ doublereal bnrmto;
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *),
+ zlaset_(char *, integer *, integer *, doublecomplex *,
+ doublecomplex *, doublecomplex *, integer *), zhgeqz_(
+ char *, char *, char *, integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublereal *, integer *), ztgsen_(integer
+ *, logical *, logical *, logical *, integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ doublecomplex *, integer *, integer *, integer *, integer *);
+ doublereal smlnum;
+ logical wantst, lquery;
+ integer lwkopt;
+ extern /* Subroutine */ int zungqr_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *), zunmqr_(char *, char *, integer *, integer
+ *, integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+/* .. Function Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGGES computes for a pair of N-by-N complex nonsymmetric matrices */
+/* (A,B), the generalized eigenvalues, the generalized complex Schur */
+/* form (S, T), and optionally left and/or right Schur vectors (VSL */
+/* and VSR). This gives the generalized Schur factorization */
+
+/* (A,B) = ( (VSL)*S*(VSR)**H, (VSL)*T*(VSR)**H ) */
+
+/* where (VSR)**H is the conjugate-transpose of VSR. */
+
+/* Optionally, it also orders the eigenvalues so that a selected cluster */
+/* of eigenvalues appears in the leading diagonal blocks of the upper */
+/* triangular matrix S and the upper triangular matrix T. The leading */
+/* columns of VSL and VSR then form a unitary basis for the */
+/* corresponding left and right eigenspaces (deflating subspaces). */
+
+/* (If only the generalized eigenvalues are needed, use the driver */
+/* ZGGEV instead, which is faster.) */
+
+/* A generalized eigenvalue for a pair of matrices (A,B) is a scalar w */
+/* or a ratio alpha/beta = w, such that A - w*B is singular. It is */
+/* usually represented as the pair (alpha,beta), as there is a */
+/* reasonable interpretation for beta=0, and even for both being zero. */
+
+/* A pair of matrices (S,T) is in generalized complex Schur form if S */
+/* and T are upper triangular and, in addition, the diagonal elements */
+/* of T are non-negative real numbers. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVSL (input) CHARACTER*1 */
+/* = 'N': do not compute the left Schur vectors; */
+/* = 'V': compute the left Schur vectors. */
+
+/* JOBVSR (input) CHARACTER*1 */
+/* = 'N': do not compute the right Schur vectors; */
+/* = 'V': compute the right Schur vectors. */
+
+/* SORT (input) CHARACTER*1 */
+/* Specifies whether or not to order the eigenvalues on the */
+/* diagonal of the generalized Schur form. */
+/* = 'N': Eigenvalues are not ordered; */
+/* = 'S': Eigenvalues are ordered (see SELCTG). */
+
+/* SELCTG (external procedure) LOGICAL FUNCTION of two COMPLEX*16 arguments */
+/* SELCTG must be declared EXTERNAL in the calling subroutine. */
+/* If SORT = 'N', SELCTG is not referenced. */
+/* If SORT = 'S', SELCTG is used to select eigenvalues to sort */
+/* to the top left of the Schur form. */
+/* An eigenvalue ALPHA(j)/BETA(j) is selected if */
+/* SELCTG(ALPHA(j),BETA(j)) is true. */
+
+/* Note that a selected complex eigenvalue may no longer satisfy */
+/* SELCTG(ALPHA(j),BETA(j)) = .TRUE. after ordering, since */
+/* ordering may change the value of complex eigenvalues */
+/* (especially if the eigenvalue is ill-conditioned), in this */
+/* case INFO is set to N+2 (See INFO below). */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VSL, and VSR. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA, N) */
+/* On entry, the first of the pair of matrices. */
+/* On exit, A has been overwritten by its generalized Schur */
+/* form S. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB, N) */
+/* On entry, the second of the pair of matrices. */
+/* On exit, B has been overwritten by its generalized Schur */
+/* form T. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* SDIM (output) INTEGER */
+/* If SORT = 'N', SDIM = 0. */
+/* If SORT = 'S', SDIM = number of eigenvalues (after sorting) */
+/* for which SELCTG is true. */
+
+/* ALPHA (output) COMPLEX*16 array, dimension (N) */
+/* BETA (output) COMPLEX*16 array, dimension (N) */
+/* On exit, ALPHA(j)/BETA(j), j=1,...,N, will be the */
+/* generalized eigenvalues. ALPHA(j), j=1,...,N and BETA(j), */
+/* j=1,...,N are the diagonals of the complex Schur form (A,B) */
+/* output by ZGGES. The BETA(j) will be non-negative real. */
+
+/* Note: the quotients ALPHA(j)/BETA(j) may easily over- or */
+/* underflow, and BETA(j) may even be zero. Thus, the user */
+/* should avoid naively computing the ratio alpha/beta. */
+/* However, ALPHA will be always less than and usually */
+/* comparable with norm(A) in magnitude, and BETA always less */
+/* than and usually comparable with norm(B). */
+
+/* VSL (output) COMPLEX*16 array, dimension (LDVSL,N) */
+/* If JOBVSL = 'V', VSL will contain the left Schur vectors. */
+/* Not referenced if JOBVSL = 'N'. */
+
+/* LDVSL (input) INTEGER */
+/* The leading dimension of the matrix VSL. LDVSL >= 1, and */
+/* if JOBVSL = 'V', LDVSL >= N. */
+
+/* VSR (output) COMPLEX*16 array, dimension (LDVSR,N) */
+/* If JOBVSR = 'V', VSR will contain the right Schur vectors. */
+/* Not referenced if JOBVSR = 'N'. */
+
+/* LDVSR (input) INTEGER */
+/* The leading dimension of the matrix VSR. LDVSR >= 1, and */
+/* if JOBVSR = 'V', LDVSR >= N. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,2*N). */
+/* For good performance, LWORK must generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (8*N) */
+
+/* BWORK (workspace) LOGICAL array, dimension (N) */
+/* Not referenced if SORT = 'N'. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* =1,...,N: */
+/* The QZ iteration failed. (A,B) are not in Schur */
+/* form, but ALPHA(j) and BETA(j) should be correct for */
+/* j=INFO+1,...,N. */
+/* > N: =N+1: other than QZ iteration failed in ZHGEQZ */
+/* =N+2: after reordering, roundoff changed values of */
+/* some complex eigenvalues so that leading */
+/* eigenvalues in the Generalized Schur form no */
+/* longer satisfy SELCTG=.TRUE. This could also */
+/* be caused due to scaling. */
+/* =N+3: reordering failed in ZTGSEN. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alpha;
+ --beta;
+ vsl_dim1 = *ldvsl;
+ vsl_offset = 1 + vsl_dim1;
+ vsl -= vsl_offset;
+ vsr_dim1 = *ldvsr;
+ vsr_offset = 1 + vsr_dim1;
+ vsr -= vsr_offset;
+ --work;
+ --rwork;
+ --bwork;
+
+ /* Function Body */
+ if (lsame_(jobvsl, "N")) {
+ ijobvl = 1;
+ ilvsl = FALSE_;
+ } else if (lsame_(jobvsl, "V")) {
+ ijobvl = 2;
+ ilvsl = TRUE_;
+ } else {
+ ijobvl = -1;
+ ilvsl = FALSE_;
+ }
+
+ if (lsame_(jobvsr, "N")) {
+ ijobvr = 1;
+ ilvsr = FALSE_;
+ } else if (lsame_(jobvsr, "V")) {
+ ijobvr = 2;
+ ilvsr = TRUE_;
+ } else {
+ ijobvr = -1;
+ ilvsr = FALSE_;
+ }
+
+ wantst = lsame_(sort, "S");
+
+/* Test the input arguments */
+
+ *info = 0;
+ lquery = *lwork == -1;
+ if (ijobvl <= 0) {
+ *info = -1;
+ } else if (ijobvr <= 0) {
+ *info = -2;
+ } else if (! wantst && ! lsame_(sort, "N")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldvsl < 1 || ilvsl && *ldvsl < *n) {
+ *info = -14;
+ } else if (*ldvsr < 1 || ilvsr && *ldvsr < *n) {
+ *info = -16;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n << 1;
+ lwkmin = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = 1, i__2 = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", n, &c__1, n,
+ &c__0);
+ lwkopt = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = lwkopt, i__2 = *n + *n * ilaenv_(&c__1, "ZUNMQR", " ", n, &
+ c__1, n, &c_n1);
+ lwkopt = max(i__1,i__2);
+ if (ilvsl) {
+/* Computing MAX */
+ i__1 = lwkopt, i__2 = *n + *n * ilaenv_(&c__1, "ZUNGQR", " ", n, &
+ c__1, n, &c_n1);
+ lwkopt = max(i__1,i__2);
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+ if (*lwork < lwkmin && ! lquery) {
+ *info = -18;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGGES ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *sdim = 0;
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = dlamch_("P");
+ smlnum = dlamch_("S");
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = zlange_("M", n, n, &a[a_offset], lda, &rwork[1]);
+ ilascl = FALSE_;
+ if (anrm > 0. && anrm < smlnum) {
+ anrmto = smlnum;
+ ilascl = TRUE_;
+ } else if (anrm > bignum) {
+ anrmto = bignum;
+ ilascl = TRUE_;
+ }
+
+ if (ilascl) {
+ zlascl_("G", &c__0, &c__0, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+ bnrm = zlange_("M", n, n, &b[b_offset], ldb, &rwork[1]);
+ ilbscl = FALSE_;
+ if (bnrm > 0. && bnrm < smlnum) {
+ bnrmto = smlnum;
+ ilbscl = TRUE_;
+ } else if (bnrm > bignum) {
+ bnrmto = bignum;
+ ilbscl = TRUE_;
+ }
+
+ if (ilbscl) {
+ zlascl_("G", &c__0, &c__0, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+ ierr);
+ }
+
+/* Permute the matrix to make it more nearly triangular */
+/* (Real Workspace: need 6*N) */
+
+ ileft = 1;
+ iright = *n + 1;
+ irwrk = iright + *n;
+ zggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &rwork[
+ ileft], &rwork[iright], &rwork[irwrk], &ierr);
+
+/* Reduce B to triangular form (QR decomposition of B) */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+ irows = ihi + 1 - ilo;
+ icols = *n + 1 - ilo;
+ itau = 1;
+ iwrk = itau + irows;
+ i__1 = *lwork + 1 - iwrk;
+ zgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+ iwrk], &i__1, &ierr);
+
+/* Apply the orthogonal transformation to matrix A */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+ i__1 = *lwork + 1 - iwrk;
+ zunmqr_("L", "C", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+ work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwrk], &i__1, &
+ ierr);
+
+/* Initialize VSL */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+ if (ilvsl) {
+ zlaset_("Full", n, n, &c_b1, &c_b2, &vsl[vsl_offset], ldvsl);
+ if (irows > 1) {
+ i__1 = irows - 1;
+ i__2 = irows - 1;
+ zlacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vsl[
+ ilo + 1 + ilo * vsl_dim1], ldvsl);
+ }
+ i__1 = *lwork + 1 - iwrk;
+ zungqr_(&irows, &irows, &irows, &vsl[ilo + ilo * vsl_dim1], ldvsl, &
+ work[itau], &work[iwrk], &i__1, &ierr);
+ }
+
+/* Initialize VSR */
+
+ if (ilvsr) {
+ zlaset_("Full", n, n, &c_b1, &c_b2, &vsr[vsr_offset], ldvsr);
+ }
+
+/* Reduce to generalized Hessenberg form */
+/* (Workspace: none needed) */
+
+ zgghrd_(jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset],
+ ldb, &vsl[vsl_offset], ldvsl, &vsr[vsr_offset], ldvsr, &ierr);
+
+ *sdim = 0;
+
+/* Perform QZ algorithm, computing Schur vectors if desired */
+/* (Complex Workspace: need N) */
+/* (Real Workspace: need N) */
+
+ iwrk = itau;
+ i__1 = *lwork + 1 - iwrk;
+ zhgeqz_("S", jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+ b_offset], ldb, &alpha[1], &beta[1], &vsl[vsl_offset], ldvsl, &
+ vsr[vsr_offset], ldvsr, &work[iwrk], &i__1, &rwork[irwrk], &ierr);
+ if (ierr != 0) {
+ if (ierr > 0 && ierr <= *n) {
+ *info = ierr;
+ } else if (ierr > *n && ierr <= *n << 1) {
+ *info = ierr - *n;
+ } else {
+ *info = *n + 1;
+ }
+ goto L30;
+ }
+
+/* Sort eigenvalues ALPHA/BETA if desired */
+/* (Workspace: none needed) */
+
+ if (wantst) {
+
+/* Undo scaling on eigenvalues before selecting */
+
+ if (ilascl) {
+ zlascl_("G", &c__0, &c__0, &anrm, &anrmto, n, &c__1, &alpha[1], n,
+ &ierr);
+ }
+ if (ilbscl) {
+ zlascl_("G", &c__0, &c__0, &bnrm, &bnrmto, n, &c__1, &beta[1], n,
+ &ierr);
+ }
+
+/* Select eigenvalues */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ bwork[i__] = (*selctg)(&alpha[i__], &beta[i__]);
+/* L10: */
+ }
+
+ i__1 = *lwork - iwrk + 1;
+ ztgsen_(&c__0, &ilvsl, &ilvsr, &bwork[1], n, &a[a_offset], lda, &b[
+ b_offset], ldb, &alpha[1], &beta[1], &vsl[vsl_offset], ldvsl,
+ &vsr[vsr_offset], ldvsr, sdim, &pvsl, &pvsr, dif, &work[iwrk],
+ &i__1, idum, &c__1, &ierr);
+ if (ierr == 1) {
+ *info = *n + 3;
+ }
+
+ }
+
+/* Apply back-permutation to VSL and VSR */
+/* (Workspace: none needed) */
+
+ if (ilvsl) {
+ zggbak_("P", "L", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n, &
+ vsl[vsl_offset], ldvsl, &ierr);
+ }
+ if (ilvsr) {
+ zggbak_("P", "R", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n, &
+ vsr[vsr_offset], ldvsr, &ierr);
+ }
+
+/* Undo scaling */
+
+ if (ilascl) {
+ zlascl_("U", &c__0, &c__0, &anrmto, &anrm, n, n, &a[a_offset], lda, &
+ ierr);
+ zlascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alpha[1], n, &
+ ierr);
+ }
+
+ if (ilbscl) {
+ zlascl_("U", &c__0, &c__0, &bnrmto, &bnrm, n, n, &b[b_offset], ldb, &
+ ierr);
+ zlascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+ ierr);
+ }
+
+ if (wantst) {
+
+/* Check if reordering is correct */
+
+ lastsl = TRUE_;
+ *sdim = 0;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ cursl = (*selctg)(&alpha[i__], &beta[i__]);
+ if (cursl) {
+ ++(*sdim);
+ }
+ if (cursl && ! lastsl) {
+ *info = *n + 2;
+ }
+ lastsl = cursl;
+/* L20: */
+ }
+
+ }
+
+L30:
+
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZGGES */
+
+} /* zgges_ */
diff --git a/contrib/libs/clapack/zggesx.c b/contrib/libs/clapack/zggesx.c
new file mode 100644
index 0000000000..b3a5d43ac6
--- /dev/null
+++ b/contrib/libs/clapack/zggesx.c
@@ -0,0 +1,708 @@
+/* zggesx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zggesx_(char *jobvsl, char *jobvsr, char *sort, L_fp
+ selctg, char *sense, integer *n, doublecomplex *a, integer *lda,
+ doublecomplex *b, integer *ldb, integer *sdim, doublecomplex *alpha,
+ doublecomplex *beta, doublecomplex *vsl, integer *ldvsl,
+ doublecomplex *vsr, integer *ldvsr, doublereal *rconde, doublereal *
+ rcondv, doublecomplex *work, integer *lwork, doublereal *rwork,
+ integer *iwork, integer *liwork, logical *bwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, vsl_dim1, vsl_offset,
+ vsr_dim1, vsr_offset, i__1, i__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ doublereal pl, pr, dif[2];
+ integer ihi, ilo;
+ doublereal eps;
+ integer ijob;
+ doublereal anrm, bnrm;
+ integer ierr, itau, iwrk, lwrk;
+ extern logical lsame_(char *, char *);
+ integer ileft, icols;
+ logical cursl, ilvsl, ilvsr;
+ integer irwrk, irows;
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int zggbak_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublecomplex *,
+ integer *, integer *), zggbal_(char *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *
+, integer *, doublereal *, doublereal *, doublereal *, integer *);
+ logical ilascl, ilbscl;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ doublereal bignum;
+ integer ijobvl, iright;
+ extern /* Subroutine */ int zgghrd_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *
+), zlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublecomplex *,
+ integer *, integer *);
+ integer ijobvr;
+ logical wantsb;
+ integer liwmin;
+ logical wantse, lastsl;
+ doublereal anrmto, bnrmto;
+ extern /* Subroutine */ int zgeqrf_(integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *
+);
+ integer maxwrk;
+ logical wantsn;
+ integer minwrk;
+ doublereal smlnum;
+ extern /* Subroutine */ int zhgeqz_(char *, char *, char *, integer *,
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublereal *, integer *), zlacpy_(char *,
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *
+, integer *), zlaset_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, doublecomplex *, integer *);
+ logical wantst, lquery, wantsv;
+ extern /* Subroutine */ int ztgsen_(integer *, logical *, logical *,
+ logical *, integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *, integer *, integer *, doublereal *,
+ doublereal *, doublereal *, doublecomplex *, integer *, integer *,
+ integer *, integer *), zungqr_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *), zunmqr_(char *, char *, integer *, integer
+ *, integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+/* .. Function Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGGESX computes for a pair of N-by-N complex nonsymmetric matrices */
+/* (A,B), the generalized eigenvalues, the complex Schur form (S,T), */
+/* and, optionally, the left and/or right matrices of Schur vectors (VSL */
+/* and VSR). This gives the generalized Schur factorization */
+
+/* (A,B) = ( (VSL) S (VSR)**H, (VSL) T (VSR)**H ) */
+
+/* where (VSR)**H is the conjugate-transpose of VSR. */
+
+/* Optionally, it also orders the eigenvalues so that a selected cluster */
+/* of eigenvalues appears in the leading diagonal blocks of the upper */
+/* triangular matrix S and the upper triangular matrix T; computes */
+/* a reciprocal condition number for the average of the selected */
+/* eigenvalues (RCONDE); and computes a reciprocal condition number for */
+/* the right and left deflating subspaces corresponding to the selected */
+/* eigenvalues (RCONDV). The leading columns of VSL and VSR then form */
+/* an orthonormal basis for the corresponding left and right eigenspaces */
+/* (deflating subspaces). */
+
+/* A generalized eigenvalue for a pair of matrices (A,B) is a scalar w */
+/* or a ratio alpha/beta = w, such that A - w*B is singular. It is */
+/* usually represented as the pair (alpha,beta), as there is a */
+/* reasonable interpretation for beta=0 or for both being zero. */
+
+/* A pair of matrices (S,T) is in generalized complex Schur form if T is */
+/* upper triangular with non-negative diagonal and S is upper */
+/* triangular. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVSL (input) CHARACTER*1 */
+/* = 'N': do not compute the left Schur vectors; */
+/* = 'V': compute the left Schur vectors. */
+
+/* JOBVSR (input) CHARACTER*1 */
+/* = 'N': do not compute the right Schur vectors; */
+/* = 'V': compute the right Schur vectors. */
+
+/* SORT (input) CHARACTER*1 */
+/* Specifies whether or not to order the eigenvalues on the */
+/* diagonal of the generalized Schur form. */
+/* = 'N': Eigenvalues are not ordered; */
+/* = 'S': Eigenvalues are ordered (see SELCTG). */
+
+/* SELCTG (external procedure) LOGICAL FUNCTION of two COMPLEX*16 arguments */
+/* SELCTG must be declared EXTERNAL in the calling subroutine. */
+/* If SORT = 'N', SELCTG is not referenced. */
+/* If SORT = 'S', SELCTG is used to select eigenvalues to sort */
+/* to the top left of the Schur form. */
+/* Note that a selected complex eigenvalue may no longer satisfy */
+/* SELCTG(ALPHA(j),BETA(j)) = .TRUE. after ordering, since */
+/* ordering may change the value of complex eigenvalues */
+/* (especially if the eigenvalue is ill-conditioned), in this */
+/* case INFO is set to N+3 (see INFO below). */
+
+/* SENSE (input) CHARACTER*1 */
+/* Determines which reciprocal condition numbers are computed. */
+/* = 'N' : None are computed; */
+/* = 'E' : Computed for average of selected eigenvalues only; */
+/* = 'V' : Computed for selected deflating subspaces only; */
+/* = 'B' : Computed for both. */
+/* If SENSE = 'E', 'V', or 'B', SORT must equal 'S'. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VSL, and VSR. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA, N) */
+/* On entry, the first of the pair of matrices. */
+/* On exit, A has been overwritten by its generalized Schur */
+/* form S. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB, N) */
+/* On entry, the second of the pair of matrices. */
+/* On exit, B has been overwritten by its generalized Schur */
+/* form T. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* SDIM (output) INTEGER */
+/* If SORT = 'N', SDIM = 0. */
+/* If SORT = 'S', SDIM = number of eigenvalues (after sorting) */
+/* for which SELCTG is true. */
+
+/* ALPHA (output) COMPLEX*16 array, dimension (N) */
+/* BETA (output) COMPLEX*16 array, dimension (N) */
+/* On exit, ALPHA(j)/BETA(j), j=1,...,N, will be the */
+/* generalized eigenvalues. ALPHA(j) and BETA(j),j=1,...,N are */
+/* the diagonals of the complex Schur form (S,T). BETA(j) will */
+/* be non-negative real. */
+
+/* Note: the quotients ALPHA(j)/BETA(j) may easily over- or */
+/* underflow, and BETA(j) may even be zero. Thus, the user */
+/* should avoid naively computing the ratio alpha/beta. */
+/* However, ALPHA will be always less than and usually */
+/* comparable with norm(A) in magnitude, and BETA always less */
+/* than and usually comparable with norm(B). */
+
+/* VSL (output) COMPLEX*16 array, dimension (LDVSL,N) */
+/* If JOBVSL = 'V', VSL will contain the left Schur vectors. */
+/* Not referenced if JOBVSL = 'N'. */
+
+/* LDVSL (input) INTEGER */
+/* The leading dimension of the matrix VSL. LDVSL >=1, and */
+/* if JOBVSL = 'V', LDVSL >= N. */
+
+/* VSR (output) COMPLEX*16 array, dimension (LDVSR,N) */
+/* If JOBVSR = 'V', VSR will contain the right Schur vectors. */
+/* Not referenced if JOBVSR = 'N'. */
+
+/* LDVSR (input) INTEGER */
+/* The leading dimension of the matrix VSR. LDVSR >= 1, and */
+/* if JOBVSR = 'V', LDVSR >= N. */
+
+/* RCONDE (output) DOUBLE PRECISION array, dimension ( 2 ) */
+/* If SENSE = 'E' or 'B', RCONDE(1) and RCONDE(2) contain the */
+/* reciprocal condition numbers for the average of the selected */
+/* eigenvalues. */
+/* Not referenced if SENSE = 'N' or 'V'. */
+
+/* RCONDV (output) DOUBLE PRECISION array, dimension ( 2 ) */
+/* If SENSE = 'V' or 'B', RCONDV(1) and RCONDV(2) contain the */
+/* reciprocal condition number for the selected deflating */
+/* subspaces. */
+/* Not referenced if SENSE = 'N' or 'E'. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If N = 0, LWORK >= 1, else if SENSE = 'E', 'V', or 'B', */
+/* LWORK >= MAX(1,2*N,2*SDIM*(N-SDIM)), else */
+/* LWORK >= MAX(1,2*N). Note that 2*SDIM*(N-SDIM) <= N*N/2. */
+/* Note also that an error is only returned if */
+/* LWORK < MAX(1,2*N), but if SENSE = 'E' or 'V' or 'B' this may */
+/* not be large enough. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the bound on the optimal size of the WORK */
+/* array and the minimum size of the IWORK array, returns these */
+/* values as the first entries of the WORK and IWORK arrays, and */
+/* no error message related to LWORK or LIWORK is issued by */
+/* XERBLA. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension ( 8*N ) */
+/* Real workspace. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the minimum LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If SENSE = 'N' or N = 0, LIWORK >= 1, otherwise */
+/* LIWORK >= N+2. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the bound on the optimal size of the */
+/* WORK array and the minimum size of the IWORK array, returns */
+/* these values as the first entries of the WORK and IWORK */
+/* arrays, and no error message related to LWORK or LIWORK is */
+/* issued by XERBLA. */
+
+/* BWORK (workspace) LOGICAL array, dimension (N) */
+/* Not referenced if SORT = 'N'. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1,...,N: */
+/* The QZ iteration failed. (A,B) are not in Schur */
+/* form, but ALPHA(j) and BETA(j) should be correct for */
+/* j=INFO+1,...,N. */
+/* > N: =N+1: other than QZ iteration failed in ZHGEQZ */
+/* =N+2: after reordering, roundoff changed values of */
+/* some complex eigenvalues so that leading */
+/* eigenvalues in the Generalized Schur form no */
+/* longer satisfy SELCTG=.TRUE. This could also */
+/* be caused due to scaling. */
+/* =N+3: reordering failed in ZTGSEN. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alpha;
+ --beta;
+ vsl_dim1 = *ldvsl;
+ vsl_offset = 1 + vsl_dim1;
+ vsl -= vsl_offset;
+ vsr_dim1 = *ldvsr;
+ vsr_offset = 1 + vsr_dim1;
+ vsr -= vsr_offset;
+ --rconde;
+ --rcondv;
+ --work;
+ --rwork;
+ --iwork;
+ --bwork;
+
+ /* Function Body */
+ if (lsame_(jobvsl, "N")) {
+ ijobvl = 1;
+ ilvsl = FALSE_;
+ } else if (lsame_(jobvsl, "V")) {
+ ijobvl = 2;
+ ilvsl = TRUE_;
+ } else {
+ ijobvl = -1;
+ ilvsl = FALSE_;
+ }
+
+ if (lsame_(jobvsr, "N")) {
+ ijobvr = 1;
+ ilvsr = FALSE_;
+ } else if (lsame_(jobvsr, "V")) {
+ ijobvr = 2;
+ ilvsr = TRUE_;
+ } else {
+ ijobvr = -1;
+ ilvsr = FALSE_;
+ }
+
+ wantst = lsame_(sort, "S");
+ wantsn = lsame_(sense, "N");
+ wantse = lsame_(sense, "E");
+ wantsv = lsame_(sense, "V");
+ wantsb = lsame_(sense, "B");
+ lquery = *lwork == -1 || *liwork == -1;
+ if (wantsn) {
+ ijob = 0;
+ } else if (wantse) {
+ ijob = 1;
+ } else if (wantsv) {
+ ijob = 2;
+ } else if (wantsb) {
+ ijob = 4;
+ }
+
+/* Test the input arguments */
+
+ *info = 0;
+ if (ijobvl <= 0) {
+ *info = -1;
+ } else if (ijobvr <= 0) {
+ *info = -2;
+ } else if (! wantst && ! lsame_(sort, "N")) {
+ *info = -3;
+ } else if (! (wantsn || wantse || wantsv || wantsb) || ! wantst && !
+ wantsn) {
+ *info = -5;
+ } else if (*n < 0) {
+ *info = -6;
+ } else if (*lda < max(1,*n)) {
+ *info = -8;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldvsl < 1 || ilvsl && *ldvsl < *n) {
+ *info = -15;
+ } else if (*ldvsr < 1 || ilvsr && *ldvsr < *n) {
+ *info = -17;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV.) */
+
+ if (*info == 0) {
+ if (*n > 0) {
+ minwrk = *n << 1;
+ maxwrk = *n * (ilaenv_(&c__1, "ZGEQRF", " ", n, &c__1, n, &c__0) + 1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * (ilaenv_(&c__1, "ZUNMQR", " ", n, &
+ c__1, n, &c_n1) + 1);
+ maxwrk = max(i__1,i__2);
+ if (ilvsl) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * (ilaenv_(&c__1, "ZUNGQR", " ", n, &
+ c__1, n, &c_n1) + 1);
+ maxwrk = max(i__1,i__2);
+ }
+ lwrk = maxwrk;
+ if (ijob >= 1) {
+/* Computing MAX */
+ i__1 = lwrk, i__2 = *n * *n / 2;
+ lwrk = max(i__1,i__2);
+ }
+ } else {
+ minwrk = 1;
+ maxwrk = 1;
+ lwrk = 1;
+ }
+ work[1].r = (doublereal) lwrk, work[1].i = 0.;
+ if (wantsn || *n == 0) {
+ liwmin = 1;
+ } else {
+ liwmin = *n + 2;
+ }
+ iwork[1] = liwmin;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -21;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -24;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGGESX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *sdim = 0;
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = dlamch_("P");
+ smlnum = dlamch_("S");
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = zlange_("M", n, n, &a[a_offset], lda, &rwork[1]);
+ ilascl = FALSE_;
+ if (anrm > 0. && anrm < smlnum) {
+ anrmto = smlnum;
+ ilascl = TRUE_;
+ } else if (anrm > bignum) {
+ anrmto = bignum;
+ ilascl = TRUE_;
+ }
+ if (ilascl) {
+ zlascl_("G", &c__0, &c__0, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+ bnrm = zlange_("M", n, n, &b[b_offset], ldb, &rwork[1]);
+ ilbscl = FALSE_;
+ if (bnrm > 0. && bnrm < smlnum) {
+ bnrmto = smlnum;
+ ilbscl = TRUE_;
+ } else if (bnrm > bignum) {
+ bnrmto = bignum;
+ ilbscl = TRUE_;
+ }
+ if (ilbscl) {
+ zlascl_("G", &c__0, &c__0, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+ ierr);
+ }
+
+/* Permute the matrix to make it more nearly triangular */
+/* (Real Workspace: need 6*N) */
+
+ ileft = 1;
+ iright = *n + 1;
+ irwrk = iright + *n;
+ zggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &rwork[
+ ileft], &rwork[iright], &rwork[irwrk], &ierr);
+
+/* Reduce B to triangular form (QR decomposition of B) */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+ irows = ihi + 1 - ilo;
+ icols = *n + 1 - ilo;
+ itau = 1;
+ iwrk = itau + irows;
+ i__1 = *lwork + 1 - iwrk;
+ zgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+ iwrk], &i__1, &ierr);
+
+/* Apply the unitary transformation to matrix A */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+ i__1 = *lwork + 1 - iwrk;
+ zunmqr_("L", "C", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+ work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwrk], &i__1, &
+ ierr);
+
+/* Initialize VSL */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+ if (ilvsl) {
+ zlaset_("Full", n, n, &c_b1, &c_b2, &vsl[vsl_offset], ldvsl);
+ if (irows > 1) {
+ i__1 = irows - 1;
+ i__2 = irows - 1;
+ zlacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vsl[
+ ilo + 1 + ilo * vsl_dim1], ldvsl);
+ }
+ i__1 = *lwork + 1 - iwrk;
+ zungqr_(&irows, &irows, &irows, &vsl[ilo + ilo * vsl_dim1], ldvsl, &
+ work[itau], &work[iwrk], &i__1, &ierr);
+ }
+
+/* Initialize VSR */
+
+ if (ilvsr) {
+ zlaset_("Full", n, n, &c_b1, &c_b2, &vsr[vsr_offset], ldvsr);
+ }
+
+/* Reduce to generalized Hessenberg form */
+/* (Workspace: none needed) */
+
+ zgghrd_(jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset],
+ ldb, &vsl[vsl_offset], ldvsl, &vsr[vsr_offset], ldvsr, &ierr);
+
+ *sdim = 0;
+
+/* Perform QZ algorithm, computing Schur vectors if desired */
+/* (Complex Workspace: need N) */
+/* (Real Workspace: need N) */
+
+ iwrk = itau;
+ i__1 = *lwork + 1 - iwrk;
+ zhgeqz_("S", jobvsl, jobvsr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+ b_offset], ldb, &alpha[1], &beta[1], &vsl[vsl_offset], ldvsl, &
+ vsr[vsr_offset], ldvsr, &work[iwrk], &i__1, &rwork[irwrk], &ierr);
+ if (ierr != 0) {
+ if (ierr > 0 && ierr <= *n) {
+ *info = ierr;
+ } else if (ierr > *n && ierr <= *n << 1) {
+ *info = ierr - *n;
+ } else {
+ *info = *n + 1;
+ }
+ goto L40;
+ }
+
+/* Sort eigenvalues ALPHA/BETA and compute the reciprocal of */
+/* condition number(s) */
+
+ if (wantst) {
+
+/* Undo scaling on eigenvalues before SELCTGing */
+
+ if (ilascl) {
+ zlascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alpha[1], n,
+ &ierr);
+ }
+ if (ilbscl) {
+ zlascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n,
+ &ierr);
+ }
+
+/* Select eigenvalues */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ bwork[i__] = (*selctg)(&alpha[i__], &beta[i__]);
+/* L10: */
+ }
+
+/* Reorder eigenvalues, transform Generalized Schur vectors, and */
+/* compute reciprocal condition numbers */
+/* (Complex Workspace: If IJOB >= 1, need MAX(1, 2*SDIM*(N-SDIM)) */
+/* otherwise, need 1 ) */
+
+ i__1 = *lwork - iwrk + 1;
+ ztgsen_(&ijob, &ilvsl, &ilvsr, &bwork[1], n, &a[a_offset], lda, &b[
+ b_offset], ldb, &alpha[1], &beta[1], &vsl[vsl_offset], ldvsl,
+ &vsr[vsr_offset], ldvsr, sdim, &pl, &pr, dif, &work[iwrk], &
+ i__1, &iwork[1], liwork, &ierr);
+
+ if (ijob >= 1) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*sdim << 1) * (*n - *sdim);
+ maxwrk = max(i__1,i__2);
+ }
+ if (ierr == -21) {
+
+/* not enough complex workspace */
+
+ *info = -21;
+ } else {
+ if (ijob == 1 || ijob == 4) {
+ rconde[1] = pl;
+ rconde[2] = pr;
+ }
+ if (ijob == 2 || ijob == 4) {
+ rcondv[1] = dif[0];
+ rcondv[2] = dif[1];
+ }
+ if (ierr == 1) {
+ *info = *n + 3;
+ }
+ }
+
+ }
+
+/* Apply permutation to VSL and VSR */
+/* (Workspace: none needed) */
+
+ if (ilvsl) {
+ zggbak_("P", "L", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n, &
+ vsl[vsl_offset], ldvsl, &ierr);
+ }
+
+ if (ilvsr) {
+ zggbak_("P", "R", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n, &
+ vsr[vsr_offset], ldvsr, &ierr);
+ }
+
+/* Undo scaling */
+
+ if (ilascl) {
+ zlascl_("U", &c__0, &c__0, &anrmto, &anrm, n, n, &a[a_offset], lda, &
+ ierr);
+ zlascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alpha[1], n, &
+ ierr);
+ }
+
+ if (ilbscl) {
+ zlascl_("U", &c__0, &c__0, &bnrmto, &bnrm, n, n, &b[b_offset], ldb, &
+ ierr);
+ zlascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+ ierr);
+ }
+
+ if (wantst) {
+
+/* Check if reordering is correct */
+
+ lastsl = TRUE_;
+ *sdim = 0;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ cursl = (*selctg)(&alpha[i__], &beta[i__]);
+ if (cursl) {
+ ++(*sdim);
+ }
+ if (cursl && ! lastsl) {
+ *info = *n + 2;
+ }
+ lastsl = cursl;
+/* L30: */
+ }
+
+ }
+
+L40:
+
+ work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of ZGGESX */
+
+} /* zggesx_ */
diff --git a/contrib/libs/clapack/zggev.c b/contrib/libs/clapack/zggev.c
new file mode 100644
index 0000000000..6adee6b888
--- /dev/null
+++ b/contrib/libs/clapack/zggev.c
@@ -0,0 +1,599 @@
+/* zggev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+static integer c__0 = 0;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zggev_(char *jobvl, char *jobvr, integer *n,
+	doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb,
+	doublecomplex *alpha, doublecomplex *beta, doublecomplex *vl, integer
+	*ldvl, doublecomplex *vr, integer *ldvr, doublecomplex *work, integer
+	*lwork, doublereal *rwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, vl_dim1, vl_offset, vr_dim1,
+	    vr_offset, i__1, i__2, i__3, i__4;
+    doublereal d__1, d__2, d__3, d__4;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal), d_imag(doublecomplex *);
+
+    /* Local variables */
+    integer jc, in, jr, ihi, ilo;
+    doublereal eps;
+    logical ilv;
+    doublereal anrm, bnrm;
+    integer ierr, itau;
+    doublereal temp;
+    logical ilvl, ilvr;
+    integer iwrk;
+    extern logical lsame_(char *, char *);
+    integer ileft, icols, irwrk, irows;
+    extern /* Subroutine */ int dlabad_(doublereal *, doublereal *);
+    extern doublereal dlamch_(char *);
+    extern /* Subroutine */ int zggbak_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublecomplex *,
+	    integer *, integer *), zggbal_(char *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *
+, integer *, doublereal *, doublereal *, doublereal *, integer *);
+    logical ilascl, ilbscl;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    logical ldumma[1];
+    char chtemp[1];
+    doublereal bignum;
+    extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+	    integer *, doublereal *);
+    integer ijobvl, iright;
+    extern /* Subroutine */ int zgghrd_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *
+), zlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublecomplex *,
+	    integer *, integer *);
+    integer ijobvr;
+    extern /* Subroutine */ int zgeqrf_(integer *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, doublecomplex *, integer *, integer *
+);
+    doublereal anrmto;
+    integer lwkmin;
+    doublereal bnrmto;
+    extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *),
+	    zlaset_(char *, integer *, integer *, doublecomplex *,
+	    doublecomplex *, doublecomplex *, integer *), ztgevc_(
+	    char *, char *, logical *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, integer *, integer *, doublecomplex *,
+	    doublereal *, integer *), zhgeqz_(char *, char *,
+	    char *, integer *, integer *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, integer *, doublereal *, integer *);
+    doublereal smlnum;
+    integer lwkopt;
+    logical lquery;
+    extern /* Subroutine */ int zungqr_(integer *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, integer *), zunmqr_(char *, char *, integer *, integer
+	    *, integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGGEV computes for a pair of N-by-N complex nonsymmetric matrices */
+/* (A,B), the generalized eigenvalues, and optionally, the left and/or */
+/* right generalized eigenvectors. */
+
+/* A generalized eigenvalue for a pair of matrices (A,B) is a scalar */
+/* lambda or a ratio alpha/beta = lambda, such that A - lambda*B is */
+/* singular. It is usually represented as the pair (alpha,beta), as */
+/* there is a reasonable interpretation for beta=0, and even for both */
+/* being zero. */
+
+/* The right generalized eigenvector v(j) corresponding to the */
+/* generalized eigenvalue lambda(j) of (A,B) satisfies */
+
+/* A * v(j) = lambda(j) * B * v(j). */
+
+/* The left generalized eigenvector u(j) corresponding to the */
+/* generalized eigenvalues lambda(j) of (A,B) satisfies */
+
+/* u(j)**H * A = lambda(j) * u(j)**H * B */
+
+/* where u(j)**H is the conjugate-transpose of u(j). */
+
+/* Arguments */
+/* ========= */
+
+/* JOBVL (input) CHARACTER*1 */
+/* = 'N': do not compute the left generalized eigenvectors; */
+/* = 'V': compute the left generalized eigenvectors. */
+
+/* JOBVR (input) CHARACTER*1 */
+/* = 'N': do not compute the right generalized eigenvectors; */
+/* = 'V': compute the right generalized eigenvectors. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VL, and VR. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA, N) */
+/* On entry, the matrix A in the pair (A,B). */
+/* On exit, A has been overwritten. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB, N) */
+/* On entry, the matrix B in the pair (A,B). */
+/* On exit, B has been overwritten. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* ALPHA (output) COMPLEX*16 array, dimension (N) */
+/* BETA (output) COMPLEX*16 array, dimension (N) */
+/* On exit, ALPHA(j)/BETA(j), j=1,...,N, will be the */
+/* generalized eigenvalues. */
+
+/* Note: the quotients ALPHA(j)/BETA(j) may easily over- or */
+/* underflow, and BETA(j) may even be zero. Thus, the user */
+/* should avoid naively computing the ratio alpha/beta. */
+/* However, ALPHA will be always less than and usually */
+/* comparable with norm(A) in magnitude, and BETA always less */
+/* than and usually comparable with norm(B). */
+
+/* VL (output) COMPLEX*16 array, dimension (LDVL,N) */
+/* If JOBVL = 'V', the left generalized eigenvectors u(j) are */
+/* stored one after another in the columns of VL, in the same */
+/* order as their eigenvalues. */
+/* Each eigenvector is scaled so the largest component has */
+/* abs(real part) + abs(imag. part) = 1. */
+/* Not referenced if JOBVL = 'N'. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the matrix VL. LDVL >= 1, and */
+/* if JOBVL = 'V', LDVL >= N. */
+
+/* VR (output) COMPLEX*16 array, dimension (LDVR,N) */
+/* If JOBVR = 'V', the right generalized eigenvectors v(j) are */
+/* stored one after another in the columns of VR, in the same */
+/* order as their eigenvalues. */
+/* Each eigenvector is scaled so the largest component has */
+/* abs(real part) + abs(imag. part) = 1. */
+/* Not referenced if JOBVR = 'N'. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the matrix VR. LDVR >= 1, and */
+/* if JOBVR = 'V', LDVR >= N. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,2*N). */
+/* For good performance, LWORK must generally be larger. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace/output) DOUBLE PRECISION array, dimension (8*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* =1,...,N: */
+/* The QZ iteration failed. No eigenvectors have been */
+/* calculated, but ALPHA(j) and BETA(j) should be */
+/* correct for j=INFO+1,...,N. */
+/* > N: =N+1: other than QZ iteration failed in ZHGEQZ, */
+/* =N+2: error return from ZTGEVC. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --alpha;
+    --beta;
+    vl_dim1 = *ldvl;
+    vl_offset = 1 + vl_dim1;
+    vl -= vl_offset;
+    vr_dim1 = *ldvr;
+    vr_offset = 1 + vr_dim1;
+    vr -= vr_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    if (lsame_(jobvl, "N")) {
+	ijobvl = 1;
+	ilvl = FALSE_;
+    } else if (lsame_(jobvl, "V")) {
+	ijobvl = 2;
+	ilvl = TRUE_;
+    } else {
+	ijobvl = -1;
+	ilvl = FALSE_;
+    }
+
+    if (lsame_(jobvr, "N")) {
+	ijobvr = 1;
+	ilvr = FALSE_;
+    } else if (lsame_(jobvr, "V")) {
+	ijobvr = 2;
+	ilvr = TRUE_;
+    } else {
+	ijobvr = -1;
+	ilvr = FALSE_;
+    }
+    ilv = ilvl || ilvr;
+
+/* Test the input arguments */
+
+    *info = 0;
+    lquery = *lwork == -1;
+    if (ijobvl <= 0) {
+	*info = -1;
+    } else if (ijobvr <= 0) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    } else if (*ldvl < 1 || ilvl && *ldvl < *n) {
+	*info = -11;
+    } else if (*ldvr < 1 || ilvr && *ldvr < *n) {
+	*info = -13;
+    }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV. The workspace is */
+/* computed assuming ILO = 1 and IHI = N, the worst case.) */
+
+    if (*info == 0) {
+/* Computing MAX */
+	i__1 = 1, i__2 = *n << 1;
+	lwkmin = max(i__1,i__2);
+/* Computing MAX */
+	i__1 = 1, i__2 = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", n, &c__1, n,
+		&c__0);
+	lwkopt = max(i__1,i__2);
+/* Computing MAX */
+	i__1 = lwkopt, i__2 = *n + *n * ilaenv_(&c__1, "ZUNMQR", " ", n, &
+		c__1, n, &c__0);
+	lwkopt = max(i__1,i__2);
+	if (ilvl) {
+/* Computing MAX */
+	    i__1 = lwkopt, i__2 = *n + *n * ilaenv_(&c__1, "ZUNGQR", " ", n, &
+		    c__1, n, &c_n1);
+	    lwkopt = max(i__1,i__2);
+	}
+	work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+	if (*lwork < lwkmin && ! lquery) {
+	    *info = -15;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZGGEV ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Get machine constants */
+
+    eps = dlamch_("E") * dlamch_("B");
+    smlnum = dlamch_("S");
+    bignum = 1. / smlnum;
+    dlabad_(&smlnum, &bignum);
+    smlnum = sqrt(smlnum) / eps;
+    bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+    anrm = zlange_("M", n, n, &a[a_offset], lda, &rwork[1]);
+    ilascl = FALSE_;
+    if (anrm > 0. && anrm < smlnum) {
+	anrmto = smlnum;
+	ilascl = TRUE_;
+    } else if (anrm > bignum) {
+	anrmto = bignum;
+	ilascl = TRUE_;
+    }
+    if (ilascl) {
+	zlascl_("G", &c__0, &c__0, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+		ierr);
+    }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+    bnrm = zlange_("M", n, n, &b[b_offset], ldb, &rwork[1]);
+    ilbscl = FALSE_;
+    if (bnrm > 0. && bnrm < smlnum) {
+	bnrmto = smlnum;
+	ilbscl = TRUE_;
+    } else if (bnrm > bignum) {
+	bnrmto = bignum;
+	ilbscl = TRUE_;
+    }
+    if (ilbscl) {
+	zlascl_("G", &c__0, &c__0, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+		ierr);
+    }
+
+/* Permute the matrices A, B to isolate eigenvalues if possible */
+/* (Real Workspace: need 6*N) */
+
+    ileft = 1;
+    iright = *n + 1;
+    irwrk = iright + *n;
+    zggbal_("P", n, &a[a_offset], lda, &b[b_offset], ldb, &ilo, &ihi, &rwork[
+	    ileft], &rwork[iright], &rwork[irwrk], &ierr);
+
+/* Reduce B to triangular form (QR decomposition of B) */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+    irows = ihi + 1 - ilo;
+    if (ilv) {
+	icols = *n + 1 - ilo;
+    } else {
+	icols = irows;
+    }
+    itau = 1;
+    iwrk = itau + irows;
+    i__1 = *lwork + 1 - iwrk;
+    zgeqrf_(&irows, &icols, &b[ilo + ilo * b_dim1], ldb, &work[itau], &work[
+	    iwrk], &i__1, &ierr);
+
+/* Apply the unitary transformation to matrix A */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+    i__1 = *lwork + 1 - iwrk;
+    zunmqr_("L", "C", &irows, &icols, &irows, &b[ilo + ilo * b_dim1], ldb, &
+	    work[itau], &a[ilo + ilo * a_dim1], lda, &work[iwrk], &i__1, &
+	    ierr);
+
+/* Initialize VL */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+    if (ilvl) {
+	zlaset_("Full", n, n, &c_b1, &c_b2, &vl[vl_offset], ldvl);
+	if (irows > 1) {
+	    i__1 = irows - 1;
+	    i__2 = irows - 1;
+	    zlacpy_("L", &i__1, &i__2, &b[ilo + 1 + ilo * b_dim1], ldb, &vl[
+		    ilo + 1 + ilo * vl_dim1], ldvl);
+	}
+	i__1 = *lwork + 1 - iwrk;
+	zungqr_(&irows, &irows, &irows, &vl[ilo + ilo * vl_dim1], ldvl, &work[
+		itau], &work[iwrk], &i__1, &ierr);
+    }
+
+/* Initialize VR */
+
+    if (ilvr) {
+	zlaset_("Full", n, n, &c_b1, &c_b2, &vr[vr_offset], ldvr);
+    }
+
+/* Reduce to generalized Hessenberg form */
+
+    if (ilv) {
+
+/* Eigenvectors requested -- work on whole matrix. */
+
+	zgghrd_(jobvl, jobvr, n, &ilo, &ihi, &a[a_offset], lda, &b[b_offset],
+		ldb, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, &ierr);
+    } else {
+	zgghrd_("N", "N", &irows, &c__1, &irows, &a[ilo + ilo * a_dim1], lda,
+		&b[ilo + ilo * b_dim1], ldb, &vl[vl_offset], ldvl, &vr[
+		vr_offset], ldvr, &ierr);
+    }
+
+/* Perform QZ algorithm (Compute eigenvalues, and optionally, the */
+/* Schur form and Schur vectors) */
+/* (Complex Workspace: need N) */
+/* (Real Workspace: need N) */
+
+    iwrk = itau;
+    if (ilv) {
+	*(unsigned char *)chtemp = 'S';
+    } else {
+	*(unsigned char *)chtemp = 'E';
+    }
+    i__1 = *lwork + 1 - iwrk;
+    zhgeqz_(chtemp, jobvl, jobvr, n, &ilo, &ihi, &a[a_offset], lda, &b[
+	    b_offset], ldb, &alpha[1], &beta[1], &vl[vl_offset], ldvl, &vr[
+	    vr_offset], ldvr, &work[iwrk], &i__1, &rwork[irwrk], &ierr);
+    if (ierr != 0) {
+	if (ierr > 0 && ierr <= *n) {
+	    *info = ierr;
+	} else if (ierr > *n && ierr <= *n << 1) {
+	    *info = ierr - *n;
+	} else {
+	    *info = *n + 1;
+	}
+	goto L70;
+    }
+
+/* Compute Eigenvectors */
+/* (Real Workspace: need 2*N) */
+/* (Complex Workspace: need 2*N) */
+
+    if (ilv) {
+	if (ilvl) {
+	    if (ilvr) {
+		*(unsigned char *)chtemp = 'B';
+	    } else {
+		*(unsigned char *)chtemp = 'L';
+	    }
+	} else {
+	    *(unsigned char *)chtemp = 'R';
+	}
+
+	ztgevc_(chtemp, "B", ldumma, n, &a[a_offset], lda, &b[b_offset], ldb,
+		&vl[vl_offset], ldvl, &vr[vr_offset], ldvr, n, &in, &work[
+		iwrk], &rwork[irwrk], &ierr);
+	if (ierr != 0) {
+	    *info = *n + 2;
+	    goto L70;
+	}
+
+/* Undo balancing on VL and VR and normalization */
+/* (Workspace: none needed) */
+
+	if (ilvl) {
+	    zggbak_("P", "L", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n,
+		    &vl[vl_offset], ldvl, &ierr);
+	    i__1 = *n;
+	    for (jc = 1; jc <= i__1; ++jc) {
+		temp = 0.;
+		i__2 = *n;
+		for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+		    i__3 = jr + jc * vl_dim1;
+		    d__3 = temp, d__4 = (d__1 = vl[i__3].r, abs(d__1)) + (
+			    d__2 = d_imag(&vl[jr + jc * vl_dim1]), abs(d__2));
+		    temp = max(d__3,d__4);
+/* L10: */
+		}
+		if (temp < smlnum) {
+		    goto L30;
+		}
+		temp = 1. / temp;
+		i__2 = *n;
+		for (jr = 1; jr <= i__2; ++jr) {
+		    i__3 = jr + jc * vl_dim1;
+		    i__4 = jr + jc * vl_dim1;
+		    z__1.r = temp * vl[i__4].r, z__1.i = temp * vl[i__4].i;
+		    vl[i__3].r = z__1.r, vl[i__3].i = z__1.i;
+/* L20: */
+		}
+L30:
+		;
+	    }
+	}
+	if (ilvr) {
+	    zggbak_("P", "R", n, &ilo, &ihi, &rwork[ileft], &rwork[iright], n,
+		    &vr[vr_offset], ldvr, &ierr);
+	    i__1 = *n;
+	    for (jc = 1; jc <= i__1; ++jc) {
+		temp = 0.;
+		i__2 = *n;
+		for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+		    i__3 = jr + jc * vr_dim1;
+		    d__3 = temp, d__4 = (d__1 = vr[i__3].r, abs(d__1)) + (
+			    d__2 = d_imag(&vr[jr + jc * vr_dim1]), abs(d__2));
+		    temp = max(d__3,d__4);
+/* L40: */
+		}
+		if (temp < smlnum) {
+		    goto L60;
+		}
+		temp = 1. / temp;
+		i__2 = *n;
+		for (jr = 1; jr <= i__2; ++jr) {
+		    i__3 = jr + jc * vr_dim1;
+		    i__4 = jr + jc * vr_dim1;
+		    z__1.r = temp * vr[i__4].r, z__1.i = temp * vr[i__4].i;
+		    vr[i__3].r = z__1.r, vr[i__3].i = z__1.i;
+/* L50: */
+		}
+L60:
+		;
+	    }
+	}
+    }
+
+/* Undo scaling if necessary */
+
+    if (ilascl) {
+	zlascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alpha[1], n, &
+		ierr);
+    }
+
+    if (ilbscl) {
+	zlascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+		ierr);
+    }
+
+L70:
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+    return 0;
+
+/* End of ZGGEV */
+
+} /* zggev_ */
diff --git a/contrib/libs/clapack/zggevx.c b/contrib/libs/clapack/zggevx.c
new file mode 100644
index 0000000000..b589a5a4ea
--- /dev/null
+++ b/contrib/libs/clapack/zggevx.c
@@ -0,0 +1,812 @@
+/* zggevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+static integer c__0 = 0;
+
+/* Subroutine */ int zggevx_(char *balanc, char *jobvl, char *jobvr, char *
+ sense, integer *n, doublecomplex *a, integer *lda, doublecomplex *b,
+ integer *ldb, doublecomplex *alpha, doublecomplex *beta,
+ doublecomplex *vl, integer *ldvl, doublecomplex *vr, integer *ldvr,
+ integer *ilo, integer *ihi, doublereal *lscale, doublereal *rscale,
+ doublereal *abnrm, doublereal *bbnrm, doublereal *rconde, doublereal *
+ rcondv, doublecomplex *work, integer *lwork, doublereal *rwork,
+ integer *iwork, logical *bwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, vl_dim1, vl_offset, vr_dim1,
+ vr_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, m, jc, in, jr;
+ doublereal eps;
+ logical ilv;
+ doublereal anrm, bnrm;
+ integer ierr, itau;
+ doublereal temp;
+ logical ilvl, ilvr;
+ integer iwrk, iwrk1;
+ extern logical lsame_(char *, char *);
+ integer icols;
+ logical noscl;
+ integer irows;
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), zggbak_(char *, char *, integer *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublecomplex *, integer *, integer *), zggbal_(
+ char *, integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *);
+ logical ilascl, ilbscl;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ logical ldumma[1];
+ char chtemp[1];
+ doublereal bignum;
+ extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ integer ijobvl;
+ extern /* Subroutine */ int zgghrd_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *
+), zlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublecomplex *,
+ integer *, integer *);
+ integer ijobvr;
+ logical wantsb;
+ extern /* Subroutine */ int zgeqrf_(integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *
+);
+ doublereal anrmto;
+ logical wantse;
+ doublereal bnrmto;
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *),
+ zlaset_(char *, integer *, integer *, doublecomplex *,
+ doublecomplex *, doublecomplex *, integer *), ztgevc_(
+ char *, char *, logical *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, integer *, integer *, doublecomplex *,
+ doublereal *, integer *), ztgsna_(char *, char *,
+ logical *, integer *, doublecomplex *, integer *, doublecomplex *
+, integer *, doublecomplex *, integer *, doublecomplex *, integer
+ *, doublereal *, doublereal *, integer *, integer *,
+ doublecomplex *, integer *, integer *, integer *);
+ integer minwrk;
+ extern /* Subroutine */ int zhgeqz_(char *, char *, char *, integer *,
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublereal *, integer *);
+ integer maxwrk;
+ logical wantsn;
+ doublereal smlnum;
+ logical lquery, wantsv;
+ extern /* Subroutine */ int zungqr_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *), zunmqr_(char *, char *, integer *, integer
+ *, integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGGEVX computes for a pair of N-by-N complex nonsymmetric matrices */
+/* (A,B) the generalized eigenvalues, and optionally, the left and/or */
+/* right generalized eigenvectors. */
+
+/* Optionally, it also computes a balancing transformation to improve */
+/* the conditioning of the eigenvalues and eigenvectors (ILO, IHI, */
+/* LSCALE, RSCALE, ABNRM, and BBNRM), reciprocal condition numbers for */
+/* the eigenvalues (RCONDE), and reciprocal condition numbers for the */
+/* right eigenvectors (RCONDV). */
+
+/* A generalized eigenvalue for a pair of matrices (A,B) is a scalar */
+/* lambda or a ratio alpha/beta = lambda, such that A - lambda*B is */
+/* singular. It is usually represented as the pair (alpha,beta), as */
+/* there is a reasonable interpretation for beta=0, and even for both */
+/* being zero. */
+
+/* The right eigenvector v(j) corresponding to the eigenvalue lambda(j) */
+/* of (A,B) satisfies */
+/* A * v(j) = lambda(j) * B * v(j) . */
+/* The left eigenvector u(j) corresponding to the eigenvalue lambda(j) */
+/* of (A,B) satisfies */
+/* u(j)**H * A = lambda(j) * u(j)**H * B. */
+/* where u(j)**H is the conjugate-transpose of u(j). */
+
+
+/* Arguments */
+/* ========= */
+
+/* BALANC (input) CHARACTER*1 */
+/* Specifies the balance option to be performed: */
+/* = 'N': do not diagonally scale or permute; */
+/* = 'P': permute only; */
+/* = 'S': scale only; */
+/* = 'B': both permute and scale. */
+/* Computed reciprocal condition numbers will be for the */
+/* matrices after permuting and/or balancing. Permuting does */
+/* not change condition numbers (in exact arithmetic), but */
+/* balancing does. */
+
+/* JOBVL (input) CHARACTER*1 */
+/* = 'N': do not compute the left generalized eigenvectors; */
+/* = 'V': compute the left generalized eigenvectors. */
+
+/* JOBVR (input) CHARACTER*1 */
+/* = 'N': do not compute the right generalized eigenvectors; */
+/* = 'V': compute the right generalized eigenvectors. */
+
+/* SENSE (input) CHARACTER*1 */
+/* Determines which reciprocal condition numbers are computed. */
+/* = 'N': none are computed; */
+/* = 'E': computed for eigenvalues only; */
+/* = 'V': computed for eigenvectors only; */
+/* = 'B': computed for eigenvalues and eigenvectors. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A, B, VL, and VR. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA, N) */
+/* On entry, the matrix A in the pair (A,B). */
+/* On exit, A has been overwritten. If JOBVL='V' or JOBVR='V' */
+/* or both, then A contains the first part of the complex Schur */
+/* form of the "balanced" versions of the input A and B. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB, N) */
+/* On entry, the matrix B in the pair (A,B). */
+/* On exit, B has been overwritten. If JOBVL='V' or JOBVR='V' */
+/* or both, then B contains the second part of the complex */
+/* Schur form of the "balanced" versions of the input A and B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB >= max(1,N). */
+
+/* ALPHA (output) COMPLEX*16 array, dimension (N) */
+/* BETA (output) COMPLEX*16 array, dimension (N) */
+/* On exit, ALPHA(j)/BETA(j), j=1,...,N, will be the generalized */
+/* eigenvalues. */
+
+/* Note: the quotient ALPHA(j)/BETA(j) ) may easily over- or */
+/* underflow, and BETA(j) may even be zero. Thus, the user */
+/* should avoid naively computing the ratio ALPHA/BETA. */
+/* However, ALPHA will be always less than and usually */
+/* comparable with norm(A) in magnitude, and BETA always less */
+/* than and usually comparable with norm(B). */
+
+/* VL (output) COMPLEX*16 array, dimension (LDVL,N) */
+/* If JOBVL = 'V', the left generalized eigenvectors u(j) are */
+/* stored one after another in the columns of VL, in the same */
+/* order as their eigenvalues. */
+/* Each eigenvector will be scaled so the largest component */
+/* will have abs(real part) + abs(imag. part) = 1. */
+/* Not referenced if JOBVL = 'N'. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the matrix VL. LDVL >= 1, and */
+/* if JOBVL = 'V', LDVL >= N. */
+
+/* VR (output) COMPLEX*16 array, dimension (LDVR,N) */
+/* If JOBVR = 'V', the right generalized eigenvectors v(j) are */
+/* stored one after another in the columns of VR, in the same */
+/* order as their eigenvalues. */
+/* Each eigenvector will be scaled so the largest component */
+/* will have abs(real part) + abs(imag. part) = 1. */
+/* Not referenced if JOBVR = 'N'. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the matrix VR. LDVR >= 1, and */
+/* if JOBVR = 'V', LDVR >= N. */
+
+/* ILO (output) INTEGER */
+/* IHI (output) INTEGER */
+/* ILO and IHI are integer values such that on exit */
+/* A(i,j) = 0 and B(i,j) = 0 if i > j and */
+/* j = 1,...,ILO-1 or i = IHI+1,...,N. */
+/* If BALANC = 'N' or 'S', ILO = 1 and IHI = N. */
+
+/* LSCALE (output) DOUBLE PRECISION array, dimension (N) */
+/* Details of the permutations and scaling factors applied */
+/* to the left side of A and B. If PL(j) is the index of the */
+/* row interchanged with row j, and DL(j) is the scaling */
+/* factor applied to row j, then */
+/* LSCALE(j) = PL(j) for j = 1,...,ILO-1 */
+/* = DL(j) for j = ILO,...,IHI */
+/* = PL(j) for j = IHI+1,...,N. */
+/* The order in which the interchanges are made is N to IHI+1, */
+/* then 1 to ILO-1. */
+
+/* RSCALE (output) DOUBLE PRECISION array, dimension (N) */
+/* Details of the permutations and scaling factors applied */
+/* to the right side of A and B. If PR(j) is the index of the */
+/* column interchanged with column j, and DR(j) is the scaling */
+/* factor applied to column j, then */
+/* RSCALE(j) = PR(j) for j = 1,...,ILO-1 */
+/* = DR(j) for j = ILO,...,IHI */
+/* = PR(j) for j = IHI+1,...,N */
+/* The order in which the interchanges are made is N to IHI+1, */
+/* then 1 to ILO-1. */
+
+/* ABNRM (output) DOUBLE PRECISION */
+/* The one-norm of the balanced matrix A. */
+
+/* BBNRM (output) DOUBLE PRECISION */
+/* The one-norm of the balanced matrix B. */
+
+/* RCONDE (output) DOUBLE PRECISION array, dimension (N) */
+/* If SENSE = 'E' or 'B', the reciprocal condition numbers of */
+/* the eigenvalues, stored in consecutive elements of the array. */
+/* If SENSE = 'N' or 'V', RCONDE is not referenced. */
+
+/* RCONDV (output) DOUBLE PRECISION array, dimension (N) */
+/* If JOB = 'V' or 'B', the estimated reciprocal condition */
+/* numbers of the eigenvectors, stored in consecutive elements */
+/* of the array. If the eigenvalues cannot be reordered to */
+/* compute RCONDV(j), RCONDV(j) is set to 0; this can only occur */
+/* when the true value would be very small anyway. */
+/* If SENSE = 'N' or 'E', RCONDV is not referenced. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,2*N). */
+/* If SENSE = 'E', LWORK >= max(1,4*N). */
+/* If SENSE = 'V' or 'B', LWORK >= max(1,2*N*N+2*N). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) REAL array, dimension (lrwork) */
+/* lrwork must be at least max(1,6*N) if BALANC = 'S' or 'B', */
+/* and at least max(1,2*N) otherwise. */
+/* Real workspace. */
+
+/* IWORK (workspace) INTEGER array, dimension (N+2) */
+/* If SENSE = 'E', IWORK is not referenced. */
+
+/* BWORK (workspace) LOGICAL array, dimension (N) */
+/* If SENSE = 'N', BWORK is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1,...,N: */
+/* The QZ iteration failed. No eigenvectors have been */
+/* calculated, but ALPHA(j) and BETA(j) should be correct */
+/* for j=INFO+1,...,N. */
+/* > N: =N+1: other than QZ iteration failed in ZHGEQZ. */
+/* =N+2: error return from ZTGEVC. */
+
+/* Further Details */
+/* =============== */
+
+/* Balancing a matrix pair (A,B) includes, first, permuting rows and */
+/* columns to isolate eigenvalues, second, applying diagonal similarity */
+/* transformation to the rows and columns to make the rows and columns */
+/* as close in norm as possible. The computed reciprocal condition */
+/* numbers correspond to the balanced matrix. Permuting rows and columns */
+/* will not change the condition numbers (in exact arithmetic) but */
+/* diagonal scaling will. For further explanation of balancing, see */
+/* section 4.11.1.2 of LAPACK Users' Guide. */
+
+/* An approximate error bound on the chordal distance between the i-th */
+/* computed generalized eigenvalue w and the corresponding exact */
+/* eigenvalue lambda is */
+
+/* chord(w, lambda) <= EPS * norm(ABNRM, BBNRM) / RCONDE(I) */
+
+/* An approximate error bound for the angle between the i-th computed */
+/* eigenvector VL(i) or VR(i) is given by */
+
+/* EPS * norm(ABNRM, BBNRM) / DIF(i). */
+
+/* For further explanation of the reciprocal condition numbers RCONDE */
+/* and RCONDV, see section 4.11 of LAPACK User's Guide. */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alpha;
+ --beta;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --lscale;
+ --rscale;
+ --rconde;
+ --rcondv;
+ --work;
+ --rwork;
+ --iwork;
+ --bwork;
+
+ /* Function Body */
+ if (lsame_(jobvl, "N")) {
+ ijobvl = 1;
+ ilvl = FALSE_;
+ } else if (lsame_(jobvl, "V")) {
+ ijobvl = 2;
+ ilvl = TRUE_;
+ } else {
+ ijobvl = -1;
+ ilvl = FALSE_;
+ }
+
+ if (lsame_(jobvr, "N")) {
+ ijobvr = 1;
+ ilvr = FALSE_;
+ } else if (lsame_(jobvr, "V")) {
+ ijobvr = 2;
+ ilvr = TRUE_;
+ } else {
+ ijobvr = -1;
+ ilvr = FALSE_;
+ }
+ ilv = ilvl || ilvr;
+
+ noscl = lsame_(balanc, "N") || lsame_(balanc, "P");
+ wantsn = lsame_(sense, "N");
+ wantse = lsame_(sense, "E");
+ wantsv = lsame_(sense, "V");
+ wantsb = lsame_(sense, "B");
+
+/* Test the input arguments */
+
+ *info = 0;
+ lquery = *lwork == -1;
+ if (! (noscl || lsame_(balanc, "S") || lsame_(
+ balanc, "B"))) {
+ *info = -1;
+ } else if (ijobvl <= 0) {
+ *info = -2;
+ } else if (ijobvr <= 0) {
+ *info = -3;
+ } else if (! (wantsn || wantse || wantsb || wantsv)) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldvl < 1 || ilvl && *ldvl < *n) {
+ *info = -13;
+ } else if (*ldvr < 1 || ilvr && *ldvr < *n) {
+ *info = -15;
+ }
+
+/* Compute workspace */
+/* (Note: Comments in the code beginning "Workspace:" describe the */
+/* minimal amount of workspace needed at that point in the code, */
+/* as well as the preferred amount for good performance. */
+/* NB refers to the optimal block size for the immediately */
+/* following subroutine, as returned by ILAENV. The workspace is */
+/* computed assuming ILO = 1 and IHI = N, the worst case.) */
+
+ if (*info == 0) {
+ if (*n == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ } else {
+ minwrk = *n << 1;
+ if (wantse) {
+ minwrk = *n << 2;
+ } else if (wantsv || wantsb) {
+ minwrk = (*n << 1) * (*n + 1);
+ }
+ maxwrk = minwrk;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "ZGEQRF", " ", n, &
+ c__1, n, &c__0);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "ZUNMQR", " ", n, &
+ c__1, n, &c__0);
+ maxwrk = max(i__1,i__2);
+ if (ilvl) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "ZUNGQR",
+ " ", n, &c__1, n, &c__0);
+ maxwrk = max(i__1,i__2);
+ }
+ }
+ work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -25;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGGEVX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = dlamch_("P");
+ smlnum = dlamch_("S");
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = zlange_("M", n, n, &a[a_offset], lda, &rwork[1]);
+ ilascl = FALSE_;
+ if (anrm > 0. && anrm < smlnum) {
+ anrmto = smlnum;
+ ilascl = TRUE_;
+ } else if (anrm > bignum) {
+ anrmto = bignum;
+ ilascl = TRUE_;
+ }
+ if (ilascl) {
+ zlascl_("G", &c__0, &c__0, &anrm, &anrmto, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/* Scale B if max element outside range [SMLNUM,BIGNUM] */
+
+ bnrm = zlange_("M", n, n, &b[b_offset], ldb, &rwork[1]);
+ ilbscl = FALSE_;
+ if (bnrm > 0. && bnrm < smlnum) {
+ bnrmto = smlnum;
+ ilbscl = TRUE_;
+ } else if (bnrm > bignum) {
+ bnrmto = bignum;
+ ilbscl = TRUE_;
+ }
+ if (ilbscl) {
+ zlascl_("G", &c__0, &c__0, &bnrm, &bnrmto, n, n, &b[b_offset], ldb, &
+ ierr);
+ }
+
+/* Permute and/or balance the matrix pair (A,B) */
+/* (Real Workspace: need 6*N if BALANC = 'S' or 'B', 1 otherwise) */
+
+ zggbal_(balanc, n, &a[a_offset], lda, &b[b_offset], ldb, ilo, ihi, &
+ lscale[1], &rscale[1], &rwork[1], &ierr);
+
+/* Compute ABNRM and BBNRM */
+
+ *abnrm = zlange_("1", n, n, &a[a_offset], lda, &rwork[1]);
+ if (ilascl) {
+ rwork[1] = *abnrm;
+ dlascl_("G", &c__0, &c__0, &anrmto, &anrm, &c__1, &c__1, &rwork[1], &
+ c__1, &ierr);
+ *abnrm = rwork[1];
+ }
+
+ *bbnrm = zlange_("1", n, n, &b[b_offset], ldb, &rwork[1]);
+ if (ilbscl) {
+ rwork[1] = *bbnrm;
+ dlascl_("G", &c__0, &c__0, &bnrmto, &bnrm, &c__1, &c__1, &rwork[1], &
+ c__1, &ierr);
+ *bbnrm = rwork[1];
+ }
+
+/* Reduce B to triangular form (QR decomposition of B) */
+/* (Complex Workspace: need N, prefer N*NB ) */
+
+ irows = *ihi + 1 - *ilo;
+ if (ilv || ! wantsn) {
+ icols = *n + 1 - *ilo;
+ } else {
+ icols = irows;
+ }
+ itau = 1;
+ iwrk = itau + irows;
+ i__1 = *lwork + 1 - iwrk;
+ zgeqrf_(&irows, &icols, &b[*ilo + *ilo * b_dim1], ldb, &work[itau], &work[
+ iwrk], &i__1, &ierr);
+
+/* Apply the unitary transformation to A */
+/* (Complex Workspace: need N, prefer N*NB) */
+
+ i__1 = *lwork + 1 - iwrk;
+ zunmqr_("L", "C", &irows, &icols, &irows, &b[*ilo + *ilo * b_dim1], ldb, &
+ work[itau], &a[*ilo + *ilo * a_dim1], lda, &work[iwrk], &i__1, &
+ ierr);
+
+/* Initialize VL and/or VR */
+/* (Workspace: need N, prefer N*NB) */
+
+ if (ilvl) {
+ zlaset_("Full", n, n, &c_b1, &c_b2, &vl[vl_offset], ldvl);
+ if (irows > 1) {
+ i__1 = irows - 1;
+ i__2 = irows - 1;
+ zlacpy_("L", &i__1, &i__2, &b[*ilo + 1 + *ilo * b_dim1], ldb, &vl[
+ *ilo + 1 + *ilo * vl_dim1], ldvl);
+ }
+ i__1 = *lwork + 1 - iwrk;
+ zungqr_(&irows, &irows, &irows, &vl[*ilo + *ilo * vl_dim1], ldvl, &
+ work[itau], &work[iwrk], &i__1, &ierr);
+ }
+
+ if (ilvr) {
+ zlaset_("Full", n, n, &c_b1, &c_b2, &vr[vr_offset], ldvr);
+ }
+
+/* Reduce to generalized Hessenberg form */
+/* (Workspace: none needed) */
+
+ if (ilv || ! wantsn) {
+
+/* Eigenvectors requested -- work on whole matrix. */
+
+ zgghrd_(jobvl, jobvr, n, ilo, ihi, &a[a_offset], lda, &b[b_offset],
+ ldb, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, &ierr);
+ } else {
+ zgghrd_("N", "N", &irows, &c__1, &irows, &a[*ilo + *ilo * a_dim1],
+ lda, &b[*ilo + *ilo * b_dim1], ldb, &vl[vl_offset], ldvl, &vr[
+ vr_offset], ldvr, &ierr);
+ }
+
+/* Perform QZ algorithm (Compute eigenvalues, and optionally, the */
+/* Schur forms and Schur vectors) */
+/* (Complex Workspace: need N) */
+/* (Real Workspace: need N) */
+
+ iwrk = itau;
+ if (ilv || ! wantsn) {
+ *(unsigned char *)chtemp = 'S';
+ } else {
+ *(unsigned char *)chtemp = 'E';
+ }
+
+ i__1 = *lwork + 1 - iwrk;
+ zhgeqz_(chtemp, jobvl, jobvr, n, ilo, ihi, &a[a_offset], lda, &b[b_offset]
+, ldb, &alpha[1], &beta[1], &vl[vl_offset], ldvl, &vr[vr_offset],
+ ldvr, &work[iwrk], &i__1, &rwork[1], &ierr);
+ if (ierr != 0) {
+ if (ierr > 0 && ierr <= *n) {
+ *info = ierr;
+ } else if (ierr > *n && ierr <= *n << 1) {
+ *info = ierr - *n;
+ } else {
+ *info = *n + 1;
+ }
+ goto L90;
+ }
+
+/* Compute Eigenvectors and estimate condition numbers if desired */
+/* ZTGEVC: (Complex Workspace: need 2*N ) */
+/* (Real Workspace: need 2*N ) */
+/* ZTGSNA: (Complex Workspace: need 2*N*N if SENSE='V' or 'B') */
+/* (Integer Workspace: need N+2 ) */
+
+ if (ilv || ! wantsn) {
+ if (ilv) {
+ if (ilvl) {
+ if (ilvr) {
+ *(unsigned char *)chtemp = 'B';
+ } else {
+ *(unsigned char *)chtemp = 'L';
+ }
+ } else {
+ *(unsigned char *)chtemp = 'R';
+ }
+
+ ztgevc_(chtemp, "B", ldumma, n, &a[a_offset], lda, &b[b_offset],
+ ldb, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, n, &in, &
+ work[iwrk], &rwork[1], &ierr);
+ if (ierr != 0) {
+ *info = *n + 2;
+ goto L90;
+ }
+ }
+
+ if (! wantsn) {
+
+/* compute eigenvectors (DTGEVC) and estimate condition */
+/* numbers (DTGSNA). Note that the definition of the condition */
+/* number is not invariant under transformation (u,v) to */
+/* (Q*u, Z*v), where (u,v) are eigenvectors of the generalized */
+/* Schur form (S,T), Q and Z are orthogonal matrices. In order */
+/* to avoid using extra 2*N*N workspace, we have to */
+/* re-calculate eigenvectors and estimate the condition numbers */
+/* one at a time. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ bwork[j] = FALSE_;
+/* L10: */
+ }
+ bwork[i__] = TRUE_;
+
+ iwrk = *n + 1;
+ iwrk1 = iwrk + *n;
+
+ if (wantse || wantsb) {
+ ztgevc_("B", "S", &bwork[1], n, &a[a_offset], lda, &b[
+ b_offset], ldb, &work[1], n, &work[iwrk], n, &
+ c__1, &m, &work[iwrk1], &rwork[1], &ierr);
+ if (ierr != 0) {
+ *info = *n + 2;
+ goto L90;
+ }
+ }
+
+ i__2 = *lwork - iwrk1 + 1;
+ ztgsna_(sense, "S", &bwork[1], n, &a[a_offset], lda, &b[
+ b_offset], ldb, &work[1], n, &work[iwrk], n, &rconde[
+ i__], &rcondv[i__], &c__1, &m, &work[iwrk1], &i__2, &
+ iwork[1], &ierr);
+
+/* L20: */
+ }
+ }
+ }
+
+/* Undo balancing on VL and VR and normalization */
+/* (Workspace: none needed) */
+
+ if (ilvl) {
+ zggbak_(balanc, "L", n, ilo, ihi, &lscale[1], &rscale[1], n, &vl[
+ vl_offset], ldvl, &ierr);
+
+ i__1 = *n;
+ for (jc = 1; jc <= i__1; ++jc) {
+ temp = 0.;
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ i__3 = jr + jc * vl_dim1;
+ d__3 = temp, d__4 = (d__1 = vl[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&vl[jr + jc * vl_dim1]), abs(d__2));
+ temp = max(d__3,d__4);
+/* L30: */
+ }
+ if (temp < smlnum) {
+ goto L50;
+ }
+ temp = 1. / temp;
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ i__3 = jr + jc * vl_dim1;
+ i__4 = jr + jc * vl_dim1;
+ z__1.r = temp * vl[i__4].r, z__1.i = temp * vl[i__4].i;
+ vl[i__3].r = z__1.r, vl[i__3].i = z__1.i;
+/* L40: */
+ }
+L50:
+ ;
+ }
+ }
+
+ if (ilvr) {
+ zggbak_(balanc, "R", n, ilo, ihi, &lscale[1], &rscale[1], n, &vr[
+ vr_offset], ldvr, &ierr);
+ i__1 = *n;
+ for (jc = 1; jc <= i__1; ++jc) {
+ temp = 0.;
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+/* Computing MAX */
+ i__3 = jr + jc * vr_dim1;
+ d__3 = temp, d__4 = (d__1 = vr[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&vr[jr + jc * vr_dim1]), abs(d__2));
+ temp = max(d__3,d__4);
+/* L60: */
+ }
+ if (temp < smlnum) {
+ goto L80;
+ }
+ temp = 1. / temp;
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ i__3 = jr + jc * vr_dim1;
+ i__4 = jr + jc * vr_dim1;
+ z__1.r = temp * vr[i__4].r, z__1.i = temp * vr[i__4].i;
+ vr[i__3].r = z__1.r, vr[i__3].i = z__1.i;
+/* L70: */
+ }
+L80:
+ ;
+ }
+ }
+
+/* Undo scaling if necessary */
+
+ if (ilascl) {
+ zlascl_("G", &c__0, &c__0, &anrmto, &anrm, n, &c__1, &alpha[1], n, &
+ ierr);
+ }
+
+ if (ilbscl) {
+ zlascl_("G", &c__0, &c__0, &bnrmto, &bnrm, n, &c__1, &beta[1], n, &
+ ierr);
+ }
+
+L90:
+ work[1].r = (doublereal) maxwrk, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZGGEVX */
+
+} /* zggevx_ */
diff --git a/contrib/libs/clapack/zggglm.c b/contrib/libs/clapack/zggglm.c
new file mode 100644
index 0000000000..a6e9095977
--- /dev/null
+++ b/contrib/libs/clapack/zggglm.c
@@ -0,0 +1,337 @@
+/* zggglm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zggglm_(integer *n, integer *m, integer *p,
+ doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb,
+ doublecomplex *d__, doublecomplex *x, doublecomplex *y, doublecomplex
+ *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__, nb, np, nb1, nb2, nb3, nb4, lopt;
+ extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *),
+ zcopy_(integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int zggqrf_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *)
+ ;
+ integer lwkmin, lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int zunmqr_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *), zunmrq_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *), ztrtrs_(char *, char *, char *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+ integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGGGLM solves a general Gauss-Markov linear model (GLM) problem: */
+
+/* minimize || y ||_2 subject to d = A*x + B*y */
+/* x */
+
+/* where A is an N-by-M matrix, B is an N-by-P matrix, and d is a */
+/* given N-vector. It is assumed that M <= N <= M+P, and */
+
+/* rank(A) = M and rank( A B ) = N. */
+
+/* Under these assumptions, the constrained equation is always */
+/* consistent, and there is a unique solution x and a minimal 2-norm */
+/* solution y, which is obtained using a generalized QR factorization */
+/* of the matrices (A, B) given by */
+
+/* A = Q*(R), B = Q*T*Z. */
+/* (0) */
+
+/* In particular, if matrix B is square nonsingular, then the problem */
+/* GLM is equivalent to the following weighted linear least squares */
+/* problem */
+
+/* minimize || inv(B)*(d-A*x) ||_2 */
+/* x */
+
+/* where inv(B) denotes the inverse of B. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of rows of the matrices A and B. N >= 0. */
+
+/* M (input) INTEGER */
+/* The number of columns of the matrix A. 0 <= M <= N. */
+
+/* P (input) INTEGER */
+/* The number of columns of the matrix B. P >= N-M. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,M) */
+/* On entry, the N-by-M matrix A. */
+/* On exit, the upper triangular part of the array A contains */
+/* the M-by-M upper triangular matrix R. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,P) */
+/* On entry, the N-by-P matrix B. */
+/* On exit, if N <= P, the upper triangle of the subarray */
+/* B(1:N,P-N+1:P) contains the N-by-N upper triangular matrix T; */
+/* if N > P, the elements on and above the (N-P)th subdiagonal */
+/* contain the N-by-P upper trapezoidal matrix T. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* D (input/output) COMPLEX*16 array, dimension (N) */
+/* On entry, D is the left hand side of the GLM equation. */
+/* On exit, D is destroyed. */
+
+/* X (output) COMPLEX*16 array, dimension (M) */
+/* Y (output) COMPLEX*16 array, dimension (P) */
+/* On exit, X and Y are the solutions of the GLM problem. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N+M+P). */
+/* For optimum performance, LWORK >= M+min(N,P)+max(N,P)*NB, */
+/* where NB is an upper bound for the optimal blocksizes for */
+/* ZGEQRF, ZGERQF, ZUNMQR and ZUNMRQ. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1: the upper triangular factor R associated with A in the */
+/* generalized QR factorization of the pair (A, B) is */
+/* singular, so that rank(A) < M; the least squares */
+/* solution could not be computed. */
+/* = 2: the bottom (N-M) by (N-M) part of the upper trapezoidal */
+/* factor T associated with B in the generalized QR */
+/* factorization of the pair (A, B) is singular, so that */
+/* rank( A B ) < N; the least squares solution could not */
+/* be computed. */
+
+/* =================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --d__;
+ --x;
+ --y;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ np = min(*n,*p);
+ lquery = *lwork == -1;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*m < 0 || *m > *n) {
+ *info = -2;
+ } else if (*p < 0 || *p < *n - *m) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+
+/* Calculate workspace */
+
+ if (*info == 0) {
+ if (*n == 0) {
+ lwkmin = 1;
+ lwkopt = 1;
+ } else {
+ nb1 = ilaenv_(&c__1, "ZGEQRF", " ", n, m, &c_n1, &c_n1);
+ nb2 = ilaenv_(&c__1, "ZGERQF", " ", n, m, &c_n1, &c_n1);
+ nb3 = ilaenv_(&c__1, "ZUNMQR", " ", n, m, p, &c_n1);
+ nb4 = ilaenv_(&c__1, "ZUNMRQ", " ", n, m, p, &c_n1);
+/* Computing MAX */
+ i__1 = max(nb1,nb2), i__1 = max(i__1,nb3);
+ nb = max(i__1,nb4);
+ lwkmin = *m + *n + *p;
+ lwkopt = *m + np + max(*n,*p) * nb;
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+ if (*lwork < lwkmin && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGGGLM", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Compute the GQR factorization of matrices A and B: */
+
+/* Q'*A = ( R11 ) M, Q'*B*Z' = ( T11 T12 ) M */
+/* ( 0 ) N-M ( 0 T22 ) N-M */
+/* M M+P-N N-M */
+
+/* where R11 and T22 are upper triangular, and Q and Z are */
+/* unitary. */
+
+ i__1 = *lwork - *m - np;
+ zggqrf_(n, m, p, &a[a_offset], lda, &work[1], &b[b_offset], ldb, &work[*m
+ + 1], &work[*m + np + 1], &i__1, info);
+ i__1 = *m + np + 1;
+ lopt = (integer) work[i__1].r;
+
+/* Update left-hand-side vector d = Q'*d = ( d1 ) M */
+/* ( d2 ) N-M */
+
+ i__1 = max(1,*n);
+ i__2 = *lwork - *m - np;
+ zunmqr_("Left", "Conjugate transpose", n, &c__1, m, &a[a_offset], lda, &
+ work[1], &d__[1], &i__1, &work[*m + np + 1], &i__2, info);
+/* Computing MAX */
+ i__3 = *m + np + 1;
+ i__1 = lopt, i__2 = (integer) work[i__3].r;
+ lopt = max(i__1,i__2);
+
+/* Solve T22*y2 = d2 for y2 */
+
+ if (*n > *m) {
+ i__1 = *n - *m;
+ i__2 = *n - *m;
+ ztrtrs_("Upper", "No transpose", "Non unit", &i__1, &c__1, &b[*m + 1
+ + (*m + *p - *n + 1) * b_dim1], ldb, &d__[*m + 1], &i__2,
+ info);
+
+ if (*info > 0) {
+ *info = 1;
+ return 0;
+ }
+
+ i__1 = *n - *m;
+ zcopy_(&i__1, &d__[*m + 1], &c__1, &y[*m + *p - *n + 1], &c__1);
+ }
+
+/* Set y1 = 0 */
+
+ i__1 = *m + *p - *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ y[i__2].r = 0., y[i__2].i = 0.;
+/* L10: */
+ }
+
+/* Update d1 = d1 - T12*y2 */
+
+ i__1 = *n - *m;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", m, &i__1, &z__1, &b[(*m + *p - *n + 1) * b_dim1 +
+ 1], ldb, &y[*m + *p - *n + 1], &c__1, &c_b2, &d__[1], &c__1);
+
+/* Solve triangular system: R11*x = d1 */
+
+ if (*m > 0) {
+ ztrtrs_("Upper", "No Transpose", "Non unit", m, &c__1, &a[a_offset],
+ lda, &d__[1], m, info);
+
+ if (*info > 0) {
+ *info = 2;
+ return 0;
+ }
+
+/* Copy D to X */
+
+ zcopy_(m, &d__[1], &c__1, &x[1], &c__1);
+ }
+
+/* Backward transformation y = Z'*y */
+
+/* Computing MAX */
+ i__1 = 1, i__2 = *n - *p + 1;
+ i__3 = max(1,*p);
+ i__4 = *lwork - *m - np;
+ zunmrq_("Left", "Conjugate transpose", p, &c__1, &np, &b[max(i__1, i__2)+
+ b_dim1], ldb, &work[*m + 1], &y[1], &i__3, &work[*m + np + 1], &
+ i__4, info);
+/* Computing MAX */
+ i__4 = *m + np + 1;
+ i__2 = lopt, i__3 = (integer) work[i__4].r;
+ i__1 = *m + np + max(i__2,i__3);
+ work[1].r = (doublereal) i__1, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZGGGLM */
+
+} /* zggglm_ */
diff --git a/contrib/libs/clapack/zgghrd.c b/contrib/libs/clapack/zgghrd.c
new file mode 100644
index 0000000000..6ad8eb9533
--- /dev/null
+++ b/contrib/libs/clapack/zgghrd.c
@@ -0,0 +1,336 @@
+/* zgghrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static doublecomplex c_b2 = {0.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zgghrd_(char *compq, char *compz, integer *n, integer *
+ ilo, integer *ihi, doublecomplex *a, integer *lda, doublecomplex *b,
+ integer *ldb, doublecomplex *q, integer *ldq, doublecomplex *z__,
+ integer *ldz, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1,
+ z_offset, i__1, i__2, i__3;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ doublereal c__;
+ doublecomplex s;
+ logical ilq, ilz;
+ integer jcol, jrow;
+ extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublereal *, doublecomplex *);
+ extern logical lsame_(char *, char *);
+ doublecomplex ctemp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ integer icompq, icompz;
+ extern /* Subroutine */ int zlaset_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, doublecomplex *, integer *), zlartg_(doublecomplex *, doublecomplex *, doublereal *,
+ doublecomplex *, doublecomplex *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGGHRD reduces a pair of complex matrices (A,B) to generalized upper */
+/* Hessenberg form using unitary transformations, where A is a */
+/* general matrix and B is upper triangular. The form of the */
+/* generalized eigenvalue problem is */
+/* A*x = lambda*B*x, */
+/* and B is typically made upper triangular by computing its QR */
+/* factorization and moving the unitary matrix Q to the left side */
+/* of the equation. */
+
+/* This subroutine simultaneously reduces A to a Hessenberg matrix H: */
+/* Q**H*A*Z = H */
+/* and transforms B to another upper triangular matrix T: */
+/* Q**H*B*Z = T */
+/* in order to reduce the problem to its standard form */
+/* H*y = lambda*T*y */
+/* where y = Z**H*x. */
+
+/* The unitary matrices Q and Z are determined as products of Givens */
+/* rotations. They may either be formed explicitly, or they may be */
+/* postmultiplied into input matrices Q1 and Z1, so that */
+/* Q1 * A * Z1**H = (Q1*Q) * H * (Z1*Z)**H */
+/* Q1 * B * Z1**H = (Q1*Q) * T * (Z1*Z)**H */
+/* If Q1 is the unitary matrix from the QR factorization of B in the */
+/* original equation A*x = lambda*B*x, then ZGGHRD reduces the original */
+/* problem to generalized Hessenberg form. */
+
+/* Arguments */
+/* ========= */
+
+/* COMPQ (input) CHARACTER*1 */
+/* = 'N': do not compute Q; */
+/* = 'I': Q is initialized to the unit matrix, and the */
+/* unitary matrix Q is returned; */
+/* = 'V': Q must contain a unitary matrix Q1 on entry, */
+/* and the product Q1*Q is returned. */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': do not compute Z; */
+/* = 'I': Z is initialized to the unit matrix, and the */
+/* unitary matrix Z is returned; */
+/* = 'V': Z must contain a unitary matrix Z1 on entry, */
+/* and the product Z1*Z is returned. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* ILO and IHI mark the rows and columns of A which are to be */
+/* reduced. It is assumed that A is already upper triangular */
+/* in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are */
+/* normally set by a previous call to ZGGBAL; otherwise they */
+/* should be set to 1 and N respectively. */
+/* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA, N) */
+/* On entry, the N-by-N general matrix to be reduced. */
+/* On exit, the upper triangle and the first subdiagonal of A */
+/* are overwritten with the upper Hessenberg matrix H, and the */
+/* rest is set to zero. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB, N) */
+/* On entry, the N-by-N upper triangular matrix B. */
+/* On exit, the upper triangular matrix T = Q**H B Z. The */
+/* elements below the diagonal are set to zero. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* Q (input/output) COMPLEX*16 array, dimension (LDQ, N) */
+/* On entry, if COMPQ = 'V', the unitary matrix Q1, typically */
+/* from the QR factorization of B. */
+/* On exit, if COMPQ='I', the unitary matrix Q, and if */
+/* COMPQ = 'V', the product Q1*Q. */
+/* Not referenced if COMPQ='N'. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. */
+/* LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise. */
+
+/* Z (input/output) COMPLEX*16 array, dimension (LDZ, N) */
+/* On entry, if COMPZ = 'V', the unitary matrix Z1. */
+/* On exit, if COMPZ='I', the unitary matrix Z, and if */
+/* COMPZ = 'V', the product Z1*Z. */
+/* Not referenced if COMPZ='N'. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. */
+/* LDZ >= N if COMPZ='V' or 'I'; LDZ >= 1 otherwise. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* This routine reduces A to Hessenberg and B to triangular form by */
+/* an unblocked reduction, as described in _Matrix_Computations_, */
+/* by Golub and van Loan (Johns Hopkins Press). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode COMPQ */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+
+ /* Function Body */
+ if (lsame_(compq, "N")) {
+ ilq = FALSE_;
+ icompq = 1;
+ } else if (lsame_(compq, "V")) {
+ ilq = TRUE_;
+ icompq = 2;
+ } else if (lsame_(compq, "I")) {
+ ilq = TRUE_;
+ icompq = 3;
+ } else {
+ icompq = 0;
+ }
+
+/* Decode COMPZ */
+
+ if (lsame_(compz, "N")) {
+ ilz = FALSE_;
+ icompz = 1;
+ } else if (lsame_(compz, "V")) {
+ ilz = TRUE_;
+ icompz = 2;
+ } else if (lsame_(compz, "I")) {
+ ilz = TRUE_;
+ icompz = 3;
+ } else {
+ icompz = 0;
+ }
+
+/* Test the input parameters. */
+
+ *info = 0;
+ if (icompq <= 0) {
+ *info = -1;
+ } else if (icompz <= 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ilo < 1) {
+ *info = -4;
+ } else if (*ihi > *n || *ihi < *ilo - 1) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (ilq && *ldq < *n || *ldq < 1) {
+ *info = -11;
+ } else if (ilz && *ldz < *n || *ldz < 1) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGGHRD", &i__1);
+ return 0;
+ }
+
+/* Initialize Q and Z if desired. */
+
+ if (icompq == 3) {
+ zlaset_("Full", n, n, &c_b2, &c_b1, &q[q_offset], ldq);
+ }
+ if (icompz == 3) {
+ zlaset_("Full", n, n, &c_b2, &c_b1, &z__[z_offset], ldz);
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 1) {
+ return 0;
+ }
+
+/* Zero out lower triangle of B */
+
+ i__1 = *n - 1;
+ for (jcol = 1; jcol <= i__1; ++jcol) {
+ i__2 = *n;
+ for (jrow = jcol + 1; jrow <= i__2; ++jrow) {
+ i__3 = jrow + jcol * b_dim1;
+ b[i__3].r = 0., b[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+
+/* Reduce A and B */
+
+ i__1 = *ihi - 2;
+ for (jcol = *ilo; jcol <= i__1; ++jcol) {
+
+ i__2 = jcol + 2;
+ for (jrow = *ihi; jrow >= i__2; --jrow) {
+
+/* Step 1: rotate rows JROW-1, JROW to kill A(JROW,JCOL) */
+
+ i__3 = jrow - 1 + jcol * a_dim1;
+ ctemp.r = a[i__3].r, ctemp.i = a[i__3].i;
+ zlartg_(&ctemp, &a[jrow + jcol * a_dim1], &c__, &s, &a[jrow - 1 +
+ jcol * a_dim1]);
+ i__3 = jrow + jcol * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+ i__3 = *n - jcol;
+ zrot_(&i__3, &a[jrow - 1 + (jcol + 1) * a_dim1], lda, &a[jrow + (
+ jcol + 1) * a_dim1], lda, &c__, &s);
+ i__3 = *n + 2 - jrow;
+ zrot_(&i__3, &b[jrow - 1 + (jrow - 1) * b_dim1], ldb, &b[jrow + (
+ jrow - 1) * b_dim1], ldb, &c__, &s);
+ if (ilq) {
+ d_cnjg(&z__1, &s);
+ zrot_(n, &q[(jrow - 1) * q_dim1 + 1], &c__1, &q[jrow * q_dim1
+ + 1], &c__1, &c__, &z__1);
+ }
+
+/* Step 2: rotate columns JROW, JROW-1 to kill B(JROW,JROW-1) */
+
+ i__3 = jrow + jrow * b_dim1;
+ ctemp.r = b[i__3].r, ctemp.i = b[i__3].i;
+ zlartg_(&ctemp, &b[jrow + (jrow - 1) * b_dim1], &c__, &s, &b[jrow
+ + jrow * b_dim1]);
+ i__3 = jrow + (jrow - 1) * b_dim1;
+ b[i__3].r = 0., b[i__3].i = 0.;
+ zrot_(ihi, &a[jrow * a_dim1 + 1], &c__1, &a[(jrow - 1) * a_dim1 +
+ 1], &c__1, &c__, &s);
+ i__3 = jrow - 1;
+ zrot_(&i__3, &b[jrow * b_dim1 + 1], &c__1, &b[(jrow - 1) * b_dim1
+ + 1], &c__1, &c__, &s);
+ if (ilz) {
+ zrot_(n, &z__[jrow * z_dim1 + 1], &c__1, &z__[(jrow - 1) *
+ z_dim1 + 1], &c__1, &c__, &s);
+ }
+/* L30: */
+ }
+/* L40: */
+ }
+
+ return 0;
+
+/* End of ZGGHRD */
+
+} /* zgghrd_ */
diff --git a/contrib/libs/clapack/zgglse.c b/contrib/libs/clapack/zgglse.c
new file mode 100644
index 0000000000..065663e25d
--- /dev/null
+++ b/contrib/libs/clapack/zgglse.c
@@ -0,0 +1,346 @@
+/* zgglse.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zgglse_(integer *m, integer *n, integer *p,
+ doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb,
+ doublecomplex *c__, doublecomplex *d__, doublecomplex *x,
+ doublecomplex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer nb, mn, nr, nb1, nb2, nb3, nb4, lopt;
+ extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *),
+ zcopy_(integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *), zaxpy_(integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *, integer *), ztrmv_(char *, char *,
+ char *, integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int zggrqf_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *)
+ ;
+ integer lwkmin, lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int zunmqr_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *), zunmrq_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *), ztrtrs_(char *, char *, char *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+ integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* February 2007 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGGLSE solves the linear equality-constrained least squares (LSE) */
+/* problem: */
+
+/* minimize || c - A*x ||_2 subject to B*x = d */
+
+/* where A is an M-by-N matrix, B is a P-by-N matrix, c is a given */
+/* M-vector, and d is a given P-vector. It is assumed that */
+/* P <= N <= M+P, and */
+
+/* rank(B) = P and rank( ( A ) ) = N. */
+/* ( ( B ) ) */
+
+/* These conditions ensure that the LSE problem has a unique solution, */
+/* which is obtained using a generalized RQ factorization of the */
+/* matrices (B, A) given by */
+
+/* B = (0 R)*Q, A = Z*T*Q. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrices A and B. N >= 0. */
+
+/* P (input) INTEGER */
+/* The number of rows of the matrix B. 0 <= P <= N <= M+P. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(M,N)-by-N upper trapezoidal matrix T. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,N) */
+/* On entry, the P-by-N matrix B. */
+/* On exit, the upper triangle of the subarray B(1:P,N-P+1:N) */
+/* contains the P-by-P upper triangular matrix R. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,P). */
+
+/* C (input/output) COMPLEX*16 array, dimension (M) */
+/* On entry, C contains the right hand side vector for the */
+/* least squares part of the LSE problem. */
+/* On exit, the residual sum of squares for the solution */
+/* is given by the sum of squares of elements N-P+1 to M of */
+/* vector C. */
+
+/* D (input/output) COMPLEX*16 array, dimension (P) */
+/* On entry, D contains the right hand side vector for the */
+/* constrained equation. */
+/* On exit, D is destroyed. */
+
+/* X (output) COMPLEX*16 array, dimension (N) */
+/* On exit, X is the solution of the LSE problem. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M+N+P). */
+/* For optimum performance LWORK >= P+min(M,N)+max(M,N)*NB, */
+/* where NB is an upper bound for the optimal blocksizes for */
+/* ZGEQRF, CGERQF, ZUNMQR and CUNMRQ. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1: the upper triangular factor R associated with B in the */
+/* generalized RQ factorization of the pair (B, A) is */
+/* singular, so that rank(B) < P; the least squares */
+/* solution could not be computed. */
+/* = 2: the (N-P) by (N-P) part of the upper trapezoidal factor */
+/* T associated with A in the generalized RQ factorization */
+/* of the pair (B, A) is singular, so that */
+/* rank( (A) ) < N; the least squares solution could not */
+/* ( (B) ) */
+/* be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --c__;
+ --d__;
+ --x;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ mn = min(*m,*n);
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*p < 0 || *p > *n || *p < *n - *m) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldb < max(1,*p)) {
+ *info = -7;
+ }
+
+/* Calculate workspace */
+
+ if (*info == 0) {
+ if (*n == 0) {
+ lwkmin = 1;
+ lwkopt = 1;
+ } else {
+ nb1 = ilaenv_(&c__1, "ZGEQRF", " ", m, n, &c_n1, &c_n1);
+ nb2 = ilaenv_(&c__1, "ZGERQF", " ", m, n, &c_n1, &c_n1);
+ nb3 = ilaenv_(&c__1, "ZUNMQR", " ", m, n, p, &c_n1);
+ nb4 = ilaenv_(&c__1, "ZUNMRQ", " ", m, n, p, &c_n1);
+/* Computing MAX */
+ i__1 = max(nb1,nb2), i__1 = max(i__1,nb3);
+ nb = max(i__1,nb4);
+ lwkmin = *m + *n + *p;
+ lwkopt = *p + mn + max(*m,*n) * nb;
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+ if (*lwork < lwkmin && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGGLSE", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Compute the GRQ factorization of matrices B and A: */
+
+/* B*Q' = ( 0 T12 ) P Z'*A*Q' = ( R11 R12 ) N-P */
+/* N-P P ( 0 R22 ) M+P-N */
+/* N-P P */
+
+/* where T12 and R11 are upper triangular, and Q and Z are */
+/* unitary. */
+
+ i__1 = *lwork - *p - mn;
+ zggrqf_(p, m, n, &b[b_offset], ldb, &work[1], &a[a_offset], lda, &work[*p
+ + 1], &work[*p + mn + 1], &i__1, info);
+ i__1 = *p + mn + 1;
+ lopt = (integer) work[i__1].r;
+
+/* Update c = Z'*c = ( c1 ) N-P */
+/* ( c2 ) M+P-N */
+
+ i__1 = max(1,*m);
+ i__2 = *lwork - *p - mn;
+ zunmqr_("Left", "Conjugate Transpose", m, &c__1, &mn, &a[a_offset], lda, &
+ work[*p + 1], &c__[1], &i__1, &work[*p + mn + 1], &i__2, info);
+/* Computing MAX */
+ i__3 = *p + mn + 1;
+ i__1 = lopt, i__2 = (integer) work[i__3].r;
+ lopt = max(i__1,i__2);
+
+/* Solve T12*x2 = d for x2 */
+
+ if (*p > 0) {
+ ztrtrs_("Upper", "No transpose", "Non-unit", p, &c__1, &b[(*n - *p +
+ 1) * b_dim1 + 1], ldb, &d__[1], p, info);
+
+ if (*info > 0) {
+ *info = 1;
+ return 0;
+ }
+
+/* Put the solution in X */
+
+ zcopy_(p, &d__[1], &c__1, &x[*n - *p + 1], &c__1);
+
+/* Update c1 */
+
+ i__1 = *n - *p;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__1, p, &z__1, &a[(*n - *p + 1) * a_dim1 + 1]
+, lda, &d__[1], &c__1, &c_b1, &c__[1], &c__1);
+ }
+
+/* Solve R11*x1 = c1 for x1 */
+
+ if (*n > *p) {
+ i__1 = *n - *p;
+ i__2 = *n - *p;
+ ztrtrs_("Upper", "No transpose", "Non-unit", &i__1, &c__1, &a[
+ a_offset], lda, &c__[1], &i__2, info);
+
+ if (*info > 0) {
+ *info = 2;
+ return 0;
+ }
+
+/* Put the solutions in X */
+
+ i__1 = *n - *p;
+ zcopy_(&i__1, &c__[1], &c__1, &x[1], &c__1);
+ }
+
+/* Compute the residual vector: */
+
+ if (*m < *n) {
+ nr = *m + *p - *n;
+ if (nr > 0) {
+ i__1 = *n - *m;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &nr, &i__1, &z__1, &a[*n - *p + 1 + (*m +
+ 1) * a_dim1], lda, &d__[nr + 1], &c__1, &c_b1, &c__[*n - *
+ p + 1], &c__1);
+ }
+ } else {
+ nr = *p;
+ }
+ if (nr > 0) {
+ ztrmv_("Upper", "No transpose", "Non unit", &nr, &a[*n - *p + 1 + (*n
+ - *p + 1) * a_dim1], lda, &d__[1], &c__1);
+ z__1.r = -1., z__1.i = -0.;
+ zaxpy_(&nr, &z__1, &d__[1], &c__1, &c__[*n - *p + 1], &c__1);
+ }
+
+/* Backward transformation x = Q'*x */
+
+ i__1 = *lwork - *p - mn;
+ zunmrq_("Left", "Conjugate Transpose", n, &c__1, p, &b[b_offset], ldb, &
+ work[1], &x[1], n, &work[*p + mn + 1], &i__1, info);
+/* Computing MAX */
+ i__4 = *p + mn + 1;
+ i__2 = lopt, i__3 = (integer) work[i__4].r;
+ i__1 = *p + mn + max(i__2,i__3);
+ work[1].r = (doublereal) i__1, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZGGLSE */
+
+} /* zgglse_ */
diff --git a/contrib/libs/clapack/zggqrf.c b/contrib/libs/clapack/zggqrf.c
new file mode 100644
index 0000000000..dcd18a8f1d
--- /dev/null
+++ b/contrib/libs/clapack/zggqrf.c
@@ -0,0 +1,270 @@
+/* zggqrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zggqrf_(integer *n, integer *m, integer *p,
+ doublecomplex *a, integer *lda, doublecomplex *taua, doublecomplex *b,
+ integer *ldb, doublecomplex *taub, doublecomplex *work, integer *
+ lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer nb, nb1, nb2, nb3, lopt;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int zgeqrf_(integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *
+), zgerqf_(integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, doublecomplex *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int zunmqr_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGGQRF computes a generalized QR factorization of an N-by-M matrix A */
+/* and an N-by-P matrix B: */
+
+/* A = Q*R, B = Q*T*Z, */
+
+/* where Q is an N-by-N unitary matrix, Z is a P-by-P unitary matrix, */
+/* and R and T assume one of the forms: */
+
+/* if N >= M, R = ( R11 ) M , or if N < M, R = ( R11 R12 ) N, */
+/* ( 0 ) N-M N M-N */
+/* M */
+
+/* where R11 is upper triangular, and */
+
+/* if N <= P, T = ( 0 T12 ) N, or if N > P, T = ( T11 ) N-P, */
+/* P-N N ( T21 ) P */
+/* P */
+
+/* where T12 or T21 is upper triangular. */
+
+/* In particular, if B is square and nonsingular, the GQR factorization */
+/* of A and B implicitly gives the QR factorization of inv(B)*A: */
+
+/* inv(B)*A = Z'*(inv(T)*R) */
+
+/* where inv(B) denotes the inverse of the matrix B, and Z' denotes the */
+/* conjugate transpose of matrix Z. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of rows of the matrices A and B. N >= 0. */
+
+/* M (input) INTEGER */
+/* The number of columns of the matrix A. M >= 0. */
+
+/* P (input) INTEGER */
+/* The number of columns of the matrix B. P >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,M) */
+/* On entry, the N-by-M matrix A. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(N,M)-by-M upper trapezoidal matrix R (R is */
+/* upper triangular if N >= M); the elements below the diagonal, */
+/* with the array TAUA, represent the unitary matrix Q as a */
+/* product of min(N,M) elementary reflectors (see Further */
+/* Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAUA (output) COMPLEX*16 array, dimension (min(N,M)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the unitary matrix Q (see Further Details). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,P) */
+/* On entry, the N-by-P matrix B. */
+/* On exit, if N <= P, the upper triangle of the subarray */
+/* B(1:N,P-N+1:P) contains the N-by-N upper triangular matrix T; */
+/* if N > P, the elements on and above the (N-P)-th subdiagonal */
+/* contain the N-by-P upper trapezoidal matrix T; the remaining */
+/* elements, with the array TAUB, represent the unitary */
+/* matrix Z as a product of elementary reflectors (see Further */
+/* Details). */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* TAUB (output) COMPLEX*16 array, dimension (min(N,P)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the unitary matrix Z (see Further Details). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N,M,P). */
+/* For optimum performance LWORK >= max(N,M,P)*max(NB1,NB2,NB3), */
+/* where NB1 is the optimal blocksize for the QR factorization */
+/* of an N-by-M matrix, NB2 is the optimal blocksize for the */
+/* RQ factorization of an N-by-P matrix, and NB3 is the optimal */
+/* blocksize for a call of ZUNMQR. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(n,m). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - taua * v * v' */
+
+/* where taua is a complex scalar, and v is a complex vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i+1:n,i), */
+/* and taua in TAUA(i). */
+/* To form Q explicitly, use LAPACK subroutine ZUNGQR. */
+/* To use Q to update another matrix, use LAPACK subroutine ZUNMQR. */
+
+/* The matrix Z is represented as a product of elementary reflectors */
+
+/* Z = H(1) H(2) . . . H(k), where k = min(n,p). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - taub * v * v' */
+
+/* where taub is a complex scalar, and v is a complex vector with */
+/* v(p-k+i+1:p) = 0 and v(p-k+i) = 1; v(1:p-k+i-1) is stored on exit in */
+/* B(n-k+i,1:p-k+i-1), and taub in TAUB(i). */
+/* To form Z explicitly, use LAPACK subroutine ZUNGRQ. */
+/* To use Z to update another matrix, use LAPACK subroutine ZUNMRQ. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --taua;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --taub;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb1 = ilaenv_(&c__1, "ZGEQRF", " ", n, m, &c_n1, &c_n1);
+ nb2 = ilaenv_(&c__1, "ZGERQF", " ", n, p, &c_n1, &c_n1);
+ nb3 = ilaenv_(&c__1, "ZUNMQR", " ", n, m, p, &c_n1);
+/* Computing MAX */
+ i__1 = max(nb1,nb2);
+ nb = max(i__1,nb3);
+/* Computing MAX */
+ i__1 = max(*n,*m);
+ lwkopt = max(i__1,*p) * nb;
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ lquery = *lwork == -1;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*p < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = max(1,*n), i__1 = max(i__1,*m);
+ if (*lwork < max(i__1,*p) && ! lquery) {
+ *info = -11;
+ }
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGGQRF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* QR factorization of N-by-M matrix A: A = Q*R */
+
+ zgeqrf_(n, m, &a[a_offset], lda, &taua[1], &work[1], lwork, info);
+ lopt = (integer) work[1].r;
+
+/* Update B := Q'*B. */
+
+ i__1 = min(*n,*m);
+ zunmqr_("Left", "Conjugate Transpose", n, p, &i__1, &a[a_offset], lda, &
+ taua[1], &b[b_offset], ldb, &work[1], lwork, info);
+/* Computing MAX */
+ i__1 = lopt, i__2 = (integer) work[1].r;
+ lopt = max(i__1,i__2);
+
+/* RQ factorization of N-by-P matrix B: B = T*Z. */
+
+ zgerqf_(n, p, &b[b_offset], ldb, &taub[1], &work[1], lwork, info);
+/* Computing MAX */
+ i__2 = lopt, i__3 = (integer) work[1].r;
+ i__1 = max(i__2,i__3);
+ work[1].r = (doublereal) i__1, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZGGQRF */
+
+} /* zggqrf_ */
diff --git a/contrib/libs/clapack/zggrqf.c b/contrib/libs/clapack/zggrqf.c
new file mode 100644
index 0000000000..bc3c559cdc
--- /dev/null
+++ b/contrib/libs/clapack/zggrqf.c
@@ -0,0 +1,271 @@
+/* zggrqf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zggrqf_(integer *m, integer *p, integer *n, 
+ doublecomplex *a, integer *lda, doublecomplex *taua, doublecomplex *b, 
+ integer *ldb, doublecomplex *taub, doublecomplex *work, integer *
+ lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer nb, nb1, nb2, nb3, lopt;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+ integer *, integer *);
+ extern /* Subroutine */ int zgeqrf_(integer *, integer *, doublecomplex *, 
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *
+), zgerqf_(integer *, integer *, doublecomplex *, integer *, 
+ doublecomplex *, doublecomplex *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int zunmrq_(char *, char *, integer *, integer *, 
+ integer *, doublecomplex *, integer *, doublecomplex *, 
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGGRQF computes a generalized RQ factorization of an M-by-N matrix A */
+/* and a P-by-N matrix B: */
+
+/* A = R*Q, B = Z*T*Q, */
+
+/* where Q is an N-by-N unitary matrix, Z is a P-by-P unitary */
+/* matrix, and R and T assume one of the forms: */
+
+/* if M <= N, R = ( 0 R12 ) M, or if M > N, R = ( R11 ) M-N, */
+/* N-M M ( R21 ) N */
+/* N */
+
+/* where R12 or R21 is upper triangular, and */
+
+/* if P >= N, T = ( T11 ) N , or if P < N, T = ( T11 T12 ) P, */
+/* ( 0 ) P-N P N-P */
+/* N */
+
+/* where T11 is upper triangular. */
+
+/* In particular, if B is square and nonsingular, the GRQ factorization */
+/* of A and B implicitly gives the RQ factorization of A*inv(B): */
+
+/* A*inv(B) = (R*inv(T))*Z' */
+
+/* where inv(B) denotes the inverse of the matrix B, and Z' denotes the */
+/* conjugate transpose of the matrix Z. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* P (input) INTEGER */
+/* The number of rows of the matrix B. P >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrices A and B. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, if M <= N, the upper triangle of the subarray */
+/* A(1:M,N-M+1:N) contains the M-by-M upper triangular matrix R; */
+/* if M > N, the elements on and above the (M-N)-th subdiagonal */
+/* contain the M-by-N upper trapezoidal matrix R; the remaining */
+/* elements, with the array TAUA, represent the unitary */
+/* matrix Q as a product of elementary reflectors (see Further */
+/* Details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAUA (output) COMPLEX*16 array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the unitary matrix Q (see Further Details). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,N) */
+/* On entry, the P-by-N matrix B. */
+/* On exit, the elements on and above the diagonal of the array */
+/* contain the min(P,N)-by-N upper trapezoidal matrix T (T is */
+/* upper triangular if P >= N); the elements below the diagonal, */
+/* with the array TAUB, represent the unitary matrix Z as a */
+/* product of elementary reflectors (see Further Details). */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,P). */
+
+/* TAUB (output) COMPLEX*16 array, dimension (min(P,N)) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the unitary matrix Z (see Further Details). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N,M,P). */
+/* For optimum performance LWORK >= max(N,M,P)*max(NB1,NB2,NB3), */
+/* where NB1 is the optimal blocksize for the RQ factorization */
+/* of an M-by-N matrix, NB2 is the optimal blocksize for the */
+/* QR factorization of a P-by-N matrix, and NB3 is the optimal */
+/* blocksize for a call of ZUNMRQ. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO=-i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k), where k = min(m,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - taua * v * v' */
+
+/* where taua is a complex scalar, and v is a complex vector with */
+/* v(n-k+i+1:n) = 0 and v(n-k+i) = 1; v(1:n-k+i-1) is stored on exit in */
+/* A(m-k+i,1:n-k+i-1), and taua in TAUA(i). */
+/* To form Q explicitly, use LAPACK subroutine ZUNGRQ. */
+/* To use Q to update another matrix, use LAPACK subroutine ZUNMRQ. */
+
+/* The matrix Z is represented as a product of elementary reflectors */
+
+/* Z = H(1) H(2) . . . H(k), where k = min(p,n). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - taub * v * v' */
+
+/* where taub is a complex scalar, and v is a complex vector with */
+/* v(1:i-1) = 0 and v(i) = 1; v(i+1:p) is stored on exit in B(i+1:p,i), */
+/* and taub in TAUB(i). */
+/* To form Z explicitly, use LAPACK subroutine ZUNGQR. */
+/* To use Z to update another matrix, use LAPACK subroutine ZUNMQR. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --taua;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --taub;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb1 = ilaenv_(&c__1, "ZGERQF", " ", m, n, &c_n1, &c_n1);
+ nb2 = ilaenv_(&c__1, "ZGEQRF", " ", p, n, &c_n1, &c_n1);
+ nb3 = ilaenv_(&c__1, "ZUNMRQ", " ", m, n, p, &c_n1);
+/* Computing MAX: NB = largest optimal blocksize among ZGERQF, ZGEQRF, ZUNMRQ */
+ i__1 = max(nb1,nb2);
+ nb = max(i__1,nb3);
+/* Computing MAX: optimal workspace LWKOPT = max(N,M,P) * NB */
+ i__1 = max(*n,*m);
+ lwkopt = max(i__1,*p) * nb;
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*p < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldb < max(1,*p)) {
+ *info = -8;
+ } else /* if(complicated condition) */ {
+/* Computing MAX: minimal workspace is max(1,M,P,N); check skipped for a workspace query */
+ i__1 = max(1,*m), i__1 = max(i__1,*p);
+ if (*lwork < max(i__1,*n) && ! lquery) {
+ *info = -11;
+ }
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGGRQF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* RQ factorization of M-by-N matrix A: A = R*Q (reflectors and TAUA overwrite A) */
+
+ zgerqf_(m, n, &a[a_offset], lda, &taua[1], &work[1], lwork, info);
+ lopt = (integer) work[1].r;
+
+/* Update B := B*Q', applying Q from the RQ factorization of A on the right */
+
+ i__1 = min(*m,*n);
+/* Computing MAX: the Householder vectors start at row max(1, M-N+1) of A */
+ i__2 = 1, i__3 = *m - *n + 1;
+ zunmrq_("Right", "Conjugate Transpose", p, n, &i__1, &a[max(i__2, i__3)+ 
+ a_dim1], lda, &taua[1], &b[b_offset], ldb, &work[1], lwork, info);
+/* Computing MAX: keep the largest optimal workspace reported so far */
+ i__1 = lopt, i__2 = (integer) work[1].r;
+ lopt = max(i__1,i__2);
+
+/* QR factorization of P-by-N matrix B: B = Z*T (reflectors and TAUB overwrite B) */
+
+ zgeqrf_(p, n, &b[b_offset], ldb, &taub[1], &work[1], lwork, info);
+/* Computing MAX: return the overall optimal LWORK in WORK(1) */
+ i__2 = lopt, i__3 = (integer) work[1].r;
+ i__1 = max(i__2,i__3);
+ work[1].r = (doublereal) i__1, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZGGRQF */
+
+} /* zggrqf_ */
diff --git a/contrib/libs/clapack/zggsvd.c b/contrib/libs/clapack/zggsvd.c
new file mode 100644
index 0000000000..00bf0e5b33
--- /dev/null
+++ b/contrib/libs/clapack/zggsvd.c
@@ -0,0 +1,407 @@
+/* zggsvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zggsvd_(char *jobu, char *jobv, char *jobq, integer *m, 
+ integer *n, integer *p, integer *k, integer *l, doublecomplex *a, 
+ integer *lda, doublecomplex *b, integer *ldb, doublereal *alpha, 
+ doublereal *beta, doublecomplex *u, integer *ldu, doublecomplex *v, 
+ integer *ldv, doublecomplex *q, integer *ldq, doublecomplex *work, 
+ doublereal *rwork, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, u_dim1, 
+ u_offset, v_dim1, v_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal ulp;
+ integer ibnd;
+ doublereal tola;
+ integer isub;
+ doublereal tolb, unfl, temp, smax;
+ extern logical lsame_(char *, char *);
+ doublereal anorm, bnorm;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, 
+ doublereal *, integer *);
+ logical wantq, wantu, wantv;
+ extern doublereal dlamch_(char *);
+ integer ncycle;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern doublereal zlange_(char *, integer *, integer *, doublecomplex *, 
+ integer *, doublereal *);
+ extern /* Subroutine */ int ztgsja_(char *, char *, char *, integer *, 
+ integer *, integer *, integer *, integer *, doublecomplex *, 
+ integer *, doublecomplex *, integer *, doublereal *, doublereal *, 
+ doublereal *, doublereal *, doublecomplex *, integer *, 
+ doublecomplex *, integer *, doublecomplex *, integer *, 
+ doublecomplex *, integer *, integer *), 
+ zggsvp_(char *, char *, char *, integer *, integer *, integer *, 
+ doublecomplex *, integer *, doublecomplex *, integer *, 
+ doublereal *, doublereal *, integer *, integer *, doublecomplex *, 
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *
+, integer *, doublereal *, doublecomplex *, doublecomplex *, 
+ integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGGSVD computes the generalized singular value decomposition (GSVD) */
+/* of an M-by-N complex matrix A and P-by-N complex matrix B: */
+
+/* U'*A*Q = D1*( 0 R ), V'*B*Q = D2*( 0 R ) */
+
+/* where U, V and Q are unitary matrices, and Z' means the conjugate */
+/* transpose of Z. Let K+L = the effective numerical rank of the */
+/* matrix (A',B')', then R is a (K+L)-by-(K+L) nonsingular upper */
+/* triangular matrix, D1 and D2 are M-by-(K+L) and P-by-(K+L) "diagonal" */
+/* matrices and of the following structures, respectively: */
+
+/* If M-K-L >= 0, */
+
+/* K L */
+/* D1 = K ( I 0 ) */
+/* L ( 0 C ) */
+/* M-K-L ( 0 0 ) */
+
+/* K L */
+/* D2 = L ( 0 S ) */
+/* P-L ( 0 0 ) */
+
+/* N-K-L K L */
+/* ( 0 R ) = K ( 0 R11 R12 ) */
+/* L ( 0 0 R22 ) */
+/* where */
+
+/* C = diag( ALPHA(K+1), ... , ALPHA(K+L) ), */
+/* S = diag( BETA(K+1), ... , BETA(K+L) ), */
+/* C**2 + S**2 = I. */
+
+/* R is stored in A(1:K+L,N-K-L+1:N) on exit. */
+
+/* If M-K-L < 0, */
+
+/* K M-K K+L-M */
+/* D1 = K ( I 0 0 ) */
+/* M-K ( 0 C 0 ) */
+
+/* K M-K K+L-M */
+/* D2 = M-K ( 0 S 0 ) */
+/* K+L-M ( 0 0 I ) */
+/* P-L ( 0 0 0 ) */
+
+/* N-K-L K M-K K+L-M */
+/* ( 0 R ) = K ( 0 R11 R12 R13 ) */
+/* M-K ( 0 0 R22 R23 ) */
+/* K+L-M ( 0 0 0 R33 ) */
+
+/* where */
+
+/* C = diag( ALPHA(K+1), ... , ALPHA(M) ), */
+/* S = diag( BETA(K+1), ... , BETA(M) ), */
+/* C**2 + S**2 = I. */
+
+/* (R11 R12 R13 ) is stored in A(1:M, N-K-L+1:N), and R33 is stored */
+/* ( 0 R22 R23 ) */
+/* in B(M-K+1:L,N+M-K-L+1:N) on exit. */
+
+/* The routine computes C, S, R, and optionally the unitary */
+/* transformation matrices U, V and Q. */
+
+/* In particular, if B is an N-by-N nonsingular matrix, then the GSVD of */
+/* A and B implicitly gives the SVD of A*inv(B): */
+/* A*inv(B) = U*(D1*inv(D2))*V'. */
+/* If ( A',B')' has orthnormal columns, then the GSVD of A and B is also */
+/* equal to the CS decomposition of A and B. Furthermore, the GSVD can */
+/* be used to derive the solution of the eigenvalue problem: */
+/* A'*A x = lambda* B'*B x. */
+/* In some literature, the GSVD of A and B is presented in the form */
+/* U'*A*X = ( 0 D1 ), V'*B*X = ( 0 D2 ) */
+/* where U and V are orthogonal and X is nonsingular, and D1 and D2 are */
+/* ``diagonal''. The former GSVD form can be converted to the latter */
+/* form by taking the nonsingular matrix X as */
+
+/* X = Q*( I 0 ) */
+/* ( 0 inv(R) ) */
+
+/* Arguments */
+/* ========= */
+
+/* JOBU (input) CHARACTER*1 */
+/* = 'U': Unitary matrix U is computed; */
+/* = 'N': U is not computed. */
+
+/* JOBV (input) CHARACTER*1 */
+/* = 'V': Unitary matrix V is computed; */
+/* = 'N': V is not computed. */
+
+/* JOBQ (input) CHARACTER*1 */
+/* = 'Q': Unitary matrix Q is computed; */
+/* = 'N': Q is not computed. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrices A and B. N >= 0. */
+
+/* P (input) INTEGER */
+/* The number of rows of the matrix B. P >= 0. */
+
+/* K (output) INTEGER */
+/* L (output) INTEGER */
+/* On exit, K and L specify the dimension of the subblocks */
+/* described in Purpose. */
+/* K + L = effective numerical rank of (A',B')'. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A contains the triangular matrix R, or part of R. */
+/* See Purpose for details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,N) */
+/* On entry, the P-by-N matrix B. */
+/* On exit, B contains part of the triangular matrix R if */
+/* M-K-L < 0. See Purpose for details. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,P). */
+
+/* ALPHA (output) DOUBLE PRECISION array, dimension (N) */
+/* BETA (output) DOUBLE PRECISION array, dimension (N) */
+/* On exit, ALPHA and BETA contain the generalized singular */
+/* value pairs of A and B; */
+/* ALPHA(1:K) = 1, */
+/* BETA(1:K) = 0, */
+/* and if M-K-L >= 0, */
+/* ALPHA(K+1:K+L) = C, */
+/* BETA(K+1:K+L) = S, */
+/* or if M-K-L < 0, */
+/* ALPHA(K+1:M)= C, ALPHA(M+1:K+L)= 0 */
+/* BETA(K+1:M) = S, BETA(M+1:K+L) = 1 */
+/* and */
+/* ALPHA(K+L+1:N) = 0 */
+/* BETA(K+L+1:N) = 0 */
+
+/* U (output) COMPLEX*16 array, dimension (LDU,M) */
+/* If JOBU = 'U', U contains the M-by-M unitary matrix U. */
+/* If JOBU = 'N', U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= max(1,M) if */
+/* JOBU = 'U'; LDU >= 1 otherwise. */
+
+/* V (output) COMPLEX*16 array, dimension (LDV,P) */
+/* If JOBV = 'V', V contains the P-by-P unitary matrix V. */
+/* If JOBV = 'N', V is not referenced. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. LDV >= max(1,P) if */
+/* JOBV = 'V'; LDV >= 1 otherwise. */
+
+/* Q (output) COMPLEX*16 array, dimension (LDQ,N) */
+/* If JOBQ = 'Q', Q contains the N-by-N unitary matrix Q. */
+/* If JOBQ = 'N', Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N) if */
+/* JOBQ = 'Q'; LDQ >= 1 otherwise. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (max(3*N,M,P)+N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (2*N) */
+
+/* IWORK (workspace/output) INTEGER array, dimension (N) */
+/* On exit, IWORK stores the sorting information. More */
+/* precisely, the following loop will sort ALPHA */
+/* for I = K+1, min(M,K+L) */
+/* swap ALPHA(I) and ALPHA(IWORK(I)) */
+/* endfor */
+/* such that ALPHA(1) >= ALPHA(2) >= ... >= ALPHA(N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, the Jacobi-type procedure failed to */
+/* converge. For further details, see subroutine ZTGSJA. */
+
+/* Internal Parameters */
+/* =================== */
+
+/* TOLA DOUBLE PRECISION */
+/* TOLB DOUBLE PRECISION */
+/* TOLA and TOLB are the thresholds to determine the effective */
+/* rank of (A',B')'. Generally, they are set to */
+/* TOLA = MAX(M,N)*norm(A)*MAZHEPS, */
+/* TOLB = MAX(P,N)*norm(B)*MAZHEPS. */
+/* The size of TOLA and TOLB may affect the size of backward */
+/* errors of the decomposition. */
+
+/* Further Details */
+/* =============== */
+
+/* 2-96 Based on modifications by */
+/* Ming Gu and Huan Ren, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alpha;
+ --beta;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ wantu = lsame_(jobu, "U");
+ wantv = lsame_(jobv, "V");
+ wantq = lsame_(jobq, "Q");
+
+ *info = 0;
+ if (! (wantu || lsame_(jobu, "N"))) {
+ *info = -1;
+ } else if (! (wantv || lsame_(jobv, "N"))) {
+ *info = -2;
+ } else if (! (wantq || lsame_(jobq, "N"))) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*p < 0) {
+ *info = -6;
+ } else if (*lda < max(1,*m)) {
+ *info = -10;
+ } else if (*ldb < max(1,*p)) {
+ *info = -12;
+ } else if (*ldu < 1 || wantu && *ldu < *m) {
+ *info = -16;
+ } else if (*ldv < 1 || wantv && *ldv < *p) {
+ *info = -18;
+ } else if (*ldq < 1 || wantq && *ldq < *n) {
+ *info = -20;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGGSVD", &i__1);
+ return 0;
+ }
+
+/* Compute the 1-norm of matrices A and B (ZLANGE with "1"), used for the rank thresholds */
+
+ anorm = zlange_("1", m, n, &a[a_offset], lda, &rwork[1]);
+ bnorm = zlange_("1", p, n, &b[b_offset], ldb, &rwork[1]);
+
+/* Get machine precision and set up threshold for determining */
+/* the effective numerical rank of the matrices A and B. */
+
+ ulp = dlamch_("Precision");
+ unfl = dlamch_("Safe Minimum");
+ tola = max(*m,*n) * max(anorm,unfl) * ulp;
+ tolb = max(*p,*n) * max(bnorm,unfl) * ulp;
+
+ zggsvp_(jobu, jobv, jobq, m, p, n, &a[a_offset], lda, &b[b_offset], ldb, &
+ tola, &tolb, k, l, &u[u_offset], ldu, &v[v_offset], ldv, &q[
+ q_offset], ldq, &iwork[1], &rwork[1], &work[1], &work[*n + 1], 
+ info);
+
+/* Compute the GSVD of two upper "triangular" matrices */
+
+ ztgsja_(jobu, jobv, jobq, m, p, n, k, l, &a[a_offset], lda, &b[b_offset], 
+ ldb, &tola, &tolb, &alpha[1], &beta[1], &u[u_offset], ldu, &v[
+ v_offset], ldv, &q[q_offset], ldq, &work[1], &ncycle, info);
+
+/* Sort the singular values and store the pivot indices in IWORK */
+/* Copy ALPHA to RWORK, then sort ALPHA in RWORK */
+
+ dcopy_(n, &alpha[1], &c__1, &rwork[1], &c__1);
+/* Computing MIN: only ALPHA(K+1 : K+min(L,M-K)) takes part in the sort */
+ i__1 = *l, i__2 = *m - *k;
+ ibnd = min(i__1,i__2);
+ i__1 = ibnd;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Scan for largest ALPHA(K+I) */
+
+ isub = i__;
+ smax = rwork[*k + i__];
+ i__2 = ibnd;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ temp = rwork[*k + j];
+ if (temp > smax) {
+ isub = j;
+ smax = temp;
+ }
+/* L10: */
+ }
+ if (isub != i__) {
+ rwork[*k + isub] = rwork[*k + i__];
+ rwork[*k + i__] = smax;
+ iwork[*k + i__] = *k + isub;
+ } else {
+ iwork[*k + i__] = *k + i__;
+ }
+/* L20: */
+ }
+
+ return 0;
+
+/* End of ZGGSVD */
+
+} /* zggsvd_ */
diff --git a/contrib/libs/clapack/zggsvp.c b/contrib/libs/clapack/zggsvp.c
new file mode 100644
index 0000000000..e323ba697d
--- /dev/null
+++ b/contrib/libs/clapack/zggsvp.c
@@ -0,0 +1,533 @@
+/* zggsvp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+
+/* Subroutine */ int zggsvp_(char *jobu, char *jobv, char *jobq, integer *m, 
+ integer *p, integer *n, doublecomplex *a, integer *lda, doublecomplex 
+ *b, integer *ldb, doublereal *tola, doublereal *tolb, integer *k, 
+ integer *l, doublecomplex *u, integer *ldu, doublecomplex *v, integer 
+ *ldv, doublecomplex *q, integer *ldq, integer *iwork, doublereal *
+ rwork, doublecomplex *tau, doublecomplex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, u_dim1, 
+ u_offset, v_dim1, v_offset, i__1, i__2, i__3;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j;
+ extern logical lsame_(char *, char *);
+ logical wantq, wantu, wantv;
+ extern /* Subroutine */ int zgeqr2_(integer *, integer *, doublecomplex *, 
+ integer *, doublecomplex *, doublecomplex *, integer *), zgerq2_(
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *, 
+ doublecomplex *, integer *), zung2r_(integer *, integer *, 
+ integer *, doublecomplex *, integer *, doublecomplex *, 
+ doublecomplex *, integer *), zunm2r_(char *, char *, integer *, 
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *, 
+ doublecomplex *, integer *, doublecomplex *, integer *), zunmr2_(char *, char *, integer *, integer *, integer *, 
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *, 
+ integer *, doublecomplex *, integer *), xerbla_(
+ char *, integer *), zgeqpf_(integer *, integer *, 
+ doublecomplex *, integer *, integer *, doublecomplex *, 
+ doublecomplex *, doublereal *, integer *), zlacpy_(char *, 
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *, 
+ integer *);
+ logical forwrd;
+ extern /* Subroutine */ int zlaset_(char *, integer *, integer *, 
+ doublecomplex *, doublecomplex *, doublecomplex *, integer *), zlapmt_(logical *, integer *, integer *, doublecomplex *, 
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGGSVP computes unitary matrices U, V and Q such that */
+
+/* N-K-L K L */
+/* U'*A*Q = K ( 0 A12 A13 ) if M-K-L >= 0; */
+/* L ( 0 0 A23 ) */
+/* M-K-L ( 0 0 0 ) */
+
+/* N-K-L K L */
+/* = K ( 0 A12 A13 ) if M-K-L < 0; */
+/* M-K ( 0 0 A23 ) */
+
+/* N-K-L K L */
+/* V'*B*Q = L ( 0 0 B13 ) */
+/* P-L ( 0 0 0 ) */
+
+/* where the K-by-K matrix A12 and L-by-L matrix B13 are nonsingular */
+/* upper triangular; A23 is L-by-L upper triangular if M-K-L >= 0, */
+/* otherwise A23 is (M-K)-by-L upper trapezoidal. K+L = the effective */
+/* numerical rank of the (M+P)-by-N matrix (A',B')'. Z' denotes the */
+/* conjugate transpose of Z. */
+
+/* This decomposition is the preprocessing step for computing the */
+/* Generalized Singular Value Decomposition (GSVD), see subroutine */
+/* ZGGSVD. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBU (input) CHARACTER*1 */
+/* = 'U': Unitary matrix U is computed; */
+/* = 'N': U is not computed. */
+
+/* JOBV (input) CHARACTER*1 */
+/* = 'V': Unitary matrix V is computed; */
+/* = 'N': V is not computed. */
+
+/* JOBQ (input) CHARACTER*1 */
+/* = 'Q': Unitary matrix Q is computed; */
+/* = 'N': Q is not computed. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* P (input) INTEGER */
+/* The number of rows of the matrix B. P >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrices A and B. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A contains the triangular (or trapezoidal) matrix */
+/* described in the Purpose section. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,N) */
+/* On entry, the P-by-N matrix B. */
+/* On exit, B contains the triangular matrix described in */
+/* the Purpose section. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,P). */
+
+/* TOLA (input) DOUBLE PRECISION */
+/* TOLB (input) DOUBLE PRECISION */
+/* TOLA and TOLB are the thresholds to determine the effective */
+/* numerical rank of matrix B and a subblock of A. Generally, */
+/* they are set to */
+/* TOLA = MAX(M,N)*norm(A)*MAZHEPS, */
+/* TOLB = MAX(P,N)*norm(B)*MAZHEPS. */
+/* The size of TOLA and TOLB may affect the size of backward */
+/* errors of the decomposition. */
+
+/* K (output) INTEGER */
+/* L (output) INTEGER */
+/* On exit, K and L specify the dimension of the subblocks */
+/* described in Purpose section. */
+/* K + L = effective numerical rank of (A',B')'. */
+
+/* U (output) COMPLEX*16 array, dimension (LDU,M) */
+/* If JOBU = 'U', U contains the unitary matrix U. */
+/* If JOBU = 'N', U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= max(1,M) if */
+/* JOBU = 'U'; LDU >= 1 otherwise. */
+
+/* V (output) COMPLEX*16 array, dimension (LDV,P) */
+/* If JOBV = 'V', V contains the unitary matrix V. */
+/* If JOBV = 'N', V is not referenced. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. LDV >= max(1,P) if */
+/* JOBV = 'V'; LDV >= 1 otherwise. */
+
+/* Q (output) COMPLEX*16 array, dimension (LDQ,N) */
+/* If JOBQ = 'Q', Q contains the unitary matrix Q. */
+/* If JOBQ = 'N', Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N) if */
+/* JOBQ = 'Q'; LDQ >= 1 otherwise. */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (2*N) */
+
+/* TAU (workspace) COMPLEX*16 array, dimension (N) */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (max(3*N,M,P)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* The subroutine uses LAPACK subroutine ZGEQPF for the QR factorization */
+/* with column pivoting to detect the effective numerical rank of the */
+/* a matrix. It may be replaced by a better rank determination strategy. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --iwork;
+ --rwork;
+ --tau;
+ --work;
+
+ /* Function Body */
+ wantu = lsame_(jobu, "U");
+ wantv = lsame_(jobv, "V");
+ wantq = lsame_(jobq, "Q");
+ forwrd = TRUE_;
+
+ *info = 0;
+ if (! (wantu || lsame_(jobu, "N"))) {
+ *info = -1;
+ } else if (! (wantv || lsame_(jobv, "N"))) {
+ *info = -2;
+ } else if (! (wantq || lsame_(jobq, "N"))) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*p < 0) {
+ *info = -5;
+ } else if (*n < 0) {
+ *info = -6;
+ } else if (*lda < max(1,*m)) {
+ *info = -8;
+ } else if (*ldb < max(1,*p)) {
+ *info = -10;
+ } else if (*ldu < 1 || wantu && *ldu < *m) {
+ *info = -16;
+ } else if (*ldv < 1 || wantv && *ldv < *p) {
+ *info = -18;
+ } else if (*ldq < 1 || wantq && *ldq < *n) {
+ *info = -20;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGGSVP", &i__1);
+ return 0;
+ }
+
+/* QR with column pivoting of B: B*P = V*( S11 S12 ) */
+/* ( 0 0 ) */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ iwork[i__] = 0;
+/* L10: */
+ }
+ zgeqpf_(p, n, &b[b_offset], ldb, &iwork[1], &tau[1], &work[1], &rwork[1], 
+ info);
+
+/* Update A := A*P */
+
+ zlapmt_(&forwrd, m, n, &a[a_offset], lda, &iwork[1]);
+
+/* Determine the effective rank of matrix B. */
+
+ *l = 0;
+ i__1 = min(*p,*n);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * b_dim1;
+ if ((d__1 = b[i__2].r, abs(d__1)) + (d__2 = d_imag(&b[i__ + i__ * 
+ b_dim1]), abs(d__2)) > *tolb) {
+ ++(*l);
+ }
+/* L20: */
+ }
+
+ if (wantv) {
+
+/* Copy the details of V, and form V. */
+
+ zlaset_("Full", p, p, &c_b1, &c_b1, &v[v_offset], ldv);
+ if (*p > 1) {
+ i__1 = *p - 1;
+ zlacpy_("Lower", &i__1, n, &b[b_dim1 + 2], ldb, &v[v_dim1 + 2], 
+ ldv);
+ }
+ i__1 = min(*p,*n);
+ zung2r_(p, p, &i__1, &v[v_offset], ldv, &tau[1], &work[1], info);
+ }
+
+/* Clean up B: zero the strictly lower triangle of the leading L-by-L block */
+
+ i__1 = *l - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *l;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0., b[i__3].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+ if (*p > *l) {
+ i__1 = *p - *l;
+ zlaset_("Full", &i__1, n, &c_b1, &c_b1, &b[*l + 1 + b_dim1], ldb);
+ }
+
+ if (wantq) {
+
+/* Set Q = I and Update Q := Q*P */
+
+ zlaset_("Full", n, n, &c_b1, &c_b2, &q[q_offset], ldq);
+ zlapmt_(&forwrd, n, n, &q[q_offset], ldq, &iwork[1]);
+ }
+
+ if (*p >= *l && *n != *l) {
+
+/* RQ factorization of ( S11 S12 ) = ( 0 S12 )*Z */
+
+ zgerq2_(l, n, &b[b_offset], ldb, &tau[1], &work[1], info);
+
+/* Update A := A*Z' */
+
+ zunmr2_("Right", "Conjugate transpose", m, n, l, &b[b_offset], ldb, &
+ tau[1], &a[a_offset], lda, &work[1], info);
+ if (wantq) {
+
+/* Update Q := Q*Z' */
+
+ zunmr2_("Right", "Conjugate transpose", n, n, l, &b[b_offset], 
+ ldb, &tau[1], &q[q_offset], ldq, &work[1], info);
+ }
+
+/* Clean up B: keep only the upper-triangular T in the last L columns */
+
+ i__1 = *n - *l;
+ zlaset_("Full", l, &i__1, &c_b1, &c_b1, &b[b_offset], ldb);
+ i__1 = *n;
+ for (j = *n - *l + 1; j <= i__1; ++j) {
+ i__2 = *l;
+ for (i__ = j - *n + *l + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0., b[i__3].i = 0.;
+/* L50: */
+ }
+/* L60: */
+ }
+
+ }
+
+/* Let N-L L */
+/* A = ( A11 A12 ) M, */
+
+/* then the following does the complete QR decomposition of A11: */
+
+/* A11 = U*( 0 T12 )*P1' */
+/* ( 0 0 ) */
+
+ i__1 = *n - *l;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ iwork[i__] = 0;
+/* L70: */
+ }
+ i__1 = *n - *l;
+ zgeqpf_(m, &i__1, &a[a_offset], lda, &iwork[1], &tau[1], &work[1], &rwork[
+ 1], info);
+
+/* Determine the effective rank of A11 */
+
+ *k = 0;
+/* Computing MIN: scan the min(M, N-L) diagonal entries of A11 */
+ i__2 = *m, i__3 = *n - *l;
+ i__1 = min(i__2,i__3);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ if ((d__1 = a[i__2].r, abs(d__1)) + (d__2 = d_imag(&a[i__ + i__ * 
+ a_dim1]), abs(d__2)) > *tola) {
+ ++(*k);
+ }
+/* L80: */
+ }
+
+/* Update A12 := U'*A12, where A12 = A( 1:M, N-L+1:N ) */
+
+/* Computing MIN: number of reflectors from the pivoted QR of A11 */
+ i__2 = *m, i__3 = *n - *l;
+ i__1 = min(i__2,i__3);
+ zunm2r_("Left", "Conjugate transpose", m, l, &i__1, &a[a_offset], lda, &
+ tau[1], &a[(*n - *l + 1) * a_dim1 + 1], lda, &work[1], info);
+
+ if (wantu) {
+
+/* Copy the details of U, and form U */
+
+ zlaset_("Full", m, m, &c_b1, &c_b1, &u[u_offset], ldu);
+ if (*m > 1) {
+ i__1 = *m - 1;
+ i__2 = *n - *l;
+ zlacpy_("Lower", &i__1, &i__2, &a[a_dim1 + 2], lda, &u[u_dim1 + 2]
+, ldu);
+ }
+/* Computing MIN: number of reflectors defining U */
+ i__2 = *m, i__3 = *n - *l;
+ i__1 = min(i__2,i__3);
+ zung2r_(m, m, &i__1, &u[u_offset], ldu, &tau[1], &work[1], info);
+ }
+
+ if (wantq) {
+
+/* Update Q( 1:N, 1:N-L ) = Q( 1:N, 1:N-L )*P1 */
+
+ i__1 = *n - *l;
+ zlapmt_(&forwrd, n, &i__1, &q[q_offset], ldq, &iwork[1]);
+ }
+
+/* Clean up A: set the strictly lower triangular part of */
+/* A(1:K, 1:K) = 0, and A( K+1:M, 1:N-L ) = 0. */
+
+ i__1 = *k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L90: */
+ }
+/* L100: */
+ }
+ if (*m > *k) {
+ i__1 = *m - *k;
+ i__2 = *n - *l;
+ zlaset_("Full", &i__1, &i__2, &c_b1, &c_b1, &a[*k + 1 + a_dim1], lda);
+ }
+
+ if (*n - *l > *k) {
+
+/* RQ factorization of ( T11 T12 ) = ( 0 T12 )*Z1 */
+
+ i__1 = *n - *l;
+ zgerq2_(k, &i__1, &a[a_offset], lda, &tau[1], &work[1], info);
+
+ if (wantq) {
+
+/* Update Q( 1:N,1:N-L ) = Q( 1:N,1:N-L )*Z1' */
+
+ i__1 = *n - *l;
+ zunmr2_("Right", "Conjugate transpose", n, &i__1, k, &a[a_offset], 
+ lda, &tau[1], &q[q_offset], ldq, &work[1], info);
+ }
+
+/* Clean up A: keep only the upper-triangular part of ( 0 T12 ) */
+
+ i__1 = *n - *l - *k;
+ zlaset_("Full", k, &i__1, &c_b1, &c_b1, &a[a_offset], lda);
+ i__1 = *n - *l;
+ for (j = *n - *l - *k + 1; j <= i__1; ++j) {
+ i__2 = *k;
+ for (i__ = j - *n + *l + *k + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L110: */
+ }
+/* L120: */
+ }
+
+ }
+
+ if (*m > *k) {
+
+/* QR factorization of A( K+1:M,N-L+1:N ) */
+
+ i__1 = *m - *k;
+ zgeqr2_(&i__1, l, &a[*k + 1 + (*n - *l + 1) * a_dim1], lda, &tau[1], &
+ work[1], info);
+
+ if (wantu) {
+
+/* Update U(:,K+1:M) := U(:,K+1:M)*U1 */
+
+ i__1 = *m - *k;
+/* Computing MIN: number of reflectors from the QR of A(K+1:M, N-L+1:N) */
+ i__3 = *m - *k;
+ i__2 = min(i__3,*l);
+ zunm2r_("Right", "No transpose", m, &i__1, &i__2, &a[*k + 1 + (*n 
+ - *l + 1) * a_dim1], lda, &tau[1], &u[(*k + 1) * u_dim1 + 
+ 1], ldu, &work[1], info);
+ }
+
+/* Clean up: zero A below the (K+L)-th "diagonal" in the last L columns */
+
+ i__1 = *n;
+ for (j = *n - *l + 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j - *n + *k + *l + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L130: */
+ }
+/* L140: */
+ }
+
+ }
+
+ return 0;
+
+/* End of ZGGSVP */
+
+} /* zggsvp_ */
diff --git a/contrib/libs/clapack/zgtcon.c b/contrib/libs/clapack/zgtcon.c
new file mode 100644
index 0000000000..a7607fdba9
--- /dev/null
+++ b/contrib/libs/clapack/zgtcon.c
@@ -0,0 +1,209 @@
+/* zgtcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zgtcon_(char *norm, integer *n, doublecomplex *dl,
+ doublecomplex *d__, doublecomplex *du, doublecomplex *du2, integer *
+ ipiv, doublereal *anorm, doublereal *rcond, doublecomplex *work,
+ integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ integer i__, kase, kase1;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int zlacn2_(integer *, doublecomplex *,
+ doublecomplex *, doublereal *, integer *, integer *), xerbla_(
+ char *, integer *);
+ doublereal ainvnm;
+ logical onenrm;
+ extern /* Subroutine */ int zgttrs_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, doublecomplex *, doublecomplex *
+, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGTCON estimates the reciprocal of the condition number of a complex */
+/* tridiagonal matrix A using the LU factorization as computed by */
+/* ZGTTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* DL (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) multipliers that define the matrix L from the */
+/* LU factorization of A as computed by ZGTTRF. */
+
+/* D (input) COMPLEX*16 array, dimension (N) */
+/* The n diagonal elements of the upper triangular matrix U from */
+/* the LU factorization of A. */
+
+/* DU (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) elements of the first superdiagonal of U. */
+
+/* DU2 (input) COMPLEX*16 array, dimension (N-2) */
+/* The (n-2) elements of the second superdiagonal of U. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/* interchanged with row IPIV(i). IPIV(i) will always be either */
+/* i or i+1; IPIV(i) = i indicates a row interchange was not */
+/* required. */
+
+/* ANORM (input) DOUBLE PRECISION */
+/* If NORM = '1' or 'O', the 1-norm of the original matrix A. */
+/* If NORM = 'I', the infinity-norm of the original matrix A. */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments. */
+
+ /* Parameter adjustments */
+ --work;
+ --ipiv;
+ --du2;
+ --du;
+ --d__;
+ --dl;
+
+ /* Function Body */
+ *info = 0;
+ onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+ if (! onenrm && ! lsame_(norm, "I")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*anorm < 0.) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGTCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.;
+ if (*n == 0) {
+ *rcond = 1.;
+ return 0;
+ } else if (*anorm == 0.) {
+ return 0;
+ }
+
+/* Check that D(1:N) is non-zero. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ if (d__[i__2].r == 0. && d__[i__2].i == 0.) {
+ return 0;
+ }
+/* L10: */
+ }
+
+ ainvnm = 0.;
+ if (onenrm) {
+ kase1 = 1;
+ } else {
+ kase1 = 2;
+ }
+ kase = 0;
+L20:
+ zlacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+ if (kase == kase1) {
+
+/* Multiply by inv(U)*inv(L). */
+
+ zgttrs_("No transpose", n, &c__1, &dl[1], &d__[1], &du[1], &du2[1]
+, &ipiv[1], &work[1], n, info);
+ } else {
+
+/* Multiply by inv(L')*inv(U'). */
+
+ zgttrs_("Conjugate transpose", n, &c__1, &dl[1], &d__[1], &du[1],
+ &du2[1], &ipiv[1], &work[1], n, info);
+ }
+ goto L20;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.) {
+ *rcond = 1. / ainvnm / *anorm;
+ }
+
+ return 0;
+
+/* End of ZGTCON */
+
+} /* zgtcon_ */
diff --git a/contrib/libs/clapack/zgtrfs.c b/contrib/libs/clapack/zgtrfs.c
new file mode 100644
index 0000000000..53a833783b
--- /dev/null
+++ b/contrib/libs/clapack/zgtrfs.c
@@ -0,0 +1,553 @@
+/* zgtrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b18 = -1.;
+static doublereal c_b19 = 1.;
+static doublecomplex c_b26 = {1.,0.};
+
+/* Subroutine */ int zgtrfs_(char *trans, integer *n, integer *nrhs,
+ doublecomplex *dl, doublecomplex *d__, doublecomplex *du,
+ doublecomplex *dlf, doublecomplex *df, doublecomplex *duf,
+ doublecomplex *du2, integer *ipiv, doublecomplex *b, integer *ldb,
+ doublecomplex *x, integer *ldx, doublereal *ferr, doublereal *berr,
+ doublecomplex *work, doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6, i__7, i__8, i__9;
+ doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8, d__9, d__10,
+ d__11, d__12, d__13, d__14;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j;
+ doublereal s;
+ integer nz;
+ doublereal eps;
+ integer kase;
+ doublereal safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3], count;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zaxpy_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), zlacn2_(
+ integer *, doublecomplex *, doublecomplex *, doublereal *,
+ integer *, integer *);
+ extern doublereal dlamch_(char *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *), zlagtm_(
+ char *, integer *, integer *, doublereal *, doublecomplex *,
+ doublecomplex *, doublecomplex *, doublecomplex *, integer *,
+ doublereal *, doublecomplex *, integer *);
+ logical notran;
+ char transn[1], transt[1];
+ doublereal lstres;
+ extern /* Subroutine */ int zgttrs_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, doublecomplex *, doublecomplex *
+, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGTRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is tridiagonal, and provides */
+/* error bounds and backward error estimates for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of A. */
+
+/* D (input) COMPLEX*16 array, dimension (N) */
+/* The diagonal elements of A. */
+
+/* DU (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) superdiagonal elements of A. */
+
+/* DLF (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) multipliers that define the matrix L from the */
+/* LU factorization of A as computed by ZGTTRF. */
+
+/* DF (input) COMPLEX*16 array, dimension (N) */
+/* The n diagonal elements of the upper triangular matrix U from */
+/* the LU factorization of A. */
+
+/* DUF (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) elements of the first superdiagonal of U. */
+
+/* DU2 (input) COMPLEX*16 array, dimension (N-2) */
+/* The (n-2) elements of the second superdiagonal of U. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/* interchanged with row IPIV(i). IPIV(i) will always be either */
+/* i or i+1; IPIV(i) = i indicates a row interchange was not */
+/* required. */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by ZGTTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --dl;
+ --d__;
+ --du;
+ --dlf;
+ --df;
+ --duf;
+ --du2;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -13;
+ } else if (*ldx < max(1,*n)) {
+ *info = -15;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGTRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.;
+ berr[j] = 0.;
+/* L10: */
+ }
+ return 0;
+ }
+
+ if (notran) {
+ *(unsigned char *)transn = 'N';
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transn = 'C';
+ *(unsigned char *)transt = 'N';
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = 4;
+ eps = dlamch_("Epsilon");
+ safmin = dlamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A, A**T, or A**H, depending on TRANS. */
+
+ zcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+ zlagtm_(trans, n, &c__1, &c_b18, &dl[1], &d__[1], &du[1], &x[j *
+ x_dim1 + 1], ldx, &c_b19, &work[1], n);
+
+/* Compute abs(op(A))*abs(x) + abs(b) for use in the backward */
+/* error bound. */
+
+ if (notran) {
+ if (*n == 1) {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * x_dim1 + 1;
+ rwork[1] = (d__1 = b[i__2].r, abs(d__1)) + (d__2 = d_imag(&b[
+ j * b_dim1 + 1]), abs(d__2)) + ((d__3 = d__[1].r, abs(
+ d__3)) + (d__4 = d_imag(&d__[1]), abs(d__4))) * ((
+ d__5 = x[i__3].r, abs(d__5)) + (d__6 = d_imag(&x[j *
+ x_dim1 + 1]), abs(d__6)));
+ } else {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * x_dim1 + 1;
+ i__4 = j * x_dim1 + 2;
+ rwork[1] = (d__1 = b[i__2].r, abs(d__1)) + (d__2 = d_imag(&b[
+ j * b_dim1 + 1]), abs(d__2)) + ((d__3 = d__[1].r, abs(
+ d__3)) + (d__4 = d_imag(&d__[1]), abs(d__4))) * ((
+ d__5 = x[i__3].r, abs(d__5)) + (d__6 = d_imag(&x[j *
+ x_dim1 + 1]), abs(d__6))) + ((d__7 = du[1].r, abs(
+ d__7)) + (d__8 = d_imag(&du[1]), abs(d__8))) * ((d__9
+ = x[i__4].r, abs(d__9)) + (d__10 = d_imag(&x[j *
+ x_dim1 + 2]), abs(d__10)));
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ - 1;
+ i__5 = i__ - 1 + j * x_dim1;
+ i__6 = i__;
+ i__7 = i__ + j * x_dim1;
+ i__8 = i__;
+ i__9 = i__ + 1 + j * x_dim1;
+ rwork[i__] = (d__1 = b[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&b[i__ + j * b_dim1]), abs(d__2)) + ((d__3
+ = dl[i__4].r, abs(d__3)) + (d__4 = d_imag(&dl[i__
+ - 1]), abs(d__4))) * ((d__5 = x[i__5].r, abs(d__5)
+ ) + (d__6 = d_imag(&x[i__ - 1 + j * x_dim1]), abs(
+ d__6))) + ((d__7 = d__[i__6].r, abs(d__7)) + (
+ d__8 = d_imag(&d__[i__]), abs(d__8))) * ((d__9 =
+ x[i__7].r, abs(d__9)) + (d__10 = d_imag(&x[i__ +
+ j * x_dim1]), abs(d__10))) + ((d__11 = du[i__8].r,
+ abs(d__11)) + (d__12 = d_imag(&du[i__]), abs(
+ d__12))) * ((d__13 = x[i__9].r, abs(d__13)) + (
+ d__14 = d_imag(&x[i__ + 1 + j * x_dim1]), abs(
+ d__14)));
+/* L30: */
+ }
+ i__2 = *n + j * b_dim1;
+ i__3 = *n - 1;
+ i__4 = *n - 1 + j * x_dim1;
+ i__5 = *n;
+ i__6 = *n + j * x_dim1;
+ rwork[*n] = (d__1 = b[i__2].r, abs(d__1)) + (d__2 = d_imag(&b[
+ *n + j * b_dim1]), abs(d__2)) + ((d__3 = dl[i__3].r,
+ abs(d__3)) + (d__4 = d_imag(&dl[*n - 1]), abs(d__4)))
+ * ((d__5 = x[i__4].r, abs(d__5)) + (d__6 = d_imag(&x[*
+ n - 1 + j * x_dim1]), abs(d__6))) + ((d__7 = d__[i__5]
+ .r, abs(d__7)) + (d__8 = d_imag(&d__[*n]), abs(d__8)))
+ * ((d__9 = x[i__6].r, abs(d__9)) + (d__10 = d_imag(&
+ x[*n + j * x_dim1]), abs(d__10)));
+ }
+ } else {
+ if (*n == 1) {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * x_dim1 + 1;
+ rwork[1] = (d__1 = b[i__2].r, abs(d__1)) + (d__2 = d_imag(&b[
+ j * b_dim1 + 1]), abs(d__2)) + ((d__3 = d__[1].r, abs(
+ d__3)) + (d__4 = d_imag(&d__[1]), abs(d__4))) * ((
+ d__5 = x[i__3].r, abs(d__5)) + (d__6 = d_imag(&x[j *
+ x_dim1 + 1]), abs(d__6)));
+ } else {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * x_dim1 + 1;
+ i__4 = j * x_dim1 + 2;
+ rwork[1] = (d__1 = b[i__2].r, abs(d__1)) + (d__2 = d_imag(&b[
+ j * b_dim1 + 1]), abs(d__2)) + ((d__3 = d__[1].r, abs(
+ d__3)) + (d__4 = d_imag(&d__[1]), abs(d__4))) * ((
+ d__5 = x[i__3].r, abs(d__5)) + (d__6 = d_imag(&x[j *
+ x_dim1 + 1]), abs(d__6))) + ((d__7 = dl[1].r, abs(
+ d__7)) + (d__8 = d_imag(&dl[1]), abs(d__8))) * ((d__9
+ = x[i__4].r, abs(d__9)) + (d__10 = d_imag(&x[j *
+ x_dim1 + 2]), abs(d__10)));
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ - 1;
+ i__5 = i__ - 1 + j * x_dim1;
+ i__6 = i__;
+ i__7 = i__ + j * x_dim1;
+ i__8 = i__;
+ i__9 = i__ + 1 + j * x_dim1;
+ rwork[i__] = (d__1 = b[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&b[i__ + j * b_dim1]), abs(d__2)) + ((d__3
+ = du[i__4].r, abs(d__3)) + (d__4 = d_imag(&du[i__
+ - 1]), abs(d__4))) * ((d__5 = x[i__5].r, abs(d__5)
+ ) + (d__6 = d_imag(&x[i__ - 1 + j * x_dim1]), abs(
+ d__6))) + ((d__7 = d__[i__6].r, abs(d__7)) + (
+ d__8 = d_imag(&d__[i__]), abs(d__8))) * ((d__9 =
+ x[i__7].r, abs(d__9)) + (d__10 = d_imag(&x[i__ +
+ j * x_dim1]), abs(d__10))) + ((d__11 = dl[i__8].r,
+ abs(d__11)) + (d__12 = d_imag(&dl[i__]), abs(
+ d__12))) * ((d__13 = x[i__9].r, abs(d__13)) + (
+ d__14 = d_imag(&x[i__ + 1 + j * x_dim1]), abs(
+ d__14)));
+/* L40: */
+ }
+ i__2 = *n + j * b_dim1;
+ i__3 = *n - 1;
+ i__4 = *n - 1 + j * x_dim1;
+ i__5 = *n;
+ i__6 = *n + j * x_dim1;
+ rwork[*n] = (d__1 = b[i__2].r, abs(d__1)) + (d__2 = d_imag(&b[
+ *n + j * b_dim1]), abs(d__2)) + ((d__3 = du[i__3].r,
+ abs(d__3)) + (d__4 = d_imag(&du[*n - 1]), abs(d__4)))
+ * ((d__5 = x[i__4].r, abs(d__5)) + (d__6 = d_imag(&x[*
+ n - 1 + j * x_dim1]), abs(d__6))) + ((d__7 = d__[i__5]
+ .r, abs(d__7)) + (d__8 = d_imag(&d__[*n]), abs(d__8)))
+ * ((d__9 = x[i__6].r, abs(d__9)) + (d__10 = d_imag(&
+ x[*n + j * x_dim1]), abs(d__10)));
+ }
+ }
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ s = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+/* Computing MAX */
+ i__3 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2))) / rwork[i__];
+ s = max(d__3,d__4);
+ } else {
+/* Computing MAX */
+ i__3 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + safe1) / (rwork[i__]
+ + safe1);
+ s = max(d__3,d__4);
+ }
+/* L50: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ zgttrs_(trans, n, &c__1, &dlf[1], &df[1], &duf[1], &du2[1], &ipiv[
+ 1], &work[1], n, info);
+ zaxpy_(n, &c_b26, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use ZLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+ i__3 = i__;
+ rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ ;
+ } else {
+ i__3 = i__;
+ rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ + safe1;
+ }
+/* L60: */
+ }
+
+ kase = 0;
+L70:
+ zlacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)**H). */
+
+ zgttrs_(transt, n, &c__1, &dlf[1], &df[1], &duf[1], &du2[1], &
+ ipiv[1], &work[1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L80: */
+ }
+ } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L90: */
+ }
+ zgttrs_(transn, n, &c__1, &dlf[1], &df[1], &duf[1], &du2[1], &
+ ipiv[1], &work[1], n, info);
+ }
+ goto L70;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * x_dim1;
+ d__3 = lstres, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&x[i__ + j * x_dim1]), abs(d__2));
+ lstres = max(d__3,d__4);
+/* L100: */
+ }
+ if (lstres != 0.) {
+ ferr[j] /= lstres;
+ }
+
+/* L110: */
+ }
+
+ return 0;
+
+/* End of ZGTRFS */
+
+} /* zgtrfs_ */
diff --git a/contrib/libs/clapack/zgtsv.c b/contrib/libs/clapack/zgtsv.c
new file mode 100644
index 0000000000..e1d9bc2be4
--- /dev/null
+++ b/contrib/libs/clapack/zgtsv.c
@@ -0,0 +1,288 @@
+/* zgtsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zgtsv_(integer *n, integer *nrhs, doublecomplex *dl,
+ doublecomplex *d__, doublecomplex *du, doublecomplex *b, integer *ldb,
+ integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1, z__2, z__3, z__4, z__5;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer j, k;
+ doublecomplex temp, mult;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGTSV solves the equation */
+
+/* A*X = B, */
+
+/* where A is an N-by-N tridiagonal matrix, by Gaussian elimination with */
+/* partial pivoting. */
+
+/* Note that the equation A'*X = B may be solved by interchanging the */
+/* order of the arguments DU and DL. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input/output) COMPLEX*16 array, dimension (N-1) */
+/* On entry, DL must contain the (n-1) subdiagonal elements of */
+/* A. */
+/* On exit, DL is overwritten by the (n-2) elements of the */
+/* second superdiagonal of the upper triangular matrix U from */
+/* the LU factorization of A, in DL(1), ..., DL(n-2). */
+
+/* D (input/output) COMPLEX*16 array, dimension (N) */
+/* On entry, D must contain the diagonal elements of A. */
+/* On exit, D is overwritten by the n diagonal elements of U. */
+
+/* DU (input/output) COMPLEX*16 array, dimension (N-1) */
+/* On entry, DU must contain the (n-1) superdiagonal elements */
+/* of A. */
+/* On exit, DU is overwritten by the (n-1) elements of the first */
+/* superdiagonal of U. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, U(i,i) is exactly zero, and the solution */
+/* has not been computed. The factorization has not been */
+/* completed unless i = N. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --dl;
+ --d__;
+ --du;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*nrhs < 0) {
+ *info = -2;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGTSV ", &i__1);
+ return 0;
+ }
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ i__1 = *n - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k;
+ if (dl[i__2].r == 0. && dl[i__2].i == 0.) {
+
+/* Subdiagonal is zero, no elimination is required. */
+
+ i__2 = k;
+ if (d__[i__2].r == 0. && d__[i__2].i == 0.) {
+
+/* Diagonal is zero: set INFO = K and return; a unique */
+/* solution can not be found. */
+
+ *info = k;
+ return 0;
+ }
+ } else /* if(complicated condition) */ {
+ i__2 = k;
+ i__3 = k;
+ if ((d__1 = d__[i__2].r, abs(d__1)) + (d__2 = d_imag(&d__[k]),
+ abs(d__2)) >= (d__3 = dl[i__3].r, abs(d__3)) + (d__4 =
+ d_imag(&dl[k]), abs(d__4))) {
+
+/* No row interchange required */
+
+ z_div(&z__1, &dl[k], &d__[k]);
+ mult.r = z__1.r, mult.i = z__1.i;
+ i__2 = k + 1;
+ i__3 = k + 1;
+ i__4 = k;
+ z__2.r = mult.r * du[i__4].r - mult.i * du[i__4].i, z__2.i =
+ mult.r * du[i__4].i + mult.i * du[i__4].r;
+ z__1.r = d__[i__3].r - z__2.r, z__1.i = d__[i__3].i - z__2.i;
+ d__[i__2].r = z__1.r, d__[i__2].i = z__1.i;
+ i__2 = *nrhs;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = k + 1 + j * b_dim1;
+ i__4 = k + 1 + j * b_dim1;
+ i__5 = k + j * b_dim1;
+ z__2.r = mult.r * b[i__5].r - mult.i * b[i__5].i, z__2.i =
+ mult.r * b[i__5].i + mult.i * b[i__5].r;
+ z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4].i - z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L10: */
+ }
+ if (k < *n - 1) {
+ i__2 = k;
+ dl[i__2].r = 0., dl[i__2].i = 0.;
+ }
+ } else {
+
+/* Interchange rows K and K+1 */
+
+ z_div(&z__1, &d__[k], &dl[k]);
+ mult.r = z__1.r, mult.i = z__1.i;
+ i__2 = k;
+ i__3 = k;
+ d__[i__2].r = dl[i__3].r, d__[i__2].i = dl[i__3].i;
+ i__2 = k + 1;
+ temp.r = d__[i__2].r, temp.i = d__[i__2].i;
+ i__2 = k + 1;
+ i__3 = k;
+ z__2.r = mult.r * temp.r - mult.i * temp.i, z__2.i = mult.r *
+ temp.i + mult.i * temp.r;
+ z__1.r = du[i__3].r - z__2.r, z__1.i = du[i__3].i - z__2.i;
+ d__[i__2].r = z__1.r, d__[i__2].i = z__1.i;
+ if (k < *n - 1) {
+ i__2 = k;
+ i__3 = k + 1;
+ dl[i__2].r = du[i__3].r, dl[i__2].i = du[i__3].i;
+ i__2 = k + 1;
+ z__2.r = -mult.r, z__2.i = -mult.i;
+ i__3 = k;
+ z__1.r = z__2.r * dl[i__3].r - z__2.i * dl[i__3].i,
+ z__1.i = z__2.r * dl[i__3].i + z__2.i * dl[i__3]
+ .r;
+ du[i__2].r = z__1.r, du[i__2].i = z__1.i;
+ }
+ i__2 = k;
+ du[i__2].r = temp.r, du[i__2].i = temp.i;
+ i__2 = *nrhs;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = k + j * b_dim1;
+ temp.r = b[i__3].r, temp.i = b[i__3].i;
+ i__3 = k + j * b_dim1;
+ i__4 = k + 1 + j * b_dim1;
+ b[i__3].r = b[i__4].r, b[i__3].i = b[i__4].i;
+ i__3 = k + 1 + j * b_dim1;
+ i__4 = k + 1 + j * b_dim1;
+ z__2.r = mult.r * b[i__4].r - mult.i * b[i__4].i, z__2.i =
+ mult.r * b[i__4].i + mult.i * b[i__4].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i - z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L20: */
+ }
+ }
+ }
+/* L30: */
+ }
+ i__1 = *n;
+ if (d__[i__1].r == 0. && d__[i__1].i == 0.) {
+ *info = *n;
+ return 0;
+ }
+
+/* Back solve with the matrix U from the factorization. */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n + j * b_dim1;
+ z_div(&z__1, &b[*n + j * b_dim1], &d__[*n]);
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ if (*n > 1) {
+ i__2 = *n - 1 + j * b_dim1;
+ i__3 = *n - 1 + j * b_dim1;
+ i__4 = *n - 1;
+ i__5 = *n + j * b_dim1;
+ z__3.r = du[i__4].r * b[i__5].r - du[i__4].i * b[i__5].i, z__3.i =
+ du[i__4].r * b[i__5].i + du[i__4].i * b[i__5].r;
+ z__2.r = b[i__3].r - z__3.r, z__2.i = b[i__3].i - z__3.i;
+ z_div(&z__1, &z__2, &d__[*n - 1]);
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ }
+ for (k = *n - 2; k >= 1; --k) {
+ i__2 = k + j * b_dim1;
+ i__3 = k + j * b_dim1;
+ i__4 = k;
+ i__5 = k + 1 + j * b_dim1;
+ z__4.r = du[i__4].r * b[i__5].r - du[i__4].i * b[i__5].i, z__4.i =
+ du[i__4].r * b[i__5].i + du[i__4].i * b[i__5].r;
+ z__3.r = b[i__3].r - z__4.r, z__3.i = b[i__3].i - z__4.i;
+ i__6 = k;
+ i__7 = k + 2 + j * b_dim1;
+ z__5.r = dl[i__6].r * b[i__7].r - dl[i__6].i * b[i__7].i, z__5.i =
+ dl[i__6].r * b[i__7].i + dl[i__6].i * b[i__7].r;
+ z__2.r = z__3.r - z__5.r, z__2.i = z__3.i - z__5.i;
+ z_div(&z__1, &z__2, &d__[k]);
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L40: */
+ }
+/* L50: */
+ }
+
+ return 0;
+
+/* End of ZGTSV */
+
+} /* zgtsv_ */
diff --git a/contrib/libs/clapack/zgtsvx.c b/contrib/libs/clapack/zgtsvx.c
new file mode 100644
index 0000000000..aeffccd5ec
--- /dev/null
+++ b/contrib/libs/clapack/zgtsvx.c
@@ -0,0 +1,353 @@
+/* zgtsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zgtsvx_(char *fact, char *trans, integer *n, integer *
+ nrhs, doublecomplex *dl, doublecomplex *d__, doublecomplex *du,
+ doublecomplex *dlf, doublecomplex *df, doublecomplex *duf,
+ doublecomplex *du2, integer *ipiv, doublecomplex *b, integer *ldb,
+ doublecomplex *x, integer *ldx, doublereal *rcond, doublereal *ferr,
+ doublereal *berr, doublecomplex *work, doublereal *rwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1;
+
+ /* Local variables */
+ char norm[1];
+ extern logical lsame_(char *, char *);
+ doublereal anorm;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ extern doublereal dlamch_(char *);
+ logical nofact;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern doublereal zlangt_(char *, integer *, doublecomplex *,
+ doublecomplex *, doublecomplex *);
+ logical notran;
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *),
+ zgtcon_(char *, integer *, doublecomplex *, doublecomplex *,
+ doublecomplex *, doublecomplex *, integer *, doublereal *,
+ doublereal *, doublecomplex *, integer *), zgtrfs_(char *,
+ integer *, integer *, doublecomplex *, doublecomplex *,
+ doublecomplex *, doublecomplex *, doublecomplex *, doublecomplex *
+, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublereal *, doublereal *,
+ doublecomplex *, doublereal *, integer *), zgttrf_(
+ integer *, doublecomplex *, doublecomplex *, doublecomplex *,
+ doublecomplex *, integer *, integer *), zgttrs_(char *, integer *,
+ integer *, doublecomplex *, doublecomplex *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGTSVX uses the LU factorization to compute the solution to a complex */
+/* system of linear equations A * X = B, A**T * X = B, or A**H * X = B, */
+/* where A is a tridiagonal matrix of order N and X and B are N-by-NRHS */
+/* matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the LU decomposition is used to factor the matrix A */
+/* as A = L * U, where L is a product of permutation and unit lower */
+/* bidiagonal matrices and U is upper triangular with nonzeros in */
+/* only the main diagonal and first two superdiagonals. */
+
+/* 2. If some U(i,i)=0, so that U is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of A has been */
+/* supplied on entry. */
+/* = 'F': DLF, DF, DUF, DU2, and IPIV contain the factored form */
+/* of A; DL, D, DU, DLF, DF, DUF, DU2 and IPIV will not */
+/* be modified. */
+/* = 'N': The matrix will be copied to DLF, DF, and DUF */
+/* and factored. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of A. */
+
+/* D (input) COMPLEX*16 array, dimension (N) */
+/* The n diagonal elements of A. */
+
+/* DU (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) superdiagonal elements of A. */
+
+/* DLF (input or output) COMPLEX*16 array, dimension (N-1) */
+/* If FACT = 'F', then DLF is an input argument and on entry */
+/* contains the (n-1) multipliers that define the matrix L from */
+/* the LU factorization of A as computed by ZGTTRF. */
+
+/* If FACT = 'N', then DLF is an output argument and on exit */
+/* contains the (n-1) multipliers that define the matrix L from */
+/* the LU factorization of A. */
+
+/* DF (input or output) COMPLEX*16 array, dimension (N) */
+/* If FACT = 'F', then DF is an input argument and on entry */
+/* contains the n diagonal elements of the upper triangular */
+/* matrix U from the LU factorization of A. */
+
+/* If FACT = 'N', then DF is an output argument and on exit */
+/* contains the n diagonal elements of the upper triangular */
+/* matrix U from the LU factorization of A. */
+
+/* DUF (input or output) COMPLEX*16 array, dimension (N-1) */
+/* If FACT = 'F', then DUF is an input argument and on entry */
+/* contains the (n-1) elements of the first superdiagonal of U. */
+
+/* If FACT = 'N', then DUF is an output argument and on exit */
+/* contains the (n-1) elements of the first superdiagonal of U. */
+
+/* DU2 (input or output) COMPLEX*16 array, dimension (N-2) */
+/* If FACT = 'F', then DU2 is an input argument and on entry */
+/* contains the (n-2) elements of the second superdiagonal of */
+/* U. */
+
+/* If FACT = 'N', then DU2 is an output argument and on exit */
+/* contains the (n-2) elements of the second superdiagonal of */
+/* U. */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains the pivot indices from the LU factorization of A as */
+/* computed by ZGTTRF. */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains the pivot indices from the LU factorization of A; */
+/* row i of the matrix was interchanged with row IPIV(i). */
+/* IPIV(i) will always be either i or i+1; IPIV(i) = i indicates */
+/* a row interchange was not required. */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A. If RCOND is less than the machine precision (in */
+/* particular, if RCOND = 0), the matrix is singular to working */
+/* precision. This condition is indicated by a return code of */
+/* INFO > 0. */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: U(i,i) is exactly zero. The factorization */
+/* has not been completed unless i = N, but the */
+/* factor U is exactly singular, so the solution */
+/* and error bounds could not be computed. */
+/* RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --dl;
+ --d__;
+ --du;
+ --dlf;
+ --df;
+ --duf;
+ --du2;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ notran = lsame_(trans, "N");
+ if (! nofact && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*ldb < max(1,*n)) {
+ *info = -14;
+ } else if (*ldx < max(1,*n)) {
+ *info = -16;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGTSVX", &i__1);
+ return 0;
+ }
+
+ if (nofact) {
+
+/* Compute the LU factorization of A. */
+
+ zcopy_(n, &d__[1], &c__1, &df[1], &c__1);
+ if (*n > 1) {
+ i__1 = *n - 1;
+ zcopy_(&i__1, &dl[1], &c__1, &dlf[1], &c__1);
+ i__1 = *n - 1;
+ zcopy_(&i__1, &du[1], &c__1, &duf[1], &c__1);
+ }
+ zgttrf_(n, &dlf[1], &df[1], &duf[1], &du2[1], &ipiv[1], info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ if (notran) {
+ *(unsigned char *)norm = '1';
+ } else {
+ *(unsigned char *)norm = 'I';
+ }
+ anorm = zlangt_(norm, n, &dl[1], &d__[1], &du[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ zgtcon_(norm, n, &dlf[1], &df[1], &duf[1], &du2[1], &ipiv[1], &anorm,
+ rcond, &work[1], info);
+
+/* Compute the solution vectors X. */
+
+ zlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ zgttrs_(trans, n, nrhs, &dlf[1], &df[1], &duf[1], &du2[1], &ipiv[1], &x[
+ x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+ zgtrfs_(trans, n, nrhs, &dl[1], &d__[1], &du[1], &dlf[1], &df[1], &duf[1],
+ &du2[1], &ipiv[1], &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1]
+, &berr[1], &work[1], &rwork[1], info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < dlamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ return 0;
+
+/* End of ZGTSVX */
+
+} /* zgtsvx_ */
diff --git a/contrib/libs/clapack/zgttrf.c b/contrib/libs/clapack/zgttrf.c
new file mode 100644
index 0000000000..714ae0fac6
--- /dev/null
+++ b/contrib/libs/clapack/zgttrf.c
@@ -0,0 +1,275 @@
+/* zgttrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zgttrf_(integer *n, doublecomplex *dl, doublecomplex *
+ d__, doublecomplex *du, doublecomplex *du2, integer *ipiv, integer *
+ info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__;
+ doublecomplex fact, temp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGTTRF computes an LU factorization of a complex tridiagonal matrix A */
+/* using elimination with partial pivoting and row interchanges. */
+
+/* The factorization has the form */
+/* A = L * U */
+/* where L is a product of permutation and unit lower bidiagonal */
+/* matrices and U is upper triangular with nonzeros in only the main */
+/* diagonal and first two superdiagonals. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* DL (input/output) COMPLEX*16 array, dimension (N-1) */
+/* On entry, DL must contain the (n-1) sub-diagonal elements of */
+/* A. */
+
+/* On exit, DL is overwritten by the (n-1) multipliers that */
+/* define the matrix L from the LU factorization of A. */
+
+/* D (input/output) COMPLEX*16 array, dimension (N) */
+/* On entry, D must contain the diagonal elements of A. */
+
+/* On exit, D is overwritten by the n diagonal elements of the */
+/* upper triangular matrix U from the LU factorization of A. */
+
+/* DU (input/output) COMPLEX*16 array, dimension (N-1) */
+/* On entry, DU must contain the (n-1) super-diagonal elements */
+/* of A. */
+
+/* On exit, DU is overwritten by the (n-1) elements of the first */
+/* super-diagonal of U. */
+
+/* DU2 (output) COMPLEX*16 array, dimension (N-2) */
+/* On exit, DU2 is overwritten by the (n-2) elements of the */
+/* second super-diagonal of U. */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/* interchanged with row IPIV(i). IPIV(i) will always be either */
+/* i or i+1; IPIV(i) = i indicates a row interchange was not */
+/* required. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, U(k,k) is exactly zero. The factorization */
+/* has been completed, but the factor U is exactly */
+/* singular, and division by zero will occur if it is used */
+/* to solve a system of equations. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --ipiv;
+ --du2;
+ --du;
+ --d__;
+ --dl;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ i__1 = -(*info);
+ xerbla_("ZGTTRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Initialize IPIV(i) = i and DU2(i) = 0 */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ipiv[i__] = i__;
+/* L10: */
+ }
+ i__1 = *n - 2;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ du2[i__2].r = 0., du2[i__2].i = 0.;
+/* L20: */
+ }
+
+ i__1 = *n - 2;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ if ((d__1 = d__[i__2].r, abs(d__1)) + (d__2 = d_imag(&d__[i__]), abs(
+ d__2)) >= (d__3 = dl[i__3].r, abs(d__3)) + (d__4 = d_imag(&dl[
+ i__]), abs(d__4))) {
+
+/* No row interchange required, eliminate DL(I) */
+
+ i__2 = i__;
+ if ((d__1 = d__[i__2].r, abs(d__1)) + (d__2 = d_imag(&d__[i__]),
+ abs(d__2)) != 0.) {
+ z_div(&z__1, &dl[i__], &d__[i__]);
+ fact.r = z__1.r, fact.i = z__1.i;
+ i__2 = i__;
+ dl[i__2].r = fact.r, dl[i__2].i = fact.i;
+ i__2 = i__ + 1;
+ i__3 = i__ + 1;
+ i__4 = i__;
+ z__2.r = fact.r * du[i__4].r - fact.i * du[i__4].i, z__2.i =
+ fact.r * du[i__4].i + fact.i * du[i__4].r;
+ z__1.r = d__[i__3].r - z__2.r, z__1.i = d__[i__3].i - z__2.i;
+ d__[i__2].r = z__1.r, d__[i__2].i = z__1.i;
+ }
+ } else {
+
+/* Interchange rows I and I+1, eliminate DL(I) */
+
+ z_div(&z__1, &d__[i__], &dl[i__]);
+ fact.r = z__1.r, fact.i = z__1.i;
+ i__2 = i__;
+ i__3 = i__;
+ d__[i__2].r = dl[i__3].r, d__[i__2].i = dl[i__3].i;
+ i__2 = i__;
+ dl[i__2].r = fact.r, dl[i__2].i = fact.i;
+ i__2 = i__;
+ temp.r = du[i__2].r, temp.i = du[i__2].i;
+ i__2 = i__;
+ i__3 = i__ + 1;
+ du[i__2].r = d__[i__3].r, du[i__2].i = d__[i__3].i;
+ i__2 = i__ + 1;
+ i__3 = i__ + 1;
+ z__2.r = fact.r * d__[i__3].r - fact.i * d__[i__3].i, z__2.i =
+ fact.r * d__[i__3].i + fact.i * d__[i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i - z__2.i;
+ d__[i__2].r = z__1.r, d__[i__2].i = z__1.i;
+ i__2 = i__;
+ i__3 = i__ + 1;
+ du2[i__2].r = du[i__3].r, du2[i__2].i = du[i__3].i;
+ i__2 = i__ + 1;
+ z__2.r = -fact.r, z__2.i = -fact.i;
+ i__3 = i__ + 1;
+ z__1.r = z__2.r * du[i__3].r - z__2.i * du[i__3].i, z__1.i =
+ z__2.r * du[i__3].i + z__2.i * du[i__3].r;
+ du[i__2].r = z__1.r, du[i__2].i = z__1.i;
+ ipiv[i__] = i__ + 1;
+ }
+/* L30: */
+ }
+ if (*n > 1) {
+ i__ = *n - 1;
+ i__1 = i__;
+ i__2 = i__;
+ if ((d__1 = d__[i__1].r, abs(d__1)) + (d__2 = d_imag(&d__[i__]), abs(
+ d__2)) >= (d__3 = dl[i__2].r, abs(d__3)) + (d__4 = d_imag(&dl[
+ i__]), abs(d__4))) {
+ i__1 = i__;
+ if ((d__1 = d__[i__1].r, abs(d__1)) + (d__2 = d_imag(&d__[i__]),
+ abs(d__2)) != 0.) {
+ z_div(&z__1, &dl[i__], &d__[i__]);
+ fact.r = z__1.r, fact.i = z__1.i;
+ i__1 = i__;
+ dl[i__1].r = fact.r, dl[i__1].i = fact.i;
+ i__1 = i__ + 1;
+ i__2 = i__ + 1;
+ i__3 = i__;
+ z__2.r = fact.r * du[i__3].r - fact.i * du[i__3].i, z__2.i =
+ fact.r * du[i__3].i + fact.i * du[i__3].r;
+ z__1.r = d__[i__2].r - z__2.r, z__1.i = d__[i__2].i - z__2.i;
+ d__[i__1].r = z__1.r, d__[i__1].i = z__1.i;
+ }
+ } else {
+ z_div(&z__1, &d__[i__], &dl[i__]);
+ fact.r = z__1.r, fact.i = z__1.i;
+ i__1 = i__;
+ i__2 = i__;
+ d__[i__1].r = dl[i__2].r, d__[i__1].i = dl[i__2].i;
+ i__1 = i__;
+ dl[i__1].r = fact.r, dl[i__1].i = fact.i;
+ i__1 = i__;
+ temp.r = du[i__1].r, temp.i = du[i__1].i;
+ i__1 = i__;
+ i__2 = i__ + 1;
+ du[i__1].r = d__[i__2].r, du[i__1].i = d__[i__2].i;
+ i__1 = i__ + 1;
+ i__2 = i__ + 1;
+ z__2.r = fact.r * d__[i__2].r - fact.i * d__[i__2].i, z__2.i =
+ fact.r * d__[i__2].i + fact.i * d__[i__2].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i - z__2.i;
+ d__[i__1].r = z__1.r, d__[i__1].i = z__1.i;
+ ipiv[i__] = i__ + 1;
+ }
+ }
+
+/* Check for a zero on the diagonal of U. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ if ((d__1 = d__[i__2].r, abs(d__1)) + (d__2 = d_imag(&d__[i__]), abs(
+ d__2)) == 0.) {
+ *info = i__;
+ goto L50;
+ }
+/* L40: */
+ }
+L50:
+
+ return 0;
+
+/* End of ZGTTRF */
+
+} /* zgttrf_ */
diff --git a/contrib/libs/clapack/zgttrs.c b/contrib/libs/clapack/zgttrs.c
new file mode 100644
index 0000000000..f8130e22cd
--- /dev/null
+++ b/contrib/libs/clapack/zgttrs.c
@@ -0,0 +1,194 @@
+/* zgttrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zgttrs_(char *trans, integer *n, integer *nrhs,
+ doublecomplex *dl, doublecomplex *d__, doublecomplex *du,
+ doublecomplex *du2, integer *ipiv, doublecomplex *b, integer *ldb,
+ integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer j, jb, nb;
+ extern /* Subroutine */ int zgtts2_(integer *, integer *, integer *,
+ doublecomplex *, doublecomplex *, doublecomplex *, doublecomplex *
+, integer *, doublecomplex *, integer *), xerbla_(char *, integer
+ *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer itrans;
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGTTRS solves one of the systems of equations */
+/* A * X = B, A**T * X = B, or A**H * X = B, */
+/* with a tridiagonal matrix A using the LU factorization computed */
+/* by ZGTTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations. */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) multipliers that define the matrix L from the */
+/* LU factorization of A. */
+
+/* D (input) COMPLEX*16 array, dimension (N) */
+/* The n diagonal elements of the upper triangular matrix U from */
+/* the LU factorization of A. */
+
+/* DU (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) elements of the first super-diagonal of U. */
+
+/* DU2 (input) COMPLEX*16 array, dimension (N-2) */
+/* The (n-2) elements of the second super-diagonal of U. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/* interchanged with row IPIV(i). IPIV(i) will always be either */
+/* i or i+1; IPIV(i) = i indicates a row interchange was not */
+/* required. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the matrix of right hand side vectors B. */
+/* On exit, B is overwritten by the solution vectors X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --dl;
+ --d__;
+ --du;
+ --du2;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ notran = *(unsigned char *)trans == 'N' || *(unsigned char *)trans == 'n';
+ if (! notran && ! (*(unsigned char *)trans == 'T' || *(unsigned char *)
+ trans == 't') && ! (*(unsigned char *)trans == 'C' || *(unsigned
+ char *)trans == 'c')) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(*n,1)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZGTTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+/* Decode TRANS */
+
+ if (notran) {
+ itrans = 0;
+ } else if (*(unsigned char *)trans == 'T' || *(unsigned char *)trans ==
+ 't') {
+ itrans = 1;
+ } else {
+ itrans = 2;
+ }
+
+/* Determine the number of right-hand sides to solve at a time. */
+
+ if (*nrhs == 1) {
+ nb = 1;
+ } else {
+/* Computing MAX */
+ i__1 = 1, i__2 = ilaenv_(&c__1, "ZGTTRS", trans, n, nrhs, &c_n1, &
+ c_n1);
+ nb = max(i__1,i__2);
+ }
+
+ if (nb >= *nrhs) {
+ zgtts2_(&itrans, n, nrhs, &dl[1], &d__[1], &du[1], &du2[1], &ipiv[1],
+ &b[b_offset], ldb);
+ } else {
+ i__1 = *nrhs;
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__3 = *nrhs - j + 1;
+ jb = min(i__3,nb);
+ zgtts2_(&itrans, n, &jb, &dl[1], &d__[1], &du[1], &du2[1], &ipiv[
+ 1], &b[j * b_dim1 + 1], ldb);
+/* L10: */
+ }
+ }
+
+/* End of ZGTTRS */
+
+ return 0;
+} /* zgttrs_ */
diff --git a/contrib/libs/clapack/zgtts2.c b/contrib/libs/clapack/zgtts2.c
new file mode 100644
index 0000000000..fc98b35d2c
--- /dev/null
+++ b/contrib/libs/clapack/zgtts2.c
@@ -0,0 +1,584 @@
+/* zgtts2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zgtts2_(integer *itrans, integer *n, integer *nrhs, 
+	doublecomplex *dl, doublecomplex *d__, doublecomplex *du, 
+	doublecomplex *du2, integer *ipiv, doublecomplex *b, integer *ldb)
+{
+    /* System generated locals */
+    /* i__1..i__8 and z__1..z__8 are f2c scratch temporaries used to */
+    /* flatten Fortran array subscripts and complex expressions. */
+    integer b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7, i__8;
+    doublecomplex z__1, z__2, z__3, z__4, z__5, z__6, z__7, z__8;
+
+    /* Builtin functions */
+    void z_div(doublecomplex *, doublecomplex *, doublecomplex *), d_cnjg(
+	    doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__, j;
+    doublecomplex temp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZGTTS2 solves one of the systems of equations */
+/* A * X = B, A**T * X = B, or A**H * X = B, */
+/* with a tridiagonal matrix A using the LU factorization computed */
+/* by ZGTTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* ITRANS (input) INTEGER */
+/* Specifies the form of the system of equations. */
+/* = 0: A * X = B (No transpose) */
+/* = 1: A**T * X = B (Transpose) */
+/* = 2: A**H * X = B (Conjugate transpose) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* DL (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) multipliers that define the matrix L from the */
+/* LU factorization of A. */
+
+/* D (input) COMPLEX*16 array, dimension (N) */
+/* The n diagonal elements of the upper triangular matrix U from */
+/* the LU factorization of A. */
+
+/* DU (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) elements of the first super-diagonal of U. */
+
+/* DU2 (input) COMPLEX*16 array, dimension (N-2) */
+/* The (n-2) elements of the second super-diagonal of U. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* The pivot indices; for 1 <= i <= n, row i of the matrix was */
+/* interchanged with row IPIV(i). IPIV(i) will always be either */
+/* i or i+1; IPIV(i) = i indicates a row interchange was not */
+/* required. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the matrix of right hand side vectors B. */
+/* On exit, B is overwritten by the solution vectors X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+/* The pointer decrements below shift every array so the Fortran-style */
+/* 1-based subscripts used in the body remain valid; B(i,j) is then */
+/* addressed column-major as b[i + j * b_dim1]. */
+
+    /* Parameter adjustments */
+    --dl;
+    --d__;
+    --du;
+    --du2;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+    if (*itrans == 0) {
+
+/* Solve A*X = B using the LU factorization of A, */
+/* overwriting each right hand side vector with its solution. */
+
+/* For a single right hand side the column loop is expressed with a */
+/* goto (label L10), mirroring the original Fortran DO/CONTINUE; the */
+/* multi-RHS branch below uses an ordinary for loop instead. */
+
+	if (*nrhs <= 1) {
+	    j = 1;
+L10:
+
+/* Solve L*x = b. */
+
+/* Forward elimination: apply the multipliers dl[i] together with the */
+/* row interchanges recorded in ipiv (ipiv[i] == i means no swap). */
+
+	    i__1 = *n - 1;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		if (ipiv[i__] == i__) {
+		    i__2 = i__ + 1 + j * b_dim1;
+		    i__3 = i__ + 1 + j * b_dim1;
+		    i__4 = i__;
+		    i__5 = i__ + j * b_dim1;
+		    z__2.r = dl[i__4].r * b[i__5].r - dl[i__4].i * b[i__5].i, 
+			    z__2.i = dl[i__4].r * b[i__5].i + dl[i__4].i * b[
+			    i__5].r;
+		    z__1.r = b[i__3].r - z__2.r, z__1.i = b[i__3].i - z__2.i;
+		    b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+		} else {
+		    i__2 = i__ + j * b_dim1;
+		    temp.r = b[i__2].r, temp.i = b[i__2].i;
+		    i__2 = i__ + j * b_dim1;
+		    i__3 = i__ + 1 + j * b_dim1;
+		    b[i__2].r = b[i__3].r, b[i__2].i = b[i__3].i;
+		    i__2 = i__ + 1 + j * b_dim1;
+		    i__3 = i__;
+		    i__4 = i__ + j * b_dim1;
+		    z__2.r = dl[i__3].r * b[i__4].r - dl[i__3].i * b[i__4].i, 
+			    z__2.i = dl[i__3].r * b[i__4].i + dl[i__3].i * b[
+			    i__4].r;
+		    z__1.r = temp.r - z__2.r, z__1.i = temp.i - z__2.i;
+		    b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+		}
+/* L20: */
+	    }
+
+/* Solve U*x = b. */
+
+/* Back substitution against the upper triangular factor, whose */
+/* diagonals are d__, first super-diagonal du, second super-diagonal */
+/* du2 (complex division via the f2c builtin z_div). */
+
+	    i__1 = *n + j * b_dim1;
+	    z_div(&z__1, &b[*n + j * b_dim1], &d__[*n]);
+	    b[i__1].r = z__1.r, b[i__1].i = z__1.i;
+	    if (*n > 1) {
+		i__1 = *n - 1 + j * b_dim1;
+		i__2 = *n - 1 + j * b_dim1;
+		i__3 = *n - 1;
+		i__4 = *n + j * b_dim1;
+		z__3.r = du[i__3].r * b[i__4].r - du[i__3].i * b[i__4].i, 
+			z__3.i = du[i__3].r * b[i__4].i + du[i__3].i * b[i__4]
+			.r;
+		z__2.r = b[i__2].r - z__3.r, z__2.i = b[i__2].i - z__3.i;
+		z_div(&z__1, &z__2, &d__[*n - 1]);
+		b[i__1].r = z__1.r, b[i__1].i = z__1.i;
+	    }
+	    for (i__ = *n - 2; i__ >= 1; --i__) {
+		i__1 = i__ + j * b_dim1;
+		i__2 = i__ + j * b_dim1;
+		i__3 = i__;
+		i__4 = i__ + 1 + j * b_dim1;
+		z__4.r = du[i__3].r * b[i__4].r - du[i__3].i * b[i__4].i, 
+			z__4.i = du[i__3].r * b[i__4].i + du[i__3].i * b[i__4]
+			.r;
+		z__3.r = b[i__2].r - z__4.r, z__3.i = b[i__2].i - z__4.i;
+		i__5 = i__;
+		i__6 = i__ + 2 + j * b_dim1;
+		z__5.r = du2[i__5].r * b[i__6].r - du2[i__5].i * b[i__6].i, 
+			z__5.i = du2[i__5].r * b[i__6].i + du2[i__5].i * b[
+			i__6].r;
+		z__2.r = z__3.r - z__5.r, z__2.i = z__3.i - z__5.i;
+		z_div(&z__1, &z__2, &d__[i__]);
+		b[i__1].r = z__1.r, b[i__1].i = z__1.i;
+/* L30: */
+	    }
+	    if (j < *nrhs) {
+		++j;
+		goto L10;
+	    }
+	} else {
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+
+/* Solve L*x = b. */
+
+		i__2 = *n - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    if (ipiv[i__] == i__) {
+			i__3 = i__ + 1 + j * b_dim1;
+			i__4 = i__ + 1 + j * b_dim1;
+			i__5 = i__;
+			i__6 = i__ + j * b_dim1;
+			z__2.r = dl[i__5].r * b[i__6].r - dl[i__5].i * b[i__6]
+				.i, z__2.i = dl[i__5].r * b[i__6].i + dl[i__5]
+				.i * b[i__6].r;
+			z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4].i - 
+				z__2.i;
+			b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+		    } else {
+			i__3 = i__ + j * b_dim1;
+			temp.r = b[i__3].r, temp.i = b[i__3].i;
+			i__3 = i__ + j * b_dim1;
+			i__4 = i__ + 1 + j * b_dim1;
+			b[i__3].r = b[i__4].r, b[i__3].i = b[i__4].i;
+			i__3 = i__ + 1 + j * b_dim1;
+			i__4 = i__;
+			i__5 = i__ + j * b_dim1;
+			z__2.r = dl[i__4].r * b[i__5].r - dl[i__4].i * b[i__5]
+				.i, z__2.i = dl[i__4].r * b[i__5].i + dl[i__4]
+				.i * b[i__5].r;
+			z__1.r = temp.r - z__2.r, z__1.i = temp.i - z__2.i;
+			b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+		    }
+/* L40: */
+		}
+
+/* Solve U*x = b. */
+
+		i__2 = *n + j * b_dim1;
+		z_div(&z__1, &b[*n + j * b_dim1], &d__[*n]);
+		b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+		if (*n > 1) {
+		    i__2 = *n - 1 + j * b_dim1;
+		    i__3 = *n - 1 + j * b_dim1;
+		    i__4 = *n - 1;
+		    i__5 = *n + j * b_dim1;
+		    z__3.r = du[i__4].r * b[i__5].r - du[i__4].i * b[i__5].i, 
+			    z__3.i = du[i__4].r * b[i__5].i + du[i__4].i * b[
+			    i__5].r;
+		    z__2.r = b[i__3].r - z__3.r, z__2.i = b[i__3].i - z__3.i;
+		    z_div(&z__1, &z__2, &d__[*n - 1]);
+		    b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+		}
+		for (i__ = *n - 2; i__ >= 1; --i__) {
+		    i__2 = i__ + j * b_dim1;
+		    i__3 = i__ + j * b_dim1;
+		    i__4 = i__;
+		    i__5 = i__ + 1 + j * b_dim1;
+		    z__4.r = du[i__4].r * b[i__5].r - du[i__4].i * b[i__5].i, 
+			    z__4.i = du[i__4].r * b[i__5].i + du[i__4].i * b[
+			    i__5].r;
+		    z__3.r = b[i__3].r - z__4.r, z__3.i = b[i__3].i - z__4.i;
+		    i__6 = i__;
+		    i__7 = i__ + 2 + j * b_dim1;
+		    z__5.r = du2[i__6].r * b[i__7].r - du2[i__6].i * b[i__7]
+			    .i, z__5.i = du2[i__6].r * b[i__7].i + du2[i__6]
+			    .i * b[i__7].r;
+		    z__2.r = z__3.r - z__5.r, z__2.i = z__3.i - z__5.i;
+		    z_div(&z__1, &z__2, &d__[i__]);
+		    b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L50: */
+		}
+/* L60: */
+	    }
+	}
+    } else if (*itrans == 1) {
+
+/* Solve A**T * X = B. */
+
+/* Transposed system: U**T (lower triangular) is solved forward first, */
+/* then L**T backward, applying interchanges in reverse order. */
+
+	if (*nrhs <= 1) {
+	    j = 1;
+L70:
+
+/* Solve U**T * x = b. */
+
+	    i__1 = j * b_dim1 + 1;
+	    z_div(&z__1, &b[j * b_dim1 + 1], &d__[1]);
+	    b[i__1].r = z__1.r, b[i__1].i = z__1.i;
+	    if (*n > 1) {
+		i__1 = j * b_dim1 + 2;
+		i__2 = j * b_dim1 + 2;
+		i__3 = j * b_dim1 + 1;
+		z__3.r = du[1].r * b[i__3].r - du[1].i * b[i__3].i, z__3.i = 
+			du[1].r * b[i__3].i + du[1].i * b[i__3].r;
+		z__2.r = b[i__2].r - z__3.r, z__2.i = b[i__2].i - z__3.i;
+		z_div(&z__1, &z__2, &d__[2]);
+		b[i__1].r = z__1.r, b[i__1].i = z__1.i;
+	    }
+	    i__1 = *n;
+	    for (i__ = 3; i__ <= i__1; ++i__) {
+		i__2 = i__ + j * b_dim1;
+		i__3 = i__ + j * b_dim1;
+		i__4 = i__ - 1;
+		i__5 = i__ - 1 + j * b_dim1;
+		z__4.r = du[i__4].r * b[i__5].r - du[i__4].i * b[i__5].i, 
+			z__4.i = du[i__4].r * b[i__5].i + du[i__4].i * b[i__5]
+			.r;
+		z__3.r = b[i__3].r - z__4.r, z__3.i = b[i__3].i - z__4.i;
+		i__6 = i__ - 2;
+		i__7 = i__ - 2 + j * b_dim1;
+		z__5.r = du2[i__6].r * b[i__7].r - du2[i__6].i * b[i__7].i, 
+			z__5.i = du2[i__6].r * b[i__7].i + du2[i__6].i * b[
+			i__7].r;
+		z__2.r = z__3.r - z__5.r, z__2.i = z__3.i - z__5.i;
+		z_div(&z__1, &z__2, &d__[i__]);
+		b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L80: */
+	    }
+
+/* Solve L**T * x = b. */
+
+	    for (i__ = *n - 1; i__ >= 1; --i__) {
+		if (ipiv[i__] == i__) {
+		    i__1 = i__ + j * b_dim1;
+		    i__2 = i__ + j * b_dim1;
+		    i__3 = i__;
+		    i__4 = i__ + 1 + j * b_dim1;
+		    z__2.r = dl[i__3].r * b[i__4].r - dl[i__3].i * b[i__4].i, 
+			    z__2.i = dl[i__3].r * b[i__4].i + dl[i__3].i * b[
+			    i__4].r;
+		    z__1.r = b[i__2].r - z__2.r, z__1.i = b[i__2].i - z__2.i;
+		    b[i__1].r = z__1.r, b[i__1].i = z__1.i;
+		} else {
+		    i__1 = i__ + 1 + j * b_dim1;
+		    temp.r = b[i__1].r, temp.i = b[i__1].i;
+		    i__1 = i__ + 1 + j * b_dim1;
+		    i__2 = i__ + j * b_dim1;
+		    i__3 = i__;
+		    z__2.r = dl[i__3].r * temp.r - dl[i__3].i * temp.i, 
+			    z__2.i = dl[i__3].r * temp.i + dl[i__3].i * 
+			    temp.r;
+		    z__1.r = b[i__2].r - z__2.r, z__1.i = b[i__2].i - z__2.i;
+		    b[i__1].r = z__1.r, b[i__1].i = z__1.i;
+		    i__1 = i__ + j * b_dim1;
+		    b[i__1].r = temp.r, b[i__1].i = temp.i;
+		}
+/* L90: */
+	    }
+	    if (j < *nrhs) {
+		++j;
+		goto L70;
+	    }
+	} else {
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+
+/* Solve U**T * x = b. */
+
+		i__2 = j * b_dim1 + 1;
+		z_div(&z__1, &b[j * b_dim1 + 1], &d__[1]);
+		b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+		if (*n > 1) {
+		    i__2 = j * b_dim1 + 2;
+		    i__3 = j * b_dim1 + 2;
+		    i__4 = j * b_dim1 + 1;
+		    z__3.r = du[1].r * b[i__4].r - du[1].i * b[i__4].i, 
+			    z__3.i = du[1].r * b[i__4].i + du[1].i * b[i__4]
+			    .r;
+		    z__2.r = b[i__3].r - z__3.r, z__2.i = b[i__3].i - z__3.i;
+		    z_div(&z__1, &z__2, &d__[2]);
+		    b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+		}
+		i__2 = *n;
+		for (i__ = 3; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * b_dim1;
+		    i__4 = i__ + j * b_dim1;
+		    i__5 = i__ - 1;
+		    i__6 = i__ - 1 + j * b_dim1;
+		    z__4.r = du[i__5].r * b[i__6].r - du[i__5].i * b[i__6].i, 
+			    z__4.i = du[i__5].r * b[i__6].i + du[i__5].i * b[
+			    i__6].r;
+		    z__3.r = b[i__4].r - z__4.r, z__3.i = b[i__4].i - z__4.i;
+		    i__7 = i__ - 2;
+		    i__8 = i__ - 2 + j * b_dim1;
+		    z__5.r = du2[i__7].r * b[i__8].r - du2[i__7].i * b[i__8]
+			    .i, z__5.i = du2[i__7].r * b[i__8].i + du2[i__7]
+			    .i * b[i__8].r;
+		    z__2.r = z__3.r - z__5.r, z__2.i = z__3.i - z__5.i;
+		    z_div(&z__1, &z__2, &d__[i__]);
+		    b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L100: */
+		}
+
+/* Solve L**T * x = b. */
+
+		for (i__ = *n - 1; i__ >= 1; --i__) {
+		    if (ipiv[i__] == i__) {
+			i__2 = i__ + j * b_dim1;
+			i__3 = i__ + j * b_dim1;
+			i__4 = i__;
+			i__5 = i__ + 1 + j * b_dim1;
+			z__2.r = dl[i__4].r * b[i__5].r - dl[i__4].i * b[i__5]
+				.i, z__2.i = dl[i__4].r * b[i__5].i + dl[i__4]
+				.i * b[i__5].r;
+			z__1.r = b[i__3].r - z__2.r, z__1.i = b[i__3].i - 
+				z__2.i;
+			b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+		    } else {
+			i__2 = i__ + 1 + j * b_dim1;
+			temp.r = b[i__2].r, temp.i = b[i__2].i;
+			i__2 = i__ + 1 + j * b_dim1;
+			i__3 = i__ + j * b_dim1;
+			i__4 = i__;
+			z__2.r = dl[i__4].r * temp.r - dl[i__4].i * temp.i, 
+				z__2.i = dl[i__4].r * temp.i + dl[i__4].i * 
+				temp.r;
+			z__1.r = b[i__3].r - z__2.r, z__1.i = b[i__3].i - 
+				z__2.i;
+			b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+			i__2 = i__ + j * b_dim1;
+			b[i__2].r = temp.r, b[i__2].i = temp.i;
+		    }
+/* L110: */
+		}
+/* L120: */
+	    }
+	}
+    } else {
+
+/* Solve A**H * X = B. */
+
+/* Conjugate-transpose system: identical structure to ITRANS = 1 but */
+/* every factor entry is conjugated via d_cnjg before use. */
+
+	if (*nrhs <= 1) {
+	    j = 1;
+L130:
+
+/* Solve U**H * x = b. */
+
+	    i__1 = j * b_dim1 + 1;
+	    d_cnjg(&z__2, &d__[1]);
+	    z_div(&z__1, &b[j * b_dim1 + 1], &z__2);
+	    b[i__1].r = z__1.r, b[i__1].i = z__1.i;
+	    if (*n > 1) {
+		i__1 = j * b_dim1 + 2;
+		i__2 = j * b_dim1 + 2;
+		d_cnjg(&z__4, &du[1]);
+		i__3 = j * b_dim1 + 1;
+		z__3.r = z__4.r * b[i__3].r - z__4.i * b[i__3].i, z__3.i = 
+			z__4.r * b[i__3].i + z__4.i * b[i__3].r;
+		z__2.r = b[i__2].r - z__3.r, z__2.i = b[i__2].i - z__3.i;
+		d_cnjg(&z__5, &d__[2]);
+		z_div(&z__1, &z__2, &z__5);
+		b[i__1].r = z__1.r, b[i__1].i = z__1.i;
+	    }
+	    i__1 = *n;
+	    for (i__ = 3; i__ <= i__1; ++i__) {
+		i__2 = i__ + j * b_dim1;
+		i__3 = i__ + j * b_dim1;
+		d_cnjg(&z__5, &du[i__ - 1]);
+		i__4 = i__ - 1 + j * b_dim1;
+		z__4.r = z__5.r * b[i__4].r - z__5.i * b[i__4].i, z__4.i = 
+			z__5.r * b[i__4].i + z__5.i * b[i__4].r;
+		z__3.r = b[i__3].r - z__4.r, z__3.i = b[i__3].i - z__4.i;
+		d_cnjg(&z__7, &du2[i__ - 2]);
+		i__5 = i__ - 2 + j * b_dim1;
+		z__6.r = z__7.r * b[i__5].r - z__7.i * b[i__5].i, z__6.i = 
+			z__7.r * b[i__5].i + z__7.i * b[i__5].r;
+		z__2.r = z__3.r - z__6.r, z__2.i = z__3.i - z__6.i;
+		d_cnjg(&z__8, &d__[i__]);
+		z_div(&z__1, &z__2, &z__8);
+		b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L140: */
+	    }
+
+/* Solve L**H * x = b. */
+
+	    for (i__ = *n - 1; i__ >= 1; --i__) {
+		if (ipiv[i__] == i__) {
+		    i__1 = i__ + j * b_dim1;
+		    i__2 = i__ + j * b_dim1;
+		    d_cnjg(&z__3, &dl[i__]);
+		    i__3 = i__ + 1 + j * b_dim1;
+		    z__2.r = z__3.r * b[i__3].r - z__3.i * b[i__3].i, z__2.i = 
+			    z__3.r * b[i__3].i + z__3.i * b[i__3].r;
+		    z__1.r = b[i__2].r - z__2.r, z__1.i = b[i__2].i - z__2.i;
+		    b[i__1].r = z__1.r, b[i__1].i = z__1.i;
+		} else {
+		    i__1 = i__ + 1 + j * b_dim1;
+		    temp.r = b[i__1].r, temp.i = b[i__1].i;
+		    i__1 = i__ + 1 + j * b_dim1;
+		    i__2 = i__ + j * b_dim1;
+		    d_cnjg(&z__3, &dl[i__]);
+		    z__2.r = z__3.r * temp.r - z__3.i * temp.i, z__2.i = 
+			    z__3.r * temp.i + z__3.i * temp.r;
+		    z__1.r = b[i__2].r - z__2.r, z__1.i = b[i__2].i - z__2.i;
+		    b[i__1].r = z__1.r, b[i__1].i = z__1.i;
+		    i__1 = i__ + j * b_dim1;
+		    b[i__1].r = temp.r, b[i__1].i = temp.i;
+		}
+/* L150: */
+	    }
+	    if (j < *nrhs) {
+		++j;
+		goto L130;
+	    }
+	} else {
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+
+/* Solve U**H * x = b. */
+
+		i__2 = j * b_dim1 + 1;
+		d_cnjg(&z__2, &d__[1]);
+		z_div(&z__1, &b[j * b_dim1 + 1], &z__2);
+		b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+		if (*n > 1) {
+		    i__2 = j * b_dim1 + 2;
+		    i__3 = j * b_dim1 + 2;
+		    d_cnjg(&z__4, &du[1]);
+		    i__4 = j * b_dim1 + 1;
+		    z__3.r = z__4.r * b[i__4].r - z__4.i * b[i__4].i, z__3.i = 
+			    z__4.r * b[i__4].i + z__4.i * b[i__4].r;
+		    z__2.r = b[i__3].r - z__3.r, z__2.i = b[i__3].i - z__3.i;
+		    d_cnjg(&z__5, &d__[2]);
+		    z_div(&z__1, &z__2, &z__5);
+		    b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+		}
+		i__2 = *n;
+		for (i__ = 3; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * b_dim1;
+		    i__4 = i__ + j * b_dim1;
+		    d_cnjg(&z__5, &du[i__ - 1]);
+		    i__5 = i__ - 1 + j * b_dim1;
+		    z__4.r = z__5.r * b[i__5].r - z__5.i * b[i__5].i, z__4.i = 
+			    z__5.r * b[i__5].i + z__5.i * b[i__5].r;
+		    z__3.r = b[i__4].r - z__4.r, z__3.i = b[i__4].i - z__4.i;
+		    d_cnjg(&z__7, &du2[i__ - 2]);
+		    i__6 = i__ - 2 + j * b_dim1;
+		    z__6.r = z__7.r * b[i__6].r - z__7.i * b[i__6].i, z__6.i = 
+			    z__7.r * b[i__6].i + z__7.i * b[i__6].r;
+		    z__2.r = z__3.r - z__6.r, z__2.i = z__3.i - z__6.i;
+		    d_cnjg(&z__8, &d__[i__]);
+		    z_div(&z__1, &z__2, &z__8);
+		    b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L160: */
+		}
+
+/* Solve L**H * x = b. */
+
+		for (i__ = *n - 1; i__ >= 1; --i__) {
+		    if (ipiv[i__] == i__) {
+			i__2 = i__ + j * b_dim1;
+			i__3 = i__ + j * b_dim1;
+			d_cnjg(&z__3, &dl[i__]);
+			i__4 = i__ + 1 + j * b_dim1;
+			z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i, 
+				z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4]
+				.r;
+			z__1.r = b[i__3].r - z__2.r, z__1.i = b[i__3].i - 
+				z__2.i;
+			b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+		    } else {
+			i__2 = i__ + 1 + j * b_dim1;
+			temp.r = b[i__2].r, temp.i = b[i__2].i;
+			i__2 = i__ + 1 + j * b_dim1;
+			i__3 = i__ + j * b_dim1;
+			d_cnjg(&z__3, &dl[i__]);
+			z__2.r = z__3.r * temp.r - z__3.i * temp.i, z__2.i = 
+				z__3.r * temp.i + z__3.i * temp.r;
+			z__1.r = b[i__3].r - z__2.r, z__1.i = b[i__3].i - 
+				z__2.i;
+			b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+			i__2 = i__ + j * b_dim1;
+			b[i__2].r = temp.r, b[i__2].i = temp.i;
+		    }
+/* L170: */
+		}
+/* L180: */
+	    }
+	}
+    }
+
+/* End of ZGTTS2 */
+
+    return 0;
+} /* zgtts2_ */
diff --git a/contrib/libs/clapack/zhbev.c b/contrib/libs/clapack/zhbev.c
new file mode 100644
index 0000000000..31951882ee
--- /dev/null
+++ b/contrib/libs/clapack/zhbev.c
@@ -0,0 +1,273 @@
+/* zhbev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c passes all scalar arguments by reference, so constants used as */
+/* actual arguments must live in addressable file-scope statics. */
+
+static doublereal c_b11 = 1.;
+static integer c__1 = 1;
+
+/* Subroutine */ int zhbev_(char *jobz, char *uplo, integer *n, integer *kd, 
+	doublecomplex *ab, integer *ldab, doublereal *w, doublecomplex *z__, 
+	integer *ldz, doublecomplex *work, doublereal *rwork, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, z_dim1, z_offset, i__1;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    doublereal eps;
+    integer inde;
+    doublereal anrm;
+    integer imax;
+    doublereal rmin, rmax;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, 
+	    integer *);
+    doublereal sigma;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    logical lower, wantz;
+    extern doublereal dlamch_(char *);
+    integer iscale;
+    doublereal safmin;
+    extern doublereal zlanhb_(char *, char *, integer *, integer *, 
+	    doublecomplex *, integer *, doublereal *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal bignum;
+    extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *, 
+	     integer *), zlascl_(char *, integer *, integer *, doublereal *, 
+	    doublereal *, integer *, integer *, doublecomplex *, integer *, 
+	    integer *), zhbtrd_(char *, char *, integer *, integer *, 
+	    doublecomplex *, integer *, doublereal *, doublereal *, 
+	    doublecomplex *, integer *, doublecomplex *, integer *);
+    integer indrwk;
+    doublereal smlnum;
+    extern /* Subroutine */ int zsteqr_(char *, integer *, doublereal *, 
+	    doublereal *, doublecomplex *, integer *, doublereal *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHBEV computes all the eigenvalues and, optionally, eigenvectors of */
+/* a complex Hermitian band matrix A. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) COMPLEX*16 array, dimension (LDAB, N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, AB is overwritten by values generated during the */
+/* reduction to tridiagonal form. If UPLO = 'U', the first */
+/* superdiagonal and the diagonal of the tridiagonal matrix T */
+/* are returned in rows KD and KD+1 of AB, and if UPLO = 'L', */
+/* the diagonal and first subdiagonal of T are returned in the */
+/* first two rows of AB. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD + 1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX*16 array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/* eigenvectors of the matrix A, with the i-th column of Z */
+/* holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (max(1,3*N-2)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, the algorithm failed to converge; i */
+/* off-diagonal elements of an intermediate tridiagonal */
+/* form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* Pointer decrements give the body Fortran-style 1-based indexing; */
+/* AB(i,j) and Z(i,j) are addressed as ab[i + j*ab_dim1] and */
+/* z__[i + j*z_dim1] after the offset adjustments. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    lower = lsame_(uplo, "L");
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (lower || lsame_(uplo, "U"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*kd < 0) {
+	*info = -4;
+    } else if (*ldab < *kd + 1) {
+	*info = -6;
+    } else if (*ldz < 1 || wantz && *ldz < *n) {
+	*info = -9;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHBEV ", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* N = 1: the single eigenvalue is the (real part of the) lone */
+/* diagonal entry, stored in row 1 (lower) or row KD+1 (upper). */
+
+    if (*n == 1) {
+	if (lower) {
+	    i__1 = ab_dim1 + 1;
+	    w[1] = ab[i__1].r;
+	} else {
+	    i__1 = *kd + 1 + ab_dim1;
+	    w[1] = ab[i__1].r;
+	}
+	if (wantz) {
+	    i__1 = z_dim1 + 1;
+	    z__[i__1].r = 1., z__[i__1].i = 0.;
+	}
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    safmin = dlamch_("Safe minimum");
+    eps = dlamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1. / smlnum;
+    rmin = sqrt(smlnum);
+    rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+    anrm = zlanhb_("M", uplo, n, kd, &ab[ab_offset], ldab, &rwork[1]);
+    iscale = 0;
+    if (anrm > 0. && anrm < rmin) {
+	iscale = 1;
+	sigma = rmin / anrm;
+    } else if (anrm > rmax) {
+	iscale = 1;
+	sigma = rmax / anrm;
+    }
+    if (iscale == 1) {
+	if (lower) {
+	    zlascl_("B", kd, kd, &c_b11, &sigma, n, n, &ab[ab_offset], ldab, 
+		    info);
+	} else {
+	    zlascl_("Q", kd, kd, &c_b11, &sigma, n, n, &ab[ab_offset], ldab, 
+		    info);
+	}
+    }
+
+/* Call ZHBTRD to reduce Hermitian band matrix to tridiagonal form. */
+
+/* On return the tridiagonal diagonal is in w[1..n] and the */
+/* off-diagonal in rwork[inde..inde+n-2]. */
+
+    inde = 1;
+    zhbtrd_(jobz, uplo, n, kd, &ab[ab_offset], ldab, &w[1], &rwork[inde], &
+	    z__[z_offset], ldz, &work[1], &iinfo);
+
+/* For eigenvalues only, call DSTERF. For eigenvectors, call ZSTEQR. */
+
+    if (! wantz) {
+	dsterf_(n, &w[1], &rwork[inde], info);
+    } else {
+	indrwk = inde + *n;
+	zsteqr_(jobz, n, &w[1], &rwork[inde], &z__[z_offset], ldz, &rwork[
+		indrwk], info);
+    }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+/* On partial convergence (INFO > 0) only the first INFO-1 */
+/* eigenvalues are valid, so only those are rescaled. */
+
+    if (iscale == 1) {
+	if (*info == 0) {
+	    imax = *n;
+	} else {
+	    imax = *info - 1;
+	}
+	d__1 = 1. / sigma;
+	dscal_(&imax, &d__1, &w[1], &c__1);
+    }
+
+    return 0;
+
+/* End of ZHBEV */
+
+} /* zhbev_ */
diff --git a/contrib/libs/clapack/zhbevd.c b/contrib/libs/clapack/zhbevd.c
new file mode 100644
index 0000000000..dc58f4b5ed
--- /dev/null
+++ b/contrib/libs/clapack/zhbevd.c
@@ -0,0 +1,381 @@
+/* zhbevd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static doublereal c_b13 = 1.;
+static integer c__1 = 1;
+
+/* Subroutine */ int zhbevd_(char *jobz, char *uplo, integer *n, integer *kd,
+ doublecomplex *ab, integer *ldab, doublereal *w, doublecomplex *z__,
+ integer *ldz, doublecomplex *work, integer *lwork, doublereal *rwork,
+ integer *lrwork, integer *iwork, integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, z_dim1, z_offset, i__1;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ doublereal eps;
+ integer inde;
+ doublereal anrm;
+ integer imax;
+ doublereal rmin, rmax;
+ integer llwk2;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ doublereal sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ integer lwmin;
+ logical lower;
+ integer llrwk;
+ logical wantz;
+ integer indwk2;
+ extern doublereal dlamch_(char *);
+ integer iscale;
+ doublereal safmin;
+ extern doublereal zlanhb_(char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublereal *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum;
+ extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+ integer *), zlascl_(char *, integer *, integer *, doublereal *,
+ doublereal *, integer *, integer *, doublecomplex *, integer *,
+ integer *), zstedc_(char *, integer *, doublereal *,
+ doublereal *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublereal *, integer *, integer *, integer *, integer
+ *), zhbtrd_(char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublereal *, doublereal *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ integer indwrk, liwmin;
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ integer lrwmin;
+ doublereal smlnum;
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHBEVD computes all the eigenvalues and, optionally, eigenvectors of */
+/* a complex Hermitian band matrix A. If eigenvectors are desired, it */
+/* uses a divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) COMPLEX*16 array, dimension (LDAB, N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, AB is overwritten by values generated during the */
+/* reduction to tridiagonal form. If UPLO = 'U', the first */
+/* superdiagonal and the diagonal of the tridiagonal matrix T */
+/* are returned in rows KD and KD+1 of AB, and if UPLO = 'L', */
+/* the diagonal and first subdiagonal of T are returned in the */
+/* first two rows of AB. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD + 1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX*16 array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/* eigenvectors of the matrix A, with the i-th column of Z */
+/* holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If N <= 1, LWORK must be at least 1. */
+/* If JOBZ = 'N' and N > 1, LWORK must be at least N. */
+/* If JOBZ = 'V' and N > 1, LWORK must be at least 2*N**2. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK, RWORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* RWORK (workspace/output) DOUBLE PRECISION array, */
+/* dimension (LRWORK) */
+/* On exit, if INFO = 0, RWORK(1) returns the optimal LRWORK. */
+
+/* LRWORK (input) INTEGER */
+/* The dimension of array RWORK. */
+/* If N <= 1, LRWORK must be at least 1. */
+/* If JOBZ = 'N' and N > 1, LRWORK must be at least N. */
+/* If JOBZ = 'V' and N > 1, LRWORK must be at least */
+/* 1 + 5*N + 2*N**2. */
+
+/* If LRWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of array IWORK. */
+/* If JOBZ = 'N' or N <= 1, LIWORK must be at least 1. */
+/* If JOBZ = 'V' and N > 1, LIWORK must be at least 3 + 5*N . */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, the algorithm failed to converge; i */
+/* off-diagonal elements of an intermediate tridiagonal */
+/* form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ lower = lsame_(uplo, "L");
+ lquery = *lwork == -1 || *liwork == -1 || *lrwork == -1;
+
+ *info = 0;
+/* Minimal workspace sizes per the LWORK/LRWORK/LIWORK docs above: */
+/* eigenvectors need 2*N**2 complex, 1+5*N+2*N**2 real, 3+5*N integer. */
+ if (*n <= 1) {
+ lwmin = 1;
+ lrwmin = 1;
+ liwmin = 1;
+ } else {
+ if (wantz) {
+/* Computing 2nd power */
+ i__1 = *n;
+ lwmin = i__1 * i__1 << 1;
+/* Computing 2nd power */
+ i__1 = *n;
+ lrwmin = *n * 5 + 1 + (i__1 * i__1 << 1);
+ liwmin = *n * 5 + 3;
+ } else {
+ lwmin = *n;
+ lrwmin = *n;
+ liwmin = 1;
+ }
+ }
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (lower || lsame_(uplo, "U"))) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*kd < 0) {
+ *info = -4;
+ } else if (*ldab < *kd + 1) {
+ *info = -6;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -9;
+ }
+
+ if (*info == 0) {
+/* Store the optimal sizes so they are returned even on a workspace query. */
+ work[1].r = (doublereal) lwmin, work[1].i = 0.;
+ rwork[1] = (doublereal) lrwmin;
+ iwork[1] = liwmin;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -11;
+ } else if (*lrwork < lrwmin && ! lquery) {
+ *info = -13;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -15;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHBEVD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ i__1 = ab_dim1 + 1;
+ w[1] = ab[i__1].r;
+ if (wantz) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1., z__[i__1].i = 0.;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = dlamch_("Safe minimum");
+ eps = dlamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1. / smlnum;
+ rmin = sqrt(smlnum);
+ rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ anrm = zlanhb_("M", uplo, n, kd, &ab[ab_offset], ldab, &rwork[1]);
+ iscale = 0;
+ if (anrm > 0. && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ if (lower) {
+ zlascl_("B", kd, kd, &c_b13, &sigma, n, n, &ab[ab_offset], ldab,
+ info);
+ } else {
+ zlascl_("Q", kd, kd, &c_b13, &sigma, n, n, &ab[ab_offset], ldab,
+ info);
+ }
+ }
+
+/* Call ZHBTRD to reduce Hermitian band matrix to tridiagonal form. */
+
+ inde = 1;
+ indwrk = inde + *n;
+ indwk2 = *n * *n + 1;
+ llwk2 = *lwork - indwk2 + 1;
+ llrwk = *lrwork - indwrk + 1;
+/* Workspace layout: RWORK(INDE..) holds the off-diagonal of T and */
+/* RWORK(INDWRK..) is ZSTEDC real scratch; WORK(1..N*N) receives the */
+/* eigenvectors of T and WORK(INDWK2..), of length LLWK2, is scratch */
+/* for ZSTEDC and the ZGEMM back-transform below. */
+ zhbtrd_(jobz, uplo, n, kd, &ab[ab_offset], ldab, &w[1], &rwork[inde], &
+ z__[z_offset], ldz, &work[1], &iinfo);
+
+/* For eigenvalues only, call DSTERF. For eigenvectors, call ZSTEDC. */
+
+ if (! wantz) {
+ dsterf_(n, &w[1], &rwork[inde], info);
+ } else {
+/* Back-transform: Z (holding Q from ZHBTRD) times the eigenvectors */
+/* of T is formed in WORK(INDWK2), then copied back into Z. */
+ zstedc_("I", n, &w[1], &rwork[inde], &work[1], n, &work[indwk2], &
+ llwk2, &rwork[indwrk], &llrwk, &iwork[1], liwork, info);
+ zgemm_("N", "N", n, n, n, &c_b2, &z__[z_offset], ldz, &work[1], n, &
+ c_b1, &work[indwk2], n);
+ zlacpy_("A", n, n, &work[indwk2], n, &z__[z_offset], ldz);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *n;
+ } else {
+/* On failure only the first INFO-1 eigenvalues are valid. */
+ imax = *info - 1;
+ }
+ d__1 = 1. / sigma;
+ dscal_(&imax, &d__1, &w[1], &c__1);
+ }
+
+/* Return optimal workspace sizes in the first workspace entries. */
+ work[1].r = (doublereal) lwmin, work[1].i = 0.;
+ rwork[1] = (doublereal) lrwmin;
+ iwork[1] = liwmin;
+ return 0;
+
+/* End of ZHBEVD */
+
+} /* zhbevd_ */
diff --git a/contrib/libs/clapack/zhbevx.c b/contrib/libs/clapack/zhbevx.c
new file mode 100644
index 0000000000..2c4bde87ab
--- /dev/null
+++ b/contrib/libs/clapack/zhbevx.c
@@ -0,0 +1,527 @@
+/* zhbevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static doublereal c_b16 = 1.;
+static integer c__1 = 1;
+
+/* Subroutine */ int zhbevx_(char *jobz, char *range, char *uplo, integer *n,
+ integer *kd, doublecomplex *ab, integer *ldab, doublecomplex *q,
+ integer *ldq, doublereal *vl, doublereal *vu, integer *il, integer *
+ iu, doublereal *abstol, integer *m, doublereal *w, doublecomplex *z__,
+ integer *ldz, doublecomplex *work, doublereal *rwork, integer *iwork,
+ integer *ifail, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, q_dim1, q_offset, z_dim1, z_offset, i__1,
+ i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, jj;
+ doublereal eps, vll, vuu, tmp1;
+ integer indd, inde;
+ doublereal anrm;
+ integer imax;
+ doublereal rmin, rmax;
+ logical test;
+ doublecomplex ctmp1;
+ integer itmp1, indee;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ doublereal sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ char order[1];
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ logical lower;
+ extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *);
+ logical wantz;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zswap_(integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *);
+ extern doublereal dlamch_(char *);
+ logical alleig, indeig;
+ integer iscale, indibl;
+ logical valeig;
+ doublereal safmin;
+ extern doublereal zlanhb_(char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublereal *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal abstll, bignum;
+ integer indiwk, indisp;
+ extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+ integer *), zlascl_(char *, integer *, integer *, doublereal *,
+ doublereal *, integer *, integer *, doublecomplex *, integer *,
+ integer *), dstebz_(char *, char *, integer *, doublereal
+ *, doublereal *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *, doublereal *, integer *, integer *),
+ zhbtrd_(char *, char *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *, doublereal *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ integer indrwk, indwrk;
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ integer nsplit;
+ doublereal smlnum;
+ extern /* Subroutine */ int zstein_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *, integer *, integer *, integer *),
+ zsteqr_(char *, integer *, doublereal *, doublereal *,
+ doublecomplex *, integer *, doublereal *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHBEVX computes selected eigenvalues and, optionally, eigenvectors */
+/* of a complex Hermitian band matrix A. Eigenvalues and eigenvectors */
+/* can be selected by specifying either a range of values or a range of */
+/* indices for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found; */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found; */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) COMPLEX*16 array, dimension (LDAB, N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, AB is overwritten by values generated during the */
+/* reduction to tridiagonal form. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD + 1. */
+
+/* Q (output) COMPLEX*16 array, dimension (LDQ, N) */
+/* If JOBZ = 'V', the N-by-N unitary matrix used in the */
+/* reduction to tridiagonal form. */
+/* If JOBZ = 'N', the array Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. If JOBZ = 'V', then */
+/* LDQ >= max(1,N). */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) DOUBLE PRECISION */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing AB to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*DLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*DLAMCH('S'). */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices */
+/* with Guaranteed High Relative Accuracy," by Demmel and */
+/* Kahan, LAPACK Working Note #3. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* The first M elements contain the selected eigenvalues in */
+/* ascending order. */
+
+/* Z (output) COMPLEX*16 array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and the */
+/* index of the eigenvector is returned in IFAIL. */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (7*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, then i eigenvectors failed to converge. */
+/* Their indices are stored in array IFAIL. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+ lower = lsame_(uplo, "L");
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -2;
+ } else if (! (lower || lsame_(uplo, "U"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*kd < 0) {
+ *info = -5;
+ } else if (*ldab < *kd + 1) {
+ *info = -7;
+ } else if (wantz && *ldq < max(1,*n)) {
+ *info = -9;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -11;
+ }
+ } else if (indeig) {
+ if (*il < 1 || *il > max(1,*n)) {
+ *info = -12;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -13;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -18;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHBEVX", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+/* 1x1 case: the single eigenvalue is the (real) diagonal entry. */
+ *m = 1;
+ if (lower) {
+ i__1 = ab_dim1 + 1;
+ ctmp1.r = ab[i__1].r, ctmp1.i = ab[i__1].i;
+ } else {
+ i__1 = *kd + 1 + ab_dim1;
+ ctmp1.r = ab[i__1].r, ctmp1.i = ab[i__1].i;
+ }
+ tmp1 = ctmp1.r;
+ if (valeig) {
+/* RANGE='V': accept the eigenvalue only if it lies in (VL,VU]. */
+ if (! (*vl < tmp1 && *vu >= tmp1)) {
+ *m = 0;
+ }
+ }
+ if (*m == 1) {
+ w[1] = ctmp1.r;
+ if (wantz) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1., z__[i__1].i = 0.;
+ }
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = dlamch_("Safe minimum");
+ eps = dlamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1. / smlnum;
+ rmin = sqrt(smlnum);
+/* Computing MIN */
+ d__1 = sqrt(bignum), d__2 = 1. / sqrt(sqrt(safmin));
+ rmax = min(d__1,d__2);
+/* NOTE(review): RMAX is capped at 1/sqrt(sqrt(safmin)), matching the */
+/* scaling convention of the other LAPACK *EVX drivers. */
+
+/* Scale matrix to allowable range, if necessary. */
+
+ iscale = 0;
+ abstll = *abstol;
+ if (valeig) {
+ vll = *vl;
+ vuu = *vu;
+ } else {
+ vll = 0.;
+ vuu = 0.;
+ }
+ anrm = zlanhb_("M", uplo, n, kd, &ab[ab_offset], ldab, &rwork[1]);
+ if (anrm > 0. && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ if (lower) {
+ zlascl_("B", kd, kd, &c_b16, &sigma, n, n, &ab[ab_offset], ldab,
+ info);
+ } else {
+ zlascl_("Q", kd, kd, &c_b16, &sigma, n, n, &ab[ab_offset], ldab,
+ info);
+ }
+/* Keep the tolerance and search interval consistent with the scaled A. */
+ if (*abstol > 0.) {
+ abstll = *abstol * sigma;
+ }
+ if (valeig) {
+ vll = *vl * sigma;
+ vuu = *vu * sigma;
+ }
+ }
+
+/* Call ZHBTRD to reduce Hermitian band matrix to tridiagonal form. */
+
+/* RWORK layout: INDD.. = diagonal of T, INDE.. = off-diagonal, */
+/* INDRWK.. = scratch for ZSTEQR/DSTEBZ/ZSTEIN. */
+ indd = 1;
+ inde = indd + *n;
+ indrwk = inde + *n;
+ indwrk = 1;
+ zhbtrd_(jobz, uplo, n, kd, &ab[ab_offset], ldab, &rwork[indd], &rwork[
+ inde], &q[q_offset], ldq, &work[indwrk], &iinfo);
+
+/* If all eigenvalues are desired and ABSTOL is less than or equal */
+/* to zero, then call DSTERF or ZSTEQR. If this fails for some */
+/* eigenvalue, then try DSTEBZ. */
+
+ test = FALSE_;
+ if (indeig) {
+ if (*il == 1 && *iu == *n) {
+ test = TRUE_;
+ }
+ }
+ if ((alleig || test) && *abstol <= 0.) {
+ dcopy_(n, &rwork[indd], &c__1, &w[1], &c__1);
+ indee = indrwk + (*n << 1);
+ if (! wantz) {
+ i__1 = *n - 1;
+ dcopy_(&i__1, &rwork[inde], &c__1, &rwork[indee], &c__1);
+ dsterf_(n, &w[1], &rwork[indee], info);
+ } else {
+ zlacpy_("A", n, n, &q[q_offset], ldq, &z__[z_offset], ldz);
+ i__1 = *n - 1;
+ dcopy_(&i__1, &rwork[inde], &c__1, &rwork[indee], &c__1);
+ zsteqr_(jobz, n, &w[1], &rwork[indee], &z__[z_offset], ldz, &
+ rwork[indrwk], info);
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ifail[i__] = 0;
+/* L10: */
+ }
+ }
+ }
+ if (*info == 0) {
+ *m = *n;
+ goto L30;
+ }
+/* Fast path failed for some eigenvalue: clear INFO and fall */
+/* through to the bisection/inverse-iteration path below. */
+ *info = 0;
+ }
+
+/* Otherwise, call DSTEBZ and, if eigenvectors are desired, ZSTEIN. */
+
+ if (wantz) {
+ *(unsigned char *)order = 'B';
+ } else {
+ *(unsigned char *)order = 'E';
+ }
+/* IWORK layout: INDIBL.. = block indices, INDISP.. = split points */
+/* (both produced by DSTEBZ), INDIWK.. = scratch. */
+ indibl = 1;
+ indisp = indibl + *n;
+ indiwk = indisp + *n;
+ dstebz_(range, order, n, &vll, &vuu, il, iu, &abstll, &rwork[indd], &
+ rwork[inde], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &
+ rwork[indrwk], &iwork[indiwk], info);
+
+ if (wantz) {
+ zstein_(n, &rwork[indd], &rwork[inde], m, &w[1], &iwork[indibl], &
+ iwork[indisp], &z__[z_offset], ldz, &rwork[indrwk], &iwork[
+ indiwk], &ifail[1], info);
+
+/* Apply unitary matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by ZSTEIN. */
+
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(n, &z__[j * z_dim1 + 1], &c__1, &work[1], &c__1);
+ zgemv_("N", n, n, &c_b2, &q[q_offset], ldq, &work[1], &c__1, &
+ c_b1, &z__[j * z_dim1 + 1], &c__1);
+/* L20: */
+ }
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+L30:
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *m;
+ } else {
+/* On failure only the first INFO-1 eigenvalues are valid. */
+ imax = *info - 1;
+ }
+ d__1 = 1. / sigma;
+ dscal_(&imax, &d__1, &w[1], &c__1);
+ }
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+/* Selection sort on W(1..M), carrying the matching Z columns, */
+/* IWORK(INDIBL..) block indices and IFAIL entries along. */
+ if (wantz) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = 0;
+ tmp1 = w[j];
+ i__2 = *m;
+ for (jj = j + 1; jj <= i__2; ++jj) {
+ if (w[jj] < tmp1) {
+ i__ = jj;
+ tmp1 = w[jj];
+ }
+/* L40: */
+ }
+
+ if (i__ != 0) {
+ itmp1 = iwork[indibl + i__ - 1];
+ w[i__] = w[j];
+ iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+ w[j] = tmp1;
+ iwork[indibl + j - 1] = itmp1;
+ zswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+ &c__1);
+ if (*info != 0) {
+ itmp1 = ifail[i__];
+ ifail[i__] = ifail[j];
+ ifail[j] = itmp1;
+ }
+ }
+/* L50: */
+ }
+ }
+
+ return 0;
+
+/* End of ZHBEVX */
+
+} /* zhbevx_ */
diff --git a/contrib/libs/clapack/zhbgst.c b/contrib/libs/clapack/zhbgst.c
new file mode 100644
index 0000000000..59ec866a26
--- /dev/null
+++ b/contrib/libs/clapack/zhbgst.c
@@ -0,0 +1,2152 @@
+/* zhbgst.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zhbgst_(char *vect, char *uplo, integer *n, integer *ka,
+ integer *kb, doublecomplex *ab, integer *ldab, doublecomplex *bb,
+ integer *ldbb, doublecomplex *x, integer *ldx, doublecomplex *work,
+ doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, bb_dim1, bb_offset, x_dim1, x_offset, i__1,
+ i__2, i__3, i__4, i__5, i__6, i__7, i__8;
+ doublereal d__1;
+ doublecomplex z__1, z__2, z__3, z__4, z__5, z__6, z__7, z__8, z__9, z__10;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k, l, m;
+ doublecomplex t;
+ integer i0, i1, i2, j1, j2;
+ doublecomplex ra;
+ integer nr, nx, ka1, kb1;
+ doublecomplex ra1;
+ integer j1t, j2t;
+ doublereal bii;
+ integer kbt, nrt, inca;
+ extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublereal *, doublecomplex *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zgerc_(integer *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int zgeru_(integer *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ logical wantx;
+ extern /* Subroutine */ int zlar2v_(integer *, doublecomplex *,
+ doublecomplex *, doublecomplex *, integer *, doublereal *,
+ doublecomplex *, integer *), xerbla_(char *, integer *),
+ zdscal_(integer *, doublereal *, doublecomplex *, integer *);
+ logical update;
+ extern /* Subroutine */ int zlacgv_(integer *, doublecomplex *, integer *)
+ , zlaset_(char *, integer *, integer *, doublecomplex *,
+ doublecomplex *, doublecomplex *, integer *), zlartg_(
+ doublecomplex *, doublecomplex *, doublereal *, doublecomplex *,
+ doublecomplex *), zlargv_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublereal *, integer *), zlartv_(
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublereal *, doublecomplex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHBGST reduces a complex Hermitian-definite banded generalized */
+/* eigenproblem A*x = lambda*B*x to standard form C*y = lambda*y, */
+/* such that C has the same bandwidth as A. */
+
+/* B must have been previously factorized as S**H*S by ZPBSTF, using a */
+/* split Cholesky factorization. A is overwritten by C = X**H*A*X, where */
+/* X = S**(-1)*Q and Q is a unitary matrix chosen to preserve the */
+/* bandwidth of A. */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* = 'N': do not form the transformation matrix X; */
+/* = 'V': form X. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* KA (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KA >= 0. */
+
+/* KB (input) INTEGER */
+/* The number of superdiagonals of the matrix B if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KA >= KB >= 0. */
+
+/* AB (input/output) COMPLEX*16 array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first ka+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(ka+1+i-j,j) = A(i,j) for max(1,j-ka)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+ka). */
+
+/* On exit, the transformed matrix X**H*A*X, stored in the same */
+/* format as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KA+1. */
+
+/* BB (input) COMPLEX*16 array, dimension (LDBB,N) */
+/* The banded factor S from the split Cholesky factorization of */
+/* B, as returned by ZPBSTF, stored in the first kb+1 rows of */
+/* the array. */
+
+/* LDBB (input) INTEGER */
+/* The leading dimension of the array BB. LDBB >= KB+1. */
+
+/* X (output) COMPLEX*16 array, dimension (LDX,N) */
+/* If VECT = 'V', the n-by-n matrix X. */
+/* If VECT = 'N', the array X is not referenced. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. */
+/* LDX >= max(1,N) if VECT = 'V'; LDX >= 1 otherwise. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ bb_dim1 = *ldbb;
+ bb_offset = 1 + bb_dim1;
+ bb -= bb_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ wantx = lsame_(vect, "V");
+ upper = lsame_(uplo, "U");
+ ka1 = *ka + 1;
+ kb1 = *kb + 1;
+ *info = 0;
+ if (! wantx && ! lsame_(vect, "N")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ka < 0) {
+ *info = -4;
+ } else if (*kb < 0 || *kb > *ka) {
+ *info = -5;
+ } else if (*ldab < *ka + 1) {
+ *info = -7;
+ } else if (*ldbb < *kb + 1) {
+ *info = -9;
+ } else if (*ldx < 1 || wantx && *ldx < max(1,*n)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHBGST", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ inca = *ldab * ka1;
+
+/* Initialize X to the unit matrix, if needed */
+
+ if (wantx) {
+ zlaset_("Full", n, n, &c_b1, &c_b2, &x[x_offset], ldx);
+ }
+
+/* Set M to the splitting point m. It must be the same value as is */
+/* used in ZPBSTF. The chosen value allows the arrays WORK and RWORK */
+/* to be of dimension (N). */
+
+ m = (*n + *kb) / 2;
+
+/* The routine works in two phases, corresponding to the two halves */
+/* of the split Cholesky factorization of B as S**H*S where */
+
+/* S = ( U ) */
+/* ( M L ) */
+
+/* with U upper triangular of order m, and L lower triangular of */
+/* order n-m. S has the same bandwidth as B. */
+
+/* S is treated as a product of elementary matrices: */
+
+/* S = S(m)*S(m-1)*...*S(2)*S(1)*S(m+1)*S(m+2)*...*S(n-1)*S(n) */
+
+/* where S(i) is determined by the i-th row of S. */
+
+/* In phase 1, the index i takes the values n, n-1, ... , m+1; */
+/* in phase 2, it takes the values 1, 2, ... , m. */
+
+/* For each value of i, the current matrix A is updated by forming */
+/* inv(S(i))**H*A*inv(S(i)). This creates a triangular bulge outside */
+/* the band of A. The bulge is then pushed down toward the bottom of */
+/* A in phase 1, and up toward the top of A in phase 2, by applying */
+/* plane rotations. */
+
+/* There are kb*(kb+1)/2 elements in the bulge, but at most 2*kb-1 */
+/* of them are linearly independent, so annihilating a bulge requires */
+/* only 2*kb-1 plane rotations. The rotations are divided into a 1st */
+/* set of kb-1 rotations, and a 2nd set of kb rotations. */
+
+/* Wherever possible, rotations are generated and applied in vector */
+/* operations of length NR between the indices J1 and J2 (sometimes */
+/* replaced by modified values NRT, J1T or J2T). */
+
+/* The real cosines and complex sines of the rotations are stored in */
+/* the arrays RWORK and WORK, those of the 1st set in elements */
+/* 2:m-kb-1, and those of the 2nd set in elements m-kb+1:n. */
+
+/* The bulges are not formed explicitly; nonzero elements outside the */
+/* band are created only when they are required for generating new */
+/* rotations; they are stored in the array WORK, in positions where */
+/* they are later overwritten by the sines of the rotations which */
+/* annihilate them. */
+
+/* **************************** Phase 1 ***************************** */
+
+/* The logical structure of this phase is: */
+
+/* UPDATE = .TRUE. */
+/* DO I = N, M + 1, -1 */
+/* use S(i) to update A and create a new bulge */
+/* apply rotations to push all bulges KA positions downward */
+/* END DO */
+/* UPDATE = .FALSE. */
+/* DO I = M + KA + 1, N - 1 */
+/* apply rotations to push all bulges KA positions downward */
+/* END DO */
+
+/* To avoid duplicating code, the two loops are merged. */
+
+ update = TRUE_;
+ i__ = *n + 1;
+L10:
+ if (update) {
+ --i__;
+/* Computing MIN */
+ i__1 = *kb, i__2 = i__ - 1;
+ kbt = min(i__1,i__2);
+ i0 = i__ - 1;
+/* Computing MIN */
+ i__1 = *n, i__2 = i__ + *ka;
+ i1 = min(i__1,i__2);
+ i2 = i__ - kbt + ka1;
+ if (i__ < m + 1) {
+ update = FALSE_;
+ ++i__;
+ i0 = m;
+ if (*ka == 0) {
+ goto L480;
+ }
+ goto L10;
+ }
+ } else {
+ i__ += *ka;
+ if (i__ > *n - 1) {
+ goto L480;
+ }
+ }
+
+ if (upper) {
+
+/* Transform A, working with the upper triangle */
+
+ if (update) {
+
+/* Form inv(S(i))**H * A * inv(S(i)) */
+
+ i__1 = kb1 + i__ * bb_dim1;
+ bii = bb[i__1].r;
+ i__1 = ka1 + i__ * ab_dim1;
+ i__2 = ka1 + i__ * ab_dim1;
+ d__1 = ab[i__2].r / bii / bii;
+ ab[i__1].r = d__1, ab[i__1].i = 0.;
+ i__1 = i1;
+ for (j = i__ + 1; j <= i__1; ++j) {
+ i__2 = i__ - j + ka1 + j * ab_dim1;
+ i__3 = i__ - j + ka1 + j * ab_dim1;
+ z__1.r = ab[i__3].r / bii, z__1.i = ab[i__3].i / bii;
+ ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
+/* L20: */
+ }
+/* Computing MAX */
+ i__1 = 1, i__2 = i__ - *ka;
+ i__3 = i__ - 1;
+ for (j = max(i__1,i__2); j <= i__3; ++j) {
+ i__1 = j - i__ + ka1 + i__ * ab_dim1;
+ i__2 = j - i__ + ka1 + i__ * ab_dim1;
+ z__1.r = ab[i__2].r / bii, z__1.i = ab[i__2].i / bii;
+ ab[i__1].r = z__1.r, ab[i__1].i = z__1.i;
+/* L30: */
+ }
+ i__3 = i__ - 1;
+ for (k = i__ - kbt; k <= i__3; ++k) {
+ i__1 = k;
+ for (j = i__ - kbt; j <= i__1; ++j) {
+ i__2 = j - k + ka1 + k * ab_dim1;
+ i__4 = j - k + ka1 + k * ab_dim1;
+ i__5 = j - i__ + kb1 + i__ * bb_dim1;
+ d_cnjg(&z__5, &ab[k - i__ + ka1 + i__ * ab_dim1]);
+ z__4.r = bb[i__5].r * z__5.r - bb[i__5].i * z__5.i,
+ z__4.i = bb[i__5].r * z__5.i + bb[i__5].i *
+ z__5.r;
+ z__3.r = ab[i__4].r - z__4.r, z__3.i = ab[i__4].i -
+ z__4.i;
+ d_cnjg(&z__7, &bb[k - i__ + kb1 + i__ * bb_dim1]);
+ i__6 = j - i__ + ka1 + i__ * ab_dim1;
+ z__6.r = z__7.r * ab[i__6].r - z__7.i * ab[i__6].i,
+ z__6.i = z__7.r * ab[i__6].i + z__7.i * ab[i__6]
+ .r;
+ z__2.r = z__3.r - z__6.r, z__2.i = z__3.i - z__6.i;
+ i__7 = ka1 + i__ * ab_dim1;
+ d__1 = ab[i__7].r;
+ i__8 = j - i__ + kb1 + i__ * bb_dim1;
+ z__9.r = d__1 * bb[i__8].r, z__9.i = d__1 * bb[i__8].i;
+ d_cnjg(&z__10, &bb[k - i__ + kb1 + i__ * bb_dim1]);
+ z__8.r = z__9.r * z__10.r - z__9.i * z__10.i, z__8.i =
+ z__9.r * z__10.i + z__9.i * z__10.r;
+ z__1.r = z__2.r + z__8.r, z__1.i = z__2.i + z__8.i;
+ ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
+/* L40: */
+ }
+/* Computing MAX */
+ i__1 = 1, i__2 = i__ - *ka;
+ i__4 = i__ - kbt - 1;
+ for (j = max(i__1,i__2); j <= i__4; ++j) {
+ i__1 = j - k + ka1 + k * ab_dim1;
+ i__2 = j - k + ka1 + k * ab_dim1;
+ d_cnjg(&z__3, &bb[k - i__ + kb1 + i__ * bb_dim1]);
+ i__5 = j - i__ + ka1 + i__ * ab_dim1;
+ z__2.r = z__3.r * ab[i__5].r - z__3.i * ab[i__5].i,
+ z__2.i = z__3.r * ab[i__5].i + z__3.i * ab[i__5]
+ .r;
+ z__1.r = ab[i__2].r - z__2.r, z__1.i = ab[i__2].i -
+ z__2.i;
+ ab[i__1].r = z__1.r, ab[i__1].i = z__1.i;
+/* L50: */
+ }
+/* L60: */
+ }
+ i__3 = i1;
+ for (j = i__; j <= i__3; ++j) {
+/* Computing MAX */
+ i__4 = j - *ka, i__1 = i__ - kbt;
+ i__2 = i__ - 1;
+ for (k = max(i__4,i__1); k <= i__2; ++k) {
+ i__4 = k - j + ka1 + j * ab_dim1;
+ i__1 = k - j + ka1 + j * ab_dim1;
+ i__5 = k - i__ + kb1 + i__ * bb_dim1;
+ i__6 = i__ - j + ka1 + j * ab_dim1;
+ z__2.r = bb[i__5].r * ab[i__6].r - bb[i__5].i * ab[i__6]
+ .i, z__2.i = bb[i__5].r * ab[i__6].i + bb[i__5].i
+ * ab[i__6].r;
+ z__1.r = ab[i__1].r - z__2.r, z__1.i = ab[i__1].i -
+ z__2.i;
+ ab[i__4].r = z__1.r, ab[i__4].i = z__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by inv(S(i)) */
+
+ i__3 = *n - m;
+ d__1 = 1. / bii;
+ zdscal_(&i__3, &d__1, &x[m + 1 + i__ * x_dim1], &c__1);
+ if (kbt > 0) {
+ i__3 = *n - m;
+ z__1.r = -1., z__1.i = -0.;
+ zgerc_(&i__3, &kbt, &z__1, &x[m + 1 + i__ * x_dim1], &
+ c__1, &bb[kb1 - kbt + i__ * bb_dim1], &c__1, &x[m
+ + 1 + (i__ - kbt) * x_dim1], ldx);
+ }
+ }
+
+/* store a(i,i1) in RA1 for use in next loop over K */
+
+ i__3 = i__ - i1 + ka1 + i1 * ab_dim1;
+ ra1.r = ab[i__3].r, ra1.i = ab[i__3].i;
+ }
+
+/* Generate and apply vectors of rotations to chase all the */
+/* existing bulges KA positions down toward the bottom of the */
+/* band */
+
+ i__3 = *kb - 1;
+ for (k = 1; k <= i__3; ++k) {
+ if (update) {
+
+/* Determine the rotations which would annihilate the bulge */
+/* which has in theory just been created */
+
+ if (i__ - k + *ka < *n && i__ - k > 1) {
+
+/* generate rotation to annihilate a(i,i-k+ka+1) */
+
+ zlartg_(&ab[k + 1 + (i__ - k + *ka) * ab_dim1], &ra1, &
+ rwork[i__ - k + *ka - m], &work[i__ - k + *ka - m]
+, &ra);
+
+/* create nonzero element a(i-k,i-k+ka+1) outside the */
+/* band and store it in WORK(i-k) */
+
+ i__2 = kb1 - k + i__ * bb_dim1;
+ z__2.r = -bb[i__2].r, z__2.i = -bb[i__2].i;
+ z__1.r = z__2.r * ra1.r - z__2.i * ra1.i, z__1.i = z__2.r
+ * ra1.i + z__2.i * ra1.r;
+ t.r = z__1.r, t.i = z__1.i;
+ i__2 = i__ - k;
+ i__4 = i__ - k + *ka - m;
+ z__2.r = rwork[i__4] * t.r, z__2.i = rwork[i__4] * t.i;
+ d_cnjg(&z__4, &work[i__ - k + *ka - m]);
+ i__1 = (i__ - k + *ka) * ab_dim1 + 1;
+ z__3.r = z__4.r * ab[i__1].r - z__4.i * ab[i__1].i,
+ z__3.i = z__4.r * ab[i__1].i + z__4.i * ab[i__1]
+ .r;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+ i__2 = (i__ - k + *ka) * ab_dim1 + 1;
+ i__4 = i__ - k + *ka - m;
+ z__2.r = work[i__4].r * t.r - work[i__4].i * t.i, z__2.i =
+ work[i__4].r * t.i + work[i__4].i * t.r;
+ i__1 = i__ - k + *ka - m;
+ i__5 = (i__ - k + *ka) * ab_dim1 + 1;
+ z__3.r = rwork[i__1] * ab[i__5].r, z__3.i = rwork[i__1] *
+ ab[i__5].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
+ ra1.r = ra.r, ra1.i = ra.i;
+ }
+ }
+/* Computing MAX */
+ i__2 = 1, i__4 = k - i0 + 2;
+ j2 = i__ - k - 1 + max(i__2,i__4) * ka1;
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ if (update) {
+/* Computing MAX */
+ i__2 = j2, i__4 = i__ + (*ka << 1) - k + 1;
+ j2t = max(i__2,i__4);
+ } else {
+ j2t = j2;
+ }
+ nrt = (*n - j2t + *ka) / ka1;
+ i__2 = j1;
+ i__4 = ka1;
+ for (j = j2t; i__4 < 0 ? j >= i__2 : j <= i__2; j += i__4) {
+
+/* create nonzero element a(j-ka,j+1) outside the band */
+/* and store it in WORK(j-m) */
+
+ i__1 = j - m;
+ i__5 = j - m;
+ i__6 = (j + 1) * ab_dim1 + 1;
+ z__1.r = work[i__5].r * ab[i__6].r - work[i__5].i * ab[i__6]
+ .i, z__1.i = work[i__5].r * ab[i__6].i + work[i__5].i
+ * ab[i__6].r;
+ work[i__1].r = z__1.r, work[i__1].i = z__1.i;
+ i__1 = (j + 1) * ab_dim1 + 1;
+ i__5 = j - m;
+ i__6 = (j + 1) * ab_dim1 + 1;
+ z__1.r = rwork[i__5] * ab[i__6].r, z__1.i = rwork[i__5] * ab[
+ i__6].i;
+ ab[i__1].r = z__1.r, ab[i__1].i = z__1.i;
+/* L90: */
+ }
+
+/* generate rotations in 1st set to annihilate elements which */
+/* have been created outside the band */
+
+ if (nrt > 0) {
+ zlargv_(&nrt, &ab[j2t * ab_dim1 + 1], &inca, &work[j2t - m], &
+ ka1, &rwork[j2t - m], &ka1);
+ }
+ if (nr > 0) {
+
+/* apply rotations in 1st set from the right */
+
+ i__4 = *ka - 1;
+ for (l = 1; l <= i__4; ++l) {
+ zlartv_(&nr, &ab[ka1 - l + j2 * ab_dim1], &inca, &ab[*ka
+ - l + (j2 + 1) * ab_dim1], &inca, &rwork[j2 - m],
+ &work[j2 - m], &ka1);
+/* L100: */
+ }
+
+/* apply rotations in 1st set from both sides to diagonal */
+/* blocks */
+
+ zlar2v_(&nr, &ab[ka1 + j2 * ab_dim1], &ab[ka1 + (j2 + 1) *
+ ab_dim1], &ab[*ka + (j2 + 1) * ab_dim1], &inca, &
+ rwork[j2 - m], &work[j2 - m], &ka1);
+
+ zlacgv_(&nr, &work[j2 - m], &ka1);
+ }
+
+/* start applying rotations in 1st set from the left */
+
+ i__4 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__4; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[l + (j2 + ka1 - l) * ab_dim1], &inca, &
+ ab[l + 1 + (j2 + ka1 - l) * ab_dim1], &inca, &
+ rwork[j2 - m], &work[j2 - m], &ka1);
+ }
+/* L110: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 1st set */
+
+ i__4 = j1;
+ i__2 = ka1;
+ for (j = j2; i__2 < 0 ? j >= i__4 : j <= i__4; j += i__2) {
+ i__1 = *n - m;
+ d_cnjg(&z__1, &work[j - m]);
+ zrot_(&i__1, &x[m + 1 + j * x_dim1], &c__1, &x[m + 1 + (j
+ + 1) * x_dim1], &c__1, &rwork[j - m], &z__1);
+/* L120: */
+ }
+ }
+/* L130: */
+ }
+
+ if (update) {
+ if (i2 <= *n && kbt > 0) {
+
+/* create nonzero element a(i-kbt,i-kbt+ka+1) outside the */
+/* band and store it in WORK(i-kbt) */
+
+ i__3 = i__ - kbt;
+ i__2 = kb1 - kbt + i__ * bb_dim1;
+ z__2.r = -bb[i__2].r, z__2.i = -bb[i__2].i;
+ z__1.r = z__2.r * ra1.r - z__2.i * ra1.i, z__1.i = z__2.r *
+ ra1.i + z__2.i * ra1.r;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+ }
+ }
+
+ for (k = *kb; k >= 1; --k) {
+ if (update) {
+/* Computing MAX */
+ i__3 = 2, i__2 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__3,i__2) * ka1;
+ } else {
+/* Computing MAX */
+ i__3 = 1, i__2 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__3,i__2) * ka1;
+ }
+
+/* finish applying rotations in 2nd set from the left */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (*n - j2 + *ka + l) / ka1;
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[l + (j2 - l + 1) * ab_dim1], &inca, &ab[
+ l + 1 + (j2 - l + 1) * ab_dim1], &inca, &rwork[j2
+ - *ka], &work[j2 - *ka], &ka1);
+ }
+/* L140: */
+ }
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ i__3 = j2;
+ i__2 = -ka1;
+ for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j += i__2) {
+ i__4 = j;
+ i__1 = j - *ka;
+ work[i__4].r = work[i__1].r, work[i__4].i = work[i__1].i;
+ rwork[j] = rwork[j - *ka];
+/* L150: */
+ }
+ i__2 = j1;
+ i__3 = ka1;
+ for (j = j2; i__3 < 0 ? j >= i__2 : j <= i__2; j += i__3) {
+
+/* create nonzero element a(j-ka,j+1) outside the band */
+/* and store it in WORK(j) */
+
+ i__4 = j;
+ i__1 = j;
+ i__5 = (j + 1) * ab_dim1 + 1;
+ z__1.r = work[i__1].r * ab[i__5].r - work[i__1].i * ab[i__5]
+ .i, z__1.i = work[i__1].r * ab[i__5].i + work[i__1].i
+ * ab[i__5].r;
+ work[i__4].r = z__1.r, work[i__4].i = z__1.i;
+ i__4 = (j + 1) * ab_dim1 + 1;
+ i__1 = j;
+ i__5 = (j + 1) * ab_dim1 + 1;
+ z__1.r = rwork[i__1] * ab[i__5].r, z__1.i = rwork[i__1] * ab[
+ i__5].i;
+ ab[i__4].r = z__1.r, ab[i__4].i = z__1.i;
+/* L160: */
+ }
+ if (update) {
+ if (i__ - k < *n - *ka && k <= kbt) {
+ i__3 = i__ - k + *ka;
+ i__2 = i__ - k;
+ work[i__3].r = work[i__2].r, work[i__3].i = work[i__2].i;
+ }
+ }
+/* L170: */
+ }
+
+ for (k = *kb; k >= 1; --k) {
+/* Computing MAX */
+ i__3 = 1, i__2 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__3,i__2) * ka1;
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ if (nr > 0) {
+
+/* generate rotations in 2nd set to annihilate elements */
+/* which have been created outside the band */
+
+ zlargv_(&nr, &ab[j2 * ab_dim1 + 1], &inca, &work[j2], &ka1, &
+ rwork[j2], &ka1);
+
+/* apply rotations in 2nd set from the right */
+
+ i__3 = *ka - 1;
+ for (l = 1; l <= i__3; ++l) {
+ zlartv_(&nr, &ab[ka1 - l + j2 * ab_dim1], &inca, &ab[*ka
+ - l + (j2 + 1) * ab_dim1], &inca, &rwork[j2], &
+ work[j2], &ka1);
+/* L180: */
+ }
+
+/* apply rotations in 2nd set from both sides to diagonal */
+/* blocks */
+
+ zlar2v_(&nr, &ab[ka1 + j2 * ab_dim1], &ab[ka1 + (j2 + 1) *
+ ab_dim1], &ab[*ka + (j2 + 1) * ab_dim1], &inca, &
+ rwork[j2], &work[j2], &ka1);
+
+ zlacgv_(&nr, &work[j2], &ka1);
+ }
+
+/* start applying rotations in 2nd set from the left */
+
+ i__3 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__3; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[l + (j2 + ka1 - l) * ab_dim1], &inca, &
+ ab[l + 1 + (j2 + ka1 - l) * ab_dim1], &inca, &
+ rwork[j2], &work[j2], &ka1);
+ }
+/* L190: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 2nd set */
+
+ i__3 = j1;
+ i__2 = ka1;
+ for (j = j2; i__2 < 0 ? j >= i__3 : j <= i__3; j += i__2) {
+ i__4 = *n - m;
+ d_cnjg(&z__1, &work[j]);
+ zrot_(&i__4, &x[m + 1 + j * x_dim1], &c__1, &x[m + 1 + (j
+ + 1) * x_dim1], &c__1, &rwork[j], &z__1);
+/* L200: */
+ }
+ }
+/* L210: */
+ }
+
+ i__2 = *kb - 1;
+ for (k = 1; k <= i__2; ++k) {
+/* Computing MAX */
+ i__3 = 1, i__4 = k - i0 + 2;
+ j2 = i__ - k - 1 + max(i__3,i__4) * ka1;
+
+/* finish applying rotations in 1st set from the left */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[l + (j2 + ka1 - l) * ab_dim1], &inca, &
+ ab[l + 1 + (j2 + ka1 - l) * ab_dim1], &inca, &
+ rwork[j2 - m], &work[j2 - m], &ka1);
+ }
+/* L220: */
+ }
+/* L230: */
+ }
+
+ if (*kb > 1) {
+ i__2 = i2 + *ka;
+ for (j = *n - 1; j >= i__2; --j) {
+ rwork[j - m] = rwork[j - *ka - m];
+ i__3 = j - m;
+ i__4 = j - *ka - m;
+ work[i__3].r = work[i__4].r, work[i__3].i = work[i__4].i;
+/* L240: */
+ }
+ }
+
+ } else {
+
+/* Transform A, working with the lower triangle */
+
+ if (update) {
+
+/* Form inv(S(i))**H * A * inv(S(i)) */
+
+ i__2 = i__ * bb_dim1 + 1;
+ bii = bb[i__2].r;
+ i__2 = i__ * ab_dim1 + 1;
+ i__3 = i__ * ab_dim1 + 1;
+ d__1 = ab[i__3].r / bii / bii;
+ ab[i__2].r = d__1, ab[i__2].i = 0.;
+ i__2 = i1;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ i__3 = j - i__ + 1 + i__ * ab_dim1;
+ i__4 = j - i__ + 1 + i__ * ab_dim1;
+ z__1.r = ab[i__4].r / bii, z__1.i = ab[i__4].i / bii;
+ ab[i__3].r = z__1.r, ab[i__3].i = z__1.i;
+/* L250: */
+ }
+/* Computing MAX */
+ i__2 = 1, i__3 = i__ - *ka;
+ i__4 = i__ - 1;
+ for (j = max(i__2,i__3); j <= i__4; ++j) {
+ i__2 = i__ - j + 1 + j * ab_dim1;
+ i__3 = i__ - j + 1 + j * ab_dim1;
+ z__1.r = ab[i__3].r / bii, z__1.i = ab[i__3].i / bii;
+ ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
+/* L260: */
+ }
+ i__4 = i__ - 1;
+ for (k = i__ - kbt; k <= i__4; ++k) {
+ i__2 = k;
+ for (j = i__ - kbt; j <= i__2; ++j) {
+ i__3 = k - j + 1 + j * ab_dim1;
+ i__1 = k - j + 1 + j * ab_dim1;
+ i__5 = i__ - j + 1 + j * bb_dim1;
+ d_cnjg(&z__5, &ab[i__ - k + 1 + k * ab_dim1]);
+ z__4.r = bb[i__5].r * z__5.r - bb[i__5].i * z__5.i,
+ z__4.i = bb[i__5].r * z__5.i + bb[i__5].i *
+ z__5.r;
+ z__3.r = ab[i__1].r - z__4.r, z__3.i = ab[i__1].i -
+ z__4.i;
+ d_cnjg(&z__7, &bb[i__ - k + 1 + k * bb_dim1]);
+ i__6 = i__ - j + 1 + j * ab_dim1;
+ z__6.r = z__7.r * ab[i__6].r - z__7.i * ab[i__6].i,
+ z__6.i = z__7.r * ab[i__6].i + z__7.i * ab[i__6]
+ .r;
+ z__2.r = z__3.r - z__6.r, z__2.i = z__3.i - z__6.i;
+ i__7 = i__ * ab_dim1 + 1;
+ d__1 = ab[i__7].r;
+ i__8 = i__ - j + 1 + j * bb_dim1;
+ z__9.r = d__1 * bb[i__8].r, z__9.i = d__1 * bb[i__8].i;
+ d_cnjg(&z__10, &bb[i__ - k + 1 + k * bb_dim1]);
+ z__8.r = z__9.r * z__10.r - z__9.i * z__10.i, z__8.i =
+ z__9.r * z__10.i + z__9.i * z__10.r;
+ z__1.r = z__2.r + z__8.r, z__1.i = z__2.i + z__8.i;
+ ab[i__3].r = z__1.r, ab[i__3].i = z__1.i;
+/* L270: */
+ }
+/* Computing MAX */
+ i__2 = 1, i__3 = i__ - *ka;
+ i__1 = i__ - kbt - 1;
+ for (j = max(i__2,i__3); j <= i__1; ++j) {
+ i__2 = k - j + 1 + j * ab_dim1;
+ i__3 = k - j + 1 + j * ab_dim1;
+ d_cnjg(&z__3, &bb[i__ - k + 1 + k * bb_dim1]);
+ i__5 = i__ - j + 1 + j * ab_dim1;
+ z__2.r = z__3.r * ab[i__5].r - z__3.i * ab[i__5].i,
+ z__2.i = z__3.r * ab[i__5].i + z__3.i * ab[i__5]
+ .r;
+ z__1.r = ab[i__3].r - z__2.r, z__1.i = ab[i__3].i -
+ z__2.i;
+ ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
+/* L280: */
+ }
+/* L290: */
+ }
+ i__4 = i1;
+ for (j = i__; j <= i__4; ++j) {
+/* Computing MAX */
+ i__1 = j - *ka, i__2 = i__ - kbt;
+ i__3 = i__ - 1;
+ for (k = max(i__1,i__2); k <= i__3; ++k) {
+ i__1 = j - k + 1 + k * ab_dim1;
+ i__2 = j - k + 1 + k * ab_dim1;
+ i__5 = i__ - k + 1 + k * bb_dim1;
+ i__6 = j - i__ + 1 + i__ * ab_dim1;
+ z__2.r = bb[i__5].r * ab[i__6].r - bb[i__5].i * ab[i__6]
+ .i, z__2.i = bb[i__5].r * ab[i__6].i + bb[i__5].i
+ * ab[i__6].r;
+ z__1.r = ab[i__2].r - z__2.r, z__1.i = ab[i__2].i -
+ z__2.i;
+ ab[i__1].r = z__1.r, ab[i__1].i = z__1.i;
+/* L300: */
+ }
+/* L310: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by inv(S(i)) */
+
+ i__4 = *n - m;
+ d__1 = 1. / bii;
+ zdscal_(&i__4, &d__1, &x[m + 1 + i__ * x_dim1], &c__1);
+ if (kbt > 0) {
+ i__4 = *n - m;
+ z__1.r = -1., z__1.i = -0.;
+ i__3 = *ldbb - 1;
+ zgeru_(&i__4, &kbt, &z__1, &x[m + 1 + i__ * x_dim1], &
+ c__1, &bb[kbt + 1 + (i__ - kbt) * bb_dim1], &i__3,
+ &x[m + 1 + (i__ - kbt) * x_dim1], ldx);
+ }
+ }
+
+/* store a(i1,i) in RA1 for use in next loop over K */
+
+ i__4 = i1 - i__ + 1 + i__ * ab_dim1;
+ ra1.r = ab[i__4].r, ra1.i = ab[i__4].i;
+ }
+
+/* Generate and apply vectors of rotations to chase all the */
+/* existing bulges KA positions down toward the bottom of the */
+/* band */
+
+ i__4 = *kb - 1;
+ for (k = 1; k <= i__4; ++k) {
+ if (update) {
+
+/* Determine the rotations which would annihilate the bulge */
+/* which has in theory just been created */
+
+ if (i__ - k + *ka < *n && i__ - k > 1) {
+
+/* generate rotation to annihilate a(i-k+ka+1,i) */
+
+ zlartg_(&ab[ka1 - k + i__ * ab_dim1], &ra1, &rwork[i__ -
+ k + *ka - m], &work[i__ - k + *ka - m], &ra);
+
+/* create nonzero element a(i-k+ka+1,i-k) outside the */
+/* band and store it in WORK(i-k) */
+
+ i__3 = k + 1 + (i__ - k) * bb_dim1;
+ z__2.r = -bb[i__3].r, z__2.i = -bb[i__3].i;
+ z__1.r = z__2.r * ra1.r - z__2.i * ra1.i, z__1.i = z__2.r
+ * ra1.i + z__2.i * ra1.r;
+ t.r = z__1.r, t.i = z__1.i;
+ i__3 = i__ - k;
+ i__1 = i__ - k + *ka - m;
+ z__2.r = rwork[i__1] * t.r, z__2.i = rwork[i__1] * t.i;
+ d_cnjg(&z__4, &work[i__ - k + *ka - m]);
+ i__2 = ka1 + (i__ - k) * ab_dim1;
+ z__3.r = z__4.r * ab[i__2].r - z__4.i * ab[i__2].i,
+ z__3.i = z__4.r * ab[i__2].i + z__4.i * ab[i__2]
+ .r;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+ i__3 = ka1 + (i__ - k) * ab_dim1;
+ i__1 = i__ - k + *ka - m;
+ z__2.r = work[i__1].r * t.r - work[i__1].i * t.i, z__2.i =
+ work[i__1].r * t.i + work[i__1].i * t.r;
+ i__2 = i__ - k + *ka - m;
+ i__5 = ka1 + (i__ - k) * ab_dim1;
+ z__3.r = rwork[i__2] * ab[i__5].r, z__3.i = rwork[i__2] *
+ ab[i__5].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ ab[i__3].r = z__1.r, ab[i__3].i = z__1.i;
+ ra1.r = ra.r, ra1.i = ra.i;
+ }
+ }
+/* Computing MAX */
+ i__3 = 1, i__1 = k - i0 + 2;
+ j2 = i__ - k - 1 + max(i__3,i__1) * ka1;
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ if (update) {
+/* Computing MAX */
+ i__3 = j2, i__1 = i__ + (*ka << 1) - k + 1;
+ j2t = max(i__3,i__1);
+ } else {
+ j2t = j2;
+ }
+ nrt = (*n - j2t + *ka) / ka1;
+ i__3 = j1;
+ i__1 = ka1;
+ for (j = j2t; i__1 < 0 ? j >= i__3 : j <= i__3; j += i__1) {
+
+/* create nonzero element a(j+1,j-ka) outside the band */
+/* and store it in WORK(j-m) */
+
+ i__2 = j - m;
+ i__5 = j - m;
+ i__6 = ka1 + (j - *ka + 1) * ab_dim1;
+ z__1.r = work[i__5].r * ab[i__6].r - work[i__5].i * ab[i__6]
+ .i, z__1.i = work[i__5].r * ab[i__6].i + work[i__5].i
+ * ab[i__6].r;
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+ i__2 = ka1 + (j - *ka + 1) * ab_dim1;
+ i__5 = j - m;
+ i__6 = ka1 + (j - *ka + 1) * ab_dim1;
+ z__1.r = rwork[i__5] * ab[i__6].r, z__1.i = rwork[i__5] * ab[
+ i__6].i;
+ ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
+/* L320: */
+ }
+
+/* generate rotations in 1st set to annihilate elements which */
+/* have been created outside the band */
+
+ if (nrt > 0) {
+ zlargv_(&nrt, &ab[ka1 + (j2t - *ka) * ab_dim1], &inca, &work[
+ j2t - m], &ka1, &rwork[j2t - m], &ka1);
+ }
+ if (nr > 0) {
+
+/* apply rotations in 1st set from the left */
+
+ i__1 = *ka - 1;
+ for (l = 1; l <= i__1; ++l) {
+ zlartv_(&nr, &ab[l + 1 + (j2 - l) * ab_dim1], &inca, &ab[
+ l + 2 + (j2 - l) * ab_dim1], &inca, &rwork[j2 - m]
+, &work[j2 - m], &ka1);
+/* L330: */
+ }
+
+/* apply rotations in 1st set from both sides to diagonal */
+/* blocks */
+
+ zlar2v_(&nr, &ab[j2 * ab_dim1 + 1], &ab[(j2 + 1) * ab_dim1 +
+ 1], &ab[j2 * ab_dim1 + 2], &inca, &rwork[j2 - m], &
+ work[j2 - m], &ka1);
+
+ zlacgv_(&nr, &work[j2 - m], &ka1);
+ }
+
+/* start applying rotations in 1st set from the right */
+
+ i__1 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__1; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[ka1 - l + 1 + j2 * ab_dim1], &inca, &ab[
+ ka1 - l + (j2 + 1) * ab_dim1], &inca, &rwork[j2 -
+ m], &work[j2 - m], &ka1);
+ }
+/* L340: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 1st set */
+
+ i__1 = j1;
+ i__3 = ka1;
+ for (j = j2; i__3 < 0 ? j >= i__1 : j <= i__1; j += i__3) {
+ i__2 = *n - m;
+ zrot_(&i__2, &x[m + 1 + j * x_dim1], &c__1, &x[m + 1 + (j
+ + 1) * x_dim1], &c__1, &rwork[j - m], &work[j - m]
+);
+/* L350: */
+ }
+ }
+/* L360: */
+ }
+
+ if (update) {
+ if (i2 <= *n && kbt > 0) {
+
+/* create nonzero element a(i-kbt+ka+1,i-kbt) outside the */
+/* band and store it in WORK(i-kbt) */
+
+ i__4 = i__ - kbt;
+ i__3 = kbt + 1 + (i__ - kbt) * bb_dim1;
+ z__2.r = -bb[i__3].r, z__2.i = -bb[i__3].i;
+ z__1.r = z__2.r * ra1.r - z__2.i * ra1.i, z__1.i = z__2.r *
+ ra1.i + z__2.i * ra1.r;
+ work[i__4].r = z__1.r, work[i__4].i = z__1.i;
+ }
+ }
+
+ for (k = *kb; k >= 1; --k) {
+ if (update) {
+/* Computing MAX */
+ i__4 = 2, i__3 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__4,i__3) * ka1;
+ } else {
+/* Computing MAX */
+ i__4 = 1, i__3 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__4,i__3) * ka1;
+ }
+
+/* finish applying rotations in 2nd set from the right */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (*n - j2 + *ka + l) / ka1;
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[ka1 - l + 1 + (j2 - *ka) * ab_dim1], &
+ inca, &ab[ka1 - l + (j2 - *ka + 1) * ab_dim1], &
+ inca, &rwork[j2 - *ka], &work[j2 - *ka], &ka1);
+ }
+/* L370: */
+ }
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ i__4 = j2;
+ i__3 = -ka1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+ i__1 = j;
+ i__2 = j - *ka;
+ work[i__1].r = work[i__2].r, work[i__1].i = work[i__2].i;
+ rwork[j] = rwork[j - *ka];
+/* L380: */
+ }
+ i__3 = j1;
+ i__4 = ka1;
+ for (j = j2; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+
+/* create nonzero element a(j+1,j-ka) outside the band */
+/* and store it in WORK(j) */
+
+ i__1 = j;
+ i__2 = j;
+ i__5 = ka1 + (j - *ka + 1) * ab_dim1;
+ z__1.r = work[i__2].r * ab[i__5].r - work[i__2].i * ab[i__5]
+ .i, z__1.i = work[i__2].r * ab[i__5].i + work[i__2].i
+ * ab[i__5].r;
+ work[i__1].r = z__1.r, work[i__1].i = z__1.i;
+ i__1 = ka1 + (j - *ka + 1) * ab_dim1;
+ i__2 = j;
+ i__5 = ka1 + (j - *ka + 1) * ab_dim1;
+ z__1.r = rwork[i__2] * ab[i__5].r, z__1.i = rwork[i__2] * ab[
+ i__5].i;
+ ab[i__1].r = z__1.r, ab[i__1].i = z__1.i;
+/* L390: */
+ }
+ if (update) {
+ if (i__ - k < *n - *ka && k <= kbt) {
+ i__4 = i__ - k + *ka;
+ i__3 = i__ - k;
+ work[i__4].r = work[i__3].r, work[i__4].i = work[i__3].i;
+ }
+ }
+/* L400: */
+ }
+
+ for (k = *kb; k >= 1; --k) {
+/* Computing MAX */
+ i__4 = 1, i__3 = k - i0 + 1;
+ j2 = i__ - k - 1 + max(i__4,i__3) * ka1;
+ nr = (*n - j2 + *ka) / ka1;
+ j1 = j2 + (nr - 1) * ka1;
+ if (nr > 0) {
+
+/* generate rotations in 2nd set to annihilate elements */
+/* which have been created outside the band */
+
+ zlargv_(&nr, &ab[ka1 + (j2 - *ka) * ab_dim1], &inca, &work[j2]
+, &ka1, &rwork[j2], &ka1);
+
+/* apply rotations in 2nd set from the left */
+
+ i__4 = *ka - 1;
+ for (l = 1; l <= i__4; ++l) {
+ zlartv_(&nr, &ab[l + 1 + (j2 - l) * ab_dim1], &inca, &ab[
+ l + 2 + (j2 - l) * ab_dim1], &inca, &rwork[j2], &
+ work[j2], &ka1);
+/* L410: */
+ }
+
+/* apply rotations in 2nd set from both sides to diagonal */
+/* blocks */
+
+ zlar2v_(&nr, &ab[j2 * ab_dim1 + 1], &ab[(j2 + 1) * ab_dim1 +
+ 1], &ab[j2 * ab_dim1 + 2], &inca, &rwork[j2], &work[
+ j2], &ka1);
+
+ zlacgv_(&nr, &work[j2], &ka1);
+ }
+
+/* start applying rotations in 2nd set from the right */
+
+ i__4 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__4; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[ka1 - l + 1 + j2 * ab_dim1], &inca, &ab[
+ ka1 - l + (j2 + 1) * ab_dim1], &inca, &rwork[j2],
+ &work[j2], &ka1);
+ }
+/* L420: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 2nd set */
+
+ i__4 = j1;
+ i__3 = ka1;
+ for (j = j2; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+ i__1 = *n - m;
+ zrot_(&i__1, &x[m + 1 + j * x_dim1], &c__1, &x[m + 1 + (j
+ + 1) * x_dim1], &c__1, &rwork[j], &work[j]);
+/* L430: */
+ }
+ }
+/* L440: */
+ }
+
+ i__3 = *kb - 1;
+ for (k = 1; k <= i__3; ++k) {
+/* Computing MAX */
+ i__4 = 1, i__1 = k - i0 + 2;
+ j2 = i__ - k - 1 + max(i__4,i__1) * ka1;
+
+/* finish applying rotations in 1st set from the right */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (*n - j2 + l) / ka1;
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[ka1 - l + 1 + j2 * ab_dim1], &inca, &ab[
+ ka1 - l + (j2 + 1) * ab_dim1], &inca, &rwork[j2 -
+ m], &work[j2 - m], &ka1);
+ }
+/* L450: */
+ }
+/* L460: */
+ }
+
+ if (*kb > 1) {
+ i__3 = i2 + *ka;
+ for (j = *n - 1; j >= i__3; --j) {
+ rwork[j - m] = rwork[j - *ka - m];
+ i__4 = j - m;
+ i__1 = j - *ka - m;
+ work[i__4].r = work[i__1].r, work[i__4].i = work[i__1].i;
+/* L470: */
+ }
+ }
+
+ }
+
+ goto L10;
+
+L480:
+
+/* **************************** Phase 2 ***************************** */
+
+/* The logical structure of this phase is: */
+
+/* UPDATE = .TRUE. */
+/* DO I = 1, M */
+/* use S(i) to update A and create a new bulge */
+/* apply rotations to push all bulges KA positions upward */
+/* END DO */
+/* UPDATE = .FALSE. */
+/* DO I = M - KA - 1, 2, -1 */
+/* apply rotations to push all bulges KA positions upward */
+/* END DO */
+
+/* To avoid duplicating code, the two loops are merged. */
+
+ update = TRUE_;
+ i__ = 0;
+L490:
+ if (update) {
+ ++i__;
+/* Computing MIN */
+ i__3 = *kb, i__4 = m - i__;
+ kbt = min(i__3,i__4);
+ i0 = i__ + 1;
+/* Computing MAX */
+ i__3 = 1, i__4 = i__ - *ka;
+ i1 = max(i__3,i__4);
+ i2 = i__ + kbt - ka1;
+ if (i__ > m) {
+ update = FALSE_;
+ --i__;
+ i0 = m + 1;
+ if (*ka == 0) {
+ return 0;
+ }
+ goto L490;
+ }
+ } else {
+ i__ -= *ka;
+ if (i__ < 2) {
+ return 0;
+ }
+ }
+
+ if (i__ < m - kbt) {
+ nx = m;
+ } else {
+ nx = *n;
+ }
+
+ if (upper) {
+
+/* Transform A, working with the upper triangle */
+
+ if (update) {
+
+/* Form inv(S(i))**H * A * inv(S(i)) */
+
+ i__3 = kb1 + i__ * bb_dim1;
+ bii = bb[i__3].r;
+ i__3 = ka1 + i__ * ab_dim1;
+ i__4 = ka1 + i__ * ab_dim1;
+ d__1 = ab[i__4].r / bii / bii;
+ ab[i__3].r = d__1, ab[i__3].i = 0.;
+ i__3 = i__ - 1;
+ for (j = i1; j <= i__3; ++j) {
+ i__4 = j - i__ + ka1 + i__ * ab_dim1;
+ i__1 = j - i__ + ka1 + i__ * ab_dim1;
+ z__1.r = ab[i__1].r / bii, z__1.i = ab[i__1].i / bii;
+ ab[i__4].r = z__1.r, ab[i__4].i = z__1.i;
+/* L500: */
+ }
+/* Computing MIN */
+ i__4 = *n, i__1 = i__ + *ka;
+ i__3 = min(i__4,i__1);
+ for (j = i__ + 1; j <= i__3; ++j) {
+ i__4 = i__ - j + ka1 + j * ab_dim1;
+ i__1 = i__ - j + ka1 + j * ab_dim1;
+ z__1.r = ab[i__1].r / bii, z__1.i = ab[i__1].i / bii;
+ ab[i__4].r = z__1.r, ab[i__4].i = z__1.i;
+/* L510: */
+ }
+ i__3 = i__ + kbt;
+ for (k = i__ + 1; k <= i__3; ++k) {
+ i__4 = i__ + kbt;
+ for (j = k; j <= i__4; ++j) {
+ i__1 = k - j + ka1 + j * ab_dim1;
+ i__2 = k - j + ka1 + j * ab_dim1;
+ i__5 = i__ - j + kb1 + j * bb_dim1;
+ d_cnjg(&z__5, &ab[i__ - k + ka1 + k * ab_dim1]);
+ z__4.r = bb[i__5].r * z__5.r - bb[i__5].i * z__5.i,
+ z__4.i = bb[i__5].r * z__5.i + bb[i__5].i *
+ z__5.r;
+ z__3.r = ab[i__2].r - z__4.r, z__3.i = ab[i__2].i -
+ z__4.i;
+ d_cnjg(&z__7, &bb[i__ - k + kb1 + k * bb_dim1]);
+ i__6 = i__ - j + ka1 + j * ab_dim1;
+ z__6.r = z__7.r * ab[i__6].r - z__7.i * ab[i__6].i,
+ z__6.i = z__7.r * ab[i__6].i + z__7.i * ab[i__6]
+ .r;
+ z__2.r = z__3.r - z__6.r, z__2.i = z__3.i - z__6.i;
+ i__7 = ka1 + i__ * ab_dim1;
+ d__1 = ab[i__7].r;
+ i__8 = i__ - j + kb1 + j * bb_dim1;
+ z__9.r = d__1 * bb[i__8].r, z__9.i = d__1 * bb[i__8].i;
+ d_cnjg(&z__10, &bb[i__ - k + kb1 + k * bb_dim1]);
+ z__8.r = z__9.r * z__10.r - z__9.i * z__10.i, z__8.i =
+ z__9.r * z__10.i + z__9.i * z__10.r;
+ z__1.r = z__2.r + z__8.r, z__1.i = z__2.i + z__8.i;
+ ab[i__1].r = z__1.r, ab[i__1].i = z__1.i;
+/* L520: */
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = i__ + *ka;
+ i__4 = min(i__1,i__2);
+ for (j = i__ + kbt + 1; j <= i__4; ++j) {
+ i__1 = k - j + ka1 + j * ab_dim1;
+ i__2 = k - j + ka1 + j * ab_dim1;
+ d_cnjg(&z__3, &bb[i__ - k + kb1 + k * bb_dim1]);
+ i__5 = i__ - j + ka1 + j * ab_dim1;
+ z__2.r = z__3.r * ab[i__5].r - z__3.i * ab[i__5].i,
+ z__2.i = z__3.r * ab[i__5].i + z__3.i * ab[i__5]
+ .r;
+ z__1.r = ab[i__2].r - z__2.r, z__1.i = ab[i__2].i -
+ z__2.i;
+ ab[i__1].r = z__1.r, ab[i__1].i = z__1.i;
+/* L530: */
+ }
+/* L540: */
+ }
+ i__3 = i__;
+ for (j = i1; j <= i__3; ++j) {
+/* Computing MIN */
+ i__1 = j + *ka, i__2 = i__ + kbt;
+ i__4 = min(i__1,i__2);
+ for (k = i__ + 1; k <= i__4; ++k) {
+ i__1 = j - k + ka1 + k * ab_dim1;
+ i__2 = j - k + ka1 + k * ab_dim1;
+ i__5 = i__ - k + kb1 + k * bb_dim1;
+ i__6 = j - i__ + ka1 + i__ * ab_dim1;
+ z__2.r = bb[i__5].r * ab[i__6].r - bb[i__5].i * ab[i__6]
+ .i, z__2.i = bb[i__5].r * ab[i__6].i + bb[i__5].i
+ * ab[i__6].r;
+ z__1.r = ab[i__2].r - z__2.r, z__1.i = ab[i__2].i -
+ z__2.i;
+ ab[i__1].r = z__1.r, ab[i__1].i = z__1.i;
+/* L550: */
+ }
+/* L560: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by inv(S(i)) */
+
+ d__1 = 1. / bii;
+ zdscal_(&nx, &d__1, &x[i__ * x_dim1 + 1], &c__1);
+ if (kbt > 0) {
+ z__1.r = -1., z__1.i = -0.;
+ i__3 = *ldbb - 1;
+ zgeru_(&nx, &kbt, &z__1, &x[i__ * x_dim1 + 1], &c__1, &bb[
+ *kb + (i__ + 1) * bb_dim1], &i__3, &x[(i__ + 1) *
+ x_dim1 + 1], ldx);
+ }
+ }
+
+/* store a(i1,i) in RA1 for use in next loop over K */
+
+ i__3 = i1 - i__ + ka1 + i__ * ab_dim1;
+ ra1.r = ab[i__3].r, ra1.i = ab[i__3].i;
+ }
+
+/* Generate and apply vectors of rotations to chase all the */
+/* existing bulges KA positions up toward the top of the band */
+
+ i__3 = *kb - 1;
+ for (k = 1; k <= i__3; ++k) {
+ if (update) {
+
+/* Determine the rotations which would annihilate the bulge */
+/* which has in theory just been created */
+
+ if (i__ + k - ka1 > 0 && i__ + k < m) {
+
+/* generate rotation to annihilate a(i+k-ka-1,i) */
+
+ zlartg_(&ab[k + 1 + i__ * ab_dim1], &ra1, &rwork[i__ + k
+ - *ka], &work[i__ + k - *ka], &ra);
+
+/* create nonzero element a(i+k-ka-1,i+k) outside the */
+/* band and store it in WORK(m-kb+i+k) */
+
+ i__4 = kb1 - k + (i__ + k) * bb_dim1;
+ z__2.r = -bb[i__4].r, z__2.i = -bb[i__4].i;
+ z__1.r = z__2.r * ra1.r - z__2.i * ra1.i, z__1.i = z__2.r
+ * ra1.i + z__2.i * ra1.r;
+ t.r = z__1.r, t.i = z__1.i;
+ i__4 = m - *kb + i__ + k;
+ i__1 = i__ + k - *ka;
+ z__2.r = rwork[i__1] * t.r, z__2.i = rwork[i__1] * t.i;
+ d_cnjg(&z__4, &work[i__ + k - *ka]);
+ i__2 = (i__ + k) * ab_dim1 + 1;
+ z__3.r = z__4.r * ab[i__2].r - z__4.i * ab[i__2].i,
+ z__3.i = z__4.r * ab[i__2].i + z__4.i * ab[i__2]
+ .r;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ work[i__4].r = z__1.r, work[i__4].i = z__1.i;
+ i__4 = (i__ + k) * ab_dim1 + 1;
+ i__1 = i__ + k - *ka;
+ z__2.r = work[i__1].r * t.r - work[i__1].i * t.i, z__2.i =
+ work[i__1].r * t.i + work[i__1].i * t.r;
+ i__2 = i__ + k - *ka;
+ i__5 = (i__ + k) * ab_dim1 + 1;
+ z__3.r = rwork[i__2] * ab[i__5].r, z__3.i = rwork[i__2] *
+ ab[i__5].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ ab[i__4].r = z__1.r, ab[i__4].i = z__1.i;
+ ra1.r = ra.r, ra1.i = ra.i;
+ }
+ }
+/* Computing MAX */
+ i__4 = 1, i__1 = k + i0 - m + 1;
+ j2 = i__ + k + 1 - max(i__4,i__1) * ka1;
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ if (update) {
+/* Computing MIN */
+ i__4 = j2, i__1 = i__ - (*ka << 1) + k - 1;
+ j2t = min(i__4,i__1);
+ } else {
+ j2t = j2;
+ }
+ nrt = (j2t + *ka - 1) / ka1;
+ i__4 = j2t;
+ i__1 = ka1;
+ for (j = j1; i__1 < 0 ? j >= i__4 : j <= i__4; j += i__1) {
+
+/* create nonzero element a(j-1,j+ka) outside the band */
+/* and store it in WORK(j) */
+
+ i__2 = j;
+ i__5 = j;
+ i__6 = (j + *ka - 1) * ab_dim1 + 1;
+ z__1.r = work[i__5].r * ab[i__6].r - work[i__5].i * ab[i__6]
+ .i, z__1.i = work[i__5].r * ab[i__6].i + work[i__5].i
+ * ab[i__6].r;
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+ i__2 = (j + *ka - 1) * ab_dim1 + 1;
+ i__5 = j;
+ i__6 = (j + *ka - 1) * ab_dim1 + 1;
+ z__1.r = rwork[i__5] * ab[i__6].r, z__1.i = rwork[i__5] * ab[
+ i__6].i;
+ ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
+/* L570: */
+ }
+
+/* generate rotations in 1st set to annihilate elements which */
+/* have been created outside the band */
+
+ if (nrt > 0) {
+ zlargv_(&nrt, &ab[(j1 + *ka) * ab_dim1 + 1], &inca, &work[j1],
+ &ka1, &rwork[j1], &ka1);
+ }
+ if (nr > 0) {
+
+/* apply rotations in 1st set from the left */
+
+ i__1 = *ka - 1;
+ for (l = 1; l <= i__1; ++l) {
+ zlartv_(&nr, &ab[ka1 - l + (j1 + l) * ab_dim1], &inca, &
+ ab[*ka - l + (j1 + l) * ab_dim1], &inca, &rwork[
+ j1], &work[j1], &ka1);
+/* L580: */
+ }
+
+/* apply rotations in 1st set from both sides to diagonal */
+/* blocks */
+
+ zlar2v_(&nr, &ab[ka1 + j1 * ab_dim1], &ab[ka1 + (j1 - 1) *
+ ab_dim1], &ab[*ka + j1 * ab_dim1], &inca, &rwork[j1],
+ &work[j1], &ka1);
+
+ zlacgv_(&nr, &work[j1], &ka1);
+ }
+
+/* start applying rotations in 1st set from the right */
+
+ i__1 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__1; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[l + j1t * ab_dim1], &inca, &ab[l + 1 + (
+ j1t - 1) * ab_dim1], &inca, &rwork[j1t], &work[
+ j1t], &ka1);
+ }
+/* L590: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 1st set */
+
+ i__1 = j2;
+ i__4 = ka1;
+ for (j = j1; i__4 < 0 ? j >= i__1 : j <= i__1; j += i__4) {
+ zrot_(&nx, &x[j * x_dim1 + 1], &c__1, &x[(j - 1) * x_dim1
+ + 1], &c__1, &rwork[j], &work[j]);
+/* L600: */
+ }
+ }
+/* L610: */
+ }
+
+ if (update) {
+ if (i2 > 0 && kbt > 0) {
+
+/* create nonzero element a(i+kbt-ka-1,i+kbt) outside the */
+/* band and store it in WORK(m-kb+i+kbt) */
+
+ i__3 = m - *kb + i__ + kbt;
+ i__4 = kb1 - kbt + (i__ + kbt) * bb_dim1;
+ z__2.r = -bb[i__4].r, z__2.i = -bb[i__4].i;
+ z__1.r = z__2.r * ra1.r - z__2.i * ra1.i, z__1.i = z__2.r *
+ ra1.i + z__2.i * ra1.r;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+ }
+ }
+
+ for (k = *kb; k >= 1; --k) {
+ if (update) {
+/* Computing MAX */
+ i__3 = 2, i__4 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__3,i__4) * ka1;
+ } else {
+/* Computing MAX */
+ i__3 = 1, i__4 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__3,i__4) * ka1;
+ }
+
+/* finish applying rotations in 2nd set from the right */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (j2 + *ka + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[l + (j1t + *ka) * ab_dim1], &inca, &ab[
+ l + 1 + (j1t + *ka - 1) * ab_dim1], &inca, &rwork[
+ m - *kb + j1t + *ka], &work[m - *kb + j1t + *ka],
+ &ka1);
+ }
+/* L620: */
+ }
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ i__3 = j2;
+ i__4 = ka1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+ i__1 = m - *kb + j;
+ i__2 = m - *kb + j + *ka;
+ work[i__1].r = work[i__2].r, work[i__1].i = work[i__2].i;
+ rwork[m - *kb + j] = rwork[m - *kb + j + *ka];
+/* L630: */
+ }
+ i__4 = j2;
+ i__3 = ka1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+
+/* create nonzero element a(j-1,j+ka) outside the band */
+/* and store it in WORK(m-kb+j) */
+
+ i__1 = m - *kb + j;
+ i__2 = m - *kb + j;
+ i__5 = (j + *ka - 1) * ab_dim1 + 1;
+ z__1.r = work[i__2].r * ab[i__5].r - work[i__2].i * ab[i__5]
+ .i, z__1.i = work[i__2].r * ab[i__5].i + work[i__2].i
+ * ab[i__5].r;
+ work[i__1].r = z__1.r, work[i__1].i = z__1.i;
+ i__1 = (j + *ka - 1) * ab_dim1 + 1;
+ i__2 = m - *kb + j;
+ i__5 = (j + *ka - 1) * ab_dim1 + 1;
+ z__1.r = rwork[i__2] * ab[i__5].r, z__1.i = rwork[i__2] * ab[
+ i__5].i;
+ ab[i__1].r = z__1.r, ab[i__1].i = z__1.i;
+/* L640: */
+ }
+ if (update) {
+ if (i__ + k > ka1 && k <= kbt) {
+ i__3 = m - *kb + i__ + k - *ka;
+ i__4 = m - *kb + i__ + k;
+ work[i__3].r = work[i__4].r, work[i__3].i = work[i__4].i;
+ }
+ }
+/* L650: */
+ }
+
+ for (k = *kb; k >= 1; --k) {
+/* Computing MAX */
+ i__3 = 1, i__4 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__3,i__4) * ka1;
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ if (nr > 0) {
+
+/* generate rotations in 2nd set to annihilate elements */
+/* which have been created outside the band */
+
+ zlargv_(&nr, &ab[(j1 + *ka) * ab_dim1 + 1], &inca, &work[m - *
+ kb + j1], &ka1, &rwork[m - *kb + j1], &ka1);
+
+/* apply rotations in 2nd set from the left */
+
+ i__3 = *ka - 1;
+ for (l = 1; l <= i__3; ++l) {
+ zlartv_(&nr, &ab[ka1 - l + (j1 + l) * ab_dim1], &inca, &
+ ab[*ka - l + (j1 + l) * ab_dim1], &inca, &rwork[m
+ - *kb + j1], &work[m - *kb + j1], &ka1);
+/* L660: */
+ }
+
+/* apply rotations in 2nd set from both sides to diagonal */
+/* blocks */
+
+ zlar2v_(&nr, &ab[ka1 + j1 * ab_dim1], &ab[ka1 + (j1 - 1) *
+ ab_dim1], &ab[*ka + j1 * ab_dim1], &inca, &rwork[m - *
+ kb + j1], &work[m - *kb + j1], &ka1);
+
+ zlacgv_(&nr, &work[m - *kb + j1], &ka1);
+ }
+
+/* start applying rotations in 2nd set from the right */
+
+ i__3 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__3; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[l + j1t * ab_dim1], &inca, &ab[l + 1 + (
+ j1t - 1) * ab_dim1], &inca, &rwork[m - *kb + j1t],
+ &work[m - *kb + j1t], &ka1);
+ }
+/* L670: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 2nd set */
+
+ i__3 = j2;
+ i__4 = ka1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+ zrot_(&nx, &x[j * x_dim1 + 1], &c__1, &x[(j - 1) * x_dim1
+ + 1], &c__1, &rwork[m - *kb + j], &work[m - *kb +
+ j]);
+/* L680: */
+ }
+ }
+/* L690: */
+ }
+
+ i__4 = *kb - 1;
+ for (k = 1; k <= i__4; ++k) {
+/* Computing MAX */
+ i__3 = 1, i__1 = k + i0 - m + 1;
+ j2 = i__ + k + 1 - max(i__3,i__1) * ka1;
+
+/* finish applying rotations in 1st set from the right */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[l + j1t * ab_dim1], &inca, &ab[l + 1 + (
+ j1t - 1) * ab_dim1], &inca, &rwork[j1t], &work[
+ j1t], &ka1);
+ }
+/* L700: */
+ }
+/* L710: */
+ }
+
+ if (*kb > 1) {
+ i__4 = i2 - *ka;
+ for (j = 2; j <= i__4; ++j) {
+ rwork[j] = rwork[j + *ka];
+ i__3 = j;
+ i__1 = j + *ka;
+ work[i__3].r = work[i__1].r, work[i__3].i = work[i__1].i;
+/* L720: */
+ }
+ }
+
+ } else {
+
+/* Transform A, working with the lower triangle */
+
+ if (update) {
+
+/* Form inv(S(i))**H * A * inv(S(i)) */
+
+ i__4 = i__ * bb_dim1 + 1;
+ bii = bb[i__4].r;
+ i__4 = i__ * ab_dim1 + 1;
+ i__3 = i__ * ab_dim1 + 1;
+ d__1 = ab[i__3].r / bii / bii;
+ ab[i__4].r = d__1, ab[i__4].i = 0.;
+ i__4 = i__ - 1;
+ for (j = i1; j <= i__4; ++j) {
+ i__3 = i__ - j + 1 + j * ab_dim1;
+ i__1 = i__ - j + 1 + j * ab_dim1;
+ z__1.r = ab[i__1].r / bii, z__1.i = ab[i__1].i / bii;
+ ab[i__3].r = z__1.r, ab[i__3].i = z__1.i;
+/* L730: */
+ }
+/* Computing MIN */
+ i__3 = *n, i__1 = i__ + *ka;
+ i__4 = min(i__3,i__1);
+ for (j = i__ + 1; j <= i__4; ++j) {
+ i__3 = j - i__ + 1 + i__ * ab_dim1;
+ i__1 = j - i__ + 1 + i__ * ab_dim1;
+ z__1.r = ab[i__1].r / bii, z__1.i = ab[i__1].i / bii;
+ ab[i__3].r = z__1.r, ab[i__3].i = z__1.i;
+/* L740: */
+ }
+ i__4 = i__ + kbt;
+ for (k = i__ + 1; k <= i__4; ++k) {
+ i__3 = i__ + kbt;
+ for (j = k; j <= i__3; ++j) {
+ i__1 = j - k + 1 + k * ab_dim1;
+ i__2 = j - k + 1 + k * ab_dim1;
+ i__5 = j - i__ + 1 + i__ * bb_dim1;
+ d_cnjg(&z__5, &ab[k - i__ + 1 + i__ * ab_dim1]);
+ z__4.r = bb[i__5].r * z__5.r - bb[i__5].i * z__5.i,
+ z__4.i = bb[i__5].r * z__5.i + bb[i__5].i *
+ z__5.r;
+ z__3.r = ab[i__2].r - z__4.r, z__3.i = ab[i__2].i -
+ z__4.i;
+ d_cnjg(&z__7, &bb[k - i__ + 1 + i__ * bb_dim1]);
+ i__6 = j - i__ + 1 + i__ * ab_dim1;
+ z__6.r = z__7.r * ab[i__6].r - z__7.i * ab[i__6].i,
+ z__6.i = z__7.r * ab[i__6].i + z__7.i * ab[i__6]
+ .r;
+ z__2.r = z__3.r - z__6.r, z__2.i = z__3.i - z__6.i;
+ i__7 = i__ * ab_dim1 + 1;
+ d__1 = ab[i__7].r;
+ i__8 = j - i__ + 1 + i__ * bb_dim1;
+ z__9.r = d__1 * bb[i__8].r, z__9.i = d__1 * bb[i__8].i;
+ d_cnjg(&z__10, &bb[k - i__ + 1 + i__ * bb_dim1]);
+ z__8.r = z__9.r * z__10.r - z__9.i * z__10.i, z__8.i =
+ z__9.r * z__10.i + z__9.i * z__10.r;
+ z__1.r = z__2.r + z__8.r, z__1.i = z__2.i + z__8.i;
+ ab[i__1].r = z__1.r, ab[i__1].i = z__1.i;
+/* L750: */
+ }
+/* Computing MIN */
+ i__1 = *n, i__2 = i__ + *ka;
+ i__3 = min(i__1,i__2);
+ for (j = i__ + kbt + 1; j <= i__3; ++j) {
+ i__1 = j - k + 1 + k * ab_dim1;
+ i__2 = j - k + 1 + k * ab_dim1;
+ d_cnjg(&z__3, &bb[k - i__ + 1 + i__ * bb_dim1]);
+ i__5 = j - i__ + 1 + i__ * ab_dim1;
+ z__2.r = z__3.r * ab[i__5].r - z__3.i * ab[i__5].i,
+ z__2.i = z__3.r * ab[i__5].i + z__3.i * ab[i__5]
+ .r;
+ z__1.r = ab[i__2].r - z__2.r, z__1.i = ab[i__2].i -
+ z__2.i;
+ ab[i__1].r = z__1.r, ab[i__1].i = z__1.i;
+/* L760: */
+ }
+/* L770: */
+ }
+ i__4 = i__;
+ for (j = i1; j <= i__4; ++j) {
+/* Computing MIN */
+ i__1 = j + *ka, i__2 = i__ + kbt;
+ i__3 = min(i__1,i__2);
+ for (k = i__ + 1; k <= i__3; ++k) {
+ i__1 = k - j + 1 + j * ab_dim1;
+ i__2 = k - j + 1 + j * ab_dim1;
+ i__5 = k - i__ + 1 + i__ * bb_dim1;
+ i__6 = i__ - j + 1 + j * ab_dim1;
+ z__2.r = bb[i__5].r * ab[i__6].r - bb[i__5].i * ab[i__6]
+ .i, z__2.i = bb[i__5].r * ab[i__6].i + bb[i__5].i
+ * ab[i__6].r;
+ z__1.r = ab[i__2].r - z__2.r, z__1.i = ab[i__2].i -
+ z__2.i;
+ ab[i__1].r = z__1.r, ab[i__1].i = z__1.i;
+/* L780: */
+ }
+/* L790: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by inv(S(i)) */
+
+ d__1 = 1. / bii;
+ zdscal_(&nx, &d__1, &x[i__ * x_dim1 + 1], &c__1);
+ if (kbt > 0) {
+ z__1.r = -1., z__1.i = -0.;
+ zgerc_(&nx, &kbt, &z__1, &x[i__ * x_dim1 + 1], &c__1, &bb[
+ i__ * bb_dim1 + 2], &c__1, &x[(i__ + 1) * x_dim1
+ + 1], ldx);
+ }
+ }
+
+/* store a(i,i1) in RA1 for use in next loop over K */
+
+ i__4 = i__ - i1 + 1 + i1 * ab_dim1;
+ ra1.r = ab[i__4].r, ra1.i = ab[i__4].i;
+ }
+
+/* Generate and apply vectors of rotations to chase all the */
+/* existing bulges KA positions up toward the top of the band */
+
+ i__4 = *kb - 1;
+ for (k = 1; k <= i__4; ++k) {
+ if (update) {
+
+/* Determine the rotations which would annihilate the bulge */
+/* which has in theory just been created */
+
+ if (i__ + k - ka1 > 0 && i__ + k < m) {
+
+/* generate rotation to annihilate a(i,i+k-ka-1) */
+
+ zlartg_(&ab[ka1 - k + (i__ + k - *ka) * ab_dim1], &ra1, &
+ rwork[i__ + k - *ka], &work[i__ + k - *ka], &ra);
+
+/* create nonzero element a(i+k,i+k-ka-1) outside the */
+/* band and store it in WORK(m-kb+i+k) */
+
+ i__3 = k + 1 + i__ * bb_dim1;
+ z__2.r = -bb[i__3].r, z__2.i = -bb[i__3].i;
+ z__1.r = z__2.r * ra1.r - z__2.i * ra1.i, z__1.i = z__2.r
+ * ra1.i + z__2.i * ra1.r;
+ t.r = z__1.r, t.i = z__1.i;
+ i__3 = m - *kb + i__ + k;
+ i__1 = i__ + k - *ka;
+ z__2.r = rwork[i__1] * t.r, z__2.i = rwork[i__1] * t.i;
+ d_cnjg(&z__4, &work[i__ + k - *ka]);
+ i__2 = ka1 + (i__ + k - *ka) * ab_dim1;
+ z__3.r = z__4.r * ab[i__2].r - z__4.i * ab[i__2].i,
+ z__3.i = z__4.r * ab[i__2].i + z__4.i * ab[i__2]
+ .r;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+ i__3 = ka1 + (i__ + k - *ka) * ab_dim1;
+ i__1 = i__ + k - *ka;
+ z__2.r = work[i__1].r * t.r - work[i__1].i * t.i, z__2.i =
+ work[i__1].r * t.i + work[i__1].i * t.r;
+ i__2 = i__ + k - *ka;
+ i__5 = ka1 + (i__ + k - *ka) * ab_dim1;
+ z__3.r = rwork[i__2] * ab[i__5].r, z__3.i = rwork[i__2] *
+ ab[i__5].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ ab[i__3].r = z__1.r, ab[i__3].i = z__1.i;
+ ra1.r = ra.r, ra1.i = ra.i;
+ }
+ }
+/* Computing MAX */
+ i__3 = 1, i__1 = k + i0 - m + 1;
+ j2 = i__ + k + 1 - max(i__3,i__1) * ka1;
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ if (update) {
+/* Computing MIN */
+ i__3 = j2, i__1 = i__ - (*ka << 1) + k - 1;
+ j2t = min(i__3,i__1);
+ } else {
+ j2t = j2;
+ }
+ nrt = (j2t + *ka - 1) / ka1;
+ i__3 = j2t;
+ i__1 = ka1;
+ for (j = j1; i__1 < 0 ? j >= i__3 : j <= i__3; j += i__1) {
+
+/* create nonzero element a(j+ka,j-1) outside the band */
+/* and store it in WORK(j) */
+
+ i__2 = j;
+ i__5 = j;
+ i__6 = ka1 + (j - 1) * ab_dim1;
+ z__1.r = work[i__5].r * ab[i__6].r - work[i__5].i * ab[i__6]
+ .i, z__1.i = work[i__5].r * ab[i__6].i + work[i__5].i
+ * ab[i__6].r;
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+ i__2 = ka1 + (j - 1) * ab_dim1;
+ i__5 = j;
+ i__6 = ka1 + (j - 1) * ab_dim1;
+ z__1.r = rwork[i__5] * ab[i__6].r, z__1.i = rwork[i__5] * ab[
+ i__6].i;
+ ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
+/* L800: */
+ }
+
+/* generate rotations in 1st set to annihilate elements which */
+/* have been created outside the band */
+
+ if (nrt > 0) {
+ zlargv_(&nrt, &ab[ka1 + j1 * ab_dim1], &inca, &work[j1], &ka1,
+ &rwork[j1], &ka1);
+ }
+ if (nr > 0) {
+
+/* apply rotations in 1st set from the right */
+
+ i__1 = *ka - 1;
+ for (l = 1; l <= i__1; ++l) {
+ zlartv_(&nr, &ab[l + 1 + j1 * ab_dim1], &inca, &ab[l + 2
+ + (j1 - 1) * ab_dim1], &inca, &rwork[j1], &work[
+ j1], &ka1);
+/* L810: */
+ }
+
+/* apply rotations in 1st set from both sides to diagonal */
+/* blocks */
+
+ zlar2v_(&nr, &ab[j1 * ab_dim1 + 1], &ab[(j1 - 1) * ab_dim1 +
+ 1], &ab[(j1 - 1) * ab_dim1 + 2], &inca, &rwork[j1], &
+ work[j1], &ka1);
+
+ zlacgv_(&nr, &work[j1], &ka1);
+ }
+
+/* start applying rotations in 1st set from the left */
+
+ i__1 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__1; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[ka1 - l + 1 + (j1t - ka1 + l) * ab_dim1]
+, &inca, &ab[ka1 - l + (j1t - ka1 + l) * ab_dim1],
+ &inca, &rwork[j1t], &work[j1t], &ka1);
+ }
+/* L820: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 1st set */
+
+ i__1 = j2;
+ i__3 = ka1;
+ for (j = j1; i__3 < 0 ? j >= i__1 : j <= i__1; j += i__3) {
+ d_cnjg(&z__1, &work[j]);
+ zrot_(&nx, &x[j * x_dim1 + 1], &c__1, &x[(j - 1) * x_dim1
+ + 1], &c__1, &rwork[j], &z__1);
+/* L830: */
+ }
+ }
+/* L840: */
+ }
+
+ if (update) {
+ if (i2 > 0 && kbt > 0) {
+
+/* create nonzero element a(i+kbt,i+kbt-ka-1) outside the */
+/* band and store it in WORK(m-kb+i+kbt) */
+
+ i__4 = m - *kb + i__ + kbt;
+ i__3 = kbt + 1 + i__ * bb_dim1;
+ z__2.r = -bb[i__3].r, z__2.i = -bb[i__3].i;
+ z__1.r = z__2.r * ra1.r - z__2.i * ra1.i, z__1.i = z__2.r *
+ ra1.i + z__2.i * ra1.r;
+ work[i__4].r = z__1.r, work[i__4].i = z__1.i;
+ }
+ }
+
+ for (k = *kb; k >= 1; --k) {
+ if (update) {
+/* Computing MAX */
+ i__4 = 2, i__3 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__4,i__3) * ka1;
+ } else {
+/* Computing MAX */
+ i__4 = 1, i__3 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__4,i__3) * ka1;
+ }
+
+/* finish applying rotations in 2nd set from the left */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (j2 + *ka + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[ka1 - l + 1 + (j1t + l - 1) * ab_dim1],
+ &inca, &ab[ka1 - l + (j1t + l - 1) * ab_dim1], &
+ inca, &rwork[m - *kb + j1t + *ka], &work[m - *kb
+ + j1t + *ka], &ka1);
+ }
+/* L850: */
+ }
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ i__4 = j2;
+ i__3 = ka1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+ i__1 = m - *kb + j;
+ i__2 = m - *kb + j + *ka;
+ work[i__1].r = work[i__2].r, work[i__1].i = work[i__2].i;
+ rwork[m - *kb + j] = rwork[m - *kb + j + *ka];
+/* L860: */
+ }
+ i__3 = j2;
+ i__4 = ka1;
+ for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {
+
+/* create nonzero element a(j+ka,j-1) outside the band */
+/* and store it in WORK(m-kb+j) */
+
+ i__1 = m - *kb + j;
+ i__2 = m - *kb + j;
+ i__5 = ka1 + (j - 1) * ab_dim1;
+ z__1.r = work[i__2].r * ab[i__5].r - work[i__2].i * ab[i__5]
+ .i, z__1.i = work[i__2].r * ab[i__5].i + work[i__2].i
+ * ab[i__5].r;
+ work[i__1].r = z__1.r, work[i__1].i = z__1.i;
+ i__1 = ka1 + (j - 1) * ab_dim1;
+ i__2 = m - *kb + j;
+ i__5 = ka1 + (j - 1) * ab_dim1;
+ z__1.r = rwork[i__2] * ab[i__5].r, z__1.i = rwork[i__2] * ab[
+ i__5].i;
+ ab[i__1].r = z__1.r, ab[i__1].i = z__1.i;
+/* L870: */
+ }
+ if (update) {
+ if (i__ + k > ka1 && k <= kbt) {
+ i__4 = m - *kb + i__ + k - *ka;
+ i__3 = m - *kb + i__ + k;
+ work[i__4].r = work[i__3].r, work[i__4].i = work[i__3].i;
+ }
+ }
+/* L880: */
+ }
+
+ for (k = *kb; k >= 1; --k) {
+/* Computing MAX */
+ i__4 = 1, i__3 = k + i0 - m;
+ j2 = i__ + k + 1 - max(i__4,i__3) * ka1;
+ nr = (j2 + *ka - 1) / ka1;
+ j1 = j2 - (nr - 1) * ka1;
+ if (nr > 0) {
+
+/* generate rotations in 2nd set to annihilate elements */
+/* which have been created outside the band */
+
+ zlargv_(&nr, &ab[ka1 + j1 * ab_dim1], &inca, &work[m - *kb +
+ j1], &ka1, &rwork[m - *kb + j1], &ka1);
+
+/* apply rotations in 2nd set from the right */
+
+ i__4 = *ka - 1;
+ for (l = 1; l <= i__4; ++l) {
+ zlartv_(&nr, &ab[l + 1 + j1 * ab_dim1], &inca, &ab[l + 2
+ + (j1 - 1) * ab_dim1], &inca, &rwork[m - *kb + j1]
+, &work[m - *kb + j1], &ka1);
+/* L890: */
+ }
+
+/* apply rotations in 2nd set from both sides to diagonal */
+/* blocks */
+
+ zlar2v_(&nr, &ab[j1 * ab_dim1 + 1], &ab[(j1 - 1) * ab_dim1 +
+ 1], &ab[(j1 - 1) * ab_dim1 + 2], &inca, &rwork[m - *
+ kb + j1], &work[m - *kb + j1], &ka1);
+
+ zlacgv_(&nr, &work[m - *kb + j1], &ka1);
+ }
+
+/* start applying rotations in 2nd set from the left */
+
+ i__4 = *kb - k + 1;
+ for (l = *ka - 1; l >= i__4; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[ka1 - l + 1 + (j1t - ka1 + l) * ab_dim1]
+, &inca, &ab[ka1 - l + (j1t - ka1 + l) * ab_dim1],
+ &inca, &rwork[m - *kb + j1t], &work[m - *kb +
+ j1t], &ka1);
+ }
+/* L900: */
+ }
+
+ if (wantx) {
+
+/* post-multiply X by product of rotations in 2nd set */
+
+ i__4 = j2;
+ i__3 = ka1;
+ for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) {
+ d_cnjg(&z__1, &work[m - *kb + j]);
+ zrot_(&nx, &x[j * x_dim1 + 1], &c__1, &x[(j - 1) * x_dim1
+ + 1], &c__1, &rwork[m - *kb + j], &z__1);
+/* L910: */
+ }
+ }
+/* L920: */
+ }
+
+ i__3 = *kb - 1;
+ for (k = 1; k <= i__3; ++k) {
+/* Computing MAX */
+ i__4 = 1, i__1 = k + i0 - m + 1;
+ j2 = i__ + k + 1 - max(i__4,i__1) * ka1;
+
+/* finish applying rotations in 1st set from the left */
+
+ for (l = *kb - k; l >= 1; --l) {
+ nrt = (j2 + l - 1) / ka1;
+ j1t = j2 - (nrt - 1) * ka1;
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[ka1 - l + 1 + (j1t - ka1 + l) * ab_dim1]
+, &inca, &ab[ka1 - l + (j1t - ka1 + l) * ab_dim1],
+ &inca, &rwork[j1t], &work[j1t], &ka1);
+ }
+/* L930: */
+ }
+/* L940: */
+ }
+
+ if (*kb > 1) {
+ i__3 = i2 - *ka;
+ for (j = 2; j <= i__3; ++j) {
+ rwork[j] = rwork[j + *ka];
+ i__4 = j;
+ i__1 = j + *ka;
+ work[i__4].r = work[i__1].r, work[i__4].i = work[i__1].i;
+/* L950: */
+ }
+ }
+
+ }
+
+ goto L490;
+
+/* End of ZHBGST */
+
+} /* zhbgst_ */
diff --git a/contrib/libs/clapack/zhbgv.c b/contrib/libs/clapack/zhbgv.c
new file mode 100644
index 0000000000..5361f0a9d7
--- /dev/null
+++ b/contrib/libs/clapack/zhbgv.c
@@ -0,0 +1,238 @@
+/* zhbgv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zhbgv_(char *jobz, char *uplo, integer *n, integer *ka,
+ integer *kb, doublecomplex *ab, integer *ldab, doublecomplex *bb,
+ integer *ldbb, doublereal *w, doublecomplex *z__, integer *ldz,
+ doublecomplex *work, doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, bb_dim1, bb_offset, z_dim1, z_offset, i__1;
+
+ /* Local variables */
+ integer inde;
+ char vect[1];
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical upper, wantz;
+ extern /* Subroutine */ int xerbla_(char *, integer *), dsterf_(
+ integer *, doublereal *, doublereal *, integer *), zhbtrd_(char *,
+ char *, integer *, integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ integer indwrk;
+ extern /* Subroutine */ int zhbgst_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublereal *,
+ integer *), zpbstf_(char *, integer *, integer *,
+ doublecomplex *, integer *, integer *), zsteqr_(char *,
+ integer *, doublereal *, doublereal *, doublecomplex *, integer *,
+ doublereal *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHBGV computes all the eigenvalues, and optionally, the eigenvectors */
+/* of a complex generalized Hermitian-definite banded eigenproblem, of */
+/* the form A*x=(lambda)*B*x. Here A and B are assumed to be Hermitian */
+/* and banded, and B is also positive definite. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* KA (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KA >= 0. */
+
+/* KB (input) INTEGER */
+/* The number of superdiagonals of the matrix B if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KB >= 0. */
+
+/* AB (input/output) COMPLEX*16 array, dimension (LDAB, N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first ka+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(ka+1+i-j,j) = A(i,j) for max(1,j-ka)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+ka). */
+
+/* On exit, the contents of AB are destroyed. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KA+1. */
+
+/* BB (input/output) COMPLEX*16 array, dimension (LDBB, N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix B, stored in the first kb+1 rows of the array. The */
+/* j-th column of B is stored in the j-th column of the array BB */
+/* as follows: */
+/* if UPLO = 'U', BB(kb+1+i-j,j) = B(i,j) for max(1,j-kb)<=i<=j; */
+/* if UPLO = 'L', BB(1+i-j,j) = B(i,j) for j<=i<=min(n,j+kb). */
+
+/* On exit, the factor S from the split Cholesky factorization */
+/* B = S**H*S, as returned by ZPBSTF. */
+
+/* LDBB (input) INTEGER */
+/* The leading dimension of the array BB. LDBB >= KB+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX*16 array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/* eigenvectors, with the i-th column of Z holding the */
+/* eigenvector associated with W(i). The eigenvectors are */
+/* normalized so that Z**H*B*Z = I. */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= N. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (3*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is: */
+/* <= N: the algorithm failed to converge: */
+/* i off-diagonal elements of an intermediate */
+/* tridiagonal form did not converge to zero; */
+/* > N: if INFO = N + i, for 1 <= i <= N, then ZPBSTF */
+/* returned INFO = i: B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ bb_dim1 = *ldbb;
+ bb_offset = 1 + bb_dim1;
+ bb -= bb_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ka < 0) {
+ *info = -4;
+ } else if (*kb < 0 || *kb > *ka) {
+ *info = -5;
+ } else if (*ldab < *ka + 1) {
+ *info = -7;
+ } else if (*ldbb < *kb + 1) {
+ *info = -9;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHBGV ", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a split Cholesky factorization of B. */
+
+ zpbstf_(uplo, n, kb, &bb[bb_offset], ldbb, info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem. */
+
+ inde = 1;
+ indwrk = inde + *n;
+ zhbgst_(jobz, uplo, n, ka, kb, &ab[ab_offset], ldab, &bb[bb_offset], ldbb,
+ &z__[z_offset], ldz, &work[1], &rwork[indwrk], &iinfo);
+
+/* Reduce to tridiagonal form. */
+
+ if (wantz) {
+ *(unsigned char *)vect = 'U';
+ } else {
+ *(unsigned char *)vect = 'N';
+ }
+ zhbtrd_(vect, uplo, n, ka, &ab[ab_offset], ldab, &w[1], &rwork[inde], &
+ z__[z_offset], ldz, &work[1], &iinfo);
+
+/* For eigenvalues only, call DSTERF. For eigenvectors, call ZSTEQR. */
+
+ if (! wantz) {
+ dsterf_(n, &w[1], &rwork[inde], info);
+ } else {
+ zsteqr_(jobz, n, &w[1], &rwork[inde], &z__[z_offset], ldz, &rwork[
+ indwrk], info);
+ }
+ return 0;
+
+/* End of ZHBGV */
+
+} /* zhbgv_ */
diff --git a/contrib/libs/clapack/zhbgvd.c b/contrib/libs/clapack/zhbgvd.c
new file mode 100644
index 0000000000..bcdd4a9110
--- /dev/null
+++ b/contrib/libs/clapack/zhbgvd.c
@@ -0,0 +1,359 @@
+/* zhbgvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static doublecomplex c_b2 = {0.,0.};
+
+/* Subroutine */ int zhbgvd_(char *jobz, char *uplo, integer *n, integer *ka,
+ integer *kb, doublecomplex *ab, integer *ldab, doublecomplex *bb,
+ integer *ldbb, doublereal *w, doublecomplex *z__, integer *ldz,
+ doublecomplex *work, integer *lwork, doublereal *rwork, integer *
+ lrwork, integer *iwork, integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, bb_dim1, bb_offset, z_dim1, z_offset, i__1;
+
+ /* Local variables */
+ integer inde;
+ char vect[1];
+ integer llwk2;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ integer lwmin;
+ logical upper;
+ integer llrwk;
+ logical wantz;
+ integer indwk2;
+ extern /* Subroutine */ int xerbla_(char *, integer *), dsterf_(
+ integer *, doublereal *, doublereal *, integer *), zstedc_(char *,
+ integer *, doublereal *, doublereal *, doublecomplex *, integer *
+, doublecomplex *, integer *, doublereal *, integer *, integer *,
+ integer *, integer *), zhbtrd_(char *, char *, integer *,
+ integer *, doublecomplex *, integer *, doublereal *, doublereal *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ integer indwrk, liwmin;
+ extern /* Subroutine */ int zhbgst_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublereal *,
+ integer *), zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ integer lrwmin;
+ extern /* Subroutine */ int zpbstf_(char *, integer *, integer *,
+ doublecomplex *, integer *, integer *);
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHBGVD computes all the eigenvalues, and optionally, the eigenvectors */
+/* of a complex generalized Hermitian-definite banded eigenproblem, of */
+/* the form A*x=(lambda)*B*x. Here A and B are assumed to be Hermitian */
+/* and banded, and B is also positive definite. If eigenvectors are */
+/* desired, it uses a divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* KA (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KA >= 0. */
+
+/* KB (input) INTEGER */
+/* The number of superdiagonals of the matrix B if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KB >= 0. */
+
+/* AB (input/output) COMPLEX*16 array, dimension (LDAB, N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first ka+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(ka+1+i-j,j) = A(i,j) for max(1,j-ka)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+ka). */
+
+/* On exit, the contents of AB are destroyed. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KA+1. */
+
+/* BB (input/output) COMPLEX*16 array, dimension (LDBB, N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix B, stored in the first kb+1 rows of the array. The */
+/* j-th column of B is stored in the j-th column of the array BB */
+/* as follows: */
+/* if UPLO = 'U', BB(kb+1+i-j,j) = B(i,j) for max(1,j-kb)<=i<=j; */
+/* if UPLO = 'L', BB(1+i-j,j) = B(i,j) for j<=i<=min(n,j+kb). */
+
+/* On exit, the factor S from the split Cholesky factorization */
+/* B = S**H*S, as returned by ZPBSTF. */
+
+/* LDBB (input) INTEGER */
+/* The leading dimension of the array BB. LDBB >= KB+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX*16 array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/* eigenvectors, with the i-th column of Z holding the */
+/* eigenvector associated with W(i). The eigenvectors are */
+/* normalized so that Z**H*B*Z = I. */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= N. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO=0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If N <= 1, LWORK >= 1. */
+/* If JOBZ = 'N' and N > 1, LWORK >= N. */
+/* If JOBZ = 'V' and N > 1, LWORK >= 2*N**2. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK, RWORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* RWORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LRWORK)) */
+/* On exit, if INFO=0, RWORK(1) returns the optimal LRWORK. */
+
+/* LRWORK (input) INTEGER */
+/* The dimension of array RWORK. */
+/* If N <= 1, LRWORK >= 1. */
+/* If JOBZ = 'N' and N > 1, LRWORK >= N. */
+/* If JOBZ = 'V' and N > 1, LRWORK >= 1 + 5*N + 2*N**2. */
+
+/* If LRWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO=0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of array IWORK. */
+/* If JOBZ = 'N' or N <= 1, LIWORK >= 1. */
+/* If JOBZ = 'V' and N > 1, LIWORK >= 3 + 5*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is: */
+/* <= N: the algorithm failed to converge: */
+/* i off-diagonal elements of an intermediate */
+/* tridiagonal form did not converge to zero; */
+/* > N: if INFO = N + i, for 1 <= i <= N, then ZPBSTF */
+/* returned INFO = i: B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ bb_dim1 = *ldbb;
+ bb_offset = 1 + bb_dim1;
+ bb -= bb_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1;
+
+ *info = 0;
+ if (*n <= 1) {
+ lwmin = 1;
+ lrwmin = 1;
+ liwmin = 1;
+ } else if (wantz) {
+/* Computing 2nd power */
+ i__1 = *n;
+ lwmin = i__1 * i__1 << 1;
+/* Computing 2nd power */
+ i__1 = *n;
+ lrwmin = *n * 5 + 1 + (i__1 * i__1 << 1);
+ liwmin = *n * 5 + 3;
+ } else {
+ lwmin = *n;
+ lrwmin = *n;
+ liwmin = 1;
+ }
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ka < 0) {
+ *info = -4;
+ } else if (*kb < 0 || *kb > *ka) {
+ *info = -5;
+ } else if (*ldab < *ka + 1) {
+ *info = -7;
+ } else if (*ldbb < *kb + 1) {
+ *info = -9;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+ work[1].r = (doublereal) lwmin, work[1].i = 0.;
+ rwork[1] = (doublereal) lrwmin;
+ iwork[1] = liwmin;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -14;
+ } else if (*lrwork < lrwmin && ! lquery) {
+ *info = -16;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -18;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHBGVD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a split Cholesky factorization of B. */
+
+ zpbstf_(uplo, n, kb, &bb[bb_offset], ldbb, info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem. */
+
+ inde = 1;
+ indwrk = inde + *n;
+ indwk2 = *n * *n + 1;
+ llwk2 = *lwork - indwk2 + 2;
+ llrwk = *lrwork - indwrk + 2;
+ zhbgst_(jobz, uplo, n, ka, kb, &ab[ab_offset], ldab, &bb[bb_offset], ldbb,
+ &z__[z_offset], ldz, &work[1], &rwork[indwrk], &iinfo);
+
+/* Reduce Hermitian band matrix to tridiagonal form. */
+
+ if (wantz) {
+ *(unsigned char *)vect = 'U';
+ } else {
+ *(unsigned char *)vect = 'N';
+ }
+ zhbtrd_(vect, uplo, n, ka, &ab[ab_offset], ldab, &w[1], &rwork[inde], &
+ z__[z_offset], ldz, &work[1], &iinfo);
+
+/* For eigenvalues only, call DSTERF. For eigenvectors, call ZSTEDC. */
+
+ if (! wantz) {
+ dsterf_(n, &w[1], &rwork[inde], info);
+ } else {
+ zstedc_("I", n, &w[1], &rwork[inde], &work[1], n, &work[indwk2], &
+ llwk2, &rwork[indwrk], &llrwk, &iwork[1], liwork, info);
+ zgemm_("N", "N", n, n, n, &c_b1, &z__[z_offset], ldz, &work[1], n, &
+ c_b2, &work[indwk2], n);
+ zlacpy_("A", n, n, &work[indwk2], n, &z__[z_offset], ldz);
+ }
+
+ work[1].r = (doublereal) lwmin, work[1].i = 0.;
+ rwork[1] = (doublereal) lrwmin;
+ iwork[1] = liwmin;
+ return 0;
+
+/* End of ZHBGVD */
+
+} /* zhbgvd_ */
diff --git a/contrib/libs/clapack/zhbgvx.c b/contrib/libs/clapack/zhbgvx.c
new file mode 100644
index 0000000000..6663aabf8e
--- /dev/null
+++ b/contrib/libs/clapack/zhbgvx.c
@@ -0,0 +1,473 @@
+/* zhbgvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zhbgvx_(char *jobz, char *range, char *uplo, integer *n,
+ integer *ka, integer *kb, doublecomplex *ab, integer *ldab,
+ doublecomplex *bb, integer *ldbb, doublecomplex *q, integer *ldq,
+ doublereal *vl, doublereal *vu, integer *il, integer *iu, doublereal *
+ abstol, integer *m, doublereal *w, doublecomplex *z__, integer *ldz,
+ doublecomplex *work, doublereal *rwork, integer *iwork, integer *
+ ifail, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, bb_dim1, bb_offset, q_dim1, q_offset, z_dim1,
+ z_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, j, jj;
+ doublereal tmp1;
+ integer indd, inde;
+ char vect[1];
+ logical test;
+ integer itmp1, indee;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ char order[1];
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *);
+ logical upper, wantz;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zswap_(integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *);
+ logical alleig, indeig;
+ integer indibl;
+ logical valeig;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ integer indiwk, indisp;
+ extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+ integer *), dstebz_(char *, char *, integer *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *, doublereal *, integer *, integer *),
+ zhbtrd_(char *, char *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *, doublereal *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ integer indrwk, indwrk;
+ extern /* Subroutine */ int zhbgst_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublereal *,
+ integer *), zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ integer nsplit;
+ extern /* Subroutine */ int zpbstf_(char *, integer *, integer *,
+ doublecomplex *, integer *, integer *), zstein_(integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ integer *, doublecomplex *, integer *, doublereal *, integer *,
+ integer *, integer *), zsteqr_(char *, integer *, doublereal *,
+ doublereal *, doublecomplex *, integer *, doublereal *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHBGVX computes all the eigenvalues, and optionally, the eigenvectors */
+/* of a complex generalized Hermitian-definite banded eigenproblem, of */
+/* the form A*x=(lambda)*B*x. Here A and B are assumed to be Hermitian */
+/* and banded, and B is also positive definite. Eigenvalues and */
+/* eigenvectors can be selected by specifying either all eigenvalues, */
+/* a range of values or a range of indices for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found; */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found; */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* KA (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KA >= 0. */
+
+/* KB (input) INTEGER */
+/* The number of superdiagonals of the matrix B if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KB >= 0. */
+
+/* AB (input/output) COMPLEX*16 array, dimension (LDAB, N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first ka+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(ka+1+i-j,j) = A(i,j) for max(1,j-ka)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+ka). */
+
+/* On exit, the contents of AB are destroyed. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KA+1. */
+
+/* BB (input/output) COMPLEX*16 array, dimension (LDBB, N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix B, stored in the first kb+1 rows of the array. The */
+/* j-th column of B is stored in the j-th column of the array BB */
+/* as follows: */
+/* if UPLO = 'U', BB(kb+1+i-j,j) = B(i,j) for max(1,j-kb)<=i<=j; */
+/* if UPLO = 'L', BB(1+i-j,j) = B(i,j) for j<=i<=min(n,j+kb). */
+
+/* On exit, the factor S from the split Cholesky factorization */
+/* B = S**H*S, as returned by ZPBSTF. */
+
+/* LDBB (input) INTEGER */
+/* The leading dimension of the array BB. LDBB >= KB+1. */
+
+/* Q (output) COMPLEX*16 array, dimension (LDQ, N) */
+/* If JOBZ = 'V', the n-by-n matrix used in the reduction of */
+/* A*x = (lambda)*B*x to standard form, i.e. C*x = (lambda)*x, */
+/* and consequently C to tridiagonal form. */
+/* If JOBZ = 'N', the array Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. If JOBZ = 'N', */
+/* LDQ >= 1. If JOBZ = 'V', LDQ >= max(1,N). */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) DOUBLE PRECISION */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing AP to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*DLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*DLAMCH('S'). */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX*16 array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/* eigenvectors, with the i-th column of Z holding the */
+/* eigenvector associated with W(i). The eigenvectors are */
+/* normalized so that Z**H*B*Z = I. */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= N. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (7*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is: */
+/* <= N: then i eigenvectors failed to converge. Their */
+/* indices are stored in array IFAIL. */
+/* > N: if INFO = N + i, for 1 <= i <= N, then ZPBSTF */
+/* returned INFO = i: B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ bb_dim1 = *ldbb;
+ bb_offset = 1 + bb_dim1;
+ bb -= bb_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -2;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ka < 0) {
+ *info = -5;
+ } else if (*kb < 0 || *kb > *ka) {
+ *info = -6;
+ } else if (*ldab < *ka + 1) {
+ *info = -8;
+ } else if (*ldbb < *kb + 1) {
+ *info = -10;
+ } else if (*ldq < 1 || wantz && *ldq < *n) {
+ *info = -12;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -14;
+ }
+ } else if (indeig) {
+ if (*il < 1 || *il > max(1,*n)) {
+ *info = -15;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -16;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -21;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHBGVX", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a split Cholesky factorization of B. */
+
+ zpbstf_(uplo, n, kb, &bb[bb_offset], ldbb, info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem. */
+
+ zhbgst_(jobz, uplo, n, ka, kb, &ab[ab_offset], ldab, &bb[bb_offset], ldbb,
+ &q[q_offset], ldq, &work[1], &rwork[1], &iinfo);
+
+/* Solve the standard eigenvalue problem. */
+/* Reduce Hermitian band matrix to tridiagonal form. */
+
+ indd = 1;
+ inde = indd + *n;
+ indrwk = inde + *n;
+ indwrk = 1;
+ if (wantz) {
+ *(unsigned char *)vect = 'U';
+ } else {
+ *(unsigned char *)vect = 'N';
+ }
+ zhbtrd_(vect, uplo, n, ka, &ab[ab_offset], ldab, &rwork[indd], &rwork[
+ inde], &q[q_offset], ldq, &work[indwrk], &iinfo);
+
+/* If all eigenvalues are desired and ABSTOL is less than or equal */
+/* to zero, then call DSTERF or ZSTEQR. If this fails for some */
+/* eigenvalue, then try DSTEBZ. */
+
+ test = FALSE_;
+ if (indeig) {
+ if (*il == 1 && *iu == *n) {
+ test = TRUE_;
+ }
+ }
+ if ((alleig || test) && *abstol <= 0.) {
+ dcopy_(n, &rwork[indd], &c__1, &w[1], &c__1);
+ indee = indrwk + (*n << 1);
+ i__1 = *n - 1;
+ dcopy_(&i__1, &rwork[inde], &c__1, &rwork[indee], &c__1);
+ if (! wantz) {
+ dsterf_(n, &w[1], &rwork[indee], info);
+ } else {
+ zlacpy_("A", n, n, &q[q_offset], ldq, &z__[z_offset], ldz);
+ zsteqr_(jobz, n, &w[1], &rwork[indee], &z__[z_offset], ldz, &
+ rwork[indrwk], info);
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ifail[i__] = 0;
+/* L10: */
+ }
+ }
+ }
+ if (*info == 0) {
+ *m = *n;
+ goto L30;
+ }
+ *info = 0;
+ }
+
+/* Otherwise, call DSTEBZ and, if eigenvectors are desired, */
+/* call ZSTEIN. */
+
+ if (wantz) {
+ *(unsigned char *)order = 'B';
+ } else {
+ *(unsigned char *)order = 'E';
+ }
+ indibl = 1;
+ indisp = indibl + *n;
+ indiwk = indisp + *n;
+ dstebz_(range, order, n, vl, vu, il, iu, abstol, &rwork[indd], &rwork[
+ inde], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &rwork[
+ indrwk], &iwork[indiwk], info);
+
+ if (wantz) {
+ zstein_(n, &rwork[indd], &rwork[inde], m, &w[1], &iwork[indibl], &
+ iwork[indisp], &z__[z_offset], ldz, &rwork[indrwk], &iwork[
+ indiwk], &ifail[1], info);
+
+/* Apply unitary matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by ZSTEIN. */
+
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(n, &z__[j * z_dim1 + 1], &c__1, &work[1], &c__1);
+ zgemv_("N", n, n, &c_b2, &q[q_offset], ldq, &work[1], &c__1, &
+ c_b1, &z__[j * z_dim1 + 1], &c__1);
+/* L20: */
+ }
+ }
+
+L30:
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+ if (wantz) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = 0;
+ tmp1 = w[j];
+ i__2 = *m;
+ for (jj = j + 1; jj <= i__2; ++jj) {
+ if (w[jj] < tmp1) {
+ i__ = jj;
+ tmp1 = w[jj];
+ }
+/* L40: */
+ }
+
+ if (i__ != 0) {
+ itmp1 = iwork[indibl + i__ - 1];
+ w[i__] = w[j];
+ iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+ w[j] = tmp1;
+ iwork[indibl + j - 1] = itmp1;
+ zswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+ &c__1);
+ if (*info != 0) {
+ itmp1 = ifail[i__];
+ ifail[i__] = ifail[j];
+ ifail[j] = itmp1;
+ }
+ }
+/* L50: */
+ }
+ }
+
+ return 0;
+
+/* End of ZHBGVX */
+
+} /* zhbgvx_ */
diff --git a/contrib/libs/clapack/zhbtrd.c b/contrib/libs/clapack/zhbtrd.c
new file mode 100644
index 0000000000..f2c359398d
--- /dev/null
+++ b/contrib/libs/clapack/zhbtrd.c
@@ -0,0 +1,810 @@
+/* zhbtrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zhbtrd_(char *vect, char *uplo, integer *n, integer *kd,
+ doublecomplex *ab, integer *ldab, doublereal *d__, doublereal *e,
+ doublecomplex *q, integer *ldq, doublecomplex *work, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, q_dim1, q_offset, i__1, i__2, i__3, i__4,
+ i__5, i__6;
+ doublereal d__1;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+ double z_abs(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k, l;
+ doublecomplex t;
+ integer i2, j1, j2, nq, nr, kd1, ibl, iqb, kdn, jin, nrt, kdm1, inca,
+ jend, lend, jinc;
+ doublereal abst;
+ integer incx, last;
+ doublecomplex temp;
+ extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublereal *, doublecomplex *);
+ integer j1end, j1inc, iqend;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *);
+ logical initq, wantq, upper;
+ extern /* Subroutine */ int zlar2v_(integer *, doublecomplex *,
+ doublecomplex *, doublecomplex *, integer *, doublereal *,
+ doublecomplex *, integer *);
+ integer iqaend;
+ extern /* Subroutine */ int xerbla_(char *, integer *), zlacgv_(
+ integer *, doublecomplex *, integer *), zlaset_(char *, integer *,
+ integer *, doublecomplex *, doublecomplex *, doublecomplex *,
+ integer *), zlartg_(doublecomplex *, doublecomplex *,
+ doublereal *, doublecomplex *, doublecomplex *), zlargv_(integer *
+, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublereal *, integer *), zlartv_(integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *, doublereal *,
+ doublecomplex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHBTRD reduces a complex Hermitian band matrix A to real symmetric */
+/* tridiagonal form T by a unitary similarity transformation: */
+/* Q**H * A * Q = T. */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* = 'N': do not form Q; */
+/* = 'V': form Q; */
+/* = 'U': update a matrix X, by forming X*Q. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) COMPLEX*16 array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+/* On exit, the diagonal elements of AB are overwritten by the */
+/* diagonal elements of the tridiagonal matrix T; if KD > 0, the */
+/* elements on the first superdiagonal (if UPLO = 'U') or the */
+/* first subdiagonal (if UPLO = 'L') are overwritten by the */
+/* off-diagonal elements of T; the rest of AB is overwritten by */
+/* values generated during the reduction. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* D (output) DOUBLE PRECISION array, dimension (N) */
+/* The diagonal elements of the tridiagonal matrix T. */
+
+/* E (output) DOUBLE PRECISION array, dimension (N-1) */
+/* The off-diagonal elements of the tridiagonal matrix T: */
+/* E(i) = T(i,i+1) if UPLO = 'U'; E(i) = T(i+1,i) if UPLO = 'L'. */
+
+/* Q (input/output) COMPLEX*16 array, dimension (LDQ,N) */
+/* On entry, if VECT = 'U', then Q must contain an N-by-N */
+/* matrix X; if VECT = 'N' or 'V', then Q need not be set. */
+
+/* On exit: */
+/* if VECT = 'V', Q contains the N-by-N unitary matrix Q; */
+/* if VECT = 'U', Q contains the product X*Q; */
+/* if VECT = 'N', the array Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. */
+/* LDQ >= 1, and LDQ >= N if VECT = 'V' or 'U'. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* Modified by Linda Kaufman, Bell Labs. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --d__;
+ --e;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --work;
+
+ /* Function Body */
+ initq = lsame_(vect, "V");
+ wantq = initq || lsame_(vect, "U");
+ upper = lsame_(uplo, "U");
+ kd1 = *kd + 1;
+ kdm1 = *kd - 1;
+ incx = *ldab - 1;
+ iqend = 1;
+
+ *info = 0;
+ if (! wantq && ! lsame_(vect, "N")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*kd < 0) {
+ *info = -4;
+ } else if (*ldab < kd1) {
+ *info = -6;
+ } else if (*ldq < max(1,*n) && wantq) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHBTRD", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Initialize Q to the unit matrix, if needed */
+
+ if (initq) {
+ zlaset_("Full", n, n, &c_b1, &c_b2, &q[q_offset], ldq);
+ }
+
+/* Wherever possible, plane rotations are generated and applied in */
+/* vector operations of length NR over the index set J1:J2:KD1. */
+
+/* The real cosines and complex sines of the plane rotations are */
+/* stored in the arrays D and WORK. */
+
+ inca = kd1 * *ldab;
+/* Computing MIN */
+ i__1 = *n - 1;
+ kdn = min(i__1,*kd);
+ if (upper) {
+
+ if (*kd > 1) {
+
+/* Reduce to complex Hermitian tridiagonal form, working with */
+/* the upper triangle */
+
+ nr = 0;
+ j1 = kdn + 2;
+ j2 = 1;
+
+ i__1 = kd1 + ab_dim1;
+ i__2 = kd1 + ab_dim1;
+ d__1 = ab[i__2].r;
+ ab[i__1].r = d__1, ab[i__1].i = 0.;
+ i__1 = *n - 2;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Reduce i-th row of matrix to tridiagonal form */
+
+ for (k = kdn + 1; k >= 2; --k) {
+ j1 += kdn;
+ j2 += kdn;
+
+ if (nr > 0) {
+
+/* generate plane rotations to annihilate nonzero */
+/* elements which have been created outside the band */
+
+ zlargv_(&nr, &ab[(j1 - 1) * ab_dim1 + 1], &inca, &
+ work[j1], &kd1, &d__[j1], &kd1);
+
+/* apply rotations from the right */
+
+
+/* Dependent on the number of diagonals either */
+/* ZLARTV or ZROT is used */
+
+ if (nr >= (*kd << 1) - 1) {
+ i__2 = *kd - 1;
+ for (l = 1; l <= i__2; ++l) {
+ zlartv_(&nr, &ab[l + 1 + (j1 - 1) * ab_dim1],
+ &inca, &ab[l + j1 * ab_dim1], &inca, &
+ d__[j1], &work[j1], &kd1);
+/* L10: */
+ }
+
+ } else {
+ jend = j1 + (nr - 1) * kd1;
+ i__2 = jend;
+ i__3 = kd1;
+ for (jinc = j1; i__3 < 0 ? jinc >= i__2 : jinc <=
+ i__2; jinc += i__3) {
+ zrot_(&kdm1, &ab[(jinc - 1) * ab_dim1 + 2], &
+ c__1, &ab[jinc * ab_dim1 + 1], &c__1,
+ &d__[jinc], &work[jinc]);
+/* L20: */
+ }
+ }
+ }
+
+
+ if (k > 2) {
+ if (k <= *n - i__ + 1) {
+
+/* generate plane rotation to annihilate a(i,i+k-1) */
+/* within the band */
+
+ zlartg_(&ab[*kd - k + 3 + (i__ + k - 2) * ab_dim1]
+, &ab[*kd - k + 2 + (i__ + k - 1) *
+ ab_dim1], &d__[i__ + k - 1], &work[i__ +
+ k - 1], &temp);
+ i__3 = *kd - k + 3 + (i__ + k - 2) * ab_dim1;
+ ab[i__3].r = temp.r, ab[i__3].i = temp.i;
+
+/* apply rotation from the right */
+
+ i__3 = k - 3;
+ zrot_(&i__3, &ab[*kd - k + 4 + (i__ + k - 2) *
+ ab_dim1], &c__1, &ab[*kd - k + 3 + (i__ +
+ k - 1) * ab_dim1], &c__1, &d__[i__ + k -
+ 1], &work[i__ + k - 1]);
+ }
+ ++nr;
+ j1 = j1 - kdn - 1;
+ }
+
+/* apply plane rotations from both sides to diagonal */
+/* blocks */
+
+ if (nr > 0) {
+ zlar2v_(&nr, &ab[kd1 + (j1 - 1) * ab_dim1], &ab[kd1 +
+ j1 * ab_dim1], &ab[*kd + j1 * ab_dim1], &inca,
+ &d__[j1], &work[j1], &kd1);
+ }
+
+/* apply plane rotations from the left */
+
+ if (nr > 0) {
+ zlacgv_(&nr, &work[j1], &kd1);
+ if ((*kd << 1) - 1 < nr) {
+
+/* Dependent on the number of diagonals either */
+/* ZLARTV or ZROT is used */
+
+ i__3 = *kd - 1;
+ for (l = 1; l <= i__3; ++l) {
+ if (j2 + l > *n) {
+ nrt = nr - 1;
+ } else {
+ nrt = nr;
+ }
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[*kd - l + (j1 + l) *
+ ab_dim1], &inca, &ab[*kd - l + 1
+ + (j1 + l) * ab_dim1], &inca, &
+ d__[j1], &work[j1], &kd1);
+ }
+/* L30: */
+ }
+ } else {
+ j1end = j1 + kd1 * (nr - 2);
+ if (j1end >= j1) {
+ i__3 = j1end;
+ i__2 = kd1;
+ for (jin = j1; i__2 < 0 ? jin >= i__3 : jin <=
+ i__3; jin += i__2) {
+ i__4 = *kd - 1;
+ zrot_(&i__4, &ab[*kd - 1 + (jin + 1) *
+ ab_dim1], &incx, &ab[*kd + (jin +
+ 1) * ab_dim1], &incx, &d__[jin], &
+ work[jin]);
+/* L40: */
+ }
+ }
+/* Computing MIN */
+ i__2 = kdm1, i__3 = *n - j2;
+ lend = min(i__2,i__3);
+ last = j1end + kd1;
+ if (lend > 0) {
+ zrot_(&lend, &ab[*kd - 1 + (last + 1) *
+ ab_dim1], &incx, &ab[*kd + (last + 1)
+ * ab_dim1], &incx, &d__[last], &work[
+ last]);
+ }
+ }
+ }
+
+ if (wantq) {
+
+/* accumulate product of plane rotations in Q */
+
+ if (initq) {
+
+/* take advantage of the fact that Q was */
+/* initially the Identity matrix */
+
+ iqend = max(iqend,j2);
+/* Computing MAX */
+ i__2 = 0, i__3 = k - 3;
+ i2 = max(i__2,i__3);
+ iqaend = i__ * *kd + 1;
+ if (k == 2) {
+ iqaend += *kd;
+ }
+ iqaend = min(iqaend,iqend);
+ i__2 = j2;
+ i__3 = kd1;
+ for (j = j1; i__3 < 0 ? j >= i__2 : j <= i__2; j
+ += i__3) {
+ ibl = i__ - i2 / kdm1;
+ ++i2;
+/* Computing MAX */
+ i__4 = 1, i__5 = j - ibl;
+ iqb = max(i__4,i__5);
+ nq = iqaend + 1 - iqb;
+/* Computing MIN */
+ i__4 = iqaend + *kd;
+ iqaend = min(i__4,iqend);
+ d_cnjg(&z__1, &work[j]);
+ zrot_(&nq, &q[iqb + (j - 1) * q_dim1], &c__1,
+ &q[iqb + j * q_dim1], &c__1, &d__[j],
+ &z__1);
+/* L50: */
+ }
+ } else {
+
+ i__3 = j2;
+ i__2 = kd1;
+ for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j
+ += i__2) {
+ d_cnjg(&z__1, &work[j]);
+ zrot_(n, &q[(j - 1) * q_dim1 + 1], &c__1, &q[
+ j * q_dim1 + 1], &c__1, &d__[j], &
+ z__1);
+/* L60: */
+ }
+ }
+
+ }
+
+ if (j2 + kdn > *n) {
+
+/* adjust J2 to keep within the bounds of the matrix */
+
+ --nr;
+ j2 = j2 - kdn - 1;
+ }
+
+ i__2 = j2;
+ i__3 = kd1;
+ for (j = j1; i__3 < 0 ? j >= i__2 : j <= i__2; j += i__3)
+ {
+
+/* create nonzero element a(j-1,j+kd) outside the band */
+/* and store it in WORK */
+
+ i__4 = j + *kd;
+ i__5 = j;
+ i__6 = (j + *kd) * ab_dim1 + 1;
+ z__1.r = work[i__5].r * ab[i__6].r - work[i__5].i *
+ ab[i__6].i, z__1.i = work[i__5].r * ab[i__6]
+ .i + work[i__5].i * ab[i__6].r;
+ work[i__4].r = z__1.r, work[i__4].i = z__1.i;
+ i__4 = (j + *kd) * ab_dim1 + 1;
+ i__5 = j;
+ i__6 = (j + *kd) * ab_dim1 + 1;
+ z__1.r = d__[i__5] * ab[i__6].r, z__1.i = d__[i__5] *
+ ab[i__6].i;
+ ab[i__4].r = z__1.r, ab[i__4].i = z__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+/* L90: */
+ }
+ }
+
+ if (*kd > 0) {
+
+/* make off-diagonal elements real and copy them to E */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__3 = *kd + (i__ + 1) * ab_dim1;
+ t.r = ab[i__3].r, t.i = ab[i__3].i;
+ abst = z_abs(&t);
+ i__3 = *kd + (i__ + 1) * ab_dim1;
+ ab[i__3].r = abst, ab[i__3].i = 0.;
+ e[i__] = abst;
+ if (abst != 0.) {
+ z__1.r = t.r / abst, z__1.i = t.i / abst;
+ t.r = z__1.r, t.i = z__1.i;
+ } else {
+ t.r = 1., t.i = 0.;
+ }
+ if (i__ < *n - 1) {
+ i__3 = *kd + (i__ + 2) * ab_dim1;
+ i__2 = *kd + (i__ + 2) * ab_dim1;
+ z__1.r = ab[i__2].r * t.r - ab[i__2].i * t.i, z__1.i = ab[
+ i__2].r * t.i + ab[i__2].i * t.r;
+ ab[i__3].r = z__1.r, ab[i__3].i = z__1.i;
+ }
+ if (wantq) {
+ d_cnjg(&z__1, &t);
+ zscal_(n, &z__1, &q[(i__ + 1) * q_dim1 + 1], &c__1);
+ }
+/* L100: */
+ }
+ } else {
+
+/* set E to zero if original matrix was diagonal */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ e[i__] = 0.;
+/* L110: */
+ }
+ }
+
+/* copy diagonal elements to D */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__3 = i__;
+ i__2 = kd1 + i__ * ab_dim1;
+ d__[i__3] = ab[i__2].r;
+/* L120: */
+ }
+
+ } else {
+
+ if (*kd > 1) {
+
+/* Reduce to complex Hermitian tridiagonal form, working with */
+/* the lower triangle */
+
+ nr = 0;
+ j1 = kdn + 2;
+ j2 = 1;
+
+ i__1 = ab_dim1 + 1;
+ i__3 = ab_dim1 + 1;
+ d__1 = ab[i__3].r;
+ ab[i__1].r = d__1, ab[i__1].i = 0.;
+ i__1 = *n - 2;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Reduce i-th column of matrix to tridiagonal form */
+
+ for (k = kdn + 1; k >= 2; --k) {
+ j1 += kdn;
+ j2 += kdn;
+
+ if (nr > 0) {
+
+/* generate plane rotations to annihilate nonzero */
+/* elements which have been created outside the band */
+
+ zlargv_(&nr, &ab[kd1 + (j1 - kd1) * ab_dim1], &inca, &
+ work[j1], &kd1, &d__[j1], &kd1);
+
+/* apply plane rotations from one side */
+
+
+/* Dependent on the number of diagonals either */
+/* ZLARTV or ZROT is used */
+
+ if (nr > (*kd << 1) - 1) {
+ i__3 = *kd - 1;
+ for (l = 1; l <= i__3; ++l) {
+ zlartv_(&nr, &ab[kd1 - l + (j1 - kd1 + l) *
+ ab_dim1], &inca, &ab[kd1 - l + 1 + (
+ j1 - kd1 + l) * ab_dim1], &inca, &d__[
+ j1], &work[j1], &kd1);
+/* L130: */
+ }
+ } else {
+ jend = j1 + kd1 * (nr - 1);
+ i__3 = jend;
+ i__2 = kd1;
+ for (jinc = j1; i__2 < 0 ? jinc >= i__3 : jinc <=
+ i__3; jinc += i__2) {
+ zrot_(&kdm1, &ab[*kd + (jinc - *kd) * ab_dim1]
+, &incx, &ab[kd1 + (jinc - *kd) *
+ ab_dim1], &incx, &d__[jinc], &work[
+ jinc]);
+/* L140: */
+ }
+ }
+
+ }
+
+ if (k > 2) {
+ if (k <= *n - i__ + 1) {
+
+/* generate plane rotation to annihilate a(i+k-1,i) */
+/* within the band */
+
+ zlartg_(&ab[k - 1 + i__ * ab_dim1], &ab[k + i__ *
+ ab_dim1], &d__[i__ + k - 1], &work[i__ +
+ k - 1], &temp);
+ i__2 = k - 1 + i__ * ab_dim1;
+ ab[i__2].r = temp.r, ab[i__2].i = temp.i;
+
+/* apply rotation from the left */
+
+ i__2 = k - 3;
+ i__3 = *ldab - 1;
+ i__4 = *ldab - 1;
+ zrot_(&i__2, &ab[k - 2 + (i__ + 1) * ab_dim1], &
+ i__3, &ab[k - 1 + (i__ + 1) * ab_dim1], &
+ i__4, &d__[i__ + k - 1], &work[i__ + k -
+ 1]);
+ }
+ ++nr;
+ j1 = j1 - kdn - 1;
+ }
+
+/* apply plane rotations from both sides to diagonal */
+/* blocks */
+
+ if (nr > 0) {
+ zlar2v_(&nr, &ab[(j1 - 1) * ab_dim1 + 1], &ab[j1 *
+ ab_dim1 + 1], &ab[(j1 - 1) * ab_dim1 + 2], &
+ inca, &d__[j1], &work[j1], &kd1);
+ }
+
+/* apply plane rotations from the right */
+
+
+/* Dependent on the number of diagonals either */
+/* ZLARTV or ZROT is used */
+
+ if (nr > 0) {
+ zlacgv_(&nr, &work[j1], &kd1);
+ if (nr > (*kd << 1) - 1) {
+ i__2 = *kd - 1;
+ for (l = 1; l <= i__2; ++l) {
+ if (j2 + l > *n) {
+ nrt = nr - 1;
+ } else {
+ nrt = nr;
+ }
+ if (nrt > 0) {
+ zlartv_(&nrt, &ab[l + 2 + (j1 - 1) *
+ ab_dim1], &inca, &ab[l + 1 + j1 *
+ ab_dim1], &inca, &d__[j1], &work[
+ j1], &kd1);
+ }
+/* L150: */
+ }
+ } else {
+ j1end = j1 + kd1 * (nr - 2);
+ if (j1end >= j1) {
+ i__2 = j1end;
+ i__3 = kd1;
+ for (j1inc = j1; i__3 < 0 ? j1inc >= i__2 :
+ j1inc <= i__2; j1inc += i__3) {
+ zrot_(&kdm1, &ab[(j1inc - 1) * ab_dim1 +
+ 3], &c__1, &ab[j1inc * ab_dim1 +
+ 2], &c__1, &d__[j1inc], &work[
+ j1inc]);
+/* L160: */
+ }
+ }
+/* Computing MIN */
+ i__3 = kdm1, i__2 = *n - j2;
+ lend = min(i__3,i__2);
+ last = j1end + kd1;
+ if (lend > 0) {
+ zrot_(&lend, &ab[(last - 1) * ab_dim1 + 3], &
+ c__1, &ab[last * ab_dim1 + 2], &c__1,
+ &d__[last], &work[last]);
+ }
+ }
+ }
+
+
+
+ if (wantq) {
+
+/* accumulate product of plane rotations in Q */
+
+ if (initq) {
+
+/* take advantage of the fact that Q was */
+/* initially the Identity matrix */
+
+ iqend = max(iqend,j2);
+/* Computing MAX */
+ i__3 = 0, i__2 = k - 3;
+ i2 = max(i__3,i__2);
+ iqaend = i__ * *kd + 1;
+ if (k == 2) {
+ iqaend += *kd;
+ }
+ iqaend = min(iqaend,iqend);
+ i__3 = j2;
+ i__2 = kd1;
+ for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j
+ += i__2) {
+ ibl = i__ - i2 / kdm1;
+ ++i2;
+/* Computing MAX */
+ i__4 = 1, i__5 = j - ibl;
+ iqb = max(i__4,i__5);
+ nq = iqaend + 1 - iqb;
+/* Computing MIN */
+ i__4 = iqaend + *kd;
+ iqaend = min(i__4,iqend);
+ zrot_(&nq, &q[iqb + (j - 1) * q_dim1], &c__1,
+ &q[iqb + j * q_dim1], &c__1, &d__[j],
+ &work[j]);
+/* L170: */
+ }
+ } else {
+
+ i__2 = j2;
+ i__3 = kd1;
+ for (j = j1; i__3 < 0 ? j >= i__2 : j <= i__2; j
+ += i__3) {
+ zrot_(n, &q[(j - 1) * q_dim1 + 1], &c__1, &q[
+ j * q_dim1 + 1], &c__1, &d__[j], &
+ work[j]);
+/* L180: */
+ }
+ }
+ }
+
+ if (j2 + kdn > *n) {
+
+/* adjust J2 to keep within the bounds of the matrix */
+
+ --nr;
+ j2 = j2 - kdn - 1;
+ }
+
+ i__3 = j2;
+ i__2 = kd1;
+ for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j += i__2)
+ {
+
+/* create nonzero element a(j+kd,j-1) outside the */
+/* band and store it in WORK */
+
+ i__4 = j + *kd;
+ i__5 = j;
+ i__6 = kd1 + j * ab_dim1;
+ z__1.r = work[i__5].r * ab[i__6].r - work[i__5].i *
+ ab[i__6].i, z__1.i = work[i__5].r * ab[i__6]
+ .i + work[i__5].i * ab[i__6].r;
+ work[i__4].r = z__1.r, work[i__4].i = z__1.i;
+ i__4 = kd1 + j * ab_dim1;
+ i__5 = j;
+ i__6 = kd1 + j * ab_dim1;
+ z__1.r = d__[i__5] * ab[i__6].r, z__1.i = d__[i__5] *
+ ab[i__6].i;
+ ab[i__4].r = z__1.r, ab[i__4].i = z__1.i;
+/* L190: */
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ }
+
+ if (*kd > 0) {
+
+/* make off-diagonal elements real and copy them to E */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ * ab_dim1 + 2;
+ t.r = ab[i__2].r, t.i = ab[i__2].i;
+ abst = z_abs(&t);
+ i__2 = i__ * ab_dim1 + 2;
+ ab[i__2].r = abst, ab[i__2].i = 0.;
+ e[i__] = abst;
+ if (abst != 0.) {
+ z__1.r = t.r / abst, z__1.i = t.i / abst;
+ t.r = z__1.r, t.i = z__1.i;
+ } else {
+ t.r = 1., t.i = 0.;
+ }
+ if (i__ < *n - 1) {
+ i__2 = (i__ + 1) * ab_dim1 + 2;
+ i__3 = (i__ + 1) * ab_dim1 + 2;
+ z__1.r = ab[i__3].r * t.r - ab[i__3].i * t.i, z__1.i = ab[
+ i__3].r * t.i + ab[i__3].i * t.r;
+ ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
+ }
+ if (wantq) {
+ zscal_(n, &t, &q[(i__ + 1) * q_dim1 + 1], &c__1);
+ }
+/* L220: */
+ }
+ } else {
+
+/* set E to zero if original matrix was diagonal */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ e[i__] = 0.;
+/* L230: */
+ }
+ }
+
+/* copy diagonal elements to D */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__ * ab_dim1 + 1;
+ d__[i__2] = ab[i__3].r;
+/* L240: */
+ }
+ }
+
+ return 0;
+
+/* End of ZHBTRD */
+
+} /* zhbtrd_ */
diff --git a/contrib/libs/clapack/zhecon.c b/contrib/libs/clapack/zhecon.c
new file mode 100644
index 0000000000..869cdc034b
--- /dev/null
+++ b/contrib/libs/clapack/zhecon.c
@@ -0,0 +1,203 @@
+/* zhecon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zhecon_(char *uplo, integer *n, doublecomplex *a,
+ integer *lda, integer *ipiv, doublereal *anorm, doublereal *rcond,
+ doublecomplex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, kase;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ logical upper;
+ extern /* Subroutine */ int zlacn2_(integer *, doublecomplex *,
+ doublecomplex *, doublereal *, integer *, integer *), xerbla_(
+ char *, integer *);
+ doublereal ainvnm;
+ extern /* Subroutine */ int zhetrs_(char *, integer *, integer *,
+ doublecomplex *, integer *, integer *, doublecomplex *, integer *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHECON estimates the reciprocal of the condition number of a complex */
+/* Hermitian matrix A using the factorization A = U*D*U**H or */
+/* A = L*D*L**H computed by ZHETRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**H; */
+/* = 'L': Lower triangular, form is A = L*D*L**H. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The block diagonal matrix D and the multipliers used to */
+/* obtain the factor U or L as computed by ZHETRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by ZHETRF. */
+
+/* ANORM (input) DOUBLE PRECISION */
+/* The 1-norm of the original matrix A. */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else if (*anorm < 0.) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHECON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.;
+ if (*n == 0) {
+ *rcond = 1.;
+ return 0;
+ } else if (*anorm <= 0.) {
+ return 0;
+ }
+
+/* Check that the diagonal matrix D is nonsingular. */
+
+ if (upper) {
+
+/* Upper triangular storage: examine D from bottom to top */
+
+ for (i__ = *n; i__ >= 1; --i__) {
+ i__1 = i__ + i__ * a_dim1;
+ if (ipiv[i__] > 0 && (a[i__1].r == 0. && a[i__1].i == 0.)) {
+ return 0;
+ }
+/* L10: */
+ }
+ } else {
+
+/* Lower triangular storage: examine D from top to bottom. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ if (ipiv[i__] > 0 && (a[i__2].r == 0. && a[i__2].i == 0.)) {
+ return 0;
+ }
+/* L20: */
+ }
+ }
+
+/* Estimate the 1-norm of the inverse. */
+
+ kase = 0;
+L30:
+ zlacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+
+/* Multiply by inv(L*D*L') or inv(U*D*U'). */
+
+ zhetrs_(uplo, n, &c__1, &a[a_offset], lda, &ipiv[1], &work[1], n,
+ info);
+ goto L30;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.) {
+ *rcond = 1. / ainvnm / *anorm;
+ }
+
+ return 0;
+
+/* End of ZHECON */
+
+} /* zhecon_ */
diff --git a/contrib/libs/clapack/zheequb.c b/contrib/libs/clapack/zheequb.c
new file mode 100644
index 0000000000..eb1bcd7829
--- /dev/null
+++ b/contrib/libs/clapack/zheequb.c
@@ -0,0 +1,439 @@
+/* zheequb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zheequb_(char *uplo, integer *n, doublecomplex *a,
+ integer *lda, doublereal *s, doublereal *scond, doublereal *amax,
+ doublecomplex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *), sqrt(doublereal), log(doublereal), pow_di(
+ doublereal *, integer *);
+
+ /* Local variables */
+ doublereal d__;
+ integer i__, j;
+ doublereal t, u, c0, c1, c2, si;
+ logical up;
+ doublereal avg, std, tol, base;
+ integer iter;
+ doublereal smin, smax, scale;
+ extern logical lsame_(char *, char *);
+ doublereal sumsq;
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum, smlnum;
+ extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- Contributed by James Demmel, Deaglan Halligan, Yozo Hida and -- */
+/* -- Jason Riedy of Univ. of California Berkeley. -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley and NAG Ltd. -- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHEEQUB computes row and column scalings intended to equilibrate a */
+/* symmetric matrix A and reduce its condition number */
+/* (with respect to the two-norm). S contains the scale factors, */
+/* S(i) = 1/sqrt(A(i,i)), chosen so that the scaled matrix B with */
+/* elements B(i,j) = S(i)*A(i,j)*S(j) has ones on the diagonal. This */
+/* choice of S puts the condition number of B within a factor N of the */
+/* smallest possible condition number over all possible diagonal */
+/* scalings. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The N-by-N symmetric matrix whose scaling */
+/* factors are to be computed. Only the diagonal elements of A */
+/* are referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* S (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, S contains the scale factors for A. */
+
+/* SCOND (output) DOUBLE PRECISION */
+/* If INFO = 0, S contains the ratio of the smallest S(i) to */
+/* the largest S(i). If SCOND >= 0.1 and AMAX is neither too */
+/* large nor too small, it is not worth scaling by S. */
+
+/* AMAX (output) DOUBLE PRECISION */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element is nonpositive. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function Definitions .. */
+
+/* Test input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (! (lsame_(uplo, "U") || lsame_(uplo, "L"))) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHEEQUB", &i__1);
+ return 0;
+ }
+ up = lsame_(uplo, "U");
+ *amax = 0.;
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ *scond = 1.;
+ return 0;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ s[i__] = 0.;
+ }
+ *amax = 0.;
+ if (up) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ d__3 = s[i__], d__4 = (d__1 = a[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&a[i__ + j * a_dim1]), abs(d__2));
+ s[i__] = max(d__3,d__4);
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ d__3 = s[j], d__4 = (d__1 = a[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&a[i__ + j * a_dim1]), abs(d__2));
+ s[j] = max(d__3,d__4);
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ d__3 = *amax, d__4 = (d__1 = a[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&a[i__ + j * a_dim1]), abs(d__2));
+ *amax = max(d__3,d__4);
+ }
+/* Computing MAX */
+ i__2 = j + j * a_dim1;
+ d__3 = s[j], d__4 = (d__1 = a[i__2].r, abs(d__1)) + (d__2 =
+ d_imag(&a[j + j * a_dim1]), abs(d__2));
+ s[j] = max(d__3,d__4);
+/* Computing MAX */
+ i__2 = j + j * a_dim1;
+ d__3 = *amax, d__4 = (d__1 = a[i__2].r, abs(d__1)) + (d__2 =
+ d_imag(&a[j + j * a_dim1]), abs(d__2));
+ *amax = max(d__3,d__4);
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j + j * a_dim1;
+ d__3 = s[j], d__4 = (d__1 = a[i__2].r, abs(d__1)) + (d__2 =
+ d_imag(&a[j + j * a_dim1]), abs(d__2));
+ s[j] = max(d__3,d__4);
+/* Computing MAX */
+ i__2 = j + j * a_dim1;
+ d__3 = *amax, d__4 = (d__1 = a[i__2].r, abs(d__1)) + (d__2 =
+ d_imag(&a[j + j * a_dim1]), abs(d__2));
+ *amax = max(d__3,d__4);
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ d__3 = s[i__], d__4 = (d__1 = a[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&a[i__ + j * a_dim1]), abs(d__2));
+ s[i__] = max(d__3,d__4);
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ d__3 = s[j], d__4 = (d__1 = a[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&a[i__ + j * a_dim1]), abs(d__2));
+ s[j] = max(d__3,d__4);
+/* Computing MAX */
+ i__3 = i__ + j * a_dim1;
+ d__3 = *amax, d__4 = (d__1 = a[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&a[i__ + j * a_dim1]), abs(d__2));
+ *amax = max(d__3,d__4);
+ }
+ }
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ s[j] = 1. / s[j];
+ }
+ tol = 1. / sqrt(*n * 2.);
+ for (iter = 1; iter <= 100; ++iter) {
+ scale = 0.;
+ sumsq = 0.;
+/* beta = |A|s */
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ work[i__2].r = 0., work[i__2].i = 0.;
+ }
+ if (up) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ t = (d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a[i__
+ + j * a_dim1]), abs(d__2));
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ d__3 = ((d__1 = a[i__5].r, abs(d__1)) + (d__2 = d_imag(&a[
+ i__ + j * a_dim1]), abs(d__2))) * s[j];
+ z__1.r = work[i__4].r + d__3, z__1.i = work[i__4].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+ i__3 = j;
+ i__4 = j;
+ i__5 = i__ + j * a_dim1;
+ d__3 = ((d__1 = a[i__5].r, abs(d__1)) + (d__2 = d_imag(&a[
+ i__ + j * a_dim1]), abs(d__2))) * s[i__];
+ z__1.r = work[i__4].r + d__3, z__1.i = work[i__4].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+ }
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ d__3 = ((d__1 = a[i__4].r, abs(d__1)) + (d__2 = d_imag(&a[j +
+ j * a_dim1]), abs(d__2))) * s[j];
+ z__1.r = work[i__3].r + d__3, z__1.i = work[i__3].i;
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ d__3 = ((d__1 = a[i__4].r, abs(d__1)) + (d__2 = d_imag(&a[j +
+ j * a_dim1]), abs(d__2))) * s[j];
+ z__1.r = work[i__3].r + d__3, z__1.i = work[i__3].i;
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ t = (d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a[i__
+ + j * a_dim1]), abs(d__2));
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ d__3 = ((d__1 = a[i__5].r, abs(d__1)) + (d__2 = d_imag(&a[
+ i__ + j * a_dim1]), abs(d__2))) * s[j];
+ z__1.r = work[i__4].r + d__3, z__1.i = work[i__4].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+ i__3 = j;
+ i__4 = j;
+ i__5 = i__ + j * a_dim1;
+ d__3 = ((d__1 = a[i__5].r, abs(d__1)) + (d__2 = d_imag(&a[
+ i__ + j * a_dim1]), abs(d__2))) * s[i__];
+ z__1.r = work[i__4].r + d__3, z__1.i = work[i__4].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+ }
+ }
+ }
+/* avg = s^T beta / n */
+ avg = 0.;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ z__2.r = s[i__2] * work[i__3].r, z__2.i = s[i__2] * work[i__3].i;
+ z__1.r = avg + z__2.r, z__1.i = z__2.i;
+ avg = z__1.r;
+ }
+ avg /= *n;
+ std = 0.;
+ i__1 = *n * 3;
+ for (i__ = (*n << 1) + 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__ - (*n << 1);
+ i__4 = i__ - (*n << 1);
+ z__2.r = s[i__3] * work[i__4].r, z__2.i = s[i__3] * work[i__4].i;
+ z__1.r = z__2.r - avg, z__1.i = z__2.i;
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+ }
+ zlassq_(n, &work[(*n << 1) + 1], &c__1, &scale, &sumsq);
+ std = scale * sqrt(sumsq / *n);
+ if (std < tol * avg) {
+ goto L999;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ t = (d__1 = a[i__2].r, abs(d__1)) + (d__2 = d_imag(&a[i__ + i__ *
+ a_dim1]), abs(d__2));
+ si = s[i__];
+ c2 = (*n - 1) * t;
+ i__2 = *n - 2;
+ i__3 = i__;
+ d__1 = t * si;
+ z__2.r = work[i__3].r - d__1, z__2.i = work[i__3].i;
+ d__2 = (doublereal) i__2;
+ z__1.r = d__2 * z__2.r, z__1.i = d__2 * z__2.i;
+ c1 = z__1.r;
+ d__1 = -(t * si) * si;
+ i__2 = i__;
+ d__2 = 2.;
+ z__4.r = d__2 * work[i__2].r, z__4.i = d__2 * work[i__2].i;
+ z__3.r = si * z__4.r, z__3.i = si * z__4.i;
+ z__2.r = d__1 + z__3.r, z__2.i = z__3.i;
+ d__3 = *n * avg;
+ z__1.r = z__2.r - d__3, z__1.i = z__2.i;
+ c0 = z__1.r;
+ d__ = c1 * c1 - c0 * 4 * c2;
+ if (d__ <= 0.) {
+ *info = -1;
+ return 0;
+ }
+ si = c0 * -2 / (c1 + sqrt(d__));
+ d__ = si - s[i__];
+ u = 0.;
+ if (up) {
+ i__2 = i__;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = j + i__ * a_dim1;
+ t = (d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a[j +
+ i__ * a_dim1]), abs(d__2));
+ u += s[j] * t;
+ i__3 = j;
+ i__4 = j;
+ d__1 = d__ * t;
+ z__1.r = work[i__4].r + d__1, z__1.i = work[i__4].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+ }
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ i__3 = i__ + j * a_dim1;
+ t = (d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a[i__
+ + j * a_dim1]), abs(d__2));
+ u += s[j] * t;
+ i__3 = j;
+ i__4 = j;
+ d__1 = d__ * t;
+ z__1.r = work[i__4].r + d__1, z__1.i = work[i__4].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+ }
+ } else {
+ i__2 = i__;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = i__ + j * a_dim1;
+ t = (d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a[i__
+ + j * a_dim1]), abs(d__2));
+ u += s[j] * t;
+ i__3 = j;
+ i__4 = j;
+ d__1 = d__ * t;
+ z__1.r = work[i__4].r + d__1, z__1.i = work[i__4].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+ }
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ i__3 = j + i__ * a_dim1;
+ t = (d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a[j +
+ i__ * a_dim1]), abs(d__2));
+ u += s[j] * t;
+ i__3 = j;
+ i__4 = j;
+ d__1 = d__ * t;
+ z__1.r = work[i__4].r + d__1, z__1.i = work[i__4].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+ }
+ }
+ i__2 = i__;
+ z__4.r = u + work[i__2].r, z__4.i = work[i__2].i;
+ z__3.r = d__ * z__4.r, z__3.i = d__ * z__4.i;
+ d__1 = (doublereal) (*n);
+ z__2.r = z__3.r / d__1, z__2.i = z__3.i / d__1;
+ z__1.r = avg + z__2.r, z__1.i = z__2.i;
+ avg = z__1.r;
+ s[i__] = si;
+ }
+ }
+L999:
+ smlnum = dlamch_("SAFEMIN");
+ bignum = 1. / smlnum;
+ smin = bignum;
+ smax = 0.;
+ t = 1. / sqrt(avg);
+ base = dlamch_("B");
+ u = 1. / log(base);
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = (integer) (u * log(s[i__] * t));
+ s[i__] = pow_di(&base, &i__2);
+/* Computing MIN */
+ d__1 = smin, d__2 = s[i__];
+ smin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = smax, d__2 = s[i__];
+ smax = max(d__1,d__2);
+ }
+ *scond = max(smin,smlnum) / min(smax,bignum);
+ return 0;
+} /* zheequb_ */
diff --git a/contrib/libs/clapack/zheev.c b/contrib/libs/clapack/zheev.c
new file mode 100644
index 0000000000..f712fd83b2
--- /dev/null
+++ b/contrib/libs/clapack/zheev.c
@@ -0,0 +1,289 @@
+/* zheev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__0 = 0;
+static doublereal c_b18 = 1.;
+
+/* Subroutine */ int zheev_(char *jobz, char *uplo, integer *n, doublecomplex
+ *a, integer *lda, doublereal *w, doublecomplex *work, integer *lwork,
+ doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer nb;
+ doublereal eps;
+ integer inde;
+ doublereal anrm;
+ integer imax;
+ doublereal rmin, rmax;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ doublereal sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical lower, wantz;
+ extern doublereal dlamch_(char *);
+ integer iscale;
+ doublereal safmin;
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum;
+ extern doublereal zlanhe_(char *, char *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ integer indtau;
+ extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+ integer *), zlascl_(char *, integer *, integer *, doublereal *,
+ doublereal *, integer *, integer *, doublecomplex *, integer *,
+ integer *);
+ integer indwrk;
+ extern /* Subroutine */ int zhetrd_(char *, integer *, doublecomplex *,
+ integer *, doublereal *, doublereal *, doublecomplex *,
+ doublecomplex *, integer *, integer *);
+ integer llwork;
+ doublereal smlnum;
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int zsteqr_(char *, integer *, doublereal *,
+ doublereal *, doublecomplex *, integer *, doublereal *, integer *), zungtr_(char *, integer *, doublecomplex *, integer *,
+ doublecomplex *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHEEV computes all eigenvalues and, optionally, eigenvectors of a */
+/* complex Hermitian matrix A. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA, N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+/* On exit, if JOBZ = 'V', then if INFO = 0, A contains the */
+/* orthonormal eigenvectors of the matrix A. */
+/* If JOBZ = 'N', then on exit the lower triangle (if UPLO='L') */
+/* or the upper triangle (if UPLO='U') of A, including the */
+/* diagonal, is destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,2*N-1). */
+/* For optimal efficiency, LWORK >= (NB+1)*N, */
+/* where NB is the blocksize for ZHETRD returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (max(1, 3*N-2)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the algorithm failed to converge; i */
+/* off-diagonal elements of an intermediate tridiagonal */
+/* form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --w;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ lower = lsame_(uplo, "L");
+ lquery = *lwork == -1;
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (lower || lsame_(uplo, "U"))) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+
+ if (*info == 0) {
+ nb = ilaenv_(&c__1, "ZHETRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = 1, i__2 = (nb + 1) * *n;
+ lwkopt = max(i__1,i__2);
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+/* Computing MAX */
+ i__1 = 1, i__2 = (*n << 1) - 1;
+ if (*lwork < max(i__1,i__2) && ! lquery) {
+ *info = -8;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHEEV ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ i__1 = a_dim1 + 1;
+ w[1] = a[i__1].r;
+ work[1].r = 1., work[1].i = 0.;
+ if (wantz) {
+ i__1 = a_dim1 + 1;
+ a[i__1].r = 1., a[i__1].i = 0.;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = dlamch_("Safe minimum");
+ eps = dlamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1. / smlnum;
+ rmin = sqrt(smlnum);
+ rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ anrm = zlanhe_("M", uplo, n, &a[a_offset], lda, &rwork[1]);
+ iscale = 0;
+ if (anrm > 0. && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ zlascl_(uplo, &c__0, &c__0, &c_b18, &sigma, n, n, &a[a_offset], lda,
+ info);
+ }
+
+/* Call ZHETRD to reduce Hermitian matrix to tridiagonal form. */
+
+ inde = 1;
+ indtau = 1;
+ indwrk = indtau + *n;
+ llwork = *lwork - indwrk + 1;
+ zhetrd_(uplo, n, &a[a_offset], lda, &w[1], &rwork[inde], &work[indtau], &
+ work[indwrk], &llwork, &iinfo);
+
+/* For eigenvalues only, call DSTERF. For eigenvectors, first call */
+/* ZUNGTR to generate the unitary matrix, then call ZSTEQR. */
+
+ if (! wantz) {
+ dsterf_(n, &w[1], &rwork[inde], info);
+ } else {
+ zungtr_(uplo, n, &a[a_offset], lda, &work[indtau], &work[indwrk], &
+ llwork, &iinfo);
+ indwrk = inde + *n;
+ zsteqr_(jobz, n, &w[1], &rwork[inde], &a[a_offset], lda, &rwork[
+ indwrk], info);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *n;
+ } else {
+ imax = *info - 1;
+ }
+ d__1 = 1. / sigma;
+ dscal_(&imax, &d__1, &w[1], &c__1);
+ }
+
+/* Set WORK(1) to optimal complex workspace size. */
+
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZHEEV */
+
+} /* zheev_ */
diff --git a/contrib/libs/clapack/zheevd.c b/contrib/libs/clapack/zheevd.c
new file mode 100644
index 0000000000..cf766e45e2
--- /dev/null
+++ b/contrib/libs/clapack/zheevd.c
@@ -0,0 +1,382 @@
+/* zheevd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__0 = 0;
+static doublereal c_b18 = 1.;
+
+/* Subroutine */ int zheevd_(char *jobz, char *uplo, integer *n,
+ doublecomplex *a, integer *lda, doublereal *w, doublecomplex *work,
+ integer *lwork, doublereal *rwork, integer *lrwork, integer *iwork,
+ integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ doublereal eps;
+ integer inde;
+ doublereal anrm;
+ integer imax;
+ doublereal rmin, rmax;
+ integer lopt;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ doublereal sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo, lwmin, liopt;
+ logical lower;
+ integer llrwk, lropt;
+ logical wantz;
+ integer indwk2, llwrk2;
+ extern doublereal dlamch_(char *);
+ integer iscale;
+ doublereal safmin;
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum;
+ extern doublereal zlanhe_(char *, char *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ integer indtau;
+ extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+ integer *), zlascl_(char *, integer *, integer *, doublereal *,
+ doublereal *, integer *, integer *, doublecomplex *, integer *,
+ integer *), zstedc_(char *, integer *, doublereal *,
+ doublereal *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublereal *, integer *, integer *, integer *, integer
+ *);
+ integer indrwk, indwrk, liwmin;
+ extern /* Subroutine */ int zhetrd_(char *, integer *, doublecomplex *,
+ integer *, doublereal *, doublereal *, doublecomplex *,
+ doublecomplex *, integer *, integer *), zlacpy_(char *,
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *);
+ integer lrwmin, llwork;
+ doublereal smlnum;
+ logical lquery;
+ extern /* Subroutine */ int zunmtr_(char *, char *, char *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHEEVD computes all eigenvalues and, optionally, eigenvectors of a */
+/* complex Hermitian matrix A. If eigenvectors are desired, it uses a */
+/* divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA, N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+/* On exit, if JOBZ = 'V', then if INFO = 0, A contains the */
+/* orthonormal eigenvectors of the matrix A. */
+/* If JOBZ = 'N', then on exit the lower triangle (if UPLO='L') */
+/* or the upper triangle (if UPLO='U') of A, including the */
+/* diagonal, is destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. */
+/* If N <= 1, LWORK must be at least 1. */
+/* If JOBZ = 'N' and N > 1, LWORK must be at least N + 1. */
+/* If JOBZ = 'V' and N > 1, LWORK must be at least 2*N + N**2. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK, RWORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* RWORK (workspace/output) DOUBLE PRECISION array, */
+/* dimension (LRWORK) */
+/* On exit, if INFO = 0, RWORK(1) returns the optimal LRWORK. */
+
+/* LRWORK (input) INTEGER */
+/* The dimension of the array RWORK. */
+/* If N <= 1, LRWORK must be at least 1. */
+/* If JOBZ = 'N' and N > 1, LRWORK must be at least N. */
+/* If JOBZ = 'V' and N > 1, LRWORK must be at least */
+/* 1 + 5*N + 2*N**2. */
+
+/* If LRWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If N <= 1, LIWORK must be at least 1. */
+/* If JOBZ = 'N' and N > 1, LIWORK must be at least 1. */
+/* If JOBZ = 'V' and N > 1, LIWORK must be at least 3 + 5*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i and JOBZ = 'N', then the algorithm failed */
+/* to converge; i off-diagonal elements of an intermediate */
+/* tridiagonal form did not converge to zero; */
+/* if INFO = i and JOBZ = 'V', then the algorithm failed */
+/* to compute an eigenvalue while working on the submatrix */
+/* lying in rows and columns INFO/(N+1) through */
+/* mod(INFO,N+1). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+
+/* Modified description of INFO. Sven, 16 Feb 05. */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --w;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ lower = lsame_(uplo, "L");
+ lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1;
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (lower || lsame_(uplo, "U"))) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+
+ if (*info == 0) {
+ if (*n <= 1) {
+ lwmin = 1;
+ lrwmin = 1;
+ liwmin = 1;
+ lopt = lwmin;
+ lropt = lrwmin;
+ liopt = liwmin;
+ } else {
+ if (wantz) {
+ lwmin = (*n << 1) + *n * *n;
+/* Computing 2nd power */
+ i__1 = *n;
+ lrwmin = *n * 5 + 1 + (i__1 * i__1 << 1);
+ liwmin = *n * 5 + 3;
+ } else {
+ lwmin = *n + 1;
+ lrwmin = *n;
+ liwmin = 1;
+ }
+/* Computing MAX */
+ i__1 = lwmin, i__2 = *n + ilaenv_(&c__1, "ZHETRD", uplo, n, &c_n1,
+ &c_n1, &c_n1);
+ lopt = max(i__1,i__2);
+ lropt = lrwmin;
+ liopt = liwmin;
+ }
+ work[1].r = (doublereal) lopt, work[1].i = 0.;
+ rwork[1] = (doublereal) lropt;
+ iwork[1] = liopt;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -8;
+ } else if (*lrwork < lrwmin && ! lquery) {
+ *info = -10;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHEEVD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ i__1 = a_dim1 + 1;
+ w[1] = a[i__1].r;
+ if (wantz) {
+ i__1 = a_dim1 + 1;
+ a[i__1].r = 1., a[i__1].i = 0.;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = dlamch_("Safe minimum");
+ eps = dlamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1. / smlnum;
+ rmin = sqrt(smlnum);
+ rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ anrm = zlanhe_("M", uplo, n, &a[a_offset], lda, &rwork[1]);
+ iscale = 0;
+ if (anrm > 0. && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ zlascl_(uplo, &c__0, &c__0, &c_b18, &sigma, n, n, &a[a_offset], lda,
+ info);
+ }
+
+/* Call ZHETRD to reduce Hermitian matrix to tridiagonal form. */
+
+ inde = 1;
+ indtau = 1;
+ indwrk = indtau + *n;
+ indrwk = inde + *n;
+ indwk2 = indwrk + *n * *n;
+ llwork = *lwork - indwrk + 1;
+ llwrk2 = *lwork - indwk2 + 1;
+ llrwk = *lrwork - indrwk + 1;
+ zhetrd_(uplo, n, &a[a_offset], lda, &w[1], &rwork[inde], &work[indtau], &
+ work[indwrk], &llwork, &iinfo);
+
+/* For eigenvalues only, call DSTERF. For eigenvectors, first call */
+/* ZSTEDC to generate the eigenvector matrix, WORK(INDWRK), of the */
+/* tridiagonal matrix, then call ZUNMTR to multiply it to the */
+/* Householder transformations represented as Householder vectors in */
+/* A. */
+
+ if (! wantz) {
+ dsterf_(n, &w[1], &rwork[inde], info);
+ } else {
+ zstedc_("I", n, &w[1], &rwork[inde], &work[indwrk], n, &work[indwk2],
+ &llwrk2, &rwork[indrwk], &llrwk, &iwork[1], liwork, info);
+ zunmtr_("L", uplo, "N", n, n, &a[a_offset], lda, &work[indtau], &work[
+ indwrk], n, &work[indwk2], &llwrk2, &iinfo);
+ zlacpy_("A", n, n, &work[indwrk], n, &a[a_offset], lda);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *n;
+ } else {
+ imax = *info - 1;
+ }
+ d__1 = 1. / sigma;
+ dscal_(&imax, &d__1, &w[1], &c__1);
+ }
+
+ work[1].r = (doublereal) lopt, work[1].i = 0.;
+ rwork[1] = (doublereal) lropt;
+ iwork[1] = liopt;
+
+ return 0;
+
+/* End of ZHEEVD */
+
+} /* zheevd_ */
diff --git a/contrib/libs/clapack/zheevr.c b/contrib/libs/clapack/zheevr.c
new file mode 100644
index 0000000000..66d725b949
--- /dev/null
+++ b/contrib/libs/clapack/zheevr.c
@@ -0,0 +1,696 @@
+/* zheevr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__10 = 10;
+static integer c__1 = 1;
+static integer c__2 = 2;
+static integer c__3 = 3;
+static integer c__4 = 4;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zheevr_(char *jobz, char *range, char *uplo, integer *n,
+ doublecomplex *a, integer *lda, doublereal *vl, doublereal *vu,
+ integer *il, integer *iu, doublereal *abstol, integer *m, doublereal *
+ w, doublecomplex *z__, integer *ldz, integer *isuppz, doublecomplex *
+ work, integer *lwork, doublereal *rwork, integer *lrwork, integer *
+ iwork, integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, z_dim1, z_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, nb, jj;
+ doublereal eps, vll, vuu, tmp1, anrm;
+ integer imax;
+ doublereal rmin, rmax;
+ logical test;
+ integer itmp1;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ integer indrd, indre;
+ doublereal sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ char order[1];
+ integer indwk;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ integer lwmin;
+ logical lower, wantz;
+ extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ extern doublereal dlamch_(char *);
+ logical alleig, indeig;
+ integer iscale, ieeeok, indibl, indrdd, indifl, indree;
+ logical valeig;
+ doublereal safmin;
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+ integer *, doublereal *, doublecomplex *, integer *);
+ doublereal abstll, bignum;
+ integer indtau, indisp;
+ extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+ integer *);
+ integer indiwo, indwkn;
+ extern /* Subroutine */ int dstebz_(char *, char *, integer *, doublereal
+ *, doublereal *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *, doublereal *, integer *, integer *);
+ integer indrwk, liwmin;
+ extern /* Subroutine */ int zhetrd_(char *, integer *, doublecomplex *,
+ integer *, doublereal *, doublereal *, doublecomplex *,
+ doublecomplex *, integer *, integer *);
+ logical tryrac;
+ integer lrwmin, llwrkn, llwork, nsplit;
+ doublereal smlnum;
+ extern /* Subroutine */ int zstein_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *, integer *, integer *, integer *);
+ logical lquery;
+ integer lwkopt;
+ extern doublereal zlansy_(char *, char *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ extern /* Subroutine */ int zstemr_(char *, char *, integer *, doublereal
+ *, doublereal *, doublereal *, doublereal *, integer *, integer *,
+ integer *, doublereal *, doublecomplex *, integer *, integer *,
+ integer *, logical *, doublereal *, integer *, integer *, integer
+ *, integer *), zunmtr_(char *, char *, char *,
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *
+);
+ integer llrwork;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHEEVR computes selected eigenvalues and, optionally, eigenvectors */
+/* of a complex Hermitian matrix A. Eigenvalues and eigenvectors can */
+/* be selected by specifying either a range of values or a range of */
+/* indices for the desired eigenvalues. */
+
+/* ZHEEVR first reduces the matrix A to tridiagonal form T with a call */
+/* to ZHETRD. Then, whenever possible, ZHEEVR calls ZSTEMR to compute */
+/* eigenspectrum using Relatively Robust Representations. ZSTEMR */
+/* computes eigenvalues by the dqds algorithm, while orthogonal */
+/* eigenvectors are computed from various "good" L D L^T representations */
+/* (also known as Relatively Robust Representations). Gram-Schmidt */
+/* orthogonalization is avoided as far as possible. More specifically, */
+/* the various steps of the algorithm are as follows. */
+
+/* For each unreduced block (submatrix) of T, */
+/* (a) Compute T - sigma I = L D L^T, so that L and D */
+/* define all the wanted eigenvalues to high relative accuracy. */
+/* This means that small relative changes in the entries of D and L */
+/* cause only small relative changes in the eigenvalues and */
+/* eigenvectors. The standard (unfactored) representation of the */
+/* tridiagonal matrix T does not have this property in general. */
+/* (b) Compute the eigenvalues to suitable accuracy. */
+/* If the eigenvectors are desired, the algorithm attains full */
+/* accuracy of the computed eigenvalues only right before */
+/* the corresponding vectors have to be computed, see steps c) and d). */
+/* (c) For each cluster of close eigenvalues, select a new */
+/* shift close to the cluster, find a new factorization, and refine */
+/* the shifted eigenvalues to suitable accuracy. */
+/* (d) For each eigenvalue with a large enough relative separation compute */
+/* the corresponding eigenvector by forming a rank revealing twisted */
+/* factorization. Go back to (c) for any clusters that remain. */
+
+/* The desired accuracy of the output can be specified by the input */
+/* parameter ABSTOL. */
+
+/* For more details, see DSTEMR's documentation and: */
+/* - Inderjit S. Dhillon and Beresford N. Parlett: "Multiple representations */
+/* to compute orthogonal eigenvectors of symmetric tridiagonal matrices," */
+/* Linear Algebra and its Applications, 387(1), pp. 1-28, August 2004. */
+/* - Inderjit Dhillon and Beresford Parlett: "Orthogonal Eigenvectors and */
+/* Relative Gaps," SIAM Journal on Matrix Analysis and Applications, Vol. 25, */
+/* 2004. Also LAPACK Working Note 154. */
+/* - Inderjit Dhillon: "A new O(n^2) algorithm for the symmetric */
+/* tridiagonal eigenvalue/eigenvector problem", */
+/* Computer Science Division Technical Report No. UCB/CSD-97-971, */
+/* UC Berkeley, May 1997. */
+
+
+/* Note 1 : ZHEEVR calls ZSTEMR when the full spectrum is requested */
+/* on machines which conform to the ieee-754 floating point standard. */
+/* ZHEEVR calls DSTEBZ and ZSTEIN on non-ieee machines and */
+/* when partial spectrum requests are made. */
+
+/* Normal execution of ZSTEMR may create NaNs and infinities and */
+/* hence may abort due to a floating point exception in environments */
+/* which do not handle NaNs and infinities in the ieee standard default */
+/* manner. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+/* ********* For RANGE = 'V' or 'I' and IU - IL < N - 1, DSTEBZ and */
+/* ********* ZSTEIN are called */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA, N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+/* On exit, the lower triangle (if UPLO='L') or the upper */
+/* triangle (if UPLO='U') of A, including the diagonal, is */
+/* destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) DOUBLE PRECISION */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing A to tridiagonal form. */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices */
+/* with Guaranteed High Relative Accuracy," by Demmel and */
+/* Kahan, LAPACK Working Note #3. */
+
+/* If high relative accuracy is important, set ABSTOL to */
+/* DLAMCH( 'Safe minimum' ). Doing so will guarantee that */
+/* eigenvalues are computed to high relative accuracy when */
+/* possible in future releases. The current code does not */
+/* make any guarantees about high relative accuracy, but */
+/* furutre releases will. See J. Barlow and J. Demmel, */
+/* "Computing Accurate Eigensystems of Scaled Diagonally */
+/* Dominant Matrices", LAPACK Working Note #7, for a discussion */
+/* of which matrices define their eigenvalues to high relative */
+/* accuracy. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* The first M elements contain the selected eigenvalues in */
+/* ascending order. */
+
+/* Z (output) COMPLEX*16 array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* ISUPPZ (output) INTEGER array, dimension ( 2*max(1,M) ) */
+/* The support of the eigenvectors in Z, i.e., the indices */
+/* indicating the nonzero elements in Z. The i-th eigenvector */
+/* is nonzero only in elements ISUPPZ( 2*i-1 ) through */
+/* ISUPPZ( 2*i ). */
+/* ********* Implemented only for RANGE = 'A' or 'I' and IU - IL = N - 1 */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,2*N). */
+/* For optimal efficiency, LWORK >= (NB+1)*N, */
+/* where NB is the max of the blocksize for ZHETRD and for */
+/* ZUNMTR as returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK, RWORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* RWORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LRWORK)) */
+/* On exit, if INFO = 0, RWORK(1) returns the optimal */
+/* (and minimal) LRWORK. */
+
+/* LRWORK (input) INTEGER */
+/* The length of the array RWORK. LRWORK >= max(1,24*N). */
+
+/* If LRWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal */
+/* (and minimal) LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. LIWORK >= max(1,10*N). */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: Internal error */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Inderjit Dhillon, IBM Almaden, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Ken Stanley, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Jason Riedy, Computer Science Division, University of */
+/* California at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --isuppz;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ ieeeok = ilaenv_(&c__10, "ZHEEVR", "N", &c__1, &c__2, &c__3, &c__4);
+
+ lower = lsame_(uplo, "L");
+ wantz = lsame_(jobz, "V");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+
+ lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1;
+
+/* Computing MAX */
+ i__1 = 1, i__2 = *n * 24;
+ lrwmin = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = 1, i__2 = *n * 10;
+ liwmin = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = 1, i__2 = *n << 1;
+ lwmin = max(i__1,i__2);
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -2;
+ } else if (! (lower || lsame_(uplo, "U"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -8;
+ }
+ } else if (indeig) {
+ if (*il < 1 || *il > max(1,*n)) {
+ *info = -9;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -10;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -15;
+ }
+ }
+
+ if (*info == 0) {
+ nb = ilaenv_(&c__1, "ZHETRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = nb, i__2 = ilaenv_(&c__1, "ZUNMTR", uplo, n, &c_n1, &c_n1, &
+ c_n1);
+ nb = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = (nb + 1) * *n;
+ lwkopt = max(i__1,lwmin);
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ rwork[1] = (doublereal) lrwmin;
+ iwork[1] = liwmin;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -18;
+ } else if (*lrwork < lrwmin && ! lquery) {
+ *info = -20;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -22;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHEEVR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ work[1].r = 1., work[1].i = 0.;
+ return 0;
+ }
+
+ if (*n == 1) {
+ work[1].r = 2., work[1].i = 0.;
+ if (alleig || indeig) {
+ *m = 1;
+ i__1 = a_dim1 + 1;
+ w[1] = a[i__1].r;
+ } else {
+ i__1 = a_dim1 + 1;
+ i__2 = a_dim1 + 1;
+ if (*vl < a[i__1].r && *vu >= a[i__2].r) {
+ *m = 1;
+ i__1 = a_dim1 + 1;
+ w[1] = a[i__1].r;
+ }
+ }
+ if (wantz) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1., z__[i__1].i = 0.;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = dlamch_("Safe minimum");
+ eps = dlamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1. / smlnum;
+ rmin = sqrt(smlnum);
+/* Computing MIN */
+ d__1 = sqrt(bignum), d__2 = 1. / sqrt(sqrt(safmin));
+ rmax = min(d__1,d__2);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ iscale = 0;
+ abstll = *abstol;
+ if (valeig) {
+ vll = *vl;
+ vuu = *vu;
+ }
+ anrm = zlansy_("M", uplo, n, &a[a_offset], lda, &rwork[1]);
+ if (anrm > 0. && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ if (lower) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j + 1;
+ zdscal_(&i__2, &sigma, &a[j + j * a_dim1], &c__1);
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ zdscal_(&j, &sigma, &a[j * a_dim1 + 1], &c__1);
+/* L20: */
+ }
+ }
+ if (*abstol > 0.) {
+ abstll = *abstol * sigma;
+ }
+ if (valeig) {
+ vll = *vl * sigma;
+ vuu = *vu * sigma;
+ }
+ }
+/* Initialize indices into workspaces. Note: The IWORK indices are */
+/* used only if DSTERF or ZSTEMR fail. */
+/* WORK(INDTAU:INDTAU+N-1) stores the complex scalar factors of the */
+/* elementary reflectors used in ZHETRD. */
+ indtau = 1;
+/* INDWK is the starting offset of the remaining complex workspace, */
+/* and LLWORK is the remaining complex workspace size. */
+ indwk = indtau + *n;
+ llwork = *lwork - indwk + 1;
+/* RWORK(INDRD:INDRD+N-1) stores the real tridiagonal's diagonal */
+/* entries. */
+ indrd = 1;
+/* RWORK(INDRE:INDRE+N-1) stores the off-diagonal entries of the */
+/* tridiagonal matrix from ZHETRD. */
+ indre = indrd + *n;
+/* RWORK(INDRDD:INDRDD+N-1) is a copy of the diagonal entries over */
+/* -written by ZSTEMR (the DSTERF path copies the diagonal to W). */
+ indrdd = indre + *n;
+/* RWORK(INDREE:INDREE+N-1) is a copy of the off-diagonal entries over */
+/* -written while computing the eigenvalues in DSTERF and ZSTEMR. */
+ indree = indrdd + *n;
+/* INDRWK is the starting offset of the left-over real workspace, and */
+/* LLRWORK is the remaining workspace size. */
+ indrwk = indree + *n;
+ llrwork = *lrwork - indrwk + 1;
+/* IWORK(INDIBL:INDIBL+M-1) corresponds to IBLOCK in DSTEBZ and */
+/* stores the block indices of each of the M<=N eigenvalues. */
+ indibl = 1;
+/* IWORK(INDISP:INDISP+NSPLIT-1) corresponds to ISPLIT in DSTEBZ and */
+/* stores the starting and finishing indices of each block. */
+ indisp = indibl + *n;
+/* IWORK(INDIFL:INDIFL+N-1) stores the indices of eigenvectors */
+/* that corresponding to eigenvectors that fail to converge in */
+/* DSTEIN. This information is discarded; if any fail, the driver */
+/* returns INFO > 0. */
+ indifl = indisp + *n;
+/* INDIWO is the offset of the remaining integer workspace. */
+ indiwo = indisp + *n;
+
+/* Call ZHETRD to reduce Hermitian matrix to tridiagonal form. */
+
+ zhetrd_(uplo, n, &a[a_offset], lda, &rwork[indrd], &rwork[indre], &work[
+ indtau], &work[indwk], &llwork, &iinfo);
+
+/* If all eigenvalues are desired */
+/* then call DSTERF or ZSTEMR and ZUNMTR. */
+
+ test = FALSE_;
+ if (indeig) {
+ if (*il == 1 && *iu == *n) {
+ test = TRUE_;
+ }
+ }
+ if ((alleig || test) && ieeeok == 1) {
+ if (! wantz) {
+ dcopy_(n, &rwork[indrd], &c__1, &w[1], &c__1);
+ i__1 = *n - 1;
+ dcopy_(&i__1, &rwork[indre], &c__1, &rwork[indree], &c__1);
+ dsterf_(n, &w[1], &rwork[indree], info);
+ } else {
+ i__1 = *n - 1;
+ dcopy_(&i__1, &rwork[indre], &c__1, &rwork[indree], &c__1);
+ dcopy_(n, &rwork[indrd], &c__1, &rwork[indrdd], &c__1);
+
+ if (*abstol <= *n * 2. * eps) {
+ tryrac = TRUE_;
+ } else {
+ tryrac = FALSE_;
+ }
+ zstemr_(jobz, "A", n, &rwork[indrdd], &rwork[indree], vl, vu, il,
+ iu, m, &w[1], &z__[z_offset], ldz, n, &isuppz[1], &tryrac,
+ &rwork[indrwk], &llrwork, &iwork[1], liwork, info);
+
+/* Apply unitary matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by ZSTEIN. */
+
+ if (wantz && *info == 0) {
+ indwkn = indwk;
+ llwrkn = *lwork - indwkn + 1;
+ zunmtr_("L", uplo, "N", n, m, &a[a_offset], lda, &work[indtau]
+, &z__[z_offset], ldz, &work[indwkn], &llwrkn, &iinfo);
+ }
+ }
+
+
+ if (*info == 0) {
+ *m = *n;
+ goto L30;
+ }
+ *info = 0;
+ }
+
+/* Otherwise, call DSTEBZ and, if eigenvectors are desired, ZSTEIN. */
+/* Also call DSTEBZ and ZSTEIN if ZSTEMR fails. */
+
+ if (wantz) {
+ *(unsigned char *)order = 'B';
+ } else {
+ *(unsigned char *)order = 'E';
+ }
+ dstebz_(range, order, n, &vll, &vuu, il, iu, &abstll, &rwork[indrd], &
+ rwork[indre], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &
+ rwork[indrwk], &iwork[indiwo], info);
+
+ if (wantz) {
+ zstein_(n, &rwork[indrd], &rwork[indre], m, &w[1], &iwork[indibl], &
+ iwork[indisp], &z__[z_offset], ldz, &rwork[indrwk], &iwork[
+ indiwo], &iwork[indifl], info);
+
+/* Apply unitary matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by ZSTEIN. */
+
+ indwkn = indwk;
+ llwrkn = *lwork - indwkn + 1;
+ zunmtr_("L", uplo, "N", n, m, &a[a_offset], lda, &work[indtau], &z__[
+ z_offset], ldz, &work[indwkn], &llwrkn, &iinfo);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+L30:
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *m;
+ } else {
+ imax = *info - 1;
+ }
+ d__1 = 1. / sigma;
+ dscal_(&imax, &d__1, &w[1], &c__1);
+ }
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+ if (wantz) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = 0;
+ tmp1 = w[j];
+ i__2 = *m;
+ for (jj = j + 1; jj <= i__2; ++jj) {
+ if (w[jj] < tmp1) {
+ i__ = jj;
+ tmp1 = w[jj];
+ }
+/* L40: */
+ }
+
+ if (i__ != 0) {
+ itmp1 = iwork[indibl + i__ - 1];
+ w[i__] = w[j];
+ iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+ w[j] = tmp1;
+ iwork[indibl + j - 1] = itmp1;
+ zswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+ &c__1);
+ }
+/* L50: */
+ }
+ }
+
+/* Set WORK(1) to optimal workspace size. */
+
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ rwork[1] = (doublereal) lrwmin;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of ZHEEVR */
+
+} /* zheevr_ */
diff --git a/contrib/libs/clapack/zheevx.c b/contrib/libs/clapack/zheevx.c
new file mode 100644
index 0000000000..30a609028b
--- /dev/null
+++ b/contrib/libs/clapack/zheevx.c
@@ -0,0 +1,548 @@
+/* zheevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zheevx_(char *jobz, char *range, char *uplo, integer *n,
+ doublecomplex *a, integer *lda, doublereal *vl, doublereal *vu,
+ integer *il, integer *iu, doublereal *abstol, integer *m, doublereal *
+ w, doublecomplex *z__, integer *ldz, doublecomplex *work, integer *
+ lwork, doublereal *rwork, integer *iwork, integer *ifail, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, z_dim1, z_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, nb, jj;
+ doublereal eps, vll, vuu, tmp1;
+ integer indd, inde;
+ doublereal anrm;
+ integer imax;
+ doublereal rmin, rmax;
+ logical test;
+ integer itmp1, indee;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ doublereal sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ char order[1];
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ logical lower, wantz;
+ extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ extern doublereal dlamch_(char *);
+ logical alleig, indeig;
+ integer iscale, indibl;
+ logical valeig;
+ doublereal safmin;
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+ integer *, doublereal *, doublecomplex *, integer *);
+ doublereal abstll, bignum;
+ extern doublereal zlanhe_(char *, char *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ integer indiwk, indisp, indtau;
+ extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+ integer *), dstebz_(char *, char *, integer *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *, doublereal *, integer *, integer *);
+ integer indrwk, indwrk;
+ extern /* Subroutine */ int zhetrd_(char *, integer *, doublecomplex *,
+ integer *, doublereal *, doublereal *, doublecomplex *,
+ doublecomplex *, integer *, integer *);
+ integer lwkmin;
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ integer llwork, nsplit;
+ doublereal smlnum;
+ extern /* Subroutine */ int zstein_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *, integer *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int zsteqr_(char *, integer *, doublereal *,
+ doublereal *, doublecomplex *, integer *, doublereal *, integer *), zungtr_(char *, integer *, doublecomplex *, integer *,
+ doublecomplex *, doublecomplex *, integer *, integer *),
+ zunmtr_(char *, char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHEEVX computes selected eigenvalues and, optionally, eigenvectors */
+/* of a complex Hermitian matrix A. Eigenvalues and eigenvectors can */
+/* be selected by specifying either a range of values or a range of */
+/* indices for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA, N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+/* On exit, the lower triangle (if UPLO='L') or the upper */
+/* triangle (if UPLO='U') of A, including the diagonal, is */
+/* destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) DOUBLE PRECISION */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing A to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*DLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*DLAMCH('S'). */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices */
+/* with Guaranteed High Relative Accuracy," by Demmel and */
+/* Kahan, LAPACK Working Note #3. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* On normal exit, the first M elements contain the selected */
+/* eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX*16 array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and the */
+/* index of the eigenvector is returned in IFAIL. */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= 1, when N <= 1; */
+/* otherwise 2*N. */
+/* For optimal efficiency, LWORK >= (NB+1)*N, */
+/* where NB is the max of the blocksize for ZHETRD and for */
+/* ZUNMTR as returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (7*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, then i eigenvectors failed to converge. */
+/* Their indices are stored in array IFAIL. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ lower = lsame_(uplo, "L");
+ wantz = lsame_(jobz, "V");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+ lquery = *lwork == -1;
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -2;
+ } else if (! (lower || lsame_(uplo, "U"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -8;
+ }
+ } else if (indeig) {
+ if (*il < 1 || *il > max(1,*n)) {
+ *info = -9;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -10;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -15;
+ }
+ }
+
+ if (*info == 0) {
+ if (*n <= 1) {
+ lwkmin = 1;
+ work[1].r = (doublereal) lwkmin, work[1].i = 0.;
+ } else {
+ lwkmin = *n << 1;
+ nb = ilaenv_(&c__1, "ZHETRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = nb, i__2 = ilaenv_(&c__1, "ZUNMTR", uplo, n, &c_n1, &c_n1,
+ &c_n1);
+ nb = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = 1, i__2 = (nb + 1) * *n;
+ lwkopt = max(i__1,i__2);
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ }
+
+/* Computing MAX */
+ i__1 = 1, i__2 = *n << 1;
+ if (*lwork < max(i__1,i__2) && ! lquery) {
+ *info = -17;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHEEVX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (alleig || indeig) {
+ *m = 1;
+ i__1 = a_dim1 + 1;
+ w[1] = a[i__1].r;
+ } else if (valeig) {
+ i__1 = a_dim1 + 1;
+ i__2 = a_dim1 + 1;
+ if (*vl < a[i__1].r && *vu >= a[i__2].r) {
+ *m = 1;
+ i__1 = a_dim1 + 1;
+ w[1] = a[i__1].r;
+ }
+ }
+ if (wantz) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1., z__[i__1].i = 0.;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = dlamch_("Safe minimum");
+ eps = dlamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1. / smlnum;
+ rmin = sqrt(smlnum);
+/* Computing MIN */
+ d__1 = sqrt(bignum), d__2 = 1. / sqrt(sqrt(safmin));
+ rmax = min(d__1,d__2);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ iscale = 0;
+ abstll = *abstol;
+ if (valeig) {
+ vll = *vl;
+ vuu = *vu;
+ }
+ anrm = zlanhe_("M", uplo, n, &a[a_offset], lda, &rwork[1]);
+ if (anrm > 0. && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ if (lower) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j + 1;
+ zdscal_(&i__2, &sigma, &a[j + j * a_dim1], &c__1);
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ zdscal_(&j, &sigma, &a[j * a_dim1 + 1], &c__1);
+/* L20: */
+ }
+ }
+ if (*abstol > 0.) {
+ abstll = *abstol * sigma;
+ }
+ if (valeig) {
+ vll = *vl * sigma;
+ vuu = *vu * sigma;
+ }
+ }
+
+/* Call ZHETRD to reduce Hermitian matrix to tridiagonal form. */
+
+ indd = 1;
+ inde = indd + *n;
+ indrwk = inde + *n;
+ indtau = 1;
+ indwrk = indtau + *n;
+ llwork = *lwork - indwrk + 1;
+ zhetrd_(uplo, n, &a[a_offset], lda, &rwork[indd], &rwork[inde], &work[
+ indtau], &work[indwrk], &llwork, &iinfo);
+
+/* If all eigenvalues are desired and ABSTOL is less than or equal to */
+/* zero, then call DSTERF or ZUNGTR and ZSTEQR. If this fails for */
+/* some eigenvalue, then try DSTEBZ. */
+
+ test = FALSE_;
+ if (indeig) {
+ if (*il == 1 && *iu == *n) {
+ test = TRUE_;
+ }
+ }
+ if ((alleig || test) && *abstol <= 0.) {
+ dcopy_(n, &rwork[indd], &c__1, &w[1], &c__1);
+ indee = indrwk + (*n << 1);
+ if (! wantz) {
+ i__1 = *n - 1;
+ dcopy_(&i__1, &rwork[inde], &c__1, &rwork[indee], &c__1);
+ dsterf_(n, &w[1], &rwork[indee], info);
+ } else {
+ zlacpy_("A", n, n, &a[a_offset], lda, &z__[z_offset], ldz);
+ zungtr_(uplo, n, &z__[z_offset], ldz, &work[indtau], &work[indwrk]
+, &llwork, &iinfo);
+ i__1 = *n - 1;
+ dcopy_(&i__1, &rwork[inde], &c__1, &rwork[indee], &c__1);
+ zsteqr_(jobz, n, &w[1], &rwork[indee], &z__[z_offset], ldz, &
+ rwork[indrwk], info);
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ifail[i__] = 0;
+/* L30: */
+ }
+ }
+ }
+ if (*info == 0) {
+ *m = *n;
+ goto L40;
+ }
+ *info = 0;
+ }
+
+/* Otherwise, call DSTEBZ and, if eigenvectors are desired, ZSTEIN. */
+
+ if (wantz) {
+ *(unsigned char *)order = 'B';
+ } else {
+ *(unsigned char *)order = 'E';
+ }
+ indibl = 1;
+ indisp = indibl + *n;
+ indiwk = indisp + *n;
+ dstebz_(range, order, n, &vll, &vuu, il, iu, &abstll, &rwork[indd], &
+ rwork[inde], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &
+ rwork[indrwk], &iwork[indiwk], info);
+
+ if (wantz) {
+ zstein_(n, &rwork[indd], &rwork[inde], m, &w[1], &iwork[indibl], &
+ iwork[indisp], &z__[z_offset], ldz, &rwork[indrwk], &iwork[
+ indiwk], &ifail[1], info);
+
+/* Apply unitary matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by ZSTEIN. */
+
+ zunmtr_("L", uplo, "N", n, m, &a[a_offset], lda, &work[indtau], &z__[
+ z_offset], ldz, &work[indwrk], &llwork, &iinfo);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+L40:
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *m;
+ } else {
+ imax = *info - 1;
+ }
+ d__1 = 1. / sigma;
+ dscal_(&imax, &d__1, &w[1], &c__1);
+ }
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+ if (wantz) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = 0;
+ tmp1 = w[j];
+ i__2 = *m;
+ for (jj = j + 1; jj <= i__2; ++jj) {
+ if (w[jj] < tmp1) {
+ i__ = jj;
+ tmp1 = w[jj];
+ }
+/* L50: */
+ }
+
+ if (i__ != 0) {
+ itmp1 = iwork[indibl + i__ - 1];
+ w[i__] = w[j];
+ iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+ w[j] = tmp1;
+ iwork[indibl + j - 1] = itmp1;
+ zswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+ &c__1);
+ if (*info != 0) {
+ itmp1 = ifail[i__];
+ ifail[i__] = ifail[j];
+ ifail[j] = itmp1;
+ }
+ }
+/* L60: */
+ }
+ }
+
+/* Set WORK(1) to optimal complex workspace size. */
+
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZHEEVX */
+
+} /* zheevx_ */
diff --git a/contrib/libs/clapack/zhegs2.c b/contrib/libs/clapack/zhegs2.c
new file mode 100644
index 0000000000..3e7f85e1cc
--- /dev/null
+++ b/contrib/libs/clapack/zhegs2.c
@@ -0,0 +1,338 @@
+/* zhegs2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zhegs2_(integer *itype, char *uplo, integer *n,
+ doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+ doublereal d__1, d__2;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer k;
+ doublecomplex ct;
+ doublereal akk, bkk;
+ extern /* Subroutine */ int zher2_(char *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern /* Subroutine */ int zaxpy_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), ztrmv_(
+ char *, char *, char *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), ztrsv_(char *
+, char *, char *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), xerbla_(char
+ *, integer *), zdscal_(integer *, doublereal *,
+ doublecomplex *, integer *), zlacgv_(integer *, doublecomplex *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHEGS2 reduces a complex Hermitian-definite generalized */
+/* eigenproblem to standard form. */
+
+/* If ITYPE = 1, the problem is A*x = lambda*B*x, */
+/* and A is overwritten by inv(U')*A*inv(U) or inv(L)*A*inv(L') */
+
+/* If ITYPE = 2 or 3, the problem is A*B*x = lambda*x or */
+/* B*A*x = lambda*x, and A is overwritten by U*A*U` or L'*A*L. */
+
+/* B must have been previously factorized as U'*U or L*L' by ZPOTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* = 1: compute inv(U')*A*inv(U) or inv(L)*A*inv(L'); */
+/* = 2 or 3: compute U*A*U' or L'*A*L. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* Hermitian matrix A is stored, and how B has been factorized. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the transformed matrix, stored in the */
+/* same format as A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,N) */
+/* The triangular factor from the Cholesky factorization of B, */
+/* as returned by ZPOTRF. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHEGS2", &i__1);
+ return 0;
+ }
+
+ if (*itype == 1) {
+ if (upper) {
+
+/* Compute inv(U')*A*inv(U) */
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+
+/* Update the upper triangle of A(k:n,k:n) */
+
+ i__2 = k + k * a_dim1;
+ akk = a[i__2].r;
+ i__2 = k + k * b_dim1;
+ bkk = b[i__2].r;
+/* Computing 2nd power */
+ d__1 = bkk;
+ akk /= d__1 * d__1;
+ i__2 = k + k * a_dim1;
+ a[i__2].r = akk, a[i__2].i = 0.;
+ if (k < *n) {
+ i__2 = *n - k;
+ d__1 = 1. / bkk;
+ zdscal_(&i__2, &d__1, &a[k + (k + 1) * a_dim1], lda);
+ d__1 = akk * -.5;
+ ct.r = d__1, ct.i = 0.;
+ i__2 = *n - k;
+ zlacgv_(&i__2, &a[k + (k + 1) * a_dim1], lda);
+ i__2 = *n - k;
+ zlacgv_(&i__2, &b[k + (k + 1) * b_dim1], ldb);
+ i__2 = *n - k;
+ zaxpy_(&i__2, &ct, &b[k + (k + 1) * b_dim1], ldb, &a[k + (
+ k + 1) * a_dim1], lda);
+ i__2 = *n - k;
+ z__1.r = -1., z__1.i = -0.;
+ zher2_(uplo, &i__2, &z__1, &a[k + (k + 1) * a_dim1], lda,
+ &b[k + (k + 1) * b_dim1], ldb, &a[k + 1 + (k + 1)
+ * a_dim1], lda);
+ i__2 = *n - k;
+ zaxpy_(&i__2, &ct, &b[k + (k + 1) * b_dim1], ldb, &a[k + (
+ k + 1) * a_dim1], lda);
+ i__2 = *n - k;
+ zlacgv_(&i__2, &b[k + (k + 1) * b_dim1], ldb);
+ i__2 = *n - k;
+ ztrsv_(uplo, "Conjugate transpose", "Non-unit", &i__2, &b[
+ k + 1 + (k + 1) * b_dim1], ldb, &a[k + (k + 1) *
+ a_dim1], lda);
+ i__2 = *n - k;
+ zlacgv_(&i__2, &a[k + (k + 1) * a_dim1], lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute inv(L)*A*inv(L') */
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+
+/* Update the lower triangle of A(k:n,k:n) */
+
+ i__2 = k + k * a_dim1;
+ akk = a[i__2].r;
+ i__2 = k + k * b_dim1;
+ bkk = b[i__2].r;
+/* Computing 2nd power */
+ d__1 = bkk;
+ akk /= d__1 * d__1;
+ i__2 = k + k * a_dim1;
+ a[i__2].r = akk, a[i__2].i = 0.;
+ if (k < *n) {
+ i__2 = *n - k;
+ d__1 = 1. / bkk;
+ zdscal_(&i__2, &d__1, &a[k + 1 + k * a_dim1], &c__1);
+ d__1 = akk * -.5;
+ ct.r = d__1, ct.i = 0.;
+ i__2 = *n - k;
+ zaxpy_(&i__2, &ct, &b[k + 1 + k * b_dim1], &c__1, &a[k +
+ 1 + k * a_dim1], &c__1);
+ i__2 = *n - k;
+ z__1.r = -1., z__1.i = -0.;
+ zher2_(uplo, &i__2, &z__1, &a[k + 1 + k * a_dim1], &c__1,
+ &b[k + 1 + k * b_dim1], &c__1, &a[k + 1 + (k + 1)
+ * a_dim1], lda);
+ i__2 = *n - k;
+ zaxpy_(&i__2, &ct, &b[k + 1 + k * b_dim1], &c__1, &a[k +
+ 1 + k * a_dim1], &c__1);
+ i__2 = *n - k;
+ ztrsv_(uplo, "No transpose", "Non-unit", &i__2, &b[k + 1
+ + (k + 1) * b_dim1], ldb, &a[k + 1 + k * a_dim1],
+ &c__1);
+ }
+/* L20: */
+ }
+ }
+ } else {
+ if (upper) {
+
+/* Compute U*A*U' */
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+
+/* Update the upper triangle of A(1:k,1:k) */
+
+ i__2 = k + k * a_dim1;
+ akk = a[i__2].r;
+ i__2 = k + k * b_dim1;
+ bkk = b[i__2].r;
+ i__2 = k - 1;
+ ztrmv_(uplo, "No transpose", "Non-unit", &i__2, &b[b_offset],
+ ldb, &a[k * a_dim1 + 1], &c__1);
+ d__1 = akk * .5;
+ ct.r = d__1, ct.i = 0.;
+ i__2 = k - 1;
+ zaxpy_(&i__2, &ct, &b[k * b_dim1 + 1], &c__1, &a[k * a_dim1 +
+ 1], &c__1);
+ i__2 = k - 1;
+ zher2_(uplo, &i__2, &c_b1, &a[k * a_dim1 + 1], &c__1, &b[k *
+ b_dim1 + 1], &c__1, &a[a_offset], lda);
+ i__2 = k - 1;
+ zaxpy_(&i__2, &ct, &b[k * b_dim1 + 1], &c__1, &a[k * a_dim1 +
+ 1], &c__1);
+ i__2 = k - 1;
+ zdscal_(&i__2, &bkk, &a[k * a_dim1 + 1], &c__1);
+ i__2 = k + k * a_dim1;
+/* Computing 2nd power */
+ d__2 = bkk;
+ d__1 = akk * (d__2 * d__2);
+ a[i__2].r = d__1, a[i__2].i = 0.;
+/* L30: */
+ }
+ } else {
+
+/* Compute L'*A*L */
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+
+/* Update the lower triangle of A(1:k,1:k) */
+
+ i__2 = k + k * a_dim1;
+ akk = a[i__2].r;
+ i__2 = k + k * b_dim1;
+ bkk = b[i__2].r;
+ i__2 = k - 1;
+ zlacgv_(&i__2, &a[k + a_dim1], lda);
+ i__2 = k - 1;
+ ztrmv_(uplo, "Conjugate transpose", "Non-unit", &i__2, &b[
+ b_offset], ldb, &a[k + a_dim1], lda);
+ d__1 = akk * .5;
+ ct.r = d__1, ct.i = 0.;
+ i__2 = k - 1;
+ zlacgv_(&i__2, &b[k + b_dim1], ldb);
+ i__2 = k - 1;
+ zaxpy_(&i__2, &ct, &b[k + b_dim1], ldb, &a[k + a_dim1], lda);
+ i__2 = k - 1;
+ zher2_(uplo, &i__2, &c_b1, &a[k + a_dim1], lda, &b[k + b_dim1]
+, ldb, &a[a_offset], lda);
+ i__2 = k - 1;
+ zaxpy_(&i__2, &ct, &b[k + b_dim1], ldb, &a[k + a_dim1], lda);
+ i__2 = k - 1;
+ zlacgv_(&i__2, &b[k + b_dim1], ldb);
+ i__2 = k - 1;
+ zdscal_(&i__2, &bkk, &a[k + a_dim1], lda);
+ i__2 = k - 1;
+ zlacgv_(&i__2, &a[k + a_dim1], lda);
+ i__2 = k + k * a_dim1;
+/* Computing 2nd power */
+ d__2 = bkk;
+ d__1 = akk * (d__2 * d__2);
+ a[i__2].r = d__1, a[i__2].i = 0.;
+/* L40: */
+ }
+ }
+ }
+ return 0;
+
+/* End of ZHEGS2 */
+
+} /* zhegs2_ */
diff --git a/contrib/libs/clapack/zhegst.c b/contrib/libs/clapack/zhegst.c
new file mode 100644
index 0000000000..12d9fa36d9
--- /dev/null
+++ b/contrib/libs/clapack/zhegst.c
@@ -0,0 +1,353 @@
+/* zhegst.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static doublecomplex c_b2 = {.5,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b18 = 1.;
+
+/* Subroutine */ int zhegst_(integer *itype, char *uplo, integer *n,
+ doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer k, kb, nb;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zhemm_(char *, char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int ztrmm_(char *, char *, char *, char *,
+ integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *),
+ ztrsm_(char *, char *, char *, char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *), zhegs2_(integer *,
+ char *, integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *, integer *), zher2k_(char *, char *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublereal *, doublecomplex *,
+ integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHEGST reduces a complex Hermitian-definite generalized */
+/* eigenproblem to standard form. */
+
+/* If ITYPE = 1, the problem is A*x = lambda*B*x, */
+/* and A is overwritten by inv(U**H)*A*inv(U) or inv(L)*A*inv(L**H) */
+
+/* If ITYPE = 2 or 3, the problem is A*B*x = lambda*x or */
+/* B*A*x = lambda*x, and A is overwritten by U*A*U**H or L**H*A*L. */
+
+/* B must have been previously factorized as U**H*U or L*L**H by ZPOTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* = 1: compute inv(U**H)*A*inv(U) or inv(L)*A*inv(L**H); */
+/* = 2 or 3: compute U*A*U**H or L**H*A*L. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored and B is factored as */
+/* U**H*U; */
+/* = 'L': Lower triangle of A is stored and B is factored as */
+/* L*L**H. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the transformed matrix, stored in the */
+/* same format as A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,N) */
+/* The triangular factor from the Cholesky factorization of B, */
+/* as returned by ZPOTRF. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHEGST", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "ZHEGST", uplo, n, &c_n1, &c_n1, &c_n1);
+
+ if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code */
+
+ zhegs2_(itype, uplo, n, &a[a_offset], lda, &b[b_offset], ldb, info);
+ } else {
+
+/* Use blocked code */
+
+ if (*itype == 1) {
+ if (upper) {
+
+/* Compute inv(U')*A*inv(U) */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (k = 1; i__2 < 0 ? k >= i__1 : k <= i__1; k += i__2) {
+/* Computing MIN */
+ i__3 = *n - k + 1;
+ kb = min(i__3,nb);
+
+/* Update the upper triangle of A(k:n,k:n) */
+
+ zhegs2_(itype, uplo, &kb, &a[k + k * a_dim1], lda, &b[k +
+ k * b_dim1], ldb, info);
+ if (k + kb <= *n) {
+ i__3 = *n - k - kb + 1;
+ ztrsm_("Left", uplo, "Conjugate transpose", "Non-unit"
+, &kb, &i__3, &c_b1, &b[k + k * b_dim1], ldb,
+ &a[k + (k + kb) * a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ z__1.r = -.5, z__1.i = -0.;
+ zhemm_("Left", uplo, &kb, &i__3, &z__1, &a[k + k *
+ a_dim1], lda, &b[k + (k + kb) * b_dim1], ldb,
+ &c_b1, &a[k + (k + kb) * a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ z__1.r = -1., z__1.i = -0.;
+ zher2k_(uplo, "Conjugate transpose", &i__3, &kb, &
+ z__1, &a[k + (k + kb) * a_dim1], lda, &b[k + (
+ k + kb) * b_dim1], ldb, &c_b18, &a[k + kb + (
+ k + kb) * a_dim1], lda)
+ ;
+ i__3 = *n - k - kb + 1;
+ z__1.r = -.5, z__1.i = -0.;
+ zhemm_("Left", uplo, &kb, &i__3, &z__1, &a[k + k *
+ a_dim1], lda, &b[k + (k + kb) * b_dim1], ldb,
+ &c_b1, &a[k + (k + kb) * a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ ztrsm_("Right", uplo, "No transpose", "Non-unit", &kb,
+ &i__3, &c_b1, &b[k + kb + (k + kb) * b_dim1],
+ ldb, &a[k + (k + kb) * a_dim1], lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute inv(L)*A*inv(L') */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (k = 1; i__1 < 0 ? k >= i__2 : k <= i__2; k += i__1) {
+/* Computing MIN */
+ i__3 = *n - k + 1;
+ kb = min(i__3,nb);
+
+/* Update the lower triangle of A(k:n,k:n) */
+
+ zhegs2_(itype, uplo, &kb, &a[k + k * a_dim1], lda, &b[k +
+ k * b_dim1], ldb, info);
+ if (k + kb <= *n) {
+ i__3 = *n - k - kb + 1;
+ ztrsm_("Right", uplo, "Conjugate transpose", "Non-un"
+ "it", &i__3, &kb, &c_b1, &b[k + k * b_dim1],
+ ldb, &a[k + kb + k * a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ z__1.r = -.5, z__1.i = -0.;
+ zhemm_("Right", uplo, &i__3, &kb, &z__1, &a[k + k *
+ a_dim1], lda, &b[k + kb + k * b_dim1], ldb, &
+ c_b1, &a[k + kb + k * a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ z__1.r = -1., z__1.i = -0.;
+ zher2k_(uplo, "No transpose", &i__3, &kb, &z__1, &a[k
+ + kb + k * a_dim1], lda, &b[k + kb + k *
+ b_dim1], ldb, &c_b18, &a[k + kb + (k + kb) *
+ a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ z__1.r = -.5, z__1.i = -0.;
+ zhemm_("Right", uplo, &i__3, &kb, &z__1, &a[k + k *
+ a_dim1], lda, &b[k + kb + k * b_dim1], ldb, &
+ c_b1, &a[k + kb + k * a_dim1], lda);
+ i__3 = *n - k - kb + 1;
+ ztrsm_("Left", uplo, "No transpose", "Non-unit", &
+ i__3, &kb, &c_b1, &b[k + kb + (k + kb) *
+ b_dim1], ldb, &a[k + kb + k * a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+ } else {
+ if (upper) {
+
+/* Compute U*A*U' */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (k = 1; i__2 < 0 ? k >= i__1 : k <= i__1; k += i__2) {
+/* Computing MIN */
+ i__3 = *n - k + 1;
+ kb = min(i__3,nb);
+
+/* Update the upper triangle of A(1:k+kb-1,1:k+kb-1) */
+
+ i__3 = k - 1;
+ ztrmm_("Left", uplo, "No transpose", "Non-unit", &i__3, &
+ kb, &c_b1, &b[b_offset], ldb, &a[k * a_dim1 + 1],
+ lda);
+ i__3 = k - 1;
+ zhemm_("Right", uplo, &i__3, &kb, &c_b2, &a[k + k *
+ a_dim1], lda, &b[k * b_dim1 + 1], ldb, &c_b1, &a[
+ k * a_dim1 + 1], lda);
+ i__3 = k - 1;
+ zher2k_(uplo, "No transpose", &i__3, &kb, &c_b1, &a[k *
+ a_dim1 + 1], lda, &b[k * b_dim1 + 1], ldb, &c_b18,
+ &a[a_offset], lda);
+ i__3 = k - 1;
+ zhemm_("Right", uplo, &i__3, &kb, &c_b2, &a[k + k *
+ a_dim1], lda, &b[k * b_dim1 + 1], ldb, &c_b1, &a[
+ k * a_dim1 + 1], lda);
+ i__3 = k - 1;
+ ztrmm_("Right", uplo, "Conjugate transpose", "Non-unit", &
+ i__3, &kb, &c_b1, &b[k + k * b_dim1], ldb, &a[k *
+ a_dim1 + 1], lda);
+ zhegs2_(itype, uplo, &kb, &a[k + k * a_dim1], lda, &b[k +
+ k * b_dim1], ldb, info);
+/* L30: */
+ }
+ } else {
+
+/* Compute L'*A*L */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (k = 1; i__1 < 0 ? k >= i__2 : k <= i__2; k += i__1) {
+/* Computing MIN */
+ i__3 = *n - k + 1;
+ kb = min(i__3,nb);
+
+/* Update the lower triangle of A(1:k+kb-1,1:k+kb-1) */
+
+ i__3 = k - 1;
+ ztrmm_("Right", uplo, "No transpose", "Non-unit", &kb, &
+ i__3, &c_b1, &b[b_offset], ldb, &a[k + a_dim1],
+ lda);
+ i__3 = k - 1;
+ zhemm_("Left", uplo, &kb, &i__3, &c_b2, &a[k + k * a_dim1]
+, lda, &b[k + b_dim1], ldb, &c_b1, &a[k + a_dim1],
+ lda);
+ i__3 = k - 1;
+ zher2k_(uplo, "Conjugate transpose", &i__3, &kb, &c_b1, &
+ a[k + a_dim1], lda, &b[k + b_dim1], ldb, &c_b18, &
+ a[a_offset], lda);
+ i__3 = k - 1;
+ zhemm_("Left", uplo, &kb, &i__3, &c_b2, &a[k + k * a_dim1]
+, lda, &b[k + b_dim1], ldb, &c_b1, &a[k + a_dim1],
+ lda);
+ i__3 = k - 1;
+ ztrmm_("Left", uplo, "Conjugate transpose", "Non-unit", &
+ kb, &i__3, &c_b1, &b[k + k * b_dim1], ldb, &a[k +
+ a_dim1], lda);
+ zhegs2_(itype, uplo, &kb, &a[k + k * a_dim1], lda, &b[k +
+ k * b_dim1], ldb, info);
+/* L40: */
+ }
+ }
+ }
+ }
+ return 0;
+
+/* End of ZHEGST */
+
+} /* zhegst_ */
diff --git a/contrib/libs/clapack/zhegv.c b/contrib/libs/clapack/zhegv.c
new file mode 100644
index 0000000000..71eb8420ec
--- /dev/null
+++ b/contrib/libs/clapack/zhegv.c
@@ -0,0 +1,289 @@
+/* zhegv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zhegv_(integer *itype, char *jobz, char *uplo, integer *
+	n, doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb, 
+	doublereal *w, doublecomplex *work, integer *lwork, doublereal *rwork, 
+	 integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+
+    /* Local variables */
+    integer nb, neig;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zheev_(char *, char *, integer *, 
+	    doublecomplex *, integer *, doublereal *, doublecomplex *, 
+	    integer *, doublereal *, integer *);
+    char trans[1];
+    logical upper, wantz;
+    extern /* Subroutine */ int ztrmm_(char *, char *, char *, char *, 
+	    integer *, integer *, doublecomplex *, doublecomplex *, integer *, 
+	    doublecomplex *, integer *), 
+	    ztrsm_(char *, char *, char *, char *, integer *, integer *, 
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *, 
+	    integer *), xerbla_(char *, 
+	    integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    extern /* Subroutine */ int zhegst_(integer *, char *, integer *, 
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+    integer lwkopt;
+    logical lquery;
+    extern /* Subroutine */ int zpotrf_(char *, integer *, doublecomplex *, 
+	    integer *, integer *);
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZHEGV computes all the eigenvalues, and optionally, the eigenvectors */
+/*  of a complex generalized Hermitian-definite eigenproblem, of the form */
+/*  A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. */
+/*  Here A and B are assumed to be Hermitian and B is also */
+/*  positive definite. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  ITYPE   (input) INTEGER */
+/*          Specifies the problem type to be solved: */
+/*          = 1:  A*x = (lambda)*B*x */
+/*          = 2:  A*B*x = (lambda)*x */
+/*          = 3:  B*A*x = (lambda)*x */
+
+/*  JOBZ    (input) CHARACTER*1 */
+/*          = 'N':  Compute eigenvalues only; */
+/*          = 'V':  Compute eigenvalues and eigenvectors. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangles of A and B are stored; */
+/*          = 'L':  Lower triangles of A and B are stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrices A and B.  N >= 0. */
+
+/*  A       (input/output) COMPLEX*16 array, dimension (LDA, N) */
+/*          On entry, the Hermitian matrix A.  If UPLO = 'U', the */
+/*          leading N-by-N upper triangular part of A contains the */
+/*          upper triangular part of the matrix A.  If UPLO = 'L', */
+/*          the leading N-by-N lower triangular part of A contains */
+/*          the lower triangular part of the matrix A. */
+
+/*          On exit, if JOBZ = 'V', then if INFO = 0, A contains the */
+/*          matrix Z of eigenvectors.  The eigenvectors are normalized */
+/*          as follows: */
+/*          if ITYPE = 1 or 2, Z**H*B*Z = I; */
+/*          if ITYPE = 3, Z**H*inv(B)*Z = I. */
+/*          If JOBZ = 'N', then on exit the upper triangle (if UPLO='U') */
+/*          or the lower triangle (if UPLO='L') of A, including the */
+/*          diagonal, is destroyed. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  B       (input/output) COMPLEX*16 array, dimension (LDB, N) */
+/*          On entry, the Hermitian positive definite matrix B. */
+/*          If UPLO = 'U', the leading N-by-N upper triangular part of B */
+/*          contains the upper triangular part of the matrix B. */
+/*          If UPLO = 'L', the leading N-by-N lower triangular part of B */
+/*          contains the lower triangular part of the matrix B. */
+
+/*          On exit, if INFO <= N, the part of B containing the matrix is */
+/*          overwritten by the triangular factor U or L from the Cholesky */
+/*          factorization B = U**H*U or B = L*L**H. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  W       (output) DOUBLE PRECISION array, dimension (N) */
+/*          If INFO = 0, the eigenvalues in ascending order. */
+
+/*  WORK    (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/*          On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/*  LWORK   (input) INTEGER */
+/*          The length of the array WORK.  LWORK >= max(1,2*N-1). */
+/*          For optimal efficiency, LWORK >= (NB+1)*N, */
+/*          where NB is the blocksize for ZHETRD returned by ILAENV. */
+
+/*          If LWORK = -1, then a workspace query is assumed; the routine */
+/*          only calculates the optimal size of the WORK array, returns */
+/*          this value as the first entry of the WORK array, and no error */
+/*          message related to LWORK is issued by XERBLA. */
+
+/*  RWORK   (workspace) DOUBLE PRECISION array, dimension (max(1, 3*N-2)) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  ZPOTRF or ZHEEV returned an error code: */
+/*             <= N:  if INFO = i, ZHEEV failed to converge; */
+/*                    i off-diagonal elements of an intermediate */
+/*                    tridiagonal form did not converge to zero; */
+/*             > N:   if INFO = N + i, for 1 <= i <= N, then the leading */
+/*                    minor of order i of B is not positive definite. */
+/*                    The factorization of B could not be completed and */
+/*                    no eigenvalues or eigenvectors were computed. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+/* NOTE(review): f2c preserves Fortran's 1-based, column-major indexing by */
+/* shifting each array pointer down by its offset, so that a[i + j*a_dim1] */
+/* addresses the Fortran element A(i,j) and w[1] is the first eigenvalue. */
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --w;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    upper = lsame_(uplo, "U");
+/* LWORK = -1 requests a workspace-size query only (see LWORK above). */
+    lquery = *lwork == -1;
+
+    *info = 0;
+    if (*itype < 1 || *itype > 3) {
+	*info = -1;
+    } else if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -2;
+    } else if (! (upper || lsame_(uplo, "L"))) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*n)) {
+	*info = -6;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    }
+
+    if (*info == 0) {
+/* Optimal LWORK is (NB+1)*N with NB the ZHETRD blocksize from ILAENV; */
+/* it is reported in WORK(1) whether or not this is only a query. */
+	nb = ilaenv_(&c__1, "ZHETRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+	i__1 = 1, i__2 = (nb + 1) * *n;
+	lwkopt = max(i__1,i__2);
+	work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+/* Computing MAX */
+	i__1 = 1, i__2 = (*n << 1) - 1;
+	if (*lwork < max(i__1,i__2) && ! lquery) {
+	    *info = -11;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHEGV ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Form a Cholesky factorization of B. */
+
+    zpotrf_(uplo, n, &b[b_offset], ldb, info);
+    if (*info != 0) {
+/* B is not positive definite: encode the failing leading-minor order i */
+/* as INFO = N + i, per the INFO description above. */
+	*info = *n + *info;
+	return 0;
+    }
+
+/*     Transform problem to standard eigenvalue problem and solve. */
+
+    zhegst_(itype, uplo, n, &a[a_offset], lda, &b[b_offset], ldb, info);
+    zheev_(jobz, uplo, n, &a[a_offset], lda, &w[1], &work[1], lwork, &rwork[1]
+, info);
+
+    if (wantz) {
+
+/*        Backtransform eigenvectors to the original problem. */
+
+/* If ZHEEV reported a failure (INFO = i > 0), only the first i-1 */
+/* columns of A are backtransformed below. */
+	neig = *n;
+	if (*info > 0) {
+	    neig = *info - 1;
+	}
+	if (*itype == 1 || *itype == 2) {
+
+/*           For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/*           backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+	    if (upper) {
+		*(unsigned char *)trans = 'N';
+	    } else {
+		*(unsigned char *)trans = 'C';
+	    }
+
+	    ztrsm_("Left", uplo, trans, "Non-unit", n, &neig, &c_b1, &b[
+		    b_offset], ldb, &a[a_offset], lda);
+
+	} else if (*itype == 3) {
+
+/*           For B*A*x=(lambda)*x; */
+/*           backtransform eigenvectors: x = L*y or U'*y */
+
+	    if (upper) {
+		*(unsigned char *)trans = 'C';
+	    } else {
+		*(unsigned char *)trans = 'N';
+	    }
+
+	    ztrmm_("Left", uplo, trans, "Non-unit", n, &neig, &c_b1, &b[
+		    b_offset], ldb, &a[a_offset], lda);
+	}
+    }
+
+/* Re-report the optimal LWORK in WORK(1) before returning. */
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+    return 0;
+
+/*     End of ZHEGV */
+
+} /* zhegv_ */
diff --git a/contrib/libs/clapack/zhegvd.c b/contrib/libs/clapack/zhegvd.c
new file mode 100644
index 0000000000..6556caf357
--- /dev/null
+++ b/contrib/libs/clapack/zhegvd.c
@@ -0,0 +1,367 @@
+/* zhegvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+
+/* Subroutine */ int zhegvd_(integer *itype, char *jobz, char *uplo, integer *
+	n, doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb, 
+	doublereal *w, doublecomplex *work, integer *lwork, doublereal *rwork, 
+	 integer *lrwork, integer *iwork, integer *liwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+    doublereal d__1, d__2;
+
+    /* Local variables */
+    integer lopt;
+    extern logical lsame_(char *, char *);
+    integer lwmin;
+    char trans[1];
+    integer liopt;
+    logical upper;
+    integer lropt;
+    logical wantz;
+    extern /* Subroutine */ int ztrmm_(char *, char *, char *, char *, 
+	    integer *, integer *, doublecomplex *, doublecomplex *, integer *, 
+	    doublecomplex *, integer *), 
+	    ztrsm_(char *, char *, char *, char *, integer *, integer *, 
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *, 
+	    integer *), xerbla_(char *, 
+	    integer *), zheevd_(char *, char *, integer *, 
+	    doublecomplex *, integer *, doublereal *, doublecomplex *, 
+	    integer *, doublereal *, integer *, integer *, integer *, integer 
+	    *);
+    integer liwmin;
+    extern /* Subroutine */ int zhegst_(integer *, char *, integer *, 
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+    integer lrwmin;
+    logical lquery;
+    extern /* Subroutine */ int zpotrf_(char *, integer *, doublecomplex *, 
+	    integer *, integer *);
+
+
+/*  -- LAPACK driver routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZHEGVD computes all the eigenvalues, and optionally, the eigenvectors */
+/*  of a complex generalized Hermitian-definite eigenproblem, of the form */
+/*  A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x.  Here A and */
+/*  B are assumed to be Hermitian and B is also positive definite. */
+/*  If eigenvectors are desired, it uses a divide and conquer algorithm. */
+
+/*  The divide and conquer algorithm makes very mild assumptions about */
+/*  floating point arithmetic. It will work on machines with a guard */
+/*  digit in add/subtract, or on those binary machines without guard */
+/*  digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/*  Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/*  without guard digits, but we know of none. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  ITYPE   (input) INTEGER */
+/*          Specifies the problem type to be solved: */
+/*          = 1:  A*x = (lambda)*B*x */
+/*          = 2:  A*B*x = (lambda)*x */
+/*          = 3:  B*A*x = (lambda)*x */
+
+/*  JOBZ    (input) CHARACTER*1 */
+/*          = 'N':  Compute eigenvalues only; */
+/*          = 'V':  Compute eigenvalues and eigenvectors. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  Upper triangles of A and B are stored; */
+/*          = 'L':  Lower triangles of A and B are stored. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrices A and B.  N >= 0. */
+
+/*  A       (input/output) COMPLEX*16 array, dimension (LDA, N) */
+/*          On entry, the Hermitian matrix A.  If UPLO = 'U', the */
+/*          leading N-by-N upper triangular part of A contains the */
+/*          upper triangular part of the matrix A.  If UPLO = 'L', */
+/*          the leading N-by-N lower triangular part of A contains */
+/*          the lower triangular part of the matrix A. */
+
+/*          On exit, if JOBZ = 'V', then if INFO = 0, A contains the */
+/*          matrix Z of eigenvectors.  The eigenvectors are normalized */
+/*          as follows: */
+/*          if ITYPE = 1 or 2, Z**H*B*Z = I; */
+/*          if ITYPE = 3, Z**H*inv(B)*Z = I. */
+/*          If JOBZ = 'N', then on exit the upper triangle (if UPLO='U') */
+/*          or the lower triangle (if UPLO='L') of A, including the */
+/*          diagonal, is destroyed. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  B       (input/output) COMPLEX*16 array, dimension (LDB, N) */
+/*          On entry, the Hermitian matrix B.  If UPLO = 'U', the */
+/*          leading N-by-N upper triangular part of B contains the */
+/*          upper triangular part of the matrix B.  If UPLO = 'L', */
+/*          the leading N-by-N lower triangular part of B contains */
+/*          the lower triangular part of the matrix B. */
+
+/*          On exit, if INFO <= N, the part of B containing the matrix is */
+/*          overwritten by the triangular factor U or L from the Cholesky */
+/*          factorization B = U**H*U or B = L*L**H. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  W       (output) DOUBLE PRECISION array, dimension (N) */
+/*          If INFO = 0, the eigenvalues in ascending order. */
+
+/*  WORK    (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/*          On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/*  LWORK   (input) INTEGER */
+/*          The length of the array WORK. */
+/*          If N <= 1,                LWORK >= 1. */
+/*          If JOBZ  = 'N' and N > 1, LWORK >= N + 1. */
+/*          If JOBZ  = 'V' and N > 1, LWORK >= 2*N + N**2. */
+
+/*          If LWORK = -1, then a workspace query is assumed; the routine */
+/*          only calculates the optimal sizes of the WORK, RWORK and */
+/*          IWORK arrays, returns these values as the first entries of */
+/*          the WORK, RWORK and IWORK arrays, and no error message */
+/*          related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/*  RWORK   (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LRWORK)) */
+/*          On exit, if INFO = 0, RWORK(1) returns the optimal LRWORK. */
+
+/*  LRWORK  (input) INTEGER */
+/*          The dimension of the array RWORK. */
+/*          If N <= 1,                LRWORK >= 1. */
+/*          If JOBZ  = 'N' and N > 1, LRWORK >= N. */
+/*          If JOBZ  = 'V' and N > 1, LRWORK >= 1 + 5*N + 2*N**2. */
+
+/*          If LRWORK = -1, then a workspace query is assumed; the */
+/*          routine only calculates the optimal sizes of the WORK, RWORK */
+/*          and IWORK arrays, returns these values as the first entries */
+/*          of the WORK, RWORK and IWORK arrays, and no error message */
+/*          related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/*  IWORK   (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/*          On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/*  LIWORK  (input) INTEGER */
+/*          The dimension of the array IWORK. */
+/*          If N <= 1,                LIWORK >= 1. */
+/*          If JOBZ  = 'N' and N > 1, LIWORK >= 1. */
+/*          If JOBZ  = 'V' and N > 1, LIWORK >= 3 + 5*N. */
+
+/*          If LIWORK = -1, then a workspace query is assumed; the */
+/*          routine only calculates the optimal sizes of the WORK, RWORK */
+/*          and IWORK arrays, returns these values as the first entries */
+/*          of the WORK, RWORK and IWORK arrays, and no error message */
+/*          related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  ZPOTRF or ZHEEVD returned an error code: */
+/*             <= N:  if INFO = i and JOBZ = 'N', then the algorithm */
+/*                    failed to converge; i off-diagonal elements of an */
+/*                    intermediate tridiagonal form did not converge to */
+/*                    zero; */
+/*                    if INFO = i and JOBZ = 'V', then the algorithm */
+/*                    failed to compute an eigenvalue while working on */
+/*                    the submatrix lying in rows and columns INFO/(N+1) */
+/*                    through mod(INFO,N+1); */
+/*             > N:   if INFO = N + i, for 1 <= i <= N, then the leading */
+/*                    minor of order i of B is not positive definite. */
+/*                    The factorization of B could not be completed and */
+/*                    no eigenvalues or eigenvectors were computed. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*     Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/*  Modified so that no backsubstitution is performed if ZHEEVD fails to */
+/*  converge (NEIG in old code could be greater than N causing out of */
+/*  bounds reference to A - reported by Ralf Meyer).  Also corrected the */
+/*  description of INFO and the test on ITYPE. Sven, 16 Feb 05. */
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+/* NOTE(review): f2c preserves Fortran's 1-based, column-major indexing by */
+/* shifting each array pointer down by its offset, so that a[i + j*a_dim1] */
+/* addresses the Fortran element A(i,j) and work[1] is WORK(1). */
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --w;
+    --work;
+    --rwork;
+    --iwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+    upper = lsame_(uplo, "U");
+/* Any one of LWORK/LRWORK/LIWORK = -1 makes this a workspace query. */
+    lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1;
+
+/* Minimum workspace sizes follow the formulas documented above for */
+/* N <= 1, JOBZ = 'V', and JOBZ = 'N' respectively. */
+    *info = 0;
+    if (*n <= 1) {
+	lwmin = 1;
+	lrwmin = 1;
+	liwmin = 1;
+    } else if (wantz) {
+	lwmin = (*n << 1) + *n * *n;
+	lrwmin = *n * 5 + 1 + (*n << 1) * *n;
+	liwmin = *n * 5 + 3;
+    } else {
+	lwmin = *n + 1;
+	lrwmin = *n;
+	liwmin = 1;
+    }
+    lopt = lwmin;
+    lropt = lrwmin;
+    liopt = liwmin;
+    if (*itype < 1 || *itype > 3) {
+	*info = -1;
+    } else if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -2;
+    } else if (! (upper || lsame_(uplo, "L"))) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*n)) {
+	*info = -6;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    }
+
+    if (*info == 0) {
+	work[1].r = (doublereal) lopt, work[1].i = 0.;
+	rwork[1] = (doublereal) lropt;
+	iwork[1] = liopt;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -11;
+	} else if (*lrwork < lrwmin && ! lquery) {
+	    *info = -13;
+	} else if (*liwork < liwmin && ! lquery) {
+	    *info = -15;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHEGVD", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Form a Cholesky factorization of B. */
+
+    zpotrf_(uplo, n, &b[b_offset], ldb, info);
+    if (*info != 0) {
+/* B is not positive definite: encode the failing leading-minor order i */
+/* as INFO = N + i, per the INFO description above. */
+	*info = *n + *info;
+	return 0;
+    }
+
+/*     Transform problem to standard eigenvalue problem and solve. */
+
+    zhegst_(itype, uplo, n, &a[a_offset], lda, &b[b_offset], ldb, info);
+    zheevd_(jobz, uplo, n, &a[a_offset], lda, &w[1], &work[1], lwork, &rwork[
+	    1], lrwork, &iwork[1], liwork, info);
+/* Fold the optimal sizes ZHEEVD reported in WORK(1)/RWORK(1)/IWORK(1) */
+/* into our own minimums, so the larger of the two is returned. */
+/* Computing MAX */
+    d__1 = (doublereal) lopt, d__2 = work[1].r;
+    lopt = (integer) max(d__1,d__2);
+/* Computing MAX */
+    d__1 = (doublereal) lropt;
+    lropt = (integer) max(d__1,rwork[1]);
+/* Computing MAX */
+    d__1 = (doublereal) liopt, d__2 = (doublereal) iwork[1];
+    liopt = (integer) max(d__1,d__2);
+
+/* Backsubstitution is skipped entirely when ZHEEVD failed (INFO != 0); */
+/* see the Further Details note above about the old NEIG overrun. */
+    if (wantz && *info == 0) {
+
+/*        Backtransform eigenvectors to the original problem. */
+
+	if (*itype == 1 || *itype == 2) {
+
+/*           For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/*           backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+	    if (upper) {
+		*(unsigned char *)trans = 'N';
+	    } else {
+		*(unsigned char *)trans = 'C';
+	    }
+
+	    ztrsm_("Left", uplo, trans, "Non-unit", n, n, &c_b1, &b[b_offset], 
+		    ldb, &a[a_offset], lda);
+
+	} else if (*itype == 3) {
+
+/*           For B*A*x=(lambda)*x; */
+/*           backtransform eigenvectors: x = L*y or U'*y */
+
+	    if (upper) {
+		*(unsigned char *)trans = 'C';
+	    } else {
+		*(unsigned char *)trans = 'N';
+	    }
+
+	    ztrmm_("Left", uplo, trans, "Non-unit", n, n, &c_b1, &b[b_offset], 
+		    ldb, &a[a_offset], lda);
+	}
+    }
+
+/* Report the final optimal workspace sizes before returning. */
+    work[1].r = (doublereal) lopt, work[1].i = 0.;
+    rwork[1] = (doublereal) lropt;
+    iwork[1] = liopt;
+
+    return 0;
+
+/*     End of ZHEGVD */
+
+} /* zhegvd_ */
diff --git a/contrib/libs/clapack/zhegvx.c b/contrib/libs/clapack/zhegvx.c
new file mode 100644
index 0000000000..889981c501
--- /dev/null
+++ b/contrib/libs/clapack/zhegvx.c
@@ -0,0 +1,397 @@
+/* zhegvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zhegvx_(integer *itype, char *jobz, char *range, char *
+ uplo, integer *n, doublecomplex *a, integer *lda, doublecomplex *b,
+ integer *ldb, doublereal *vl, doublereal *vu, integer *il, integer *
+ iu, doublereal *abstol, integer *m, doublereal *w, doublecomplex *z__,
+ integer *ldz, doublecomplex *work, integer *lwork, doublereal *rwork,
+ integer *iwork, integer *ifail, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, z_dim1, z_offset, i__1, i__2;
+
+ /* Local variables */
+ integer nb;
+ extern logical lsame_(char *, char *);
+ char trans[1];
+ logical upper, wantz;
+ extern /* Subroutine */ int ztrmm_(char *, char *, char *, char *,
+ integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *),
+ ztrsm_(char *, char *, char *, char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *);
+ logical alleig, indeig, valeig;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int zhegst_(integer *, char *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *), zheevx_(char *, char *, char *, integer *,
+ doublecomplex *, integer *, doublereal *, doublereal *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublecomplex *
+, integer *, doublecomplex *, integer *, doublereal *, integer *,
+ integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int zpotrf_(char *, integer *, doublecomplex *,
+ integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHEGVX computes selected eigenvalues, and optionally, eigenvectors */
+/* of a complex generalized Hermitian-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. Here A and */
+/* B are assumed to be Hermitian and B is also positive definite. */
+/* Eigenvalues and eigenvectors can be selected by specifying either a */
+/* range of values or a range of indices for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+/* * */
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA, N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of A contains the */
+/* upper triangular part of the matrix A. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of A contains */
+/* the lower triangular part of the matrix A. */
+
+/* On exit, the lower triangle (if UPLO='L') or the upper */
+/* triangle (if UPLO='U') of A, including the diagonal, is */
+/* destroyed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB, N) */
+/* On entry, the Hermitian matrix B. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of B contains the */
+/* upper triangular part of the matrix B. If UPLO = 'L', */
+/* the leading N-by-N lower triangular part of B contains */
+/* the lower triangular part of the matrix B. */
+
+/* On exit, if INFO <= N, the part of B containing the matrix is */
+/* overwritten by the triangular factor U or L from the Cholesky */
+/* factorization B = U**H*U or B = L*L**H. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) DOUBLE PRECISION */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing A to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*DLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*DLAMCH('S'). */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* The first M elements contain the selected */
+/* eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX*16 array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* The eigenvectors are normalized as follows: */
+/* if ITYPE = 1 or 2, Z**T*B*Z = I; */
+/* if ITYPE = 3, Z**T*inv(B)*Z = I. */
+
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and the */
+/* index of the eigenvector is returned in IFAIL. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of the array WORK. LWORK >= max(1,2*N). */
+/* For optimal efficiency, LWORK >= (NB+1)*N, */
+/* where NB is the blocksize for ZHETRD returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (7*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: ZPOTRF or ZHEEVX returned an error code: */
+/* <= N: if INFO = i, ZHEEVX failed to converge; */
+/* i eigenvectors failed to converge. Their indices */
+/* are stored in array IFAIL. */
+/* > N: if INFO = N + i, for 1 <= i <= N, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+ lquery = *lwork == -1;
+
+ *info = 0;
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -2;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -3;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -11;
+ }
+ } else if (indeig) {
+ if (*il < 1 || *il > max(1,*n)) {
+ *info = -12;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -13;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -18;
+ }
+ }
+
+ if (*info == 0) {
+ nb = ilaenv_(&c__1, "ZHETRD", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = 1, i__2 = (nb + 1) * *n;
+ lwkopt = max(i__1,i__2);
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+/* Computing MAX */
+ i__1 = 1, i__2 = *n << 1;
+ if (*lwork < max(i__1,i__2) && ! lquery) {
+ *info = -20;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHEGVX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a Cholesky factorization of B. */
+
+ zpotrf_(uplo, n, &b[b_offset], ldb, info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+ zhegst_(itype, uplo, n, &a[a_offset], lda, &b[b_offset], ldb, info);
+ zheevx_(jobz, range, uplo, n, &a[a_offset], lda, vl, vu, il, iu, abstol,
+ m, &w[1], &z__[z_offset], ldz, &work[1], lwork, &rwork[1], &iwork[
+ 1], &ifail[1], info);
+
+ if (wantz) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+ if (*info > 0) {
+ *m = *info - 1;
+ }
+ if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'N';
+ } else {
+ *(unsigned char *)trans = 'C';
+ }
+
+ ztrsm_("Left", uplo, trans, "Non-unit", n, m, &c_b1, &b[b_offset],
+ ldb, &z__[z_offset], ldz);
+
+ } else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'C';
+ } else {
+ *(unsigned char *)trans = 'N';
+ }
+
+ ztrmm_("Left", uplo, trans, "Non-unit", n, m, &c_b1, &b[b_offset],
+ ldb, &z__[z_offset], ldz);
+ }
+ }
+
+/* Set WORK(1) to optimal complex workspace size. */
+
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZHEGVX */
+
+} /* zhegvx_ */
diff --git a/contrib/libs/clapack/zherfs.c b/contrib/libs/clapack/zherfs.c
new file mode 100644
index 0000000000..b92d421aff
--- /dev/null
+++ b/contrib/libs/clapack/zherfs.c
@@ -0,0 +1,473 @@
+/* zherfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zherfs_(char *uplo, integer *n, integer *nrhs,
+ doublecomplex *a, integer *lda, doublecomplex *af, integer *ldaf,
+ integer *ipiv, doublecomplex *b, integer *ldb, doublecomplex *x,
+ integer *ldx, doublereal *ferr, doublereal *berr, doublecomplex *work,
+ doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+ x_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal s, xk;
+ integer nz;
+ doublereal eps;
+ integer kase;
+ doublereal safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3], count;
+ extern /* Subroutine */ int zhemv_(char *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, doublecomplex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zaxpy_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), zlacn2_(
+ integer *, doublecomplex *, doublecomplex *, doublereal *,
+ integer *, integer *);
+ extern doublereal dlamch_(char *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal lstres;
+ extern /* Subroutine */ int zhetrs_(char *, integer *, integer *,
+ doublecomplex *, integer *, integer *, doublecomplex *, integer *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHERFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is Hermitian indefinite, and */
+/* provides error bounds and backward error estimates for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The Hermitian matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input) COMPLEX*16 array, dimension (LDAF,N) */
+/* The factored form of the matrix A. AF contains the block */
+/* diagonal matrix D and the multipliers used to obtain the */
+/* factor U or L from the factorization A = U*D*U**H or */
+/* A = L*D*L**H as computed by ZHETRF. */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by ZHETRF. */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by ZHETRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ af_dim1 = *ldaf;
+ af_offset = 1 + af_dim1;
+ af -= af_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldaf < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldx < max(1,*n)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHERFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.;
+ berr[j] = 0.;
+/* L10: */
+ }
+ return 0;
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = *n + 1;
+ eps = dlamch_("Epsilon");
+ safmin = dlamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+ zcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+ z__1.r = -1., z__1.i = -0.;
+ zhemv_(uplo, n, &z__1, &a[a_offset], lda, &x[j * x_dim1 + 1], &c__1, &
+ c_b1, &work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[i__] = (d__1 = b[i__3].r, abs(d__1)) + (d__2 = d_imag(&b[
+ i__ + j * b_dim1]), abs(d__2));
+/* L30: */
+ }
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+ if (upper) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ i__3 = k + j * x_dim1;
+ xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[k + j *
+ x_dim1]), abs(d__2));
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ rwork[i__] += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 =
+ d_imag(&a[i__ + k * a_dim1]), abs(d__2))) * xk;
+ i__4 = i__ + k * a_dim1;
+ i__5 = i__ + j * x_dim1;
+ s += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 = d_imag(&a[
+ i__ + k * a_dim1]), abs(d__2))) * ((d__3 = x[i__5]
+ .r, abs(d__3)) + (d__4 = d_imag(&x[i__ + j *
+ x_dim1]), abs(d__4)));
+/* L40: */
+ }
+ i__3 = k + k * a_dim1;
+ rwork[k] = rwork[k] + (d__1 = a[i__3].r, abs(d__1)) * xk + s;
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ i__3 = k + j * x_dim1;
+ xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[k + j *
+ x_dim1]), abs(d__2));
+ i__3 = k + k * a_dim1;
+ rwork[k] += (d__1 = a[i__3].r, abs(d__1)) * xk;
+ i__3 = *n;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ rwork[i__] += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 =
+ d_imag(&a[i__ + k * a_dim1]), abs(d__2))) * xk;
+ i__4 = i__ + k * a_dim1;
+ i__5 = i__ + j * x_dim1;
+ s += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 = d_imag(&a[
+ i__ + k * a_dim1]), abs(d__2))) * ((d__3 = x[i__5]
+ .r, abs(d__3)) + (d__4 = d_imag(&x[i__ + j *
+ x_dim1]), abs(d__4)));
+/* L60: */
+ }
+ rwork[k] += s;
+/* L70: */
+ }
+ }
+ s = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+/* Computing MAX */
+ i__3 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2))) / rwork[i__];
+ s = max(d__3,d__4);
+ } else {
+/* Computing MAX */
+ i__3 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + safe1) / (rwork[i__]
+ + safe1);
+ s = max(d__3,d__4);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ zhetrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[1],
+ n, info);
+ zaxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use ZLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+ i__3 = i__;
+ rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ ;
+ } else {
+ i__3 = i__;
+ rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ + safe1;
+ }
+/* L90: */
+ }
+
+ kase = 0;
+L100:
+ zlacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+ zhetrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[
+ 1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L110: */
+ }
+ } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L120: */
+ }
+ zhetrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[
+ 1], n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * x_dim1;
+ d__3 = lstres, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&x[i__ + j * x_dim1]), abs(d__2));
+ lstres = max(d__3,d__4);
+/* L130: */
+ }
+ if (lstres != 0.) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of ZHERFS */
+
+} /* zherfs_ */
diff --git a/contrib/libs/clapack/zhesv.c b/contrib/libs/clapack/zhesv.c
new file mode 100644
index 0000000000..76d08b7c8c
--- /dev/null
+++ b/contrib/libs/clapack/zhesv.c
@@ -0,0 +1,213 @@
+/* zhesv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zhesv_(char *uplo, integer *n, integer *nrhs,
+ doublecomplex *a, integer *lda, integer *ipiv, doublecomplex *b,
+ integer *ldb, doublecomplex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ integer nb;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int zhetrf_(char *, integer *, doublecomplex *,
+ integer *, integer *, doublecomplex *, integer *, integer *), zhetrs_(char *, integer *, integer *, doublecomplex *,
+ integer *, integer *, doublecomplex *, integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHESV computes the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N Hermitian matrix and X and B are N-by-NRHS */
+/* matrices. */
+
+/* The diagonal pivoting method is used to factor A as */
+/* A = U * D * U**H, if UPLO = 'U', or */
+/* A = L * D * L**H, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is Hermitian and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. The factored form of A is then */
+/* used to solve the system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the block diagonal matrix D and the */
+/* multipliers used to obtain the factor U or L from the */
+/* factorization A = U*D*U**H or A = L*D*L**H as computed by */
+/* ZHETRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D, as */
+/* determined by ZHETRF. If IPIV(k) > 0, then rows and columns */
+/* k and IPIV(k) were interchanged, and D(k,k) is a 1-by-1 */
+/* diagonal block. If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, */
+/* then rows and columns k-1 and -IPIV(k) were interchanged and */
+/* D(k-1:k,k-1:k) is a 2-by-2 diagonal block. If UPLO = 'L' and */
+/* IPIV(k) = IPIV(k+1) < 0, then rows and columns k+1 and */
+/* -IPIV(k) were interchanged and D(k:k+1,k:k+1) is a 2-by-2 */
+/* diagonal block. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of WORK. LWORK >= 1, and for best performance */
+/* LWORK >= max(1,N*NB), where NB is the optimal blocksize for */
+/* ZHETRF. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, so the solution could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ } else if (*lwork < 1 && ! lquery) {
+ *info = -10;
+ }
+
+ if (*info == 0) {
+ if (*n == 0) {
+ lwkopt = 1;
+ } else {
+ nb = ilaenv_(&c__1, "ZHETRF", uplo, n, &c_n1, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHESV ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+ zhetrf_(uplo, n, &a[a_offset], lda, &ipiv[1], &work[1], lwork, info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ zhetrs_(uplo, n, nrhs, &a[a_offset], lda, &ipiv[1], &b[b_offset], ldb,
+ info);
+
+ }
+
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZHESV */
+
+} /* zhesv_ */
diff --git a/contrib/libs/clapack/zhesvx.c b/contrib/libs/clapack/zhesvx.c
new file mode 100644
index 0000000000..71c8492c25
--- /dev/null
+++ b/contrib/libs/clapack/zhesvx.c
@@ -0,0 +1,368 @@
+/* zhesvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zhesvx_(char *fact, char *uplo, integer *n, integer *
+	nrhs, doublecomplex *a, integer *lda, doublecomplex *af, integer *
+	ldaf, integer *ipiv, doublecomplex *b, integer *ldb, doublecomplex *x,
+	integer *ldx, doublereal *rcond, doublereal *ferr, doublereal *berr,
+	doublecomplex *work, integer *lwork, doublereal *rwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+	    x_offset, i__1, i__2;
+
+    /* Local variables */
+    integer nb;
+    extern logical lsame_(char *, char *);
+    doublereal anorm;
+    extern doublereal dlamch_(char *);
+    logical nofact;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	     integer *, integer *);
+    extern doublereal zlanhe_(char *, char *, integer *, doublecomplex *,
+	    integer *, doublereal *);
+    extern /* Subroutine */ int zhecon_(char *, integer *, doublecomplex *,
+	    integer *, integer *, doublereal *, doublereal *, doublecomplex *,
+	     integer *), zherfs_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *,
+	     doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublereal *, doublereal *, doublecomplex *, doublereal *,
+	    integer *), zhetrf_(char *, integer *, doublecomplex *,
+	    integer *, integer *, doublecomplex *, integer *, integer *), zlacpy_(char *, integer *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, integer *), zhetrs_(char *,
+	    integer *, integer *, doublecomplex *, integer *, integer *,
+	    doublecomplex *, integer *, integer *);
+    integer lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHESVX uses the diagonal pivoting factorization to compute the */
+/* solution to a complex system of linear equations A * X = B, */
+/* where A is an N-by-N Hermitian matrix and X and B are N-by-NRHS */
+/* matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the diagonal pivoting method is used to factor A. */
+/* The form of the factorization is */
+/* A = U * D * U**H, if UPLO = 'U', or */
+/* A = L * D * L**H, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is Hermitian and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* 2. If some D(i,i)=0, so that D is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of A has been */
+/* supplied on entry. */
+/* = 'F': On entry, AF and IPIV contain the factored form */
+/* of A. A, AF and IPIV will not be modified. */
+/* = 'N': The matrix A will be copied to AF and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The Hermitian matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input or output) COMPLEX*16 array, dimension (LDAF,N) */
+/* If FACT = 'F', then AF is an input argument and on entry */
+/* contains the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**H or A = L*D*L**H as computed by ZHETRF. */
+
+/* If FACT = 'N', then AF is an output argument and on exit */
+/* returns the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**H or A = L*D*L**H. */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by ZHETRF. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by ZHETRF. */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A. If RCOND is less than the machine precision (in */
+/* particular, if RCOND = 0), the matrix is singular to working */
+/* precision. This condition is indicated by a return code of */
+/* INFO > 0. */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of WORK. LWORK >= max(1,2*N), and for best */
+/* performance, when FACT = 'N', LWORK >= max(1,2*N,N*NB), where */
+/* NB is the optimal blocksize for ZHETRF. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: D(i,i) is exactly zero. The factorization */
+/* has been completed but the factor D is exactly */
+/* singular, so the solution and error bounds could */
+/* not be computed. RCOND = 0 is returned. */
+/* = N+1: D is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* f2c keeps the Fortran 1-based indexing: each array pointer is shifted */
+/* back by its (1-based) offset so that expressions of the form */
+/* a[i + j * a_dim1] address Fortran element A(i,j). */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    af_dim1 = *ldaf;
+    af_offset = 1 + af_dim1;
+    af -= af_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    nofact = lsame_(fact, "N");
+    lquery = *lwork == -1;
+    if (! nofact && ! lsame_(fact, "F")) {
+	*info = -1;
+    } else if (! lsame_(uplo, "U") && ! lsame_(uplo,
+	    "L")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*n)) {
+	*info = -6;
+    } else if (*ldaf < max(1,*n)) {
+	*info = -8;
+    } else if (*ldb < max(1,*n)) {
+	*info = -11;
+    } else if (*ldx < max(1,*n)) {
+	*info = -13;
+    } else /* if(complicated condition) */ {
+/* Computing MAX */
+/* *n << 1 is 2*N: the minimum workspace is max(1, 2*N). */
+	i__1 = 1, i__2 = *n << 1;
+	if (*lwork < max(i__1,i__2) && ! lquery) {
+	    *info = -18;
+	}
+    }
+
+/* Compute the optimal workspace size: max(1, 2*N), raised to N*NB when */
+/* a fresh factorization will be performed (NB = ZHETRF block size). */
+/* It is reported to the caller through WORK(1). */
+
+    if (*info == 0) {
+/* Computing MAX */
+	i__1 = 1, i__2 = *n << 1;
+	lwkopt = max(i__1,i__2);
+	if (nofact) {
+	    nb = ilaenv_(&c__1, "ZHETRF", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+	    i__1 = lwkopt, i__2 = *n * nb;
+	    lwkopt = max(i__1,i__2);
+	}
+	work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHESVX", &i__1);
+	return 0;
+    } else if (lquery) {
+/* Workspace query (LWORK = -1): only WORK(1) is set; nothing solved. */
+	return 0;
+    }
+
+    if (nofact) {
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+	zlacpy_(uplo, n, n, &a[a_offset], lda, &af[af_offset], ldaf);
+	zhetrf_(uplo, n, &af[af_offset], ldaf, &ipiv[1], &work[1], lwork,
+		info);
+
+/* Return if INFO is non-zero. */
+
+	if (*info > 0) {
+	    *rcond = 0.;
+	    return 0;
+	}
+    }
+
+/* Compute the norm of the matrix A. */
+
+    anorm = zlanhe_("I", uplo, n, &a[a_offset], lda, &rwork[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+    zhecon_(uplo, n, &af[af_offset], ldaf, &ipiv[1], &anorm, rcond, &work[1],
+	    info);
+
+/* Compute the solution vectors X. */
+
+    zlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    zhetrs_(uplo, n, nrhs, &af[af_offset], ldaf, &ipiv[1], &x[x_offset], ldx,
+	    info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+    zherfs_(uplo, n, nrhs, &a[a_offset], lda, &af[af_offset], ldaf, &ipiv[1],
+	     &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[1]
+, &rwork[1], info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+    if (*rcond < dlamch_("Epsilon")) {
+	*info = *n + 1;
+    }
+
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+    return 0;
+
+/* End of ZHESVX */
+
+} /* zhesvx_ */
diff --git a/contrib/libs/clapack/zhetd2.c b/contrib/libs/clapack/zhetd2.c
new file mode 100644
index 0000000000..a783b5d4d5
--- /dev/null
+++ b/contrib/libs/clapack/zhetd2.c
@@ -0,0 +1,361 @@
+/* zhetd2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b2 = {0.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zhetd2_(char *uplo, integer *n, doublecomplex *a,
+	integer *lda, doublereal *d__, doublereal *e, doublecomplex *tau,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    doublereal d__1;
+    doublecomplex z__1, z__2, z__3, z__4;
+
+    /* Local variables */
+    integer i__;
+    doublecomplex taui;
+    extern /* Subroutine */ int zher2_(char *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *);
+    doublecomplex alpha;
+    extern logical lsame_(char *, char *);
+    extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *);
+    extern /* Subroutine */ int zhemv_(char *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, doublecomplex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int zaxpy_(integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(
+	    char *, integer *), zlarfg_(integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHETD2 reduces a complex Hermitian matrix A to real symmetric */
+/* tridiagonal form T by a unitary similarity transformation: */
+/* Q' * A * Q = T. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* Hermitian matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+/* On exit, if UPLO = 'U', the diagonal and first superdiagonal */
+/* of A are overwritten by the corresponding elements of the */
+/* tridiagonal matrix T, and the elements above the first */
+/* superdiagonal, with the array TAU, represent the unitary */
+/* matrix Q as a product of elementary reflectors; if UPLO */
+/* = 'L', the diagonal and first subdiagonal of A are over- */
+/* written by the corresponding elements of the tridiagonal */
+/* matrix T, and the elements below the first subdiagonal, with */
+/* the array TAU, represent the unitary matrix Q as a product */
+/* of elementary reflectors. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* D (output) DOUBLE PRECISION array, dimension (N) */
+/* The diagonal elements of the tridiagonal matrix T: */
+/* D(i) = A(i,i). */
+
+/* E (output) DOUBLE PRECISION array, dimension (N-1) */
+/* The off-diagonal elements of the tridiagonal matrix T: */
+/* E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. */
+
+/* TAU (output) COMPLEX*16 array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(n-1) . . . H(2) H(1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in */
+/* A(1:i-1,i+1), and tau in TAU(i). */
+
+/* If UPLO = 'L', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(1) H(2) . . . H(n-1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i), */
+/* and tau in TAU(i). */
+
+/* The contents of A on exit are illustrated by the following examples */
+/* with n = 5: */
+
+/* if UPLO = 'U': if UPLO = 'L': */
+
+/* ( d e v2 v3 v4 ) ( d ) */
+/* ( d e v3 v4 ) ( e d ) */
+/* ( d e v4 ) ( v1 e d ) */
+/* ( d e ) ( v1 v2 e d ) */
+/* ( d ) ( v1 v2 v3 e d ) */
+
+/* where d and e denote diagonal and off-diagonal elements of T, and vi */
+/* denotes an element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+/* f2c keeps the Fortran 1-based indexing: each array pointer is shifted */
+/* back by its offset so that a[i + j * a_dim1] addresses element A(i,j). */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --d__;
+    --e;
+    --tau;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHETD2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n <= 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/* Reduce the upper triangle of A */
+
+/* Force A(n,n) to its real part: the diagonal of a Hermitian matrix */
+/* is real, and T is built from real diagonal entries. */
+	i__1 = *n + *n * a_dim1;
+	i__2 = *n + *n * a_dim1;
+	d__1 = a[i__2].r;
+	a[i__1].r = d__1, a[i__1].i = 0.;
+	for (i__ = *n - 1; i__ >= 1; --i__) {
+
+/* Generate elementary reflector H(i) = I - tau * v * v' */
+/* to annihilate A(1:i-1,i+1) */
+
+	    i__1 = i__ + (i__ + 1) * a_dim1;
+	    alpha.r = a[i__1].r, alpha.i = a[i__1].i;
+	    zlarfg_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &taui);
+	    i__1 = i__;
+	    e[i__1] = alpha.r;
+
+	    if (taui.r != 0. || taui.i != 0.) {
+
+/* Apply H(i) from both sides to A(1:i,1:i) */
+
+/* Temporarily set v(i) = 1 so v can be passed to BLAS as a full vector. */
+		i__1 = i__ + (i__ + 1) * a_dim1;
+		a[i__1].r = 1., a[i__1].i = 0.;
+
+/* Compute x := tau * A * v storing x in TAU(1:i) */
+
+		zhemv_(uplo, &i__, &taui, &a[a_offset], lda, &a[(i__ + 1) *
+			a_dim1 + 1], &c__1, &c_b2, &tau[1], &c__1);
+
+/* Compute w := x - 1/2 * tau * (x'*v) * v */
+
+		z__3.r = -.5, z__3.i = -0.;
+		z__2.r = z__3.r * taui.r - z__3.i * taui.i, z__2.i = z__3.r *
+			taui.i + z__3.i * taui.r;
+		zdotc_(&z__4, &i__, &tau[1], &c__1, &a[(i__ + 1) * a_dim1 + 1]
+, &c__1);
+		z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r *
+			z__4.i + z__2.i * z__4.r;
+		alpha.r = z__1.r, alpha.i = z__1.i;
+		zaxpy_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &tau[
+			1], &c__1);
+
+/* Apply the transformation as a rank-2 update: */
+/* A := A - v * w' - w * v' */
+
+		z__1.r = -1., z__1.i = -0.;
+		zher2_(uplo, &i__, &z__1, &a[(i__ + 1) * a_dim1 + 1], &c__1, &
+			tau[1], &c__1, &a[a_offset], lda);
+
+	    } else {
+/* tau = 0: H(i) is the identity; just keep the diagonal entry real. */
+		i__1 = i__ + i__ * a_dim1;
+		i__2 = i__ + i__ * a_dim1;
+		d__1 = a[i__2].r;
+		a[i__1].r = d__1, a[i__1].i = 0.;
+	    }
+/* Restore A(i,i+1) = E(i) (was overwritten by v(i) = 1 above) and */
+/* record the diagonal entry and the reflector scalar. */
+	    i__1 = i__ + (i__ + 1) * a_dim1;
+	    i__2 = i__;
+	    a[i__1].r = e[i__2], a[i__1].i = 0.;
+	    i__1 = i__ + 1;
+	    i__2 = i__ + 1 + (i__ + 1) * a_dim1;
+	    d__[i__1] = a[i__2].r;
+	    i__1 = i__;
+	    tau[i__1].r = taui.r, tau[i__1].i = taui.i;
+/* L10: */
+	}
+	i__1 = a_dim1 + 1;
+	d__[1] = a[i__1].r;
+    } else {
+
+/* Reduce the lower triangle of A */
+
+/* Force A(1,1) to its real part (Hermitian diagonal is real). */
+	i__1 = a_dim1 + 1;
+	i__2 = a_dim1 + 1;
+	d__1 = a[i__2].r;
+	a[i__1].r = d__1, a[i__1].i = 0.;
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) = I - tau * v * v' */
+/* to annihilate A(i+2:n,i) */
+
+	    i__2 = i__ + 1 + i__ * a_dim1;
+	    alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+	    i__2 = *n - i__;
+/* Computing MIN */
+/* min(i+2, n) guards the i = n-1 step, where the tail v(i+2:n) is empty. */
+	    i__3 = i__ + 2;
+	    zlarfg_(&i__2, &alpha, &a[min(i__3, *n)+ i__ * a_dim1], &c__1, &
+		    taui);
+	    i__2 = i__;
+	    e[i__2] = alpha.r;
+
+	    if (taui.r != 0. || taui.i != 0.) {
+
+/* Apply H(i) from both sides to A(i+1:n,i+1:n) */
+
+/* Temporarily set v(i+1) = 1 so v can be passed to BLAS as a full vector. */
+		i__2 = i__ + 1 + i__ * a_dim1;
+		a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Compute x := tau * A * v storing y in TAU(i:n-1) */
+
+		i__2 = *n - i__;
+		zhemv_(uplo, &i__2, &taui, &a[i__ + 1 + (i__ + 1) * a_dim1],
+			lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b2, &tau[
+			i__], &c__1);
+
+/* Compute w := x - 1/2 * tau * (x'*v) * v */
+
+		z__3.r = -.5, z__3.i = -0.;
+		z__2.r = z__3.r * taui.r - z__3.i * taui.i, z__2.i = z__3.r *
+			taui.i + z__3.i * taui.r;
+		i__2 = *n - i__;
+		zdotc_(&z__4, &i__2, &tau[i__], &c__1, &a[i__ + 1 + i__ *
+			a_dim1], &c__1);
+		z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r *
+			z__4.i + z__2.i * z__4.r;
+		alpha.r = z__1.r, alpha.i = z__1.i;
+		i__2 = *n - i__;
+		zaxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+			i__], &c__1);
+
+/* Apply the transformation as a rank-2 update: */
+/* A := A - v * w' - w * v' */
+
+		i__2 = *n - i__;
+		z__1.r = -1., z__1.i = -0.;
+		zher2_(uplo, &i__2, &z__1, &a[i__ + 1 + i__ * a_dim1], &c__1,
+			&tau[i__], &c__1, &a[i__ + 1 + (i__ + 1) * a_dim1],
+			lda);
+
+	    } else {
+/* tau = 0: H(i) is the identity; just keep the diagonal entry real. */
+		i__2 = i__ + 1 + (i__ + 1) * a_dim1;
+		i__3 = i__ + 1 + (i__ + 1) * a_dim1;
+		d__1 = a[i__3].r;
+		a[i__2].r = d__1, a[i__2].i = 0.;
+	    }
+/* Restore A(i+1,i) = E(i) (was overwritten by v(i+1) = 1 above) and */
+/* record the diagonal entry and the reflector scalar. */
+	    i__2 = i__ + 1 + i__ * a_dim1;
+	    i__3 = i__;
+	    a[i__2].r = e[i__3], a[i__2].i = 0.;
+	    i__2 = i__;
+	    i__3 = i__ + i__ * a_dim1;
+	    d__[i__2] = a[i__3].r;
+	    i__2 = i__;
+	    tau[i__2].r = taui.r, tau[i__2].i = taui.i;
+/* L20: */
+	}
+	i__1 = *n;
+	i__2 = *n + *n * a_dim1;
+	d__[i__1] = a[i__2].r;
+    }
+
+    return 0;
+
+/* End of ZHETD2 */
+
+} /* zhetd2_ */
diff --git a/contrib/libs/clapack/zhetf2.c b/contrib/libs/clapack/zhetf2.c
new file mode 100644
index 0000000000..24a5289440
--- /dev/null
+++ b/contrib/libs/clapack/zhetf2.c
@@ -0,0 +1,802 @@
+/* zhetf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zhetf2_(char *uplo, integer *n, doublecomplex *a,
+ integer *lda, integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1, z__2, z__3, z__4, z__5, z__6;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_imag(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ doublereal d__;
+ integer i__, j, k;
+ doublecomplex t;
+ doublereal r1, d11;
+ doublecomplex d12;
+ doublereal d22;
+ doublecomplex d21;
+ integer kk, kp;
+ doublecomplex wk;
+ doublereal tt;
+ doublecomplex wkm1, wkp1;
+ integer imax, jmax;
+ extern /* Subroutine */ int zher_(char *, integer *, doublereal *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ doublereal alpha;
+ extern logical lsame_(char *, char *);
+ integer kstep;
+ logical upper;
+ extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ extern doublereal dlapy2_(doublereal *, doublereal *);
+ doublereal absakk;
+ extern logical disnan_(doublereal *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+ integer *, doublereal *, doublecomplex *, integer *);
+ doublereal colmax;
+ extern integer izamax_(integer *, doublecomplex *, integer *);
+ doublereal rowmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHETF2 computes the factorization of a complex Hermitian matrix A */
+/* using the Bunch-Kaufman diagonal pivoting method: */
+
+/* A = U*D*U' or A = L*D*L' */
+
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, U' is the conjugate transpose of U, and D is */
+/* Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* This is the unblocked version of the algorithm, calling Level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* Hermitian matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L (see below for further details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, D(k,k) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, and division by zero will occur if it */
+/* is used to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* 09-29-06 - patch from */
+/* Bobby Cheng, MathWorks */
+
+/* Replace l.210 and l.393 */
+/* IF( MAX( ABSAKK, COLMAX ).EQ.ZERO ) THEN */
+/* by */
+/* IF( (MAX( ABSAKK, COLMAX ).EQ.ZERO) .OR. DISNAN(ABSAKK) ) THEN */
+
+/* 01-01-96 - Based on modifications by */
+/* J. Lewis, Boeing Computer Services Company */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* If UPLO = 'U', then A = U*D*U', where */
+/* U = P(n)*U(n)* ... *P(k)U(k)* ..., */
+/* i.e., U is a product of terms P(k)*U(k), where k decreases from n to */
+/* 1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I v 0 ) k-s */
+/* U(k) = ( 0 I 0 ) s */
+/* ( 0 0 I ) n-k */
+/* k-s s n-k */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */
+/* If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */
+/* and A(k,k), and v overwrites A(1:k-2,k-1:k). */
+
+/* If UPLO = 'L', then A = L*D*L', where */
+/* L = P(1)*L(1)* ... *P(k)*L(k)* ..., */
+/* i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */
+/* n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I 0 0 ) k-1 */
+/* L(k) = ( 0 I 0 ) s */
+/* ( 0 v I ) n-k-s+1 */
+/* k-1 s n-k-s+1 */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */
+/* If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */
+/* and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHETF2", &i__1);
+ return 0;
+ }
+
+/* Initialize ALPHA for use in choosing pivot block size. */
+
+ alpha = (sqrt(17.) + 1.) / 8.;
+
+ if (upper) {
+
+/* Factorize A as U*D*U' using the upper triangle of A */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2 */
+
+ k = *n;
+L10:
+
+/* If K < 1, exit from loop */
+
+ if (k < 1) {
+ goto L90;
+ }
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ i__1 = k + k * a_dim1;
+ absakk = (d__1 = a[i__1].r, abs(d__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ imax = izamax_(&i__1, &a[k * a_dim1 + 1], &c__1);
+ i__1 = imax + k * a_dim1;
+ colmax = (d__1 = a[i__1].r, abs(d__1)) + (d__2 = d_imag(&a[imax +
+ k * a_dim1]), abs(d__2));
+ } else {
+ colmax = 0.;
+ }
+
+ if (max(absakk,colmax) == 0. || disnan_(&absakk)) {
+
+/* Column K is zero or contains a NaN: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ d__1 = a[i__2].r;
+ a[i__1].r = d__1, a[i__1].i = 0.;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = k - imax;
+ jmax = imax + izamax_(&i__1, &a[imax + (imax + 1) * a_dim1],
+ lda);
+ i__1 = imax + jmax * a_dim1;
+ rowmax = (d__1 = a[i__1].r, abs(d__1)) + (d__2 = d_imag(&a[
+ imax + jmax * a_dim1]), abs(d__2));
+ if (imax > 1) {
+ i__1 = imax - 1;
+ jmax = izamax_(&i__1, &a[imax * a_dim1 + 1], &c__1);
+/* Computing MAX */
+ i__1 = jmax + imax * a_dim1;
+ d__3 = rowmax, d__4 = (d__1 = a[i__1].r, abs(d__1)) + (
+ d__2 = d_imag(&a[jmax + imax * a_dim1]), abs(d__2)
+ );
+ rowmax = max(d__3,d__4);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else /* if(complicated condition) */ {
+ i__1 = imax + imax * a_dim1;
+ if ((d__1 = a[i__1].r, abs(d__1)) >= alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+ } else {
+
+/* interchange rows and columns K-1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+ }
+
+ kk = k - kstep + 1;
+ if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the leading */
+/* submatrix A(1:k,1:k) */
+
+ i__1 = kp - 1;
+ zswap_(&i__1, &a[kk * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1],
+ &c__1);
+ i__1 = kk - 1;
+ for (j = kp + 1; j <= i__1; ++j) {
+ d_cnjg(&z__1, &a[j + kk * a_dim1]);
+ t.r = z__1.r, t.i = z__1.i;
+ i__2 = j + kk * a_dim1;
+ d_cnjg(&z__1, &a[kp + j * a_dim1]);
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = kp + j * a_dim1;
+ a[i__2].r = t.r, a[i__2].i = t.i;
+/* L20: */
+ }
+ i__1 = kp + kk * a_dim1;
+ d_cnjg(&z__1, &a[kp + kk * a_dim1]);
+ a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+ i__1 = kk + kk * a_dim1;
+ r1 = a[i__1].r;
+ i__1 = kk + kk * a_dim1;
+ i__2 = kp + kp * a_dim1;
+ d__1 = a[i__2].r;
+ a[i__1].r = d__1, a[i__1].i = 0.;
+ i__1 = kp + kp * a_dim1;
+ a[i__1].r = r1, a[i__1].i = 0.;
+ if (kstep == 2) {
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ d__1 = a[i__2].r;
+ a[i__1].r = d__1, a[i__1].i = 0.;
+ i__1 = k - 1 + k * a_dim1;
+ t.r = a[i__1].r, t.i = a[i__1].i;
+ i__1 = k - 1 + k * a_dim1;
+ i__2 = kp + k * a_dim1;
+ a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+ i__1 = kp + k * a_dim1;
+ a[i__1].r = t.r, a[i__1].i = t.i;
+ }
+ } else {
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ d__1 = a[i__2].r;
+ a[i__1].r = d__1, a[i__1].i = 0.;
+ if (kstep == 2) {
+ i__1 = k - 1 + (k - 1) * a_dim1;
+ i__2 = k - 1 + (k - 1) * a_dim1;
+ d__1 = a[i__2].r;
+ a[i__1].r = d__1, a[i__1].i = 0.;
+ }
+ }
+
+/* Update the leading submatrix */
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = U(k)*D(k) */
+
+/* where U(k) is the k-th column of U */
+
+/* Perform a rank-1 update of A(1:k-1,1:k-1) as */
+
+/* A := A - U(k)*D(k)*U(k)' = A - W(k)*1/D(k)*W(k)' */
+
+ i__1 = k + k * a_dim1;
+ r1 = 1. / a[i__1].r;
+ i__1 = k - 1;
+ d__1 = -r1;
+ zher_(uplo, &i__1, &d__1, &a[k * a_dim1 + 1], &c__1, &a[
+ a_offset], lda);
+
+/* Store U(k) in column k */
+
+ i__1 = k - 1;
+ zdscal_(&i__1, &r1, &a[k * a_dim1 + 1], &c__1);
+ } else {
+
+/* 2-by-2 pivot block D(k): columns k and k-1 now hold */
+
+/* ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */
+
+/* where U(k) and U(k-1) are the k-th and (k-1)-th columns */
+/* of U */
+
+/* Perform a rank-2 update of A(1:k-2,1:k-2) as */
+
+/* A := A - ( U(k-1) U(k) )*D(k)*( U(k-1) U(k) )' */
+/* = A - ( W(k-1) W(k) )*inv(D(k))*( W(k-1) W(k) )' */
+
+ if (k > 2) {
+
+ i__1 = k - 1 + k * a_dim1;
+ d__1 = a[i__1].r;
+ d__2 = d_imag(&a[k - 1 + k * a_dim1]);
+ d__ = dlapy2_(&d__1, &d__2);
+ i__1 = k - 1 + (k - 1) * a_dim1;
+ d22 = a[i__1].r / d__;
+ i__1 = k + k * a_dim1;
+ d11 = a[i__1].r / d__;
+ tt = 1. / (d11 * d22 - 1.);
+ i__1 = k - 1 + k * a_dim1;
+ z__1.r = a[i__1].r / d__, z__1.i = a[i__1].i / d__;
+ d12.r = z__1.r, d12.i = z__1.i;
+ d__ = tt / d__;
+
+ for (j = k - 2; j >= 1; --j) {
+ i__1 = j + (k - 1) * a_dim1;
+ z__3.r = d11 * a[i__1].r, z__3.i = d11 * a[i__1].i;
+ d_cnjg(&z__5, &d12);
+ i__2 = j + k * a_dim1;
+ z__4.r = z__5.r * a[i__2].r - z__5.i * a[i__2].i,
+ z__4.i = z__5.r * a[i__2].i + z__5.i * a[i__2]
+ .r;
+ z__2.r = z__3.r - z__4.r, z__2.i = z__3.i - z__4.i;
+ z__1.r = d__ * z__2.r, z__1.i = d__ * z__2.i;
+ wkm1.r = z__1.r, wkm1.i = z__1.i;
+ i__1 = j + k * a_dim1;
+ z__3.r = d22 * a[i__1].r, z__3.i = d22 * a[i__1].i;
+ i__2 = j + (k - 1) * a_dim1;
+ z__4.r = d12.r * a[i__2].r - d12.i * a[i__2].i,
+ z__4.i = d12.r * a[i__2].i + d12.i * a[i__2]
+ .r;
+ z__2.r = z__3.r - z__4.r, z__2.i = z__3.i - z__4.i;
+ z__1.r = d__ * z__2.r, z__1.i = d__ * z__2.i;
+ wk.r = z__1.r, wk.i = z__1.i;
+ for (i__ = j; i__ >= 1; --i__) {
+ i__1 = i__ + j * a_dim1;
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__ + k * a_dim1;
+ d_cnjg(&z__4, &wk);
+ z__3.r = a[i__3].r * z__4.r - a[i__3].i * z__4.i,
+ z__3.i = a[i__3].r * z__4.i + a[i__3].i *
+ z__4.r;
+ z__2.r = a[i__2].r - z__3.r, z__2.i = a[i__2].i -
+ z__3.i;
+ i__4 = i__ + (k - 1) * a_dim1;
+ d_cnjg(&z__6, &wkm1);
+ z__5.r = a[i__4].r * z__6.r - a[i__4].i * z__6.i,
+ z__5.i = a[i__4].r * z__6.i + a[i__4].i *
+ z__6.r;
+ z__1.r = z__2.r - z__5.r, z__1.i = z__2.i -
+ z__5.i;
+ a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+/* L30: */
+ }
+ i__1 = j + k * a_dim1;
+ a[i__1].r = wk.r, a[i__1].i = wk.i;
+ i__1 = j + (k - 1) * a_dim1;
+ a[i__1].r = wkm1.r, a[i__1].i = wkm1.i;
+ i__1 = j + j * a_dim1;
+ i__2 = j + j * a_dim1;
+ d__1 = a[i__2].r;
+ z__1.r = d__1, z__1.i = 0.;
+ a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+/* L40: */
+ }
+
+ }
+
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k - 1] = -kp;
+ }
+
+/* Decrease K and return to the start of the main loop */
+
+ k -= kstep;
+ goto L10;
+
+ } else {
+
+/* Factorize A as L*D*L' using the lower triangle of A */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2 */
+
+ k = 1;
+L50:
+
+/* If K > N, exit from loop */
+
+ if (k > *n) {
+ goto L90;
+ }
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ i__1 = k + k * a_dim1;
+ absakk = (d__1 = a[i__1].r, abs(d__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ imax = k + izamax_(&i__1, &a[k + 1 + k * a_dim1], &c__1);
+ i__1 = imax + k * a_dim1;
+ colmax = (d__1 = a[i__1].r, abs(d__1)) + (d__2 = d_imag(&a[imax +
+ k * a_dim1]), abs(d__2));
+ } else {
+ colmax = 0.;
+ }
+
+ if (max(absakk,colmax) == 0. || disnan_(&absakk)) {
+
+/* Column K is zero or contains a NaN: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ d__1 = a[i__2].r;
+ a[i__1].r = d__1, a[i__1].i = 0.;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = imax - k;
+ jmax = k - 1 + izamax_(&i__1, &a[imax + k * a_dim1], lda);
+ i__1 = imax + jmax * a_dim1;
+ rowmax = (d__1 = a[i__1].r, abs(d__1)) + (d__2 = d_imag(&a[
+ imax + jmax * a_dim1]), abs(d__2));
+ if (imax < *n) {
+ i__1 = *n - imax;
+ jmax = imax + izamax_(&i__1, &a[imax + 1 + imax * a_dim1],
+ &c__1);
+/* Computing MAX */
+ i__1 = jmax + imax * a_dim1;
+ d__3 = rowmax, d__4 = (d__1 = a[i__1].r, abs(d__1)) + (
+ d__2 = d_imag(&a[jmax + imax * a_dim1]), abs(d__2)
+ );
+ rowmax = max(d__3,d__4);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else /* if(complicated condition) */ {
+ i__1 = imax + imax * a_dim1;
+ if ((d__1 = a[i__1].r, abs(d__1)) >= alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+ } else {
+
+/* interchange rows and columns K+1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+ }
+
+ kk = k + kstep - 1;
+ if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the trailing */
+/* submatrix A(k:n,k:n) */
+
+ if (kp < *n) {
+ i__1 = *n - kp;
+ zswap_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + 1
+ + kp * a_dim1], &c__1);
+ }
+ i__1 = kp - 1;
+ for (j = kk + 1; j <= i__1; ++j) {
+ d_cnjg(&z__1, &a[j + kk * a_dim1]);
+ t.r = z__1.r, t.i = z__1.i;
+ i__2 = j + kk * a_dim1;
+ d_cnjg(&z__1, &a[kp + j * a_dim1]);
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = kp + j * a_dim1;
+ a[i__2].r = t.r, a[i__2].i = t.i;
+/* L60: */
+ }
+ i__1 = kp + kk * a_dim1;
+ d_cnjg(&z__1, &a[kp + kk * a_dim1]);
+ a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+ i__1 = kk + kk * a_dim1;
+ r1 = a[i__1].r;
+ i__1 = kk + kk * a_dim1;
+ i__2 = kp + kp * a_dim1;
+ d__1 = a[i__2].r;
+ a[i__1].r = d__1, a[i__1].i = 0.;
+ i__1 = kp + kp * a_dim1;
+ a[i__1].r = r1, a[i__1].i = 0.;
+ if (kstep == 2) {
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ d__1 = a[i__2].r;
+ a[i__1].r = d__1, a[i__1].i = 0.;
+ i__1 = k + 1 + k * a_dim1;
+ t.r = a[i__1].r, t.i = a[i__1].i;
+ i__1 = k + 1 + k * a_dim1;
+ i__2 = kp + k * a_dim1;
+ a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+ i__1 = kp + k * a_dim1;
+ a[i__1].r = t.r, a[i__1].i = t.i;
+ }
+ } else {
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ d__1 = a[i__2].r;
+ a[i__1].r = d__1, a[i__1].i = 0.;
+ if (kstep == 2) {
+ i__1 = k + 1 + (k + 1) * a_dim1;
+ i__2 = k + 1 + (k + 1) * a_dim1;
+ d__1 = a[i__2].r;
+ a[i__1].r = d__1, a[i__1].i = 0.;
+ }
+ }
+
+/* Update the trailing submatrix */
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = L(k)*D(k) */
+
+/* where L(k) is the k-th column of L */
+
+ if (k < *n) {
+
+/* Perform a rank-1 update of A(k+1:n,k+1:n) as */
+
+/* A := A - L(k)*D(k)*L(k)' = A - W(k)*(1/D(k))*W(k)' */
+
+ i__1 = k + k * a_dim1;
+ r1 = 1. / a[i__1].r;
+ i__1 = *n - k;
+ d__1 = -r1;
+ zher_(uplo, &i__1, &d__1, &a[k + 1 + k * a_dim1], &c__1, &
+ a[k + 1 + (k + 1) * a_dim1], lda);
+
+/* Store L(k) in column K */
+
+ i__1 = *n - k;
+ zdscal_(&i__1, &r1, &a[k + 1 + k * a_dim1], &c__1);
+ }
+ } else {
+
+/* 2-by-2 pivot block D(k) */
+
+ if (k < *n - 1) {
+
+/* Perform a rank-2 update of A(k+2:n,k+2:n) as */
+
+/* A := A - ( L(k) L(k+1) )*D(k)*( L(k) L(k+1) )' */
+/* = A - ( W(k) W(k+1) )*inv(D(k))*( W(k) W(k+1) )' */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th */
+/* columns of L */
+
+ i__1 = k + 1 + k * a_dim1;
+ d__1 = a[i__1].r;
+ d__2 = d_imag(&a[k + 1 + k * a_dim1]);
+ d__ = dlapy2_(&d__1, &d__2);
+ i__1 = k + 1 + (k + 1) * a_dim1;
+ d11 = a[i__1].r / d__;
+ i__1 = k + k * a_dim1;
+ d22 = a[i__1].r / d__;
+ tt = 1. / (d11 * d22 - 1.);
+ i__1 = k + 1 + k * a_dim1;
+ z__1.r = a[i__1].r / d__, z__1.i = a[i__1].i / d__;
+ d21.r = z__1.r, d21.i = z__1.i;
+ d__ = tt / d__;
+
+ i__1 = *n;
+ for (j = k + 2; j <= i__1; ++j) {
+ i__2 = j + k * a_dim1;
+ z__3.r = d11 * a[i__2].r, z__3.i = d11 * a[i__2].i;
+ i__3 = j + (k + 1) * a_dim1;
+ z__4.r = d21.r * a[i__3].r - d21.i * a[i__3].i,
+ z__4.i = d21.r * a[i__3].i + d21.i * a[i__3]
+ .r;
+ z__2.r = z__3.r - z__4.r, z__2.i = z__3.i - z__4.i;
+ z__1.r = d__ * z__2.r, z__1.i = d__ * z__2.i;
+ wk.r = z__1.r, wk.i = z__1.i;
+ i__2 = j + (k + 1) * a_dim1;
+ z__3.r = d22 * a[i__2].r, z__3.i = d22 * a[i__2].i;
+ d_cnjg(&z__5, &d21);
+ i__3 = j + k * a_dim1;
+ z__4.r = z__5.r * a[i__3].r - z__5.i * a[i__3].i,
+ z__4.i = z__5.r * a[i__3].i + z__5.i * a[i__3]
+ .r;
+ z__2.r = z__3.r - z__4.r, z__2.i = z__3.i - z__4.i;
+ z__1.r = d__ * z__2.r, z__1.i = d__ * z__2.i;
+ wkp1.r = z__1.r, wkp1.i = z__1.i;
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__ + k * a_dim1;
+ d_cnjg(&z__4, &wk);
+ z__3.r = a[i__5].r * z__4.r - a[i__5].i * z__4.i,
+ z__3.i = a[i__5].r * z__4.i + a[i__5].i *
+ z__4.r;
+ z__2.r = a[i__4].r - z__3.r, z__2.i = a[i__4].i -
+ z__3.i;
+ i__6 = i__ + (k + 1) * a_dim1;
+ d_cnjg(&z__6, &wkp1);
+ z__5.r = a[i__6].r * z__6.r - a[i__6].i * z__6.i,
+ z__5.i = a[i__6].r * z__6.i + a[i__6].i *
+ z__6.r;
+ z__1.r = z__2.r - z__5.r, z__1.i = z__2.i -
+ z__5.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L70: */
+ }
+ i__2 = j + k * a_dim1;
+ a[i__2].r = wk.r, a[i__2].i = wk.i;
+ i__2 = j + (k + 1) * a_dim1;
+ a[i__2].r = wkp1.r, a[i__2].i = wkp1.i;
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ d__1 = a[i__3].r;
+ z__1.r = d__1, z__1.i = 0.;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+/* L80: */
+ }
+ }
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k + 1] = -kp;
+ }
+
+/* Increase K and return to the start of the main loop */
+
+ k += kstep;
+ goto L50;
+
+ }
+
+L90:
+ return 0;
+
+/* End of ZHETF2 */
+
+} /* zhetf2_ */
diff --git a/contrib/libs/clapack/zhetrd.c b/contrib/libs/clapack/zhetrd.c
new file mode 100644
index 0000000000..fcab44a0c0
--- /dev/null
+++ b/contrib/libs/clapack/zhetrd.c
@@ -0,0 +1,370 @@
+/* zhetrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+static doublereal c_b23 = 1.;
+
+/* Subroutine */ int zhetrd_(char *uplo, integer *n, doublecomplex *a, 
+	integer *lda, doublereal *d__, doublereal *e, doublecomplex *tau, 
+	doublecomplex *work, integer *lwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    doublecomplex z__1;
+
+    /* Local variables */
+    integer i__, j, nb, kk, nx, iws;
+    extern logical lsame_(char *, char *);
+    integer nbmin, iinfo;
+    logical upper;
+    extern /* Subroutine */ int zhetd2_(char *, integer *, doublecomplex *, 
+	    integer *, doublereal *, doublereal *, doublecomplex *, integer *), zher2k_(char *, char *, integer *, integer *, 
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *, 
+	    integer *, doublereal *, doublecomplex *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    extern /* Subroutine */ int zlatrd_(char *, integer *, integer *, 
+	    doublecomplex *, integer *, doublereal *, doublecomplex *, 
+	    doublecomplex *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHETRD reduces a complex Hermitian matrix A to real symmetric */
+/* tridiagonal form T by a unitary similarity transformation: */
+/* Q**H * A * Q = T. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+/* On exit, if UPLO = 'U', the diagonal and first superdiagonal */
+/* of A are overwritten by the corresponding elements of the */
+/* tridiagonal matrix T, and the elements above the first */
+/* superdiagonal, with the array TAU, represent the unitary */
+/* matrix Q as a product of elementary reflectors; if UPLO */
+/* = 'L', the diagonal and first subdiagonal of A are over- */
+/* written by the corresponding elements of the tridiagonal */
+/* matrix T, and the elements below the first subdiagonal, with */
+/* the array TAU, represent the unitary matrix Q as a product */
+/* of elementary reflectors. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* D (output) DOUBLE PRECISION array, dimension (N) */
+/* The diagonal elements of the tridiagonal matrix T: */
+/* D(i) = A(i,i). */
+
+/* E (output) DOUBLE PRECISION array, dimension (N-1) */
+/* The off-diagonal elements of the tridiagonal matrix T: */
+/* E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. */
+
+/* TAU (output) COMPLEX*16 array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= 1. */
+/* For optimum performance LWORK >= N*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(n-1) . . . H(2) H(1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in */
+/* A(1:i-1,i+1), and tau in TAU(i). */
+
+/* If UPLO = 'L', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(1) H(2) . . . H(n-1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i), */
+/* and tau in TAU(i). */
+
+/* The contents of A on exit are illustrated by the following examples */
+/* with n = 5: */
+
+/* if UPLO = 'U': if UPLO = 'L': */
+
+/* ( d e v2 v3 v4 ) ( d ) */
+/* ( d e v3 v4 ) ( e d ) */
+/* ( d e v4 ) ( v1 e d ) */
+/* ( d e ) ( v1 v2 e d ) */
+/* ( d ) ( v1 v2 v3 e d ) */
+
+/* where d and e denote diagonal and off-diagonal elements of T, and vi */
+/* denotes an element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+    /* f2c idiom: shift the array pointers so 1-based Fortran indexing */
+    /* (a[i + j*a_dim1], d__[i], ...) works unchanged in C. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --d__;
+    --e;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    lquery = *lwork == -1;
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*lwork < 1 && ! lquery) {
+	*info = -9;
+    }
+
+    if (*info == 0) {
+
+/* Determine the block size. */
+
+	nb = ilaenv_(&c__1, "ZHETRD", uplo, n, &c_n1, &c_n1, &c_n1);
+	lwkopt = *n * nb;
+	work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHETRD", &i__1);
+	return 0;
+    } else if (lquery) {
+/* Workspace query: optimal LWORK already stored in WORK(1). */
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	work[1].r = 1., work[1].i = 0.;
+	return 0;
+    }
+
+/* Choose between blocked (ZLATRD + ZHER2K) and unblocked (ZHETD2) code: */
+/* NX is the crossover column count, IWS the workspace actually needed. */
+
+    nx = *n;
+    iws = 1;
+    if (nb > 1 && nb < *n) {
+
+/* Determine when to cross over from blocked to unblocked code */
+/* (last block is always handled by unblocked code). */
+
+/* Computing MAX */
+	i__1 = nb, i__2 = ilaenv_(&c__3, "ZHETRD", uplo, n, &c_n1, &c_n1, &
+		c_n1);
+	nx = max(i__1,i__2);
+	if (nx < *n) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *n;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: determine the */
+/* minimum value of NB, and reduce NB or force use of */
+/* unblocked code by setting NX = N. */
+
+/* Computing MAX */
+		i__1 = *lwork / ldwork;
+		nb = max(i__1,1);
+		nbmin = ilaenv_(&c__2, "ZHETRD", uplo, n, &c_n1, &c_n1, &c_n1);
+		if (nb < nbmin) {
+		    nx = *n;
+		}
+	    }
+	} else {
+	    nx = *n;
+	}
+    } else {
+	nb = 1;
+    }
+
+    if (upper) {
+
+/* Reduce the upper triangle of A. */
+/* Columns 1:kk are handled by the unblocked method. */
+
+/* KK = number of leading columns left for the final unblocked call; */
+/* the trailing N-KK columns are reduced NB at a time, right to left. */
+
+	kk = *n - (*n - nx + nb - 1) / nb * nb;
+	i__1 = kk + 1;
+	i__2 = -nb;
+	for (i__ = *n - nb + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += 
+		i__2) {
+
+/* Reduce columns i:i+nb-1 to tridiagonal form and form the */
+/* matrix W which is needed to update the unreduced part of */
+/* the matrix */
+
+	    i__3 = i__ + nb - 1;
+	    zlatrd_(uplo, &i__3, &nb, &a[a_offset], lda, &e[1], &tau[1], &
+		    work[1], &ldwork);
+
+/* Update the unreduced submatrix A(1:i-1,1:i-1), using an */
+/* update of the form: A := A - V*W' - W*V' */
+
+	    i__3 = i__ - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zher2k_(uplo, "No transpose", &i__3, &nb, &z__1, &a[i__ * a_dim1 
+		    + 1], lda, &work[1], &ldwork, &c_b23, &a[a_offset], lda);
+
+/* Copy superdiagonal elements back into A, and diagonal */
+/* elements into D */
+
+	    i__3 = i__ + nb - 1;
+	    for (j = i__; j <= i__3; ++j) {
+		i__4 = j - 1 + j * a_dim1;
+		i__5 = j - 1;
+		a[i__4].r = e[i__5], a[i__4].i = 0.;
+		i__4 = j;
+		i__5 = j + j * a_dim1;
+		d__[i__4] = a[i__5].r;
+/* L10: */
+	    }
+/* L20: */
+	}
+
+/* Use unblocked code to reduce the last or only block */
+
+	zhetd2_(uplo, &kk, &a[a_offset], lda, &d__[1], &e[1], &tau[1], &iinfo);
+    } else {
+
+/* Reduce the lower triangle of A */
+
+	i__2 = *n - nx;
+	i__1 = nb;
+	for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+
+/* Reduce columns i:i+nb-1 to tridiagonal form and form the */
+/* matrix W which is needed to update the unreduced part of */
+/* the matrix */
+
+	    i__3 = *n - i__ + 1;
+	    zlatrd_(uplo, &i__3, &nb, &a[i__ + i__ * a_dim1], lda, &e[i__], &
+		    tau[i__], &work[1], &ldwork);
+
+/* Update the unreduced submatrix A(i+nb:n,i+nb:n), using */
+/* an update of the form: A := A - V*W' - W*V' */
+
+	    i__3 = *n - i__ - nb + 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zher2k_(uplo, "No transpose", &i__3, &nb, &z__1, &a[i__ + nb + 
+		    i__ * a_dim1], lda, &work[nb + 1], &ldwork, &c_b23, &a[
+		    i__ + nb + (i__ + nb) * a_dim1], lda);
+
+/* Copy subdiagonal elements back into A, and diagonal */
+/* elements into D */
+
+	    i__3 = i__ + nb - 1;
+	    for (j = i__; j <= i__3; ++j) {
+		i__4 = j + 1 + j * a_dim1;
+		i__5 = j;
+		a[i__4].r = e[i__5], a[i__4].i = 0.;
+		i__4 = j;
+		i__5 = j + j * a_dim1;
+		d__[i__4] = a[i__5].r;
+/* L30: */
+	    }
+/* L40: */
+	}
+
+/* Use unblocked code to reduce the last or only block */
+
+/* On loop exit, i__ is the first column of the remaining (unblocked) */
+/* trailing panel A(i:n,i:n). */
+
+	i__1 = *n - i__ + 1;
+	zhetd2_(uplo, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__], 
+		&tau[i__], &iinfo);
+    }
+
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    return 0;
+
+/* End of ZHETRD */
+
+} /* zhetrd_ */
diff --git a/contrib/libs/clapack/zhetrf.c b/contrib/libs/clapack/zhetrf.c
new file mode 100644
index 0000000000..7fe29075a6
--- /dev/null
+++ b/contrib/libs/clapack/zhetrf.c
@@ -0,0 +1,336 @@
+/* zhetrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int zhetrf_(char *uplo, integer *n, doublecomplex *a, 
+	integer *lda, integer *ipiv, doublecomplex *work, integer *lwork, 
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer j, k, kb, nb, iws;
+    extern logical lsame_(char *, char *);
+    integer nbmin, iinfo;
+    logical upper;
+    extern /* Subroutine */ int zhetf2_(char *, integer *, doublecomplex *, 
+	    integer *, integer *, integer *), zlahef_(char *, integer 
+	    *, integer *, integer *, doublecomplex *, integer *, integer *, 
+	    doublecomplex *, integer *, integer *), xerbla_(char *, 
+	    integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
+	    integer *, integer *);
+    integer ldwork, lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHETRF computes the factorization of a complex Hermitian matrix A */
+/* using the Bunch-Kaufman diagonal pivoting method. The form of the */
+/* factorization is */
+
+/* A = U*D*U**H or A = L*D*L**H */
+
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is Hermitian and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* This is the blocked version of the algorithm, calling Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L (see below for further details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of WORK. LWORK >=1. For best performance */
+/* LWORK >= N*NB, where NB is the block size returned by ILAENV. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, and division by zero will occur if it */
+/* is used to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', then A = U*D*U', where */
+/* U = P(n)*U(n)* ... *P(k)U(k)* ..., */
+/* i.e., U is a product of terms P(k)*U(k), where k decreases from n to */
+/* 1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I v 0 ) k-s */
+/* U(k) = ( 0 I 0 ) s */
+/* ( 0 0 I ) n-k */
+/* k-s s n-k */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */
+/* If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */
+/* and A(k,k), and v overwrites A(1:k-2,k-1:k). */
+
+/* If UPLO = 'L', then A = L*D*L', where */
+/* L = P(1)*L(1)* ... *P(k)*L(k)* ..., */
+/* i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */
+/* n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I 0 0 ) k-1 */
+/* L(k) = ( 0 I 0 ) s */
+/* ( 0 v I ) n-k-s+1 */
+/* k-1 s n-k-s+1 */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */
+/* If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */
+/* and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* f2c idiom: shift pointers so 1-based Fortran indexing works in C. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    lquery = *lwork == -1;
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*lwork < 1 && ! lquery) {
+	*info = -7;
+    }
+
+    if (*info == 0) {
+
+/* Determine the block size */
+
+	nb = ilaenv_(&c__1, "ZHETRF", uplo, n, &c_n1, &c_n1, &c_n1);
+	lwkopt = *n * nb;
+	work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHETRF", &i__1);
+	return 0;
+    } else if (lquery) {
+/* Workspace query: optimal LWORK already stored in WORK(1). */
+	return 0;
+    }
+
+/* If WORK is too small for the optimal NB, shrink NB to fit; if it */
+/* drops below NBMIN, fall back to fully unblocked code (NB = N). */
+
+    nbmin = 2;
+    ldwork = *n;
+    if (nb > 1 && nb < *n) {
+	iws = ldwork * nb;
+	if (*lwork < iws) {
+/* Computing MAX */
+	    i__1 = *lwork / ldwork;
+	    nb = max(i__1,1);
+/* Computing MAX */
+	    i__1 = 2, i__2 = ilaenv_(&c__2, "ZHETRF", uplo, n, &c_n1, &c_n1, &
+		    c_n1);
+	    nbmin = max(i__1,i__2);
+	}
+    } else {
+	iws = 1;
+    }
+    if (nb < nbmin) {
+	nb = *n;
+    }
+
+    if (upper) {
+
+/* Factorize A as U*D*U' using the upper triangle of A */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* KB, where KB is the number of columns factorized by ZLAHEF; */
+/* KB is either NB or NB-1, or K for the last block */
+
+	k = *n;
+L10:
+
+/* If K < 1, exit from loop */
+
+	if (k < 1) {
+	    goto L40;
+	}
+
+	if (k > nb) {
+
+/* Factorize columns k-kb+1:k of A and use blocked code to */
+/* update columns 1:k-kb */
+
+	    zlahef_(uplo, &k, &nb, &kb, &a[a_offset], lda, &ipiv[1], &work[1], 
+		     n, &iinfo);
+	} else {
+
+/* Use unblocked code to factorize columns 1:k of A */
+
+	    zhetf2_(uplo, &k, &a[a_offset], lda, &ipiv[1], &iinfo);
+	    kb = k;
+	}
+
+/* Set INFO on the first occurrence of a zero pivot */
+
+	if (*info == 0 && iinfo > 0) {
+	    *info = iinfo;
+	}
+
+/* Decrease K and return to the start of the main loop */
+
+	k -= kb;
+	goto L10;
+
+    } else {
+
+/* Factorize A as L*D*L' using the lower triangle of A */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* KB, where KB is the number of columns factorized by ZLAHEF; */
+/* KB is either NB or NB-1, or N-K+1 for the last block */
+
+	k = 1;
+L20:
+
+/* If K > N, exit from loop */
+
+	if (k > *n) {
+	    goto L40;
+	}
+
+	if (k <= *n - nb) {
+
+/* Factorize columns k:k+kb-1 of A and use blocked code to */
+/* update columns k+kb:n */
+
+	    i__1 = *n - k + 1;
+	    zlahef_(uplo, &i__1, &nb, &kb, &a[k + k * a_dim1], lda, &ipiv[k], 
+		    &work[1], n, &iinfo);
+	} else {
+
+/* Use unblocked code to factorize columns k:n of A */
+
+	    i__1 = *n - k + 1;
+	    zhetf2_(uplo, &i__1, &a[k + k * a_dim1], lda, &ipiv[k], &iinfo);
+	    kb = *n - k + 1;
+	}
+
+/* Set INFO on the first occurrence of a zero pivot */
+
+	if (*info == 0 && iinfo > 0) {
+	    *info = iinfo + k - 1;
+	}
+
+/* Adjust IPIV */
+
+/* (zlahef_/zhetf2_ factorized the trailing submatrix A(k:n,k:n), so */
+/* their pivot indices are relative to row k; shift them to global */
+/* row indices, preserving the sign that encodes 2-by-2 blocks.) */
+
+	i__1 = k + kb - 1;
+	for (j = k; j <= i__1; ++j) {
+	    if (ipiv[j] > 0) {
+		ipiv[j] = ipiv[j] + k - 1;
+	    } else {
+		ipiv[j] = ipiv[j] - k + 1;
+	    }
+/* L30: */
+	}
+
+/* Increase K and return to the start of the main loop */
+
+	k += kb;
+	goto L20;
+
+    }
+
+L40:
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    return 0;
+
+/* End of ZHETRF */
+
+} /* zhetrf_ */
diff --git a/contrib/libs/clapack/zhetri.c b/contrib/libs/clapack/zhetri.c
new file mode 100644
index 0000000000..7dadd4fb28
--- /dev/null
+++ b/contrib/libs/clapack/zhetri.c
@@ -0,0 +1,510 @@
+/* zhetri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b2 = {0.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zhetri_(char *uplo, integer *n, doublecomplex *a,
+	integer *lda, integer *ipiv, doublecomplex *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    doublereal d__1;
+    doublecomplex z__1, z__2;
+
+    /* Builtin functions */
+    double z_abs(doublecomplex *);
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    doublereal d__;
+    integer j, k;
+    doublereal t, ak;
+    integer kp;
+    doublereal akp1;
+    doublecomplex temp, akkp1;
+    extern logical lsame_(char *, char *);
+    extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *);
+    integer kstep;
+    extern /* Subroutine */ int zhemv_(char *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, doublecomplex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *), zswap_(integer *, doublecomplex *,
+	    integer *, doublecomplex *, integer *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZHETRI computes the inverse of a complex Hermitian indefinite matrix */
+/*  A using the factorization A = U*D*U**H or A = L*D*L**H computed by */
+/*  ZHETRF. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the details of the factorization are stored */
+/*          as an upper or lower triangular matrix. */
+/*          = 'U':  Upper triangular, form is A = U*D*U**H; */
+/*          = 'L':  Lower triangular, form is A = L*D*L**H. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/*          On entry, the block diagonal matrix D and the multipliers */
+/*          used to obtain the factor U or L as computed by ZHETRF. */
+
+/*          On exit, if INFO = 0, the (Hermitian) inverse of the original */
+/*          matrix.  If UPLO = 'U', the upper triangular part of the */
+/*          inverse is formed and the part of A below the diagonal is not */
+/*          referenced; if UPLO = 'L' the lower triangular part of the */
+/*          inverse is formed and the part of A above the diagonal is */
+/*          not referenced. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  IPIV    (input) INTEGER array, dimension (N) */
+/*          Details of the interchanges and the block structure of D */
+/*          as determined by ZHETRF. */
+
+/*  WORK    (workspace) COMPLEX*16 array, dimension (N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0: successful exit */
+/*          < 0: if INFO = -i, the i-th argument had an illegal value */
+/*          > 0: if INFO = i, D(i,i) = 0; the matrix is singular and its */
+/*               inverse could not be computed. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHETRI", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Check that the diagonal matrix D is nonsingular. */
+
+/*     (NOTE: INFO doubles as the loop index below; a zero 1-by-1 pivot */
+/*     D(i,i) leaves INFO = i and the routine returns immediately. */
+/*     INFO is reset to 0 once D is known to be nonsingular.) */
+
+    if (upper) {
+
+/*        Upper triangular storage: examine D from bottom to top */
+
+	for (*info = *n; *info >= 1; --(*info)) {
+	    i__1 = *info + *info * a_dim1;
+	    if (ipiv[*info] > 0 && (a[i__1].r == 0. && a[i__1].i == 0.)) {
+		return 0;
+	    }
+/* L10: */
+	}
+    } else {
+
+/*        Lower triangular storage: examine D from top to bottom. */
+
+	i__1 = *n;
+	for (*info = 1; *info <= i__1; ++(*info)) {
+	    i__2 = *info + *info * a_dim1;
+	    if (ipiv[*info] > 0 && (a[i__2].r == 0. && a[i__2].i == 0.)) {
+		return 0;
+	    }
+/* L20: */
+	}
+    }
+    *info = 0;
+
+    if (upper) {
+
+/*        Compute inv(A) from the factorization A = U*D*U'. */
+
+/*        K is the main loop index, increasing from 1 to N in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	k = 1;
+L30:
+
+/*        If K > N, exit from loop. */
+
+	if (k > *n) {
+	    goto L50;
+	}
+
+/*        IPIV(K) > 0 marks a 1-by-1 pivot; otherwise columns K and K+1 */
+/*        of the factorization form a 2-by-2 pivot block. */
+
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Invert the diagonal block. */
+
+	    i__1 = k + k * a_dim1;
+	    i__2 = k + k * a_dim1;
+	    d__1 = 1. / a[i__2].r;
+	    a[i__1].r = d__1, a[i__1].i = 0.;
+
+/*           Compute column K of the inverse. */
+
+	    if (k > 1) {
+		i__1 = k - 1;
+		zcopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &work[1], &c__1);
+		i__1 = k - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zhemv_(uplo, &i__1, &z__1, &a[a_offset], lda, &work[1], &c__1,
+			 &c_b2, &a[k * a_dim1 + 1], &c__1);
+		i__1 = k + k * a_dim1;
+		i__2 = k + k * a_dim1;
+		i__3 = k - 1;
+		zdotc_(&z__2, &i__3, &work[1], &c__1, &a[k * a_dim1 + 1], &
+			c__1);
+		d__1 = z__2.r;
+		z__1.r = a[i__2].r - d__1, z__1.i = a[i__2].i;
+		a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+	    }
+	    kstep = 1;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Invert the diagonal block. */
+
+	    t = z_abs(&a[k + (k + 1) * a_dim1]);
+	    i__1 = k + k * a_dim1;
+	    ak = a[i__1].r / t;
+	    i__1 = k + 1 + (k + 1) * a_dim1;
+	    akp1 = a[i__1].r / t;
+	    i__1 = k + (k + 1) * a_dim1;
+	    z__1.r = a[i__1].r / t, z__1.i = a[i__1].i / t;
+	    akkp1.r = z__1.r, akkp1.i = z__1.i;
+	    d__ = t * (ak * akp1 - 1.);
+	    i__1 = k + k * a_dim1;
+	    d__1 = akp1 / d__;
+	    a[i__1].r = d__1, a[i__1].i = 0.;
+	    i__1 = k + 1 + (k + 1) * a_dim1;
+	    d__1 = ak / d__;
+	    a[i__1].r = d__1, a[i__1].i = 0.;
+	    i__1 = k + (k + 1) * a_dim1;
+	    z__2.r = -akkp1.r, z__2.i = -akkp1.i;
+	    z__1.r = z__2.r / d__, z__1.i = z__2.i / d__;
+	    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+
+/*           Compute columns K and K+1 of the inverse. */
+
+	    if (k > 1) {
+		i__1 = k - 1;
+		zcopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &work[1], &c__1);
+		i__1 = k - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zhemv_(uplo, &i__1, &z__1, &a[a_offset], lda, &work[1], &c__1,
+			 &c_b2, &a[k * a_dim1 + 1], &c__1);
+		i__1 = k + k * a_dim1;
+		i__2 = k + k * a_dim1;
+		i__3 = k - 1;
+		zdotc_(&z__2, &i__3, &work[1], &c__1, &a[k * a_dim1 + 1], &
+			c__1);
+		d__1 = z__2.r;
+		z__1.r = a[i__2].r - d__1, z__1.i = a[i__2].i;
+		a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+		i__1 = k + (k + 1) * a_dim1;
+		i__2 = k + (k + 1) * a_dim1;
+		i__3 = k - 1;
+		zdotc_(&z__2, &i__3, &a[k * a_dim1 + 1], &c__1, &a[(k + 1) *
+			a_dim1 + 1], &c__1);
+		z__1.r = a[i__2].r - z__2.r, z__1.i = a[i__2].i - z__2.i;
+		a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+		i__1 = k - 1;
+		zcopy_(&i__1, &a[(k + 1) * a_dim1 + 1], &c__1, &work[1], &
+			c__1);
+		i__1 = k - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zhemv_(uplo, &i__1, &z__1, &a[a_offset], lda, &work[1], &c__1,
+			 &c_b2, &a[(k + 1) * a_dim1 + 1], &c__1);
+		i__1 = k + 1 + (k + 1) * a_dim1;
+		i__2 = k + 1 + (k + 1) * a_dim1;
+		i__3 = k - 1;
+		zdotc_(&z__2, &i__3, &work[1], &c__1, &a[(k + 1) * a_dim1 + 1]
+, &c__1);
+		d__1 = z__2.r;
+		z__1.r = a[i__2].r - d__1, z__1.i = a[i__2].i;
+		a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+	    }
+	    kstep = 2;
+	}
+
+/*        Undo the factorization's interchange: KP = |IPIV(K)| is the */
+/*        row/column that was swapped with K during ZHETRF. */
+
+	kp = (i__1 = ipiv[k], abs(i__1));
+	if (kp != k) {
+
+/*           Interchange rows and columns K and KP in the leading */
+/*           submatrix A(1:k+1,1:k+1) */
+
+	    i__1 = kp - 1;
+	    zswap_(&i__1, &a[k * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1], &
+		    c__1);
+	    i__1 = k - 1;
+	    for (j = kp + 1; j <= i__1; ++j) {
+		d_cnjg(&z__1, &a[j + k * a_dim1]);
+		temp.r = z__1.r, temp.i = z__1.i;
+		i__2 = j + k * a_dim1;
+		d_cnjg(&z__1, &a[kp + j * a_dim1]);
+		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+		i__2 = kp + j * a_dim1;
+		a[i__2].r = temp.r, a[i__2].i = temp.i;
+/* L40: */
+	    }
+	    i__1 = kp + k * a_dim1;
+	    d_cnjg(&z__1, &a[kp + k * a_dim1]);
+	    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+	    i__1 = k + k * a_dim1;
+	    temp.r = a[i__1].r, temp.i = a[i__1].i;
+	    i__1 = k + k * a_dim1;
+	    i__2 = kp + kp * a_dim1;
+	    a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+	    i__1 = kp + kp * a_dim1;
+	    a[i__1].r = temp.r, a[i__1].i = temp.i;
+	    if (kstep == 2) {
+		i__1 = k + (k + 1) * a_dim1;
+		temp.r = a[i__1].r, temp.i = a[i__1].i;
+		i__1 = k + (k + 1) * a_dim1;
+		i__2 = kp + (k + 1) * a_dim1;
+		a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+		i__1 = kp + (k + 1) * a_dim1;
+		a[i__1].r = temp.r, a[i__1].i = temp.i;
+	    }
+	}
+
+	k += kstep;
+	goto L30;
+L50:
+
+	;
+    } else {
+
+/*        Compute inv(A) from the factorization A = L*D*L'. */
+
+/*        K is the main loop index, increasing from 1 to N in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	k = *n;
+L60:
+
+/*        If K < 1, exit from loop. */
+
+	if (k < 1) {
+	    goto L80;
+	}
+
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Invert the diagonal block. */
+
+	    i__1 = k + k * a_dim1;
+	    i__2 = k + k * a_dim1;
+	    d__1 = 1. / a[i__2].r;
+	    a[i__1].r = d__1, a[i__1].i = 0.;
+
+/*           Compute column K of the inverse. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		zcopy_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &work[1], &c__1);
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zhemv_(uplo, &i__1, &z__1, &a[k + 1 + (k + 1) * a_dim1], lda,
+			&work[1], &c__1, &c_b2, &a[k + 1 + k * a_dim1], &c__1);
+		i__1 = k + k * a_dim1;
+		i__2 = k + k * a_dim1;
+		i__3 = *n - k;
+		zdotc_(&z__2, &i__3, &work[1], &c__1, &a[k + 1 + k * a_dim1],
+			 &c__1);
+		d__1 = z__2.r;
+		z__1.r = a[i__2].r - d__1, z__1.i = a[i__2].i;
+		a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+	    }
+	    kstep = 1;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Invert the diagonal block. */
+
+	    t = z_abs(&a[k + (k - 1) * a_dim1]);
+	    i__1 = k - 1 + (k - 1) * a_dim1;
+	    ak = a[i__1].r / t;
+	    i__1 = k + k * a_dim1;
+	    akp1 = a[i__1].r / t;
+	    i__1 = k + (k - 1) * a_dim1;
+	    z__1.r = a[i__1].r / t, z__1.i = a[i__1].i / t;
+	    akkp1.r = z__1.r, akkp1.i = z__1.i;
+	    d__ = t * (ak * akp1 - 1.);
+	    i__1 = k - 1 + (k - 1) * a_dim1;
+	    d__1 = akp1 / d__;
+	    a[i__1].r = d__1, a[i__1].i = 0.;
+	    i__1 = k + k * a_dim1;
+	    d__1 = ak / d__;
+	    a[i__1].r = d__1, a[i__1].i = 0.;
+	    i__1 = k + (k - 1) * a_dim1;
+	    z__2.r = -akkp1.r, z__2.i = -akkp1.i;
+	    z__1.r = z__2.r / d__, z__1.i = z__2.i / d__;
+	    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+
+/*           Compute columns K-1 and K of the inverse. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		zcopy_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &work[1], &c__1);
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zhemv_(uplo, &i__1, &z__1, &a[k + 1 + (k + 1) * a_dim1], lda,
+			&work[1], &c__1, &c_b2, &a[k + 1 + k * a_dim1], &c__1);
+		i__1 = k + k * a_dim1;
+		i__2 = k + k * a_dim1;
+		i__3 = *n - k;
+		zdotc_(&z__2, &i__3, &work[1], &c__1, &a[k + 1 + k * a_dim1],
+			 &c__1);
+		d__1 = z__2.r;
+		z__1.r = a[i__2].r - d__1, z__1.i = a[i__2].i;
+		a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+		i__1 = k + (k - 1) * a_dim1;
+		i__2 = k + (k - 1) * a_dim1;
+		i__3 = *n - k;
+		zdotc_(&z__2, &i__3, &a[k + 1 + k * a_dim1], &c__1, &a[k + 1
+			+ (k - 1) * a_dim1], &c__1);
+		z__1.r = a[i__2].r - z__2.r, z__1.i = a[i__2].i - z__2.i;
+		a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+		i__1 = *n - k;
+		zcopy_(&i__1, &a[k + 1 + (k - 1) * a_dim1], &c__1, &work[1], &
+			c__1);
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zhemv_(uplo, &i__1, &z__1, &a[k + 1 + (k + 1) * a_dim1], lda,
+			&work[1], &c__1, &c_b2, &a[k + 1 + (k - 1) * a_dim1],
+			&c__1);
+		i__1 = k - 1 + (k - 1) * a_dim1;
+		i__2 = k - 1 + (k - 1) * a_dim1;
+		i__3 = *n - k;
+		zdotc_(&z__2, &i__3, &work[1], &c__1, &a[k + 1 + (k - 1) *
+			a_dim1], &c__1);
+		d__1 = z__2.r;
+		z__1.r = a[i__2].r - d__1, z__1.i = a[i__2].i;
+		a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+	    }
+	    kstep = 2;
+	}
+
+/*        Undo the factorization's interchange: KP = |IPIV(K)| is the */
+/*        row/column that was swapped with K during ZHETRF. */
+
+	kp = (i__1 = ipiv[k], abs(i__1));
+	if (kp != k) {
+
+/*           Interchange rows and columns K and KP in the trailing */
+/*           submatrix A(k-1:n,k-1:n) */
+
+	    if (kp < *n) {
+		i__1 = *n - kp;
+		zswap_(&i__1, &a[kp + 1 + k * a_dim1], &c__1, &a[kp + 1 + kp *
+			 a_dim1], &c__1);
+	    }
+	    i__1 = kp - 1;
+	    for (j = k + 1; j <= i__1; ++j) {
+		d_cnjg(&z__1, &a[j + k * a_dim1]);
+		temp.r = z__1.r, temp.i = z__1.i;
+		i__2 = j + k * a_dim1;
+		d_cnjg(&z__1, &a[kp + j * a_dim1]);
+		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+		i__2 = kp + j * a_dim1;
+		a[i__2].r = temp.r, a[i__2].i = temp.i;
+/* L70: */
+	    }
+	    i__1 = kp + k * a_dim1;
+	    d_cnjg(&z__1, &a[kp + k * a_dim1]);
+	    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+	    i__1 = k + k * a_dim1;
+	    temp.r = a[i__1].r, temp.i = a[i__1].i;
+	    i__1 = k + k * a_dim1;
+	    i__2 = kp + kp * a_dim1;
+	    a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+	    i__1 = kp + kp * a_dim1;
+	    a[i__1].r = temp.r, a[i__1].i = temp.i;
+	    if (kstep == 2) {
+		i__1 = k + (k - 1) * a_dim1;
+		temp.r = a[i__1].r, temp.i = a[i__1].i;
+		i__1 = k + (k - 1) * a_dim1;
+		i__2 = kp + (k - 1) * a_dim1;
+		a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+		i__1 = kp + (k - 1) * a_dim1;
+		a[i__1].r = temp.r, a[i__1].i = temp.i;
+	    }
+	}
+
+	k -= kstep;
+	goto L60;
+L80:
+	;
+    }
+
+    return 0;
+
+/*     End of ZHETRI */
+
+} /* zhetri_ */
diff --git a/contrib/libs/clapack/zhetrs.c b/contrib/libs/clapack/zhetrs.c
new file mode 100644
index 0000000000..ba566623b4
--- /dev/null
+++ b/contrib/libs/clapack/zhetrs.c
@@ -0,0 +1,529 @@
+/* zhetrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zhetrs_(char *uplo, integer *n, integer *nrhs,
+	doublecomplex *a, integer *lda, integer *ipiv, doublecomplex *b,
+	integer *ldb, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+    doublecomplex z__1, z__2, z__3;
+
+    /* Builtin functions */
+    void z_div(doublecomplex *, doublecomplex *, doublecomplex *), d_cnjg(
+	    doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer j, k;
+    doublereal s;
+    doublecomplex ak, bk;
+    integer kp;
+    doublecomplex akm1, bkm1, akm1k;
+    extern logical lsame_(char *, char *);
+    doublecomplex denom;
+    extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, doublecomplex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int zgeru_(integer *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *), zswap_(integer *, doublecomplex *,
+	    integer *, doublecomplex *, integer *), xerbla_(char *, integer *), zdscal_(integer *, doublereal *, doublecomplex *,
+	    integer *), zlacgv_(integer *, doublecomplex *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZHETRS solves a system of linear equations A*X = B with a complex */
+/*  Hermitian matrix A using the factorization A = U*D*U**H or */
+/*  A = L*D*L**H computed by ZHETRF. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the details of the factorization are stored */
+/*          as an upper or lower triangular matrix. */
+/*          = 'U':  Upper triangular, form is A = U*D*U**H; */
+/*          = 'L':  Lower triangular, form is A = L*D*L**H. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrix B.  NRHS >= 0. */
+
+/*  A       (input) COMPLEX*16 array, dimension (LDA,N) */
+/*          The block diagonal matrix D and the multipliers used to */
+/*          obtain the factor U or L as computed by ZHETRF. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  IPIV    (input) INTEGER array, dimension (N) */
+/*          Details of the interchanges and the block structure of D */
+/*          as determined by ZHETRF. */
+
+/*  B       (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/*          On entry, the right hand side matrix B. */
+/*          On exit, the solution matrix X. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHETRS", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/*        Solve A*X = B, where A = U*D*U'. */
+
+/*        First solve U*D*X = B, overwriting B with X. */
+
+/*        K is the main loop index, decreasing from N to 1 in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	k = *n;
+L10:
+
+/*        If K < 1, exit from loop. */
+
+	if (k < 1) {
+	    goto L30;
+	}
+
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/*           Multiply by inv(U(K)), where U(K) is the transformation */
+/*           stored in column K of A. */
+
+	    i__1 = k - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgeru_(&i__1, nrhs, &z__1, &a[k * a_dim1 + 1], &c__1, &b[k +
+		    b_dim1], ldb, &b[b_dim1 + 1], ldb);
+
+/*           Multiply by the inverse of the diagonal block. */
+
+	    i__1 = k + k * a_dim1;
+	    s = 1. / a[i__1].r;
+	    zdscal_(nrhs, &s, &b[k + b_dim1], ldb);
+	    --k;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Interchange rows K-1 and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k - 1) {
+		zswap_(nrhs, &b[k - 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/*           Multiply by inv(U(K)), where U(K) is the transformation */
+/*           stored in columns K-1 and K of A. */
+
+	    i__1 = k - 2;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgeru_(&i__1, nrhs, &z__1, &a[k * a_dim1 + 1], &c__1, &b[k +
+		    b_dim1], ldb, &b[b_dim1 + 1], ldb);
+	    i__1 = k - 2;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgeru_(&i__1, nrhs, &z__1, &a[(k - 1) * a_dim1 + 1], &c__1, &b[k
+		    - 1 + b_dim1], ldb, &b[b_dim1 + 1], ldb);
+
+/*           Multiply by the inverse of the diagonal block. */
+
+/*           (The 2-by-2 block is applied via its closed-form inverse: */
+/*           with the entries scaled by AKM1K, DENOM = AKM1*AK - 1.) */
+
+	    i__1 = k - 1 + k * a_dim1;
+	    akm1k.r = a[i__1].r, akm1k.i = a[i__1].i;
+	    z_div(&z__1, &a[k - 1 + (k - 1) * a_dim1], &akm1k);
+	    akm1.r = z__1.r, akm1.i = z__1.i;
+	    d_cnjg(&z__2, &akm1k);
+	    z_div(&z__1, &a[k + k * a_dim1], &z__2);
+	    ak.r = z__1.r, ak.i = z__1.i;
+	    z__2.r = akm1.r * ak.r - akm1.i * ak.i, z__2.i = akm1.r * ak.i +
+		    akm1.i * ak.r;
+	    z__1.r = z__2.r - 1., z__1.i = z__2.i - 0.;
+	    denom.r = z__1.r, denom.i = z__1.i;
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		z_div(&z__1, &b[k - 1 + j * b_dim1], &akm1k);
+		bkm1.r = z__1.r, bkm1.i = z__1.i;
+		d_cnjg(&z__2, &akm1k);
+		z_div(&z__1, &b[k + j * b_dim1], &z__2);
+		bk.r = z__1.r, bk.i = z__1.i;
+		i__2 = k - 1 + j * b_dim1;
+		z__3.r = ak.r * bkm1.r - ak.i * bkm1.i, z__3.i = ak.r *
+			bkm1.i + ak.i * bkm1.r;
+		z__2.r = z__3.r - bk.r, z__2.i = z__3.i - bk.i;
+		z_div(&z__1, &z__2, &denom);
+		b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+		i__2 = k + j * b_dim1;
+		z__3.r = akm1.r * bk.r - akm1.i * bk.i, z__3.i = akm1.r *
+			bk.i + akm1.i * bk.r;
+		z__2.r = z__3.r - bkm1.r, z__2.i = z__3.i - bkm1.i;
+		z_div(&z__1, &z__2, &denom);
+		b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L20: */
+	    }
+	    k += -2;
+	}
+
+	goto L10;
+L30:
+
+/*        Next solve U'*X = B, overwriting B with X. */
+
+/*        K is the main loop index, increasing from 1 to N in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	k = 1;
+L40:
+
+/*        If K > N, exit from loop. */
+
+	if (k > *n) {
+	    goto L50;
+	}
+
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Multiply by inv(U'(K)), where U(K) is the transformation */
+/*           stored in column K of A. */
+
+/*           (B(K,:) is conjugated before and after the conjugate- */
+/*           transpose GEMV so the net update uses U**H, not U**T.) */
+
+	    if (k > 1) {
+		zlacgv_(nrhs, &b[k + b_dim1], ldb);
+		i__1 = k - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("Conjugate transpose", &i__1, nrhs, &z__1, &b[b_offset]
+, ldb, &a[k * a_dim1 + 1], &c__1, &c_b1, &b[k +
+			b_dim1], ldb);
+		zlacgv_(nrhs, &b[k + b_dim1], ldb);
+	    }
+
+/*           Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    ++k;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Multiply by inv(U'(K+1)), where U(K+1) is the transformation */
+/*           stored in columns K and K+1 of A. */
+
+	    if (k > 1) {
+		zlacgv_(nrhs, &b[k + b_dim1], ldb);
+		i__1 = k - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("Conjugate transpose", &i__1, nrhs, &z__1, &b[b_offset]
+, ldb, &a[k * a_dim1 + 1], &c__1, &c_b1, &b[k +
+			b_dim1], ldb);
+		zlacgv_(nrhs, &b[k + b_dim1], ldb);
+
+		zlacgv_(nrhs, &b[k + 1 + b_dim1], ldb);
+		i__1 = k - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("Conjugate transpose", &i__1, nrhs, &z__1, &b[b_offset]
+, ldb, &a[(k + 1) * a_dim1 + 1], &c__1, &c_b1, &b[k +
+			1 + b_dim1], ldb);
+		zlacgv_(nrhs, &b[k + 1 + b_dim1], ldb);
+	    }
+
+/*           Interchange rows K and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k) {
+		zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    k += 2;
+	}
+
+	goto L40;
+L50:
+
+	;
+    } else {
+
+/*        Solve A*X = B, where A = L*D*L'. */
+
+/*        First solve L*D*X = B, overwriting B with X. */
+
+/*        K is the main loop index, increasing from 1 to N in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	k = 1;
+L60:
+
+/*        If K > N, exit from loop. */
+
+	if (k > *n) {
+	    goto L80;
+	}
+
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/*           Multiply by inv(L(K)), where L(K) is the transformation */
+/*           stored in column K of A. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zgeru_(&i__1, nrhs, &z__1, &a[k + 1 + k * a_dim1], &c__1, &b[
+			k + b_dim1], ldb, &b[k + 1 + b_dim1], ldb);
+	    }
+
+/*           Multiply by the inverse of the diagonal block. */
+
+	    i__1 = k + k * a_dim1;
+	    s = 1. / a[i__1].r;
+	    zdscal_(nrhs, &s, &b[k + b_dim1], ldb);
+	    ++k;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Interchange rows K+1 and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k + 1) {
+		zswap_(nrhs, &b[k + 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/*           Multiply by inv(L(K)), where L(K) is the transformation */
+/*           stored in columns K and K+1 of A. */
+
+	    if (k < *n - 1) {
+		i__1 = *n - k - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgeru_(&i__1, nrhs, &z__1, &a[k + 2 + k * a_dim1], &c__1, &b[
+			k + b_dim1], ldb, &b[k + 2 + b_dim1], ldb);
+		i__1 = *n - k - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgeru_(&i__1, nrhs, &z__1, &a[k + 2 + (k + 1) * a_dim1], &
+			c__1, &b[k + 1 + b_dim1], ldb, &b[k + 2 + b_dim1],
+			ldb);
+	    }
+
+/*           Multiply by the inverse of the diagonal block. */
+
+/*           (Closed-form inverse of the 2-by-2 block, entries scaled */
+/*           by AKM1K; DENOM = AKM1*AK - 1, as in the upper branch.) */
+
+	    i__1 = k + 1 + k * a_dim1;
+	    akm1k.r = a[i__1].r, akm1k.i = a[i__1].i;
+	    d_cnjg(&z__2, &akm1k);
+	    z_div(&z__1, &a[k + k * a_dim1], &z__2);
+	    akm1.r = z__1.r, akm1.i = z__1.i;
+	    z_div(&z__1, &a[k + 1 + (k + 1) * a_dim1], &akm1k);
+	    ak.r = z__1.r, ak.i = z__1.i;
+	    z__2.r = akm1.r * ak.r - akm1.i * ak.i, z__2.i = akm1.r * ak.i +
+		    akm1.i * ak.r;
+	    z__1.r = z__2.r - 1., z__1.i = z__2.i - 0.;
+	    denom.r = z__1.r, denom.i = z__1.i;
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		d_cnjg(&z__2, &akm1k);
+		z_div(&z__1, &b[k + j * b_dim1], &z__2);
+		bkm1.r = z__1.r, bkm1.i = z__1.i;
+		z_div(&z__1, &b[k + 1 + j * b_dim1], &akm1k);
+		bk.r = z__1.r, bk.i = z__1.i;
+		i__2 = k + j * b_dim1;
+		z__3.r = ak.r * bkm1.r - ak.i * bkm1.i, z__3.i = ak.r *
+			bkm1.i + ak.i * bkm1.r;
+		z__2.r = z__3.r - bk.r, z__2.i = z__3.i - bk.i;
+		z_div(&z__1, &z__2, &denom);
+		b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+		i__2 = k + 1 + j * b_dim1;
+		z__3.r = akm1.r * bk.r - akm1.i * bk.i, z__3.i = akm1.r *
+			bk.i + akm1.i * bk.r;
+		z__2.r = z__3.r - bkm1.r, z__2.i = z__3.i - bkm1.i;
+		z_div(&z__1, &z__2, &denom);
+		b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L70: */
+	    }
+	    k += 2;
+	}
+
+	goto L60;
+L80:
+
+/*        Next solve L'*X = B, overwriting B with X. */
+
+/*        K is the main loop index, decreasing from N to 1 in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	k = *n;
+L90:
+
+/*        If K < 1, exit from loop. */
+
+	if (k < 1) {
+	    goto L100;
+	}
+
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Multiply by inv(L'(K)), where L(K) is the transformation */
+/*           stored in column K of A. */
+
+	    if (k < *n) {
+		zlacgv_(nrhs, &b[k + b_dim1], ldb);
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("Conjugate transpose", &i__1, nrhs, &z__1, &b[k + 1 +
+			b_dim1], ldb, &a[k + 1 + k * a_dim1], &c__1, &c_b1, &
+			b[k + b_dim1], ldb);
+		zlacgv_(nrhs, &b[k + b_dim1], ldb);
+	    }
+
+/*           Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    --k;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Multiply by inv(L'(K-1)), where L(K-1) is the transformation */
+/*           stored in columns K-1 and K of A. */
+
+	    if (k < *n) {
+		zlacgv_(nrhs, &b[k + b_dim1], ldb);
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("Conjugate transpose", &i__1, nrhs, &z__1, &b[k + 1 +
+			b_dim1], ldb, &a[k + 1 + k * a_dim1], &c__1, &c_b1, &
+			b[k + b_dim1], ldb);
+		zlacgv_(nrhs, &b[k + b_dim1], ldb);
+
+		zlacgv_(nrhs, &b[k - 1 + b_dim1], ldb);
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("Conjugate transpose", &i__1, nrhs, &z__1, &b[k + 1 +
+			b_dim1], ldb, &a[k + 1 + (k - 1) * a_dim1], &c__1, &
+			c_b1, &b[k - 1 + b_dim1], ldb);
+		zlacgv_(nrhs, &b[k - 1 + b_dim1], ldb);
+	    }
+
+/*           Interchange rows K and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k) {
+		zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    k += -2;
+	}
+
+	goto L90;
+L100:
+	;
+    }
+
+    return 0;
+
+/*     End of ZHETRS */
+
+} /* zhetrs_ */
diff --git a/contrib/libs/clapack/zhfrk.c b/contrib/libs/clapack/zhfrk.c
new file mode 100644
index 0000000000..87f3043b9b
--- /dev/null
+++ b/contrib/libs/clapack/zhfrk.c
@@ -0,0 +1,531 @@
+/* zhfrk.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zhfrk_(char *transr, char *uplo, char *trans, integer *n,
+ integer *k, doublereal *alpha, doublecomplex *a, integer *lda,
+ doublereal *beta, doublecomplex *c__)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer j, n1, n2, nk, info;
+ doublecomplex cbeta;
+ logical normaltransr;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), zherk_(char *, char *, integer *,
+ integer *, doublereal *, doublecomplex *, integer *, doublereal *,
+ doublecomplex *, integer *);
+ integer nrowa;
+ logical lower;
+ doublecomplex calpha;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical nisodd, notrans;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Julien Langou of the Univ. of Colorado Denver -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Level 3 BLAS like routine for C in RFP Format. */
+
+/* ZHFRK performs one of the Hermitian rank--k operations */
+
+/* C := alpha*A*conjg( A' ) + beta*C, */
+
+/* or */
+
+/* C := alpha*conjg( A' )*A + beta*C, */
+
+/* where alpha and beta are real scalars, C is an n--by--n Hermitian */
+/* matrix and A is an n--by--k matrix in the first case and a k--by--n */
+/* matrix in the second case. */
+
+/* Arguments */
+/* ========== */
+
+/* TRANSR (input) CHARACTER. */
+/* = 'N': The Normal Form of RFP A is stored; */
+/* = 'C': The Conjugate-transpose Form of RFP A is stored. */
+
+/* UPLO - (input) CHARACTER. */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the array C is to be referenced as */
+/* follows: */
+
+/* UPLO = 'U' or 'u' Only the upper triangular part of C */
+/* is to be referenced. */
+
+/* UPLO = 'L' or 'l' Only the lower triangular part of C */
+/* is to be referenced. */
+
+/* Unchanged on exit. */
+
+/* TRANS - (input) CHARACTER. */
+/* On entry, TRANS specifies the operation to be performed as */
+/* follows: */
+
+/* TRANS = 'N' or 'n' C := alpha*A*conjg( A' ) + beta*C. */
+
+/* TRANS = 'C' or 'c' C := alpha*conjg( A' )*A + beta*C. */
+
+/* Unchanged on exit. */
+
+/* N - (input) INTEGER. */
+/* On entry, N specifies the order of the matrix C. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* K - (input) INTEGER. */
+/* On entry with TRANS = 'N' or 'n', K specifies the number */
+/* of columns of the matrix A, and on entry with */
+/* TRANS = 'C' or 'c', K specifies the number of rows of the */
+/* matrix A. K must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - (input) DOUBLE PRECISION. */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* A - (input) COMPLEX*16 array of DIMENSION ( LDA, ka ), where KA */
+/* is K when TRANS = 'N' or 'n', and is N otherwise. Before */
+/* entry with TRANS = 'N' or 'n', the leading N--by--K part of */
+/* the array A must contain the matrix A, otherwise the leading */
+/* K--by--N part of the array A must contain the matrix A. */
+/* Unchanged on exit. */
+
+/* LDA - (input) INTEGER. */
+/* On entry, LDA specifies the first dimension of A as declared */
+/* in the calling (sub) program. When TRANS = 'N' or 'n' */
+/* then LDA must be at least max( 1, n ), otherwise LDA must */
+/* be at least max( 1, k ). */
+/* Unchanged on exit. */
+
+/* BETA - (input) DOUBLE PRECISION. */
+/* On entry, BETA specifies the scalar beta. */
+/* Unchanged on exit. */
+
+/* C - (input/output) COMPLEX*16 array, dimension ( N*(N+1)/2 ). */
+/* On entry, the matrix A in RFP Format. RFP Format is */
+/* described by TRANSR, UPLO and N. Note that the imaginary */
+/* parts of the diagonal elements need not be set, they are */
+/* assumed to be zero, and on exit they are set to zero. */
+
+/* Arguments */
+/* ========== */
+
+/* .. */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --c__;
+
+ /* Function Body */
+ info = 0;
+ normaltransr = lsame_(transr, "N");
+ lower = lsame_(uplo, "L");
+ notrans = lsame_(trans, "N");
+
+ if (notrans) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+
+ if (! normaltransr && ! lsame_(transr, "C")) {
+ info = -1;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ info = -2;
+ } else if (! notrans && ! lsame_(trans, "C")) {
+ info = -3;
+ } else if (*n < 0) {
+ info = -4;
+ } else if (*k < 0) {
+ info = -5;
+ } else if (*lda < max(1,nrowa)) {
+ info = -8;
+ }
+ if (info != 0) {
+ i__1 = -info;
+ xerbla_("ZHFRK ", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+/* The quick return case: ((ALPHA.EQ.0).AND.(BETA.NE.ZERO)) is not */
+/* done (it is in ZHERK for example) and left in the general case. */
+
+ if (*n == 0 || (*alpha == 0. || *k == 0) && *beta == 1.) {
+ return 0;
+ }
+
+ if (*alpha == 0. && *beta == 0.) {
+ i__1 = *n * (*n + 1) / 2;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ c__[i__2].r = 0., c__[i__2].i = 0.;
+ }
+ return 0;
+ }
+
+ z__1.r = *alpha, z__1.i = 0.;
+ calpha.r = z__1.r, calpha.i = z__1.i;
+ z__1.r = *beta, z__1.i = 0.;
+ cbeta.r = z__1.r, cbeta.i = z__1.i;
+
+/* C is N-by-N. */
+/* If N is odd, set NISODD = .TRUE., and N1 and N2. */
+/* If N is even, NISODD = .FALSE., and NK. */
+
+ if (*n % 2 == 0) {
+ nisodd = FALSE_;
+ nk = *n / 2;
+ } else {
+ nisodd = TRUE_;
+ if (lower) {
+ n2 = *n / 2;
+ n1 = *n - n2;
+ } else {
+ n1 = *n / 2;
+ n2 = *n - n1;
+ }
+ }
+
+ if (nisodd) {
+
+/* N is odd */
+
+ if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+ if (lower) {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* N is odd, TRANSR = 'N', UPLO = 'L', and TRANS = 'N' */
+
+ zherk_("L", "N", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[1], n);
+ zherk_("U", "N", &n2, k, alpha, &a[n1 + 1 + a_dim1], lda,
+ beta, &c__[*n + 1], n);
+ zgemm_("N", "C", &n2, &n1, k, &calpha, &a[n1 + 1 + a_dim1]
+, lda, &a[a_dim1 + 1], lda, &cbeta, &c__[n1 + 1],
+ n);
+
+ } else {
+
+/* N is odd, TRANSR = 'N', UPLO = 'L', and TRANS = 'C' */
+
+ zherk_("L", "C", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[1], n);
+ zherk_("U", "C", &n2, k, alpha, &a[(n1 + 1) * a_dim1 + 1],
+ lda, beta, &c__[*n + 1], n)
+ ;
+ zgemm_("C", "N", &n2, &n1, k, &calpha, &a[(n1 + 1) *
+ a_dim1 + 1], lda, &a[a_dim1 + 1], lda, &cbeta, &
+ c__[n1 + 1], n);
+
+ }
+
+ } else {
+
+/* N is odd, TRANSR = 'N', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* N is odd, TRANSR = 'N', UPLO = 'U', and TRANS = 'N' */
+
+ zherk_("L", "N", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[n2 + 1], n);
+ zherk_("U", "N", &n2, k, alpha, &a[n2 + a_dim1], lda,
+ beta, &c__[n1 + 1], n);
+ zgemm_("N", "C", &n1, &n2, k, &calpha, &a[a_dim1 + 1],
+ lda, &a[n2 + a_dim1], lda, &cbeta, &c__[1], n);
+
+ } else {
+
+/* N is odd, TRANSR = 'N', UPLO = 'U', and TRANS = 'C' */
+
+ zherk_("L", "C", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[n2 + 1], n);
+ zherk_("U", "C", &n2, k, alpha, &a[n2 * a_dim1 + 1], lda,
+ beta, &c__[n1 + 1], n);
+ zgemm_("C", "N", &n1, &n2, k, &calpha, &a[a_dim1 + 1],
+ lda, &a[n2 * a_dim1 + 1], lda, &cbeta, &c__[1], n);
+
+ }
+
+ }
+
+ } else {
+
+/* N is odd, and TRANSR = 'C' */
+
+ if (lower) {
+
+/* N is odd, TRANSR = 'C', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* N is odd, TRANSR = 'C', UPLO = 'L', and TRANS = 'N' */
+
+ zherk_("U", "N", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[1], &n1);
+ zherk_("L", "N", &n2, k, alpha, &a[n1 + 1 + a_dim1], lda,
+ beta, &c__[2], &n1);
+ zgemm_("N", "C", &n1, &n2, k, &calpha, &a[a_dim1 + 1],
+ lda, &a[n1 + 1 + a_dim1], lda, &cbeta, &c__[n1 *
+ n1 + 1], &n1);
+
+ } else {
+
+/* N is odd, TRANSR = 'C', UPLO = 'L', and TRANS = 'C' */
+
+ zherk_("U", "C", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[1], &n1);
+ zherk_("L", "C", &n2, k, alpha, &a[(n1 + 1) * a_dim1 + 1],
+ lda, beta, &c__[2], &n1);
+ zgemm_("C", "N", &n1, &n2, k, &calpha, &a[a_dim1 + 1],
+ lda, &a[(n1 + 1) * a_dim1 + 1], lda, &cbeta, &c__[
+ n1 * n1 + 1], &n1);
+
+ }
+
+ } else {
+
+/* N is odd, TRANSR = 'C', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* N is odd, TRANSR = 'C', UPLO = 'U', and TRANS = 'N' */
+
+ zherk_("U", "N", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[n2 * n2 + 1], &n2);
+ zherk_("L", "N", &n2, k, alpha, &a[n1 + 1 + a_dim1], lda,
+ beta, &c__[n1 * n2 + 1], &n2);
+ zgemm_("N", "C", &n2, &n1, k, &calpha, &a[n1 + 1 + a_dim1]
+, lda, &a[a_dim1 + 1], lda, &cbeta, &c__[1], &n2);
+
+ } else {
+
+/* N is odd, TRANSR = 'C', UPLO = 'U', and TRANS = 'C' */
+
+ zherk_("U", "C", &n1, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[n2 * n2 + 1], &n2);
+ zherk_("L", "C", &n2, k, alpha, &a[(n1 + 1) * a_dim1 + 1],
+ lda, beta, &c__[n1 * n2 + 1], &n2);
+ zgemm_("C", "N", &n2, &n1, k, &calpha, &a[(n1 + 1) *
+ a_dim1 + 1], lda, &a[a_dim1 + 1], lda, &cbeta, &
+ c__[1], &n2);
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* N is even */
+
+ if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+ if (lower) {
+
+/* N is even, TRANSR = 'N', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* N is even, TRANSR = 'N', UPLO = 'L', and TRANS = 'N' */
+
+ i__1 = *n + 1;
+ zherk_("L", "N", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[2], &i__1);
+ i__1 = *n + 1;
+ zherk_("U", "N", &nk, k, alpha, &a[nk + 1 + a_dim1], lda,
+ beta, &c__[1], &i__1);
+ i__1 = *n + 1;
+ zgemm_("N", "C", &nk, &nk, k, &calpha, &a[nk + 1 + a_dim1]
+, lda, &a[a_dim1 + 1], lda, &cbeta, &c__[nk + 2],
+ &i__1);
+
+ } else {
+
+/* N is even, TRANSR = 'N', UPLO = 'L', and TRANS = 'C' */
+
+ i__1 = *n + 1;
+ zherk_("L", "C", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[2], &i__1);
+ i__1 = *n + 1;
+ zherk_("U", "C", &nk, k, alpha, &a[(nk + 1) * a_dim1 + 1],
+ lda, beta, &c__[1], &i__1);
+ i__1 = *n + 1;
+ zgemm_("C", "N", &nk, &nk, k, &calpha, &a[(nk + 1) *
+ a_dim1 + 1], lda, &a[a_dim1 + 1], lda, &cbeta, &
+ c__[nk + 2], &i__1);
+
+ }
+
+ } else {
+
+/* N is even, TRANSR = 'N', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* N is even, TRANSR = 'N', UPLO = 'U', and TRANS = 'N' */
+
+ i__1 = *n + 1;
+ zherk_("L", "N", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[nk + 2], &i__1);
+ i__1 = *n + 1;
+ zherk_("U", "N", &nk, k, alpha, &a[nk + 1 + a_dim1], lda,
+ beta, &c__[nk + 1], &i__1);
+ i__1 = *n + 1;
+ zgemm_("N", "C", &nk, &nk, k, &calpha, &a[a_dim1 + 1],
+ lda, &a[nk + 1 + a_dim1], lda, &cbeta, &c__[1], &
+ i__1);
+
+ } else {
+
+/* N is even, TRANSR = 'N', UPLO = 'U', and TRANS = 'C' */
+
+ i__1 = *n + 1;
+ zherk_("L", "C", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[nk + 2], &i__1);
+ i__1 = *n + 1;
+ zherk_("U", "C", &nk, k, alpha, &a[(nk + 1) * a_dim1 + 1],
+ lda, beta, &c__[nk + 1], &i__1);
+ i__1 = *n + 1;
+ zgemm_("C", "N", &nk, &nk, k, &calpha, &a[a_dim1 + 1],
+ lda, &a[(nk + 1) * a_dim1 + 1], lda, &cbeta, &c__[
+ 1], &i__1);
+
+ }
+
+ }
+
+ } else {
+
+/* N is even, and TRANSR = 'C' */
+
+ if (lower) {
+
+/* N is even, TRANSR = 'C', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* N is even, TRANSR = 'C', UPLO = 'L', and TRANS = 'N' */
+
+ zherk_("U", "N", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[nk + 1], &nk);
+ zherk_("L", "N", &nk, k, alpha, &a[nk + 1 + a_dim1], lda,
+ beta, &c__[1], &nk);
+ zgemm_("N", "C", &nk, &nk, k, &calpha, &a[a_dim1 + 1],
+ lda, &a[nk + 1 + a_dim1], lda, &cbeta, &c__[(nk +
+ 1) * nk + 1], &nk);
+
+ } else {
+
+/* N is even, TRANSR = 'C', UPLO = 'L', and TRANS = 'C' */
+
+ zherk_("U", "C", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[nk + 1], &nk);
+ zherk_("L", "C", &nk, k, alpha, &a[(nk + 1) * a_dim1 + 1],
+ lda, beta, &c__[1], &nk);
+ zgemm_("C", "N", &nk, &nk, k, &calpha, &a[a_dim1 + 1],
+ lda, &a[(nk + 1) * a_dim1 + 1], lda, &cbeta, &c__[
+ (nk + 1) * nk + 1], &nk);
+
+ }
+
+ } else {
+
+/* N is even, TRANSR = 'C', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* N is even, TRANSR = 'C', UPLO = 'U', and TRANS = 'N' */
+
+ zherk_("U", "N", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[nk * (nk + 1) + 1], &nk);
+ zherk_("L", "N", &nk, k, alpha, &a[nk + 1 + a_dim1], lda,
+ beta, &c__[nk * nk + 1], &nk);
+ zgemm_("N", "C", &nk, &nk, k, &calpha, &a[nk + 1 + a_dim1]
+, lda, &a[a_dim1 + 1], lda, &cbeta, &c__[1], &nk);
+
+ } else {
+
+/* N is even, TRANSR = 'C', UPLO = 'U', and TRANS = 'C' */
+
+ zherk_("U", "C", &nk, k, alpha, &a[a_dim1 + 1], lda, beta,
+ &c__[nk * (nk + 1) + 1], &nk);
+ zherk_("L", "C", &nk, k, alpha, &a[(nk + 1) * a_dim1 + 1],
+ lda, beta, &c__[nk * nk + 1], &nk);
+ zgemm_("C", "N", &nk, &nk, k, &calpha, &a[(nk + 1) *
+ a_dim1 + 1], lda, &a[a_dim1 + 1], lda, &cbeta, &
+ c__[1], &nk);
+
+ }
+
+ }
+
+ }
+
+ }
+
+ return 0;
+
+/* End of ZHFRK */
+
+} /* zhfrk_ */
diff --git a/contrib/libs/clapack/zhgeqz.c b/contrib/libs/clapack/zhgeqz.c
new file mode 100644
index 0000000000..779255908b
--- /dev/null
+++ b/contrib/libs/clapack/zhgeqz.c
@@ -0,0 +1,1149 @@
+/* zhgeqz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int zhgeqz_(char *job, char *compq, char *compz, integer *n,
+ integer *ilo, integer *ihi, doublecomplex *h__, integer *ldh,
+ doublecomplex *t, integer *ldt, doublecomplex *alpha, doublecomplex *
+ beta, doublecomplex *q, integer *ldq, doublecomplex *z__, integer *
+ ldz, doublecomplex *work, integer *lwork, doublereal *rwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, q_dim1, q_offset, t_dim1, t_offset, z_dim1,
+ z_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+ doublereal d__1, d__2, d__3, d__4, d__5, d__6;
+ doublecomplex z__1, z__2, z__3, z__4, z__5, z__6;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+ double d_imag(doublecomplex *);
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *), pow_zi(
+ doublecomplex *, doublecomplex *, integer *), z_sqrt(
+ doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ doublereal c__;
+ integer j;
+ doublecomplex s, t1;
+ integer jc, in;
+ doublecomplex u12;
+ integer jr;
+ doublecomplex ad11, ad12, ad21, ad22;
+ integer jch;
+ logical ilq, ilz;
+ doublereal ulp;
+ doublecomplex abi22;
+ doublereal absb, atol, btol, temp;
+ extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublereal *, doublecomplex *);
+ doublereal temp2;
+ extern logical lsame_(char *, char *);
+ doublecomplex ctemp;
+ integer iiter, ilast, jiter;
+ doublereal anorm, bnorm;
+ integer maxit;
+ doublecomplex shift;
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *);
+ doublereal tempr;
+ doublecomplex ctemp2, ctemp3;
+ logical ilazr2;
+ doublereal ascale, bscale;
+ extern doublereal dlamch_(char *);
+ doublecomplex signbc;
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublecomplex eshift;
+ logical ilschr;
+ integer icompq, ilastm;
+ doublecomplex rtdisc;
+ integer ischur;
+ extern doublereal zlanhs_(char *, integer *, doublecomplex *, integer *,
+ doublereal *);
+ logical ilazro;
+ integer icompz, ifirst;
+ extern /* Subroutine */ int zlartg_(doublecomplex *, doublecomplex *,
+ doublereal *, doublecomplex *, doublecomplex *);
+ integer ifrstm;
+ extern /* Subroutine */ int zlaset_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, doublecomplex *, integer *);
+ integer istart;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHGEQZ computes the eigenvalues of a complex matrix pair (H,T), */
+/* where H is an upper Hessenberg matrix and T is upper triangular, */
+/* using the single-shift QZ method. */
+/* Matrix pairs of this type are produced by the reduction to */
+/* generalized upper Hessenberg form of a complex matrix pair (A,B): */
+
+/* A = Q1*H*Z1**H, B = Q1*T*Z1**H, */
+
+/* as computed by ZGGHRD. */
+
+/* If JOB='S', then the Hessenberg-triangular pair (H,T) is */
+/* also reduced to generalized Schur form, */
+
+/* H = Q*S*Z**H, T = Q*P*Z**H, */
+
+/* where Q and Z are unitary matrices and S and P are upper triangular. */
+
+/* Optionally, the unitary matrix Q from the generalized Schur */
+/* factorization may be postmultiplied into an input matrix Q1, and the */
+/* unitary matrix Z may be postmultiplied into an input matrix Z1. */
+/* If Q1 and Z1 are the unitary matrices from ZGGHRD that reduced */
+/* the matrix pair (A,B) to generalized Hessenberg form, then the output */
+/* matrices Q1*Q and Z1*Z are the unitary factors from the generalized */
+/* Schur factorization of (A,B): */
+
+/* A = (Q1*Q)*S*(Z1*Z)**H, B = (Q1*Q)*P*(Z1*Z)**H. */
+
+/* To avoid overflow, eigenvalues of the matrix pair (H,T) */
+/* (equivalently, of (A,B)) are computed as a pair of complex values */
+/* (alpha,beta). If beta is nonzero, lambda = alpha / beta is an */
+/* eigenvalue of the generalized nonsymmetric eigenvalue problem (GNEP) */
+/* A*x = lambda*B*x */
+/* and if alpha is nonzero, mu = beta / alpha is an eigenvalue of the */
+/* alternate form of the GNEP */
+/* mu*A*y = B*y. */
+/* The values of alpha and beta for the i-th eigenvalue can be read */
+/* directly from the generalized Schur form: alpha = S(i,i), */
+/* beta = P(i,i). */
+
+/* Ref: C.B. Moler & G.W. Stewart, "An Algorithm for Generalized Matrix */
+/* Eigenvalue Problems", SIAM J. Numer. Anal., 10(1973), */
+/* pp. 241--256. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* = 'E': Compute eigenvalues only; */
+/* = 'S': Computer eigenvalues and the Schur form. */
+
+/* COMPQ (input) CHARACTER*1 */
+/* = 'N': Left Schur vectors (Q) are not computed; */
+/* = 'I': Q is initialized to the unit matrix and the matrix Q */
+/* of left Schur vectors of (H,T) is returned; */
+/* = 'V': Q must contain a unitary matrix Q1 on entry and */
+/* the product Q1*Q is returned. */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': Right Schur vectors (Z) are not computed; */
+/* = 'I': Q is initialized to the unit matrix and the matrix Z */
+/* of right Schur vectors of (H,T) is returned; */
+/* = 'V': Z must contain a unitary matrix Z1 on entry and */
+/* the product Z1*Z is returned. */
+
+/* N (input) INTEGER */
+/* The order of the matrices H, T, Q, and Z. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* ILO and IHI mark the rows and columns of H which are in */
+/* Hessenberg form. It is assumed that A is already upper */
+/* triangular in rows and columns 1:ILO-1 and IHI+1:N. */
+/* If N > 0, 1 <= ILO <= IHI <= N; if N = 0, ILO=1 and IHI=0. */
+
+/* H (input/output) COMPLEX*16 array, dimension (LDH, N) */
+/* On entry, the N-by-N upper Hessenberg matrix H. */
+/* On exit, if JOB = 'S', H contains the upper triangular */
+/* matrix S from the generalized Schur factorization. */
+/* If JOB = 'E', the diagonal of H matches that of S, but */
+/* the rest of H is unspecified. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH >= max( 1, N ). */
+
+/* T (input/output) COMPLEX*16 array, dimension (LDT, N) */
+/* On entry, the N-by-N upper triangular matrix T. */
+/* On exit, if JOB = 'S', T contains the upper triangular */
+/* matrix P from the generalized Schur factorization. */
+/* If JOB = 'E', the diagonal of T matches that of P, but */
+/* the rest of T is unspecified. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max( 1, N ). */
+
+/* ALPHA (output) COMPLEX*16 array, dimension (N) */
+/* The complex scalars alpha that define the eigenvalues of */
+/* GNEP. ALPHA(i) = S(i,i) in the generalized Schur */
+/* factorization. */
+
+/* BETA (output) COMPLEX*16 array, dimension (N) */
+/* The real non-negative scalars beta that define the */
+/* eigenvalues of GNEP. BETA(i) = P(i,i) in the generalized */
+/* Schur factorization. */
+
+/* Together, the quantities alpha = ALPHA(j) and beta = BETA(j) */
+/* represent the j-th eigenvalue of the matrix pair (A,B), in */
+/* one of the forms lambda = alpha/beta or mu = beta/alpha. */
+/* Since either lambda or mu may overflow, they should not, */
+/* in general, be computed. */
+
+/* Q (input/output) COMPLEX*16 array, dimension (LDQ, N) */
+/* On entry, if COMPZ = 'V', the unitary matrix Q1 used in the */
+/* reduction of (A,B) to generalized Hessenberg form. */
+/* On exit, if COMPZ = 'I', the unitary matrix of left Schur */
+/* vectors of (H,T), and if COMPZ = 'V', the unitary matrix of */
+/* left Schur vectors of (A,B). */
+/* Not referenced if COMPZ = 'N'. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= 1. */
+/* If COMPQ='V' or 'I', then LDQ >= N. */
+
+/* Z (input/output) COMPLEX*16 array, dimension (LDZ, N) */
+/* On entry, if COMPZ = 'V', the unitary matrix Z1 used in the */
+/* reduction of (A,B) to generalized Hessenberg form. */
+/* On exit, if COMPZ = 'I', the unitary matrix of right Schur */
+/* vectors of (H,T), and if COMPZ = 'V', the unitary matrix of */
+/* right Schur vectors of (A,B). */
+/* Not referenced if COMPZ = 'N'. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1. */
+/* If COMPZ='V' or 'I', then LDZ >= N. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO >= 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* = 1,...,N: the QZ iteration did not converge. (H,T) is not */
+/* in Schur form, but ALPHA(i) and BETA(i), */
+/* i=INFO+1,...,N should be correct. */
+/* = N+1,...,2*N: the shift calculation failed. (H,T) is not */
+/* in Schur form, but ALPHA(i) and BETA(i), */
+/* i=INFO-N+1,...,N should be correct. */
+
+/* Further Details */
+/* =============== */
+
+/* We assume that complex ABS works as long as its value is less than */
+/* overflow. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode JOB, COMPQ, COMPZ */
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ --alpha;
+ --beta;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ if (lsame_(job, "E")) {
+ ilschr = FALSE_;
+ ischur = 1;
+ } else if (lsame_(job, "S")) {
+ ilschr = TRUE_;
+ ischur = 2;
+ } else {
+ ischur = 0;
+ }
+
+ if (lsame_(compq, "N")) {
+ ilq = FALSE_;
+ icompq = 1;
+ } else if (lsame_(compq, "V")) {
+ ilq = TRUE_;
+ icompq = 2;
+ } else if (lsame_(compq, "I")) {
+ ilq = TRUE_;
+ icompq = 3;
+ } else {
+ icompq = 0;
+ }
+
+ if (lsame_(compz, "N")) {
+ ilz = FALSE_;
+ icompz = 1;
+ } else if (lsame_(compz, "V")) {
+ ilz = TRUE_;
+ icompz = 2;
+ } else if (lsame_(compz, "I")) {
+ ilz = TRUE_;
+ icompz = 3;
+ } else {
+ icompz = 0;
+ }
+
+/* Check Argument Values */
+
+ *info = 0;
+ i__1 = max(1,*n);
+ work[1].r = (doublereal) i__1, work[1].i = 0.;
+ lquery = *lwork == -1;
+ if (ischur == 0) {
+ *info = -1;
+ } else if (icompq == 0) {
+ *info = -2;
+ } else if (icompz == 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ilo < 1) {
+ *info = -5;
+ } else if (*ihi > *n || *ihi < *ilo - 1) {
+ *info = -6;
+ } else if (*ldh < *n) {
+ *info = -8;
+ } else if (*ldt < *n) {
+ *info = -10;
+ } else if (*ldq < 1 || ilq && *ldq < *n) {
+ *info = -14;
+ } else if (*ldz < 1 || ilz && *ldz < *n) {
+ *info = -16;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -18;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHGEQZ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+/* WORK( 1 ) = CMPLX( 1 ) */
+ if (*n <= 0) {
+ work[1].r = 1., work[1].i = 0.;
+ return 0;
+ }
+
+/* Initialize Q and Z */
+
+ if (icompq == 3) {
+ zlaset_("Full", n, n, &c_b1, &c_b2, &q[q_offset], ldq);
+ }
+ if (icompz == 3) {
+ zlaset_("Full", n, n, &c_b1, &c_b2, &z__[z_offset], ldz);
+ }
+
+/* Machine Constants */
+
+ in = *ihi + 1 - *ilo;
+ safmin = dlamch_("S");
+ ulp = dlamch_("E") * dlamch_("B");
+ anorm = zlanhs_("F", &in, &h__[*ilo + *ilo * h_dim1], ldh, &rwork[1]);
+ bnorm = zlanhs_("F", &in, &t[*ilo + *ilo * t_dim1], ldt, &rwork[1]);
+/* Computing MAX */
+ d__1 = safmin, d__2 = ulp * anorm;
+ atol = max(d__1,d__2);
+/* Computing MAX */
+ d__1 = safmin, d__2 = ulp * bnorm;
+ btol = max(d__1,d__2);
+ ascale = 1. / max(safmin,anorm);
+ bscale = 1. / max(safmin,bnorm);
+
+
+/* Set Eigenvalues IHI+1:N */
+
+ i__1 = *n;
+ for (j = *ihi + 1; j <= i__1; ++j) {
+ absb = z_abs(&t[j + j * t_dim1]);
+ if (absb > safmin) {
+ i__2 = j + j * t_dim1;
+ z__2.r = t[i__2].r / absb, z__2.i = t[i__2].i / absb;
+ d_cnjg(&z__1, &z__2);
+ signbc.r = z__1.r, signbc.i = z__1.i;
+ i__2 = j + j * t_dim1;
+ t[i__2].r = absb, t[i__2].i = 0.;
+ if (ilschr) {
+ i__2 = j - 1;
+ zscal_(&i__2, &signbc, &t[j * t_dim1 + 1], &c__1);
+ zscal_(&j, &signbc, &h__[j * h_dim1 + 1], &c__1);
+ } else {
+ i__2 = j + j * h_dim1;
+ i__3 = j + j * h_dim1;
+ z__1.r = h__[i__3].r * signbc.r - h__[i__3].i * signbc.i,
+ z__1.i = h__[i__3].r * signbc.i + h__[i__3].i *
+ signbc.r;
+ h__[i__2].r = z__1.r, h__[i__2].i = z__1.i;
+ }
+ if (ilz) {
+ zscal_(n, &signbc, &z__[j * z_dim1 + 1], &c__1);
+ }
+ } else {
+ i__2 = j + j * t_dim1;
+ t[i__2].r = 0., t[i__2].i = 0.;
+ }
+ i__2 = j;
+ i__3 = j + j * h_dim1;
+ alpha[i__2].r = h__[i__3].r, alpha[i__2].i = h__[i__3].i;
+ i__2 = j;
+ i__3 = j + j * t_dim1;
+ beta[i__2].r = t[i__3].r, beta[i__2].i = t[i__3].i;
+/* L10: */
+ }
+
+/* If IHI < ILO, skip QZ steps */
+
+ if (*ihi < *ilo) {
+ goto L190;
+ }
+
+/* MAIN QZ ITERATION LOOP */
+
+/* Initialize dynamic indices */
+
+/* Eigenvalues ILAST+1:N have been found. */
+/* Column operations modify rows IFRSTM:whatever */
+/* Row operations modify columns whatever:ILASTM */
+
+/* If only eigenvalues are being computed, then */
+/* IFRSTM is the row of the last splitting row above row ILAST; */
+/* this is always at least ILO. */
+/* IITER counts iterations since the last eigenvalue was found, */
+/* to tell when to use an extraordinary shift. */
+/* MAXIT is the maximum number of QZ sweeps allowed. */
+
+ ilast = *ihi;
+ if (ilschr) {
+ ifrstm = 1;
+ ilastm = *n;
+ } else {
+ ifrstm = *ilo;
+ ilastm = *ihi;
+ }
+ iiter = 0;
+ eshift.r = 0., eshift.i = 0.;
+ maxit = (*ihi - *ilo + 1) * 30;
+
+ i__1 = maxit;
+ for (jiter = 1; jiter <= i__1; ++jiter) {
+
+/* Check for too many iterations. */
+
+ if (jiter > maxit) {
+ goto L180;
+ }
+
+/* Split the matrix if possible. */
+
+/* Two tests: */
+/* 1: H(j,j-1)=0 or j=ILO */
+/* 2: T(j,j)=0 */
+
+/* Special case: j=ILAST */
+
+ if (ilast == *ilo) {
+ goto L60;
+ } else {
+ i__2 = ilast + (ilast - 1) * h_dim1;
+ if ((d__1 = h__[i__2].r, abs(d__1)) + (d__2 = d_imag(&h__[ilast +
+ (ilast - 1) * h_dim1]), abs(d__2)) <= atol) {
+ i__2 = ilast + (ilast - 1) * h_dim1;
+ h__[i__2].r = 0., h__[i__2].i = 0.;
+ goto L60;
+ }
+ }
+
+ if (z_abs(&t[ilast + ilast * t_dim1]) <= btol) {
+ i__2 = ilast + ilast * t_dim1;
+ t[i__2].r = 0., t[i__2].i = 0.;
+ goto L50;
+ }
+
+/* General case: j<ILAST */
+
+ i__2 = *ilo;
+ for (j = ilast - 1; j >= i__2; --j) {
+
+/* Test 1: for H(j,j-1)=0 or j=ILO */
+
+ if (j == *ilo) {
+ ilazro = TRUE_;
+ } else {
+ i__3 = j + (j - 1) * h_dim1;
+ if ((d__1 = h__[i__3].r, abs(d__1)) + (d__2 = d_imag(&h__[j +
+ (j - 1) * h_dim1]), abs(d__2)) <= atol) {
+ i__3 = j + (j - 1) * h_dim1;
+ h__[i__3].r = 0., h__[i__3].i = 0.;
+ ilazro = TRUE_;
+ } else {
+ ilazro = FALSE_;
+ }
+ }
+
+/* Test 2: for T(j,j)=0 */
+
+ if (z_abs(&t[j + j * t_dim1]) < btol) {
+ i__3 = j + j * t_dim1;
+ t[i__3].r = 0., t[i__3].i = 0.;
+
+/* Test 1a: Check for 2 consecutive small subdiagonals in A */
+
+ ilazr2 = FALSE_;
+ if (! ilazro) {
+ i__3 = j + (j - 1) * h_dim1;
+ i__4 = j + 1 + j * h_dim1;
+ i__5 = j + j * h_dim1;
+ if (((d__1 = h__[i__3].r, abs(d__1)) + (d__2 = d_imag(&
+ h__[j + (j - 1) * h_dim1]), abs(d__2))) * (ascale
+ * ((d__3 = h__[i__4].r, abs(d__3)) + (d__4 =
+ d_imag(&h__[j + 1 + j * h_dim1]), abs(d__4)))) <=
+ ((d__5 = h__[i__5].r, abs(d__5)) + (d__6 = d_imag(
+ &h__[j + j * h_dim1]), abs(d__6))) * (ascale *
+ atol)) {
+ ilazr2 = TRUE_;
+ }
+ }
+
+/* If both tests pass (1 & 2), i.e., the leading diagonal */
+/* element of B in the block is zero, split a 1x1 block off */
+/* at the top. (I.e., at the J-th row/column) The leading */
+/* diagonal element of the remainder can also be zero, so */
+/* this may have to be done repeatedly. */
+
+ if (ilazro || ilazr2) {
+ i__3 = ilast - 1;
+ for (jch = j; jch <= i__3; ++jch) {
+ i__4 = jch + jch * h_dim1;
+ ctemp.r = h__[i__4].r, ctemp.i = h__[i__4].i;
+ zlartg_(&ctemp, &h__[jch + 1 + jch * h_dim1], &c__, &
+ s, &h__[jch + jch * h_dim1]);
+ i__4 = jch + 1 + jch * h_dim1;
+ h__[i__4].r = 0., h__[i__4].i = 0.;
+ i__4 = ilastm - jch;
+ zrot_(&i__4, &h__[jch + (jch + 1) * h_dim1], ldh, &
+ h__[jch + 1 + (jch + 1) * h_dim1], ldh, &c__,
+ &s);
+ i__4 = ilastm - jch;
+ zrot_(&i__4, &t[jch + (jch + 1) * t_dim1], ldt, &t[
+ jch + 1 + (jch + 1) * t_dim1], ldt, &c__, &s);
+ if (ilq) {
+ d_cnjg(&z__1, &s);
+ zrot_(n, &q[jch * q_dim1 + 1], &c__1, &q[(jch + 1)
+ * q_dim1 + 1], &c__1, &c__, &z__1);
+ }
+ if (ilazr2) {
+ i__4 = jch + (jch - 1) * h_dim1;
+ i__5 = jch + (jch - 1) * h_dim1;
+ z__1.r = c__ * h__[i__5].r, z__1.i = c__ * h__[
+ i__5].i;
+ h__[i__4].r = z__1.r, h__[i__4].i = z__1.i;
+ }
+ ilazr2 = FALSE_;
+ i__4 = jch + 1 + (jch + 1) * t_dim1;
+ if ((d__1 = t[i__4].r, abs(d__1)) + (d__2 = d_imag(&t[
+ jch + 1 + (jch + 1) * t_dim1]), abs(d__2)) >=
+ btol) {
+ if (jch + 1 >= ilast) {
+ goto L60;
+ } else {
+ ifirst = jch + 1;
+ goto L70;
+ }
+ }
+ i__4 = jch + 1 + (jch + 1) * t_dim1;
+ t[i__4].r = 0., t[i__4].i = 0.;
+/* L20: */
+ }
+ goto L50;
+ } else {
+
+/* Only test 2 passed -- chase the zero to T(ILAST,ILAST) */
+/* Then process as in the case T(ILAST,ILAST)=0 */
+
+ i__3 = ilast - 1;
+ for (jch = j; jch <= i__3; ++jch) {
+ i__4 = jch + (jch + 1) * t_dim1;
+ ctemp.r = t[i__4].r, ctemp.i = t[i__4].i;
+ zlartg_(&ctemp, &t[jch + 1 + (jch + 1) * t_dim1], &
+ c__, &s, &t[jch + (jch + 1) * t_dim1]);
+ i__4 = jch + 1 + (jch + 1) * t_dim1;
+ t[i__4].r = 0., t[i__4].i = 0.;
+ if (jch < ilastm - 1) {
+ i__4 = ilastm - jch - 1;
+ zrot_(&i__4, &t[jch + (jch + 2) * t_dim1], ldt, &
+ t[jch + 1 + (jch + 2) * t_dim1], ldt, &
+ c__, &s);
+ }
+ i__4 = ilastm - jch + 2;
+ zrot_(&i__4, &h__[jch + (jch - 1) * h_dim1], ldh, &
+ h__[jch + 1 + (jch - 1) * h_dim1], ldh, &c__,
+ &s);
+ if (ilq) {
+ d_cnjg(&z__1, &s);
+ zrot_(n, &q[jch * q_dim1 + 1], &c__1, &q[(jch + 1)
+ * q_dim1 + 1], &c__1, &c__, &z__1);
+ }
+ i__4 = jch + 1 + jch * h_dim1;
+ ctemp.r = h__[i__4].r, ctemp.i = h__[i__4].i;
+ zlartg_(&ctemp, &h__[jch + 1 + (jch - 1) * h_dim1], &
+ c__, &s, &h__[jch + 1 + jch * h_dim1]);
+ i__4 = jch + 1 + (jch - 1) * h_dim1;
+ h__[i__4].r = 0., h__[i__4].i = 0.;
+ i__4 = jch + 1 - ifrstm;
+ zrot_(&i__4, &h__[ifrstm + jch * h_dim1], &c__1, &h__[
+ ifrstm + (jch - 1) * h_dim1], &c__1, &c__, &s)
+ ;
+ i__4 = jch - ifrstm;
+ zrot_(&i__4, &t[ifrstm + jch * t_dim1], &c__1, &t[
+ ifrstm + (jch - 1) * t_dim1], &c__1, &c__, &s)
+ ;
+ if (ilz) {
+ zrot_(n, &z__[jch * z_dim1 + 1], &c__1, &z__[(jch
+ - 1) * z_dim1 + 1], &c__1, &c__, &s);
+ }
+/* L30: */
+ }
+ goto L50;
+ }
+ } else if (ilazro) {
+
+/* Only test 1 passed -- work on J:ILAST */
+
+ ifirst = j;
+ goto L70;
+ }
+
+/* Neither test passed -- try next J */
+
+/* L40: */
+ }
+
+/* (Drop-through is "impossible") */
+
+ *info = (*n << 1) + 1;
+ goto L210;
+
+/* T(ILAST,ILAST)=0 -- clear H(ILAST,ILAST-1) to split off a */
+/* 1x1 block. */
+
+L50:
+ i__2 = ilast + ilast * h_dim1;
+ ctemp.r = h__[i__2].r, ctemp.i = h__[i__2].i;
+ zlartg_(&ctemp, &h__[ilast + (ilast - 1) * h_dim1], &c__, &s, &h__[
+ ilast + ilast * h_dim1]);
+ i__2 = ilast + (ilast - 1) * h_dim1;
+ h__[i__2].r = 0., h__[i__2].i = 0.;
+ i__2 = ilast - ifrstm;
+ zrot_(&i__2, &h__[ifrstm + ilast * h_dim1], &c__1, &h__[ifrstm + (
+ ilast - 1) * h_dim1], &c__1, &c__, &s);
+ i__2 = ilast - ifrstm;
+ zrot_(&i__2, &t[ifrstm + ilast * t_dim1], &c__1, &t[ifrstm + (ilast -
+ 1) * t_dim1], &c__1, &c__, &s);
+ if (ilz) {
+ zrot_(n, &z__[ilast * z_dim1 + 1], &c__1, &z__[(ilast - 1) *
+ z_dim1 + 1], &c__1, &c__, &s);
+ }
+
+/* H(ILAST,ILAST-1)=0 -- Standardize B, set ALPHA and BETA */
+
+L60:
+ absb = z_abs(&t[ilast + ilast * t_dim1]);
+ if (absb > safmin) {
+ i__2 = ilast + ilast * t_dim1;
+ z__2.r = t[i__2].r / absb, z__2.i = t[i__2].i / absb;
+ d_cnjg(&z__1, &z__2);
+ signbc.r = z__1.r, signbc.i = z__1.i;
+ i__2 = ilast + ilast * t_dim1;
+ t[i__2].r = absb, t[i__2].i = 0.;
+ if (ilschr) {
+ i__2 = ilast - ifrstm;
+ zscal_(&i__2, &signbc, &t[ifrstm + ilast * t_dim1], &c__1);
+ i__2 = ilast + 1 - ifrstm;
+ zscal_(&i__2, &signbc, &h__[ifrstm + ilast * h_dim1], &c__1);
+ } else {
+ i__2 = ilast + ilast * h_dim1;
+ i__3 = ilast + ilast * h_dim1;
+ z__1.r = h__[i__3].r * signbc.r - h__[i__3].i * signbc.i,
+ z__1.i = h__[i__3].r * signbc.i + h__[i__3].i *
+ signbc.r;
+ h__[i__2].r = z__1.r, h__[i__2].i = z__1.i;
+ }
+ if (ilz) {
+ zscal_(n, &signbc, &z__[ilast * z_dim1 + 1], &c__1);
+ }
+ } else {
+ i__2 = ilast + ilast * t_dim1;
+ t[i__2].r = 0., t[i__2].i = 0.;
+ }
+ i__2 = ilast;
+ i__3 = ilast + ilast * h_dim1;
+ alpha[i__2].r = h__[i__3].r, alpha[i__2].i = h__[i__3].i;
+ i__2 = ilast;
+ i__3 = ilast + ilast * t_dim1;
+ beta[i__2].r = t[i__3].r, beta[i__2].i = t[i__3].i;
+
+/* Go to next block -- exit if finished. */
+
+ --ilast;
+ if (ilast < *ilo) {
+ goto L190;
+ }
+
+/* Reset counters */
+
+ iiter = 0;
+ eshift.r = 0., eshift.i = 0.;
+ if (! ilschr) {
+ ilastm = ilast;
+ if (ifrstm > ilast) {
+ ifrstm = *ilo;
+ }
+ }
+ goto L160;
+
+/* QZ step */
+
+/* This iteration only involves rows/columns IFIRST:ILAST. We */
+/* assume IFIRST < ILAST, and that the diagonal of B is non-zero. */
+
+L70:
+ ++iiter;
+ if (! ilschr) {
+ ifrstm = ifirst;
+ }
+
+/* Compute the Shift. */
+
+/* At this point, IFIRST < ILAST, and the diagonal elements of */
+/* T(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in */
+/* magnitude) */
+
+ if (iiter / 10 * 10 != iiter) {
+
+/* The Wilkinson shift (AEP p.512), i.e., the eigenvalue of */
+/* the bottom-right 2x2 block of A inv(B) which is nearest to */
+/* the bottom-right element. */
+
+/* We factor B as U*D, where U has unit diagonals, and */
+/* compute (A*inv(D))*inv(U). */
+
+ i__2 = ilast - 1 + ilast * t_dim1;
+ z__2.r = bscale * t[i__2].r, z__2.i = bscale * t[i__2].i;
+ i__3 = ilast + ilast * t_dim1;
+ z__3.r = bscale * t[i__3].r, z__3.i = bscale * t[i__3].i;
+ z_div(&z__1, &z__2, &z__3);
+ u12.r = z__1.r, u12.i = z__1.i;
+ i__2 = ilast - 1 + (ilast - 1) * h_dim1;
+ z__2.r = ascale * h__[i__2].r, z__2.i = ascale * h__[i__2].i;
+ i__3 = ilast - 1 + (ilast - 1) * t_dim1;
+ z__3.r = bscale * t[i__3].r, z__3.i = bscale * t[i__3].i;
+ z_div(&z__1, &z__2, &z__3);
+ ad11.r = z__1.r, ad11.i = z__1.i;
+ i__2 = ilast + (ilast - 1) * h_dim1;
+ z__2.r = ascale * h__[i__2].r, z__2.i = ascale * h__[i__2].i;
+ i__3 = ilast - 1 + (ilast - 1) * t_dim1;
+ z__3.r = bscale * t[i__3].r, z__3.i = bscale * t[i__3].i;
+ z_div(&z__1, &z__2, &z__3);
+ ad21.r = z__1.r, ad21.i = z__1.i;
+ i__2 = ilast - 1 + ilast * h_dim1;
+ z__2.r = ascale * h__[i__2].r, z__2.i = ascale * h__[i__2].i;
+ i__3 = ilast + ilast * t_dim1;
+ z__3.r = bscale * t[i__3].r, z__3.i = bscale * t[i__3].i;
+ z_div(&z__1, &z__2, &z__3);
+ ad12.r = z__1.r, ad12.i = z__1.i;
+ i__2 = ilast + ilast * h_dim1;
+ z__2.r = ascale * h__[i__2].r, z__2.i = ascale * h__[i__2].i;
+ i__3 = ilast + ilast * t_dim1;
+ z__3.r = bscale * t[i__3].r, z__3.i = bscale * t[i__3].i;
+ z_div(&z__1, &z__2, &z__3);
+ ad22.r = z__1.r, ad22.i = z__1.i;
+ z__2.r = u12.r * ad21.r - u12.i * ad21.i, z__2.i = u12.r * ad21.i
+ + u12.i * ad21.r;
+ z__1.r = ad22.r - z__2.r, z__1.i = ad22.i - z__2.i;
+ abi22.r = z__1.r, abi22.i = z__1.i;
+
+ z__2.r = ad11.r + abi22.r, z__2.i = ad11.i + abi22.i;
+ z__1.r = z__2.r * .5, z__1.i = z__2.i * .5;
+ t1.r = z__1.r, t1.i = z__1.i;
+ pow_zi(&z__4, &t1, &c__2);
+ z__5.r = ad12.r * ad21.r - ad12.i * ad21.i, z__5.i = ad12.r *
+ ad21.i + ad12.i * ad21.r;
+ z__3.r = z__4.r + z__5.r, z__3.i = z__4.i + z__5.i;
+ z__6.r = ad11.r * ad22.r - ad11.i * ad22.i, z__6.i = ad11.r *
+ ad22.i + ad11.i * ad22.r;
+ z__2.r = z__3.r - z__6.r, z__2.i = z__3.i - z__6.i;
+ z_sqrt(&z__1, &z__2);
+ rtdisc.r = z__1.r, rtdisc.i = z__1.i;
+ z__1.r = t1.r - abi22.r, z__1.i = t1.i - abi22.i;
+ z__2.r = t1.r - abi22.r, z__2.i = t1.i - abi22.i;
+ temp = z__1.r * rtdisc.r + d_imag(&z__2) * d_imag(&rtdisc);
+ if (temp <= 0.) {
+ z__1.r = t1.r + rtdisc.r, z__1.i = t1.i + rtdisc.i;
+ shift.r = z__1.r, shift.i = z__1.i;
+ } else {
+ z__1.r = t1.r - rtdisc.r, z__1.i = t1.i - rtdisc.i;
+ shift.r = z__1.r, shift.i = z__1.i;
+ }
+ } else {
+
+/* Exceptional shift. Chosen for no particularly good reason. */
+
+ i__2 = ilast - 1 + ilast * h_dim1;
+ z__4.r = ascale * h__[i__2].r, z__4.i = ascale * h__[i__2].i;
+ i__3 = ilast - 1 + (ilast - 1) * t_dim1;
+ z__5.r = bscale * t[i__3].r, z__5.i = bscale * t[i__3].i;
+ z_div(&z__3, &z__4, &z__5);
+ d_cnjg(&z__2, &z__3);
+ z__1.r = eshift.r + z__2.r, z__1.i = eshift.i + z__2.i;
+ eshift.r = z__1.r, eshift.i = z__1.i;
+ shift.r = eshift.r, shift.i = eshift.i;
+ }
+
+/* Now check for two consecutive small subdiagonals. */
+
+ i__2 = ifirst + 1;
+ for (j = ilast - 1; j >= i__2; --j) {
+ istart = j;
+ i__3 = j + j * h_dim1;
+ z__2.r = ascale * h__[i__3].r, z__2.i = ascale * h__[i__3].i;
+ i__4 = j + j * t_dim1;
+ z__4.r = bscale * t[i__4].r, z__4.i = bscale * t[i__4].i;
+ z__3.r = shift.r * z__4.r - shift.i * z__4.i, z__3.i = shift.r *
+ z__4.i + shift.i * z__4.r;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ ctemp.r = z__1.r, ctemp.i = z__1.i;
+ temp = (d__1 = ctemp.r, abs(d__1)) + (d__2 = d_imag(&ctemp), abs(
+ d__2));
+ i__3 = j + 1 + j * h_dim1;
+ temp2 = ascale * ((d__1 = h__[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&h__[j + 1 + j * h_dim1]), abs(d__2)));
+ tempr = max(temp,temp2);
+ if (tempr < 1. && tempr != 0.) {
+ temp /= tempr;
+ temp2 /= tempr;
+ }
+ i__3 = j + (j - 1) * h_dim1;
+ if (((d__1 = h__[i__3].r, abs(d__1)) + (d__2 = d_imag(&h__[j + (j
+ - 1) * h_dim1]), abs(d__2))) * temp2 <= temp * atol) {
+ goto L90;
+ }
+/* L80: */
+ }
+
+ istart = ifirst;
+ i__2 = ifirst + ifirst * h_dim1;
+ z__2.r = ascale * h__[i__2].r, z__2.i = ascale * h__[i__2].i;
+ i__3 = ifirst + ifirst * t_dim1;
+ z__4.r = bscale * t[i__3].r, z__4.i = bscale * t[i__3].i;
+ z__3.r = shift.r * z__4.r - shift.i * z__4.i, z__3.i = shift.r *
+ z__4.i + shift.i * z__4.r;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ ctemp.r = z__1.r, ctemp.i = z__1.i;
+L90:
+
+/* Do an implicit-shift QZ sweep. */
+
+/* Initial Q */
+
+ i__2 = istart + 1 + istart * h_dim1;
+ z__1.r = ascale * h__[i__2].r, z__1.i = ascale * h__[i__2].i;
+ ctemp2.r = z__1.r, ctemp2.i = z__1.i;
+ zlartg_(&ctemp, &ctemp2, &c__, &s, &ctemp3);
+
+/* Sweep */
+
+ i__2 = ilast - 1;
+ for (j = istart; j <= i__2; ++j) {
+ if (j > istart) {
+ i__3 = j + (j - 1) * h_dim1;
+ ctemp.r = h__[i__3].r, ctemp.i = h__[i__3].i;
+ zlartg_(&ctemp, &h__[j + 1 + (j - 1) * h_dim1], &c__, &s, &
+ h__[j + (j - 1) * h_dim1]);
+ i__3 = j + 1 + (j - 1) * h_dim1;
+ h__[i__3].r = 0., h__[i__3].i = 0.;
+ }
+
+ i__3 = ilastm;
+ for (jc = j; jc <= i__3; ++jc) {
+ i__4 = j + jc * h_dim1;
+ z__2.r = c__ * h__[i__4].r, z__2.i = c__ * h__[i__4].i;
+ i__5 = j + 1 + jc * h_dim1;
+ z__3.r = s.r * h__[i__5].r - s.i * h__[i__5].i, z__3.i = s.r *
+ h__[i__5].i + s.i * h__[i__5].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ ctemp.r = z__1.r, ctemp.i = z__1.i;
+ i__4 = j + 1 + jc * h_dim1;
+ d_cnjg(&z__4, &s);
+ z__3.r = -z__4.r, z__3.i = -z__4.i;
+ i__5 = j + jc * h_dim1;
+ z__2.r = z__3.r * h__[i__5].r - z__3.i * h__[i__5].i, z__2.i =
+ z__3.r * h__[i__5].i + z__3.i * h__[i__5].r;
+ i__6 = j + 1 + jc * h_dim1;
+ z__5.r = c__ * h__[i__6].r, z__5.i = c__ * h__[i__6].i;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ h__[i__4].r = z__1.r, h__[i__4].i = z__1.i;
+ i__4 = j + jc * h_dim1;
+ h__[i__4].r = ctemp.r, h__[i__4].i = ctemp.i;
+ i__4 = j + jc * t_dim1;
+ z__2.r = c__ * t[i__4].r, z__2.i = c__ * t[i__4].i;
+ i__5 = j + 1 + jc * t_dim1;
+ z__3.r = s.r * t[i__5].r - s.i * t[i__5].i, z__3.i = s.r * t[
+ i__5].i + s.i * t[i__5].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ ctemp2.r = z__1.r, ctemp2.i = z__1.i;
+ i__4 = j + 1 + jc * t_dim1;
+ d_cnjg(&z__4, &s);
+ z__3.r = -z__4.r, z__3.i = -z__4.i;
+ i__5 = j + jc * t_dim1;
+ z__2.r = z__3.r * t[i__5].r - z__3.i * t[i__5].i, z__2.i =
+ z__3.r * t[i__5].i + z__3.i * t[i__5].r;
+ i__6 = j + 1 + jc * t_dim1;
+ z__5.r = c__ * t[i__6].r, z__5.i = c__ * t[i__6].i;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ t[i__4].r = z__1.r, t[i__4].i = z__1.i;
+ i__4 = j + jc * t_dim1;
+ t[i__4].r = ctemp2.r, t[i__4].i = ctemp2.i;
+/* L100: */
+ }
+ if (ilq) {
+ i__3 = *n;
+ for (jr = 1; jr <= i__3; ++jr) {
+ i__4 = jr + j * q_dim1;
+ z__2.r = c__ * q[i__4].r, z__2.i = c__ * q[i__4].i;
+ d_cnjg(&z__4, &s);
+ i__5 = jr + (j + 1) * q_dim1;
+ z__3.r = z__4.r * q[i__5].r - z__4.i * q[i__5].i, z__3.i =
+ z__4.r * q[i__5].i + z__4.i * q[i__5].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ ctemp.r = z__1.r, ctemp.i = z__1.i;
+ i__4 = jr + (j + 1) * q_dim1;
+ z__3.r = -s.r, z__3.i = -s.i;
+ i__5 = jr + j * q_dim1;
+ z__2.r = z__3.r * q[i__5].r - z__3.i * q[i__5].i, z__2.i =
+ z__3.r * q[i__5].i + z__3.i * q[i__5].r;
+ i__6 = jr + (j + 1) * q_dim1;
+ z__4.r = c__ * q[i__6].r, z__4.i = c__ * q[i__6].i;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ q[i__4].r = z__1.r, q[i__4].i = z__1.i;
+ i__4 = jr + j * q_dim1;
+ q[i__4].r = ctemp.r, q[i__4].i = ctemp.i;
+/* L110: */
+ }
+ }
+
+ i__3 = j + 1 + (j + 1) * t_dim1;
+ ctemp.r = t[i__3].r, ctemp.i = t[i__3].i;
+ zlartg_(&ctemp, &t[j + 1 + j * t_dim1], &c__, &s, &t[j + 1 + (j +
+ 1) * t_dim1]);
+ i__3 = j + 1 + j * t_dim1;
+ t[i__3].r = 0., t[i__3].i = 0.;
+
+/* Computing MIN */
+ i__4 = j + 2;
+ i__3 = min(i__4,ilast);
+ for (jr = ifrstm; jr <= i__3; ++jr) {
+ i__4 = jr + (j + 1) * h_dim1;
+ z__2.r = c__ * h__[i__4].r, z__2.i = c__ * h__[i__4].i;
+ i__5 = jr + j * h_dim1;
+ z__3.r = s.r * h__[i__5].r - s.i * h__[i__5].i, z__3.i = s.r *
+ h__[i__5].i + s.i * h__[i__5].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ ctemp.r = z__1.r, ctemp.i = z__1.i;
+ i__4 = jr + j * h_dim1;
+ d_cnjg(&z__4, &s);
+ z__3.r = -z__4.r, z__3.i = -z__4.i;
+ i__5 = jr + (j + 1) * h_dim1;
+ z__2.r = z__3.r * h__[i__5].r - z__3.i * h__[i__5].i, z__2.i =
+ z__3.r * h__[i__5].i + z__3.i * h__[i__5].r;
+ i__6 = jr + j * h_dim1;
+ z__5.r = c__ * h__[i__6].r, z__5.i = c__ * h__[i__6].i;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ h__[i__4].r = z__1.r, h__[i__4].i = z__1.i;
+ i__4 = jr + (j + 1) * h_dim1;
+ h__[i__4].r = ctemp.r, h__[i__4].i = ctemp.i;
+/* L120: */
+ }
+ i__3 = j;
+ for (jr = ifrstm; jr <= i__3; ++jr) {
+ i__4 = jr + (j + 1) * t_dim1;
+ z__2.r = c__ * t[i__4].r, z__2.i = c__ * t[i__4].i;
+ i__5 = jr + j * t_dim1;
+ z__3.r = s.r * t[i__5].r - s.i * t[i__5].i, z__3.i = s.r * t[
+ i__5].i + s.i * t[i__5].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ ctemp.r = z__1.r, ctemp.i = z__1.i;
+ i__4 = jr + j * t_dim1;
+ d_cnjg(&z__4, &s);
+ z__3.r = -z__4.r, z__3.i = -z__4.i;
+ i__5 = jr + (j + 1) * t_dim1;
+ z__2.r = z__3.r * t[i__5].r - z__3.i * t[i__5].i, z__2.i =
+ z__3.r * t[i__5].i + z__3.i * t[i__5].r;
+ i__6 = jr + j * t_dim1;
+ z__5.r = c__ * t[i__6].r, z__5.i = c__ * t[i__6].i;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ t[i__4].r = z__1.r, t[i__4].i = z__1.i;
+ i__4 = jr + (j + 1) * t_dim1;
+ t[i__4].r = ctemp.r, t[i__4].i = ctemp.i;
+/* L130: */
+ }
+ if (ilz) {
+ i__3 = *n;
+ for (jr = 1; jr <= i__3; ++jr) {
+ i__4 = jr + (j + 1) * z_dim1;
+ z__2.r = c__ * z__[i__4].r, z__2.i = c__ * z__[i__4].i;
+ i__5 = jr + j * z_dim1;
+ z__3.r = s.r * z__[i__5].r - s.i * z__[i__5].i, z__3.i =
+ s.r * z__[i__5].i + s.i * z__[i__5].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ ctemp.r = z__1.r, ctemp.i = z__1.i;
+ i__4 = jr + j * z_dim1;
+ d_cnjg(&z__4, &s);
+ z__3.r = -z__4.r, z__3.i = -z__4.i;
+ i__5 = jr + (j + 1) * z_dim1;
+ z__2.r = z__3.r * z__[i__5].r - z__3.i * z__[i__5].i,
+ z__2.i = z__3.r * z__[i__5].i + z__3.i * z__[i__5]
+ .r;
+ i__6 = jr + j * z_dim1;
+ z__5.r = c__ * z__[i__6].r, z__5.i = c__ * z__[i__6].i;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ z__[i__4].r = z__1.r, z__[i__4].i = z__1.i;
+ i__4 = jr + (j + 1) * z_dim1;
+ z__[i__4].r = ctemp.r, z__[i__4].i = ctemp.i;
+/* L140: */
+ }
+ }
+/* L150: */
+ }
+
+L160:
+
+/* L170: */
+ ;
+ }
+
+/* Drop-through = non-convergence */
+
+L180:
+ *info = ilast;
+ goto L210;
+
+/* Successful completion of all QZ steps */
+
+L190:
+
+/* Set Eigenvalues 1:ILO-1 */
+
+ i__1 = *ilo - 1;
+ for (j = 1; j <= i__1; ++j) {
+ absb = z_abs(&t[j + j * t_dim1]);
+ if (absb > safmin) {
+ i__2 = j + j * t_dim1;
+ z__2.r = t[i__2].r / absb, z__2.i = t[i__2].i / absb;
+ d_cnjg(&z__1, &z__2);
+ signbc.r = z__1.r, signbc.i = z__1.i;
+ i__2 = j + j * t_dim1;
+ t[i__2].r = absb, t[i__2].i = 0.;
+ if (ilschr) {
+ i__2 = j - 1;
+ zscal_(&i__2, &signbc, &t[j * t_dim1 + 1], &c__1);
+ zscal_(&j, &signbc, &h__[j * h_dim1 + 1], &c__1);
+ } else {
+ i__2 = j + j * h_dim1;
+ i__3 = j + j * h_dim1;
+ z__1.r = h__[i__3].r * signbc.r - h__[i__3].i * signbc.i,
+ z__1.i = h__[i__3].r * signbc.i + h__[i__3].i *
+ signbc.r;
+ h__[i__2].r = z__1.r, h__[i__2].i = z__1.i;
+ }
+ if (ilz) {
+ zscal_(n, &signbc, &z__[j * z_dim1 + 1], &c__1);
+ }
+ } else {
+ i__2 = j + j * t_dim1;
+ t[i__2].r = 0., t[i__2].i = 0.;
+ }
+ i__2 = j;
+ i__3 = j + j * h_dim1;
+ alpha[i__2].r = h__[i__3].r, alpha[i__2].i = h__[i__3].i;
+ i__2 = j;
+ i__3 = j + j * t_dim1;
+ beta[i__2].r = t[i__3].r, beta[i__2].i = t[i__3].i;
+/* L200: */
+ }
+
+/* Normal Termination */
+
+ *info = 0;
+
+/* Exit (other than argument error) -- return optimal workspace size */
+
+L210:
+ z__1.r = (doublereal) (*n), z__1.i = 0.;
+ work[1].r = z__1.r, work[1].i = z__1.i;
+ return 0;
+
+/* End of ZHGEQZ */
+
+} /* zhgeqz_ */
diff --git a/contrib/libs/clapack/zhpcon.c b/contrib/libs/clapack/zhpcon.c
new file mode 100644
index 0000000000..e95fe203d1
--- /dev/null
+++ b/contrib/libs/clapack/zhpcon.c
@@ -0,0 +1,197 @@
+/* zhpcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zhpcon_(char *uplo, integer *n, doublecomplex *ap,
+	integer *ipiv, doublereal *anorm, doublereal *rcond, doublecomplex *
+	work, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+
+    /* Local variables */
+    integer i__, ip, kase;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    logical upper;
+    extern /* Subroutine */ int zlacn2_(integer *, doublecomplex *,
+	    doublecomplex *, doublereal *, integer *, integer *), xerbla_(
+	    char *, integer *);
+    doublereal ainvnm;
+    extern /* Subroutine */ int zhptrs_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHPCON estimates the reciprocal of the condition number of a complex */
+/* Hermitian packed matrix A using the factorization A = U*D*U**H or */
+/* A = L*D*L**H computed by ZHPTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**H; */
+/* = 'L': Lower triangular, form is A = L*D*L**H. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The block diagonal matrix D and the multipliers used to */
+/* obtain the factor U or L as computed by ZHPTRF, stored as a */
+/* packed triangular matrix. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by ZHPTRF. */
+
+/* ANORM (input) DOUBLE PRECISION */
+/* The 1-norm of the original matrix A. */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --work;
+    --ipiv;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*anorm < 0.) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHPCON", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *rcond = 0.;
+    if (*n == 0) {
+	*rcond = 1.;
+	return 0;
+    } else if (*anorm <= 0.) {
+	return 0;
+    }
+
+/* Check that the diagonal matrix D is nonsingular. */
+
+/* An exactly zero diagonal entry inside a 1x1 pivot block (IPIV(i) > 0, */
+/* see the IPIV description above) makes D singular; RCOND is then left */
+/* at its initialized value of 0 and the routine returns early. */
+
+    if (upper) {
+
+/* Upper triangular storage: examine D from bottom to top */
+
+/* IP is the packed-storage index of the diagonal entry A(i,i); for */
+/* upper packed storage the last diagonal sits at N*(N+1)/2 and */
+/* stepping back by I reaches the previous column's diagonal. */
+
+	ip = *n * (*n + 1) / 2;
+	for (i__ = *n; i__ >= 1; --i__) {
+	    i__1 = ip;
+	    if (ipiv[i__] > 0 && (ap[i__1].r == 0. && ap[i__1].i == 0.)) {
+		return 0;
+	    }
+	    ip -= i__;
+/* L10: */
+	}
+    } else {
+
+/* Lower triangular storage: examine D from top to bottom. */
+
+/* Here IP advances by the height of the remaining column, N-I+1. */
+
+	ip = 1;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = ip;
+	    if (ipiv[i__] > 0 && (ap[i__2].r == 0. && ap[i__2].i == 0.)) {
+		return 0;
+	    }
+	    ip = ip + *n - i__ + 1;
+/* L20: */
+	}
+    }
+
+/* Estimate the 1-norm of the inverse. */
+
+/* ZLACN2 is a reverse-communication estimator: each return with */
+/* KASE != 0 asks the caller to apply inv(A) to the vector in WORK, */
+/* which is done here by ZHPTRS using the ZHPTRF factorization. */
+
+    kase = 0;
+L30:
+    zlacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+    if (kase != 0) {
+
+/* Multiply by inv(L*D*L') or inv(U*D*U'). */
+
+	zhptrs_(uplo, n, &c__1, &ap[1], &ipiv[1], &work[1], n, info);
+	goto L30;
+    }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+    if (ainvnm != 0.) {
+	*rcond = 1. / ainvnm / *anorm;
+    }
+
+    return 0;
+
+/* End of ZHPCON */
+
+} /* zhpcon_ */
diff --git a/contrib/libs/clapack/zhpev.c b/contrib/libs/clapack/zhpev.c
new file mode 100644
index 0000000000..49193c87c1
--- /dev/null
+++ b/contrib/libs/clapack/zhpev.c
@@ -0,0 +1,254 @@
+/* zhpev.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zhpev_(char *jobz, char *uplo, integer *n, doublecomplex
+	*ap, doublereal *w, doublecomplex *z__, integer *ldz, doublecomplex *
+	work, doublereal *rwork, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    doublereal eps;
+    integer inde;
+    doublereal anrm;
+    integer imax;
+    doublereal rmin, rmax;
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    doublereal sigma;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    logical wantz;
+    extern doublereal dlamch_(char *);
+    integer iscale;
+    doublereal safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+	    integer *, doublereal *, doublecomplex *, integer *);
+    doublereal bignum;
+    integer indtau;
+    extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+	    integer *);
+    extern doublereal zlanhp_(char *, char *, integer *, doublecomplex *,
+	    doublereal *);
+    integer indrwk, indwrk;
+    doublereal smlnum;
+    extern /* Subroutine */ int zhptrd_(char *, integer *, doublecomplex *,
+	    doublereal *, doublereal *, doublecomplex *, integer *),
+	    zsteqr_(char *, integer *, doublereal *, doublereal *,
+	    doublecomplex *, integer *, doublereal *, integer *),
+	    zupgtr_(char *, integer *, doublecomplex *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHPEV computes all the eigenvalues and, optionally, eigenvectors of a */
+/* complex Hermitian matrix in packed storage. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, AP is overwritten by values generated during the */
+/* reduction to tridiagonal form. If UPLO = 'U', the diagonal */
+/* and first superdiagonal of the tridiagonal matrix T overwrite */
+/* the corresponding elements of A, and if UPLO = 'L', the */
+/* diagonal and first subdiagonal of T overwrite the */
+/* corresponding elements of A. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX*16 array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/* eigenvectors of the matrix A, with the i-th column of Z */
+/* holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (max(1, 2*N-1)) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (max(1, 3*N-2)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, the algorithm failed to converge; i */
+/* off-diagonal elements of an intermediate tridiagonal */
+/* form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    wantz = lsame_(jobz, "V");
+
+    *info = 0;
+    if (! (wantz || lsame_(jobz, "N"))) {
+	*info = -1;
+    } else if (! (lsame_(uplo, "L") || lsame_(uplo,
+	    "U"))) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ldz < 1 || wantz && *ldz < *n) {
+	*info = -7;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHPEV ", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* N = 1: the single eigenvalue is the (real) diagonal entry itself and */
+/* the eigenvector, if requested, is trivially 1. */
+
+    if (*n == 1) {
+	w[1] = ap[1].r;
+	rwork[1] = 1.;
+	if (wantz) {
+	    i__1 = z_dim1 + 1;
+	    z__[i__1].r = 1., z__[i__1].i = 0.;
+	}
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    safmin = dlamch_("Safe minimum");
+    eps = dlamch_("Precision");
+    smlnum = safmin / eps;
+    bignum = 1. / smlnum;
+    rmin = sqrt(smlnum);
+    rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+/* If the max-abs entry ANRM falls outside [RMIN, RMAX], AP is scaled */
+/* by SIGMA so intermediate quantities stay representable; W is */
+/* rescaled by 1/SIGMA at the end to undo this. */
+
+    anrm = zlanhp_("M", uplo, n, &ap[1], &rwork[1]);
+    iscale = 0;
+    if (anrm > 0. && anrm < rmin) {
+	iscale = 1;
+	sigma = rmin / anrm;
+    } else if (anrm > rmax) {
+	iscale = 1;
+	sigma = rmax / anrm;
+    }
+    if (iscale == 1) {
+	i__1 = *n * (*n + 1) / 2;
+	zdscal_(&i__1, &sigma, &ap[1], &c__1);
+    }
+
+/* Call ZHPTRD to reduce Hermitian packed matrix to tridiagonal form. */
+
+/* INDE and INDTAU are 1-based offsets into the RWORK and WORK scratch */
+/* arrays, respectively. */
+
+    inde = 1;
+    indtau = 1;
+    zhptrd_(uplo, n, &ap[1], &w[1], &rwork[inde], &work[indtau], &iinfo);
+
+/* For eigenvalues only, call DSTERF. For eigenvectors, first call */
+/* ZUPGTR to generate the orthogonal matrix, then call ZSTEQR. */
+
+    if (! wantz) {
+	dsterf_(n, &w[1], &rwork[inde], info);
+    } else {
+	indwrk = indtau + *n;
+	zupgtr_(uplo, n, &ap[1], &work[indtau], &z__[z_offset], ldz, &work[
+		indwrk], &iinfo);
+	indrwk = inde + *n;
+	zsteqr_(jobz, n, &w[1], &rwork[inde], &z__[z_offset], ldz, &rwork[
+		indrwk], info);
+    }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+/* NOTE(review): when INFO > 0 only W(1:INFO-1) are rescaled -- */
+/* presumably the trailing values did not converge (see INFO above). */
+
+    if (iscale == 1) {
+	if (*info == 0) {
+	    imax = *n;
+	} else {
+	    imax = *info - 1;
+	}
+	d__1 = 1. / sigma;
+	dscal_(&imax, &d__1, &w[1], &c__1);
+    }
+
+    return 0;
+
+/* End of ZHPEV */
+
+} /* zhpev_ */
diff --git a/contrib/libs/clapack/zhpevd.c b/contrib/libs/clapack/zhpevd.c
new file mode 100644
index 0000000000..9bc9ad892b
--- /dev/null
+++ b/contrib/libs/clapack/zhpevd.c
@@ -0,0 +1,349 @@
+/* zhpevd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zhpevd_(char *jobz, char *uplo, integer *n,
+ doublecomplex *ap, doublereal *w, doublecomplex *z__, integer *ldz,
+ doublecomplex *work, integer *lwork, doublereal *rwork, integer *
+ lrwork, integer *iwork, integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ doublereal eps;
+ integer inde;
+ doublereal anrm;
+ integer imax;
+ doublereal rmin, rmax;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ doublereal sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo, lwmin, llrwk, llwrk;
+ logical wantz;
+ extern doublereal dlamch_(char *);
+ integer iscale;
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+ integer *, doublereal *, doublecomplex *, integer *);
+ doublereal bignum;
+ integer indtau;
+ extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern doublereal zlanhp_(char *, char *, integer *, doublecomplex *,
+ doublereal *);
+ extern /* Subroutine */ int zstedc_(char *, integer *, doublereal *,
+ doublereal *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublereal *, integer *, integer *, integer *, integer
+ *);
+ integer indrwk, indwrk, liwmin, lrwmin;
+ doublereal smlnum;
+ extern /* Subroutine */ int zhptrd_(char *, integer *, doublecomplex *,
+ doublereal *, doublereal *, doublecomplex *, integer *);
+ logical lquery;
+ extern /* Subroutine */ int zupmtr_(char *, char *, char *, integer *,
+ integer *, doublecomplex *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHPEVD computes all the eigenvalues and, optionally, eigenvectors of */
+/* a complex Hermitian matrix A in packed storage. If eigenvectors are */
+/* desired, it uses a divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, AP is overwritten by values generated during the */
+/* reduction to tridiagonal form. If UPLO = 'U', the diagonal */
+/* and first superdiagonal of the tridiagonal matrix T overwrite */
+/* the corresponding elements of A, and if UPLO = 'L', the */
+/* diagonal and first subdiagonal of T overwrite the */
+/* corresponding elements of A. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX*16 array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the orthonormal */
+/* eigenvectors of the matrix A, with the i-th column of Z */
+/* holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the required LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of array WORK. */
+/* If N <= 1, LWORK must be at least 1. */
+/* If JOBZ = 'N' and N > 1, LWORK must be at least N. */
+/* If JOBZ = 'V' and N > 1, LWORK must be at least 2*N. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the required sizes of the WORK, RWORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* RWORK (workspace/output) DOUBLE PRECISION array, */
+/* dimension (LRWORK) */
+/* On exit, if INFO = 0, RWORK(1) returns the required LRWORK. */
+
+/* LRWORK (input) INTEGER */
+/* The dimension of array RWORK. */
+/* If N <= 1, LRWORK must be at least 1. */
+/* If JOBZ = 'N' and N > 1, LRWORK must be at least N. */
+/* If JOBZ = 'V' and N > 1, LRWORK must be at least */
+/* 1 + 5*N + 2*N**2. */
+
+/* If LRWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the required sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the required LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of array IWORK. */
+/* If JOBZ = 'N' or N <= 1, LIWORK must be at least 1. */
+/* If JOBZ = 'V' and N > 1, LIWORK must be at least 3 + 5*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the required sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, the algorithm failed to converge; i */
+/* off-diagonal elements of an intermediate tridiagonal */
+/* form did not converge to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1;
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (lsame_(uplo, "L") || lsame_(uplo,
+ "U"))) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -7;
+ }
+
+ if (*info == 0) {
+ if (*n <= 1) {
+ lwmin = 1;
+ liwmin = 1;
+ lrwmin = 1;
+ } else {
+ if (wantz) {
+ lwmin = *n << 1;
+/* Computing 2nd power */
+ i__1 = *n;
+ lrwmin = *n * 5 + 1 + (i__1 * i__1 << 1);
+ liwmin = *n * 5 + 3;
+ } else {
+ lwmin = *n;
+ lrwmin = *n;
+ liwmin = 1;
+ }
+ }
+ work[1].r = (doublereal) lwmin, work[1].i = 0.;
+ rwork[1] = (doublereal) lrwmin;
+ iwork[1] = liwmin;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -9;
+ } else if (*lrwork < lrwmin && ! lquery) {
+ *info = -11;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -13;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHPEVD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ w[1] = ap[1].r;
+ if (wantz) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1., z__[i__1].i = 0.;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = dlamch_("Safe minimum");
+ eps = dlamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1. / smlnum;
+ rmin = sqrt(smlnum);
+ rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ anrm = zlanhp_("M", uplo, n, &ap[1], &rwork[1]);
+ iscale = 0;
+ if (anrm > 0. && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ i__1 = *n * (*n + 1) / 2;
+ zdscal_(&i__1, &sigma, &ap[1], &c__1);
+ }
+
+/* Call ZHPTRD to reduce Hermitian packed matrix to tridiagonal form. */
+
+ inde = 1;
+ indtau = 1;
+ indrwk = inde + *n;
+ indwrk = indtau + *n;
+ llwrk = *lwork - indwrk + 1;
+ llrwk = *lrwork - indrwk + 1;
+ zhptrd_(uplo, n, &ap[1], &w[1], &rwork[inde], &work[indtau], &iinfo);
+
+/* For eigenvalues only, call DSTERF. For eigenvectors, first call */
+/* ZUPGTR to generate the orthogonal matrix, then call ZSTEDC. */
+
+ if (! wantz) {
+ dsterf_(n, &w[1], &rwork[inde], info);
+ } else {
+ zstedc_("I", n, &w[1], &rwork[inde], &z__[z_offset], ldz, &work[
+ indwrk], &llwrk, &rwork[indrwk], &llrwk, &iwork[1], liwork,
+ info);
+ zupmtr_("L", uplo, "N", n, n, &ap[1], &work[indtau], &z__[z_offset],
+ ldz, &work[indwrk], &iinfo);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *n;
+ } else {
+ imax = *info - 1;
+ }
+ d__1 = 1. / sigma;
+ dscal_(&imax, &d__1, &w[1], &c__1);
+ }
+
+ work[1].r = (doublereal) lwmin, work[1].i = 0.;
+ rwork[1] = (doublereal) lrwmin;
+ iwork[1] = liwmin;
+ return 0;
+
+/* End of ZHPEVD */
+
+} /* zhpevd_ */
diff --git a/contrib/libs/clapack/zhpevx.c b/contrib/libs/clapack/zhpevx.c
new file mode 100644
index 0000000000..3f5d87d54e
--- /dev/null
+++ b/contrib/libs/clapack/zhpevx.c
@@ -0,0 +1,475 @@
+/* zhpevx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zhpevx_(char *jobz, char *range, char *uplo, integer *n,
+ doublecomplex *ap, doublereal *vl, doublereal *vu, integer *il,
+ integer *iu, doublereal *abstol, integer *m, doublereal *w,
+ doublecomplex *z__, integer *ldz, doublecomplex *work, doublereal *
+ rwork, integer *iwork, integer *ifail, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, jj;
+ doublereal eps, vll, vuu, tmp1;
+ integer indd, inde;
+ doublereal anrm;
+ integer imax;
+ doublereal rmin, rmax;
+ logical test;
+ integer itmp1, indee;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ doublereal sigma;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ char order[1];
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ logical wantz;
+ extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ extern doublereal dlamch_(char *);
+ logical alleig, indeig;
+ integer iscale, indibl;
+ logical valeig;
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+ integer *, doublereal *, doublecomplex *, integer *);
+ doublereal abstll, bignum;
+ integer indiwk, indisp, indtau;
+ extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+ integer *), dstebz_(char *, char *, integer *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *, doublereal *, integer *, integer *);
+ extern doublereal zlanhp_(char *, char *, integer *, doublecomplex *,
+ doublereal *);
+ integer indrwk, indwrk, nsplit;
+ doublereal smlnum;
+ extern /* Subroutine */ int zhptrd_(char *, integer *, doublecomplex *,
+ doublereal *, doublereal *, doublecomplex *, integer *),
+ zstein_(integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, integer *, doublecomplex *, integer *,
+ doublereal *, integer *, integer *, integer *), zsteqr_(char *,
+ integer *, doublereal *, doublereal *, doublecomplex *, integer *,
+ doublereal *, integer *), zupgtr_(char *, integer *,
+ doublecomplex *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zupmtr_(char *, char *, char
+ *, integer *, integer *, doublecomplex *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHPEVX computes selected eigenvalues and, optionally, eigenvectors */
+/* of a complex Hermitian matrix A in packed storage. */
+/* Eigenvalues/vectors can be selected by specifying either a range of */
+/* values or a range of indices for the desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found; */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found; */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, AP is overwritten by values generated during the */
+/* reduction to tridiagonal form. If UPLO = 'U', the diagonal */
+/* and first superdiagonal of the tridiagonal matrix T overwrite */
+/* the corresponding elements of A, and if UPLO = 'L', the */
+/* diagonal and first subdiagonal of T overwrite the */
+/* corresponding elements of A. */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) DOUBLE PRECISION */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing AP to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*DLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*DLAMCH('S'). */
+
+/* See "Computing Small Singular Values of Bidiagonal Matrices */
+/* with Guaranteed High Relative Accuracy," by Demmel and */
+/* Kahan, LAPACK Working Note #3. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the selected eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX*16 array, dimension (LDZ, max(1,M)) */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and */
+/* the index of the eigenvector is returned in IFAIL. */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (7*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, then i eigenvectors failed to converge. */
+/* Their indices are stored in array IFAIL. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -2;
+ } else if (! (lsame_(uplo, "L") || lsame_(uplo,
+ "U"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -7;
+ }
+ } else if (indeig) {
+ if (*il < 1 || *il > max(1,*n)) {
+ *info = -8;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -9;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -14;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHPEVX", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (alleig || indeig) {
+ *m = 1;
+ w[1] = ap[1].r;
+ } else {
+ if (*vl < ap[1].r && *vu >= ap[1].r) {
+ *m = 1;
+ w[1] = ap[1].r;
+ }
+ }
+ if (wantz) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1., z__[i__1].i = 0.;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = dlamch_("Safe minimum");
+ eps = dlamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1. / smlnum;
+ rmin = sqrt(smlnum);
+/* Computing MIN */
+ d__1 = sqrt(bignum), d__2 = 1. / sqrt(sqrt(safmin));
+ rmax = min(d__1,d__2);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ iscale = 0;
+ abstll = *abstol;
+ if (valeig) {
+ vll = *vl;
+ vuu = *vu;
+ } else {
+ vll = 0.;
+ vuu = 0.;
+ }
+ anrm = zlanhp_("M", uplo, n, &ap[1], &rwork[1]);
+ if (anrm > 0. && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ i__1 = *n * (*n + 1) / 2;
+ zdscal_(&i__1, &sigma, &ap[1], &c__1);
+ if (*abstol > 0.) {
+ abstll = *abstol * sigma;
+ }
+ if (valeig) {
+ vll = *vl * sigma;
+ vuu = *vu * sigma;
+ }
+ }
+
+/* Call ZHPTRD to reduce Hermitian packed matrix to tridiagonal form. */
+
+ indd = 1;
+ inde = indd + *n;
+ indrwk = inde + *n;
+ indtau = 1;
+ indwrk = indtau + *n;
+ zhptrd_(uplo, n, &ap[1], &rwork[indd], &rwork[inde], &work[indtau], &
+ iinfo);
+
+/* If all eigenvalues are desired and ABSTOL is less than or equal */
+/* to zero, then call DSTERF or ZUPGTR and ZSTEQR. If this fails */
+/* for some eigenvalue, then try DSTEBZ. */
+
+ test = FALSE_;
+ if (indeig) {
+ if (*il == 1 && *iu == *n) {
+ test = TRUE_;
+ }
+ }
+ if ((alleig || test) && *abstol <= 0.) {
+ dcopy_(n, &rwork[indd], &c__1, &w[1], &c__1);
+ indee = indrwk + (*n << 1);
+ if (! wantz) {
+ i__1 = *n - 1;
+ dcopy_(&i__1, &rwork[inde], &c__1, &rwork[indee], &c__1);
+ dsterf_(n, &w[1], &rwork[indee], info);
+ } else {
+ zupgtr_(uplo, n, &ap[1], &work[indtau], &z__[z_offset], ldz, &
+ work[indwrk], &iinfo);
+ i__1 = *n - 1;
+ dcopy_(&i__1, &rwork[inde], &c__1, &rwork[indee], &c__1);
+ zsteqr_(jobz, n, &w[1], &rwork[indee], &z__[z_offset], ldz, &
+ rwork[indrwk], info);
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ifail[i__] = 0;
+/* L10: */
+ }
+ }
+ }
+ if (*info == 0) {
+ *m = *n;
+ goto L20;
+ }
+ *info = 0;
+ }
+
+/* Otherwise, call DSTEBZ and, if eigenvectors are desired, ZSTEIN. */
+
+ if (wantz) {
+ *(unsigned char *)order = 'B';
+ } else {
+ *(unsigned char *)order = 'E';
+ }
+ indibl = 1;
+ indisp = indibl + *n;
+ indiwk = indisp + *n;
+ dstebz_(range, order, n, &vll, &vuu, il, iu, &abstll, &rwork[indd], &
+ rwork[inde], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &
+ rwork[indrwk], &iwork[indiwk], info);
+
+ if (wantz) {
+ zstein_(n, &rwork[indd], &rwork[inde], m, &w[1], &iwork[indibl], &
+ iwork[indisp], &z__[z_offset], ldz, &rwork[indrwk], &iwork[
+ indiwk], &ifail[1], info);
+
+/* Apply unitary matrix used in reduction to tridiagonal */
+/* form to eigenvectors returned by ZSTEIN. */
+
+ indwrk = indtau + *n;
+ zupmtr_("L", uplo, "N", n, m, &ap[1], &work[indtau], &z__[z_offset],
+ ldz, &work[indwrk], &iinfo);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+L20:
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *m;
+ } else {
+ imax = *info - 1;
+ }
+ d__1 = 1. / sigma;
+ dscal_(&imax, &d__1, &w[1], &c__1);
+ }
+
+/* If eigenvalues are not in order, then sort them, along with */
+/* eigenvectors. */
+
+ if (wantz) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = 0;
+ tmp1 = w[j];
+ i__2 = *m;
+ for (jj = j + 1; jj <= i__2; ++jj) {
+ if (w[jj] < tmp1) {
+ i__ = jj;
+ tmp1 = w[jj];
+ }
+/* L30: */
+ }
+
+ if (i__ != 0) {
+ itmp1 = iwork[indibl + i__ - 1];
+ w[i__] = w[j];
+ iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
+ w[j] = tmp1;
+ iwork[indibl + j - 1] = itmp1;
+ zswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
+ &c__1);
+ if (*info != 0) {
+ itmp1 = ifail[i__];
+ ifail[i__] = ifail[j];
+ ifail[j] = itmp1;
+ }
+ }
+/* L40: */
+ }
+ }
+
+ return 0;
+
+/* End of ZHPEVX */
+
+} /* zhpevx_ */
diff --git a/contrib/libs/clapack/zhpgst.c b/contrib/libs/clapack/zhpgst.c
new file mode 100644
index 0000000000..35331524dd
--- /dev/null
+++ b/contrib/libs/clapack/zhpgst.c
@@ -0,0 +1,313 @@
+/* zhpgst.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zhpgst_(integer *itype, char *uplo, integer *n,
+ doublecomplex *ap, doublecomplex *bp, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ doublereal d__1, d__2;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Local variables */
+ integer j, k, j1, k1, jj, kk;
+ doublecomplex ct;
+ doublereal ajj;
+ integer j1j1;
+ doublereal akk;
+ integer k1k1;
+ doublereal bjj, bkk;
+ extern /* Subroutine */ int zhpr2_(char *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *);
+ extern logical lsame_(char *, char *);
+ extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int zhpmv_(char *, integer *, doublecomplex *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *), zaxpy_(integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *), ztpmv_(char *, char *, char *, integer *,
+ doublecomplex *, doublecomplex *, integer *), ztpsv_(char *, char *, char *, integer *, doublecomplex *
+, doublecomplex *, integer *), xerbla_(
+ char *, integer *), zdscal_(integer *, doublereal *,
+ doublecomplex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHPGST reduces a complex Hermitian-definite generalized */
+/* eigenproblem to standard form, using packed storage. */
+
+/* If ITYPE = 1, the problem is A*x = lambda*B*x, */
+/* and A is overwritten by inv(U**H)*A*inv(U) or inv(L)*A*inv(L**H) */
+
+/* If ITYPE = 2 or 3, the problem is A*B*x = lambda*x or */
+/* B*A*x = lambda*x, and A is overwritten by U*A*U**H or L**H*A*L. */
+
+/* B must have been previously factorized as U**H*U or L*L**H by ZPPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* = 1: compute inv(U**H)*A*inv(U) or inv(L)*A*inv(L**H); */
+/* = 2 or 3: compute U*A*U**H or L**H*A*L. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored and B is factored as */
+/* U**H*U; */
+/* = 'L': Lower triangle of A is stored and B is factored as */
+/* L*L**H. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, if INFO = 0, the transformed matrix, stored in the */
+/* same format as A. */
+
+/* BP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The triangular factor from the Cholesky factorization of B, */
+/* stored in the same format as A, as returned by ZPPTRF. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --bp;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHPGST", &i__1);
+ return 0;
+ }
+
+ if (*itype == 1) {
+ if (upper) {
+
+/* Compute inv(U')*A*inv(U) */
+
+/* J1 and JJ are the indices of A(1,j) and A(j,j) */
+
+ jj = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ j1 = jj + 1;
+ jj += j;
+
+/* Compute the j-th column of the upper triangle of A */
+
+ i__2 = jj;
+ i__3 = jj;
+ d__1 = ap[i__3].r;
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+ i__2 = jj;
+ bjj = bp[i__2].r;
+ ztpsv_(uplo, "Conjugate transpose", "Non-unit", &j, &bp[1], &
+ ap[j1], &c__1);
+ i__2 = j - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zhpmv_(uplo, &i__2, &z__1, &ap[1], &bp[j1], &c__1, &c_b1, &ap[
+ j1], &c__1);
+ i__2 = j - 1;
+ d__1 = 1. / bjj;
+ zdscal_(&i__2, &d__1, &ap[j1], &c__1);
+ i__2 = jj;
+ i__3 = jj;
+ i__4 = j - 1;
+ zdotc_(&z__3, &i__4, &ap[j1], &c__1, &bp[j1], &c__1);
+ z__2.r = ap[i__3].r - z__3.r, z__2.i = ap[i__3].i - z__3.i;
+ z__1.r = z__2.r / bjj, z__1.i = z__2.i / bjj;
+ ap[i__2].r = z__1.r, ap[i__2].i = z__1.i;
+/* L10: */
+ }
+ } else {
+
+/* Compute inv(L)*A*inv(L') */
+
+/* KK and K1K1 are the indices of A(k,k) and A(k+1,k+1) */
+
+ kk = 1;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ k1k1 = kk + *n - k + 1;
+
+/* Update the lower triangle of A(k:n,k:n) */
+
+ i__2 = kk;
+ akk = ap[i__2].r;
+ i__2 = kk;
+ bkk = bp[i__2].r;
+/* Computing 2nd power */
+ d__1 = bkk;
+ akk /= d__1 * d__1;
+ i__2 = kk;
+ ap[i__2].r = akk, ap[i__2].i = 0.;
+ if (k < *n) {
+ i__2 = *n - k;
+ d__1 = 1. / bkk;
+ zdscal_(&i__2, &d__1, &ap[kk + 1], &c__1);
+ d__1 = akk * -.5;
+ ct.r = d__1, ct.i = 0.;
+ i__2 = *n - k;
+ zaxpy_(&i__2, &ct, &bp[kk + 1], &c__1, &ap[kk + 1], &c__1)
+ ;
+ i__2 = *n - k;
+ z__1.r = -1., z__1.i = -0.;
+ zhpr2_(uplo, &i__2, &z__1, &ap[kk + 1], &c__1, &bp[kk + 1]
+, &c__1, &ap[k1k1]);
+ i__2 = *n - k;
+ zaxpy_(&i__2, &ct, &bp[kk + 1], &c__1, &ap[kk + 1], &c__1)
+ ;
+ i__2 = *n - k;
+ ztpsv_(uplo, "No transpose", "Non-unit", &i__2, &bp[k1k1],
+ &ap[kk + 1], &c__1);
+ }
+ kk = k1k1;
+/* L20: */
+ }
+ }
+ } else {
+ if (upper) {
+
+/* Compute U*A*U' */
+
+/* K1 and KK are the indices of A(1,k) and A(k,k) */
+
+ kk = 0;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ k1 = kk + 1;
+ kk += k;
+
+/* Update the upper triangle of A(1:k,1:k) */
+
+ i__2 = kk;
+ akk = ap[i__2].r;
+ i__2 = kk;
+ bkk = bp[i__2].r;
+ i__2 = k - 1;
+ ztpmv_(uplo, "No transpose", "Non-unit", &i__2, &bp[1], &ap[
+ k1], &c__1);
+ d__1 = akk * .5;
+ ct.r = d__1, ct.i = 0.;
+ i__2 = k - 1;
+ zaxpy_(&i__2, &ct, &bp[k1], &c__1, &ap[k1], &c__1);
+ i__2 = k - 1;
+ zhpr2_(uplo, &i__2, &c_b1, &ap[k1], &c__1, &bp[k1], &c__1, &
+ ap[1]);
+ i__2 = k - 1;
+ zaxpy_(&i__2, &ct, &bp[k1], &c__1, &ap[k1], &c__1);
+ i__2 = k - 1;
+ zdscal_(&i__2, &bkk, &ap[k1], &c__1);
+ i__2 = kk;
+/* Computing 2nd power */
+ d__2 = bkk;
+ d__1 = akk * (d__2 * d__2);
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+/* L30: */
+ }
+ } else {
+
+/* Compute L'*A*L */
+
+/* JJ and J1J1 are the indices of A(j,j) and A(j+1,j+1) */
+
+ jj = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ j1j1 = jj + *n - j + 1;
+
+/* Compute the j-th column of the lower triangle of A */
+
+ i__2 = jj;
+ ajj = ap[i__2].r;
+ i__2 = jj;
+ bjj = bp[i__2].r;
+ i__2 = jj;
+ d__1 = ajj * bjj;
+ i__3 = *n - j;
+ zdotc_(&z__2, &i__3, &ap[jj + 1], &c__1, &bp[jj + 1], &c__1);
+ z__1.r = d__1 + z__2.r, z__1.i = z__2.i;
+ ap[i__2].r = z__1.r, ap[i__2].i = z__1.i;
+ i__2 = *n - j;
+ zdscal_(&i__2, &bjj, &ap[jj + 1], &c__1);
+ i__2 = *n - j;
+ zhpmv_(uplo, &i__2, &c_b1, &ap[j1j1], &bp[jj + 1], &c__1, &
+ c_b1, &ap[jj + 1], &c__1);
+ i__2 = *n - j + 1;
+ ztpmv_(uplo, "Conjugate transpose", "Non-unit", &i__2, &bp[jj]
+, &ap[jj], &c__1);
+ jj = j1j1;
+/* L40: */
+ }
+ }
+ }
+ return 0;
+
+/* End of ZHPGST */
+
+} /* zhpgst_ */
diff --git a/contrib/libs/clapack/zhpgv.c b/contrib/libs/clapack/zhpgv.c
new file mode 100644
index 0000000000..ffd205fc86
--- /dev/null
+++ b/contrib/libs/clapack/zhpgv.c
@@ -0,0 +1,246 @@
+/* zhpgv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zhpgv_(integer *itype, char *jobz, char *uplo, integer *
+ n, doublecomplex *ap, doublecomplex *bp, doublereal *w, doublecomplex
+ *z__, integer *ldz, doublecomplex *work, doublereal *rwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1;
+
+ /* Local variables */
+ integer j, neig;
+ extern logical lsame_(char *, char *);
+ char trans[1];
+ logical upper;
+ extern /* Subroutine */ int zhpev_(char *, char *, integer *,
+ doublecomplex *, doublereal *, doublecomplex *, integer *,
+ doublecomplex *, doublereal *, integer *);
+ logical wantz;
+ extern /* Subroutine */ int ztpmv_(char *, char *, char *, integer *,
+ doublecomplex *, doublecomplex *, integer *), ztpsv_(char *, char *, char *, integer *, doublecomplex *
+, doublecomplex *, integer *), xerbla_(
+ char *, integer *), zhpgst_(integer *, char *, integer *,
+ doublecomplex *, doublecomplex *, integer *), zpptrf_(
+ char *, integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHPGV computes all the eigenvalues and, optionally, the eigenvectors */
+/* of a complex generalized Hermitian-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. */
+/* Here A and B are assumed to be Hermitian, stored in packed format, */
+/* and B is also positive definite. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the contents of AP are destroyed. */
+
+/* BP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* B, packed columnwise in a linear array. The j-th column of B */
+/* is stored in the array BP as follows: */
+/* if UPLO = 'U', BP(i + (j-1)*j/2) = B(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', BP(i + (j-1)*(2*n-j)/2) = B(i,j) for j<=i<=n. */
+
+/* On exit, the triangular factor U or L from the Cholesky */
+/* factorization B = U**H*U or B = L*L**H, in the same storage */
+/* format as B. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX*16 array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/* eigenvectors. The eigenvectors are normalized as follows: */
+/* if ITYPE = 1 or 2, Z**H*B*Z = I; */
+/* if ITYPE = 3, Z**H*inv(B)*Z = I. */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (max(1, 2*N-1)) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (max(1, 3*N-2)) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: ZPPTRF or ZHPEV returned an error code: */
+/* <= N: if INFO = i, ZHPEV failed to converge; */
+/* i off-diagonal elements of an intermediate */
+/* tridiagonal form did not converge to zero; */
+/* > N: if INFO = N + i, for 1 <= i <= n, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments: shift pointers so 1-based Fortran indexing
+ carries over unchanged to the translated C code. */
+ --ap;
+ --bp;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+
+ *info = 0;
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -2;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHPGV ", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a Cholesky factorization of B. */
+
+ zpptrf_(uplo, n, &bp[1], info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+ zhpgst_(itype, uplo, n, &ap[1], &bp[1], info);
+ zhpev_(jobz, uplo, n, &ap[1], &w[1], &z__[z_offset], ldz, &work[1], &
+ rwork[1], info);
+
+ if (wantz) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+/* If ZHPEV reported INFO > 0, only the first INFO-1 eigenvectors */
+/* converged; back-transform just those columns. */
+
+ neig = *n;
+ if (*info > 0) {
+ neig = *info - 1;
+ }
+ if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'N';
+ } else {
+ *(unsigned char *)trans = 'C';
+ }
+
+ i__1 = neig;
+ for (j = 1; j <= i__1; ++j) {
+ ztpsv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L10: */
+ }
+
+ } else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'C';
+ } else {
+ *(unsigned char *)trans = 'N';
+ }
+
+ i__1 = neig;
+ for (j = 1; j <= i__1; ++j) {
+ ztpmv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L20: */
+ }
+ }
+ }
+ return 0;
+
+/* End of ZHPGV */
+
+} /* zhpgv_ */
diff --git a/contrib/libs/clapack/zhpgvd.c b/contrib/libs/clapack/zhpgvd.c
new file mode 100644
index 0000000000..b47f68974f
--- /dev/null
+++ b/contrib/libs/clapack/zhpgvd.c
@@ -0,0 +1,359 @@
+/* zhpgvd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zhpgvd_(integer *itype, char *jobz, char *uplo, integer *
+ n, doublecomplex *ap, doublecomplex *bp, doublereal *w, doublecomplex
+ *z__, integer *ldz, doublecomplex *work, integer *lwork, doublereal *
+ rwork, integer *lrwork, integer *iwork, integer *liwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ integer j, neig;
+ extern logical lsame_(char *, char *);
+ integer lwmin;
+ char trans[1];
+ logical upper, wantz;
+ extern /* Subroutine */ int ztpmv_(char *, char *, char *, integer *,
+ doublecomplex *, doublecomplex *, integer *), ztpsv_(char *, char *, char *, integer *, doublecomplex *
+, doublecomplex *, integer *), xerbla_(
+ char *, integer *);
+ integer liwmin;
+ extern /* Subroutine */ int zhpevd_(char *, char *, integer *,
+ doublecomplex *, doublereal *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublereal *, integer *, integer *,
+ integer *, integer *);
+ integer lrwmin;
+ extern /* Subroutine */ int zhpgst_(integer *, char *, integer *,
+ doublecomplex *, doublecomplex *, integer *);
+ logical lquery;
+ extern /* Subroutine */ int zpptrf_(char *, integer *, doublecomplex *,
+ integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHPGVD computes all the eigenvalues and, optionally, the eigenvectors */
+/* of a complex generalized Hermitian-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. Here A and */
+/* B are assumed to be Hermitian, stored in packed format, and B is also */
+/* positive definite. */
+/* If eigenvectors are desired, it uses a divide and conquer algorithm. */
+
+/* The divide and conquer algorithm makes very mild assumptions about */
+/* floating point arithmetic. It will work on machines with a guard */
+/* digit in add/subtract, or on those binary machines without guard */
+/* digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
+/* Cray-2. It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the contents of AP are destroyed. */
+
+/* BP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* B, packed columnwise in a linear array. The j-th column of B */
+/* is stored in the array BP as follows: */
+/* if UPLO = 'U', BP(i + (j-1)*j/2) = B(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', BP(i + (j-1)*(2*n-j)/2) = B(i,j) for j<=i<=n. */
+
+/* On exit, the triangular factor U or L from the Cholesky */
+/* factorization B = U**H*U or B = L*L**H, in the same storage */
+/* format as B. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, the eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX*16 array, dimension (LDZ, N) */
+/* If JOBZ = 'V', then if INFO = 0, Z contains the matrix Z of */
+/* eigenvectors. The eigenvectors are normalized as follows: */
+/* if ITYPE = 1 or 2, Z**H*B*Z = I; */
+/* if ITYPE = 3, Z**H*inv(B)*Z = I. */
+/* If JOBZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the required LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of array WORK. */
+/* If N <= 1, LWORK >= 1. */
+/* If JOBZ = 'N' and N > 1, LWORK >= N. */
+/* If JOBZ = 'V' and N > 1, LWORK >= 2*N. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the required sizes of the WORK, RWORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LRWORK)) */
+/* On exit, if INFO = 0, RWORK(1) returns the required LRWORK. */
+
+/* LRWORK (input) INTEGER */
+/* The dimension of array RWORK. */
+/* If N <= 1, LRWORK >= 1. */
+/* If JOBZ = 'N' and N > 1, LRWORK >= N. */
+/* If JOBZ = 'V' and N > 1, LRWORK >= 1 + 5*N + 2*N**2. */
+
+/* If LRWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the required sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the required LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of array IWORK. */
+/* If JOBZ = 'N' or N <= 1, LIWORK >= 1. */
+/* If JOBZ = 'V' and N > 1, LIWORK >= 3 + 5*N. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the required sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: ZPPTRF or ZHPEVD returned an error code: */
+/* <= N: if INFO = i, ZHPEVD failed to converge; */
+/* i off-diagonal elements of an intermediate */
+/* tridiagonal form did not converge to zero; */
+/* > N: if INFO = N + i, for 1 <= i <= n, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments: shift pointers so 1-based Fortran indexing
+ carries over unchanged to the translated C code. */
+ --ap;
+ --bp;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1;
+
+ *info = 0;
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -2;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -9;
+ }
+
+ if (*info == 0) {
+ if (*n <= 1) {
+ lwmin = 1;
+ liwmin = 1;
+ lrwmin = 1;
+ } else {
+ if (wantz) {
+ lwmin = *n << 1;
+/* Computing 2nd power */
+ i__1 = *n;
+ lrwmin = *n * 5 + 1 + (i__1 * i__1 << 1);
+ liwmin = *n * 5 + 3;
+ } else {
+ lwmin = *n;
+ lrwmin = *n;
+ liwmin = 1;
+ }
+ }
+
+ work[1].r = (doublereal) lwmin, work[1].i = 0.;
+ rwork[1] = (doublereal) lrwmin;
+ iwork[1] = liwmin;
+ if (*lwork < lwmin && ! lquery) {
+ *info = -11;
+ } else if (*lrwork < lrwmin && ! lquery) {
+ *info = -13;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -15;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHPGVD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a Cholesky factorization of B. */
+
+ zpptrf_(uplo, n, &bp[1], info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+ zhpgst_(itype, uplo, n, &ap[1], &bp[1], info);
+ zhpevd_(jobz, uplo, n, &ap[1], &w[1], &z__[z_offset], ldz, &work[1],
+ lwork, &rwork[1], lrwork, &iwork[1], liwork, info);
+
+/* Merge the workspace sizes reported by ZHPEVD in WORK(1)/RWORK(1)/ */
+/* IWORK(1) into the minima computed above, for return to the caller. */
+
+/* Computing MAX */
+ d__1 = (doublereal) lwmin, d__2 = work[1].r;
+ lwmin = (integer) max(d__1,d__2);
+/* Computing MAX */
+ d__1 = (doublereal) lrwmin;
+ lrwmin = (integer) max(d__1,rwork[1]);
+/* Computing MAX */
+ d__1 = (doublereal) liwmin, d__2 = (doublereal) iwork[1];
+ liwmin = (integer) max(d__1,d__2);
+
+ if (wantz) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+/* If ZHPEVD reported INFO > 0, only the first INFO-1 eigenvectors */
+/* converged; back-transform just those columns. */
+
+ neig = *n;
+ if (*info > 0) {
+ neig = *info - 1;
+ }
+ if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'N';
+ } else {
+ *(unsigned char *)trans = 'C';
+ }
+
+ i__1 = neig;
+ for (j = 1; j <= i__1; ++j) {
+ ztpsv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L10: */
+ }
+
+ } else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'C';
+ } else {
+ *(unsigned char *)trans = 'N';
+ }
+
+ i__1 = neig;
+ for (j = 1; j <= i__1; ++j) {
+ ztpmv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L20: */
+ }
+ }
+ }
+
+ work[1].r = (doublereal) lwmin, work[1].i = 0.;
+ rwork[1] = (doublereal) lrwmin;
+ iwork[1] = liwmin;
+ return 0;
+
+/* End of ZHPGVD */
+
+} /* zhpgvd_ */
diff --git a/contrib/libs/clapack/zhpgvx.c b/contrib/libs/clapack/zhpgvx.c
new file mode 100644
index 0000000000..2c76291756
--- /dev/null
+++ b/contrib/libs/clapack/zhpgvx.c
@@ -0,0 +1,344 @@
+/* zhpgvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zhpgvx_(integer *itype, char *jobz, char *range, char *
+ uplo, integer *n, doublecomplex *ap, doublecomplex *bp, doublereal *
+ vl, doublereal *vu, integer *il, integer *iu, doublereal *abstol,
+ integer *m, doublereal *w, doublecomplex *z__, integer *ldz,
+ doublecomplex *work, doublereal *rwork, integer *iwork, integer *
+ ifail, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1;
+
+ /* Local variables */
+ integer j;
+ extern logical lsame_(char *, char *);
+ char trans[1];
+ logical upper, wantz;
+ extern /* Subroutine */ int ztpmv_(char *, char *, char *, integer *,
+ doublecomplex *, doublecomplex *, integer *), ztpsv_(char *, char *, char *, integer *, doublecomplex *
+, doublecomplex *, integer *);
+ logical alleig, indeig, valeig;
+ extern /* Subroutine */ int xerbla_(char *, integer *), zhpgst_(
+ integer *, char *, integer *, doublecomplex *, doublecomplex *,
+ integer *), zhpevx_(char *, char *, char *, integer *,
+ doublecomplex *, doublereal *, doublereal *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublecomplex *, integer *
+, doublecomplex *, doublereal *, integer *, integer *, integer *), zpptrf_(char *, integer *, doublecomplex
+ *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHPGVX computes selected eigenvalues and, optionally, eigenvectors */
+/* of a complex generalized Hermitian-definite eigenproblem, of the form */
+/* A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x. Here A and */
+/* B are assumed to be Hermitian, stored in packed format, and B is also */
+/* positive definite. Eigenvalues and eigenvectors can be selected by */
+/* specifying either a range of values or a range of indices for the */
+/* desired eigenvalues. */
+
+/* Arguments */
+/* ========= */
+
+/* ITYPE (input) INTEGER */
+/* Specifies the problem type to be solved: */
+/* = 1: A*x = (lambda)*B*x */
+/* = 2: A*B*x = (lambda)*x */
+/* = 3: B*A*x = (lambda)*x */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found; */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found; */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangles of A and B are stored; */
+/* = 'L': Lower triangles of A and B are stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the contents of AP are destroyed. */
+
+/* BP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* B, packed columnwise in a linear array. The j-th column of B */
+/* is stored in the array BP as follows: */
+/* if UPLO = 'U', BP(i + (j-1)*j/2) = B(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', BP(i + (j-1)*(2*n-j)/2) = B(i,j) for j<=i<=n. */
+
+/* On exit, the triangular factor U or L from the Cholesky */
+/* factorization B = U**H*U or B = L*L**H, in the same storage */
+/* format as B. */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) DOUBLE PRECISION */
+/* The absolute error tolerance for the eigenvalues. */
+/* An approximate eigenvalue is accepted as converged */
+/* when it is determined to lie in an interval [a,b] */
+/* of width less than or equal to */
+
+/* ABSTOL + EPS * max( |a|,|b| ) , */
+
+/* where EPS is the machine precision. If ABSTOL is less than */
+/* or equal to zero, then EPS*|T| will be used in its place, */
+/* where |T| is the 1-norm of the tridiagonal matrix obtained */
+/* by reducing AP to tridiagonal form. */
+
+/* Eigenvalues will be computed most accurately when ABSTOL is */
+/* set to twice the underflow threshold 2*DLAMCH('S'), not zero. */
+/* If this routine returns with INFO>0, indicating that some */
+/* eigenvectors did not converge, try setting ABSTOL to */
+/* 2*DLAMCH('S'). */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* On normal exit, the first M elements contain the selected */
+/* eigenvalues in ascending order. */
+
+/* Z (output) COMPLEX*16 array, dimension (LDZ, N) */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix A */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* The eigenvectors are normalized as follows: */
+/* if ITYPE = 1 or 2, Z**H*B*Z = I; */
+/* if ITYPE = 3, Z**H*inv(B)*Z = I. */
+
+/* If an eigenvector fails to converge, then that column of Z */
+/* contains the latest approximation to the eigenvector, and the */
+/* index of the eigenvector is returned in IFAIL. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (7*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (5*N) */
+
+/* IFAIL (output) INTEGER array, dimension (N) */
+/* If JOBZ = 'V', then if INFO = 0, the first M elements of */
+/* IFAIL are zero. If INFO > 0, then IFAIL contains the */
+/* indices of the eigenvectors that failed to converge. */
+/* If JOBZ = 'N', then IFAIL is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: ZPPTRF or ZHPEVX returned an error code: */
+/* <= N: if INFO = i, ZHPEVX failed to converge; */
+/* i eigenvectors failed to converge. Their indices */
+/* are stored in array IFAIL. */
+/* > N: if INFO = N + i, for 1 <= i <= n, then the leading */
+/* minor of order i of B is not positive definite. */
+/* The factorization of B could not be completed and */
+/* no eigenvalues or eigenvectors were computed. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments: shift pointers so 1-based Fortran indexing
+ carries over unchanged to the translated C code. */
+ --ap;
+ --bp;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+ --ifail;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ upper = lsame_(uplo, "U");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+
+ *info = 0;
+ if (*itype < 1 || *itype > 3) {
+ *info = -1;
+ } else if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -2;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -3;
+ } else if (! (upper || lsame_(uplo, "L"))) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else {
+ if (valeig) {
+ if (*n > 0 && *vu <= *vl) {
+ *info = -9;
+ }
+ } else if (indeig) {
+ if (*il < 1) {
+ *info = -10;
+ } else if (*iu < min(*n,*il) || *iu > *n) {
+ *info = -11;
+ }
+ }
+ }
+ if (*info == 0) {
+ if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -16;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHPGVX", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Form a Cholesky factorization of B. */
+
+ zpptrf_(uplo, n, &bp[1], info);
+ if (*info != 0) {
+ *info = *n + *info;
+ return 0;
+ }
+
+/* Transform problem to standard eigenvalue problem and solve. */
+
+ zhpgst_(itype, uplo, n, &ap[1], &bp[1], info);
+ zhpevx_(jobz, range, uplo, n, &ap[1], vl, vu, il, iu, abstol, m, &w[1], &
+ z__[z_offset], ldz, &work[1], &rwork[1], &iwork[1], &ifail[1],
+ info);
+
+ if (wantz) {
+
+/* Backtransform eigenvectors to the original problem. */
+
+/* If ZHPEVX reported INFO > 0, only the first INFO-1 computed */
+/* eigenvectors are usable; back-transform just those columns. */
+
+ if (*info > 0) {
+ *m = *info - 1;
+ }
+ if (*itype == 1 || *itype == 2) {
+
+/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x; */
+/* backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'N';
+ } else {
+ *(unsigned char *)trans = 'C';
+ }
+
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ ztpsv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L10: */
+ }
+
+ } else if (*itype == 3) {
+
+/* For B*A*x=(lambda)*x; */
+/* backtransform eigenvectors: x = L*y or U'*y */
+
+ if (upper) {
+ *(unsigned char *)trans = 'C';
+ } else {
+ *(unsigned char *)trans = 'N';
+ }
+
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ ztpmv_(uplo, trans, "Non-unit", n, &bp[1], &z__[j * z_dim1 +
+ 1], &c__1);
+/* L20: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZHPGVX */
+
+} /* zhpgvx_ */
diff --git a/contrib/libs/clapack/zhprfs.c b/contrib/libs/clapack/zhprfs.c
new file mode 100644
index 0000000000..75e7f05f0c
--- /dev/null
+++ b/contrib/libs/clapack/zhprfs.c
@@ -0,0 +1,462 @@
+/* zhprfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zhprfs_(char *uplo, integer *n, integer *nrhs,
+	doublecomplex *ap, doublecomplex *afp, integer *ipiv, doublecomplex *
+	b, integer *ldb, doublecomplex *x, integer *ldx, doublereal *ferr,
+	doublereal *berr, doublecomplex *work, doublereal *rwork, integer *
+	info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+    doublereal d__1, d__2, d__3, d__4;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *);
+
+    /* Local variables */
+    integer i__, j, k;
+    doublereal s;
+    integer ik, kk;
+    doublereal xk;
+    integer nz;
+    doublereal eps;
+    integer kase;
+    doublereal safe1, safe2;
+    extern logical lsame_(char *, char *);
+    integer isave[3], count;
+    logical upper;
+    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *), zhpmv_(char *, integer *,
+	    doublecomplex *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *, doublecomplex *, integer *), zaxpy_(
+	    integer *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *, integer *), zlacn2_(integer *, doublecomplex *,
+	    doublecomplex *, doublereal *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    doublereal safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal lstres;
+    extern /* Subroutine */ int zhptrs_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHPRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is Hermitian indefinite */
+/* and packed, and provides error bounds and backward error estimates */
+/* for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the Hermitian matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* AFP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The factored form of the matrix A. AFP contains the block */
+/* diagonal matrix D and the multipliers used to obtain the */
+/* factor U or L from the factorization A = U*D*U**H or */
+/* A = L*D*L**H as computed by ZHPTRF, stored as a packed */
+/* triangular matrix. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by ZHPTRF. */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by ZHPTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* f2c passes Fortran 1-based arrays; shift every base pointer so that */
+    /* e.g. ap[1] is the first element and b[i + j*b_dim1] is B(i,j). */
+    --ap;
+    --afp;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    } else if (*ldx < max(1,*n)) {
+	*info = -10;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHPRFS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] = 0.;
+	    berr[j] = 0.;
+/* L10: */
+	}
+	return 0;
+    }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+    nz = *n + 1;
+    eps = dlamch_("Epsilon");
+    safmin = dlamch_("Safe minimum");
+/* SAFE1/SAFE2: when a denominator below falls under SAFE2, SAFE1 is */
+/* added to both numerator and denominator to avoid division overflow. */
+    safe1 = nz * safmin;
+    safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+	count = 1;
+	lstres = 3.;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+	zcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+	z__1.r = -1., z__1.i = -0.;
+	zhpmv_(uplo, n, &z__1, &ap[1], &x[j * x_dim1 + 1], &c__1, &c_b1, &
+		work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * b_dim1;
+	    rwork[i__] = (d__1 = b[i__3].r, abs(d__1)) + (d__2 = d_imag(&b[
+		    i__ + j * b_dim1]), abs(d__2));
+/* L30: */
+	}
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+/* KK tracks the packed-storage index of the start of column K; */
+/* IK walks the entries of column K. */
+	kk = 1;
+	if (upper) {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		s = 0.;
+		i__3 = k + j * x_dim1;
+		xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[k + j *
+			 x_dim1]), abs(d__2));
+		ik = kk;
+		i__3 = k - 1;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    i__4 = ik;
+		    rwork[i__] += ((d__1 = ap[i__4].r, abs(d__1)) + (d__2 =
+			    d_imag(&ap[ik]), abs(d__2))) * xk;
+		    i__4 = ik;
+		    i__5 = i__ + j * x_dim1;
+		    s += ((d__1 = ap[i__4].r, abs(d__1)) + (d__2 = d_imag(&ap[
+			    ik]), abs(d__2))) * ((d__3 = x[i__5].r, abs(d__3))
+			     + (d__4 = d_imag(&x[i__ + j * x_dim1]), abs(d__4)
+			    ));
+		    ++ik;
+/* L40: */
+		}
+		i__3 = kk + k - 1;
+/* Diagonal entry of a Hermitian matrix: only the real part is used. */
+		rwork[k] = rwork[k] + (d__1 = ap[i__3].r, abs(d__1)) * xk + s;
+		kk += k;
+/* L50: */
+	    }
+	} else {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		s = 0.;
+		i__3 = k + j * x_dim1;
+		xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[k + j *
+			 x_dim1]), abs(d__2));
+		i__3 = kk;
+		rwork[k] += (d__1 = ap[i__3].r, abs(d__1)) * xk;
+		ik = kk + 1;
+		i__3 = *n;
+		for (i__ = k + 1; i__ <= i__3; ++i__) {
+		    i__4 = ik;
+		    rwork[i__] += ((d__1 = ap[i__4].r, abs(d__1)) + (d__2 =
+			    d_imag(&ap[ik]), abs(d__2))) * xk;
+		    i__4 = ik;
+		    i__5 = i__ + j * x_dim1;
+		    s += ((d__1 = ap[i__4].r, abs(d__1)) + (d__2 = d_imag(&ap[
+			    ik]), abs(d__2))) * ((d__3 = x[i__5].r, abs(d__3))
+			     + (d__4 = d_imag(&x[i__ + j * x_dim1]), abs(d__4)
+			    ));
+		    ++ik;
+/* L60: */
+		}
+		rwork[k] += s;
+		kk += *n - k + 1;
+/* L70: */
+	    }
+	}
+	s = 0.;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (rwork[i__] > safe2) {
+/* Computing MAX */
+		i__3 = i__;
+		d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+			d_imag(&work[i__]), abs(d__2))) / rwork[i__];
+		s = max(d__3,d__4);
+	    } else {
+/* Computing MAX */
+		i__3 = i__;
+		d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+			d_imag(&work[i__]), abs(d__2)) + safe1) / (rwork[i__]
+			+ safe1);
+		s = max(d__3,d__4);
+	    }
+/* L80: */
+	}
+	berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+/* count <= 5 is the ITMAX bound from "Internal Parameters" above. */
+	if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+	    zhptrs_(uplo, n, &c__1, &afp[1], &ipiv[1], &work[1], n, info);
+	    zaxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+	    lstres = berr[j];
+	    ++count;
+	    goto L20;
+	}
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use ZLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (rwork[i__] > safe2) {
+		i__3 = i__;
+		rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+			d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+			;
+	    } else {
+		i__3 = i__;
+		rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+			d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+			 + safe1;
+	    }
+/* L90: */
+	}
+
+/* Reverse communication with ZLACN2: it is re-entered with the KASE */
+/* it sets until KASE == 0, at which point the norm estimate is in */
+/* FERR(J). WORK(N+1..2N) is ZLACN2's private workspace. */
+	kase = 0;
+L100:
+	zlacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+	if (kase != 0) {
+	    if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+		zhptrs_(uplo, n, &c__1, &afp[1], &ipiv[1], &work[1], n, info);
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__;
+		    z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4]
+			    * work[i__5].i;
+		    work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L110: */
+		}
+	    } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__;
+		    z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4]
+			    * work[i__5].i;
+		    work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L120: */
+		}
+		zhptrs_(uplo, n, &c__1, &afp[1], &ipiv[1], &work[1], n, info);
+	    }
+	    goto L100;
+	}
+
+/* Normalize error. */
+
+	lstres = 0.;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    i__3 = i__ + j * x_dim1;
+	    d__3 = lstres, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 =
+		    d_imag(&x[i__ + j * x_dim1]), abs(d__2));
+	    lstres = max(d__3,d__4);
+/* L130: */
+	}
+	if (lstres != 0.) {
+	    ferr[j] /= lstres;
+	}
+
+/* L140: */
+    }
+
+    return 0;
+
+/* End of ZHPRFS */
+
+} /* zhprfs_ */
diff --git a/contrib/libs/clapack/zhpsv.c b/contrib/libs/clapack/zhpsv.c
new file mode 100644
index 0000000000..fe60b30035
--- /dev/null
+++ b/contrib/libs/clapack/zhpsv.c
@@ -0,0 +1,177 @@
+/* zhpsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zhpsv_(char *uplo, integer *n, integer *nrhs,
+	doublecomplex *ap, integer *ipiv, doublecomplex *b, integer *ldb,
+	integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *), zhptrf_(
+	    char *, integer *, doublecomplex *, integer *, integer *),
+	    zhptrs_(char *, integer *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHPSV computes the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N Hermitian matrix stored in packed format and X */
+/* and B are N-by-NRHS matrices. */
+
+/* The diagonal pivoting method is used to factor A as */
+/* A = U * D * U**H, if UPLO = 'U', or */
+/* A = L * D * L**H, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, D is Hermitian and block diagonal with 1-by-1 */
+/* and 2-by-2 diagonal blocks. The factored form of A is then used to */
+/* solve the system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**H or A = L*D*L**H as computed by ZHPTRF, stored as */
+/* a packed triangular matrix in the same storage format as A. */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D, as */
+/* determined by ZHPTRF. If IPIV(k) > 0, then rows and columns */
+/* k and IPIV(k) were interchanged, and D(k,k) is a 1-by-1 */
+/* diagonal block. If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, */
+/* then rows and columns k-1 and -IPIV(k) were interchanged and */
+/* D(k-1:k,k-1:k) is a 2-by-2 diagonal block. If UPLO = 'L' and */
+/* IPIV(k) = IPIV(k+1) < 0, then rows and columns k+1 and */
+/* -IPIV(k) were interchanged and D(k:k+1,k:k+1) is a 2-by-2 */
+/* diagonal block. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, so the solution could not be */
+/* computed. */
+
+/* Further Details */
+/* =============== */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the Hermitian matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = conjg(aji)) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* Shift base pointers so indexing matches Fortran's 1-based arrays. */
+    --ap;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+/* Routine name is blank-padded to 6 characters for XERBLA. */
+	xerbla_("ZHPSV ", &i__1);
+	return 0;
+    }
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+/* On a positive INFO from ZHPTRF (singular D) the solve is skipped */
+/* and INFO is passed through to the caller unchanged. */
+    zhptrf_(uplo, n, &ap[1], &ipiv[1], info);
+    if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+	zhptrs_(uplo, n, nrhs, &ap[1], &ipiv[1], &b[b_offset], ldb, info);
+
+    }
+    return 0;
+
+/* End of ZHPSV */
+
+} /* zhpsv_ */
diff --git a/contrib/libs/clapack/zhpsvx.c b/contrib/libs/clapack/zhpsvx.c
new file mode 100644
index 0000000000..2b927439f7
--- /dev/null
+++ b/contrib/libs/clapack/zhpsvx.c
@@ -0,0 +1,326 @@
+/* zhpsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zhpsvx_(char *fact, char *uplo, integer *n, integer *
+	nrhs, doublecomplex *ap, doublecomplex *afp, integer *ipiv,
+	doublecomplex *b, integer *ldb, doublecomplex *x, integer *ldx,
+	doublereal *rcond, doublereal *ferr, doublereal *berr, doublecomplex *
+	work, doublereal *rwork, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, x_dim1, x_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    doublereal anorm;
+    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *);
+    extern doublereal dlamch_(char *);
+    logical nofact;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern doublereal zlanhp_(char *, char *, integer *, doublecomplex *,
+	    doublereal *);
+    extern /* Subroutine */ int zhpcon_(char *, integer *, doublecomplex *,
+	    integer *, doublereal *, doublereal *, doublecomplex *, integer *), zlacpy_(char *, integer *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, integer *), zhprfs_(char *,
+	    integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublereal *, doublereal *, doublecomplex *, doublereal *,
+	    integer *), zhptrf_(char *, integer *, doublecomplex *,
+	    integer *, integer *), zhptrs_(char *, integer *, integer
+	    *, doublecomplex *, integer *, doublecomplex *, integer *,
+	    integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHPSVX uses the diagonal pivoting factorization A = U*D*U**H or */
+/* A = L*D*L**H to compute the solution to a complex system of linear */
+/* equations A * X = B, where A is an N-by-N Hermitian matrix stored */
+/* in packed format and X and B are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the diagonal pivoting method is used to factor A as */
+/* A = U * D * U**H, if UPLO = 'U', or */
+/* A = L * D * L**H, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices and D is Hermitian and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* 2. If some D(i,i)=0, so that D is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of A has been */
+/* supplied on entry. */
+/* = 'F': On entry, AFP and IPIV contain the factored form of */
+/* A. AFP and IPIV will not be modified. */
+/* = 'N': The matrix A will be copied to AFP and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the Hermitian matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. */
+
+/* AFP (input or output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* If FACT = 'F', then AFP is an input argument and on entry */
+/* contains the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**H or A = L*D*L**H as computed by ZHPTRF, stored as */
+/* a packed triangular matrix in the same storage format as A. */
+
+/* If FACT = 'N', then AFP is an output argument and on exit */
+/* contains the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**H or A = L*D*L**H as computed by ZHPTRF, stored as */
+/* a packed triangular matrix in the same storage format as A. */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by ZHPTRF. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by ZHPTRF. */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A. If RCOND is less than the machine precision (in */
+/* particular, if RCOND = 0), the matrix is singular to working */
+/* precision. This condition is indicated by a return code of */
+/* INFO > 0. */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: D(i,i) is exactly zero. The factorization */
+/* has been completed but the factor D is exactly */
+/* singular, so the solution and error bounds could */
+/* not be computed. RCOND = 0 is returned. */
+/* = N+1: D is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* Further Details */
+/* =============== */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the Hermitian matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = conjg(aji)) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* Shift base pointers so indexing matches Fortran's 1-based arrays. */
+    --ap;
+    --afp;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    nofact = lsame_(fact, "N");
+    if (! nofact && ! lsame_(fact, "F")) {
+	*info = -1;
+    } else if (! lsame_(uplo, "U") && ! lsame_(uplo,
+	    "L")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (*ldb < max(1,*n)) {
+	*info = -9;
+    } else if (*ldx < max(1,*n)) {
+	*info = -11;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHPSVX", &i__1);
+	return 0;
+    }
+
+    if (nofact) {
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+/* N*(N+1)/2 = number of entries in one packed triangle; copy AP */
+/* into AFP so the input matrix is preserved for the residual. */
+	i__1 = *n * (*n + 1) / 2;
+	zcopy_(&i__1, &ap[1], &c__1, &afp[1], &c__1);
+	zhptrf_(uplo, n, &afp[1], &ipiv[1], info);
+
+/* Return if INFO is non-zero. */
+
+	if (*info > 0) {
+	    *rcond = 0.;
+	    return 0;
+	}
+    }
+
+/* Compute the norm of the matrix A. */
+
+    anorm = zlanhp_("I", uplo, n, &ap[1], &rwork[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+    zhpcon_(uplo, n, &afp[1], &ipiv[1], &anorm, rcond, &work[1], info);
+
+/* Compute the solution vectors X. */
+
+/* Solve on a copy of B (in X) so B stays intact for refinement. */
+    zlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+    zhptrs_(uplo, n, nrhs, &afp[1], &ipiv[1], &x[x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+    zhprfs_(uplo, n, nrhs, &ap[1], &afp[1], &ipiv[1], &b[b_offset], ldb, &x[
+	    x_offset], ldx, &ferr[1], &berr[1], &work[1], &rwork[1], info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+    if (*rcond < dlamch_("Epsilon")) {
+	*info = *n + 1;
+    }
+
+    return 0;
+
+/* End of ZHPSVX */
+
+} /* zhpsvx_ */
diff --git a/contrib/libs/clapack/zhptrd.c b/contrib/libs/clapack/zhptrd.c
new file mode 100644
index 0000000000..482983352e
--- /dev/null
+++ b/contrib/libs/clapack/zhptrd.c
@@ -0,0 +1,319 @@
+/* zhptrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b2 = {0.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zhptrd_(char *uplo, integer *n, doublecomplex *ap,
+    doublereal *d__, doublereal *e, doublecomplex *tau, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3;
+    doublereal d__1;
+    doublecomplex z__1, z__2, z__3, z__4;
+
+    /* Local variables */
+    integer i__, i1, ii, i1i1;
+    doublecomplex taui;
+    extern /* Subroutine */ int zhpr2_(char *, integer *, doublecomplex *,
+        doublecomplex *, integer *, doublecomplex *, integer *,
+        doublecomplex *);
+    doublecomplex alpha;
+    extern logical lsame_(char *, char *);
+    extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+        doublecomplex *, integer *, doublecomplex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int zhpmv_(char *, integer *, doublecomplex *,
+        doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+        doublecomplex *, integer *), zaxpy_(integer *,
+        doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+        integer *), xerbla_(char *, integer *), zlarfg_(integer *,
+        doublecomplex *, doublecomplex *, integer *, doublecomplex *);
+
+/* NOTE(review): f2c-translated LAPACK code (see the file header); comments only added, code untouched. */
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHPTRD reduces a complex Hermitian matrix A stored in packed form to */
+/* real symmetric tridiagonal form T by a unitary similarity */
+/* transformation: Q**H * A * Q = T. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+/* On exit, if UPLO = 'U', the diagonal and first superdiagonal */
+/* of A are overwritten by the corresponding elements of the */
+/* tridiagonal matrix T, and the elements above the first */
+/* superdiagonal, with the array TAU, represent the unitary */
+/* matrix Q as a product of elementary reflectors; if UPLO */
+/* = 'L', the diagonal and first subdiagonal of A are over- */
+/* written by the corresponding elements of the tridiagonal */
+/* matrix T, and the elements below the first subdiagonal, with */
+/* the array TAU, represent the unitary matrix Q as a product */
+/* of elementary reflectors. See Further Details. */
+
+/* D (output) DOUBLE PRECISION array, dimension (N) */
+/* The diagonal elements of the tridiagonal matrix T: */
+/* D(i) = A(i,i). */
+
+/* E (output) DOUBLE PRECISION array, dimension (N-1) */
+/* The off-diagonal elements of the tridiagonal matrix T: */
+/* E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. */
+
+/* TAU (output) COMPLEX*16 array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors (see Further */
+/* Details). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(n-1) . . . H(2) H(1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in AP, */
+/* overwriting A(1:i-1,i+1), and tau is stored in TAU(i). */
+
+/* If UPLO = 'L', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(1) H(2) . . . H(n-1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in AP, */
+/* overwriting A(i+2:n,i), and tau is stored in TAU(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+    /* Parameter adjustments */
+    --tau;
+    --e;
+    --d__;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U"); /* nonzero iff the upper triangle is stored */
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHPTRD", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n <= 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/* Reduce the upper triangle of A. */
+/* I1 is the index in AP of A(1,I+1). */
+
+	i1 = *n * (*n - 1) / 2 + 1; /* AP index of A(1,n) */
+	i__1 = i1 + *n - 1;
+	i__2 = i1 + *n - 1;
+	d__1 = ap[i__2].r;
+	ap[i__1].r = d__1, ap[i__1].i = 0.;
+	for (i__ = *n - 1; i__ >= 1; --i__) {
+
+/* Generate elementary reflector H(i) = I - tau * v * v' */
+/* to annihilate A(1:i-1,i+1) */
+
+	    i__1 = i1 + i__ - 1;
+	    alpha.r = ap[i__1].r, alpha.i = ap[i__1].i;
+	    zlarfg_(&i__, &alpha, &ap[i1], &c__1, &taui);
+	    i__1 = i__;
+	    e[i__1] = alpha.r;
+
+	    if (taui.r != 0. || taui.i != 0.) {
+
+/* Apply H(i) from both sides to A(1:i,1:i) */
+
+		i__1 = i1 + i__ - 1;
+		ap[i__1].r = 1., ap[i__1].i = 0.;
+
+/* Compute y := tau * A * v storing y in TAU(1:i) */
+
+		zhpmv_(uplo, &i__, &taui, &ap[1], &ap[i1], &c__1, &c_b2, &tau[
+			1], &c__1);
+
+/* Compute w := y - 1/2 * tau * (y'*v) * v */
+
+		z__3.r = -.5, z__3.i = -0.;
+		z__2.r = z__3.r * taui.r - z__3.i * taui.i, z__2.i = z__3.r *
+			taui.i + z__3.i * taui.r;
+		zdotc_(&z__4, &i__, &tau[1], &c__1, &ap[i1], &c__1);
+		z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r *
+			z__4.i + z__2.i * z__4.r;
+		alpha.r = z__1.r, alpha.i = z__1.i;
+		zaxpy_(&i__, &alpha, &ap[i1], &c__1, &tau[1], &c__1);
+
+/* Apply the transformation as a rank-2 update: */
+/* A := A - v * w' - w * v' */
+
+		z__1.r = -1., z__1.i = -0.;
+		zhpr2_(uplo, &i__, &z__1, &ap[i1], &c__1, &tau[1], &c__1, &ap[
+			1]);
+
+	    }
+	    i__1 = i1 + i__ - 1;
+	    i__2 = i__;
+	    ap[i__1].r = e[i__2], ap[i__1].i = 0.;
+	    i__1 = i__ + 1;
+	    i__2 = i1 + i__;
+	    d__[i__1] = ap[i__2].r;
+	    i__1 = i__;
+	    tau[i__1].r = taui.r, tau[i__1].i = taui.i;
+	    i1 -= i__; /* now the AP index of A(1,i) */
+/* L10: */
+	}
+	d__[1] = ap[1].r;
+    } else {
+
+/* Reduce the lower triangle of A. II is the index in AP of */
+/* A(i,i) and I1I1 is the index of A(i+1,i+1). */
+
+	ii = 1; /* AP index of A(1,1) */
+	d__1 = ap[1].r;
+	ap[1].r = d__1, ap[1].i = 0.;
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i1i1 = ii + *n - i__ + 1; /* AP index of A(i+1,i+1) */
+
+/* Generate elementary reflector H(i) = I - tau * v * v' */
+/* to annihilate A(i+2:n,i) */
+
+	    i__2 = ii + 1;
+	    alpha.r = ap[i__2].r, alpha.i = ap[i__2].i;
+	    i__2 = *n - i__;
+	    zlarfg_(&i__2, &alpha, &ap[ii + 2], &c__1, &taui);
+	    i__2 = i__;
+	    e[i__2] = alpha.r;
+
+	    if (taui.r != 0. || taui.i != 0.) {
+
+/* Apply H(i) from both sides to A(i+1:n,i+1:n) */
+
+		i__2 = ii + 1;
+		ap[i__2].r = 1., ap[i__2].i = 0.;
+
+/* Compute y := tau * A * v storing y in TAU(i:n-1) */
+
+		i__2 = *n - i__;
+		zhpmv_(uplo, &i__2, &taui, &ap[i1i1], &ap[ii + 1], &c__1, &
+			c_b2, &tau[i__], &c__1);
+
+/* Compute w := y - 1/2 * tau * (y'*v) * v */
+
+		z__3.r = -.5, z__3.i = -0.;
+		z__2.r = z__3.r * taui.r - z__3.i * taui.i, z__2.i = z__3.r *
+			taui.i + z__3.i * taui.r;
+		i__2 = *n - i__;
+		zdotc_(&z__4, &i__2, &tau[i__], &c__1, &ap[ii + 1], &c__1);
+		z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r *
+			z__4.i + z__2.i * z__4.r;
+		alpha.r = z__1.r, alpha.i = z__1.i;
+		i__2 = *n - i__;
+		zaxpy_(&i__2, &alpha, &ap[ii + 1], &c__1, &tau[i__], &c__1);
+
+/* Apply the transformation as a rank-2 update: */
+/* A := A - v * w' - w * v' */
+
+		i__2 = *n - i__;
+		z__1.r = -1., z__1.i = -0.;
+		zhpr2_(uplo, &i__2, &z__1, &ap[ii + 1], &c__1, &tau[i__], &
+			c__1, &ap[i1i1]);
+
+	    }
+	    i__2 = ii + 1;
+	    i__3 = i__;
+	    ap[i__2].r = e[i__3], ap[i__2].i = 0.;
+	    i__2 = i__;
+	    i__3 = ii;
+	    d__[i__2] = ap[i__3].r;
+	    i__2 = i__;
+	    tau[i__2].r = taui.r, tau[i__2].i = taui.i;
+	    ii = i1i1;
+/* L20: */
+	}
+	i__1 = *n;
+	i__2 = ii;
+	d__[i__1] = ap[i__2].r;
+    }
+
+    return 0;
+
+/* End of ZHPTRD */
+
+} /* zhptrd_ */
diff --git a/contrib/libs/clapack/zhptrf.c b/contrib/libs/clapack/zhptrf.c
new file mode 100644
index 0000000000..12d1a8a637
--- /dev/null
+++ b/contrib/libs/clapack/zhptrf.c
@@ -0,0 +1,821 @@
+/* zhptrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zhptrf_(char *uplo, integer *n, doublecomplex *ap,
+    integer *ipiv, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3, i__4, i__5, i__6;
+    doublereal d__1, d__2, d__3, d__4;
+    doublecomplex z__1, z__2, z__3, z__4, z__5, z__6;
+
+    /* Builtin functions */
+    double sqrt(doublereal), d_imag(doublecomplex *);
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    doublereal d__;
+    integer i__, j, k;
+    doublecomplex t;
+    doublereal r1, d11;
+    doublecomplex d12;
+    doublereal d22;
+    doublecomplex d21;
+    integer kc, kk, kp;
+    doublecomplex wk;
+    integer kx;
+    doublereal tt;
+    integer knc, kpc, npp;
+    doublecomplex wkm1, wkp1;
+    integer imax, jmax;
+    extern /* Subroutine */ int zhpr_(char *, integer *, doublereal *,
+        doublecomplex *, integer *, doublecomplex *);
+    doublereal alpha;
+    extern logical lsame_(char *, char *);
+    integer kstep;
+    logical upper;
+    extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *,
+        doublecomplex *, integer *);
+    extern doublereal dlapy2_(doublereal *, doublereal *);
+    doublereal absakk;
+    extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+        integer *, doublereal *, doublecomplex *, integer *);
+    doublereal colmax;
+    extern integer izamax_(integer *, doublecomplex *, integer *);
+    doublereal rowmax;
+
+/* NOTE(review): f2c-translated LAPACK code (see the file header); comments only added, code untouched. */
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHPTRF computes the factorization of a complex Hermitian packed */
+/* matrix A using the Bunch-Kaufman diagonal pivoting method: */
+
+/* A = U*D*U**H or A = L*D*L**H */
+
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is Hermitian and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L, stored as a packed triangular */
+/* matrix overwriting A (see below for further details). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, and division by zero will occur if it */
+/* is used to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* 5-96 - Based on modifications by J. Lewis, Boeing Computer Services */
+/* Company */
+
+/* If UPLO = 'U', then A = U*D*U', where */
+/* U = P(n)*U(n)* ... *P(k)U(k)* ..., */
+/* i.e., U is a product of terms P(k)*U(k), where k decreases from n to */
+/* 1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I v 0 ) k-s */
+/* U(k) = ( 0 I 0 ) s */
+/* ( 0 0 I ) n-k */
+/* k-s s n-k */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */
+/* If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */
+/* and A(k,k), and v overwrites A(1:k-2,k-1:k). */
+
+/* If UPLO = 'L', then A = L*D*L', where */
+/* L = P(1)*L(1)* ... *P(k)*L(k)* ..., */
+/* i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */
+/* n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I 0 0 ) k-1 */
+/* L(k) = ( 0 I 0 ) s */
+/* ( 0 v I ) n-k-s+1 */
+/* k-1 s n-k-s+1 */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */
+/* If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */
+/* and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ipiv;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U"); /* nonzero iff the upper triangle is stored */
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHPTRF", &i__1);
+	return 0;
+    }
+
+/* Initialize ALPHA for use in choosing pivot block size. */
+
+    alpha = (sqrt(17.) + 1.) / 8.; /* Bunch-Kaufman threshold, approx 0.6404 */
+
+    if (upper) {
+
+/* Factorize A as U*D*U' using the upper triangle of A */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2 */
+
+	k = *n;
+	kc = (*n - 1) * *n / 2 + 1; /* AP index of A(1,k) for k = n */
+L10:
+	knc = kc;
+
+/* If K < 1, exit from loop */
+
+	if (k < 1) {
+	    goto L110;
+	}
+	kstep = 1; /* set to 2 when a 2-by-2 pivot block is chosen */
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+	i__1 = kc + k - 1;
+	absakk = (d__1 = ap[i__1].r, abs(d__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+	if (k > 1) {
+	    i__1 = k - 1;
+	    imax = izamax_(&i__1, &ap[kc], &c__1);
+	    i__1 = kc + imax - 1;
+	    colmax = (d__1 = ap[i__1].r, abs(d__1)) + (d__2 = d_imag(&ap[kc +
+		    imax - 1]), abs(d__2));
+	} else {
+	    colmax = 0.;
+	}
+
+	if (max(absakk,colmax) == 0.) {
+
+/* Column K is zero: set INFO and continue */
+
+	    if (*info == 0) {
+		*info = k;
+	    }
+	    kp = k;
+	    i__1 = kc + k - 1;
+	    i__2 = kc + k - 1;
+	    d__1 = ap[i__2].r;
+	    ap[i__1].r = d__1, ap[i__1].i = 0.;
+	} else {
+	    if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+		kp = k;
+	    } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+		rowmax = 0.;
+		jmax = imax;
+		kx = imax * (imax + 1) / 2 + imax;
+		i__1 = k;
+		for (j = imax + 1; j <= i__1; ++j) {
+		    i__2 = kx;
+		    if ((d__1 = ap[i__2].r, abs(d__1)) + (d__2 = d_imag(&ap[
+			    kx]), abs(d__2)) > rowmax) {
+			i__2 = kx;
+			rowmax = (d__1 = ap[i__2].r, abs(d__1)) + (d__2 =
+				d_imag(&ap[kx]), abs(d__2));
+			jmax = j;
+		    }
+		    kx += j;
+/* L20: */
+		}
+		kpc = (imax - 1) * imax / 2 + 1;
+		if (imax > 1) {
+		    i__1 = imax - 1;
+		    jmax = izamax_(&i__1, &ap[kpc], &c__1);
+/* Computing MAX */
+		    i__1 = kpc + jmax - 1;
+		    d__3 = rowmax, d__4 = (d__1 = ap[i__1].r, abs(d__1)) + (
+			    d__2 = d_imag(&ap[kpc + jmax - 1]), abs(d__2));
+		    rowmax = max(d__3,d__4);
+		}
+
+		if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+		    kp = k;
+		} else /* if(complicated condition) */ {
+		    i__1 = kpc + imax - 1;
+		    if ((d__1 = ap[i__1].r, abs(d__1)) >= alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+			kp = imax;
+		    } else {
+
+/* interchange rows and columns K-1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+			kp = imax;
+			kstep = 2;
+		    }
+		}
+	    }
+
+	    kk = k - kstep + 1;
+	    if (kstep == 2) {
+		knc = knc - k + 1;
+	    }
+	    if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the leading */
+/* submatrix A(1:k,1:k) */
+
+		i__1 = kp - 1;
+		zswap_(&i__1, &ap[knc], &c__1, &ap[kpc], &c__1);
+		kx = kpc + kp - 1;
+		i__1 = kk - 1;
+		for (j = kp + 1; j <= i__1; ++j) {
+		    kx = kx + j - 1;
+		    d_cnjg(&z__1, &ap[knc + j - 1]);
+		    t.r = z__1.r, t.i = z__1.i;
+		    i__2 = knc + j - 1;
+		    d_cnjg(&z__1, &ap[kx]);
+		    ap[i__2].r = z__1.r, ap[i__2].i = z__1.i;
+		    i__2 = kx;
+		    ap[i__2].r = t.r, ap[i__2].i = t.i;
+/* L30: */
+		}
+		i__1 = kx + kk - 1;
+		d_cnjg(&z__1, &ap[kx + kk - 1]);
+		ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+		i__1 = knc + kk - 1;
+		r1 = ap[i__1].r;
+		i__1 = knc + kk - 1;
+		i__2 = kpc + kp - 1;
+		d__1 = ap[i__2].r;
+		ap[i__1].r = d__1, ap[i__1].i = 0.;
+		i__1 = kpc + kp - 1;
+		ap[i__1].r = r1, ap[i__1].i = 0.;
+		if (kstep == 2) {
+		    i__1 = kc + k - 1;
+		    i__2 = kc + k - 1;
+		    d__1 = ap[i__2].r;
+		    ap[i__1].r = d__1, ap[i__1].i = 0.;
+		    i__1 = kc + k - 2;
+		    t.r = ap[i__1].r, t.i = ap[i__1].i;
+		    i__1 = kc + k - 2;
+		    i__2 = kc + kp - 1;
+		    ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+		    i__1 = kc + kp - 1;
+		    ap[i__1].r = t.r, ap[i__1].i = t.i;
+		}
+	    } else {
+		i__1 = kc + k - 1;
+		i__2 = kc + k - 1;
+		d__1 = ap[i__2].r;
+		ap[i__1].r = d__1, ap[i__1].i = 0.;
+		if (kstep == 2) {
+		    i__1 = kc - 1;
+		    i__2 = kc - 1;
+		    d__1 = ap[i__2].r;
+		    ap[i__1].r = d__1, ap[i__1].i = 0.;
+		}
+	    }
+
+/* Update the leading submatrix */
+
+	    if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = U(k)*D(k) */
+
+/* where U(k) is the k-th column of U */
+
+/* Perform a rank-1 update of A(1:k-1,1:k-1) as */
+
+/* A := A - U(k)*D(k)*U(k)' = A - W(k)*1/D(k)*W(k)' */
+
+		i__1 = kc + k - 1;
+		r1 = 1. / ap[i__1].r;
+		i__1 = k - 1;
+		d__1 = -r1;
+		zhpr_(uplo, &i__1, &d__1, &ap[kc], &c__1, &ap[1]);
+
+/* Store U(k) in column k */
+
+		i__1 = k - 1;
+		zdscal_(&i__1, &r1, &ap[kc], &c__1);
+	    } else {
+
+/* 2-by-2 pivot block D(k): columns k and k-1 now hold */
+
+/* ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */
+
+/* where U(k) and U(k-1) are the k-th and (k-1)-th columns */
+/* of U */
+
+/* Perform a rank-2 update of A(1:k-2,1:k-2) as */
+
+/* A := A - ( U(k-1) U(k) )*D(k)*( U(k-1) U(k) )' */
+/* = A - ( W(k-1) W(k) )*inv(D(k))*( W(k-1) W(k) )' */
+
+		if (k > 2) {
+
+		    i__1 = k - 1 + (k - 1) * k / 2;
+		    d__1 = ap[i__1].r;
+		    d__2 = d_imag(&ap[k - 1 + (k - 1) * k / 2]);
+		    d__ = dlapy2_(&d__1, &d__2);
+		    i__1 = k - 1 + (k - 2) * (k - 1) / 2;
+		    d22 = ap[i__1].r / d__;
+		    i__1 = k + (k - 1) * k / 2;
+		    d11 = ap[i__1].r / d__;
+		    tt = 1. / (d11 * d22 - 1.);
+		    i__1 = k - 1 + (k - 1) * k / 2;
+		    z__1.r = ap[i__1].r / d__, z__1.i = ap[i__1].i / d__;
+		    d12.r = z__1.r, d12.i = z__1.i;
+		    d__ = tt / d__;
+
+		    for (j = k - 2; j >= 1; --j) {
+			i__1 = j + (k - 2) * (k - 1) / 2;
+			z__3.r = d11 * ap[i__1].r, z__3.i = d11 * ap[i__1].i;
+			d_cnjg(&z__5, &d12);
+			i__2 = j + (k - 1) * k / 2;
+			z__4.r = z__5.r * ap[i__2].r - z__5.i * ap[i__2].i,
+				z__4.i = z__5.r * ap[i__2].i + z__5.i * ap[
+				i__2].r;
+			z__2.r = z__3.r - z__4.r, z__2.i = z__3.i - z__4.i;
+			z__1.r = d__ * z__2.r, z__1.i = d__ * z__2.i;
+			wkm1.r = z__1.r, wkm1.i = z__1.i;
+			i__1 = j + (k - 1) * k / 2;
+			z__3.r = d22 * ap[i__1].r, z__3.i = d22 * ap[i__1].i;
+			i__2 = j + (k - 2) * (k - 1) / 2;
+			z__4.r = d12.r * ap[i__2].r - d12.i * ap[i__2].i,
+				z__4.i = d12.r * ap[i__2].i + d12.i * ap[i__2]
+				.r;
+			z__2.r = z__3.r - z__4.r, z__2.i = z__3.i - z__4.i;
+			z__1.r = d__ * z__2.r, z__1.i = d__ * z__2.i;
+			wk.r = z__1.r, wk.i = z__1.i;
+			for (i__ = j; i__ >= 1; --i__) {
+			    i__1 = i__ + (j - 1) * j / 2;
+			    i__2 = i__ + (j - 1) * j / 2;
+			    i__3 = i__ + (k - 1) * k / 2;
+			    d_cnjg(&z__4, &wk);
+			    z__3.r = ap[i__3].r * z__4.r - ap[i__3].i *
+				    z__4.i, z__3.i = ap[i__3].r * z__4.i + ap[
+				    i__3].i * z__4.r;
+			    z__2.r = ap[i__2].r - z__3.r, z__2.i = ap[i__2].i
+				    - z__3.i;
+			    i__4 = i__ + (k - 2) * (k - 1) / 2;
+			    d_cnjg(&z__6, &wkm1);
+			    z__5.r = ap[i__4].r * z__6.r - ap[i__4].i *
+				    z__6.i, z__5.i = ap[i__4].r * z__6.i + ap[
+				    i__4].i * z__6.r;
+			    z__1.r = z__2.r - z__5.r, z__1.i = z__2.i -
+				    z__5.i;
+			    ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+/* L40: */
+			}
+			i__1 = j + (k - 1) * k / 2;
+			ap[i__1].r = wk.r, ap[i__1].i = wk.i;
+			i__1 = j + (k - 2) * (k - 1) / 2;
+			ap[i__1].r = wkm1.r, ap[i__1].i = wkm1.i;
+			i__1 = j + (j - 1) * j / 2;
+			i__2 = j + (j - 1) * j / 2;
+			d__1 = ap[i__2].r;
+			z__1.r = d__1, z__1.i = 0.;
+			ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+/* L50: */
+		    }
+
+		}
+
+	    }
+	}
+
+/* Store details of the interchanges in IPIV */
+
+	if (kstep == 1) {
+	    ipiv[k] = kp;
+	} else {
+	    ipiv[k] = -kp;
+	    ipiv[k - 1] = -kp;
+	}
+
+/* Decrease K and return to the start of the main loop */
+
+	k -= kstep;
+	kc = knc - k;
+	goto L10;
+
+    } else {
+
+/* Factorize A as L*D*L' using the lower triangle of A */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2 */
+
+	k = 1;
+	kc = 1;
+	npp = *n * (*n + 1) / 2; /* total length of the packed array */
+L60:
+	knc = kc;
+
+/* If K > N, exit from loop */
+
+	if (k > *n) {
+	    goto L110;
+	}
+	kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+	i__1 = kc;
+	absakk = (d__1 = ap[i__1].r, abs(d__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+	if (k < *n) {
+	    i__1 = *n - k;
+	    imax = k + izamax_(&i__1, &ap[kc + 1], &c__1);
+	    i__1 = kc + imax - k;
+	    colmax = (d__1 = ap[i__1].r, abs(d__1)) + (d__2 = d_imag(&ap[kc +
+		    imax - k]), abs(d__2));
+	} else {
+	    colmax = 0.;
+	}
+
+	if (max(absakk,colmax) == 0.) {
+
+/* Column K is zero: set INFO and continue */
+
+	    if (*info == 0) {
+		*info = k;
+	    }
+	    kp = k;
+	    i__1 = kc;
+	    i__2 = kc;
+	    d__1 = ap[i__2].r;
+	    ap[i__1].r = d__1, ap[i__1].i = 0.;
+	} else {
+	    if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+		kp = k;
+	    } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+		rowmax = 0.;
+		kx = kc + imax - k;
+		i__1 = imax - 1;
+		for (j = k; j <= i__1; ++j) {
+		    i__2 = kx;
+		    if ((d__1 = ap[i__2].r, abs(d__1)) + (d__2 = d_imag(&ap[
+			    kx]), abs(d__2)) > rowmax) {
+			i__2 = kx;
+			rowmax = (d__1 = ap[i__2].r, abs(d__1)) + (d__2 =
+				d_imag(&ap[kx]), abs(d__2));
+			jmax = j;
+		    }
+		    kx = kx + *n - j;
+/* L70: */
+		}
+		kpc = npp - (*n - imax + 1) * (*n - imax + 2) / 2 + 1;
+		if (imax < *n) {
+		    i__1 = *n - imax;
+		    jmax = imax + izamax_(&i__1, &ap[kpc + 1], &c__1);
+/* Computing MAX */
+		    i__1 = kpc + jmax - imax;
+		    d__3 = rowmax, d__4 = (d__1 = ap[i__1].r, abs(d__1)) + (
+			    d__2 = d_imag(&ap[kpc + jmax - imax]), abs(d__2));
+		    rowmax = max(d__3,d__4);
+		}
+
+		if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+		    kp = k;
+		} else /* if(complicated condition) */ {
+		    i__1 = kpc;
+		    if ((d__1 = ap[i__1].r, abs(d__1)) >= alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+			kp = imax;
+		    } else {
+
+/* interchange rows and columns K+1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+			kp = imax;
+			kstep = 2;
+		    }
+		}
+	    }
+
+	    kk = k + kstep - 1;
+	    if (kstep == 2) {
+		knc = knc + *n - k + 1;
+	    }
+	    if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the trailing */
+/* submatrix A(k:n,k:n) */
+
+		if (kp < *n) {
+		    i__1 = *n - kp;
+		    zswap_(&i__1, &ap[knc + kp - kk + 1], &c__1, &ap[kpc + 1],
+			     &c__1);
+		}
+		kx = knc + kp - kk;
+		i__1 = kp - 1;
+		for (j = kk + 1; j <= i__1; ++j) {
+		    kx = kx + *n - j + 1;
+		    d_cnjg(&z__1, &ap[knc + j - kk]);
+		    t.r = z__1.r, t.i = z__1.i;
+		    i__2 = knc + j - kk;
+		    d_cnjg(&z__1, &ap[kx]);
+		    ap[i__2].r = z__1.r, ap[i__2].i = z__1.i;
+		    i__2 = kx;
+		    ap[i__2].r = t.r, ap[i__2].i = t.i;
+/* L80: */
+		}
+		i__1 = knc + kp - kk;
+		d_cnjg(&z__1, &ap[knc + kp - kk]);
+		ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+		i__1 = knc;
+		r1 = ap[i__1].r;
+		i__1 = knc;
+		i__2 = kpc;
+		d__1 = ap[i__2].r;
+		ap[i__1].r = d__1, ap[i__1].i = 0.;
+		i__1 = kpc;
+		ap[i__1].r = r1, ap[i__1].i = 0.;
+		if (kstep == 2) {
+		    i__1 = kc;
+		    i__2 = kc;
+		    d__1 = ap[i__2].r;
+		    ap[i__1].r = d__1, ap[i__1].i = 0.;
+		    i__1 = kc + 1;
+		    t.r = ap[i__1].r, t.i = ap[i__1].i;
+		    i__1 = kc + 1;
+		    i__2 = kc + kp - k;
+		    ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+		    i__1 = kc + kp - k;
+		    ap[i__1].r = t.r, ap[i__1].i = t.i;
+		}
+	    } else {
+		i__1 = kc;
+		i__2 = kc;
+		d__1 = ap[i__2].r;
+		ap[i__1].r = d__1, ap[i__1].i = 0.;
+		if (kstep == 2) {
+		    i__1 = knc;
+		    i__2 = knc;
+		    d__1 = ap[i__2].r;
+		    ap[i__1].r = d__1, ap[i__1].i = 0.;
+		}
+	    }
+
+/* Update the trailing submatrix */
+
+	    if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = L(k)*D(k) */
+
+/* where L(k) is the k-th column of L */
+
+		if (k < *n) {
+
+/* Perform a rank-1 update of A(k+1:n,k+1:n) as */
+
+/* A := A - L(k)*D(k)*L(k)' = A - W(k)*(1/D(k))*W(k)' */
+
+		    i__1 = kc;
+		    r1 = 1. / ap[i__1].r;
+		    i__1 = *n - k;
+		    d__1 = -r1;
+		    zhpr_(uplo, &i__1, &d__1, &ap[kc + 1], &c__1, &ap[kc + *n
+			    - k + 1]);
+
+/* Store L(k) in column K */
+
+		    i__1 = *n - k;
+		    zdscal_(&i__1, &r1, &ap[kc + 1], &c__1);
+		}
+	    } else {
+
+/* 2-by-2 pivot block D(k): columns K and K+1 now hold */
+
+/* ( W(k) W(k+1) ) = ( L(k) L(k+1) )*D(k) */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th columns */
+/* of L */
+
+		if (k < *n - 1) {
+
+/* Perform a rank-2 update of A(k+2:n,k+2:n) as */
+
+/* A := A - ( L(k) L(k+1) )*D(k)*( L(k) L(k+1) )' */
+/* = A - ( W(k) W(k+1) )*inv(D(k))*( W(k) W(k+1) )' */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th */
+/* columns of L */
+
+		    i__1 = k + 1 + (k - 1) * ((*n << 1) - k) / 2;
+		    d__1 = ap[i__1].r;
+		    d__2 = d_imag(&ap[k + 1 + (k - 1) * ((*n << 1) - k) / 2]);
+		    d__ = dlapy2_(&d__1, &d__2);
+		    i__1 = k + 1 + k * ((*n << 1) - k - 1) / 2;
+		    d11 = ap[i__1].r / d__;
+		    i__1 = k + (k - 1) * ((*n << 1) - k) / 2;
+		    d22 = ap[i__1].r / d__;
+		    tt = 1. / (d11 * d22 - 1.);
+		    i__1 = k + 1 + (k - 1) * ((*n << 1) - k) / 2;
+		    z__1.r = ap[i__1].r / d__, z__1.i = ap[i__1].i / d__;
+		    d21.r = z__1.r, d21.i = z__1.i;
+		    d__ = tt / d__;
+
+		    i__1 = *n;
+		    for (j = k + 2; j <= i__1; ++j) {
+			i__2 = j + (k - 1) * ((*n << 1) - k) / 2;
+			z__3.r = d11 * ap[i__2].r, z__3.i = d11 * ap[i__2].i;
+			i__3 = j + k * ((*n << 1) - k - 1) / 2;
+			z__4.r = d21.r * ap[i__3].r - d21.i * ap[i__3].i,
+				z__4.i = d21.r * ap[i__3].i + d21.i * ap[i__3]
+				.r;
+			z__2.r = z__3.r - z__4.r, z__2.i = z__3.i - z__4.i;
+			z__1.r = d__ * z__2.r, z__1.i = d__ * z__2.i;
+			wk.r = z__1.r, wk.i = z__1.i;
+			i__2 = j + k * ((*n << 1) - k - 1) / 2;
+			z__3.r = d22 * ap[i__2].r, z__3.i = d22 * ap[i__2].i;
+			d_cnjg(&z__5, &d21);
+			i__3 = j + (k - 1) * ((*n << 1) - k) / 2;
+			z__4.r = z__5.r * ap[i__3].r - z__5.i * ap[i__3].i,
+				z__4.i = z__5.r * ap[i__3].i + z__5.i * ap[
+				i__3].r;
+			z__2.r = z__3.r - z__4.r, z__2.i = z__3.i - z__4.i;
+			z__1.r = d__ * z__2.r, z__1.i = d__ * z__2.i;
+			wkp1.r = z__1.r, wkp1.i = z__1.i;
+			i__2 = *n;
+			for (i__ = j; i__ <= i__2; ++i__) {
+			    i__3 = i__ + (j - 1) * ((*n << 1) - j) / 2;
+			    i__4 = i__ + (j - 1) * ((*n << 1) - j) / 2;
+			    i__5 = i__ + (k - 1) * ((*n << 1) - k) / 2;
+			    d_cnjg(&z__4, &wk);
+			    z__3.r = ap[i__5].r * z__4.r - ap[i__5].i *
+				    z__4.i, z__3.i = ap[i__5].r * z__4.i + ap[
+				    i__5].i * z__4.r;
+			    z__2.r = ap[i__4].r - z__3.r, z__2.i = ap[i__4].i
+				    - z__3.i;
+			    i__6 = i__ + k * ((*n << 1) - k - 1) / 2;
+			    d_cnjg(&z__6, &wkp1);
+			    z__5.r = ap[i__6].r * z__6.r - ap[i__6].i *
+				    z__6.i, z__5.i = ap[i__6].r * z__6.i + ap[
+				    i__6].i * z__6.r;
+			    z__1.r = z__2.r - z__5.r, z__1.i = z__2.i -
+				    z__5.i;
+			    ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+/* L90: */
+			}
+			i__2 = j + (k - 1) * ((*n << 1) - k) / 2;
+			ap[i__2].r = wk.r, ap[i__2].i = wk.i;
+			i__2 = j + k * ((*n << 1) - k - 1) / 2;
+			ap[i__2].r = wkp1.r, ap[i__2].i = wkp1.i;
+			i__2 = j + (j - 1) * ((*n << 1) - j) / 2;
+			i__3 = j + (j - 1) * ((*n << 1) - j) / 2;
+			d__1 = ap[i__3].r;
+			z__1.r = d__1, z__1.i = 0.;
+			ap[i__2].r = z__1.r, ap[i__2].i = z__1.i;
+/* L100: */
+		    }
+		}
+	    }
+	}
+
+/* Store details of the interchanges in IPIV */
+
+	if (kstep == 1) {
+	    ipiv[k] = kp;
+	} else {
+	    ipiv[k] = -kp;
+	    ipiv[k + 1] = -kp;
+	}
+
+/* Increase K and return to the start of the main loop */
+
+	k += kstep;
+	kc = knc + *n - k + 2;
+	goto L60;
+
+    }
+
+L110:
+    return 0;
+
+/* End of ZHPTRF */
+
+} /* zhptrf_ */
diff --git a/contrib/libs/clapack/zhptri.c b/contrib/libs/clapack/zhptri.c
new file mode 100644
index 0000000000..f4e4938d9e
--- /dev/null
+++ b/contrib/libs/clapack/zhptri.c
@@ -0,0 +1,513 @@
+/* zhptri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b2 = {0.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zhptri_(char *uplo, integer *n, doublecomplex *ap,
+ integer *ipiv, doublecomplex *work, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ doublereal d__1;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ doublereal d__;
+ integer j, k;
+ doublereal t, ak;
+ integer kc, kp, kx, kpc, npp;
+ doublereal akp1;
+ doublecomplex temp, akkp1;
+ extern logical lsame_(char *, char *);
+ extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ integer kstep;
+ logical upper;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zhpmv_(char *, integer *,
+ doublecomplex *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, doublecomplex *, integer *), zswap_(
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *)
+ , xerbla_(char *, integer *);
+ integer kcnext;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHPTRI computes the inverse of a complex Hermitian indefinite matrix */
+/* A in packed storage using the factorization A = U*D*U**H or */
+/* A = L*D*L**H computed by ZHPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**H; */
+/* = 'L': Lower triangular, form is A = L*D*L**H. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the block diagonal matrix D and the multipliers */
+/* used to obtain the factor U or L as computed by ZHPTRF, */
+/* stored as a packed triangular matrix. */
+
+/* On exit, if INFO = 0, the (Hermitian) inverse of the original */
+/* matrix, stored as a packed triangular matrix. The j-th column */
+/* of inv(A) is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = inv(A)(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', */
+/* AP(i + (j-1)*(2n-j)/2) = inv(A)(i,j) for j<=i<=n. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by ZHPTRF. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) = 0; the matrix is singular and its */
+/* inverse could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --work;
+ --ipiv;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHPTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check that the diagonal matrix D is nonsingular. */
+
+ if (upper) {
+
+/* Upper triangular storage: examine D from bottom to top */
+
+ kp = *n * (*n + 1) / 2;
+ for (*info = *n; *info >= 1; --(*info)) {
+ i__1 = kp;
+ if (ipiv[*info] > 0 && (ap[i__1].r == 0. && ap[i__1].i == 0.)) {
+ return 0;
+ }
+ kp -= *info;
+/* L10: */
+ }
+ } else {
+
+/* Lower triangular storage: examine D from top to bottom. */
+
+ kp = 1;
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ i__2 = kp;
+ if (ipiv[*info] > 0 && (ap[i__2].r == 0. && ap[i__2].i == 0.)) {
+ return 0;
+ }
+ kp = kp + *n - *info + 1;
+/* L20: */
+ }
+ }
+ *info = 0;
+
+ if (upper) {
+
+/* Compute inv(A) from the factorization A = U*D*U'. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+ kc = 1;
+L30:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L50;
+ }
+
+ kcnext = kc + k;
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block: D(k,k) is a real scalar pivot */
+
+/* Invert the diagonal block. */
+
+ i__1 = kc + k - 1;
+ i__2 = kc + k - 1;
+ d__1 = 1. / ap[i__2].r;
+ ap[i__1].r = d__1, ap[i__1].i = 0.;
+
+/* Compute column K of the inverse. */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ zcopy_(&i__1, &ap[kc], &c__1, &work[1], &c__1);
+ i__1 = k - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zhpmv_(uplo, &i__1, &z__1, &ap[1], &work[1], &c__1, &c_b2, &
+ ap[kc], &c__1);
+ i__1 = kc + k - 1;
+ i__2 = kc + k - 1;
+ i__3 = k - 1;
+ zdotc_(&z__2, &i__3, &work[1], &c__1, &ap[kc], &c__1);
+ d__1 = z__2.r;
+ z__1.r = ap[i__2].r - d__1, z__1.i = ap[i__2].i;
+ ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+ }
+ kstep = 1;
+ } else {
+
+/* 2 x 2 diagonal block: invert the Hermitian 2x2 pivot in closed form */
+
+/* Invert the diagonal block. */
+
+ t = z_abs(&ap[kcnext + k - 1]);
+ i__1 = kc + k - 1;
+ ak = ap[i__1].r / t;
+ i__1 = kcnext + k;
+ akp1 = ap[i__1].r / t;
+ i__1 = kcnext + k - 1;
+ z__1.r = ap[i__1].r / t, z__1.i = ap[i__1].i / t;
+ akkp1.r = z__1.r, akkp1.i = z__1.i;
+ d__ = t * (ak * akp1 - 1.);
+ i__1 = kc + k - 1;
+ d__1 = akp1 / d__;
+ ap[i__1].r = d__1, ap[i__1].i = 0.;
+ i__1 = kcnext + k;
+ d__1 = ak / d__;
+ ap[i__1].r = d__1, ap[i__1].i = 0.;
+ i__1 = kcnext + k - 1;
+ z__2.r = -akkp1.r, z__2.i = -akkp1.i;
+ z__1.r = z__2.r / d__, z__1.i = z__2.i / d__;
+ ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+
+/* Compute columns K and K+1 of the inverse. */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ zcopy_(&i__1, &ap[kc], &c__1, &work[1], &c__1);
+ i__1 = k - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zhpmv_(uplo, &i__1, &z__1, &ap[1], &work[1], &c__1, &c_b2, &
+ ap[kc], &c__1);
+ i__1 = kc + k - 1;
+ i__2 = kc + k - 1;
+ i__3 = k - 1;
+ zdotc_(&z__2, &i__3, &work[1], &c__1, &ap[kc], &c__1);
+ d__1 = z__2.r;
+ z__1.r = ap[i__2].r - d__1, z__1.i = ap[i__2].i;
+ ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+ i__1 = kcnext + k - 1;
+ i__2 = kcnext + k - 1;
+ i__3 = k - 1;
+ zdotc_(&z__2, &i__3, &ap[kc], &c__1, &ap[kcnext], &c__1);
+ z__1.r = ap[i__2].r - z__2.r, z__1.i = ap[i__2].i - z__2.i;
+ ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+ i__1 = k - 1;
+ zcopy_(&i__1, &ap[kcnext], &c__1, &work[1], &c__1);
+ i__1 = k - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zhpmv_(uplo, &i__1, &z__1, &ap[1], &work[1], &c__1, &c_b2, &
+ ap[kcnext], &c__1);
+ i__1 = kcnext + k;
+ i__2 = kcnext + k;
+ i__3 = k - 1;
+ zdotc_(&z__2, &i__3, &work[1], &c__1, &ap[kcnext], &c__1);
+ d__1 = z__2.r;
+ z__1.r = ap[i__2].r - d__1, z__1.i = ap[i__2].i;
+ ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+ }
+ kstep = 2;
+ kcnext = kcnext + k + 1;
+ }
+
+ kp = (i__1 = ipiv[k], abs(i__1));
+ if (kp != k) {
+
+/* Interchange rows and columns K and KP in the leading */
+/* submatrix A(1:k+1,1:k+1) */
+
+ kpc = (kp - 1) * kp / 2 + 1;
+ i__1 = kp - 1;
+ zswap_(&i__1, &ap[kc], &c__1, &ap[kpc], &c__1);
+ kx = kpc + kp - 1;
+ i__1 = k - 1;
+ for (j = kp + 1; j <= i__1; ++j) {
+ kx = kx + j - 1;
+ d_cnjg(&z__1, &ap[kc + j - 1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = kc + j - 1;
+ d_cnjg(&z__1, &ap[kx]);
+ ap[i__2].r = z__1.r, ap[i__2].i = z__1.i;
+ i__2 = kx;
+ ap[i__2].r = temp.r, ap[i__2].i = temp.i;
+/* L40: */
+ }
+ i__1 = kc + kp - 1;
+ d_cnjg(&z__1, &ap[kc + kp - 1]);
+ ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+ i__1 = kc + k - 1;
+ temp.r = ap[i__1].r, temp.i = ap[i__1].i;
+ i__1 = kc + k - 1;
+ i__2 = kpc + kp - 1;
+ ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+ i__1 = kpc + kp - 1;
+ ap[i__1].r = temp.r, ap[i__1].i = temp.i;
+ if (kstep == 2) {
+ i__1 = kc + k + k - 1;
+ temp.r = ap[i__1].r, temp.i = ap[i__1].i;
+ i__1 = kc + k + k - 1;
+ i__2 = kc + k + kp - 1;
+ ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+ i__1 = kc + k + kp - 1;
+ ap[i__1].r = temp.r, ap[i__1].i = temp.i;
+ }
+ }
+
+ k += kstep;
+ kc = kcnext;
+ goto L30;
+L50:
+
+ ;
+ } else {
+
+/* Compute inv(A) from the factorization A = L*D*L'. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ npp = *n * (*n + 1) / 2;
+ k = *n;
+ kc = npp;
+L60:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L80;
+ }
+
+ kcnext = kc - (*n - k + 2);
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block: D(k,k) is a real scalar pivot */
+
+/* Invert the diagonal block. */
+
+ i__1 = kc;
+ i__2 = kc;
+ d__1 = 1. / ap[i__2].r;
+ ap[i__1].r = d__1, ap[i__1].i = 0.;
+
+/* Compute column K of the inverse. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ zcopy_(&i__1, &ap[kc + 1], &c__1, &work[1], &c__1);
+ i__1 = *n - k;
+ z__1.r = -1., z__1.i = -0.;
+ zhpmv_(uplo, &i__1, &z__1, &ap[kc + *n - k + 1], &work[1], &
+ c__1, &c_b2, &ap[kc + 1], &c__1);
+ i__1 = kc;
+ i__2 = kc;
+ i__3 = *n - k;
+ zdotc_(&z__2, &i__3, &work[1], &c__1, &ap[kc + 1], &c__1);
+ d__1 = z__2.r;
+ z__1.r = ap[i__2].r - d__1, z__1.i = ap[i__2].i;
+ ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+ }
+ kstep = 1;
+ } else {
+
+/* 2 x 2 diagonal block: invert the Hermitian 2x2 pivot in closed form */
+
+/* Invert the diagonal block. */
+
+ t = z_abs(&ap[kcnext + 1]);
+ i__1 = kcnext;
+ ak = ap[i__1].r / t;
+ i__1 = kc;
+ akp1 = ap[i__1].r / t;
+ i__1 = kcnext + 1;
+ z__1.r = ap[i__1].r / t, z__1.i = ap[i__1].i / t;
+ akkp1.r = z__1.r, akkp1.i = z__1.i;
+ d__ = t * (ak * akp1 - 1.);
+ i__1 = kcnext;
+ d__1 = akp1 / d__;
+ ap[i__1].r = d__1, ap[i__1].i = 0.;
+ i__1 = kc;
+ d__1 = ak / d__;
+ ap[i__1].r = d__1, ap[i__1].i = 0.;
+ i__1 = kcnext + 1;
+ z__2.r = -akkp1.r, z__2.i = -akkp1.i;
+ z__1.r = z__2.r / d__, z__1.i = z__2.i / d__;
+ ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+
+/* Compute columns K-1 and K of the inverse. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ zcopy_(&i__1, &ap[kc + 1], &c__1, &work[1], &c__1);
+ i__1 = *n - k;
+ z__1.r = -1., z__1.i = -0.;
+ zhpmv_(uplo, &i__1, &z__1, &ap[kc + (*n - k + 1)], &work[1], &
+ c__1, &c_b2, &ap[kc + 1], &c__1);
+ i__1 = kc;
+ i__2 = kc;
+ i__3 = *n - k;
+ zdotc_(&z__2, &i__3, &work[1], &c__1, &ap[kc + 1], &c__1);
+ d__1 = z__2.r;
+ z__1.r = ap[i__2].r - d__1, z__1.i = ap[i__2].i;
+ ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+ i__1 = kcnext + 1;
+ i__2 = kcnext + 1;
+ i__3 = *n - k;
+ zdotc_(&z__2, &i__3, &ap[kc + 1], &c__1, &ap[kcnext + 2], &
+ c__1);
+ z__1.r = ap[i__2].r - z__2.r, z__1.i = ap[i__2].i - z__2.i;
+ ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+ i__1 = *n - k;
+ zcopy_(&i__1, &ap[kcnext + 2], &c__1, &work[1], &c__1);
+ i__1 = *n - k;
+ z__1.r = -1., z__1.i = -0.;
+ zhpmv_(uplo, &i__1, &z__1, &ap[kc + (*n - k + 1)], &work[1], &
+ c__1, &c_b2, &ap[kcnext + 2], &c__1);
+ i__1 = kcnext;
+ i__2 = kcnext;
+ i__3 = *n - k;
+ zdotc_(&z__2, &i__3, &work[1], &c__1, &ap[kcnext + 2], &c__1);
+ d__1 = z__2.r;
+ z__1.r = ap[i__2].r - d__1, z__1.i = ap[i__2].i;
+ ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+ }
+ kstep = 2;
+ kcnext -= *n - k + 3;
+ }
+
+ kp = (i__1 = ipiv[k], abs(i__1));
+ if (kp != k) {
+
+/* Interchange rows and columns K and KP in the trailing */
+/* submatrix A(k-1:n,k-1:n) */
+
+ kpc = npp - (*n - kp + 1) * (*n - kp + 2) / 2 + 1;
+ if (kp < *n) {
+ i__1 = *n - kp;
+ zswap_(&i__1, &ap[kc + kp - k + 1], &c__1, &ap[kpc + 1], &
+ c__1);
+ }
+ kx = kc + kp - k;
+ i__1 = kp - 1;
+ for (j = k + 1; j <= i__1; ++j) {
+ kx = kx + *n - j + 1;
+ d_cnjg(&z__1, &ap[kc + j - k]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = kc + j - k;
+ d_cnjg(&z__1, &ap[kx]);
+ ap[i__2].r = z__1.r, ap[i__2].i = z__1.i;
+ i__2 = kx;
+ ap[i__2].r = temp.r, ap[i__2].i = temp.i;
+/* L70: */
+ }
+ i__1 = kc + kp - k;
+ d_cnjg(&z__1, &ap[kc + kp - k]);
+ ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+ i__1 = kc;
+ temp.r = ap[i__1].r, temp.i = ap[i__1].i;
+ i__1 = kc;
+ i__2 = kpc;
+ ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+ i__1 = kpc;
+ ap[i__1].r = temp.r, ap[i__1].i = temp.i;
+ if (kstep == 2) {
+ i__1 = kc - *n + k - 1;
+ temp.r = ap[i__1].r, temp.i = ap[i__1].i;
+ i__1 = kc - *n + k - 1;
+ i__2 = kc - *n + kp - 1;
+ ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+ i__1 = kc - *n + kp - 1;
+ ap[i__1].r = temp.r, ap[i__1].i = temp.i;
+ }
+ }
+
+ k -= kstep;
+ kc = kcnext;
+ goto L60;
+L80:
+ ;
+ }
+
+ return 0;
+
+/* End of ZHPTRI */
+
+} /* zhptri_ */
diff --git a/contrib/libs/clapack/zhptrs.c b/contrib/libs/clapack/zhptrs.c
new file mode 100644
index 0000000000..0c35c7900d
--- /dev/null
+++ b/contrib/libs/clapack/zhptrs.c
@@ -0,0 +1,532 @@
+/* zhptrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zhptrs_(char *uplo, integer *n, integer *nrhs,
+ doublecomplex *ap, integer *ipiv, doublecomplex *b, integer *ldb,
+ integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *), d_cnjg(
+ doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer j, k;
+ doublereal s;
+ doublecomplex ak, bk;
+ integer kc, kp;
+ doublecomplex akm1, bkm1, akm1k;
+ extern logical lsame_(char *, char *);
+ doublecomplex denom;
+ extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int zgeru_(integer *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zswap_(integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *), xerbla_(char *, integer *), zdscal_(integer *, doublereal *, doublecomplex *,
+ integer *), zlacgv_(integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHPTRS solves a system of linear equations A*X = B with a complex */
+/* Hermitian matrix A stored in packed format using the factorization */
+/* A = U*D*U**H or A = L*D*L**H computed by ZHPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**H; */
+/* = 'L': Lower triangular, form is A = L*D*L**H. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The block diagonal matrix D and the multipliers used to */
+/* obtain the factor U or L as computed by ZHPTRF, stored as a */
+/* packed triangular matrix. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by ZHPTRF. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --ap;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZHPTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Solve A*X = B, where A = U*D*U'. */
+
+/* First solve U*D*X = B, overwriting B with X. */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = *n;
+ kc = *n * (*n + 1) / 2 + 1;
+L10:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L30;
+ }
+
+ kc -= k;
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block: scale RHS row K by 1/D(k,k) */
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(U(K)), where U(K) is the transformation */
+/* stored in column K of A. */
+
+ i__1 = k - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgeru_(&i__1, nrhs, &z__1, &ap[kc], &c__1, &b[k + b_dim1], ldb, &
+ b[b_dim1 + 1], ldb);
+
+/* Multiply by the inverse of the diagonal block. */
+
+ i__1 = kc + k - 1;
+ s = 1. / ap[i__1].r;
+ zdscal_(nrhs, &s, &b[k + b_dim1], ldb);
+ --k;
+ } else {
+
+/* 2 x 2 diagonal block: solve with the 2x2 pivot via its closed-form inverse */
+
+/* Interchange rows K-1 and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k - 1) {
+ zswap_(nrhs, &b[k - 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(U(K)), where U(K) is the transformation */
+/* stored in columns K-1 and K of A. */
+
+ i__1 = k - 2;
+ z__1.r = -1., z__1.i = -0.;
+ zgeru_(&i__1, nrhs, &z__1, &ap[kc], &c__1, &b[k + b_dim1], ldb, &
+ b[b_dim1 + 1], ldb);
+ i__1 = k - 2;
+ z__1.r = -1., z__1.i = -0.;
+ zgeru_(&i__1, nrhs, &z__1, &ap[kc - (k - 1)], &c__1, &b[k - 1 +
+ b_dim1], ldb, &b[b_dim1 + 1], ldb);
+
+/* Multiply by the inverse of the diagonal block. */
+
+ i__1 = kc + k - 2;
+ akm1k.r = ap[i__1].r, akm1k.i = ap[i__1].i;
+ z_div(&z__1, &ap[kc - 1], &akm1k);
+ akm1.r = z__1.r, akm1.i = z__1.i;
+ d_cnjg(&z__2, &akm1k);
+ z_div(&z__1, &ap[kc + k - 1], &z__2);
+ ak.r = z__1.r, ak.i = z__1.i;
+ z__2.r = akm1.r * ak.r - akm1.i * ak.i, z__2.i = akm1.r * ak.i +
+ akm1.i * ak.r;
+ z__1.r = z__2.r - 1., z__1.i = z__2.i - 0.;
+ denom.r = z__1.r, denom.i = z__1.i;
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ z_div(&z__1, &b[k - 1 + j * b_dim1], &akm1k);
+ bkm1.r = z__1.r, bkm1.i = z__1.i;
+ d_cnjg(&z__2, &akm1k);
+ z_div(&z__1, &b[k + j * b_dim1], &z__2);
+ bk.r = z__1.r, bk.i = z__1.i;
+ i__2 = k - 1 + j * b_dim1;
+ z__3.r = ak.r * bkm1.r - ak.i * bkm1.i, z__3.i = ak.r *
+ bkm1.i + ak.i * bkm1.r;
+ z__2.r = z__3.r - bk.r, z__2.i = z__3.i - bk.i;
+ z_div(&z__1, &z__2, &denom);
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ i__2 = k + j * b_dim1;
+ z__3.r = akm1.r * bk.r - akm1.i * bk.i, z__3.i = akm1.r *
+ bk.i + akm1.i * bk.r;
+ z__2.r = z__3.r - bkm1.r, z__2.i = z__3.i - bkm1.i;
+ z_div(&z__1, &z__2, &denom);
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L20: */
+ }
+ kc = kc - k + 1;
+ k += -2;
+ }
+
+ goto L10;
+L30:
+
+/* Next solve U'*X = B, overwriting B with X. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+ kc = 1;
+L40:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L50;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Multiply by inv(U'(K)), where U(K) is the transformation */
+/* stored in column K of A. */
+
+ if (k > 1) {
+ zlacgv_(nrhs, &b[k + b_dim1], ldb);
+ i__1 = k - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("Conjugate transpose", &i__1, nrhs, &z__1, &b[b_offset]
+, ldb, &ap[kc], &c__1, &c_b1, &b[k + b_dim1], ldb);
+ zlacgv_(nrhs, &b[k + b_dim1], ldb);
+ }
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ kc += k;
+ ++k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Multiply by inv(U'(K+1)), where U(K+1) is the transformation */
+/* stored in columns K and K+1 of A. */
+
+ if (k > 1) {
+ zlacgv_(nrhs, &b[k + b_dim1], ldb);
+ i__1 = k - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("Conjugate transpose", &i__1, nrhs, &z__1, &b[b_offset]
+, ldb, &ap[kc], &c__1, &c_b1, &b[k + b_dim1], ldb);
+ zlacgv_(nrhs, &b[k + b_dim1], ldb);
+
+ zlacgv_(nrhs, &b[k + 1 + b_dim1], ldb);
+ i__1 = k - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("Conjugate transpose", &i__1, nrhs, &z__1, &b[b_offset]
+, ldb, &ap[kc + k], &c__1, &c_b1, &b[k + 1 + b_dim1],
+ ldb);
+ zlacgv_(nrhs, &b[k + 1 + b_dim1], ldb);
+ }
+
+/* Interchange rows K and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k) {
+ zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ kc = kc + (k << 1) + 1;
+ k += 2;
+ }
+
+ goto L40;
+L50:
+
+ ;
+ } else {
+
+/* Solve A*X = B, where A = L*D*L'. */
+
+/* First solve L*D*X = B, overwriting B with X. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = 1;
+ kc = 1;
+L60:
+
+/* If K > N, exit from loop. */
+
+ if (k > *n) {
+ goto L80;
+ }
+
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block: scale RHS row K by 1/D(k,k) */
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(L(K)), where L(K) is the transformation */
+/* stored in column K of A. */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ z__1.r = -1., z__1.i = -0.;
+ zgeru_(&i__1, nrhs, &z__1, &ap[kc + 1], &c__1, &b[k + b_dim1],
+ ldb, &b[k + 1 + b_dim1], ldb);
+ }
+
+/* Multiply by the inverse of the diagonal block. */
+
+ i__1 = kc;
+ s = 1. / ap[i__1].r;
+ zdscal_(nrhs, &s, &b[k + b_dim1], ldb);
+ kc = kc + *n - k + 1;
+ ++k;
+ } else {
+
+/* 2 x 2 diagonal block: solve with the 2x2 pivot via its closed-form inverse */
+
+/* Interchange rows K+1 and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k + 1) {
+ zswap_(nrhs, &b[k + 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+
+/* Multiply by inv(L(K)), where L(K) is the transformation */
+/* stored in columns K and K+1 of A. */
+
+ if (k < *n - 1) {
+ i__1 = *n - k - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgeru_(&i__1, nrhs, &z__1, &ap[kc + 2], &c__1, &b[k + b_dim1],
+ ldb, &b[k + 2 + b_dim1], ldb);
+ i__1 = *n - k - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgeru_(&i__1, nrhs, &z__1, &ap[kc + *n - k + 2], &c__1, &b[k
+ + 1 + b_dim1], ldb, &b[k + 2 + b_dim1], ldb);
+ }
+
+/* Multiply by the inverse of the diagonal block. */
+
+ i__1 = kc + 1;
+ akm1k.r = ap[i__1].r, akm1k.i = ap[i__1].i;
+ d_cnjg(&z__2, &akm1k);
+ z_div(&z__1, &ap[kc], &z__2);
+ akm1.r = z__1.r, akm1.i = z__1.i;
+ z_div(&z__1, &ap[kc + *n - k + 1], &akm1k);
+ ak.r = z__1.r, ak.i = z__1.i;
+ z__2.r = akm1.r * ak.r - akm1.i * ak.i, z__2.i = akm1.r * ak.i +
+ akm1.i * ak.r;
+ z__1.r = z__2.r - 1., z__1.i = z__2.i - 0.;
+ denom.r = z__1.r, denom.i = z__1.i;
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ d_cnjg(&z__2, &akm1k);
+ z_div(&z__1, &b[k + j * b_dim1], &z__2);
+ bkm1.r = z__1.r, bkm1.i = z__1.i;
+ z_div(&z__1, &b[k + 1 + j * b_dim1], &akm1k);
+ bk.r = z__1.r, bk.i = z__1.i;
+ i__2 = k + j * b_dim1;
+ z__3.r = ak.r * bkm1.r - ak.i * bkm1.i, z__3.i = ak.r *
+ bkm1.i + ak.i * bkm1.r;
+ z__2.r = z__3.r - bk.r, z__2.i = z__3.i - bk.i;
+ z_div(&z__1, &z__2, &denom);
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ i__2 = k + 1 + j * b_dim1;
+ z__3.r = akm1.r * bk.r - akm1.i * bk.i, z__3.i = akm1.r *
+ bk.i + akm1.i * bk.r;
+ z__2.r = z__3.r - bkm1.r, z__2.i = z__3.i - bkm1.i;
+ z_div(&z__1, &z__2, &denom);
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L70: */
+ }
+ kc = kc + (*n - k << 1) + 1;
+ k += 2;
+ }
+
+ goto L60;
+L80:
+
+/* Next solve L'*X = B, overwriting B with X. */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+ k = *n;
+ kc = *n * (*n + 1) / 2 + 1;
+L90:
+
+/* If K < 1, exit from loop. */
+
+ if (k < 1) {
+ goto L100;
+ }
+
+ kc -= *n - k + 1;
+ if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Multiply by inv(L'(K)), where L(K) is the transformation */
+/* stored in column K of A. */
+
+ if (k < *n) {
+ zlacgv_(nrhs, &b[k + b_dim1], ldb);
+ i__1 = *n - k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("Conjugate transpose", &i__1, nrhs, &z__1, &b[k + 1 +
+ b_dim1], ldb, &ap[kc + 1], &c__1, &c_b1, &b[k +
+ b_dim1], ldb);
+ zlacgv_(nrhs, &b[k + b_dim1], ldb);
+ }
+
+/* Interchange rows K and IPIV(K). */
+
+ kp = ipiv[k];
+ if (kp != k) {
+ zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ --k;
+ } else {
+
+/* 2 x 2 diagonal block */
+
+/* Multiply by inv(L'(K-1)), where L(K-1) is the transformation */
+/* stored in columns K-1 and K of A. */
+
+ if (k < *n) {
+ zlacgv_(nrhs, &b[k + b_dim1], ldb);
+ i__1 = *n - k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("Conjugate transpose", &i__1, nrhs, &z__1, &b[k + 1 +
+ b_dim1], ldb, &ap[kc + 1], &c__1, &c_b1, &b[k +
+ b_dim1], ldb);
+ zlacgv_(nrhs, &b[k + b_dim1], ldb);
+
+ zlacgv_(nrhs, &b[k - 1 + b_dim1], ldb);
+ i__1 = *n - k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("Conjugate transpose", &i__1, nrhs, &z__1, &b[k + 1 +
+ b_dim1], ldb, &ap[kc - (*n - k)], &c__1, &c_b1, &b[k
+ - 1 + b_dim1], ldb);
+ zlacgv_(nrhs, &b[k - 1 + b_dim1], ldb);
+ }
+
+/* Interchange rows K and -IPIV(K). */
+
+ kp = -ipiv[k];
+ if (kp != k) {
+ zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+ }
+ kc -= *n - k + 2;
+ k += -2;
+ }
+
+ goto L90;
+L100:
+ ;
+ }
+
+ return 0;
+
+/* End of ZHPTRS */
+
+} /* zhptrs_ */
diff --git a/contrib/libs/clapack/zhsein.c b/contrib/libs/clapack/zhsein.c
new file mode 100644
index 0000000000..0e27b9bea9
--- /dev/null
+++ b/contrib/libs/clapack/zhsein.c
@@ -0,0 +1,433 @@
+/* zhsein.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static logical c_false = FALSE_;
+static logical c_true = TRUE_;
+
+/* Subroutine */ int zhsein_(char *side, char *eigsrc, char *initv, logical *
+	select, integer *n, doublecomplex *h__, integer *ldh, doublecomplex *
+	w, doublecomplex *vl, integer *ldvl, doublecomplex *vr, integer *ldvr,
+	integer *mm, integer *m, doublecomplex *work, doublereal *rwork,
+	integer *ifaill, integer *ifailr, integer *info)
+{
+    /* System generated locals */
+    integer h_dim1, h_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+	    i__2, i__3;
+    doublereal d__1, d__2;
+    doublecomplex z__1, z__2;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *);
+
+    /* Local variables */
+    integer i__, k, kl, kr, ks;
+    doublecomplex wk;
+    integer kln;
+    doublereal ulp, eps3, unfl;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    logical leftv, bothv;
+    doublereal hnorm;
+    extern doublereal dlamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *), zlaein_(
+	    logical *, logical *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, doublecomplex *, doublecomplex *, integer *,
+	    doublereal *, doublereal *, doublereal *, integer *);
+    extern doublereal zlanhs_(char *, integer *, doublecomplex *, integer *,
+	    doublereal *);
+    logical noinit;
+    integer ldwork;
+    logical rightv, fromqr;
+    doublereal smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZHSEIN uses inverse iteration to find specified right and/or left */
+/* eigenvectors of a complex upper Hessenberg matrix H. */
+
+/* The right eigenvector x and the left eigenvector y of the matrix H */
+/* corresponding to an eigenvalue w are defined by: */
+
+/* H * x = w * x, y**h * H = w * y**h */
+
+/* where y**h denotes the conjugate transpose of the vector y. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'R': compute right eigenvectors only; */
+/* = 'L': compute left eigenvectors only; */
+/* = 'B': compute both right and left eigenvectors. */
+
+/* EIGSRC (input) CHARACTER*1 */
+/* Specifies the source of eigenvalues supplied in W: */
+/* = 'Q': the eigenvalues were found using ZHSEQR; thus, if */
+/* H has zero subdiagonal elements, and so is */
+/* block-triangular, then the j-th eigenvalue can be */
+/* assumed to be an eigenvalue of the block containing */
+/* the j-th row/column. This property allows ZHSEIN to */
+/* perform inverse iteration on just one diagonal block. */
+/* = 'N': no assumptions are made on the correspondence */
+/* between eigenvalues and diagonal blocks. In this */
+/* case, ZHSEIN must always perform inverse iteration */
+/* using the whole matrix H. */
+
+/* INITV (input) CHARACTER*1 */
+/* = 'N': no initial vectors are supplied; */
+/* = 'U': user-supplied initial vectors are stored in the arrays */
+/* VL and/or VR. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* Specifies the eigenvectors to be computed. To select the */
+/* eigenvector corresponding to the eigenvalue W(j), */
+/* SELECT(j) must be set to .TRUE.. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N >= 0. */
+
+/* H (input) COMPLEX*16 array, dimension (LDH,N) */
+/* The upper Hessenberg matrix H. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH >= max(1,N). */
+
+/* W (input/output) COMPLEX*16 array, dimension (N) */
+/* On entry, the eigenvalues of H. */
+/* On exit, the real parts of W may have been altered since */
+/* close eigenvalues are perturbed slightly in searching for */
+/* independent eigenvectors. */
+
+/* VL (input/output) COMPLEX*16 array, dimension (LDVL,MM) */
+/* On entry, if INITV = 'U' and SIDE = 'L' or 'B', VL must */
+/* contain starting vectors for the inverse iteration for the */
+/* left eigenvectors; the starting vector for each eigenvector */
+/* must be in the same column in which the eigenvector will be */
+/* stored. */
+/* On exit, if SIDE = 'L' or 'B', the left eigenvectors */
+/* specified by SELECT will be stored consecutively in the */
+/* columns of VL, in the same order as their eigenvalues. */
+/* If SIDE = 'R', VL is not referenced. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the array VL. */
+/* LDVL >= max(1,N) if SIDE = 'L' or 'B'; LDVL >= 1 otherwise. */
+
+/* VR (input/output) COMPLEX*16 array, dimension (LDVR,MM) */
+/* On entry, if INITV = 'U' and SIDE = 'R' or 'B', VR must */
+/* contain starting vectors for the inverse iteration for the */
+/* right eigenvectors; the starting vector for each eigenvector */
+/* must be in the same column in which the eigenvector will be */
+/* stored. */
+/* On exit, if SIDE = 'R' or 'B', the right eigenvectors */
+/* specified by SELECT will be stored consecutively in the */
+/* columns of VR, in the same order as their eigenvalues. */
+/* If SIDE = 'L', VR is not referenced. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. */
+/* LDVR >= max(1,N) if SIDE = 'R' or 'B'; LDVR >= 1 otherwise. */
+
+/* MM (input) INTEGER */
+/* The number of columns in the arrays VL and/or VR. MM >= M. */
+
+/* M (output) INTEGER */
+/* The number of columns in the arrays VL and/or VR required to */
+/* store the eigenvectors (= the number of .TRUE. elements in */
+/* SELECT). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* IFAILL (output) INTEGER array, dimension (MM) */
+/* If SIDE = 'L' or 'B', IFAILL(i) = j > 0 if the left */
+/* eigenvector in the i-th column of VL (corresponding to the */
+/* eigenvalue w(j)) failed to converge; IFAILL(i) = 0 if the */
+/* eigenvector converged satisfactorily. */
+/* If SIDE = 'R', IFAILL is not referenced. */
+
+/* IFAILR (output) INTEGER array, dimension (MM) */
+/* If SIDE = 'R' or 'B', IFAILR(i) = j > 0 if the right */
+/* eigenvector in the i-th column of VR (corresponding to the */
+/* eigenvalue w(j)) failed to converge; IFAILR(i) = 0 if the */
+/* eigenvector converged satisfactorily. */
+/* If SIDE = 'L', IFAILR is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, i is the number of eigenvectors which */
+/* failed to converge; see IFAILL and IFAILR for further */
+/* details. */
+
+/* Further Details */
+/* =============== */
+
+/* Each eigenvector is normalized so that the element of largest */
+/* magnitude has magnitude 1; here the magnitude of a complex number */
+/* (x,y) is taken to be |x|+|y|. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters. */
+
+    /* Parameter adjustments */
+    --select;
+    h_dim1 = *ldh;
+    h_offset = 1 + h_dim1;
+    h__ -= h_offset;
+    --w;
+    vl_dim1 = *ldvl;
+    vl_offset = 1 + vl_dim1;
+    vl -= vl_offset;
+    vr_dim1 = *ldvr;
+    vr_offset = 1 + vr_dim1;
+    vr -= vr_offset;
+    --work;
+    --rwork;
+    --ifaill;
+    --ifailr;
+
+    /* Function Body */
+    bothv = lsame_(side, "B");
+    rightv = lsame_(side, "R") || bothv;
+    leftv = lsame_(side, "L") || bothv;
+
+    fromqr = lsame_(eigsrc, "Q");
+
+    noinit = lsame_(initv, "N");
+
+/* Set M to the number of columns required to store the selected */
+/* eigenvectors. */
+
+    *m = 0;
+    i__1 = *n;
+    for (k = 1; k <= i__1; ++k) {
+	if (select[k]) {
+	    ++(*m);
+	}
+/* L10: */
+    }
+
+    *info = 0;
+    if (! rightv && ! leftv) {
+	*info = -1;
+    } else if (! fromqr && ! lsame_(eigsrc, "N")) {
+	*info = -2;
+    } else if (! noinit && ! lsame_(initv, "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -5;
+    } else if (*ldh < max(1,*n)) {
+	*info = -7;
+    } else if (*ldvl < 1 || leftv && *ldvl < *n) {
+	*info = -10;
+    } else if (*ldvr < 1 || rightv && *ldvr < *n) {
+	*info = -12;
+    } else if (*mm < *m) {
+	*info = -13;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZHSEIN", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible. */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Set machine-dependent constants. */
+
+    unfl = dlamch_("Safe minimum");
+    ulp = dlamch_("Precision");
+    smlnum = unfl * (*n / ulp);
+
+    ldwork = *n;
+
+/* KL and KR delimit the diagonal block affiliated with the current */
+/* eigenvalue; KLN records the value of KL for which HNORM was last */
+/* computed, so the norm is re-evaluated only when the block changes. */
+
+    kl = 1;
+    kln = 0;
+    if (fromqr) {
+	kr = 0;
+    } else {
+	kr = *n;
+    }
+
+/* KS indexes the column of VL/VR that receives the next selected */
+/* eigenvector. */
+
+    ks = 1;
+
+    i__1 = *n;
+    for (k = 1; k <= i__1; ++k) {
+	if (select[k]) {
+
+/* Compute eigenvector(s) corresponding to W(K). */
+
+	    if (fromqr) {
+
+/* If affiliation of eigenvalues is known, check whether */
+/* the matrix splits. */
+
+/* Determine KL and KR such that 1 <= KL <= K <= KR <= N */
+/* and H(KL,KL-1) and H(KR+1,KR) are zero (or KL = 1 or */
+/* KR = N). */
+
+/* Then inverse iteration can be performed with the */
+/* submatrix H(KL:N,KL:N) for a left eigenvector, and with */
+/* the submatrix H(1:KR,1:KR) for a right eigenvector. */
+
+		i__2 = kl + 1;
+		for (i__ = k; i__ >= i__2; --i__) {
+		    i__3 = i__ + (i__ - 1) * h_dim1;
+		    if (h__[i__3].r == 0. && h__[i__3].i == 0.) {
+			goto L30;
+		    }
+/* L20: */
+		}
+L30:
+		kl = i__;
+		if (k > kr) {
+		    i__2 = *n - 1;
+		    for (i__ = k; i__ <= i__2; ++i__) {
+			i__3 = i__ + 1 + i__ * h_dim1;
+			if (h__[i__3].r == 0. && h__[i__3].i == 0.) {
+			    goto L50;
+			}
+/* L40: */
+		    }
+L50:
+		    kr = i__;
+		}
+	    }
+
+	    if (kl != kln) {
+		kln = kl;
+
+/* Compute infinity-norm of submatrix H(KL:KR,KL:KR) if it */
+/* has not been computed before. */
+
+		i__2 = kr - kl + 1;
+		hnorm = zlanhs_("I", &i__2, &h__[kl + kl * h_dim1], ldh, &
+			rwork[1]);
+		if (hnorm > 0.) {
+		    eps3 = hnorm * ulp;
+		} else {
+		    eps3 = smlnum;
+		}
+	    }
+
+/* Perturb eigenvalue if it is close to any previous */
+/* selected eigenvalues affiliated to the submatrix */
+/* H(KL:KR,KL:KR). Close roots are modified by EPS3. */
+
+	    i__2 = k;
+	    wk.r = w[i__2].r, wk.i = w[i__2].i;
+L60:
+	    i__2 = kl;
+	    for (i__ = k - 1; i__ >= i__2; --i__) {
+		i__3 = i__;
+		z__2.r = w[i__3].r - wk.r, z__2.i = w[i__3].i - wk.i;
+		z__1.r = z__2.r, z__1.i = z__2.i;
+		if (select[i__] && (d__1 = z__1.r, abs(d__1)) + (d__2 =
+			d_imag(&z__1), abs(d__2)) < eps3) {
+		    z__1.r = wk.r + eps3, z__1.i = wk.i;
+		    wk.r = z__1.r, wk.i = z__1.i;
+		    goto L60;
+		}
+/* L70: */
+	    }
+	    i__2 = k;
+	    w[i__2].r = wk.r, w[i__2].i = wk.i;
+
+	    if (leftv) {
+
+/* Compute left eigenvector. */
+
+		i__2 = *n - kl + 1;
+		zlaein_(&c_false, &noinit, &i__2, &h__[kl + kl * h_dim1], ldh,
+			 &wk, &vl[kl + ks * vl_dim1], &work[1], &ldwork, &
+			rwork[1], &eps3, &smlnum, &iinfo);
+		if (iinfo > 0) {
+		    ++(*info);
+		    ifaill[ks] = k;
+		} else {
+		    ifaill[ks] = 0;
+		}
+		i__2 = kl - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + ks * vl_dim1;
+		    vl[i__3].r = 0., vl[i__3].i = 0.;
+/* L80: */
+		}
+	    }
+	    if (rightv) {
+
+/* Compute right eigenvector. */
+
+		zlaein_(&c_true, &noinit, &kr, &h__[h_offset], ldh, &wk, &vr[
+			ks * vr_dim1 + 1], &work[1], &ldwork, &rwork[1], &
+			eps3, &smlnum, &iinfo);
+		if (iinfo > 0) {
+		    ++(*info);
+		    ifailr[ks] = k;
+		} else {
+		    ifailr[ks] = 0;
+		}
+		i__2 = *n;
+		for (i__ = kr + 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + ks * vr_dim1;
+		    vr[i__3].r = 0., vr[i__3].i = 0.;
+/* L90: */
+		}
+	    }
+	    ++ks;
+	}
+/* L100: */
+    }
+
+    return 0;
+
+/* End of ZHSEIN */
+
+} /* zhsein_ */
diff --git a/contrib/libs/clapack/zhseqr.c b/contrib/libs/clapack/zhseqr.c
new file mode 100644
index 0000000000..dc69792a99
--- /dev/null
+++ b/contrib/libs/clapack/zhseqr.c
@@ -0,0 +1,483 @@
+/* zhseqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+static integer c__12 = 12;
+static integer c__2 = 2;
+static integer c__49 = 49;
+
+/* Subroutine */ int zhseqr_(char *job, char *compz, integer *n, integer *ilo,
+	 integer *ihi, doublecomplex *h__, integer *ldh, doublecomplex *w,
+	doublecomplex *z__, integer *ldz, doublecomplex *work, integer *lwork,
+	 integer *info)
+{
+    /* System generated locals */
+    address a__1[2];
+    integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3[2];
+    doublereal d__1, d__2, d__3;
+    doublecomplex z__1;
+    char ch__1[2];
+
+    /* Builtin functions */
+    /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+    /* Local variables */
+    doublecomplex hl[2401]	/* was [49][49] */;
+    integer kbot, nmin;
+    extern logical lsame_(char *, char *);
+    logical initz;
+    doublecomplex workl[49];
+    logical wantt, wantz;
+    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *), zlaqr0_(logical *, logical *,
+	    integer *, integer *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, integer *), xerbla_(char *, integer *
+);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int zlahqr_(logical *, logical *, integer *,
+	    integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, integer *, doublecomplex *, integer *, integer *),
+	    zlacpy_(char *, integer *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *), zlaset_(char *, integer *,
+	    integer *, doublecomplex *, doublecomplex *, doublecomplex *,
+	    integer *);
+    logical lquery;
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+/* Purpose */
+/* ======= */
+
+/* ZHSEQR computes the eigenvalues of a Hessenberg matrix H */
+/* and, optionally, the matrices T and Z from the Schur decomposition */
+/* H = Z T Z**H, where T is an upper triangular matrix (the */
+/* Schur form), and Z is the unitary matrix of Schur vectors. */
+
+/* Optionally Z may be postmultiplied into an input unitary */
+/* matrix Q so that this routine can give the Schur factorization */
+/* of a matrix A which has been reduced to the Hessenberg form H */
+/* by the unitary matrix Q: A = Q*H*Q**H = (QZ)*H*(QZ)**H. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* = 'E': compute eigenvalues only; */
+/* = 'S': compute eigenvalues and the Schur form T. */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': no Schur vectors are computed; */
+/* = 'I': Z is initialized to the unit matrix and the matrix Z */
+/* of Schur vectors of H is returned; */
+/* = 'V': Z must contain a unitary matrix Q on entry, and */
+/* the product Q*Z is returned. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N .GE. 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that H is already upper triangular in rows */
+/* and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally */
+/* set by a previous call to ZGEBAL, and then passed to ZGEHRD */
+/* when the matrix output by ZGEBAL is reduced to Hessenberg */
+/* form. Otherwise ILO and IHI should be set to 1 and N */
+/* respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. */
+/* If N = 0, then ILO = 1 and IHI = 0. */
+
+/* H (input/output) COMPLEX*16 array, dimension (LDH,N) */
+/* On entry, the upper Hessenberg matrix H. */
+/* On exit, if INFO = 0 and JOB = 'S', H contains the upper */
+/* triangular matrix T from the Schur decomposition (the */
+/* Schur form). If INFO = 0 and JOB = 'E', the contents of */
+/* H are unspecified on exit. (The output value of H when */
+/* INFO.GT.0 is given under the description of INFO below.) */
+
+/* Unlike earlier versions of ZHSEQR, this subroutine may */
+/* explicitly set H(i,j) = 0 for i.GT.j and j = 1, 2, ... ILO-1 */
+/* or j = IHI+1, IHI+2, ... N. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH .GE. max(1,N). */
+
+/* W (output) COMPLEX*16 array, dimension (N) */
+/* The computed eigenvalues. If JOB = 'S', the eigenvalues are */
+/* stored in the same order as on the diagonal of the Schur */
+/* form returned in H, with W(i) = H(i,i). */
+
+/* Z (input/output) COMPLEX*16 array, dimension (LDZ,N) */
+/* If COMPZ = 'N', Z is not referenced. */
+/* If COMPZ = 'I', on entry Z need not be set and on exit, */
+/* if INFO = 0, Z contains the unitary matrix Z of the Schur */
+/* vectors of H. If COMPZ = 'V', on entry Z must contain an */
+/* N-by-N matrix Q, which is assumed to be equal to the unit */
+/* matrix except for the submatrix Z(ILO:IHI,ILO:IHI). On exit, */
+/* if INFO = 0, Z contains Q*Z. */
+/* Normally Q is the unitary matrix generated by ZUNGHR */
+/* after the call to ZGEHRD which formed the Hessenberg matrix */
+/* H. (The output value of Z when INFO.GT.0 is given under */
+/* the description of INFO below.) */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. if COMPZ = 'I' or */
+/* COMPZ = 'V', then LDZ.GE.MAX(1,N). Otherwise, LDZ.GE.1. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) */
+/* On exit, if INFO = 0, WORK(1) returns an estimate of */
+/* the optimal value for LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK .GE. max(1,N) */
+/* is sufficient and delivers very good and sometimes */
+/* optimal performance. However, LWORK as large as 11*N */
+/* may be required for optimal performance. A workspace */
+/* query is recommended to determine the optimal workspace */
+/* size. */
+
+/* If LWORK = -1, then ZHSEQR does a workspace query. */
+/* In this case, ZHSEQR checks the input parameters and */
+/* estimates the optimal workspace size for the given */
+/* values of N, ILO and IHI. The estimate is returned */
+/* in WORK(1). No error message related to LWORK is */
+/* issued by XERBLA. Neither H nor Z are accessed. */
+
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* .LT. 0: if INFO = -i, the i-th argument had an illegal */
+/* value */
+/* .GT. 0: if INFO = i, ZHSEQR failed to compute all of */
+/* the eigenvalues. Elements 1:ilo-1 and i+1:n of W */
+/* contain those eigenvalues which have been */
+/* successfully computed. (Failures are rare.) */
+
+/* If INFO .GT. 0 and JOB = 'E', then on exit, the */
+/* remaining unconverged eigenvalues are the eigen- */
+/* values of the upper Hessenberg matrix rows and */
+/* columns ILO through INFO of the final, output */
+/* value of H. */
+
+/* If INFO .GT. 0 and JOB = 'S', then on exit */
+
+/* (*) (initial value of H)*U = U*(final value of H) */
+
+/* where U is a unitary matrix. The final */
+/* value of H is upper Hessenberg and triangular in */
+/* rows and columns INFO+1 through IHI. */
+
+/* If INFO .GT. 0 and COMPZ = 'V', then on exit */
+
+/* (final value of Z) = (initial value of Z)*U */
+
+/* where U is the unitary matrix in (*) (regard- */
+/* less of the value of JOB.) */
+
+/* If INFO .GT. 0 and COMPZ = 'I', then on exit */
+/* (final value of Z) = U */
+/* where U is the unitary matrix in (*) (regard- */
+/* less of the value of JOB.) */
+
+/* If INFO .GT. 0 and COMPZ = 'N', then Z is not */
+/* accessed. */
+
+/* ================================================================ */
+/* Default values supplied by */
+/* ILAENV(ISPEC,'ZHSEQR',JOB(:1)//COMPZ(:1),N,ILO,IHI,LWORK). */
+/* It is suggested that these defaults be adjusted in order */
+/* to attain best performance in each particular */
+/* computational environment. */
+
+/* ISPEC=12: The ZLAHQR vs ZLAQR0 crossover point. */
+/* Default: 75. (Must be at least 11.) */
+
+/* ISPEC=13: Recommended deflation window size. */
+/* This depends on ILO, IHI and NS. NS is the */
+/* number of simultaneous shifts returned */
+/* by ILAENV(ISPEC=15). (See ISPEC=15 below.) */
+/* The default for (IHI-ILO+1).LE.500 is NS. */
+/* The default for (IHI-ILO+1).GT.500 is 3*NS/2. */
+
+/* ISPEC=14: Nibble crossover point. (See IPARMQ for */
+/* details.) Default: 14% of deflation window */
+/* size. */
+
+/* ISPEC=15: Number of simultaneous shifts in a multishift */
+/* QR iteration. */
+
+/* If IHI-ILO+1 is ... */
+
+/* greater than ...but less ... the */
+/* or equal to ... than default is */
+
+/* 1 30 NS = 2(+) */
+/* 30 60 NS = 4(+) */
+/* 60 150 NS = 10(+) */
+/* 150 590 NS = ** */
+/* 590 3000 NS = 64 */
+/* 3000 6000 NS = 128 */
+/* 6000 infinity NS = 256 */
+
+/* (+) By default some or all matrices of this order */
+/* are passed to the implicit double shift routine */
+/* ZLAHQR and this parameter is ignored. See */
+/* ISPEC=12 above and comments in IPARMQ for */
+/* details. */
+
+/* (**) The asterisks (**) indicate an ad-hoc */
+/* function of N increasing from 10 to 64. */
+
+/* ISPEC=16: Select structured matrix multiply. */
+/* If the number of simultaneous shifts (specified */
+/* by ISPEC=15) is less than 14, then the default */
+/* for ISPEC=16 is 0. Otherwise the default for */
+/* ISPEC=16 is 2. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* References: */
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 */
+/* Performance, SIAM Journal of Matrix Analysis, volume 23, pages */
+/* 929--947, 2002. */
+
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part II: Aggressive Early Deflation, SIAM Journal */
+/* of Matrix Analysis, volume 23, pages 948--973, 2002. */
+
+/* ================================================================ */
+/* .. Parameters .. */
+
+/* ==== Matrices of order NTINY or smaller must be processed by */
+/* . ZLAHQR because of insufficient subdiagonal scratch space. */
+/* . (This is a hard limit.) ==== */
+
+/* ==== NL allocates some local workspace to help small matrices */
+/* . through a rare ZLAHQR failure. NL .GT. NTINY = 11 is */
+/* . required and NL .LE. NMIN = ILAENV(ISPEC=12,...) is recom- */
+/* . mended. (The default value of NMIN is 75.) Using NL = 49 */
+/* . allows up to six simultaneous shifts and a 16-by-16 */
+/* . deflation window. ==== */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* ==== Decode and check the input parameters. ==== */
+
+    /* Parameter adjustments */
+    h_dim1 = *ldh;
+    h_offset = 1 + h_dim1;
+    h__ -= h_offset;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+
+    /* Function Body */
+    wantt = lsame_(job, "S");
+    initz = lsame_(compz, "I");
+    wantz = initz || lsame_(compz, "V");
+    d__1 = (doublereal) max(1,*n);
+    z__1.r = d__1, z__1.i = 0.;
+    work[1].r = z__1.r, work[1].i = z__1.i;
+    lquery = *lwork == -1;
+
+    *info = 0;
+    if (! lsame_(job, "E") && ! wantt) {
+	*info = -1;
+    } else if (! lsame_(compz, "N") && ! wantz) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*ilo < 1 || *ilo > max(1,*n)) {
+	*info = -4;
+    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+	*info = -5;
+    } else if (*ldh < max(1,*n)) {
+	*info = -7;
+    } else if (*ldz < 1 || wantz && *ldz < max(1,*n)) {
+	*info = -10;
+    } else if (*lwork < max(1,*n) && ! lquery) {
+	*info = -12;
+    }
+
+    if (*info != 0) {
+
+/* ==== Quick return in case of invalid argument. ==== */
+
+	i__1 = -(*info);
+	xerbla_("ZHSEQR", &i__1);
+	return 0;
+
+    } else if (*n == 0) {
+
+/* ==== Quick return in case N = 0; nothing to do. ==== */
+
+	return 0;
+
+    } else if (lquery) {
+
+/* ==== Quick return in case of a workspace query ==== */
+
+	zlaqr0_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &w[1], ilo,
+		ihi, &z__[z_offset], ldz, &work[1], lwork, info);
+/* ==== Ensure reported workspace size is backward-compatible with */
+/* . previous LAPACK versions. ==== */
+/* Computing MAX */
+	d__2 = work[1].r, d__3 = (doublereal) max(1,*n);
+	d__1 = max(d__2,d__3);
+	z__1.r = d__1, z__1.i = 0.;
+	work[1].r = z__1.r, work[1].i = z__1.i;
+	return 0;
+
+    } else {
+
+/* ==== copy eigenvalues isolated by ZGEBAL ==== */
+
+	if (*ilo > 1) {
+	    i__1 = *ilo - 1;
+	    i__2 = *ldh + 1;
+	    zcopy_(&i__1, &h__[h_offset], &i__2, &w[1], &c__1);
+	}
+	if (*ihi < *n) {
+	    i__1 = *n - *ihi;
+	    i__2 = *ldh + 1;
+	    zcopy_(&i__1, &h__[*ihi + 1 + (*ihi + 1) * h_dim1], &i__2, &w[*
+		    ihi + 1], &c__1);
+	}
+
+/* ==== Initialize Z, if requested ==== */
+
+	if (initz) {
+	    zlaset_("A", n, n, &c_b1, &c_b2, &z__[z_offset], ldz);
+	}
+
+/* ==== Quick return if possible ==== */
+
+	if (*ilo == *ihi) {
+	    i__1 = *ilo;
+	    i__2 = *ilo + *ilo * h_dim1;
+	    w[i__1].r = h__[i__2].r, w[i__1].i = h__[i__2].i;
+	    return 0;
+	}
+
+/* ==== ZLAHQR/ZLAQR0 crossover point ==== */
+
+/* Writing concatenation */
+	i__3[0] = 1, a__1[0] = job;
+	i__3[1] = 1, a__1[1] = compz;
+	s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+	nmin = ilaenv_(&c__12, "ZHSEQR", ch__1, n, ilo, ihi, lwork);
+/* ==== NMIN is clamped to at least 11, the NTINY hard limit noted */
+/* . in the Parameters comment above. ==== */
+	nmin = max(11,nmin);
+
+/* ==== ZLAQR0 for big matrices; ZLAHQR for small ones ==== */
+
+	if (*n > nmin) {
+	    zlaqr0_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &w[1],
+		    ilo, ihi, &z__[z_offset], ldz, &work[1], lwork, info);
+	} else {
+
+/* ==== Small matrix ==== */
+
+	    zlahqr_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &w[1],
+		    ilo, ihi, &z__[z_offset], ldz, info);
+
+	    if (*info > 0) {
+
+/* ==== A rare ZLAHQR failure! ZLAQR0 sometimes succeeds */
+/* . when ZLAHQR fails. ==== */
+
+		kbot = *info;
+
+		if (*n >= 49) {
+
+/* ==== Larger matrices have enough subdiagonal scratch */
+/* . space to call ZLAQR0 directly. ==== */
+
+		    zlaqr0_(&wantt, &wantz, n, ilo, &kbot, &h__[h_offset],
+			    ldh, &w[1], ilo, ihi, &z__[z_offset], ldz, &work[
+			    1], lwork, info);
+
+		} else {
+
+/* ==== Tiny matrices don't have enough subdiagonal */
+/* . scratch space to benefit from ZLAQR0. Hence, */
+/* . tiny matrices must be copied into a larger */
+/* . array before calling ZLAQR0. ==== */
+
+		    zlacpy_("A", n, n, &h__[h_offset], ldh, hl, &c__49);
+		    i__1 = *n + 1 + *n * 49 - 50;
+		    hl[i__1].r = 0., hl[i__1].i = 0.;
+		    i__1 = 49 - *n;
+		    zlaset_("A", &c__49, &i__1, &c_b1, &c_b1, &hl[(*n + 1) *
+			    49 - 49], &c__49);
+		    zlaqr0_(&wantt, &wantz, &c__49, ilo, &kbot, hl, &c__49, &
+			    w[1], ilo, ihi, &z__[z_offset], ldz, workl, &
+			    c__49, info);
+		    if (wantt || *info != 0) {
+			zlacpy_("A", n, n, hl, &c__49, &h__[h_offset], ldh);
+		    }
+		}
+	    }
+	}
+
+/* ==== Clear out the trash, if necessary. ==== */
+
+	if ((wantt || *info != 0) && *n > 2) {
+	    i__1 = *n - 2;
+	    i__2 = *n - 2;
+	    zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &h__[h_dim1 + 3], ldh);
+	}
+
+/* ==== Ensure reported workspace size is backward-compatible with */
+/* . previous LAPACK versions. ==== */
+
+/* Computing MAX */
+	d__2 = (doublereal) max(1,*n), d__3 = work[1].r;
+	d__1 = max(d__2,d__3);
+	z__1.r = d__1, z__1.i = 0.;
+	work[1].r = z__1.r, work[1].i = z__1.i;
+    }
+
+/* ==== End of ZHSEQR ==== */
+
+    return 0;
+} /* zhseqr_ */
diff --git a/contrib/libs/clapack/zlabrd.c b/contrib/libs/clapack/zlabrd.c
new file mode 100644
index 0000000000..4019ed37a5
--- /dev/null
+++ b/contrib/libs/clapack/zlabrd.c
@@ -0,0 +1,502 @@
+/* zlabrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zlabrd_(integer *m, integer *n, integer *nb,
+ doublecomplex *a, integer *lda, doublereal *d__, doublereal *e,
+ doublecomplex *tauq, doublecomplex *taup, doublecomplex *x, integer *
+ ldx, doublecomplex *y, integer *ldy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, x_dim1, x_offset, y_dim1, y_offset, i__1, i__2,
+ i__3;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__;
+ doublecomplex alpha;
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *), zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *),
+ zlarfg_(integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *), zlacgv_(integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLABRD reduces the first NB rows and columns of a complex general */
+/* m by n matrix A to upper or lower real bidiagonal form by a unitary */
+/* transformation Q' * A * P, and returns the matrices X and Y which */
+/* are needed to apply the transformation to the unreduced part of A. */
+
+/* If m >= n, A is reduced to upper bidiagonal form; if m < n, to lower */
+/* bidiagonal form. */
+
+/* This is an auxiliary routine called by ZGEBRD */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows in the matrix A. */
+
+/* N (input) INTEGER */
+/* The number of columns in the matrix A. */
+
+/* NB (input) INTEGER */
+/* The number of leading rows and columns of A to be reduced. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the m by n general matrix to be reduced. */
+/* On exit, the first NB rows and columns of the matrix are */
+/* overwritten; the rest of the array is unchanged. */
+/* If m >= n, elements on and below the diagonal in the first NB */
+/* columns, with the array TAUQ, represent the unitary */
+/* matrix Q as a product of elementary reflectors; and */
+/* elements above the diagonal in the first NB rows, with the */
+/* array TAUP, represent the unitary matrix P as a product */
+/* of elementary reflectors. */
+/* If m < n, elements below the diagonal in the first NB */
+/* columns, with the array TAUQ, represent the unitary */
+/* matrix Q as a product of elementary reflectors, and */
+/* elements on and above the diagonal in the first NB rows, */
+/* with the array TAUP, represent the unitary matrix P as */
+/* a product of elementary reflectors. */
+/* See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* D (output) DOUBLE PRECISION array, dimension (NB) */
+/* The diagonal elements of the first NB rows and columns of */
+/* the reduced matrix. D(i) = A(i,i). */
+
+/* E (output) DOUBLE PRECISION array, dimension (NB) */
+/* The off-diagonal elements of the first NB rows and columns of */
+/* the reduced matrix. */
+
+/* TAUQ (output) COMPLEX*16 array dimension (NB) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the unitary matrix Q. See Further Details. */
+
+/* TAUP (output) COMPLEX*16 array, dimension (NB) */
+/* The scalar factors of the elementary reflectors which */
+/* represent the unitary matrix P. See Further Details. */
+
+/* X (output) COMPLEX*16 array, dimension (LDX,NB) */
+/* The m-by-nb matrix X required to update the unreduced part */
+/* of A. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,M). */
+
+/* Y (output) COMPLEX*16 array, dimension (LDY,NB) */
+/* The n-by-nb matrix Y required to update the unreduced part */
+/* of A. */
+
+/* LDY (input) INTEGER */
+/* The leading dimension of the array Y. LDY >= max(1,N). */
+
+/* Further Details */
+/* =============== */
+
+/* The matrices Q and P are represented as products of elementary */
+/* reflectors: */
+
+/* Q = H(1) H(2) . . . H(nb) and P = G(1) G(2) . . . G(nb) */
+
+/* Each H(i) and G(i) has the form: */
+
+/* H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' */
+
+/* where tauq and taup are complex scalars, and v and u are complex */
+/* vectors. */
+
+/* If m >= n, v(1:i-1) = 0, v(i) = 1, and v(i:m) is stored on exit in */
+/* A(i:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+1:n) is stored on exit in */
+/* A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* If m < n, v(1:i) = 0, v(i+1) = 1, and v(i+1:m) is stored on exit in */
+/* A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i:n) is stored on exit in */
+/* A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). */
+
+/* The elements of the vectors v and u together form the m-by-nb matrix */
+/* V and the nb-by-n matrix U' which are needed, with X and Y, to apply */
+/* the transformation to the unreduced part of the matrix, using a block */
+/* update of the form: A := A - V*Y' - X*U'. */
+
+/* The contents of A on exit are illustrated by the following examples */
+/* with nb = 2: */
+
+/* m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): */
+
+/* ( 1 1 u1 u1 u1 ) ( 1 u1 u1 u1 u1 u1 ) */
+/* ( v1 1 1 u2 u2 ) ( 1 1 u2 u2 u2 u2 ) */
+/* ( v1 v2 a a a ) ( v1 1 a a a a ) */
+/* ( v1 v2 a a a ) ( v1 v2 a a a a ) */
+/* ( v1 v2 a a a ) ( v1 v2 a a a a ) */
+/* ( v1 v2 a a a ) */
+
+/* where a denotes an element of the original matrix which is unchanged, */
+/* vi denotes an element of the vector defining H(i), and ui an element */
+/* of the vector defining G(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tauq;
+ --taup;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ y_dim1 = *ldy;
+ y_offset = 1 + y_dim1;
+ y -= y_offset;
+
+ /* Function Body */
+ if (*m <= 0 || *n <= 0) {
+ return 0;
+ }
+
+ if (*m >= *n) {
+
+/* Reduce to upper bidiagonal form */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i:m,i) */
+
+ i__2 = i__ - 1;
+ zlacgv_(&i__2, &y[i__ + y_dim1], ldy);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + a_dim1], lda,
+ &y[i__ + y_dim1], ldy, &c_b2, &a[i__ + i__ * a_dim1], &
+ c__1);
+ i__2 = i__ - 1;
+ zlacgv_(&i__2, &y[i__ + y_dim1], ldy);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__2, &i__3, &z__1, &x[i__ + x_dim1], ldx,
+ &a[i__ * a_dim1 + 1], &c__1, &c_b2, &a[i__ + i__ *
+ a_dim1], &c__1);
+
+/* Generate reflection Q(i) to annihilate A(i+1:m,i) */
+
+ i__2 = i__ + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ zlarfg_(&i__2, &alpha, &a[min(i__3, *m)+ i__ * a_dim1], &c__1, &
+ tauq[i__]);
+ i__2 = i__;
+ d__[i__2] = alpha.r;
+ if (i__ < *n) {
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Compute Y(i+1:n,i) */
+
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + (
+ i__ + 1) * a_dim1], lda, &a[i__ + i__ * a_dim1], &
+ c__1, &c_b1, &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ +
+ a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &c_b1, &
+ y[i__ * y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__2, &i__3, &z__1, &y[i__ + 1 +
+ y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b2, &y[
+ i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &x[i__ +
+ x_dim1], ldx, &a[i__ + i__ * a_dim1], &c__1, &c_b1, &
+ y[i__ * y_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("Conjugate transpose", &i__2, &i__3, &z__1, &a[(i__ +
+ 1) * a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &
+ c_b2, &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *n - i__;
+ zscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
+
+/* Update A(i,i+1:n) */
+
+ i__2 = *n - i__;
+ zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+ zlacgv_(&i__, &a[i__ + a_dim1], lda);
+ i__2 = *n - i__;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__2, &i__, &z__1, &y[i__ + 1 +
+ y_dim1], ldy, &a[i__ + a_dim1], lda, &c_b2, &a[i__ + (
+ i__ + 1) * a_dim1], lda);
+ zlacgv_(&i__, &a[i__ + a_dim1], lda);
+ i__2 = i__ - 1;
+ zlacgv_(&i__2, &x[i__ + x_dim1], ldx);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("Conjugate transpose", &i__2, &i__3, &z__1, &a[(i__ +
+ 1) * a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b2, &
+ a[i__ + (i__ + 1) * a_dim1], lda);
+ i__2 = i__ - 1;
+ zlacgv_(&i__2, &x[i__ + x_dim1], ldx);
+
+/* Generate reflection P(i) to annihilate A(i,i+2:n) */
+
+ i__2 = i__ + (i__ + 1) * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ zlarfg_(&i__2, &alpha, &a[i__ + min(i__3, *n)* a_dim1], lda, &
+ taup[i__]);
+ i__2 = i__;
+ e[i__2] = alpha.r;
+ i__2 = i__ + (i__ + 1) * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Compute X(i+1:m,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ zgemv_("No transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 + (i__
+ + 1) * a_dim1], lda, &a[i__ + (i__ + 1) * a_dim1],
+ lda, &c_b1, &x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *n - i__;
+ zgemv_("Conjugate transpose", &i__2, &i__, &c_b2, &y[i__ + 1
+ + y_dim1], ldy, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ c_b1, &x[i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__2, &i__, &z__1, &a[i__ + 1 +
+ a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b2, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ zgemv_("No transpose", &i__2, &i__3, &c_b2, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ c_b1, &x[i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__2, &i__3, &z__1, &x[i__ + 1 +
+ x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b2, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *m - i__;
+ zscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *n - i__;
+ zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Reduce to lower bidiagonal form */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i,i:n) */
+
+ i__2 = *n - i__ + 1;
+ zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+ i__2 = i__ - 1;
+ zlacgv_(&i__2, &a[i__ + a_dim1], lda);
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__2, &i__3, &z__1, &y[i__ + y_dim1], ldy,
+ &a[i__ + a_dim1], lda, &c_b2, &a[i__ + i__ * a_dim1],
+ lda);
+ i__2 = i__ - 1;
+ zlacgv_(&i__2, &a[i__ + a_dim1], lda);
+ i__2 = i__ - 1;
+ zlacgv_(&i__2, &x[i__ + x_dim1], ldx);
+ i__2 = i__ - 1;
+ i__3 = *n - i__ + 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("Conjugate transpose", &i__2, &i__3, &z__1, &a[i__ *
+ a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b2, &a[i__ +
+ i__ * a_dim1], lda);
+ i__2 = i__ - 1;
+ zlacgv_(&i__2, &x[i__ + x_dim1], ldx);
+
+/* Generate reflection P(i) to annihilate A(i,i+1:n) */
+
+ i__2 = i__ + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *n - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ zlarfg_(&i__2, &alpha, &a[i__ + min(i__3, *n)* a_dim1], lda, &
+ taup[i__]);
+ i__2 = i__;
+ d__[i__2] = alpha.r;
+ if (i__ < *m) {
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Compute X(i+1:m,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__ + 1;
+ zgemv_("No transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 + i__ *
+ a_dim1], lda, &a[i__ + i__ * a_dim1], lda, &c_b1, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &y[i__ +
+ y_dim1], ldy, &a[i__ + i__ * a_dim1], lda, &c_b1, &x[
+ i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + 1 +
+ a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b2, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__ + 1;
+ zgemv_("No transpose", &i__2, &i__3, &c_b2, &a[i__ * a_dim1 +
+ 1], lda, &a[i__ + i__ * a_dim1], lda, &c_b1, &x[i__ *
+ x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__2, &i__3, &z__1, &x[i__ + 1 +
+ x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b2, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *m - i__;
+ zscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *n - i__ + 1;
+ zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+
+/* Update A(i+1:m,i) */
+
+ i__2 = i__ - 1;
+ zlacgv_(&i__2, &y[i__ + y_dim1], ldy);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + 1 +
+ a_dim1], lda, &y[i__ + y_dim1], ldy, &c_b2, &a[i__ +
+ 1 + i__ * a_dim1], &c__1);
+ i__2 = i__ - 1;
+ zlacgv_(&i__2, &y[i__ + y_dim1], ldy);
+ i__2 = *m - i__;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__2, &i__, &z__1, &x[i__ + 1 +
+ x_dim1], ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b2, &a[
+ i__ + 1 + i__ * a_dim1], &c__1);
+
+/* Generate reflection Q(i) to annihilate A(i+2:m,i) */
+
+ i__2 = i__ + 1 + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *m - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ zlarfg_(&i__2, &alpha, &a[min(i__3, *m)+ i__ * a_dim1], &c__1,
+ &tauq[i__]);
+ i__2 = i__;
+ e[i__2] = alpha.r;
+ i__2 = i__ + 1 + i__ * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Compute Y(i+1:n,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + 1
+ + (i__ + 1) * a_dim1], lda, &a[i__ + 1 + i__ * a_dim1]
+, &c__1, &c_b1, &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + 1
+ + a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ c_b1, &y[i__ * y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__2, &i__3, &z__1, &y[i__ + 1 +
+ y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b2, &y[
+ i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__;
+ zgemv_("Conjugate transpose", &i__2, &i__, &c_b2, &x[i__ + 1
+ + x_dim1], ldx, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ c_b1, &y[i__ * y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("Conjugate transpose", &i__, &i__2, &z__1, &a[(i__ + 1)
+ * a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &
+ c_b2, &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *n - i__;
+ zscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
+ } else {
+ i__2 = *n - i__ + 1;
+ zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of ZLABRD */
+
+} /* zlabrd_ */
diff --git a/contrib/libs/clapack/zlacgv.c b/contrib/libs/clapack/zlacgv.c
new file mode 100644
index 0000000000..e455696fc5
--- /dev/null
+++ b/contrib/libs/clapack/zlacgv.c
@@ -0,0 +1,95 @@
+/* zlacgv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlacgv_(integer *n, doublecomplex *x, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, ioff;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLACGV conjugates a complex vector of length N. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The length of the vector X. N >= 0. */
+
+/* X (input/output) COMPLEX*16 array, dimension */
+/* (1+(N-1)*abs(INCX)) */
+/* On entry, the vector of length N to be conjugated. */
+/* On exit, X is overwritten with conjg(X). */
+
+/* INCX (input) INTEGER */
+/* The spacing between successive elements of X. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*incx == 1) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ d_cnjg(&z__1, &x[i__]);
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+/* L10: */
+ }
+ } else {
+ ioff = 1;
+ if (*incx < 0) {
+ ioff = 1 - (*n - 1) * *incx;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ioff;
+ d_cnjg(&z__1, &x[ioff]);
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ ioff += *incx;
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of ZLACGV */
+
+} /* zlacgv_ */
diff --git a/contrib/libs/clapack/zlacn2.c b/contrib/libs/clapack/zlacn2.c
new file mode 100644
index 0000000000..522fdafb0b
--- /dev/null
+++ b/contrib/libs/clapack/zlacn2.c
@@ -0,0 +1,283 @@
+/* zlacn2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zlacn2_(integer *n, doublecomplex *v, doublecomplex *x,
+ doublereal *est, integer *kase, integer *isave)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ doublereal d__1, d__2;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *), d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__;
+ doublereal temp, absxi;
+ integer jlast;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ extern integer izmax1_(integer *, doublecomplex *, integer *);
+ extern doublereal dzsum1_(integer *, doublecomplex *, integer *), dlamch_(
+ char *);
+ doublereal safmin, altsgn, estold;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLACN2 estimates the 1-norm of a square, complex matrix A. */
+/* Reverse communication is used for evaluating matrix-vector products. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 1. */
+
+/* V (workspace) COMPLEX*16 array, dimension (N) */
+/* On the final return, V = A*W, where EST = norm(V)/norm(W) */
+/* (W is not returned). */
+
+/* X (input/output) COMPLEX*16 array, dimension (N) */
+/* On an intermediate return, X should be overwritten by */
+/* A * X, if KASE=1, */
+/* A' * X, if KASE=2, */
+/* where A' is the conjugate transpose of A, and ZLACN2 must be */
+/* re-called with all the other parameters unchanged. */
+
+/* EST (input/output) DOUBLE PRECISION */
+/* On entry with KASE = 1 or 2 and ISAVE(1) = 3, EST should be */
+/* unchanged from the previous call to ZLACN2. */
+/* On exit, EST is an estimate (a lower bound) for norm(A). */
+
+/* KASE (input/output) INTEGER */
+/* On the initial call to ZLACN2, KASE should be 0. */
+/* On an intermediate return, KASE will be 1 or 2, indicating */
+/* whether X should be overwritten by A * X or A' * X. */
+/* On the final return from ZLACN2, KASE will again be 0. */
+
+/* ISAVE (input/output) INTEGER array, dimension (3) */
+/* ISAVE is used to save variables between calls to ZLACN2 */
+
+/* Further Details */
+/* ======= ======= */
+
+/* Contributed by Nick Higham, University of Manchester. */
+/* Originally named CONEST, dated March 16, 1988. */
+
+/* Reference: N.J. Higham, "FORTRAN codes for estimating the one-norm of */
+/* a real or complex matrix, with applications to condition estimation", */
+/* ACM Trans. Math. Soft., vol. 14, no. 4, pp. 381-396, December 1988. */
+
+/* Last modified: April, 1999 */
+
+/* This is a thread safe version of ZLACON, which uses the array ISAVE */
+/* in place of a SAVE statement, as follows: */
+
+/* ZLACON ZLACN2 */
+/* JUMP ISAVE(1) */
+/* J ISAVE(2) */
+/* ITER ISAVE(3) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --isave;
+ --x;
+ --v;
+
+ /* Function Body */
+ safmin = dlamch_("Safe minimum");
+ if (*kase == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ d__1 = 1. / (doublereal) (*n);
+ z__1.r = d__1, z__1.i = 0.;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+/* L10: */
+ }
+ *kase = 1;
+ isave[1] = 1;
+ return 0;
+ }
+
+ switch (isave[1]) {
+ case 1: goto L20;
+ case 2: goto L40;
+ case 3: goto L70;
+ case 4: goto L90;
+ case 5: goto L120;
+ }
+
+/* ................ ENTRY (ISAVE( 1 ) = 1) */
+/* FIRST ITERATION. X HAS BEEN OVERWRITTEN BY A*X. */
+
+L20:
+ if (*n == 1) {
+ v[1].r = x[1].r, v[1].i = x[1].i;
+ *est = z_abs(&v[1]);
+/* ... QUIT */
+ goto L130;
+ }
+ *est = dzsum1_(n, &x[1], &c__1);
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ absxi = z_abs(&x[i__]);
+ if (absxi > safmin) {
+ i__2 = i__;
+ i__3 = i__;
+ d__1 = x[i__3].r / absxi;
+ d__2 = d_imag(&x[i__]) / absxi;
+ z__1.r = d__1, z__1.i = d__2;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ } else {
+ i__2 = i__;
+ x[i__2].r = 1., x[i__2].i = 0.;
+ }
+/* L30: */
+ }
+ *kase = 2;
+ isave[1] = 2;
+ return 0;
+
+/* ................ ENTRY (ISAVE( 1 ) = 2) */
+/* FIRST ITERATION. X HAS BEEN OVERWRITTEN BY CTRANS(A)*X. */
+
+L40:
+ isave[2] = izmax1_(n, &x[1], &c__1);
+ isave[3] = 2;
+
+/* MAIN LOOP - ITERATIONS 2,3,...,ITMAX. */
+
+L50:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ x[i__2].r = 0., x[i__2].i = 0.;
+/* L60: */
+ }
+ i__1 = isave[2];
+ x[i__1].r = 1., x[i__1].i = 0.;
+ *kase = 1;
+ isave[1] = 3;
+ return 0;
+
+/* ................ ENTRY (ISAVE( 1 ) = 3) */
+/* X HAS BEEN OVERWRITTEN BY A*X. */
+
+L70:
+ zcopy_(n, &x[1], &c__1, &v[1], &c__1);
+ estold = *est;
+ *est = dzsum1_(n, &v[1], &c__1);
+
+/* TEST FOR CYCLING. */
+ if (*est <= estold) {
+ goto L100;
+ }
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ absxi = z_abs(&x[i__]);
+ if (absxi > safmin) {
+ i__2 = i__;
+ i__3 = i__;
+ d__1 = x[i__3].r / absxi;
+ d__2 = d_imag(&x[i__]) / absxi;
+ z__1.r = d__1, z__1.i = d__2;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ } else {
+ i__2 = i__;
+ x[i__2].r = 1., x[i__2].i = 0.;
+ }
+/* L80: */
+ }
+ *kase = 2;
+ isave[1] = 4;
+ return 0;
+
+/* ................ ENTRY (ISAVE( 1 ) = 4) */
+/* X HAS BEEN OVERWRITTEN BY CTRANS(A)*X. */
+
+L90:
+ jlast = isave[2];
+ isave[2] = izmax1_(n, &x[1], &c__1);
+ if (z_abs(&x[jlast]) != z_abs(&x[isave[2]]) && isave[3] < 5) {
+ ++isave[3];
+ goto L50;
+ }
+
+/* ITERATION COMPLETE. FINAL STAGE. */
+
+L100:
+ altsgn = 1.;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ d__1 = altsgn * ((doublereal) (i__ - 1) / (doublereal) (*n - 1) + 1.);
+ z__1.r = d__1, z__1.i = 0.;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ altsgn = -altsgn;
+/* L110: */
+ }
+ *kase = 1;
+ isave[1] = 5;
+ return 0;
+
+/* ................ ENTRY (ISAVE( 1 ) = 5) */
+/* X HAS BEEN OVERWRITTEN BY A*X. */
+
+L120:
+ temp = dzsum1_(n, &x[1], &c__1) / (doublereal) (*n * 3) * 2.;
+ if (temp > *est) {
+ zcopy_(n, &x[1], &c__1, &v[1], &c__1);
+ *est = temp;
+ }
+
+L130:
+ *kase = 0;
+ return 0;
+
+/* End of ZLACN2 */
+
+} /* zlacn2_ */
diff --git a/contrib/libs/clapack/zlacon.c b/contrib/libs/clapack/zlacon.c
new file mode 100644
index 0000000000..9291b3f74c
--- /dev/null
+++ b/contrib/libs/clapack/zlacon.c
@@ -0,0 +1,275 @@
+/* zlacon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zlacon_(integer *n, doublecomplex *v, doublecomplex *x,
+ doublereal *est, integer *kase)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ doublereal d__1, d__2;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *), d_imag(doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, iter;
+ static doublereal temp;
+ static integer jump;
+ static doublereal absxi;
+ static integer jlast;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ extern integer izmax1_(integer *, doublecomplex *, integer *);
+ extern doublereal dzsum1_(integer *, doublecomplex *, integer *), dlamch_(
+ char *);
+ static doublereal safmin, altsgn, estold;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLACON estimates the 1-norm of a square, complex matrix A. */
+/* Reverse communication is used for evaluating matrix-vector products. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 1. */
+
+/* V (workspace) COMPLEX*16 array, dimension (N) */
+/* On the final return, V = A*W, where EST = norm(V)/norm(W) */
+/* (W is not returned). */
+
+/* X (input/output) COMPLEX*16 array, dimension (N) */
+/* On an intermediate return, X should be overwritten by */
+/* A * X, if KASE=1, */
+/* A' * X, if KASE=2, */
+/* where A' is the conjugate transpose of A, and ZLACON must be */
+/* re-called with all the other parameters unchanged. */
+
+/* EST (input/output) DOUBLE PRECISION */
+/* On entry with KASE = 1 or 2 and JUMP = 3, EST should be */
+/* unchanged from the previous call to ZLACON. */
+/* On exit, EST is an estimate (a lower bound) for norm(A). */
+
+/* KASE (input/output) INTEGER */
+/* On the initial call to ZLACON, KASE should be 0. */
+/* On an intermediate return, KASE will be 1 or 2, indicating */
+/* whether X should be overwritten by A * X or A' * X. */
+/* On the final return from ZLACON, KASE will again be 0. */
+
+/* Further Details */
+/* ======= ======= */
+
+/* Contributed by Nick Higham, University of Manchester. */
+/* Originally named CONEST, dated March 16, 1988. */
+
+/* Reference: N.J. Higham, "FORTRAN codes for estimating the one-norm of */
+/* a real or complex matrix, with applications to condition estimation", */
+/* ACM Trans. Math. Soft., vol. 14, no. 4, pp. 381-396, December 1988. */
+
+/* Last modified: April, 1999 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Save statement .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --x;
+ --v;
+
+ /* Function Body */
+ safmin = dlamch_("Safe minimum");
+ if (*kase == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ d__1 = 1. / (doublereal) (*n);
+ z__1.r = d__1, z__1.i = 0.;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+/* L10: */
+ }
+ *kase = 1;
+ jump = 1;
+ return 0;
+ }
+
+ switch (jump) {
+ case 1: goto L20;
+ case 2: goto L40;
+ case 3: goto L70;
+ case 4: goto L90;
+ case 5: goto L120;
+ }
+
+/* ................ ENTRY (JUMP = 1) */
+/* FIRST ITERATION. X HAS BEEN OVERWRITTEN BY A*X. */
+
+L20:
+ if (*n == 1) {
+ v[1].r = x[1].r, v[1].i = x[1].i;
+ *est = z_abs(&v[1]);
+/* ... QUIT */
+ goto L130;
+ }
+ *est = dzsum1_(n, &x[1], &c__1);
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ absxi = z_abs(&x[i__]);
+ if (absxi > safmin) {
+ i__2 = i__;
+ i__3 = i__;
+ d__1 = x[i__3].r / absxi;
+ d__2 = d_imag(&x[i__]) / absxi;
+ z__1.r = d__1, z__1.i = d__2;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ } else {
+ i__2 = i__;
+ x[i__2].r = 1., x[i__2].i = 0.;
+ }
+/* L30: */
+ }
+ *kase = 2;
+ jump = 2;
+ return 0;
+
+/* ................ ENTRY (JUMP = 2) */
+/* FIRST ITERATION. X HAS BEEN OVERWRITTEN BY CTRANS(A)*X. */
+
+L40:
+ j = izmax1_(n, &x[1], &c__1);
+ iter = 2;
+
+/* MAIN LOOP - ITERATIONS 2,3,...,ITMAX. */
+
+L50:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ x[i__2].r = 0., x[i__2].i = 0.;
+/* L60: */
+ }
+ i__1 = j;
+ x[i__1].r = 1., x[i__1].i = 0.;
+ *kase = 1;
+ jump = 3;
+ return 0;
+
+/* ................ ENTRY (JUMP = 3) */
+/* X HAS BEEN OVERWRITTEN BY A*X. */
+
+L70:
+ zcopy_(n, &x[1], &c__1, &v[1], &c__1);
+ estold = *est;
+ *est = dzsum1_(n, &v[1], &c__1);
+
+/* TEST FOR CYCLING. */
+ if (*est <= estold) {
+ goto L100;
+ }
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ absxi = z_abs(&x[i__]);
+ if (absxi > safmin) {
+ i__2 = i__;
+ i__3 = i__;
+ d__1 = x[i__3].r / absxi;
+ d__2 = d_imag(&x[i__]) / absxi;
+ z__1.r = d__1, z__1.i = d__2;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ } else {
+ i__2 = i__;
+ x[i__2].r = 1., x[i__2].i = 0.;
+ }
+/* L80: */
+ }
+ *kase = 2;
+ jump = 4;
+ return 0;
+
+/* ................ ENTRY (JUMP = 4) */
+/* X HAS BEEN OVERWRITTEN BY CTRANS(A)*X. */
+
+L90:
+ jlast = j;
+ j = izmax1_(n, &x[1], &c__1);
+ if (z_abs(&x[jlast]) != z_abs(&x[j]) && iter < 5) {
+ ++iter;
+ goto L50;
+ }
+
+/* ITERATION COMPLETE. FINAL STAGE. */
+
+L100:
+ altsgn = 1.;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ d__1 = altsgn * ((doublereal) (i__ - 1) / (doublereal) (*n - 1) + 1.);
+ z__1.r = d__1, z__1.i = 0.;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ altsgn = -altsgn;
+/* L110: */
+ }
+ *kase = 1;
+ jump = 5;
+ return 0;
+
+/* ................ ENTRY (JUMP = 5) */
+/* X HAS BEEN OVERWRITTEN BY A*X. */
+
+L120:
+ temp = dzsum1_(n, &x[1], &c__1) / (doublereal) (*n * 3) * 2.;
+ if (temp > *est) {
+ zcopy_(n, &x[1], &c__1, &v[1], &c__1);
+ *est = temp;
+ }
+
+L130:
+ *kase = 0;
+ return 0;
+
+/* End of ZLACON */
+
+} /* zlacon_ */
diff --git a/contrib/libs/clapack/zlacp2.c b/contrib/libs/clapack/zlacp2.c
new file mode 100644
index 0000000000..2a1ecdb9bb
--- /dev/null
+++ b/contrib/libs/clapack/zlacp2.c
@@ -0,0 +1,134 @@
+/* zlacp2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlacp2_(char *uplo, integer *m, integer *n, doublereal *
+ a, integer *lda, doublecomplex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLACP2 copies all or part of a real two-dimensional matrix A to a */
+/* complex matrix B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies the part of the matrix A to be copied to B. */
+/* = 'U': Upper triangular part */
+/* = 'L': Lower triangular part */
+/* Otherwise: All of the matrix A */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA,N) */
+/* The m by n matrix A. If UPLO = 'U', only the upper trapezium */
+/* is accessed; if UPLO = 'L', only the lower trapezium is */
+/* accessed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (output) COMPLEX*16 array, dimension (LDB,N) */
+/* On exit, B = A in the locations specified by UPLO. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(j,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4], b[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+
+ } else if (lsame_(uplo, "L")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4], b[i__3].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4], b[i__3].i = 0.;
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+ return 0;
+
+/* End of ZLACP2 */
+
+} /* zlacp2_ */
diff --git a/contrib/libs/clapack/zlacpy.c b/contrib/libs/clapack/zlacpy.c
new file mode 100644
index 0000000000..dfa21c02c3
--- /dev/null
+++ b/contrib/libs/clapack/zlacpy.c
@@ -0,0 +1,134 @@
+/* zlacpy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlacpy_(char *uplo, integer *m, integer *n,
+ doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLACPY copies all or part of a two-dimensional matrix A to another */
+/* matrix B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies the part of the matrix A to be copied to B. */
+/* = 'U': Upper triangular part */
+/* = 'L': Lower triangular part */
+/* Otherwise: All of the matrix A */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The m by n matrix A. If UPLO = 'U', only the upper trapezium */
+/* is accessed; if UPLO = 'L', only the lower trapezium is */
+/* accessed. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (output) COMPLEX*16 array, dimension (LDB,N) */
+/* On exit, B = A in the locations specified by UPLO. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(j,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4].r, b[i__3].i = a[i__4].i;
+/* L10: */
+ }
+/* L20: */
+ }
+
+ } else if (lsame_(uplo, "L")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4].r, b[i__3].i = a[i__4].i;
+/* L30: */
+ }
+/* L40: */
+ }
+
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4].r, b[i__3].i = a[i__4].i;
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+ return 0;
+
+/* End of ZLACPY */
+
+} /* zlacpy_ */
diff --git a/contrib/libs/clapack/zlacrm.c b/contrib/libs/clapack/zlacrm.c
new file mode 100644
index 0000000000..96fb840e15
--- /dev/null
+++ b/contrib/libs/clapack/zlacrm.c
@@ -0,0 +1,177 @@
+/* zlacrm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b6 = 1.;
+static doublereal c_b7 = 0.;
+
+/* Subroutine */ int zlacrm_(integer *m, integer *n, doublecomplex *a,
+ integer *lda, doublereal *b, integer *ldb, doublecomplex *c__,
+ integer *ldc, doublereal *rwork)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, a_dim1, a_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5;
+ doublereal d__1;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, l;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLACRM performs a very simple matrix-matrix multiplication: */
+/* C := A * B, */
+/* where A is M by N and complex; B is N by N and real; */
+/* C is M by N and complex. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A and of the matrix C. */
+/* M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns and rows of the matrix B and */
+/* the number of columns of the matrix C. */
+/* N >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA, N) */
+/* A contains the M by N matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >=max(1,M). */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB, N) */
+/* B contains the N by N matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >=max(1,N). */
+
+/* C (input) COMPLEX*16 array, dimension (LDC, N) */
+/* C contains the M by N matrix C. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >=max(1,N). */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (2*M*N) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --rwork;
+
+ /* Function Body */
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ rwork[(j - 1) * *m + i__] = a[i__3].r;
+/* L10: */
+ }
+/* L20: */
+ }
+
+ l = *m * *n + 1;
+ dgemm_("N", "N", m, n, n, &c_b6, &rwork[1], m, &b[b_offset], ldb, &c_b7, &
+ rwork[l], m);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = l + (j - 1) * *m + i__ - 1;
+ c__[i__3].r = rwork[i__4], c__[i__3].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ rwork[(j - 1) * *m + i__] = d_imag(&a[i__ + j * a_dim1]);
+/* L50: */
+ }
+/* L60: */
+ }
+ dgemm_("N", "N", m, n, n, &c_b6, &rwork[1], m, &b[b_offset], ldb, &c_b7, &
+ rwork[l], m);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ d__1 = c__[i__4].r;
+ i__5 = l + (j - 1) * *m + i__ - 1;
+ z__1.r = d__1, z__1.i = rwork[i__5];
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+
+ return 0;
+
+/* End of ZLACRM */
+
+} /* zlacrm_ */
diff --git a/contrib/libs/clapack/zlacrt.c b/contrib/libs/clapack/zlacrt.c
new file mode 100644
index 0000000000..270a9df58a
--- /dev/null
+++ b/contrib/libs/clapack/zlacrt.c
@@ -0,0 +1,156 @@
+/* zlacrt.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlacrt_(integer *n, doublecomplex *cx, integer *incx,
+ doublecomplex *cy, integer *incy, doublecomplex *c__, doublecomplex *
+ s)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Local variables */
+ integer i__, ix, iy;
+ doublecomplex ctemp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLACRT performs the operation */
+
+/* ( c s )( x ) ==> ( x ) */
+/* ( -s c )( y ) ( y ) */
+
+/* where c and s are complex and the vectors x and y are complex. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of elements in the vectors CX and CY. */
+
+/* CX (input/output) COMPLEX*16 array, dimension (N) */
+/* On input, the vector x. */
+/* On output, CX is overwritten with c*x + s*y. */
+
+/* INCX (input) INTEGER */
+/* The increment between successive values of CX. INCX <> 0. */
+
+/* CY (input/output) COMPLEX*16 array, dimension (N) */
+/* On input, the vector y. */
+/* On output, CY is overwritten with -s*x + c*y. */
+
+/* INCY (input) INTEGER */
+/* The increment between successive values of CY. INCY <> 0. */
+
+/* C (input) COMPLEX*16 */
+/* S (input) COMPLEX*16 */
+/* C and S define the matrix */
+/* [ C S ]. */
+/* [ -S C ] */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --cy;
+ --cx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/* Code for unequal increments or equal increments not equal to 1 */
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ z__2.r = c__->r * cx[i__2].r - c__->i * cx[i__2].i, z__2.i = c__->r *
+ cx[i__2].i + c__->i * cx[i__2].r;
+ i__3 = iy;
+ z__3.r = s->r * cy[i__3].r - s->i * cy[i__3].i, z__3.i = s->r * cy[
+ i__3].i + s->i * cy[i__3].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ ctemp.r = z__1.r, ctemp.i = z__1.i;
+ i__2 = iy;
+ i__3 = iy;
+ z__2.r = c__->r * cy[i__3].r - c__->i * cy[i__3].i, z__2.i = c__->r *
+ cy[i__3].i + c__->i * cy[i__3].r;
+ i__4 = ix;
+ z__3.r = s->r * cx[i__4].r - s->i * cx[i__4].i, z__3.i = s->r * cx[
+ i__4].i + s->i * cx[i__4].r;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ cy[i__2].r = z__1.r, cy[i__2].i = z__1.i;
+ i__2 = ix;
+ cx[i__2].r = ctemp.r, cx[i__2].i = ctemp.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* Code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ z__2.r = c__->r * cx[i__2].r - c__->i * cx[i__2].i, z__2.i = c__->r *
+ cx[i__2].i + c__->i * cx[i__2].r;
+ i__3 = i__;
+ z__3.r = s->r * cy[i__3].r - s->i * cy[i__3].i, z__3.i = s->r * cy[
+ i__3].i + s->i * cy[i__3].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ ctemp.r = z__1.r, ctemp.i = z__1.i;
+ i__2 = i__;
+ i__3 = i__;
+ z__2.r = c__->r * cy[i__3].r - c__->i * cy[i__3].i, z__2.i = c__->r *
+ cy[i__3].i + c__->i * cy[i__3].r;
+ i__4 = i__;
+ z__3.r = s->r * cx[i__4].r - s->i * cx[i__4].i, z__3.i = s->r * cx[
+ i__4].i + s->i * cx[i__4].r;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ cy[i__2].r = z__1.r, cy[i__2].i = z__1.i;
+ i__2 = i__;
+ cx[i__2].r = ctemp.r, cx[i__2].i = ctemp.i;
+/* L30: */
+ }
+ return 0;
+} /* zlacrt_ */
diff --git a/contrib/libs/clapack/zladiv.c b/contrib/libs/clapack/zladiv.c
new file mode 100644
index 0000000000..d92be5a1cb
--- /dev/null
+++ b/contrib/libs/clapack/zladiv.c
@@ -0,0 +1,75 @@
+/* zladiv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Double Complex */ VOID zladiv_(doublecomplex * ret_val, doublecomplex *x,
+ doublecomplex *y)
+{
+ /* System generated locals */
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ doublereal zi, zr;
+ extern /* Subroutine */ int dladiv_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLADIV := X / Y, where X and Y are complex. The computation of X / Y */
+/* will not overflow on an intermediary step unless the results */
+/* overflows. */
+
+/* Arguments */
+/* ========= */
+
+/* X (input) COMPLEX*16 */
+/* Y (input) COMPLEX*16 */
+/* The complex scalars X and Y. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ d__1 = x->r;
+ d__2 = d_imag(x);
+ d__3 = y->r;
+ d__4 = d_imag(y);
+ dladiv_(&d__1, &d__2, &d__3, &d__4, &zr, &zi);
+ z__1.r = zr, z__1.i = zi;
+ ret_val->r = z__1.r, ret_val->i = z__1.i;
+
+ return ;
+
+/* End of ZLADIV */
+
+} /* zladiv_ */
diff --git a/contrib/libs/clapack/zlaed0.c b/contrib/libs/clapack/zlaed0.c
new file mode 100644
index 0000000000..c8a76adf89
--- /dev/null
+++ b/contrib/libs/clapack/zlaed0.c
@@ -0,0 +1,366 @@
+/* zlaed0.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__9 = 9;
+static integer c__0 = 0;
+static integer c__2 = 2;
+static integer c__1 = 1;
+
+/* Subroutine */ int zlaed0_(integer *qsiz, integer *n, doublereal *d__,
+ doublereal *e, doublecomplex *q, integer *ldq, doublecomplex *qstore,
+ integer *ldqs, doublereal *rwork, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, qstore_dim1, qstore_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double log(doublereal);
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ integer i__, j, k, ll, iq, lgn, msd2, smm1, spm1, spm2;
+ doublereal temp;
+ integer curr, iperm;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ integer indxq, iwrem, iqptr, tlvls;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zlaed7_(integer *, integer *,
+ integer *, integer *, integer *, integer *, doublereal *,
+ doublecomplex *, integer *, doublereal *, integer *, doublereal *,
+ integer *, integer *, integer *, integer *, integer *,
+ doublereal *, doublecomplex *, doublereal *, integer *, integer *)
+ ;
+ integer igivcl;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int zlacrm_(integer *, integer *, doublecomplex *,
+ integer *, doublereal *, integer *, doublecomplex *, integer *,
+ doublereal *);
+ integer igivnm, submat, curprb, subpbs, igivpt;
+ extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *);
+ integer curlvl, matsiz, iprmpt, smlsiz;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Using the divide and conquer method, ZLAED0 computes all eigenvalues */
+/* of a symmetric tridiagonal matrix which is one diagonal block of */
+/* those from reducing a dense or band Hermitian matrix and */
+/* corresponding eigenvectors of the dense or band matrix. */
+
+/* Arguments */
+/* ========= */
+
+/* QSIZ (input) INTEGER */
+/* The dimension of the unitary matrix used to reduce */
+/* the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1. */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the diagonal elements of the tridiagonal matrix. */
+/* On exit, the eigenvalues in ascending order. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, the off-diagonal elements of the tridiagonal matrix. */
+/* On exit, E has been destroyed. */
+
+/* Q (input/output) COMPLEX*16 array, dimension (LDQ,N) */
+/* On entry, Q must contain an QSIZ x N matrix whose columns */
+/* unitarily orthonormal. It is a part of the unitary matrix */
+/* that reduces the full dense Hermitian matrix to a */
+/* (reducible) symmetric tridiagonal matrix. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* IWORK (workspace) INTEGER array, */
+/* the dimension of IWORK must be at least */
+/* 6 + 6*N + 5*N*lg N */
+/* ( lg( N ) = smallest integer k */
+/* such that 2^k >= N ) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, */
+/* dimension (1 + 3*N + 2*N*lg N + 3*N**2) */
+/* ( lg( N ) = smallest integer k */
+/* such that 2^k >= N ) */
+
+/* QSTORE (workspace) COMPLEX*16 array, dimension (LDQS, N) */
+/* Used to store parts of */
+/* the eigenvector matrix when the updating matrix multiplies */
+/* take place. */
+
+/* LDQS (input) INTEGER */
+/* The leading dimension of the array QSTORE. */
+/* LDQS >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: The algorithm failed to compute an eigenvalue while */
+/* working on the submatrix lying in rows and columns */
+/* INFO/(N+1) through mod(INFO,N+1). */
+
+/* ===================================================================== */
+
+/* Warning: N could be as big as QSIZ! */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ qstore_dim1 = *ldqs;
+ qstore_offset = 1 + qstore_dim1;
+ qstore -= qstore_offset;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+/* IF( ICOMPQ .LT. 0 .OR. ICOMPQ .GT. 2 ) THEN */
+/* INFO = -1 */
+/* ELSE IF( ( ICOMPQ .EQ. 1 ) .AND. ( QSIZ .LT. MAX( 0, N ) ) ) */
+/* $ THEN */
+ if (*qsiz < max(0,*n)) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldq < max(1,*n)) {
+ *info = -6;
+ } else if (*ldqs < max(1,*n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZLAED0", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ smlsiz = ilaenv_(&c__9, "ZLAED0", " ", &c__0, &c__0, &c__0, &c__0);
+
+/* Determine the size and placement of the submatrices, and save in */
+/* the leading elements of IWORK. */
+
+ iwork[1] = *n;
+ subpbs = 1;
+ tlvls = 0;
+L10:
+ if (iwork[subpbs] > smlsiz) {
+ for (j = subpbs; j >= 1; --j) {
+ iwork[j * 2] = (iwork[j] + 1) / 2;
+ iwork[(j << 1) - 1] = iwork[j] / 2;
+/* L20: */
+ }
+ ++tlvls;
+ subpbs <<= 1;
+ goto L10;
+ }
+ i__1 = subpbs;
+ for (j = 2; j <= i__1; ++j) {
+ iwork[j] += iwork[j - 1];
+/* L30: */
+ }
+
+/* Divide the matrix into SUBPBS submatrices of size at most SMLSIZ+1 */
+/* using rank-1 modifications (cuts). */
+
+ spm1 = subpbs - 1;
+ i__1 = spm1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ submat = iwork[i__] + 1;
+ smm1 = submat - 1;
+ d__[smm1] -= (d__1 = e[smm1], abs(d__1));
+ d__[submat] -= (d__1 = e[smm1], abs(d__1));
+/* L40: */
+ }
+
+ indxq = (*n << 2) + 3;
+
+/* Set up workspaces for eigenvalues only/accumulate new vectors */
+/* routine */
+
+ temp = log((doublereal) (*n)) / log(2.);
+ lgn = (integer) temp;
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ iprmpt = indxq + *n + 1;
+ iperm = iprmpt + *n * lgn;
+ iqptr = iperm + *n * lgn;
+ igivpt = iqptr + *n + 2;
+ igivcl = igivpt + *n * lgn;
+
+ igivnm = 1;
+ iq = igivnm + (*n << 1) * lgn;
+/* Computing 2nd power */
+ i__1 = *n;
+ iwrem = iq + i__1 * i__1 + 1;
+/* Initialize pointers */
+ i__1 = subpbs;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ iwork[iprmpt + i__] = 1;
+ iwork[igivpt + i__] = 1;
+/* L50: */
+ }
+ iwork[iqptr] = 1;
+
+/* Solve each submatrix eigenproblem at the bottom of the divide and */
+/* conquer tree. */
+
+ curr = 0;
+ i__1 = spm1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ if (i__ == 0) {
+ submat = 1;
+ matsiz = iwork[1];
+ } else {
+ submat = iwork[i__] + 1;
+ matsiz = iwork[i__ + 1] - iwork[i__];
+ }
+ ll = iq - 1 + iwork[iqptr + curr];
+ dsteqr_("I", &matsiz, &d__[submat], &e[submat], &rwork[ll], &matsiz, &
+ rwork[1], info);
+ zlacrm_(qsiz, &matsiz, &q[submat * q_dim1 + 1], ldq, &rwork[ll], &
+ matsiz, &qstore[submat * qstore_dim1 + 1], ldqs, &rwork[iwrem]
+);
+/* Computing 2nd power */
+ i__2 = matsiz;
+ iwork[iqptr + curr + 1] = iwork[iqptr + curr] + i__2 * i__2;
+ ++curr;
+ if (*info > 0) {
+ *info = submat * (*n + 1) + submat + matsiz - 1;
+ return 0;
+ }
+ k = 1;
+ i__2 = iwork[i__ + 1];
+ for (j = submat; j <= i__2; ++j) {
+ iwork[indxq + j] = k;
+ ++k;
+/* L60: */
+ }
+/* L70: */
+ }
+
+/* Successively merge eigensystems of adjacent submatrices */
+/* into eigensystem for the corresponding larger matrix. */
+
+/* while ( SUBPBS > 1 ) */
+
+ curlvl = 1;
+L80:
+ if (subpbs > 1) {
+ spm2 = subpbs - 2;
+ i__1 = spm2;
+ for (i__ = 0; i__ <= i__1; i__ += 2) {
+ if (i__ == 0) {
+ submat = 1;
+ matsiz = iwork[2];
+ msd2 = iwork[1];
+ curprb = 0;
+ } else {
+ submat = iwork[i__] + 1;
+ matsiz = iwork[i__ + 2] - iwork[i__];
+ msd2 = matsiz / 2;
+ ++curprb;
+ }
+
+/* Merge lower order eigensystems (of size MSD2 and MATSIZ - MSD2) */
+/* into an eigensystem of size MATSIZ. ZLAED7 handles the case */
+/* when the eigenvectors of a full or band Hermitian matrix (which */
+/* was reduced to tridiagonal form) are desired. */
+
+/* I am free to use Q as a valuable working space until Loop 150. */
+
+ zlaed7_(&matsiz, &msd2, qsiz, &tlvls, &curlvl, &curprb, &d__[
+ submat], &qstore[submat * qstore_dim1 + 1], ldqs, &e[
+ submat + msd2 - 1], &iwork[indxq + submat], &rwork[iq], &
+ iwork[iqptr], &iwork[iprmpt], &iwork[iperm], &iwork[
+ igivpt], &iwork[igivcl], &rwork[igivnm], &q[submat *
+ q_dim1 + 1], &rwork[iwrem], &iwork[subpbs + 1], info);
+ if (*info > 0) {
+ *info = submat * (*n + 1) + submat + matsiz - 1;
+ return 0;
+ }
+ iwork[i__ / 2 + 1] = iwork[i__ + 2];
+/* L90: */
+ }
+ subpbs /= 2;
+ ++curlvl;
+ goto L80;
+ }
+
+/* end while */
+
+/* Re-merge the eigenvalues/vectors which were deflated at the final */
+/* merge step. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ j = iwork[indxq + i__];
+ rwork[i__] = d__[j];
+ zcopy_(qsiz, &qstore[j * qstore_dim1 + 1], &c__1, &q[i__ * q_dim1 + 1]
+, &c__1);
+/* L100: */
+ }
+ dcopy_(n, &rwork[1], &c__1, &d__[1], &c__1);
+
+ return 0;
+
+/* End of ZLAED0 */
+
+} /* zlaed0_ */
diff --git a/contrib/libs/clapack/zlaed7.c b/contrib/libs/clapack/zlaed7.c
new file mode 100644
index 0000000000..ec956a7cd6
--- /dev/null
+++ b/contrib/libs/clapack/zlaed7.c
@@ -0,0 +1,327 @@
+/* zlaed7.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__2 = 2;
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zlaed7_(integer *n, integer *cutpnt, integer *qsiz,
+ integer *tlvls, integer *curlvl, integer *curpbm, doublereal *d__,
+ doublecomplex *q, integer *ldq, doublereal *rho, integer *indxq,
+ doublereal *qstore, integer *qptr, integer *prmptr, integer *perm,
+ integer *givptr, integer *givcol, doublereal *givnum, doublecomplex *
+ work, doublereal *rwork, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ integer i__, k, n1, n2, iq, iw, iz, ptr, indx, curr, indxc, indxp;
+ extern /* Subroutine */ int dlaed9_(integer *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, doublereal *, integer *, integer *),
+ zlaed8_(integer *, integer *, integer *, doublecomplex *, integer
+ *, doublereal *, doublereal *, integer *, doublereal *,
+ doublereal *, doublecomplex *, integer *, doublereal *, integer *,
+ integer *, integer *, integer *, integer *, integer *,
+ doublereal *, integer *), dlaeda_(integer *, integer *, integer *,
+ integer *, integer *, integer *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *);
+ integer idlmda;
+ extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+ integer *, integer *, integer *), xerbla_(char *, integer *), zlacrm_(integer *, integer *, doublecomplex *, integer *,
+ doublereal *, integer *, doublecomplex *, integer *, doublereal *
+);
+ integer coltyp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAED7 computes the updated eigensystem of a diagonal */
+/* matrix after modification by a rank-one symmetric matrix. This */
+/* routine is used only for the eigenproblem which requires all */
+/* eigenvalues and optionally eigenvectors of a dense or banded */
+/* Hermitian matrix that has been reduced to tridiagonal form. */
+
+/* T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out) */
+
+/* where Z = Q'u, u is a vector of length N with ones in the */
+/* CUTPNT and CUTPNT + 1 th elements and zeros elsewhere. */
+
+/* The eigenvectors of the original matrix are stored in Q, and the */
+/* eigenvalues are in D. The algorithm consists of three stages: */
+
+/* The first stage consists of deflating the size of the problem */
+/* when there are multiple eigenvalues or if there is a zero in */
+/* the Z vector. For each such occurence the dimension of the */
+/* secular equation problem is reduced by one. This stage is */
+/* performed by the routine DLAED2. */
+
+/* The second stage consists of calculating the updated */
+/* eigenvalues. This is done by finding the roots of the secular */
+/* equation via the routine DLAED4 (as called by SLAED3). */
+/* This routine also calculates the eigenvectors of the current */
+/* problem. */
+
+/* The final stage consists of computing the updated eigenvectors */
+/* directly using the updated eigenvalues. The eigenvectors for */
+/* the current problem are multiplied with the eigenvectors from */
+/* the overall problem. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* CUTPNT (input) INTEGER */
+/* Contains the location of the last eigenvalue in the leading */
+/* sub-matrix. min(1,N) <= CUTPNT <= N. */
+
+/* QSIZ (input) INTEGER */
+/* The dimension of the unitary matrix used to reduce */
+/* the full matrix to tridiagonal form. QSIZ >= N. */
+
+/* TLVLS (input) INTEGER */
+/* The total number of merging levels in the overall divide and */
+/* conquer tree. */
+
+/* CURLVL (input) INTEGER */
+/* The current level in the overall merge routine, */
+/* 0 <= curlvl <= tlvls. */
+
+/* CURPBM (input) INTEGER */
+/* The current problem in the current level in the overall */
+/* merge routine (counting from upper left to lower right). */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the eigenvalues of the rank-1-perturbed matrix. */
+/* On exit, the eigenvalues of the repaired matrix. */
+
+/* Q (input/output) COMPLEX*16 array, dimension (LDQ,N) */
+/* On entry, the eigenvectors of the rank-1-perturbed matrix. */
+/* On exit, the eigenvectors of the repaired tridiagonal matrix. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* RHO (input) DOUBLE PRECISION */
+/* Contains the subdiagonal element used to create the rank-1 */
+/* modification. */
+
+/* INDXQ (output) INTEGER array, dimension (N) */
+/* This contains the permutation which will reintegrate the */
+/* subproblem just solved back into sorted order, */
+/* ie. D( INDXQ( I = 1, N ) ) will be in ascending order. */
+
+/* IWORK (workspace) INTEGER array, dimension (4*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, */
+/* dimension (3*N+2*QSIZ*N) */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (QSIZ*N) */
+
+/* QSTORE (input/output) DOUBLE PRECISION array, dimension (N**2+1) */
+/* Stores eigenvectors of submatrices encountered during */
+/* divide and conquer, packed together. QPTR points to */
+/* beginning of the submatrices. */
+
+/* QPTR (input/output) INTEGER array, dimension (N+2) */
+/* List of indices pointing to beginning of submatrices stored */
+/* in QSTORE. The submatrices are numbered starting at the */
+/* bottom left of the divide and conquer tree, from left to */
+/* right and bottom to top. */
+
+/* PRMPTR (input) INTEGER array, dimension (N lg N) */
+/* Contains a list of pointers which indicate where in PERM a */
+/* level's permutation is stored. PRMPTR(i+1) - PRMPTR(i) */
+/* indicates the size of the permutation and also the size of */
+/* the full, non-deflated problem. */
+
+/* PERM (input) INTEGER array, dimension (N lg N) */
+/* Contains the permutations (from deflation and sorting) to be */
+/* applied to each eigenblock. */
+
+/* GIVPTR (input) INTEGER array, dimension (N lg N) */
+/* Contains a list of pointers which indicate where in GIVCOL a */
+/* level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i) */
+/* indicates the number of Givens rotations. */
+
+/* GIVCOL (input) INTEGER array, dimension (2, N lg N) */
+/* Each pair of numbers indicates a pair of columns to take place */
+/* in a Givens rotation. */
+
+/* GIVNUM (input) DOUBLE PRECISION array, dimension (2, N lg N) */
+/* Each number indicates the S value to be used in the */
+/* corresponding Givens rotation. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = 1, an eigenvalue did not converge */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --indxq;
+ --qstore;
+ --qptr;
+ --prmptr;
+ --perm;
+ --givptr;
+ givcol -= 3;
+ givnum -= 3;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+/* IF( ICOMPQ.LT.0 .OR. ICOMPQ.GT.1 ) THEN */
+/* INFO = -1 */
+/* ELSE IF( N.LT.0 ) THEN */
+ if (*n < 0) {
+ *info = -1;
+ } else if (min(1,*n) > *cutpnt || *n < *cutpnt) {
+ *info = -2;
+ } else if (*qsiz < *n) {
+ *info = -3;
+ } else if (*ldq < max(1,*n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZLAED7", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* The following values are for bookkeeping purposes only. They are */
+/* integer pointers which indicate the portion of the workspace */
+/* used by a particular array in DLAED2 and SLAED3. */
+
+ iz = 1;
+ idlmda = iz + *n;
+ iw = idlmda + *n;
+ iq = iw + *n;
+
+ indx = 1;
+ indxc = indx + *n;
+ coltyp = indxc + *n;
+ indxp = coltyp + *n;
+
+/* Form the z-vector which consists of the last row of Q_1 and the */
+/* first row of Q_2. */
+
+ ptr = pow_ii(&c__2, tlvls) + 1;
+ i__1 = *curlvl - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *tlvls - i__;
+ ptr += pow_ii(&c__2, &i__2);
+/* L10: */
+ }
+ curr = ptr + *curpbm;
+ dlaeda_(n, tlvls, curlvl, curpbm, &prmptr[1], &perm[1], &givptr[1], &
+ givcol[3], &givnum[3], &qstore[1], &qptr[1], &rwork[iz], &rwork[
+ iz + *n], info);
+
+/* When solving the final problem, we no longer need the stored data, */
+/* so we will overwrite the data from this level onto the previously */
+/* used storage space. */
+
+ if (*curlvl == *tlvls) {
+ qptr[curr] = 1;
+ prmptr[curr] = 1;
+ givptr[curr] = 1;
+ }
+
+/* Sort and Deflate eigenvalues. */
+
+ zlaed8_(&k, n, qsiz, &q[q_offset], ldq, &d__[1], rho, cutpnt, &rwork[iz],
+ &rwork[idlmda], &work[1], qsiz, &rwork[iw], &iwork[indxp], &iwork[
+ indx], &indxq[1], &perm[prmptr[curr]], &givptr[curr + 1], &givcol[
+ (givptr[curr] << 1) + 1], &givnum[(givptr[curr] << 1) + 1], info);
+ prmptr[curr + 1] = prmptr[curr] + *n;
+ givptr[curr + 1] += givptr[curr];
+
+/* Solve Secular Equation. */
+
+ if (k != 0) {
+ dlaed9_(&k, &c__1, &k, n, &d__[1], &rwork[iq], &k, rho, &rwork[idlmda]
+, &rwork[iw], &qstore[qptr[curr]], &k, info);
+ zlacrm_(qsiz, &k, &work[1], qsiz, &qstore[qptr[curr]], &k, &q[
+ q_offset], ldq, &rwork[iq]);
+/* Computing 2nd power */
+ i__1 = k;
+ qptr[curr + 1] = qptr[curr] + i__1 * i__1;
+ if (*info != 0) {
+ return 0;
+ }
+
+/* Prepare the INDXQ sorting premutation. */
+
+ n1 = k;
+ n2 = *n - k;
+ dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]);
+ } else {
+ qptr[curr + 1] = qptr[curr];
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ indxq[i__] = i__;
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of ZLAED7 */
+
+} /* zlaed7_ */
diff --git a/contrib/libs/clapack/zlaed8.c b/contrib/libs/clapack/zlaed8.c
new file mode 100644
index 0000000000..9533c0c613
--- /dev/null
+++ b/contrib/libs/clapack/zlaed8.c
@@ -0,0 +1,436 @@
+/* zlaed8.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b3 = -1.;
+static integer c__1 = 1;
+
+/* Subroutine */ int zlaed8_(integer *k, integer *n, integer *qsiz,
+ doublecomplex *q, integer *ldq, doublereal *d__, doublereal *rho,
+ integer *cutpnt, doublereal *z__, doublereal *dlamda, doublecomplex *
+ q2, integer *ldq2, doublereal *w, integer *indxp, integer *indx,
+ integer *indxq, integer *perm, integer *givptr, integer *givcol,
+ doublereal *givnum, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, q2_dim1, q2_offset, i__1;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ doublereal c__;
+ integer i__, j;
+ doublereal s, t;
+ integer k2, n1, n2, jp, n1p1;
+ doublereal eps, tau, tol;
+ integer jlam, imax, jmax;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *), dcopy_(integer *, doublereal *, integer *, doublereal
+ *, integer *), zdrot_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublereal *, doublereal *), zcopy_(
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *)
+ ;
+ extern doublereal dlapy2_(doublereal *, doublereal *), dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+ integer *, integer *, integer *), xerbla_(char *, integer *), zlacpy_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAED8 merges the two sets of eigenvalues together into a single */
+/* sorted set. Then it tries to deflate the size of the problem. */
+/* There are two ways in which deflation can occur: when two or more */
+/* eigenvalues are close together or if there is a tiny element in the */
+/* Z vector. For each such occurrence the order of the related secular */
+/* equation problem is reduced by one. */
+
+/* Arguments */
+/* ========= */
+
+/* K (output) INTEGER */
+/* Contains the number of non-deflated eigenvalues. */
+/* This is the order of the related secular equation. */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* QSIZ (input) INTEGER */
+/* The dimension of the unitary matrix used to reduce */
+/* the dense or band matrix to tridiagonal form. */
+/* QSIZ >= N if ICOMPQ = 1. */
+
+/* Q (input/output) COMPLEX*16 array, dimension (LDQ,N) */
+/* On entry, Q contains the eigenvectors of the partially solved */
+/* system which has been previously updated in matrix */
+/* multiplies with other partially solved eigensystems. */
+/* On exit, Q contains the trailing (N-K) updated eigenvectors */
+/* (those which were deflated) in its last N-K columns. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max( 1, N ). */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, D contains the eigenvalues of the two submatrices to */
+/* be combined. On exit, D contains the trailing (N-K) updated */
+/* eigenvalues (those which were deflated) sorted into increasing */
+/* order. */
+
+/* RHO (input/output) DOUBLE PRECISION */
+/* Contains the off diagonal element associated with the rank-1 */
+/* cut which originally split the two submatrices which are now */
+/* being recombined. RHO is modified during the computation to */
+/* the value required by DLAED3. */
+
+/* CUTPNT (input) INTEGER */
+/* Contains the location of the last eigenvalue in the leading */
+/* sub-matrix. MIN(1,N) <= CUTPNT <= N. */
+
+/* Z (input) DOUBLE PRECISION array, dimension (N) */
+/* On input this vector contains the updating vector (the last */
+/* row of the first sub-eigenvector matrix and the first row of */
+/* the second sub-eigenvector matrix). The contents of Z are */
+/* destroyed during the updating process. */
+
+/* DLAMDA (output) DOUBLE PRECISION array, dimension (N) */
+/* Contains a copy of the first K eigenvalues which will be used */
+/* by DLAED3 to form the secular equation. */
+
+/* Q2 (output) COMPLEX*16 array, dimension (LDQ2,N) */
+/* If ICOMPQ = 0, Q2 is not referenced. Otherwise, */
+/* Contains a copy of the first K eigenvectors which will be used */
+/* by DLAED7 in a matrix multiply (DGEMM) to update the new */
+/* eigenvectors. */
+
+/* LDQ2 (input) INTEGER */
+/* The leading dimension of the array Q2. LDQ2 >= max( 1, N ). */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* This will hold the first k values of the final */
+/* deflation-altered z-vector and will be passed to DLAED3. */
+
+/* INDXP (workspace) INTEGER array, dimension (N) */
+/* This will contain the permutation used to place deflated */
+/* values of D at the end of the array. On output INDXP(1:K) */
+/* points to the nondeflated D-values and INDXP(K+1:N) */
+/* points to the deflated eigenvalues. */
+
+/* INDX (workspace) INTEGER array, dimension (N) */
+/* This will contain the permutation used to sort the contents of */
+/* D into ascending order. */
+
+/* INDXQ (input) INTEGER array, dimension (N) */
+/* This contains the permutation which separately sorts the two */
+/* sub-problems in D into ascending order. Note that elements in */
+/* the second half of this permutation must first have CUTPNT */
+/* added to their values in order to be accurate. */
+
+/* PERM (output) INTEGER array, dimension (N) */
+/* Contains the permutations (from deflation and sorting) to be */
+/* applied to each eigenblock. */
+
+/* GIVPTR (output) INTEGER */
+/* Contains the number of Givens rotations which took place in */
+/* this subproblem. */
+
+/* GIVCOL (output) INTEGER array, dimension (2, N) */
+/* Each pair of numbers indicates a pair of columns to take place */
+/* in a Givens rotation. */
+
+/* GIVNUM (output) DOUBLE PRECISION array, dimension (2, N) */
+/* Each number indicates the S value to be used in the */
+/* corresponding Givens rotation. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --d__;
+ --z__;
+ --dlamda;
+ q2_dim1 = *ldq2;
+ q2_offset = 1 + q2_dim1;
+ q2 -= q2_offset;
+ --w;
+ --indxp;
+ --indx;
+ --indxq;
+ --perm;
+ givcol -= 3;
+ givnum -= 3;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -2;
+ } else if (*qsiz < *n) {
+ *info = -3;
+ } else if (*ldq < max(1,*n)) {
+ *info = -5;
+ } else if (*cutpnt < min(1,*n) || *cutpnt > *n) {
+ *info = -8;
+ } else if (*ldq2 < max(1,*n)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZLAED8", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ n1 = *cutpnt;
+ n2 = *n - n1;
+ n1p1 = n1 + 1;
+
+ if (*rho < 0.) {
+ dscal_(&n2, &c_b3, &z__[n1p1], &c__1);
+ }
+
+/* Normalize z so that norm(z) = 1 */
+
+ t = 1. / sqrt(2.);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ indx[j] = j;
+/* L10: */
+ }
+ dscal_(n, &t, &z__[1], &c__1);
+ *rho = (d__1 = *rho * 2., abs(d__1));
+
+/* Sort the eigenvalues into increasing order */
+
+ i__1 = *n;
+ for (i__ = *cutpnt + 1; i__ <= i__1; ++i__) {
+ indxq[i__] += *cutpnt;
+/* L20: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlamda[i__] = d__[indxq[i__]];
+ w[i__] = z__[indxq[i__]];
+/* L30: */
+ }
+ i__ = 1;
+ j = *cutpnt + 1;
+ dlamrg_(&n1, &n2, &dlamda[1], &c__1, &c__1, &indx[1]);
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = dlamda[indx[i__]];
+ z__[i__] = w[indx[i__]];
+/* L40: */
+ }
+
+/* Calculate the allowable deflation tolerance */
+
+ imax = idamax_(n, &z__[1], &c__1);
+ jmax = idamax_(n, &d__[1], &c__1);
+ eps = dlamch_("Epsilon");
+ tol = eps * 8. * (d__1 = d__[jmax], abs(d__1));
+
+/* If the rank-1 modifier is small enough, no more needs to be done */
+/* -- except to reorganize Q so that its columns correspond with the */
+/* elements in D. */
+
+ if (*rho * (d__1 = z__[imax], abs(d__1)) <= tol) {
+ *k = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ perm[j] = indxq[indx[j]];
+ zcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1]
+, &c__1);
+/* L50: */
+ }
+ zlacpy_("A", qsiz, n, &q2[q2_dim1 + 1], ldq2, &q[q_dim1 + 1], ldq);
+ return 0;
+ }
+
+/* If there are multiple eigenvalues then the problem deflates. Here */
+/* the number of equal eigenvalues are found. As each equal */
+/* eigenvalue is found, an elementary reflector is computed to rotate */
+/* the corresponding eigensubspace so that the corresponding */
+/* components of Z are zero in this new basis. */
+
+ *k = 0;
+ *givptr = 0;
+ k2 = *n + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ indxp[k2] = j;
+ if (j == *n) {
+ goto L100;
+ }
+ } else {
+ jlam = j;
+ goto L70;
+ }
+/* L60: */
+ }
+L70:
+ ++j;
+ if (j > *n) {
+ goto L90;
+ }
+ if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ indxp[k2] = j;
+ } else {
+
+/* Check if eigenvalues are close enough to allow deflation. */
+
+ s = z__[jlam];
+ c__ = z__[j];
+
+/* Find sqrt(a**2+b**2) without overflow or */
+/* destructive underflow. */
+
+ tau = dlapy2_(&c__, &s);
+ t = d__[j] - d__[jlam];
+ c__ /= tau;
+ s = -s / tau;
+ if ((d__1 = t * c__ * s, abs(d__1)) <= tol) {
+
+/* Deflation is possible. */
+
+ z__[j] = tau;
+ z__[jlam] = 0.;
+
+/* Record the appropriate Givens rotation */
+
+ ++(*givptr);
+ givcol[(*givptr << 1) + 1] = indxq[indx[jlam]];
+ givcol[(*givptr << 1) + 2] = indxq[indx[j]];
+ givnum[(*givptr << 1) + 1] = c__;
+ givnum[(*givptr << 1) + 2] = s;
+ zdrot_(qsiz, &q[indxq[indx[jlam]] * q_dim1 + 1], &c__1, &q[indxq[
+ indx[j]] * q_dim1 + 1], &c__1, &c__, &s);
+ t = d__[jlam] * c__ * c__ + d__[j] * s * s;
+ d__[j] = d__[jlam] * s * s + d__[j] * c__ * c__;
+ d__[jlam] = t;
+ --k2;
+ i__ = 1;
+L80:
+ if (k2 + i__ <= *n) {
+ if (d__[jlam] < d__[indxp[k2 + i__]]) {
+ indxp[k2 + i__ - 1] = indxp[k2 + i__];
+ indxp[k2 + i__] = jlam;
+ ++i__;
+ goto L80;
+ } else {
+ indxp[k2 + i__ - 1] = jlam;
+ }
+ } else {
+ indxp[k2 + i__ - 1] = jlam;
+ }
+ jlam = j;
+ } else {
+ ++(*k);
+ w[*k] = z__[jlam];
+ dlamda[*k] = d__[jlam];
+ indxp[*k] = jlam;
+ jlam = j;
+ }
+ }
+ goto L70;
+L90:
+
+/* Record the last eigenvalue. */
+
+ ++(*k);
+ w[*k] = z__[jlam];
+ dlamda[*k] = d__[jlam];
+ indxp[*k] = jlam;
+
+L100:
+
+/* Sort the eigenvalues and corresponding eigenvectors into DLAMDA */
+/* and Q2 respectively. The eigenvalues/vectors which were not */
+/* deflated go into the first K slots of DLAMDA and Q2 respectively, */
+/* while those which were deflated go into the last N - K slots. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ jp = indxp[j];
+ dlamda[j] = d__[jp];
+ perm[j] = indxq[indx[jp]];
+ zcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1], &
+ c__1);
+/* L110: */
+ }
+
+/* The deflated eigenvalues and their corresponding vectors go back */
+/* into the last N - K slots of D and Q respectively. */
+
+ if (*k < *n) {
+ i__1 = *n - *k;
+ dcopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1);
+ i__1 = *n - *k;
+ zlacpy_("A", qsiz, &i__1, &q2[(*k + 1) * q2_dim1 + 1], ldq2, &q[(*k +
+ 1) * q_dim1 + 1], ldq);
+ }
+
+ return 0;
+
+/* End of ZLAED8 */
+
+} /* zlaed8_ */
diff --git a/contrib/libs/clapack/zlaein.c b/contrib/libs/clapack/zlaein.c
new file mode 100644
index 0000000000..7b03e27631
--- /dev/null
+++ b/contrib/libs/clapack/zlaein.c
@@ -0,0 +1,397 @@
+/* zlaein.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zlaein_(logical *rightv, logical *noinit, integer *n,
+ doublecomplex *h__, integer *ldh, doublecomplex *w, doublecomplex *v,
+ doublecomplex *b, integer *ldb, doublereal *rwork, doublereal *eps3,
+ doublereal *smlnum, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, h_dim1, h_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j;
+ doublecomplex x, ei, ej;
+ integer its, ierr;
+ doublecomplex temp;
+ doublereal scale;
+ char trans[1];
+ doublereal rtemp, rootn, vnorm;
+ extern doublereal dznrm2_(integer *, doublecomplex *, integer *);
+ extern /* Subroutine */ int zdscal_(integer *, doublereal *,
+ doublecomplex *, integer *);
+ extern integer izamax_(integer *, doublecomplex *, integer *);
+ extern /* Double Complex */ VOID zladiv_(doublecomplex *, doublecomplex *,
+ doublecomplex *);
+ char normin[1];
+ extern doublereal dzasum_(integer *, doublecomplex *, integer *);
+ doublereal nrmsml;
+ extern /* Subroutine */ int zlatrs_(char *, char *, char *, char *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublereal *, doublereal *, integer *);
+ doublereal growto;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAEIN uses inverse iteration to find a right or left eigenvector */
+/* corresponding to the eigenvalue W of a complex upper Hessenberg */
+/* matrix H. */
+
+/* Arguments */
+/* ========= */
+
+/* RIGHTV (input) LOGICAL */
+/* = .TRUE. : compute right eigenvector; */
+/* = .FALSE.: compute left eigenvector. */
+
+/* NOINIT (input) LOGICAL */
+/* = .TRUE. : no initial vector supplied in V */
+/* = .FALSE.: initial vector supplied in V. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N >= 0. */
+
+/* H (input) COMPLEX*16 array, dimension (LDH,N) */
+/* The upper Hessenberg matrix H. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH >= max(1,N). */
+
+/* W (input) COMPLEX*16 */
+/* The eigenvalue of H whose corresponding right or left */
+/* eigenvector is to be computed. */
+
+/* V (input/output) COMPLEX*16 array, dimension (N) */
+/* On entry, if NOINIT = .FALSE., V must contain a starting */
+/* vector for inverse iteration; otherwise V need not be set. */
+/* On exit, V contains the computed eigenvector, normalized so */
+/* that the component of largest magnitude has magnitude 1; here */
+/* the magnitude of a complex number (x,y) is taken to be */
+/* |x| + |y|. */
+
+/* B (workspace) COMPLEX*16 array, dimension (LDB,N) */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* EPS3 (input) DOUBLE PRECISION */
+/* A small machine-dependent value which is used to perturb */
+/* close eigenvalues, and to replace zero pivots. */
+
+/* SMLNUM (input) DOUBLE PRECISION */
+/* A machine-dependent value close to the underflow threshold. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* = 1: inverse iteration did not converge; V is set to the */
+/* last iterate. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --v;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+
+/* GROWTO is the threshold used in the acceptance test for an */
+/* eigenvector. */
+
+ rootn = sqrt((doublereal) (*n));
+ growto = .1 / rootn;
+/* Computing MAX */
+ d__1 = 1., d__2 = *eps3 * rootn;
+ nrmsml = max(d__1,d__2) * *smlnum;
+
+/* Form B = H - W*I (except that the subdiagonal elements are not */
+/* stored). */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * h_dim1;
+ b[i__3].r = h__[i__4].r, b[i__3].i = h__[i__4].i;
+/* L10: */
+ }
+ i__2 = j + j * b_dim1;
+ i__3 = j + j * h_dim1;
+ z__1.r = h__[i__3].r - w->r, z__1.i = h__[i__3].i - w->i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L20: */
+ }
+
+ if (*noinit) {
+
+/* Initialize V. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ v[i__2].r = *eps3, v[i__2].i = 0.;
+/* L30: */
+ }
+ } else {
+
+/* Scale supplied initial vector. */
+
+ vnorm = dznrm2_(n, &v[1], &c__1);
+ d__1 = *eps3 * rootn / max(vnorm,nrmsml);
+ zdscal_(n, &d__1, &v[1], &c__1);
+ }
+
+ if (*rightv) {
+
+/* LU decomposition with partial pivoting of B, replacing zero */
+/* pivots by EPS3. */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + 1 + i__ * h_dim1;
+ ei.r = h__[i__2].r, ei.i = h__[i__2].i;
+ i__2 = i__ + i__ * b_dim1;
+ if ((d__1 = b[i__2].r, abs(d__1)) + (d__2 = d_imag(&b[i__ + i__ *
+ b_dim1]), abs(d__2)) < (d__3 = ei.r, abs(d__3)) + (d__4 =
+ d_imag(&ei), abs(d__4))) {
+
+/* Interchange rows and eliminate. */
+
+ zladiv_(&z__1, &b[i__ + i__ * b_dim1], &ei);
+ x.r = z__1.r, x.i = z__1.i;
+ i__2 = i__ + i__ * b_dim1;
+ b[i__2].r = ei.r, b[i__2].i = ei.i;
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ i__3 = i__ + 1 + j * b_dim1;
+ temp.r = b[i__3].r, temp.i = b[i__3].i;
+ i__3 = i__ + 1 + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__2.r = x.r * temp.r - x.i * temp.i, z__2.i = x.r *
+ temp.i + x.i * temp.r;
+ z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4].i - z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = temp.r, b[i__3].i = temp.i;
+/* L40: */
+ }
+ } else {
+
+/* Eliminate without interchange. */
+
+ i__2 = i__ + i__ * b_dim1;
+ if (b[i__2].r == 0. && b[i__2].i == 0.) {
+ i__3 = i__ + i__ * b_dim1;
+ b[i__3].r = *eps3, b[i__3].i = 0.;
+ }
+ zladiv_(&z__1, &ei, &b[i__ + i__ * b_dim1]);
+ x.r = z__1.r, x.i = z__1.i;
+ if (x.r != 0. || x.i != 0.) {
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ i__3 = i__ + 1 + j * b_dim1;
+ i__4 = i__ + 1 + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ z__2.r = x.r * b[i__5].r - x.i * b[i__5].i, z__2.i =
+ x.r * b[i__5].i + x.i * b[i__5].r;
+ z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4].i -
+ z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L50: */
+ }
+ }
+ }
+/* L60: */
+ }
+ i__1 = *n + *n * b_dim1;
+ if (b[i__1].r == 0. && b[i__1].i == 0.) {
+ i__2 = *n + *n * b_dim1;
+ b[i__2].r = *eps3, b[i__2].i = 0.;
+ }
+
+ *(unsigned char *)trans = 'N';
+
+ } else {
+
+/* UL decomposition with partial pivoting of B, replacing zero */
+/* pivots by EPS3. */
+
+ for (j = *n; j >= 2; --j) {
+ i__1 = j + (j - 1) * h_dim1;
+ ej.r = h__[i__1].r, ej.i = h__[i__1].i;
+ i__1 = j + j * b_dim1;
+ if ((d__1 = b[i__1].r, abs(d__1)) + (d__2 = d_imag(&b[j + j *
+ b_dim1]), abs(d__2)) < (d__3 = ej.r, abs(d__3)) + (d__4 =
+ d_imag(&ej), abs(d__4))) {
+
+/* Interchange columns and eliminate. */
+
+ zladiv_(&z__1, &b[j + j * b_dim1], &ej);
+ x.r = z__1.r, x.i = z__1.i;
+ i__1 = j + j * b_dim1;
+ b[i__1].r = ej.r, b[i__1].i = ej.i;
+ i__1 = j - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + (j - 1) * b_dim1;
+ temp.r = b[i__2].r, temp.i = b[i__2].i;
+ i__2 = i__ + (j - 1) * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ z__2.r = x.r * temp.r - x.i * temp.i, z__2.i = x.r *
+ temp.i + x.i * temp.r;
+ z__1.r = b[i__3].r - z__2.r, z__1.i = b[i__3].i - z__2.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ i__2 = i__ + j * b_dim1;
+ b[i__2].r = temp.r, b[i__2].i = temp.i;
+/* L70: */
+ }
+ } else {
+
+/* Eliminate without interchange. */
+
+ i__1 = j + j * b_dim1;
+ if (b[i__1].r == 0. && b[i__1].i == 0.) {
+ i__2 = j + j * b_dim1;
+ b[i__2].r = *eps3, b[i__2].i = 0.;
+ }
+ zladiv_(&z__1, &ej, &b[j + j * b_dim1]);
+ x.r = z__1.r, x.i = z__1.i;
+ if (x.r != 0. || x.i != 0.) {
+ i__1 = j - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + (j - 1) * b_dim1;
+ i__3 = i__ + (j - 1) * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__2.r = x.r * b[i__4].r - x.i * b[i__4].i, z__2.i =
+ x.r * b[i__4].i + x.i * b[i__4].r;
+ z__1.r = b[i__3].r - z__2.r, z__1.i = b[i__3].i -
+ z__2.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L80: */
+ }
+ }
+ }
+/* L90: */
+ }
+ i__1 = b_dim1 + 1;
+ if (b[i__1].r == 0. && b[i__1].i == 0.) {
+ i__2 = b_dim1 + 1;
+ b[i__2].r = *eps3, b[i__2].i = 0.;
+ }
+
+ *(unsigned char *)trans = 'C';
+
+ }
+
+ *(unsigned char *)normin = 'N';
+ i__1 = *n;
+ for (its = 1; its <= i__1; ++its) {
+
+/* Solve U*x = scale*v for a right eigenvector */
+/* or U'*x = scale*v for a left eigenvector, */
+/* overwriting x on v. */
+
+ zlatrs_("Upper", trans, "Nonunit", normin, n, &b[b_offset], ldb, &v[1]
+, &scale, &rwork[1], &ierr);
+ *(unsigned char *)normin = 'Y';
+
+/* Test for sufficient growth in the norm of v. */
+
+ vnorm = dzasum_(n, &v[1], &c__1);
+ if (vnorm >= growto * scale) {
+ goto L120;
+ }
+
+/* Choose new orthogonal starting vector and try again. */
+
+ rtemp = *eps3 / (rootn + 1.);
+ v[1].r = *eps3, v[1].i = 0.;
+ i__2 = *n;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ v[i__3].r = rtemp, v[i__3].i = 0.;
+/* L100: */
+ }
+ i__2 = *n - its + 1;
+ i__3 = *n - its + 1;
+ d__1 = *eps3 * rootn;
+ z__1.r = v[i__3].r - d__1, z__1.i = v[i__3].i;
+ v[i__2].r = z__1.r, v[i__2].i = z__1.i;
+/* L110: */
+ }
+
+/* Failure to find eigenvector in N iterations. */
+
+ *info = 1;
+
+L120:
+
+/* Normalize eigenvector. */
+
+ i__ = izamax_(n, &v[1], &c__1);
+ i__1 = i__;
+ d__3 = 1. / ((d__1 = v[i__1].r, abs(d__1)) + (d__2 = d_imag(&v[i__]), abs(
+ d__2)));
+ zdscal_(n, &d__3, &v[1], &c__1);
+
+ return 0;
+
+/* End of ZLAEIN */
+
+} /* zlaein_ */
diff --git a/contrib/libs/clapack/zlaesy.c b/contrib/libs/clapack/zlaesy.c
new file mode 100644
index 0000000000..a3774696f9
--- /dev/null
+++ b/contrib/libs/clapack/zlaesy.c
@@ -0,0 +1,208 @@
+/* zlaesy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__2 = 2;
+
+/* Subroutine */ int zlaesy_(doublecomplex *a, doublecomplex *b,
+ doublecomplex *c__, doublecomplex *rt1, doublecomplex *rt2,
+ doublecomplex *evscal, doublecomplex *cs1, doublecomplex *sn1)
+{
+ /* System generated locals */
+ doublereal d__1, d__2;
+ doublecomplex z__1, z__2, z__3, z__4, z__5, z__6, z__7;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *);
+ void pow_zi(doublecomplex *, doublecomplex *, integer *), z_sqrt(
+ doublecomplex *, doublecomplex *), z_div(doublecomplex *,
+ doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ doublecomplex s, t;
+ doublereal z__;
+ doublecomplex tmp;
+ doublereal babs, tabs, evnorm;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAESY computes the eigendecomposition of a 2-by-2 symmetric matrix */
+/* ( ( A, B );( B, C ) ) */
+/* provided the norm of the matrix of eigenvectors is larger than */
+/* some threshold value. */
+
+/* RT1 is the eigenvalue of larger absolute value, and RT2 of */
+/* smaller absolute value. If the eigenvectors are computed, then */
+/* on return ( CS1, SN1 ) is the unit eigenvector for RT1, hence */
+
+/* [ CS1 SN1 ] . [ A B ] . [ CS1 -SN1 ] = [ RT1 0 ] */
+/* [ -SN1 CS1 ] [ B C ] [ SN1 CS1 ] [ 0 RT2 ] */
+
+/* Arguments */
+/* ========= */
+
+/* A (input) COMPLEX*16 */
+/* The ( 1, 1 ) element of input matrix. */
+
+/* B (input) COMPLEX*16 */
+/* The ( 1, 2 ) element of input matrix. The ( 2, 1 ) element */
+/* is also given by B, since the 2-by-2 matrix is symmetric. */
+
+/* C (input) COMPLEX*16 */
+/* The ( 2, 2 ) element of input matrix. */
+
+/* RT1 (output) COMPLEX*16 */
+/* The eigenvalue of larger modulus. */
+
+/* RT2 (output) COMPLEX*16 */
+/* The eigenvalue of smaller modulus. */
+
+/* EVSCAL (output) COMPLEX*16 */
+/* The complex value by which the eigenvector matrix was scaled */
+/* to make it orthonormal. If EVSCAL is zero, the eigenvectors */
+/* were not computed. This means one of two things: the 2-by-2 */
+/* matrix could not be diagonalized, or the norm of the matrix */
+/* of eigenvectors before scaling was larger than the threshold */
+/* value THRESH (set below). */
+
+/* CS1 (output) COMPLEX*16 */
+/* SN1 (output) COMPLEX*16 */
+/* If EVSCAL .NE. 0, ( CS1, SN1 ) is the unit right eigenvector */
+/* for RT1. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+
+/* Special case: The matrix is actually diagonal. */
+/* To avoid divide by zero later, we treat this case separately. */
+
+ if (z_abs(b) == 0.) {
+ rt1->r = a->r, rt1->i = a->i;
+ rt2->r = c__->r, rt2->i = c__->i;
+ if (z_abs(rt1) < z_abs(rt2)) {
+ tmp.r = rt1->r, tmp.i = rt1->i;
+ rt1->r = rt2->r, rt1->i = rt2->i;
+ rt2->r = tmp.r, rt2->i = tmp.i;
+ cs1->r = 0., cs1->i = 0.;
+ sn1->r = 1., sn1->i = 0.;
+ } else {
+ cs1->r = 1., cs1->i = 0.;
+ sn1->r = 0., sn1->i = 0.;
+ }
+ } else {
+
+/* Compute the eigenvalues and eigenvectors. */
+/* The characteristic equation is */
+/* lambda **2 - (A+C) lambda + (A*C - B*B) */
+/* and we solve it using the quadratic formula. */
+
+ z__2.r = a->r + c__->r, z__2.i = a->i + c__->i;
+ z__1.r = z__2.r * .5, z__1.i = z__2.i * .5;
+ s.r = z__1.r, s.i = z__1.i;
+ z__2.r = a->r - c__->r, z__2.i = a->i - c__->i;
+ z__1.r = z__2.r * .5, z__1.i = z__2.i * .5;
+ t.r = z__1.r, t.i = z__1.i;
+
+/* Take the square root carefully to avoid over/under flow. */
+
+ babs = z_abs(b);
+ tabs = z_abs(&t);
+ z__ = max(babs,tabs);
+ if (z__ > 0.) {
+ z__5.r = t.r / z__, z__5.i = t.i / z__;
+ pow_zi(&z__4, &z__5, &c__2);
+ z__7.r = b->r / z__, z__7.i = b->i / z__;
+ pow_zi(&z__6, &z__7, &c__2);
+ z__3.r = z__4.r + z__6.r, z__3.i = z__4.i + z__6.i;
+ z_sqrt(&z__2, &z__3);
+ z__1.r = z__ * z__2.r, z__1.i = z__ * z__2.i;
+ t.r = z__1.r, t.i = z__1.i;
+ }
+
+/* Compute the two eigenvalues. RT1 and RT2 are exchanged */
+/* if necessary so that RT1 will have the greater magnitude. */
+
+ z__1.r = s.r + t.r, z__1.i = s.i + t.i;
+ rt1->r = z__1.r, rt1->i = z__1.i;
+ z__1.r = s.r - t.r, z__1.i = s.i - t.i;
+ rt2->r = z__1.r, rt2->i = z__1.i;
+ if (z_abs(rt1) < z_abs(rt2)) {
+ tmp.r = rt1->r, tmp.i = rt1->i;
+ rt1->r = rt2->r, rt1->i = rt2->i;
+ rt2->r = tmp.r, rt2->i = tmp.i;
+ }
+
+/* Choose CS1 = 1 and SN1 to satisfy the first equation, then */
+/* scale the components of this eigenvector so that the matrix */
+/* of eigenvectors X satisfies X * X' = I . (No scaling is */
+/* done if the norm of the eigenvalue matrix is less than THRESH.) */
+
+ z__2.r = rt1->r - a->r, z__2.i = rt1->i - a->i;
+ z_div(&z__1, &z__2, b);
+ sn1->r = z__1.r, sn1->i = z__1.i;
+ tabs = z_abs(sn1);
+ if (tabs > 1.) {
+/* Computing 2nd power */
+ d__2 = 1. / tabs;
+ d__1 = d__2 * d__2;
+ z__5.r = sn1->r / tabs, z__5.i = sn1->i / tabs;
+ pow_zi(&z__4, &z__5, &c__2);
+ z__3.r = d__1 + z__4.r, z__3.i = z__4.i;
+ z_sqrt(&z__2, &z__3);
+ z__1.r = tabs * z__2.r, z__1.i = tabs * z__2.i;
+ t.r = z__1.r, t.i = z__1.i;
+ } else {
+ z__3.r = sn1->r * sn1->r - sn1->i * sn1->i, z__3.i = sn1->r *
+ sn1->i + sn1->i * sn1->r;
+ z__2.r = z__3.r + 1., z__2.i = z__3.i + 0.;
+ z_sqrt(&z__1, &z__2);
+ t.r = z__1.r, t.i = z__1.i;
+ }
+ evnorm = z_abs(&t);
+ if (evnorm >= .1) {
+ z_div(&z__1, &c_b1, &t);
+ evscal->r = z__1.r, evscal->i = z__1.i;
+ cs1->r = evscal->r, cs1->i = evscal->i;
+ z__1.r = sn1->r * evscal->r - sn1->i * evscal->i, z__1.i = sn1->r
+ * evscal->i + sn1->i * evscal->r;
+ sn1->r = z__1.r, sn1->i = z__1.i;
+ } else {
+ evscal->r = 0., evscal->i = 0.;
+ }
+ }
+ return 0;
+
+/* End of ZLAESY */
+
+} /* zlaesy_ */
diff --git a/contrib/libs/clapack/zlaev2.c b/contrib/libs/clapack/zlaev2.c
new file mode 100644
index 0000000000..4a66392825
--- /dev/null
+++ b/contrib/libs/clapack/zlaev2.c
@@ -0,0 +1,125 @@
+/* zlaev2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlaev2_(doublecomplex *a, doublecomplex *b,
+ doublecomplex *c__, doublereal *rt1, doublereal *rt2, doublereal *cs1,
+ doublecomplex *sn1)
+{
+ /* System generated locals */
+ doublereal d__1, d__2, d__3;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ doublereal t;
+ doublecomplex w;
+ extern /* Subroutine */ int dlaev2_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAEV2 computes the eigendecomposition of a 2-by-2 Hermitian matrix */
+/* [ A B ] */
+/* [ CONJG(B) C ]. */
+/* On return, RT1 is the eigenvalue of larger absolute value, RT2 is the */
+/* eigenvalue of smaller absolute value, and (CS1,SN1) is the unit right */
+/* eigenvector for RT1, giving the decomposition */
+
+/* [ CS1 CONJG(SN1) ] [ A B ] [ CS1 -CONJG(SN1) ] = [ RT1 0 ] */
+/* [-SN1 CS1 ] [ CONJG(B) C ] [ SN1 CS1 ] [ 0 RT2 ]. */
+
+/* Arguments */
+/* ========= */
+
+/* A (input) COMPLEX*16 */
+/* The (1,1) element of the 2-by-2 matrix. */
+
+/* B (input) COMPLEX*16 */
+/* The (1,2) element and the conjugate of the (2,1) element of */
+/* the 2-by-2 matrix. */
+
+/* C (input) COMPLEX*16 */
+/* The (2,2) element of the 2-by-2 matrix. */
+
+/* RT1 (output) DOUBLE PRECISION */
+/* The eigenvalue of larger absolute value. */
+
+/* RT2 (output) DOUBLE PRECISION */
+/* The eigenvalue of smaller absolute value. */
+
+/* CS1 (output) DOUBLE PRECISION */
+/* SN1 (output) COMPLEX*16 */
+/* The vector (CS1, SN1) is a unit right eigenvector for RT1. */
+
+/* Further Details */
+/* =============== */
+
+/* RT1 is accurate to a few ulps barring over/underflow. */
+
+/* RT2 may be inaccurate if there is massive cancellation in the */
+/* determinant A*C-B*B; higher precision or correctly rounded or */
+/* correctly truncated arithmetic would be needed to compute RT2 */
+/* accurately in all cases. */
+
+/* CS1 and SN1 are accurate to a few ulps barring over/underflow. */
+
+/* Overflow is possible only if RT1 is within a factor of 5 of overflow. */
+/* Underflow is harmless if the input data is 0 or exceeds */
+/* underflow_threshold / macheps. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ if (z_abs(b) == 0.) {
+ w.r = 1., w.i = 0.;
+ } else {
+ d_cnjg(&z__2, b);
+ d__1 = z_abs(b);
+ z__1.r = z__2.r / d__1, z__1.i = z__2.i / d__1;
+ w.r = z__1.r, w.i = z__1.i;
+ }
+ d__1 = a->r;
+ d__2 = z_abs(b);
+ d__3 = c__->r;
+ dlaev2_(&d__1, &d__2, &d__3, rt1, rt2, cs1, &t);
+ z__1.r = t * w.r, z__1.i = t * w.i;
+ sn1->r = z__1.r, sn1->i = z__1.i;
+ return 0;
+
+/* End of ZLAEV2 */
+
+} /* zlaev2_ */
diff --git a/contrib/libs/clapack/zlag2c.c b/contrib/libs/clapack/zlag2c.c
new file mode 100644
index 0000000000..6f408f9c7d
--- /dev/null
+++ b/contrib/libs/clapack/zlag2c.c
@@ -0,0 +1,124 @@
+/* zlag2c.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlag2c_(integer *m, integer *n, doublecomplex *a,
+ integer *lda, complex *sa, integer *ldsa, integer *info)
+{
+ /* System generated locals */
+ integer sa_dim1, sa_offset, a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j;
+ doublereal rmax;
+ extern doublereal slamch_(char *);
+
+
+/* -- LAPACK PROTOTYPE auxiliary routine (version 3.1.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* August 2007 */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAG2C converts a COMPLEX*16 matrix, A, to a COMPLEX matrix, SA. */
+
+/* RMAX is the overflow threshold for SINGLE PRECISION arithmetic. */
+/* ZLAG2C checks that all the entries of A are between -RMAX and */
+/* RMAX. If not, the conversion is aborted and a flag is raised. */
+
+/* This is an auxiliary routine so there is no argument checking. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N coefficient matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* SA (output) COMPLEX array, dimension (LDSA,N) */
+/* On exit, if INFO=0, the M-by-N coefficient matrix SA; if */
+/* INFO>0, the content of SA is unspecified. */
+
+/* LDSA (input) INTEGER */
+/* The leading dimension of the array SA. LDSA >= max(1,M). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* = 1: an entry of the matrix A is greater than the SINGLE */
+/* PRECISION overflow threshold, in this case, the content */
+/* of SA in exit is unspecified. */
+
+/* ========= */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ sa_dim1 = *ldsa;
+ sa_offset = 1 + sa_dim1;
+ sa -= sa_offset;
+
+ /* Function Body */
+ rmax = slamch_("O");
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ if (a[i__3].r < -rmax || a[i__4].r > rmax || d_imag(&a[i__ + j *
+ a_dim1]) < -rmax || d_imag(&a[i__ + j * a_dim1]) > rmax) {
+ *info = 1;
+ goto L30;
+ }
+ i__3 = i__ + j * sa_dim1;
+ i__4 = i__ + j * a_dim1;
+ sa[i__3].r = a[i__4].r, sa[i__3].i = a[i__4].i;
+/* L10: */
+ }
+/* L20: */
+ }
+ *info = 0;
+L30:
+ return 0;
+
+/* End of ZLAG2C */
+
+} /* zlag2c_ */
diff --git a/contrib/libs/clapack/zlags2.c b/contrib/libs/clapack/zlags2.c
new file mode 100644
index 0000000000..89ac38ff7f
--- /dev/null
+++ b/contrib/libs/clapack/zlags2.c
@@ -0,0 +1,468 @@
+/* zlags2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlags2_(logical *upper, doublereal *a1, doublecomplex *
+ a2, doublereal *a3, doublereal *b1, doublecomplex *b2, doublereal *b3,
+ doublereal *csu, doublecomplex *snu, doublereal *csv, doublecomplex *
+ snv, doublereal *csq, doublecomplex *snq)
+{
+ /* System generated locals */
+ doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8;
+ doublecomplex z__1, z__2, z__3, z__4, z__5;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *), d_imag(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ doublereal a;
+ doublecomplex b, c__;
+ doublereal d__;
+ doublecomplex r__, d1;
+ doublereal s1, s2, fb, fc;
+ doublecomplex ua11, ua12, ua21, ua22, vb11, vb12, vb21, vb22;
+ doublereal csl, csr, snl, snr, aua11, aua12, aua21, aua22, avb12, avb11,
+ avb21, avb22, ua11r, ua22r, vb11r, vb22r;
+ extern /* Subroutine */ int dlasv2_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *), zlartg_(doublecomplex *
+, doublecomplex *, doublereal *, doublecomplex *, doublecomplex *)
+ ;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAGS2 computes 2-by-2 unitary matrices U, V and Q, such */
+/* that if ( UPPER ) then */
+
+/* U'*A*Q = U'*( A1 A2 )*Q = ( x 0 ) */
+/* ( 0 A3 ) ( x x ) */
+/* and */
+/* V'*B*Q = V'*( B1 B2 )*Q = ( x 0 ) */
+/* ( 0 B3 ) ( x x ) */
+
+/* or if ( .NOT.UPPER ) then */
+
+/* U'*A*Q = U'*( A1 0 )*Q = ( x x ) */
+/* ( A2 A3 ) ( 0 x ) */
+/* and */
+/* V'*B*Q = V'*( B1 0 )*Q = ( x x ) */
+/* ( B2 B3 ) ( 0 x ) */
+/* where */
+
+/* U = ( CSU SNU ), V = ( CSV SNV ), */
+/* ( -CONJG(SNU) CSU ) ( -CONJG(SNV) CSV ) */
+
+/* Q = ( CSQ SNQ ) */
+/* ( -CONJG(SNQ) CSQ ) */
+
+/* Z' denotes the conjugate transpose of Z. */
+
+/* The rows of the transformed A and B are parallel. Moreover, if the */
+/* input 2-by-2 matrix A is not zero, then the transformed (1,1) entry */
+/* of A is not zero. If the input matrices A and B are both not zero, */
+/* then the transformed (2,2) element of B is not zero, except when the */
+/* first rows of input A and B are parallel and the second rows are */
+/* zero. */
+
+/* Arguments */
+/* ========= */
+
+/* UPPER (input) LOGICAL */
+/* = .TRUE.: the input matrices A and B are upper triangular. */
+/* = .FALSE.: the input matrices A and B are lower triangular. */
+
+/* A1 (input) DOUBLE PRECISION */
+/* A2 (input) COMPLEX*16 */
+/* A3 (input) DOUBLE PRECISION */
+/* On entry, A1, A2 and A3 are elements of the input 2-by-2 */
+/* upper (lower) triangular matrix A. */
+
+/* B1 (input) DOUBLE PRECISION */
+/* B2 (input) COMPLEX*16 */
+/* B3 (input) DOUBLE PRECISION */
+/* On entry, B1, B2 and B3 are elements of the input 2-by-2 */
+/* upper (lower) triangular matrix B. */
+
+/* CSU (output) DOUBLE PRECISION */
+/* SNU (output) COMPLEX*16 */
+/* The desired unitary matrix U. */
+
+/* CSV (output) DOUBLE PRECISION */
+/* SNV (output) COMPLEX*16 */
+/* The desired unitary matrix V. */
+
+/* CSQ (output) DOUBLE PRECISION */
+/* SNQ (output) COMPLEX*16 */
+/* The desired unitary matrix Q. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ if (*upper) {
+
+/* Input matrices A and B are upper triangular matrices */
+
+/* Form matrix C = A*adj(B) = ( a b ) */
+/* ( 0 d ) */
+
+ a = *a1 * *b3;
+ d__ = *a3 * *b1;
+ z__2.r = *b1 * a2->r, z__2.i = *b1 * a2->i;
+ z__3.r = *a1 * b2->r, z__3.i = *a1 * b2->i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ b.r = z__1.r, b.i = z__1.i;
+ fb = z_abs(&b);
+
+/* Transform complex 2-by-2 matrix C to real matrix by unitary */
+/* diagonal matrix diag(1,D1). */
+
+ d1.r = 1., d1.i = 0.;
+ if (fb != 0.) {
+ z__1.r = b.r / fb, z__1.i = b.i / fb;
+ d1.r = z__1.r, d1.i = z__1.i;
+ }
+
+/* The SVD of real 2 by 2 triangular C */
+
+/* ( CSL -SNL )*( A B )*( CSR SNR ) = ( R 0 ) */
+/* ( SNL CSL ) ( 0 D ) ( -SNR CSR ) ( 0 T ) */
+
+ dlasv2_(&a, &fb, &d__, &s1, &s2, &snr, &csr, &snl, &csl);
+
+ if (abs(csl) >= abs(snl) || abs(csr) >= abs(snr)) {
+
+/* Compute the (1,1) and (1,2) elements of U'*A and V'*B, */
+/* and (1,2) element of |U|'*|A| and |V|'*|B|. */
+
+ ua11r = csl * *a1;
+ z__2.r = csl * a2->r, z__2.i = csl * a2->i;
+ z__4.r = snl * d1.r, z__4.i = snl * d1.i;
+ z__3.r = *a3 * z__4.r, z__3.i = *a3 * z__4.i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ ua12.r = z__1.r, ua12.i = z__1.i;
+
+ vb11r = csr * *b1;
+ z__2.r = csr * b2->r, z__2.i = csr * b2->i;
+ z__4.r = snr * d1.r, z__4.i = snr * d1.i;
+ z__3.r = *b3 * z__4.r, z__3.i = *b3 * z__4.i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ vb12.r = z__1.r, vb12.i = z__1.i;
+
+ aua12 = abs(csl) * ((d__1 = a2->r, abs(d__1)) + (d__2 = d_imag(a2)
+ , abs(d__2))) + abs(snl) * abs(*a3);
+ avb12 = abs(csr) * ((d__1 = b2->r, abs(d__1)) + (d__2 = d_imag(b2)
+ , abs(d__2))) + abs(snr) * abs(*b3);
+
+/* zero (1,2) elements of U'*A and V'*B */
+
+ if (abs(ua11r) + ((d__1 = ua12.r, abs(d__1)) + (d__2 = d_imag(&
+ ua12), abs(d__2))) == 0.) {
+ z__2.r = vb11r, z__2.i = 0.;
+ z__1.r = -z__2.r, z__1.i = -z__2.i;
+ d_cnjg(&z__3, &vb12);
+ zlartg_(&z__1, &z__3, csq, snq, &r__);
+ } else if (abs(vb11r) + ((d__1 = vb12.r, abs(d__1)) + (d__2 =
+ d_imag(&vb12), abs(d__2))) == 0.) {
+ z__2.r = ua11r, z__2.i = 0.;
+ z__1.r = -z__2.r, z__1.i = -z__2.i;
+ d_cnjg(&z__3, &ua12);
+ zlartg_(&z__1, &z__3, csq, snq, &r__);
+ } else if (aua12 / (abs(ua11r) + ((d__1 = ua12.r, abs(d__1)) + (
+ d__2 = d_imag(&ua12), abs(d__2)))) <= avb12 / (abs(vb11r)
+ + ((d__3 = vb12.r, abs(d__3)) + (d__4 = d_imag(&vb12),
+ abs(d__4))))) {
+ z__2.r = ua11r, z__2.i = 0.;
+ z__1.r = -z__2.r, z__1.i = -z__2.i;
+ d_cnjg(&z__3, &ua12);
+ zlartg_(&z__1, &z__3, csq, snq, &r__);
+ } else {
+ z__2.r = vb11r, z__2.i = 0.;
+ z__1.r = -z__2.r, z__1.i = -z__2.i;
+ d_cnjg(&z__3, &vb12);
+ zlartg_(&z__1, &z__3, csq, snq, &r__);
+ }
+
+ *csu = csl;
+ z__2.r = -d1.r, z__2.i = -d1.i;
+ z__1.r = snl * z__2.r, z__1.i = snl * z__2.i;
+ snu->r = z__1.r, snu->i = z__1.i;
+ *csv = csr;
+ z__2.r = -d1.r, z__2.i = -d1.i;
+ z__1.r = snr * z__2.r, z__1.i = snr * z__2.i;
+ snv->r = z__1.r, snv->i = z__1.i;
+
+ } else {
+
+/* Compute the (2,1) and (2,2) elements of U'*A and V'*B, */
+/* and (2,2) element of |U|'*|A| and |V|'*|B|. */
+
+ d_cnjg(&z__4, &d1);
+ z__3.r = -z__4.r, z__3.i = -z__4.i;
+ z__2.r = snl * z__3.r, z__2.i = snl * z__3.i;
+ z__1.r = *a1 * z__2.r, z__1.i = *a1 * z__2.i;
+ ua21.r = z__1.r, ua21.i = z__1.i;
+ d_cnjg(&z__5, &d1);
+ z__4.r = -z__5.r, z__4.i = -z__5.i;
+ z__3.r = snl * z__4.r, z__3.i = snl * z__4.i;
+ z__2.r = z__3.r * a2->r - z__3.i * a2->i, z__2.i = z__3.r * a2->i
+ + z__3.i * a2->r;
+ d__1 = csl * *a3;
+ z__1.r = z__2.r + d__1, z__1.i = z__2.i;
+ ua22.r = z__1.r, ua22.i = z__1.i;
+
+ d_cnjg(&z__4, &d1);
+ z__3.r = -z__4.r, z__3.i = -z__4.i;
+ z__2.r = snr * z__3.r, z__2.i = snr * z__3.i;
+ z__1.r = *b1 * z__2.r, z__1.i = *b1 * z__2.i;
+ vb21.r = z__1.r, vb21.i = z__1.i;
+ d_cnjg(&z__5, &d1);
+ z__4.r = -z__5.r, z__4.i = -z__5.i;
+ z__3.r = snr * z__4.r, z__3.i = snr * z__4.i;
+ z__2.r = z__3.r * b2->r - z__3.i * b2->i, z__2.i = z__3.r * b2->i
+ + z__3.i * b2->r;
+ d__1 = csr * *b3;
+ z__1.r = z__2.r + d__1, z__1.i = z__2.i;
+ vb22.r = z__1.r, vb22.i = z__1.i;
+
+ aua22 = abs(snl) * ((d__1 = a2->r, abs(d__1)) + (d__2 = d_imag(a2)
+ , abs(d__2))) + abs(csl) * abs(*a3);
+ avb22 = abs(snr) * ((d__1 = b2->r, abs(d__1)) + (d__2 = d_imag(b2)
+ , abs(d__2))) + abs(csr) * abs(*b3);
+
+/* zero (2,2) elements of U'*A and V'*B, and then swap. */
+
+ if ((d__1 = ua21.r, abs(d__1)) + (d__2 = d_imag(&ua21), abs(d__2))
+ + ((d__3 = ua22.r, abs(d__3)) + (d__4 = d_imag(&ua22),
+ abs(d__4))) == 0.) {
+ d_cnjg(&z__2, &vb21);
+ z__1.r = -z__2.r, z__1.i = -z__2.i;
+ d_cnjg(&z__3, &vb22);
+ zlartg_(&z__1, &z__3, csq, snq, &r__);
+ } else if ((d__1 = vb21.r, abs(d__1)) + (d__2 = d_imag(&vb21),
+ abs(d__2)) + z_abs(&vb22) == 0.) {
+ d_cnjg(&z__2, &ua21);
+ z__1.r = -z__2.r, z__1.i = -z__2.i;
+ d_cnjg(&z__3, &ua22);
+ zlartg_(&z__1, &z__3, csq, snq, &r__);
+ } else if (aua22 / ((d__1 = ua21.r, abs(d__1)) + (d__2 = d_imag(&
+ ua21), abs(d__2)) + ((d__3 = ua22.r, abs(d__3)) + (d__4 =
+ d_imag(&ua22), abs(d__4)))) <= avb22 / ((d__5 = vb21.r,
+ abs(d__5)) + (d__6 = d_imag(&vb21), abs(d__6)) + ((d__7 =
+ vb22.r, abs(d__7)) + (d__8 = d_imag(&vb22), abs(d__8)))))
+ {
+ d_cnjg(&z__2, &ua21);
+ z__1.r = -z__2.r, z__1.i = -z__2.i;
+ d_cnjg(&z__3, &ua22);
+ zlartg_(&z__1, &z__3, csq, snq, &r__);
+ } else {
+ d_cnjg(&z__2, &vb21);
+ z__1.r = -z__2.r, z__1.i = -z__2.i;
+ d_cnjg(&z__3, &vb22);
+ zlartg_(&z__1, &z__3, csq, snq, &r__);
+ }
+
+ *csu = snl;
+ z__1.r = csl * d1.r, z__1.i = csl * d1.i;
+ snu->r = z__1.r, snu->i = z__1.i;
+ *csv = snr;
+ z__1.r = csr * d1.r, z__1.i = csr * d1.i;
+ snv->r = z__1.r, snv->i = z__1.i;
+
+ }
+
+ } else {
+
+/* Input matrices A and B are lower triangular matrices */
+
+/* Form matrix C = A*adj(B) = ( a 0 ) */
+/* ( c d ) */
+
+ a = *a1 * *b3;
+ d__ = *a3 * *b1;
+ z__2.r = *b3 * a2->r, z__2.i = *b3 * a2->i;
+ z__3.r = *a3 * b2->r, z__3.i = *a3 * b2->i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ c__.r = z__1.r, c__.i = z__1.i;
+ fc = z_abs(&c__);
+
+/* Transform complex 2-by-2 matrix C to real matrix by unitary */
+/* diagonal matrix diag(d1,1). */
+
+ d1.r = 1., d1.i = 0.;
+ if (fc != 0.) {
+ z__1.r = c__.r / fc, z__1.i = c__.i / fc;
+ d1.r = z__1.r, d1.i = z__1.i;
+ }
+
+/* The SVD of real 2 by 2 triangular C */
+
+/* ( CSL -SNL )*( A 0 )*( CSR SNR ) = ( R 0 ) */
+/* ( SNL CSL ) ( C D ) ( -SNR CSR ) ( 0 T ) */
+
+ dlasv2_(&a, &fc, &d__, &s1, &s2, &snr, &csr, &snl, &csl);
+
+ if (abs(csr) >= abs(snr) || abs(csl) >= abs(snl)) {
+
+/* Compute the (2,1) and (2,2) elements of U'*A and V'*B, */
+/* and (2,1) element of |U|'*|A| and |V|'*|B|. */
+
+ z__4.r = -d1.r, z__4.i = -d1.i;
+ z__3.r = snr * z__4.r, z__3.i = snr * z__4.i;
+ z__2.r = *a1 * z__3.r, z__2.i = *a1 * z__3.i;
+ z__5.r = csr * a2->r, z__5.i = csr * a2->i;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ ua21.r = z__1.r, ua21.i = z__1.i;
+ ua22r = csr * *a3;
+
+ z__4.r = -d1.r, z__4.i = -d1.i;
+ z__3.r = snl * z__4.r, z__3.i = snl * z__4.i;
+ z__2.r = *b1 * z__3.r, z__2.i = *b1 * z__3.i;
+ z__5.r = csl * b2->r, z__5.i = csl * b2->i;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ vb21.r = z__1.r, vb21.i = z__1.i;
+ vb22r = csl * *b3;
+
+ aua21 = abs(snr) * abs(*a1) + abs(csr) * ((d__1 = a2->r, abs(d__1)
+ ) + (d__2 = d_imag(a2), abs(d__2)));
+ avb21 = abs(snl) * abs(*b1) + abs(csl) * ((d__1 = b2->r, abs(d__1)
+ ) + (d__2 = d_imag(b2), abs(d__2)));
+
+/* zero (2,1) elements of U'*A and V'*B. */
+
+ if ((d__1 = ua21.r, abs(d__1)) + (d__2 = d_imag(&ua21), abs(d__2))
+ + abs(ua22r) == 0.) {
+ z__1.r = vb22r, z__1.i = 0.;
+ zlartg_(&z__1, &vb21, csq, snq, &r__);
+ } else if ((d__1 = vb21.r, abs(d__1)) + (d__2 = d_imag(&vb21),
+ abs(d__2)) + abs(vb22r) == 0.) {
+ z__1.r = ua22r, z__1.i = 0.;
+ zlartg_(&z__1, &ua21, csq, snq, &r__);
+ } else if (aua21 / ((d__1 = ua21.r, abs(d__1)) + (d__2 = d_imag(&
+ ua21), abs(d__2)) + abs(ua22r)) <= avb21 / ((d__3 =
+ vb21.r, abs(d__3)) + (d__4 = d_imag(&vb21), abs(d__4)) +
+ abs(vb22r))) {
+ z__1.r = ua22r, z__1.i = 0.;
+ zlartg_(&z__1, &ua21, csq, snq, &r__);
+ } else {
+ z__1.r = vb22r, z__1.i = 0.;
+ zlartg_(&z__1, &vb21, csq, snq, &r__);
+ }
+
+ *csu = csr;
+ d_cnjg(&z__3, &d1);
+ z__2.r = -z__3.r, z__2.i = -z__3.i;
+ z__1.r = snr * z__2.r, z__1.i = snr * z__2.i;
+ snu->r = z__1.r, snu->i = z__1.i;
+ *csv = csl;
+ d_cnjg(&z__3, &d1);
+ z__2.r = -z__3.r, z__2.i = -z__3.i;
+ z__1.r = snl * z__2.r, z__1.i = snl * z__2.i;
+ snv->r = z__1.r, snv->i = z__1.i;
+
+ } else {
+
+/* Compute the (1,1) and (1,2) elements of U'*A and V'*B, */
+/* and (1,1) element of |U|'*|A| and |V|'*|B|. */
+
+ d__1 = csr * *a1;
+ d_cnjg(&z__4, &d1);
+ z__3.r = snr * z__4.r, z__3.i = snr * z__4.i;
+ z__2.r = z__3.r * a2->r - z__3.i * a2->i, z__2.i = z__3.r * a2->i
+ + z__3.i * a2->r;
+ z__1.r = d__1 + z__2.r, z__1.i = z__2.i;
+ ua11.r = z__1.r, ua11.i = z__1.i;
+ d_cnjg(&z__3, &d1);
+ z__2.r = snr * z__3.r, z__2.i = snr * z__3.i;
+ z__1.r = *a3 * z__2.r, z__1.i = *a3 * z__2.i;
+ ua12.r = z__1.r, ua12.i = z__1.i;
+
+ d__1 = csl * *b1;
+ d_cnjg(&z__4, &d1);
+ z__3.r = snl * z__4.r, z__3.i = snl * z__4.i;
+ z__2.r = z__3.r * b2->r - z__3.i * b2->i, z__2.i = z__3.r * b2->i
+ + z__3.i * b2->r;
+ z__1.r = d__1 + z__2.r, z__1.i = z__2.i;
+ vb11.r = z__1.r, vb11.i = z__1.i;
+ d_cnjg(&z__3, &d1);
+ z__2.r = snl * z__3.r, z__2.i = snl * z__3.i;
+ z__1.r = *b3 * z__2.r, z__1.i = *b3 * z__2.i;
+ vb12.r = z__1.r, vb12.i = z__1.i;
+
+ aua11 = abs(csr) * abs(*a1) + abs(snr) * ((d__1 = a2->r, abs(d__1)
+ ) + (d__2 = d_imag(a2), abs(d__2)));
+ avb11 = abs(csl) * abs(*b1) + abs(snl) * ((d__1 = b2->r, abs(d__1)
+ ) + (d__2 = d_imag(b2), abs(d__2)));
+
+/* zero (1,1) elements of U'*A and V'*B, and then swap. */
+
+ if ((d__1 = ua11.r, abs(d__1)) + (d__2 = d_imag(&ua11), abs(d__2))
+ + ((d__3 = ua12.r, abs(d__3)) + (d__4 = d_imag(&ua12),
+ abs(d__4))) == 0.) {
+ zlartg_(&vb12, &vb11, csq, snq, &r__);
+ } else if ((d__1 = vb11.r, abs(d__1)) + (d__2 = d_imag(&vb11),
+ abs(d__2)) + ((d__3 = vb12.r, abs(d__3)) + (d__4 = d_imag(
+ &vb12), abs(d__4))) == 0.) {
+ zlartg_(&ua12, &ua11, csq, snq, &r__);
+ } else if (aua11 / ((d__1 = ua11.r, abs(d__1)) + (d__2 = d_imag(&
+ ua11), abs(d__2)) + ((d__3 = ua12.r, abs(d__3)) + (d__4 =
+ d_imag(&ua12), abs(d__4)))) <= avb11 / ((d__5 = vb11.r,
+ abs(d__5)) + (d__6 = d_imag(&vb11), abs(d__6)) + ((d__7 =
+ vb12.r, abs(d__7)) + (d__8 = d_imag(&vb12), abs(d__8)))))
+ {
+ zlartg_(&ua12, &ua11, csq, snq, &r__);
+ } else {
+ zlartg_(&vb12, &vb11, csq, snq, &r__);
+ }
+
+ *csu = snr;
+ d_cnjg(&z__2, &d1);
+ z__1.r = csr * z__2.r, z__1.i = csr * z__2.i;
+ snu->r = z__1.r, snu->i = z__1.i;
+ *csv = snl;
+ d_cnjg(&z__2, &d1);
+ z__1.r = csl * z__2.r, z__1.i = csl * z__2.i;
+ snv->r = z__1.r, snv->i = z__1.i;
+
+ }
+
+ }
+
+ return 0;
+
+/* End of ZLAGS2 */
+
+} /* zlags2_ */
diff --git a/contrib/libs/clapack/zlagtm.c b/contrib/libs/clapack/zlagtm.c
new file mode 100644
index 0000000000..dcd377562a
--- /dev/null
+++ b/contrib/libs/clapack/zlagtm.c
@@ -0,0 +1,599 @@
+/* zlagtm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlagtm_(char *trans, integer *n, integer *nrhs,
+ doublereal *alpha, doublecomplex *dl, doublecomplex *d__,
+ doublecomplex *du, doublecomplex *x, integer *ldx, doublereal *beta,
+ doublecomplex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6, i__7, i__8, i__9, i__10;
+ doublecomplex z__1, z__2, z__3, z__4, z__5, z__6, z__7, z__8, z__9;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAGTM performs a matrix-vector product of the form */
+
+/* B := alpha * A * X + beta * B */
+
+/* where A is a tridiagonal matrix of order N, B and X are N by NRHS */
+/* matrices, and alpha and beta are real scalars, each of which may be */
+/* 0., 1., or -1. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the operation applied to A. */
+/* = 'N': No transpose, B := alpha * A * X + beta * B */
+/* = 'T': Transpose, B := alpha * A**T * X + beta * B */
+/* = 'C': Conjugate transpose, B := alpha * A**H * X + beta * B */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices X and B. */
+
+/* ALPHA (input) DOUBLE PRECISION */
+/* The scalar alpha. ALPHA must be 0., 1., or -1.; otherwise, */
+/* it is assumed to be 0. */
+
+/* DL (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) sub-diagonal elements of T. */
+
+/* D (input) COMPLEX*16 array, dimension (N) */
+/* The diagonal elements of T. */
+
+/* DU (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) super-diagonal elements of T. */
+
+/* X (input) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* The N by NRHS matrix X. */
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(N,1). */
+
+/* BETA (input) DOUBLE PRECISION */
+/* The scalar beta. BETA must be 0., 1., or -1.; otherwise, */
+/* it is assumed to be 1. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the N by NRHS matrix B. */
+/* On exit, B is overwritten by the matrix expression */
+/* B := alpha * A * X + beta * B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(N,1). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --dl;
+ --d__;
+ --du;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Multiply B by BETA if BETA.NE.1. */
+
+ if (*beta == 0.) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0., b[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if (*beta == -1.) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__1.r = -b[i__4].r, z__1.i = -b[i__4].i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+
+ if (*alpha == 1.) {
+ if (lsame_(trans, "N")) {
+
+/* Compute B := B + A*X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ i__4 = j * x_dim1 + 1;
+ z__2.r = d__[1].r * x[i__4].r - d__[1].i * x[i__4].i,
+ z__2.i = d__[1].r * x[i__4].i + d__[1].i * x[i__4]
+ .r;
+ z__1.r = b[i__3].r + z__2.r, z__1.i = b[i__3].i + z__2.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ } else {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ i__4 = j * x_dim1 + 1;
+ z__3.r = d__[1].r * x[i__4].r - d__[1].i * x[i__4].i,
+ z__3.i = d__[1].r * x[i__4].i + d__[1].i * x[i__4]
+ .r;
+ z__2.r = b[i__3].r + z__3.r, z__2.i = b[i__3].i + z__3.i;
+ i__5 = j * x_dim1 + 2;
+ z__4.r = du[1].r * x[i__5].r - du[1].i * x[i__5].i,
+ z__4.i = du[1].r * x[i__5].i + du[1].i * x[i__5]
+ .r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ i__2 = *n + j * b_dim1;
+ i__3 = *n + j * b_dim1;
+ i__4 = *n - 1;
+ i__5 = *n - 1 + j * x_dim1;
+ z__3.r = dl[i__4].r * x[i__5].r - dl[i__4].i * x[i__5].i,
+ z__3.i = dl[i__4].r * x[i__5].i + dl[i__4].i * x[
+ i__5].r;
+ z__2.r = b[i__3].r + z__3.r, z__2.i = b[i__3].i + z__3.i;
+ i__6 = *n;
+ i__7 = *n + j * x_dim1;
+ z__4.r = d__[i__6].r * x[i__7].r - d__[i__6].i * x[i__7]
+ .i, z__4.i = d__[i__6].r * x[i__7].i + d__[i__6]
+ .i * x[i__7].r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ - 1;
+ i__6 = i__ - 1 + j * x_dim1;
+ z__4.r = dl[i__5].r * x[i__6].r - dl[i__5].i * x[i__6]
+ .i, z__4.i = dl[i__5].r * x[i__6].i + dl[i__5]
+ .i * x[i__6].r;
+ z__3.r = b[i__4].r + z__4.r, z__3.i = b[i__4].i +
+ z__4.i;
+ i__7 = i__;
+ i__8 = i__ + j * x_dim1;
+ z__5.r = d__[i__7].r * x[i__8].r - d__[i__7].i * x[
+ i__8].i, z__5.i = d__[i__7].r * x[i__8].i +
+ d__[i__7].i * x[i__8].r;
+ z__2.r = z__3.r + z__5.r, z__2.i = z__3.i + z__5.i;
+ i__9 = i__;
+ i__10 = i__ + 1 + j * x_dim1;
+ z__6.r = du[i__9].r * x[i__10].r - du[i__9].i * x[
+ i__10].i, z__6.i = du[i__9].r * x[i__10].i +
+ du[i__9].i * x[i__10].r;
+ z__1.r = z__2.r + z__6.r, z__1.i = z__2.i + z__6.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else if (lsame_(trans, "T")) {
+
+/* Compute B := B + A**T * X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ i__4 = j * x_dim1 + 1;
+ z__2.r = d__[1].r * x[i__4].r - d__[1].i * x[i__4].i,
+ z__2.i = d__[1].r * x[i__4].i + d__[1].i * x[i__4]
+ .r;
+ z__1.r = b[i__3].r + z__2.r, z__1.i = b[i__3].i + z__2.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ } else {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ i__4 = j * x_dim1 + 1;
+ z__3.r = d__[1].r * x[i__4].r - d__[1].i * x[i__4].i,
+ z__3.i = d__[1].r * x[i__4].i + d__[1].i * x[i__4]
+ .r;
+ z__2.r = b[i__3].r + z__3.r, z__2.i = b[i__3].i + z__3.i;
+ i__5 = j * x_dim1 + 2;
+ z__4.r = dl[1].r * x[i__5].r - dl[1].i * x[i__5].i,
+ z__4.i = dl[1].r * x[i__5].i + dl[1].i * x[i__5]
+ .r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ i__2 = *n + j * b_dim1;
+ i__3 = *n + j * b_dim1;
+ i__4 = *n - 1;
+ i__5 = *n - 1 + j * x_dim1;
+ z__3.r = du[i__4].r * x[i__5].r - du[i__4].i * x[i__5].i,
+ z__3.i = du[i__4].r * x[i__5].i + du[i__4].i * x[
+ i__5].r;
+ z__2.r = b[i__3].r + z__3.r, z__2.i = b[i__3].i + z__3.i;
+ i__6 = *n;
+ i__7 = *n + j * x_dim1;
+ z__4.r = d__[i__6].r * x[i__7].r - d__[i__6].i * x[i__7]
+ .i, z__4.i = d__[i__6].r * x[i__7].i + d__[i__6]
+ .i * x[i__7].r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ - 1;
+ i__6 = i__ - 1 + j * x_dim1;
+ z__4.r = du[i__5].r * x[i__6].r - du[i__5].i * x[i__6]
+ .i, z__4.i = du[i__5].r * x[i__6].i + du[i__5]
+ .i * x[i__6].r;
+ z__3.r = b[i__4].r + z__4.r, z__3.i = b[i__4].i +
+ z__4.i;
+ i__7 = i__;
+ i__8 = i__ + j * x_dim1;
+ z__5.r = d__[i__7].r * x[i__8].r - d__[i__7].i * x[
+ i__8].i, z__5.i = d__[i__7].r * x[i__8].i +
+ d__[i__7].i * x[i__8].r;
+ z__2.r = z__3.r + z__5.r, z__2.i = z__3.i + z__5.i;
+ i__9 = i__;
+ i__10 = i__ + 1 + j * x_dim1;
+ z__6.r = dl[i__9].r * x[i__10].r - dl[i__9].i * x[
+ i__10].i, z__6.i = dl[i__9].r * x[i__10].i +
+ dl[i__9].i * x[i__10].r;
+ z__1.r = z__2.r + z__6.r, z__1.i = z__2.i + z__6.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L70: */
+ }
+ }
+/* L80: */
+ }
+ } else if (lsame_(trans, "C")) {
+
+/* Compute B := B + A**H * X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ d_cnjg(&z__3, &d__[1]);
+ i__4 = j * x_dim1 + 1;
+ z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i, z__2.i =
+ z__3.r * x[i__4].i + z__3.i * x[i__4].r;
+ z__1.r = b[i__3].r + z__2.r, z__1.i = b[i__3].i + z__2.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ } else {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ d_cnjg(&z__4, &d__[1]);
+ i__4 = j * x_dim1 + 1;
+ z__3.r = z__4.r * x[i__4].r - z__4.i * x[i__4].i, z__3.i =
+ z__4.r * x[i__4].i + z__4.i * x[i__4].r;
+ z__2.r = b[i__3].r + z__3.r, z__2.i = b[i__3].i + z__3.i;
+ d_cnjg(&z__6, &dl[1]);
+ i__5 = j * x_dim1 + 2;
+ z__5.r = z__6.r * x[i__5].r - z__6.i * x[i__5].i, z__5.i =
+ z__6.r * x[i__5].i + z__6.i * x[i__5].r;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ i__2 = *n + j * b_dim1;
+ i__3 = *n + j * b_dim1;
+ d_cnjg(&z__4, &du[*n - 1]);
+ i__4 = *n - 1 + j * x_dim1;
+ z__3.r = z__4.r * x[i__4].r - z__4.i * x[i__4].i, z__3.i =
+ z__4.r * x[i__4].i + z__4.i * x[i__4].r;
+ z__2.r = b[i__3].r + z__3.r, z__2.i = b[i__3].i + z__3.i;
+ d_cnjg(&z__6, &d__[*n]);
+ i__5 = *n + j * x_dim1;
+ z__5.r = z__6.r * x[i__5].r - z__6.i * x[i__5].i, z__5.i =
+ z__6.r * x[i__5].i + z__6.i * x[i__5].r;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ d_cnjg(&z__5, &du[i__ - 1]);
+ i__5 = i__ - 1 + j * x_dim1;
+ z__4.r = z__5.r * x[i__5].r - z__5.i * x[i__5].i,
+ z__4.i = z__5.r * x[i__5].i + z__5.i * x[i__5]
+ .r;
+ z__3.r = b[i__4].r + z__4.r, z__3.i = b[i__4].i +
+ z__4.i;
+ d_cnjg(&z__7, &d__[i__]);
+ i__6 = i__ + j * x_dim1;
+ z__6.r = z__7.r * x[i__6].r - z__7.i * x[i__6].i,
+ z__6.i = z__7.r * x[i__6].i + z__7.i * x[i__6]
+ .r;
+ z__2.r = z__3.r + z__6.r, z__2.i = z__3.i + z__6.i;
+ d_cnjg(&z__9, &dl[i__]);
+ i__7 = i__ + 1 + j * x_dim1;
+ z__8.r = z__9.r * x[i__7].r - z__9.i * x[i__7].i,
+ z__8.i = z__9.r * x[i__7].i + z__9.i * x[i__7]
+ .r;
+ z__1.r = z__2.r + z__8.r, z__1.i = z__2.i + z__8.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L90: */
+ }
+ }
+/* L100: */
+ }
+ }
+ } else if (*alpha == -1.) {
+ if (lsame_(trans, "N")) {
+
+/* Compute B := B - A*X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ i__4 = j * x_dim1 + 1;
+ z__2.r = d__[1].r * x[i__4].r - d__[1].i * x[i__4].i,
+ z__2.i = d__[1].r * x[i__4].i + d__[1].i * x[i__4]
+ .r;
+ z__1.r = b[i__3].r - z__2.r, z__1.i = b[i__3].i - z__2.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ } else {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ i__4 = j * x_dim1 + 1;
+ z__3.r = d__[1].r * x[i__4].r - d__[1].i * x[i__4].i,
+ z__3.i = d__[1].r * x[i__4].i + d__[1].i * x[i__4]
+ .r;
+ z__2.r = b[i__3].r - z__3.r, z__2.i = b[i__3].i - z__3.i;
+ i__5 = j * x_dim1 + 2;
+ z__4.r = du[1].r * x[i__5].r - du[1].i * x[i__5].i,
+ z__4.i = du[1].r * x[i__5].i + du[1].i * x[i__5]
+ .r;
+ z__1.r = z__2.r - z__4.r, z__1.i = z__2.i - z__4.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ i__2 = *n + j * b_dim1;
+ i__3 = *n + j * b_dim1;
+ i__4 = *n - 1;
+ i__5 = *n - 1 + j * x_dim1;
+ z__3.r = dl[i__4].r * x[i__5].r - dl[i__4].i * x[i__5].i,
+ z__3.i = dl[i__4].r * x[i__5].i + dl[i__4].i * x[
+ i__5].r;
+ z__2.r = b[i__3].r - z__3.r, z__2.i = b[i__3].i - z__3.i;
+ i__6 = *n;
+ i__7 = *n + j * x_dim1;
+ z__4.r = d__[i__6].r * x[i__7].r - d__[i__6].i * x[i__7]
+ .i, z__4.i = d__[i__6].r * x[i__7].i + d__[i__6]
+ .i * x[i__7].r;
+ z__1.r = z__2.r - z__4.r, z__1.i = z__2.i - z__4.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ - 1;
+ i__6 = i__ - 1 + j * x_dim1;
+ z__4.r = dl[i__5].r * x[i__6].r - dl[i__5].i * x[i__6]
+ .i, z__4.i = dl[i__5].r * x[i__6].i + dl[i__5]
+ .i * x[i__6].r;
+ z__3.r = b[i__4].r - z__4.r, z__3.i = b[i__4].i -
+ z__4.i;
+ i__7 = i__;
+ i__8 = i__ + j * x_dim1;
+ z__5.r = d__[i__7].r * x[i__8].r - d__[i__7].i * x[
+ i__8].i, z__5.i = d__[i__7].r * x[i__8].i +
+ d__[i__7].i * x[i__8].r;
+ z__2.r = z__3.r - z__5.r, z__2.i = z__3.i - z__5.i;
+ i__9 = i__;
+ i__10 = i__ + 1 + j * x_dim1;
+ z__6.r = du[i__9].r * x[i__10].r - du[i__9].i * x[
+ i__10].i, z__6.i = du[i__9].r * x[i__10].i +
+ du[i__9].i * x[i__10].r;
+ z__1.r = z__2.r - z__6.r, z__1.i = z__2.i - z__6.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+ } else if (lsame_(trans, "T")) {
+
+/* Compute B := B - A'*X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ i__4 = j * x_dim1 + 1;
+ z__2.r = d__[1].r * x[i__4].r - d__[1].i * x[i__4].i,
+ z__2.i = d__[1].r * x[i__4].i + d__[1].i * x[i__4]
+ .r;
+ z__1.r = b[i__3].r - z__2.r, z__1.i = b[i__3].i - z__2.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ } else {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ i__4 = j * x_dim1 + 1;
+ z__3.r = d__[1].r * x[i__4].r - d__[1].i * x[i__4].i,
+ z__3.i = d__[1].r * x[i__4].i + d__[1].i * x[i__4]
+ .r;
+ z__2.r = b[i__3].r - z__3.r, z__2.i = b[i__3].i - z__3.i;
+ i__5 = j * x_dim1 + 2;
+ z__4.r = dl[1].r * x[i__5].r - dl[1].i * x[i__5].i,
+ z__4.i = dl[1].r * x[i__5].i + dl[1].i * x[i__5]
+ .r;
+ z__1.r = z__2.r - z__4.r, z__1.i = z__2.i - z__4.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ i__2 = *n + j * b_dim1;
+ i__3 = *n + j * b_dim1;
+ i__4 = *n - 1;
+ i__5 = *n - 1 + j * x_dim1;
+ z__3.r = du[i__4].r * x[i__5].r - du[i__4].i * x[i__5].i,
+ z__3.i = du[i__4].r * x[i__5].i + du[i__4].i * x[
+ i__5].r;
+ z__2.r = b[i__3].r - z__3.r, z__2.i = b[i__3].i - z__3.i;
+ i__6 = *n;
+ i__7 = *n + j * x_dim1;
+ z__4.r = d__[i__6].r * x[i__7].r - d__[i__6].i * x[i__7]
+ .i, z__4.i = d__[i__6].r * x[i__7].i + d__[i__6]
+ .i * x[i__7].r;
+ z__1.r = z__2.r - z__4.r, z__1.i = z__2.i - z__4.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ - 1;
+ i__6 = i__ - 1 + j * x_dim1;
+ z__4.r = du[i__5].r * x[i__6].r - du[i__5].i * x[i__6]
+ .i, z__4.i = du[i__5].r * x[i__6].i + du[i__5]
+ .i * x[i__6].r;
+ z__3.r = b[i__4].r - z__4.r, z__3.i = b[i__4].i -
+ z__4.i;
+ i__7 = i__;
+ i__8 = i__ + j * x_dim1;
+ z__5.r = d__[i__7].r * x[i__8].r - d__[i__7].i * x[
+ i__8].i, z__5.i = d__[i__7].r * x[i__8].i +
+ d__[i__7].i * x[i__8].r;
+ z__2.r = z__3.r - z__5.r, z__2.i = z__3.i - z__5.i;
+ i__9 = i__;
+ i__10 = i__ + 1 + j * x_dim1;
+ z__6.r = dl[i__9].r * x[i__10].r - dl[i__9].i * x[
+ i__10].i, z__6.i = dl[i__9].r * x[i__10].i +
+ dl[i__9].i * x[i__10].r;
+ z__1.r = z__2.r - z__6.r, z__1.i = z__2.i - z__6.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L130: */
+ }
+ }
+/* L140: */
+ }
+ } else if (lsame_(trans, "C")) {
+
+/* Compute B := B - A'*X */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ if (*n == 1) {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ d_cnjg(&z__3, &d__[1]);
+ i__4 = j * x_dim1 + 1;
+ z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i, z__2.i =
+ z__3.r * x[i__4].i + z__3.i * x[i__4].r;
+ z__1.r = b[i__3].r - z__2.r, z__1.i = b[i__3].i - z__2.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ } else {
+ i__2 = j * b_dim1 + 1;
+ i__3 = j * b_dim1 + 1;
+ d_cnjg(&z__4, &d__[1]);
+ i__4 = j * x_dim1 + 1;
+ z__3.r = z__4.r * x[i__4].r - z__4.i * x[i__4].i, z__3.i =
+ z__4.r * x[i__4].i + z__4.i * x[i__4].r;
+ z__2.r = b[i__3].r - z__3.r, z__2.i = b[i__3].i - z__3.i;
+ d_cnjg(&z__6, &dl[1]);
+ i__5 = j * x_dim1 + 2;
+ z__5.r = z__6.r * x[i__5].r - z__6.i * x[i__5].i, z__5.i =
+ z__6.r * x[i__5].i + z__6.i * x[i__5].r;
+ z__1.r = z__2.r - z__5.r, z__1.i = z__2.i - z__5.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ i__2 = *n + j * b_dim1;
+ i__3 = *n + j * b_dim1;
+ d_cnjg(&z__4, &du[*n - 1]);
+ i__4 = *n - 1 + j * x_dim1;
+ z__3.r = z__4.r * x[i__4].r - z__4.i * x[i__4].i, z__3.i =
+ z__4.r * x[i__4].i + z__4.i * x[i__4].r;
+ z__2.r = b[i__3].r - z__3.r, z__2.i = b[i__3].i - z__3.i;
+ d_cnjg(&z__6, &d__[*n]);
+ i__5 = *n + j * x_dim1;
+ z__5.r = z__6.r * x[i__5].r - z__6.i * x[i__5].i, z__5.i =
+ z__6.r * x[i__5].i + z__6.i * x[i__5].r;
+ z__1.r = z__2.r - z__5.r, z__1.i = z__2.i - z__5.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ d_cnjg(&z__5, &du[i__ - 1]);
+ i__5 = i__ - 1 + j * x_dim1;
+ z__4.r = z__5.r * x[i__5].r - z__5.i * x[i__5].i,
+ z__4.i = z__5.r * x[i__5].i + z__5.i * x[i__5]
+ .r;
+ z__3.r = b[i__4].r - z__4.r, z__3.i = b[i__4].i -
+ z__4.i;
+ d_cnjg(&z__7, &d__[i__]);
+ i__6 = i__ + j * x_dim1;
+ z__6.r = z__7.r * x[i__6].r - z__7.i * x[i__6].i,
+ z__6.i = z__7.r * x[i__6].i + z__7.i * x[i__6]
+ .r;
+ z__2.r = z__3.r - z__6.r, z__2.i = z__3.i - z__6.i;
+ d_cnjg(&z__9, &dl[i__]);
+ i__7 = i__ + 1 + j * x_dim1;
+ z__8.r = z__9.r * x[i__7].r - z__9.i * x[i__7].i,
+ z__8.i = z__9.r * x[i__7].i + z__9.i * x[i__7]
+ .r;
+ z__1.r = z__2.r - z__8.r, z__1.i = z__2.i - z__8.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L150: */
+ }
+ }
+/* L160: */
+ }
+ }
+ }
+ return 0;
+
+/* End of ZLAGTM */
+
+} /* zlagtm_ */
diff --git a/contrib/libs/clapack/zlahef.c b/contrib/libs/clapack/zlahef.c
new file mode 100644
index 0000000000..048db906be
--- /dev/null
+++ b/contrib/libs/clapack/zlahef.c
@@ -0,0 +1,938 @@
+/* zlahef.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zlahef_(char *uplo, integer *n, integer *nb, integer *kb,
+ doublecomplex *a, integer *lda, integer *ipiv, doublecomplex *w,
+ integer *ldw, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_imag(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *), z_div(doublecomplex *,
+ doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer j, k;
+ doublereal t, r1;
+ doublecomplex d11, d21, d22;
+ integer jb, jj, kk, jp, kp, kw, kkw, imax, jmax;
+ doublereal alpha;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ integer kstep;
+ extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *),
+ zcopy_(integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *), zswap_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ doublereal absakk;
+ extern /* Subroutine */ int zdscal_(integer *, doublereal *,
+ doublecomplex *, integer *);
+ doublereal colmax;
+ extern /* Subroutine */ int zlacgv_(integer *, doublecomplex *, integer *)
+ ;
+ extern integer izamax_(integer *, doublecomplex *, integer *);
+ doublereal rowmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAHEF computes a partial factorization of a complex Hermitian */
+/* matrix A using the Bunch-Kaufman diagonal pivoting method. The */
+/* partial factorization has the form: */
+
+/* A = ( I U12 ) ( A11 0 ) ( I 0 ) if UPLO = 'U', or: */
+/* ( 0 U22 ) ( 0 D ) ( U12' U22' ) */
+
+/* A = ( L11 0 ) ( D 0 ) ( L11' L21' ) if UPLO = 'L' */
+/* ( L21 I ) ( 0 A22 ) ( 0 I ) */
+
+/* where the order of D is at most NB. The actual order is returned in */
+/* the argument KB, and is either NB or NB-1, or N if N <= NB. */
+/* Note that U' denotes the conjugate transpose of U. */
+
+/* ZLAHEF is an auxiliary routine called by ZHETRF. It uses blocked code */
+/* (calling Level 3 BLAS) to update the submatrix A11 (if UPLO = 'U') or */
+/* A22 (if UPLO = 'L'). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* Hermitian matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NB (input) INTEGER */
+/* The maximum number of columns of the matrix A that should be */
+/* factored. NB should be at least 2 to allow for 2-by-2 pivot */
+/* blocks. */
+
+/* KB (output) INTEGER */
+/* The number of columns of A that were actually factored. */
+/* KB is either NB-1 or NB, or N if N <= NB. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+/* On exit, A contains details of the partial factorization. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If UPLO = 'U', only the last KB elements of IPIV are set; */
+/* if UPLO = 'L', only the first KB elements are set. */
+
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* W (workspace) COMPLEX*16 array, dimension (LDW,NB) */
+
+/* LDW (input) INTEGER */
+/* The leading dimension of the array W. LDW >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* > 0: if INFO = k, D(k,k) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ w_dim1 = *ldw;
+ w_offset = 1 + w_dim1;
+ w -= w_offset;
+
+ /* Function Body */
+ *info = 0;
+
+/* Initialize ALPHA for use in choosing pivot block size. */
+
+ alpha = (sqrt(17.) + 1.) / 8.;
+
+ if (lsame_(uplo, "U")) {
+
+/* Factorize the trailing columns of A using the upper triangle */
+/* of A and working backwards, and compute the matrix W = U12*D */
+/* for use in updating A11 (note that conjg(W) is actually stored) */
+
+/* K is the main loop index, decreasing from N in steps of 1 or 2 */
+
+/* KW is the column of W which corresponds to column K of A */
+
+ k = *n;
+L10:
+ kw = *nb + k - *n;
+
+/* Exit from loop */
+
+ if (k <= *n - *nb + 1 && *nb < *n || k < 1) {
+ goto L30;
+ }
+
+/* Copy column K of A to column KW of W and update it */
+
+ i__1 = k - 1;
+ zcopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &w[kw * w_dim1 + 1], &c__1);
+ i__1 = k + kw * w_dim1;
+ i__2 = k + k * a_dim1;
+ d__1 = a[i__2].r;
+ w[i__1].r = d__1, w[i__1].i = 0.;
+ if (k < *n) {
+ i__1 = *n - k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &k, &i__1, &z__1, &a[(k + 1) * a_dim1 + 1],
+ lda, &w[k + (kw + 1) * w_dim1], ldw, &c_b1, &w[kw *
+ w_dim1 + 1], &c__1);
+ i__1 = k + kw * w_dim1;
+ i__2 = k + kw * w_dim1;
+ d__1 = w[i__2].r;
+ w[i__1].r = d__1, w[i__1].i = 0.;
+ }
+
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ i__1 = k + kw * w_dim1;
+ absakk = (d__1 = w[i__1].r, abs(d__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ imax = izamax_(&i__1, &w[kw * w_dim1 + 1], &c__1);
+ i__1 = imax + kw * w_dim1;
+ colmax = (d__1 = w[i__1].r, abs(d__1)) + (d__2 = d_imag(&w[imax +
+ kw * w_dim1]), abs(d__2));
+ } else {
+ colmax = 0.;
+ }
+
+ if (max(absakk,colmax) == 0.) {
+
+/* Column K is zero: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ d__1 = a[i__2].r;
+ a[i__1].r = d__1, a[i__1].i = 0.;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* Copy column IMAX to column KW-1 of W and update it */
+
+ i__1 = imax - 1;
+ zcopy_(&i__1, &a[imax * a_dim1 + 1], &c__1, &w[(kw - 1) *
+ w_dim1 + 1], &c__1);
+ i__1 = imax + (kw - 1) * w_dim1;
+ i__2 = imax + imax * a_dim1;
+ d__1 = a[i__2].r;
+ w[i__1].r = d__1, w[i__1].i = 0.;
+ i__1 = k - imax;
+ zcopy_(&i__1, &a[imax + (imax + 1) * a_dim1], lda, &w[imax +
+ 1 + (kw - 1) * w_dim1], &c__1);
+ i__1 = k - imax;
+ zlacgv_(&i__1, &w[imax + 1 + (kw - 1) * w_dim1], &c__1);
+ if (k < *n) {
+ i__1 = *n - k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &k, &i__1, &z__1, &a[(k + 1) *
+ a_dim1 + 1], lda, &w[imax + (kw + 1) * w_dim1],
+ ldw, &c_b1, &w[(kw - 1) * w_dim1 + 1], &c__1);
+ i__1 = imax + (kw - 1) * w_dim1;
+ i__2 = imax + (kw - 1) * w_dim1;
+ d__1 = w[i__2].r;
+ w[i__1].r = d__1, w[i__1].i = 0.;
+ }
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = k - imax;
+ jmax = imax + izamax_(&i__1, &w[imax + 1 + (kw - 1) * w_dim1],
+ &c__1);
+ i__1 = jmax + (kw - 1) * w_dim1;
+ rowmax = (d__1 = w[i__1].r, abs(d__1)) + (d__2 = d_imag(&w[
+ jmax + (kw - 1) * w_dim1]), abs(d__2));
+ if (imax > 1) {
+ i__1 = imax - 1;
+ jmax = izamax_(&i__1, &w[(kw - 1) * w_dim1 + 1], &c__1);
+/* Computing MAX */
+ i__1 = jmax + (kw - 1) * w_dim1;
+ d__3 = rowmax, d__4 = (d__1 = w[i__1].r, abs(d__1)) + (
+ d__2 = d_imag(&w[jmax + (kw - 1) * w_dim1]), abs(
+ d__2));
+ rowmax = max(d__3,d__4);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else /* if(complicated condition) */ {
+ i__1 = imax + (kw - 1) * w_dim1;
+ if ((d__1 = w[i__1].r, abs(d__1)) >= alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+
+/* copy column KW-1 of W to column KW */
+
+ zcopy_(&k, &w[(kw - 1) * w_dim1 + 1], &c__1, &w[kw *
+ w_dim1 + 1], &c__1);
+ } else {
+
+/* interchange rows and columns K-1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+ }
+
+ kk = k - kstep + 1;
+ kkw = *nb + kk - *n;
+
+/* Updated column KP is already stored in column KKW of W */
+
+ if (kp != kk) {
+
+/* Copy non-updated column KK to column KP */
+
+ i__1 = kp + kp * a_dim1;
+ i__2 = kk + kk * a_dim1;
+ d__1 = a[i__2].r;
+ a[i__1].r = d__1, a[i__1].i = 0.;
+ i__1 = kk - 1 - kp;
+ zcopy_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + (kp +
+ 1) * a_dim1], lda);
+ i__1 = kk - 1 - kp;
+ zlacgv_(&i__1, &a[kp + (kp + 1) * a_dim1], lda);
+ i__1 = kp - 1;
+ zcopy_(&i__1, &a[kk * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1],
+ &c__1);
+
+/* Interchange rows KK and KP in last KK columns of A and W */
+
+ if (kk < *n) {
+ i__1 = *n - kk;
+ zswap_(&i__1, &a[kk + (kk + 1) * a_dim1], lda, &a[kp + (
+ kk + 1) * a_dim1], lda);
+ }
+ i__1 = *n - kk + 1;
+ zswap_(&i__1, &w[kk + kkw * w_dim1], ldw, &w[kp + kkw *
+ w_dim1], ldw);
+ }
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column KW of W now holds */
+
+/* W(k) = U(k)*D(k) */
+
+/* where U(k) is the k-th column of U */
+
+/* Store U(k) in column k of A */
+
+ zcopy_(&k, &w[kw * w_dim1 + 1], &c__1, &a[k * a_dim1 + 1], &
+ c__1);
+ i__1 = k + k * a_dim1;
+ r1 = 1. / a[i__1].r;
+ i__1 = k - 1;
+ zdscal_(&i__1, &r1, &a[k * a_dim1 + 1], &c__1);
+
+/* Conjugate W(k) */
+
+ i__1 = k - 1;
+ zlacgv_(&i__1, &w[kw * w_dim1 + 1], &c__1);
+ } else {
+
+/* 2-by-2 pivot block D(k): columns KW and KW-1 of W now */
+/* hold */
+
+/* ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */
+
+/* where U(k) and U(k-1) are the k-th and (k-1)-th columns */
+/* of U */
+
+ if (k > 2) {
+
+/* Store U(k) and U(k-1) in columns k and k-1 of A */
+
+ i__1 = k - 1 + kw * w_dim1;
+ d21.r = w[i__1].r, d21.i = w[i__1].i;
+ d_cnjg(&z__2, &d21);
+ z_div(&z__1, &w[k + kw * w_dim1], &z__2);
+ d11.r = z__1.r, d11.i = z__1.i;
+ z_div(&z__1, &w[k - 1 + (kw - 1) * w_dim1], &d21);
+ d22.r = z__1.r, d22.i = z__1.i;
+ z__1.r = d11.r * d22.r - d11.i * d22.i, z__1.i = d11.r *
+ d22.i + d11.i * d22.r;
+ t = 1. / (z__1.r - 1.);
+ z__2.r = t, z__2.i = 0.;
+ z_div(&z__1, &z__2, &d21);
+ d21.r = z__1.r, d21.i = z__1.i;
+ i__1 = k - 2;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + (k - 1) * a_dim1;
+ i__3 = j + (kw - 1) * w_dim1;
+ z__3.r = d11.r * w[i__3].r - d11.i * w[i__3].i,
+ z__3.i = d11.r * w[i__3].i + d11.i * w[i__3]
+ .r;
+ i__4 = j + kw * w_dim1;
+ z__2.r = z__3.r - w[i__4].r, z__2.i = z__3.i - w[i__4]
+ .i;
+ z__1.r = d21.r * z__2.r - d21.i * z__2.i, z__1.i =
+ d21.r * z__2.i + d21.i * z__2.r;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = j + k * a_dim1;
+ d_cnjg(&z__2, &d21);
+ i__3 = j + kw * w_dim1;
+ z__4.r = d22.r * w[i__3].r - d22.i * w[i__3].i,
+ z__4.i = d22.r * w[i__3].i + d22.i * w[i__3]
+ .r;
+ i__4 = j + (kw - 1) * w_dim1;
+ z__3.r = z__4.r - w[i__4].r, z__3.i = z__4.i - w[i__4]
+ .i;
+ z__1.r = z__2.r * z__3.r - z__2.i * z__3.i, z__1.i =
+ z__2.r * z__3.i + z__2.i * z__3.r;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+/* L20: */
+ }
+ }
+
+/* Copy D(k) to A */
+
+ i__1 = k - 1 + (k - 1) * a_dim1;
+ i__2 = k - 1 + (kw - 1) * w_dim1;
+ a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+ i__1 = k - 1 + k * a_dim1;
+ i__2 = k - 1 + kw * w_dim1;
+ a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+ i__1 = k + k * a_dim1;
+ i__2 = k + kw * w_dim1;
+ a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+
+/* Conjugate W(k) and W(k-1) */
+
+ i__1 = k - 1;
+ zlacgv_(&i__1, &w[kw * w_dim1 + 1], &c__1);
+ i__1 = k - 2;
+ zlacgv_(&i__1, &w[(kw - 1) * w_dim1 + 1], &c__1);
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k - 1] = -kp;
+ }
+
+/* Decrease K and return to the start of the main loop */
+
+ k -= kstep;
+ goto L10;
+
+L30:
+
+/* Update the upper triangle of A11 (= A(1:k,1:k)) as */
+
+/* A11 := A11 - U12*D*U12' = A11 - U12*W' */
+
+/* computing blocks of NB columns at a time (note that conjg(W) is */
+/* actually stored) */
+
+ i__1 = -(*nb);
+ for (j = (k - 1) / *nb * *nb + 1; i__1 < 0 ? j >= 1 : j <= 1; j +=
+ i__1) {
+/* Computing MIN */
+ i__2 = *nb, i__3 = k - j + 1;
+ jb = min(i__2,i__3);
+
+/* Update the upper triangle of the diagonal block */
+
+ i__2 = j + jb - 1;
+ for (jj = j; jj <= i__2; ++jj) {
+ i__3 = jj + jj * a_dim1;
+ i__4 = jj + jj * a_dim1;
+ d__1 = a[i__4].r;
+ a[i__3].r = d__1, a[i__3].i = 0.;
+ i__3 = jj - j + 1;
+ i__4 = *n - k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__3, &i__4, &z__1, &a[j + (k + 1) *
+ a_dim1], lda, &w[jj + (kw + 1) * w_dim1], ldw, &c_b1,
+ &a[j + jj * a_dim1], &c__1);
+ i__3 = jj + jj * a_dim1;
+ i__4 = jj + jj * a_dim1;
+ d__1 = a[i__4].r;
+ a[i__3].r = d__1, a[i__3].i = 0.;
+/* L40: */
+ }
+
+/* Update the rectangular superdiagonal block */
+
+ i__2 = j - 1;
+ i__3 = *n - k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "Transpose", &i__2, &jb, &i__3, &z__1, &a[(
+ k + 1) * a_dim1 + 1], lda, &w[j + (kw + 1) * w_dim1], ldw,
+ &c_b1, &a[j * a_dim1 + 1], lda);
+/* L50: */
+ }
+
+/* Put U12 in standard form by partially undoing the interchanges */
+/* in columns k+1:n */
+
+ j = k + 1;
+L60:
+ jj = j;
+ jp = ipiv[j];
+ if (jp < 0) {
+ jp = -jp;
+ ++j;
+ }
+ ++j;
+ if (jp != jj && j <= *n) {
+ i__1 = *n - j + 1;
+ zswap_(&i__1, &a[jp + j * a_dim1], lda, &a[jj + j * a_dim1], lda);
+ }
+ if (j <= *n) {
+ goto L60;
+ }
+
+/* Set KB to the number of columns factorized */
+
+ *kb = *n - k;
+
+ } else {
+
+/* Factorize the leading columns of A using the lower triangle */
+/* of A and working forwards, and compute the matrix W = L21*D */
+/* for use in updating A22 (note that conjg(W) is actually stored) */
+
+/* K is the main loop index, increasing from 1 in steps of 1 or 2 */
+
+ k = 1;
+L70:
+
+/* Exit from loop */
+
+ if (k >= *nb && *nb < *n || k > *n) {
+ goto L90;
+ }
+
+/* Copy column K of A to column K of W and update it */
+
+ i__1 = k + k * w_dim1;
+ i__2 = k + k * a_dim1;
+ d__1 = a[i__2].r;
+ w[i__1].r = d__1, w[i__1].i = 0.;
+ if (k < *n) {
+ i__1 = *n - k;
+ zcopy_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &w[k + 1 + k *
+ w_dim1], &c__1);
+ }
+ i__1 = *n - k + 1;
+ i__2 = k - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__1, &i__2, &z__1, &a[k + a_dim1], lda, &w[k
+ + w_dim1], ldw, &c_b1, &w[k + k * w_dim1], &c__1);
+ i__1 = k + k * w_dim1;
+ i__2 = k + k * w_dim1;
+ d__1 = w[i__2].r;
+ w[i__1].r = d__1, w[i__1].i = 0.;
+
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ i__1 = k + k * w_dim1;
+ absakk = (d__1 = w[i__1].r, abs(d__1));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ imax = k + izamax_(&i__1, &w[k + 1 + k * w_dim1], &c__1);
+ i__1 = imax + k * w_dim1;
+ colmax = (d__1 = w[i__1].r, abs(d__1)) + (d__2 = d_imag(&w[imax +
+ k * w_dim1]), abs(d__2));
+ } else {
+ colmax = 0.;
+ }
+
+ if (max(absakk,colmax) == 0.) {
+
+/* Column K is zero: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * a_dim1;
+ d__1 = a[i__2].r;
+ a[i__1].r = d__1, a[i__1].i = 0.;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* Copy column IMAX to column K+1 of W and update it */
+
+ i__1 = imax - k;
+ zcopy_(&i__1, &a[imax + k * a_dim1], lda, &w[k + (k + 1) *
+ w_dim1], &c__1);
+ i__1 = imax - k;
+ zlacgv_(&i__1, &w[k + (k + 1) * w_dim1], &c__1);
+ i__1 = imax + (k + 1) * w_dim1;
+ i__2 = imax + imax * a_dim1;
+ d__1 = a[i__2].r;
+ w[i__1].r = d__1, w[i__1].i = 0.;
+ if (imax < *n) {
+ i__1 = *n - imax;
+ zcopy_(&i__1, &a[imax + 1 + imax * a_dim1], &c__1, &w[
+ imax + 1 + (k + 1) * w_dim1], &c__1);
+ }
+ i__1 = *n - k + 1;
+ i__2 = k - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__1, &i__2, &z__1, &a[k + a_dim1],
+ lda, &w[imax + w_dim1], ldw, &c_b1, &w[k + (k + 1) *
+ w_dim1], &c__1);
+ i__1 = imax + (k + 1) * w_dim1;
+ i__2 = imax + (k + 1) * w_dim1;
+ d__1 = w[i__2].r;
+ w[i__1].r = d__1, w[i__1].i = 0.;
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = imax - k;
+ jmax = k - 1 + izamax_(&i__1, &w[k + (k + 1) * w_dim1], &c__1)
+ ;
+ i__1 = jmax + (k + 1) * w_dim1;
+ rowmax = (d__1 = w[i__1].r, abs(d__1)) + (d__2 = d_imag(&w[
+ jmax + (k + 1) * w_dim1]), abs(d__2));
+ if (imax < *n) {
+ i__1 = *n - imax;
+ jmax = imax + izamax_(&i__1, &w[imax + 1 + (k + 1) *
+ w_dim1], &c__1);
+/* Computing MAX */
+ i__1 = jmax + (k + 1) * w_dim1;
+ d__3 = rowmax, d__4 = (d__1 = w[i__1].r, abs(d__1)) + (
+ d__2 = d_imag(&w[jmax + (k + 1) * w_dim1]), abs(
+ d__2));
+ rowmax = max(d__3,d__4);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else /* if(complicated condition) */ {
+ i__1 = imax + (k + 1) * w_dim1;
+ if ((d__1 = w[i__1].r, abs(d__1)) >= alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+
+/* copy column K+1 of W to column K */
+
+ i__1 = *n - k + 1;
+ zcopy_(&i__1, &w[k + (k + 1) * w_dim1], &c__1, &w[k +
+ k * w_dim1], &c__1);
+ } else {
+
+/* interchange rows and columns K+1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+ }
+
+ kk = k + kstep - 1;
+
+/* Updated column KP is already stored in column KK of W */
+
+ if (kp != kk) {
+
+/* Copy non-updated column KK to column KP */
+
+ i__1 = kp + kp * a_dim1;
+ i__2 = kk + kk * a_dim1;
+ d__1 = a[i__2].r;
+ a[i__1].r = d__1, a[i__1].i = 0.;
+ i__1 = kp - kk - 1;
+ zcopy_(&i__1, &a[kk + 1 + kk * a_dim1], &c__1, &a[kp + (kk +
+ 1) * a_dim1], lda);
+ i__1 = kp - kk - 1;
+ zlacgv_(&i__1, &a[kp + (kk + 1) * a_dim1], lda);
+ if (kp < *n) {
+ i__1 = *n - kp;
+ zcopy_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + 1
+ + kp * a_dim1], &c__1);
+ }
+
+/* Interchange rows KK and KP in first KK columns of A and W */
+
+ i__1 = kk - 1;
+ zswap_(&i__1, &a[kk + a_dim1], lda, &a[kp + a_dim1], lda);
+ zswap_(&kk, &w[kk + w_dim1], ldw, &w[kp + w_dim1], ldw);
+ }
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k of W now holds */
+
+/* W(k) = L(k)*D(k) */
+
+/* where L(k) is the k-th column of L */
+
+/* Store L(k) in column k of A */
+
+ i__1 = *n - k + 1;
+ zcopy_(&i__1, &w[k + k * w_dim1], &c__1, &a[k + k * a_dim1], &
+ c__1);
+ if (k < *n) {
+ i__1 = k + k * a_dim1;
+ r1 = 1. / a[i__1].r;
+ i__1 = *n - k;
+ zdscal_(&i__1, &r1, &a[k + 1 + k * a_dim1], &c__1);
+
+/* Conjugate W(k) */
+
+ i__1 = *n - k;
+ zlacgv_(&i__1, &w[k + 1 + k * w_dim1], &c__1);
+ }
+ } else {
+
+/* 2-by-2 pivot block D(k): columns k and k+1 of W now hold */
+
+/* ( W(k) W(k+1) ) = ( L(k) L(k+1) )*D(k) */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th columns */
+/* of L */
+
+ if (k < *n - 1) {
+
+/* Store L(k) and L(k+1) in columns k and k+1 of A */
+
+ i__1 = k + 1 + k * w_dim1;
+ d21.r = w[i__1].r, d21.i = w[i__1].i;
+ z_div(&z__1, &w[k + 1 + (k + 1) * w_dim1], &d21);
+ d11.r = z__1.r, d11.i = z__1.i;
+ d_cnjg(&z__2, &d21);
+ z_div(&z__1, &w[k + k * w_dim1], &z__2);
+ d22.r = z__1.r, d22.i = z__1.i;
+ z__1.r = d11.r * d22.r - d11.i * d22.i, z__1.i = d11.r *
+ d22.i + d11.i * d22.r;
+ t = 1. / (z__1.r - 1.);
+ z__2.r = t, z__2.i = 0.;
+ z_div(&z__1, &z__2, &d21);
+ d21.r = z__1.r, d21.i = z__1.i;
+ i__1 = *n;
+ for (j = k + 2; j <= i__1; ++j) {
+ i__2 = j + k * a_dim1;
+ d_cnjg(&z__2, &d21);
+ i__3 = j + k * w_dim1;
+ z__4.r = d11.r * w[i__3].r - d11.i * w[i__3].i,
+ z__4.i = d11.r * w[i__3].i + d11.i * w[i__3]
+ .r;
+ i__4 = j + (k + 1) * w_dim1;
+ z__3.r = z__4.r - w[i__4].r, z__3.i = z__4.i - w[i__4]
+ .i;
+ z__1.r = z__2.r * z__3.r - z__2.i * z__3.i, z__1.i =
+ z__2.r * z__3.i + z__2.i * z__3.r;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = j + (k + 1) * a_dim1;
+ i__3 = j + (k + 1) * w_dim1;
+ z__3.r = d22.r * w[i__3].r - d22.i * w[i__3].i,
+ z__3.i = d22.r * w[i__3].i + d22.i * w[i__3]
+ .r;
+ i__4 = j + k * w_dim1;
+ z__2.r = z__3.r - w[i__4].r, z__2.i = z__3.i - w[i__4]
+ .i;
+ z__1.r = d21.r * z__2.r - d21.i * z__2.i, z__1.i =
+ d21.r * z__2.i + d21.i * z__2.r;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+/* L80: */
+ }
+ }
+
+/* Copy D(k) to A */
+
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * w_dim1;
+ a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+ i__1 = k + 1 + k * a_dim1;
+ i__2 = k + 1 + k * w_dim1;
+ a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+ i__1 = k + 1 + (k + 1) * a_dim1;
+ i__2 = k + 1 + (k + 1) * w_dim1;
+ a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+
+/* Conjugate W(k) and W(k+1) */
+
+ i__1 = *n - k;
+ zlacgv_(&i__1, &w[k + 1 + k * w_dim1], &c__1);
+ i__1 = *n - k - 1;
+ zlacgv_(&i__1, &w[k + 2 + (k + 1) * w_dim1], &c__1);
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k + 1] = -kp;
+ }
+
+/* Increase K and return to the start of the main loop */
+
+ k += kstep;
+ goto L70;
+
+L90:
+
+/* Update the lower triangle of A22 (= A(k:n,k:n)) as */
+
+/* A22 := A22 - L21*D*L21' = A22 - L21*W' */
+
+/* computing blocks of NB columns at a time (note that conjg(W) is */
+/* actually stored) */
+
+ i__1 = *n;
+ i__2 = *nb;
+ for (j = k; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__3 = *nb, i__4 = *n - j + 1;
+ jb = min(i__3,i__4);
+
+/* Update the lower triangle of the diagonal block */
+
+ i__3 = j + jb - 1;
+ for (jj = j; jj <= i__3; ++jj) {
+ i__4 = jj + jj * a_dim1;
+ i__5 = jj + jj * a_dim1;
+ d__1 = a[i__5].r;
+ a[i__4].r = d__1, a[i__4].i = 0.;
+ i__4 = j + jb - jj;
+ i__5 = k - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__4, &i__5, &z__1, &a[jj + a_dim1],
+ lda, &w[jj + w_dim1], ldw, &c_b1, &a[jj + jj * a_dim1]
+, &c__1);
+ i__4 = jj + jj * a_dim1;
+ i__5 = jj + jj * a_dim1;
+ d__1 = a[i__5].r;
+ a[i__4].r = d__1, a[i__4].i = 0.;
+/* L100: */
+ }
+
+/* Update the rectangular subdiagonal block */
+
+ if (j + jb <= *n) {
+ i__3 = *n - j - jb + 1;
+ i__4 = k - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "Transpose", &i__3, &jb, &i__4, &z__1,
+ &a[j + jb + a_dim1], lda, &w[j + w_dim1], ldw, &c_b1,
+ &a[j + jb + j * a_dim1], lda);
+ }
+/* L110: */
+ }
+
+/* Put L21 in standard form by partially undoing the interchanges */
+/* in columns 1:k-1 */
+
+ j = k - 1;
+L120:
+ jj = j;
+ jp = ipiv[j];
+ if (jp < 0) {
+ jp = -jp;
+ --j;
+ }
+ --j;
+ if (jp != jj && j >= 1) {
+ zswap_(&j, &a[jp + a_dim1], lda, &a[jj + a_dim1], lda);
+ }
+ if (j >= 1) {
+ goto L120;
+ }
+
+/* Set KB to the number of columns factorized */
+
+ *kb = k - 1;
+
+ }
+ return 0;
+
+/* End of ZLAHEF */
+
+} /* zlahef_ */
diff --git a/contrib/libs/clapack/zlahqr.c b/contrib/libs/clapack/zlahqr.c
new file mode 100644
index 0000000000..ea4178f527
--- /dev/null
+++ b/contrib/libs/clapack/zlahqr.c
@@ -0,0 +1,755 @@
+/* zlahqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int zlahqr_(logical *wantt, logical *wantz, integer *n,
+ integer *ilo, integer *ihi, doublecomplex *h__, integer *ldh,
+ doublecomplex *w, integer *iloz, integer *ihiz, doublecomplex *z__,
+ integer *ldz, integer *info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1, d__2, d__3, d__4, d__5, d__6;
+ doublecomplex z__1, z__2, z__3, z__4, z__5, z__6, z__7;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+ double z_abs(doublecomplex *);
+ void z_sqrt(doublecomplex *, doublecomplex *), pow_zi(doublecomplex *,
+ doublecomplex *, integer *);
+
+ /* Local variables */
+ integer i__, j, k, l, m;
+ doublereal s;
+ doublecomplex t, u, v[2], x, y;
+ integer i1, i2;
+ doublecomplex t1;
+ doublereal t2;
+ doublecomplex v2;
+ doublereal aa, ab, ba, bb, h10;
+ doublecomplex h11;
+ doublereal h21;
+ doublecomplex h22, sc;
+ integer nh, nz;
+ doublereal sx;
+ integer jhi;
+ doublecomplex h11s;
+ integer jlo, its;
+ doublereal ulp;
+ doublecomplex sum;
+ doublereal tst;
+ doublecomplex temp;
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *);
+ doublereal rtemp;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), dlabad_(doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ doublereal safmin, safmax;
+ extern /* Subroutine */ int zlarfg_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *);
+ extern /* Double Complex */ VOID zladiv_(doublecomplex *, doublecomplex *,
+ doublecomplex *);
+ doublereal smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAHQR is an auxiliary routine called by ZHSEQR to update the */
+/* eigenvalues and Schur decomposition already computed by ZHSEQR, by */
+/* dealing with the Hessenberg submatrix in rows and columns ILO to */
+/* IHI. */
+
+/* Arguments */
+/* ========= */
+
+/* WANTT (input) LOGICAL */
+/* = .TRUE. : the full Schur form T is required; */
+/* = .FALSE.: only eigenvalues are required. */
+
+/* WANTZ (input) LOGICAL */
+/* = .TRUE. : the matrix of Schur vectors Z is required; */
+/* = .FALSE.: Schur vectors are not required. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that H is already upper triangular in rows and */
+/* columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless ILO = 1). */
+/* ZLAHQR works primarily with the Hessenberg submatrix in rows */
+/* and columns ILO to IHI, but applies transformations to all of */
+/* H if WANTT is .TRUE.. */
+/* 1 <= ILO <= max(1,IHI); IHI <= N. */
+
+/* H (input/output) COMPLEX*16 array, dimension (LDH,N) */
+/* On entry, the upper Hessenberg matrix H. */
+/* On exit, if INFO is zero and if WANTT is .TRUE., then H */
+/* is upper triangular in rows and columns ILO:IHI. If INFO */
+/* is zero and if WANTT is .FALSE., then the contents of H */
+/* are unspecified on exit. The output state of H in case */
+/* INFO is positive is given below under the description of INFO. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH >= max(1,N). */
+
+/* W (output) COMPLEX*16 array, dimension (N) */
+/* The computed eigenvalues ILO to IHI are stored in the */
+/* corresponding elements of W. If WANTT is .TRUE., the */
+/* eigenvalues are stored in the same order as on the diagonal */
+/* of the Schur form returned in H, with W(i) = H(i,i). */
+
+/* ILOZ (input) INTEGER */
+/* IHIZ (input) INTEGER */
+/* Specify the rows of Z to which transformations must be */
+/* applied if WANTZ is .TRUE.. */
+/* 1 <= ILOZ <= ILO; IHI <= IHIZ <= N. */
+
+/* Z (input/output) COMPLEX*16 array, dimension (LDZ,N) */
+/* If WANTZ is .TRUE., on entry Z must contain the current */
+/* matrix Z of transformations accumulated by ZHSEQR, and on */
+/* exit Z has been updated; transformations are applied only to */
+/* the submatrix Z(ILOZ:IHIZ,ILO:IHI). */
+/* If WANTZ is .FALSE., Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* .GT. 0: if INFO = i, ZLAHQR failed to compute all the */
+/* eigenvalues ILO to IHI in a total of 30 iterations */
+/* per eigenvalue; elements i+1:ihi of W contain */
+/* those eigenvalues which have been successfully */
+/* computed. */
+
+/* If INFO .GT. 0 and WANTT is .FALSE., then on exit, */
+/* the remaining unconverged eigenvalues are the */
+/* eigenvalues of the upper Hessenberg matrix */
+/* rows and columns ILO through INFO of the final, */
+/* output value of H. */
+
+/* If INFO .GT. 0 and WANTT is .TRUE., then on exit */
+/* (*) (initial value of H)*U = U*(final value of H) */
+/* where U is an orthogonal matrix. The final */
+/* value of H is upper Hessenberg and triangular in */
+/* rows and columns INFO+1 through IHI. */
+
+/* If INFO .GT. 0 and WANTZ is .TRUE., then on exit */
+/* (final value of Z) = (initial value of Z)*U */
+/* where U is the orthogonal matrix in (*) */
+/* (regardless of the value of WANTT.) */
+
+/* Further Details */
+/* =============== */
+
+/* 02-96 Based on modifications by */
+/* David Day, Sandia National Laboratory, USA */
+
+/* 12-04 Further modifications by */
+/* Ralph Byers, University of Kansas, USA */
+/* This is a modified version of ZLAHQR from LAPACK version 3.0. */
+/* It is (1) more robust against overflow and underflow and */
+/* (2) adopts the more conservative Ahues & Tisseur stopping */
+/* criterion (LAWN 122, 1997). */
+
+/* ========================================================= */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+
+ /* Function Body */
+ *info = 0;
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*ilo == *ihi) {
+ i__1 = *ilo;
+ i__2 = *ilo + *ilo * h_dim1;
+ w[i__1].r = h__[i__2].r, w[i__1].i = h__[i__2].i;
+ return 0;
+ }
+
+/* ==== clear out the trash ==== */
+ i__1 = *ihi - 3;
+ for (j = *ilo; j <= i__1; ++j) {
+ i__2 = j + 2 + j * h_dim1;
+ h__[i__2].r = 0., h__[i__2].i = 0.;
+ i__2 = j + 3 + j * h_dim1;
+ h__[i__2].r = 0., h__[i__2].i = 0.;
+/* L10: */
+ }
+ if (*ilo <= *ihi - 2) {
+ i__1 = *ihi + (*ihi - 2) * h_dim1;
+ h__[i__1].r = 0., h__[i__1].i = 0.;
+ }
+/* ==== ensure that subdiagonal entries are real ==== */
+ if (*wantt) {
+ jlo = 1;
+ jhi = *n;
+ } else {
+ jlo = *ilo;
+ jhi = *ihi;
+ }
+ i__1 = *ihi;
+ for (i__ = *ilo + 1; i__ <= i__1; ++i__) {
+ if (d_imag(&h__[i__ + (i__ - 1) * h_dim1]) != 0.) {
+/* ==== The following redundant normalization */
+/* . avoids problems with both gradual and */
+/* . sudden underflow in ABS(H(I,I-1)) ==== */
+ i__2 = i__ + (i__ - 1) * h_dim1;
+ i__3 = i__ + (i__ - 1) * h_dim1;
+ d__3 = (d__1 = h__[i__3].r, abs(d__1)) + (d__2 = d_imag(&h__[i__
+ + (i__ - 1) * h_dim1]), abs(d__2));
+ z__1.r = h__[i__2].r / d__3, z__1.i = h__[i__2].i / d__3;
+ sc.r = z__1.r, sc.i = z__1.i;
+ d_cnjg(&z__2, &sc);
+ d__1 = z_abs(&sc);
+ z__1.r = z__2.r / d__1, z__1.i = z__2.i / d__1;
+ sc.r = z__1.r, sc.i = z__1.i;
+ i__2 = i__ + (i__ - 1) * h_dim1;
+ d__1 = z_abs(&h__[i__ + (i__ - 1) * h_dim1]);
+ h__[i__2].r = d__1, h__[i__2].i = 0.;
+ i__2 = jhi - i__ + 1;
+ zscal_(&i__2, &sc, &h__[i__ + i__ * h_dim1], ldh);
+/* Computing MIN */
+ i__3 = jhi, i__4 = i__ + 1;
+ i__2 = min(i__3,i__4) - jlo + 1;
+ d_cnjg(&z__1, &sc);
+ zscal_(&i__2, &z__1, &h__[jlo + i__ * h_dim1], &c__1);
+ if (*wantz) {
+ i__2 = *ihiz - *iloz + 1;
+ d_cnjg(&z__1, &sc);
+ zscal_(&i__2, &z__1, &z__[*iloz + i__ * z_dim1], &c__1);
+ }
+ }
+/* L20: */
+ }
+
+ nh = *ihi - *ilo + 1;
+ nz = *ihiz - *iloz + 1;
+
+/* Set machine-dependent constants for the stopping criterion. */
+
+ safmin = dlamch_("SAFE MINIMUM");
+ safmax = 1. / safmin;
+ dlabad_(&safmin, &safmax);
+ ulp = dlamch_("PRECISION");
+ smlnum = safmin * ((doublereal) nh / ulp);
+
+/* I1 and I2 are the indices of the first row and last column of H */
+/* to which transformations must be applied. If eigenvalues only are */
+/* being computed, I1 and I2 are set inside the main loop. */
+
+ if (*wantt) {
+ i1 = 1;
+ i2 = *n;
+ }
+
+/* The main loop begins here. I is the loop index and decreases from */
+/* IHI to ILO in steps of 1. Each iteration of the loop works */
+/* with the active submatrix in rows and columns L to I. */
+/* Eigenvalues I+1 to IHI have already converged. Either L = ILO, or */
+/* H(L,L-1) is negligible so that the matrix splits. */
+
+ i__ = *ihi;
+L30:
+ if (i__ < *ilo) {
+ goto L150;
+ }
+
+/* Perform QR iterations on rows and columns ILO to I until a */
+/* submatrix of order 1 splits off at the bottom because a */
+/* subdiagonal element has become negligible. */
+
+ l = *ilo;
+ for (its = 0; its <= 30; ++its) {
+
+/* Look for a single small subdiagonal element. */
+
+ i__1 = l + 1;
+ for (k = i__; k >= i__1; --k) {
+ i__2 = k + (k - 1) * h_dim1;
+ if ((d__1 = h__[i__2].r, abs(d__1)) + (d__2 = d_imag(&h__[k + (k
+ - 1) * h_dim1]), abs(d__2)) <= smlnum) {
+ goto L50;
+ }
+ i__2 = k - 1 + (k - 1) * h_dim1;
+ i__3 = k + k * h_dim1;
+ tst = (d__1 = h__[i__2].r, abs(d__1)) + (d__2 = d_imag(&h__[k - 1
+ + (k - 1) * h_dim1]), abs(d__2)) + ((d__3 = h__[i__3].r,
+ abs(d__3)) + (d__4 = d_imag(&h__[k + k * h_dim1]), abs(
+ d__4)));
+ if (tst == 0.) {
+ if (k - 2 >= *ilo) {
+ i__2 = k - 1 + (k - 2) * h_dim1;
+ tst += (d__1 = h__[i__2].r, abs(d__1));
+ }
+ if (k + 1 <= *ihi) {
+ i__2 = k + 1 + k * h_dim1;
+ tst += (d__1 = h__[i__2].r, abs(d__1));
+ }
+ }
+/* ==== The following is a conservative small subdiagonal */
+/* . deflation criterion due to Ahues & Tisseur (LAWN 122, */
+/* . 1997). It has better mathematical foundation and */
+/* . improves accuracy in some examples. ==== */
+ i__2 = k + (k - 1) * h_dim1;
+ if ((d__1 = h__[i__2].r, abs(d__1)) <= ulp * tst) {
+/* Computing MAX */
+ i__2 = k + (k - 1) * h_dim1;
+ i__3 = k - 1 + k * h_dim1;
+ d__5 = (d__1 = h__[i__2].r, abs(d__1)) + (d__2 = d_imag(&h__[
+ k + (k - 1) * h_dim1]), abs(d__2)), d__6 = (d__3 =
+ h__[i__3].r, abs(d__3)) + (d__4 = d_imag(&h__[k - 1 +
+ k * h_dim1]), abs(d__4));
+ ab = max(d__5,d__6);
+/* Computing MIN */
+ i__2 = k + (k - 1) * h_dim1;
+ i__3 = k - 1 + k * h_dim1;
+ d__5 = (d__1 = h__[i__2].r, abs(d__1)) + (d__2 = d_imag(&h__[
+ k + (k - 1) * h_dim1]), abs(d__2)), d__6 = (d__3 =
+ h__[i__3].r, abs(d__3)) + (d__4 = d_imag(&h__[k - 1 +
+ k * h_dim1]), abs(d__4));
+ ba = min(d__5,d__6);
+ i__2 = k - 1 + (k - 1) * h_dim1;
+ i__3 = k + k * h_dim1;
+ z__2.r = h__[i__2].r - h__[i__3].r, z__2.i = h__[i__2].i -
+ h__[i__3].i;
+ z__1.r = z__2.r, z__1.i = z__2.i;
+/* Computing MAX */
+ i__4 = k + k * h_dim1;
+ d__5 = (d__1 = h__[i__4].r, abs(d__1)) + (d__2 = d_imag(&h__[
+ k + k * h_dim1]), abs(d__2)), d__6 = (d__3 = z__1.r,
+ abs(d__3)) + (d__4 = d_imag(&z__1), abs(d__4));
+ aa = max(d__5,d__6);
+ i__2 = k - 1 + (k - 1) * h_dim1;
+ i__3 = k + k * h_dim1;
+ z__2.r = h__[i__2].r - h__[i__3].r, z__2.i = h__[i__2].i -
+ h__[i__3].i;
+ z__1.r = z__2.r, z__1.i = z__2.i;
+/* Computing MIN */
+ i__4 = k + k * h_dim1;
+ d__5 = (d__1 = h__[i__4].r, abs(d__1)) + (d__2 = d_imag(&h__[
+ k + k * h_dim1]), abs(d__2)), d__6 = (d__3 = z__1.r,
+ abs(d__3)) + (d__4 = d_imag(&z__1), abs(d__4));
+ bb = min(d__5,d__6);
+ s = aa + ab;
+/* Computing MAX */
+ d__1 = smlnum, d__2 = ulp * (bb * (aa / s));
+ if (ba * (ab / s) <= max(d__1,d__2)) {
+ goto L50;
+ }
+ }
+/* L40: */
+ }
+L50:
+ l = k;
+ if (l > *ilo) {
+
+/* H(L,L-1) is negligible */
+
+ i__1 = l + (l - 1) * h_dim1;
+ h__[i__1].r = 0., h__[i__1].i = 0.;
+ }
+
+/* Exit from loop if a submatrix of order 1 has split off. */
+
+ if (l >= i__) {
+ goto L140;
+ }
+
+/* Now the active submatrix is in rows and columns L to I. If */
+/* eigenvalues only are being computed, only the active submatrix */
+/* need be transformed. */
+
+ if (! (*wantt)) {
+ i1 = l;
+ i2 = i__;
+ }
+
+ if (its == 10) {
+
+/* Exceptional shift. */
+
+ i__1 = l + 1 + l * h_dim1;
+ s = (d__1 = h__[i__1].r, abs(d__1)) * .75;
+ i__1 = l + l * h_dim1;
+ z__1.r = s + h__[i__1].r, z__1.i = h__[i__1].i;
+ t.r = z__1.r, t.i = z__1.i;
+ } else if (its == 20) {
+
+/* Exceptional shift. */
+
+ i__1 = i__ + (i__ - 1) * h_dim1;
+ s = (d__1 = h__[i__1].r, abs(d__1)) * .75;
+ i__1 = i__ + i__ * h_dim1;
+ z__1.r = s + h__[i__1].r, z__1.i = h__[i__1].i;
+ t.r = z__1.r, t.i = z__1.i;
+ } else {
+
+/* Wilkinson's shift. */
+
+ i__1 = i__ + i__ * h_dim1;
+ t.r = h__[i__1].r, t.i = h__[i__1].i;
+ z_sqrt(&z__2, &h__[i__ - 1 + i__ * h_dim1]);
+ z_sqrt(&z__3, &h__[i__ + (i__ - 1) * h_dim1]);
+ z__1.r = z__2.r * z__3.r - z__2.i * z__3.i, z__1.i = z__2.r *
+ z__3.i + z__2.i * z__3.r;
+ u.r = z__1.r, u.i = z__1.i;
+ s = (d__1 = u.r, abs(d__1)) + (d__2 = d_imag(&u), abs(d__2));
+ if (s != 0.) {
+ i__1 = i__ - 1 + (i__ - 1) * h_dim1;
+ z__2.r = h__[i__1].r - t.r, z__2.i = h__[i__1].i - t.i;
+ z__1.r = z__2.r * .5, z__1.i = z__2.i * .5;
+ x.r = z__1.r, x.i = z__1.i;
+ sx = (d__1 = x.r, abs(d__1)) + (d__2 = d_imag(&x), abs(d__2));
+/* Computing MAX */
+ d__3 = s, d__4 = (d__1 = x.r, abs(d__1)) + (d__2 = d_imag(&x),
+ abs(d__2));
+ s = max(d__3,d__4);
+ z__5.r = x.r / s, z__5.i = x.i / s;
+ pow_zi(&z__4, &z__5, &c__2);
+ z__7.r = u.r / s, z__7.i = u.i / s;
+ pow_zi(&z__6, &z__7, &c__2);
+ z__3.r = z__4.r + z__6.r, z__3.i = z__4.i + z__6.i;
+ z_sqrt(&z__2, &z__3);
+ z__1.r = s * z__2.r, z__1.i = s * z__2.i;
+ y.r = z__1.r, y.i = z__1.i;
+ if (sx > 0.) {
+ z__1.r = x.r / sx, z__1.i = x.i / sx;
+ z__2.r = x.r / sx, z__2.i = x.i / sx;
+ if (z__1.r * y.r + d_imag(&z__2) * d_imag(&y) < 0.) {
+ z__3.r = -y.r, z__3.i = -y.i;
+ y.r = z__3.r, y.i = z__3.i;
+ }
+ }
+ z__4.r = x.r + y.r, z__4.i = x.i + y.i;
+ zladiv_(&z__3, &u, &z__4);
+ z__2.r = u.r * z__3.r - u.i * z__3.i, z__2.i = u.r * z__3.i +
+ u.i * z__3.r;
+ z__1.r = t.r - z__2.r, z__1.i = t.i - z__2.i;
+ t.r = z__1.r, t.i = z__1.i;
+ }
+ }
+
+/* Look for two consecutive small subdiagonal elements. */
+
+ i__1 = l + 1;
+ for (m = i__ - 1; m >= i__1; --m) {
+
+/* Determine the effect of starting the single-shift QR */
+/* iteration at row M, and see if this would make H(M,M-1) */
+/* negligible. */
+
+ i__2 = m + m * h_dim1;
+ h11.r = h__[i__2].r, h11.i = h__[i__2].i;
+ i__2 = m + 1 + (m + 1) * h_dim1;
+ h22.r = h__[i__2].r, h22.i = h__[i__2].i;
+ z__1.r = h11.r - t.r, z__1.i = h11.i - t.i;
+ h11s.r = z__1.r, h11s.i = z__1.i;
+ i__2 = m + 1 + m * h_dim1;
+ h21 = h__[i__2].r;
+ s = (d__1 = h11s.r, abs(d__1)) + (d__2 = d_imag(&h11s), abs(d__2))
+ + abs(h21);
+ z__1.r = h11s.r / s, z__1.i = h11s.i / s;
+ h11s.r = z__1.r, h11s.i = z__1.i;
+ h21 /= s;
+ v[0].r = h11s.r, v[0].i = h11s.i;
+ v[1].r = h21, v[1].i = 0.;
+ i__2 = m + (m - 1) * h_dim1;
+ h10 = h__[i__2].r;
+ if (abs(h10) * abs(h21) <= ulp * (((d__1 = h11s.r, abs(d__1)) + (
+ d__2 = d_imag(&h11s), abs(d__2))) * ((d__3 = h11.r, abs(
+ d__3)) + (d__4 = d_imag(&h11), abs(d__4)) + ((d__5 =
+ h22.r, abs(d__5)) + (d__6 = d_imag(&h22), abs(d__6)))))) {
+ goto L70;
+ }
+/* L60: */
+ }
+ i__1 = l + l * h_dim1;
+ h11.r = h__[i__1].r, h11.i = h__[i__1].i;
+ i__1 = l + 1 + (l + 1) * h_dim1;
+ h22.r = h__[i__1].r, h22.i = h__[i__1].i;
+ z__1.r = h11.r - t.r, z__1.i = h11.i - t.i;
+ h11s.r = z__1.r, h11s.i = z__1.i;
+ i__1 = l + 1 + l * h_dim1;
+ h21 = h__[i__1].r;
+ s = (d__1 = h11s.r, abs(d__1)) + (d__2 = d_imag(&h11s), abs(d__2)) +
+ abs(h21);
+ z__1.r = h11s.r / s, z__1.i = h11s.i / s;
+ h11s.r = z__1.r, h11s.i = z__1.i;
+ h21 /= s;
+ v[0].r = h11s.r, v[0].i = h11s.i;
+ v[1].r = h21, v[1].i = 0.;
+L70:
+
+/* Single-shift QR step */
+
+ i__1 = i__ - 1;
+ for (k = m; k <= i__1; ++k) {
+
+/* The first iteration of this loop determines a reflection G */
+/* from the vector V and applies it from left and right to H, */
+/* thus creating a nonzero bulge below the subdiagonal. */
+
+/* Each subsequent iteration determines a reflection G to */
+/* restore the Hessenberg form in the (K-1)th column, and thus */
+/* chases the bulge one step toward the bottom of the active */
+/* submatrix. */
+
+/* V(2) is always real before the call to ZLARFG, and hence */
+/* after the call T2 ( = T1*V(2) ) is also real. */
+
+ if (k > m) {
+ zcopy_(&c__2, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1);
+ }
+ zlarfg_(&c__2, v, &v[1], &c__1, &t1);
+ if (k > m) {
+ i__2 = k + (k - 1) * h_dim1;
+ h__[i__2].r = v[0].r, h__[i__2].i = v[0].i;
+ i__2 = k + 1 + (k - 1) * h_dim1;
+ h__[i__2].r = 0., h__[i__2].i = 0.;
+ }
+ v2.r = v[1].r, v2.i = v[1].i;
+ z__1.r = t1.r * v2.r - t1.i * v2.i, z__1.i = t1.r * v2.i + t1.i *
+ v2.r;
+ t2 = z__1.r;
+
+/* Apply G from the left to transform the rows of the matrix */
+/* in columns K to I2. */
+
+ i__2 = i2;
+ for (j = k; j <= i__2; ++j) {
+ d_cnjg(&z__3, &t1);
+ i__3 = k + j * h_dim1;
+ z__2.r = z__3.r * h__[i__3].r - z__3.i * h__[i__3].i, z__2.i =
+ z__3.r * h__[i__3].i + z__3.i * h__[i__3].r;
+ i__4 = k + 1 + j * h_dim1;
+ z__4.r = t2 * h__[i__4].r, z__4.i = t2 * h__[i__4].i;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__3 = k + j * h_dim1;
+ i__4 = k + j * h_dim1;
+ z__1.r = h__[i__4].r - sum.r, z__1.i = h__[i__4].i - sum.i;
+ h__[i__3].r = z__1.r, h__[i__3].i = z__1.i;
+ i__3 = k + 1 + j * h_dim1;
+ i__4 = k + 1 + j * h_dim1;
+ z__2.r = sum.r * v2.r - sum.i * v2.i, z__2.i = sum.r * v2.i +
+ sum.i * v2.r;
+ z__1.r = h__[i__4].r - z__2.r, z__1.i = h__[i__4].i - z__2.i;
+ h__[i__3].r = z__1.r, h__[i__3].i = z__1.i;
+/* L80: */
+ }
+
+/* Apply G from the right to transform the columns of the */
+/* matrix in rows I1 to min(K+2,I). */
+
+/* Computing MIN */
+ i__3 = k + 2;
+ i__2 = min(i__3,i__);
+ for (j = i1; j <= i__2; ++j) {
+ i__3 = j + k * h_dim1;
+ z__2.r = t1.r * h__[i__3].r - t1.i * h__[i__3].i, z__2.i =
+ t1.r * h__[i__3].i + t1.i * h__[i__3].r;
+ i__4 = j + (k + 1) * h_dim1;
+ z__3.r = t2 * h__[i__4].r, z__3.i = t2 * h__[i__4].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__3 = j + k * h_dim1;
+ i__4 = j + k * h_dim1;
+ z__1.r = h__[i__4].r - sum.r, z__1.i = h__[i__4].i - sum.i;
+ h__[i__3].r = z__1.r, h__[i__3].i = z__1.i;
+ i__3 = j + (k + 1) * h_dim1;
+ i__4 = j + (k + 1) * h_dim1;
+ d_cnjg(&z__3, &v2);
+ z__2.r = sum.r * z__3.r - sum.i * z__3.i, z__2.i = sum.r *
+ z__3.i + sum.i * z__3.r;
+ z__1.r = h__[i__4].r - z__2.r, z__1.i = h__[i__4].i - z__2.i;
+ h__[i__3].r = z__1.r, h__[i__3].i = z__1.i;
+/* L90: */
+ }
+
+ if (*wantz) {
+
+/* Accumulate transformations in the matrix Z */
+
+ i__2 = *ihiz;
+ for (j = *iloz; j <= i__2; ++j) {
+ i__3 = j + k * z_dim1;
+ z__2.r = t1.r * z__[i__3].r - t1.i * z__[i__3].i, z__2.i =
+ t1.r * z__[i__3].i + t1.i * z__[i__3].r;
+ i__4 = j + (k + 1) * z_dim1;
+ z__3.r = t2 * z__[i__4].r, z__3.i = t2 * z__[i__4].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__3 = j + k * z_dim1;
+ i__4 = j + k * z_dim1;
+ z__1.r = z__[i__4].r - sum.r, z__1.i = z__[i__4].i -
+ sum.i;
+ z__[i__3].r = z__1.r, z__[i__3].i = z__1.i;
+ i__3 = j + (k + 1) * z_dim1;
+ i__4 = j + (k + 1) * z_dim1;
+ d_cnjg(&z__3, &v2);
+ z__2.r = sum.r * z__3.r - sum.i * z__3.i, z__2.i = sum.r *
+ z__3.i + sum.i * z__3.r;
+ z__1.r = z__[i__4].r - z__2.r, z__1.i = z__[i__4].i -
+ z__2.i;
+ z__[i__3].r = z__1.r, z__[i__3].i = z__1.i;
+/* L100: */
+ }
+ }
+
+ if (k == m && m > l) {
+
+/* If the QR step was started at row M > L because two */
+/* consecutive small subdiagonals were found, then extra */
+/* scaling must be performed to ensure that H(M,M-1) remains */
+/* real. */
+
+ z__1.r = 1. - t1.r, z__1.i = 0. - t1.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ d__1 = z_abs(&temp);
+ z__1.r = temp.r / d__1, z__1.i = temp.i / d__1;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = m + 1 + m * h_dim1;
+ i__3 = m + 1 + m * h_dim1;
+ d_cnjg(&z__2, &temp);
+ z__1.r = h__[i__3].r * z__2.r - h__[i__3].i * z__2.i, z__1.i =
+ h__[i__3].r * z__2.i + h__[i__3].i * z__2.r;
+ h__[i__2].r = z__1.r, h__[i__2].i = z__1.i;
+ if (m + 2 <= i__) {
+ i__2 = m + 2 + (m + 1) * h_dim1;
+ i__3 = m + 2 + (m + 1) * h_dim1;
+ z__1.r = h__[i__3].r * temp.r - h__[i__3].i * temp.i,
+ z__1.i = h__[i__3].r * temp.i + h__[i__3].i *
+ temp.r;
+ h__[i__2].r = z__1.r, h__[i__2].i = z__1.i;
+ }
+ i__2 = i__;
+ for (j = m; j <= i__2; ++j) {
+ if (j != m + 1) {
+ if (i2 > j) {
+ i__3 = i2 - j;
+ zscal_(&i__3, &temp, &h__[j + (j + 1) * h_dim1],
+ ldh);
+ }
+ i__3 = j - i1;
+ d_cnjg(&z__1, &temp);
+ zscal_(&i__3, &z__1, &h__[i1 + j * h_dim1], &c__1);
+ if (*wantz) {
+ d_cnjg(&z__1, &temp);
+ zscal_(&nz, &z__1, &z__[*iloz + j * z_dim1], &
+ c__1);
+ }
+ }
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+
+/* Ensure that H(I,I-1) is real. */
+
+ i__1 = i__ + (i__ - 1) * h_dim1;
+ temp.r = h__[i__1].r, temp.i = h__[i__1].i;
+ if (d_imag(&temp) != 0.) {
+ rtemp = z_abs(&temp);
+ i__1 = i__ + (i__ - 1) * h_dim1;
+ h__[i__1].r = rtemp, h__[i__1].i = 0.;
+ z__1.r = temp.r / rtemp, z__1.i = temp.i / rtemp;
+ temp.r = z__1.r, temp.i = z__1.i;
+ if (i2 > i__) {
+ i__1 = i2 - i__;
+ d_cnjg(&z__1, &temp);
+ zscal_(&i__1, &z__1, &h__[i__ + (i__ + 1) * h_dim1], ldh);
+ }
+ i__1 = i__ - i1;
+ zscal_(&i__1, &temp, &h__[i1 + i__ * h_dim1], &c__1);
+ if (*wantz) {
+ zscal_(&nz, &temp, &z__[*iloz + i__ * z_dim1], &c__1);
+ }
+ }
+
+/* L130: */
+ }
+
+/* Failure to converge in remaining number of iterations */
+
+ *info = i__;
+ return 0;
+
+L140:
+
+/* H(I,I-1) is negligible: one eigenvalue has converged. */
+
+ i__1 = i__;
+ i__2 = i__ + i__ * h_dim1;
+ w[i__1].r = h__[i__2].r, w[i__1].i = h__[i__2].i;
+
+/* return to start of the main loop with new value of I. */
+
+ i__ = l - 1;
+ goto L30;
+
+L150:
+ return 0;
+
+/* End of ZLAHQR */
+
+} /* zlahqr_ */
diff --git a/contrib/libs/clapack/zlahr2.c b/contrib/libs/clapack/zlahr2.c
new file mode 100644
index 0000000000..761a5f6518
--- /dev/null
+++ b/contrib/libs/clapack/zlahr2.c
@@ -0,0 +1,331 @@
+/* zlahr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zlahr2_(integer *n, integer *k, integer *nb,
+ doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *t,
+ integer *ldt, doublecomplex *y, integer *ldy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2,
+ i__3;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__;
+ doublecomplex ei;
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *), zgemm_(char *, char *, integer *,
+ integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *),
+ zcopy_(integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *), ztrmm_(char *, char *, char *, char *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *),
+ zaxpy_(integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *), ztrmv_(char *, char *, char *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *), zlarfg_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *), zlacgv_(integer *,
+ doublecomplex *, integer *), zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAHR2 reduces the first NB columns of a complex general n-BY-(n-k+1) */
+/* matrix A so that elements below the k-th subdiagonal are zero. The */
+/* reduction is performed by a unitary similarity transformation */
+/* Q' * A * Q. The routine returns the matrices V and T which determine */
+/* Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T. */
+
+/* This is an auxiliary routine called by ZGEHRD. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* K (input) INTEGER */
+/* The offset for the reduction. Elements below the k-th */
+/* subdiagonal in the first NB columns are reduced to zero. */
+/* K < N. */
+
+/* NB (input) INTEGER */
+/* The number of columns to be reduced. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N-K+1) */
+/* On entry, the n-by-(n-k+1) general matrix A. */
+/* On exit, the elements on and above the k-th subdiagonal in */
+/* the first NB columns are overwritten with the corresponding */
+/* elements of the reduced matrix; the elements below the k-th */
+/* subdiagonal, with the array TAU, represent the matrix Q as a */
+/* product of elementary reflectors. The other columns of A are */
+/* unchanged. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (output) COMPLEX*16 array, dimension (NB) */
+/* The scalar factors of the elementary reflectors. See Further */
+/* Details. */
+
+/* T (output) COMPLEX*16 array, dimension (LDT,NB) */
+/* The upper triangular matrix T. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= NB. */
+
+/* Y (output) COMPLEX*16 array, dimension (LDY,NB) */
+/* The n-by-nb matrix Y. */
+
+/* LDY (input) INTEGER */
+/* The leading dimension of the array Y. LDY >= N. */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of nb elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(nb). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in */
+/* A(i+k+1:n,i), and tau in TAU(i). */
+
+/* The elements of the vectors v together form the (n-k+1)-by-nb matrix */
+/* V which is needed, with T and Y, to apply the transformation to the */
+/* unreduced part of the matrix, using an update of the form: */
+/* A := (I - V*T*V') * (A - Y*V'). */
+
+/* The contents of A on exit are illustrated by the following example */
+/* with n = 7, k = 3 and nb = 2: */
+
+/* ( a a a a a ) */
+/* ( a a a a a ) */
+/* ( a a a a a ) */
+/* ( h h a a a ) */
+/* ( v1 h a a a ) */
+/* ( v1 v2 a a a ) */
+/* ( v1 v2 a a a ) */
+
+/* where a denotes an element of the original matrix A, h denotes a */
+/* modified element of the upper Hessenberg matrix H, and vi denotes an */
+/* element of the vector defining H(i). */
+
+/* This file is a slight modification of LAPACK-3.0's ZLAHRD */
+/* incorporating improvements proposed by Quintana-Orti and Van de */
+/* Geijn. Note that the entries of A(1:K,2:NB) differ from those */
+/* returned by the original LAPACK routine. This function is */
+/* not backward compatible with LAPACK3.0. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ --tau;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ y_dim1 = *ldy;
+ y_offset = 1 + y_dim1;
+ y -= y_offset;
+
+ /* Function Body */
+ if (*n <= 1) {
+ return 0;
+ }
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (i__ > 1) {
+
+/* Update A(K+1:N,I) */
+
+/* Update I-th column of A - Y * V' */
+
+ i__2 = i__ - 1;
+ zlacgv_(&i__2, &a[*k + i__ - 1 + a_dim1], lda);
+ i__2 = *n - *k;
+ i__3 = i__ - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("NO TRANSPOSE", &i__2, &i__3, &z__1, &y[*k + 1 + y_dim1],
+ ldy, &a[*k + i__ - 1 + a_dim1], lda, &c_b2, &a[*k + 1 +
+ i__ * a_dim1], &c__1);
+ i__2 = i__ - 1;
+ zlacgv_(&i__2, &a[*k + i__ - 1 + a_dim1], lda);
+
+/* Apply I - V * T' * V' to this column (call it b) from the */
+/* left, using the last column of T as workspace */
+
+/* Let V = ( V1 ) and b = ( b1 ) (first I-1 rows) */
+/* ( V2 ) ( b2 ) */
+
+/* where V1 is unit lower triangular */
+
+/* w := V1' * b1 */
+
+ i__2 = i__ - 1;
+ zcopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 +
+ 1], &c__1);
+ i__2 = i__ - 1;
+ ztrmv_("Lower", "Conjugate transpose", "UNIT", &i__2, &a[*k + 1 +
+ a_dim1], lda, &t[*nb * t_dim1 + 1], &c__1);
+
+/* w := w + V2'*b2 */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*k + i__ +
+ a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b2, &
+ t[*nb * t_dim1 + 1], &c__1);
+
+/* w := T'*w */
+
+ i__2 = i__ - 1;
+ ztrmv_("Upper", "Conjugate transpose", "NON-UNIT", &i__2, &t[
+ t_offset], ldt, &t[*nb * t_dim1 + 1], &c__1);
+
+/* b2 := b2 - V2*w */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("NO TRANSPOSE", &i__2, &i__3, &z__1, &a[*k + i__ + a_dim1],
+ lda, &t[*nb * t_dim1 + 1], &c__1, &c_b2, &a[*k + i__ +
+ i__ * a_dim1], &c__1);
+
+/* b1 := b1 - V1*w */
+
+ i__2 = i__ - 1;
+ ztrmv_("Lower", "NO TRANSPOSE", "UNIT", &i__2, &a[*k + 1 + a_dim1]
+, lda, &t[*nb * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zaxpy_(&i__2, &z__1, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 + i__
+ * a_dim1], &c__1);
+
+ i__2 = *k + i__ - 1 + (i__ - 1) * a_dim1;
+ a[i__2].r = ei.r, a[i__2].i = ei.i;
+ }
+
+/* Generate the elementary reflector H(I) to annihilate */
+/* A(K+I+1:N,I) */
+
+ i__2 = *n - *k - i__ + 1;
+/* Computing MIN */
+ i__3 = *k + i__ + 1;
+ zlarfg_(&i__2, &a[*k + i__ + i__ * a_dim1], &a[min(i__3, *n)+ i__ *
+ a_dim1], &c__1, &tau[i__]);
+ i__2 = *k + i__ + i__ * a_dim1;
+ ei.r = a[i__2].r, ei.i = a[i__2].i;
+ i__2 = *k + i__ + i__ * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Compute Y(K+1:N,I) */
+
+ i__2 = *n - *k;
+ i__3 = *n - *k - i__ + 1;
+ zgemv_("NO TRANSPOSE", &i__2, &i__3, &c_b2, &a[*k + 1 + (i__ + 1) *
+ a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b1, &y[*
+ k + 1 + i__ * y_dim1], &c__1);
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*k + i__ +
+ a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b1, &t[
+ i__ * t_dim1 + 1], &c__1);
+ i__2 = *n - *k;
+ i__3 = i__ - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("NO TRANSPOSE", &i__2, &i__3, &z__1, &y[*k + 1 + y_dim1], ldy,
+ &t[i__ * t_dim1 + 1], &c__1, &c_b2, &y[*k + 1 + i__ * y_dim1],
+ &c__1);
+ i__2 = *n - *k;
+ zscal_(&i__2, &tau[i__], &y[*k + 1 + i__ * y_dim1], &c__1);
+
+/* Compute T(1:I,I) */
+
+ i__2 = i__ - 1;
+ i__3 = i__;
+ z__1.r = -tau[i__3].r, z__1.i = -tau[i__3].i;
+ zscal_(&i__2, &z__1, &t[i__ * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ ztrmv_("Upper", "No Transpose", "NON-UNIT", &i__2, &t[t_offset], ldt,
+ &t[i__ * t_dim1 + 1], &c__1)
+ ;
+ i__2 = i__ + i__ * t_dim1;
+ i__3 = i__;
+ t[i__2].r = tau[i__3].r, t[i__2].i = tau[i__3].i;
+
+/* L10: */
+ }
+ i__1 = *k + *nb + *nb * a_dim1;
+ a[i__1].r = ei.r, a[i__1].i = ei.i;
+
+/* Compute Y(1:K,1:NB) */
+
+ zlacpy_("ALL", k, nb, &a[(a_dim1 << 1) + 1], lda, &y[y_offset], ldy);
+ ztrmm_("RIGHT", "Lower", "NO TRANSPOSE", "UNIT", k, nb, &c_b2, &a[*k + 1
+ + a_dim1], lda, &y[y_offset], ldy);
+ if (*n > *k + *nb) {
+ i__1 = *n - *k - *nb;
+ zgemm_("NO TRANSPOSE", "NO TRANSPOSE", k, nb, &i__1, &c_b2, &a[(*nb +
+ 2) * a_dim1 + 1], lda, &a[*k + 1 + *nb + a_dim1], lda, &c_b2,
+ &y[y_offset], ldy);
+ }
+ ztrmm_("RIGHT", "Upper", "NO TRANSPOSE", "NON-UNIT", k, nb, &c_b2, &t[
+ t_offset], ldt, &y[y_offset], ldy);
+
+ return 0;
+
+/* End of ZLAHR2 */
+
+} /* zlahr2_ */
diff --git a/contrib/libs/clapack/zlahrd.c b/contrib/libs/clapack/zlahrd.c
new file mode 100644
index 0000000000..b5a8f6fc82
--- /dev/null
+++ b/contrib/libs/clapack/zlahrd.c
@@ -0,0 +1,301 @@
+/* zlahrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zlahrd_(integer *n, integer *k, integer *nb,
+ doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *t,
+ integer *ldt, doublecomplex *y, integer *ldy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2,
+ i__3;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__;
+ doublecomplex ei;
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *), zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *),
+ zcopy_(integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *), zaxpy_(integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *, integer *), ztrmv_(char *, char *,
+ char *, integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *), zlarfg_(integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *),
+ zlacgv_(integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAHRD reduces the first NB columns of a complex general n-by-(n-k+1) */
+/* matrix A so that elements below the k-th subdiagonal are zero. The */
+/* reduction is performed by a unitary similarity transformation */
+/* Q' * A * Q. The routine returns the matrices V and T which determine */
+/* Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T. */
+
+/* This is an OBSOLETE auxiliary routine. */
+/* This routine will be 'deprecated' in a future release. */
+/* Please use the new routine ZLAHR2 instead. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. */
+
+/* K (input) INTEGER */
+/* The offset for the reduction. Elements below the k-th */
+/* subdiagonal in the first NB columns are reduced to zero. */
+
+/* NB (input) INTEGER */
+/* The number of columns to be reduced. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N-K+1) */
+/* On entry, the n-by-(n-k+1) general matrix A. */
+/* On exit, the elements on and above the k-th subdiagonal in */
+/* the first NB columns are overwritten with the corresponding */
+/* elements of the reduced matrix; the elements below the k-th */
+/* subdiagonal, with the array TAU, represent the matrix Q as a */
+/* product of elementary reflectors. The other columns of A are */
+/* unchanged. See Further Details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (output) COMPLEX*16 array, dimension (NB) */
+/* The scalar factors of the elementary reflectors. See Further */
+/* Details. */
+
+/* T (output) COMPLEX*16 array, dimension (LDT,NB) */
+/* The upper triangular matrix T. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= NB. */
+
+/* Y (output) COMPLEX*16 array, dimension (LDY,NB) */
+/* The n-by-nb matrix Y. */
+
+/* LDY (input) INTEGER */
+/* The leading dimension of the array Y. LDY >= max(1,N). */
+
+/* Further Details */
+/* =============== */
+
+/* The matrix Q is represented as a product of nb elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(nb). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in */
+/* A(i+k+1:n,i), and tau in TAU(i). */
+
+/* The elements of the vectors v together form the (n-k+1)-by-nb matrix */
+/* V which is needed, with T and Y, to apply the transformation to the */
+/* unreduced part of the matrix, using an update of the form: */
+/* A := (I - V*T*V') * (A - Y*V'). */
+
+/* The contents of A on exit are illustrated by the following example */
+/* with n = 7, k = 3 and nb = 2: */
+
+/* ( a h a a a ) */
+/* ( a h a a a ) */
+/* ( a h a a a ) */
+/* ( h h a a a ) */
+/* ( v1 h a a a ) */
+/* ( v1 v2 a a a ) */
+/* ( v1 v2 a a a ) */
+
+/* where a denotes an element of the original matrix A, h denotes a */
+/* modified element of the upper Hessenberg matrix H, and vi denotes an */
+/* element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ --tau;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ y_dim1 = *ldy;
+ y_offset = 1 + y_dim1;
+ y -= y_offset;
+
+ /* Function Body */
+ if (*n <= 1) {
+ return 0;
+ }
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (i__ > 1) {
+
+/* Update A(1:n,i) */
+
+/* Compute i-th column of A - Y * V' */
+
+ i__2 = i__ - 1;
+ zlacgv_(&i__2, &a[*k + i__ - 1 + a_dim1], lda);
+ i__2 = i__ - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", n, &i__2, &z__1, &y[y_offset], ldy, &a[*k
+ + i__ - 1 + a_dim1], lda, &c_b2, &a[i__ * a_dim1 + 1], &
+ c__1);
+ i__2 = i__ - 1;
+ zlacgv_(&i__2, &a[*k + i__ - 1 + a_dim1], lda);
+
+/* Apply I - V * T' * V' to this column (call it b) from the */
+/* left, using the last column of T as workspace */
+
+/* Let V = ( V1 ) and b = ( b1 ) (first I-1 rows) */
+/* ( V2 ) ( b2 ) */
+
+/* where V1 is unit lower triangular */
+
+/* w := V1' * b1 */
+
+ i__2 = i__ - 1;
+ zcopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 +
+ 1], &c__1);
+ i__2 = i__ - 1;
+ ztrmv_("Lower", "Conjugate transpose", "Unit", &i__2, &a[*k + 1 +
+ a_dim1], lda, &t[*nb * t_dim1 + 1], &c__1);
+
+/* w := w + V2'*b2 */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*k + i__ +
+ a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b2, &
+ t[*nb * t_dim1 + 1], &c__1);
+
+/* w := T'*w */
+
+ i__2 = i__ - 1;
+ ztrmv_("Upper", "Conjugate transpose", "Non-unit", &i__2, &t[
+ t_offset], ldt, &t[*nb * t_dim1 + 1], &c__1);
+
+/* b2 := b2 - V2*w */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__2, &i__3, &z__1, &a[*k + i__ + a_dim1],
+ lda, &t[*nb * t_dim1 + 1], &c__1, &c_b2, &a[*k + i__ +
+ i__ * a_dim1], &c__1);
+
+/* b1 := b1 - V1*w */
+
+ i__2 = i__ - 1;
+ ztrmv_("Lower", "No transpose", "Unit", &i__2, &a[*k + 1 + a_dim1]
+, lda, &t[*nb * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zaxpy_(&i__2, &z__1, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 + i__
+ * a_dim1], &c__1);
+
+ i__2 = *k + i__ - 1 + (i__ - 1) * a_dim1;
+ a[i__2].r = ei.r, a[i__2].i = ei.i;
+ }
+
+/* Generate the elementary reflector H(i) to annihilate */
+/* A(k+i+1:n,i) */
+
+ i__2 = *k + i__ + i__ * a_dim1;
+ ei.r = a[i__2].r, ei.i = a[i__2].i;
+ i__2 = *n - *k - i__ + 1;
+/* Computing MIN */
+ i__3 = *k + i__ + 1;
+ zlarfg_(&i__2, &ei, &a[min(i__3, *n)+ i__ * a_dim1], &c__1, &tau[i__])
+ ;
+ i__2 = *k + i__ + i__ * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Compute Y(1:n,i) */
+
+ i__2 = *n - *k - i__ + 1;
+ zgemv_("No transpose", n, &i__2, &c_b2, &a[(i__ + 1) * a_dim1 + 1],
+ lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b1, &y[i__ *
+ y_dim1 + 1], &c__1);
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*k + i__ +
+ a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b1, &t[
+ i__ * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", n, &i__2, &z__1, &y[y_offset], ldy, &t[i__ *
+ t_dim1 + 1], &c__1, &c_b2, &y[i__ * y_dim1 + 1], &c__1);
+ zscal_(n, &tau[i__], &y[i__ * y_dim1 + 1], &c__1);
+
+/* Compute T(1:i,i) */
+
+ i__2 = i__ - 1;
+ i__3 = i__;
+ z__1.r = -tau[i__3].r, z__1.i = -tau[i__3].i;
+ zscal_(&i__2, &z__1, &t[i__ * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ ztrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[t_offset], ldt,
+ &t[i__ * t_dim1 + 1], &c__1)
+ ;
+ i__2 = i__ + i__ * t_dim1;
+ i__3 = i__;
+ t[i__2].r = tau[i__3].r, t[i__2].i = tau[i__3].i;
+
+/* L10: */
+ }
+ i__1 = *k + *nb + *nb * a_dim1;
+ a[i__1].r = ei.r, a[i__1].i = ei.i;
+
+ return 0;
+
+/* End of ZLAHRD */
+
+} /* zlahrd_ */
diff --git a/contrib/libs/clapack/zlaic1.c b/contrib/libs/clapack/zlaic1.c
new file mode 100644
index 0000000000..4312ffe974
--- /dev/null
+++ b/contrib/libs/clapack/zlaic1.c
@@ -0,0 +1,451 @@
+/* zlaic1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zlaic1_(integer *job, integer *j, doublecomplex *x,
+ doublereal *sest, doublecomplex *w, doublecomplex *gamma, doublereal *
+ sestpr, doublecomplex *s, doublecomplex *c__)
+{
+ /* System generated locals */
+ doublereal d__1, d__2;
+ doublecomplex z__1, z__2, z__3, z__4, z__5, z__6;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *), z_sqrt(doublecomplex *,
+ doublecomplex *);
+ double sqrt(doublereal);
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ doublereal b, t, s1, s2, scl, eps, tmp;
+ doublecomplex sine;
+ doublereal test, zeta1, zeta2;
+ doublecomplex alpha;
+ doublereal norma;
+ extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ extern doublereal dlamch_(char *);
+ doublereal absgam, absalp;
+ doublecomplex cosine;
+ doublereal absest;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAIC1 applies one step of incremental condition estimation in */
+/* its simplest version: */
+
+/* Let x, twonorm(x) = 1, be an approximate singular vector of a j-by-j */
+/* lower triangular matrix L, such that */
+/* twonorm(L*x) = sest */
+/* Then ZLAIC1 computes sestpr, s, c such that */
+/* the vector */
+/* [ s*x ] */
+/* xhat = [ c ] */
+/* is an approximate singular vector of */
+/* [ L 0 ] */
+/* Lhat = [ w' gamma ] */
+/* in the sense that */
+/* twonorm(Lhat*xhat) = sestpr. */
+
+/* Depending on JOB, an estimate for the largest or smallest singular */
+/* value is computed. */
+
+/* Note that [s c]' and sestpr**2 is an eigenpair of the system */
+
+/* diag(sest*sest, 0) + [alpha gamma] * [ conjg(alpha) ] */
+/* [ conjg(gamma) ] */
+
+/* where alpha = conjg(x)'*w. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) INTEGER */
+/* = 1: an estimate for the largest singular value is computed. */
+/* = 2: an estimate for the smallest singular value is computed. */
+
+/* J (input) INTEGER */
+/* Length of X and W */
+
+/* X (input) COMPLEX*16 array, dimension (J) */
+/* The j-vector x. */
+
+/* SEST (input) DOUBLE PRECISION */
+/* Estimated singular value of j by j matrix L */
+
+/* W (input) COMPLEX*16 array, dimension (J) */
+/* The j-vector w. */
+
+/* GAMMA (input) COMPLEX*16 */
+/* The diagonal element gamma. */
+
+/* SESTPR (output) DOUBLE PRECISION */
+/* Estimated singular value of (j+1) by (j+1) matrix Lhat. */
+
+/* S (output) COMPLEX*16 */
+/* Sine needed in forming xhat. */
+
+/* C (output) COMPLEX*16 */
+/* Cosine needed in forming xhat. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --w;
+ --x;
+
+ /* Function Body */
+ eps = dlamch_("Epsilon");
+ zdotc_(&z__1, j, &x[1], &c__1, &w[1], &c__1);
+ alpha.r = z__1.r, alpha.i = z__1.i;
+
+ absalp = z_abs(&alpha);
+ absgam = z_abs(gamma);
+ absest = abs(*sest);
+
+ if (*job == 1) {
+
+/* Estimating largest singular value */
+
+/* special cases */
+
+ if (*sest == 0.) {
+ s1 = max(absgam,absalp);
+ if (s1 == 0.) {
+ s->r = 0., s->i = 0.;
+ c__->r = 1., c__->i = 0.;
+ *sestpr = 0.;
+ } else {
+ z__1.r = alpha.r / s1, z__1.i = alpha.i / s1;
+ s->r = z__1.r, s->i = z__1.i;
+ z__1.r = gamma->r / s1, z__1.i = gamma->i / s1;
+ c__->r = z__1.r, c__->i = z__1.i;
+ d_cnjg(&z__4, s);
+ z__3.r = s->r * z__4.r - s->i * z__4.i, z__3.i = s->r *
+ z__4.i + s->i * z__4.r;
+ d_cnjg(&z__6, c__);
+ z__5.r = c__->r * z__6.r - c__->i * z__6.i, z__5.i = c__->r *
+ z__6.i + c__->i * z__6.r;
+ z__2.r = z__3.r + z__5.r, z__2.i = z__3.i + z__5.i;
+ z_sqrt(&z__1, &z__2);
+ tmp = z__1.r;
+ z__1.r = s->r / tmp, z__1.i = s->i / tmp;
+ s->r = z__1.r, s->i = z__1.i;
+ z__1.r = c__->r / tmp, z__1.i = c__->i / tmp;
+ c__->r = z__1.r, c__->i = z__1.i;
+ *sestpr = s1 * tmp;
+ }
+ return 0;
+ } else if (absgam <= eps * absest) {
+ s->r = 1., s->i = 0.;
+ c__->r = 0., c__->i = 0.;
+ tmp = max(absest,absalp);
+ s1 = absest / tmp;
+ s2 = absalp / tmp;
+ *sestpr = tmp * sqrt(s1 * s1 + s2 * s2);
+ return 0;
+ } else if (absalp <= eps * absest) {
+ s1 = absgam;
+ s2 = absest;
+ if (s1 <= s2) {
+ s->r = 1., s->i = 0.;
+ c__->r = 0., c__->i = 0.;
+ *sestpr = s2;
+ } else {
+ s->r = 0., s->i = 0.;
+ c__->r = 1., c__->i = 0.;
+ *sestpr = s1;
+ }
+ return 0;
+ } else if (absest <= eps * absalp || absest <= eps * absgam) {
+ s1 = absgam;
+ s2 = absalp;
+ if (s1 <= s2) {
+ tmp = s1 / s2;
+ scl = sqrt(tmp * tmp + 1.);
+ *sestpr = s2 * scl;
+ z__2.r = alpha.r / s2, z__2.i = alpha.i / s2;
+ z__1.r = z__2.r / scl, z__1.i = z__2.i / scl;
+ s->r = z__1.r, s->i = z__1.i;
+ z__2.r = gamma->r / s2, z__2.i = gamma->i / s2;
+ z__1.r = z__2.r / scl, z__1.i = z__2.i / scl;
+ c__->r = z__1.r, c__->i = z__1.i;
+ } else {
+ tmp = s2 / s1;
+ scl = sqrt(tmp * tmp + 1.);
+ *sestpr = s1 * scl;
+ z__2.r = alpha.r / s1, z__2.i = alpha.i / s1;
+ z__1.r = z__2.r / scl, z__1.i = z__2.i / scl;
+ s->r = z__1.r, s->i = z__1.i;
+ z__2.r = gamma->r / s1, z__2.i = gamma->i / s1;
+ z__1.r = z__2.r / scl, z__1.i = z__2.i / scl;
+ c__->r = z__1.r, c__->i = z__1.i;
+ }
+ return 0;
+ } else {
+
+/* normal case */
+
+ zeta1 = absalp / absest;
+ zeta2 = absgam / absest;
+
+ b = (1. - zeta1 * zeta1 - zeta2 * zeta2) * .5;
+ d__1 = zeta1 * zeta1;
+ c__->r = d__1, c__->i = 0.;
+ if (b > 0.) {
+ d__1 = b * b;
+ z__4.r = d__1 + c__->r, z__4.i = c__->i;
+ z_sqrt(&z__3, &z__4);
+ z__2.r = b + z__3.r, z__2.i = z__3.i;
+ z_div(&z__1, c__, &z__2);
+ t = z__1.r;
+ } else {
+ d__1 = b * b;
+ z__3.r = d__1 + c__->r, z__3.i = c__->i;
+ z_sqrt(&z__2, &z__3);
+ z__1.r = z__2.r - b, z__1.i = z__2.i;
+ t = z__1.r;
+ }
+
+ z__3.r = alpha.r / absest, z__3.i = alpha.i / absest;
+ z__2.r = -z__3.r, z__2.i = -z__3.i;
+ z__1.r = z__2.r / t, z__1.i = z__2.i / t;
+ sine.r = z__1.r, sine.i = z__1.i;
+ z__3.r = gamma->r / absest, z__3.i = gamma->i / absest;
+ z__2.r = -z__3.r, z__2.i = -z__3.i;
+ d__1 = t + 1.;
+ z__1.r = z__2.r / d__1, z__1.i = z__2.i / d__1;
+ cosine.r = z__1.r, cosine.i = z__1.i;
+ d_cnjg(&z__4, &sine);
+ z__3.r = sine.r * z__4.r - sine.i * z__4.i, z__3.i = sine.r *
+ z__4.i + sine.i * z__4.r;
+ d_cnjg(&z__6, &cosine);
+ z__5.r = cosine.r * z__6.r - cosine.i * z__6.i, z__5.i = cosine.r
+ * z__6.i + cosine.i * z__6.r;
+ z__2.r = z__3.r + z__5.r, z__2.i = z__3.i + z__5.i;
+ z_sqrt(&z__1, &z__2);
+ tmp = z__1.r;
+ z__1.r = sine.r / tmp, z__1.i = sine.i / tmp;
+ s->r = z__1.r, s->i = z__1.i;
+ z__1.r = cosine.r / tmp, z__1.i = cosine.i / tmp;
+ c__->r = z__1.r, c__->i = z__1.i;
+ *sestpr = sqrt(t + 1.) * absest;
+ return 0;
+ }
+
+ } else if (*job == 2) {
+
+/* Estimating smallest singular value */
+
+/* special cases */
+
+ if (*sest == 0.) {
+ *sestpr = 0.;
+ if (max(absgam,absalp) == 0.) {
+ sine.r = 1., sine.i = 0.;
+ cosine.r = 0., cosine.i = 0.;
+ } else {
+ d_cnjg(&z__2, gamma);
+ z__1.r = -z__2.r, z__1.i = -z__2.i;
+ sine.r = z__1.r, sine.i = z__1.i;
+ d_cnjg(&z__1, &alpha);
+ cosine.r = z__1.r, cosine.i = z__1.i;
+ }
+/* Computing MAX */
+ d__1 = z_abs(&sine), d__2 = z_abs(&cosine);
+ s1 = max(d__1,d__2);
+ z__1.r = sine.r / s1, z__1.i = sine.i / s1;
+ s->r = z__1.r, s->i = z__1.i;
+ z__1.r = cosine.r / s1, z__1.i = cosine.i / s1;
+ c__->r = z__1.r, c__->i = z__1.i;
+ d_cnjg(&z__4, s);
+ z__3.r = s->r * z__4.r - s->i * z__4.i, z__3.i = s->r * z__4.i +
+ s->i * z__4.r;
+ d_cnjg(&z__6, c__);
+ z__5.r = c__->r * z__6.r - c__->i * z__6.i, z__5.i = c__->r *
+ z__6.i + c__->i * z__6.r;
+ z__2.r = z__3.r + z__5.r, z__2.i = z__3.i + z__5.i;
+ z_sqrt(&z__1, &z__2);
+ tmp = z__1.r;
+ z__1.r = s->r / tmp, z__1.i = s->i / tmp;
+ s->r = z__1.r, s->i = z__1.i;
+ z__1.r = c__->r / tmp, z__1.i = c__->i / tmp;
+ c__->r = z__1.r, c__->i = z__1.i;
+ return 0;
+ } else if (absgam <= eps * absest) {
+ s->r = 0., s->i = 0.;
+ c__->r = 1., c__->i = 0.;
+ *sestpr = absgam;
+ return 0;
+ } else if (absalp <= eps * absest) {
+ s1 = absgam;
+ s2 = absest;
+ if (s1 <= s2) {
+ s->r = 0., s->i = 0.;
+ c__->r = 1., c__->i = 0.;
+ *sestpr = s1;
+ } else {
+ s->r = 1., s->i = 0.;
+ c__->r = 0., c__->i = 0.;
+ *sestpr = s2;
+ }
+ return 0;
+ } else if (absest <= eps * absalp || absest <= eps * absgam) {
+ s1 = absgam;
+ s2 = absalp;
+ if (s1 <= s2) {
+ tmp = s1 / s2;
+ scl = sqrt(tmp * tmp + 1.);
+ *sestpr = absest * (tmp / scl);
+ d_cnjg(&z__4, gamma);
+ z__3.r = z__4.r / s2, z__3.i = z__4.i / s2;
+ z__2.r = -z__3.r, z__2.i = -z__3.i;
+ z__1.r = z__2.r / scl, z__1.i = z__2.i / scl;
+ s->r = z__1.r, s->i = z__1.i;
+ d_cnjg(&z__3, &alpha);
+ z__2.r = z__3.r / s2, z__2.i = z__3.i / s2;
+ z__1.r = z__2.r / scl, z__1.i = z__2.i / scl;
+ c__->r = z__1.r, c__->i = z__1.i;
+ } else {
+ tmp = s2 / s1;
+ scl = sqrt(tmp * tmp + 1.);
+ *sestpr = absest / scl;
+ d_cnjg(&z__4, gamma);
+ z__3.r = z__4.r / s1, z__3.i = z__4.i / s1;
+ z__2.r = -z__3.r, z__2.i = -z__3.i;
+ z__1.r = z__2.r / scl, z__1.i = z__2.i / scl;
+ s->r = z__1.r, s->i = z__1.i;
+ d_cnjg(&z__3, &alpha);
+ z__2.r = z__3.r / s1, z__2.i = z__3.i / s1;
+ z__1.r = z__2.r / scl, z__1.i = z__2.i / scl;
+ c__->r = z__1.r, c__->i = z__1.i;
+ }
+ return 0;
+ } else {
+
+/* normal case */
+
+ zeta1 = absalp / absest;
+ zeta2 = absgam / absest;
+
+/* Computing MAX */
+ d__1 = zeta1 * zeta1 + 1. + zeta1 * zeta2, d__2 = zeta1 * zeta2 +
+ zeta2 * zeta2;
+ norma = max(d__1,d__2);
+
+/* See if root is closer to zero or to ONE */
+
+ test = (zeta1 - zeta2) * 2. * (zeta1 + zeta2) + 1.;
+ if (test >= 0.) {
+
+/* root is close to zero, compute directly */
+
+ b = (zeta1 * zeta1 + zeta2 * zeta2 + 1.) * .5;
+ d__1 = zeta2 * zeta2;
+ c__->r = d__1, c__->i = 0.;
+ d__2 = b * b;
+ z__2.r = d__2 - c__->r, z__2.i = -c__->i;
+ d__1 = b + sqrt(z_abs(&z__2));
+ z__1.r = c__->r / d__1, z__1.i = c__->i / d__1;
+ t = z__1.r;
+ z__2.r = alpha.r / absest, z__2.i = alpha.i / absest;
+ d__1 = 1. - t;
+ z__1.r = z__2.r / d__1, z__1.i = z__2.i / d__1;
+ sine.r = z__1.r, sine.i = z__1.i;
+ z__3.r = gamma->r / absest, z__3.i = gamma->i / absest;
+ z__2.r = -z__3.r, z__2.i = -z__3.i;
+ z__1.r = z__2.r / t, z__1.i = z__2.i / t;
+ cosine.r = z__1.r, cosine.i = z__1.i;
+ *sestpr = sqrt(t + eps * 4. * eps * norma) * absest;
+ } else {
+
+/* root is closer to ONE, shift by that amount */
+
+ b = (zeta2 * zeta2 + zeta1 * zeta1 - 1.) * .5;
+ d__1 = zeta1 * zeta1;
+ c__->r = d__1, c__->i = 0.;
+ if (b >= 0.) {
+ z__2.r = -c__->r, z__2.i = -c__->i;
+ d__1 = b * b;
+ z__5.r = d__1 + c__->r, z__5.i = c__->i;
+ z_sqrt(&z__4, &z__5);
+ z__3.r = b + z__4.r, z__3.i = z__4.i;
+ z_div(&z__1, &z__2, &z__3);
+ t = z__1.r;
+ } else {
+ d__1 = b * b;
+ z__3.r = d__1 + c__->r, z__3.i = c__->i;
+ z_sqrt(&z__2, &z__3);
+ z__1.r = b - z__2.r, z__1.i = -z__2.i;
+ t = z__1.r;
+ }
+ z__3.r = alpha.r / absest, z__3.i = alpha.i / absest;
+ z__2.r = -z__3.r, z__2.i = -z__3.i;
+ z__1.r = z__2.r / t, z__1.i = z__2.i / t;
+ sine.r = z__1.r, sine.i = z__1.i;
+ z__3.r = gamma->r / absest, z__3.i = gamma->i / absest;
+ z__2.r = -z__3.r, z__2.i = -z__3.i;
+ d__1 = t + 1.;
+ z__1.r = z__2.r / d__1, z__1.i = z__2.i / d__1;
+ cosine.r = z__1.r, cosine.i = z__1.i;
+ *sestpr = sqrt(t + 1. + eps * 4. * eps * norma) * absest;
+ }
+ d_cnjg(&z__4, &sine);
+ z__3.r = sine.r * z__4.r - sine.i * z__4.i, z__3.i = sine.r *
+ z__4.i + sine.i * z__4.r;
+ d_cnjg(&z__6, &cosine);
+ z__5.r = cosine.r * z__6.r - cosine.i * z__6.i, z__5.i = cosine.r
+ * z__6.i + cosine.i * z__6.r;
+ z__2.r = z__3.r + z__5.r, z__2.i = z__3.i + z__5.i;
+ z_sqrt(&z__1, &z__2);
+ tmp = z__1.r;
+ z__1.r = sine.r / tmp, z__1.i = sine.i / tmp;
+ s->r = z__1.r, s->i = z__1.i;
+ z__1.r = cosine.r / tmp, z__1.i = cosine.i / tmp;
+ c__->r = z__1.r, c__->i = z__1.i;
+ return 0;
+
+ }
+ }
+ return 0;
+
+/* End of ZLAIC1 */
+
+} /* zlaic1_ */
diff --git a/contrib/libs/clapack/zlals0.c b/contrib/libs/clapack/zlals0.c
new file mode 100644
index 0000000000..354ed21d14
--- /dev/null
+++ b/contrib/libs/clapack/zlals0.c
@@ -0,0 +1,563 @@
+/* zlals0.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b5 = -1.;
+static integer c__1 = 1;
+static doublereal c_b13 = 1.;
+static doublereal c_b15 = 0.;
+static integer c__0 = 0;
+
+/* Subroutine */ int zlals0_(integer *icompq, integer *nl, integer *nr,
+ integer *sqre, integer *nrhs, doublecomplex *b, integer *ldb,
+ doublecomplex *bx, integer *ldbx, integer *perm, integer *givptr,
+ integer *givcol, integer *ldgcol, doublereal *givnum, integer *ldgnum,
+ doublereal *poles, doublereal *difl, doublereal *difr, doublereal *
+ z__, integer *k, doublereal *c__, doublereal *s, doublereal *rwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, difr_dim1, difr_offset, givnum_dim1,
+ givnum_offset, poles_dim1, poles_offset, b_dim1, b_offset,
+ bx_dim1, bx_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, m, n;
+ doublereal dj;
+ integer nlp1, jcol;
+ doublereal temp;
+ integer jrow;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ doublereal diflj, difrj, dsigj;
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *), zdrot_(integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *);
+ extern doublereal dlamc3_(doublereal *, doublereal *);
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), xerbla_(char *, integer *);
+ doublereal dsigjp;
+ extern /* Subroutine */ int zdscal_(integer *, doublereal *,
+ doublecomplex *, integer *), zlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublecomplex *
+, integer *, integer *), zlacpy_(char *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLALS0 applies back the multiplying factors of either the left or the */
+/* right singular vector matrix of a diagonal matrix appended by a row */
+/* to the right hand side matrix B in solving the least squares problem */
+/* using the divide-and-conquer SVD approach. */
+
+/* For the left singular vector matrix, three types of orthogonal */
+/* matrices are involved: */
+
+/* (1L) Givens rotations: the number of such rotations is GIVPTR; the */
+/* pairs of columns/rows they were applied to are stored in GIVCOL; */
+/* and the C- and S-values of these rotations are stored in GIVNUM. */
+
+/* (2L) Permutation. The (NL+1)-st row of B is to be moved to the first */
+/* row, and for J=2:N, PERM(J)-th row of B is to be moved to the */
+/* J-th row. */
+
+/* (3L) The left singular vector matrix of the remaining matrix. */
+
+/* For the right singular vector matrix, four types of orthogonal */
+/* matrices are involved: */
+
+/* (1R) The right singular vector matrix of the remaining matrix. */
+
+/* (2R) If SQRE = 1, one extra Givens rotation to generate the right */
+/* null space. */
+
+/* (3R) The inverse transformation of (2L). */
+
+/* (4R) The inverse transformation of (1L). */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* Specifies whether singular vectors are to be computed in */
+/* factored form: */
+/* = 0: Left singular vector matrix. */
+/* = 1: Right singular vector matrix. */
+
+/* NL (input) INTEGER */
+/* The row dimension of the upper block. NL >= 1. */
+
+/* NR (input) INTEGER */
+/* The row dimension of the lower block. NR >= 1. */
+
+/* SQRE (input) INTEGER */
+/* = 0: the lower block is an NR-by-NR square matrix. */
+/* = 1: the lower block is an NR-by-(NR+1) rectangular matrix. */
+
+/* The bidiagonal matrix has row dimension N = NL + NR + 1, */
+/* and column dimension M = N + SQRE. */
+
+/* NRHS (input) INTEGER */
+/* The number of columns of B and BX. NRHS must be at least 1. */
+
+/* B (input/output) COMPLEX*16 array, dimension ( LDB, NRHS ) */
+/* On input, B contains the right hand sides of the least */
+/* squares problem in rows 1 through M. On output, B contains */
+/* the solution X in rows 1 through N. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B. LDB must be at least */
+/* max(1,MAX( M, N ) ). */
+
+/* BX (workspace) COMPLEX*16 array, dimension ( LDBX, NRHS ) */
+
+/* LDBX (input) INTEGER */
+/* The leading dimension of BX. */
+
+/* PERM (input) INTEGER array, dimension ( N ) */
+/* The permutations (from deflation and sorting) applied */
+/* to the two blocks. */
+
+/* GIVPTR (input) INTEGER */
+/* The number of Givens rotations which took place in this */
+/* subproblem. */
+
+/* GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 ) */
+/* Each pair of numbers indicates a pair of rows/columns */
+/* involved in a Givens rotation. */
+
+/* LDGCOL (input) INTEGER */
+/* The leading dimension of GIVCOL, must be at least N. */
+
+/* GIVNUM (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) */
+/* Each number indicates the C or S value used in the */
+/* corresponding Givens rotation. */
+
+/* LDGNUM (input) INTEGER */
+/* The leading dimension of arrays DIFR, POLES and */
+/* GIVNUM, must be at least K. */
+
+/* POLES (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) */
+/* On entry, POLES(1:K, 1) contains the new singular */
+/* values obtained from solving the secular equation, and */
+/* POLES(1:K, 2) is an array containing the poles in the secular */
+/* equation. */
+
+/* DIFL (input) DOUBLE PRECISION array, dimension ( K ). */
+/* On entry, DIFL(I) is the distance between I-th updated */
+/* (undeflated) singular value and the I-th (undeflated) old */
+/* singular value. */
+
+/* DIFR (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ). */
+/* On entry, DIFR(I, 1) contains the distances between I-th */
+/* updated (undeflated) singular value and the I+1-th */
+/* (undeflated) old singular value. And DIFR(I, 2) is the */
+/* normalizing factor for the I-th right singular vector. */
+
+/* Z (input) DOUBLE PRECISION array, dimension ( K ) */
+/* Contain the components of the deflation-adjusted updating row */
+/* vector. */
+
+/* K (input) INTEGER */
+/* Contains the dimension of the non-deflated matrix, */
+/* This is the order of the related secular equation. 1 <= K <=N. */
+
+/* C (input) DOUBLE PRECISION */
+/* C contains garbage if SQRE =0 and the C-value of a Givens */
+/* rotation related to the right null space if SQRE = 1. */
+
+/* S (input) DOUBLE PRECISION */
+/* S contains garbage if SQRE =0 and the S-value of a Givens */
+/* rotation related to the right null space if SQRE = 1. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension */
+/* ( K*(1+NRHS) + 2*NRHS ) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Ren-Cang Li, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ bx_dim1 = *ldbx;
+ bx_offset = 1 + bx_dim1;
+ bx -= bx_offset;
+ --perm;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ difr_dim1 = *ldgnum;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ poles_dim1 = *ldgnum;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ givnum_dim1 = *ldgnum;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ --difl;
+ --z__;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*nl < 1) {
+ *info = -2;
+ } else if (*nr < 1) {
+ *info = -3;
+ } else if (*sqre < 0 || *sqre > 1) {
+ *info = -4;
+ }
+
+ n = *nl + *nr + 1;
+
+ if (*nrhs < 1) {
+ *info = -5;
+ } else if (*ldb < n) {
+ *info = -7;
+ } else if (*ldbx < n) {
+ *info = -9;
+ } else if (*givptr < 0) {
+ *info = -11;
+ } else if (*ldgcol < n) {
+ *info = -13;
+ } else if (*ldgnum < n) {
+ *info = -15;
+ } else if (*k < 1) {
+ *info = -20;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZLALS0", &i__1);
+ return 0;
+ }
+
+ m = n + *sqre;
+ nlp1 = *nl + 1;
+
+ if (*icompq == 0) {
+
+/* Apply back orthogonal transformations from the left. */
+
+/* Step (1L): apply back the Givens rotations performed. */
+
+ i__1 = *givptr;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ zdrot_(nrhs, &b[givcol[i__ + (givcol_dim1 << 1)] + b_dim1], ldb, &
+ b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[i__ +
+ (givnum_dim1 << 1)], &givnum[i__ + givnum_dim1]);
+/* L10: */
+ }
+
+/* Step (2L): permute rows of B. */
+
+ zcopy_(nrhs, &b[nlp1 + b_dim1], ldb, &bx[bx_dim1 + 1], ldbx);
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ zcopy_(nrhs, &b[perm[i__] + b_dim1], ldb, &bx[i__ + bx_dim1],
+ ldbx);
+/* L20: */
+ }
+
+/* Step (3L): apply the inverse of the left singular vector */
+/* matrix to BX. */
+
+ if (*k == 1) {
+ zcopy_(nrhs, &bx[bx_offset], ldbx, &b[b_offset], ldb);
+ if (z__[1] < 0.) {
+ zdscal_(nrhs, &c_b5, &b[b_offset], ldb);
+ }
+ } else {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ diflj = difl[j];
+ dj = poles[j + poles_dim1];
+ dsigj = -poles[j + (poles_dim1 << 1)];
+ if (j < *k) {
+ difrj = -difr[j + difr_dim1];
+ dsigjp = -poles[j + 1 + (poles_dim1 << 1)];
+ }
+ if (z__[j] == 0. || poles[j + (poles_dim1 << 1)] == 0.) {
+ rwork[j] = 0.;
+ } else {
+ rwork[j] = -poles[j + (poles_dim1 << 1)] * z__[j] / diflj
+ / (poles[j + (poles_dim1 << 1)] + dj);
+ }
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (z__[i__] == 0. || poles[i__ + (poles_dim1 << 1)] ==
+ 0.) {
+ rwork[i__] = 0.;
+ } else {
+ rwork[i__] = poles[i__ + (poles_dim1 << 1)] * z__[i__]
+ / (dlamc3_(&poles[i__ + (poles_dim1 << 1)], &
+ dsigj) - diflj) / (poles[i__ + (poles_dim1 <<
+ 1)] + dj);
+ }
+/* L30: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ if (z__[i__] == 0. || poles[i__ + (poles_dim1 << 1)] ==
+ 0.) {
+ rwork[i__] = 0.;
+ } else {
+ rwork[i__] = poles[i__ + (poles_dim1 << 1)] * z__[i__]
+ / (dlamc3_(&poles[i__ + (poles_dim1 << 1)], &
+ dsigjp) + difrj) / (poles[i__ + (poles_dim1 <<
+ 1)] + dj);
+ }
+/* L40: */
+ }
+ rwork[1] = -1.;
+ temp = dnrm2_(k, &rwork[1], &c__1);
+
+/* Since B and BX are complex, the following call to DGEMV */
+/* is performed in two steps (real and imaginary parts). */
+
+/* CALL DGEMV( 'T', K, NRHS, ONE, BX, LDBX, WORK, 1, ZERO, */
+/* $ B( J, 1 ), LDB ) */
+
+ i__ = *k + (*nrhs << 1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = *k;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++i__;
+ i__4 = jrow + jcol * bx_dim1;
+ rwork[i__] = bx[i__4].r;
+/* L50: */
+ }
+/* L60: */
+ }
+ dgemv_("T", k, nrhs, &c_b13, &rwork[*k + 1 + (*nrhs << 1)], k,
+ &rwork[1], &c__1, &c_b15, &rwork[*k + 1], &c__1);
+ i__ = *k + (*nrhs << 1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = *k;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++i__;
+ rwork[i__] = d_imag(&bx[jrow + jcol * bx_dim1]);
+/* L70: */
+ }
+/* L80: */
+ }
+ dgemv_("T", k, nrhs, &c_b13, &rwork[*k + 1 + (*nrhs << 1)], k,
+ &rwork[1], &c__1, &c_b15, &rwork[*k + 1 + *nrhs], &
+ c__1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = j + jcol * b_dim1;
+ i__4 = jcol + *k;
+ i__5 = jcol + *k + *nrhs;
+ z__1.r = rwork[i__4], z__1.i = rwork[i__5];
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L90: */
+ }
+ zlascl_("G", &c__0, &c__0, &temp, &c_b13, &c__1, nrhs, &b[j +
+ b_dim1], ldb, info);
+/* L100: */
+ }
+ }
+
+/* Move the deflated rows of BX to B also. */
+
+ if (*k < max(m,n)) {
+ i__1 = n - *k;
+ zlacpy_("A", &i__1, nrhs, &bx[*k + 1 + bx_dim1], ldbx, &b[*k + 1
+ + b_dim1], ldb);
+ }
+ } else {
+
+/* Apply back the right orthogonal transformations. */
+
+/* Step (1R): apply back the new right singular vector matrix */
+/* to B. */
+
+ if (*k == 1) {
+ zcopy_(nrhs, &b[b_offset], ldb, &bx[bx_offset], ldbx);
+ } else {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dsigj = poles[j + (poles_dim1 << 1)];
+ if (z__[j] == 0.) {
+ rwork[j] = 0.;
+ } else {
+ rwork[j] = -z__[j] / difl[j] / (dsigj + poles[j +
+ poles_dim1]) / difr[j + (difr_dim1 << 1)];
+ }
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (z__[j] == 0.) {
+ rwork[i__] = 0.;
+ } else {
+ d__1 = -poles[i__ + 1 + (poles_dim1 << 1)];
+ rwork[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difr[
+ i__ + difr_dim1]) / (dsigj + poles[i__ +
+ poles_dim1]) / difr[i__ + (difr_dim1 << 1)];
+ }
+/* L110: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ if (z__[j] == 0.) {
+ rwork[i__] = 0.;
+ } else {
+ d__1 = -poles[i__ + (poles_dim1 << 1)];
+ rwork[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difl[
+ i__]) / (dsigj + poles[i__ + poles_dim1]) /
+ difr[i__ + (difr_dim1 << 1)];
+ }
+/* L120: */
+ }
+
+/* Since B and BX are complex, the following call to DGEMV */
+/* is performed in two steps (real and imaginary parts). */
+
+/* CALL DGEMV( 'T', K, NRHS, ONE, B, LDB, WORK, 1, ZERO, */
+/* $ BX( J, 1 ), LDBX ) */
+
+ i__ = *k + (*nrhs << 1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = *k;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++i__;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[i__] = b[i__4].r;
+/* L130: */
+ }
+/* L140: */
+ }
+ dgemv_("T", k, nrhs, &c_b13, &rwork[*k + 1 + (*nrhs << 1)], k,
+ &rwork[1], &c__1, &c_b15, &rwork[*k + 1], &c__1);
+ i__ = *k + (*nrhs << 1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = *k;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++i__;
+ rwork[i__] = d_imag(&b[jrow + jcol * b_dim1]);
+/* L150: */
+ }
+/* L160: */
+ }
+ dgemv_("T", k, nrhs, &c_b13, &rwork[*k + 1 + (*nrhs << 1)], k,
+ &rwork[1], &c__1, &c_b15, &rwork[*k + 1 + *nrhs], &
+ c__1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = j + jcol * bx_dim1;
+ i__4 = jcol + *k;
+ i__5 = jcol + *k + *nrhs;
+ z__1.r = rwork[i__4], z__1.i = rwork[i__5];
+ bx[i__3].r = z__1.r, bx[i__3].i = z__1.i;
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+
+/* Step (2R): if SQRE = 1, apply back the rotation that is */
+/* related to the right null space of the subproblem. */
+
+ if (*sqre == 1) {
+ zcopy_(nrhs, &b[m + b_dim1], ldb, &bx[m + bx_dim1], ldbx);
+ zdrot_(nrhs, &bx[bx_dim1 + 1], ldbx, &bx[m + bx_dim1], ldbx, c__,
+ s);
+ }
+ if (*k < max(m,n)) {
+ i__1 = n - *k;
+ zlacpy_("A", &i__1, nrhs, &b[*k + 1 + b_dim1], ldb, &bx[*k + 1 +
+ bx_dim1], ldbx);
+ }
+
+/* Step (3R): permute rows of B. */
+
+ zcopy_(nrhs, &bx[bx_dim1 + 1], ldbx, &b[nlp1 + b_dim1], ldb);
+ if (*sqre == 1) {
+ zcopy_(nrhs, &bx[m + bx_dim1], ldbx, &b[m + b_dim1], ldb);
+ }
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ zcopy_(nrhs, &bx[i__ + bx_dim1], ldbx, &b[perm[i__] + b_dim1],
+ ldb);
+/* L190: */
+ }
+
+/* Step (4R): apply back the Givens rotations performed. */
+
+ for (i__ = *givptr; i__ >= 1; --i__) {
+ d__1 = -givnum[i__ + givnum_dim1];
+ zdrot_(nrhs, &b[givcol[i__ + (givcol_dim1 << 1)] + b_dim1], ldb, &
+ b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[i__ +
+ (givnum_dim1 << 1)], &d__1);
+/* L200: */
+ }
+ }
+
+ return 0;
+
+/* End of ZLALS0 */
+
+} /* zlals0_ */
diff --git a/contrib/libs/clapack/zlalsa.c b/contrib/libs/clapack/zlalsa.c
new file mode 100644
index 0000000000..9b29e00a5e
--- /dev/null
+++ b/contrib/libs/clapack/zlalsa.c
@@ -0,0 +1,664 @@
+/* zlalsa.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b9 = 1.;
+static doublereal c_b10 = 0.;
+static integer c__2 = 2;
+
+/* Subroutine */ int zlalsa_(integer *icompq, integer *smlsiz, integer *n,
+ integer *nrhs, doublecomplex *b, integer *ldb, doublecomplex *bx,
+ integer *ldbx, doublereal *u, integer *ldu, doublereal *vt, integer *
+ k, doublereal *difl, doublereal *difr, doublereal *z__, doublereal *
+ poles, integer *givptr, integer *givcol, integer *ldgcol, integer *
+ perm, doublereal *givnum, doublereal *c__, doublereal *s, doublereal *
+ rwork, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, difl_dim1,
+ difl_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset,
+ poles_dim1, poles_offset, u_dim1, u_offset, vt_dim1, vt_offset,
+ z_dim1, z_offset, b_dim1, b_offset, bx_dim1, bx_offset, i__1,
+ i__2, i__3, i__4, i__5, i__6;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ integer i__, j, i1, ic, lf, nd, ll, nl, nr, im1, nlf, nrf, lvl, ndb1,
+ nlp1, lvl2, nrp1, jcol, nlvl, sqre, jrow, jimag;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ integer jreal, inode, ndiml, ndimr;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zlals0_(integer *, integer *,
+ integer *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, integer *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *), dlasdt_(integer *, integer *, integer *
+, integer *, integer *, integer *, integer *), xerbla_(char *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLALSA is an itermediate step in solving the least squares problem */
+/* by computing the SVD of the coefficient matrix in compact form (The */
+/* singular vectors are computed as products of simple orthorgonal */
+/* matrices.). */
+
+/* If ICOMPQ = 0, ZLALSA applies the inverse of the left singular vector */
+/* matrix of an upper bidiagonal matrix to the right hand side; and if */
+/* ICOMPQ = 1, ZLALSA applies the right singular vector matrix to the */
+/* right hand side. The singular vector matrices were generated in */
+/* compact form by ZLALSA. */
+
+/* Arguments */
+/* ========= */
+
+/* ICOMPQ (input) INTEGER */
+/* Specifies whether the left or the right singular vector */
+/* matrix is involved. */
+/* = 0: Left singular vector matrix */
+/* = 1: Right singular vector matrix */
+
+/* SMLSIZ (input) INTEGER */
+/* The maximum size of the subproblems at the bottom of the */
+/* computation tree. */
+
+/* N (input) INTEGER */
+/* The row and column dimensions of the upper bidiagonal matrix. */
+
+/* NRHS (input) INTEGER */
+/* The number of columns of B and BX. NRHS must be at least 1. */
+
+/* B (input/output) COMPLEX*16 array, dimension ( LDB, NRHS ) */
+/* On input, B contains the right hand sides of the least */
+/* squares problem in rows 1 through M. */
+/* On output, B contains the solution X in rows 1 through N. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B in the calling subprogram. */
+/* LDB must be at least max(1,MAX( M, N ) ). */
+
+/* BX (output) COMPLEX*16 array, dimension ( LDBX, NRHS ) */
+/* On exit, the result of applying the left or right singular */
+/* vector matrix to B. */
+
+/* LDBX (input) INTEGER */
+/* The leading dimension of BX. */
+
+/* U (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ ). */
+/* On entry, U contains the left singular vector matrices of all */
+/* subproblems at the bottom level. */
+
+/* LDU (input) INTEGER, LDU = > N. */
+/* The leading dimension of arrays U, VT, DIFL, DIFR, */
+/* POLES, GIVNUM, and Z. */
+
+/* VT (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ+1 ). */
+/* On entry, VT' contains the right singular vector matrices of */
+/* all subproblems at the bottom level. */
+
+/* K (input) INTEGER array, dimension ( N ). */
+
+/* DIFL (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ). */
+/* where NLVL = INT(log_2 (N/(SMLSIZ+1))) + 1. */
+
+/* DIFR (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ). */
+/* On entry, DIFL(*, I) and DIFR(*, 2 * I -1) record */
+/* distances between singular values on the I-th level and */
+/* singular values on the (I -1)-th level, and DIFR(*, 2 * I) */
+/* record the normalizing factors of the right singular vectors */
+/* matrices of subproblems on I-th level. */
+
+/* Z (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ). */
+/* On entry, Z(1, I) contains the components of the deflation- */
+/* adjusted updating row vector for subproblems on the I-th */
+/* level. */
+
+/* POLES (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ). */
+/* On entry, POLES(*, 2 * I -1: 2 * I) contains the new and old */
+/* singular values involved in the secular equations on the I-th */
+/* level. */
+
+/* GIVPTR (input) INTEGER array, dimension ( N ). */
+/* On entry, GIVPTR( I ) records the number of Givens */
+/* rotations performed on the I-th problem on the computation */
+/* tree. */
+
+/* GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 * NLVL ). */
+/* On entry, for each I, GIVCOL(*, 2 * I - 1: 2 * I) records the */
+/* locations of Givens rotations performed on the I-th level on */
+/* the computation tree. */
+
+/* LDGCOL (input) INTEGER, LDGCOL = > N. */
+/* The leading dimension of arrays GIVCOL and PERM. */
+
+/* PERM (input) INTEGER array, dimension ( LDGCOL, NLVL ). */
+/* On entry, PERM(*, I) records permutations done on the I-th */
+/* level of the computation tree. */
+
+/* GIVNUM (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ). */
+/* On entry, GIVNUM(*, 2 *I -1 : 2 * I) records the C- and S- */
+/* values of Givens rotations performed on the I-th level on the */
+/* computation tree. */
+
+/* C (input) DOUBLE PRECISION array, dimension ( N ). */
+/* On entry, if the I-th subproblem is not square, */
+/* C( I ) contains the C-value of a Givens rotation related to */
+/* the right null space of the I-th subproblem. */
+
+/* S (input) DOUBLE PRECISION array, dimension ( N ). */
+/* On entry, if the I-th subproblem is not square, */
+/* S( I ) contains the S-value of a Givens rotation related to */
+/* the right null space of the I-th subproblem. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension at least */
+/* max ( N, (SMLSZ+1)*NRHS*3 ). */
+
+/* IWORK (workspace) INTEGER array. */
+/* The dimension must be at least 3 * N */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Ren-Cang Li, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ bx_dim1 = *ldbx;
+ bx_offset = 1 + bx_dim1;
+ bx -= bx_offset;
+ givnum_dim1 = *ldu;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ poles_dim1 = *ldu;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ z_dim1 = *ldu;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ difr_dim1 = *ldu;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ difl_dim1 = *ldu;
+ difl_offset = 1 + difl_dim1;
+ difl -= difl_offset;
+ vt_dim1 = *ldu;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ --k;
+ --givptr;
+ perm_dim1 = *ldgcol;
+ perm_offset = 1 + perm_dim1;
+ perm -= perm_offset;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ --c__;
+ --s;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*smlsiz < 3) {
+ *info = -2;
+ } else if (*n < *smlsiz) {
+ *info = -3;
+ } else if (*nrhs < 1) {
+ *info = -4;
+ } else if (*ldb < *n) {
+ *info = -6;
+ } else if (*ldbx < *n) {
+ *info = -8;
+ } else if (*ldu < *n) {
+ *info = -10;
+ } else if (*ldgcol < *n) {
+ *info = -19;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZLALSA", &i__1);
+ return 0;
+ }
+
+/* Book-keeping and setting up the computation tree. */
+
+ inode = 1;
+ ndiml = inode + *n;
+ ndimr = ndiml + *n;
+
+ dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+ smlsiz);
+
+/* The following code applies back the left singular vector factors. */
+/* For applying back the right singular vector factors, go to 170. */
+
+ if (*icompq == 1) {
+ goto L170;
+ }
+
+/* The nodes on the bottom level of the tree were solved */
+/* by DLASDQ. The corresponding left and right singular vector */
+/* matrices are in explicit form. First apply back the left */
+/* singular vector matrices. */
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/* IC : center row of each node */
+/* NL : number of rows of left subproblem */
+/* NR : number of rows of right subproblem */
+/* NLF: starting row of the left subproblem */
+/* NRF: starting row of the right subproblem */
+
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nr = iwork[ndimr + i1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+
+/* Since B and BX are complex, the following call to DGEMM */
+/* is performed in two steps (real and imaginary parts). */
+
+/* CALL DGEMM( 'T', 'N', NL, NRHS, NL, ONE, U( NLF, 1 ), LDU, */
+/* $ B( NLF, 1 ), LDB, ZERO, BX( NLF, 1 ), LDBX ) */
+
+ j = nl * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nl - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++j;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__4].r;
+/* L10: */
+ }
+/* L20: */
+ }
+ dgemm_("T", "N", &nl, nrhs, &nl, &c_b9, &u[nlf + u_dim1], ldu, &rwork[
+ (nl * *nrhs << 1) + 1], &nl, &c_b10, &rwork[1], &nl);
+ j = nl * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nl - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++j;
+ rwork[j] = d_imag(&b[jrow + jcol * b_dim1]);
+/* L30: */
+ }
+/* L40: */
+ }
+ dgemm_("T", "N", &nl, nrhs, &nl, &c_b9, &u[nlf + u_dim1], ldu, &rwork[
+ (nl * *nrhs << 1) + 1], &nl, &c_b10, &rwork[nl * *nrhs + 1], &
+ nl);
+ jreal = 0;
+ jimag = nl * *nrhs;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nl - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * bx_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ z__1.r = rwork[i__5], z__1.i = rwork[i__6];
+ bx[i__4].r = z__1.r, bx[i__4].i = z__1.i;
+/* L50: */
+ }
+/* L60: */
+ }
+
+/* Since B and BX are complex, the following call to DGEMM */
+/* is performed in two steps (real and imaginary parts). */
+
+/* CALL DGEMM( 'T', 'N', NR, NRHS, NR, ONE, U( NRF, 1 ), LDU, */
+/* $ B( NRF, 1 ), LDB, ZERO, BX( NRF, 1 ), LDBX ) */
+
+ j = nr * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nr - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++j;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__4].r;
+/* L70: */
+ }
+/* L80: */
+ }
+ dgemm_("T", "N", &nr, nrhs, &nr, &c_b9, &u[nrf + u_dim1], ldu, &rwork[
+ (nr * *nrhs << 1) + 1], &nr, &c_b10, &rwork[1], &nr);
+ j = nr * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nr - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++j;
+ rwork[j] = d_imag(&b[jrow + jcol * b_dim1]);
+/* L90: */
+ }
+/* L100: */
+ }
+ dgemm_("T", "N", &nr, nrhs, &nr, &c_b9, &u[nrf + u_dim1], ldu, &rwork[
+ (nr * *nrhs << 1) + 1], &nr, &c_b10, &rwork[nr * *nrhs + 1], &
+ nr);
+ jreal = 0;
+ jimag = nr * *nrhs;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nr - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * bx_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ z__1.r = rwork[i__5], z__1.i = rwork[i__6];
+ bx[i__4].r = z__1.r, bx[i__4].i = z__1.i;
+/* L110: */
+ }
+/* L120: */
+ }
+
+/* L130: */
+ }
+
+/* Next copy the rows of B that correspond to unchanged rows */
+/* in the bidiagonal matrix to BX. */
+
+ i__1 = nd;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ic = iwork[inode + i__ - 1];
+ zcopy_(nrhs, &b[ic + b_dim1], ldb, &bx[ic + bx_dim1], ldbx);
+/* L140: */
+ }
+
+/* Finally go through the left singular vector matrices of all */
+/* the other subproblems bottom-up on the tree. */
+
+ j = pow_ii(&c__2, &nlvl);
+ sqre = 0;
+
+ for (lvl = nlvl; lvl >= 1; --lvl) {
+ lvl2 = (lvl << 1) - 1;
+
+/* find the first node LF and last node LL on */
+/* the current level LVL */
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__1 = lvl - 1;
+ lf = pow_ii(&c__2, &i__1);
+ ll = (lf << 1) - 1;
+ }
+ i__1 = ll;
+ for (i__ = lf; i__ <= i__1; ++i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ --j;
+ zlals0_(icompq, &nl, &nr, &sqre, nrhs, &bx[nlf + bx_dim1], ldbx, &
+ b[nlf + b_dim1], ldb, &perm[nlf + lvl * perm_dim1], &
+ givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+ givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+ poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+ lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+ j], &s[j], &rwork[1], info);
+/* L150: */
+ }
+/* L160: */
+ }
+ goto L330;
+
+/* ICOMPQ = 1: applying back the right singular vector factors. */
+
+L170:
+
+/* First now go through the right singular vector matrices of all */
+/* the tree nodes top-down. */
+
+ j = 0;
+ i__1 = nlvl;
+ for (lvl = 1; lvl <= i__1; ++lvl) {
+ lvl2 = (lvl << 1) - 1;
+
+/* Find the first node LF and last node LL on */
+/* the current level LVL. */
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__2 = lvl - 1;
+ lf = pow_ii(&c__2, &i__2);
+ ll = (lf << 1) - 1;
+ }
+ i__2 = lf;
+ for (i__ = ll; i__ >= i__2; --i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ if (i__ == ll) {
+ sqre = 0;
+ } else {
+ sqre = 1;
+ }
+ ++j;
+ zlals0_(icompq, &nl, &nr, &sqre, nrhs, &b[nlf + b_dim1], ldb, &bx[
+ nlf + bx_dim1], ldbx, &perm[nlf + lvl * perm_dim1], &
+ givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+ givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+ poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+ lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+ j], &s[j], &rwork[1], info);
+/* L180: */
+ }
+/* L190: */
+ }
+
+/* The nodes on the bottom level of the tree were solved */
+/* by DLASDQ. The corresponding right singular vector */
+/* matrices are in explicit form. Apply them back. */
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nr = iwork[ndimr + i1];
+ nlp1 = nl + 1;
+ if (i__ == nd) {
+ nrp1 = nr;
+ } else {
+ nrp1 = nr + 1;
+ }
+ nlf = ic - nl;
+ nrf = ic + 1;
+
+/* Since B and BX are complex, the following call to DGEMM is */
+/* performed in two steps (real and imaginary parts). */
+
+/* CALL DGEMM( 'T', 'N', NLP1, NRHS, NLP1, ONE, VT( NLF, 1 ), LDU, */
+/* $ B( NLF, 1 ), LDB, ZERO, BX( NLF, 1 ), LDBX ) */
+
+ j = nlp1 * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nlp1 - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++j;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__4].r;
+/* L200: */
+ }
+/* L210: */
+ }
+ dgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b9, &vt[nlf + vt_dim1], ldu, &
+ rwork[(nlp1 * *nrhs << 1) + 1], &nlp1, &c_b10, &rwork[1], &
+ nlp1);
+ j = nlp1 * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nlp1 - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++j;
+ rwork[j] = d_imag(&b[jrow + jcol * b_dim1]);
+/* L220: */
+ }
+/* L230: */
+ }
+ dgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b9, &vt[nlf + vt_dim1], ldu, &
+ rwork[(nlp1 * *nrhs << 1) + 1], &nlp1, &c_b10, &rwork[nlp1 * *
+ nrhs + 1], &nlp1);
+ jreal = 0;
+ jimag = nlp1 * *nrhs;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nlp1 - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * bx_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ z__1.r = rwork[i__5], z__1.i = rwork[i__6];
+ bx[i__4].r = z__1.r, bx[i__4].i = z__1.i;
+/* L240: */
+ }
+/* L250: */
+ }
+
+/* Since B and BX are complex, the following call to DGEMM is */
+/* performed in two steps (real and imaginary parts). */
+
+/* CALL DGEMM( 'T', 'N', NRP1, NRHS, NRP1, ONE, VT( NRF, 1 ), LDU, */
+/* $ B( NRF, 1 ), LDB, ZERO, BX( NRF, 1 ), LDBX ) */
+
+ j = nrp1 * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nrp1 - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++j;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__4].r;
+/* L260: */
+ }
+/* L270: */
+ }
+ dgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b9, &vt[nrf + vt_dim1], ldu, &
+ rwork[(nrp1 * *nrhs << 1) + 1], &nrp1, &c_b10, &rwork[1], &
+ nrp1);
+ j = nrp1 * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nrp1 - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++j;
+ rwork[j] = d_imag(&b[jrow + jcol * b_dim1]);
+/* L280: */
+ }
+/* L290: */
+ }
+ dgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b9, &vt[nrf + vt_dim1], ldu, &
+ rwork[(nrp1 * *nrhs << 1) + 1], &nrp1, &c_b10, &rwork[nrp1 * *
+ nrhs + 1], &nrp1);
+ jreal = 0;
+ jimag = nrp1 * *nrhs;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nrp1 - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * bx_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ z__1.r = rwork[i__5], z__1.i = rwork[i__6];
+ bx[i__4].r = z__1.r, bx[i__4].i = z__1.i;
+/* L300: */
+ }
+/* L310: */
+ }
+
+/* L320: */
+ }
+
+L330:
+
+ return 0;
+
+/* End of ZLALSA */
+
+} /* zlalsa_ */
diff --git a/contrib/libs/clapack/zlalsd.c b/contrib/libs/clapack/zlalsd.c
new file mode 100644
index 0000000000..3c37773b2e
--- /dev/null
+++ b/contrib/libs/clapack/zlalsd.c
@@ -0,0 +1,758 @@
+/* zlalsd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static integer c__1 = 1;
+static integer c__0 = 0;
+static doublereal c_b10 = 1.;
+static doublereal c_b35 = 0.;
+
+/* Subroutine */ int zlalsd_(char *uplo, integer *smlsiz, integer *n, integer
+	*nrhs, doublereal *d__, doublereal *e, doublecomplex *b, integer *ldb,
+	doublereal *rcond, integer *rank, doublecomplex *work, doublereal *
+	rwork, integer *iwork, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+    doublereal d__1;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *), log(doublereal), d_sign(doublereal *,
+	    doublereal *);
+
+    /* Local variables */
+    integer c__, i__, j, k;
+    doublereal r__;
+    integer s, u, z__;
+    doublereal cs;
+    integer bx;
+    doublereal sn;
+    integer st, vt, nm1, st1;
+    doublereal eps;
+    integer iwk;
+    doublereal tol;
+    integer difl, difr;
+    doublereal rcnd;
+    integer jcol, irwb, perm, nsub, nlvl, sqre, bxst, jrow, irwu, jimag;
+    extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, doublereal *, doublereal *, integer *);
+    integer jreal, irwib, poles, sizei, irwrb, nsize;
+    extern /* Subroutine */ int zdrot_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublereal *, doublereal *), zcopy_(
+	    integer *, doublecomplex *, integer *, doublecomplex *, integer *)
+	    ;
+    integer irwvt, icmpq1, icmpq2;
+    extern doublereal dlamch_(char *);
+    extern /* Subroutine */ int dlasda_(integer *, integer *, integer *,
+	    integer *, doublereal *, doublereal *, doublereal *, integer *,
+	    doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+	     doublereal *, integer *, integer *, integer *, integer *,
+	    doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+	     integer *), dlascl_(char *, integer *, integer *, doublereal *,
+	    doublereal *, integer *, integer *, doublereal *, integer *,
+	    integer *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dlasdq_(char *, integer *, integer *, integer
+	    *, integer *, integer *, doublereal *, doublereal *, doublereal *,
+	     integer *, doublereal *, integer *, doublereal *, integer *,
+	    doublereal *, integer *), dlaset_(char *, integer *,
+	    integer *, doublereal *, doublereal *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *,
+	    doublereal *, doublereal *), xerbla_(char *, integer *);
+    integer givcol;
+    extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+    extern /* Subroutine */ int zlalsa_(integer *, integer *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+	     doublereal *, integer *, doublereal *, integer *, doublereal *,
+	    doublereal *, doublereal *, doublereal *, integer *, integer *,
+	    integer *, integer *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, integer *, integer *), zlascl_(char *, integer *,
+	    integer *, doublereal *, doublereal *, integer *, integer *,
+	    doublecomplex *, integer *, integer *), dlasrt_(char *,
+	    integer *, doublereal *, integer *), zlacpy_(char *,
+	    integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+	     integer *), zlaset_(char *, integer *, integer *,
+	    doublecomplex *, doublecomplex *, doublecomplex *, integer *);
+    doublereal orgnrm;
+    integer givnum, givptr, nrwork, irwwrk, smlszp;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLALSD uses the singular value decomposition of A to solve the least */
+/* squares problem of finding X to minimize the Euclidean norm of each */
+/* column of A*X-B, where A is N-by-N upper bidiagonal, and X and B */
+/* are N-by-NRHS. The solution X overwrites B. */
+
+/* The singular values of A smaller than RCOND times the largest */
+/* singular value are treated as zero in solving the least squares */
+/* problem; in this case a minimum norm solution is returned. */
+/* The actual singular values are returned in D in ascending order. */
+
+/* This code makes very mild assumptions about floating point */
+/* arithmetic. It will work on machines with a guard digit in */
+/* add/subtract, or on those binary machines without guard digits */
+/* which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2. */
+/* It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': D and E define an upper bidiagonal matrix. */
+/* = 'L': D and E define a lower bidiagonal matrix. */
+
+/* SMLSIZ (input) INTEGER */
+/* The maximum size of the subproblems at the bottom of the */
+/* computation tree. */
+
+/* N (input) INTEGER */
+/* The dimension of the bidiagonal matrix. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of columns of B. NRHS must be at least 1. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry D contains the main diagonal of the bidiagonal */
+/* matrix. On exit, if INFO = 0, D contains its singular values. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* Contains the super-diagonal entries of the bidiagonal matrix. */
+/* On exit, E has been destroyed. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On input, B contains the right hand sides of the least */
+/* squares problem. On output, B contains the solution X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of B in the calling subprogram. */
+/* LDB must be at least max(1,N). */
+
+/* RCOND (input) DOUBLE PRECISION */
+/* The singular values of A less than or equal to RCOND times */
+/* the largest singular value are treated as zero in solving */
+/* the least squares problem. If RCOND is negative, */
+/* machine precision is used instead. */
+/* For example, if diag(S)*X=B were the least squares problem, */
+/* where diag(S) is a diagonal matrix of singular values, the */
+/* solution would be X(i) = B(i) / S(i) if S(i) is greater than */
+/* RCOND*max(S), and X(i) = 0 if S(i) is less than or equal to */
+/* RCOND*max(S). */
+
+/* RANK (output) INTEGER */
+/* The number of singular values of A greater than RCOND times */
+/* the largest singular value. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension at least */
+/* (N * NRHS). */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension at least */
+/* (9*N + 2*N*SMLSIZ + 8*N*NLVL + 3*SMLSIZ*NRHS + (SMLSIZ+1)**2), */
+/* where */
+/* NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 ) */
+
+/* IWORK (workspace) INTEGER array, dimension at least */
+/* (3*N*NLVL + 11*N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: The algorithm failed to compute an singular value while */
+/* working on the submatrix lying in rows and columns */
+/* INFO/(N+1) through MOD(INFO,N+1). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Ming Gu and Ren-Cang Li, Computer Science Division, University of */
+/* California at Berkeley, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --work;
+    --rwork;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+
+/* NOTE(review): UPLO and SMLSIZ are not validated here; this mirrors */
+/* the reference LAPACK zlalsd, where any UPLO other than 'L' is */
+/* treated as 'U' (see the rotation branch below). */
+    if (*n < 0) {
+	*info = -3;
+    } else if (*nrhs < 1) {
+	*info = -4;
+    } else if (*ldb < 1 || *ldb < *n) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZLALSD", &i__1);
+	return 0;
+    }
+
+    eps = dlamch_("Epsilon");
+
+/* Set up the tolerance. */
+
+    if (*rcond <= 0. || *rcond >= 1.) {
+	rcnd = eps;
+    } else {
+	rcnd = *rcond;
+    }
+
+    *rank = 0;
+
+/* Quick return if possible. */
+
+    if (*n == 0) {
+	return 0;
+    } else if (*n == 1) {
+	if (d__[1] == 0.) {
+	    zlaset_("A", &c__1, nrhs, &c_b1, &c_b1, &b[b_offset], ldb);
+	} else {
+	    *rank = 1;
+	    zlascl_("G", &c__0, &c__0, &d__[1], &c_b10, &c__1, nrhs, &b[
+		    b_offset], ldb, info);
+	    d__[1] = abs(d__[1]);
+	}
+	return 0;
+    }
+
+/* Rotate the matrix if it is lower bidiagonal. */
+
+    if (*(unsigned char *)uplo == 'L') {
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+	    d__[i__] = r__;
+	    e[i__] = sn * d__[i__ + 1];
+	    d__[i__ + 1] = cs * d__[i__ + 1];
+	    if (*nrhs == 1) {
+		zdrot_(&c__1, &b[i__ + b_dim1], &c__1, &b[i__ + 1 + b_dim1], &
+			c__1, &cs, &sn);
+	    } else {
+/* NRHS > 1: save the rotations and apply them to all columns of B */
+/* in a second pass below. */
+		rwork[(i__ << 1) - 1] = cs;
+		rwork[i__ * 2] = sn;
+	    }
+/* L10: */
+	}
+	if (*nrhs > 1) {
+	    i__1 = *nrhs;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		i__2 = *n - 1;
+		for (j = 1; j <= i__2; ++j) {
+		    cs = rwork[(j << 1) - 1];
+		    sn = rwork[j * 2];
+		    zdrot_(&c__1, &b[j + i__ * b_dim1], &c__1, &b[j + 1 + i__
+			    * b_dim1], &c__1, &cs, &sn);
+/* L20: */
+		}
+/* L30: */
+	    }
+	}
+    }
+
+/* Scale. */
+
+    nm1 = *n - 1;
+    orgnrm = dlanst_("M", n, &d__[1], &e[1]);
+    if (orgnrm == 0.) {
+	zlaset_("A", n, nrhs, &c_b1, &c_b1, &b[b_offset], ldb);
+	return 0;
+    }
+
+    dlascl_("G", &c__0, &c__0, &orgnrm, &c_b10, n, &c__1, &d__[1], n, info);
+    dlascl_("G", &c__0, &c__0, &orgnrm, &c_b10, &nm1, &c__1, &e[1], &nm1,
+	    info);
+
+/* If N is smaller than the minimum divide size SMLSIZ, then solve */
+/* the problem with another solver. */
+
+    if (*n <= *smlsiz) {
+/* Layout of RWORK in this branch: U (n*n), VT (n*n), then scratch */
+/* reused as the real/imaginary GEMM buffers IRWRB/IRWIB/IRWB. */
+	irwu = 1;
+	irwvt = irwu + *n * *n;
+	irwwrk = irwvt + *n * *n;
+	irwrb = irwwrk;
+	irwib = irwrb + *n * *nrhs;
+	irwb = irwib + *n * *nrhs;
+	dlaset_("A", n, n, &c_b35, &c_b10, &rwork[irwu], n);
+	dlaset_("A", n, n, &c_b35, &c_b10, &rwork[irwvt], n);
+	dlasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &rwork[irwvt], n,
+		&rwork[irwu], n, &rwork[irwwrk], &c__1, &rwork[irwwrk], info);
+	if (*info != 0) {
+	    return 0;
+	}
+
+/* In the real version, B is passed to DLASDQ and multiplied */
+/* internally by Q'. Here B is complex and that product is */
+/* computed below in two steps (real and imaginary parts). */
+
+	j = irwb - 1;
+	i__1 = *nrhs;
+	for (jcol = 1; jcol <= i__1; ++jcol) {
+	    i__2 = *n;
+	    for (jrow = 1; jrow <= i__2; ++jrow) {
+		++j;
+		i__3 = jrow + jcol * b_dim1;
+		rwork[j] = b[i__3].r;
+/* L40: */
+	    }
+/* L50: */
+	}
+	dgemm_("T", "N", n, nrhs, n, &c_b10, &rwork[irwu], n, &rwork[irwb], n,
+		 &c_b35, &rwork[irwrb], n);
+	j = irwb - 1;
+	i__1 = *nrhs;
+	for (jcol = 1; jcol <= i__1; ++jcol) {
+	    i__2 = *n;
+	    for (jrow = 1; jrow <= i__2; ++jrow) {
+		++j;
+		rwork[j] = d_imag(&b[jrow + jcol * b_dim1]);
+/* L60: */
+	    }
+/* L70: */
+	}
+	dgemm_("T", "N", n, nrhs, n, &c_b10, &rwork[irwu], n, &rwork[irwb], n,
+		 &c_b35, &rwork[irwib], n);
+/* Reassemble the complex product from the two real GEMM results. */
+	jreal = irwrb - 1;
+	jimag = irwib - 1;
+	i__1 = *nrhs;
+	for (jcol = 1; jcol <= i__1; ++jcol) {
+	    i__2 = *n;
+	    for (jrow = 1; jrow <= i__2; ++jrow) {
+		++jreal;
+		++jimag;
+		i__3 = jrow + jcol * b_dim1;
+		i__4 = jreal;
+		i__5 = jimag;
+		z__1.r = rwork[i__4], z__1.i = rwork[i__5];
+		b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L80: */
+	    }
+/* L90: */
+	}
+
+/* Apply the singular values: zero out rows whose singular value is */
+/* at or below TOL, scale the others; count the kept ones in RANK. */
+	tol = rcnd * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1));
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (d__[i__] <= tol) {
+		zlaset_("A", &c__1, nrhs, &c_b1, &c_b1, &b[i__ + b_dim1], ldb);
+	    } else {
+		zlascl_("G", &c__0, &c__0, &d__[i__], &c_b10, &c__1, nrhs, &b[
+			i__ + b_dim1], ldb, info);
+		++(*rank);
+	    }
+/* L100: */
+	}
+
+/* Since B is complex, the following call to DGEMM is performed */
+/* in two steps (real and imaginary parts). That is for V * B */
+/* (in the real version of the code V' is stored in WORK). */
+
+/* CALL DGEMM( 'T', 'N', N, NRHS, N, ONE, WORK, N, B, LDB, ZERO, */
+/* $ WORK( NWORK ), N ) */
+
+	j = irwb - 1;
+	i__1 = *nrhs;
+	for (jcol = 1; jcol <= i__1; ++jcol) {
+	    i__2 = *n;
+	    for (jrow = 1; jrow <= i__2; ++jrow) {
+		++j;
+		i__3 = jrow + jcol * b_dim1;
+		rwork[j] = b[i__3].r;
+/* L110: */
+	    }
+/* L120: */
+	}
+	dgemm_("T", "N", n, nrhs, n, &c_b10, &rwork[irwvt], n, &rwork[irwb],
+		n, &c_b35, &rwork[irwrb], n);
+	j = irwb - 1;
+	i__1 = *nrhs;
+	for (jcol = 1; jcol <= i__1; ++jcol) {
+	    i__2 = *n;
+	    for (jrow = 1; jrow <= i__2; ++jrow) {
+		++j;
+		rwork[j] = d_imag(&b[jrow + jcol * b_dim1]);
+/* L130: */
+	    }
+/* L140: */
+	}
+	dgemm_("T", "N", n, nrhs, n, &c_b10, &rwork[irwvt], n, &rwork[irwb],
+		n, &c_b35, &rwork[irwib], n);
+	jreal = irwrb - 1;
+	jimag = irwib - 1;
+	i__1 = *nrhs;
+	for (jcol = 1; jcol <= i__1; ++jcol) {
+	    i__2 = *n;
+	    for (jrow = 1; jrow <= i__2; ++jrow) {
+		++jreal;
+		++jimag;
+		i__3 = jrow + jcol * b_dim1;
+		i__4 = jreal;
+		i__5 = jimag;
+		z__1.r = rwork[i__4], z__1.i = rwork[i__5];
+		b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L150: */
+	    }
+/* L160: */
+	}
+
+/* Unscale. */
+
+	dlascl_("G", &c__0, &c__0, &c_b10, &orgnrm, n, &c__1, &d__[1], n,
+		info);
+	dlasrt_("D", n, &d__[1], info);
+	zlascl_("G", &c__0, &c__0, &orgnrm, &c_b10, n, nrhs, &b[b_offset],
+		ldb, info);
+
+	return 0;
+    }
+
+/* Book-keeping and setting up some constants. */
+
+/* NLVL is the height of the divide-and-conquer tree; it matches the */
+/* NLVL formula in the RWORK size requirement in the prologue. */
+    nlvl = (integer) (log((doublereal) (*n) / (doublereal) (*smlsiz + 1)) /
+	    log(2.)) + 1;
+
+    smlszp = *smlsiz + 1;
+
+/* Partition RWORK into the U, VT, DIFL, DIFR, Z, C, S, POLES and */
+/* GIVNUM blocks consumed by DLASDA/ZLALSA, with scratch at NRWORK. */
+/* The complex workspace WORK holds BX (the transformed RHS). */
+    u = 1;
+    vt = *smlsiz * *n + 1;
+    difl = vt + smlszp * *n;
+    difr = difl + nlvl * *n;
+    z__ = difr + (nlvl * *n << 1);
+    c__ = z__ + nlvl * *n;
+    s = c__ + *n;
+    poles = s + *n;
+    givnum = poles + (nlvl << 1) * *n;
+    nrwork = givnum + (nlvl << 1) * *n;
+    bx = 1;
+
+    irwrb = nrwork;
+    irwib = irwrb + *smlsiz * *nrhs;
+    irwb = irwib + *smlsiz * *nrhs;
+
+/* Partition IWORK: subproblem sizes, integer ranks K, Givens */
+/* pointers/columns and permutations, then general scratch at IWK. */
+    sizei = *n + 1;
+    k = sizei + *n;
+    givptr = k + *n;
+    perm = givptr + *n;
+    givcol = perm + nlvl * *n;
+    iwk = givcol + (nlvl * *n << 1);
+
+    st = 1;
+    sqre = 0;
+    icmpq1 = 1;
+    icmpq2 = 0;
+    nsub = 0;
+
+/* Replace tiny diagonal entries by +/- EPS so the subproblem split */
+/* below is driven only by the off-diagonal entries E. */
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((d__1 = d__[i__], abs(d__1)) < eps) {
+	    d__[i__] = d_sign(&eps, &d__[i__]);
+	}
+/* L170: */
+    }
+
+/* Split the bidiagonal matrix into independent subproblems at the */
+/* negligible off-diagonal entries E(I); solve each one either with */
+/* DLASDQ (small) or divide and conquer (large). */
+    i__1 = nm1;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	if ((d__1 = e[i__], abs(d__1)) < eps || i__ == nm1) {
+	    ++nsub;
+	    iwork[nsub] = st;
+
+/* Subproblem found. First determine its size and then */
+/* apply divide and conquer on it. */
+
+	    if (i__ < nm1) {
+
+/* A subproblem with E(I) small for I < NM1. */
+
+		nsize = i__ - st + 1;
+		iwork[sizei + nsub - 1] = nsize;
+	    } else if ((d__1 = e[i__], abs(d__1)) >= eps) {
+
+/* A subproblem with E(NM1) not too small but I = NM1. */
+
+		nsize = *n - st + 1;
+		iwork[sizei + nsub - 1] = nsize;
+	    } else {
+
+/* A subproblem with E(NM1) small. This implies an */
+/* 1-by-1 subproblem at D(N), which is not solved */
+/* explicitly. */
+
+		nsize = i__ - st + 1;
+		iwork[sizei + nsub - 1] = nsize;
+		++nsub;
+		iwork[nsub] = *n;
+		iwork[sizei + nsub - 1] = 1;
+		zcopy_(nrhs, &b[*n + b_dim1], ldb, &work[bx + nm1], n);
+	    }
+	    st1 = st - 1;
+	    if (nsize == 1) {
+
+/* This is a 1-by-1 subproblem and is not solved */
+/* explicitly. */
+
+		zcopy_(nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n);
+	    } else if (nsize <= *smlsiz) {
+
+/* This is a small subproblem and is solved by DLASDQ. */
+
+		dlaset_("A", &nsize, &nsize, &c_b35, &c_b10, &rwork[vt + st1],
+			 n);
+		dlaset_("A", &nsize, &nsize, &c_b35, &c_b10, &rwork[u + st1],
+			n);
+		dlasdq_("U", &c__0, &nsize, &nsize, &nsize, &c__0, &d__[st], &
+			e[st], &rwork[vt + st1], n, &rwork[u + st1], n, &
+			rwork[nrwork], &c__1, &rwork[nrwork], info)
+			;
+		if (*info != 0) {
+		    return 0;
+		}
+
+/* In the real version, B is passed to DLASDQ and multiplied */
+/* internally by Q'. Here B is complex and that product is */
+/* computed below in two steps (real and imaginary parts). */
+
+		j = irwb - 1;
+		i__2 = *nrhs;
+		for (jcol = 1; jcol <= i__2; ++jcol) {
+		    i__3 = st + nsize - 1;
+		    for (jrow = st; jrow <= i__3; ++jrow) {
+			++j;
+			i__4 = jrow + jcol * b_dim1;
+			rwork[j] = b[i__4].r;
+/* L180: */
+		    }
+/* L190: */
+		}
+		dgemm_("T", "N", &nsize, nrhs, &nsize, &c_b10, &rwork[u + st1]
+, n, &rwork[irwb], &nsize, &c_b35, &rwork[irwrb], &
+			nsize);
+		j = irwb - 1;
+		i__2 = *nrhs;
+		for (jcol = 1; jcol <= i__2; ++jcol) {
+		    i__3 = st + nsize - 1;
+		    for (jrow = st; jrow <= i__3; ++jrow) {
+			++j;
+			rwork[j] = d_imag(&b[jrow + jcol * b_dim1]);
+/* L200: */
+		    }
+/* L210: */
+		}
+		dgemm_("T", "N", &nsize, nrhs, &nsize, &c_b10, &rwork[u + st1]
+, n, &rwork[irwb], &nsize, &c_b35, &rwork[irwib], &
+			nsize);
+		jreal = irwrb - 1;
+		jimag = irwib - 1;
+		i__2 = *nrhs;
+		for (jcol = 1; jcol <= i__2; ++jcol) {
+		    i__3 = st + nsize - 1;
+		    for (jrow = st; jrow <= i__3; ++jrow) {
+			++jreal;
+			++jimag;
+			i__4 = jrow + jcol * b_dim1;
+			i__5 = jreal;
+			i__6 = jimag;
+			z__1.r = rwork[i__5], z__1.i = rwork[i__6];
+			b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L220: */
+		    }
+/* L230: */
+		}
+
+		zlacpy_("A", &nsize, nrhs, &b[st + b_dim1], ldb, &work[bx +
+			st1], n);
+	    } else {
+
+/* A large problem. Solve it using divide and conquer. */
+
+		dlasda_(&icmpq1, smlsiz, &nsize, &sqre, &d__[st], &e[st], &
+			rwork[u + st1], n, &rwork[vt + st1], &iwork[k + st1],
+			&rwork[difl + st1], &rwork[difr + st1], &rwork[z__ +
+			st1], &rwork[poles + st1], &iwork[givptr + st1], &
+			iwork[givcol + st1], n, &iwork[perm + st1], &rwork[
+			givnum + st1], &rwork[c__ + st1], &rwork[s + st1], &
+			rwork[nrwork], &iwork[iwk], info);
+		if (*info != 0) {
+		    return 0;
+		}
+		bxst = bx + st1;
+		zlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &b[st + b_dim1], ldb, &
+			work[bxst], n, &rwork[u + st1], n, &rwork[vt + st1], &
+			iwork[k + st1], &rwork[difl + st1], &rwork[difr + st1]
+, &rwork[z__ + st1], &rwork[poles + st1], &iwork[
+			givptr + st1], &iwork[givcol + st1], n, &iwork[perm +
+			st1], &rwork[givnum + st1], &rwork[c__ + st1], &rwork[
+			s + st1], &rwork[nrwork], &iwork[iwk], info);
+		if (*info != 0) {
+		    return 0;
+		}
+	    }
+	    st = i__ + 1;
+	}
+/* L240: */
+    }
+
+/* Apply the singular values and treat the tiny ones as zero. */
+
+    tol = rcnd * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1));
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Some of the elements in D can be negative because 1-by-1 */
+/* subproblems were not solved explicitly. */
+
+	if ((d__1 = d__[i__], abs(d__1)) <= tol) {
+	    zlaset_("A", &c__1, nrhs, &c_b1, &c_b1, &work[bx + i__ - 1], n);
+	} else {
+	    ++(*rank);
+	    zlascl_("G", &c__0, &c__0, &d__[i__], &c_b10, &c__1, nrhs, &work[
+		    bx + i__ - 1], n, info);
+	}
+	d__[i__] = (d__1 = d__[i__], abs(d__1));
+/* L250: */
+    }
+
+/* Now apply back the right singular vectors. */
+
+    icmpq2 = 1;
+    i__1 = nsub;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	st = iwork[i__];
+	st1 = st - 1;
+	nsize = iwork[sizei + i__ - 1];
+	bxst = bx + st1;
+	if (nsize == 1) {
+	    zcopy_(nrhs, &work[bxst], n, &b[st + b_dim1], ldb);
+	} else if (nsize <= *smlsiz) {
+
+/* Since B and BX are complex, the following call to DGEMM */
+/* is performed in two steps (real and imaginary parts). */
+
+/* CALL DGEMM( 'T', 'N', NSIZE, NRHS, NSIZE, ONE, */
+/* $ RWORK( VT+ST1 ), N, RWORK( BXST ), N, ZERO, */
+/* $ B( ST, 1 ), LDB ) */
+
+	    j = bxst - *n - 1;
+	    jreal = irwb - 1;
+	    i__2 = *nrhs;
+	    for (jcol = 1; jcol <= i__2; ++jcol) {
+		j += *n;
+		i__3 = nsize;
+		for (jrow = 1; jrow <= i__3; ++jrow) {
+		    ++jreal;
+		    i__4 = j + jrow;
+		    rwork[jreal] = work[i__4].r;
+/* L260: */
+		}
+/* L270: */
+	    }
+	    dgemm_("T", "N", &nsize, nrhs, &nsize, &c_b10, &rwork[vt + st1],
+		    n, &rwork[irwb], &nsize, &c_b35, &rwork[irwrb], &nsize);
+	    j = bxst - *n - 1;
+	    jimag = irwb - 1;
+	    i__2 = *nrhs;
+	    for (jcol = 1; jcol <= i__2; ++jcol) {
+		j += *n;
+		i__3 = nsize;
+		for (jrow = 1; jrow <= i__3; ++jrow) {
+		    ++jimag;
+		    rwork[jimag] = d_imag(&work[j + jrow]);
+/* L280: */
+		}
+/* L290: */
+	    }
+	    dgemm_("T", "N", &nsize, nrhs, &nsize, &c_b10, &rwork[vt + st1],
+		    n, &rwork[irwb], &nsize, &c_b35, &rwork[irwib], &nsize);
+	    jreal = irwrb - 1;
+	    jimag = irwib - 1;
+	    i__2 = *nrhs;
+	    for (jcol = 1; jcol <= i__2; ++jcol) {
+		i__3 = st + nsize - 1;
+		for (jrow = st; jrow <= i__3; ++jrow) {
+		    ++jreal;
+		    ++jimag;
+		    i__4 = jrow + jcol * b_dim1;
+		    i__5 = jreal;
+		    i__6 = jimag;
+		    z__1.r = rwork[i__5], z__1.i = rwork[i__6];
+		    b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L300: */
+		}
+/* L310: */
+	    }
+	} else {
+	    zlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &work[bxst], n, &b[st +
+		    b_dim1], ldb, &rwork[u + st1], n, &rwork[vt + st1], &
+		    iwork[k + st1], &rwork[difl + st1], &rwork[difr + st1], &
+		    rwork[z__ + st1], &rwork[poles + st1], &iwork[givptr +
+		    st1], &iwork[givcol + st1], n, &iwork[perm + st1], &rwork[
+		    givnum + st1], &rwork[c__ + st1], &rwork[s + st1], &rwork[
+		    nrwork], &iwork[iwk], info);
+	    if (*info != 0) {
+		return 0;
+	    }
+	}
+/* L320: */
+    }
+
+/* Unscale and sort the singular values. */
+
+    dlascl_("G", &c__0, &c__0, &c_b10, &orgnrm, n, &c__1, &d__[1], n, info);
+    dlasrt_("D", n, &d__[1], info);
+    zlascl_("G", &c__0, &c__0, &orgnrm, &c_b10, n, nrhs, &b[b_offset], ldb,
+	    info);
+
+    return 0;
+
+/* End of ZLALSD */
+
+} /* zlalsd_ */
diff --git a/contrib/libs/clapack/zlangb.c b/contrib/libs/clapack/zlangb.c
new file mode 100644
index 0000000000..0f64ade284
--- /dev/null
+++ b/contrib/libs/clapack/zlangb.c
@@ -0,0 +1,224 @@
+/* zlangb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal zlangb_(char *norm, integer *n, integer *kl, integer *ku,
+	doublecomplex *ab, integer *ldab, doublereal *work)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+    doublereal ret_val, d__1, d__2;
+
+    /* Builtin functions */
+    double z_abs(doublecomplex *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, k, l;
+    doublereal sum, scale;
+    extern logical lsame_(char *, char *);
+    doublereal value;
+    extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
+	     doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLANGB returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of an */
+/* n by n band matrix A, with kl sub-diagonals and ku super-diagonals. */
+
+/* Description */
+/* =========== */
+
+/* ZLANGB returns the value */
+
+/* ZLANGB = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in ZLANGB as described */
+/* above. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, ZLANGB is */
+/* set to zero. */
+
+/* KL (input) INTEGER */
+/* The number of sub-diagonals of the matrix A. KL >= 0. */
+
+/* KU (input) INTEGER */
+/* The number of super-diagonals of the matrix A. KU >= 0. */
+
+/* AB (input) COMPLEX*16 array, dimension (LDAB,N) */
+/* The band matrix A, stored in rows 1 to KL+KU+1. The j-th */
+/* column of A is stored in the j-th column of the array AB as */
+/* follows: */
+/* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(n,j+kl). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KL+KU+1. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --work;
+
+    /* Function Body */
+/* NOTE(review): an unrecognized NORM leaves VALUE unset before the */
+/* final assignment; this matches the reference LAPACK zlangb, where */
+/* NORM is assumed valid (callers check via lsame). */
+    if (*n == 0) {
+	value = 0.;
+    } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+	value = 0.;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Band storage: row indices of column j run from max(ku+2-j,1) to */
+/* min(n+ku+1-j, kl+ku+1) within AB. */
+/* Computing MAX */
+	    i__2 = *ku + 2 - j;
+/* Computing MIN */
+	    i__4 = *n + *ku + 1 - j, i__5 = *kl + *ku + 1;
+	    i__3 = min(i__4,i__5);
+	    for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+		d__1 = value, d__2 = z_abs(&ab[i__ + j * ab_dim1]);
+		value = max(d__1,d__2);
+/* L10: */
+	    }
+/* L20: */
+	}
+    } else if (lsame_(norm, "O") || *(unsigned char *)
+	    norm == '1') {
+
+/* Find norm1(A). */
+
+	value = 0.;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = 0.;
+/* Computing MAX */
+	    i__3 = *ku + 2 - j;
+/* Computing MIN */
+	    i__4 = *n + *ku + 1 - j, i__5 = *kl + *ku + 1;
+	    i__2 = min(i__4,i__5);
+	    for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+		sum += z_abs(&ab[i__ + j * ab_dim1]);
+/* L30: */
+	    }
+	    value = max(value,sum);
+/* L40: */
+	}
+    } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    work[i__] = 0.;
+/* L50: */
+	}
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* work[i] accumulates the i-th row sum; k + i maps matrix row i of */
+/* column j into the band storage row index AB(ku+1+i-j, j). */
+	    k = *ku + 1 - j;
+/* Computing MAX */
+	    i__2 = 1, i__3 = j - *ku;
+/* Computing MIN */
+	    i__5 = *n, i__6 = j + *kl;
+	    i__4 = min(i__5,i__6);
+	    for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+		work[i__] += z_abs(&ab[k + i__ + j * ab_dim1]);
+/* L60: */
+	    }
+/* L70: */
+	}
+	value = 0.;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	    d__1 = value, d__2 = work[i__];
+	    value = max(d__1,d__2);
+/* L80: */
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+/* zlassq_ maintains (scale, sum) so that scale^2*sum equals the */
+/* running sum of squares without overflow; one call per column. */
+	scale = 0.;
+	sum = 1.;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+	    i__4 = 1, i__2 = j - *ku;
+	    l = max(i__4,i__2);
+	    k = *ku + 1 - j + l;
+/* Computing MIN */
+	    i__2 = *n, i__3 = j + *kl;
+	    i__4 = min(i__2,i__3) - l + 1;
+	    zlassq_(&i__4, &ab[k + j * ab_dim1], &c__1, &scale, &sum);
+/* L90: */
+	}
+	value = scale * sqrt(sum);
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/* End of ZLANGB */
+
+} /* zlangb_ */
diff --git a/contrib/libs/clapack/zlange.c b/contrib/libs/clapack/zlange.c
new file mode 100644
index 0000000000..28d646558e
--- /dev/null
+++ b/contrib/libs/clapack/zlange.c
@@ -0,0 +1,199 @@
+/* zlange.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal zlange_(char *norm, integer *m, integer *n, doublecomplex *a,
+	integer *lda, doublereal *work)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    doublereal ret_val, d__1, d__2;
+
+    /* Builtin functions */
+    double z_abs(doublecomplex *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j;
+    doublereal sum, scale;
+    extern logical lsame_(char *, char *);
+    doublereal value;
+    extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
+	     doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLANGE returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* complex matrix A. */
+
+/* Description */
+/* =========== */
+
+/* ZLANGE returns the value */
+
+/* ZLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in ZLANGE as described */
+/* above. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. When M = 0, */
+/* ZLANGE is set to zero. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. When N = 0, */
+/* ZLANGE is set to zero. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The m by n matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(M,1). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= M when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --work;
+
+    /* Function Body */
+/* NOTE(review): an unrecognized NORM leaves VALUE unset before the */
+/* final assignment; this matches the reference LAPACK zlange, where */
+/* NORM is assumed valid (callers check via lsame). */
+    if (min(*m,*n) == 0) {
+	value = 0.;
+    } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+	value = 0.;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		d__1 = value, d__2 = z_abs(&a[i__ + j * a_dim1]);
+		value = max(d__1,d__2);
+/* L10: */
+	    }
+/* L20: */
+	}
+    } else if (lsame_(norm, "O") || *(unsigned char *)
+	    norm == '1') {
+
+/* Find norm1(A): maximum over columns of the column's abs sum. */
+
+	value = 0.;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    sum = 0.;
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		sum += z_abs(&a[i__ + j * a_dim1]);
+/* L30: */
+	    }
+	    value = max(value,sum);
+/* L40: */
+	}
+    } else if (lsame_(norm, "I")) {
+
+/* Find normI(A): accumulate row sums in WORK, then take the max. */
+
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    work[i__] = 0.;
+/* L50: */
+	}
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		work[i__] += z_abs(&a[i__ + j * a_dim1]);
+/* L60: */
+	    }
+/* L70: */
+	}
+	value = 0.;
+	i__1 = *m;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	    d__1 = value, d__2 = work[i__];
+	    value = max(d__1,d__2);
+/* L80: */
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+/* zlassq_ maintains (scale, sum) so that scale^2*sum equals the */
+/* running sum of squares without overflow; one call per column. */
+	scale = 0.;
+	sum = 1.;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    zlassq_(m, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L90: */
+	}
+	value = scale * sqrt(sum);
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/* End of ZLANGE */
+
+} /* zlange_ */
diff --git a/contrib/libs/clapack/zlangt.c b/contrib/libs/clapack/zlangt.c
new file mode 100644
index 0000000000..02f244f30c
--- /dev/null
+++ b/contrib/libs/clapack/zlangt.c
@@ -0,0 +1,195 @@
+/* zlangt.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal zlangt_(char *norm, integer *n, doublecomplex *dl, doublecomplex *
+	d__, doublecomplex *du)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal ret_val, d__1, d__2;
+
+    /* Builtin functions */
+    double z_abs(doublecomplex *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__;
+    doublereal sum, scale;
+    extern logical lsame_(char *, char *);
+    doublereal anorm;
+    extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
+	    doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLANGT returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* complex tridiagonal matrix A. */
+
+/* Description */
+/* =========== */
+
+/* ZLANGT returns the value */
+
+/* ZLANGT = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in ZLANGT as described */
+/* above. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, ZLANGT is */
+/* set to zero. */
+
+/* DL (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) sub-diagonal elements of A. */
+
+/* D (input) COMPLEX*16 array, dimension (N) */
+/* The diagonal elements of A. */
+
+/* DU (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) super-diagonal elements of A. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Shift pointers so the 1-based Fortran-style indexing below is valid in C */
+    --du;
+    --d__;
+    --dl;
+
+    /* Quick return: the norm of an empty matrix is zero by definition */
+    if (*n <= 0) {
+	anorm = 0.;
+    } else if (lsame_(norm, "M")) {
+
+/*        NORM = 'M': max(abs(A(i,j))) over the three stored diagonals. */
+
+	anorm = z_abs(&d__[*n]);
+	i__1 = *n - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+	    d__1 = anorm, d__2 = z_abs(&dl[i__]);
+	    anorm = max(d__1,d__2);
+/* Computing MAX */
+	    d__1 = anorm, d__2 = z_abs(&d__[i__]);
+	    anorm = max(d__1,d__2);
+/* Computing MAX */
+	    d__1 = anorm, d__2 = z_abs(&du[i__]);
+	    anorm = max(d__1,d__2);
+/* L10: */
+	}
+    } else if (lsame_(norm, "O") || *(unsigned char *)
+	    norm == '1') {
+
+/*        NORM = '1' or 'O': one norm = maximum absolute column sum. */
+
+	if (*n == 1) {
+	    anorm = z_abs(&d__[1]);
+	} else {
+/* Computing MAX */
+	    d__1 = z_abs(&d__[1]) + z_abs(&dl[1]), d__2 = z_abs(&d__[*n]) +
+		    z_abs(&du[*n - 1]);
+	    anorm = max(d__1,d__2);
+	    i__1 = *n - 1;
+	    for (i__ = 2; i__ <= i__1; ++i__) {
+/* Computing MAX */
+		d__1 = anorm, d__2 = z_abs(&d__[i__]) + z_abs(&dl[i__]) +
+			z_abs(&du[i__ - 1]);
+		anorm = max(d__1,d__2);
+/* L20: */
+	    }
+	}
+    } else if (lsame_(norm, "I")) {
+
+/*        NORM = 'I': infinity norm = maximum absolute row sum. */
+
+	if (*n == 1) {
+	    anorm = z_abs(&d__[1]);
+	} else {
+/* Computing MAX */
+	    d__1 = z_abs(&d__[1]) + z_abs(&du[1]), d__2 = z_abs(&d__[*n]) +
+		    z_abs(&dl[*n - 1]);
+	    anorm = max(d__1,d__2);
+	    i__1 = *n - 1;
+	    for (i__ = 2; i__ <= i__1; ++i__) {
+/* Computing MAX */
+		d__1 = anorm, d__2 = z_abs(&d__[i__]) + z_abs(&du[i__]) +
+			z_abs(&dl[i__ - 1]);
+		anorm = max(d__1,d__2);
+/* L30: */
+	    }
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/*        NORM = 'F'/'E': Frobenius norm, accumulated overflow-safely by zlassq_. */
+
+	scale = 0.;
+	sum = 1.;
+	zlassq_(n, &d__[1], &c__1, &scale, &sum);
+	if (*n > 1) {
+	    i__1 = *n - 1;
+	    zlassq_(&i__1, &dl[1], &c__1, &scale, &sum);
+	    i__1 = *n - 1;
+	    zlassq_(&i__1, &du[1], &c__1, &scale, &sum);
+	}
+	anorm = scale * sqrt(sum);
+    }
+
+    ret_val = anorm;
+    return ret_val;
+
+/* End of ZLANGT */
+
+} /* zlangt_ */
diff --git a/contrib/libs/clapack/zlanhb.c b/contrib/libs/clapack/zlanhb.c
new file mode 100644
index 0000000000..285565d5b0
--- /dev/null
+++ b/contrib/libs/clapack/zlanhb.c
@@ -0,0 +1,291 @@
+/* zlanhb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal zlanhb_(char *norm, char *uplo, integer *n, integer *k,
+	doublecomplex *ab, integer *ldab, doublereal *work)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+    doublereal ret_val, d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double z_abs(doublecomplex *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, l;
+    doublereal sum, absa, scale;
+    extern logical lsame_(char *, char *);
+    doublereal value;
+    extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
+	    doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLANHB returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of an */
+/* n by n hermitian band matrix A, with k super-diagonals. */
+
+/* Description */
+/* =========== */
+
+/* ZLANHB returns the value */
+
+/* ZLANHB = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in ZLANHB as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* band matrix A is supplied. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, ZLANHB is */
+/* set to zero. */
+
+/* K (input) INTEGER */
+/* The number of super-diagonals or sub-diagonals of the */
+/* band matrix A. K >= 0. */
+
+/* AB (input) COMPLEX*16 array, dimension (LDAB,N) */
+/* The upper or lower triangle of the hermitian band matrix A, */
+/* stored in the first K+1 rows of AB. The j-th column of A is */
+/* stored in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(k+1+i-j,j) = A(i,j) for max(1,j-k)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+k). */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set and are assumed to be zero. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= K+1. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/* WORK is not referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Shift pointers so the 1-based Fortran-style indexing below is valid in C */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --work;
+
+    /* Quick return: the norm of an empty matrix is zero by definition */
+    if (*n == 0) {
+	value = 0.;
+    } else if (lsame_(norm, "M")) {
+
+/*        NORM = 'M': largest |A(i,j)|; diagonal uses Re(a_jj) only (hermitian). */
+
+	value = 0.;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+		i__2 = *k + 2 - j;
+		i__3 = *k;
+		for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+		    d__1 = value, d__2 = z_abs(&ab[i__ + j * ab_dim1]);
+		    value = max(d__1,d__2);
+/* L10: */
+		}
+/* Computing MAX */
+		i__3 = *k + 1 + j * ab_dim1;
+		d__2 = value, d__3 = (d__1 = ab[i__3].r, abs(d__1));
+		value = max(d__2,d__3);
+/* L20: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+		i__3 = j * ab_dim1 + 1;
+		d__2 = value, d__3 = (d__1 = ab[i__3].r, abs(d__1));
+		value = max(d__2,d__3);
+/* Computing MIN */
+		i__2 = *n + 1 - j, i__4 = *k + 1;
+		i__3 = min(i__2,i__4);
+		for (i__ = 2; i__ <= i__3; ++i__) {
+/* Computing MAX */
+		    d__1 = value, d__2 = z_abs(&ab[i__ + j * ab_dim1]);
+		    value = max(d__1,d__2);
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+    } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/*        normI(A) = norm1(A) since A is hermitian: max absolute row/col sum. */
+
+	value = 0.;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		sum = 0.;
+		l = *k + 1 - j;
+/* Computing MAX */
+		i__3 = 1, i__2 = j - *k;
+		i__4 = j - 1;
+		for (i__ = max(i__3,i__2); i__ <= i__4; ++i__) {
+		    absa = z_abs(&ab[l + i__ + j * ab_dim1]);
+		    sum += absa;
+		    work[i__] += absa;
+/* L50: */
+		}
+		i__4 = *k + 1 + j * ab_dim1;
+		work[j] = sum + (d__1 = ab[i__4].r, abs(d__1));
+/* L60: */
+	    }
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+		d__1 = value, d__2 = work[i__];
+		value = max(d__1,d__2);
+/* L70: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		work[i__] = 0.;
+/* L80: */
+	    }
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__4 = j * ab_dim1 + 1;
+		sum = work[j] + (d__1 = ab[i__4].r, abs(d__1));
+		l = 1 - j;
+/* Computing MIN */
+		i__3 = *n, i__2 = j + *k;
+		i__4 = min(i__3,i__2);
+		for (i__ = j + 1; i__ <= i__4; ++i__) {
+		    absa = z_abs(&ab[l + i__ + j * ab_dim1]);
+		    sum += absa;
+		    work[i__] += absa;
+/* L90: */
+		}
+		value = max(value,sum);
+/* L100: */
+	    }
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/*        Frobenius norm: off-diagonal sum of squares doubled, then diagonal. */
+
+	scale = 0.;
+	sum = 1.;
+	if (*k > 0) {
+	    if (lsame_(uplo, "U")) {
+		i__1 = *n;
+		for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+		    i__3 = j - 1;
+		    i__4 = min(i__3,*k);
+/* Computing MAX */
+		    i__2 = *k + 2 - j;
+		    zlassq_(&i__4, &ab[max(i__2, 1)+ j * ab_dim1], &c__1, &
+			    scale, &sum);
+/* L110: */
+		}
+		l = *k + 1;
+	    } else {
+		i__1 = *n - 1;
+		for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+		    i__3 = *n - j;
+		    i__4 = min(i__3,*k);
+		    zlassq_(&i__4, &ab[j * ab_dim1 + 2], &c__1, &scale, &sum);
+/* L120: */
+		}
+		l = 1;
+	    }
+	    sum *= 2;
+	} else {
+	    l = 1;
+	}
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__4 = l + j * ab_dim1;
+	    if (ab[i__4].r != 0.) {
+		i__4 = l + j * ab_dim1;
+		absa = (d__1 = ab[i__4].r, abs(d__1));
+		if (scale < absa) {
+/* Computing 2nd power */
+		    d__1 = scale / absa;
+		    sum = sum * (d__1 * d__1) + 1.;
+		    scale = absa;
+		} else {
+/* Computing 2nd power */
+		    d__1 = absa / scale;
+		    sum += d__1 * d__1;
+		}
+	    }
+/* L130: */
+	}
+	value = scale * sqrt(sum);
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/* End of ZLANHB */
+
+} /* zlanhb_ */
diff --git a/contrib/libs/clapack/zlanhe.c b/contrib/libs/clapack/zlanhe.c
new file mode 100644
index 0000000000..1a6f9ff633
--- /dev/null
+++ b/contrib/libs/clapack/zlanhe.c
@@ -0,0 +1,265 @@
+/* zlanhe.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal zlanhe_(char *norm, char *uplo, integer *n, doublecomplex *a,
+	integer *lda, doublereal *work)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    doublereal ret_val, d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double z_abs(doublecomplex *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j;
+    doublereal sum, absa, scale;
+    extern logical lsame_(char *, char *);
+    doublereal value;
+    extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
+	    doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLANHE returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* complex hermitian matrix A. */
+
+/* Description */
+/* =========== */
+
+/* ZLANHE returns the value */
+
+/* ZLANHE = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in ZLANHE as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* hermitian matrix A is to be referenced. */
+/* = 'U': Upper triangular part of A is referenced */
+/* = 'L': Lower triangular part of A is referenced */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, ZLANHE is */
+/* set to zero. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The hermitian matrix A. If UPLO = 'U', the leading n by n */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced. If UPLO = 'L', the leading n by n lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. Note that the imaginary parts of the diagonal */
+/* elements need not be set and are assumed to be zero. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(N,1). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/* WORK is not referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Shift pointers so the 1-based Fortran-style indexing below is valid in C */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --work;
+
+    /* Quick return: the norm of an empty matrix is zero by definition */
+    if (*n == 0) {
+	value = 0.;
+    } else if (lsame_(norm, "M")) {
+
+/*        NORM = 'M': largest |A(i,j)|; diagonal uses Re(a_jj) only (hermitian). */
+
+	value = 0.;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		    d__1 = value, d__2 = z_abs(&a[i__ + j * a_dim1]);
+		    value = max(d__1,d__2);
+/* L10: */
+		}
+/* Computing MAX */
+		i__2 = j + j * a_dim1;
+		d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+		value = max(d__2,d__3);
+/* L20: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+		i__2 = j + j * a_dim1;
+		d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+		value = max(d__2,d__3);
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		    d__1 = value, d__2 = z_abs(&a[i__ + j * a_dim1]);
+		    value = max(d__1,d__2);
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+    } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/*        normI(A) = norm1(A) since A is hermitian: max absolute row/col sum. */
+
+	value = 0.;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		sum = 0.;
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    absa = z_abs(&a[i__ + j * a_dim1]);
+		    sum += absa;
+		    work[i__] += absa;
+/* L50: */
+		}
+		i__2 = j + j * a_dim1;
+		work[j] = sum + (d__1 = a[i__2].r, abs(d__1));
+/* L60: */
+	    }
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+		d__1 = value, d__2 = work[i__];
+		value = max(d__1,d__2);
+/* L70: */
+	    }
+	} else {
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		work[i__] = 0.;
+/* L80: */
+	    }
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j + j * a_dim1;
+		sum = work[j] + (d__1 = a[i__2].r, abs(d__1));
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    absa = z_abs(&a[i__ + j * a_dim1]);
+		    sum += absa;
+		    work[i__] += absa;
+/* L90: */
+		}
+		value = max(value,sum);
+/* L100: */
+	    }
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/*        Frobenius norm: off-diagonal sum of squares doubled, then diagonal. */
+
+	scale = 0.;
+	sum = 1.;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 2; j <= i__1; ++j) {
+		i__2 = j - 1;
+		zlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L110: */
+	    }
+	} else {
+	    i__1 = *n - 1;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n - j;
+		zlassq_(&i__2, &a[j + 1 + j * a_dim1], &c__1, &scale, &sum);
+/* L120: */
+	    }
+	}
+	sum *= 2;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__ + i__ * a_dim1;
+	    if (a[i__2].r != 0.) {
+		i__2 = i__ + i__ * a_dim1;
+		absa = (d__1 = a[i__2].r, abs(d__1));
+		if (scale < absa) {
+/* Computing 2nd power */
+		    d__1 = scale / absa;
+		    sum = sum * (d__1 * d__1) + 1.;
+		    scale = absa;
+		} else {
+/* Computing 2nd power */
+		    d__1 = absa / scale;
+		    sum += d__1 * d__1;
+		}
+	    }
+/* L130: */
+	}
+	value = scale * sqrt(sum);
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/* End of ZLANHE */
+
+} /* zlanhe_ */
diff --git a/contrib/libs/clapack/zlanhf.c b/contrib/libs/clapack/zlanhf.c
new file mode 100644
index 0000000000..c78146df8e
--- /dev/null
+++ b/contrib/libs/clapack/zlanhf.c
@@ -0,0 +1,1803 @@
+/* zlanhf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal zlanhf_(char *norm, char *transr, char *uplo, integer *n,
+ doublecomplex *a, doublereal *work)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ doublereal ret_val, d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *), sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k, l;
+ doublereal s;
+ integer n1;
+ doublereal aa;
+ integer lda, ifm, noe, ilu;
+ doublereal scale;
+ extern logical lsame_(char *, char *);
+ doublereal value;
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLANHF returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* complex Hermitian matrix A in RFP format. */
+
+/* Description */
+/* =========== */
+
+/* ZLANHF returns the value */
+
+/* ZLANHF = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER */
+/* Specifies the value to be returned in ZLANHF as described */
+/* above. */
+
+/* TRANSR (input) CHARACTER */
+/* Specifies whether the RFP format of A is normal or */
+/* conjugate-transposed format. */
+/* = 'N': RFP format is Normal */
+/* = 'C': RFP format is Conjugate-transposed */
+
+/* UPLO (input) CHARACTER */
+/* On entry, UPLO specifies whether the RFP matrix A came from */
+/* an upper or lower triangular matrix as follows: */
+
+/* UPLO = 'U' or 'u' RFP A came from an upper triangular */
+/* matrix */
+
+/* UPLO = 'L' or 'l' RFP A came from a lower triangular */
+/* matrix */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, ZLANHF is */
+/* set to zero. */
+
+/* A (input) COMPLEX*16 array, dimension ( N*(N+1)/2 ); */
+/* On entry, the matrix A in RFP Format. */
+/* RFP Format is described by TRANSR, UPLO and N as follows: */
+/* If TRANSR='N' then RFP A is (0:N,0:K-1) when N is even; */
+/* K=N/2. RFP A is (0:N-1,0:K) when N is odd; K=N/2. If */
+/* TRANSR = 'C' then RFP is the Conjugate-transpose of RFP A */
+/* as defined when TRANSR = 'N'. The contents of RFP A are */
+/* defined by UPLO as follows: If UPLO = 'U' the RFP A */
+/* contains the ( N*(N+1)/2 ) elements of upper packed A */
+/* either in normal or conjugate-transpose Format. If */
+/* UPLO = 'L' the RFP A contains the ( N*(N+1) /2 ) elements */
+/* of lower packed A either in normal or conjugate-transpose */
+/* Format. The LDA of RFP A is (N+1)/2 when TRANSR = 'C'. When */
+/* TRANSR is 'N' the LDA is N+1 when N is even and is N when */
+/* is odd. See the Note below for more details. */
+/* Unchanged on exit. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (LWORK), */
+/* where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/* WORK is not referenced. */
+
+/* Note: */
+/* ===== */
+
+/* We first consider Standard Packed Format when N is even. */
+/* We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* conjugate-transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* conjugate-transpose of the last three columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- -- */
+/* 03 04 05 33 43 53 */
+/* -- -- */
+/* 13 14 15 00 44 54 */
+/* -- */
+/* 23 24 25 10 11 55 */
+
+/* 33 34 35 20 21 22 */
+/* -- */
+/* 00 44 45 30 31 32 */
+/* -- -- */
+/* 01 11 55 40 41 42 */
+/* -- -- -- */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We next consider Standard Packed Format when N is odd. */
+/* We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* conjugate-transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* conjugate-transpose of the last two columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- */
+/* 02 03 04 00 33 43 */
+/* -- */
+/* 12 13 14 10 11 44 */
+
+/* 22 23 24 20 21 22 */
+/* -- */
+/* 00 33 34 30 31 32 */
+/* -- -- */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- */
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ if (*n == 0) {
+ ret_val = 0.;
+ return ret_val;
+ }
+
+/* set noe = 1 if n is odd. if n is even set noe=0 */
+
+ noe = 1;
+ if (*n % 2 == 0) {
+ noe = 0;
+ }
+
+/* set ifm = 0 when form='C' or 'c' and 1 otherwise */
+
+ ifm = 1;
+ if (lsame_(transr, "C")) {
+ ifm = 0;
+ }
+
+/* set ilu = 0 when uplo='U or 'u' and 1 otherwise */
+
+ ilu = 1;
+ if (lsame_(uplo, "U")) {
+ ilu = 0;
+ }
+
+/* set lda = (n+1)/2 when ifm = 0 */
+/* set lda = n when ifm = 1 and noe = 1 */
+/* set lda = n+1 when ifm = 1 and noe = 0 */
+
+ if (ifm == 1) {
+ if (noe == 1) {
+ lda = *n;
+ } else {
+/* noe=0 */
+ lda = *n + 1;
+ }
+ } else {
+/* ifm=0 */
+ lda = (*n + 1) / 2;
+ }
+
+ if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ k = (*n + 1) / 2;
+ value = 0.;
+ if (noe == 1) {
+/* n is odd & n = k + k - 1 */
+ if (ifm == 1) {
+/* A is n by k */
+ if (ilu == 1) {
+/* uplo ='L' */
+ j = 0;
+/* -> L(0,0) */
+/* Computing MAX */
+ i__1 = j + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__1].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 2;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ i__ = j - 1;
+/* L(k+j,k+j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__ = j;
+/* -> L(j,j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__2 = *n - 1;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ }
+ } else {
+/* uplo = 'U' */
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k + j - 2;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ i__ = k + j - 1;
+/* -> U(i,i) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+ value = max(d__2,d__3);
+ ++i__;
+/* =k+j; i -> U(j,j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__2 = *n - 1;
+ for (i__ = k + j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ }
+ i__1 = *n - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+/* j=k-1 */
+ }
+/* i=n-1 -> U(n-1,n-1) */
+/* Computing MAX */
+ i__1 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__1].r, abs(d__1));
+ value = max(d__2,d__3);
+ }
+ } else {
+/* xpose case; A is k by n */
+ if (ilu == 1) {
+/* uplo ='L' */
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ i__ = j;
+/* L(i,i) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__ = j + 1;
+/* L(j+k,j+k) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__2 = k - 1;
+ for (i__ = j + 2; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ }
+ j = k - 1;
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ i__ = k - 1;
+/* -> L(i,i) is at A(i,j) */
+/* Computing MAX */
+ i__1 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__1].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ }
+ } else {
+/* uplo = 'U' */
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ }
+ j = k - 1;
+/* -> U(j,j) is at A(0,j) */
+/* Computing MAX */
+ i__1 = j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__1].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+ i__2 = j - k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ i__ = j - k;
+/* -> U(i,i) at A(i,j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__ = j - k + 1;
+/* U(j,j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__2 = k - 1;
+ for (i__ = j - k + 2; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ }
+ }
+ }
+ } else {
+/* n is even & k = n/2 */
+ if (ifm == 1) {
+/* A is n+1 by k */
+ if (ilu == 1) {
+/* uplo ='L' */
+ j = 0;
+/* -> L(k,k) & j=1 -> L(0,0) */
+/* Computing MAX */
+ i__1 = j + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__1].r, abs(d__1));
+ value = max(d__2,d__3);
+/* Computing MAX */
+ i__1 = j + 1 + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__1].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ i__ = j;
+/* L(k+j,k+j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__ = j + 1;
+/* -> L(j,j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__2 = *n;
+ for (i__ = j + 2; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ }
+ } else {
+/* uplo = 'U' */
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k + j - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ i__ = k + j;
+/* -> U(i,i) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+ value = max(d__2,d__3);
+ ++i__;
+/* =k+j+1; i -> U(j,j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__2 = *n;
+ for (i__ = k + j + 2; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ }
+ i__1 = *n - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+/* j=k-1 */
+ }
+/* i=n-1 -> U(n-1,n-1) */
+/* Computing MAX */
+ i__1 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__1].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__ = *n;
+/* -> U(k-1,k-1) */
+/* Computing MAX */
+ i__1 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__1].r, abs(d__1));
+ value = max(d__2,d__3);
+ }
+ } else {
+/* xpose case; A is k by n+1 */
+ if (ilu == 1) {
+/* uplo ='L' */
+ j = 0;
+/* -> L(k,k) at A(0,0) */
+/* Computing MAX */
+ i__1 = j + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__1].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 2;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ i__ = j - 1;
+/* L(i,i) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__ = j;
+/* L(j+k,j+k) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__2 = k - 1;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ }
+ j = k;
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ i__ = k - 1;
+/* -> L(i,i) is at A(i,j) */
+/* Computing MAX */
+ i__1 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__1].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ }
+ } else {
+/* uplo = 'U' */
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ }
+ j = k;
+/* -> U(j,j) is at A(0,j) */
+/* Computing MAX */
+ i__1 = j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__1].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ i__1 = *n - 1;
+ for (j = k + 1; j <= i__1; ++j) {
+ i__2 = j - k - 2;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ i__ = j - k - 1;
+/* -> U(i,i) at A(i,j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__ = j - k;
+/* U(j,j) */
+/* Computing MAX */
+ i__2 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__2].r, abs(d__1));
+ value = max(d__2,d__3);
+ i__2 = k - 1;
+ for (i__ = j - k + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ }
+ j = *n;
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * lda]);
+ value = max(d__1,d__2);
+ }
+ i__ = k - 1;
+/* U(k,k) at A(i,j) */
+/* Computing MAX */
+ i__1 = i__ + j * lda;
+ d__2 = value, d__3 = (d__1 = a[i__1].r, abs(d__1));
+ value = max(d__2,d__3);
+ }
+ }
+ }
+ } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/* Find normI(A) ( = norm1(A), since A is Hermitian). */
+
+ if (ifm == 1) {
+/* A is 'N' */
+ k = *n / 2;
+ if (noe == 1) {
+/* n is odd & A is n by (n+1)/2 */
+ if (ilu == 0) {
+/* uplo = 'U' */
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+ }
+ i__1 = k;
+ for (j = 0; j <= i__1; ++j) {
+ s = 0.;
+ i__2 = k + j - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = z_abs(&a[i__ + j * lda]);
+/* -> A(i,j+k) */
+ s += aa;
+ work[i__] += aa;
+ }
+ i__2 = i__ + j * lda;
+ aa = (d__1 = a[i__2].r, abs(d__1));
+/* -> A(j+k,j+k) */
+ work[j + k] = s + aa;
+ if (i__ == k + k) {
+ goto L10;
+ }
+ ++i__;
+ i__2 = i__ + j * lda;
+ aa = (d__1 = a[i__2].r, abs(d__1));
+/* -> A(j,j) */
+ work[j] += aa;
+ s = 0.;
+ i__2 = k - 1;
+ for (l = j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = z_abs(&a[i__ + j * lda]);
+/* -> A(l,j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[j] += s;
+ }
+L10:
+ i__ = idamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ } else {
+/* ilu = 1 & uplo = 'L' */
+ ++k;
+/* k=(n+1)/2 for n odd and ilu=1 */
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+ }
+ for (j = k - 1; j >= 0; --j) {
+ s = 0.;
+ i__1 = j - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = z_abs(&a[i__ + j * lda]);
+/* -> A(j+k,i+k) */
+ s += aa;
+ work[i__ + k] += aa;
+ }
+ if (j > 0) {
+ i__1 = i__ + j * lda;
+ aa = (d__1 = a[i__1].r, abs(d__1));
+/* -> A(j+k,j+k) */
+ s += aa;
+ work[i__ + k] += s;
+/* i=j */
+ ++i__;
+ }
+ i__1 = i__ + j * lda;
+ aa = (d__1 = a[i__1].r, abs(d__1));
+/* -> A(j,j) */
+ work[j] = aa;
+ s = 0.;
+ i__1 = *n - 1;
+ for (l = j + 1; l <= i__1; ++l) {
+ ++i__;
+ aa = z_abs(&a[i__ + j * lda]);
+/* -> A(l,j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[j] += s;
+ }
+ i__ = idamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ }
+ } else {
+/* n is even & A is n+1 by k = n/2 */
+ if (ilu == 0) {
+/* uplo = 'U' */
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ s = 0.;
+ i__2 = k + j - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = z_abs(&a[i__ + j * lda]);
+/* -> A(i,j+k) */
+ s += aa;
+ work[i__] += aa;
+ }
+ i__2 = i__ + j * lda;
+ aa = (d__1 = a[i__2].r, abs(d__1));
+/* -> A(j+k,j+k) */
+ work[j + k] = s + aa;
+ ++i__;
+ i__2 = i__ + j * lda;
+ aa = (d__1 = a[i__2].r, abs(d__1));
+/* -> A(j,j) */
+ work[j] += aa;
+ s = 0.;
+ i__2 = k - 1;
+ for (l = j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = z_abs(&a[i__ + j * lda]);
+/* -> A(l,j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[j] += s;
+ }
+ i__ = idamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ } else {
+/* ilu = 1 & uplo = 'L' */
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+ }
+ for (j = k - 1; j >= 0; --j) {
+ s = 0.;
+ i__1 = j - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = z_abs(&a[i__ + j * lda]);
+/* -> A(j+k,i+k) */
+ s += aa;
+ work[i__ + k] += aa;
+ }
+ i__1 = i__ + j * lda;
+ aa = (d__1 = a[i__1].r, abs(d__1));
+/* -> A(j+k,j+k) */
+ s += aa;
+ work[i__ + k] += s;
+/* i=j */
+ ++i__;
+ i__1 = i__ + j * lda;
+ aa = (d__1 = a[i__1].r, abs(d__1));
+/* -> A(j,j) */
+ work[j] = aa;
+ s = 0.;
+ i__1 = *n - 1;
+ for (l = j + 1; l <= i__1; ++l) {
+ ++i__;
+ aa = z_abs(&a[i__ + j * lda]);
+/* -> A(l,j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[j] += s;
+ }
+ i__ = idamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ }
+ }
+ } else {
+/* ifm=0 */
+ k = *n / 2;
+ if (noe == 1) {
+/* n is odd & A is (n+1)/2 by n */
+ if (ilu == 0) {
+/* uplo = 'U' */
+ n1 = k;
+/* n/2 */
+ ++k;
+/* k is the row size and lda */
+ i__1 = *n - 1;
+ for (i__ = n1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+ }
+ i__1 = n1 - 1;
+ for (j = 0; j <= i__1; ++j) {
+ s = 0.;
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = z_abs(&a[i__ + j * lda]);
+/* A(j,n1+i) */
+ work[i__ + n1] += aa;
+ s += aa;
+ }
+ work[j] = s;
+ }
+/* j=n1=k-1 is special */
+ i__1 = j * lda;
+ s = (d__1 = a[i__1].r, abs(d__1));
+/* A(k-1,k-1) */
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ aa = z_abs(&a[i__ + j * lda]);
+/* A(k-1,i+n1) */
+ work[i__ + n1] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+ s = 0.;
+ i__2 = j - k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = z_abs(&a[i__ + j * lda]);
+/* A(i,j-k) */
+ work[i__] += aa;
+ s += aa;
+ }
+/* i=j-k */
+ i__2 = i__ + j * lda;
+ aa = (d__1 = a[i__2].r, abs(d__1));
+/* A(j-k,j-k) */
+ s += aa;
+ work[j - k] += s;
+ ++i__;
+ i__2 = i__ + j * lda;
+ s = (d__1 = a[i__2].r, abs(d__1));
+/* A(j,j) */
+ i__2 = *n - 1;
+ for (l = j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = z_abs(&a[i__ + j * lda]);
+/* A(j,l) */
+ work[l] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ }
+ i__ = idamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ } else {
+/* ilu=1 & uplo = 'L' */
+ ++k;
+/* k=(n+1)/2 for n odd and ilu=1 */
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+/* process */
+ s = 0.;
+ i__2 = j - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = z_abs(&a[i__ + j * lda]);
+/* A(j,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+ i__2 = i__ + j * lda;
+ aa = (d__1 = a[i__2].r, abs(d__1));
+/* i=j so process of A(j,j) */
+ s += aa;
+ work[j] = s;
+/* is initialised here */
+ ++i__;
+/* i=j process A(j+k,j+k) */
+ i__2 = i__ + j * lda;
+ aa = (d__1 = a[i__2].r, abs(d__1));
+ s = aa;
+ i__2 = *n - 1;
+ for (l = k + j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = z_abs(&a[i__ + j * lda]);
+/* A(l,k+j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[k + j] += s;
+ }
+/* j=k-1 is special :process col A(k-1,0:k-1) */
+ s = 0.;
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = z_abs(&a[i__ + j * lda]);
+/* A(k,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+/* i=k-1 */
+ i__1 = i__ + j * lda;
+ aa = (d__1 = a[i__1].r, abs(d__1));
+/* A(k-1,k-1) */
+ s += aa;
+ work[i__] = s;
+/* done with col j=k+1 */
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+/* process col j of A = A(j,0:k-1) */
+ s = 0.;
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = z_abs(&a[i__ + j * lda]);
+/* A(j,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ }
+ i__ = idamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ }
+ } else {
+/* n is even & A is k=n/2 by n+1 */
+ if (ilu == 0) {
+/* uplo = 'U' */
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ s = 0.;
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = z_abs(&a[i__ + j * lda]);
+/* A(j,i+k) */
+ work[i__ + k] += aa;
+ s += aa;
+ }
+ work[j] = s;
+ }
+/* j=k */
+ i__1 = j * lda;
+ aa = (d__1 = a[i__1].r, abs(d__1));
+/* A(k,k) */
+ s = aa;
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ aa = z_abs(&a[i__ + j * lda]);
+/* A(k,k+i) */
+ work[i__ + k] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ i__1 = *n - 1;
+ for (j = k + 1; j <= i__1; ++j) {
+ s = 0.;
+ i__2 = j - 2 - k;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = z_abs(&a[i__ + j * lda]);
+/* A(i,j-k-1) */
+ work[i__] += aa;
+ s += aa;
+ }
+/* i=j-1-k */
+ i__2 = i__ + j * lda;
+ aa = (d__1 = a[i__2].r, abs(d__1));
+/* A(j-k-1,j-k-1) */
+ s += aa;
+ work[j - k - 1] += s;
+ ++i__;
+ i__2 = i__ + j * lda;
+ aa = (d__1 = a[i__2].r, abs(d__1));
+/* A(j,j) */
+ s = aa;
+ i__2 = *n - 1;
+ for (l = j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = z_abs(&a[i__ + j * lda]);
+/* A(j,l) */
+ work[l] += aa;
+ s += aa;
+ }
+ work[j] += s;
+ }
+/* j=n */
+ s = 0.;
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = z_abs(&a[i__ + j * lda]);
+/* A(i,k-1) */
+ work[i__] += aa;
+ s += aa;
+ }
+/* i=k-1 */
+ i__1 = i__ + j * lda;
+ aa = (d__1 = a[i__1].r, abs(d__1));
+/* A(k-1,k-1) */
+ s += aa;
+ work[i__] += s;
+ i__ = idamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ } else {
+/* ilu=1 & uplo = 'L' */
+ i__1 = *n - 1;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+ }
+/* j=0 is special :process col A(k:n-1,k) */
+ s = (d__1 = a[0].r, abs(d__1));
+/* A(k,k) */
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ aa = z_abs(&a[i__]);
+/* A(k+i,k) */
+ work[i__ + k] += aa;
+ s += aa;
+ }
+ work[k] += s;
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+/* process */
+ s = 0.;
+ i__2 = j - 2;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = z_abs(&a[i__ + j * lda]);
+/* A(j-1,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+ i__2 = i__ + j * lda;
+ aa = (d__1 = a[i__2].r, abs(d__1));
+/* i=j-1 so process of A(j-1,j-1) */
+ s += aa;
+ work[j - 1] = s;
+/* is initialised here */
+ ++i__;
+/* i=j process A(j+k,j+k) */
+ i__2 = i__ + j * lda;
+ aa = (d__1 = a[i__2].r, abs(d__1));
+ s = aa;
+ i__2 = *n - 1;
+ for (l = k + j + 1; l <= i__2; ++l) {
+ ++i__;
+ aa = z_abs(&a[i__ + j * lda]);
+/* A(l,k+j) */
+ s += aa;
+ work[l] += aa;
+ }
+ work[k + j] += s;
+ }
+/* j=k is special :process col A(k,0:k-1) */
+ s = 0.;
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ aa = z_abs(&a[i__ + j * lda]);
+/* A(k,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+
+/* i=k-1 */
+ i__1 = i__ + j * lda;
+ aa = (d__1 = a[i__1].r, abs(d__1));
+/* A(k-1,k-1) */
+ s += aa;
+ work[i__] = s;
+/* done with col j=k+1 */
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+
+/* process col j-1 of A = A(j-1,0:k-1) */
+ s = 0.;
+ i__2 = k - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ aa = z_abs(&a[i__ + j * lda]);
+/* A(j-1,i) */
+ work[i__] += aa;
+ s += aa;
+ }
+ work[j - 1] += s;
+ }
+ i__ = idamax_(n, work, &c__1);
+ value = work[i__ - 1];
+ }
+ }
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ k = (*n + 1) / 2;
+ scale = 0.;
+ s = 1.;
+ if (noe == 1) {
+/* n is odd */
+ if (ifm == 1) {
+/* A is normal & A is n by k */
+ if (ilu == 0) {
+/* A is upper */
+ i__1 = k - 3;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 2;
+ zlassq_(&i__2, &a[k + j + 1 + j * lda], &c__1, &scale,
+ &s);
+/* L at A(k,0) */
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k + j - 1;
+ zlassq_(&i__2, &a[j * lda], &c__1, &scale, &s);
+/* trap U at A(0,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ l = k - 1;
+/* -> U(k,k) at A(k-1,0) */
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = l;
+ aa = a[i__2].r;
+/* U(k+i,k+i) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ i__2 = l + 1;
+ aa = a[i__2].r;
+/* U(i,i) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ l = l + lda + 1;
+ }
+ i__1 = l;
+ aa = a[i__1].r;
+/* U(n-1,n-1) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ } else {
+/* ilu=1 & A is lower */
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - j - 1;
+ zlassq_(&i__2, &a[j + 1 + j * lda], &c__1, &scale, &s)
+ ;
+/* trap L at A(0,0) */
+ }
+ i__1 = k - 2;
+ for (j = 1; j <= i__1; ++j) {
+ zlassq_(&j, &a[(j + 1) * lda], &c__1, &scale, &s);
+/* U at A(0,1) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ aa = a[0].r;
+/* L(0,0) at A(0,0) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ l = lda;
+/* -> L(k,k) at A(0,1) */
+ i__1 = k - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = l;
+ aa = a[i__2].r;
+/* L(k-1+i,k-1+i) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ i__2 = l + 1;
+ aa = a[i__2].r;
+/* L(i,i) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ l = l + lda + 1;
+ }
+ }
+ } else {
+/* A is xpose & A is k by n */
+ if (ilu == 0) {
+/* A' is upper */
+ i__1 = k - 2;
+ for (j = 1; j <= i__1; ++j) {
+ zlassq_(&j, &a[(k + j) * lda], &c__1, &scale, &s);
+/* U at A(0,k) */
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ zlassq_(&k, &a[j * lda], &c__1, &scale, &s);
+/* k by k-1 rect. at A(0,0) */
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 1;
+ zlassq_(&i__2, &a[j + 1 + (j + k - 1) * lda], &c__1, &
+ scale, &s);
+/* L at A(0,k-1) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ l = k * lda - lda;
+/* -> U(k-1,k-1) at A(0,k-1) */
+ i__1 = l;
+ aa = a[i__1].r;
+/* U(k-1,k-1) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ l += lda;
+/* -> U(0,0) at A(0,k) */
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+ i__2 = l;
+ aa = a[i__2].r;
+/* -> U(j-k,j-k) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ i__2 = l + 1;
+ aa = a[i__2].r;
+/* -> U(j,j) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ l = l + lda + 1;
+ }
+ } else {
+/* A' is lower */
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ zlassq_(&j, &a[j * lda], &c__1, &scale, &s);
+/* U at A(0,0) */
+ }
+ i__1 = *n - 1;
+ for (j = k; j <= i__1; ++j) {
+ zlassq_(&k, &a[j * lda], &c__1, &scale, &s);
+/* k by k-1 rect. at A(0,k) */
+ }
+ i__1 = k - 3;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 2;
+ zlassq_(&i__2, &a[j + 2 + j * lda], &c__1, &scale, &s)
+ ;
+/* L at A(1,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ l = 0;
+/* -> L(0,0) at A(0,0) */
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = l;
+ aa = a[i__2].r;
+/* L(i,i) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ i__2 = l + 1;
+ aa = a[i__2].r;
+/* L(k+i,k+i) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ l = l + lda + 1;
+ }
+/* L-> k-1 + (k-1)*lda or L(k-1,k-1) at A(k-1,k-1) */
+ i__1 = l;
+ aa = a[i__1].r;
+/* L(k-1,k-1) at A(k-1,k-1) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ }
+ }
+ } else {
+/* n is even */
+ if (ifm == 1) {
+/* A is normal */
+ if (ilu == 0) {
+/* A is upper */
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 1;
+ zlassq_(&i__2, &a[k + j + 2 + j * lda], &c__1, &scale,
+ &s);
+/* L at A(k+1,0) */
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k + j;
+ zlassq_(&i__2, &a[j * lda], &c__1, &scale, &s);
+/* trap U at A(0,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ l = k;
+/* -> U(k,k) at A(k,0) */
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = l;
+ aa = a[i__2].r;
+/* U(k+i,k+i) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ i__2 = l + 1;
+ aa = a[i__2].r;
+/* U(i,i) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ l = l + lda + 1;
+ }
+ } else {
+/* ilu=1 & A is lower */
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *n - j - 1;
+ zlassq_(&i__2, &a[j + 2 + j * lda], &c__1, &scale, &s)
+ ;
+/* trap L at A(1,0) */
+ }
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ zlassq_(&j, &a[j * lda], &c__1, &scale, &s);
+/* U at A(0,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ l = 0;
+/* -> L(k,k) at A(0,0) */
+ i__1 = k - 1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = l;
+ aa = a[i__2].r;
+/* L(k-1+i,k-1+i) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ i__2 = l + 1;
+ aa = a[i__2].r;
+/* L(i,i) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ l = l + lda + 1;
+ }
+ }
+ } else {
+/* A is xpose */
+ if (ilu == 0) {
+/* A' is upper */
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ zlassq_(&j, &a[(k + 1 + j) * lda], &c__1, &scale, &s);
+/* U at A(0,k+1) */
+ }
+ i__1 = k - 1;
+ for (j = 0; j <= i__1; ++j) {
+ zlassq_(&k, &a[j * lda], &c__1, &scale, &s);
+/* k by k rect. at A(0,0) */
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 1;
+ zlassq_(&i__2, &a[j + 1 + (j + k) * lda], &c__1, &
+ scale, &s);
+/* L at A(0,k) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ l = k * lda;
+/* -> U(k,k) at A(0,k) */
+ i__1 = l;
+ aa = a[i__1].r;
+/* U(k,k) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ l += lda;
+/* -> U(0,0) at A(0,k+1) */
+ i__1 = *n - 1;
+ for (j = k + 1; j <= i__1; ++j) {
+ i__2 = l;
+ aa = a[i__2].r;
+/* -> U(j-k-1,j-k-1) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ i__2 = l + 1;
+ aa = a[i__2].r;
+/* -> U(j,j) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ l = l + lda + 1;
+ }
+/* L=k-1+n*lda */
+/* -> U(k-1,k-1) at A(k-1,n) */
+ i__1 = l;
+ aa = a[i__1].r;
+/* U(k,k) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ } else {
+/* A' is lower */
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ zlassq_(&j, &a[(j + 1) * lda], &c__1, &scale, &s);
+/* U at A(0,1) */
+ }
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+ zlassq_(&k, &a[j * lda], &c__1, &scale, &s);
+/* k by k rect. at A(0,k+1) */
+ }
+ i__1 = k - 2;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = k - j - 1;
+ zlassq_(&i__2, &a[j + 1 + j * lda], &c__1, &scale, &s)
+ ;
+/* L at A(0,0) */
+ }
+ s += s;
+/* double s for the off diagonal elements */
+ l = 0;
+/* -> L(k,k) at A(0,0) */
+ i__1 = l;
+ aa = a[i__1].r;
+/* L(k,k) at A(0,0) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ l = lda;
+/* -> L(0,0) at A(0,1) */
+ i__1 = k - 2;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ i__2 = l;
+ aa = a[i__2].r;
+/* L(i,i) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ i__2 = l + 1;
+ aa = a[i__2].r;
+/* L(k+i+1,k+i+1) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ l = l + lda + 1;
+ }
+/* L-> k - 1 + k*lda or L(k-1,k-1) at A(k-1,k) */
+ i__1 = l;
+ aa = a[i__1].r;
+/* L(k-1,k-1) at A(k-1,k) */
+ if (aa != 0.) {
+ if (scale < aa) {
+/* Computing 2nd power */
+ d__1 = scale / aa;
+ s = s * (d__1 * d__1) + 1.;
+ scale = aa;
+ } else {
+/* Computing 2nd power */
+ d__1 = aa / scale;
+ s += d__1 * d__1;
+ }
+ }
+ }
+ }
+ }
+ value = scale * sqrt(s);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of ZLANHF */
+
+} /* zlanhf_ */
diff --git a/contrib/libs/clapack/zlanhp.c b/contrib/libs/clapack/zlanhp.c
new file mode 100644
index 0000000000..80c8cd28b5
--- /dev/null
+++ b/contrib/libs/clapack/zlanhp.c
@@ -0,0 +1,277 @@
+/* zlanhp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal zlanhp_(char *norm, char *uplo, integer *n, doublecomplex *ap, 
+	doublereal *work)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    doublereal ret_val, d__1, d__2, d__3;
+
+    /* Builtin functions */
+    double z_abs(doublecomplex *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, k;
+    doublereal sum, absa, scale;
+    extern logical lsame_(char *, char *);
+    doublereal value;
+    extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *, 
+	    doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLANHP returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* complex hermitian matrix A, supplied in packed form. */
+
+/* Description */
+/* =========== */
+
+/* ZLANHP returns the value */
+
+/* ZLANHP = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in ZLANHP as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* hermitian matrix A is supplied. */
+/* = 'U': Upper triangular part of A is supplied */
+/* = 'L': Lower triangular part of A is supplied */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, ZLANHP is */
+/* set to zero. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the hermitian matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set and are assumed to be zero. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/* WORK is not referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    /* Shift both arrays so Fortran-style 1-based indexing can be used below. */
+    --work;
+    --ap;
+
+    /* Function Body */
+    if (*n == 0) {
+	value = 0.;
+    } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+	value = 0.;
+	if (lsame_(uplo, "U")) {
+/* Packed upper: column j occupies AP(k+1:k+j); on entry to iteration j, */
+/* k is the index of the last element of column j-1. */
+	    k = 0;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Scan the strictly upper part of column j: AP(k+1:k+j-1). */
+		i__2 = k + j - 1;
+		for (i__ = k + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		    d__1 = value, d__2 = z_abs(&ap[i__]);
+		    value = max(d__1,d__2);
+/* L10: */
+		}
+/* k now points at the diagonal element A(j,j). Only its real part is */
+/* used: the imaginary part of a Hermitian diagonal is assumed zero */
+/* (see AP description above). */
+		k += j;
+/* Computing MAX */
+		i__2 = k;
+		d__2 = value, d__3 = (d__1 = ap[i__2].r, abs(d__1));
+		value = max(d__2,d__3);
+/* L20: */
+	    }
+	} else {
+/* Packed lower: column j starts at its diagonal AP(k) and runs through */
+/* AP(k + n - j). */
+	    k = 1;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+/* Diagonal first (real part only, as above). */
+		i__2 = k;
+		d__2 = value, d__3 = (d__1 = ap[i__2].r, abs(d__1));
+		value = max(d__2,d__3);
+/* Then the strictly lower part of column j: AP(k+1:k+n-j). */
+		i__2 = k + *n - j;
+		for (i__ = k + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		    d__1 = value, d__2 = z_abs(&ap[i__]);
+		    value = max(d__1,d__2);
+/* L30: */
+		}
+/* Advance k to the diagonal of column j+1. */
+		k = k + *n - j + 1;
+/* L40: */
+	    }
+	}
+    } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/* Find normI(A) ( = norm1(A), since A is hermitian). */
+
+	value = 0.;
+	k = 1;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* sum collects column j's absolute values; work[i] collects the */
+/* mirrored contributions |A(i,j)| for rows i < j, so that after the */
+/* loop work[j] holds the full row/column sum for index j. */
+		sum = 0.;
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    absa = z_abs(&ap[k]);
+		    sum += absa;
+		    work[i__] += absa;
+		    ++k;
+/* L50: */
+		}
+/* Add the (real) diagonal; work[j] is assigned here before any later */
+/* column can accumulate into it. */
+		i__2 = k;
+		work[j] = sum + (d__1 = ap[i__2].r, abs(d__1));
+		++k;
+/* L60: */
+	    }
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+		d__1 = value, d__2 = work[i__];
+		value = max(d__1,d__2);
+/* L70: */
+	    }
+	} else {
+/* Lower packed: zero all accumulators first, since column j reads */
+/* work[j] (filled by earlier columns) before adding its own part. */
+	    i__1 = *n;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		work[i__] = 0.;
+/* L80: */
+	    }
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+/* Diagonal (real part only), plus what previous columns contributed. */
+		i__2 = k;
+		sum = work[j] + (d__1 = ap[i__2].r, abs(d__1));
+		++k;
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    absa = z_abs(&ap[k]);
+		    sum += absa;
+		    work[i__] += absa;
+		    ++k;
+/* L90: */
+		}
+/* sum is now the complete row/column sum for index j; fold into max. */
+		value = max(value,sum);
+/* L100: */
+	    }
+	}
+    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+/* The pair (scale, sum) represents scale**2 * sum = sum of squares, */
+/* the same representation zlassq_ maintains. k = 2 skips A(1,1): the */
+/* zlassq_ calls below cover only the strictly off-diagonal elements. */
+	scale = 0.;
+	sum = 1.;
+	k = 2;
+	if (lsame_(uplo, "U")) {
+	    i__1 = *n;
+	    for (j = 2; j <= i__1; ++j) {
+/* Column j's strictly upper part has j-1 entries starting at AP(k). */
+		i__2 = j - 1;
+		zlassq_(&i__2, &ap[k], &c__1, &scale, &sum);
+		k += j;
+/* L110: */
+	    }
+	} else {
+	    i__1 = *n - 1;
+	    for (j = 1; j <= i__1; ++j) {
+/* Column j's strictly lower part has n-j entries starting at AP(k). */
+		i__2 = *n - j;
+		zlassq_(&i__2, &ap[k], &c__1, &scale, &sum);
+		k = k + *n - j + 1;
+/* L120: */
+	    }
+	}
+/* Each off-diagonal element appears twice in a Hermitian matrix. */
+	sum *= 2;
+	k = 1;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+/* Fold in the diagonal entries (real parts only) with the same */
+/* overflow-safe (scale, sum) update that zlassq_ performs. */
+	    i__2 = k;
+	    if (ap[i__2].r != 0.) {
+		i__2 = k;
+		absa = (d__1 = ap[i__2].r, abs(d__1));
+		if (scale < absa) {
+/* Computing 2nd power */
+		    d__1 = scale / absa;
+		    sum = sum * (d__1 * d__1) + 1.;
+		    scale = absa;
+		} else {
+/* Computing 2nd power */
+		    d__1 = absa / scale;
+		    sum += d__1 * d__1;
+		}
+	    }
+/* Step k to the next diagonal element in packed storage. */
+	    if (lsame_(uplo, "U")) {
+		k = k + i__ + 1;
+	    } else {
+		k = k + *n - i__ + 1;
+	    }
+/* L130: */
+	}
+/* Undo the scaling: normF = scale * sqrt(sum). */
+	value = scale * sqrt(sum);
+    }
+
+    ret_val = value;
+    return ret_val;
+
+/* End of ZLANHP */
+
+} /* zlanhp_ */
diff --git a/contrib/libs/clapack/zlanhs.c b/contrib/libs/clapack/zlanhs.c
new file mode 100644
index 0000000000..77f25f78d5
--- /dev/null
+++ b/contrib/libs/clapack/zlanhs.c
@@ -0,0 +1,205 @@
+/* zlanhs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal zlanhs_(char *norm, integer *n, doublecomplex *a, integer *lda,
+ doublereal *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ doublereal ret_val, d__1, d__2;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *), sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j;
+ doublereal sum, scale;
+ extern logical lsame_(char *, char *);
+ doublereal value;
+ extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLANHS returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* Hessenberg matrix A. */
+
+/* Description */
+/* =========== */
+
+/* ZLANHS returns the value */
+
+/* ZLANHS = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in ZLANHS as described */
+/* above. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, ZLANHS is */
+/* set to zero. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The n by n upper Hessenberg matrix A; the part of A below the */
+/* first sub-diagonal is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(N,1). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * a_dim1]);
+ value = max(d__1,d__2);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if (lsame_(norm, "O") || *(unsigned char *)
+ norm == '1') {
+
+/* Find norm1(A). */
+
+ value = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.;
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum += z_abs(&a[i__ + j * a_dim1]);
+/* L30: */
+ }
+ value = max(value,sum);
+/* L40: */
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L50: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] += z_abs(&a[i__ + j * a_dim1]);
+/* L60: */
+ }
+/* L70: */
+ }
+ value = 0.;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = work[i__];
+ value = max(d__1,d__2);
+/* L80: */
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.;
+ sum = 1.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ zlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L90: */
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of ZLANHS */
+
+} /* zlanhs_ */
diff --git a/contrib/libs/clapack/zlanht.c b/contrib/libs/clapack/zlanht.c
new file mode 100644
index 0000000000..6eac013402
--- /dev/null
+++ b/contrib/libs/clapack/zlanht.c
@@ -0,0 +1,167 @@
+/* zlanht.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal zlanht_(char *norm, integer *n, doublereal *d__, doublecomplex *e)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal ret_val, d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *), sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ doublereal sum, scale;
+ extern logical lsame_(char *, char *);
+ doublereal anorm;
+ extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+ doublereal *, doublereal *), zlassq_(integer *, doublecomplex *,
+ integer *, doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLANHT returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* complex Hermitian tridiagonal matrix A. */
+
+/* Description */
+/* =========== */
+
+/* ZLANHT returns the value */
+
+/* ZLANHT = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in ZLANHT as described */
+/* above. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, ZLANHT is */
+/* set to zero. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The diagonal elements of A. */
+
+/* E (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) sub-diagonal or super-diagonal elements of A. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --e;
+ --d__;
+
+ /* Function Body */
+ if (*n <= 0) {
+ anorm = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ anorm = (d__1 = d__[*n], abs(d__1));
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__2 = anorm, d__3 = (d__1 = d__[i__], abs(d__1));
+ anorm = max(d__2,d__3);
+/* Computing MAX */
+ d__1 = anorm, d__2 = z_abs(&e[i__]);
+ anorm = max(d__1,d__2);
+/* L10: */
+ }
+ } else if (lsame_(norm, "O") || *(unsigned char *)
+ norm == '1' || lsame_(norm, "I")) {
+
+/* Find norm1(A). */
+
+ if (*n == 1) {
+ anorm = abs(d__[1]);
+ } else {
+/* Computing MAX */
+ d__2 = abs(d__[1]) + z_abs(&e[1]), d__3 = z_abs(&e[*n - 1]) + (
+ d__1 = d__[*n], abs(d__1));
+ anorm = max(d__2,d__3);
+ i__1 = *n - 1;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__2 = anorm, d__3 = (d__1 = d__[i__], abs(d__1)) + z_abs(&e[
+ i__]) + z_abs(&e[i__ - 1]);
+ anorm = max(d__2,d__3);
+/* L20: */
+ }
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.;
+ sum = 1.;
+ if (*n > 1) {
+ i__1 = *n - 1;
+ zlassq_(&i__1, &e[1], &c__1, &scale, &sum);
+ sum *= 2;
+ }
+ dlassq_(n, &d__[1], &c__1, &scale, &sum);
+ anorm = scale * sqrt(sum);
+ }
+
+ ret_val = anorm;
+ return ret_val;
+
+/* End of ZLANHT */
+
+} /* zlanht_ */
diff --git a/contrib/libs/clapack/zlansb.c b/contrib/libs/clapack/zlansb.c
new file mode 100644
index 0000000000..6d3c11c065
--- /dev/null
+++ b/contrib/libs/clapack/zlansb.c
@@ -0,0 +1,261 @@
+/* zlansb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal zlansb_(char *norm, char *uplo, integer *n, integer *k,
+ doublecomplex *ab, integer *ldab, doublereal *work)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+ doublereal ret_val, d__1, d__2;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *), sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, l;
+ doublereal sum, absa, scale;
+ extern logical lsame_(char *, char *);
+ doublereal value;
+ extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLANSB returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of an */
+/* n by n symmetric band matrix A, with k super-diagonals. */
+
+/* Description */
+/* =========== */
+
+/* ZLANSB returns the value */
+
+/* ZLANSB = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in ZLANSB as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* band matrix A is supplied. */
+/* = 'U': Upper triangular part is supplied */
+/* = 'L': Lower triangular part is supplied */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, ZLANSB is */
+/* set to zero. */
+
+/* K (input) INTEGER */
+/* The number of super-diagonals or sub-diagonals of the */
+/* band matrix A. K >= 0. */
+
+/* AB (input) COMPLEX*16 array, dimension (LDAB,N) */
+/* The upper or lower triangle of the symmetric band matrix A, */
+/* stored in the first K+1 rows of AB. The j-th column of A is */
+/* stored in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(k+1+i-j,j) = A(i,j) for max(1,j-k)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+k). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= K+1. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/* WORK is not referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = *k + 2 - j;
+ i__3 = *k + 1;
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&ab[i__ + j * ab_dim1]);
+ value = max(d__1,d__2);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *n + 1 - j, i__4 = *k + 1;
+ i__3 = min(i__2,i__4);
+ for (i__ = 1; i__ <= i__3; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&ab[i__ + j * ab_dim1]);
+ value = max(d__1,d__2);
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/* Find normI(A) ( = norm1(A), since A is symmetric). */
+
+ value = 0.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.;
+ l = *k + 1 - j;
+/* Computing MAX */
+ i__3 = 1, i__2 = j - *k;
+ i__4 = j - 1;
+ for (i__ = max(i__3,i__2); i__ <= i__4; ++i__) {
+ absa = z_abs(&ab[l + i__ + j * ab_dim1]);
+ sum += absa;
+ work[i__] += absa;
+/* L50: */
+ }
+ work[j] = sum + z_abs(&ab[*k + 1 + j * ab_dim1]);
+/* L60: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = work[i__];
+ value = max(d__1,d__2);
+/* L70: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L80: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = work[j] + z_abs(&ab[j * ab_dim1 + 1]);
+ l = 1 - j;
+/* Computing MIN */
+ i__3 = *n, i__2 = j + *k;
+ i__4 = min(i__3,i__2);
+ for (i__ = j + 1; i__ <= i__4; ++i__) {
+ absa = z_abs(&ab[l + i__ + j * ab_dim1]);
+ sum += absa;
+ work[i__] += absa;
+/* L90: */
+ }
+ value = max(value,sum);
+/* L100: */
+ }
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.;
+ sum = 1.;
+ if (*k > 0) {
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = j - 1;
+ i__4 = min(i__3,*k);
+/* Computing MAX */
+ i__2 = *k + 2 - j;
+ zlassq_(&i__4, &ab[max(i__2, 1)+ j * ab_dim1], &c__1, &
+ scale, &sum);
+/* L110: */
+ }
+ l = *k + 1;
+ } else {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n - j;
+ i__4 = min(i__3,*k);
+ zlassq_(&i__4, &ab[j * ab_dim1 + 2], &c__1, &scale, &sum);
+/* L120: */
+ }
+ l = 1;
+ }
+ sum *= 2;
+ } else {
+ l = 1;
+ }
+ zlassq_(n, &ab[l + ab_dim1], ldab, &scale, &sum);
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of ZLANSB */
+
+} /* zlansb_ */
diff --git a/contrib/libs/clapack/zlansp.c b/contrib/libs/clapack/zlansp.c
new file mode 100644
index 0000000000..35260bbcf6
--- /dev/null
+++ b/contrib/libs/clapack/zlansp.c
@@ -0,0 +1,278 @@
+/* zlansp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal zlansp_(char *norm, char *uplo, integer *n, doublecomplex *ap,
+ doublereal *work)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ doublereal ret_val, d__1, d__2;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *), d_imag(doublecomplex *), sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal sum, absa, scale;
+ extern logical lsame_(char *, char *);
+ doublereal value;
+ extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLANSP returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* complex symmetric matrix A, supplied in packed form. */
+
+/* Description */
+/* =========== */
+
+/* ZLANSP returns the value */
+
+/* ZLANSP = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in ZLANSP as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is supplied. */
+/* = 'U': Upper triangular part of A is supplied */
+/* = 'L': Lower triangular part of A is supplied */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, ZLANSP is */
+/* set to zero. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the symmetric matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/* WORK is not referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --work;
+ --ap;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.;
+ if (lsame_(uplo, "U")) {
+ k = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = k + j - 1;
+ for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&ap[i__]);
+ value = max(d__1,d__2);
+/* L10: */
+ }
+ k += j;
+/* L20: */
+ }
+ } else {
+ k = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = k + *n - j;
+ for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&ap[i__]);
+ value = max(d__1,d__2);
+/* L30: */
+ }
+ k = k + *n - j + 1;
+/* L40: */
+ }
+ }
+ } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/* Find normI(A) ( = norm1(A), since A is symmetric). */
+
+ value = 0.;
+ k = 1;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ absa = z_abs(&ap[k]);
+ sum += absa;
+ work[i__] += absa;
+ ++k;
+/* L50: */
+ }
+ work[j] = sum + z_abs(&ap[k]);
+ ++k;
+/* L60: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = work[i__];
+ value = max(d__1,d__2);
+/* L70: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L80: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = work[j] + z_abs(&ap[k]);
+ ++k;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ absa = z_abs(&ap[k]);
+ sum += absa;
+ work[i__] += absa;
+ ++k;
+/* L90: */
+ }
+ value = max(value,sum);
+/* L100: */
+ }
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.;
+ sum = 1.;
+ k = 2;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j - 1;
+ zlassq_(&i__2, &ap[k], &c__1, &scale, &sum);
+ k += j;
+/* L110: */
+ }
+ } else {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ zlassq_(&i__2, &ap[k], &c__1, &scale, &sum);
+ k = k + *n - j + 1;
+/* L120: */
+ }
+ }
+ sum *= 2;
+ k = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = k;
+ if (ap[i__2].r != 0.) {
+ i__2 = k;
+ absa = (d__1 = ap[i__2].r, abs(d__1));
+ if (scale < absa) {
+/* Computing 2nd power */
+ d__1 = scale / absa;
+ sum = sum * (d__1 * d__1) + 1.;
+ scale = absa;
+ } else {
+/* Computing 2nd power */
+ d__1 = absa / scale;
+ sum += d__1 * d__1;
+ }
+ }
+ if (d_imag(&ap[k]) != 0.) {
+ absa = (d__1 = d_imag(&ap[k]), abs(d__1));
+ if (scale < absa) {
+/* Computing 2nd power */
+ d__1 = scale / absa;
+ sum = sum * (d__1 * d__1) + 1.;
+ scale = absa;
+ } else {
+/* Computing 2nd power */
+ d__1 = absa / scale;
+ sum += d__1 * d__1;
+ }
+ }
+ if (lsame_(uplo, "U")) {
+ k = k + i__ + 1;
+ } else {
+ k = k + *n - i__ + 1;
+ }
+/* L130: */
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of ZLANSP */
+
+} /* zlansp_ */
diff --git a/contrib/libs/clapack/zlansy.c b/contrib/libs/clapack/zlansy.c
new file mode 100644
index 0000000000..6239fdd5e9
--- /dev/null
+++ b/contrib/libs/clapack/zlansy.c
@@ -0,0 +1,237 @@
+/* zlansy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal zlansy_(char *norm, char *uplo, integer *n, doublecomplex *a,
+ integer *lda, doublereal *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublereal ret_val, d__1, d__2;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *), sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j;
+ doublereal sum, absa, scale;
+ extern logical lsame_(char *, char *);
+ doublereal value;
+ extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLANSY returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* complex symmetric matrix A. */
+
+/* Description */
+/* =========== */
+
+/* ZLANSY returns the value */
+
+/* ZLANSY = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in ZLANSY as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is to be referenced. */
+/* = 'U': Upper triangular part of A is referenced */
+/* = 'L': Lower triangular part of A is referenced */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, ZLANSY is */
+/* set to zero. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The symmetric matrix A. If UPLO = 'U', the leading n by n */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced. If UPLO = 'L', the leading n by n lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(N,1). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */
+/* WORK is not referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * a_dim1]);
+ value = max(d__1,d__2);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * a_dim1]);
+ value = max(d__1,d__2);
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+
+/* Find normI(A) ( = norm1(A), since A is symmetric). */
+
+ value = 0.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ absa = z_abs(&a[i__ + j * a_dim1]);
+ sum += absa;
+ work[i__] += absa;
+/* L50: */
+ }
+ work[j] = sum + z_abs(&a[j + j * a_dim1]);
+/* L60: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = work[i__];
+ value = max(d__1,d__2);
+/* L70: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L80: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = work[j] + z_abs(&a[j + j * a_dim1]);
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ absa = z_abs(&a[i__ + j * a_dim1]);
+ sum += absa;
+ work[i__] += absa;
+/* L90: */
+ }
+ value = max(value,sum);
+/* L100: */
+ }
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ scale = 0.;
+ sum = 1.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j - 1;
+ zlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L110: */
+ }
+ } else {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ zlassq_(&i__2, &a[j + 1 + j * a_dim1], &c__1, &scale, &sum);
+/* L120: */
+ }
+ }
+ sum *= 2;
+ i__1 = *lda + 1;
+ zlassq_(n, &a[a_offset], &i__1, &scale, &sum);
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of ZLANSY */
+
+} /* zlansy_ */
diff --git a/contrib/libs/clapack/zlantb.c b/contrib/libs/clapack/zlantb.c
new file mode 100644
index 0000000000..28ad3e23e0
--- /dev/null
+++ b/contrib/libs/clapack/zlantb.c
@@ -0,0 +1,426 @@
+/* zlantb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal zlantb_(char *norm, char *uplo, char *diag, integer *n, integer *k,
+ doublecomplex *ab, integer *ldab, doublereal *work)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal ret_val, d__1, d__2;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *), sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, l;
+ doublereal sum, scale;
+ logical udiag;
+ extern logical lsame_(char *, char *);
+ doublereal value;
+ extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLANTB returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of an */
+/* n by n triangular band matrix A, with ( k + 1 ) diagonals. */
+
+/* Description */
+/* =========== */
+
+/* ZLANTB returns the value */
+
+/* ZLANTB = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in ZLANTB as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, ZLANTB is */
+/* set to zero. */
+
+/* K (input) INTEGER */
+/* The number of super-diagonals of the matrix A if UPLO = 'U', */
+/* or the number of sub-diagonals of the matrix A if UPLO = 'L'. */
+/* K >= 0. */
+
+/* AB (input) COMPLEX*16 array, dimension (LDAB,N) */
+/* The upper or lower triangular band matrix A, stored in the */
+/* first k+1 rows of AB. The j-th column of A is stored */
+/* in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(k+1+i-j,j) = A(i,j) for max(1,j-k)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+k). */
+/* Note that when DIAG = 'U', the elements of the array AB */
+/* corresponding to the diagonal elements of the matrix A are */
+/* not referenced, but are assumed to be one. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= K+1. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ if (lsame_(diag, "U")) {
+ value = 1.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = *k + 2 - j;
+ i__3 = *k;
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&ab[i__ + j * ab_dim1]);
+ value = max(d__1,d__2);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *n + 1 - j, i__4 = *k + 1;
+ i__3 = min(i__2,i__4);
+ for (i__ = 2; i__ <= i__3; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&ab[i__ + j * ab_dim1]);
+ value = max(d__1,d__2);
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else {
+ value = 0.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__3 = *k + 2 - j;
+ i__2 = *k + 1;
+ for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&ab[i__ + j * ab_dim1]);
+ value = max(d__1,d__2);
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n + 1 - j, i__4 = *k + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&ab[i__ + j * ab_dim1]);
+ value = max(d__1,d__2);
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ } else if (lsame_(norm, "O") || *(unsigned char *)
+ norm == '1') {
+
+/* Find norm1(A). */
+
+ value = 0.;
+ udiag = lsame_(diag, "U");
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (udiag) {
+ sum = 1.;
+/* Computing MAX */
+ i__2 = *k + 2 - j;
+ i__3 = *k;
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+ sum += z_abs(&ab[i__ + j * ab_dim1]);
+/* L90: */
+ }
+ } else {
+ sum = 0.;
+/* Computing MAX */
+ i__3 = *k + 2 - j;
+ i__2 = *k + 1;
+ for (i__ = max(i__3,1); i__ <= i__2; ++i__) {
+ sum += z_abs(&ab[i__ + j * ab_dim1]);
+/* L100: */
+ }
+ }
+ value = max(value,sum);
+/* L110: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (udiag) {
+ sum = 1.;
+/* Computing MIN */
+ i__3 = *n + 1 - j, i__4 = *k + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ sum += z_abs(&ab[i__ + j * ab_dim1]);
+/* L120: */
+ }
+ } else {
+ sum = 0.;
+/* Computing MIN */
+ i__3 = *n + 1 - j, i__4 = *k + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum += z_abs(&ab[i__ + j * ab_dim1]);
+/* L130: */
+ }
+ }
+ value = max(value,sum);
+/* L140: */
+ }
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ value = 0.;
+ if (lsame_(uplo, "U")) {
+ if (lsame_(diag, "U")) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 1.;
+/* L150: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ l = *k + 1 - j;
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *k;
+ i__4 = j - 1;
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ work[i__] += z_abs(&ab[l + i__ + j * ab_dim1]);
+/* L160: */
+ }
+/* L170: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L180: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ l = *k + 1 - j;
+/* Computing MAX */
+ i__4 = 1, i__2 = j - *k;
+ i__3 = j;
+ for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+ work[i__] += z_abs(&ab[l + i__ + j * ab_dim1]);
+/* L190: */
+ }
+/* L200: */
+ }
+ }
+ } else {
+ if (lsame_(diag, "U")) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 1.;
+/* L210: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ l = 1 - j;
+/* Computing MIN */
+ i__4 = *n, i__2 = j + *k;
+ i__3 = min(i__4,i__2);
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ work[i__] += z_abs(&ab[l + i__ + j * ab_dim1]);
+/* L220: */
+ }
+/* L230: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L240: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ l = 1 - j;
+/* Computing MIN */
+ i__4 = *n, i__2 = j + *k;
+ i__3 = min(i__4,i__2);
+ for (i__ = j; i__ <= i__3; ++i__) {
+ work[i__] += z_abs(&ab[l + i__ + j * ab_dim1]);
+/* L250: */
+ }
+/* L260: */
+ }
+ }
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = work[i__];
+ value = max(d__1,d__2);
+/* L270: */
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ if (lsame_(uplo, "U")) {
+ if (lsame_(diag, "U")) {
+ scale = 1.;
+ sum = (doublereal) (*n);
+ if (*k > 0) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+ i__4 = j - 1;
+ i__3 = min(i__4,*k);
+/* Computing MAX */
+ i__2 = *k + 2 - j;
+ zlassq_(&i__3, &ab[max(i__2, 1)+ j * ab_dim1], &c__1,
+ &scale, &sum);
+/* L280: */
+ }
+ }
+ } else {
+ scale = 0.;
+ sum = 1.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__4 = j, i__2 = *k + 1;
+ i__3 = min(i__4,i__2);
+/* Computing MAX */
+ i__5 = *k + 2 - j;
+ zlassq_(&i__3, &ab[max(i__5, 1)+ j * ab_dim1], &c__1, &
+ scale, &sum);
+/* L290: */
+ }
+ }
+ } else {
+ if (lsame_(diag, "U")) {
+ scale = 1.;
+ sum = (doublereal) (*n);
+ if (*k > 0) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__4 = *n - j;
+ i__3 = min(i__4,*k);
+ zlassq_(&i__3, &ab[j * ab_dim1 + 2], &c__1, &scale, &
+ sum);
+/* L300: */
+ }
+ }
+ } else {
+ scale = 0.;
+ sum = 1.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__4 = *n - j + 1, i__2 = *k + 1;
+ i__3 = min(i__4,i__2);
+ zlassq_(&i__3, &ab[j * ab_dim1 + 1], &c__1, &scale, &sum);
+/* L310: */
+ }
+ }
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of ZLANTB */
+
+} /* zlantb_ */
diff --git a/contrib/libs/clapack/zlantp.c b/contrib/libs/clapack/zlantp.c
new file mode 100644
index 0000000000..ffcfddfa45
--- /dev/null
+++ b/contrib/libs/clapack/zlantp.c
@@ -0,0 +1,391 @@
+/* zlantp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal zlantp_(char *norm, char *uplo, char *diag, integer *n,
+ doublecomplex *ap, doublereal *work)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ doublereal ret_val, d__1, d__2;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *), sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal sum, scale;
+ logical udiag;
+ extern logical lsame_(char *, char *);
+ doublereal value;
+ extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLANTP returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* triangular matrix A, supplied in packed form. */
+
+/* Description */
+/* =========== */
+
+/* ZLANTP returns the value */
+
+/* ZLANTP = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in ZLANTP as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. When N = 0, ZLANTP is */
+/* set to zero. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The upper or lower triangular matrix A, packed columnwise in */
+/* a linear array. The j-th column of A is stored in the array */
+/* AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* Note that when DIAG = 'U', the elements of the array AP */
+/* corresponding to the diagonal elements of the matrix A are */
+/* not referenced, but are assumed to be one. */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= N when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --work;
+ --ap;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ k = 1;
+ if (lsame_(diag, "U")) {
+ value = 1.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = k + j - 2;
+ for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&ap[i__]);
+ value = max(d__1,d__2);
+/* L10: */
+ }
+ k += j;
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = k + *n - j;
+ for (i__ = k + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&ap[i__]);
+ value = max(d__1,d__2);
+/* L30: */
+ }
+ k = k + *n - j + 1;
+/* L40: */
+ }
+ }
+ } else {
+ value = 0.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = k + j - 1;
+ for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&ap[i__]);
+ value = max(d__1,d__2);
+/* L50: */
+ }
+ k += j;
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = k + *n - j;
+ for (i__ = k; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&ap[i__]);
+ value = max(d__1,d__2);
+/* L70: */
+ }
+ k = k + *n - j + 1;
+/* L80: */
+ }
+ }
+ }
+ } else if (lsame_(norm, "O") || *(unsigned char *)
+ norm == '1') {
+
+/* Find norm1(A). */
+
+ value = 0.;
+ k = 1;
+ udiag = lsame_(diag, "U");
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (udiag) {
+ sum = 1.;
+ i__2 = k + j - 2;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ sum += z_abs(&ap[i__]);
+/* L90: */
+ }
+ } else {
+ sum = 0.;
+ i__2 = k + j - 1;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ sum += z_abs(&ap[i__]);
+/* L100: */
+ }
+ }
+ k += j;
+ value = max(value,sum);
+/* L110: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (udiag) {
+ sum = 1.;
+ i__2 = k + *n - j;
+ for (i__ = k + 1; i__ <= i__2; ++i__) {
+ sum += z_abs(&ap[i__]);
+/* L120: */
+ }
+ } else {
+ sum = 0.;
+ i__2 = k + *n - j;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ sum += z_abs(&ap[i__]);
+/* L130: */
+ }
+ }
+ k = k + *n - j + 1;
+ value = max(value,sum);
+/* L140: */
+ }
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ k = 1;
+ if (lsame_(uplo, "U")) {
+ if (lsame_(diag, "U")) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 1.;
+/* L150: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] += z_abs(&ap[k]);
+ ++k;
+/* L160: */
+ }
+ ++k;
+/* L170: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L180: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] += z_abs(&ap[k]);
+ ++k;
+/* L190: */
+ }
+/* L200: */
+ }
+ }
+ } else {
+ if (lsame_(diag, "U")) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 1.;
+/* L210: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ ++k;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ work[i__] += z_abs(&ap[k]);
+ ++k;
+/* L220: */
+ }
+/* L230: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L240: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ work[i__] += z_abs(&ap[k]);
+ ++k;
+/* L250: */
+ }
+/* L260: */
+ }
+ }
+ }
+ value = 0.;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = work[i__];
+ value = max(d__1,d__2);
+/* L270: */
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ if (lsame_(uplo, "U")) {
+ if (lsame_(diag, "U")) {
+ scale = 1.;
+ sum = (doublereal) (*n);
+ k = 2;
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j - 1;
+ zlassq_(&i__2, &ap[k], &c__1, &scale, &sum);
+ k += j;
+/* L280: */
+ }
+ } else {
+ scale = 0.;
+ sum = 1.;
+ k = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ zlassq_(&j, &ap[k], &c__1, &scale, &sum);
+ k += j;
+/* L290: */
+ }
+ }
+ } else {
+ if (lsame_(diag, "U")) {
+ scale = 1.;
+ sum = (doublereal) (*n);
+ k = 2;
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ zlassq_(&i__2, &ap[k], &c__1, &scale, &sum);
+ k = k + *n - j + 1;
+/* L300: */
+ }
+ } else {
+ scale = 0.;
+ sum = 1.;
+ k = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j + 1;
+ zlassq_(&i__2, &ap[k], &c__1, &scale, &sum);
+ k = k + *n - j + 1;
+/* L310: */
+ }
+ }
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of ZLANTP */
+
+} /* zlantp_ */
diff --git a/contrib/libs/clapack/zlantr.c b/contrib/libs/clapack/zlantr.c
new file mode 100644
index 0000000000..43ddb89c25
--- /dev/null
+++ b/contrib/libs/clapack/zlantr.c
@@ -0,0 +1,394 @@
+/* zlantr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+doublereal zlantr_(char *norm, char *uplo, char *diag, integer *m, integer *n,
+ doublecomplex *a, integer *lda, doublereal *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ doublereal ret_val, d__1, d__2;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *), sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j;
+ doublereal sum, scale;
+ logical udiag;
+ extern logical lsame_(char *, char *);
+ doublereal value;
+ extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLANTR returns the value of the one norm, or the Frobenius norm, or */
+/* the infinity norm, or the element of largest absolute value of a */
+/* trapezoidal or triangular matrix A. */
+
+/* Description */
+/* =========== */
+
+/* ZLANTR returns the value */
+
+/* ZLANTR = ( max(abs(A(i,j))), NORM = 'M' or 'm' */
+/* ( */
+/* ( norm1(A), NORM = '1', 'O' or 'o' */
+/* ( */
+/* ( normI(A), NORM = 'I' or 'i' */
+/* ( */
+/* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */
+
+/* where norm1 denotes the one norm of a matrix (maximum column sum), */
+/* normI denotes the infinity norm of a matrix (maximum row sum) and */
+/* normF denotes the Frobenius norm of a matrix (square root of sum of */
+/* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies the value to be returned in ZLANTR as described */
+/* above. */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower trapezoidal. */
+/* = 'U': Upper trapezoidal */
+/* = 'L': Lower trapezoidal */
+/* Note that A is triangular instead of trapezoidal if M = N. */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A has unit diagonal. */
+/* = 'N': Non-unit diagonal */
+/* = 'U': Unit diagonal */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0, and if */
+/* UPLO = 'U', M <= N. When M = 0, ZLANTR is set to zero. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0, and if */
+/* UPLO = 'L', N <= M. When N = 0, ZLANTR is set to zero. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The trapezoidal matrix A (A is triangular if M = N). */
+/* If UPLO = 'U', the leading m by n upper trapezoidal part of */
+/* the array A contains the upper trapezoidal matrix, and the */
+/* strictly lower triangular part of A is not referenced. */
+/* If UPLO = 'L', the leading m by n lower trapezoidal part of */
+/* the array A contains the lower trapezoidal matrix, and the */
+/* strictly upper triangular part of A is not referenced. Note */
+/* that when DIAG = 'U', the diagonal elements of A are not */
+/* referenced and are assumed to be one. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(M,1). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */
+/* where LWORK >= M when NORM = 'I'; otherwise, WORK is not */
+/* referenced. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+
+ /* Function Body */
+ if (min(*m,*n) == 0) {
+ value = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ if (lsame_(diag, "U")) {
+ value = 1.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *m, i__4 = j - 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * a_dim1]);
+ value = max(d__1,d__2);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * a_dim1]);
+ value = max(d__1,d__2);
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else {
+ value = 0.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(*m,j);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * a_dim1]);
+ value = max(d__1,d__2);
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = z_abs(&a[i__ + j * a_dim1]);
+ value = max(d__1,d__2);
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ } else if (lsame_(norm, "O") || *(unsigned char *)
+ norm == '1') {
+
+/* Find norm1(A). */
+
+ value = 0.;
+ udiag = lsame_(diag, "U");
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (udiag && j <= *m) {
+ sum = 1.;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum += z_abs(&a[i__ + j * a_dim1]);
+/* L90: */
+ }
+ } else {
+ sum = 0.;
+ i__2 = min(*m,j);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum += z_abs(&a[i__ + j * a_dim1]);
+/* L100: */
+ }
+ }
+ value = max(value,sum);
+/* L110: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (udiag) {
+ sum = 1.;
+ i__2 = *m;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ sum += z_abs(&a[i__ + j * a_dim1]);
+/* L120: */
+ }
+ } else {
+ sum = 0.;
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ sum += z_abs(&a[i__ + j * a_dim1]);
+/* L130: */
+ }
+ }
+ value = max(value,sum);
+/* L140: */
+ }
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ if (lsame_(uplo, "U")) {
+ if (lsame_(diag, "U")) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 1.;
+/* L150: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *m, i__4 = j - 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] += z_abs(&a[i__ + j * a_dim1]);
+/* L160: */
+ }
+/* L170: */
+ }
+ } else {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L180: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(*m,j);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] += z_abs(&a[i__ + j * a_dim1]);
+/* L190: */
+ }
+/* L200: */
+ }
+ }
+ } else {
+ if (lsame_(diag, "U")) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 1.;
+/* L210: */
+ }
+ i__1 = *m;
+ for (i__ = *n + 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L220: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ work[i__] += z_abs(&a[i__ + j * a_dim1]);
+/* L230: */
+ }
+/* L240: */
+ }
+ } else {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L250: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ work[i__] += z_abs(&a[i__ + j * a_dim1]);
+/* L260: */
+ }
+/* L270: */
+ }
+ }
+ }
+ value = 0.;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = work[i__];
+ value = max(d__1,d__2);
+/* L280: */
+ }
+ } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+
+/* Find normF(A). */
+
+ if (lsame_(uplo, "U")) {
+ if (lsame_(diag, "U")) {
+ scale = 1.;
+ sum = (doublereal) min(*m,*n);
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *m, i__4 = j - 1;
+ i__2 = min(i__3,i__4);
+ zlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L290: */
+ }
+ } else {
+ scale = 0.;
+ sum = 1.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(*m,j);
+ zlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L300: */
+ }
+ }
+ } else {
+ if (lsame_(diag, "U")) {
+ scale = 1.;
+ sum = (doublereal) min(*m,*n);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m - j;
+/* Computing MIN */
+ i__3 = *m, i__4 = j + 1;
+ zlassq_(&i__2, &a[min(i__3, i__4)+ j * a_dim1], &c__1, &
+ scale, &sum);
+/* L310: */
+ }
+ } else {
+ scale = 0.;
+ sum = 1.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m - j + 1;
+ zlassq_(&i__2, &a[j + j * a_dim1], &c__1, &scale, &sum);
+/* L320: */
+ }
+ }
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of ZLANTR */
+
+} /* zlantr_ */
diff --git a/contrib/libs/clapack/zlapll.c b/contrib/libs/clapack/zlapll.c
new file mode 100644
index 0000000000..66a4405855
--- /dev/null
+++ b/contrib/libs/clapack/zlapll.c
@@ -0,0 +1,143 @@
+/* zlapll.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlapll_(integer *n, doublecomplex *x, integer *incx,
+ doublecomplex *y, integer *incy, doublereal *ssmin)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2, d__3;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+ double z_abs(doublecomplex *);
+
+ /* Local variables */
+ doublecomplex c__, a11, a12, a22, tau;
+ extern /* Subroutine */ int dlas2_(doublereal *, doublereal *, doublereal
+ *, doublereal *, doublereal *);
+ extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ doublereal ssmax;
+ extern /* Subroutine */ int zaxpy_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), zlarfg_(
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Given two column vectors X and Y, let */
+
+/* A = ( X Y ). */
+
+/* The subroutine first computes the QR factorization of A = Q*R, */
+/* and then computes the SVD of the 2-by-2 upper triangular matrix R. */
+/* The smaller singular value of R is returned in SSMIN, which is used */
+/* as the measurement of the linear dependency of the vectors X and Y. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The length of the vectors X and Y. */
+
+/* X (input/output) COMPLEX*16 array, dimension (1+(N-1)*INCX) */
+/* On entry, X contains the N-vector X. */
+/* On exit, X is overwritten. */
+
+/* INCX (input) INTEGER */
+/* The increment between successive elements of X. INCX > 0. */
+
+/* Y (input/output) COMPLEX*16 array, dimension (1+(N-1)*INCY) */
+/* On entry, Y contains the N-vector Y. */
+/* On exit, Y is overwritten. */
+
+/* INCY (input) INTEGER */
+/* The increment between successive elements of Y. INCY > 0. */
+
+/* SSMIN (output) DOUBLE PRECISION */
+/* The smallest singular value of the N-by-2 matrix A = ( X Y ). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ --y;
+ --x;
+
+ /* Function Body */
+ if (*n <= 1) {
+ *ssmin = 0.;
+ return 0;
+ }
+
+/* Compute the QR factorization of the N-by-2 matrix ( X Y ) */
+
+ zlarfg_(n, &x[1], &x[*incx + 1], incx, &tau);
+ a11.r = x[1].r, a11.i = x[1].i;
+ x[1].r = 1., x[1].i = 0.;
+
+ d_cnjg(&z__3, &tau);
+ z__2.r = -z__3.r, z__2.i = -z__3.i;
+ zdotc_(&z__4, n, &x[1], incx, &y[1], incy);
+ z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * z__4.i +
+ z__2.i * z__4.r;
+ c__.r = z__1.r, c__.i = z__1.i;
+ zaxpy_(n, &c__, &x[1], incx, &y[1], incy);
+
+ i__1 = *n - 1;
+ zlarfg_(&i__1, &y[*incy + 1], &y[(*incy << 1) + 1], incy, &tau);
+
+ a12.r = y[1].r, a12.i = y[1].i;
+ i__1 = *incy + 1;
+ a22.r = y[i__1].r, a22.i = y[i__1].i;
+
+/* Compute the SVD of 2-by-2 Upper triangular matrix. */
+
+ d__1 = z_abs(&a11);
+ d__2 = z_abs(&a12);
+ d__3 = z_abs(&a22);
+ dlas2_(&d__1, &d__2, &d__3, ssmin, &ssmax);
+
+ return 0;
+
+/* End of ZLAPLL */
+
+} /* zlapll_ */
diff --git a/contrib/libs/clapack/zlapmt.c b/contrib/libs/clapack/zlapmt.c
new file mode 100644
index 0000000000..365526efe2
--- /dev/null
+++ b/contrib/libs/clapack/zlapmt.c
@@ -0,0 +1,186 @@
+/* zlapmt.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlapmt_(logical *forwrd, integer *m, integer *n,
+	doublecomplex *x, integer *ldx, integer *k)
+{
+    /* System generated locals */
+    integer x_dim1, x_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, j, ii, in;
+    doublecomplex temp;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZLAPMT rearranges the columns of the M by N matrix X as specified */
+/*  by the permutation K(1),K(2),...,K(N) of the integers 1,...,N. */
+/*  If FORWRD = .TRUE., forward permutation: */
+
+/*       X(*,K(J)) is moved to X(*,J) for J = 1,2,...,N. */
+
+/*  If FORWRD = .FALSE., backward permutation: */
+
+/*       X(*,J) is moved to X(*,K(J)) for J = 1,2,...,N. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  FORWRD  (input) LOGICAL */
+/*          = .TRUE., forward permutation */
+/*          = .FALSE., backward permutation */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix X. M >= 0. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix X. N >= 0. */
+
+/*  X       (input/output) COMPLEX*16 array, dimension (LDX,N) */
+/*          On entry, the M by N matrix X. */
+/*          On exit, X contains the permuted matrix X. */
+
+/*  LDX     (input) INTEGER */
+/*          The leading dimension of the array X, LDX >= MAX(1,M). */
+
+/*  K       (input/output) INTEGER array, dimension (N) */
+/*          On entry, K contains the permutation vector. K is used as */
+/*          internal workspace, but reset to its original value on */
+/*          output. */
+
+/*  ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --k;
+
+    /* Function Body */
+    if (*n <= 1) {
+	return 0;
+    }
+
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	k[i__] = -k[i__];
+/* L10: */
+    }
+
+    if (*forwrd) {
+
+/*        Forward permutation */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+	    if (k[i__] > 0) {
+		goto L40;
+	    }
+
+	    j = i__;
+	    k[j] = -k[j];
+	    in = k[j];
+
+L20:
+	    if (k[in] > 0) {
+		goto L40;
+	    }
+
+	    i__2 = *m;
+	    for (ii = 1; ii <= i__2; ++ii) {
+		i__3 = ii + j * x_dim1;
+		temp.r = x[i__3].r, temp.i = x[i__3].i;
+		i__3 = ii + j * x_dim1;
+		i__4 = ii + in * x_dim1;
+		x[i__3].r = x[i__4].r, x[i__3].i = x[i__4].i;
+		i__3 = ii + in * x_dim1;
+		x[i__3].r = temp.r, x[i__3].i = temp.i;
+/* L30: */
+	    }
+
+	    k[in] = -k[in];
+	    j = in;
+	    in = k[in];
+	    goto L20;
+
+L40:
+
+/* L50: */
+	    ;
+	}
+
+    } else {
+
+/*        Backward permutation */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+	    if (k[i__] > 0) {
+		goto L80;
+	    }
+
+	    k[i__] = -k[i__];
+	    j = k[i__];
+L60:
+	    if (j == i__) {
+		goto L80;
+	    }
+
+	    i__2 = *m;
+	    for (ii = 1; ii <= i__2; ++ii) {
+		i__3 = ii + i__ * x_dim1;
+		temp.r = x[i__3].r, temp.i = x[i__3].i;
+		i__3 = ii + i__ * x_dim1;
+		i__4 = ii + j * x_dim1;
+		x[i__3].r = x[i__4].r, x[i__3].i = x[i__4].i;
+		i__3 = ii + j * x_dim1;
+		x[i__3].r = temp.r, x[i__3].i = temp.i;
+/* L70: */
+	    }
+
+	    k[j] = -k[j];
+	    j = k[j];
+	    goto L60;
+
+L80:
+
+/* L90: */
+	    ;
+	}
+
+    }
+
+    return 0;
+
+/*     End of ZLAPMT */
+
+} /* zlapmt_ */
diff --git a/contrib/libs/clapack/zlaqgb.c b/contrib/libs/clapack/zlaqgb.c
new file mode 100644
index 0000000000..c105a33db5
--- /dev/null
+++ b/contrib/libs/clapack/zlaqgb.c
@@ -0,0 +1,227 @@
+/* zlaqgb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlaqgb_(integer *m, integer *n, integer *kl, integer *ku,
+	doublecomplex *ab, integer *ldab, doublereal *r__, doublereal *c__,
+	doublereal *rowcnd, doublereal *colcnd, doublereal *amax, char *equed)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+    doublereal d__1;
+    doublecomplex z__1;
+
+    /* Local variables */
+    integer i__, j;
+    doublereal cj, large, small;
+    extern doublereal dlamch_(char *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZLAQGB equilibrates a general M by N band matrix A with KL */
+/*  subdiagonals and KU superdiagonals using the row and scaling factors */
+/*  in the vectors R and C. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix A.  M >= 0. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix A.  N >= 0. */
+
+/*  KL      (input) INTEGER */
+/*          The number of subdiagonals within the band of A.  KL >= 0. */
+
+/*  KU      (input) INTEGER */
+/*          The number of superdiagonals within the band of A.  KU >= 0. */
+
+/*  AB      (input/output) COMPLEX*16 array, dimension (LDAB,N) */
+/*          On entry, the matrix A in band storage, in rows 1 to KL+KU+1. */
+/*          The j-th column of A is stored in the j-th column of the */
+/*          array AB as follows: */
+/*          AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl) */
+
+/*          On exit, the equilibrated matrix, in the same storage format */
+/*          as A.  See EQUED for the form of the equilibrated matrix. */
+
+/*  LDAB    (input) INTEGER */
+/*          The leading dimension of the array AB.  LDAB >= KL+KU+1. */
+
+/*  R       (input) DOUBLE PRECISION array, dimension (M) */
+/*          The row scale factors for A. */
+
+/*  C       (input) DOUBLE PRECISION array, dimension (N) */
+/*          The column scale factors for A. */
+
+/*  ROWCND  (input) DOUBLE PRECISION */
+/*          Ratio of the smallest R(i) to the largest R(i). */
+
+/*  COLCND  (input) DOUBLE PRECISION */
+/*          Ratio of the smallest C(i) to the largest C(i). */
+
+/*  AMAX    (input) DOUBLE PRECISION */
+/*          Absolute value of largest matrix entry. */
+
+/*  EQUED   (output) CHARACTER*1 */
+/*          Specifies the form of equilibration that was done. */
+/*          = 'N':  No equilibration */
+/*          = 'R':  Row equilibration, i.e., A has been premultiplied by */
+/*                  diag(R). */
+/*          = 'C':  Column equilibration, i.e., A has been postmultiplied */
+/*                  by diag(C). */
+/*          = 'B':  Both row and column equilibration, i.e., A has been */
+/*                  replaced by diag(R) * A * diag(C). */
+
+/*  Internal Parameters */
+/*  =================== */
+
+/*  THRESH is a threshold value used to decide if row or column scaling */
+/*  should be done based on the ratio of the row or column scaling */
+/*  factors.  If ROWCND < THRESH (= 0.1 here), row scaling is done, and */
+/*  if COLCND < THRESH, column scaling is done. */
+
+/*  LARGE and SMALL are threshold values used to decide if row scaling */
+/*  should be done based on the absolute size of the largest matrix */
+/*  element.  If AMAX > LARGE or AMAX < SMALL, row scaling is done. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Quick return if possible */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --r__;
+    --c__;
+
+    /* Function Body */
+    if (*m <= 0 || *n <= 0) {
+	*(unsigned char *)equed = 'N';
+	return 0;
+    }
+
+/*     Initialize LARGE and SMALL. */
+
+    small = dlamch_("Safe minimum") / dlamch_("Precision");
+    large = 1. / small;
+
+    if (*rowcnd >= .1 && *amax >= small && *amax <= large) {
+
+/*        No row scaling */
+
+	if (*colcnd >= .1) {
+
+/*           No column scaling */
+
+	    *(unsigned char *)equed = 'N';
+	} else {
+
+/*           Column scaling */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = c__[j];
+/* Computing MAX */
+		i__2 = 1, i__3 = j - *ku;
+/* Computing MIN */
+		i__5 = *m, i__6 = j + *kl;
+		i__4 = min(i__5,i__6);
+		for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+		    i__2 = *ku + 1 + i__ - j + j * ab_dim1;
+		    i__3 = *ku + 1 + i__ - j + j * ab_dim1;
+		    z__1.r = cj * ab[i__3].r, z__1.i = cj * ab[i__3].i;
+		    ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
+/* L10: */
+		}
+/* L20: */
+	    }
+	    *(unsigned char *)equed = 'C';
+	}
+    } else if (*colcnd >= .1) {
+
+/*        Row scaling, no column scaling */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+	    i__4 = 1, i__2 = j - *ku;
+/* Computing MIN */
+	    i__5 = *m, i__6 = j + *kl;
+	    i__3 = min(i__5,i__6);
+	    for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
+		i__4 = *ku + 1 + i__ - j + j * ab_dim1;
+		i__2 = i__;
+		i__5 = *ku + 1 + i__ - j + j * ab_dim1;
+		z__1.r = r__[i__2] * ab[i__5].r, z__1.i = r__[i__2] * ab[i__5]
+			.i;
+		ab[i__4].r = z__1.r, ab[i__4].i = z__1.i;
+/* L30: */
+	    }
+/* L40: */
+	}
+	*(unsigned char *)equed = 'R';
+    } else {
+
+/*        Row and column scaling */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    cj = c__[j];
+/* Computing MAX */
+	    i__3 = 1, i__4 = j - *ku;
+/* Computing MIN */
+	    i__5 = *m, i__6 = j + *kl;
+	    i__2 = min(i__5,i__6);
+	    for (i__ = max(i__3,i__4); i__ <= i__2; ++i__) {
+		i__3 = *ku + 1 + i__ - j + j * ab_dim1;
+		d__1 = cj * r__[i__];
+		i__4 = *ku + 1 + i__ - j + j * ab_dim1;
+		z__1.r = d__1 * ab[i__4].r, z__1.i = d__1 * ab[i__4].i;
+		ab[i__3].r = z__1.r, ab[i__3].i = z__1.i;
+/* L50: */
+	    }
+/* L60: */
+	}
+	*(unsigned char *)equed = 'B';
+    }
+
+    return 0;
+
+/*     End of ZLAQGB */
+
+} /* zlaqgb_ */
diff --git a/contrib/libs/clapack/zlaqge.c b/contrib/libs/clapack/zlaqge.c
new file mode 100644
index 0000000000..a07ee30ecd
--- /dev/null
+++ b/contrib/libs/clapack/zlaqge.c
@@ -0,0 +1,202 @@
+/* zlaqge.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlaqge_(integer *m, integer *n, doublecomplex *a,
+	integer *lda, doublereal *r__, doublereal *c__, doublereal *rowcnd,
+	doublereal *colcnd, doublereal *amax, char *equed)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    doublereal d__1;
+    doublecomplex z__1;
+
+    /* Local variables */
+    integer i__, j;
+    doublereal cj, large, small;
+    extern doublereal dlamch_(char *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZLAQGE equilibrates a general M by N matrix A using the row and */
+/*  column scaling factors in the vectors R and C. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix A.  M >= 0. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix A.  N >= 0. */
+
+/*  A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/*          On entry, the M by N matrix A. */
+/*          On exit, the equilibrated matrix.  See EQUED for the form of */
+/*          the equilibrated matrix. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(M,1). */
+
+/*  R       (input) DOUBLE PRECISION array, dimension (M) */
+/*          The row scale factors for A. */
+
+/*  C       (input) DOUBLE PRECISION array, dimension (N) */
+/*          The column scale factors for A. */
+
+/*  ROWCND  (input) DOUBLE PRECISION */
+/*          Ratio of the smallest R(i) to the largest R(i). */
+
+/*  COLCND  (input) DOUBLE PRECISION */
+/*          Ratio of the smallest C(i) to the largest C(i). */
+
+/*  AMAX    (input) DOUBLE PRECISION */
+/*          Absolute value of largest matrix entry. */
+
+/*  EQUED   (output) CHARACTER*1 */
+/*          Specifies the form of equilibration that was done. */
+/*          = 'N':  No equilibration */
+/*          = 'R':  Row equilibration, i.e., A has been premultiplied by */
+/*                  diag(R). */
+/*          = 'C':  Column equilibration, i.e., A has been postmultiplied */
+/*                  by diag(C). */
+/*          = 'B':  Both row and column equilibration, i.e., A has been */
+/*                  replaced by diag(R) * A * diag(C). */
+
+/*  Internal Parameters */
+/*  =================== */
+
+/*  THRESH is a threshold value used to decide if row or column scaling */
+/*  should be done based on the ratio of the row or column scaling */
+/*  factors.  If ROWCND < THRESH (= 0.1 here), row scaling is done, and */
+/*  if COLCND < THRESH, column scaling is done. */
+
+/*  LARGE and SMALL are threshold values used to decide if row scaling */
+/*  should be done based on the absolute size of the largest matrix */
+/*  element.  If AMAX > LARGE or AMAX < SMALL, row scaling is done. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Quick return if possible */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --r__;
+    --c__;
+
+    /* Function Body */
+    if (*m <= 0 || *n <= 0) {
+	*(unsigned char *)equed = 'N';
+	return 0;
+    }
+
+/*     Initialize LARGE and SMALL. */
+
+    small = dlamch_("Safe minimum") / dlamch_("Precision");
+    large = 1. / small;
+
+    if (*rowcnd >= .1 && *amax >= small && *amax <= large) {
+
+/*        No row scaling */
+
+	if (*colcnd >= .1) {
+
+/*           No column scaling */
+
+	    *(unsigned char *)equed = 'N';
+	} else {
+
+/*           Column scaling */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = c__[j];
+		i__2 = *m;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * a_dim1;
+		    i__4 = i__ + j * a_dim1;
+		    z__1.r = cj * a[i__4].r, z__1.i = cj * a[i__4].i;
+		    a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L10: */
+		}
+/* L20: */
+	    }
+	    *(unsigned char *)equed = 'C';
+	}
+    } else if (*colcnd >= .1) {
+
+/*        Row scaling, no column scaling */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		i__4 = i__;
+		i__5 = i__ + j * a_dim1;
+		z__1.r = r__[i__4] * a[i__5].r, z__1.i = r__[i__4] * a[i__5]
+			.i;
+		a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L30: */
+	    }
+/* L40: */
+	}
+	*(unsigned char *)equed = 'R';
+    } else {
+
+/*        Row and column scaling */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    cj = c__[j];
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		d__1 = cj * r__[i__];
+		i__4 = i__ + j * a_dim1;
+		z__1.r = d__1 * a[i__4].r, z__1.i = d__1 * a[i__4].i;
+		a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L50: */
+	    }
+/* L60: */
+	}
+	*(unsigned char *)equed = 'B';
+    }
+
+    return 0;
+
+/*     End of ZLAQGE */
+
+} /* zlaqge_ */
diff --git a/contrib/libs/clapack/zlaqhb.c b/contrib/libs/clapack/zlaqhb.c
new file mode 100644
index 0000000000..6d01748a1c
--- /dev/null
+++ b/contrib/libs/clapack/zlaqhb.c
@@ -0,0 +1,201 @@
+/* zlaqhb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlaqhb_(char *uplo, integer *n, integer *kd,
+	doublecomplex *ab, integer *ldab, doublereal *s, doublereal *scond,
+	doublereal *amax, char *equed)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+    doublereal d__1;
+    doublecomplex z__1;
+
+    /* Local variables */
+    integer i__, j;
+    doublereal cj, large;
+    extern logical lsame_(char *, char *);
+    doublereal small;
+    extern doublereal dlamch_(char *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZLAQHB equilibrates a Hermitian band matrix A using the scaling */
+/*  factors in the vector S. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the upper or lower triangular part of the */
+/*          Hermitian band matrix A is stored. */
+/*          = 'U':  Upper triangular */
+/*          = 'L':  Lower triangular */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  KD      (input) INTEGER */
+/*          The number of super-diagonals of the matrix A if UPLO = 'U', */
+/*          or the number of sub-diagonals if UPLO = 'L'.  KD >= 0. */
+
+/*  AB      (input/output) COMPLEX*16 array, dimension (LDAB,N) */
+/*          On entry, the upper or lower triangle of the Hermitian band */
+/*          matrix A, stored in the first KD+1 rows of the array.  The */
+/*          j-th column of A is stored in the j-th column of the array AB */
+/*          as follows: */
+/*          if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/*          if UPLO = 'L', AB(1+i-j,j)    = A(i,j) for j<=i<=min(n,j+kd). */
+
+/*          On exit, if EQUED = 'Y', the equilibrated matrix */
+/*          diag(S) * A * diag(S), in the same storage format as A; */
+/*          the diagonal entries are stored with zero imaginary part. */
+
+/*  LDAB    (input) INTEGER */
+/*          The leading dimension of the array AB.  LDAB >= KD+1. */
+
+/*  S       (input) DOUBLE PRECISION array, dimension (N) */
+/*          The scale factors for A. */
+
+/*  SCOND   (input) DOUBLE PRECISION */
+/*          Ratio of the smallest S(i) to the largest S(i). */
+
+/*  AMAX    (input) DOUBLE PRECISION */
+/*          Absolute value of largest matrix entry. */
+
+/*  EQUED   (output) CHARACTER*1 */
+/*          Specifies whether or not equilibration was done. */
+/*          = 'N':  No equilibration. */
+/*          = 'Y':  Equilibration was done, i.e., A has been replaced by */
+/*                  diag(S) * A * diag(S). */
+
+/*  Internal Parameters */
+/*  =================== */
+
+/*  THRESH is a threshold value used to decide if scaling should be done */
+/*  based on the ratio of the scaling factors.  If SCOND < THRESH */
+/*  (= 0.1 here), scaling is done. */
+
+/*  LARGE and SMALL are threshold values used to decide if scaling should */
+/*  be done based on the absolute size of the largest matrix element. */
+/*  If AMAX > LARGE or AMAX < SMALL, scaling is done. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Quick return if possible */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --s;
+
+    /* Function Body */
+    if (*n <= 0) {
+	*(unsigned char *)equed = 'N';
+	return 0;
+    }
+
+/*     Initialize LARGE and SMALL. */
+
+    small = dlamch_("Safe minimum") / dlamch_("Precision");
+    large = 1. / small;
+
+    if (*scond >= .1 && *amax >= small && *amax <= large) {
+
+/*        No equilibration */
+
+	*(unsigned char *)equed = 'N';
+    } else {
+
+/*        Replace A by diag(S) * A * diag(S). */
+
+	if (lsame_(uplo, "U")) {
+
+/*           Upper triangle of A is stored in band format. */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = s[j];
+/* Computing MAX */
+		i__2 = 1, i__3 = j - *kd;
+		i__4 = j - 1;
+		for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+		    i__2 = *kd + 1 + i__ - j + j * ab_dim1;
+		    d__1 = cj * s[i__];
+		    i__3 = *kd + 1 + i__ - j + j * ab_dim1;
+		    z__1.r = d__1 * ab[i__3].r, z__1.i = d__1 * ab[i__3].i;
+		    ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
+/* L10: */
+		}
+		i__4 = *kd + 1 + j * ab_dim1;
+		i__2 = *kd + 1 + j * ab_dim1;
+		d__1 = cj * cj * ab[i__2].r;
+		ab[i__4].r = d__1, ab[i__4].i = 0.;
+/* L20: */
+	    }
+	} else {
+
+/*           Lower triangle of A is stored. */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = s[j];
+		i__4 = j * ab_dim1 + 1;
+		i__2 = j * ab_dim1 + 1;
+		d__1 = cj * cj * ab[i__2].r;
+		ab[i__4].r = d__1, ab[i__4].i = 0.;
+/* Computing MIN */
+		i__2 = *n, i__3 = j + *kd;
+		i__4 = min(i__2,i__3);
+		for (i__ = j + 1; i__ <= i__4; ++i__) {
+		    i__2 = i__ + 1 - j + j * ab_dim1;
+		    d__1 = cj * s[i__];
+		    i__3 = i__ + 1 - j + j * ab_dim1;
+		    z__1.r = d__1 * ab[i__3].r, z__1.i = d__1 * ab[i__3].i;
+		    ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+	*(unsigned char *)equed = 'Y';
+    }
+
+    return 0;
+
+/*     End of ZLAQHB */
+
+} /* zlaqhb_ */
diff --git a/contrib/libs/clapack/zlaqhe.c b/contrib/libs/clapack/zlaqhe.c
new file mode 100644
index 0000000000..b77fc0aa91
--- /dev/null
+++ b/contrib/libs/clapack/zlaqhe.c
@@ -0,0 +1,193 @@
+/* zlaqhe.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlaqhe_(char *uplo, integer *n, doublecomplex *a,
+	integer *lda, doublereal *s, doublereal *scond, doublereal *amax,
+	char *equed)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+    doublereal d__1;
+    doublecomplex z__1;
+
+    /* Local variables */
+    integer i__, j;
+    doublereal cj, large;
+    extern logical lsame_(char *, char *);
+    doublereal small;
+    extern doublereal dlamch_(char *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZLAQHE equilibrates a Hermitian matrix A using the scaling factors */
+/*  in the vector S. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the upper or lower triangular part of the */
+/*          Hermitian matrix A is stored. */
+/*          = 'U':  Upper triangular */
+/*          = 'L':  Lower triangular */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/*          On entry, the Hermitian matrix A.  If UPLO = 'U', the leading */
+/*          n by n upper triangular part of A contains the upper */
+/*          triangular part of the matrix A, and the strictly lower */
+/*          triangular part of A is not referenced.  If UPLO = 'L', the */
+/*          leading n by n lower triangular part of A contains the lower */
+/*          triangular part of the matrix A, and the strictly upper */
+/*          triangular part of A is not referenced. */
+
+/*          On exit, if EQUED = 'Y', the equilibrated matrix: */
+/*          diag(S) * A * diag(S). */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(N,1). */
+
+/*  S       (input) DOUBLE PRECISION array, dimension (N) */
+/*          The scale factors for A. */
+
+/*  SCOND   (input) DOUBLE PRECISION */
+/*          Ratio of the smallest S(i) to the largest S(i). */
+
+/*  AMAX    (input) DOUBLE PRECISION */
+/*          Absolute value of largest matrix entry. */
+
+/*  EQUED   (output) CHARACTER*1 */
+/*          Specifies whether or not equilibration was done. */
+/*          = 'N':  No equilibration. */
+/*          = 'Y':  Equilibration was done, i.e., A has been replaced by */
+/*                  diag(S) * A * diag(S). */
+
+/*  Internal Parameters */
+/*  =================== */
+
+/*  THRESH is a threshold value used to decide if scaling should be done */
+/*  based on the ratio of the scaling factors.  If SCOND < THRESH */
+/*  (= 0.1 here), scaling is done. */
+
+/*  LARGE and SMALL are threshold values used to decide if scaling should */
+/*  be done based on the absolute size of the largest matrix element. */
+/*  If AMAX > LARGE or AMAX < SMALL, scaling is done. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Quick return if possible */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --s;
+
+    /* Function Body */
+    if (*n <= 0) {
+	*(unsigned char *)equed = 'N';
+	return 0;
+    }
+
+/*     Initialize LARGE and SMALL. */
+
+    small = dlamch_("Safe minimum") / dlamch_("Precision");
+    large = 1. / small;
+
+    if (*scond >= .1 && *amax >= small && *amax <= large) {
+
+/*        No equilibration */
+
+	*(unsigned char *)equed = 'N';
+    } else {
+
+/*        Replace A by diag(S) * A * diag(S). */
+
+	if (lsame_(uplo, "U")) {
+
+/*           Upper triangle of A is stored. */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = s[j];
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * a_dim1;
+		    d__1 = cj * s[i__];
+		    i__4 = i__ + j * a_dim1;
+		    z__1.r = d__1 * a[i__4].r, z__1.i = d__1 * a[i__4].i;
+		    a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L10: */
+		}
+		i__2 = j + j * a_dim1;
+		i__3 = j + j * a_dim1;
+		d__1 = cj * cj * a[i__3].r;
+		a[i__2].r = d__1, a[i__2].i = 0.;
+/* L20: */
+	    }
+	} else {
+
+/*           Lower triangle of A is stored. */
+
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = s[j];
+		i__2 = j + j * a_dim1;
+		i__3 = j + j * a_dim1;
+		d__1 = cj * cj * a[i__3].r;
+		a[i__2].r = d__1, a[i__2].i = 0.;
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * a_dim1;
+		    d__1 = cj * s[i__];
+		    i__4 = i__ + j * a_dim1;
+		    z__1.r = d__1 * a[i__4].r, z__1.i = d__1 * a[i__4].i;
+		    a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L30: */
+		}
+/* L40: */
+	    }
+	}
+	*(unsigned char *)equed = 'Y';
+    }
+
+    return 0;
+
+/*     End of ZLAQHE */
+
+} /* zlaqhe_ */
diff --git a/contrib/libs/clapack/zlaqhp.c b/contrib/libs/clapack/zlaqhp.c
new file mode 100644
index 0000000000..15019ff6b4
--- /dev/null
+++ b/contrib/libs/clapack/zlaqhp.c
@@ -0,0 +1,189 @@
+/* zlaqhp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlaqhp_(char *uplo, integer *n, doublecomplex *ap,
+	doublereal *s, doublereal *scond, doublereal *amax, char *equed)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3, i__4;
+    doublereal d__1;
+    doublecomplex z__1;
+
+    /* Local variables */
+    integer i__, j, jc;
+    doublereal cj, large;
+    extern logical lsame_(char *, char *);
+    doublereal small;
+    extern doublereal dlamch_(char *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZLAQHP equilibrates a Hermitian matrix A using the scaling factors */
+/*  in the vector S. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the upper or lower triangular part of the */
+/*          Hermitian matrix A is stored. */
+/*          = 'U':  Upper triangular */
+/*          = 'L':  Lower triangular */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  AP      (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/*          On entry, the upper or lower triangle of the Hermitian matrix */
+/*          A, packed columnwise in a linear array.  The j-th column of A */
+/*          is stored in the array AP as follows: */
+/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/*          if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/*          On exit, the equilibrated matrix:  diag(S) * A * diag(S), in */
+/*          the same storage format as A. */
+
+/*  S       (input) DOUBLE PRECISION array, dimension (N) */
+/*          The scale factors for A. */
+
+/*  SCOND   (input) DOUBLE PRECISION */
+/*          Ratio of the smallest S(i) to the largest S(i). */
+
+/*  AMAX    (input) DOUBLE PRECISION */
+/*          Absolute value of largest matrix entry. */
+
+/*  EQUED   (output) CHARACTER*1 */
+/*          Specifies whether or not equilibration was done. */
+/*          = 'N':  No equilibration. */
+/*          = 'Y':  Equilibration was done, i.e., A has been replaced by */
+/*                  diag(S) * A * diag(S). */
+
+/*  Internal Parameters */
+/*  =================== */
+
+/*  THRESH is a threshold value used to decide if scaling should be done */
+/*  based on the ratio of the scaling factors.  If SCOND < THRESH */
+/*  (= 0.1 here), scaling is done. */
+
+/*  LARGE and SMALL are threshold values used to decide if scaling should */
+/*  be done based on the absolute size of the largest matrix element. */
+/*  If AMAX > LARGE or AMAX < SMALL, scaling is done. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Quick return if possible */
+
+    /* Parameter adjustments */
+    --s;
+    --ap;
+
+    /* Function Body */
+    if (*n <= 0) {
+	*(unsigned char *)equed = 'N';
+	return 0;
+    }
+
+/*     Initialize LARGE and SMALL. */
+
+    small = dlamch_("Safe minimum") / dlamch_("Precision");
+    large = 1. / small;
+
+    if (*scond >= .1 && *amax >= small && *amax <= large) {
+
+/*        No equilibration */
+
+	*(unsigned char *)equed = 'N';
+    } else {
+
+/*        Replace A by diag(S) * A * diag(S). */
+
+	if (lsame_(uplo, "U")) {
+
+/*           Upper triangle of A is stored. */
+
+	    jc = 1;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = s[j];
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = jc + i__ - 1;
+		    d__1 = cj * s[i__];
+		    i__4 = jc + i__ - 1;
+		    z__1.r = d__1 * ap[i__4].r, z__1.i = d__1 * ap[i__4].i;
+		    ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+/* L10: */
+		}
+		i__2 = jc + j - 1;
+		i__3 = jc + j - 1;
+		d__1 = cj * cj * ap[i__3].r;
+		ap[i__2].r = d__1, ap[i__2].i = 0.;
+		jc += j;
+/* L20: */
+	    }
+	} else {
+
+/*           Lower triangle of A is stored. */
+
+	    jc = 1;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		cj = s[j];
+		i__2 = jc;
+		i__3 = jc;
+		d__1 = cj * cj * ap[i__3].r;
+		ap[i__2].r = d__1, ap[i__2].i = 0.;
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    i__3 = jc + i__ - j;
+		    d__1 = cj * s[i__];
+		    i__4 = jc + i__ - j;
+		    z__1.r = d__1 * ap[i__4].r, z__1.i = d__1 * ap[i__4].i;
+		    ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+/* L30: */
+		}
+		jc = jc + *n - j + 1;
+/* L40: */
+	    }
+	}
+	*(unsigned char *)equed = 'Y';
+    }
+
+    return 0;
+
+/*     End of ZLAQHP */
+
+} /* zlaqhp_ */
diff --git a/contrib/libs/clapack/zlaqp2.c b/contrib/libs/clapack/zlaqp2.c
new file mode 100644
index 0000000000..8b8567541f
--- /dev/null
+++ b/contrib/libs/clapack/zlaqp2.c
@@ -0,0 +1,242 @@
+/* zlaqp2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zlaqp2_(integer *m, integer *n, integer *offset,
+	doublecomplex *a, integer *lda, integer *jpvt, doublecomplex *tau,
+	doublereal *vn1, doublereal *vn2, doublecomplex *work)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    doublereal d__1;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+    void d_cnjg(doublecomplex *, doublecomplex *);
+    double z_abs(doublecomplex *);
+
+    /* Local variables */
+    integer i__, j, mn;
+    doublecomplex aii;
+    integer pvt;
+    doublereal temp, temp2, tol3z;
+    integer offpi, itemp;
+    extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, doublecomplex *), zswap_(integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *);
+    extern doublereal dznrm2_(integer *, doublecomplex *, integer *), dlamch_(
+	    char *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int zlarfp_(integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAQP2 computes a QR factorization with column pivoting of */
+/* the block A(OFFSET+1:M,1:N). */
+/* The block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0. */
+
+/* OFFSET (input) INTEGER */
+/* The number of rows of the matrix A that must be pivoted */
+/* but not factorized. OFFSET >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, the upper triangle of block A(OFFSET+1:M,1:N) is */
+/* the triangular factor obtained; the elements in block */
+/* A(OFFSET+1:M,1:N) below the diagonal, together with the */
+/* array TAU, represent the orthogonal matrix Q as a product of */
+/* elementary reflectors. Block A(1:OFFSET,1:N) has been */
+/* accordingly pivoted, but not factorized. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* On entry, if JPVT(i) .ne. 0, the i-th column of A is permuted */
+/* to the front of A*P (a leading column); if JPVT(i) = 0, */
+/* the i-th column of A is a free column. */
+/* On exit, if JPVT(i) = k, then the i-th column of A*P */
+/* was the k-th column of A. */
+
+/* TAU (output) COMPLEX*16 array, dimension (min(M,N)) */
+/* The scalar factors of the elementary reflectors. */
+
+/* VN1 (input/output) DOUBLE PRECISION array, dimension (N) */
+/* The vector with the partial column norms. */
+
+/* VN2 (input/output) DOUBLE PRECISION array, dimension (N) */
+/* The vector with the exact column norms. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N) */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+/* X. Sun, Computer Science Dept., Duke University, USA */
+
+/* Partial column norm updating strategy modified by */
+/* Z. Drmac and Z. Bujanovic, Dept. of Mathematics, */
+/* University of Zagreb, Croatia. */
+/* June 2006. */
+/* For more details see LAPACK Working Note 176. */
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --jpvt;
+    --tau;
+    --vn1;
+    --vn2;
+    --work;
+
+    /* Function Body */
+/* MN = number of reflectors to generate (rows available below the */
+/* already-pivoted block, capped by the number of columns). */
+/* Computing MIN */
+    i__1 = *m - *offset;
+    mn = min(i__1,*n);
+/* TOL3Z: threshold from LAPACK Working Note 176 below which a */
+/* downdated partial column norm is deemed unreliable and the norm */
+/* is recomputed from scratch. */
+    tol3z = sqrt(dlamch_("Epsilon"));
+
+/* Compute factorization. */
+
+    i__1 = mn;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+	offpi = *offset + i__;
+
+/* Determine ith pivot column and swap if necessary. */
+/* (IDAMAX picks the column with the largest remaining partial norm.) */
+
+	i__2 = *n - i__ + 1;
+	pvt = i__ - 1 + idamax_(&i__2, &vn1[i__], &c__1);
+
+	if (pvt != i__) {
+	    zswap_(m, &a[pvt * a_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &
+		    c__1);
+	    itemp = jpvt[pvt];
+	    jpvt[pvt] = jpvt[i__];
+	    jpvt[i__] = itemp;
+	    vn1[pvt] = vn1[i__];
+	    vn2[pvt] = vn2[i__];
+	}
+
+/* Generate elementary reflector H(i). */
+/* (The ELSE branch covers the degenerate case OFFPI = M, where the */
+/* column to annihilate has length 1.) */
+
+	if (offpi < *m) {
+	    i__2 = *m - offpi + 1;
+	    zlarfp_(&i__2, &a[offpi + i__ * a_dim1], &a[offpi + 1 + i__ *
+		    a_dim1], &c__1, &tau[i__]);
+	} else {
+	    zlarfp_(&c__1, &a[*m + i__ * a_dim1], &a[*m + i__ * a_dim1], &
+		    c__1, &tau[i__]);
+	}
+
+	if (i__ < *n) {
+
+/* Apply H(i)' to A(offset+i:m,i+1:n) from the left. */
+/* The pivot entry is temporarily overwritten with 1 so that column I */
+/* holds the full Householder vector for ZLARF; it is restored after. */
+
+	    i__2 = offpi + i__ * a_dim1;
+	    aii.r = a[i__2].r, aii.i = a[i__2].i;
+	    i__2 = offpi + i__ * a_dim1;
+	    a[i__2].r = 1., a[i__2].i = 0.;
+	    i__2 = *m - offpi + 1;
+	    i__3 = *n - i__;
+	    d_cnjg(&z__1, &tau[i__]);
+	    zlarf_("Left", &i__2, &i__3, &a[offpi + i__ * a_dim1], &c__1, &
+		    z__1, &a[offpi + (i__ + 1) * a_dim1], lda, &work[1]);
+	    i__2 = offpi + i__ * a_dim1;
+	    a[i__2].r = aii.r, a[i__2].i = aii.i;
+	}
+
+/* Update partial column norms. */
+
+	i__2 = *n;
+	for (j = i__ + 1; j <= i__2; ++j) {
+	    if (vn1[j] != 0.) {
+
+/* NOTE: The following 4 lines follow from the analysis in */
+/* Lapack Working Note 176. */
+
+/* Computing 2nd power */
+		d__1 = z_abs(&a[offpi + j * a_dim1]) / vn1[j];
+		temp = 1. - d__1 * d__1;
+		temp = max(temp,0.);
+/* Computing 2nd power */
+		d__1 = vn1[j] / vn2[j];
+		temp2 = temp * (d__1 * d__1);
+		if (temp2 <= tol3z) {
+/* Cancellation made the downdate untrustworthy: recompute the norm */
+/* of the trailing part of column J exactly. */
+		    if (offpi < *m) {
+			i__3 = *m - offpi;
+			vn1[j] = dznrm2_(&i__3, &a[offpi + 1 + j * a_dim1], &
+				c__1);
+			vn2[j] = vn1[j];
+		    } else {
+			vn1[j] = 0.;
+			vn2[j] = 0.;
+		    }
+		} else {
+		    vn1[j] *= sqrt(temp);
+		}
+	    }
+/* L10: */
+	}
+
+/* L20: */
+    }
+
+    return 0;
+
+/* End of ZLAQP2 */
+
+} /* zlaqp2_ */
diff --git a/contrib/libs/clapack/zlaqps.c b/contrib/libs/clapack/zlaqps.c
new file mode 100644
index 0000000000..3fd88c684d
--- /dev/null
+++ b/contrib/libs/clapack/zlaqps.c
@@ -0,0 +1,364 @@
+/* zlaqps.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zlaqps_(integer *m, integer *n, integer *offset, integer
+	*nb, integer *kb, doublecomplex *a, integer *lda, integer *jpvt,
+	doublecomplex *tau, doublereal *vn1, doublereal *vn2, doublecomplex *
+	auxv, doublecomplex *f, integer *ldf)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, f_dim1, f_offset, i__1, i__2, i__3;
+    doublereal d__1, d__2;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+    void d_cnjg(doublecomplex *, doublecomplex *);
+    double z_abs(doublecomplex *);
+    integer i_dnnt(doublereal *);
+
+    /* Local variables */
+    integer j, k, rk;
+    doublecomplex akk;
+    integer pvt;
+    doublereal temp, temp2, tol3z;
+    integer itemp;
+    extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *), zgemv_(char *, integer *, integer *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, doublecomplex *, integer *),
+	    zswap_(integer *, doublecomplex *, integer *, doublecomplex *,
+	    integer *);
+    extern doublereal dznrm2_(integer *, doublecomplex *, integer *), dlamch_(
+	    char *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    integer lsticc;
+    extern /* Subroutine */ int zlarfp_(integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *);
+    integer lastrk;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAQPS computes a step of QR factorization with column pivoting */
+/* of a complex M-by-N matrix A by using Blas-3. It tries to factorize */
+/* NB columns from A starting from the row OFFSET+1, and updates all */
+/* of the matrix with Blas-3 xGEMM. */
+
+/* In some cases, due to catastrophic cancellations, it cannot */
+/* factorize NB columns. Hence, the actual number of factorized */
+/* columns is returned in KB. */
+
+/* Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= 0 */
+
+/* OFFSET (input) INTEGER */
+/* The number of rows of A that have been factorized in */
+/* previous steps. */
+
+/* NB (input) INTEGER */
+/* The number of columns to factorize. */
+
+/* KB (output) INTEGER */
+/* The number of columns actually factorized. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, block A(OFFSET+1:M,1:KB) is the triangular */
+/* factor obtained and block A(1:OFFSET,1:N) has been */
+/* accordingly pivoted, but not factorized. */
+/* The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has */
+/* been updated. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* JPVT (input/output) INTEGER array, dimension (N) */
+/* JPVT(I) = K <==> Column K of the full matrix A has been */
+/* permuted into position I in AP. */
+
+/* TAU (output) COMPLEX*16 array, dimension (KB) */
+/* The scalar factors of the elementary reflectors. */
+
+/* VN1 (input/output) DOUBLE PRECISION array, dimension (N) */
+/* The vector with the partial column norms. */
+
+/* VN2 (input/output) DOUBLE PRECISION array, dimension (N) */
+/* The vector with the exact column norms. */
+
+/* AUXV (input/output) COMPLEX*16 array, dimension (NB) */
+/* Auxiliary vector. */
+
+/* F (input/output) COMPLEX*16 array, dimension (LDF,NB) */
+/* Matrix F' = L*Y'*A. */
+
+/* LDF (input) INTEGER */
+/* The leading dimension of the array F. LDF >= max(1,N). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
+/* X. Sun, Computer Science Dept., Duke University, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --jpvt;
+    --tau;
+    --vn1;
+    --vn2;
+    --auxv;
+    f_dim1 = *ldf;
+    f_offset = 1 + f_dim1;
+    f -= f_offset;
+
+    /* Function Body */
+/* LASTRK = last row that can receive a reflector in this block step. */
+/* Computing MIN */
+    i__1 = *m, i__2 = *n + *offset;
+    lastrk = min(i__1,i__2);
+/* LSTICC heads a singly linked list, threaded through VN2, of columns */
+/* whose downdated norms became unreliable; they are recomputed at L60. */
+    lsticc = 0;
+    k = 0;
+    tol3z = sqrt(dlamch_("Epsilon"));
+
+/* Beginning of while loop. */
+
+L10:
+    if (k < *nb && lsticc == 0) {
+	++k;
+	rk = *offset + k;
+
+/* Determine ith pivot column and swap if necessary */
+
+	i__1 = *n - k + 1;
+	pvt = k - 1 + idamax_(&i__1, &vn1[k], &c__1);
+	if (pvt != k) {
+	    zswap_(m, &a[pvt * a_dim1 + 1], &c__1, &a[k * a_dim1 + 1], &c__1);
+	    i__1 = k - 1;
+	    zswap_(&i__1, &f[pvt + f_dim1], ldf, &f[k + f_dim1], ldf);
+	    itemp = jpvt[pvt];
+	    jpvt[pvt] = jpvt[k];
+	    jpvt[k] = itemp;
+	    vn1[pvt] = vn1[k];
+	    vn2[pvt] = vn2[k];
+	}
+
+/* Apply previous Householder reflectors to column K: */
+/* A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'. */
+
+	if (k > 1) {
+
+/* Conjugate row K of F so that F(K,1:K-1)' can be applied with a */
+/* no-transpose ZGEMV; the second loop below undoes the conjugation. */
+
+	    i__1 = k - 1;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = k + j * f_dim1;
+		d_cnjg(&z__1, &f[k + j * f_dim1]);
+		f[i__2].r = z__1.r, f[i__2].i = z__1.i;
+/* L20: */
+	    }
+	    i__1 = *m - rk + 1;
+	    i__2 = k - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgemv_("No transpose", &i__1, &i__2, &z__1, &a[rk + a_dim1], lda,
+		    &f[k + f_dim1], ldf, &c_b2, &a[rk + k * a_dim1], &c__1);
+	    i__1 = k - 1;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = k + j * f_dim1;
+		d_cnjg(&z__1, &f[k + j * f_dim1]);
+		f[i__2].r = z__1.r, f[i__2].i = z__1.i;
+/* L30: */
+	    }
+	}
+
+/* Generate elementary reflector H(k). */
+
+	if (rk < *m) {
+	    i__1 = *m - rk + 1;
+	    zlarfp_(&i__1, &a[rk + k * a_dim1], &a[rk + 1 + k * a_dim1], &
+		    c__1, &tau[k]);
+	} else {
+	    zlarfp_(&c__1, &a[rk + k * a_dim1], &a[rk + k * a_dim1], &c__1, &
+		    tau[k]);
+	}
+
+/* Save the diagonal entry and overwrite it with 1 so that column K */
+/* holds the full Householder vector; AKK is restored further below. */
+
+	i__1 = rk + k * a_dim1;
+	akk.r = a[i__1].r, akk.i = a[i__1].i;
+	i__1 = rk + k * a_dim1;
+	a[i__1].r = 1., a[i__1].i = 0.;
+
+/* Compute Kth column of F: */
+
+/* Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K). */
+
+	if (k < *n) {
+	    i__1 = *m - rk + 1;
+	    i__2 = *n - k;
+	    zgemv_("Conjugate transpose", &i__1, &i__2, &tau[k], &a[rk + (k +
+		    1) * a_dim1], lda, &a[rk + k * a_dim1], &c__1, &c_b1, &f[
+		    k + 1 + k * f_dim1], &c__1);
+	}
+
+/* Padding F(1:K,K) with zeros. */
+
+	i__1 = k;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = j + k * f_dim1;
+	    f[i__2].r = 0., f[i__2].i = 0.;
+/* L40: */
+	}
+
+/* Incremental updating of F: */
+/* F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)' */
+/* *A(RK:M,K). */
+
+	if (k > 1) {
+	    i__1 = *m - rk + 1;
+	    i__2 = k - 1;
+	    i__3 = k;
+	    z__1.r = -tau[i__3].r, z__1.i = -tau[i__3].i;
+	    zgemv_("Conjugate transpose", &i__1, &i__2, &z__1, &a[rk + a_dim1]
+, lda, &a[rk + k * a_dim1], &c__1, &c_b1, &auxv[1], &c__1);
+
+	    i__1 = k - 1;
+	    zgemv_("No transpose", n, &i__1, &c_b2, &f[f_dim1 + 1], ldf, &
+		    auxv[1], &c__1, &c_b2, &f[k * f_dim1 + 1], &c__1);
+	}
+
+/* Update the current row of A: */
+/* A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */
+
+	if (k < *n) {
+	    i__1 = *n - k;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgemm_("No transpose", "Conjugate transpose", &c__1, &i__1, &k, &
+		    z__1, &a[rk + a_dim1], lda, &f[k + 1 + f_dim1], ldf, &
+		    c_b2, &a[rk + (k + 1) * a_dim1], lda);
+	}
+
+/* Update partial column norms. */
+
+	if (rk < lastrk) {
+	    i__1 = *n;
+	    for (j = k + 1; j <= i__1; ++j) {
+		if (vn1[j] != 0.) {
+
+/* NOTE: The following 4 lines follow from the analysis in */
+/* Lapack Working Note 176. */
+
+		    temp = z_abs(&a[rk + j * a_dim1]) / vn1[j];
+/* Computing MAX */
+		    d__1 = 0., d__2 = (temp + 1.) * (1. - temp);
+		    temp = max(d__1,d__2);
+/* Computing 2nd power */
+		    d__1 = vn1[j] / vn2[j];
+		    temp2 = temp * (d__1 * d__1);
+		    if (temp2 <= tol3z) {
+
+/* Defer recomputation: push column J onto the LSTICC list (VN2(J) */
+/* temporarily stores the previous list head). */
+
+			vn2[j] = (doublereal) lsticc;
+			lsticc = j;
+		    } else {
+			vn1[j] *= sqrt(temp);
+		    }
+		}
+/* L50: */
+	    }
+	}
+
+	i__1 = rk + k * a_dim1;
+	a[i__1].r = akk.r, a[i__1].i = akk.i;
+
+/* End of while loop. */
+
+	goto L10;
+    }
+    *kb = k;
+    rk = *offset + *kb;
+
+/* Apply the block reflector to the rest of the matrix: */
+/* A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) - */
+/* A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)'. */
+
+/* Computing MIN */
+    i__1 = *n, i__2 = *m - *offset;
+    if (*kb < min(i__1,i__2)) {
+	i__1 = *m - rk;
+	i__2 = *n - *kb;
+	z__1.r = -1., z__1.i = -0.;
+	zgemm_("No transpose", "Conjugate transpose", &i__1, &i__2, kb, &z__1,
+		 &a[rk + 1 + a_dim1], lda, &f[*kb + 1 + f_dim1], ldf, &c_b2, &
+		a[rk + 1 + (*kb + 1) * a_dim1], lda);
+    }
+
+/* Recomputation of difficult columns. */
+
+L60:
+    if (lsticc > 0) {
+
+/* Pop the next deferred column index from the list stored in VN2 */
+/* (I_DNNT rounds the stored double back to an integer). */
+
+	itemp = i_dnnt(&vn2[lsticc]);
+	i__1 = *m - rk;
+	vn1[lsticc] = dznrm2_(&i__1, &a[rk + 1 + lsticc * a_dim1], &c__1);
+
+/* NOTE: The computation of VN1( LSTICC ) relies on the fact that */
+/* SNRM2 does not fail on vectors with norm below the value of */
+/* SQRT(DLAMCH('S')) */
+
+	vn2[lsticc] = vn1[lsticc];
+	lsticc = itemp;
+	goto L60;
+    }
+
+    return 0;
+
+/* End of ZLAQPS */
+
+} /* zlaqps_ */
diff --git a/contrib/libs/clapack/zlaqr0.c b/contrib/libs/clapack/zlaqr0.c
new file mode 100644
index 0000000000..7b76f0e596
--- /dev/null
+++ b/contrib/libs/clapack/zlaqr0.c
@@ -0,0 +1,787 @@
+/* zlaqr0.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__13 = 13;
+static integer c__15 = 15;
+static integer c_n1 = -1;
+static integer c__12 = 12;
+static integer c__14 = 14;
+static integer c__16 = 16;
+static logical c_false = FALSE_;
+static integer c__1 = 1;
+static integer c__3 = 3;
+
+/* Subroutine */ int zlaqr0_(logical *wantt, logical *wantz, integer *n,
+ integer *ilo, integer *ihi, doublecomplex *h__, integer *ldh,
+ doublecomplex *w, integer *iloz, integer *ihiz, doublecomplex *z__,
+ integer *ldz, doublecomplex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8;
+ doublecomplex z__1, z__2, z__3, z__4, z__5;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+ void z_sqrt(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, k;
+ doublereal s;
+ doublecomplex aa, bb, cc, dd;
+ integer ld, nh, it, ks, kt, ku, kv, ls, ns, nw;
+ doublecomplex tr2, det;
+ integer inf, kdu, nho, nve, kwh, nsr, nwr, kwv, ndec, ndfl, kbot, nmin;
+ doublecomplex swap;
+ integer ktop;
+ doublecomplex zdum[1] /* was [1][1] */;
+ integer kacc22, itmax, nsmax, nwmax, kwtop;
+ extern /* Subroutine */ int zlaqr3_(logical *, logical *, integer *,
+ integer *, integer *, integer *, doublecomplex *, integer *,
+ integer *, integer *, doublecomplex *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *,
+ doublecomplex *, integer *, integer *, doublecomplex *, integer *
+, doublecomplex *, integer *), zlaqr4_(logical *, logical *,
+ integer *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, integer *), zlaqr5_(logical *,
+ logical *, integer *, integer *, integer *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, integer *, doublecomplex *, integer *,
+ integer *, doublecomplex *, integer *);
+ integer nibble;
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ char jbcmpz[1];
+ doublecomplex rtdisc;
+ integer nwupbd;
+ logical sorted;
+ extern /* Subroutine */ int zlahqr_(logical *, logical *, integer *,
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *, integer *, doublecomplex *, integer *, integer *),
+ zlacpy_(char *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ integer lwkopt;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAQR0 computes the eigenvalues of a Hessenberg matrix H */
+/* and, optionally, the matrices T and Z from the Schur decomposition */
+/* H = Z T Z**H, where T is an upper triangular matrix (the */
+/* Schur form), and Z is the unitary matrix of Schur vectors. */
+
+/* Optionally Z may be postmultiplied into an input unitary */
+/* matrix Q so that this routine can give the Schur factorization */
+/* of a matrix A which has been reduced to the Hessenberg form H */
+/* by the unitary matrix Q: A = Q*H*Q**H = (QZ)*H*(QZ)**H. */
+
+/* Arguments */
+/* ========= */
+
+/* WANTT (input) LOGICAL */
+/* = .TRUE. : the full Schur form T is required; */
+/* = .FALSE.: only eigenvalues are required. */
+
+/* WANTZ (input) LOGICAL */
+/* = .TRUE. : the matrix of Schur vectors Z is required; */
+/* = .FALSE.: Schur vectors are not required. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N .GE. 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that H is already upper triangular in rows */
+/* and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, */
+/* H(ILO,ILO-1) is zero. ILO and IHI are normally set by a */
+/* previous call to ZGEBAL, and then passed to ZGEHRD when the */
+/* matrix output by ZGEBAL is reduced to Hessenberg form. */
+/* Otherwise, ILO and IHI should be set to 1 and N, */
+/* respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. */
+/* If N = 0, then ILO = 1 and IHI = 0. */
+
+/* H (input/output) COMPLEX*16 array, dimension (LDH,N) */
+/* On entry, the upper Hessenberg matrix H. */
+/* On exit, if INFO = 0 and WANTT is .TRUE., then H */
+/* contains the upper triangular matrix T from the Schur */
+/* decomposition (the Schur form). If INFO = 0 and WANT is */
+/* .FALSE., then the contents of H are unspecified on exit. */
+/* (The output value of H when INFO.GT.0 is given under the */
+/* description of INFO below.) */
+
+/* This subroutine may explicitly set H(i,j) = 0 for i.GT.j and */
+/* j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH .GE. max(1,N). */
+
+/* W (output) COMPLEX*16 array, dimension (N) */
+/* The computed eigenvalues of H(ILO:IHI,ILO:IHI) are stored */
+/* in W(ILO:IHI). If WANTT is .TRUE., then the eigenvalues are */
+/* stored in the same order as on the diagonal of the Schur */
+/* form returned in H, with W(i) = H(i,i). */
+
+/* Z (input/output) COMPLEX*16 array, dimension (LDZ,IHI) */
+/* If WANTZ is .FALSE., then Z is not referenced. */
+/* If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is */
+/* replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the */
+/* orthogonal Schur factor of H(ILO:IHI,ILO:IHI). */
+/* (The output value of Z when INFO.GT.0 is given under */
+/* the description of INFO below.) */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. if WANTZ is .TRUE. */
+/* then LDZ.GE.MAX(1,IHIZ). Otherwize, LDZ.GE.1. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension LWORK */
+/* On exit, if LWORK = -1, WORK(1) returns an estimate of */
+/* the optimal value for LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK .GE. max(1,N) */
+/* is sufficient, but LWORK typically as large as 6*N may */
+/* be required for optimal performance. A workspace query */
+/* to determine the optimal workspace size is recommended. */
+
+/* If LWORK = -1, then ZLAQR0 does a workspace query. */
+/* In this case, ZLAQR0 checks the input parameters and */
+/* estimates the optimal workspace size for the given */
+/* values of N, ILO and IHI. The estimate is returned */
+/* in WORK(1). No error message related to LWORK is */
+/* issued by XERBLA. Neither H nor Z are accessed. */
+
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* .GT. 0: if INFO = i, ZLAQR0 failed to compute all of */
+/* the eigenvalues. Elements 1:ilo-1 and i+1:n of WR */
+/* and WI contain those eigenvalues which have been */
+/* successfully computed. (Failures are rare.) */
+
+/* If INFO .GT. 0 and WANT is .FALSE., then on exit, */
+/* the remaining unconverged eigenvalues are the eigen- */
+/* values of the upper Hessenberg matrix rows and */
+/* columns ILO through INFO of the final, output */
+/* value of H. */
+
+/* If INFO .GT. 0 and WANTT is .TRUE., then on exit */
+
+/* (*) (initial value of H)*U = U*(final value of H) */
+
+/* where U is a unitary matrix. The final */
+/* value of H is upper Hessenberg and triangular in */
+/* rows and columns INFO+1 through IHI. */
+
+/* If INFO .GT. 0 and WANTZ is .TRUE., then on exit */
+
+/* (final value of Z(ILO:IHI,ILOZ:IHIZ) */
+/* = (initial value of Z(ILO:IHI,ILOZ:IHIZ)*U */
+
+/* where U is the unitary matrix in (*) (regard- */
+/* less of the value of WANTT.) */
+
+/* If INFO .GT. 0 and WANTZ is .FALSE., then Z is not */
+/* accessed. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* References: */
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 */
+/* Performance, SIAM Journal of Matrix Analysis, volume 23, pages */
+/* 929--947, 2002. */
+
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part II: Aggressive Early Deflation, SIAM Journal */
+/* of Matrix Analysis, volume 23, pages 948--973, 2002. */
+
+/* ================================================================ */
+/* .. Parameters .. */
+
+/* ==== Matrices of order NTINY or smaller must be processed by */
+/* . ZLAHQR because of insufficient subdiagonal scratch space. */
+/* . (This is a hard limit.) ==== */
+
+/* ==== Exceptional deflation windows: try to cure rare */
+/* . slow convergence by varying the size of the */
+/* . deflation window after KEXNW iterations. ==== */
+
+/* ==== Exceptional shifts: try to cure rare slow convergence */
+/* . with ad-hoc exceptional shifts every KEXSH iterations. */
+/* . ==== */
+
+/* ==== The constant WILK1 is used to form the exceptional */
+/* . shifts. ==== */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+/* ==== Quick return for N = 0: nothing to do. ==== */
+
+ if (*n == 0) {
+ work[1].r = 1., work[1].i = 0.;
+ return 0;
+ }
+
+ if (*n <= 11) {
+
+/* ==== Tiny matrices must use ZLAHQR. ==== */
+
+ lwkopt = 1;
+ if (*lwork != -1) {
+ zlahqr_(wantt, wantz, n, ilo, ihi, &h__[h_offset], ldh, &w[1],
+ iloz, ihiz, &z__[z_offset], ldz, info);
+ }
+ } else {
+
+/* ==== Use small bulge multi-shift QR with aggressive early */
+/* . deflation on larger-than-tiny matrices. ==== */
+
+/* ==== Hope for the best. ==== */
+
+ *info = 0;
+
+/* ==== Set up job flags for ILAENV. ==== */
+
+ if (*wantt) {
+ *(unsigned char *)jbcmpz = 'S';
+ } else {
+ *(unsigned char *)jbcmpz = 'E';
+ }
+ if (*wantz) {
+ *(unsigned char *)&jbcmpz[1] = 'V';
+ } else {
+ *(unsigned char *)&jbcmpz[1] = 'N';
+ }
+
+/* ==== NWR = recommended deflation window size. At this */
+/* . point, N .GT. NTINY = 11, so there is enough */
+/* . subdiagonal workspace for NWR.GE.2 as required. */
+/* . (In fact, there is enough subdiagonal space for */
+/* . NWR.GE.3.) ==== */
+
+ nwr = ilaenv_(&c__13, "ZLAQR0", jbcmpz, n, ilo, ihi, lwork);
+ nwr = max(2,nwr);
+/* Computing MIN */
+ i__1 = *ihi - *ilo + 1, i__2 = (*n - 1) / 3, i__1 = min(i__1,i__2);
+ nwr = min(i__1,nwr);
+
+/* ==== NSR = recommended number of simultaneous shifts. */
+/* . At this point N .GT. NTINY = 11, so there is at */
+/* . enough subdiagonal workspace for NSR to be even */
+/* . and greater than or equal to two as required. ==== */
+
+ nsr = ilaenv_(&c__15, "ZLAQR0", jbcmpz, n, ilo, ihi, lwork);
+/* Computing MIN */
+ i__1 = nsr, i__2 = (*n + 6) / 9, i__1 = min(i__1,i__2), i__2 = *ihi -
+ *ilo;
+ nsr = min(i__1,i__2);
+/* Computing MAX */
+ i__1 = 2, i__2 = nsr - nsr % 2;
+ nsr = max(i__1,i__2);
+
+/* ==== Estimate optimal workspace ==== */
+
+/* ==== Workspace query call to ZLAQR3 ==== */
+
+ i__1 = nwr + 1;
+ zlaqr3_(wantt, wantz, n, ilo, ihi, &i__1, &h__[h_offset], ldh, iloz,
+ ihiz, &z__[z_offset], ldz, &ls, &ld, &w[1], &h__[h_offset],
+ ldh, n, &h__[h_offset], ldh, n, &h__[h_offset], ldh, &work[1],
+ &c_n1);
+
+/* ==== Optimal workspace = MAX(ZLAQR5, ZLAQR3) ==== */
+
+/* Computing MAX */
+ i__1 = nsr * 3 / 2, i__2 = (integer) work[1].r;
+ lwkopt = max(i__1,i__2);
+
+/* ==== Quick return in case of workspace query. ==== */
+
+ if (*lwork == -1) {
+ d__1 = (doublereal) lwkopt;
+ z__1.r = d__1, z__1.i = 0.;
+ work[1].r = z__1.r, work[1].i = z__1.i;
+ return 0;
+ }
+
+/* ==== ZLAHQR/ZLAQR0 crossover point ==== */
+
+ nmin = ilaenv_(&c__12, "ZLAQR0", jbcmpz, n, ilo, ihi, lwork);
+ nmin = max(11,nmin);
+
+/* ==== Nibble crossover point ==== */
+
+ nibble = ilaenv_(&c__14, "ZLAQR0", jbcmpz, n, ilo, ihi, lwork);
+ nibble = max(0,nibble);
+
+/* ==== Accumulate reflections during ttswp? Use block */
+/* . 2-by-2 structure during matrix-matrix multiply? ==== */
+
+ kacc22 = ilaenv_(&c__16, "ZLAQR0", jbcmpz, n, ilo, ihi, lwork);
+ kacc22 = max(0,kacc22);
+ kacc22 = min(2,kacc22);
+
+/* ==== NWMAX = the largest possible deflation window for */
+/* . which there is sufficient workspace. ==== */
+
+/* Computing MIN */
+ i__1 = (*n - 1) / 3, i__2 = *lwork / 2;
+ nwmax = min(i__1,i__2);
+ nw = nwmax;
+
+/* ==== NSMAX = the Largest number of simultaneous shifts */
+/* . for which there is sufficient workspace. ==== */
+
+/* Computing MIN */
+ i__1 = (*n + 6) / 9, i__2 = (*lwork << 1) / 3;
+ nsmax = min(i__1,i__2);
+ nsmax -= nsmax % 2;
+
+/* ==== NDFL: an iteration count restarted at deflation. ==== */
+
+ ndfl = 1;
+
+/* ==== ITMAX = iteration limit ==== */
+
+/* Computing MAX */
+ i__1 = 10, i__2 = *ihi - *ilo + 1;
+ itmax = max(i__1,i__2) * 30;
+
+/* ==== Last row and column in the active block ==== */
+
+ kbot = *ihi;
+
+/* ==== Main Loop ==== */
+
+ i__1 = itmax;
+ for (it = 1; it <= i__1; ++it) {
+
+/* ==== Done when KBOT falls below ILO ==== */
+
+ if (kbot < *ilo) {
+ goto L80;
+ }
+
+/* ==== Locate active block ==== */
+
+ i__2 = *ilo + 1;
+ for (k = kbot; k >= i__2; --k) {
+ i__3 = k + (k - 1) * h_dim1;
+ if (h__[i__3].r == 0. && h__[i__3].i == 0.) {
+ goto L20;
+ }
+/* L10: */
+ }
+ k = *ilo;
+L20:
+ ktop = k;
+
+/* ==== Select deflation window size: */
+/* . Typical Case: */
+/* . If possible and advisable, nibble the entire */
+/* . active block. If not, use size MIN(NWR,NWMAX) */
+/* . or MIN(NWR+1,NWMAX) depending upon which has */
+/* . the smaller corresponding subdiagonal entry */
+/* . (a heuristic). */
+/* . */
+/* . Exceptional Case: */
+/* . If there have been no deflations in KEXNW or */
+/* . more iterations, then vary the deflation window */
+/* . size. At first, because, larger windows are, */
+/* . in general, more powerful than smaller ones, */
+/* . rapidly increase the window to the maximum possible. */
+/* . Then, gradually reduce the window size. ==== */
+
+ nh = kbot - ktop + 1;
+ nwupbd = min(nh,nwmax);
+ if (ndfl < 5) {
+ nw = min(nwupbd,nwr);
+ } else {
+/* Computing MIN */
+ i__2 = nwupbd, i__3 = nw << 1;
+ nw = min(i__2,i__3);
+ }
+ if (nw < nwmax) {
+ if (nw >= nh - 1) {
+ nw = nh;
+ } else {
+ kwtop = kbot - nw + 1;
+ i__2 = kwtop + (kwtop - 1) * h_dim1;
+ i__3 = kwtop - 1 + (kwtop - 2) * h_dim1;
+ if ((d__1 = h__[i__2].r, abs(d__1)) + (d__2 = d_imag(&h__[
+ kwtop + (kwtop - 1) * h_dim1]), abs(d__2)) > (
+ d__3 = h__[i__3].r, abs(d__3)) + (d__4 = d_imag(&
+ h__[kwtop - 1 + (kwtop - 2) * h_dim1]), abs(d__4))
+ ) {
+ ++nw;
+ }
+ }
+ }
+ if (ndfl < 5) {
+ ndec = -1;
+ } else if (ndec >= 0 || nw >= nwupbd) {
+ ++ndec;
+ if (nw - ndec < 2) {
+ ndec = 0;
+ }
+ nw -= ndec;
+ }
+
+/* ==== Aggressive early deflation: */
+/* . split workspace under the subdiagonal into */
+/* . - an nw-by-nw work array V in the lower */
+/* . left-hand-corner, */
+/* . - an NW-by-at-least-NW-but-more-is-better */
+/* . (NW-by-NHO) horizontal work array along */
+/* . the bottom edge, */
+/* . - an at-least-NW-but-more-is-better (NHV-by-NW) */
+/* . vertical work array along the left-hand-edge. */
+/* . ==== */
+
+ kv = *n - nw + 1;
+ kt = nw + 1;
+ nho = *n - nw - 1 - kt + 1;
+ kwv = nw + 2;
+ nve = *n - nw - kwv + 1;
+
+/* ==== Aggressive early deflation ==== */
+
+ zlaqr3_(wantt, wantz, n, &ktop, &kbot, &nw, &h__[h_offset], ldh,
+ iloz, ihiz, &z__[z_offset], ldz, &ls, &ld, &w[1], &h__[kv
+ + h_dim1], ldh, &nho, &h__[kv + kt * h_dim1], ldh, &nve, &
+ h__[kwv + h_dim1], ldh, &work[1], lwork);
+
+/* ==== Adjust KBOT accounting for new deflations. ==== */
+
+ kbot -= ld;
+
+/* ==== KS points to the shifts. ==== */
+
+ ks = kbot - ls + 1;
+
+/* ==== Skip an expensive QR sweep if there is a (partly */
+/* . heuristic) reason to expect that many eigenvalues */
+/* . will deflate without it. Here, the QR sweep is */
+/* . skipped if many eigenvalues have just been deflated */
+/* . or if the remaining active block is small. */
+
+ if (ld == 0 || ld * 100 <= nw * nibble && kbot - ktop + 1 > min(
+ nmin,nwmax)) {
+
+/* ==== NS = nominal number of simultaneous shifts. */
+/* . This may be lowered (slightly) if ZLAQR3 */
+/* . did not provide that many shifts. ==== */
+
+/* Computing MIN */
+/* Computing MAX */
+ i__4 = 2, i__5 = kbot - ktop;
+ i__2 = min(nsmax,nsr), i__3 = max(i__4,i__5);
+ ns = min(i__2,i__3);
+ ns -= ns % 2;
+
+/* ==== If there have been no deflations */
+/* . in a multiple of KEXSH iterations, */
+/* . then try exceptional shifts. */
+/* . Otherwise use shifts provided by */
+/* . ZLAQR3 above or from the eigenvalues */
+/* . of a trailing principal submatrix. ==== */
+
+ if (ndfl % 6 == 0) {
+ ks = kbot - ns + 1;
+ i__2 = ks + 1;
+ for (i__ = kbot; i__ >= i__2; i__ += -2) {
+ i__3 = i__;
+ i__4 = i__ + i__ * h_dim1;
+ i__5 = i__ + (i__ - 1) * h_dim1;
+ d__3 = ((d__1 = h__[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&h__[i__ + (i__ - 1) * h_dim1]), abs(
+ d__2))) * .75;
+ z__1.r = h__[i__4].r + d__3, z__1.i = h__[i__4].i;
+ w[i__3].r = z__1.r, w[i__3].i = z__1.i;
+ i__3 = i__ - 1;
+ i__4 = i__;
+ w[i__3].r = w[i__4].r, w[i__3].i = w[i__4].i;
+/* L30: */
+ }
+ } else {
+
+/* ==== Got NS/2 or fewer shifts? Use ZLAQR4 or */
+/* . ZLAHQR on a trailing principal submatrix to */
+/* . get more. (Since NS.LE.NSMAX.LE.(N+6)/9, */
+/* . there is enough space below the subdiagonal */
+/* . to fit an NS-by-NS scratch array.) ==== */
+
+ if (kbot - ks + 1 <= ns / 2) {
+ ks = kbot - ns + 1;
+ kt = *n - ns + 1;
+ zlacpy_("A", &ns, &ns, &h__[ks + ks * h_dim1], ldh, &
+ h__[kt + h_dim1], ldh);
+ if (ns > nmin) {
+ zlaqr4_(&c_false, &c_false, &ns, &c__1, &ns, &h__[
+ kt + h_dim1], ldh, &w[ks], &c__1, &c__1,
+ zdum, &c__1, &work[1], lwork, &inf);
+ } else {
+ zlahqr_(&c_false, &c_false, &ns, &c__1, &ns, &h__[
+ kt + h_dim1], ldh, &w[ks], &c__1, &c__1,
+ zdum, &c__1, &inf);
+ }
+ ks += inf;
+
+/* ==== In case of a rare QR failure use */
+/* . eigenvalues of the trailing 2-by-2 */
+/* . principal submatrix. Scale to avoid */
+/* . overflows, underflows and subnormals. */
+/* . (The scale factor S can not be zero, */
+/* . because H(KBOT,KBOT-1) is nonzero.) ==== */
+
+ if (ks >= kbot) {
+ i__2 = kbot - 1 + (kbot - 1) * h_dim1;
+ i__3 = kbot + (kbot - 1) * h_dim1;
+ i__4 = kbot - 1 + kbot * h_dim1;
+ i__5 = kbot + kbot * h_dim1;
+ s = (d__1 = h__[i__2].r, abs(d__1)) + (d__2 =
+ d_imag(&h__[kbot - 1 + (kbot - 1) *
+ h_dim1]), abs(d__2)) + ((d__3 = h__[i__3]
+ .r, abs(d__3)) + (d__4 = d_imag(&h__[kbot
+ + (kbot - 1) * h_dim1]), abs(d__4))) + ((
+ d__5 = h__[i__4].r, abs(d__5)) + (d__6 =
+ d_imag(&h__[kbot - 1 + kbot * h_dim1]),
+ abs(d__6))) + ((d__7 = h__[i__5].r, abs(
+ d__7)) + (d__8 = d_imag(&h__[kbot + kbot *
+ h_dim1]), abs(d__8)));
+ i__2 = kbot - 1 + (kbot - 1) * h_dim1;
+ z__1.r = h__[i__2].r / s, z__1.i = h__[i__2].i /
+ s;
+ aa.r = z__1.r, aa.i = z__1.i;
+ i__2 = kbot + (kbot - 1) * h_dim1;
+ z__1.r = h__[i__2].r / s, z__1.i = h__[i__2].i /
+ s;
+ cc.r = z__1.r, cc.i = z__1.i;
+ i__2 = kbot - 1 + kbot * h_dim1;
+ z__1.r = h__[i__2].r / s, z__1.i = h__[i__2].i /
+ s;
+ bb.r = z__1.r, bb.i = z__1.i;
+ i__2 = kbot + kbot * h_dim1;
+ z__1.r = h__[i__2].r / s, z__1.i = h__[i__2].i /
+ s;
+ dd.r = z__1.r, dd.i = z__1.i;
+ z__2.r = aa.r + dd.r, z__2.i = aa.i + dd.i;
+ z__1.r = z__2.r / 2., z__1.i = z__2.i / 2.;
+ tr2.r = z__1.r, tr2.i = z__1.i;
+ z__3.r = aa.r - tr2.r, z__3.i = aa.i - tr2.i;
+ z__4.r = dd.r - tr2.r, z__4.i = dd.i - tr2.i;
+ z__2.r = z__3.r * z__4.r - z__3.i * z__4.i,
+ z__2.i = z__3.r * z__4.i + z__3.i *
+ z__4.r;
+ z__5.r = bb.r * cc.r - bb.i * cc.i, z__5.i = bb.r
+ * cc.i + bb.i * cc.r;
+ z__1.r = z__2.r - z__5.r, z__1.i = z__2.i -
+ z__5.i;
+ det.r = z__1.r, det.i = z__1.i;
+ z__2.r = -det.r, z__2.i = -det.i;
+ z_sqrt(&z__1, &z__2);
+ rtdisc.r = z__1.r, rtdisc.i = z__1.i;
+ i__2 = kbot - 1;
+ z__2.r = tr2.r + rtdisc.r, z__2.i = tr2.i +
+ rtdisc.i;
+ z__1.r = s * z__2.r, z__1.i = s * z__2.i;
+ w[i__2].r = z__1.r, w[i__2].i = z__1.i;
+ i__2 = kbot;
+ z__2.r = tr2.r - rtdisc.r, z__2.i = tr2.i -
+ rtdisc.i;
+ z__1.r = s * z__2.r, z__1.i = s * z__2.i;
+ w[i__2].r = z__1.r, w[i__2].i = z__1.i;
+
+ ks = kbot - 1;
+ }
+ }
+
+ if (kbot - ks + 1 > ns) {
+
+/* ==== Sort the shifts (Helps a little) ==== */
+
+ sorted = FALSE_;
+ i__2 = ks + 1;
+ for (k = kbot; k >= i__2; --k) {
+ if (sorted) {
+ goto L60;
+ }
+ sorted = TRUE_;
+ i__3 = k - 1;
+ for (i__ = ks; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ i__5 = i__ + 1;
+ if ((d__1 = w[i__4].r, abs(d__1)) + (d__2 =
+ d_imag(&w[i__]), abs(d__2)) < (d__3 =
+ w[i__5].r, abs(d__3)) + (d__4 =
+ d_imag(&w[i__ + 1]), abs(d__4))) {
+ sorted = FALSE_;
+ i__4 = i__;
+ swap.r = w[i__4].r, swap.i = w[i__4].i;
+ i__4 = i__;
+ i__5 = i__ + 1;
+ w[i__4].r = w[i__5].r, w[i__4].i = w[i__5]
+ .i;
+ i__4 = i__ + 1;
+ w[i__4].r = swap.r, w[i__4].i = swap.i;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+L60:
+ ;
+ }
+ }
+
+/* ==== If there are only two shifts, then use */
+/* . only one. ==== */
+
+ if (kbot - ks + 1 == 2) {
+ i__2 = kbot;
+ i__3 = kbot + kbot * h_dim1;
+ z__2.r = w[i__2].r - h__[i__3].r, z__2.i = w[i__2].i -
+ h__[i__3].i;
+ z__1.r = z__2.r, z__1.i = z__2.i;
+ i__4 = kbot - 1;
+ i__5 = kbot + kbot * h_dim1;
+ z__4.r = w[i__4].r - h__[i__5].r, z__4.i = w[i__4].i -
+ h__[i__5].i;
+ z__3.r = z__4.r, z__3.i = z__4.i;
+ if ((d__1 = z__1.r, abs(d__1)) + (d__2 = d_imag(&z__1),
+ abs(d__2)) < (d__3 = z__3.r, abs(d__3)) + (d__4 =
+ d_imag(&z__3), abs(d__4))) {
+ i__2 = kbot - 1;
+ i__3 = kbot;
+ w[i__2].r = w[i__3].r, w[i__2].i = w[i__3].i;
+ } else {
+ i__2 = kbot;
+ i__3 = kbot - 1;
+ w[i__2].r = w[i__3].r, w[i__2].i = w[i__3].i;
+ }
+ }
+
+/* ==== Use up to NS of the the smallest magnatiude */
+/* . shifts. If there aren't NS shifts available, */
+/* . then use them all, possibly dropping one to */
+/* . make the number of shifts even. ==== */
+
+/* Computing MIN */
+ i__2 = ns, i__3 = kbot - ks + 1;
+ ns = min(i__2,i__3);
+ ns -= ns % 2;
+ ks = kbot - ns + 1;
+
+/* ==== Small-bulge multi-shift QR sweep: */
+/* . split workspace under the subdiagonal into */
+/* . - a KDU-by-KDU work array U in the lower */
+/* . left-hand-corner, */
+/* . - a KDU-by-at-least-KDU-but-more-is-better */
+/* . (KDU-by-NHo) horizontal work array WH along */
+/* . the bottom edge, */
+/* . - and an at-least-KDU-but-more-is-better-by-KDU */
+/* . (NVE-by-KDU) vertical work WV arrow along */
+/* . the left-hand-edge. ==== */
+
+ kdu = ns * 3 - 3;
+ ku = *n - kdu + 1;
+ kwh = kdu + 1;
+ nho = *n - kdu - 3 - (kdu + 1) + 1;
+ kwv = kdu + 4;
+ nve = *n - kdu - kwv + 1;
+
+/* ==== Small-bulge multi-shift QR sweep ==== */
+
+ zlaqr5_(wantt, wantz, &kacc22, n, &ktop, &kbot, &ns, &w[ks], &
+ h__[h_offset], ldh, iloz, ihiz, &z__[z_offset], ldz, &
+ work[1], &c__3, &h__[ku + h_dim1], ldh, &nve, &h__[
+ kwv + h_dim1], ldh, &nho, &h__[ku + kwh * h_dim1],
+ ldh);
+ }
+
+/* ==== Note progress (or the lack of it). ==== */
+
+ if (ld > 0) {
+ ndfl = 1;
+ } else {
+ ++ndfl;
+ }
+
+/* ==== End of main loop ==== */
+/* L70: */
+ }
+
+/* ==== Iteration limit exceeded. Set INFO to show where */
+/* . the problem occurred and exit. ==== */
+
+ *info = kbot;
+L80:
+ ;
+ }
+
+/* ==== Return the optimal value of LWORK. ==== */
+
+ d__1 = (doublereal) lwkopt;
+ z__1.r = d__1, z__1.i = 0.;
+ work[1].r = z__1.r, work[1].i = z__1.i;
+
+/* ==== End of ZLAQR0 ==== */
+
+ return 0;
+} /* zlaqr0_ */
diff --git a/contrib/libs/clapack/zlaqr1.c b/contrib/libs/clapack/zlaqr1.c
new file mode 100644
index 0000000000..fc0bb5e6e1
--- /dev/null
+++ b/contrib/libs/clapack/zlaqr1.c
@@ -0,0 +1,197 @@
+/* zlaqr1.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlaqr1_(integer *n, doublecomplex *h__, integer *ldh,
+ doublecomplex *s1, doublecomplex *s2, doublecomplex *v)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1, d__2, d__3, d__4, d__5, d__6;
+ doublecomplex z__1, z__2, z__3, z__4, z__5, z__6, z__7, z__8;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ doublereal s;
+ doublecomplex h21s, h31s;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Given a 2-by-2 or 3-by-3 matrix H, ZLAQR1 sets v to a */
+/* scalar multiple of the first column of the product */
+
+/* (*) K = (H - s1*I)*(H - s2*I) */
+
+/* scaling to avoid overflows and most underflows. */
+
+/* This is useful for starting double implicit shift bulges */
+/* in the QR algorithm. */
+
+
+/* N (input) integer */
+/* Order of the matrix H. N must be either 2 or 3. */
+
+/* H (input) COMPLEX*16 array of dimension (LDH,N) */
+/* The 2-by-2 or 3-by-3 matrix H in (*). */
+
+/* LDH (input) integer */
+/* The leading dimension of H as declared in */
+/* the calling procedure. LDH.GE.N */
+
+/* S1 (input) COMPLEX*16 */
+/* S2 S1 and S2 are the shifts defining K in (*) above. */
+
+/* V (output) COMPLEX*16 array of dimension N */
+/* A scalar multiple of the first column of the */
+/* matrix K in (*). */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+ /* Parameter adjustments: offset base pointers so Fortran-style 1-based indexing works */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --v;
+
+ /* Function Body: 2-by-2 case first; the else branch is the 3-by-3 case */
+ if (*n == 2) {
+ i__1 = h_dim1 + 1;
+ z__2.r = h__[i__1].r - s2->r, z__2.i = h__[i__1].i - s2->i;
+ z__1.r = z__2.r, z__1.i = z__2.i;
+ i__2 = h_dim1 + 2;
+ s = (d__1 = z__1.r, abs(d__1)) + (d__2 = d_imag(&z__1), abs(d__2)) + (
+ (d__3 = h__[i__2].r, abs(d__3)) + (d__4 = d_imag(&h__[h_dim1
+ + 2]), abs(d__4)));
+ if (s == 0.) {
+ v[1].r = 0., v[1].i = 0.;
+ v[2].r = 0., v[2].i = 0.;
+ } else {
+ i__1 = h_dim1 + 2;
+ z__1.r = h__[i__1].r / s, z__1.i = h__[i__1].i / s;
+ h21s.r = z__1.r, h21s.i = z__1.i;
+ i__1 = (h_dim1 << 1) + 1;
+ z__2.r = h21s.r * h__[i__1].r - h21s.i * h__[i__1].i, z__2.i =
+ h21s.r * h__[i__1].i + h21s.i * h__[i__1].r;
+ i__2 = h_dim1 + 1;
+ z__4.r = h__[i__2].r - s1->r, z__4.i = h__[i__2].i - s1->i;
+ i__3 = h_dim1 + 1;
+ z__6.r = h__[i__3].r - s2->r, z__6.i = h__[i__3].i - s2->i;
+ z__5.r = z__6.r / s, z__5.i = z__6.i / s;
+ z__3.r = z__4.r * z__5.r - z__4.i * z__5.i, z__3.i = z__4.r *
+ z__5.i + z__4.i * z__5.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ v[1].r = z__1.r, v[1].i = z__1.i;
+ i__1 = h_dim1 + 1;
+ i__2 = (h_dim1 << 1) + 2;
+ z__4.r = h__[i__1].r + h__[i__2].r, z__4.i = h__[i__1].i + h__[
+ i__2].i;
+ z__3.r = z__4.r - s1->r, z__3.i = z__4.i - s1->i;
+ z__2.r = z__3.r - s2->r, z__2.i = z__3.i - s2->i;
+ z__1.r = h21s.r * z__2.r - h21s.i * z__2.i, z__1.i = h21s.r *
+ z__2.i + h21s.i * z__2.r;
+ v[2].r = z__1.r, v[2].i = z__1.i;
+ }
+ } else {
+ i__1 = h_dim1 + 1;
+ z__2.r = h__[i__1].r - s2->r, z__2.i = h__[i__1].i - s2->i;
+ z__1.r = z__2.r, z__1.i = z__2.i;
+ i__2 = h_dim1 + 2;
+ i__3 = h_dim1 + 3;
+ s = (d__1 = z__1.r, abs(d__1)) + (d__2 = d_imag(&z__1), abs(d__2)) + (
+ (d__3 = h__[i__2].r, abs(d__3)) + (d__4 = d_imag(&h__[h_dim1
+ + 2]), abs(d__4))) + ((d__5 = h__[i__3].r, abs(d__5)) + (d__6
+ = d_imag(&h__[h_dim1 + 3]), abs(d__6)));
+ if (s == 0.) {
+ v[1].r = 0., v[1].i = 0.;
+ v[2].r = 0., v[2].i = 0.;
+ v[3].r = 0., v[3].i = 0.;
+ } else {
+ i__1 = h_dim1 + 2;
+ z__1.r = h__[i__1].r / s, z__1.i = h__[i__1].i / s;
+ h21s.r = z__1.r, h21s.i = z__1.i;
+ i__1 = h_dim1 + 3;
+ z__1.r = h__[i__1].r / s, z__1.i = h__[i__1].i / s;
+ h31s.r = z__1.r, h31s.i = z__1.i;
+ i__1 = h_dim1 + 1;
+ z__4.r = h__[i__1].r - s1->r, z__4.i = h__[i__1].i - s1->i;
+ i__2 = h_dim1 + 1;
+ z__6.r = h__[i__2].r - s2->r, z__6.i = h__[i__2].i - s2->i;
+ z__5.r = z__6.r / s, z__5.i = z__6.i / s;
+ z__3.r = z__4.r * z__5.r - z__4.i * z__5.i, z__3.i = z__4.r *
+ z__5.i + z__4.i * z__5.r;
+ i__3 = (h_dim1 << 1) + 1;
+ z__7.r = h__[i__3].r * h21s.r - h__[i__3].i * h21s.i, z__7.i =
+ h__[i__3].r * h21s.i + h__[i__3].i * h21s.r;
+ z__2.r = z__3.r + z__7.r, z__2.i = z__3.i + z__7.i;
+ i__4 = h_dim1 * 3 + 1;
+ z__8.r = h__[i__4].r * h31s.r - h__[i__4].i * h31s.i, z__8.i =
+ h__[i__4].r * h31s.i + h__[i__4].i * h31s.r;
+ z__1.r = z__2.r + z__8.r, z__1.i = z__2.i + z__8.i;
+ v[1].r = z__1.r, v[1].i = z__1.i;
+ i__1 = h_dim1 + 1;
+ i__2 = (h_dim1 << 1) + 2;
+ z__5.r = h__[i__1].r + h__[i__2].r, z__5.i = h__[i__1].i + h__[
+ i__2].i;
+ z__4.r = z__5.r - s1->r, z__4.i = z__5.i - s1->i;
+ z__3.r = z__4.r - s2->r, z__3.i = z__4.i - s2->i;
+ z__2.r = h21s.r * z__3.r - h21s.i * z__3.i, z__2.i = h21s.r *
+ z__3.i + h21s.i * z__3.r;
+ i__3 = h_dim1 * 3 + 2;
+ z__6.r = h__[i__3].r * h31s.r - h__[i__3].i * h31s.i, z__6.i =
+ h__[i__3].r * h31s.i + h__[i__3].i * h31s.r;
+ z__1.r = z__2.r + z__6.r, z__1.i = z__2.i + z__6.i;
+ v[2].r = z__1.r, v[2].i = z__1.i;
+ i__1 = h_dim1 + 1;
+ i__2 = h_dim1 * 3 + 3;
+ z__5.r = h__[i__1].r + h__[i__2].r, z__5.i = h__[i__1].i + h__[
+ i__2].i;
+ z__4.r = z__5.r - s1->r, z__4.i = z__5.i - s1->i;
+ z__3.r = z__4.r - s2->r, z__3.i = z__4.i - s2->i;
+ z__2.r = h31s.r * z__3.r - h31s.i * z__3.i, z__2.i = h31s.r *
+ z__3.i + h31s.i * z__3.r;
+ i__3 = (h_dim1 << 1) + 3;
+ z__6.r = h21s.r * h__[i__3].r - h21s.i * h__[i__3].i, z__6.i =
+ h21s.r * h__[i__3].i + h21s.i * h__[i__3].r;
+ z__1.r = z__2.r + z__6.r, z__1.i = z__2.i + z__6.i;
+ v[3].r = z__1.r, v[3].i = z__1.i;
+ }
+ }
+ return 0;
+} /* zlaqr1_ */
diff --git a/contrib/libs/clapack/zlaqr2.c b/contrib/libs/clapack/zlaqr2.c
new file mode 100644
index 0000000000..48ccec1ce2
--- /dev/null
+++ b/contrib/libs/clapack/zlaqr2.c
@@ -0,0 +1,611 @@
+/* zlaqr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static logical c_true = TRUE_;
+
+/* Subroutine */ int zlaqr2_(logical *wantt, logical *wantz, integer *n,
+ integer *ktop, integer *kbot, integer *nw, doublecomplex *h__,
+ integer *ldh, integer *iloz, integer *ihiz, doublecomplex *z__,
+ integer *ldz, integer *ns, integer *nd, doublecomplex *sh,
+ doublecomplex *v, integer *ldv, integer *nh, doublecomplex *t,
+ integer *ldt, integer *nv, doublecomplex *wv, integer *ldwv,
+ doublecomplex *work, integer *lwork)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, t_dim1, t_offset, v_dim1, v_offset, wv_dim1,
+ wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1, d__2, d__3, d__4, d__5, d__6;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j;
+ doublecomplex s;
+ integer jw;
+ doublereal foo;
+ integer kln;
+ doublecomplex tau;
+ integer knt;
+ doublereal ulp;
+ integer lwk1, lwk2;
+ doublecomplex beta;
+ integer kcol, info, ifst, ilst, ltop, krow;
+ extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *);
+ integer infqr;
+ extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ integer kwtop;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), dlabad_(doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ doublereal safmin, safmax;
+ extern /* Subroutine */ int zgehrd_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *), zlarfg_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *), zlahqr_(logical *,
+ logical *, integer *, integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *, integer *, doublecomplex *,
+ integer *, integer *), zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *),
+ zlaset_(char *, integer *, integer *, doublecomplex *,
+ doublecomplex *, doublecomplex *, integer *);
+ doublereal smlnum;
+ extern /* Subroutine */ int ztrexc_(char *, integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *, integer *, integer *,
+ integer *);
+ integer lwkopt;
+ extern /* Subroutine */ int zunmhr_(char *, char *, integer *, integer *,
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *
+);
+
+
+/* -- LAPACK auxiliary routine (version 3.2.1) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* -- April 2009 -- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* This subroutine is identical to ZLAQR3 except that it avoids */
+/* recursion by calling ZLAHQR instead of ZLAQR4. */
+
+
+/* ****************************************************************** */
+/* Aggressive early deflation: */
+
+/* This subroutine accepts as input an upper Hessenberg matrix */
+/* H and performs an unitary similarity transformation */
+/* designed to detect and deflate fully converged eigenvalues from */
+/* a trailing principal submatrix. On output H has been over- */
+/* written by a new Hessenberg matrix that is a perturbation of */
+/* an unitary similarity transformation of H. It is to be */
+/* hoped that the final version of H has many zero subdiagonal */
+/* entries. */
+
+/* ****************************************************************** */
+/* WANTT (input) LOGICAL */
+/* If .TRUE., then the Hessenberg matrix H is fully updated */
+/* so that the triangular Schur factor may be */
+/* computed (in cooperation with the calling subroutine). */
+/* If .FALSE., then only enough of H is updated to preserve */
+/* the eigenvalues. */
+
+/* WANTZ (input) LOGICAL */
+/* If .TRUE., then the unitary matrix Z is updated so */
+/* so that the unitary Schur factor may be computed */
+/* (in cooperation with the calling subroutine). */
+/* If .FALSE., then Z is not referenced. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H and (if WANTZ is .TRUE.) the */
+/* order of the unitary matrix Z. */
+
+/* KTOP (input) INTEGER */
+/* It is assumed that either KTOP = 1 or H(KTOP,KTOP-1)=0. */
+/* KBOT and KTOP together determine an isolated block */
+/* along the diagonal of the Hessenberg matrix. */
+
+/* KBOT (input) INTEGER */
+/* It is assumed without a check that either */
+/* KBOT = N or H(KBOT+1,KBOT)=0. KBOT and KTOP together */
+/* determine an isolated block along the diagonal of the */
+/* Hessenberg matrix. */
+
+/* NW (input) INTEGER */
+/* Deflation window size. 1 .LE. NW .LE. (KBOT-KTOP+1). */
+
+/* H (input/output) COMPLEX*16 array, dimension (LDH,N) */
+/* On input the initial N-by-N section of H stores the */
+/* Hessenberg matrix undergoing aggressive early deflation. */
+/* On output H has been transformed by a unitary */
+/* similarity transformation, perturbed, and the returned */
+/* to Hessenberg form that (it is to be hoped) has some */
+/* zero subdiagonal entries. */
+
+/* LDH (input) integer */
+/* Leading dimension of H just as declared in the calling */
+/* subroutine. N .LE. LDH */
+
+/* ILOZ (input) INTEGER */
+/* IHIZ (input) INTEGER */
+/* Specify the rows of Z to which transformations must be */
+/* applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. */
+
+/* Z (input/output) COMPLEX*16 array, dimension (LDZ,N) */
+/* IF WANTZ is .TRUE., then on output, the unitary */
+/* similarity transformation mentioned above has been */
+/* accumulated into Z(ILOZ:IHIZ,ILO:IHI) from the right. */
+/* If WANTZ is .FALSE., then Z is unreferenced. */
+
+/* LDZ (input) integer */
+/* The leading dimension of Z just as declared in the */
+/* calling subroutine. 1 .LE. LDZ. */
+
+/* NS (output) integer */
+/* The number of unconverged (i.e. approximate) eigenvalues */
+/* returned in SR and SI that may be used as shifts by the */
+/* calling subroutine. */
+
+/* ND (output) integer */
+/* The number of converged eigenvalues uncovered by this */
+/* subroutine. */
+
+/* SH (output) COMPLEX*16 array, dimension KBOT */
+/* On output, approximate eigenvalues that may */
+/* be used for shifts are stored in SH(KBOT-ND-NS+1) */
+/* through SH(KBOT-ND). Converged eigenvalues are */
+/* stored in SH(KBOT-ND+1) through SH(KBOT). */
+
+/* V (workspace) COMPLEX*16 array, dimension (LDV,NW) */
+/* An NW-by-NW work array. */
+
+/* LDV (input) integer scalar */
+/* The leading dimension of V just as declared in the */
+/* calling subroutine. NW .LE. LDV */
+
+/* NH (input) integer scalar */
+/* The number of columns of T. NH.GE.NW. */
+
+/* T (workspace) COMPLEX*16 array, dimension (LDT,NW) */
+
+/* LDT (input) integer */
+/* The leading dimension of T just as declared in the */
+/* calling subroutine. NW .LE. LDT */
+
+/* NV (input) integer */
+/* The number of rows of work array WV available for */
+/* workspace. NV.GE.NW. */
+
+/* WV (workspace) COMPLEX*16 array, dimension (LDWV,NW) */
+
+/* LDWV (input) integer */
+/* The leading dimension of WV just as declared in the */
+/* calling subroutine. NW .LE. LDWV */
+
+/* WORK (workspace) COMPLEX*16 array, dimension LWORK. */
+/* On exit, WORK(1) is set to an estimate of the optimal value */
+/* of LWORK for the given values of N, NW, KTOP and KBOT. */
+
+/* LWORK (input) integer */
+/* The dimension of the work array WORK. LWORK = 2*NW */
+/* suffices, but greater efficiency may result from larger */
+/* values of LWORK. */
+
+/* If LWORK = -1, then a workspace query is assumed; ZLAQR2 */
+/* only estimates the optimal workspace size for the given */
+/* values of N, NW, KTOP and KBOT. The estimate is returned */
+/* in WORK(1). No error message related to LWORK is issued */
+/* by XERBLA. Neither H nor Z are accessed. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* ==== Estimate optimal workspace. ==== */
+
+ /* Parameter adjustments: offset base pointers so Fortran-style 1-based indexing works */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --sh;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ wv_dim1 = *ldwv;
+ wv_offset = 1 + wv_dim1;
+ wv -= wv_offset;
+ --work;
+
+ /* Function Body: JW is the actual deflation window size used below */
+/* Computing MIN */
+ i__1 = *nw, i__2 = *kbot - *ktop + 1;
+ jw = min(i__1,i__2);
+ if (jw <= 2) {
+ lwkopt = 1;
+ } else {
+
+/* ==== Workspace query call to ZGEHRD ==== */
+
+ i__1 = jw - 1;
+ zgehrd_(&jw, &c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], &
+ c_n1, &info);
+ lwk1 = (integer) work[1].r;
+
+/* ==== Workspace query call to ZUNMHR ==== */
+
+ i__1 = jw - 1;
+ zunmhr_("R", "N", &jw, &jw, &c__1, &i__1, &t[t_offset], ldt, &work[1],
+ &v[v_offset], ldv, &work[1], &c_n1, &info);
+ lwk2 = (integer) work[1].r;
+
+/* ==== Optimal workspace ==== */
+
+ lwkopt = jw + max(lwk1,lwk2);
+ }
+
+/* ==== Quick return in case of workspace query. ==== */
+
+ if (*lwork == -1) {
+ d__1 = (doublereal) lwkopt;
+ z__1.r = d__1, z__1.i = 0.;
+ work[1].r = z__1.r, work[1].i = z__1.i;
+ return 0;
+ }
+
+/* ==== Nothing to do ... */
+/* ... for an empty active block ... ==== */
+ *ns = 0;
+ *nd = 0;
+ work[1].r = 1., work[1].i = 0.;
+ if (*ktop > *kbot) {
+ return 0;
+ }
+/* ... nor for an empty deflation window. ==== */
+ if (*nw < 1) {
+ return 0;
+ }
+
+/* ==== Machine constants ==== */
+
+ safmin = dlamch_("SAFE MINIMUM");
+ safmax = 1. / safmin;
+ dlabad_(&safmin, &safmax);
+ ulp = dlamch_("PRECISION");
+ smlnum = safmin * ((doublereal) (*n) / ulp);
+
+/* ==== Setup deflation window ==== */
+
+/* Computing MIN */
+ i__1 = *nw, i__2 = *kbot - *ktop + 1;
+ jw = min(i__1,i__2);
+ kwtop = *kbot - jw + 1;
+ if (kwtop == *ktop) {
+ s.r = 0., s.i = 0.;
+ } else {
+ i__1 = kwtop + (kwtop - 1) * h_dim1;
+ s.r = h__[i__1].r, s.i = h__[i__1].i;
+ }
+
+ if (*kbot == kwtop) {
+
+/* ==== 1-by-1 deflation window: not much to do ==== */
+
+ i__1 = kwtop;
+ i__2 = kwtop + kwtop * h_dim1;
+ sh[i__1].r = h__[i__2].r, sh[i__1].i = h__[i__2].i;
+ *ns = 1;
+ *nd = 0;
+/* Computing MAX */
+ i__1 = kwtop + kwtop * h_dim1;
+ d__5 = smlnum, d__6 = ulp * ((d__1 = h__[i__1].r, abs(d__1)) + (d__2 =
+ d_imag(&h__[kwtop + kwtop * h_dim1]), abs(d__2)));
+ if ((d__3 = s.r, abs(d__3)) + (d__4 = d_imag(&s), abs(d__4)) <= max(
+ d__5,d__6)) {
+ *ns = 0;
+ *nd = 1;
+ if (kwtop > *ktop) {
+ i__1 = kwtop + (kwtop - 1) * h_dim1;
+ h__[i__1].r = 0., h__[i__1].i = 0.;
+ }
+ }
+ work[1].r = 1., work[1].i = 0.;
+ return 0;
+ }
+
+/* ==== Convert to spike-triangular form. (In case of a */
+/* . rare QR failure, this routine continues to do */
+/* . aggressive early deflation using that part of */
+/* . the deflation window that converged using INFQR */
+/* . here and there to keep track.) ==== */
+
+ zlacpy_("U", &jw, &jw, &h__[kwtop + kwtop * h_dim1], ldh, &t[t_offset],
+ ldt);
+ i__1 = jw - 1;
+ i__2 = *ldh + 1;
+ i__3 = *ldt + 1;
+ zcopy_(&i__1, &h__[kwtop + 1 + kwtop * h_dim1], &i__2, &t[t_dim1 + 2], &
+ i__3);
+
+ zlaset_("A", &jw, &jw, &c_b1, &c_b2, &v[v_offset], ldv);
+ zlahqr_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sh[kwtop],
+ &c__1, &jw, &v[v_offset], ldv, &infqr);
+
+/* ==== Deflation detection loop ==== */
+
+ *ns = jw;
+ ilst = infqr + 1;
+ i__1 = jw;
+ for (knt = infqr + 1; knt <= i__1; ++knt) {
+
+/* ==== Small spike tip deflation test ==== */
+
+ i__2 = *ns + *ns * t_dim1;
+ foo = (d__1 = t[i__2].r, abs(d__1)) + (d__2 = d_imag(&t[*ns + *ns *
+ t_dim1]), abs(d__2));
+ if (foo == 0.) {
+ foo = (d__1 = s.r, abs(d__1)) + (d__2 = d_imag(&s), abs(d__2));
+ }
+ i__2 = *ns * v_dim1 + 1;
+/* Computing MAX */
+ d__5 = smlnum, d__6 = ulp * foo;
+ if (((d__1 = s.r, abs(d__1)) + (d__2 = d_imag(&s), abs(d__2))) * ((
+ d__3 = v[i__2].r, abs(d__3)) + (d__4 = d_imag(&v[*ns * v_dim1
+ + 1]), abs(d__4))) <= max(d__5,d__6)) {
+
+/* ==== One more converged eigenvalue ==== */
+
+ --(*ns);
+ } else {
+
+/* ==== One undeflatable eigenvalue. Move it up out of the */
+/* . way. (ZTREXC can not fail in this case.) ==== */
+
+ ifst = *ns;
+ ztrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &
+ ilst, &info);
+ ++ilst;
+ }
+/* L10: */
+ }
+
+/* ==== Return to Hessenberg form ==== */
+
+ if (*ns == 0) {
+ s.r = 0., s.i = 0.;
+ }
+
+ if (*ns < jw) {
+
+/* ==== sorting the diagonal of T improves accuracy for */
+/* . graded matrices. ==== */
+
+ i__1 = *ns;
+ for (i__ = infqr + 1; i__ <= i__1; ++i__) {
+ ifst = i__;
+ i__2 = *ns;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ i__3 = j + j * t_dim1;
+ i__4 = ifst + ifst * t_dim1;
+ if ((d__1 = t[i__3].r, abs(d__1)) + (d__2 = d_imag(&t[j + j *
+ t_dim1]), abs(d__2)) > (d__3 = t[i__4].r, abs(d__3))
+ + (d__4 = d_imag(&t[ifst + ifst * t_dim1]), abs(d__4))
+ ) {
+ ifst = j;
+ }
+/* L20: */
+ }
+ ilst = i__;
+ if (ifst != ilst) {
+ ztrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst,
+ &ilst, &info);
+ }
+/* L30: */
+ }
+ }
+
+/* ==== Restore shift/eigenvalue array from T ==== */
+
+ i__1 = jw;
+ for (i__ = infqr + 1; i__ <= i__1; ++i__) {
+ i__2 = kwtop + i__ - 1;
+ i__3 = i__ + i__ * t_dim1;
+ sh[i__2].r = t[i__3].r, sh[i__2].i = t[i__3].i;
+/* L40: */
+ }
+
+
+ if (*ns < jw || s.r == 0. && s.i == 0.) {
+ if (*ns > 1 && (s.r != 0. || s.i != 0.)) {
+
+/* ==== Reflect spike back into lower triangle ==== */
+
+ zcopy_(ns, &v[v_offset], ldv, &work[1], &c__1);
+ i__1 = *ns;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ d_cnjg(&z__1, &work[i__]);
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+/* L50: */
+ }
+ beta.r = work[1].r, beta.i = work[1].i;
+ zlarfg_(ns, &beta, &work[2], &c__1, &tau);
+ work[1].r = 1., work[1].i = 0.;
+
+ i__1 = jw - 2;
+ i__2 = jw - 2;
+ zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &t[t_dim1 + 3], ldt);
+
+ d_cnjg(&z__1, &tau);
+ zlarf_("L", ns, &jw, &work[1], &c__1, &z__1, &t[t_offset], ldt, &
+ work[jw + 1]);
+ zlarf_("R", ns, ns, &work[1], &c__1, &tau, &t[t_offset], ldt, &
+ work[jw + 1]);
+ zlarf_("R", &jw, ns, &work[1], &c__1, &tau, &v[v_offset], ldv, &
+ work[jw + 1]);
+
+ i__1 = *lwork - jw;
+ zgehrd_(&jw, &c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1]
+, &i__1, &info);
+ }
+
+/* ==== Copy updated reduced window into place ==== */
+
+ if (kwtop > 1) {
+ i__1 = kwtop + (kwtop - 1) * h_dim1;
+ d_cnjg(&z__2, &v[v_dim1 + 1]);
+ z__1.r = s.r * z__2.r - s.i * z__2.i, z__1.i = s.r * z__2.i + s.i
+ * z__2.r;
+ h__[i__1].r = z__1.r, h__[i__1].i = z__1.i;
+ }
+ zlacpy_("U", &jw, &jw, &t[t_offset], ldt, &h__[kwtop + kwtop * h_dim1]
+, ldh);
+ i__1 = jw - 1;
+ i__2 = *ldt + 1;
+ i__3 = *ldh + 1;
+ zcopy_(&i__1, &t[t_dim1 + 2], &i__2, &h__[kwtop + 1 + kwtop * h_dim1],
+ &i__3);
+
+/* ==== Accumulate orthogonal matrix in order update */
+/* . H and Z, if requested. ==== */
+
+ if (*ns > 1 && (s.r != 0. || s.i != 0.)) {
+ i__1 = *lwork - jw;
+ zunmhr_("R", "N", &jw, ns, &c__1, ns, &t[t_offset], ldt, &work[1],
+ &v[v_offset], ldv, &work[jw + 1], &i__1, &info);
+ }
+
+/* ==== Update vertical slab in H ==== */
+
+ if (*wantt) {
+ ltop = 1;
+ } else {
+ ltop = *ktop;
+ }
+ i__1 = kwtop - 1;
+ i__2 = *nv;
+ for (krow = ltop; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *nv, i__4 = kwtop - krow;
+ kln = min(i__3,i__4);
+ zgemm_("N", "N", &kln, &jw, &jw, &c_b2, &h__[krow + kwtop *
+ h_dim1], ldh, &v[v_offset], ldv, &c_b1, &wv[wv_offset],
+ ldwv);
+ zlacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &h__[krow + kwtop *
+ h_dim1], ldh);
+/* L60: */
+ }
+
+/* ==== Update horizontal slab in H ==== */
+
+ if (*wantt) {
+ i__2 = *n;
+ i__1 = *nh;
+ for (kcol = *kbot + 1; i__1 < 0 ? kcol >= i__2 : kcol <= i__2;
+ kcol += i__1) {
+/* Computing MIN */
+ i__3 = *nh, i__4 = *n - kcol + 1;
+ kln = min(i__3,i__4);
+ zgemm_("C", "N", &jw, &kln, &jw, &c_b2, &v[v_offset], ldv, &
+ h__[kwtop + kcol * h_dim1], ldh, &c_b1, &t[t_offset],
+ ldt);
+ zlacpy_("A", &jw, &kln, &t[t_offset], ldt, &h__[kwtop + kcol *
+ h_dim1], ldh);
+/* L70: */
+ }
+ }
+
+/* ==== Update vertical slab in Z ==== */
+
+ if (*wantz) {
+ i__1 = *ihiz;
+ i__2 = *nv;
+ for (krow = *iloz; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *nv, i__4 = *ihiz - krow + 1;
+ kln = min(i__3,i__4);
+ zgemm_("N", "N", &kln, &jw, &jw, &c_b2, &z__[krow + kwtop *
+ z_dim1], ldz, &v[v_offset], ldv, &c_b1, &wv[wv_offset]
+, ldwv);
+ zlacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &z__[krow +
+ kwtop * z_dim1], ldz);
+/* L80: */
+ }
+ }
+ }
+
+/* ==== Return the number of deflations ... ==== */
+
+ *nd = jw - *ns;
+
+/* ==== ... and the number of shifts. (Subtracting */
+/* . INFQR from the spike length takes care */
+/* . of the case of a rare QR failure while */
+/* . calculating eigenvalues of the deflation */
+/* . window.) ==== */
+
+ *ns -= infqr;
+
+/* ==== Return optimal workspace. ==== */
+
+ d__1 = (doublereal) lwkopt;
+ z__1.r = d__1, z__1.i = 0.;
+ work[1].r = z__1.r, work[1].i = z__1.i;
+
+/* ==== End of ZLAQR2 ==== */
+
+ return 0;
+} /* zlaqr2_ */
diff --git a/contrib/libs/clapack/zlaqr3.c b/contrib/libs/clapack/zlaqr3.c
new file mode 100644
index 0000000000..a9d3453c63
--- /dev/null
+++ b/contrib/libs/clapack/zlaqr3.c
@@ -0,0 +1,630 @@
+/* zlaqr3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c translates Fortran's pass-by-reference by taking the address of
+   every actual argument, so constant arguments must live in addressable
+   file-scope storage.  These are the constants zlaqr3_ passes below. */
+
+static doublecomplex c_b1 = {0.,0.};	/* complex zero (BLAS beta / ZLASET fill) */
+static doublecomplex c_b2 = {1.,0.};	/* complex one (BLAS alpha / ZLASET diagonal) */
+static integer c__1 = 1;	/* unit stride / lower loop bound */
+static integer c_n1 = -1;	/* LWORK = -1: workspace-query convention */
+static logical c_true = TRUE_;	/* WANTT/WANTZ = .TRUE. in zlaqr4_/zlahqr_ calls */
+static integer c__12 = 12;	/* first argument passed to ilaenv_ below */
+
+/* Subroutine */ int zlaqr3_(logical *wantt, logical *wantz, integer *n,
+	integer *ktop, integer *kbot, integer *nw, doublecomplex *h__,
+	integer *ldh, integer *iloz, integer *ihiz, doublecomplex *z__,
+	integer *ldz, integer *ns, integer *nd, doublecomplex *sh,
+	doublecomplex *v, integer *ldv, integer *nh, doublecomplex *t,
+	integer *ldt, integer *nv, doublecomplex *wv, integer *ldwv,
+	doublecomplex *work, integer *lwork)
+{
+    /* System generated locals */
+    integer h_dim1, h_offset, t_dim1, t_offset, v_dim1, v_offset, wv_dim1,
+	    wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4;
+    doublereal d__1, d__2, d__3, d__4, d__5, d__6;
+    doublecomplex z__1, z__2;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *);
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__, j;
+    doublecomplex s;
+    integer jw;
+    doublereal foo;
+    integer kln;
+    doublecomplex tau;
+    integer knt;
+    doublereal ulp;
+    integer lwk1, lwk2, lwk3;
+    doublecomplex beta;
+    integer kcol, info, nmin, ifst, ilst, ltop, krow;
+    extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, doublecomplex *);
+    integer infqr;
+    extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *);
+    integer kwtop;
+    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *), dlabad_(doublereal *, doublereal *),
+	    zlaqr4_(logical *, logical *, integer *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *
+);
+    extern doublereal dlamch_(char *);
+    doublereal safmin;
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    doublereal safmax;
+    extern /* Subroutine */ int zgehrd_(integer *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, integer *), zlarfg_(integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *), zlahqr_(logical *,
+	    logical *, integer *, integer *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, integer *, integer *, doublecomplex *,
+	    integer *, integer *), zlacpy_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *),
+	    zlaset_(char *, integer *, integer *, doublecomplex *,
+	    doublecomplex *, doublecomplex *, integer *);
+    doublereal smlnum;
+    extern /* Subroutine */ int ztrexc_(char *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, integer *, integer *, integer *,
+	    integer *);
+    integer lwkopt;
+    extern /* Subroutine */ int zunmhr_(char *, char *, integer *, integer *,
+	    integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *
+);
+
+
+/* -- LAPACK auxiliary routine (version 3.2.1) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* -- April 2009 -- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* ****************************************************************** */
+/* Aggressive early deflation: */
+
+/* This subroutine accepts as input an upper Hessenberg matrix */
+/* H and performs an unitary similarity transformation */
+/* designed to detect and deflate fully converged eigenvalues from */
+/* a trailing principal submatrix. On output H has been over- */
+/* written by a new Hessenberg matrix that is a perturbation of */
+/* an unitary similarity transformation of H. It is to be */
+/* hoped that the final version of H has many zero subdiagonal */
+/* entries. */
+
+/* ****************************************************************** */
+/* WANTT (input) LOGICAL */
+/* If .TRUE., then the Hessenberg matrix H is fully updated */
+/* so that the triangular Schur factor may be */
+/* computed (in cooperation with the calling subroutine). */
+/* If .FALSE., then only enough of H is updated to preserve */
+/* the eigenvalues. */
+
+/* WANTZ (input) LOGICAL */
+/* If .TRUE., then the unitary matrix Z is updated so */
+/* so that the unitary Schur factor may be computed */
+/* (in cooperation with the calling subroutine). */
+/* If .FALSE., then Z is not referenced. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H and (if WANTZ is .TRUE.) the */
+/* order of the unitary matrix Z. */
+
+/* KTOP (input) INTEGER */
+/* It is assumed that either KTOP = 1 or H(KTOP,KTOP-1)=0. */
+/* KBOT and KTOP together determine an isolated block */
+/* along the diagonal of the Hessenberg matrix. */
+
+/* KBOT (input) INTEGER */
+/* It is assumed without a check that either */
+/* KBOT = N or H(KBOT+1,KBOT)=0. KBOT and KTOP together */
+/* determine an isolated block along the diagonal of the */
+/* Hessenberg matrix. */
+
+/* NW (input) INTEGER */
+/* Deflation window size. 1 .LE. NW .LE. (KBOT-KTOP+1). */
+
+/* H (input/output) COMPLEX*16 array, dimension (LDH,N) */
+/* On input the initial N-by-N section of H stores the */
+/* Hessenberg matrix undergoing aggressive early deflation. */
+/* On output H has been transformed by a unitary */
+/* similarity transformation, perturbed, and the returned */
+/* to Hessenberg form that (it is to be hoped) has some */
+/* zero subdiagonal entries. */
+
+/* LDH (input) integer */
+/* Leading dimension of H just as declared in the calling */
+/* subroutine. N .LE. LDH */
+
+/* ILOZ (input) INTEGER */
+/* IHIZ (input) INTEGER */
+/* Specify the rows of Z to which transformations must be */
+/* applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. */
+
+/* Z (input/output) COMPLEX*16 array, dimension (LDZ,N) */
+/* IF WANTZ is .TRUE., then on output, the unitary */
+/* similarity transformation mentioned above has been */
+/* accumulated into Z(ILOZ:IHIZ,ILO:IHI) from the right. */
+/* If WANTZ is .FALSE., then Z is unreferenced. */
+
+/* LDZ (input) integer */
+/* The leading dimension of Z just as declared in the */
+/* calling subroutine. 1 .LE. LDZ. */
+
+/* NS (output) integer */
+/* The number of unconverged (ie approximate) eigenvalues */
+/* returned in SR and SI that may be used as shifts by the */
+/* calling subroutine. */
+
+/* ND (output) integer */
+/* The number of converged eigenvalues uncovered by this */
+/* subroutine. */
+
+/* SH (output) COMPLEX*16 array, dimension KBOT */
+/* On output, approximate eigenvalues that may */
+/* be used for shifts are stored in SH(KBOT-ND-NS+1) */
+/* through SR(KBOT-ND). Converged eigenvalues are */
+/* stored in SH(KBOT-ND+1) through SH(KBOT). */
+
+/* V (workspace) COMPLEX*16 array, dimension (LDV,NW) */
+/* An NW-by-NW work array. */
+
+/* LDV (input) integer scalar */
+/* The leading dimension of V just as declared in the */
+/* calling subroutine. NW .LE. LDV */
+
+/* NH (input) integer scalar */
+/* The number of columns of T. NH.GE.NW. */
+
+/* T (workspace) COMPLEX*16 array, dimension (LDT,NW) */
+
+/* LDT (input) integer */
+/* The leading dimension of T just as declared in the */
+/* calling subroutine. NW .LE. LDT */
+
+/* NV (input) integer */
+/* The number of rows of work array WV available for */
+/* workspace. NV.GE.NW. */
+
+/* WV (workspace) COMPLEX*16 array, dimension (LDWV,NW) */
+
+/* LDWV (input) integer */
+/* The leading dimension of W just as declared in the */
+/* calling subroutine. NW .LE. LDV */
+
+/* WORK (workspace) COMPLEX*16 array, dimension LWORK. */
+/* On exit, WORK(1) is set to an estimate of the optimal value */
+/* of LWORK for the given values of N, NW, KTOP and KBOT. */
+
+/* LWORK (input) integer */
+/* The dimension of the work array WORK. LWORK = 2*NW */
+/* suffices, but greater efficiency may result from larger */
+/* values of LWORK. */
+
+/* If LWORK = -1, then a workspace query is assumed; ZLAQR3 */
+/* only estimates the optimal workspace size for the given */
+/* values of N, NW, KTOP and KBOT. The estimate is returned */
+/* in WORK(1). No error message related to LWORK is issued */
+/* by XERBLA. Neither H nor Z are accessed. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* ==== Estimate optimal workspace. ==== */
+
+    /* Parameter adjustments */
+    /* f2c idiom: shift each array pointer so 1-based Fortran indexing */
+    /* like h__[row + col * h_dim1] works directly in C. */
+    h_dim1 = *ldh;
+    h_offset = 1 + h_dim1;
+    h__ -= h_offset;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --sh;
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+    t_dim1 = *ldt;
+    t_offset = 1 + t_dim1;
+    t -= t_offset;
+    wv_dim1 = *ldwv;
+    wv_offset = 1 + wv_dim1;
+    wv -= wv_offset;
+    --work;
+
+    /* Function Body */
+/* Computing MIN */
+    i__1 = *nw, i__2 = *kbot - *ktop + 1;
+    jw = min(i__1,i__2);
+    if (jw <= 2) {
+	lwkopt = 1;
+    } else {
+
+/* ==== Workspace query call to ZGEHRD ==== */
+
+	i__1 = jw - 1;
+	zgehrd_(&jw, &c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], &
+		c_n1, &info);
+	lwk1 = (integer) work[1].r;
+
+/* ==== Workspace query call to ZUNMHR ==== */
+
+	i__1 = jw - 1;
+	zunmhr_("R", "N", &jw, &jw, &c__1, &i__1, &t[t_offset], ldt, &work[1],
+		&v[v_offset], ldv, &work[1], &c_n1, &info);
+	lwk2 = (integer) work[1].r;
+
+/* ==== Workspace query call to ZLAQR4 ==== */
+
+	zlaqr4_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sh[1],
+		&c__1, &jw, &v[v_offset], ldv, &work[1], &c_n1, &infqr);
+	lwk3 = (integer) work[1].r;
+
+/* ==== Optimal workspace ==== */
+
+/* Computing MAX */
+	i__1 = jw + max(lwk1,lwk2);
+	lwkopt = max(i__1,lwk3);
+    }
+
+/* ==== Quick return in case of workspace query. ==== */
+
+    if (*lwork == -1) {
+	d__1 = (doublereal) lwkopt;
+	z__1.r = d__1, z__1.i = 0.;
+	work[1].r = z__1.r, work[1].i = z__1.i;
+	return 0;
+    }
+
+/* ==== Nothing to do ... */
+/* ... for an empty active block ... ==== */
+    *ns = 0;
+    *nd = 0;
+    work[1].r = 1., work[1].i = 0.;
+    if (*ktop > *kbot) {
+	return 0;
+    }
+/* ... nor for an empty deflation window. ==== */
+    if (*nw < 1) {
+	return 0;
+    }
+
+/* ==== Machine constants ==== */
+
+    safmin = dlamch_("SAFE MINIMUM");
+    safmax = 1. / safmin;
+    dlabad_(&safmin, &safmax);
+    ulp = dlamch_("PRECISION");
+    smlnum = safmin * ((doublereal) (*n) / ulp);
+
+/* ==== Setup deflation window ==== */
+
+/* Computing MIN */
+    i__1 = *nw, i__2 = *kbot - *ktop + 1;
+    jw = min(i__1,i__2);
+    kwtop = *kbot - jw + 1;
+/* s caches the subdiagonal "spike" entry H(KWTOP,KWTOP-1); it is */
+/* zero when the window starts at the top of the isolated block. */
+    if (kwtop == *ktop) {
+	s.r = 0., s.i = 0.;
+    } else {
+	i__1 = kwtop + (kwtop - 1) * h_dim1;
+	s.r = h__[i__1].r, s.i = h__[i__1].i;
+    }
+
+    if (*kbot == kwtop) {
+
+/* ==== 1-by-1 deflation window: not much to do ==== */
+
+	i__1 = kwtop;
+	i__2 = kwtop + kwtop * h_dim1;
+	sh[i__1].r = h__[i__2].r, sh[i__1].i = h__[i__2].i;
+	*ns = 1;
+	*nd = 0;
+/* Computing MAX */
+	i__1 = kwtop + kwtop * h_dim1;
+	d__5 = smlnum, d__6 = ulp * ((d__1 = h__[i__1].r, abs(d__1)) + (d__2 =
+		d_imag(&h__[kwtop + kwtop * h_dim1]), abs(d__2)));
+	if ((d__3 = s.r, abs(d__3)) + (d__4 = d_imag(&s), abs(d__4)) <= max(
+		d__5,d__6)) {
+	    *ns = 0;
+	    *nd = 1;
+	    if (kwtop > *ktop) {
+		i__1 = kwtop + (kwtop - 1) * h_dim1;
+		h__[i__1].r = 0., h__[i__1].i = 0.;
+	    }
+	}
+	work[1].r = 1., work[1].i = 0.;
+	return 0;
+    }
+
+/* ==== Convert to spike-triangular form. (In case of a */
+/* . rare QR failure, this routine continues to do */
+/* . aggressive early deflation using that part of */
+/* . the deflation window that converged using INFQR */
+/* . here and there to keep track.) ==== */
+
+    zlacpy_("U", &jw, &jw, &h__[kwtop + kwtop * h_dim1], ldh, &t[t_offset],
+	    ldt);
+    i__1 = jw - 1;
+    i__2 = *ldh + 1;
+    i__3 = *ldt + 1;
+    zcopy_(&i__1, &h__[kwtop + 1 + kwtop * h_dim1], &i__2, &t[t_dim1 + 2], &
+	    i__3);
+
+    zlaset_("A", &jw, &jw, &c_b1, &c_b2, &v[v_offset], ldv);
+    nmin = ilaenv_(&c__12, "ZLAQR3", "SV", &jw, &c__1, &jw, lwork);
+    if (jw > nmin) {
+	zlaqr4_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sh[
+		kwtop], &c__1, &jw, &v[v_offset], ldv, &work[1], lwork, &
+		infqr);
+    } else {
+	zlahqr_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sh[
+		kwtop], &c__1, &jw, &v[v_offset], ldv, &infqr);
+    }
+
+/* ==== Deflation detection loop ==== */
+
+/* NS starts at JW and is decremented once per converged eigenvalue; */
+/* undeflatable eigenvalues are moved up out of the way via ZTREXC, */
+/* with ILST tracking the next destination position in T. */
+    *ns = jw;
+    ilst = infqr + 1;
+    i__1 = jw;
+    for (knt = infqr + 1; knt <= i__1; ++knt) {
+
+/* ==== Small spike tip deflation test ==== */
+
+	i__2 = *ns + *ns * t_dim1;
+	foo = (d__1 = t[i__2].r, abs(d__1)) + (d__2 = d_imag(&t[*ns + *ns *
+		t_dim1]), abs(d__2));
+	if (foo == 0.) {
+	    foo = (d__1 = s.r, abs(d__1)) + (d__2 = d_imag(&s), abs(d__2));
+	}
+	i__2 = *ns * v_dim1 + 1;
+/* Computing MAX */
+	d__5 = smlnum, d__6 = ulp * foo;
+	if (((d__1 = s.r, abs(d__1)) + (d__2 = d_imag(&s), abs(d__2))) * ((
+		d__3 = v[i__2].r, abs(d__3)) + (d__4 = d_imag(&v[*ns * v_dim1
+		+ 1]), abs(d__4))) <= max(d__5,d__6)) {
+
+/* ==== One more converged eigenvalue ==== */
+
+	    --(*ns);
+	} else {
+
+/* ==== One undeflatable eigenvalue. Move it up out of the */
+/* . way. (ZTREXC can not fail in this case.) ==== */
+
+	    ifst = *ns;
+	    ztrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &
+		    ilst, &info);
+	    ++ilst;
+	}
+/* L10: */
+    }
+
+/* ==== Return to Hessenberg form ==== */
+
+    if (*ns == 0) {
+	s.r = 0., s.i = 0.;
+    }
+
+    if (*ns < jw) {
+
+/* ==== sorting the diagonal of T improves accuracy for */
+/* . graded matrices. ==== */
+
+	i__1 = *ns;
+	for (i__ = infqr + 1; i__ <= i__1; ++i__) {
+	    ifst = i__;
+	    i__2 = *ns;
+	    for (j = i__ + 1; j <= i__2; ++j) {
+		i__3 = j + j * t_dim1;
+		i__4 = ifst + ifst * t_dim1;
+		if ((d__1 = t[i__3].r, abs(d__1)) + (d__2 = d_imag(&t[j + j *
+			t_dim1]), abs(d__2)) > (d__3 = t[i__4].r, abs(d__3))
+			+ (d__4 = d_imag(&t[ifst + ifst * t_dim1]), abs(d__4))
+			) {
+		    ifst = j;
+		}
+/* L20: */
+	    }
+	    ilst = i__;
+	    if (ifst != ilst) {
+		ztrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst,
+			&ilst, &info);
+	    }
+/* L30: */
+	}
+    }
+
+/* ==== Restore shift/eigenvalue array from T ==== */
+
+    i__1 = jw;
+    for (i__ = infqr + 1; i__ <= i__1; ++i__) {
+	i__2 = kwtop + i__ - 1;
+	i__3 = i__ + i__ * t_dim1;
+	sh[i__2].r = t[i__3].r, sh[i__2].i = t[i__3].i;
+/* L40: */
+    }
+
+
+/* NOTE: in C, && binds tighter than ||, so the test below reads */
+/* (*ns < jw) || (s == 0): update H/Z unless nothing deflated and */
+/* the spike is nonzero. */
+    if (*ns < jw || s.r == 0. && s.i == 0.) {
+	if (*ns > 1 && (s.r != 0. || s.i != 0.)) {
+
+/* ==== Reflect spike back into lower triangle ==== */
+
+	    zcopy_(ns, &v[v_offset], ldv, &work[1], &c__1);
+	    i__1 = *ns;
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		i__2 = i__;
+		d_cnjg(&z__1, &work[i__]);
+		work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+/* L50: */
+	    }
+	    beta.r = work[1].r, beta.i = work[1].i;
+	    zlarfg_(ns, &beta, &work[2], &c__1, &tau);
+	    work[1].r = 1., work[1].i = 0.;
+
+	    i__1 = jw - 2;
+	    i__2 = jw - 2;
+	    zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &t[t_dim1 + 3], ldt);
+
+	    d_cnjg(&z__1, &tau);
+	    zlarf_("L", ns, &jw, &work[1], &c__1, &z__1, &t[t_offset], ldt, &
+		    work[jw + 1]);
+	    zlarf_("R", ns, ns, &work[1], &c__1, &tau, &t[t_offset], ldt, &
+		    work[jw + 1]);
+	    zlarf_("R", &jw, ns, &work[1], &c__1, &tau, &v[v_offset], ldv, &
+		    work[jw + 1]);
+
+	    i__1 = *lwork - jw;
+	    zgehrd_(&jw, &c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1]
+, &i__1, &info);
+	}
+
+/* ==== Copy updated reduced window into place ==== */
+
+	if (kwtop > 1) {
+	    i__1 = kwtop + (kwtop - 1) * h_dim1;
+	    d_cnjg(&z__2, &v[v_dim1 + 1]);
+	    z__1.r = s.r * z__2.r - s.i * z__2.i, z__1.i = s.r * z__2.i + s.i
+		    * z__2.r;
+	    h__[i__1].r = z__1.r, h__[i__1].i = z__1.i;
+	}
+	zlacpy_("U", &jw, &jw, &t[t_offset], ldt, &h__[kwtop + kwtop * h_dim1]
+, ldh);
+	i__1 = jw - 1;
+	i__2 = *ldt + 1;
+	i__3 = *ldh + 1;
+	zcopy_(&i__1, &t[t_dim1 + 2], &i__2, &h__[kwtop + 1 + kwtop * h_dim1],
+		&i__3);
+
+/* ==== Accumulate orthogonal matrix in order update */
+/* . H and Z, if requested. ==== */
+
+	if (*ns > 1 && (s.r != 0. || s.i != 0.)) {
+	    i__1 = *lwork - jw;
+	    zunmhr_("R", "N", &jw, ns, &c__1, ns, &t[t_offset], ldt, &work[1],
+		    &v[v_offset], ldv, &work[jw + 1], &i__1, &info);
+	}
+
+/* ==== Update vertical slab in H ==== */
+
+	if (*wantt) {
+	    ltop = 1;
+	} else {
+	    ltop = *ktop;
+	}
+	i__1 = kwtop - 1;
+	i__2 = *nv;
+	for (krow = ltop; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow +=
+		i__2) {
+/* Computing MIN */
+	    i__3 = *nv, i__4 = kwtop - krow;
+	    kln = min(i__3,i__4);
+	    zgemm_("N", "N", &kln, &jw, &jw, &c_b2, &h__[krow + kwtop *
+		    h_dim1], ldh, &v[v_offset], ldv, &c_b1, &wv[wv_offset],
+		    ldwv);
+	    zlacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &h__[krow + kwtop *
+		    h_dim1], ldh);
+/* L60: */
+	}
+
+/* ==== Update horizontal slab in H ==== */
+
+	if (*wantt) {
+	    i__2 = *n;
+	    i__1 = *nh;
+	    for (kcol = *kbot + 1; i__1 < 0 ? kcol >= i__2 : kcol <= i__2;
+		    kcol += i__1) {
+/* Computing MIN */
+		i__3 = *nh, i__4 = *n - kcol + 1;
+		kln = min(i__3,i__4);
+		zgemm_("C", "N", &jw, &kln, &jw, &c_b2, &v[v_offset], ldv, &
+			h__[kwtop + kcol * h_dim1], ldh, &c_b1, &t[t_offset],
+			ldt);
+		zlacpy_("A", &jw, &kln, &t[t_offset], ldt, &h__[kwtop + kcol *
+			h_dim1], ldh);
+/* L70: */
+	    }
+	}
+
+/* ==== Update vertical slab in Z ==== */
+
+	if (*wantz) {
+	    i__1 = *ihiz;
+	    i__2 = *nv;
+	    for (krow = *iloz; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow +=
+		    i__2) {
+/* Computing MIN */
+		i__3 = *nv, i__4 = *ihiz - krow + 1;
+		kln = min(i__3,i__4);
+		zgemm_("N", "N", &kln, &jw, &jw, &c_b2, &z__[krow + kwtop *
+			z_dim1], ldz, &v[v_offset], ldv, &c_b1, &wv[wv_offset]
+, ldwv);
+		zlacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &z__[krow +
+			kwtop * z_dim1], ldz);
+/* L80: */
+	    }
+	}
+    }
+
+/* ==== Return the number of deflations ... ==== */
+
+    *nd = jw - *ns;
+
+/* ==== ... and the number of shifts. (Subtracting */
+/* . INFQR from the spike length takes care */
+/* . of the case of a rare QR failure while */
+/* . calculating eigenvalues of the deflation */
+/* . window.) ==== */
+
+    *ns -= infqr;
+
+/* ==== Return optimal workspace. ==== */
+
+    d__1 = (doublereal) lwkopt;
+    z__1.r = d__1, z__1.i = 0.;
+    work[1].r = z__1.r, work[1].i = z__1.i;
+
+/* ==== End of ZLAQR3 ==== */
+
+    return 0;
+} /* zlaqr3_ */
diff --git a/contrib/libs/clapack/zlaqr4.c b/contrib/libs/clapack/zlaqr4.c
new file mode 100644
index 0000000000..b5d7a2bca7
--- /dev/null
+++ b/contrib/libs/clapack/zlaqr4.c
@@ -0,0 +1,785 @@
+/* zlaqr4.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__13 = 13;
+static integer c__15 = 15;
+static integer c_n1 = -1;
+static integer c__12 = 12;
+static integer c__14 = 14;
+static integer c__16 = 16;
+static logical c_false = FALSE_;
+static integer c__1 = 1;
+static integer c__3 = 3;
+
+/* Subroutine */ int zlaqr4_(logical *wantt, logical *wantz, integer *n,
+ integer *ilo, integer *ihi, doublecomplex *h__, integer *ldh,
+ doublecomplex *w, integer *iloz, integer *ihiz, doublecomplex *z__,
+ integer *ldz, doublecomplex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8;
+ doublecomplex z__1, z__2, z__3, z__4, z__5;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+ void z_sqrt(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, k;
+ doublereal s;
+ doublecomplex aa, bb, cc, dd;
+ integer ld, nh, it, ks, kt, ku, kv, ls, ns, nw;
+ doublecomplex tr2, det;
+ integer inf, kdu, nho, nve, kwh, nsr, nwr, kwv, ndec, ndfl, kbot, nmin;
+ doublecomplex swap;
+ integer ktop;
+ doublecomplex zdum[1] /* was [1][1] */;
+ integer kacc22, itmax, nsmax, nwmax, kwtop;
+ extern /* Subroutine */ int zlaqr2_(logical *, logical *, integer *,
+ integer *, integer *, integer *, doublecomplex *, integer *,
+ integer *, integer *, doublecomplex *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *,
+ doublecomplex *, integer *, integer *, doublecomplex *, integer *
+, doublecomplex *, integer *), zlaqr5_(logical *, logical *,
+ integer *, integer *, integer *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, integer *, doublecomplex *, integer *,
+ integer *, doublecomplex *, integer *);
+ integer nibble;
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ char jbcmpz[1];
+ doublecomplex rtdisc;
+ integer nwupbd;
+ logical sorted;
+ extern /* Subroutine */ int zlahqr_(logical *, logical *, integer *,
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *, integer *, doublecomplex *, integer *, integer *),
+ zlacpy_(char *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ integer lwkopt;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* This subroutine implements one level of recursion for ZLAQR0. */
+/* It is a complete implementation of the small bulge multi-shift */
+/* QR algorithm. It may be called by ZLAQR0 and, for large enough */
+/* deflation window size, it may be called by ZLAQR3. This */
+/* subroutine is identical to ZLAQR0 except that it calls ZLAQR2 */
+/* instead of ZLAQR3. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAQR4 computes the eigenvalues of a Hessenberg matrix H */
+/* and, optionally, the matrices T and Z from the Schur decomposition */
+/* H = Z T Z**H, where T is an upper triangular matrix (the */
+/* Schur form), and Z is the unitary matrix of Schur vectors. */
+
+/* Optionally Z may be postmultiplied into an input unitary */
+/* matrix Q so that this routine can give the Schur factorization */
+/* of a matrix A which has been reduced to the Hessenberg form H */
+/* by the unitary matrix Q: A = Q*H*Q**H = (QZ)*H*(QZ)**H. */
+
+/* Arguments */
+/* ========= */
+
+/* WANTT (input) LOGICAL */
+/* = .TRUE. : the full Schur form T is required; */
+/* = .FALSE.: only eigenvalues are required. */
+
+/* WANTZ (input) LOGICAL */
+/* = .TRUE. : the matrix of Schur vectors Z is required; */
+/* = .FALSE.: Schur vectors are not required. */
+
+/* N (input) INTEGER */
+/* The order of the matrix H. N .GE. 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* It is assumed that H is already upper triangular in rows */
+/* and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, */
+/* H(ILO,ILO-1) is zero. ILO and IHI are normally set by a */
+/* previous call to ZGEBAL, and then passed to ZGEHRD when the */
+/* matrix output by ZGEBAL is reduced to Hessenberg form. */
+/* Otherwise, ILO and IHI should be set to 1 and N, */
+/* respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. */
+/* If N = 0, then ILO = 1 and IHI = 0. */
+
+/* H (input/output) COMPLEX*16 array, dimension (LDH,N) */
+/* On entry, the upper Hessenberg matrix H. */
+/* On exit, if INFO = 0 and WANTT is .TRUE., then H */
+/* contains the upper triangular matrix T from the Schur */
+/* decomposition (the Schur form). If INFO = 0 and WANT is */
+/* .FALSE., then the contents of H are unspecified on exit. */
+/* (The output value of H when INFO.GT.0 is given under the */
+/* description of INFO below.) */
+
+/* This subroutine may explicitly set H(i,j) = 0 for i.GT.j and */
+/* j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. */
+
+/* LDH (input) INTEGER */
+/* The leading dimension of the array H. LDH .GE. max(1,N). */
+
+/* W (output) COMPLEX*16 array, dimension (N) */
+/* The computed eigenvalues of H(ILO:IHI,ILO:IHI) are stored */
+/* in W(ILO:IHI). If WANTT is .TRUE., then the eigenvalues are */
+/* stored in the same order as on the diagonal of the Schur */
+/* form returned in H, with W(i) = H(i,i). */
+
+/* Z (input/output) COMPLEX*16 array, dimension (LDZ,IHI) */
+/* If WANTZ is .FALSE., then Z is not referenced. */
+/* If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is */
+/* replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the */
+/* orthogonal Schur factor of H(ILO:IHI,ILO:IHI). */
+/* (The output value of Z when INFO.GT.0 is given under */
+/* the description of INFO below.) */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. if WANTZ is .TRUE. */
+/* then LDZ.GE.MAX(1,IHIZ). Otherwize, LDZ.GE.1. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension LWORK */
+/* On exit, if LWORK = -1, WORK(1) returns an estimate of */
+/* the optimal value for LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK .GE. max(1,N) */
+/* is sufficient, but LWORK typically as large as 6*N may */
+/* be required for optimal performance. A workspace query */
+/* to determine the optimal workspace size is recommended. */
+
+/* If LWORK = -1, then ZLAQR4 does a workspace query. */
+/* In this case, ZLAQR4 checks the input parameters and */
+/* estimates the optimal workspace size for the given */
+/* values of N, ILO and IHI. The estimate is returned */
+/* in WORK(1). No error message related to LWORK is */
+/* issued by XERBLA. Neither H nor Z are accessed. */
+
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* .GT. 0: if INFO = i, ZLAQR4 failed to compute all of */
+/* the eigenvalues. Elements 1:ilo-1 and i+1:n of WR */
+/* and WI contain those eigenvalues which have been */
+/* successfully computed. (Failures are rare.) */
+
+/* If INFO .GT. 0 and WANT is .FALSE., then on exit, */
+/* the remaining unconverged eigenvalues are the eigen- */
+/* values of the upper Hessenberg matrix rows and */
+/* columns ILO through INFO of the final, output */
+/* value of H. */
+
+/* If INFO .GT. 0 and WANTT is .TRUE., then on exit */
+
+/* (*) (initial value of H)*U = U*(final value of H) */
+
+/* where U is a unitary matrix. The final */
+/* value of H is upper Hessenberg and triangular in */
+/* rows and columns INFO+1 through IHI. */
+
+/* If INFO .GT. 0 and WANTZ is .TRUE., then on exit */
+
+/* (final value of Z(ILO:IHI,ILOZ:IHIZ) */
+/* = (initial value of Z(ILO:IHI,ILOZ:IHIZ)*U */
+
+/* where U is the unitary matrix in (*) (regard- */
+/* less of the value of WANTT.) */
+
+/* If INFO .GT. 0 and WANTZ is .FALSE., then Z is not */
+/* accessed. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* References: */
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 */
+/* Performance, SIAM Journal of Matrix Analysis, volume 23, pages */
+/* 929--947, 2002. */
+
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part II: Aggressive Early Deflation, SIAM Journal */
+/* of Matrix Analysis, volume 23, pages 948--973, 2002. */
+
+/* ================================================================ */
+/* .. Parameters .. */
+
+/* ==== Matrices of order NTINY or smaller must be processed by */
+/* . ZLAHQR because of insufficient subdiagonal scratch space. */
+/* . (This is a hard limit.) ==== */
+
+/* ==== Exceptional deflation windows: try to cure rare */
+/* . slow convergence by varying the size of the */
+/* . deflation window after KEXNW iterations. ==== */
+
+/* ==== Exceptional shifts: try to cure rare slow convergence */
+/* . with ad-hoc exceptional shifts every KEXSH iterations. */
+/* . ==== */
+
+/* ==== The constant WILK1 is used to form the exceptional */
+/* . shifts. ==== */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+/* ==== Quick return for N = 0: nothing to do. ==== */
+
+ if (*n == 0) {
+ work[1].r = 1., work[1].i = 0.;
+ return 0;
+ }
+
+ if (*n <= 11) {
+
+/* ==== Tiny matrices must use ZLAHQR. ==== */
+
+ lwkopt = 1;
+ if (*lwork != -1) {
+ zlahqr_(wantt, wantz, n, ilo, ihi, &h__[h_offset], ldh, &w[1],
+ iloz, ihiz, &z__[z_offset], ldz, info);
+ }
+ } else {
+
+/* ==== Use small bulge multi-shift QR with aggressive early */
+/* . deflation on larger-than-tiny matrices. ==== */
+
+/* ==== Hope for the best. ==== */
+
+ *info = 0;
+
+/* ==== Set up job flags for ILAENV. ==== */
+
+ if (*wantt) {
+ *(unsigned char *)jbcmpz = 'S';
+ } else {
+ *(unsigned char *)jbcmpz = 'E';
+ }
+ if (*wantz) {
+ *(unsigned char *)&jbcmpz[1] = 'V';
+ } else {
+ *(unsigned char *)&jbcmpz[1] = 'N';
+ }
+
+/* ==== NWR = recommended deflation window size. At this */
+/* . point, N .GT. NTINY = 11, so there is enough */
+/* . subdiagonal workspace for NWR.GE.2 as required. */
+/* . (In fact, there is enough subdiagonal space for */
+/* . NWR.GE.3.) ==== */
+
+ nwr = ilaenv_(&c__13, "ZLAQR4", jbcmpz, n, ilo, ihi, lwork);
+ nwr = max(2,nwr);
+/* Computing MIN */
+ i__1 = *ihi - *ilo + 1, i__2 = (*n - 1) / 3, i__1 = min(i__1,i__2);
+ nwr = min(i__1,nwr);
+
+/* ==== NSR = recommended number of simultaneous shifts. */
+/* . At this point N .GT. NTINY = 11, so there is at */
+/* . enough subdiagonal workspace for NSR to be even */
+/* . and greater than or equal to two as required. ==== */
+
+ nsr = ilaenv_(&c__15, "ZLAQR4", jbcmpz, n, ilo, ihi, lwork);
+/* Computing MIN */
+ i__1 = nsr, i__2 = (*n + 6) / 9, i__1 = min(i__1,i__2), i__2 = *ihi -
+ *ilo;
+ nsr = min(i__1,i__2);
+/* Computing MAX */
+ i__1 = 2, i__2 = nsr - nsr % 2;
+ nsr = max(i__1,i__2);
+
+/* ==== Estimate optimal workspace ==== */
+
+/* ==== Workspace query call to ZLAQR2 ==== */
+
+ i__1 = nwr + 1;
+ zlaqr2_(wantt, wantz, n, ilo, ihi, &i__1, &h__[h_offset], ldh, iloz,
+ ihiz, &z__[z_offset], ldz, &ls, &ld, &w[1], &h__[h_offset],
+ ldh, n, &h__[h_offset], ldh, n, &h__[h_offset], ldh, &work[1],
+ &c_n1);
+
+/* ==== Optimal workspace = MAX(ZLAQR5, ZLAQR2) ==== */
+
+/* Computing MAX */
+ i__1 = nsr * 3 / 2, i__2 = (integer) work[1].r;
+ lwkopt = max(i__1,i__2);
+
+/* ==== Quick return in case of workspace query. ==== */
+
+ if (*lwork == -1) {
+ d__1 = (doublereal) lwkopt;
+ z__1.r = d__1, z__1.i = 0.;
+ work[1].r = z__1.r, work[1].i = z__1.i;
+ return 0;
+ }
+
+/* ==== ZLAHQR/ZLAQR0 crossover point ==== */
+
+ nmin = ilaenv_(&c__12, "ZLAQR4", jbcmpz, n, ilo, ihi, lwork);
+ nmin = max(11,nmin);
+
+/* ==== Nibble crossover point ==== */
+
+ nibble = ilaenv_(&c__14, "ZLAQR4", jbcmpz, n, ilo, ihi, lwork);
+ nibble = max(0,nibble);
+
+/* ==== Accumulate reflections during ttswp? Use block */
+/* . 2-by-2 structure during matrix-matrix multiply? ==== */
+
+ kacc22 = ilaenv_(&c__16, "ZLAQR4", jbcmpz, n, ilo, ihi, lwork);
+ kacc22 = max(0,kacc22);
+ kacc22 = min(2,kacc22);
+
+/* ==== NWMAX = the largest possible deflation window for */
+/* . which there is sufficient workspace. ==== */
+
+/* Computing MIN */
+ i__1 = (*n - 1) / 3, i__2 = *lwork / 2;
+ nwmax = min(i__1,i__2);
+ nw = nwmax;
+
+/* ==== NSMAX = the Largest number of simultaneous shifts */
+/* . for which there is sufficient workspace. ==== */
+
+/* Computing MIN */
+ i__1 = (*n + 6) / 9, i__2 = (*lwork << 1) / 3;
+ nsmax = min(i__1,i__2);
+ nsmax -= nsmax % 2;
+
+/* ==== NDFL: an iteration count restarted at deflation. ==== */
+
+ ndfl = 1;
+
+/* ==== ITMAX = iteration limit ==== */
+
+/* Computing MAX */
+ i__1 = 10, i__2 = *ihi - *ilo + 1;
+ itmax = max(i__1,i__2) * 30;
+
+/* ==== Last row and column in the active block ==== */
+
+ kbot = *ihi;
+
+/* ==== Main Loop ==== */
+
+ i__1 = itmax;
+ for (it = 1; it <= i__1; ++it) {
+
+/* ==== Done when KBOT falls below ILO ==== */
+
+ if (kbot < *ilo) {
+ goto L80;
+ }
+
+/* ==== Locate active block ==== */
+
+ i__2 = *ilo + 1;
+ for (k = kbot; k >= i__2; --k) {
+ i__3 = k + (k - 1) * h_dim1;
+ if (h__[i__3].r == 0. && h__[i__3].i == 0.) {
+ goto L20;
+ }
+/* L10: */
+ }
+ k = *ilo;
+L20:
+ ktop = k;
+
+/* ==== Select deflation window size: */
+/* . Typical Case: */
+/* . If possible and advisable, nibble the entire */
+/* . active block. If not, use size MIN(NWR,NWMAX) */
+/* . or MIN(NWR+1,NWMAX) depending upon which has */
+/* . the smaller corresponding subdiagonal entry */
+/* . (a heuristic). */
+/* . */
+/* . Exceptional Case: */
+/* . If there have been no deflations in KEXNW or */
+/* . more iterations, then vary the deflation window */
+/* . size. At first, because, larger windows are, */
+/* . in general, more powerful than smaller ones, */
+/* . rapidly increase the window to the maximum possible. */
+/* . Then, gradually reduce the window size. ==== */
+
+ nh = kbot - ktop + 1;
+ nwupbd = min(nh,nwmax);
+ if (ndfl < 5) {
+ nw = min(nwupbd,nwr);
+ } else {
+/* Computing MIN */
+ i__2 = nwupbd, i__3 = nw << 1;
+ nw = min(i__2,i__3);
+ }
+ if (nw < nwmax) {
+ if (nw >= nh - 1) {
+ nw = nh;
+ } else {
+ kwtop = kbot - nw + 1;
+ i__2 = kwtop + (kwtop - 1) * h_dim1;
+ i__3 = kwtop - 1 + (kwtop - 2) * h_dim1;
+ if ((d__1 = h__[i__2].r, abs(d__1)) + (d__2 = d_imag(&h__[
+ kwtop + (kwtop - 1) * h_dim1]), abs(d__2)) > (
+ d__3 = h__[i__3].r, abs(d__3)) + (d__4 = d_imag(&
+ h__[kwtop - 1 + (kwtop - 2) * h_dim1]), abs(d__4))
+ ) {
+ ++nw;
+ }
+ }
+ }
+ if (ndfl < 5) {
+ ndec = -1;
+ } else if (ndec >= 0 || nw >= nwupbd) {
+ ++ndec;
+ if (nw - ndec < 2) {
+ ndec = 0;
+ }
+ nw -= ndec;
+ }
+
+/* ==== Aggressive early deflation: */
+/* . split workspace under the subdiagonal into */
+/* . - an nw-by-nw work array V in the lower */
+/* . left-hand-corner, */
+/* . - an NW-by-at-least-NW-but-more-is-better */
+/* . (NW-by-NHO) horizontal work array along */
+/* . the bottom edge, */
+/* . - an at-least-NW-but-more-is-better (NHV-by-NW) */
+/* . vertical work array along the left-hand-edge. */
+/* . ==== */
+
+ kv = *n - nw + 1;
+ kt = nw + 1;
+ nho = *n - nw - 1 - kt + 1;
+ kwv = nw + 2;
+ nve = *n - nw - kwv + 1;
+
+/* ==== Aggressive early deflation ==== */
+
+ zlaqr2_(wantt, wantz, n, &ktop, &kbot, &nw, &h__[h_offset], ldh,
+ iloz, ihiz, &z__[z_offset], ldz, &ls, &ld, &w[1], &h__[kv
+ + h_dim1], ldh, &nho, &h__[kv + kt * h_dim1], ldh, &nve, &
+ h__[kwv + h_dim1], ldh, &work[1], lwork);
+
+/* ==== Adjust KBOT accounting for new deflations. ==== */
+
+ kbot -= ld;
+
+/* ==== KS points to the shifts. ==== */
+
+ ks = kbot - ls + 1;
+
+/* ==== Skip an expensive QR sweep if there is a (partly */
+/* . heuristic) reason to expect that many eigenvalues */
+/* . will deflate without it. Here, the QR sweep is */
+/* . skipped if many eigenvalues have just been deflated */
+/* . or if the remaining active block is small. */
+
+ if (ld == 0 || ld * 100 <= nw * nibble && kbot - ktop + 1 > min(
+ nmin,nwmax)) {
+
+/* ==== NS = nominal number of simultaneous shifts. */
+/* . This may be lowered (slightly) if ZLAQR2 */
+/* . did not provide that many shifts. ==== */
+
+/* Computing MIN */
+/* Computing MAX */
+ i__4 = 2, i__5 = kbot - ktop;
+ i__2 = min(nsmax,nsr), i__3 = max(i__4,i__5);
+ ns = min(i__2,i__3);
+ ns -= ns % 2;
+
+/* ==== If there have been no deflations */
+/* . in a multiple of KEXSH iterations, */
+/* . then try exceptional shifts. */
+/* . Otherwise use shifts provided by */
+/* . ZLAQR2 above or from the eigenvalues */
+/* . of a trailing principal submatrix. ==== */
+
+ if (ndfl % 6 == 0) {
+ ks = kbot - ns + 1;
+ i__2 = ks + 1;
+ for (i__ = kbot; i__ >= i__2; i__ += -2) {
+ i__3 = i__;
+ i__4 = i__ + i__ * h_dim1;
+ i__5 = i__ + (i__ - 1) * h_dim1;
+ d__3 = ((d__1 = h__[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&h__[i__ + (i__ - 1) * h_dim1]), abs(
+ d__2))) * .75;
+ z__1.r = h__[i__4].r + d__3, z__1.i = h__[i__4].i;
+ w[i__3].r = z__1.r, w[i__3].i = z__1.i;
+ i__3 = i__ - 1;
+ i__4 = i__;
+ w[i__3].r = w[i__4].r, w[i__3].i = w[i__4].i;
+/* L30: */
+ }
+ } else {
+
+/* ==== Got NS/2 or fewer shifts? Use ZLAHQR */
+/* . on a trailing principal submatrix to */
+/* . get more. (Since NS.LE.NSMAX.LE.(N+6)/9, */
+/* . there is enough space below the subdiagonal */
+/* . to fit an NS-by-NS scratch array.) ==== */
+
+ if (kbot - ks + 1 <= ns / 2) {
+ ks = kbot - ns + 1;
+ kt = *n - ns + 1;
+ zlacpy_("A", &ns, &ns, &h__[ks + ks * h_dim1], ldh, &
+ h__[kt + h_dim1], ldh);
+ zlahqr_(&c_false, &c_false, &ns, &c__1, &ns, &h__[kt
+ + h_dim1], ldh, &w[ks], &c__1, &c__1, zdum, &
+ c__1, &inf);
+ ks += inf;
+
+/* ==== In case of a rare QR failure use */
+/* . eigenvalues of the trailing 2-by-2 */
+/* . principal submatrix. Scale to avoid */
+/* . overflows, underflows and subnormals. */
+/* . (The scale factor S can not be zero, */
+/* . because H(KBOT,KBOT-1) is nonzero.) ==== */
+
+ if (ks >= kbot) {
+ i__2 = kbot - 1 + (kbot - 1) * h_dim1;
+ i__3 = kbot + (kbot - 1) * h_dim1;
+ i__4 = kbot - 1 + kbot * h_dim1;
+ i__5 = kbot + kbot * h_dim1;
+ s = (d__1 = h__[i__2].r, abs(d__1)) + (d__2 =
+ d_imag(&h__[kbot - 1 + (kbot - 1) *
+ h_dim1]), abs(d__2)) + ((d__3 = h__[i__3]
+ .r, abs(d__3)) + (d__4 = d_imag(&h__[kbot
+ + (kbot - 1) * h_dim1]), abs(d__4))) + ((
+ d__5 = h__[i__4].r, abs(d__5)) + (d__6 =
+ d_imag(&h__[kbot - 1 + kbot * h_dim1]),
+ abs(d__6))) + ((d__7 = h__[i__5].r, abs(
+ d__7)) + (d__8 = d_imag(&h__[kbot + kbot *
+ h_dim1]), abs(d__8)));
+ i__2 = kbot - 1 + (kbot - 1) * h_dim1;
+ z__1.r = h__[i__2].r / s, z__1.i = h__[i__2].i /
+ s;
+ aa.r = z__1.r, aa.i = z__1.i;
+ i__2 = kbot + (kbot - 1) * h_dim1;
+ z__1.r = h__[i__2].r / s, z__1.i = h__[i__2].i /
+ s;
+ cc.r = z__1.r, cc.i = z__1.i;
+ i__2 = kbot - 1 + kbot * h_dim1;
+ z__1.r = h__[i__2].r / s, z__1.i = h__[i__2].i /
+ s;
+ bb.r = z__1.r, bb.i = z__1.i;
+ i__2 = kbot + kbot * h_dim1;
+ z__1.r = h__[i__2].r / s, z__1.i = h__[i__2].i /
+ s;
+ dd.r = z__1.r, dd.i = z__1.i;
+ z__2.r = aa.r + dd.r, z__2.i = aa.i + dd.i;
+ z__1.r = z__2.r / 2., z__1.i = z__2.i / 2.;
+ tr2.r = z__1.r, tr2.i = z__1.i;
+ z__3.r = aa.r - tr2.r, z__3.i = aa.i - tr2.i;
+ z__4.r = dd.r - tr2.r, z__4.i = dd.i - tr2.i;
+ z__2.r = z__3.r * z__4.r - z__3.i * z__4.i,
+ z__2.i = z__3.r * z__4.i + z__3.i *
+ z__4.r;
+ z__5.r = bb.r * cc.r - bb.i * cc.i, z__5.i = bb.r
+ * cc.i + bb.i * cc.r;
+ z__1.r = z__2.r - z__5.r, z__1.i = z__2.i -
+ z__5.i;
+ det.r = z__1.r, det.i = z__1.i;
+ z__2.r = -det.r, z__2.i = -det.i;
+ z_sqrt(&z__1, &z__2);
+ rtdisc.r = z__1.r, rtdisc.i = z__1.i;
+ i__2 = kbot - 1;
+ z__2.r = tr2.r + rtdisc.r, z__2.i = tr2.i +
+ rtdisc.i;
+ z__1.r = s * z__2.r, z__1.i = s * z__2.i;
+ w[i__2].r = z__1.r, w[i__2].i = z__1.i;
+ i__2 = kbot;
+ z__2.r = tr2.r - rtdisc.r, z__2.i = tr2.i -
+ rtdisc.i;
+ z__1.r = s * z__2.r, z__1.i = s * z__2.i;
+ w[i__2].r = z__1.r, w[i__2].i = z__1.i;
+
+ ks = kbot - 1;
+ }
+ }
+
+ if (kbot - ks + 1 > ns) {
+
+/* ==== Sort the shifts (Helps a little) ==== */
+
+ sorted = FALSE_;
+ i__2 = ks + 1;
+ for (k = kbot; k >= i__2; --k) {
+ if (sorted) {
+ goto L60;
+ }
+ sorted = TRUE_;
+ i__3 = k - 1;
+ for (i__ = ks; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ i__5 = i__ + 1;
+ if ((d__1 = w[i__4].r, abs(d__1)) + (d__2 =
+ d_imag(&w[i__]), abs(d__2)) < (d__3 =
+ w[i__5].r, abs(d__3)) + (d__4 =
+ d_imag(&w[i__ + 1]), abs(d__4))) {
+ sorted = FALSE_;
+ i__4 = i__;
+ swap.r = w[i__4].r, swap.i = w[i__4].i;
+ i__4 = i__;
+ i__5 = i__ + 1;
+ w[i__4].r = w[i__5].r, w[i__4].i = w[i__5]
+ .i;
+ i__4 = i__ + 1;
+ w[i__4].r = swap.r, w[i__4].i = swap.i;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+L60:
+ ;
+ }
+ }
+
+/* ==== If there are only two shifts, then use */
+/* . only one. ==== */
+
+ if (kbot - ks + 1 == 2) {
+ i__2 = kbot;
+ i__3 = kbot + kbot * h_dim1;
+ z__2.r = w[i__2].r - h__[i__3].r, z__2.i = w[i__2].i -
+ h__[i__3].i;
+ z__1.r = z__2.r, z__1.i = z__2.i;
+ i__4 = kbot - 1;
+ i__5 = kbot + kbot * h_dim1;
+ z__4.r = w[i__4].r - h__[i__5].r, z__4.i = w[i__4].i -
+ h__[i__5].i;
+ z__3.r = z__4.r, z__3.i = z__4.i;
+ if ((d__1 = z__1.r, abs(d__1)) + (d__2 = d_imag(&z__1),
+ abs(d__2)) < (d__3 = z__3.r, abs(d__3)) + (d__4 =
+ d_imag(&z__3), abs(d__4))) {
+ i__2 = kbot - 1;
+ i__3 = kbot;
+ w[i__2].r = w[i__3].r, w[i__2].i = w[i__3].i;
+ } else {
+ i__2 = kbot;
+ i__3 = kbot - 1;
+ w[i__2].r = w[i__3].r, w[i__2].i = w[i__3].i;
+ }
+ }
+
+/* ==== Use up to NS of the smallest magnitude */
+/* . shifts. If there aren't NS shifts available, */
+/* . then use them all, possibly dropping one to */
+/* . make the number of shifts even. ==== */
+
+/* Computing MIN */
+ i__2 = ns, i__3 = kbot - ks + 1;
+ ns = min(i__2,i__3);
+ ns -= ns % 2;
+ ks = kbot - ns + 1;
+
+/* ==== Small-bulge multi-shift QR sweep: */
+/* . split workspace under the subdiagonal into */
+/* . - a KDU-by-KDU work array U in the lower */
+/* . left-hand-corner, */
+/* . - a KDU-by-at-least-KDU-but-more-is-better */
+/* . (KDU-by-NHo) horizontal work array WH along */
+/* . the bottom edge, */
+/* . - and an at-least-KDU-but-more-is-better-by-KDU */
+/* .             (NVE-by-KDU) vertical work array WV along */
+/* . the left-hand-edge. ==== */
+
+ kdu = ns * 3 - 3;
+ ku = *n - kdu + 1;
+ kwh = kdu + 1;
+ nho = *n - kdu - 3 - (kdu + 1) + 1;
+ kwv = kdu + 4;
+ nve = *n - kdu - kwv + 1;
+
+/* ==== Small-bulge multi-shift QR sweep ==== */
+
+ zlaqr5_(wantt, wantz, &kacc22, n, &ktop, &kbot, &ns, &w[ks], &
+ h__[h_offset], ldh, iloz, ihiz, &z__[z_offset], ldz, &
+ work[1], &c__3, &h__[ku + h_dim1], ldh, &nve, &h__[
+ kwv + h_dim1], ldh, &nho, &h__[ku + kwh * h_dim1],
+ ldh);
+ }
+
+/* ==== Note progress (or the lack of it). ==== */
+
+ if (ld > 0) {
+ ndfl = 1;
+ } else {
+ ++ndfl;
+ }
+
+/* ==== End of main loop ==== */
+/* L70: */
+ }
+
+/* ==== Iteration limit exceeded. Set INFO to show where */
+/* . the problem occurred and exit. ==== */
+
+ *info = kbot;
+L80:
+ ;
+ }
+
+/* ==== Return the optimal value of LWORK. ==== */
+
+ d__1 = (doublereal) lwkopt;
+ z__1.r = d__1, z__1.i = 0.;
+ work[1].r = z__1.r, work[1].i = z__1.i;
+
+/* ==== End of ZLAQR4 ==== */
+
+ return 0;
+} /* zlaqr4_ */
diff --git a/contrib/libs/clapack/zlaqr5.c b/contrib/libs/clapack/zlaqr5.c
new file mode 100644
index 0000000000..ad61b76dcc
--- /dev/null
+++ b/contrib/libs/clapack/zlaqr5.c
@@ -0,0 +1,1349 @@
+/* zlaqr5.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* f2c-generated code passes every argument by address, so literal */
+/* constants used in calls below are materialized once as these */
+/* file-scope statics and their addresses are passed instead. */
+
+static doublecomplex c_b1 = {0.,0.}; /* complex zero */
+static doublecomplex c_b2 = {1.,0.}; /* complex one */
+static integer c__3 = 3;
+static integer c__1 = 1;
+static integer c__2 = 2;
+
+/* Subroutine */ int zlaqr5_(logical *wantt, logical *wantz, integer *kacc22,
+ integer *n, integer *ktop, integer *kbot, integer *nshfts,
+ doublecomplex *s, doublecomplex *h__, integer *ldh, integer *iloz,
+ integer *ihiz, doublecomplex *z__, integer *ldz, doublecomplex *v,
+ integer *ldv, doublecomplex *u, integer *ldu, integer *nv,
+ doublecomplex *wv, integer *ldwv, integer *nh, doublecomplex *wh,
+ integer *ldwh)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, u_dim1, u_offset, v_dim1, v_offset, wh_dim1,
+ wh_offset, wv_dim1, wv_offset, z_dim1, z_offset, i__1, i__2, i__3,
+ i__4, i__5, i__6, i__7, i__8, i__9, i__10, i__11;
+ doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8, d__9, d__10;
+ doublecomplex z__1, z__2, z__3, z__4, z__5, z__6, z__7, z__8;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer j, k, m, i2, j2, i4, j4, k1;
+ doublereal h11, h12, h21, h22;
+ integer m22, ns, nu;
+ doublecomplex vt[3];
+ doublereal scl;
+ integer kdu, kms;
+ doublereal ulp;
+ integer knz, kzs;
+ doublereal tst1, tst2;
+ doublecomplex beta;
+ logical blk22, bmp22;
+ integer mend, jcol, jlen, jbot, mbot, jtop, jrow, mtop;
+ doublecomplex alpha;
+ logical accum;
+ integer ndcol, incol, krcol, nbmps;
+ extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), ztrmm_(char *, char *, char *, char *,
+ integer *, integer *, doublecomplex *, doublecomplex *, integer *
+, doublecomplex *, integer *),
+ dlabad_(doublereal *, doublereal *), zlaqr1_(integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ doublecomplex *);
+ extern doublereal dlamch_(char *);
+ doublereal safmin, safmax;
+ extern /* Subroutine */ int zlarfg_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *);
+ doublecomplex refsum;
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *),
+ zlaset_(char *, integer *, integer *, doublecomplex *,
+ doublecomplex *, doublecomplex *, integer *);
+ integer mstart;
+ doublereal smlnum;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* This auxiliary subroutine called by ZLAQR0 performs a */
+/* single small-bulge multi-shift QR sweep. */
+
+/* WANTT (input) logical scalar */
+/* WANTT = .true. if the triangular Schur factor */
+/* is being computed. WANTT is set to .false. otherwise. */
+
+/* WANTZ (input) logical scalar */
+/* WANTZ = .true. if the unitary Schur factor is being */
+/* computed. WANTZ is set to .false. otherwise. */
+
+/* KACC22 (input) integer with value 0, 1, or 2. */
+/* Specifies the computation mode of far-from-diagonal */
+/* orthogonal updates. */
+/* = 0: ZLAQR5 does not accumulate reflections and does not */
+/* use matrix-matrix multiply to update far-from-diagonal */
+/* matrix entries. */
+/* = 1: ZLAQR5 accumulates reflections and uses matrix-matrix */
+/* multiply to update the far-from-diagonal matrix entries. */
+/* = 2: ZLAQR5 accumulates reflections, uses matrix-matrix */
+/* multiply to update the far-from-diagonal matrix entries, */
+/* and takes advantage of 2-by-2 block structure during */
+/* matrix multiplies. */
+
+/* N (input) integer scalar */
+/* N is the order of the Hessenberg matrix H upon which this */
+/* subroutine operates. */
+
+/* KTOP (input) integer scalar */
+/* KBOT (input) integer scalar */
+/* These are the first and last rows and columns of an */
+/* isolated diagonal block upon which the QR sweep is to be */
+/* applied. It is assumed without a check that */
+/* either KTOP = 1 or H(KTOP,KTOP-1) = 0 */
+/* and */
+/* either KBOT = N or H(KBOT+1,KBOT) = 0. */
+
+/* NSHFTS (input) integer scalar */
+/* NSHFTS gives the number of simultaneous shifts. NSHFTS */
+/* must be positive and even. */
+
+/* S (input/output) COMPLEX*16 array of size (NSHFTS) */
+/* S contains the shifts of origin that define the multi- */
+/* shift QR sweep. On output S may be reordered. */
+
+/* H (input/output) COMPLEX*16 array of size (LDH,N) */
+/* On input H contains a Hessenberg matrix. On output a */
+/* multi-shift QR sweep with shifts SR(J)+i*SI(J) is applied */
+/* to the isolated diagonal block in rows and columns KTOP */
+/* through KBOT. */
+
+/* LDH (input) integer scalar */
+/* LDH is the leading dimension of H just as declared in the */
+/* calling procedure. LDH.GE.MAX(1,N). */
+
+/* ILOZ (input) INTEGER */
+/* IHIZ (input) INTEGER */
+/* Specify the rows of Z to which transformations must be */
+/* applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N */
+
+/* Z (input/output) COMPLEX*16 array of size (LDZ,IHI) */
+/* If WANTZ = .TRUE., then the QR Sweep unitary */
+/* similarity transformation is accumulated into */
+/* Z(ILOZ:IHIZ,ILO:IHI) from the right. */
+/* If WANTZ = .FALSE., then Z is unreferenced. */
+
+/* LDZ (input) integer scalar */
+/* LDA is the leading dimension of Z just as declared in */
+/* the calling procedure. LDZ.GE.N. */
+
+/* V (workspace) COMPLEX*16 array of size (LDV,NSHFTS/2) */
+
+/* LDV (input) integer scalar */
+/* LDV is the leading dimension of V as declared in the */
+/* calling procedure. LDV.GE.3. */
+
+/* U (workspace) COMPLEX*16 array of size */
+/* (LDU,3*NSHFTS-3) */
+
+/* LDU (input) integer scalar */
+/* LDU is the leading dimension of U just as declared in the */
+/* in the calling subroutine. LDU.GE.3*NSHFTS-3. */
+
+/* NH (input) integer scalar */
+/* NH is the number of columns in array WH available for */
+/* workspace. NH.GE.1. */
+
+/* WH (workspace) COMPLEX*16 array of size (LDWH,NH) */
+
+/* LDWH (input) integer scalar */
+/* Leading dimension of WH just as declared in the */
+/* calling procedure. LDWH.GE.3*NSHFTS-3. */
+
+/* NV (input) integer scalar */
+/*             NV is the number of rows in WV available for workspace. */
+/* NV.GE.1. */
+
+/* WV (workspace) COMPLEX*16 array of size */
+/* (LDWV,3*NSHFTS-3) */
+
+/* LDWV (input) integer scalar */
+/* LDWV is the leading dimension of WV as declared in the */
+/* in the calling subroutine. LDWV.GE.NV. */
+
+/* ================================================================ */
+/* Based on contributions by */
+/* Karen Braman and Ralph Byers, Department of Mathematics, */
+/* University of Kansas, USA */
+
+/* ================================================================ */
+/* Reference: */
+
+/* K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
+/* Algorithm Part I: Maintaining Well Focused Shifts, and */
+/* Level 3 Performance, SIAM Journal of Matrix Analysis, */
+/* volume 23, pages 929--947, 2002. */
+
+/* ================================================================ */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* ==== If there are no shifts, then there is nothing to do. ==== */
+
+ /* Parameter adjustments */
+ --s;
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ wv_dim1 = *ldwv;
+ wv_offset = 1 + wv_dim1;
+ wv -= wv_offset;
+ wh_dim1 = *ldwh;
+ wh_offset = 1 + wh_dim1;
+ wh -= wh_offset;
+
+ /* Function Body */
+ if (*nshfts < 2) {
+ return 0;
+ }
+
+/* ==== If the active block is empty or 1-by-1, then there */
+/* . is nothing to do. ==== */
+
+ if (*ktop >= *kbot) {
+ return 0;
+ }
+
+/* ==== NSHFTS is supposed to be even, but if it is odd, */
+/* . then simply reduce it by one. ==== */
+
+ ns = *nshfts - *nshfts % 2;
+
+/* ==== Machine constants for deflation ==== */
+
+ safmin = dlamch_("SAFE MINIMUM");
+ safmax = 1. / safmin;
+ dlabad_(&safmin, &safmax);
+ ulp = dlamch_("PRECISION");
+ smlnum = safmin * ((doublereal) (*n) / ulp);
+
+/* ==== Use accumulated reflections to update far-from-diagonal */
+/* . entries ? ==== */
+
+ accum = *kacc22 == 1 || *kacc22 == 2;
+
+/* ==== If so, exploit the 2-by-2 block structure? ==== */
+
+ blk22 = ns > 2 && *kacc22 == 2;
+
+/* ==== clear trash ==== */
+
+ if (*ktop + 2 <= *kbot) {
+ i__1 = *ktop + 2 + *ktop * h_dim1;
+ h__[i__1].r = 0., h__[i__1].i = 0.;
+ }
+
+/* ==== NBMPS = number of 2-shift bulges in the chain ==== */
+
+ nbmps = ns / 2;
+
+/* ==== KDU = width of slab ==== */
+
+ kdu = nbmps * 6 - 3;
+
+/* ==== Create and chase chains of NBMPS bulges ==== */
+
+ i__1 = *kbot - 2;
+ i__2 = nbmps * 3 - 2;
+ for (incol = (1 - nbmps) * 3 + *ktop - 1; i__2 < 0 ? incol >= i__1 :
+ incol <= i__1; incol += i__2) {
+ ndcol = incol + kdu;
+ if (accum) {
+ zlaset_("ALL", &kdu, &kdu, &c_b1, &c_b2, &u[u_offset], ldu);
+ }
+
+/* ==== Near-the-diagonal bulge chase. The following loop */
+/* . performs the near-the-diagonal part of a small bulge */
+/* . multi-shift QR sweep. Each 6*NBMPS-2 column diagonal */
+/* . chunk extends from column INCOL to column NDCOL */
+/* . (including both column INCOL and column NDCOL). The */
+/* . following loop chases a 3*NBMPS column long chain of */
+/* . NBMPS bulges 3*NBMPS-2 columns to the right. (INCOL */
+/* .    may be less than KTOP and NDCOL may be greater than */
+/* . KBOT indicating phantom columns from which to chase */
+/* . bulges before they are actually introduced or to which */
+/* . to chase bulges beyond column KBOT.) ==== */
+
+/* Computing MIN */
+ i__4 = incol + nbmps * 3 - 3, i__5 = *kbot - 2;
+ i__3 = min(i__4,i__5);
+ for (krcol = incol; krcol <= i__3; ++krcol) {
+
+/* ==== Bulges number MTOP to MBOT are active double implicit */
+/* . shift bulges. There may or may not also be small */
+/* . 2-by-2 bulge, if there is room. The inactive bulges */
+/* . (if any) must wait until the active bulges have moved */
+/* . down the diagonal to make room. The phantom matrix */
+/* . paradigm described above helps keep track. ==== */
+
+/* Computing MAX */
+ i__4 = 1, i__5 = (*ktop - 1 - krcol + 2) / 3 + 1;
+ mtop = max(i__4,i__5);
+/* Computing MIN */
+ i__4 = nbmps, i__5 = (*kbot - krcol) / 3;
+ mbot = min(i__4,i__5);
+ m22 = mbot + 1;
+ bmp22 = mbot < nbmps && krcol + (m22 - 1) * 3 == *kbot - 2;
+
+/* ==== Generate reflections to chase the chain right */
+/* . one column. (The minimum value of K is KTOP-1.) ==== */
+
+ i__4 = mbot;
+ for (m = mtop; m <= i__4; ++m) {
+ k = krcol + (m - 1) * 3;
+ if (k == *ktop - 1) {
+ zlaqr1_(&c__3, &h__[*ktop + *ktop * h_dim1], ldh, &s[(m <<
+ 1) - 1], &s[m * 2], &v[m * v_dim1 + 1]);
+ i__5 = m * v_dim1 + 1;
+ alpha.r = v[i__5].r, alpha.i = v[i__5].i;
+ zlarfg_(&c__3, &alpha, &v[m * v_dim1 + 2], &c__1, &v[m *
+ v_dim1 + 1]);
+ } else {
+ i__5 = k + 1 + k * h_dim1;
+ beta.r = h__[i__5].r, beta.i = h__[i__5].i;
+ i__5 = m * v_dim1 + 2;
+ i__6 = k + 2 + k * h_dim1;
+ v[i__5].r = h__[i__6].r, v[i__5].i = h__[i__6].i;
+ i__5 = m * v_dim1 + 3;
+ i__6 = k + 3 + k * h_dim1;
+ v[i__5].r = h__[i__6].r, v[i__5].i = h__[i__6].i;
+ zlarfg_(&c__3, &beta, &v[m * v_dim1 + 2], &c__1, &v[m *
+ v_dim1 + 1]);
+
+/* ==== A Bulge may collapse because of vigilant */
+/* . deflation or destructive underflow. In the */
+/* . underflow case, try the two-small-subdiagonals */
+/* . trick to try to reinflate the bulge. ==== */
+
+ i__5 = k + 3 + k * h_dim1;
+ i__6 = k + 3 + (k + 1) * h_dim1;
+ i__7 = k + 3 + (k + 2) * h_dim1;
+ if (h__[i__5].r != 0. || h__[i__5].i != 0. || (h__[i__6]
+ .r != 0. || h__[i__6].i != 0.) || h__[i__7].r ==
+ 0. && h__[i__7].i == 0.) {
+
+/* ==== Typical case: not collapsed (yet). ==== */
+
+ i__5 = k + 1 + k * h_dim1;
+ h__[i__5].r = beta.r, h__[i__5].i = beta.i;
+ i__5 = k + 2 + k * h_dim1;
+ h__[i__5].r = 0., h__[i__5].i = 0.;
+ i__5 = k + 3 + k * h_dim1;
+ h__[i__5].r = 0., h__[i__5].i = 0.;
+ } else {
+
+/* ==== Atypical case: collapsed. Attempt to */
+/* . reintroduce ignoring H(K+1,K) and H(K+2,K). */
+/* . If the fill resulting from the new */
+/* . reflector is too large, then abandon it. */
+/* . Otherwise, use the new one. ==== */
+
+ zlaqr1_(&c__3, &h__[k + 1 + (k + 1) * h_dim1], ldh, &
+ s[(m << 1) - 1], &s[m * 2], vt);
+ alpha.r = vt[0].r, alpha.i = vt[0].i;
+ zlarfg_(&c__3, &alpha, &vt[1], &c__1, vt);
+ d_cnjg(&z__2, vt);
+ i__5 = k + 1 + k * h_dim1;
+ d_cnjg(&z__5, &vt[1]);
+ i__6 = k + 2 + k * h_dim1;
+ z__4.r = z__5.r * h__[i__6].r - z__5.i * h__[i__6].i,
+ z__4.i = z__5.r * h__[i__6].i + z__5.i * h__[
+ i__6].r;
+ z__3.r = h__[i__5].r + z__4.r, z__3.i = h__[i__5].i +
+ z__4.i;
+ z__1.r = z__2.r * z__3.r - z__2.i * z__3.i, z__1.i =
+ z__2.r * z__3.i + z__2.i * z__3.r;
+ refsum.r = z__1.r, refsum.i = z__1.i;
+
+ i__5 = k + 2 + k * h_dim1;
+ z__3.r = refsum.r * vt[1].r - refsum.i * vt[1].i,
+ z__3.i = refsum.r * vt[1].i + refsum.i * vt[1]
+ .r;
+ z__2.r = h__[i__5].r - z__3.r, z__2.i = h__[i__5].i -
+ z__3.i;
+ z__1.r = z__2.r, z__1.i = z__2.i;
+ z__5.r = refsum.r * vt[2].r - refsum.i * vt[2].i,
+ z__5.i = refsum.r * vt[2].i + refsum.i * vt[2]
+ .r;
+ z__4.r = z__5.r, z__4.i = z__5.i;
+ i__6 = k + k * h_dim1;
+ i__7 = k + 1 + (k + 1) * h_dim1;
+ i__8 = k + 2 + (k + 2) * h_dim1;
+ if ((d__1 = z__1.r, abs(d__1)) + (d__2 = d_imag(&z__1)
+ , abs(d__2)) + ((d__3 = z__4.r, abs(d__3)) + (
+ d__4 = d_imag(&z__4), abs(d__4))) > ulp * ((
+ d__5 = h__[i__6].r, abs(d__5)) + (d__6 =
+ d_imag(&h__[k + k * h_dim1]), abs(d__6)) + ((
+ d__7 = h__[i__7].r, abs(d__7)) + (d__8 =
+ d_imag(&h__[k + 1 + (k + 1) * h_dim1]), abs(
+ d__8))) + ((d__9 = h__[i__8].r, abs(d__9)) + (
+ d__10 = d_imag(&h__[k + 2 + (k + 2) * h_dim1])
+ , abs(d__10))))) {
+
+/* ==== Starting a new bulge here would */
+/* . create non-negligible fill. Use */
+/* . the old one with trepidation. ==== */
+
+ i__5 = k + 1 + k * h_dim1;
+ h__[i__5].r = beta.r, h__[i__5].i = beta.i;
+ i__5 = k + 2 + k * h_dim1;
+ h__[i__5].r = 0., h__[i__5].i = 0.;
+ i__5 = k + 3 + k * h_dim1;
+ h__[i__5].r = 0., h__[i__5].i = 0.;
+ } else {
+
+/* ==== Starting a new bulge here would */
+/* . create only negligible fill. */
+/* . Replace the old reflector with */
+/* . the new one. ==== */
+
+ i__5 = k + 1 + k * h_dim1;
+ i__6 = k + 1 + k * h_dim1;
+ z__1.r = h__[i__6].r - refsum.r, z__1.i = h__[
+ i__6].i - refsum.i;
+ h__[i__5].r = z__1.r, h__[i__5].i = z__1.i;
+ i__5 = k + 2 + k * h_dim1;
+ h__[i__5].r = 0., h__[i__5].i = 0.;
+ i__5 = k + 3 + k * h_dim1;
+ h__[i__5].r = 0., h__[i__5].i = 0.;
+ i__5 = m * v_dim1 + 1;
+ v[i__5].r = vt[0].r, v[i__5].i = vt[0].i;
+ i__5 = m * v_dim1 + 2;
+ v[i__5].r = vt[1].r, v[i__5].i = vt[1].i;
+ i__5 = m * v_dim1 + 3;
+ v[i__5].r = vt[2].r, v[i__5].i = vt[2].i;
+ }
+ }
+ }
+/* L10: */
+ }
+
+/* ==== Generate a 2-by-2 reflection, if needed. ==== */
+
+ k = krcol + (m22 - 1) * 3;
+ if (bmp22) {
+ if (k == *ktop - 1) {
+ zlaqr1_(&c__2, &h__[k + 1 + (k + 1) * h_dim1], ldh, &s[(
+ m22 << 1) - 1], &s[m22 * 2], &v[m22 * v_dim1 + 1])
+ ;
+ i__4 = m22 * v_dim1 + 1;
+ beta.r = v[i__4].r, beta.i = v[i__4].i;
+ zlarfg_(&c__2, &beta, &v[m22 * v_dim1 + 2], &c__1, &v[m22
+ * v_dim1 + 1]);
+ } else {
+ i__4 = k + 1 + k * h_dim1;
+ beta.r = h__[i__4].r, beta.i = h__[i__4].i;
+ i__4 = m22 * v_dim1 + 2;
+ i__5 = k + 2 + k * h_dim1;
+ v[i__4].r = h__[i__5].r, v[i__4].i = h__[i__5].i;
+ zlarfg_(&c__2, &beta, &v[m22 * v_dim1 + 2], &c__1, &v[m22
+ * v_dim1 + 1]);
+ i__4 = k + 1 + k * h_dim1;
+ h__[i__4].r = beta.r, h__[i__4].i = beta.i;
+ i__4 = k + 2 + k * h_dim1;
+ h__[i__4].r = 0., h__[i__4].i = 0.;
+ }
+ }
+
+/* ==== Multiply H by reflections from the left ==== */
+
+ if (accum) {
+ jbot = min(ndcol,*kbot);
+ } else if (*wantt) {
+ jbot = *n;
+ } else {
+ jbot = *kbot;
+ }
+ i__4 = jbot;
+ for (j = max(*ktop,krcol); j <= i__4; ++j) {
+/* Computing MIN */
+ i__5 = mbot, i__6 = (j - krcol + 2) / 3;
+ mend = min(i__5,i__6);
+ i__5 = mend;
+ for (m = mtop; m <= i__5; ++m) {
+ k = krcol + (m - 1) * 3;
+ d_cnjg(&z__2, &v[m * v_dim1 + 1]);
+ i__6 = k + 1 + j * h_dim1;
+ d_cnjg(&z__6, &v[m * v_dim1 + 2]);
+ i__7 = k + 2 + j * h_dim1;
+ z__5.r = z__6.r * h__[i__7].r - z__6.i * h__[i__7].i,
+ z__5.i = z__6.r * h__[i__7].i + z__6.i * h__[i__7]
+ .r;
+ z__4.r = h__[i__6].r + z__5.r, z__4.i = h__[i__6].i +
+ z__5.i;
+ d_cnjg(&z__8, &v[m * v_dim1 + 3]);
+ i__8 = k + 3 + j * h_dim1;
+ z__7.r = z__8.r * h__[i__8].r - z__8.i * h__[i__8].i,
+ z__7.i = z__8.r * h__[i__8].i + z__8.i * h__[i__8]
+ .r;
+ z__3.r = z__4.r + z__7.r, z__3.i = z__4.i + z__7.i;
+ z__1.r = z__2.r * z__3.r - z__2.i * z__3.i, z__1.i =
+ z__2.r * z__3.i + z__2.i * z__3.r;
+ refsum.r = z__1.r, refsum.i = z__1.i;
+ i__6 = k + 1 + j * h_dim1;
+ i__7 = k + 1 + j * h_dim1;
+ z__1.r = h__[i__7].r - refsum.r, z__1.i = h__[i__7].i -
+ refsum.i;
+ h__[i__6].r = z__1.r, h__[i__6].i = z__1.i;
+ i__6 = k + 2 + j * h_dim1;
+ i__7 = k + 2 + j * h_dim1;
+ i__8 = m * v_dim1 + 2;
+ z__2.r = refsum.r * v[i__8].r - refsum.i * v[i__8].i,
+ z__2.i = refsum.r * v[i__8].i + refsum.i * v[i__8]
+ .r;
+ z__1.r = h__[i__7].r - z__2.r, z__1.i = h__[i__7].i -
+ z__2.i;
+ h__[i__6].r = z__1.r, h__[i__6].i = z__1.i;
+ i__6 = k + 3 + j * h_dim1;
+ i__7 = k + 3 + j * h_dim1;
+ i__8 = m * v_dim1 + 3;
+ z__2.r = refsum.r * v[i__8].r - refsum.i * v[i__8].i,
+ z__2.i = refsum.r * v[i__8].i + refsum.i * v[i__8]
+ .r;
+ z__1.r = h__[i__7].r - z__2.r, z__1.i = h__[i__7].i -
+ z__2.i;
+ h__[i__6].r = z__1.r, h__[i__6].i = z__1.i;
+/* L20: */
+ }
+/* L30: */
+ }
+ if (bmp22) {
+ k = krcol + (m22 - 1) * 3;
+/* Computing MAX */
+ i__4 = k + 1;
+ i__5 = jbot;
+ for (j = max(i__4,*ktop); j <= i__5; ++j) {
+ d_cnjg(&z__2, &v[m22 * v_dim1 + 1]);
+ i__4 = k + 1 + j * h_dim1;
+ d_cnjg(&z__5, &v[m22 * v_dim1 + 2]);
+ i__6 = k + 2 + j * h_dim1;
+ z__4.r = z__5.r * h__[i__6].r - z__5.i * h__[i__6].i,
+ z__4.i = z__5.r * h__[i__6].i + z__5.i * h__[i__6]
+ .r;
+ z__3.r = h__[i__4].r + z__4.r, z__3.i = h__[i__4].i +
+ z__4.i;
+ z__1.r = z__2.r * z__3.r - z__2.i * z__3.i, z__1.i =
+ z__2.r * z__3.i + z__2.i * z__3.r;
+ refsum.r = z__1.r, refsum.i = z__1.i;
+ i__4 = k + 1 + j * h_dim1;
+ i__6 = k + 1 + j * h_dim1;
+ z__1.r = h__[i__6].r - refsum.r, z__1.i = h__[i__6].i -
+ refsum.i;
+ h__[i__4].r = z__1.r, h__[i__4].i = z__1.i;
+ i__4 = k + 2 + j * h_dim1;
+ i__6 = k + 2 + j * h_dim1;
+ i__7 = m22 * v_dim1 + 2;
+ z__2.r = refsum.r * v[i__7].r - refsum.i * v[i__7].i,
+ z__2.i = refsum.r * v[i__7].i + refsum.i * v[i__7]
+ .r;
+ z__1.r = h__[i__6].r - z__2.r, z__1.i = h__[i__6].i -
+ z__2.i;
+ h__[i__4].r = z__1.r, h__[i__4].i = z__1.i;
+/* L40: */
+ }
+ }
+
+/* ==== Multiply H by reflections from the right. */
+/* . Delay filling in the last row until the */
+/* . vigilant deflation check is complete. ==== */
+
+ if (accum) {
+ jtop = max(*ktop,incol);
+ } else if (*wantt) {
+ jtop = 1;
+ } else {
+ jtop = *ktop;
+ }
+ i__5 = mbot;
+ for (m = mtop; m <= i__5; ++m) {
+ i__4 = m * v_dim1 + 1;
+ if (v[i__4].r != 0. || v[i__4].i != 0.) {
+ k = krcol + (m - 1) * 3;
+/* Computing MIN */
+ i__6 = *kbot, i__7 = k + 3;
+ i__4 = min(i__6,i__7);
+ for (j = jtop; j <= i__4; ++j) {
+ i__6 = m * v_dim1 + 1;
+ i__7 = j + (k + 1) * h_dim1;
+ i__8 = m * v_dim1 + 2;
+ i__9 = j + (k + 2) * h_dim1;
+ z__4.r = v[i__8].r * h__[i__9].r - v[i__8].i * h__[
+ i__9].i, z__4.i = v[i__8].r * h__[i__9].i + v[
+ i__8].i * h__[i__9].r;
+ z__3.r = h__[i__7].r + z__4.r, z__3.i = h__[i__7].i +
+ z__4.i;
+ i__10 = m * v_dim1 + 3;
+ i__11 = j + (k + 3) * h_dim1;
+ z__5.r = v[i__10].r * h__[i__11].r - v[i__10].i * h__[
+ i__11].i, z__5.i = v[i__10].r * h__[i__11].i
+ + v[i__10].i * h__[i__11].r;
+ z__2.r = z__3.r + z__5.r, z__2.i = z__3.i + z__5.i;
+ z__1.r = v[i__6].r * z__2.r - v[i__6].i * z__2.i,
+ z__1.i = v[i__6].r * z__2.i + v[i__6].i *
+ z__2.r;
+ refsum.r = z__1.r, refsum.i = z__1.i;
+ i__6 = j + (k + 1) * h_dim1;
+ i__7 = j + (k + 1) * h_dim1;
+ z__1.r = h__[i__7].r - refsum.r, z__1.i = h__[i__7].i
+ - refsum.i;
+ h__[i__6].r = z__1.r, h__[i__6].i = z__1.i;
+ i__6 = j + (k + 2) * h_dim1;
+ i__7 = j + (k + 2) * h_dim1;
+ d_cnjg(&z__3, &v[m * v_dim1 + 2]);
+ z__2.r = refsum.r * z__3.r - refsum.i * z__3.i,
+ z__2.i = refsum.r * z__3.i + refsum.i *
+ z__3.r;
+ z__1.r = h__[i__7].r - z__2.r, z__1.i = h__[i__7].i -
+ z__2.i;
+ h__[i__6].r = z__1.r, h__[i__6].i = z__1.i;
+ i__6 = j + (k + 3) * h_dim1;
+ i__7 = j + (k + 3) * h_dim1;
+ d_cnjg(&z__3, &v[m * v_dim1 + 3]);
+ z__2.r = refsum.r * z__3.r - refsum.i * z__3.i,
+ z__2.i = refsum.r * z__3.i + refsum.i *
+ z__3.r;
+ z__1.r = h__[i__7].r - z__2.r, z__1.i = h__[i__7].i -
+ z__2.i;
+ h__[i__6].r = z__1.r, h__[i__6].i = z__1.i;
+/* L50: */
+ }
+
+ if (accum) {
+
+/* ==== Accumulate U. (If necessary, update Z later */
+/*                    .    with an efficient matrix-matrix */
+/* . multiply.) ==== */
+
+ kms = k - incol;
+/* Computing MAX */
+ i__4 = 1, i__6 = *ktop - incol;
+ i__7 = kdu;
+ for (j = max(i__4,i__6); j <= i__7; ++j) {
+ i__4 = m * v_dim1 + 1;
+ i__6 = j + (kms + 1) * u_dim1;
+ i__8 = m * v_dim1 + 2;
+ i__9 = j + (kms + 2) * u_dim1;
+ z__4.r = v[i__8].r * u[i__9].r - v[i__8].i * u[
+ i__9].i, z__4.i = v[i__8].r * u[i__9].i +
+ v[i__8].i * u[i__9].r;
+ z__3.r = u[i__6].r + z__4.r, z__3.i = u[i__6].i +
+ z__4.i;
+ i__10 = m * v_dim1 + 3;
+ i__11 = j + (kms + 3) * u_dim1;
+ z__5.r = v[i__10].r * u[i__11].r - v[i__10].i * u[
+ i__11].i, z__5.i = v[i__10].r * u[i__11]
+ .i + v[i__10].i * u[i__11].r;
+ z__2.r = z__3.r + z__5.r, z__2.i = z__3.i +
+ z__5.i;
+ z__1.r = v[i__4].r * z__2.r - v[i__4].i * z__2.i,
+ z__1.i = v[i__4].r * z__2.i + v[i__4].i *
+ z__2.r;
+ refsum.r = z__1.r, refsum.i = z__1.i;
+ i__4 = j + (kms + 1) * u_dim1;
+ i__6 = j + (kms + 1) * u_dim1;
+ z__1.r = u[i__6].r - refsum.r, z__1.i = u[i__6].i
+ - refsum.i;
+ u[i__4].r = z__1.r, u[i__4].i = z__1.i;
+ i__4 = j + (kms + 2) * u_dim1;
+ i__6 = j + (kms + 2) * u_dim1;
+ d_cnjg(&z__3, &v[m * v_dim1 + 2]);
+ z__2.r = refsum.r * z__3.r - refsum.i * z__3.i,
+ z__2.i = refsum.r * z__3.i + refsum.i *
+ z__3.r;
+ z__1.r = u[i__6].r - z__2.r, z__1.i = u[i__6].i -
+ z__2.i;
+ u[i__4].r = z__1.r, u[i__4].i = z__1.i;
+ i__4 = j + (kms + 3) * u_dim1;
+ i__6 = j + (kms + 3) * u_dim1;
+ d_cnjg(&z__3, &v[m * v_dim1 + 3]);
+ z__2.r = refsum.r * z__3.r - refsum.i * z__3.i,
+ z__2.i = refsum.r * z__3.i + refsum.i *
+ z__3.r;
+ z__1.r = u[i__6].r - z__2.r, z__1.i = u[i__6].i -
+ z__2.i;
+ u[i__4].r = z__1.r, u[i__4].i = z__1.i;
+/* L60: */
+ }
+ } else if (*wantz) {
+
+/* ==== U is not accumulated, so update Z */
+/* . now by multiplying by reflections */
+/* . from the right. ==== */
+
+ i__7 = *ihiz;
+ for (j = *iloz; j <= i__7; ++j) {
+ i__4 = m * v_dim1 + 1;
+ i__6 = j + (k + 1) * z_dim1;
+ i__8 = m * v_dim1 + 2;
+ i__9 = j + (k + 2) * z_dim1;
+ z__4.r = v[i__8].r * z__[i__9].r - v[i__8].i *
+ z__[i__9].i, z__4.i = v[i__8].r * z__[
+ i__9].i + v[i__8].i * z__[i__9].r;
+ z__3.r = z__[i__6].r + z__4.r, z__3.i = z__[i__6]
+ .i + z__4.i;
+ i__10 = m * v_dim1 + 3;
+ i__11 = j + (k + 3) * z_dim1;
+ z__5.r = v[i__10].r * z__[i__11].r - v[i__10].i *
+ z__[i__11].i, z__5.i = v[i__10].r * z__[
+ i__11].i + v[i__10].i * z__[i__11].r;
+ z__2.r = z__3.r + z__5.r, z__2.i = z__3.i +
+ z__5.i;
+ z__1.r = v[i__4].r * z__2.r - v[i__4].i * z__2.i,
+ z__1.i = v[i__4].r * z__2.i + v[i__4].i *
+ z__2.r;
+ refsum.r = z__1.r, refsum.i = z__1.i;
+ i__4 = j + (k + 1) * z_dim1;
+ i__6 = j + (k + 1) * z_dim1;
+ z__1.r = z__[i__6].r - refsum.r, z__1.i = z__[
+ i__6].i - refsum.i;
+ z__[i__4].r = z__1.r, z__[i__4].i = z__1.i;
+ i__4 = j + (k + 2) * z_dim1;
+ i__6 = j + (k + 2) * z_dim1;
+ d_cnjg(&z__3, &v[m * v_dim1 + 2]);
+ z__2.r = refsum.r * z__3.r - refsum.i * z__3.i,
+ z__2.i = refsum.r * z__3.i + refsum.i *
+ z__3.r;
+ z__1.r = z__[i__6].r - z__2.r, z__1.i = z__[i__6]
+ .i - z__2.i;
+ z__[i__4].r = z__1.r, z__[i__4].i = z__1.i;
+ i__4 = j + (k + 3) * z_dim1;
+ i__6 = j + (k + 3) * z_dim1;
+ d_cnjg(&z__3, &v[m * v_dim1 + 3]);
+ z__2.r = refsum.r * z__3.r - refsum.i * z__3.i,
+ z__2.i = refsum.r * z__3.i + refsum.i *
+ z__3.r;
+ z__1.r = z__[i__6].r - z__2.r, z__1.i = z__[i__6]
+ .i - z__2.i;
+ z__[i__4].r = z__1.r, z__[i__4].i = z__1.i;
+/* L70: */
+ }
+ }
+ }
+/* L80: */
+ }
+
+/* ==== Special case: 2-by-2 reflection (if needed) ==== */
+
+ k = krcol + (m22 - 1) * 3;
+ i__5 = m22 * v_dim1 + 1;
+ if (bmp22 && (v[i__5].r != 0. || v[i__5].i != 0.)) {
+/* Computing MIN */
+ i__7 = *kbot, i__4 = k + 3;
+ i__5 = min(i__7,i__4);
+ for (j = jtop; j <= i__5; ++j) {
+ i__7 = m22 * v_dim1 + 1;
+ i__4 = j + (k + 1) * h_dim1;
+ i__6 = m22 * v_dim1 + 2;
+ i__8 = j + (k + 2) * h_dim1;
+ z__3.r = v[i__6].r * h__[i__8].r - v[i__6].i * h__[i__8]
+ .i, z__3.i = v[i__6].r * h__[i__8].i + v[i__6].i *
+ h__[i__8].r;
+ z__2.r = h__[i__4].r + z__3.r, z__2.i = h__[i__4].i +
+ z__3.i;
+ z__1.r = v[i__7].r * z__2.r - v[i__7].i * z__2.i, z__1.i =
+ v[i__7].r * z__2.i + v[i__7].i * z__2.r;
+ refsum.r = z__1.r, refsum.i = z__1.i;
+ i__7 = j + (k + 1) * h_dim1;
+ i__4 = j + (k + 1) * h_dim1;
+ z__1.r = h__[i__4].r - refsum.r, z__1.i = h__[i__4].i -
+ refsum.i;
+ h__[i__7].r = z__1.r, h__[i__7].i = z__1.i;
+ i__7 = j + (k + 2) * h_dim1;
+ i__4 = j + (k + 2) * h_dim1;
+ d_cnjg(&z__3, &v[m22 * v_dim1 + 2]);
+ z__2.r = refsum.r * z__3.r - refsum.i * z__3.i, z__2.i =
+ refsum.r * z__3.i + refsum.i * z__3.r;
+ z__1.r = h__[i__4].r - z__2.r, z__1.i = h__[i__4].i -
+ z__2.i;
+ h__[i__7].r = z__1.r, h__[i__7].i = z__1.i;
+/* L90: */
+ }
+
+ if (accum) {
+ kms = k - incol;
+/* Computing MAX */
+ i__5 = 1, i__7 = *ktop - incol;
+ i__4 = kdu;
+ for (j = max(i__5,i__7); j <= i__4; ++j) {
+ i__5 = m22 * v_dim1 + 1;
+ i__7 = j + (kms + 1) * u_dim1;
+ i__6 = m22 * v_dim1 + 2;
+ i__8 = j + (kms + 2) * u_dim1;
+ z__3.r = v[i__6].r * u[i__8].r - v[i__6].i * u[i__8]
+ .i, z__3.i = v[i__6].r * u[i__8].i + v[i__6]
+ .i * u[i__8].r;
+ z__2.r = u[i__7].r + z__3.r, z__2.i = u[i__7].i +
+ z__3.i;
+ z__1.r = v[i__5].r * z__2.r - v[i__5].i * z__2.i,
+ z__1.i = v[i__5].r * z__2.i + v[i__5].i *
+ z__2.r;
+ refsum.r = z__1.r, refsum.i = z__1.i;
+ i__5 = j + (kms + 1) * u_dim1;
+ i__7 = j + (kms + 1) * u_dim1;
+ z__1.r = u[i__7].r - refsum.r, z__1.i = u[i__7].i -
+ refsum.i;
+ u[i__5].r = z__1.r, u[i__5].i = z__1.i;
+ i__5 = j + (kms + 2) * u_dim1;
+ i__7 = j + (kms + 2) * u_dim1;
+ d_cnjg(&z__3, &v[m22 * v_dim1 + 2]);
+ z__2.r = refsum.r * z__3.r - refsum.i * z__3.i,
+ z__2.i = refsum.r * z__3.i + refsum.i *
+ z__3.r;
+ z__1.r = u[i__7].r - z__2.r, z__1.i = u[i__7].i -
+ z__2.i;
+ u[i__5].r = z__1.r, u[i__5].i = z__1.i;
+/* L100: */
+ }
+ } else if (*wantz) {
+ i__4 = *ihiz;
+ for (j = *iloz; j <= i__4; ++j) {
+ i__5 = m22 * v_dim1 + 1;
+ i__7 = j + (k + 1) * z_dim1;
+ i__6 = m22 * v_dim1 + 2;
+ i__8 = j + (k + 2) * z_dim1;
+ z__3.r = v[i__6].r * z__[i__8].r - v[i__6].i * z__[
+ i__8].i, z__3.i = v[i__6].r * z__[i__8].i + v[
+ i__6].i * z__[i__8].r;
+ z__2.r = z__[i__7].r + z__3.r, z__2.i = z__[i__7].i +
+ z__3.i;
+ z__1.r = v[i__5].r * z__2.r - v[i__5].i * z__2.i,
+ z__1.i = v[i__5].r * z__2.i + v[i__5].i *
+ z__2.r;
+ refsum.r = z__1.r, refsum.i = z__1.i;
+ i__5 = j + (k + 1) * z_dim1;
+ i__7 = j + (k + 1) * z_dim1;
+ z__1.r = z__[i__7].r - refsum.r, z__1.i = z__[i__7].i
+ - refsum.i;
+ z__[i__5].r = z__1.r, z__[i__5].i = z__1.i;
+ i__5 = j + (k + 2) * z_dim1;
+ i__7 = j + (k + 2) * z_dim1;
+ d_cnjg(&z__3, &v[m22 * v_dim1 + 2]);
+ z__2.r = refsum.r * z__3.r - refsum.i * z__3.i,
+ z__2.i = refsum.r * z__3.i + refsum.i *
+ z__3.r;
+ z__1.r = z__[i__7].r - z__2.r, z__1.i = z__[i__7].i -
+ z__2.i;
+ z__[i__5].r = z__1.r, z__[i__5].i = z__1.i;
+/* L110: */
+ }
+ }
+ }
+
+/* ==== Vigilant deflation check ==== */
+
+ mstart = mtop;
+ if (krcol + (mstart - 1) * 3 < *ktop) {
+ ++mstart;
+ }
+ mend = mbot;
+ if (bmp22) {
+ ++mend;
+ }
+ if (krcol == *kbot - 2) {
+ ++mend;
+ }
+ i__4 = mend;
+ for (m = mstart; m <= i__4; ++m) {
+/* Computing MIN */
+ i__5 = *kbot - 1, i__7 = krcol + (m - 1) * 3;
+ k = min(i__5,i__7);
+
+/* ==== The following convergence test requires that */
+/*              .     the traditional small-compared-to-nearby-diagonals */
+/* . criterion and the Ahues & Tisseur (LAWN 122, 1997) */
+/* . criteria both be satisfied. The latter improves */
+/* . accuracy in some examples. Falling back on an */
+/* . alternate convergence criterion when TST1 or TST2 */
+/* . is zero (as done here) is traditional but probably */
+/* . unnecessary. ==== */
+
+ i__5 = k + 1 + k * h_dim1;
+ if (h__[i__5].r != 0. || h__[i__5].i != 0.) {
+ i__5 = k + k * h_dim1;
+ i__7 = k + 1 + (k + 1) * h_dim1;
+ tst1 = (d__1 = h__[i__5].r, abs(d__1)) + (d__2 = d_imag(&
+ h__[k + k * h_dim1]), abs(d__2)) + ((d__3 = h__[
+ i__7].r, abs(d__3)) + (d__4 = d_imag(&h__[k + 1 +
+ (k + 1) * h_dim1]), abs(d__4)));
+ if (tst1 == 0.) {
+ if (k >= *ktop + 1) {
+ i__5 = k + (k - 1) * h_dim1;
+ tst1 += (d__1 = h__[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&h__[k + (k - 1) * h_dim1]), abs(
+ d__2));
+ }
+ if (k >= *ktop + 2) {
+ i__5 = k + (k - 2) * h_dim1;
+ tst1 += (d__1 = h__[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&h__[k + (k - 2) * h_dim1]), abs(
+ d__2));
+ }
+ if (k >= *ktop + 3) {
+ i__5 = k + (k - 3) * h_dim1;
+ tst1 += (d__1 = h__[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&h__[k + (k - 3) * h_dim1]), abs(
+ d__2));
+ }
+ if (k <= *kbot - 2) {
+ i__5 = k + 2 + (k + 1) * h_dim1;
+ tst1 += (d__1 = h__[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&h__[k + 2 + (k + 1) * h_dim1]),
+ abs(d__2));
+ }
+ if (k <= *kbot - 3) {
+ i__5 = k + 3 + (k + 1) * h_dim1;
+ tst1 += (d__1 = h__[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&h__[k + 3 + (k + 1) * h_dim1]),
+ abs(d__2));
+ }
+ if (k <= *kbot - 4) {
+ i__5 = k + 4 + (k + 1) * h_dim1;
+ tst1 += (d__1 = h__[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&h__[k + 4 + (k + 1) * h_dim1]),
+ abs(d__2));
+ }
+ }
+ i__5 = k + 1 + k * h_dim1;
+/* Computing MAX */
+ d__3 = smlnum, d__4 = ulp * tst1;
+ if ((d__1 = h__[i__5].r, abs(d__1)) + (d__2 = d_imag(&h__[
+ k + 1 + k * h_dim1]), abs(d__2)) <= max(d__3,d__4)
+ ) {
+/* Computing MAX */
+ i__5 = k + 1 + k * h_dim1;
+ i__7 = k + (k + 1) * h_dim1;
+ d__5 = (d__1 = h__[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&h__[k + 1 + k * h_dim1]), abs(d__2)),
+ d__6 = (d__3 = h__[i__7].r, abs(d__3)) + (
+ d__4 = d_imag(&h__[k + (k + 1) * h_dim1]),
+ abs(d__4));
+ h12 = max(d__5,d__6);
+/* Computing MIN */
+ i__5 = k + 1 + k * h_dim1;
+ i__7 = k + (k + 1) * h_dim1;
+ d__5 = (d__1 = h__[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&h__[k + 1 + k * h_dim1]), abs(d__2)),
+ d__6 = (d__3 = h__[i__7].r, abs(d__3)) + (
+ d__4 = d_imag(&h__[k + (k + 1) * h_dim1]),
+ abs(d__4));
+ h21 = min(d__5,d__6);
+ i__5 = k + k * h_dim1;
+ i__7 = k + 1 + (k + 1) * h_dim1;
+ z__2.r = h__[i__5].r - h__[i__7].r, z__2.i = h__[i__5]
+ .i - h__[i__7].i;
+ z__1.r = z__2.r, z__1.i = z__2.i;
+/* Computing MAX */
+ i__6 = k + 1 + (k + 1) * h_dim1;
+ d__5 = (d__1 = h__[i__6].r, abs(d__1)) + (d__2 =
+ d_imag(&h__[k + 1 + (k + 1) * h_dim1]), abs(
+ d__2)), d__6 = (d__3 = z__1.r, abs(d__3)) + (
+ d__4 = d_imag(&z__1), abs(d__4));
+ h11 = max(d__5,d__6);
+ i__5 = k + k * h_dim1;
+ i__7 = k + 1 + (k + 1) * h_dim1;
+ z__2.r = h__[i__5].r - h__[i__7].r, z__2.i = h__[i__5]
+ .i - h__[i__7].i;
+ z__1.r = z__2.r, z__1.i = z__2.i;
+/* Computing MIN */
+ i__6 = k + 1 + (k + 1) * h_dim1;
+ d__5 = (d__1 = h__[i__6].r, abs(d__1)) + (d__2 =
+ d_imag(&h__[k + 1 + (k + 1) * h_dim1]), abs(
+ d__2)), d__6 = (d__3 = z__1.r, abs(d__3)) + (
+ d__4 = d_imag(&z__1), abs(d__4));
+ h22 = min(d__5,d__6);
+ scl = h11 + h12;
+ tst2 = h22 * (h11 / scl);
+
+/* Computing MAX */
+ d__1 = smlnum, d__2 = ulp * tst2;
+ if (tst2 == 0. || h21 * (h12 / scl) <= max(d__1,d__2))
+ {
+ i__5 = k + 1 + k * h_dim1;
+ h__[i__5].r = 0., h__[i__5].i = 0.;
+ }
+ }
+ }
+/* L120: */
+ }
+
+/* ==== Fill in the last row of each bulge. ==== */
+
+/* Computing MIN */
+ i__4 = nbmps, i__5 = (*kbot - krcol - 1) / 3;
+ mend = min(i__4,i__5);
+ i__4 = mend;
+ for (m = mtop; m <= i__4; ++m) {
+ k = krcol + (m - 1) * 3;
+ i__5 = m * v_dim1 + 1;
+ i__7 = m * v_dim1 + 3;
+ z__2.r = v[i__5].r * v[i__7].r - v[i__5].i * v[i__7].i,
+ z__2.i = v[i__5].r * v[i__7].i + v[i__5].i * v[i__7]
+ .r;
+ i__6 = k + 4 + (k + 3) * h_dim1;
+ z__1.r = z__2.r * h__[i__6].r - z__2.i * h__[i__6].i, z__1.i =
+ z__2.r * h__[i__6].i + z__2.i * h__[i__6].r;
+ refsum.r = z__1.r, refsum.i = z__1.i;
+ i__5 = k + 4 + (k + 1) * h_dim1;
+ z__1.r = -refsum.r, z__1.i = -refsum.i;
+ h__[i__5].r = z__1.r, h__[i__5].i = z__1.i;
+ i__5 = k + 4 + (k + 2) * h_dim1;
+ z__2.r = -refsum.r, z__2.i = -refsum.i;
+ d_cnjg(&z__3, &v[m * v_dim1 + 2]);
+ z__1.r = z__2.r * z__3.r - z__2.i * z__3.i, z__1.i = z__2.r *
+ z__3.i + z__2.i * z__3.r;
+ h__[i__5].r = z__1.r, h__[i__5].i = z__1.i;
+ i__5 = k + 4 + (k + 3) * h_dim1;
+ i__7 = k + 4 + (k + 3) * h_dim1;
+ d_cnjg(&z__3, &v[m * v_dim1 + 3]);
+ z__2.r = refsum.r * z__3.r - refsum.i * z__3.i, z__2.i =
+ refsum.r * z__3.i + refsum.i * z__3.r;
+ z__1.r = h__[i__7].r - z__2.r, z__1.i = h__[i__7].i - z__2.i;
+ h__[i__5].r = z__1.r, h__[i__5].i = z__1.i;
+/* L130: */
+ }
+
+/* ==== End of near-the-diagonal bulge chase. ==== */
+
+/* L140: */
+ }
+
+/* ==== Use U (if accumulated) to update far-from-diagonal */
+/* . entries in H. If required, use U to update Z as */
+/* . well. ==== */
+
+ if (accum) {
+ if (*wantt) {
+ jtop = 1;
+ jbot = *n;
+ } else {
+ jtop = *ktop;
+ jbot = *kbot;
+ }
+ if (! blk22 || incol < *ktop || ndcol > *kbot || ns <= 2) {
+
+/* ==== Updates not exploiting the 2-by-2 block */
+/* . structure of U. K1 and NU keep track of */
+/* . the location and size of U in the special */
+/* . cases of introducing bulges and chasing */
+/* . bulges off the bottom. In these special */
+/* . cases and in case the number of shifts */
+/* . is NS = 2, there is no 2-by-2 block */
+/* . structure to exploit. ==== */
+
+/* Computing MAX */
+ i__3 = 1, i__4 = *ktop - incol;
+ k1 = max(i__3,i__4);
+/* Computing MAX */
+ i__3 = 0, i__4 = ndcol - *kbot;
+ nu = kdu - max(i__3,i__4) - k1 + 1;
+
+/* ==== Horizontal Multiply ==== */
+
+ i__3 = jbot;
+ i__4 = *nh;
+ for (jcol = min(ndcol,*kbot) + 1; i__4 < 0 ? jcol >= i__3 :
+ jcol <= i__3; jcol += i__4) {
+/* Computing MIN */
+ i__5 = *nh, i__7 = jbot - jcol + 1;
+ jlen = min(i__5,i__7);
+ zgemm_("C", "N", &nu, &jlen, &nu, &c_b2, &u[k1 + k1 *
+ u_dim1], ldu, &h__[incol + k1 + jcol * h_dim1],
+ ldh, &c_b1, &wh[wh_offset], ldwh);
+ zlacpy_("ALL", &nu, &jlen, &wh[wh_offset], ldwh, &h__[
+ incol + k1 + jcol * h_dim1], ldh);
+/* L150: */
+ }
+
+/* ==== Vertical multiply ==== */
+
+ i__4 = max(*ktop,incol) - 1;
+ i__3 = *nv;
+ for (jrow = jtop; i__3 < 0 ? jrow >= i__4 : jrow <= i__4;
+ jrow += i__3) {
+/* Computing MIN */
+ i__5 = *nv, i__7 = max(*ktop,incol) - jrow;
+ jlen = min(i__5,i__7);
+ zgemm_("N", "N", &jlen, &nu, &nu, &c_b2, &h__[jrow + (
+ incol + k1) * h_dim1], ldh, &u[k1 + k1 * u_dim1],
+ ldu, &c_b1, &wv[wv_offset], ldwv);
+ zlacpy_("ALL", &jlen, &nu, &wv[wv_offset], ldwv, &h__[
+ jrow + (incol + k1) * h_dim1], ldh);
+/* L160: */
+ }
+
+/* ==== Z multiply (also vertical) ==== */
+
+ if (*wantz) {
+ i__3 = *ihiz;
+ i__4 = *nv;
+ for (jrow = *iloz; i__4 < 0 ? jrow >= i__3 : jrow <= i__3;
+ jrow += i__4) {
+/* Computing MIN */
+ i__5 = *nv, i__7 = *ihiz - jrow + 1;
+ jlen = min(i__5,i__7);
+ zgemm_("N", "N", &jlen, &nu, &nu, &c_b2, &z__[jrow + (
+ incol + k1) * z_dim1], ldz, &u[k1 + k1 *
+ u_dim1], ldu, &c_b1, &wv[wv_offset], ldwv);
+ zlacpy_("ALL", &jlen, &nu, &wv[wv_offset], ldwv, &z__[
+ jrow + (incol + k1) * z_dim1], ldz)
+ ;
+/* L170: */
+ }
+ }
+ } else {
+
+/* ==== Updates exploiting U's 2-by-2 block structure. */
+/* . (I2, I4, J2, J4 are the last rows and columns */
+/* . of the blocks.) ==== */
+
+ i2 = (kdu + 1) / 2;
+ i4 = kdu;
+ j2 = i4 - i2;
+ j4 = kdu;
+
+/* ==== KZS and KNZ deal with the band of zeros */
+/* . along the diagonal of one of the triangular */
+/* . blocks. ==== */
+
+ kzs = j4 - j2 - (ns + 1);
+ knz = ns + 1;
+
+/* ==== Horizontal multiply ==== */
+
+ i__4 = jbot;
+ i__3 = *nh;
+ for (jcol = min(ndcol,*kbot) + 1; i__3 < 0 ? jcol >= i__4 :
+ jcol <= i__4; jcol += i__3) {
+/* Computing MIN */
+ i__5 = *nh, i__7 = jbot - jcol + 1;
+ jlen = min(i__5,i__7);
+
+/* ==== Copy bottom of H to top+KZS of scratch ==== */
+/* (The first KZS rows get multiplied by zero.) ==== */
+
+ zlacpy_("ALL", &knz, &jlen, &h__[incol + 1 + j2 + jcol *
+ h_dim1], ldh, &wh[kzs + 1 + wh_dim1], ldwh);
+
+/* ==== Multiply by U21' ==== */
+
+ zlaset_("ALL", &kzs, &jlen, &c_b1, &c_b1, &wh[wh_offset],
+ ldwh);
+ ztrmm_("L", "U", "C", "N", &knz, &jlen, &c_b2, &u[j2 + 1
+ + (kzs + 1) * u_dim1], ldu, &wh[kzs + 1 + wh_dim1]
+, ldwh);
+
+/* ==== Multiply top of H by U11' ==== */
+
+ zgemm_("C", "N", &i2, &jlen, &j2, &c_b2, &u[u_offset],
+ ldu, &h__[incol + 1 + jcol * h_dim1], ldh, &c_b2,
+ &wh[wh_offset], ldwh);
+
+/* ==== Copy top of H to bottom of WH ==== */
+
+ zlacpy_("ALL", &j2, &jlen, &h__[incol + 1 + jcol * h_dim1]
+, ldh, &wh[i2 + 1 + wh_dim1], ldwh);
+
+/* ==== Multiply by U21' ==== */
+
+ ztrmm_("L", "L", "C", "N", &j2, &jlen, &c_b2, &u[(i2 + 1)
+ * u_dim1 + 1], ldu, &wh[i2 + 1 + wh_dim1], ldwh);
+
+/* ==== Multiply by U22 ==== */
+
+ i__5 = i4 - i2;
+ i__7 = j4 - j2;
+ zgemm_("C", "N", &i__5, &jlen, &i__7, &c_b2, &u[j2 + 1 + (
+ i2 + 1) * u_dim1], ldu, &h__[incol + 1 + j2 +
+ jcol * h_dim1], ldh, &c_b2, &wh[i2 + 1 + wh_dim1],
+ ldwh);
+
+/* ==== Copy it back ==== */
+
+ zlacpy_("ALL", &kdu, &jlen, &wh[wh_offset], ldwh, &h__[
+ incol + 1 + jcol * h_dim1], ldh);
+/* L180: */
+ }
+
+/* ==== Vertical multiply ==== */
+
+ i__3 = max(incol,*ktop) - 1;
+ i__4 = *nv;
+ for (jrow = jtop; i__4 < 0 ? jrow >= i__3 : jrow <= i__3;
+ jrow += i__4) {
+/* Computing MIN */
+ i__5 = *nv, i__7 = max(incol,*ktop) - jrow;
+ jlen = min(i__5,i__7);
+
+/* ==== Copy right of H to scratch (the first KZS */
+/* . columns get multiplied by zero) ==== */
+
+ zlacpy_("ALL", &jlen, &knz, &h__[jrow + (incol + 1 + j2) *
+ h_dim1], ldh, &wv[(kzs + 1) * wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U21 ==== */
+
+ zlaset_("ALL", &jlen, &kzs, &c_b1, &c_b1, &wv[wv_offset],
+ ldwv);
+ ztrmm_("R", "U", "N", "N", &jlen, &knz, &c_b2, &u[j2 + 1
+ + (kzs + 1) * u_dim1], ldu, &wv[(kzs + 1) *
+ wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U11 ==== */
+
+ zgemm_("N", "N", &jlen, &i2, &j2, &c_b2, &h__[jrow + (
+ incol + 1) * h_dim1], ldh, &u[u_offset], ldu, &
+ c_b2, &wv[wv_offset], ldwv);
+
+/* ==== Copy left of H to right of scratch ==== */
+
+ zlacpy_("ALL", &jlen, &j2, &h__[jrow + (incol + 1) *
+ h_dim1], ldh, &wv[(i2 + 1) * wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U21 ==== */
+
+ i__5 = i4 - i2;
+ ztrmm_("R", "L", "N", "N", &jlen, &i__5, &c_b2, &u[(i2 +
+ 1) * u_dim1 + 1], ldu, &wv[(i2 + 1) * wv_dim1 + 1]
+, ldwv);
+
+/* ==== Multiply by U22 ==== */
+
+ i__5 = i4 - i2;
+ i__7 = j4 - j2;
+ zgemm_("N", "N", &jlen, &i__5, &i__7, &c_b2, &h__[jrow + (
+ incol + 1 + j2) * h_dim1], ldh, &u[j2 + 1 + (i2 +
+ 1) * u_dim1], ldu, &c_b2, &wv[(i2 + 1) * wv_dim1
+ + 1], ldwv);
+
+/* ==== Copy it back ==== */
+
+ zlacpy_("ALL", &jlen, &kdu, &wv[wv_offset], ldwv, &h__[
+ jrow + (incol + 1) * h_dim1], ldh);
+/* L190: */
+ }
+
+/* ==== Multiply Z (also vertical) ==== */
+
+ if (*wantz) {
+ i__4 = *ihiz;
+ i__3 = *nv;
+ for (jrow = *iloz; i__3 < 0 ? jrow >= i__4 : jrow <= i__4;
+ jrow += i__3) {
+/* Computing MIN */
+ i__5 = *nv, i__7 = *ihiz - jrow + 1;
+ jlen = min(i__5,i__7);
+
+/* ==== Copy right of Z to left of scratch (first */
+/* . KZS columns get multiplied by zero) ==== */
+
+ zlacpy_("ALL", &jlen, &knz, &z__[jrow + (incol + 1 +
+ j2) * z_dim1], ldz, &wv[(kzs + 1) * wv_dim1 +
+ 1], ldwv);
+
+/* ==== Multiply by U12 ==== */
+
+ zlaset_("ALL", &jlen, &kzs, &c_b1, &c_b1, &wv[
+ wv_offset], ldwv);
+ ztrmm_("R", "U", "N", "N", &jlen, &knz, &c_b2, &u[j2
+ + 1 + (kzs + 1) * u_dim1], ldu, &wv[(kzs + 1)
+ * wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U11 ==== */
+
+ zgemm_("N", "N", &jlen, &i2, &j2, &c_b2, &z__[jrow + (
+ incol + 1) * z_dim1], ldz, &u[u_offset], ldu,
+ &c_b2, &wv[wv_offset], ldwv);
+
+/* ==== Copy left of Z to right of scratch ==== */
+
+ zlacpy_("ALL", &jlen, &j2, &z__[jrow + (incol + 1) *
+ z_dim1], ldz, &wv[(i2 + 1) * wv_dim1 + 1],
+ ldwv);
+
+/* ==== Multiply by U21 ==== */
+
+ i__5 = i4 - i2;
+ ztrmm_("R", "L", "N", "N", &jlen, &i__5, &c_b2, &u[(
+ i2 + 1) * u_dim1 + 1], ldu, &wv[(i2 + 1) *
+ wv_dim1 + 1], ldwv);
+
+/* ==== Multiply by U22 ==== */
+
+ i__5 = i4 - i2;
+ i__7 = j4 - j2;
+ zgemm_("N", "N", &jlen, &i__5, &i__7, &c_b2, &z__[
+ jrow + (incol + 1 + j2) * z_dim1], ldz, &u[j2
+ + 1 + (i2 + 1) * u_dim1], ldu, &c_b2, &wv[(i2
+ + 1) * wv_dim1 + 1], ldwv);
+
+/* ==== Copy the result back to Z ==== */
+
+ zlacpy_("ALL", &jlen, &kdu, &wv[wv_offset], ldwv, &
+ z__[jrow + (incol + 1) * z_dim1], ldz);
+/* L200: */
+ }
+ }
+ }
+ }
+/* L210: */
+ }
+
+/* ==== End of ZLAQR5 ==== */
+
+ return 0;
+} /* zlaqr5_ */
diff --git a/contrib/libs/clapack/zlaqsb.c b/contrib/libs/clapack/zlaqsb.c
new file mode 100644
index 0000000000..0db434917f
--- /dev/null
+++ b/contrib/libs/clapack/zlaqsb.c
@@ -0,0 +1,193 @@
+/* zlaqsb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlaqsb_(char *uplo, integer *n, integer *kd,
+ doublecomplex *ab, integer *ldab, doublereal *s, doublereal *scond,
+ doublereal *amax, char *equed)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal cj, large;
+ extern logical lsame_(char *, char *);
+ doublereal small;
+ extern doublereal dlamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAQSB equilibrates a symmetric band matrix A using the scaling */
+/* factors in the vector S. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of super-diagonals of the matrix A if UPLO = 'U', */
+/* or the number of sub-diagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) COMPLEX*16 array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the symmetric band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/*          On exit, if EQUED = 'Y', the equilibrated band matrix: */
+/*          diag(S) * A * diag(S), stored in the same format */
+/*          as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* S (input) DOUBLE PRECISION array, dimension (N) */
+/* The scale factors for A. */
+
+/* SCOND (input) DOUBLE PRECISION */
+/* Ratio of the smallest S(i) to the largest S(i). */
+
+/* AMAX (input) DOUBLE PRECISION */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies whether or not equilibration was done. */
+/* = 'N': No equilibration. */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if scaling should be done */
+/* based on the ratio of the scaling factors. If SCOND < THRESH, */
+/* scaling is done. */
+
+/* LARGE and SMALL are threshold values used to decide if scaling should */
+/* be done based on the absolute size of the largest matrix element. */
+/* If AMAX > LARGE or AMAX < SMALL, scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --s;
+
+ /* Function Body */
+ if (*n <= 0) {
+ *(unsigned char *)equed = 'N';
+ return 0;
+ }
+
+/* Initialize LARGE and SMALL. */
+
+ small = dlamch_("Safe minimum") / dlamch_("Precision");
+ large = 1. / small;
+
+ if (*scond >= .1 && *amax >= small && *amax <= large) {
+
+/* No equilibration */
+
+ *(unsigned char *)equed = 'N';
+ } else {
+
+/* Replace A by diag(S) * A * diag(S). */
+
+ if (lsame_(uplo, "U")) {
+
+/* Upper triangle of A is stored in band format. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+/* Computing MAX */
+ i__2 = 1, i__3 = j - *kd;
+ i__4 = j;
+ for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
+ i__2 = *kd + 1 + i__ - j + j * ab_dim1;
+ d__1 = cj * s[i__];
+ i__3 = *kd + 1 + i__ - j + j * ab_dim1;
+ z__1.r = d__1 * ab[i__3].r, z__1.i = d__1 * ab[i__3].i;
+ ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+
+/* Lower triangle of A is stored. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+/* Computing MIN */
+ i__2 = *n, i__3 = j + *kd;
+ i__4 = min(i__2,i__3);
+ for (i__ = j; i__ <= i__4; ++i__) {
+ i__2 = i__ + 1 - j + j * ab_dim1;
+ d__1 = cj * s[i__];
+ i__3 = i__ + 1 - j + j * ab_dim1;
+ z__1.r = d__1 * ab[i__3].r, z__1.i = d__1 * ab[i__3].i;
+ ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ *(unsigned char *)equed = 'Y';
+ }
+
+ return 0;
+
+/* End of ZLAQSB */
+
+} /* zlaqsb_ */
diff --git a/contrib/libs/clapack/zlaqsp.c b/contrib/libs/clapack/zlaqsp.c
new file mode 100644
index 0000000000..545b88e439
--- /dev/null
+++ b/contrib/libs/clapack/zlaqsp.c
@@ -0,0 +1,179 @@
+/* zlaqsp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlaqsp_(char *uplo, integer *n, doublecomplex *ap,
+ doublereal *s, doublereal *scond, doublereal *amax, char *equed)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ doublereal d__1;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__, j, jc;
+ doublereal cj, large;
+ extern logical lsame_(char *, char *);
+ doublereal small;
+ extern doublereal dlamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAQSP equilibrates a symmetric matrix A using the scaling factors */
+/* in the vector S. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the equilibrated matrix: diag(S) * A * diag(S), in */
+/* the same storage format as A. */
+
+/* S (input) DOUBLE PRECISION array, dimension (N) */
+/* The scale factors for A. */
+
+/* SCOND (input) DOUBLE PRECISION */
+/* Ratio of the smallest S(i) to the largest S(i). */
+
+/* AMAX (input) DOUBLE PRECISION */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies whether or not equilibration was done. */
+/* = 'N': No equilibration. */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if scaling should be done */
+/* based on the ratio of the scaling factors. If SCOND < THRESH, */
+/* scaling is done. */
+
+/* LARGE and SMALL are threshold values used to decide if scaling should */
+/* be done based on the absolute size of the largest matrix element. */
+/* If AMAX > LARGE or AMAX < SMALL, scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ --s;
+ --ap;
+
+ /* Function Body */
+ if (*n <= 0) {
+ *(unsigned char *)equed = 'N';
+ return 0;
+ }
+
+/* Initialize LARGE and SMALL. */
+
+ small = dlamch_("Safe minimum") / dlamch_("Precision");
+ large = 1. / small;
+
+ if (*scond >= .1 && *amax >= small && *amax <= large) {
+
+/* No equilibration */
+
+ *(unsigned char *)equed = 'N';
+ } else {
+
+/* Replace A by diag(S) * A * diag(S). */
+
+ if (lsame_(uplo, "U")) {
+
+/* Upper triangle of A is stored. */
+
+ jc = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = jc + i__ - 1;
+ d__1 = cj * s[i__];
+ i__4 = jc + i__ - 1;
+ z__1.r = d__1 * ap[i__4].r, z__1.i = d__1 * ap[i__4].i;
+ ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+/* L10: */
+ }
+ jc += j;
+/* L20: */
+ }
+ } else {
+
+/* Lower triangle of A is stored. */
+
+ jc = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = jc + i__ - j;
+ d__1 = cj * s[i__];
+ i__4 = jc + i__ - j;
+ z__1.r = d__1 * ap[i__4].r, z__1.i = d__1 * ap[i__4].i;
+ ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+/* L30: */
+ }
+ jc = jc + *n - j + 1;
+/* L40: */
+ }
+ }
+ *(unsigned char *)equed = 'Y';
+ }
+
+ return 0;
+
+/* End of ZLAQSP */
+
+} /* zlaqsp_ */
diff --git a/contrib/libs/clapack/zlaqsy.c b/contrib/libs/clapack/zlaqsy.c
new file mode 100644
index 0000000000..8dc634ed8e
--- /dev/null
+++ b/contrib/libs/clapack/zlaqsy.c
@@ -0,0 +1,183 @@
+/* zlaqsy.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlaqsy_(char *uplo, integer *n, doublecomplex *a,
+ integer *lda, doublereal *s, doublereal *scond, doublereal *amax,
+ char *equed)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal cj, large;
+ extern logical lsame_(char *, char *);
+ doublereal small;
+ extern doublereal dlamch_(char *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAQSY equilibrates a symmetric matrix A using the scaling factors */
+/* in the vector S. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if EQUED = 'Y', the equilibrated matrix: */
+/* diag(S) * A * diag(S). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(N,1). */
+
+/* S (input) DOUBLE PRECISION array, dimension (N) */
+/* The scale factors for A. */
+
+/* SCOND (input) DOUBLE PRECISION */
+/* Ratio of the smallest S(i) to the largest S(i). */
+
+/* AMAX (input) DOUBLE PRECISION */
+/* Absolute value of largest matrix entry. */
+
+/* EQUED (output) CHARACTER*1 */
+/* Specifies whether or not equilibration was done. */
+/* = 'N': No equilibration. */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+
+/* Internal Parameters */
+/* =================== */
+
+/* THRESH is a threshold value used to decide if scaling should be done */
+/* based on the ratio of the scaling factors. If SCOND < THRESH, */
+/* scaling is done. */
+
+/* LARGE and SMALL are threshold values used to decide if scaling should */
+/* be done based on the absolute size of the largest matrix element. */
+/* If AMAX > LARGE or AMAX < SMALL, scaling is done. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+
+ /* Function Body */
+ if (*n <= 0) {
+ *(unsigned char *)equed = 'N';
+ return 0;
+ }
+
+/* Initialize LARGE and SMALL. */
+
+ small = dlamch_("Safe minimum") / dlamch_("Precision");
+ large = 1. / small;
+
+ if (*scond >= .1 && *amax >= small && *amax <= large) {
+
+/* No equilibration */
+
+ *(unsigned char *)equed = 'N';
+ } else {
+
+/* Replace A by diag(S) * A * diag(S). */
+
+ if (lsame_(uplo, "U")) {
+
+/* Upper triangle of A is stored. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ d__1 = cj * s[i__];
+ i__4 = i__ + j * a_dim1;
+ z__1.r = d__1 * a[i__4].r, z__1.i = d__1 * a[i__4].i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+
+/* Lower triangle of A is stored. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ cj = s[j];
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ d__1 = cj * s[i__];
+ i__4 = i__ + j * a_dim1;
+ z__1.r = d__1 * a[i__4].r, z__1.i = d__1 * a[i__4].i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ *(unsigned char *)equed = 'Y';
+ }
+
+ return 0;
+
+/* End of ZLAQSY */
+
+} /* zlaqsy_ */
diff --git a/contrib/libs/clapack/zlar1v.c b/contrib/libs/clapack/zlar1v.c
new file mode 100644
index 0000000000..15231c9c77
--- /dev/null
+++ b/contrib/libs/clapack/zlar1v.c
@@ -0,0 +1,501 @@
+/* zlar1v.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlar1v_(integer *n, integer *b1, integer *bn, doublereal
+ *lambda, doublereal *d__, doublereal *l, doublereal *ld, doublereal *
+ lld, doublereal *pivmin, doublereal *gaptol, doublecomplex *z__,
+ logical *wantnc, integer *negcnt, doublereal *ztz, doublereal *mingma,
+ integer *r__, integer *isuppz, doublereal *nrminv, doublereal *resid,
+ doublereal *rqcorr, doublereal *work)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ doublereal d__1;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *), sqrt(doublereal);
+
+ /* Local variables */
+ integer i__;
+ doublereal s;
+ integer r1, r2;
+ doublereal eps, tmp;
+ integer neg1, neg2, indp, inds;
+ doublereal dplus;
+ extern doublereal dlamch_(char *);
+ extern logical disnan_(doublereal *);
+ integer indlpl, indumn;
+ doublereal dminus;
+ logical sawnan1, sawnan2;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAR1V computes the (scaled) r-th column of the inverse of */
+/* the sumbmatrix in rows B1 through BN of the tridiagonal matrix */
+/* L D L^T - sigma I. When sigma is close to an eigenvalue, the */
+/* computed vector is an accurate eigenvector. Usually, r corresponds */
+/* to the index where the eigenvector is largest in magnitude. */
+/* The following steps accomplish this computation : */
+/* (a) Stationary qd transform, L D L^T - sigma I = L(+) D(+) L(+)^T, */
+/* (b) Progressive qd transform, L D L^T - sigma I = U(-) D(-) U(-)^T, */
+/* (c) Computation of the diagonal elements of the inverse of */
+/* L D L^T - sigma I by combining the above transforms, and choosing */
+/* r as the index where the diagonal of the inverse is (one of the) */
+/* largest in magnitude. */
+/* (d) Computation of the (scaled) r-th column of the inverse using the */
+/* twisted factorization obtained by combining the top part of the */
+/* the stationary and the bottom part of the progressive transform. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix L D L^T. */
+
+/* B1 (input) INTEGER */
+/* First index of the submatrix of L D L^T. */
+
+/* BN (input) INTEGER */
+/* Last index of the submatrix of L D L^T. */
+
+/* LAMBDA (input) DOUBLE PRECISION */
+/* The shift. In order to compute an accurate eigenvector, */
+/* LAMBDA should be a good approximation to an eigenvalue */
+/* of L D L^T. */
+
+/* L (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of the unit bidiagonal matrix */
+/* L, in elements 1 to N-1. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the diagonal matrix D. */
+
+/* LD (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The n-1 elements L(i)*D(i). */
+
+/* LLD (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The n-1 elements L(i)*L(i)*D(i). */
+
+/* PIVMIN (input) DOUBLE PRECISION */
+/* The minimum pivot in the Sturm sequence. */
+
+/* GAPTOL (input) DOUBLE PRECISION */
+/* Tolerance that indicates when eigenvector entries are negligible */
+/* w.r.t. their contribution to the residual. */
+
+/* Z (input/output) COMPLEX*16 array, dimension (N) */
+/* On input, all entries of Z must be set to 0. */
+/* On output, Z contains the (scaled) r-th column of the */
+/* inverse. The scaling is such that Z(R) equals 1. */
+
+/* WANTNC (input) LOGICAL */
+/* Specifies whether NEGCNT has to be computed. */
+
+/* NEGCNT (output) INTEGER */
+/* If WANTNC is .TRUE. then NEGCNT = the number of pivots < pivmin */
+/* in the matrix factorization L D L^T, and NEGCNT = -1 otherwise. */
+
+/* ZTZ (output) DOUBLE PRECISION */
+/* The square of the 2-norm of Z. */
+
+/* MINGMA (output) DOUBLE PRECISION */
+/* The reciprocal of the largest (in magnitude) diagonal */
+/* element of the inverse of L D L^T - sigma I. */
+
+/* R (input/output) INTEGER */
+/* The twist index for the twisted factorization used to */
+/* compute Z. */
+/* On input, 0 <= R <= N. If R is input as 0, R is set to */
+/* the index where (L D L^T - sigma I)^{-1} is largest */
+/* in magnitude. If 1 <= R <= N, R is unchanged. */
+/* On output, R contains the twist index used to compute Z. */
+/* Ideally, R designates the position of the maximum entry in the */
+/* eigenvector. */
+
+/* ISUPPZ (output) INTEGER array, dimension (2) */
+/* The support of the vector in Z, i.e., the vector Z is */
+/* nonzero only in elements ISUPPZ(1) through ISUPPZ( 2 ). */
+
+/* NRMINV (output) DOUBLE PRECISION */
+/* NRMINV = 1/SQRT( ZTZ ) */
+
+/* RESID (output) DOUBLE PRECISION */
+/* The residual of the FP vector. */
+/* RESID = ABS( MINGMA )/SQRT( ZTZ ) */
+
+/* RQCORR (output) DOUBLE PRECISION */
+/* The Rayleigh Quotient correction to LAMBDA. */
+/* RQCORR = MINGMA*TMP */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (4*N) */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --work;
+ --isuppz;
+ --z__;
+ --lld;
+ --ld;
+ --l;
+ --d__;
+
+ /* Function Body */
+ eps = dlamch_("Precision");
+ if (*r__ == 0) {
+ r1 = *b1;
+ r2 = *bn;
+ } else {
+ r1 = *r__;
+ r2 = *r__;
+ }
+/* Storage for LPLUS */
+ indlpl = 0;
+/* Storage for UMINUS */
+ indumn = *n;
+ inds = (*n << 1) + 1;
+ indp = *n * 3 + 1;
+ if (*b1 == 1) {
+ work[inds] = 0.;
+ } else {
+ work[inds + *b1 - 1] = lld[*b1 - 1];
+ }
+
+/* Compute the stationary transform (using the differential form) */
+/* until the index R2. */
+
+ sawnan1 = FALSE_;
+ neg1 = 0;
+ s = work[inds + *b1 - 1] - *lambda;
+ i__1 = r1 - 1;
+ for (i__ = *b1; i__ <= i__1; ++i__) {
+ dplus = d__[i__] + s;
+ work[indlpl + i__] = ld[i__] / dplus;
+ if (dplus < 0.) {
+ ++neg1;
+ }
+ work[inds + i__] = s * work[indlpl + i__] * l[i__];
+ s = work[inds + i__] - *lambda;
+/* L50: */
+ }
+ sawnan1 = disnan_(&s);
+ if (sawnan1) {
+ goto L60;
+ }
+ i__1 = r2 - 1;
+ for (i__ = r1; i__ <= i__1; ++i__) {
+ dplus = d__[i__] + s;
+ work[indlpl + i__] = ld[i__] / dplus;
+ work[inds + i__] = s * work[indlpl + i__] * l[i__];
+ s = work[inds + i__] - *lambda;
+/* L51: */
+ }
+ sawnan1 = disnan_(&s);
+
+L60:
+ if (sawnan1) {
+/* Runs a slower version of the above loop if a NaN is detected */
+ neg1 = 0;
+ s = work[inds + *b1 - 1] - *lambda;
+ i__1 = r1 - 1;
+ for (i__ = *b1; i__ <= i__1; ++i__) {
+ dplus = d__[i__] + s;
+ if (abs(dplus) < *pivmin) {
+ dplus = -(*pivmin);
+ }
+ work[indlpl + i__] = ld[i__] / dplus;
+ if (dplus < 0.) {
+ ++neg1;
+ }
+ work[inds + i__] = s * work[indlpl + i__] * l[i__];
+ if (work[indlpl + i__] == 0.) {
+ work[inds + i__] = lld[i__];
+ }
+ s = work[inds + i__] - *lambda;
+/* L70: */
+ }
+ i__1 = r2 - 1;
+ for (i__ = r1; i__ <= i__1; ++i__) {
+ dplus = d__[i__] + s;
+ if (abs(dplus) < *pivmin) {
+ dplus = -(*pivmin);
+ }
+ work[indlpl + i__] = ld[i__] / dplus;
+ work[inds + i__] = s * work[indlpl + i__] * l[i__];
+ if (work[indlpl + i__] == 0.) {
+ work[inds + i__] = lld[i__];
+ }
+ s = work[inds + i__] - *lambda;
+/* L71: */
+ }
+ }
+
+/* Compute the progressive transform (using the differential form) */
+/* until the index R1 */
+
+ sawnan2 = FALSE_;
+ neg2 = 0;
+ work[indp + *bn - 1] = d__[*bn] - *lambda;
+ i__1 = r1;
+ for (i__ = *bn - 1; i__ >= i__1; --i__) {
+ dminus = lld[i__] + work[indp + i__];
+ tmp = d__[i__] / dminus;
+ if (dminus < 0.) {
+ ++neg2;
+ }
+ work[indumn + i__] = l[i__] * tmp;
+ work[indp + i__ - 1] = work[indp + i__] * tmp - *lambda;
+/* L80: */
+ }
+ tmp = work[indp + r1 - 1];
+ sawnan2 = disnan_(&tmp);
+ if (sawnan2) {
+/* Runs a slower version of the above loop if a NaN is detected */
+ neg2 = 0;
+ i__1 = r1;
+ for (i__ = *bn - 1; i__ >= i__1; --i__) {
+ dminus = lld[i__] + work[indp + i__];
+ if (abs(dminus) < *pivmin) {
+ dminus = -(*pivmin);
+ }
+ tmp = d__[i__] / dminus;
+ if (dminus < 0.) {
+ ++neg2;
+ }
+ work[indumn + i__] = l[i__] * tmp;
+ work[indp + i__ - 1] = work[indp + i__] * tmp - *lambda;
+ if (tmp == 0.) {
+ work[indp + i__ - 1] = d__[i__] - *lambda;
+ }
+/* L100: */
+ }
+ }
+
+/* Find the index (from R1 to R2) of the largest (in magnitude) */
+/* diagonal element of the inverse */
+
+ *mingma = work[inds + r1 - 1] + work[indp + r1 - 1];
+ if (*mingma < 0.) {
+ ++neg1;
+ }
+ if (*wantnc) {
+ *negcnt = neg1 + neg2;
+ } else {
+ *negcnt = -1;
+ }
+ if (abs(*mingma) == 0.) {
+ *mingma = eps * work[inds + r1 - 1];
+ }
+ *r__ = r1;
+ i__1 = r2 - 1;
+ for (i__ = r1; i__ <= i__1; ++i__) {
+ tmp = work[inds + i__] + work[indp + i__];
+ if (tmp == 0.) {
+ tmp = eps * work[inds + i__];
+ }
+ if (abs(tmp) <= abs(*mingma)) {
+ *mingma = tmp;
+ *r__ = i__ + 1;
+ }
+/* L110: */
+ }
+
+/* Compute the FP vector: solve N^T v = e_r */
+
+ isuppz[1] = *b1;
+ isuppz[2] = *bn;
+ i__1 = *r__;
+ z__[i__1].r = 1., z__[i__1].i = 0.;
+ *ztz = 1.;
+
+/* Compute the FP vector upwards from R */
+
+ if (! sawnan1 && ! sawnan2) {
+ i__1 = *b1;
+ for (i__ = *r__ - 1; i__ >= i__1; --i__) {
+ i__2 = i__;
+ i__3 = indlpl + i__;
+ i__4 = i__ + 1;
+ z__2.r = work[i__3] * z__[i__4].r, z__2.i = work[i__3] * z__[i__4]
+ .i;
+ z__1.r = -z__2.r, z__1.i = -z__2.i;
+ z__[i__2].r = z__1.r, z__[i__2].i = z__1.i;
+ if ((z_abs(&z__[i__]) + z_abs(&z__[i__ + 1])) * (d__1 = ld[i__],
+ abs(d__1)) < *gaptol) {
+ i__2 = i__;
+ z__[i__2].r = 0., z__[i__2].i = 0.;
+ isuppz[1] = i__ + 1;
+ goto L220;
+ }
+ i__2 = i__;
+ i__3 = i__;
+ z__1.r = z__[i__2].r * z__[i__3].r - z__[i__2].i * z__[i__3].i,
+ z__1.i = z__[i__2].r * z__[i__3].i + z__[i__2].i * z__[
+ i__3].r;
+ *ztz += z__1.r;
+/* L210: */
+ }
+L220:
+ ;
+ } else {
+/* Run slower loop if NaN occurred. */
+ i__1 = *b1;
+ for (i__ = *r__ - 1; i__ >= i__1; --i__) {
+ i__2 = i__ + 1;
+ if (z__[i__2].r == 0. && z__[i__2].i == 0.) {
+ i__2 = i__;
+ d__1 = -(ld[i__ + 1] / ld[i__]);
+ i__3 = i__ + 2;
+ z__1.r = d__1 * z__[i__3].r, z__1.i = d__1 * z__[i__3].i;
+ z__[i__2].r = z__1.r, z__[i__2].i = z__1.i;
+ } else {
+ i__2 = i__;
+ i__3 = indlpl + i__;
+ i__4 = i__ + 1;
+ z__2.r = work[i__3] * z__[i__4].r, z__2.i = work[i__3] * z__[
+ i__4].i;
+ z__1.r = -z__2.r, z__1.i = -z__2.i;
+ z__[i__2].r = z__1.r, z__[i__2].i = z__1.i;
+ }
+ if ((z_abs(&z__[i__]) + z_abs(&z__[i__ + 1])) * (d__1 = ld[i__],
+ abs(d__1)) < *gaptol) {
+ i__2 = i__;
+ z__[i__2].r = 0., z__[i__2].i = 0.;
+ isuppz[1] = i__ + 1;
+ goto L240;
+ }
+ i__2 = i__;
+ i__3 = i__;
+ z__1.r = z__[i__2].r * z__[i__3].r - z__[i__2].i * z__[i__3].i,
+ z__1.i = z__[i__2].r * z__[i__3].i + z__[i__2].i * z__[
+ i__3].r;
+ *ztz += z__1.r;
+/* L230: */
+ }
+L240:
+ ;
+ }
+/* Compute the FP vector downwards from R in blocks of size BLKSIZ */
+ if (! sawnan1 && ! sawnan2) {
+ i__1 = *bn - 1;
+ for (i__ = *r__; i__ <= i__1; ++i__) {
+ i__2 = i__ + 1;
+ i__3 = indumn + i__;
+ i__4 = i__;
+ z__2.r = work[i__3] * z__[i__4].r, z__2.i = work[i__3] * z__[i__4]
+ .i;
+ z__1.r = -z__2.r, z__1.i = -z__2.i;
+ z__[i__2].r = z__1.r, z__[i__2].i = z__1.i;
+ if ((z_abs(&z__[i__]) + z_abs(&z__[i__ + 1])) * (d__1 = ld[i__],
+ abs(d__1)) < *gaptol) {
+ i__2 = i__ + 1;
+ z__[i__2].r = 0., z__[i__2].i = 0.;
+ isuppz[2] = i__;
+ goto L260;
+ }
+ i__2 = i__ + 1;
+ i__3 = i__ + 1;
+ z__1.r = z__[i__2].r * z__[i__3].r - z__[i__2].i * z__[i__3].i,
+ z__1.i = z__[i__2].r * z__[i__3].i + z__[i__2].i * z__[
+ i__3].r;
+ *ztz += z__1.r;
+/* L250: */
+ }
+L260:
+ ;
+ } else {
+/* Run slower loop if NaN occurred. */
+ i__1 = *bn - 1;
+ for (i__ = *r__; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ if (z__[i__2].r == 0. && z__[i__2].i == 0.) {
+ i__2 = i__ + 1;
+ d__1 = -(ld[i__ - 1] / ld[i__]);
+ i__3 = i__ - 1;
+ z__1.r = d__1 * z__[i__3].r, z__1.i = d__1 * z__[i__3].i;
+ z__[i__2].r = z__1.r, z__[i__2].i = z__1.i;
+ } else {
+ i__2 = i__ + 1;
+ i__3 = indumn + i__;
+ i__4 = i__;
+ z__2.r = work[i__3] * z__[i__4].r, z__2.i = work[i__3] * z__[
+ i__4].i;
+ z__1.r = -z__2.r, z__1.i = -z__2.i;
+ z__[i__2].r = z__1.r, z__[i__2].i = z__1.i;
+ }
+ if ((z_abs(&z__[i__]) + z_abs(&z__[i__ + 1])) * (d__1 = ld[i__],
+ abs(d__1)) < *gaptol) {
+ i__2 = i__ + 1;
+ z__[i__2].r = 0., z__[i__2].i = 0.;
+ isuppz[2] = i__;
+ goto L280;
+ }
+ i__2 = i__ + 1;
+ i__3 = i__ + 1;
+ z__1.r = z__[i__2].r * z__[i__3].r - z__[i__2].i * z__[i__3].i,
+ z__1.i = z__[i__2].r * z__[i__3].i + z__[i__2].i * z__[
+ i__3].r;
+ *ztz += z__1.r;
+/* L270: */
+ }
+L280:
+ ;
+ }
+
+/* Compute quantities for convergence test */
+
+ tmp = 1. / *ztz;
+ *nrminv = sqrt(tmp);
+ *resid = abs(*mingma) * *nrminv;
+ *rqcorr = *mingma * tmp;
+
+
+ return 0;
+
+/* End of ZLAR1V */
+
+} /* zlar1v_ */
diff --git a/contrib/libs/clapack/zlar2v.c b/contrib/libs/clapack/zlar2v.c
new file mode 100644
index 0000000000..ce51144c1c
--- /dev/null
+++ b/contrib/libs/clapack/zlar2v.c
@@ -0,0 +1,160 @@
+/* zlar2v.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlar2v_(integer *n, doublecomplex *x, doublecomplex *y,
+ doublecomplex *z__, integer *incx, doublereal *c__, doublecomplex *s,
+ integer *incc)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ doublereal d__1;
+ doublecomplex z__1, z__2, z__3, z__4, z__5;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__;
+ doublecomplex t2, t3, t4;
+ doublereal t5, t6;
+ integer ic;
+ doublereal ci;
+ doublecomplex si;
+ integer ix;
+ doublereal xi, yi;
+ doublecomplex zi;
+ doublereal t1i, t1r, sii, zii, sir, zir;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAR2V applies a vector of complex plane rotations with real cosines */
+/* from both sides to a sequence of 2-by-2 complex Hermitian matrices, */
+/* defined by the elements of the vectors x, y and z. For i = 1,2,...,n */
+
+/* ( x(i) z(i) ) := */
+/* ( conjg(z(i)) y(i) ) */
+
+/* ( c(i) conjg(s(i)) ) ( x(i) z(i) ) ( c(i) -conjg(s(i)) ) */
+/* ( -s(i) c(i) ) ( conjg(z(i)) y(i) ) ( s(i) c(i) ) */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of plane rotations to be applied. */
+
+/* X (input/output) COMPLEX*16 array, dimension (1+(N-1)*INCX) */
+/* The vector x; the elements of x are assumed to be real. */
+
+/* Y (input/output) COMPLEX*16 array, dimension (1+(N-1)*INCX) */
+/* The vector y; the elements of y are assumed to be real. */
+
+/* Z (input/output) COMPLEX*16 array, dimension (1+(N-1)*INCX) */
+/* The vector z. */
+
+/* INCX (input) INTEGER */
+/* The increment between elements of X, Y and Z. INCX > 0. */
+
+/* C (input) DOUBLE PRECISION array, dimension (1+(N-1)*INCC) */
+/* The cosines of the plane rotations. */
+
+/* S (input) COMPLEX*16 array, dimension (1+(N-1)*INCC) */
+/* The sines of the plane rotations. */
+
+/* INCC (input) INTEGER */
+/* The increment between elements of C and S. INCC > 0. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --s;
+ --c__;
+ --z__;
+ --y;
+ --x;
+
+ /* Function Body */
+ ix = 1;
+ ic = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ xi = x[i__2].r;
+ i__2 = ix;
+ yi = y[i__2].r;
+ i__2 = ix;
+ zi.r = z__[i__2].r, zi.i = z__[i__2].i;
+ zir = zi.r;
+ zii = d_imag(&zi);
+ ci = c__[ic];
+ i__2 = ic;
+ si.r = s[i__2].r, si.i = s[i__2].i;
+ sir = si.r;
+ sii = d_imag(&si);
+ t1r = sir * zir - sii * zii;
+ t1i = sir * zii + sii * zir;
+ z__1.r = ci * zi.r, z__1.i = ci * zi.i;
+ t2.r = z__1.r, t2.i = z__1.i;
+ d_cnjg(&z__3, &si);
+ z__2.r = xi * z__3.r, z__2.i = xi * z__3.i;
+ z__1.r = t2.r - z__2.r, z__1.i = t2.i - z__2.i;
+ t3.r = z__1.r, t3.i = z__1.i;
+ d_cnjg(&z__2, &t2);
+ z__3.r = yi * si.r, z__3.i = yi * si.i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ t4.r = z__1.r, t4.i = z__1.i;
+ t5 = ci * xi + t1r;
+ t6 = ci * yi - t1r;
+ i__2 = ix;
+ d__1 = ci * t5 + (sir * t4.r + sii * d_imag(&t4));
+ x[i__2].r = d__1, x[i__2].i = 0.;
+ i__2 = ix;
+ d__1 = ci * t6 - (sir * t3.r - sii * d_imag(&t3));
+ y[i__2].r = d__1, y[i__2].i = 0.;
+ i__2 = ix;
+ z__2.r = ci * t3.r, z__2.i = ci * t3.i;
+ d_cnjg(&z__4, &si);
+ z__5.r = t6, z__5.i = t1i;
+ z__3.r = z__4.r * z__5.r - z__4.i * z__5.i, z__3.i = z__4.r * z__5.i
+ + z__4.i * z__5.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ z__[i__2].r = z__1.r, z__[i__2].i = z__1.i;
+ ix += *incx;
+ ic += *incc;
+/* L10: */
+ }
+ return 0;
+
+/* End of ZLAR2V */
+
+} /* zlar2v_ */
diff --git a/contrib/libs/clapack/zlarcm.c b/contrib/libs/clapack/zlarcm.c
new file mode 100644
index 0000000000..2dd2f0ea51
--- /dev/null
+++ b/contrib/libs/clapack/zlarcm.c
@@ -0,0 +1,177 @@
+/* zlarcm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b6 = 1.;
+static doublereal c_b7 = 0.;
+
+/* Subroutine */ int zlarcm_(integer *m, integer *n, doublereal *a, integer *
+ lda, doublecomplex *b, integer *ldb, doublecomplex *c__, integer *ldc,
+ doublereal *rwork)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5;
+ doublereal d__1;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, l;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLARCM performs a very simple matrix-matrix multiplication: */
+/* C := A * B, */
+/* where A is M by M and real; B is M by N and complex; */
+/* C is M by N and complex. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A and of the matrix C. */
+/* M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns and rows of the matrix B and */
+/* the number of columns of the matrix C. */
+/* N >= 0. */
+
+/* A (input) DOUBLE PRECISION array, dimension (LDA, M) */
+/* A contains the M by M matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >=max(1,M). */
+
+/* B (input) DOUBLE PRECISION array, dimension (LDB, N) */
+/* B contains the M by N matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >=max(1,M). */
+
+/* C (input) COMPLEX*16 array, dimension (LDC, N) */
+/* C contains the M by N matrix C. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >=max(1,M). */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (2*M*N) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --rwork;
+
+ /* Function Body */
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[(j - 1) * *m + i__] = b[i__3].r;
+/* L10: */
+ }
+/* L20: */
+ }
+
+ l = *m * *n + 1;
+ dgemm_("N", "N", m, n, m, &c_b6, &a[a_offset], lda, &rwork[1], m, &c_b7, &
+ rwork[l], m);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = l + (j - 1) * *m + i__ - 1;
+ c__[i__3].r = rwork[i__4], c__[i__3].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ rwork[(j - 1) * *m + i__] = d_imag(&b[i__ + j * b_dim1]);
+/* L50: */
+ }
+/* L60: */
+ }
+ dgemm_("N", "N", m, n, m, &c_b6, &a[a_offset], lda, &rwork[1], m, &c_b7, &
+ rwork[l], m);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ d__1 = c__[i__4].r;
+ i__5 = l + (j - 1) * *m + i__ - 1;
+ z__1.r = d__1, z__1.i = rwork[i__5];
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+
+ return 0;
+
+/* End of ZLARCM */
+
+} /* zlarcm_ */
diff --git a/contrib/libs/clapack/zlarf.c b/contrib/libs/clapack/zlarf.c
new file mode 100644
index 0000000000..0b8ad0217a
--- /dev/null
+++ b/contrib/libs/clapack/zlarf.c
@@ -0,0 +1,200 @@
+/* zlarf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static doublecomplex c_b2 = {0.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zlarf_(char *side, integer *m, integer *n, doublecomplex
+ *v, integer *incv, doublecomplex *tau, doublecomplex *c__, integer *
+ ldc, doublecomplex *work)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, i__1;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__;
+ logical applyleft;
+ extern logical lsame_(char *, char *);
+ integer lastc;
+ extern /* Subroutine */ int zgerc_(integer *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *);
+ integer lastv;
+ extern integer ilazlc_(integer *, integer *, doublecomplex *, integer *),
+ ilazlr_(integer *, integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLARF applies a complex elementary reflector H to a complex M-by-N */
+/* matrix C, from either the left or the right. H is represented in the */
+/* form */
+
+/* H = I - tau * v * v' */
+
+/* where tau is a complex scalar and v is a complex vector. */
+
+/* If tau = 0, then H is taken to be the unit matrix. */
+
+/* To apply H' (the conjugate transpose of H), supply conjg(tau) instead */
+/* tau. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': form H * C */
+/* = 'R': form C * H */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* V (input) COMPLEX*16 array, dimension */
+/* (1 + (M-1)*abs(INCV)) if SIDE = 'L' */
+/* or (1 + (N-1)*abs(INCV)) if SIDE = 'R' */
+/* The vector v in the representation of H. V is not used if */
+/* TAU = 0. */
+
+/* INCV (input) INTEGER */
+/* The increment between elements of v. INCV <> 0. */
+
+/* TAU (input) COMPLEX*16 */
+/* The value tau in the representation of H. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by the matrix H * C if SIDE = 'L', */
+/* or C * H if SIDE = 'R'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension */
+/* (N) if SIDE = 'L' */
+/* or (M) if SIDE = 'R' */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --v;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ applyleft = lsame_(side, "L");
+ lastv = 0;
+ lastc = 0;
+ if (tau->r != 0. || tau->i != 0.) {
+/* Set up variables for scanning V. LASTV begins pointing to the end */
+/* of V. */
+ if (applyleft) {
+ lastv = *m;
+ } else {
+ lastv = *n;
+ }
+ if (*incv > 0) {
+ i__ = (lastv - 1) * *incv + 1;
+ } else {
+ i__ = 1;
+ }
+/* Look for the last non-zero row in V. */
+ for(;;) { /* while(complicated condition) */
+ i__1 = i__;
+ if (!(lastv > 0 && (v[i__1].r == 0. && v[i__1].i == 0.)))
+ break;
+ --lastv;
+ i__ -= *incv;
+ }
+ if (applyleft) {
+/* Scan for the last non-zero column in C(1:lastv,:). */
+ lastc = ilazlc_(&lastv, n, &c__[c_offset], ldc);
+ } else {
+/* Scan for the last non-zero row in C(:,1:lastv). */
+ lastc = ilazlr_(m, &lastv, &c__[c_offset], ldc);
+ }
+ }
+/* Note that lastc.eq.0 renders the BLAS operations null; no special */
+/* case is needed at this level. */
+ if (applyleft) {
+
+/* Form H * C */
+
+ if (lastv > 0) {
+
+/* w(1:lastc,1) := C(1:lastv,1:lastc)' * v(1:lastv,1) */
+
+ zgemv_("Conjugate transpose", &lastv, &lastc, &c_b1, &c__[
+ c_offset], ldc, &v[1], incv, &c_b2, &work[1], &c__1);
+
+/* C(1:lastv,1:lastc) := C(...) - v(1:lastv,1) * w(1:lastc,1)' */
+
+ z__1.r = -tau->r, z__1.i = -tau->i;
+ zgerc_(&lastv, &lastc, &z__1, &v[1], incv, &work[1], &c__1, &c__[
+ c_offset], ldc);
+ }
+ } else {
+
+/* Form C * H */
+
+ if (lastv > 0) {
+
+/* w(1:lastc,1) := C(1:lastc,1:lastv) * v(1:lastv,1) */
+
+ zgemv_("No transpose", &lastc, &lastv, &c_b1, &c__[c_offset], ldc,
+ &v[1], incv, &c_b2, &work[1], &c__1);
+
+/* C(1:lastc,1:lastv) := C(...) - w(1:lastc,1) * v(1:lastv,1)' */
+
+ z__1.r = -tau->r, z__1.i = -tau->i;
+ zgerc_(&lastc, &lastv, &z__1, &work[1], &c__1, &v[1], incv, &c__[
+ c_offset], ldc);
+ }
+ }
+ return 0;
+
+/* End of ZLARF */
+
+} /* zlarf_ */
diff --git a/contrib/libs/clapack/zlarfb.c b/contrib/libs/clapack/zlarfb.c
new file mode 100644
index 0000000000..cdd584e457
--- /dev/null
+++ b/contrib/libs/clapack/zlarfb.c
@@ -0,0 +1,839 @@
+/* zlarfb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zlarfb_(char *side, char *trans, char *direct, char *
+ storev, integer *m, integer *n, integer *k, doublecomplex *v, integer
+ *ldv, doublecomplex *t, integer *ldt, doublecomplex *c__, integer *
+ ldc, doublecomplex *work, integer *ldwork)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, t_dim1, t_offset, v_dim1, v_offset, work_dim1,
+ work_offset, i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j;
+ extern logical lsame_(char *, char *);
+ integer lastc;
+ extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ integer lastv;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), ztrmm_(char *, char *, char *, char *
+, integer *, integer *, doublecomplex *, doublecomplex *, integer
+ *, doublecomplex *, integer *);
+ extern integer ilazlc_(integer *, integer *, doublecomplex *, integer *);
+ extern /* Subroutine */ int zlacgv_(integer *, doublecomplex *, integer *)
+ ;
+ extern integer ilazlr_(integer *, integer *, doublecomplex *, integer *);
+ char transt[1];
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLARFB applies a complex block reflector H or its transpose H' to a */
+/* complex M-by-N matrix C, from either the left or the right. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply H or H' from the Left */
+/* = 'R': apply H or H' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply H (No transpose) */
+/* = 'C': apply H' (Conjugate transpose) */
+
+/* DIRECT (input) CHARACTER*1 */
+/* Indicates how H is formed from a product of elementary */
+/* reflectors */
+/* = 'F': H = H(1) H(2) . . . H(k) (Forward) */
+/* = 'B': H = H(k) . . . H(2) H(1) (Backward) */
+
+/* STOREV (input) CHARACTER*1 */
+/* Indicates how the vectors which define the elementary */
+/* reflectors are stored: */
+/* = 'C': Columnwise */
+/* = 'R': Rowwise */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* K (input) INTEGER */
+/* The order of the matrix T (= the number of elementary */
+/* reflectors whose product defines the block reflector). */
+
+/* V (input) COMPLEX*16 array, dimension */
+/* (LDV,K) if STOREV = 'C' */
+/* (LDV,M) if STOREV = 'R' and SIDE = 'L' */
+/* (LDV,N) if STOREV = 'R' and SIDE = 'R' */
+/* The matrix V. See further details. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. */
+/* If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M); */
+/* if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N); */
+/* if STOREV = 'R', LDV >= K. */
+
+/* T (input) COMPLEX*16 array, dimension (LDT,K) */
+/* The triangular K-by-K matrix T in the representation of the */
+/* block reflector. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= K. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by H*C or H'*C or C*H or C*H'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (LDWORK,K) */
+
+/* LDWORK (input) INTEGER */
+/* The leading dimension of the array WORK. */
+/* If SIDE = 'L', LDWORK >= max(1,N); */
+/* if SIDE = 'R', LDWORK >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ work_dim1 = *ldwork;
+ work_offset = 1 + work_dim1;
+ work -= work_offset;
+
+ /* Function Body */
+ if (*m <= 0 || *n <= 0) {
+ return 0;
+ }
+
+ if (lsame_(trans, "N")) {
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ if (lsame_(storev, "C")) {
+
+ if (lsame_(direct, "F")) {
+
+/* Let V = ( V1 ) (first K rows) */
+/* ( V2 ) */
+/* where V1 is unit lower triangular. */
+
+ if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C where C = ( C1 ) */
+/* ( C2 ) */
+
+/* Computing MAX */
+ i__1 = *k, i__2 = ilazlr_(m, k, &v[v_offset], ldv);
+ lastv = max(i__1,i__2);
+ lastc = ilazlc_(&lastv, n, &c__[c_offset], ldc);
+
+/* W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK) */
+
+/* W := C1' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(&lastc, &c__[j + c_dim1], ldc, &work[j * work_dim1
+ + 1], &c__1);
+ zlacgv_(&lastc, &work[j * work_dim1 + 1], &c__1);
+/* L10: */
+ }
+
+/* W := W * V1 */
+
+ ztrmm_("Right", "Lower", "No transpose", "Unit", &lastc, k, &
+ c_b1, &v[v_offset], ldv, &work[work_offset], ldwork);
+ if (lastv > *k) {
+
+/* W := W + C2'*V2 */
+
+ i__1 = lastv - *k;
+ zgemm_("Conjugate transpose", "No transpose", &lastc, k, &
+ i__1, &c_b1, &c__[*k + 1 + c_dim1], ldc, &v[*k +
+ 1 + v_dim1], ldv, &c_b1, &work[work_offset],
+ ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ ztrmm_("Right", "Upper", transt, "Non-unit", &lastc, k, &c_b1,
+ &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V * W' */
+
+ if (*m > *k) {
+
+/* C2 := C2 - V2 * W' */
+
+ i__1 = lastv - *k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "Conjugate transpose", &i__1, &
+ lastc, k, &z__1, &v[*k + 1 + v_dim1], ldv, &work[
+ work_offset], ldwork, &c_b1, &c__[*k + 1 + c_dim1]
+, ldc);
+ }
+
+/* W := W * V1' */
+
+ ztrmm_("Right", "Lower", "Conjugate transpose", "Unit", &
+ lastc, k, &c_b1, &v[v_offset], ldv, &work[work_offset]
+, ldwork)
+ ;
+
+/* C1 := C1 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = lastc;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * c_dim1;
+ i__4 = j + i__ * c_dim1;
+ d_cnjg(&z__2, &work[i__ + j * work_dim1]);
+ z__1.r = c__[i__4].r - z__2.r, z__1.i = c__[i__4].i -
+ z__2.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L20: */
+ }
+/* L30: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' where C = ( C1 C2 ) */
+
+/* Computing MAX */
+ i__1 = *k, i__2 = ilazlr_(n, k, &v[v_offset], ldv);
+ lastv = max(i__1,i__2);
+ lastc = ilazlr_(m, &lastv, &c__[c_offset], ldc);
+
+/* W := C * V = (C1*V1 + C2*V2) (stored in WORK) */
+
+/* W := C1 */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(&lastc, &c__[j * c_dim1 + 1], &c__1, &work[j *
+ work_dim1 + 1], &c__1);
+/* L40: */
+ }
+
+/* W := W * V1 */
+
+ ztrmm_("Right", "Lower", "No transpose", "Unit", &lastc, k, &
+ c_b1, &v[v_offset], ldv, &work[work_offset], ldwork);
+ if (lastv > *k) {
+
+/* W := W + C2 * V2 */
+
+ i__1 = lastv - *k;
+ zgemm_("No transpose", "No transpose", &lastc, k, &i__1, &
+ c_b1, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[*k + 1
+ + v_dim1], ldv, &c_b1, &work[work_offset], ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ ztrmm_("Right", "Upper", trans, "Non-unit", &lastc, k, &c_b1,
+ &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V' */
+
+ if (lastv > *k) {
+
+/* C2 := C2 - W * V2' */
+
+ i__1 = lastv - *k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "Conjugate transpose", &lastc, &
+ i__1, k, &z__1, &work[work_offset], ldwork, &v[*k
+ + 1 + v_dim1], ldv, &c_b1, &c__[(*k + 1) * c_dim1
+ + 1], ldc);
+ }
+
+/* W := W * V1' */
+
+ ztrmm_("Right", "Lower", "Conjugate transpose", "Unit", &
+ lastc, k, &c_b1, &v[v_offset], ldv, &work[work_offset]
+, ldwork)
+ ;
+
+/* C1 := C1 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = lastc;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * work_dim1;
+ z__1.r = c__[i__4].r - work[i__5].r, z__1.i = c__[
+ i__4].i - work[i__5].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+ } else {
+
+/* Let V = ( V1 ) */
+/* ( V2 ) (last K rows) */
+/* where V2 is unit upper triangular. */
+
+ if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C where C = ( C1 ) */
+/* ( C2 ) */
+
+/* Computing MAX */
+ i__1 = *k, i__2 = ilazlr_(m, k, &v[v_offset], ldv);
+ lastv = max(i__1,i__2);
+ lastc = ilazlc_(&lastv, n, &c__[c_offset], ldc);
+
+/* W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK) */
+
+/* W := C2' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(&lastc, &c__[lastv - *k + j + c_dim1], ldc, &work[
+ j * work_dim1 + 1], &c__1);
+ zlacgv_(&lastc, &work[j * work_dim1 + 1], &c__1);
+/* L70: */
+ }
+
+/* W := W * V2 */
+
+ ztrmm_("Right", "Upper", "No transpose", "Unit", &lastc, k, &
+ c_b1, &v[lastv - *k + 1 + v_dim1], ldv, &work[
+ work_offset], ldwork);
+ if (lastv > *k) {
+
+/* W := W + C1'*V1 */
+
+ i__1 = lastv - *k;
+ zgemm_("Conjugate transpose", "No transpose", &lastc, k, &
+ i__1, &c_b1, &c__[c_offset], ldc, &v[v_offset],
+ ldv, &c_b1, &work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ ztrmm_("Right", "Lower", transt, "Non-unit", &lastc, k, &c_b1,
+ &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V * W' */
+
+ if (lastv > *k) {
+
+/* C1 := C1 - V1 * W' */
+
+ i__1 = lastv - *k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "Conjugate transpose", &i__1, &
+ lastc, k, &z__1, &v[v_offset], ldv, &work[
+ work_offset], ldwork, &c_b1, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2' */
+
+ ztrmm_("Right", "Upper", "Conjugate transpose", "Unit", &
+ lastc, k, &c_b1, &v[lastv - *k + 1 + v_dim1], ldv, &
+ work[work_offset], ldwork);
+
+/* C2 := C2 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = lastc;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = lastv - *k + j + i__ * c_dim1;
+ i__4 = lastv - *k + j + i__ * c_dim1;
+ d_cnjg(&z__2, &work[i__ + j * work_dim1]);
+ z__1.r = c__[i__4].r - z__2.r, z__1.i = c__[i__4].i -
+ z__2.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L80: */
+ }
+/* L90: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' where C = ( C1 C2 ) */
+
+/* Computing MAX */
+ i__1 = *k, i__2 = ilazlr_(n, k, &v[v_offset], ldv);
+ lastv = max(i__1,i__2);
+ lastc = ilazlr_(m, &lastv, &c__[c_offset], ldc);
+
+/* W := C * V = (C1*V1 + C2*V2) (stored in WORK) */
+
+/* W := C2 */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(&lastc, &c__[(lastv - *k + j) * c_dim1 + 1], &c__1,
+ &work[j * work_dim1 + 1], &c__1);
+/* L100: */
+ }
+
+/* W := W * V2 */
+
+ ztrmm_("Right", "Upper", "No transpose", "Unit", &lastc, k, &
+ c_b1, &v[lastv - *k + 1 + v_dim1], ldv, &work[
+ work_offset], ldwork);
+ if (lastv > *k) {
+
+/* W := W + C1 * V1 */
+
+ i__1 = lastv - *k;
+ zgemm_("No transpose", "No transpose", &lastc, k, &i__1, &
+ c_b1, &c__[c_offset], ldc, &v[v_offset], ldv, &
+ c_b1, &work[work_offset], ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ ztrmm_("Right", "Lower", trans, "Non-unit", &lastc, k, &c_b1,
+ &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V' */
+
+ if (lastv > *k) {
+
+/* C1 := C1 - W * V1' */
+
+ i__1 = lastv - *k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "Conjugate transpose", &lastc, &
+ i__1, k, &z__1, &work[work_offset], ldwork, &v[
+ v_offset], ldv, &c_b1, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2' */
+
+ ztrmm_("Right", "Upper", "Conjugate transpose", "Unit", &
+ lastc, k, &c_b1, &v[lastv - *k + 1 + v_dim1], ldv, &
+ work[work_offset], ldwork);
+
+/* C2 := C2 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = lastc;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + (lastv - *k + j) * c_dim1;
+ i__4 = i__ + (lastv - *k + j) * c_dim1;
+ i__5 = i__ + j * work_dim1;
+ z__1.r = c__[i__4].r - work[i__5].r, z__1.i = c__[
+ i__4].i - work[i__5].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L110: */
+ }
+/* L120: */
+ }
+ }
+ }
+
+ } else if (lsame_(storev, "R")) {
+
+ if (lsame_(direct, "F")) {
+
+/* Let V = ( V1 V2 ) (V1: first K columns) */
+/* where V1 is unit upper triangular. */
+
+ if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C where C = ( C1 ) */
+/* ( C2 ) */
+
+/* Computing MAX */
+ i__1 = *k, i__2 = ilazlc_(k, m, &v[v_offset], ldv);
+ lastv = max(i__1,i__2);
+ lastc = ilazlc_(&lastv, n, &c__[c_offset], ldc);
+
+/* W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK) */
+
+/* W := C1' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(&lastc, &c__[j + c_dim1], ldc, &work[j * work_dim1
+ + 1], &c__1);
+ zlacgv_(&lastc, &work[j * work_dim1 + 1], &c__1);
+/* L130: */
+ }
+
+/* W := W * V1' */
+
+ ztrmm_("Right", "Upper", "Conjugate transpose", "Unit", &
+ lastc, k, &c_b1, &v[v_offset], ldv, &work[work_offset]
+, ldwork)
+ ;
+ if (lastv > *k) {
+
+/* W := W + C2'*V2' */
+
+ i__1 = lastv - *k;
+ zgemm_("Conjugate transpose", "Conjugate transpose", &
+ lastc, k, &i__1, &c_b1, &c__[*k + 1 + c_dim1],
+ ldc, &v[(*k + 1) * v_dim1 + 1], ldv, &c_b1, &work[
+ work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ ztrmm_("Right", "Upper", transt, "Non-unit", &lastc, k, &c_b1,
+ &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V' * W' */
+
+ if (lastv > *k) {
+
+/* C2 := C2 - V2' * W' */
+
+ i__1 = lastv - *k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("Conjugate transpose", "Conjugate transpose", &
+ i__1, &lastc, k, &z__1, &v[(*k + 1) * v_dim1 + 1],
+ ldv, &work[work_offset], ldwork, &c_b1, &c__[*k
+ + 1 + c_dim1], ldc);
+ }
+
+/* W := W * V1 */
+
+ ztrmm_("Right", "Upper", "No transpose", "Unit", &lastc, k, &
+ c_b1, &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = lastc;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * c_dim1;
+ i__4 = j + i__ * c_dim1;
+ d_cnjg(&z__2, &work[i__ + j * work_dim1]);
+ z__1.r = c__[i__4].r - z__2.r, z__1.i = c__[i__4].i -
+ z__2.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L140: */
+ }
+/* L150: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' where C = ( C1 C2 ) */
+
+/* Computing MAX */
+ i__1 = *k, i__2 = ilazlc_(k, n, &v[v_offset], ldv);
+ lastv = max(i__1,i__2);
+ lastc = ilazlr_(m, &lastv, &c__[c_offset], ldc);
+
+/* W := C * V' = (C1*V1' + C2*V2') (stored in WORK) */
+
+/* W := C1 */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(&lastc, &c__[j * c_dim1 + 1], &c__1, &work[j *
+ work_dim1 + 1], &c__1);
+/* L160: */
+ }
+
+/* W := W * V1' */
+
+ ztrmm_("Right", "Upper", "Conjugate transpose", "Unit", &
+ lastc, k, &c_b1, &v[v_offset], ldv, &work[work_offset]
+, ldwork)
+ ;
+ if (lastv > *k) {
+
+/* W := W + C2 * V2' */
+
+ i__1 = lastv - *k;
+ zgemm_("No transpose", "Conjugate transpose", &lastc, k, &
+ i__1, &c_b1, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[
+ (*k + 1) * v_dim1 + 1], ldv, &c_b1, &work[
+ work_offset], ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ ztrmm_("Right", "Upper", trans, "Non-unit", &lastc, k, &c_b1,
+ &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V */
+
+ if (lastv > *k) {
+
+/* C2 := C2 - W * V2 */
+
+ i__1 = lastv - *k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "No transpose", &lastc, &i__1, k, &
+ z__1, &work[work_offset], ldwork, &v[(*k + 1) *
+ v_dim1 + 1], ldv, &c_b1, &c__[(*k + 1) * c_dim1 +
+ 1], ldc);
+ }
+
+/* W := W * V1 */
+
+ ztrmm_("Right", "Upper", "No transpose", "Unit", &lastc, k, &
+ c_b1, &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = lastc;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * work_dim1;
+ z__1.r = c__[i__4].r - work[i__5].r, z__1.i = c__[
+ i__4].i - work[i__5].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L170: */
+ }
+/* L180: */
+ }
+
+ }
+
+ } else {
+
+/* Let V = ( V1 V2 ) (V2: last K columns) */
+/* where V2 is unit lower triangular. */
+
+ if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C where C = ( C1 ) */
+/* ( C2 ) */
+
+/* Computing MAX */
+ i__1 = *k, i__2 = ilazlc_(k, m, &v[v_offset], ldv);
+ lastv = max(i__1,i__2);
+ lastc = ilazlc_(&lastv, n, &c__[c_offset], ldc);
+
+/* W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK) */
+
+/* W := C2' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(&lastc, &c__[lastv - *k + j + c_dim1], ldc, &work[
+ j * work_dim1 + 1], &c__1);
+ zlacgv_(&lastc, &work[j * work_dim1 + 1], &c__1);
+/* L190: */
+ }
+
+/* W := W * V2' */
+
+ ztrmm_("Right", "Lower", "Conjugate transpose", "Unit", &
+ lastc, k, &c_b1, &v[(lastv - *k + 1) * v_dim1 + 1],
+ ldv, &work[work_offset], ldwork);
+ if (lastv > *k) {
+
+/* W := W + C1'*V1' */
+
+ i__1 = lastv - *k;
+ zgemm_("Conjugate transpose", "Conjugate transpose", &
+ lastc, k, &i__1, &c_b1, &c__[c_offset], ldc, &v[
+ v_offset], ldv, &c_b1, &work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ ztrmm_("Right", "Lower", transt, "Non-unit", &lastc, k, &c_b1,
+ &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V' * W' */
+
+ if (lastv > *k) {
+
+/* C1 := C1 - V1' * W' */
+
+ i__1 = lastv - *k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("Conjugate transpose", "Conjugate transpose", &
+ i__1, &lastc, k, &z__1, &v[v_offset], ldv, &work[
+ work_offset], ldwork, &c_b1, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2 */
+
+ ztrmm_("Right", "Lower", "No transpose", "Unit", &lastc, k, &
+ c_b1, &v[(lastv - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+
+/* C2 := C2 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = lastc;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = lastv - *k + j + i__ * c_dim1;
+ i__4 = lastv - *k + j + i__ * c_dim1;
+ d_cnjg(&z__2, &work[i__ + j * work_dim1]);
+ z__1.r = c__[i__4].r - z__2.r, z__1.i = c__[i__4].i -
+ z__2.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L200: */
+ }
+/* L210: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' where C = ( C1 C2 ) */
+
+/* Computing MAX */
+ i__1 = *k, i__2 = ilazlc_(k, n, &v[v_offset], ldv);
+ lastv = max(i__1,i__2);
+ lastc = ilazlr_(m, &lastv, &c__[c_offset], ldc);
+
+/* W := C * V' = (C1*V1' + C2*V2') (stored in WORK) */
+
+/* W := C2 */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(&lastc, &c__[(lastv - *k + j) * c_dim1 + 1], &c__1,
+ &work[j * work_dim1 + 1], &c__1);
+/* L220: */
+ }
+
+/* W := W * V2' */
+
+ ztrmm_("Right", "Lower", "Conjugate transpose", "Unit", &
+ lastc, k, &c_b1, &v[(lastv - *k + 1) * v_dim1 + 1],
+ ldv, &work[work_offset], ldwork);
+ if (lastv > *k) {
+
+/* W := W + C1 * V1' */
+
+ i__1 = lastv - *k;
+ zgemm_("No transpose", "Conjugate transpose", &lastc, k, &
+ i__1, &c_b1, &c__[c_offset], ldc, &v[v_offset],
+ ldv, &c_b1, &work[work_offset], ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ ztrmm_("Right", "Lower", trans, "Non-unit", &lastc, k, &c_b1,
+ &t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V */
+
+ if (lastv > *k) {
+
+/* C1 := C1 - W * V1 */
+
+ i__1 = lastv - *k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "No transpose", &lastc, &i__1, k, &
+ z__1, &work[work_offset], ldwork, &v[v_offset],
+ ldv, &c_b1, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2 */
+
+ ztrmm_("Right", "Lower", "No transpose", "Unit", &lastc, k, &
+ c_b1, &v[(lastv - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = lastc;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + (lastv - *k + j) * c_dim1;
+ i__4 = i__ + (lastv - *k + j) * c_dim1;
+ i__5 = i__ + j * work_dim1;
+ z__1.r = c__[i__4].r - work[i__5].r, z__1.i = c__[
+ i__4].i - work[i__5].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L230: */
+ }
+/* L240: */
+ }
+
+ }
+
+ }
+ }
+
+ return 0;
+
+/* End of ZLARFB */
+
+} /* zlarfb_ */
diff --git a/contrib/libs/clapack/zlarfg.c b/contrib/libs/clapack/zlarfg.c
new file mode 100644
index 0000000000..d18efe5848
--- /dev/null
+++ b/contrib/libs/clapack/zlarfg.c
@@ -0,0 +1,191 @@
+/* zlarfg.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b5 = {1.,0.};
+
+/* Subroutine */ int zlarfg_(integer *n, doublecomplex *alpha, doublecomplex *
+ x, integer *incx, doublecomplex *tau)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ integer j, knt;
+ doublereal beta, alphi, alphr;
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *);
+ doublereal xnorm;
+ extern doublereal dlapy3_(doublereal *, doublereal *, doublereal *),
+ dznrm2_(integer *, doublecomplex *, integer *), dlamch_(char *);
+ doublereal safmin;
+ extern /* Subroutine */ int zdscal_(integer *, doublereal *,
+ doublecomplex *, integer *);
+ doublereal rsafmn;
+ extern /* Double Complex */ VOID zladiv_(doublecomplex *, doublecomplex *,
+ doublecomplex *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLARFG generates a complex elementary reflector H of order n, such */
+/* that */
+
+/* H' * ( alpha ) = ( beta ), H' * H = I. */
+/* ( x ) ( 0 ) */
+
+/* where alpha and beta are scalars, with beta real, and x is an */
+/* (n-1)-element complex vector. H is represented in the form */
+
+/* H = I - tau * ( 1 ) * ( 1 v' ) , */
+/* ( v ) */
+
+/* where tau is a complex scalar and v is a complex (n-1)-element */
+/* vector. Note that H is not hermitian. */
+
+/* If the elements of x are all zero and alpha is real, then tau = 0 */
+/* and H is taken to be the unit matrix. */
+
+/* Otherwise 1 <= real(tau) <= 2 and abs(tau-1) <= 1 . */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the elementary reflector. */
+
+/* ALPHA (input/output) COMPLEX*16 */
+/* On entry, the value alpha. */
+/* On exit, it is overwritten with the value beta. */
+
+/* X (input/output) COMPLEX*16 array, dimension */
+/* (1+(N-2)*abs(INCX)) */
+/* On entry, the vector x. */
+/* On exit, it is overwritten with the vector v. */
+
+/* INCX (input) INTEGER */
+/* The increment between elements of X. INCX > 0. */
+
+/* TAU (output) COMPLEX*16 */
+/* The value tau. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*n <= 0) {
+ tau->r = 0., tau->i = 0.;
+ return 0;
+ }
+
+ i__1 = *n - 1;
+ xnorm = dznrm2_(&i__1, &x[1], incx);
+ alphr = alpha->r;
+ alphi = d_imag(alpha);
+
+ if (xnorm == 0. && alphi == 0.) {
+
+/* H = I */
+
+ tau->r = 0., tau->i = 0.;
+ } else {
+
+/* general case */
+
+ d__1 = dlapy3_(&alphr, &alphi, &xnorm);
+ beta = -d_sign(&d__1, &alphr);
+ safmin = dlamch_("S") / dlamch_("E");
+ rsafmn = 1. / safmin;
+
+ knt = 0;
+ if (abs(beta) < safmin) {
+
+/* XNORM, BETA may be inaccurate; scale X and recompute them */
+
+L10:
+ ++knt;
+ i__1 = *n - 1;
+ zdscal_(&i__1, &rsafmn, &x[1], incx);
+ beta *= rsafmn;
+ alphi *= rsafmn;
+ alphr *= rsafmn;
+ if (abs(beta) < safmin) {
+ goto L10;
+ }
+
+/* New BETA is at most 1, at least SAFMIN */
+
+ i__1 = *n - 1;
+ xnorm = dznrm2_(&i__1, &x[1], incx);
+ z__1.r = alphr, z__1.i = alphi;
+ alpha->r = z__1.r, alpha->i = z__1.i;
+ d__1 = dlapy3_(&alphr, &alphi, &xnorm);
+ beta = -d_sign(&d__1, &alphr);
+ }
+ d__1 = (beta - alphr) / beta;
+ d__2 = -alphi / beta;
+ z__1.r = d__1, z__1.i = d__2;
+ tau->r = z__1.r, tau->i = z__1.i;
+ z__2.r = alpha->r - beta, z__2.i = alpha->i;
+ zladiv_(&z__1, &c_b5, &z__2);
+ alpha->r = z__1.r, alpha->i = z__1.i;
+ i__1 = *n - 1;
+ zscal_(&i__1, alpha, &x[1], incx);
+
+/* If ALPHA is subnormal, it may lose relative accuracy */
+
+ i__1 = knt;
+ for (j = 1; j <= i__1; ++j) {
+ beta *= safmin;
+/* L20: */
+ }
+ alpha->r = beta, alpha->i = 0.;
+ }
+
+ return 0;
+
+/* End of ZLARFG */
+
+} /* zlarfg_ */
diff --git a/contrib/libs/clapack/zlarfp.c b/contrib/libs/clapack/zlarfp.c
new file mode 100644
index 0000000000..38fda669d5
--- /dev/null
+++ b/contrib/libs/clapack/zlarfp.c
@@ -0,0 +1,236 @@
+/* zlarfp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b5 = {1.,0.};
+
+/* Subroutine */ int zlarfp_(integer *n, doublecomplex *alpha, doublecomplex *
+ x, integer *incx, doublecomplex *tau)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ doublereal d__1, d__2;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ integer j, knt;
+ doublereal beta, alphi, alphr;
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *);
+ doublereal xnorm;
+ extern doublereal dlapy2_(doublereal *, doublereal *), dlapy3_(doublereal
+ *, doublereal *, doublereal *), dznrm2_(integer *, doublecomplex *
+, integer *), dlamch_(char *);
+ doublereal safmin;
+ extern /* Subroutine */ int zdscal_(integer *, doublereal *,
+ doublecomplex *, integer *);
+ doublereal rsafmn;
+ extern /* Double Complex */ VOID zladiv_(doublecomplex *, doublecomplex *,
+ doublecomplex *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLARFP generates a complex elementary reflector H of order n, such */
+/* that */
+
+/* H' * ( alpha ) = ( beta ), H' * H = I. */
+/* ( x ) ( 0 ) */
+
+/* where alpha and beta are scalars, beta is real and non-negative, and */
+/* x is an (n-1)-element complex vector. H is represented in the form */
+
+/* H = I - tau * ( 1 ) * ( 1 v' ) , */
+/* ( v ) */
+
+/* where tau is a complex scalar and v is a complex (n-1)-element */
+/* vector. Note that H is not hermitian. */
+
+/* If the elements of x are all zero and alpha is real, then tau = 0 */
+/* and H is taken to be the unit matrix. */
+
+/* Otherwise 1 <= real(tau) <= 2 and abs(tau-1) <= 1 . */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the elementary reflector. */
+
+/* ALPHA (input/output) COMPLEX*16 */
+/* On entry, the value alpha. */
+/* On exit, it is overwritten with the value beta. */
+
+/* X (input/output) COMPLEX*16 array, dimension */
+/* (1+(N-2)*abs(INCX)) */
+/* On entry, the vector x. */
+/* On exit, it is overwritten with the vector v. */
+
+/* INCX (input) INTEGER */
+/* The increment between elements of X. INCX > 0. */
+
+/* TAU (output) COMPLEX*16 */
+/* The value tau. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*n <= 0) {
+ tau->r = 0., tau->i = 0.;
+ return 0;
+ }
+
+ i__1 = *n - 1;
+ xnorm = dznrm2_(&i__1, &x[1], incx);
+ alphr = alpha->r;
+ alphi = d_imag(alpha);
+
+ if (xnorm == 0. && alphi == 0.) {
+
+/* H = [1-alpha/abs(alpha) 0; 0 I], sign chosen so ALPHA >= 0. */
+
+ if (alphi == 0.) {
+ if (alphr >= 0.) {
+/* When TAU.eq.ZERO, the vector is special-cased to be */
+/* all zeros in the application routines. We do not need */
+/* to clear it. */
+ tau->r = 0., tau->i = 0.;
+ } else {
+/* However, the application routines rely on explicit */
+/* zero checks when TAU.ne.ZERO, and we must clear X. */
+ tau->r = 2., tau->i = 0.;
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = (j - 1) * *incx + 1;
+ x[i__2].r = 0., x[i__2].i = 0.;
+ }
+ z__1.r = -alpha->r, z__1.i = -alpha->i;
+ alpha->r = z__1.r, alpha->i = z__1.i;
+ }
+ } else {
+/* Only "reflecting" the diagonal entry to be real and non-negative. */
+ xnorm = dlapy2_(&alphr, &alphi);
+ d__1 = 1. - alphr / xnorm;
+ d__2 = -alphi / xnorm;
+ z__1.r = d__1, z__1.i = d__2;
+ tau->r = z__1.r, tau->i = z__1.i;
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = (j - 1) * *incx + 1;
+ x[i__2].r = 0., x[i__2].i = 0.;
+ }
+ alpha->r = xnorm, alpha->i = 0.;
+ }
+ } else {
+
+/* general case */
+
+ d__1 = dlapy3_(&alphr, &alphi, &xnorm);
+ beta = d_sign(&d__1, &alphr);
+ safmin = dlamch_("S") / dlamch_("E");
+ rsafmn = 1. / safmin;
+
+ knt = 0;
+ if (abs(beta) < safmin) {
+
+/* XNORM, BETA may be inaccurate; scale X and recompute them */
+
+L10:
+ ++knt;
+ i__1 = *n - 1;
+ zdscal_(&i__1, &rsafmn, &x[1], incx);
+ beta *= rsafmn;
+ alphi *= rsafmn;
+ alphr *= rsafmn;
+ if (abs(beta) < safmin) {
+ goto L10;
+ }
+
+/* New BETA is at most 1, at least SAFMIN */
+
+ i__1 = *n - 1;
+ xnorm = dznrm2_(&i__1, &x[1], incx);
+ z__1.r = alphr, z__1.i = alphi;
+ alpha->r = z__1.r, alpha->i = z__1.i;
+ d__1 = dlapy3_(&alphr, &alphi, &xnorm);
+ beta = d_sign(&d__1, &alphr);
+ }
+ z__1.r = alpha->r + beta, z__1.i = alpha->i;
+ alpha->r = z__1.r, alpha->i = z__1.i;
+ if (beta < 0.) {
+ beta = -beta;
+ z__2.r = -alpha->r, z__2.i = -alpha->i;
+ z__1.r = z__2.r / beta, z__1.i = z__2.i / beta;
+ tau->r = z__1.r, tau->i = z__1.i;
+ } else {
+ alphr = alphi * (alphi / alpha->r);
+ alphr += xnorm * (xnorm / alpha->r);
+ d__1 = alphr / beta;
+ d__2 = -alphi / beta;
+ z__1.r = d__1, z__1.i = d__2;
+ tau->r = z__1.r, tau->i = z__1.i;
+ d__1 = -alphr;
+ z__1.r = d__1, z__1.i = alphi;
+ alpha->r = z__1.r, alpha->i = z__1.i;
+ }
+ zladiv_(&z__1, &c_b5, alpha);
+ alpha->r = z__1.r, alpha->i = z__1.i;
+ i__1 = *n - 1;
+ zscal_(&i__1, alpha, &x[1], incx);
+
+/* If BETA is subnormal, it may lose relative accuracy */
+
+ i__1 = knt;
+ for (j = 1; j <= i__1; ++j) {
+ beta *= safmin;
+/* L20: */
+ }
+ alpha->r = beta, alpha->i = 0.;
+ }
+
+ return 0;
+
+/* End of ZLARFP */
+
+} /* zlarfp_ */
diff --git a/contrib/libs/clapack/zlarft.c b/contrib/libs/clapack/zlarft.c
new file mode 100644
index 0000000000..b55adc2abb
--- /dev/null
+++ b/contrib/libs/clapack/zlarft.c
@@ -0,0 +1,362 @@
+/* zlarft.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b2 = {0.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zlarft_(char *direct, char *storev, integer *n, integer *
+ k, doublecomplex *v, integer *ldv, doublecomplex *tau, doublecomplex *
+ t, integer *ldt)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, v_dim1, v_offset, i__1, i__2, i__3, i__4;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__, j, prevlastv;
+ doublecomplex vii;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *);
+ integer lastv;
+ extern /* Subroutine */ int ztrmv_(char *, char *, char *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *), zlacgv_(integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLARFT forms the triangular factor T of a complex block reflector H */
+/* of order n, which is defined as a product of k elementary reflectors. */
+
+/* If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular; */
+
+/* If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. */
+
+/* If STOREV = 'C', the vector which defines the elementary reflector */
+/* H(i) is stored in the i-th column of the array V, and */
+
+/* H = I - V * T * V' */
+
+/* If STOREV = 'R', the vector which defines the elementary reflector */
+/* H(i) is stored in the i-th row of the array V, and */
+
+/* H = I - V' * T * V */
+
+/* Arguments */
+/* ========= */
+
+/* DIRECT (input) CHARACTER*1 */
+/* Specifies the order in which the elementary reflectors are */
+/* multiplied to form the block reflector: */
+/* = 'F': H = H(1) H(2) . . . H(k) (Forward) */
+/* = 'B': H = H(k) . . . H(2) H(1) (Backward) */
+
+/* STOREV (input) CHARACTER*1 */
+/* Specifies how the vectors which define the elementary */
+/* reflectors are stored (see also Further Details): */
+/* = 'C': columnwise */
+/* = 'R': rowwise */
+
+/* N (input) INTEGER */
+/* The order of the block reflector H. N >= 0. */
+
+/* K (input) INTEGER */
+/* The order of the triangular factor T (= the number of */
+/* elementary reflectors). K >= 1. */
+
+/* V (input/output) COMPLEX*16 array, dimension */
+/* (LDV,K) if STOREV = 'C' */
+/* (LDV,N) if STOREV = 'R' */
+/* The matrix V. See further details. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. */
+/* If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. */
+
+/* TAU (input) COMPLEX*16 array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i). */
+
+/* T (output) COMPLEX*16 array, dimension (LDT,K) */
+/* The k by k triangular factor T of the block reflector. */
+/* If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is */
+/* lower triangular. The rest of the array is not used. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= K. */
+
+/* Further Details */
+/* =============== */
+
+/* The shape of the matrix V and the storage of the vectors which define */
+/* the H(i) is best illustrated by the following example with n = 5 and */
+/* k = 3. The elements equal to 1 are not stored; the corresponding */
+/* array elements are modified but restored on exit. The rest of the */
+/* array is not used. */
+
+/* DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': */
+
+/* V = ( 1 ) V = ( 1 v1 v1 v1 v1 ) */
+/* ( v1 1 ) ( 1 v2 v2 v2 ) */
+/* ( v1 v2 1 ) ( 1 v3 v3 ) */
+/* ( v1 v2 v3 ) */
+/* ( v1 v2 v3 ) */
+
+/* DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': */
+
+/* V = ( v1 v2 v3 ) V = ( v1 v1 1 ) */
+/* ( v1 v2 v3 ) ( v2 v2 v2 1 ) */
+/* ( 1 v2 v3 ) ( v3 v3 v3 v3 1 ) */
+/* ( 1 v3 ) */
+/* ( 1 ) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ --tau;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+
+ /* Function Body */
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (lsame_(direct, "F")) {
+ prevlastv = *n;
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ prevlastv = max(prevlastv,i__);
+ i__2 = i__;
+ if (tau[i__2].r == 0. && tau[i__2].i == 0.) {
+
+/* H(i) = I */
+
+ i__2 = i__;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = j + i__ * t_dim1;
+ t[i__3].r = 0., t[i__3].i = 0.;
+/* L10: */
+ }
+ } else {
+
+/* general case */
+
+ i__2 = i__ + i__ * v_dim1;
+ vii.r = v[i__2].r, vii.i = v[i__2].i;
+ i__2 = i__ + i__ * v_dim1;
+ v[i__2].r = 1., v[i__2].i = 0.;
+ if (lsame_(storev, "C")) {
+/* Skip any trailing zeros. */
+ i__2 = i__ + 1;
+ for (lastv = *n; lastv >= i__2; --lastv) {
+ i__3 = lastv + i__ * v_dim1;
+ if (v[i__3].r != 0. || v[i__3].i != 0.) {
+ break;
+ }
+ }
+ j = min(lastv,prevlastv);
+
+/* T(1:i-1,i) := - tau(i) * V(i:j,1:i-1)' * V(i:j,i) */
+
+ i__2 = j - i__ + 1;
+ i__3 = i__ - 1;
+ i__4 = i__;
+ z__1.r = -tau[i__4].r, z__1.i = -tau[i__4].i;
+ zgemv_("Conjugate transpose", &i__2, &i__3, &z__1, &v[i__
+ + v_dim1], ldv, &v[i__ + i__ * v_dim1], &c__1, &
+ c_b2, &t[i__ * t_dim1 + 1], &c__1);
+ } else {
+/* Skip any trailing zeros. */
+ i__2 = i__ + 1;
+ for (lastv = *n; lastv >= i__2; --lastv) {
+ i__3 = i__ + lastv * v_dim1;
+ if (v[i__3].r != 0. || v[i__3].i != 0.) {
+ break;
+ }
+ }
+ j = min(lastv,prevlastv);
+
+/* T(1:i-1,i) := - tau(i) * V(1:i-1,i:j) * V(i,i:j)' */
+
+ if (i__ < j) {
+ i__2 = j - i__;
+ zlacgv_(&i__2, &v[i__ + (i__ + 1) * v_dim1], ldv);
+ }
+ i__2 = i__ - 1;
+ i__3 = j - i__ + 1;
+ i__4 = i__;
+ z__1.r = -tau[i__4].r, z__1.i = -tau[i__4].i;
+ zgemv_("No transpose", &i__2, &i__3, &z__1, &v[i__ *
+ v_dim1 + 1], ldv, &v[i__ + i__ * v_dim1], ldv, &
+ c_b2, &t[i__ * t_dim1 + 1], &c__1);
+ if (i__ < j) {
+ i__2 = j - i__;
+ zlacgv_(&i__2, &v[i__ + (i__ + 1) * v_dim1], ldv);
+ }
+ }
+ i__2 = i__ + i__ * v_dim1;
+ v[i__2].r = vii.r, v[i__2].i = vii.i;
+
+/* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) */
+
+ i__2 = i__ - 1;
+ ztrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[
+ t_offset], ldt, &t[i__ * t_dim1 + 1], &c__1);
+ i__2 = i__ + i__ * t_dim1;
+ i__3 = i__;
+ t[i__2].r = tau[i__3].r, t[i__2].i = tau[i__3].i;
+ if (i__ > 1) {
+ prevlastv = max(prevlastv,lastv);
+ } else {
+ prevlastv = lastv;
+ }
+ }
+/* L20: */
+ }
+ } else {
+ prevlastv = 1;
+ for (i__ = *k; i__ >= 1; --i__) {
+ i__1 = i__;
+ if (tau[i__1].r == 0. && tau[i__1].i == 0.) {
+
+/* H(i) = I */
+
+ i__1 = *k;
+ for (j = i__; j <= i__1; ++j) {
+ i__2 = j + i__ * t_dim1;
+ t[i__2].r = 0., t[i__2].i = 0.;
+/* L30: */
+ }
+ } else {
+
+/* general case */
+
+ if (i__ < *k) {
+ if (lsame_(storev, "C")) {
+ i__1 = *n - *k + i__ + i__ * v_dim1;
+ vii.r = v[i__1].r, vii.i = v[i__1].i;
+ i__1 = *n - *k + i__ + i__ * v_dim1;
+ v[i__1].r = 1., v[i__1].i = 0.;
+/* Skip any leading zeros. */
+ i__1 = i__ - 1;
+ for (lastv = 1; lastv <= i__1; ++lastv) {
+ i__2 = lastv + i__ * v_dim1;
+ if (v[i__2].r != 0. || v[i__2].i != 0.) {
+ break;
+ }
+ }
+ j = max(lastv,prevlastv);
+
+/* T(i+1:k,i) := */
+/* - tau(i) * V(j:n-k+i,i+1:k)' * V(j:n-k+i,i) */
+
+ i__1 = *n - *k + i__ - j + 1;
+ i__2 = *k - i__;
+ i__3 = i__;
+ z__1.r = -tau[i__3].r, z__1.i = -tau[i__3].i;
+ zgemv_("Conjugate transpose", &i__1, &i__2, &z__1, &v[
+ j + (i__ + 1) * v_dim1], ldv, &v[j + i__ *
+ v_dim1], &c__1, &c_b2, &t[i__ + 1 + i__ *
+ t_dim1], &c__1);
+ i__1 = *n - *k + i__ + i__ * v_dim1;
+ v[i__1].r = vii.r, v[i__1].i = vii.i;
+ } else {
+ i__1 = i__ + (*n - *k + i__) * v_dim1;
+ vii.r = v[i__1].r, vii.i = v[i__1].i;
+ i__1 = i__ + (*n - *k + i__) * v_dim1;
+ v[i__1].r = 1., v[i__1].i = 0.;
+/* Skip any leading zeros. */
+ i__1 = i__ - 1;
+ for (lastv = 1; lastv <= i__1; ++lastv) {
+ i__2 = i__ + lastv * v_dim1;
+ if (v[i__2].r != 0. || v[i__2].i != 0.) {
+ break;
+ }
+ }
+ j = max(lastv,prevlastv);
+
+/* T(i+1:k,i) := */
+/* - tau(i) * V(i+1:k,j:n-k+i) * V(i,j:n-k+i)' */
+
+ i__1 = *n - *k + i__ - 1 - j + 1;
+ zlacgv_(&i__1, &v[i__ + j * v_dim1], ldv);
+ i__1 = *k - i__;
+ i__2 = *n - *k + i__ - j + 1;
+ i__3 = i__;
+ z__1.r = -tau[i__3].r, z__1.i = -tau[i__3].i;
+ zgemv_("No transpose", &i__1, &i__2, &z__1, &v[i__ +
+ 1 + j * v_dim1], ldv, &v[i__ + j * v_dim1],
+ ldv, &c_b2, &t[i__ + 1 + i__ * t_dim1], &c__1);
+ i__1 = *n - *k + i__ - 1 - j + 1;
+ zlacgv_(&i__1, &v[i__ + j * v_dim1], ldv);
+ i__1 = i__ + (*n - *k + i__) * v_dim1;
+ v[i__1].r = vii.r, v[i__1].i = vii.i;
+ }
+
+/* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) */
+
+ i__1 = *k - i__;
+ ztrmv_("Lower", "No transpose", "Non-unit", &i__1, &t[i__
+ + 1 + (i__ + 1) * t_dim1], ldt, &t[i__ + 1 + i__ *
+ t_dim1], &c__1)
+ ;
+ if (i__ > 1) {
+ prevlastv = min(prevlastv,lastv);
+ } else {
+ prevlastv = lastv;
+ }
+ }
+ i__1 = i__ + i__ * t_dim1;
+ i__2 = i__;
+ t[i__1].r = tau[i__2].r, t[i__1].i = tau[i__2].i;
+ }
+/* L40: */
+ }
+ }
+ return 0;
+
+/* End of ZLARFT */
+
+} /* zlarft_ */
diff --git a/contrib/libs/clapack/zlarfx.c b/contrib/libs/clapack/zlarfx.c
new file mode 100644
index 0000000000..1da9e3ecce
--- /dev/null
+++ b/contrib/libs/clapack/zlarfx.c
@@ -0,0 +1,2050 @@
+/* zlarfx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zlarfx_(char *side, integer *m, integer *n,
+ doublecomplex *v, doublecomplex *tau, doublecomplex *c__, integer *
+ ldc, doublecomplex *work)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7, i__8,
+ i__9, i__10, i__11;
+ doublecomplex z__1, z__2, z__3, z__4, z__5, z__6, z__7, z__8, z__9, z__10,
+ z__11, z__12, z__13, z__14, z__15, z__16, z__17, z__18, z__19;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer j;
+ doublecomplex t1, t2, t3, t4, t5, t6, t7, t8, t9, v1, v2, v3, v4, v5, v6,
+ v7, v8, v9, t10, v10, sum;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLARFX applies a complex elementary reflector H to a complex m by n */
+/* matrix C, from either the left or the right. H is represented in the */
+/* form */
+
+/* H = I - tau * v * v' */
+
+/* where tau is a complex scalar and v is a complex vector. */
+
+/* If tau = 0, then H is taken to be the unit matrix */
+
+/* This version uses inline code if H has order < 11. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': form H * C */
+/* = 'R': form C * H */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* V (input) COMPLEX*16 array, dimension (M) if SIDE = 'L' */
+/* or (N) if SIDE = 'R' */
+/* The vector v in the representation of H. */
+
+/* TAU (input) COMPLEX*16 */
+/* The value tau in the representation of H. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the m by n matrix C. */
+/* On exit, C is overwritten by the matrix H * C if SIDE = 'L', */
+/* or C * H if SIDE = 'R'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDA >= max(1,M). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N) if SIDE = 'L' */
+/* or (M) if SIDE = 'R' */
+/* WORK is not referenced if H has order < 11. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --v;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ if (tau->r == 0. && tau->i == 0.) {
+ return 0;
+ }
+ if (lsame_(side, "L")) {
+
+/* Form H * C, where H has order m. */
+
+ switch (*m) {
+ case 1: goto L10;
+ case 2: goto L30;
+ case 3: goto L50;
+ case 4: goto L70;
+ case 5: goto L90;
+ case 6: goto L110;
+ case 7: goto L130;
+ case 8: goto L150;
+ case 9: goto L170;
+ case 10: goto L190;
+ }
+
+/* Code for general M */
+
+ zlarf_(side, m, n, &v[1], &c__1, tau, &c__[c_offset], ldc, &work[1]);
+ goto L410;
+L10:
+
+/* Special code for 1 x 1 Householder */
+
+ z__3.r = tau->r * v[1].r - tau->i * v[1].i, z__3.i = tau->r * v[1].i
+ + tau->i * v[1].r;
+ d_cnjg(&z__4, &v[1]);
+ z__2.r = z__3.r * z__4.r - z__3.i * z__4.i, z__2.i = z__3.r * z__4.i
+ + z__3.i * z__4.r;
+ z__1.r = 1. - z__2.r, z__1.i = 0. - z__2.i;
+ t1.r = z__1.r, t1.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__1.r = t1.r * c__[i__3].r - t1.i * c__[i__3].i, z__1.i = t1.r *
+ c__[i__3].i + t1.i * c__[i__3].r;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L20: */
+ }
+ goto L410;
+L30:
+
+/* Special code for 2 x 2 Householder */
+
+ d_cnjg(&z__1, &v[1]);
+ v1.r = z__1.r, v1.i = z__1.i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ d_cnjg(&z__1, &v[2]);
+ v2.r = z__1.r, v2.i = z__1.i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ z__2.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__2.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ z__3.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__3.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L40: */
+ }
+ goto L410;
+L50:
+
+/* Special code for 3 x 3 Householder */
+
+ d_cnjg(&z__1, &v[1]);
+ v1.r = z__1.r, v1.i = z__1.i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ d_cnjg(&z__1, &v[2]);
+ v2.r = z__1.r, v2.i = z__1.i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ d_cnjg(&z__1, &v[3]);
+ v3.r = z__1.r, v3.i = z__1.i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ z__3.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__3.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ z__4.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__4.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__2.r = z__3.r + z__4.r, z__2.i = z__3.i + z__4.i;
+ i__4 = j * c_dim1 + 3;
+ z__5.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__5.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L60: */
+ }
+ goto L410;
+L70:
+
+/* Special code for 4 x 4 Householder */
+
+ d_cnjg(&z__1, &v[1]);
+ v1.r = z__1.r, v1.i = z__1.i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ d_cnjg(&z__1, &v[2]);
+ v2.r = z__1.r, v2.i = z__1.i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ d_cnjg(&z__1, &v[3]);
+ v3.r = z__1.r, v3.i = z__1.i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ d_cnjg(&z__1, &v[4]);
+ v4.r = z__1.r, v4.i = z__1.i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ z__4.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__4.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ z__5.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__5.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__3.r = z__4.r + z__5.r, z__3.i = z__4.i + z__5.i;
+ i__4 = j * c_dim1 + 3;
+ z__6.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__6.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__2.r = z__3.r + z__6.r, z__2.i = z__3.i + z__6.i;
+ i__5 = j * c_dim1 + 4;
+ z__7.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__7.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ z__1.r = z__2.r + z__7.r, z__1.i = z__2.i + z__7.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L80: */
+ }
+ goto L410;
+L90:
+
+/* Special code for 5 x 5 Householder */
+
+ d_cnjg(&z__1, &v[1]);
+ v1.r = z__1.r, v1.i = z__1.i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ d_cnjg(&z__1, &v[2]);
+ v2.r = z__1.r, v2.i = z__1.i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ d_cnjg(&z__1, &v[3]);
+ v3.r = z__1.r, v3.i = z__1.i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ d_cnjg(&z__1, &v[4]);
+ v4.r = z__1.r, v4.i = z__1.i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ d_cnjg(&z__1, &v[5]);
+ v5.r = z__1.r, v5.i = z__1.i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ z__5.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__5.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ z__6.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__6.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__4.r = z__5.r + z__6.r, z__4.i = z__5.i + z__6.i;
+ i__4 = j * c_dim1 + 3;
+ z__7.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__7.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__3.r = z__4.r + z__7.r, z__3.i = z__4.i + z__7.i;
+ i__5 = j * c_dim1 + 4;
+ z__8.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__8.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ z__2.r = z__3.r + z__8.r, z__2.i = z__3.i + z__8.i;
+ i__6 = j * c_dim1 + 5;
+ z__9.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__9.i = v5.r *
+ c__[i__6].i + v5.i * c__[i__6].r;
+ z__1.r = z__2.r + z__9.r, z__1.i = z__2.i + z__9.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L100: */
+ }
+ goto L410;
+L110:
+
+/* Special code for 6 x 6 Householder */
+
+ d_cnjg(&z__1, &v[1]);
+ v1.r = z__1.r, v1.i = z__1.i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ d_cnjg(&z__1, &v[2]);
+ v2.r = z__1.r, v2.i = z__1.i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ d_cnjg(&z__1, &v[3]);
+ v3.r = z__1.r, v3.i = z__1.i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ d_cnjg(&z__1, &v[4]);
+ v4.r = z__1.r, v4.i = z__1.i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ d_cnjg(&z__1, &v[5]);
+ v5.r = z__1.r, v5.i = z__1.i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ d_cnjg(&z__1, &v[6]);
+ v6.r = z__1.r, v6.i = z__1.i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ z__6.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__6.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ z__7.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__7.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__5.r = z__6.r + z__7.r, z__5.i = z__6.i + z__7.i;
+ i__4 = j * c_dim1 + 3;
+ z__8.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__8.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__4.r = z__5.r + z__8.r, z__4.i = z__5.i + z__8.i;
+ i__5 = j * c_dim1 + 4;
+ z__9.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__9.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ z__3.r = z__4.r + z__9.r, z__3.i = z__4.i + z__9.i;
+ i__6 = j * c_dim1 + 5;
+ z__10.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__10.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__2.r = z__3.r + z__10.r, z__2.i = z__3.i + z__10.i;
+ i__7 = j * c_dim1 + 6;
+ z__11.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__11.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__1.r = z__2.r + z__11.r, z__1.i = z__2.i + z__11.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L120: */
+ }
+ goto L410;
+L130:
+
+/* Special code for 7 x 7 Householder */
+
+ d_cnjg(&z__1, &v[1]);
+ v1.r = z__1.r, v1.i = z__1.i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ d_cnjg(&z__1, &v[2]);
+ v2.r = z__1.r, v2.i = z__1.i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ d_cnjg(&z__1, &v[3]);
+ v3.r = z__1.r, v3.i = z__1.i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ d_cnjg(&z__1, &v[4]);
+ v4.r = z__1.r, v4.i = z__1.i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ d_cnjg(&z__1, &v[5]);
+ v5.r = z__1.r, v5.i = z__1.i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ d_cnjg(&z__1, &v[6]);
+ v6.r = z__1.r, v6.i = z__1.i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ d_cnjg(&z__1, &v[7]);
+ v7.r = z__1.r, v7.i = z__1.i;
+ d_cnjg(&z__2, &v7);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t7.r = z__1.r, t7.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ z__7.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__7.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ z__8.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__8.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__6.r = z__7.r + z__8.r, z__6.i = z__7.i + z__8.i;
+ i__4 = j * c_dim1 + 3;
+ z__9.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__9.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__5.r = z__6.r + z__9.r, z__5.i = z__6.i + z__9.i;
+ i__5 = j * c_dim1 + 4;
+ z__10.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__10.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ z__4.r = z__5.r + z__10.r, z__4.i = z__5.i + z__10.i;
+ i__6 = j * c_dim1 + 5;
+ z__11.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__11.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__3.r = z__4.r + z__11.r, z__3.i = z__4.i + z__11.i;
+ i__7 = j * c_dim1 + 6;
+ z__12.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__12.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__2.r = z__3.r + z__12.r, z__2.i = z__3.i + z__12.i;
+ i__8 = j * c_dim1 + 7;
+ z__13.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__13.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ z__1.r = z__2.r + z__13.r, z__1.i = z__2.i + z__13.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 7;
+ i__3 = j * c_dim1 + 7;
+ z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L140: */
+ }
+ goto L410;
+L150:
+
+/* Special code for 8 x 8 Householder */
+
+ d_cnjg(&z__1, &v[1]);
+ v1.r = z__1.r, v1.i = z__1.i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ d_cnjg(&z__1, &v[2]);
+ v2.r = z__1.r, v2.i = z__1.i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ d_cnjg(&z__1, &v[3]);
+ v3.r = z__1.r, v3.i = z__1.i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ d_cnjg(&z__1, &v[4]);
+ v4.r = z__1.r, v4.i = z__1.i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ d_cnjg(&z__1, &v[5]);
+ v5.r = z__1.r, v5.i = z__1.i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ d_cnjg(&z__1, &v[6]);
+ v6.r = z__1.r, v6.i = z__1.i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ d_cnjg(&z__1, &v[7]);
+ v7.r = z__1.r, v7.i = z__1.i;
+ d_cnjg(&z__2, &v7);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t7.r = z__1.r, t7.i = z__1.i;
+ d_cnjg(&z__1, &v[8]);
+ v8.r = z__1.r, v8.i = z__1.i;
+ d_cnjg(&z__2, &v8);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t8.r = z__1.r, t8.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ z__8.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__8.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ z__9.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__9.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__7.r = z__8.r + z__9.r, z__7.i = z__8.i + z__9.i;
+ i__4 = j * c_dim1 + 3;
+ z__10.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__10.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ z__6.r = z__7.r + z__10.r, z__6.i = z__7.i + z__10.i;
+ i__5 = j * c_dim1 + 4;
+ z__11.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__11.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ z__5.r = z__6.r + z__11.r, z__5.i = z__6.i + z__11.i;
+ i__6 = j * c_dim1 + 5;
+ z__12.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__12.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__4.r = z__5.r + z__12.r, z__4.i = z__5.i + z__12.i;
+ i__7 = j * c_dim1 + 6;
+ z__13.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__13.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__3.r = z__4.r + z__13.r, z__3.i = z__4.i + z__13.i;
+ i__8 = j * c_dim1 + 7;
+ z__14.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__14.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ z__2.r = z__3.r + z__14.r, z__2.i = z__3.i + z__14.i;
+ i__9 = j * c_dim1 + 8;
+ z__15.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, z__15.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ z__1.r = z__2.r + z__15.r, z__1.i = z__2.i + z__15.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 7;
+ i__3 = j * c_dim1 + 7;
+ z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 8;
+ i__3 = j * c_dim1 + 8;
+ z__2.r = sum.r * t8.r - sum.i * t8.i, z__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L160: */
+ }
+ goto L410;
+L170:
+
+/* Special code for 9 x 9 Householder */
+
+ d_cnjg(&z__1, &v[1]);
+ v1.r = z__1.r, v1.i = z__1.i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ d_cnjg(&z__1, &v[2]);
+ v2.r = z__1.r, v2.i = z__1.i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ d_cnjg(&z__1, &v[3]);
+ v3.r = z__1.r, v3.i = z__1.i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ d_cnjg(&z__1, &v[4]);
+ v4.r = z__1.r, v4.i = z__1.i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ d_cnjg(&z__1, &v[5]);
+ v5.r = z__1.r, v5.i = z__1.i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ d_cnjg(&z__1, &v[6]);
+ v6.r = z__1.r, v6.i = z__1.i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ d_cnjg(&z__1, &v[7]);
+ v7.r = z__1.r, v7.i = z__1.i;
+ d_cnjg(&z__2, &v7);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t7.r = z__1.r, t7.i = z__1.i;
+ d_cnjg(&z__1, &v[8]);
+ v8.r = z__1.r, v8.i = z__1.i;
+ d_cnjg(&z__2, &v8);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t8.r = z__1.r, t8.i = z__1.i;
+ d_cnjg(&z__1, &v[9]);
+ v9.r = z__1.r, v9.i = z__1.i;
+ d_cnjg(&z__2, &v9);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t9.r = z__1.r, t9.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ z__9.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__9.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ z__10.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__10.i = v2.r
+ * c__[i__3].i + v2.i * c__[i__3].r;
+ z__8.r = z__9.r + z__10.r, z__8.i = z__9.i + z__10.i;
+ i__4 = j * c_dim1 + 3;
+ z__11.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__11.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ z__7.r = z__8.r + z__11.r, z__7.i = z__8.i + z__11.i;
+ i__5 = j * c_dim1 + 4;
+ z__12.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__12.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ z__6.r = z__7.r + z__12.r, z__6.i = z__7.i + z__12.i;
+ i__6 = j * c_dim1 + 5;
+ z__13.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__13.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__5.r = z__6.r + z__13.r, z__5.i = z__6.i + z__13.i;
+ i__7 = j * c_dim1 + 6;
+ z__14.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__14.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__4.r = z__5.r + z__14.r, z__4.i = z__5.i + z__14.i;
+ i__8 = j * c_dim1 + 7;
+ z__15.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__15.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ z__3.r = z__4.r + z__15.r, z__3.i = z__4.i + z__15.i;
+ i__9 = j * c_dim1 + 8;
+ z__16.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, z__16.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ z__2.r = z__3.r + z__16.r, z__2.i = z__3.i + z__16.i;
+ i__10 = j * c_dim1 + 9;
+ z__17.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, z__17.i =
+ v9.r * c__[i__10].i + v9.i * c__[i__10].r;
+ z__1.r = z__2.r + z__17.r, z__1.i = z__2.i + z__17.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 7;
+ i__3 = j * c_dim1 + 7;
+ z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 8;
+ i__3 = j * c_dim1 + 8;
+ z__2.r = sum.r * t8.r - sum.i * t8.i, z__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 9;
+ i__3 = j * c_dim1 + 9;
+ z__2.r = sum.r * t9.r - sum.i * t9.i, z__2.i = sum.r * t9.i +
+ sum.i * t9.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L180: */
+ }
+ goto L410;
+L190:
+
+/* Special code for 10 x 10 Householder */
+
+ d_cnjg(&z__1, &v[1]);
+ v1.r = z__1.r, v1.i = z__1.i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ d_cnjg(&z__1, &v[2]);
+ v2.r = z__1.r, v2.i = z__1.i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ d_cnjg(&z__1, &v[3]);
+ v3.r = z__1.r, v3.i = z__1.i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ d_cnjg(&z__1, &v[4]);
+ v4.r = z__1.r, v4.i = z__1.i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ d_cnjg(&z__1, &v[5]);
+ v5.r = z__1.r, v5.i = z__1.i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ d_cnjg(&z__1, &v[6]);
+ v6.r = z__1.r, v6.i = z__1.i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ d_cnjg(&z__1, &v[7]);
+ v7.r = z__1.r, v7.i = z__1.i;
+ d_cnjg(&z__2, &v7);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t7.r = z__1.r, t7.i = z__1.i;
+ d_cnjg(&z__1, &v[8]);
+ v8.r = z__1.r, v8.i = z__1.i;
+ d_cnjg(&z__2, &v8);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t8.r = z__1.r, t8.i = z__1.i;
+ d_cnjg(&z__1, &v[9]);
+ v9.r = z__1.r, v9.i = z__1.i;
+ d_cnjg(&z__2, &v9);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t9.r = z__1.r, t9.i = z__1.i;
+ d_cnjg(&z__1, &v[10]);
+ v10.r = z__1.r, v10.i = z__1.i;
+ d_cnjg(&z__2, &v10);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t10.r = z__1.r, t10.i = z__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ z__10.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__10.i = v1.r
+ * c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ z__11.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__11.i = v2.r
+ * c__[i__3].i + v2.i * c__[i__3].r;
+ z__9.r = z__10.r + z__11.r, z__9.i = z__10.i + z__11.i;
+ i__4 = j * c_dim1 + 3;
+ z__12.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__12.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ z__8.r = z__9.r + z__12.r, z__8.i = z__9.i + z__12.i;
+ i__5 = j * c_dim1 + 4;
+ z__13.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__13.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ z__7.r = z__8.r + z__13.r, z__7.i = z__8.i + z__13.i;
+ i__6 = j * c_dim1 + 5;
+ z__14.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__14.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__6.r = z__7.r + z__14.r, z__6.i = z__7.i + z__14.i;
+ i__7 = j * c_dim1 + 6;
+ z__15.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__15.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__5.r = z__6.r + z__15.r, z__5.i = z__6.i + z__15.i;
+ i__8 = j * c_dim1 + 7;
+ z__16.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__16.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ z__4.r = z__5.r + z__16.r, z__4.i = z__5.i + z__16.i;
+ i__9 = j * c_dim1 + 8;
+ z__17.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, z__17.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ z__3.r = z__4.r + z__17.r, z__3.i = z__4.i + z__17.i;
+ i__10 = j * c_dim1 + 9;
+ z__18.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, z__18.i =
+ v9.r * c__[i__10].i + v9.i * c__[i__10].r;
+ z__2.r = z__3.r + z__18.r, z__2.i = z__3.i + z__18.i;
+ i__11 = j * c_dim1 + 10;
+ z__19.r = v10.r * c__[i__11].r - v10.i * c__[i__11].i, z__19.i =
+ v10.r * c__[i__11].i + v10.i * c__[i__11].r;
+ z__1.r = z__2.r + z__19.r, z__1.i = z__2.i + z__19.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 7;
+ i__3 = j * c_dim1 + 7;
+ z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 8;
+ i__3 = j * c_dim1 + 8;
+ z__2.r = sum.r * t8.r - sum.i * t8.i, z__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 9;
+ i__3 = j * c_dim1 + 9;
+ z__2.r = sum.r * t9.r - sum.i * t9.i, z__2.i = sum.r * t9.i +
+ sum.i * t9.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j * c_dim1 + 10;
+ i__3 = j * c_dim1 + 10;
+ z__2.r = sum.r * t10.r - sum.i * t10.i, z__2.i = sum.r * t10.i +
+ sum.i * t10.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L200: */
+ }
+ goto L410;
+ } else {
+
+/* Form C * H, where H has order n. */
+
+ switch (*n) {
+ case 1: goto L210;
+ case 2: goto L230;
+ case 3: goto L250;
+ case 4: goto L270;
+ case 5: goto L290;
+ case 6: goto L310;
+ case 7: goto L330;
+ case 8: goto L350;
+ case 9: goto L370;
+ case 10: goto L390;
+ }
+
+/* Code for general N */
+
+ zlarf_(side, m, n, &v[1], &c__1, tau, &c__[c_offset], ldc, &work[1]);
+ goto L410;
+L210:
+
+/* Special code for 1 x 1 Householder */
+
+ z__3.r = tau->r * v[1].r - tau->i * v[1].i, z__3.i = tau->r * v[1].i
+ + tau->i * v[1].r;
+ d_cnjg(&z__4, &v[1]);
+ z__2.r = z__3.r * z__4.r - z__3.i * z__4.i, z__2.i = z__3.r * z__4.i
+ + z__3.i * z__4.r;
+ z__1.r = 1. - z__2.r, z__1.i = 0. - z__2.i;
+ t1.r = z__1.r, t1.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__1.r = t1.r * c__[i__3].r - t1.i * c__[i__3].i, z__1.i = t1.r *
+ c__[i__3].i + t1.i * c__[i__3].r;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L220: */
+ }
+ goto L410;
+L230:
+
+/* Special code for 2 x 2 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ z__2.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__2.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + (c_dim1 << 1);
+ z__3.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__3.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + (c_dim1 << 1);
+ i__3 = j + (c_dim1 << 1);
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L240: */
+ }
+ goto L410;
+L250:
+
+/* Special code for 3 x 3 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ z__3.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__3.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + (c_dim1 << 1);
+ z__4.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__4.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__2.r = z__3.r + z__4.r, z__2.i = z__3.i + z__4.i;
+ i__4 = j + c_dim1 * 3;
+ z__5.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__5.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + (c_dim1 << 1);
+ i__3 = j + (c_dim1 << 1);
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L260: */
+ }
+ goto L410;
+L270:
+
+/* Special code for 4 x 4 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ z__4.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__4.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + (c_dim1 << 1);
+ z__5.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__5.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__3.r = z__4.r + z__5.r, z__3.i = z__4.i + z__5.i;
+ i__4 = j + c_dim1 * 3;
+ z__6.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__6.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__2.r = z__3.r + z__6.r, z__2.i = z__3.i + z__6.i;
+ i__5 = j + (c_dim1 << 2);
+ z__7.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__7.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ z__1.r = z__2.r + z__7.r, z__1.i = z__2.i + z__7.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + (c_dim1 << 1);
+ i__3 = j + (c_dim1 << 1);
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + (c_dim1 << 2);
+ i__3 = j + (c_dim1 << 2);
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L280: */
+ }
+ goto L410;
+L290:
+
+/* Special code for 5 x 5 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ z__5.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__5.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + (c_dim1 << 1);
+ z__6.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__6.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__4.r = z__5.r + z__6.r, z__4.i = z__5.i + z__6.i;
+ i__4 = j + c_dim1 * 3;
+ z__7.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__7.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__3.r = z__4.r + z__7.r, z__3.i = z__4.i + z__7.i;
+ i__5 = j + (c_dim1 << 2);
+ z__8.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__8.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ z__2.r = z__3.r + z__8.r, z__2.i = z__3.i + z__8.i;
+ i__6 = j + c_dim1 * 5;
+ z__9.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__9.i = v5.r *
+ c__[i__6].i + v5.i * c__[i__6].r;
+ z__1.r = z__2.r + z__9.r, z__1.i = z__2.i + z__9.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + (c_dim1 << 1);
+ i__3 = j + (c_dim1 << 1);
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + (c_dim1 << 2);
+ i__3 = j + (c_dim1 << 2);
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L300: */
+ }
+ goto L410;
+L310:
+
+/* Special code for 6 x 6 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ z__6.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__6.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + (c_dim1 << 1);
+ z__7.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__7.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__5.r = z__6.r + z__7.r, z__5.i = z__6.i + z__7.i;
+ i__4 = j + c_dim1 * 3;
+ z__8.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__8.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__4.r = z__5.r + z__8.r, z__4.i = z__5.i + z__8.i;
+ i__5 = j + (c_dim1 << 2);
+ z__9.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__9.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ z__3.r = z__4.r + z__9.r, z__3.i = z__4.i + z__9.i;
+ i__6 = j + c_dim1 * 5;
+ z__10.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__10.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__2.r = z__3.r + z__10.r, z__2.i = z__3.i + z__10.i;
+ i__7 = j + c_dim1 * 6;
+ z__11.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__11.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__1.r = z__2.r + z__11.r, z__1.i = z__2.i + z__11.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + (c_dim1 << 1);
+ i__3 = j + (c_dim1 << 1);
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + (c_dim1 << 2);
+ i__3 = j + (c_dim1 << 2);
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L320: */
+ }
+ goto L410;
+L330:
+
+/* Special code for 7 x 7 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ v7.r = v[7].r, v7.i = v[7].i;
+ d_cnjg(&z__2, &v7);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t7.r = z__1.r, t7.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ z__7.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__7.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + (c_dim1 << 1);
+ z__8.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__8.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__6.r = z__7.r + z__8.r, z__6.i = z__7.i + z__8.i;
+ i__4 = j + c_dim1 * 3;
+ z__9.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__9.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ z__5.r = z__6.r + z__9.r, z__5.i = z__6.i + z__9.i;
+ i__5 = j + (c_dim1 << 2);
+ z__10.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__10.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ z__4.r = z__5.r + z__10.r, z__4.i = z__5.i + z__10.i;
+ i__6 = j + c_dim1 * 5;
+ z__11.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__11.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__3.r = z__4.r + z__11.r, z__3.i = z__4.i + z__11.i;
+ i__7 = j + c_dim1 * 6;
+ z__12.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__12.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__2.r = z__3.r + z__12.r, z__2.i = z__3.i + z__12.i;
+ i__8 = j + c_dim1 * 7;
+ z__13.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__13.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ z__1.r = z__2.r + z__13.r, z__1.i = z__2.i + z__13.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + (c_dim1 << 1);
+ i__3 = j + (c_dim1 << 1);
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + (c_dim1 << 2);
+ i__3 = j + (c_dim1 << 2);
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 7;
+ i__3 = j + c_dim1 * 7;
+ z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L340: */
+ }
+ goto L410;
+L350:
+
+/* Special code for 8 x 8 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ v7.r = v[7].r, v7.i = v[7].i;
+ d_cnjg(&z__2, &v7);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t7.r = z__1.r, t7.i = z__1.i;
+ v8.r = v[8].r, v8.i = v[8].i;
+ d_cnjg(&z__2, &v8);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t8.r = z__1.r, t8.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ z__8.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__8.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + (c_dim1 << 1);
+ z__9.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__9.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ z__7.r = z__8.r + z__9.r, z__7.i = z__8.i + z__9.i;
+ i__4 = j + c_dim1 * 3;
+ z__10.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__10.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ z__6.r = z__7.r + z__10.r, z__6.i = z__7.i + z__10.i;
+ i__5 = j + (c_dim1 << 2);
+ z__11.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__11.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ z__5.r = z__6.r + z__11.r, z__5.i = z__6.i + z__11.i;
+ i__6 = j + c_dim1 * 5;
+ z__12.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__12.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__4.r = z__5.r + z__12.r, z__4.i = z__5.i + z__12.i;
+ i__7 = j + c_dim1 * 6;
+ z__13.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__13.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__3.r = z__4.r + z__13.r, z__3.i = z__4.i + z__13.i;
+ i__8 = j + c_dim1 * 7;
+ z__14.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__14.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ z__2.r = z__3.r + z__14.r, z__2.i = z__3.i + z__14.i;
+ i__9 = j + (c_dim1 << 3);
+ z__15.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, z__15.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ z__1.r = z__2.r + z__15.r, z__1.i = z__2.i + z__15.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + (c_dim1 << 1);
+ i__3 = j + (c_dim1 << 1);
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + (c_dim1 << 2);
+ i__3 = j + (c_dim1 << 2);
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 7;
+ i__3 = j + c_dim1 * 7;
+ z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + (c_dim1 << 3);
+ i__3 = j + (c_dim1 << 3);
+ z__2.r = sum.r * t8.r - sum.i * t8.i, z__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L360: */
+ }
+ goto L410;
+L370:
+
+/* Special code for 9 x 9 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ v7.r = v[7].r, v7.i = v[7].i;
+ d_cnjg(&z__2, &v7);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t7.r = z__1.r, t7.i = z__1.i;
+ v8.r = v[8].r, v8.i = v[8].i;
+ d_cnjg(&z__2, &v8);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t8.r = z__1.r, t8.i = z__1.i;
+ v9.r = v[9].r, v9.i = v[9].i;
+ d_cnjg(&z__2, &v9);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t9.r = z__1.r, t9.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ z__9.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__9.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + (c_dim1 << 1);
+ z__10.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__10.i = v2.r
+ * c__[i__3].i + v2.i * c__[i__3].r;
+ z__8.r = z__9.r + z__10.r, z__8.i = z__9.i + z__10.i;
+ i__4 = j + c_dim1 * 3;
+ z__11.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__11.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ z__7.r = z__8.r + z__11.r, z__7.i = z__8.i + z__11.i;
+ i__5 = j + (c_dim1 << 2);
+ z__12.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__12.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ z__6.r = z__7.r + z__12.r, z__6.i = z__7.i + z__12.i;
+ i__6 = j + c_dim1 * 5;
+ z__13.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__13.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__5.r = z__6.r + z__13.r, z__5.i = z__6.i + z__13.i;
+ i__7 = j + c_dim1 * 6;
+ z__14.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__14.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__4.r = z__5.r + z__14.r, z__4.i = z__5.i + z__14.i;
+ i__8 = j + c_dim1 * 7;
+ z__15.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__15.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ z__3.r = z__4.r + z__15.r, z__3.i = z__4.i + z__15.i;
+ i__9 = j + (c_dim1 << 3);
+ z__16.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, z__16.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ z__2.r = z__3.r + z__16.r, z__2.i = z__3.i + z__16.i;
+ i__10 = j + c_dim1 * 9;
+ z__17.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, z__17.i =
+ v9.r * c__[i__10].i + v9.i * c__[i__10].r;
+ z__1.r = z__2.r + z__17.r, z__1.i = z__2.i + z__17.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + (c_dim1 << 1);
+ i__3 = j + (c_dim1 << 1);
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + (c_dim1 << 2);
+ i__3 = j + (c_dim1 << 2);
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 7;
+ i__3 = j + c_dim1 * 7;
+ z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + (c_dim1 << 3);
+ i__3 = j + (c_dim1 << 3);
+ z__2.r = sum.r * t8.r - sum.i * t8.i, z__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 9;
+ i__3 = j + c_dim1 * 9;
+ z__2.r = sum.r * t9.r - sum.i * t9.i, z__2.i = sum.r * t9.i +
+ sum.i * t9.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L380: */
+ }
+ goto L410;
+L390:
+
+/* Special code for 10 x 10 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ d_cnjg(&z__2, &v1);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t1.r = z__1.r, t1.i = z__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ d_cnjg(&z__2, &v2);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t2.r = z__1.r, t2.i = z__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ d_cnjg(&z__2, &v3);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t3.r = z__1.r, t3.i = z__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ d_cnjg(&z__2, &v4);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t4.r = z__1.r, t4.i = z__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ d_cnjg(&z__2, &v5);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t5.r = z__1.r, t5.i = z__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ d_cnjg(&z__2, &v6);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t6.r = z__1.r, t6.i = z__1.i;
+ v7.r = v[7].r, v7.i = v[7].i;
+ d_cnjg(&z__2, &v7);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t7.r = z__1.r, t7.i = z__1.i;
+ v8.r = v[8].r, v8.i = v[8].i;
+ d_cnjg(&z__2, &v8);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t8.r = z__1.r, t8.i = z__1.i;
+ v9.r = v[9].r, v9.i = v[9].i;
+ d_cnjg(&z__2, &v9);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t9.r = z__1.r, t9.i = z__1.i;
+ v10.r = v[10].r, v10.i = v[10].i;
+ d_cnjg(&z__2, &v10);
+ z__1.r = tau->r * z__2.r - tau->i * z__2.i, z__1.i = tau->r * z__2.i
+ + tau->i * z__2.r;
+ t10.r = z__1.r, t10.i = z__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ z__10.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, z__10.i = v1.r
+ * c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + (c_dim1 << 1);
+ z__11.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, z__11.i = v2.r
+ * c__[i__3].i + v2.i * c__[i__3].r;
+ z__9.r = z__10.r + z__11.r, z__9.i = z__10.i + z__11.i;
+ i__4 = j + c_dim1 * 3;
+ z__12.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, z__12.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ z__8.r = z__9.r + z__12.r, z__8.i = z__9.i + z__12.i;
+ i__5 = j + (c_dim1 << 2);
+ z__13.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, z__13.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ z__7.r = z__8.r + z__13.r, z__7.i = z__8.i + z__13.i;
+ i__6 = j + c_dim1 * 5;
+ z__14.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, z__14.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ z__6.r = z__7.r + z__14.r, z__6.i = z__7.i + z__14.i;
+ i__7 = j + c_dim1 * 6;
+ z__15.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, z__15.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ z__5.r = z__6.r + z__15.r, z__5.i = z__6.i + z__15.i;
+ i__8 = j + c_dim1 * 7;
+ z__16.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, z__16.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ z__4.r = z__5.r + z__16.r, z__4.i = z__5.i + z__16.i;
+ i__9 = j + (c_dim1 << 3);
+ z__17.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, z__17.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ z__3.r = z__4.r + z__17.r, z__3.i = z__4.i + z__17.i;
+ i__10 = j + c_dim1 * 9;
+ z__18.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, z__18.i =
+ v9.r * c__[i__10].i + v9.i * c__[i__10].r;
+ z__2.r = z__3.r + z__18.r, z__2.i = z__3.i + z__18.i;
+ i__11 = j + c_dim1 * 10;
+ z__19.r = v10.r * c__[i__11].r - v10.i * c__[i__11].i, z__19.i =
+ v10.r * c__[i__11].i + v10.i * c__[i__11].r;
+ z__1.r = z__2.r + z__19.r, z__1.i = z__2.i + z__19.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ z__2.r = sum.r * t1.r - sum.i * t1.i, z__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + (c_dim1 << 1);
+ i__3 = j + (c_dim1 << 1);
+ z__2.r = sum.r * t2.r - sum.i * t2.i, z__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ z__2.r = sum.r * t3.r - sum.i * t3.i, z__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + (c_dim1 << 2);
+ i__3 = j + (c_dim1 << 2);
+ z__2.r = sum.r * t4.r - sum.i * t4.i, z__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ z__2.r = sum.r * t5.r - sum.i * t5.i, z__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ z__2.r = sum.r * t6.r - sum.i * t6.i, z__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 7;
+ i__3 = j + c_dim1 * 7;
+ z__2.r = sum.r * t7.r - sum.i * t7.i, z__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + (c_dim1 << 3);
+ i__3 = j + (c_dim1 << 3);
+ z__2.r = sum.r * t8.r - sum.i * t8.i, z__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 9;
+ i__3 = j + c_dim1 * 9;
+ z__2.r = sum.r * t9.r - sum.i * t9.i, z__2.i = sum.r * t9.i +
+ sum.i * t9.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+ i__2 = j + c_dim1 * 10;
+ i__3 = j + c_dim1 * 10;
+ z__2.r = sum.r * t10.r - sum.i * t10.i, z__2.i = sum.r * t10.i +
+ sum.i * t10.r;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ c__[i__2].r = z__1.r, c__[i__2].i = z__1.i;
+/* L400: */
+ }
+ goto L410;
+ }
+L410:
+ return 0;
+
+/* End of ZLARFX */
+
+} /* zlarfx_ */
diff --git a/contrib/libs/clapack/zlargv.c b/contrib/libs/clapack/zlargv.c
new file mode 100644
index 0000000000..74a4b6f7d8
--- /dev/null
+++ b/contrib/libs/clapack/zlargv.c
@@ -0,0 +1,336 @@
+/* zlargv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlargv_(integer *n, doublecomplex *x, integer *incx, 
+	doublecomplex *y, integer *incy, doublereal *c__, integer *incc)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8, d__9, d__10;
+    doublecomplex z__1, z__2, z__3;
+
+    /* Builtin functions */
+    double log(doublereal), pow_di(doublereal *, integer *), d_imag(
+	    doublecomplex *), sqrt(doublereal);
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    doublereal d__;
+    doublecomplex f, g;
+    integer i__, j;
+    doublecomplex r__;
+    doublereal f2, g2;
+    integer ic;
+    doublereal di;
+    doublecomplex ff;
+    doublereal cs, dr;
+    doublecomplex fs, gs;
+    integer ix, iy;
+    doublecomplex sn;
+    doublereal f2s, g2s, eps, scale;
+    integer count;
+    doublereal safmn2;
+    extern doublereal dlapy2_(doublereal *, doublereal *);
+    doublereal safmx2;
+    extern doublereal dlamch_(char *);
+    doublereal safmin;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLARGV generates a vector of complex plane rotations with real */
+/* cosines, determined by elements of the complex vectors x and y. */
+/* For i = 1,2,...,n */
+
+/* ( c(i) s(i) ) ( x(i) ) = ( r(i) ) */
+/* ( -conjg(s(i)) c(i) ) ( y(i) ) = ( 0 ) */
+
+/* where c(i)**2 + ABS(s(i))**2 = 1 */
+
+/* The following conventions are used (these are the same as in ZLARTG, */
+/* but differ from the BLAS1 routine ZROTG): */
+/* If y(i)=0, then c(i)=1 and s(i)=0. */
+/* If x(i)=0, then c(i)=0 and s(i) is chosen so that r(i) is real. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of plane rotations to be generated. */
+
+/* X (input/output) COMPLEX*16 array, dimension (1+(N-1)*INCX) */
+/* On entry, the vector x. */
+/* On exit, x(i) is overwritten by r(i), for i = 1,...,n. */
+
+/* INCX (input) INTEGER */
+/* The increment between elements of X. INCX > 0. */
+
+/* Y (input/output) COMPLEX*16 array, dimension (1+(N-1)*INCY) */
+/* On entry, the vector y. */
+/* On exit, the sines of the plane rotations. */
+
+/* INCY (input) INTEGER */
+/* The increment between elements of Y. INCY > 0. */
+
+/* C (output) DOUBLE PRECISION array, dimension (1+(N-1)*INCC) */
+/* The cosines of the plane rotations. */
+
+/* INCC (input) INTEGER */
+/* The increment between elements of C. INCC > 0. */
+
+/* Further Details */
+/* ======= ======= */
+
+/* 6-6-96 - Modified with a new algorithm by W. Kahan and J. Demmel */
+
+/* This version has a few statements commented out for thread safety */
+/* (machine parameters are computed on each entry). 10 feb 03, SJH. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* LOGICAL FIRST */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Save statement .. */
+/* SAVE FIRST, SAFMX2, SAFMIN, SAFMN2 */
+/* .. */
+/* .. Data statements .. */
+/* DATA FIRST / .TRUE. / */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* IF( FIRST ) THEN */
+/* FIRST = .FALSE. */
+    /* Parameter adjustments */
+    --c__;
+    --y;
+    --x;
+
+    /* Function Body */
+    safmin = dlamch_("S"); /* smallest safe (normalized) positive number */
+    eps = dlamch_("E"); /* relative machine precision */
+    d__1 = dlamch_("B");
+    i__1 = (integer) (log(safmin / eps) / log(dlamch_("B")) / 2.);
+    safmn2 = pow_di(&d__1, &i__1);
+    safmx2 = 1. / safmn2; /* safmn2..safmx2 bracket the magnitude range safe to square */
+/* END IF */
+    ix = 1; /* 1-based cursors into x, y and c */
+    iy = 1;
+    ic = 1;
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = ix;
+	f.r = x[i__2].r, f.i = x[i__2].i;
+	i__2 = iy;
+	g.r = y[i__2].r, g.i = y[i__2].i;
+
+/* Use identical algorithm as in ZLARTG */
+
+/* Computing MAX */
+/* Computing MAX */
+	d__7 = (d__1 = f.r, abs(d__1)), d__8 = (d__2 = d_imag(&f), abs(d__2));
+/* Computing MAX */
+	d__9 = (d__3 = g.r, abs(d__3)), d__10 = (d__4 = d_imag(&g), abs(d__4))
+		;
+	d__5 = max(d__7,d__8), d__6 = max(d__9,d__10);
+	scale = max(d__5,d__6);
+	fs.r = f.r, fs.i = f.i;
+	gs.r = g.r, gs.i = g.i;
+	count = 0; /* net number of down-scalings applied to fs/gs (negative = up-scalings) */
+	if (scale >= safmx2) { /* inputs huge: scale down until within the safe range */
+L10:
+	    ++count;
+	    z__1.r = safmn2 * fs.r, z__1.i = safmn2 * fs.i;
+	    fs.r = z__1.r, fs.i = z__1.i;
+	    z__1.r = safmn2 * gs.r, z__1.i = safmn2 * gs.i;
+	    gs.r = z__1.r, gs.i = z__1.i;
+	    scale *= safmn2;
+	    if (scale >= safmx2) {
+		goto L10;
+	    }
+	} else if (scale <= safmn2) { /* inputs tiny: scale up (unless g is exactly zero) */
+	    if (g.r == 0. && g.i == 0.) {
+		cs = 1.; /* y(i) = 0: identity rotation, r(i) = x(i) */
+		sn.r = 0., sn.i = 0.;
+		r__.r = f.r, r__.i = f.i;
+		goto L50;
+	    }
+L20:
+	    --count;
+	    z__1.r = safmx2 * fs.r, z__1.i = safmx2 * fs.i;
+	    fs.r = z__1.r, fs.i = z__1.i;
+	    z__1.r = safmx2 * gs.r, z__1.i = safmx2 * gs.i;
+	    gs.r = z__1.r, gs.i = z__1.i;
+	    scale *= safmx2;
+	    if (scale <= safmn2) {
+		goto L20;
+	    }
+	}
+/* Computing 2nd power */
+	d__1 = fs.r;
+/* Computing 2nd power */
+	d__2 = d_imag(&fs);
+	f2 = d__1 * d__1 + d__2 * d__2;
+/* Computing 2nd power */
+	d__1 = gs.r;
+/* Computing 2nd power */
+	d__2 = d_imag(&gs);
+	g2 = d__1 * d__1 + d__2 * d__2;
+	if (f2 <= max(g2,1.) * safmin) { /* rare case: |f|^2 negligible relative to |g|^2 */
+
+/* This is a rare case: F is very small. */
+
+	    if (f.r == 0. && f.i == 0.) { /* x(i) = 0: c = 0, choose s so that r is real */
+		cs = 0.;
+		d__2 = g.r;
+		d__3 = d_imag(&g);
+		d__1 = dlapy2_(&d__2, &d__3);
+		r__.r = d__1, r__.i = 0.;
+/* Do complex/real division explicitly with two real */
+/* divisions */
+		d__1 = gs.r;
+		d__2 = d_imag(&gs);
+		d__ = dlapy2_(&d__1, &d__2);
+		d__1 = gs.r / d__;
+		d__2 = -d_imag(&gs) / d__;
+		z__1.r = d__1, z__1.i = d__2;
+		sn.r = z__1.r, sn.i = z__1.i;
+		goto L50;
+	    }
+	    d__1 = fs.r;
+	    d__2 = d_imag(&fs);
+	    f2s = dlapy2_(&d__1, &d__2);
+/* G2 and G2S are accurate */
+/* G2 is at least SAFMIN, and G2S is at least SAFMN2 */
+	    g2s = sqrt(g2);
+/* Error in CS from underflow in F2S is at most */
+/* UNFL / SAFMN2 .lt. sqrt(UNFL*EPS) .lt. EPS */
+/* If MAX(G2,ONE)=G2, then F2 .lt. G2*SAFMIN, */
+/* and so CS .lt. sqrt(SAFMIN) */
+/* If MAX(G2,ONE)=ONE, then F2 .lt. SAFMIN */
+/* and so CS .lt. sqrt(SAFMIN)/SAFMN2 = sqrt(EPS) */
+/* Therefore, CS = F2S/G2S / sqrt( 1 + (F2S/G2S)**2 ) = F2S/G2S */
+	    cs = f2s / g2s;
+/* Make sure abs(FF) = 1 */
+/* Do complex/real division explicitly with 2 real divisions */
+/* Computing MAX */
+	    d__3 = (d__1 = f.r, abs(d__1)), d__4 = (d__2 = d_imag(&f), abs(
+		    d__2));
+	    if (max(d__3,d__4) > 1.) {
+		d__1 = f.r;
+		d__2 = d_imag(&f);
+		d__ = dlapy2_(&d__1, &d__2);
+		d__1 = f.r / d__;
+		d__2 = d_imag(&f) / d__;
+		z__1.r = d__1, z__1.i = d__2;
+		ff.r = z__1.r, ff.i = z__1.i;
+	    } else { /* |f| small: pre-scale by safmx2 before normalizing to avoid underflow */
+		dr = safmx2 * f.r;
+		di = safmx2 * d_imag(&f);
+		d__ = dlapy2_(&dr, &di);
+		d__1 = dr / d__;
+		d__2 = di / d__;
+		z__1.r = d__1, z__1.i = d__2;
+		ff.r = z__1.r, ff.i = z__1.i;
+	    }
+	    d__1 = gs.r / g2s;
+	    d__2 = -d_imag(&gs) / g2s;
+	    z__2.r = d__1, z__2.i = d__2;
+	    z__1.r = ff.r * z__2.r - ff.i * z__2.i, z__1.i = ff.r * z__2.i + 
+		    ff.i * z__2.r;
+	    sn.r = z__1.r, sn.i = z__1.i;
+	    z__2.r = cs * f.r, z__2.i = cs * f.i;
+	    z__3.r = sn.r * g.r - sn.i * g.i, z__3.i = sn.r * g.i + sn.i * 
+		    g.r;
+	    z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+	    r__.r = z__1.r, r__.i = z__1.i; /* r = cs*f + sn*g */
+	} else {
+
+/* This is the most common case. */
+/* Neither F2 nor F2/G2 are less than SAFMIN */
+/* F2S cannot overflow, and it is accurate */
+
+	    f2s = sqrt(g2 / f2 + 1.);
+/* Do the F2S(real)*FS(complex) multiply with two real */
+/* multiplies */
+	    d__1 = f2s * fs.r;
+	    d__2 = f2s * d_imag(&fs);
+	    z__1.r = d__1, z__1.i = d__2;
+	    r__.r = z__1.r, r__.i = z__1.i;
+	    cs = 1. / f2s;
+	    d__ = f2 + g2;
+/* Do complex/real division explicitly with two real divisions */
+	    d__1 = r__.r / d__;
+	    d__2 = d_imag(&r__) / d__;
+	    z__1.r = d__1, z__1.i = d__2;
+	    sn.r = z__1.r, sn.i = z__1.i;
+	    d_cnjg(&z__2, &gs);
+	    z__1.r = sn.r * z__2.r - sn.i * z__2.i, z__1.i = sn.r * z__2.i + 
+		    sn.i * z__2.r;
+	    sn.r = z__1.r, sn.i = z__1.i;
+	    if (count != 0) { /* undo the fs/gs scaling on the returned r */
+		if (count > 0) {
+		    i__2 = count;
+		    for (j = 1; j <= i__2; ++j) {
+			z__1.r = safmx2 * r__.r, z__1.i = safmx2 * r__.i;
+			r__.r = z__1.r, r__.i = z__1.i;
+/* L30: */
+		    }
+		} else {
+		    i__2 = -count;
+		    for (j = 1; j <= i__2; ++j) {
+			z__1.r = safmn2 * r__.r, z__1.i = safmn2 * r__.i;
+			r__.r = z__1.r, r__.i = z__1.i;
+/* L40: */
+		    }
+		}
+	    }
+	}
+L50:
+	c__[ic] = cs; /* store cosine, sine and r(i) for rotation i */
+	i__2 = iy;
+	y[i__2].r = sn.r, y[i__2].i = sn.i;
+	i__2 = ix;
+	x[i__2].r = r__.r, x[i__2].i = r__.i;
+	ic += *incc;
+	iy += *incy;
+	ix += *incx;
+/* L60: */
+    }
+    return 0;
+
+/* End of ZLARGV */
+
+} /* zlargv_ */
diff --git a/contrib/libs/clapack/zlarnv.c b/contrib/libs/clapack/zlarnv.c
new file mode 100644
index 0000000000..2e4e257a88
--- /dev/null
+++ b/contrib/libs/clapack/zlarnv.c
@@ -0,0 +1,190 @@
+/* zlarnv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlarnv_(integer *idist, integer *iseed, integer *n, 
+	doublecomplex *x)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3, i__4, i__5;
+    doublereal d__1, d__2;
+    doublecomplex z__1, z__2, z__3;
+
+    /* Builtin functions */
+    double log(doublereal), sqrt(doublereal);
+    void z_exp(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__;
+    doublereal u[128]; /* scratch: holds one batch of 2*IL <= 128 uniform (0,1) deviates */
+    integer il, iv;
+    extern /* Subroutine */ int dlaruv_(integer *, integer *, doublereal *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLARNV returns a vector of n random complex numbers from a uniform or */
+/* normal distribution. */
+
+/* Arguments */
+/* ========= */
+
+/* IDIST (input) INTEGER */
+/* Specifies the distribution of the random numbers: */
+/* = 1: real and imaginary parts each uniform (0,1) */
+/* = 2: real and imaginary parts each uniform (-1,1) */
+/* = 3: real and imaginary parts each normal (0,1) */
+/* = 4: uniformly distributed on the disc abs(z) < 1 */
+/* = 5: uniformly distributed on the circle abs(z) = 1 */
+
+/* ISEED (input/output) INTEGER array, dimension (4) */
+/* On entry, the seed of the random number generator; the array */
+/* elements must be between 0 and 4095, and ISEED(4) must be */
+/* odd. */
+/* On exit, the seed is updated. */
+
+/* N (input) INTEGER */
+/* The number of random numbers to be generated. */
+
+/* X (output) COMPLEX*16 array, dimension (N) */
+/* The generated random numbers. */
+
+/* Further Details */
+/* =============== */
+
+/* This routine calls the auxiliary routine DLARUV to generate random */
+/* real numbers from a uniform (0,1) distribution, in batches of up to */
+/* 128 using vectorisable code. The Box-Muller method is used to */
+/* transform numbers from a uniform to a normal distribution. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --x;
+    --iseed;
+
+    /* Function Body */
+    i__1 = *n;
+    for (iv = 1; iv <= i__1; iv += 64) { /* fill X in batches of up to 64 complex entries */
+/* Computing MIN */
+	i__2 = 64, i__3 = *n - iv + 1;
+	il = min(i__2,i__3);
+
+/* Call DLARUV to generate 2*IL real numbers from a uniform (0,1) */
+/* distribution (2*IL <= LV) */
+
+	i__2 = il << 1; /* two uniforms per complex entry: real and imaginary parts */
+	dlaruv_(&iseed[1], &i__2, u);
+
+	if (*idist == 1) {
+
+/* Copy generated numbers */
+
+	    i__2 = il;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = iv + i__ - 1;
+		i__4 = (i__ << 1) - 2; /* U(2*i-2): real part of the i-th entry */
+		i__5 = (i__ << 1) - 1; /* U(2*i-1): imaginary part of the i-th entry */
+		z__1.r = u[i__4], z__1.i = u[i__5];
+		x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L10: */
+	    }
+	} else if (*idist == 2) {
+
+/* Convert generated numbers to uniform (-1,1) distribution */
+
+	    i__2 = il;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = iv + i__ - 1;
+		d__1 = u[(i__ << 1) - 2] * 2. - 1.; /* map (0,1) -> (-1,1) */
+		d__2 = u[(i__ << 1) - 1] * 2. - 1.;
+		z__1.r = d__1, z__1.i = d__2;
+		x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L20: */
+	    }
+	} else if (*idist == 3) {
+
+/* Convert generated numbers to normal (0,1) distribution */
+
+	    i__2 = il;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = iv + i__ - 1;
+		d__1 = sqrt(log(u[(i__ << 1) - 2]) * -2.); /* Box-Muller radius */
+		d__2 = u[(i__ << 1) - 1] * 6.2831853071795864769252867663; /* angle in [0,2*pi) */
+		z__3.r = 0., z__3.i = d__2;
+		z_exp(&z__2, &z__3); /* exp(i*angle): unit-modulus direction */
+		z__1.r = d__1 * z__2.r, z__1.i = d__1 * z__2.i;
+		x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L30: */
+	    }
+	} else if (*idist == 4) {
+
+/* Convert generated numbers to complex numbers uniformly */
+/* distributed on the unit disk */
+
+	    i__2 = il;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = iv + i__ - 1;
+		d__1 = sqrt(u[(i__ << 1) - 2]); /* sqrt makes the radius uniform over disc area */
+		d__2 = u[(i__ << 1) - 1] * 6.2831853071795864769252867663;
+		z__3.r = 0., z__3.i = d__2;
+		z_exp(&z__2, &z__3);
+		z__1.r = d__1 * z__2.r, z__1.i = d__1 * z__2.i;
+		x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L40: */
+	    }
+	} else if (*idist == 5) {
+
+/* Convert generated numbers to complex numbers uniformly */
+/* distributed on the unit circle */
+
+	    i__2 = il;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = iv + i__ - 1;
+		d__1 = u[(i__ << 1) - 1] * 6.2831853071795864769252867663; /* only the 2nd deviate of each pair is used */
+		z__2.r = 0., z__2.i = d__1;
+		z_exp(&z__1, &z__2);
+		x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L50: */
+	    }
+	}
+/* L60: */
+    }
+    return 0;
+
+/* End of ZLARNV */
+
+} /* zlarnv_ */
diff --git a/contrib/libs/clapack/zlarrv.c b/contrib/libs/clapack/zlarrv.c
new file mode 100644
index 0000000000..2edf4519a5
--- /dev/null
+++ b/contrib/libs/clapack/zlarrv.c
@@ -0,0 +1,1022 @@
+/* zlarrv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static integer c__1 = 1;
+static integer c__2 = 2;
+static doublereal c_b28 = 0.;
+
+/* Subroutine */ int zlarrv_(integer *n, doublereal *vl, doublereal *vu,
+ doublereal *d__, doublereal *l, doublereal *pivmin, integer *isplit,
+ integer *m, integer *dol, integer *dou, doublereal *minrgp,
+ doublereal *rtol1, doublereal *rtol2, doublereal *w, doublereal *werr,
+ doublereal *wgap, integer *iblock, integer *indexw, doublereal *gers,
+ doublecomplex *z__, integer *ldz, integer *isuppz, doublereal *work,
+ integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+ doublereal d__1, d__2;
+ doublecomplex z__1;
+ logical L__1;
+
+ /* Builtin functions */
+ double log(doublereal);
+
+ /* Local variables */
+ integer minwsize, i__, j, k, p, q, miniwsize, ii;
+ doublereal gl;
+ integer im, in;
+ doublereal gu, gap, eps, tau, tol, tmp;
+ integer zto;
+ doublereal ztz;
+ integer iend, jblk;
+ doublereal lgap;
+ integer done;
+ doublereal rgap, left;
+ integer wend, iter;
+ doublereal bstw;
+ integer itmp1, indld;
+ doublereal fudge;
+ integer idone;
+ doublereal sigma;
+ integer iinfo, iindr;
+ doublereal resid;
+ logical eskip;
+ doublereal right;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ integer nclus, zfrom;
+ doublereal rqtol;
+ integer iindc1, iindc2, indin1, indin2;
+ logical stp2ii;
+ extern /* Subroutine */ int zlar1v_(integer *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublecomplex *,
+ logical *, integer *, doublereal *, doublereal *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *)
+ ;
+ doublereal lambda;
+ extern doublereal dlamch_(char *);
+ integer ibegin, indeig;
+ logical needbs;
+ integer indlld;
+ doublereal sgndef, mingma;
+ extern /* Subroutine */ int dlarrb_(integer *, doublereal *, doublereal *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *, integer *);
+ integer oldien, oldncl, wbegin;
+ doublereal spdiam;
+ integer negcnt;
+ extern /* Subroutine */ int dlarrf_(integer *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *);
+ integer oldcls;
+ doublereal savgap;
+ integer ndepth;
+ doublereal ssigma;
+ extern /* Subroutine */ int zdscal_(integer *, doublereal *,
+ doublecomplex *, integer *);
+ logical usedbs;
+ integer iindwk, offset;
+ doublereal gaptol;
+ integer newcls, oldfst, indwrk, windex, oldlst;
+ logical usedrq;
+ integer newfst, newftt, parity, windmn, windpl, isupmn, newlst, zusedl;
+ doublereal bstres;
+ integer newsiz, zusedu, zusedw;
+ doublereal nrminv;
+ logical tryrqc;
+ integer isupmx;
+ doublereal rqcorr;
+ extern /* Subroutine */ int zlaset_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, doublecomplex *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLARRV computes the eigenvectors of the tridiagonal matrix */
+/* T = L D L^T given L, D and APPROXIMATIONS to the eigenvalues of L D L^T. */
+/* The input eigenvalues should have been computed by DLARRE. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* Lower and upper bounds of the interval that contains the desired */
+/* eigenvalues. VL < VU. Needed to compute gaps on the left or right */
+/* end of the extremal eigenvalues in the desired RANGE. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the N diagonal elements of the diagonal matrix D. */
+/* On exit, D may be overwritten. */
+
+/* L (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the (N-1) subdiagonal elements of the unit */
+/* bidiagonal matrix L are in elements 1 to N-1 of L */
+/* (if the matrix is not split.) At the end of each block */
+/* is stored the corresponding shift as given by DLARRE. */
+/* On exit, L is overwritten. */
+
+/* PIVMIN (input) DOUBLE PRECISION */
+/* The minimum pivot allowed in the Sturm sequence. */
+
+/* ISPLIT (input) INTEGER array, dimension (N) */
+/* The splitting points, at which T breaks up into blocks. */
+/* The first block consists of rows/columns 1 to */
+/* ISPLIT( 1 ), the second of rows/columns ISPLIT( 1 )+1 */
+/* through ISPLIT( 2 ), etc. */
+
+/* M (input) INTEGER */
+/* The total number of input eigenvalues. 0 <= M <= N. */
+
+/* DOL (input) INTEGER */
+/* DOU (input) INTEGER */
+/* If the user wants to compute only selected eigenvectors from all */
+/* the eigenvalues supplied, he can specify an index range DOL:DOU. */
+/* Or else the setting DOL=1, DOU=M should be applied. */
+/* Note that DOL and DOU refer to the order in which the eigenvalues */
+/* are stored in W. */
+/* If the user wants to compute only selected eigenpairs, then */
+/* the columns DOL-1 to DOU+1 of the eigenvector space Z contain the */
+/* computed eigenvectors. All other columns of Z are set to zero. */
+
+/* MINRGP (input) DOUBLE PRECISION */
+
+/* RTOL1 (input) DOUBLE PRECISION */
+/* RTOL2 (input) DOUBLE PRECISION */
+/* Parameters for bisection. */
+/* An interval [LEFT,RIGHT] has converged if */
+/* RIGHT-LEFT.LT.MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) */
+
+/* W (input/output) DOUBLE PRECISION array, dimension (N) */
+/* The first M elements of W contain the APPROXIMATE eigenvalues for */
+/* which eigenvectors are to be computed. The eigenvalues */
+/* should be grouped by split-off block and ordered from */
+/* smallest to largest within the block ( The output array */
+/* W from DLARRE is expected here ). Furthermore, they are with */
+/* respect to the shift of the corresponding root representation */
+/* for their block. On exit, W holds the eigenvalues of the */
+/* UNshifted matrix. */
+
+/* WERR (input/output) DOUBLE PRECISION array, dimension (N) */
+/* The first M elements contain the semiwidth of the uncertainty */
+/* interval of the corresponding eigenvalue in W */
+
+/* WGAP (input/output) DOUBLE PRECISION array, dimension (N) */
+/* The separation from the right neighbor eigenvalue in W. */
+
+/* IBLOCK (input) INTEGER array, dimension (N) */
+/* The indices of the blocks (submatrices) associated with the */
+/* corresponding eigenvalues in W; IBLOCK(i)=1 if eigenvalue */
+/* W(i) belongs to the first block from the top, =2 if W(i) */
+/* belongs to the second block, etc. */
+
+/* INDEXW (input) INTEGER array, dimension (N) */
+/* The indices of the eigenvalues within each block (submatrix); */
+/* for example, INDEXW(i)= 10 and IBLOCK(i)=2 imply that the */
+/* i-th eigenvalue W(i) is the 10-th eigenvalue in the second block. */
+
+/* GERS (input) DOUBLE PRECISION array, dimension (2*N) */
+/* The N Gerschgorin intervals (the i-th Gerschgorin interval */
+/* is (GERS(2*i-1), GERS(2*i)). The Gerschgorin intervals should */
+/* be computed from the original UNshifted matrix. */
+
+/* Z (output) COMPLEX*16 array, dimension (LDZ, max(1,M) ) */
+/* If INFO = 0, the first M columns of Z contain the */
+/* orthonormal eigenvectors of the matrix T */
+/* corresponding to the input eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', LDZ >= max(1,N). */
+
+/* ISUPPZ (output) INTEGER array, dimension ( 2*max(1,M) ) */
+/* The support of the eigenvectors in Z, i.e., the indices */
+/* indicating the nonzero elements in Z. The I-th eigenvector */
+/* is nonzero only in elements ISUPPZ( 2*I-1 ) through */
+/* ISUPPZ( 2*I ). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (12*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (7*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+
+/* > 0: A problem occurred in ZLARRV. */
+/* < 0: One of the called subroutines signaled an internal problem. */
+/* Needs inspection of the corresponding parameter IINFO */
+/* for further information. */
+
+/* =-1: Problem in DLARRB when refining a child's eigenvalues. */
+/* =-2: Problem in DLARRF when computing the RRR of a child. */
+/* When a child is inside a tight cluster, it can be difficult */
+/* to find an RRR. A partial remedy from the user's point of */
+/* view is to make the parameter MINRGP smaller and recompile. */
+/* However, as the orthogonality of the computed vectors is */
+/* proportional to 1/MINRGP, the user should be aware that */
+/* he might be trading in precision when he decreases MINRGP. */
+/* =-3: Problem in DLARRB when refining a single eigenvalue */
+/* after the Rayleigh correction was rejected. */
+/* = 5: The Rayleigh Quotient Iteration failed to converge to */
+/* full accuracy in MAXITR steps. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+/* .. */
+/* The first N entries of WORK are reserved for the eigenvalues */
+ /* Parameter adjustments */
+ --d__;
+ --l;
+ --isplit;
+ --w;
+ --werr;
+ --wgap;
+ --iblock;
+ --indexw;
+ --gers;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --isuppz;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ indld = *n + 1;
+ indlld = (*n << 1) + 1;
+ indin1 = *n * 3 + 1;
+ indin2 = (*n << 2) + 1;
+ indwrk = *n * 5 + 1;
+ minwsize = *n * 12;
+ i__1 = minwsize;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L5: */
+ }
+/* IWORK(IINDR+1:IINDR+N) hold the twist indices R for the */
+/* factorization used to compute the FP vector */
+ iindr = 0;
+/* IWORK(IINDC1+1:IINC2+N) are used to store the clusters of the current */
+/* layer and the one above. */
+ iindc1 = *n;
+ iindc2 = *n << 1;
+ iindwk = *n * 3 + 1;
+ miniwsize = *n * 7;
+ i__1 = miniwsize;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ iwork[i__] = 0;
+/* L10: */
+ }
+ zusedl = 1;
+ if (*dol > 1) {
+/* Set lower bound for use of Z */
+ zusedl = *dol - 1;
+ }
+ zusedu = *m;
+ if (*dou < *m) {
+/* Set lower bound for use of Z */
+ zusedu = *dou + 1;
+ }
+/* The width of the part of Z that is used */
+ zusedw = zusedu - zusedl + 1;
+ zlaset_("Full", n, &zusedw, &c_b1, &c_b1, &z__[zusedl * z_dim1 + 1], ldz);
+ eps = dlamch_("Precision");
+ rqtol = eps * 2.;
+
+/* Set expert flags for standard code. */
+ tryrqc = TRUE_;
+ if (*dol == 1 && *dou == *m) {
+ } else {
+/* Only selected eigenpairs are computed. Since the other evalues */
+/* are not refined by RQ iteration, bisection has to compute to full */
+/* accuracy. */
+ *rtol1 = eps * 4.;
+ *rtol2 = eps * 4.;
+ }
+/* The entries WBEGIN:WEND in W, WERR, WGAP correspond to the */
+/* desired eigenvalues. The support of the nonzero eigenvector */
+/* entries is contained in the interval IBEGIN:IEND. */
+/* Remark that if k eigenpairs are desired, then the eigenvectors */
+/* are stored in k contiguous columns of Z. */
+/* DONE is the number of eigenvectors already computed */
+ done = 0;
+ ibegin = 1;
+ wbegin = 1;
+ i__1 = iblock[*m];
+ for (jblk = 1; jblk <= i__1; ++jblk) {
+ iend = isplit[jblk];
+ sigma = l[iend];
+/* Find the eigenvectors of the submatrix indexed IBEGIN */
+/* through IEND. */
+ wend = wbegin - 1;
+L15:
+ if (wend < *m) {
+ if (iblock[wend + 1] == jblk) {
+ ++wend;
+ goto L15;
+ }
+ }
+ if (wend < wbegin) {
+ ibegin = iend + 1;
+ goto L170;
+ } else if (wend < *dol || wbegin > *dou) {
+ ibegin = iend + 1;
+ wbegin = wend + 1;
+ goto L170;
+ }
+/* Find local spectral diameter of the block */
+ gl = gers[(ibegin << 1) - 1];
+ gu = gers[ibegin * 2];
+ i__2 = iend;
+ for (i__ = ibegin + 1; i__ <= i__2; ++i__) {
+/* Computing MIN */
+ d__1 = gers[(i__ << 1) - 1];
+ gl = min(d__1,gl);
+/* Computing MAX */
+ d__1 = gers[i__ * 2];
+ gu = max(d__1,gu);
+/* L20: */
+ }
+ spdiam = gu - gl;
+/* OLDIEN is the last index of the previous block */
+ oldien = ibegin - 1;
+/* Calculate the size of the current block */
+ in = iend - ibegin + 1;
+/* The number of eigenvalues in the current block */
+ im = wend - wbegin + 1;
+/* This is for a 1x1 block */
+ if (ibegin == iend) {
+ ++done;
+ i__2 = ibegin + wbegin * z_dim1;
+ z__[i__2].r = 1., z__[i__2].i = 0.;
+ isuppz[(wbegin << 1) - 1] = ibegin;
+ isuppz[wbegin * 2] = ibegin;
+ w[wbegin] += sigma;
+ work[wbegin] = w[wbegin];
+ ibegin = iend + 1;
+ ++wbegin;
+ goto L170;
+ }
+/* The desired (shifted) eigenvalues are stored in W(WBEGIN:WEND) */
+/* Note that these can be approximations, in this case, the corresp. */
+/* entries of WERR give the size of the uncertainty interval. */
+/* The eigenvalue approximations will be refined when necessary as */
+/* high relative accuracy is required for the computation of the */
+/* corresponding eigenvectors. */
+ dcopy_(&im, &w[wbegin], &c__1, &work[wbegin], &c__1);
+/* We store in W the eigenvalue approximations w.r.t. the original */
+/* matrix T. */
+ i__2 = im;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ w[wbegin + i__ - 1] += sigma;
+/* L30: */
+ }
+/* NDEPTH is the current depth of the representation tree */
+ ndepth = 0;
+/* PARITY is either 1 or 0 */
+ parity = 1;
+/* NCLUS is the number of clusters for the next level of the */
+/* representation tree, we start with NCLUS = 1 for the root */
+ nclus = 1;
+ iwork[iindc1 + 1] = 1;
+ iwork[iindc1 + 2] = im;
+/* IDONE is the number of eigenvectors already computed in the current */
+/* block */
+ idone = 0;
+/* loop while( IDONE.LT.IM ) */
+/* generate the representation tree for the current block and */
+/* compute the eigenvectors */
+L40:
+ if (idone < im) {
+/* This is a crude protection against infinitely deep trees */
+ if (ndepth > *m) {
+ *info = -2;
+ return 0;
+ }
+/* breadth first processing of the current level of the representation */
+/* tree: OLDNCL = number of clusters on current level */
+ oldncl = nclus;
+/* reset NCLUS to count the number of child clusters */
+ nclus = 0;
+
+ parity = 1 - parity;
+ if (parity == 0) {
+ oldcls = iindc1;
+ newcls = iindc2;
+ } else {
+ oldcls = iindc2;
+ newcls = iindc1;
+ }
+/* Process the clusters on the current level */
+ i__2 = oldncl;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ j = oldcls + (i__ << 1);
+/* OLDFST, OLDLST = first, last index of current cluster. */
+/* cluster indices start with 1 and are relative */
+/* to WBEGIN when accessing W, WGAP, WERR, Z */
+ oldfst = iwork[j - 1];
+ oldlst = iwork[j];
+ if (ndepth > 0) {
+/* Retrieve relatively robust representation (RRR) of cluster */
+/* that has been computed at the previous level */
+/* The RRR is stored in Z and overwritten once the eigenvectors */
+/* have been computed or when the cluster is refined */
+ if (*dol == 1 && *dou == *m) {
+/* Get representation from location of the leftmost evalue */
+/* of the cluster */
+ j = wbegin + oldfst - 1;
+ } else {
+ if (wbegin + oldfst - 1 < *dol) {
+/* Get representation from the left end of Z array */
+ j = *dol - 1;
+ } else if (wbegin + oldfst - 1 > *dou) {
+/* Get representation from the right end of Z array */
+ j = *dou;
+ } else {
+ j = wbegin + oldfst - 1;
+ }
+ }
+ i__3 = in - 1;
+ for (k = 1; k <= i__3; ++k) {
+ i__4 = ibegin + k - 1 + j * z_dim1;
+ d__[ibegin + k - 1] = z__[i__4].r;
+ i__4 = ibegin + k - 1 + (j + 1) * z_dim1;
+ l[ibegin + k - 1] = z__[i__4].r;
+/* L45: */
+ }
+ i__3 = iend + j * z_dim1;
+ d__[iend] = z__[i__3].r;
+ i__3 = iend + (j + 1) * z_dim1;
+ sigma = z__[i__3].r;
+/* Set the corresponding entries in Z to zero */
+ zlaset_("Full", &in, &c__2, &c_b1, &c_b1, &z__[ibegin + j
+ * z_dim1], ldz);
+ }
+/* Compute DL and DLL of current RRR */
+ i__3 = iend - 1;
+ for (j = ibegin; j <= i__3; ++j) {
+ tmp = d__[j] * l[j];
+ work[indld - 1 + j] = tmp;
+ work[indlld - 1 + j] = tmp * l[j];
+/* L50: */
+ }
+ if (ndepth > 0) {
+/* P and Q are index of the first and last eigenvalue to compute */
+/* within the current block */
+ p = indexw[wbegin - 1 + oldfst];
+ q = indexw[wbegin - 1 + oldlst];
+/* Offset for the arrays WORK, WGAP and WERR, i.e., th P-OFFSET */
+/* thru' Q-OFFSET elements of these arrays are to be used. */
+/* OFFSET = P-OLDFST */
+ offset = indexw[wbegin] - 1;
+/* perform limited bisection (if necessary) to get approximate */
+/* eigenvalues to the precision needed. */
+ dlarrb_(&in, &d__[ibegin], &work[indlld + ibegin - 1], &p,
+ &q, rtol1, rtol2, &offset, &work[wbegin], &wgap[
+ wbegin], &werr[wbegin], &work[indwrk], &iwork[
+ iindwk], pivmin, &spdiam, &in, &iinfo);
+ if (iinfo != 0) {
+ *info = -1;
+ return 0;
+ }
+/* We also recompute the extremal gaps. W holds all eigenvalues */
+/* of the unshifted matrix and must be used for computation */
+/* of WGAP, the entries of WORK might stem from RRRs with */
+/* different shifts. The gaps from WBEGIN-1+OLDFST to */
+/* WBEGIN-1+OLDLST are correctly computed in DLARRB. */
+/* However, we only allow the gaps to become greater since */
+/* this is what should happen when we decrease WERR */
+ if (oldfst > 1) {
+/* Computing MAX */
+ d__1 = wgap[wbegin + oldfst - 2], d__2 = w[wbegin +
+ oldfst - 1] - werr[wbegin + oldfst - 1] - w[
+ wbegin + oldfst - 2] - werr[wbegin + oldfst -
+ 2];
+ wgap[wbegin + oldfst - 2] = max(d__1,d__2);
+ }
+ if (wbegin + oldlst - 1 < wend) {
+/* Computing MAX */
+ d__1 = wgap[wbegin + oldlst - 1], d__2 = w[wbegin +
+ oldlst] - werr[wbegin + oldlst] - w[wbegin +
+ oldlst - 1] - werr[wbegin + oldlst - 1];
+ wgap[wbegin + oldlst - 1] = max(d__1,d__2);
+ }
+/* Each time the eigenvalues in WORK get refined, we store */
+/* the newly found approximation with all shifts applied in W */
+ i__3 = oldlst;
+ for (j = oldfst; j <= i__3; ++j) {
+ w[wbegin + j - 1] = work[wbegin + j - 1] + sigma;
+/* L53: */
+ }
+ }
+/* Process the current node. */
+ newfst = oldfst;
+ i__3 = oldlst;
+ for (j = oldfst; j <= i__3; ++j) {
+ if (j == oldlst) {
+/* we are at the right end of the cluster, this is also the */
+/* boundary of the child cluster */
+ newlst = j;
+ } else if (wgap[wbegin + j - 1] >= *minrgp * (d__1 = work[
+ wbegin + j - 1], abs(d__1))) {
+/* the right relative gap is big enough, the child cluster */
+/* (NEWFST,..,NEWLST) is well separated from the following */
+ newlst = j;
+ } else {
+/* inside a child cluster, the relative gap is not */
+/* big enough. */
+ goto L140;
+ }
+/* Compute size of child cluster found */
+ newsiz = newlst - newfst + 1;
+/* NEWFTT is the place in Z where the new RRR or the computed */
+/* eigenvector is to be stored */
+ if (*dol == 1 && *dou == *m) {
+/* Store representation at location of the leftmost evalue */
+/* of the cluster */
+ newftt = wbegin + newfst - 1;
+ } else {
+ if (wbegin + newfst - 1 < *dol) {
+/* Store representation at the left end of Z array */
+ newftt = *dol - 1;
+ } else if (wbegin + newfst - 1 > *dou) {
+/* Store representation at the right end of Z array */
+ newftt = *dou;
+ } else {
+ newftt = wbegin + newfst - 1;
+ }
+ }
+ if (newsiz > 1) {
+
+/* Current child is not a singleton but a cluster. */
+/* Compute and store new representation of child. */
+
+
+/* Compute left and right cluster gap. */
+
+/* LGAP and RGAP are not computed from WORK because */
+/* the eigenvalue approximations may stem from RRRs */
+/* different shifts. However, W hold all eigenvalues */
+/* of the unshifted matrix. Still, the entries in WGAP */
+/* have to be computed from WORK since the entries */
+/* in W might be of the same order so that gaps are not */
+/* exhibited correctly for very close eigenvalues. */
+ if (newfst == 1) {
+/* Computing MAX */
+ d__1 = 0., d__2 = w[wbegin] - werr[wbegin] - *vl;
+ lgap = max(d__1,d__2);
+ } else {
+ lgap = wgap[wbegin + newfst - 2];
+ }
+ rgap = wgap[wbegin + newlst - 1];
+
+/* Compute left- and rightmost eigenvalue of child */
+/* to high precision in order to shift as close */
+/* as possible and obtain as large relative gaps */
+/* as possible */
+
+ for (k = 1; k <= 2; ++k) {
+ if (k == 1) {
+ p = indexw[wbegin - 1 + newfst];
+ } else {
+ p = indexw[wbegin - 1 + newlst];
+ }
+ offset = indexw[wbegin] - 1;
+ dlarrb_(&in, &d__[ibegin], &work[indlld + ibegin
+ - 1], &p, &p, &rqtol, &rqtol, &offset, &
+ work[wbegin], &wgap[wbegin], &werr[wbegin]
+, &work[indwrk], &iwork[iindwk], pivmin, &
+ spdiam, &in, &iinfo);
+/* L55: */
+ }
+
+ if (wbegin + newlst - 1 < *dol || wbegin + newfst - 1
+ > *dou) {
+/* if the cluster contains no desired eigenvalues */
+/* skip the computation of that branch of the rep. tree */
+
+/* We could skip before the refinement of the extremal */
+/* eigenvalues of the child, but then the representation */
+/* tree could be different from the one when nothing is */
+/* skipped. For this reason we skip at this place. */
+ idone = idone + newlst - newfst + 1;
+ goto L139;
+ }
+
+/* Compute RRR of child cluster. */
+/* Note that the new RRR is stored in Z */
+
+/* DLARRF needs LWORK = 2*N */
+ dlarrf_(&in, &d__[ibegin], &l[ibegin], &work[indld +
+ ibegin - 1], &newfst, &newlst, &work[wbegin],
+ &wgap[wbegin], &werr[wbegin], &spdiam, &lgap,
+ &rgap, pivmin, &tau, &work[indin1], &work[
+ indin2], &work[indwrk], &iinfo);
+/* In the complex case, DLARRF cannot write */
+/* the new RRR directly into Z and needs an intermediate */
+/* workspace */
+ i__4 = in - 1;
+ for (k = 1; k <= i__4; ++k) {
+ i__5 = ibegin + k - 1 + newftt * z_dim1;
+ i__6 = indin1 + k - 1;
+ z__1.r = work[i__6], z__1.i = 0.;
+ z__[i__5].r = z__1.r, z__[i__5].i = z__1.i;
+ i__5 = ibegin + k - 1 + (newftt + 1) * z_dim1;
+ i__6 = indin2 + k - 1;
+ z__1.r = work[i__6], z__1.i = 0.;
+ z__[i__5].r = z__1.r, z__[i__5].i = z__1.i;
+/* L56: */
+ }
+ i__4 = iend + newftt * z_dim1;
+ i__5 = indin1 + in - 1;
+ z__1.r = work[i__5], z__1.i = 0.;
+ z__[i__4].r = z__1.r, z__[i__4].i = z__1.i;
+ if (iinfo == 0) {
+/* a new RRR for the cluster was found by DLARRF */
+/* update shift and store it */
+ ssigma = sigma + tau;
+ i__4 = iend + (newftt + 1) * z_dim1;
+ z__1.r = ssigma, z__1.i = 0.;
+ z__[i__4].r = z__1.r, z__[i__4].i = z__1.i;
+/* WORK() are the midpoints and WERR() the semi-width */
+/* Note that the entries in W are unchanged. */
+ i__4 = newlst;
+ for (k = newfst; k <= i__4; ++k) {
+ fudge = eps * 3. * (d__1 = work[wbegin + k -
+ 1], abs(d__1));
+ work[wbegin + k - 1] -= tau;
+ fudge += eps * 4. * (d__1 = work[wbegin + k -
+ 1], abs(d__1));
+/* Fudge errors */
+ werr[wbegin + k - 1] += fudge;
+/* Gaps are not fudged. Provided that WERR is small */
+/* when eigenvalues are close, a zero gap indicates */
+/* that a new representation is needed for resolving */
+/* the cluster. A fudge could lead to a wrong decision */
+/* of judging eigenvalues 'separated' which in */
+/* reality are not. This could have a negative impact */
+/* on the orthogonality of the computed eigenvectors. */
+/* L116: */
+ }
+ ++nclus;
+ k = newcls + (nclus << 1);
+ iwork[k - 1] = newfst;
+ iwork[k] = newlst;
+ } else {
+ *info = -2;
+ return 0;
+ }
+ } else {
+
+/* Compute eigenvector of singleton */
+
+ iter = 0;
+
+ tol = log((doublereal) in) * 4. * eps;
+
+ k = newfst;
+ windex = wbegin + k - 1;
+/* Computing MAX */
+ i__4 = windex - 1;
+ windmn = max(i__4,1);
+/* Computing MIN */
+ i__4 = windex + 1;
+ windpl = min(i__4,*m);
+ lambda = work[windex];
+ ++done;
+/* Check if eigenvector computation is to be skipped */
+ if (windex < *dol || windex > *dou) {
+ eskip = TRUE_;
+ goto L125;
+ } else {
+ eskip = FALSE_;
+ }
+ left = work[windex] - werr[windex];
+ right = work[windex] + werr[windex];
+ indeig = indexw[windex];
+/* Note that since we compute the eigenpairs for a child, */
+/* all eigenvalue approximations are w.r.t the same shift. */
+/* In this case, the entries in WORK should be used for */
+/* computing the gaps since they exhibit even very small */
+/* differences in the eigenvalues, as opposed to the */
+/* entries in W which might "look" the same. */
+ if (k == 1) {
+/* In the case RANGE='I' and with not much initial */
+/* accuracy in LAMBDA and VL, the formula */
+/* LGAP = MAX( ZERO, (SIGMA - VL) + LAMBDA ) */
+/* can lead to an overestimation of the left gap and */
+/* thus to inadequately early RQI 'convergence'. */
+/* Prevent this by forcing a small left gap. */
+/* Computing MAX */
+ d__1 = abs(left), d__2 = abs(right);
+ lgap = eps * max(d__1,d__2);
+ } else {
+ lgap = wgap[windmn];
+ }
+ if (k == im) {
+/* In the case RANGE='I' and with not much initial */
+/* accuracy in LAMBDA and VU, the formula */
+/* can lead to an overestimation of the right gap and */
+/* thus to inadequately early RQI 'convergence'. */
+/* Prevent this by forcing a small right gap. */
+/* Computing MAX */
+ d__1 = abs(left), d__2 = abs(right);
+ rgap = eps * max(d__1,d__2);
+ } else {
+ rgap = wgap[windex];
+ }
+ gap = min(lgap,rgap);
+ if (k == 1 || k == im) {
+/* The eigenvector support can become wrong */
+/* because significant entries could be cut off due to a */
+/* large GAPTOL parameter in LAR1V. Prevent this. */
+ gaptol = 0.;
+ } else {
+ gaptol = gap * eps;
+ }
+ isupmn = in;
+ isupmx = 1;
+/* Update WGAP so that it holds the minimum gap */
+/* to the left or the right. This is crucial in the */
+/* case where bisection is used to ensure that the */
+/* eigenvalue is refined up to the required precision. */
+/* The correct value is restored afterwards. */
+ savgap = wgap[windex];
+ wgap[windex] = gap;
+/* We want to use the Rayleigh Quotient Correction */
+/* as often as possible since it converges quadratically */
+/* when we are close enough to the desired eigenvalue. */
+/* However, the Rayleigh Quotient can have the wrong sign */
+/* and lead us away from the desired eigenvalue. In this */
+/* case, the best we can do is to use bisection. */
+ usedbs = FALSE_;
+ usedrq = FALSE_;
+/* Bisection is initially turned off unless it is forced */
+ needbs = ! tryrqc;
+L120:
+/* Check if bisection should be used to refine eigenvalue */
+ if (needbs) {
+/* Take the bisection as new iterate */
+ usedbs = TRUE_;
+ itmp1 = iwork[iindr + windex];
+ offset = indexw[wbegin] - 1;
+ d__1 = eps * 2.;
+ dlarrb_(&in, &d__[ibegin], &work[indlld + ibegin
+ - 1], &indeig, &indeig, &c_b28, &d__1, &
+ offset, &work[wbegin], &wgap[wbegin], &
+ werr[wbegin], &work[indwrk], &iwork[
+ iindwk], pivmin, &spdiam, &itmp1, &iinfo);
+ if (iinfo != 0) {
+ *info = -3;
+ return 0;
+ }
+ lambda = work[windex];
+/* Reset twist index from inaccurate LAMBDA to */
+/* force computation of true MINGMA */
+ iwork[iindr + windex] = 0;
+ }
+/* Given LAMBDA, compute the eigenvector. */
+ L__1 = ! usedbs;
+ zlar1v_(&in, &c__1, &in, &lambda, &d__[ibegin], &l[
+ ibegin], &work[indld + ibegin - 1], &work[
+ indlld + ibegin - 1], pivmin, &gaptol, &z__[
+ ibegin + windex * z_dim1], &L__1, &negcnt, &
+ ztz, &mingma, &iwork[iindr + windex], &isuppz[
+ (windex << 1) - 1], &nrminv, &resid, &rqcorr,
+ &work[indwrk]);
+ if (iter == 0) {
+ bstres = resid;
+ bstw = lambda;
+ } else if (resid < bstres) {
+ bstres = resid;
+ bstw = lambda;
+ }
+/* Computing MIN */
+ i__4 = isupmn, i__5 = isuppz[(windex << 1) - 1];
+ isupmn = min(i__4,i__5);
+/* Computing MAX */
+ i__4 = isupmx, i__5 = isuppz[windex * 2];
+ isupmx = max(i__4,i__5);
+ ++iter;
+/* sin alpha <= |resid|/gap */
+/* Note that both the residual and the gap are */
+/* proportional to the matrix, so ||T|| doesn't play */
+/* a role in the quotient */
+
+/* Convergence test for Rayleigh-Quotient iteration */
+/* (omitted when Bisection has been used) */
+
+ if (resid > tol * gap && abs(rqcorr) > rqtol * abs(
+ lambda) && ! usedbs) {
+/* We need to check that the RQCORR update doesn't */
+/* move the eigenvalue away from the desired one and */
+/* towards a neighbor. -> protection with bisection */
+ if (indeig <= negcnt) {
+/* The wanted eigenvalue lies to the left */
+ sgndef = -1.;
+ } else {
+/* The wanted eigenvalue lies to the right */
+ sgndef = 1.;
+ }
+/* We only use the RQCORR if it improves the */
+/* the iterate reasonably. */
+ if (rqcorr * sgndef >= 0. && lambda + rqcorr <=
+ right && lambda + rqcorr >= left) {
+ usedrq = TRUE_;
+/* Store new midpoint of bisection interval in WORK */
+ if (sgndef == 1.) {
+/* The current LAMBDA is on the left of the true */
+/* eigenvalue */
+ left = lambda;
+/* We prefer to assume that the error estimate */
+/* is correct. We could make the interval not */
+/* as a bracket but to be modified if the RQCORR */
+/* chooses to. In this case, the RIGHT side should */
+/* be modified as follows: */
+/* RIGHT = MAX(RIGHT, LAMBDA + RQCORR) */
+ } else {
+/* The current LAMBDA is on the right of the true */
+/* eigenvalue */
+ right = lambda;
+/* See comment about assuming the error estimate is */
+/* correct above. */
+/* LEFT = MIN(LEFT, LAMBDA + RQCORR) */
+ }
+ work[windex] = (right + left) * .5;
+/* Take RQCORR since it has the correct sign and */
+/* improves the iterate reasonably */
+ lambda += rqcorr;
+/* Update width of error interval */
+ werr[windex] = (right - left) * .5;
+ } else {
+ needbs = TRUE_;
+ }
+ if (right - left < rqtol * abs(lambda)) {
+/* The eigenvalue is computed to bisection accuracy */
+/* compute eigenvector and stop */
+ usedbs = TRUE_;
+ goto L120;
+ } else if (iter < 10) {
+ goto L120;
+ } else if (iter == 10) {
+ needbs = TRUE_;
+ goto L120;
+ } else {
+ *info = 5;
+ return 0;
+ }
+ } else {
+ stp2ii = FALSE_;
+ if (usedrq && usedbs && bstres <= resid) {
+ lambda = bstw;
+ stp2ii = TRUE_;
+ }
+ if (stp2ii) {
+/* improve error angle by second step */
+ L__1 = ! usedbs;
+ zlar1v_(&in, &c__1, &in, &lambda, &d__[ibegin]
+, &l[ibegin], &work[indld + ibegin -
+ 1], &work[indlld + ibegin - 1],
+ pivmin, &gaptol, &z__[ibegin + windex
+ * z_dim1], &L__1, &negcnt, &ztz, &
+ mingma, &iwork[iindr + windex], &
+ isuppz[(windex << 1) - 1], &nrminv, &
+ resid, &rqcorr, &work[indwrk]);
+ }
+ work[windex] = lambda;
+ }
+
+/* Compute FP-vector support w.r.t. whole matrix */
+
+ isuppz[(windex << 1) - 1] += oldien;
+ isuppz[windex * 2] += oldien;
+ zfrom = isuppz[(windex << 1) - 1];
+ zto = isuppz[windex * 2];
+ isupmn += oldien;
+ isupmx += oldien;
+/* Ensure vector is ok if support in the RQI has changed */
+ if (isupmn < zfrom) {
+ i__4 = zfrom - 1;
+ for (ii = isupmn; ii <= i__4; ++ii) {
+ i__5 = ii + windex * z_dim1;
+ z__[i__5].r = 0., z__[i__5].i = 0.;
+/* L122: */
+ }
+ }
+ if (isupmx > zto) {
+ i__4 = isupmx;
+ for (ii = zto + 1; ii <= i__4; ++ii) {
+ i__5 = ii + windex * z_dim1;
+ z__[i__5].r = 0., z__[i__5].i = 0.;
+/* L123: */
+ }
+ }
+ i__4 = zto - zfrom + 1;
+ zdscal_(&i__4, &nrminv, &z__[zfrom + windex * z_dim1],
+ &c__1);
+L125:
+/* Update W */
+ w[windex] = lambda + sigma;
+/* Recompute the gaps on the left and right */
+/* But only allow them to become larger and not */
+/* smaller (which can only happen through "bad" */
+/* cancellation and doesn't reflect the theory */
+/* where the initial gaps are underestimated due */
+/* to WERR being too crude.) */
+ if (! eskip) {
+ if (k > 1) {
+/* Computing MAX */
+ d__1 = wgap[windmn], d__2 = w[windex] - werr[
+ windex] - w[windmn] - werr[windmn];
+ wgap[windmn] = max(d__1,d__2);
+ }
+ if (windex < wend) {
+/* Computing MAX */
+ d__1 = savgap, d__2 = w[windpl] - werr[windpl]
+ - w[windex] - werr[windex];
+ wgap[windex] = max(d__1,d__2);
+ }
+ }
+ ++idone;
+ }
+/* here ends the code for the current child */
+
+L139:
+/* Proceed to any remaining child nodes */
+ newfst = j + 1;
+L140:
+ ;
+ }
+/* L150: */
+ }
+ ++ndepth;
+ goto L40;
+ }
+ ibegin = iend + 1;
+ wbegin = wend + 1;
+L170:
+ ;
+ }
+
+ return 0;
+
+/* End of ZLARRV */
+
+} /* zlarrv_ */
diff --git a/contrib/libs/clapack/zlartg.c b/contrib/libs/clapack/zlartg.c
new file mode 100644
index 0000000000..5afdbb9d0b
--- /dev/null
+++ b/contrib/libs/clapack/zlartg.c
@@ -0,0 +1,285 @@
+/* zlartg.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlartg_(doublecomplex *f, doublecomplex *g, doublereal *
+ cs, doublecomplex *sn, doublecomplex *r__)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8, d__9, d__10;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ double log(doublereal), pow_di(doublereal *, integer *), d_imag(
+ doublecomplex *), sqrt(doublereal);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ doublereal d__;
+ integer i__;
+ doublereal f2, g2;
+ doublecomplex ff;
+ doublereal di, dr;
+ doublecomplex fs, gs;
+ doublereal f2s, g2s, eps, scale;
+ integer count;
+ doublereal safmn2;
+ extern doublereal dlapy2_(doublereal *, doublereal *);
+ doublereal safmx2;
+ extern doublereal dlamch_(char *);
+ doublereal safmin;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLARTG generates a plane rotation so that */
+
+/* [ CS SN ] [ F ] [ R ] */
+/* [ __ ] . [ ] = [ ] where CS**2 + |SN|**2 = 1. */
+/* [ -SN CS ] [ G ] [ 0 ] */
+
+/* This is a faster version of the BLAS1 routine ZROTG, except for */
+/* the following differences: */
+/* F and G are unchanged on return. */
+/* If G=0, then CS=1 and SN=0. */
+/* If F=0, then CS=0 and SN is chosen so that R is real. */
+
+/* Arguments */
+/* ========= */
+
+/* F (input) COMPLEX*16 */
+/* The first component of vector to be rotated. */
+
+/* G (input) COMPLEX*16 */
+/* The second component of vector to be rotated. */
+
+/* CS (output) DOUBLE PRECISION */
+/* The cosine of the rotation. */
+
+/* SN (output) COMPLEX*16 */
+/* The sine of the rotation. */
+
+/* R (output) COMPLEX*16 */
+/* The nonzero component of the rotated vector. */
+
+/* Further Details */
+/* ======= ======= */
+
+/* 3-5-96 - Modified with a new algorithm by W. Kahan and J. Demmel */
+
+/* This version has a few statements commented out for thread safety */
+/* (machine parameters are computed on each entry). 10 feb 03, SJH. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* LOGICAL FIRST */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Save statement .. */
+/* SAVE FIRST, SAFMX2, SAFMIN, SAFMN2 */
+/* .. */
+/* .. Data statements .. */
+/* DATA FIRST / .TRUE. / */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* IF( FIRST ) THEN */
+ safmin = dlamch_("S");
+ eps = dlamch_("E");
+ d__1 = dlamch_("B");
+ i__1 = (integer) (log(safmin / eps) / log(dlamch_("B")) / 2.);
+ safmn2 = pow_di(&d__1, &i__1);
+ safmx2 = 1. / safmn2;
+/* FIRST = .FALSE. */
+/* END IF */
+/* Computing MAX */
+/* Computing MAX */
+ d__7 = (d__1 = f->r, abs(d__1)), d__8 = (d__2 = d_imag(f), abs(d__2));
+/* Computing MAX */
+ d__9 = (d__3 = g->r, abs(d__3)), d__10 = (d__4 = d_imag(g), abs(d__4));
+ d__5 = max(d__7,d__8), d__6 = max(d__9,d__10);
+ scale = max(d__5,d__6);
+ fs.r = f->r, fs.i = f->i;
+ gs.r = g->r, gs.i = g->i;
+ count = 0;
+ if (scale >= safmx2) {
+L10:
+ ++count;
+ z__1.r = safmn2 * fs.r, z__1.i = safmn2 * fs.i;
+ fs.r = z__1.r, fs.i = z__1.i;
+ z__1.r = safmn2 * gs.r, z__1.i = safmn2 * gs.i;
+ gs.r = z__1.r, gs.i = z__1.i;
+ scale *= safmn2;
+ if (scale >= safmx2) {
+ goto L10;
+ }
+ } else if (scale <= safmn2) {
+ if (g->r == 0. && g->i == 0.) {
+ *cs = 1.;
+ sn->r = 0., sn->i = 0.;
+ r__->r = f->r, r__->i = f->i;
+ return 0;
+ }
+L20:
+ --count;
+ z__1.r = safmx2 * fs.r, z__1.i = safmx2 * fs.i;
+ fs.r = z__1.r, fs.i = z__1.i;
+ z__1.r = safmx2 * gs.r, z__1.i = safmx2 * gs.i;
+ gs.r = z__1.r, gs.i = z__1.i;
+ scale *= safmx2;
+ if (scale <= safmn2) {
+ goto L20;
+ }
+ }
+/* Computing 2nd power */
+ d__1 = fs.r;
+/* Computing 2nd power */
+ d__2 = d_imag(&fs);
+ f2 = d__1 * d__1 + d__2 * d__2;
+/* Computing 2nd power */
+ d__1 = gs.r;
+/* Computing 2nd power */
+ d__2 = d_imag(&gs);
+ g2 = d__1 * d__1 + d__2 * d__2;
+ if (f2 <= max(g2,1.) * safmin) {
+
+/* This is a rare case: F is very small. */
+
+ if (f->r == 0. && f->i == 0.) {
+ *cs = 0.;
+ d__2 = g->r;
+ d__3 = d_imag(g);
+ d__1 = dlapy2_(&d__2, &d__3);
+ r__->r = d__1, r__->i = 0.;
+/* Do complex/real division explicitly with two real divisions */
+ d__1 = gs.r;
+ d__2 = d_imag(&gs);
+ d__ = dlapy2_(&d__1, &d__2);
+ d__1 = gs.r / d__;
+ d__2 = -d_imag(&gs) / d__;
+ z__1.r = d__1, z__1.i = d__2;
+ sn->r = z__1.r, sn->i = z__1.i;
+ return 0;
+ }
+ d__1 = fs.r;
+ d__2 = d_imag(&fs);
+ f2s = dlapy2_(&d__1, &d__2);
+/* G2 and G2S are accurate */
+/* G2 is at least SAFMIN, and G2S is at least SAFMN2 */
+ g2s = sqrt(g2);
+/* Error in CS from underflow in F2S is at most */
+/* UNFL / SAFMN2 .lt. sqrt(UNFL*EPS) .lt. EPS */
+/* If MAX(G2,ONE)=G2, then F2 .lt. G2*SAFMIN, */
+/* and so CS .lt. sqrt(SAFMIN) */
+/* If MAX(G2,ONE)=ONE, then F2 .lt. SAFMIN */
+/* and so CS .lt. sqrt(SAFMIN)/SAFMN2 = sqrt(EPS) */
+/* Therefore, CS = F2S/G2S / sqrt( 1 + (F2S/G2S)**2 ) = F2S/G2S */
+ *cs = f2s / g2s;
+/* Make sure abs(FF) = 1 */
+/* Do complex/real division explicitly with 2 real divisions */
+/* Computing MAX */
+ d__3 = (d__1 = f->r, abs(d__1)), d__4 = (d__2 = d_imag(f), abs(d__2));
+ if (max(d__3,d__4) > 1.) {
+ d__1 = f->r;
+ d__2 = d_imag(f);
+ d__ = dlapy2_(&d__1, &d__2);
+ d__1 = f->r / d__;
+ d__2 = d_imag(f) / d__;
+ z__1.r = d__1, z__1.i = d__2;
+ ff.r = z__1.r, ff.i = z__1.i;
+ } else {
+ dr = safmx2 * f->r;
+ di = safmx2 * d_imag(f);
+ d__ = dlapy2_(&dr, &di);
+ d__1 = dr / d__;
+ d__2 = di / d__;
+ z__1.r = d__1, z__1.i = d__2;
+ ff.r = z__1.r, ff.i = z__1.i;
+ }
+ d__1 = gs.r / g2s;
+ d__2 = -d_imag(&gs) / g2s;
+ z__2.r = d__1, z__2.i = d__2;
+ z__1.r = ff.r * z__2.r - ff.i * z__2.i, z__1.i = ff.r * z__2.i + ff.i
+ * z__2.r;
+ sn->r = z__1.r, sn->i = z__1.i;
+ z__2.r = *cs * f->r, z__2.i = *cs * f->i;
+ z__3.r = sn->r * g->r - sn->i * g->i, z__3.i = sn->r * g->i + sn->i *
+ g->r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ r__->r = z__1.r, r__->i = z__1.i;
+ } else {
+
+/* This is the most common case. */
+/* Neither F2 nor F2/G2 are less than SAFMIN */
+/* F2S cannot overflow, and it is accurate */
+
+ f2s = sqrt(g2 / f2 + 1.);
+/* Do the F2S(real)*FS(complex) multiply with two real multiplies */
+ d__1 = f2s * fs.r;
+ d__2 = f2s * d_imag(&fs);
+ z__1.r = d__1, z__1.i = d__2;
+ r__->r = z__1.r, r__->i = z__1.i;
+ *cs = 1. / f2s;
+ d__ = f2 + g2;
+/* Do complex/real division explicitly with two real divisions */
+ d__1 = r__->r / d__;
+ d__2 = d_imag(r__) / d__;
+ z__1.r = d__1, z__1.i = d__2;
+ sn->r = z__1.r, sn->i = z__1.i;
+ d_cnjg(&z__2, &gs);
+ z__1.r = sn->r * z__2.r - sn->i * z__2.i, z__1.i = sn->r * z__2.i +
+ sn->i * z__2.r;
+ sn->r = z__1.r, sn->i = z__1.i;
+ if (count != 0) {
+ if (count > 0) {
+ i__1 = count;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ z__1.r = safmx2 * r__->r, z__1.i = safmx2 * r__->i;
+ r__->r = z__1.r, r__->i = z__1.i;
+/* L30: */
+ }
+ } else {
+ i__1 = -count;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ z__1.r = safmn2 * r__->r, z__1.i = safmn2 * r__->i;
+ r__->r = z__1.r, r__->i = z__1.i;
+/* L40: */
+ }
+ }
+ }
+ }
+ return 0;
+
+/* End of ZLARTG */
+
+} /* zlartg_ */
diff --git a/contrib/libs/clapack/zlartv.c b/contrib/libs/clapack/zlartv.c
new file mode 100644
index 0000000000..1cb3607fbc
--- /dev/null
+++ b/contrib/libs/clapack/zlartv.c
@@ -0,0 +1,126 @@
+/* zlartv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlartv_(integer *n, doublecomplex *x, integer *incx,
+ doublecomplex *y, integer *incy, doublereal *c__, doublecomplex *s,
+ integer *incc)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, ic, ix, iy;
+ doublecomplex xi, yi;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLARTV applies a vector of complex plane rotations with real cosines */
+/* to elements of the complex vectors x and y. For i = 1,2,...,n */
+
+/* ( x(i) ) := ( c(i) s(i) ) ( x(i) ) */
+/* ( y(i) ) ( -conjg(s(i)) c(i) ) ( y(i) ) */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of plane rotations to be applied. */
+
+/* X (input/output) COMPLEX*16 array, dimension (1+(N-1)*INCX) */
+/* The vector x. */
+
+/* INCX (input) INTEGER */
+/* The increment between elements of X. INCX > 0. */
+
+/* Y (input/output) COMPLEX*16 array, dimension (1+(N-1)*INCY) */
+/* The vector y. */
+
+/* INCY (input) INTEGER */
+/* The increment between elements of Y. INCY > 0. */
+
+/* C (input) DOUBLE PRECISION array, dimension (1+(N-1)*INCC) */
+/* The cosines of the plane rotations. */
+
+/* S (input) COMPLEX*16 array, dimension (1+(N-1)*INCC) */
+/* The sines of the plane rotations. */
+
+/* INCC (input) INTEGER */
+/* The increment between elements of C and S. INCC > 0. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --s;
+ --c__;
+ --y;
+ --x;
+
+ /* Function Body */
+ ix = 1;
+ iy = 1;
+ ic = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ xi.r = x[i__2].r, xi.i = x[i__2].i;
+ i__2 = iy;
+ yi.r = y[i__2].r, yi.i = y[i__2].i;
+ i__2 = ix;
+ i__3 = ic;
+ z__2.r = c__[i__3] * xi.r, z__2.i = c__[i__3] * xi.i;
+ i__4 = ic;
+ z__3.r = s[i__4].r * yi.r - s[i__4].i * yi.i, z__3.i = s[i__4].r *
+ yi.i + s[i__4].i * yi.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ i__2 = iy;
+ i__3 = ic;
+ z__2.r = c__[i__3] * yi.r, z__2.i = c__[i__3] * yi.i;
+ d_cnjg(&z__4, &s[ic]);
+ z__3.r = z__4.r * xi.r - z__4.i * xi.i, z__3.i = z__4.r * xi.i +
+ z__4.i * xi.r;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ ix += *incx;
+ iy += *incy;
+ ic += *incc;
+/* L10: */
+ }
+ return 0;
+
+/* End of ZLARTV */
+
+} /* zlartv_ */
diff --git a/contrib/libs/clapack/zlarz.c b/contrib/libs/clapack/zlarz.c
new file mode 100644
index 0000000000..78b8a562ad
--- /dev/null
+++ b/contrib/libs/clapack/zlarz.c
@@ -0,0 +1,200 @@
+/* zlarz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zlarz_(char *side, integer *m, integer *n, integer *l,
+ doublecomplex *v, integer *incv, doublecomplex *tau, doublecomplex *
+ c__, integer *ldc, doublecomplex *work)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset;
+ doublecomplex z__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zgerc_(integer *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *),
+ zgeru_(integer *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *)
+ , zcopy_(integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *), zaxpy_(integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *, integer *), zlacgv_(integer *,
+ doublecomplex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLARZ applies a complex elementary reflector H to a complex */
+/* M-by-N matrix C, from either the left or the right. H is represented */
+/* in the form */
+
+/* H = I - tau * v * v' */
+
+/* where tau is a complex scalar and v is a complex vector. */
+
+/* If tau = 0, then H is taken to be the unit matrix. */
+
+/* To apply H' (the conjugate transpose of H), supply conjg(tau) instead */
+/* tau. */
+
+/* H is a product of k elementary reflectors as returned by ZTZRZF. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': form H * C */
+/* = 'R': form C * H */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* L (input) INTEGER */
+/* The number of entries of the vector V containing */
+/* the meaningful part of the Householder vectors. */
+/* If SIDE = 'L', M >= L >= 0, if SIDE = 'R', N >= L >= 0. */
+
+/* V (input) COMPLEX*16 array, dimension (1+(L-1)*abs(INCV)) */
+/* The vector v in the representation of H as returned by */
+/* ZTZRZF. V is not used if TAU = 0. */
+
+/* INCV (input) INTEGER */
+/* The increment between elements of v. INCV <> 0. */
+
+/* TAU (input) COMPLEX*16 */
+/* The value tau in the representation of H. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by the matrix H * C if SIDE = 'L', */
+/* or C * H if SIDE = 'R'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension */
+/* (N) if SIDE = 'L' */
+/* or (M) if SIDE = 'R' */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --v;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ if (lsame_(side, "L")) {
+
+/* Form H * C */
+
+ if (tau->r != 0. || tau->i != 0.) {
+
+/* w( 1:n ) = conjg( C( 1, 1:n ) ) */
+
+ zcopy_(n, &c__[c_offset], ldc, &work[1], &c__1);
+ zlacgv_(n, &work[1], &c__1);
+
+/* w( 1:n ) = conjg( w( 1:n ) + C( m-l+1:m, 1:n )' * v( 1:l ) ) */
+
+ zgemv_("Conjugate transpose", l, n, &c_b1, &c__[*m - *l + 1 +
+ c_dim1], ldc, &v[1], incv, &c_b1, &work[1], &c__1);
+ zlacgv_(n, &work[1], &c__1);
+
+/* C( 1, 1:n ) = C( 1, 1:n ) - tau * w( 1:n ) */
+
+ z__1.r = -tau->r, z__1.i = -tau->i;
+ zaxpy_(n, &z__1, &work[1], &c__1, &c__[c_offset], ldc);
+
+/* C( m-l+1:m, 1:n ) = C( m-l+1:m, 1:n ) - ... */
+/* tau * v( 1:l ) * conjg( w( 1:n )' ) */
+
+ z__1.r = -tau->r, z__1.i = -tau->i;
+ zgeru_(l, n, &z__1, &v[1], incv, &work[1], &c__1, &c__[*m - *l +
+ 1 + c_dim1], ldc);
+ }
+
+ } else {
+
+/* Form C * H */
+
+ if (tau->r != 0. || tau->i != 0.) {
+
+/* w( 1:m ) = C( 1:m, 1 ) */
+
+ zcopy_(m, &c__[c_offset], &c__1, &work[1], &c__1);
+
+/* w( 1:m ) = w( 1:m ) + C( 1:m, n-l+1:n, 1:n ) * v( 1:l ) */
+
+ zgemv_("No transpose", m, l, &c_b1, &c__[(*n - *l + 1) * c_dim1 +
+ 1], ldc, &v[1], incv, &c_b1, &work[1], &c__1);
+
+/* C( 1:m, 1 ) = C( 1:m, 1 ) - tau * w( 1:m ) */
+
+ z__1.r = -tau->r, z__1.i = -tau->i;
+ zaxpy_(m, &z__1, &work[1], &c__1, &c__[c_offset], &c__1);
+
+/* C( 1:m, n-l+1:n ) = C( 1:m, n-l+1:n ) - ... */
+/* tau * w( 1:m ) * v( 1:l )' */
+
+ z__1.r = -tau->r, z__1.i = -tau->i;
+ zgerc_(m, l, &z__1, &work[1], &c__1, &v[1], incv, &c__[(*n - *l +
+ 1) * c_dim1 + 1], ldc);
+
+ }
+
+ }
+
+ return 0;
+
+/* End of ZLARZ */
+
+} /* zlarz_ */
diff --git a/contrib/libs/clapack/zlarzb.c b/contrib/libs/clapack/zlarzb.c
new file mode 100644
index 0000000000..624b6713b3
--- /dev/null
+++ b/contrib/libs/clapack/zlarzb.c
@@ -0,0 +1,323 @@
+/* zlarzb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zlarzb_(char *side, char *trans, char *direct, char *
+ storev, integer *m, integer *n, integer *k, integer *l, doublecomplex
+ *v, integer *ldv, doublecomplex *t, integer *ldt, doublecomplex *c__,
+ integer *ldc, doublecomplex *work, integer *ldwork)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, t_dim1, t_offset, v_dim1, v_offset, work_dim1,
+ work_offset, i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__, j, info;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), zcopy_(integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *), ztrmm_(char *, char *,
+ char *, char *, integer *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(char *, integer *),
+ zlacgv_(integer *, doublecomplex *, integer *);
+ char transt[1];
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLARZB applies a complex block reflector H or its transpose H**H */
+/* to a complex distributed M-by-N C from the left or the right. */
+
+/* Currently, only STOREV = 'R' and DIRECT = 'B' are supported. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply H or H' from the Left */
+/* = 'R': apply H or H' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply H (No transpose) */
+/* = 'C': apply H' (Conjugate transpose) */
+
+/* DIRECT (input) CHARACTER*1 */
+/* Indicates how H is formed from a product of elementary */
+/* reflectors */
+/* = 'F': H = H(1) H(2) . . . H(k) (Forward, not supported yet) */
+/* = 'B': H = H(k) . . . H(2) H(1) (Backward) */
+
+/* STOREV (input) CHARACTER*1 */
+/* Indicates how the vectors which define the elementary */
+/* reflectors are stored: */
+/* = 'C': Columnwise (not supported yet) */
+/* = 'R': Rowwise */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. */
+
+/* K (input) INTEGER */
+/* The order of the matrix T (= the number of elementary */
+/* reflectors whose product defines the block reflector). */
+
+/* L (input) INTEGER */
+/* The number of columns of the matrix V containing the */
+/* meaningful part of the Householder reflectors. */
+/* If SIDE = 'L', M >= L >= 0, if SIDE = 'R', N >= L >= 0. */
+
+/* V (input) COMPLEX*16 array, dimension (LDV,NV). */
+/* If STOREV = 'C', NV = K; if STOREV = 'R', NV = L. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. */
+/* If STOREV = 'C', LDV >= L; if STOREV = 'R', LDV >= K. */
+
+/* T (input) COMPLEX*16 array, dimension (LDT,K) */
+/* The triangular K-by-K matrix T in the representation of the */
+/* block reflector. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= K. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by H*C or H'*C or C*H or C*H'. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (LDWORK,K) */
+
+/* LDWORK (input) INTEGER */
+/* The leading dimension of the array WORK. */
+/* If SIDE = 'L', LDWORK >= max(1,N); */
+/* if SIDE = 'R', LDWORK >= max(1,M). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ work_dim1 = *ldwork;
+ work_offset = 1 + work_dim1;
+ work -= work_offset;
+
+ /* Function Body */
+ if (*m <= 0 || *n <= 0) {
+ return 0;
+ }
+
+/* Check for currently supported options */
+
+ info = 0;
+ if (! lsame_(direct, "B")) {
+ info = -3;
+ } else if (! lsame_(storev, "R")) {
+ info = -4;
+ }
+ if (info != 0) {
+ i__1 = -info;
+ xerbla_("ZLARZB", &i__1);
+ return 0;
+ }
+
+ if (lsame_(trans, "N")) {
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ if (lsame_(side, "L")) {
+
+/* Form H * C or H' * C */
+
+/* W( 1:n, 1:k ) = conjg( C( 1:k, 1:n )' ) */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1], &c__1);
+/* L10: */
+ }
+
+/* W( 1:n, 1:k ) = W( 1:n, 1:k ) + ... */
+/* conjg( C( m-l+1:m, 1:n )' ) * V( 1:k, 1:l )' */
+
+ if (*l > 0) {
+ zgemm_("Transpose", "Conjugate transpose", n, k, l, &c_b1, &c__[*
+ m - *l + 1 + c_dim1], ldc, &v[v_offset], ldv, &c_b1, &
+ work[work_offset], ldwork);
+ }
+
+/* W( 1:n, 1:k ) = W( 1:n, 1:k ) * T' or W( 1:m, 1:k ) * T */
+
+ ztrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b1, &t[t_offset]
+, ldt, &work[work_offset], ldwork);
+
+/* C( 1:k, 1:n ) = C( 1:k, 1:n ) - conjg( W( 1:n, 1:k )' ) */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *k;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ i__5 = j + i__ * work_dim1;
+ z__1.r = c__[i__4].r - work[i__5].r, z__1.i = c__[i__4].i -
+ work[i__5].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L20: */
+ }
+/* L30: */
+ }
+
+/* C( m-l+1:m, 1:n ) = C( m-l+1:m, 1:n ) - ... */
+/* conjg( V( 1:k, 1:l )' ) * conjg( W( 1:n, 1:k )' ) */
+
+ if (*l > 0) {
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("Transpose", "Transpose", l, n, k, &z__1, &v[v_offset],
+ ldv, &work[work_offset], ldwork, &c_b1, &c__[*m - *l + 1
+ + c_dim1], ldc);
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/* Form C * H or C * H' */
+
+/* W( 1:m, 1:k ) = C( 1:m, 1:k ) */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ zcopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j * work_dim1 + 1], &
+ c__1);
+/* L40: */
+ }
+
+/* W( 1:m, 1:k ) = W( 1:m, 1:k ) + ... */
+/* C( 1:m, n-l+1:n ) * conjg( V( 1:k, 1:l )' ) */
+
+ if (*l > 0) {
+ zgemm_("No transpose", "Transpose", m, k, l, &c_b1, &c__[(*n - *l
+ + 1) * c_dim1 + 1], ldc, &v[v_offset], ldv, &c_b1, &work[
+ work_offset], ldwork);
+ }
+
+/* W( 1:m, 1:k ) = W( 1:m, 1:k ) * conjg( T ) or */
+/* W( 1:m, 1:k ) * conjg( T' ) */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *k - j + 1;
+ zlacgv_(&i__2, &t[j + j * t_dim1], &c__1);
+/* L50: */
+ }
+ ztrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b1, &t[t_offset],
+ ldt, &work[work_offset], ldwork);
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *k - j + 1;
+ zlacgv_(&i__2, &t[j + j * t_dim1], &c__1);
+/* L60: */
+ }
+
+/* C( 1:m, 1:k ) = C( 1:m, 1:k ) - W( 1:m, 1:k ) */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * work_dim1;
+ z__1.r = c__[i__4].r - work[i__5].r, z__1.i = c__[i__4].i -
+ work[i__5].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+
+/* C( 1:m, n-l+1:n ) = C( 1:m, n-l+1:n ) - ... */
+/* W( 1:m, 1:k ) * conjg( V( 1:k, 1:l ) ) */
+
+ i__1 = *l;
+ for (j = 1; j <= i__1; ++j) {
+ zlacgv_(k, &v[j * v_dim1 + 1], &c__1);
+/* L90: */
+ }
+ if (*l > 0) {
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "No transpose", m, l, k, &z__1, &work[
+ work_offset], ldwork, &v[v_offset], ldv, &c_b1, &c__[(*n
+ - *l + 1) * c_dim1 + 1], ldc);
+ }
+ i__1 = *l;
+ for (j = 1; j <= i__1; ++j) {
+ zlacgv_(k, &v[j * v_dim1 + 1], &c__1);
+/* L100: */
+ }
+
+ }
+
+ return 0;
+
+/* End of ZLARZB */
+
+} /* zlarzb_ */
diff --git a/contrib/libs/clapack/zlarzt.c b/contrib/libs/clapack/zlarzt.c
new file mode 100644
index 0000000000..f9e6a4715e
--- /dev/null
+++ b/contrib/libs/clapack/zlarzt.c
@@ -0,0 +1,238 @@
+/* zlarzt.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zlarzt_(char *direct, char *storev, integer *n, integer *
+	k, doublecomplex *v, integer *ldv, doublecomplex *tau, doublecomplex *
+	t, integer *ldt)
+{
+    /* System generated locals */
+    integer t_dim1, t_offset, v_dim1, v_offset, i__1, i__2;
+    doublecomplex z__1;
+
+    /* Local variables */
+    integer i__, j, info;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, doublecomplex *, integer *),
+	    ztrmv_(char *, char *, char *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, integer *),
+	    xerbla_(char *, integer *), zlacgv_(integer *,
+	    doublecomplex *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZLARZT forms the triangular factor T of a complex block reflector */
+/*  H of order > n, which is defined as a product of k elementary */
+/*  reflectors. */
+
+/*  If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular; */
+
+/*  If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. */
+
+/*  If STOREV = 'C', the vector which defines the elementary reflector */
+/*  H(i) is stored in the i-th column of the array V, and */
+
+/*     H  =  I - V * T * V' */
+
+/*  If STOREV = 'R', the vector which defines the elementary reflector */
+/*  H(i) is stored in the i-th row of the array V, and */
+
+/*     H  =  I - V' * T * V */
+
+/*  Currently, only STOREV = 'R' and DIRECT = 'B' are supported. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  DIRECT  (input) CHARACTER*1 */
+/*          Specifies the order in which the elementary reflectors are */
+/*          multiplied to form the block reflector: */
+/*          = 'F': H = H(1) H(2) . . . H(k) (Forward, not supported yet) */
+/*          = 'B': H = H(k) . . . H(2) H(1) (Backward) */
+
+/*  STOREV  (input) CHARACTER*1 */
+/*          Specifies how the vectors which define the elementary */
+/*          reflectors are stored (see also Further Details): */
+/*          = 'C': columnwise                        (not supported yet) */
+/*          = 'R': rowwise */
+
+/*  N       (input) INTEGER */
+/*          The order of the block reflector H. N >= 0. */
+
+/*  K       (input) INTEGER */
+/*          The order of the triangular factor T (= the number of */
+/*          elementary reflectors). K >= 1. */
+
+/*  V       (input/output) COMPLEX*16 array, dimension */
+/*                               (LDV,K) if STOREV = 'C' */
+/*                               (LDV,N) if STOREV = 'R' */
+/*          The matrix V. See further details. */
+
+/*  LDV     (input) INTEGER */
+/*          The leading dimension of the array V. */
+/*          If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. */
+
+/*  TAU     (input) COMPLEX*16 array, dimension (K) */
+/*          TAU(i) must contain the scalar factor of the elementary */
+/*          reflector H(i). */
+
+/*  T       (output) COMPLEX*16 array, dimension (LDT,K) */
+/*          The k by k triangular factor T of the block reflector. */
+/*          If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is */
+/*          lower triangular. The rest of the array is not used. */
+
+/*  LDT     (input) INTEGER */
+/*          The leading dimension of the array T. LDT >= K. */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*    A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/*  The shape of the matrix V and the storage of the vectors which define */
+/*  the H(i) is best illustrated by the following example with n = 5 and */
+/*  k = 3. The elements equal to 1 are not stored; the corresponding */
+/*  array elements are modified but restored on exit. The rest of the */
+/*  array is not used. */
+
+/*  DIRECT = 'F' and STOREV = 'C':         DIRECT = 'F' and STOREV = 'R': */
+
+/*                                              ______V_____ */
+/*         ( v1 v2 v3 )                        /            \ */
+/*         ( v1 v2 v3 )                      ( v1 v1 v1 v1 v1 . . . . 1 ) */
+/*     V = ( v1 v2 v3 )                      ( v2 v2 v2 v2 v2 . . . 1   ) */
+/*         ( v1 v2 v3 )                      ( v3 v3 v3 v3 v3 . . 1     ) */
+/*         ( v1 v2 v3 ) */
+/*            .  .  . */
+/*            .  .  . */
+/*            1  .  . */
+/*               1  . */
+/*                  1 */
+
+/*  DIRECT = 'B' and STOREV = 'C':         DIRECT = 'B' and STOREV = 'R': */
+
+/*                                                        ______V_____ */
+/*            1                                          /            \ */
+/*            .  1                           ( 1 . . . . v1 v1 v1 v1 v1 ) */
+/*            .  .  1                        ( . 1 . . . v2 v2 v2 v2 v2 ) */
+/*            .  .  .                        ( . . 1 . . v3 v3 v3 v3 v3 ) */
+/*            .  .  . */
+/*         ( v1 v2 v3 ) */
+/*         ( v1 v2 v3 ) */
+/*     V = ( v1 v2 v3 ) */
+/*         ( v1 v2 v3 ) */
+/*         ( v1 v2 v3 ) */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Check for currently supported options */
+
+    /* Parameter adjustments */
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+    --tau;
+    t_dim1 = *ldt;
+    t_offset = 1 + t_dim1;
+    t -= t_offset;
+
+    /* Function Body */
+    info = 0;
+    if (! lsame_(direct, "B")) {
+	info = -1; /* only DIRECT = 'B' (backward product) is implemented */
+    } else if (! lsame_(storev, "R")) {
+	info = -2; /* only STOREV = 'R' (rowwise storage) is implemented */
+    }
+    if (info != 0) {
+	i__1 = -info;
+	xerbla_("ZLARZT", &i__1);
+	return 0;
+    }
+
+    for (i__ = *k; i__ >= 1; --i__) { /* build column i of T, last reflector first */
+	i__1 = i__;
+	if (tau[i__1].r == 0. && tau[i__1].i == 0.) {
+
+/*           H(i)  =  I : zero out column i of the lower triangle of T */
+
+	    i__1 = *k;
+	    for (j = i__; j <= i__1; ++j) {
+		i__2 = j + i__ * t_dim1;
+		t[i__2].r = 0., t[i__2].i = 0.;
+/* L10: */
+	    }
+	} else {
+
+/*           general case */
+
+	    if (i__ < *k) {
+
+/*              T(i+1:k,i) = - tau(i) * V(i+1:k,1:n) * V(i,1:n)' */
+
+		zlacgv_(n, &v[i__ + v_dim1], ldv); /* conjugate row i of V in place */
+		i__1 = *k - i__;
+		i__2 = i__;
+		z__1.r = -tau[i__2].r, z__1.i = -tau[i__2].i;
+		zgemv_("No transpose", &i__1, n, &z__1, &v[i__ + 1 + v_dim1],
+			ldv, &v[i__ + v_dim1], ldv, &c_b1, &t[i__ + 1 + i__ *
+			t_dim1], &c__1);
+		zlacgv_(n, &v[i__ + v_dim1], ldv); /* undo the conjugation, restoring V */
+
+/*              T(i+1:k,i) = T(i+1:k,i+1:k) * T(i+1:k,i) */
+
+		i__1 = *k - i__;
+		ztrmv_("Lower", "No transpose", "Non-unit", &i__1, &t[i__ + 1
+			+ (i__ + 1) * t_dim1], ldt, &t[i__ + 1 + i__ * t_dim1]
+, &c__1);
+	    }
+	    i__1 = i__ + i__ * t_dim1;
+	    i__2 = i__;
+	    t[i__1].r = tau[i__2].r, t[i__1].i = tau[i__2].i; /* T(i,i) = tau(i) */
+	}
+/* L20: */
+    }
+    return 0;
+
+/*     End of ZLARZT */
+
+} /* zlarzt_ */
diff --git a/contrib/libs/clapack/zlascl.c b/contrib/libs/clapack/zlascl.c
new file mode 100644
index 0000000000..f0023c07b9
--- /dev/null
+++ b/contrib/libs/clapack/zlascl.c
@@ -0,0 +1,376 @@
+/* zlascl.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlascl_(char *type__, integer *kl, integer *ku,
+	doublereal *cfrom, doublereal *cto, integer *m, integer *n,
+	doublecomplex *a, integer *lda, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    doublecomplex z__1;
+
+    /* Local variables */
+    integer i__, j, k1, k2, k3, k4;
+    doublereal mul, cto1;
+    logical done;
+    doublereal ctoc;
+    extern logical lsame_(char *, char *);
+    integer itype;
+    doublereal cfrom1;
+    extern doublereal dlamch_(char *);
+    doublereal cfromc;
+    extern logical disnan_(doublereal *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal bignum, smlnum;
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZLASCL multiplies the M by N complex matrix A by the real scalar */
+/*  CTO/CFROM.  This is done without over/underflow as long as the final */
+/*  result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that */
+/*  A may be full, upper triangular, lower triangular, upper Hessenberg, */
+/*  or banded. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  TYPE    (input) CHARACTER*1 */
+/*          TYPE indices the storage type of the input matrix. */
+/*          = 'G':  A is a full matrix. */
+/*          = 'L':  A is a lower triangular matrix. */
+/*          = 'U':  A is an upper triangular matrix. */
+/*          = 'H':  A is an upper Hessenberg matrix. */
+/*          = 'B':  A is a symmetric band matrix with lower bandwidth KL */
+/*                  and upper bandwidth KU and with the only the lower */
+/*                  half stored. */
+/*          = 'Q':  A is a symmetric band matrix with lower bandwidth KL */
+/*                  and upper bandwidth KU and with the only the upper */
+/*                  half stored. */
+/*          = 'Z':  A is a band matrix with lower bandwidth KL and upper */
+/*                  bandwidth KU. */
+
+/*  KL      (input) INTEGER */
+/*          The lower bandwidth of A.  Referenced only if TYPE = 'B', */
+/*          'Q' or 'Z'. */
+
+/*  KU      (input) INTEGER */
+/*          The upper bandwidth of A.  Referenced only if TYPE = 'B', */
+/*          'Q' or 'Z'. */
+
+/*  CFROM   (input) DOUBLE PRECISION */
+/*  CTO     (input) DOUBLE PRECISION */
+/*          The matrix A is multiplied by CTO/CFROM. A(I,J) is computed */
+/*          without over/underflow if the final result CTO*A(I,J)/CFROM */
+/*          can be represented without over/underflow.  CFROM must be */
+/*          nonzero. */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix A.  M >= 0. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix A.  N >= 0. */
+
+/*  A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/*          The matrix to be multiplied by CTO/CFROM.  See TYPE for the */
+/*          storage type. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,M). */
+
+/*  INFO    (output) INTEGER */
+/*          0  - successful exit */
+/*          <0 - if INFO = -i, the i-th argument had an illegal value. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+
+    if (lsame_(type__, "G")) {
+	itype = 0;
+    } else if (lsame_(type__, "L")) {
+	itype = 1;
+    } else if (lsame_(type__, "U")) {
+	itype = 2;
+    } else if (lsame_(type__, "H")) {
+	itype = 3;
+    } else if (lsame_(type__, "B")) {
+	itype = 4;
+    } else if (lsame_(type__, "Q")) {
+	itype = 5;
+    } else if (lsame_(type__, "Z")) {
+	itype = 6;
+    } else {
+	itype = -1; /* unrecognized TYPE */
+    }
+
+    if (itype == -1) {
+	*info = -1;
+    } else if (*cfrom == 0. || disnan_(cfrom)) {
+	*info = -4; /* CFROM must be nonzero and not NaN */
+    } else if (disnan_(cto)) {
+	*info = -5;
+    } else if (*m < 0) {
+	*info = -6;
+    } else if (*n < 0 || itype == 4 && *n != *m || itype == 5 && *n != *m) {
+	*info = -7; /* symmetric band types require a square matrix */
+    } else if (itype <= 3 && *lda < max(1,*m)) {
+	*info = -9;
+    } else if (itype >= 4) {
+/* Computing MAX */
+	i__1 = *m - 1;
+	if (*kl < 0 || *kl > max(i__1,0)) {
+	    *info = -2;
+	} else /* if(complicated condition) */ {
+/* Computing MAX */
+	    i__1 = *n - 1;
+	    if (*ku < 0 || *ku > max(i__1,0) || (itype == 4 || itype == 5) &&
+		    *kl != *ku) {
+		*info = -3;
+	    } else if (itype == 4 && *lda < *kl + 1 || itype == 5 && *lda < *
+		    ku + 1 || itype == 6 && *lda < (*kl << 1) + *ku + 1) {
+		*info = -9;
+	    }
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZLASCL", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0 || *m == 0) {
+	return 0;
+    }
+
+/*     Get machine parameters */
+
+    smlnum = dlamch_("S"); /* safe minimum, per dlamch */
+    bignum = 1. / smlnum;
+
+    cfromc = *cfrom;
+    ctoc = *cto;
+
+L10: /* compute one safe partial multiplier MUL; loop until CTO/CFROM fully applied */
+    cfrom1 = cfromc * smlnum;
+    if (cfrom1 == cfromc) {
+/*        CFROMC is an inf.  Multiply by a correctly signed zero for */
+/*        finite CTOC, or a NaN if CTOC is infinite. */
+	mul = ctoc / cfromc;
+	done = TRUE_;
+	cto1 = ctoc;
+    } else {
+	cto1 = ctoc / bignum;
+	if (cto1 == ctoc) {
+/*           CTOC is either 0 or an inf.  In both cases, CTOC itself */
+/*           serves as the correct multiplication factor. */
+	    mul = ctoc;
+	    done = TRUE_;
+	    cfromc = 1.;
+	} else if (abs(cfrom1) > abs(ctoc) && ctoc != 0.) {
+	    mul = smlnum; /* step down; more passes needed */
+	    done = FALSE_;
+	    cfromc = cfrom1;
+	} else if (abs(cto1) > abs(cfromc)) {
+	    mul = bignum; /* step up; more passes needed */
+	    done = FALSE_;
+	    ctoc = cto1;
+	} else {
+	    mul = ctoc / cfromc; /* remaining ratio is safe to apply directly */
+	    done = TRUE_;
+	}
+    }
+
+    if (itype == 0) {
+
+/*        Full matrix */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		i__4 = i__ + j * a_dim1;
+		z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i;
+		a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L20: */
+	    }
+/* L30: */
+	}
+
+    } else if (itype == 1) {
+
+/*        Lower triangular matrix */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = j; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		i__4 = i__ + j * a_dim1;
+		z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i;
+		a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L40: */
+	    }
+/* L50: */
+	}
+
+    } else if (itype == 2) {
+
+/*        Upper triangular matrix */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = min(j,*m);
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		i__4 = i__ + j * a_dim1;
+		z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i;
+		a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L60: */
+	    }
+/* L70: */
+	}
+
+    } else if (itype == 3) {
+
+/*        Upper Hessenberg matrix */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+	    i__3 = j + 1;
+	    i__2 = min(i__3,*m);
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		i__4 = i__ + j * a_dim1;
+		z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i;
+		a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L80: */
+	    }
+/* L90: */
+	}
+
+    } else if (itype == 4) {
+
+/*        Lower half of a symmetric band matrix */
+
+	k3 = *kl + 1;
+	k4 = *n + 1;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+	    i__3 = k3, i__4 = k4 - j;
+	    i__2 = min(i__3,i__4);
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		i__4 = i__ + j * a_dim1;
+		z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i;
+		a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L100: */
+	    }
+/* L110: */
+	}
+
+    } else if (itype == 5) {
+
+/*        Upper half of a symmetric band matrix */
+
+	k1 = *ku + 2;
+	k3 = *ku + 1;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+	    i__2 = k1 - j;
+	    i__3 = k3;
+	    for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+		i__2 = i__ + j * a_dim1;
+		i__4 = i__ + j * a_dim1;
+		z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i;
+		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+/* L120: */
+	    }
+/* L130: */
+	}
+
+    } else if (itype == 6) {
+
+/*        Band matrix */
+
+	k1 = *kl + *ku + 2;
+	k2 = *kl + 1;
+	k3 = (*kl << 1) + *ku + 1;
+	k4 = *kl + *ku + 1 + *m;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+	    i__3 = k1 - j;
+/* Computing MIN */
+	    i__4 = k3, i__5 = k4 - j;
+	    i__2 = min(i__4,i__5);
+	    for (i__ = max(i__3,k2); i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		i__4 = i__ + j * a_dim1;
+		z__1.r = mul * a[i__4].r, z__1.i = mul * a[i__4].i;
+		a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L140: */
+	    }
+/* L150: */
+	}
+
+    }
+
+    if (! done) {
+	goto L10; /* apply another partial scaling pass */
+    }
+
+    return 0;
+
+/*     End of ZLASCL */
+
+} /* zlascl_ */
diff --git a/contrib/libs/clapack/zlaset.c b/contrib/libs/clapack/zlaset.c
new file mode 100644
index 0000000000..6e07daf648
--- /dev/null
+++ b/contrib/libs/clapack/zlaset.c
@@ -0,0 +1,163 @@
+/* zlaset.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlaset_(char *uplo, integer *m, integer *n,
+	doublecomplex *alpha, doublecomplex *beta, doublecomplex *a, integer *
+	lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+
+    /* Local variables */
+    integer i__, j;
+    extern logical lsame_(char *, char *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZLASET initializes a 2-D array A to BETA on the diagonal and */
+/*  ALPHA on the offdiagonals. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies the part of the matrix A to be set. */
+/*          = 'U':      Upper triangular part is set. The lower triangle */
+/*                      is unchanged. */
+/*          = 'L':      Lower triangular part is set. The upper triangle */
+/*                      is unchanged. */
+/*          Otherwise:  All of the matrix A is set. */
+
+/*  M       (input) INTEGER */
+/*          On entry, M specifies the number of rows of A. */
+
+/*  N       (input) INTEGER */
+/*          On entry, N specifies the number of columns of A. */
+
+/*  ALPHA   (input) COMPLEX*16 */
+/*          All the offdiagonal array elements are set to ALPHA. */
+
+/*  BETA    (input) COMPLEX*16 */
+/*          All the diagonal array elements are set to BETA. */
+
+/*  A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/*          On entry, the m by n matrix A. */
+/*          On exit, A(i,j) = ALPHA, 1 <= i <= m, 1 <= j <= n, i.ne.j; */
+/*                   A(i,i) = BETA , 1 <= i <= min(m,n) */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,M). */
+
+/*  ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    if (lsame_(uplo, "U")) {
+
+/*        Set the diagonal to BETA and the strictly upper triangular */
+/*        part of the array to ALPHA. */
+
+	i__1 = *n;
+	for (j = 2; j <= i__1; ++j) { /* column 1 has no strictly-upper entries */
+/* Computing MIN */
+	    i__3 = j - 1;
+	    i__2 = min(i__3,*m);
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		a[i__3].r = alpha->r, a[i__3].i = alpha->i; /* A(i,j) := ALPHA, i < j */
+/* L10: */
+	    }
+/* L20: */
+	}
+	i__1 = min(*n,*m);
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__ + i__ * a_dim1;
+	    a[i__2].r = beta->r, a[i__2].i = beta->i; /* A(i,i) := BETA */
+/* L30: */
+	}
+
+    } else if (lsame_(uplo, "L")) {
+
+/*        Set the diagonal to BETA and the strictly lower triangular */
+/*        part of the array to ALPHA. */
+
+	i__1 = min(*m,*n);
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = j + 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		a[i__3].r = alpha->r, a[i__3].i = alpha->i; /* A(i,j) := ALPHA, i > j */
+/* L40: */
+	    }
+/* L50: */
+	}
+	i__1 = min(*n,*m);
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__ + i__ * a_dim1;
+	    a[i__2].r = beta->r, a[i__2].i = beta->i; /* A(i,i) := BETA */
+/* L60: */
+	}
+
+    } else {
+
+/*        Set the array to BETA on the diagonal and ALPHA on the */
+/*        offdiagonal. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *m;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		i__3 = i__ + j * a_dim1;
+		a[i__3].r = alpha->r, a[i__3].i = alpha->i; /* fill entire column */
+/* L70: */
+	    }
+/* L80: */
+	}
+	i__1 = min(*m,*n);
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__ + i__ * a_dim1;
+	    a[i__2].r = beta->r, a[i__2].i = beta->i; /* overwrite diagonal with BETA */
+/* L90: */
+	}
+    }
+
+    return 0;
+
+/*     End of ZLASET */
+
+} /* zlaset_ */
diff --git a/contrib/libs/clapack/zlasr.c b/contrib/libs/clapack/zlasr.c
new file mode 100644
index 0000000000..5fa4701d45
--- /dev/null
+++ b/contrib/libs/clapack/zlasr.c
@@ -0,0 +1,610 @@
+/* zlasr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlasr_(char *side, char *pivot, char *direct, integer *m,
+ integer *n, doublereal *c__, doublereal *s, doublecomplex *a,
+ integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Local variables */
+ integer i__, j, info;
+ doublecomplex temp;
+ extern logical lsame_(char *, char *);
+ doublereal ctemp, stemp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLASR applies a sequence of real plane rotations to a complex matrix */
+/* A, from either the left or the right. */
+
+/* When SIDE = 'L', the transformation takes the form */
+
+/* A := P*A */
+
+/* and when SIDE = 'R', the transformation takes the form */
+
+/* A := A*P**T */
+
+/* where P is an orthogonal matrix consisting of a sequence of z plane */
+/* rotations, with z = M when SIDE = 'L' and z = N when SIDE = 'R', */
+/* and P**T is the transpose of P. */
+
+/* When DIRECT = 'F' (Forward sequence), then */
+
+/* P = P(z-1) * ... * P(2) * P(1) */
+
+/* and when DIRECT = 'B' (Backward sequence), then */
+
+/* P = P(1) * P(2) * ... * P(z-1) */
+
+/* where P(k) is a plane rotation matrix defined by the 2-by-2 rotation */
+
+/* R(k) = ( c(k) s(k) ) */
+/* = ( -s(k) c(k) ). */
+
+/* When PIVOT = 'V' (Variable pivot), the rotation is performed */
+/* for the plane (k,k+1), i.e., P(k) has the form */
+
+/* P(k) = ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+/* ( c(k) s(k) ) */
+/* ( -s(k) c(k) ) */
+/* ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+
+/* where R(k) appears as a rank-2 modification to the identity matrix in */
+/* rows and columns k and k+1. */
+
+/* When PIVOT = 'T' (Top pivot), the rotation is performed for the */
+/* plane (1,k+1), so P(k) has the form */
+
+/* P(k) = ( c(k) s(k) ) */
+/* ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+/* ( -s(k) c(k) ) */
+/* ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+
+/* where R(k) appears in rows and columns 1 and k+1. */
+
+/* Similarly, when PIVOT = 'B' (Bottom pivot), the rotation is */
+/* performed for the plane (k,z), giving P(k) the form */
+
+/* P(k) = ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+/* ( c(k) s(k) ) */
+/* ( 1 ) */
+/* ( ... ) */
+/* ( 1 ) */
+/* ( -s(k) c(k) ) */
+
+/* where R(k) appears in rows and columns k and z. The rotations are */
+/* performed without ever forming P(k) explicitly. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* Specifies whether the plane rotation matrix P is applied to */
+/* A on the left or the right. */
+/* = 'L': Left, compute A := P*A */
+/* = 'R': Right, compute A:= A*P**T */
+
+/* PIVOT (input) CHARACTER*1 */
+/* Specifies the plane for which P(k) is a plane rotation */
+/* matrix. */
+/* = 'V': Variable pivot, the plane (k,k+1) */
+/* = 'T': Top pivot, the plane (1,k+1) */
+/* = 'B': Bottom pivot, the plane (k,z) */
+
+/* DIRECT (input) CHARACTER*1 */
+/* Specifies whether P is a forward or backward sequence of */
+/* plane rotations. */
+/* = 'F': Forward, P = P(z-1)*...*P(2)*P(1) */
+/* = 'B': Backward, P = P(1)*P(2)*...*P(z-1) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. If m <= 1, an immediate */
+/* return is effected. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. If n <= 1, an */
+/* immediate return is effected. */
+
+/* C (input) DOUBLE PRECISION array, dimension */
+/* (M-1) if SIDE = 'L' */
+/* (N-1) if SIDE = 'R' */
+/* The cosines c(k) of the plane rotations. */
+
+/* S (input) DOUBLE PRECISION array, dimension */
+/* (M-1) if SIDE = 'L' */
+/* (N-1) if SIDE = 'R' */
+/* The sines s(k) of the plane rotations. The 2-by-2 plane */
+/* rotation part of the matrix P(k), R(k), has the form */
+/* R(k) = ( c(k) s(k) ) */
+/* ( -s(k) c(k) ). */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* The M-by-N matrix A. On exit, A is overwritten by P*A if */
+/* SIDE = 'R' or by A*P**T if SIDE = 'L'. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ --c__;
+ --s;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! (lsame_(side, "L") || lsame_(side, "R"))) {
+ info = 1;
+ } else if (! (lsame_(pivot, "V") || lsame_(pivot,
+ "T") || lsame_(pivot, "B"))) {
+ info = 2;
+ } else if (! (lsame_(direct, "F") || lsame_(direct,
+ "B"))) {
+ info = 3;
+ } else if (*m < 0) {
+ info = 4;
+ } else if (*n < 0) {
+ info = 5;
+ } else if (*lda < max(1,*m)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("ZLASR ", &info);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+ if (lsame_(side, "L")) {
+
+/* Form P * A */
+
+ if (lsame_(pivot, "V")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + 1 + i__ * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = j + 1 + i__ * a_dim1;
+ z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i;
+ i__4 = j + i__ * a_dim1;
+ z__3.r = stemp * a[i__4].r, z__3.i = stemp * a[
+ i__4].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+ i__3 = j + i__ * a_dim1;
+ z__2.r = stemp * temp.r, z__2.i = stemp * temp.i;
+ i__4 = j + i__ * a_dim1;
+ z__3.r = ctemp * a[i__4].r, z__3.i = ctemp * a[
+ i__4].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = j + 1 + i__ * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = j + 1 + i__ * a_dim1;
+ z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i;
+ i__3 = j + i__ * a_dim1;
+ z__3.r = stemp * a[i__3].r, z__3.i = stemp * a[
+ i__3].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = j + i__ * a_dim1;
+ z__2.r = stemp * temp.r, z__2.i = stemp * temp.i;
+ i__3 = j + i__ * a_dim1;
+ z__3.r = ctemp * a[i__3].r, z__3.i = ctemp * a[
+ i__3].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+/* L30: */
+ }
+ }
+/* L40: */
+ }
+ }
+ } else if (lsame_(pivot, "T")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m;
+ for (j = 2; j <= i__1; ++j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1. || stemp != 0.) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = j + i__ * a_dim1;
+ z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i;
+ i__4 = i__ * a_dim1 + 1;
+ z__3.r = stemp * a[i__4].r, z__3.i = stemp * a[
+ i__4].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+ i__3 = i__ * a_dim1 + 1;
+ z__2.r = stemp * temp.r, z__2.i = stemp * temp.i;
+ i__4 = i__ * a_dim1 + 1;
+ z__3.r = ctemp * a[i__4].r, z__3.i = ctemp * a[
+ i__4].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m; j >= 2; --j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1. || stemp != 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = j + i__ * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = j + i__ * a_dim1;
+ z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i;
+ i__3 = i__ * a_dim1 + 1;
+ z__3.r = stemp * a[i__3].r, z__3.i = stemp * a[
+ i__3].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = i__ * a_dim1 + 1;
+ z__2.r = stemp * temp.r, z__2.i = stemp * temp.i;
+ i__3 = i__ * a_dim1 + 1;
+ z__3.r = ctemp * a[i__3].r, z__3.i = ctemp * a[
+ i__3].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+/* L70: */
+ }
+ }
+/* L80: */
+ }
+ }
+ } else if (lsame_(pivot, "B")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = j + i__ * a_dim1;
+ i__4 = *m + i__ * a_dim1;
+ z__2.r = stemp * a[i__4].r, z__2.i = stemp * a[
+ i__4].i;
+ z__3.r = ctemp * temp.r, z__3.i = ctemp * temp.i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+ i__3 = *m + i__ * a_dim1;
+ i__4 = *m + i__ * a_dim1;
+ z__2.r = ctemp * a[i__4].r, z__2.i = ctemp * a[
+ i__4].i;
+ z__3.r = stemp * temp.r, z__3.i = stemp * temp.i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L90: */
+ }
+ }
+/* L100: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = j + i__ * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = j + i__ * a_dim1;
+ i__3 = *m + i__ * a_dim1;
+ z__2.r = stemp * a[i__3].r, z__2.i = stemp * a[
+ i__3].i;
+ z__3.r = ctemp * temp.r, z__3.i = ctemp * temp.i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = *m + i__ * a_dim1;
+ i__3 = *m + i__ * a_dim1;
+ z__2.r = ctemp * a[i__3].r, z__2.i = ctemp * a[
+ i__3].i;
+ z__3.r = stemp * temp.r, z__3.i = stemp * temp.i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+ }
+ }
+ } else if (lsame_(side, "R")) {
+
+/* Form A * P' */
+
+ if (lsame_(pivot, "V")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + (j + 1) * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = i__ + (j + 1) * a_dim1;
+ z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i;
+ i__4 = i__ + j * a_dim1;
+ z__3.r = stemp * a[i__4].r, z__3.i = stemp * a[
+ i__4].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+ i__3 = i__ + j * a_dim1;
+ z__2.r = stemp * temp.r, z__2.i = stemp * temp.i;
+ i__4 = i__ + j * a_dim1;
+ z__3.r = ctemp * a[i__4].r, z__3.i = ctemp * a[
+ i__4].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L130: */
+ }
+ }
+/* L140: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + (j + 1) * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = i__ + (j + 1) * a_dim1;
+ z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i;
+ i__3 = i__ + j * a_dim1;
+ z__3.r = stemp * a[i__3].r, z__3.i = stemp * a[
+ i__3].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = i__ + j * a_dim1;
+ z__2.r = stemp * temp.r, z__2.i = stemp * temp.i;
+ i__3 = i__ + j * a_dim1;
+ z__3.r = ctemp * a[i__3].r, z__3.i = ctemp * a[
+ i__3].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+/* L150: */
+ }
+ }
+/* L160: */
+ }
+ }
+ } else if (lsame_(pivot, "T")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1. || stemp != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = i__ + j * a_dim1;
+ z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i;
+ i__4 = i__ + a_dim1;
+ z__3.r = stemp * a[i__4].r, z__3.i = stemp * a[
+ i__4].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+ i__3 = i__ + a_dim1;
+ z__2.r = stemp * temp.r, z__2.i = stemp * temp.i;
+ i__4 = i__ + a_dim1;
+ z__3.r = ctemp * a[i__4].r, z__3.i = ctemp * a[
+ i__4].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L170: */
+ }
+ }
+/* L180: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n; j >= 2; --j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if (ctemp != 1. || stemp != 0.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = i__ + j * a_dim1;
+ z__2.r = ctemp * temp.r, z__2.i = ctemp * temp.i;
+ i__3 = i__ + a_dim1;
+ z__3.r = stemp * a[i__3].r, z__3.i = stemp * a[
+ i__3].i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = i__ + a_dim1;
+ z__2.r = stemp * temp.r, z__2.i = stemp * temp.i;
+ i__3 = i__ + a_dim1;
+ z__3.r = ctemp * a[i__3].r, z__3.i = ctemp * a[
+ i__3].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+/* L190: */
+ }
+ }
+/* L200: */
+ }
+ }
+ } else if (lsame_(pivot, "B")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + *n * a_dim1;
+ z__2.r = stemp * a[i__4].r, z__2.i = stemp * a[
+ i__4].i;
+ z__3.r = ctemp * temp.r, z__3.i = ctemp * temp.i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+ i__3 = i__ + *n * a_dim1;
+ i__4 = i__ + *n * a_dim1;
+ z__2.r = ctemp * a[i__4].r, z__2.i = ctemp * a[
+ i__4].i;
+ z__3.r = stemp * temp.r, z__3.i = stemp * temp.i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L210: */
+ }
+ }
+/* L220: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if (ctemp != 1. || stemp != 0.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__ + *n * a_dim1;
+ z__2.r = stemp * a[i__3].r, z__2.i = stemp * a[
+ i__3].i;
+ z__3.r = ctemp * temp.r, z__3.i = ctemp * temp.i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = i__ + *n * a_dim1;
+ i__3 = i__ + *n * a_dim1;
+ z__2.r = ctemp * a[i__3].r, z__2.i = ctemp * a[
+ i__3].i;
+ z__3.r = stemp * temp.r, z__3.i = stemp * temp.i;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
+ z__3.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZLASR */
+
+} /* zlasr_ */
diff --git a/contrib/libs/clapack/zlassq.c b/contrib/libs/clapack/zlassq.c
new file mode 100644
index 0000000000..0c0f42a7de
--- /dev/null
+++ b/contrib/libs/clapack/zlassq.c
@@ -0,0 +1,138 @@
+/* zlassq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlassq_(integer *n, doublecomplex *x, integer *incx,
+ doublereal *scale, doublereal *sumsq)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer ix;
+ doublereal temp1;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLASSQ returns the values scl and ssq such that */
+
+/* ( scl**2 )*ssq = x( 1 )**2 +...+ x( n )**2 + ( scale**2 )*sumsq, */
+
+/* where x( i ) = abs( X( 1 + ( i - 1 )*INCX ) ). The value of sumsq is */
+/* assumed to be at least unity and the value of ssq will then satisfy */
+
+/* 1.0 .le. ssq .le. ( sumsq + 2*n ). */
+
+/* scale is assumed to be non-negative and scl returns the value */
+
+/* scl = max( scale, abs( real( x( i ) ) ), abs( aimag( x( i ) ) ) ), */
+/* i */
+
+/* scale and sumsq must be supplied in SCALE and SUMSQ respectively. */
+/* SCALE and SUMSQ are overwritten by scl and ssq respectively. */
+
+/* The routine makes only one pass through the vector X. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of elements to be used from the vector X. */
+
+/* X (input) COMPLEX*16 array, dimension (N) */
+/* The vector x as described above. */
+/* x( i ) = X( 1 + ( i - 1 )*INCX ), 1 <= i <= n. */
+
+/* INCX (input) INTEGER */
+/* The increment between successive values of the vector X. */
+/* INCX > 0. */
+
+/* SCALE (input/output) DOUBLE PRECISION */
+/* On entry, the value scale in the equation above. */
+/* On exit, SCALE is overwritten with the value scl . */
+
+/* SUMSQ (input/output) DOUBLE PRECISION */
+/* On entry, the value sumsq in the equation above. */
+/* On exit, SUMSQ is overwritten with the value ssq . */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*n > 0) {
+ i__1 = (*n - 1) * *incx + 1;
+ i__2 = *incx;
+ for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
+ i__3 = ix;
+ if (x[i__3].r != 0.) {
+ i__3 = ix;
+ temp1 = (d__1 = x[i__3].r, abs(d__1));
+ if (*scale < temp1) {
+/* Computing 2nd power */
+ d__1 = *scale / temp1;
+ *sumsq = *sumsq * (d__1 * d__1) + 1;
+ *scale = temp1;
+ } else {
+/* Computing 2nd power */
+ d__1 = temp1 / *scale;
+ *sumsq += d__1 * d__1;
+ }
+ }
+ if (d_imag(&x[ix]) != 0.) {
+ temp1 = (d__1 = d_imag(&x[ix]), abs(d__1));
+ if (*scale < temp1) {
+/* Computing 2nd power */
+ d__1 = *scale / temp1;
+ *sumsq = *sumsq * (d__1 * d__1) + 1;
+ *scale = temp1;
+ } else {
+/* Computing 2nd power */
+ d__1 = temp1 / *scale;
+ *sumsq += d__1 * d__1;
+ }
+ }
+/* L10: */
+ }
+ }
+
+ return 0;
+
+/* End of ZLASSQ */
+
+} /* zlassq_ */
diff --git a/contrib/libs/clapack/zlaswp.c b/contrib/libs/clapack/zlaswp.c
new file mode 100644
index 0000000000..3956e40316
--- /dev/null
+++ b/contrib/libs/clapack/zlaswp.c
@@ -0,0 +1,166 @@
+/* zlaswp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlaswp_(integer *n, doublecomplex *a, integer *lda,
+ integer *k1, integer *k2, integer *ipiv, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+
+ /* Local variables */
+ integer i__, j, k, i1, i2, n32, ip, ix, ix0, inc;
+ doublecomplex temp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLASWP performs a series of row interchanges on the matrix A. */
+/* One row interchange is initiated for each of rows K1 through K2 of A. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the matrix of column dimension N to which the row */
+/* interchanges will be applied. */
+/* On exit, the permuted matrix. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+
+/* K1 (input) INTEGER */
+/* The first element of IPIV for which a row interchange will */
+/* be done. */
+
+/* K2 (input) INTEGER */
+/* The last element of IPIV for which a row interchange will */
+/* be done. */
+
+/* IPIV (input) INTEGER array, dimension (K2*abs(INCX)) */
+/* The vector of pivot indices. Only the elements in positions */
+/* K1 through K2 of IPIV are accessed. */
+/* IPIV(K) = L implies rows K and L are to be interchanged. */
+
+/* INCX (input) INTEGER */
+/* The increment between successive values of IPIV. If IPIV */
+/* is negative, the pivots are applied in reverse order. */
+
+/* Further Details */
+/* =============== */
+
+/* Modified by */
+/* R. C. Whaley, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Interchange row I with row IPIV(I) for each of rows K1 through K2. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ if (*incx > 0) {
+ ix0 = *k1;
+ i1 = *k1;
+ i2 = *k2;
+ inc = 1;
+ } else if (*incx < 0) {
+ ix0 = (1 - *k2) * *incx + 1;
+ i1 = *k2;
+ i2 = *k1;
+ inc = -1;
+ } else {
+ return 0;
+ }
+
+ n32 = *n / 32 << 5;
+ if (n32 != 0) {
+ i__1 = n32;
+ for (j = 1; j <= i__1; j += 32) {
+ ix = ix0;
+ i__2 = i2;
+ i__3 = inc;
+ for (i__ = i1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3)
+ {
+ ip = ipiv[ix];
+ if (ip != i__) {
+ i__4 = j + 31;
+ for (k = j; k <= i__4; ++k) {
+ i__5 = i__ + k * a_dim1;
+ temp.r = a[i__5].r, temp.i = a[i__5].i;
+ i__5 = i__ + k * a_dim1;
+ i__6 = ip + k * a_dim1;
+ a[i__5].r = a[i__6].r, a[i__5].i = a[i__6].i;
+ i__5 = ip + k * a_dim1;
+ a[i__5].r = temp.r, a[i__5].i = temp.i;
+/* L10: */
+ }
+ }
+ ix += *incx;
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+ if (n32 != *n) {
+ ++n32;
+ ix = ix0;
+ i__1 = i2;
+ i__3 = inc;
+ for (i__ = i1; i__3 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__3) {
+ ip = ipiv[ix];
+ if (ip != i__) {
+ i__2 = *n;
+ for (k = n32; k <= i__2; ++k) {
+ i__4 = i__ + k * a_dim1;
+ temp.r = a[i__4].r, temp.i = a[i__4].i;
+ i__4 = i__ + k * a_dim1;
+ i__5 = ip + k * a_dim1;
+ a[i__4].r = a[i__5].r, a[i__4].i = a[i__5].i;
+ i__4 = ip + k * a_dim1;
+ a[i__4].r = temp.r, a[i__4].i = temp.i;
+/* L40: */
+ }
+ }
+ ix += *incx;
+/* L50: */
+ }
+ }
+
+ return 0;
+
+/* End of ZLASWP */
+
+} /* zlaswp_ */
diff --git a/contrib/libs/clapack/zlasyf.c b/contrib/libs/clapack/zlasyf.c
new file mode 100644
index 0000000000..e55fc2ca8a
--- /dev/null
+++ b/contrib/libs/clapack/zlasyf.c
@@ -0,0 +1,831 @@
+/* zlasyf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zlasyf_(char *uplo, integer *n, integer *nb, integer *kb,
+ doublecomplex *a, integer *lda, integer *ipiv, doublecomplex *w,
+ integer *ldw, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_imag(doublecomplex *);
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer j, k;
+ doublecomplex t, r1, d11, d21, d22;
+ integer jb, jj, kk, jp, kp, kw, kkw, imax, jmax;
+ doublereal alpha;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *), zgemm_(char *, char *, integer *,
+ integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ integer kstep;
+ extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *),
+ zcopy_(integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *), zswap_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ doublereal absakk, colmax;
+ extern integer izamax_(integer *, doublecomplex *, integer *);
+ doublereal rowmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLASYF computes a partial factorization of a complex symmetric matrix */
+/* A using the Bunch-Kaufman diagonal pivoting method. The partial */
+/* factorization has the form: */
+
+/* A = ( I U12 ) ( A11 0 ) ( I 0 ) if UPLO = 'U', or: */
+/* ( 0 U22 ) ( 0 D ) ( U12' U22' ) */
+
+/* A = ( L11 0 ) ( D 0 ) ( L11' L21' ) if UPLO = 'L' */
+/* ( L21 I ) ( 0 A22 ) ( 0 I ) */
+
+/* where the order of D is at most NB. The actual order is returned in */
+/* the argument KB, and is either NB or NB-1, or N if N <= NB. */
+/* Note that U' denotes the transpose of U. */
+
+/* ZLASYF is an auxiliary routine called by ZSYTRF. It uses blocked code */
+/* (calling Level 3 BLAS) to update the submatrix A11 (if UPLO = 'U') or */
+/* A22 (if UPLO = 'L'). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NB (input) INTEGER */
+/* The maximum number of columns of the matrix A that should be */
+/* factored. NB should be at least 2 to allow for 2-by-2 pivot */
+/* blocks. */
+
+/* KB (output) INTEGER */
+/* The number of columns of A that were actually factored. */
+/* KB is either NB-1 or NB, or N if N <= NB. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+/* On exit, A contains details of the partial factorization. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If UPLO = 'U', only the last KB elements of IPIV are set; */
+/* if UPLO = 'L', only the first KB elements are set. */
+
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* W (workspace) COMPLEX*16 array, dimension (LDW,NB) */
+
+/* LDW (input) INTEGER */
+/* The leading dimension of the array W. LDW >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* > 0: if INFO = k, D(k,k) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ w_dim1 = *ldw;
+ w_offset = 1 + w_dim1;
+ w -= w_offset;
+
+ /* Function Body */
+ *info = 0;
+
+/* Initialize ALPHA for use in choosing pivot block size. */
+
+ alpha = (sqrt(17.) + 1.) / 8.;
+
+ if (lsame_(uplo, "U")) {
+
+/* Factorize the trailing columns of A using the upper triangle */
+/* of A and working backwards, and compute the matrix W = U12*D */
+/* for use in updating A11 */
+
+/* K is the main loop index, decreasing from N in steps of 1 or 2 */
+
+/* KW is the column of W which corresponds to column K of A */
+
+ k = *n;
+L10:
+ kw = *nb + k - *n;
+
+/* Exit from loop */
+
+ if (k <= *n - *nb + 1 && *nb < *n || k < 1) {
+ goto L30;
+ }
+
+/* Copy column K of A to column KW of W and update it */
+
+ zcopy_(&k, &a[k * a_dim1 + 1], &c__1, &w[kw * w_dim1 + 1], &c__1);
+ if (k < *n) {
+ i__1 = *n - k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &k, &i__1, &z__1, &a[(k + 1) * a_dim1 + 1],
+ lda, &w[k + (kw + 1) * w_dim1], ldw, &c_b1, &w[kw *
+ w_dim1 + 1], &c__1);
+ }
+
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ i__1 = k + kw * w_dim1;
+ absakk = (d__1 = w[i__1].r, abs(d__1)) + (d__2 = d_imag(&w[k + kw *
+ w_dim1]), abs(d__2));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k > 1) {
+ i__1 = k - 1;
+ imax = izamax_(&i__1, &w[kw * w_dim1 + 1], &c__1);
+ i__1 = imax + kw * w_dim1;
+ colmax = (d__1 = w[i__1].r, abs(d__1)) + (d__2 = d_imag(&w[imax +
+ kw * w_dim1]), abs(d__2));
+ } else {
+ colmax = 0.;
+ }
+
+ if (max(absakk,colmax) == 0.) {
+
+/* Column K is zero: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* Copy column IMAX to column KW-1 of W and update it */
+
+ zcopy_(&imax, &a[imax * a_dim1 + 1], &c__1, &w[(kw - 1) *
+ w_dim1 + 1], &c__1);
+ i__1 = k - imax;
+ zcopy_(&i__1, &a[imax + (imax + 1) * a_dim1], lda, &w[imax +
+ 1 + (kw - 1) * w_dim1], &c__1);
+ if (k < *n) {
+ i__1 = *n - k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &k, &i__1, &z__1, &a[(k + 1) *
+ a_dim1 + 1], lda, &w[imax + (kw + 1) * w_dim1],
+ ldw, &c_b1, &w[(kw - 1) * w_dim1 + 1], &c__1);
+ }
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = k - imax;
+ jmax = imax + izamax_(&i__1, &w[imax + 1 + (kw - 1) * w_dim1],
+ &c__1);
+ i__1 = jmax + (kw - 1) * w_dim1;
+ rowmax = (d__1 = w[i__1].r, abs(d__1)) + (d__2 = d_imag(&w[
+ jmax + (kw - 1) * w_dim1]), abs(d__2));
+ if (imax > 1) {
+ i__1 = imax - 1;
+ jmax = izamax_(&i__1, &w[(kw - 1) * w_dim1 + 1], &c__1);
+/* Computing MAX */
+ i__1 = jmax + (kw - 1) * w_dim1;
+ d__3 = rowmax, d__4 = (d__1 = w[i__1].r, abs(d__1)) + (
+ d__2 = d_imag(&w[jmax + (kw - 1) * w_dim1]), abs(
+ d__2));
+ rowmax = max(d__3,d__4);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else /* if(complicated condition) */ {
+ i__1 = imax + (kw - 1) * w_dim1;
+ if ((d__1 = w[i__1].r, abs(d__1)) + (d__2 = d_imag(&w[
+ imax + (kw - 1) * w_dim1]), abs(d__2)) >= alpha *
+ rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+
+/* copy column KW-1 of W to column KW */
+
+ zcopy_(&k, &w[(kw - 1) * w_dim1 + 1], &c__1, &w[kw *
+ w_dim1 + 1], &c__1);
+ } else {
+
+/* interchange rows and columns K-1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+ }
+
+ kk = k - kstep + 1;
+ kkw = *nb + kk - *n;
+
+/* Updated column KP is already stored in column KKW of W */
+
+ if (kp != kk) {
+
+/* Copy non-updated column KK to column KP */
+
+ i__1 = kp + k * a_dim1;
+ i__2 = kk + k * a_dim1;
+ a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+ i__1 = k - 1 - kp;
+ zcopy_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + (kp +
+ 1) * a_dim1], lda);
+ zcopy_(&kp, &a[kk * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1], &
+ c__1);
+
+/* Interchange rows KK and KP in last KK columns of A and W */
+
+ i__1 = *n - kk + 1;
+ zswap_(&i__1, &a[kk + kk * a_dim1], lda, &a[kp + kk * a_dim1],
+ lda);
+ i__1 = *n - kk + 1;
+ zswap_(&i__1, &w[kk + kkw * w_dim1], ldw, &w[kp + kkw *
+ w_dim1], ldw);
+ }
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column KW of W now holds */
+
+/* W(k) = U(k)*D(k) */
+
+/* where U(k) is the k-th column of U */
+
+/* Store U(k) in column k of A */
+
+ zcopy_(&k, &w[kw * w_dim1 + 1], &c__1, &a[k * a_dim1 + 1], &
+ c__1);
+ z_div(&z__1, &c_b1, &a[k + k * a_dim1]);
+ r1.r = z__1.r, r1.i = z__1.i;
+ i__1 = k - 1;
+ zscal_(&i__1, &r1, &a[k * a_dim1 + 1], &c__1);
+ } else {
+
+/* 2-by-2 pivot block D(k): columns KW and KW-1 of W now */
+/* hold */
+
+/* ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */
+
+/* where U(k) and U(k-1) are the k-th and (k-1)-th columns */
+/* of U */
+
+ if (k > 2) {
+
+/* Store U(k) and U(k-1) in columns k and k-1 of A */
+
+ i__1 = k - 1 + kw * w_dim1;
+ d21.r = w[i__1].r, d21.i = w[i__1].i;
+ z_div(&z__1, &w[k + kw * w_dim1], &d21);
+ d11.r = z__1.r, d11.i = z__1.i;
+ z_div(&z__1, &w[k - 1 + (kw - 1) * w_dim1], &d21);
+ d22.r = z__1.r, d22.i = z__1.i;
+ z__3.r = d11.r * d22.r - d11.i * d22.i, z__3.i = d11.r *
+ d22.i + d11.i * d22.r;
+ z__2.r = z__3.r - 1., z__2.i = z__3.i - 0.;
+ z_div(&z__1, &c_b1, &z__2);
+ t.r = z__1.r, t.i = z__1.i;
+ z_div(&z__1, &t, &d21);
+ d21.r = z__1.r, d21.i = z__1.i;
+ i__1 = k - 2;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + (k - 1) * a_dim1;
+ i__3 = j + (kw - 1) * w_dim1;
+ z__3.r = d11.r * w[i__3].r - d11.i * w[i__3].i,
+ z__3.i = d11.r * w[i__3].i + d11.i * w[i__3]
+ .r;
+ i__4 = j + kw * w_dim1;
+ z__2.r = z__3.r - w[i__4].r, z__2.i = z__3.i - w[i__4]
+ .i;
+ z__1.r = d21.r * z__2.r - d21.i * z__2.i, z__1.i =
+ d21.r * z__2.i + d21.i * z__2.r;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = j + k * a_dim1;
+ i__3 = j + kw * w_dim1;
+ z__3.r = d22.r * w[i__3].r - d22.i * w[i__3].i,
+ z__3.i = d22.r * w[i__3].i + d22.i * w[i__3]
+ .r;
+ i__4 = j + (kw - 1) * w_dim1;
+ z__2.r = z__3.r - w[i__4].r, z__2.i = z__3.i - w[i__4]
+ .i;
+ z__1.r = d21.r * z__2.r - d21.i * z__2.i, z__1.i =
+ d21.r * z__2.i + d21.i * z__2.r;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+/* L20: */
+ }
+ }
+
+/* Copy D(k) to A */
+
+ i__1 = k - 1 + (k - 1) * a_dim1;
+ i__2 = k - 1 + (kw - 1) * w_dim1;
+ a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+ i__1 = k - 1 + k * a_dim1;
+ i__2 = k - 1 + kw * w_dim1;
+ a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+ i__1 = k + k * a_dim1;
+ i__2 = k + kw * w_dim1;
+ a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k - 1] = -kp;
+ }
+
+/* Decrease K and return to the start of the main loop */
+
+ k -= kstep;
+ goto L10;
+
+L30:
+
+/* Update the upper triangle of A11 (= A(1:k,1:k)) as */
+
+/* A11 := A11 - U12*D*U12' = A11 - U12*W' */
+
+/* computing blocks of NB columns at a time */
+
+ i__1 = -(*nb);
+ for (j = (k - 1) / *nb * *nb + 1; i__1 < 0 ? j >= 1 : j <= 1; j +=
+ i__1) {
+/* Computing MIN */
+ i__2 = *nb, i__3 = k - j + 1;
+ jb = min(i__2,i__3);
+
+/* Update the upper triangle of the diagonal block */
+
+ i__2 = j + jb - 1;
+ for (jj = j; jj <= i__2; ++jj) {
+ i__3 = jj - j + 1;
+ i__4 = *n - k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__3, &i__4, &z__1, &a[j + (k + 1) *
+ a_dim1], lda, &w[jj + (kw + 1) * w_dim1], ldw, &c_b1,
+ &a[j + jj * a_dim1], &c__1);
+/* L40: */
+ }
+
+/* Update the rectangular superdiagonal block */
+
+ i__2 = j - 1;
+ i__3 = *n - k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "Transpose", &i__2, &jb, &i__3, &z__1, &a[(
+ k + 1) * a_dim1 + 1], lda, &w[j + (kw + 1) * w_dim1], ldw,
+ &c_b1, &a[j * a_dim1 + 1], lda);
+/* L50: */
+ }
+
+/* Put U12 in standard form by partially undoing the interchanges */
+/* in columns k+1:n */
+
+ j = k + 1;
+L60:
+ jj = j;
+ jp = ipiv[j];
+ if (jp < 0) {
+ jp = -jp;
+ ++j;
+ }
+ ++j;
+ if (jp != jj && j <= *n) {
+ i__1 = *n - j + 1;
+ zswap_(&i__1, &a[jp + j * a_dim1], lda, &a[jj + j * a_dim1], lda);
+ }
+ if (j <= *n) {
+ goto L60;
+ }
+
+/* Set KB to the number of columns factorized */
+
+ *kb = *n - k;
+
+ } else {
+
+/* Factorize the leading columns of A using the lower triangle */
+/* of A and working forwards, and compute the matrix W = L21*D */
+/* for use in updating A22 */
+
+/* K is the main loop index, increasing from 1 in steps of 1 or 2 */
+
+ k = 1;
+L70:
+
+/* Exit from loop */
+
+ if (k >= *nb && *nb < *n || k > *n) {
+ goto L90;
+ }
+
+/* Copy column K of A to column K of W and update it */
+
+ i__1 = *n - k + 1;
+ zcopy_(&i__1, &a[k + k * a_dim1], &c__1, &w[k + k * w_dim1], &c__1);
+ i__1 = *n - k + 1;
+ i__2 = k - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__1, &i__2, &z__1, &a[k + a_dim1], lda, &w[k
+ + w_dim1], ldw, &c_b1, &w[k + k * w_dim1], &c__1);
+
+ kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+ i__1 = k + k * w_dim1;
+ absakk = (d__1 = w[i__1].r, abs(d__1)) + (d__2 = d_imag(&w[k + k *
+ w_dim1]), abs(d__2));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+ if (k < *n) {
+ i__1 = *n - k;
+ imax = k + izamax_(&i__1, &w[k + 1 + k * w_dim1], &c__1);
+ i__1 = imax + k * w_dim1;
+ colmax = (d__1 = w[i__1].r, abs(d__1)) + (d__2 = d_imag(&w[imax +
+ k * w_dim1]), abs(d__2));
+ } else {
+ colmax = 0.;
+ }
+
+ if (max(absakk,colmax) == 0.) {
+
+/* Column K is zero: set INFO and continue */
+
+ if (*info == 0) {
+ *info = k;
+ }
+ kp = k;
+ } else {
+ if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else {
+
+/* Copy column IMAX to column K+1 of W and update it */
+
+ i__1 = imax - k;
+ zcopy_(&i__1, &a[imax + k * a_dim1], lda, &w[k + (k + 1) *
+ w_dim1], &c__1);
+ i__1 = *n - imax + 1;
+ zcopy_(&i__1, &a[imax + imax * a_dim1], &c__1, &w[imax + (k +
+ 1) * w_dim1], &c__1);
+ i__1 = *n - k + 1;
+ i__2 = k - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__1, &i__2, &z__1, &a[k + a_dim1],
+ lda, &w[imax + w_dim1], ldw, &c_b1, &w[k + (k + 1) *
+ w_dim1], &c__1);
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+ i__1 = imax - k;
+ jmax = k - 1 + izamax_(&i__1, &w[k + (k + 1) * w_dim1], &c__1)
+ ;
+ i__1 = jmax + (k + 1) * w_dim1;
+ rowmax = (d__1 = w[i__1].r, abs(d__1)) + (d__2 = d_imag(&w[
+ jmax + (k + 1) * w_dim1]), abs(d__2));
+ if (imax < *n) {
+ i__1 = *n - imax;
+ jmax = imax + izamax_(&i__1, &w[imax + 1 + (k + 1) *
+ w_dim1], &c__1);
+/* Computing MAX */
+ i__1 = jmax + (k + 1) * w_dim1;
+ d__3 = rowmax, d__4 = (d__1 = w[i__1].r, abs(d__1)) + (
+ d__2 = d_imag(&w[jmax + (k + 1) * w_dim1]), abs(
+ d__2));
+ rowmax = max(d__3,d__4);
+ }
+
+ if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+ kp = k;
+ } else /* if(complicated condition) */ {
+ i__1 = imax + (k + 1) * w_dim1;
+ if ((d__1 = w[i__1].r, abs(d__1)) + (d__2 = d_imag(&w[
+ imax + (k + 1) * w_dim1]), abs(d__2)) >= alpha *
+ rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+ kp = imax;
+
+/* copy column K+1 of W to column K */
+
+ i__1 = *n - k + 1;
+ zcopy_(&i__1, &w[k + (k + 1) * w_dim1], &c__1, &w[k +
+ k * w_dim1], &c__1);
+ } else {
+
+/* interchange rows and columns K+1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+ kp = imax;
+ kstep = 2;
+ }
+ }
+ }
+
+ kk = k + kstep - 1;
+
+/* Updated column KP is already stored in column KK of W */
+
+ if (kp != kk) {
+
+/* Copy non-updated column KK to column KP */
+
+ i__1 = kp + k * a_dim1;
+ i__2 = kk + k * a_dim1;
+ a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+ i__1 = kp - k - 1;
+ zcopy_(&i__1, &a[k + 1 + kk * a_dim1], &c__1, &a[kp + (k + 1)
+ * a_dim1], lda);
+ i__1 = *n - kp + 1;
+ zcopy_(&i__1, &a[kp + kk * a_dim1], &c__1, &a[kp + kp *
+ a_dim1], &c__1);
+
+/* Interchange rows KK and KP in first KK columns of A and W */
+
+ zswap_(&kk, &a[kk + a_dim1], lda, &a[kp + a_dim1], lda);
+ zswap_(&kk, &w[kk + w_dim1], ldw, &w[kp + w_dim1], ldw);
+ }
+
+ if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k of W now holds */
+
+/* W(k) = L(k)*D(k) */
+
+/* where L(k) is the k-th column of L */
+
+/* Store L(k) in column k of A */
+
+ i__1 = *n - k + 1;
+ zcopy_(&i__1, &w[k + k * w_dim1], &c__1, &a[k + k * a_dim1], &
+ c__1);
+ if (k < *n) {
+ z_div(&z__1, &c_b1, &a[k + k * a_dim1]);
+ r1.r = z__1.r, r1.i = z__1.i;
+ i__1 = *n - k;
+ zscal_(&i__1, &r1, &a[k + 1 + k * a_dim1], &c__1);
+ }
+ } else {
+
+/* 2-by-2 pivot block D(k): columns k and k+1 of W now hold */
+
+/* ( W(k) W(k+1) ) = ( L(k) L(k+1) )*D(k) */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th columns */
+/* of L */
+
+ if (k < *n - 1) {
+
+/* Store L(k) and L(k+1) in columns k and k+1 of A */
+
+ i__1 = k + 1 + k * w_dim1;
+ d21.r = w[i__1].r, d21.i = w[i__1].i;
+ z_div(&z__1, &w[k + 1 + (k + 1) * w_dim1], &d21);
+ d11.r = z__1.r, d11.i = z__1.i;
+ z_div(&z__1, &w[k + k * w_dim1], &d21);
+ d22.r = z__1.r, d22.i = z__1.i;
+ z__3.r = d11.r * d22.r - d11.i * d22.i, z__3.i = d11.r *
+ d22.i + d11.i * d22.r;
+ z__2.r = z__3.r - 1., z__2.i = z__3.i - 0.;
+ z_div(&z__1, &c_b1, &z__2);
+ t.r = z__1.r, t.i = z__1.i;
+ z_div(&z__1, &t, &d21);
+ d21.r = z__1.r, d21.i = z__1.i;
+ i__1 = *n;
+ for (j = k + 2; j <= i__1; ++j) {
+ i__2 = j + k * a_dim1;
+ i__3 = j + k * w_dim1;
+ z__3.r = d11.r * w[i__3].r - d11.i * w[i__3].i,
+ z__3.i = d11.r * w[i__3].i + d11.i * w[i__3]
+ .r;
+ i__4 = j + (k + 1) * w_dim1;
+ z__2.r = z__3.r - w[i__4].r, z__2.i = z__3.i - w[i__4]
+ .i;
+ z__1.r = d21.r * z__2.r - d21.i * z__2.i, z__1.i =
+ d21.r * z__2.i + d21.i * z__2.r;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = j + (k + 1) * a_dim1;
+ i__3 = j + (k + 1) * w_dim1;
+ z__3.r = d22.r * w[i__3].r - d22.i * w[i__3].i,
+ z__3.i = d22.r * w[i__3].i + d22.i * w[i__3]
+ .r;
+ i__4 = j + k * w_dim1;
+ z__2.r = z__3.r - w[i__4].r, z__2.i = z__3.i - w[i__4]
+ .i;
+ z__1.r = d21.r * z__2.r - d21.i * z__2.i, z__1.i =
+ d21.r * z__2.i + d21.i * z__2.r;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+/* L80: */
+ }
+ }
+
+/* Copy D(k) to A */
+
+ i__1 = k + k * a_dim1;
+ i__2 = k + k * w_dim1;
+ a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+ i__1 = k + 1 + k * a_dim1;
+ i__2 = k + 1 + k * w_dim1;
+ a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+ i__1 = k + 1 + (k + 1) * a_dim1;
+ i__2 = k + 1 + (k + 1) * w_dim1;
+ a[i__1].r = w[i__2].r, a[i__1].i = w[i__2].i;
+ }
+ }
+
+/* Store details of the interchanges in IPIV */
+
+ if (kstep == 1) {
+ ipiv[k] = kp;
+ } else {
+ ipiv[k] = -kp;
+ ipiv[k + 1] = -kp;
+ }
+
+/* Increase K and return to the start of the main loop */
+
+ k += kstep;
+ goto L70;
+
+L90:
+
+/* Update the lower triangle of A22 (= A(k:n,k:n)) as */
+
+/* A22 := A22 - L21*D*L21' = A22 - L21*W' */
+
+/* computing blocks of NB columns at a time */
+
+ i__1 = *n;
+ i__2 = *nb;
+ for (j = k; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__3 = *nb, i__4 = *n - j + 1;
+ jb = min(i__3,i__4);
+
+/* Update the lower triangle of the diagonal block */
+
+ i__3 = j + jb - 1;
+ for (jj = j; jj <= i__3; ++jj) {
+ i__4 = j + jb - jj;
+ i__5 = k - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__4, &i__5, &z__1, &a[jj + a_dim1],
+ lda, &w[jj + w_dim1], ldw, &c_b1, &a[jj + jj * a_dim1]
+, &c__1);
+/* L100: */
+ }
+
+/* Update the rectangular subdiagonal block */
+
+ if (j + jb <= *n) {
+ i__3 = *n - j - jb + 1;
+ i__4 = k - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "Transpose", &i__3, &jb, &i__4, &z__1,
+ &a[j + jb + a_dim1], lda, &w[j + w_dim1], ldw, &c_b1,
+ &a[j + jb + j * a_dim1], lda);
+ }
+/* L110: */
+ }
+
+/* Put L21 in standard form by partially undoing the interchanges */
+/* in columns 1:k-1 */
+
+ j = k - 1;
+L120:
+ jj = j;
+ jp = ipiv[j];
+ if (jp < 0) {
+ jp = -jp;
+ --j;
+ }
+ --j;
+ if (jp != jj && j >= 1) {
+ zswap_(&j, &a[jp + a_dim1], lda, &a[jj + a_dim1], lda);
+ }
+ if (j >= 1) {
+ goto L120;
+ }
+
+/* Set KB to the number of columns factorized */
+
+ *kb = k - 1;
+
+ }
+ return 0;
+
+/* End of ZLASYF */
+
+} /* zlasyf_ */
diff --git a/contrib/libs/clapack/zlat2c.c b/contrib/libs/clapack/zlat2c.c
new file mode 100644
index 0000000000..d7a943d412
--- /dev/null
+++ b/contrib/libs/clapack/zlat2c.c
@@ -0,0 +1,152 @@
+/* zlat2c.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlat2c_(char *uplo, integer *n, doublecomplex *a,
+ integer *lda, complex *sa, integer *ldsa, integer *info)
+{
+ /* System generated locals */
+ integer sa_dim1, sa_offset, a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j;
+ doublereal rmax;
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern doublereal slamch_(char *);
+
+
+/* -- LAPACK PROTOTYPE auxiliary routine (version 3.1.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* May 2007 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAT2C converts a COMPLEX*16 triangular matrix, SA, to a COMPLEX */
+/* triangular matrix, A. */
+
+/* RMAX is the overflow for the SINGLE PRECISION arithmetic */
+/* ZLAT2C checks that all the entries of A are between -RMAX and */
+/* RMAX. If not the convertion is aborted and a flag is raised. */
+
+/* This is an auxiliary routine so there is no argument checking. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The number of rows and columns of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the N-by-N triangular coefficient matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* SA (output) COMPLEX array, dimension (LDSA,N) */
+/* Only the UPLO part of SA is referenced. On exit, if INFO=0, */
+/* the N-by-N coefficient matrix SA; if INFO>0, the content of */
+/* the UPLO part of SA is unspecified. */
+
+/* LDSA (input) INTEGER */
+/* The leading dimension of the array SA. LDSA >= max(1,M). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* = 1: an entry of the matrix A is greater than the SINGLE */
+/* PRECISION overflow threshold, in this case, the content */
+/* of the UPLO part of SA in exit is unspecified. */
+
+/* ========= */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ sa_dim1 = *ldsa;
+ sa_offset = 1 + sa_dim1;
+ sa -= sa_offset;
+
+ /* Function Body */
+ rmax = slamch_("O");
+ upper = lsame_(uplo, "U");
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ if (a[i__3].r < -rmax || a[i__4].r > rmax || d_imag(&a[i__ +
+ j * a_dim1]) < -rmax || d_imag(&a[i__ + j * a_dim1])
+ > rmax) {
+ *info = 1;
+ goto L50;
+ }
+ i__3 = i__ + j * sa_dim1;
+ i__4 = i__ + j * a_dim1;
+ sa[i__3].r = a[i__4].r, sa[i__3].i = a[i__4].i;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ if (a[i__3].r < -rmax || a[i__4].r > rmax || d_imag(&a[i__ +
+ j * a_dim1]) < -rmax || d_imag(&a[i__ + j * a_dim1])
+ > rmax) {
+ *info = 1;
+ goto L50;
+ }
+ i__3 = i__ + j * sa_dim1;
+ i__4 = i__ + j * a_dim1;
+ sa[i__3].r = a[i__4].r, sa[i__3].i = a[i__4].i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+L50:
+
+ return 0;
+
+/* End of ZLAT2C */
+
+} /* zlat2c_ */
diff --git a/contrib/libs/clapack/zlatbs.c b/contrib/libs/clapack/zlatbs.c
new file mode 100644
index 0000000000..f9629aade9
--- /dev/null
+++ b/contrib/libs/clapack/zlatbs.c
@@ -0,0 +1,1195 @@
+/* zlatbs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b36 = .5;
+
+/* Subroutine */ int zlatbs_(char *uplo, char *trans, char *diag, char *
+ normin, integer *n, integer *kd, doublecomplex *ab, integer *ldab,
+ doublecomplex *x, doublereal *scale, doublereal *cnorm, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j;
+ doublereal xj, rec, tjj;
+ integer jinc, jlen;
+ doublereal xbnd;
+ integer imax;
+ doublereal tmax;
+ doublecomplex tjjs;
+ doublereal xmax, grow;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ integer maind;
+ extern logical lsame_(char *, char *);
+ doublereal tscal;
+ doublecomplex uscal;
+ integer jlast;
+ doublecomplex csumj;
+ extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ logical upper;
+ extern /* Double Complex */ VOID zdotu_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ extern /* Subroutine */ int ztbsv_(char *, char *, char *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *), zaxpy_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), dlabad_(
+ doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+ integer *, doublereal *, doublecomplex *, integer *);
+ doublereal bignum;
+ extern integer izamax_(integer *, doublecomplex *, integer *);
+ extern /* Double Complex */ VOID zladiv_(doublecomplex *, doublecomplex *,
+ doublecomplex *);
+ logical notran;
+ integer jfirst;
+ extern doublereal dzasum_(integer *, doublecomplex *, integer *);
+ doublereal smlnum;
+ logical nounit;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLATBS solves one of the triangular systems */
+
+/* A * x = s*b, A**T * x = s*b, or A**H * x = s*b, */
+
+/* with scaling to prevent overflow, where A is an upper or lower */
+/* triangular band matrix. Here A' denotes the transpose of A, x and b */
+/* are n-element vectors, and s is a scaling factor, usually less than */
+/* or equal to 1, chosen so that the components of x will be less than */
+/* the overflow threshold. If the unscaled problem will not cause */
+/* overflow, the Level 2 BLAS routine ZTBSV is called. If the matrix A */
+/* is singular (A(j,j) = 0 for some j), then s is set to 0 and a */
+/* non-trivial solution to A*x = 0 is returned. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the operation applied to A. */
+/* = 'N': Solve A * x = s*b (No transpose) */
+/* = 'T': Solve A**T * x = s*b (Transpose) */
+/* = 'C': Solve A**H * x = s*b (Conjugate transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* NORMIN (input) CHARACTER*1 */
+/* Specifies whether CNORM has been set or not. */
+/* = 'Y': CNORM contains the column norms on entry */
+/* = 'N': CNORM is not set on entry. On exit, the norms will */
+/* be computed and stored in CNORM. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of subdiagonals or superdiagonals in the */
+/* triangular matrix A. KD >= 0. */
+
+/* AB (input) COMPLEX*16 array, dimension (LDAB,N) */
+/* The upper or lower triangular band matrix A, stored in the */
+/* first KD+1 rows of the array. The j-th column of A is stored */
+/* in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* X (input/output) COMPLEX*16 array, dimension (N) */
+/* On entry, the right hand side b of the triangular system. */
+/* On exit, X is overwritten by the solution vector x. */
+
+/* SCALE (output) DOUBLE PRECISION */
+/* The scaling factor s for the triangular system */
+/* A * x = s*b, A**T * x = s*b, or A**H * x = s*b. */
+/* If SCALE = 0, the matrix A is singular or badly scaled, and */
+/* the vector x is an exact or approximate solution to A*x = 0. */
+
+/* CNORM (input or output) DOUBLE PRECISION array, dimension (N) */
+
+/* If NORMIN = 'Y', CNORM is an input argument and CNORM(j) */
+/* contains the norm of the off-diagonal part of the j-th column */
+/* of A. If TRANS = 'N', CNORM(j) must be greater than or equal */
+/* to the infinity-norm, and if TRANS = 'T' or 'C', CNORM(j) */
+/* must be greater than or equal to the 1-norm. */
+
+/* If NORMIN = 'N', CNORM is an output argument and CNORM(j) */
+/* returns the 1-norm of the offdiagonal part of the j-th column */
+/* of A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* Further Details */
+/* ======= ======= */
+
+/* A rough bound on x is computed; if that is less than overflow, ZTBSV */
+/* is called, otherwise, specific code is used which checks for possible */
+/* overflow or divide-by-zero at every operation. */
+
+/* A columnwise scheme is used for solving A*x = b. The basic algorithm */
+/* if A is lower triangular is */
+
+/* x[1:n] := b[1:n] */
+/* for j = 1, ..., n */
+/* x(j) := x(j) / A(j,j) */
+/* x[j+1:n] := x[j+1:n] - x(j) * A[j+1:n,j] */
+/* end */
+
+/* Define bounds on the components of x after j iterations of the loop: */
+/* M(j) = bound on x[1:j] */
+/* G(j) = bound on x[j+1:n] */
+/* Initially, let M(0) = 0 and G(0) = max{x(i), i=1,...,n}. */
+
+/* Then for iteration j+1 we have */
+/* M(j+1) <= G(j) / | A(j+1,j+1) | */
+/* G(j+1) <= G(j) + M(j+1) * | A[j+2:n,j+1] | */
+/* <= G(j) ( 1 + CNORM(j+1) / | A(j+1,j+1) | ) */
+
+/* where CNORM(j+1) is greater than or equal to the infinity-norm of */
+/* column j+1 of A, not counting the diagonal. Hence */
+
+/* G(j) <= G(0) product ( 1 + CNORM(i) / | A(i,i) | ) */
+/* 1<=i<=j */
+/* and */
+
+/* |x(j)| <= ( G(0) / |A(j,j)| ) product ( 1 + CNORM(i) / |A(i,i)| ) */
+/* 1<=i< j */
+
+/* Since |x(j)| <= M(j), we use the Level 2 BLAS routine ZTBSV if the */
+/* reciprocal of the largest M(j), j=1,..,n, is larger than */
+/* max(underflow, 1/overflow). */
+
+/* The bound on x(j) is also used to determine when a step in the */
+/* columnwise method can be performed without fear of overflow. If */
+/* the computed bound is greater than a large constant, x is scaled to */
+/* prevent overflow, but if the bound overflows, x is set to 0, x(j) to */
+/* 1, and scale to 0, and a non-trivial solution to A*x = 0 is found. */
+
+/* Similarly, a row-wise scheme is used to solve A**T *x = b or */
+/* A**H *x = b. The basic algorithm for A upper triangular is */
+
+/* for j = 1, ..., n */
+/* x(j) := ( b(j) - A[1:j-1,j]' * x[1:j-1] ) / A(j,j) */
+/* end */
+
+/* We simultaneously compute two bounds */
+/* G(j) = bound on ( b(i) - A[1:i-1,i]' * x[1:i-1] ), 1<=i<=j */
+/* M(j) = bound on x(i), 1<=i<=j */
+
+/* The initial values are G(0) = 0, M(0) = max{b(i), i=1,..,n}, and we */
+/* add the constraint G(j) >= G(j-1) and M(j) >= M(j-1) for j >= 1. */
+/* Then the bound on x(j) is */
+
+/* M(j) <= M(j-1) * ( 1 + CNORM(j) ) / | A(j,j) | */
+
+/* <= M(0) * product ( ( 1 + CNORM(i) ) / |A(i,i)| ) */
+/* 1<=i<=j */
+
+/* and we can safely call ZTBSV if 1/M(n) and 1/G(n) are both greater */
+/* than max(underflow, 1/overflow). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ --x;
+ --cnorm;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+/* Test the input parameters. */
+
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (! lsame_(normin, "Y") && ! lsame_(normin,
+ "N")) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*kd < 0) {
+ *info = -6;
+ } else if (*ldab < *kd + 1) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZLATBS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine machine dependent parameters to control overflow. */
+
+ smlnum = dlamch_("Safe minimum");
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+ smlnum /= dlamch_("Precision");
+ bignum = 1. / smlnum;
+ *scale = 1.;
+
+ if (lsame_(normin, "N")) {
+
+/* Compute the 1-norm of each column, not including the diagonal. */
+
+ if (upper) {
+
+/* A is upper triangular. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *kd, i__3 = j - 1;
+ jlen = min(i__2,i__3);
+ cnorm[j] = dzasum_(&jlen, &ab[*kd + 1 - jlen + j * ab_dim1], &
+ c__1);
+/* L10: */
+ }
+ } else {
+
+/* A is lower triangular. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = *kd, i__3 = *n - j;
+ jlen = min(i__2,i__3);
+ if (jlen > 0) {
+ cnorm[j] = dzasum_(&jlen, &ab[j * ab_dim1 + 2], &c__1);
+ } else {
+ cnorm[j] = 0.;
+ }
+/* L20: */
+ }
+ }
+ }
+
+/* Scale the column norms by TSCAL if the maximum element in CNORM is */
+/* greater than BIGNUM/2. */
+
+ imax = idamax_(n, &cnorm[1], &c__1);
+ tmax = cnorm[imax];
+ if (tmax <= bignum * .5) {
+ tscal = 1.;
+ } else {
+ tscal = .5 / (smlnum * tmax);
+ dscal_(n, &tscal, &cnorm[1], &c__1);
+ }
+
+/* Compute a bound on the computed solution vector to see if the */
+/* Level 2 BLAS routine ZTBSV can be used. */
+
+ xmax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j;
+ d__3 = xmax, d__4 = (d__1 = x[i__2].r / 2., abs(d__1)) + (d__2 =
+ d_imag(&x[j]) / 2., abs(d__2));
+ xmax = max(d__3,d__4);
+/* L30: */
+ }
+ xbnd = xmax;
+ if (notran) {
+
+/* Compute the growth in A * x = b. */
+
+ if (upper) {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ maind = *kd + 1;
+ } else {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ maind = 1;
+ }
+
+ if (tscal != 1.) {
+ grow = 0.;
+ goto L60;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, G(0) = max{x(i), i=1,...,n}. */
+
+ grow = .5 / max(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L60;
+ }
+
+ i__3 = maind + j * ab_dim1;
+ tjjs.r = ab[i__3].r, tjjs.i = ab[i__3].i;
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs), abs(
+ d__2));
+
+ if (tjj >= smlnum) {
+
+/* M(j) = G(j-1) / abs(A(j,j)) */
+
+/* Computing MIN */
+ d__1 = xbnd, d__2 = min(1.,tjj) * grow;
+ xbnd = min(d__1,d__2);
+ } else {
+
+/* M(j) could overflow, set XBND to 0. */
+
+ xbnd = 0.;
+ }
+
+ if (tjj + cnorm[j] >= smlnum) {
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) / abs(A(j,j)) ) */
+
+ grow *= tjj / (tjj + cnorm[j]);
+ } else {
+
+/* G(j) could overflow, set GROW to 0. */
+
+ grow = 0.;
+ }
+/* L40: */
+ }
+ grow = xbnd;
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = .5 / max(xbnd,smlnum);
+ grow = min(d__1,d__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L60;
+ }
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) ) */
+
+ grow *= 1. / (cnorm[j] + 1.);
+/* L50: */
+ }
+ }
+L60:
+
+ ;
+ } else {
+
+/* Compute the growth in A**T * x = b or A**H * x = b. */
+
+ if (upper) {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ maind = *kd + 1;
+ } else {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ maind = 1;
+ }
+
+ if (tscal != 1.) {
+ grow = 0.;
+ goto L90;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, M(0) = max{x(i), i=1,...,n}. */
+
+ grow = .5 / max(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L90;
+ }
+
+/* G(j) = max( G(j-1), M(j-1)*( 1 + CNORM(j) ) ) */
+
+ xj = cnorm[j] + 1.;
+/* Computing MIN */
+ d__1 = grow, d__2 = xbnd / xj;
+ grow = min(d__1,d__2);
+
+ i__3 = maind + j * ab_dim1;
+ tjjs.r = ab[i__3].r, tjjs.i = ab[i__3].i;
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs), abs(
+ d__2));
+
+ if (tjj >= smlnum) {
+
+/* M(j) = M(j-1)*( 1 + CNORM(j) ) / abs(A(j,j)) */
+
+ if (xj > tjj) {
+ xbnd *= tjj / xj;
+ }
+ } else {
+
+/* M(j) could overflow, set XBND to 0. */
+
+ xbnd = 0.;
+ }
+/* L70: */
+ }
+ grow = min(grow,xbnd);
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = .5 / max(xbnd,smlnum);
+ grow = min(d__1,d__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L90;
+ }
+
+/* G(j) = ( 1 + CNORM(j) )*G(j-1) */
+
+ xj = cnorm[j] + 1.;
+ grow /= xj;
+/* L80: */
+ }
+ }
+L90:
+ ;
+ }
+
+ if (grow * tscal > smlnum) {
+
+/* Use the Level 2 BLAS solve if the reciprocal of the bound on */
+/* elements of X is not too small. */
+
+ ztbsv_(uplo, trans, diag, n, kd, &ab[ab_offset], ldab, &x[1], &c__1);
+ } else {
+
+/* Use a Level 1 BLAS solve, scaling intermediate results. */
+
+ if (xmax > bignum * .5) {
+
+/* Scale X so that its components are less than or equal to */
+/* BIGNUM in absolute value. */
+
+ *scale = bignum * .5 / xmax;
+ zdscal_(n, scale, &x[1], &c__1);
+ xmax = bignum;
+ } else {
+ xmax *= 2.;
+ }
+
+ if (notran) {
+
+/* Solve A * x = b */
+
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) / A(j,j), scaling x if necessary. */
+
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j]),
+ abs(d__2));
+ if (nounit) {
+ i__3 = maind + j * ab_dim1;
+ z__1.r = tscal * ab[i__3].r, z__1.i = tscal * ab[i__3].i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ if (tscal == 1.) {
+ goto L110;
+ }
+ }
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs), abs(
+ d__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.) {
+ if (xj > tjj * bignum) {
+
+/* Scale x by 1/b(j). */
+
+ rec = 1. / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j])
+ , abs(d__2));
+ } else if (tjj > 0.) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM */
+/* to avoid overflow when dividing by A(j,j). */
+
+ rec = tjj * bignum / xj;
+ if (cnorm[j] > 1.) {
+
+/* Scale by 1/CNORM(j) to avoid overflow when */
+/* multiplying x(j) times column j. */
+
+ rec /= cnorm[j];
+ }
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j])
+ , abs(d__2));
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0, and compute a solution to A*x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0., x[i__4].i = 0.;
+/* L100: */
+ }
+ i__3 = j;
+ x[i__3].r = 1., x[i__3].i = 0.;
+ xj = 1.;
+ *scale = 0.;
+ xmax = 0.;
+ }
+L110:
+
+/* Scale x if necessary to avoid overflow when adding a */
+/* multiple of column j of A. */
+
+ if (xj > 1.) {
+ rec = 1. / xj;
+ if (cnorm[j] > (bignum - xmax) * rec) {
+
+/* Scale x by 1/(2*abs(x(j))). */
+
+ rec *= .5;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ } else if (xj * cnorm[j] > bignum - xmax) {
+
+/* Scale x by 1/2. */
+
+ zdscal_(n, &c_b36, &x[1], &c__1);
+ *scale *= .5;
+ }
+
+ if (upper) {
+ if (j > 1) {
+
+/* Compute the update */
+/* x(max(1,j-kd):j-1) := x(max(1,j-kd):j-1) - */
+/* x(j)* A(max(1,j-kd):j-1,j) */
+
+/* Computing MIN */
+ i__3 = *kd, i__4 = j - 1;
+ jlen = min(i__3,i__4);
+ i__3 = j;
+ z__2.r = -x[i__3].r, z__2.i = -x[i__3].i;
+ z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i;
+ zaxpy_(&jlen, &z__1, &ab[*kd + 1 - jlen + j * ab_dim1]
+, &c__1, &x[j - jlen], &c__1);
+ i__3 = j - 1;
+ i__ = izamax_(&i__3, &x[1], &c__1);
+ i__3 = i__;
+ xmax = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(
+ &x[i__]), abs(d__2));
+ }
+ } else if (j < *n) {
+
+/* Compute the update */
+/* x(j+1:min(j+kd,n)) := x(j+1:min(j+kd,n)) - */
+/* x(j) * A(j+1:min(j+kd,n),j) */
+
+/* Computing MIN */
+ i__3 = *kd, i__4 = *n - j;
+ jlen = min(i__3,i__4);
+ if (jlen > 0) {
+ i__3 = j;
+ z__2.r = -x[i__3].r, z__2.i = -x[i__3].i;
+ z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i;
+ zaxpy_(&jlen, &z__1, &ab[j * ab_dim1 + 2], &c__1, &x[
+ j + 1], &c__1);
+ }
+ i__3 = *n - j;
+ i__ = j + izamax_(&i__3, &x[j + 1], &c__1);
+ i__3 = i__;
+ xmax = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[
+ i__]), abs(d__2));
+ }
+/* L120: */
+ }
+
+ } else if (lsame_(trans, "T")) {
+
+/* Solve A**T * x = b */
+
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Compute x(j) = b(j) - sum A(k,j)*x(k). */
+/* k<>j */
+
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j]),
+ abs(d__2));
+ uscal.r = tscal, uscal.i = 0.;
+ rec = 1. / max(xmax,1.);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5;
+ if (nounit) {
+ i__3 = maind + j * ab_dim1;
+ z__1.r = tscal * ab[i__3].r, z__1.i = tscal * ab[i__3]
+ .i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ }
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs),
+ abs(d__2));
+ if (tjj > 1.) {
+
+/* Divide by A(j,j) when scaling x if A(j,j) > 1. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = rec * tjj;
+ rec = min(d__1,d__2);
+ zladiv_(&z__1, &uscal, &tjjs);
+ uscal.r = z__1.r, uscal.i = z__1.i;
+ }
+ if (rec < 1.) {
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ csumj.r = 0., csumj.i = 0.;
+ if (uscal.r == 1. && uscal.i == 0.) {
+
+/* If the scaling needed for A in the dot product is 1, */
+/* call ZDOTU to perform the dot product. */
+
+ if (upper) {
+/* Computing MIN */
+ i__3 = *kd, i__4 = j - 1;
+ jlen = min(i__3,i__4);
+ zdotu_(&z__1, &jlen, &ab[*kd + 1 - jlen + j * ab_dim1]
+, &c__1, &x[j - jlen], &c__1);
+ csumj.r = z__1.r, csumj.i = z__1.i;
+ } else {
+/* Computing MIN */
+ i__3 = *kd, i__4 = *n - j;
+ jlen = min(i__3,i__4);
+ if (jlen > 1) {
+ zdotu_(&z__1, &jlen, &ab[j * ab_dim1 + 2], &c__1,
+ &x[j + 1], &c__1);
+ csumj.r = z__1.r, csumj.i = z__1.i;
+ }
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+/* Computing MIN */
+ i__3 = *kd, i__4 = j - 1;
+ jlen = min(i__3,i__4);
+ i__3 = jlen;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = *kd + i__ - jlen + j * ab_dim1;
+ z__3.r = ab[i__4].r * uscal.r - ab[i__4].i *
+ uscal.i, z__3.i = ab[i__4].r * uscal.i +
+ ab[i__4].i * uscal.r;
+ i__5 = j - jlen - 1 + i__;
+ z__2.r = z__3.r * x[i__5].r - z__3.i * x[i__5].i,
+ z__2.i = z__3.r * x[i__5].i + z__3.i * x[
+ i__5].r;
+ z__1.r = csumj.r + z__2.r, z__1.i = csumj.i +
+ z__2.i;
+ csumj.r = z__1.r, csumj.i = z__1.i;
+/* L130: */
+ }
+ } else {
+/* Computing MIN */
+ i__3 = *kd, i__4 = *n - j;
+ jlen = min(i__3,i__4);
+ i__3 = jlen;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + 1 + j * ab_dim1;
+ z__3.r = ab[i__4].r * uscal.r - ab[i__4].i *
+ uscal.i, z__3.i = ab[i__4].r * uscal.i +
+ ab[i__4].i * uscal.r;
+ i__5 = j + i__;
+ z__2.r = z__3.r * x[i__5].r - z__3.i * x[i__5].i,
+ z__2.i = z__3.r * x[i__5].i + z__3.i * x[
+ i__5].r;
+ z__1.r = csumj.r + z__2.r, z__1.i = csumj.i +
+ z__2.i;
+ csumj.r = z__1.r, csumj.i = z__1.i;
+/* L140: */
+ }
+ }
+ }
+
+ z__1.r = tscal, z__1.i = 0.;
+ if (uscal.r == z__1.r && uscal.i == z__1.i) {
+
+/* Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j) */
+/* was not used to scale the dotproduct. */
+
+ i__3 = j;
+ i__4 = j;
+ z__1.r = x[i__4].r - csumj.r, z__1.i = x[i__4].i -
+ csumj.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j])
+ , abs(d__2));
+ if (nounit) {
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ i__3 = maind + j * ab_dim1;
+ z__1.r = tscal * ab[i__3].r, z__1.i = tscal * ab[i__3]
+ .i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ if (tscal == 1.) {
+ goto L160;
+ }
+ }
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs),
+ abs(d__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1. / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ } else if (tjj > 0.) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0 and compute a solution to A**T *x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0., x[i__4].i = 0.;
+/* L150: */
+ }
+ i__3 = j;
+ x[i__3].r = 1., x[i__3].i = 0.;
+ *scale = 0.;
+ xmax = 0.;
+ }
+L160:
+ ;
+ } else {
+
+/* Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot */
+/* product has already been divided by 1/A(j,j). */
+
+ i__3 = j;
+ zladiv_(&z__2, &x[j], &tjjs);
+ z__1.r = z__2.r - csumj.r, z__1.i = z__2.i - csumj.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ }
+/* Computing MAX */
+ i__3 = j;
+ d__3 = xmax, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&x[j]), abs(d__2));
+ xmax = max(d__3,d__4);
+/* L170: */
+ }
+
+ } else {
+
+/* Solve A**H * x = b */
+
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) - sum A(k,j)*x(k). */
+/* k<>j */
+
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j]),
+ abs(d__2));
+ uscal.r = tscal, uscal.i = 0.;
+ rec = 1. / max(xmax,1.);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5;
+ if (nounit) {
+ d_cnjg(&z__2, &ab[maind + j * ab_dim1]);
+ z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ }
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs),
+ abs(d__2));
+ if (tjj > 1.) {
+
+/* Divide by A(j,j) when scaling x if A(j,j) > 1. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = rec * tjj;
+ rec = min(d__1,d__2);
+ zladiv_(&z__1, &uscal, &tjjs);
+ uscal.r = z__1.r, uscal.i = z__1.i;
+ }
+ if (rec < 1.) {
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ csumj.r = 0., csumj.i = 0.;
+ if (uscal.r == 1. && uscal.i == 0.) {
+
+/* If the scaling needed for A in the dot product is 1, */
+/* call ZDOTC to perform the dot product. */
+
+ if (upper) {
+/* Computing MIN */
+ i__3 = *kd, i__4 = j - 1;
+ jlen = min(i__3,i__4);
+ zdotc_(&z__1, &jlen, &ab[*kd + 1 - jlen + j * ab_dim1]
+, &c__1, &x[j - jlen], &c__1);
+ csumj.r = z__1.r, csumj.i = z__1.i;
+ } else {
+/* Computing MIN */
+ i__3 = *kd, i__4 = *n - j;
+ jlen = min(i__3,i__4);
+ if (jlen > 1) {
+ zdotc_(&z__1, &jlen, &ab[j * ab_dim1 + 2], &c__1,
+ &x[j + 1], &c__1);
+ csumj.r = z__1.r, csumj.i = z__1.i;
+ }
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+/* Computing MIN */
+ i__3 = *kd, i__4 = j - 1;
+ jlen = min(i__3,i__4);
+ i__3 = jlen;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ d_cnjg(&z__4, &ab[*kd + i__ - jlen + j * ab_dim1])
+ ;
+ z__3.r = z__4.r * uscal.r - z__4.i * uscal.i,
+ z__3.i = z__4.r * uscal.i + z__4.i *
+ uscal.r;
+ i__4 = j - jlen - 1 + i__;
+ z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i,
+ z__2.i = z__3.r * x[i__4].i + z__3.i * x[
+ i__4].r;
+ z__1.r = csumj.r + z__2.r, z__1.i = csumj.i +
+ z__2.i;
+ csumj.r = z__1.r, csumj.i = z__1.i;
+/* L180: */
+ }
+ } else {
+/* Computing MIN */
+ i__3 = *kd, i__4 = *n - j;
+ jlen = min(i__3,i__4);
+ i__3 = jlen;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ d_cnjg(&z__4, &ab[i__ + 1 + j * ab_dim1]);
+ z__3.r = z__4.r * uscal.r - z__4.i * uscal.i,
+ z__3.i = z__4.r * uscal.i + z__4.i *
+ uscal.r;
+ i__4 = j + i__;
+ z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i,
+ z__2.i = z__3.r * x[i__4].i + z__3.i * x[
+ i__4].r;
+ z__1.r = csumj.r + z__2.r, z__1.i = csumj.i +
+ z__2.i;
+ csumj.r = z__1.r, csumj.i = z__1.i;
+/* L190: */
+ }
+ }
+ }
+
+ z__1.r = tscal, z__1.i = 0.;
+ if (uscal.r == z__1.r && uscal.i == z__1.i) {
+
+/* Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j) */
+/* was not used to scale the dotproduct. */
+
+ i__3 = j;
+ i__4 = j;
+ z__1.r = x[i__4].r - csumj.r, z__1.i = x[i__4].i -
+ csumj.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j])
+ , abs(d__2));
+ if (nounit) {
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ d_cnjg(&z__2, &ab[maind + j * ab_dim1]);
+ z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ if (tscal == 1.) {
+ goto L210;
+ }
+ }
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs),
+ abs(d__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1. / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ } else if (tjj > 0.) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0 and compute a solution to A**H *x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0., x[i__4].i = 0.;
+/* L200: */
+ }
+ i__3 = j;
+ x[i__3].r = 1., x[i__3].i = 0.;
+ *scale = 0.;
+ xmax = 0.;
+ }
+L210:
+ ;
+ } else {
+
+/* Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot */
+/* product has already been divided by 1/A(j,j). */
+
+ i__3 = j;
+ zladiv_(&z__2, &x[j], &tjjs);
+ z__1.r = z__2.r - csumj.r, z__1.i = z__2.i - csumj.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ }
+/* Computing MAX */
+ i__3 = j;
+ d__3 = xmax, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&x[j]), abs(d__2));
+ xmax = max(d__3,d__4);
+/* L220: */
+ }
+ }
+ *scale /= tscal;
+ }
+
+/* Scale the column norms by 1/TSCAL for return. */
+
+ if (tscal != 1.) {
+ d__1 = 1. / tscal;
+ dscal_(n, &d__1, &cnorm[1], &c__1);
+ }
+
+ return 0;
+
+/* End of ZLATBS */
+
+} /* zlatbs_ */
diff --git a/contrib/libs/clapack/zlatdf.c b/contrib/libs/clapack/zlatdf.c
new file mode 100644
index 0000000000..4bb8ddb254
--- /dev/null
+++ b/contrib/libs/clapack/zlatdf.c
@@ -0,0 +1,359 @@
+/* zlatdf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b24 = 1.;
+
+/* Subroutine */ int zlatdf_(integer *ijob, integer *n, doublecomplex *z__,
+ integer *ldz, doublecomplex *rhs, doublereal *rdsum, doublereal *
+ rdscal, integer *ipiv, integer *jpiv)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+ double z_abs(doublecomplex *);
+ void z_sqrt(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k;
+ doublecomplex bm, bp, xm[2], xp[2];
+ integer info;
+ doublecomplex temp, work[8];
+ doublereal scale;
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *);
+ doublecomplex pmone;
+ extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ doublereal rtemp, sminu, rwork[2];
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ doublereal splus;
+ extern /* Subroutine */ int zaxpy_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), zgesc2_(
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+ integer *, doublereal *), zgecon_(char *, integer *,
+ doublecomplex *, integer *, doublereal *, doublereal *,
+ doublecomplex *, doublereal *, integer *);
+ extern doublereal dzasum_(integer *, doublecomplex *, integer *);
+ extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *), zlaswp_(integer *, doublecomplex *,
+ integer *, integer *, integer *, integer *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLATDF computes the contribution to the reciprocal Dif-estimate */
+/* by solving for x in Z * x = b, where b is chosen such that the norm */
+/* of x is as large as possible. It is assumed that LU decomposition */
+/* of Z has been computed by ZGETC2. On entry RHS = f holds the */
+/* contribution from earlier solved sub-systems, and on return RHS = x. */
+
+/* The factorization of Z returned by ZGETC2 has the form */
+/* Z = P * L * U * Q, where P and Q are permutation matrices. L is lower */
+/* triangular with unit diagonal elements and U is upper triangular. */
+
+/* Arguments */
+/* ========= */
+
+/* IJOB (input) INTEGER */
+/*           IJOB = 2: First compute an approximate null-vector e */
+/* of Z using ZGECON, e is normalized and solve for */
+/* Zx = +-e - f with the sign giving the greater value of */
+/* 2-norm(x). About 5 times as expensive as Default. */
+/* IJOB .ne. 2: Local look ahead strategy where */
+/*                all entries of the r.h.s. b are chosen as either +1 or */
+/* -1. Default. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Z. */
+
+/* Z       (input) COMPLEX*16 array, dimension (LDZ, N) */
+/* On entry, the LU part of the factorization of the n-by-n */
+/* matrix Z computed by ZGETC2: Z = P * L * U * Q */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDA >= max(1, N). */
+
+/* RHS     (input/output) COMPLEX*16 array, dimension (N). */
+/* On entry, RHS contains contributions from other subsystems. */
+/* On exit, RHS contains the solution of the subsystem with */
+/* entries according to the value of IJOB (see above). */
+
+/* RDSUM (input/output) DOUBLE PRECISION */
+/* On entry, the sum of squares of computed contributions to */
+/* the Dif-estimate under computation by ZTGSYL, where the */
+/* scaling factor RDSCAL (see below) has been factored out. */
+/* On exit, the corresponding sum of squares updated with the */
+/* contributions from the current sub-system. */
+/* If TRANS = 'T' RDSUM is not touched. */
+/*         NOTE: RDSUM only makes sense when ZTGSY2 is called by ZTGSYL. */
+
+/* RDSCAL (input/output) DOUBLE PRECISION */
+/* On entry, scaling factor used to prevent overflow in RDSUM. */
+/* On exit, RDSCAL is updated w.r.t. the current contributions */
+/* in RDSUM. */
+/* If TRANS = 'T', RDSCAL is not touched. */
+/* NOTE: RDSCAL only makes sense when ZTGSY2 is called by */
+/* ZTGSYL. */
+
+/* IPIV (input) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= i <= N, row i of the */
+/* matrix has been interchanged with row IPIV(i). */
+
+/* JPIV (input) INTEGER array, dimension (N). */
+/* The pivot indices; for 1 <= j <= N, column j of the */
+/* matrix has been interchanged with column JPIV(j). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* This routine is a further developed implementation of algorithm */
+/* BSOLVE in [1] using complete pivoting in the LU factorization. */
+
+/* [1] Bo Kagstrom and Lars Westin, */
+/* Generalized Schur Methods with Condition Estimators for */
+/* Solving the Generalized Sylvester Equation, IEEE Transactions */
+/* on Automatic Control, Vol. 34, No. 7, July 1989, pp 745-751. */
+
+/* [2] Peter Poromaa, */
+/* On Efficient and Robust Estimators for the Separation */
+/* between two Regular Matrix Pairs with Applications in */
+/* Condition Estimation. Report UMINF-95.05, Department of */
+/* Computing Science, Umea University, S-901 87 Umea, Sweden, */
+/* 1995. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --rhs;
+ --ipiv;
+ --jpiv;
+
+ /* Function Body */
+ if (*ijob != 2) {
+
+/* Apply permutations IPIV to RHS */
+
+ i__1 = *n - 1;
+ zlaswp_(&c__1, &rhs[1], ldz, &c__1, &i__1, &ipiv[1], &c__1);
+
+/* Solve for L-part choosing RHS either to +1 or -1. */
+
+ z__1.r = -1., z__1.i = -0.;
+ pmone.r = z__1.r, pmone.i = z__1.i;
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ z__1.r = rhs[i__2].r + 1., z__1.i = rhs[i__2].i + 0.;
+ bp.r = z__1.r, bp.i = z__1.i;
+ i__2 = j;
+ z__1.r = rhs[i__2].r - 1., z__1.i = rhs[i__2].i - 0.;
+ bm.r = z__1.r, bm.i = z__1.i;
+ splus = 1.;
+
+/* Look-ahead for L-part RHS(1:N-1) = +-1 */
+/* SPLUS and SMIN computed more efficiently than in BSOLVE[1]. */
+
+ i__2 = *n - j;
+ zdotc_(&z__1, &i__2, &z__[j + 1 + j * z_dim1], &c__1, &z__[j + 1
+ + j * z_dim1], &c__1);
+ splus += z__1.r;
+ i__2 = *n - j;
+ zdotc_(&z__1, &i__2, &z__[j + 1 + j * z_dim1], &c__1, &rhs[j + 1],
+ &c__1);
+ sminu = z__1.r;
+ i__2 = j;
+ splus *= rhs[i__2].r;
+ if (splus > sminu) {
+ i__2 = j;
+ rhs[i__2].r = bp.r, rhs[i__2].i = bp.i;
+ } else if (sminu > splus) {
+ i__2 = j;
+ rhs[i__2].r = bm.r, rhs[i__2].i = bm.i;
+ } else {
+
+/* In this case the updating sums are equal and we can */
+/* choose RHS(J) +1 or -1. The first time this happens we */
+/* choose -1, thereafter +1. This is a simple way to get */
+/* good estimates of matrices like Byers well-known example */
+/* (see [1]). (Not done in BSOLVE.) */
+
+ i__2 = j;
+ i__3 = j;
+ z__1.r = rhs[i__3].r + pmone.r, z__1.i = rhs[i__3].i +
+ pmone.i;
+ rhs[i__2].r = z__1.r, rhs[i__2].i = z__1.i;
+ pmone.r = 1., pmone.i = 0.;
+ }
+
+/* Compute the remaining r.h.s. */
+
+ i__2 = j;
+ z__1.r = -rhs[i__2].r, z__1.i = -rhs[i__2].i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = *n - j;
+ zaxpy_(&i__2, &temp, &z__[j + 1 + j * z_dim1], &c__1, &rhs[j + 1],
+ &c__1);
+/* L10: */
+ }
+
+/* Solve for U-part, look-ahead for RHS(N) = +-1. This is not done */
+/* in BSOLVE and will hopefully give us a better estimate because */
+/* any ill-conditioning of the original matrix is transferred to U */
+/* and not to L. U(N, N) is an approximation to sigma_min(LU). */
+
+ i__1 = *n - 1;
+ zcopy_(&i__1, &rhs[1], &c__1, work, &c__1);
+ i__1 = *n - 1;
+ i__2 = *n;
+ z__1.r = rhs[i__2].r + 1., z__1.i = rhs[i__2].i + 0.;
+ work[i__1].r = z__1.r, work[i__1].i = z__1.i;
+ i__1 = *n;
+ i__2 = *n;
+ z__1.r = rhs[i__2].r - 1., z__1.i = rhs[i__2].i - 0.;
+ rhs[i__1].r = z__1.r, rhs[i__1].i = z__1.i;
+ splus = 0.;
+ sminu = 0.;
+ for (i__ = *n; i__ >= 1; --i__) {
+ z_div(&z__1, &c_b1, &z__[i__ + i__ * z_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__1 = i__ - 1;
+ i__2 = i__ - 1;
+ z__1.r = work[i__2].r * temp.r - work[i__2].i * temp.i, z__1.i =
+ work[i__2].r * temp.i + work[i__2].i * temp.r;
+ work[i__1].r = z__1.r, work[i__1].i = z__1.i;
+ i__1 = i__;
+ i__2 = i__;
+ z__1.r = rhs[i__2].r * temp.r - rhs[i__2].i * temp.i, z__1.i =
+ rhs[i__2].r * temp.i + rhs[i__2].i * temp.r;
+ rhs[i__1].r = z__1.r, rhs[i__1].i = z__1.i;
+ i__1 = *n;
+ for (k = i__ + 1; k <= i__1; ++k) {
+ i__2 = i__ - 1;
+ i__3 = i__ - 1;
+ i__4 = k - 1;
+ i__5 = i__ + k * z_dim1;
+ z__3.r = z__[i__5].r * temp.r - z__[i__5].i * temp.i, z__3.i =
+ z__[i__5].r * temp.i + z__[i__5].i * temp.r;
+ z__2.r = work[i__4].r * z__3.r - work[i__4].i * z__3.i,
+ z__2.i = work[i__4].r * z__3.i + work[i__4].i *
+ z__3.r;
+ z__1.r = work[i__3].r - z__2.r, z__1.i = work[i__3].i -
+ z__2.i;
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+ i__2 = i__;
+ i__3 = i__;
+ i__4 = k;
+ i__5 = i__ + k * z_dim1;
+ z__3.r = z__[i__5].r * temp.r - z__[i__5].i * temp.i, z__3.i =
+ z__[i__5].r * temp.i + z__[i__5].i * temp.r;
+ z__2.r = rhs[i__4].r * z__3.r - rhs[i__4].i * z__3.i, z__2.i =
+ rhs[i__4].r * z__3.i + rhs[i__4].i * z__3.r;
+ z__1.r = rhs[i__3].r - z__2.r, z__1.i = rhs[i__3].i - z__2.i;
+ rhs[i__2].r = z__1.r, rhs[i__2].i = z__1.i;
+/* L20: */
+ }
+ splus += z_abs(&work[i__ - 1]);
+ sminu += z_abs(&rhs[i__]);
+/* L30: */
+ }
+ if (splus > sminu) {
+ zcopy_(n, work, &c__1, &rhs[1], &c__1);
+ }
+
+/* Apply the permutations JPIV to the computed solution (RHS) */
+
+ i__1 = *n - 1;
+ zlaswp_(&c__1, &rhs[1], ldz, &c__1, &i__1, &jpiv[1], &c_n1);
+
+/* Compute the sum of squares */
+
+ zlassq_(n, &rhs[1], &c__1, rdscal, rdsum);
+ return 0;
+ }
+
+/* ENTRY IJOB = 2 */
+
+/* Compute approximate nullvector XM of Z */
+
+ zgecon_("I", n, &z__[z_offset], ldz, &c_b24, &rtemp, work, rwork, &info);
+ zcopy_(n, &work[*n], &c__1, xm, &c__1);
+
+/* Compute RHS */
+
+ i__1 = *n - 1;
+ zlaswp_(&c__1, xm, ldz, &c__1, &i__1, &ipiv[1], &c_n1);
+ zdotc_(&z__3, n, xm, &c__1, xm, &c__1);
+ z_sqrt(&z__2, &z__3);
+ z_div(&z__1, &c_b1, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ zscal_(n, &temp, xm, &c__1);
+ zcopy_(n, xm, &c__1, xp, &c__1);
+ zaxpy_(n, &c_b1, &rhs[1], &c__1, xp, &c__1);
+ z__1.r = -1., z__1.i = -0.;
+ zaxpy_(n, &z__1, xm, &c__1, &rhs[1], &c__1);
+ zgesc2_(n, &z__[z_offset], ldz, &rhs[1], &ipiv[1], &jpiv[1], &scale);
+ zgesc2_(n, &z__[z_offset], ldz, xp, &ipiv[1], &jpiv[1], &scale);
+ if (dzasum_(n, xp, &c__1) > dzasum_(n, &rhs[1], &c__1)) {
+ zcopy_(n, xp, &c__1, &rhs[1], &c__1);
+ }
+
+/* Compute the sum of squares */
+
+ zlassq_(n, &rhs[1], &c__1, rdscal, rdsum);
+ return 0;
+
+/* End of ZLATDF */
+
+} /* zlatdf_ */
diff --git a/contrib/libs/clapack/zlatps.c b/contrib/libs/clapack/zlatps.c
new file mode 100644
index 0000000000..be62296881
--- /dev/null
+++ b/contrib/libs/clapack/zlatps.c
@@ -0,0 +1,1163 @@
+/* zlatps.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b36 = .5;
+
+/* Subroutine */ int zlatps_(char *uplo, char *trans, char *diag, char *
+ normin, integer *n, doublecomplex *ap, doublecomplex *x, doublereal *
+ scale, doublereal *cnorm, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, ip;
+ doublereal xj, rec, tjj;
+ integer jinc, jlen;
+ doublereal xbnd;
+ integer imax;
+ doublereal tmax;
+ doublecomplex tjjs;
+ doublereal xmax, grow;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ doublereal tscal;
+ doublecomplex uscal;
+ integer jlast;
+ doublecomplex csumj;
+ extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ logical upper;
+ extern /* Double Complex */ VOID zdotu_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ extern /* Subroutine */ int zaxpy_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), ztpsv_(
+ char *, char *, char *, integer *, doublecomplex *, doublecomplex
+ *, integer *), dlabad_(doublereal *,
+ doublereal *);
+ extern doublereal dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+ integer *, doublereal *, doublecomplex *, integer *);
+ doublereal bignum;
+ extern integer izamax_(integer *, doublecomplex *, integer *);
+ extern /* Double Complex */ VOID zladiv_(doublecomplex *, doublecomplex *,
+ doublecomplex *);
+ logical notran;
+ integer jfirst;
+ extern doublereal dzasum_(integer *, doublecomplex *, integer *);
+ doublereal smlnum;
+ logical nounit;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLATPS solves one of the triangular systems */
+
+/* A * x = s*b, A**T * x = s*b, or A**H * x = s*b, */
+
+/* with scaling to prevent overflow, where A is an upper or lower */
+/* triangular matrix stored in packed form. Here A**T denotes the */
+/* transpose of A, A**H denotes the conjugate transpose of A, x and b */
+/* are n-element vectors, and s is a scaling factor, usually less than */
+/* or equal to 1, chosen so that the components of x will be less than */
+/* the overflow threshold. If the unscaled problem will not cause */
+/* overflow, the Level 2 BLAS routine ZTPSV is called. If the matrix A */
+/* is singular (A(j,j) = 0 for some j), then s is set to 0 and a */
+/* non-trivial solution to A*x = 0 is returned. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the operation applied to A. */
+/* = 'N': Solve A * x = s*b (No transpose) */
+/* = 'T': Solve A**T * x = s*b (Transpose) */
+/* = 'C': Solve A**H * x = s*b (Conjugate transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* NORMIN (input) CHARACTER*1 */
+/* Specifies whether CNORM has been set or not. */
+/* = 'Y': CNORM contains the column norms on entry */
+/* = 'N': CNORM is not set on entry. On exit, the norms will */
+/* be computed and stored in CNORM. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The upper or lower triangular matrix A, packed columnwise in */
+/* a linear array. The j-th column of A is stored in the array */
+/* AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* X (input/output) COMPLEX*16 array, dimension (N) */
+/* On entry, the right hand side b of the triangular system. */
+/* On exit, X is overwritten by the solution vector x. */
+
+/* SCALE (output) DOUBLE PRECISION */
+/* The scaling factor s for the triangular system */
+/* A * x = s*b, A**T * x = s*b, or A**H * x = s*b. */
+/* If SCALE = 0, the matrix A is singular or badly scaled, and */
+/* the vector x is an exact or approximate solution to A*x = 0. */
+
+/* CNORM (input or output) DOUBLE PRECISION array, dimension (N) */
+
+/* If NORMIN = 'Y', CNORM is an input argument and CNORM(j) */
+/* contains the norm of the off-diagonal part of the j-th column */
+/* of A. If TRANS = 'N', CNORM(j) must be greater than or equal */
+/* to the infinity-norm, and if TRANS = 'T' or 'C', CNORM(j) */
+/* must be greater than or equal to the 1-norm. */
+
+/* If NORMIN = 'N', CNORM is an output argument and CNORM(j) */
+/* returns the 1-norm of the offdiagonal part of the j-th column */
+/* of A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* Further Details */
+/* ======= ======= */
+
+/* A rough bound on x is computed; if that is less than overflow, ZTPSV */
+/* is called, otherwise, specific code is used which checks for possible */
+/* overflow or divide-by-zero at every operation. */
+
+/* A columnwise scheme is used for solving A*x = b. The basic algorithm */
+/* if A is lower triangular is */
+
+/* x[1:n] := b[1:n] */
+/* for j = 1, ..., n */
+/* x(j) := x(j) / A(j,j) */
+/* x[j+1:n] := x[j+1:n] - x(j) * A[j+1:n,j] */
+/* end */
+
+/* Define bounds on the components of x after j iterations of the loop: */
+/* M(j) = bound on x[1:j] */
+/* G(j) = bound on x[j+1:n] */
+/* Initially, let M(0) = 0 and G(0) = max{x(i), i=1,...,n}. */
+
+/* Then for iteration j+1 we have */
+/* M(j+1) <= G(j) / | A(j+1,j+1) | */
+/* G(j+1) <= G(j) + M(j+1) * | A[j+2:n,j+1] | */
+/* <= G(j) ( 1 + CNORM(j+1) / | A(j+1,j+1) | ) */
+
+/* where CNORM(j+1) is greater than or equal to the infinity-norm of */
+/* column j+1 of A, not counting the diagonal. Hence */
+
+/* G(j) <= G(0) product ( 1 + CNORM(i) / | A(i,i) | ) */
+/* 1<=i<=j */
+/* and */
+
+/* |x(j)| <= ( G(0) / |A(j,j)| ) product ( 1 + CNORM(i) / |A(i,i)| ) */
+/* 1<=i< j */
+
+/* Since |x(j)| <= M(j), we use the Level 2 BLAS routine ZTPSV if the */
+/* reciprocal of the largest M(j), j=1,..,n, is larger than */
+/* max(underflow, 1/overflow). */
+
+/* The bound on x(j) is also used to determine when a step in the */
+/* columnwise method can be performed without fear of overflow. If */
+/* the computed bound is greater than a large constant, x is scaled to */
+/* prevent overflow, but if the bound overflows, x is set to 0, x(j) to */
+/* 1, and scale to 0, and a non-trivial solution to A*x = 0 is found. */
+
+/* Similarly, a row-wise scheme is used to solve A**T *x = b or */
+/* A**H *x = b. The basic algorithm for A upper triangular is */
+
+/* for j = 1, ..., n */
+/* x(j) := ( b(j) - A[1:j-1,j]' * x[1:j-1] ) / A(j,j) */
+/* end */
+
+/* We simultaneously compute two bounds */
+/* G(j) = bound on ( b(i) - A[1:i-1,i]' * x[1:i-1] ), 1<=i<=j */
+/* M(j) = bound on x(i), 1<=i<=j */
+
+/* The initial values are G(0) = 0, M(0) = max{b(i), i=1,..,n}, and we */
+/* add the constraint G(j) >= G(j-1) and M(j) >= M(j-1) for j >= 1. */
+/* Then the bound on x(j) is */
+
+/* M(j) <= M(j-1) * ( 1 + CNORM(j) ) / | A(j,j) | */
+
+/* <= M(0) * product ( ( 1 + CNORM(i) ) / |A(i,i)| ) */
+/* 1<=i<=j */
+
+/* and we can safely call ZTPSV if 1/M(n) and 1/G(n) are both greater */
+/* than max(underflow, 1/overflow). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --cnorm;
+ --x;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+/* Test the input parameters. */
+
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (! lsame_(normin, "Y") && ! lsame_(normin,
+ "N")) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZLATPS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine machine dependent parameters to control overflow. */
+
+ smlnum = dlamch_("Safe minimum");
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+ smlnum /= dlamch_("Precision");
+ bignum = 1. / smlnum;
+ *scale = 1.;
+
+ if (lsame_(normin, "N")) {
+
+/* Compute the 1-norm of each column, not including the diagonal. */
+
+ if (upper) {
+
+/* A is upper triangular. */
+
+ ip = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ cnorm[j] = dzasum_(&i__2, &ap[ip], &c__1);
+ ip += j;
+/* L10: */
+ }
+ } else {
+
+/* A is lower triangular. */
+
+ ip = 1;
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ cnorm[j] = dzasum_(&i__2, &ap[ip + 1], &c__1);
+ ip = ip + *n - j + 1;
+/* L20: */
+ }
+ cnorm[*n] = 0.;
+ }
+ }
+
+/* Scale the column norms by TSCAL if the maximum element in CNORM is */
+/* greater than BIGNUM/2. */
+
+ imax = idamax_(n, &cnorm[1], &c__1);
+ tmax = cnorm[imax];
+ if (tmax <= bignum * .5) {
+ tscal = 1.;
+ } else {
+ tscal = .5 / (smlnum * tmax);
+ dscal_(n, &tscal, &cnorm[1], &c__1);
+ }
+
+/* Compute a bound on the computed solution vector to see if the */
+/* Level 2 BLAS routine ZTPSV can be used. */
+
+ xmax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j;
+ d__3 = xmax, d__4 = (d__1 = x[i__2].r / 2., abs(d__1)) + (d__2 =
+ d_imag(&x[j]) / 2., abs(d__2));
+ xmax = max(d__3,d__4);
+/* L30: */
+ }
+ xbnd = xmax;
+ if (notran) {
+
+/* Compute the growth in A * x = b. */
+
+ if (upper) {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ } else {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ }
+
+ if (tscal != 1.) {
+ grow = 0.;
+ goto L60;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, G(0) = max{x(i), i=1,...,n}. */
+
+ grow = .5 / max(xbnd,smlnum);
+ xbnd = grow;
+ ip = jfirst * (jfirst + 1) / 2;
+ jlen = *n;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L60;
+ }
+
+ i__3 = ip;
+ tjjs.r = ap[i__3].r, tjjs.i = ap[i__3].i;
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs), abs(
+ d__2));
+
+ if (tjj >= smlnum) {
+
+/* M(j) = G(j-1) / abs(A(j,j)) */
+
+/* Computing MIN */
+ d__1 = xbnd, d__2 = min(1.,tjj) * grow;
+ xbnd = min(d__1,d__2);
+ } else {
+
+/* M(j) could overflow, set XBND to 0. */
+
+ xbnd = 0.;
+ }
+
+ if (tjj + cnorm[j] >= smlnum) {
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) / abs(A(j,j)) ) */
+
+ grow *= tjj / (tjj + cnorm[j]);
+ } else {
+
+/* G(j) could overflow, set GROW to 0. */
+
+ grow = 0.;
+ }
+ ip += jinc * jlen;
+ --jlen;
+/* L40: */
+ }
+ grow = xbnd;
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = .5 / max(xbnd,smlnum);
+ grow = min(d__1,d__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L60;
+ }
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) ) */
+
+ grow *= 1. / (cnorm[j] + 1.);
+/* L50: */
+ }
+ }
+L60:
+
+ ;
+ } else {
+
+/* Compute the growth in A**T * x = b or A**H * x = b. */
+
+ if (upper) {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ } else {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ }
+
+ if (tscal != 1.) {
+ grow = 0.;
+ goto L90;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, M(0) = max{x(i), i=1,...,n}. */
+
+ grow = .5 / max(xbnd,smlnum);
+ xbnd = grow;
+ ip = jfirst * (jfirst + 1) / 2;
+ jlen = 1;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L90;
+ }
+
+/* G(j) = max( G(j-1), M(j-1)*( 1 + CNORM(j) ) ) */
+
+ xj = cnorm[j] + 1.;
+/* Computing MIN */
+ d__1 = grow, d__2 = xbnd / xj;
+ grow = min(d__1,d__2);
+
+ i__3 = ip;
+ tjjs.r = ap[i__3].r, tjjs.i = ap[i__3].i;
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs), abs(
+ d__2));
+
+ if (tjj >= smlnum) {
+
+/* M(j) = M(j-1)*( 1 + CNORM(j) ) / abs(A(j,j)) */
+
+ if (xj > tjj) {
+ xbnd *= tjj / xj;
+ }
+ } else {
+
+/* M(j) could overflow, set XBND to 0. */
+
+ xbnd = 0.;
+ }
+ ++jlen;
+ ip += jinc * jlen;
+/* L70: */
+ }
+ grow = min(grow,xbnd);
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = .5 / max(xbnd,smlnum);
+ grow = min(d__1,d__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L90;
+ }
+
+/* G(j) = ( 1 + CNORM(j) )*G(j-1) */
+
+ xj = cnorm[j] + 1.;
+ grow /= xj;
+/* L80: */
+ }
+ }
+L90:
+ ;
+ }
+
+ if (grow * tscal > smlnum) {
+
+/* Use the Level 2 BLAS solve if the reciprocal of the bound on */
+/* elements of X is not too small. */
+
+ ztpsv_(uplo, trans, diag, n, &ap[1], &x[1], &c__1);
+ } else {
+
+/* Use a Level 1 BLAS solve, scaling intermediate results. */
+
+ if (xmax > bignum * .5) {
+
+/* Scale X so that its components are less than or equal to */
+/* BIGNUM in absolute value. */
+
+ *scale = bignum * .5 / xmax;
+ zdscal_(n, scale, &x[1], &c__1);
+ xmax = bignum;
+ } else {
+ xmax *= 2.;
+ }
+
+ if (notran) {
+
+/* Solve A * x = b */
+
+ ip = jfirst * (jfirst + 1) / 2;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) / A(j,j), scaling x if necessary. */
+
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j]),
+ abs(d__2));
+ if (nounit) {
+ i__3 = ip;
+ z__1.r = tscal * ap[i__3].r, z__1.i = tscal * ap[i__3].i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ if (tscal == 1.) {
+ goto L110;
+ }
+ }
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs), abs(
+ d__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.) {
+ if (xj > tjj * bignum) {
+
+/* Scale x by 1/b(j). */
+
+ rec = 1. / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j])
+ , abs(d__2));
+ } else if (tjj > 0.) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM */
+/* to avoid overflow when dividing by A(j,j). */
+
+ rec = tjj * bignum / xj;
+ if (cnorm[j] > 1.) {
+
+/* Scale by 1/CNORM(j) to avoid overflow when */
+/* multiplying x(j) times column j. */
+
+ rec /= cnorm[j];
+ }
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j])
+ , abs(d__2));
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0, and compute a solution to A*x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0., x[i__4].i = 0.;
+/* L100: */
+ }
+ i__3 = j;
+ x[i__3].r = 1., x[i__3].i = 0.;
+ xj = 1.;
+ *scale = 0.;
+ xmax = 0.;
+ }
+L110:
+
+/* Scale x if necessary to avoid overflow when adding a */
+/* multiple of column j of A. */
+
+ if (xj > 1.) {
+ rec = 1. / xj;
+ if (cnorm[j] > (bignum - xmax) * rec) {
+
+/* Scale x by 1/(2*abs(x(j))). */
+
+ rec *= .5;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ } else if (xj * cnorm[j] > bignum - xmax) {
+
+/* Scale x by 1/2. */
+
+ zdscal_(n, &c_b36, &x[1], &c__1);
+ *scale *= .5;
+ }
+
+ if (upper) {
+ if (j > 1) {
+
+/* Compute the update */
+/* x(1:j-1) := x(1:j-1) - x(j) * A(1:j-1,j) */
+
+ i__3 = j - 1;
+ i__4 = j;
+ z__2.r = -x[i__4].r, z__2.i = -x[i__4].i;
+ z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i;
+ zaxpy_(&i__3, &z__1, &ap[ip - j + 1], &c__1, &x[1], &
+ c__1);
+ i__3 = j - 1;
+ i__ = izamax_(&i__3, &x[1], &c__1);
+ i__3 = i__;
+ xmax = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(
+ &x[i__]), abs(d__2));
+ }
+ ip -= j;
+ } else {
+ if (j < *n) {
+
+/* Compute the update */
+/* x(j+1:n) := x(j+1:n) - x(j) * A(j+1:n,j) */
+
+ i__3 = *n - j;
+ i__4 = j;
+ z__2.r = -x[i__4].r, z__2.i = -x[i__4].i;
+ z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i;
+ zaxpy_(&i__3, &z__1, &ap[ip + 1], &c__1, &x[j + 1], &
+ c__1);
+ i__3 = *n - j;
+ i__ = j + izamax_(&i__3, &x[j + 1], &c__1);
+ i__3 = i__;
+ xmax = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(
+ &x[i__]), abs(d__2));
+ }
+ ip = ip + *n - j + 1;
+ }
+/* L120: */
+ }
+
+ } else if (lsame_(trans, "T")) {
+
+/* Solve A**T * x = b */
+
+ ip = jfirst * (jfirst + 1) / 2;
+ jlen = 1;
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Compute x(j) = b(j) - sum A(k,j)*x(k). */
+/* k<>j */
+
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j]),
+ abs(d__2));
+ uscal.r = tscal, uscal.i = 0.;
+ rec = 1. / max(xmax,1.);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5;
+ if (nounit) {
+ i__3 = ip;
+ z__1.r = tscal * ap[i__3].r, z__1.i = tscal * ap[i__3]
+ .i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ }
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs),
+ abs(d__2));
+ if (tjj > 1.) {
+
+/* Divide by A(j,j) when scaling x if A(j,j) > 1. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = rec * tjj;
+ rec = min(d__1,d__2);
+ zladiv_(&z__1, &uscal, &tjjs);
+ uscal.r = z__1.r, uscal.i = z__1.i;
+ }
+ if (rec < 1.) {
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ csumj.r = 0., csumj.i = 0.;
+ if (uscal.r == 1. && uscal.i == 0.) {
+
+/* If the scaling needed for A in the dot product is 1, */
+/* call ZDOTU to perform the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ zdotu_(&z__1, &i__3, &ap[ip - j + 1], &c__1, &x[1], &
+ c__1);
+ csumj.r = z__1.r, csumj.i = z__1.i;
+ } else if (j < *n) {
+ i__3 = *n - j;
+ zdotu_(&z__1, &i__3, &ap[ip + 1], &c__1, &x[j + 1], &
+ c__1);
+ csumj.r = z__1.r, csumj.i = z__1.i;
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = ip - j + i__;
+ z__3.r = ap[i__4].r * uscal.r - ap[i__4].i *
+ uscal.i, z__3.i = ap[i__4].r * uscal.i +
+ ap[i__4].i * uscal.r;
+ i__5 = i__;
+ z__2.r = z__3.r * x[i__5].r - z__3.i * x[i__5].i,
+ z__2.i = z__3.r * x[i__5].i + z__3.i * x[
+ i__5].r;
+ z__1.r = csumj.r + z__2.r, z__1.i = csumj.i +
+ z__2.i;
+ csumj.r = z__1.r, csumj.i = z__1.i;
+/* L130: */
+ }
+ } else if (j < *n) {
+ i__3 = *n - j;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = ip + i__;
+ z__3.r = ap[i__4].r * uscal.r - ap[i__4].i *
+ uscal.i, z__3.i = ap[i__4].r * uscal.i +
+ ap[i__4].i * uscal.r;
+ i__5 = j + i__;
+ z__2.r = z__3.r * x[i__5].r - z__3.i * x[i__5].i,
+ z__2.i = z__3.r * x[i__5].i + z__3.i * x[
+ i__5].r;
+ z__1.r = csumj.r + z__2.r, z__1.i = csumj.i +
+ z__2.i;
+ csumj.r = z__1.r, csumj.i = z__1.i;
+/* L140: */
+ }
+ }
+ }
+
+ z__1.r = tscal, z__1.i = 0.;
+ if (uscal.r == z__1.r && uscal.i == z__1.i) {
+
+/* Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j) */
+/* was not used to scale the dotproduct. */
+
+ i__3 = j;
+ i__4 = j;
+ z__1.r = x[i__4].r - csumj.r, z__1.i = x[i__4].i -
+ csumj.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j])
+ , abs(d__2));
+ if (nounit) {
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ i__3 = ip;
+ z__1.r = tscal * ap[i__3].r, z__1.i = tscal * ap[i__3]
+ .i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ if (tscal == 1.) {
+ goto L160;
+ }
+ }
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs),
+ abs(d__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1. / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ } else if (tjj > 0.) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0 and compute a solution to A**T *x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0., x[i__4].i = 0.;
+/* L150: */
+ }
+ i__3 = j;
+ x[i__3].r = 1., x[i__3].i = 0.;
+ *scale = 0.;
+ xmax = 0.;
+ }
+L160:
+ ;
+ } else {
+
+/* Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot */
+/* product has already been divided by 1/A(j,j). */
+
+ i__3 = j;
+ zladiv_(&z__2, &x[j], &tjjs);
+ z__1.r = z__2.r - csumj.r, z__1.i = z__2.i - csumj.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ }
+/* Computing MAX */
+ i__3 = j;
+ d__3 = xmax, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&x[j]), abs(d__2));
+ xmax = max(d__3,d__4);
+ ++jlen;
+ ip += jinc * jlen;
+/* L170: */
+ }
+
+ } else {
+
+/* Solve A**H * x = b */
+
+ ip = jfirst * (jfirst + 1) / 2;
+ jlen = 1;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) - sum A(k,j)*x(k). */
+/* k<>j */
+
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j]),
+ abs(d__2));
+ uscal.r = tscal, uscal.i = 0.;
+ rec = 1. / max(xmax,1.);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5;
+ if (nounit) {
+ d_cnjg(&z__2, &ap[ip]);
+ z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ }
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs),
+ abs(d__2));
+ if (tjj > 1.) {
+
+/* Divide by A(j,j) when scaling x if A(j,j) > 1. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = rec * tjj;
+ rec = min(d__1,d__2);
+ zladiv_(&z__1, &uscal, &tjjs);
+ uscal.r = z__1.r, uscal.i = z__1.i;
+ }
+ if (rec < 1.) {
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ csumj.r = 0., csumj.i = 0.;
+ if (uscal.r == 1. && uscal.i == 0.) {
+
+/* If the scaling needed for A in the dot product is 1, */
+/* call ZDOTC to perform the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ zdotc_(&z__1, &i__3, &ap[ip - j + 1], &c__1, &x[1], &
+ c__1);
+ csumj.r = z__1.r, csumj.i = z__1.i;
+ } else if (j < *n) {
+ i__3 = *n - j;
+ zdotc_(&z__1, &i__3, &ap[ip + 1], &c__1, &x[j + 1], &
+ c__1);
+ csumj.r = z__1.r, csumj.i = z__1.i;
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ d_cnjg(&z__4, &ap[ip - j + i__]);
+ z__3.r = z__4.r * uscal.r - z__4.i * uscal.i,
+ z__3.i = z__4.r * uscal.i + z__4.i *
+ uscal.r;
+ i__4 = i__;
+ z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i,
+ z__2.i = z__3.r * x[i__4].i + z__3.i * x[
+ i__4].r;
+ z__1.r = csumj.r + z__2.r, z__1.i = csumj.i +
+ z__2.i;
+ csumj.r = z__1.r, csumj.i = z__1.i;
+/* L180: */
+ }
+ } else if (j < *n) {
+ i__3 = *n - j;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ d_cnjg(&z__4, &ap[ip + i__]);
+ z__3.r = z__4.r * uscal.r - z__4.i * uscal.i,
+ z__3.i = z__4.r * uscal.i + z__4.i *
+ uscal.r;
+ i__4 = j + i__;
+ z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i,
+ z__2.i = z__3.r * x[i__4].i + z__3.i * x[
+ i__4].r;
+ z__1.r = csumj.r + z__2.r, z__1.i = csumj.i +
+ z__2.i;
+ csumj.r = z__1.r, csumj.i = z__1.i;
+/* L190: */
+ }
+ }
+ }
+
+ z__1.r = tscal, z__1.i = 0.;
+ if (uscal.r == z__1.r && uscal.i == z__1.i) {
+
+/* Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j) */
+/* was not used to scale the dotproduct. */
+
+ i__3 = j;
+ i__4 = j;
+ z__1.r = x[i__4].r - csumj.r, z__1.i = x[i__4].i -
+ csumj.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j])
+ , abs(d__2));
+ if (nounit) {
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ d_cnjg(&z__2, &ap[ip]);
+ z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ if (tscal == 1.) {
+ goto L210;
+ }
+ }
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs),
+ abs(d__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1. / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ } else if (tjj > 0.) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0 and compute a solution to A**H *x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0., x[i__4].i = 0.;
+/* L200: */
+ }
+ i__3 = j;
+ x[i__3].r = 1., x[i__3].i = 0.;
+ *scale = 0.;
+ xmax = 0.;
+ }
+L210:
+ ;
+ } else {
+
+/* Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot */
+/* product has already been divided by 1/A(j,j). */
+
+ i__3 = j;
+ zladiv_(&z__2, &x[j], &tjjs);
+ z__1.r = z__2.r - csumj.r, z__1.i = z__2.i - csumj.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ }
+/* Computing MAX */
+ i__3 = j;
+ d__3 = xmax, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&x[j]), abs(d__2));
+ xmax = max(d__3,d__4);
+ ++jlen;
+ ip += jinc * jlen;
+/* L220: */
+ }
+ }
+ *scale /= tscal;
+ }
+
+/* Scale the column norms by 1/TSCAL for return. */
+
+ if (tscal != 1.) {
+ d__1 = 1. / tscal;
+ dscal_(n, &d__1, &cnorm[1], &c__1);
+ }
+
+ return 0;
+
+/* End of ZLATPS */
+
+} /* zlatps_ */
diff --git a/contrib/libs/clapack/zlatrd.c b/contrib/libs/clapack/zlatrd.c
new file mode 100644
index 0000000000..231c85d0e1
--- /dev/null
+++ b/contrib/libs/clapack/zlatrd.c
@@ -0,0 +1,420 @@
+/* zlatrd.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zlatrd_(char *uplo, integer *n, integer *nb, 
+	doublecomplex *a, integer *lda, doublereal *e, doublecomplex *tau, 
+	doublecomplex *w, integer *ldw)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3;
+    doublereal d__1;
+    doublecomplex z__1, z__2, z__3, z__4;
+
+    /* Local variables */
+    integer i__, iw;
+    doublecomplex alpha;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
+	    doublecomplex *, integer *);
+    extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *, 
+	    doublecomplex *, integer *, doublecomplex *, integer *);
+    extern /* Subroutine */ int zgemv_(char *, integer *, integer *, 
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *, 
+	    integer *, doublecomplex *, doublecomplex *, integer *), 
+	    zhemv_(char *, integer *, doublecomplex *, doublecomplex *, 
+	    integer *, doublecomplex *, integer *, doublecomplex *, 
+	    doublecomplex *, integer *), zaxpy_(integer *, 
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *, 
+	    integer *), zlarfg_(integer *, doublecomplex *, doublecomplex *, 
+	    integer *, doublecomplex *), zlacgv_(integer *, doublecomplex *, 
+	    integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLATRD reduces NB rows and columns of a complex Hermitian matrix A to */
+/* Hermitian tridiagonal form by a unitary similarity */
+/* transformation Q' * A * Q, and returns the matrices V and W which are */
+/* needed to apply the transformation to the unreduced part of A. */
+
+/* If UPLO = 'U', ZLATRD reduces the last NB rows and columns of a */
+/* matrix, of which the upper triangle is supplied; */
+/* if UPLO = 'L', ZLATRD reduces the first NB rows and columns of a */
+/* matrix, of which the lower triangle is supplied. */
+
+/* This is an auxiliary routine called by ZHETRD. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO    (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* Hermitian matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N       (input) INTEGER */
+/* The order of the matrix A. */
+
+/* NB      (input) INTEGER */
+/* The number of rows and columns to be reduced. */
+
+/* A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+/* On exit: */
+/* if UPLO = 'U', the last NB columns have been reduced to */
+/* tridiagonal form, with the diagonal elements overwriting */
+/* the diagonal elements of A; the elements above the diagonal */
+/* with the array TAU, represent the unitary matrix Q as a */
+/* product of elementary reflectors; */
+/* if UPLO = 'L', the first NB columns have been reduced to */
+/* tridiagonal form, with the diagonal elements overwriting */
+/* the diagonal elements of A; the elements below the diagonal */
+/* with the array TAU, represent the unitary matrix Q as a */
+/* product of elementary reflectors. */
+/* See Further Details. */
+
+/* LDA     (input) INTEGER */
+/* The leading dimension of the array A.  LDA >= max(1,N). */
+
+/* E       (output) DOUBLE PRECISION array, dimension (N-1) */
+/* If UPLO = 'U', E(n-nb:n-1) contains the superdiagonal */
+/* elements of the last NB columns of the reduced matrix; */
+/* if UPLO = 'L', E(1:nb) contains the subdiagonal elements of */
+/* the first NB columns of the reduced matrix. */
+
+/* TAU     (output) COMPLEX*16 array, dimension (N-1) */
+/* The scalar factors of the elementary reflectors, stored in */
+/* TAU(n-nb:n-1) if UPLO = 'U', and in TAU(1:nb) if UPLO = 'L'. */
+/* See Further Details. */
+
+/* W       (output) COMPLEX*16 array, dimension (LDW,NB) */
+/* The n-by-nb matrix W required to update the unreduced part */
+/* of A. */
+
+/* LDW     (input) INTEGER */
+/* The leading dimension of the array W. LDW >= max(1,N). */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(n) H(n-1) . . . H(n-nb+1). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i), */
+/* and tau in TAU(i-1). */
+
+/* If UPLO = 'L', the matrix Q is represented as a product of elementary */
+/* reflectors */
+
+/* Q = H(1) H(2) . . . H(nb). */
+
+/* Each H(i) has the form */
+
+/* H(i) = I - tau * v * v' */
+
+/* where tau is a complex scalar, and v is a complex vector with */
+/* v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i), */
+/* and tau in TAU(i). */
+
+/* The elements of the vectors v together form the n-by-nb matrix V */
+/* which is needed, with W, to apply the transformation to the unreduced */
+/* part of the matrix, using a Hermitian rank-2k update of the form: */
+/* A := A - V*W' - W*V'. */
+
+/* The contents of A on exit are illustrated by the following examples */
+/* with n = 5 and nb = 2: */
+
+/* if UPLO = 'U':                       if UPLO = 'L': */
+
+/* (  a   a   a   v4  v5 )              (  d                  ) */
+/* (      a   a   v4  v5 )              (  1   d              ) */
+/* (          a   1   v5 )              (  v1  1   a          ) */
+/* (              d   1  )              (  v1  v2  a   a      ) */
+/* (                  d  )              (  v1  v2  a   a   a  ) */
+
+/* where d denotes a diagonal element of the reduced matrix, a denotes */
+/* an element of the original matrix that is unchanged, and vi denotes */
+/* an element of the vector defining H(i). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+    /* Parameter adjustments */
+    /* Shift the base pointers so the 1-based Fortran indexing in the */
+    /* translated code (a[row + col*a_dim1], e[j], tau[j], w[...]) maps */
+    /* directly onto the 0-based C arrays. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --e;
+    --tau;
+    w_dim1 = *ldw;
+    w_offset = 1 + w_dim1;
+    w -= w_offset;
+
+    /* Function Body */
+    if (*n <= 0) {
+	return 0;
+    }
+
+    if (lsame_(uplo, "U")) {
+
+/* Reduce last NB columns of upper triangle */
+
+	i__1 = *n - *nb + 1;
+	for (i__ = *n; i__ >= i__1; --i__) {
+/* Column i of A corresponds to column iw of the n-by-nb workspace W. */
+	    iw = i__ - *n + *nb;
+	    if (i__ < *n) {
+
+/* Update A(1:i,i) */
+
+/* Set the imaginary part of A(i,i) to zero before the update. */
+		i__2 = i__ + i__ * a_dim1;
+		i__3 = i__ + i__ * a_dim1;
+		d__1 = a[i__3].r;
+		a[i__2].r = d__1, a[i__2].i = 0.;
+/* ZLACGV conjugates the row vector in place; the same call after the */
+/* ZGEMV restores it, so the stored data is unchanged overall. */
+		i__2 = *n - i__;
+		zlacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw);
+		i__2 = *n - i__;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("No transpose", &i__, &i__2, &z__1, &a[(i__ + 1) * 
+			a_dim1 + 1], lda, &w[i__ + (iw + 1) * w_dim1], ldw, &
+			c_b2, &a[i__ * a_dim1 + 1], &c__1);
+		i__2 = *n - i__;
+		zlacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw);
+		i__2 = *n - i__;
+		zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+		i__2 = *n - i__;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("No transpose", &i__, &i__2, &z__1, &w[(iw + 1) * 
+			w_dim1 + 1], ldw, &a[i__ + (i__ + 1) * a_dim1], lda, &
+			c_b2, &a[i__ * a_dim1 + 1], &c__1);
+		i__2 = *n - i__;
+		zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+/* Re-zero the imaginary part of A(i,i) after the two rank updates. */
+		i__2 = i__ + i__ * a_dim1;
+		i__3 = i__ + i__ * a_dim1;
+		d__1 = a[i__3].r;
+		a[i__2].r = d__1, a[i__2].i = 0.;
+	    }
+	    if (i__ > 1) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(1:i-2,i) */
+
+		i__2 = i__ - 1 + i__ * a_dim1;
+		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+		i__2 = i__ - 1;
+		zlarfg_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &tau[i__ 
+			- 1]);
+/* ZLARFG returns the (real) superdiagonal in ALPHA; save it in E and */
+/* temporarily set A(i-1,i) = 1 so the column holds the reflector v. */
+		i__2 = i__ - 1;
+		e[i__2] = alpha.r;
+		i__2 = i__ - 1 + i__ * a_dim1;
+		a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Compute W(1:i-1,i) */
+
+		i__2 = i__ - 1;
+		zhemv_("Upper", &i__2, &c_b2, &a[a_offset], lda, &a[i__ * 
+			a_dim1 + 1], &c__1, &c_b1, &w[iw * w_dim1 + 1], &c__1);
+		if (i__ < *n) {
+		    i__2 = i__ - 1;
+		    i__3 = *n - i__;
+		    zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &w[(iw 
+			    + 1) * w_dim1 + 1], ldw, &a[i__ * a_dim1 + 1], &
+			    c__1, &c_b1, &w[i__ + 1 + iw * w_dim1], &c__1);
+		    i__2 = i__ - 1;
+		    i__3 = *n - i__;
+		    z__1.r = -1., z__1.i = -0.;
+		    zgemv_("No transpose", &i__2, &i__3, &z__1, &a[(i__ + 1) * 
+			    a_dim1 + 1], lda, &w[i__ + 1 + iw * w_dim1], &
+			    c__1, &c_b2, &w[iw * w_dim1 + 1], &c__1);
+		    i__2 = i__ - 1;
+		    i__3 = *n - i__;
+		    zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[(
+			    i__ + 1) * a_dim1 + 1], lda, &a[i__ * a_dim1 + 1], 
+			    &c__1, &c_b1, &w[i__ + 1 + iw * w_dim1], &c__1);
+		    i__2 = i__ - 1;
+		    i__3 = *n - i__;
+		    z__1.r = -1., z__1.i = -0.;
+		    zgemv_("No transpose", &i__2, &i__3, &z__1, &w[(iw + 1) * 
+			    w_dim1 + 1], ldw, &w[i__ + 1 + iw * w_dim1], &
+			    c__1, &c_b2, &w[iw * w_dim1 + 1], &c__1);
+		}
+		i__2 = i__ - 1;
+		zscal_(&i__2, &tau[i__ - 1], &w[iw * w_dim1 + 1], &c__1);
+/* alpha := -(1/2) * tau(i-1) * (w**H * v), via ZDOTC; then */
+/* w := w + alpha*v, via ZAXPY. */
+		z__3.r = -.5, z__3.i = -0.;
+		i__2 = i__ - 1;
+		z__2.r = z__3.r * tau[i__2].r - z__3.i * tau[i__2].i, z__2.i = 
+			z__3.r * tau[i__2].i + z__3.i * tau[i__2].r;
+		i__3 = i__ - 1;
+		zdotc_(&z__4, &i__3, &w[iw * w_dim1 + 1], &c__1, &a[i__ * 
+			a_dim1 + 1], &c__1);
+		z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * 
+			z__4.i + z__2.i * z__4.r;
+		alpha.r = z__1.r, alpha.i = z__1.i;
+		i__2 = i__ - 1;
+		zaxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &w[iw * 
+			w_dim1 + 1], &c__1);
+	    }
+
+/* L10: */
+	}
+    } else {
+
+/* Reduce first NB columns of lower triangle */
+
+	i__1 = *nb;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i:n,i) */
+
+/* Set the imaginary part of A(i,i) to zero before the update. */
+	    i__2 = i__ + i__ * a_dim1;
+	    i__3 = i__ + i__ * a_dim1;
+	    d__1 = a[i__3].r;
+	    a[i__2].r = d__1, a[i__2].i = 0.;
+/* Conjugate row i of W in place; restored by the matching call below. */
+	    i__2 = i__ - 1;
+	    zlacgv_(&i__2, &w[i__ + w_dim1], ldw);
+	    i__2 = *n - i__ + 1;
+	    i__3 = i__ - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + a_dim1], lda, 
+		    &w[i__ + w_dim1], ldw, &c_b2, &a[i__ + i__ * a_dim1], &
+		    c__1);
+	    i__2 = i__ - 1;
+	    zlacgv_(&i__2, &w[i__ + w_dim1], ldw);
+	    i__2 = i__ - 1;
+	    zlacgv_(&i__2, &a[i__ + a_dim1], lda);
+	    i__2 = *n - i__ + 1;
+	    i__3 = i__ - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgemv_("No transpose", &i__2, &i__3, &z__1, &w[i__ + w_dim1], ldw, 
+		    &a[i__ + a_dim1], lda, &c_b2, &a[i__ + i__ * a_dim1], &
+		    c__1);
+	    i__2 = i__ - 1;
+	    zlacgv_(&i__2, &a[i__ + a_dim1], lda);
+/* Re-zero the imaginary part of A(i,i) after the two rank updates. */
+	    i__2 = i__ + i__ * a_dim1;
+	    i__3 = i__ + i__ * a_dim1;
+	    d__1 = a[i__3].r;
+	    a[i__2].r = d__1, a[i__2].i = 0.;
+	    if (i__ < *n) {
+
+/* Generate elementary reflector H(i) to annihilate */
+/* A(i+2:n,i) */
+
+		i__2 = i__ + 1 + i__ * a_dim1;
+		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+		i__2 = *n - i__;
+/* Computing MIN */
+/* min(i+2,n) guards the start index when i = n-1 (reflector of length 1). */
+		i__3 = i__ + 2;
+		zlarfg_(&i__2, &alpha, &a[min(i__3, *n)+ i__ * a_dim1], &c__1, 
+			&tau[i__]);
+/* Save the (real) subdiagonal in E and set A(i+1,i) = 1 so the column */
+/* holds the reflector v. */
+		i__2 = i__;
+		e[i__2] = alpha.r;
+		i__2 = i__ + 1 + i__ * a_dim1;
+		a[i__2].r = 1., a[i__2].i = 0.;
+
+/* Compute W(i+1:n,i) */
+
+		i__2 = *n - i__;
+		zhemv_("Lower", &i__2, &c_b2, &a[i__ + 1 + (i__ + 1) * a_dim1]
+, lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b1, &w[
+			i__ + 1 + i__ * w_dim1], &c__1);
+		i__2 = *n - i__;
+		i__3 = i__ - 1;
+		zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &w[i__ + 1 
+			+ w_dim1], ldw, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+			c_b1, &w[i__ * w_dim1 + 1], &c__1);
+		i__2 = *n - i__;
+		i__3 = i__ - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + 1 + 
+			a_dim1], lda, &w[i__ * w_dim1 + 1], &c__1, &c_b2, &w[
+			i__ + 1 + i__ * w_dim1], &c__1);
+		i__2 = *n - i__;
+		i__3 = i__ - 1;
+		zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 
+			+ a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+			c_b1, &w[i__ * w_dim1 + 1], &c__1);
+		i__2 = *n - i__;
+		i__3 = i__ - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("No transpose", &i__2, &i__3, &z__1, &w[i__ + 1 + 
+			w_dim1], ldw, &w[i__ * w_dim1 + 1], &c__1, &c_b2, &w[
+			i__ + 1 + i__ * w_dim1], &c__1);
+		i__2 = *n - i__;
+		zscal_(&i__2, &tau[i__], &w[i__ + 1 + i__ * w_dim1], &c__1);
+/* alpha := -(1/2) * tau(i) * (w**H * v), via ZDOTC; then */
+/* w := w + alpha*v, via ZAXPY. */
+		z__3.r = -.5, z__3.i = -0.;
+		i__2 = i__;
+		z__2.r = z__3.r * tau[i__2].r - z__3.i * tau[i__2].i, z__2.i = 
+			z__3.r * tau[i__2].i + z__3.i * tau[i__2].r;
+		i__3 = *n - i__;
+		zdotc_(&z__4, &i__3, &w[i__ + 1 + i__ * w_dim1], &c__1, &a[
+			i__ + 1 + i__ * a_dim1], &c__1);
+		z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * 
+			z__4.i + z__2.i * z__4.r;
+		alpha.r = z__1.r, alpha.i = z__1.i;
+		i__2 = *n - i__;
+		zaxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &w[
+			i__ + 1 + i__ * w_dim1], &c__1);
+	    }
+
+/* L20: */
+	}
+    }
+
+    return 0;
+
+/* End of ZLATRD */
+
+} /* zlatrd_ */
diff --git a/contrib/libs/clapack/zlatrs.c b/contrib/libs/clapack/zlatrs.c
new file mode 100644
index 0000000000..06fe9bb832
--- /dev/null
+++ b/contrib/libs/clapack/zlatrs.c
@@ -0,0 +1,1150 @@
+/* zlatrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b36 = .5;
+
+/* Subroutine */ int zlatrs_(char *uplo, char *trans, char *diag, char *
+ normin, integer *n, doublecomplex *a, integer *lda, doublecomplex *x,
+ doublereal *scale, doublereal *cnorm, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j;
+ doublereal xj, rec, tjj;
+ integer jinc;
+ doublereal xbnd;
+ integer imax;
+ doublereal tmax;
+ doublecomplex tjjs;
+ doublereal xmax, grow;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ doublereal tscal;
+ doublecomplex uscal;
+ integer jlast;
+ doublecomplex csumj;
+ extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ logical upper;
+ extern /* Double Complex */ VOID zdotu_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ extern /* Subroutine */ int zaxpy_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), ztrsv_(
+ char *, char *, char *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), dlabad_(
+ doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+ integer *, doublereal *, doublecomplex *, integer *);
+ doublereal bignum;
+ extern integer izamax_(integer *, doublecomplex *, integer *);
+ extern /* Double Complex */ VOID zladiv_(doublecomplex *, doublecomplex *,
+ doublecomplex *);
+ logical notran;
+ integer jfirst;
+ extern doublereal dzasum_(integer *, doublecomplex *, integer *);
+ doublereal smlnum;
+ logical nounit;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLATRS solves one of the triangular systems */
+
+/* A * x = s*b, A**T * x = s*b, or A**H * x = s*b, */
+
+/* with scaling to prevent overflow. Here A is an upper or lower */
+/* triangular matrix, A**T denotes the transpose of A, A**H denotes the */
+/* conjugate transpose of A, x and b are n-element vectors, and s is a */
+/* scaling factor, usually less than or equal to 1, chosen so that the */
+/* components of x will be less than the overflow threshold. If the */
+/* unscaled problem will not cause overflow, the Level 2 BLAS routine */
+/* ZTRSV is called. If the matrix A is singular (A(j,j) = 0 for some j), */
+/* then s is set to 0 and a non-trivial solution to A*x = 0 is returned. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the operation applied to A. */
+/* = 'N': Solve A * x = s*b (No transpose) */
+/* = 'T': Solve A**T * x = s*b (Transpose) */
+/* = 'C': Solve A**H * x = s*b (Conjugate transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* NORMIN (input) CHARACTER*1 */
+/* Specifies whether CNORM has been set or not. */
+/* = 'Y': CNORM contains the column norms on entry */
+/* = 'N': CNORM is not set on entry. On exit, the norms will */
+/* be computed and stored in CNORM. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The triangular matrix A. If UPLO = 'U', the leading n by n */
+/* upper triangular part of the array A contains the upper */
+/* triangular matrix, and the strictly lower triangular part of */
+/* A is not referenced. If UPLO = 'L', the leading n by n lower */
+/* triangular part of the array A contains the lower triangular */
+/* matrix, and the strictly upper triangular part of A is not */
+/* referenced. If DIAG = 'U', the diagonal elements of A are */
+/* also not referenced and are assumed to be 1. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max (1,N). */
+
+/* X (input/output) COMPLEX*16 array, dimension (N) */
+/* On entry, the right hand side b of the triangular system. */
+/* On exit, X is overwritten by the solution vector x. */
+
+/* SCALE (output) DOUBLE PRECISION */
+/* The scaling factor s for the triangular system */
+/* A * x = s*b, A**T * x = s*b, or A**H * x = s*b. */
+/* If SCALE = 0, the matrix A is singular or badly scaled, and */
+/* the vector x is an exact or approximate solution to A*x = 0. */
+
+/* CNORM (input or output) DOUBLE PRECISION array, dimension (N) */
+
+/* If NORMIN = 'Y', CNORM is an input argument and CNORM(j) */
+/* contains the norm of the off-diagonal part of the j-th column */
+/* of A. If TRANS = 'N', CNORM(j) must be greater than or equal */
+/* to the infinity-norm, and if TRANS = 'T' or 'C', CNORM(j) */
+/* must be greater than or equal to the 1-norm. */
+
+/* If NORMIN = 'N', CNORM is an output argument and CNORM(j) */
+/* returns the 1-norm of the offdiagonal part of the j-th column */
+/* of A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* Further Details */
+/* ======= ======= */
+
+/* A rough bound on x is computed; if that is less than overflow, ZTRSV */
+/* is called, otherwise, specific code is used which checks for possible */
+/* overflow or divide-by-zero at every operation. */
+
+/* A columnwise scheme is used for solving A*x = b. The basic algorithm */
+/* if A is lower triangular is */
+
+/* x[1:n] := b[1:n] */
+/* for j = 1, ..., n */
+/* x(j) := x(j) / A(j,j) */
+/* x[j+1:n] := x[j+1:n] - x(j) * A[j+1:n,j] */
+/* end */
+
+/* Define bounds on the components of x after j iterations of the loop: */
+/* M(j) = bound on x[1:j] */
+/* G(j) = bound on x[j+1:n] */
+/* Initially, let M(0) = 0 and G(0) = max{x(i), i=1,...,n}. */
+
+/* Then for iteration j+1 we have */
+/* M(j+1) <= G(j) / | A(j+1,j+1) | */
+/* G(j+1) <= G(j) + M(j+1) * | A[j+2:n,j+1] | */
+/* <= G(j) ( 1 + CNORM(j+1) / | A(j+1,j+1) | ) */
+
+/* where CNORM(j+1) is greater than or equal to the infinity-norm of */
+/* column j+1 of A, not counting the diagonal. Hence */
+
+/* G(j) <= G(0) product ( 1 + CNORM(i) / | A(i,i) | ) */
+/* 1<=i<=j */
+/* and */
+
+/* |x(j)| <= ( G(0) / |A(j,j)| ) product ( 1 + CNORM(i) / |A(i,i)| ) */
+/* 1<=i< j */
+
+/* Since |x(j)| <= M(j), we use the Level 2 BLAS routine ZTRSV if the */
+/* reciprocal of the largest M(j), j=1,..,n, is larger than */
+/* max(underflow, 1/overflow). */
+
+/* The bound on x(j) is also used to determine when a step in the */
+/* columnwise method can be performed without fear of overflow. If */
+/* the computed bound is greater than a large constant, x is scaled to */
+/* prevent overflow, but if the bound overflows, x is set to 0, x(j) to */
+/* 1, and scale to 0, and a non-trivial solution to A*x = 0 is found. */
+
+/* Similarly, a row-wise scheme is used to solve A**T *x = b or */
+/* A**H *x = b. The basic algorithm for A upper triangular is */
+
+/* for j = 1, ..., n */
+/* x(j) := ( b(j) - A[1:j-1,j]' * x[1:j-1] ) / A(j,j) */
+/* end */
+
+/* We simultaneously compute two bounds */
+/* G(j) = bound on ( b(i) - A[1:i-1,i]' * x[1:i-1] ), 1<=i<=j */
+/* M(j) = bound on x(i), 1<=i<=j */
+
+/* The initial values are G(0) = 0, M(0) = max{b(i), i=1,..,n}, and we */
+/* add the constraint G(j) >= G(j-1) and M(j) >= M(j-1) for j >= 1. */
+/* Then the bound on x(j) is */
+
+/* M(j) <= M(j-1) * ( 1 + CNORM(j) ) / | A(j,j) | */
+
+/* <= M(0) * product ( ( 1 + CNORM(i) ) / |A(i,i)| ) */
+/* 1<=i<=j */
+
+/* and we can safely call ZTRSV if 1/M(n) and 1/G(n) are both greater */
+/* than max(underflow, 1/overflow). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --cnorm;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+/* Test the input parameters. */
+
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (! lsame_(normin, "Y") && ! lsame_(normin,
+ "N")) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZLATRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine machine dependent parameters to control overflow. */
+
+ smlnum = dlamch_("Safe minimum");
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+ smlnum /= dlamch_("Precision");
+ bignum = 1. / smlnum;
+ *scale = 1.;
+
+ if (lsame_(normin, "N")) {
+
+/* Compute the 1-norm of each column, not including the diagonal. */
+
+ if (upper) {
+
+/* A is upper triangular. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ cnorm[j] = dzasum_(&i__2, &a[j * a_dim1 + 1], &c__1);
+/* L10: */
+ }
+ } else {
+
+/* A is lower triangular. */
+
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ cnorm[j] = dzasum_(&i__2, &a[j + 1 + j * a_dim1], &c__1);
+/* L20: */
+ }
+ cnorm[*n] = 0.;
+ }
+ }
+
+/* Scale the column norms by TSCAL if the maximum element in CNORM is */
+/* greater than BIGNUM/2. */
+
+ imax = idamax_(n, &cnorm[1], &c__1);
+ tmax = cnorm[imax];
+ if (tmax <= bignum * .5) {
+ tscal = 1.;
+ } else {
+ tscal = .5 / (smlnum * tmax);
+ dscal_(n, &tscal, &cnorm[1], &c__1);
+ }
+
+/* Compute a bound on the computed solution vector to see if the */
+/* Level 2 BLAS routine ZTRSV can be used. */
+
+ xmax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j;
+ d__3 = xmax, d__4 = (d__1 = x[i__2].r / 2., abs(d__1)) + (d__2 =
+ d_imag(&x[j]) / 2., abs(d__2));
+ xmax = max(d__3,d__4);
+/* L30: */
+ }
+ xbnd = xmax;
+
+ if (notran) {
+
+/* Compute the growth in A * x = b. */
+
+ if (upper) {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ } else {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ }
+
+ if (tscal != 1.) {
+ grow = 0.;
+ goto L60;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, G(0) = max{x(i), i=1,...,n}. */
+
+ grow = .5 / max(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L60;
+ }
+
+ i__3 = j + j * a_dim1;
+ tjjs.r = a[i__3].r, tjjs.i = a[i__3].i;
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs), abs(
+ d__2));
+
+ if (tjj >= smlnum) {
+
+/* M(j) = G(j-1) / abs(A(j,j)) */
+
+/* Computing MIN */
+ d__1 = xbnd, d__2 = min(1.,tjj) * grow;
+ xbnd = min(d__1,d__2);
+ } else {
+
+/* M(j) could overflow, set XBND to 0. */
+
+ xbnd = 0.;
+ }
+
+ if (tjj + cnorm[j] >= smlnum) {
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) / abs(A(j,j)) ) */
+
+ grow *= tjj / (tjj + cnorm[j]);
+ } else {
+
+/* G(j) could overflow, set GROW to 0. */
+
+ grow = 0.;
+ }
+/* L40: */
+ }
+ grow = xbnd;
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = .5 / max(xbnd,smlnum);
+ grow = min(d__1,d__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L60;
+ }
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) ) */
+
+ grow *= 1. / (cnorm[j] + 1.);
+/* L50: */
+ }
+ }
+L60:
+
+ ;
+ } else {
+
+/* Compute the growth in A**T * x = b or A**H * x = b. */
+
+ if (upper) {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ } else {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ }
+
+ if (tscal != 1.) {
+ grow = 0.;
+ goto L90;
+ }
+
+ if (nounit) {
+
+/* A is non-unit triangular. */
+
+/* Compute GROW = 1/G(j) and XBND = 1/M(j). */
+/* Initially, M(0) = max{x(i), i=1,...,n}. */
+
+ grow = .5 / max(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L90;
+ }
+
+/* G(j) = max( G(j-1), M(j-1)*( 1 + CNORM(j) ) ) */
+
+ xj = cnorm[j] + 1.;
+/* Computing MIN */
+ d__1 = grow, d__2 = xbnd / xj;
+ grow = min(d__1,d__2);
+
+ i__3 = j + j * a_dim1;
+ tjjs.r = a[i__3].r, tjjs.i = a[i__3].i;
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs), abs(
+ d__2));
+
+ if (tjj >= smlnum) {
+
+/* M(j) = M(j-1)*( 1 + CNORM(j) ) / abs(A(j,j)) */
+
+ if (xj > tjj) {
+ xbnd *= tjj / xj;
+ }
+ } else {
+
+/* M(j) could overflow, set XBND to 0. */
+
+ xbnd = 0.;
+ }
+/* L70: */
+ }
+ grow = min(grow,xbnd);
+ } else {
+
+/* A is unit triangular. */
+
+/* Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = .5 / max(xbnd,smlnum);
+ grow = min(d__1,d__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L90;
+ }
+
+/* G(j) = ( 1 + CNORM(j) )*G(j-1) */
+
+ xj = cnorm[j] + 1.;
+ grow /= xj;
+/* L80: */
+ }
+ }
+L90:
+ ;
+ }
+
+ if (grow * tscal > smlnum) {
+
+/* Use the Level 2 BLAS solve if the reciprocal of the bound on */
+/* elements of X is not too small. */
+
+ ztrsv_(uplo, trans, diag, n, &a[a_offset], lda, &x[1], &c__1);
+ } else {
+
+/* Use a Level 1 BLAS solve, scaling intermediate results. */
+
+ if (xmax > bignum * .5) {
+
+/* Scale X so that its components are less than or equal to */
+/* BIGNUM in absolute value. */
+
+ *scale = bignum * .5 / xmax;
+ zdscal_(n, scale, &x[1], &c__1);
+ xmax = bignum;
+ } else {
+ xmax *= 2.;
+ }
+
+ if (notran) {
+
+/* Solve A * x = b */
+
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) / A(j,j), scaling x if necessary. */
+
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j]),
+ abs(d__2));
+ if (nounit) {
+ i__3 = j + j * a_dim1;
+ z__1.r = tscal * a[i__3].r, z__1.i = tscal * a[i__3].i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ if (tscal == 1.) {
+ goto L110;
+ }
+ }
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs), abs(
+ d__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.) {
+ if (xj > tjj * bignum) {
+
+/* Scale x by 1/b(j). */
+
+ rec = 1. / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j])
+ , abs(d__2));
+ } else if (tjj > 0.) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM */
+/* to avoid overflow when dividing by A(j,j). */
+
+ rec = tjj * bignum / xj;
+ if (cnorm[j] > 1.) {
+
+/* Scale by 1/CNORM(j) to avoid overflow when */
+/* multiplying x(j) times column j. */
+
+ rec /= cnorm[j];
+ }
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j])
+ , abs(d__2));
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0, and compute a solution to A*x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0., x[i__4].i = 0.;
+/* L100: */
+ }
+ i__3 = j;
+ x[i__3].r = 1., x[i__3].i = 0.;
+ xj = 1.;
+ *scale = 0.;
+ xmax = 0.;
+ }
+L110:
+
+/* Scale x if necessary to avoid overflow when adding a */
+/* multiple of column j of A. */
+
+ if (xj > 1.) {
+ rec = 1. / xj;
+ if (cnorm[j] > (bignum - xmax) * rec) {
+
+/* Scale x by 1/(2*abs(x(j))). */
+
+ rec *= .5;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ } else if (xj * cnorm[j] > bignum - xmax) {
+
+/* Scale x by 1/2. */
+
+ zdscal_(n, &c_b36, &x[1], &c__1);
+ *scale *= .5;
+ }
+
+ if (upper) {
+ if (j > 1) {
+
+/* Compute the update */
+/* x(1:j-1) := x(1:j-1) - x(j) * A(1:j-1,j) */
+
+ i__3 = j - 1;
+ i__4 = j;
+ z__2.r = -x[i__4].r, z__2.i = -x[i__4].i;
+ z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i;
+ zaxpy_(&i__3, &z__1, &a[j * a_dim1 + 1], &c__1, &x[1],
+ &c__1);
+ i__3 = j - 1;
+ i__ = izamax_(&i__3, &x[1], &c__1);
+ i__3 = i__;
+ xmax = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(
+ &x[i__]), abs(d__2));
+ }
+ } else {
+ if (j < *n) {
+
+/* Compute the update */
+/* x(j+1:n) := x(j+1:n) - x(j) * A(j+1:n,j) */
+
+ i__3 = *n - j;
+ i__4 = j;
+ z__2.r = -x[i__4].r, z__2.i = -x[i__4].i;
+ z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i;
+ zaxpy_(&i__3, &z__1, &a[j + 1 + j * a_dim1], &c__1, &
+ x[j + 1], &c__1);
+ i__3 = *n - j;
+ i__ = j + izamax_(&i__3, &x[j + 1], &c__1);
+ i__3 = i__;
+ xmax = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(
+ &x[i__]), abs(d__2));
+ }
+ }
+/* L120: */
+ }
+
+ } else if (lsame_(trans, "T")) {
+
+/* Solve A**T * x = b */
+
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Compute x(j) = b(j) - sum A(k,j)*x(k). */
+/* k<>j */
+
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j]),
+ abs(d__2));
+ uscal.r = tscal, uscal.i = 0.;
+ rec = 1. / max(xmax,1.);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5;
+ if (nounit) {
+ i__3 = j + j * a_dim1;
+ z__1.r = tscal * a[i__3].r, z__1.i = tscal * a[i__3]
+ .i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ }
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs),
+ abs(d__2));
+ if (tjj > 1.) {
+
+/* Divide by A(j,j) when scaling x if A(j,j) > 1. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = rec * tjj;
+ rec = min(d__1,d__2);
+ zladiv_(&z__1, &uscal, &tjjs);
+ uscal.r = z__1.r, uscal.i = z__1.i;
+ }
+ if (rec < 1.) {
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ csumj.r = 0., csumj.i = 0.;
+ if (uscal.r == 1. && uscal.i == 0.) {
+
+/* If the scaling needed for A in the dot product is 1, */
+/* call ZDOTU to perform the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ zdotu_(&z__1, &i__3, &a[j * a_dim1 + 1], &c__1, &x[1],
+ &c__1);
+ csumj.r = z__1.r, csumj.i = z__1.i;
+ } else if (j < *n) {
+ i__3 = *n - j;
+ zdotu_(&z__1, &i__3, &a[j + 1 + j * a_dim1], &c__1, &
+ x[j + 1], &c__1);
+ csumj.r = z__1.r, csumj.i = z__1.i;
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * a_dim1;
+ z__3.r = a[i__4].r * uscal.r - a[i__4].i *
+ uscal.i, z__3.i = a[i__4].r * uscal.i + a[
+ i__4].i * uscal.r;
+ i__5 = i__;
+ z__2.r = z__3.r * x[i__5].r - z__3.i * x[i__5].i,
+ z__2.i = z__3.r * x[i__5].i + z__3.i * x[
+ i__5].r;
+ z__1.r = csumj.r + z__2.r, z__1.i = csumj.i +
+ z__2.i;
+ csumj.r = z__1.r, csumj.i = z__1.i;
+/* L130: */
+ }
+ } else if (j < *n) {
+ i__3 = *n;
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * a_dim1;
+ z__3.r = a[i__4].r * uscal.r - a[i__4].i *
+ uscal.i, z__3.i = a[i__4].r * uscal.i + a[
+ i__4].i * uscal.r;
+ i__5 = i__;
+ z__2.r = z__3.r * x[i__5].r - z__3.i * x[i__5].i,
+ z__2.i = z__3.r * x[i__5].i + z__3.i * x[
+ i__5].r;
+ z__1.r = csumj.r + z__2.r, z__1.i = csumj.i +
+ z__2.i;
+ csumj.r = z__1.r, csumj.i = z__1.i;
+/* L140: */
+ }
+ }
+ }
+
+ z__1.r = tscal, z__1.i = 0.;
+ if (uscal.r == z__1.r && uscal.i == z__1.i) {
+
+/* Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j) */
+/* was not used to scale the dotproduct. */
+
+ i__3 = j;
+ i__4 = j;
+ z__1.r = x[i__4].r - csumj.r, z__1.i = x[i__4].i -
+ csumj.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j])
+ , abs(d__2));
+ if (nounit) {
+ i__3 = j + j * a_dim1;
+ z__1.r = tscal * a[i__3].r, z__1.i = tscal * a[i__3]
+ .i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ if (tscal == 1.) {
+ goto L160;
+ }
+ }
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs),
+ abs(d__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1. / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ } else if (tjj > 0.) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0 and compute a solution to A**T *x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0., x[i__4].i = 0.;
+/* L150: */
+ }
+ i__3 = j;
+ x[i__3].r = 1., x[i__3].i = 0.;
+ *scale = 0.;
+ xmax = 0.;
+ }
+L160:
+ ;
+ } else {
+
+/* Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot */
+/* product has already been divided by 1/A(j,j). */
+
+ i__3 = j;
+ zladiv_(&z__2, &x[j], &tjjs);
+ z__1.r = z__2.r - csumj.r, z__1.i = z__2.i - csumj.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ }
+/* Computing MAX */
+ i__3 = j;
+ d__3 = xmax, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&x[j]), abs(d__2));
+ xmax = max(d__3,d__4);
+/* L170: */
+ }
+
+ } else {
+
+/* Solve A**H * x = b */
+
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) - sum A(k,j)*x(k). */
+/* k<>j */
+
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j]),
+ abs(d__2));
+ uscal.r = tscal, uscal.i = 0.;
+ rec = 1. / max(xmax,1.);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5;
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ }
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs),
+ abs(d__2));
+ if (tjj > 1.) {
+
+/* Divide by A(j,j) when scaling x if A(j,j) > 1. */
+
+/* Computing MIN */
+ d__1 = 1., d__2 = rec * tjj;
+ rec = min(d__1,d__2);
+ zladiv_(&z__1, &uscal, &tjjs);
+ uscal.r = z__1.r, uscal.i = z__1.i;
+ }
+ if (rec < 1.) {
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ csumj.r = 0., csumj.i = 0.;
+ if (uscal.r == 1. && uscal.i == 0.) {
+
+/* If the scaling needed for A in the dot product is 1, */
+/* call ZDOTC to perform the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ zdotc_(&z__1, &i__3, &a[j * a_dim1 + 1], &c__1, &x[1],
+ &c__1);
+ csumj.r = z__1.r, csumj.i = z__1.i;
+ } else if (j < *n) {
+ i__3 = *n - j;
+ zdotc_(&z__1, &i__3, &a[j + 1 + j * a_dim1], &c__1, &
+ x[j + 1], &c__1);
+ csumj.r = z__1.r, csumj.i = z__1.i;
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ d_cnjg(&z__4, &a[i__ + j * a_dim1]);
+ z__3.r = z__4.r * uscal.r - z__4.i * uscal.i,
+ z__3.i = z__4.r * uscal.i + z__4.i *
+ uscal.r;
+ i__4 = i__;
+ z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i,
+ z__2.i = z__3.r * x[i__4].i + z__3.i * x[
+ i__4].r;
+ z__1.r = csumj.r + z__2.r, z__1.i = csumj.i +
+ z__2.i;
+ csumj.r = z__1.r, csumj.i = z__1.i;
+/* L180: */
+ }
+ } else if (j < *n) {
+ i__3 = *n;
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ d_cnjg(&z__4, &a[i__ + j * a_dim1]);
+ z__3.r = z__4.r * uscal.r - z__4.i * uscal.i,
+ z__3.i = z__4.r * uscal.i + z__4.i *
+ uscal.r;
+ i__4 = i__;
+ z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i,
+ z__2.i = z__3.r * x[i__4].i + z__3.i * x[
+ i__4].r;
+ z__1.r = csumj.r + z__2.r, z__1.i = csumj.i +
+ z__2.i;
+ csumj.r = z__1.r, csumj.i = z__1.i;
+/* L190: */
+ }
+ }
+ }
+
+ z__1.r = tscal, z__1.i = 0.;
+ if (uscal.r == z__1.r && uscal.i == z__1.i) {
+
+/* Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j) */
+/* was not used to scale the dotproduct. */
+
+ i__3 = j;
+ i__4 = j;
+ z__1.r = x[i__4].r - csumj.r, z__1.i = x[i__4].i -
+ csumj.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ i__3 = j;
+ xj = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[j])
+ , abs(d__2));
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i;
+ tjjs.r = z__1.r, tjjs.i = z__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.;
+ if (tscal == 1.) {
+ goto L210;
+ }
+ }
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ tjj = (d__1 = tjjs.r, abs(d__1)) + (d__2 = d_imag(&tjjs),
+ abs(d__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1. / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ } else if (tjj > 0.) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ zdscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ zladiv_(&z__1, &x[j], &tjjs);
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ } else {
+
+/* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
+/* scale = 0 and compute a solution to A**H *x = 0. */
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0., x[i__4].i = 0.;
+/* L200: */
+ }
+ i__3 = j;
+ x[i__3].r = 1., x[i__3].i = 0.;
+ *scale = 0.;
+ xmax = 0.;
+ }
+L210:
+ ;
+ } else {
+
+/* Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot */
+/* product has already been divided by 1/A(j,j). */
+
+ i__3 = j;
+ zladiv_(&z__2, &x[j], &tjjs);
+ z__1.r = z__2.r - csumj.r, z__1.i = z__2.i - csumj.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ }
+/* Computing MAX */
+ i__3 = j;
+ d__3 = xmax, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&x[j]), abs(d__2));
+ xmax = max(d__3,d__4);
+/* L220: */
+ }
+ }
+ *scale /= tscal;
+ }
+
+/* Scale the column norms by 1/TSCAL for return. */
+
+ if (tscal != 1.) {
+ d__1 = 1. / tscal;
+ dscal_(n, &d__1, &cnorm[1], &c__1);
+ }
+
+ return 0;
+
+/* End of ZLATRS */
+
+} /* zlatrs_ */
diff --git a/contrib/libs/clapack/zlatrz.c b/contrib/libs/clapack/zlatrz.c
new file mode 100644
index 0000000000..21b55c15bd
--- /dev/null
+++ b/contrib/libs/clapack/zlatrz.c
@@ -0,0 +1,181 @@
+/* zlatrz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zlatrz_(integer *m, integer *n, integer *l, 
+	doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+	work)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__;
+    doublecomplex alpha;
+    extern /* Subroutine */ int zlarz_(char *, integer *, integer *, integer *
+, doublecomplex *, integer *, doublecomplex *, doublecomplex *, 
+	    integer *, doublecomplex *), zlacgv_(integer *, 
+	    doublecomplex *, integer *), zlarfp_(integer *, doublecomplex *, 
+	    doublecomplex *, integer *, doublecomplex *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZLATRZ factors the M-by-(M+L) complex upper trapezoidal matrix */
+/*  [ A1 A2 ] = [ A(1:M,1:M) A(1:M,N-L+1:N) ] as ( R  0 ) * Z by means */
+/*  of unitary transformations, where  Z is an (M+L)-by-(M+L) unitary */
+/*  matrix and, R and A1 are M-by-M upper triangular matrices. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix A.  M >= 0. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix A.  N >= 0. */
+
+/*  L       (input) INTEGER */
+/*          The number of columns of the matrix A containing the */
+/*          meaningful part of the Householder vectors. N-M >= L >= 0. */
+
+/*  A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/*          On entry, the leading M-by-N upper trapezoidal part of the */
+/*          array A must contain the matrix to be factorized. */
+/*          On exit, the leading M-by-M upper triangular part of A */
+/*          contains the upper triangular matrix R, and elements N-L+1 to */
+/*          N of the first M rows of A, with the array TAU, represent the */
+/*          unitary matrix Z as a product of M elementary reflectors. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,M). */
+
+/*  TAU     (output) COMPLEX*16 array, dimension (M) */
+/*          The scalar factors of the elementary reflectors. */
+
+/*  WORK    (workspace) COMPLEX*16 array, dimension (M) */
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*    A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/*  The factorization is obtained by Householder's method.  The kth */
+/*  transformation matrix, Z( k ), which is used to introduce zeros into */
+/*  the ( m - k + 1 )th row of A, is given in the form */
+
+/*     Z( k ) = ( I     0   ), */
+/*              ( 0  T( k ) ) */
+
+/*  where */
+
+/*     T( k ) = I - tau*u( k )*u( k )',   u( k ) = (   1    ), */
+/*                                                 (   0    ) */
+/*                                                 ( z( k ) ) */
+
+/*  tau is a scalar and z( k ) is an l element vector. tau and z( k ) */
+/*  are chosen to annihilate the elements of the kth row of A2. */
+
+/*  The scalar tau is returned in the kth element of TAU and the vector */
+/*  u( k ) in the kth row of A2, such that the elements of z( k ) are */
+/*  in  a( k, l + 1 ), ..., a( k, n ). The elements of R are returned in */
+/*  the upper triangular part of A1. */
+
+/*  Z is given by */
+
+/*     Z =  Z( 1 ) * Z( 2 ) * ... * Z( m ). */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Quick return if possible */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    if (*m == 0) {
+	return 0;
+    } else if (*m == *n) {
+
+/*        M = N forces L = 0 (since N-M >= L >= 0): the trapezoidal */
+/*        matrix is already upper triangular, so no reflectors are */
+/*        needed and the scalar factors TAU are simply set to zero. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__;
+	    tau[i__2].r = 0., tau[i__2].i = 0.;
+/* L10: */
+	}
+	return 0;
+    }
+
+/*     Eliminate the trailing L columns, processing rows M down to 1. */
+
+    for (i__ = *m; i__ >= 1; --i__) {
+
+/*        Generate elementary reflector H(i) to annihilate */
+/*        [ A(i,i) A(i,n-l+1:n) ] */
+
+/*        The row A(i,n-l+1:n) and the diagonal entry are conjugated */
+/*        before ZLARFP builds the reflector; TAU (and later the */
+/*        diagonal, via ALPHA) are conjugated back afterwards. */
+
+	zlacgv_(l, &a[i__ + (*n - *l + 1) * a_dim1], lda);
+	d_cnjg(&z__1, &a[i__ + i__ * a_dim1]);
+	alpha.r = z__1.r, alpha.i = z__1.i;
+	i__1 = *l + 1;
+	zlarfp_(&i__1, &alpha, &a[i__ + (*n - *l + 1) * a_dim1], lda, &tau[
+		i__]);
+	i__1 = i__;
+	d_cnjg(&z__1, &tau[i__]);
+	tau[i__1].r = z__1.r, tau[i__1].i = z__1.i;
+
+/*        Apply H(i) to A(1:i-1,i:n) from the right */
+
+	i__1 = i__ - 1;
+	i__2 = *n - i__ + 1;
+	d_cnjg(&z__1, &tau[i__]);
+	zlarz_("Right", &i__1, &i__2, l, &a[i__ + (*n - *l + 1) * a_dim1], 
+		lda, &z__1, &a[i__ * a_dim1 + 1], lda, &work[1]);
+	i__1 = i__ + i__ * a_dim1;
+	d_cnjg(&z__1, &alpha);
+	a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+
+/* L20: */
+    }
+
+    return 0;
+
+/*     End of ZLATRZ */
+
+} /* zlatrz_ */
diff --git a/contrib/libs/clapack/zlatzm.c b/contrib/libs/clapack/zlatzm.c
new file mode 100644
index 0000000000..74b1114569
--- /dev/null
+++ b/contrib/libs/clapack/zlatzm.c
@@ -0,0 +1,198 @@
+/* zlatzm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zlatzm_(char *side, integer *m, integer *n, 
+	doublecomplex *v, integer *incv, doublecomplex *tau, doublecomplex *
+	c1, doublecomplex *c2, integer *ldc, doublecomplex *work)
+{
+    /* System generated locals */
+    integer c1_dim1, c1_offset, c2_dim1, c2_offset, i__1;
+    doublecomplex z__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zgerc_(integer *, integer *, doublecomplex *, 
+	    doublecomplex *, integer *, doublecomplex *, integer *, 
+	    doublecomplex *, integer *), zgemv_(char *, integer *, integer *, 
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *, 
+	    integer *, doublecomplex *, doublecomplex *, integer *), 
+	    zgeru_(integer *, integer *, doublecomplex *, doublecomplex *, 
+	    integer *, doublecomplex *, integer *, doublecomplex *, integer *)
+	    , zcopy_(integer *, doublecomplex *, integer *, doublecomplex *, 
+	    integer *), zaxpy_(integer *, doublecomplex *, doublecomplex *, 
+	    integer *, doublecomplex *, integer *), zlacgv_(integer *, 
+	    doublecomplex *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  This routine is deprecated and has been replaced by routine ZUNMRZ. */
+
+/*  ZLATZM applies a Householder matrix generated by ZTZRQF to a matrix. */
+
+/*  Let P = I - tau*u*u',   u = ( 1 ), */
+/*                              ( v ) */
+/*  where v is an (m-1) vector if SIDE = 'L', or a (n-1) vector if */
+/*  SIDE = 'R'. */
+
+/*  If SIDE equals 'L', let */
+/*         C = [ C1 ] 1 */
+/*             [ C2 ] m-1 */
+/*               n */
+/*  Then C is overwritten by P*C. */
+
+/*  If SIDE equals 'R', let */
+/*         C = [ C1, C2 ] m */
+/*                1  n-1 */
+/*  Then C is overwritten by C*P. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  SIDE    (input) CHARACTER*1 */
+/*          = 'L': form P * C */
+/*          = 'R': form C * P */
+
+/*  M       (input) INTEGER */
+/*          The number of rows of the matrix C. */
+
+/*  N       (input) INTEGER */
+/*          The number of columns of the matrix C. */
+
+/*  V       (input) COMPLEX*16 array, dimension */
+/*                  (1 + (M-1)*abs(INCV)) if SIDE = 'L' */
+/*                  (1 + (N-1)*abs(INCV)) if SIDE = 'R' */
+/*          The vector v in the representation of P. V is not used */
+/*          if TAU = 0. */
+
+/*  INCV    (input) INTEGER */
+/*          The increment between elements of v. INCV <> 0 */
+
+/*  TAU     (input) COMPLEX*16 */
+/*          The value tau in the representation of P. */
+
+/*  C1      (input/output) COMPLEX*16 array, dimension */
+/*                         (LDC,N) if SIDE = 'L' */
+/*                         (M,1)   if SIDE = 'R' */
+/*          On entry, the n-vector C1 if SIDE = 'L', or the m-vector C1 */
+/*          if SIDE = 'R'. */
+
+/*          On exit, the first row of P*C if SIDE = 'L', or the first */
+/*          column of C*P if SIDE = 'R'. */
+
+/*  C2      (input/output) COMPLEX*16 array, dimension */
+/*                         (LDC, N)   if SIDE = 'L' */
+/*                         (LDC, N-1) if SIDE = 'R' */
+/*          On entry, the (m - 1) x n matrix C2 if SIDE = 'L', or the */
+/*          m x (n - 1) matrix C2 if SIDE = 'R'. */
+
+/*          On exit, rows 2:m of P*C if SIDE = 'L', or columns 2:m of C*P */
+/*          if SIDE = 'R'. */
+
+/*  LDC     (input) INTEGER */
+/*          The leading dimension of the arrays C1 and C2. */
+/*          LDC >= max(1,M). */
+
+/*  WORK    (workspace) COMPLEX*16 array, dimension */
+/*                      (N) if SIDE = 'L' */
+/*                      (M) if SIDE = 'R' */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    --v;
+    c2_dim1 = *ldc;
+    c2_offset = 1 + c2_dim1;
+    c2 -= c2_offset;
+    c1_dim1 = *ldc;
+    c1_offset = 1 + c1_dim1;
+    c1 -= c1_offset;
+    --work;
+
+    /* Function Body */
+
+/*     Quick return when C is empty or TAU = 0, in which case P = I */
+/*     and C is left unchanged. */
+
+    if (min(*m,*n) == 0 || tau->r == 0. && tau->i == 0.) {
+	return 0;
+    }
+
+    if (lsame_(side, "L")) {
+
+/*        w := conjg( C1 + v' * C2 ) */
+
+	zcopy_(n, &c1[c1_offset], ldc, &work[1], &c__1);
+	zlacgv_(n, &work[1], &c__1);
+	i__1 = *m - 1;
+	zgemv_("Conjugate transpose", &i__1, n, &c_b1, &c2[c2_offset], ldc, &
+		v[1], incv, &c_b1, &work[1], &c__1);
+
+/*        [ C1 ] := [ C1 ] - tau* [ 1 ] * w' */
+/*        [ C2 ]    [ C2 ]        [ v ] */
+
+	zlacgv_(n, &work[1], &c__1);
+	z__1.r = -tau->r, z__1.i = -tau->i;
+	zaxpy_(n, &z__1, &work[1], &c__1, &c1[c1_offset], ldc);
+	i__1 = *m - 1;
+	z__1.r = -tau->r, z__1.i = -tau->i;
+	zgeru_(&i__1, n, &z__1, &v[1], incv, &work[1], &c__1, &c2[c2_offset], 
+		ldc);
+
+    } else if (lsame_(side, "R")) {
+
+/*        w := C1 + C2 * v */
+
+	zcopy_(m, &c1[c1_offset], &c__1, &work[1], &c__1);
+	i__1 = *n - 1;
+	zgemv_("No transpose", m, &i__1, &c_b1, &c2[c2_offset], ldc, &v[1], 
+		incv, &c_b1, &work[1], &c__1);
+
+/*        [ C1, C2 ] := [ C1, C2 ] - tau* w * [ 1 , v'] */
+
+	z__1.r = -tau->r, z__1.i = -tau->i;
+	zaxpy_(m, &z__1, &work[1], &c__1, &c1[c1_offset], &c__1);
+	i__1 = *n - 1;
+	z__1.r = -tau->r, z__1.i = -tau->i;
+	zgerc_(m, &i__1, &z__1, &work[1], &c__1, &v[1], incv, &c2[c2_offset], 
+		ldc);
+    }
+
+/*     NOTE: this deprecated routine performs no argument checking; if */
+/*     SIDE is neither 'L' nor 'R', C is silently left unmodified. */
+
+    return 0;
+
+/*     End of ZLATZM */
+
+} /* zlatzm_ */
diff --git a/contrib/libs/clapack/zlauu2.c b/contrib/libs/clapack/zlauu2.c
new file mode 100644
index 0000000000..e593c5aab2
--- /dev/null
+++ b/contrib/libs/clapack/zlauu2.c
@@ -0,0 +1,203 @@
+/* zlauu2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zlauu2_(char *uplo, integer *n, doublecomplex *a,
+	integer *lda, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    doublereal d__1;
+    doublecomplex z__1;
+
+    /* Local variables */
+    integer i__;
+    doublereal aii;
+    extern logical lsame_(char *, char *);
+    extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *);
+    extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, doublecomplex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+	    integer *, doublereal *, doublecomplex *, integer *), zlacgv_(
+	    integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAUU2 computes the product U * U' or L' * L, where the triangular */
+/* factor U or L is stored in the upper or lower triangular part of */
+/* the array A. */
+
+/* If UPLO = 'U' or 'u' then the upper triangle of the result is stored, */
+/* overwriting the factor U in A. */
+/* If UPLO = 'L' or 'l' then the lower triangle of the result is stored, */
+/* overwriting the factor L in A. */
+
+/* This is the unblocked form of the algorithm, calling Level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO    (input) CHARACTER*1 */
+/* Specifies whether the triangular factor stored in the array A */
+/* is upper or lower triangular: */
+/* = 'U':  Upper triangular */
+/* = 'L':  Lower triangular */
+
+/* N       (input) INTEGER */
+/* The order of the triangular factor U or L.  N >= 0. */
+
+/* A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the triangular factor U or L. */
+/* On exit, if UPLO = 'U', the upper triangle of A is */
+/* overwritten with the upper triangle of the product U * U'; */
+/* if UPLO = 'L', the lower triangle of A is overwritten with */
+/* the lower triangle of the product L' * L. */
+
+/* LDA     (input) INTEGER */
+/* The leading dimension of the array A.  LDA >= max(1,N). */
+
+/* INFO    (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZLAUU2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/* Compute the product U * U'. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__ + i__ * a_dim1;
+/*           Only the real part of the diagonal entry U(i,i) is used. */
+	    aii = a[i__2].r;
+	    if (i__ < *n) {
+		i__2 = i__ + i__ * a_dim1;
+		i__3 = *n - i__;
+/*              A(i,i) := AII**2 + squared 2-norm of the row U(i,i+1:n). */
+		zdotc_(&z__1, &i__3, &a[i__ + (i__ + 1) * a_dim1], lda, &a[
+			i__ + (i__ + 1) * a_dim1], lda);
+		d__1 = aii * aii + z__1.r;
+		a[i__2].r = d__1, a[i__2].i = 0.;
+		i__2 = *n - i__;
+/*              Conjugate the row, apply the update to column i above the */
+/*              diagonal via ZGEMV, then restore the row. */
+		zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+		i__2 = i__ - 1;
+		i__3 = *n - i__;
+		z__1.r = aii, z__1.i = 0.;
+		zgemv_("No transpose", &i__2, &i__3, &c_b1, &a[(i__ + 1) *
+			a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
+			z__1, &a[i__ * a_dim1 + 1], &c__1);
+		i__2 = *n - i__;
+		zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+	    } else {
+/*              Last column: scale A(1:i,i) by the real diagonal entry. */
+		zdscal_(&i__, &aii, &a[i__ * a_dim1 + 1], &c__1);
+	    }
+/* L10: */
+	}
+
+    } else {
+
+/* Compute the product L' * L. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__ + i__ * a_dim1;
+/*           Only the real part of the diagonal entry L(i,i) is used. */
+	    aii = a[i__2].r;
+	    if (i__ < *n) {
+		i__2 = i__ + i__ * a_dim1;
+		i__3 = *n - i__;
+/*              A(i,i) := AII**2 + squared 2-norm of the column L(i+1:n,i). */
+		zdotc_(&z__1, &i__3, &a[i__ + 1 + i__ * a_dim1], &c__1, &a[
+			i__ + 1 + i__ * a_dim1], &c__1);
+		d__1 = aii * aii + z__1.r;
+		a[i__2].r = d__1, a[i__2].i = 0.;
+		i__2 = i__ - 1;
+/*              Conjugate row i to the left of the diagonal, update it via */
+/*              ZGEMV with the trailing rows, then restore it. */
+		zlacgv_(&i__2, &a[i__ + a_dim1], lda);
+		i__2 = *n - i__;
+		i__3 = i__ - 1;
+		z__1.r = aii, z__1.i = 0.;
+		zgemv_("Conjugate transpose", &i__2, &i__3, &c_b1, &a[i__ + 1
+			+ a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+			z__1, &a[i__ + a_dim1], lda);
+		i__2 = i__ - 1;
+		zlacgv_(&i__2, &a[i__ + a_dim1], lda);
+	    } else {
+/*              Last row: scale A(i,1:i) by the real diagonal entry. */
+		zdscal_(&i__, &aii, &a[i__ + a_dim1], lda);
+	    }
+/* L20: */
+	}
+    }
+
+    return 0;
+
+/* End of ZLAUU2 */
+
+} /* zlauu2_ */
diff --git a/contrib/libs/clapack/zlauum.c b/contrib/libs/clapack/zlauum.c
new file mode 100644
index 0000000000..21c37890c1
--- /dev/null
+++ b/contrib/libs/clapack/zlauum.c
@@ -0,0 +1,217 @@
+/* zlauum.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b21 = 1.;
+
+/* Subroutine */ int zlauum_(char *uplo, integer *n, doublecomplex *a,
+	integer *lda, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, ib, nb;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *), zherk_(char *, char *, integer *,
+	    integer *, doublereal *, doublecomplex *, integer *, doublereal *,
+	    doublecomplex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int ztrmm_(char *, char *, char *, char *,
+	    integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *, integer *),
+	    zlauu2_(char *, integer *, doublecomplex *, integer *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZLAUUM computes the product U * U' or L' * L, where the triangular */
+/* factor U or L is stored in the upper or lower triangular part of */
+/* the array A. */
+
+/* If UPLO = 'U' or 'u' then the upper triangle of the result is stored, */
+/* overwriting the factor U in A. */
+/* If UPLO = 'L' or 'l' then the lower triangle of the result is stored, */
+/* overwriting the factor L in A. */
+
+/* This is the blocked form of the algorithm, calling Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO    (input) CHARACTER*1 */
+/* Specifies whether the triangular factor stored in the array A */
+/* is upper or lower triangular: */
+/* = 'U':  Upper triangular */
+/* = 'L':  Lower triangular */
+
+/* N       (input) INTEGER */
+/* The order of the triangular factor U or L.  N >= 0. */
+
+/* A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the triangular factor U or L. */
+/* On exit, if UPLO = 'U', the upper triangle of A is */
+/* overwritten with the upper triangle of the product U * U'; */
+/* if UPLO = 'L', the lower triangle of A is overwritten with */
+/* the lower triangle of the product L' * L. */
+
+/* LDA     (input) INTEGER */
+/* The leading dimension of the array A.  LDA >= max(1,N). */
+
+/* INFO    (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZLAUUM", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Determine the block size for this environment. */
+
+    nb = ilaenv_(&c__1, "ZLAUUM", uplo, n, &c_n1, &c_n1, &c_n1);
+
+    if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code */
+
+	zlauu2_(uplo, n, &a[a_offset], lda, info);
+    } else {
+
+/* Use blocked code */
+
+	if (upper) {
+
+/* Compute the product U * U'. */
+
+	    i__1 = *n;
+	    i__2 = nb;
+	    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+		i__3 = nb, i__4 = *n - i__ + 1;
+/*              IB is the width of the current diagonal block. */
+		ib = min(i__3,i__4);
+		i__3 = i__ - 1;
+/*              Update the block column above the diagonal block. */
+		ztrmm_("Right", "Upper", "Conjugate transpose", "Non-unit", &
+			i__3, &ib, &c_b1, &a[i__ + i__ * a_dim1], lda, &a[i__
+			* a_dim1 + 1], lda);
+/*              Form the product for the diagonal block with unblocked code. */
+		zlauu2_("Upper", &ib, &a[i__ + i__ * a_dim1], lda, info);
+		if (i__ + ib <= *n) {
+		    i__3 = i__ - 1;
+		    i__4 = *n - i__ - ib + 1;
+/*                 Add contributions from the columns to the right of the */
+/*                 current block. */
+		    zgemm_("No transpose", "Conjugate transpose", &i__3, &ib,
+			    &i__4, &c_b1, &a[(i__ + ib) * a_dim1 + 1], lda, &
+			    a[i__ + (i__ + ib) * a_dim1], lda, &c_b1, &a[i__ *
+			    a_dim1 + 1], lda);
+		    i__3 = *n - i__ - ib + 1;
+		    zherk_("Upper", "No transpose", &ib, &i__3, &c_b21, &a[
+			    i__ + (i__ + ib) * a_dim1], lda, &c_b21, &a[i__ +
+			    i__ * a_dim1], lda);
+		}
+/* L10: */
+	    }
+	} else {
+
+/* Compute the product L' * L. */
+
+	    i__2 = *n;
+	    i__1 = nb;
+	    for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+/* Computing MIN */
+		i__3 = nb, i__4 = *n - i__ + 1;
+/*              IB is the width of the current diagonal block. */
+		ib = min(i__3,i__4);
+		i__3 = i__ - 1;
+/*              Update the block row to the left of the diagonal block. */
+		ztrmm_("Left", "Lower", "Conjugate transpose", "Non-unit", &
+			ib, &i__3, &c_b1, &a[i__ + i__ * a_dim1], lda, &a[i__
+			+ a_dim1], lda);
+/*              Form the product for the diagonal block with unblocked code. */
+		zlauu2_("Lower", &ib, &a[i__ + i__ * a_dim1], lda, info);
+		if (i__ + ib <= *n) {
+		    i__3 = i__ - 1;
+		    i__4 = *n - i__ - ib + 1;
+/*                 Add contributions from the rows below the current block. */
+		    zgemm_("Conjugate transpose", "No transpose", &ib, &i__3,
+			    &i__4, &c_b1, &a[i__ + ib + i__ * a_dim1], lda, &
+			    a[i__ + ib + a_dim1], lda, &c_b1, &a[i__ + a_dim1]
+, lda);
+		    i__3 = *n - i__ - ib + 1;
+		    zherk_("Lower", "Conjugate transpose", &ib, &i__3, &c_b21,
+			     &a[i__ + ib + i__ * a_dim1], lda, &c_b21, &a[i__
+			    + i__ * a_dim1], lda);
+		}
+/* L20: */
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of ZLAUUM */
+
+} /* zlauum_ */
diff --git a/contrib/libs/clapack/zpbcon.c b/contrib/libs/clapack/zpbcon.c
new file mode 100644
index 0000000000..bbfa6dfa07
--- /dev/null
+++ b/contrib/libs/clapack/zpbcon.c
@@ -0,0 +1,236 @@
+/* zpbcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zpbcon_(char *uplo, integer *n, integer *kd,
+	doublecomplex *ab, integer *ldab, doublereal *anorm, doublereal *
+	rcond, doublecomplex *work, doublereal *rwork, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *);
+
+    /* Local variables */
+    integer ix, kase;
+    doublereal scale;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    logical upper;
+    extern /* Subroutine */ int zlacn2_(integer *, doublecomplex *,
+	    doublecomplex *, doublereal *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    doublereal scalel, scaleu;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal ainvnm;
+    extern integer izamax_(integer *, doublecomplex *, integer *);
+    extern /* Subroutine */ int zlatbs_(char *, char *, char *, char *,
+	    integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublereal *, doublereal *, integer *), zdrscl_(integer *, doublereal *, doublecomplex *,
+	    integer *);
+    char normin[1];
+    doublereal smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPBCON estimates the reciprocal of the condition number (in the */
+/* 1-norm) of a complex Hermitian positive definite band matrix using */
+/* the Cholesky factorization A = U**H*U or A = L*L**H computed by */
+/* ZPBTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO    (input) CHARACTER*1 */
+/* = 'U':  Upper triangular factor stored in AB; */
+/* = 'L':  Lower triangular factor stored in AB. */
+
+/* N       (input) INTEGER */
+/* The order of the matrix A.  N >= 0. */
+
+/* KD      (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of sub-diagonals if UPLO = 'L'.  KD >= 0. */
+
+/* AB      (input) COMPLEX*16 array, dimension (LDAB,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**H*U or A = L*L**H of the band matrix A, stored in the */
+/* first KD+1 rows of the array.  The j-th column of U or L is */
+/* stored in the j-th column of the array AB as follows: */
+/* if UPLO ='U', AB(kd+1+i-j,j) = U(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO ='L', AB(1+i-j,j)    = L(i,j) for j<=i<=min(n,j+kd). */
+
+/* LDAB    (input) INTEGER */
+/* The leading dimension of the array AB.  LDAB >= KD+1. */
+
+/* ANORM   (input) DOUBLE PRECISION */
+/* The 1-norm (or infinity-norm) of the Hermitian band matrix A. */
+
+/* RCOND   (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK    (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK   (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO    (output) INTEGER */
+/* = 0:  successful exit */
+/* < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*ldab < *kd + 1) {
+	*info = -5;
+    } else if (*anorm < 0.) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZPBCON", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *rcond = 0.;
+    if (*n == 0) {
+	*rcond = 1.;
+	return 0;
+    } else if (*anorm == 0.) {
+	return 0;
+    }
+
+    smlnum = dlamch_("Safe minimum");
+
+/* Estimate the 1-norm of the inverse. */
+
+/* ZLACN2 is driven by reverse communication: it sets KASE to request */
+/* another solve with inv(A) and sets KASE = 0 when the estimate AINVNM */
+/* has converged. */
+
+    kase = 0;
+    *(unsigned char *)normin = 'N';
+L10:
+    zlacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+    if (kase != 0) {
+	if (upper) {
+
+/* Multiply by inv(U'). */
+
+	    zlatbs_("Upper", "Conjugate transpose", "Non-unit", normin, n, kd,
+		     &ab[ab_offset], ldab, &work[1], &scalel, &rwork[1], info);
+/*          NORMIN = 'Y': later ZLATBS calls may reuse the column norms */
+/*          already stored in RWORK. */
+	    *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(U). */
+
+	    zlatbs_("Upper", "No transpose", "Non-unit", normin, n, kd, &ab[
+		    ab_offset], ldab, &work[1], &scaleu, &rwork[1], info);
+	} else {
+
+/* Multiply by inv(L). */
+
+	    zlatbs_("Lower", "No transpose", "Non-unit", normin, n, kd, &ab[
+		    ab_offset], ldab, &work[1], &scalel, &rwork[1], info);
+/*          NORMIN = 'Y': later ZLATBS calls may reuse the column norms */
+/*          already stored in RWORK. */
+	    *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(L'). */
+
+	    zlatbs_("Lower", "Conjugate transpose", "Non-unit", normin, n, kd,
+		     &ab[ab_offset], ldab, &work[1], &scaleu, &rwork[1], info);
+	}
+
+/* Multiply by 1/SCALE if doing so will not cause overflow. */
+
+	scale = scalel * scaleu;
+	if (scale != 1.) {
+	    ix = izamax_(n, &work[1], &c__1);
+	    i__1 = ix;
+/*          If rescaling would overflow (or SCALE is exactly zero), give */
+/*          up and return with RCOND left at zero. */
+	    if (scale < ((d__1 = work[i__1].r, abs(d__1)) + (d__2 = d_imag(&
+		    work[ix]), abs(d__2))) * smlnum || scale == 0.) {
+		goto L20;
+	    }
+	    zdrscl_(n, &scale, &work[1], &c__1);
+	}
+	goto L10;
+    }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+    if (ainvnm != 0.) {
+	*rcond = 1. / ainvnm / *anorm;
+    }
+
+L20:
+
+    return 0;
+
+/* End of ZPBCON */
+
+} /* zpbcon_ */
diff --git a/contrib/libs/clapack/zpbequ.c b/contrib/libs/clapack/zpbequ.c
new file mode 100644
index 0000000000..49bbad013f
--- /dev/null
+++ b/contrib/libs/clapack/zpbequ.c
@@ -0,0 +1,205 @@
+/* zpbequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zpbequ_(char *uplo, integer *n, integer *kd,
+	doublecomplex *ab, integer *ldab, doublereal *s, doublereal *scond,
+	doublereal *amax, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j;
+    doublereal smin;
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPBEQU computes row and column scalings intended to equilibrate a */
+/* Hermitian positive definite band matrix A and reduce its condition */
+/* number (with respect to the two-norm).  S contains the scale factors, */
+/* S(i) = 1/sqrt(A(i,i)), chosen so that the scaled matrix B with */
+/* elements B(i,j) = S(i)*A(i,j)*S(j) has ones on the diagonal.  This */
+/* choice of S puts the condition number of B within a factor N of the */
+/* smallest possible condition number over all possible diagonal */
+/* scalings. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO    (input) CHARACTER*1 */
+/* = 'U':  Upper triangular of A is stored; */
+/* = 'L':  Lower triangular of A is stored. */
+
+/* N       (input) INTEGER */
+/* The order of the matrix A.  N >= 0. */
+
+/* KD      (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'.  KD >= 0. */
+
+/* AB      (input) COMPLEX*16 array, dimension (LDAB,N) */
+/* The upper or lower triangle of the Hermitian band matrix A, */
+/* stored in the first KD+1 rows of the array.  The j-th column */
+/* of A is stored in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j)    = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* LDAB     (input) INTEGER */
+/* The leading dimension of the array A.  LDAB >= KD+1. */
+
+/* S       (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, S contains the scale factors for A. */
+
+/* SCOND   (output) DOUBLE PRECISION */
+/* If INFO = 0, S contains the ratio of the smallest S(i) to */
+/* the largest S(i).  If SCOND >= 0.1 and AMAX is neither too */
+/* large nor too small, it is not worth scaling by S. */
+
+/* AMAX    (output) DOUBLE PRECISION */
+/* Absolute value of largest matrix element.  If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO    (output) INTEGER */
+/* = 0:  successful exit */
+/* < 0:  if INFO = -i, the i-th argument had an illegal value. */
+/* > 0:  if INFO = i, the i-th diagonal element is nonpositive. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --s;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*ldab < *kd + 1) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZPBEQU", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	*scond = 1.;
+	*amax = 0.;
+	return 0;
+    }
+
+/* J is the row of AB that holds the main diagonal in band storage: */
+/* row KD+1 when the upper triangle is stored, row 1 when the lower */
+/* triangle is stored (see the AB storage scheme above). */
+
+    if (upper) {
+	j = *kd + 1;
+    } else {
+	j = 1;
+    }
+
+/* Initialize SMIN and AMAX. */
+
+/* Only the real parts of the diagonal entries are referenced. */
+
+    i__1 = j + ab_dim1;
+    s[1] = ab[i__1].r;
+    smin = s[1];
+    *amax = s[1];
+
+/* Find the minimum and maximum diagonal elements. */
+
+    i__1 = *n;
+    for (i__ = 2; i__ <= i__1; ++i__) {
+	i__2 = j + i__ * ab_dim1;
+	s[i__] = ab[i__2].r;
+/* Computing MIN */
+	d__1 = smin, d__2 = s[i__];
+	smin = min(d__1,d__2);
+/* Computing MAX */
+	d__1 = *amax, d__2 = s[i__];
+	*amax = max(d__1,d__2);
+/* L10: */
+    }
+
+    if (smin <= 0.) {
+
+/* Find the first non-positive diagonal element and return. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (s[i__] <= 0.) {
+		*info = i__;
+		return 0;
+	    }
+/* L20: */
+	}
+    } else {
+
+/* Set the scale factors to the reciprocals */
+/* of the diagonal elements. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    s[i__] = 1. / sqrt(s[i__]);
+/* L30: */
+	}
+
+/* Compute SCOND = min(S(I)) / max(S(I)) */
+
+	*scond = sqrt(smin) / sqrt(*amax);
+    }
+    return 0;
+
+/* End of ZPBEQU */
+
+} /* zpbequ_ */
diff --git a/contrib/libs/clapack/zpbrfs.c b/contrib/libs/clapack/zpbrfs.c
new file mode 100644
index 0000000000..6df330e793
--- /dev/null
+++ b/contrib/libs/clapack/zpbrfs.c
@@ -0,0 +1,483 @@
+/* zpbrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zpbrfs_(char *uplo, integer *n, integer *kd, integer *
+ nrhs, doublecomplex *ab, integer *ldab, doublecomplex *afb, integer *
+ ldafb, doublecomplex *b, integer *ldb, doublecomplex *x, integer *ldx,
+ doublereal *ferr, doublereal *berr, doublecomplex *work, doublereal *
+ rwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, afb_dim1, afb_offset, b_dim1, b_offset,
+ x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k, l;
+ doublereal s, xk;
+ integer nz;
+ doublereal eps;
+ integer kase;
+ doublereal safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Subroutine */ int zhbmv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *);
+ integer count;
+ logical upper;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zaxpy_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), zlacn2_(
+ integer *, doublecomplex *, doublecomplex *, doublereal *,
+ integer *, integer *);
+ extern doublereal dlamch_(char *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal lstres;
+ extern /* Subroutine */ int zpbtrs_(char *, integer *, integer *, integer
+ *, doublecomplex *, integer *, doublecomplex *, integer *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPBRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is Hermitian positive definite */
+/* and banded, and provides error bounds and backward error estimates */
+/* for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AB (input) DOUBLE PRECISION array, dimension (LDAB,N) */
+/* The upper or lower triangle of the Hermitian band matrix A, */
+/* stored in the first KD+1 rows of the array. The j-th column */
+/* of A is stored in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* AFB (input) COMPLEX*16 array, dimension (LDAFB,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**H*U or A = L*L**H of the band matrix A as computed by */
+/* ZPBTRF, in the same storage format as A (see AB). */
+
+/* LDAFB (input) INTEGER */
+/* The leading dimension of the array AFB. LDAFB >= KD+1. */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by ZPBTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ afb_dim1 = *ldafb;
+ afb_offset = 1 + afb_dim1;
+ afb -= afb_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*kd < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*ldab < *kd + 1) {
+ *info = -6;
+ } else if (*ldafb < *kd + 1) {
+ *info = -8;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldx < max(1,*n)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPBRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.;
+ berr[j] = 0.;
+/* L10: */
+ }
+ return 0;
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+/* Computing MIN */
+ i__1 = *n + 1, i__2 = (*kd << 1) + 2;
+ nz = min(i__1,i__2);
+ eps = dlamch_("Epsilon");
+ safmin = dlamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+ zcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+ z__1.r = -1., z__1.i = -0.;
+ zhbmv_(uplo, n, kd, &z__1, &ab[ab_offset], ldab, &x[j * x_dim1 + 1], &
+ c__1, &c_b1, &work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[i__] = (d__1 = b[i__3].r, abs(d__1)) + (d__2 = d_imag(&b[
+ i__ + j * b_dim1]), abs(d__2));
+/* L30: */
+ }
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+ if (upper) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ i__3 = k + j * x_dim1;
+ xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[k + j *
+ x_dim1]), abs(d__2));
+ l = *kd + 1 - k;
+/* Computing MAX */
+ i__3 = 1, i__4 = k - *kd;
+ i__5 = k - 1;
+ for (i__ = max(i__3,i__4); i__ <= i__5; ++i__) {
+ i__3 = l + i__ + k * ab_dim1;
+ rwork[i__] += ((d__1 = ab[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&ab[l + i__ + k * ab_dim1]), abs(d__2))) *
+ xk;
+ i__3 = l + i__ + k * ab_dim1;
+ i__4 = i__ + j * x_dim1;
+ s += ((d__1 = ab[i__3].r, abs(d__1)) + (d__2 = d_imag(&ab[
+ l + i__ + k * ab_dim1]), abs(d__2))) * ((d__3 = x[
+ i__4].r, abs(d__3)) + (d__4 = d_imag(&x[i__ + j *
+ x_dim1]), abs(d__4)));
+/* L40: */
+ }
+ i__5 = *kd + 1 + k * ab_dim1;
+ rwork[k] = rwork[k] + (d__1 = ab[i__5].r, abs(d__1)) * xk + s;
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ i__5 = k + j * x_dim1;
+ xk = (d__1 = x[i__5].r, abs(d__1)) + (d__2 = d_imag(&x[k + j *
+ x_dim1]), abs(d__2));
+ i__5 = k * ab_dim1 + 1;
+ rwork[k] += (d__1 = ab[i__5].r, abs(d__1)) * xk;
+ l = 1 - k;
+/* Computing MIN */
+ i__3 = *n, i__4 = k + *kd;
+ i__5 = min(i__3,i__4);
+ for (i__ = k + 1; i__ <= i__5; ++i__) {
+ i__3 = l + i__ + k * ab_dim1;
+ rwork[i__] += ((d__1 = ab[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&ab[l + i__ + k * ab_dim1]), abs(d__2))) *
+ xk;
+ i__3 = l + i__ + k * ab_dim1;
+ i__4 = i__ + j * x_dim1;
+ s += ((d__1 = ab[i__3].r, abs(d__1)) + (d__2 = d_imag(&ab[
+ l + i__ + k * ab_dim1]), abs(d__2))) * ((d__3 = x[
+ i__4].r, abs(d__3)) + (d__4 = d_imag(&x[i__ + j *
+ x_dim1]), abs(d__4)));
+/* L60: */
+ }
+ rwork[k] += s;
+/* L70: */
+ }
+ }
+ s = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+/* Computing MAX */
+ i__5 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2))) / rwork[i__];
+ s = max(d__3,d__4);
+ } else {
+/* Computing MAX */
+ i__5 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + safe1) / (rwork[i__]
+ + safe1);
+ s = max(d__3,d__4);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ zpbtrs_(uplo, n, kd, &c__1, &afb[afb_offset], ldafb, &work[1], n,
+ info);
+ zaxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use ZLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+ i__5 = i__;
+ rwork[i__] = (d__1 = work[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ ;
+ } else {
+ i__5 = i__;
+ rwork[i__] = (d__1 = work[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ + safe1;
+ }
+/* L90: */
+ }
+
+ kase = 0;
+L100:
+ zlacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+ zpbtrs_(uplo, n, kd, &c__1, &afb[afb_offset], ldafb, &work[1],
+ n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__5 = i__;
+ i__3 = i__;
+ i__4 = i__;
+ z__1.r = rwork[i__3] * work[i__4].r, z__1.i = rwork[i__3]
+ * work[i__4].i;
+ work[i__5].r = z__1.r, work[i__5].i = z__1.i;
+/* L110: */
+ }
+ } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__5 = i__;
+ i__3 = i__;
+ i__4 = i__;
+ z__1.r = rwork[i__3] * work[i__4].r, z__1.i = rwork[i__3]
+ * work[i__4].i;
+ work[i__5].r = z__1.r, work[i__5].i = z__1.i;
+/* L120: */
+ }
+ zpbtrs_(uplo, n, kd, &c__1, &afb[afb_offset], ldafb, &work[1],
+ n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__5 = i__ + j * x_dim1;
+ d__3 = lstres, d__4 = (d__1 = x[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&x[i__ + j * x_dim1]), abs(d__2));
+ lstres = max(d__3,d__4);
+/* L130: */
+ }
+ if (lstres != 0.) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of ZPBRFS */
+
+} /* zpbrfs_ */
diff --git a/contrib/libs/clapack/zpbstf.c b/contrib/libs/clapack/zpbstf.c
new file mode 100644
index 0000000000..93481ac9a9
--- /dev/null
+++ b/contrib/libs/clapack/zpbstf.c
@@ -0,0 +1,334 @@
+/* zpbstf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b9 = -1.;
+
+/* Subroutine */ int zpbstf_(char *uplo, integer *n, integer *kd,
+	doublecomplex *ab, integer *ldab, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer j, m, km;
+    doublereal ajj;
+    integer kld;
+    extern /* Subroutine */ int zher_(char *, integer *, doublereal *,
+	    doublecomplex *, integer *, doublecomplex *, integer *);
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+	    integer *, doublereal *, doublecomplex *, integer *), zlacgv_(
+	    integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPBSTF computes a split Cholesky factorization of a complex */
+/* Hermitian positive definite band matrix A. */
+
+/* This routine is designed to be used in conjunction with ZHBGST. */
+
+/* The factorization has the form A = S**H*S where S is a band matrix */
+/* of the same bandwidth as A and the following structure: */
+
+/* S = ( U ) */
+/* ( M L ) */
+
+/* where U is upper triangular of order m = (n+kd)/2, and L is lower */
+/* triangular of order n-m. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) COMPLEX*16 array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first kd+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, if INFO = 0, the factor S from the split Cholesky */
+/* factorization A = S**H*S. See Further Details. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the factorization could not be completed, */
+/* because the updated element a(i,i) was negative; the */
+/* matrix A is not positive definite. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* N = 7, KD = 2: */
+
+/* S = ( s11 s12 s13 ) */
+/* ( s22 s23 s24 ) */
+/* ( s33 s34 ) */
+/* ( s44 ) */
+/* ( s53 s54 s55 ) */
+/* ( s64 s65 s66 ) */
+/* ( s75 s76 s77 ) */
+
+/* If UPLO = 'U', the array AB holds: */
+
+/* on entry: on exit: */
+
+/* * * a13 a24 a35 a46 a57 * * s13 s24 s53' s64' s75' */
+/* * a12 a23 a34 a45 a56 a67 * s12 s23 s34 s54' s65' s76' */
+/* a11 a22 a33 a44 a55 a66 a77 s11 s22 s33 s44 s55 s66 s77 */
+
+/* If UPLO = 'L', the array AB holds: */
+
+/* on entry: on exit: */
+
+/* a11 a22 a33 a44 a55 a66 a77 s11 s22 s33 s44 s55 s66 s77 */
+/* a21 a32 a43 a54 a65 a76 * s12' s23' s34' s54 s65 s76 * */
+/* a31 a42 a53 a64 a75 * * s13' s24' s53 s64 s75 * * */
+
+/* Array elements marked * are not used by the routine; s12' denotes */
+/* conjg(s12); the diagonal elements of S are real. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*ldab < *kd + 1) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZPBSTF", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* KLD is the increment used to step along a row of the band array AB; */
+/* it is clamped to at least 1 so it is always a valid BLAS increment */
+/* for the zdscal/zlacgv/zher calls below. */
+
+/* Computing MAX */
+    i__1 = 1, i__2 = *ldab - 1;
+    kld = max(i__1,i__2);
+
+/* Set the splitting point m. */
+
+    m = (*n + *kd) / 2;
+
+    if (upper) {
+
+/* Factorize A(m+1:n,m+1:n) as L**H*L, and update A(1:m,1:m). */
+
+	i__1 = m + 1;
+	for (j = *n; j >= i__1; --j) {
+
+/* Compute s(j,j) and test for non-positive-definiteness. */
+
+	    i__2 = *kd + 1 + j * ab_dim1;
+	    ajj = ab[i__2].r;
+	    if (ajj <= 0.) {
+		i__2 = *kd + 1 + j * ab_dim1;
+		ab[i__2].r = ajj, ab[i__2].i = 0.;
+		goto L50;
+	    }
+	    ajj = sqrt(ajj);
+	    i__2 = *kd + 1 + j * ab_dim1;
+	    ab[i__2].r = ajj, ab[i__2].i = 0.;
+/* Computing MIN */
+	    i__2 = j - 1;
+	    km = min(i__2,*kd);
+
+/* Compute elements j-km:j-1 of the j-th column and update the */
+/* leading submatrix within the band. */
+
+	    d__1 = 1. / ajj;
+	    zdscal_(&km, &d__1, &ab[*kd + 1 - km + j * ab_dim1], &c__1);
+	    zher_("Upper", &km, &c_b9, &ab[*kd + 1 - km + j * ab_dim1], &c__1,
+		    &ab[*kd + 1 + (j - km) * ab_dim1], &kld);
+/* L10: */
+	}
+
+/* Factorize the updated submatrix A(1:m,1:m) as U**H*U. */
+
+	i__1 = m;
+	for (j = 1; j <= i__1; ++j) {
+
+/* Compute s(j,j) and test for non-positive-definiteness. */
+
+	    i__2 = *kd + 1 + j * ab_dim1;
+	    ajj = ab[i__2].r;
+	    if (ajj <= 0.) {
+		i__2 = *kd + 1 + j * ab_dim1;
+		ab[i__2].r = ajj, ab[i__2].i = 0.;
+		goto L50;
+	    }
+	    ajj = sqrt(ajj);
+	    i__2 = *kd + 1 + j * ab_dim1;
+	    ab[i__2].r = ajj, ab[i__2].i = 0.;
+/* Computing MIN */
+	    i__2 = *kd, i__3 = m - j;
+	    km = min(i__2,i__3);
+
+/* Compute elements j+1:j+km of the j-th row and update the */
+/* trailing submatrix within the band. */
+
+	    if (km > 0) {
+		d__1 = 1. / ajj;
+		zdscal_(&km, &d__1, &ab[*kd + (j + 1) * ab_dim1], &kld);
+/* The stored row holds conjugated values; conjugate it before the */
+/* Hermitian rank-1 update and conjugate it back afterwards so the */
+/* stored data is unchanged apart from the update itself. */
+		zlacgv_(&km, &ab[*kd + (j + 1) * ab_dim1], &kld);
+		zher_("Upper", &km, &c_b9, &ab[*kd + (j + 1) * ab_dim1], &kld,
+			&ab[*kd + 1 + (j + 1) * ab_dim1], &kld);
+		zlacgv_(&km, &ab[*kd + (j + 1) * ab_dim1], &kld);
+	    }
+/* L20: */
+	}
+    } else {
+
+/* Factorize A(m+1:n,m+1:n) as L**H*L, and update A(1:m,1:m). */
+
+	i__1 = m + 1;
+	for (j = *n; j >= i__1; --j) {
+
+/* Compute s(j,j) and test for non-positive-definiteness. */
+
+	    i__2 = j * ab_dim1 + 1;
+	    ajj = ab[i__2].r;
+	    if (ajj <= 0.) {
+		i__2 = j * ab_dim1 + 1;
+		ab[i__2].r = ajj, ab[i__2].i = 0.;
+		goto L50;
+	    }
+	    ajj = sqrt(ajj);
+	    i__2 = j * ab_dim1 + 1;
+	    ab[i__2].r = ajj, ab[i__2].i = 0.;
+/* Computing MIN */
+	    i__2 = j - 1;
+	    km = min(i__2,*kd);
+
+/* Compute elements j-km:j-1 of the j-th row and update the */
+/* trailing submatrix within the band. */
+
+	    d__1 = 1. / ajj;
+	    zdscal_(&km, &d__1, &ab[km + 1 + (j - km) * ab_dim1], &kld);
+/* Same conjugate/update/restore bracket as in the upper case: the */
+/* second zlacgv undoes the first, leaving only the zher update. */
+	    zlacgv_(&km, &ab[km + 1 + (j - km) * ab_dim1], &kld);
+	    zher_("Lower", &km, &c_b9, &ab[km + 1 + (j - km) * ab_dim1], &kld,
+		    &ab[(j - km) * ab_dim1 + 1], &kld);
+	    zlacgv_(&km, &ab[km + 1 + (j - km) * ab_dim1], &kld);
+/* L30: */
+	}
+
+/* Factorize the updated submatrix A(1:m,1:m) as U**H*U. */
+
+	i__1 = m;
+	for (j = 1; j <= i__1; ++j) {
+
+/* Compute s(j,j) and test for non-positive-definiteness. */
+
+	    i__2 = j * ab_dim1 + 1;
+	    ajj = ab[i__2].r;
+	    if (ajj <= 0.) {
+		i__2 = j * ab_dim1 + 1;
+		ab[i__2].r = ajj, ab[i__2].i = 0.;
+		goto L50;
+	    }
+	    ajj = sqrt(ajj);
+	    i__2 = j * ab_dim1 + 1;
+	    ab[i__2].r = ajj, ab[i__2].i = 0.;
+/* Computing MIN */
+	    i__2 = *kd, i__3 = m - j;
+	    km = min(i__2,i__3);
+
+/* Compute elements j+1:j+km of the j-th column and update the */
+/* trailing submatrix within the band. */
+
+	    if (km > 0) {
+		d__1 = 1. / ajj;
+		zdscal_(&km, &d__1, &ab[j * ab_dim1 + 2], &c__1);
+		zher_("Lower", &km, &c_b9, &ab[j * ab_dim1 + 2], &c__1, &ab[(
+			j + 1) * ab_dim1 + 1], &kld);
+	    }
+/* L40: */
+	}
+    }
+    return 0;
+
+/* Failure path: the updated diagonal element a(j,j) was not positive, */
+/* so A is not positive definite; report the offending index in INFO. */
+L50:
+    *info = j;
+    return 0;
+
+/* End of ZPBSTF */
+
+} /* zpbstf_ */
diff --git a/contrib/libs/clapack/zpbsv.c b/contrib/libs/clapack/zpbsv.c
new file mode 100644
index 0000000000..bac0f83950
--- /dev/null
+++ b/contrib/libs/clapack/zpbsv.c
@@ -0,0 +1,183 @@
+/* zpbsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zpbsv_(char *uplo, integer *n, integer *kd, integer *
+	nrhs, doublecomplex *ab, integer *ldab, doublecomplex *b, integer *
+	ldb, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *), zpbtrf_(
+	    char *, integer *, integer *, doublecomplex *, integer *, integer
+	    *), zpbtrs_(char *, integer *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPBSV computes the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N Hermitian positive definite band matrix and X */
+/* and B are N-by-NRHS matrices. */
+
+/* The Cholesky decomposition is used to factor A as */
+/* A = U**H * U, if UPLO = 'U', or */
+/* A = L * L**H, if UPLO = 'L', */
+/* where U is an upper triangular band matrix, and L is a lower */
+/* triangular band matrix, with the same number of superdiagonals or */
+/* subdiagonals as A. The factored form of A is then used to solve the */
+/* system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AB (input/output) COMPLEX*16 array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(KD+1+i-j,j) = A(i,j) for max(1,j-KD)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(N,j+KD). */
+/* See below for further details. */
+
+/* On exit, if INFO = 0, the triangular factor U or L from the */
+/* Cholesky factorization A = U**H*U or A = L*L**H of the band */
+/* matrix A, in the same storage format as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i of A is not */
+/* positive definite, so the factorization could not be */
+/* completed, and the solution has not been computed. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* N = 6, KD = 2, and UPLO = 'U': */
+
+/* On entry: On exit: */
+
+/* * * a13 a24 a35 a46 * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+
+/* Similarly, if UPLO = 'L' the format of A is as follows: */
+
+/* On entry: On exit: */
+
+/* a11 a22 a33 a44 a55 a66 l11 l22 l33 l44 l55 l66 */
+/* a21 a32 a43 a54 a65 * l21 l32 l43 l54 l65 * */
+/* a31 a42 a53 a64 * * l31 l42 l53 l64 * * */
+
+/* Array elements marked * are not used by the routine. */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (*ldab < *kd + 1) {
+	*info = -6;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZPBSV ", &i__1);
+	return 0;
+    }
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+    zpbtrf_(uplo, n, kd, &ab[ab_offset], ldab, info);
+
+/* A positive INFO from ZPBTRF (the leading minor of that order is not */
+/* positive definite) is passed through to the caller unchanged, and */
+/* the triangular solve is skipped. */
+
+    if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+	zpbtrs_(uplo, n, kd, nrhs, &ab[ab_offset], ldab, &b[b_offset], ldb,
+		info);
+
+    }
+    return 0;
+
+/* End of ZPBSV */
+
+} /* zpbsv_ */
diff --git a/contrib/libs/clapack/zpbsvx.c b/contrib/libs/clapack/zpbsvx.c
new file mode 100644
index 0000000000..51e6109994
--- /dev/null
+++ b/contrib/libs/clapack/zpbsvx.c
@@ -0,0 +1,528 @@
+/* zpbsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zpbsvx_(char *fact, char *uplo, integer *n, integer *kd,
+ integer *nrhs, doublecomplex *ab, integer *ldab, doublecomplex *afb,
+ integer *ldafb, char *equed, doublereal *s, doublecomplex *b, integer
+ *ldb, doublecomplex *x, integer *ldx, doublereal *rcond, doublereal *
+ ferr, doublereal *berr, doublecomplex *work, doublereal *rwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, afb_dim1, afb_offset, b_dim1, b_offset,
+ x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__, j, j1, j2;
+ doublereal amax, smin, smax;
+ extern logical lsame_(char *, char *);
+ doublereal scond, anorm;
+ logical equil, rcequ, upper;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ extern doublereal dlamch_(char *);
+ logical nofact;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern doublereal zlanhb_(char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublereal *);
+ doublereal bignum;
+ extern /* Subroutine */ int zlaqhb_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublereal *, doublereal *,
+ doublereal *, char *);
+ integer infequ;
+ extern /* Subroutine */ int zpbcon_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublereal *, doublereal *,
+ doublecomplex *, doublereal *, integer *), zlacpy_(char *,
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *
+, integer *), zpbequ_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *), zpbrfs_(char *, integer *,
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *
+, doublereal *, doublereal *, doublecomplex *, doublereal *,
+ integer *), zpbtrf_(char *, integer *, integer *,
+ doublecomplex *, integer *, integer *);
+ doublereal smlnum;
+ extern /* Subroutine */ int zpbtrs_(char *, integer *, integer *, integer
+ *, doublecomplex *, integer *, doublecomplex *, integer *,
+ integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPBSVX uses the Cholesky factorization A = U**H*U or A = L*L**H to */
+/* compute the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N Hermitian positive definite band matrix and X */
+/* and B are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/* the system: */
+/* diag(S) * A * diag(S) * inv(diag(S)) * X = diag(S) * B */
+/* Whether or not the system will be equilibrated depends on the */
+/* scaling of the matrix A, but if equilibration is used, A is */
+/* overwritten by diag(S)*A*diag(S) and B by diag(S)*B. */
+
+/* 2. If FACT = 'N' or 'E', the Cholesky decomposition is used to */
+/* factor the matrix A (after equilibration if FACT = 'E') as */
+/* A = U**H * U, if UPLO = 'U', or */
+/* A = L * L**H, if UPLO = 'L', */
+/* where U is an upper triangular band matrix, and L is a lower */
+/* triangular band matrix. */
+
+/* 3. If the leading i-by-i principal minor is not positive definite, */
+/* then the routine returns with INFO = i. Otherwise, the factored */
+/* form of A is used to estimate the condition number of the matrix */
+/* A. If the reciprocal of the condition number is less than machine */
+/* precision, INFO = N+1 is returned as a warning, but the routine */
+/* still goes on to solve for X and compute error bounds as */
+/* described below. */
+
+/* 4. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 5. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* 6. If equilibration was used, the matrix X is premultiplied by */
+/* diag(S) so that it solves the original system before */
+/* equilibration. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of the matrix A is */
+/* supplied on entry, and if not, whether the matrix A should be */
+/* equilibrated before it is factored. */
+/* = 'F': On entry, AFB contains the factored form of A. */
+/* If EQUED = 'Y', the matrix A has been equilibrated */
+/* with scaling factors given by S. AB and AFB will not */
+/* be modified. */
+/* = 'N': The matrix A will be copied to AFB and factored. */
+/* = 'E': The matrix A will be equilibrated if necessary, then */
+/* copied to AFB and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right-hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AB (input/output) COMPLEX*16 array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first KD+1 rows of the array, except */
+/* if FACT = 'F' and EQUED = 'Y', then A must contain the */
+/* equilibrated matrix diag(S)*A*diag(S). The j-th column of A */
+/* is stored in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(KD+1+i-j,j) = A(i,j) for max(1,j-KD)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(N,j+KD). */
+/* See below for further details. */
+
+/* On exit, if FACT = 'E' and EQUED = 'Y', A is overwritten by */
+/* diag(S)*A*diag(S). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array A. LDAB >= KD+1. */
+
+/* AFB (input or output) COMPLEX*16 array, dimension (LDAFB,N) */
+/* If FACT = 'F', then AFB is an input argument and on entry */
+/* contains the triangular factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H of the band matrix */
+/* A, in the same storage format as A (see AB). If EQUED = 'Y', */
+/* then AFB is the factored form of the equilibrated matrix A. */
+
+/* If FACT = 'N', then AFB is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H. */
+
+/* If FACT = 'E', then AFB is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H of the equilibrated */
+/* matrix A (see the description of A for the form of the */
+/* equilibrated matrix). */
+
+/* LDAFB (input) INTEGER */
+/* The leading dimension of the array AFB. LDAFB >= KD+1. */
+
+/* EQUED (input or output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration (always true if FACT = 'N'). */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+/* EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/* output argument. */
+
+/* S (input or output) DOUBLE PRECISION array, dimension (N) */
+/* The scale factors for A; not accessed if EQUED = 'N'. S is */
+/* an input argument if FACT = 'F'; otherwise, S is an output */
+/* argument. If FACT = 'F' and EQUED = 'Y', each element of S */
+/* must be positive. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if EQUED = 'N', B is not modified; if EQUED = 'Y', */
+/* B is overwritten by diag(S) * B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X to */
+/* the original system of equations. Note that if EQUED = 'Y', */
+/* A and B are modified on exit, and the solution to the */
+/* equilibrated system is inv(diag(S))*X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A after equilibration (if done). If RCOND is less than the */
+/* machine precision (in particular, if RCOND = 0), the matrix */
+/* is singular to working precision. This condition is */
+/* indicated by a return code of INFO > 0. */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: the leading minor of order i of A is */
+/* not positive definite, so the factorization */
+/* could not be completed, and the solution has not */
+/* been computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* N = 6, KD = 2, and UPLO = 'U': */
+
+/* Two-dimensional storage of the Hermitian matrix A: */
+
+/* a11 a12 a13 */
+/* a22 a23 a24 */
+/* a33 a34 a35 */
+/* a44 a45 a46 */
+/* a55 a56 */
+/* (aij=conjg(aji)) a66 */
+
+/* Band storage of the upper triangle of A: */
+
+/* * * a13 a24 a35 a46 */
+/* * a12 a23 a34 a45 a56 */
+/* a11 a22 a33 a44 a55 a66 */
+
+/* Similarly, if UPLO = 'L' the format of A is as follows: */
+
+/* a11 a22 a33 a44 a55 a66 */
+/* a21 a32 a43 a54 a65 * */
+/* a31 a42 a53 a64 * * */
+
+/* Array elements marked * are not used by the routine. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ afb_dim1 = *ldafb;
+ afb_offset = 1 + afb_dim1;
+ afb -= afb_offset;
+ --s;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ equil = lsame_(fact, "E");
+ upper = lsame_(uplo, "U");
+ if (nofact || equil) {
+ *(unsigned char *)equed = 'N';
+ rcequ = FALSE_;
+ } else {
+ rcequ = lsame_(equed, "Y");
+ smlnum = dlamch_("Safe minimum");
+ bignum = 1. / smlnum;
+ }
+
+/* Test the input parameters. */
+
+ if (! nofact && ! equil && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*kd < 0) {
+ *info = -4;
+ } else if (*nrhs < 0) {
+ *info = -5;
+ } else if (*ldab < *kd + 1) {
+ *info = -7;
+ } else if (*ldafb < *kd + 1) {
+ *info = -9;
+ } else if (lsame_(fact, "F") && ! (rcequ || lsame_(
+ equed, "N"))) {
+ *info = -10;
+ } else {
+ if (rcequ) {
+ smin = bignum;
+ smax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ d__1 = smin, d__2 = s[j];
+ smin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = smax, d__2 = s[j];
+ smax = max(d__1,d__2);
+/* L10: */
+ }
+ if (smin <= 0.) {
+ *info = -11;
+ } else if (*n > 0) {
+ scond = max(smin,smlnum) / min(smax,bignum);
+ } else {
+ scond = 1.;
+ }
+ }
+ if (*info == 0) {
+ if (*ldb < max(1,*n)) {
+ *info = -13;
+ } else if (*ldx < max(1,*n)) {
+ *info = -15;
+ }
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPBSVX", &i__1);
+ return 0;
+ }
+
+ if (equil) {
+
+/* Compute row and column scalings to equilibrate the matrix A. */
+
+ zpbequ_(uplo, n, kd, &ab[ab_offset], ldab, &s[1], &scond, &amax, &
+ infequ);
+ if (infequ == 0) {
+
+/* Equilibrate the matrix. */
+
+ zlaqhb_(uplo, n, kd, &ab[ab_offset], ldab, &s[1], &scond, &amax,
+ equed);
+ rcequ = lsame_(equed, "Y");
+ }
+ }
+
+/* Scale the right-hand side. */
+
+ if (rcequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__;
+ i__5 = i__ + j * b_dim1;
+ z__1.r = s[i__4] * b[i__5].r, z__1.i = s[i__4] * b[i__5].i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+
+ if (nofact || equil) {
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j - *kd;
+ j1 = max(i__2,1);
+ i__2 = j - j1 + 1;
+ zcopy_(&i__2, &ab[*kd + 1 - j + j1 + j * ab_dim1], &c__1, &
+ afb[*kd + 1 - j + j1 + j * afb_dim1], &c__1);
+/* L40: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__2 = j + *kd;
+ j2 = min(i__2,*n);
+ i__2 = j2 - j + 1;
+ zcopy_(&i__2, &ab[j * ab_dim1 + 1], &c__1, &afb[j * afb_dim1
+ + 1], &c__1);
+/* L50: */
+ }
+ }
+
+ zpbtrf_(uplo, n, kd, &afb[afb_offset], ldafb, info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ anorm = zlanhb_("1", uplo, n, kd, &ab[ab_offset], ldab, &rwork[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ zpbcon_(uplo, n, kd, &afb[afb_offset], ldafb, &anorm, rcond, &work[1], &
+ rwork[1], info);
+
+/* Compute the solution matrix X. */
+
+ zlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ zpbtrs_(uplo, n, kd, nrhs, &afb[afb_offset], ldafb, &x[x_offset], ldx,
+ info);
+
+/* Use iterative refinement to improve the computed solution and */
+/* compute error bounds and backward error estimates for it. */
+
+ zpbrfs_(uplo, n, kd, nrhs, &ab[ab_offset], ldab, &afb[afb_offset], ldafb,
+ &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[1]
+, &rwork[1], info);
+
+/* Transform the solution matrix X to a solution of the original */
+/* system. */
+
+ if (rcequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * x_dim1;
+ i__4 = i__;
+ i__5 = i__ + j * x_dim1;
+ z__1.r = s[i__4] * x[i__5].r, z__1.i = s[i__4] * x[i__5].i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L60: */
+ }
+/* L70: */
+ }
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] /= scond;
+/* L80: */
+ }
+ }
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < dlamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ return 0;
+
+/* End of ZPBSVX */
+
+} /* zpbsvx_ */
diff --git a/contrib/libs/clapack/zpbtf2.c b/contrib/libs/clapack/zpbtf2.c
new file mode 100644
index 0000000000..de9734cb92
--- /dev/null
+++ b/contrib/libs/clapack/zpbtf2.c
@@ -0,0 +1,255 @@
+/* zpbtf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b8 = -1.;
+static integer c__1 = 1;
+
+/* Subroutine */ int zpbtf2_(char *uplo, integer *n, integer *kd,
+	doublecomplex *ab, integer *ldab, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3;
+    doublereal d__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer j, kn;
+    doublereal ajj;
+    integer kld;
+    extern /* Subroutine */ int zher_(char *, integer *, doublereal *,
+	    doublecomplex *, integer *, doublecomplex *, integer *);
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+	    integer *, doublereal *, doublecomplex *, integer *), zlacgv_(
+	    integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPBTF2 computes the Cholesky factorization of a complex Hermitian */
+/* positive definite band matrix A. */
+
+/* The factorization has the form */
+/* A = U' * U , if UPLO = 'U', or */
+/* A = L * L', if UPLO = 'L', */
+/* where U is an upper triangular matrix, U' is the conjugate transpose */
+/* of U, and L is lower triangular. */
+
+/* This is the unblocked version of the algorithm, calling Level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* Hermitian matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of super-diagonals of the matrix A if UPLO = 'U', */
+/* or the number of sub-diagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) COMPLEX*16 array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, if INFO = 0, the triangular factor U or L from the */
+/* Cholesky factorization A = U'*U or A = L*L' of the band */
+/* matrix A, in the same storage format as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, the leading minor of order k is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* N = 6, KD = 2, and UPLO = 'U': */
+
+/* On entry: On exit: */
+
+/* * * a13 a24 a35 a46 * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+
+/* Similarly, if UPLO = 'L' the format of A is as follows: */
+
+/* On entry: On exit: */
+
+/* a11 a22 a33 a44 a55 a66 l11 l22 l33 l44 l55 l66 */
+/* a21 a32 a43 a54 a65 * l21 l32 l43 l54 l65 * */
+/* a31 a42 a53 a64 * * l31 l42 l53 l64 * * */
+
+/* Array elements marked * are not used by the routine. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*ldab < *kd + 1) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZPBTF2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* KLD = LDAB-1 is the stride between consecutive stored elements of a */
+/* row of the band matrix; max(1,.) guards the degenerate LDAB = 1 */
+/* (i.e. KD = 0) case so the BLAS calls get a legal increment. */
+
+/* Computing MAX */
+    i__1 = 1, i__2 = *ldab - 1;
+    kld = max(i__1,i__2);
+
+    if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+
+/* Compute U(J,J) and test for non-positive-definiteness. */
+
+	    i__2 = *kd + 1 + j * ab_dim1;
+	    ajj = ab[i__2].r;
+	    if (ajj <= 0.) {
+		i__2 = *kd + 1 + j * ab_dim1;
+		ab[i__2].r = ajj, ab[i__2].i = 0.;
+		goto L30;
+	    }
+	    ajj = sqrt(ajj);
+	    i__2 = *kd + 1 + j * ab_dim1;
+	    ab[i__2].r = ajj, ab[i__2].i = 0.;
+
+/* Compute elements J+1:J+KN of row J and update the */
+/* trailing submatrix within the band. */
+
+/* Computing MIN */
+	    i__2 = *kd, i__3 = *n - j;
+	    kn = min(i__2,i__3);
+	    if (kn > 0) {
+		d__1 = 1. / ajj;
+		zdscal_(&kn, &d__1, &ab[*kd + (j + 1) * ab_dim1], &kld);
+		zlacgv_(&kn, &ab[*kd + (j + 1) * ab_dim1], &kld);
+		zher_("Upper", &kn, &c_b8, &ab[*kd + (j + 1) * ab_dim1], &kld,
+			 &ab[*kd + 1 + (j + 1) * ab_dim1], &kld);
+		zlacgv_(&kn, &ab[*kd + (j + 1) * ab_dim1], &kld);
+	    }
+/* L10: */
+	}
+    } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+
+/* Compute L(J,J) and test for non-positive-definiteness. */
+
+	    i__2 = j * ab_dim1 + 1;
+	    ajj = ab[i__2].r;
+	    if (ajj <= 0.) {
+		i__2 = j * ab_dim1 + 1;
+		ab[i__2].r = ajj, ab[i__2].i = 0.;
+		goto L30;
+	    }
+	    ajj = sqrt(ajj);
+	    i__2 = j * ab_dim1 + 1;
+	    ab[i__2].r = ajj, ab[i__2].i = 0.;
+
+/* Compute elements J+1:J+KN of column J and update the */
+/* trailing submatrix within the band. */
+
+/* Computing MIN */
+	    i__2 = *kd, i__3 = *n - j;
+	    kn = min(i__2,i__3);
+	    if (kn > 0) {
+		d__1 = 1. / ajj;
+		zdscal_(&kn, &d__1, &ab[j * ab_dim1 + 2], &c__1);
+		zher_("Lower", &kn, &c_b8, &ab[j * ab_dim1 + 2], &c__1, &ab[(
+			j + 1) * ab_dim1 + 1], &kld);
+	    }
+/* L20: */
+	}
+    }
+    return 0;
+
+/* A non-positive real diagonal entry was found in column J: the matrix */
+/* is not positive definite, so report the failing column index in INFO. */
+
+L30:
+    *info = j;
+    return 0;
+
+/* End of ZPBTF2 */
+
+} /* zpbtf2_ */
diff --git a/contrib/libs/clapack/zpbtrf.c b/contrib/libs/clapack/zpbtrf.c
new file mode 100644
index 0000000000..79743f3e09
--- /dev/null
+++ b/contrib/libs/clapack/zpbtrf.c
@@ -0,0 +1,490 @@
+/* zpbtrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b21 = -1.;
+static doublereal c_b22 = 1.;
+static integer c__33 = 33;
+
+/* Subroutine */ int zpbtrf_(char *uplo, integer *n, integer *kd,
+	doublecomplex *ab, integer *ldab, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+    doublecomplex z__1;
+
+    /* Local variables */
+    integer i__, j, i2, i3, ib, nb, ii, jj;
+    doublecomplex work[1056] /* was [33][32] */;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *), zherk_(char *, char *, integer *,
+	    integer *, doublereal *, doublecomplex *, integer *, doublereal *,
+	    doublecomplex *, integer *), ztrsm_(char *, char
+	    *, char *, char *, integer *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *), zpbtf2_(char *, integer *, integer *,
+	    doublecomplex *, integer *, integer *), zpotf2_(char *,
+	    integer *, doublecomplex *, integer *, integer *),
+	    xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPBTRF computes the Cholesky factorization of a complex Hermitian */
+/* positive definite band matrix A. */
+
+/* The factorization has the form */
+/* A = U**H * U, if UPLO = 'U', or */
+/* A = L * L**H, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* AB (input/output) COMPLEX*16 array, dimension (LDAB,N) */
+/* On entry, the upper or lower triangle of the Hermitian band */
+/* matrix A, stored in the first KD+1 rows of the array. The */
+/* j-th column of A is stored in the j-th column of the array AB */
+/* as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+
+/* On exit, if INFO = 0, the triangular factor U or L from the */
+/* Cholesky factorization A = U**H*U or A = L*L**H of the band */
+/* matrix A, in the same storage format as A. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* Further Details */
+/* =============== */
+
+/* The band storage scheme is illustrated by the following example, when */
+/* N = 6, KD = 2, and UPLO = 'U': */
+
+/* On entry: On exit: */
+
+/* * * a13 a24 a35 a46 * * u13 u24 u35 u46 */
+/* * a12 a23 a34 a45 a56 * u12 u23 u34 u45 u56 */
+/* a11 a22 a33 a44 a55 a66 u11 u22 u33 u44 u55 u66 */
+
+/* Similarly, if UPLO = 'L' the format of A is as follows: */
+
+/* On entry: On exit: */
+
+/* a11 a22 a33 a44 a55 a66 l11 l22 l33 l44 l55 l66 */
+/* a21 a32 a43 a54 a65 * l21 l32 l43 l54 l65 * */
+/* a31 a42 a53 a64 * * l31 l42 l53 l64 * * */
+
+/* Array elements marked * are not used by the routine. */
+
+/* Contributed by */
+/* Peter Mayes and Giuseppe Radicati, IBM ECSEC, Rome, March 23, 1989 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*ldab < *kd + 1) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZPBTRF", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Determine the block size for this environment */
+
+    nb = ilaenv_(&c__1, "ZPBTRF", uplo, n, kd, &c_n1, &c_n1);
+
+/* The block size must not exceed the semi-bandwidth KD, and must not */
+/* exceed the limit set by the size of the local array WORK. */
+/* (WORK is a fixed 33x32 scratch array, hence the caps of 32 columns */
+/* and the leading dimension 33 used in the index arithmetic below.) */
+
+    nb = min(nb,32);
+
+    if (nb <= 1 || nb > *kd) {
+
+/* Use unblocked code */
+
+	zpbtf2_(uplo, n, kd, &ab[ab_offset], ldab, info);
+    } else {
+
+/* Use blocked code */
+
+	if (lsame_(uplo, "U")) {
+
+/* Compute the Cholesky factorization of a Hermitian band */
+/* matrix, given the upper triangle of the matrix in band */
+/* storage. */
+
+/* Zero the upper triangle of the work array. */
+
+	    i__1 = nb;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * 33 - 34;
+		    work[i__3].r = 0., work[i__3].i = 0.;
+/* L10: */
+		}
+/* L20: */
+	    }
+
+/* Process the band matrix one diagonal block at a time. */
+
+	    i__1 = *n;
+	    i__2 = nb;
+	    for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+		i__3 = nb, i__4 = *n - i__ + 1;
+		ib = min(i__3,i__4);
+
+/* Factorize the diagonal block */
+
+		i__3 = *ldab - 1;
+		zpotf2_(uplo, &ib, &ab[*kd + 1 + i__ * ab_dim1], &i__3, &ii);
+		if (ii != 0) {
+		    *info = i__ + ii - 1;
+		    goto L150;
+		}
+		if (i__ + ib <= *n) {
+
+/* Update the relevant part of the trailing submatrix. */
+/* If A11 denotes the diagonal block which has just been */
+/* factorized, then we need to update the remaining */
+/* blocks in the diagram: */
+
+/* A11 A12 A13 */
+/* A22 A23 */
+/* A33 */
+
+/* The numbers of rows and columns in the partitioning */
+/* are IB, I2, I3 respectively. The blocks A12, A22 and */
+/* A23 are empty if IB = KD. The upper triangle of A13 */
+/* lies outside the band. */
+
+/* Computing MIN */
+		    i__3 = *kd - ib, i__4 = *n - i__ - ib + 1;
+		    i2 = min(i__3,i__4);
+/* Computing MIN */
+		    i__3 = ib, i__4 = *n - i__ - *kd + 1;
+		    i3 = min(i__3,i__4);
+
+		    if (i2 > 0) {
+
+/* Update A12 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			ztrsm_("Left", "Upper", "Conjugate transpose", "Non-"
+				"unit", &ib, &i2, &c_b1, &ab[*kd + 1 + i__ *
+				ab_dim1], &i__3, &ab[*kd + 1 - ib + (i__ + ib)
+				 * ab_dim1], &i__4);
+
+/* Update A22 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			zherk_("Upper", "Conjugate transpose", &i2, &ib, &
+				c_b21, &ab[*kd + 1 - ib + (i__ + ib) *
+				ab_dim1], &i__3, &c_b22, &ab[*kd + 1 + (i__ +
+				ib) * ab_dim1], &i__4);
+		    }
+
+		    if (i3 > 0) {
+
+/* Copy the lower triangle of A13 into the work array. */
+
+			i__3 = i3;
+			for (jj = 1; jj <= i__3; ++jj) {
+			    i__4 = ib;
+			    for (ii = jj; ii <= i__4; ++ii) {
+				i__5 = ii + jj * 33 - 34;
+				i__6 = ii - jj + 1 + (jj + i__ + *kd - 1) *
+					ab_dim1;
+				work[i__5].r = ab[i__6].r, work[i__5].i = ab[
+					i__6].i;
+/* L30: */
+			    }
+/* L40: */
+			}
+
+/* Update A13 (in the work array). */
+
+			i__3 = *ldab - 1;
+			ztrsm_("Left", "Upper", "Conjugate transpose", "Non-"
+				"unit", &ib, &i3, &c_b1, &ab[*kd + 1 + i__ *
+				ab_dim1], &i__3, work, &c__33);
+
+/* Update A23 */
+/* (Z__1 is the complex scalar ALPHA = -1 for the ZGEMM call.) */
+
+			if (i2 > 0) {
+			    z__1.r = -1., z__1.i = -0.;
+			    i__3 = *ldab - 1;
+			    i__4 = *ldab - 1;
+			    zgemm_("Conjugate transpose", "No transpose", &i2,
+				     &i3, &ib, &z__1, &ab[*kd + 1 - ib + (i__
+				    + ib) * ab_dim1], &i__3, work, &c__33, &
+				    c_b1, &ab[ib + 1 + (i__ + *kd) * ab_dim1],
+				     &i__4);
+			}
+
+/* Update A33 */
+
+			i__3 = *ldab - 1;
+			zherk_("Upper", "Conjugate transpose", &i3, &ib, &
+				c_b21, work, &c__33, &c_b22, &ab[*kd + 1 + (
+				i__ + *kd) * ab_dim1], &i__3);
+
+/* Copy the lower triangle of A13 back into place. */
+
+			i__3 = i3;
+			for (jj = 1; jj <= i__3; ++jj) {
+			    i__4 = ib;
+			    for (ii = jj; ii <= i__4; ++ii) {
+				i__5 = ii - jj + 1 + (jj + i__ + *kd - 1) *
+					ab_dim1;
+				i__6 = ii + jj * 33 - 34;
+				ab[i__5].r = work[i__6].r, ab[i__5].i = work[
+					i__6].i;
+/* L50: */
+			    }
+/* L60: */
+			}
+		    }
+		}
+/* L70: */
+	    }
+	} else {
+
+/* Compute the Cholesky factorization of a Hermitian band */
+/* matrix, given the lower triangle of the matrix in band */
+/* storage. */
+
+/* Zero the lower triangle of the work array. */
+
+	    i__2 = nb;
+	    for (j = 1; j <= i__2; ++j) {
+		i__1 = nb;
+		for (i__ = j + 1; i__ <= i__1; ++i__) {
+		    i__3 = i__ + j * 33 - 34;
+		    work[i__3].r = 0., work[i__3].i = 0.;
+/* L80: */
+		}
+/* L90: */
+	    }
+
+/* Process the band matrix one diagonal block at a time. */
+
+	    i__2 = *n;
+	    i__1 = nb;
+	    for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+/* Computing MIN */
+		i__3 = nb, i__4 = *n - i__ + 1;
+		ib = min(i__3,i__4);
+
+/* Factorize the diagonal block */
+
+		i__3 = *ldab - 1;
+		zpotf2_(uplo, &ib, &ab[i__ * ab_dim1 + 1], &i__3, &ii);
+		if (ii != 0) {
+		    *info = i__ + ii - 1;
+		    goto L150;
+		}
+		if (i__ + ib <= *n) {
+
+/* Update the relevant part of the trailing submatrix. */
+/* If A11 denotes the diagonal block which has just been */
+/* factorized, then we need to update the remaining */
+/* blocks in the diagram: */
+
+/* A11 */
+/* A21 A22 */
+/* A31 A32 A33 */
+
+/* The numbers of rows and columns in the partitioning */
+/* are IB, I2, I3 respectively. The blocks A21, A22 and */
+/* A32 are empty if IB = KD. The lower triangle of A31 */
+/* lies outside the band. */
+
+/* Computing MIN */
+		    i__3 = *kd - ib, i__4 = *n - i__ - ib + 1;
+		    i2 = min(i__3,i__4);
+/* Computing MIN */
+		    i__3 = ib, i__4 = *n - i__ - *kd + 1;
+		    i3 = min(i__3,i__4);
+
+		    if (i2 > 0) {
+
+/* Update A21 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			ztrsm_("Right", "Lower", "Conjugate transpose", "Non"
+				"-unit", &i2, &ib, &c_b1, &ab[i__ * ab_dim1 +
+				1], &i__3, &ab[ib + 1 + i__ * ab_dim1], &i__4);
+
+/* Update A22 */
+
+			i__3 = *ldab - 1;
+			i__4 = *ldab - 1;
+			zherk_("Lower", "No transpose", &i2, &ib, &c_b21, &ab[
+				ib + 1 + i__ * ab_dim1], &i__3, &c_b22, &ab[(
+				i__ + ib) * ab_dim1 + 1], &i__4);
+		    }
+
+		    if (i3 > 0) {
+
+/* Copy the upper triangle of A31 into the work array. */
+
+			i__3 = ib;
+			for (jj = 1; jj <= i__3; ++jj) {
+			    i__4 = min(jj,i3);
+			    for (ii = 1; ii <= i__4; ++ii) {
+				i__5 = ii + jj * 33 - 34;
+				i__6 = *kd + 1 - jj + ii + (jj + i__ - 1) *
+					ab_dim1;
+				work[i__5].r = ab[i__6].r, work[i__5].i = ab[
+					i__6].i;
+/* L100: */
+			    }
+/* L110: */
+			}
+
+/* Update A31 (in the work array). */
+
+			i__3 = *ldab - 1;
+			ztrsm_("Right", "Lower", "Conjugate transpose", "Non"
+				"-unit", &i3, &ib, &c_b1, &ab[i__ * ab_dim1 +
+				1], &i__3, work, &c__33);
+
+/* Update A32 */
+/* (Z__1 is the complex scalar ALPHA = -1 for the ZGEMM call.) */
+
+			if (i2 > 0) {
+			    z__1.r = -1., z__1.i = -0.;
+			    i__3 = *ldab - 1;
+			    i__4 = *ldab - 1;
+			    zgemm_("No transpose", "Conjugate transpose", &i3,
+				     &i2, &ib, &z__1, work, &c__33, &ab[ib +
+				    1 + i__ * ab_dim1], &i__3, &c_b1, &ab[*kd
+				    + 1 - ib + (i__ + ib) * ab_dim1], &i__4);
+			}
+
+/* Update A33 */
+
+			i__3 = *ldab - 1;
+			zherk_("Lower", "No transpose", &i3, &ib, &c_b21,
+				work, &c__33, &c_b22, &ab[(i__ + *kd) *
+				ab_dim1 + 1], &i__3);
+
+/* Copy the upper triangle of A31 back into place. */
+
+			i__3 = ib;
+			for (jj = 1; jj <= i__3; ++jj) {
+			    i__4 = min(jj,i3);
+			    for (ii = 1; ii <= i__4; ++ii) {
+				i__5 = *kd + 1 - jj + ii + (jj + i__ - 1) *
+					ab_dim1;
+				i__6 = ii + jj * 33 - 34;
+				ab[i__5].r = work[i__6].r, ab[i__5].i = work[
+					i__6].i;
+/* L120: */
+			    }
+/* L130: */
+			}
+		    }
+		}
+/* L140: */
+	    }
+	}
+    }
+    return 0;
+
+/* Abnormal exit: INFO was set above to the index of the first */
+/* non-positive-definite leading minor found by ZPOTF2. */
+
+L150:
+    return 0;
+
+/* End of ZPBTRF */
+
+} /* zpbtrf_ */
diff --git a/contrib/libs/clapack/zpbtrs.c b/contrib/libs/clapack/zpbtrs.c
new file mode 100644
index 0000000000..5ba99f312b
--- /dev/null
+++ b/contrib/libs/clapack/zpbtrs.c
@@ -0,0 +1,183 @@
+/* zpbtrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zpbtrs_(char *uplo, integer *n, integer *kd, integer *
+	nrhs, doublecomplex *ab, integer *ldab, doublecomplex *b, integer *
+	ldb, integer *info)
+{
+    /* System generated locals */
+    integer ab_dim1, ab_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    integer j;
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int ztbsv_(char *, char *, char *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPBTRS solves a system of linear equations A*X = B with a Hermitian */
+/* positive definite band matrix A using the Cholesky factorization */
+/* A = U**H*U or A = L*L**H computed by ZPBTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangular factor stored in AB; */
+/* = 'L': Lower triangular factor stored in AB. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals of the matrix A if UPLO = 'U', */
+/* or the number of subdiagonals if UPLO = 'L'. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AB (input) COMPLEX*16 array, dimension (LDAB,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**H*U or A = L*L**H of the band matrix A, stored in the */
+/* first KD+1 rows of the array. The j-th column of U or L is */
+/* stored in the j-th column of the array AB as follows: */
+/* if UPLO ='U', AB(kd+1+i-j,j) = U(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO ='L', AB(1+i-j,j) = L(i,j) for j<=i<=min(n,j+kd). */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*kd < 0) {
+	*info = -3;
+    } else if (*nrhs < 0) {
+	*info = -4;
+    } else if (*ldab < *kd + 1) {
+	*info = -6;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZPBTRS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+/* Each right-hand-side column is solved independently with a pair of */
+/* triangular band solves (ZTBSV): forward then backward substitution. */
+
+    if (upper) {
+
+/* Solve A*X = B where A = U'*U. */
+
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+
+/* Solve U'*X = B, overwriting B with X. */
+
+	    ztbsv_("Upper", "Conjugate transpose", "Non-unit", n, kd, &ab[
+		    ab_offset], ldab, &b[j * b_dim1 + 1], &c__1);
+
+/* Solve U*X = B, overwriting B with X. */
+
+	    ztbsv_("Upper", "No transpose", "Non-unit", n, kd, &ab[ab_offset],
+		     ldab, &b[j * b_dim1 + 1], &c__1);
+/* L10: */
+	}
+    } else {
+
+/* Solve A*X = B where A = L*L'. */
+
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+
+/* Solve L*X = B, overwriting B with X. */
+
+	    ztbsv_("Lower", "No transpose", "Non-unit", n, kd, &ab[ab_offset],
+		     ldab, &b[j * b_dim1 + 1], &c__1);
+
+/* Solve L'*X = B, overwriting B with X. */
+
+	    ztbsv_("Lower", "Conjugate transpose", "Non-unit", n, kd, &ab[
+		    ab_offset], ldab, &b[j * b_dim1 + 1], &c__1);
+/* L20: */
+	}
+    }
+
+    return 0;
+
+/* End of ZPBTRS */
+
+} /* zpbtrs_ */
diff --git a/contrib/libs/clapack/zpftrf.c b/contrib/libs/clapack/zpftrf.c
new file mode 100644
index 0000000000..7eae7067fe
--- /dev/null
+++ b/contrib/libs/clapack/zpftrf.c
@@ -0,0 +1,475 @@
+/* zpftrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static doublereal c_b15 = -1.;
+static doublereal c_b16 = 1.;
+
+/* Subroutine */ int zpftrf_(char *transr, char *uplo, integer *n,
+ doublecomplex *a, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ integer k, n1, n2;
+ logical normaltransr;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zherk_(char *, char *, integer *, integer *,
+ doublereal *, doublecomplex *, integer *, doublereal *,
+ doublecomplex *, integer *);
+ logical lower;
+ extern /* Subroutine */ int ztrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *),
+ xerbla_(char *, integer *);
+ logical nisodd;
+ extern /* Subroutine */ int zpotrf_(char *, integer *, doublecomplex *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPFTRF computes the Cholesky factorization of a complex Hermitian */
+/* positive definite matrix A. */
+
+/* The factorization has the form */
+/* A = U**H * U, if UPLO = 'U', or */
+/* A = L * L**H, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular. */
+
+/* This is the block version of the algorithm, calling Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': The Normal TRANSR of RFP A is stored; */
+/* = 'C': The Conjugate-transpose TRANSR of RFP A is stored. */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': Upper triangle of RFP A is stored; */
+/* = 'L': Lower triangle of RFP A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX array, dimension ( N*(N+1)/2 ); */
+/* On entry, the Hermitian matrix A in RFP format. RFP format is */
+/* described by TRANSR, UPLO, and N as follows: If TRANSR = 'N' */
+/* then RFP A is (0:N,0:k-1) when N is even; k=N/2. RFP A is */
+/* (0:N-1,0:k) when N is odd; k=N/2. IF TRANSR = 'C' then RFP is */
+/* the Conjugate-transpose of RFP A as defined when */
+/* TRANSR = 'N'. The contents of RFP A are defined by UPLO as */
+/* follows: If UPLO = 'U' the RFP A contains the nt elements of */
+/* upper packed A. If UPLO = 'L' the RFP A contains the elements */
+/* of lower packed A. The LDA of RFP A is (N+1)/2 when TRANSR = */
+/* 'C'. When TRANSR is 'N' the LDA is N+1 when N is even and N */
+/* is odd. See the Note below for more details. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization RFP A = U**H*U or RFP A = L*L**H. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* Further Notes on RFP Format: */
+/* ============================ */
+
+/* We first consider Standard Packed Format when N is even. */
+/* We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* conjugate-transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* conjugate-transpose of the last three columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- -- */
+/* 03 04 05 33 43 53 */
+/* -- -- */
+/* 13 14 15 00 44 54 */
+/* -- */
+/* 23 24 25 10 11 55 */
+
+/* 33 34 35 20 21 22 */
+/* -- */
+/* 00 44 45 30 31 32 */
+/* -- -- */
+/* 01 11 55 40 41 42 */
+/* -- -- -- */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We next consider Standard Packed Format when N is odd. */
+/* We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* conjugate-transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* conjugate-transpose of the last two columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- */
+/* 02 03 04 00 33 43 */
+/* -- */
+/* 12 13 14 10 11 44 */
+
+/* 22 23 24 20 21 22 */
+/* -- */
+/* 00 33 34 30 31 32 */
+/* -- -- */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- */
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ *info = 0;
+ normaltransr = lsame_(transr, "N");
+ lower = lsame_(uplo, "L");
+ if (! normaltransr && ! lsame_(transr, "C")) {
+ *info = -1;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPFTRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* If N is odd, set NISODD = .TRUE. */
+/* If N is even, set K = N/2 and NISODD = .FALSE. */
+
+ if (*n % 2 == 0) {
+ k = *n / 2;
+ nisodd = FALSE_;
+ } else {
+ nisodd = TRUE_;
+ }
+
+/* Set N1 and N2 depending on LOWER */
+
+ if (lower) {
+ n2 = *n / 2;
+ n1 = *n - n2;
+ } else {
+ n1 = *n / 2;
+ n2 = *n - n1;
+ }
+
+/* start execution: there are eight cases */
+
+ if (nisodd) {
+
+/* N is odd */
+
+ if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:n1-1) ) */
+/* T1 -> a(0,0), T2 -> a(0,1), S -> a(n1,0) */
+/* T1 -> a(0), T2 -> a(n), S -> a(n1) */
+
+ zpotrf_("L", &n1, a, n, info);
+ if (*info > 0) {
+ return 0;
+ }
+ ztrsm_("R", "L", "C", "N", &n2, &n1, &c_b1, a, n, &a[n1], n);
+ zherk_("U", "N", &n2, &n1, &c_b15, &a[n1], n, &c_b16, &a[*n],
+ n);
+ zpotrf_("U", &n2, &a[*n], n, info);
+ if (*info > 0) {
+ *info += n1;
+ }
+
+ } else {
+
+/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:n2-1) */
+/* T1 -> a(n1+1,0), T2 -> a(n1,0), S -> a(0,0) */
+/* T1 -> a(n2), T2 -> a(n1), S -> a(0) */
+
+ zpotrf_("L", &n1, &a[n2], n, info);
+ if (*info > 0) {
+ return 0;
+ }
+ ztrsm_("L", "L", "N", "N", &n1, &n2, &c_b1, &a[n2], n, a, n);
+ zherk_("U", "C", &n2, &n1, &c_b15, a, n, &c_b16, &a[n1], n);
+ zpotrf_("U", &n2, &a[n1], n, info);
+ if (*info > 0) {
+ *info += n1;
+ }
+
+ }
+
+ } else {
+
+/* N is odd and TRANSR = 'C' */
+
+ if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is odd */
+/* T1 -> A(0,0) , T2 -> A(1,0) , S -> A(0,n1) */
+/* T1 -> a(0+0) , T2 -> a(1+0) , S -> a(0+n1*n1); lda=n1 */
+
+ zpotrf_("U", &n1, a, &n1, info);
+ if (*info > 0) {
+ return 0;
+ }
+ ztrsm_("L", "U", "C", "N", &n1, &n2, &c_b1, a, &n1, &a[n1 *
+ n1], &n1);
+ zherk_("L", "C", &n2, &n1, &c_b15, &a[n1 * n1], &n1, &c_b16, &
+ a[1], &n1);
+ zpotrf_("L", &n2, &a[1], &n1, info);
+ if (*info > 0) {
+ *info += n1;
+ }
+
+ } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is odd */
+/* T1 -> A(0,n1+1), T2 -> A(0,n1), S -> A(0,0) */
+/* T1 -> a(n2*n2), T2 -> a(n1*n2), S -> a(0); lda = n2 */
+
+ zpotrf_("U", &n1, &a[n2 * n2], &n2, info);
+ if (*info > 0) {
+ return 0;
+ }
+ ztrsm_("R", "U", "N", "N", &n2, &n1, &c_b1, &a[n2 * n2], &n2,
+ a, &n2);
+ zherk_("L", "N", &n2, &n1, &c_b15, a, &n2, &c_b16, &a[n1 * n2]
+, &n2);
+ zpotrf_("L", &n2, &a[n1 * n2], &n2, info);
+ if (*info > 0) {
+ *info += n1;
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* N is even */
+
+ if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/* T1 -> a(1), T2 -> a(0), S -> a(k+1) */
+
+ i__1 = *n + 1;
+ zpotrf_("L", &k, &a[1], &i__1, info);
+ if (*info > 0) {
+ return 0;
+ }
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ ztrsm_("R", "L", "C", "N", &k, &k, &c_b1, &a[1], &i__1, &a[k
+ + 1], &i__2);
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ zherk_("U", "N", &k, &k, &c_b15, &a[k + 1], &i__1, &c_b16, a,
+ &i__2);
+ i__1 = *n + 1;
+ zpotrf_("U", &k, a, &i__1, info);
+ if (*info > 0) {
+ *info += k;
+ }
+
+ } else {
+
+/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(k+1,0) , T2 -> a(k,0), S -> a(0,0) */
+/* T1 -> a(k+1), T2 -> a(k), S -> a(0) */
+
+ i__1 = *n + 1;
+ zpotrf_("L", &k, &a[k + 1], &i__1, info);
+ if (*info > 0) {
+ return 0;
+ }
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ ztrsm_("L", "L", "N", "N", &k, &k, &c_b1, &a[k + 1], &i__1, a,
+ &i__2);
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ zherk_("U", "C", &k, &k, &c_b15, a, &i__1, &c_b16, &a[k], &
+ i__2);
+ i__1 = *n + 1;
+ zpotrf_("U", &k, &a[k], &i__1, info);
+ if (*info > 0) {
+ *info += k;
+ }
+
+ }
+
+ } else {
+
+/* N is even and TRANSR = 'C' */
+
+ if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,1), T2 -> B(0,0), S -> B(0,k+1) */
+/* T1 -> a(0+k), T2 -> a(0+0), S -> a(0+k*(k+1)); lda=k */
+
+ zpotrf_("U", &k, &a[k], &k, info);
+ if (*info > 0) {
+ return 0;
+ }
+ ztrsm_("L", "U", "C", "N", &k, &k, &c_b1, &a[k], &n1, &a[k * (
+ k + 1)], &k);
+ zherk_("L", "C", &k, &k, &c_b15, &a[k * (k + 1)], &k, &c_b16,
+ a, &k);
+ zpotrf_("L", &k, a, &k, info);
+ if (*info > 0) {
+ *info += k;
+ }
+
+ } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,k+1), T2 -> B(0,k), S -> B(0,0) */
+/* T1 -> a(0+k*(k+1)), T2 -> a(0+k*k), S -> a(0+0)); lda=k */
+
+ zpotrf_("U", &k, &a[k * (k + 1)], &k, info);
+ if (*info > 0) {
+ return 0;
+ }
+ ztrsm_("R", "U", "N", "N", &k, &k, &c_b1, &a[k * (k + 1)], &k,
+ a, &k);
+ zherk_("L", "N", &k, &k, &c_b15, a, &k, &c_b16, &a[k * k], &k);
+ zpotrf_("L", &k, &a[k * k], &k, info);
+ if (*info > 0) {
+ *info += k;
+ }
+
+ }
+
+ }
+
+ }
+
+ return 0;
+
+/* End of ZPFTRF */
+
+} /* zpftrf_ */
diff --git a/contrib/libs/clapack/zpftri.c b/contrib/libs/clapack/zpftri.c
new file mode 100644
index 0000000000..c23659a657
--- /dev/null
+++ b/contrib/libs/clapack/zpftri.c
@@ -0,0 +1,426 @@
+/* zpftri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static doublereal c_b12 = 1.;
+
+/* Subroutine */ int zpftri_(char *transr, char *uplo, integer *n,
+ doublecomplex *a, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ integer k, n1, n2;
+ logical normaltransr;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zherk_(char *, char *, integer *, integer *,
+ doublereal *, doublecomplex *, integer *, doublereal *,
+ doublecomplex *, integer *);
+ logical lower;
+ extern /* Subroutine */ int ztrmm_(char *, char *, char *, char *,
+ integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *),
+ xerbla_(char *, integer *);
+ logical nisodd;
+ extern /* Subroutine */ int zlauum_(char *, integer *, doublecomplex *,
+ integer *, integer *), ztftri_(char *, char *, char *,
+ integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPFTRI computes the inverse of a complex Hermitian positive definite */
+/* matrix A using the Cholesky factorization A = U**H*U or A = L*L**H */
+/* computed by ZPFTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': The Normal TRANSR of RFP A is stored; */
+/* = 'C': The Conjugate-transpose TRANSR of RFP A is stored. */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension ( N*(N+1)/2 ); */
+/* On entry, the Hermitian matrix A in RFP format. RFP format is */
+/* described by TRANSR, UPLO, and N as follows: If TRANSR = 'N' */
+/* then RFP A is (0:N,0:k-1) when N is even; k=N/2. RFP A is */
+/* (0:N-1,0:k) when N is odd; k=N/2. IF TRANSR = 'C' then RFP is */
+/* the Conjugate-transpose of RFP A as defined when */
+/* TRANSR = 'N'. The contents of RFP A are defined by UPLO as */
+/* follows: If UPLO = 'U' the RFP A contains the nt elements of */
+/* upper packed A. If UPLO = 'L' the RFP A contains the elements */
+/* of lower packed A. The LDA of RFP A is (N+1)/2 when TRANSR = */
+/* 'C'. When TRANSR is 'N' the LDA is N+1 when N is even and N */
+/* is odd. See the Note below for more details. */
+
+/* On exit, the Hermitian inverse of the original matrix, in the */
+/* same storage format. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the (i,i) element of the factor U or L is */
+/* zero, and the inverse could not be computed. */
+
+/* Note: */
+/* ===== */
+
+/* We first consider Standard Packed Format when N is even. */
+/* We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* conjugate-transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* conjugate-transpose of the last three columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- -- */
+/* 03 04 05 33 43 53 */
+/* -- -- */
+/* 13 14 15 00 44 54 */
+/* -- */
+/* 23 24 25 10 11 55 */
+
+/* 33 34 35 20 21 22 */
+/* -- */
+/* 00 44 45 30 31 32 */
+/* -- -- */
+/* 01 11 55 40 41 42 */
+/* -- -- -- */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We next consider Standard Packed Format when N is odd. */
+/* We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* conjugate-transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* conjugate-transpose of the last two columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- */
+/* 02 03 04 00 33 43 */
+/* -- */
+/* 12 13 14 10 11 44 */
+
+/* 22 23 24 20 21 22 */
+/* -- */
+/* 00 33 34 30 31 32 */
+/* -- -- */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- */
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ *info = 0;
+ normaltransr = lsame_(transr, "N");
+ lower = lsame_(uplo, "L");
+ if (! normaltransr && ! lsame_(transr, "C")) {
+ *info = -1;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPFTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Invert the triangular Cholesky factor U or L. */
+
+ ztftri_(transr, uplo, "N", n, a, info);
+ if (*info > 0) {
+ return 0;
+ }
+
+/* If N is odd, set NISODD = .TRUE. */
+/* If N is even, set K = N/2 and NISODD = .FALSE. */
+
+ if (*n % 2 == 0) {
+ k = *n / 2;
+ nisodd = FALSE_;
+ } else {
+ nisodd = TRUE_;
+ }
+
+/* Set N1 and N2 depending on LOWER */
+
+ if (lower) {
+ n2 = *n / 2;
+ n1 = *n - n2;
+ } else {
+ n1 = *n / 2;
+ n2 = *n - n1;
+ }
+
+/* Start execution of triangular matrix multiply: inv(U)*inv(U)^C or */
+/* inv(L)^C*inv(L). There are eight cases. */
+
+ if (nisodd) {
+
+/* N is odd */
+
+ if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:N1-1) ) */
+/* T1 -> a(0,0), T2 -> a(0,1), S -> a(N1,0) */
+/* T1 -> a(0), T2 -> a(n), S -> a(N1) */
+
+ zlauum_("L", &n1, a, n, info);
+ zherk_("L", "C", &n1, &n2, &c_b12, &a[n1], n, &c_b12, a, n);
+ ztrmm_("L", "U", "N", "N", &n2, &n1, &c_b1, &a[*n], n, &a[n1],
+ n);
+ zlauum_("U", &n2, &a[*n], n, info);
+
+ } else {
+
+/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:N2-1) */
+/* T1 -> a(N1+1,0), T2 -> a(N1,0), S -> a(0,0) */
+/* T1 -> a(N2), T2 -> a(N1), S -> a(0) */
+
+ zlauum_("L", &n1, &a[n2], n, info);
+ zherk_("L", "N", &n1, &n2, &c_b12, a, n, &c_b12, &a[n2], n);
+ ztrmm_("R", "U", "C", "N", &n1, &n2, &c_b1, &a[n1], n, a, n);
+ zlauum_("U", &n2, &a[n1], n, info);
+
+ }
+
+ } else {
+
+/* N is odd and TRANSR = 'C' */
+
+ if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE, and N is odd */
+/* T1 -> a(0), T2 -> a(1), S -> a(0+N1*N1) */
+
+ zlauum_("U", &n1, a, &n1, info);
+ zherk_("U", "N", &n1, &n2, &c_b12, &a[n1 * n1], &n1, &c_b12,
+ a, &n1);
+ ztrmm_("R", "L", "N", "N", &n1, &n2, &c_b1, &a[1], &n1, &a[n1
+ * n1], &n1);
+ zlauum_("L", &n2, &a[1], &n1, info);
+
+ } else {
+
+/* SRPA for UPPER, TRANSPOSE, and N is odd */
+/* T1 -> a(0+N2*N2), T2 -> a(0+N1*N2), S -> a(0) */
+
+ zlauum_("U", &n1, &a[n2 * n2], &n2, info);
+ zherk_("U", "C", &n1, &n2, &c_b12, a, &n2, &c_b12, &a[n2 * n2]
+, &n2);
+ ztrmm_("L", "L", "C", "N", &n2, &n1, &c_b1, &a[n1 * n2], &n2,
+ a, &n2);
+ zlauum_("L", &n2, &a[n1 * n2], &n2, info);
+
+ }
+
+ }
+
+ } else {
+
+/* N is even */
+
+ if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/* T1 -> a(1), T2 -> a(0), S -> a(k+1) */
+
+ i__1 = *n + 1;
+ zlauum_("L", &k, &a[1], &i__1, info);
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ zherk_("L", "C", &k, &k, &c_b12, &a[k + 1], &i__1, &c_b12, &a[
+ 1], &i__2);
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ ztrmm_("L", "U", "N", "N", &k, &k, &c_b1, a, &i__1, &a[k + 1],
+ &i__2);
+ i__1 = *n + 1;
+ zlauum_("U", &k, a, &i__1, info);
+
+ } else {
+
+/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(k+1,0) , T2 -> a(k,0), S -> a(0,0) */
+/* T1 -> a(k+1), T2 -> a(k), S -> a(0) */
+
+ i__1 = *n + 1;
+ zlauum_("L", &k, &a[k + 1], &i__1, info);
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ zherk_("L", "N", &k, &k, &c_b12, a, &i__1, &c_b12, &a[k + 1],
+ &i__2);
+ i__1 = *n + 1;
+ i__2 = *n + 1;
+ ztrmm_("R", "U", "C", "N", &k, &k, &c_b1, &a[k], &i__1, a, &
+ i__2);
+ i__1 = *n + 1;
+ zlauum_("U", &k, &a[k], &i__1, info);
+
+ }
+
+ } else {
+
+/* N is even and TRANSR = 'C' */
+
+ if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE, and N is even (see paper) */
+/* T1 -> B(0,1), T2 -> B(0,0), S -> B(0,k+1), */
+/* T1 -> a(0+k), T2 -> a(0+0), S -> a(0+k*(k+1)); lda=k */
+
+ zlauum_("U", &k, &a[k], &k, info);
+ zherk_("U", "N", &k, &k, &c_b12, &a[k * (k + 1)], &k, &c_b12,
+ &a[k], &k);
+ ztrmm_("R", "L", "N", "N", &k, &k, &c_b1, a, &k, &a[k * (k +
+ 1)], &k);
+ zlauum_("L", &k, a, &k, info);
+
+ } else {
+
+/* SRPA for UPPER, TRANSPOSE, and N is even (see paper) */
+/* T1 -> B(0,k+1), T2 -> B(0,k), S -> B(0,0), */
+/* T1 -> a(0+k*(k+1)), T2 -> a(0+k*k), S -> a(0+0)); lda=k */
+
+ zlauum_("U", &k, &a[k * (k + 1)], &k, info);
+ zherk_("U", "C", &k, &k, &c_b12, a, &k, &c_b12, &a[k * (k + 1)
+ ], &k);
+ ztrmm_("L", "L", "C", "N", &k, &k, &c_b1, &a[k * k], &k, a, &
+ k);
+ zlauum_("L", &k, &a[k * k], &k, info);
+
+ }
+
+ }
+
+ }
+
+ return 0;
+
+/* End of ZPFTRI */
+
+} /* zpftri_ */
diff --git a/contrib/libs/clapack/zpftrs.c b/contrib/libs/clapack/zpftrs.c
new file mode 100644
index 0000000000..9e5b151f8e
--- /dev/null
+++ b/contrib/libs/clapack/zpftrs.c
@@ -0,0 +1,260 @@
+/* zpftrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+
+/* Subroutine */ int zpftrs_(char *transr, char *uplo, integer *n, integer *
+ nrhs, doublecomplex *a, doublecomplex *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ logical normaltransr;
+ extern logical lsame_(char *, char *);
+ logical lower;
+ extern /* Subroutine */ int ztfsm_(char *, char *, char *, char *, char *,
+ integer *, integer *, doublecomplex *, doublecomplex *,
+ doublecomplex *, integer *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPFTRS solves a system of linear equations A*X = B with a Hermitian */
+/* positive definite matrix A using the Cholesky factorization */
+/* A = U**H*U or A = L*L**H computed by ZPFTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': The Normal TRANSR of RFP A is stored; */
+/* = 'C': The Conjugate-transpose TRANSR of RFP A is stored. */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': Upper triangle of RFP A is stored; */
+/* = 'L': Lower triangle of RFP A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension ( N*(N+1)/2 ); */
+/* The triangular factor U or L from the Cholesky factorization */
+/* of RFP A = U**H*U or RFP A = L*L**H, as computed by ZPFTRF. */
+/* See note below for more details about RFP A. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Note: */
+/* ===== */
+
+/* We first consider Standard Packed Format when N is even. */
+/* We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* conjugate-transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* conjugate-transpose of the last three columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- -- */
+/* 03 04 05 33 43 53 */
+/* -- -- */
+/* 13 14 15 00 44 54 */
+/* -- */
+/* 23 24 25 10 11 55 */
+
+/* 33 34 35 20 21 22 */
+/* -- */
+/* 00 44 45 30 31 32 */
+/* -- -- */
+/* 01 11 55 40 41 42 */
+/* -- -- -- */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We next consider Standard Packed Format when N is odd. */
+/* We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* conjugate-transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* conjugate-transpose of the last two columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- */
+/* 02 03 04 00 33 43 */
+/* -- */
+/* 12 13 14 10 11 44 */
+
+/* 22 23 24 20 21 22 */
+/* -- */
+/* 00 33 34 30 31 32 */
+/* -- -- */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- */
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ normaltransr = lsame_(transr, "N");
+ lower = lsame_(uplo, "L");
+ if (! normaltransr && ! lsame_(transr, "C")) {
+ *info = -1;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPFTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+/* start execution: there are two triangular solves */
+
+ if (lower) {
+ ztfsm_(transr, "L", uplo, "N", "N", n, nrhs, &c_b1, a, &b[b_offset],
+ ldb);
+ ztfsm_(transr, "L", uplo, "C", "N", n, nrhs, &c_b1, a, &b[b_offset],
+ ldb);
+ } else {
+ ztfsm_(transr, "L", uplo, "C", "N", n, nrhs, &c_b1, a, &b[b_offset],
+ ldb);
+ ztfsm_(transr, "L", uplo, "N", "N", n, nrhs, &c_b1, a, &b[b_offset],
+ ldb);
+ }
+
+ return 0;
+
+/* End of ZPFTRS */
+
+} /* zpftrs_ */
diff --git a/contrib/libs/clapack/zpocon.c b/contrib/libs/clapack/zpocon.c
new file mode 100644
index 0000000000..f3dc04f50e
--- /dev/null
+++ b/contrib/libs/clapack/zpocon.c
@@ -0,0 +1,225 @@
+/* zpocon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zpocon_(char *uplo, integer *n, doublecomplex *a,
+	integer *lda, doublereal *anorm, doublereal *rcond, doublecomplex *
+	work, doublereal *rwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *);
+
+    /* Local variables */
+    integer ix, kase;
+    doublereal scale;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    logical upper;
+    extern /* Subroutine */ int zlacn2_(integer *, doublecomplex *,
+	    doublecomplex *, doublereal *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    doublereal scalel, scaleu;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal ainvnm;
+    extern integer izamax_(integer *, doublecomplex *, integer *);
+    extern /* Subroutine */ int zdrscl_(integer *, doublereal *,
+	    doublecomplex *, integer *);
+    char normin[1];
+    doublereal smlnum;
+    extern /* Subroutine */ int zlatrs_(char *, char *, char *, char *,
+	    integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublereal *, doublereal *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPOCON estimates the reciprocal of the condition number (in the */
+/* 1-norm) of a complex Hermitian positive definite matrix using the */
+/* Cholesky factorization A = U**H*U or A = L*L**H computed by ZPOTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**H*U or A = L*L**H, as computed by ZPOTRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* ANORM (input) DOUBLE PRECISION */
+/* The 1-norm (or infinity-norm) of the Hermitian matrix A. */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments: shift the C arrays so that the 1-based */
+    /* Fortran-style indexing of the translated code works unchanged. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*anorm < 0.) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZPOCON", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *rcond = 0.;
+    if (*n == 0) {
+	*rcond = 1.;
+	return 0;
+    } else if (*anorm == 0.) {
+	return 0;
+    }
+
+    smlnum = dlamch_("Safe minimum");
+
+/* Estimate the 1-norm of inv(A). */
+
+/* ZLACN2 works by reverse communication: each call either returns the */
+/* finished estimate in AINVNM (KASE = 0) or asks the caller to apply */
+/* inv(A) (or its transpose) to the vector in WORK(1:N).  Since */
+/* A = U**H*U (or L*L**H), each request is served by two triangular */
+/* solves with ZLATRS, which scales to avoid overflow. */
+
+    kase = 0;
+    *(unsigned char *)normin = 'N';
+L10:
+    zlacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+    if (kase != 0) {
+	if (upper) {
+
+/* Multiply by inv(U'). */
+
+	    zlatrs_("Upper", "Conjugate transpose", "Non-unit", normin, n, &a[
+		    a_offset], lda, &work[1], &scalel, &rwork[1], info);
+	    /* After the first solve RWORK holds column norms, so later */
+	    /* solves can pass NORMIN = 'Y' and skip recomputing them. */
+	    *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(U). */
+
+	    zlatrs_("Upper", "No transpose", "Non-unit", normin, n, &a[
+		    a_offset], lda, &work[1], &scaleu, &rwork[1], info);
+	} else {
+
+/* Multiply by inv(L). */
+
+	    zlatrs_("Lower", "No transpose", "Non-unit", normin, n, &a[
+		    a_offset], lda, &work[1], &scalel, &rwork[1], info);
+	    *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(L'). */
+
+	    zlatrs_("Lower", "Conjugate transpose", "Non-unit", normin, n, &a[
+		    a_offset], lda, &work[1], &scaleu, &rwork[1], info);
+	}
+
+/* Multiply by 1/SCALE if doing so will not cause overflow. */
+
+	/* SCALEL/SCALEU are the protective scalings applied by the two */
+	/* ZLATRS solves; undo them unless division would overflow, in */
+	/* which case the estimate is abandoned (RCOND stays 0). */
+	scale = scalel * scaleu;
+	if (scale != 1.) {
+	    ix = izamax_(n, &work[1], &c__1);
+	    i__1 = ix;
+	    if (scale < ((d__1 = work[i__1].r, abs(d__1)) + (d__2 = d_imag(&
+		    work[ix]), abs(d__2))) * smlnum || scale == 0.) {
+		goto L20;
+	    }
+	    zdrscl_(n, &scale, &work[1], &c__1);
+	}
+	goto L10;
+    }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+    if (ainvnm != 0.) {
+	*rcond = 1. / ainvnm / *anorm;
+    }
+
+L20:
+    return 0;
+
+/* End of ZPOCON */
+
+} /* zpocon_ */
diff --git a/contrib/libs/clapack/zpoequ.c b/contrib/libs/clapack/zpoequ.c
new file mode 100644
index 0000000000..a6eb77b98d
--- /dev/null
+++ b/contrib/libs/clapack/zpoequ.c
@@ -0,0 +1,176 @@
+/* zpoequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zpoequ_(integer *n, doublecomplex *a, integer *lda,
+	doublereal *s, doublereal *scond, doublereal *amax, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__;
+    doublereal dmin__;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*  ZPOEQU computes scale factors S(i) = 1/sqrt(A(i,i)) that */
+/*  equilibrate the Hermitian positive definite matrix A: the scaled */
+/*  matrix B(i,j) = S(i)*A(i,j)*S(j) has unit diagonal, which keeps */
+/*  the condition number of B within a factor N of the smallest */
+/*  achievable by any diagonal scaling.  Only the diagonal of A is */
+/*  referenced. */
+
+/*  On exit, SCOND = min(S(i)) / max(S(i)) and AMAX is the largest */
+/*  diagonal entry.  INFO = i > 0 reports the first non-positive */
+/*  diagonal element; INFO < 0 reports an illegal argument. */
+
+    /* Shift to 1-based (Fortran-style) indexing. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --s;
+
+    /* Validate the arguments. */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*lda < max(1,*n)) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZPOEQU", &i__1);
+	return 0;
+    }
+
+    /* Empty matrix: nothing to scale. */
+    if (*n == 0) {
+	*scond = 1.;
+	*amax = 0.;
+	return 0;
+    }
+
+    /* Copy the (real) diagonal into S, tracking its extremes. */
+    s[1] = a[a_dim1 + 1].r;
+    dmin__ = s[1];
+    *amax = s[1];
+    for (i__ = 2; i__ <= *n; ++i__) {
+	s[i__] = a[i__ + i__ * a_dim1].r;
+	if (s[i__] < dmin__) {
+	    dmin__ = s[i__];
+	}
+	if (s[i__] > *amax) {
+	    *amax = s[i__];
+	}
+    }
+
+    if (dmin__ <= 0.) {
+
+/*        A cannot be positive definite: report the first offending */
+/*        diagonal entry through INFO. */
+
+	for (i__ = 1; i__ <= *n; ++i__) {
+	    if (s[i__] <= 0.) {
+		*info = i__;
+		return 0;
+	    }
+	}
+    } else {
+
+/*        Positive diagonal: form the reciprocal square roots and the */
+/*        ratio of the smallest to the largest scale factor. */
+
+	for (i__ = 1; i__ <= *n; ++i__) {
+	    s[i__] = 1. / sqrt(s[i__]);
+	}
+	*scond = sqrt(dmin__) / sqrt(*amax);
+    }
+    return 0;
+
+/* End of ZPOEQU */
+
+} /* zpoequ_ */
diff --git a/contrib/libs/clapack/zpoequb.c b/contrib/libs/clapack/zpoequb.c
new file mode 100644
index 0000000000..e1235c7978
--- /dev/null
+++ b/contrib/libs/clapack/zpoequb.c
@@ -0,0 +1,195 @@
+/* zpoequb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zpoequb_(integer *n, doublecomplex *a, integer *lda,
+	doublereal *s, doublereal *scond, doublereal *amax, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Builtin functions */
+    double log(doublereal), pow_di(doublereal *, integer *), sqrt(doublereal);
+
+    /* Local variables */
+    integer i__;
+    doublereal tmp, base, dmin__;
+    extern doublereal dlamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*  ZPOEQUB computes equilibration scale factors for a Hermitian */
+/*  positive definite matrix A exactly as ZPOEQU does, except that */
+/*  each factor 1/sqrt(A(i,i)) is rounded to a power of the */
+/*  floating-point radix, so scaling by S introduces no rounding */
+/*  error.  Only the diagonal of A is referenced. */
+
+/*  On exit, SCOND = sqrt(smallest diag) / sqrt(largest diag), AMAX */
+/*  is the largest diagonal entry, and INFO = i > 0 reports the */
+/*  first non-positive diagonal element. */
+
+/*  Positive definite only performs 1 pass of equilibration. */
+
+    /* Shift to 1-based (Fortran-style) indexing. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --s;
+
+    /* Validate the arguments. */
+    *info = 0;
+    if (*n < 0) {
+	*info = -1;
+    } else if (*lda < max(1,*n)) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZPOEQUB", &i__1);
+	return 0;
+    }
+
+    /* Empty matrix: nothing to scale. */
+    if (*n == 0) {
+	*scond = 1.;
+	*amax = 0.;
+	return 0;
+    }
+
+    /* TMP maps log(d) to the radix exponent of d**(-1/2), so that */
+    /* BASE**(TMP*log(d)) approximates 1/sqrt(d) by a radix power. */
+    base = dlamch_("B");
+    tmp = -.5 / log(base);
+
+    /* Copy the (real) diagonal into S, tracking its extremes. */
+    s[1] = a[a_dim1 + 1].r;
+    dmin__ = s[1];
+    *amax = s[1];
+    for (i__ = 2; i__ <= *n; ++i__) {
+	s[i__] = a[i__ + i__ * a_dim1].r;
+	if (s[i__] < dmin__) {
+	    dmin__ = s[i__];
+	}
+	if (s[i__] > *amax) {
+	    *amax = s[i__];
+	}
+    }
+
+    if (dmin__ <= 0.) {
+
+/*        A cannot be positive definite: report the first offending */
+/*        diagonal entry through INFO. */
+
+	for (i__ = 1; i__ <= *n; ++i__) {
+	    if (s[i__] <= 0.) {
+		*info = i__;
+		return 0;
+	    }
+	}
+    } else {
+
+/*        Round each scale factor to a power of the radix so that */
+/*        multiplying by it is exact. */
+
+	for (i__ = 1; i__ <= *n; ++i__) {
+	    i__2 = (integer) (tmp * log(s[i__]));
+	    s[i__] = pow_di(&base, &i__2);
+	}
+
+/*        SCOND = min(S(I)) / max(S(I)), computed from the unrounded */
+/*        diagonal extremes. */
+
+	*scond = sqrt(dmin__) / sqrt(*amax);
+    }
+
+    return 0;
+
+/* End of ZPOEQUB */
+
+} /* zpoequb_ */
diff --git a/contrib/libs/clapack/zporfs.c b/contrib/libs/clapack/zporfs.c
new file mode 100644
index 0000000000..b42267dc97
--- /dev/null
+++ b/contrib/libs/clapack/zporfs.c
@@ -0,0 +1,465 @@
+/* zporfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zporfs_(char *uplo, integer *n, integer *nrhs,
+	doublecomplex *a, integer *lda, doublecomplex *af, integer *ldaf,
+	doublecomplex *b, integer *ldb, doublecomplex *x, integer *ldx,
+	doublereal *ferr, doublereal *berr, doublecomplex *work, doublereal *
+	rwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+	    x_offset, i__1, i__2, i__3, i__4, i__5;
+    doublereal d__1, d__2, d__3, d__4;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *);
+
+    /* Local variables */
+    integer i__, j, k;
+    doublereal s, xk;
+    integer nz;
+    doublereal eps;
+    integer kase;
+    doublereal safe1, safe2;
+    extern logical lsame_(char *, char *);
+    integer isave[3], count;
+    extern /* Subroutine */ int zhemv_(char *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, doublecomplex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *), zaxpy_(integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *), zlacn2_(
+	    integer *, doublecomplex *, doublecomplex *, doublereal *,
+	    integer *, integer *);
+    extern doublereal dlamch_(char *);
+    doublereal safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal lstres;
+    extern /* Subroutine */ int zpotrs_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPORFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is Hermitian positive definite, */
+/* and provides error bounds and backward error estimates for the */
+/* solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The Hermitian matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input) COMPLEX*16 array, dimension (LDAF,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**H*U or A = L*L**H, as computed by ZPOTRF. */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by ZPOTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ==================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments: shift the C arrays so that the 1-based */
+    /* Fortran-style indexing of the translated code works unchanged. */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    af_dim1 = *ldaf;
+    af_offset = 1 + af_dim1;
+    af -= af_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldaf < max(1,*n)) {
+	*info = -7;
+    } else if (*ldb < max(1,*n)) {
+	*info = -9;
+    } else if (*ldx < max(1,*n)) {
+	*info = -11;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZPORFS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] = 0.;
+	    berr[j] = 0.;
+/* L10: */
+	}
+	return 0;
+    }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+    /* SAFE1/SAFE2 are thresholds used below to guard the backward-error */
+    /* quotient against division by tiny (or zero) denominators. */
+    nz = *n + 1;
+    eps = dlamch_("Epsilon");
+    safmin = dlamch_("Safe minimum");
+    safe1 = nz * safmin;
+    safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+	/* COUNT is the refinement step number (capped at ITMAX = 5); */
+	/* LSTRES holds the previous backward error to detect stagnation. */
+	count = 1;
+	lstres = 3.;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+	zcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+	z__1.r = -1., z__1.i = -0.;
+	zhemv_(uplo, n, &z__1, &a[a_offset], lda, &x[j * x_dim1 + 1], &c__1, &
+		c_b1, &work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+	/* Start the denominator with abs(B); abs(A)*abs(X) is added next. */
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * b_dim1;
+	    rwork[i__] = (d__1 = b[i__3].r, abs(d__1)) + (d__2 = d_imag(&b[
+		    i__ + j * b_dim1]), abs(d__2));
+/* L30: */
+	}
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+	/* Only the stored triangle of the Hermitian A is traversed; each */
+	/* off-diagonal entry contributes to both row i (via RWORK) and */
+	/* row k (via the accumulator S).  Diagonal entries are real. */
+	if (upper) {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		s = 0.;
+		i__3 = k + j * x_dim1;
+		xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[k + j *
+			x_dim1]), abs(d__2));
+		i__3 = k - 1;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    i__4 = i__ + k * a_dim1;
+		    rwork[i__] += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 =
+			    d_imag(&a[i__ + k * a_dim1]), abs(d__2))) * xk;
+		    i__4 = i__ + k * a_dim1;
+		    i__5 = i__ + j * x_dim1;
+		    s += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 = d_imag(&a[
+			    i__ + k * a_dim1]), abs(d__2))) * ((d__3 = x[i__5]
+			    .r, abs(d__3)) + (d__4 = d_imag(&x[i__ + j *
+			    x_dim1]), abs(d__4)));
+/* L40: */
+		}
+		i__3 = k + k * a_dim1;
+		rwork[k] = rwork[k] + (d__1 = a[i__3].r, abs(d__1)) * xk + s;
+/* L50: */
+	    }
+	} else {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		s = 0.;
+		i__3 = k + j * x_dim1;
+		xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[k + j *
+			x_dim1]), abs(d__2));
+		i__3 = k + k * a_dim1;
+		rwork[k] += (d__1 = a[i__3].r, abs(d__1)) * xk;
+		i__3 = *n;
+		for (i__ = k + 1; i__ <= i__3; ++i__) {
+		    i__4 = i__ + k * a_dim1;
+		    rwork[i__] += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 =
+			    d_imag(&a[i__ + k * a_dim1]), abs(d__2))) * xk;
+		    i__4 = i__ + k * a_dim1;
+		    i__5 = i__ + j * x_dim1;
+		    s += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 = d_imag(&a[
+			    i__ + k * a_dim1]), abs(d__2))) * ((d__3 = x[i__5]
+			    .r, abs(d__3)) + (d__4 = d_imag(&x[i__ + j *
+			    x_dim1]), abs(d__4)));
+/* L60: */
+		}
+		rwork[k] += s;
+/* L70: */
+	    }
+	}
+	/* BERR(j) = max over i of |R(i)| / denominator(i), with the */
+	/* SAFE1 regularization when the denominator is below SAFE2. */
+	s = 0.;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (rwork[i__] > safe2) {
+/* Computing MAX */
+		i__3 = i__;
+		d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+			d_imag(&work[i__]), abs(d__2))) / rwork[i__];
+		s = max(d__3,d__4);
+	    } else {
+/* Computing MAX */
+		i__3 = i__;
+		d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+			d_imag(&work[i__]), abs(d__2)) + safe1) / (rwork[i__]
+			+ safe1);
+		s = max(d__3,d__4);
+	    }
+/* L80: */
+	}
+	berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+	if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+	    /* Solve A*dX = R with the Cholesky factor, then X += dX. */
+	    zpotrs_(uplo, n, &c__1, &af[af_offset], ldaf, &work[1], n, info);
+	    zaxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+	    lstres = berr[j];
+	    ++count;
+	    goto L20;
+	}
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use ZLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+	/* Overwrite RWORK with the weight vector W defined above. */
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (rwork[i__] > safe2) {
+		i__3 = i__;
+		rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+			d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+			;
+	    } else {
+		i__3 = i__;
+		rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+			d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+			+ safe1;
+	    }
+/* L90: */
+	}
+
+	/* Reverse-communication loop: ZLACN2 asks (via KASE) for products */
+	/* with inv(A)*diag(W) or its conjugate transpose; both are served */
+	/* by ZPOTRS since A is Hermitian, with the diag(W) scaling applied */
+	/* before or after the solve as appropriate. */
+	kase = 0;
+L100:
+	zlacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+	if (kase != 0) {
+	    if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+		zpotrs_(uplo, n, &c__1, &af[af_offset], ldaf, &work[1], n,
+			info);
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__;
+		    z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4]
+			    * work[i__5].i;
+		    work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L110: */
+		}
+	    } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__;
+		    z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4]
+			    * work[i__5].i;
+		    work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L120: */
+		}
+		zpotrs_(uplo, n, &c__1, &af[af_offset], ldaf, &work[1], n,
+			info);
+	    }
+	    goto L100;
+	}
+
+/* Normalize error. */
+
+	/* FERR(j) is divided by the infinity-norm of X(j), reusing LSTRES */
+	/* as a scratch maximum; a zero column leaves FERR unscaled. */
+	lstres = 0.;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    i__3 = i__ + j * x_dim1;
+	    d__3 = lstres, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 =
+		    d_imag(&x[i__ + j * x_dim1]), abs(d__2));
+	    lstres = max(d__3,d__4);
+/* L130: */
+	}
+	if (lstres != 0.) {
+	    ferr[j] /= lstres;
+	}
+
+/* L140: */
+    }
+
+    return 0;
+
+/* End of ZPORFS */
+
+} /* zporfs_ */
diff --git a/contrib/libs/clapack/zposv.c b/contrib/libs/clapack/zposv.c
new file mode 100644
index 0000000000..134b5bc937
--- /dev/null
+++ b/contrib/libs/clapack/zposv.c
@@ -0,0 +1,152 @@
+/* zposv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zposv_(char *uplo, integer *n, integer *nrhs,
+ doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), zpotrf_(
+ char *, integer *, doublecomplex *, integer *, integer *),
+ zpotrs_(char *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPOSV computes the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N Hermitian positive definite matrix and X and B */
+/* are N-by-NRHS matrices. */
+
+/* The Cholesky decomposition is used to factor A as */
+/* A = U**H* U, if UPLO = 'U', or */
+/* A = L * L**H, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is a lower triangular */
+/* matrix. The factored form of A is then used to solve the system of */
+/* equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i of A is not */
+/* positive definite, so the factorization could not be */
+/* completed, and the solution has not been computed. */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPOSV ", &i__1);
+ return 0;
+ }
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+ zpotrf_(uplo, n, &a[a_offset], lda, info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ zpotrs_(uplo, n, nrhs, &a[a_offset], lda, &b[b_offset], ldb, info);
+
+ }
+ return 0;
+
+/* End of ZPOSV */
+
+} /* zposv_ */
diff --git a/contrib/libs/clapack/zposvx.c b/contrib/libs/clapack/zposvx.c
new file mode 100644
index 0000000000..7eabf96605
--- /dev/null
+++ b/contrib/libs/clapack/zposvx.c
@@ -0,0 +1,462 @@
+/* zposvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zposvx_(char *fact, char *uplo, integer *n, integer *
+ nrhs, doublecomplex *a, integer *lda, doublecomplex *af, integer *
+ ldaf, char *equed, doublereal *s, doublecomplex *b, integer *ldb,
+ doublecomplex *x, integer *ldx, doublereal *rcond, doublereal *ferr,
+ doublereal *berr, doublecomplex *work, doublereal *rwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+ x_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal amax, smin, smax;
+ extern logical lsame_(char *, char *);
+ doublereal scond, anorm;
+ logical equil, rcequ;
+ extern doublereal dlamch_(char *);
+ logical nofact;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum;
+ extern doublereal zlanhe_(char *, char *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ extern /* Subroutine */ int zlaqhe_(char *, integer *, doublecomplex *,
+ integer *, doublereal *, doublereal *, doublereal *, char *);
+ integer infequ;
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *),
+ zpocon_(char *, integer *, doublecomplex *, integer *, doublereal
+ *, doublereal *, doublecomplex *, doublereal *, integer *)
+ ;
+ doublereal smlnum;
+ extern /* Subroutine */ int zpoequ_(integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *), zporfs_(
+ char *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublereal *, doublereal *,
+ doublecomplex *, doublereal *, integer *), zpotrf_(char *,
+ integer *, doublecomplex *, integer *, integer *),
+ zpotrs_(char *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPOSVX uses the Cholesky factorization A = U**H*U or A = L*L**H to */
+/* compute the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N Hermitian positive definite matrix and X and B */
+/* are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/* the system: */
+/* diag(S) * A * diag(S) * inv(diag(S)) * X = diag(S) * B */
+/* Whether or not the system will be equilibrated depends on the */
+/* scaling of the matrix A, but if equilibration is used, A is */
+/* overwritten by diag(S)*A*diag(S) and B by diag(S)*B. */
+
+/* 2. If FACT = 'N' or 'E', the Cholesky decomposition is used to */
+/* factor the matrix A (after equilibration if FACT = 'E') as */
+/* A = U**H* U, if UPLO = 'U', or */
+/* A = L * L**H, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is a lower triangular */
+/* matrix. */
+
+/* 3. If the leading i-by-i principal minor is not positive definite, */
+/* then the routine returns with INFO = i. Otherwise, the factored */
+/* form of A is used to estimate the condition number of the matrix */
+/* A. If the reciprocal of the condition number is less than machine */
+/* precision, INFO = N+1 is returned as a warning, but the routine */
+/* still goes on to solve for X and compute error bounds as */
+/* described below. */
+
+/* 4. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 5. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* 6. If equilibration was used, the matrix X is premultiplied by */
+/* diag(S) so that it solves the original system before */
+/* equilibration. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of the matrix A is */
+/* supplied on entry, and if not, whether the matrix A should be */
+/* equilibrated before it is factored. */
+/* = 'F': On entry, AF contains the factored form of A. */
+/* If EQUED = 'Y', the matrix A has been equilibrated */
+/* with scaling factors given by S. A and AF will not */
+/* be modified. */
+/* = 'N': The matrix A will be copied to AF and factored. */
+/* = 'E': The matrix A will be equilibrated if necessary, then */
+/* copied to AF and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A, except if FACT = 'F' and */
+/* EQUED = 'Y', then A must contain the equilibrated matrix */
+/* diag(S)*A*diag(S). If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. A is not modified if */
+/* FACT = 'F' or 'N', or if FACT = 'E' and EQUED = 'N' on exit. */
+
+/* On exit, if FACT = 'E' and EQUED = 'Y', A is overwritten by */
+/* diag(S)*A*diag(S). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input or output) COMPLEX*16 array, dimension (LDAF,N) */
+/* If FACT = 'F', then AF is an input argument and on entry */
+/* contains the triangular factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H, in the same storage */
+/* format as A. If EQUED .ne. 'N', then AF is the factored form */
+/* of the equilibrated matrix diag(S)*A*diag(S). */
+
+/* If FACT = 'N', then AF is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H of the original */
+/* matrix A. */
+
+/* If FACT = 'E', then AF is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H of the equilibrated */
+/* matrix A (see the description of A for the form of the */
+/* equilibrated matrix). */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* EQUED (input or output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration (always true if FACT = 'N'). */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+/* EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/* output argument. */
+
+/* S (input or output) DOUBLE PRECISION array, dimension (N) */
+/* The scale factors for A; not accessed if EQUED = 'N'. S is */
+/* an input argument if FACT = 'F'; otherwise, S is an output */
+/* argument. If FACT = 'F' and EQUED = 'Y', each element of S */
+/* must be positive. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS righthand side matrix B. */
+/* On exit, if EQUED = 'N', B is not modified; if EQUED = 'Y', */
+/* B is overwritten by diag(S) * B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X to */
+/* the original system of equations. Note that if EQUED = 'Y', */
+/* A and B are modified on exit, and the solution to the */
+/* equilibrated system is inv(diag(S))*X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A after equilibration (if done). If RCOND is less than the */
+/* machine precision (in particular, if RCOND = 0), the matrix */
+/* is singular to working precision. This condition is */
+/* indicated by a return code of INFO > 0. */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: the leading minor of order i of A is */
+/* not positive definite, so the factorization */
+/* could not be completed, and the solution has not */
+/* been computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ af_dim1 = *ldaf;
+ af_offset = 1 + af_dim1;
+ af -= af_offset;
+ --s;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ equil = lsame_(fact, "E");
+ if (nofact || equil) {
+ *(unsigned char *)equed = 'N';
+ rcequ = FALSE_;
+ } else {
+ rcequ = lsame_(equed, "Y");
+ smlnum = dlamch_("Safe minimum");
+ bignum = 1. / smlnum;
+ }
+
+/* Test the input parameters. */
+
+ if (! nofact && ! equil && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! lsame_(uplo, "U") && ! lsame_(uplo,
+ "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else if (*ldaf < max(1,*n)) {
+ *info = -8;
+ } else if (lsame_(fact, "F") && ! (rcequ || lsame_(
+ equed, "N"))) {
+ *info = -9;
+ } else {
+ if (rcequ) {
+ smin = bignum;
+ smax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ d__1 = smin, d__2 = s[j];
+ smin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = smax, d__2 = s[j];
+ smax = max(d__1,d__2);
+/* L10: */
+ }
+ if (smin <= 0.) {
+ *info = -10;
+ } else if (*n > 0) {
+ scond = max(smin,smlnum) / min(smax,bignum);
+ } else {
+ scond = 1.;
+ }
+ }
+ if (*info == 0) {
+ if (*ldb < max(1,*n)) {
+ *info = -12;
+ } else if (*ldx < max(1,*n)) {
+ *info = -14;
+ }
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPOSVX", &i__1);
+ return 0;
+ }
+
+ if (equil) {
+
+/* Compute row and column scalings to equilibrate the matrix A. */
+
+ zpoequ_(n, &a[a_offset], lda, &s[1], &scond, &amax, &infequ);
+ if (infequ == 0) {
+
+/* Equilibrate the matrix. */
+
+ zlaqhe_(uplo, n, &a[a_offset], lda, &s[1], &scond, &amax, equed);
+ rcequ = lsame_(equed, "Y");
+ }
+ }
+
+/* Scale the right hand side. */
+
+ if (rcequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__;
+ i__5 = i__ + j * b_dim1;
+ z__1.r = s[i__4] * b[i__5].r, z__1.i = s[i__4] * b[i__5].i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+
+ if (nofact || equil) {
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+ zlacpy_(uplo, n, n, &a[a_offset], lda, &af[af_offset], ldaf);
+ zpotrf_(uplo, n, &af[af_offset], ldaf, info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ anorm = zlanhe_("1", uplo, n, &a[a_offset], lda, &rwork[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ zpocon_(uplo, n, &af[af_offset], ldaf, &anorm, rcond, &work[1], &rwork[1],
+ info);
+
+/* Compute the solution matrix X. */
+
+ zlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ zpotrs_(uplo, n, nrhs, &af[af_offset], ldaf, &x[x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solution and */
+/* compute error bounds and backward error estimates for it. */
+
+ zporfs_(uplo, n, nrhs, &a[a_offset], lda, &af[af_offset], ldaf, &b[
+ b_offset], ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[1], &
+ rwork[1], info);
+
+/* Transform the solution matrix X to a solution of the original */
+/* system. */
+
+ if (rcequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * x_dim1;
+ i__4 = i__;
+ i__5 = i__ + j * x_dim1;
+ z__1.r = s[i__4] * x[i__5].r, z__1.i = s[i__4] * x[i__5].i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L40: */
+ }
+/* L50: */
+ }
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] /= scond;
+/* L60: */
+ }
+ }
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < dlamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ return 0;
+
+/* End of ZPOSVX */
+
+} /* zposvx_ */
diff --git a/contrib/libs/clapack/zpotf2.c b/contrib/libs/clapack/zpotf2.c
new file mode 100644
index 0000000000..ffdb708a60
--- /dev/null
+++ b/contrib/libs/clapack/zpotf2.c
@@ -0,0 +1,245 @@
+/* zpotf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zpotf2_(char *uplo, integer *n, doublecomplex *a,
+ integer *lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublereal d__1;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer j;
+ doublereal ajj;
+ extern logical lsame_(char *, char *);
+ extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *);
+ logical upper;
+ extern logical disnan_(doublereal *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+ integer *, doublereal *, doublecomplex *, integer *), zlacgv_(
+ integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPOTF2 computes the Cholesky factorization of a complex Hermitian */
+/* positive definite matrix A. */
+
+/* The factorization has the form */
+/* A = U' * U , if UPLO = 'U', or */
+/* A = L * L', if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular. */
+
+/* This is the unblocked version of the algorithm, calling Level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* Hermitian matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization A = U'*U or A = L*L'. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, the leading minor of order k is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPOTF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute U(J,J) and test for non-positive-definiteness. */
+
+ i__2 = j + j * a_dim1;
+ d__1 = a[i__2].r;
+ i__3 = j - 1;
+ zdotc_(&z__2, &i__3, &a[j * a_dim1 + 1], &c__1, &a[j * a_dim1 + 1]
+, &c__1);
+ z__1.r = d__1 - z__2.r, z__1.i = -z__2.i;
+ ajj = z__1.r;
+ if (ajj <= 0. || disnan_(&ajj)) {
+ i__2 = j + j * a_dim1;
+ a[i__2].r = ajj, a[i__2].i = 0.;
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ i__2 = j + j * a_dim1;
+ a[i__2].r = ajj, a[i__2].i = 0.;
+
+/* Compute elements J+1:N of row J. */
+
+ if (j < *n) {
+ i__2 = j - 1;
+ zlacgv_(&i__2, &a[j * a_dim1 + 1], &c__1);
+ i__2 = j - 1;
+ i__3 = *n - j;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("Transpose", &i__2, &i__3, &z__1, &a[(j + 1) * a_dim1
+ + 1], lda, &a[j * a_dim1 + 1], &c__1, &c_b1, &a[j + (
+ j + 1) * a_dim1], lda);
+ i__2 = j - 1;
+ zlacgv_(&i__2, &a[j * a_dim1 + 1], &c__1);
+ i__2 = *n - j;
+ d__1 = 1. / ajj;
+ zdscal_(&i__2, &d__1, &a[j + (j + 1) * a_dim1], lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute L(J,J) and test for non-positive-definiteness. */
+
+ i__2 = j + j * a_dim1;
+ d__1 = a[i__2].r;
+ i__3 = j - 1;
+ zdotc_(&z__2, &i__3, &a[j + a_dim1], lda, &a[j + a_dim1], lda);
+ z__1.r = d__1 - z__2.r, z__1.i = -z__2.i;
+ ajj = z__1.r;
+ if (ajj <= 0. || disnan_(&ajj)) {
+ i__2 = j + j * a_dim1;
+ a[i__2].r = ajj, a[i__2].i = 0.;
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ i__2 = j + j * a_dim1;
+ a[i__2].r = ajj, a[i__2].i = 0.;
+
+/* Compute elements J+1:N of column J. */
+
+ if (j < *n) {
+ i__2 = j - 1;
+ zlacgv_(&i__2, &a[j + a_dim1], lda);
+ i__2 = *n - j;
+ i__3 = j - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No transpose", &i__2, &i__3, &z__1, &a[j + 1 + a_dim1]
+, lda, &a[j + a_dim1], lda, &c_b1, &a[j + 1 + j *
+ a_dim1], &c__1);
+ i__2 = j - 1;
+ zlacgv_(&i__2, &a[j + a_dim1], lda);
+ i__2 = *n - j;
+ d__1 = 1. / ajj;
+ zdscal_(&i__2, &d__1, &a[j + 1 + j * a_dim1], &c__1);
+ }
+/* L20: */
+ }
+ }
+ goto L40;
+
+L30:
+ *info = j;
+
+L40:
+ return 0;
+
+/* End of ZPOTF2 */
+
+} /* zpotf2_ */
diff --git a/contrib/libs/clapack/zpotrf.c b/contrib/libs/clapack/zpotrf.c
new file mode 100644
index 0000000000..04edc4084e
--- /dev/null
+++ b/contrib/libs/clapack/zpotrf.c
@@ -0,0 +1,248 @@
+/* zpotrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b14 = -1.;
+static doublereal c_b15 = 1.;
+
+/* Subroutine */ int zpotrf_(char *uplo, integer *n, doublecomplex *a,
+ integer *lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer j, jb, nb;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), zherk_(char *, char *, integer *,
+ integer *, doublereal *, doublecomplex *, integer *, doublereal *,
+ doublecomplex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int ztrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *),
+ zpotf2_(char *, integer *, doublecomplex *, integer *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPOTRF computes the Cholesky factorization of a complex Hermitian */
+/* positive definite matrix A. */
+
+/* The factorization has the form */
+/* A = U**H * U, if UPLO = 'U', or */
+/* A = L * L**H, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular. */
+
+/* This is the block version of the algorithm, calling Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPOTRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "ZPOTRF", uplo, n, &c_n1, &c_n1, &c_n1);
+ if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code. */
+
+ zpotf2_(uplo, n, &a[a_offset], lda, info);
+ } else {
+
+/* Use blocked code. */
+
+ if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Update and factorize the current diagonal block and test */
+/* for non-positive-definiteness. */
+
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - j + 1;
+ jb = min(i__3,i__4);
+ i__3 = j - 1;
+ zherk_("Upper", "Conjugate transpose", &jb, &i__3, &c_b14, &a[
+ j * a_dim1 + 1], lda, &c_b15, &a[j + j * a_dim1], lda);
+ zpotf2_("Upper", &jb, &a[j + j * a_dim1], lda, info);
+ if (*info != 0) {
+ goto L30;
+ }
+ if (j + jb <= *n) {
+
+/* Compute the current block row. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("Conjugate transpose", "No transpose", &jb, &i__3,
+ &i__4, &z__1, &a[j * a_dim1 + 1], lda, &a[(j + jb)
+ * a_dim1 + 1], lda, &c_b1, &a[j + (j + jb) *
+ a_dim1], lda);
+ i__3 = *n - j - jb + 1;
+ ztrsm_("Left", "Upper", "Conjugate transpose", "Non-unit",
+ &jb, &i__3, &c_b1, &a[j + j * a_dim1], lda, &a[j
+ + (j + jb) * a_dim1], lda);
+ }
+/* L10: */
+ }
+
+ } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (j = 1; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Update and factorize the current diagonal block and test */
+/* for non-positive-definiteness. */
+
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - j + 1;
+ jb = min(i__3,i__4);
+ i__3 = j - 1;
+ zherk_("Lower", "No transpose", &jb, &i__3, &c_b14, &a[j +
+ a_dim1], lda, &c_b15, &a[j + j * a_dim1], lda);
+ zpotf2_("Lower", &jb, &a[j + j * a_dim1], lda, info);
+ if (*info != 0) {
+ goto L30;
+ }
+ if (j + jb <= *n) {
+
+/* Compute the current block column. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("No transpose", "Conjugate transpose", &i__3, &jb,
+ &i__4, &z__1, &a[j + jb + a_dim1], lda, &a[j +
+ a_dim1], lda, &c_b1, &a[j + jb + j * a_dim1], lda);
+ i__3 = *n - j - jb + 1;
+ ztrsm_("Right", "Lower", "Conjugate transpose", "Non-unit"
+, &i__3, &jb, &c_b1, &a[j + j * a_dim1], lda, &a[
+ j + jb + j * a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+ }
+ goto L40;
+
+L30:
+ *info = *info + j - 1;
+
+L40:
+ return 0;
+
+/* End of ZPOTRF */
+
+} /* zpotrf_ */
diff --git a/contrib/libs/clapack/zpotri.c b/contrib/libs/clapack/zpotri.c
new file mode 100644
index 0000000000..54978de219
--- /dev/null
+++ b/contrib/libs/clapack/zpotri.c
@@ -0,0 +1,125 @@
+/* zpotri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zpotri_(char *uplo, integer *n, doublecomplex *a, 
+	integer *lda, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *), zlauum_(
+	    char *, integer *, doublecomplex *, integer *, integer *), 
+	    ztrtri_(char *, char *, integer *, doublecomplex *, integer *, 
+	    integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPOTRI computes the inverse of a complex Hermitian positive definite */
+/* matrix A using the Cholesky factorization A = U**H*U or A = L*L**H */
+/* computed by ZPOTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the triangular factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H, as computed by */
+/* ZPOTRF. */
+/* On exit, the upper or lower triangle of the (Hermitian) */
+/* inverse of A, overwriting the input factor U or L. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the (i,i) element of the factor U or L is */
+/* zero, and the inverse could not be computed. */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    /* A is now addressed with 1-based, column-major (Fortran-style) */
+    /* subscripts: a[i + j * a_dim1]. */
+
+    /* Function Body */
+    *info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZPOTRI", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Invert the triangular Cholesky factor U or L. */
+
+    ztrtri_(uplo, "Non-unit", n, &a[a_offset], lda, info);
+    if (*info > 0) {
+
+/*       ZTRTRI found a zero diagonal element, i.e. the factor U or L is */
+/*       singular; INFO already holds its index (see INFO above), so */
+/*       report it to the caller unchanged. */
+
+	return 0;
+    }
+
+/* Form inv(U)*inv(U)' or inv(L)'*inv(L). */
+
+/* ZLAUUM overwrites the stored triangle of A in place with the */
+/* corresponding triangle of the Hermitian inverse. */
+
+    zlauum_(uplo, n, &a[a_offset], lda, info);
+
+    return 0;
+
+/* End of ZPOTRI */
+
+} /* zpotri_ */
diff --git a/contrib/libs/clapack/zpotrs.c b/contrib/libs/clapack/zpotrs.c
new file mode 100644
index 0000000000..908e25f124
--- /dev/null
+++ b/contrib/libs/clapack/zpotrs.c
@@ -0,0 +1,166 @@
+/* zpotrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+
+/* Subroutine */ int zpotrs_(char *uplo, integer *n, integer *nrhs, 
+	doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb, 
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+    /* Local variables */
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int ztrsm_(char *, char *, char *, char *, 
+	    integer *, integer *, doublecomplex *, doublecomplex *, integer *, 
+	    doublecomplex *, integer *), 
+	    xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPOTRS solves a system of linear equations A*X = B with a Hermitian */
+/* positive definite matrix A using the Cholesky factorization */
+/* A = U**H*U or A = L*L**H computed by ZPOTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**H*U or A = L*L**H, as computed by ZPOTRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZPOTRS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+/* In each branch, two in-place triangular solves apply */
+/* inv(A) = inv(U)*inv(U**H) (or inv(L**H)*inv(L)) to all NRHS columns */
+/* of B at once. */
+
+    if (upper) {
+
+/* Solve A*X = B where A = U'*U. */
+
+/* Solve U'*X = B, overwriting B with X. */
+
+	ztrsm_("Left", "Upper", "Conjugate transpose", "Non-unit", n, nrhs, &
+		c_b1, &a[a_offset], lda, &b[b_offset], ldb);
+
+/* Solve U*X = B, overwriting B with X. */
+
+	ztrsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b1, &
+		a[a_offset], lda, &b[b_offset], ldb);
+    } else {
+
+/* Solve A*X = B where A = L*L'. */
+
+/* Solve L*X = B, overwriting B with X. */
+
+	ztrsm_("Left", "Lower", "No transpose", "Non-unit", n, nrhs, &c_b1, &
+		a[a_offset], lda, &b[b_offset], ldb);
+
+/* Solve L'*X = B, overwriting B with X. */
+
+	ztrsm_("Left", "Lower", "Conjugate transpose", "Non-unit", n, nrhs, &
+		c_b1, &a[a_offset], lda, &b[b_offset], ldb);
+    }
+
+    return 0;
+
+/* End of ZPOTRS */
+
+} /* zpotrs_ */
diff --git a/contrib/libs/clapack/zppcon.c b/contrib/libs/clapack/zppcon.c
new file mode 100644
index 0000000000..026cddc295
--- /dev/null
+++ b/contrib/libs/clapack/zppcon.c
@@ -0,0 +1,223 @@
+/* zppcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zppcon_(char *uplo, integer *n, doublecomplex *ap, 
+	doublereal *anorm, doublereal *rcond, doublecomplex *work, doublereal 
+	*rwork, integer *info)
+{
+    /* System generated locals */
+    integer i__1;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *);
+
+    /* Local variables */
+    integer ix, kase;
+    doublereal scale;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    logical upper;
+    extern /* Subroutine */ int zlacn2_(integer *, doublecomplex *, 
+	    doublecomplex *, doublereal *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    doublereal scalel, scaleu;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal ainvnm;
+    extern integer izamax_(integer *, doublecomplex *, integer *);
+    extern /* Subroutine */ int zdrscl_(integer *, doublereal *, 
+	    doublecomplex *, integer *);
+    char normin[1];
+    doublereal smlnum;
+    extern /* Subroutine */ int zlatps_(char *, char *, char *, char *, 
+	    integer *, doublecomplex *, doublecomplex *, doublereal *, 
+	    doublereal *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPPCON estimates the reciprocal of the condition number (in the */
+/* 1-norm) of a complex Hermitian positive definite packed matrix using */
+/* the Cholesky factorization A = U**H*U or A = L*L**H computed by */
+/* ZPPTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**H*U or A = L*L**H, packed columnwise in a linear */
+/* array. The j-th column of U or L is stored in the array AP */
+/* as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = U(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = L(i,j) for j<=i<=n. */
+
+/* ANORM (input) DOUBLE PRECISION */
+/* The 1-norm (or infinity-norm) of the Hermitian matrix A. */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --rwork;
+    --work;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*anorm < 0.) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZPPCON", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    *rcond = 0.;
+    if (*n == 0) {
+	*rcond = 1.;
+	return 0;
+    } else if (*anorm == 0.) {
+	return 0;
+    }
+
+    smlnum = dlamch_("Safe minimum");
+
+/* Estimate the 1-norm of the inverse. */
+
+    kase = 0;
+    *(unsigned char *)normin = 'N';
+
+/*    NORMIN = 'N' makes the first ZLATPS call compute the column norms */
+/*    it caches in RWORK; it is switched to 'Y' below so subsequent */
+/*    calls can reuse them. */
+
+L10:
+    zlacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+    if (kase != 0) {
+
+/*       Reverse communication with ZLACN2: KASE != 0 requests another */
+/*       product of the current vector with inv(A) (A is Hermitian, so */
+/*       inv(A)**H = inv(A)); KASE == 0 ends the loop with the estimate */
+/*       in AINVNM. */
+
+	if (upper) {
+
+/* Multiply by inv(U'). */
+
+	    zlatps_("Upper", "Conjugate transpose", "Non-unit", normin, n, &
+		    ap[1], &work[1], &scalel, &rwork[1], info);
+	    *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(U). */
+
+	    zlatps_("Upper", "No transpose", "Non-unit", normin, n, &ap[1], &
+		    work[1], &scaleu, &rwork[1], info);
+	} else {
+
+/* Multiply by inv(L). */
+
+	    zlatps_("Lower", "No transpose", "Non-unit", normin, n, &ap[1], &
+		    work[1], &scalel, &rwork[1], info);
+	    *(unsigned char *)normin = 'Y';
+
+/* Multiply by inv(L'). */
+
+	    zlatps_("Lower", "Conjugate transpose", "Non-unit", normin, n, &
+		    ap[1], &work[1], &scaleu, &rwork[1], info);
+	}
+
+/* Multiply by 1/SCALE if doing so will not cause overflow. */
+
+	scale = scalel * scaleu;
+	if (scale != 1.) {
+	    ix = izamax_(n, &work[1], &c__1);
+	    i__1 = ix;
+	    if (scale < ((d__1 = work[i__1].r, abs(d__1)) + (d__2 = d_imag(&
+		    work[ix]), abs(d__2))) * smlnum || scale == 0.) {
+
+/*             Rescaling would overflow: abandon the estimate and */
+/*             return with RCOND still zero. */
+
+		goto L20;
+	    }
+	    zdrscl_(n, &scale, &work[1], &c__1);
+	}
+	goto L10;
+    }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+    if (ainvnm != 0.) {
+	*rcond = 1. / ainvnm / *anorm;
+    }
+
+L20:
+    return 0;
+
+/* End of ZPPCON */
+
+} /* zppcon_ */
diff --git a/contrib/libs/clapack/zppequ.c b/contrib/libs/clapack/zppequ.c
new file mode 100644
index 0000000000..d8dcefa188
--- /dev/null
+++ b/contrib/libs/clapack/zppequ.c
@@ -0,0 +1,210 @@
+/* zppequ.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zppequ_(char *uplo, integer *n, doublecomplex *ap, 
+	doublereal *s, doublereal *scond, doublereal *amax, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, jj;
+    doublereal smin;
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPPEQU computes row and column scalings intended to equilibrate a */
+/* Hermitian positive definite matrix A in packed storage and reduce */
+/* its condition number (with respect to the two-norm). S contains the */
+/* scale factors, S(i)=1/sqrt(A(i,i)), chosen so that the scaled matrix */
+/* B with elements B(i,j)=S(i)*A(i,j)*S(j) has ones on the diagonal. */
+/* This choice of S puts the condition number of B within a factor N of */
+/* the smallest possible condition number over all possible diagonal */
+/* scalings. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the Hermitian matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* S (output) DOUBLE PRECISION array, dimension (N) */
+/* If INFO = 0, S contains the scale factors for A. */
+
+/* SCOND (output) DOUBLE PRECISION */
+/* If INFO = 0, S contains the ratio of the smallest S(i) to */
+/* the largest S(i). If SCOND >= 0.1 and AMAX is neither too */
+/* large nor too small, it is not worth scaling by S. */
+
+/* AMAX (output) DOUBLE PRECISION */
+/* Absolute value of largest matrix element. If AMAX is very */
+/* close to overflow or very close to underflow, the matrix */
+/* should be scaled. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element is nonpositive. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --s;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZPPEQU", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	*scond = 1.;
+	*amax = 0.;
+	return 0;
+    }
+
+/* Initialize SMIN and AMAX. */
+
+/*    AP(1) = A(1,1); the diagonal of a Hermitian matrix is real, so */
+/*    only the stored real part is used. */
+
+    s[1] = ap[1].r;
+    smin = s[1];
+    *amax = s[1];
+
+    if (upper) {
+
+/* UPLO = 'U': Upper triangle of A is stored. */
+/* Find the minimum and maximum diagonal elements. */
+
+	jj = 1;
+	i__1 = *n;
+	for (i__ = 2; i__ <= i__1; ++i__) {
+	    jj += i__;
+/*          JJ is now the packed index of A(i,i): i + (i-1)*i/2 (see the */
+/*          AP description above). */
+	    i__2 = jj;
+	    s[i__] = ap[i__2].r;
+/* Computing MIN */
+	    d__1 = smin, d__2 = s[i__];
+	    smin = min(d__1,d__2);
+/* Computing MAX */
+	    d__1 = *amax, d__2 = s[i__];
+	    *amax = max(d__1,d__2);
+/* L10: */
+	}
+
+    } else {
+
+/* UPLO = 'L': Lower triangle of A is stored. */
+/* Find the minimum and maximum diagonal elements. */
+
+	jj = 1;
+	i__1 = *n;
+	for (i__ = 2; i__ <= i__1; ++i__) {
+	    jj = jj + *n - i__ + 2;
+/*          JJ is now the packed index of A(i,i): i + (i-1)*(2n-i)/2 */
+/*          (see the AP description above). */
+	    i__2 = jj;
+	    s[i__] = ap[i__2].r;
+/* Computing MIN */
+	    d__1 = smin, d__2 = s[i__];
+	    smin = min(d__1,d__2);
+/* Computing MAX */
+	    d__1 = *amax, d__2 = s[i__];
+	    *amax = max(d__1,d__2);
+/* L20: */
+	}
+    }
+
+    if (smin <= 0.) {
+
+/* Find the first non-positive diagonal element and return. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    if (s[i__] <= 0.) {
+		*info = i__;
+		return 0;
+	    }
+/* L30: */
+	}
+    } else {
+
+/* Set the scale factors to the reciprocals */
+/* of the diagonal elements. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    s[i__] = 1. / sqrt(s[i__]);
+/* L40: */
+	}
+
+/* Compute SCOND = min(S(I)) / max(S(I)) */
+
+	*scond = sqrt(smin) / sqrt(*amax);
+    }
+    return 0;
+
+/* End of ZPPEQU */
+
+} /* zppequ_ */
diff --git a/contrib/libs/clapack/zpprfs.c b/contrib/libs/clapack/zpprfs.c
new file mode 100644
index 0000000000..0ad55017f4
--- /dev/null
+++ b/contrib/libs/clapack/zpprfs.c
@@ -0,0 +1,457 @@
+/* zpprfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zpprfs_(char *uplo, integer *n, integer *nrhs, 
+	doublecomplex *ap, doublecomplex *afp, doublecomplex *b, integer *ldb, 
+	 doublecomplex *x, integer *ldx, doublereal *ferr, doublereal *berr, 
+	doublecomplex *work, doublereal *rwork, integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+    doublereal d__1, d__2, d__3, d__4;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *);
+
+    /* Local variables */
+    integer i__, j, k;
+    doublereal s;
+    integer ik, kk;
+    doublereal xk;
+    integer nz;
+    doublereal eps;
+    integer kase;
+    doublereal safe1, safe2;
+    extern logical lsame_(char *, char *);
+    integer isave[3], count;
+    logical upper;
+    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *, 
+	    doublecomplex *, integer *), zhpmv_(char *, integer *, 
+	    doublecomplex *, doublecomplex *, doublecomplex *, integer *, 
+	    doublecomplex *, doublecomplex *, integer *), zaxpy_(
+	    integer *, doublecomplex *, doublecomplex *, integer *, 
+	    doublecomplex *, integer *), zlacn2_(integer *, doublecomplex *, 
+	    doublecomplex *, doublereal *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    doublereal safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal lstres;
+    extern /* Subroutine */ int zpptrs_(char *, integer *, integer *, 
+	    doublecomplex *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPPRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is Hermitian positive definite */
+/* and packed, and provides error bounds and backward error estimates */
+/* for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the Hermitian matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* AFP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**H*U or A = L*L**H, as computed by DPPTRF/ZPPTRF, */
+/* packed columnwise in a linear array in the same format as A */
+/* (see AP). */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by ZPPTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ==================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    --afp;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    } else if (*ldx < max(1,*n)) {
+	*info = -9;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZPPRFS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] = 0.;
+	    berr[j] = 0.;
+/* L10: */
+	}
+	return 0;
+    }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+    nz = *n + 1;
+    eps = dlamch_("Epsilon");
+    safmin = dlamch_("Safe minimum");
+    safe1 = nz * safmin;
+    safe2 = safe1 / eps;
+
+/*    SAFE1/SAFE2 guard the componentwise quotients below: whenever a */
+/*    denominator entry is below SAFE2, SAFE1 is added to both the */
+/*    numerator and the denominator (see the formula comments further */
+/*    down). */
+
+/* Do for each right hand side */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+	count = 1;
+	lstres = 3.;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+	zcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+	z__1.r = -1., z__1.i = -0.;
+	zhpmv_(uplo, n, &z__1, &ap[1], &x[j * x_dim1 + 1], &c__1, &c_b1, &
+		work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * b_dim1;
+	    rwork[i__] = (d__1 = b[i__3].r, abs(d__1)) + (d__2 = d_imag(&b[
+		    i__ + j * b_dim1]), abs(d__2));
+/* L30: */
+	}
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+	kk = 1;
+	if (upper) {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		s = 0.;
+		i__3 = k + j * x_dim1;
+		xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[k + j *
+			 x_dim1]), abs(d__2));
+		ik = kk;
+		i__3 = k - 1;
+		for (i__ = 1; i__ <= i__3; ++i__) {
+		    i__4 = ik;
+		    rwork[i__] += ((d__1 = ap[i__4].r, abs(d__1)) + (d__2 = 
+			    d_imag(&ap[ik]), abs(d__2))) * xk;
+		    i__4 = ik;
+		    i__5 = i__ + j * x_dim1;
+		    s += ((d__1 = ap[i__4].r, abs(d__1)) + (d__2 = d_imag(&ap[
+			    ik]), abs(d__2))) * ((d__3 = x[i__5].r, abs(d__3))
+			     + (d__4 = d_imag(&x[i__ + j * x_dim1]), abs(d__4)
+			    ));
+		    ++ik;
+/* L40: */
+		}
+		i__3 = kk + k - 1;
+		rwork[k] = rwork[k] + (d__1 = ap[i__3].r, abs(d__1)) * xk + s;
+		kk += k;
+/* L50: */
+	    }
+	} else {
+	    i__2 = *n;
+	    for (k = 1; k <= i__2; ++k) {
+		s = 0.;
+		i__3 = k + j * x_dim1;
+		xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[k + j *
+			 x_dim1]), abs(d__2));
+		i__3 = kk;
+		rwork[k] += (d__1 = ap[i__3].r, abs(d__1)) * xk;
+		ik = kk + 1;
+		i__3 = *n;
+		for (i__ = k + 1; i__ <= i__3; ++i__) {
+		    i__4 = ik;
+		    rwork[i__] += ((d__1 = ap[i__4].r, abs(d__1)) + (d__2 = 
+			    d_imag(&ap[ik]), abs(d__2))) * xk;
+		    i__4 = ik;
+		    i__5 = i__ + j * x_dim1;
+		    s += ((d__1 = ap[i__4].r, abs(d__1)) + (d__2 = d_imag(&ap[
+			    ik]), abs(d__2))) * ((d__3 = x[i__5].r, abs(d__3))
+			     + (d__4 = d_imag(&x[i__ + j * x_dim1]), abs(d__4)
+			    ));
+		    ++ik;
+/* L60: */
+		}
+		rwork[k] += s;
+		kk += *n - k + 1;
+/* L70: */
+	    }
+	}
+	s = 0.;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (rwork[i__] > safe2) {
+/* Computing MAX */
+		i__3 = i__;
+		d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 = 
+			d_imag(&work[i__]), abs(d__2))) / rwork[i__];
+		s = max(d__3,d__4);
+	    } else {
+/* Computing MAX */
+		i__3 = i__;
+		d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 = 
+			d_imag(&work[i__]), abs(d__2)) + safe1) / (rwork[i__] 
+			+ safe1);
+		s = max(d__3,d__4);
+	    }
+/* L80: */
+	}
+	berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+/*       The literal 5 below is ITMAX, the maximum number of refinement */
+/*       steps (see Internal Parameters above). */
+
+	if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+	    zpptrs_(uplo, n, &c__1, &afp[1], &work[1], n, info);
+	    zaxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+	    lstres = berr[j];
+	    ++count;
+	    goto L20;
+	}
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use ZLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (rwork[i__] > safe2) {
+		i__3 = i__;
+		rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 = 
+			d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+			;
+	    } else {
+		i__3 = i__;
+		rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 = 
+			d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+			 + safe1;
+	    }
+/* L90: */
+	}
+
+	kase = 0;
+L100:
+	zlacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+	if (kase != 0) {
+
+/*          Reverse communication with ZLACN2. A is Hermitian, so */
+/*          inv(A') = inv(A): both KASE branches solve with ZPPTRS and */
+/*          differ only in whether diag(W) (held in RWORK) is applied */
+/*          after or before the solve. */
+
+	    if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+		zpptrs_(uplo, n, &c__1, &afp[1], &work[1], n, info)
+			;
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__;
+		    z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4] 
+			    * work[i__5].i;
+		    work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L110: */
+		}
+	    } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__;
+		    z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4] 
+			    * work[i__5].i;
+		    work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L120: */
+		}
+		zpptrs_(uplo, n, &c__1, &afp[1], &work[1], n, info)
+			;
+	    }
+	    goto L100;
+	}
+
+/* Normalize error. */
+
+	lstres = 0.;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    i__3 = i__ + j * x_dim1;
+	    d__3 = lstres, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = 
+		    d_imag(&x[i__ + j * x_dim1]), abs(d__2));
+	    lstres = max(d__3,d__4);
+/* L130: */
+	}
+	if (lstres != 0.) {
+	    ferr[j] /= lstres;
+	}
+
+/* L140: */
+    }
+
+    return 0;
+
+/* End of ZPPRFS */
+
+} /* zpprfs_ */
diff --git a/contrib/libs/clapack/zppsv.c b/contrib/libs/clapack/zppsv.c
new file mode 100644
index 0000000000..205047428c
--- /dev/null
+++ b/contrib/libs/clapack/zppsv.c
@@ -0,0 +1,161 @@
+/* zppsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zppsv_(char *uplo, integer *n, integer *nrhs,
+ doublecomplex *ap, doublecomplex *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), zpptrf_(
+ char *, integer *, doublecomplex *, integer *), zpptrs_(
+ char *, integer *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPPSV computes the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N Hermitian positive definite matrix stored in */
+/* packed format and X and B are N-by-NRHS matrices. */
+
+/* The Cholesky decomposition is used to factor A as */
+/* A = U**H* U, if UPLO = 'U', or */
+/* A = L * L**H, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is a lower triangular */
+/* matrix. The factored form of A is then used to solve the system of */
+/* equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H, in the same storage */
+/* format as A. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i of A is not */
+/* positive definite, so the factorization could not be */
+/* completed, and the solution has not been computed. */
+
+/* Further Details */
+/* =============== */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the Hermitian matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = conjg(aji)) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPPSV ", &i__1);
+ return 0;
+ }
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+ zpptrf_(uplo, n, &ap[1], info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ zpptrs_(uplo, n, nrhs, &ap[1], &b[b_offset], ldb, info);
+
+ }
+ return 0;
+
+/* End of ZPPSV */
+
+} /* zppsv_ */
diff --git a/contrib/libs/clapack/zppsvx.c b/contrib/libs/clapack/zppsvx.c
new file mode 100644
index 0000000000..30e662b644
--- /dev/null
+++ b/contrib/libs/clapack/zppsvx.c
@@ -0,0 +1,465 @@
+/* zppsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zppsvx_(char *fact, char *uplo, integer *n, integer *
+ nrhs, doublecomplex *ap, doublecomplex *afp, char *equed, doublereal *
+ s, doublecomplex *b, integer *ldb, doublecomplex *x, integer *ldx,
+ doublereal *rcond, doublereal *ferr, doublereal *berr, doublecomplex *
+ work, doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__, j;
+ doublereal amax, smin, smax;
+ extern logical lsame_(char *, char *);
+ doublereal scond, anorm;
+ logical equil, rcequ;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ extern doublereal dlamch_(char *);
+ logical nofact;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum;
+ integer infequ;
+ extern doublereal zlanhp_(char *, char *, integer *, doublecomplex *,
+ doublereal *);
+ extern /* Subroutine */ int zlaqhp_(char *, integer *, doublecomplex *,
+ doublereal *, doublereal *, doublereal *, char *),
+ zlacpy_(char *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zppcon_(char *, integer *,
+ doublecomplex *, doublereal *, doublereal *, doublecomplex *,
+ doublereal *, integer *);
+ doublereal smlnum;
+ extern /* Subroutine */ int zppequ_(char *, integer *, doublecomplex *,
+ doublereal *, doublereal *, doublereal *, integer *),
+ zpprfs_(char *, integer *, integer *, doublecomplex *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublereal *, doublereal *, doublecomplex *,
+ doublereal *, integer *), zpptrf_(char *, integer *,
+ doublecomplex *, integer *), zpptrs_(char *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPPSVX uses the Cholesky factorization A = U**H*U or A = L*L**H to */
+/* compute the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N Hermitian positive definite matrix stored in */
+/* packed format and X and B are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'E', real scaling factors are computed to equilibrate */
+/* the system: */
+/* diag(S) * A * diag(S) * inv(diag(S)) * X = diag(S) * B */
+/* Whether or not the system will be equilibrated depends on the */
+/* scaling of the matrix A, but if equilibration is used, A is */
+/* overwritten by diag(S)*A*diag(S) and B by diag(S)*B. */
+
+/* 2. If FACT = 'N' or 'E', the Cholesky decomposition is used to */
+/* factor the matrix A (after equilibration if FACT = 'E') as */
+/* A = U'* U , if UPLO = 'U', or */
+/* A = L * L', if UPLO = 'L', */
+/* where U is an upper triangular matrix, L is a lower triangular */
+/* matrix, and ' indicates conjugate transpose. */
+
+/* 3. If the leading i-by-i principal minor is not positive definite, */
+/* then the routine returns with INFO = i. Otherwise, the factored */
+/* form of A is used to estimate the condition number of the matrix */
+/* A. If the reciprocal of the condition number is less than machine */
+/* precision, INFO = N+1 is returned as a warning, but the routine */
+/* still goes on to solve for X and compute error bounds as */
+/* described below. */
+
+/* 4. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 5. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* 6. If equilibration was used, the matrix X is premultiplied by */
+/* diag(S) so that it solves the original system before */
+/* equilibration. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of the matrix A is */
+/* supplied on entry, and if not, whether the matrix A should be */
+/* equilibrated before it is factored. */
+/* = 'F': On entry, AFP contains the factored form of A. */
+/* If EQUED = 'Y', the matrix A has been equilibrated */
+/* with scaling factors given by S. AP and AFP will not */
+/* be modified. */
+/* = 'N': The matrix A will be copied to AFP and factored. */
+/* = 'E': The matrix A will be equilibrated if necessary, then */
+/* copied to AFP and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array, except if FACT = 'F' */
+/* and EQUED = 'Y', then A must contain the equilibrated matrix */
+/* diag(S)*A*diag(S). The j-th column of A is stored in the */
+/* array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. A is not modified if */
+/* FACT = 'F' or 'N', or if FACT = 'E' and EQUED = 'N' on exit. */
+
+/* On exit, if FACT = 'E' and EQUED = 'Y', A is overwritten by */
+/* diag(S)*A*diag(S). */
+
+/* AFP (input or output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* If FACT = 'F', then AFP is an input argument and on entry */
+/* contains the triangular factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H, in the same storage */
+/* format as A. If EQUED .ne. 'N', then AFP is the factored */
+/* form of the equilibrated matrix A. */
+
+/* If FACT = 'N', then AFP is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H of the original */
+/* matrix A. */
+
+/* If FACT = 'E', then AFP is an output argument and on exit */
+/* returns the triangular factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H of the equilibrated */
+/* matrix A (see the description of AP for the form of the */
+/* equilibrated matrix). */
+
+/* EQUED (input or output) CHARACTER*1 */
+/* Specifies the form of equilibration that was done. */
+/* = 'N': No equilibration (always true if FACT = 'N'). */
+/* = 'Y': Equilibration was done, i.e., A has been replaced by */
+/* diag(S) * A * diag(S). */
+/* EQUED is an input argument if FACT = 'F'; otherwise, it is an */
+/* output argument. */
+
+/* S (input or output) DOUBLE PRECISION array, dimension (N) */
+/* The scale factors for A; not accessed if EQUED = 'N'. S is */
+/* an input argument if FACT = 'F'; otherwise, S is an output */
+/* argument. If FACT = 'F' and EQUED = 'Y', each element of S */
+/* must be positive. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if EQUED = 'N', B is not modified; if EQUED = 'Y', */
+/* B is overwritten by diag(S) * B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X to */
+/* the original system of equations. Note that if EQUED = 'Y', */
+/* A and B are modified on exit, and the solution to the */
+/* equilibrated system is inv(diag(S))*X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A after equilibration (if done). If RCOND is less than the */
+/* machine precision (in particular, if RCOND = 0), the matrix */
+/* is singular to working precision. This condition is */
+/* indicated by a return code of INFO > 0. */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: the leading minor of order i of A is */
+/* not positive definite, so the factorization */
+/* could not be completed, and the solution has not */
+/* been computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* Further Details */
+/* =============== */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the Hermitian matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = conjg(aji)) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --ap;
+ --afp;
+ --s;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ equil = lsame_(fact, "E");
+ if (nofact || equil) {
+ *(unsigned char *)equed = 'N';
+ rcequ = FALSE_;
+ } else {
+ rcequ = lsame_(equed, "Y");
+ smlnum = dlamch_("Safe minimum");
+ bignum = 1. / smlnum;
+ }
+
+/* Test the input parameters. */
+
+ if (! nofact && ! equil && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! lsame_(uplo, "U") && ! lsame_(uplo,
+ "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (lsame_(fact, "F") && ! (rcequ || lsame_(
+ equed, "N"))) {
+ *info = -7;
+ } else {
+ if (rcequ) {
+ smin = bignum;
+ smax = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ d__1 = smin, d__2 = s[j];
+ smin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = smax, d__2 = s[j];
+ smax = max(d__1,d__2);
+/* L10: */
+ }
+ if (smin <= 0.) {
+ *info = -8;
+ } else if (*n > 0) {
+ scond = max(smin,smlnum) / min(smax,bignum);
+ } else {
+ scond = 1.;
+ }
+ }
+ if (*info == 0) {
+ if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldx < max(1,*n)) {
+ *info = -12;
+ }
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPPSVX", &i__1);
+ return 0;
+ }
+
+ if (equil) {
+
+/* Compute row and column scalings to equilibrate the matrix A. */
+
+ zppequ_(uplo, n, &ap[1], &s[1], &scond, &amax, &infequ);
+ if (infequ == 0) {
+
+/* Equilibrate the matrix. */
+
+ zlaqhp_(uplo, n, &ap[1], &s[1], &scond, &amax, equed);
+ rcequ = lsame_(equed, "Y");
+ }
+ }
+
+/* Scale the right-hand side. */
+
+ if (rcequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__;
+ i__5 = i__ + j * b_dim1;
+ z__1.r = s[i__4] * b[i__5].r, z__1.i = s[i__4] * b[i__5].i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+
+ if (nofact || equil) {
+
+/* Compute the Cholesky factorization A = U'*U or A = L*L'. */
+
+ i__1 = *n * (*n + 1) / 2;
+ zcopy_(&i__1, &ap[1], &c__1, &afp[1], &c__1);
+ zpptrf_(uplo, n, &afp[1], info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ anorm = zlanhp_("I", uplo, n, &ap[1], &rwork[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ zppcon_(uplo, n, &afp[1], &anorm, rcond, &work[1], &rwork[1], info);
+
+/* Compute the solution matrix X. */
+
+ zlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ zpptrs_(uplo, n, nrhs, &afp[1], &x[x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solution and */
+/* compute error bounds and backward error estimates for it. */
+
+ zpprfs_(uplo, n, nrhs, &ap[1], &afp[1], &b[b_offset], ldb, &x[x_offset],
+ ldx, &ferr[1], &berr[1], &work[1], &rwork[1], info);
+
+/* Transform the solution matrix X to a solution of the original */
+/* system. */
+
+ if (rcequ) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * x_dim1;
+ i__4 = i__;
+ i__5 = i__ + j * x_dim1;
+ z__1.r = s[i__4] * x[i__5].r, z__1.i = s[i__4] * x[i__5].i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L40: */
+ }
+/* L50: */
+ }
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] /= scond;
+/* L60: */
+ }
+ }
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < dlamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ return 0;
+
+/* End of ZPPSVX */
+
+} /* zppsvx_ */
diff --git a/contrib/libs/clapack/zpptrf.c b/contrib/libs/clapack/zpptrf.c
new file mode 100644
index 0000000000..cf28d0b6e6
--- /dev/null
+++ b/contrib/libs/clapack/zpptrf.c
@@ -0,0 +1,233 @@
+/* zpptrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b16 = -1.;
+
+/* Subroutine */ int zpptrf_(char *uplo, integer *n, doublecomplex *ap,
+ integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ doublereal d__1;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer j, jc, jj;
+ doublereal ajj;
+ extern /* Subroutine */ int zhpr_(char *, integer *, doublereal *,
+ doublecomplex *, integer *, doublecomplex *);
+ extern logical lsame_(char *, char *);
+ extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int ztpsv_(char *, char *, char *, integer *,
+ doublecomplex *, doublecomplex *, integer *), xerbla_(char *, integer *), zdscal_(integer *,
+ doublereal *, doublecomplex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPPTRF computes the Cholesky factorization of a complex Hermitian */
+/* positive definite matrix A stored in packed format. */
+
+/* The factorization has the form */
+/* A = U**H * U, if UPLO = 'U', or */
+/* A = L * L**H, if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the Hermitian matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. */
+
+/* On exit, if INFO = 0, the triangular factor U or L from the */
+/* Cholesky factorization A = U**H*U or A = L*L**H, in the same */
+/* storage format as A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i is not */
+/* positive definite, and the factorization could not be */
+/* completed. */
+
+/* Further Details */
+/* =============== */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the Hermitian matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = conjg(aji)) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPPTRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+ jj = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ jc = jj + 1;
+ jj += j;
+
+/* Compute elements 1:J-1 of column J. */
+
+ if (j > 1) {
+ i__2 = j - 1;
+ ztpsv_("Upper", "Conjugate transpose", "Non-unit", &i__2, &ap[
+ 1], &ap[jc], &c__1);
+ }
+
+/* Compute U(J,J) and test for non-positive-definiteness. */
+
+ i__2 = jj;
+ d__1 = ap[i__2].r;
+ i__3 = j - 1;
+ zdotc_(&z__2, &i__3, &ap[jc], &c__1, &ap[jc], &c__1);
+ z__1.r = d__1 - z__2.r, z__1.i = -z__2.i;
+ ajj = z__1.r;
+ if (ajj <= 0.) {
+ i__2 = jj;
+ ap[i__2].r = ajj, ap[i__2].i = 0.;
+ goto L30;
+ }
+ i__2 = jj;
+ d__1 = sqrt(ajj);
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+/* L10: */
+ }
+ } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+ jj = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute L(J,J) and test for non-positive-definiteness. */
+
+ i__2 = jj;
+ ajj = ap[i__2].r;
+ if (ajj <= 0.) {
+ i__2 = jj;
+ ap[i__2].r = ajj, ap[i__2].i = 0.;
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ i__2 = jj;
+ ap[i__2].r = ajj, ap[i__2].i = 0.;
+
+/* Compute elements J+1:N of column J and update the trailing */
+/* submatrix. */
+
+ if (j < *n) {
+ i__2 = *n - j;
+ d__1 = 1. / ajj;
+ zdscal_(&i__2, &d__1, &ap[jj + 1], &c__1);
+ i__2 = *n - j;
+ zhpr_("Lower", &i__2, &c_b16, &ap[jj + 1], &c__1, &ap[jj + *n
+ - j + 1]);
+ jj = jj + *n - j + 1;
+ }
+/* L20: */
+ }
+ }
+ goto L40;
+
+L30:
+ *info = j;
+
+L40:
+ return 0;
+
+/* End of ZPPTRF */
+
+} /* zpptrf_ */
diff --git a/contrib/libs/clapack/zpptri.c b/contrib/libs/clapack/zpptri.c
new file mode 100644
index 0000000000..b13fc3c35c
--- /dev/null
+++ b/contrib/libs/clapack/zpptri.c
@@ -0,0 +1,179 @@
+/* zpptri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublereal c_b8 = 1.;
+static integer c__1 = 1;
+
+/* Subroutine */ int zpptri_(char *uplo, integer *n, doublecomplex *ap,
+ integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ doublereal d__1;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer j, jc, jj;
+ doublereal ajj;
+ integer jjn;
+ extern /* Subroutine */ int zhpr_(char *, integer *, doublereal *,
+ doublecomplex *, integer *, doublecomplex *);
+ extern logical lsame_(char *, char *);
+ extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int ztpmv_(char *, char *, char *, integer *,
+ doublecomplex *, doublecomplex *, integer *), xerbla_(char *, integer *), zdscal_(integer *,
+ doublereal *, doublecomplex *, integer *), ztptri_(char *, char *,
+ integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPPTRI computes the inverse of a complex Hermitian positive definite */
+/* matrix A using the Cholesky factorization A = U**H*U or A = L*L**H */
+/* computed by ZPPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangular factor is stored in AP; */
+/* = 'L': Lower triangular factor is stored in AP. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the triangular factor U or L from the Cholesky */
+/* factorization A = U**H*U or A = L*L**H, packed columnwise as */
+/* a linear array. The j-th column of U or L is stored in the */
+/* array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = U(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = L(i,j) for j<=i<=n. */
+
+/* On exit, the upper or lower triangle of the (Hermitian) */
+/* inverse of A, overwriting the input factor U or L. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the (i,i) element of the factor U or L is */
+/* zero, and the inverse could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPPTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Invert the triangular Cholesky factor U or L. */
+
+ ztptri_(uplo, "Non-unit", n, &ap[1], info);
+ if (*info > 0) {
+ return 0;
+ }
+ if (upper) {
+
+/* Compute the product inv(U) * inv(U)'. */
+
+ jj = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ jc = jj + 1;
+ jj += j;
+ if (j > 1) {
+ i__2 = j - 1;
+ zhpr_("Upper", &i__2, &c_b8, &ap[jc], &c__1, &ap[1]);
+ }
+ i__2 = jj;
+ ajj = ap[i__2].r;
+ zdscal_(&j, &ajj, &ap[jc], &c__1);
+/* L10: */
+ }
+
+ } else {
+
+/* Compute the product inv(L)' * inv(L). */
+
+ jj = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ jjn = jj + *n - j + 1;
+ i__2 = jj;
+ i__3 = *n - j + 1;
+ zdotc_(&z__1, &i__3, &ap[jj], &c__1, &ap[jj], &c__1);
+ d__1 = z__1.r;
+ ap[i__2].r = d__1, ap[i__2].i = 0.;
+ if (j < *n) {
+ i__2 = *n - j;
+ ztpmv_("Lower", "Conjugate transpose", "Non-unit", &i__2, &ap[
+ jjn], &ap[jj + 1], &c__1);
+ }
+ jj = jjn;
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of ZPPTRI */
+
+} /* zpptri_ */
diff --git a/contrib/libs/clapack/zpptrs.c b/contrib/libs/clapack/zpptrs.c
new file mode 100644
index 0000000000..3999cddcc7
--- /dev/null
+++ b/contrib/libs/clapack/zpptrs.c
@@ -0,0 +1,169 @@
+/* zpptrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zpptrs_(char *uplo, integer *n, integer *nrhs,
+ doublecomplex *ap, doublecomplex *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ integer i__;
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern /* Subroutine */ int ztpsv_(char *, char *, char *, integer *,
+ doublecomplex *, doublecomplex *, integer *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPPTRS solves a system of linear equations A*X = B with a Hermitian */
+/* positive definite matrix A in packed storage using the Cholesky */
+/* factorization A = U**H*U or A = L*L**H computed by ZPPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The triangular factor U or L from the Cholesky factorization */
+/* A = U**H*U or A = L*L**H, packed columnwise in a linear */
+/* array. The j-th column of U or L is stored in the array AP */
+/* as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = U(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = L(i,j) for j<=i<=n. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPPTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Solve A*X = B where A = U'*U. */
+
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Solve U'*X = B, overwriting B with X. */
+
+ ztpsv_("Upper", "Conjugate transpose", "Non-unit", n, &ap[1], &b[
+ i__ * b_dim1 + 1], &c__1);
+
+/* Solve U*X = B, overwriting B with X. */
+
+ ztpsv_("Upper", "No transpose", "Non-unit", n, &ap[1], &b[i__ *
+ b_dim1 + 1], &c__1);
+/* L10: */
+ }
+ } else {
+
+/* Solve A*X = B where A = L*L'. */
+
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Solve L*Y = B, overwriting B with X. */
+
+ ztpsv_("Lower", "No transpose", "Non-unit", n, &ap[1], &b[i__ *
+ b_dim1 + 1], &c__1);
+
+/* Solve L'*X = Y, overwriting B with X. */
+
+ ztpsv_("Lower", "Conjugate transpose", "Non-unit", n, &ap[1], &b[
+ i__ * b_dim1 + 1], &c__1);
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of ZPPTRS */
+
+} /* zpptrs_ */
diff --git a/contrib/libs/clapack/zpstf2.c b/contrib/libs/clapack/zpstf2.c
new file mode 100644
index 0000000000..7aa424283c
--- /dev/null
+++ b/contrib/libs/clapack/zpstf2.c
@@ -0,0 +1,443 @@
+/* zpstf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zpstf2_(char *uplo, integer *n, doublecomplex *a,
+ integer *lda, integer *piv, integer *rank, doublereal *tol,
+ doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublereal d__1;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, maxlocval;
+ doublereal ajj;
+ integer pvt;
+ extern logical lsame_(char *, char *);
+ doublereal dtemp;
+ integer itemp;
+ extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *);
+ doublereal dstop;
+ logical upper;
+ doublecomplex ztemp;
+ extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ extern doublereal dlamch_(char *);
+ extern logical disnan_(doublereal *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+ integer *, doublereal *, doublecomplex *, integer *), zlacgv_(
+ integer *, doublecomplex *, integer *);
+ extern integer dmaxloc_(doublereal *, integer *);
+
+
+/* -- LAPACK PROTOTYPE routine (version 3.2) -- */
+/* Craig Lucas, University of Manchester / NAG Ltd. */
+/* October, 2008 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPSTF2 computes the Cholesky factorization with complete */
+/* pivoting of a complex Hermitian positive semidefinite matrix A. */
+
+/* The factorization has the form */
+/* P' * A * P = U' * U , if UPLO = 'U', */
+/* P' * A * P = L * L', if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular, and */
+/* P is stored as vector PIV. */
+
+/* This algorithm does not attempt to check that A is positive */
+/* semidefinite. This version of the algorithm calls level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization as above. */
+
+/* PIV (output) INTEGER array, dimension (N) */
+/* PIV is such that the nonzero entries are P( PIV(K), K ) = 1. */
+
+/* RANK (output) INTEGER */
+/* The rank of A given by the number of steps the algorithm */
+/* completed. */
+
+/* TOL (input) DOUBLE PRECISION */
+/* User defined tolerance. If TOL < 0, then N*U*MAX( A( K,K ) ) */
+/* will be used. The algorithm terminates at the (K-1)st step */
+/* if the pivot <= TOL. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* WORK DOUBLE PRECISION array, dimension (2*N) */
+/* Work space. */
+
+/* INFO (output) INTEGER */
+/* < 0: If INFO = -K, the K-th argument had an illegal value, */
+/* = 0: algorithm completed successfully, and */
+/* > 0: the matrix A is either rank deficient with computed rank */
+/* as returned in RANK, or is indefinite. See Section 7 of */
+/* LAPACK Working Note #161 for further information. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters */
+
+ /* Parameter adjustments */
+ --work;
+ --piv;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPSTF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Initialize PIV */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ piv[i__] = i__;
+/* L100: */
+ }
+
+/* Compute stopping value */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ work[i__] = a[i__2].r;
+/* L110: */
+ }
+ pvt = dmaxloc_(&work[1], n);
+ i__1 = pvt + pvt * a_dim1;
+ ajj = a[i__1].r;
+ if (ajj == 0. || disnan_(&ajj)) {
+ *rank = 0;
+ *info = 1;
+ goto L200;
+ }
+
+/* Compute stopping value if not supplied */
+
+ if (*tol < 0.) {
+ dstop = *n * dlamch_("Epsilon") * ajj;
+ } else {
+ dstop = *tol;
+ }
+
+/* Set first half of WORK to zero, holds dot products */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L120: */
+ }
+
+ if (upper) {
+
+/* Compute the Cholesky factorization P' * A * P = U' * U */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Find pivot, test for exit, else swap rows and columns */
+/* Update dot products, compute possible pivots which are */
+/* stored in the second half of WORK */
+
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+
+ if (j > 1) {
+ d_cnjg(&z__2, &a[j - 1 + i__ * a_dim1]);
+ i__3 = j - 1 + i__ * a_dim1;
+ z__1.r = z__2.r * a[i__3].r - z__2.i * a[i__3].i, z__1.i =
+ z__2.r * a[i__3].i + z__2.i * a[i__3].r;
+ work[i__] += z__1.r;
+ }
+ i__3 = i__ + i__ * a_dim1;
+ work[*n + i__] = a[i__3].r - work[i__];
+
+/* L130: */
+ }
+
+ if (j > 1) {
+ maxlocval = (*n << 1) - (*n + j) + 1;
+ itemp = dmaxloc_(&work[*n + j], &maxlocval);
+ pvt = itemp + j - 1;
+ ajj = work[*n + pvt];
+ if (ajj <= dstop || disnan_(&ajj)) {
+ i__2 = j + j * a_dim1;
+ a[i__2].r = ajj, a[i__2].i = 0.;
+ goto L190;
+ }
+ }
+
+ if (j != pvt) {
+
+/* Pivot OK, so can now swap pivot rows and columns */
+
+ i__2 = pvt + pvt * a_dim1;
+ i__3 = j + j * a_dim1;
+ a[i__2].r = a[i__3].r, a[i__2].i = a[i__3].i;
+ i__2 = j - 1;
+ zswap_(&i__2, &a[j * a_dim1 + 1], &c__1, &a[pvt * a_dim1 + 1],
+ &c__1);
+ if (pvt < *n) {
+ i__2 = *n - pvt;
+ zswap_(&i__2, &a[j + (pvt + 1) * a_dim1], lda, &a[pvt + (
+ pvt + 1) * a_dim1], lda);
+ }
+ i__2 = pvt - 1;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ d_cnjg(&z__1, &a[j + i__ * a_dim1]);
+ ztemp.r = z__1.r, ztemp.i = z__1.i;
+ i__3 = j + i__ * a_dim1;
+ d_cnjg(&z__1, &a[i__ + pvt * a_dim1]);
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+ i__3 = i__ + pvt * a_dim1;
+ a[i__3].r = ztemp.r, a[i__3].i = ztemp.i;
+/* L140: */
+ }
+ i__2 = j + pvt * a_dim1;
+ d_cnjg(&z__1, &a[j + pvt * a_dim1]);
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+
+/* Swap dot products and PIV */
+
+ dtemp = work[j];
+ work[j] = work[pvt];
+ work[pvt] = dtemp;
+ itemp = piv[pvt];
+ piv[pvt] = piv[j];
+ piv[j] = itemp;
+ }
+
+ ajj = sqrt(ajj);
+ i__2 = j + j * a_dim1;
+ a[i__2].r = ajj, a[i__2].i = 0.;
+
+/* Compute elements J+1:N of row J */
+
+ if (j < *n) {
+ i__2 = j - 1;
+ zlacgv_(&i__2, &a[j * a_dim1 + 1], &c__1);
+ i__2 = j - 1;
+ i__3 = *n - j;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("Trans", &i__2, &i__3, &z__1, &a[(j + 1) * a_dim1 + 1],
+ lda, &a[j * a_dim1 + 1], &c__1, &c_b1, &a[j + (j + 1)
+ * a_dim1], lda);
+ i__2 = j - 1;
+ zlacgv_(&i__2, &a[j * a_dim1 + 1], &c__1);
+ i__2 = *n - j;
+ d__1 = 1. / ajj;
+ zdscal_(&i__2, &d__1, &a[j + (j + 1) * a_dim1], lda);
+ }
+
+/* L150: */
+ }
+
+ } else {
+
+/* Compute the Cholesky factorization P' * A * P = L * L' */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Find pivot, test for exit, else swap rows and columns */
+/* Update dot products, compute possible pivots which are */
+/* stored in the second half of WORK */
+
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+
+ if (j > 1) {
+ d_cnjg(&z__2, &a[i__ + (j - 1) * a_dim1]);
+ i__3 = i__ + (j - 1) * a_dim1;
+ z__1.r = z__2.r * a[i__3].r - z__2.i * a[i__3].i, z__1.i =
+ z__2.r * a[i__3].i + z__2.i * a[i__3].r;
+ work[i__] += z__1.r;
+ }
+ i__3 = i__ + i__ * a_dim1;
+ work[*n + i__] = a[i__3].r - work[i__];
+
+/* L160: */
+ }
+
+ if (j > 1) {
+ maxlocval = (*n << 1) - (*n + j) + 1;
+ itemp = dmaxloc_(&work[*n + j], &maxlocval);
+ pvt = itemp + j - 1;
+ ajj = work[*n + pvt];
+ if (ajj <= dstop || disnan_(&ajj)) {
+ i__2 = j + j * a_dim1;
+ a[i__2].r = ajj, a[i__2].i = 0.;
+ goto L190;
+ }
+ }
+
+ if (j != pvt) {
+
+/* Pivot OK, so can now swap pivot rows and columns */
+
+ i__2 = pvt + pvt * a_dim1;
+ i__3 = j + j * a_dim1;
+ a[i__2].r = a[i__3].r, a[i__2].i = a[i__3].i;
+ i__2 = j - 1;
+ zswap_(&i__2, &a[j + a_dim1], lda, &a[pvt + a_dim1], lda);
+ if (pvt < *n) {
+ i__2 = *n - pvt;
+ zswap_(&i__2, &a[pvt + 1 + j * a_dim1], &c__1, &a[pvt + 1
+ + pvt * a_dim1], &c__1);
+ }
+ i__2 = pvt - 1;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ d_cnjg(&z__1, &a[i__ + j * a_dim1]);
+ ztemp.r = z__1.r, ztemp.i = z__1.i;
+ i__3 = i__ + j * a_dim1;
+ d_cnjg(&z__1, &a[pvt + i__ * a_dim1]);
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+ i__3 = pvt + i__ * a_dim1;
+ a[i__3].r = ztemp.r, a[i__3].i = ztemp.i;
+/* L170: */
+ }
+ i__2 = pvt + j * a_dim1;
+ d_cnjg(&z__1, &a[pvt + j * a_dim1]);
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+
+/* Swap dot products and PIV */
+
+ dtemp = work[j];
+ work[j] = work[pvt];
+ work[pvt] = dtemp;
+ itemp = piv[pvt];
+ piv[pvt] = piv[j];
+ piv[j] = itemp;
+ }
+
+ ajj = sqrt(ajj);
+ i__2 = j + j * a_dim1;
+ a[i__2].r = ajj, a[i__2].i = 0.;
+
+/* Compute elements J+1:N of column J */
+
+ if (j < *n) {
+ i__2 = j - 1;
+ zlacgv_(&i__2, &a[j + a_dim1], lda);
+ i__2 = *n - j;
+ i__3 = j - 1;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No Trans", &i__2, &i__3, &z__1, &a[j + 1 + a_dim1],
+ lda, &a[j + a_dim1], lda, &c_b1, &a[j + 1 + j *
+ a_dim1], &c__1);
+ i__2 = j - 1;
+ zlacgv_(&i__2, &a[j + a_dim1], lda);
+ i__2 = *n - j;
+ d__1 = 1. / ajj;
+ zdscal_(&i__2, &d__1, &a[j + 1 + j * a_dim1], &c__1);
+ }
+
+/* L180: */
+ }
+
+ }
+
+/* Ran to completion, A has full rank */
+
+ *rank = *n;
+
+ goto L200;
+L190:
+
+/* Rank is number of steps completed. Set INFO = 1 to signal */
+/* that the factorization cannot be used to solve a system. */
+
+ *rank = j - 1;
+ *info = 1;
+
+L200:
+ return 0;
+
+/* End of ZPSTF2 */
+
+} /* zpstf2_ */
diff --git a/contrib/libs/clapack/zpstrf.c b/contrib/libs/clapack/zpstrf.c
new file mode 100644
index 0000000000..ed1c778e5c
--- /dev/null
+++ b/contrib/libs/clapack/zpstrf.c
@@ -0,0 +1,529 @@
+/* zpstrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static doublereal c_b29 = -1.;
+static doublereal c_b30 = 1.;
+
+/* Subroutine */ int zpstrf_(char *uplo, integer *n, doublecomplex *a,
+ integer *lda, integer *piv, integer *rank, doublereal *tol,
+ doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k, maxlocval, jb, nb;
+ doublereal ajj;
+ integer pvt;
+ extern logical lsame_(char *, char *);
+ doublereal dtemp;
+ integer itemp;
+ extern /* Subroutine */ int zherk_(char *, char *, integer *, integer *,
+ doublereal *, doublecomplex *, integer *, doublereal *,
+ doublecomplex *, integer *), zgemv_(char *,
+ integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ doublereal dstop;
+ logical upper;
+ doublecomplex ztemp;
+ extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int zpstf2_(char *, integer *, doublecomplex *,
+ integer *, integer *, integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical disnan_(doublereal *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int zdscal_(integer *, doublereal *,
+ doublecomplex *, integer *), zlacgv_(integer *, doublecomplex *,
+ integer *);
+ extern integer dmaxloc_(doublereal *, integer *);
+
+
+/* -- LAPACK routine (version 3.2.1) -- */
+
+/* -- Contributed by Craig Lucas, University of Manchester / NAG Ltd. -- */
+/* -- April 2009 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPSTRF computes the Cholesky factorization with complete */
+/* pivoting of a complex Hermitian positive semidefinite matrix A. */
+
+/* The factorization has the form */
+/* P' * A * P = U' * U , if UPLO = 'U', */
+/* P' * A * P = L * L', if UPLO = 'L', */
+/* where U is an upper triangular matrix and L is lower triangular, and */
+/* P is stored as vector PIV. */
+
+/* This algorithm does not attempt to check that A is positive */
+/* semidefinite. This version of the algorithm calls level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n by n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the factor U or L from the Cholesky */
+/* factorization as above. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* PIV (output) INTEGER array, dimension (N) */
+/* PIV is such that the nonzero entries are P( PIV(K), K ) = 1. */
+
+/* RANK (output) INTEGER */
+/* The rank of A given by the number of steps the algorithm */
+/* completed. */
+
+/* TOL (input) DOUBLE PRECISION */
+/* User defined tolerance. If TOL < 0, then N*U*MAX( A(K,K) ) */
+/* will be used. The algorithm terminates at the (K-1)st step */
+/* if the pivot <= TOL. */
+
+/* WORK DOUBLE PRECISION array, dimension (2*N) */
+/* Work space. */
+
+/* INFO (output) INTEGER */
+/* < 0: If INFO = -K, the K-th argument had an illegal value, */
+/* = 0: algorithm completed successfully, and */
+/* > 0: the matrix A is either rank deficient with computed rank */
+/* as returned in RANK, or is indefinite. See Section 7 of */
+/* LAPACK Working Note #161 for further information. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --work;
+ --piv;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPSTRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Get block size */
+
+ nb = ilaenv_(&c__1, "ZPOTRF", uplo, n, &c_n1, &c_n1, &c_n1);
+ if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code */
+
+ zpstf2_(uplo, n, &a[a_dim1 + 1], lda, &piv[1], rank, tol, &work[1],
+ info);
+ goto L230;
+
+ } else {
+
+/* Initialize PIV */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ piv[i__] = i__;
+/* L100: */
+ }
+
+/* Compute stopping value */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ work[i__] = a[i__2].r;
+/* L110: */
+ }
+ pvt = dmaxloc_(&work[1], n);
+ i__1 = pvt + pvt * a_dim1;
+ ajj = a[i__1].r;
+ if (ajj == 0. || disnan_(&ajj)) {
+ *rank = 0;
+ *info = 1;
+ goto L230;
+ }
+
+/* Compute stopping value if not supplied */
+
+ if (*tol < 0.) {
+ dstop = *n * dlamch_("Epsilon") * ajj;
+ } else {
+ dstop = *tol;
+ }
+
+
+ if (upper) {
+
+/* Compute the Cholesky factorization P' * A * P = U' * U */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (k = 1; i__2 < 0 ? k >= i__1 : k <= i__1; k += i__2) {
+
+/* Account for last block not being NB wide */
+
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - k + 1;
+ jb = min(i__3,i__4);
+
+/* Set relevant part of first half of WORK to zero, */
+/* holds dot products */
+
+ i__3 = *n;
+ for (i__ = k; i__ <= i__3; ++i__) {
+ work[i__] = 0.;
+/* L120: */
+ }
+
+ i__3 = k + jb - 1;
+ for (j = k; j <= i__3; ++j) {
+
+/* Find pivot, test for exit, else swap rows and columns */
+/* Update dot products, compute possible pivots which are */
+/* stored in the second half of WORK */
+
+ i__4 = *n;
+ for (i__ = j; i__ <= i__4; ++i__) {
+
+ if (j > k) {
+ d_cnjg(&z__2, &a[j - 1 + i__ * a_dim1]);
+ i__5 = j - 1 + i__ * a_dim1;
+ z__1.r = z__2.r * a[i__5].r - z__2.i * a[i__5].i,
+ z__1.i = z__2.r * a[i__5].i + z__2.i * a[
+ i__5].r;
+ work[i__] += z__1.r;
+ }
+ i__5 = i__ + i__ * a_dim1;
+ work[*n + i__] = a[i__5].r - work[i__];
+
+/* L130: */
+ }
+
+ if (j > 1) {
+ maxlocval = (*n << 1) - (*n + j) + 1;
+ itemp = dmaxloc_(&work[*n + j], &maxlocval);
+ pvt = itemp + j - 1;
+ ajj = work[*n + pvt];
+ if (ajj <= dstop || disnan_(&ajj)) {
+ i__4 = j + j * a_dim1;
+ a[i__4].r = ajj, a[i__4].i = 0.;
+ goto L220;
+ }
+ }
+
+ if (j != pvt) {
+
+/* Pivot OK, so can now swap pivot rows and columns */
+
+ i__4 = pvt + pvt * a_dim1;
+ i__5 = j + j * a_dim1;
+ a[i__4].r = a[i__5].r, a[i__4].i = a[i__5].i;
+ i__4 = j - 1;
+ zswap_(&i__4, &a[j * a_dim1 + 1], &c__1, &a[pvt *
+ a_dim1 + 1], &c__1);
+ if (pvt < *n) {
+ i__4 = *n - pvt;
+ zswap_(&i__4, &a[j + (pvt + 1) * a_dim1], lda, &a[
+ pvt + (pvt + 1) * a_dim1], lda);
+ }
+ i__4 = pvt - 1;
+ for (i__ = j + 1; i__ <= i__4; ++i__) {
+ d_cnjg(&z__1, &a[j + i__ * a_dim1]);
+ ztemp.r = z__1.r, ztemp.i = z__1.i;
+ i__5 = j + i__ * a_dim1;
+ d_cnjg(&z__1, &a[i__ + pvt * a_dim1]);
+ a[i__5].r = z__1.r, a[i__5].i = z__1.i;
+ i__5 = i__ + pvt * a_dim1;
+ a[i__5].r = ztemp.r, a[i__5].i = ztemp.i;
+/* L140: */
+ }
+ i__4 = j + pvt * a_dim1;
+ d_cnjg(&z__1, &a[j + pvt * a_dim1]);
+ a[i__4].r = z__1.r, a[i__4].i = z__1.i;
+
+/* Swap dot products and PIV */
+
+ dtemp = work[j];
+ work[j] = work[pvt];
+ work[pvt] = dtemp;
+ itemp = piv[pvt];
+ piv[pvt] = piv[j];
+ piv[j] = itemp;
+ }
+
+ ajj = sqrt(ajj);
+ i__4 = j + j * a_dim1;
+ a[i__4].r = ajj, a[i__4].i = 0.;
+
+/* Compute elements J+1:N of row J. */
+
+ if (j < *n) {
+ i__4 = j - 1;
+ zlacgv_(&i__4, &a[j * a_dim1 + 1], &c__1);
+ i__4 = j - k;
+ i__5 = *n - j;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("Trans", &i__4, &i__5, &z__1, &a[k + (j + 1) *
+ a_dim1], lda, &a[k + j * a_dim1], &c__1, &
+ c_b1, &a[j + (j + 1) * a_dim1], lda);
+ i__4 = j - 1;
+ zlacgv_(&i__4, &a[j * a_dim1 + 1], &c__1);
+ i__4 = *n - j;
+ d__1 = 1. / ajj;
+ zdscal_(&i__4, &d__1, &a[j + (j + 1) * a_dim1], lda);
+ }
+
+/* L150: */
+ }
+
+/* Update trailing matrix, J already incremented */
+
+ if (k + jb <= *n) {
+ i__3 = *n - j + 1;
+ zherk_("Upper", "Conj Trans", &i__3, &jb, &c_b29, &a[k +
+ j * a_dim1], lda, &c_b30, &a[j + j * a_dim1], lda);
+ }
+
+/* L160: */
+ }
+
+ } else {
+
+/* Compute the Cholesky factorization P' * A * P = L * L' */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (k = 1; i__1 < 0 ? k >= i__2 : k <= i__2; k += i__1) {
+
+/* Account for last block not being NB wide */
+
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - k + 1;
+ jb = min(i__3,i__4);
+
+/* Set relevant part of first half of WORK to zero, */
+/* holds dot products */
+
+ i__3 = *n;
+ for (i__ = k; i__ <= i__3; ++i__) {
+ work[i__] = 0.;
+/* L170: */
+ }
+
+ i__3 = k + jb - 1;
+ for (j = k; j <= i__3; ++j) {
+
+/* Find pivot, test for exit, else swap rows and columns */
+/* Update dot products, compute possible pivots which are */
+/* stored in the second half of WORK */
+
+ i__4 = *n;
+ for (i__ = j; i__ <= i__4; ++i__) {
+
+ if (j > k) {
+ d_cnjg(&z__2, &a[i__ + (j - 1) * a_dim1]);
+ i__5 = i__ + (j - 1) * a_dim1;
+ z__1.r = z__2.r * a[i__5].r - z__2.i * a[i__5].i,
+ z__1.i = z__2.r * a[i__5].i + z__2.i * a[
+ i__5].r;
+ work[i__] += z__1.r;
+ }
+ i__5 = i__ + i__ * a_dim1;
+ work[*n + i__] = a[i__5].r - work[i__];
+
+/* L180: */
+ }
+
+ if (j > 1) {
+ maxlocval = (*n << 1) - (*n + j) + 1;
+ itemp = dmaxloc_(&work[*n + j], &maxlocval);
+ pvt = itemp + j - 1;
+ ajj = work[*n + pvt];
+ if (ajj <= dstop || disnan_(&ajj)) {
+ i__4 = j + j * a_dim1;
+ a[i__4].r = ajj, a[i__4].i = 0.;
+ goto L220;
+ }
+ }
+
+ if (j != pvt) {
+
+/* Pivot OK, so can now swap pivot rows and columns */
+
+ i__4 = pvt + pvt * a_dim1;
+ i__5 = j + j * a_dim1;
+ a[i__4].r = a[i__5].r, a[i__4].i = a[i__5].i;
+ i__4 = j - 1;
+ zswap_(&i__4, &a[j + a_dim1], lda, &a[pvt + a_dim1],
+ lda);
+ if (pvt < *n) {
+ i__4 = *n - pvt;
+ zswap_(&i__4, &a[pvt + 1 + j * a_dim1], &c__1, &a[
+ pvt + 1 + pvt * a_dim1], &c__1);
+ }
+ i__4 = pvt - 1;
+ for (i__ = j + 1; i__ <= i__4; ++i__) {
+ d_cnjg(&z__1, &a[i__ + j * a_dim1]);
+ ztemp.r = z__1.r, ztemp.i = z__1.i;
+ i__5 = i__ + j * a_dim1;
+ d_cnjg(&z__1, &a[pvt + i__ * a_dim1]);
+ a[i__5].r = z__1.r, a[i__5].i = z__1.i;
+ i__5 = pvt + i__ * a_dim1;
+ a[i__5].r = ztemp.r, a[i__5].i = ztemp.i;
+/* L190: */
+ }
+ i__4 = pvt + j * a_dim1;
+ d_cnjg(&z__1, &a[pvt + j * a_dim1]);
+ a[i__4].r = z__1.r, a[i__4].i = z__1.i;
+
+
+/* Swap dot products and PIV */
+
+ dtemp = work[j];
+ work[j] = work[pvt];
+ work[pvt] = dtemp;
+ itemp = piv[pvt];
+ piv[pvt] = piv[j];
+ piv[j] = itemp;
+ }
+
+ ajj = sqrt(ajj);
+ i__4 = j + j * a_dim1;
+ a[i__4].r = ajj, a[i__4].i = 0.;
+
+/* Compute elements J+1:N of column J. */
+
+ if (j < *n) {
+ i__4 = j - 1;
+ zlacgv_(&i__4, &a[j + a_dim1], lda);
+ i__4 = *n - j;
+ i__5 = j - k;
+ z__1.r = -1., z__1.i = -0.;
+ zgemv_("No Trans", &i__4, &i__5, &z__1, &a[j + 1 + k *
+ a_dim1], lda, &a[j + k * a_dim1], lda, &c_b1,
+ &a[j + 1 + j * a_dim1], &c__1);
+ i__4 = j - 1;
+ zlacgv_(&i__4, &a[j + a_dim1], lda);
+ i__4 = *n - j;
+ d__1 = 1. / ajj;
+ zdscal_(&i__4, &d__1, &a[j + 1 + j * a_dim1], &c__1);
+ }
+
+/* L200: */
+ }
+
+/* Update trailing matrix, J already incremented */
+
+ if (k + jb <= *n) {
+ i__3 = *n - j + 1;
+ zherk_("Lower", "No Trans", &i__3, &jb, &c_b29, &a[j + k *
+ a_dim1], lda, &c_b30, &a[j + j * a_dim1], lda);
+ }
+
+/* L210: */
+ }
+
+ }
+ }
+
+/* Ran to completion, A has full rank */
+
+ *rank = *n;
+
+ goto L230;
+L220:
+
+/* Rank is the number of steps completed. Set INFO = 1 to signal */
+/* that the factorization cannot be used to solve a system. */
+
+ *rank = j - 1;
+ *info = 1;
+
+L230:
+ return 0;
+
+/* End of ZPSTRF */
+
+} /* zpstrf_ */
diff --git a/contrib/libs/clapack/zptcon.c b/contrib/libs/clapack/zptcon.c
new file mode 100644
index 0000000000..3e8971c18c
--- /dev/null
+++ b/contrib/libs/clapack/zptcon.c
@@ -0,0 +1,187 @@
+/* zptcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zptcon_(integer *n, doublereal *d__, doublecomplex *e,
+ doublereal *anorm, doublereal *rcond, doublereal *rwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *);
+
+ /* Local variables */
+ integer i__, ix;
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal ainvnm;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPTCON computes the reciprocal of the condition number (in the */
+/* 1-norm) of a complex Hermitian positive definite tridiagonal matrix */
+/* using the factorization A = L*D*L**H or A = U**H*D*U computed by */
+/* ZPTTRF. */
+
+/* Norm(inv(A)) is computed by a direct method, and the reciprocal of */
+/* the condition number is computed as */
+/* RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the diagonal matrix D from the */
+/* factorization of A, as computed by ZPTTRF. */
+
+/* E (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) off-diagonal elements of the unit bidiagonal factor */
+/* U or L from the factorization of A, as computed by ZPTTRF. */
+
+/* ANORM (input) DOUBLE PRECISION */
+/* The 1-norm of the original matrix A. */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is the */
+/* 1-norm of inv(A) computed in this routine. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The method used is described in Nicholas J. Higham, "Efficient */
+/* Algorithms for Computing the Condition Number of a Tridiagonal */
+/* Matrix", SIAM J. Sci. Stat. Comput., Vol. 7, No. 1, January 1986. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments. */
+
+ /* Parameter adjustments */
+ --rwork;
+ --e;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*anorm < 0.) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPTCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.;
+ if (*n == 0) {
+ *rcond = 1.;
+ return 0;
+ } else if (*anorm == 0.) {
+ return 0;
+ }
+
+/* Check that D(1:N) is positive. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] <= 0.) {
+ return 0;
+ }
+/* L10: */
+ }
+
+/* Solve M(A) * x = e, where M(A) = (m(i,j)) is given by */
+
+/* m(i,j) = abs(A(i,j)), i = j, */
+/* m(i,j) = -abs(A(i,j)), i .ne. j, */
+
+/* and e = [ 1, 1, ..., 1 ]'. Note M(A) = M(L)*D*M(L)'. */
+
+/* Solve M(L) * x = e. */
+
+ rwork[1] = 1.;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ rwork[i__] = rwork[i__ - 1] * z_abs(&e[i__ - 1]) + 1.;
+/* L20: */
+ }
+
+/* Solve D * M(L)' * x = b. */
+
+ rwork[*n] /= d__[*n];
+ for (i__ = *n - 1; i__ >= 1; --i__) {
+ rwork[i__] = rwork[i__] / d__[i__] + rwork[i__ + 1] * z_abs(&e[i__]);
+/* L30: */
+ }
+
+/* Compute AINVNM = max(x(i)), 1<=i<=n. */
+
+ ix = idamax_(n, &rwork[1], &c__1);
+ ainvnm = (d__1 = rwork[ix], abs(d__1));
+
+/* Compute the reciprocal condition number. */
+
+ if (ainvnm != 0.) {
+ *rcond = 1. / ainvnm / *anorm;
+ }
+
+ return 0;
+
+/* End of ZPTCON */
+
+} /* zptcon_ */
diff --git a/contrib/libs/clapack/zpteqr.c b/contrib/libs/clapack/zpteqr.c
new file mode 100644
index 0000000000..95584cf95e
--- /dev/null
+++ b/contrib/libs/clapack/zpteqr.c
@@ -0,0 +1,243 @@
+/* zpteqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__0 = 0;
+static integer c__1 = 1;
+
+/* Subroutine */ int zpteqr_(char *compz, integer *n, doublereal *d__,
+ doublereal *e, doublecomplex *z__, integer *ldz, doublereal *work,
+ integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ doublecomplex c__[1] /* was [1][1] */;
+ integer i__;
+ doublecomplex vt[1] /* was [1][1] */;
+ integer nru;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ integer icompz;
+ extern /* Subroutine */ int zlaset_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, doublecomplex *, integer *), dpttrf_(integer *, doublereal *, doublereal *, integer *)
+ , zbdsqr_(char *, integer *, integer *, integer *, integer *,
+ doublereal *, doublereal *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublereal *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPTEQR computes all eigenvalues and, optionally, eigenvectors of a */
+/* symmetric positive definite tridiagonal matrix by first factoring the */
+/* matrix using DPTTRF and then calling ZBDSQR to compute the singular */
+/* values of the bidiagonal factor. */
+
+/* This routine computes the eigenvalues of the positive definite */
+/* tridiagonal matrix to high relative accuracy. This means that if the */
+/* eigenvalues range over many orders of magnitude in size, then the */
+/* small eigenvalues and corresponding eigenvectors will be computed */
+/* more accurately than, for example, with the standard QR method. */
+
+/* The eigenvectors of a full or band positive definite Hermitian matrix */
+/* can also be found if ZHETRD, ZHPTRD, or ZHBTRD has been used to */
+/* reduce this matrix to tridiagonal form. (The reduction to */
+/* tridiagonal form, however, may preclude the possibility of obtaining */
+/* high relative accuracy in the small eigenvalues of the original */
+/* matrix, if these eigenvalues range over many orders of magnitude.) */
+
+/* Arguments */
+/* ========= */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only. */
+/* = 'V': Compute eigenvectors of original Hermitian */
+/* matrix also. Array Z contains the unitary matrix */
+/* used to reduce the original matrix to tridiagonal */
+/* form. */
+/* = 'I': Compute eigenvectors of tridiagonal matrix also. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the n diagonal elements of the tridiagonal matrix. */
+/* On normal exit, D contains the eigenvalues, in descending */
+/* order. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix. */
+/* On exit, E has been destroyed. */
+
+/* Z (input/output) COMPLEX*16 array, dimension (LDZ, N) */
+/* On entry, if COMPZ = 'V', the unitary matrix used in the */
+/* reduction to tridiagonal form. */
+/* On exit, if COMPZ = 'V', the orthonormal eigenvectors of the */
+/* original Hermitian matrix; */
+/* if COMPZ = 'I', the orthonormal eigenvectors of the */
+/* tridiagonal matrix. */
+/* If INFO > 0 on exit, Z contains the eigenvectors associated */
+/* with only the stored eigenvalues. */
+/* If COMPZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* COMPZ = 'V' or 'I', LDZ >= max(1,N). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (4*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: if INFO = i, and i is: */
+/* <= N the Cholesky factorization of the matrix could */
+/* not be performed because the i-th principal minor */
+/* was not positive definite. */
+/* > N the SVD algorithm failed to converge; */
+/* if INFO = N+i, i off-diagonal elements of the */
+/* bidiagonal factor did not converge to zero. */
+
+/* ==================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (lsame_(compz, "N")) {
+ icompz = 0;
+ } else if (lsame_(compz, "V")) {
+ icompz = 1;
+ } else if (lsame_(compz, "I")) {
+ icompz = 2;
+ } else {
+ icompz = -1;
+ }
+ if (icompz < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldz < 1 || icompz > 0 && *ldz < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPTEQR", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (icompz > 0) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1., z__[i__1].i = 0.;
+ }
+ return 0;
+ }
+ if (icompz == 2) {
+ zlaset_("Full", n, n, &c_b1, &c_b2, &z__[z_offset], ldz);
+ }
+
+/* Call DPTTRF to factor the matrix. */
+
+ dpttrf_(n, &d__[1], &e[1], info);
+ if (*info != 0) {
+ return 0;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = sqrt(d__[i__]);
+/* L10: */
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ e[i__] *= d__[i__];
+/* L20: */
+ }
+
+/* Call ZBDSQR to compute the singular values/vectors of the */
+/* bidiagonal factor. */
+
+ if (icompz > 0) {
+ nru = *n;
+ } else {
+ nru = 0;
+ }
+ zbdsqr_("Lower", n, &c__0, &nru, &c__0, &d__[1], &e[1], vt, &c__1, &z__[
+ z_offset], ldz, c__, &c__1, &work[1], info);
+
+/* Square the singular values. */
+
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] *= d__[i__];
+/* L30: */
+ }
+ } else {
+ *info = *n + *info;
+ }
+
+ return 0;
+
+/* End of ZPTEQR */
+
+} /* zpteqr_ */
diff --git a/contrib/libs/clapack/zptrfs.c b/contrib/libs/clapack/zptrfs.c
new file mode 100644
index 0000000000..b57ae48d65
--- /dev/null
+++ b/contrib/libs/clapack/zptrfs.c
@@ -0,0 +1,576 @@
+/* zptrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublecomplex c_b16 = {1.,0.};
+
+/* Subroutine */ int zptrfs_(char *uplo, integer *n, integer *nrhs,
+ doublereal *d__, doublecomplex *e, doublereal *df, doublecomplex *ef,
+ doublecomplex *b, integer *ldb, doublecomplex *x, integer *ldx,
+ doublereal *ferr, doublereal *berr, doublecomplex *work, doublereal *
+ rwork, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6;
+ doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8, d__9, d__10,
+ d__11, d__12;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+ double z_abs(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j;
+ doublereal s;
+ doublecomplex bi, cx, dx, ex;
+ integer ix, nz;
+ doublereal eps, safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer count;
+ logical upper;
+ extern /* Subroutine */ int zaxpy_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ extern doublereal dlamch_(char *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal lstres;
+ extern /* Subroutine */ int zpttrs_(char *, integer *, integer *,
+ doublereal *, doublecomplex *, doublecomplex *, integer *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPTRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is Hermitian positive definite */
+/* and tridiagonal, and provides error bounds and backward error */
+/* estimates for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the superdiagonal or the subdiagonal of the */
+/* tridiagonal matrix A is stored and the form of the */
+/* factorization: */
+/* = 'U': E is the superdiagonal of A, and A = U**H*D*U; */
+/* = 'L': E is the subdiagonal of A, and A = L*D*L**H. */
+/* (The two forms are equivalent if A is real.) */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n real diagonal elements of the tridiagonal matrix A. */
+
+/* E (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) off-diagonal elements of the tridiagonal matrix A */
+/* (see UPLO). */
+
+/* DF (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the diagonal matrix D from */
+/* the factorization computed by ZPTTRF. */
+
+/* EF (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) off-diagonal elements of the unit bidiagonal */
+/* factor U or L from the factorization computed by ZPTTRF */
+/* (see UPLO). */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by ZPTTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ --df;
+ --ef;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldx < max(1,*n)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPTRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.;
+ berr[j] = 0.;
+/* L10: */
+ }
+ return 0;
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = 4;
+ eps = dlamch_("Epsilon");
+ safmin = dlamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X. Also compute */
+/* abs(A)*abs(x) + abs(b) for use in the backward error bound. */
+
+ if (upper) {
+ if (*n == 1) {
+ i__2 = j * b_dim1 + 1;
+ bi.r = b[i__2].r, bi.i = b[i__2].i;
+ i__2 = j * x_dim1 + 1;
+ z__1.r = d__[1] * x[i__2].r, z__1.i = d__[1] * x[i__2].i;
+ dx.r = z__1.r, dx.i = z__1.i;
+ z__1.r = bi.r - dx.r, z__1.i = bi.i - dx.i;
+ work[1].r = z__1.r, work[1].i = z__1.i;
+ rwork[1] = (d__1 = bi.r, abs(d__1)) + (d__2 = d_imag(&bi),
+ abs(d__2)) + ((d__3 = dx.r, abs(d__3)) + (d__4 =
+ d_imag(&dx), abs(d__4)));
+ } else {
+ i__2 = j * b_dim1 + 1;
+ bi.r = b[i__2].r, bi.i = b[i__2].i;
+ i__2 = j * x_dim1 + 1;
+ z__1.r = d__[1] * x[i__2].r, z__1.i = d__[1] * x[i__2].i;
+ dx.r = z__1.r, dx.i = z__1.i;
+ i__2 = j * x_dim1 + 2;
+ z__1.r = e[1].r * x[i__2].r - e[1].i * x[i__2].i, z__1.i = e[
+ 1].r * x[i__2].i + e[1].i * x[i__2].r;
+ ex.r = z__1.r, ex.i = z__1.i;
+ z__2.r = bi.r - dx.r, z__2.i = bi.i - dx.i;
+ z__1.r = z__2.r - ex.r, z__1.i = z__2.i - ex.i;
+ work[1].r = z__1.r, work[1].i = z__1.i;
+ i__2 = j * x_dim1 + 2;
+ rwork[1] = (d__1 = bi.r, abs(d__1)) + (d__2 = d_imag(&bi),
+ abs(d__2)) + ((d__3 = dx.r, abs(d__3)) + (d__4 =
+ d_imag(&dx), abs(d__4))) + ((d__5 = e[1].r, abs(d__5))
+ + (d__6 = d_imag(&e[1]), abs(d__6))) * ((d__7 = x[
+ i__2].r, abs(d__7)) + (d__8 = d_imag(&x[j * x_dim1 +
+ 2]), abs(d__8)));
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ bi.r = b[i__3].r, bi.i = b[i__3].i;
+ d_cnjg(&z__2, &e[i__ - 1]);
+ i__3 = i__ - 1 + j * x_dim1;
+ z__1.r = z__2.r * x[i__3].r - z__2.i * x[i__3].i, z__1.i =
+ z__2.r * x[i__3].i + z__2.i * x[i__3].r;
+ cx.r = z__1.r, cx.i = z__1.i;
+ i__3 = i__;
+ i__4 = i__ + j * x_dim1;
+ z__1.r = d__[i__3] * x[i__4].r, z__1.i = d__[i__3] * x[
+ i__4].i;
+ dx.r = z__1.r, dx.i = z__1.i;
+ i__3 = i__;
+ i__4 = i__ + 1 + j * x_dim1;
+ z__1.r = e[i__3].r * x[i__4].r - e[i__3].i * x[i__4].i,
+ z__1.i = e[i__3].r * x[i__4].i + e[i__3].i * x[
+ i__4].r;
+ ex.r = z__1.r, ex.i = z__1.i;
+ i__3 = i__;
+ z__3.r = bi.r - cx.r, z__3.i = bi.i - cx.i;
+ z__2.r = z__3.r - dx.r, z__2.i = z__3.i - dx.i;
+ z__1.r = z__2.r - ex.r, z__1.i = z__2.i - ex.i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+ i__3 = i__ - 1;
+ i__4 = i__ - 1 + j * x_dim1;
+ i__5 = i__;
+ i__6 = i__ + 1 + j * x_dim1;
+ rwork[i__] = (d__1 = bi.r, abs(d__1)) + (d__2 = d_imag(&
+ bi), abs(d__2)) + ((d__3 = e[i__3].r, abs(d__3))
+ + (d__4 = d_imag(&e[i__ - 1]), abs(d__4))) * ((
+ d__5 = x[i__4].r, abs(d__5)) + (d__6 = d_imag(&x[
+ i__ - 1 + j * x_dim1]), abs(d__6))) + ((d__7 =
+ dx.r, abs(d__7)) + (d__8 = d_imag(&dx), abs(d__8))
+ ) + ((d__9 = e[i__5].r, abs(d__9)) + (d__10 =
+ d_imag(&e[i__]), abs(d__10))) * ((d__11 = x[i__6]
+ .r, abs(d__11)) + (d__12 = d_imag(&x[i__ + 1 + j *
+ x_dim1]), abs(d__12)));
+/* L30: */
+ }
+ i__2 = *n + j * b_dim1;
+ bi.r = b[i__2].r, bi.i = b[i__2].i;
+ d_cnjg(&z__2, &e[*n - 1]);
+ i__2 = *n - 1 + j * x_dim1;
+ z__1.r = z__2.r * x[i__2].r - z__2.i * x[i__2].i, z__1.i =
+ z__2.r * x[i__2].i + z__2.i * x[i__2].r;
+ cx.r = z__1.r, cx.i = z__1.i;
+ i__2 = *n;
+ i__3 = *n + j * x_dim1;
+ z__1.r = d__[i__2] * x[i__3].r, z__1.i = d__[i__2] * x[i__3]
+ .i;
+ dx.r = z__1.r, dx.i = z__1.i;
+ i__2 = *n;
+ z__2.r = bi.r - cx.r, z__2.i = bi.i - cx.i;
+ z__1.r = z__2.r - dx.r, z__1.i = z__2.i - dx.i;
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+ i__2 = *n - 1;
+ i__3 = *n - 1 + j * x_dim1;
+ rwork[*n] = (d__1 = bi.r, abs(d__1)) + (d__2 = d_imag(&bi),
+ abs(d__2)) + ((d__3 = e[i__2].r, abs(d__3)) + (d__4 =
+ d_imag(&e[*n - 1]), abs(d__4))) * ((d__5 = x[i__3].r,
+ abs(d__5)) + (d__6 = d_imag(&x[*n - 1 + j * x_dim1]),
+ abs(d__6))) + ((d__7 = dx.r, abs(d__7)) + (d__8 =
+ d_imag(&dx), abs(d__8)));
+ }
+ } else {
+ if (*n == 1) {
+ i__2 = j * b_dim1 + 1;
+ bi.r = b[i__2].r, bi.i = b[i__2].i;
+ i__2 = j * x_dim1 + 1;
+ z__1.r = d__[1] * x[i__2].r, z__1.i = d__[1] * x[i__2].i;
+ dx.r = z__1.r, dx.i = z__1.i;
+ z__1.r = bi.r - dx.r, z__1.i = bi.i - dx.i;
+ work[1].r = z__1.r, work[1].i = z__1.i;
+ rwork[1] = (d__1 = bi.r, abs(d__1)) + (d__2 = d_imag(&bi),
+ abs(d__2)) + ((d__3 = dx.r, abs(d__3)) + (d__4 =
+ d_imag(&dx), abs(d__4)));
+ } else {
+ i__2 = j * b_dim1 + 1;
+ bi.r = b[i__2].r, bi.i = b[i__2].i;
+ i__2 = j * x_dim1 + 1;
+ z__1.r = d__[1] * x[i__2].r, z__1.i = d__[1] * x[i__2].i;
+ dx.r = z__1.r, dx.i = z__1.i;
+ d_cnjg(&z__2, &e[1]);
+ i__2 = j * x_dim1 + 2;
+ z__1.r = z__2.r * x[i__2].r - z__2.i * x[i__2].i, z__1.i =
+ z__2.r * x[i__2].i + z__2.i * x[i__2].r;
+ ex.r = z__1.r, ex.i = z__1.i;
+ z__2.r = bi.r - dx.r, z__2.i = bi.i - dx.i;
+ z__1.r = z__2.r - ex.r, z__1.i = z__2.i - ex.i;
+ work[1].r = z__1.r, work[1].i = z__1.i;
+ i__2 = j * x_dim1 + 2;
+ rwork[1] = (d__1 = bi.r, abs(d__1)) + (d__2 = d_imag(&bi),
+ abs(d__2)) + ((d__3 = dx.r, abs(d__3)) + (d__4 =
+ d_imag(&dx), abs(d__4))) + ((d__5 = e[1].r, abs(d__5))
+ + (d__6 = d_imag(&e[1]), abs(d__6))) * ((d__7 = x[
+ i__2].r, abs(d__7)) + (d__8 = d_imag(&x[j * x_dim1 +
+ 2]), abs(d__8)));
+ i__2 = *n - 1;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ bi.r = b[i__3].r, bi.i = b[i__3].i;
+ i__3 = i__ - 1;
+ i__4 = i__ - 1 + j * x_dim1;
+ z__1.r = e[i__3].r * x[i__4].r - e[i__3].i * x[i__4].i,
+ z__1.i = e[i__3].r * x[i__4].i + e[i__3].i * x[
+ i__4].r;
+ cx.r = z__1.r, cx.i = z__1.i;
+ i__3 = i__;
+ i__4 = i__ + j * x_dim1;
+ z__1.r = d__[i__3] * x[i__4].r, z__1.i = d__[i__3] * x[
+ i__4].i;
+ dx.r = z__1.r, dx.i = z__1.i;
+ d_cnjg(&z__2, &e[i__]);
+ i__3 = i__ + 1 + j * x_dim1;
+ z__1.r = z__2.r * x[i__3].r - z__2.i * x[i__3].i, z__1.i =
+ z__2.r * x[i__3].i + z__2.i * x[i__3].r;
+ ex.r = z__1.r, ex.i = z__1.i;
+ i__3 = i__;
+ z__3.r = bi.r - cx.r, z__3.i = bi.i - cx.i;
+ z__2.r = z__3.r - dx.r, z__2.i = z__3.i - dx.i;
+ z__1.r = z__2.r - ex.r, z__1.i = z__2.i - ex.i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+ i__3 = i__ - 1;
+ i__4 = i__ - 1 + j * x_dim1;
+ i__5 = i__;
+ i__6 = i__ + 1 + j * x_dim1;
+ rwork[i__] = (d__1 = bi.r, abs(d__1)) + (d__2 = d_imag(&
+ bi), abs(d__2)) + ((d__3 = e[i__3].r, abs(d__3))
+ + (d__4 = d_imag(&e[i__ - 1]), abs(d__4))) * ((
+ d__5 = x[i__4].r, abs(d__5)) + (d__6 = d_imag(&x[
+ i__ - 1 + j * x_dim1]), abs(d__6))) + ((d__7 =
+ dx.r, abs(d__7)) + (d__8 = d_imag(&dx), abs(d__8))
+ ) + ((d__9 = e[i__5].r, abs(d__9)) + (d__10 =
+ d_imag(&e[i__]), abs(d__10))) * ((d__11 = x[i__6]
+ .r, abs(d__11)) + (d__12 = d_imag(&x[i__ + 1 + j *
+ x_dim1]), abs(d__12)));
+/* L40: */
+ }
+ i__2 = *n + j * b_dim1;
+ bi.r = b[i__2].r, bi.i = b[i__2].i;
+ i__2 = *n - 1;
+ i__3 = *n - 1 + j * x_dim1;
+ z__1.r = e[i__2].r * x[i__3].r - e[i__2].i * x[i__3].i,
+ z__1.i = e[i__2].r * x[i__3].i + e[i__2].i * x[i__3]
+ .r;
+ cx.r = z__1.r, cx.i = z__1.i;
+ i__2 = *n;
+ i__3 = *n + j * x_dim1;
+ z__1.r = d__[i__2] * x[i__3].r, z__1.i = d__[i__2] * x[i__3]
+ .i;
+ dx.r = z__1.r, dx.i = z__1.i;
+ i__2 = *n;
+ z__2.r = bi.r - cx.r, z__2.i = bi.i - cx.i;
+ z__1.r = z__2.r - dx.r, z__1.i = z__2.i - dx.i;
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+ i__2 = *n - 1;
+ i__3 = *n - 1 + j * x_dim1;
+ rwork[*n] = (d__1 = bi.r, abs(d__1)) + (d__2 = d_imag(&bi),
+ abs(d__2)) + ((d__3 = e[i__2].r, abs(d__3)) + (d__4 =
+ d_imag(&e[*n - 1]), abs(d__4))) * ((d__5 = x[i__3].r,
+ abs(d__5)) + (d__6 = d_imag(&x[*n - 1 + j * x_dim1]),
+ abs(d__6))) + ((d__7 = dx.r, abs(d__7)) + (d__8 =
+ d_imag(&dx), abs(d__8)));
+ }
+ }
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ s = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+/* Computing MAX */
+ i__3 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2))) / rwork[i__];
+ s = max(d__3,d__4);
+ } else {
+/* Computing MAX */
+ i__3 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + safe1) / (rwork[i__]
+ + safe1);
+ s = max(d__3,d__4);
+ }
+/* L50: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ zpttrs_(uplo, n, &c__1, &df[1], &ef[1], &work[1], n, info);
+ zaxpy_(n, &c_b16, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+ i__3 = i__;
+ rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ ;
+ } else {
+ i__3 = i__;
+ rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ + safe1;
+ }
+/* L60: */
+ }
+ ix = idamax_(n, &rwork[1], &c__1);
+ ferr[j] = rwork[ix];
+
+/* Estimate the norm of inv(A). */
+
+/* Solve M(A) * x = e, where M(A) = (m(i,j)) is given by */
+
+/* m(i,j) = abs(A(i,j)), i = j, */
+/* m(i,j) = -abs(A(i,j)), i .ne. j, */
+
+/* and e = [ 1, 1, ..., 1 ]'. Note M(A) = M(L)*D*M(L)'. */
+
+/* Solve M(L) * x = e. */
+
+ rwork[1] = 1.;
+ i__2 = *n;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ rwork[i__] = rwork[i__ - 1] * z_abs(&ef[i__ - 1]) + 1.;
+/* L70: */
+ }
+
+/* Solve D * M(L)' * x = b. */
+
+ rwork[*n] /= df[*n];
+ for (i__ = *n - 1; i__ >= 1; --i__) {
+ rwork[i__] = rwork[i__] / df[i__] + rwork[i__ + 1] * z_abs(&ef[
+ i__]);
+/* L80: */
+ }
+
+/* Compute norm(inv(A)) = max(x(i)), 1<=i<=n. */
+
+ ix = idamax_(n, &rwork[1], &c__1);
+ ferr[j] *= (d__1 = rwork[ix], abs(d__1));
+
+/* Normalize error. */
+
+ lstres = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__1 = lstres, d__2 = z_abs(&x[i__ + j * x_dim1]);
+ lstres = max(d__1,d__2);
+/* L90: */
+ }
+ if (lstres != 0.) {
+ ferr[j] /= lstres;
+ }
+
+/* L100: */
+ }
+
+ return 0;
+
+/* End of ZPTRFS */
+
+} /* zptrfs_ */
diff --git a/contrib/libs/clapack/zptsv.c b/contrib/libs/clapack/zptsv.c
new file mode 100644
index 0000000000..a29ead413b
--- /dev/null
+++ b/contrib/libs/clapack/zptsv.c
@@ -0,0 +1,130 @@
+/* zptsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zptsv_(integer *n, integer *nrhs, doublereal *d__,
+ doublecomplex *e, doublecomplex *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern /* Subroutine */ int xerbla_(char *, integer *), zpttrf_(
+ integer *, doublereal *, doublecomplex *, integer *), zpttrs_(
+ char *, integer *, integer *, doublereal *, doublecomplex *,
+ doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPTSV computes the solution to a complex system of linear equations */
+/* A*X = B, where A is an N-by-N Hermitian positive definite tridiagonal */
+/* matrix, and X and B are N-by-NRHS matrices. */
+
+/* A is factored as A = L*D*L**H, and the factored form of A is then */
+/* used to solve the system of equations. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the n diagonal elements of the tridiagonal matrix */
+/* A. On exit, the n diagonal elements of the diagonal matrix */
+/* D from the factorization A = L*D*L**H. */
+
+/* E (input/output) COMPLEX*16 array, dimension (N-1) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix A. On exit, the (n-1) subdiagonal elements of the */
+/* unit bidiagonal factor L from the L*D*L**H factorization of */
+/* A. E can also be regarded as the superdiagonal of the unit */
+/* bidiagonal factor U from the U**H*D*U factorization of A. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,N) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the leading minor of order i is not */
+/* positive definite, and the solution has not been */
+/* computed. The factorization has not been completed */
+/* unless i = N. */
+
+/* ===================================================================== */
+
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*nrhs < 0) {
+ *info = -2;
+ } else if (*ldb < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPTSV ", &i__1);
+ return 0;
+ }
+
+/* Compute the L*D*L' (or U'*D*U) factorization of A. */
+
+ zpttrf_(n, &d__[1], &e[1], info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ zpttrs_("Lower", n, nrhs, &d__[1], &e[1], &b[b_offset], ldb, info);
+ }
+ return 0;
+
+/* End of ZPTSV */
+
+} /* zptsv_ */
diff --git a/contrib/libs/clapack/zptsvx.c b/contrib/libs/clapack/zptsvx.c
new file mode 100644
index 0000000000..47ad812d08
--- /dev/null
+++ b/contrib/libs/clapack/zptsvx.c
@@ -0,0 +1,290 @@
+/* zptsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zptsvx_(char *fact, integer *n, integer *nrhs,
+ doublereal *d__, doublecomplex *e, doublereal *df, doublecomplex *ef,
+ doublecomplex *b, integer *ldb, doublecomplex *x, integer *ldx,
+ doublereal *rcond, doublereal *ferr, doublereal *berr, doublecomplex *
+ work, doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ doublereal anorm;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), zcopy_(integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *);
+ extern doublereal dlamch_(char *);
+ logical nofact;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern doublereal zlanht_(char *, integer *, doublereal *, doublecomplex *
+);
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *),
+ zptcon_(integer *, doublereal *, doublecomplex *, doublereal *,
+ doublereal *, doublereal *, integer *), zptrfs_(char *, integer *,
+ integer *, doublereal *, doublecomplex *, doublereal *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublereal *, doublereal *, doublecomplex *,
+ doublereal *, integer *), zpttrf_(integer *, doublereal *,
+ doublecomplex *, integer *), zpttrs_(char *, integer *, integer *
+, doublereal *, doublecomplex *, doublecomplex *, integer *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPTSVX uses the factorization A = L*D*L**H to compute the solution */
+/* to a complex system of linear equations A*X = B, where A is an */
+/* N-by-N Hermitian positive definite tridiagonal matrix and X and B */
+/* are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the matrix A is factored as A = L*D*L**H, where L */
+/* is a unit lower bidiagonal matrix and D is diagonal. The */
+/* factorization can also be regarded as having the form */
+/* A = U**H*D*U. */
+
+/* 2. If the leading i-by-i principal minor is not positive definite, */
+/* then the routine returns with INFO = i. Otherwise, the factored */
+/* form of A is used to estimate the condition number of the matrix */
+/* A. If the reciprocal of the condition number is less than machine */
+/* precision, INFO = N+1 is returned as a warning, but the routine */
+/* still goes on to solve for X and compute error bounds as */
+/* described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of the matrix */
+/* A is supplied on entry. */
+/* = 'F': On entry, DF and EF contain the factored form of A. */
+/* D, E, DF, and EF will not be modified. */
+/* = 'N': The matrix A will be copied to DF and EF and */
+/* factored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the tridiagonal matrix A. */
+
+/* E (input) COMPLEX*16 array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of the tridiagonal matrix A. */
+
+/* DF (input or output) DOUBLE PRECISION array, dimension (N) */
+/* If FACT = 'F', then DF is an input argument and on entry */
+/* contains the n diagonal elements of the diagonal matrix D */
+/* from the L*D*L**H factorization of A. */
+/* If FACT = 'N', then DF is an output argument and on exit */
+/* contains the n diagonal elements of the diagonal matrix D */
+/* from the L*D*L**H factorization of A. */
+
+/* EF (input or output) COMPLEX*16 array, dimension (N-1) */
+/* If FACT = 'F', then EF is an input argument and on entry */
+/* contains the (n-1) subdiagonal elements of the unit */
+/* bidiagonal factor L from the L*D*L**H factorization of A. */
+/* If FACT = 'N', then EF is an output argument and on exit */
+/* contains the (n-1) subdiagonal elements of the unit */
+/* bidiagonal factor L from the L*D*L**H factorization of A. */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal condition number of the matrix A. If RCOND */
+/* is less than the machine precision (in particular, if */
+/* RCOND = 0), the matrix is singular to working precision. */
+/* This condition is indicated by a return code of INFO > 0. */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in any */
+/* element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: the leading minor of order i of A is */
+/* not positive definite, so the factorization */
+/* could not be completed, and the solution has not */
+/* been computed. RCOND = 0 is returned. */
+/* = N+1: U is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ --df;
+ --ef;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ if (! nofact && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldx < max(1,*n)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPTSVX", &i__1);
+ return 0;
+ }
+
+ if (nofact) {
+
+/* Compute the L*D*L' (or U'*D*U) factorization of A. */
+
+ dcopy_(n, &d__[1], &c__1, &df[1], &c__1);
+ if (*n > 1) {
+ i__1 = *n - 1;
+ zcopy_(&i__1, &e[1], &c__1, &ef[1], &c__1);
+ }
+ zpttrf_(n, &df[1], &ef[1], info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ anorm = zlanht_("1", n, &d__[1], &e[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ zptcon_(n, &df[1], &ef[1], &anorm, rcond, &rwork[1], info);
+
+/* Compute the solution vectors X. */
+
+ zlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ zpttrs_("Lower", n, nrhs, &df[1], &ef[1], &x[x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+ zptrfs_("Lower", n, nrhs, &d__[1], &e[1], &df[1], &ef[1], &b[b_offset],
+ ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[1], &rwork[1],
+ info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < dlamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ return 0;
+
+/* End of ZPTSVX */
+
+} /* zptsvx_ */
diff --git a/contrib/libs/clapack/zpttrf.c b/contrib/libs/clapack/zpttrf.c
new file mode 100644
index 0000000000..9fb3b09c17
--- /dev/null
+++ b/contrib/libs/clapack/zpttrf.c
@@ -0,0 +1,216 @@
+/* zpttrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zpttrf_(integer *n, doublereal *d__, doublecomplex *e,
+ integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ doublereal f, g;
+ integer i__, i4;
+ doublereal eii, eir;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPTTRF computes the L*D*L' factorization of a complex Hermitian */
+/* positive definite tridiagonal matrix A. The factorization may also */
+/* be regarded as having the form A = U'*D*U. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the n diagonal elements of the tridiagonal matrix */
+/* A. On exit, the n diagonal elements of the diagonal matrix */
+/* D from the L*D*L' factorization of A. */
+
+/* E (input/output) COMPLEX*16 array, dimension (N-1) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix A. On exit, the (n-1) subdiagonal elements of the */
+/* unit bidiagonal factor L from the L*D*L' factorization of A. */
+/* E can also be regarded as the superdiagonal of the unit */
+/* bidiagonal factor U from the U'*D*U factorization of A. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, the leading minor of order k is not */
+/* positive definite; if k < N, the factorization could not */
+/* be completed, while if k = N, the factorization was */
+/* completed, but D(N) <= 0. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --e;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ i__1 = -(*info);
+ xerbla_("ZPTTRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Compute the L*D*L' (or U'*D*U) factorization of A. */
+
+ i4 = (*n - 1) % 4;
+ i__1 = i4;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] <= 0.) {
+ *info = i__;
+ goto L30;
+ }
+ i__2 = i__;
+ eir = e[i__2].r;
+ eii = d_imag(&e[i__]);
+ f = eir / d__[i__];
+ g = eii / d__[i__];
+ i__2 = i__;
+ z__1.r = f, z__1.i = g;
+ e[i__2].r = z__1.r, e[i__2].i = z__1.i;
+ d__[i__ + 1] = d__[i__ + 1] - f * eir - g * eii;
+/* L10: */
+ }
+
+ i__1 = *n - 4;
+ for (i__ = i4 + 1; i__ <= i__1; i__ += 4) {
+
+/* Drop out of the loop if d(i) <= 0: the matrix is not positive */
+/* definite. */
+
+ if (d__[i__] <= 0.) {
+ *info = i__;
+ goto L30;
+ }
+
+/* Solve for e(i) and d(i+1). */
+
+ i__2 = i__;
+ eir = e[i__2].r;
+ eii = d_imag(&e[i__]);
+ f = eir / d__[i__];
+ g = eii / d__[i__];
+ i__2 = i__;
+ z__1.r = f, z__1.i = g;
+ e[i__2].r = z__1.r, e[i__2].i = z__1.i;
+ d__[i__ + 1] = d__[i__ + 1] - f * eir - g * eii;
+
+ if (d__[i__ + 1] <= 0.) {
+ *info = i__ + 1;
+ goto L30;
+ }
+
+/* Solve for e(i+1) and d(i+2). */
+
+ i__2 = i__ + 1;
+ eir = e[i__2].r;
+ eii = d_imag(&e[i__ + 1]);
+ f = eir / d__[i__ + 1];
+ g = eii / d__[i__ + 1];
+ i__2 = i__ + 1;
+ z__1.r = f, z__1.i = g;
+ e[i__2].r = z__1.r, e[i__2].i = z__1.i;
+ d__[i__ + 2] = d__[i__ + 2] - f * eir - g * eii;
+
+ if (d__[i__ + 2] <= 0.) {
+ *info = i__ + 2;
+ goto L30;
+ }
+
+/* Solve for e(i+2) and d(i+3). */
+
+ i__2 = i__ + 2;
+ eir = e[i__2].r;
+ eii = d_imag(&e[i__ + 2]);
+ f = eir / d__[i__ + 2];
+ g = eii / d__[i__ + 2];
+ i__2 = i__ + 2;
+ z__1.r = f, z__1.i = g;
+ e[i__2].r = z__1.r, e[i__2].i = z__1.i;
+ d__[i__ + 3] = d__[i__ + 3] - f * eir - g * eii;
+
+ if (d__[i__ + 3] <= 0.) {
+ *info = i__ + 3;
+ goto L30;
+ }
+
+/* Solve for e(i+3) and d(i+4). */
+
+ i__2 = i__ + 3;
+ eir = e[i__2].r;
+ eii = d_imag(&e[i__ + 3]);
+ f = eir / d__[i__ + 3];
+ g = eii / d__[i__ + 3];
+ i__2 = i__ + 3;
+ z__1.r = f, z__1.i = g;
+ e[i__2].r = z__1.r, e[i__2].i = z__1.i;
+ d__[i__ + 4] = d__[i__ + 4] - f * eir - g * eii;
+/* L20: */
+ }
+
+/* Check d(n) for positive definiteness. */
+
+ if (d__[*n] <= 0.) {
+ *info = *n;
+ }
+
+L30:
+ return 0;
+
+/* End of ZPTTRF */
+
+} /* zpttrf_ */
diff --git a/contrib/libs/clapack/zpttrs.c b/contrib/libs/clapack/zpttrs.c
new file mode 100644
index 0000000000..ebca07e955
--- /dev/null
+++ b/contrib/libs/clapack/zpttrs.c
@@ -0,0 +1,178 @@
+/* zpttrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zpttrs_(char *uplo, integer *n, integer *nrhs,
+ doublereal *d__, doublecomplex *e, doublecomplex *b, integer *ldb,
+ integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer j, jb, nb, iuplo;
+ logical upper;
+ extern /* Subroutine */ int zptts2_(integer *, integer *, integer *,
+ doublereal *, doublecomplex *, doublecomplex *, integer *),
+ xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPTTRS solves a tridiagonal system of the form */
+/* A * X = B */
+/* using the factorization A = U'*D*U or A = L*D*L' computed by ZPTTRF. */
+/* D is a diagonal matrix specified in the vector D, U (or L) is a unit */
+/* bidiagonal matrix whose superdiagonal (subdiagonal) is specified in */
+/* the vector E, and X and B are N by NRHS matrices. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies the form of the factorization and whether the */
+/* vector E is the superdiagonal of the upper bidiagonal factor */
+/* U or the subdiagonal of the lower bidiagonal factor L. */
+/* = 'U': A = U'*D*U, E is the superdiagonal of U */
+/* = 'L': A = L*D*L', E is the subdiagonal of L */
+
+/* N (input) INTEGER */
+/* The order of the tridiagonal matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the diagonal matrix D from the */
+/* factorization A = U'*D*U or A = L*D*L'. */
+
+/* E (input) COMPLEX*16 array, dimension (N-1) */
+/* If UPLO = 'U', the (n-1) superdiagonal elements of the unit */
+/* bidiagonal factor U from the factorization A = U'*D*U. */
+/* If UPLO = 'L', the (n-1) subdiagonal elements of the unit */
+/* bidiagonal factor L from the factorization A = L*D*L'. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the right hand side vectors B for the system of */
+/* linear equations. */
+/* On exit, the solution vectors, X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = *(unsigned char *)uplo == 'U' || *(unsigned char *)uplo == 'u';
+ if (! upper && ! (*(unsigned char *)uplo == 'L' || *(unsigned char *)uplo
+ == 'l')) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPTTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ return 0;
+ }
+
+/* Determine the number of right-hand sides to solve at a time. */
+
+ if (*nrhs == 1) {
+ nb = 1;
+ } else {
+/* Computing MAX */
+ i__1 = 1, i__2 = ilaenv_(&c__1, "ZPTTRS", uplo, n, nrhs, &c_n1, &c_n1);
+ nb = max(i__1,i__2);
+ }
+
+/* Decode UPLO */
+
+ if (upper) {
+ iuplo = 1;
+ } else {
+ iuplo = 0;
+ }
+
+ if (nb >= *nrhs) {
+ zptts2_(&iuplo, n, nrhs, &d__[1], &e[1], &b[b_offset], ldb);
+ } else {
+ i__1 = *nrhs;
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__3 = *nrhs - j + 1;
+ jb = min(i__3,nb);
+ zptts2_(&iuplo, n, &jb, &d__[1], &e[1], &b[j * b_dim1 + 1], ldb);
+/* L10: */
+ }
+ }
+
+ return 0;
+
+/* End of ZPTTRS */
+
+} /* zpttrs_ */
diff --git a/contrib/libs/clapack/zptts2.c b/contrib/libs/clapack/zptts2.c
new file mode 100644
index 0000000000..bcfbedcc48
--- /dev/null
+++ b/contrib/libs/clapack/zptts2.c
@@ -0,0 +1,315 @@
+/* zptts2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zptts2_(integer *iuplo, integer *n, integer *nrhs,
+ doublereal *d__, doublecomplex *e, doublecomplex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+ doublereal d__1;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j;
+ extern /* Subroutine */ int zdscal_(integer *, doublereal *,
+ doublecomplex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZPTTS2 solves a tridiagonal system of the form */
+/* A * X = B */
+/* using the factorization A = U'*D*U or A = L*D*L' computed by ZPTTRF. */
+/* D is a diagonal matrix specified in the vector D, U (or L) is a unit */
+/* bidiagonal matrix whose superdiagonal (subdiagonal) is specified in */
+/* the vector E, and X and B are N by NRHS matrices. */
+
+/* Arguments */
+/* ========= */
+
+/* IUPLO (input) INTEGER */
+/* Specifies the form of the factorization and whether the */
+/* vector E is the superdiagonal of the upper bidiagonal factor */
+/* U or the subdiagonal of the lower bidiagonal factor L. */
+/* = 1: A = U'*D*U, E is the superdiagonal of U */
+/* = 0: A = L*D*L', E is the subdiagonal of L */
+
+/* N (input) INTEGER */
+/* The order of the tridiagonal matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the diagonal matrix D from the */
+/* factorization A = U'*D*U or A = L*D*L'. */
+
+/* E (input) COMPLEX*16 array, dimension (N-1) */
+/* If IUPLO = 1, the (n-1) superdiagonal elements of the unit */
+/* bidiagonal factor U from the factorization A = U'*D*U. */
+/* If IUPLO = 0, the (n-1) subdiagonal elements of the unit */
+/* bidiagonal factor L from the factorization A = L*D*L'. */
+
+/* B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) */
+/* On entry, the right hand side vectors B for the system of */
+/* linear equations. */
+/* On exit, the solution vectors, X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Quick return if possible */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ if (*n <= 1) {
+ if (*n == 1) {
+ d__1 = 1. / d__[1];
+ zdscal_(nrhs, &d__1, &b[b_offset], ldb);
+ }
+ return 0;
+ }
+
+ if (*iuplo == 1) {
+
+/* Solve A * X = B using the factorization A = U'*D*U, */
+/* overwriting each right hand side vector with its solution. */
+
+ if (*nrhs <= 2) {
+ j = 1;
+L10:
+
+/* Solve U' * x = b. */
+
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ - 1 + j * b_dim1;
+ d_cnjg(&z__3, &e[i__ - 1]);
+ z__2.r = b[i__4].r * z__3.r - b[i__4].i * z__3.i, z__2.i = b[
+ i__4].r * z__3.i + b[i__4].i * z__3.r;
+ z__1.r = b[i__3].r - z__2.r, z__1.i = b[i__3].i - z__2.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L20: */
+ }
+
+/* Solve D * U * x = b. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__;
+ z__1.r = b[i__3].r / d__[i__4], z__1.i = b[i__3].i / d__[i__4]
+ ;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L30: */
+ }
+ for (i__ = *n - 1; i__ >= 1; --i__) {
+ i__1 = i__ + j * b_dim1;
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + 1 + j * b_dim1;
+ i__4 = i__;
+ z__2.r = b[i__3].r * e[i__4].r - b[i__3].i * e[i__4].i,
+ z__2.i = b[i__3].r * e[i__4].i + b[i__3].i * e[i__4]
+ .r;
+ z__1.r = b[i__2].r - z__2.r, z__1.i = b[i__2].i - z__2.i;
+ b[i__1].r = z__1.r, b[i__1].i = z__1.i;
+/* L40: */
+ }
+ if (j < *nrhs) {
+ ++j;
+ goto L10;
+ }
+ } else {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Solve U' * x = b. */
+
+ i__2 = *n;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ - 1 + j * b_dim1;
+ d_cnjg(&z__3, &e[i__ - 1]);
+ z__2.r = b[i__5].r * z__3.r - b[i__5].i * z__3.i, z__2.i =
+ b[i__5].r * z__3.i + b[i__5].i * z__3.r;
+ z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4].i - z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L50: */
+ }
+
+/* Solve D * U * x = b. */
+
+ i__2 = *n + j * b_dim1;
+ i__3 = *n + j * b_dim1;
+ i__4 = *n;
+ z__1.r = b[i__3].r / d__[i__4], z__1.i = b[i__3].i / d__[i__4]
+ ;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ for (i__ = *n - 1; i__ >= 1; --i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__;
+ z__2.r = b[i__3].r / d__[i__4], z__2.i = b[i__3].i / d__[
+ i__4];
+ i__5 = i__ + 1 + j * b_dim1;
+ i__6 = i__;
+ z__3.r = b[i__5].r * e[i__6].r - b[i__5].i * e[i__6].i,
+ z__3.i = b[i__5].r * e[i__6].i + b[i__5].i * e[
+ i__6].r;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L60: */
+ }
+/* L70: */
+ }
+ }
+ } else {
+
+/* Solve A * X = B using the factorization A = L*D*L', */
+/* overwriting each right hand side vector with its solution. */
+
+ if (*nrhs <= 2) {
+ j = 1;
+L80:
+
+/* Solve L * x = b. */
+
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ - 1 + j * b_dim1;
+ i__5 = i__ - 1;
+ z__2.r = b[i__4].r * e[i__5].r - b[i__4].i * e[i__5].i,
+ z__2.i = b[i__4].r * e[i__5].i + b[i__4].i * e[i__5]
+ .r;
+ z__1.r = b[i__3].r - z__2.r, z__1.i = b[i__3].i - z__2.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L90: */
+ }
+
+/* Solve D * L' * x = b. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__;
+ z__1.r = b[i__3].r / d__[i__4], z__1.i = b[i__3].i / d__[i__4]
+ ;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L100: */
+ }
+ for (i__ = *n - 1; i__ >= 1; --i__) {
+ i__1 = i__ + j * b_dim1;
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + 1 + j * b_dim1;
+ d_cnjg(&z__3, &e[i__]);
+ z__2.r = b[i__3].r * z__3.r - b[i__3].i * z__3.i, z__2.i = b[
+ i__3].r * z__3.i + b[i__3].i * z__3.r;
+ z__1.r = b[i__2].r - z__2.r, z__1.i = b[i__2].i - z__2.i;
+ b[i__1].r = z__1.r, b[i__1].i = z__1.i;
+/* L110: */
+ }
+ if (j < *nrhs) {
+ ++j;
+ goto L80;
+ }
+ } else {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Solve L * x = b. */
+
+ i__2 = *n;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ - 1 + j * b_dim1;
+ i__6 = i__ - 1;
+ z__2.r = b[i__5].r * e[i__6].r - b[i__5].i * e[i__6].i,
+ z__2.i = b[i__5].r * e[i__6].i + b[i__5].i * e[
+ i__6].r;
+ z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4].i - z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L120: */
+ }
+
+/* Solve D * L' * x = b. */
+
+ i__2 = *n + j * b_dim1;
+ i__3 = *n + j * b_dim1;
+ i__4 = *n;
+ z__1.r = b[i__3].r / d__[i__4], z__1.i = b[i__3].i / d__[i__4]
+ ;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ for (i__ = *n - 1; i__ >= 1; --i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__;
+ z__2.r = b[i__3].r / d__[i__4], z__2.i = b[i__3].i / d__[
+ i__4];
+ i__5 = i__ + 1 + j * b_dim1;
+ d_cnjg(&z__4, &e[i__]);
+ z__3.r = b[i__5].r * z__4.r - b[i__5].i * z__4.i, z__3.i =
+ b[i__5].r * z__4.i + b[i__5].i * z__4.r;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L130: */
+ }
+/* L140: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZPTTS2 */
+
+} /* zptts2_ */
diff --git a/contrib/libs/clapack/zrot.c b/contrib/libs/clapack/zrot.c
new file mode 100644
index 0000000000..4e716e6c7e
--- /dev/null
+++ b/contrib/libs/clapack/zrot.c
@@ -0,0 +1,155 @@
+/* zrot.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zrot_(integer *n, doublecomplex *cx, integer *incx,
+ doublecomplex *cy, integer *incy, doublereal *c__, doublecomplex *s)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, ix, iy;
+ doublecomplex stemp;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZROT applies a plane rotation, where the cos (C) is real and the */
+/* sin (S) is complex, and the vectors CX and CY are complex. */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The number of elements in the vectors CX and CY. */
+
+/* CX (input/output) COMPLEX*16 array, dimension (N) */
+/* On input, the vector X. */
+/* On output, CX is overwritten with C*X + S*Y. */
+
+/* INCX (input) INTEGER */
+/* The increment between successive values of CY. INCX <> 0. */
+
+/* CY (input/output) COMPLEX*16 array, dimension (N) */
+/* On input, the vector Y. */
+/* On output, CY is overwritten with -CONJG(S)*X + C*Y. */
+
+/* INCY (input) INTEGER */
+/* The increment between successive values of CY. INCX <> 0. */
+
+/* C (input) DOUBLE PRECISION */
+/* S (input) COMPLEX*16 */
+/* C and S define a rotation */
+/* [ C S ] */
+/* [ -conjg(S) C ] */
+/* where C*C + S*CONJG(S) = 1.0. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+ /* Parameter adjustments */
+ --cy;
+ --cx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/* Code for unequal increments or equal increments not equal to 1 */
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ z__2.r = *c__ * cx[i__2].r, z__2.i = *c__ * cx[i__2].i;
+ i__3 = iy;
+ z__3.r = s->r * cy[i__3].r - s->i * cy[i__3].i, z__3.i = s->r * cy[
+ i__3].i + s->i * cy[i__3].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ stemp.r = z__1.r, stemp.i = z__1.i;
+ i__2 = iy;
+ i__3 = iy;
+ z__2.r = *c__ * cy[i__3].r, z__2.i = *c__ * cy[i__3].i;
+ d_cnjg(&z__4, s);
+ i__4 = ix;
+ z__3.r = z__4.r * cx[i__4].r - z__4.i * cx[i__4].i, z__3.i = z__4.r *
+ cx[i__4].i + z__4.i * cx[i__4].r;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ cy[i__2].r = z__1.r, cy[i__2].i = z__1.i;
+ i__2 = ix;
+ cx[i__2].r = stemp.r, cx[i__2].i = stemp.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* Code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ z__2.r = *c__ * cx[i__2].r, z__2.i = *c__ * cx[i__2].i;
+ i__3 = i__;
+ z__3.r = s->r * cy[i__3].r - s->i * cy[i__3].i, z__3.i = s->r * cy[
+ i__3].i + s->i * cy[i__3].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ stemp.r = z__1.r, stemp.i = z__1.i;
+ i__2 = i__;
+ i__3 = i__;
+ z__2.r = *c__ * cy[i__3].r, z__2.i = *c__ * cy[i__3].i;
+ d_cnjg(&z__4, s);
+ i__4 = i__;
+ z__3.r = z__4.r * cx[i__4].r - z__4.i * cx[i__4].i, z__3.i = z__4.r *
+ cx[i__4].i + z__4.i * cx[i__4].r;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ cy[i__2].r = z__1.r, cy[i__2].i = z__1.i;
+ i__2 = i__;
+ cx[i__2].r = stemp.r, cx[i__2].i = stemp.i;
+/* L30: */
+ }
+ return 0;
+} /* zrot_ */
diff --git a/contrib/libs/clapack/zspcon.c b/contrib/libs/clapack/zspcon.c
new file mode 100644
index 0000000000..0fc8d28689
--- /dev/null
+++ b/contrib/libs/clapack/zspcon.c
@@ -0,0 +1,197 @@
+/* zspcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zspcon_(char *uplo, integer *n, doublecomplex *ap,
+ integer *ipiv, doublereal *anorm, doublereal *rcond, doublecomplex *
+ work, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ integer i__, ip, kase;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ logical upper;
+ extern /* Subroutine */ int zlacn2_(integer *, doublecomplex *,
+ doublecomplex *, doublereal *, integer *, integer *), xerbla_(
+ char *, integer *);
+ doublereal ainvnm;
+ extern /* Subroutine */ int zsptrs_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSPCON estimates the reciprocal of the condition number (in the */
+/* 1-norm) of a complex symmetric packed matrix A using the */
+/* factorization A = U*D*U**T or A = L*D*L**T computed by ZSPTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**T; */
+/* = 'L': Lower triangular, form is A = L*D*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The block diagonal matrix D and the multipliers used to */
+/* obtain the factor U or L as computed by ZSPTRF, stored as a */
+/* packed triangular matrix. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by ZSPTRF. */
+
+/* ANORM (input) DOUBLE PRECISION */
+/* The 1-norm of the original matrix A. */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --work;
+ --ipiv;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*anorm < 0.) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZSPCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.;
+ if (*n == 0) {
+ *rcond = 1.;
+ return 0;
+ } else if (*anorm <= 0.) {
+ return 0;
+ }
+
+/* Check that the diagonal matrix D is nonsingular. */
+
+ if (upper) {
+
+/* Upper triangular storage: examine D from bottom to top */
+
+ ip = *n * (*n + 1) / 2;
+ for (i__ = *n; i__ >= 1; --i__) {
+ i__1 = ip;
+ if (ipiv[i__] > 0 && (ap[i__1].r == 0. && ap[i__1].i == 0.)) {
+ return 0;
+ }
+ ip -= i__;
+/* L10: */
+ }
+ } else {
+
+/* Lower triangular storage: examine D from top to bottom. */
+
+ ip = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ip;
+ if (ipiv[i__] > 0 && (ap[i__2].r == 0. && ap[i__2].i == 0.)) {
+ return 0;
+ }
+ ip = ip + *n - i__ + 1;
+/* L20: */
+ }
+ }
+
+/* Estimate the 1-norm of the inverse. */
+
+ kase = 0;
+L30:
+ zlacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+
+/* Multiply by inv(L*D*L') or inv(U*D*U'). */
+
+ zsptrs_(uplo, n, &c__1, &ap[1], &ipiv[1], &work[1], n, info);
+ goto L30;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.) {
+ *rcond = 1. / ainvnm / *anorm;
+ }
+
+ return 0;
+
+/* End of ZSPCON */
+
+} /* zspcon_ */
diff --git a/contrib/libs/clapack/zspmv.c b/contrib/libs/clapack/zspmv.c
new file mode 100644
index 0000000000..128e1db412
--- /dev/null
+++ b/contrib/libs/clapack/zspmv.c
@@ -0,0 +1,428 @@
+/* zspmv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zspmv_(char *uplo, integer *n, doublecomplex *alpha,
+ doublecomplex *ap, doublecomplex *x, integer *incx, doublecomplex *
+ beta, doublecomplex *y, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Local variables */
+ integer i__, j, k, kk, ix, iy, jx, jy, kx, ky, info;
+ doublecomplex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSPMV performs the matrix-vector operation */
+
+/* y := alpha*A*x + beta*y, */
+
+/* where alpha and beta are scalars, x and y are n element vectors and */
+/* A is an n by n symmetric matrix, supplied in packed form. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO (input) CHARACTER*1 */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the matrix A is supplied in the packed */
+/* array AP as follows: */
+
+/* UPLO = 'U' or 'u' The upper triangular part of A is */
+/* supplied in AP. */
+
+/* UPLO = 'L' or 'l' The lower triangular part of A is */
+/* supplied in AP. */
+
+/* Unchanged on exit. */
+
+/* N (input) INTEGER */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA (input) COMPLEX*16 */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* AP (input) COMPLEX*16 array, dimension at least */
+/* ( ( N*( N + 1 ) )/2 ). */
+/* Before entry, with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular part of the symmetric matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */
+/* and a( 2, 2 ) respectively, and so on. */
+/* Before entry, with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular part of the symmetric matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */
+/* and a( 3, 1 ) respectively, and so on. */
+/* Unchanged on exit. */
+
+/* X (input) COMPLEX*16 array, dimension at least */
+/* ( 1 + ( N - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the N- */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX (input) INTEGER */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* BETA (input) COMPLEX*16 */
+/* On entry, BETA specifies the scalar beta. When BETA is */
+/* supplied as zero then Y need not be set on input. */
+/* Unchanged on exit. */
+
+/* Y (input/output) COMPLEX*16 array, dimension at least */
+/* ( 1 + ( N - 1 )*abs( INCY ) ). */
+/* Before entry, the incremented array Y must contain the n */
+/* element vector y. On exit, Y is overwritten by the updated */
+/* vector y. */
+
+/* INCY (input) INTEGER */
+/* On entry, INCY specifies the increment for the elements of */
+/* Y. INCY must not be zero. */
+/* Unchanged on exit. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --y;
+ --x;
+ --ap;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 6;
+ } else if (*incy == 0) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("ZSPMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || alpha->r == 0. && alpha->i == 0. && (beta->r == 1. &&
+ beta->i == 0.)) {
+ return 0;
+ }
+
+/* Set up the start points in X and Y. */
+
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+
+/* Start the operations. In this version the elements of the array AP */
+/* are accessed sequentially with one pass through AP. */
+
+/* First form y := beta*y. */
+
+ if (beta->r != 1. || beta->i != 0.) {
+ if (*incy == 1) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ y[i__2].r = 0., y[i__2].i = 0.;
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ y[i__2].r = 0., y[i__2].i = 0.;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = iy;
+ z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (alpha->r == 0. && alpha->i == 0.) {
+ return 0;
+ }
+ kk = 1;
+ if (lsame_(uplo, "U")) {
+
+/* Form y when AP contains the upper triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ k = kk;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = k;
+ z__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i,
+ z__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5]
+ .r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ i__3 = k;
+ i__4 = i__;
+ z__2.r = ap[i__3].r * x[i__4].r - ap[i__3].i * x[i__4].i,
+ z__2.i = ap[i__3].r * x[i__4].i + ap[i__3].i * x[
+ i__4].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ ++k;
+/* L50: */
+ }
+ i__2 = j;
+ i__3 = j;
+ i__4 = kk + j - 1;
+ z__3.r = temp1.r * ap[i__4].r - temp1.i * ap[i__4].i, z__3.i =
+ temp1.r * ap[i__4].i + temp1.i * ap[i__4].r;
+ z__2.r = y[i__3].r + z__3.r, z__2.i = y[i__3].i + z__3.i;
+ z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ kk += j;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ ix = kx;
+ iy = ky;
+ i__2 = kk + j - 2;
+ for (k = kk; k <= i__2; ++k) {
+ i__3 = iy;
+ i__4 = iy;
+ i__5 = k;
+ z__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i,
+ z__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5]
+ .r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ i__3 = k;
+ i__4 = ix;
+ z__2.r = ap[i__3].r * x[i__4].r - ap[i__3].i * x[i__4].i,
+ z__2.i = ap[i__3].r * x[i__4].i + ap[i__3].i * x[
+ i__4].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L70: */
+ }
+ i__2 = jy;
+ i__3 = jy;
+ i__4 = kk + j - 1;
+ z__3.r = temp1.r * ap[i__4].r - temp1.i * ap[i__4].i, z__3.i =
+ temp1.r * ap[i__4].i + temp1.i * ap[i__4].r;
+ z__2.r = y[i__3].r + z__3.r, z__2.i = y[i__3].i + z__3.i;
+ z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ jx += *incx;
+ jy += *incy;
+ kk += j;
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y when AP contains the lower triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ i__2 = j;
+ i__3 = j;
+ i__4 = kk;
+ z__2.r = temp1.r * ap[i__4].r - temp1.i * ap[i__4].i, z__2.i =
+ temp1.r * ap[i__4].i + temp1.i * ap[i__4].r;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ k = kk + 1;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = k;
+ z__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i,
+ z__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5]
+ .r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ i__3 = k;
+ i__4 = i__;
+ z__2.r = ap[i__3].r * x[i__4].r - ap[i__3].i * x[i__4].i,
+ z__2.i = ap[i__3].r * x[i__4].i + ap[i__3].i * x[
+ i__4].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ ++k;
+/* L90: */
+ }
+ i__2 = j;
+ i__3 = j;
+ z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ kk += *n - j + 1;
+/* L100: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ i__2 = jy;
+ i__3 = jy;
+ i__4 = kk;
+ z__2.r = temp1.r * ap[i__4].r - temp1.i * ap[i__4].i, z__2.i =
+ temp1.r * ap[i__4].i + temp1.i * ap[i__4].r;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ ix = jx;
+ iy = jy;
+ i__2 = kk + *n - j;
+ for (k = kk + 1; k <= i__2; ++k) {
+ ix += *incx;
+ iy += *incy;
+ i__3 = iy;
+ i__4 = iy;
+ i__5 = k;
+ z__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i,
+ z__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5]
+ .r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ i__3 = k;
+ i__4 = ix;
+ z__2.r = ap[i__3].r * x[i__4].r - ap[i__3].i * x[i__4].i,
+ z__2.i = ap[i__3].r * x[i__4].i + ap[i__3].i * x[
+ i__4].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L110: */
+ }
+ i__2 = jy;
+ i__3 = jy;
+ z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ jx += *incx;
+ jy += *incy;
+ kk += *n - j + 1;
+/* L120: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZSPMV */
+
+} /* zspmv_ */
diff --git a/contrib/libs/clapack/zspr.c b/contrib/libs/clapack/zspr.c
new file mode 100644
index 0000000000..da539cd52c
--- /dev/null
+++ b/contrib/libs/clapack/zspr.c
@@ -0,0 +1,339 @@
+/* zspr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zspr_(char *uplo, integer *n, doublecomplex *alpha,
+ doublecomplex *x, integer *incx, doublecomplex *ap)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1, z__2;
+
+ /* Local variables */
+ integer i__, j, k, kk, ix, jx, kx, info;
+ doublecomplex temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSPR performs the symmetric rank 1 operation */
+
+/* A := alpha*x*conjg( x' ) + A, */
+
+/* where alpha is a complex scalar, x is an n element vector and A is an */
+/* n by n symmetric matrix, supplied in packed form. */
+
+/* Arguments */
+/* ========== */
+
+/* UPLO (input) CHARACTER*1 */
+/* On entry, UPLO specifies whether the upper or lower */
+/* triangular part of the matrix A is supplied in the packed */
+/* array AP as follows: */
+
+/* UPLO = 'U' or 'u' The upper triangular part of A is */
+/* supplied in AP. */
+
+/* UPLO = 'L' or 'l' The lower triangular part of A is */
+/* supplied in AP. */
+
+/* Unchanged on exit. */
+
+/* N (input) INTEGER */
+/* On entry, N specifies the order of the matrix A. */
+/* N must be at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA (input) COMPLEX*16 */
+/* On entry, ALPHA specifies the scalar alpha. */
+/* Unchanged on exit. */
+
+/* X (input) COMPLEX*16 array, dimension at least */
+/* ( 1 + ( N - 1 )*abs( INCX ) ). */
+/* Before entry, the incremented array X must contain the N- */
+/* element vector x. */
+/* Unchanged on exit. */
+
+/* INCX (input) INTEGER */
+/* On entry, INCX specifies the increment for the elements of */
+/* X. INCX must not be zero. */
+/* Unchanged on exit. */
+
+/* AP (input/output) COMPLEX*16 array, dimension at least */
+/* ( ( N*( N + 1 ) )/2 ). */
+/* Before entry, with UPLO = 'U' or 'u', the array AP must */
+/* contain the upper triangular part of the symmetric matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */
+/* and a( 2, 2 ) respectively, and so on. On exit, the array */
+/* AP is overwritten by the upper triangular part of the */
+/* updated matrix. */
+/* Before entry, with UPLO = 'L' or 'l', the array AP must */
+/* contain the lower triangular part of the symmetric matrix */
+/* packed sequentially, column by column, so that AP( 1 ) */
+/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */
+/* and a( 3, 1 ) respectively, and so on. On exit, the array */
+/* AP is overwritten by the lower triangular part of the */
+/* updated matrix. */
+/* Note that the imaginary parts of the diagonal elements need */
+/* not be set, they are assumed to be zero, and on exit they */
+/* are set to zero. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ }
+ if (info != 0) {
+ xerbla_("ZSPR ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0 || alpha->r == 0. && alpha->i == 0.) {
+ return 0;
+ }
+
+/* Set the start point in X if the increment is not unity. */
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/* Start the operations. In this version the elements of the array AP */
+/* are accessed sequentially with one pass through AP. */
+
+ kk = 1;
+ if (lsame_(uplo, "U")) {
+
+/* Form A when upper triangle is stored in AP. */
+
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ i__2 = j;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ k = kk;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = k;
+ i__4 = k;
+ i__5 = i__;
+ z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ z__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ z__1.r = ap[i__4].r + z__2.r, z__1.i = ap[i__4].i +
+ z__2.i;
+ ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+ ++k;
+/* L10: */
+ }
+ i__2 = kk + j - 1;
+ i__3 = kk + j - 1;
+ i__4 = j;
+ z__2.r = x[i__4].r * temp.r - x[i__4].i * temp.i, z__2.i =
+ x[i__4].r * temp.i + x[i__4].i * temp.r;
+ z__1.r = ap[i__3].r + z__2.r, z__1.i = ap[i__3].i +
+ z__2.i;
+ ap[i__2].r = z__1.r, ap[i__2].i = z__1.i;
+ } else {
+ i__2 = kk + j - 1;
+ i__3 = kk + j - 1;
+ ap[i__2].r = ap[i__3].r, ap[i__2].i = ap[i__3].i;
+ }
+ kk += j;
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ i__2 = jx;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix = kx;
+ i__2 = kk + j - 2;
+ for (k = kk; k <= i__2; ++k) {
+ i__3 = k;
+ i__4 = k;
+ i__5 = ix;
+ z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ z__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ z__1.r = ap[i__4].r + z__2.r, z__1.i = ap[i__4].i +
+ z__2.i;
+ ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+ ix += *incx;
+/* L30: */
+ }
+ i__2 = kk + j - 1;
+ i__3 = kk + j - 1;
+ i__4 = jx;
+ z__2.r = x[i__4].r * temp.r - x[i__4].i * temp.i, z__2.i =
+ x[i__4].r * temp.i + x[i__4].i * temp.r;
+ z__1.r = ap[i__3].r + z__2.r, z__1.i = ap[i__3].i +
+ z__2.i;
+ ap[i__2].r = z__1.r, ap[i__2].i = z__1.i;
+ } else {
+ i__2 = kk + j - 1;
+ i__3 = kk + j - 1;
+ ap[i__2].r = ap[i__3].r, ap[i__2].i = ap[i__3].i;
+ }
+ jx += *incx;
+ kk += j;
+/* L40: */
+ }
+ }
+ } else {
+
+/* Form A when lower triangle is stored in AP. */
+
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ i__2 = j;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = kk;
+ i__3 = kk;
+ i__4 = j;
+ z__2.r = temp.r * x[i__4].r - temp.i * x[i__4].i, z__2.i =
+ temp.r * x[i__4].i + temp.i * x[i__4].r;
+ z__1.r = ap[i__3].r + z__2.r, z__1.i = ap[i__3].i +
+ z__2.i;
+ ap[i__2].r = z__1.r, ap[i__2].i = z__1.i;
+ k = kk + 1;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = k;
+ i__4 = k;
+ i__5 = i__;
+ z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ z__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ z__1.r = ap[i__4].r + z__2.r, z__1.i = ap[i__4].i +
+ z__2.i;
+ ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+ ++k;
+/* L50: */
+ }
+ } else {
+ i__2 = kk;
+ i__3 = kk;
+ ap[i__2].r = ap[i__3].r, ap[i__2].i = ap[i__3].i;
+ }
+ kk = kk + *n - j + 1;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if (x[i__2].r != 0. || x[i__2].i != 0.) {
+ i__2 = jx;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = kk;
+ i__3 = kk;
+ i__4 = jx;
+ z__2.r = temp.r * x[i__4].r - temp.i * x[i__4].i, z__2.i =
+ temp.r * x[i__4].i + temp.i * x[i__4].r;
+ z__1.r = ap[i__3].r + z__2.r, z__1.i = ap[i__3].i +
+ z__2.i;
+ ap[i__2].r = z__1.r, ap[i__2].i = z__1.i;
+ ix = jx;
+ i__2 = kk + *n - j;
+ for (k = kk + 1; k <= i__2; ++k) {
+ ix += *incx;
+ i__3 = k;
+ i__4 = k;
+ i__5 = ix;
+ z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+ z__2.i = x[i__5].r * temp.i + x[i__5].i *
+ temp.r;
+ z__1.r = ap[i__4].r + z__2.r, z__1.i = ap[i__4].i +
+ z__2.i;
+ ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+/* L70: */
+ }
+ } else {
+ i__2 = kk;
+ i__3 = kk;
+ ap[i__2].r = ap[i__3].r, ap[i__2].i = ap[i__3].i;
+ }
+ jx += *incx;
+ kk = kk + *n - j + 1;
+/* L80: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZSPR */
+
+} /* zspr_ */
diff --git a/contrib/libs/clapack/zsprfs.c b/contrib/libs/clapack/zsprfs.c
new file mode 100644
index 0000000000..f322c3ac2b
--- /dev/null
+++ b/contrib/libs/clapack/zsprfs.c
@@ -0,0 +1,464 @@
+/* zsprfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zsprfs_(char *uplo, integer *n, integer *nrhs,
+ doublecomplex *ap, doublecomplex *afp, integer *ipiv, doublecomplex *
+ b, integer *ldb, doublecomplex *x, integer *ldx, doublereal *ferr,
+ doublereal *berr, doublecomplex *work, doublereal *rwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal s;
+ integer ik, kk;
+ doublereal xk;
+ integer nz;
+ doublereal eps;
+ integer kase;
+ doublereal safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3], count;
+ logical upper;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zaxpy_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), zspmv_(
+ char *, integer *, doublecomplex *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), zlacn2_(integer *, doublecomplex *,
+ doublecomplex *, doublereal *, integer *, integer *);
+ extern doublereal dlamch_(char *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal lstres;
+ extern /* Subroutine */ int zsptrs_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSPRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is symmetric indefinite */
+/* and packed, and provides error bounds and backward error estimates */
+/* for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the symmetric matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* AFP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The factored form of the matrix A. AFP contains the block */
+/* diagonal matrix D and the multipliers used to obtain the */
+/* factor U or L from the factorization A = U*D*U**T or */
+/* A = L*D*L**T as computed by ZSPTRF, stored as a packed */
+/* triangular matrix. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by ZSPTRF. */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by ZSPTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --afp;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ } else if (*ldx < max(1,*n)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZSPRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.;
+ berr[j] = 0.;
+/* L10: */
+ }
+ return 0;
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = *n + 1;
+ eps = dlamch_("Epsilon");
+ safmin = dlamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+ zcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+ z__1.r = -1., z__1.i = -0.;
+ zspmv_(uplo, n, &z__1, &ap[1], &x[j * x_dim1 + 1], &c__1, &c_b1, &
+ work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[i__] = (d__1 = b[i__3].r, abs(d__1)) + (d__2 = d_imag(&b[
+ i__ + j * b_dim1]), abs(d__2));
+/* L30: */
+ }
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+ kk = 1;
+ if (upper) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ i__3 = k + j * x_dim1;
+ xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[k + j *
+ x_dim1]), abs(d__2));
+ ik = kk;
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = ik;
+ rwork[i__] += ((d__1 = ap[i__4].r, abs(d__1)) + (d__2 =
+ d_imag(&ap[ik]), abs(d__2))) * xk;
+ i__4 = ik;
+ i__5 = i__ + j * x_dim1;
+ s += ((d__1 = ap[i__4].r, abs(d__1)) + (d__2 = d_imag(&ap[
+ ik]), abs(d__2))) * ((d__3 = x[i__5].r, abs(d__3))
+ + (d__4 = d_imag(&x[i__ + j * x_dim1]), abs(d__4)
+ ));
+ ++ik;
+/* L40: */
+ }
+ i__3 = kk + k - 1;
+ rwork[k] = rwork[k] + ((d__1 = ap[i__3].r, abs(d__1)) + (d__2
+ = d_imag(&ap[kk + k - 1]), abs(d__2))) * xk + s;
+ kk += k;
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ i__3 = k + j * x_dim1;
+ xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[k + j *
+ x_dim1]), abs(d__2));
+ i__3 = kk;
+ rwork[k] += ((d__1 = ap[i__3].r, abs(d__1)) + (d__2 = d_imag(&
+ ap[kk]), abs(d__2))) * xk;
+ ik = kk + 1;
+ i__3 = *n;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ i__4 = ik;
+ rwork[i__] += ((d__1 = ap[i__4].r, abs(d__1)) + (d__2 =
+ d_imag(&ap[ik]), abs(d__2))) * xk;
+ i__4 = ik;
+ i__5 = i__ + j * x_dim1;
+ s += ((d__1 = ap[i__4].r, abs(d__1)) + (d__2 = d_imag(&ap[
+ ik]), abs(d__2))) * ((d__3 = x[i__5].r, abs(d__3))
+ + (d__4 = d_imag(&x[i__ + j * x_dim1]), abs(d__4)
+ ));
+ ++ik;
+/* L60: */
+ }
+ rwork[k] += s;
+ kk += *n - k + 1;
+/* L70: */
+ }
+ }
+ s = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+/* Computing MAX */
+ i__3 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2))) / rwork[i__];
+ s = max(d__3,d__4);
+ } else {
+/* Computing MAX */
+ i__3 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + safe1) / (rwork[i__]
+ + safe1);
+ s = max(d__3,d__4);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ zsptrs_(uplo, n, &c__1, &afp[1], &ipiv[1], &work[1], n, info);
+ zaxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use ZLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+ i__3 = i__;
+ rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ ;
+ } else {
+ i__3 = i__;
+ rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ + safe1;
+ }
+/* L90: */
+ }
+
+ kase = 0;
+L100:
+ zlacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+ zsptrs_(uplo, n, &c__1, &afp[1], &ipiv[1], &work[1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L110: */
+ }
+ } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L120: */
+ }
+ zsptrs_(uplo, n, &c__1, &afp[1], &ipiv[1], &work[1], n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * x_dim1;
+ d__3 = lstres, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&x[i__ + j * x_dim1]), abs(d__2));
+ lstres = max(d__3,d__4);
+/* L130: */
+ }
+ if (lstres != 0.) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of ZSPRFS */
+
+} /* zsprfs_ */
diff --git a/contrib/libs/clapack/zspsv.c b/contrib/libs/clapack/zspsv.c
new file mode 100644
index 0000000000..1794a155b5
--- /dev/null
+++ b/contrib/libs/clapack/zspsv.c
@@ -0,0 +1,177 @@
+/* zspsv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zspsv_(char *uplo, integer *n, integer *nrhs,
+ doublecomplex *ap, integer *ipiv, doublecomplex *b, integer *ldb,
+ integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), zsptrf_(
+ char *, integer *, doublecomplex *, integer *, integer *),
+ zsptrs_(char *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSPSV computes the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric matrix stored in packed format and X */
+/* and B are N-by-NRHS matrices. */
+
+/* The diagonal pivoting method is used to factor A as */
+/* A = U * D * U**T, if UPLO = 'U', or */
+/* A = L * D * L**T, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, D is symmetric and block diagonal with 1-by-1 */
+/* and 2-by-2 diagonal blocks. The factored form of A is then used to */
+/* solve the system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**T or A = L*D*L**T as computed by ZSPTRF, stored as */
+/* a packed triangular matrix in the same storage format as A. */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D, as */
+/* determined by ZSPTRF. If IPIV(k) > 0, then rows and columns */
+/* k and IPIV(k) were interchanged, and D(k,k) is a 1-by-1 */
+/* diagonal block. If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, */
+/* then rows and columns k-1 and -IPIV(k) were interchanged and */
+/* D(k-1:k,k-1:k) is a 2-by-2 diagonal block. If UPLO = 'L' and */
+/* IPIV(k) = IPIV(k+1) < 0, then rows and columns k+1 and */
+/* -IPIV(k) were interchanged and D(k:k+1,k:k+1) is a 2-by-2 */
+/* diagonal block. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, so the solution could not be */
+/* computed. */
+
+/* Further Details */
+/* =============== */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the symmetric matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = aji) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZSPSV ", &i__1);
+ return 0;
+ }
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+ zsptrf_(uplo, n, &ap[1], &ipiv[1], info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ zsptrs_(uplo, n, nrhs, &ap[1], &ipiv[1], &b[b_offset], ldb, info);
+
+ }
+ return 0;
+
+/* End of ZSPSV */
+
+} /* zspsv_ */
diff --git a/contrib/libs/clapack/zspsvx.c b/contrib/libs/clapack/zspsvx.c
new file mode 100644
index 0000000000..b8c3d86b4d
--- /dev/null
+++ b/contrib/libs/clapack/zspsvx.c
@@ -0,0 +1,326 @@
+/* zspsvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zspsvx_(char *fact, char *uplo, integer *n, integer *
+ nrhs, doublecomplex *ap, doublecomplex *afp, integer *ipiv,
+ doublecomplex *b, integer *ldb, doublecomplex *x, integer *ldx,
+ doublereal *rcond, doublereal *ferr, doublereal *berr, doublecomplex *
+ work, doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, x_dim1, x_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ doublereal anorm;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ extern doublereal dlamch_(char *);
+ logical nofact;
+ extern /* Subroutine */ int xerbla_(char *, integer *), zlacpy_(
+ char *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ extern doublereal zlansp_(char *, char *, integer *, doublecomplex *,
+ doublereal *);
+ extern /* Subroutine */ int zspcon_(char *, integer *, doublecomplex *,
+ integer *, doublereal *, doublereal *, doublecomplex *, integer *), zsprfs_(char *, integer *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublereal *, doublereal *,
+ doublecomplex *, doublereal *, integer *), zsptrf_(char *,
+ integer *, doublecomplex *, integer *, integer *),
+ zsptrs_(char *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSPSVX uses the diagonal pivoting factorization A = U*D*U**T or */
+/* A = L*D*L**T to compute the solution to a complex system of linear */
+/* equations A * X = B, where A is an N-by-N symmetric matrix stored */
+/* in packed format and X and B are N-by-NRHS matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the diagonal pivoting method is used to factor A as */
+/* A = U * D * U**T, if UPLO = 'U', or */
+/* A = L * D * L**T, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices and D is symmetric and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* 2. If some D(i,i)=0, so that D is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of A has been */
+/* supplied on entry. */
+/* = 'F': On entry, AFP and IPIV contain the factored form */
+/* of A. AP, AFP and IPIV will not be modified. */
+/* = 'N': The matrix A will be copied to AFP and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The upper or lower triangle of the symmetric matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. */
+
+/* AFP (input or output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* If FACT = 'F', then AFP is an input argument and on entry */
+/* contains the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**T or A = L*D*L**T as computed by ZSPTRF, stored as */
+/* a packed triangular matrix in the same storage format as A. */
+
+/* If FACT = 'N', then AFP is an output argument and on exit */
+/* contains the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**T or A = L*D*L**T as computed by ZSPTRF, stored as */
+/* a packed triangular matrix in the same storage format as A. */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by ZSPTRF. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by ZSPTRF. */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A. If RCOND is less than the machine precision (in */
+/* particular, if RCOND = 0), the matrix is singular to working */
+/* precision. This condition is indicated by a return code of */
+/* INFO > 0. */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: D(i,i) is exactly zero. The factorization */
+/* has been completed but the factor D is exactly */
+/* singular, so the solution and error bounds could */
+/* not be computed. RCOND = 0 is returned. */
+/* = N+1: D is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* Further Details */
+/* =============== */
+
+/* The packed storage scheme is illustrated by the following example */
+/* when N = 4, UPLO = 'U': */
+
+/* Two-dimensional storage of the symmetric matrix A: */
+
+/* a11 a12 a13 a14 */
+/* a22 a23 a24 */
+/* a33 a34 (aij = aji) */
+/* a44 */
+
+/* Packed storage of the upper triangle of A: */
+
+/* AP = [ a11, a12, a22, a13, a23, a33, a14, a24, a34, a44 ] */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --ap;
+ --afp;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ if (! nofact && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! lsame_(uplo, "U") && ! lsame_(uplo,
+ "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldx < max(1,*n)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZSPSVX", &i__1);
+ return 0;
+ }
+
+ if (nofact) {
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+ i__1 = *n * (*n + 1) / 2;
+ zcopy_(&i__1, &ap[1], &c__1, &afp[1], &c__1);
+ zsptrf_(uplo, n, &afp[1], &ipiv[1], info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ anorm = zlansp_("I", uplo, n, &ap[1], &rwork[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ zspcon_(uplo, n, &afp[1], &ipiv[1], &anorm, rcond, &work[1], info);
+
+/* Compute the solution vectors X. */
+
+ zlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ zsptrs_(uplo, n, nrhs, &afp[1], &ipiv[1], &x[x_offset], ldx, info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+ zsprfs_(uplo, n, nrhs, &ap[1], &afp[1], &ipiv[1], &b[b_offset], ldb, &x[
+ x_offset], ldx, &ferr[1], &berr[1], &work[1], &rwork[1], info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < dlamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ return 0;
+
+/* End of ZSPSVX */
+
+} /* zspsvx_ */
diff --git a/contrib/libs/clapack/zsptrf.c b/contrib/libs/clapack/zsptrf.c
new file mode 100644
index 0000000000..4482665826
--- /dev/null
+++ b/contrib/libs/clapack/zsptrf.c
@@ -0,0 +1,763 @@
+/* zsptrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* c_b1 = complex one, c__1 = unit stride; f2c passes all arguments by */
+/* address, so these constants exist to be handed to BLAS/LAPACK calls. */
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zsptrf_(char *uplo, integer *n, doublecomplex *ap, 
+	integer *ipiv, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3, i__4, i__5, i__6;
+    doublereal d__1, d__2, d__3, d__4;
+    doublecomplex z__1, z__2, z__3, z__4;
+
+    /* Builtin functions */
+    double sqrt(doublereal), d_imag(doublecomplex *);
+    void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__, j, k;
+    doublecomplex t, r1, d11, d12, d21, d22;
+    integer kc, kk, kp;
+    doublecomplex wk;
+    integer kx, knc, kpc, npp;
+    doublecomplex wkm1, wkp1;
+    integer imax, jmax;
+    extern /* Subroutine */ int zspr_(char *, integer *, doublecomplex *, 
+	    doublecomplex *, integer *, doublecomplex *);
+    doublereal alpha;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
+	    doublecomplex *, integer *);
+    integer kstep;
+    logical upper;
+    extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *, 
+	    doublecomplex *, integer *);
+    doublereal absakk;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal colmax;
+    extern integer izamax_(integer *, doublecomplex *, integer *);
+    doublereal rowmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSPTRF computes the factorization of a complex symmetric matrix A */
+/* stored in packed format using the Bunch-Kaufman diagonal pivoting */
+/* method: */
+
+/* A = U*D*U**T or A = L*D*L**T */
+
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is symmetric and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangle of the symmetric matrix */
+/* A, packed columnwise in a linear array. The j-th column of A */
+/* is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L, stored as a packed triangular */
+/* matrix overwriting A (see below for further details). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, and division by zero will occur if it */
+/* is used to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* 5-96 - Based on modifications by J. Lewis, Boeing Computer Services */
+/* Company */
+
+/* If UPLO = 'U', then A = U*D*U', where */
+/* U = P(n)*U(n)* ... *P(k)U(k)* ..., */
+/* i.e., U is a product of terms P(k)*U(k), where k decreases from n to */
+/* 1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I v 0 ) k-s */
+/* U(k) = ( 0 I 0 ) s */
+/* ( 0 0 I ) n-k */
+/* k-s s n-k */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */
+/* If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */
+/* and A(k,k), and v overwrites A(1:k-2,k-1:k). */
+
+/* If UPLO = 'L', then A = L*D*L', where */
+/* L = P(1)*L(1)* ... *P(k)*L(k)* ..., */
+/* i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */
+/* n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I 0 0 ) k-1 */
+/* L(k) = ( 0 I 0 ) s */
+/* ( 0 v I ) n-k-s+1 */
+/* k-1 s n-k-s+1 */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */
+/* If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */
+/* and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* f2c idiom: the array pointers are decremented once so that the */
+/* Fortran-style 1-based subscripts used below (ap[1..n*(n+1)/2], */
+/* ipiv[1..n]) index correctly. */
+    /* Parameter adjustments */
+    --ipiv;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZSPTRF", &i__1);
+	return 0;
+    }
+
+/* Initialize ALPHA for use in choosing pivot block size. */
+
+/* (1 + sqrt(17))/8 is the standard Bunch-Kaufman threshold that bounds */
+/* element growth when choosing between 1-by-1 and 2-by-2 pivot blocks. */
+    alpha = (sqrt(17.) + 1.) / 8.;
+
+    if (upper) {
+
+/* Factorize A as U*D*U' using the upper triangle of A */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2 */
+
+	k = *n;
+	kc = (*n - 1) * *n / 2 + 1;
+/* The goto-L10 construct is f2c's rendering of the Fortran main loop; */
+/* KC points at the first element of column K in the packed array, KNC */
+/* at that of column K-KSTEP+1. */
+L10:
+	knc = kc;
+
+/* If K < 1, exit from loop */
+
+	if (k < 1) {
+	    goto L110;
+	}
+	kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+/* CABS1(z) = |Re z| + |Im z| serves as an inexpensive magnitude here. */
+	i__1 = kc + k - 1;
+	absakk = (d__1 = ap[i__1].r, abs(d__1)) + (d__2 = d_imag(&ap[kc + k - 
+		1]), abs(d__2));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+	if (k > 1) {
+	    i__1 = k - 1;
+	    imax = izamax_(&i__1, &ap[kc], &c__1);
+	    i__1 = kc + imax - 1;
+	    colmax = (d__1 = ap[i__1].r, abs(d__1)) + (d__2 = d_imag(&ap[kc + 
+		    imax - 1]), abs(d__2));
+	} else {
+	    colmax = 0.;
+	}
+
+	if (max(absakk,colmax) == 0.) {
+
+/* Column K is zero: set INFO and continue */
+
+	    if (*info == 0) {
+		*info = k;
+	    }
+	    kp = k;
+	} else {
+	    if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+		kp = k;
+	    } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+		rowmax = 0.;
+		jmax = imax;
+		kx = imax * (imax + 1) / 2 + imax;
+		i__1 = k;
+		for (j = imax + 1; j <= i__1; ++j) {
+		    i__2 = kx;
+		    if ((d__1 = ap[i__2].r, abs(d__1)) + (d__2 = d_imag(&ap[
+			    kx]), abs(d__2)) > rowmax) {
+			i__2 = kx;
+			rowmax = (d__1 = ap[i__2].r, abs(d__1)) + (d__2 = 
+				d_imag(&ap[kx]), abs(d__2));
+			jmax = j;
+		    }
+		    kx += j;
+/* L20: */
+		}
+		kpc = (imax - 1) * imax / 2 + 1;
+		if (imax > 1) {
+		    i__1 = imax - 1;
+		    jmax = izamax_(&i__1, &ap[kpc], &c__1);
+/* Computing MAX */
+		    i__1 = kpc + jmax - 1;
+		    d__3 = rowmax, d__4 = (d__1 = ap[i__1].r, abs(d__1)) + (
+			    d__2 = d_imag(&ap[kpc + jmax - 1]), abs(d__2));
+		    rowmax = max(d__3,d__4);
+		}
+
+		if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+		    kp = k;
+		} else /* if(complicated condition) */ {
+		    i__1 = kpc + imax - 1;
+		    if ((d__1 = ap[i__1].r, abs(d__1)) + (d__2 = d_imag(&ap[
+			    kpc + imax - 1]), abs(d__2)) >= alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+			kp = imax;
+		    } else {
+
+/* interchange rows and columns K-1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+			kp = imax;
+			kstep = 2;
+		    }
+		}
+	    }
+
+	    kk = k - kstep + 1;
+	    if (kstep == 2) {
+		knc = knc - k + 1;
+	    }
+	    if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the leading */
+/* submatrix A(1:k,1:k) */
+
+		i__1 = kp - 1;
+		zswap_(&i__1, &ap[knc], &c__1, &ap[kpc], &c__1);
+		kx = kpc + kp - 1;
+		i__1 = kk - 1;
+		for (j = kp + 1; j <= i__1; ++j) {
+		    kx = kx + j - 1;
+		    i__2 = knc + j - 1;
+		    t.r = ap[i__2].r, t.i = ap[i__2].i;
+		    i__2 = knc + j - 1;
+		    i__3 = kx;
+		    ap[i__2].r = ap[i__3].r, ap[i__2].i = ap[i__3].i;
+		    i__2 = kx;
+		    ap[i__2].r = t.r, ap[i__2].i = t.i;
+/* L30: */
+		}
+		i__1 = knc + kk - 1;
+		t.r = ap[i__1].r, t.i = ap[i__1].i;
+		i__1 = knc + kk - 1;
+		i__2 = kpc + kp - 1;
+		ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+		i__1 = kpc + kp - 1;
+		ap[i__1].r = t.r, ap[i__1].i = t.i;
+		if (kstep == 2) {
+		    i__1 = kc + k - 2;
+		    t.r = ap[i__1].r, t.i = ap[i__1].i;
+		    i__1 = kc + k - 2;
+		    i__2 = kc + kp - 1;
+		    ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+		    i__1 = kc + kp - 1;
+		    ap[i__1].r = t.r, ap[i__1].i = t.i;
+		}
+	    }
+
+/* Update the leading submatrix */
+
+	    if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = U(k)*D(k) */
+
+/* where U(k) is the k-th column of U */
+
+/* Perform a rank-1 update of A(1:k-1,1:k-1) as */
+
+/* A := A - U(k)*D(k)*U(k)' = A - W(k)*1/D(k)*W(k)' */
+
+		z_div(&z__1, &c_b1, &ap[kc + k - 1]);
+		r1.r = z__1.r, r1.i = z__1.i;
+		i__1 = k - 1;
+		z__1.r = -r1.r, z__1.i = -r1.i;
+		zspr_(uplo, &i__1, &z__1, &ap[kc], &c__1, &ap[1]);
+
+/* Store U(k) in column k */
+
+		i__1 = k - 1;
+		zscal_(&i__1, &r1, &ap[kc], &c__1);
+	    } else {
+
+/* 2-by-2 pivot block D(k): columns k and k-1 now hold */
+
+/* ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */
+
+/* where U(k) and U(k-1) are the k-th and (k-1)-th columns */
+/* of U */
+
+/* Perform a rank-2 update of A(1:k-2,1:k-2) as */
+
+/* A := A - ( U(k-1) U(k) )*D(k)*( U(k-1) U(k) )' */
+/* = A - ( W(k-1) W(k) )*inv(D(k))*( W(k-1) W(k) )' */
+
+		if (k > 2) {
+
+		    i__1 = k - 1 + (k - 1) * k / 2;
+		    d12.r = ap[i__1].r, d12.i = ap[i__1].i;
+		    z_div(&z__1, &ap[k - 1 + (k - 2) * (k - 1) / 2], &d12);
+		    d22.r = z__1.r, d22.i = z__1.i;
+		    z_div(&z__1, &ap[k + (k - 1) * k / 2], &d12);
+		    d11.r = z__1.r, d11.i = z__1.i;
+		    z__3.r = d11.r * d22.r - d11.i * d22.i, z__3.i = d11.r * 
+			    d22.i + d11.i * d22.r;
+		    z__2.r = z__3.r - 1., z__2.i = z__3.i - 0.;
+		    z_div(&z__1, &c_b1, &z__2);
+		    t.r = z__1.r, t.i = z__1.i;
+		    z_div(&z__1, &t, &d12);
+		    d12.r = z__1.r, d12.i = z__1.i;
+
+		    for (j = k - 2; j >= 1; --j) {
+			i__1 = j + (k - 2) * (k - 1) / 2;
+			z__3.r = d11.r * ap[i__1].r - d11.i * ap[i__1].i, 
+				z__3.i = d11.r * ap[i__1].i + d11.i * ap[i__1]
+				.r;
+			i__2 = j + (k - 1) * k / 2;
+			z__2.r = z__3.r - ap[i__2].r, z__2.i = z__3.i - ap[
+				i__2].i;
+			z__1.r = d12.r * z__2.r - d12.i * z__2.i, z__1.i = 
+				d12.r * z__2.i + d12.i * z__2.r;
+			wkm1.r = z__1.r, wkm1.i = z__1.i;
+			i__1 = j + (k - 1) * k / 2;
+			z__3.r = d22.r * ap[i__1].r - d22.i * ap[i__1].i, 
+				z__3.i = d22.r * ap[i__1].i + d22.i * ap[i__1]
+				.r;
+			i__2 = j + (k - 2) * (k - 1) / 2;
+			z__2.r = z__3.r - ap[i__2].r, z__2.i = z__3.i - ap[
+				i__2].i;
+			z__1.r = d12.r * z__2.r - d12.i * z__2.i, z__1.i = 
+				d12.r * z__2.i + d12.i * z__2.r;
+			wk.r = z__1.r, wk.i = z__1.i;
+			for (i__ = j; i__ >= 1; --i__) {
+			    i__1 = i__ + (j - 1) * j / 2;
+			    i__2 = i__ + (j - 1) * j / 2;
+			    i__3 = i__ + (k - 1) * k / 2;
+			    z__3.r = ap[i__3].r * wk.r - ap[i__3].i * wk.i, 
+				    z__3.i = ap[i__3].r * wk.i + ap[i__3].i * 
+				    wk.r;
+			    z__2.r = ap[i__2].r - z__3.r, z__2.i = ap[i__2].i 
+				    - z__3.i;
+			    i__4 = i__ + (k - 2) * (k - 1) / 2;
+			    z__4.r = ap[i__4].r * wkm1.r - ap[i__4].i * 
+				    wkm1.i, z__4.i = ap[i__4].r * wkm1.i + ap[
+				    i__4].i * wkm1.r;
+			    z__1.r = z__2.r - z__4.r, z__1.i = z__2.i - 
+				    z__4.i;
+			    ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+/* L40: */
+			}
+			i__1 = j + (k - 1) * k / 2;
+			ap[i__1].r = wk.r, ap[i__1].i = wk.i;
+			i__1 = j + (k - 2) * (k - 1) / 2;
+			ap[i__1].r = wkm1.r, ap[i__1].i = wkm1.i;
+/* L50: */
+		    }
+
+		}
+	    }
+	}
+
+/* Store details of the interchanges in IPIV */
+
+	if (kstep == 1) {
+	    ipiv[k] = kp;
+	} else {
+	    ipiv[k] = -kp;
+	    ipiv[k - 1] = -kp;
+	}
+
+/* Decrease K and return to the start of the main loop */
+
+	k -= kstep;
+	kc = knc - k;
+	goto L10;
+
+    } else {
+
+/* Factorize A as L*D*L' using the lower triangle of A */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2 */
+
+	k = 1;
+	kc = 1;
+	npp = *n * (*n + 1) / 2;
+/* Lower-triangle analogue: KC points at A(k,k) within packed storage. */
+L60:
+	knc = kc;
+
+/* If K > N, exit from loop */
+
+	if (k > *n) {
+	    goto L110;
+	}
+	kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+	i__1 = kc;
+	absakk = (d__1 = ap[i__1].r, abs(d__1)) + (d__2 = d_imag(&ap[kc]), 
+		abs(d__2));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+	if (k < *n) {
+	    i__1 = *n - k;
+	    imax = k + izamax_(&i__1, &ap[kc + 1], &c__1);
+	    i__1 = kc + imax - k;
+	    colmax = (d__1 = ap[i__1].r, abs(d__1)) + (d__2 = d_imag(&ap[kc + 
+		    imax - k]), abs(d__2));
+	} else {
+	    colmax = 0.;
+	}
+
+	if (max(absakk,colmax) == 0.) {
+
+/* Column K is zero: set INFO and continue */
+
+	    if (*info == 0) {
+		*info = k;
+	    }
+	    kp = k;
+	} else {
+	    if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+		kp = k;
+	    } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+		rowmax = 0.;
+		kx = kc + imax - k;
+		i__1 = imax - 1;
+		for (j = k; j <= i__1; ++j) {
+		    i__2 = kx;
+		    if ((d__1 = ap[i__2].r, abs(d__1)) + (d__2 = d_imag(&ap[
+			    kx]), abs(d__2)) > rowmax) {
+			i__2 = kx;
+			rowmax = (d__1 = ap[i__2].r, abs(d__1)) + (d__2 = 
+				d_imag(&ap[kx]), abs(d__2));
+			jmax = j;
+		    }
+		    kx = kx + *n - j;
+/* L70: */
+		}
+		kpc = npp - (*n - imax + 1) * (*n - imax + 2) / 2 + 1;
+		if (imax < *n) {
+		    i__1 = *n - imax;
+		    jmax = imax + izamax_(&i__1, &ap[kpc + 1], &c__1);
+/* Computing MAX */
+		    i__1 = kpc + jmax - imax;
+		    d__3 = rowmax, d__4 = (d__1 = ap[i__1].r, abs(d__1)) + (
+			    d__2 = d_imag(&ap[kpc + jmax - imax]), abs(d__2));
+		    rowmax = max(d__3,d__4);
+		}
+
+		if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+		    kp = k;
+		} else /* if(complicated condition) */ {
+		    i__1 = kpc;
+		    if ((d__1 = ap[i__1].r, abs(d__1)) + (d__2 = d_imag(&ap[
+			    kpc]), abs(d__2)) >= alpha * rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+			kp = imax;
+		    } else {
+
+/* interchange rows and columns K+1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+			kp = imax;
+			kstep = 2;
+		    }
+		}
+	    }
+
+	    kk = k + kstep - 1;
+	    if (kstep == 2) {
+		knc = knc + *n - k + 1;
+	    }
+	    if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the trailing */
+/* submatrix A(k:n,k:n) */
+
+		if (kp < *n) {
+		    i__1 = *n - kp;
+		    zswap_(&i__1, &ap[knc + kp - kk + 1], &c__1, &ap[kpc + 1], 
+			    &c__1);
+		}
+		kx = knc + kp - kk;
+		i__1 = kp - 1;
+		for (j = kk + 1; j <= i__1; ++j) {
+		    kx = kx + *n - j + 1;
+		    i__2 = knc + j - kk;
+		    t.r = ap[i__2].r, t.i = ap[i__2].i;
+		    i__2 = knc + j - kk;
+		    i__3 = kx;
+		    ap[i__2].r = ap[i__3].r, ap[i__2].i = ap[i__3].i;
+		    i__2 = kx;
+		    ap[i__2].r = t.r, ap[i__2].i = t.i;
+/* L80: */
+		}
+		i__1 = knc;
+		t.r = ap[i__1].r, t.i = ap[i__1].i;
+		i__1 = knc;
+		i__2 = kpc;
+		ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+		i__1 = kpc;
+		ap[i__1].r = t.r, ap[i__1].i = t.i;
+		if (kstep == 2) {
+		    i__1 = kc + 1;
+		    t.r = ap[i__1].r, t.i = ap[i__1].i;
+		    i__1 = kc + 1;
+		    i__2 = kc + kp - k;
+		    ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+		    i__1 = kc + kp - k;
+		    ap[i__1].r = t.r, ap[i__1].i = t.i;
+		}
+	    }
+
+/* Update the trailing submatrix */
+
+	    if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = L(k)*D(k) */
+
+/* where L(k) is the k-th column of L */
+
+		if (k < *n) {
+
+/* Perform a rank-1 update of A(k+1:n,k+1:n) as */
+
+/* A := A - L(k)*D(k)*L(k)' = A - W(k)*(1/D(k))*W(k)' */
+
+		    z_div(&z__1, &c_b1, &ap[kc]);
+		    r1.r = z__1.r, r1.i = z__1.i;
+		    i__1 = *n - k;
+		    z__1.r = -r1.r, z__1.i = -r1.i;
+		    zspr_(uplo, &i__1, &z__1, &ap[kc + 1], &c__1, &ap[kc + *n 
+			    - k + 1]);
+
+/* Store L(k) in column K */
+
+		    i__1 = *n - k;
+		    zscal_(&i__1, &r1, &ap[kc + 1], &c__1);
+		}
+	    } else {
+
+/* 2-by-2 pivot block D(k): columns K and K+1 now hold */
+
+/* ( W(k) W(k+1) ) = ( L(k) L(k+1) )*D(k) */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th columns */
+/* of L */
+
+		if (k < *n - 1) {
+
+/* Perform a rank-2 update of A(k+2:n,k+2:n) as */
+
+/* A := A - ( L(k) L(k+1) )*D(k)*( L(k) L(k+1) )' */
+/* = A - ( W(k) W(k+1) )*inv(D(k))*( W(k) W(k+1) )' */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th */
+/* columns of L */
+
+		    i__1 = k + 1 + (k - 1) * ((*n << 1) - k) / 2;
+		    d21.r = ap[i__1].r, d21.i = ap[i__1].i;
+		    z_div(&z__1, &ap[k + 1 + k * ((*n << 1) - k - 1) / 2], &
+			    d21);
+		    d11.r = z__1.r, d11.i = z__1.i;
+		    z_div(&z__1, &ap[k + (k - 1) * ((*n << 1) - k) / 2], &d21)
+			    ;
+		    d22.r = z__1.r, d22.i = z__1.i;
+		    z__3.r = d11.r * d22.r - d11.i * d22.i, z__3.i = d11.r * 
+			    d22.i + d11.i * d22.r;
+		    z__2.r = z__3.r - 1., z__2.i = z__3.i - 0.;
+		    z_div(&z__1, &c_b1, &z__2);
+		    t.r = z__1.r, t.i = z__1.i;
+		    z_div(&z__1, &t, &d21);
+		    d21.r = z__1.r, d21.i = z__1.i;
+
+		    i__1 = *n;
+		    for (j = k + 2; j <= i__1; ++j) {
+			i__2 = j + (k - 1) * ((*n << 1) - k) / 2;
+			z__3.r = d11.r * ap[i__2].r - d11.i * ap[i__2].i, 
+				z__3.i = d11.r * ap[i__2].i + d11.i * ap[i__2]
+				.r;
+			i__3 = j + k * ((*n << 1) - k - 1) / 2;
+			z__2.r = z__3.r - ap[i__3].r, z__2.i = z__3.i - ap[
+				i__3].i;
+			z__1.r = d21.r * z__2.r - d21.i * z__2.i, z__1.i = 
+				d21.r * z__2.i + d21.i * z__2.r;
+			wk.r = z__1.r, wk.i = z__1.i;
+			i__2 = j + k * ((*n << 1) - k - 1) / 2;
+			z__3.r = d22.r * ap[i__2].r - d22.i * ap[i__2].i, 
+				z__3.i = d22.r * ap[i__2].i + d22.i * ap[i__2]
+				.r;
+			i__3 = j + (k - 1) * ((*n << 1) - k) / 2;
+			z__2.r = z__3.r - ap[i__3].r, z__2.i = z__3.i - ap[
+				i__3].i;
+			z__1.r = d21.r * z__2.r - d21.i * z__2.i, z__1.i = 
+				d21.r * z__2.i + d21.i * z__2.r;
+			wkp1.r = z__1.r, wkp1.i = z__1.i;
+			i__2 = *n;
+			for (i__ = j; i__ <= i__2; ++i__) {
+			    i__3 = i__ + (j - 1) * ((*n << 1) - j) / 2;
+			    i__4 = i__ + (j - 1) * ((*n << 1) - j) / 2;
+			    i__5 = i__ + (k - 1) * ((*n << 1) - k) / 2;
+			    z__3.r = ap[i__5].r * wk.r - ap[i__5].i * wk.i, 
+				    z__3.i = ap[i__5].r * wk.i + ap[i__5].i * 
+				    wk.r;
+			    z__2.r = ap[i__4].r - z__3.r, z__2.i = ap[i__4].i 
+				    - z__3.i;
+			    i__6 = i__ + k * ((*n << 1) - k - 1) / 2;
+			    z__4.r = ap[i__6].r * wkp1.r - ap[i__6].i * 
+				    wkp1.i, z__4.i = ap[i__6].r * wkp1.i + ap[
+				    i__6].i * wkp1.r;
+			    z__1.r = z__2.r - z__4.r, z__1.i = z__2.i - 
+				    z__4.i;
+			    ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+/* L90: */
+			}
+			i__2 = j + (k - 1) * ((*n << 1) - k) / 2;
+			ap[i__2].r = wk.r, ap[i__2].i = wk.i;
+			i__2 = j + k * ((*n << 1) - k - 1) / 2;
+			ap[i__2].r = wkp1.r, ap[i__2].i = wkp1.i;
+/* L100: */
+		    }
+		}
+	    }
+	}
+
+/* Store details of the interchanges in IPIV */
+
+	if (kstep == 1) {
+	    ipiv[k] = kp;
+	} else {
+	    ipiv[k] = -kp;
+	    ipiv[k + 1] = -kp;
+	}
+
+/* Increase K and return to the start of the main loop */
+
+	k += kstep;
+	kc = knc + *n - k + 2;
+	goto L60;
+
+    }
+
+L110:
+    return 0;
+
+/* End of ZSPTRF */
+
+} /* zsptrf_ */
diff --git a/contrib/libs/clapack/zsptri.c b/contrib/libs/clapack/zsptri.c
new file mode 100644
index 0000000000..82a3cfcacb
--- /dev/null
+++ b/contrib/libs/clapack/zsptri.c
@@ -0,0 +1,508 @@
+/* zsptri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+/* c_b1 = complex one, c_b2 = complex zero, c__1 = unit stride; f2c */
+/* passes all arguments by address, hence these addressable constants. */
+static doublecomplex c_b1 = {1.,0.};
+static doublecomplex c_b2 = {0.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zsptri_(char *uplo, integer *n, doublecomplex *ap, 
+	integer *ipiv, doublecomplex *work, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3;
+    doublecomplex z__1, z__2, z__3;
+
+    /* Builtin functions */
+    void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    doublecomplex d__;
+    integer j, k;
+    doublecomplex t, ak;
+    integer kc, kp, kx, kpc, npp;
+    doublecomplex akp1, temp, akkp1;
+    extern logical lsame_(char *, char *);
+    integer kstep;
+    logical upper;
+    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *, 
+	    doublecomplex *, integer *);
+    extern /* Double Complex */ VOID zdotu_(doublecomplex *, integer *, 
+	    doublecomplex *, integer *, doublecomplex *, integer *);
+    extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *, 
+	    doublecomplex *, integer *), zspmv_(char *, integer *, 
+	    doublecomplex *, doublecomplex *, doublecomplex *, integer *, 
+	    doublecomplex *, doublecomplex *, integer *), xerbla_(
+	    char *, integer *);
+    integer kcnext;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSPTRI computes the inverse of a complex symmetric indefinite matrix */
+/* A in packed storage using the factorization A = U*D*U**T or */
+/* A = L*D*L**T computed by ZSPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**T; */
+/* = 'L': Lower triangular, form is A = L*D*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the block diagonal matrix D and the multipliers */
+/* used to obtain the factor U or L as computed by ZSPTRF, */
+/* stored as a packed triangular matrix. */
+
+/* On exit, if INFO = 0, the (symmetric) inverse of the original */
+/* matrix, stored as a packed triangular matrix. The j-th column */
+/* of inv(A) is stored in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = inv(A)(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', */
+/* AP(i + (j-1)*(2n-j)/2) = inv(A)(i,j) for j<=i<=n. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by ZSPTRF. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) = 0; the matrix is singular and its */
+/* inverse could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+/* f2c idiom: the array pointers are decremented once so the Fortran- */
+/* style 1-based subscripts used below index correctly. */
+    /* Parameter adjustments */
+    --work;
+    --ipiv;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZSPTRI", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Check that the diagonal matrix D is nonsingular. */
+
+/* NOTE: *info doubles as the loop counter in these scans; on finding a */
+/* zero 1-by-1 pivot the routine returns immediately with *info still */
+/* set to that pivot's (1-based) index, as the contract requires. */
+    if (upper) {
+
+/* Upper triangular storage: examine D from bottom to top */
+
+	kp = *n * (*n + 1) / 2;
+	for (*info = *n; *info >= 1; --(*info)) {
+	    i__1 = kp;
+	    if (ipiv[*info] > 0 && (ap[i__1].r == 0. && ap[i__1].i == 0.)) {
+		return 0;
+	    }
+	    kp -= *info;
+/* L10: */
+	}
+    } else {
+
+/* Lower triangular storage: examine D from top to bottom. */
+
+	kp = 1;
+	i__1 = *n;
+	for (*info = 1; *info <= i__1; ++(*info)) {
+	    i__2 = kp;
+	    if (ipiv[*info] > 0 && (ap[i__2].r == 0. && ap[i__2].i == 0.)) {
+		return 0;
+	    }
+	    kp = kp + *n - *info + 1;
+/* L20: */
+	}
+    }
+    *info = 0;
+
+    if (upper) {
+
+/* Compute inv(A) from the factorization A = U*D*U'. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+	k = 1;
+	kc = 1;
+/* goto-L30 loop: K advances 1 or 2 columns per pass (KSTEP); KC points */
+/* at the first element of column K in the packed array. */
+L30:
+
+/* If K > N, exit from loop. */
+
+	if (k > *n) {
+	    goto L50;
+	}
+
+	kcnext = kc + k;
+	if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Invert the diagonal block. */
+
+	    i__1 = kc + k - 1;
+	    z_div(&z__1, &c_b1, &ap[kc + k - 1]);
+	    ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+
+/* Compute column K of the inverse. */
+
+	    if (k > 1) {
+		i__1 = k - 1;
+		zcopy_(&i__1, &ap[kc], &c__1, &work[1], &c__1);
+		i__1 = k - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zspmv_(uplo, &i__1, &z__1, &ap[1], &work[1], &c__1, &c_b2, &
+			ap[kc], &c__1);
+		i__1 = kc + k - 1;
+		i__2 = kc + k - 1;
+		i__3 = k - 1;
+		zdotu_(&z__2, &i__3, &work[1], &c__1, &ap[kc], &c__1);
+		z__1.r = ap[i__2].r - z__2.r, z__1.i = ap[i__2].i - z__2.i;
+		ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+	    }
+	    kstep = 1;
+	} else {
+
+/* 2 x 2 diagonal block */
+
+/* Invert the diagonal block. */
+
+	    i__1 = kcnext + k - 1;
+	    t.r = ap[i__1].r, t.i = ap[i__1].i;
+	    z_div(&z__1, &ap[kc + k - 1], &t);
+	    ak.r = z__1.r, ak.i = z__1.i;
+	    z_div(&z__1, &ap[kcnext + k], &t);
+	    akp1.r = z__1.r, akp1.i = z__1.i;
+	    z_div(&z__1, &ap[kcnext + k - 1], &t);
+	    akkp1.r = z__1.r, akkp1.i = z__1.i;
+	    z__3.r = ak.r * akp1.r - ak.i * akp1.i, z__3.i = ak.r * akp1.i + 
+		    ak.i * akp1.r;
+	    z__2.r = z__3.r - 1., z__2.i = z__3.i - 0.;
+	    z__1.r = t.r * z__2.r - t.i * z__2.i, z__1.i = t.r * z__2.i + t.i 
+		    * z__2.r;
+	    d__.r = z__1.r, d__.i = z__1.i;
+	    i__1 = kc + k - 1;
+	    z_div(&z__1, &akp1, &d__);
+	    ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+	    i__1 = kcnext + k;
+	    z_div(&z__1, &ak, &d__);
+	    ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+	    i__1 = kcnext + k - 1;
+	    z__2.r = -akkp1.r, z__2.i = -akkp1.i;
+	    z_div(&z__1, &z__2, &d__);
+	    ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+
+/* Compute columns K and K+1 of the inverse. */
+
+	    if (k > 1) {
+		i__1 = k - 1;
+		zcopy_(&i__1, &ap[kc], &c__1, &work[1], &c__1);
+		i__1 = k - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zspmv_(uplo, &i__1, &z__1, &ap[1], &work[1], &c__1, &c_b2, &
+			ap[kc], &c__1);
+		i__1 = kc + k - 1;
+		i__2 = kc + k - 1;
+		i__3 = k - 1;
+		zdotu_(&z__2, &i__3, &work[1], &c__1, &ap[kc], &c__1);
+		z__1.r = ap[i__2].r - z__2.r, z__1.i = ap[i__2].i - z__2.i;
+		ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+		i__1 = kcnext + k - 1;
+		i__2 = kcnext + k - 1;
+		i__3 = k - 1;
+		zdotu_(&z__2, &i__3, &ap[kc], &c__1, &ap[kcnext], &c__1);
+		z__1.r = ap[i__2].r - z__2.r, z__1.i = ap[i__2].i - z__2.i;
+		ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+		i__1 = k - 1;
+		zcopy_(&i__1, &ap[kcnext], &c__1, &work[1], &c__1);
+		i__1 = k - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zspmv_(uplo, &i__1, &z__1, &ap[1], &work[1], &c__1, &c_b2, &
+			ap[kcnext], &c__1);
+		i__1 = kcnext + k;
+		i__2 = kcnext + k;
+		i__3 = k - 1;
+		zdotu_(&z__2, &i__3, &work[1], &c__1, &ap[kcnext], &c__1);
+		z__1.r = ap[i__2].r - z__2.r, z__1.i = ap[i__2].i - z__2.i;
+		ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+	    }
+	    kstep = 2;
+	    kcnext = kcnext + k + 1;
+	}
+
+	kp = (i__1 = ipiv[k], abs(i__1));
+	if (kp != k) {
+
+/* Interchange rows and columns K and KP in the leading */
+/* submatrix A(1:k+1,1:k+1) */
+
+	    kpc = (kp - 1) * kp / 2 + 1;
+	    i__1 = kp - 1;
+	    zswap_(&i__1, &ap[kc], &c__1, &ap[kpc], &c__1);
+	    kx = kpc + kp - 1;
+	    i__1 = k - 1;
+	    for (j = kp + 1; j <= i__1; ++j) {
+		kx = kx + j - 1;
+		i__2 = kc + j - 1;
+		temp.r = ap[i__2].r, temp.i = ap[i__2].i;
+		i__2 = kc + j - 1;
+		i__3 = kx;
+		ap[i__2].r = ap[i__3].r, ap[i__2].i = ap[i__3].i;
+		i__2 = kx;
+		ap[i__2].r = temp.r, ap[i__2].i = temp.i;
+/* L40: */
+	    }
+	    i__1 = kc + k - 1;
+	    temp.r = ap[i__1].r, temp.i = ap[i__1].i;
+	    i__1 = kc + k - 1;
+	    i__2 = kpc + kp - 1;
+	    ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+	    i__1 = kpc + kp - 1;
+	    ap[i__1].r = temp.r, ap[i__1].i = temp.i;
+	    if (kstep == 2) {
+		i__1 = kc + k + k - 1;
+		temp.r = ap[i__1].r, temp.i = ap[i__1].i;
+		i__1 = kc + k + k - 1;
+		i__2 = kc + k + kp - 1;
+		ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+		i__1 = kc + k + kp - 1;
+		ap[i__1].r = temp.r, ap[i__1].i = temp.i;
+	    }
+	}
+
+	k += kstep;
+	kc = kcnext;
+	goto L30;
+L50:
+
+	;
+    } else {
+
+/* Compute inv(A) from the factorization A = L*D*L'. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+	npp = *n * (*n + 1) / 2;
+	k = *n;
+	kc = npp;
+/* Lower storage: sweep from K = N down to 1; KC tracks A(k,k). */
+L60:
+
+/* If K < 1, exit from loop. */
+
+	if (k < 1) {
+	    goto L80;
+	}
+
+	kcnext = kc - (*n - k + 2);
+	if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Invert the diagonal block. */
+
+	    i__1 = kc;
+	    z_div(&z__1, &c_b1, &ap[kc]);
+	    ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+
+/* Compute column K of the inverse. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		zcopy_(&i__1, &ap[kc + 1], &c__1, &work[1], &c__1);
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zspmv_(uplo, &i__1, &z__1, &ap[kc + *n - k + 1], &work[1], &
+			c__1, &c_b2, &ap[kc + 1], &c__1);
+		i__1 = kc;
+		i__2 = kc;
+		i__3 = *n - k;
+		zdotu_(&z__2, &i__3, &work[1], &c__1, &ap[kc + 1], &c__1);
+		z__1.r = ap[i__2].r - z__2.r, z__1.i = ap[i__2].i - z__2.i;
+		ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+	    }
+	    kstep = 1;
+	} else {
+
+/* 2 x 2 diagonal block */
+
+/* Invert the diagonal block. */
+
+	    i__1 = kcnext + 1;
+	    t.r = ap[i__1].r, t.i = ap[i__1].i;
+	    z_div(&z__1, &ap[kcnext], &t);
+	    ak.r = z__1.r, ak.i = z__1.i;
+	    z_div(&z__1, &ap[kc], &t);
+	    akp1.r = z__1.r, akp1.i = z__1.i;
+	    z_div(&z__1, &ap[kcnext + 1], &t);
+	    akkp1.r = z__1.r, akkp1.i = z__1.i;
+	    z__3.r = ak.r * akp1.r - ak.i * akp1.i, z__3.i = ak.r * akp1.i + 
+		    ak.i * akp1.r;
+	    z__2.r = z__3.r - 1., z__2.i = z__3.i - 0.;
+	    z__1.r = t.r * z__2.r - t.i * z__2.i, z__1.i = t.r * z__2.i + t.i 
+		    * z__2.r;
+	    d__.r = z__1.r, d__.i = z__1.i;
+	    i__1 = kcnext;
+	    z_div(&z__1, &akp1, &d__);
+	    ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+	    i__1 = kc;
+	    z_div(&z__1, &ak, &d__);
+	    ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+	    i__1 = kcnext + 1;
+	    z__2.r = -akkp1.r, z__2.i = -akkp1.i;
+	    z_div(&z__1, &z__2, &d__);
+	    ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+
+/* Compute columns K-1 and K of the inverse. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		zcopy_(&i__1, &ap[kc + 1], &c__1, &work[1], &c__1);
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zspmv_(uplo, &i__1, &z__1, &ap[kc + (*n - k + 1)], &work[1], &
+			c__1, &c_b2, &ap[kc + 1], &c__1);
+		i__1 = kc;
+		i__2 = kc;
+		i__3 = *n - k;
+		zdotu_(&z__2, &i__3, &work[1], &c__1, &ap[kc + 1], &c__1);
+		z__1.r = ap[i__2].r - z__2.r, z__1.i = ap[i__2].i - z__2.i;
+		ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+		i__1 = kcnext + 1;
+		i__2 = kcnext + 1;
+		i__3 = *n - k;
+		zdotu_(&z__2, &i__3, &ap[kc + 1], &c__1, &ap[kcnext + 2], &
+			c__1);
+		z__1.r = ap[i__2].r - z__2.r, z__1.i = ap[i__2].i - z__2.i;
+		ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+		i__1 = *n - k;
+		zcopy_(&i__1, &ap[kcnext + 2], &c__1, &work[1], &c__1);
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zspmv_(uplo, &i__1, &z__1, &ap[kc + (*n - k + 1)], &work[1], &
+			c__1, &c_b2, &ap[kcnext + 2], &c__1);
+		i__1 = kcnext;
+		i__2 = kcnext;
+		i__3 = *n - k;
+		zdotu_(&z__2, &i__3, &work[1], &c__1, &ap[kcnext + 2], &c__1);
+		z__1.r = ap[i__2].r - z__2.r, z__1.i = ap[i__2].i - z__2.i;
+		ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+	    }
+	    kstep = 2;
+	    kcnext -= *n - k + 3;
+	}
+
+	kp = (i__1 = ipiv[k], abs(i__1));
+	if (kp != k) {
+
+/* Interchange rows and columns K and KP in the trailing */
+/* submatrix A(k-1:n,k-1:n) */
+
+	    kpc = npp - (*n - kp + 1) * (*n - kp + 2) / 2 + 1;
+	    if (kp < *n) {
+		i__1 = *n - kp;
+		zswap_(&i__1, &ap[kc + kp - k + 1], &c__1, &ap[kpc + 1], &
+			c__1);
+	    }
+	    kx = kc + kp - k;
+	    i__1 = kp - 1;
+	    for (j = k + 1; j <= i__1; ++j) {
+		kx = kx + *n - j + 1;
+		i__2 = kc + j - k;
+		temp.r = ap[i__2].r, temp.i = ap[i__2].i;
+		i__2 = kc + j - k;
+		i__3 = kx;
+		ap[i__2].r = ap[i__3].r, ap[i__2].i = ap[i__3].i;
+		i__2 = kx;
+		ap[i__2].r = temp.r, ap[i__2].i = temp.i;
+/* L70: */
+	    }
+	    i__1 = kc;
+	    temp.r = ap[i__1].r, temp.i = ap[i__1].i;
+	    i__1 = kc;
+	    i__2 = kpc;
+	    ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+	    i__1 = kpc;
+	    ap[i__1].r = temp.r, ap[i__1].i = temp.i;
+	    if (kstep == 2) {
+		i__1 = kc - *n + k - 1;
+		temp.r = ap[i__1].r, temp.i = ap[i__1].i;
+		i__1 = kc - *n + k - 1;
+		i__2 = kc - *n + kp - 1;
+		ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
+		i__1 = kc - *n + kp - 1;
+		ap[i__1].r = temp.r, ap[i__1].i = temp.i;
+	    }
+	}
+
+	k -= kstep;
+	kc = kcnext;
+	goto L60;
+L80:
+	;
+    }
+
+    return 0;
+
+/* End of ZSPTRI */
+
+} /* zsptri_ */
diff --git a/contrib/libs/clapack/zsptrs.c b/contrib/libs/clapack/zsptrs.c
new file mode 100644
index 0000000000..ecf7b91895
--- /dev/null
+++ b/contrib/libs/clapack/zsptrs.c
@@ -0,0 +1,503 @@
+/* zsptrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zsptrs_(char *uplo, integer *n, integer *nrhs,
+	doublecomplex *ap, integer *ipiv, doublecomplex *b, integer *ldb,
+	integer *info)
+{
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1, i__2;
+    doublecomplex z__1, z__2, z__3;
+
+    /* Builtin functions */
+    void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer j, k;
+    doublecomplex ak, bk;
+    integer kc, kp;
+    doublecomplex akm1, bkm1, akm1k;
+    extern logical lsame_(char *, char *);
+    doublecomplex denom;
+    extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+	    doublecomplex *, integer *), zgemv_(char *, integer *, integer *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, doublecomplex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int zgeru_(integer *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *), zswap_(integer *, doublecomplex *,
+	    integer *, doublecomplex *, integer *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSPTRS solves a system of linear equations A*X = B with a complex */
+/* symmetric matrix A stored in packed format using the factorization */
+/* A = U*D*U**T or A = L*D*L**T computed by ZSPTRF. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**T; */
+/* = 'L': Lower triangular, form is A = L*D*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The block diagonal matrix D and the multipliers used to */
+/* obtain the factor U or L as computed by ZSPTRF, stored as a */
+/* packed triangular matrix. */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by ZSPTRF. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+    /* Parameter adjustments */
+/* Shift AP, IPIV and B so the f2c code below can use 1-based */
+/* Fortran-style subscripts on C arrays. */
+    --ap;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZSPTRS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/* Solve A*X = B, where A = U*D*U'. */
+
+/* First solve U*D*X = B, overwriting B with X. */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+	k = *n;
+	kc = *n * (*n + 1) / 2 + 1;
+L10:
+
+/* If K < 1, exit from loop. */
+
+	if (k < 1) {
+	    goto L30;
+	}
+
+/* After this decrement KC is the packed-storage index in AP of the */
+/* first element of column K of U. */
+	kc -= k;
+	if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/* Multiply by inv(U(K)), where U(K) is the transformation */
+/* stored in column K of A. */
+
+	    i__1 = k - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgeru_(&i__1, nrhs, &z__1, &ap[kc], &c__1, &b[k + b_dim1], ldb, &
+		    b[b_dim1 + 1], ldb);
+
+/* Multiply by the inverse of the diagonal block. */
+
+	    z_div(&z__1, &c_b1, &ap[kc + k - 1]);
+	    zscal_(nrhs, &z__1, &b[k + b_dim1], ldb);
+	    --k;
+	} else {
+
+/* 2 x 2 diagonal block */
+
+/* Interchange rows K-1 and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k - 1) {
+		zswap_(nrhs, &b[k - 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/* Multiply by inv(U(K)), where U(K) is the transformation */
+/* stored in columns K-1 and K of A. */
+
+	    i__1 = k - 2;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgeru_(&i__1, nrhs, &z__1, &ap[kc], &c__1, &b[k + b_dim1], ldb, &
+		    b[b_dim1 + 1], ldb);
+	    i__1 = k - 2;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgeru_(&i__1, nrhs, &z__1, &ap[kc - (k - 1)], &c__1, &b[k - 1 +
+		    b_dim1], ldb, &b[b_dim1 + 1], ldb);
+
+/* Multiply by the inverse of the diagonal block. */
+
+/* AKM1K is the off-diagonal entry of the 2-by-2 pivot block D; */
+/* the block inverse is applied explicitly below, with */
+/* DENOM = (D(k-1,k-1)/AKM1K)*(D(k,k)/AKM1K) - 1. */
+	    i__1 = kc + k - 2;
+	    akm1k.r = ap[i__1].r, akm1k.i = ap[i__1].i;
+	    z_div(&z__1, &ap[kc - 1], &akm1k);
+	    akm1.r = z__1.r, akm1.i = z__1.i;
+	    z_div(&z__1, &ap[kc + k - 1], &akm1k);
+	    ak.r = z__1.r, ak.i = z__1.i;
+	    z__2.r = akm1.r * ak.r - akm1.i * ak.i, z__2.i = akm1.r * ak.i +
+		    akm1.i * ak.r;
+	    z__1.r = z__2.r - 1., z__1.i = z__2.i - 0.;
+	    denom.r = z__1.r, denom.i = z__1.i;
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		z_div(&z__1, &b[k - 1 + j * b_dim1], &akm1k);
+		bkm1.r = z__1.r, bkm1.i = z__1.i;
+		z_div(&z__1, &b[k + j * b_dim1], &akm1k);
+		bk.r = z__1.r, bk.i = z__1.i;
+		i__2 = k - 1 + j * b_dim1;
+		z__3.r = ak.r * bkm1.r - ak.i * bkm1.i, z__3.i = ak.r *
+			bkm1.i + ak.i * bkm1.r;
+		z__2.r = z__3.r - bk.r, z__2.i = z__3.i - bk.i;
+		z_div(&z__1, &z__2, &denom);
+		b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+		i__2 = k + j * b_dim1;
+		z__3.r = akm1.r * bk.r - akm1.i * bk.i, z__3.i = akm1.r *
+			bk.i + akm1.i * bk.r;
+		z__2.r = z__3.r - bkm1.r, z__2.i = z__3.i - bkm1.i;
+		z_div(&z__1, &z__2, &denom);
+		b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L20: */
+	    }
+	    kc = kc - k + 1;
+	    k += -2;
+	}
+
+	goto L10;
+L30:
+
+/* Next solve U'*X = B, overwriting B with X. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+	k = 1;
+	kc = 1;
+L40:
+
+/* If K > N, exit from loop. */
+
+	if (k > *n) {
+	    goto L50;
+	}
+
+	if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Multiply by inv(U'(K)), where U(K) is the transformation */
+/* stored in column K of A. */
+
+	    i__1 = k - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgemv_("Transpose", &i__1, nrhs, &z__1, &b[b_offset], ldb, &ap[kc]
+, &c__1, &c_b1, &b[k + b_dim1], ldb);
+
+/* Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    kc += k;
+	    ++k;
+	} else {
+
+/* 2 x 2 diagonal block */
+
+/* Multiply by inv(U'(K+1)), where U(K+1) is the transformation */
+/* stored in columns K and K+1 of A. */
+
+	    i__1 = k - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgemv_("Transpose", &i__1, nrhs, &z__1, &b[b_offset], ldb, &ap[kc]
+, &c__1, &c_b1, &b[k + b_dim1], ldb);
+	    i__1 = k - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgemv_("Transpose", &i__1, nrhs, &z__1, &b[b_offset], ldb, &ap[kc
+		    + k], &c__1, &c_b1, &b[k + 1 + b_dim1], ldb);
+
+/* Interchange rows K and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k) {
+		zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    kc = kc + (k << 1) + 1;
+	    k += 2;
+	}
+
+	goto L40;
+L50:
+
+	;
+    } else {
+
+/* Solve A*X = B, where A = L*D*L'. */
+
+/* First solve L*D*X = B, overwriting B with X. */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+	k = 1;
+	kc = 1;
+L60:
+
+/* If K > N, exit from loop. */
+
+	if (k > *n) {
+	    goto L80;
+	}
+
+	if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/* Multiply by inv(L(K)), where L(K) is the transformation */
+/* stored in column K of A. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zgeru_(&i__1, nrhs, &z__1, &ap[kc + 1], &c__1, &b[k + b_dim1],
+			 ldb, &b[k + 1 + b_dim1], ldb);
+	    }
+
+/* Multiply by the inverse of the diagonal block. */
+
+	    z_div(&z__1, &c_b1, &ap[kc]);
+	    zscal_(nrhs, &z__1, &b[k + b_dim1], ldb);
+/* Advance KC to the start of column K+1 of L in packed storage. */
+	    kc = kc + *n - k + 1;
+	    ++k;
+	} else {
+
+/* 2 x 2 diagonal block */
+
+/* Interchange rows K+1 and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k + 1) {
+		zswap_(nrhs, &b[k + 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/* Multiply by inv(L(K)), where L(K) is the transformation */
+/* stored in columns K and K+1 of A. */
+
+	    if (k < *n - 1) {
+		i__1 = *n - k - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgeru_(&i__1, nrhs, &z__1, &ap[kc + 2], &c__1, &b[k + b_dim1],
+			 ldb, &b[k + 2 + b_dim1], ldb);
+		i__1 = *n - k - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgeru_(&i__1, nrhs, &z__1, &ap[kc + *n - k + 2], &c__1, &b[k
+			+ 1 + b_dim1], ldb, &b[k + 2 + b_dim1], ldb);
+	    }
+
+/* Multiply by the inverse of the diagonal block. */
+
+/* AKM1K is the off-diagonal entry of the 2-by-2 pivot block D; */
+/* the block inverse is applied explicitly below, with */
+/* DENOM = (D(k,k)/AKM1K)*(D(k+1,k+1)/AKM1K) - 1. */
+	    i__1 = kc + 1;
+	    akm1k.r = ap[i__1].r, akm1k.i = ap[i__1].i;
+	    z_div(&z__1, &ap[kc], &akm1k);
+	    akm1.r = z__1.r, akm1.i = z__1.i;
+	    z_div(&z__1, &ap[kc + *n - k + 1], &akm1k);
+	    ak.r = z__1.r, ak.i = z__1.i;
+	    z__2.r = akm1.r * ak.r - akm1.i * ak.i, z__2.i = akm1.r * ak.i +
+		    akm1.i * ak.r;
+	    z__1.r = z__2.r - 1., z__1.i = z__2.i - 0.;
+	    denom.r = z__1.r, denom.i = z__1.i;
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		z_div(&z__1, &b[k + j * b_dim1], &akm1k);
+		bkm1.r = z__1.r, bkm1.i = z__1.i;
+		z_div(&z__1, &b[k + 1 + j * b_dim1], &akm1k);
+		bk.r = z__1.r, bk.i = z__1.i;
+		i__2 = k + j * b_dim1;
+		z__3.r = ak.r * bkm1.r - ak.i * bkm1.i, z__3.i = ak.r *
+			bkm1.i + ak.i * bkm1.r;
+		z__2.r = z__3.r - bk.r, z__2.i = z__3.i - bk.i;
+		z_div(&z__1, &z__2, &denom);
+		b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+		i__2 = k + 1 + j * b_dim1;
+		z__3.r = akm1.r * bk.r - akm1.i * bk.i, z__3.i = akm1.r *
+			bk.i + akm1.i * bk.r;
+		z__2.r = z__3.r - bkm1.r, z__2.i = z__3.i - bkm1.i;
+		z_div(&z__1, &z__2, &denom);
+		b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L70: */
+	    }
+	    kc = kc + (*n - k << 1) + 1;
+	    k += 2;
+	}
+
+	goto L60;
+L80:
+
+/* Next solve L'*X = B, overwriting B with X. */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2, depending on the size of the diagonal blocks. */
+
+	k = *n;
+	kc = *n * (*n + 1) / 2 + 1;
+L90:
+
+/* If K < 1, exit from loop. */
+
+	if (k < 1) {
+	    goto L100;
+	}
+
+/* After this decrement KC is the packed-storage index in AP of the */
+/* first element of column K of L. */
+	kc -= *n - k + 1;
+	if (ipiv[k] > 0) {
+
+/* 1 x 1 diagonal block */
+
+/* Multiply by inv(L'(K)), where L(K) is the transformation */
+/* stored in column K of A. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("Transpose", &i__1, nrhs, &z__1, &b[k + 1 + b_dim1],
+			ldb, &ap[kc + 1], &c__1, &c_b1, &b[k + b_dim1], ldb);
+	    }
+
+/* Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    --k;
+	} else {
+
+/* 2 x 2 diagonal block */
+
+/* Multiply by inv(L'(K-1)), where L(K-1) is the transformation */
+/* stored in columns K-1 and K of A. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("Transpose", &i__1, nrhs, &z__1, &b[k + 1 + b_dim1],
+			ldb, &ap[kc + 1], &c__1, &c_b1, &b[k + b_dim1], ldb);
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("Transpose", &i__1, nrhs, &z__1, &b[k + 1 + b_dim1],
+			ldb, &ap[kc - (*n - k)], &c__1, &c_b1, &b[k - 1 +
+			b_dim1], ldb);
+	    }
+
+/* Interchange rows K and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k) {
+		zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    kc -= *n - k + 2;
+	    k += -2;
+	}
+
+	goto L90;
+L100:
+	;
+    }
+
+    return 0;
+
+/* End of ZSPTRS */
+
+} /* zsptrs_ */
diff --git a/contrib/libs/clapack/zstedc.c b/contrib/libs/clapack/zstedc.c
new file mode 100644
index 0000000000..f72c69c668
--- /dev/null
+++ b/contrib/libs/clapack/zstedc.c
@@ -0,0 +1,497 @@
+/* zstedc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__9 = 9;
+static integer c__0 = 0;
+static integer c__2 = 2;
+static doublereal c_b17 = 0.;
+static doublereal c_b18 = 1.;
+static integer c__1 = 1;
+
+/* Subroutine */ int zstedc_(char *compz, integer *n, doublereal *d__,
+	doublereal *e, doublecomplex *z__, integer *ldz, doublecomplex *work,
+	integer *lwork, doublereal *rwork, integer *lrwork, integer *iwork,
+	integer *liwork, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1, i__2, i__3, i__4;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double log(doublereal);
+    integer pow_ii(integer *, integer *);
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, k, m;
+    doublereal p;
+    integer ii, ll, lgn;
+    doublereal eps, tiny;
+    extern logical lsame_(char *, char *);
+    integer lwmin, start;
+    extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *), zlaed0_(integer *, integer *,
+	    doublereal *, doublereal *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublereal *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+	    doublereal *, doublereal *, integer *, integer *, doublereal *,
+	    integer *, integer *), dstedc_(char *, integer *,
+	    doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+	    integer *, integer *, integer *, integer *), dlaset_(
+	    char *, integer *, integer *, doublereal *, doublereal *,
+	    doublereal *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer finish;
+    extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+    extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+	     integer *), zlacrm_(integer *, integer *, doublecomplex *,
+	    integer *, doublereal *, integer *, doublecomplex *, integer *,
+	    doublereal *);
+    integer liwmin, icompz;
+    extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *,
+	    doublereal *, doublereal *, integer *, doublereal *, integer *), zlacpy_(char *, integer *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, integer *);
+    doublereal orgnrm;
+    integer lrwmin;
+    logical lquery;
+    integer smlsiz;
+    extern /* Subroutine */ int zsteqr_(char *, integer *, doublereal *,
+	    doublereal *, doublecomplex *, integer *, doublereal *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSTEDC computes all eigenvalues and, optionally, eigenvectors of a */
+/* symmetric tridiagonal matrix using the divide and conquer method. */
+/* The eigenvectors of a full or band complex Hermitian matrix can also */
+/* be found if ZHETRD or ZHPTRD or ZHBTRD has been used to reduce this */
+/* matrix to tridiagonal form. */
+
+/* This code makes very mild assumptions about floating point */
+/* arithmetic. It will work on machines with a guard digit in */
+/* add/subtract, or on those binary machines without guard digits */
+/* which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. */
+/* It could conceivably fail on hexadecimal or decimal machines */
+/* without guard digits, but we know of none. See DLAED3 for details. */
+
+/* Arguments */
+/* ========= */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only. */
+/* = 'I': Compute eigenvectors of tridiagonal matrix also. */
+/* = 'V': Compute eigenvectors of original Hermitian matrix */
+/* also. On entry, Z contains the unitary matrix used */
+/* to reduce the original matrix to tridiagonal form. */
+
+/* N (input) INTEGER */
+/* The dimension of the symmetric tridiagonal matrix. N >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the diagonal elements of the tridiagonal matrix. */
+/* On exit, if INFO = 0, the eigenvalues in ascending order. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, the subdiagonal elements of the tridiagonal matrix. */
+/* On exit, E has been destroyed. */
+
+/* Z (input/output) COMPLEX*16 array, dimension (LDZ,N) */
+/* On entry, if COMPZ = 'V', then Z contains the unitary */
+/* matrix used in the reduction to tridiagonal form. */
+/* On exit, if INFO = 0, then if COMPZ = 'V', Z contains the */
+/* orthonormal eigenvectors of the original Hermitian matrix, */
+/* and if COMPZ = 'I', Z contains the orthonormal eigenvectors */
+/* of the symmetric tridiagonal matrix. */
+/* If COMPZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1. */
+/* If eigenvectors are desired, then LDZ >= max(1,N). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If COMPZ = 'N' or 'I', or N <= 1, LWORK must be at least 1. */
+/* If COMPZ = 'V' and N > 1, LWORK must be at least N*N. */
+/* Note that for COMPZ = 'V', then if N is less than or */
+/* equal to the minimum divide size, usually 25, then LWORK need */
+/* only be 1. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal sizes of the WORK, RWORK and */
+/* IWORK arrays, returns these values as the first entries of */
+/* the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* RWORK (workspace/output) DOUBLE PRECISION array, */
+/* dimension (LRWORK) */
+/* On exit, if INFO = 0, RWORK(1) returns the optimal LRWORK. */
+
+/* LRWORK (input) INTEGER */
+/* The dimension of the array RWORK. */
+/* If COMPZ = 'N' or N <= 1, LRWORK must be at least 1. */
+/* If COMPZ = 'V' and N > 1, LRWORK must be at least */
+/* 1 + 3*N + 2*N*lg N + 3*N**2 , */
+/* where lg( N ) = smallest integer k such */
+/* that 2**k >= N. */
+/* If COMPZ = 'I' and N > 1, LRWORK must be at least */
+/* 1 + 4*N + 2*N**2 . */
+/* Note that for COMPZ = 'I' or 'V', then if N is less than or */
+/* equal to the minimum divide size, usually 25, then LRWORK */
+/* need only be max(1,2*(N-1)). */
+
+/* If LRWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. */
+/* If COMPZ = 'N' or N <= 1, LIWORK must be at least 1. */
+/* If COMPZ = 'V' or N > 1, LIWORK must be at least */
+/* 6 + 6*N + 5*N*lg N. */
+/* If COMPZ = 'I' or N > 1, LIWORK must be at least */
+/* 3 + 5*N . */
+/* Note that for COMPZ = 'I' or 'V', then if N is less than or */
+/* equal to the minimum divide size, usually 25, then LIWORK */
+/* need only be 1. */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal sizes of the WORK, RWORK */
+/* and IWORK arrays, returns these values as the first entries */
+/* of the WORK, RWORK and IWORK arrays, and no error message */
+/* related to LWORK or LRWORK or LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* > 0: The algorithm failed to compute an eigenvalue while */
+/* working on the submatrix lying in rows and columns */
+/* INFO/(N+1) through mod(INFO,N+1). */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Jeff Rutter, Computer Science Division, University of California */
+/* at Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* Shift D, E, Z, WORK, RWORK and IWORK so the f2c code below can use */
+/* 1-based Fortran-style subscripts on C arrays. */
+    --d__;
+    --e;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --rwork;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1;
+
+    if (lsame_(compz, "N")) {
+	icompz = 0;
+    } else if (lsame_(compz, "V")) {
+	icompz = 1;
+    } else if (lsame_(compz, "I")) {
+	icompz = 2;
+    } else {
+	icompz = -1;
+    }
+    if (icompz < 0) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*ldz < 1 || icompz > 0 && *ldz < max(1,*n)) {
+	*info = -6;
+    }
+
+    if (*info == 0) {
+
+/* Compute the workspace requirements */
+
+	smlsiz = ilaenv_(&c__9, "ZSTEDC", " ", &c__0, &c__0, &c__0, &c__0);
+	if (*n <= 1 || icompz == 0) {
+	    lwmin = 1;
+	    liwmin = 1;
+	    lrwmin = 1;
+	} else if (*n <= smlsiz) {
+	    lwmin = 1;
+	    liwmin = 1;
+/* "-" binds tighter than "<<", so this is 2*(N-1). */
+	    lrwmin = *n - 1 << 1;
+	} else if (icompz == 1) {
+	    lgn = (integer) (log((doublereal) (*n)) / log(2.));
+	    if (pow_ii(&c__2, &lgn) < *n) {
+		++lgn;
+	    }
+	    if (pow_ii(&c__2, &lgn) < *n) {
+		++lgn;
+	    }
+	    lwmin = *n * *n;
+/* Computing 2nd power */
+	    i__1 = *n;
+	    lrwmin = *n * 3 + 1 + (*n << 1) * lgn + i__1 * i__1 * 3;
+	    liwmin = *n * 6 + 6 + *n * 5 * lgn;
+	} else if (icompz == 2) {
+	    lwmin = 1;
+/* Computing 2nd power */
+	    i__1 = *n;
+	    lrwmin = (*n << 2) + 1 + (i__1 * i__1 << 1);
+	    liwmin = *n * 5 + 3;
+	}
+/* Report the minimal workspace sizes to the caller in the first */
+/* entries of WORK, RWORK and IWORK. */
+	work[1].r = (doublereal) lwmin, work[1].i = 0.;
+	rwork[1] = (doublereal) lrwmin;
+	iwork[1] = liwmin;
+
+	if (*lwork < lwmin && ! lquery) {
+	    *info = -8;
+	} else if (*lrwork < lrwmin && ! lquery) {
+	    *info = -10;
+	} else if (*liwork < liwmin && ! lquery) {
+	    *info = -12;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZSTEDC", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+    if (*n == 1) {
+	if (icompz != 0) {
+	    i__1 = z_dim1 + 1;
+	    z__[i__1].r = 1., z__[i__1].i = 0.;
+	}
+	return 0;
+    }
+
+/* If the following conditional clause is removed, then the routine */
+/* will use the Divide and Conquer routine to compute only the */
+/* eigenvalues, which requires (3N + 3N**2) real workspace and */
+/* (2 + 5N + 2N lg(N)) integer workspace. */
+/* Since on many architectures DSTERF is much faster than any other */
+/* algorithm for finding eigenvalues only, it is used here */
+/* as the default. If the conditional clause is removed, then */
+/* information on the size of workspace needs to be changed. */
+
+/* If COMPZ = 'N', use DSTERF to compute the eigenvalues. */
+
+    if (icompz == 0) {
+	dsterf_(n, &d__[1], &e[1], info);
+	goto L70;
+    }
+
+/* If N is smaller than the minimum divide size (SMLSIZ+1), then */
+/* solve the problem with another solver. */
+
+    if (*n <= smlsiz) {
+
+	zsteqr_(compz, n, &d__[1], &e[1], &z__[z_offset], ldz, &rwork[1],
+		info);
+
+    } else {
+
+/* If COMPZ = 'I', we simply call DSTEDC instead. */
+
+	if (icompz == 2) {
+	    dlaset_("Full", n, n, &c_b17, &c_b18, &rwork[1], n);
+	    ll = *n * *n + 1;
+	    i__1 = *lrwork - ll + 1;
+	    dstedc_("I", n, &d__[1], &e[1], &rwork[1], n, &rwork[ll], &i__1, &
+		    iwork[1], liwork, info);
+/* Copy the real eigenvector matrix built in RWORK into the */
+/* complex output array Z (imaginary parts set to zero). */
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * z_dim1;
+		    i__4 = (j - 1) * *n + i__;
+		    z__[i__3].r = rwork[i__4], z__[i__3].i = 0.;
+/* L10: */
+		}
+/* L20: */
+	    }
+	    goto L70;
+	}
+
+/* From now on, only option left to be handled is COMPZ = 'V', */
+/* i.e. ICOMPZ = 1. */
+
+/* Scale. */
+
+	orgnrm = dlanst_("M", n, &d__[1], &e[1]);
+	if (orgnrm == 0.) {
+	    goto L70;
+	}
+
+	eps = dlamch_("Epsilon");
+
+	start = 1;
+
+/* while ( START <= N ) */
+
+L30:
+	if (start <= *n) {
+
+/* Let FINISH be the position of the next subdiagonal entry */
+/* such that E( FINISH ) <= TINY or FINISH = N if no such */
+/* subdiagonal exists. The matrix identified by the elements */
+/* between START and FINISH constitutes an independent */
+/* sub-problem. */
+
+	    finish = start;
+L40:
+	    if (finish < *n) {
+		tiny = eps * sqrt((d__1 = d__[finish], abs(d__1))) * sqrt((
+			d__2 = d__[finish + 1], abs(d__2)));
+		if ((d__1 = e[finish], abs(d__1)) > tiny) {
+		    ++finish;
+		    goto L40;
+		}
+	    }
+
+/* (Sub) Problem determined. Compute its size and solve it. */
+
+	    m = finish - start + 1;
+	    if (m > smlsiz) {
+
+/* Scale. */
+
+		orgnrm = dlanst_("M", &m, &d__[start], &e[start]);
+		dlascl_("G", &c__0, &c__0, &orgnrm, &c_b18, &m, &c__1, &d__[
+			start], &m, info);
+		i__1 = m - 1;
+		i__2 = m - 1;
+		dlascl_("G", &c__0, &c__0, &orgnrm, &c_b18, &i__1, &c__1, &e[
+			start], &i__2, info);
+
+		zlaed0_(n, &m, &d__[start], &e[start], &z__[start * z_dim1 +
+			1], ldz, &work[1], n, &rwork[1], &iwork[1], info);
+		if (*info > 0) {
+		    *info = (*info / (m + 1) + start - 1) * (*n + 1) + *info %
+			     (m + 1) + start - 1;
+		    goto L70;
+		}
+
+/* Scale back. */
+
+		dlascl_("G", &c__0, &c__0, &c_b18, &orgnrm, &m, &c__1, &d__[
+			start], &m, info);
+
+	    } else {
+		dsteqr_("I", &m, &d__[start], &e[start], &rwork[1], &m, &
+			rwork[m * m + 1], info);
+		zlacrm_(n, &m, &z__[start * z_dim1 + 1], ldz, &rwork[1], &m, &
+			work[1], n, &rwork[m * m + 1]);
+		zlacpy_("A", n, &m, &work[1], n, &z__[start * z_dim1 + 1],
+			ldz);
+		if (*info > 0) {
+		    *info = start * (*n + 1) + finish;
+		    goto L70;
+		}
+	    }
+
+	    start = finish + 1;
+	    goto L30;
+	}
+
+/* endwhile */
+
+/* If the problem split any number of times, then the eigenvalues */
+/* will not be properly ordered. Here we permute the eigenvalues */
+/* (and the associated eigenvectors) into ascending order. */
+
+/* M still holds the size of the last sub-problem solved above; */
+/* M == N means the problem never split, so no reordering is needed. */
+	if (m != *n) {
+
+/* Use Selection Sort to minimize swaps of eigenvectors */
+
+	    i__1 = *n;
+	    for (ii = 2; ii <= i__1; ++ii) {
+		i__ = ii - 1;
+		k = i__;
+		p = d__[i__];
+		i__2 = *n;
+		for (j = ii; j <= i__2; ++j) {
+		    if (d__[j] < p) {
+			k = j;
+			p = d__[j];
+		    }
+/* L50: */
+		}
+		if (k != i__) {
+		    d__[k] = d__[i__];
+		    d__[i__] = p;
+		    zswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1
+			    + 1], &c__1);
+		}
+/* L60: */
+	    }
+	}
+    }
+
+L70:
+    work[1].r = (doublereal) lwmin, work[1].i = 0.;
+    rwork[1] = (doublereal) lrwmin;
+    iwork[1] = liwmin;
+
+    return 0;
+
+/* End of ZSTEDC */
+
+} /* zstedc_ */
diff --git a/contrib/libs/clapack/zstegr.c b/contrib/libs/clapack/zstegr.c
new file mode 100644
index 0000000000..3b81feef75
--- /dev/null
+++ b/contrib/libs/clapack/zstegr.c
@@ -0,0 +1,211 @@
+/* zstegr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zstegr_(char *jobz, char *range, integer *n, doublereal *
+	d__, doublereal *e, doublereal *vl, doublereal *vu, integer *il,
+	integer *iu, doublereal *abstol, integer *m, doublereal *w,
+	doublecomplex *z__, integer *ldz, integer *isuppz, doublereal *work,
+	integer *lwork, integer *iwork, integer *liwork, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset;
+
+    /* Local variables */
+    logical tryrac;
+    extern /* Subroutine */ int zstemr_(char *, char *, integer *, doublereal
+	    *, doublereal *, doublereal *, doublereal *, integer *, integer *,
+	    integer *, doublereal *, doublecomplex *, integer *, integer *,
+	    integer *, logical *, doublereal *, integer *, integer *, integer
+	    *, integer *);
+
+
+
+/* -- LAPACK computational routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSTEGR computes selected eigenvalues and, optionally, eigenvectors */
+/* of a real symmetric tridiagonal matrix T. Any such unreduced matrix has */
+/* a well defined set of pairwise different real eigenvalues, the corresponding */
+/* real eigenvectors are pairwise orthogonal. */
+
+/* The spectrum may be computed either completely or partially by specifying */
+/* either an interval (VL,VU] or a range of indices IL:IU for the desired */
+/* eigenvalues. */
+
+/* ZSTEGR is a compatibility wrapper around the improved ZSTEMR routine. */
+/* See DSTEMR for further details. */
+
+/* One important change is that the ABSTOL parameter no longer provides any */
+/* benefit and hence is no longer used. */
+
+/* Note : ZSTEGR and ZSTEMR work only on machines which follow */
+/* IEEE-754 floating-point standard in their handling of infinities and */
+/* NaNs. Normal execution may create these exceptional values and hence */
+/* may abort due to a floating point exception in environments which */
+/* do not conform to the IEEE-754 standard. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the N diagonal elements of the tridiagonal matrix */
+/* T. On exit, D is overwritten. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the (N-1) subdiagonal elements of the tridiagonal */
+/* matrix T in elements 1 to N-1 of E. E(N) need not be set on */
+/* input, but is used internally as workspace. */
+/* On exit, E is overwritten. */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* ABSTOL (input) DOUBLE PRECISION */
+/* Unused. Was the absolute error tolerance for the */
+/* eigenvalues/eigenvectors in previous versions. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* The first M elements contain the selected eigenvalues in */
+/* ascending order. */
+
+/* Z (output) COMPLEX*16 array, dimension (LDZ, max(1,M) ) */
+/* If JOBZ = 'V', and if INFO = 0, then the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix T */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and an upper bound must be used. */
+/* Supplying N columns is always safe. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', then LDZ >= max(1,N). */
+
+/* ISUPPZ (output) INTEGER ARRAY, dimension ( 2*max(1,M) ) */
+/* The support of the eigenvectors in Z, i.e., the indices */
+/* indicating the nonzero elements in Z. The i-th computed eigenvector */
+/* is nonzero only in elements ISUPPZ( 2*i-1 ) through */
+/* ISUPPZ( 2*i ). This is relevant in the case when the matrix */
+/* is split. ISUPPZ is only accessed when JOBZ is 'V' and N > 0. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal */
+/* (and minimal) LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,18*N) */
+/* if JOBZ = 'V', and LWORK >= max(1,12*N) if JOBZ = 'N'. */
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (LIWORK) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. LIWORK >= max(1,10*N) */
+/* if the eigenvectors are desired, and LIWORK >= max(1,8*N) */
+/* if only the eigenvalues are to be computed. */
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal size of the IWORK array, */
+/* returns this value as the first entry of the IWORK array, and */
+/* no error message related to LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* On exit, INFO */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = 1X, internal error in DLARRE, */
+/* if INFO = 2X, internal error in ZLARRV. */
+/* Here, the digit X = ABS( IINFO ) < 10, where IINFO is */
+/* the nonzero error code returned by DLARRE or */
+/* ZLARRV, respectively. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Inderjit Dhillon, IBM Almaden, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, LBNL/NERSC, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    --w;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --isuppz;
+    --work;
+    --iwork;
+
+    /* Function Body */
+    *info = 0;
+    tryrac = FALSE_; /* do not try for high relative accuracy (see DSTEMR) */
+    zstemr_(jobz, range, n, &d__[1], &e[1], vl, vu, il, iu, m, &w[1], &z__[
+	    z_offset], ldz, n, &isuppz[1], &tryrac, &work[1], lwork, &iwork[1]
+, liwork, info);
+
+/* End of ZSTEGR */
+
+    return 0;
+} /* zstegr_ */
diff --git a/contrib/libs/clapack/zstein.c b/contrib/libs/clapack/zstein.c
new file mode 100644
index 0000000000..0c7f08c964
--- /dev/null
+++ b/contrib/libs/clapack/zstein.c
@@ -0,0 +1,470 @@
+/* zstein.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__2 = 2;
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zstein_(integer *n, doublereal *d__, doublereal *e,
+	integer *m, doublereal *w, integer *iblock, integer *isplit,
+	doublecomplex *z__, integer *ldz, doublereal *work, integer *iwork,
+	integer *ifail, integer *info)
+{
+    /* System generated locals */
+    integer z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5;
+    doublereal d__1, d__2, d__3, d__4, d__5;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    double sqrt(doublereal);
+
+    /* Local variables */
+    integer i__, j, b1, j1, bn, jr;
+    doublereal xj, scl, eps, sep, nrm, tol;
+    integer its;
+    doublereal xjm, ztr, eps1;
+    integer jblk, nblk, jmax;
+    extern doublereal dnrm2_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+	    integer *);
+    integer iseed[4], gpind, iinfo;
+    extern doublereal dasum_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+	    doublereal *, integer *);
+    doublereal ortol;
+    integer indrv1, indrv2, indrv3, indrv4, indrv5;
+    extern doublereal dlamch_(char *);
+    extern /* Subroutine */ int dlagtf_(integer *, doublereal *, doublereal *,
+	     doublereal *, doublereal *, doublereal *, doublereal *, integer *
+, integer *);
+    extern integer idamax_(integer *, doublereal *, integer *);
+    extern /* Subroutine */ int xerbla_(char *, integer *), dlagts_(
+	    integer *, integer *, doublereal *, doublereal *, doublereal *,
+	    doublereal *, integer *, doublereal *, doublereal *, integer *);
+    integer nrmchk;
+    extern /* Subroutine */ int dlarnv_(integer *, integer *, integer *,
+	    doublereal *);
+    integer blksiz;
+    doublereal onenrm, dtpcrt, pertol;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSTEIN computes the eigenvectors of a real symmetric tridiagonal */
+/* matrix T corresponding to specified eigenvalues, using inverse */
+/* iteration. */
+
+/* The maximum number of iterations allowed for each eigenvector is */
+/* specified by an internal parameter MAXITS (currently set to 5). */
+
+/* Although the eigenvectors are real, they are stored in a complex */
+/* array, which may be passed to ZUNMTR or ZUPMTR for back */
+/* transformation to the eigenvectors of a complex Hermitian matrix */
+/* which was reduced to tridiagonal form. */
+
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input) DOUBLE PRECISION array, dimension (N) */
+/* The n diagonal elements of the tridiagonal matrix T. */
+
+/* E (input) DOUBLE PRECISION array, dimension (N-1) */
+/* The (n-1) subdiagonal elements of the tridiagonal matrix */
+/* T, stored in elements 1 to N-1. */
+
+/* M (input) INTEGER */
+/* The number of eigenvectors to be found. 0 <= M <= N. */
+
+/* W (input) DOUBLE PRECISION array, dimension (N) */
+/* The first M elements of W contain the eigenvalues for */
+/* which eigenvectors are to be computed. The eigenvalues */
+/* should be grouped by split-off block and ordered from */
+/* smallest to largest within the block. ( The output array */
+/* W from DSTEBZ with ORDER = 'B' is expected here. ) */
+
+/* IBLOCK (input) INTEGER array, dimension (N) */
+/* The submatrix indices associated with the corresponding */
+/* eigenvalues in W; IBLOCK(i)=1 if eigenvalue W(i) belongs to */
+/* the first submatrix from the top, =2 if W(i) belongs to */
+/* the second submatrix, etc. ( The output array IBLOCK */
+/* from DSTEBZ is expected here. ) */
+
+/* ISPLIT (input) INTEGER array, dimension (N) */
+/* The splitting points, at which T breaks up into submatrices. */
+/* The first submatrix consists of rows/columns 1 to */
+/* ISPLIT( 1 ), the second of rows/columns ISPLIT( 1 )+1 */
+/* through ISPLIT( 2 ), etc. */
+/* ( The output array ISPLIT from DSTEBZ is expected here. ) */
+
+/* Z (output) COMPLEX*16 array, dimension (LDZ, M) */
+/* The computed eigenvectors. The eigenvector associated */
+/* with the eigenvalue W(i) is stored in the i-th column of */
+/* Z. Any vector which fails to converge is set to its current */
+/* iterate after MAXITS iterations. */
+/* The imaginary parts of the eigenvectors are set to zero. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= max(1,N). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (5*N) */
+
+/* IWORK (workspace) INTEGER array, dimension (N) */
+
+/* IFAIL (output) INTEGER array, dimension (M) */
+/* On normal exit, all elements of IFAIL are zero. */
+/* If one or more eigenvectors fail to converge after */
+/* MAXITS iterations, then their indices are stored in */
+/* array IFAIL. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, then i eigenvectors failed to converge */
+/* in MAXITS iterations. Their indices are stored in */
+/* array IFAIL. */
+
+/* Internal Parameters */
+/* =================== */
+
+/* MAXITS INTEGER, default = 5 */
+/* The maximum number of iterations performed. */
+
+/* EXTRA INTEGER, default = 2 */
+/* The number of iterations performed after norm growth */
+/* criterion is satisfied, should be at least 1. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --d__;
+    --e;
+    --w;
+    --iblock;
+    --isplit;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+    --work;
+    --iwork;
+    --ifail;
+
+    /* Function Body */
+    *info = 0;
+    i__1 = *m;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	ifail[i__] = 0;
+/* L10: */
+    }
+
+    if (*n < 0) {
+	*info = -1;
+    } else if (*m < 0 || *m > *n) {
+	*info = -4;
+    } else if (*ldz < max(1,*n)) {
+	*info = -9;
+    } else {
+	i__1 = *m;
+	for (j = 2; j <= i__1; ++j) {
+	    if (iblock[j] < iblock[j - 1]) {
+		*info = -6;
+		goto L30;
+	    }
+	    if (iblock[j] == iblock[j - 1] && w[j] < w[j - 1]) {
+		*info = -5;
+		goto L30;
+	    }
+/* L20: */
+	}
+L30:
+	;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZSTEIN", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *m == 0) {
+	return 0;
+    } else if (*n == 1) {
+	i__1 = z_dim1 + 1;
+	z__[i__1].r = 1., z__[i__1].i = 0.;
+	return 0;
+    }
+
+/* Get machine constants. */
+
+    eps = dlamch_("Precision");
+
+/* Initialize seed for random number generator DLARNV. */
+
+    for (i__ = 1; i__ <= 4; ++i__) {
+	iseed[i__ - 1] = 1;
+/* L40: */
+    }
+
+/* Initialize pointers. */
+
+    indrv1 = 0;
+    indrv2 = indrv1 + *n;
+    indrv3 = indrv2 + *n;
+    indrv4 = indrv3 + *n;
+    indrv5 = indrv4 + *n;
+
+/* Compute eigenvectors of matrix blocks. */
+
+    j1 = 1;
+    i__1 = iblock[*m];
+    for (nblk = 1; nblk <= i__1; ++nblk) {
+
+/* Find starting and ending indices of block nblk. */
+
+	if (nblk == 1) {
+	    b1 = 1;
+	} else {
+	    b1 = isplit[nblk - 1] + 1;
+	}
+	bn = isplit[nblk];
+	blksiz = bn - b1 + 1;
+	if (blksiz == 1) {
+	    goto L60;
+	}
+	gpind = b1;
+
+/* Compute reorthogonalization criterion and stopping criterion. */
+
+	onenrm = (d__1 = d__[b1], abs(d__1)) + (d__2 = e[b1], abs(d__2));
+/* Computing MAX */
+	d__3 = onenrm, d__4 = (d__1 = d__[bn], abs(d__1)) + (d__2 = e[bn - 1],
+		 abs(d__2));
+	onenrm = max(d__3,d__4);
+	i__2 = bn - 1;
+	for (i__ = b1 + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    d__4 = onenrm, d__5 = (d__1 = d__[i__], abs(d__1)) + (d__2 = e[
+		    i__ - 1], abs(d__2)) + (d__3 = e[i__], abs(d__3));
+	    onenrm = max(d__4,d__5);
+/* L50: */
+	}
+	ortol = onenrm * .001;
+
+	dtpcrt = sqrt(.1 / blksiz);
+
+/* Loop through eigenvalues of block nblk. */
+
+L60:
+	jblk = 0;
+	i__2 = *m;
+	for (j = j1; j <= i__2; ++j) {
+	    if (iblock[j] != nblk) {
+		j1 = j;
+		goto L180;
+	    }
+	    ++jblk;
+	    xj = w[j];
+
+/* Skip all the work if the block size is one. */
+
+	    if (blksiz == 1) {
+		work[indrv1 + 1] = 1.;
+		goto L140;
+	    }
+
+/* If eigenvalues j and j-1 are too close, add a relatively */
+/* small perturbation. */
+
+	    if (jblk > 1) {
+		eps1 = (d__1 = eps * xj, abs(d__1));
+		pertol = eps1 * 10.;
+		sep = xj - xjm;
+		if (sep < pertol) {
+		    xj = xjm + pertol;
+		}
+	    }
+
+	    its = 0;
+	    nrmchk = 0;
+
+/* Get random starting vector. */
+
+	    dlarnv_(&c__2, iseed, &blksiz, &work[indrv1 + 1]);
+
+/* Copy the matrix T so it won't be destroyed in factorization. */
+
+	    dcopy_(&blksiz, &d__[b1], &c__1, &work[indrv4 + 1], &c__1);
+	    i__3 = blksiz - 1;
+	    dcopy_(&i__3, &e[b1], &c__1, &work[indrv2 + 2], &c__1);
+	    i__3 = blksiz - 1;
+	    dcopy_(&i__3, &e[b1], &c__1, &work[indrv3 + 1], &c__1);
+
+/* Compute LU factors with partial pivoting ( PT = LU ) */
+
+	    tol = 0.;
+	    dlagtf_(&blksiz, &work[indrv4 + 1], &xj, &work[indrv2 + 2], &work[
+		    indrv3 + 1], &tol, &work[indrv5 + 1], &iwork[1], &iinfo);
+
+/* Update iteration count. */
+
+L70:
+	    ++its;
+	    if (its > 5) { /* 5 = MAXITS (see Internal Parameters) */
+		goto L120;
+	    }
+
+/* Normalize and scale the righthand side vector Pb. */
+
+/* Computing MAX */
+	    d__2 = eps, d__3 = (d__1 = work[indrv4 + blksiz], abs(d__1));
+	    scl = blksiz * onenrm * max(d__2,d__3) / dasum_(&blksiz, &work[
+		    indrv1 + 1], &c__1);
+	    dscal_(&blksiz, &scl, &work[indrv1 + 1], &c__1);
+
+/* Solve the system LU = Pb. */
+
+	    dlagts_(&c_n1, &blksiz, &work[indrv4 + 1], &work[indrv2 + 2], &
+		    work[indrv3 + 1], &work[indrv5 + 1], &iwork[1], &work[
+		    indrv1 + 1], &tol, &iinfo);
+
+/* Reorthogonalize by modified Gram-Schmidt if eigenvalues are */
+/* close enough. */
+
+	    if (jblk == 1) {
+		goto L110;
+	    }
+	    if ((d__1 = xj - xjm, abs(d__1)) > ortol) {
+		gpind = j;
+	    }
+	    if (gpind != j) {
+		i__3 = j - 1;
+		for (i__ = gpind; i__ <= i__3; ++i__) {
+		    ztr = 0.;
+		    i__4 = blksiz;
+		    for (jr = 1; jr <= i__4; ++jr) {
+			i__5 = b1 - 1 + jr + i__ * z_dim1;
+			ztr += work[indrv1 + jr] * z__[i__5].r;
+/* L80: */
+		    }
+		    i__4 = blksiz;
+		    for (jr = 1; jr <= i__4; ++jr) {
+			i__5 = b1 - 1 + jr + i__ * z_dim1;
+			work[indrv1 + jr] -= ztr * z__[i__5].r;
+/* L90: */
+		    }
+/* L100: */
+		}
+	    }
+
+/* Check the infinity norm of the iterate. */
+
+L110:
+	    jmax = idamax_(&blksiz, &work[indrv1 + 1], &c__1);
+	    nrm = (d__1 = work[indrv1 + jmax], abs(d__1));
+
+/* Continue for additional iterations after norm reaches */
+/* stopping criterion. */
+
+	    if (nrm < dtpcrt) {
+		goto L70;
+	    }
+	    ++nrmchk;
+	    if (nrmchk < 3) { /* 3 = EXTRA + 1 passes of the norm check */
+		goto L70;
+	    }
+
+	    goto L130;
+
+/* If stopping criterion was not satisfied, update info and */
+/* store eigenvector number in array ifail. */
+
+L120:
+	    ++(*info);
+	    ifail[*info] = j;
+
+/* Accept iterate as jth eigenvector. */
+
+L130:
+	    scl = 1. / dnrm2_(&blksiz, &work[indrv1 + 1], &c__1);
+	    jmax = idamax_(&blksiz, &work[indrv1 + 1], &c__1);
+	    if (work[indrv1 + jmax] < 0.) {
+		scl = -scl;
+	    }
+	    dscal_(&blksiz, &scl, &work[indrv1 + 1], &c__1);
+L140:
+	    i__3 = *n;
+	    for (i__ = 1; i__ <= i__3; ++i__) {
+		i__4 = i__ + j * z_dim1;
+		z__[i__4].r = 0., z__[i__4].i = 0.;
+/* L150: */
+	    }
+	    i__3 = blksiz;
+	    for (i__ = 1; i__ <= i__3; ++i__) {
+		i__4 = b1 + i__ - 1 + j * z_dim1;
+		i__5 = indrv1 + i__;
+		z__1.r = work[i__5], z__1.i = 0.;
+		z__[i__4].r = z__1.r, z__[i__4].i = z__1.i;
+/* L160: */
+	    }
+
+/* Save the shift to check eigenvalue spacing at next */
+/* iteration. */
+
+	    xjm = xj;
+
+/* L170: */
+	}
+L180:
+	;
+    }
+
+    return 0;
+
+/* End of ZSTEIN */
+
+} /* zstein_ */
diff --git a/contrib/libs/clapack/zstemr.c b/contrib/libs/clapack/zstemr.c
new file mode 100644
index 0000000000..5e42fb2414
--- /dev/null
+++ b/contrib/libs/clapack/zstemr.c
@@ -0,0 +1,752 @@
+/* zstemr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublereal c_b18 = .001;
+
+/* Subroutine */ int zstemr_(char *jobz, char *range, integer *n, doublereal *
+ d__, doublereal *e, doublereal *vl, doublereal *vu, integer *il,
+ integer *iu, integer *m, doublereal *w, doublecomplex *z__, integer *
+ ldz, integer *nzc, integer *isuppz, logical *tryrac, doublereal *work,
+ integer *lwork, integer *iwork, integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j;
+ doublereal r1, r2;
+ integer jj;
+ doublereal cs;
+ integer in;
+ doublereal sn, wl, wu;
+ integer iil, iiu;
+ doublereal eps, tmp;
+ integer indd, iend, jblk, wend;
+ doublereal rmin, rmax;
+ integer itmp;
+ doublereal tnrm;
+ extern /* Subroutine */ int dlae2_(doublereal *, doublereal *, doublereal
+ *, doublereal *, doublereal *);
+ integer inde2, itmp2;
+ doublereal rtol1, rtol2;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ doublereal scale;
+ integer indgp;
+ extern logical lsame_(char *, char *);
+ integer iinfo, iindw, ilast;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ integer lwmin;
+ logical wantz;
+ extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), dlaev2_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *);
+ extern doublereal dlamch_(char *);
+ logical alleig;
+ integer ibegin;
+ logical indeig;
+ integer iindbl;
+ logical valeig;
+ extern /* Subroutine */ int dlarrc_(char *, integer *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+ integer *, integer *, integer *), dlarre_(char *,
+ integer *, doublereal *, doublereal *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, integer *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *, integer *);
+ integer wbegin;
+ doublereal safmin;
+ extern /* Subroutine */ int dlarrj_(integer *, doublereal *, doublereal *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *), xerbla_(char *, integer *);
+ doublereal bignum;
+ integer inderr, iindwk, indgrs, offset;
+ extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+ extern /* Subroutine */ int dlarrr_(integer *, doublereal *, doublereal *,
+ integer *), dlasrt_(char *, integer *, doublereal *, integer *);
+ doublereal thresh;
+ integer iinspl, indwrk, ifirst, liwmin, nzcmin;
+ doublereal pivmin;
+ integer nsplit;
+ doublereal smlnum;
+ extern /* Subroutine */ int zlarrv_(integer *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, integer *, integer *,
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, integer *, integer *,
+ doublereal *, doublecomplex *, integer *, integer *, doublereal *,
+ integer *, integer *);
+ logical lquery, zquery;
+
+
+/* -- LAPACK computational routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSTEMR computes selected eigenvalues and, optionally, eigenvectors */
+/* of a real symmetric tridiagonal matrix T. Any such unreduced matrix has */
+/* a well defined set of pairwise different real eigenvalues, the corresponding */
+/* real eigenvectors are pairwise orthogonal. */
+
+/* The spectrum may be computed either completely or partially by specifying */
+/* either an interval (VL,VU] or a range of indices IL:IU for the desired */
+/* eigenvalues. */
+
+/* Depending on the number of desired eigenvalues, these are computed either */
+/* by bisection or the dqds algorithm. Numerically orthogonal eigenvectors are */
+/* computed by the use of various suitable L D L^T factorizations near clusters */
+/* of close eigenvalues (referred to as RRRs, Relatively Robust */
+/* Representations). An informal sketch of the algorithm follows. */
+
+/* For each unreduced block (submatrix) of T, */
+/* (a) Compute T - sigma I = L D L^T, so that L and D */
+/* define all the wanted eigenvalues to high relative accuracy. */
+/* This means that small relative changes in the entries of D and L */
+/* cause only small relative changes in the eigenvalues and */
+/* eigenvectors. The standard (unfactored) representation of the */
+/* tridiagonal matrix T does not have this property in general. */
+/* (b) Compute the eigenvalues to suitable accuracy. */
+/* If the eigenvectors are desired, the algorithm attains full */
+/* accuracy of the computed eigenvalues only right before */
+/* the corresponding vectors have to be computed, see steps c) and d). */
+/* (c) For each cluster of close eigenvalues, select a new */
+/* shift close to the cluster, find a new factorization, and refine */
+/* the shifted eigenvalues to suitable accuracy. */
+/* (d) For each eigenvalue with a large enough relative separation compute */
+/* the corresponding eigenvector by forming a rank revealing twisted */
+/* factorization. Go back to (c) for any clusters that remain. */
+
+/* For more details, see: */
+/* - Inderjit S. Dhillon and Beresford N. Parlett: "Multiple representations */
+/* to compute orthogonal eigenvectors of symmetric tridiagonal matrices," */
+/* Linear Algebra and its Applications, 387(1), pp. 1-28, August 2004. */
+/* - Inderjit Dhillon and Beresford Parlett: "Orthogonal Eigenvectors and */
+/* Relative Gaps," SIAM Journal on Matrix Analysis and Applications, Vol. 25, */
+/* 2004. Also LAPACK Working Note 154. */
+/* - Inderjit Dhillon: "A new O(n^2) algorithm for the symmetric */
+/* tridiagonal eigenvalue/eigenvector problem", */
+/* Computer Science Division Technical Report No. UCB/CSD-97-971, */
+/* UC Berkeley, May 1997. */
+
+/* Notes: */
+/* 1.ZSTEMR works only on machines which follow IEEE-754 */
+/* floating-point standard in their handling of infinities and NaNs. */
+/* This permits the use of efficient inner loops avoiding a check for */
+/* zero divisors. */
+
+/* 2. LAPACK routines can be used to reduce a complex Hermitian matrix to */
+/* real symmetric tridiagonal form. */
+
+/* (Any complex Hermitian tridiagonal matrix has real values on its diagonal */
+/* and potentially complex numbers on its off-diagonals. By applying a */
+/* similarity transform with an appropriate diagonal matrix */
+/* diag(1,e^{i \phi_1}, ... , e^{i \phi_{n-1}}), the complex Hermitian */
+/* matrix can be transformed into a real symmetric matrix and complex */
+/* arithmetic can be entirely avoided.) */
+
+/* While the eigenvectors of the real symmetric tridiagonal matrix are real, */
+/* the eigenvectors of the original complex Hermitian matrix have complex entries */
+/* in general. */
+/* Since LAPACK drivers overwrite the matrix data with the eigenvectors, */
+/* ZSTEMR accepts complex workspace to facilitate interoperability */
+/* with ZUNMTR or ZUPMTR. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only; */
+/* = 'V': Compute eigenvalues and eigenvectors. */
+
+/* RANGE (input) CHARACTER*1 */
+/* = 'A': all eigenvalues will be found. */
+/* = 'V': all eigenvalues in the half-open interval (VL,VU] */
+/* will be found. */
+/* = 'I': the IL-th through IU-th eigenvalues will be found. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the N diagonal elements of the tridiagonal matrix */
+/* T. On exit, D is overwritten. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the (N-1) subdiagonal elements of the tridiagonal */
+/* matrix T in elements 1 to N-1 of E. E(N) need not be set on */
+/* input, but is used internally as workspace. */
+/* On exit, E is overwritten. */
+
+/* VL (input) DOUBLE PRECISION */
+/* VU (input) DOUBLE PRECISION */
+/* If RANGE='V', the lower and upper bounds of the interval to */
+/* be searched for eigenvalues. VL < VU. */
+/* Not referenced if RANGE = 'A' or 'I'. */
+
+/* IL (input) INTEGER */
+/* IU (input) INTEGER */
+/* If RANGE='I', the indices (in ascending order) of the */
+/* smallest and largest eigenvalues to be returned. */
+/* 1 <= IL <= IU <= N, if N > 0. */
+/* Not referenced if RANGE = 'A' or 'V'. */
+
+/* M (output) INTEGER */
+/* The total number of eigenvalues found. 0 <= M <= N. */
+/* If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */
+
+/* W (output) DOUBLE PRECISION array, dimension (N) */
+/* The first M elements contain the selected eigenvalues in */
+/* ascending order. */
+
+/* Z (output) COMPLEX*16 array, dimension (LDZ, max(1,M) ) */
+/* If JOBZ = 'V', and if INFO = 0, then the first M columns of Z */
+/* contain the orthonormal eigenvectors of the matrix T */
+/* corresponding to the selected eigenvalues, with the i-th */
+/* column of Z holding the eigenvector associated with W(i). */
+/* If JOBZ = 'N', then Z is not referenced. */
+/* Note: the user must ensure that at least max(1,M) columns are */
+/* supplied in the array Z; if RANGE = 'V', the exact value of M */
+/* is not known in advance and can be computed with a workspace */
+/* query by setting NZC = -1, see below. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* JOBZ = 'V', then LDZ >= max(1,N). */
+
+/* NZC (input) INTEGER */
+/* The number of eigenvectors to be held in the array Z. */
+/* If RANGE = 'A', then NZC >= max(1,N). */
+/* If RANGE = 'V', then NZC >= the number of eigenvalues in (VL,VU]. */
+/* If RANGE = 'I', then NZC >= IU-IL+1. */
+/* If NZC = -1, then a workspace query is assumed; the */
+/* routine calculates the number of columns of the array Z that */
+/* are needed to hold the eigenvectors. */
+/* This value is returned as the first entry of the Z array, and */
+/* no error message related to NZC is issued by XERBLA. */
+
+/* ISUPPZ (output) INTEGER ARRAY, dimension ( 2*max(1,M) ) */
+/* The support of the eigenvectors in Z, i.e., the indices */
+/* indicating the nonzero elements in Z. The i-th computed eigenvector */
+/* is nonzero only in elements ISUPPZ( 2*i-1 ) through */
+/* ISUPPZ( 2*i ). This is relevant in the case when the matrix */
+/* is split. ISUPPZ is only accessed when JOBZ is 'V' and N > 0. */
+
+/* TRYRAC (input/output) LOGICAL */
+/* If TRYRAC.EQ..TRUE., indicates that the code should check whether */
+/* the tridiagonal matrix defines its eigenvalues to high relative */
+/* accuracy. If so, the code uses relative-accuracy preserving */
+/* algorithms that might be (a bit) slower depending on the matrix. */
+/* If the matrix does not define its eigenvalues to high relative */
+/* accuracy, the code can use possibly faster algorithms. */
+/* If TRYRAC.EQ..FALSE., the code is not required to guarantee */
+/* relatively accurate eigenvalues and can use the fastest possible */
+/* techniques. */
+/* On exit, a .TRUE. TRYRAC will be set to .FALSE. if the matrix */
+/* does not define its eigenvalues to high relative accuracy. */
+
+/* WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal */
+/* (and minimal) LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,18*N) */
+/* if JOBZ = 'V', and LWORK >= max(1,12*N) if JOBZ = 'N'. */
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (LIWORK) */
+/* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. LIWORK >= max(1,10*N) */
+/* if the eigenvectors are desired, and LIWORK >= max(1,8*N) */
+/* if only the eigenvalues are to be computed. */
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal size of the IWORK array, */
+/* returns this value as the first entry of the IWORK array, and */
+/* no error message related to LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* On exit, INFO */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = 1X, internal error in DLARRE, */
+/* if INFO = 2X, internal error in ZLARRV. */
+/* Here, the digit X = ABS( IINFO ) < 10, where IINFO is */
+/* the nonzero error code returned by DLARRE or */
+/* ZLARRV, respectively. */
+
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Beresford Parlett, University of California, Berkeley, USA */
+/* Jim Demmel, University of California, Berkeley, USA */
+/* Inderjit Dhillon, University of Texas, Austin, USA */
+/* Osni Marques, LBNL/NERSC, USA */
+/* Christof Voemel, University of California, Berkeley, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --isuppz;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ alleig = lsame_(range, "A");
+ valeig = lsame_(range, "V");
+ indeig = lsame_(range, "I");
+
+ lquery = *lwork == -1 || *liwork == -1;
+ zquery = *nzc == -1;
+/* DSTEMR needs WORK of size 6*N, IWORK of size 3*N. */
+/* In addition, DLARRE needs WORK of size 6*N, IWORK of size 5*N. */
+/* Furthermore, ZLARRV needs WORK of size 12*N, IWORK of size 7*N. */
+ if (wantz) {
+ lwmin = *n * 18;
+ liwmin = *n * 10;
+ } else {
+/* need less workspace if only the eigenvalues are wanted */
+ lwmin = *n * 12;
+ liwmin = *n << 3;
+ }
+ wl = 0.;
+ wu = 0.;
+ iil = 0;
+ iiu = 0;
+ if (valeig) {
+/* We do not reference VL, VU in the cases RANGE = 'I','A' */
+/* The interval (WL, WU] contains all the wanted eigenvalues. */
+/* It is either given by the user or computed in DLARRE. */
+ wl = *vl;
+ wu = *vu;
+ } else if (indeig) {
+/* We do not reference IL, IU in the cases RANGE = 'V','A' */
+ iil = *il;
+ iiu = *iu;
+ }
+
+ *info = 0;
+ if (! (wantz || lsame_(jobz, "N"))) {
+ *info = -1;
+ } else if (! (alleig || valeig || indeig)) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (valeig && *n > 0 && wu <= wl) {
+ *info = -7;
+ } else if (indeig && (iil < 1 || iil > *n)) {
+ *info = -8;
+ } else if (indeig && (iiu < iil || iiu > *n)) {
+ *info = -9;
+ } else if (*ldz < 1 || wantz && *ldz < *n) {
+ *info = -13;
+ } else if (*lwork < lwmin && ! lquery) {
+ *info = -17;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -19;
+ }
+
+/* Get machine constants. */
+
+ safmin = dlamch_("Safe minimum");
+ eps = dlamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1. / smlnum;
+ rmin = sqrt(smlnum);
+/* Computing MIN */
+ d__1 = sqrt(bignum), d__2 = 1. / sqrt(sqrt(safmin));
+ rmax = min(d__1,d__2);
+
+ if (*info == 0) {
+ work[1] = (doublereal) lwmin;
+ iwork[1] = liwmin;
+
+ if (wantz && alleig) {
+ nzcmin = *n;
+ } else if (wantz && valeig) {
+ dlarrc_("T", n, vl, vu, &d__[1], &e[1], &safmin, &nzcmin, &itmp, &
+ itmp2, info);
+ } else if (wantz && indeig) {
+ nzcmin = iiu - iil + 1;
+ } else {
+/* WANTZ .EQ. FALSE. */
+ nzcmin = 0;
+ }
+ if (zquery && *info == 0) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = (doublereal) nzcmin, z__[i__1].i = 0.;
+ } else if (*nzc < nzcmin && ! zquery) {
+ *info = -14;
+ }
+ }
+ if (*info != 0) {
+
+ i__1 = -(*info);
+ xerbla_("ZSTEMR", &i__1);
+
+ return 0;
+ } else if (lquery || zquery) {
+ return 0;
+ }
+
+/* Handle N = 0, 1, and 2 cases immediately */
+
+ *m = 0;
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (alleig || indeig) {
+ *m = 1;
+ w[1] = d__[1];
+ } else {
+ if (wl < d__[1] && wu >= d__[1]) {
+ *m = 1;
+ w[1] = d__[1];
+ }
+ }
+ if (wantz && ! zquery) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1., z__[i__1].i = 0.;
+ isuppz[1] = 1;
+ isuppz[2] = 1;
+ }
+ return 0;
+ }
+
+ if (*n == 2) {
+ if (! wantz) {
+ dlae2_(&d__[1], &e[1], &d__[2], &r1, &r2);
+ } else if (wantz && ! zquery) {
+ dlaev2_(&d__[1], &e[1], &d__[2], &r1, &r2, &cs, &sn);
+ }
+ if (alleig || valeig && r2 > wl && r2 <= wu || indeig && iil == 1) {
+ ++(*m);
+ w[*m] = r2;
+ if (wantz && ! zquery) {
+ i__1 = *m * z_dim1 + 1;
+ d__1 = -sn;
+ z__[i__1].r = d__1, z__[i__1].i = 0.;
+ i__1 = *m * z_dim1 + 2;
+ z__[i__1].r = cs, z__[i__1].i = 0.;
+/* Note: At most one of SN and CS can be zero. */
+ if (sn != 0.) {
+ if (cs != 0.) {
+ isuppz[(*m << 1) - 1] = 1;
+ isuppz[(*m << 1) - 1] = 2;
+ } else {
+ isuppz[(*m << 1) - 1] = 1;
+ isuppz[(*m << 1) - 1] = 1;
+ }
+ } else {
+ isuppz[(*m << 1) - 1] = 2;
+ isuppz[*m * 2] = 2;
+ }
+ }
+ }
+ if (alleig || valeig && r1 > wl && r1 <= wu || indeig && iiu == 2) {
+ ++(*m);
+ w[*m] = r1;
+ if (wantz && ! zquery) {
+ i__1 = *m * z_dim1 + 1;
+ z__[i__1].r = cs, z__[i__1].i = 0.;
+ i__1 = *m * z_dim1 + 2;
+ z__[i__1].r = sn, z__[i__1].i = 0.;
+/* Note: At most one of SN and CS can be zero. */
+ if (sn != 0.) {
+ if (cs != 0.) {
+ isuppz[(*m << 1) - 1] = 1;
+ isuppz[(*m << 1) - 1] = 2;
+ } else {
+ isuppz[(*m << 1) - 1] = 1;
+ isuppz[(*m << 1) - 1] = 1;
+ }
+ } else {
+ isuppz[(*m << 1) - 1] = 2;
+ isuppz[*m * 2] = 2;
+ }
+ }
+ }
+ return 0;
+ }
+/* Continue with general N */
+ indgrs = 1;
+ inderr = (*n << 1) + 1;
+ indgp = *n * 3 + 1;
+ indd = (*n << 2) + 1;
+ inde2 = *n * 5 + 1;
+ indwrk = *n * 6 + 1;
+
+ iinspl = 1;
+ iindbl = *n + 1;
+ iindw = (*n << 1) + 1;
+ iindwk = *n * 3 + 1;
+
+/* Scale matrix to allowable range, if necessary. */
+/* The allowable range is related to the PIVMIN parameter; see the */
+/* comments in DLARRD. The preference for scaling small values */
+/* up is heuristic; we expect users' matrices not to be close to the */
+/* RMAX threshold. */
+
+ scale = 1.;
+ tnrm = dlanst_("M", n, &d__[1], &e[1]);
+ if (tnrm > 0. && tnrm < rmin) {
+ scale = rmin / tnrm;
+ } else if (tnrm > rmax) {
+ scale = rmax / tnrm;
+ }
+ if (scale != 1.) {
+ dscal_(n, &scale, &d__[1], &c__1);
+ i__1 = *n - 1;
+ dscal_(&i__1, &scale, &e[1], &c__1);
+ tnrm *= scale;
+ if (valeig) {
+/* If eigenvalues in interval have to be found, */
+/* scale (WL, WU] accordingly */
+ wl *= scale;
+ wu *= scale;
+ }
+ }
+
+/* Compute the desired eigenvalues of the tridiagonal after splitting */
+/* into smaller subblocks if the corresponding off-diagonal elements */
+/* are small */
+/* THRESH is the splitting parameter for DLARRE */
+/* A negative THRESH forces the old splitting criterion based on the */
+/* size of the off-diagonal. A positive THRESH switches to splitting */
+/* which preserves relative accuracy. */
+
+ if (*tryrac) {
+/* Test whether the matrix warrants the more expensive relative approach. */
+ dlarrr_(n, &d__[1], &e[1], &iinfo);
+ } else {
+/* The user does not care about relatively accurate eigenvalues */
+ iinfo = -1;
+ }
+/* Set the splitting criterion */
+ if (iinfo == 0) {
+ thresh = eps;
+ } else {
+ thresh = -eps;
+/* relative accuracy is desired but T does not guarantee it */
+ *tryrac = FALSE_;
+ }
+
+ if (*tryrac) {
+/* Copy original diagonal, needed to guarantee relative accuracy */
+ dcopy_(n, &d__[1], &c__1, &work[indd], &c__1);
+ }
+/* Store the squares of the offdiagonal values of T */
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing 2nd power */
+ d__1 = e[j];
+ work[inde2 + j - 1] = d__1 * d__1;
+/* L5: */
+ }
+/* Set the tolerance parameters for bisection */
+ if (! wantz) {
+/* DLARRE computes the eigenvalues to full precision. */
+ rtol1 = eps * 4.;
+ rtol2 = eps * 4.;
+ } else {
+/* DLARRE computes the eigenvalues to less than full precision. */
+/* ZLARRV will refine the eigenvalue approximations, and we only */
+/* need less accurate initial bisection in DLARRE. */
+/* Note: these settings do only affect the subset case and DLARRE */
+ rtol1 = sqrt(eps);
+/* Computing MAX */
+ d__1 = sqrt(eps) * .005, d__2 = eps * 4.;
+ rtol2 = max(d__1,d__2);
+ }
+ dlarre_(range, n, &wl, &wu, &iil, &iiu, &d__[1], &e[1], &work[inde2], &
+ rtol1, &rtol2, &thresh, &nsplit, &iwork[iinspl], m, &w[1], &work[
+ inderr], &work[indgp], &iwork[iindbl], &iwork[iindw], &work[
+ indgrs], &pivmin, &work[indwrk], &iwork[iindwk], &iinfo);
+ if (iinfo != 0) {
+ *info = abs(iinfo) + 10;
+ return 0;
+ }
+/* Note that if RANGE .NE. 'V', DLARRE computes bounds on the desired */
+/* part of the spectrum. All desired eigenvalues are contained in */
+/* (WL,WU] */
+ if (wantz) {
+
+/* Compute the desired eigenvectors corresponding to the computed */
+/* eigenvalues */
+
+ zlarrv_(n, &wl, &wu, &d__[1], &e[1], &pivmin, &iwork[iinspl], m, &
+ c__1, m, &c_b18, &rtol1, &rtol2, &w[1], &work[inderr], &work[
+ indgp], &iwork[iindbl], &iwork[iindw], &work[indgrs], &z__[
+ z_offset], ldz, &isuppz[1], &work[indwrk], &iwork[iindwk], &
+ iinfo);
+ if (iinfo != 0) {
+ *info = abs(iinfo) + 20;
+ return 0;
+ }
+ } else {
+/* DLARRE computes eigenvalues of the (shifted) root representation */
+/* ZLARRV returns the eigenvalues of the unshifted matrix. */
+/* However, if the eigenvectors are not desired by the user, we need */
+/* to apply the corresponding shifts from DLARRE to obtain the */
+/* eigenvalues of the original matrix. */
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ itmp = iwork[iindbl + j - 1];
+ w[j] += e[iwork[iinspl + itmp - 1]];
+/* L20: */
+ }
+ }
+
+ if (*tryrac) {
+/* Refine computed eigenvalues so that they are relatively accurate */
+/* with respect to the original matrix T. */
+ ibegin = 1;
+ wbegin = 1;
+ i__1 = iwork[iindbl + *m - 1];
+ for (jblk = 1; jblk <= i__1; ++jblk) {
+ iend = iwork[iinspl + jblk - 1];
+ in = iend - ibegin + 1;
+ wend = wbegin - 1;
+/* check if any eigenvalues have to be refined in this block */
+L36:
+ if (wend < *m) {
+ if (iwork[iindbl + wend] == jblk) {
+ ++wend;
+ goto L36;
+ }
+ }
+ if (wend < wbegin) {
+ ibegin = iend + 1;
+ goto L39;
+ }
+ offset = iwork[iindw + wbegin - 1] - 1;
+ ifirst = iwork[iindw + wbegin - 1];
+ ilast = iwork[iindw + wend - 1];
+ rtol2 = eps * 4.;
+ dlarrj_(&in, &work[indd + ibegin - 1], &work[inde2 + ibegin - 1],
+ &ifirst, &ilast, &rtol2, &offset, &w[wbegin], &work[
+ inderr + wbegin - 1], &work[indwrk], &iwork[iindwk], &
+ pivmin, &tnrm, &iinfo);
+ ibegin = iend + 1;
+ wbegin = wend + 1;
+L39:
+ ;
+ }
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+ if (scale != 1.) {
+ d__1 = 1. / scale;
+ dscal_(m, &d__1, &w[1], &c__1);
+ }
+
+/* If eigenvalues are not in increasing order, then sort them, */
+/* possibly along with eigenvectors. */
+
+ if (nsplit > 1) {
+ if (! wantz) {
+ dlasrt_("I", m, &w[1], &iinfo);
+ if (iinfo != 0) {
+ *info = 3;
+ return 0;
+ }
+ } else {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = 0;
+ tmp = w[j];
+ i__2 = *m;
+ for (jj = j + 1; jj <= i__2; ++jj) {
+ if (w[jj] < tmp) {
+ i__ = jj;
+ tmp = w[jj];
+ }
+/* L50: */
+ }
+ if (i__ != 0) {
+ w[i__] = w[j];
+ w[j] = tmp;
+ if (wantz) {
+ zswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j *
+ z_dim1 + 1], &c__1);
+ itmp = isuppz[(i__ << 1) - 1];
+ isuppz[(i__ << 1) - 1] = isuppz[(j << 1) - 1];
+ isuppz[(j << 1) - 1] = itmp;
+ itmp = isuppz[i__ * 2];
+ isuppz[i__ * 2] = isuppz[j * 2];
+ isuppz[j * 2] = itmp;
+ }
+ }
+/* L60: */
+ }
+ }
+ }
+
+
+ work[1] = (doublereal) lwmin;
+ iwork[1] = liwmin;
+ return 0;
+
+/* End of ZSTEMR */
+
+} /* zstemr_ */
diff --git a/contrib/libs/clapack/zsteqr.c b/contrib/libs/clapack/zsteqr.c
new file mode 100644
index 0000000000..154a20b1a0
--- /dev/null
+++ b/contrib/libs/clapack/zsteqr.c
@@ -0,0 +1,621 @@
+/* zsteqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__0 = 0;
+static integer c__1 = 1;
+static integer c__2 = 2;
+static doublereal c_b41 = 1.;
+
+/* Subroutine */ int zsteqr_(char *compz, integer *n, doublereal *d__,
+ doublereal *e, doublecomplex *z__, integer *ldz, doublereal *work,
+ integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ doublereal b, c__, f, g;
+ integer i__, j, k, l, m;
+ doublereal p, r__, s;
+ integer l1, ii, mm, lm1, mm1, nm1;
+ doublereal rt1, rt2, eps;
+ integer lsv;
+ doublereal tst, eps2;
+ integer lend, jtot;
+ extern /* Subroutine */ int dlae2_(doublereal *, doublereal *, doublereal
+ *, doublereal *, doublereal *);
+ extern logical lsame_(char *, char *);
+ doublereal anorm;
+ extern /* Subroutine */ int zlasr_(char *, char *, char *, integer *,
+ integer *, doublereal *, doublereal *, doublecomplex *, integer *), zswap_(integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *), dlaev2_(doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *);
+ integer lendm1, lendp1;
+ extern doublereal dlapy2_(doublereal *, doublereal *), dlamch_(char *);
+ integer iscale;
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *);
+ doublereal safmin;
+ extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *);
+ doublereal safmax;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+ extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *,
+ integer *);
+ integer lendsv;
+ doublereal ssfmin;
+ integer nmaxit, icompz;
+ doublereal ssfmax;
+ extern /* Subroutine */ int zlaset_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, doublecomplex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSTEQR computes all eigenvalues and, optionally, eigenvectors of a */
+/* symmetric tridiagonal matrix using the implicit QL or QR method. */
+/* The eigenvectors of a full or band complex Hermitian matrix can also */
+/* be found if ZHETRD or ZHPTRD or ZHBTRD has been used to reduce this */
+/* matrix to tridiagonal form. */
+
+/* Arguments */
+/* ========= */
+
+/* COMPZ (input) CHARACTER*1 */
+/* = 'N': Compute eigenvalues only. */
+/* = 'V': Compute eigenvalues and eigenvectors of the original */
+/* Hermitian matrix. On entry, Z must contain the */
+/* unitary matrix used to reduce the original matrix */
+/* to tridiagonal form. */
+/* = 'I': Compute eigenvalues and eigenvectors of the */
+/* tridiagonal matrix. Z is initialized to the identity */
+/* matrix. */
+
+/* N (input) INTEGER */
+/* The order of the matrix. N >= 0. */
+
+/* D (input/output) DOUBLE PRECISION array, dimension (N) */
+/* On entry, the diagonal elements of the tridiagonal matrix. */
+/* On exit, if INFO = 0, the eigenvalues in ascending order. */
+
+/* E (input/output) DOUBLE PRECISION array, dimension (N-1) */
+/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
+/* matrix. */
+/* On exit, E has been destroyed. */
+
+/* Z (input/output) COMPLEX*16 array, dimension (LDZ, N) */
+/* On entry, if COMPZ = 'V', then Z contains the unitary */
+/* matrix used in the reduction to tridiagonal form. */
+/* On exit, if INFO = 0, then if COMPZ = 'V', Z contains the */
+/* orthonormal eigenvectors of the original Hermitian matrix, */
+/* and if COMPZ = 'I', Z contains the orthonormal eigenvectors */
+/* of the symmetric tridiagonal matrix. */
+/* If COMPZ = 'N', then Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1, and if */
+/* eigenvectors are desired, then LDZ >= max(1,N). */
+
+/* WORK (workspace) DOUBLE PRECISION array, dimension (max(1,2*N-2)) */
+/* If COMPZ = 'N', then WORK is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: the algorithm has failed to find all the eigenvalues in */
+/* a total of 30*N iterations; if INFO = i, then i */
+/* elements of E have not converged to zero; on exit, D */
+/* and E contain the elements of a symmetric tridiagonal */
+/* matrix which is unitarily similar to the original */
+/* matrix. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (lsame_(compz, "N")) {
+ icompz = 0;
+ } else if (lsame_(compz, "V")) {
+ icompz = 1;
+ } else if (lsame_(compz, "I")) {
+ icompz = 2;
+ } else {
+ icompz = -1;
+ }
+ if (icompz < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldz < 1 || icompz > 0 && *ldz < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZSTEQR", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (icompz == 2) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1., z__[i__1].i = 0.;
+ }
+ return 0;
+ }
+
+/* Determine the unit roundoff and over/underflow thresholds. */
+
+ eps = dlamch_("E");
+/* Computing 2nd power */
+ d__1 = eps;
+ eps2 = d__1 * d__1;
+ safmin = dlamch_("S");
+ safmax = 1. / safmin;
+ ssfmax = sqrt(safmax) / 3.;
+ ssfmin = sqrt(safmin) / eps2;
+
+/* Compute the eigenvalues and eigenvectors of the tridiagonal */
+/* matrix. */
+
+ if (icompz == 2) {
+ zlaset_("Full", n, n, &c_b1, &c_b2, &z__[z_offset], ldz);
+ }
+
+ nmaxit = *n * 30;
+ jtot = 0;
+
+/* Determine where the matrix splits and choose QL or QR iteration */
+/* for each block, according to whether top or bottom diagonal */
+/* element is smaller. */
+
+ l1 = 1;
+ nm1 = *n - 1;
+
+L10:
+ if (l1 > *n) {
+ goto L160;
+ }
+ if (l1 > 1) {
+ e[l1 - 1] = 0.;
+ }
+ if (l1 <= nm1) {
+ i__1 = nm1;
+ for (m = l1; m <= i__1; ++m) {
+ tst = (d__1 = e[m], abs(d__1));
+ if (tst == 0.) {
+ goto L30;
+ }
+ if (tst <= sqrt((d__1 = d__[m], abs(d__1))) * sqrt((d__2 = d__[m
+ + 1], abs(d__2))) * eps) {
+ e[m] = 0.;
+ goto L30;
+ }
+/* L20: */
+ }
+ }
+ m = *n;
+
+L30:
+ l = l1;
+ lsv = l;
+ lend = m;
+ lendsv = lend;
+ l1 = m + 1;
+ if (lend == l) {
+ goto L10;
+ }
+
+/* Scale submatrix in rows and columns L to LEND */
+
+ i__1 = lend - l + 1;
+ anorm = dlanst_("I", &i__1, &d__[l], &e[l]);
+ iscale = 0;
+ if (anorm == 0.) {
+ goto L10;
+ }
+ if (anorm > ssfmax) {
+ iscale = 1;
+ i__1 = lend - l + 1;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n,
+ info);
+ } else if (anorm < ssfmin) {
+ iscale = 2;
+ i__1 = lend - l + 1;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n,
+ info);
+ }
+
+/* Choose between QL and QR iteration */
+
+ if ((d__1 = d__[lend], abs(d__1)) < (d__2 = d__[l], abs(d__2))) {
+ lend = lsv;
+ l = lendsv;
+ }
+
+ if (lend > l) {
+
+/* QL Iteration */
+
+/* Look for small subdiagonal element. */
+
+L40:
+ if (l != lend) {
+ lendm1 = lend - 1;
+ i__1 = lendm1;
+ for (m = l; m <= i__1; ++m) {
+/* Computing 2nd power */
+ d__2 = (d__1 = e[m], abs(d__1));
+ tst = d__2 * d__2;
+ if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m
+ + 1], abs(d__2)) + safmin) {
+ goto L60;
+ }
+/* L50: */
+ }
+ }
+
+ m = lend;
+
+L60:
+ if (m < lend) {
+ e[m] = 0.;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L80;
+ }
+
+/* If remaining matrix is 2-by-2, use DLAE2 or DLAEV2 */
+/* to compute its eigensystem. */
+
+ if (m == l + 1) {
+ if (icompz > 0) {
+ dlaev2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2, &c__, &s);
+ work[l] = c__;
+ work[*n - 1 + l] = s;
+ zlasr_("R", "V", "B", n, &c__2, &work[l], &work[*n - 1 + l], &
+ z__[l * z_dim1 + 1], ldz);
+ } else {
+ dlae2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2);
+ }
+ d__[l] = rt1;
+ d__[l + 1] = rt2;
+ e[l] = 0.;
+ l += 2;
+ if (l <= lend) {
+ goto L40;
+ }
+ goto L140;
+ }
+
+ if (jtot == nmaxit) {
+ goto L140;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ g = (d__[l + 1] - p) / (e[l] * 2.);
+ r__ = dlapy2_(&g, &c_b41);
+ g = d__[m] - p + e[l] / (g + d_sign(&r__, &g));
+
+ s = 1.;
+ c__ = 1.;
+ p = 0.;
+
+/* Inner loop */
+
+ mm1 = m - 1;
+ i__1 = l;
+ for (i__ = mm1; i__ >= i__1; --i__) {
+ f = s * e[i__];
+ b = c__ * e[i__];
+ dlartg_(&g, &f, &c__, &s, &r__);
+ if (i__ != m - 1) {
+ e[i__ + 1] = r__;
+ }
+ g = d__[i__ + 1] - p;
+ r__ = (d__[i__] - g) * s + c__ * 2. * b;
+ p = s * r__;
+ d__[i__ + 1] = g + p;
+ g = c__ * r__ - b;
+
+/* If eigenvectors are desired, then save rotations. */
+
+ if (icompz > 0) {
+ work[i__] = c__;
+ work[*n - 1 + i__] = -s;
+ }
+
+/* L70: */
+ }
+
+/* If eigenvectors are desired, then apply saved rotations. */
+
+ if (icompz > 0) {
+ mm = m - l + 1;
+ zlasr_("R", "V", "B", n, &mm, &work[l], &work[*n - 1 + l], &z__[l
+ * z_dim1 + 1], ldz);
+ }
+
+ d__[l] -= p;
+ e[l] = g;
+ goto L40;
+
+/* Eigenvalue found. */
+
+L80:
+ d__[l] = p;
+
+ ++l;
+ if (l <= lend) {
+ goto L40;
+ }
+ goto L140;
+
+ } else {
+
+/* QR Iteration */
+
+/* Look for small superdiagonal element. */
+
+L90:
+ if (l != lend) {
+ lendp1 = lend + 1;
+ i__1 = lendp1;
+ for (m = l; m >= i__1; --m) {
+/* Computing 2nd power */
+ d__2 = (d__1 = e[m - 1], abs(d__1));
+ tst = d__2 * d__2;
+ if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m
+ - 1], abs(d__2)) + safmin) {
+ goto L110;
+ }
+/* L100: */
+ }
+ }
+
+ m = lend;
+
+L110:
+ if (m > lend) {
+ e[m - 1] = 0.;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L130;
+ }
+
+/* If remaining matrix is 2-by-2, use DLAE2 or DLAEV2 */
+/* to compute its eigensystem. */
+
+ if (m == l - 1) {
+ if (icompz > 0) {
+ dlaev2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2, &c__, &s)
+ ;
+ work[m] = c__;
+ work[*n - 1 + m] = s;
+ zlasr_("R", "V", "F", n, &c__2, &work[m], &work[*n - 1 + m], &
+ z__[(l - 1) * z_dim1 + 1], ldz);
+ } else {
+ dlae2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2);
+ }
+ d__[l - 1] = rt1;
+ d__[l] = rt2;
+ e[l - 1] = 0.;
+ l += -2;
+ if (l >= lend) {
+ goto L90;
+ }
+ goto L140;
+ }
+
+ if (jtot == nmaxit) {
+ goto L140;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ g = (d__[l - 1] - p) / (e[l - 1] * 2.);
+ r__ = dlapy2_(&g, &c_b41);
+ g = d__[m] - p + e[l - 1] / (g + d_sign(&r__, &g));
+
+ s = 1.;
+ c__ = 1.;
+ p = 0.;
+
+/* Inner loop */
+
+ lm1 = l - 1;
+ i__1 = lm1;
+ for (i__ = m; i__ <= i__1; ++i__) {
+ f = s * e[i__];
+ b = c__ * e[i__];
+ dlartg_(&g, &f, &c__, &s, &r__);
+ if (i__ != m) {
+ e[i__ - 1] = r__;
+ }
+ g = d__[i__] - p;
+ r__ = (d__[i__ + 1] - g) * s + c__ * 2. * b;
+ p = s * r__;
+ d__[i__] = g + p;
+ g = c__ * r__ - b;
+
+/* If eigenvectors are desired, then save rotations. */
+
+ if (icompz > 0) {
+ work[i__] = c__;
+ work[*n - 1 + i__] = s;
+ }
+
+/* L120: */
+ }
+
+/* If eigenvectors are desired, then apply saved rotations. */
+
+ if (icompz > 0) {
+ mm = l - m + 1;
+ zlasr_("R", "V", "F", n, &mm, &work[m], &work[*n - 1 + m], &z__[m
+ * z_dim1 + 1], ldz);
+ }
+
+ d__[l] -= p;
+ e[lm1] = g;
+ goto L90;
+
+/* Eigenvalue found. */
+
+L130:
+ d__[l] = p;
+
+ --l;
+ if (l >= lend) {
+ goto L90;
+ }
+ goto L140;
+
+ }
+
+/* Undo scaling if necessary */
+
+L140:
+ if (iscale == 1) {
+ i__1 = lendsv - lsv + 1;
+ dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ i__1 = lendsv - lsv;
+ dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &e[lsv], n,
+ info);
+ } else if (iscale == 2) {
+ i__1 = lendsv - lsv + 1;
+ dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ i__1 = lendsv - lsv;
+ dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &e[lsv], n,
+ info);
+ }
+
+/* Check for no convergence to an eigenvalue after a total */
+/* of N*MAXIT iterations. */
+
+ if (jtot == nmaxit) {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (e[i__] != 0.) {
+ ++(*info);
+ }
+/* L150: */
+ }
+ return 0;
+ }
+ goto L10;
+
+/* Order eigenvalues and eigenvectors. */
+
+L160:
+ if (icompz == 0) {
+
+/* Use Quick Sort */
+
+ dlasrt_("I", n, &d__[1], info);
+
+ } else {
+
+/* Use Selection Sort to minimize swaps of eigenvectors */
+
+ i__1 = *n;
+ for (ii = 2; ii <= i__1; ++ii) {
+ i__ = ii - 1;
+ k = i__;
+ p = d__[i__];
+ i__2 = *n;
+ for (j = ii; j <= i__2; ++j) {
+ if (d__[j] < p) {
+ k = j;
+ p = d__[j];
+ }
+/* L170: */
+ }
+ if (k != i__) {
+ d__[k] = d__[i__];
+ d__[i__] = p;
+ zswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1],
+ &c__1);
+ }
+/* L180: */
+ }
+ }
+ return 0;
+
+/* End of ZSTEQR */
+
+} /* zsteqr_ */
diff --git a/contrib/libs/clapack/zsycon.c b/contrib/libs/clapack/zsycon.c
new file mode 100644
index 0000000000..7a74d75c3f
--- /dev/null
+++ b/contrib/libs/clapack/zsycon.c
@@ -0,0 +1,203 @@
+/* zsycon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zsycon_(char *uplo, integer *n, doublecomplex *a,
+ integer *lda, integer *ipiv, doublereal *anorm, doublereal *rcond,
+ doublecomplex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ integer i__, kase;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ logical upper;
+ extern /* Subroutine */ int zlacn2_(integer *, doublecomplex *,
+ doublecomplex *, doublereal *, integer *, integer *), xerbla_(
+ char *, integer *);
+ doublereal ainvnm;
+ extern /* Subroutine */ int zsytrs_(char *, integer *, integer *,
+ doublecomplex *, integer *, integer *, doublecomplex *, integer *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSYCON estimates the reciprocal of the condition number (in the */
+/* 1-norm) of a complex symmetric matrix A using the factorization */
+/* A = U*D*U**T or A = L*D*L**T computed by ZSYTRF. */
+
+/* An estimate is obtained for norm(inv(A)), and the reciprocal of the */
+/* condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the details of the factorization are stored */
+/* as an upper or lower triangular matrix. */
+/* = 'U': Upper triangular, form is A = U*D*U**T; */
+/* = 'L': Lower triangular, form is A = L*D*L**T. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The block diagonal matrix D and the multipliers used to */
+/* obtain the factor U or L as computed by ZSYTRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by ZSYTRF. */
+
+/* ANORM (input) DOUBLE PRECISION */
+/* The 1-norm of the original matrix A. */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is an */
+/* estimate of the 1-norm of inv(A) computed in this routine. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else if (*anorm < 0.) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZSYCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *rcond = 0.;
+ if (*n == 0) {
+ *rcond = 1.;
+ return 0;
+ } else if (*anorm <= 0.) {
+ return 0;
+ }
+
+/* Check that the diagonal matrix D is nonsingular. */
+
+ if (upper) {
+
+/* Upper triangular storage: examine D from bottom to top */
+
+ for (i__ = *n; i__ >= 1; --i__) {
+ i__1 = i__ + i__ * a_dim1;
+ if (ipiv[i__] > 0 && (a[i__1].r == 0. && a[i__1].i == 0.)) {
+ return 0;
+ }
+/* L10: */
+ }
+ } else {
+
+/* Lower triangular storage: examine D from top to bottom. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ if (ipiv[i__] > 0 && (a[i__2].r == 0. && a[i__2].i == 0.)) {
+ return 0;
+ }
+/* L20: */
+ }
+ }
+
+/* Estimate the 1-norm of the inverse. */
+
+ kase = 0;
+L30:
+ zlacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+
+/* Multiply by inv(L*D*L') or inv(U*D*U'). */
+
+ zsytrs_(uplo, n, &c__1, &a[a_offset], lda, &ipiv[1], &work[1], n,
+ info);
+ goto L30;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.) {
+ *rcond = 1. / ainvnm / *anorm;
+ }
+
+ return 0;
+
+/* End of ZSYCON */
+
+} /* zsycon_ */
diff --git a/contrib/libs/clapack/zsyequb.c b/contrib/libs/clapack/zsyequb.c
new file mode 100644
index 0000000000..3ab99c7611
--- /dev/null
+++ b/contrib/libs/clapack/zsyequb.c
@@ -0,0 +1,450 @@
+/* zsyequb.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zsyequb_(char *uplo, integer *n, doublecomplex *a,
+	integer *lda, doublereal *s, doublereal *scond, doublereal *amax,
+	doublecomplex *work, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    doublereal d__1, d__2, d__3, d__4;
+    doublecomplex z__1, z__2, z__3, z__4;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *), sqrt(doublereal), log(doublereal), pow_di(
+	    doublereal *, integer *);
+
+    /* Local variables */
+    doublereal d__;
+    integer i__, j;
+    doublereal t, u, c0, c1, c2, si;
+    logical up;
+    doublereal avg, std, tol, base;
+    integer iter;
+    doublereal smin, smax, scale;
+    extern logical lsame_(char *, char *);
+    doublereal sumsq;
+    extern doublereal dlamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal bignum, smlnum;
+    extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
+	     doublereal *, doublereal *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*  -- Contributed by James Demmel, Deaglan Halligan, Yozo Hida and -- */
+/*  -- Jason Riedy of Univ. of California Berkeley. -- */
+/*  -- November 2008 -- */
+
+/*  -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/*  -- Univ. of California Berkeley and NAG Ltd. -- */
+
+/*     .. */
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZSYEQUB computes row and column scalings intended to equilibrate a */
+/*  symmetric matrix A and reduce its condition number */
+/*  (with respect to the two-norm).  S contains the scale factors, */
+/*  S(i) = 1/sqrt(A(i,i)), chosen so that the scaled matrix B with */
+/*  elements B(i,j) = S(i)*A(i,j)*S(j) has ones on the diagonal.  This */
+/*  choice of S puts the condition number of B within a factor N of the */
+/*  smallest possible condition number over all possible diagonal */
+/*  scalings. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  A       (input) COMPLEX*16 array, dimension (LDA,N) */
+/*          The N-by-N symmetric matrix whose scaling */
+/*          factors are to be computed.  Only the diagonal elements of A */
+/*          are referenced. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  S       (output) DOUBLE PRECISION array, dimension (N) */
+/*          If INFO = 0, S contains the scale factors for A. */
+
+/*  SCOND   (output) DOUBLE PRECISION */
+/*          If INFO = 0, S contains the ratio of the smallest S(i) to */
+/*          the largest S(i).  If SCOND >= 0.1 and AMAX is neither too */
+/*          large nor too small, it is not worth scaling by S. */
+
+/*  AMAX    (output) DOUBLE PRECISION */
+/*          Absolute value of largest matrix element.  If AMAX is very */
+/*          close to overflow or very close to underflow, the matrix */
+/*          should be scaled. */
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+/*          > 0:  if INFO = i, the i-th diagonal element is nonpositive. */
+
+/*  Further Details */
+/*  ======= ======= */
+
+/*  Reference: Livne, O.E. and Golub, G.H., "Scaling by Binormalization", */
+/*  Numerical Algorithms, vol. 35, no. 1, pp. 97-120, January 2004. */
+/*  DOI 10.1023/B:NUMA.0000016606.32820.69 */
+/*  Tech report version: http://ruready.utah.edu/archive/papers/bin.pdf */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Statement Functions .. */
+/*     .. */
+/*     Statement Function Definitions */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --s;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    if (! (lsame_(uplo, "U") || lsame_(uplo, "L"))) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZSYEQUB", &i__1);
+	return 0;
+    }
+    up = lsame_(uplo, "U");
+    *amax = 0.;
+
+/*     Quick return if possible. */
+
+    if (*n == 0) {
+	*scond = 1.;
+	return 0;
+    }
+/* First pass: s(i) becomes the largest |Re A(i,j)| + |Im A(i,j)| seen in */
+/* row/column i of the stored triangle; *amax tracks the overall largest */
+/* entry (this |.|_1-style modulus is used consistently below). */
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	s[i__] = 0.;
+    }
+    *amax = 0.;
+    if (up) {
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = j - 1;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		i__3 = i__ + j * a_dim1;
+		d__3 = s[i__], d__4 = (d__1 = a[i__3].r, abs(d__1)) + (d__2 =
+			 d_imag(&a[i__ + j * a_dim1]), abs(d__2));
+		s[i__] = max(d__3,d__4);
+/* Computing MAX */
+		i__3 = i__ + j * a_dim1;
+		d__3 = s[j], d__4 = (d__1 = a[i__3].r, abs(d__1)) + (d__2 =
+			 d_imag(&a[i__ + j * a_dim1]), abs(d__2));
+		s[j] = max(d__3,d__4);
+/* Computing MAX */
+		i__3 = i__ + j * a_dim1;
+		d__3 = *amax, d__4 = (d__1 = a[i__3].r, abs(d__1)) + (d__2 =
+			 d_imag(&a[i__ + j * a_dim1]), abs(d__2));
+		*amax = max(d__3,d__4);
+	    }
+/* Computing MAX */
+	    i__2 = j + j * a_dim1;
+	    d__3 = s[j], d__4 = (d__1 = a[i__2].r, abs(d__1)) + (d__2 =
+		     d_imag(&a[j + j * a_dim1]), abs(d__2));
+	    s[j] = max(d__3,d__4);
+/* Computing MAX */
+	    i__2 = j + j * a_dim1;
+	    d__3 = *amax, d__4 = (d__1 = a[i__2].r, abs(d__1)) + (d__2 =
+		     d_imag(&a[j + j * a_dim1]), abs(d__2));
+	    *amax = max(d__3,d__4);
+	}
+    } else {
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+	    i__2 = j + j * a_dim1;
+	    d__3 = s[j], d__4 = (d__1 = a[i__2].r, abs(d__1)) + (d__2 =
+		     d_imag(&a[j + j * a_dim1]), abs(d__2));
+	    s[j] = max(d__3,d__4);
+/* Computing MAX */
+	    i__2 = j + j * a_dim1;
+	    d__3 = *amax, d__4 = (d__1 = a[i__2].r, abs(d__1)) + (d__2 =
+		     d_imag(&a[j + j * a_dim1]), abs(d__2));
+	    *amax = max(d__3,d__4);
+	    i__2 = *n;
+	    for (i__ = j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+		i__3 = i__ + j * a_dim1;
+		d__3 = s[i__], d__4 = (d__1 = a[i__3].r, abs(d__1)) + (d__2 =
+			 d_imag(&a[i__ + j * a_dim1]), abs(d__2));
+		s[i__] = max(d__3,d__4);
+/* Computing MAX */
+		i__3 = i__ + j * a_dim1;
+		d__3 = s[j], d__4 = (d__1 = a[i__3].r, abs(d__1)) + (d__2 =
+			 d_imag(&a[i__ + j * a_dim1]), abs(d__2));
+		s[j] = max(d__3,d__4);
+/* Computing MAX */
+		i__3 = i__ + j * a_dim1;
+		d__3 = *amax, d__4 = (d__1 = a[i__3].r, abs(d__1)) + (d__2 =
+			 d_imag(&a[i__ + j * a_dim1]), abs(d__2));
+		*amax = max(d__3,d__4);
+	    }
+	}
+    }
+/* Invert the row/column maxima to get the initial scale-factor guess. */
+    i__1 = *n;
+    for (j = 1; j <= i__1; ++j) {
+	s[j] = 1. / s[j];
+    }
+/* Iterative binormalization sweeps (Livne & Golub, see reference above), */
+/* capped at 100 iterations; each sweep refines s so that s.*(|A|*s) */
+/* approaches a constant vector. */
+    tol = 1. / sqrt(*n * 2.);
+    for (iter = 1; iter <= 100; ++iter) {
+	scale = 0.;
+	sumsq = 0.;
+/*        beta = |A|s */
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__;
+	    work[i__2].r = 0., work[i__2].i = 0.;
+	}
+	if (up) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * a_dim1;
+/* NOTE(review): this assignment to t is a dead store in the original */
+/* f2c output -- the value is never read before t is reassigned. */
+		    t = (d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a[i__
+			    + j * a_dim1]), abs(d__2));
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__ + j * a_dim1;
+		    d__3 = ((d__1 = a[i__5].r, abs(d__1)) + (d__2 = d_imag(&a[
+			    i__ + j * a_dim1]), abs(d__2))) * s[j];
+		    z__1.r = work[i__4].r + d__3, z__1.i = work[i__4].i;
+		    work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+		    i__3 = j;
+		    i__4 = j;
+		    i__5 = i__ + j * a_dim1;
+		    d__3 = ((d__1 = a[i__5].r, abs(d__1)) + (d__2 = d_imag(&a[
+			    i__ + j * a_dim1]), abs(d__2))) * s[i__];
+		    z__1.r = work[i__4].r + d__3, z__1.i = work[i__4].i;
+		    work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+		}
+		i__2 = j;
+		i__3 = j;
+		i__4 = j + j * a_dim1;
+		d__3 = ((d__1 = a[i__4].r, abs(d__1)) + (d__2 = d_imag(&a[j +
+			 j * a_dim1]), abs(d__2))) * s[j];
+		z__1.r = work[i__3].r + d__3, z__1.i = work[i__3].i;
+		work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+	    }
+	} else {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		i__3 = j;
+		i__4 = j + j * a_dim1;
+		d__3 = ((d__1 = a[i__4].r, abs(d__1)) + (d__2 = d_imag(&a[j +
+			 j * a_dim1]), abs(d__2))) * s[j];
+		z__1.r = work[i__3].r + d__3, z__1.i = work[i__3].i;
+		work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    i__3 = i__ + j * a_dim1;
+/* NOTE(review): dead store to t here as well (mirrors the upper branch). */
+		    t = (d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a[i__
+			    + j * a_dim1]), abs(d__2));
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__ + j * a_dim1;
+		    d__3 = ((d__1 = a[i__5].r, abs(d__1)) + (d__2 = d_imag(&a[
+			    i__ + j * a_dim1]), abs(d__2))) * s[j];
+		    z__1.r = work[i__4].r + d__3, z__1.i = work[i__4].i;
+		    work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+		    i__3 = j;
+		    i__4 = j;
+		    i__5 = i__ + j * a_dim1;
+		    d__3 = ((d__1 = a[i__5].r, abs(d__1)) + (d__2 = d_imag(&a[
+			    i__ + j * a_dim1]), abs(d__2))) * s[i__];
+		    z__1.r = work[i__4].r + d__3, z__1.i = work[i__4].i;
+		    work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+		}
+	    }
+	}
+/*        avg = s^T beta / n */
+	avg = 0.;
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__;
+	    i__3 = i__;
+	    z__2.r = s[i__2] * work[i__3].r, z__2.i = s[i__2] * work[i__3].i;
+	    z__1.r = avg + z__2.r, z__1.i = z__2.i;
+	    avg = z__1.r;
+	}
+	avg /= *n;
+	std = 0.;
+	i__1 = *n << 1;
+	for (i__ = *n + 1; i__ <= i__1; ++i__) {
+	    i__2 = i__;
+	    i__3 = i__ - *n;
+	    i__4 = i__ - *n;
+	    z__2.r = s[i__3] * work[i__4].r, z__2.i = s[i__3] * work[i__4].i;
+	    z__1.r = z__2.r - avg, z__1.i = z__2.i;
+	    work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+	}
+/* Convergence test: work(n+1:2n) holds s(i)*beta(i) - avg; stop when its */
+/* scaled RMS (via ZLASSQ) falls below tol*avg. */
+	zlassq_(n, &work[*n + 1], &c__1, &scale, &sumsq);
+	std = scale * sqrt(sumsq / *n);
+	if (std < tol * avg) {
+	    goto L999;
+	}
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__ + i__ * a_dim1;
+	    t = (d__1 = a[i__2].r, abs(d__1)) + (d__2 = d_imag(&a[i__ + i__ *
+		     a_dim1]), abs(d__2));
+	    si = s[i__];
+	    c2 = (*n - 1) * t;
+	    i__2 = *n - 2;
+	    i__3 = i__;
+	    d__1 = t * si;
+	    z__2.r = work[i__3].r - d__1, z__2.i = work[i__3].i;
+	    d__2 = (doublereal) i__2;
+	    z__1.r = d__2 * z__2.r, z__1.i = d__2 * z__2.i;
+	    c1 = z__1.r;
+	    d__1 = -(t * si) * si;
+	    i__2 = i__;
+	    d__2 = 2.;
+	    z__4.r = d__2 * work[i__2].r, z__4.i = d__2 * work[i__2].i;
+	    z__3.r = si * z__4.r, z__3.i = si * z__4.i;
+	    z__2.r = d__1 + z__3.r, z__2.i = z__3.i;
+	    d__3 = *n * avg;
+	    z__1.r = z__2.r - d__3, z__1.i = z__2.i;
+	    c0 = z__1.r;
+	    d__ = c1 * c1 - c0 * 4 * c2;
+	    if (d__ <= 0.) {
+/* NOTE(review): the header documents INFO > 0 for a nonpositive diagonal, */
+/* but this path reports -1, exactly as in the original LAPACK 3.2 source. */
+/* Keep in sync with upstream rather than changing locally. */
+		*info = -1;
+		return 0;
+	    }
+	    si = c0 * -2 / (c1 + sqrt(d__));
+	    d__ = si - s[i__];
+	    u = 0.;
+	    if (up) {
+		i__2 = i__;
+		for (j = 1; j <= i__2; ++j) {
+		    i__3 = j + i__ * a_dim1;
+		    t = (d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a[j +
+			     i__ * a_dim1]), abs(d__2));
+		    u += s[j] * t;
+		    i__3 = j;
+		    i__4 = j;
+		    d__1 = d__ * t;
+		    z__1.r = work[i__4].r + d__1, z__1.i = work[i__4].i;
+		    work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+		}
+		i__2 = *n;
+		for (j = i__ + 1; j <= i__2; ++j) {
+		    i__3 = i__ + j * a_dim1;
+		    t = (d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a[i__
+			    + j * a_dim1]), abs(d__2));
+		    u += s[j] * t;
+		    i__3 = j;
+		    i__4 = j;
+		    d__1 = d__ * t;
+		    z__1.r = work[i__4].r + d__1, z__1.i = work[i__4].i;
+		    work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+		}
+	    } else {
+		i__2 = i__;
+		for (j = 1; j <= i__2; ++j) {
+		    i__3 = i__ + j * a_dim1;
+		    t = (d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a[i__
+			    + j * a_dim1]), abs(d__2));
+		    u += s[j] * t;
+		    i__3 = j;
+		    i__4 = j;
+		    d__1 = d__ * t;
+		    z__1.r = work[i__4].r + d__1, z__1.i = work[i__4].i;
+		    work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+		}
+		i__2 = *n;
+		for (j = i__ + 1; j <= i__2; ++j) {
+		    i__3 = j + i__ * a_dim1;
+		    t = (d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a[j +
+			     i__ * a_dim1]), abs(d__2));
+		    u += s[j] * t;
+		    i__3 = j;
+		    i__4 = j;
+		    d__1 = d__ * t;
+		    z__1.r = work[i__4].r + d__1, z__1.i = work[i__4].i;
+		    work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+		}
+	    }
+	    i__2 = i__;
+	    z__4.r = u + work[i__2].r, z__4.i = work[i__2].i;
+	    z__3.r = d__ * z__4.r, z__3.i = d__ * z__4.i;
+	    d__1 = (doublereal) (*n);
+	    z__2.r = z__3.r / d__1, z__2.i = z__3.i / d__1;
+	    z__1.r = avg + z__2.r, z__1.i = z__2.i;
+	    avg = z__1.r;
+	    s[i__] = si;
+	}
+    }
+L999:
+/* Round each scale factor down to a power of the machine radix (dlamch("B")) */
+/* so that multiplication by s is exact, then form SCOND clamped into the */
+/* [smlnum, bignum] safe range. */
+    smlnum = dlamch_("SAFEMIN");
+    bignum = 1. / smlnum;
+    smin = bignum;
+    smax = 0.;
+    t = 1. / sqrt(avg);
+    base = dlamch_("B");
+    u = 1. / log(base);
+    i__1 = *n;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	i__2 = (integer) (u * log(s[i__] * t));
+	s[i__] = pow_di(&base, &i__2);
+/* Computing MIN */
+	d__1 = smin, d__2 = s[i__];
+	smin = min(d__1,d__2);
+/* Computing MAX */
+	d__1 = smax, d__2 = s[i__];
+	smax = max(d__1,d__2);
+    }
+    *scond = max(smin,smlnum) / min(smax,bignum);
+
+    return 0;
+} /* zsyequb_ */
diff --git a/contrib/libs/clapack/zsymv.c b/contrib/libs/clapack/zsymv.c
new file mode 100644
index 0000000000..826855bc39
--- /dev/null
+++ b/contrib/libs/clapack/zsymv.c
@@ -0,0 +1,429 @@
+/* zsymv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zsymv_(char *uplo, integer *n, doublecomplex *alpha,
+	doublecomplex *a, integer *lda, doublecomplex *x, integer *incx,
+	doublecomplex *beta, doublecomplex *y, integer *incy)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    doublecomplex z__1, z__2, z__3, z__4;
+
+    /* Local variables */
+    integer i__, j, ix, iy, jx, jy, kx, ky, info;
+    doublecomplex temp1, temp2;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZSYMV  performs the matrix-vector  operation */
+
+/*     y := alpha*A*x + beta*y, */
+
+/*  where alpha and beta are scalars, x and y are n element vectors and */
+/*  A is an n by n symmetric matrix. */
+
+/*  Arguments */
+/*  ========== */
+
+/*  UPLO     (input) CHARACTER*1 */
+/*           On entry, UPLO specifies whether the upper or lower */
+/*           triangular part of the array A is to be referenced as */
+/*           follows: */
+
+/*              UPLO = 'U' or 'u'   Only the upper triangular part of A */
+/*                                  is to be referenced. */
+
+/*              UPLO = 'L' or 'l'   Only the lower triangular part of A */
+/*                                  is to be referenced. */
+
+/*           Unchanged on exit. */
+
+/*  N        (input) INTEGER */
+/*           On entry, N specifies the order of the matrix A. */
+/*           N must be at least zero. */
+/*           Unchanged on exit. */
+
+/*  ALPHA    (input) COMPLEX*16 */
+/*           On entry, ALPHA specifies the scalar alpha. */
+/*           Unchanged on exit. */
+
+/*  A        (input) COMPLEX*16 array, dimension ( LDA, N ) */
+/*           Before entry, with  UPLO = 'U' or 'u', the leading n by n */
+/*           upper triangular part of the array A must contain the upper */
+/*           triangular part of the symmetric matrix and the strictly */
+/*           lower triangular part of A is not referenced. */
+/*           Before entry, with UPLO = 'L' or 'l', the leading n by n */
+/*           lower triangular part of the array A must contain the lower */
+/*           triangular part of the symmetric matrix and the strictly */
+/*           upper triangular part of A is not referenced. */
+/*           Unchanged on exit. */
+
+/*  LDA      (input) INTEGER */
+/*           On entry, LDA specifies the first dimension of A as declared */
+/*           in the calling (sub) program. LDA must be at least */
+/*           max( 1, N ). */
+/*           Unchanged on exit. */
+
+/*  X        (input) COMPLEX*16 array, dimension at least */
+/*           ( 1 + ( N - 1 )*abs( INCX ) ). */
+/*           Before entry, the incremented array X must contain the N- */
+/*           element vector x. */
+/*           Unchanged on exit. */
+
+/*  INCX     (input) INTEGER */
+/*           On entry, INCX specifies the increment for the elements of */
+/*           X. INCX must not be zero. */
+/*           Unchanged on exit. */
+
+/*  BETA     (input) COMPLEX*16 */
+/*           On entry, BETA specifies the scalar beta. When BETA is */
+/*           supplied as zero then Y need not be set on input. */
+/*           Unchanged on exit. */
+
+/*  Y        (input/output) COMPLEX*16 array, dimension at least */
+/*           ( 1 + ( N - 1 )*abs( INCY ) ). */
+/*           Before entry, the incremented array Y must contain the n */
+/*           element vector y. On exit, Y is overwritten by the updated */
+/*           vector y. */
+
+/*  INCY     (input) INTEGER */
+/*           On entry, INCY specifies the increment for the elements of */
+/*           Y. INCY must not be zero. */
+/*           Unchanged on exit. */
+
+/* ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --x;
+    --y;
+
+    /* Function Body */
+    info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*lda < max(1,*n)) {
+	info = 5;
+    } else if (*incx == 0) {
+	info = 7;
+    } else if (*incy == 0) {
+	info = 10;
+    }
+    if (info != 0) {
+	xerbla_("ZSYMV ", &info);
+	return 0;
+    }
+
+/*     Quick return if possible. */
+
+    if (*n == 0 || alpha->r == 0. && alpha->i == 0. && (beta->r == 1. &&
+	    beta->i == 0.)) {
+	return 0;
+    }
+
+/*     Set up the start points in  X  and  Y. */
+
+/* For negative increments the first logical element sits at the far end */
+/* of the array, hence the 1 - (n-1)*inc start index. */
+    if (*incx > 0) {
+	kx = 1;
+    } else {
+	kx = 1 - (*n - 1) * *incx;
+    }
+    if (*incy > 0) {
+	ky = 1;
+    } else {
+	ky = 1 - (*n - 1) * *incy;
+    }
+
+/*     Start the operations. In this version the elements of A are */
+/*     accessed sequentially with one pass through the triangular part */
+/*     of A. */
+
+/*     First form  y := beta*y. */
+
+    if (beta->r != 1. || beta->i != 0.) {
+	if (*incy == 1) {
+	    if (beta->r == 0. && beta->i == 0.) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = i__;
+		    y[i__2].r = 0., y[i__2].i = 0.;
+/* L10: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = i__;
+		    i__3 = i__;
+		    z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+			    z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+			    .r;
+		    y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L20: */
+		}
+	    }
+	} else {
+	    iy = ky;
+	    if (beta->r == 0. && beta->i == 0.) {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = iy;
+		    y[i__2].r = 0., y[i__2].i = 0.;
+		    iy += *incy;
+/* L30: */
+		}
+	    } else {
+		i__1 = *n;
+		for (i__ = 1; i__ <= i__1; ++i__) {
+		    i__2 = iy;
+		    i__3 = iy;
+		    z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+			    z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+			    .r;
+		    y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+		    iy += *incy;
+/* L40: */
+		}
+	    }
+	}
+    }
+    if (alpha->r == 0. && alpha->i == 0.) {
+	return 0;
+    }
+    if (lsame_(uplo, "U")) {
+
+/*        Form  y  when A is stored in upper triangle. */
+
+/* Per column j: temp1 = alpha*x(j) scales A(1:j-1,j) into y(1:j-1); */
+/* temp2 accumulates A(i,j)*x(i), which (by symmetry) is the row-i<j */
+/* contribution added to y(j) as alpha*temp2. */
+	if (*incx == 1 && *incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+		 	alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+		temp1.r = z__1.r, temp1.i = z__1.i;
+		temp2.r = 0., temp2.i = 0.;
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__ + j * a_dim1;
+		    z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+			    z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+			    .r;
+		    z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+		    y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+		    i__3 = i__ + j * a_dim1;
+		    i__4 = i__;
+		    z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4].i,
+			    z__2.i = a[i__3].r * x[i__4].i + a[i__3].i * x[
+			    i__4].r;
+		    z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+		    temp2.r = z__1.r, temp2.i = z__1.i;
+/* L50: */
+		}
+		i__2 = j;
+		i__3 = j;
+		i__4 = j + j * a_dim1;
+		z__3.r = temp1.r * a[i__4].r - temp1.i * a[i__4].i, z__3.i =
+			temp1.r * a[i__4].i + temp1.i * a[i__4].r;
+		z__2.r = y[i__3].r + z__3.r, z__2.i = y[i__3].i + z__3.i;
+		z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i =
+			alpha->r * temp2.i + alpha->i * temp2.r;
+		z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+		y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L60: */
+	    }
+	} else {
+/* Non-unit strides: ix/iy walk x and y by incx/incy; jx/jy track the */
+/* j-th logical elements. */
+	    jx = kx;
+	    jy = ky;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+		 	alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+		temp1.r = z__1.r, temp1.i = z__1.i;
+		temp2.r = 0., temp2.i = 0.;
+		ix = kx;
+		iy = ky;
+		i__2 = j - 1;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = iy;
+		    i__4 = iy;
+		    i__5 = i__ + j * a_dim1;
+		    z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+			    z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+			    .r;
+		    z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+		    y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+		    i__3 = i__ + j * a_dim1;
+		    i__4 = ix;
+		    z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4].i,
+			    z__2.i = a[i__3].r * x[i__4].i + a[i__3].i * x[
+			    i__4].r;
+		    z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+		    temp2.r = z__1.r, temp2.i = z__1.i;
+		    ix += *incx;
+		    iy += *incy;
+/* L70: */
+		}
+		i__2 = jy;
+		i__3 = jy;
+		i__4 = j + j * a_dim1;
+		z__3.r = temp1.r * a[i__4].r - temp1.i * a[i__4].i, z__3.i =
+			temp1.r * a[i__4].i + temp1.i * a[i__4].r;
+		z__2.r = y[i__3].r + z__3.r, z__2.i = y[i__3].i + z__3.i;
+		z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i =
+			alpha->r * temp2.i + alpha->i * temp2.r;
+		z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+		y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+		jx += *incx;
+		jy += *incy;
+/* L80: */
+	    }
+	}
+    } else {
+
+/*        Form  y  when A is stored in lower triangle. */
+
+	if (*incx == 1 && *incy == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+		 	alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+		temp1.r = z__1.r, temp1.i = z__1.i;
+		temp2.r = 0., temp2.i = 0.;
+		i__2 = j;
+		i__3 = j;
+		i__4 = j + j * a_dim1;
+		z__2.r = temp1.r * a[i__4].r - temp1.i * a[i__4].i, z__2.i =
+			temp1.r * a[i__4].i + temp1.i * a[i__4].r;
+		z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+		y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__ + j * a_dim1;
+		    z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+			    z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+			    .r;
+		    z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+		    y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+		    i__3 = i__ + j * a_dim1;
+		    i__4 = i__;
+		    z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4].i,
+			    z__2.i = a[i__3].r * x[i__4].i + a[i__3].i * x[
+			    i__4].r;
+		    z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+		    temp2.r = z__1.r, temp2.i = z__1.i;
+/* L90: */
+		}
+		i__2 = j;
+		i__3 = j;
+		z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i =
+			alpha->r * temp2.i + alpha->i * temp2.r;
+		z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+		y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L100: */
+	    }
+	} else {
+	    jx = kx;
+	    jy = ky;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+		 	alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+		temp1.r = z__1.r, temp1.i = z__1.i;
+		temp2.r = 0., temp2.i = 0.;
+		i__2 = jy;
+		i__3 = jy;
+		i__4 = j + j * a_dim1;
+		z__2.r = temp1.r * a[i__4].r - temp1.i * a[i__4].i, z__2.i =
+			temp1.r * a[i__4].i + temp1.i * a[i__4].r;
+		z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+		y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+		ix = jx;
+		iy = jy;
+		i__2 = *n;
+		for (i__ = j + 1; i__ <= i__2; ++i__) {
+		    ix += *incx;
+		    iy += *incy;
+		    i__3 = iy;
+		    i__4 = iy;
+		    i__5 = i__ + j * a_dim1;
+		    z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+			    z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+			    .r;
+		    z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+		    y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+		    i__3 = i__ + j * a_dim1;
+		    i__4 = ix;
+		    z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4].i,
+			    z__2.i = a[i__3].r * x[i__4].i + a[i__3].i * x[
+			    i__4].r;
+		    z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+		    temp2.r = z__1.r, temp2.i = z__1.i;
+/* L110: */
+		}
+		i__2 = jy;
+		i__3 = jy;
+		z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i =
+			alpha->r * temp2.i + alpha->i * temp2.r;
+		z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+		y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+		jx += *incx;
+		jy += *incy;
+/* L120: */
+	    }
+	}
+    }
+
+    return 0;
+
+/*     End of ZSYMV */
+
+} /* zsymv_ */
diff --git a/contrib/libs/clapack/zsyr.c b/contrib/libs/clapack/zsyr.c
new file mode 100644
index 0000000000..81d2ef83ae
--- /dev/null
+++ b/contrib/libs/clapack/zsyr.c
@@ -0,0 +1,289 @@
+/* zsyr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zsyr_(char *uplo, integer *n, doublecomplex *alpha,
+	doublecomplex *x, integer *incx, doublecomplex *a, integer *lda)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+    doublecomplex z__1, z__2;
+
+    /* Local variables */
+    integer i__, j, ix, jx, kx, info;
+    doublecomplex temp;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZSYR   performs the symmetric rank 1 operation */
+
+/*     A := alpha*x*( x' ) + A, */
+
+/*  where alpha is a complex scalar, x is an n element vector and A is an */
+/*  n by n symmetric matrix. */
+
+/*  Arguments */
+/*  ========== */
+
+/*  UPLO     (input) CHARACTER*1 */
+/*           On entry, UPLO specifies whether the upper or lower */
+/*           triangular part of the array A is to be referenced as */
+/*           follows: */
+
+/*              UPLO = 'U' or 'u'   Only the upper triangular part of A */
+/*                                  is to be referenced. */
+
+/*              UPLO = 'L' or 'l'   Only the lower triangular part of A */
+/*                                  is to be referenced. */
+
+/*           Unchanged on exit. */
+
+/*  N        (input) INTEGER */
+/*           On entry, N specifies the order of the matrix A. */
+/*           N must be at least zero. */
+/*           Unchanged on exit. */
+
+/*  ALPHA    (input) COMPLEX*16 */
+/*           On entry, ALPHA specifies the scalar alpha. */
+/*           Unchanged on exit. */
+
+/*  X        (input) COMPLEX*16 array, dimension at least */
+/*           ( 1 + ( N - 1 )*abs( INCX ) ). */
+/*           Before entry, the incremented array X must contain the N- */
+/*           element vector x. */
+/*           Unchanged on exit. */
+
+/*  INCX     (input) INTEGER */
+/*           On entry, INCX specifies the increment for the elements of */
+/*           X. INCX must not be zero. */
+/*           Unchanged on exit. */
+
+/*  A        (input/output) COMPLEX*16 array, dimension ( LDA, N ) */
+/*           Before entry, with  UPLO = 'U' or 'u', the leading n by n */
+/*           upper triangular part of the array A must contain the upper */
+/*           triangular part of the symmetric matrix and the strictly */
+/*           lower triangular part of A is not referenced. On exit, the */
+/*           upper triangular part of the array A is overwritten by the */
+/*           upper triangular part of the updated matrix. */
+/*           Before entry, with UPLO = 'L' or 'l', the leading n by n */
+/*           lower triangular part of the array A must contain the lower */
+/*           triangular part of the symmetric matrix and the strictly */
+/*           upper triangular part of A is not referenced. On exit, the */
+/*           lower triangular part of the array A is overwritten by the */
+/*           lower triangular part of the updated matrix. */
+
+/*  LDA      (input) INTEGER */
+/*           On entry, LDA specifies the first dimension of A as declared */
+/*           in the calling (sub) program. LDA must be at least */
+/*           max( 1, N ). */
+/*           Unchanged on exit. */
+
+/* ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    --x;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    info = 0;
+    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+	info = 1;
+    } else if (*n < 0) {
+	info = 2;
+    } else if (*incx == 0) {
+	info = 5;
+    } else if (*lda < max(1,*n)) {
+	info = 7;
+    }
+    if (info != 0) {
+	xerbla_("ZSYR  ", &info);
+	return 0;
+    }
+
+/*     Quick return if possible. */
+
+    if (*n == 0 || alpha->r == 0. && alpha->i == 0.) {
+	return 0;
+    }
+
+/*     Set the start point in X if the increment is not unity. */
+
+/* kx is only needed -- and only set -- on the non-unit-increment paths */
+/* below; the INCX == 1 loops index x directly by j. */
+    if (*incx <= 0) {
+	kx = 1 - (*n - 1) * *incx;
+    } else if (*incx != 1) {
+	kx = 1;
+    }
+
+/*     Start the operations. In this version the elements of A are */
+/*     accessed sequentially with one pass through the triangular part */
+/*     of A. */
+
+    if (lsame_(uplo, "U")) {
+
+/*        Form  A  when A is stored in upper triangle. */
+
+/* Column-by-column rank-1 update: A(1:j,j) += (alpha*x(j)) * x(1:j); */
+/* columns with x(j) == 0 are skipped entirely. */
+	if (*incx == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		if (x[i__2].r != 0. || x[i__2].i != 0.) {
+		    i__2 = j;
+		    z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+			    z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+			    .r;
+		    temp.r = z__1.r, temp.i = z__1.i;
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * a_dim1;
+			i__4 = i__ + j * a_dim1;
+			i__5 = i__;
+			z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+				z__2.i = x[i__5].r * temp.i + x[i__5].i *
+				temp.r;
+			z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i +
+				z__2.i;
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L10: */
+		    }
+		}
+/* L20: */
+	    }
+	} else {
+	    jx = kx;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		if (x[i__2].r != 0. || x[i__2].i != 0.) {
+		    i__2 = jx;
+		    z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+			    z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+			    .r;
+		    temp.r = z__1.r, temp.i = z__1.i;
+		    ix = kx;
+		    i__2 = j;
+		    for (i__ = 1; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * a_dim1;
+			i__4 = i__ + j * a_dim1;
+			i__5 = ix;
+			z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+				z__2.i = x[i__5].r * temp.i + x[i__5].i *
+				temp.r;
+			z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i +
+				z__2.i;
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+			ix += *incx;
+/* L30: */
+		    }
+		}
+		jx += *incx;
+/* L40: */
+	    }
+	}
+    } else {
+
+/*        Form  A  when A is stored in lower triangle. */
+
+/* Same update for the lower triangle: A(j:n,j) += (alpha*x(j)) * x(j:n). */
+	if (*incx == 1) {
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = j;
+		if (x[i__2].r != 0. || x[i__2].i != 0.) {
+		    i__2 = j;
+		    z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+			    z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+			    .r;
+		    temp.r = z__1.r, temp.i = z__1.i;
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * a_dim1;
+			i__4 = i__ + j * a_dim1;
+			i__5 = i__;
+			z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+				z__2.i = x[i__5].r * temp.i + x[i__5].i *
+				temp.r;
+			z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i +
+				z__2.i;
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L50: */
+		    }
+		}
+/* L60: */
+	    }
+	} else {
+	    jx = kx;
+	    i__1 = *n;
+	    for (j = 1; j <= i__1; ++j) {
+		i__2 = jx;
+		if (x[i__2].r != 0. || x[i__2].i != 0.) {
+		    i__2 = jx;
+		    z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+			    z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+			    .r;
+		    temp.r = z__1.r, temp.i = z__1.i;
+		    ix = jx;
+		    i__2 = *n;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * a_dim1;
+			i__4 = i__ + j * a_dim1;
+			i__5 = ix;
+			z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i,
+				z__2.i = x[i__5].r * temp.i + x[i__5].i *
+				temp.r;
+			z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i +
+				z__2.i;
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+			ix += *incx;
+/* L70: */
+		    }
+		}
+		jx += *incx;
+/* L80: */
+	    }
+	}
+    }
+
+    return 0;
+
+/*     End of ZSYR */
+
+} /* zsyr_ */
diff --git a/contrib/libs/clapack/zsyrfs.c b/contrib/libs/clapack/zsyrfs.c
new file mode 100644
index 0000000000..f39ef25978
--- /dev/null
+++ b/contrib/libs/clapack/zsyrfs.c
@@ -0,0 +1,474 @@
+/* zsyrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zsyrfs_(char *uplo, integer *n, integer *nrhs,
+ doublecomplex *a, integer *lda, doublecomplex *af, integer *ldaf,
+ integer *ipiv, doublecomplex *b, integer *ldb, doublecomplex *x,
+ integer *ldx, doublereal *ferr, doublereal *berr, doublecomplex *work,
+ doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+ x_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal s, xk;
+ integer nz;
+ doublereal eps;
+ integer kase;
+ doublereal safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3], count;
+ logical upper;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zaxpy_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), zsymv_(
+ char *, integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), zlacn2_(integer *, doublecomplex *,
+ doublecomplex *, doublereal *, integer *, integer *);
+ extern doublereal dlamch_(char *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal lstres;
+ extern /* Subroutine */ int zsytrs_(char *, integer *, integer *,
+ doublecomplex *, integer *, integer *, doublecomplex *, integer *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSYRFS improves the computed solution to a system of linear */
+/* equations when the coefficient matrix is symmetric indefinite, and */
+/* provides error bounds and backward error estimates for the solution. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The symmetric matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input) COMPLEX*16 array, dimension (LDAF,N) */
+/* The factored form of the matrix A. AF contains the block */
+/* diagonal matrix D and the multipliers used to obtain the */
+/* factor U or L from the factorization A = U*D*U**T or */
+/* A = L*D*L**T as computed by ZSYTRF. */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* IPIV (input) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D */
+/* as determined by ZSYTRF. */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input/output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* On entry, the solution matrix X, as computed by ZSYTRS. */
+/* On exit, the improved solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Internal Parameters */
+/* =================== */
+
+/* ITMAX is the maximum number of steps of iterative refinement. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ af_dim1 = *ldaf;
+ af_offset = 1 + af_dim1;
+ af -= af_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldaf < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldx < max(1,*n)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZSYRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.;
+ berr[j] = 0.;
+/* L10: */
+ }
+ return 0;
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = *n + 1;
+ eps = dlamch_("Epsilon");
+ safmin = dlamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+ count = 1;
+ lstres = 3.;
+L20:
+
+/* Loop until stopping criterion is satisfied. */
+
+/* Compute residual R = B - A * X */
+
+ zcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+ z__1.r = -1., z__1.i = -0.;
+ zsymv_(uplo, n, &z__1, &a[a_offset], lda, &x[j * x_dim1 + 1], &c__1, &
+ c_b1, &work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[i__] = (d__1 = b[i__3].r, abs(d__1)) + (d__2 = d_imag(&b[
+ i__ + j * b_dim1]), abs(d__2));
+/* L30: */
+ }
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+ if (upper) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ i__3 = k + j * x_dim1;
+ xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[k + j *
+ x_dim1]), abs(d__2));
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ rwork[i__] += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 =
+ d_imag(&a[i__ + k * a_dim1]), abs(d__2))) * xk;
+ i__4 = i__ + k * a_dim1;
+ i__5 = i__ + j * x_dim1;
+ s += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 = d_imag(&a[
+ i__ + k * a_dim1]), abs(d__2))) * ((d__3 = x[i__5]
+ .r, abs(d__3)) + (d__4 = d_imag(&x[i__ + j *
+ x_dim1]), abs(d__4)));
+/* L40: */
+ }
+ i__3 = k + k * a_dim1;
+ rwork[k] = rwork[k] + ((d__1 = a[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&a[k + k * a_dim1]), abs(d__2))) * xk + s;
+/* L50: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ i__3 = k + j * x_dim1;
+ xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[k + j *
+ x_dim1]), abs(d__2));
+ i__3 = k + k * a_dim1;
+ rwork[k] += ((d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&
+ a[k + k * a_dim1]), abs(d__2))) * xk;
+ i__3 = *n;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ rwork[i__] += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 =
+ d_imag(&a[i__ + k * a_dim1]), abs(d__2))) * xk;
+ i__4 = i__ + k * a_dim1;
+ i__5 = i__ + j * x_dim1;
+ s += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 = d_imag(&a[
+ i__ + k * a_dim1]), abs(d__2))) * ((d__3 = x[i__5]
+ .r, abs(d__3)) + (d__4 = d_imag(&x[i__ + j *
+ x_dim1]), abs(d__4)));
+/* L60: */
+ }
+ rwork[k] += s;
+/* L70: */
+ }
+ }
+ s = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+/* Computing MAX */
+ i__3 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2))) / rwork[i__];
+ s = max(d__3,d__4);
+ } else {
+/* Computing MAX */
+ i__3 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + safe1) / (rwork[i__]
+ + safe1);
+ s = max(d__3,d__4);
+ }
+/* L80: */
+ }
+ berr[j] = s;
+
+/* Test stopping criterion. Continue iterating if */
+/* 1) The residual BERR(J) is larger than machine epsilon, and */
+/* 2) BERR(J) decreased by at least a factor of 2 during the */
+/* last iteration, and */
+/* 3) At most ITMAX iterations tried. */
+
+ if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) {
+
+/* Update solution and try again. */
+
+ zsytrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[1],
+ n, info);
+ zaxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1);
+ lstres = berr[j];
+ ++count;
+ goto L20;
+ }
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(A))* */
+/* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(A) is the inverse of A */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(A)*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use ZLACN2 to estimate the infinity-norm of the matrix */
+/* inv(A) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+ i__3 = i__;
+ rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ ;
+ } else {
+ i__3 = i__;
+ rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ + safe1;
+ }
+/* L90: */
+ }
+
+ kase = 0;
+L100:
+ zlacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(A'). */
+
+ zsytrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[
+ 1], n, info);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L110: */
+ }
+ } else if (kase == 2) {
+
+/* Multiply by inv(A)*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L120: */
+ }
+ zsytrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[
+ 1], n, info);
+ }
+ goto L100;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * x_dim1;
+ d__3 = lstres, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&x[i__ + j * x_dim1]), abs(d__2));
+ lstres = max(d__3,d__4);
+/* L130: */
+ }
+ if (lstres != 0.) {
+ ferr[j] /= lstres;
+ }
+
+/* L140: */
+ }
+
+ return 0;
+
+/* End of ZSYRFS */
+
+} /* zsyrfs_ */
diff --git a/contrib/libs/clapack/zsysv.c b/contrib/libs/clapack/zsysv.c
new file mode 100644
index 0000000000..ce08ea1851
--- /dev/null
+++ b/contrib/libs/clapack/zsysv.c
@@ -0,0 +1,213 @@
+/* zsysv.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zsysv_(char *uplo, integer *n, integer *nrhs,
+ doublecomplex *a, integer *lda, integer *ipiv, doublecomplex *b,
+ integer *ldb, doublecomplex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ integer nb;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int zsytrf_(char *, integer *, doublecomplex *,
+ integer *, integer *, doublecomplex *, integer *, integer *), zsytrs_(char *, integer *, integer *, doublecomplex *,
+ integer *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSYSV computes the solution to a complex system of linear equations */
+/* A * X = B, */
+/* where A is an N-by-N symmetric matrix and X and B are N-by-NRHS */
+/* matrices. */
+
+/* The diagonal pivoting method is used to factor A as */
+/* A = U * D * U**T, if UPLO = 'U', or */
+/* A = L * D * L**T, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is symmetric and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. The factored form of A is then */
+/* used to solve the system of equations A * X = B. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, if INFO = 0, the block diagonal matrix D and the */
+/* multipliers used to obtain the factor U or L from the */
+/* factorization A = U*D*U**T or A = L*D*L**T as computed by */
+/* ZSYTRF. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D, as */
+/* determined by ZSYTRF. If IPIV(k) > 0, then rows and columns */
+/* k and IPIV(k) were interchanged, and D(k,k) is a 1-by-1 */
+/* diagonal block. If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, */
+/* then rows and columns k-1 and -IPIV(k) were interchanged and */
+/* D(k-1:k,k-1:k) is a 2-by-2 diagonal block. If UPLO = 'L' and */
+/* IPIV(k) = IPIV(k+1) < 0, then rows and columns k+1 and */
+/* -IPIV(k) were interchanged and D(k:k+1,k:k+1) is a 2-by-2 */
+/* diagonal block. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the N-by-NRHS right hand side matrix B. */
+/* On exit, if INFO = 0, the N-by-NRHS solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of WORK. LWORK >= 1, and for best performance */
+/* LWORK >= max(1,N*NB), where NB is the optimal blocksize for */
+/* ZSYTRF. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, so the solution could not be computed. */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ } else if (*lwork < 1 && ! lquery) {
+ *info = -10;
+ }
+
+ if (*info == 0) {
+ if (*n == 0) {
+ lwkopt = 1;
+ } else {
+ nb = ilaenv_(&c__1, "ZSYTRF", uplo, n, &c_n1, &c_n1, &c_n1);
+ lwkopt = *n * nb;
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZSYSV ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+ zsytrf_(uplo, n, &a[a_offset], lda, &ipiv[1], &work[1], lwork, info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ zsytrs_(uplo, n, nrhs, &a[a_offset], lda, &ipiv[1], &b[b_offset], ldb,
+ info);
+
+ }
+
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZSYSV */
+
+} /* zsysv_ */
diff --git a/contrib/libs/clapack/zsysvx.c b/contrib/libs/clapack/zsysvx.c
new file mode 100644
index 0000000000..cdcb775a7c
--- /dev/null
+++ b/contrib/libs/clapack/zsysvx.c
@@ -0,0 +1,368 @@
+/* zsysvx.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zsysvx_(char *fact, char *uplo, integer *n, integer *
+ nrhs, doublecomplex *a, integer *lda, doublecomplex *af, integer *
+ ldaf, integer *ipiv, doublecomplex *b, integer *ldb, doublecomplex *x,
+ integer *ldx, doublereal *rcond, doublereal *ferr, doublereal *berr,
+ doublecomplex *work, integer *lwork, doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1,
+ x_offset, i__1, i__2;
+
+ /* Local variables */
+ integer nb;
+ extern logical lsame_(char *, char *);
+ doublereal anorm;
+ extern doublereal dlamch_(char *);
+ logical nofact;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern doublereal zlansy_(char *, char *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ extern /* Subroutine */ int zsycon_(char *, integer *, doublecomplex *,
+ integer *, integer *, doublereal *, doublereal *, doublecomplex *,
+ integer *), zsyrfs_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *, doublecomplex *, doublereal *,
+ integer *), zsytrf_(char *, integer *, doublecomplex *,
+ integer *, integer *, doublecomplex *, integer *, integer *), zsytrs_(char *, integer *, integer *, doublecomplex *,
+ integer *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK driver routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSYSVX uses the diagonal pivoting factorization to compute the */
+/* solution to a complex system of linear equations A * X = B, */
+/* where A is an N-by-N symmetric matrix and X and B are N-by-NRHS */
+/* matrices. */
+
+/* Error bounds on the solution and a condition estimate are also */
+/* provided. */
+
+/* Description */
+/* =========== */
+
+/* The following steps are performed: */
+
+/* 1. If FACT = 'N', the diagonal pivoting method is used to factor A. */
+/* The form of the factorization is */
+/* A = U * D * U**T, if UPLO = 'U', or */
+/* A = L * D * L**T, if UPLO = 'L', */
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is symmetric and block diagonal with */
+/* 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* 2. If some D(i,i)=0, so that D is exactly singular, then the routine */
+/* returns with INFO = i. Otherwise, the factored form of A is used */
+/* to estimate the condition number of the matrix A. If the */
+/* reciprocal of the condition number is less than machine precision, */
+/* INFO = N+1 is returned as a warning, but the routine still goes on */
+/* to solve for X and compute error bounds as described below. */
+
+/* 3. The system of equations is solved for X using the factored form */
+/* of A. */
+
+/* 4. Iterative refinement is applied to improve the computed solution */
+/* matrix and calculate error bounds and backward error estimates */
+/* for it. */
+
+/* Arguments */
+/* ========= */
+
+/* FACT (input) CHARACTER*1 */
+/* Specifies whether or not the factored form of A has been */
+/* supplied on entry. */
+/* = 'F': On entry, AF and IPIV contain the factored form */
+/* of A. A, AF and IPIV will not be modified. */
+/* = 'N': The matrix A will be copied to AF and factored. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The number of linear equations, i.e., the order of the */
+/* matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The symmetric matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of A contains the upper triangular part */
+/* of the matrix A, and the strictly lower triangular part of A */
+/* is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of A contains the lower triangular part of */
+/* the matrix A, and the strictly upper triangular part of A is */
+/* not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AF (input or output) COMPLEX*16 array, dimension (LDAF,N) */
+/* If FACT = 'F', then AF is an input argument and on entry */
+/* contains the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**T or A = L*D*L**T as computed by ZSYTRF. */
+
+/* If FACT = 'N', then AF is an output argument and on exit */
+/* returns the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L from the factorization */
+/* A = U*D*U**T or A = L*D*L**T. */
+
+/* LDAF (input) INTEGER */
+/* The leading dimension of the array AF. LDAF >= max(1,N). */
+
+/* IPIV (input or output) INTEGER array, dimension (N) */
+/* If FACT = 'F', then IPIV is an input argument and on entry */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by ZSYTRF. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* If FACT = 'N', then IPIV is an output argument and on exit */
+/* contains details of the interchanges and the block structure */
+/* of D, as determined by ZSYTRF. */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The N-by-NRHS right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (output) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* If INFO = 0 or INFO = N+1, the N-by-NRHS solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The estimate of the reciprocal condition number of the matrix */
+/* A. If RCOND is less than the machine precision (in */
+/* particular, if RCOND = 0), the matrix is singular to working */
+/* precision. This condition is indicated by a return code of */
+/* INFO > 0. */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of WORK. LWORK >= max(1,2*N), and for best */
+/* performance, when FACT = 'N', LWORK >= max(1,2*N,N*NB), where */
+/* NB is the optimal blocksize for ZSYTRF. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, and i is */
+/* <= N: D(i,i) is exactly zero. The factorization */
+/* has been completed but the factor D is exactly */
+/* singular, so the solution and error bounds could */
+/* not be computed. RCOND = 0 is returned. */
+/* = N+1: D is nonsingular, but RCOND is less than machine */
+/* precision, meaning that the matrix is singular */
+/* to working precision. Nevertheless, the */
+/* solution and error bounds are computed because */
+/* there are a number of situations where the */
+/* computed solution can be more accurate than the */
+/* value of RCOND would suggest. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ af_dim1 = *ldaf;
+ af_offset = 1 + af_dim1;
+ af -= af_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ nofact = lsame_(fact, "N");
+ lquery = *lwork == -1;
+ if (! nofact && ! lsame_(fact, "F")) {
+ *info = -1;
+ } else if (! lsame_(uplo, "U") && ! lsame_(uplo,
+ "L")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else if (*ldaf < max(1,*n)) {
+ *info = -8;
+ } else if (*ldb < max(1,*n)) {
+ *info = -11;
+ } else if (*ldx < max(1,*n)) {
+ *info = -13;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n << 1;
+ if (*lwork < max(i__1,i__2) && ! lquery) {
+ *info = -18;
+ }
+ }
+
+ if (*info == 0) {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n << 1;
+ lwkopt = max(i__1,i__2);
+ if (nofact) {
+ nb = ilaenv_(&c__1, "ZSYTRF", uplo, n, &c_n1, &c_n1, &c_n1);
+/* Computing MAX */
+ i__1 = lwkopt, i__2 = *n * nb;
+ lwkopt = max(i__1,i__2);
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZSYSVX", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+ if (nofact) {
+
+/* Compute the factorization A = U*D*U' or A = L*D*L'. */
+
+ zlacpy_(uplo, n, n, &a[a_offset], lda, &af[af_offset], ldaf);
+ zsytrf_(uplo, n, &af[af_offset], ldaf, &ipiv[1], &work[1], lwork,
+ info);
+
+/* Return if INFO is non-zero. */
+
+ if (*info > 0) {
+ *rcond = 0.;
+ return 0;
+ }
+ }
+
+/* Compute the norm of the matrix A. */
+
+ anorm = zlansy_("I", uplo, n, &a[a_offset], lda, &rwork[1]);
+
+/* Compute the reciprocal of the condition number of A. */
+
+ zsycon_(uplo, n, &af[af_offset], ldaf, &ipiv[1], &anorm, rcond, &work[1],
+ info);
+
+/* Compute the solution vectors X. */
+
+ zlacpy_("Full", n, nrhs, &b[b_offset], ldb, &x[x_offset], ldx);
+ zsytrs_(uplo, n, nrhs, &af[af_offset], ldaf, &ipiv[1], &x[x_offset], ldx,
+ info);
+
+/* Use iterative refinement to improve the computed solutions and */
+/* compute error bounds and backward error estimates for them. */
+
+ zsyrfs_(uplo, n, nrhs, &a[a_offset], lda, &af[af_offset], ldaf, &ipiv[1],
+ &b[b_offset], ldb, &x[x_offset], ldx, &ferr[1], &berr[1], &work[1]
+, &rwork[1], info);
+
+/* Set INFO = N+1 if the matrix is singular to working precision. */
+
+ if (*rcond < dlamch_("Epsilon")) {
+ *info = *n + 1;
+ }
+
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZSYSVX */
+
+} /* zsysvx_ */
diff --git a/contrib/libs/clapack/zsytf2.c b/contrib/libs/clapack/zsytf2.c
new file mode 100644
index 0000000000..578000f3ae
--- /dev/null
+++ b/contrib/libs/clapack/zsytf2.c
@@ -0,0 +1,727 @@
+/* zsytf2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zsytf2_(char *uplo, integer *n, doublecomplex *a,
+	integer *lda, integer *ipiv, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+    doublereal d__1, d__2, d__3, d__4;
+    doublecomplex z__1, z__2, z__3, z__4;
+
+    /* Builtin functions */
+    double sqrt(doublereal), d_imag(doublecomplex *);
+    void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__, j, k;
+    doublecomplex t, r1, d11, d12, d21, d22;
+    integer kk, kp;
+    doublecomplex wk, wkm1, wkp1;
+    integer imax, jmax;
+    extern /* Subroutine */ int zsyr_(char *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *);
+    doublereal alpha;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+	    doublecomplex *, integer *);
+    integer kstep;
+    logical upper;
+    extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *);
+    doublereal absakk;
+    extern logical disnan_(doublereal *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal colmax;
+    extern integer izamax_(integer *, doublecomplex *, integer *);
+    doublereal rowmax;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSYTF2 computes the factorization of a complex symmetric matrix A */
+/* using the Bunch-Kaufman diagonal pivoting method: */
+
+/* A = U*D*U' or A = L*D*L' */
+
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, U' is the transpose of U, and D is symmetric and */
+/* block diagonal with 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* This is the unblocked version of the algorithm, calling Level 2 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the upper or lower triangular part of the */
+/* symmetric matrix A is stored: */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* n-by-n upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n-by-n lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L (see below for further details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+/* > 0: if INFO = k, D(k,k) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, and division by zero will occur if it */
+/* is used to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* 09-29-06 - patch from */
+/* Bobby Cheng, MathWorks */
+
+/* Replace l.209 and l.377 */
+/* IF( MAX( ABSAKK, COLMAX ).EQ.ZERO ) THEN */
+/* by */
+/* IF( (MAX( ABSAKK, COLMAX ).EQ.ZERO) .OR. DISNAN(ABSAKK) ) THEN */
+
+/* 1-96 - Based on modifications by J. Lewis, Boeing Computer Services */
+/* Company */
+
+/* If UPLO = 'U', then A = U*D*U', where */
+/* U = P(n)*U(n)* ... *P(k)U(k)* ..., */
+/* i.e., U is a product of terms P(k)*U(k), where k decreases from n to */
+/* 1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I v 0 ) k-s */
+/* U(k) = ( 0 I 0 ) s */
+/* ( 0 0 I ) n-k */
+/* k-s s n-k */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */
+/* If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */
+/* and A(k,k), and v overwrites A(1:k-2,k-1:k). */
+
+/* If UPLO = 'L', then A = L*D*L', where */
+/* L = P(1)*L(1)* ... *P(k)*L(k)* ..., */
+/* i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */
+/* n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I 0 0 ) k-1 */
+/* L(k) = ( 0 I 0 ) s */
+/* ( 0 v I ) n-k-s+1 */
+/* k-1 s n-k-s+1 */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */
+/* If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */
+/* and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;  /* column stride: A is 1-based, column-major (f2c) */
+    a_offset = 1 + a_dim1;
+    a -= a_offset;  /* after this shift, a[i + j * a_dim1] addresses A(i,j) */
+    --ipiv;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZSYTF2", &i__1);
+	return 0;
+    }
+
+/* Initialize ALPHA for use in choosing pivot block size. */
+
+    alpha = (sqrt(17.) + 1.) / 8.;  /* = (1+sqrt(17))/8, the Bunch-Kaufman threshold */
+
+    if (upper) {
+
+/* Factorize A as U*D*U' using the upper triangle of A */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* 1 or 2 */
+
+	k = *n;
+L10:
+
+/* If K < 1, exit from loop */
+
+	if (k < 1) {
+	    goto L70;
+	}
+	kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+	i__1 = k + k * a_dim1;
+	absakk = (d__1 = a[i__1].r, abs(d__1)) + (d__2 = d_imag(&a[k + k *
+		a_dim1]), abs(d__2));  /* |Re|+|Im| magnitude of A(k,k) */
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+	if (k > 1) {
+	    i__1 = k - 1;
+	    imax = izamax_(&i__1, &a[k * a_dim1 + 1], &c__1);  /* 1-based index of largest |.| in A(1:k-1,k) */
+	    i__1 = imax + k * a_dim1;
+	    colmax = (d__1 = a[i__1].r, abs(d__1)) + (d__2 = d_imag(&a[imax +
+		    k * a_dim1]), abs(d__2));
+	} else {
+	    colmax = 0.;
+	}
+
+	if (max(absakk,colmax) == 0. || disnan_(&absakk)) {
+
+/* Column K is zero or contains a NaN: set INFO and continue */
+
+	    if (*info == 0) {
+		*info = k;
+	    }
+	    kp = k;
+	} else {
+	    if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+		kp = k;
+	    } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+		i__1 = k - imax;
+		jmax = imax + izamax_(&i__1, &a[imax + (imax + 1) * a_dim1],
+			lda);
+		i__1 = imax + jmax * a_dim1;
+		rowmax = (d__1 = a[i__1].r, abs(d__1)) + (d__2 = d_imag(&a[
+			imax + jmax * a_dim1]), abs(d__2));
+		if (imax > 1) {
+		    i__1 = imax - 1;
+		    jmax = izamax_(&i__1, &a[imax * a_dim1 + 1], &c__1);
+/* Computing MAX */
+		    i__1 = jmax + imax * a_dim1;
+		    d__3 = rowmax, d__4 = (d__1 = a[i__1].r, abs(d__1)) + (
+			    d__2 = d_imag(&a[jmax + imax * a_dim1]), abs(d__2)
+			    );
+		    rowmax = max(d__3,d__4);
+		}
+
+		if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+		    kp = k;
+		} else /* if(complicated condition) */ {
+		    i__1 = imax + imax * a_dim1;
+		    if ((d__1 = a[i__1].r, abs(d__1)) + (d__2 = d_imag(&a[
+			    imax + imax * a_dim1]), abs(d__2)) >= alpha *
+			    rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+			kp = imax;
+		    } else {
+
+/* interchange rows and columns K-1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+			kp = imax;
+			kstep = 2;
+		    }
+		}
+	    }
+
+	    kk = k - kstep + 1;  /* first (leftmost) column of the pivot block */
+	    if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the leading */
+/* submatrix A(1:k,1:k) */
+
+		i__1 = kp - 1;
+		zswap_(&i__1, &a[kk * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1],
+			 &c__1);
+		i__1 = kk - kp - 1;
+		zswap_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + (kp +
+			1) * a_dim1], lda);
+		i__1 = kk + kk * a_dim1;
+		t.r = a[i__1].r, t.i = a[i__1].i;
+		i__1 = kk + kk * a_dim1;
+		i__2 = kp + kp * a_dim1;
+		a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+		i__1 = kp + kp * a_dim1;
+		a[i__1].r = t.r, a[i__1].i = t.i;
+		if (kstep == 2) {
+		    i__1 = k - 1 + k * a_dim1;
+		    t.r = a[i__1].r, t.i = a[i__1].i;
+		    i__1 = k - 1 + k * a_dim1;
+		    i__2 = kp + k * a_dim1;
+		    a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+		    i__1 = kp + k * a_dim1;
+		    a[i__1].r = t.r, a[i__1].i = t.i;
+		}
+	    }
+
+/* Update the leading submatrix */
+
+	    if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = U(k)*D(k) */
+
+/* where U(k) is the k-th column of U */
+
+/* Perform a rank-1 update of A(1:k-1,1:k-1) as */
+
+/* A := A - U(k)*D(k)*U(k)' = A - W(k)*1/D(k)*W(k)' */
+
+		z_div(&z__1, &c_b1, &a[k + k * a_dim1]);  /* r1 = 1/D(k) */
+		r1.r = z__1.r, r1.i = z__1.i;
+		i__1 = k - 1;
+		z__1.r = -r1.r, z__1.i = -r1.i;
+		zsyr_(uplo, &i__1, &z__1, &a[k * a_dim1 + 1], &c__1, &a[
+			a_offset], lda);
+
+/* Store U(k) in column k */
+
+		i__1 = k - 1;
+		zscal_(&i__1, &r1, &a[k * a_dim1 + 1], &c__1);
+	    } else {
+
+/* 2-by-2 pivot block D(k): columns k and k-1 now hold */
+
+/* ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */
+
+/* where U(k) and U(k-1) are the k-th and (k-1)-th columns */
+/* of U */
+
+/* Perform a rank-2 update of A(1:k-2,1:k-2) as */
+
+/* A := A - ( U(k-1) U(k) )*D(k)*( U(k-1) U(k) )' */
+/* = A - ( W(k-1) W(k) )*inv(D(k))*( W(k-1) W(k) )' */
+
+		if (k > 2) {
+
+		    i__1 = k - 1 + k * a_dim1;
+		    d12.r = a[i__1].r, d12.i = a[i__1].i;
+		    z_div(&z__1, &a[k - 1 + (k - 1) * a_dim1], &d12);
+		    d22.r = z__1.r, d22.i = z__1.i;
+		    z_div(&z__1, &a[k + k * a_dim1], &d12);
+		    d11.r = z__1.r, d11.i = z__1.i;
+		    z__3.r = d11.r * d22.r - d11.i * d22.i, z__3.i = d11.r *
+			    d22.i + d11.i * d22.r;
+		    z__2.r = z__3.r - 1., z__2.i = z__3.i - 0.;
+		    z_div(&z__1, &c_b1, &z__2);
+		    t.r = z__1.r, t.i = z__1.i;  /* t = 1/(d11*d22 - 1) */
+		    z_div(&z__1, &t, &d12);
+		    d12.r = z__1.r, d12.i = z__1.i;  /* d12 := t / A(k-1,k) */
+
+		    for (j = k - 2; j >= 1; --j) {
+			i__1 = j + (k - 1) * a_dim1;
+			z__3.r = d11.r * a[i__1].r - d11.i * a[i__1].i,
+				z__3.i = d11.r * a[i__1].i + d11.i * a[i__1]
+				.r;
+			i__2 = j + k * a_dim1;
+			z__2.r = z__3.r - a[i__2].r, z__2.i = z__3.i - a[i__2]
+				.i;
+			z__1.r = d12.r * z__2.r - d12.i * z__2.i, z__1.i =
+				d12.r * z__2.i + d12.i * z__2.r;
+			wkm1.r = z__1.r, wkm1.i = z__1.i;
+			i__1 = j + k * a_dim1;
+			z__3.r = d22.r * a[i__1].r - d22.i * a[i__1].i,
+				z__3.i = d22.r * a[i__1].i + d22.i * a[i__1]
+				.r;
+			i__2 = j + (k - 1) * a_dim1;
+			z__2.r = z__3.r - a[i__2].r, z__2.i = z__3.i - a[i__2]
+				.i;
+			z__1.r = d12.r * z__2.r - d12.i * z__2.i, z__1.i =
+				d12.r * z__2.i + d12.i * z__2.r;
+			wk.r = z__1.r, wk.i = z__1.i;
+			for (i__ = j; i__ >= 1; --i__) {
+			    i__1 = i__ + j * a_dim1;
+			    i__2 = i__ + j * a_dim1;
+			    i__3 = i__ + k * a_dim1;
+			    z__3.r = a[i__3].r * wk.r - a[i__3].i * wk.i,
+				    z__3.i = a[i__3].r * wk.i + a[i__3].i *
+				    wk.r;
+			    z__2.r = a[i__2].r - z__3.r, z__2.i = a[i__2].i -
+				    z__3.i;
+			    i__4 = i__ + (k - 1) * a_dim1;
+			    z__4.r = a[i__4].r * wkm1.r - a[i__4].i * wkm1.i,
+				    z__4.i = a[i__4].r * wkm1.i + a[i__4].i *
+				    wkm1.r;
+			    z__1.r = z__2.r - z__4.r, z__1.i = z__2.i -
+				    z__4.i;
+			    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+/* L20: */
+			}
+			i__1 = j + k * a_dim1;
+			a[i__1].r = wk.r, a[i__1].i = wk.i;
+			i__1 = j + (k - 1) * a_dim1;
+			a[i__1].r = wkm1.r, a[i__1].i = wkm1.i;
+/* L30: */
+		    }
+
+		}
+
+	    }
+	}
+
+/* Store details of the interchanges in IPIV */
+
+	if (kstep == 1) {
+	    ipiv[k] = kp;
+	} else {
+	    ipiv[k] = -kp;
+	    ipiv[k - 1] = -kp;
+	}
+
+/* Decrease K and return to the start of the main loop */
+
+	k -= kstep;
+	goto L10;
+
+    } else {
+
+/* Factorize A as L*D*L' using the lower triangle of A */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* 1 or 2 */
+
+	k = 1;
+L40:
+
+/* If K > N, exit from loop */
+
+	if (k > *n) {
+	    goto L70;
+	}
+	kstep = 1;
+
+/* Determine rows and columns to be interchanged and whether */
+/* a 1-by-1 or 2-by-2 pivot block will be used */
+
+	i__1 = k + k * a_dim1;
+	absakk = (d__1 = a[i__1].r, abs(d__1)) + (d__2 = d_imag(&a[k + k *
+		a_dim1]), abs(d__2));
+
+/* IMAX is the row-index of the largest off-diagonal element in */
+/* column K, and COLMAX is its absolute value */
+
+	if (k < *n) {
+	    i__1 = *n - k;
+	    imax = k + izamax_(&i__1, &a[k + 1 + k * a_dim1], &c__1);  /* index of largest |.| in A(k+1:n,k) */
+	    i__1 = imax + k * a_dim1;
+	    colmax = (d__1 = a[i__1].r, abs(d__1)) + (d__2 = d_imag(&a[imax +
+		    k * a_dim1]), abs(d__2));
+	} else {
+	    colmax = 0.;
+	}
+
+	if (max(absakk,colmax) == 0. || disnan_(&absakk)) {
+
+/* Column K is zero or contains a NaN: set INFO and continue */
+
+	    if (*info == 0) {
+		*info = k;
+	    }
+	    kp = k;
+	} else {
+	    if (absakk >= alpha * colmax) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+		kp = k;
+	    } else {
+
+/* JMAX is the column-index of the largest off-diagonal */
+/* element in row IMAX, and ROWMAX is its absolute value */
+
+		i__1 = imax - k;
+		jmax = k - 1 + izamax_(&i__1, &a[imax + k * a_dim1], lda);
+		i__1 = imax + jmax * a_dim1;
+		rowmax = (d__1 = a[i__1].r, abs(d__1)) + (d__2 = d_imag(&a[
+			imax + jmax * a_dim1]), abs(d__2));
+		if (imax < *n) {
+		    i__1 = *n - imax;
+		    jmax = imax + izamax_(&i__1, &a[imax + 1 + imax * a_dim1],
+			     &c__1);
+/* Computing MAX */
+		    i__1 = jmax + imax * a_dim1;
+		    d__3 = rowmax, d__4 = (d__1 = a[i__1].r, abs(d__1)) + (
+			    d__2 = d_imag(&a[jmax + imax * a_dim1]), abs(d__2)
+			    );
+		    rowmax = max(d__3,d__4);
+		}
+
+		if (absakk >= alpha * colmax * (colmax / rowmax)) {
+
+/* no interchange, use 1-by-1 pivot block */
+
+		    kp = k;
+		} else /* if(complicated condition) */ {
+		    i__1 = imax + imax * a_dim1;
+		    if ((d__1 = a[i__1].r, abs(d__1)) + (d__2 = d_imag(&a[
+			    imax + imax * a_dim1]), abs(d__2)) >= alpha *
+			    rowmax) {
+
+/* interchange rows and columns K and IMAX, use 1-by-1 */
+/* pivot block */
+
+			kp = imax;
+		    } else {
+
+/* interchange rows and columns K+1 and IMAX, use 2-by-2 */
+/* pivot block */
+
+			kp = imax;
+			kstep = 2;
+		    }
+		}
+	    }
+
+	    kk = k + kstep - 1;  /* last (rightmost) column of the pivot block */
+	    if (kp != kk) {
+
+/* Interchange rows and columns KK and KP in the trailing */
+/* submatrix A(k:n,k:n) */
+
+		if (kp < *n) {
+		    i__1 = *n - kp;
+		    zswap_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + 1
+			    + kp * a_dim1], &c__1);
+		}
+		i__1 = kp - kk - 1;
+		zswap_(&i__1, &a[kk + 1 + kk * a_dim1], &c__1, &a[kp + (kk +
+			1) * a_dim1], lda);
+		i__1 = kk + kk * a_dim1;
+		t.r = a[i__1].r, t.i = a[i__1].i;
+		i__1 = kk + kk * a_dim1;
+		i__2 = kp + kp * a_dim1;
+		a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+		i__1 = kp + kp * a_dim1;
+		a[i__1].r = t.r, a[i__1].i = t.i;
+		if (kstep == 2) {
+		    i__1 = k + 1 + k * a_dim1;
+		    t.r = a[i__1].r, t.i = a[i__1].i;
+		    i__1 = k + 1 + k * a_dim1;
+		    i__2 = kp + k * a_dim1;
+		    a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+		    i__1 = kp + k * a_dim1;
+		    a[i__1].r = t.r, a[i__1].i = t.i;
+		}
+	    }
+
+/* Update the trailing submatrix */
+
+	    if (kstep == 1) {
+
+/* 1-by-1 pivot block D(k): column k now holds */
+
+/* W(k) = L(k)*D(k) */
+
+/* where L(k) is the k-th column of L */
+
+		if (k < *n) {
+
+/* Perform a rank-1 update of A(k+1:n,k+1:n) as */
+
+/* A := A - L(k)*D(k)*L(k)' = A - W(k)*(1/D(k))*W(k)' */
+
+		    z_div(&z__1, &c_b1, &a[k + k * a_dim1]);
+		    r1.r = z__1.r, r1.i = z__1.i;  /* r1 = 1/D(k) */
+		    i__1 = *n - k;
+		    z__1.r = -r1.r, z__1.i = -r1.i;
+		    zsyr_(uplo, &i__1, &z__1, &a[k + 1 + k * a_dim1], &c__1, &
+			    a[k + 1 + (k + 1) * a_dim1], lda);
+
+/* Store L(k) in column K */
+
+		    i__1 = *n - k;
+		    zscal_(&i__1, &r1, &a[k + 1 + k * a_dim1], &c__1);
+		}
+	    } else {
+
+/* 2-by-2 pivot block D(k) */
+
+		if (k < *n - 1) {
+
+/* Perform a rank-2 update of A(k+2:n,k+2:n) as */
+
+/* A := A - ( L(k) L(k+1) )*D(k)*( L(k) L(k+1) )' */
+/* = A - ( W(k) W(k+1) )*inv(D(k))*( W(k) W(k+1) )' */
+
+/* where L(k) and L(k+1) are the k-th and (k+1)-th */
+/* columns of L */
+
+		    i__1 = k + 1 + k * a_dim1;
+		    d21.r = a[i__1].r, d21.i = a[i__1].i;
+		    z_div(&z__1, &a[k + 1 + (k + 1) * a_dim1], &d21);
+		    d11.r = z__1.r, d11.i = z__1.i;
+		    z_div(&z__1, &a[k + k * a_dim1], &d21);
+		    d22.r = z__1.r, d22.i = z__1.i;
+		    z__3.r = d11.r * d22.r - d11.i * d22.i, z__3.i = d11.r *
+			    d22.i + d11.i * d22.r;
+		    z__2.r = z__3.r - 1., z__2.i = z__3.i - 0.;
+		    z_div(&z__1, &c_b1, &z__2);
+		    t.r = z__1.r, t.i = z__1.i;  /* t = 1/(d11*d22 - 1) */
+		    z_div(&z__1, &t, &d21);
+		    d21.r = z__1.r, d21.i = z__1.i;  /* d21 := t / A(k+1,k) */
+
+		    i__1 = *n;
+		    for (j = k + 2; j <= i__1; ++j) {
+			i__2 = j + k * a_dim1;
+			z__3.r = d11.r * a[i__2].r - d11.i * a[i__2].i,
+				z__3.i = d11.r * a[i__2].i + d11.i * a[i__2]
+				.r;
+			i__3 = j + (k + 1) * a_dim1;
+			z__2.r = z__3.r - a[i__3].r, z__2.i = z__3.i - a[i__3]
+				.i;
+			z__1.r = d21.r * z__2.r - d21.i * z__2.i, z__1.i =
+				d21.r * z__2.i + d21.i * z__2.r;
+			wk.r = z__1.r, wk.i = z__1.i;
+			i__2 = j + (k + 1) * a_dim1;
+			z__3.r = d22.r * a[i__2].r - d22.i * a[i__2].i,
+				z__3.i = d22.r * a[i__2].i + d22.i * a[i__2]
+				.r;
+			i__3 = j + k * a_dim1;
+			z__2.r = z__3.r - a[i__3].r, z__2.i = z__3.i - a[i__3]
+				.i;
+			z__1.r = d21.r * z__2.r - d21.i * z__2.i, z__1.i =
+				d21.r * z__2.i + d21.i * z__2.r;
+			wkp1.r = z__1.r, wkp1.i = z__1.i;
+			i__2 = *n;
+			for (i__ = j; i__ <= i__2; ++i__) {
+			    i__3 = i__ + j * a_dim1;
+			    i__4 = i__ + j * a_dim1;
+			    i__5 = i__ + k * a_dim1;
+			    z__3.r = a[i__5].r * wk.r - a[i__5].i * wk.i,
+				    z__3.i = a[i__5].r * wk.i + a[i__5].i *
+				    wk.r;
+			    z__2.r = a[i__4].r - z__3.r, z__2.i = a[i__4].i -
+				    z__3.i;
+			    i__6 = i__ + (k + 1) * a_dim1;
+			    z__4.r = a[i__6].r * wkp1.r - a[i__6].i * wkp1.i,
+				    z__4.i = a[i__6].r * wkp1.i + a[i__6].i *
+				    wkp1.r;
+			    z__1.r = z__2.r - z__4.r, z__1.i = z__2.i -
+				    z__4.i;
+			    a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L50: */
+			}
+			i__2 = j + k * a_dim1;
+			a[i__2].r = wk.r, a[i__2].i = wk.i;
+			i__2 = j + (k + 1) * a_dim1;
+			a[i__2].r = wkp1.r, a[i__2].i = wkp1.i;
+/* L60: */
+		    }
+		}
+	    }
+	}
+
+/* Store details of the interchanges in IPIV */
+
+	if (kstep == 1) {
+	    ipiv[k] = kp;
+	} else {
+	    ipiv[k] = -kp;
+	    ipiv[k + 1] = -kp;
+	}
+
+/* Increase K and return to the start of the main loop */
+
+	k += kstep;
+	goto L40;
+
+    }
+
+L70:
+    return 0;
+
+/* End of ZSYTF2 */
+
+} /* zsytf2_ */
diff --git a/contrib/libs/clapack/zsytrf.c b/contrib/libs/clapack/zsytrf.c
new file mode 100644
index 0000000000..43a5234f27
--- /dev/null
+++ b/contrib/libs/clapack/zsytrf.c
@@ -0,0 +1,343 @@
+/* zsytrf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int zsytrf_(char *uplo, integer *n, doublecomplex *a,
+	integer *lda, integer *ipiv, doublecomplex *work, integer *lwork,
+	integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+
+    /* Local variables */
+    integer j, k, kb, nb, iws;
+    extern logical lsame_(char *, char *);
+    integer nbmin, iinfo;
+    logical upper;
+    extern /* Subroutine */ int zsytf2_(char *, integer *, doublecomplex *,
+	    integer *, integer *, integer *), xerbla_(char *, integer
+	    *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer ldwork;
+    extern /* Subroutine */ int zlasyf_(char *, integer *, integer *, integer
+	    *, doublecomplex *, integer *, integer *, doublecomplex *,
+	    integer *, integer *);
+    integer lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZSYTRF computes the factorization of a complex symmetric matrix A */
+/* using the Bunch-Kaufman diagonal pivoting method. The form of the */
+/* factorization is */
+
+/* A = U*D*U**T or A = L*D*L**T */
+
+/* where U (or L) is a product of permutation and unit upper (lower) */
+/* triangular matrices, and D is symmetric and block diagonal with */
+/* with 1-by-1 and 2-by-2 diagonal blocks. */
+
+/* This is the blocked version of the algorithm, calling Level 3 BLAS. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A is stored; */
+/* = 'L': Lower triangle of A is stored. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the symmetric matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* On exit, the block diagonal matrix D and the multipliers used */
+/* to obtain the factor U or L (see below for further details). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* IPIV (output) INTEGER array, dimension (N) */
+/* Details of the interchanges and the block structure of D. */
+/* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
+/* interchanged and D(k,k) is a 1-by-1 diagonal block. */
+/* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
+/* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
+/* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */
+/* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
+/* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The length of WORK. LWORK >=1. For best performance */
+/* LWORK >= N*NB, where NB is the block size returned by ILAENV. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */
+/* has been completed, but the block diagonal matrix D is */
+/* exactly singular, and division by zero will occur if it */
+/* is used to solve a system of equations. */
+
+/* Further Details */
+/* =============== */
+
+/* If UPLO = 'U', then A = U*D*U', where */
+/* U = P(n)*U(n)* ... *P(k)U(k)* ..., */
+/* i.e., U is a product of terms P(k)*U(k), where k decreases from n to */
+/* 1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I v 0 ) k-s */
+/* U(k) = ( 0 I 0 ) s */
+/* ( 0 0 I ) n-k */
+/* k-s s n-k */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */
+/* If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */
+/* and A(k,k), and v overwrites A(1:k-2,k-1:k). */
+
+/* If UPLO = 'L', then A = L*D*L', where */
+/* L = P(1)*L(1)* ... *P(k)*L(k)* ..., */
+/* i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */
+/* n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
+/* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */
+/* defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */
+/* that if the diagonal block D(k) is of order s (s = 1 or 2), then */
+
+/* ( I 0 0 ) k-1 */
+/* L(k) = ( 0 I 0 ) s */
+/* ( 0 v I ) n-k-s+1 */
+/* k-1 s n-k-s+1 */
+
+/* If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */
+/* If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */
+/* and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;  /* f2c 1-based, column-major indexing */
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    lquery = *lwork == -1;
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    } else if (*lwork < 1 && ! lquery) {
+	*info = -7;
+    }
+
+    if (*info == 0) {
+
+/* Determine the block size */
+
+	nb = ilaenv_(&c__1, "ZSYTRF", uplo, n, &c_n1, &c_n1, &c_n1);
+	lwkopt = *n * nb;  /* optimal workspace: one n-by-nb panel */
+	work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZSYTRF", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+    nbmin = 2;
+    ldwork = *n;
+    if (nb > 1 && nb < *n) {
+	iws = ldwork * nb;
+	if (*lwork < iws) {
+/* Computing MAX */
+	    i__1 = *lwork / ldwork;
+	    nb = max(i__1,1);  /* shrink NB to fit the workspace actually supplied */
+/* Computing MAX */
+	    i__1 = 2, i__2 = ilaenv_(&c__2, "ZSYTRF", uplo, n, &c_n1, &c_n1, &
+		    c_n1);
+	    nbmin = max(i__1,i__2);
+	}
+    } else {
+	iws = 1;
+    }
+    if (nb < nbmin) {
+	nb = *n;  /* workspace too small to block: fall through to unblocked code */
+    }
+
+    if (upper) {
+
+/* Factorize A as U*D*U' using the upper triangle of A */
+
+/* K is the main loop index, decreasing from N to 1 in steps of */
+/* KB, where KB is the number of columns factorized by ZLASYF; */
+/* KB is either NB or NB-1, or K for the last block */
+
+	k = *n;
+L10:
+
+/* If K < 1, exit from loop */
+
+	if (k < 1) {
+	    goto L40;
+	}
+
+	if (k > nb) {
+
+/* Factorize columns k-kb+1:k of A and use blocked code to */
+/* update columns 1:k-kb */
+
+	    zlasyf_(uplo, &k, &nb, &kb, &a[a_offset], lda, &ipiv[1], &work[1],
+		     n, &iinfo);
+	} else {
+
+/* Use unblocked code to factorize columns 1:k of A */
+
+	    zsytf2_(uplo, &k, &a[a_offset], lda, &ipiv[1], &iinfo);
+	    kb = k;  /* the unblocked call factorized all remaining columns */
+	}
+
+/* Set INFO on the first occurrence of a zero pivot */
+
+	if (*info == 0 && iinfo > 0) {
+	    *info = iinfo;
+	}
+
+/* Decrease K and return to the start of the main loop */
+
+	k -= kb;
+	goto L10;
+
+    } else {
+
+/* Factorize A as L*D*L' using the lower triangle of A */
+
+/* K is the main loop index, increasing from 1 to N in steps of */
+/* KB, where KB is the number of columns factorized by ZLASYF; */
+/* KB is either NB or NB-1, or N-K+1 for the last block */
+
+	k = 1;
+L20:
+
+/* If K > N, exit from loop */
+
+	if (k > *n) {
+	    goto L40;
+	}
+
+	if (k <= *n - nb) {
+
+/* Factorize columns k:k+kb-1 of A and use blocked code to */
+/* update columns k+kb:n */
+
+	    i__1 = *n - k + 1;
+	    zlasyf_(uplo, &i__1, &nb, &kb, &a[k + k * a_dim1], lda, &ipiv[k],
+		    &work[1], n, &iinfo);
+	} else {
+
+/* Use unblocked code to factorize columns k:n of A */
+
+	    i__1 = *n - k + 1;
+	    zsytf2_(uplo, &i__1, &a[k + k * a_dim1], lda, &ipiv[k], &iinfo);
+	    kb = *n - k + 1;  /* the unblocked call handled all trailing columns */
+	}
+
+/* Set INFO on the first occurrence of a zero pivot */
+
+	if (*info == 0 && iinfo > 0) {
+	    *info = iinfo + k - 1;  /* shift failure index from panel-local to global */
+	}
+
+/* Adjust IPIV */
+
+	i__1 = k + kb - 1;
+	for (j = k; j <= i__1; ++j) {  /* zsytf2_/zlasyf_ returned pivots relative to column k */
+	    if (ipiv[j] > 0) {
+		ipiv[j] = ipiv[j] + k - 1;
+	    } else {
+		ipiv[j] = ipiv[j] - k + 1;
+	    }
+/* L30: */
+	}
+
+/* Increase K and return to the start of the main loop */
+
+	k += kb;
+	goto L20;
+
+    }
+
+L40:
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    return 0;
+
+/* End of ZSYTRF */
+
+} /* zsytrf_ */
diff --git a/contrib/libs/clapack/zsytri.c b/contrib/libs/clapack/zsytri.c
new file mode 100644
index 0000000000..020c69999d
--- /dev/null
+++ b/contrib/libs/clapack/zsytri.c
@@ -0,0 +1,489 @@
+/* zsytri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static doublecomplex c_b2 = {0.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zsytri_(char *uplo, integer *n, doublecomplex *a,
+	integer *lda, integer *ipiv, doublecomplex *work, integer *info)
+{
+
+/*  f2c translation note: scalar arguments arrive as pointers (Fortran */
+/*  pass-by-reference).  The "Parameter adjustments" below shift A, IPIV */
+/*  and WORK so that the 1-based Fortran indexing of the original source */
+/*  is preserved.  The GOTO-based control flow mirrors the Fortran code */
+/*  and is order-sensitive; do not restructure. */
+
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3;
+    doublecomplex z__1, z__2, z__3;
+
+    /* Builtin functions */
+    void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    doublecomplex d__;
+    integer k;
+    doublecomplex t, ak;
+    integer kp;
+    doublecomplex akp1, temp, akkp1;
+    extern logical lsame_(char *, char *);
+    integer kstep;
+    logical upper;
+    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *);
+    extern /* Double Complex */ VOID zdotu_(doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *);
+    extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *), zsymv_(char *, integer *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, doublecomplex *, integer *),
+	    xerbla_(char *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZSYTRI computes the inverse of a complex symmetric indefinite matrix */
+/*  A using the factorization A = U*D*U**T or A = L*D*L**T computed by */
+/*  ZSYTRF. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the details of the factorization are stored */
+/*          as an upper or lower triangular matrix. */
+/*          = 'U':  Upper triangular, form is A = U*D*U**T; */
+/*          = 'L':  Lower triangular, form is A = L*D*L**T. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/*          On entry, the block diagonal matrix D and the multipliers */
+/*          used to obtain the factor U or L as computed by ZSYTRF. */
+
+/*          On exit, if INFO = 0, the (symmetric) inverse of the original */
+/*          matrix.  If UPLO = 'U', the upper triangular part of the */
+/*          inverse is formed and the part of A below the diagonal is not */
+/*          referenced; if UPLO = 'L' the lower triangular part of the */
+/*          inverse is formed and the part of A above the diagonal is */
+/*          not referenced. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  IPIV    (input) INTEGER array, dimension (N) */
+/*          Details of the interchanges and the block structure of D */
+/*          as determined by ZSYTRF. */
+
+/*  WORK    (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0: successful exit */
+/*          < 0: if INFO = -i, the i-th argument had an illegal value */
+/*          > 0: if INFO = i, D(i,i) = 0; the matrix is singular and its */
+/*               inverse could not be computed. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZSYTRI", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/*     Check that the diagonal matrix D is nonsingular. */
+
+/*     NOTE: *info doubles as the loop index here; an early return leaves */
+/*     it set to the index of the zero 1-by-1 pivot, which is exactly the */
+/*     INFO > 0 contract documented above. */
+
+    if (upper) {
+
+/*        Upper triangular storage: examine D from bottom to top */
+
+	for (*info = *n; *info >= 1; --(*info)) {
+	    i__1 = *info + *info * a_dim1;
+	    if (ipiv[*info] > 0 && (a[i__1].r == 0. && a[i__1].i == 0.)) {
+		return 0;
+	    }
+/* L10: */
+	}
+    } else {
+
+/*        Lower triangular storage: examine D from top to bottom. */
+
+	i__1 = *n;
+	for (*info = 1; *info <= i__1; ++(*info)) {
+	    i__2 = *info + *info * a_dim1;
+	    if (ipiv[*info] > 0 && (a[i__2].r == 0. && a[i__2].i == 0.)) {
+		return 0;
+	    }
+/* L20: */
+	}
+    }
+    *info = 0;
+
+    if (upper) {
+
+/*        Compute inv(A) from the factorization A = U*D*U'. */
+
+/*        K is the main loop index, increasing from 1 to N in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	k = 1;
+L30:
+
+/*        If K > N, exit from loop. */
+
+	if (k > *n) {
+	    goto L40;
+	}
+
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Invert the diagonal block. */
+
+	    i__1 = k + k * a_dim1;
+	    z_div(&z__1, &c_b1, &a[k + k * a_dim1]);
+	    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+
+/*           Compute column K of the inverse. */
+
+	    if (k > 1) {
+		i__1 = k - 1;
+		zcopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &work[1], &c__1);
+		i__1 = k - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zsymv_(uplo, &i__1, &z__1, &a[a_offset], lda, &work[1], &c__1,
+			 &c_b2, &a[k * a_dim1 + 1], &c__1);
+		i__1 = k + k * a_dim1;
+		i__2 = k + k * a_dim1;
+		i__3 = k - 1;
+		zdotu_(&z__2, &i__3, &work[1], &c__1, &a[k * a_dim1 + 1], &
+			c__1);
+		z__1.r = a[i__2].r - z__2.r, z__1.i = a[i__2].i - z__2.i;
+		a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+	    }
+	    kstep = 1;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Invert the diagonal block. */
+
+/*           All quantities are divided by T = A(K,K+1), the off-diagonal */
+/*           of the 2-by-2 block, so the explicit 2-by-2 inverse below is */
+/*           formed from scaled values AK, AKP1, AKKP1 and determinant D. */
+
+	    i__1 = k + (k + 1) * a_dim1;
+	    t.r = a[i__1].r, t.i = a[i__1].i;
+	    z_div(&z__1, &a[k + k * a_dim1], &t);
+	    ak.r = z__1.r, ak.i = z__1.i;
+	    z_div(&z__1, &a[k + 1 + (k + 1) * a_dim1], &t);
+	    akp1.r = z__1.r, akp1.i = z__1.i;
+	    z_div(&z__1, &a[k + (k + 1) * a_dim1], &t);
+	    akkp1.r = z__1.r, akkp1.i = z__1.i;
+	    z__3.r = ak.r * akp1.r - ak.i * akp1.i, z__3.i = ak.r * akp1.i +
+		    ak.i * akp1.r;
+	    z__2.r = z__3.r - 1., z__2.i = z__3.i - 0.;
+	    z__1.r = t.r * z__2.r - t.i * z__2.i, z__1.i = t.r * z__2.i + t.i
+		    * z__2.r;
+	    d__.r = z__1.r, d__.i = z__1.i;
+	    i__1 = k + k * a_dim1;
+	    z_div(&z__1, &akp1, &d__);
+	    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+	    i__1 = k + 1 + (k + 1) * a_dim1;
+	    z_div(&z__1, &ak, &d__);
+	    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+	    i__1 = k + (k + 1) * a_dim1;
+	    z__2.r = -akkp1.r, z__2.i = -akkp1.i;
+	    z_div(&z__1, &z__2, &d__);
+	    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+
+/*           Compute columns K and K+1 of the inverse. */
+
+	    if (k > 1) {
+		i__1 = k - 1;
+		zcopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &work[1], &c__1);
+		i__1 = k - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zsymv_(uplo, &i__1, &z__1, &a[a_offset], lda, &work[1], &c__1,
+			 &c_b2, &a[k * a_dim1 + 1], &c__1);
+		i__1 = k + k * a_dim1;
+		i__2 = k + k * a_dim1;
+		i__3 = k - 1;
+		zdotu_(&z__2, &i__3, &work[1], &c__1, &a[k * a_dim1 + 1], &
+			c__1);
+		z__1.r = a[i__2].r - z__2.r, z__1.i = a[i__2].i - z__2.i;
+		a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+		i__1 = k + (k + 1) * a_dim1;
+		i__2 = k + (k + 1) * a_dim1;
+		i__3 = k - 1;
+		zdotu_(&z__2, &i__3, &a[k * a_dim1 + 1], &c__1, &a[(k + 1) *
+			a_dim1 + 1], &c__1);
+		z__1.r = a[i__2].r - z__2.r, z__1.i = a[i__2].i - z__2.i;
+		a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+		i__1 = k - 1;
+		zcopy_(&i__1, &a[(k + 1) * a_dim1 + 1], &c__1, &work[1], &
+			c__1);
+		i__1 = k - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zsymv_(uplo, &i__1, &z__1, &a[a_offset], lda, &work[1], &c__1,
+			 &c_b2, &a[(k + 1) * a_dim1 + 1], &c__1);
+		i__1 = k + 1 + (k + 1) * a_dim1;
+		i__2 = k + 1 + (k + 1) * a_dim1;
+		i__3 = k - 1;
+		zdotu_(&z__2, &i__3, &work[1], &c__1, &a[(k + 1) * a_dim1 + 1]
+, &c__1);
+		z__1.r = a[i__2].r - z__2.r, z__1.i = a[i__2].i - z__2.i;
+		a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+	    }
+	    kstep = 2;
+	}
+
+/*        KP = row/column with which K was interchanged during the */
+/*        factorization (|IPIV(K)|); undo that interchange on the inverse. */
+
+	kp = (i__1 = ipiv[k], abs(i__1));
+	if (kp != k) {
+
+/*           Interchange rows and columns K and KP in the leading */
+/*           submatrix A(1:k+1,1:k+1) */
+
+	    i__1 = kp - 1;
+	    zswap_(&i__1, &a[k * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1], &
+		    c__1);
+	    i__1 = k - kp - 1;
+	    zswap_(&i__1, &a[kp + 1 + k * a_dim1], &c__1, &a[kp + (kp + 1) *
+		    a_dim1], lda);
+	    i__1 = k + k * a_dim1;
+	    temp.r = a[i__1].r, temp.i = a[i__1].i;
+	    i__1 = k + k * a_dim1;
+	    i__2 = kp + kp * a_dim1;
+	    a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+	    i__1 = kp + kp * a_dim1;
+	    a[i__1].r = temp.r, a[i__1].i = temp.i;
+	    if (kstep == 2) {
+		i__1 = k + (k + 1) * a_dim1;
+		temp.r = a[i__1].r, temp.i = a[i__1].i;
+		i__1 = k + (k + 1) * a_dim1;
+		i__2 = kp + (k + 1) * a_dim1;
+		a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+		i__1 = kp + (k + 1) * a_dim1;
+		a[i__1].r = temp.r, a[i__1].i = temp.i;
+	    }
+	}
+
+	k += kstep;
+	goto L30;
+L40:
+
+	;
+    } else {
+
+/*        Compute inv(A) from the factorization A = L*D*L'. */
+
+/*        K is the main loop index, increasing from 1 to N in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	k = *n;
+L50:
+
+/*        If K < 1, exit from loop. */
+
+	if (k < 1) {
+	    goto L60;
+	}
+
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Invert the diagonal block. */
+
+	    i__1 = k + k * a_dim1;
+	    z_div(&z__1, &c_b1, &a[k + k * a_dim1]);
+	    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+
+/*           Compute column K of the inverse. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		zcopy_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &work[1], &c__1);
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zsymv_(uplo, &i__1, &z__1, &a[k + 1 + (k + 1) * a_dim1], lda,
+			&work[1], &c__1, &c_b2, &a[k + 1 + k * a_dim1], &c__1);
+		i__1 = k + k * a_dim1;
+		i__2 = k + k * a_dim1;
+		i__3 = *n - k;
+		zdotu_(&z__2, &i__3, &work[1], &c__1, &a[k + 1 + k * a_dim1],
+			&c__1);
+		z__1.r = a[i__2].r - z__2.r, z__1.i = a[i__2].i - z__2.i;
+		a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+	    }
+	    kstep = 1;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Invert the diagonal block. */
+
+/*           Mirror of the upper-triangular case: scale by the */
+/*           off-diagonal T = A(K,K-1) of the 2-by-2 block. */
+
+	    i__1 = k + (k - 1) * a_dim1;
+	    t.r = a[i__1].r, t.i = a[i__1].i;
+	    z_div(&z__1, &a[k - 1 + (k - 1) * a_dim1], &t);
+	    ak.r = z__1.r, ak.i = z__1.i;
+	    z_div(&z__1, &a[k + k * a_dim1], &t);
+	    akp1.r = z__1.r, akp1.i = z__1.i;
+	    z_div(&z__1, &a[k + (k - 1) * a_dim1], &t);
+	    akkp1.r = z__1.r, akkp1.i = z__1.i;
+	    z__3.r = ak.r * akp1.r - ak.i * akp1.i, z__3.i = ak.r * akp1.i +
+		    ak.i * akp1.r;
+	    z__2.r = z__3.r - 1., z__2.i = z__3.i - 0.;
+	    z__1.r = t.r * z__2.r - t.i * z__2.i, z__1.i = t.r * z__2.i + t.i
+		    * z__2.r;
+	    d__.r = z__1.r, d__.i = z__1.i;
+	    i__1 = k - 1 + (k - 1) * a_dim1;
+	    z_div(&z__1, &akp1, &d__);
+	    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+	    i__1 = k + k * a_dim1;
+	    z_div(&z__1, &ak, &d__);
+	    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+	    i__1 = k + (k - 1) * a_dim1;
+	    z__2.r = -akkp1.r, z__2.i = -akkp1.i;
+	    z_div(&z__1, &z__2, &d__);
+	    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+
+/*           Compute columns K-1 and K of the inverse. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		zcopy_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &work[1], &c__1);
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zsymv_(uplo, &i__1, &z__1, &a[k + 1 + (k + 1) * a_dim1], lda,
+			&work[1], &c__1, &c_b2, &a[k + 1 + k * a_dim1], &c__1);
+		i__1 = k + k * a_dim1;
+		i__2 = k + k * a_dim1;
+		i__3 = *n - k;
+		zdotu_(&z__2, &i__3, &work[1], &c__1, &a[k + 1 + k * a_dim1],
+			&c__1);
+		z__1.r = a[i__2].r - z__2.r, z__1.i = a[i__2].i - z__2.i;
+		a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+		i__1 = k + (k - 1) * a_dim1;
+		i__2 = k + (k - 1) * a_dim1;
+		i__3 = *n - k;
+		zdotu_(&z__2, &i__3, &a[k + 1 + k * a_dim1], &c__1, &a[k + 1
+			+ (k - 1) * a_dim1], &c__1);
+		z__1.r = a[i__2].r - z__2.r, z__1.i = a[i__2].i - z__2.i;
+		a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+		i__1 = *n - k;
+		zcopy_(&i__1, &a[k + 1 + (k - 1) * a_dim1], &c__1, &work[1], &
+			c__1);
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zsymv_(uplo, &i__1, &z__1, &a[k + 1 + (k + 1) * a_dim1], lda,
+			&work[1], &c__1, &c_b2, &a[k + 1 + (k - 1) * a_dim1],
+			&c__1);
+		i__1 = k - 1 + (k - 1) * a_dim1;
+		i__2 = k - 1 + (k - 1) * a_dim1;
+		i__3 = *n - k;
+		zdotu_(&z__2, &i__3, &work[1], &c__1, &a[k + 1 + (k - 1) *
+			a_dim1], &c__1);
+		z__1.r = a[i__2].r - z__2.r, z__1.i = a[i__2].i - z__2.i;
+		a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+	    }
+	    kstep = 2;
+	}
+
+/*        KP = row/column with which K was interchanged during the */
+/*        factorization (|IPIV(K)|); undo that interchange on the inverse. */
+
+	kp = (i__1 = ipiv[k], abs(i__1));
+	if (kp != k) {
+
+/*           Interchange rows and columns K and KP in the trailing */
+/*           submatrix A(k-1:n,k-1:n) */
+
+	    if (kp < *n) {
+		i__1 = *n - kp;
+		zswap_(&i__1, &a[kp + 1 + k * a_dim1], &c__1, &a[kp + 1 + kp *
+			 a_dim1], &c__1);
+	    }
+	    i__1 = kp - k - 1;
+	    zswap_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &a[kp + (k + 1) *
+		    a_dim1], lda);
+	    i__1 = k + k * a_dim1;
+	    temp.r = a[i__1].r, temp.i = a[i__1].i;
+	    i__1 = k + k * a_dim1;
+	    i__2 = kp + kp * a_dim1;
+	    a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+	    i__1 = kp + kp * a_dim1;
+	    a[i__1].r = temp.r, a[i__1].i = temp.i;
+	    if (kstep == 2) {
+		i__1 = k + (k - 1) * a_dim1;
+		temp.r = a[i__1].r, temp.i = a[i__1].i;
+		i__1 = k + (k - 1) * a_dim1;
+		i__2 = kp + (k - 1) * a_dim1;
+		a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
+		i__1 = kp + (k - 1) * a_dim1;
+		a[i__1].r = temp.r, a[i__1].i = temp.i;
+	    }
+	}
+
+	k -= kstep;
+	goto L50;
+L60:
+	;
+    }
+
+    return 0;
+
+/*     End of ZSYTRI */
+
+} /* zsytri_ */
diff --git a/contrib/libs/clapack/zsytrs.c b/contrib/libs/clapack/zsytrs.c
new file mode 100644
index 0000000000..5971cff806
--- /dev/null
+++ b/contrib/libs/clapack/zsytrs.c
@@ -0,0 +1,502 @@
+/* zsytrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int zsytrs_(char *uplo, integer *n, integer *nrhs,
+	doublecomplex *a, integer *lda, integer *ipiv, doublecomplex *b,
+	integer *ldb, integer *info)
+{
+
+/*  f2c translation note: scalar arguments arrive as pointers (Fortran */
+/*  pass-by-reference).  The pointer adjustments below preserve 1-based */
+/*  Fortran indexing of A, IPIV and B.  The GOTO loops reproduce the */
+/*  original Fortran control flow and are order-sensitive. */
+
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+    doublecomplex z__1, z__2, z__3;
+
+    /* Builtin functions */
+    void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer j, k;
+    doublecomplex ak, bk;
+    integer kp;
+    doublecomplex akm1, bkm1, akm1k;
+    extern logical lsame_(char *, char *);
+    doublecomplex denom;
+    extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+	    doublecomplex *, integer *), zgemv_(char *, integer *, integer *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, doublecomplex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int zgeru_(integer *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *), zswap_(integer *, doublecomplex *,
+	    integer *, doublecomplex *, integer *), xerbla_(char *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZSYTRS solves a system of linear equations A*X = B with a complex */
+/*  symmetric matrix A using the factorization A = U*D*U**T or */
+/*  A = L*D*L**T computed by ZSYTRF. */
+
+/*  Arguments */
+/*  ========= */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          Specifies whether the details of the factorization are stored */
+/*          as an upper or lower triangular matrix. */
+/*          = 'U':  Upper triangular, form is A = U*D*U**T; */
+/*          = 'L':  Lower triangular, form is A = L*D*L**T. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  NRHS    (input) INTEGER */
+/*          The number of right hand sides, i.e., the number of columns */
+/*          of the matrix B.  NRHS >= 0. */
+
+/*  A       (input) COMPLEX*16 array, dimension (LDA,N) */
+/*          The block diagonal matrix D and the multipliers used to */
+/*          obtain the factor U or L as computed by ZSYTRF. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A.  LDA >= max(1,N). */
+
+/*  IPIV    (input) INTEGER array, dimension (N) */
+/*          Details of the interchanges and the block structure of D */
+/*          as determined by ZSYTRF. */
+
+/*  B       (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/*          On entry, the right hand side matrix B. */
+/*          On exit, the solution matrix X. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B.  LDB >= max(1,N). */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ipiv;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*nrhs < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZSYTRS", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	return 0;
+    }
+
+    if (upper) {
+
+/*        Solve A*X = B, where A = U*D*U'. */
+
+/*        First solve U*D*X = B, overwriting B with X. */
+
+/*        K is the main loop index, decreasing from N to 1 in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	k = *n;
+L10:
+
+/*        If K < 1, exit from loop. */
+
+	if (k < 1) {
+	    goto L30;
+	}
+
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/*           Multiply by inv(U(K)), where U(K) is the transformation */
+/*           stored in column K of A. */
+
+	    i__1 = k - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgeru_(&i__1, nrhs, &z__1, &a[k * a_dim1 + 1], &c__1, &b[k +
+		    b_dim1], ldb, &b[b_dim1 + 1], ldb);
+
+/*           Multiply by the inverse of the diagonal block. */
+
+	    z_div(&z__1, &c_b1, &a[k + k * a_dim1]);
+	    zscal_(nrhs, &z__1, &b[k + b_dim1], ldb);
+	    --k;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Interchange rows K-1 and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k - 1) {
+		zswap_(nrhs, &b[k - 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/*           Multiply by inv(U(K)), where U(K) is the transformation */
+/*           stored in columns K-1 and K of A. */
+
+	    i__1 = k - 2;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgeru_(&i__1, nrhs, &z__1, &a[k * a_dim1 + 1], &c__1, &b[k +
+		    b_dim1], ldb, &b[b_dim1 + 1], ldb);
+	    i__1 = k - 2;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgeru_(&i__1, nrhs, &z__1, &a[(k - 1) * a_dim1 + 1], &c__1, &b[k
+		    - 1 + b_dim1], ldb, &b[b_dim1 + 1], ldb);
+
+/*           Multiply by the inverse of the diagonal block. */
+
+/*           The 2-by-2 system is solved by Cramer's rule with every */
+/*           quantity divided by AKM1K = A(K-1,K): DENOM below is the */
+/*           scaled determinant of the block. */
+
+	    i__1 = k - 1 + k * a_dim1;
+	    akm1k.r = a[i__1].r, akm1k.i = a[i__1].i;
+	    z_div(&z__1, &a[k - 1 + (k - 1) * a_dim1], &akm1k);
+	    akm1.r = z__1.r, akm1.i = z__1.i;
+	    z_div(&z__1, &a[k + k * a_dim1], &akm1k);
+	    ak.r = z__1.r, ak.i = z__1.i;
+	    z__2.r = akm1.r * ak.r - akm1.i * ak.i, z__2.i = akm1.r * ak.i +
+		    akm1.i * ak.r;
+	    z__1.r = z__2.r - 1., z__1.i = z__2.i - 0.;
+	    denom.r = z__1.r, denom.i = z__1.i;
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		z_div(&z__1, &b[k - 1 + j * b_dim1], &akm1k);
+		bkm1.r = z__1.r, bkm1.i = z__1.i;
+		z_div(&z__1, &b[k + j * b_dim1], &akm1k);
+		bk.r = z__1.r, bk.i = z__1.i;
+		i__2 = k - 1 + j * b_dim1;
+		z__3.r = ak.r * bkm1.r - ak.i * bkm1.i, z__3.i = ak.r *
+			bkm1.i + ak.i * bkm1.r;
+		z__2.r = z__3.r - bk.r, z__2.i = z__3.i - bk.i;
+		z_div(&z__1, &z__2, &denom);
+		b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+		i__2 = k + j * b_dim1;
+		z__3.r = akm1.r * bk.r - akm1.i * bk.i, z__3.i = akm1.r *
+			bk.i + akm1.i * bk.r;
+		z__2.r = z__3.r - bkm1.r, z__2.i = z__3.i - bkm1.i;
+		z_div(&z__1, &z__2, &denom);
+		b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L20: */
+	    }
+	    k += -2;
+	}
+
+	goto L10;
+L30:
+
+/*        Next solve U'*X = B, overwriting B with X. */
+
+/*        K is the main loop index, increasing from 1 to N in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	k = 1;
+L40:
+
+/*        If K > N, exit from loop. */
+
+	if (k > *n) {
+	    goto L50;
+	}
+
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Multiply by inv(U'(K)), where U(K) is the transformation */
+/*           stored in column K of A. */
+
+	    i__1 = k - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgemv_("Transpose", &i__1, nrhs, &z__1, &b[b_offset], ldb, &a[k *
+		    a_dim1 + 1], &c__1, &c_b1, &b[k + b_dim1], ldb)
+		    ;
+
+/*           Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    ++k;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Multiply by inv(U'(K+1)), where U(K+1) is the transformation */
+/*           stored in columns K and K+1 of A. */
+
+	    i__1 = k - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgemv_("Transpose", &i__1, nrhs, &z__1, &b[b_offset], ldb, &a[k *
+		    a_dim1 + 1], &c__1, &c_b1, &b[k + b_dim1], ldb)
+		    ;
+	    i__1 = k - 1;
+	    z__1.r = -1., z__1.i = -0.;
+	    zgemv_("Transpose", &i__1, nrhs, &z__1, &b[b_offset], ldb, &a[(k
+		    + 1) * a_dim1 + 1], &c__1, &c_b1, &b[k + 1 + b_dim1], ldb);
+
+/*           Interchange rows K and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k) {
+		zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    k += 2;
+	}
+
+	goto L40;
+L50:
+
+	;
+    } else {
+
+/*        Solve A*X = B, where A = L*D*L'. */
+
+/*        First solve L*D*X = B, overwriting B with X. */
+
+/*        K is the main loop index, increasing from 1 to N in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	k = 1;
+L60:
+
+/*        If K > N, exit from loop. */
+
+	if (k > *n) {
+	    goto L80;
+	}
+
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/*           Multiply by inv(L(K)), where L(K) is the transformation */
+/*           stored in column K of A. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zgeru_(&i__1, nrhs, &z__1, &a[k + 1 + k * a_dim1], &c__1, &b[
+			k + b_dim1], ldb, &b[k + 1 + b_dim1], ldb);
+	    }
+
+/*           Multiply by the inverse of the diagonal block. */
+
+	    z_div(&z__1, &c_b1, &a[k + k * a_dim1]);
+	    zscal_(nrhs, &z__1, &b[k + b_dim1], ldb);
+	    ++k;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Interchange rows K+1 and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k + 1) {
+		zswap_(nrhs, &b[k + 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+
+/*           Multiply by inv(L(K)), where L(K) is the transformation */
+/*           stored in columns K and K+1 of A. */
+
+	    if (k < *n - 1) {
+		i__1 = *n - k - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgeru_(&i__1, nrhs, &z__1, &a[k + 2 + k * a_dim1], &c__1, &b[
+			k + b_dim1], ldb, &b[k + 2 + b_dim1], ldb);
+		i__1 = *n - k - 1;
+		z__1.r = -1., z__1.i = -0.;
+		zgeru_(&i__1, nrhs, &z__1, &a[k + 2 + (k + 1) * a_dim1], &
+			c__1, &b[k + 1 + b_dim1], ldb, &b[k + 2 + b_dim1],
+			ldb);
+	    }
+
+/*           Multiply by the inverse of the diagonal block. */
+
+/*           Same scaled-Cramer solve as the upper case, with */
+/*           AKM1K = A(K+1,K). */
+
+	    i__1 = k + 1 + k * a_dim1;
+	    akm1k.r = a[i__1].r, akm1k.i = a[i__1].i;
+	    z_div(&z__1, &a[k + k * a_dim1], &akm1k);
+	    akm1.r = z__1.r, akm1.i = z__1.i;
+	    z_div(&z__1, &a[k + 1 + (k + 1) * a_dim1], &akm1k);
+	    ak.r = z__1.r, ak.i = z__1.i;
+	    z__2.r = akm1.r * ak.r - akm1.i * ak.i, z__2.i = akm1.r * ak.i +
+		    akm1.i * ak.r;
+	    z__1.r = z__2.r - 1., z__1.i = z__2.i - 0.;
+	    denom.r = z__1.r, denom.i = z__1.i;
+	    i__1 = *nrhs;
+	    for (j = 1; j <= i__1; ++j) {
+		z_div(&z__1, &b[k + j * b_dim1], &akm1k);
+		bkm1.r = z__1.r, bkm1.i = z__1.i;
+		z_div(&z__1, &b[k + 1 + j * b_dim1], &akm1k);
+		bk.r = z__1.r, bk.i = z__1.i;
+		i__2 = k + j * b_dim1;
+		z__3.r = ak.r * bkm1.r - ak.i * bkm1.i, z__3.i = ak.r *
+			bkm1.i + ak.i * bkm1.r;
+		z__2.r = z__3.r - bk.r, z__2.i = z__3.i - bk.i;
+		z_div(&z__1, &z__2, &denom);
+		b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+		i__2 = k + 1 + j * b_dim1;
+		z__3.r = akm1.r * bk.r - akm1.i * bk.i, z__3.i = akm1.r *
+			bk.i + akm1.i * bk.r;
+		z__2.r = z__3.r - bkm1.r, z__2.i = z__3.i - bkm1.i;
+		z_div(&z__1, &z__2, &denom);
+		b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L70: */
+	    }
+	    k += 2;
+	}
+
+	goto L60;
+L80:
+
+/*        Next solve L'*X = B, overwriting B with X. */
+
+/*        K is the main loop index, decreasing from N to 1 in steps of */
+/*        1 or 2, depending on the size of the diagonal blocks. */
+
+	k = *n;
+L90:
+
+/*        If K < 1, exit from loop. */
+
+	if (k < 1) {
+	    goto L100;
+	}
+
+	if (ipiv[k] > 0) {
+
+/*           1 x 1 diagonal block */
+
+/*           Multiply by inv(L'(K)), where L(K) is the transformation */
+/*           stored in column K of A. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("Transpose", &i__1, nrhs, &z__1, &b[k + 1 + b_dim1],
+			ldb, &a[k + 1 + k * a_dim1], &c__1, &c_b1, &b[k +
+			b_dim1], ldb);
+	    }
+
+/*           Interchange rows K and IPIV(K). */
+
+	    kp = ipiv[k];
+	    if (kp != k) {
+		zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    --k;
+	} else {
+
+/*           2 x 2 diagonal block */
+
+/*           Multiply by inv(L'(K-1)), where L(K-1) is the transformation */
+/*           stored in columns K-1 and K of A. */
+
+	    if (k < *n) {
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("Transpose", &i__1, nrhs, &z__1, &b[k + 1 + b_dim1],
+			ldb, &a[k + 1 + k * a_dim1], &c__1, &c_b1, &b[k +
+			b_dim1], ldb);
+		i__1 = *n - k;
+		z__1.r = -1., z__1.i = -0.;
+		zgemv_("Transpose", &i__1, nrhs, &z__1, &b[k + 1 + b_dim1],
+			ldb, &a[k + 1 + (k - 1) * a_dim1], &c__1, &c_b1, &b[k
+			- 1 + b_dim1], ldb);
+	    }
+
+/*           Interchange rows K and -IPIV(K). */
+
+	    kp = -ipiv[k];
+	    if (kp != k) {
+		zswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
+	    }
+	    k += -2;
+	}
+
+	goto L90;
+L100:
+	;
+    }
+
+    return 0;
+
+/*     End of ZSYTRS */
+
+} /* zsytrs_ */
diff --git a/contrib/libs/clapack/ztbcon.c b/contrib/libs/clapack/ztbcon.c
new file mode 100644
index 0000000000..186f3d5076
--- /dev/null
+++ b/contrib/libs/clapack/ztbcon.c
@@ -0,0 +1,254 @@
+/* ztbcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ztbcon_(char *norm, char *uplo, char *diag, integer *n,
+	integer *kd, doublecomplex *ab, integer *ldab, doublereal *rcond,
+	doublecomplex *work, doublereal *rwork, integer *info)
+{
+
+/*  f2c translation note: scalar arguments arrive as pointers (Fortran */
+/*  pass-by-reference); the pointer adjustments below preserve 1-based */
+/*  Fortran indexing of AB, WORK and RWORK. */
+
+    /* System generated locals */
+    integer ab_dim1, ab_offset, i__1;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *);
+
+    /* Local variables */
+    integer ix, kase, kase1;
+    doublereal scale;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    doublereal anorm;
+    logical upper;
+    doublereal xnorm;
+    extern /* Subroutine */ int zlacn2_(integer *, doublecomplex *,
+	    doublecomplex *, doublereal *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal ainvnm;
+    extern integer izamax_(integer *, doublecomplex *, integer *);
+    extern doublereal zlantb_(char *, char *, char *, integer *, integer *,
+	    doublecomplex *, integer *, doublereal *);
+    logical onenrm;
+    extern /* Subroutine */ int zlatbs_(char *, char *, char *, char *,
+	    integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+	     doublereal *, doublereal *, integer *), zdrscl_(integer *, doublereal *, doublecomplex *,
+	    integer *);
+    char normin[1];
+    doublereal smlnum;
+    logical nounit;
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*  Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZTBCON estimates the reciprocal of the condition number of a */
+/*  triangular band matrix A, in either the 1-norm or the infinity-norm. */
+
+/*  The norm of A is computed and an estimate is obtained for */
+/*  norm(inv(A)), then the reciprocal of the condition number is */
+/*  computed as */
+/*     RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/*  Arguments */
+/*  ========= */
+
+/*  NORM    (input) CHARACTER*1 */
+/*          Specifies whether the 1-norm condition number or the */
+/*          infinity-norm condition number is required: */
+/*          = '1' or 'O':  1-norm; */
+/*          = 'I':         Infinity-norm. */
+
+/*  UPLO    (input) CHARACTER*1 */
+/*          = 'U':  A is upper triangular; */
+/*          = 'L':  A is lower triangular. */
+
+/*  DIAG    (input) CHARACTER*1 */
+/*          = 'N':  A is non-unit triangular; */
+/*          = 'U':  A is unit triangular. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrix A.  N >= 0. */
+
+/*  KD      (input) INTEGER */
+/*          The number of superdiagonals or subdiagonals of the */
+/*          triangular band matrix A.  KD >= 0. */
+
+/*  AB      (input) COMPLEX*16 array, dimension (LDAB,N) */
+/*          The upper or lower triangular band matrix A, stored in the */
+/*          first kd+1 rows of the array. The j-th column of A is stored */
+/*          in the j-th column of the array AB as follows: */
+/*          if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/*          if UPLO = 'L', AB(1+i-j,j)    = A(i,j) for j<=i<=min(n,j+kd). */
+/*          If DIAG = 'U', the diagonal elements of A are not referenced */
+/*          and are assumed to be 1. */
+
+/*  LDAB    (input) INTEGER */
+/*          The leading dimension of the array AB.  LDAB >= KD+1. */
+
+/*  RCOND   (output) DOUBLE PRECISION */
+/*          The reciprocal of the condition number of the matrix A, */
+/*          computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/*  WORK    (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/*  RWORK   (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/*  INFO    (output) INTEGER */
+/*          = 0:  successful exit */
+/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Statement Functions .. */
+/*     .. */
+/*     .. Statement Function definitions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Test the input parameters. */
+
+    /* Parameter adjustments */
+    ab_dim1 = *ldab;
+    ab_offset = 1 + ab_dim1;
+    ab -= ab_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+    nounit = lsame_(diag, "N");
+
+    if (! onenrm && ! lsame_(norm, "I")) {
+	*info = -1;
+    } else if (! upper && ! lsame_(uplo, "L")) {
+	*info = -2;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*kd < 0) {
+	*info = -5;
+    } else if (*ldab < *kd + 1) {
+	*info = -7;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZTBCON", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n == 0) {
+	*rcond = 1.;
+	return 0;
+    }
+
+    *rcond = 0.;
+    smlnum = dlamch_("Safe minimum") * (doublereal) max(*n,1);
+
+/*     Compute the 1-norm of the triangular matrix A or A'. */
+
+    anorm = zlantb_(norm, uplo, diag, n, kd, &ab[ab_offset], ldab, &rwork[1]);
+
+/*     Continue only if ANORM > 0. */
+
+    if (anorm > 0.) {
+
+/*        Estimate the 1-norm of the inverse of A. */
+
+	ainvnm = 0.;
+	*(unsigned char *)normin = 'N';
+	if (onenrm) {
+	    kase1 = 1;
+	} else {
+	    kase1 = 2;
+	}
+	kase = 0;
+
+/*        Reverse-communication loop: ZLACN2 sets KASE != 0 to request */
+/*        a triangular solve with A (KASE == KASE1) or with A', and */
+/*        KASE == 0 once the norm estimate AINVNM has converged. */
+
+L10:
+	zlacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+	if (kase != 0) {
+	    if (kase == kase1) {
+
+/*              Multiply by inv(A). */
+
+		zlatbs_(uplo, "No transpose", diag, normin, n, kd, &ab[
+			ab_offset], ldab, &work[1], &scale, &rwork[1], info);
+	    } else {
+
+/*              Multiply by inv(A'). */
+
+		zlatbs_(uplo, "Conjugate transpose", diag, normin, n, kd, &ab[
+			ab_offset], ldab, &work[1], &scale, &rwork[1], info);
+	    }
+	    *(unsigned char *)normin = 'Y';
+
+/*           Multiply by 1/SCALE if doing so will not cause overflow. */
+
+	    if (scale != 1.) {
+		ix = izamax_(n, &work[1], &c__1);
+		i__1 = ix;
+		xnorm = (d__1 = work[i__1].r, abs(d__1)) + (d__2 = d_imag(&
+			work[ix]), abs(d__2));
+		if (scale < xnorm * smlnum || scale == 0.) {
+		    goto L20;
+		}
+		zdrscl_(n, &scale, &work[1], &c__1);
+	    }
+	    goto L10;
+	}
+
+/*        Compute the estimate of the reciprocal condition number. */
+
+	if (ainvnm != 0.) {
+	    *rcond = 1. / anorm / ainvnm;
+	}
+    }
+
+L20:
+    return 0;
+
+/*     End of ZTBCON */
+
+} /* ztbcon_ */
diff --git a/contrib/libs/clapack/ztbrfs.c b/contrib/libs/clapack/ztbrfs.c
new file mode 100644
index 0000000000..b96354d8f4
--- /dev/null
+++ b/contrib/libs/clapack/ztbrfs.c
@@ -0,0 +1,586 @@
+/* ztbrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ztbrfs_(char *uplo, char *trans, char *diag, integer *n,
+ integer *kd, integer *nrhs, doublecomplex *ab, integer *ldab,
+ doublecomplex *b, integer *ldb, doublecomplex *x, integer *ldx,
+ doublereal *ferr, doublereal *berr, doublecomplex *work, doublereal *
+ rwork, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, b_dim1, b_offset, x_dim1, x_offset, i__1,
+ i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal s, xk;
+ integer nz;
+ doublereal eps;
+ integer kase;
+ doublereal safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ logical upper;
+ extern /* Subroutine */ int ztbmv_(char *, char *, char *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *), zcopy_(integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *), ztbsv_(char *, char *,
+ char *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zaxpy_(
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zlacn2_(integer *, doublecomplex *,
+ doublecomplex *, doublereal *, integer *, integer *);
+ extern doublereal dlamch_(char *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran;
+ char transn[1], transt[1];
+ logical nounit;
+ doublereal lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTBRFS provides error bounds and backward error estimates for the */
+/* solution to a system of linear equations with a triangular band */
+/* coefficient matrix. */
+
+/* The solution matrix X must be computed by ZTBTRS or some other */
+/* means before entering this routine. ZTBRFS does not do iterative */
+/* refinement because doing so cannot improve the backward error. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals or subdiagonals of the */
+/* triangular band matrix A. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AB (input) COMPLEX*16 array, dimension (LDAB,N) */
+/* The upper or lower triangular band matrix A, stored in the */
+/* first kd+1 rows of the array. The j-th column of A is stored */
+/* in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+/* If DIAG = 'U', the diagonal elements of A are not referenced */
+/* and are assumed to be 1. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* The solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*kd < 0) {
+ *info = -5;
+ } else if (*nrhs < 0) {
+ *info = -6;
+ } else if (*ldab < *kd + 1) {
+ *info = -8;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ } else if (*ldx < max(1,*n)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTBRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.;
+ berr[j] = 0.;
+/* L10: */
+ }
+ return 0;
+ }
+
+ if (notran) {
+ *(unsigned char *)transn = 'N';
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transn = 'C';
+ *(unsigned char *)transt = 'N';
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = *kd + 2;
+ eps = dlamch_("Epsilon");
+ safmin = dlamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A, A**T, or A**H, depending on TRANS. */
+
+ zcopy_(n, &x[j * x_dim1 + 1], &c__1, &work[1], &c__1);
+ ztbmv_(uplo, trans, diag, n, kd, &ab[ab_offset], ldab, &work[1], &
+ c__1);
+ z__1.r = -1., z__1.i = -0.;
+ zaxpy_(n, &z__1, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[i__] = (d__1 = b[i__3].r, abs(d__1)) + (d__2 = d_imag(&b[
+ i__ + j * b_dim1]), abs(d__2));
+/* L20: */
+ }
+
+ if (notran) {
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+ if (upper) {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * x_dim1;
+ xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&
+ x[k + j * x_dim1]), abs(d__2));
+/* Computing MAX */
+ i__3 = 1, i__4 = k - *kd;
+ i__5 = k;
+ for (i__ = max(i__3,i__4); i__ <= i__5; ++i__) {
+ i__3 = *kd + 1 + i__ - k + k * ab_dim1;
+ rwork[i__] += ((d__1 = ab[i__3].r, abs(d__1)) + (
+ d__2 = d_imag(&ab[*kd + 1 + i__ - k + k *
+ ab_dim1]), abs(d__2))) * xk;
+/* L30: */
+ }
+/* L40: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ i__5 = k + j * x_dim1;
+ xk = (d__1 = x[i__5].r, abs(d__1)) + (d__2 = d_imag(&
+ x[k + j * x_dim1]), abs(d__2));
+/* Computing MAX */
+ i__5 = 1, i__3 = k - *kd;
+ i__4 = k - 1;
+ for (i__ = max(i__5,i__3); i__ <= i__4; ++i__) {
+ i__5 = *kd + 1 + i__ - k + k * ab_dim1;
+ rwork[i__] += ((d__1 = ab[i__5].r, abs(d__1)) + (
+ d__2 = d_imag(&ab[*kd + 1 + i__ - k + k *
+ ab_dim1]), abs(d__2))) * xk;
+/* L50: */
+ }
+ rwork[k] += xk;
+/* L60: */
+ }
+ }
+ } else {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ i__4 = k + j * x_dim1;
+ xk = (d__1 = x[i__4].r, abs(d__1)) + (d__2 = d_imag(&
+ x[k + j * x_dim1]), abs(d__2));
+/* Computing MIN */
+ i__5 = *n, i__3 = k + *kd;
+ i__4 = min(i__5,i__3);
+ for (i__ = k; i__ <= i__4; ++i__) {
+ i__5 = i__ + 1 - k + k * ab_dim1;
+ rwork[i__] += ((d__1 = ab[i__5].r, abs(d__1)) + (
+ d__2 = d_imag(&ab[i__ + 1 - k + k *
+ ab_dim1]), abs(d__2))) * xk;
+/* L70: */
+ }
+/* L80: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ i__4 = k + j * x_dim1;
+ xk = (d__1 = x[i__4].r, abs(d__1)) + (d__2 = d_imag(&
+ x[k + j * x_dim1]), abs(d__2));
+/* Computing MIN */
+ i__5 = *n, i__3 = k + *kd;
+ i__4 = min(i__5,i__3);
+ for (i__ = k + 1; i__ <= i__4; ++i__) {
+ i__5 = i__ + 1 - k + k * ab_dim1;
+ rwork[i__] += ((d__1 = ab[i__5].r, abs(d__1)) + (
+ d__2 = d_imag(&ab[i__ + 1 - k + k *
+ ab_dim1]), abs(d__2))) * xk;
+/* L90: */
+ }
+ rwork[k] += xk;
+/* L100: */
+ }
+ }
+ }
+ } else {
+
+/* Compute abs(A**H)*abs(X) + abs(B). */
+
+ if (upper) {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+/* Computing MAX */
+ i__4 = 1, i__5 = k - *kd;
+ i__3 = k;
+ for (i__ = max(i__4,i__5); i__ <= i__3; ++i__) {
+ i__4 = *kd + 1 + i__ - k + k * ab_dim1;
+ i__5 = i__ + j * x_dim1;
+ s += ((d__1 = ab[i__4].r, abs(d__1)) + (d__2 =
+ d_imag(&ab[*kd + 1 + i__ - k + k *
+ ab_dim1]), abs(d__2))) * ((d__3 = x[i__5]
+ .r, abs(d__3)) + (d__4 = d_imag(&x[i__ +
+ j * x_dim1]), abs(d__4)));
+/* L110: */
+ }
+ rwork[k] += s;
+/* L120: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * x_dim1;
+ s = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[
+ k + j * x_dim1]), abs(d__2));
+/* Computing MAX */
+ i__3 = 1, i__4 = k - *kd;
+ i__5 = k - 1;
+ for (i__ = max(i__3,i__4); i__ <= i__5; ++i__) {
+ i__3 = *kd + 1 + i__ - k + k * ab_dim1;
+ i__4 = i__ + j * x_dim1;
+ s += ((d__1 = ab[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&ab[*kd + 1 + i__ - k + k *
+ ab_dim1]), abs(d__2))) * ((d__3 = x[i__4]
+ .r, abs(d__3)) + (d__4 = d_imag(&x[i__ +
+ j * x_dim1]), abs(d__4)));
+/* L130: */
+ }
+ rwork[k] += s;
+/* L140: */
+ }
+ }
+ } else {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+/* Computing MIN */
+ i__3 = *n, i__4 = k + *kd;
+ i__5 = min(i__3,i__4);
+ for (i__ = k; i__ <= i__5; ++i__) {
+ i__3 = i__ + 1 - k + k * ab_dim1;
+ i__4 = i__ + j * x_dim1;
+ s += ((d__1 = ab[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&ab[i__ + 1 - k + k * ab_dim1]),
+ abs(d__2))) * ((d__3 = x[i__4].r, abs(
+ d__3)) + (d__4 = d_imag(&x[i__ + j *
+ x_dim1]), abs(d__4)));
+/* L150: */
+ }
+ rwork[k] += s;
+/* L160: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ i__5 = k + j * x_dim1;
+ s = (d__1 = x[i__5].r, abs(d__1)) + (d__2 = d_imag(&x[
+ k + j * x_dim1]), abs(d__2));
+/* Computing MIN */
+ i__3 = *n, i__4 = k + *kd;
+ i__5 = min(i__3,i__4);
+ for (i__ = k + 1; i__ <= i__5; ++i__) {
+ i__3 = i__ + 1 - k + k * ab_dim1;
+ i__4 = i__ + j * x_dim1;
+ s += ((d__1 = ab[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&ab[i__ + 1 - k + k * ab_dim1]),
+ abs(d__2))) * ((d__3 = x[i__4].r, abs(
+ d__3)) + (d__4 = d_imag(&x[i__ + j *
+ x_dim1]), abs(d__4)));
+/* L170: */
+ }
+ rwork[k] += s;
+/* L180: */
+ }
+ }
+ }
+ }
+ s = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+/* Computing MAX */
+ i__5 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2))) / rwork[i__];
+ s = max(d__3,d__4);
+ } else {
+/* Computing MAX */
+ i__5 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + safe1) / (rwork[i__]
+ + safe1);
+ s = max(d__3,d__4);
+ }
+/* L190: */
+ }
+ berr[j] = s;
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use ZLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+ i__5 = i__;
+ rwork[i__] = (d__1 = work[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ ;
+ } else {
+ i__5 = i__;
+ rwork[i__] = (d__1 = work[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ + safe1;
+ }
+/* L200: */
+ }
+
+ kase = 0;
+L210:
+ zlacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)**H). */
+
+ ztbsv_(uplo, transt, diag, n, kd, &ab[ab_offset], ldab, &work[
+ 1], &c__1);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__5 = i__;
+ i__3 = i__;
+ i__4 = i__;
+ z__1.r = rwork[i__3] * work[i__4].r, z__1.i = rwork[i__3]
+ * work[i__4].i;
+ work[i__5].r = z__1.r, work[i__5].i = z__1.i;
+/* L220: */
+ }
+ } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__5 = i__;
+ i__3 = i__;
+ i__4 = i__;
+ z__1.r = rwork[i__3] * work[i__4].r, z__1.i = rwork[i__3]
+ * work[i__4].i;
+ work[i__5].r = z__1.r, work[i__5].i = z__1.i;
+/* L230: */
+ }
+ ztbsv_(uplo, transn, diag, n, kd, &ab[ab_offset], ldab, &work[
+ 1], &c__1);
+ }
+ goto L210;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__5 = i__ + j * x_dim1;
+ d__3 = lstres, d__4 = (d__1 = x[i__5].r, abs(d__1)) + (d__2 =
+ d_imag(&x[i__ + j * x_dim1]), abs(d__2));
+ lstres = max(d__3,d__4);
+/* L240: */
+ }
+ if (lstres != 0.) {
+ ferr[j] /= lstres;
+ }
+
+/* L250: */
+ }
+
+ return 0;
+
+/* End of ZTBRFS */
+
+} /* ztbrfs_ */
diff --git a/contrib/libs/clapack/ztbtrs.c b/contrib/libs/clapack/ztbtrs.c
new file mode 100644
index 0000000000..b9ff7a7a49
--- /dev/null
+++ b/contrib/libs/clapack/ztbtrs.c
@@ -0,0 +1,205 @@
+/* ztbtrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ztbtrs_(char *uplo, char *trans, char *diag, integer *n,
+ integer *kd, integer *nrhs, doublecomplex *ab, integer *ldab,
+ doublecomplex *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer ab_dim1, ab_offset, b_dim1, b_offset, i__1, i__2;
+
+ /* Local variables */
+ integer j;
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern /* Subroutine */ int ztbsv_(char *, char *, char *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(char *, integer *);
+ logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTBTRS solves a triangular system of the form */
+
+/* A * X = B, A**T * X = B, or A**H * X = B, */
+
+/* where A is a triangular band matrix of order N, and B is an */
+/* N-by-NRHS matrix. A check is made to verify that A is nonsingular. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* KD (input) INTEGER */
+/* The number of superdiagonals or subdiagonals of the */
+/* triangular band matrix A. KD >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AB (input) COMPLEX*16 array, dimension (LDAB,N) */
+/* The upper or lower triangular band matrix A, stored in the */
+/* first kd+1 rows of AB. The j-th column of A is stored */
+/* in the j-th column of the array AB as follows: */
+/* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
+/* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+kd). */
+/* If DIAG = 'U', the diagonal elements of A are not referenced */
+/* and are assumed to be 1. */
+
+/* LDAB (input) INTEGER */
+/* The leading dimension of the array AB. LDAB >= KD+1. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, if INFO = 0, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element of A is zero, */
+/* indicating that the matrix is singular and the */
+/* solutions X have not been computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ ab_dim1 = *ldab;
+ ab_offset = 1 + ab_dim1;
+ ab -= ab_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*kd < 0) {
+ *info = -5;
+ } else if (*nrhs < 0) {
+ *info = -6;
+ } else if (*ldab < *kd + 1) {
+ *info = -8;
+ } else if (*ldb < max(1,*n)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTBTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check for singularity. */
+
+ if (nounit) {
+ if (upper) {
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ i__2 = *kd + 1 + *info * ab_dim1;
+ if (ab[i__2].r == 0. && ab[i__2].i == 0.) {
+ return 0;
+ }
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ i__2 = *info * ab_dim1 + 1;
+ if (ab[i__2].r == 0. && ab[i__2].i == 0.) {
+ return 0;
+ }
+/* L20: */
+ }
+ }
+ }
+ *info = 0;
+
+/* Solve A * X = B, A**T * X = B, or A**H * X = B. */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ztbsv_(uplo, trans, diag, n, kd, &ab[ab_offset], ldab, &b[j * b_dim1
+ + 1], &c__1);
+/* L30: */
+ }
+
+ return 0;
+
+/* End of ZTBTRS */
+
+} /* ztbtrs_ */
diff --git a/contrib/libs/clapack/ztfsm.c b/contrib/libs/clapack/ztfsm.c
new file mode 100644
index 0000000000..37d4bf9097
--- /dev/null
+++ b/contrib/libs/clapack/ztfsm.c
@@ -0,0 +1,1024 @@
+/* ztfsm.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+
+/* Subroutine */ int ztfsm_(char *transr, char *side, char *uplo, char *trans,
+ char *diag, integer *m, integer *n, doublecomplex *alpha,
+ doublecomplex *a, doublecomplex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2, i__3;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__, j, k, m1, m2, n1, n2, info;
+ logical normaltransr, lside;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ logical lower;
+ extern /* Subroutine */ int ztrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *),
+ xerbla_(char *, integer *);
+ logical misodd, nisodd, notrans;
+
+
+/* -- LAPACK routine (version 3.2.1) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- April 2009 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* Level 3 BLAS like routine for A in RFP Format. */
+
+/* ZTFSM solves the matrix equation */
+
+/* op( A )*X = alpha*B or X*op( A ) = alpha*B */
+
+/* where alpha is a scalar, X and B are m by n matrices, A is a unit, or */
+/* non-unit, upper or lower triangular matrix and op( A ) is one of */
+
+/* op( A ) = A or op( A ) = conjg( A' ). */
+
+/* A is in Rectangular Full Packed (RFP) Format. */
+
+/* The matrix X is overwritten on B. */
+
+/* Arguments */
+/* ========== */
+
+/* TRANSR - (input) CHARACTER */
+/* = 'N': The Normal Form of RFP A is stored; */
+/* = 'C': The Conjugate-transpose Form of RFP A is stored. */
+
+/* SIDE - (input) CHARACTER */
+/* On entry, SIDE specifies whether op( A ) appears on the left */
+/* or right of X as follows: */
+
+/* SIDE = 'L' or 'l' op( A )*X = alpha*B. */
+
+/* SIDE = 'R' or 'r' X*op( A ) = alpha*B. */
+
+/* Unchanged on exit. */
+
+/* UPLO - (input) CHARACTER */
+/* On entry, UPLO specifies whether the RFP matrix A came from */
+/* an upper or lower triangular matrix as follows: */
+/* UPLO = 'U' or 'u' RFP A came from an upper triangular matrix */
+/* UPLO = 'L' or 'l' RFP A came from a lower triangular matrix */
+
+/* Unchanged on exit. */
+
+/* TRANS - (input) CHARACTER */
+/* On entry, TRANS specifies the form of op( A ) to be used */
+/* in the matrix multiplication as follows: */
+
+/* TRANS = 'N' or 'n' op( A ) = A. */
+
+/* TRANS = 'C' or 'c' op( A ) = conjg( A' ). */
+
+/* Unchanged on exit. */
+
+/* DIAG - (input) CHARACTER */
+/* On entry, DIAG specifies whether or not RFP A is unit */
+/* triangular as follows: */
+
+/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
+
+/* DIAG = 'N' or 'n' A is not assumed to be unit */
+/* triangular. */
+
+/* Unchanged on exit. */
+
+/* M - (input) INTEGER. */
+/* On entry, M specifies the number of rows of B. M must be at */
+/* least zero. */
+/* Unchanged on exit. */
+
+/* N - (input) INTEGER. */
+/* On entry, N specifies the number of columns of B. N must be */
+/* at least zero. */
+/* Unchanged on exit. */
+
+/* ALPHA - (input) COMPLEX*16. */
+/* On entry, ALPHA specifies the scalar alpha. When alpha is */
+/* zero then A is not referenced and B need not be set before */
+/* entry. */
+/* Unchanged on exit. */
+
+/* A - (input) COMPLEX*16 array, dimension ( N*(N+1)/2 ); */
+/* NT = N*(N+1)/2. On entry, the matrix A in RFP Format. */
+/* RFP Format is described by TRANSR, UPLO and N as follows: */
+/* If TRANSR='N' then RFP A is (0:N,0:K-1) when N is even; */
+/* K=N/2. RFP A is (0:N-1,0:K) when N is odd; K=N/2. If */
+/* TRANSR = 'C' then RFP is the Conjugate-transpose of RFP A as */
+/* defined when TRANSR = 'N'. The contents of RFP A are defined */
+/* by UPLO as follows: If UPLO = 'U' the RFP A contains the NT */
+/* elements of upper packed A either in normal or */
+/* conjugate-transpose Format. If UPLO = 'L' the RFP A contains */
+/* the NT elements of lower packed A either in normal or */
+/* conjugate-transpose Format. The LDA of RFP A is (N+1)/2 when */
+/* TRANSR = 'C'. When TRANSR is 'N' the LDA is N+1 when N is */
+/* even and is N when is odd. */
+/* See the Note below for more details. Unchanged on exit. */
+
+/* B - (input/ouptut) COMPLEX*16 array, DIMENSION ( LDB, N) */
+/* Before entry, the leading m by n part of the array B must */
+/* contain the right-hand side matrix B, and on exit is */
+/* overwritten by the solution matrix X. */
+
+/* LDB - (input) INTEGER. */
+/* On entry, LDB specifies the first dimension of B as declared */
+/* in the calling (sub) program. LDB must be at least */
+/* max( 1, m ). */
+/* Unchanged on exit. */
+
+/* Further Details */
+/* =============== */
+
+/* We first consider Standard Packed Format when N is even. */
+/* We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* conjugate-transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* conjugate-transpose of the last three columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- -- */
+/* 03 04 05 33 43 53 */
+/* -- -- */
+/* 13 14 15 00 44 54 */
+/* -- */
+/* 23 24 25 10 11 55 */
+
+/* 33 34 35 20 21 22 */
+/* -- */
+/* 00 44 45 30 31 32 */
+/* -- -- */
+/* 01 11 55 40 41 42 */
+/* -- -- -- */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We next consider Standard Packed Format when N is odd. */
+/* We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* conjugate-transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* conjugate-transpose of the last two columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- */
+/* 02 03 04 00 33 43 */
+/* -- */
+/* 12 13 14 10 11 44 */
+
+/* 22 23 24 20 21 22 */
+/* -- */
+/* 00 33 34 30 31 32 */
+/* -- -- */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- */
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* .. */
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb - 1 - 0 + 1;
+ b_offset = 0 + b_dim1 * 0;
+ b -= b_offset;
+
+ /* Function Body */
+ info = 0;
+ normaltransr = lsame_(transr, "N");
+ lside = lsame_(side, "L");
+ lower = lsame_(uplo, "L");
+ notrans = lsame_(trans, "N");
+ if (! normaltransr && ! lsame_(transr, "C")) {
+ info = -1;
+ } else if (! lside && ! lsame_(side, "R")) {
+ info = -2;
+ } else if (! lower && ! lsame_(uplo, "U")) {
+ info = -3;
+ } else if (! notrans && ! lsame_(trans, "C")) {
+ info = -4;
+ } else if (! lsame_(diag, "N") && ! lsame_(diag,
+ "U")) {
+ info = -5;
+ } else if (*m < 0) {
+ info = -6;
+ } else if (*n < 0) {
+ info = -7;
+ } else if (*ldb < max(1,*m)) {
+ info = -11;
+ }
+ if (info != 0) {
+ i__1 = -info;
+ xerbla_("ZTFSM ", &i__1);
+ return 0;
+ }
+
+/* Quick return when ( (N.EQ.0).OR.(M.EQ.0) ) */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Quick return when ALPHA.EQ.(0D+0,0D+0) */
+
+ if (alpha->r == 0. && alpha->i == 0.) {
+ i__1 = *n - 1;
+ for (j = 0; j <= i__1; ++j) {
+ i__2 = *m - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0., b[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+ if (lside) {
+
+/* SIDE = 'L' */
+
+/* A is M-by-M. */
+/* If M is odd, set NISODD = .TRUE., and M1 and M2. */
+/* If M is even, NISODD = .FALSE., and M. */
+
+ if (*m % 2 == 0) {
+ misodd = FALSE_;
+ k = *m / 2;
+ } else {
+ misodd = TRUE_;
+ if (lower) {
+ m2 = *m / 2;
+ m1 = *m - m2;
+ } else {
+ m1 = *m / 2;
+ m2 = *m - m1;
+ }
+ }
+
+ if (misodd) {
+
+/* SIDE = 'L' and N is odd */
+
+ if (normaltransr) {
+
+/* SIDE = 'L', N is odd, and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', UPLO = 'L', and */
+/* TRANS = 'N' */
+
+ if (*m == 1) {
+ ztrsm_("L", "L", "N", diag, &m1, n, alpha, a, m, &
+ b[b_offset], ldb);
+ } else {
+ ztrsm_("L", "L", "N", diag, &m1, n, alpha, a, m, &
+ b[b_offset], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("N", "N", &m2, n, &m1, &z__1, &a[m1], m, &
+ b[b_offset], ldb, alpha, &b[m1], ldb);
+ ztrsm_("L", "U", "C", diag, &m2, n, &c_b1, &a[*m],
+ m, &b[m1], ldb);
+ }
+
+ } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', UPLO = 'L', and */
+/* TRANS = 'C' */
+
+ if (*m == 1) {
+ ztrsm_("L", "L", "C", diag, &m1, n, alpha, a, m, &
+ b[b_offset], ldb);
+ } else {
+ ztrsm_("L", "U", "N", diag, &m2, n, alpha, &a[*m],
+ m, &b[m1], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("C", "N", &m1, n, &m2, &z__1, &a[m1], m, &
+ b[m1], ldb, alpha, &b[b_offset], ldb);
+ ztrsm_("L", "L", "C", diag, &m1, n, &c_b1, a, m, &
+ b[b_offset], ldb);
+ }
+
+ }
+
+ } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', and UPLO = 'U' */
+
+ if (! notrans) {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', UPLO = 'U', and */
+/* TRANS = 'N' */
+
+ ztrsm_("L", "L", "N", diag, &m1, n, alpha, &a[m2], m,
+ &b[b_offset], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("C", "N", &m2, n, &m1, &z__1, a, m, &b[
+ b_offset], ldb, alpha, &b[m1], ldb);
+ ztrsm_("L", "U", "C", diag, &m2, n, &c_b1, &a[m1], m,
+ &b[m1], ldb);
+
+ } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'N', UPLO = 'U', and */
+/* TRANS = 'C' */
+
+ ztrsm_("L", "U", "N", diag, &m2, n, alpha, &a[m1], m,
+ &b[m1], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("N", "N", &m1, n, &m2, &z__1, a, m, &b[m1],
+ ldb, alpha, &b[b_offset], ldb);
+ ztrsm_("L", "L", "C", diag, &m1, n, &c_b1, &a[m2], m,
+ &b[b_offset], ldb);
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'L', N is odd, and TRANSR = 'C' */
+
+ if (lower) {
+
+/* SIDE ='L', N is odd, TRANSR = 'C', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='L', N is odd, TRANSR = 'C', UPLO = 'L', and */
+/* TRANS = 'N' */
+
+ if (*m == 1) {
+ ztrsm_("L", "U", "C", diag, &m1, n, alpha, a, &m1,
+ &b[b_offset], ldb);
+ } else {
+ ztrsm_("L", "U", "C", diag, &m1, n, alpha, a, &m1,
+ &b[b_offset], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("C", "N", &m2, n, &m1, &z__1, &a[m1 * m1],
+ &m1, &b[b_offset], ldb, alpha, &b[m1],
+ ldb);
+ ztrsm_("L", "L", "N", diag, &m2, n, &c_b1, &a[1],
+ &m1, &b[m1], ldb);
+ }
+
+ } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'C', UPLO = 'L', and */
+/* TRANS = 'C' */
+
+ if (*m == 1) {
+ ztrsm_("L", "U", "N", diag, &m1, n, alpha, a, &m1,
+ &b[b_offset], ldb);
+ } else {
+ ztrsm_("L", "L", "C", diag, &m2, n, alpha, &a[1],
+ &m1, &b[m1], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("N", "N", &m1, n, &m2, &z__1, &a[m1 * m1],
+ &m1, &b[m1], ldb, alpha, &b[b_offset],
+ ldb);
+ ztrsm_("L", "U", "N", diag, &m1, n, &c_b1, a, &m1,
+ &b[b_offset], ldb);
+ }
+
+ }
+
+ } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'C', and UPLO = 'U' */
+
+ if (! notrans) {
+
+/* SIDE ='L', N is odd, TRANSR = 'C', UPLO = 'U', and */
+/* TRANS = 'N' */
+
+ ztrsm_("L", "U", "C", diag, &m1, n, alpha, &a[m2 * m2]
+, &m2, &b[b_offset], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("N", "N", &m2, n, &m1, &z__1, a, &m2, &b[
+ b_offset], ldb, alpha, &b[m1], ldb);
+ ztrsm_("L", "L", "N", diag, &m2, n, &c_b1, &a[m1 * m2]
+, &m2, &b[m1], ldb);
+
+ } else {
+
+/* SIDE ='L', N is odd, TRANSR = 'C', UPLO = 'U', and */
+/* TRANS = 'C' */
+
+ ztrsm_("L", "L", "C", diag, &m2, n, alpha, &a[m1 * m2]
+, &m2, &b[m1], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("C", "N", &m1, n, &m2, &z__1, a, &m2, &b[m1],
+ ldb, alpha, &b[b_offset], ldb);
+ ztrsm_("L", "U", "N", diag, &m1, n, &c_b1, &a[m2 * m2]
+, &m2, &b[b_offset], ldb);
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'L' and N is even */
+
+ if (normaltransr) {
+
+/* SIDE = 'L', N is even, and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SIDE ='L', N is even, TRANSR = 'N', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'L', */
+/* and TRANS = 'N' */
+
+ i__1 = *m + 1;
+ ztrsm_("L", "L", "N", diag, &k, n, alpha, &a[1], &
+ i__1, &b[b_offset], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ i__1 = *m + 1;
+ zgemm_("N", "N", &k, n, &k, &z__1, &a[k + 1], &i__1, &
+ b[b_offset], ldb, alpha, &b[k], ldb);
+ i__1 = *m + 1;
+ ztrsm_("L", "U", "C", diag, &k, n, &c_b1, a, &i__1, &
+ b[k], ldb);
+
+ } else {
+
+/* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'L', */
+/* and TRANS = 'C' */
+
+ i__1 = *m + 1;
+ ztrsm_("L", "U", "N", diag, &k, n, alpha, a, &i__1, &
+ b[k], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ i__1 = *m + 1;
+ zgemm_("C", "N", &k, n, &k, &z__1, &a[k + 1], &i__1, &
+ b[k], ldb, alpha, &b[b_offset], ldb);
+ i__1 = *m + 1;
+ ztrsm_("L", "L", "C", diag, &k, n, &c_b1, &a[1], &
+ i__1, &b[b_offset], ldb);
+
+ }
+
+ } else {
+
+/* SIDE ='L', N is even, TRANSR = 'N', and UPLO = 'U' */
+
+ if (! notrans) {
+
+/* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'U', */
+/* and TRANS = 'N' */
+
+ i__1 = *m + 1;
+ ztrsm_("L", "L", "N", diag, &k, n, alpha, &a[k + 1], &
+ i__1, &b[b_offset], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ i__1 = *m + 1;
+ zgemm_("C", "N", &k, n, &k, &z__1, a, &i__1, &b[
+ b_offset], ldb, alpha, &b[k], ldb);
+ i__1 = *m + 1;
+ ztrsm_("L", "U", "C", diag, &k, n, &c_b1, &a[k], &
+ i__1, &b[k], ldb);
+
+ } else {
+
+/* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'U', */
+/* and TRANS = 'C' */
+ i__1 = *m + 1;
+ ztrsm_("L", "U", "N", diag, &k, n, alpha, &a[k], &
+ i__1, &b[k], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ i__1 = *m + 1;
+ zgemm_("N", "N", &k, n, &k, &z__1, a, &i__1, &b[k],
+ ldb, alpha, &b[b_offset], ldb);
+ i__1 = *m + 1;
+ ztrsm_("L", "L", "C", diag, &k, n, &c_b1, &a[k + 1], &
+ i__1, &b[b_offset], ldb);
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'L', N is even, and TRANSR = 'C' */
+
+ if (lower) {
+
+/* SIDE ='L', N is even, TRANSR = 'C', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='L', N is even, TRANSR = 'C', UPLO = 'L', */
+/* and TRANS = 'N' */
+
+ ztrsm_("L", "U", "C", diag, &k, n, alpha, &a[k], &k, &
+ b[b_offset], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("C", "N", &k, n, &k, &z__1, &a[k * (k + 1)], &
+ k, &b[b_offset], ldb, alpha, &b[k], ldb);
+ ztrsm_("L", "L", "N", diag, &k, n, &c_b1, a, &k, &b[k]
+, ldb);
+
+ } else {
+
+/* SIDE ='L', N is even, TRANSR = 'C', UPLO = 'L', */
+/* and TRANS = 'C' */
+
+ ztrsm_("L", "L", "C", diag, &k, n, alpha, a, &k, &b[k]
+, ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("N", "N", &k, n, &k, &z__1, &a[k * (k + 1)], &
+ k, &b[k], ldb, alpha, &b[b_offset], ldb);
+ ztrsm_("L", "U", "N", diag, &k, n, &c_b1, &a[k], &k, &
+ b[b_offset], ldb);
+
+ }
+
+ } else {
+
+/* SIDE ='L', N is even, TRANSR = 'C', and UPLO = 'U' */
+
+ if (! notrans) {
+
+/* SIDE ='L', N is even, TRANSR = 'C', UPLO = 'U', */
+/* and TRANS = 'N' */
+
+ ztrsm_("L", "U", "C", diag, &k, n, alpha, &a[k * (k +
+ 1)], &k, &b[b_offset], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("N", "N", &k, n, &k, &z__1, a, &k, &b[b_offset]
+, ldb, alpha, &b[k], ldb);
+ ztrsm_("L", "L", "N", diag, &k, n, &c_b1, &a[k * k], &
+ k, &b[k], ldb);
+
+ } else {
+
+/* SIDE ='L', N is even, TRANSR = 'C', UPLO = 'U', */
+/* and TRANS = 'C' */
+
+ ztrsm_("L", "L", "C", diag, &k, n, alpha, &a[k * k], &
+ k, &b[k], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("C", "N", &k, n, &k, &z__1, a, &k, &b[k], ldb,
+ alpha, &b[b_offset], ldb);
+ ztrsm_("L", "U", "N", diag, &k, n, &c_b1, &a[k * (k +
+ 1)], &k, &b[b_offset], ldb);
+
+ }
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'R' */
+
+/* A is N-by-N. */
+/* If N is odd, set NISODD = .TRUE., and N1 and N2. */
+/* If N is even, NISODD = .FALSE., and K. */
+
+ if (*n % 2 == 0) {
+ nisodd = FALSE_;
+ k = *n / 2;
+ } else {
+ nisodd = TRUE_;
+ if (lower) {
+ n2 = *n / 2;
+ n1 = *n - n2;
+ } else {
+ n1 = *n / 2;
+ n2 = *n - n1;
+ }
+ }
+
+ if (nisodd) {
+
+/* SIDE = 'R' and N is odd */
+
+ if (normaltransr) {
+
+/* SIDE = 'R', N is odd, and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', UPLO = 'L', and */
+/* TRANS = 'N' */
+
+ ztrsm_("R", "U", "C", diag, m, &n2, alpha, &a[*n], n,
+ &b[n1 * b_dim1], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("N", "N", m, &n1, &n2, &z__1, &b[n1 * b_dim1],
+ ldb, &a[n1], n, alpha, b, ldb);
+ ztrsm_("R", "L", "N", diag, m, &n1, &c_b1, a, n, b,
+ ldb);
+
+ } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', UPLO = 'L', and */
+/* TRANS = 'C' */
+
+ ztrsm_("R", "L", "C", diag, m, &n1, alpha, a, n, b,
+ ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("N", "C", m, &n2, &n1, &z__1, b, ldb, &a[n1],
+ n, alpha, &b[n1 * b_dim1], ldb);
+ ztrsm_("R", "U", "N", diag, m, &n2, &c_b1, &a[*n], n,
+ &b[n1 * b_dim1], ldb);
+
+ }
+
+ } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', UPLO = 'U', and */
+/* TRANS = 'N' */
+
+ ztrsm_("R", "L", "C", diag, m, &n1, alpha, &a[n2], n,
+ b, ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("N", "N", m, &n2, &n1, &z__1, b, ldb, a, n,
+ alpha, &b[n1 * b_dim1], ldb);
+ ztrsm_("R", "U", "N", diag, m, &n2, &c_b1, &a[n1], n,
+ &b[n1 * b_dim1], ldb);
+
+ } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'N', UPLO = 'U', and */
+/* TRANS = 'C' */
+
+ ztrsm_("R", "U", "C", diag, m, &n2, alpha, &a[n1], n,
+ &b[n1 * b_dim1], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("N", "C", m, &n1, &n2, &z__1, &b[n1 * b_dim1],
+ ldb, a, n, alpha, b, ldb);
+ ztrsm_("R", "L", "N", diag, m, &n1, &c_b1, &a[n2], n,
+ b, ldb);
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'R', N is odd, and TRANSR = 'C' */
+
+ if (lower) {
+
+/* SIDE ='R', N is odd, TRANSR = 'C', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is odd, TRANSR = 'C', UPLO = 'L', and */
+/* TRANS = 'N' */
+
+ ztrsm_("R", "L", "N", diag, m, &n2, alpha, &a[1], &n1,
+ &b[n1 * b_dim1], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("N", "C", m, &n1, &n2, &z__1, &b[n1 * b_dim1],
+ ldb, &a[n1 * n1], &n1, alpha, b, ldb);
+ ztrsm_("R", "U", "C", diag, m, &n1, &c_b1, a, &n1, b,
+ ldb);
+
+ } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'C', UPLO = 'L', and */
+/* TRANS = 'C' */
+
+ ztrsm_("R", "U", "N", diag, m, &n1, alpha, a, &n1, b,
+ ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("N", "N", m, &n2, &n1, &z__1, b, ldb, &a[n1 *
+ n1], &n1, alpha, &b[n1 * b_dim1], ldb);
+ ztrsm_("R", "L", "C", diag, m, &n2, &c_b1, &a[1], &n1,
+ &b[n1 * b_dim1], ldb);
+
+ }
+
+ } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'C', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is odd, TRANSR = 'C', UPLO = 'U', and */
+/* TRANS = 'N' */
+
+ ztrsm_("R", "U", "N", diag, m, &n1, alpha, &a[n2 * n2]
+, &n2, b, ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("N", "C", m, &n2, &n1, &z__1, b, ldb, a, &n2,
+ alpha, &b[n1 * b_dim1], ldb);
+ ztrsm_("R", "L", "C", diag, m, &n2, &c_b1, &a[n1 * n2]
+, &n2, &b[n1 * b_dim1], ldb);
+
+ } else {
+
+/* SIDE ='R', N is odd, TRANSR = 'C', UPLO = 'U', and */
+/* TRANS = 'C' */
+
+ ztrsm_("R", "L", "N", diag, m, &n2, alpha, &a[n1 * n2]
+, &n2, &b[n1 * b_dim1], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("N", "N", m, &n1, &n2, &z__1, &b[n1 * b_dim1],
+ ldb, a, &n2, alpha, b, ldb);
+ ztrsm_("R", "U", "C", diag, m, &n1, &c_b1, &a[n2 * n2]
+, &n2, b, ldb);
+
+ }
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'R' and N is even */
+
+ if (normaltransr) {
+
+/* SIDE = 'R', N is even, and TRANSR = 'N' */
+
+ if (lower) {
+
+/* SIDE ='R', N is even, TRANSR = 'N', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is even, TRANSR = 'N', UPLO = 'L', */
+/* and TRANS = 'N' */
+
+ i__1 = *n + 1;
+ ztrsm_("R", "U", "C", diag, m, &k, alpha, a, &i__1, &
+ b[k * b_dim1], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ i__1 = *n + 1;
+ zgemm_("N", "N", m, &k, &k, &z__1, &b[k * b_dim1],
+ ldb, &a[k + 1], &i__1, alpha, b, ldb);
+ i__1 = *n + 1;
+ ztrsm_("R", "L", "N", diag, m, &k, &c_b1, &a[1], &
+ i__1, b, ldb);
+
+ } else {
+
+/* SIDE ='R', N is even, TRANSR = 'N', UPLO = 'L', */
+/* and TRANS = 'C' */
+
+ i__1 = *n + 1;
+ ztrsm_("R", "L", "C", diag, m, &k, alpha, &a[1], &
+ i__1, b, ldb);
+ z__1.r = -1., z__1.i = -0.;
+ i__1 = *n + 1;
+ zgemm_("N", "C", m, &k, &k, &z__1, b, ldb, &a[k + 1],
+ &i__1, alpha, &b[k * b_dim1], ldb);
+ i__1 = *n + 1;
+ ztrsm_("R", "U", "N", diag, m, &k, &c_b1, a, &i__1, &
+ b[k * b_dim1], ldb);
+
+ }
+
+ } else {
+
+/* SIDE ='R', N is even, TRANSR = 'N', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is even, TRANSR = 'N', UPLO = 'U', */
+/* and TRANS = 'N' */
+
+ i__1 = *n + 1;
+ ztrsm_("R", "L", "C", diag, m, &k, alpha, &a[k + 1], &
+ i__1, b, ldb);
+ z__1.r = -1., z__1.i = -0.;
+ i__1 = *n + 1;
+ zgemm_("N", "N", m, &k, &k, &z__1, b, ldb, a, &i__1,
+ alpha, &b[k * b_dim1], ldb);
+ i__1 = *n + 1;
+ ztrsm_("R", "U", "N", diag, m, &k, &c_b1, &a[k], &
+ i__1, &b[k * b_dim1], ldb);
+
+ } else {
+
+/* SIDE ='R', N is even, TRANSR = 'N', UPLO = 'U', */
+/* and TRANS = 'C' */
+
+ i__1 = *n + 1;
+ ztrsm_("R", "U", "C", diag, m, &k, alpha, &a[k], &
+ i__1, &b[k * b_dim1], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ i__1 = *n + 1;
+ zgemm_("N", "C", m, &k, &k, &z__1, &b[k * b_dim1],
+ ldb, a, &i__1, alpha, b, ldb);
+ i__1 = *n + 1;
+ ztrsm_("R", "L", "N", diag, m, &k, &c_b1, &a[k + 1], &
+ i__1, b, ldb);
+
+ }
+
+ }
+
+ } else {
+
+/* SIDE = 'R', N is even, and TRANSR = 'C' */
+
+ if (lower) {
+
+/* SIDE ='R', N is even, TRANSR = 'C', and UPLO = 'L' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is even, TRANSR = 'C', UPLO = 'L', */
+/* and TRANS = 'N' */
+
+ ztrsm_("R", "L", "N", diag, m, &k, alpha, a, &k, &b[k
+ * b_dim1], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("N", "C", m, &k, &k, &z__1, &b[k * b_dim1],
+ ldb, &a[(k + 1) * k], &k, alpha, b, ldb);
+ ztrsm_("R", "U", "C", diag, m, &k, &c_b1, &a[k], &k,
+ b, ldb);
+
+ } else {
+
+/* SIDE ='R', N is even, TRANSR = 'C', UPLO = 'L', */
+/* and TRANS = 'C' */
+
+ ztrsm_("R", "U", "N", diag, m, &k, alpha, &a[k], &k,
+ b, ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("N", "N", m, &k, &k, &z__1, b, ldb, &a[(k + 1)
+ * k], &k, alpha, &b[k * b_dim1], ldb);
+ ztrsm_("R", "L", "C", diag, m, &k, &c_b1, a, &k, &b[k
+ * b_dim1], ldb);
+
+ }
+
+ } else {
+
+/* SIDE ='R', N is even, TRANSR = 'C', and UPLO = 'U' */
+
+ if (notrans) {
+
+/* SIDE ='R', N is even, TRANSR = 'C', UPLO = 'U', */
+/* and TRANS = 'N' */
+
+ ztrsm_("R", "U", "N", diag, m, &k, alpha, &a[(k + 1) *
+ k], &k, b, ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("N", "C", m, &k, &k, &z__1, b, ldb, a, &k,
+ alpha, &b[k * b_dim1], ldb);
+ ztrsm_("R", "L", "C", diag, m, &k, &c_b1, &a[k * k], &
+ k, &b[k * b_dim1], ldb);
+
+ } else {
+
+/* SIDE ='R', N is even, TRANSR = 'C', UPLO = 'U', */
+/* and TRANS = 'C' */
+
+ ztrsm_("R", "L", "N", diag, m, &k, alpha, &a[k * k], &
+ k, &b[k * b_dim1], ldb);
+ z__1.r = -1., z__1.i = -0.;
+ zgemm_("N", "N", m, &k, &k, &z__1, &b[k * b_dim1],
+ ldb, a, &k, alpha, b, ldb);
+ ztrsm_("R", "U", "C", diag, m, &k, &c_b1, &a[(k + 1) *
+ k], &k, b, ldb);
+
+ }
+
+ }
+
+ }
+
+ }
+ }
+
+ return 0;
+
+/* End of ZTFSM */
+
+} /* ztfsm_ */
diff --git a/contrib/libs/clapack/ztftri.c b/contrib/libs/clapack/ztftri.c
new file mode 100644
index 0000000000..9c028d1ebd
--- /dev/null
+++ b/contrib/libs/clapack/ztftri.c
@@ -0,0 +1,500 @@
+/* ztftri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+
+/* Subroutine */ int ztftri_(char *transr, char *uplo, char *diag, integer *n,
+	 doublecomplex *a, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2;
+    doublecomplex z__1;
+
+    /* Local variables */
+    integer k, n1, n2;
+    logical normaltransr;
+    extern logical lsame_(char *, char *);
+    logical lower;
+    extern /* Subroutine */ int ztrmm_(char *, char *, char *, char *,
+	    integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *, integer *),
+	    xerbla_(char *, integer *);
+    logical nisodd;
+    extern /* Subroutine */ int ztrtri_(char *, char *, integer *,
+	    doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTFTRI computes the inverse of a triangular matrix A stored in RFP */
+/* format. */
+
+/* This is a Level 3 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': The Normal TRANSR of RFP A is stored; */
+/* = 'C': The Conjugate-transpose TRANSR of RFP A is stored. */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* DIAG (input) CHARACTER */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension ( N*(N+1)/2 ); */
+/* On entry, the triangular matrix A in RFP format. RFP format */
+/* is described by TRANSR, UPLO, and N as follows: If TRANSR = */
+/* 'N' then RFP A is (0:N,0:k-1) when N is even; k=N/2. RFP A is */
+/* (0:N-1,0:k) when N is odd; k=N/2. IF TRANSR = 'C' then RFP is */
+/* the Conjugate-transpose of RFP A as defined when */
+/* TRANSR = 'N'. The contents of RFP A are defined by UPLO as */
+/* follows: If UPLO = 'U' the RFP A contains the nt elements of */
+/* upper packed A; If UPLO = 'L' the RFP A contains the nt */
+/* elements of lower packed A. The LDA of RFP A is (N+1)/2 when */
+/* TRANSR = 'C'. When TRANSR is 'N' the LDA is N+1 when N is */
+/* even and N is odd. See the Note below for more details. */
+
+/* On exit, the (triangular) inverse of the original matrix, in */
+/* the same storage format. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, A(i,i) is exactly zero. The triangular */
+/* matrix is singular and its inverse can not be computed. */
+
+/* Notes: */
+/* ====== */
+
+/* We first consider Standard Packed Format when N is even. */
+/* We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* conjugate-transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* conjugate-transpose of the last three columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- -- */
+/* 03 04 05 33 43 53 */
+/* -- -- */
+/* 13 14 15 00 44 54 */
+/* -- */
+/* 23 24 25 10 11 55 */
+
+/* 33 34 35 20 21 22 */
+/* -- */
+/* 00 44 45 30 31 32 */
+/* -- -- */
+/* 01 11 55 40 41 42 */
+/* -- -- -- */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We next consider Standard Packed Format when N is odd. */
+/* We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* conjugate-transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* conjugate-transpose of the last two columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- */
+/* 02 03 04 00 33 43 */
+/* -- */
+/* 12 13 14 10 11 44 */
+
+/* 22 23 24 20 21 22 */
+/* -- */
+/* 00 33 34 30 31 32 */
+/* -- -- */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- */
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    *info = 0;
+    normaltransr = lsame_(transr, "N");
+    lower = lsame_(uplo, "L");
+    if (! normaltransr && ! lsame_(transr, "C")) {
+	*info = -1;
+    } else if (! lower && ! lsame_(uplo, "U")) {
+	*info = -2;
+    } else if (! lsame_(diag, "N") && ! lsame_(diag,
+	    "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info); /* xerbla expects the positive argument index */
+	xerbla_("ZTFTRI", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* If N is odd, set NISODD = .TRUE. */
+/* If N is even, set K = N/2 and NISODD = .FALSE. */
+
+    if (*n % 2 == 0) {
+	k = *n / 2;
+	nisodd = FALSE_;
+    } else {
+	nisodd = TRUE_;
+    }
+
+/* Set N1 and N2 depending on LOWER (N1+N2 = N; used only when N is odd) */
+
+    if (lower) {
+	n2 = *n / 2;
+	n1 = *n - n2;
+    } else {
+	n1 = *n / 2;
+	n2 = *n - n1;
+    }
+
+
+/* Start execution: there are eight cases (TRANSR x UPLO x parity of N) */
+
+    if (nisodd) {
+
+/* N is odd */
+
+	if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:n1-1) ) */
+/* T1 -> a(0,0), T2 -> a(0,1), S -> a(n1,0) */
+/* T1 -> a(0), T2 -> a(n), S -> a(n1) */
+
+		ztrtri_("L", diag, &n1, a, n, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		z__1.r = -1., z__1.i = -0.; /* z__1 = -1 */
+		ztrmm_("R", "L", "N", diag, &n2, &n1, &z__1, a, n, &a[n1], n);
+		ztrtri_("U", diag, &n2, &a[*n], n, info)
+			;
+		if (*info > 0) {
+		    *info += n1; /* report singular index relative to the full matrix */
+		}
+		if (*info > 0) {
+		    return 0;
+		}
+		ztrmm_("L", "U", "C", diag, &n2, &n1, &c_b1, &a[*n], n, &a[n1]
+, n);
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:n2-1) */
+/* T1 -> a(n1+1,0), T2 -> a(n1,0), S -> a(0,0) */
+/* T1 -> a(n2), T2 -> a(n1), S -> a(0) */
+
+		ztrtri_("L", diag, &n1, &a[n2], n, info)
+			;
+		if (*info > 0) {
+		    return 0;
+		}
+		z__1.r = -1., z__1.i = -0.; /* z__1 = -1 */
+		ztrmm_("L", "L", "C", diag, &n1, &n2, &z__1, &a[n2], n, a, n);
+		ztrtri_("U", diag, &n2, &a[n1], n, info)
+			;
+		if (*info > 0) {
+		    *info += n1; /* report singular index relative to the full matrix */
+		}
+		if (*info > 0) {
+		    return 0;
+		}
+		ztrmm_("R", "U", "N", diag, &n1, &n2, &c_b1, &a[n1], n, a, n);
+
+	    }
+
+	} else {
+
+/* N is odd and TRANSR = 'C' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is odd */
+/* T1 -> a(0), T2 -> a(1), S -> a(0+n1*n1) */
+
+		ztrtri_("U", diag, &n1, a, &n1, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		z__1.r = -1., z__1.i = -0.; /* z__1 = -1 */
+		ztrmm_("L", "U", "N", diag, &n1, &n2, &z__1, a, &n1, &a[n1 *
+			n1], &n1);
+		ztrtri_("L", diag, &n2, &a[1], &n1, info);
+		if (*info > 0) {
+		    *info += n1; /* report singular index relative to the full matrix */
+		}
+		if (*info > 0) {
+		    return 0;
+		}
+		ztrmm_("R", "L", "C", diag, &n1, &n2, &c_b1, &a[1], &n1, &a[
+			n1 * n1], &n1);
+
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is odd */
+/* T1 -> a(0+n2*n2), T2 -> a(0+n1*n2), S -> a(0) */
+
+		ztrtri_("U", diag, &n1, &a[n2 * n2], &n2, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		z__1.r = -1., z__1.i = -0.; /* z__1 = -1 */
+		ztrmm_("R", "U", "C", diag, &n2, &n1, &z__1, &a[n2 * n2], &n2,
+			a, &n2);
+		ztrtri_("L", diag, &n2, &a[n1 * n2], &n2, info);
+		if (*info > 0) {
+		    *info += n1; /* report singular index relative to the full matrix */
+		}
+		if (*info > 0) {
+		    return 0;
+		}
+		ztrmm_("L", "L", "N", diag, &n2, &n1, &c_b1, &a[n1 * n2], &n2,
+			a, &n2);
+	    }
+
+	}
+
+    } else {
+
+/* N is even */
+
+	if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/* T1 -> a(1), T2 -> a(0), S -> a(k+1) */
+
+		i__1 = *n + 1;
+		ztrtri_("L", diag, &k, &a[1], &i__1, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		z__1.r = -1., z__1.i = -0.; /* z__1 = -1 */
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		ztrmm_("R", "L", "N", diag, &k, &k, &z__1, &a[1], &i__1, &a[k
+			+ 1], &i__2);
+		i__1 = *n + 1;
+		ztrtri_("U", diag, &k, a, &i__1, info);
+		if (*info > 0) {
+		    *info += k; /* report singular index relative to the full matrix */
+		}
+		if (*info > 0) {
+		    return 0;
+		}
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		ztrmm_("L", "U", "C", diag, &k, &k, &c_b1, a, &i__1, &a[k + 1]
+, &i__2);
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(k+1,0) , T2 -> a(k,0), S -> a(0,0) */
+/* T1 -> a(k+1), T2 -> a(k), S -> a(0) */
+
+		i__1 = *n + 1;
+		ztrtri_("L", diag, &k, &a[k + 1], &i__1, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		z__1.r = -1., z__1.i = -0.; /* z__1 = -1 */
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		ztrmm_("L", "L", "C", diag, &k, &k, &z__1, &a[k + 1], &i__1,
+			a, &i__2);
+		i__1 = *n + 1;
+		ztrtri_("U", diag, &k, &a[k], &i__1, info);
+		if (*info > 0) {
+		    *info += k; /* report singular index relative to the full matrix */
+		}
+		if (*info > 0) {
+		    return 0;
+		}
+		i__1 = *n + 1;
+		i__2 = *n + 1;
+		ztrmm_("R", "U", "N", diag, &k, &k, &c_b1, &a[k], &i__1, a, &
+			i__2);
+	    }
+	} else {
+
+/* N is even and TRANSR = 'C' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,1), T2 -> B(0,0), S -> B(0,k+1) */
+/* T1 -> a(0+k), T2 -> a(0+0), S -> a(0+k*(k+1)); lda=k */
+
+		ztrtri_("U", diag, &k, &a[k], &k, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		z__1.r = -1., z__1.i = -0.; /* z__1 = -1 */
+		ztrmm_("L", "U", "N", diag, &k, &k, &z__1, &a[k], &k, &a[k * (
+			k + 1)], &k);
+		ztrtri_("L", diag, &k, a, &k, info);
+		if (*info > 0) {
+		    *info += k; /* report singular index relative to the full matrix */
+		}
+		if (*info > 0) {
+		    return 0;
+		}
+		ztrmm_("R", "L", "C", diag, &k, &k, &c_b1, a, &k, &a[k * (k +
+			1)], &k);
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,k+1), T2 -> B(0,k), S -> B(0,0) */
+/* T1 -> a(0+k*(k+1)), T2 -> a(0+k*k), S -> a(0+0)); lda=k */
+
+		ztrtri_("U", diag, &k, &a[k * (k + 1)], &k, info);
+		if (*info > 0) {
+		    return 0;
+		}
+		z__1.r = -1., z__1.i = -0.; /* z__1 = -1 */
+		ztrmm_("R", "U", "C", diag, &k, &k, &z__1, &a[k * (k + 1)], &
+			k, a, &k);
+		ztrtri_("L", diag, &k, &a[k * k], &k, info);
+		if (*info > 0) {
+		    *info += k; /* report singular index relative to the full matrix */
+		}
+		if (*info > 0) {
+		    return 0;
+		}
+		ztrmm_("L", "L", "N", diag, &k, &k, &c_b1, &a[k * k], &k, a, &
+			k);
+	    }
+	}
+    }
+
+    return 0;
+
+/* End of ZTFTRI */
+
+} /* ztftri_ */
diff --git a/contrib/libs/clapack/ztfttp.c b/contrib/libs/clapack/ztfttp.c
new file mode 100644
index 0000000000..91a07c20e1
--- /dev/null
+++ b/contrib/libs/clapack/ztfttp.c
@@ -0,0 +1,575 @@
+/* ztfttp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ztfttp_(char *transr, char *uplo, integer *n, 
+	doublecomplex *arf, doublecomplex *ap, integer *info)
+{
+/* NOTE(review): machine-generated by f2c from LAPACK's ztfttp.f.  The */
+/* zero-based index arithmetic below mirrors the Fortran DO loops exactly; */
+/* prefer regenerating from the Fortran source over hand-editing this file. */
+
+    /* System generated locals */
+    integer i__1, i__2, i__3, i__4;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__, j, k, n1, n2, ij, jp, js, nt, lda, ijp;
+    logical normaltransr;
+    extern logical lsame_(char *, char *);
+    logical lower;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical nisodd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTFTTP copies a triangular matrix A from rectangular full packed */
+/* format (TF) to standard packed format (TP). */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': ARF is in Normal format; */
+/* = 'C': ARF is in Conjugate-transpose format; */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* ARF (input) COMPLEX*16 array, dimension ( N*(N+1)/2 ), */
+/* On entry, the upper or lower triangular matrix A stored in */
+/* RFP format. For a further discussion see Notes below. */
+
+/* AP (output) COMPLEX*16 array, dimension ( N*(N+1)/2 ), */
+/* On exit, the upper or lower triangular matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Notes: */
+/* ====== */
+
+/* We first consider Standard Packed Format when N is even. */
+/* We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* conjugate-transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* conjugate-transpose of the last three columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- -- */
+/* 03 04 05 33 43 53 */
+/* -- -- */
+/* 13 14 15 00 44 54 */
+/* -- */
+/* 23 24 25 10 11 55 */
+
+/* 33 34 35 20 21 22 */
+/* -- */
+/* 00 44 45 30 31 32 */
+/* -- -- */
+/* 01 11 55 40 41 42 */
+/* -- -- -- */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We next consider Standard Packed Format when N is odd. */
+/* We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* conjugate-transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* conjugate-transpose of the last two columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- */
+/* 02 03 04 00 33 43 */
+/* -- */
+/* 12 13 14 10 11 44 */
+
+/* 22 23 24 20 21 22 */
+/* -- */
+/* 00 33 34 30 31 32 */
+/* -- -- */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- */
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    *info = 0;
+    normaltransr = lsame_(transr, "N");
+    lower = lsame_(uplo, "L");
+    if (! normaltransr && ! lsame_(transr, "C")) {
+	*info = -1;
+    } else if (! lower && ! lsame_(uplo, "U")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZTFTTP", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+    if (*n == 1) {
+	if (normaltransr) {
+	    ap[0].r = arf[0].r, ap[0].i = arf[0].i;
+	} else {
+	    d_cnjg(&z__1, arf);
+	    ap[0].r = z__1.r, ap[0].i = z__1.i;
+	}
+	return 0;
+    }
+
+/* Size of array ARF(0:NT-1) */
+
+    nt = *n * (*n + 1) / 2;
+
+/* Set N1 and N2 depending on LOWER */
+
+    if (lower) {
+	n2 = *n / 2;
+	n1 = *n - n2;
+    } else {
+	n1 = *n / 2;
+	n2 = *n - n1;
+    }
+
+/* If N is odd, set NISODD = .TRUE. */
+/* If N is even, set K = N/2 and NISODD = .FALSE. */
+
+/* set lda of ARF^C; ARF^C is (0:(N+1)/2-1,0:N-noe) */
+/* where noe = 0 if n is even, noe = 1 if n is odd */
+
+    if (*n % 2 == 0) {
+	k = *n / 2;
+	nisodd = FALSE_;
+	lda = *n + 1;
+    } else {
+	nisodd = TRUE_;
+	lda = *n;
+    }
+
+/* ARF^C has lda rows and n+1-noe cols */
+
+    if (! normaltransr) {
+	lda = (*n + 1) / 2;
+    }
+
+/* start execution: there are eight cases */
+
+    if (nisodd) {
+
+/* N is odd */
+
+	if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:n1-1) ) */
+/* T1 -> a(0,0), T2 -> a(0,1), S -> a(n1,0) */
+/* T1 -> a(0), T2 -> a(n), S -> a(n1); lda = n */
+
+		ijp = 0;
+		jp = 0;
+		i__1 = n2;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = *n - 1;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			ij = i__ + jp;
+			i__3 = ijp;
+			i__4 = ij;
+			ap[i__3].r = arf[i__4].r, ap[i__3].i = arf[i__4].i;
+			++ijp;
+		    }
+		    jp += lda;
+		}
+		i__1 = n2 - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = n2;
+		    for (j = i__ + 1; j <= i__2; ++j) {
+			ij = i__ + j * lda;
+			i__3 = ijp;
+			d_cnjg(&z__1, &arf[ij]);
+			ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+			++ijp;
+		    }
+		}
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:n2-1) */
+/* T1 -> a(n1+1,0), T2 -> a(n1,0), S -> a(0,0) */
+/* T1 -> a(n2), T2 -> a(n1), S -> a(0) */
+
+		ijp = 0;
+		i__1 = n1 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    ij = n2 + j;
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = ijp;
+			d_cnjg(&z__1, &arf[ij]);
+			ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+			++ijp;
+			ij += lda;
+		    }
+		}
+		js = 0;
+		i__1 = *n - 1;
+		for (j = n1; j <= i__1; ++j) {
+		    ij = js;
+		    i__2 = js + j;
+		    for (ij = js; ij <= i__2; ++ij) {
+			i__3 = ijp;
+			i__4 = ij;
+			ap[i__3].r = arf[i__4].r, ap[i__3].i = arf[i__4].i;
+			++ijp;
+		    }
+		    js += lda;
+		}
+
+	    }
+
+	} else {
+
+/* N is odd and TRANSR = 'C' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is odd */
+/* T1 -> A(0,0) , T2 -> A(1,0) , S -> A(0,n1) */
+/* T1 -> a(0+0) , T2 -> a(1+0) , S -> a(0+n1*n1); lda=n1 */
+
+		ijp = 0;
+		i__1 = n2;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = *n * lda - 1;
+		    i__3 = lda;
+/* f2c renders the Fortran "DO ij = start, end, step" with a variable */
+/* step as a ternary-guarded for-loop; lda > 0 here, so only the "<=" */
+/* arm of the ternary is ever taken. */
+		    for (ij = i__ * (lda + 1); i__3 < 0 ? ij >= i__2 : ij <= 
+			    i__2; ij += i__3) {
+			i__4 = ijp;
+			d_cnjg(&z__1, &arf[ij]);
+			ap[i__4].r = z__1.r, ap[i__4].i = z__1.i;
+			++ijp;
+		    }
+		}
+		js = 1;
+		i__1 = n2 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + n2 - j - 1;
+		    for (ij = js; ij <= i__3; ++ij) {
+			i__2 = ijp;
+			i__4 = ij;
+			ap[i__2].r = arf[i__4].r, ap[i__2].i = arf[i__4].i;
+			++ijp;
+		    }
+		    js = js + lda + 1;
+		}
+
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is odd */
+/* T1 -> A(0,n1+1), T2 -> A(0,n1), S -> A(0,0) */
+/* T1 -> a(n2*n2), T2 -> a(n1*n2), S -> a(0); lda = n2 */
+
+		ijp = 0;
+		js = n2 * lda;
+		i__1 = n1 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + j;
+		    for (ij = js; ij <= i__3; ++ij) {
+			i__2 = ijp;
+			i__4 = ij;
+			ap[i__2].r = arf[i__4].r, ap[i__2].i = arf[i__4].i;
+			++ijp;
+		    }
+		    js += lda;
+		}
+		i__1 = n1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__3 = i__ + (n1 + i__) * lda;
+		    i__2 = lda;
+		    for (ij = i__; i__2 < 0 ? ij >= i__3 : ij <= i__3; ij += 
+			    i__2) {
+			i__4 = ijp;
+			d_cnjg(&z__1, &arf[ij]);
+			ap[i__4].r = z__1.r, ap[i__4].i = z__1.i;
+			++ijp;
+		    }
+		}
+
+	    }
+
+	}
+
+    } else {
+
+/* N is even */
+
+	if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/* T1 -> a(1), T2 -> a(0), S -> a(k+1) */
+
+		ijp = 0;
+		jp = 0;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = *n - 1;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			ij = i__ + 1 + jp;
+			i__3 = ijp;
+			i__4 = ij;
+			ap[i__3].r = arf[i__4].r, ap[i__3].i = arf[i__4].i;
+			++ijp;
+		    }
+		    jp += lda;
+		}
+		i__1 = k - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = k - 1;
+		    for (j = i__; j <= i__2; ++j) {
+			ij = i__ + j * lda;
+			i__3 = ijp;
+			d_cnjg(&z__1, &arf[ij]);
+			ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+			++ijp;
+		    }
+		}
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(k+1,0) , T2 -> a(k,0), S -> a(0,0) */
+/* T1 -> a(k+1), T2 -> a(k), S -> a(0) */
+
+		ijp = 0;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    ij = k + 1 + j;
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = ijp;
+			d_cnjg(&z__1, &arf[ij]);
+			ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
+			++ijp;
+			ij += lda;
+		    }
+		}
+		js = 0;
+		i__1 = *n - 1;
+		for (j = k; j <= i__1; ++j) {
+		    ij = js;
+		    i__2 = js + j;
+		    for (ij = js; ij <= i__2; ++ij) {
+			i__3 = ijp;
+			i__4 = ij;
+			ap[i__3].r = arf[i__4].r, ap[i__3].i = arf[i__4].i;
+			++ijp;
+		    }
+		    js += lda;
+		}
+
+	    }
+
+	} else {
+
+/* N is even and TRANSR = 'C' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,1), T2 -> B(0,0), S -> B(0,k+1) */
+/* T1 -> a(0+k), T2 -> a(0+0), S -> a(0+k*(k+1)); lda=k */
+
+		ijp = 0;
+		i__1 = k - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = (*n + 1) * lda - 1;
+		    i__3 = lda;
+		    for (ij = i__ + (i__ + 1) * lda; i__3 < 0 ? ij >= i__2 : 
+			    ij <= i__2; ij += i__3) {
+			i__4 = ijp;
+			d_cnjg(&z__1, &arf[ij]);
+			ap[i__4].r = z__1.r, ap[i__4].i = z__1.i;
+			++ijp;
+		    }
+		}
+		js = 0;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + k - j - 1;
+		    for (ij = js; ij <= i__3; ++ij) {
+			i__2 = ijp;
+			i__4 = ij;
+			ap[i__2].r = arf[i__4].r, ap[i__2].i = arf[i__4].i;
+			++ijp;
+		    }
+		    js = js + lda + 1;
+		}
+
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,k+1), T2 -> B(0,k), S -> B(0,0) */
+/* T1 -> a(0+k*(k+1)), T2 -> a(0+k*k), S -> a(0+0)); lda=k */
+
+		ijp = 0;
+		js = (k + 1) * lda;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + j;
+		    for (ij = js; ij <= i__3; ++ij) {
+			i__2 = ijp;
+			i__4 = ij;
+			ap[i__2].r = arf[i__4].r, ap[i__2].i = arf[i__4].i;
+			++ijp;
+		    }
+		    js += lda;
+		}
+		i__1 = k - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__3 = i__ + (k + i__) * lda;
+		    i__2 = lda;
+		    for (ij = i__; i__2 < 0 ? ij >= i__3 : ij <= i__3; ij += 
+			    i__2) {
+			i__4 = ijp;
+			d_cnjg(&z__1, &arf[ij]);
+			ap[i__4].r = z__1.r, ap[i__4].i = z__1.i;
+			++ijp;
+		    }
+		}
+
+	    }
+
+	}
+
+    }
+
+    return 0;
+
+/* End of ZTFTTP */
+
+} /* ztfttp_ */
diff --git a/contrib/libs/clapack/ztfttr.c b/contrib/libs/clapack/ztfttr.c
new file mode 100644
index 0000000000..c7e53b9d14
--- /dev/null
+++ b/contrib/libs/clapack/ztfttr.c
@@ -0,0 +1,580 @@
+/* ztfttr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ztfttr_(char *transr, char *uplo, integer *n, 
+	doublecomplex *arf, doublecomplex *a, integer *lda, integer *info)
+{
+/* NOTE(review): machine-generated by f2c from LAPACK's ztfttr.f.  The */
+/* zero-based index arithmetic below mirrors the Fortran DO loops exactly; */
+/* prefer regenerating from the Fortran source over hand-editing this file. */
+
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__, j, k, l, n1, n2, ij, nt, nx2, np1x2;
+    logical normaltransr;
+    extern logical lsame_(char *, char *);
+    logical lower;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical nisodd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTFTTR copies a triangular matrix A from rectangular full packed */
+/* format (TF) to standard full format (TR). */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': ARF is in Normal format; */
+/* = 'C': ARF is in Conjugate-transpose format; */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* ARF (input) COMPLEX*16 array, dimension ( N*(N+1)/2 ), */
+/* On entry, the upper or lower triangular matrix A stored in */
+/* RFP format. For a further discussion see Notes below. */
+
+/* A (output) COMPLEX*16 array, dimension ( LDA, N ) */
+/* On exit, the triangular matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of the array A contains */
+/* the upper triangular matrix, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of the array A contains */
+/* the lower triangular matrix, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Notes: */
+/* ====== */
+
+/* We first consider Standard Packed Format when N is even. */
+/* We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* conjugate-transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* conjugate-transpose of the last three columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- -- */
+/* 03 04 05 33 43 53 */
+/* -- -- */
+/* 13 14 15 00 44 54 */
+/* -- */
+/* 23 24 25 10 11 55 */
+
+/* 33 34 35 20 21 22 */
+/* -- */
+/* 00 44 45 30 31 32 */
+/* -- -- */
+/* 01 11 55 40 41 42 */
+/* -- -- -- */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We next consider Standard Packed Format when N is odd. */
+/* We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* conjugate-transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* conjugate-transpose of the last two columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- */
+/* 02 03 04 00 33 43 */
+/* -- */
+/* 12 13 14 10 11 44 */
+
+/* 22 23 24 20 21 22 */
+/* -- */
+/* 00 33 34 30 31 32 */
+/* -- -- */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- */
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+/* f2c's array-argument adjustment: a_dim1 evaluates to *lda and */
+/* a_offset to 0, so A is effectively unshifted and a[i + j * a_dim1] */
+/* addresses the zero-based (i,j) element of the output matrix. */
+    a_dim1 = *lda - 1 - 0 + 1;
+    a_offset = 0 + a_dim1 * 0;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    normaltransr = lsame_(transr, "N");
+    lower = lsame_(uplo, "L");
+    if (! normaltransr && ! lsame_(transr, "C")) {
+	*info = -1;
+    } else if (! lower && ! lsame_(uplo, "U")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZTFTTR", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n <= 1) {
+	if (*n == 1) {
+	    if (normaltransr) {
+		a[0].r = arf[0].r, a[0].i = arf[0].i;
+	    } else {
+		d_cnjg(&z__1, arf);
+		a[0].r = z__1.r, a[0].i = z__1.i;
+	    }
+	}
+	return 0;
+    }
+
+/* Size of array ARF(1:2,0:nt-1) */
+
+    nt = *n * (*n + 1) / 2;
+
+/* set N1 and N2 depending on LOWER: for N even N1=N2=K */
+
+    if (lower) {
+	n2 = *n / 2;
+	n1 = *n - n2;
+    } else {
+	n1 = *n / 2;
+	n2 = *n - n1;
+    }
+
+/* If N is odd, set NISODD = .TRUE., LDA=N+1 and A is (N+1)--by--K2. */
+/* If N is even, set K = N/2 and NISODD = .FALSE., LDA=N and A is */
+/* N--by--(N+1)/2. */
+
+    if (*n % 2 == 0) {
+	k = *n / 2;
+	nisodd = FALSE_;
+	if (! lower) {
+	    np1x2 = *n + *n + 2;
+	}
+    } else {
+	nisodd = TRUE_;
+	if (! lower) {
+	    nx2 = *n + *n;
+	}
+    }
+
+    if (nisodd) {
+
+/* N is odd */
+
+	if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:n1-1) ) */
+/* T1 -> a(0,0), T2 -> a(0,1), S -> a(n1,0) */
+/* T1 -> a(0), T2 -> a(n), S -> a(n1); lda=n */
+
+		ij = 0;
+		i__1 = n2;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = n2 + j;
+		    for (i__ = n1; i__ <= i__2; ++i__) {
+			i__3 = n2 + j + i__ * a_dim1;
+			d_cnjg(&z__1, &arf[ij]);
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+			++ij;
+		    }
+		    i__2 = *n - 1;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * a_dim1;
+			i__4 = ij;
+			a[i__3].r = arf[i__4].r, a[i__3].i = arf[i__4].i;
+			++ij;
+		    }
+		}
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:n2-1) */
+/* T1 -> a(n1+1,0), T2 -> a(n1,0), S -> a(0,0) */
+/* T1 -> a(n2), T2 -> a(n1), S -> a(0); lda=n */
+
+		ij = nt - *n;
+		i__1 = n1;
+		for (j = *n - 1; j >= i__1; --j) {
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * a_dim1;
+			i__4 = ij;
+			a[i__3].r = arf[i__4].r, a[i__3].i = arf[i__4].i;
+			++ij;
+		    }
+		    i__2 = n1 - 1;
+		    for (l = j - n1; l <= i__2; ++l) {
+			i__3 = j - n1 + l * a_dim1;
+			d_cnjg(&z__1, &arf[ij]);
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+			++ij;
+		    }
+/* J runs downward, so rewind IJ by 2*N (NX2, set above for the */
+/* upper case) to the start of the previous, shorter column pair */
+/* of ARF. */
+		    ij -= nx2;
+		}
+
+	    }
+
+	} else {
+
+/* N is odd and TRANSR = 'C' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is odd */
+/* T1 -> A(0,0) , T2 -> A(1,0) , S -> A(0,n1) */
+/* T1 -> A(0+0) , T2 -> A(1+0) , S -> A(0+n1*n1); lda=n1 */
+
+		ij = 0;
+		i__1 = n2 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = j + i__ * a_dim1;
+			d_cnjg(&z__1, &arf[ij]);
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+			++ij;
+		    }
+		    i__2 = *n - 1;
+		    for (i__ = n1 + j; i__ <= i__2; ++i__) {
+			i__3 = i__ + (n1 + j) * a_dim1;
+			i__4 = ij;
+			a[i__3].r = arf[i__4].r, a[i__3].i = arf[i__4].i;
+			++ij;
+		    }
+		}
+		i__1 = *n - 1;
+		for (j = n2; j <= i__1; ++j) {
+		    i__2 = n1 - 1;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = j + i__ * a_dim1;
+			d_cnjg(&z__1, &arf[ij]);
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+			++ij;
+		    }
+		}
+
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is odd */
+/* T1 -> A(0,n1+1), T2 -> A(0,n1), S -> A(0,0) */
+/* T1 -> A(n2*n2), T2 -> A(n1*n2), S -> A(0); lda = n2 */
+
+		ij = 0;
+		i__1 = n1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = *n - 1;
+		    for (i__ = n1; i__ <= i__2; ++i__) {
+			i__3 = j + i__ * a_dim1;
+			d_cnjg(&z__1, &arf[ij]);
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+			++ij;
+		    }
+		}
+		i__1 = n1 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * a_dim1;
+			i__4 = ij;
+			a[i__3].r = arf[i__4].r, a[i__3].i = arf[i__4].i;
+			++ij;
+		    }
+		    i__2 = *n - 1;
+		    for (l = n2 + j; l <= i__2; ++l) {
+			i__3 = n2 + j + l * a_dim1;
+			d_cnjg(&z__1, &arf[ij]);
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+			++ij;
+		    }
+		}
+
+	    }
+
+	}
+
+    } else {
+
+/* N is even */
+
+	if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/* T1 -> a(1), T2 -> a(0), S -> a(k+1); lda=n+1 */
+
+		ij = 0;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = k + j;
+		    for (i__ = k; i__ <= i__2; ++i__) {
+			i__3 = k + j + i__ * a_dim1;
+			d_cnjg(&z__1, &arf[ij]);
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+			++ij;
+		    }
+		    i__2 = *n - 1;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * a_dim1;
+			i__4 = ij;
+			a[i__3].r = arf[i__4].r, a[i__3].i = arf[i__4].i;
+			++ij;
+		    }
+		}
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(k+1,0) , T2 -> a(k,0), S -> a(0,0) */
+/* T1 -> a(k+1), T2 -> a(k), S -> a(0); lda=n+1 */
+
+		ij = nt - *n - 1;
+		i__1 = k;
+		for (j = *n - 1; j >= i__1; --j) {
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * a_dim1;
+			i__4 = ij;
+			a[i__3].r = arf[i__4].r, a[i__3].i = arf[i__4].i;
+			++ij;
+		    }
+		    i__2 = k - 1;
+		    for (l = j - k; l <= i__2; ++l) {
+			i__3 = j - k + l * a_dim1;
+			d_cnjg(&z__1, &arf[ij]);
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+			++ij;
+		    }
+/* J runs downward, so rewind IJ by 2*N+2 (NP1X2, set above for the */
+/* even upper case) to the start of the previous column pair of ARF. */
+		    ij -= np1x2;
+		}
+
+	    }
+
+	} else {
+
+/* N is even and TRANSR = 'C' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is even (see paper, A=B) */
+/* T1 -> A(0,1) , T2 -> A(0,0) , S -> A(0,k+1) : */
+/* T1 -> A(0+k) , T2 -> A(0+0) , S -> A(0+k*(k+1)); lda=k */
+
+		ij = 0;
+		j = k;
+		i__1 = *n - 1;
+		for (i__ = k; i__ <= i__1; ++i__) {
+		    i__2 = i__ + j * a_dim1;
+		    i__3 = ij;
+		    a[i__2].r = arf[i__3].r, a[i__2].i = arf[i__3].i;
+		    ++ij;
+		}
+		i__1 = k - 2;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = j + i__ * a_dim1;
+			d_cnjg(&z__1, &arf[ij]);
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+			++ij;
+		    }
+		    i__2 = *n - 1;
+		    for (i__ = k + 1 + j; i__ <= i__2; ++i__) {
+			i__3 = i__ + (k + 1 + j) * a_dim1;
+			i__4 = ij;
+			a[i__3].r = arf[i__4].r, a[i__3].i = arf[i__4].i;
+			++ij;
+		    }
+		}
+		i__1 = *n - 1;
+		for (j = k - 1; j <= i__1; ++j) {
+		    i__2 = k - 1;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = j + i__ * a_dim1;
+			d_cnjg(&z__1, &arf[ij]);
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+			++ij;
+		    }
+		}
+
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is even (see paper, A=B) */
+/* T1 -> A(0,k+1) , T2 -> A(0,k) , S -> A(0,0) */
+/* T1 -> A(0+k*(k+1)) , T2 -> A(0+k*k) , S -> A(0+0)); lda=k */
+
+		ij = 0;
+		i__1 = k;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = *n - 1;
+		    for (i__ = k; i__ <= i__2; ++i__) {
+			i__3 = j + i__ * a_dim1;
+			d_cnjg(&z__1, &arf[ij]);
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+			++ij;
+		    }
+		}
+		i__1 = k - 2;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = i__ + j * a_dim1;
+			i__4 = ij;
+			a[i__3].r = arf[i__4].r, a[i__3].i = arf[i__4].i;
+			++ij;
+		    }
+		    i__2 = *n - 1;
+		    for (l = k + 1 + j; l <= i__2; ++l) {
+			i__3 = k + 1 + j + l * a_dim1;
+			d_cnjg(&z__1, &arf[ij]);
+			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+			++ij;
+		    }
+		}
+
+/* Note that here J = K-1 */
+
+		i__1 = j;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = i__ + j * a_dim1;
+		    i__3 = ij;
+		    a[i__2].r = arf[i__3].r, a[i__2].i = arf[i__3].i;
+		    ++ij;
+		}
+
+	    }
+
+	}
+
+    }
+
+    return 0;
+
+/* End of ZTFTTR */
+
+} /* ztfttr_ */
diff --git a/contrib/libs/clapack/ztgevc.c b/contrib/libs/clapack/ztgevc.c
new file mode 100644
index 0000000000..106692cf7a
--- /dev/null
+++ b/contrib/libs/clapack/ztgevc.c
@@ -0,0 +1,972 @@
+/* ztgevc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int ztgevc_(char *side, char *howmny, logical *select,
+ integer *n, doublecomplex *s, integer *lds, doublecomplex *p, integer
+ *ldp, doublecomplex *vl, integer *ldvl, doublecomplex *vr, integer *
+ ldvr, integer *mm, integer *m, doublecomplex *work, doublereal *rwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer p_dim1, p_offset, s_dim1, s_offset, vl_dim1, vl_offset, vr_dim1,
+ vr_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4, d__5, d__6;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ doublecomplex d__;
+ integer i__, j;
+ doublecomplex ca, cb;
+ integer je, im, jr;
+ doublereal big;
+ logical lsa, lsb;
+ doublereal ulp;
+ doublecomplex sum;
+ integer ibeg, ieig, iend;
+ doublereal dmin__;
+ integer isrc;
+ doublereal temp;
+ doublecomplex suma, sumb;
+ doublereal xmax, scale;
+ logical ilall;
+ integer iside;
+ doublereal sbeta;
+ extern logical lsame_(char *, char *);
+ doublereal small;
+ logical compl;
+ doublereal anorm, bnorm;
+ logical compr;
+ extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *),
+ dlabad_(doublereal *, doublereal *);
+ logical ilbbad;
+ doublereal acoefa, bcoefa, acoeff;
+ doublecomplex bcoeff;
+ logical ilback;
+ doublereal ascale, bscale;
+ extern doublereal dlamch_(char *);
+ doublecomplex salpha;
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum;
+ logical ilcomp;
+ extern /* Double Complex */ VOID zladiv_(doublecomplex *, doublecomplex *,
+ doublecomplex *);
+ integer ihwmny;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+
+/* Purpose */
+/* ======= */
+
+/* ZTGEVC computes some or all of the right and/or left eigenvectors of */
+/* a pair of complex matrices (S,P), where S and P are upper triangular. */
+/* Matrix pairs of this type are produced by the generalized Schur */
+/* factorization of a complex matrix pair (A,B): */
+
+/* A = Q*S*Z**H, B = Q*P*Z**H */
+
+/* as computed by ZGGHRD + ZHGEQZ. */
+
+/* The right eigenvector x and the left eigenvector y of (S,P) */
+/* corresponding to an eigenvalue w are defined by: */
+
+/* S*x = w*P*x, (y**H)*S = w*(y**H)*P, */
+
+/* where y**H denotes the conjugate transpose of y. */
+/* The eigenvalues are not input to this routine, but are computed */
+/* directly from the diagonal elements of S and P. */
+
+/* This routine returns the matrices X and/or Y of right and left */
+/* eigenvectors of (S,P), or the products Z*X and/or Q*Y, */
+/* where Z and Q are input matrices. */
+/* If Q and Z are the unitary factors from the generalized Schur */
+/* factorization of a matrix pair (A,B), then Z*X and Q*Y */
+/* are the matrices of right and left eigenvectors of (A,B). */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'R': compute right eigenvectors only; */
+/* = 'L': compute left eigenvectors only; */
+/* = 'B': compute both right and left eigenvectors. */
+
+/* HOWMNY (input) CHARACTER*1 */
+/* = 'A': compute all right and/or left eigenvectors; */
+/* = 'B': compute all right and/or left eigenvectors, */
+/* backtransformed by the matrices in VR and/or VL; */
+/* = 'S': compute selected right and/or left eigenvectors, */
+/* specified by the logical array SELECT. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* If HOWMNY='S', SELECT specifies the eigenvectors to be */
+/* computed. The eigenvector corresponding to the j-th */
+/* eigenvalue is computed if SELECT(j) = .TRUE.. */
+/* Not referenced if HOWMNY = 'A' or 'B'. */
+
+/* N (input) INTEGER */
+/* The order of the matrices S and P. N >= 0. */
+
+/* S (input) COMPLEX*16 array, dimension (LDS,N) */
+/* The upper triangular matrix S from a generalized Schur */
+/* factorization, as computed by ZHGEQZ. */
+
+/* LDS (input) INTEGER */
+/* The leading dimension of array S. LDS >= max(1,N). */
+
+/* P (input) COMPLEX*16 array, dimension (LDP,N) */
+/* The upper triangular matrix P from a generalized Schur */
+/* factorization, as computed by ZHGEQZ. P must have real */
+/* diagonal elements. */
+
+/* LDP (input) INTEGER */
+/* The leading dimension of array P. LDP >= max(1,N). */
+
+/* VL (input/output) COMPLEX*16 array, dimension (LDVL,MM) */
+/* On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must */
+/* contain an N-by-N matrix Q (usually the unitary matrix Q */
+/* of left Schur vectors returned by ZHGEQZ). */
+/* On exit, if SIDE = 'L' or 'B', VL contains: */
+/* if HOWMNY = 'A', the matrix Y of left eigenvectors of (S,P); */
+/* if HOWMNY = 'B', the matrix Q*Y; */
+/* if HOWMNY = 'S', the left eigenvectors of (S,P) specified by */
+/* SELECT, stored consecutively in the columns of */
+/* VL, in the same order as their eigenvalues. */
+/* Not referenced if SIDE = 'R'. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of array VL. LDVL >= 1, and if */
+/* SIDE = 'L' or 'l' or 'B' or 'b', LDVL >= N. */
+
+/* VR (input/output) COMPLEX*16 array, dimension (LDVR,MM) */
+/* On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must */
+/* contain an N-by-N matrix Z (usually the unitary matrix Z */
+/* of right Schur vectors returned by ZHGEQZ). */
+/* On exit, if SIDE = 'R' or 'B', VR contains: */
+/* if HOWMNY = 'A', the matrix X of right eigenvectors of (S,P); */
+/* if HOWMNY = 'B', the matrix Z*X; */
+/* if HOWMNY = 'S', the right eigenvectors of (S,P) specified by */
+/* SELECT, stored consecutively in the columns of */
+/* VR, in the same order as their eigenvalues. */
+/* Not referenced if SIDE = 'L'. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. LDVR >= 1, and if */
+/* SIDE = 'R' or 'B', LDVR >= N. */
+
+/* MM (input) INTEGER */
+/* The number of columns in the arrays VL and/or VR. MM >= M. */
+
+/* M (output) INTEGER */
+/* The number of columns in the arrays VL and/or VR actually */
+/* used to store the eigenvectors. If HOWMNY = 'A' or 'B', M */
+/* is set to N. Each selected eigenvector occupies one column. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (2*N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit. */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and Test the input parameters */
+
+ /* Parameter adjustments */
+ --select;
+ s_dim1 = *lds;
+ s_offset = 1 + s_dim1;
+ s -= s_offset;
+ p_dim1 = *ldp;
+ p_offset = 1 + p_dim1;
+ p -= p_offset;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ if (lsame_(howmny, "A")) {
+ ihwmny = 1;
+ ilall = TRUE_;
+ ilback = FALSE_;
+ } else if (lsame_(howmny, "S")) {
+ ihwmny = 2;
+ ilall = FALSE_;
+ ilback = FALSE_;
+ } else if (lsame_(howmny, "B")) {
+ ihwmny = 3;
+ ilall = TRUE_;
+ ilback = TRUE_;
+ } else {
+ ihwmny = -1;
+ }
+
+ if (lsame_(side, "R")) {
+ iside = 1;
+ compl = FALSE_;
+ compr = TRUE_;
+ } else if (lsame_(side, "L")) {
+ iside = 2;
+ compl = TRUE_;
+ compr = FALSE_;
+ } else if (lsame_(side, "B")) {
+ iside = 3;
+ compl = TRUE_;
+ compr = TRUE_;
+ } else {
+ iside = -1;
+ }
+
+ *info = 0;
+ if (iside < 0) {
+ *info = -1;
+ } else if (ihwmny < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lds < max(1,*n)) {
+ *info = -6;
+ } else if (*ldp < max(1,*n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTGEVC", &i__1);
+ return 0;
+ }
+
+/* Count the number of eigenvectors */
+
+ if (! ilall) {
+ im = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (select[j]) {
+ ++im;
+ }
+/* L10: */
+ }
+ } else {
+ im = *n;
+ }
+
+/* Check diagonal of B */
+
+ ilbbad = FALSE_;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (d_imag(&p[j + j * p_dim1]) != 0.) {
+ ilbbad = TRUE_;
+ }
+/* L20: */
+ }
+
+ if (ilbbad) {
+ *info = -7;
+ } else if (compl && *ldvl < *n || *ldvl < 1) {
+ *info = -10;
+ } else if (compr && *ldvr < *n || *ldvr < 1) {
+ *info = -12;
+ } else if (*mm < im) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTGEVC", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *m = im;
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Machine Constants */
+
+ safmin = dlamch_("Safe minimum");
+ big = 1. / safmin;
+ dlabad_(&safmin, &big);
+ ulp = dlamch_("Epsilon") * dlamch_("Base");
+ small = safmin * *n / ulp;
+ big = 1. / small;
+ bignum = 1. / (safmin * *n);
+
+/* Compute the 1-norm of each column of the strictly upper triangular */
+/* part of A and B to check for possible overflow in the triangular */
+/* solver. */
+
+ i__1 = s_dim1 + 1;
+ anorm = (d__1 = s[i__1].r, abs(d__1)) + (d__2 = d_imag(&s[s_dim1 + 1]),
+ abs(d__2));
+ i__1 = p_dim1 + 1;
+ bnorm = (d__1 = p[i__1].r, abs(d__1)) + (d__2 = d_imag(&p[p_dim1 + 1]),
+ abs(d__2));
+ rwork[1] = 0.;
+ rwork[*n + 1] = 0.;
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ rwork[j] = 0.;
+ rwork[*n + j] = 0.;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * s_dim1;
+ rwork[j] += (d__1 = s[i__3].r, abs(d__1)) + (d__2 = d_imag(&s[i__
+ + j * s_dim1]), abs(d__2));
+ i__3 = i__ + j * p_dim1;
+ rwork[*n + j] += (d__1 = p[i__3].r, abs(d__1)) + (d__2 = d_imag(&
+ p[i__ + j * p_dim1]), abs(d__2));
+/* L30: */
+ }
+/* Computing MAX */
+ i__2 = j + j * s_dim1;
+ d__3 = anorm, d__4 = rwork[j] + ((d__1 = s[i__2].r, abs(d__1)) + (
+ d__2 = d_imag(&s[j + j * s_dim1]), abs(d__2)));
+ anorm = max(d__3,d__4);
+/* Computing MAX */
+ i__2 = j + j * p_dim1;
+ d__3 = bnorm, d__4 = rwork[*n + j] + ((d__1 = p[i__2].r, abs(d__1)) +
+ (d__2 = d_imag(&p[j + j * p_dim1]), abs(d__2)));
+ bnorm = max(d__3,d__4);
+/* L40: */
+ }
+
+ ascale = 1. / max(anorm,safmin);
+ bscale = 1. / max(bnorm,safmin);
+
+/* Left eigenvectors */
+
+ if (compl) {
+ ieig = 0;
+
+/* Main loop over eigenvalues */
+
+ i__1 = *n;
+ for (je = 1; je <= i__1; ++je) {
+ if (ilall) {
+ ilcomp = TRUE_;
+ } else {
+ ilcomp = select[je];
+ }
+ if (ilcomp) {
+ ++ieig;
+
+ i__2 = je + je * s_dim1;
+ i__3 = je + je * p_dim1;
+ if ((d__2 = s[i__2].r, abs(d__2)) + (d__3 = d_imag(&s[je + je
+ * s_dim1]), abs(d__3)) <= safmin && (d__1 = p[i__3].r,
+ abs(d__1)) <= safmin) {
+
+/* Singular matrix pencil -- return unit eigenvector */
+
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ i__3 = jr + ieig * vl_dim1;
+ vl[i__3].r = 0., vl[i__3].i = 0.;
+/* L50: */
+ }
+ i__2 = ieig + ieig * vl_dim1;
+ vl[i__2].r = 1., vl[i__2].i = 0.;
+ goto L140;
+ }
+
+/* Non-singular eigenvalue: */
+/* Compute coefficients a and b in */
+/* H */
+/* y ( a A - b B ) = 0 */
+
+/* Computing MAX */
+ i__2 = je + je * s_dim1;
+ i__3 = je + je * p_dim1;
+ d__4 = ((d__2 = s[i__2].r, abs(d__2)) + (d__3 = d_imag(&s[je
+ + je * s_dim1]), abs(d__3))) * ascale, d__5 = (d__1 =
+ p[i__3].r, abs(d__1)) * bscale, d__4 = max(d__4,d__5);
+ temp = 1. / max(d__4,safmin);
+ i__2 = je + je * s_dim1;
+ z__2.r = temp * s[i__2].r, z__2.i = temp * s[i__2].i;
+ z__1.r = ascale * z__2.r, z__1.i = ascale * z__2.i;
+ salpha.r = z__1.r, salpha.i = z__1.i;
+ i__2 = je + je * p_dim1;
+ sbeta = temp * p[i__2].r * bscale;
+ acoeff = sbeta * ascale;
+ z__1.r = bscale * salpha.r, z__1.i = bscale * salpha.i;
+ bcoeff.r = z__1.r, bcoeff.i = z__1.i;
+
+/* Scale to avoid underflow */
+
+ lsa = abs(sbeta) >= safmin && abs(acoeff) < small;
+ lsb = (d__1 = salpha.r, abs(d__1)) + (d__2 = d_imag(&salpha),
+ abs(d__2)) >= safmin && (d__3 = bcoeff.r, abs(d__3))
+ + (d__4 = d_imag(&bcoeff), abs(d__4)) < small;
+
+ scale = 1.;
+ if (lsa) {
+ scale = small / abs(sbeta) * min(anorm,big);
+ }
+ if (lsb) {
+/* Computing MAX */
+ d__3 = scale, d__4 = small / ((d__1 = salpha.r, abs(d__1))
+ + (d__2 = d_imag(&salpha), abs(d__2))) * min(
+ bnorm,big);
+ scale = max(d__3,d__4);
+ }
+ if (lsa || lsb) {
+/* Computing MIN */
+/* Computing MAX */
+ d__5 = 1., d__6 = abs(acoeff), d__5 = max(d__5,d__6),
+ d__6 = (d__1 = bcoeff.r, abs(d__1)) + (d__2 =
+ d_imag(&bcoeff), abs(d__2));
+ d__3 = scale, d__4 = 1. / (safmin * max(d__5,d__6));
+ scale = min(d__3,d__4);
+ if (lsa) {
+ acoeff = ascale * (scale * sbeta);
+ } else {
+ acoeff = scale * acoeff;
+ }
+ if (lsb) {
+ z__2.r = scale * salpha.r, z__2.i = scale * salpha.i;
+ z__1.r = bscale * z__2.r, z__1.i = bscale * z__2.i;
+ bcoeff.r = z__1.r, bcoeff.i = z__1.i;
+ } else {
+ z__1.r = scale * bcoeff.r, z__1.i = scale * bcoeff.i;
+ bcoeff.r = z__1.r, bcoeff.i = z__1.i;
+ }
+ }
+
+ acoefa = abs(acoeff);
+ bcoefa = (d__1 = bcoeff.r, abs(d__1)) + (d__2 = d_imag(&
+ bcoeff), abs(d__2));
+ xmax = 1.;
+ i__2 = *n;
+ for (jr = 1; jr <= i__2; ++jr) {
+ i__3 = jr;
+ work[i__3].r = 0., work[i__3].i = 0.;
+/* L60: */
+ }
+ i__2 = je;
+ work[i__2].r = 1., work[i__2].i = 0.;
+/* Computing MAX */
+ d__1 = ulp * acoefa * anorm, d__2 = ulp * bcoefa * bnorm,
+ d__1 = max(d__1,d__2);
+ dmin__ = max(d__1,safmin);
+
+/* H */
+/* Triangular solve of (a A - b B) y = 0 */
+
+/* H */
+/* (rowwise in (a A - b B) , or columnwise in a A - b B) */
+
+ i__2 = *n;
+ for (j = je + 1; j <= i__2; ++j) {
+
+/* Compute */
+/* j-1 */
+/* SUM = sum conjg( a*S(k,j) - b*P(k,j) )*x(k) */
+/* k=je */
+/* (Scale if necessary) */
+
+ temp = 1. / xmax;
+ if (acoefa * rwork[j] + bcoefa * rwork[*n + j] > bignum *
+ temp) {
+ i__3 = j - 1;
+ for (jr = je; jr <= i__3; ++jr) {
+ i__4 = jr;
+ i__5 = jr;
+ z__1.r = temp * work[i__5].r, z__1.i = temp *
+ work[i__5].i;
+ work[i__4].r = z__1.r, work[i__4].i = z__1.i;
+/* L70: */
+ }
+ xmax = 1.;
+ }
+ suma.r = 0., suma.i = 0.;
+ sumb.r = 0., sumb.i = 0.;
+
+ i__3 = j - 1;
+ for (jr = je; jr <= i__3; ++jr) {
+ d_cnjg(&z__3, &s[jr + j * s_dim1]);
+ i__4 = jr;
+ z__2.r = z__3.r * work[i__4].r - z__3.i * work[i__4]
+ .i, z__2.i = z__3.r * work[i__4].i + z__3.i *
+ work[i__4].r;
+ z__1.r = suma.r + z__2.r, z__1.i = suma.i + z__2.i;
+ suma.r = z__1.r, suma.i = z__1.i;
+ d_cnjg(&z__3, &p[jr + j * p_dim1]);
+ i__4 = jr;
+ z__2.r = z__3.r * work[i__4].r - z__3.i * work[i__4]
+ .i, z__2.i = z__3.r * work[i__4].i + z__3.i *
+ work[i__4].r;
+ z__1.r = sumb.r + z__2.r, z__1.i = sumb.i + z__2.i;
+ sumb.r = z__1.r, sumb.i = z__1.i;
+/* L80: */
+ }
+ z__2.r = acoeff * suma.r, z__2.i = acoeff * suma.i;
+ d_cnjg(&z__4, &bcoeff);
+ z__3.r = z__4.r * sumb.r - z__4.i * sumb.i, z__3.i =
+ z__4.r * sumb.i + z__4.i * sumb.r;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+
+/* Form x(j) = - SUM / conjg( a*S(j,j) - b*P(j,j) ) */
+
+/* with scaling and perturbation of the denominator */
+
+ i__3 = j + j * s_dim1;
+ z__3.r = acoeff * s[i__3].r, z__3.i = acoeff * s[i__3].i;
+ i__4 = j + j * p_dim1;
+ z__4.r = bcoeff.r * p[i__4].r - bcoeff.i * p[i__4].i,
+ z__4.i = bcoeff.r * p[i__4].i + bcoeff.i * p[i__4]
+ .r;
+ z__2.r = z__3.r - z__4.r, z__2.i = z__3.i - z__4.i;
+ d_cnjg(&z__1, &z__2);
+ d__.r = z__1.r, d__.i = z__1.i;
+ if ((d__1 = d__.r, abs(d__1)) + (d__2 = d_imag(&d__), abs(
+ d__2)) <= dmin__) {
+ z__1.r = dmin__, z__1.i = 0.;
+ d__.r = z__1.r, d__.i = z__1.i;
+ }
+
+ if ((d__1 = d__.r, abs(d__1)) + (d__2 = d_imag(&d__), abs(
+ d__2)) < 1.) {
+ if ((d__1 = sum.r, abs(d__1)) + (d__2 = d_imag(&sum),
+ abs(d__2)) >= bignum * ((d__3 = d__.r, abs(
+ d__3)) + (d__4 = d_imag(&d__), abs(d__4)))) {
+ temp = 1. / ((d__1 = sum.r, abs(d__1)) + (d__2 =
+ d_imag(&sum), abs(d__2)));
+ i__3 = j - 1;
+ for (jr = je; jr <= i__3; ++jr) {
+ i__4 = jr;
+ i__5 = jr;
+ z__1.r = temp * work[i__5].r, z__1.i = temp *
+ work[i__5].i;
+ work[i__4].r = z__1.r, work[i__4].i = z__1.i;
+/* L90: */
+ }
+ xmax = temp * xmax;
+ z__1.r = temp * sum.r, z__1.i = temp * sum.i;
+ sum.r = z__1.r, sum.i = z__1.i;
+ }
+ }
+ i__3 = j;
+ z__2.r = -sum.r, z__2.i = -sum.i;
+ zladiv_(&z__1, &z__2, &d__);
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* Computing MAX */
+ i__3 = j;
+ d__3 = xmax, d__4 = (d__1 = work[i__3].r, abs(d__1)) + (
+ d__2 = d_imag(&work[j]), abs(d__2));
+ xmax = max(d__3,d__4);
+/* L100: */
+ }
+
+/* Back transform eigenvector if HOWMNY='B'. */
+
+ if (ilback) {
+ i__2 = *n + 1 - je;
+ zgemv_("N", n, &i__2, &c_b2, &vl[je * vl_dim1 + 1], ldvl,
+ &work[je], &c__1, &c_b1, &work[*n + 1], &c__1);
+ isrc = 2;
+ ibeg = 1;
+ } else {
+ isrc = 1;
+ ibeg = je;
+ }
+
+/* Copy and scale eigenvector into column of VL */
+
+ xmax = 0.;
+ i__2 = *n;
+ for (jr = ibeg; jr <= i__2; ++jr) {
+/* Computing MAX */
+ i__3 = (isrc - 1) * *n + jr;
+ d__3 = xmax, d__4 = (d__1 = work[i__3].r, abs(d__1)) + (
+ d__2 = d_imag(&work[(isrc - 1) * *n + jr]), abs(
+ d__2));
+ xmax = max(d__3,d__4);
+/* L110: */
+ }
+
+ if (xmax > safmin) {
+ temp = 1. / xmax;
+ i__2 = *n;
+ for (jr = ibeg; jr <= i__2; ++jr) {
+ i__3 = jr + ieig * vl_dim1;
+ i__4 = (isrc - 1) * *n + jr;
+ z__1.r = temp * work[i__4].r, z__1.i = temp * work[
+ i__4].i;
+ vl[i__3].r = z__1.r, vl[i__3].i = z__1.i;
+/* L120: */
+ }
+ } else {
+ ibeg = *n + 1;
+ }
+
+ i__2 = ibeg - 1;
+ for (jr = 1; jr <= i__2; ++jr) {
+ i__3 = jr + ieig * vl_dim1;
+ vl[i__3].r = 0., vl[i__3].i = 0.;
+/* L130: */
+ }
+
+ }
+L140:
+ ;
+ }
+ }
+
+/* Right eigenvectors */
+
+ if (compr) {
+ ieig = im + 1;
+
+/* Main loop over eigenvalues */
+
+ for (je = *n; je >= 1; --je) {
+ if (ilall) {
+ ilcomp = TRUE_;
+ } else {
+ ilcomp = select[je];
+ }
+ if (ilcomp) {
+ --ieig;
+
+ i__1 = je + je * s_dim1;
+ i__2 = je + je * p_dim1;
+ if ((d__2 = s[i__1].r, abs(d__2)) + (d__3 = d_imag(&s[je + je
+ * s_dim1]), abs(d__3)) <= safmin && (d__1 = p[i__2].r,
+ abs(d__1)) <= safmin) {
+
+/* Singular matrix pencil -- return unit eigenvector */
+
+ i__1 = *n;
+ for (jr = 1; jr <= i__1; ++jr) {
+ i__2 = jr + ieig * vr_dim1;
+ vr[i__2].r = 0., vr[i__2].i = 0.;
+/* L150: */
+ }
+ i__1 = ieig + ieig * vr_dim1;
+ vr[i__1].r = 1., vr[i__1].i = 0.;
+ goto L250;
+ }
+
+/* Non-singular eigenvalue: */
+/* Compute coefficients a and b in */
+
+/* ( a A - b B ) x = 0 */
+
+/* Computing MAX */
+ i__1 = je + je * s_dim1;
+ i__2 = je + je * p_dim1;
+ d__4 = ((d__2 = s[i__1].r, abs(d__2)) + (d__3 = d_imag(&s[je
+ + je * s_dim1]), abs(d__3))) * ascale, d__5 = (d__1 =
+ p[i__2].r, abs(d__1)) * bscale, d__4 = max(d__4,d__5);
+ temp = 1. / max(d__4,safmin);
+ i__1 = je + je * s_dim1;
+ z__2.r = temp * s[i__1].r, z__2.i = temp * s[i__1].i;
+ z__1.r = ascale * z__2.r, z__1.i = ascale * z__2.i;
+ salpha.r = z__1.r, salpha.i = z__1.i;
+ i__1 = je + je * p_dim1;
+ sbeta = temp * p[i__1].r * bscale;
+ acoeff = sbeta * ascale;
+ z__1.r = bscale * salpha.r, z__1.i = bscale * salpha.i;
+ bcoeff.r = z__1.r, bcoeff.i = z__1.i;
+
+/* Scale to avoid underflow */
+
+ lsa = abs(sbeta) >= safmin && abs(acoeff) < small;
+ lsb = (d__1 = salpha.r, abs(d__1)) + (d__2 = d_imag(&salpha),
+ abs(d__2)) >= safmin && (d__3 = bcoeff.r, abs(d__3))
+ + (d__4 = d_imag(&bcoeff), abs(d__4)) < small;
+
+ scale = 1.;
+ if (lsa) {
+ scale = small / abs(sbeta) * min(anorm,big);
+ }
+ if (lsb) {
+/* Computing MAX */
+ d__3 = scale, d__4 = small / ((d__1 = salpha.r, abs(d__1))
+ + (d__2 = d_imag(&salpha), abs(d__2))) * min(
+ bnorm,big);
+ scale = max(d__3,d__4);
+ }
+ if (lsa || lsb) {
+/* Computing MIN */
+/* Computing MAX */
+ d__5 = 1., d__6 = abs(acoeff), d__5 = max(d__5,d__6),
+ d__6 = (d__1 = bcoeff.r, abs(d__1)) + (d__2 =
+ d_imag(&bcoeff), abs(d__2));
+ d__3 = scale, d__4 = 1. / (safmin * max(d__5,d__6));
+ scale = min(d__3,d__4);
+ if (lsa) {
+ acoeff = ascale * (scale * sbeta);
+ } else {
+ acoeff = scale * acoeff;
+ }
+ if (lsb) {
+ z__2.r = scale * salpha.r, z__2.i = scale * salpha.i;
+ z__1.r = bscale * z__2.r, z__1.i = bscale * z__2.i;
+ bcoeff.r = z__1.r, bcoeff.i = z__1.i;
+ } else {
+ z__1.r = scale * bcoeff.r, z__1.i = scale * bcoeff.i;
+ bcoeff.r = z__1.r, bcoeff.i = z__1.i;
+ }
+ }
+
+ acoefa = abs(acoeff);
+ bcoefa = (d__1 = bcoeff.r, abs(d__1)) + (d__2 = d_imag(&
+ bcoeff), abs(d__2));
+ xmax = 1.;
+ i__1 = *n;
+ for (jr = 1; jr <= i__1; ++jr) {
+ i__2 = jr;
+ work[i__2].r = 0., work[i__2].i = 0.;
+/* L160: */
+ }
+ i__1 = je;
+ work[i__1].r = 1., work[i__1].i = 0.;
+/* Computing MAX */
+ d__1 = ulp * acoefa * anorm, d__2 = ulp * bcoefa * bnorm,
+ d__1 = max(d__1,d__2);
+ dmin__ = max(d__1,safmin);
+
+/* Triangular solve of (a A - b B) x = 0 (columnwise) */
+
+/* WORK(1:j-1) contains sums w, */
+/* WORK(j+1:JE) contains x */
+
+ i__1 = je - 1;
+ for (jr = 1; jr <= i__1; ++jr) {
+ i__2 = jr;
+ i__3 = jr + je * s_dim1;
+ z__2.r = acoeff * s[i__3].r, z__2.i = acoeff * s[i__3].i;
+ i__4 = jr + je * p_dim1;
+ z__3.r = bcoeff.r * p[i__4].r - bcoeff.i * p[i__4].i,
+ z__3.i = bcoeff.r * p[i__4].i + bcoeff.i * p[i__4]
+ .r;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+/* L170: */
+ }
+ i__1 = je;
+ work[i__1].r = 1., work[i__1].i = 0.;
+
+ for (j = je - 1; j >= 1; --j) {
+
+/* Form x(j) := - w(j) / d */
+/* with scaling and perturbation of the denominator */
+
+ i__1 = j + j * s_dim1;
+ z__2.r = acoeff * s[i__1].r, z__2.i = acoeff * s[i__1].i;
+ i__2 = j + j * p_dim1;
+ z__3.r = bcoeff.r * p[i__2].r - bcoeff.i * p[i__2].i,
+ z__3.i = bcoeff.r * p[i__2].i + bcoeff.i * p[i__2]
+ .r;
+ z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+ d__.r = z__1.r, d__.i = z__1.i;
+ if ((d__1 = d__.r, abs(d__1)) + (d__2 = d_imag(&d__), abs(
+ d__2)) <= dmin__) {
+ z__1.r = dmin__, z__1.i = 0.;
+ d__.r = z__1.r, d__.i = z__1.i;
+ }
+
+ if ((d__1 = d__.r, abs(d__1)) + (d__2 = d_imag(&d__), abs(
+ d__2)) < 1.) {
+ i__1 = j;
+ if ((d__1 = work[i__1].r, abs(d__1)) + (d__2 = d_imag(
+ &work[j]), abs(d__2)) >= bignum * ((d__3 =
+ d__.r, abs(d__3)) + (d__4 = d_imag(&d__), abs(
+ d__4)))) {
+ i__1 = j;
+ temp = 1. / ((d__1 = work[i__1].r, abs(d__1)) + (
+ d__2 = d_imag(&work[j]), abs(d__2)));
+ i__1 = je;
+ for (jr = 1; jr <= i__1; ++jr) {
+ i__2 = jr;
+ i__3 = jr;
+ z__1.r = temp * work[i__3].r, z__1.i = temp *
+ work[i__3].i;
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+/* L180: */
+ }
+ }
+ }
+
+ i__1 = j;
+ i__2 = j;
+ z__2.r = -work[i__2].r, z__2.i = -work[i__2].i;
+ zladiv_(&z__1, &z__2, &d__);
+ work[i__1].r = z__1.r, work[i__1].i = z__1.i;
+
+ if (j > 1) {
+
+/* w = w + x(j)*(a S(*,j) - b P(*,j) ) with scaling */
+
+ i__1 = j;
+ if ((d__1 = work[i__1].r, abs(d__1)) + (d__2 = d_imag(
+ &work[j]), abs(d__2)) > 1.) {
+ i__1 = j;
+ temp = 1. / ((d__1 = work[i__1].r, abs(d__1)) + (
+ d__2 = d_imag(&work[j]), abs(d__2)));
+ if (acoefa * rwork[j] + bcoefa * rwork[*n + j] >=
+ bignum * temp) {
+ i__1 = je;
+ for (jr = 1; jr <= i__1; ++jr) {
+ i__2 = jr;
+ i__3 = jr;
+ z__1.r = temp * work[i__3].r, z__1.i =
+ temp * work[i__3].i;
+ work[i__2].r = z__1.r, work[i__2].i =
+ z__1.i;
+/* L190: */
+ }
+ }
+ }
+
+ i__1 = j;
+ z__1.r = acoeff * work[i__1].r, z__1.i = acoeff *
+ work[i__1].i;
+ ca.r = z__1.r, ca.i = z__1.i;
+ i__1 = j;
+ z__1.r = bcoeff.r * work[i__1].r - bcoeff.i * work[
+ i__1].i, z__1.i = bcoeff.r * work[i__1].i +
+ bcoeff.i * work[i__1].r;
+ cb.r = z__1.r, cb.i = z__1.i;
+ i__1 = j - 1;
+ for (jr = 1; jr <= i__1; ++jr) {
+ i__2 = jr;
+ i__3 = jr;
+ i__4 = jr + j * s_dim1;
+ z__3.r = ca.r * s[i__4].r - ca.i * s[i__4].i,
+ z__3.i = ca.r * s[i__4].i + ca.i * s[i__4]
+ .r;
+ z__2.r = work[i__3].r + z__3.r, z__2.i = work[
+ i__3].i + z__3.i;
+ i__5 = jr + j * p_dim1;
+ z__4.r = cb.r * p[i__5].r - cb.i * p[i__5].i,
+ z__4.i = cb.r * p[i__5].i + cb.i * p[i__5]
+ .r;
+ z__1.r = z__2.r - z__4.r, z__1.i = z__2.i -
+ z__4.i;
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+/* L200: */
+ }
+ }
+/* L210: */
+ }
+
+/* Back transform eigenvector if HOWMNY='B'. */
+
+ if (ilback) {
+ zgemv_("N", n, &je, &c_b2, &vr[vr_offset], ldvr, &work[1],
+ &c__1, &c_b1, &work[*n + 1], &c__1);
+ isrc = 2;
+ iend = *n;
+ } else {
+ isrc = 1;
+ iend = je;
+ }
+
+/* Copy and scale eigenvector into column of VR */
+
+ xmax = 0.;
+ i__1 = iend;
+ for (jr = 1; jr <= i__1; ++jr) {
+/* Computing MAX */
+ i__2 = (isrc - 1) * *n + jr;
+ d__3 = xmax, d__4 = (d__1 = work[i__2].r, abs(d__1)) + (
+ d__2 = d_imag(&work[(isrc - 1) * *n + jr]), abs(
+ d__2));
+ xmax = max(d__3,d__4);
+/* L220: */
+ }
+
+ if (xmax > safmin) {
+ temp = 1. / xmax;
+ i__1 = iend;
+ for (jr = 1; jr <= i__1; ++jr) {
+ i__2 = jr + ieig * vr_dim1;
+ i__3 = (isrc - 1) * *n + jr;
+ z__1.r = temp * work[i__3].r, z__1.i = temp * work[
+ i__3].i;
+ vr[i__2].r = z__1.r, vr[i__2].i = z__1.i;
+/* L230: */
+ }
+ } else {
+ iend = 0;
+ }
+
+ i__1 = *n;
+ for (jr = iend + 1; jr <= i__1; ++jr) {
+ i__2 = jr + ieig * vr_dim1;
+ vr[i__2].r = 0., vr[i__2].i = 0.;
+/* L240: */
+ }
+
+ }
+L250:
+ ;
+ }
+ }
+
+ return 0;
+
+/* End of ZTGEVC */
+
+} /* ztgevc_ */
diff --git a/contrib/libs/clapack/ztgex2.c b/contrib/libs/clapack/ztgex2.c
new file mode 100644
index 0000000000..d85191c51d
--- /dev/null
+++ b/contrib/libs/clapack/ztgex2.c
@@ -0,0 +1,376 @@
+/* ztgex2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__2 = 2;
+static integer c__1 = 1;
+
+/* Subroutine */ int ztgex2_(logical *wantq, logical *wantz, integer *n, 
+	doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb, 
+	doublecomplex *q, integer *ldq, doublecomplex *z__, integer *ldz, 
+	integer *j1, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1, 
+	    z_offset, i__1, i__2, i__3;
+    doublereal d__1;
+    doublecomplex z__1, z__2, z__3;
+
+    /* Builtin functions */
+    double sqrt(doublereal), z_abs(doublecomplex *);
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    doublecomplex f, g;
+    integer i__, m;
+    doublecomplex s[4]	/* was [2][2] */, t[4]	/* was [2][2] */;
+    doublereal cq, sa, sb, cz;
+    doublecomplex sq;
+    doublereal ss, ws;
+    doublecomplex sz;
+    doublereal eps, sum;
+    logical weak;
+    doublecomplex cdum, work[8];
+    extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *, 
+	    doublecomplex *, integer *, doublereal *, doublecomplex *);
+    doublereal scale;
+    extern doublereal dlamch_(char *);
+    logical dtrong;
+    doublereal thresh;
+    extern /* Subroutine */ int zlacpy_(char *, integer *, integer *, 
+	    doublecomplex *, integer *, doublecomplex *, integer *), 
+	    zlartg_(doublecomplex *, doublecomplex *, doublereal *, 
+	    doublecomplex *, doublecomplex *);
+    doublereal smlnum;
+    extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *, 
+	     doublereal *, doublereal *);
+
+
+/*  -- LAPACK auxiliary routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZTGEX2 swaps adjacent diagonal 1 by 1 blocks (A11,B11) and (A22,B22) */
+/*  in an upper triangular matrix pair (A, B) by an unitary equivalence */
+/*  transformation. */
+
+/*  (A, B) must be in generalized Schur canonical form, that is, A and */
+/*  B are both upper triangular. */
+
+/*  Optionally, the matrices Q and Z of generalized Schur vectors are */
+/*  updated. */
+
+/*         Q(in) * A(in) * Z(in)' = Q(out) * A(out) * Z(out)' */
+/*         Q(in) * B(in) * Z(in)' = Q(out) * B(out) * Z(out)' */
+
+
+/*  Arguments */
+/*  ========= */
+
+/*  WANTQ   (input) LOGICAL */
+/*          .TRUE. : update the left transformation matrix Q; */
+/*          .FALSE.: do not update Q. */
+
+/*  WANTZ   (input) LOGICAL */
+/*          .TRUE. : update the right transformation matrix Z; */
+/*          .FALSE.: do not update Z. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrices A and B. N >= 0. */
+
+/*  A       (input/output) COMPLEX*16 arrays, dimensions (LDA,N) */
+/*          On entry, the matrix A in the pair (A, B). */
+/*          On exit, the updated matrix A. */
+
+/*  LDA     (input)  INTEGER */
+/*          The leading dimension of the array A. LDA >= max(1,N). */
+
+/*  B       (input/output) COMPLEX*16 arrays, dimensions (LDB,N) */
+/*          On entry, the matrix B in the pair (A, B). */
+/*          On exit, the updated matrix B. */
+
+/*  LDB     (input)  INTEGER */
+/*          The leading dimension of the array B. LDB >= max(1,N). */
+
+/*  Q       (input/output) COMPLEX*16 array, dimension (LDZ,N) */
+/*          If WANTQ = .TRUE, on entry, the unitary matrix Q. On exit, */
+/*          the updated matrix Q. */
+/*          Not referenced if WANTQ = .FALSE.. */
+
+/*  LDQ     (input) INTEGER */
+/*          The leading dimension of the array Q. LDQ >= 1; */
+/*          If WANTQ = .TRUE., LDQ >= N. */
+
+/*  Z       (input/output) COMPLEX*16 array, dimension (LDZ,N) */
+/*          If WANTZ = .TRUE, on entry, the unitary matrix Z. On exit, */
+/*          the updated matrix Z. */
+/*          Not referenced if WANTZ = .FALSE.. */
+
+/*  LDZ     (input) INTEGER */
+/*          The leading dimension of the array Z. LDZ >= 1; */
+/*          If WANTZ = .TRUE., LDZ >= N. */
+
+/*  J1      (input) INTEGER */
+/*          The index to the first block (A11, B11). */
+
+/*  INFO    (output) INTEGER */
+/*           =0:  Successful exit. */
+/*           =1:  The transformed matrix pair (A, B) would be too far */
+/*                from generalized Schur form; the problem is ill- */
+/*                conditioned. */
+
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*     Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/*     Umea University, S-901 87 Umea, Sweden. */
+
+/*  In the current code both weak and strong stability tests are */
+/*  performed. The user can omit the strong stability test by changing */
+/*  the internal logical parameter WANDS to .FALSE.. See ref. [2] for */
+/*  details. */
+
+/*  [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */
+/*      Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */
+/*      M.S. Moonen et al (eds), Linear Algebra for Large Scale and */
+/*      Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */
+
+/*  [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified */
+/*      Eigenvalues of a Regular Matrix Pair (A, B) and Condition */
+/*      Estimation: Theory, Algorithms and Software, Report UMINF-94.04, */
+/*      Department of Computing Science, Umea University, S-901 87 Umea, */
+/*      Sweden, 1994. Also as LAPACK Working Note 87. To appear in */
+/*      Numerical Algorithms, 1996. */
+
+/*  ===================================================================== */
+
+/*     .. Parameters .. */
+/*     .. */
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. Local Arrays .. */
+/*     .. */
+/*     .. External Functions .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+
+    /* Function Body */
+    *info = 0;
+
+/*     Quick return if possible */
+
+    if (*n <= 1) {
+	return 0;
+    }
+
+    m = 2; /* the two adjacent 1-by-1 blocks form one 2-by-2 block pair */
+    weak = FALSE_;
+    dtrong = FALSE_; /* "dtrong" is the f2c-generated name of the strong-test flag */
+
+/*     Make a local copy of the selected 2-by-2 diagonal block of (A, B) */
+
+    zlacpy_("Full", &m, &m, &a[*j1 + *j1 * a_dim1], lda, s, &c__2);
+    zlacpy_("Full", &m, &m, &b[*j1 + *j1 * b_dim1], ldb, t, &c__2);
+
+/*     Compute the threshold for testing the acceptance of swapping. */
+
+    eps = dlamch_("P");
+    smlnum = dlamch_("S") / eps;
+    scale = 0.;
+    sum = 1.;
+    zlacpy_("Full", &m, &m, s, &c__2, work, &m);
+    zlacpy_("Full", &m, &m, t, &c__2, &work[m * m], &m);
+    i__1 = (m << 1) * m;
+    zlassq_(&i__1, work, &c__1, &scale, &sum);
+    sa = scale * sqrt(sum);
+/* Computing MAX */
+    d__1 = eps * 10. * sa;
+    thresh = max(d__1,smlnum);
+
+/*     Compute unitary QL and RQ that swap 1-by-1 and 1-by-1 blocks */
+/*     using Givens rotations and perform the swap tentatively. */
+
+    z__2.r = s[3].r * t[0].r - s[3].i * t[0].i, z__2.i = s[3].r * t[0].i + s[
+	    3].i * t[0].r;
+    z__3.r = t[3].r * s[0].r - t[3].i * s[0].i, z__3.i = t[3].r * s[0].i + t[
+	    3].i * s[0].r;
+    z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+    f.r = z__1.r, f.i = z__1.i;
+    z__2.r = s[3].r * t[2].r - s[3].i * t[2].i, z__2.i = s[3].r * t[2].i + s[
+	    3].i * t[2].r;
+    z__3.r = t[3].r * s[2].r - t[3].i * s[2].i, z__3.i = t[3].r * s[2].i + t[
+	    3].i * s[2].r;
+    z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
+    g.r = z__1.r, g.i = z__1.i;
+    sa = z_abs(&s[3]);
+    sb = z_abs(&t[3]);
+    zlartg_(&g, &f, &cz, &sz, &cdum);
+    z__1.r = -sz.r, z__1.i = -sz.i;
+    sz.r = z__1.r, sz.i = z__1.i;
+    d_cnjg(&z__1, &sz);
+    zrot_(&c__2, s, &c__1, &s[2], &c__1, &cz, &z__1);
+    d_cnjg(&z__1, &sz);
+    zrot_(&c__2, t, &c__1, &t[2], &c__1, &cz, &z__1);
+    if (sa >= sb) {
+	zlartg_(s, &s[1], &cq, &sq, &cdum);
+    } else {
+	zlartg_(t, &t[1], &cq, &sq, &cdum);
+    }
+    zrot_(&c__2, s, &c__2, &s[1], &c__2, &cq, &sq);
+    zrot_(&c__2, t, &c__2, &t[1], &c__2, &cq, &sq);
+
+/*     Weak stability test:  |S21| + |T21| <= O(EPS F-norm((S, T))) */
+
+    ws = z_abs(&s[1]) + z_abs(&t[1]);
+    weak = ws <= thresh;
+    if (! weak) {
+	goto L20;
+    }
+
+    if (TRUE_) { /* WANDS: the strong stability test is always enabled here */
+
+/*        Strong stability test: */
+/*           F-norm((A-QL'*S*QR, B-QL'*T*QR)) <= O(EPS*F-norm((A, B))) */
+
+	zlacpy_("Full", &m, &m, s, &c__2, work, &m);
+	zlacpy_("Full", &m, &m, t, &c__2, &work[m * m], &m);
+	d_cnjg(&z__2, &sz);
+	z__1.r = -z__2.r, z__1.i = -z__2.i;
+	zrot_(&c__2, work, &c__1, &work[2], &c__1, &cz, &z__1);
+	d_cnjg(&z__2, &sz);
+	z__1.r = -z__2.r, z__1.i = -z__2.i;
+	zrot_(&c__2, &work[4], &c__1, &work[6], &c__1, &cz, &z__1);
+	z__1.r = -sq.r, z__1.i = -sq.i;
+	zrot_(&c__2, work, &c__2, &work[1], &c__2, &cq, &z__1);
+	z__1.r = -sq.r, z__1.i = -sq.i;
+	zrot_(&c__2, &work[4], &c__2, &work[5], &c__2, &cq, &z__1);
+	for (i__ = 1; i__ <= 2; ++i__) {
+	    i__1 = i__ - 1;
+	    i__2 = i__ - 1;
+	    i__3 = *j1 + i__ - 1 + *j1 * a_dim1;
+	    z__1.r = work[i__2].r - a[i__3].r, z__1.i = work[i__2].i - a[i__3]
+		    .i;
+	    work[i__1].r = z__1.r, work[i__1].i = z__1.i;
+	    i__1 = i__ + 1;
+	    i__2 = i__ + 1;
+	    i__3 = *j1 + i__ - 1 + (*j1 + 1) * a_dim1;
+	    z__1.r = work[i__2].r - a[i__3].r, z__1.i = work[i__2].i - a[i__3]
+		    .i;
+	    work[i__1].r = z__1.r, work[i__1].i = z__1.i;
+	    i__1 = i__ + 3;
+	    i__2 = i__ + 3;
+	    i__3 = *j1 + i__ - 1 + *j1 * b_dim1;
+	    z__1.r = work[i__2].r - b[i__3].r, z__1.i = work[i__2].i - b[i__3]
+		    .i;
+	    work[i__1].r = z__1.r, work[i__1].i = z__1.i;
+	    i__1 = i__ + 5;
+	    i__2 = i__ + 5;
+	    i__3 = *j1 + i__ - 1 + (*j1 + 1) * b_dim1;
+	    z__1.r = work[i__2].r - b[i__3].r, z__1.i = work[i__2].i - b[i__3]
+		    .i;
+	    work[i__1].r = z__1.r, work[i__1].i = z__1.i;
+/* L10: */
+	}
+	scale = 0.;
+	sum = 1.;
+	i__1 = (m << 1) * m;
+	zlassq_(&i__1, work, &c__1, &scale, &sum);
+	ss = scale * sqrt(sum);
+	dtrong = ss <= thresh;
+	if (! dtrong) {
+	    goto L20;
+	}
+    }
+
+/*     If the swap is accepted ("weakly" and "strongly"), apply the */
+/*     equivalence transformations to the original matrix pair (A,B) */
+
+    i__1 = *j1 + 1;
+    d_cnjg(&z__1, &sz);
+    zrot_(&i__1, &a[*j1 * a_dim1 + 1], &c__1, &a[(*j1 + 1) * a_dim1 + 1], &
+	    c__1, &cz, &z__1);
+    i__1 = *j1 + 1;
+    d_cnjg(&z__1, &sz);
+    zrot_(&i__1, &b[*j1 * b_dim1 + 1], &c__1, &b[(*j1 + 1) * b_dim1 + 1], &
+	    c__1, &cz, &z__1);
+    i__1 = *n - *j1 + 1;
+    zrot_(&i__1, &a[*j1 + *j1 * a_dim1], lda, &a[*j1 + 1 + *j1 * a_dim1], lda, 
+	     &cq, &sq);
+    i__1 = *n - *j1 + 1;
+    zrot_(&i__1, &b[*j1 + *j1 * b_dim1], ldb, &b[*j1 + 1 + *j1 * b_dim1], ldb, 
+	     &cq, &sq);
+
+/*     Set the (2,1) block to 0 so the pair (A, B) stays upper triangular */
+
+    i__1 = *j1 + 1 + *j1 * a_dim1;
+    a[i__1].r = 0., a[i__1].i = 0.;
+    i__1 = *j1 + 1 + *j1 * b_dim1;
+    b[i__1].r = 0., b[i__1].i = 0.;
+
+/*     Accumulate transformations into Q and Z if requested. */
+
+    if (*wantz) {
+	d_cnjg(&z__1, &sz);
+	zrot_(n, &z__[*j1 * z_dim1 + 1], &c__1, &z__[(*j1 + 1) * z_dim1 + 1], 
+		&c__1, &cz, &z__1);
+    }
+    if (*wantq) {
+	d_cnjg(&z__1, &sq);
+	zrot_(n, &q[*j1 * q_dim1 + 1], &c__1, &q[(*j1 + 1) * q_dim1 + 1], &
+		c__1, &cq, &z__1);
+    }
+
+/*     Exit with INFO = 0 if swap was successfully performed. */
+
+    return 0;
+
+/*     Exit with INFO = 1 if swap was rejected. */
+
+L20:
+    *info = 1;
+    return 0;
+
+/*     End of ZTGEX2 */
+
+} /* ztgex2_ */
diff --git a/contrib/libs/clapack/ztgexc.c b/contrib/libs/clapack/ztgexc.c
new file mode 100644
index 0000000000..b6c5e1d28d
--- /dev/null
+++ b/contrib/libs/clapack/ztgexc.c
@@ -0,0 +1,248 @@
+/* ztgexc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ztgexc_(logical *wantq, logical *wantz, integer *n, 
+	doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb, 
+	doublecomplex *q, integer *ldq, doublecomplex *z__, integer *ldz, 
+	integer *ifst, integer *ilst, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1, 
+	    z_offset, i__1;
+
+    /* Local variables */
+    integer here;
+    extern /* Subroutine */ int ztgex2_(logical *, logical *, integer *, 
+	    doublecomplex *, integer *, doublecomplex *, integer *, 
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *, 
+	     integer *), xerbla_(char *, integer *);
+
+
+/*  -- LAPACK routine (version 3.2) -- */
+/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/*     November 2006 */
+
+/*     .. Scalar Arguments .. */
+/*     .. */
+/*     .. Array Arguments .. */
+/*     .. */
+
+/*  Purpose */
+/*  ======= */
+
+/*  ZTGEXC reorders the generalized Schur decomposition of a complex */
+/*  matrix pair (A,B), using an unitary equivalence transformation */
+/*  (A, B) := Q * (A, B) * Z', so that the diagonal block of (A, B) with */
+/*  row index IFST is moved to row ILST. */
+
+/*  (A, B) must be in generalized Schur canonical form, that is, A and */
+/*  B are both upper triangular. */
+
+/*  Optionally, the matrices Q and Z of generalized Schur vectors are */
+/*  updated. */
+
+/*         Q(in) * A(in) * Z(in)' = Q(out) * A(out) * Z(out)' */
+/*         Q(in) * B(in) * Z(in)' = Q(out) * B(out) * Z(out)' */
+
+/*  Arguments */
+/*  ========= */
+
+/*  WANTQ   (input) LOGICAL */
+/*          .TRUE. : update the left transformation matrix Q; */
+/*          .FALSE.: do not update Q. */
+
+/*  WANTZ   (input) LOGICAL */
+/*          .TRUE. : update the right transformation matrix Z; */
+/*          .FALSE.: do not update Z. */
+
+/*  N       (input) INTEGER */
+/*          The order of the matrices A and B. N >= 0. */
+
+/*  A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/*          On entry, the upper triangular matrix A in the pair (A, B). */
+/*          On exit, the updated matrix A. */
+
+/*  LDA     (input) INTEGER */
+/*          The leading dimension of the array A. LDA >= max(1,N). */
+
+/*  B       (input/output) COMPLEX*16 array, dimension (LDB,N) */
+/*          On entry, the upper triangular matrix B in the pair (A, B). */
+/*          On exit, the updated matrix B. */
+
+/*  LDB     (input) INTEGER */
+/*          The leading dimension of the array B. LDB >= max(1,N). */
+
+/*  Q       (input/output) COMPLEX*16 array, dimension (LDZ,N) */
+/*          On entry, if WANTQ = .TRUE., the unitary matrix Q. */
+/*          On exit, the updated matrix Q. */
+/*          If WANTQ = .FALSE., Q is not referenced. */
+
+/*  LDQ     (input) INTEGER */
+/*          The leading dimension of the array Q. LDQ >= 1; */
+/*          If WANTQ = .TRUE., LDQ >= N. */
+
+/*  Z       (input/output) COMPLEX*16 array, dimension (LDZ,N) */
+/*          On entry, if WANTZ = .TRUE., the unitary matrix Z. */
+/*          On exit, the updated matrix Z. */
+/*          If WANTZ = .FALSE., Z is not referenced. */
+
+/*  LDZ     (input) INTEGER */
+/*          The leading dimension of the array Z. LDZ >= 1; */
+/*          If WANTZ = .TRUE., LDZ >= N. */
+
+/*  IFST    (input) INTEGER */
+/*  ILST    (input/output) INTEGER */
+/*          Specify the reordering of the diagonal blocks of (A, B). */
+/*          The block with row index IFST is moved to row ILST, by a */
+/*          sequence of swapping between adjacent blocks. */
+
+/*  INFO    (output) INTEGER */
+/*           =0:  Successful exit. */
+/*           <0:  if INFO = -i, the i-th argument had an illegal value. */
+/*           =1:  The transformed matrix pair (A, B) would be too far */
+/*                from generalized Schur form; the problem is ill- */
+/*                conditioned. (A, B) may have been partially reordered, */
+/*                and ILST points to the first row of the current */
+/*                position of the block being moved. */
+
+
+/*  Further Details */
+/*  =============== */
+
+/*  Based on contributions by */
+/*     Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/*     Umea University, S-901 87 Umea, Sweden. */
+
+/*  [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */
+/*      Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */
+/*      M.S. Moonen et al (eds), Linear Algebra for Large Scale and */
+/*      Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */
+
+/*  [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified */
+/*      Eigenvalues of a Regular Matrix Pair (A, B) and Condition */
+/*      Estimation: Theory, Algorithms and Software, Report */
+/*      UMINF - 94.04, Department of Computing Science, Umea University, */
+/*      S-901 87 Umea, Sweden, 1994. Also as LAPACK Working Note 87. */
+/*      To appear in Numerical Algorithms, 1996. */
+
+/*  [3] B. Kagstrom and P. Poromaa, LAPACK-Style Algorithms and Software */
+/*      for Solving the Generalized Sylvester Equation and Estimating the */
+/*      Separation between Regular Matrix Pairs, Report UMINF - 93.23, */
+/*      Department of Computing Science, Umea University, S-901 87 Umea, */
+/*      Sweden, December 1993, Revised April 1994, Also as LAPACK working */
+/*      Note 75. To appear in ACM Trans. on Math. Software, Vol 22, No 1, */
+/*      1996. */
+
+/*  ===================================================================== */
+
+/*     .. Local Scalars .. */
+/*     .. */
+/*     .. External Subroutines .. */
+/*     .. */
+/*     .. Intrinsic Functions .. */
+/*     .. */
+/*     .. Executable Statements .. */
+
+/*     Decode and test input arguments. */
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    z_dim1 = *ldz;
+    z_offset = 1 + z_dim1;
+    z__ -= z_offset;
+
+    /* Function Body */
+    *info = 0;
+    if (*n < 0) { /* WANTQ/WANTZ (args 1-2) are LOGICALs and need no check */
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -7;
+    } else if (*ldq < 1 || *wantq && *ldq < max(1,*n)) {
+	*info = -9;
+    } else if (*ldz < 1 || *wantz && *ldz < max(1,*n)) {
+	*info = -11;
+    } else if (*ifst < 1 || *ifst > *n) {
+	*info = -12;
+    } else if (*ilst < 1 || *ilst > *n) {
+	*info = -13;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZTGEXC", &i__1);
+	return 0;
+    }
+
+/*     Quick return if possible */
+
+    if (*n <= 1) {
+	return 0;
+    }
+    if (*ifst == *ilst) {
+	return 0;
+    }
+
+    if (*ifst < *ilst) {
+
+	here = *ifst;
+
+L10:
+
+/*        Swap with next one below (block moves from HERE down to HERE+1) */
+
+	ztgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset], ldb, &q[
+		q_offset], ldq, &z__[z_offset], ldz, &here, info);
+	if (*info != 0) {
+	    *ilst = here;
+	    return 0;
+	}
+	++here;
+	if (here < *ilst) {
+	    goto L10;
+	}
+	--here; /* the loop overshoots ILST by one; step back */
+    } else {
+	here = *ifst - 1;
+
+L20:
+
+/*        Swap with next one above (block moves from HERE+1 up to HERE) */
+
+	ztgex2_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset], ldb, &q[
+		q_offset], ldq, &z__[z_offset], ldz, &here, info);
+	if (*info != 0) {
+	    *ilst = here;
+	    return 0;
+	}
+	--here;
+	if (here >= *ilst) {
+	    goto L20;
+	}
+	++here; /* the loop overshoots ILST by one; step forward */
+    }
+    *ilst = here;
+    return 0;
+
+/*     End of ZTGEXC */
+
+} /* ztgexc_ */
diff --git a/contrib/libs/clapack/ztgsen.c b/contrib/libs/clapack/ztgsen.c
new file mode 100644
index 0000000000..cbff260d85
--- /dev/null
+++ b/contrib/libs/clapack/ztgsen.c
@@ -0,0 +1,766 @@
+/* ztgsen.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ztgsen_(integer *ijob, logical *wantq, logical *wantz,
+ logical *select, integer *n, doublecomplex *a, integer *lda,
+ doublecomplex *b, integer *ldb, doublecomplex *alpha, doublecomplex *
+ beta, doublecomplex *q, integer *ldq, doublecomplex *z__, integer *
+ ldz, integer *m, doublereal *pl, doublereal *pr, doublereal *dif,
+ doublecomplex *work, integer *lwork, integer *iwork, integer *liwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1,
+ z_offset, i__1, i__2, i__3;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), z_abs(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, k, n1, n2, ks, mn2, ijb, kase, ierr;
+ doublereal dsum;
+ logical swap;
+ doublecomplex temp1, temp2;
+ integer isave[3];
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *);
+ logical wantd;
+ integer lwmin;
+ logical wantp;
+ extern /* Subroutine */ int zlacn2_(integer *, doublecomplex *,
+ doublecomplex *, doublereal *, integer *, integer *);
+ logical wantd1, wantd2;
+ extern doublereal dlamch_(char *);
+ doublereal dscale, rdscal, safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ integer liwmin;
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *),
+ ztgexc_(logical *, logical *, integer *, doublecomplex *, integer
+ *, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, integer *, integer *, integer *),
+ zlassq_(integer *, doublecomplex *, integer *, doublereal *,
+ doublereal *);
+ logical lquery;
+ extern /* Subroutine */ int ztgsyl_(char *, integer *, integer *, integer
+ *, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *, doublecomplex *, integer *, integer *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* January 2007 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTGSEN reorders the generalized Schur decomposition of a complex */
+/* matrix pair (A, B) (in terms of an unitary equivalence trans- */
+/* formation Q' * (A, B) * Z), so that a selected cluster of eigenvalues */
+/* appears in the leading diagonal blocks of the pair (A,B). The leading */
+/* columns of Q and Z form unitary bases of the corresponding left and */
+/* right eigenspaces (deflating subspaces). (A, B) must be in */
+/* generalized Schur canonical form, that is, A and B are both upper */
+/* triangular. */
+
+/* ZTGSEN also computes the generalized eigenvalues */
+
+/* w(j)= ALPHA(j) / BETA(j) */
+
+/* of the reordered matrix pair (A, B). */
+
+/* Optionally, the routine computes estimates of reciprocal condition */
+/* numbers for eigenvalues and eigenspaces. These are Difu[(A11,B11), */
+/* (A22,B22)] and Difl[(A11,B11), (A22,B22)], i.e. the separation(s) */
+/* between the matrix pairs (A11, B11) and (A22,B22) that correspond to */
+/* the selected cluster and the eigenvalues outside the cluster, resp., */
+/* and norms of "projections" onto left and right eigenspaces w.r.t. */
+/* the selected cluster in the (1,1)-block. */
+
+
+/* Arguments */
+/* ========= */
+
+/* IJOB (input) integer */
+/* Specifies whether condition numbers are required for the */
+/* cluster of eigenvalues (PL and PR) or the deflating subspaces */
+/* (Difu and Difl): */
+/* =0: Only reorder w.r.t. SELECT. No extras. */
+/* =1: Reciprocal of norms of "projections" onto left and right */
+/* eigenspaces w.r.t. the selected cluster (PL and PR). */
+/* =2: Upper bounds on Difu and Difl. F-norm-based estimate */
+/* (DIF(1:2)). */
+/* =3: Estimate of Difu and Difl. 1-norm-based estimate */
+/* (DIF(1:2)). */
+/* About 5 times as expensive as IJOB = 2. */
+/* =4: Compute PL, PR and DIF (i.e. 0, 1 and 2 above): Economic */
+/* version to get it all. */
+/* =5: Compute PL, PR and DIF (i.e. 0, 1 and 3 above) */
+
+/* WANTQ (input) LOGICAL */
+/* .TRUE. : update the left transformation matrix Q; */
+/* .FALSE.: do not update Q. */
+
+/* WANTZ (input) LOGICAL */
+/* .TRUE. : update the right transformation matrix Z; */
+/* .FALSE.: do not update Z. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* SELECT specifies the eigenvalues in the selected cluster. To */
+/* select an eigenvalue w(j), SELECT(j) must be set to */
+/* .TRUE.. */
+
+/* N (input) INTEGER */
+/* The order of the matrices A and B. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension(LDA,N) */
+/* On entry, the upper triangular matrix A, in generalized */
+/* Schur canonical form. */
+/* On exit, A is overwritten by the reordered matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX*16 array, dimension(LDB,N) */
+/* On entry, the upper triangular matrix B, in generalized */
+/* Schur canonical form. */
+/* On exit, B is overwritten by the reordered matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* ALPHA (output) COMPLEX*16 array, dimension (N) */
+/* BETA (output) COMPLEX*16 array, dimension (N) */
+/* The diagonal elements of A and B, respectively, */
+/* when the pair (A,B) has been reduced to generalized Schur */
+/* form. ALPHA(i)/BETA(i) i=1,...,N are the generalized */
+/* eigenvalues. */
+
+/* Q (input/output) COMPLEX*16 array, dimension (LDQ,N) */
+/* On entry, if WANTQ = .TRUE., Q is an N-by-N matrix. */
+/* On exit, Q has been postmultiplied by the left unitary */
+/* transformation matrix which reorder (A, B); The leading M */
+/* columns of Q form orthonormal bases for the specified pair of */
+/* left eigenspaces (deflating subspaces). */
+/* If WANTQ = .FALSE., Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= 1. */
+/* If WANTQ = .TRUE., LDQ >= N. */
+
+/* Z (input/output) COMPLEX*16 array, dimension (LDZ,N) */
+/* On entry, if WANTZ = .TRUE., Z is an N-by-N matrix. */
+/* On exit, Z has been postmultiplied by the left unitary */
+/* transformation matrix which reorder (A, B); The leading M */
+/* columns of Z form orthonormal bases for the specified pair of */
+/* left eigenspaces (deflating subspaces). */
+/* If WANTZ = .FALSE., Z is not referenced. */
+
+/* LDZ (input) INTEGER */
+/* The leading dimension of the array Z. LDZ >= 1. */
+/* If WANTZ = .TRUE., LDZ >= N. */
+
+/* M (output) INTEGER */
+/* The dimension of the specified pair of left and right */
+/* eigenspaces, (deflating subspaces) 0 <= M <= N. */
+
+/* PL (output) DOUBLE PRECISION */
+/* PR (output) DOUBLE PRECISION */
+/* If IJOB = 1, 4 or 5, PL, PR are lower bounds on the */
+/* reciprocal of the norm of "projections" onto left and right */
+/* eigenspace with respect to the selected cluster. */
+/* 0 < PL, PR <= 1. */
+/* If M = 0 or M = N, PL = PR = 1. */
+/* If IJOB = 0, 2 or 3 PL, PR are not referenced. */
+
+/* DIF (output) DOUBLE PRECISION array, dimension (2). */
+/* If IJOB >= 2, DIF(1:2) store the estimates of Difu and Difl. */
+/* If IJOB = 2 or 4, DIF(1:2) are F-norm-based upper bounds on */
+/* Difu and Difl. If IJOB = 3 or 5, DIF(1:2) are 1-norm-based */
+/* estimates of Difu and Difl, computed using reversed */
+/* communication with ZLACN2. */
+/* If M = 0 or N, DIF(1:2) = F-norm([A, B]). */
+/* If IJOB = 0 or 1, DIF is not referenced. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* IF IJOB = 0, WORK is not referenced. Otherwise, */
+/* on exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= 1 */
+/* If IJOB = 1, 2 or 4, LWORK >= 2*M*(N-M) */
+/* If IJOB = 3 or 5, LWORK >= 4*M*(N-M) */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) */
+/* IF IJOB = 0, IWORK is not referenced. Otherwise, */
+/* on exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. */
+
+/* LIWORK (input) INTEGER */
+/* The dimension of the array IWORK. LIWORK >= 1. */
+/* If IJOB = 1, 2 or 4, LIWORK >= N+2; */
+/* If IJOB = 3 or 5, LIWORK >= MAX(N+2, 2*M*(N-M)); */
+
+/* If LIWORK = -1, then a workspace query is assumed; the */
+/* routine only calculates the optimal size of the IWORK array, */
+/* returns this value as the first entry of the IWORK array, and */
+/* no error message related to LIWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* =0: Successful exit. */
+/* <0: If INFO = -i, the i-th argument had an illegal value. */
+/* =1: Reordering of (A, B) failed because the transformed */
+/* matrix pair (A, B) would be too far from generalized */
+/* Schur form; the problem is very ill-conditioned. */
+/* (A, B) may have been partially reordered. */
+/* If requested, 0 is returned in DIF(*), PL and PR. */
+
+
+/* Further Details */
+/* =============== */
+
+/* ZTGSEN first collects the selected eigenvalues by computing unitary */
+/* U and W that move them to the top left corner of (A, B). In other */
+/* words, the selected eigenvalues are the eigenvalues of (A11, B11) in */
+
+/* U'*(A, B)*W = (A11 A12) (B11 B12) n1 */
+/* ( 0 A22),( 0 B22) n2 */
+/* n1 n2 n1 n2 */
+
+/* where N = n1+n2 and U' means the conjugate transpose of U. The first */
+/* n1 columns of U and W span the specified pair of left and right */
+/* eigenspaces (deflating subspaces) of (A, B). */
+
+/* If (A, B) has been obtained from the generalized real Schur */
+/* decomposition of a matrix pair (C, D) = Q*(A, B)*Z', then the */
+/* reordered generalized Schur form of (C, D) is given by */
+
+/* (C, D) = (Q*U)*(U'*(A, B)*W)*(Z*W)', */
+
+/* and the first n1 columns of Q*U and Z*W span the corresponding */
+/* deflating subspaces of (C, D) (Q and Z store Q*U and Z*W, resp.). */
+
+/* Note that if the selected eigenvalue is sufficiently ill-conditioned, */
+/* then its value may differ significantly from its value before */
+/* reordering. */
+
+/* The reciprocal condition numbers of the left and right eigenspaces */
+/* spanned by the first n1 columns of U and W (or Q*U and Z*W) may */
+/* be returned in DIF(1:2), corresponding to Difu and Difl, resp. */
+
+/* The Difu and Difl are defined as: */
+
+/* Difu[(A11, B11), (A22, B22)] = sigma-min( Zu ) */
+/* and */
+/* Difl[(A11, B11), (A22, B22)] = Difu[(A22, B22), (A11, B11)], */
+
+/* where sigma-min(Zu) is the smallest singular value of the */
+/* (2*n1*n2)-by-(2*n1*n2) matrix */
+
+/* Zu = [ kron(In2, A11) -kron(A22', In1) ] */
+/* [ kron(In2, B11) -kron(B22', In1) ]. */
+
+/* Here, Inx is the identity matrix of size nx and A22' is the */
+/* transpose of A22. kron(X, Y) is the Kronecker product between */
+/* the matrices X and Y. */
+
+/* When DIF(2) is small, small changes in (A, B) can cause large changes */
+/* in the deflating subspace. An approximate (asymptotic) bound on the */
+/* maximum angular error in the computed deflating subspaces is */
+
+/* EPS * norm((A, B)) / DIF(2), */
+
+/* where EPS is the machine precision. */
+
+/* The reciprocal norm of the projectors on the left and right */
+/* eigenspaces associated with (A11, B11) may be returned in PL and PR. */
+/* They are computed as follows. First we compute L and R so that */
+/* P*(A, B)*Q is block diagonal, where */
+
+/* P = ( I -L ) n1 Q = ( I R ) n1 */
+/* ( 0 I ) n2 and ( 0 I ) n2 */
+/* n1 n2 n1 n2 */
+
+/* and (L, R) is the solution to the generalized Sylvester equation */
+
+/* A11*R - L*A22 = -A12 */
+/* B11*R - L*B22 = -B12 */
+
+/* Then PL = (F-norm(L)**2+1)**(-1/2) and PR = (F-norm(R)**2+1)**(-1/2). */
+/* An approximate (asymptotic) bound on the average absolute error of */
+/* the selected eigenvalues is */
+
+/* EPS * norm((A, B)) / PL. */
+
+/* There are also global error bounds which valid for perturbations up */
+/* to a certain restriction: A lower bound (x) on the smallest */
+/* F-norm(E,F) for which an eigenvalue of (A11, B11) may move and */
+/* coalesce with an eigenvalue of (A22, B22) under perturbation (E,F), */
+/* (i.e. (A + E, B + F), is */
+
+/* x = min(Difu,Difl)/((1/(PL*PL)+1/(PR*PR))**(1/2)+2*max(1/PL,1/PR)). */
+
+/* An approximate bound on x can be computed from DIF(1:2), PL and PR. */
+
+/* If y = ( F-norm(E,F) / x) <= 1, the angles between the perturbed */
+/* (L', R') and unperturbed (L, R) left and right deflating subspaces */
+/* associated with the selected cluster in the (1,1)-blocks can be */
+/* bounded as */
+
+/* max-angle(L, L') <= arctan( y * PL / (1 - y * (1 - PL * PL)**(1/2)) */
+/* max-angle(R, R') <= arctan( y * PR / (1 - y * (1 - PR * PR)**(1/2)) */
+
+/* See LAPACK User's Guide section 4.11 or the following references */
+/* for more information. */
+
+/* Note that if the default method for computing the Frobenius-norm- */
+/* based estimate DIF is not wanted (see ZLATDF), then the parameter */
+/* IDIFJB (see below) should be changed from 3 to 4 (routine ZLATDF */
+/* (IJOB = 2 will be used)). See ZTGSYL for more details. */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* References */
+/* ========== */
+
+/* [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */
+/* Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */
+/* M.S. Moonen et al (eds), Linear Algebra for Large Scale and */
+/* Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */
+
+/* [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified */
+/* Eigenvalues of a Regular Matrix Pair (A, B) and Condition */
+/* Estimation: Theory, Algorithms and Software, Report */
+/* UMINF - 94.04, Department of Computing Science, Umea University, */
+/* S-901 87 Umea, Sweden, 1994. Also as LAPACK Working Note 87. */
+/* To appear in Numerical Algorithms, 1996. */
+
+/* [3] B. Kagstrom and P. Poromaa, LAPACK-Style Algorithms and Software */
+/* for Solving the Generalized Sylvester Equation and Estimating the */
+/* Separation between Regular Matrix Pairs, Report UMINF - 93.23, */
+/* Department of Computing Science, Umea University, S-901 87 Umea, */
+/* Sweden, December 1993, Revised April 1994, Also as LAPACK working */
+/* Note 75. To appear in ACM Trans. on Math. Software, Vol 22, No 1, */
+/* 1996. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+ /* Parameter adjustments */
+ --select;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --alpha;
+ --beta;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --dif;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1 || *liwork == -1;
+
+ if (*ijob < 0 || *ijob > 5) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldq < 1 || *wantq && *ldq < *n) {
+ *info = -13;
+ } else if (*ldz < 1 || *wantz && *ldz < *n) {
+ *info = -15;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTGSEN", &i__1);
+ return 0;
+ }
+
+ ierr = 0;
+
+ wantp = *ijob == 1 || *ijob >= 4;
+ wantd1 = *ijob == 2 || *ijob == 4;
+ wantd2 = *ijob == 3 || *ijob == 5;
+ wantd = wantd1 || wantd2;
+
+/* Set M to the dimension of the specified pair of deflating */
+/* subspaces. */
+
+ *m = 0;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k;
+ i__3 = k + k * a_dim1;
+ alpha[i__2].r = a[i__3].r, alpha[i__2].i = a[i__3].i;
+ i__2 = k;
+ i__3 = k + k * b_dim1;
+ beta[i__2].r = b[i__3].r, beta[i__2].i = b[i__3].i;
+ if (k < *n) {
+ if (select[k]) {
+ ++(*m);
+ }
+ } else {
+ if (select[*n]) {
+ ++(*m);
+ }
+ }
+/* L10: */
+ }
+
+ if (*ijob == 1 || *ijob == 2 || *ijob == 4) {
+/* Computing MAX */
+ i__1 = 1, i__2 = (*m << 1) * (*n - *m);
+ lwmin = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = 1, i__2 = *n + 2;
+ liwmin = max(i__1,i__2);
+ } else if (*ijob == 3 || *ijob == 5) {
+/* Computing MAX */
+ i__1 = 1, i__2 = (*m << 2) * (*n - *m);
+ lwmin = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = 1, i__2 = (*m << 1) * (*n - *m), i__1 = max(i__1,i__2), i__2 =
+ *n + 2;
+ liwmin = max(i__1,i__2);
+ } else {
+ lwmin = 1;
+ liwmin = 1;
+ }
+
+ work[1].r = (doublereal) lwmin, work[1].i = 0.;
+ iwork[1] = liwmin;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -21;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -23;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTGSEN", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*m == *n || *m == 0) {
+ if (wantp) {
+ *pl = 1.;
+ *pr = 1.;
+ }
+ if (wantd) {
+ dscale = 0.;
+ dsum = 1.;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ zlassq_(n, &a[i__ * a_dim1 + 1], &c__1, &dscale, &dsum);
+ zlassq_(n, &b[i__ * b_dim1 + 1], &c__1, &dscale, &dsum);
+/* L20: */
+ }
+ dif[1] = dscale * sqrt(dsum);
+ dif[2] = dif[1];
+ }
+ goto L70;
+ }
+
+/* Get machine constant */
+
+ safmin = dlamch_("S");
+
+/* Collect the selected blocks at the top-left corner of (A, B). */
+
+ ks = 0;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ swap = select[k];
+ if (swap) {
+ ++ks;
+
+/* Swap the K-th block to position KS. Compute unitary Q */
+/* and Z that will swap adjacent diagonal blocks in (A, B). */
+
+ if (k != ks) {
+ ztgexc_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset], ldb,
+ &q[q_offset], ldq, &z__[z_offset], ldz, &k, &ks, &
+ ierr);
+ }
+
+ if (ierr > 0) {
+
+/* Swap is rejected: exit. */
+
+ *info = 1;
+ if (wantp) {
+ *pl = 0.;
+ *pr = 0.;
+ }
+ if (wantd) {
+ dif[1] = 0.;
+ dif[2] = 0.;
+ }
+ goto L70;
+ }
+ }
+/* L30: */
+ }
+ if (wantp) {
+
+/* Solve generalized Sylvester equation for R and L: */
+/* A11 * R - L * A22 = A12 */
+/* B11 * R - L * B22 = B12 */
+
+ n1 = *m;
+ n2 = *n - *m;
+ i__ = n1 + 1;
+ zlacpy_("Full", &n1, &n2, &a[i__ * a_dim1 + 1], lda, &work[1], &n1);
+ zlacpy_("Full", &n1, &n2, &b[i__ * b_dim1 + 1], ldb, &work[n1 * n2 +
+ 1], &n1);
+ ijb = 0;
+ i__1 = *lwork - (n1 << 1) * n2;
+ ztgsyl_("N", &ijb, &n1, &n2, &a[a_offset], lda, &a[i__ + i__ * a_dim1]
+, lda, &work[1], &n1, &b[b_offset], ldb, &b[i__ + i__ *
+ b_dim1], ldb, &work[n1 * n2 + 1], &n1, &dscale, &dif[1], &
+ work[(n1 * n2 << 1) + 1], &i__1, &iwork[1], &ierr);
+
+/* Estimate the reciprocal of norms of "projections" onto */
+/* left and right eigenspaces */
+
+ rdscal = 0.;
+ dsum = 1.;
+ i__1 = n1 * n2;
+ zlassq_(&i__1, &work[1], &c__1, &rdscal, &dsum);
+ *pl = rdscal * sqrt(dsum);
+ if (*pl == 0.) {
+ *pl = 1.;
+ } else {
+ *pl = dscale / (sqrt(dscale * dscale / *pl + *pl) * sqrt(*pl));
+ }
+ rdscal = 0.;
+ dsum = 1.;
+ i__1 = n1 * n2;
+ zlassq_(&i__1, &work[n1 * n2 + 1], &c__1, &rdscal, &dsum);
+ *pr = rdscal * sqrt(dsum);
+ if (*pr == 0.) {
+ *pr = 1.;
+ } else {
+ *pr = dscale / (sqrt(dscale * dscale / *pr + *pr) * sqrt(*pr));
+ }
+ }
+ if (wantd) {
+
+/* Compute estimates Difu and Difl. */
+
+ if (wantd1) {
+ n1 = *m;
+ n2 = *n - *m;
+ i__ = n1 + 1;
+ ijb = 3;
+
+/* Frobenius norm-based Difu estimate. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ ztgsyl_("N", &ijb, &n1, &n2, &a[a_offset], lda, &a[i__ + i__ *
+ a_dim1], lda, &work[1], &n1, &b[b_offset], ldb, &b[i__ +
+ i__ * b_dim1], ldb, &work[n1 * n2 + 1], &n1, &dscale, &
+ dif[1], &work[(n1 * n2 << 1) + 1], &i__1, &iwork[1], &
+ ierr);
+
+/* Frobenius norm-based Difl estimate. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ ztgsyl_("N", &ijb, &n2, &n1, &a[i__ + i__ * a_dim1], lda, &a[
+ a_offset], lda, &work[1], &n2, &b[i__ + i__ * b_dim1],
+ ldb, &b[b_offset], ldb, &work[n1 * n2 + 1], &n2, &dscale,
+ &dif[2], &work[(n1 * n2 << 1) + 1], &i__1, &iwork[1], &
+ ierr);
+ } else {
+
+/* Compute 1-norm-based estimates of Difu and Difl using */
+/* reversed communication with ZLACN2. In each step a */
+/* generalized Sylvester equation or a transposed variant */
+/* is solved. */
+
+ kase = 0;
+ n1 = *m;
+ n2 = *n - *m;
+ i__ = n1 + 1;
+ ijb = 0;
+ mn2 = (n1 << 1) * n2;
+
+/* 1-norm-based estimate of Difu. */
+
+L40:
+ zlacn2_(&mn2, &work[mn2 + 1], &work[1], &dif[1], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Solve generalized Sylvester equation */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ ztgsyl_("N", &ijb, &n1, &n2, &a[a_offset], lda, &a[i__ +
+ i__ * a_dim1], lda, &work[1], &n1, &b[b_offset],
+ ldb, &b[i__ + i__ * b_dim1], ldb, &work[n1 * n2 +
+ 1], &n1, &dscale, &dif[1], &work[(n1 * n2 << 1) +
+ 1], &i__1, &iwork[1], &ierr);
+ } else {
+
+/* Solve the transposed variant. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ ztgsyl_("C", &ijb, &n1, &n2, &a[a_offset], lda, &a[i__ +
+ i__ * a_dim1], lda, &work[1], &n1, &b[b_offset],
+ ldb, &b[i__ + i__ * b_dim1], ldb, &work[n1 * n2 +
+ 1], &n1, &dscale, &dif[1], &work[(n1 * n2 << 1) +
+ 1], &i__1, &iwork[1], &ierr);
+ }
+ goto L40;
+ }
+ dif[1] = dscale / dif[1];
+
+/* 1-norm-based estimate of Difl. */
+
+L50:
+ zlacn2_(&mn2, &work[mn2 + 1], &work[1], &dif[2], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Solve generalized Sylvester equation */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ ztgsyl_("N", &ijb, &n2, &n1, &a[i__ + i__ * a_dim1], lda,
+ &a[a_offset], lda, &work[1], &n2, &b[i__ + i__ *
+ b_dim1], ldb, &b[b_offset], ldb, &work[n1 * n2 +
+ 1], &n2, &dscale, &dif[2], &work[(n1 * n2 << 1) +
+ 1], &i__1, &iwork[1], &ierr);
+ } else {
+
+/* Solve the transposed variant. */
+
+ i__1 = *lwork - (n1 << 1) * n2;
+ ztgsyl_("C", &ijb, &n2, &n1, &a[i__ + i__ * a_dim1], lda,
+ &a[a_offset], lda, &work[1], &n2, &b[b_offset],
+ ldb, &b[i__ + i__ * b_dim1], ldb, &work[n1 * n2 +
+ 1], &n2, &dscale, &dif[2], &work[(n1 * n2 << 1) +
+ 1], &i__1, &iwork[1], &ierr);
+ }
+ goto L50;
+ }
+ dif[2] = dscale / dif[2];
+ }
+ }
+
+/* If B(K,K) is complex, make it real and positive (normalization */
+/* of the generalized Schur form) and Store the generalized */
+/* eigenvalues of reordered pair (A, B) */
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ dscale = z_abs(&b[k + k * b_dim1]);
+ if (dscale > safmin) {
+ i__2 = k + k * b_dim1;
+ z__2.r = b[i__2].r / dscale, z__2.i = b[i__2].i / dscale;
+ d_cnjg(&z__1, &z__2);
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__2 = k + k * b_dim1;
+ z__1.r = b[i__2].r / dscale, z__1.i = b[i__2].i / dscale;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ i__2 = k + k * b_dim1;
+ b[i__2].r = dscale, b[i__2].i = 0.;
+ i__2 = *n - k;
+ zscal_(&i__2, &temp1, &b[k + (k + 1) * b_dim1], ldb);
+ i__2 = *n - k + 1;
+ zscal_(&i__2, &temp1, &a[k + k * a_dim1], lda);
+ if (*wantq) {
+ zscal_(n, &temp2, &q[k * q_dim1 + 1], &c__1);
+ }
+ } else {
+ i__2 = k + k * b_dim1;
+ b[i__2].r = 0., b[i__2].i = 0.;
+ }
+
+ i__2 = k;
+ i__3 = k + k * a_dim1;
+ alpha[i__2].r = a[i__3].r, alpha[i__2].i = a[i__3].i;
+ i__2 = k;
+ i__3 = k + k * b_dim1;
+ beta[i__2].r = b[i__3].r, beta[i__2].i = b[i__3].i;
+
+/* L60: */
+ }
+
+L70:
+
+ work[1].r = (doublereal) lwmin, work[1].i = 0.;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of ZTGSEN */
+
+} /* ztgsen_ */
diff --git a/contrib/libs/clapack/ztgsja.c b/contrib/libs/clapack/ztgsja.c
new file mode 100644
index 0000000000..d3643028af
--- /dev/null
+++ b/contrib/libs/clapack/ztgsja.c
@@ -0,0 +1,672 @@
+/* ztgsja.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.}; /* complex zero: off-diagonal fill for zlaset_ */
+static doublecomplex c_b2 = {1.,0.}; /* complex one: diagonal value for zlaset_ */
+static integer c__1 = 1; /* unit stride for vector routines (zrot_, zcopy_, zdscal_) */
+static doublereal c_b39 = -1.; /* sign-flip scale used by zdscal_ when gamma < 0 */
+static doublereal c_b42 = 1.; /* the literal "1" passed to dlartg_ */
+
+/* Subroutine */ int ztgsja_(char *jobu, char *jobv, char *jobq, integer *m,
+	integer *p, integer *n, integer *k, integer *l, doublecomplex *a,
+	integer *lda, doublecomplex *b, integer *ldb, doublereal *tola,
+	doublereal *tolb, doublereal *alpha, doublereal *beta, doublecomplex *
+	u, integer *ldu, doublecomplex *v, integer *ldv, doublecomplex *q,
+	integer *ldq, doublecomplex *work, integer *ncycle, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, u_dim1,
+	    u_offset, v_dim1, v_offset, i__1, i__2, i__3, i__4;
+    doublereal d__1;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__, j;
+    doublereal a1, b1, a3, b3;
+    doublecomplex a2, b2;
+    doublereal csq, csu, csv;
+    doublecomplex snq;
+    doublereal rwk;
+    doublecomplex snu, snv;
+    extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublereal *, doublecomplex *);
+    doublereal gamma;
+    extern logical lsame_(char *, char *);
+    logical initq, initu, initv, wantq, upper;
+    doublereal error, ssmin;
+    logical wantu, wantv;
+    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *), zlags2_(logical *, doublereal *,
+	    doublecomplex *, doublereal *, doublereal *, doublecomplex *,
+	    doublereal *, doublereal *, doublecomplex *, doublereal *,
+	    doublecomplex *, doublereal *, doublecomplex *);
+    integer kcycle;
+    extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
+	    doublereal *, doublereal *, doublereal *), xerbla_(char *,
+	    integer *), zdscal_(integer *, doublereal *,
+	    doublecomplex *, integer *), zlapll_(integer *, doublecomplex *,
+	    integer *, doublecomplex *, integer *, doublereal *), zlaset_(
+	    char *, integer *, integer *, doublecomplex *, doublecomplex *,
+	    doublecomplex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTGSJA computes the generalized singular value decomposition (GSVD) */
+/* of two complex upper triangular (or trapezoidal) matrices A and B. */
+
+/* On entry, it is assumed that matrices A and B have the following */
+/* forms, which may be obtained by the preprocessing subroutine ZGGSVP */
+/* from a general M-by-N matrix A and P-by-N matrix B: */
+
+/* N-K-L K L */
+/* A = K ( 0 A12 A13 ) if M-K-L >= 0; */
+/* L ( 0 0 A23 ) */
+/* M-K-L ( 0 0 0 ) */
+
+/* N-K-L K L */
+/* A = K ( 0 A12 A13 ) if M-K-L < 0; */
+/* M-K ( 0 0 A23 ) */
+
+/* N-K-L K L */
+/* B = L ( 0 0 B13 ) */
+/* P-L ( 0 0 0 ) */
+
+/* where the K-by-K matrix A12 and L-by-L matrix B13 are nonsingular */
+/* upper triangular; A23 is L-by-L upper triangular if M-K-L >= 0, */
+/* otherwise A23 is (M-K)-by-L upper trapezoidal. */
+
+/* On exit, */
+
+/* U'*A*Q = D1*( 0 R ), V'*B*Q = D2*( 0 R ), */
+
+/* where U, V and Q are unitary matrices, Z' denotes the conjugate */
+/* transpose of Z, R is a nonsingular upper triangular matrix, and D1 */
+/* and D2 are ``diagonal'' matrices, which are of the following */
+/* structures: */
+
+/* If M-K-L >= 0, */
+
+/* K L */
+/* D1 = K ( I 0 ) */
+/* L ( 0 C ) */
+/* M-K-L ( 0 0 ) */
+
+/* K L */
+/* D2 = L ( 0 S ) */
+/* P-L ( 0 0 ) */
+
+/* N-K-L K L */
+/* ( 0 R ) = K ( 0 R11 R12 ) K */
+/* L ( 0 0 R22 ) L */
+
+/* where */
+
+/* C = diag( ALPHA(K+1), ... , ALPHA(K+L) ), */
+/* S = diag( BETA(K+1), ... , BETA(K+L) ), */
+/* C**2 + S**2 = I. */
+
+/* R is stored in A(1:K+L,N-K-L+1:N) on exit. */
+
+/* If M-K-L < 0, */
+
+/* K M-K K+L-M */
+/* D1 = K ( I 0 0 ) */
+/* M-K ( 0 C 0 ) */
+
+/* K M-K K+L-M */
+/* D2 = M-K ( 0 S 0 ) */
+/* K+L-M ( 0 0 I ) */
+/* P-L ( 0 0 0 ) */
+
+/* N-K-L K M-K K+L-M */
+/* ( 0 R ) = K ( 0 R11 R12 R13 ) */
+/* M-K ( 0 0 R22 R23 ) */
+/* K+L-M ( 0 0 0 R33 ) */
+
+/* where */
+/* C = diag( ALPHA(K+1), ... , ALPHA(M) ), */
+/* S = diag( BETA(K+1), ... , BETA(M) ), */
+/* C**2 + S**2 = I. */
+
+/* R = ( R11 R12 R13 ) is stored in A(1:M, N-K-L+1:N) and R33 is stored */
+/* ( 0 R22 R23 ) */
+/* in B(M-K+1:L,N+M-K-L+1:N) on exit. */
+
+/* The computation of the unitary transformation matrices U, V or Q */
+/* is optional. These matrices may either be formed explicitly, or they */
+/* may be postmultiplied into input matrices U1, V1, or Q1. */
+
+/* Arguments */
+/* ========= */
+
+/* JOBU (input) CHARACTER*1 */
+/* = 'U': U must contain a unitary matrix U1 on entry, and */
+/* the product U1*U is returned; */
+/* = 'I': U is initialized to the unit matrix, and the */
+/* unitary matrix U is returned; */
+/* = 'N': U is not computed. */
+
+/* JOBV (input) CHARACTER*1 */
+/* = 'V': V must contain a unitary matrix V1 on entry, and */
+/* the product V1*V is returned; */
+/* = 'I': V is initialized to the unit matrix, and the */
+/* unitary matrix V is returned; */
+/* = 'N': V is not computed. */
+
+/* JOBQ (input) CHARACTER*1 */
+/* = 'Q': Q must contain a unitary matrix Q1 on entry, and */
+/* the product Q1*Q is returned; */
+/* = 'I': Q is initialized to the unit matrix, and the */
+/* unitary matrix Q is returned; */
+/* = 'N': Q is not computed. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* P (input) INTEGER */
+/* The number of rows of the matrix B. P >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrices A and B. N >= 0. */
+
+/* K (input) INTEGER */
+/* L (input) INTEGER */
+/* K and L specify the subblocks in the input matrices A and B: */
+/* A23 = A(K+1:MIN(K+L,M),N-L+1:N) and B13 = B(1:L,,N-L+1:N) */
+/* of A and B, whose GSVD is going to be computed by ZTGSJA. */
+/* See Further details. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the M-by-N matrix A. */
+/* On exit, A(N-K+1:N,1:MIN(K+L,M) ) contains the triangular */
+/* matrix R or part of R. See Purpose for details. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,N) */
+/* On entry, the P-by-N matrix B. */
+/* On exit, if necessary, B(M-K+1:L,N+M-K-L+1:N) contains */
+/* a part of R. See Purpose for details. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,P). */
+
+/* TOLA (input) DOUBLE PRECISION */
+/* TOLB (input) DOUBLE PRECISION */
+/* TOLA and TOLB are the convergence criteria for the Jacobi- */
+/* Kogbetliantz iteration procedure. Generally, they are the */
+/* same as used in the preprocessing step, say */
+/* TOLA = MAX(M,N)*norm(A)*MAZHEPS, */
+/* TOLB = MAX(P,N)*norm(B)*MAZHEPS. */
+
+/* ALPHA (output) DOUBLE PRECISION array, dimension (N) */
+/* BETA (output) DOUBLE PRECISION array, dimension (N) */
+/* On exit, ALPHA and BETA contain the generalized singular */
+/* value pairs of A and B; */
+/* ALPHA(1:K) = 1, */
+/* BETA(1:K) = 0, */
+/* and if M-K-L >= 0, */
+/* ALPHA(K+1:K+L) = diag(C), */
+/* BETA(K+1:K+L) = diag(S), */
+/* or if M-K-L < 0, */
+/* ALPHA(K+1:M)= C, ALPHA(M+1:K+L)= 0 */
+/* BETA(K+1:M) = S, BETA(M+1:K+L) = 1. */
+/* Furthermore, if K+L < N, */
+/* ALPHA(K+L+1:N) = 0 */
+/* BETA(K+L+1:N) = 0. */
+
+/* U (input/output) COMPLEX*16 array, dimension (LDU,M) */
+/* On entry, if JOBU = 'U', U must contain a matrix U1 (usually */
+/* the unitary matrix returned by ZGGSVP). */
+/* On exit, */
+/* if JOBU = 'I', U contains the unitary matrix U; */
+/* if JOBU = 'U', U contains the product U1*U. */
+/* If JOBU = 'N', U is not referenced. */
+
+/* LDU (input) INTEGER */
+/* The leading dimension of the array U. LDU >= max(1,M) if */
+/* JOBU = 'U'; LDU >= 1 otherwise. */
+
+/* V (input/output) COMPLEX*16 array, dimension (LDV,P) */
+/* On entry, if JOBV = 'V', V must contain a matrix V1 (usually */
+/* the unitary matrix returned by ZGGSVP). */
+/* On exit, */
+/* if JOBV = 'I', V contains the unitary matrix V; */
+/* if JOBV = 'V', V contains the product V1*V. */
+/* If JOBV = 'N', V is not referenced. */
+
+/* LDV (input) INTEGER */
+/* The leading dimension of the array V. LDV >= max(1,P) if */
+/* JOBV = 'V'; LDV >= 1 otherwise. */
+
+/* Q (input/output) COMPLEX*16 array, dimension (LDQ,N) */
+/* On entry, if JOBQ = 'Q', Q must contain a matrix Q1 (usually */
+/* the unitary matrix returned by ZGGSVP). */
+/* On exit, */
+/* if JOBQ = 'I', Q contains the unitary matrix Q; */
+/* if JOBQ = 'Q', Q contains the product Q1*Q. */
+/* If JOBQ = 'N', Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N) if */
+/* JOBQ = 'Q'; LDQ >= 1 otherwise. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* NCYCLE (output) INTEGER */
+/* The number of cycles required for convergence. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value. */
+/* = 1: the procedure does not converge after MAXIT cycles. */
+
+/* Internal Parameters */
+/* =================== */
+
+/* MAXIT INTEGER */
+/* MAXIT specifies the total loops that the iterative procedure */
+/* may take. If after MAXIT cycles, the routine fails to */
+/* converge, we return INFO = 1. */
+
+/* Further Details */
+/* =============== */
+
+/* ZTGSJA essentially uses a variant of Kogbetliantz algorithm to reduce */
+/* min(L,M-K)-by-L triangular (or trapezoidal) matrix A23 and L-by-L */
+/* matrix B13 to the form: */
+
+/* U1'*A13*Q1 = C1*R1; V1'*B13*Q1 = S1*R1, */
+
+/* where U1, V1 and Q1 are unitary matrix, and Z' is the conjugate */
+/* transpose of Z. C1 and S1 are diagonal matrices satisfying */
+
+/* C1**2 + S1**2 = I, */
+
+/* and R1 is an L-by-L nonsingular upper triangular matrix. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset; /* f2c idiom: shift pointers so 1-based Fortran indexing works */
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    --alpha;
+    --beta;
+    u_dim1 = *ldu;
+    u_offset = 1 + u_dim1;
+    u -= u_offset;
+    v_dim1 = *ldv;
+    v_offset = 1 + v_dim1;
+    v -= v_offset;
+    q_dim1 = *ldq;
+    q_offset = 1 + q_dim1;
+    q -= q_offset;
+    --work;
+
+    /* Function Body */
+    initu = lsame_(jobu, "I");
+    wantu = initu || lsame_(jobu, "U");
+
+    initv = lsame_(jobv, "I");
+    wantv = initv || lsame_(jobv, "V");
+
+    initq = lsame_(jobq, "I");
+    wantq = initq || lsame_(jobq, "Q");
+
+    *info = 0;
+    if (! (initu || wantu || lsame_(jobu, "N"))) {
+	*info = -1;
+    } else if (! (initv || wantv || lsame_(jobv, "N")))
+	    {
+	*info = -2;
+    } else if (! (initq || wantq || lsame_(jobq, "N")))
+	    {
+	*info = -3;
+    } else if (*m < 0) {
+	*info = -4;
+    } else if (*p < 0) {
+	*info = -5;
+    } else if (*n < 0) {
+	*info = -6;
+    } else if (*lda < max(1,*m)) {
+	*info = -10;
+    } else if (*ldb < max(1,*p)) {
+	*info = -12;
+    } else if (*ldu < 1 || wantu && *ldu < *m) {
+	*info = -18;
+    } else if (*ldv < 1 || wantv && *ldv < *p) {
+	*info = -20;
+    } else if (*ldq < 1 || wantq && *ldq < *n) {
+	*info = -22;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZTGSJA", &i__1);
+	return 0;
+    }
+
+/* Initialize U, V and Q, if necessary */
+
+    if (initu) {
+	zlaset_("Full", m, m, &c_b1, &c_b2, &u[u_offset], ldu);
+    }
+    if (initv) {
+	zlaset_("Full", p, p, &c_b1, &c_b2, &v[v_offset], ldv);
+    }
+    if (initq) {
+	zlaset_("Full", n, n, &c_b1, &c_b2, &q[q_offset], ldq);
+    }
+
+/* Loop until convergence */
+
+    upper = FALSE_; /* toggled at the top of each cycle, so the first sweep runs with UPPER = TRUE */
+    for (kcycle = 1; kcycle <= 40; ++kcycle) { /* 40 is the hard-coded MAXIT from the header */
+
+	upper = ! upper; /* alternate between upper- and lower-triangular sweeps */
+
+	i__1 = *l - 1;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = *l;
+	    for (j = i__ + 1; j <= i__2; ++j) {
+
+		a1 = 0.;
+		a2.r = 0., a2.i = 0.;
+		a3 = 0.;
+		if (*k + i__ <= *m) {
+		    i__3 = *k + i__ + (*n - *l + i__) * a_dim1;
+		    a1 = a[i__3].r;
+		}
+		if (*k + j <= *m) {
+		    i__3 = *k + j + (*n - *l + j) * a_dim1;
+		    a3 = a[i__3].r;
+		}
+
+		i__3 = i__ + (*n - *l + i__) * b_dim1;
+		b1 = b[i__3].r;
+		i__3 = j + (*n - *l + j) * b_dim1;
+		b3 = b[i__3].r;
+
+		if (upper) {
+		    if (*k + i__ <= *m) {
+			i__3 = *k + i__ + (*n - *l + j) * a_dim1;
+			a2.r = a[i__3].r, a2.i = a[i__3].i;
+		    }
+		    i__3 = i__ + (*n - *l + j) * b_dim1;
+		    b2.r = b[i__3].r, b2.i = b[i__3].i;
+		} else {
+		    if (*k + j <= *m) {
+			i__3 = *k + j + (*n - *l + i__) * a_dim1;
+			a2.r = a[i__3].r, a2.i = a[i__3].i;
+		    }
+		    i__3 = j + (*n - *l + i__) * b_dim1;
+		    b2.r = b[i__3].r, b2.i = b[i__3].i;
+		}
+
+		zlags2_(&upper, &a1, &a2, &a3, &b1, &b2, &b3, &csu, &snu, &
+			csv, &snv, &csq, &snq); /* generate the 2-by-2 rotations (cs/sn pairs) */
+
+/* Update (K+I)-th and (K+J)-th rows of matrix A: U'*A */
+
+		if (*k + j <= *m) {
+		    d_cnjg(&z__1, &snu);
+		    zrot_(l, &a[*k + j + (*n - *l + 1) * a_dim1], lda, &a[*k
+			    + i__ + (*n - *l + 1) * a_dim1], lda, &csu, &z__1)
+			    ;
+		}
+
+/* Update I-th and J-th rows of matrix B: V'*B */
+
+		d_cnjg(&z__1, &snv);
+		zrot_(l, &b[j + (*n - *l + 1) * b_dim1], ldb, &b[i__ + (*n - *
+			l + 1) * b_dim1], ldb, &csv, &z__1);
+
+/* Update (N-L+I)-th and (N-L+J)-th columns of matrices */
+/* A and B: A*Q and B*Q */
+
+/* Computing MIN */
+		i__4 = *k + *l;
+		i__3 = min(i__4,*m);
+		zrot_(&i__3, &a[(*n - *l + j) * a_dim1 + 1], &c__1, &a[(*n - *
+			l + i__) * a_dim1 + 1], &c__1, &csq, &snq);
+
+		zrot_(l, &b[(*n - *l + j) * b_dim1 + 1], &c__1, &b[(*n - *l +
+			i__) * b_dim1 + 1], &c__1, &csq, &snq);
+
+		if (upper) {
+		    if (*k + i__ <= *m) {
+			i__3 = *k + i__ + (*n - *l + j) * a_dim1;
+			a[i__3].r = 0., a[i__3].i = 0.; /* annihilate the targeted off-diagonal entry */
+		    }
+		    i__3 = i__ + (*n - *l + j) * b_dim1;
+		    b[i__3].r = 0., b[i__3].i = 0.;
+		} else {
+		    if (*k + j <= *m) {
+			i__3 = *k + j + (*n - *l + i__) * a_dim1;
+			a[i__3].r = 0., a[i__3].i = 0.;
+		    }
+		    i__3 = j + (*n - *l + i__) * b_dim1;
+		    b[i__3].r = 0., b[i__3].i = 0.;
+		}
+
+/* Ensure that the diagonal elements of A and B are real. */
+
+		if (*k + i__ <= *m) {
+		    i__3 = *k + i__ + (*n - *l + i__) * a_dim1;
+		    i__4 = *k + i__ + (*n - *l + i__) * a_dim1;
+		    d__1 = a[i__4].r;
+		    a[i__3].r = d__1, a[i__3].i = 0.;
+		}
+		if (*k + j <= *m) {
+		    i__3 = *k + j + (*n - *l + j) * a_dim1;
+		    i__4 = *k + j + (*n - *l + j) * a_dim1;
+		    d__1 = a[i__4].r;
+		    a[i__3].r = d__1, a[i__3].i = 0.;
+		}
+		i__3 = i__ + (*n - *l + i__) * b_dim1;
+		i__4 = i__ + (*n - *l + i__) * b_dim1;
+		d__1 = b[i__4].r;
+		b[i__3].r = d__1, b[i__3].i = 0.;
+		i__3 = j + (*n - *l + j) * b_dim1;
+		i__4 = j + (*n - *l + j) * b_dim1;
+		d__1 = b[i__4].r;
+		b[i__3].r = d__1, b[i__3].i = 0.;
+
+/* Update unitary matrices U, V, Q, if desired. */
+
+		if (wantu && *k + j <= *m) {
+		    zrot_(m, &u[(*k + j) * u_dim1 + 1], &c__1, &u[(*k + i__) *
+			     u_dim1 + 1], &c__1, &csu, &snu);
+		}
+
+		if (wantv) {
+		    zrot_(p, &v[j * v_dim1 + 1], &c__1, &v[i__ * v_dim1 + 1],
+			    &c__1, &csv, &snv);
+		}
+
+		if (wantq) {
+		    zrot_(n, &q[(*n - *l + j) * q_dim1 + 1], &c__1, &q[(*n - *
+			    l + i__) * q_dim1 + 1], &c__1, &csq, &snq);
+		}
+
+/* L10: */
+	    }
+/* L20: */
+	}
+
+	if (! upper) {
+
+/* The matrices A13 and B13 were lower triangular at the start */
+/* of the cycle, and are now upper triangular. */
+
+/* Convergence test: test the parallelism of the corresponding */
+/* rows of A and B. */
+
+	    error = 0.;
+/* Computing MIN */
+	    i__2 = *l, i__3 = *m - *k;
+	    i__1 = min(i__2,i__3);
+	    for (i__ = 1; i__ <= i__1; ++i__) {
+		i__2 = *l - i__ + 1;
+		zcopy_(&i__2, &a[*k + i__ + (*n - *l + i__) * a_dim1], lda, &
+			work[1], &c__1);
+		i__2 = *l - i__ + 1;
+		zcopy_(&i__2, &b[i__ + (*n - *l + i__) * b_dim1], ldb, &work[*
+			l + 1], &c__1);
+		i__2 = *l - i__ + 1;
+		zlapll_(&i__2, &work[1], &c__1, &work[*l + 1], &c__1, &ssmin);
+		error = max(error,ssmin); /* ssmin from zlapll_ gauges how far from parallel the row pair is */
+/* L30: */
+	    }
+
+	    if (abs(error) <= min(*tola,*tolb)) {
+		goto L50; /* converged */
+	    }
+	}
+
+/* End of cycle loop */
+
+/* L40: */
+    }
+
+/* The algorithm has not converged after MAXIT cycles. */
+
+    *info = 1;
+    goto L100;
+
+L50:
+
+/* If ERROR <= MIN(TOLA,TOLB), then the algorithm has converged. */
+/* Compute the generalized singular value pairs (ALPHA, BETA), and */
+/* set the triangular matrix R to array A. */
+
+    i__1 = *k;
+    for (i__ = 1; i__ <= i__1; ++i__) {
+	alpha[i__] = 1.;
+	beta[i__] = 0.;
+/* L60: */
+    }
+
+/* Computing MIN */
+    i__2 = *l, i__3 = *m - *k;
+    i__1 = min(i__2,i__3);
+    for (i__ = 1; i__ <= i__1; ++i__) {
+
+	i__2 = *k + i__ + (*n - *l + i__) * a_dim1;
+	a1 = a[i__2].r;
+	i__2 = i__ + (*n - *l + i__) * b_dim1;
+	b1 = b[i__2].r;
+
+	if (a1 != 0.) {
+	    gamma = b1 / a1; /* ratio of the (now real) diagonals B(i,i)/A(i,i) */
+
+	    if (gamma < 0.) {
+		i__2 = *l - i__ + 1;
+		zdscal_(&i__2, &c_b39, &b[i__ + (*n - *l + i__) * b_dim1],
+			ldb); /* flip sign of the B row (and V column) so gamma >= 0 */
+		if (wantv) {
+		    zdscal_(p, &c_b39, &v[i__ * v_dim1 + 1], &c__1);
+		}
+	    }
+
+	    d__1 = abs(gamma);
+	    dlartg_(&d__1, &c_b42, &beta[*k + i__], &alpha[*k + i__], &rwk); /* split |gamma| : 1 into (beta, alpha) with alpha**2+beta**2 = 1 */
+
+	    if (alpha[*k + i__] >= beta[*k + i__]) {
+		i__2 = *l - i__ + 1;
+		d__1 = 1. / alpha[*k + i__];
+		zdscal_(&i__2, &d__1, &a[*k + i__ + (*n - *l + i__) * a_dim1],
+			 lda);
+	    } else {
+		i__2 = *l - i__ + 1;
+		d__1 = 1. / beta[*k + i__];
+		zdscal_(&i__2, &d__1, &b[i__ + (*n - *l + i__) * b_dim1], ldb)
+			;
+		i__2 = *l - i__ + 1;
+		zcopy_(&i__2, &b[i__ + (*n - *l + i__) * b_dim1], ldb, &a[*k
+			+ i__ + (*n - *l + i__) * a_dim1], lda);
+	    }
+
+	} else {
+	    alpha[*k + i__] = 0.;
+	    beta[*k + i__] = 1.;
+	    i__2 = *l - i__ + 1;
+	    zcopy_(&i__2, &b[i__ + (*n - *l + i__) * b_dim1], ldb, &a[*k +
+		    i__ + (*n - *l + i__) * a_dim1], lda);
+	}
+/* L70: */
+    }
+
+/* Post-assignment */
+
+    i__1 = *k + *l;
+    for (i__ = *m + 1; i__ <= i__1; ++i__) { /* only entered when M < K+L */
+	alpha[i__] = 0.;
+	beta[i__] = 1.;
+/* L80: */
+    }
+
+    if (*k + *l < *n) {
+	i__1 = *n;
+	for (i__ = *k + *l + 1; i__ <= i__1; ++i__) {
+	    alpha[i__] = 0.;
+	    beta[i__] = 0.;
+/* L90: */
+	}
+    }
+
+L100:
+    *ncycle = kcycle; /* report the number of sweeps actually performed */
+
+    return 0;
+
+/* End of ZTGSJA */
+
+} /* ztgsja_ */
diff --git a/contrib/libs/clapack/ztgsna.c b/contrib/libs/clapack/ztgsna.c
new file mode 100644
index 0000000000..2e8856d396
--- /dev/null
+++ b/contrib/libs/clapack/ztgsna.c
@@ -0,0 +1,487 @@
+/* ztgsna.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static doublecomplex c_b19 = {1.,0.};
+static doublecomplex c_b20 = {0.,0.};
+static logical c_false = FALSE_;
+static integer c__3 = 3;
+
+/* Subroutine */ int ztgsna_(char *job, char *howmny, logical *select,
+ integer *n, doublecomplex *a, integer *lda, doublecomplex *b, integer
+ *ldb, doublecomplex *vl, integer *ldvl, doublecomplex *vr, integer *
+ ldvr, doublereal *s, doublereal *dif, integer *mm, integer *m,
+ doublecomplex *work, integer *lwork, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, vl_dim1, vl_offset, vr_dim1,
+ vr_offset, i__1;
+ doublereal d__1, d__2;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *);
+
+ /* Local variables */
+ integer i__, k, n1, n2, ks;
+ doublereal eps, cond;
+ integer ierr, ifst;
+ doublereal lnrm;
+ doublecomplex yhax, yhbx;
+ integer ilst;
+ doublereal rnrm, scale;
+ extern logical lsame_(char *, char *);
+ extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ integer lwmin;
+ extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *);
+ logical wants;
+ doublecomplex dummy[1];
+ extern doublereal dlapy2_(doublereal *, doublereal *);
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *);
+ doublecomplex dummy1[1];
+ extern doublereal dznrm2_(integer *, doublecomplex *, integer *), dlamch_(
+ char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum;
+ logical wantbh, wantdf, somcon;
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *),
+ ztgexc_(logical *, logical *, integer *, doublecomplex *, integer
+ *, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, integer *, integer *, integer *);
+ doublereal smlnum;
+ logical lquery;
+ extern /* Subroutine */ int ztgsyl_(char *, integer *, integer *, integer
+ *, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *, doublecomplex *, integer *, integer *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTGSNA estimates reciprocal condition numbers for specified */
+/* eigenvalues and/or eigenvectors of a matrix pair (A, B). */
+
+/* (A, B) must be in generalized Schur canonical form, that is, A and */
+/* B are both upper triangular. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies whether condition numbers are required for */
+/* eigenvalues (S) or eigenvectors (DIF): */
+/* = 'E': for eigenvalues only (S); */
+/* = 'V': for eigenvectors only (DIF); */
+/* = 'B': for both eigenvalues and eigenvectors (S and DIF). */
+
+/* HOWMNY (input) CHARACTER*1 */
+/* = 'A': compute condition numbers for all eigenpairs; */
+/* = 'S': compute condition numbers for selected eigenpairs */
+/* specified by the array SELECT. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* If HOWMNY = 'S', SELECT specifies the eigenpairs for which */
+/* condition numbers are required. To select condition numbers */
+/* for the corresponding j-th eigenvalue and/or eigenvector, */
+/* SELECT(j) must be set to .TRUE.. */
+/* If HOWMNY = 'A', SELECT is not referenced. */
+
+/* N (input) INTEGER */
+/* The order of the square matrix pair (A, B). N >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The upper triangular matrix A in the pair (A,B). */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,N) */
+/* The upper triangular matrix B in the pair (A, B). */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* VL (input) COMPLEX*16 array, dimension (LDVL,M) */
+/* IF JOB = 'E' or 'B', VL must contain left eigenvectors of */
+/* (A, B), corresponding to the eigenpairs specified by HOWMNY */
+/* and SELECT. The eigenvectors must be stored in consecutive */
+/* columns of VL, as returned by ZTGEVC. */
+/* If JOB = 'V', VL is not referenced. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the array VL. LDVL >= 1; and */
+/* If JOB = 'E' or 'B', LDVL >= N. */
+
+/* VR (input) COMPLEX*16 array, dimension (LDVR,M) */
+/* IF JOB = 'E' or 'B', VR must contain right eigenvectors of */
+/* (A, B), corresponding to the eigenpairs specified by HOWMNY */
+/* and SELECT. The eigenvectors must be stored in consecutive */
+/* columns of VR, as returned by ZTGEVC. */
+/* If JOB = 'V', VR is not referenced. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. LDVR >= 1; */
+/* If JOB = 'E' or 'B', LDVR >= N. */
+
+/* S (output) DOUBLE PRECISION array, dimension (MM) */
+/* If JOB = 'E' or 'B', the reciprocal condition numbers of the */
+/* selected eigenvalues, stored in consecutive elements of the */
+/* array. */
+/* If JOB = 'V', S is not referenced. */
+
+/* DIF (output) DOUBLE PRECISION array, dimension (MM) */
+/* If JOB = 'V' or 'B', the estimated reciprocal condition */
+/* numbers of the selected eigenvectors, stored in consecutive */
+/* elements of the array. */
+/* If the eigenvalues cannot be reordered to compute DIF(j), */
+/* DIF(j) is set to 0; this can only occur when the true value */
+/* would be very small anyway. */
+/* For each eigenvalue/vector specified by SELECT, DIF stores */
+/* a Frobenius norm-based estimate of Difl. */
+/* If JOB = 'E', DIF is not referenced. */
+
+/* MM (input) INTEGER */
+/* The number of elements in the arrays S and DIF. MM >= M. */
+
+/* M (output) INTEGER */
+/* The number of elements of the arrays S and DIF used to store */
+/* the specified condition numbers; for each selected eigenvalue */
+/* one element is used. If HOWMNY = 'A', M is set to N. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* If JOB = 'V' or 'B', LWORK >= max(1,2*N*N). */
+
+/* IWORK (workspace) INTEGER array, dimension (N+2) */
+/* If JOB = 'E', IWORK is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: Successful exit */
+/* < 0: If INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The reciprocal of the condition number of the i-th generalized */
+/* eigenvalue w = (a, b) is defined as */
+
+/* S(I) = (|v'Au|**2 + |v'Bu|**2)**(1/2) / (norm(u)*norm(v)) */
+
+/* where u and v are the right and left eigenvectors of (A, B) */
+/* corresponding to w; |z| denotes the absolute value of the complex */
+/* number, and norm(u) denotes the 2-norm of the vector u. The pair */
+/* (a, b) corresponds to an eigenvalue w = a/b (= v'Au/v'Bu) of the */
+/* matrix pair (A, B). If both a and b equal zero, then (A,B) is */
+/* singular and S(I) = -1 is returned. */
+
+/* An approximate error bound on the chordal distance between the i-th */
+/* computed generalized eigenvalue w and the corresponding exact */
+/* eigenvalue lambda is */
+
+/* chord(w, lambda) <= EPS * norm(A, B) / S(I), */
+
+/* where EPS is the machine precision. */
+
+/* The reciprocal of the condition number of the right eigenvector u */
+/* and left eigenvector v corresponding to the generalized eigenvalue w */
+/* is defined as follows. Suppose */
+
+/* (A, B) = ( a * ) ( b * ) 1 */
+/* ( 0 A22 ),( 0 B22 ) n-1 */
+/* 1 n-1 1 n-1 */
+
+/* Then the reciprocal condition number DIF(I) is */
+
+/* Difl[(a, b), (A22, B22)] = sigma-min( Zl ) */
+
+/* where sigma-min(Zl) denotes the smallest singular value of */
+
+/* Zl = [ kron(a, In-1) -kron(1, A22) ] */
+/* [ kron(b, In-1) -kron(1, B22) ]. */
+
+/* Here In-1 is the identity matrix of size n-1 and X' is the conjugate */
+/* transpose of X. kron(X, Y) is the Kronecker product between the */
+/* matrices X and Y. */
+
+/* We approximate the smallest singular value of Zl with an upper */
+/* bound. This is done by ZLATDF. */
+
+/* An approximate error bound for a computed eigenvector VL(i) or */
+/* VR(i) is given by */
+
+/* EPS * norm(A, B) / DIF(i). */
+
+/* See ref. [2-3] for more details and further references. */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* References */
+/* ========== */
+
+/* [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */
+/* Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */
+/* M.S. Moonen et al (eds), Linear Algebra for Large Scale and */
+/* Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */
+
+/* [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified */
+/* Eigenvalues of a Regular Matrix Pair (A, B) and Condition */
+/* Estimation: Theory, Algorithms and Software, Report */
+/* UMINF - 94.04, Department of Computing Science, Umea University, */
+/* S-901 87 Umea, Sweden, 1994. Also as LAPACK Working Note 87. */
+/* To appear in Numerical Algorithms, 1996. */
+
+/* [3] B. Kagstrom and P. Poromaa, LAPACK-Style Algorithms and Software */
+/* for Solving the Generalized Sylvester Equation and Estimating the */
+/* Separation between Regular Matrix Pairs, Report UMINF - 93.23, */
+/* Department of Computing Science, Umea University, S-901 87 Umea, */
+/* Sweden, December 1993, Revised April 1994, Also as LAPACK Working */
+/* Note 75. */
+/* To appear in ACM Trans. on Math. Software, Vol 22, No 1, 1996. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+ /* Parameter adjustments */
+ --select;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --s;
+ --dif;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ wantbh = lsame_(job, "B");
+ wants = lsame_(job, "E") || wantbh;
+ wantdf = lsame_(job, "V") || wantbh;
+
+ somcon = lsame_(howmny, "S");
+
+ *info = 0;
+ lquery = *lwork == -1;
+
+ if (! wants && ! wantdf) {
+ *info = -1;
+ } else if (! lsame_(howmny, "A") && ! somcon) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*n)) {
+ *info = -6;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ } else if (wants && *ldvl < *n) {
+ *info = -10;
+ } else if (wants && *ldvr < *n) {
+ *info = -12;
+ } else {
+
+/* Set M to the number of eigenpairs for which condition numbers */
+/* are required, and test MM. */
+
+ if (somcon) {
+ *m = 0;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (select[k]) {
+ ++(*m);
+ }
+/* L10: */
+ }
+ } else {
+ *m = *n;
+ }
+
+ if (*n == 0) {
+ lwmin = 1;
+ } else if (lsame_(job, "V") || lsame_(job,
+ "B")) {
+ lwmin = (*n << 1) * *n;
+ } else {
+ lwmin = *n;
+ }
+ work[1].r = (doublereal) lwmin, work[1].i = 0.;
+
+ if (*mm < *m) {
+ *info = -15;
+ } else if (*lwork < lwmin && ! lquery) {
+ *info = -18;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTGSNA", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = dlamch_("P");
+ smlnum = dlamch_("S") / eps;
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+ ks = 0;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+
+/* Determine whether condition numbers are required for the k-th */
+/* eigenpair. */
+
+ if (somcon) {
+ if (! select[k]) {
+ goto L20;
+ }
+ }
+
+ ++ks;
+
+ if (wants) {
+
+/* Compute the reciprocal condition number of the k-th */
+/* eigenvalue. */
+
+ rnrm = dznrm2_(n, &vr[ks * vr_dim1 + 1], &c__1);
+ lnrm = dznrm2_(n, &vl[ks * vl_dim1 + 1], &c__1);
+ zgemv_("N", n, n, &c_b19, &a[a_offset], lda, &vr[ks * vr_dim1 + 1]
+, &c__1, &c_b20, &work[1], &c__1);
+ zdotc_(&z__1, n, &work[1], &c__1, &vl[ks * vl_dim1 + 1], &c__1);
+ yhax.r = z__1.r, yhax.i = z__1.i;
+ zgemv_("N", n, n, &c_b19, &b[b_offset], ldb, &vr[ks * vr_dim1 + 1]
+, &c__1, &c_b20, &work[1], &c__1);
+ zdotc_(&z__1, n, &work[1], &c__1, &vl[ks * vl_dim1 + 1], &c__1);
+ yhbx.r = z__1.r, yhbx.i = z__1.i;
+ d__1 = z_abs(&yhax);
+ d__2 = z_abs(&yhbx);
+ cond = dlapy2_(&d__1, &d__2);
+ if (cond == 0.) {
+ s[ks] = -1.;
+ } else {
+ s[ks] = cond / (rnrm * lnrm);
+ }
+ }
+
+ if (wantdf) {
+ if (*n == 1) {
+ d__1 = z_abs(&a[a_dim1 + 1]);
+ d__2 = z_abs(&b[b_dim1 + 1]);
+ dif[ks] = dlapy2_(&d__1, &d__2);
+ } else {
+
+/* Estimate the reciprocal condition number of the k-th */
+/* eigenvectors. */
+
+/* Copy the matrix (A, B) to the array WORK and move the */
+/* (k,k)th pair to the (1,1) position. */
+
+ zlacpy_("Full", n, n, &a[a_offset], lda, &work[1], n);
+ zlacpy_("Full", n, n, &b[b_offset], ldb, &work[*n * *n + 1],
+ n);
+ ifst = k;
+ ilst = 1;
+
+ ztgexc_(&c_false, &c_false, n, &work[1], n, &work[*n * *n + 1]
+, n, dummy, &c__1, dummy1, &c__1, &ifst, &ilst, &ierr)
+ ;
+
+ if (ierr > 0) {
+
+/* Ill-conditioned problem - swap rejected. */
+
+ dif[ks] = 0.;
+ } else {
+
+/* Reordering successful, solve generalized Sylvester */
+/* equation for R and L, */
+/* A22 * R - L * A11 = A12 */
+/* B22 * R - L * B11 = B12, */
+/* and compute estimate of Difl[(A11,B11), (A22, B22)]. */
+
+ n1 = 1;
+ n2 = *n - n1;
+ i__ = *n * *n + 1;
+ ztgsyl_("N", &c__3, &n2, &n1, &work[*n * n1 + n1 + 1], n,
+ &work[1], n, &work[n1 + 1], n, &work[*n * n1 + n1
+ + i__], n, &work[i__], n, &work[n1 + i__], n, &
+ scale, &dif[ks], dummy, &c__1, &iwork[1], &ierr);
+ }
+ }
+ }
+
+L20:
+ ;
+ }
+ work[1].r = (doublereal) lwmin, work[1].i = 0.;
+ return 0;
+
+/* End of ZTGSNA */
+
+} /* ztgsna_ */
diff --git a/contrib/libs/clapack/ztgsy2.c b/contrib/libs/clapack/ztgsy2.c
new file mode 100644
index 0000000000..4f1bbdd9a5
--- /dev/null
+++ b/contrib/libs/clapack/ztgsy2.c
@@ -0,0 +1,478 @@
+/* ztgsy2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__2 = 2;
+static integer c__1 = 1;
+
+/* Subroutine */ int ztgsy2_(char *trans, integer *ijob, integer *m, integer *
+ n, doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb,
+ doublecomplex *c__, integer *ldc, doublecomplex *d__, integer *ldd,
+ doublecomplex *e, integer *lde, doublecomplex *f, integer *ldf,
+ doublereal *scale, doublereal *rdsum, doublereal *rdscal, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, d_dim1,
+ d_offset, e_dim1, e_offset, f_dim1, f_offset, i__1, i__2, i__3,
+ i__4;
+ doublecomplex z__1, z__2, z__3, z__4, z__5, z__6;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k;
+ doublecomplex z__[4] /* was [2][2] */, rhs[2];
+ integer ierr, ipiv[2], jpiv[2];
+ doublecomplex alpha;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *), zaxpy_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), zgesc2_(
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+ integer *, doublereal *), zgetc2_(integer *, doublecomplex *,
+ integer *, integer *, integer *, integer *);
+ doublereal scaloc;
+ extern /* Subroutine */ int xerbla_(char *, integer *), zlatdf_(
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ doublereal *, doublereal *, integer *, integer *);
+ logical notran;
+
+
+/* -- LAPACK auxiliary routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTGSY2 solves the generalized Sylvester equation */
+
+/* A * R - L * B = scale * C (1) */
+/* D * R - L * E = scale * F */
+
+/* using Level 1 and 2 BLAS, where R and L are unknown M-by-N matrices, */
+/* (A, D), (B, E) and (C, F) are given matrix pairs of size M-by-M, */
+/* N-by-N and M-by-N, respectively. A, B, D and E are upper triangular */
+/* (i.e., (A,D) and (B,E) in generalized Schur form). */
+
+/* The solution (R, L) overwrites (C, F). 0 <= SCALE <= 1 is an output */
+/* scaling factor chosen to avoid overflow. */
+
+/* In matrix notation solving equation (1) corresponds to solve */
+/* Zx = scale * b, where Z is defined as */
+
+/* Z = [ kron(In, A) -kron(B', Im) ] (2) */
+/* [ kron(In, D) -kron(E', Im) ], */
+
+/* Ik is the identity matrix of size k and X' is the transpose of X. */
+/* kron(X, Y) is the Kronecker product between the matrices X and Y. */
+
+/* If TRANS = 'C', y in the conjugate transposed system Z'y = scale*b */
+/* is solved for, which is equivalent to solve for R and L in */
+
+/* A' * R + D' * L = scale * C (3) */
+/* R * B' + L * E' = scale * -F */
+
+/* This case is used to compute an estimate of Dif[(A, D), (B, E)] = */
+/* = sigma_min(Z) using reverse communication with ZLACON. */
+
+/* ZTGSY2 also (IJOB >= 1) contributes to the computation in ZTGSYL */
+/* of an upper bound on the separation between two matrix pairs. Then */
+/* the input (A, D), (B, E) are sub-pencils of two matrix pairs in */
+/* ZTGSYL. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': solve the generalized Sylvester equation (1). */
+/* = 'C': solve the conjugate transposed system (3). */
+
+/* IJOB (input) INTEGER */
+/* Specifies what kind of functionality to be performed. */
+/* =0: solve (1) only. */
+/* =1: A contribution from this subsystem to a Frobenius */
+/* norm-based estimate of the separation between two matrix */
+/* pairs is computed. (look ahead strategy is used). */
+/* =2: A contribution from this subsystem to a Frobenius */
+/* norm-based estimate of the separation between two matrix */
+/* pairs is computed. (DGECON on sub-systems is used.) */
+/* Not referenced if TRANS = 'C'. */
+
+/* M (input) INTEGER */
+/* On entry, M specifies the order of A and D, and the row */
+/* dimension of C, F, R and L. */
+
+/* N (input) INTEGER */
+/* On entry, N specifies the order of B and E, and the column */
+/* dimension of C, F, R and L. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA, M) */
+/* On entry, A contains an upper triangular matrix. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the matrix A. LDA >= max(1, M). */
+
+/* B (input) COMPLEX*16 array, dimension (LDB, N) */
+/* On entry, B contains an upper triangular matrix. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the matrix B. LDB >= max(1, N). */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC, N) */
+/* On entry, C contains the right-hand-side of the first matrix */
+/* equation in (1). */
+/* On exit, if IJOB = 0, C has been overwritten by the solution */
+/* R. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the matrix C. LDC >= max(1, M). */
+
+/* D (input) COMPLEX*16 array, dimension (LDD, M) */
+/* On entry, D contains an upper triangular matrix. */
+
+/* LDD (input) INTEGER */
+/* The leading dimension of the matrix D. LDD >= max(1, M). */
+
+/* E (input) COMPLEX*16 array, dimension (LDE, N) */
+/* On entry, E contains an upper triangular matrix. */
+
+/* LDE (input) INTEGER */
+/* The leading dimension of the matrix E. LDE >= max(1, N). */
+
+/* F (input/output) COMPLEX*16 array, dimension (LDF, N) */
+/* On entry, F contains the right-hand-side of the second matrix */
+/* equation in (1). */
+/* On exit, if IJOB = 0, F has been overwritten by the solution */
+/* L. */
+
+/* LDF (input) INTEGER */
+/* The leading dimension of the matrix F. LDF >= max(1, M). */
+
+/* SCALE (output) DOUBLE PRECISION */
+/* On exit, 0 <= SCALE <= 1. If 0 < SCALE < 1, the solutions */
+/* R and L (C and F on entry) will hold the solutions to a */
+/* slightly perturbed system but the input matrices A, B, D and */
+/* E have not been changed. If SCALE = 0, R and L will hold the */
+/* solutions to the homogeneous system with C = F = 0. */
+/* Normally, SCALE = 1. */
+
+/* RDSUM (input/output) DOUBLE PRECISION */
+/* On entry, the sum of squares of computed contributions to */
+/* the Dif-estimate under computation by ZTGSYL, where the */
+/* scaling factor RDSCAL (see below) has been factored out. */
+/* On exit, the corresponding sum of squares updated with the */
+/* contributions from the current sub-system. */
+/* If TRANS = 'C', RDSUM is not touched. */
+/* NOTE: RDSUM only makes sense when ZTGSY2 is called by */
+/* ZTGSYL. */
+
+/* RDSCAL (input/output) DOUBLE PRECISION */
+/* On entry, scaling factor used to prevent overflow in RDSUM. */
+/* On exit, RDSCAL is updated w.r.t. the current contributions */
+/* in RDSUM. */
+/* If TRANS = 'C', RDSCAL is not touched. */
+/* NOTE: RDSCAL only makes sense when ZTGSY2 is called by */
+/* ZTGSYL. */
+
+/* INFO (output) INTEGER */
+/* On exit, if INFO is set to */
+/* =0: Successful exit */
+/* <0: If INFO = -i, input argument number i is illegal. */
+/* >0: The matrix pairs (A, D) and (B, E) have common or very */
+/* close eigenvalues. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ d_dim1 = *ldd;
+ d_offset = 1 + d_dim1;
+ d__ -= d_offset;
+ e_dim1 = *lde;
+ e_offset = 1 + e_dim1;
+ e -= e_offset;
+ f_dim1 = *ldf;
+ f_offset = 1 + f_dim1;
+ f -= f_offset;
+
+ /* Function Body */
+ *info = 0;
+ ierr = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "C")) {
+ *info = -1;
+ } else if (notran) {
+ if (*ijob < 0 || *ijob > 2) {
+ *info = -2;
+ }
+ }
+ if (*info == 0) {
+ if (*m <= 0) {
+ *info = -3;
+ } else if (*n <= 0) {
+ *info = -4;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*ldd < max(1,*m)) {
+ *info = -12;
+ } else if (*lde < max(1,*n)) {
+ *info = -14;
+ } else if (*ldf < max(1,*m)) {
+ *info = -16;
+ }
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTGSY2", &i__1);
+ return 0;
+ }
+
+ if (notran) {
+
+/* Solve (I, J) - system */
+/* A(I, I) * R(I, J) - L(I, J) * B(J, J) = C(I, J) */
+/* D(I, I) * R(I, J) - L(I, J) * E(J, J) = F(I, J) */
+/* for I = M, M - 1, ..., 1; J = 1, 2, ..., N */
+
+ *scale = 1.;
+ scaloc = 1.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+
+/* Build 2 by 2 system */
+
+ i__2 = i__ + i__ * a_dim1;
+ z__[0].r = a[i__2].r, z__[0].i = a[i__2].i;
+ i__2 = i__ + i__ * d_dim1;
+ z__[1].r = d__[i__2].r, z__[1].i = d__[i__2].i;
+ i__2 = j + j * b_dim1;
+ z__1.r = -b[i__2].r, z__1.i = -b[i__2].i;
+ z__[2].r = z__1.r, z__[2].i = z__1.i;
+ i__2 = j + j * e_dim1;
+ z__1.r = -e[i__2].r, z__1.i = -e[i__2].i;
+ z__[3].r = z__1.r, z__[3].i = z__1.i;
+
+/* Set up right hand side(s) */
+
+ i__2 = i__ + j * c_dim1;
+ rhs[0].r = c__[i__2].r, rhs[0].i = c__[i__2].i;
+ i__2 = i__ + j * f_dim1;
+ rhs[1].r = f[i__2].r, rhs[1].i = f[i__2].i;
+
+/* Solve Z * x = RHS */
+
+ zgetc2_(&c__2, z__, &c__2, ipiv, jpiv, &ierr);
+ if (ierr > 0) {
+ *info = ierr;
+ }
+ if (*ijob == 0) {
+ zgesc2_(&c__2, z__, &c__2, rhs, ipiv, jpiv, &scaloc);
+ if (scaloc != 1.) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(m, &z__1, &c__[k * c_dim1 + 1], &c__1);
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(m, &z__1, &f[k * f_dim1 + 1], &c__1);
+/* L10: */
+ }
+ *scale *= scaloc;
+ }
+ } else {
+ zlatdf_(ijob, &c__2, z__, &c__2, rhs, rdsum, rdscal, ipiv,
+ jpiv);
+ }
+
+/* Unpack solution vector(s) */
+
+ i__2 = i__ + j * c_dim1;
+ c__[i__2].r = rhs[0].r, c__[i__2].i = rhs[0].i;
+ i__2 = i__ + j * f_dim1;
+ f[i__2].r = rhs[1].r, f[i__2].i = rhs[1].i;
+
+/* Substitute R(I, J) and L(I, J) into remaining equation. */
+
+ if (i__ > 1) {
+ z__1.r = -rhs[0].r, z__1.i = -rhs[0].i;
+ alpha.r = z__1.r, alpha.i = z__1.i;
+ i__2 = i__ - 1;
+ zaxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &c__[j
+ * c_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ zaxpy_(&i__2, &alpha, &d__[i__ * d_dim1 + 1], &c__1, &f[j
+ * f_dim1 + 1], &c__1);
+ }
+ if (j < *n) {
+ i__2 = *n - j;
+ zaxpy_(&i__2, &rhs[1], &b[j + (j + 1) * b_dim1], ldb, &
+ c__[i__ + (j + 1) * c_dim1], ldc);
+ i__2 = *n - j;
+ zaxpy_(&i__2, &rhs[1], &e[j + (j + 1) * e_dim1], lde, &f[
+ i__ + (j + 1) * f_dim1], ldf);
+ }
+
+/* L20: */
+ }
+/* L30: */
+ }
+ } else {
+
+/* Solve transposed (I, J) - system: */
+/* A(I, I)' * R(I, J) + D(I, I)' * L(I, J) = C(I, J) */
+/* R(I, J) * B(J, J)' + L(I, J) * E(J, J)' = -F(I, J) */
+/* for I = 1, 2, ..., M, J = N, N - 1, ..., 1 */
+
+ *scale = 1.;
+ scaloc = 1.;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ for (j = *n; j >= 1; --j) {
+
+/* Build 2 by 2 system Z' */
+
+ d_cnjg(&z__1, &a[i__ + i__ * a_dim1]);
+ z__[0].r = z__1.r, z__[0].i = z__1.i;
+ d_cnjg(&z__2, &b[j + j * b_dim1]);
+ z__1.r = -z__2.r, z__1.i = -z__2.i;
+ z__[1].r = z__1.r, z__[1].i = z__1.i;
+ d_cnjg(&z__1, &d__[i__ + i__ * d_dim1]);
+ z__[2].r = z__1.r, z__[2].i = z__1.i;
+ d_cnjg(&z__2, &e[j + j * e_dim1]);
+ z__1.r = -z__2.r, z__1.i = -z__2.i;
+ z__[3].r = z__1.r, z__[3].i = z__1.i;
+
+
+/* Set up right hand side(s) */
+
+ i__2 = i__ + j * c_dim1;
+ rhs[0].r = c__[i__2].r, rhs[0].i = c__[i__2].i;
+ i__2 = i__ + j * f_dim1;
+ rhs[1].r = f[i__2].r, rhs[1].i = f[i__2].i;
+
+/* Solve Z' * x = RHS */
+
+ zgetc2_(&c__2, z__, &c__2, ipiv, jpiv, &ierr);
+ if (ierr > 0) {
+ *info = ierr;
+ }
+ zgesc2_(&c__2, z__, &c__2, rhs, ipiv, jpiv, &scaloc);
+ if (scaloc != 1.) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(m, &z__1, &c__[k * c_dim1 + 1], &c__1);
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(m, &z__1, &f[k * f_dim1 + 1], &c__1);
+/* L40: */
+ }
+ *scale *= scaloc;
+ }
+
+/* Unpack solution vector(s) */
+
+ i__2 = i__ + j * c_dim1;
+ c__[i__2].r = rhs[0].r, c__[i__2].i = rhs[0].i;
+ i__2 = i__ + j * f_dim1;
+ f[i__2].r = rhs[1].r, f[i__2].i = rhs[1].i;
+
+/* Substitute R(I, J) and L(I, J) into remaining equation. */
+
+ i__2 = j - 1;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = i__ + k * f_dim1;
+ i__4 = i__ + k * f_dim1;
+ d_cnjg(&z__4, &b[k + j * b_dim1]);
+ z__3.r = rhs[0].r * z__4.r - rhs[0].i * z__4.i, z__3.i =
+ rhs[0].r * z__4.i + rhs[0].i * z__4.r;
+ z__2.r = f[i__4].r + z__3.r, z__2.i = f[i__4].i + z__3.i;
+ d_cnjg(&z__6, &e[k + j * e_dim1]);
+ z__5.r = rhs[1].r * z__6.r - rhs[1].i * z__6.i, z__5.i =
+ rhs[1].r * z__6.i + rhs[1].i * z__6.r;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
+ f[i__3].r = z__1.r, f[i__3].i = z__1.i;
+/* L50: */
+ }
+ i__2 = *m;
+ for (k = i__ + 1; k <= i__2; ++k) {
+ i__3 = k + j * c_dim1;
+ i__4 = k + j * c_dim1;
+ d_cnjg(&z__4, &a[i__ + k * a_dim1]);
+ z__3.r = z__4.r * rhs[0].r - z__4.i * rhs[0].i, z__3.i =
+ z__4.r * rhs[0].i + z__4.i * rhs[0].r;
+ z__2.r = c__[i__4].r - z__3.r, z__2.i = c__[i__4].i -
+ z__3.i;
+ d_cnjg(&z__6, &d__[i__ + k * d_dim1]);
+ z__5.r = z__6.r * rhs[1].r - z__6.i * rhs[1].i, z__5.i =
+ z__6.r * rhs[1].i + z__6.i * rhs[1].r;
+ z__1.r = z__2.r - z__5.r, z__1.i = z__2.i - z__5.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L60: */
+ }
+
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ return 0;
+
+/* End of ZTGSY2 */
+
+} /* ztgsy2_ */
diff --git a/contrib/libs/clapack/ztgsyl.c b/contrib/libs/clapack/ztgsyl.c
new file mode 100644
index 0000000000..9829b408ca
--- /dev/null
+++ b/contrib/libs/clapack/ztgsyl.c
@@ -0,0 +1,695 @@
+/* ztgsyl.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {0.,0.};
+static integer c__2 = 2;
+static integer c_n1 = -1;
+static integer c__5 = 5;
+static integer c__1 = 1;
+static doublecomplex c_b44 = {-1.,0.};
+static doublecomplex c_b45 = {1.,0.};
+
+/* Subroutine */ int ztgsyl_(char *trans, integer *ijob, integer *m, integer *
+ n, doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb,
+ doublecomplex *c__, integer *ldc, doublecomplex *d__, integer *ldd,
+ doublecomplex *e, integer *lde, doublecomplex *f, integer *ldf,
+ doublereal *scale, doublereal *dif, doublecomplex *work, integer *
+ lwork, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, d_dim1,
+ d_offset, e_dim1, e_offset, f_dim1, f_offset, i__1, i__2, i__3,
+ i__4;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer i__, j, k, p, q, ie, je, mb, nb, is, js, pq;
+ doublereal dsum;
+ extern logical lsame_(char *, char *);
+ integer ifunc, linfo;
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *), zgemm_(char *, char *, integer *,
+ integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ integer lwmin;
+ doublereal scale2, dscale;
+ extern /* Subroutine */ int ztgsy2_(char *, integer *, integer *, integer
+ *, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *);
+ doublereal scaloc;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer iround;
+ logical notran;
+ integer isolve;
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *),
+ zlaset_(char *, integer *, integer *, doublecomplex *,
+ doublecomplex *, doublecomplex *, integer *);
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* January 2007 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTGSYL solves the generalized Sylvester equation: */
+
+/* A * R - L * B = scale * C (1) */
+/* D * R - L * E = scale * F */
+
+/* where R and L are unknown m-by-n matrices, (A, D), (B, E) and */
+/* (C, F) are given matrix pairs of size m-by-m, n-by-n and m-by-n, */
+/* respectively, with complex entries. A, B, D and E are upper */
+/* triangular (i.e., (A,D) and (B,E) in generalized Schur form). */
+
+/* The solution (R, L) overwrites (C, F). 0 <= SCALE <= 1 */
+/* is an output scaling factor chosen to avoid overflow. */
+
+/* In matrix notation (1) is equivalent to solve Zx = scale*b, where Z */
+/* is defined as */
+
+/* Z = [ kron(In, A) -kron(B', Im) ] (2) */
+/* [ kron(In, D) -kron(E', Im) ], */
+
+/* Here Ix is the identity matrix of size x and X' is the conjugate */
+/* transpose of X. Kron(X, Y) is the Kronecker product between the */
+/* matrices X and Y. */
+
+/* If TRANS = 'C', y in the conjugate transposed system Z'*y = scale*b */
+/* is solved for, which is equivalent to solve for R and L in */
+
+/* A' * R + D' * L = scale * C (3) */
+/* R * B' + L * E' = scale * -F */
+
+/* This case (TRANS = 'C') is used to compute an one-norm-based estimate */
+/* of Dif[(A,D), (B,E)], the separation between the matrix pairs (A,D) */
+/* and (B,E), using ZLACON. */
+
+/* If IJOB >= 1, ZTGSYL computes a Frobenius norm-based estimate of */
+/* Dif[(A,D),(B,E)]. That is, the reciprocal of a lower bound on the */
+/* reciprocal of the smallest singular value of Z. */
+
+/* This is a level-3 BLAS algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': solve the generalized sylvester equation (1). */
+/* = 'C': solve the "conjugate transposed" system (3). */
+
+/* IJOB (input) INTEGER */
+/* Specifies what kind of functionality to be performed. */
+/* =0: solve (1) only. */
+/* =1: The functionality of 0 and 3. */
+/* =2: The functionality of 0 and 4. */
+/* =3: Only an estimate of Dif[(A,D), (B,E)] is computed. */
+/* (look ahead strategy is used). */
+/* =4: Only an estimate of Dif[(A,D), (B,E)] is computed. */
+/* (ZGECON on sub-systems is used). */
+/* Not referenced if TRANS = 'C'. */
+
+/* M (input) INTEGER */
+/* The order of the matrices A and D, and the row dimension of */
+/* the matrices C, F, R and L. */
+
+/* N (input) INTEGER */
+/* The order of the matrices B and E, and the column dimension */
+/* of the matrices C, F, R and L. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA, M) */
+/* The upper triangular matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1, M). */
+
+/* B (input) COMPLEX*16 array, dimension (LDB, N) */
+/* The upper triangular matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1, N). */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC, N) */
+/* On entry, C contains the right-hand-side of the first matrix */
+/* equation in (1) or (3). */
+/* On exit, if IJOB = 0, 1 or 2, C has been overwritten by */
+/* the solution R. If IJOB = 3 or 4 and TRANS = 'N', C holds R, */
+/* the solution achieved during the computation of the */
+/* Dif-estimate. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1, M). */
+
+/* D (input) COMPLEX*16 array, dimension (LDD, M) */
+/* The upper triangular matrix D. */
+
+/* LDD (input) INTEGER */
+/* The leading dimension of the array D. LDD >= max(1, M). */
+
+/* E (input) COMPLEX*16 array, dimension (LDE, N) */
+/* The upper triangular matrix E. */
+
+/* LDE (input) INTEGER */
+/* The leading dimension of the array E. LDE >= max(1, N). */
+
+/* F (input/output) COMPLEX*16 array, dimension (LDF, N) */
+/* On entry, F contains the right-hand-side of the second matrix */
+/* equation in (1) or (3). */
+/* On exit, if IJOB = 0, 1 or 2, F has been overwritten by */
+/* the solution L. If IJOB = 3 or 4 and TRANS = 'N', F holds L, */
+/* the solution achieved during the computation of the */
+/* Dif-estimate. */
+
+/* LDF (input) INTEGER */
+/* The leading dimension of the array F. LDF >= max(1, M). */
+
+/* DIF (output) DOUBLE PRECISION */
+/* On exit DIF is the reciprocal of a lower bound of the */
+/* reciprocal of the Dif-function, i.e. DIF is an upper bound of */
+/* Dif[(A,D), (B,E)] = sigma-min(Z), where Z as in (2). */
+/* IF IJOB = 0 or TRANS = 'C', DIF is not referenced. */
+
+/* SCALE (output) DOUBLE PRECISION */
+/* On exit SCALE is the scaling factor in (1) or (3). */
+/* If 0 < SCALE < 1, C and F hold the solutions R and L, resp., */
+/* to a slightly perturbed system but the input matrices A, B, */
+/* D and E have not been changed. If SCALE = 0, R and L will */
+/* hold the solutions to the homogenious system with C = F = 0. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK > = 1. */
+/* If IJOB = 1 or 2 and TRANS = 'N', LWORK >= max(1,2*M*N). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* IWORK (workspace) INTEGER array, dimension (M+N+2) */
+
+/* INFO (output) INTEGER */
+/* =0: successful exit */
+/* <0: If INFO = -i, the i-th argument had an illegal value. */
+/* >0: (A, D) and (B, E) have common or very close */
+/* eigenvalues. */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
+/* Umea University, S-901 87 Umea, Sweden. */
+
+/* [1] B. Kagstrom and P. Poromaa, LAPACK-Style Algorithms and Software */
+/* for Solving the Generalized Sylvester Equation and Estimating the */
+/* Separation between Regular Matrix Pairs, Report UMINF - 93.23, */
+/* Department of Computing Science, Umea University, S-901 87 Umea, */
+/* Sweden, December 1993, Revised April 1994, Also as LAPACK Working */
+/* Note 75. To appear in ACM Trans. on Math. Software, Vol 22, */
+/* No 1, 1996. */
+
+/* [2] B. Kagstrom, A Perturbation Analysis of the Generalized Sylvester */
+/* Equation (AR - LB, DR - LE ) = (C, F), SIAM J. Matrix Anal. */
+/* Appl., 15(4):1045-1060, 1994. */
+
+/* [3] B. Kagstrom and L. Westin, Generalized Schur Methods with */
+/* Condition Estimators for Solving the Generalized Sylvester */
+/* Equation, IEEE Transactions on Automatic Control, Vol. 34, No. 7, */
+/* July 1989, pp 745-751. */
+
+/* ===================================================================== */
+/* Replaced various illegal calls to CCOPY by calls to CLASET. */
+/* Sven Hammarling, 1/5/02. */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ d_dim1 = *ldd;
+ d_offset = 1 + d_dim1;
+ d__ -= d_offset;
+ e_dim1 = *lde;
+ e_offset = 1 + e_dim1;
+ e -= e_offset;
+ f_dim1 = *ldf;
+ f_offset = 1 + f_dim1;
+ f -= f_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+ if (! notran && ! lsame_(trans, "C")) {
+ *info = -1;
+ } else if (notran) {
+ if (*ijob < 0 || *ijob > 4) {
+ *info = -2;
+ }
+ }
+ if (*info == 0) {
+ if (*m <= 0) {
+ *info = -3;
+ } else if (*n <= 0) {
+ *info = -4;
+ } else if (*lda < max(1,*m)) {
+ *info = -6;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*ldd < max(1,*m)) {
+ *info = -12;
+ } else if (*lde < max(1,*n)) {
+ *info = -14;
+ } else if (*ldf < max(1,*m)) {
+ *info = -16;
+ }
+ }
+
+ if (*info == 0) {
+ if (notran) {
+ if (*ijob == 1 || *ijob == 2) {
+/* Computing MAX */
+ i__1 = 1, i__2 = (*m << 1) * *n;
+ lwmin = max(i__1,i__2);
+ } else {
+ lwmin = 1;
+ }
+ } else {
+ lwmin = 1;
+ }
+ work[1].r = (doublereal) lwmin, work[1].i = 0.;
+
+ if (*lwork < lwmin && ! lquery) {
+ *info = -20;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTGSYL", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ *scale = 1.;
+ if (notran) {
+ if (*ijob != 0) {
+ *dif = 0.;
+ }
+ }
+ return 0;
+ }
+
+/* Determine optimal block sizes MB and NB */
+
+ mb = ilaenv_(&c__2, "ZTGSYL", trans, m, n, &c_n1, &c_n1);
+ nb = ilaenv_(&c__5, "ZTGSYL", trans, m, n, &c_n1, &c_n1);
+
+ isolve = 1;
+ ifunc = 0;
+ if (notran) {
+ if (*ijob >= 3) {
+ ifunc = *ijob - 2;
+ zlaset_("F", m, n, &c_b1, &c_b1, &c__[c_offset], ldc);
+ zlaset_("F", m, n, &c_b1, &c_b1, &f[f_offset], ldf);
+ } else if (*ijob >= 1 && notran) {
+ isolve = 2;
+ }
+ }
+
+ if (mb <= 1 && nb <= 1 || mb >= *m && nb >= *n) {
+
+/* Use unblocked Level 2 solver */
+
+ i__1 = isolve;
+ for (iround = 1; iround <= i__1; ++iround) {
+
+ *scale = 1.;
+ dscale = 0.;
+ dsum = 1.;
+ pq = *m * *n;
+ ztgsy2_(trans, &ifunc, m, n, &a[a_offset], lda, &b[b_offset], ldb,
+ &c__[c_offset], ldc, &d__[d_offset], ldd, &e[e_offset],
+ lde, &f[f_offset], ldf, scale, &dsum, &dscale, info);
+ if (dscale != 0.) {
+ if (*ijob == 1 || *ijob == 3) {
+ *dif = sqrt((doublereal) ((*m << 1) * *n)) / (dscale *
+ sqrt(dsum));
+ } else {
+ *dif = sqrt((doublereal) pq) / (dscale * sqrt(dsum));
+ }
+ }
+ if (isolve == 2 && iround == 1) {
+ if (notran) {
+ ifunc = *ijob;
+ }
+ scale2 = *scale;
+ zlacpy_("F", m, n, &c__[c_offset], ldc, &work[1], m);
+ zlacpy_("F", m, n, &f[f_offset], ldf, &work[*m * *n + 1], m);
+ zlaset_("F", m, n, &c_b1, &c_b1, &c__[c_offset], ldc);
+ zlaset_("F", m, n, &c_b1, &c_b1, &f[f_offset], ldf)
+ ;
+ } else if (isolve == 2 && iround == 2) {
+ zlacpy_("F", m, n, &work[1], m, &c__[c_offset], ldc);
+ zlacpy_("F", m, n, &work[*m * *n + 1], m, &f[f_offset], ldf);
+ *scale = scale2;
+ }
+/* L30: */
+ }
+
+ return 0;
+
+ }
+
+/* Determine block structure of A */
+
+ p = 0;
+ i__ = 1;
+L40:
+ if (i__ > *m) {
+ goto L50;
+ }
+ ++p;
+ iwork[p] = i__;
+ i__ += mb;
+ if (i__ >= *m) {
+ goto L50;
+ }
+ goto L40;
+L50:
+ iwork[p + 1] = *m + 1;
+ if (iwork[p] == iwork[p + 1]) {
+ --p;
+ }
+
+/* Determine block structure of B */
+
+ q = p + 1;
+ j = 1;
+L60:
+ if (j > *n) {
+ goto L70;
+ }
+
+ ++q;
+ iwork[q] = j;
+ j += nb;
+ if (j >= *n) {
+ goto L70;
+ }
+ goto L60;
+
+L70:
+ iwork[q + 1] = *n + 1;
+ if (iwork[q] == iwork[q + 1]) {
+ --q;
+ }
+
+ if (notran) {
+ i__1 = isolve;
+ for (iround = 1; iround <= i__1; ++iround) {
+
+/* Solve (I, J) - subsystem */
+/* A(I, I) * R(I, J) - L(I, J) * B(J, J) = C(I, J) */
+/* D(I, I) * R(I, J) - L(I, J) * E(J, J) = F(I, J) */
+/* for I = P, P - 1, ..., 1; J = 1, 2, ..., Q */
+
+ pq = 0;
+ *scale = 1.;
+ dscale = 0.;
+ dsum = 1.;
+ i__2 = q;
+ for (j = p + 2; j <= i__2; ++j) {
+ js = iwork[j];
+ je = iwork[j + 1] - 1;
+ nb = je - js + 1;
+ for (i__ = p; i__ >= 1; --i__) {
+ is = iwork[i__];
+ ie = iwork[i__ + 1] - 1;
+ mb = ie - is + 1;
+ ztgsy2_(trans, &ifunc, &mb, &nb, &a[is + is * a_dim1],
+ lda, &b[js + js * b_dim1], ldb, &c__[is + js *
+ c_dim1], ldc, &d__[is + is * d_dim1], ldd, &e[js
+ + js * e_dim1], lde, &f[is + js * f_dim1], ldf, &
+ scaloc, &dsum, &dscale, &linfo);
+ if (linfo > 0) {
+ *info = linfo;
+ }
+ pq += mb * nb;
+ if (scaloc != 1.) {
+ i__3 = js - 1;
+ for (k = 1; k <= i__3; ++k) {
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(m, &z__1, &c__[k * c_dim1 + 1], &c__1);
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(m, &z__1, &f[k * f_dim1 + 1], &c__1);
+/* L80: */
+ }
+ i__3 = je;
+ for (k = js; k <= i__3; ++k) {
+ i__4 = is - 1;
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(&i__4, &z__1, &c__[k * c_dim1 + 1], &c__1);
+ i__4 = is - 1;
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(&i__4, &z__1, &f[k * f_dim1 + 1], &c__1);
+/* L90: */
+ }
+ i__3 = je;
+ for (k = js; k <= i__3; ++k) {
+ i__4 = *m - ie;
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(&i__4, &z__1, &c__[ie + 1 + k * c_dim1], &
+ c__1);
+ i__4 = *m - ie;
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(&i__4, &z__1, &f[ie + 1 + k * f_dim1], &
+ c__1);
+/* L100: */
+ }
+ i__3 = *n;
+ for (k = je + 1; k <= i__3; ++k) {
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(m, &z__1, &c__[k * c_dim1 + 1], &c__1);
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(m, &z__1, &f[k * f_dim1 + 1], &c__1);
+/* L110: */
+ }
+ *scale *= scaloc;
+ }
+
+/* Substitute R(I,J) and L(I,J) into remaining equation. */
+
+ if (i__ > 1) {
+ i__3 = is - 1;
+ zgemm_("N", "N", &i__3, &nb, &mb, &c_b44, &a[is *
+ a_dim1 + 1], lda, &c__[is + js * c_dim1], ldc,
+ &c_b45, &c__[js * c_dim1 + 1], ldc);
+ i__3 = is - 1;
+ zgemm_("N", "N", &i__3, &nb, &mb, &c_b44, &d__[is *
+ d_dim1 + 1], ldd, &c__[is + js * c_dim1], ldc,
+ &c_b45, &f[js * f_dim1 + 1], ldf);
+ }
+ if (j < q) {
+ i__3 = *n - je;
+ zgemm_("N", "N", &mb, &i__3, &nb, &c_b45, &f[is + js *
+ f_dim1], ldf, &b[js + (je + 1) * b_dim1],
+ ldb, &c_b45, &c__[is + (je + 1) * c_dim1],
+ ldc);
+ i__3 = *n - je;
+ zgemm_("N", "N", &mb, &i__3, &nb, &c_b45, &f[is + js *
+ f_dim1], ldf, &e[js + (je + 1) * e_dim1],
+ lde, &c_b45, &f[is + (je + 1) * f_dim1], ldf);
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ if (dscale != 0.) {
+ if (*ijob == 1 || *ijob == 3) {
+ *dif = sqrt((doublereal) ((*m << 1) * *n)) / (dscale *
+ sqrt(dsum));
+ } else {
+ *dif = sqrt((doublereal) pq) / (dscale * sqrt(dsum));
+ }
+ }
+ if (isolve == 2 && iround == 1) {
+ if (notran) {
+ ifunc = *ijob;
+ }
+ scale2 = *scale;
+ zlacpy_("F", m, n, &c__[c_offset], ldc, &work[1], m);
+ zlacpy_("F", m, n, &f[f_offset], ldf, &work[*m * *n + 1], m);
+ zlaset_("F", m, n, &c_b1, &c_b1, &c__[c_offset], ldc);
+ zlaset_("F", m, n, &c_b1, &c_b1, &f[f_offset], ldf)
+ ;
+ } else if (isolve == 2 && iround == 2) {
+ zlacpy_("F", m, n, &work[1], m, &c__[c_offset], ldc);
+ zlacpy_("F", m, n, &work[*m * *n + 1], m, &f[f_offset], ldf);
+ *scale = scale2;
+ }
+/* L150: */
+ }
+ } else {
+
+/* Solve transposed (I, J)-subsystem */
+/* A(I, I)' * R(I, J) + D(I, I)' * L(I, J) = C(I, J) */
+/* R(I, J) * B(J, J) + L(I, J) * E(J, J) = -F(I, J) */
+/* for I = 1,2,..., P; J = Q, Q-1,..., 1 */
+
+ *scale = 1.;
+ i__1 = p;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ is = iwork[i__];
+ ie = iwork[i__ + 1] - 1;
+ mb = ie - is + 1;
+ i__2 = p + 2;
+ for (j = q; j >= i__2; --j) {
+ js = iwork[j];
+ je = iwork[j + 1] - 1;
+ nb = je - js + 1;
+ ztgsy2_(trans, &ifunc, &mb, &nb, &a[is + is * a_dim1], lda, &
+ b[js + js * b_dim1], ldb, &c__[is + js * c_dim1], ldc,
+ &d__[is + is * d_dim1], ldd, &e[js + js * e_dim1],
+ lde, &f[is + js * f_dim1], ldf, &scaloc, &dsum, &
+ dscale, &linfo);
+ if (linfo > 0) {
+ *info = linfo;
+ }
+ if (scaloc != 1.) {
+ i__3 = js - 1;
+ for (k = 1; k <= i__3; ++k) {
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(m, &z__1, &c__[k * c_dim1 + 1], &c__1);
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(m, &z__1, &f[k * f_dim1 + 1], &c__1);
+/* L160: */
+ }
+ i__3 = je;
+ for (k = js; k <= i__3; ++k) {
+ i__4 = is - 1;
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(&i__4, &z__1, &c__[k * c_dim1 + 1], &c__1);
+ i__4 = is - 1;
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(&i__4, &z__1, &f[k * f_dim1 + 1], &c__1);
+/* L170: */
+ }
+ i__3 = je;
+ for (k = js; k <= i__3; ++k) {
+ i__4 = *m - ie;
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(&i__4, &z__1, &c__[ie + 1 + k * c_dim1], &c__1)
+ ;
+ i__4 = *m - ie;
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(&i__4, &z__1, &f[ie + 1 + k * f_dim1], &c__1);
+/* L180: */
+ }
+ i__3 = *n;
+ for (k = je + 1; k <= i__3; ++k) {
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(m, &z__1, &c__[k * c_dim1 + 1], &c__1);
+ z__1.r = scaloc, z__1.i = 0.;
+ zscal_(m, &z__1, &f[k * f_dim1 + 1], &c__1);
+/* L190: */
+ }
+ *scale *= scaloc;
+ }
+
+/* Substitute R(I,J) and L(I,J) into remaining equation. */
+
+ if (j > p + 2) {
+ i__3 = js - 1;
+ zgemm_("N", "C", &mb, &i__3, &nb, &c_b45, &c__[is + js *
+ c_dim1], ldc, &b[js * b_dim1 + 1], ldb, &c_b45, &
+ f[is + f_dim1], ldf);
+ i__3 = js - 1;
+ zgemm_("N", "C", &mb, &i__3, &nb, &c_b45, &f[is + js *
+ f_dim1], ldf, &e[js * e_dim1 + 1], lde, &c_b45, &
+ f[is + f_dim1], ldf);
+ }
+ if (i__ < p) {
+ i__3 = *m - ie;
+ zgemm_("C", "N", &i__3, &nb, &mb, &c_b44, &a[is + (ie + 1)
+ * a_dim1], lda, &c__[is + js * c_dim1], ldc, &
+ c_b45, &c__[ie + 1 + js * c_dim1], ldc);
+ i__3 = *m - ie;
+ zgemm_("C", "N", &i__3, &nb, &mb, &c_b44, &d__[is + (ie +
+ 1) * d_dim1], ldd, &f[is + js * f_dim1], ldf, &
+ c_b45, &c__[ie + 1 + js * c_dim1], ldc);
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ }
+
+ work[1].r = (doublereal) lwmin, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZTGSYL */
+
+} /* ztgsyl_ */
diff --git a/contrib/libs/clapack/ztpcon.c b/contrib/libs/clapack/ztpcon.c
new file mode 100644
index 0000000000..746b7311eb
--- /dev/null
+++ b/contrib/libs/clapack/ztpcon.c
@@ -0,0 +1,242 @@
+/* ztpcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ztpcon_(char *norm, char *uplo, char *diag, integer *n,
+ doublecomplex *ap, doublereal *rcond, doublecomplex *work, doublereal
+ *rwork, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer ix, kase, kase1;
+ doublereal scale;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ doublereal anorm;
+ logical upper;
+ doublereal xnorm;
+ extern /* Subroutine */ int zlacn2_(integer *, doublecomplex *,
+ doublecomplex *, doublereal *, integer *, integer *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal ainvnm;
+ extern integer izamax_(integer *, doublecomplex *, integer *);
+ logical onenrm;
+ extern /* Subroutine */ int zdrscl_(integer *, doublereal *,
+ doublecomplex *, integer *);
+ char normin[1];
+ extern doublereal zlantp_(char *, char *, char *, integer *,
+ doublecomplex *, doublereal *);
+ doublereal smlnum;
+ logical nounit;
+ extern /* Subroutine */ int zlatps_(char *, char *, char *, char *,
+ integer *, doublecomplex *, doublecomplex *, doublereal *,
+ doublereal *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTPCON estimates the reciprocal of the condition number of a packed */
+/* triangular matrix A, in either the 1-norm or the infinity-norm. */
+
+/* The norm of A is computed and an estimate is obtained for */
+/* norm(inv(A)), then the reciprocal of the condition number is */
+/* computed as */
+/* RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The upper or lower triangular matrix A, packed columnwise in */
+/* a linear array. The j-th column of A is stored in the array */
+/* AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* If DIAG = 'U', the diagonal elements of A are not referenced */
+/* and are assumed to be 1. */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ --rwork;
+ --work;
+ --ap;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+ nounit = lsame_(diag, "N");
+
+ if (! onenrm && ! lsame_(norm, "I")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTPCON", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ *rcond = 1.;
+ return 0;
+ }
+
+ *rcond = 0.;
+ smlnum = dlamch_("Safe minimum") * (doublereal) max(1,*n);
+
+/* Compute the norm of the triangular matrix A. */
+
+ anorm = zlantp_(norm, uplo, diag, n, &ap[1], &rwork[1]);
+
+/* Continue only if ANORM > 0. */
+
+ if (anorm > 0.) {
+
+/* Estimate the norm of the inverse of A. */
+
+ ainvnm = 0.;
+ *(unsigned char *)normin = 'N';
+ if (onenrm) {
+ kase1 = 1;
+ } else {
+ kase1 = 2;
+ }
+ kase = 0;
+L10:
+ zlacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+ if (kase != 0) {
+ if (kase == kase1) {
+
+/* Multiply by inv(A). */
+
+ zlatps_(uplo, "No transpose", diag, normin, n, &ap[1], &work[
+ 1], &scale, &rwork[1], info);
+ } else {
+
+/* Multiply by inv(A'). */
+
+ zlatps_(uplo, "Conjugate transpose", diag, normin, n, &ap[1],
+ &work[1], &scale, &rwork[1], info);
+ }
+ *(unsigned char *)normin = 'Y';
+
+/* Multiply by 1/SCALE if doing so will not cause overflow. */
+
+ if (scale != 1.) {
+ ix = izamax_(n, &work[1], &c__1);
+ i__1 = ix;
+ xnorm = (d__1 = work[i__1].r, abs(d__1)) + (d__2 = d_imag(&
+ work[ix]), abs(d__2));
+ if (scale < xnorm * smlnum || scale == 0.) {
+ goto L20;
+ }
+ zdrscl_(n, &scale, &work[1], &c__1);
+ }
+ goto L10;
+ }
+
+/* Compute the estimate of the reciprocal condition number. */
+
+ if (ainvnm != 0.) {
+ *rcond = 1. / anorm / ainvnm;
+ }
+ }
+
+L20:
+ return 0;
+
+/* End of ZTPCON */
+
+} /* ztpcon_ */
diff --git a/contrib/libs/clapack/ztprfs.c b/contrib/libs/clapack/ztprfs.c
new file mode 100644
index 0000000000..43df67b965
--- /dev/null
+++ b/contrib/libs/clapack/ztprfs.c
@@ -0,0 +1,561 @@
+/* ztprfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ztprfs_(char *uplo, char *trans, char *diag, integer *n, 
+	integer *nrhs, doublecomplex *ap, doublecomplex *b, integer *ldb, 
+	doublecomplex *x, integer *ldx, doublereal *ferr, doublereal *berr, 
+	doublecomplex *work, doublereal *rwork, integer *info)
+{
+/* NOTE(review): machine-generated by f2c from ztprfs.f -- prefer fixing the */
+/* Fortran source and regenerating over hand-editing this translation. */
+    /* System generated locals */
+    integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5;
+    doublereal d__1, d__2, d__3, d__4;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *);
+
+    /* Local variables */
+    integer i__, j, k;
+    doublereal s;
+    integer kc;
+    doublereal xk;
+    integer nz;
+    doublereal eps;
+    integer kase;
+    doublereal safe1, safe2;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    logical upper;
+    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *, 
+	    doublecomplex *, integer *), zaxpy_(integer *, doublecomplex *, 
+	    doublecomplex *, integer *, doublecomplex *, integer *), ztpmv_(
+	    char *, char *, char *, integer *, doublecomplex *, doublecomplex 
+	    *, integer *), ztpsv_(char *, char *, 
+	    char *, integer *, doublecomplex *, doublecomplex *, integer *), zlacn2_(integer *, doublecomplex *, 
+	    doublecomplex *, doublereal *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    doublereal safmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical notran;
+    char transn[1], transt[1];
+    logical nounit;
+    doublereal lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTPRFS provides error bounds and backward error estimates for the */
+/* solution to a system of linear equations with a triangular packed */
+/* coefficient matrix. */
+
+/* The solution matrix X must be computed by ZTPTRS or some other */
+/* means before entering this routine. ZTPRFS does not do iterative */
+/* refinement because doing so cannot improve the backward error. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The upper or lower triangular matrix A, packed columnwise in */
+/* a linear array. The j-th column of A is stored in the array */
+/* AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+/* If DIAG = 'U', the diagonal elements of A are not referenced */
+/* and are assumed to be 1. */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* The solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* Shift pointers so the Fortran 1-based indexing scheme carries over. */
+    --ap;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+    x_dim1 = *ldx;
+    x_offset = 1 + x_dim1;
+    x -= x_offset;
+    --ferr;
+    --berr;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    notran = lsame_(trans, "N");
+    nounit = lsame_(diag, "N");
+
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (! notran && ! lsame_(trans, "T") && ! 
+	    lsame_(trans, "C")) {
+	*info = -2;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*nrhs < 0) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    } else if (*ldx < max(1,*n)) {
+	*info = -10;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZTPRFS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0 || *nrhs == 0) {
+	i__1 = *nrhs;
+	for (j = 1; j <= i__1; ++j) {
+	    ferr[j] = 0.;
+	    berr[j] = 0.;
+/* L10: */
+	}
+	return 0;
+    }
+
+    if (notran) {
+	*(unsigned char *)transn = 'N';
+	*(unsigned char *)transt = 'C';
+    } else {
+	*(unsigned char *)transn = 'C';
+	*(unsigned char *)transt = 'N';
+    }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+    nz = *n + 1;
+    eps = dlamch_("Epsilon");
+    safmin = dlamch_("Safe minimum");
+    safe1 = nz * safmin;
+    safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A, A**T, or A**H, depending on TRANS. */
+
+	zcopy_(n, &x[j * x_dim1 + 1], &c__1, &work[1], &c__1);
+	ztpmv_(uplo, trans, diag, n, &ap[1], &work[1], &c__1);
+	z__1.r = -1., z__1.i = -0.;
+	zaxpy_(n, &z__1, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    i__3 = i__ + j * b_dim1;
+	    rwork[i__] = (d__1 = b[i__3].r, abs(d__1)) + (d__2 = d_imag(&b[
+		    i__ + j * b_dim1]), abs(d__2));
+/* L20: */
+	}
+
+	if (notran) {
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+	    if (upper) {
+		kc = 1;
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&
+				x[k + j * x_dim1]), abs(d__2));
+			i__3 = k;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    i__4 = kc + i__ - 1;
+			    rwork[i__] += ((d__1 = ap[i__4].r, abs(d__1)) + (
+				    d__2 = d_imag(&ap[kc + i__ - 1]), abs(
+				    d__2))) * xk;
+/* L30: */
+			}
+			kc += k;
+/* L40: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&
+				x[k + j * x_dim1]), abs(d__2));
+			i__3 = k - 1;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    i__4 = kc + i__ - 1;
+			    rwork[i__] += ((d__1 = ap[i__4].r, abs(d__1)) + (
+				    d__2 = d_imag(&ap[kc + i__ - 1]), abs(
+				    d__2))) * xk;
+/* L50: */
+			}
+			rwork[k] += xk;
+			kc += k;
+/* L60: */
+		    }
+		}
+	    } else {
+		kc = 1;
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&
+				x[k + j * x_dim1]), abs(d__2));
+			i__3 = *n;
+			for (i__ = k; i__ <= i__3; ++i__) {
+			    i__4 = kc + i__ - k;
+			    rwork[i__] += ((d__1 = ap[i__4].r, abs(d__1)) + (
+				    d__2 = d_imag(&ap[kc + i__ - k]), abs(
+				    d__2))) * xk;
+/* L70: */
+			}
+			kc = kc + *n - k + 1;
+/* L80: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&
+				x[k + j * x_dim1]), abs(d__2));
+			i__3 = *n;
+			for (i__ = k + 1; i__ <= i__3; ++i__) {
+			    i__4 = kc + i__ - k;
+			    rwork[i__] += ((d__1 = ap[i__4].r, abs(d__1)) + (
+				    d__2 = d_imag(&ap[kc + i__ - k]), abs(
+				    d__2))) * xk;
+/* L90: */
+			}
+			rwork[k] += xk;
+			kc = kc + *n - k + 1;
+/* L100: */
+		    }
+		}
+	    }
+	} else {
+
+/* Compute abs(A**H)*abs(X) + abs(B). */
+
+	    if (upper) {
+		kc = 1;
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			s = 0.;
+			i__3 = k;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    i__4 = kc + i__ - 1;
+			    i__5 = i__ + j * x_dim1;
+			    s += ((d__1 = ap[i__4].r, abs(d__1)) + (d__2 = 
+				    d_imag(&ap[kc + i__ - 1]), abs(d__2))) * (
+				    (d__3 = x[i__5].r, abs(d__3)) + (d__4 = 
+				    d_imag(&x[i__ + j * x_dim1]), abs(d__4)));
+/* L110: */
+			}
+			rwork[k] += s;
+			kc += k;
+/* L120: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			s = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[
+				k + j * x_dim1]), abs(d__2));
+			i__3 = k - 1;
+			for (i__ = 1; i__ <= i__3; ++i__) {
+			    i__4 = kc + i__ - 1;
+			    i__5 = i__ + j * x_dim1;
+			    s += ((d__1 = ap[i__4].r, abs(d__1)) + (d__2 = 
+				    d_imag(&ap[kc + i__ - 1]), abs(d__2))) * (
+				    (d__3 = x[i__5].r, abs(d__3)) + (d__4 = 
+				    d_imag(&x[i__ + j * x_dim1]), abs(d__4)));
+/* L130: */
+			}
+			rwork[k] += s;
+			kc += k;
+/* L140: */
+		    }
+		}
+	    } else {
+		kc = 1;
+		if (nounit) {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			s = 0.;
+			i__3 = *n;
+			for (i__ = k; i__ <= i__3; ++i__) {
+			    i__4 = kc + i__ - k;
+			    i__5 = i__ + j * x_dim1;
+			    s += ((d__1 = ap[i__4].r, abs(d__1)) + (d__2 = 
+				    d_imag(&ap[kc + i__ - k]), abs(d__2))) * (
+				    (d__3 = x[i__5].r, abs(d__3)) + (d__4 = 
+				    d_imag(&x[i__ + j * x_dim1]), abs(d__4)));
+/* L150: */
+			}
+			rwork[k] += s;
+			kc = kc + *n - k + 1;
+/* L160: */
+		    }
+		} else {
+		    i__2 = *n;
+		    for (k = 1; k <= i__2; ++k) {
+			i__3 = k + j * x_dim1;
+			s = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[
+				k + j * x_dim1]), abs(d__2));
+			i__3 = *n;
+			for (i__ = k + 1; i__ <= i__3; ++i__) {
+			    i__4 = kc + i__ - k;
+			    i__5 = i__ + j * x_dim1;
+			    s += ((d__1 = ap[i__4].r, abs(d__1)) + (d__2 = 
+				    d_imag(&ap[kc + i__ - k]), abs(d__2))) * (
+				    (d__3 = x[i__5].r, abs(d__3)) + (d__4 = 
+				    d_imag(&x[i__ + j * x_dim1]), abs(d__4)));
+/* L170: */
+			}
+			rwork[k] += s;
+			kc = kc + *n - k + 1;
+/* L180: */
+		    }
+		}
+	    }
+	}
+	s = 0.;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (rwork[i__] > safe2) {
+/* Computing MAX */
+		i__3 = i__;
+		d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 = 
+			d_imag(&work[i__]), abs(d__2))) / rwork[i__];
+		s = max(d__3,d__4);
+	    } else {
+/* Computing MAX */
+		i__3 = i__;
+		d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 = 
+			d_imag(&work[i__]), abs(d__2)) + safe1) / (rwork[i__] 
+			+ safe1);
+		s = max(d__3,d__4);
+	    }
+/* L190: */
+	}
+	berr[j] = s;
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use ZLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+	    if (rwork[i__] > safe2) {
+		i__3 = i__;
+		rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 = 
+			d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+			;
+	    } else {
+		i__3 = i__;
+		rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 = 
+			d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+			 + safe1;
+	    }
+/* L200: */
+	}
+
+/* Reverse-communication loop: ZLACN2 keeps returning KASE != 0 to request */
+/* another matrix-vector product until its norm estimate in FERR(J) settles. */
+	kase = 0;
+L210:
+	zlacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+	if (kase != 0) {
+	    if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)**H). */
+
+		ztpsv_(uplo, transt, diag, n, &ap[1], &work[1], &c__1);
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__;
+		    z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4] 
+			    * work[i__5].i;
+		    work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L220: */
+		}
+	    } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+		i__2 = *n;
+		for (i__ = 1; i__ <= i__2; ++i__) {
+		    i__3 = i__;
+		    i__4 = i__;
+		    i__5 = i__;
+		    z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4] 
+			    * work[i__5].i;
+		    work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L230: */
+		}
+		ztpsv_(uplo, transn, diag, n, &ap[1], &work[1], &c__1);
+	    }
+	    goto L210;
+	}
+
+/* Normalize error. */
+
+	lstres = 0.;
+	i__2 = *n;
+	for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+	    i__3 = i__ + j * x_dim1;
+	    d__3 = lstres, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = 
+		    d_imag(&x[i__ + j * x_dim1]), abs(d__2));
+	    lstres = max(d__3,d__4);
+/* L240: */
+	}
+	if (lstres != 0.) {
+	    ferr[j] /= lstres;
+	}
+
+/* L250: */
+    }
+
+    return 0;
+
+/* End of ZTPRFS */
+
+} /* ztprfs_ */
diff --git a/contrib/libs/clapack/ztptri.c b/contrib/libs/clapack/ztptri.c
new file mode 100644
index 0000000000..2081845f71
--- /dev/null
+++ b/contrib/libs/clapack/ztptri.c
@@ -0,0 +1,235 @@
+/* ztptri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int ztptri_(char *uplo, char *diag, integer *n, 
+	doublecomplex *ap, integer *info)
+{
+/* NOTE(review): machine-generated by f2c from ztptri.f -- edit the Fortran */
+/* source and regenerate rather than patching this translation by hand. */
+    /* System generated locals */
+    integer i__1, i__2;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer j, jc, jj;
+    doublecomplex ajj;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
+	    doublecomplex *, integer *);
+    logical upper;
+    extern /* Subroutine */ int ztpmv_(char *, char *, char *, integer *, 
+	    doublecomplex *, doublecomplex *, integer *), xerbla_(char *, integer *);
+    integer jclast;
+    logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTPTRI computes the inverse of a complex upper or lower triangular */
+/* matrix A stored in packed format. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* On entry, the upper or lower triangular matrix A, stored */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*((2*n-j)/2) = A(i,j) for j<=i<=n. */
+/* See below for further details. */
+/* On exit, the (triangular) inverse of the original matrix, in */
+/* the same packed storage format. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, A(i,i) is exactly zero. The triangular */
+/* matrix is singular and its inverse can not be computed. */
+
+/* Further Details */
+/* =============== */
+
+/* A triangular matrix A can be transferred to packed storage using one */
+/* of the following program segments: */
+
+/* UPLO = 'U': UPLO = 'L': */
+
+/* JC = 1 JC = 1 */
+/* DO 2 J = 1, N DO 2 J = 1, N */
+/* DO 1 I = 1, J DO 1 I = J, N */
+/* AP(JC+I-1) = A(I,J) AP(JC+I-J) = A(I,J) */
+/* 1 CONTINUE 1 CONTINUE */
+/* JC = JC + J JC = JC + N - J + 1 */
+/* 2 CONTINUE 2 CONTINUE */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    nounit = lsame_(diag, "N");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZTPTRI", &i__1);
+	return 0;
+    }
+
+/* Check for singularity if non-unit. */
+
+/* f2c artifact: *INFO doubles as the loop index below, so an early return */
+/* leaves *INFO equal to the 1-based index of the zero diagonal entry. */
+    if (nounit) {
+	if (upper) {
+	    jj = 0;
+	    i__1 = *n;
+	    for (*info = 1; *info <= i__1; ++(*info)) {
+		jj += *info;
+		i__2 = jj;
+		if (ap[i__2].r == 0. && ap[i__2].i == 0.) {
+		    return 0;
+		}
+/* L10: */
+	    }
+	} else {
+	    jj = 1;
+	    i__1 = *n;
+	    for (*info = 1; *info <= i__1; ++(*info)) {
+		i__2 = jj;
+		if (ap[i__2].r == 0. && ap[i__2].i == 0.) {
+		    return 0;
+		}
+		jj = jj + *n - *info + 1;
+/* L20: */
+	    }
+	}
+	*info = 0;
+    }
+
+    if (upper) {
+
+/* Compute inverse of upper triangular matrix. */
+
+	jc = 1;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    if (nounit) {
+		i__2 = jc + j - 1;
+		z_div(&z__1, &c_b1, &ap[jc + j - 1]);
+		ap[i__2].r = z__1.r, ap[i__2].i = z__1.i;
+		i__2 = jc + j - 1;
+		z__1.r = -ap[i__2].r, z__1.i = -ap[i__2].i;
+		ajj.r = z__1.r, ajj.i = z__1.i;
+	    } else {
+		z__1.r = -1., z__1.i = -0.;
+		ajj.r = z__1.r, ajj.i = z__1.i;
+	    }
+
+/* Compute elements 1:j-1 of j-th column. */
+
+	    i__2 = j - 1;
+	    ztpmv_("Upper", "No transpose", diag, &i__2, &ap[1], &ap[jc], &
+		    c__1);
+	    i__2 = j - 1;
+	    zscal_(&i__2, &ajj, &ap[jc], &c__1);
+	    jc += j;
+/* L30: */
+	}
+
+    } else {
+
+/* Compute inverse of lower triangular matrix. */
+
+	jc = *n * (*n + 1) / 2;
+	for (j = *n; j >= 1; --j) {
+	    if (nounit) {
+		i__1 = jc;
+		z_div(&z__1, &c_b1, &ap[jc]);
+		ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
+		i__1 = jc;
+		z__1.r = -ap[i__1].r, z__1.i = -ap[i__1].i;
+		ajj.r = z__1.r, ajj.i = z__1.i;
+	    } else {
+		z__1.r = -1., z__1.i = -0.;
+		ajj.r = z__1.r, ajj.i = z__1.i;
+	    }
+	    if (j < *n) {
+
+/* Compute elements j+1:n of j-th column. */
+
+		i__1 = *n - j;
+		ztpmv_("Lower", "No transpose", diag, &i__1, &ap[jclast], &ap[
+			jc + 1], &c__1);
+		i__1 = *n - j;
+		zscal_(&i__1, &ajj, &ap[jc + 1], &c__1);
+	    }
+/* JCLAST remembers the start of this column; the next (j-1) iteration uses */
+/* it as the base of the trailing packed triangle passed to ZTPMV. */
+	    jclast = jc;
+/* Step JC back to the start of column j-1 in lower packed storage. */
+	    jc = jc - *n + j - 2;
+/* L40: */
+	}
+    }
+
+    return 0;
+
+/* End of ZTPTRI */
+
+} /* ztptri_ */
diff --git a/contrib/libs/clapack/ztptrs.c b/contrib/libs/clapack/ztptrs.c
new file mode 100644
index 0000000000..a5affa808a
--- /dev/null
+++ b/contrib/libs/clapack/ztptrs.c
@@ -0,0 +1,194 @@
+/* ztptrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ztptrs_(char *uplo, char *trans, char *diag, integer *n, 
+	integer *nrhs, doublecomplex *ap, doublecomplex *b, integer *ldb, 
+	integer *info)
+{
+/* NOTE(review): machine-generated by f2c from ztptrs.f -- prefer fixing the */
+/* Fortran source and regenerating over hand-editing this translation. */
+    /* System generated locals */
+    integer b_dim1, b_offset, i__1, i__2;
+
+    /* Local variables */
+    integer j, jc;
+    extern logical lsame_(char *, char *);
+    logical upper;
+    extern /* Subroutine */ int ztpsv_(char *, char *, char *, integer *, 
+	    doublecomplex *, doublecomplex *, integer *), xerbla_(char *, integer *);
+    logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTPTRS solves a triangular system of the form */
+
+/* A * X = B, A**T * X = B, or A**H * X = B, */
+
+/* where A is a triangular matrix of order N stored in packed format, */
+/* and B is an N-by-NRHS matrix. A check is made to verify that A is */
+/* nonsingular. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The upper or lower triangular matrix A, packed columnwise in */
+/* a linear array. The j-th column of A is stored in the array */
+/* AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, if INFO = 0, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element of A is zero, */
+/* indicating that the matrix is singular and the */
+/* solutions X have not been computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    b_dim1 = *ldb;
+    b_offset = 1 + b_dim1;
+    b -= b_offset;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    nounit = lsame_(diag, "N");
+    if (! upper && ! lsame_(uplo, "L")) {
+	*info = -1;
+    } else if (! lsame_(trans, "N") && ! lsame_(trans, 
+	    "T") && ! lsame_(trans, "C")) {
+	*info = -2;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*nrhs < 0) {
+	*info = -5;
+    } else if (*ldb < max(1,*n)) {
+	*info = -8;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZTPTRS", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* Check for singularity. */
+
+/* f2c artifact: *INFO doubles as the loop index below, so an early return */
+/* leaves *INFO equal to the 1-based index of the zero diagonal entry. */
+    if (nounit) {
+	if (upper) {
+	    jc = 1;
+	    i__1 = *n;
+	    for (*info = 1; *info <= i__1; ++(*info)) {
+		i__2 = jc + *info - 1;
+		if (ap[i__2].r == 0. && ap[i__2].i == 0.) {
+		    return 0;
+		}
+		jc += *info;
+/* L10: */
+	    }
+	} else {
+	    jc = 1;
+	    i__1 = *n;
+	    for (*info = 1; *info <= i__1; ++(*info)) {
+		i__2 = jc;
+		if (ap[i__2].r == 0. && ap[i__2].i == 0.) {
+		    return 0;
+		}
+		jc = jc + *n - *info + 1;
+/* L20: */
+	    }
+	}
+    }
+    *info = 0;
+
+/* Solve A * x = b, A**T * x = b, or A**H * x = b. */
+
+    i__1 = *nrhs;
+    for (j = 1; j <= i__1; ++j) {
+	ztpsv_(uplo, trans, diag, n, &ap[1], &b[j * b_dim1 + 1], &c__1);
+/* L30: */
+    }
+
+    return 0;
+
+/* End of ZTPTRS */
+
+} /* ztptrs_ */
diff --git a/contrib/libs/clapack/ztpttf.c b/contrib/libs/clapack/ztpttf.c
new file mode 100644
index 0000000000..29d23716a3
--- /dev/null
+++ b/contrib/libs/clapack/ztpttf.c
@@ -0,0 +1,573 @@
+/* ztpttf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ztpttf_(char *transr, char *uplo, integer *n,
+	doublecomplex *ap, doublecomplex *arf, integer *info)
+{
+    /* System generated locals */
+    integer i__1, i__2, i__3, i__4;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__, j, k, n1, n2, ij, jp, js, nt, lda, ijp;
+    logical normaltransr;
+    extern logical lsame_(char *, char *);
+    logical lower;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical nisodd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. */
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTPTTF copies a triangular matrix A from standard packed format (TP) */
+/* to rectangular full packed format (TF). */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': ARF in Normal format is wanted; */
+/* = 'C': ARF in Conjugate-transpose format is wanted. */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension ( N*(N+1)/2 ), */
+/* On entry, the upper or lower triangular matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* ARF (output) COMPLEX*16 array, dimension ( N*(N+1)/2 ), */
+/* On exit, the upper or lower triangular matrix A stored in */
+/* RFP format. For a further discussion see Notes below. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Notes: */
+/* ====== */
+
+/* We first consider Standard Packed Format when N is even. */
+/* We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* conjugate-transpose of the first three columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* conjugate-transpose of the last three columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N even and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- -- */
+/* 03 04 05 33 43 53 */
+/* -- -- */
+/* 13 14 15 00 44 54 */
+/* -- */
+/* 23 24 25 10 11 55 */
+
+/* 33 34 35 20 21 22 */
+/* -- */
+/* 00 44 45 30 31 32 */
+/* -- -- */
+/* 01 11 55 40 41 42 */
+/* -- -- -- */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We next consider Standard Packed Format when N is odd. */
+/* We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = 'N'. RFP holds AP as follows: */
+/* For UPLO = 'U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* conjugate-transpose of the first two columns of AP upper. */
+/* For UPLO = 'L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* conjugate-transpose of the last two columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N odd and TRANSR = 'N'. */
+
+/* RFP A RFP A */
+
+/* -- -- */
+/* 02 03 04 00 33 43 */
+/* -- */
+/* 12 13 14 10 11 44 */
+
+/* 22 23 24 20 21 22 */
+/* -- */
+/* 00 33 34 30 31 32 */
+/* -- -- */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = 'C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- */
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    *info = 0;
+    normaltransr = lsame_(transr, "N");
+    lower = lsame_(uplo, "L");
+    if (! normaltransr && ! lsame_(transr, "C")) {
+	*info = -1;
+    } else if (! lower && ! lsame_(uplo, "U")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZTPTTF", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	return 0;
+    }
+
+/* N = 1: a single element; copy it directly, conjugating when TRANSR = 'C'. */
+
+    if (*n == 1) {
+	if (normaltransr) {
+	    arf[0].r = ap[0].r, arf[0].i = ap[0].i;
+	} else {
+	    d_cnjg(&z__1, ap);
+	    arf[0].r = z__1.r, arf[0].i = z__1.i;
+	}
+	return 0;
+    }
+
+/* Size of array ARF(0:NT-1) */
+
+    nt = *n * (*n + 1) / 2;
+
+/* Set N1 and N2 depending on LOWER */
+
+    if (lower) {
+	n2 = *n / 2;
+	n1 = *n - n2;
+    } else {
+	n1 = *n / 2;
+	n2 = *n - n1;
+    }
+
+/* If N is odd, set NISODD = .TRUE. */
+/* If N is even, set K = N/2 and NISODD = .FALSE. */
+
+/* set lda of ARF^C; ARF^C is (0:(N+1)/2-1,0:N-noe) */
+/* where noe = 0 if n is even, noe = 1 if n is odd */
+
+    if (*n % 2 == 0) {
+	k = *n / 2;
+	nisodd = FALSE_;
+	lda = *n + 1;
+    } else {
+	nisodd = TRUE_;
+	lda = *n;
+    }
+
+/* ARF^C has lda rows and n+1-noe cols */
+
+    if (! normaltransr) {
+	lda = (*n + 1) / 2;
+    }
+
+/* start execution: there are eight cases */
+/* (indexed by N odd/even x TRANSR = 'N'/'C' x UPLO = 'L'/'U'; */
+/* IJP always walks AP sequentially, IJ/JS compute the RFP target index) */
+
+    if (nisodd) {
+
+/* N is odd */
+
+	if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:n1-1) ) */
+/* T1 -> a(0,0), T2 -> a(0,1), S -> a(n1,0) */
+/* T1 -> a(0), T2 -> a(n), S -> a(n1); lda = n */
+
+		ijp = 0;
+		jp = 0;
+		i__1 = n2;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = *n - 1;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			ij = i__ + jp;
+			i__3 = ij;
+			i__4 = ijp;
+			arf[i__3].r = ap[i__4].r, arf[i__3].i = ap[i__4].i;
+			++ijp;
+		    }
+		    jp += lda;
+		}
+		i__1 = n2 - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = n2;
+		    for (j = i__ + 1; j <= i__2; ++j) {
+			ij = i__ + j * lda;
+			i__3 = ij;
+			d_cnjg(&z__1, &ap[ijp]);
+			arf[i__3].r = z__1.r, arf[i__3].i = z__1.i;
+			++ijp;
+		    }
+		}
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:n2-1) */
+/* T1 -> a(n1+1,0), T2 -> a(n1,0), S -> a(0,0) */
+/* T1 -> a(n2), T2 -> a(n1), S -> a(0) */
+
+		ijp = 0;
+		i__1 = n1 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    ij = n2 + j;
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			d_cnjg(&z__1, &ap[ijp]);
+			arf[i__3].r = z__1.r, arf[i__3].i = z__1.i;
+			++ijp;
+			ij += lda;
+		    }
+		}
+		js = 0;
+		i__1 = *n - 1;
+		for (j = n1; j <= i__1; ++j) {
+		    ij = js;
+		    i__2 = js + j;
+		    for (ij = js; ij <= i__2; ++ij) {
+			i__3 = ij;
+			i__4 = ijp;
+			arf[i__3].r = ap[i__4].r, arf[i__3].i = ap[i__4].i;
+			++ijp;
+		    }
+		    js += lda;
+		}
+
+	    }
+
+	} else {
+
+/* N is odd and TRANSR = 'C' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is odd */
+/* T1 -> A(0,0) , T2 -> A(1,0) , S -> A(0,n1) */
+/* T1 -> a(0+0) , T2 -> a(1+0) , S -> a(0+n1*n1); lda=n1 */
+
+		ijp = 0;
+		i__1 = n2;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = *n * lda - 1;
+		    i__3 = lda;
+		    for (ij = i__ * (lda + 1); i__3 < 0 ? ij >= i__2 : ij <=
+			    i__2; ij += i__3) {
+			i__4 = ij;
+			d_cnjg(&z__1, &ap[ijp]);
+			arf[i__4].r = z__1.r, arf[i__4].i = z__1.i;
+			++ijp;
+		    }
+		}
+		js = 1;
+		i__1 = n2 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + n2 - j - 1;
+		    for (ij = js; ij <= i__3; ++ij) {
+			i__2 = ij;
+			i__4 = ijp;
+			arf[i__2].r = ap[i__4].r, arf[i__2].i = ap[i__4].i;
+			++ijp;
+		    }
+		    js = js + lda + 1;
+		}
+
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is odd */
+/* T1 -> A(0,n1+1), T2 -> A(0,n1), S -> A(0,0) */
+/* T1 -> a(n2*n2), T2 -> a(n1*n2), S -> a(0); lda = n2 */
+
+		ijp = 0;
+		js = n2 * lda;
+		i__1 = n1 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + j;
+		    for (ij = js; ij <= i__3; ++ij) {
+			i__2 = ij;
+			i__4 = ijp;
+			arf[i__2].r = ap[i__4].r, arf[i__2].i = ap[i__4].i;
+			++ijp;
+		    }
+		    js += lda;
+		}
+		i__1 = n1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__3 = i__ + (n1 + i__) * lda;
+		    i__2 = lda;
+		    for (ij = i__; i__2 < 0 ? ij >= i__3 : ij <= i__3; ij +=
+			    i__2) {
+			i__4 = ij;
+			d_cnjg(&z__1, &ap[ijp]);
+			arf[i__4].r = z__1.r, arf[i__4].i = z__1.i;
+			++ijp;
+		    }
+		}
+
+	    }
+
+	}
+
+    } else {
+
+/* N is even */
+
+	if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/* T1 -> a(1), T2 -> a(0), S -> a(k+1) */
+
+		ijp = 0;
+		jp = 0;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = *n - 1;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			ij = i__ + 1 + jp;
+			i__3 = ij;
+			i__4 = ijp;
+			arf[i__3].r = ap[i__4].r, arf[i__3].i = ap[i__4].i;
+			++ijp;
+		    }
+		    jp += lda;
+		}
+		i__1 = k - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = k - 1;
+		    for (j = i__; j <= i__2; ++j) {
+			ij = i__ + j * lda;
+			i__3 = ij;
+			d_cnjg(&z__1, &ap[ijp]);
+			arf[i__3].r = z__1.r, arf[i__3].i = z__1.i;
+			++ijp;
+		    }
+		}
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(k+1,0) , T2 -> a(k,0), S -> a(0,0) */
+/* T1 -> a(k+1), T2 -> a(k), S -> a(0) */
+
+		ijp = 0;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    ij = k + 1 + j;
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			d_cnjg(&z__1, &ap[ijp]);
+			arf[i__3].r = z__1.r, arf[i__3].i = z__1.i;
+			++ijp;
+			ij += lda;
+		    }
+		}
+		js = 0;
+		i__1 = *n - 1;
+		for (j = k; j <= i__1; ++j) {
+		    ij = js;
+		    i__2 = js + j;
+		    for (ij = js; ij <= i__2; ++ij) {
+			i__3 = ij;
+			i__4 = ijp;
+			arf[i__3].r = ap[i__4].r, arf[i__3].i = ap[i__4].i;
+			++ijp;
+		    }
+		    js += lda;
+		}
+
+	    }
+
+	} else {
+
+/* N is even and TRANSR = 'C' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,1), T2 -> B(0,0), S -> B(0,k+1) */
+/* T1 -> a(0+k), T2 -> a(0+0), S -> a(0+k*(k+1)); lda=k */
+
+		ijp = 0;
+		i__1 = k - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = (*n + 1) * lda - 1;
+		    i__3 = lda;
+		    for (ij = i__ + (i__ + 1) * lda; i__3 < 0 ? ij >= i__2 :
+			    ij <= i__2; ij += i__3) {
+			i__4 = ij;
+			d_cnjg(&z__1, &ap[ijp]);
+			arf[i__4].r = z__1.r, arf[i__4].i = z__1.i;
+			++ijp;
+		    }
+		}
+		js = 0;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + k - j - 1;
+		    for (ij = js; ij <= i__3; ++ij) {
+			i__2 = ij;
+			i__4 = ijp;
+			arf[i__2].r = ap[i__4].r, arf[i__2].i = ap[i__4].i;
+			++ijp;
+		    }
+		    js = js + lda + 1;
+		}
+
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is even (see paper) */
+/* T1 -> B(0,k+1), T2 -> B(0,k), S -> B(0,0) */
+/* T1 -> a(0+k*(k+1)), T2 -> a(0+k*k), S -> a(0+0)); lda=k */
+
+		ijp = 0;
+		js = (k + 1) * lda;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__3 = js + j;
+		    for (ij = js; ij <= i__3; ++ij) {
+			i__2 = ij;
+			i__4 = ijp;
+			arf[i__2].r = ap[i__4].r, arf[i__2].i = ap[i__4].i;
+			++ijp;
+		    }
+		    js += lda;
+		}
+		i__1 = k - 1;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__3 = i__ + (k + i__) * lda;
+		    i__2 = lda;
+		    for (ij = i__; i__2 < 0 ? ij >= i__3 : ij <= i__3; ij +=
+			    i__2) {
+			i__4 = ij;
+			d_cnjg(&z__1, &ap[ijp]);
+			arf[i__4].r = z__1.r, arf[i__4].i = z__1.i;
+			++ijp;
+		    }
+		}
+
+	    }
+
+	}
+
+    }
+
+    return 0;
+
+/* End of ZTPTTF */
+
+} /* ztpttf_ */
diff --git a/contrib/libs/clapack/ztpttr.c b/contrib/libs/clapack/ztpttr.c
new file mode 100644
index 0000000000..9d39cca254
--- /dev/null
+++ b/contrib/libs/clapack/ztpttr.c
@@ -0,0 +1,148 @@
+/* ztpttr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ztpttr_(char *uplo, integer *n, doublecomplex *ap,
+	doublecomplex *a, integer *lda, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, j, k;
+    extern logical lsame_(char *, char *);
+    logical lower;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Julien Langou of the Univ. of Colorado Denver -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTPTTR copies a triangular matrix A from standard packed format (TP) */
+/* to standard full format (TR). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular. */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension ( N*(N+1)/2 ), */
+/* On entry, the upper or lower triangular matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* A (output) COMPLEX*16 array, dimension ( LDA, N ) */
+/* On exit, the triangular matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    --ap;
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    lower = lsame_(uplo, "L");
+    if (! lower && ! lsame_(uplo, "U")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZTPTTR", &i__1);
+	return 0;
+    }
+
+/* Unpack AP column by column; K is the running index into the packed */
+/* array (AP is stored columnwise, so K simply increments per element). */
+
+    if (lower) {
+	k = 0;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *n;
+	    for (i__ = j; i__ <= i__2; ++i__) {
+		++k;
+		i__3 = i__ + j * a_dim1;
+		i__4 = k;
+		a[i__3].r = ap[i__4].r, a[i__3].i = ap[i__4].i;
+	    }
+	}
+    } else {
+	k = 0;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = j;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		++k;
+		i__3 = i__ + j * a_dim1;
+		i__4 = k;
+		a[i__3].r = ap[i__4].r, a[i__3].i = ap[i__4].i;
+	    }
+	}
+    }
+
+
+    return 0;
+
+/* End of ZTPTTR */
+
+} /* ztpttr_ */
diff --git a/contrib/libs/clapack/ztrcon.c b/contrib/libs/clapack/ztrcon.c
new file mode 100644
index 0000000000..01a0724699
--- /dev/null
+++ b/contrib/libs/clapack/ztrcon.c
@@ -0,0 +1,250 @@
+/* ztrcon.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ztrcon_(char *norm, char *uplo, char *diag, integer *n,
+	doublecomplex *a, integer *lda, doublereal *rcond, doublecomplex *
+	work, doublereal *rwork, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1;
+    doublereal d__1, d__2;
+
+    /* Builtin functions */
+    double d_imag(doublecomplex *);
+
+    /* Local variables */
+    integer ix, kase, kase1;
+    doublereal scale;
+    extern logical lsame_(char *, char *);
+    integer isave[3];
+    doublereal anorm;
+    logical upper;
+    doublereal xnorm;
+    extern /* Subroutine */ int zlacn2_(integer *, doublecomplex *,
+	    doublecomplex *, doublereal *, integer *, integer *);
+    extern doublereal dlamch_(char *);
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    doublereal ainvnm;
+    extern integer izamax_(integer *, doublecomplex *, integer *);
+    logical onenrm;
+    extern /* Subroutine */ int zdrscl_(integer *, doublereal *,
+	    doublecomplex *, integer *);
+    char normin[1];
+    extern doublereal zlantr_(char *, char *, char *, integer *, integer *,
+	    doublecomplex *, integer *, doublereal *);
+    doublereal smlnum;
+    logical nounit;
+    extern /* Subroutine */ int zlatrs_(char *, char *, char *, char *,
+	    integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublereal *, doublereal *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTRCON estimates the reciprocal of the condition number of a */
+/* triangular matrix A, in either the 1-norm or the infinity-norm. */
+
+/* The norm of A is computed and an estimate is obtained for */
+/* norm(inv(A)), then the reciprocal of the condition number is */
+/* computed as */
+/* RCOND = 1 / ( norm(A) * norm(inv(A)) ). */
+
+/* Arguments */
+/* ========= */
+
+/* NORM (input) CHARACTER*1 */
+/* Specifies whether the 1-norm condition number or the */
+/* infinity-norm condition number is required: */
+/* = '1' or 'O': 1-norm; */
+/* = 'I': Infinity-norm. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The triangular matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of the array A contains the upper */
+/* triangular matrix, and the strictly lower triangular part of */
+/* A is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of the array A contains the lower triangular */
+/* matrix, and the strictly upper triangular part of A is not */
+/* referenced. If DIAG = 'U', the diagonal elements of A are */
+/* also not referenced and are assumed to be 1. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* RCOND (output) DOUBLE PRECISION */
+/* The reciprocal of the condition number of the matrix A, */
+/* computed as RCOND = 1/(norm(A) * norm(inv(A))). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --work;
+    --rwork;
+
+    /* Function Body */
+    *info = 0;
+    upper = lsame_(uplo, "U");
+    onenrm = *(unsigned char *)norm == '1' || lsame_(norm, "O");
+    nounit = lsame_(diag, "N");
+
+    if (! onenrm && ! lsame_(norm, "I")) {
+	*info = -1;
+    } else if (! upper && ! lsame_(uplo, "L")) {
+	*info = -2;
+    } else if (! nounit && ! lsame_(diag, "U")) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*lda < max(1,*n)) {
+	*info = -6;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZTRCON", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n == 0) {
+	*rcond = 1.;
+	return 0;
+    }
+
+    *rcond = 0.;
+    smlnum = dlamch_("Safe minimum") * (doublereal) max(1,*n);
+
+/* Compute the norm of the triangular matrix A. */
+
+    anorm = zlantr_(norm, uplo, diag, n, n, &a[a_offset], lda, &rwork[1]);
+
+/* Continue only if ANORM > 0. */
+
+    if (anorm > 0.) {
+
+/* Estimate the norm of the inverse of A. */
+
+	ainvnm = 0.;
+	*(unsigned char *)normin = 'N';
+	if (onenrm) {
+	    kase1 = 1;
+	} else {
+	    kase1 = 2;
+	}
+	kase = 0;
+/* Reverse-communication loop: ZLACN2 requests products with inv(A) or */
+/* inv(A') via KASE until it returns KASE = 0 with the estimate AINVNM. */
+L10:
+	zlacn2_(n, &work[*n + 1], &work[1], &ainvnm, &kase, isave);
+	if (kase != 0) {
+	    if (kase == kase1) {
+
+/* Multiply by inv(A). */
+
+		zlatrs_(uplo, "No transpose", diag, normin, n, &a[a_offset],
+			lda, &work[1], &scale, &rwork[1], info);
+	    } else {
+
+/* Multiply by inv(A'). */
+
+		zlatrs_(uplo, "Conjugate transpose", diag, normin, n, &a[
+			a_offset], lda, &work[1], &scale, &rwork[1], info);
+	    }
+	    *(unsigned char *)normin = 'Y';
+
+/* Multiply by 1/SCALE if doing so will not cause overflow. */
+
+	    if (scale != 1.) {
+		ix = izamax_(n, &work[1], &c__1);
+		i__1 = ix;
+		xnorm = (d__1 = work[i__1].r, abs(d__1)) + (d__2 = d_imag(&
+			work[ix]), abs(d__2));
+		if (scale < xnorm * smlnum || scale == 0.) {
+		    goto L20;
+		}
+		zdrscl_(n, &scale, &work[1], &c__1);
+	    }
+	    goto L10;
+	}
+
+/* Compute the estimate of the reciprocal condition number. */
+
+	if (ainvnm != 0.) {
+	    *rcond = 1. / anorm / ainvnm;
+	}
+    }
+
+L20:
+    return 0;
+
+/* End of ZTRCON */
+
+} /* ztrcon_ */
diff --git a/contrib/libs/clapack/ztrevc.c b/contrib/libs/clapack/ztrevc.c
new file mode 100644
index 0000000000..3e4f836ed2
--- /dev/null
+++ b/contrib/libs/clapack/ztrevc.c
@@ -0,0 +1,533 @@
+/* ztrevc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b2 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int ztrevc_(char *side, char *howmny, logical *select,
+ integer *n, doublecomplex *t, integer *ldt, doublecomplex *vl,
+ integer *ldvl, doublecomplex *vr, integer *ldvr, integer *mm, integer
+ *m, doublecomplex *work, doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+ i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k, ii, ki, is;
+ doublereal ulp;
+ logical allv;
+ doublereal unfl, ovfl, smin;
+ logical over;
+ doublereal scale;
+ extern logical lsame_(char *, char *);
+ doublereal remax;
+ logical leftv, bothv;
+ extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *);
+ logical somev;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), dlabad_(doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+ integer *, doublereal *, doublecomplex *, integer *);
+ extern integer izamax_(integer *, doublecomplex *, integer *);
+ logical rightv;
+ extern doublereal dzasum_(integer *, doublecomplex *, integer *);
+ doublereal smlnum;
+ extern /* Subroutine */ int zlatrs_(char *, char *, char *, char *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublereal *, doublereal *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTREVC computes some or all of the right and/or left eigenvectors of */
+/* a complex upper triangular matrix T. */
+/* Matrices of this type are produced by the Schur factorization of */
+/* a complex general matrix: A = Q*T*Q**H, as computed by ZHSEQR. */
+
+/* The right eigenvector x and the left eigenvector y of T corresponding */
+/* to an eigenvalue w are defined by: */
+
+/* T*x = w*x, (y**H)*T = w*(y**H) */
+
+/* where y**H denotes the conjugate transpose of the vector y. */
+/* The eigenvalues are not input to this routine, but are read directly */
+/* from the diagonal of T. */
+
+/* This routine returns the matrices X and/or Y of right and left */
+/* eigenvectors of T, or the products Q*X and/or Q*Y, where Q is an */
+/* input matrix. If Q is the unitary factor that reduces a matrix A to */
+/* Schur form T, then Q*X and Q*Y are the matrices of right and left */
+/* eigenvectors of A. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'R': compute right eigenvectors only; */
+/* = 'L': compute left eigenvectors only; */
+/* = 'B': compute both right and left eigenvectors. */
+
+/* HOWMNY (input) CHARACTER*1 */
+/* = 'A': compute all right and/or left eigenvectors; */
+/* = 'B': compute all right and/or left eigenvectors, */
+/* backtransformed using the matrices supplied in */
+/* VR and/or VL; */
+/* = 'S': compute selected right and/or left eigenvectors, */
+/* as indicated by the logical array SELECT. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* If HOWMNY = 'S', SELECT specifies the eigenvectors to be */
+/* computed. */
+/* The eigenvector corresponding to the j-th eigenvalue is */
+/* computed if SELECT(j) = .TRUE.. */
+/* Not referenced if HOWMNY = 'A' or 'B'. */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. N >= 0. */
+
+/* T (input/output) COMPLEX*16 array, dimension (LDT,N) */
+/* The upper triangular matrix T. T is modified, but restored */
+/* on exit. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max(1,N). */
+
+/* VL (input/output) COMPLEX*16 array, dimension (LDVL,MM) */
+/* On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must */
+/* contain an N-by-N matrix Q (usually the unitary matrix Q of */
+/* Schur vectors returned by ZHSEQR). */
+/* On exit, if SIDE = 'L' or 'B', VL contains: */
+/* if HOWMNY = 'A', the matrix Y of left eigenvectors of T; */
+/* if HOWMNY = 'B', the matrix Q*Y; */
+/* if HOWMNY = 'S', the left eigenvectors of T specified by */
+/* SELECT, stored consecutively in the columns */
+/* of VL, in the same order as their */
+/* eigenvalues. */
+/* Not referenced if SIDE = 'R'. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the array VL. LDVL >= 1, and if */
+/* SIDE = 'L' or 'B', LDVL >= N. */
+
+/* VR (input/output) COMPLEX*16 array, dimension (LDVR,MM) */
+/* On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must */
+/* contain an N-by-N matrix Q (usually the unitary matrix Q of */
+/* Schur vectors returned by ZHSEQR). */
+/* On exit, if SIDE = 'R' or 'B', VR contains: */
+/* if HOWMNY = 'A', the matrix X of right eigenvectors of T; */
+/* if HOWMNY = 'B', the matrix Q*X; */
+/* if HOWMNY = 'S', the right eigenvectors of T specified by */
+/* SELECT, stored consecutively in the columns */
+/* of VR, in the same order as their */
+/* eigenvalues. */
+/* Not referenced if SIDE = 'L'. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. LDVR >= 1, and if */
+/* SIDE = 'R' or 'B'; LDVR >= N. */
+
+/* MM (input) INTEGER */
+/* The number of columns in the arrays VL and/or VR. MM >= M. */
+
+/* M (output) INTEGER */
+/* The number of columns in the arrays VL and/or VR actually */
+/* used to store the eigenvectors. If HOWMNY = 'A' or 'B', M */
+/* is set to N. Each selected eigenvector occupies one */
+/* column. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The algorithm used in this program is basically backward (forward) */
+/* substitution, with scaling to make the the code robust against */
+/* possible overflow. */
+
+/* Each eigenvector is normalized so that the element of largest */
+/* magnitude has magnitude 1; here the magnitude of a complex number */
+/* (x,y) is taken to be |x| + |y|. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+ /* Parameter adjustments */
+ --select;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ bothv = lsame_(side, "B");
+ rightv = lsame_(side, "R") || bothv;
+ leftv = lsame_(side, "L") || bothv;
+
+ allv = lsame_(howmny, "A");
+ over = lsame_(howmny, "B");
+ somev = lsame_(howmny, "S");
+
+/* Set M to the number of columns required to store the selected */
+/* eigenvectors. */
+
+ if (somev) {
+ *m = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (select[j]) {
+ ++(*m);
+ }
+/* L10: */
+ }
+ } else {
+ *m = *n;
+ }
+
+ *info = 0;
+ if (! rightv && ! leftv) {
+ *info = -1;
+ } else if (! allv && ! over && ! somev) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldt < max(1,*n)) {
+ *info = -6;
+ } else if (*ldvl < 1 || leftv && *ldvl < *n) {
+ *info = -8;
+ } else if (*ldvr < 1 || rightv && *ldvr < *n) {
+ *info = -10;
+ } else if (*mm < *m) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTREVC", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Set the constants to control overflow. */
+
+ unfl = dlamch_("Safe minimum");
+ ovfl = 1. / unfl;
+ dlabad_(&unfl, &ovfl);
+ ulp = dlamch_("Precision");
+ smlnum = unfl * (*n / ulp);
+
+/* Store the diagonal elements of T in working array WORK. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + *n;
+ i__3 = i__ + i__ * t_dim1;
+ work[i__2].r = t[i__3].r, work[i__2].i = t[i__3].i;
+/* L20: */
+ }
+
+/* Compute 1-norm of each column of strictly upper triangular */
+/* part of T to control overflow in triangular solver. */
+
+ rwork[1] = 0.;
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j - 1;
+ rwork[j] = dzasum_(&i__2, &t[j * t_dim1 + 1], &c__1);
+/* L30: */
+ }
+
+ if (rightv) {
+
+/* Compute right eigenvectors. */
+
+ is = *m;
+ for (ki = *n; ki >= 1; --ki) {
+
+ if (somev) {
+ if (! select[ki]) {
+ goto L80;
+ }
+ }
+/* Computing MAX */
+ i__1 = ki + ki * t_dim1;
+ d__3 = ulp * ((d__1 = t[i__1].r, abs(d__1)) + (d__2 = d_imag(&t[
+ ki + ki * t_dim1]), abs(d__2)));
+ smin = max(d__3,smlnum);
+
+ work[1].r = 1., work[1].i = 0.;
+
+/* Form right-hand side. */
+
+ i__1 = ki - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k;
+ i__3 = k + ki * t_dim1;
+ z__1.r = -t[i__3].r, z__1.i = -t[i__3].i;
+ work[i__2].r = z__1.r, work[i__2].i = z__1.i;
+/* L40: */
+ }
+
+/* Solve the triangular system: */
+/* (T(1:KI-1,1:KI-1) - T(KI,KI))*X = SCALE*WORK. */
+
+ i__1 = ki - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k + k * t_dim1;
+ i__3 = k + k * t_dim1;
+ i__4 = ki + ki * t_dim1;
+ z__1.r = t[i__3].r - t[i__4].r, z__1.i = t[i__3].i - t[i__4]
+ .i;
+ t[i__2].r = z__1.r, t[i__2].i = z__1.i;
+ i__2 = k + k * t_dim1;
+ if ((d__1 = t[i__2].r, abs(d__1)) + (d__2 = d_imag(&t[k + k *
+ t_dim1]), abs(d__2)) < smin) {
+ i__3 = k + k * t_dim1;
+ t[i__3].r = smin, t[i__3].i = 0.;
+ }
+/* L50: */
+ }
+
+ if (ki > 1) {
+ i__1 = ki - 1;
+ zlatrs_("Upper", "No transpose", "Non-unit", "Y", &i__1, &t[
+ t_offset], ldt, &work[1], &scale, &rwork[1], info);
+ i__1 = ki;
+ work[i__1].r = scale, work[i__1].i = 0.;
+ }
+
+/* Copy the vector x or Q*x to VR and normalize. */
+
+ if (! over) {
+ zcopy_(&ki, &work[1], &c__1, &vr[is * vr_dim1 + 1], &c__1);
+
+ ii = izamax_(&ki, &vr[is * vr_dim1 + 1], &c__1);
+ i__1 = ii + is * vr_dim1;
+ remax = 1. / ((d__1 = vr[i__1].r, abs(d__1)) + (d__2 = d_imag(
+ &vr[ii + is * vr_dim1]), abs(d__2)));
+ zdscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1);
+
+ i__1 = *n;
+ for (k = ki + 1; k <= i__1; ++k) {
+ i__2 = k + is * vr_dim1;
+ vr[i__2].r = 0., vr[i__2].i = 0.;
+/* L60: */
+ }
+ } else {
+ if (ki > 1) {
+ i__1 = ki - 1;
+ z__1.r = scale, z__1.i = 0.;
+ zgemv_("N", n, &i__1, &c_b2, &vr[vr_offset], ldvr, &work[
+ 1], &c__1, &z__1, &vr[ki * vr_dim1 + 1], &c__1);
+ }
+
+ ii = izamax_(n, &vr[ki * vr_dim1 + 1], &c__1);
+ i__1 = ii + ki * vr_dim1;
+ remax = 1. / ((d__1 = vr[i__1].r, abs(d__1)) + (d__2 = d_imag(
+ &vr[ii + ki * vr_dim1]), abs(d__2)));
+ zdscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1);
+ }
+
+/* Set back the original diagonal elements of T. */
+
+ i__1 = ki - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k + k * t_dim1;
+ i__3 = k + *n;
+ t[i__2].r = work[i__3].r, t[i__2].i = work[i__3].i;
+/* L70: */
+ }
+
+ --is;
+L80:
+ ;
+ }
+ }
+
+ if (leftv) {
+
+/* Compute left eigenvectors. */
+
+ is = 1;
+ i__1 = *n;
+ for (ki = 1; ki <= i__1; ++ki) {
+
+ if (somev) {
+ if (! select[ki]) {
+ goto L130;
+ }
+ }
+/* Computing MAX */
+ i__2 = ki + ki * t_dim1;
+ d__3 = ulp * ((d__1 = t[i__2].r, abs(d__1)) + (d__2 = d_imag(&t[
+ ki + ki * t_dim1]), abs(d__2)));
+ smin = max(d__3,smlnum);
+
+ i__2 = *n;
+ work[i__2].r = 1., work[i__2].i = 0.;
+
+/* Form right-hand side. */
+
+ i__2 = *n;
+ for (k = ki + 1; k <= i__2; ++k) {
+ i__3 = k;
+ d_cnjg(&z__2, &t[ki + k * t_dim1]);
+ z__1.r = -z__2.r, z__1.i = -z__2.i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L90: */
+ }
+
+/* Solve the triangular system: */
+/* (T(KI+1:N,KI+1:N) - T(KI,KI))'*X = SCALE*WORK. */
+
+ i__2 = *n;
+ for (k = ki + 1; k <= i__2; ++k) {
+ i__3 = k + k * t_dim1;
+ i__4 = k + k * t_dim1;
+ i__5 = ki + ki * t_dim1;
+ z__1.r = t[i__4].r - t[i__5].r, z__1.i = t[i__4].i - t[i__5]
+ .i;
+ t[i__3].r = z__1.r, t[i__3].i = z__1.i;
+ i__3 = k + k * t_dim1;
+ if ((d__1 = t[i__3].r, abs(d__1)) + (d__2 = d_imag(&t[k + k *
+ t_dim1]), abs(d__2)) < smin) {
+ i__4 = k + k * t_dim1;
+ t[i__4].r = smin, t[i__4].i = 0.;
+ }
+/* L100: */
+ }
+
+ if (ki < *n) {
+ i__2 = *n - ki;
+ zlatrs_("Upper", "Conjugate transpose", "Non-unit", "Y", &
+ i__2, &t[ki + 1 + (ki + 1) * t_dim1], ldt, &work[ki +
+ 1], &scale, &rwork[1], info);
+ i__2 = ki;
+ work[i__2].r = scale, work[i__2].i = 0.;
+ }
+
+/* Copy the vector x or Q*x to VL and normalize. */
+
+ if (! over) {
+ i__2 = *n - ki + 1;
+ zcopy_(&i__2, &work[ki], &c__1, &vl[ki + is * vl_dim1], &c__1)
+ ;
+
+ i__2 = *n - ki + 1;
+ ii = izamax_(&i__2, &vl[ki + is * vl_dim1], &c__1) + ki - 1;
+ i__2 = ii + is * vl_dim1;
+ remax = 1. / ((d__1 = vl[i__2].r, abs(d__1)) + (d__2 = d_imag(
+ &vl[ii + is * vl_dim1]), abs(d__2)));
+ i__2 = *n - ki + 1;
+ zdscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1);
+
+ i__2 = ki - 1;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + is * vl_dim1;
+ vl[i__3].r = 0., vl[i__3].i = 0.;
+/* L110: */
+ }
+ } else {
+ if (ki < *n) {
+ i__2 = *n - ki;
+ z__1.r = scale, z__1.i = 0.;
+ zgemv_("N", n, &i__2, &c_b2, &vl[(ki + 1) * vl_dim1 + 1],
+ ldvl, &work[ki + 1], &c__1, &z__1, &vl[ki *
+ vl_dim1 + 1], &c__1);
+ }
+
+ ii = izamax_(n, &vl[ki * vl_dim1 + 1], &c__1);
+ i__2 = ii + ki * vl_dim1;
+ remax = 1. / ((d__1 = vl[i__2].r, abs(d__1)) + (d__2 = d_imag(
+ &vl[ii + ki * vl_dim1]), abs(d__2)));
+ zdscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1);
+ }
+
+/* Set back the original diagonal elements of T. */
+
+ i__2 = *n;
+ for (k = ki + 1; k <= i__2; ++k) {
+ i__3 = k + k * t_dim1;
+ i__4 = k + *n;
+ t[i__3].r = work[i__4].r, t[i__3].i = work[i__4].i;
+/* L120: */
+ }
+
+ ++is;
+L130:
+ ;
+ }
+ }
+
+ return 0;
+
+/* End of ZTREVC */
+
+} /* ztrevc_ */
diff --git a/contrib/libs/clapack/ztrexc.c b/contrib/libs/clapack/ztrexc.c
new file mode 100644
index 0000000000..730babeea0
--- /dev/null
+++ b/contrib/libs/clapack/ztrexc.c
@@ -0,0 +1,216 @@
+/* ztrexc.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ztrexc_(char *compq, integer *n, doublecomplex *t,
+ integer *ldt, doublecomplex *q, integer *ldq, integer *ifst, integer *
+ ilst, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, t_dim1, t_offset, i__1, i__2, i__3;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer k, m1, m2, m3;
+ doublereal cs;
+ doublecomplex t11, t22, sn, temp;
+ extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublereal *, doublecomplex *);
+ extern logical lsame_(char *, char *);
+ logical wantq;
+ extern /* Subroutine */ int xerbla_(char *, integer *), zlartg_(
+ doublecomplex *, doublecomplex *, doublereal *, doublecomplex *,
+ doublecomplex *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTREXC reorders the Schur factorization of a complex matrix */
+/* A = Q*T*Q**H, so that the diagonal element of T with row index IFST */
+/* is moved to row ILST. */
+
+/* The Schur form T is reordered by a unitary similarity transformation */
+/* Z**H*T*Z, and optionally the matrix Q of Schur vectors is updated by */
+/* postmultplying it with Z. */
+
+/* Arguments */
+/* ========= */
+
+/* COMPQ (input) CHARACTER*1 */
+/* = 'V': update the matrix Q of Schur vectors; */
+/* = 'N': do not update Q. */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. N >= 0. */
+
+/* T (input/output) COMPLEX*16 array, dimension (LDT,N) */
+/* On entry, the upper triangular matrix T. */
+/* On exit, the reordered upper triangular matrix. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max(1,N). */
+
+/* Q (input/output) COMPLEX*16 array, dimension (LDQ,N) */
+/* On entry, if COMPQ = 'V', the matrix Q of Schur vectors. */
+/* On exit, if COMPQ = 'V', Q has been postmultiplied by the */
+/* unitary transformation matrix Z which reorders T. */
+/* If COMPQ = 'N', Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* IFST (input) INTEGER */
+/* ILST (input) INTEGER */
+/* Specify the reordering of the diagonal elements of T: */
+/* The element with row index IFST is moved to row ILST by a */
+/* sequence of transpositions between adjacent elements. */
+/* 1 <= IFST <= N; 1 <= ILST <= N. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters. */
+
+ /* Parameter adjustments */
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+
+ /* Function Body */
+ *info = 0;
+ wantq = lsame_(compq, "V");
+ if (! lsame_(compq, "N") && ! wantq) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldt < max(1,*n)) {
+ *info = -4;
+ } else if (*ldq < 1 || wantq && *ldq < max(1,*n)) {
+ *info = -6;
+ } else if (*ifst < 1 || *ifst > *n) {
+ *info = -7;
+ } else if (*ilst < 1 || *ilst > *n) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTREXC", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 1 || *ifst == *ilst) {
+ return 0;
+ }
+
+ if (*ifst < *ilst) {
+
+/* Move the IFST-th diagonal element forward down the diagonal. */
+
+ m1 = 0;
+ m2 = -1;
+ m3 = 1;
+ } else {
+
+/* Move the IFST-th diagonal element backward up the diagonal. */
+
+ m1 = -1;
+ m2 = 0;
+ m3 = -1;
+ }
+
+ i__1 = *ilst + m2;
+ i__2 = m3;
+ for (k = *ifst + m1; i__2 < 0 ? k >= i__1 : k <= i__1; k += i__2) {
+
+/* Interchange the k-th and (k+1)-th diagonal elements. */
+
+ i__3 = k + k * t_dim1;
+ t11.r = t[i__3].r, t11.i = t[i__3].i;
+ i__3 = k + 1 + (k + 1) * t_dim1;
+ t22.r = t[i__3].r, t22.i = t[i__3].i;
+
+/* Determine the transformation to perform the interchange. */
+
+ z__1.r = t22.r - t11.r, z__1.i = t22.i - t11.i;
+ zlartg_(&t[k + (k + 1) * t_dim1], &z__1, &cs, &sn, &temp);
+
+/* Apply transformation to the matrix T. */
+
+ if (k + 2 <= *n) {
+ i__3 = *n - k - 1;
+ zrot_(&i__3, &t[k + (k + 2) * t_dim1], ldt, &t[k + 1 + (k + 2) *
+ t_dim1], ldt, &cs, &sn);
+ }
+ i__3 = k - 1;
+ d_cnjg(&z__1, &sn);
+ zrot_(&i__3, &t[k * t_dim1 + 1], &c__1, &t[(k + 1) * t_dim1 + 1], &
+ c__1, &cs, &z__1);
+
+ i__3 = k + k * t_dim1;
+ t[i__3].r = t22.r, t[i__3].i = t22.i;
+ i__3 = k + 1 + (k + 1) * t_dim1;
+ t[i__3].r = t11.r, t[i__3].i = t11.i;
+
+ if (wantq) {
+
+/* Accumulate transformation in the matrix Q. */
+
+ d_cnjg(&z__1, &sn);
+ zrot_(n, &q[k * q_dim1 + 1], &c__1, &q[(k + 1) * q_dim1 + 1], &
+ c__1, &cs, &z__1);
+ }
+
+/* L10: */
+ }
+
+ return 0;
+
+/* End of ZTREXC */
+
+} /* ztrexc_ */
diff --git a/contrib/libs/clapack/ztrrfs.c b/contrib/libs/clapack/ztrrfs.c
new file mode 100644
index 0000000000..c18131c539
--- /dev/null
+++ b/contrib/libs/clapack/ztrrfs.c
@@ -0,0 +1,565 @@
+/* ztrrfs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ztrrfs_(char *uplo, char *trans, char *diag, integer *n,
+ integer *nrhs, doublecomplex *a, integer *lda, doublecomplex *b,
+ integer *ldb, doublecomplex *x, integer *ldx, doublereal *ferr,
+ doublereal *berr, doublecomplex *work, doublereal *rwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, x_dim1, x_offset, i__1, i__2,
+ i__3, i__4, i__5;
+ doublereal d__1, d__2, d__3, d__4;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k;
+ doublereal s, xk;
+ integer nz;
+ doublereal eps;
+ integer kase;
+ doublereal safe1, safe2;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ logical upper;
+ extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zaxpy_(integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), ztrmv_(
+ char *, char *, char *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), ztrsv_(char *
+, char *, char *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *), zlacn2_(
+ integer *, doublecomplex *, doublecomplex *, doublereal *,
+ integer *, integer *);
+ extern doublereal dlamch_(char *);
+ doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran;
+ char transn[1], transt[1];
+ logical nounit;
+ doublereal lstres;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTRRFS provides error bounds and backward error estimates for the */
+/* solution to a system of linear equations with a triangular */
+/* coefficient matrix. */
+
+/* The solution matrix X must be computed by ZTRTRS or some other */
+/* means before entering this routine. ZTRRFS does not do iterative */
+/* refinement because doing so cannot improve the backward error. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrices B and X. NRHS >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The triangular matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of the array A contains the upper */
+/* triangular matrix, and the strictly lower triangular part of */
+/* A is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of the array A contains the lower triangular */
+/* matrix, and the strictly upper triangular part of A is not */
+/* referenced. If DIAG = 'U', the diagonal elements of A are */
+/* also not referenced and are assumed to be 1. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* The right hand side matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* X (input) COMPLEX*16 array, dimension (LDX,NRHS) */
+/* The solution matrix X. */
+
+/* LDX (input) INTEGER */
+/* The leading dimension of the array X. LDX >= max(1,N). */
+
+/* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The estimated forward error bound for each solution vector */
+/* X(j) (the j-th column of the solution matrix X). */
+/* If XTRUE is the true solution corresponding to X(j), FERR(j) */
+/* is an estimated upper bound for the magnitude of the largest */
+/* element in (X(j) - XTRUE) divided by the magnitude of the */
+/* largest element in X(j). The estimate is as reliable as */
+/* the estimate for RCOND, and is almost always a slight */
+/* overestimate of the true error. */
+
+/* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */
+/* The componentwise relative backward error of each solution */
+/* vector X(j) (i.e., the smallest relative change in */
+/* any element of A or B that makes X(j) an exact solution). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (2*N) */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ --ferr;
+ --berr;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*nrhs < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldx < max(1,*n)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTRRFS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0 || *nrhs == 0) {
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+ ferr[j] = 0.;
+ berr[j] = 0.;
+/* L10: */
+ }
+ return 0;
+ }
+
+ if (notran) {
+ *(unsigned char *)transn = 'N';
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transn = 'C';
+ *(unsigned char *)transt = 'N';
+ }
+
+/* NZ = maximum number of nonzero elements in each row of A, plus 1 */
+
+ nz = *n + 1;
+ eps = dlamch_("Epsilon");
+ safmin = dlamch_("Safe minimum");
+ safe1 = nz * safmin;
+ safe2 = safe1 / eps;
+
+/* Do for each right hand side */
+
+ i__1 = *nrhs;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute residual R = B - op(A) * X, */
+/* where op(A) = A, A**T, or A**H, depending on TRANS. */
+
+ zcopy_(n, &x[j * x_dim1 + 1], &c__1, &work[1], &c__1);
+ ztrmv_(uplo, trans, diag, n, &a[a_offset], lda, &work[1], &c__1);
+ z__1.r = -1., z__1.i = -0.;
+ zaxpy_(n, &z__1, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1);
+
+/* Compute componentwise relative backward error from formula */
+
+/* max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) ) */
+
+/* where abs(Z) is the componentwise absolute value of the matrix */
+/* or vector Z. If the i-th component of the denominator is less */
+/* than SAFE2, then SAFE1 is added to the i-th components of the */
+/* numerator and denominator before dividing. */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[i__] = (d__1 = b[i__3].r, abs(d__1)) + (d__2 = d_imag(&b[
+ i__ + j * b_dim1]), abs(d__2));
+/* L20: */
+ }
+
+ if (notran) {
+
+/* Compute abs(A)*abs(X) + abs(B). */
+
+ if (upper) {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * x_dim1;
+ xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&
+ x[k + j * x_dim1]), abs(d__2));
+ i__3 = k;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ rwork[i__] += ((d__1 = a[i__4].r, abs(d__1)) + (
+ d__2 = d_imag(&a[i__ + k * a_dim1]), abs(
+ d__2))) * xk;
+/* L30: */
+ }
+/* L40: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * x_dim1;
+ xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&
+ x[k + j * x_dim1]), abs(d__2));
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ rwork[i__] += ((d__1 = a[i__4].r, abs(d__1)) + (
+ d__2 = d_imag(&a[i__ + k * a_dim1]), abs(
+ d__2))) * xk;
+/* L50: */
+ }
+ rwork[k] += xk;
+/* L60: */
+ }
+ }
+ } else {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * x_dim1;
+ xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&
+ x[k + j * x_dim1]), abs(d__2));
+ i__3 = *n;
+ for (i__ = k; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ rwork[i__] += ((d__1 = a[i__4].r, abs(d__1)) + (
+ d__2 = d_imag(&a[i__ + k * a_dim1]), abs(
+ d__2))) * xk;
+/* L70: */
+ }
+/* L80: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * x_dim1;
+ xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&
+ x[k + j * x_dim1]), abs(d__2));
+ i__3 = *n;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ rwork[i__] += ((d__1 = a[i__4].r, abs(d__1)) + (
+ d__2 = d_imag(&a[i__ + k * a_dim1]), abs(
+ d__2))) * xk;
+/* L90: */
+ }
+ rwork[k] += xk;
+/* L100: */
+ }
+ }
+ }
+ } else {
+
+/* Compute abs(A**H)*abs(X) + abs(B). */
+
+ if (upper) {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ i__3 = k;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ i__5 = i__ + j * x_dim1;
+ s += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 =
+ d_imag(&a[i__ + k * a_dim1]), abs(d__2)))
+ * ((d__3 = x[i__5].r, abs(d__3)) + (d__4 =
+ d_imag(&x[i__ + j * x_dim1]), abs(d__4)))
+ ;
+/* L110: */
+ }
+ rwork[k] += s;
+/* L120: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * x_dim1;
+ s = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[
+ k + j * x_dim1]), abs(d__2));
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ i__5 = i__ + j * x_dim1;
+ s += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 =
+ d_imag(&a[i__ + k * a_dim1]), abs(d__2)))
+ * ((d__3 = x[i__5].r, abs(d__3)) + (d__4 =
+ d_imag(&x[i__ + j * x_dim1]), abs(d__4)))
+ ;
+/* L130: */
+ }
+ rwork[k] += s;
+/* L140: */
+ }
+ }
+ } else {
+ if (nounit) {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ s = 0.;
+ i__3 = *n;
+ for (i__ = k; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ i__5 = i__ + j * x_dim1;
+ s += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 =
+ d_imag(&a[i__ + k * a_dim1]), abs(d__2)))
+ * ((d__3 = x[i__5].r, abs(d__3)) + (d__4 =
+ d_imag(&x[i__ + j * x_dim1]), abs(d__4)))
+ ;
+/* L150: */
+ }
+ rwork[k] += s;
+/* L160: */
+ }
+ } else {
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * x_dim1;
+ s = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[
+ k + j * x_dim1]), abs(d__2));
+ i__3 = *n;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + k * a_dim1;
+ i__5 = i__ + j * x_dim1;
+ s += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 =
+ d_imag(&a[i__ + k * a_dim1]), abs(d__2)))
+ * ((d__3 = x[i__5].r, abs(d__3)) + (d__4 =
+ d_imag(&x[i__ + j * x_dim1]), abs(d__4)))
+ ;
+/* L170: */
+ }
+ rwork[k] += s;
+/* L180: */
+ }
+ }
+ }
+ }
+ s = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+/* Computing MAX */
+ i__3 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2))) / rwork[i__];
+ s = max(d__3,d__4);
+ } else {
+/* Computing MAX */
+ i__3 = i__;
+ d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + safe1) / (rwork[i__]
+ + safe1);
+ s = max(d__3,d__4);
+ }
+/* L190: */
+ }
+ berr[j] = s;
+
+/* Bound error from formula */
+
+/* norm(X - XTRUE) / norm(X) .le. FERR = */
+/* norm( abs(inv(op(A)))* */
+/* ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X) */
+
+/* where */
+/* norm(Z) is the magnitude of the largest component of Z */
+/* inv(op(A)) is the inverse of op(A) */
+/* abs(Z) is the componentwise absolute value of the matrix or */
+/* vector Z */
+/* NZ is the maximum number of nonzeros in any row of A, plus 1 */
+/* EPS is machine epsilon */
+
+/* The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B)) */
+/* is incremented by SAFE1 if the i-th component of */
+/* abs(op(A))*abs(X) + abs(B) is less than SAFE2. */
+
+/* Use ZLACN2 to estimate the infinity-norm of the matrix */
+/* inv(op(A)) * diag(W), */
+/* where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (rwork[i__] > safe2) {
+ i__3 = i__;
+ rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ ;
+ } else {
+ i__3 = i__;
+ rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__]
+ + safe1;
+ }
+/* L200: */
+ }
+
+ kase = 0;
+L210:
+ zlacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Multiply by diag(W)*inv(op(A)**H). */
+
+ ztrsv_(uplo, transt, diag, n, &a[a_offset], lda, &work[1], &
+ c__1);
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L220: */
+ }
+ } else {
+
+/* Multiply by inv(op(A))*diag(W). */
+
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__;
+ z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4]
+ * work[i__5].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L230: */
+ }
+ ztrsv_(uplo, transn, diag, n, &a[a_offset], lda, &work[1], &
+ c__1);
+ }
+ goto L210;
+ }
+
+/* Normalize error. */
+
+ lstres = 0.;
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ i__3 = i__ + j * x_dim1;
+ d__3 = lstres, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 =
+ d_imag(&x[i__ + j * x_dim1]), abs(d__2));
+ lstres = max(d__3,d__4);
+/* L240: */
+ }
+ if (lstres != 0.) {
+ ferr[j] /= lstres;
+ }
+
+/* L250: */
+ }
+
+ return 0;
+
+/* End of ZTRRFS */
+
+} /* ztrrfs_ */
diff --git a/contrib/libs/clapack/ztrsen.c b/contrib/libs/clapack/ztrsen.c
new file mode 100644
index 0000000000..a30126591a
--- /dev/null
+++ b/contrib/libs/clapack/ztrsen.c
@@ -0,0 +1,422 @@
+/* ztrsen.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c_n1 = -1;
+
+/* Subroutine */ int ztrsen_(char *job, char *compq, logical *select, integer
+ *n, doublecomplex *t, integer *ldt, doublecomplex *q, integer *ldq,
+ doublecomplex *w, integer *m, doublereal *s, doublereal *sep,
+ doublecomplex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, t_dim1, t_offset, i__1, i__2, i__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ integer k, n1, n2, nn, ks;
+ doublereal est;
+ integer kase, ierr;
+ doublereal scale;
+ extern logical lsame_(char *, char *);
+ integer isave[3], lwmin;
+ logical wantq, wants;
+ doublereal rnorm, rwork[1];
+ extern /* Subroutine */ int zlacn2_(integer *, doublecomplex *,
+ doublecomplex *, doublereal *, integer *, integer *), xerbla_(
+ char *, integer *);
+ extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ logical wantbh;
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ logical wantsp;
+ extern /* Subroutine */ int ztrexc_(char *, integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *, integer *, integer *,
+ integer *);
+ logical lquery;
+ extern /* Subroutine */ int ztrsyl_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublereal *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTRSEN reorders the Schur factorization of a complex matrix */
+/* A = Q*T*Q**H, so that a selected cluster of eigenvalues appears in */
+/* the leading positions on the diagonal of the upper triangular matrix */
+/* T, and the leading columns of Q form an orthonormal basis of the */
+/* corresponding right invariant subspace. */
+
+/* Optionally the routine computes the reciprocal condition numbers of */
+/* the cluster of eigenvalues and/or the invariant subspace. */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies whether condition numbers are required for the */
+/* cluster of eigenvalues (S) or the invariant subspace (SEP): */
+/* = 'N': none; */
+/* = 'E': for eigenvalues only (S); */
+/* = 'V': for invariant subspace only (SEP); */
+/* = 'B': for both eigenvalues and invariant subspace (S and */
+/* SEP). */
+
+/* COMPQ (input) CHARACTER*1 */
+/* = 'V': update the matrix Q of Schur vectors; */
+/* = 'N': do not update Q. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* SELECT specifies the eigenvalues in the selected cluster. To */
+/* select the j-th eigenvalue, SELECT(j) must be set to .TRUE.. */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. N >= 0. */
+
+/* T (input/output) COMPLEX*16 array, dimension (LDT,N) */
+/* On entry, the upper triangular matrix T. */
+/* On exit, T is overwritten by the reordered matrix T, with the */
+/* selected eigenvalues as the leading diagonal elements. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max(1,N). */
+
+/* Q (input/output) COMPLEX*16 array, dimension (LDQ,N) */
+/* On entry, if COMPQ = 'V', the matrix Q of Schur vectors. */
+/* On exit, if COMPQ = 'V', Q has been postmultiplied by the */
+/* unitary transformation matrix which reorders T; the leading M */
+/* columns of Q form an orthonormal basis for the specified */
+/* invariant subspace. */
+/* If COMPQ = 'N', Q is not referenced. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. */
+/* LDQ >= 1; and if COMPQ = 'V', LDQ >= N. */
+
+/* W (output) COMPLEX*16 array, dimension (N) */
+/* The reordered eigenvalues of T, in the same order as they */
+/* appear on the diagonal of T. */
+
+/* M (output) INTEGER */
+/* The dimension of the specified invariant subspace. */
+/* 0 <= M <= N. */
+
+/* S (output) DOUBLE PRECISION */
+/* If JOB = 'E' or 'B', S is a lower bound on the reciprocal */
+/* condition number for the selected cluster of eigenvalues. */
+/* S cannot underestimate the true reciprocal condition number */
+/* by more than a factor of sqrt(N). If M = 0 or N, S = 1. */
+/* If JOB = 'N' or 'V', S is not referenced. */
+
+/* SEP (output) DOUBLE PRECISION */
+/* If JOB = 'V' or 'B', SEP is the estimated reciprocal */
+/* condition number of the specified invariant subspace. If */
+/* M = 0 or N, SEP = norm(T). */
+/* If JOB = 'N' or 'E', SEP is not referenced. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If JOB = 'N', LWORK >= 1; */
+/* if JOB = 'E', LWORK = max(1,M*(N-M)); */
+/* if JOB = 'V' or 'B', LWORK >= max(1,2*M*(N-M)). */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* ZTRSEN first collects the selected eigenvalues by computing a unitary */
+/* transformation Z to move them to the top left corner of T. In other */
+/* words, the selected eigenvalues are the eigenvalues of T11 in: */
+
+/* Z'*T*Z = ( T11 T12 ) n1 */
+/* ( 0 T22 ) n2 */
+/* n1 n2 */
+
+/* where N = n1+n2 and Z' means the conjugate transpose of Z. The first */
+/* n1 columns of Z span the specified invariant subspace of T. */
+
+/* If T has been obtained from the Schur factorization of a matrix */
+/* A = Q*T*Q', then the reordered Schur factorization of A is given by */
+/* A = (Q*Z)*(Z'*T*Z)*(Q*Z)', and the first n1 columns of Q*Z span the */
+/* corresponding invariant subspace of A. */
+
+/* The reciprocal condition number of the average of the eigenvalues of */
+/* T11 may be returned in S. S lies between 0 (very badly conditioned) */
+/* and 1 (very well conditioned). It is computed as follows. First we */
+/* compute R so that */
+
+/* P = ( I R ) n1 */
+/* ( 0 0 ) n2 */
+/* n1 n2 */
+
+/* is the projector on the invariant subspace associated with T11. */
+/* R is the solution of the Sylvester equation: */
+
+/* T11*R - R*T22 = T12. */
+
+/* Let F-norm(M) denote the Frobenius-norm of M and 2-norm(M) denote */
+/* the two-norm of M. Then S is computed as the lower bound */
+
+/* (1 + F-norm(R)**2)**(-1/2) */
+
+/* on the reciprocal of 2-norm(P), the true reciprocal condition number. */
+/* S cannot underestimate 1 / 2-norm(P) by more than a factor of */
+/* sqrt(N). */
+
+/* An approximate error bound for the computed average of the */
+/* eigenvalues of T11 is */
+
+/* EPS * norm(T) / S */
+
+/* where EPS is the machine precision. */
+
+/* The reciprocal condition number of the right invariant subspace */
+/* spanned by the first n1 columns of Z (or of Q*Z) is returned in SEP. */
+/* SEP is defined as the separation of T11 and T22: */
+
+/* sep( T11, T22 ) = sigma-min( C ) */
+
+/* where sigma-min(C) is the smallest singular value of the */
+/* n1*n2-by-n1*n2 matrix */
+
+/* C = kprod( I(n2), T11 ) - kprod( transpose(T22), I(n1) ) */
+
+/* I(m) is an m by m identity matrix, and kprod denotes the Kronecker */
+/* product. We estimate sigma-min(C) by the reciprocal of an estimate of */
+/* the 1-norm of inverse(C). The true reciprocal 1-norm of inverse(C) */
+/* cannot differ from sigma-min(C) by more than a factor of sqrt(n1*n2). */
+
+/* When SEP is small, small changes in T can cause large changes in */
+/* the invariant subspace. An approximate bound on the maximum angular */
+/* error in the computed right invariant subspace is */
+
+/* EPS * norm(T) / SEP */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters. */
+
+ /* Parameter adjustments */
+ --select;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --w;
+ --work;
+
+ /* Function Body */
+ wantbh = lsame_(job, "B");
+ wants = lsame_(job, "E") || wantbh;
+ wantsp = lsame_(job, "V") || wantbh;
+ wantq = lsame_(compq, "V");
+
+/* Set M to the number of selected eigenvalues. */
+
+ *m = 0;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (select[k]) {
+ ++(*m);
+ }
+/* L10: */
+ }
+
+ n1 = *m;
+ n2 = *n - *m;
+ nn = n1 * n2;
+
+ *info = 0;
+ lquery = *lwork == -1;
+
+ if (wantsp) {
+/* Computing MAX */
+ i__1 = 1, i__2 = nn << 1;
+ lwmin = max(i__1,i__2);
+ } else if (lsame_(job, "N")) {
+ lwmin = 1;
+ } else if (lsame_(job, "E")) {
+ lwmin = max(1,nn);
+ }
+
+ if (! lsame_(job, "N") && ! wants && ! wantsp) {
+ *info = -1;
+ } else if (! lsame_(compq, "N") && ! wantq) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldt < max(1,*n)) {
+ *info = -6;
+ } else if (*ldq < 1 || wantq && *ldq < *n) {
+ *info = -8;
+ } else if (*lwork < lwmin && ! lquery) {
+ *info = -14;
+ }
+
+ if (*info == 0) {
+ work[1].r = (doublereal) lwmin, work[1].i = 0.;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTRSEN", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == *n || *m == 0) {
+ if (wants) {
+ *s = 1.;
+ }
+ if (wantsp) {
+ *sep = zlange_("1", n, n, &t[t_offset], ldt, rwork);
+ }
+ goto L40;
+ }
+
+/* Collect the selected eigenvalues at the top left corner of T. */
+
+ ks = 0;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (select[k]) {
+ ++ks;
+
+/* Swap the K-th eigenvalue to position KS. */
+
+ if (k != ks) {
+ ztrexc_(compq, n, &t[t_offset], ldt, &q[q_offset], ldq, &k, &
+ ks, &ierr);
+ }
+ }
+/* L20: */
+ }
+
+ if (wants) {
+
+/* Solve the Sylvester equation for R: */
+
+/* T11*R - R*T22 = scale*T12 */
+
+ zlacpy_("F", &n1, &n2, &t[(n1 + 1) * t_dim1 + 1], ldt, &work[1], &n1);
+ ztrsyl_("N", "N", &c_n1, &n1, &n2, &t[t_offset], ldt, &t[n1 + 1 + (n1
+ + 1) * t_dim1], ldt, &work[1], &n1, &scale, &ierr);
+
+/* Estimate the reciprocal of the condition number of the cluster */
+/* of eigenvalues. */
+
+ rnorm = zlange_("F", &n1, &n2, &work[1], &n1, rwork);
+ if (rnorm == 0.) {
+ *s = 1.;
+ } else {
+ *s = scale / (sqrt(scale * scale / rnorm + rnorm) * sqrt(rnorm));
+ }
+ }
+
+ if (wantsp) {
+
+/* Estimate sep(T11,T22). */
+
+ est = 0.;
+ kase = 0;
+L30:
+ zlacn2_(&nn, &work[nn + 1], &work[1], &est, &kase, isave);
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Solve T11*R - R*T22 = scale*X. */
+
+ ztrsyl_("N", "N", &c_n1, &n1, &n2, &t[t_offset], ldt, &t[n1 +
+ 1 + (n1 + 1) * t_dim1], ldt, &work[1], &n1, &scale, &
+ ierr);
+ } else {
+
+/* Solve T11'*R - R*T22' = scale*X. */
+
+ ztrsyl_("C", "C", &c_n1, &n1, &n2, &t[t_offset], ldt, &t[n1 +
+ 1 + (n1 + 1) * t_dim1], ldt, &work[1], &n1, &scale, &
+ ierr);
+ }
+ goto L30;
+ }
+
+ *sep = scale / est;
+ }
+
+L40:
+
+/* Copy reordered eigenvalues to W. */
+
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k;
+ i__3 = k + k * t_dim1;
+ w[i__2].r = t[i__3].r, w[i__2].i = t[i__3].i;
+/* L50: */
+ }
+
+ work[1].r = (doublereal) lwmin, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZTRSEN */
+
+} /* ztrsen_ */
diff --git a/contrib/libs/clapack/ztrsna.c b/contrib/libs/clapack/ztrsna.c
new file mode 100644
index 0000000000..0230abf2ad
--- /dev/null
+++ b/contrib/libs/clapack/ztrsna.c
@@ -0,0 +1,446 @@
+/* ztrsna.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ztrsna_(char *job, char *howmny, logical *select,
+ integer *n, doublecomplex *t, integer *ldt, doublecomplex *vl,
+ integer *ldvl, doublecomplex *vr, integer *ldvr, doublereal *s,
+ doublereal *sep, integer *mm, integer *m, doublecomplex *work,
+ integer *ldwork, doublereal *rwork, integer *info)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset,
+ work_dim1, work_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1, d__2;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ double z_abs(doublecomplex *), d_imag(doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, k, ks, ix;
+ doublereal eps, est;
+ integer kase, ierr;
+ doublecomplex prod;
+ doublereal lnrm, rnrm, scale;
+ extern logical lsame_(char *, char *);
+ integer isave[3];
+ extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ doublecomplex dummy[1];
+ logical wants;
+ doublereal xnorm;
+ extern /* Subroutine */ int zlacn2_(integer *, doublecomplex *,
+ doublecomplex *, doublereal *, integer *, integer *), dlabad_(
+ doublereal *, doublereal *);
+ extern doublereal dznrm2_(integer *, doublecomplex *, integer *), dlamch_(
+ char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ doublereal bignum;
+ logical wantbh;
+ extern integer izamax_(integer *, doublecomplex *, integer *);
+ logical somcon;
+ extern /* Subroutine */ int zdrscl_(integer *, doublereal *,
+ doublecomplex *, integer *);
+ char normin[1];
+ extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ doublereal smlnum;
+ logical wantsp;
+ extern /* Subroutine */ int zlatrs_(char *, char *, char *, char *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublereal *, doublereal *, integer *), ztrexc_(char *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, integer *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTRSNA estimates reciprocal condition numbers for specified */
+/* eigenvalues and/or right eigenvectors of a complex upper triangular */
+/* matrix T (or of any matrix Q*T*Q**H with Q unitary). */
+
+/* Arguments */
+/* ========= */
+
+/* JOB (input) CHARACTER*1 */
+/* Specifies whether condition numbers are required for */
+/* eigenvalues (S) or eigenvectors (SEP): */
+/* = 'E': for eigenvalues only (S); */
+/* = 'V': for eigenvectors only (SEP); */
+/* = 'B': for both eigenvalues and eigenvectors (S and SEP). */
+
+/* HOWMNY (input) CHARACTER*1 */
+/* = 'A': compute condition numbers for all eigenpairs; */
+/* = 'S': compute condition numbers for selected eigenpairs */
+/* specified by the array SELECT. */
+
+/* SELECT (input) LOGICAL array, dimension (N) */
+/* If HOWMNY = 'S', SELECT specifies the eigenpairs for which */
+/* condition numbers are required. To select condition numbers */
+/* for the j-th eigenpair, SELECT(j) must be set to .TRUE.. */
+/* If HOWMNY = 'A', SELECT is not referenced. */
+
+/* N (input) INTEGER */
+/* The order of the matrix T. N >= 0. */
+
+/* T (input) COMPLEX*16 array, dimension (LDT,N) */
+/* The upper triangular matrix T. */
+
+/* LDT (input) INTEGER */
+/* The leading dimension of the array T. LDT >= max(1,N). */
+
+/* VL (input) COMPLEX*16 array, dimension (LDVL,M) */
+/* If JOB = 'E' or 'B', VL must contain left eigenvectors of T */
+/* (or of any Q*T*Q**H with Q unitary), corresponding to the */
+/* eigenpairs specified by HOWMNY and SELECT. The eigenvectors */
+/* must be stored in consecutive columns of VL, as returned by */
+/* ZHSEIN or ZTREVC. */
+/* If JOB = 'V', VL is not referenced. */
+
+/* LDVL (input) INTEGER */
+/* The leading dimension of the array VL. */
+/* LDVL >= 1; and if JOB = 'E' or 'B', LDVL >= N. */
+
+/* VR (input) COMPLEX*16 array, dimension (LDVR,M) */
+/* If JOB = 'E' or 'B', VR must contain right eigenvectors of T */
+/* (or of any Q*T*Q**H with Q unitary), corresponding to the */
+/* eigenpairs specified by HOWMNY and SELECT. The eigenvectors */
+/* must be stored in consecutive columns of VR, as returned by */
+/* ZHSEIN or ZTREVC. */
+/* If JOB = 'V', VR is not referenced. */
+
+/* LDVR (input) INTEGER */
+/* The leading dimension of the array VR. */
+/* LDVR >= 1; and if JOB = 'E' or 'B', LDVR >= N. */
+
+/* S (output) DOUBLE PRECISION array, dimension (MM) */
+/* If JOB = 'E' or 'B', the reciprocal condition numbers of the */
+/* selected eigenvalues, stored in consecutive elements of the */
+/* array. Thus S(j), SEP(j), and the j-th columns of VL and VR */
+/* all correspond to the same eigenpair (but not in general the */
+/* j-th eigenpair, unless all eigenpairs are selected). */
+/* If JOB = 'V', S is not referenced. */
+
+/* SEP (output) DOUBLE PRECISION array, dimension (MM) */
+/* If JOB = 'V' or 'B', the estimated reciprocal condition */
+/* numbers of the selected eigenvectors, stored in consecutive */
+/* elements of the array. */
+/* If JOB = 'E', SEP is not referenced. */
+
+/* MM (input) INTEGER */
+/* The number of elements in the arrays S (if JOB = 'E' or 'B') */
+/* and/or SEP (if JOB = 'V' or 'B'). MM >= M. */
+
+/* M (output) INTEGER */
+/* The number of elements of the arrays S and/or SEP actually */
+/* used to store the estimated condition numbers. */
+/* If HOWMNY = 'A', M is set to N. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (LDWORK,N+6) */
+/* If JOB = 'E', WORK is not referenced. */
+
+/* LDWORK (input) INTEGER */
+/* The leading dimension of the array WORK. */
+/* LDWORK >= 1; and if JOB = 'V' or 'B', LDWORK >= N. */
+
+/* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */
+/* If JOB = 'E', RWORK is not referenced. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The reciprocal of the condition number of an eigenvalue lambda is */
+/* defined as */
+
+/* S(lambda) = |v'*u| / (norm(u)*norm(v)) */
+
+/* where u and v are the right and left eigenvectors of T corresponding */
+/* to lambda; v' denotes the conjugate transpose of v, and norm(u) */
+/* denotes the Euclidean norm. These reciprocal condition numbers always */
+/* lie between zero (very badly conditioned) and one (very well */
+/* conditioned). If n = 1, S(lambda) is defined to be 1. */
+
+/* An approximate error bound for a computed eigenvalue W(i) is given by */
+
+/* EPS * norm(T) / S(i) */
+
+/* where EPS is the machine precision. */
+
+/* The reciprocal of the condition number of the right eigenvector u */
+/* corresponding to lambda is defined as follows. Suppose */
+
+/* T = ( lambda c ) */
+/* ( 0 T22 ) */
+
+/* Then the reciprocal condition number is */
+
+/* SEP( lambda, T22 ) = sigma-min( T22 - lambda*I ) */
+
+/* where sigma-min denotes the smallest singular value. We approximate */
+/* the smallest singular value by the reciprocal of an estimate of the */
+/* one-norm of the inverse of T22 - lambda*I. If n = 1, SEP(1) is */
+/* defined to be abs(T(1,1)). */
+
+/* An approximate error bound for a computed right eigenvector VR(i) */
+/* is given by */
+
+/* EPS * norm(T) / SEP(i) */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Statement Functions .. */
+/* .. */
+/* .. Statement Function definitions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and test the input parameters */
+
+ /* Parameter adjustments */
+ --select;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --s;
+ --sep;
+ work_dim1 = *ldwork;
+ work_offset = 1 + work_dim1;
+ work -= work_offset;
+ --rwork;
+
+ /* Function Body */
+ wantbh = lsame_(job, "B");
+ wants = lsame_(job, "E") || wantbh;
+ wantsp = lsame_(job, "V") || wantbh;
+
+ somcon = lsame_(howmny, "S");
+
+/* Set M to the number of eigenpairs for which condition numbers are */
+/* to be computed. */
+
+ if (somcon) {
+ *m = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (select[j]) {
+ ++(*m);
+ }
+/* L10: */
+ }
+ } else {
+ *m = *n;
+ }
+
+ *info = 0;
+ if (! wants && ! wantsp) {
+ *info = -1;
+ } else if (! lsame_(howmny, "A") && ! somcon) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldt < max(1,*n)) {
+ *info = -6;
+ } else if (*ldvl < 1 || wants && *ldvl < *n) {
+ *info = -8;
+ } else if (*ldvr < 1 || wants && *ldvr < *n) {
+ *info = -10;
+ } else if (*mm < *m) {
+ *info = -13;
+ } else if (*ldwork < 1 || wantsp && *ldwork < *n) {
+ *info = -16;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTRSNA", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (somcon) {
+ if (! select[1]) {
+ return 0;
+ }
+ }
+ if (wants) {
+ s[1] = 1.;
+ }
+ if (wantsp) {
+ sep[1] = z_abs(&t[t_dim1 + 1]);
+ }
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = dlamch_("P");
+ smlnum = dlamch_("S") / eps;
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+
+ ks = 1;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+
+ if (somcon) {
+ if (! select[k]) {
+ goto L50;
+ }
+ }
+
+ if (wants) {
+
+/* Compute the reciprocal condition number of the k-th */
+/* eigenvalue. */
+
+ zdotc_(&z__1, n, &vr[ks * vr_dim1 + 1], &c__1, &vl[ks * vl_dim1 +
+ 1], &c__1);
+ prod.r = z__1.r, prod.i = z__1.i;
+ rnrm = dznrm2_(n, &vr[ks * vr_dim1 + 1], &c__1);
+ lnrm = dznrm2_(n, &vl[ks * vl_dim1 + 1], &c__1);
+ s[ks] = z_abs(&prod) / (rnrm * lnrm);
+
+ }
+
+ if (wantsp) {
+
+/* Estimate the reciprocal condition number of the k-th */
+/* eigenvector. */
+
+/* Copy the matrix T to the array WORK and swap the k-th */
+/* diagonal element to the (1,1) position. */
+
+ zlacpy_("Full", n, n, &t[t_offset], ldt, &work[work_offset],
+ ldwork);
+ ztrexc_("No Q", n, &work[work_offset], ldwork, dummy, &c__1, &k, &
+ c__1, &ierr);
+
+/* Form C = T22 - lambda*I in WORK(2:N,2:N). */
+
+ i__2 = *n;
+ for (i__ = 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + i__ * work_dim1;
+ i__4 = i__ + i__ * work_dim1;
+ i__5 = work_dim1 + 1;
+ z__1.r = work[i__4].r - work[i__5].r, z__1.i = work[i__4].i -
+ work[i__5].i;
+ work[i__3].r = z__1.r, work[i__3].i = z__1.i;
+/* L20: */
+ }
+
+/* Estimate a lower bound for the 1-norm of inv(C'). The 1st */
+/* and (N+1)th columns of WORK are used to store work vectors. */
+
+ sep[ks] = 0.;
+ est = 0.;
+ kase = 0;
+ *(unsigned char *)normin = 'N';
+L30:
+ i__2 = *n - 1;
+ zlacn2_(&i__2, &work[(*n + 1) * work_dim1 + 1], &work[work_offset]
+, &est, &kase, isave);
+
+ if (kase != 0) {
+ if (kase == 1) {
+
+/* Solve C'*x = scale*b */
+
+ i__2 = *n - 1;
+ zlatrs_("Upper", "Conjugate transpose", "Nonunit", normin,
+ &i__2, &work[(work_dim1 << 1) + 2], ldwork, &
+ work[work_offset], &scale, &rwork[1], &ierr);
+ } else {
+
+/* Solve C*x = scale*b */
+
+ i__2 = *n - 1;
+ zlatrs_("Upper", "No transpose", "Nonunit", normin, &i__2,
+ &work[(work_dim1 << 1) + 2], ldwork, &work[
+ work_offset], &scale, &rwork[1], &ierr);
+ }
+ *(unsigned char *)normin = 'Y';
+ if (scale != 1.) {
+
+/* Multiply by 1/SCALE if doing so will not cause */
+/* overflow. */
+
+ i__2 = *n - 1;
+ ix = izamax_(&i__2, &work[work_offset], &c__1);
+ i__2 = ix + work_dim1;
+ xnorm = (d__1 = work[i__2].r, abs(d__1)) + (d__2 = d_imag(
+ &work[ix + work_dim1]), abs(d__2));
+ if (scale < xnorm * smlnum || scale == 0.) {
+ goto L40;
+ }
+ zdrscl_(n, &scale, &work[work_offset], &c__1);
+ }
+ goto L30;
+ }
+
+ sep[ks] = 1. / max(est,smlnum);
+ }
+
+L40:
+ ++ks;
+L50:
+ ;
+ }
+ return 0;
+
+/* End of ZTRSNA */
+
+} /* ztrsna_ */
diff --git a/contrib/libs/clapack/ztrsyl.c b/contrib/libs/clapack/ztrsyl.c
new file mode 100644
index 0000000000..3ab1c31fa5
--- /dev/null
+++ b/contrib/libs/clapack/ztrsyl.c
@@ -0,0 +1,547 @@
+/* ztrsyl.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int ztrsyl_(char *trana, char *tranb, integer *isgn, integer
+ *m, integer *n, doublecomplex *a, integer *lda, doublecomplex *b,
+ integer *ldb, doublecomplex *c__, integer *ldc, doublereal *scale,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4;
+ doublereal d__1, d__2;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *);
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer j, k, l;
+ doublecomplex a11;
+ doublereal db;
+ doublecomplex x11;
+ doublereal da11;
+ doublecomplex vec;
+ doublereal dum[1], eps, sgn, smin;
+ doublecomplex suml, sumr;
+ extern logical lsame_(char *, char *);
+ extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *), zdotu_(
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *);
+ extern doublereal dlamch_(char *);
+ doublereal scaloc;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
+ integer *, doublereal *);
+ doublereal bignum;
+ extern /* Subroutine */ int zdscal_(integer *, doublereal *,
+ doublecomplex *, integer *);
+ extern /* Double Complex */ VOID zladiv_(doublecomplex *, doublecomplex *,
+ doublecomplex *);
+ logical notrna, notrnb;
+ doublereal smlnum;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTRSYL solves the complex Sylvester matrix equation: */
+
+/* op(A)*X + X*op(B) = scale*C or */
+/* op(A)*X - X*op(B) = scale*C, */
+
+/* where op(A) = A or A**H, and A and B are both upper triangular. A is */
+/* M-by-M and B is N-by-N; the right hand side C and the solution X are */
+/* M-by-N; and scale is an output scale factor, set <= 1 to avoid */
+/* overflow in X. */
+
+/* Arguments */
+/* ========= */
+
+/* TRANA (input) CHARACTER*1 */
+/* Specifies the option op(A): */
+/* = 'N': op(A) = A (No transpose) */
+/* = 'C': op(A) = A**H (Conjugate transpose) */
+
+/* TRANB (input) CHARACTER*1 */
+/* Specifies the option op(B): */
+/* = 'N': op(B) = B (No transpose) */
+/* = 'C': op(B) = B**H (Conjugate transpose) */
+
+/* ISGN (input) INTEGER */
+/* Specifies the sign in the equation: */
+/* = +1: solve op(A)*X + X*op(B) = scale*C */
+/* = -1: solve op(A)*X - X*op(B) = scale*C */
+
+/* M (input) INTEGER */
+/* The order of the matrix A, and the number of rows in the */
+/* matrices X and C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The order of the matrix B, and the number of columns in the */
+/* matrices X and C. N >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,M) */
+/* The upper triangular matrix A. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* B (input) COMPLEX*16 array, dimension (LDB,N) */
+/* The upper triangular matrix B. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the M-by-N right hand side matrix C. */
+/* On exit, C is overwritten by the solution matrix X. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M) */
+
+/* SCALE (output) DOUBLE PRECISION */
+/* The scale factor, scale, set <= 1 to avoid overflow in X. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* = 1: A and B have common or very close eigenvalues; perturbed */
+/* values were used to solve the equation (but the matrices */
+/* A and B are unchanged). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Decode and Test input parameters */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ notrna = lsame_(trana, "N");
+ notrnb = lsame_(tranb, "N");
+
+ *info = 0;
+ if (! notrna && ! lsame_(trana, "C")) {
+ *info = -1;
+ } else if (! notrnb && ! lsame_(tranb, "C")) {
+ *info = -2;
+ } else if (*isgn != 1 && *isgn != -1) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*m)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTRSYL", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ *scale = 1.;
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Set constants to control overflow */
+
+ eps = dlamch_("P");
+ smlnum = dlamch_("S");
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+ smlnum = smlnum * (doublereal) (*m * *n) / eps;
+ bignum = 1. / smlnum;
+/* Computing MAX */
+ d__1 = smlnum, d__2 = eps * zlange_("M", m, m, &a[a_offset], lda, dum), d__1 = max(d__1,d__2), d__2 = eps * zlange_("M", n, n,
+ &b[b_offset], ldb, dum);
+ smin = max(d__1,d__2);
+ sgn = (doublereal) (*isgn);
+
+ if (notrna && notrnb) {
+
+/* Solve A*X + ISGN*X*B = scale*C. */
+
+/* The (K,L)th block of X is determined starting from */
+/* bottom-left corner column by column by */
+
+/* A(K,K)*X(K,L) + ISGN*X(K,L)*B(L,L) = C(K,L) - R(K,L) */
+
+/* Where */
+/* M L-1 */
+/* R(K,L) = SUM [A(K,I)*X(I,L)] +ISGN*SUM [X(K,J)*B(J,L)]. */
+/* I=K+1 J=1 */
+
+ i__1 = *n;
+ for (l = 1; l <= i__1; ++l) {
+ for (k = *m; k >= 1; --k) {
+
+ i__2 = *m - k;
+/* Computing MIN */
+ i__3 = k + 1;
+/* Computing MIN */
+ i__4 = k + 1;
+ zdotu_(&z__1, &i__2, &a[k + min(i__3, *m)* a_dim1], lda, &c__[
+ min(i__4, *m)+ l * c_dim1], &c__1);
+ suml.r = z__1.r, suml.i = z__1.i;
+ i__2 = l - 1;
+ zdotu_(&z__1, &i__2, &c__[k + c_dim1], ldc, &b[l * b_dim1 + 1]
+, &c__1);
+ sumr.r = z__1.r, sumr.i = z__1.i;
+ i__2 = k + l * c_dim1;
+ z__3.r = sgn * sumr.r, z__3.i = sgn * sumr.i;
+ z__2.r = suml.r + z__3.r, z__2.i = suml.i + z__3.i;
+ z__1.r = c__[i__2].r - z__2.r, z__1.i = c__[i__2].i - z__2.i;
+ vec.r = z__1.r, vec.i = z__1.i;
+
+ scaloc = 1.;
+ i__2 = k + k * a_dim1;
+ i__3 = l + l * b_dim1;
+ z__2.r = sgn * b[i__3].r, z__2.i = sgn * b[i__3].i;
+ z__1.r = a[i__2].r + z__2.r, z__1.i = a[i__2].i + z__2.i;
+ a11.r = z__1.r, a11.i = z__1.i;
+ da11 = (d__1 = a11.r, abs(d__1)) + (d__2 = d_imag(&a11), abs(
+ d__2));
+ if (da11 <= smin) {
+ a11.r = smin, a11.i = 0.;
+ da11 = smin;
+ *info = 1;
+ }
+ db = (d__1 = vec.r, abs(d__1)) + (d__2 = d_imag(&vec), abs(
+ d__2));
+ if (da11 < 1. && db > 1.) {
+ if (db > bignum * da11) {
+ scaloc = 1. / db;
+ }
+ }
+ z__3.r = scaloc, z__3.i = 0.;
+ z__2.r = vec.r * z__3.r - vec.i * z__3.i, z__2.i = vec.r *
+ z__3.i + vec.i * z__3.r;
+ zladiv_(&z__1, &z__2, &a11);
+ x11.r = z__1.r, x11.i = z__1.i;
+
+ if (scaloc != 1.) {
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ zdscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L10: */
+ }
+ *scale *= scaloc;
+ }
+ i__2 = k + l * c_dim1;
+ c__[i__2].r = x11.r, c__[i__2].i = x11.i;
+
+/* L20: */
+ }
+/* L30: */
+ }
+
+ } else if (! notrna && notrnb) {
+
+/* Solve A' *X + ISGN*X*B = scale*C. */
+
+/* The (K,L)th block of X is determined starting from */
+/* upper-left corner column by column by */
+
+/* A'(K,K)*X(K,L) + ISGN*X(K,L)*B(L,L) = C(K,L) - R(K,L) */
+
+/* Where */
+/* K-1 L-1 */
+/* R(K,L) = SUM [A'(I,K)*X(I,L)] + ISGN*SUM [X(K,J)*B(J,L)] */
+/* I=1 J=1 */
+
+ i__1 = *n;
+ for (l = 1; l <= i__1; ++l) {
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+
+ i__3 = k - 1;
+ zdotc_(&z__1, &i__3, &a[k * a_dim1 + 1], &c__1, &c__[l *
+ c_dim1 + 1], &c__1);
+ suml.r = z__1.r, suml.i = z__1.i;
+ i__3 = l - 1;
+ zdotu_(&z__1, &i__3, &c__[k + c_dim1], ldc, &b[l * b_dim1 + 1]
+, &c__1);
+ sumr.r = z__1.r, sumr.i = z__1.i;
+ i__3 = k + l * c_dim1;
+ z__3.r = sgn * sumr.r, z__3.i = sgn * sumr.i;
+ z__2.r = suml.r + z__3.r, z__2.i = suml.i + z__3.i;
+ z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
+ vec.r = z__1.r, vec.i = z__1.i;
+
+ scaloc = 1.;
+ d_cnjg(&z__2, &a[k + k * a_dim1]);
+ i__3 = l + l * b_dim1;
+ z__3.r = sgn * b[i__3].r, z__3.i = sgn * b[i__3].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ a11.r = z__1.r, a11.i = z__1.i;
+ da11 = (d__1 = a11.r, abs(d__1)) + (d__2 = d_imag(&a11), abs(
+ d__2));
+ if (da11 <= smin) {
+ a11.r = smin, a11.i = 0.;
+ da11 = smin;
+ *info = 1;
+ }
+ db = (d__1 = vec.r, abs(d__1)) + (d__2 = d_imag(&vec), abs(
+ d__2));
+ if (da11 < 1. && db > 1.) {
+ if (db > bignum * da11) {
+ scaloc = 1. / db;
+ }
+ }
+
+ z__3.r = scaloc, z__3.i = 0.;
+ z__2.r = vec.r * z__3.r - vec.i * z__3.i, z__2.i = vec.r *
+ z__3.i + vec.i * z__3.r;
+ zladiv_(&z__1, &z__2, &a11);
+ x11.r = z__1.r, x11.i = z__1.i;
+
+ if (scaloc != 1.) {
+ i__3 = *n;
+ for (j = 1; j <= i__3; ++j) {
+ zdscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L40: */
+ }
+ *scale *= scaloc;
+ }
+ i__3 = k + l * c_dim1;
+ c__[i__3].r = x11.r, c__[i__3].i = x11.i;
+
+/* L50: */
+ }
+/* L60: */
+ }
+
+ } else if (! notrna && ! notrnb) {
+
+/* Solve A'*X + ISGN*X*B' = C. */
+
+/* The (K,L)th block of X is determined starting from */
+/* upper-right corner column by column by */
+
+/* A'(K,K)*X(K,L) + ISGN*X(K,L)*B'(L,L) = C(K,L) - R(K,L) */
+
+/* Where */
+/* K-1 */
+/* R(K,L) = SUM [A'(I,K)*X(I,L)] + */
+/* I=1 */
+/* N */
+/* ISGN*SUM [X(K,J)*B'(L,J)]. */
+/* J=L+1 */
+
+ for (l = *n; l >= 1; --l) {
+ i__1 = *m;
+ for (k = 1; k <= i__1; ++k) {
+
+ i__2 = k - 1;
+ zdotc_(&z__1, &i__2, &a[k * a_dim1 + 1], &c__1, &c__[l *
+ c_dim1 + 1], &c__1);
+ suml.r = z__1.r, suml.i = z__1.i;
+ i__2 = *n - l;
+/* Computing MIN */
+ i__3 = l + 1;
+/* Computing MIN */
+ i__4 = l + 1;
+ zdotc_(&z__1, &i__2, &c__[k + min(i__3, *n)* c_dim1], ldc, &b[
+ l + min(i__4, *n)* b_dim1], ldb);
+ sumr.r = z__1.r, sumr.i = z__1.i;
+ i__2 = k + l * c_dim1;
+ d_cnjg(&z__4, &sumr);
+ z__3.r = sgn * z__4.r, z__3.i = sgn * z__4.i;
+ z__2.r = suml.r + z__3.r, z__2.i = suml.i + z__3.i;
+ z__1.r = c__[i__2].r - z__2.r, z__1.i = c__[i__2].i - z__2.i;
+ vec.r = z__1.r, vec.i = z__1.i;
+
+ scaloc = 1.;
+ i__2 = k + k * a_dim1;
+ i__3 = l + l * b_dim1;
+ z__3.r = sgn * b[i__3].r, z__3.i = sgn * b[i__3].i;
+ z__2.r = a[i__2].r + z__3.r, z__2.i = a[i__2].i + z__3.i;
+ d_cnjg(&z__1, &z__2);
+ a11.r = z__1.r, a11.i = z__1.i;
+ da11 = (d__1 = a11.r, abs(d__1)) + (d__2 = d_imag(&a11), abs(
+ d__2));
+ if (da11 <= smin) {
+ a11.r = smin, a11.i = 0.;
+ da11 = smin;
+ *info = 1;
+ }
+ db = (d__1 = vec.r, abs(d__1)) + (d__2 = d_imag(&vec), abs(
+ d__2));
+ if (da11 < 1. && db > 1.) {
+ if (db > bignum * da11) {
+ scaloc = 1. / db;
+ }
+ }
+
+ z__3.r = scaloc, z__3.i = 0.;
+ z__2.r = vec.r * z__3.r - vec.i * z__3.i, z__2.i = vec.r *
+ z__3.i + vec.i * z__3.r;
+ zladiv_(&z__1, &z__2, &a11);
+ x11.r = z__1.r, x11.i = z__1.i;
+
+ if (scaloc != 1.) {
+ i__2 = *n;
+ for (j = 1; j <= i__2; ++j) {
+ zdscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L70: */
+ }
+ *scale *= scaloc;
+ }
+ i__2 = k + l * c_dim1;
+ c__[i__2].r = x11.r, c__[i__2].i = x11.i;
+
+/* L80: */
+ }
+/* L90: */
+ }
+
+ } else if (notrna && ! notrnb) {
+
+/* Solve A*X + ISGN*X*B' = C. */
+
+/* The (K,L)th block of X is determined starting from */
+/* bottom-left corner column by column by */
+
+/* A(K,K)*X(K,L) + ISGN*X(K,L)*B'(L,L) = C(K,L) - R(K,L) */
+
+/* Where */
+/* M N */
+/* R(K,L) = SUM [A(K,I)*X(I,L)] + ISGN*SUM [X(K,J)*B'(L,J)] */
+/* I=K+1 J=L+1 */
+
+ for (l = *n; l >= 1; --l) {
+ for (k = *m; k >= 1; --k) {
+
+ i__1 = *m - k;
+/* Computing MIN */
+ i__2 = k + 1;
+/* Computing MIN */
+ i__3 = k + 1;
+ zdotu_(&z__1, &i__1, &a[k + min(i__2, *m)* a_dim1], lda, &c__[
+ min(i__3, *m)+ l * c_dim1], &c__1);
+ suml.r = z__1.r, suml.i = z__1.i;
+ i__1 = *n - l;
+/* Computing MIN */
+ i__2 = l + 1;
+/* Computing MIN */
+ i__3 = l + 1;
+ zdotc_(&z__1, &i__1, &c__[k + min(i__2, *n)* c_dim1], ldc, &b[
+ l + min(i__3, *n)* b_dim1], ldb);
+ sumr.r = z__1.r, sumr.i = z__1.i;
+ i__1 = k + l * c_dim1;
+ d_cnjg(&z__4, &sumr);
+ z__3.r = sgn * z__4.r, z__3.i = sgn * z__4.i;
+ z__2.r = suml.r + z__3.r, z__2.i = suml.i + z__3.i;
+ z__1.r = c__[i__1].r - z__2.r, z__1.i = c__[i__1].i - z__2.i;
+ vec.r = z__1.r, vec.i = z__1.i;
+
+ scaloc = 1.;
+ i__1 = k + k * a_dim1;
+ d_cnjg(&z__3, &b[l + l * b_dim1]);
+ z__2.r = sgn * z__3.r, z__2.i = sgn * z__3.i;
+ z__1.r = a[i__1].r + z__2.r, z__1.i = a[i__1].i + z__2.i;
+ a11.r = z__1.r, a11.i = z__1.i;
+ da11 = (d__1 = a11.r, abs(d__1)) + (d__2 = d_imag(&a11), abs(
+ d__2));
+ if (da11 <= smin) {
+ a11.r = smin, a11.i = 0.;
+ da11 = smin;
+ *info = 1;
+ }
+ db = (d__1 = vec.r, abs(d__1)) + (d__2 = d_imag(&vec), abs(
+ d__2));
+ if (da11 < 1. && db > 1.) {
+ if (db > bignum * da11) {
+ scaloc = 1. / db;
+ }
+ }
+
+ z__3.r = scaloc, z__3.i = 0.;
+ z__2.r = vec.r * z__3.r - vec.i * z__3.i, z__2.i = vec.r *
+ z__3.i + vec.i * z__3.r;
+ zladiv_(&z__1, &z__2, &a11);
+ x11.r = z__1.r, x11.i = z__1.i;
+
+ if (scaloc != 1.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ zdscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
+/* L100: */
+ }
+ *scale *= scaloc;
+ }
+ i__1 = k + l * c_dim1;
+ c__[i__1].r = x11.r, c__[i__1].i = x11.i;
+
+/* L110: */
+ }
+/* L120: */
+ }
+
+ }
+
+ return 0;
+
+/* End of ZTRSYL */
+
+} /* ztrsyl_ */
diff --git a/contrib/libs/clapack/ztrti2.c b/contrib/libs/clapack/ztrti2.c
new file mode 100644
index 0000000000..43e436c380
--- /dev/null
+++ b/contrib/libs/clapack/ztrti2.c
@@ -0,0 +1,198 @@
+/* ztrti2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int ztrti2_(char *uplo, char *diag, integer *n,
+ doublecomplex *a, integer *lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer j;
+ doublecomplex ajj;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *);
+ logical upper;
+ extern /* Subroutine */ int ztrmv_(char *, char *, char *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(char *, integer *);
+ logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTRTI2 computes the inverse of a complex upper or lower triangular */
+/* matrix. */
+
+/* This is the Level 2 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* Specifies whether the matrix A is upper or lower triangular. */
+/* = 'U': Upper triangular */
+/* = 'L': Lower triangular */
+
+/* DIAG (input) CHARACTER*1 */
+/* Specifies whether or not the matrix A is unit triangular. */
+/* = 'N': Non-unit triangular */
+/* = 'U': Unit triangular */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the triangular matrix A. If UPLO = 'U', the */
+/* leading n by n upper triangular part of the array A contains */
+/* the upper triangular matrix, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading n by n lower triangular part of the array A contains */
+/* the lower triangular matrix, and the strictly upper */
+/* triangular part of A is not referenced. If DIAG = 'U', the */
+/* diagonal elements of A are also not referenced and are */
+/* assumed to be 1. */
+
+/* On exit, the (triangular) inverse of the original matrix, in */
+/* the same storage format. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -k, the k-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ nounit = lsame_(diag, "N");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTRTI2", &i__1);
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute inverse of upper triangular matrix. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (nounit) {
+ i__2 = j + j * a_dim1;
+ z_div(&z__1, &c_b1, &a[j + j * a_dim1]);
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = j + j * a_dim1;
+ z__1.r = -a[i__2].r, z__1.i = -a[i__2].i;
+ ajj.r = z__1.r, ajj.i = z__1.i;
+ } else {
+ z__1.r = -1., z__1.i = -0.;
+ ajj.r = z__1.r, ajj.i = z__1.i;
+ }
+
+/* Compute elements 1:j-1 of j-th column. */
+
+ i__2 = j - 1;
+ ztrmv_("Upper", "No transpose", diag, &i__2, &a[a_offset], lda, &
+ a[j * a_dim1 + 1], &c__1);
+ i__2 = j - 1;
+ zscal_(&i__2, &ajj, &a[j * a_dim1 + 1], &c__1);
+/* L10: */
+ }
+ } else {
+
+/* Compute inverse of lower triangular matrix. */
+
+ for (j = *n; j >= 1; --j) {
+ if (nounit) {
+ i__1 = j + j * a_dim1;
+ z_div(&z__1, &c_b1, &a[j + j * a_dim1]);
+ a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+ i__1 = j + j * a_dim1;
+ z__1.r = -a[i__1].r, z__1.i = -a[i__1].i;
+ ajj.r = z__1.r, ajj.i = z__1.i;
+ } else {
+ z__1.r = -1., z__1.i = -0.;
+ ajj.r = z__1.r, ajj.i = z__1.i;
+ }
+ if (j < *n) {
+
+/* Compute elements j+1:n of j-th column. */
+
+ i__1 = *n - j;
+ ztrmv_("Lower", "No transpose", diag, &i__1, &a[j + 1 + (j +
+ 1) * a_dim1], lda, &a[j + 1 + j * a_dim1], &c__1);
+ i__1 = *n - j;
+ zscal_(&i__1, &ajj, &a[j + 1 + j * a_dim1], &c__1);
+ }
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of ZTRTI2 */
+
+} /* ztrti2_ */
diff --git a/contrib/libs/clapack/ztrtri.c b/contrib/libs/clapack/ztrtri.c
new file mode 100644
index 0000000000..1fede0c4fd
--- /dev/null
+++ b/contrib/libs/clapack/ztrtri.c
@@ -0,0 +1,244 @@
+/* ztrtri.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int ztrtri_(char *uplo, char *diag, integer *n,
+ doublecomplex *a, integer *lda, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, i__1, i__2, i__3[2], i__4, i__5;
+ doublecomplex z__1;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer j, jb, nb, nn;
+ extern logical lsame_(char *, char *);
+ logical upper;
+ extern /* Subroutine */ int ztrmm_(char *, char *, char *, char *,
+ integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *),
+ ztrsm_(char *, char *, char *, char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *), ztrti2_(char *, char *
+, integer *, doublecomplex *, integer *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTRTRI computes the inverse of a complex upper or lower triangular */
+/* matrix A. */
+
+/* This is the Level 3 BLAS version of the algorithm. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the triangular matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of the array A contains */
+/* the upper triangular matrix, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of the array A contains */
+/* the lower triangular matrix, and the strictly upper */
+/* triangular part of A is not referenced. If DIAG = 'U', the */
+/* diagonal elements of A are also not referenced and are */
+/* assumed to be 1. */
+/* On exit, the (triangular) inverse of the original matrix, in */
+/* the same storage format. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, A(i,i) is exactly zero. The triangular */
+/* matrix is singular and its inverse can not be computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ nounit = lsame_(diag, "N");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTRTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check for singularity if non-unit. */
+
+ if (nounit) {
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ i__2 = *info + *info * a_dim1;
+ if (a[i__2].r == 0. && a[i__2].i == 0.) {
+ return 0;
+ }
+/* L10: */
+ }
+ *info = 0;
+ }
+
+/* Determine the block size for this environment. */
+
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = uplo;
+ i__3[1] = 1, a__1[1] = diag;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ nb = ilaenv_(&c__1, "ZTRTRI", ch__1, n, &c_n1, &c_n1, &c_n1);
+ if (nb <= 1 || nb >= *n) {
+
+/* Use unblocked code */
+
+ ztrti2_(uplo, diag, n, &a[a_offset], lda, info);
+ } else {
+
+/* Use blocked code */
+
+ if (upper) {
+
+/* Compute inverse of upper triangular matrix */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *n - j + 1;
+ jb = min(i__4,i__5);
+
+/* Compute rows 1:j-1 of current block column */
+
+ i__4 = j - 1;
+ ztrmm_("Left", "Upper", "No transpose", diag, &i__4, &jb, &
+ c_b1, &a[a_offset], lda, &a[j * a_dim1 + 1], lda);
+ i__4 = j - 1;
+ z__1.r = -1., z__1.i = -0.;
+ ztrsm_("Right", "Upper", "No transpose", diag, &i__4, &jb, &
+ z__1, &a[j + j * a_dim1], lda, &a[j * a_dim1 + 1],
+ lda);
+
+/* Compute inverse of current diagonal block */
+
+ ztrti2_("Upper", diag, &jb, &a[j + j * a_dim1], lda, info);
+/* L20: */
+ }
+ } else {
+
+/* Compute inverse of lower triangular matrix */
+
+ nn = (*n - 1) / nb * nb + 1;
+ i__2 = -nb;
+ for (j = nn; i__2 < 0 ? j >= 1 : j <= 1; j += i__2) {
+/* Computing MIN */
+ i__1 = nb, i__4 = *n - j + 1;
+ jb = min(i__1,i__4);
+ if (j + jb <= *n) {
+
+/* Compute rows j+jb:n of current block column */
+
+ i__1 = *n - j - jb + 1;
+ ztrmm_("Left", "Lower", "No transpose", diag, &i__1, &jb,
+ &c_b1, &a[j + jb + (j + jb) * a_dim1], lda, &a[j
+ + jb + j * a_dim1], lda);
+ i__1 = *n - j - jb + 1;
+ z__1.r = -1., z__1.i = -0.;
+ ztrsm_("Right", "Lower", "No transpose", diag, &i__1, &jb,
+ &z__1, &a[j + j * a_dim1], lda, &a[j + jb + j *
+ a_dim1], lda);
+ }
+
+/* Compute inverse of current diagonal block */
+
+ ztrti2_("Lower", diag, &jb, &a[j + j * a_dim1], lda, info);
+/* L30: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZTRTRI */
+
+} /* ztrtri_ */
diff --git a/contrib/libs/clapack/ztrtrs.c b/contrib/libs/clapack/ztrtrs.c
new file mode 100644
index 0000000000..1174f9c177
--- /dev/null
+++ b/contrib/libs/clapack/ztrtrs.c
@@ -0,0 +1,184 @@
+/* ztrtrs.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b2 = {1.,0.};
+
+/* Subroutine */ int ztrtrs_(char *uplo, char *trans, char *diag, integer *n,
+ integer *nrhs, doublecomplex *a, integer *lda, doublecomplex *b,
+ integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ztrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *),
+ xerbla_(char *, integer *);
+ logical nounit;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTRTRS solves a triangular system of the form */
+
+/* A * X = B, A**T * X = B, or A**H * X = B, */
+
+/* where A is a triangular matrix of order N, and B is an N-by-NRHS */
+/* matrix. A check is made to verify that A is nonsingular. */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* TRANS (input) CHARACTER*1 */
+/* Specifies the form of the system of equations: */
+/* = 'N': A * X = B (No transpose) */
+/* = 'T': A**T * X = B (Transpose) */
+/* = 'C': A**H * X = B (Conjugate transpose) */
+
+/* DIAG (input) CHARACTER*1 */
+/* = 'N': A is non-unit triangular; */
+/* = 'U': A is unit triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* NRHS (input) INTEGER */
+/* The number of right hand sides, i.e., the number of columns */
+/* of the matrix B. NRHS >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* The triangular matrix A. If UPLO = 'U', the leading N-by-N */
+/* upper triangular part of the array A contains the upper */
+/* triangular matrix, and the strictly lower triangular part of */
+/* A is not referenced. If UPLO = 'L', the leading N-by-N lower */
+/* triangular part of the array A contains the lower triangular */
+/* matrix, and the strictly upper triangular part of A is not */
+/* referenced. If DIAG = 'U', the diagonal elements of A are */
+/* also not referenced and are assumed to be 1. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* B (input/output) COMPLEX*16 array, dimension (LDB,NRHS) */
+/* On entry, the right hand side matrix B. */
+/* On exit, if INFO = 0, the solution matrix X. */
+
+/* LDB (input) INTEGER */
+/* The leading dimension of the array B. LDB >= max(1,N). */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+/* > 0: if INFO = i, the i-th diagonal element of A is zero, */
+/* indicating that the matrix is singular and the solutions */
+/* X have not been computed. */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ nounit = lsame_(diag, "N");
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*nrhs < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ } else if (*ldb < max(1,*n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTRTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check for singularity. */
+
+ if (nounit) {
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ i__2 = *info + *info * a_dim1;
+ if (a[i__2].r == 0. && a[i__2].i == 0.) {
+ return 0;
+ }
+/* L10: */
+ }
+ }
+ *info = 0;
+
+/* Solve A * x = b, A**T * x = b, or A**H * x = b. */
+
+ ztrsm_("Left", uplo, trans, diag, n, nrhs, &c_b2, &a[a_offset], lda, &b[
+ b_offset], ldb);
+
+ return 0;
+
+/* End of ZTRTRS */
+
+} /* ztrtrs_ */
diff --git a/contrib/libs/clapack/ztrttf.c b/contrib/libs/clapack/ztrttf.c
new file mode 100644
index 0000000000..b15f625040
--- /dev/null
+++ b/contrib/libs/clapack/ztrttf.c
@@ -0,0 +1,580 @@
+/* ztrttf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ztrttf_(char *transr, char *uplo, integer *n,
+	doublecomplex *a, integer *lda, doublecomplex *arf, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__, j, k, l, n1, n2, ij, nt, nx2, np1x2;
+    logical normaltransr;
+    extern logical lsame_(char *, char *);
+    logical lower;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    logical nisodd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTRTTF copies a triangular matrix A from standard full format (TR) */
+/* to rectangular full packed format (TF) . */
+
+/* Arguments */
+/* ========= */
+
+/* TRANSR (input) CHARACTER */
+/* = 'N': ARF in Normal mode is wanted; */
+/* = 'C': ARF in Conjugate Transpose mode is wanted; */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrix A. N >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension ( LDA, N ) */
+/* On entry, the triangular matrix A. If UPLO = 'U', the */
+/* leading N-by-N upper triangular part of the array A contains */
+/* the upper triangular matrix, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of the array A contains */
+/* the lower triangular matrix, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the matrix A. LDA >= max(1,N). */
+
+/* ARF (output) COMPLEX*16 array, dimension ( N*(N+1)/2 ), */
+/* On exit, the upper or lower triangular matrix A stored in */
+/* RFP format. For a further discussion see Notes below. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Notes */
+/* ===== */
+
+/* We first consider Standard Packed Format when N is even. */
+/* We give an example where N = 6. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 05 00 */
+/* 11 12 13 14 15 10 11 */
+/* 22 23 24 25 20 21 22 */
+/* 33 34 35 30 31 32 33 */
+/* 44 45 40 41 42 43 44 */
+/* 55 50 51 52 53 54 55 */
+
+
+/* Let TRANSR = `N'. RFP holds AP as follows: */
+/* For UPLO = `U' the upper trapezoid A(0:5,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(4:6,0:2) consists of */
+/* conjugate-transpose of the first three columns of AP upper. */
+/* For UPLO = `L' the lower trapezoid A(1:6,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:2,0:2) consists of */
+/* conjugate-transpose of the last three columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N even and TRANSR = `N'. */
+
+/* RFP A RFP A */
+
+/* -- -- -- */
+/* 03 04 05 33 43 53 */
+/* -- -- */
+/* 13 14 15 00 44 54 */
+/* -- */
+/* 23 24 25 10 11 55 */
+
+/* 33 34 35 20 21 22 */
+/* -- */
+/* 00 44 45 30 31 32 */
+/* -- -- */
+/* 01 11 55 40 41 42 */
+/* -- -- -- */
+/* 02 12 22 50 51 52 */
+
+/* Now let TRANSR = `C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 00 01 02 33 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 11 12 43 44 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- -- */
+/* 05 15 25 35 45 55 22 53 54 55 22 32 42 52 */
+
+
+/* We next consider Standard Packed Format when N is odd. */
+/* We give an example where N = 5. */
+
+/* AP is Upper AP is Lower */
+
+/* 00 01 02 03 04 00 */
+/* 11 12 13 14 10 11 */
+/* 22 23 24 20 21 22 */
+/* 33 34 30 31 32 33 */
+/* 44 40 41 42 43 44 */
+
+
+/* Let TRANSR = `N'. RFP holds AP as follows: */
+/* For UPLO = `U' the upper trapezoid A(0:4,0:2) consists of the last */
+/* three columns of AP upper. The lower triangle A(3:4,0:1) consists of */
+/* conjugate-transpose of the first two columns of AP upper. */
+/* For UPLO = `L' the lower trapezoid A(0:4,0:2) consists of the first */
+/* three columns of AP lower. The upper triangle A(0:1,1:2) consists of */
+/* conjugate-transpose of the last two columns of AP lower. */
+/* To denote conjugate we place -- above the element. This covers the */
+/* case N odd and TRANSR = `N'. */
+
+/* RFP A RFP A */
+
+/* -- -- */
+/* 02 03 04 00 33 43 */
+/* -- */
+/* 12 13 14 10 11 44 */
+
+/* 22 23 24 20 21 22 */
+/* -- */
+/* 00 33 34 30 31 32 */
+/* -- -- */
+/* 01 11 44 40 41 42 */
+
+/* Now let TRANSR = `C'. RFP A in both UPLO cases is just the conjugate- */
+/* transpose of RFP A above. One therefore gets: */
+
+
+/* RFP A RFP A */
+
+/* -- -- -- -- -- -- -- -- -- */
+/* 02 12 22 00 01 00 10 20 30 40 50 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 03 13 23 33 11 33 11 21 31 41 51 */
+/* -- -- -- -- -- -- -- -- -- */
+/* 04 14 24 34 44 43 44 22 32 42 52 */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* f2c zero-based adjustment: a_dim1 reduces to *lda and a_offset to 0, */
+    /* so a[i + j * a_dim1] addresses the 0-based element A(i,j).  ARF is */
+    /* likewise indexed 0-based throughout (the packing index ij starts at 0). */
+    a_dim1 = *lda - 1 - 0 + 1;
+    a_offset = 0 + a_dim1 * 0;
+    a -= a_offset;
+
+    /* Function Body */
+    *info = 0;
+    normaltransr = lsame_(transr, "N");
+    lower = lsame_(uplo, "L");
+    if (! normaltransr && ! lsame_(transr, "C")) {
+	*info = -1;
+    } else if (! lower && ! lsame_(uplo, "U")) {
+	*info = -2;
+    } else if (*n < 0) {
+	*info = -3;
+    } else if (*lda < max(1,*n)) {
+	*info = -5;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZTRTTF", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*n <= 1) {
+	if (*n == 1) {
+	    if (normaltransr) {
+		arf[0].r = a[0].r, arf[0].i = a[0].i;
+	    } else {
+		d_cnjg(&z__1, a);
+		arf[0].r = z__1.r, arf[0].i = z__1.i;
+	    }
+	}
+	return 0;
+    }
+
+/* Size of array ARF(1:2,0:nt-1) */
+
+    nt = *n * (*n + 1) / 2;
+
+/* set N1 and N2 depending on LOWER: for N even N1=N2=K */
+
+    if (lower) {
+	n2 = *n / 2;
+	n1 = *n - n2;
+    } else {
+	n1 = *n / 2;
+	n2 = *n - n1;
+    }
+
+/* If N is odd, set NISODD = .TRUE., LDA=N+1 and A is (N+1)--by--K2. */
+/* If N is even, set K = N/2 and NISODD = .FALSE., LDA=N and A is */
+/* N--by--(N+1)/2. */
+
+/* nx2 (= 2*N) and np1x2 (= 2*N+2) are the column strides used to step */
+/* backwards through ARF in the two UPPER cases below; each is only set */
+/* on the branch that uses it. */
+
+    if (*n % 2 == 0) {
+	k = *n / 2;
+	nisodd = FALSE_;
+	if (! lower) {
+	    np1x2 = *n + *n + 2;
+	}
+    } else {
+	nisodd = TRUE_;
+	if (! lower) {
+	    nx2 = *n + *n;
+	}
+    }
+
+    if (nisodd) {
+
+/* N is odd */
+
+	if (normaltransr) {
+
+/* N is odd and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL and N is odd ( a(0:n-1,0:n1-1) ) */
+/* T1 -> a(0,0), T2 -> a(0,1), S -> a(n1,0) */
+/* T1 -> a(0), T2 -> a(n), S -> a(n1); lda=n */
+
+		ij = 0;
+		i__1 = n2;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = n2 + j;
+		    for (i__ = n1; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			d_cnjg(&z__1, &a[n2 + j + i__ * a_dim1]);
+			arf[i__3].r = z__1.r, arf[i__3].i = z__1.i;
+			++ij;
+		    }
+		    i__2 = *n - 1;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			i__4 = i__ + j * a_dim1;
+			arf[i__3].r = a[i__4].r, arf[i__3].i = a[i__4].i;
+			++ij;
+		    }
+		}
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL and N is odd ( a(0:n-1,0:n2-1) */
+/* T1 -> a(n1+1,0), T2 -> a(n1,0), S -> a(0,0) */
+/* T1 -> a(n2), T2 -> a(n1), S -> a(0); lda=n */
+
+		ij = nt - *n;
+		i__1 = n1;
+		for (j = *n - 1; j >= i__1; --j) {
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			i__4 = i__ + j * a_dim1;
+			arf[i__3].r = a[i__4].r, arf[i__3].i = a[i__4].i;
+			++ij;
+		    }
+		    i__2 = n1 - 1;
+		    for (l = j - n1; l <= i__2; ++l) {
+			i__3 = ij;
+			d_cnjg(&z__1, &a[j - n1 + l * a_dim1]);
+			arf[i__3].r = z__1.r, arf[i__3].i = z__1.i;
+			++ij;
+		    }
+		    ij -= nx2;
+		}
+
+	    }
+
+	} else {
+
+/* N is odd and TRANSR = 'C' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is odd */
+/* T1 -> A(0,0) , T2 -> A(1,0) , S -> A(0,n1) */
+/* T1 -> A(0+0) , T2 -> A(1+0) , S -> A(0+n1*n1); lda=n1 */
+
+		ij = 0;
+		i__1 = n2 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			d_cnjg(&z__1, &a[j + i__ * a_dim1]);
+			arf[i__3].r = z__1.r, arf[i__3].i = z__1.i;
+			++ij;
+		    }
+		    i__2 = *n - 1;
+		    for (i__ = n1 + j; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			i__4 = i__ + (n1 + j) * a_dim1;
+			arf[i__3].r = a[i__4].r, arf[i__3].i = a[i__4].i;
+			++ij;
+		    }
+		}
+		i__1 = *n - 1;
+		for (j = n2; j <= i__1; ++j) {
+		    i__2 = n1 - 1;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			d_cnjg(&z__1, &a[j + i__ * a_dim1]);
+			arf[i__3].r = z__1.r, arf[i__3].i = z__1.i;
+			++ij;
+		    }
+		}
+
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is odd */
+/* T1 -> A(0,n1+1), T2 -> A(0,n1), S -> A(0,0) */
+/* T1 -> A(n2*n2), T2 -> A(n1*n2), S -> A(0); lda=n2 */
+
+		ij = 0;
+		i__1 = n1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = *n - 1;
+		    for (i__ = n1; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			d_cnjg(&z__1, &a[j + i__ * a_dim1]);
+			arf[i__3].r = z__1.r, arf[i__3].i = z__1.i;
+			++ij;
+		    }
+		}
+		i__1 = n1 - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			i__4 = i__ + j * a_dim1;
+			arf[i__3].r = a[i__4].r, arf[i__3].i = a[i__4].i;
+			++ij;
+		    }
+		    i__2 = *n - 1;
+		    for (l = n2 + j; l <= i__2; ++l) {
+			i__3 = ij;
+			d_cnjg(&z__1, &a[n2 + j + l * a_dim1]);
+			arf[i__3].r = z__1.r, arf[i__3].i = z__1.i;
+			++ij;
+		    }
+		}
+
+	    }
+
+	}
+
+    } else {
+
+/* N is even */
+
+	if (normaltransr) {
+
+/* N is even and TRANSR = 'N' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(1,0), T2 -> a(0,0), S -> a(k+1,0) */
+/* T1 -> a(1), T2 -> a(0), S -> a(k+1); lda=n+1 */
+
+		ij = 0;
+		i__1 = k - 1;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = k + j;
+		    for (i__ = k; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			d_cnjg(&z__1, &a[k + j + i__ * a_dim1]);
+			arf[i__3].r = z__1.r, arf[i__3].i = z__1.i;
+			++ij;
+		    }
+		    i__2 = *n - 1;
+		    for (i__ = j; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			i__4 = i__ + j * a_dim1;
+			arf[i__3].r = a[i__4].r, arf[i__3].i = a[i__4].i;
+			++ij;
+		    }
+		}
+
+	    } else {
+
+/* SRPA for UPPER, NORMAL, and N is even ( a(0:n,0:k-1) ) */
+/* T1 -> a(k+1,0) , T2 -> a(k,0), S -> a(0,0) */
+/* T1 -> a(k+1), T2 -> a(k), S -> a(0); lda=n+1 */
+
+		ij = nt - *n - 1;
+		i__1 = k;
+		for (j = *n - 1; j >= i__1; --j) {
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			i__4 = i__ + j * a_dim1;
+			arf[i__3].r = a[i__4].r, arf[i__3].i = a[i__4].i;
+			++ij;
+		    }
+		    i__2 = k - 1;
+		    for (l = j - k; l <= i__2; ++l) {
+			i__3 = ij;
+			d_cnjg(&z__1, &a[j - k + l * a_dim1]);
+			arf[i__3].r = z__1.r, arf[i__3].i = z__1.i;
+			++ij;
+		    }
+		    ij -= np1x2;
+		}
+
+	    }
+
+	} else {
+
+/* N is even and TRANSR = 'C' */
+
+	    if (lower) {
+
+/* SRPA for LOWER, TRANSPOSE and N is even (see paper, A=B) */
+/* T1 -> A(0,1) , T2 -> A(0,0) , S -> A(0,k+1) : */
+/* T1 -> A(0+k) , T2 -> A(0+0) , S -> A(0+k*(k+1)); lda=k */
+
+		ij = 0;
+		j = k;
+		i__1 = *n - 1;
+		for (i__ = k; i__ <= i__1; ++i__) {
+		    i__2 = ij;
+		    i__3 = i__ + j * a_dim1;
+		    arf[i__2].r = a[i__3].r, arf[i__2].i = a[i__3].i;
+		    ++ij;
+		}
+		i__1 = k - 2;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			d_cnjg(&z__1, &a[j + i__ * a_dim1]);
+			arf[i__3].r = z__1.r, arf[i__3].i = z__1.i;
+			++ij;
+		    }
+		    i__2 = *n - 1;
+		    for (i__ = k + 1 + j; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			i__4 = i__ + (k + 1 + j) * a_dim1;
+			arf[i__3].r = a[i__4].r, arf[i__3].i = a[i__4].i;
+			++ij;
+		    }
+		}
+		i__1 = *n - 1;
+		for (j = k - 1; j <= i__1; ++j) {
+		    i__2 = k - 1;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			d_cnjg(&z__1, &a[j + i__ * a_dim1]);
+			arf[i__3].r = z__1.r, arf[i__3].i = z__1.i;
+			++ij;
+		    }
+		}
+
+	    } else {
+
+/* SRPA for UPPER, TRANSPOSE and N is even (see paper, A=B) */
+/* T1 -> A(0,k+1) , T2 -> A(0,k) , S -> A(0,0) */
+/* T1 -> A(0+k*(k+1)) , T2 -> A(0+k*k) , S -> A(0+0)); lda=k */
+
+		ij = 0;
+		i__1 = k;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = *n - 1;
+		    for (i__ = k; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			d_cnjg(&z__1, &a[j + i__ * a_dim1]);
+			arf[i__3].r = z__1.r, arf[i__3].i = z__1.i;
+			++ij;
+		    }
+		}
+		i__1 = k - 2;
+		for (j = 0; j <= i__1; ++j) {
+		    i__2 = j;
+		    for (i__ = 0; i__ <= i__2; ++i__) {
+			i__3 = ij;
+			i__4 = i__ + j * a_dim1;
+			arf[i__3].r = a[i__4].r, arf[i__3].i = a[i__4].i;
+			++ij;
+		    }
+		    i__2 = *n - 1;
+		    for (l = k + 1 + j; l <= i__2; ++l) {
+			i__3 = ij;
+			d_cnjg(&z__1, &a[k + 1 + j + l * a_dim1]);
+			arf[i__3].r = z__1.r, arf[i__3].i = z__1.i;
+			++ij;
+		    }
+		}
+
+/* Note that here J = K-1 */
+
+		i__1 = j;
+		for (i__ = 0; i__ <= i__1; ++i__) {
+		    i__2 = ij;
+		    i__3 = i__ + j * a_dim1;
+		    arf[i__2].r = a[i__3].r, arf[i__2].i = a[i__3].i;
+		    ++ij;
+		}
+
+	    }
+
+	}
+
+    }
+
+    return 0;
+
+/* End of ZTRTTF */
+
+} /* ztrttf_ */
diff --git a/contrib/libs/clapack/ztrttp.c b/contrib/libs/clapack/ztrttp.c
new file mode 100644
index 0000000000..7ac99cc5d7
--- /dev/null
+++ b/contrib/libs/clapack/ztrttp.c
@@ -0,0 +1,149 @@
+/* ztrttp.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int ztrttp_(char *uplo, integer *n, doublecomplex *a,
+	integer *lda, doublecomplex *ap, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+    /* Local variables */
+    integer i__, j, k;
+    extern logical lsame_(char *, char *);
+    logical lower;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+
+/* -- Contributed by Fred Gustavson of the IBM Watson Research Center -- */
+/* -- and Julien Langou of the Univ. of Colorado Denver -- */
+/* -- November 2008 -- */
+
+/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
+/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTRTTP copies a triangular matrix A from full format (TR) to standard */
+/* packed format (TP). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER */
+/* = 'U': A is upper triangular; */
+/* = 'L': A is lower triangular. */
+
+/* N (input) INTEGER */
+/* The order of the matrices AP and A. N >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the triangular matrix A. If UPLO = 'U', the leading */
+/* N-by-N upper triangular part of A contains the upper */
+/* triangular part of the matrix A, and the strictly lower */
+/* triangular part of A is not referenced. If UPLO = 'L', the */
+/* leading N-by-N lower triangular part of A contains the lower */
+/* triangular part of the matrix A, and the strictly upper */
+/* triangular part of A is not referenced. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* AP (output) COMPLEX*16 array, dimension ( N*(N+1)/2 ), */
+/* On exit, the upper or lower triangular matrix A, packed */
+/* columnwise in a linear array. The j-th column of A is stored */
+/* in the array AP as follows: */
+/* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
+/* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* Shift a and ap so that 1-based Fortran-style subscripts can be used */
+    /* below (standard f2c idiom). */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --ap;
+
+    /* Function Body */
+    *info = 0;
+    lower = lsame_(uplo, "L");
+    if (! lower && ! lsame_(uplo, "U")) {
+	*info = -1;
+    } else if (*n < 0) {
+	*info = -2;
+    } else if (*lda < max(1,*n)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZTRTTP", &i__1);
+	return 0;
+    }
+
+/* Pack the referenced triangle columnwise; k is the running 1-based */
+/* index into AP. */
+
+    if (lower) {
+	k = 0;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = *n;
+	    for (i__ = j; i__ <= i__2; ++i__) {
+		++k;
+		i__3 = k;
+		i__4 = i__ + j * a_dim1;
+		ap[i__3].r = a[i__4].r, ap[i__3].i = a[i__4].i;
+	    }
+	}
+    } else {
+	k = 0;
+	i__1 = *n;
+	for (j = 1; j <= i__1; ++j) {
+	    i__2 = j;
+	    for (i__ = 1; i__ <= i__2; ++i__) {
+		++k;
+		i__3 = k;
+		i__4 = i__ + j * a_dim1;
+		ap[i__3].r = a[i__4].r, ap[i__3].i = a[i__4].i;
+	    }
+	}
+    }
+
+
+    return 0;
+
+/* End of ZTRTTP */
+
+} /* ztrttp_ */
diff --git a/contrib/libs/clapack/ztzrqf.c b/contrib/libs/clapack/ztzrqf.c
new file mode 100644
index 0000000000..c096d8f38e
--- /dev/null
+++ b/contrib/libs/clapack/ztzrqf.c
@@ -0,0 +1,241 @@
+/* ztzrqf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static doublecomplex c_b1 = {1.,0.};
+static integer c__1 = 1;
+
+/* Subroutine */ int ztzrqf_(integer *m, integer *n, doublecomplex *a,
+	integer *lda, doublecomplex *tau, integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2;
+    doublecomplex z__1, z__2;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__, k, m1;
+    doublecomplex alpha;
+    extern /* Subroutine */ int zgerc_(integer *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *), zgemv_(char *, integer *, integer *,
+	    doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, doublecomplex *, integer *),
+	    zcopy_(integer *, doublecomplex *, integer *, doublecomplex *,
+	    integer *), zaxpy_(integer *, doublecomplex *, doublecomplex *,
+	    integer *, doublecomplex *, integer *), xerbla_(char *, integer *), zlacgv_(integer *, doublecomplex *, integer *), zlarfp_(
+	    integer *, doublecomplex *, doublecomplex *, integer *,
+	    doublecomplex *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* This routine is deprecated and has been replaced by routine ZTZRZF. */
+
+/* ZTZRQF reduces the M-by-N ( M<=N ) complex upper trapezoidal matrix A */
+/* to upper triangular form by means of unitary transformations. */
+
+/* The upper trapezoidal matrix A is factored as */
+
+/* A = ( R 0 ) * Z, */
+
+/* where Z is an N-by-N unitary matrix and R is an M-by-M upper */
+/* triangular matrix. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= M. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the leading M-by-N upper trapezoidal part of the */
+/* array A must contain the matrix to be factorized. */
+/* On exit, the leading M-by-M upper triangular part of A */
+/* contains the upper triangular matrix R, and elements M+1 to */
+/* N of the first M rows of A, with the array TAU, represent the */
+/* unitary matrix Z as a product of M elementary reflectors. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX*16 array, dimension (M) */
+/* The scalar factors of the elementary reflectors. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* The factorization is obtained by Householder's method. The kth */
+/* transformation matrix, Z( k ), whose conjugate transpose is used to */
+/* introduce zeros into the (m - k + 1)th row of A, is given in the form */
+
+/* Z( k ) = ( I 0 ), */
+/* ( 0 T( k ) ) */
+
+/* where */
+
+/* T( k ) = I - tau*u( k )*u( k )', u( k ) = ( 1 ), */
+/* ( 0 ) */
+/* ( z( k ) ) */
+
+/* tau is a scalar and z( k ) is an ( n - m ) element vector. */
+/* tau and z( k ) are chosen to annihilate the elements of the kth row */
+/* of X. */
+
+/* The scalar tau is returned in the kth element of TAU and the vector */
+/* u( k ) in the kth row of A, such that the elements of z( k ) are */
+/* in a( k, m + 1 ), ..., a( k, n ). The elements of R are returned in */
+/* the upper triangular part of A. */
+
+/* Z is given by */
+
+/* Z = Z( 1 ) * Z( 2 ) * ... * Z( m ). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input parameters. */
+
+    /* Parameter adjustments */
+    /* Shift a and tau for 1-based Fortran-style indexing (f2c idiom). */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+
+    /* Function Body */
+    *info = 0;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < *m) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZTZRQF", &i__1);
+	return 0;
+    }
+
+/* Perform the factorization. */
+
+    if (*m == 0) {
+	return 0;
+    }
+    if (*m == *n) {
+
+/* A is already square upper triangular: all reflectors are the */
+/* identity, so just zero out TAU. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__;
+	    tau[i__2].r = 0., tau[i__2].i = 0.;
+/* L10: */
+	}
+    } else {
+/* Computing MIN */
+/* m1 = index of the first column of the trailing ( n - m ) block. */
+	i__1 = *m + 1;
+	m1 = min(i__1,*n);
+	for (k = *m; k >= 1; --k) {
+
+/* Use a Householder reflection to zero the kth row of A. */
+/* First set up the reflection. */
+
+/* NOTE(review): A(k,k) is overwritten with its conjugate below, and */
+/* zlacgv_ presumably conjugates the row segment A(k,m1:n) before the */
+/* reflector is generated -- confirm against the ZLACGV documentation. */
+
+	    i__1 = k + k * a_dim1;
+	    d_cnjg(&z__1, &a[k + k * a_dim1]);
+	    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+	    i__1 = *n - *m;
+	    zlacgv_(&i__1, &a[k + m1 * a_dim1], lda);
+	    i__1 = k + k * a_dim1;
+	    alpha.r = a[i__1].r, alpha.i = a[i__1].i;
+	    i__1 = *n - *m + 1;
+	    zlarfp_(&i__1, &alpha, &a[k + m1 * a_dim1], lda, &tau[k]);
+	    i__1 = k + k * a_dim1;
+	    a[i__1].r = alpha.r, a[i__1].i = alpha.i;
+	    i__1 = k;
+	    d_cnjg(&z__1, &tau[k]);
+	    tau[i__1].r = z__1.r, tau[i__1].i = z__1.i;
+
+	    i__1 = k;
+	    if ((tau[i__1].r != 0. || tau[i__1].i != 0.) && k > 1) {
+
+/* We now perform the operation A := A*P( k )'. */
+
+/* Use the first ( k - 1 ) elements of TAU to store a( k ), */
+/* where a( k ) consists of the first ( k - 1 ) elements of */
+/* the kth column of A. Also let B denote the first */
+/* ( k - 1 ) rows of the last ( n - m ) columns of A. */
+
+		i__1 = k - 1;
+		zcopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &tau[1], &c__1);
+
+/* Form w = a( k ) + B*z( k ) in TAU. */
+
+		i__1 = k - 1;
+		i__2 = *n - *m;
+		zgemv_("No transpose", &i__1, &i__2, &c_b1, &a[m1 * a_dim1 +
+			1], lda, &a[k + m1 * a_dim1], lda, &c_b1, &tau[1], &
+			c__1);
+
+/* Now form a( k ) := a( k ) - conjg(tau)*w */
+/* and B := B - conjg(tau)*w*z( k )'. */
+
+		i__1 = k - 1;
+		d_cnjg(&z__2, &tau[k]);
+		z__1.r = -z__2.r, z__1.i = -z__2.i;
+		zaxpy_(&i__1, &z__1, &tau[1], &c__1, &a[k * a_dim1 + 1], &
+			c__1);
+		i__1 = k - 1;
+		i__2 = *n - *m;
+		d_cnjg(&z__2, &tau[k]);
+		z__1.r = -z__2.r, z__1.i = -z__2.i;
+		zgerc_(&i__1, &i__2, &z__1, &tau[1], &c__1, &a[k + m1 *
+			a_dim1], lda, &a[m1 * a_dim1 + 1], lda);
+	    }
+/* L20: */
+	}
+    }
+
+    return 0;
+
+/* End of ZTZRQF */
+
+} /* ztzrqf_ */
diff --git a/contrib/libs/clapack/ztzrzf.c b/contrib/libs/clapack/ztzrzf.c
new file mode 100644
index 0000000000..b18feebfd2
--- /dev/null
+++ b/contrib/libs/clapack/ztzrzf.c
@@ -0,0 +1,313 @@
+/* ztzrzf.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int ztzrzf_(integer *m, integer *n, doublecomplex *a,
+	integer *lda, doublecomplex *tau, doublecomplex *work, integer *lwork,
+	 integer *info)
+{
+    /* System generated locals */
+    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+
+    /* Local variables */
+    integer i__, m1, ib, nb, ki, kk, mu, nx, iws, nbmin;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer ldwork;
+    extern /* Subroutine */ int zlarzb_(char *, char *, char *, char *,
+	    integer *, integer *, integer *, integer *, doublecomplex *,
+	    integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+	     doublecomplex *, integer *);
+    integer lwkopt;
+    logical lquery;
+    extern /* Subroutine */ int zlarzt_(char *, char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *), zlatrz_(integer *, integer *, integer
+	    *, doublecomplex *, integer *, doublecomplex *, doublecomplex *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZTZRZF reduces the M-by-N ( M<=N ) complex upper trapezoidal matrix A */
+/* to upper triangular form by means of unitary transformations. */
+
+/* The upper trapezoidal matrix A is factored as */
+
+/* A = ( R 0 ) * Z, */
+
+/* where Z is an N-by-N unitary matrix and R is an M-by-M upper */
+/* triangular matrix. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix A. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix A. N >= M. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the leading M-by-N upper trapezoidal part of the */
+/* array A must contain the matrix to be factorized. */
+/* On exit, the leading M-by-M upper triangular part of A */
+/* contains the upper triangular matrix R, and elements M+1 to */
+/* N of the first M rows of A, with the array TAU, represent the */
+/* unitary matrix Z as a product of M elementary reflectors. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (output) COMPLEX*16 array, dimension (M) */
+/* The scalar factors of the elementary reflectors. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* The factorization is obtained by Householder's method. The kth */
+/* transformation matrix, Z( k ), which is used to introduce zeros into */
+/* the ( m - k + 1 )th row of A, is given in the form */
+
+/* Z( k ) = ( I 0 ), */
+/* ( 0 T( k ) ) */
+
+/* where */
+
+/* T( k ) = I - tau*u( k )*u( k )', u( k ) = ( 1 ), */
+/* ( 0 ) */
+/* ( z( k ) ) */
+
+/* tau is a scalar and z( k ) is an ( n - m ) element vector. */
+/* tau and z( k ) are chosen to annihilate the elements of the kth row */
+/* of X. */
+
+/* The scalar tau is returned in the kth element of TAU and the vector */
+/* u( k ) in the kth row of A, such that the elements of z( k ) are */
+/* in a( k, m + 1 ), ..., a( k, n ). The elements of R are returned in */
+/* the upper triangular part of A. */
+
+/* Z is given by */
+
+/* Z = Z( 1 ) * Z( 2 ) * ... * Z( m ). */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+    /* Shift a, tau and work for 1-based Fortran-style indexing (f2c idiom). */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset;
+    --tau;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    lquery = *lwork == -1;
+    if (*m < 0) {
+	*info = -1;
+    } else if (*n < *m) {
+	*info = -2;
+    } else if (*lda < max(1,*m)) {
+	*info = -4;
+    }
+
+    if (*info == 0) {
+	if (*m == 0 || *m == *n) {
+	    lwkopt = 1;
+	} else {
+
+/* Determine the block size. */
+
+	    nb = ilaenv_(&c__1, "ZGERQF", " ", m, n, &c_n1, &c_n1);
+	    lwkopt = *m * nb;
+	}
+	work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+	if (*lwork < max(1,*m) && ! lquery) {
+	    *info = -7;
+	}
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZTZRZF", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0) {
+	return 0;
+    } else if (*m == *n) {
+
+/* Square case: nothing to annihilate, all reflectors are identity. */
+
+	i__1 = *n;
+	for (i__ = 1; i__ <= i__1; ++i__) {
+	    i__2 = i__;
+	    tau[i__2].r = 0., tau[i__2].i = 0.;
+/* L10: */
+	}
+	return 0;
+    }
+
+/* Blocking parameters (queried via ILAENV with the ZGERQF name): */
+/* NBMIN = minimum useful block size, NX = crossover point to */
+/* unblocked code, IWS = workspace actually required. */
+
+    nbmin = 2;
+    nx = 1;
+    iws = *m;
+    if (nb > 1 && nb < *m) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+	i__1 = 0, i__2 = ilaenv_(&c__3, "ZGERQF", " ", m, n, &c_n1, &c_n1);
+	nx = max(i__1,i__2);
+	if (nx < *m) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+	    ldwork = *m;
+	    iws = ldwork * nb;
+	    if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+		nb = *lwork / ldwork;
+/* Computing MAX */
+		i__1 = 2, i__2 = ilaenv_(&c__2, "ZGERQF", " ", m, n, &c_n1, &
+			c_n1);
+		nbmin = max(i__1,i__2);
+	    }
+	}
+    }
+
+    if (nb >= nbmin && nb < *m && nx < *m) {
+
+/* Use blocked code initially. */
+/* The last kk rows are handled by the block method. */
+
+/* Computing MIN */
+	i__1 = *m + 1;
+	m1 = min(i__1,*n);
+	ki = (*m - nx - 1) / nb * nb;
+/* Computing MIN */
+	i__1 = *m, i__2 = ki + nb;
+	kk = min(i__1,i__2);
+
+	i__1 = *m - kk + 1;
+	i__2 = -nb;
+	for (i__ = *m - kk + ki + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1;
+		i__ += i__2) {
+/* Computing MIN */
+	    i__3 = *m - i__ + 1;
+	    ib = min(i__3,nb);
+
+/* Compute the TZ factorization of the current block */
+/* A(i:i+ib-1,i:n) */
+
+	    i__3 = *n - i__ + 1;
+	    i__4 = *n - *m;
+	    zlatrz_(&ib, &i__3, &i__4, &a[i__ + i__ * a_dim1], lda, &tau[i__],
+		     &work[1]);
+	    if (i__ > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+		i__3 = *n - *m;
+		zlarzt_("Backward", "Rowwise", &i__3, &ib, &a[i__ + m1 *
+			a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(1:i-1,i:n) from the right */
+
+		i__3 = i__ - 1;
+		i__4 = *n - i__ + 1;
+		i__5 = *n - *m;
+		zlarzb_("Right", "No transpose", "Backward", "Rowwise", &i__3,
+			 &i__4, &ib, &i__5, &a[i__ + m1 * a_dim1], lda, &work[
+			1], &ldwork, &a[i__ * a_dim1 + 1], lda, &work[ib + 1],
+			 &ldwork)
+			;
+	    }
+/* L20: */
+	}
+
+/* mu = number of rows left for the final unblocked factorization. */
+
+	mu = i__ + nb - 1;
+    } else {
+	mu = *m;
+    }
+
+/* Use unblocked code to factor the last or only block */
+
+    if (mu > 0) {
+	i__2 = *n - *m;
+	zlatrz_(&mu, n, &i__2, &a[a_offset], lda, &tau[1], &work[1]);
+    }
+
+/* Return the optimal workspace size in WORK(1). */
+
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+    return 0;
+
+/* End of ZTZRZF */
+
+} /* ztzrzf_ */
diff --git a/contrib/libs/clapack/zung2l.c b/contrib/libs/clapack/zung2l.c
new file mode 100644
index 0000000000..1c75899b60
--- /dev/null
+++ b/contrib/libs/clapack/zung2l.c
@@ -0,0 +1,183 @@
+/* zung2l.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zung2l_(integer *m, integer *n, integer *k,
+ doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+ work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__, j, l, ii;
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *), zlarf_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNG2L generates an m by n complex matrix Q with orthonormal columns, */
+/* which is defined as the last n columns of a product of k elementary */
+/* reflectors of order m */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by ZGEQLF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. M >= N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. N >= K >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the (n-k+i)-th column must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by ZGEQLF in the last k columns of its array */
+/* argument A. */
+/* On exit, the m-by-n matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) COMPLEX*16 array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZGEQLF. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0 || *n > *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *n) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNG2L", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ return 0;
+ }
+
+/* Initialise columns 1:n-k to columns of the unit matrix */
+
+ i__1 = *n - *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = l + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L10: */
+ }
+ i__2 = *m - *n + j + j * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+/* L20: */
+ }
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ii = *n - *k + i__;
+
+/* Apply H(i) to A(1:m-k+i,1:n-k+i) from the left */
+
+ i__2 = *m - *n + ii + ii * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+ i__2 = *m - *n + ii;
+ i__3 = ii - 1;
+ zlarf_("Left", &i__2, &i__3, &a[ii * a_dim1 + 1], &c__1, &tau[i__], &
+ a[a_offset], lda, &work[1]);
+ i__2 = *m - *n + ii - 1;
+ i__3 = i__;
+ z__1.r = -tau[i__3].r, z__1.i = -tau[i__3].i;
+ zscal_(&i__2, &z__1, &a[ii * a_dim1 + 1], &c__1);
+ i__2 = *m - *n + ii + ii * a_dim1;
+ i__3 = i__;
+ z__1.r = 1. - tau[i__3].r, z__1.i = 0. - tau[i__3].i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+
+/* Set A(m-k+i+1:m,n-k+i) to zero */
+
+ i__2 = *m;
+ for (l = *m - *n + ii + 1; l <= i__2; ++l) {
+ i__3 = l + ii * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of ZUNG2L */
+
+} /* zung2l_ */
diff --git a/contrib/libs/clapack/zung2r.c b/contrib/libs/clapack/zung2r.c
new file mode 100644
index 0000000000..9acf7f7606
--- /dev/null
+++ b/contrib/libs/clapack/zung2r.c
@@ -0,0 +1,185 @@
+/* zung2r.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zung2r_(integer *m, integer *n, integer *k,
+ doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+ work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublecomplex z__1;
+
+ /* Local variables */
+ integer i__, j, l;
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *), zlarf_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNG2R generates an m by n complex matrix Q with orthonormal columns, */
+/* which is defined as the first n columns of a product of k elementary */
+/* reflectors of order m */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by ZGEQRF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. M >= N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. N >= K >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the i-th column must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by ZGEQRF in the first k columns of its array */
+/* argument A. */
+/* On exit, the m by n matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) COMPLEX*16 array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZGEQRF. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0 || *n > *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *n) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNG2R", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ return 0;
+ }
+
+/* Initialise columns k+1:n to columns of the unit matrix */
+
+ i__1 = *n;
+ for (j = *k + 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = l + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L10: */
+ }
+ i__2 = j + j * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+/* L20: */
+ }
+
+ for (i__ = *k; i__ >= 1; --i__) {
+
+/* Apply H(i) to A(i:m,i:n) from the left */
+
+ if (i__ < *n) {
+ i__1 = i__ + i__ * a_dim1;
+ a[i__1].r = 1., a[i__1].i = 0.;
+ i__1 = *m - i__ + 1;
+ i__2 = *n - i__;
+ zlarf_("Left", &i__1, &i__2, &a[i__ + i__ * a_dim1], &c__1, &tau[
+ i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ }
+ if (i__ < *m) {
+ i__1 = *m - i__;
+ i__2 = i__;
+ z__1.r = -tau[i__2].r, z__1.i = -tau[i__2].i;
+ zscal_(&i__1, &z__1, &a[i__ + 1 + i__ * a_dim1], &c__1);
+ }
+ i__1 = i__ + i__ * a_dim1;
+ i__2 = i__;
+ z__1.r = 1. - tau[i__2].r, z__1.i = 0. - tau[i__2].i;
+ a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+
+/* Set A(1:i-1,i) to zero */
+
+ i__1 = i__ - 1;
+ for (l = 1; l <= i__1; ++l) {
+ i__2 = l + i__ * a_dim1;
+ a[i__2].r = 0., a[i__2].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of ZUNG2R */
+
+} /* zung2r_ */
diff --git a/contrib/libs/clapack/zungbr.c b/contrib/libs/clapack/zungbr.c
new file mode 100644
index 0000000000..f620c41c05
--- /dev/null
+++ b/contrib/libs/clapack/zungbr.c
@@ -0,0 +1,310 @@
+/* zungbr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zungbr_(char *vect, integer *m, integer *n, integer *k,
+ doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+ work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ integer i__, j, nb, mn;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical wantq;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int zunglq_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *), zungqr_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNGBR generates one of the complex unitary matrices Q or P**H */
+/* determined by ZGEBRD when reducing a complex matrix A to bidiagonal */
+/* form: A = Q * B * P**H. Q and P**H are defined as products of */
+/* elementary reflectors H(i) or G(i) respectively. */
+
+/* If VECT = 'Q', A is assumed to have been an M-by-K matrix, and Q */
+/* is of order M: */
+/* if m >= k, Q = H(1) H(2) . . . H(k) and ZUNGBR returns the first n */
+/* columns of Q, where m >= n >= k; */
+/* if m < k, Q = H(1) H(2) . . . H(m-1) and ZUNGBR returns Q as an */
+/* M-by-M matrix. */
+
+/* If VECT = 'P', A is assumed to have been a K-by-N matrix, and P**H */
+/* is of order N: */
+/* if k < n, P**H = G(k) . . . G(2) G(1) and ZUNGBR returns the first m */
+/* rows of P**H, where n >= m >= k; */
+/* if k >= n, P**H = G(n-1) . . . G(2) G(1) and ZUNGBR returns P**H as */
+/* an N-by-N matrix. */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* Specifies whether the matrix Q or the matrix P**H is */
+/* required, as defined in the transformation applied by ZGEBRD: */
+/* = 'Q': generate Q; */
+/* = 'P': generate P**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q or P**H to be returned. */
+/* M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q or P**H to be returned. */
+/* N >= 0. */
+/* If VECT = 'Q', M >= N >= min(M,K); */
+/* if VECT = 'P', N >= M >= min(N,K). */
+
+/* K (input) INTEGER */
+/* If VECT = 'Q', the number of columns in the original M-by-K */
+/* matrix reduced by ZGEBRD. */
+/* If VECT = 'P', the number of rows in the original K-by-N */
+/* matrix reduced by ZGEBRD. */
+/* K >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the vectors which define the elementary reflectors, */
+/* as returned by ZGEBRD. */
+/* On exit, the M-by-N matrix Q or P**H. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= M. */
+
+/* TAU (input) COMPLEX*16 array, dimension */
+/* (min(M,K)) if VECT = 'Q' */
+/* (min(N,K)) if VECT = 'P' */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i) or G(i), which determines Q or P**H, as */
+/* returned by ZGEBRD in its array argument TAUQ or TAUP. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,min(M,N)). */
+/* For optimum performance LWORK >= min(M,N)*NB, where NB */
+/* is the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ wantq = lsame_(vect, "Q");
+ mn = min(*m,*n);
+ lquery = *lwork == -1;
+ if (! wantq && ! lsame_(vect, "P")) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0 || wantq && (*n > *m || *n < min(*m,*k)) || ! wantq && (
+ *m > *n || *m < min(*n,*k))) {
+ *info = -3;
+ } else if (*k < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*m)) {
+ *info = -6;
+ } else if (*lwork < max(1,mn) && ! lquery) {
+ *info = -9;
+ }
+
+ if (*info == 0) {
+ if (wantq) {
+ nb = ilaenv_(&c__1, "ZUNGQR", " ", m, n, k, &c_n1);
+ } else {
+ nb = ilaenv_(&c__1, "ZUNGLQ", " ", m, n, k, &c_n1);
+ }
+ lwkopt = max(1,mn) * nb;
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNGBR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ work[1].r = 1., work[1].i = 0.;
+ return 0;
+ }
+
+ if (wantq) {
+
+/* Form Q, determined by a call to ZGEBRD to reduce an m-by-k */
+/* matrix */
+
+ if (*m >= *k) {
+
+/* If m >= k, assume m >= n >= k */
+
+ zungqr_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
+ iinfo);
+
+ } else {
+
+/* If m < k, assume m = n */
+
+/* Shift the vectors which define the elementary reflectors one */
+/* column to the right, and set the first row and column of Q */
+/* to those of the unit matrix */
+
+ for (j = *m; j >= 2; --j) {
+ i__1 = j * a_dim1 + 1;
+ a[i__1].r = 0., a[i__1].i = 0.;
+ i__1 = *m;
+ for (i__ = j + 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__ + (j - 1) * a_dim1;
+ a[i__2].r = a[i__3].r, a[i__2].i = a[i__3].i;
+/* L10: */
+ }
+/* L20: */
+ }
+ i__1 = a_dim1 + 1;
+ a[i__1].r = 1., a[i__1].i = 0.;
+ i__1 = *m;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ i__2 = i__ + a_dim1;
+ a[i__2].r = 0., a[i__2].i = 0.;
+/* L30: */
+ }
+ if (*m > 1) {
+
+/* Form Q(2:m,2:m) */
+
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ zungqr_(&i__1, &i__2, &i__3, &a[(a_dim1 << 1) + 2], lda, &tau[
+ 1], &work[1], lwork, &iinfo);
+ }
+ }
+ } else {
+
+/* Form P', determined by a call to ZGEBRD to reduce a k-by-n */
+/* matrix */
+
+ if (*k < *n) {
+
+/* If k < n, assume k <= m <= n */
+
+ zunglq_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
+ iinfo);
+
+ } else {
+
+/* If k >= n, assume m = n */
+
+/* Shift the vectors which define the elementary reflectors one */
+/* row downward, and set the first row and column of P' to */
+/* those of the unit matrix */
+
+ i__1 = a_dim1 + 1;
+ a[i__1].r = 1., a[i__1].i = 0.;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ i__2 = i__ + a_dim1;
+ a[i__2].r = 0., a[i__2].i = 0.;
+/* L40: */
+ }
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ for (i__ = j - 1; i__ >= 2; --i__) {
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__ - 1 + j * a_dim1;
+ a[i__2].r = a[i__3].r, a[i__2].i = a[i__3].i;
+/* L50: */
+ }
+ i__2 = j * a_dim1 + 1;
+ a[i__2].r = 0., a[i__2].i = 0.;
+/* L60: */
+ }
+ if (*n > 1) {
+
+/* Form P'(2:n,2:n) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zunglq_(&i__1, &i__2, &i__3, &a[(a_dim1 << 1) + 2], lda, &tau[
+ 1], &work[1], lwork, &iinfo);
+ }
+ }
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ return 0;
+
+/* End of ZUNGBR */
+
+} /* zungbr_ */
diff --git a/contrib/libs/clapack/zunghr.c b/contrib/libs/clapack/zunghr.c
new file mode 100644
index 0000000000..b78c018a6a
--- /dev/null
+++ b/contrib/libs/clapack/zunghr.c
@@ -0,0 +1,224 @@
+/* zunghr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zunghr_(integer *n, integer *ilo, integer *ihi,
+ doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+ work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j, nb, nh, iinfo;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int zungqr_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNGHR generates a complex unitary matrix Q which is defined as the */
+/* product of IHI-ILO elementary reflectors of order N, as returned by */
+/* ZGEHRD: */
+
+/* Q = H(ilo) H(ilo+1) . . . H(ihi-1). */
+
+/* Arguments */
+/* ========= */
+
+/* N (input) INTEGER */
+/* The order of the matrix Q. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* ILO and IHI must have the same values as in the previous call */
+/* of ZGEHRD. Q is equal to the unit matrix except in the */
+/* submatrix Q(ilo+1:ihi,ilo+1:ihi). */
+/* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the vectors which define the elementary reflectors, */
+/* as returned by ZGEHRD. */
+/* On exit, the N-by-N unitary matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,N). */
+
+/* TAU (input) COMPLEX*16 array, dimension (N-1) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZGEHRD. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= IHI-ILO. */
+/* For optimum performance LWORK >= (IHI-ILO)*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nh = *ihi - *ilo;
+ lquery = *lwork == -1;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*ilo < 1 || *ilo > max(1,*n)) {
+ *info = -2;
+ } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*lwork < max(1,nh) && ! lquery) {
+ *info = -8;
+ }
+
+ if (*info == 0) {
+ nb = ilaenv_(&c__1, "ZUNGQR", " ", &nh, &nh, &nh, &c_n1);
+ lwkopt = max(1,nh) * nb;
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNGHR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ work[1].r = 1., work[1].i = 0.;
+ return 0;
+ }
+
+/* Shift the vectors which define the elementary reflectors one */
+/* column to the right, and set the first ilo and the last n-ihi */
+/* rows and columns to those of the unit matrix */
+
+ i__1 = *ilo + 1;
+ for (j = *ihi; j >= i__1; --j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L10: */
+ }
+ i__2 = *ihi;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + (j - 1) * a_dim1;
+ a[i__3].r = a[i__4].r, a[i__3].i = a[i__4].i;
+/* L20: */
+ }
+ i__2 = *n;
+ for (i__ = *ihi + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+ i__1 = *ilo;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L50: */
+ }
+ i__2 = j + j * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+/* L60: */
+ }
+ i__1 = *n;
+ for (j = *ihi + 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L70: */
+ }
+ i__2 = j + j * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+/* L80: */
+ }
+
+ if (nh > 0) {
+
+/* Generate Q(ilo+1:ihi,ilo+1:ihi) */
+
+ zungqr_(&nh, &nh, &nh, &a[*ilo + 1 + (*ilo + 1) * a_dim1], lda, &tau[*
+ ilo], &work[1], lwork, &iinfo);
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ return 0;
+
+/* End of ZUNGHR */
+
+} /* zunghr_ */
diff --git a/contrib/libs/clapack/zungl2.c b/contrib/libs/clapack/zungl2.c
new file mode 100644
index 0000000000..ead79743b3
--- /dev/null
+++ b/contrib/libs/clapack/zungl2.c
@@ -0,0 +1,193 @@
+/* zungl2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zungl2_(integer *m, integer *n, integer *k,
+ doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+ work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, l;
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *), zlarf_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *), xerbla_(char *, integer *), zlacgv_(integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNGL2 generates an m-by-n complex matrix Q with orthonormal rows, */
+/* which is defined as the first m rows of a product of k elementary */
+/* reflectors of order n */
+
+/* Q = H(k)' . . . H(2)' H(1)' */
+
+/* as returned by ZGELQF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. N >= M. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. M >= K >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the i-th row must contain the vector which defines */
+/* the elementary reflector H(i), for i = 1,2,...,k, as returned */
+/* by ZGELQF in the first k rows of its array argument A. */
+/* On exit, the m by n matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) COMPLEX*16 array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZGELQF. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (M) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *m) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNGL2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m <= 0) {
+ return 0;
+ }
+
+ if (*k < *m) {
+
+/* Initialise rows k+1:m to rows of the unit matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (l = *k + 1; l <= i__2; ++l) {
+ i__3 = l + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L10: */
+ }
+ if (j > *k && j <= *m) {
+ i__2 = j + j * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+ }
+/* L20: */
+ }
+ }
+
+ for (i__ = *k; i__ >= 1; --i__) {
+
+/* Apply H(i)' to A(i:m,i:n) from the right */
+
+ if (i__ < *n) {
+ i__1 = *n - i__;
+ zlacgv_(&i__1, &a[i__ + (i__ + 1) * a_dim1], lda);
+ if (i__ < *m) {
+ i__1 = i__ + i__ * a_dim1;
+ a[i__1].r = 1., a[i__1].i = 0.;
+ i__1 = *m - i__;
+ i__2 = *n - i__ + 1;
+ d_cnjg(&z__1, &tau[i__]);
+ zlarf_("Right", &i__1, &i__2, &a[i__ + i__ * a_dim1], lda, &
+ z__1, &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+ }
+ i__1 = *n - i__;
+ i__2 = i__;
+ z__1.r = -tau[i__2].r, z__1.i = -tau[i__2].i;
+ zscal_(&i__1, &z__1, &a[i__ + (i__ + 1) * a_dim1], lda);
+ i__1 = *n - i__;
+ zlacgv_(&i__1, &a[i__ + (i__ + 1) * a_dim1], lda);
+ }
+ i__1 = i__ + i__ * a_dim1;
+ d_cnjg(&z__2, &tau[i__]);
+ z__1.r = 1. - z__2.r, z__1.i = 0. - z__2.i;
+ a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+
+/* Set A(i,1:i-1) to zero */
+
+ i__1 = i__ - 1;
+ for (l = 1; l <= i__1; ++l) {
+ i__2 = i__ + l * a_dim1;
+ a[i__2].r = 0., a[i__2].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of ZUNGL2 */
+
+} /* zungl2_ */
diff --git a/contrib/libs/clapack/zunglq.c b/contrib/libs/clapack/zunglq.c
new file mode 100644
index 0000000000..d871b62195
--- /dev/null
+++ b/contrib/libs/clapack/zunglq.c
@@ -0,0 +1,287 @@
+/* zunglq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int zunglq_(integer *m, integer *n, integer *k,
+ doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+ work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int zungl2_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+ integer *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ integer ldwork;
+ extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ logical lquery;
+ integer lwkopt;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNGLQ generates an M-by-N complex matrix Q with orthonormal rows, */
+/* which is defined as the first M rows of a product of K elementary */
+/* reflectors of order N */
+
+/* Q = H(k)' . . . H(2)' H(1)' */
+
+/* as returned by ZGELQF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. N >= M. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. M >= K >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the i-th row must contain the vector which defines */
+/* the elementary reflector H(i), for i = 1,2,...,k, as returned */
+/* by ZGELQF in the first k rows of its array argument A. */
+/* On exit, the M-by-N matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) COMPLEX*16 array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZGELQF. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit; */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "ZUNGLQ", " ", m, n, k, &c_n1);
+ lwkopt = max(1,*m) * nb;
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *m) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*lwork < max(1,*m) && ! lquery) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNGLQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m <= 0) {
+ work[1].r = 1., work[1].i = 0.;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *m;
+ if (nb > 1 && nb < *k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "ZUNGLQ", " ", m, n, k, &c_n1);
+ nx = max(i__1,i__2);
+ if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *m;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "ZUNGLQ", " ", m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < *k && nx < *k) {
+
+/* Use blocked code after the last block. */
+/* The first kk rows are handled by the block method. */
+
+ ki = (*k - nx - 1) / nb * nb;
+/* Computing MIN */
+ i__1 = *k, i__2 = ki + nb;
+ kk = min(i__1,i__2);
+
+/* Set A(kk+1:m,1:kk) to zero. */
+
+ i__1 = kk;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = kk + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ kk = 0;
+ }
+
+/* Use unblocked code for the last or only block. */
+
+ if (kk < *m) {
+ i__1 = *m - kk;
+ i__2 = *n - kk;
+ i__3 = *k - kk;
+ zungl2_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
+ tau[kk + 1], &work[1], &iinfo);
+ }
+
+ if (kk > 0) {
+
+/* Use blocked code */
+
+ i__1 = -nb;
+ for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
+/* Computing MIN */
+ i__2 = nb, i__3 = *k - i__ + 1;
+ ib = min(i__2,i__3);
+ if (i__ + ib <= *m) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+ i__2 = *n - i__ + 1;
+ zlarft_("Forward", "Rowwise", &i__2, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(i+ib:m,i:n) from the right */
+
+ i__2 = *m - i__ - ib + 1;
+ i__3 = *n - i__ + 1;
+ zlarfb_("Right", "Conjugate transpose", "Forward", "Rowwise",
+ &i__2, &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[
+ 1], &ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[
+ ib + 1], &ldwork);
+ }
+
+/* Apply H' to columns i:n of current block */
+
+ i__2 = *n - i__ + 1;
+ zungl2_(&ib, &i__2, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
+ work[1], &iinfo);
+
+/* Set columns 1:i-1 of current block to zero */
+
+ i__2 = i__ - 1;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = i__ + ib - 1;
+ for (l = i__; l <= i__3; ++l) {
+ i__4 = l + j * a_dim1;
+ a[i__4].r = 0., a[i__4].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ }
+
+ work[1].r = (doublereal) iws, work[1].i = 0.;
+ return 0;
+
+/* End of ZUNGLQ */
+
+} /* zunglq_ */
diff --git a/contrib/libs/clapack/zungql.c b/contrib/libs/clapack/zungql.c
new file mode 100644
index 0000000000..19906cfe36
--- /dev/null
+++ b/contrib/libs/clapack/zungql.c
@@ -0,0 +1,296 @@
+/* zungql.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int zungql_(integer *m, integer *n, integer *k,
+ doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+ work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+
+ /* Local variables */
+ integer i__, j, l, ib, nb, kk, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int zung2l_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+ integer *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ integer ldwork;
+ extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ logical lquery;
+ integer lwkopt;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNGQL generates an M-by-N complex matrix Q with orthonormal columns, */
+/* which is defined as the last N columns of a product of K elementary */
+/* reflectors of order M */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by ZGEQLF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. M >= N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. N >= K >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the (n-k+i)-th column must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by ZGEQLF in the last k columns of its array */
+/* argument A. */
+/* On exit, the M-by-N matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) COMPLEX*16 array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZGEQLF. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0 || *n > *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *n) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+
+ if (*info == 0) {
+ if (*n == 0) {
+ lwkopt = 1;
+ } else {
+ nb = ilaenv_(&c__1, "ZUNGQL", " ", m, n, k, &c_n1);
+ lwkopt = *n * nb;
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+ if (*lwork < max(1,*n) && ! lquery) {
+ *info = -8;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNGQL", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *n;
+ if (nb > 1 && nb < *k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "ZUNGQL", " ", m, n, k, &c_n1);
+ nx = max(i__1,i__2);
+ if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "ZUNGQL", " ", m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < *k && nx < *k) {
+
+/* Use blocked code after the first block. */
+/* The last kk columns are handled by the block method. */
+
+/* Computing MIN */
+ i__1 = *k, i__2 = (*k - nx + nb - 1) / nb * nb;
+ kk = min(i__1,i__2);
+
+/* Set A(m-kk+1:m,1:n-kk) to zero. */
+
+ i__1 = *n - kk;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = *m - kk + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ kk = 0;
+ }
+
+/* Use unblocked code for the first or only block. */
+
+ i__1 = *m - kk;
+ i__2 = *n - kk;
+ i__3 = *k - kk;
+ zung2l_(&i__1, &i__2, &i__3, &a[a_offset], lda, &tau[1], &work[1], &iinfo)
+ ;
+
+ if (kk > 0) {
+
+/* Use blocked code */
+
+ i__1 = *k;
+ i__2 = nb;
+ for (i__ = *k - kk + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = nb, i__4 = *k - i__ + 1;
+ ib = min(i__3,i__4);
+ if (*n - *k + i__ > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ i__3 = *m - *k + i__ + ib - 1;
+ zlarft_("Backward", "Columnwise", &i__3, &ib, &a[(*n - *k +
+ i__) * a_dim1 + 1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(1:m-k+i+ib-1,1:n-k+i-1) from the left */
+
+ i__3 = *m - *k + i__ + ib - 1;
+ i__4 = *n - *k + i__ - 1;
+ zlarfb_("Left", "No transpose", "Backward", "Columnwise", &
+ i__3, &i__4, &ib, &a[(*n - *k + i__) * a_dim1 + 1],
+ lda, &work[1], &ldwork, &a[a_offset], lda, &work[ib +
+ 1], &ldwork);
+ }
+
+/* Apply H to rows 1:m-k+i+ib-1 of current block */
+
+ i__3 = *m - *k + i__ + ib - 1;
+ zung2l_(&i__3, &ib, &ib, &a[(*n - *k + i__) * a_dim1 + 1], lda, &
+ tau[i__], &work[1], &iinfo);
+
+/* Set rows m-k+i+ib:m of current block to zero */
+
+ i__3 = *n - *k + i__ + ib - 1;
+ for (j = *n - *k + i__; j <= i__3; ++j) {
+ i__4 = *m;
+ for (l = *m - *k + i__ + ib; l <= i__4; ++l) {
+ i__5 = l + j * a_dim1;
+ a[i__5].r = 0., a[i__5].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ }
+
+ work[1].r = (doublereal) iws, work[1].i = 0.;
+ return 0;
+
+/* End of ZUNGQL */
+
+} /* zungql_ */
diff --git a/contrib/libs/clapack/zungqr.c b/contrib/libs/clapack/zungqr.c
new file mode 100644
index 0000000000..b3036a590e
--- /dev/null
+++ b/contrib/libs/clapack/zungqr.c
@@ -0,0 +1,288 @@
+/* zungqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int zungqr_(integer *m, integer *n, integer *k,
+ doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+ work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int zung2r_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+ integer *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ integer ldwork;
+ extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNGQR generates an M-by-N complex matrix Q with orthonormal columns, */
+/* which is defined as the first N columns of a product of K elementary */
+/* reflectors of order M */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by ZGEQRF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. M >= N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. N >= K >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the i-th column must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by ZGEQRF in the first k columns of its array */
+/* argument A. */
+/* On exit, the M-by-N matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) COMPLEX*16 array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZGEQRF. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,N). */
+/* For optimum performance LWORK >= N*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "ZUNGQR", " ", m, n, k, &c_n1);
+ lwkopt = max(1,*n) * nb;
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0 || *n > *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *n) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNGQR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ work[1].r = 1., work[1].i = 0.;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *n;
+ if (nb > 1 && nb < *k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "ZUNGQR", " ", m, n, k, &c_n1);
+ nx = max(i__1,i__2);
+ if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "ZUNGQR", " ", m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < *k && nx < *k) {
+
+/* Use blocked code after the last block. */
+/* The first kk columns are handled by the block method. */
+
+ ki = (*k - nx - 1) / nb * nb;
+/* Computing MIN */
+ i__1 = *k, i__2 = ki + nb;
+ kk = min(i__1,i__2);
+
+/* Set A(1:kk,kk+1:n) to zero. */
+
+ i__1 = *n;
+ for (j = kk + 1; j <= i__1; ++j) {
+ i__2 = kk;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ kk = 0;
+ }
+
+/* Use unblocked code for the last or only block. */
+
+ if (kk < *n) {
+ i__1 = *m - kk;
+ i__2 = *n - kk;
+ i__3 = *k - kk;
+ zung2r_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
+ tau[kk + 1], &work[1], &iinfo);
+ }
+
+ if (kk > 0) {
+
+/* Use blocked code */
+
+ i__1 = -nb;
+ for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
+/* Computing MIN */
+ i__2 = nb, i__3 = *k - i__ + 1;
+ ib = min(i__2,i__3);
+ if (i__ + ib <= *n) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+ i__2 = *m - i__ + 1;
+ zlarft_("Forward", "Columnwise", &i__2, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(i:m,i+ib:n) from the left */
+
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__ - ib + 1;
+ zlarfb_("Left", "No transpose", "Forward", "Columnwise", &
+ i__2, &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[
+ 1], &ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, &
+ work[ib + 1], &ldwork);
+ }
+
+/* Apply H to rows i:m of current block */
+
+ i__2 = *m - i__ + 1;
+ zung2r_(&i__2, &ib, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
+ work[1], &iinfo);
+
+/* Set rows 1:i-1 of current block to zero */
+
+ i__2 = i__ + ib - 1;
+ for (j = i__; j <= i__2; ++j) {
+ i__3 = i__ - 1;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + j * a_dim1;
+ a[i__4].r = 0., a[i__4].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ }
+
+ work[1].r = (doublereal) iws, work[1].i = 0.;
+ return 0;
+
+/* End of ZUNGQR */
+
+} /* zungqr_ */
diff --git a/contrib/libs/clapack/zungr2.c b/contrib/libs/clapack/zungr2.c
new file mode 100644
index 0000000000..f1d3c6f1df
--- /dev/null
+++ b/contrib/libs/clapack/zungr2.c
@@ -0,0 +1,192 @@
+/* zungr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zungr2_(integer *m, integer *n, integer *k,
+ doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+ work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, j, l, ii;
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *), zlarf_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *), xerbla_(char *, integer *), zlacgv_(integer *, doublecomplex *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNGR2 generates an m by n complex matrix Q with orthonormal rows, */
+/* which is defined as the last m rows of a product of k elementary */
+/* reflectors of order n */
+
+/* Q = H(1)' H(2)' . . . H(k)' */
+
+/* as returned by ZGERQF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. N >= M. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. M >= K >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the (m-k+i)-th row must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by ZGERQF in the last k rows of its array argument */
+/* A. */
+/* On exit, the m-by-n matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) COMPLEX*16 array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZGERQF. */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (M) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *m) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNGR2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m <= 0) {
+ return 0;
+ }
+
+ if (*k < *m) {
+
+/* Initialise rows 1:m-k to rows of the unit matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m - *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = l + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L10: */
+ }
+ if (j > *n - *m && j <= *n - *k) {
+ i__2 = *m - *n + j + j * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+ }
+/* L20: */
+ }
+ }
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ii = *m - *k + i__;
+
+/* Apply H(i)' to A(1:m-k+i,1:n-k+i) from the right */
+
+ i__2 = *n - *m + ii - 1;
+ zlacgv_(&i__2, &a[ii + a_dim1], lda);
+ i__2 = ii + (*n - *m + ii) * a_dim1;
+ a[i__2].r = 1., a[i__2].i = 0.;
+ i__2 = ii - 1;
+ i__3 = *n - *m + ii;
+ d_cnjg(&z__1, &tau[i__]);
+ zlarf_("Right", &i__2, &i__3, &a[ii + a_dim1], lda, &z__1, &a[
+ a_offset], lda, &work[1]);
+ i__2 = *n - *m + ii - 1;
+ i__3 = i__;
+ z__1.r = -tau[i__3].r, z__1.i = -tau[i__3].i;
+ zscal_(&i__2, &z__1, &a[ii + a_dim1], lda);
+ i__2 = *n - *m + ii - 1;
+ zlacgv_(&i__2, &a[ii + a_dim1], lda);
+ i__2 = ii + (*n - *m + ii) * a_dim1;
+ d_cnjg(&z__2, &tau[i__]);
+ z__1.r = 1. - z__2.r, z__1.i = 0. - z__2.i;
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+
+/* Set A(m-k+i,n-k+i+1:n) to zero */
+
+ i__2 = *n;
+ for (l = *n - *m + ii + 1; l <= i__2; ++l) {
+ i__3 = ii + l * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of ZUNGR2 */
+
+} /* zungr2_ */
diff --git a/contrib/libs/clapack/zungrq.c b/contrib/libs/clapack/zungrq.c
new file mode 100644
index 0000000000..1d52575995
--- /dev/null
+++ b/contrib/libs/clapack/zungrq.c
@@ -0,0 +1,296 @@
+/* zungrq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+
+/* Subroutine */ int zungrq_(integer *m, integer *n, integer *k,
+ doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
+ work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+
+ /* Local variables */
+ integer i__, j, l, ib, nb, ii, kk, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int zungr2_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+ integer *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ integer ldwork;
+ extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNGRQ generates an M-by-N complex matrix Q with orthonormal rows, */
+/* which is defined as the last M rows of a product of K elementary */
+/* reflectors of order N */
+
+/* Q = H(1)' H(2)' . . . H(k)' */
+
+/* as returned by ZGERQF. */
+
+/* Arguments */
+/* ========= */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix Q. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix Q. N >= M. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines the */
+/* matrix Q. M >= K >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the (m-k+i)-th row must contain the vector which */
+/* defines the elementary reflector H(i), for i = 1,2,...,k, as */
+/* returned by ZGERQF in the last k rows of its array argument */
+/* A. */
+/* On exit, the M-by-N matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The first dimension of the array A. LDA >= max(1,M). */
+
+/* TAU (input) COMPLEX*16 array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZGERQF. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= max(1,M). */
+/* For optimum performance LWORK >= M*NB, where NB is the */
+/* optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument has an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if (*k < 0 || *k > *m) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+
+ if (*info == 0) {
+ if (*m <= 0) {
+ lwkopt = 1;
+ } else {
+ nb = ilaenv_(&c__1, "ZUNGRQ", " ", m, n, k, &c_n1);
+ lwkopt = *m * nb;
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+ if (*lwork < max(1,*m) && ! lquery) {
+ *info = -8;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNGRQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m <= 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *m;
+ if (nb > 1 && nb < *k) {
+
+/* Determine when to cross over from blocked to unblocked code. */
+
+/* Computing MAX */
+ i__1 = 0, i__2 = ilaenv_(&c__3, "ZUNGRQ", " ", m, n, k, &c_n1);
+ nx = max(i__1,i__2);
+ if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *m;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/* Not enough workspace to use optimal NB: reduce NB and */
+/* determine the minimum value of NB. */
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "ZUNGRQ", " ", m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < *k && nx < *k) {
+
+/* Use blocked code after the first block. */
+/* The last kk rows are handled by the block method. */
+
+/* Computing MIN */
+ i__1 = *k, i__2 = (*k - nx + nb - 1) / nb * nb;
+ kk = min(i__1,i__2);
+
+/* Set A(1:m-kk,n-kk+1:n) to zero. */
+
+ i__1 = *n;
+ for (j = *n - kk + 1; j <= i__1; ++j) {
+ i__2 = *m - kk;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0., a[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ kk = 0;
+ }
+
+/* Use unblocked code for the first or only block. */
+
+ i__1 = *m - kk;
+ i__2 = *n - kk;
+ i__3 = *k - kk;
+ zungr2_(&i__1, &i__2, &i__3, &a[a_offset], lda, &tau[1], &work[1], &iinfo)
+ ;
+
+ if (kk > 0) {
+
+/* Use blocked code */
+
+ i__1 = *k;
+ i__2 = nb;
+ for (i__ = *k - kk + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = nb, i__4 = *k - i__ + 1;
+ ib = min(i__3,i__4);
+ ii = *m - *k + i__;
+ if (ii > 1) {
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ i__3 = *n - *k + i__ + ib - 1;
+ zlarft_("Backward", "Rowwise", &i__3, &ib, &a[ii + a_dim1],
+ lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(1:m-k+i-1,1:n-k+i+ib-1) from the right */
+
+ i__3 = ii - 1;
+ i__4 = *n - *k + i__ + ib - 1;
+ zlarfb_("Right", "Conjugate transpose", "Backward", "Rowwise",
+ &i__3, &i__4, &ib, &a[ii + a_dim1], lda, &work[1], &
+ ldwork, &a[a_offset], lda, &work[ib + 1], &ldwork);
+ }
+
+/* Apply H' to columns 1:n-k+i+ib-1 of current block */
+
+ i__3 = *n - *k + i__ + ib - 1;
+ zungr2_(&ib, &i__3, &ib, &a[ii + a_dim1], lda, &tau[i__], &work[1]
+, &iinfo);
+
+/* Set columns n-k+i+ib:n of current block to zero */
+
+ i__3 = *n;
+ for (l = *n - *k + i__ + ib; l <= i__3; ++l) {
+ i__4 = ii + ib - 1;
+ for (j = ii; j <= i__4; ++j) {
+ i__5 = j + l * a_dim1;
+ a[i__5].r = 0., a[i__5].i = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ }
+
+ work[1].r = (doublereal) iws, work[1].i = 0.;
+ return 0;
+
+/* End of ZUNGRQ */
+
+} /* zungrq_ */
diff --git a/contrib/libs/clapack/zungtr.c b/contrib/libs/clapack/zungtr.c
new file mode 100644
index 0000000000..1000fb8463
--- /dev/null
+++ b/contrib/libs/clapack/zungtr.c
@@ -0,0 +1,262 @@
+/* zungtr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+
+/* Subroutine */ int zungtr_(char *uplo, integer *n, doublecomplex *a,
+ integer *lda, doublecomplex *tau, doublecomplex *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j, nb;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int zungql_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *), zungqr_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNGTR generates a complex unitary matrix Q which is defined as the */
+/* product of n-1 elementary reflectors of order N, as returned by */
+/* ZHETRD: */
+
+/* if UPLO = 'U', Q = H(n-1) . . . H(2) H(1), */
+
+/* if UPLO = 'L', Q = H(1) H(2) . . . H(n-1). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A contains elementary reflectors */
+/* from ZHETRD; */
+/* = 'L': Lower triangle of A contains elementary reflectors */
+/* from ZHETRD. */
+
+/* N (input) INTEGER */
+/* The order of the matrix Q. N >= 0. */
+
+/* A (input/output) COMPLEX*16 array, dimension (LDA,N) */
+/* On entry, the vectors which define the elementary reflectors, */
+/* as returned by ZHETRD. */
+/* On exit, the N-by-N unitary matrix Q. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= N. */
+
+/* TAU (input) COMPLEX*16 array, dimension (N-1) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZHETRD. */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. LWORK >= N-1. */
+/* For optimum performance LWORK >= (N-1)*NB, where NB is */
+/* the optimal blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n - 1;
+ if (*lwork < max(i__1,i__2) && ! lquery) {
+ *info = -7;
+ }
+ }
+
+ if (*info == 0) {
+ if (upper) {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "ZUNGQL", " ", &i__1, &i__2, &i__3, &c_n1);
+ } else {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "ZUNGQR", " ", &i__1, &i__2, &i__3, &c_n1);
+ }
+/* Computing MAX */
+ i__1 = 1, i__2 = *n - 1;
+ lwkopt = max(i__1,i__2) * nb;
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNGTR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ work[1].r = 1., work[1].i = 0.;
+ return 0;
+ }
+
+ if (upper) {
+
+/* Q was determined by a call to ZHETRD with UPLO = 'U' */
+
+/* Shift the vectors which define the elementary reflectors one */
+/* column to the left, and set the last row and column of Q to */
+/* those of the unit matrix */
+
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + (j + 1) * a_dim1;
+ a[i__3].r = a[i__4].r, a[i__3].i = a[i__4].i;
+/* L10: */
+ }
+ i__2 = *n + j * a_dim1;
+ a[i__2].r = 0., a[i__2].i = 0.;
+/* L20: */
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + *n * a_dim1;
+ a[i__2].r = 0., a[i__2].i = 0.;
+/* L30: */
+ }
+ i__1 = *n + *n * a_dim1;
+ a[i__1].r = 1., a[i__1].i = 0.;
+
+/* Generate Q(1:n-1,1:n-1) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zungql_(&i__1, &i__2, &i__3, &a[a_offset], lda, &tau[1], &work[1],
+ lwork, &iinfo);
+
+ } else {
+
+/* Q was determined by a call to ZHETRD with UPLO = 'L'. */
+
+/* Shift the vectors which define the elementary reflectors one */
+/* column to the right, and set the first row and column of Q to */
+/* those of the unit matrix */
+
+ for (j = *n; j >= 2; --j) {
+ i__1 = j * a_dim1 + 1;
+ a[i__1].r = 0., a[i__1].i = 0.;
+ i__1 = *n;
+ for (i__ = j + 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__ + (j - 1) * a_dim1;
+ a[i__2].r = a[i__3].r, a[i__2].i = a[i__3].i;
+/* L40: */
+ }
+/* L50: */
+ }
+ i__1 = a_dim1 + 1;
+ a[i__1].r = 1., a[i__1].i = 0.;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ i__2 = i__ + a_dim1;
+ a[i__2].r = 0., a[i__2].i = 0.;
+/* L60: */
+ }
+ if (*n > 1) {
+
+/* Generate Q(2:n,2:n) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zungqr_(&i__1, &i__2, &i__3, &a[(a_dim1 << 1) + 2], lda, &tau[1],
+ &work[1], lwork, &iinfo);
+ }
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ return 0;
+
+/* End of ZUNGTR */
+
+} /* zungtr_ */
diff --git a/contrib/libs/clapack/zunm2l.c b/contrib/libs/clapack/zunm2l.c
new file mode 100644
index 0000000000..4a49f63a30
--- /dev/null
+++ b/contrib/libs/clapack/zunm2l.c
@@ -0,0 +1,245 @@
+/* zunm2l.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zunm2l_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublecomplex *a, integer *lda, doublecomplex *tau,
+ doublecomplex *c__, integer *ldc, doublecomplex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, i1, i2, i3, mi, ni, nq;
+ doublecomplex aii;
+ logical left;
+ doublecomplex taui;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *), xerbla_(char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNM2L overwrites the general complex m-by-n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'C', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'C', */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by ZGEQLF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'C': apply Q' (Conjugate transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,K) */
+/* The i-th column must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* ZGEQLF in the last k columns of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If SIDE = 'L', LDA >= max(1,M); */
+/* if SIDE = 'R', LDA >= max(1,N). */
+
+/* TAU (input) COMPLEX*16 array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZGEQLF. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the m-by-n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNM2L", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if (left && notran || ! left && ! notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) or H(i)' is applied to C(1:m-k+i,1:n) */
+
+ mi = *m - *k + i__;
+ } else {
+
+/* H(i) or H(i)' is applied to C(1:m,1:n-k+i) */
+
+ ni = *n - *k + i__;
+ }
+
+/* Apply H(i) or H(i)' */
+
+ if (notran) {
+ i__3 = i__;
+ taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+ } else {
+ d_cnjg(&z__1, &tau[i__]);
+ taui.r = z__1.r, taui.i = z__1.i;
+ }
+ i__3 = nq - *k + i__ + i__ * a_dim1;
+ aii.r = a[i__3].r, aii.i = a[i__3].i;
+ i__3 = nq - *k + i__ + i__ * a_dim1;
+ a[i__3].r = 1., a[i__3].i = 0.;
+ zlarf_(side, &mi, &ni, &a[i__ * a_dim1 + 1], &c__1, &taui, &c__[
+ c_offset], ldc, &work[1]);
+ i__3 = nq - *k + i__ + i__ * a_dim1;
+ a[i__3].r = aii.r, a[i__3].i = aii.i;
+/* L10: */
+ }
+ return 0;
+
+/* End of ZUNM2L */
+
+} /* zunm2l_ */
diff --git a/contrib/libs/clapack/zunm2r.c b/contrib/libs/clapack/zunm2r.c
new file mode 100644
index 0000000000..a551863c29
--- /dev/null
+++ b/contrib/libs/clapack/zunm2r.c
@@ -0,0 +1,249 @@
+/* zunm2r.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zunm2r_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublecomplex *a, integer *lda, doublecomplex *tau,
+ doublecomplex *c__, integer *ldc, doublecomplex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
+ doublecomplex aii;
+ logical left;
+ doublecomplex taui;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *), xerbla_(char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNM2R overwrites the general complex m-by-n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'C', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'C', */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by ZGEQRF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'C': apply Q' (Conjugate transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,K) */
+/* The i-th column must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* ZGEQRF in the first k columns of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If SIDE = 'L', LDA >= max(1,M); */
+/* if SIDE = 'R', LDA >= max(1,N). */
+
+/* TAU (input) COMPLEX*16 array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZGEQRF. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the m-by-n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNM2R", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) or H(i)' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) or H(i)' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) or H(i)' */
+
+ if (notran) {
+ i__3 = i__;
+ taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+ } else {
+ d_cnjg(&z__1, &tau[i__]);
+ taui.r = z__1.r, taui.i = z__1.i;
+ }
+ i__3 = i__ + i__ * a_dim1;
+ aii.r = a[i__3].r, aii.i = a[i__3].i;
+ i__3 = i__ + i__ * a_dim1;
+ a[i__3].r = 1., a[i__3].i = 0.;
+ zlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], &c__1, &taui, &c__[ic
+ + jc * c_dim1], ldc, &work[1]);
+ i__3 = i__ + i__ * a_dim1;
+ a[i__3].r = aii.r, a[i__3].i = aii.i;
+/* L10: */
+ }
+ return 0;
+
+/* End of ZUNM2R */
+
+} /* zunm2r_ */
diff --git a/contrib/libs/clapack/zunmbr.c b/contrib/libs/clapack/zunmbr.c
new file mode 100644
index 0000000000..0179f82d3e
--- /dev/null
+++ b/contrib/libs/clapack/zunmbr.c
@@ -0,0 +1,371 @@
+/* zunmbr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int zunmbr_(char *vect, char *side, char *trans, integer *m,
+	integer *n, integer *k, doublecomplex *a, integer *lda, doublecomplex
+	*tau, doublecomplex *c__, integer *ldc, doublecomplex *work, integer *
+	lwork, integer *info)
+{ /* Applies Q or P (the unitary factors of a ZGEBRD bidiagonalization) to C; see Purpose below */
+    /* System generated locals */
+    address a__1[2];
+    integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2];
+    char ch__1[2];
+
+    /* Builtin functions */
+    /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+    /* Local variables */
+    integer i1, i2, nb, mi, ni, nq, nw;
+    logical left;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    logical notran, applyq;
+    char transt[1];
+    integer lwkopt;
+    logical lquery;
+    extern /* Subroutine */ int zunmlq_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *), zunmqr_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* If VECT = 'Q', ZUNMBR overwrites the general complex M-by-N matrix C */
+/* with */
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'C': Q**H * C C * Q**H */
+
+/* If VECT = 'P', ZUNMBR overwrites the general complex M-by-N matrix C */
+/* with */
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': P * C C * P */
+/* TRANS = 'C': P**H * C C * P**H */
+
+/* Here Q and P**H are the unitary matrices determined by ZGEBRD when */
+/* reducing a complex matrix A to bidiagonal form: A = Q * B * P**H. Q */
+/* and P**H are defined as products of elementary reflectors H(i) and */
+/* G(i) respectively. */
+
+/* Let nq = m if SIDE = 'L' and nq = n if SIDE = 'R'. Thus nq is the */
+/* order of the unitary matrix Q or P**H that is applied. */
+
+/* If VECT = 'Q', A is assumed to have been an NQ-by-K matrix: */
+/* if nq >= k, Q = H(1) H(2) . . . H(k); */
+/* if nq < k, Q = H(1) H(2) . . . H(nq-1). */
+
+/* If VECT = 'P', A is assumed to have been a K-by-NQ matrix: */
+/* if k < nq, P = G(1) G(2) . . . G(k); */
+/* if k >= nq, P = G(1) G(2) . . . G(nq-1). */
+
+/* Arguments */
+/* ========= */
+
+/* VECT (input) CHARACTER*1 */
+/* = 'Q': apply Q or Q**H; */
+/* = 'P': apply P or P**H. */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q, Q**H, P or P**H from the Left; */
+/* = 'R': apply Q, Q**H, P or P**H from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q or P; */
+/* = 'C': Conjugate transpose, apply Q**H or P**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* If VECT = 'Q', the number of columns in the original */
+/* matrix reduced by ZGEBRD. */
+/* If VECT = 'P', the number of rows in the original */
+/* matrix reduced by ZGEBRD. */
+/* K >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension */
+/* (LDA,min(nq,K)) if VECT = 'Q' */
+/* (LDA,nq) if VECT = 'P' */
+/* The vectors which define the elementary reflectors H(i) and */
+/* G(i), whose products determine the matrices Q and P, as */
+/* returned by ZGEBRD. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If VECT = 'Q', LDA >= max(1,nq); */
+/* if VECT = 'P', LDA >= max(1,min(nq,K)). */
+
+/* TAU (input) COMPLEX*16 array, dimension (min(nq,K)) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i) or G(i) which determines Q or P, as returned */
+/* by ZGEBRD in the array argument TAUQ or TAUP. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q */
+/* or P*C or P**H*C or C*P or C*P**H. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M); */
+/* if N = 0 or M = 0, LWORK >= 1. */
+/* For optimum performance LWORK >= max(1,N*NB) if SIDE = 'L', */
+/* and LWORK >= max(1,M*NB) if SIDE = 'R', where NB is the */
+/* optimal blocksize. (NB = 0 if M = 0 or N = 0.) */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset; /* shift to 1-based Fortran-style indexing */
+    --tau;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    applyq = lsame_(vect, "Q"); /* TRUE: apply Q; FALSE (VECT='P'): apply P */
+    left = lsame_(side, "L");
+    notran = lsame_(trans, "N");
+    lquery = *lwork == -1; /* workspace-size query only */
+
+/* NQ is the order of Q or P and NW is the minimum dimension of WORK */
+
+    if (left) {
+	nq = *m;
+	nw = *n;
+    } else {
+	nq = *n;
+	nw = *m;
+    }
+    if (*m == 0 || *n == 0) {
+	nw = 0; /* empty C: no workspace is needed */
+    }
+    if (! applyq && ! lsame_(vect, "P")) {
+	*info = -1;
+    } else if (! left && ! lsame_(side, "R")) {
+	*info = -2;
+    } else if (! notran && ! lsame_(trans, "C")) {
+	*info = -3;
+    } else if (*m < 0) {
+	*info = -4;
+    } else if (*n < 0) {
+	*info = -5;
+    } else if (*k < 0) {
+	*info = -6;
+    } else /* if(complicated condition) */ {
+/* Computing MAX */
+	i__1 = 1, i__2 = min(nq,*k);
+	if (applyq && *lda < max(1,nq) || ! applyq && *lda < max(i__1,i__2)) {
+	    *info = -8; /* A had nq rows if VECT='Q', min(nq,K) rows if VECT='P' */
+	} else if (*ldc < max(1,*m)) {
+	    *info = -11;
+	} else if (*lwork < max(1,nw) && ! lquery) {
+	    *info = -13;
+	}
+    }
+
+    if (*info == 0) {
+	if (nw > 0) {
+	    if (applyq) {
+		if (left) {
+/* Writing concatenation */
+		    i__3[0] = 1, a__1[0] = side;
+		    i__3[1] = 1, a__1[1] = trans;
+		    s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); /* ch__1 = SIDE//TRANS for ILAENV */
+		    i__1 = *m - 1;
+		    i__2 = *m - 1;
+		    nb = ilaenv_(&c__1, "ZUNMQR", ch__1, &i__1, n, &i__2, &
+			    c_n1);
+		} else {
+/* Writing concatenation */
+		    i__3[0] = 1, a__1[0] = side;
+		    i__3[1] = 1, a__1[1] = trans;
+		    s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+		    i__1 = *n - 1;
+		    i__2 = *n - 1;
+		    nb = ilaenv_(&c__1, "ZUNMQR", ch__1, m, &i__1, &i__2, &
+			    c_n1);
+		}
+	    } else {
+		if (left) {
+/* Writing concatenation */
+		    i__3[0] = 1, a__1[0] = side;
+		    i__3[1] = 1, a__1[1] = trans;
+		    s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+		    i__1 = *m - 1;
+		    i__2 = *m - 1;
+		    nb = ilaenv_(&c__1, "ZUNMLQ", ch__1, &i__1, n, &i__2, &
+			    c_n1);
+		} else {
+/* Writing concatenation */
+		    i__3[0] = 1, a__1[0] = side;
+		    i__3[1] = 1, a__1[1] = trans;
+		    s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+		    i__1 = *n - 1;
+		    i__2 = *n - 1;
+		    nb = ilaenv_(&c__1, "ZUNMLQ", ch__1, m, &i__1, &i__2, &
+			    c_n1);
+		}
+	    }
+/* Computing MAX */
+	    i__1 = 1, i__2 = nw * nb;
+	    lwkopt = max(i__1,i__2); /* optimal LWORK = NW * blocksize */
+	} else {
+	    lwkopt = 1;
+	}
+	work[1].r = (doublereal) lwkopt, work[1].i = 0.; /* report optimal LWORK in WORK(1) */
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZUNMBR", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0) {
+	return 0;
+    }
+
+    if (applyq) {
+
+/* Apply Q */
+
+	if (nq >= *k) {
+
+/* Q was determined by a call to ZGEBRD with nq >= k */
+
+	    zunmqr_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+		    c_offset], ldc, &work[1], lwork, &iinfo);
+	} else if (nq > 1) {
+
+/* Q was determined by a call to ZGEBRD with nq < k */
+
+	    if (left) {
+		mi = *m - 1;
+		ni = *n;
+		i1 = 2;
+		i2 = 1;
+	    } else {
+		mi = *m;
+		ni = *n - 1;
+		i1 = 1;
+		i2 = 2;
+	    }
+	    i__1 = nq - 1; /* only nq-1 reflectors; their vectors start at A(2,1) */
+	    zunmqr_(side, trans, &mi, &ni, &i__1, &a[a_dim1 + 2], lda, &tau[1]
+, &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+	}
+    } else {
+
+/* Apply P */
+
+	if (notran) {
+	    *(unsigned char *)transt = 'C'; /* P is applied via ZUNMLQ with the opposite transpose */
+	} else {
+	    *(unsigned char *)transt = 'N';
+	}
+	if (nq > *k) {
+
+/* P was determined by a call to ZGEBRD with nq > k */
+
+	    zunmlq_(side, transt, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+		    c_offset], ldc, &work[1], lwork, &iinfo);
+	} else if (nq > 1) {
+
+/* P was determined by a call to ZGEBRD with nq <= k */
+
+	    if (left) {
+		mi = *m - 1;
+		ni = *n;
+		i1 = 2;
+		i2 = 1;
+	    } else {
+		mi = *m;
+		ni = *n - 1;
+		i1 = 1;
+		i2 = 2;
+	    }
+	    i__1 = nq - 1; /* only nq-1 reflectors; their vectors start at A(1,2) */
+	    zunmlq_(side, transt, &mi, &ni, &i__1, &a[(a_dim1 << 1) + 1], lda,
+		     &tau[1], &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &
+		    iinfo);
+	}
+    }
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    return 0;
+
+/* End of ZUNMBR */
+
+} /* zunmbr_ */
diff --git a/contrib/libs/clapack/zunmhr.c b/contrib/libs/clapack/zunmhr.c
new file mode 100644
index 0000000000..c67935a58c
--- /dev/null
+++ b/contrib/libs/clapack/zunmhr.c
@@ -0,0 +1,257 @@
+/* zunmhr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int zunmhr_(char *side, char *trans, integer *m, integer *n,
+	integer *ilo, integer *ihi, doublecomplex *a, integer *lda,
+	doublecomplex *tau, doublecomplex *c__, integer *ldc, doublecomplex *
+	work, integer *lwork, integer *info)
+{ /* Applies the unitary Q from a ZGEHRD Hessenberg reduction to C; see Purpose below */
+    /* System generated locals */
+    address a__1[2];
+    integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2;
+    char ch__1[2];
+
+    /* Builtin functions */
+    /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+    /* Local variables */
+    integer i1, i2, nb, mi, nh, ni, nq, nw;
+    logical left;
+    extern logical lsame_(char *, char *);
+    integer iinfo;
+    extern /* Subroutine */ int xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    integer lwkopt;
+    logical lquery;
+    extern /* Subroutine */ int zunmqr_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNMHR overwrites the general complex M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'C': Q**H * C C * Q**H */
+
+/* where Q is a complex unitary matrix of order nq, with nq = m if */
+/* SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of */
+/* IHI-ILO elementary reflectors, as returned by ZGEHRD: */
+
+/* Q = H(ilo) H(ilo+1) . . . H(ihi-1). */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**H from the Left; */
+/* = 'R': apply Q or Q**H from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'C': apply Q**H (Conjugate transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* ILO (input) INTEGER */
+/* IHI (input) INTEGER */
+/* ILO and IHI must have the same values as in the previous call */
+/* of ZGEHRD. Q is equal to the unit matrix except in the */
+/* submatrix Q(ilo+1:ihi,ilo+1:ihi). */
+/* If SIDE = 'L', then 1 <= ILO <= IHI <= M, if M > 0, and */
+/* ILO = 1 and IHI = 0, if M = 0; */
+/* if SIDE = 'R', then 1 <= ILO <= IHI <= N, if N > 0, and */
+/* ILO = 1 and IHI = 0, if N = 0. */
+
+/* A (input) COMPLEX*16 array, dimension */
+/* (LDA,M) if SIDE = 'L' */
+/* (LDA,N) if SIDE = 'R' */
+/* The vectors which define the elementary reflectors, as */
+/* returned by ZGEHRD. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* LDA >= max(1,M) if SIDE = 'L'; LDA >= max(1,N) if SIDE = 'R'. */
+
+/* TAU (input) COMPLEX*16 array, dimension */
+/* (M-1) if SIDE = 'L' */
+/* (N-1) if SIDE = 'R' */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZGEHRD. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset; /* shift to 1-based Fortran-style indexing */
+    --tau;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    nh = *ihi - *ilo; /* Q is a product of nh = IHI-ILO reflectors */
+    left = lsame_(side, "L");
+    lquery = *lwork == -1; /* workspace-size query only */
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+    if (left) {
+	nq = *m;
+	nw = *n;
+    } else {
+	nq = *n;
+	nw = *m;
+    }
+    if (! left && ! lsame_(side, "R")) {
+	*info = -1;
+    } else if (! lsame_(trans, "N") && ! lsame_(trans,
+	    "C")) {
+	*info = -2;
+    } else if (*m < 0) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*ilo < 1 || *ilo > max(1,nq)) {
+	*info = -5;
+    } else if (*ihi < min(*ilo,nq) || *ihi > nq) {
+	*info = -6;
+    } else if (*lda < max(1,nq)) {
+	*info = -8;
+    } else if (*ldc < max(1,*m)) {
+	*info = -11;
+    } else if (*lwork < max(1,nw) && ! lquery) {
+	*info = -13;
+    }
+
+    if (*info == 0) {
+	if (left) {
+/* Writing concatenation */
+	    i__1[0] = 1, a__1[0] = side;
+	    i__1[1] = 1, a__1[1] = trans;
+	    s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2); /* ch__1 = SIDE//TRANS for ILAENV */
+	    nb = ilaenv_(&c__1, "ZUNMQR", ch__1, &nh, n, &nh, &c_n1);
+	} else {
+/* Writing concatenation */
+	    i__1[0] = 1, a__1[0] = side;
+	    i__1[1] = 1, a__1[1] = trans;
+	    s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+	    nb = ilaenv_(&c__1, "ZUNMQR", ch__1, m, &nh, &nh, &c_n1);
+	}
+	lwkopt = max(1,nw) * nb; /* optimal LWORK = NW * blocksize */
+	work[1].r = (doublereal) lwkopt, work[1].i = 0.; /* report optimal LWORK in WORK(1) */
+    }
+
+    if (*info != 0) {
+	i__2 = -(*info);
+	xerbla_("ZUNMHR", &i__2);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0 || nh == 0) {
+	work[1].r = 1., work[1].i = 0.;
+	return 0;
+    }
+
+    if (left) {
+	mi = nh;
+	ni = *n;
+	i1 = *ilo + 1; /* Q differs from identity only in rows/cols ilo+1:ihi */
+	i2 = 1;
+    } else {
+	mi = *m;
+	ni = nh;
+	i1 = 1;
+	i2 = *ilo + 1;
+    }
+
+    zunmqr_(side, trans, &mi, &ni, &nh, &a[*ilo + 1 + *ilo * a_dim1], lda, &
+	    tau[*ilo], &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo); /* delegate to QR-form multiply on the trailing submatrix */
+
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    return 0;
+
+/* End of ZUNMHR */
+
+} /* zunmhr_ */
diff --git a/contrib/libs/clapack/zunml2.c b/contrib/libs/clapack/zunml2.c
new file mode 100644
index 0000000000..10899a5933
--- /dev/null
+++ b/contrib/libs/clapack/zunml2.c
@@ -0,0 +1,253 @@
+/* zunml2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zunml2_(char *side, char *trans, integer *m, integer *n,
+	integer *k, doublecomplex *a, integer *lda, doublecomplex *tau,
+	doublecomplex *c__, integer *ldc, doublecomplex *work, integer *info)
+{ /* Unblocked application of Q or Q' (from ZGELQF) to C; see Purpose below */
+    /* System generated locals */
+    integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+    doublecomplex z__1;
+
+    /* Builtin functions */
+    void d_cnjg(doublecomplex *, doublecomplex *);
+
+    /* Local variables */
+    integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
+    doublecomplex aii;
+    logical left;
+    doublecomplex taui;
+    extern logical lsame_(char *, char *);
+    extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *, doublecomplex *), xerbla_(char *, integer *), zlacgv_(integer *, doublecomplex *, integer *);
+    logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNML2 overwrites the general complex m-by-n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'C', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'C', */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(k)' . . . H(2)' H(1)' */
+
+/* as returned by ZGELQF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'C': apply Q' (Conjugate transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* ZGELQF in the first k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) COMPLEX*16 array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZGELQF. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the m-by-n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset; /* shift to 1-based Fortran-style indexing */
+    --tau;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    left = lsame_(side, "L");
+    notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+    if (left) {
+	nq = *m;
+    } else {
+	nq = *n;
+    }
+    if (! left && ! lsame_(side, "R")) {
+	*info = -1;
+    } else if (! notran && ! lsame_(trans, "C")) {
+	*info = -2;
+    } else if (*m < 0) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*k < 0 || *k > nq) {
+	*info = -5;
+    } else if (*lda < max(1,*k)) {
+	*info = -7;
+    } else if (*ldc < max(1,*m)) {
+	*info = -10;
+    }
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZUNML2", &i__1);
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0 || *k == 0) {
+	return 0;
+    }
+
+    if (left && notran || ! left && ! notran) {
+	i1 = 1; /* apply H(1)..H(k) in forward order */
+	i2 = *k;
+	i3 = 1;
+    } else {
+	i1 = *k; /* apply H(k)..H(1) in backward order */
+	i2 = 1;
+	i3 = -1;
+    }
+
+    if (left) {
+	ni = *n;
+	jc = 1;
+    } else {
+	mi = *m;
+	ic = 1;
+    }
+
+    i__1 = i2;
+    i__2 = i3;
+    for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+	if (left) {
+
+/* H(i) or H(i)' is applied to C(i:m,1:n) */
+
+	    mi = *m - i__ + 1;
+	    ic = i__;
+	} else {
+
+/* H(i) or H(i)' is applied to C(1:m,i:n) */
+
+	    ni = *n - i__ + 1;
+	    jc = i__;
+	}
+
+/* Apply H(i) or H(i)' */
+
+	if (notran) {
+	    d_cnjg(&z__1, &tau[i__]); /* conjugate tau(i) when applying Q itself */
+	    taui.r = z__1.r, taui.i = z__1.i;
+	} else {
+	    i__3 = i__;
+	    taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+	}
+	if (i__ < nq) {
+	    i__3 = nq - i__;
+	    zlacgv_(&i__3, &a[i__ + (i__ + 1) * a_dim1], lda); /* temporarily conjugate v(i+1:nq) in row i of A */
+	}
+	i__3 = i__ + i__ * a_dim1;
+	aii.r = a[i__3].r, aii.i = a[i__3].i; /* save A(i,i) */
+	i__3 = i__ + i__ * a_dim1;
+	a[i__3].r = 1., a[i__3].i = 0.; /* reflector vector has an implicit unit first entry */
+	zlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], lda, &taui, &c__[ic +
+		jc * c_dim1], ldc, &work[1]);
+	i__3 = i__ + i__ * a_dim1;
+	a[i__3].r = aii.r, a[i__3].i = aii.i; /* restore A(i,i) */
+	if (i__ < nq) {
+	    i__3 = nq - i__;
+	    zlacgv_(&i__3, &a[i__ + (i__ + 1) * a_dim1], lda); /* undo the conjugation, restoring A */
+	}
+/* L10: */
+    }
+    return 0;
+
+/* End of ZUNML2 */
+
+} /* zunml2_ */
diff --git a/contrib/libs/clapack/zunmlq.c b/contrib/libs/clapack/zunmlq.c
new file mode 100644
index 0000000000..a2315961eb
--- /dev/null
+++ b/contrib/libs/clapack/zunmlq.c
@@ -0,0 +1,338 @@
+/* zunmlq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int zunmlq_(char *side, char *trans, integer *m, integer *n,
+	integer *k, doublecomplex *a, integer *lda, doublecomplex *tau,
+	doublecomplex *c__, integer *ldc, doublecomplex *work, integer *lwork,
+	integer *info)
+{ /* Blocked application of Q or Q**H (from ZGELQF) to C; see Purpose below */
+    /* System generated locals */
+    address a__1[2];
+    integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+	    i__5;
+    char ch__1[2];
+
+    /* Builtin functions */
+    /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+    /* Local variables */
+    integer i__;
+    doublecomplex t[4160] /* was [65][64] */; /* triangular block-reflector factor T, LDT=65, NBMAX=64 */
+    integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
+    logical left;
+    extern logical lsame_(char *, char *);
+    integer nbmin, iinfo;
+    extern /* Subroutine */ int zunml2_(char *, char *, integer *, integer *,
+	    integer *, doublecomplex *, integer *, doublecomplex *,
+	    doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(char *, integer *);
+    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+	    integer *, integer *);
+    extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+	    integer *, integer *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, integer *,
+	    doublecomplex *, integer *);
+    logical notran;
+    integer ldwork;
+    extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *,
+	    doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+	    integer *);
+    char transt[1];
+    integer lwkopt;
+    logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNMLQ overwrites the general complex M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'C': Q**H * C C * Q**H */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(k)' . . . H(2)' H(1)' */
+
+/* as returned by ZGELQF. Q is of order M if SIDE = 'L' and of order N */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**H from the Left; */
+/* = 'R': apply Q or Q**H from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'C': Conjugate transpose, apply Q**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* ZGELQF in the first k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) COMPLEX*16 array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZGELQF. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+    /* Parameter adjustments */
+    a_dim1 = *lda;
+    a_offset = 1 + a_dim1;
+    a -= a_offset; /* shift to 1-based Fortran-style indexing */
+    --tau;
+    c_dim1 = *ldc;
+    c_offset = 1 + c_dim1;
+    c__ -= c_offset;
+    --work;
+
+    /* Function Body */
+    *info = 0;
+    left = lsame_(side, "L");
+    notran = lsame_(trans, "N");
+    lquery = *lwork == -1; /* workspace-size query only */
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+    if (left) {
+	nq = *m;
+	nw = *n;
+    } else {
+	nq = *n;
+	nw = *m;
+    }
+    if (! left && ! lsame_(side, "R")) {
+	*info = -1;
+    } else if (! notran && ! lsame_(trans, "C")) {
+	*info = -2;
+    } else if (*m < 0) {
+	*info = -3;
+    } else if (*n < 0) {
+	*info = -4;
+    } else if (*k < 0 || *k > nq) {
+	*info = -5;
+    } else if (*lda < max(1,*k)) {
+	*info = -7;
+    } else if (*ldc < max(1,*m)) {
+	*info = -10;
+    } else if (*lwork < max(1,nw) && ! lquery) {
+	*info = -12;
+    }
+
+    if (*info == 0) {
+
+/* Determine the block size. NB may be at most NBMAX, where NBMAX */
+/* is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+	i__3[0] = 1, a__1[0] = side;
+	i__3[1] = 1, a__1[1] = trans;
+	s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); /* ch__1 = SIDE//TRANS for ILAENV */
+	i__1 = 64, i__2 = ilaenv_(&c__1, "ZUNMLQ", ch__1, m, n, k, &c_n1);
+	nb = min(i__1,i__2); /* cap NB at NBMAX=64 (T is 65x64) */
+	lwkopt = max(1,nw) * nb;
+	work[1].r = (doublereal) lwkopt, work[1].i = 0.; /* report optimal LWORK in WORK(1) */
+    }
+
+    if (*info != 0) {
+	i__1 = -(*info);
+	xerbla_("ZUNMLQ", &i__1);
+	return 0;
+    } else if (lquery) {
+	return 0;
+    }
+
+/* Quick return if possible */
+
+    if (*m == 0 || *n == 0 || *k == 0) {
+	work[1].r = 1., work[1].i = 0.;
+	return 0;
+    }
+
+    nbmin = 2;
+    ldwork = nw;
+    if (nb > 1 && nb < *k) {
+	iws = nw * nb; /* workspace needed by the blocked path */
+	if (*lwork < iws) {
+	    nb = *lwork / ldwork; /* shrink NB to fit the supplied workspace */
+/* Computing MAX */
+/* Writing concatenation */
+	    i__3[0] = 1, a__1[0] = side;
+	    i__3[1] = 1, a__1[1] = trans;
+	    s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+	    i__1 = 2, i__2 = ilaenv_(&c__2, "ZUNMLQ", ch__1, m, n, k, &c_n1);
+	    nbmin = max(i__1,i__2);
+	}
+    } else {
+	iws = nw;
+    }
+
+    if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+	zunml2_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+		c_offset], ldc, &work[1], &iinfo);
+    } else {
+
+/* Use blocked code */
+
+	if (left && notran || ! left && ! notran) {
+	    i1 = 1; /* process reflector blocks in forward order */
+	    i2 = *k;
+	    i3 = nb;
+	} else {
+	    i1 = (*k - 1) / nb * nb + 1; /* start of the last block; go backward */
+	    i2 = 1;
+	    i3 = -nb;
+	}
+
+	if (left) {
+	    ni = *n;
+	    jc = 1;
+	} else {
+	    mi = *m;
+	    ic = 1;
+	}
+
+	if (notran) {
+	    *(unsigned char *)transt = 'C'; /* Q is a product of H(i)' factors (see Purpose), so pass the opposite transpose to ZLARFB */
+	} else {
+	    *(unsigned char *)transt = 'N';
+	}
+
+	i__1 = i2;
+	i__2 = i3;
+	for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+	    i__4 = nb, i__5 = *k - i__ + 1;
+	    ib = min(i__4,i__5); /* current block may be smaller than NB at the tail */
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+	    i__4 = nq - i__ + 1;
+	    zlarft_("Forward", "Rowwise", &i__4, &ib, &a[i__ + i__ * a_dim1],
+		    lda, &tau[i__], t, &c__65);
+	    if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+		mi = *m - i__ + 1;
+		ic = i__;
+	    } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+		ni = *n - i__ + 1;
+		jc = i__;
+	    }
+
+/* Apply H or H' */
+
+	    zlarfb_(side, transt, "Forward", "Rowwise", &mi, &ni, &ib, &a[i__
+		    + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1],
+		    ldc, &work[1], &ldwork);
+/* L10: */
+	}
+    }
+    work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+    return 0;
+
+/* End of ZUNMLQ */
+
+} /* zunmlq_ */
diff --git a/contrib/libs/clapack/zunmql.c b/contrib/libs/clapack/zunmql.c
new file mode 100644
index 0000000000..f159164830
--- /dev/null
+++ b/contrib/libs/clapack/zunmql.c
@@ -0,0 +1,332 @@
+/* zunmql.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int zunmql_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublecomplex *a, integer *lda, doublecomplex *tau,
+ doublecomplex *c__, integer *ldc, doublecomplex *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ doublecomplex t[4160] /* was [65][64] */;
+ integer i1, i2, i3, ib, nb, mi, ni, nq, nw, iws;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ extern /* Subroutine */ int zunm2l_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+ integer *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ logical notran;
+ integer ldwork;
+ extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNMQL overwrites the general complex M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'C': Q**H * C C * Q**H */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(k) . . . H(2) H(1) */
+
+/* as returned by ZGEQLF. Q is of order M if SIDE = 'L' and of order N */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**H from the Left; */
+/* = 'R': apply Q or Q**H from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'C': Transpose, apply Q**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,K) */
+/* The i-th column must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* ZGEQLF in the last k columns of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If SIDE = 'L', LDA >= max(1,M); */
+/* if SIDE = 'R', LDA >= max(1,N). */
+
+/* TAU (input) COMPLEX*16 array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZGEQLF. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = max(1,*n);
+ } else {
+ nq = *n;
+ nw = max(1,*m);
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+
+ if (*info == 0) {
+ if (*m == 0 || *n == 0) {
+ lwkopt = 1;
+ } else {
+
+/* Determine the block size. NB may be at most NBMAX, where */
+/* NBMAX is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "ZUNMQL", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ lwkopt = nw * nb;
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+ if (*lwork < nw && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNMQL", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "ZUNMQL", ch__1, m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ zunm2l_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if (left && notran || ! left && ! notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ i__4 = nq - *k + i__ + ib - 1;
+ zlarft_("Backward", "Columnwise", &i__4, &ib, &a[i__ * a_dim1 + 1]
+, lda, &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(1:m-k+i+ib-1,1:n) */
+
+ mi = *m - *k + i__ + ib - 1;
+ } else {
+
+/* H or H' is applied to C(1:m,1:n-k+i+ib-1) */
+
+ ni = *n - *k + i__ + ib - 1;
+ }
+
+/* Apply H or H' */
+
+ zlarfb_(side, trans, "Backward", "Columnwise", &mi, &ni, &ib, &a[
+ i__ * a_dim1 + 1], lda, t, &c__65, &c__[c_offset], ldc, &
+ work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ return 0;
+
+/* End of ZUNMQL */
+
+} /* zunmql_ */
diff --git a/contrib/libs/clapack/zunmqr.c b/contrib/libs/clapack/zunmqr.c
new file mode 100644
index 0000000000..1074b2a96a
--- /dev/null
+++ b/contrib/libs/clapack/zunmqr.c
@@ -0,0 +1,332 @@
+/* zunmqr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int zunmqr_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublecomplex *a, integer *lda, doublecomplex *tau,
+ doublecomplex *c__, integer *ldc, doublecomplex *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ doublecomplex t[4160] /* was [65][64] */;
+ integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ extern /* Subroutine */ int zunm2r_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+ integer *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ logical notran;
+ integer ldwork;
+ extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNMQR overwrites the general complex M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'C': Q**H * C C * Q**H */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by ZGEQRF. Q is of order M if SIDE = 'L' and of order N */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**H from the Left; */
+/* = 'R': apply Q or Q**H from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'C': Conjugate transpose, apply Q**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension (LDA,K) */
+/* The i-th column must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* ZGEQRF in the first k columns of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* If SIDE = 'L', LDA >= max(1,M); */
+/* if SIDE = 'R', LDA >= max(1,N). */
+
+/* TAU (input) COMPLEX*16 array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZGEQRF. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/* Determine the block size. NB may be at most NBMAX, where NBMAX */
+/* is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "ZUNMQR", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNMQR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ work[1].r = 1., work[1].i = 0.;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "ZUNMQR", ch__1, m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ zunm2r_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i) H(i+1) . . . H(i+ib-1) */
+
+ i__4 = nq - i__ + 1;
+ zlarft_("Forward", "Columnwise", &i__4, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], t, &c__65)
+ ;
+ if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H or H' */
+
+ zlarfb_(side, trans, "Forward", "Columnwise", &mi, &ni, &ib, &a[
+ i__ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc *
+ c_dim1], ldc, &work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ return 0;
+
+/* End of ZUNMQR */
+
+} /* zunmqr_ */
diff --git a/contrib/libs/clapack/zunmr2.c b/contrib/libs/clapack/zunmr2.c
new file mode 100644
index 0000000000..e3ab2fe52e
--- /dev/null
+++ b/contrib/libs/clapack/zunmr2.c
@@ -0,0 +1,245 @@
+/* zunmr2.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zunmr2_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublecomplex *a, integer *lda, doublecomplex *tau,
+ doublecomplex *c__, integer *ldc, doublecomplex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, i1, i2, i3, mi, ni, nq;
+ doublecomplex aii;
+ logical left;
+ doublecomplex taui;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *), xerbla_(char *, integer *), zlacgv_(integer *, doublecomplex *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNMR2 overwrites the general complex m-by-n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'C', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'C', */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1)' H(2)' . . . H(k)' */
+
+/* as returned by ZGERQF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'C': apply Q' (Conjugate transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* ZGERQF in the last k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) COMPLEX*16 array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZGERQF. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the m-by-n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNMR2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) or H(i)' is applied to C(1:m-k+i,1:n) */
+
+ mi = *m - *k + i__;
+ } else {
+
+/* H(i) or H(i)' is applied to C(1:m,1:n-k+i) */
+
+ ni = *n - *k + i__;
+ }
+
+/* Apply H(i) or H(i)' */
+
+ if (notran) {
+ d_cnjg(&z__1, &tau[i__]);
+ taui.r = z__1.r, taui.i = z__1.i;
+ } else {
+ i__3 = i__;
+ taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+ }
+ i__3 = nq - *k + i__ - 1;
+ zlacgv_(&i__3, &a[i__ + a_dim1], lda);
+ i__3 = i__ + (nq - *k + i__) * a_dim1;
+ aii.r = a[i__3].r, aii.i = a[i__3].i;
+ i__3 = i__ + (nq - *k + i__) * a_dim1;
+ a[i__3].r = 1., a[i__3].i = 0.;
+ zlarf_(side, &mi, &ni, &a[i__ + a_dim1], lda, &taui, &c__[c_offset],
+ ldc, &work[1]);
+ i__3 = i__ + (nq - *k + i__) * a_dim1;
+ a[i__3].r = aii.r, a[i__3].i = aii.i;
+ i__3 = nq - *k + i__ - 1;
+ zlacgv_(&i__3, &a[i__ + a_dim1], lda);
+/* L10: */
+ }
+ return 0;
+
+/* End of ZUNMR2 */
+
+} /* zunmr2_ */
diff --git a/contrib/libs/clapack/zunmr3.c b/contrib/libs/clapack/zunmr3.c
new file mode 100644
index 0000000000..bd39c0b9db
--- /dev/null
+++ b/contrib/libs/clapack/zunmr3.c
@@ -0,0 +1,254 @@
+/* zunmr3.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zunmr3_(char *side, char *trans, integer *m, integer *n,
+ integer *k, integer *l, doublecomplex *a, integer *lda, doublecomplex
+ *tau, doublecomplex *c__, integer *ldc, doublecomplex *work, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, i1, i2, i3, ja, ic, jc, mi, ni, nq;
+ logical left;
+ doublecomplex taui;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zlarz_(char *, integer *, integer *, integer *
+, doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *), xerbla_(char *, integer *);
+ logical notran;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNMR3 overwrites the general complex m by n matrix C with */
+
+/* Q * C if SIDE = 'L' and TRANS = 'N', or */
+
+/* Q'* C if SIDE = 'L' and TRANS = 'C', or */
+
+/* C * Q if SIDE = 'R' and TRANS = 'N', or */
+
+/* C * Q' if SIDE = 'R' and TRANS = 'C', */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by ZTZRZF. Q is of order m if SIDE = 'L' and of order n */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q' from the Left */
+/* = 'R': apply Q or Q' from the Right */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': apply Q (No transpose) */
+/* = 'C': apply Q' (Conjugate transpose) */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* L (input) INTEGER */
+/* The number of columns of the matrix A containing */
+/* the meaningful part of the Householder reflectors. */
+/* If SIDE = 'L', M >= L >= 0, if SIDE = 'R', N >= L >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* ZTZRZF in the last k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) COMPLEX*16 array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZTZRZF. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the m-by-n matrix C. */
+/* On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension */
+/* (N) if SIDE = 'L', */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*l < 0 || left && *l > *m || ! left && *l > *n) {
+ *info = -6;
+ } else if (*lda < max(1,*k)) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNMR3", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || *k == 0) {
+ return 0;
+ }
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ ja = *m - *l + 1;
+ jc = 1;
+ } else {
+ mi = *m;
+ ja = *n - *l + 1;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) or H(i)' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) or H(i)' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) or H(i)' */
+
+ if (notran) {
+ i__3 = i__;
+ taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+ } else {
+ d_cnjg(&z__1, &tau[i__]);
+ taui.r = z__1.r, taui.i = z__1.i;
+ }
+ zlarz_(side, &mi, &ni, l, &a[i__ + ja * a_dim1], lda, &taui, &c__[ic
+ + jc * c_dim1], ldc, &work[1]);
+
+/* L10: */
+ }
+
+ return 0;
+
+/* End of ZUNMR3 */
+
+} /* zunmr3_ */
diff --git a/contrib/libs/clapack/zunmrq.c b/contrib/libs/clapack/zunmrq.c
new file mode 100644
index 0000000000..5cd6d34c58
--- /dev/null
+++ b/contrib/libs/clapack/zunmrq.c
@@ -0,0 +1,339 @@
+/* zunmrq.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int zunmrq_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublecomplex *a, integer *lda, doublecomplex *tau,
+ doublecomplex *c__, integer *ldc, doublecomplex *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ doublecomplex t[4160] /* was [65][64] */;
+ integer i1, i2, i3, ib, nb, mi, ni, nq, nw, iws;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ extern /* Subroutine */ int zunmr2_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ extern /* Subroutine */ int zlarfb_(char *, char *, char *, char *,
+ integer *, integer *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ logical notran;
+ integer ldwork;
+ extern /* Subroutine */ int zlarft_(char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+ char transt[1];
+ integer lwkopt;
+ logical lquery;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNMRQ overwrites the general complex M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'C': Q**H * C C * Q**H */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1)' H(2)' . . . H(k)' */
+
+/* as returned by ZGERQF. Q is of order M if SIDE = 'L' and of order N */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**H from the Left; */
+/* = 'R': apply Q or Q**H from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'C': Transpose, apply Q**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* ZGERQF in the last k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) COMPLEX*16 array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZGERQF. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = max(1,*n);
+ } else {
+ nq = *n;
+ nw = max(1,*m);
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+
+ if (*info == 0) {
+ if (*m == 0 || *n == 0) {
+ lwkopt = 1;
+ } else {
+
+/* Determine the block size. NB may be at most NBMAX, where */
+/* NBMAX is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "ZUNMRQ", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ lwkopt = nw * nb;
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+ if (*lwork < nw && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNMRQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "ZUNMRQ", ch__1, m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ zunmr2_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ i__4 = nq - *k + i__ + ib - 1;
+ zlarft_("Backward", "Rowwise", &i__4, &ib, &a[i__ + a_dim1], lda,
+ &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(1:m-k+i+ib-1,1:n) */
+
+ mi = *m - *k + i__ + ib - 1;
+ } else {
+
+/* H or H' is applied to C(1:m,1:n-k+i+ib-1) */
+
+ ni = *n - *k + i__ + ib - 1;
+ }
+
+/* Apply H or H' */
+
+ zlarfb_(side, transt, "Backward", "Rowwise", &mi, &ni, &ib, &a[
+ i__ + a_dim1], lda, t, &c__65, &c__[c_offset], ldc, &work[
+ 1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ return 0;
+
+/* End of ZUNMRQ */
+
+} /* zunmrq_ */
diff --git a/contrib/libs/clapack/zunmrz.c b/contrib/libs/clapack/zunmrz.c
new file mode 100644
index 0000000000..8cc2089469
--- /dev/null
+++ b/contrib/libs/clapack/zunmrz.c
@@ -0,0 +1,371 @@
+/* zunmrz.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+static integer c__65 = 65;
+
+/* Subroutine */ int zunmrz_(char *side, char *trans, integer *m, integer *n,
+ integer *k, integer *l, doublecomplex *a, integer *lda, doublecomplex
+ *tau, doublecomplex *c__, integer *ldc, doublecomplex *work, integer *
+ lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i__;
+ doublecomplex t[4160] /* was [65][64] */;
+ integer i1, i2, i3, ib, ic, ja, jc, nb, mi, ni, nq, nw, iws;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer nbmin, iinfo;
+ extern /* Subroutine */ int zunmr3_(char *, char *, integer *, integer *,
+ integer *, integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ logical notran;
+ integer ldwork;
+ extern /* Subroutine */ int zlarzb_(char *, char *, char *, char *,
+ integer *, integer *, integer *, integer *, doublecomplex *,
+ integer *, doublecomplex *, integer *, doublecomplex *, integer *,
+ doublecomplex *, integer *);
+ char transt[1];
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int zlarzt_(char *, char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* January 2007 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNMRZ overwrites the general complex M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'C': Q**H * C C * Q**H */
+
+/* where Q is a complex unitary matrix defined as the product of k */
+/* elementary reflectors */
+
+/* Q = H(1) H(2) . . . H(k) */
+
+/* as returned by ZTZRZF. Q is of order M if SIDE = 'L' and of order N */
+/* if SIDE = 'R'. */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**H from the Left; */
+/* = 'R': apply Q or Q**H from the Right. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'C': Conjugate transpose, apply Q**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* K (input) INTEGER */
+/* The number of elementary reflectors whose product defines */
+/* the matrix Q. */
+/* If SIDE = 'L', M >= K >= 0; */
+/* if SIDE = 'R', N >= K >= 0. */
+
+/* L (input) INTEGER */
+/* The number of columns of the matrix A containing */
+/* the meaningful part of the Householder reflectors. */
+/* If SIDE = 'L', M >= L >= 0, if SIDE = 'R', N >= L >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension */
+/* (LDA,M) if SIDE = 'L', */
+/* (LDA,N) if SIDE = 'R' */
+/* The i-th row must contain the vector which defines the */
+/* elementary reflector H(i), for i = 1,2,...,k, as returned by */
+/* ZTZRZF in the last k rows of its array argument A. */
+/* A is modified by the routine but restored on exit. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. LDA >= max(1,K). */
+
+/* TAU (input) COMPLEX*16 array, dimension (K) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZTZRZF. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >= M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* Further Details */
+/* =============== */
+
+/* Based on contributions by */
+/* A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. Local Arrays .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = max(1,*n);
+ } else {
+ nq = *n;
+ nw = max(1,*m);
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*k < 0 || *k > nq) {
+ *info = -5;
+ } else if (*l < 0 || left && *l > *m || ! left && *l > *n) {
+ *info = -6;
+ } else if (*lda < max(1,*k)) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ }
+
+ if (*info == 0) {
+ if (*m == 0 || *n == 0) {
+ lwkopt = 1;
+ } else {
+
+/* Determine the block size. NB may be at most NBMAX, where */
+/* NBMAX is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "ZUNMRQ", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ lwkopt = nw * nb;
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+ if (*lwork < max(1,nw) && ! lquery) {
+ *info = -13;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUNMRZ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+/* Determine the block size. NB may be at most NBMAX, where NBMAX */
+/* is used to define the local array T. */
+
+/* Computing MIN */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "ZUNMRQ", ch__1, m, n, k, &c_n1);
+ nb = min(i__1,i__2);
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/* Computing MAX */
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "ZUNMRQ", ch__1, m, n, k, &c_n1);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if (nb < nbmin || nb >= *k) {
+
+/* Use unblocked code */
+
+ zunmr3_(side, trans, m, n, k, l, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if (left && ! notran || ! left && notran) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ ja = *m - *l + 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ ja = *n - *l + 1;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/* Form the triangular factor of the block reflector */
+/* H = H(i+ib-1) . . . H(i+1) H(i) */
+
+ zlarzt_("Backward", "Rowwise", l, &ib, &a[i__ + ja * a_dim1], lda,
+ &tau[i__], t, &c__65);
+
+ if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H or H' */
+
+ zlarzb_(side, transt, "Backward", "Rowwise", &mi, &ni, &ib, l, &a[
+ i__ + ja * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1]
+, ldc, &work[1], &ldwork);
+/* L10: */
+ }
+
+ }
+
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+
+ return 0;
+
+/* End of ZUNMRZ */
+
+} /* zunmrz_ */
diff --git a/contrib/libs/clapack/zunmtr.c b/contrib/libs/clapack/zunmtr.c
new file mode 100644
index 0000000000..012737a786
--- /dev/null
+++ b/contrib/libs/clapack/zunmtr.c
@@ -0,0 +1,295 @@
+/* zunmtr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static integer c_n1 = -1;
+static integer c__2 = 2;
+
+/* Subroutine */ int zunmtr_(char *side, char *uplo, char *trans, integer *m,
+ integer *n, doublecomplex *a, integer *lda, doublecomplex *tau,
+ doublecomplex *c__, integer *ldc, doublecomplex *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2, i__3;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ integer i1, i2, nb, mi, ni, nq, nw;
+ logical left;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *);
+ integer lwkopt;
+ logical lquery;
+ extern /* Subroutine */ int zunmql_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *), zunmqr_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, integer *, doublecomplex *,
+ doublecomplex *, integer *, doublecomplex *, integer *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUNMTR overwrites the general complex M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'C': Q**H * C C * Q**H */
+
+/* where Q is a complex unitary matrix of order nq, with nq = m if */
+/* SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of */
+/* nq-1 elementary reflectors, as returned by ZHETRD: */
+
+/* if UPLO = 'U', Q = H(nq-1) . . . H(2) H(1); */
+
+/* if UPLO = 'L', Q = H(1) H(2) . . . H(nq-1). */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**H from the Left; */
+/* = 'R': apply Q or Q**H from the Right. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangle of A contains elementary reflectors */
+/* from ZHETRD; */
+/* = 'L': Lower triangle of A contains elementary reflectors */
+/* from ZHETRD. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'C': Conjugate transpose, apply Q**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* A (input) COMPLEX*16 array, dimension */
+/* (LDA,M) if SIDE = 'L' */
+/* (LDA,N) if SIDE = 'R' */
+/* The vectors which define the elementary reflectors, as */
+/* returned by ZHETRD. */
+
+/* LDA (input) INTEGER */
+/* The leading dimension of the array A. */
+/* LDA >= max(1,M) if SIDE = 'L'; LDA >= max(1,N) if SIDE = 'R'. */
+
+/* TAU (input) COMPLEX*16 array, dimension */
+/* (M-1) if SIDE = 'L' */
+/* (N-1) if SIDE = 'R' */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZHETRD. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK)) */
+/* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
+
+/* LWORK (input) INTEGER */
+/* The dimension of the array WORK. */
+/* If SIDE = 'L', LWORK >= max(1,N); */
+/* if SIDE = 'R', LWORK >= max(1,M). */
+/* For optimum performance LWORK >= N*NB if SIDE = 'L', and */
+/* LWORK >=M*NB if SIDE = 'R', where NB is the optimal */
+/* blocksize. */
+
+/* If LWORK = -1, then a workspace query is assumed; the routine */
+/* only calculates the optimal size of the WORK array, returns */
+/* this value as the first entry of the WORK array, and no error */
+/* message related to LWORK is issued by XERBLA. */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "C")) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+ if (upper) {
+ if (left) {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ nb = ilaenv_(&c__1, "ZUNMQL", ch__1, &i__2, n, &i__3, &c_n1);
+ } else {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "ZUNMQL", ch__1, m, &i__2, &i__3, &c_n1);
+ }
+ } else {
+ if (left) {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ nb = ilaenv_(&c__1, "ZUNMQR", ch__1, &i__2, n, &i__3, &c_n1);
+ } else {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "ZUNMQR", ch__1, m, &i__2, &i__3, &c_n1);
+ }
+ }
+ lwkopt = max(1,nw) * nb;
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ }
+
+ if (*info != 0) {
+ i__2 = -(*info);
+ xerbla_("ZUNMTR", &i__2);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0 || nq == 1) {
+ work[1].r = 1., work[1].i = 0.;
+ return 0;
+ }
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ }
+
+ if (upper) {
+
+/* Q was determined by a call to ZHETRD with UPLO = 'U' */
+
+ i__2 = nq - 1;
+ zunmql_(side, trans, &mi, &ni, &i__2, &a[(a_dim1 << 1) + 1], lda, &
+ tau[1], &c__[c_offset], ldc, &work[1], lwork, &iinfo);
+ } else {
+
+/* Q was determined by a call to ZHETRD with UPLO = 'L' */
+
+ if (left) {
+ i1 = 2;
+ i2 = 1;
+ } else {
+ i1 = 1;
+ i2 = 2;
+ }
+ i__2 = nq - 1;
+ zunmqr_(side, trans, &mi, &ni, &i__2, &a[a_dim1 + 2], lda, &tau[1], &
+ c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+ }
+ work[1].r = (doublereal) lwkopt, work[1].i = 0.;
+ return 0;
+
+/* End of ZUNMTR */
+
+} /* zunmtr_ */
diff --git a/contrib/libs/clapack/zupgtr.c b/contrib/libs/clapack/zupgtr.c
new file mode 100644
index 0000000000..295472702c
--- /dev/null
+++ b/contrib/libs/clapack/zupgtr.c
@@ -0,0 +1,221 @@
+/* zupgtr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Subroutine */ int zupgtr_(char *uplo, integer *n, doublecomplex *ap,
+ doublecomplex *tau, doublecomplex *q, integer *ldq, doublecomplex *
+ work, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ integer i__, j, ij;
+ extern logical lsame_(char *, char *);
+ integer iinfo;
+ logical upper;
+ extern /* Subroutine */ int zung2l_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), zung2r_(integer *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), xerbla_(char *, integer *);
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUPGTR generates a complex unitary matrix Q which is defined as the */
+/* product of n-1 elementary reflectors H(i) of order n, as returned by */
+/* ZHPTRD using packed storage: */
+
+/* if UPLO = 'U', Q = H(n-1) . . . H(2) H(1), */
+
+/* if UPLO = 'L', Q = H(1) H(2) . . . H(n-1). */
+
+/* Arguments */
+/* ========= */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangular packed storage used in previous */
+/* call to ZHPTRD; */
+/* = 'L': Lower triangular packed storage used in previous */
+/* call to ZHPTRD. */
+
+/* N (input) INTEGER */
+/* The order of the matrix Q. N >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */
+/* The vectors which define the elementary reflectors, as */
+/* returned by ZHPTRD. */
+
+/* TAU (input) COMPLEX*16 array, dimension (N-1) */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZHPTRD. */
+
+/* Q (output) COMPLEX*16 array, dimension (LDQ,N) */
+/* The N-by-N unitary matrix Q. */
+
+/* LDQ (input) INTEGER */
+/* The leading dimension of the array Q. LDQ >= max(1,N). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension (N-1) */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ --ap;
+ --tau;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldq < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUPGTR", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Q was determined by a call to ZHPTRD with UPLO = 'U' */
+
+/* Unpack the vectors which define the elementary reflectors and */
+/* set the last row and column of Q equal to those of the unit */
+/* matrix */
+
+ ij = 2;
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * q_dim1;
+ i__4 = ij;
+ q[i__3].r = ap[i__4].r, q[i__3].i = ap[i__4].i;
+ ++ij;
+/* L10: */
+ }
+ ij += 2;
+ i__2 = *n + j * q_dim1;
+ q[i__2].r = 0., q[i__2].i = 0.;
+/* L20: */
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + *n * q_dim1;
+ q[i__2].r = 0., q[i__2].i = 0.;
+/* L30: */
+ }
+ i__1 = *n + *n * q_dim1;
+ q[i__1].r = 1., q[i__1].i = 0.;
+
+/* Generate Q(1:n-1,1:n-1) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zung2l_(&i__1, &i__2, &i__3, &q[q_offset], ldq, &tau[1], &work[1], &
+ iinfo);
+
+ } else {
+
+/* Q was determined by a call to ZHPTRD with UPLO = 'L'. */
+
+/* Unpack the vectors which define the elementary reflectors and */
+/* set the first row and column of Q equal to those of the unit */
+/* matrix */
+
+ i__1 = q_dim1 + 1;
+ q[i__1].r = 1., q[i__1].i = 0.;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ i__2 = i__ + q_dim1;
+ q[i__2].r = 0., q[i__2].i = 0.;
+/* L40: */
+ }
+ ij = 3;
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j * q_dim1 + 1;
+ q[i__2].r = 0., q[i__2].i = 0.;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * q_dim1;
+ i__4 = ij;
+ q[i__3].r = ap[i__4].r, q[i__3].i = ap[i__4].i;
+ ++ij;
+/* L50: */
+ }
+ ij += 2;
+/* L60: */
+ }
+ if (*n > 1) {
+
+/* Generate Q(2:n,2:n) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ zung2r_(&i__1, &i__2, &i__3, &q[(q_dim1 << 1) + 2], ldq, &tau[1],
+ &work[1], &iinfo);
+ }
+ }
+ return 0;
+
+/* End of ZUPGTR */
+
+} /* zupgtr_ */
diff --git a/contrib/libs/clapack/zupmtr.c b/contrib/libs/clapack/zupmtr.c
new file mode 100644
index 0000000000..5d10cea56f
--- /dev/null
+++ b/contrib/libs/clapack/zupmtr.c
@@ -0,0 +1,321 @@
+/* zupmtr.f -- translated by f2c (version 20061008).
+ You must link the resulting object file with libf2c:
+ on Microsoft Windows system, link with libf2c.lib;
+ on Linux or Unix systems, link with .../path/to/libf2c.a -lm
+ or, if you install libf2c.a in a standard place, with -lf2c -lm
+ -- in that order, at the end of the command line, as in
+ cc *.o -lf2c -lm
+ Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
+
+ http://www.netlib.org/f2c/libf2c.zip
+*/
+
+#include "f2c.h"
+#include "blaswrap.h"
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+
+/* Subroutine */ int zupmtr_(char *side, char *uplo, char *trans, integer *m,
+ integer *n, doublecomplex *ap, doublecomplex *tau, doublecomplex *c__,
+ integer *ldc, doublecomplex *work, integer *info)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, i__1, i__2, i__3;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ integer i__, i1, i2, i3, ic, jc, ii, mi, ni, nq;
+ doublecomplex aii;
+ logical left;
+ doublecomplex taui;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zlarf_(char *, integer *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *, doublecomplex *);
+ logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ logical notran, forwrd;
+
+
+/* -- LAPACK routine (version 3.2) -- */
+/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
+/* November 2006 */
+
+/* .. Scalar Arguments .. */
+/* .. */
+/* .. Array Arguments .. */
+/* .. */
+
+/* Purpose */
+/* ======= */
+
+/* ZUPMTR overwrites the general complex M-by-N matrix C with */
+
+/* SIDE = 'L' SIDE = 'R' */
+/* TRANS = 'N': Q * C C * Q */
+/* TRANS = 'C': Q**H * C C * Q**H */
+
+/* where Q is a complex unitary matrix of order nq, with nq = m if */
+/* SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of */
+/* nq-1 elementary reflectors, as returned by ZHPTRD using packed */
+/* storage: */
+
+/* if UPLO = 'U', Q = H(nq-1) . . . H(2) H(1); */
+
+/* if UPLO = 'L', Q = H(1) H(2) . . . H(nq-1). */
+
+/* Arguments */
+/* ========= */
+
+/* SIDE (input) CHARACTER*1 */
+/* = 'L': apply Q or Q**H from the Left; */
+/* = 'R': apply Q or Q**H from the Right. */
+
+/* UPLO (input) CHARACTER*1 */
+/* = 'U': Upper triangular packed storage used in previous */
+/* call to ZHPTRD; */
+/* = 'L': Lower triangular packed storage used in previous */
+/* call to ZHPTRD. */
+
+/* TRANS (input) CHARACTER*1 */
+/* = 'N': No transpose, apply Q; */
+/* = 'C': Conjugate transpose, apply Q**H. */
+
+/* M (input) INTEGER */
+/* The number of rows of the matrix C. M >= 0. */
+
+/* N (input) INTEGER */
+/* The number of columns of the matrix C. N >= 0. */
+
+/* AP (input) COMPLEX*16 array, dimension */
+/* (M*(M+1)/2) if SIDE = 'L' */
+/* (N*(N+1)/2) if SIDE = 'R' */
+/* The vectors which define the elementary reflectors, as */
+/* returned by ZHPTRD. AP is modified by the routine but */
+/* restored on exit. */
+
+/* TAU (input) COMPLEX*16 array, dimension (M-1) if SIDE = 'L' */
+/* or (N-1) if SIDE = 'R' */
+/* TAU(i) must contain the scalar factor of the elementary */
+/* reflector H(i), as returned by ZHPTRD. */
+
+/* C (input/output) COMPLEX*16 array, dimension (LDC,N) */
+/* On entry, the M-by-N matrix C. */
+/* On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q. */
+
+/* LDC (input) INTEGER */
+/* The leading dimension of the array C. LDC >= max(1,M). */
+
+/* WORK (workspace) COMPLEX*16 array, dimension */
+/* (N) if SIDE = 'L' */
+/* (M) if SIDE = 'R' */
+
+/* INFO (output) INTEGER */
+/* = 0: successful exit */
+/* < 0: if INFO = -i, the i-th argument had an illegal value */
+
+/* ===================================================================== */
+
+/* .. Parameters .. */
+/* .. */
+/* .. Local Scalars .. */
+/* .. */
+/* .. External Functions .. */
+/* .. */
+/* .. External Subroutines .. */
+/* .. */
+/* .. Intrinsic Functions .. */
+/* .. */
+/* .. Executable Statements .. */
+
+/* Test the input arguments */
+
+ /* Parameter adjustments */
+ --ap;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ upper = lsame_(uplo, "U");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*ldc < max(1,*m)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZUPMTR", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m == 0 || *n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Q was determined by a call to ZHPTRD with UPLO = 'U' */
+
+/* FORWRD chooses the traversal order of the reflectors: when true */
+/* the loop below runs i = 1,...,nq-1, otherwise nq-1,...,1. */
+
+ forwrd = left && notran || ! left && ! notran;
+
+ if (forwrd) {
+ i1 = 1;
+ i2 = nq - 1;
+ i3 = 1;
+ ii = 2;
+ } else {
+ i1 = nq - 1;
+ i2 = 1;
+ i3 = -1;
+ ii = nq * (nq + 1) / 2 - 1;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) or H(i)' is applied to C(1:i,1:n) */
+
+ mi = i__;
+ } else {
+
+/* H(i) or H(i)' is applied to C(1:m,1:i) */
+
+ ni = i__;
+ }
+
+/* Apply H(i) or H(i)' */
+
+ if (notran) {
+ i__3 = i__;
+ taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+ } else {
+ d_cnjg(&z__1, &tau[i__]);
+ taui.r = z__1.r, taui.i = z__1.i;
+ }
+/* AP(II) occupies the slot of the implicit v(i) = 1 entry of the */
+/* reflector vector: save it, overwrite with 1 so ZLARF can use */
+/* AP(II-I+1 : II) directly, then restore it afterwards. */
+ i__3 = ii;
+ aii.r = ap[i__3].r, aii.i = ap[i__3].i;
+ i__3 = ii;
+ ap[i__3].r = 1., ap[i__3].i = 0.;
+ zlarf_(side, &mi, &ni, &ap[ii - i__ + 1], &c__1, &taui, &c__[
+ c_offset], ldc, &work[1]);
+ i__3 = ii;
+ ap[i__3].r = aii.r, ap[i__3].i = aii.i;
+
+/* Step II to the matching entry in the next (or previous) */
+/* column of the packed upper triangle. */
+ if (forwrd) {
+ ii = ii + i__ + 2;
+ } else {
+ ii = ii - i__ - 1;
+ }
+/* L10: */
+ }
+ } else {
+
+/* Q was determined by a call to ZHPTRD with UPLO = 'L'. */
+
+/* Traversal order is mirrored relative to the UPLO = 'U' case */
+/* because the factors of Q appear in the opposite order. */
+
+ forwrd = left && ! notran || ! left && notran;
+
+ if (forwrd) {
+ i1 = 1;
+ i2 = nq - 1;
+ i3 = 1;
+ ii = 2;
+ } else {
+ i1 = nq - 1;
+ i2 = 1;
+ i3 = -1;
+ ii = nq * (nq + 1) / 2 - 1;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__2 = i2;
+ i__1 = i3;
+ for (i__ = i1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+/* Save AP(II), the slot of the implicit v(i) = 1 entry, and set */
+/* it to 1 for the duration of the ZLARF call (restored below). */
+ i__3 = ii;
+ aii.r = ap[i__3].r, aii.i = ap[i__3].i;
+ i__3 = ii;
+ ap[i__3].r = 1., ap[i__3].i = 0.;
+ if (left) {
+
+/* H(i) or H(i)' is applied to C(i+1:m,1:n) */
+
+ mi = *m - i__;
+ ic = i__ + 1;
+ } else {
+
+/* H(i) or H(i)' is applied to C(1:m,i+1:n) */
+
+ ni = *n - i__;
+ jc = i__ + 1;
+ }
+
+/* Apply H(i) or H(i)' */
+
+ if (notran) {
+ i__3 = i__;
+ taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+ } else {
+ d_cnjg(&z__1, &tau[i__]);
+ taui.r = z__1.r, taui.i = z__1.i;
+ }
+ zlarf_(side, &mi, &ni, &ap[ii], &c__1, &taui, &c__[ic + jc *
+ c_dim1], ldc, &work[1]);
+ i__3 = ii;
+ ap[i__3].r = aii.r, ap[i__3].i = aii.i;
+
+/* Step II to the matching entry in the next (or previous) */
+/* column of the packed lower triangle. */
+ if (forwrd) {
+ ii = ii + nq - i__ + 1;
+ } else {
+ ii = ii - nq + i__ - 2;
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of ZUPMTR */
+
+} /* zupmtr_ */
diff --git a/contrib/libs/libf2c/Notice b/contrib/libs/libf2c/Notice
new file mode 100644
index 0000000000..261b719bc5
--- /dev/null
+++ b/contrib/libs/libf2c/Notice
@@ -0,0 +1,23 @@
+/****************************************************************
+Copyright 1990 - 1997 by AT&T, Lucent Technologies and Bellcore.
+
+Permission to use, copy, modify, and distribute this software
+and its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appear in all
+copies and that both that the copyright notice and this
+permission notice and warranty disclaimer appear in supporting
+documentation, and that the names of AT&T, Bell Laboratories,
+Lucent or Bellcore or any of their entities not be used in
+advertising or publicity pertaining to distribution of the
+software without specific, written prior permission.
+
+AT&T, Lucent and Bellcore disclaim all warranties with regard to
+this software, including all implied warranties of
+merchantability and fitness. In no event shall AT&T, Lucent or
+Bellcore be liable for any special, indirect or consequential
+damages or any damages whatsoever resulting from loss of use,
+data or profits, whether in an action of contract, negligence or
+other tortious action, arising out of or in connection with the
+use or performance of this software.
+****************************************************************/
+
diff --git a/contrib/libs/libf2c/README b/contrib/libs/libf2c/README
new file mode 100644
index 0000000000..940a354e35
--- /dev/null
+++ b/contrib/libs/libf2c/README
@@ -0,0 +1,374 @@
+As shipped, "makefile" is a copy of "makefile.u", a Unix makefile.
+Variants for other systems have names of the form makefile.* and
+have initial comments saying how to invoke them. You may wish to
+copy one of the other makefile.* files to makefile.
+
+If you use a C++ compiler, first say
+
+ make hadd
+
+to create a suitable f2c.h from f2c.h0 and f2ch.add. Otherwise,
+
+ make f2c.h
+
+will just copy f2c.h0 to f2c.h .
+
+If your compiler does not recognize ANSI C headers,
+compile with KR_headers defined: either add -DKR_headers
+to the definition of CFLAGS in the makefile, or insert
+
+#define KR_headers
+
+at the top of f2c.h .
+
+If your system lacks onexit() and you are not using an ANSI C
+compiler, then you should compile main.c with NO_ONEXIT defined.
+See the comments about onexit in makefile.u.
+
+If your system has a double drem() function such that drem(a,b)
+is the IEEE remainder function (with double a, b), then you may
+wish to compile r_mod.c and d_mod.c with IEEE_drem defined.
+
+To check for transmission errors, issue the command
+ make check
+or
+ make -f makefile.u check
+
+This assumes you have the xsum program whose source, xsum.c,
+is distributed as part of "all from f2c/src", and that it
+is installed somewhere in your search path. If you do not
+have xsum, you can obtain xsum.c by sending the following E-mail
+message to netlib@netlib.bell-labs.com
+ send xsum.c from f2c/src
+
+For convenience, the f2c.h0 in this directory is a copy of netlib's
+"f2c.h from f2c". It is best to install f2c.h in a standard place,
+so "include f2c.h" will work in any directory without further ado.
+Beware that the makefiles do not cause recompilation when f2c.h is
+changed.
+
+On machines, such as those using a DEC Alpha processor, on which
+sizeof(short) == 2, sizeof(int) == sizeof(float) == 4, and
+sizeof(long) == sizeof(double) == 8, it suffices to modify f2c.h by
+removing the first occurrence of "long " on each line containing
+"long ". On Unix systems, you can do this by issuing the commands
+ mv f2c.h f2c.h0
+ sed 's/long int /int /' f2c.h0 >f2c.h
+On such machines, one can enable INTEGER*8 by uncommenting the typedefs
+of longint and ulongint in f2c.h and adjusting them, so they read
+ typedef long longint;
+ typedef unsigned long ulongint;
+and by compiling libf2c with -DAllow_TYQUAD, as discussed below.
+
+
+Most of the routines in libf2c are support routines for Fortran
+intrinsic functions or for operations that f2c chooses not
+to do "in line". There are a few exceptions, summarized below --
+functions and subroutines that appear to your program as ordinary
+external Fortran routines.
+
+If you use the REAL valued functions listed below (ERF, ERFC,
+DTIME, and ETIME) with "f2c -R", then you need to compile the
+corresponding source files with -DREAL=float. To do this, it is
+perhaps simplest to add "-DREAL=float" to CFLAGS in the makefile.
+
+1. CALL ABORT prints a message and causes a core dump.
+
+2. ERF(r) and DERF(d) and the REAL and DOUBLE PRECISION
+ error functions (with x REAL and d DOUBLE PRECISION);
+ DERF must be declared DOUBLE PRECISION in your program.
+ Both ERF and DERF assume your C library provides the
+ underlying erf() function (which not all systems do).
+
+3. ERFC(r) and DERFC(d) are the complementary error functions:
+	ERFC(r) = 1 - ERF(r) and DERFC(d) = 1.d0 - DERF(d)
+ (except that their results may be more accurate than
+ explicitly evaluating the above formulae would give).
+ Again, ERFC and r are REAL, and DERFC and d are DOUBLE
+ PRECISION (and must be declared as such in your program),
+ and ERFC and DERFC rely on your system's erfc().
+
+4. CALL GETARG(n,s), where n is an INTEGER and s is a CHARACTER
+ variable, sets s to the n-th command-line argument (or to
+ all blanks if there are fewer than n command-line arguments);
+ CALL GETARG(0,s) sets s to the name of the program (on systems
+ that support this feature). See IARGC below.
+
+5. CALL GETENV(name, value), where name and value are of type
+ CHARACTER, sets value to the environment value, $name, of
+ name (or to blanks if $name has not been set).
+
+6. NARGS = IARGC() sets NARGS to the number of command-line
+ arguments (an INTEGER value).
+
+7. CALL SIGNAL(n,func), where n is an INTEGER and func is an
+ EXTERNAL procedure, arranges for func to be invoked when n
+ occurs (on systems where this makes sense).
+
+If your compiler complains about the signal calls in main.c, s_paus.c,
+and signal_.c, you may need to adjust signal1.h suitably. See the
+comments in signal1.h.
+
+8. ETIME(ARR) and DTIME(ARR) are REAL functions that return
+ execution times. ARR is declared REAL ARR(2). The elapsed
+ user and system CPU times are stored in ARR(1) and ARR(2),
+ respectively. ETIME returns the total elapsed CPU time,
+ i.e., ARR(1) + ARR(2). DTIME returns total elapsed CPU
+ time since the previous call on DTIME.
+
+9. CALL SYSTEM(cmd), where cmd is of type CHARACTER, passes
+ cmd to the system's command processor (on systems where
+ this can be done).
+
+10. CALL FLUSH flushes all buffers.
+
+11. FTELL(i) is an INTEGER function that returns the current
+ offset of Fortran unit i (or -1 if unit i is not open).
+
+12.	CALL FSEEK(i, offset, whence, *errlab) attempts to move
+ Fortran unit i to the specified offset: absolute offset
+ if whence = 0; relative to the current offset if whence = 1;
+ relative to the end of the file if whence = 2. It branches
+ to label errlab if unit i is not open or if the call
+ otherwise fails.
+
+The routines whose objects are makefile.u's $(I77) are for I/O.
+The following comments apply to them.
+
+If your system lacks /usr/include/local.h ,
+then you should create an appropriate local.h in
+this directory. An appropriate local.h may simply
+be empty, or it may #define VAX or #define CRAY
+(or whatever else you must do to make fp.h work right).
+Alternatively, edit fp.h to suit your machine.
+
+If your system lacks /usr/include/fcntl.h , then you
+should simply create an empty fcntl.h in this directory.
+If your compiler then complains about creat and open not
+having a prototype, compile with OPEN_DECL defined.
+On many systems, open and creat are declared in fcntl.h .
+
+If your system's sprintf does not work the way ANSI C
+specifies -- specifically, if it does not return the
+number of characters transmitted -- then insert the line
+
+#define USE_STRLEN
+
+at the end of fmt.h . This is necessary with
+at least some versions of Sun software.
+In particular, if you get a warning about an improper
+pointer/integer combination in compiling wref.c, then
+you need to compile with -DUSE_STRLEN .
+
+If your system's fopen does not like the ANSI binary
+reading and writing modes "rb" and "wb", then you should
+compile open.c with NON_ANSI_RW_MODES #defined.
+
+If you get error messages about references to cf->_ptr
+and cf->_base when compiling wrtfmt.c and wsfe.c or to
+stderr->_flag when compiling err.c, then insert the line
+
+#define NON_UNIX_STDIO
+
+at the beginning of fio.h, and recompile everything (or
+at least those modules that contain NON_UNIX_STDIO).
+
+Unformatted sequential records consist of a length of record
+contents, the record contents themselves, and the length of
+record contents again (for backspace). Prior to 17 Oct. 1991,
+the length was of type int; now it is of type long, but you
+can change it back to int by inserting
+
+#define UIOLEN_int
+
+at the beginning of fio.h. This affects only sue.c and uio.c .
+
+If you have a really ancient K&R C compiler that does not understand
+void, add -Dvoid=int to the definition of CFLAGS in the makefile.
+
+On VAX, Cray, or Research Tenth-Edition Unix systems, you may
+need to add -DVAX, -DCRAY, or -DV10 (respectively) to CFLAGS
+to make fp.h work correctly. Alternatively, you may need to
+edit fp.h to suit your machine.
+
+If your compiler complains about the signal calls in main.c, s_paus.c,
+and signal_.c, you may need to adjust signal1.h suitably. See the
+comments in signal1.h.
+
+You may need to supply the following non-ANSI routines:
+
+ fstat(int fileds, struct stat *buf) is similar
+to stat(char *name, struct stat *buf), except that
+the first argument, fileds, is the file descriptor
+returned by open rather than the name of the file.
+fstat is used in the system-dependent routine
+canseek (in the libf2c source file err.c), which
+is supposed to return 1 if it's possible to issue
+seeks on the file in question, 0 if it's not; you may
+need to suitably modify err.c . On non-UNIX systems,
+you can avoid references to fstat and stat by compiling
+with NON_UNIX_STDIO defined; in that case, you may need
+to supply access(char *Name,0), which is supposed to
+return 0 if file Name exists, nonzero otherwise.
+
+ char * mktemp(char *buf) is supposed to replace the
+6 trailing X's in buf with a unique number and then
+return buf. The idea is to get a unique name for
+a temporary file.
+
+On non-UNIX systems, you may need to change a few other things,
+e.g.: the form of name computed by mktemp() in endfile.c and
+open.c; the use of the open(), close(), and creat() system
+calls in endfile.c, err.c, open.c; and the modes in calls on
+fopen() and fdopen() (and perhaps the use of fdopen() itself
+-- it's supposed to return a FILE* corresponding to a given
+an integer file descriptor) in err.c and open.c (component ufmt
+of struct unit is 1 for formatted I/O -- text mode on some systems
+-- and 0 for unformatted I/O -- binary mode on some systems).
+Compiling with -DNON_UNIX_STDIO omits all references to creat()
+and almost all references to open() and close(), the exception
+being in the function f__isdev() (in open.c).
+
+If you wish to use translated Fortran that has funny notions
+of record length for direct unformatted I/O (i.e., that assumes
+RECL= values in OPEN statements are not bytes but rather counts
+of some other units -- e.g., 4-character words for VMS), then you
+should insert an appropriate #define for url_Adjust at the
+beginning of open.c . For VMS Fortran, for example,
+#define url_Adjust(x) x *= 4
+would suffice.
+
+By default, Fortran I/O units 5, 6, and 0 are pre-connected to
+stdin, stdout, and stderr, respectively. You can change this
+behavior by changing f_init() in err.c to suit your needs.
+Note that f2c assumes READ(*... means READ(5... and WRITE(*...
+means WRITE(6... . Moreover, an OPEN(n,... statement that does
+not specify a file name (and does not specify STATUS='SCRATCH')
+assumes FILE='fort.n' . You can change this by editing open.c
+and endfile.c suitably.
+
+Unless you adjust the "#define MXUNIT" line in fio.h, Fortran units
+0, 1, ..., 99 are available, i.e., the highest allowed unit number
+is MXUNIT - 1.
+
+Lines protected from compilation by #ifdef Allow_TYQUAD
+are for a possible extension to 64-bit integers in which
+integer = int = 32 bits and longint = long = 64 bits.
+
+The makefile does not attempt to compile pow_qq.c, qbitbits.c,
+and qbitshft.c, which are meant for use with INTEGER*8. To use
+INTEGER*8, you must modify f2c.h to declare longint and ulongint
+appropriately; then add $(QINT) to the end of the makefile's
+dependency list for libf2c.a (if makefile is a copy of makefile.u;
+for the PC makefiles, add pow_qq.obj qbitbits.obj qbitshft.obj
+to the library's dependency list and adjust libf2c.lbc or libf2c.sy
+accordingly). Also add -DAllow_TYQUAD to the makefile's CFLAGS
+assignment. To make longint and ulongint available, it may suffice
+to add -DINTEGER_STAR_8 to the CFLAGS assignment.
+
+Following Fortran 90, s_cat.c and s_copy.c allow the target of a
+(character string) assignment to appear on its right-hand side, at
+the cost of some extra overhead for all run-time concatenations.
+If you prefer the extra efficiency that comes with the Fortran 77
+requirement that the left-hand side of a character assignment not
+be involved in the right-hand side, compile s_cat.c and s_copy.c
+with -DNO_OVERWRITE .
+
+Extensions (Feb. 1993) to NAMELIST processing:
+ 1. Reading a ? instead of &name (the start of a namelist) causes
+the namelist being sought to be written to stdout (unit 6);
+to omit this feature, compile rsne.c with -DNo_Namelist_Questions.
+ 2. Reading the wrong namelist name now leads to an error message
+and an attempt to skip input until the right namelist name is found;
+to omit this feature, compile rsne.c with -DNo_Bad_Namelist_Skip.
+ 3. Namelist writes now insert newlines before each variable; to omit
+this feature, compile xwsne.c with -DNo_Extra_Namelist_Newlines.
+ 4. (Sept. 1995) When looking for the &name that starts namelist
+input, lines whose first non-blank character is something other
+than &, $, or ? are treated as comment lines and ignored, unless
+rsne.c is compiled with -DNo_Namelist_Comments.
+
+Nonstandard extension (Feb. 1993) to open: for sequential files,
+ACCESS='APPEND' (or access='anything else starting with "A" or "a"')
+causes the file to be positioned at end-of-file, so a write will
+append to the file.
+
+Some buggy Fortran programs use unformatted direct I/O to write
+an incomplete record and later read more from that record than
+they have written. For records other than the last, the unwritten
+portion of the record reads as binary zeros. The last record is
+a special case: attempting to read more from it than was written
+gives end-of-file -- which may help one find a bug. Some other
+Fortran I/O libraries treat the last record no differently than
+others and thus give no help in finding the bug of reading more
+than was written. If you wish to have this behavior, compile
+uio.c with -DPad_UDread .
+
+If you want to be able to catch write failures (e.g., due to a
+disk being full) with an ERR= specifier, compile dfe.c, due.c,
+sfe.c, sue.c, and wsle.c with -DALWAYS_FLUSH. This will lead to
+slower execution and more I/O, but should make ERR= work as
+expected, provided fflush returns an error return when its
+physical write fails.
+
+Carriage controls are meant to be interpreted by the UNIX col
+program (or a similar program). Sometimes it's convenient to use
+only ' ' as the carriage control character (normal single spacing).
+If you compile lwrite.c and wsfe.c with -DOMIT_BLANK_CC, formatted
+external output lines will have an initial ' ' quietly omitted,
+making use of the col program unnecessary with output that only
+has ' ' for carriage control.
+
+The Fortran 77 Standard leaves it up to the implementation whether
+formatted writes of floating-point numbers of absolute value < 1 have
+a zero before the decimal point. By default, libI77 omits such
+superfluous zeros, but you can cause them to appear by compiling
+lwrite.c, wref.c, and wrtfmt.c with -DWANT_LEAD_0 .
+
+If your (Unix) system lacks a ranlib command, you don't need it.
+Either comment out the makefile's ranlib invocation, or install
+a harmless "ranlib" command somewhere in your PATH, such as the
+one-line shell script
+
+ exit 0
+
+or (on some systems)
+
+ exec /usr/bin/ar lts $1 >/dev/null
+
+By default, the routines that implement complex and double complex
+division, c_div.c and z_div.c, call sig_die to print an error message
+and exit if they see a divisor of 0, as this is sometimes helpful for
+debugging. On systems with IEEE arithmetic, compiling c_div.c and
+z_div.c with -DIEEE_COMPLEX_DIVIDE causes them instead to set both
+the real and imaginary parts of the result to +INFINITY if the
+numerator is nonzero, or to NaN if it vanishes.
+
+Nowadays most Unix and Linux systems have function
+ int ftruncate(int fildes, off_t len);
+defined in system header file unistd.h that adjusts the length of file
+descriptor fildes to length len. Unless endfile.c is compiled with
+-DNO_TRUNCATE, endfile.c #includes "unistd.h" and calls ftruncate() if
+necessary to shorten files. If your system lacks ftruncate(), compile
+endfile.c with -DNO_TRUNCATE to make endfile.c use the older and more
+portable scheme of shortening a file by copying to a temporary file
+and back again.
+
+The initializations for "f2c -trapuv" are done by _uninit_f2c(),
+whose source is uninit.c, introduced June 2001. On IEEE-arithmetic
+systems, _uninit_f2c should initialize floating-point variables to
+signaling NaNs and, at its first invocation, should enable the
+invalid operation exception. Alas, the rules for distinguishing
+signaling from quiet NaNs were not specified in the IEEE P754 standard,
+nor were the precise means of enabling and disabling IEEE-arithmetic
+exceptions, and these details are thus system dependent. There are
+#ifdef's in uninit.c that specify them for some popular systems. If
+yours is not one of these systems, it may take some detective work to
+discover the appropriate details for your system. Sometimes it helps
+to look in the standard include directories for header files with
+relevant-sounding names, such as ieeefp.h, nan.h, or trap.h, and
+it may be simplest to run experiments to see what distinguishes a
+signaling from a quiet NaN. (If x is initialized to a signaling
+NaN and the invalid operation exception is masked off, as it should
+be by default on IEEE-arithmetic systems, then computing, say,
+y = x + 1 will yield a quiet NaN.)
diff --git a/contrib/libs/libf2c/abort_.c b/contrib/libs/libf2c/abort_.c
new file mode 100644
index 0000000000..92c841a709
--- /dev/null
+++ b/contrib/libs/libf2c/abort_.c
@@ -0,0 +1,22 @@
+#include "stdio.h"
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* abort_: runtime support for Fortran's CALL ABORT. Delegates to */
+/* sig_die(), which prints the message and terminates the process; */
+/* the two preprocessor branches provide K&R and ANSI prototypes. */
+#ifdef KR_headers
+extern VOID sig_die();
+
+int abort_()
+#else
+extern void sig_die(const char*,int);
+
+int abort_(void)
+#endif
+{
+sig_die("Fortran abort routine called", 1);
+return 0; /* not reached */
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/arith.h b/contrib/libs/libf2c/arith.h
new file mode 100644
index 0000000000..356d34f5bc
--- /dev/null
+++ b/contrib/libs/libf2c/arith.h
@@ -0,0 +1,8 @@
+/* Host arithmetic description for libf2c, in the form normally */
+/* generated by f2c's arithchk program. NOTE(review): these values */
+/* describe a little-endian IEEE host with 64-bit pointers -- confirm */
+/* they match any new target before reusing this header. */
+#define IEEE_8087
+#define Arith_Kind_ASL 1
+#define Long int
+#define Intcast (int)(long)
+#define Double_Align
+#define X64_bit_pointers
+/* QNaN0/QNaN1: presumably the low/high 32-bit words of the default */
+/* quiet-NaN double on this (little-endian) target -- verify. */
+#define QNaN0 0x0
+#define QNaN1 0xfff80000
diff --git a/contrib/libs/libf2c/backspac.c b/contrib/libs/libf2c/backspac.c
new file mode 100644
index 0000000000..908a61897a
--- /dev/null
+++ b/contrib/libs/libf2c/backspac.c
@@ -0,0 +1,76 @@
+#include "f2c.h"
+#include "fio.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* f_back: runtime support for the Fortran BACKSPACE statement. */
+/* Repositions unit a->aunit one record earlier. Returns 0 on */
+/* success; on failure reports via the err() macro using a->aerr. */
+#ifdef KR_headers
+integer f_back(a) alist *a;
+#else
+integer f_back(alist *a)
+#endif
+{ unit *b;
+ OFF_T v, w, x, y, z;
+ uiolen n;
+ FILE *f;
+
+ f__curunit = b = &f__units[a->aunit]; /* curunit for error messages */
+ if(a->aunit >= MXUNIT || a->aunit < 0)
+ err(a->aerr,101,"backspace")
+ if(b->useek==0) err(a->aerr,106,"backspace")
+ if(b->ufd == NULL) {
+ /* unit not yet connected: open it and treat as at start */
+ fk_open(1, 1, a->aunit);
+ return(0);
+ }
+ if(b->uend==1)
+ { /* at end-of-file: clear the EOF flag, no seek needed */
+ b->uend=0;
+ return(0);
+ }
+ if(b->uwrt) {
+ /* was writing: truncate and switch the unit to reading mode */
+ t_runc(a);
+ if (f__nowreading(b))
+ err(a->aerr,errno,"backspace")
+ }
+ f = b->ufd; /* may have changed in t_runc() */
+ if(b->url>0)
+ {
+ /* direct access: step back to the start of the current */
+ /* (or, on a record boundary, the previous) fixed-size record */
+ x=FTELL(f);
+ y = x % b->url;
+ if(y == 0) x--;
+ x /= b->url;
+ x *= b->url;
+ (void) FSEEK(f,x,SEEK_SET);
+ return(0);
+ }
+
+ if(b->ufmt==0)
+ { /* unformatted sequential: each record is framed by its */
+ /* length (uiolen) on both sides; read the trailing length */
+ /* and seek back over the whole framed record */
+ FSEEK(f,-(OFF_T)sizeof(uiolen),SEEK_CUR);
+ fread((char *)&n,sizeof(uiolen),1,f);
+ FSEEK(f,-(OFF_T)n-2*sizeof(uiolen),SEEK_CUR);
+ return(0);
+ }
+ /* formatted sequential: scan backwards in 64-byte windows for the */
+ /* newline that ends the record before the current one (z tracks */
+ /* the position just after the most recent '\n' seen before w) */
+ w = x = FTELL(f);
+ z = 0;
+ loop:
+ while(x) {
+ x -= x < 64 ? x : 64;
+ FSEEK(f,x,SEEK_SET);
+ for(y = x; y < w; y++) {
+ if (getc(f) != '\n')
+ continue;
+ v = FTELL(f);
+ if (v == w) {
+ if (z)
+ goto break2;
+ goto loop;
+ }
+ z = v;
+ }
+ err(a->aerr,(EOF),"backspace")
+ }
+ break2:
+ FSEEK(f, z, SEEK_SET);
+ return 0;
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/c_abs.c b/contrib/libs/libf2c/c_abs.c
new file mode 100644
index 0000000000..858f2c8b4b
--- /dev/null
+++ b/contrib/libs/libf2c/c_abs.c
@@ -0,0 +1,20 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* c_abs: single-precision complex absolute value |z|, computed by */
+/* the overflow-safe helper f__cabs on the real and imaginary parts. */
+#ifdef KR_headers
+extern double f__cabs();
+
+double c_abs(z) complex *z;
+#else
+extern double f__cabs(double, double);
+
+double c_abs(complex *z)
+#endif
+{
+return( f__cabs( z->r, z->i ) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/c_cos.c b/contrib/libs/libf2c/c_cos.c
new file mode 100644
index 0000000000..29fe49e3cd
--- /dev/null
+++ b/contrib/libs/libf2c/c_cos.c
@@ -0,0 +1,23 @@
+#include "f2c.h"
+
+/* c_cos: complex cosine, r = cos(z), via the identity */
+/* cos(x+iy) = cos(x)cosh(y) - i sin(x)sinh(y). */
+/* zr/zi are copied out first, so r may alias z. */
+#ifdef KR_headers
+extern double sin(), cos(), sinh(), cosh();
+
+VOID c_cos(r, z) complex *r, *z;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void c_cos(complex *r, complex *z)
+#endif
+{
+ double zi = z->i, zr = z->r;
+ r->r = cos(zr) * cosh(zi);
+ r->i = - sin(zr) * sinh(zi);
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/c_div.c b/contrib/libs/libf2c/c_div.c
new file mode 100644
index 0000000000..9463a43d91
--- /dev/null
+++ b/contrib/libs/libf2c/c_div.c
@@ -0,0 +1,53 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* c_div: single-precision complex division c = a / b. */
+/* Scales by the ratio of b's smaller-magnitude component to its */
+/* larger one so the intermediate products cannot overflow */
+/* prematurely. The real part is staged through cr so the result */
+/* is correct even when c aliases a or b. By default a zero */
+/* divisor aborts via sig_die; compiled with -DIEEE_COMPLEX_DIVIDE */
+/* it instead yields Inf (nonzero numerator) or NaN (zero numerator). */
+#ifdef KR_headers
+extern VOID sig_die();
+VOID c_div(c, a, b)
+complex *a, *b, *c;
+#else
+extern void sig_die(const char*,int);
+void c_div(complex *c, complex *a, complex *b)
+#endif
+{
+ double ratio, den;
+ double abr, abi, cr;
+
+ if( (abr = b->r) < 0.)
+ abr = - abr;
+ if( (abi = b->i) < 0.)
+ abi = - abi;
+ if( abr <= abi )
+ {
+ if(abi == 0) {
+#ifdef IEEE_COMPLEX_DIVIDE
+ /* 0/0 -> NaN; nonzero/0 -> +Inf in both components */
+ float af, bf;
+ af = bf = abr;
+ if (a->i != 0 || a->r != 0)
+ af = 1.;
+ c->i = c->r = af / bf;
+ return;
+#else
+ sig_die("complex division by zero", 1);
+#endif
+ }
+ ratio = (double)b->r / b->i ;
+ den = b->i * (1 + ratio*ratio);
+ cr = (a->r*ratio + a->i) / den;
+ c->i = (a->i*ratio - a->r) / den;
+ }
+
+ else
+ {
+ ratio = (double)b->i / b->r ;
+ den = b->r * (1 + ratio*ratio);
+ cr = (a->r + a->i*ratio) / den;
+ c->i = (a->i - a->r*ratio) / den;
+ }
+ c->r = cr;
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/c_exp.c b/contrib/libs/libf2c/c_exp.c
new file mode 100644
index 0000000000..f46508d35b
--- /dev/null
+++ b/contrib/libs/libf2c/c_exp.c
@@ -0,0 +1,25 @@
+#include "f2c.h"
+
+/* c_exp: complex exponential, r = exp(z), via */
+/* exp(x+iy) = exp(x)*(cos(y) + i sin(y)). */
+/* z's parts are read before r is written, so r may alias z. */
+#ifdef KR_headers
+extern double exp(), cos(), sin();
+
+ VOID c_exp(r, z) complex *r, *z;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void c_exp(complex *r, complex *z)
+#endif
+{
+ double expx, zi = z->i;
+
+ expx = exp(z->r);
+ r->r = expx * cos(zi);
+ r->i = expx * sin(zi);
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/c_log.c b/contrib/libs/libf2c/c_log.c
new file mode 100644
index 0000000000..a0ba3f0d8d
--- /dev/null
+++ b/contrib/libs/libf2c/c_log.c
@@ -0,0 +1,23 @@
+#include "f2c.h"
+
+/* c_log: complex natural logarithm, r = log(z): */
+/* Re = log|z| (overflow-safe via f__cabs), Im = atan2(zi, zr). */
+/* zi/zr are captured before r->i is stored, so r may alias z. */
+#ifdef KR_headers
+extern double log(), f__cabs(), atan2();
+VOID c_log(r, z) complex *r, *z;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern double f__cabs(double, double);
+
+void c_log(complex *r, complex *z)
+#endif
+{
+ double zi, zr;
+ r->i = atan2(zi = z->i, zr = z->r);
+ r->r = log( f__cabs(zr, zi) );
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/c_sin.c b/contrib/libs/libf2c/c_sin.c
new file mode 100644
index 0000000000..c8bc30f2d6
--- /dev/null
+++ b/contrib/libs/libf2c/c_sin.c
@@ -0,0 +1,23 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+extern double sin(), cos(), sinh(), cosh();
+
+VOID c_sin(r, z) complex *r, *z;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void c_sin(complex *r, complex *z)
+#endif
+{
+ double zi = z->i, zr = z->r;
+ r->r = sin(zr) * cosh(zi);
+ r->i = cos(zr) * sinh(zi);
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/c_sqrt.c b/contrib/libs/libf2c/c_sqrt.c
new file mode 100644
index 0000000000..1678c534d6
--- /dev/null
+++ b/contrib/libs/libf2c/c_sqrt.c
@@ -0,0 +1,41 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+extern double sqrt(), f__cabs();
+
+VOID c_sqrt(r, z) complex *r, *z;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern double f__cabs(double, double);
+
+void c_sqrt(complex *r, complex *z)
+#endif
+{
+ double mag, t;
+ double zi = z->i, zr = z->r;
+
+ if( (mag = f__cabs(zr, zi)) == 0.)
+ r->r = r->i = 0.;
+ else if(zr > 0)
+ {
+ r->r = t = sqrt(0.5 * (mag + zr) );
+ t = zi / t;
+ r->i = 0.5 * t;
+ }
+ else
+ {
+ t = sqrt(0.5 * (mag - zr) );
+ if(zi < 0)
+ t = -t;
+ r->i = t;
+ t = zi / t;
+ r->r = 0.5 * t;
+ }
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/cabs.c b/contrib/libs/libf2c/cabs.c
new file mode 100644
index 0000000000..84750d505c
--- /dev/null
+++ b/contrib/libs/libf2c/cabs.c
@@ -0,0 +1,33 @@
+#ifdef KR_headers
+extern double sqrt();
+double f__cabs(real, imag) double real, imag;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double f__cabs(double real, double imag)
+#endif
+{
+double temp;
+
+if(real < 0)
+ real = -real;
+if(imag < 0)
+ imag = -imag;
+if(imag > real){
+ temp = real;
+ real = imag;
+ imag = temp;
+}
+if((real+imag) == real)
+ return(real);
+
+temp = imag/real;
+temp = real*sqrt(1.0 + temp*temp); /*overflow!!*/
+return(temp);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/close.c b/contrib/libs/libf2c/close.c
new file mode 100644
index 0000000000..e958c7172a
--- /dev/null
+++ b/contrib/libs/libf2c/close.c
@@ -0,0 +1,101 @@
+#include "f2c.h"
+#include "fio.h"
+#ifdef KR_headers
+integer f_clos(a) cllist *a;
+#else
+#undef abs
+#undef min
+#undef max
+#include "stdlib.h"
+#ifdef NON_UNIX_STDIO
+#ifndef unlink
+#define unlink remove
+#endif
+#else
+#ifdef MSDOS
+#include "io.h"
+#else
+#ifdef __cplusplus
+extern "C" int unlink(const char*);
+#else
+extern int unlink(const char*);
+#endif
+#endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+integer f_clos(cllist *a)
+#endif
+{ unit *b;
+
+ if(a->cunit >= MXUNIT) return(0);
+ b= &f__units[a->cunit];
+ if(b->ufd==NULL)
+ goto done;
+ if (b->uscrtch == 1)
+ goto Delete;
+ if (!a->csta)
+ goto Keep;
+ switch(*a->csta) {
+ default:
+ Keep:
+ case 'k':
+ case 'K':
+ if(b->uwrt == 1)
+ t_runc((alist *)a);
+ if(b->ufnm) {
+ fclose(b->ufd);
+ free(b->ufnm);
+ }
+ break;
+ case 'd':
+ case 'D':
+ Delete:
+ fclose(b->ufd);
+ if(b->ufnm) {
+ unlink(b->ufnm); /*SYSDEP*/
+ free(b->ufnm);
+ }
+ }
+ b->ufd=NULL;
+ done:
+ b->uend=0;
+ b->ufnm=NULL;
+ return(0);
+ }
+ void
+#ifdef KR_headers
+f_exit()
+#else
+f_exit(void)
+#endif
+{ int i;
+ static cllist xx;
+ if (!xx.cerr) {
+ xx.cerr=1;
+ xx.csta=NULL;
+ for(i=0;i<MXUNIT;i++)
+ {
+ xx.cunit=i;
+ (void) f_clos(&xx);
+ }
+ }
+}
+ int
+#ifdef KR_headers
+flush_()
+#else
+flush_(void)
+#endif
+{ int i;
+ for(i=0;i<MXUNIT;i++)
+ if(f__units[i].ufd != NULL && f__units[i].uwrt)
+ fflush(f__units[i].ufd);
+return 0;
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/ctype.c b/contrib/libs/libf2c/ctype.c
new file mode 100644
index 0000000000..b5a143b638
--- /dev/null
+++ b/contrib/libs/libf2c/ctype.c
@@ -0,0 +1,2 @@
+#define My_ctype_DEF
+#include "ctype_.h"
diff --git a/contrib/libs/libf2c/ctype_.h b/contrib/libs/libf2c/ctype_.h
new file mode 100644
index 0000000000..2915615058
--- /dev/null
+++ b/contrib/libs/libf2c/ctype_.h
@@ -0,0 +1,47 @@
+/* Custom ctype.h to overcome trouble with recent versions of Linux libc.a */
+
+#ifdef NO_My_ctype
+#include <ctype.h>
+#else /*{*/
+#ifndef My_ctype_DEF
+extern char My_ctype[];
+#else /*{*/
+char My_ctype[264] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 2, 2, 2, 2, 2, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 2, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+#endif /*}*/
+
+#define isdigit(x) (My_ctype[(x)+8] & 1)
+#define isspace(x) (My_ctype[(x)+8] & 2)
+#endif
diff --git a/contrib/libs/libf2c/d_abs.c b/contrib/libs/libf2c/d_abs.c
new file mode 100644
index 0000000000..2f7a153c2e
--- /dev/null
+++ b/contrib/libs/libf2c/d_abs.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+double d_abs(x) doublereal *x;
+#else
+double d_abs(doublereal *x)
+#endif
+{
+if(*x >= 0)
+ return(*x);
+return(- *x);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_acos.c b/contrib/libs/libf2c/d_acos.c
new file mode 100644
index 0000000000..69005b56d7
--- /dev/null
+++ b/contrib/libs/libf2c/d_acos.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double acos();
+double d_acos(x) doublereal *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double d_acos(doublereal *x)
+#endif
+{
+return( acos(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_asin.c b/contrib/libs/libf2c/d_asin.c
new file mode 100644
index 0000000000..d5196ab101
--- /dev/null
+++ b/contrib/libs/libf2c/d_asin.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double asin();
+double d_asin(x) doublereal *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double d_asin(doublereal *x)
+#endif
+{
+return( asin(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_atan.c b/contrib/libs/libf2c/d_atan.c
new file mode 100644
index 0000000000..d8856f8d69
--- /dev/null
+++ b/contrib/libs/libf2c/d_atan.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double atan();
+double d_atan(x) doublereal *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double d_atan(doublereal *x)
+#endif
+{
+return( atan(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_atn2.c b/contrib/libs/libf2c/d_atn2.c
new file mode 100644
index 0000000000..56113850ab
--- /dev/null
+++ b/contrib/libs/libf2c/d_atn2.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double atan2();
+double d_atn2(x,y) doublereal *x, *y;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double d_atn2(doublereal *x, doublereal *y)
+#endif
+{
+return( atan2(*x,*y) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_cnjg.c b/contrib/libs/libf2c/d_cnjg.c
new file mode 100644
index 0000000000..38471d9bc0
--- /dev/null
+++ b/contrib/libs/libf2c/d_cnjg.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ VOID
+#ifdef KR_headers
+d_cnjg(r, z) doublecomplex *r, *z;
+#else
+d_cnjg(doublecomplex *r, doublecomplex *z)
+#endif
+{
+ doublereal zi = z->i;
+ r->r = z->r;
+ r->i = -zi;
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_cos.c b/contrib/libs/libf2c/d_cos.c
new file mode 100644
index 0000000000..12def9ad0f
--- /dev/null
+++ b/contrib/libs/libf2c/d_cos.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double cos();
+double d_cos(x) doublereal *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double d_cos(doublereal *x)
+#endif
+{
+return( cos(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_cosh.c b/contrib/libs/libf2c/d_cosh.c
new file mode 100644
index 0000000000..9214c7a0d8
--- /dev/null
+++ b/contrib/libs/libf2c/d_cosh.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double cosh();
+double d_cosh(x) doublereal *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double d_cosh(doublereal *x)
+#endif
+{
+return( cosh(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_dim.c b/contrib/libs/libf2c/d_dim.c
new file mode 100644
index 0000000000..627ddb690f
--- /dev/null
+++ b/contrib/libs/libf2c/d_dim.c
@@ -0,0 +1,16 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+double d_dim(a,b) doublereal *a, *b;
+#else
+double d_dim(doublereal *a, doublereal *b)
+#endif
+{
+return( *a > *b ? *a - *b : 0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_exp.c b/contrib/libs/libf2c/d_exp.c
new file mode 100644
index 0000000000..e9ab5d4425
--- /dev/null
+++ b/contrib/libs/libf2c/d_exp.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double exp();
+double d_exp(x) doublereal *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double d_exp(doublereal *x)
+#endif
+{
+return( exp(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_imag.c b/contrib/libs/libf2c/d_imag.c
new file mode 100644
index 0000000000..d17b9dd591
--- /dev/null
+++ b/contrib/libs/libf2c/d_imag.c
@@ -0,0 +1,16 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+double d_imag(z) doublecomplex *z;
+#else
+double d_imag(doublecomplex *z)
+#endif
+{
+return(z->i);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_int.c b/contrib/libs/libf2c/d_int.c
new file mode 100644
index 0000000000..6da4ce35e9
--- /dev/null
+++ b/contrib/libs/libf2c/d_int.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double floor();
+double d_int(x) doublereal *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double d_int(doublereal *x)
+#endif
+{
+return( (*x>0) ? floor(*x) : -floor(- *x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_lg10.c b/contrib/libs/libf2c/d_lg10.c
new file mode 100644
index 0000000000..664c19d9e9
--- /dev/null
+++ b/contrib/libs/libf2c/d_lg10.c
@@ -0,0 +1,21 @@
+#include "f2c.h"
+
+#define log10e 0.43429448190325182765
+
+#ifdef KR_headers
+double log();
+double d_lg10(x) doublereal *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double d_lg10(doublereal *x)
+#endif
+{
+return( log10e * log(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_log.c b/contrib/libs/libf2c/d_log.c
new file mode 100644
index 0000000000..e74be02c54
--- /dev/null
+++ b/contrib/libs/libf2c/d_log.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double log();
+double d_log(x) doublereal *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double d_log(doublereal *x)
+#endif
+{
+return( log(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_mod.c b/contrib/libs/libf2c/d_mod.c
new file mode 100644
index 0000000000..3766d9fa82
--- /dev/null
+++ b/contrib/libs/libf2c/d_mod.c
@@ -0,0 +1,46 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+#ifdef IEEE_drem
+double drem();
+#else
+double floor();
+#endif
+double d_mod(x,y) doublereal *x, *y;
+#else
+#ifdef IEEE_drem
+double drem(double, double);
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+#endif
+double d_mod(doublereal *x, doublereal *y)
+#endif
+{
+#ifdef IEEE_drem
+ double xa, ya, z;
+ if ((ya = *y) < 0.)
+ ya = -ya;
+ z = drem(xa = *x, ya);
+ if (xa > 0) {
+ if (z < 0)
+ z += ya;
+ }
+ else if (z > 0)
+ z -= ya;
+ return z;
+#else
+ double quotient;
+ if( (quotient = *x / *y) >= 0)
+ quotient = floor(quotient);
+ else
+ quotient = -floor(-quotient);
+ return(*x - (*y) * quotient );
+#endif
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_nint.c b/contrib/libs/libf2c/d_nint.c
new file mode 100644
index 0000000000..66f2dd0ee6
--- /dev/null
+++ b/contrib/libs/libf2c/d_nint.c
@@ -0,0 +1,20 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double floor();
+double d_nint(x) doublereal *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double d_nint(doublereal *x)
+#endif
+{
+return( (*x)>=0 ?
+ floor(*x + .5) : -floor(.5 - *x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_prod.c b/contrib/libs/libf2c/d_prod.c
new file mode 100644
index 0000000000..f9f348b03d
--- /dev/null
+++ b/contrib/libs/libf2c/d_prod.c
@@ -0,0 +1,16 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+double d_prod(x,y) real *x, *y;
+#else
+double d_prod(real *x, real *y)
+#endif
+{
+return( (*x) * (*y) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_sign.c b/contrib/libs/libf2c/d_sign.c
new file mode 100644
index 0000000000..d06e0d1923
--- /dev/null
+++ b/contrib/libs/libf2c/d_sign.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+double d_sign(a,b) doublereal *a, *b;
+#else
+double d_sign(doublereal *a, doublereal *b)
+#endif
+{
+double x;
+x = (*a >= 0 ? *a : - *a);
+return( *b >= 0 ? x : -x);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_sin.c b/contrib/libs/libf2c/d_sin.c
new file mode 100644
index 0000000000..ebd4eec5ee
--- /dev/null
+++ b/contrib/libs/libf2c/d_sin.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double sin();
+double d_sin(x) doublereal *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double d_sin(doublereal *x)
+#endif
+{
+return( sin(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_sinh.c b/contrib/libs/libf2c/d_sinh.c
new file mode 100644
index 0000000000..2479a6fab2
--- /dev/null
+++ b/contrib/libs/libf2c/d_sinh.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double sinh();
+double d_sinh(x) doublereal *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double d_sinh(doublereal *x)
+#endif
+{
+return( sinh(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_sqrt.c b/contrib/libs/libf2c/d_sqrt.c
new file mode 100644
index 0000000000..a7fa66c00a
--- /dev/null
+++ b/contrib/libs/libf2c/d_sqrt.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double sqrt();
+double d_sqrt(x) doublereal *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double d_sqrt(doublereal *x)
+#endif
+{
+return( sqrt(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_tan.c b/contrib/libs/libf2c/d_tan.c
new file mode 100644
index 0000000000..7d252c4d5c
--- /dev/null
+++ b/contrib/libs/libf2c/d_tan.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double tan();
+double d_tan(x) doublereal *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double d_tan(doublereal *x)
+#endif
+{
+return( tan(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/d_tanh.c b/contrib/libs/libf2c/d_tanh.c
new file mode 100644
index 0000000000..415b58508b
--- /dev/null
+++ b/contrib/libs/libf2c/d_tanh.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double tanh();
+double d_tanh(x) doublereal *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double d_tanh(doublereal *x)
+#endif
+{
+return( tanh(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/derf_.c b/contrib/libs/libf2c/derf_.c
new file mode 100644
index 0000000000..d935d31521
--- /dev/null
+++ b/contrib/libs/libf2c/derf_.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+double erf();
+double derf_(x) doublereal *x;
+#else
+extern double erf(double);
+double derf_(doublereal *x)
+#endif
+{
+return( erf(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/derfc_.c b/contrib/libs/libf2c/derfc_.c
new file mode 100644
index 0000000000..18f5c619b6
--- /dev/null
+++ b/contrib/libs/libf2c/derfc_.c
@@ -0,0 +1,20 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+extern double erfc();
+
+double derfc_(x) doublereal *x;
+#else
+extern double erfc(double);
+
+double derfc_(doublereal *x)
+#endif
+{
+return( erfc(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/dfe.c b/contrib/libs/libf2c/dfe.c
new file mode 100644
index 0000000000..c6b10d0e9c
--- /dev/null
+++ b/contrib/libs/libf2c/dfe.c
@@ -0,0 +1,151 @@
+#include "f2c.h"
+#include "fio.h"
+#include "fmt.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ int
+y_rsk(Void)
+{
+ if(f__curunit->uend || f__curunit->url <= f__recpos
+ || f__curunit->url == 1) return 0;
+ do {
+ getc(f__cf);
+ } while(++f__recpos < f__curunit->url);
+ return 0;
+}
+
+ int
+y_getc(Void)
+{
+ int ch;
+ if(f__curunit->uend) return(-1);
+ if((ch=getc(f__cf))!=EOF)
+ {
+ f__recpos++;
+ if(f__curunit->url>=f__recpos ||
+ f__curunit->url==1)
+ return(ch);
+ else return(' ');
+ }
+ if(feof(f__cf))
+ {
+ f__curunit->uend=1;
+ errno=0;
+ return(-1);
+ }
+ err(f__elist->cierr,errno,"readingd");
+}
+
+ static int
+y_rev(Void)
+{
+ if (f__recpos < f__hiwater)
+ f__recpos = f__hiwater;
+ if (f__curunit->url > 1)
+ while(f__recpos < f__curunit->url)
+ (*f__putn)(' ');
+ if (f__recpos)
+ f__putbuf(0);
+ f__recpos = 0;
+ return(0);
+}
+
+ static int
+y_err(Void)
+{
+ err(f__elist->cierr, 110, "dfe");
+}
+
+ static int
+y_newrec(Void)
+{
+ y_rev();
+ f__hiwater = f__cursor = 0;
+ return(1);
+}
+
+ int
+#ifdef KR_headers
+c_dfe(a) cilist *a;
+#else
+c_dfe(cilist *a)
+#endif
+{
+ f__sequential=0;
+ f__formatted=f__external=1;
+ f__elist=a;
+ f__cursor=f__scale=f__recpos=0;
+ f__curunit = &f__units[a->ciunit];
+ if(a->ciunit>MXUNIT || a->ciunit<0)
+ err(a->cierr,101,"startchk");
+ if(f__curunit->ufd==NULL && fk_open(DIR,FMT,a->ciunit))
+ err(a->cierr,104,"dfe");
+ f__cf=f__curunit->ufd;
+ if(!f__curunit->ufmt) err(a->cierr,102,"dfe")
+ if(!f__curunit->useek) err(a->cierr,104,"dfe")
+ f__fmtbuf=a->cifmt;
+ if(a->cirec <= 0)
+ err(a->cierr,130,"dfe")
+ FSEEK(f__cf,(OFF_T)f__curunit->url * (a->cirec-1),SEEK_SET);
+ f__curunit->uend = 0;
+ return(0);
+}
+#ifdef KR_headers
+integer s_rdfe(a) cilist *a;
+#else
+integer s_rdfe(cilist *a)
+#endif
+{
+ int n;
+ if(!f__init) f_init();
+ f__reading=1;
+ if(n=c_dfe(a))return(n);
+ if(f__curunit->uwrt && f__nowreading(f__curunit))
+ err(a->cierr,errno,"read start");
+ f__getn = y_getc;
+ f__doed = rd_ed;
+ f__doned = rd_ned;
+ f__dorevert = f__donewrec = y_err;
+ f__doend = y_rsk;
+ if(pars_f(f__fmtbuf)<0)
+ err(a->cierr,100,"read start");
+ fmt_bg();
+ return(0);
+}
+#ifdef KR_headers
+integer s_wdfe(a) cilist *a;
+#else
+integer s_wdfe(cilist *a)
+#endif
+{
+ int n;
+ if(!f__init) f_init();
+ f__reading=0;
+ if(n=c_dfe(a)) return(n);
+ if(f__curunit->uwrt != 1 && f__nowwriting(f__curunit))
+ err(a->cierr,errno,"startwrt");
+ f__putn = x_putc;
+ f__doed = w_ed;
+ f__doned= w_ned;
+ f__dorevert = y_err;
+ f__donewrec = y_newrec;
+ f__doend = y_rev;
+ if(pars_f(f__fmtbuf)<0)
+ err(a->cierr,100,"startwrt");
+ fmt_bg();
+ return(0);
+}
+integer e_rdfe(Void)
+{
+ en_fio();
+ return 0;
+}
+integer e_wdfe(Void)
+{
+ return en_fio();
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/dolio.c b/contrib/libs/libf2c/dolio.c
new file mode 100644
index 0000000000..4070d87901
--- /dev/null
+++ b/contrib/libs/libf2c/dolio.c
@@ -0,0 +1,26 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+#ifdef KR_headers
+extern int (*f__lioproc)();
+
+integer do_lio(type,number,ptr,len) ftnint *number,*type; char *ptr; ftnlen len;
+#else
+extern int (*f__lioproc)(ftnint*, char*, ftnlen, ftnint);
+
+integer do_lio(ftnint *type, ftnint *number, char *ptr, ftnlen len)
+#endif
+{
+ return((*f__lioproc)(number,ptr,len,*type));
+}
+#ifdef __cplusplus
+ }
+#endif
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/dtime_.c b/contrib/libs/libf2c/dtime_.c
new file mode 100644
index 0000000000..6a09b3e983
--- /dev/null
+++ b/contrib/libs/libf2c/dtime_.c
@@ -0,0 +1,63 @@
+#include "time.h"
+
+#ifdef MSDOS
+#undef USE_CLOCK
+#define USE_CLOCK
+#endif
+
+#ifndef REAL
+#define REAL double
+#endif
+
+#ifndef USE_CLOCK
+#define _INCLUDE_POSIX_SOURCE /* for HP-UX */
+#define _INCLUDE_XOPEN_SOURCE /* for HP-UX */
+#include "sys/types.h"
+#include "sys/times.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+#endif
+
+#undef Hz
+#ifdef CLK_TCK
+#define Hz CLK_TCK
+#else
+#ifdef HZ
+#define Hz HZ
+#else
+#define Hz 60
+#endif
+#endif
+
+ REAL
+#ifdef KR_headers
+dtime_(tarray) float *tarray;
+#else
+dtime_(float *tarray)
+#endif
+{
+#ifdef USE_CLOCK
+#ifndef CLOCKS_PER_SECOND
+#define CLOCKS_PER_SECOND Hz
+#endif
+ static double t0;
+ double t = clock();
+ tarray[1] = 0;
+ tarray[0] = (t - t0) / CLOCKS_PER_SECOND;
+ t0 = t;
+ return tarray[0];
+#else
+ struct tms t;
+ static struct tms t0;
+
+ times(&t);
+ tarray[0] = (double)(t.tms_utime - t0.tms_utime) / Hz;
+ tarray[1] = (double)(t.tms_stime - t0.tms_stime) / Hz;
+ t0 = t;
+ return tarray[0] + tarray[1];
+#endif
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/due.c b/contrib/libs/libf2c/due.c
new file mode 100644
index 0000000000..a7f4cec467
--- /dev/null
+++ b/contrib/libs/libf2c/due.c
@@ -0,0 +1,77 @@
+#include "f2c.h"
+#include "fio.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ int
+#ifdef KR_headers
+c_due(a) cilist *a;
+#else
+c_due(cilist *a)
+#endif
+{
+ if(!f__init) f_init();
+ f__sequential=f__formatted=f__recpos=0;
+ f__external=1;
+ f__curunit = &f__units[a->ciunit];
+ if(a->ciunit>=MXUNIT || a->ciunit<0)
+ err(a->cierr,101,"startio");
+ f__elist=a;
+ if(f__curunit->ufd==NULL && fk_open(DIR,UNF,a->ciunit) ) err(a->cierr,104,"due");
+ f__cf=f__curunit->ufd;
+ if(f__curunit->ufmt) err(a->cierr,102,"cdue")
+ if(!f__curunit->useek) err(a->cierr,104,"cdue")
+ if(f__curunit->ufd==NULL) err(a->cierr,114,"cdue")
+ if(a->cirec <= 0)
+ err(a->cierr,130,"due")
+ FSEEK(f__cf,(OFF_T)(a->cirec-1)*f__curunit->url,SEEK_SET);
+ f__curunit->uend = 0;
+ return(0);
+}
+#ifdef KR_headers
+integer s_rdue(a) cilist *a;
+#else
+integer s_rdue(cilist *a)
+#endif
+{
+ int n;
+ f__reading=1;
+ if(n=c_due(a)) return(n);
+ if(f__curunit->uwrt && f__nowreading(f__curunit))
+ err(a->cierr,errno,"read start");
+ return(0);
+}
+#ifdef KR_headers
+integer s_wdue(a) cilist *a;
+#else
+integer s_wdue(cilist *a)
+#endif
+{
+ int n;
+ f__reading=0;
+ if(n=c_due(a)) return(n);
+ if(f__curunit->uwrt != 1 && f__nowwriting(f__curunit))
+ err(a->cierr,errno,"write start");
+ return(0);
+}
+integer e_rdue(Void)
+{
+ if(f__curunit->url==1 || f__recpos==f__curunit->url)
+ return(0);
+ FSEEK(f__cf,(OFF_T)(f__curunit->url-f__recpos),SEEK_CUR);
+ if(FTELL(f__cf)%f__curunit->url)
+ err(f__elist->cierr,200,"syserr");
+ return(0);
+}
+integer e_wdue(Void)
+{
+#ifdef ALWAYS_FLUSH
+ if (fflush(f__cf))
+ err(f__elist->cierr,errno,"write end");
+#endif
+ return(e_rdue());
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/ef1asc_.c b/contrib/libs/libf2c/ef1asc_.c
new file mode 100644
index 0000000000..70be0bc2e0
--- /dev/null
+++ b/contrib/libs/libf2c/ef1asc_.c
@@ -0,0 +1,25 @@
+/* EFL support routine to copy string b to string a */
+
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define M ( (long) (sizeof(long) - 1) )
+#define EVEN(x) ( ( (x)+ M) & (~M) )
+
+#ifdef KR_headers
+extern VOID s_copy();
+ef1asc_(a, la, b, lb) ftnint *a, *b; ftnlen *la, *lb;
+#else
+extern void s_copy(char*,char*,ftnlen,ftnlen);
+int ef1asc_(ftnint *a, ftnlen *la, ftnint *b, ftnlen *lb)
+#endif
+{
+s_copy( (char *)a, (char *)b, EVEN(*la), *lb );
+return 0; /* ignored return value */
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/ef1cmc_.c b/contrib/libs/libf2c/ef1cmc_.c
new file mode 100644
index 0000000000..4b420ae646
--- /dev/null
+++ b/contrib/libs/libf2c/ef1cmc_.c
@@ -0,0 +1,20 @@
+/* EFL support routine to compare two character strings */
+
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+extern integer s_cmp();
+integer ef1cmc_(a, la, b, lb) ftnint *a, *b; ftnlen *la, *lb;
+#else
+extern integer s_cmp(char*,char*,ftnlen,ftnlen);
+integer ef1cmc_(ftnint *a, ftnlen *la, ftnint *b, ftnlen *lb)
+#endif
+{
+return( s_cmp( (char *)a, (char *)b, *la, *lb) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/endfile.c b/contrib/libs/libf2c/endfile.c
new file mode 100644
index 0000000000..04020d3802
--- /dev/null
+++ b/contrib/libs/libf2c/endfile.c
@@ -0,0 +1,160 @@
+#include "f2c.h"
+#include "fio.h"
+
+/* Compile this with -DNO_TRUNCATE if unistd.h does not exist or */
+/* if it does not define int truncate(const char *name, off_t). */
+
+#ifdef MSDOS
+#undef NO_TRUNCATE
+#define NO_TRUNCATE
+#endif
+
+#ifndef NO_TRUNCATE
+#include "unistd.h"
+#endif
+
+#ifdef KR_headers
+extern char *strcpy();
+extern FILE *tmpfile();
+#else
+#undef abs
+#undef min
+#undef max
+#include "stdlib.h"
+#include "string.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+#endif
+
+extern char *f__r_mode[], *f__w_mode[];
+
+#ifdef KR_headers
+integer f_end(a) alist *a;
+#else
+integer f_end(alist *a)
+#endif
+{
+ unit *b;
+ FILE *tf;
+
+ if(a->aunit>=MXUNIT || a->aunit<0) err(a->aerr,101,"endfile");
+ b = &f__units[a->aunit];
+ if(b->ufd==NULL) {
+ char nbuf[10];
+ sprintf(nbuf,"fort.%ld",(long)a->aunit);
+ if (tf = FOPEN(nbuf, f__w_mode[0]))
+ fclose(tf);
+ return(0);
+ }
+ b->uend=1;
+ return(b->useek ? t_runc(a) : 0);
+}
+
+#ifdef NO_TRUNCATE
+ static int
+#ifdef KR_headers
+copy(from, len, to) FILE *from, *to; register long len;
+#else
+copy(FILE *from, register long len, FILE *to)
+#endif
+{
+ int len1;
+ char buf[BUFSIZ];
+
+ while(fread(buf, len1 = len > BUFSIZ ? BUFSIZ : (int)len, 1, from)) {
+ if (!fwrite(buf, len1, 1, to))
+ return 1;
+ if ((len -= len1) <= 0)
+ break;
+ }
+ return 0;
+ }
+#endif /* NO_TRUNCATE */
+
+ int
+#ifdef KR_headers
+t_runc(a) alist *a;
+#else
+t_runc(alist *a)
+#endif
+{
+ OFF_T loc, len;
+ unit *b;
+ int rc;
+ FILE *bf;
+#ifdef NO_TRUNCATE
+ FILE *tf;
+#endif
+
+ b = &f__units[a->aunit];
+ if(b->url)
+ return(0); /*don't truncate direct files*/
+ loc=FTELL(bf = b->ufd);
+ FSEEK(bf,(OFF_T)0,SEEK_END);
+ len=FTELL(bf);
+ if (loc >= len || b->useek == 0)
+ return(0);
+#ifdef NO_TRUNCATE
+ if (b->ufnm == NULL)
+ return 0;
+ rc = 0;
+ fclose(b->ufd);
+ if (!loc) {
+ if (!(bf = FOPEN(b->ufnm, f__w_mode[b->ufmt])))
+ rc = 1;
+ if (b->uwrt)
+ b->uwrt = 1;
+ goto done;
+ }
+ if (!(bf = FOPEN(b->ufnm, f__r_mode[0]))
+ || !(tf = tmpfile())) {
+#ifdef NON_UNIX_STDIO
+ bad:
+#endif
+ rc = 1;
+ goto done;
+ }
+ if (copy(bf, (long)loc, tf)) {
+ bad1:
+ rc = 1;
+ goto done1;
+ }
+ if (!(bf = FREOPEN(b->ufnm, f__w_mode[0], bf)))
+ goto bad1;
+ rewind(tf);
+ if (copy(tf, (long)loc, bf))
+ goto bad1;
+ b->uwrt = 1;
+ b->urw = 2;
+#ifdef NON_UNIX_STDIO
+ if (b->ufmt) {
+ fclose(bf);
+ if (!(bf = FOPEN(b->ufnm, f__w_mode[3])))
+ goto bad;
+ FSEEK(bf,(OFF_T)0,SEEK_END);
+ b->urw = 3;
+ }
+#endif
+done1:
+ fclose(tf);
+done:
+ f__cf = b->ufd = bf;
+#else /* NO_TRUNCATE */
+ if (b->urw & 2)
+ fflush(b->ufd); /* necessary on some Linux systems */
+#ifndef FTRUNCATE
+#define FTRUNCATE ftruncate
+#endif
+ rc = FTRUNCATE(fileno(b->ufd), loc);
+ /* The following FSEEK is unnecessary on some systems, */
+ /* but should be harmless. */
+ FSEEK(b->ufd, (OFF_T)0, SEEK_END);
+#endif /* NO_TRUNCATE */
+ if (rc)
+ err(a->aerr,111,"endfile");
+ return 0;
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/erf_.c b/contrib/libs/libf2c/erf_.c
new file mode 100644
index 0000000000..532fec61c8
--- /dev/null
+++ b/contrib/libs/libf2c/erf_.c
@@ -0,0 +1,22 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef REAL
+#define REAL double
+#endif
+
+#ifdef KR_headers
+double erf();
+REAL erf_(x) real *x;
+#else
+extern double erf(double);
+REAL erf_(real *x)
+#endif
+{
+return( erf((double)*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/erfc_.c b/contrib/libs/libf2c/erfc_.c
new file mode 100644
index 0000000000..6f6c9f1064
--- /dev/null
+++ b/contrib/libs/libf2c/erfc_.c
@@ -0,0 +1,22 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef REAL
+#define REAL double
+#endif
+
+#ifdef KR_headers
+double erfc();
+REAL erfc_(x) real *x;
+#else
+extern double erfc(double);
+REAL erfc_(real *x)
+#endif
+{
+return( erfc((double)*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/err.c b/contrib/libs/libf2c/err.c
new file mode 100644
index 0000000000..80a3b74983
--- /dev/null
+++ b/contrib/libs/libf2c/err.c
@@ -0,0 +1,293 @@
+#include "sysdep1.h" /* here to get stat64 on some badly designed Linux systems */
+#include "f2c.h"
+#ifdef KR_headers
+#define Const /*nothing*/
+extern char *malloc();
+#else
+#define Const const
+#undef abs
+#undef min
+#undef max
+#include "stdlib.h"
+#endif
+#include "fio.h"
+#include "fmt.h" /* for struct syl */
+
+/* Compile this with -DNO_ISATTY if unistd.h does not exist or */
+/* if it does not define int isatty(int). */
+#ifdef NO_ISATTY
+#define isatty(x) 0
+#else
+#include <unistd.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*global definitions*/
+unit f__units[MXUNIT]; /*unit table*/
+flag f__init; /*0 on entry, 1 after initializations*/
+cilist *f__elist; /*active external io list*/
+icilist *f__svic; /*active internal io list*/
+flag f__reading; /*1 if reading, 0 if writing*/
+flag f__cplus,f__cblank;
+Const char *f__fmtbuf;
+flag f__external; /*1 if external io, 0 if internal */
+#ifdef KR_headers
+int (*f__doed)(),(*f__doned)();
+int (*f__doend)(),(*f__donewrec)(),(*f__dorevert)();
+int (*f__getn)(); /* for formatted input */
+void (*f__putn)(); /* for formatted output */
+#else
+int (*f__getn)(void); /* for formatted input */
+void (*f__putn)(int); /* for formatted output */
+int (*f__doed)(struct syl*, char*, ftnlen),(*f__doned)(struct syl*);
+int (*f__dorevert)(void),(*f__donewrec)(void),(*f__doend)(void);
+#endif
+flag f__sequential; /*1 if sequential io, 0 if direct*/
+flag f__formatted; /*1 if formatted io, 0 if unformatted*/
+FILE *f__cf; /*current file*/
+unit *f__curunit; /*current unit*/
+int f__recpos; /*place in current record*/
+OFF_T f__cursor, f__hiwater;
+int f__scale;
+char *f__icptr;
+
+/*error messages*/
+Const char *F_err[] =
+{
+ "error in format", /* 100 */
+ "illegal unit number", /* 101 */
+ "formatted io not allowed", /* 102 */
+ "unformatted io not allowed", /* 103 */
+ "direct io not allowed", /* 104 */
+ "sequential io not allowed", /* 105 */
+ "can't backspace file", /* 106 */
+ "null file name", /* 107 */
+ "can't stat file", /* 108 */
+ "unit not connected", /* 109 */
+ "off end of record", /* 110 */
+ "truncation failed in endfile", /* 111 */
+ "incomprehensible list input", /* 112 */
+ "out of free space", /* 113 */
+ "unit not connected", /* 114 */
+ "read unexpected character", /* 115 */
+ "bad logical input field", /* 116 */
+ "bad variable type", /* 117 */
+ "bad namelist name", /* 118 */
+ "variable not in namelist", /* 119 */
+ "no end record", /* 120 */
+ "variable count incorrect", /* 121 */
+ "subscript for scalar variable", /* 122 */
+ "invalid array section", /* 123 */
+ "substring out of bounds", /* 124 */
+ "subscript out of bounds", /* 125 */
+ "can't read file", /* 126 */
+ "can't write file", /* 127 */
+ "'new' file exists", /* 128 */
+ "can't append to file", /* 129 */
+ "non-positive record number", /* 130 */
+ "nmLbuf overflow" /* 131 */
+};
+#define MAXERR (sizeof(F_err)/sizeof(char *)+100)
+
+ int
+#ifdef KR_headers
+f__canseek(f) FILE *f; /*SYSDEP*/
+#else
+f__canseek(FILE *f) /*SYSDEP*/
+#endif
+{
+#ifdef NON_UNIX_STDIO
+ return !isatty(fileno(f));
+#else
+ struct STAT_ST x;
+
+ if (FSTAT(fileno(f),&x) < 0)
+ return(0);
+#ifdef S_IFMT
+ switch(x.st_mode & S_IFMT) {
+ case S_IFDIR:
+ case S_IFREG:
+ if(x.st_nlink > 0) /* !pipe */
+ return(1);
+ else
+ return(0);
+ case S_IFCHR:
+ if(isatty(fileno(f)))
+ return(0);
+ return(1);
+#ifdef S_IFBLK
+ case S_IFBLK:
+ return(1);
+#endif
+ }
+#else
+#ifdef S_ISDIR
+ /* POSIX version */
+ if (S_ISREG(x.st_mode) || S_ISDIR(x.st_mode)) {
+ if(x.st_nlink > 0) /* !pipe */
+ return(1);
+ else
+ return(0);
+ }
+ if (S_ISCHR(x.st_mode)) {
+ if(isatty(fileno(f)))
+ return(0);
+ return(1);
+ }
+ if (S_ISBLK(x.st_mode))
+ return(1);
+#else
+ Help! How does fstat work on this system?
+#endif
+#endif
+ return(0); /* who knows what it is? */
+#endif
+}
+
+ void
+#ifdef KR_headers
+f__fatal(n,s) char *s;
+#else
+f__fatal(int n, const char *s)
+#endif
+{
+ if(n<100 && n>=0) perror(s); /*SYSDEP*/
+ else if(n >= (int)MAXERR || n < -1)
+ { fprintf(stderr,"%s: illegal error number %d\n",s,n);
+ }
+ else if(n == -1) fprintf(stderr,"%s: end of file\n",s);
+ else
+ fprintf(stderr,"%s: %s\n",s,F_err[n-100]);
+ if (f__curunit) {
+ fprintf(stderr,"apparent state: unit %d ",
+ (int)(f__curunit-f__units));
+ fprintf(stderr, f__curunit->ufnm ? "named %s\n" : "(unnamed)\n",
+ f__curunit->ufnm);
+ }
+ else
+ fprintf(stderr,"apparent state: internal I/O\n");
+ if (f__fmtbuf)
+ fprintf(stderr,"last format: %s\n",f__fmtbuf);
+ fprintf(stderr,"lately %s %s %s %s",f__reading?"reading":"writing",
+ f__sequential?"sequential":"direct",f__formatted?"formatted":"unformatted",
+ f__external?"external":"internal");
+ sig_die(" IO", 1);
+}
+/*initialization routine*/
+ VOID
+f_init(Void)
+{ unit *p;
+
+ f__init=1;
+ p= &f__units[0];
+ p->ufd=stderr;
+ p->useek=f__canseek(stderr);
+ p->ufmt=1;
+ p->uwrt=1;
+ p = &f__units[5];
+ p->ufd=stdin;
+ p->useek=f__canseek(stdin);
+ p->ufmt=1;
+ p->uwrt=0;
+ p= &f__units[6];
+ p->ufd=stdout;
+ p->useek=f__canseek(stdout);
+ p->ufmt=1;
+ p->uwrt=1;
+}
+
+ int
+#ifdef KR_headers
+f__nowreading(x) unit *x;
+#else
+f__nowreading(unit *x)
+#endif
+{
+ OFF_T loc;
+ int ufmt, urw;
+ extern char *f__r_mode[], *f__w_mode[];
+
+ if (x->urw & 1)
+ goto done;
+ if (!x->ufnm)
+ goto cantread;
+ ufmt = x->url ? 0 : x->ufmt;
+ loc = FTELL(x->ufd);
+ urw = 3;
+ if (!FREOPEN(x->ufnm, f__w_mode[ufmt|2], x->ufd)) {
+ urw = 1;
+ if(!FREOPEN(x->ufnm, f__r_mode[ufmt], x->ufd)) {
+ cantread:
+ errno = 126;
+ return 1;
+ }
+ }
+ FSEEK(x->ufd,loc,SEEK_SET);
+ x->urw = urw;
+ done:
+ x->uwrt = 0;
+ return 0;
+}
+
+ int
+#ifdef KR_headers
+f__nowwriting(x) unit *x;
+#else
+f__nowwriting(unit *x)
+#endif
+{
+ OFF_T loc;
+ int ufmt;
+ extern char *f__w_mode[];
+
+ if (x->urw & 2) {
+ if (x->urw & 1)
+ FSEEK(x->ufd, (OFF_T)0, SEEK_CUR);
+ goto done;
+ }
+ if (!x->ufnm)
+ goto cantwrite;
+ ufmt = x->url ? 0 : x->ufmt;
+ if (x->uwrt == 3) { /* just did write, rewind */
+ if (!(f__cf = x->ufd =
+ FREOPEN(x->ufnm,f__w_mode[ufmt],x->ufd)))
+ goto cantwrite;
+ x->urw = 2;
+ }
+ else {
+ loc=FTELL(x->ufd);
+ if (!(f__cf = x->ufd =
+ FREOPEN(x->ufnm, f__w_mode[ufmt | 2], x->ufd)))
+ {
+ x->ufd = NULL;
+ cantwrite:
+ errno = 127;
+ return(1);
+ }
+ x->urw = 3;
+ FSEEK(x->ufd,loc,SEEK_SET);
+ }
+ done:
+ x->uwrt = 1;
+ return 0;
+}
+
+ int
+#ifdef KR_headers
+err__fl(f, m, s) int f, m; char *s;
+#else
+err__fl(int f, int m, const char *s)
+#endif
+{
+ if (!f)
+ f__fatal(m, s);
+ if (f__doend)
+ (*f__doend)();
+ return errno = m;
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/etime_.c b/contrib/libs/libf2c/etime_.c
new file mode 100644
index 0000000000..2d9a36d8a3
--- /dev/null
+++ b/contrib/libs/libf2c/etime_.c
@@ -0,0 +1,57 @@
+#include "time.h"
+
+#ifdef MSDOS
+#undef USE_CLOCK
+#define USE_CLOCK
+#endif
+
+#ifndef REAL
+#define REAL double
+#endif
+
+#ifndef USE_CLOCK
+#define _INCLUDE_POSIX_SOURCE /* for HP-UX */
+#define _INCLUDE_XOPEN_SOURCE /* for HP-UX */
+#include "sys/types.h"
+#include "sys/times.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+#endif
+
+#undef Hz
+#ifdef CLK_TCK
+#define Hz CLK_TCK
+#else
+#ifdef HZ
+#define Hz HZ
+#else
+#define Hz 60
+#endif
+#endif
+
+ REAL
+#ifdef KR_headers
+etime_(tarray) float *tarray;
+#else
+etime_(float *tarray)
+#endif
+{
+#ifdef USE_CLOCK
+#ifndef CLOCKS_PER_SECOND
+#define CLOCKS_PER_SECOND Hz
+#endif
+ double t = clock();
+ tarray[1] = 0;
+ return tarray[0] = t / CLOCKS_PER_SECOND;
+#else
+ struct tms t;
+
+ times(&t);
+ return (tarray[0] = (double)t.tms_utime/Hz)
+ + (tarray[1] = (double)t.tms_stime/Hz);
+#endif
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/exit_.c b/contrib/libs/libf2c/exit_.c
new file mode 100644
index 0000000000..08e9d07067
--- /dev/null
+++ b/contrib/libs/libf2c/exit_.c
@@ -0,0 +1,43 @@
+/* This gives the effect of
+
+ subroutine exit(rc)
+ integer*4 rc
+ stop
+ end
+
+ * with the added side effect of supplying rc as the program's exit code.
+ */
+
+#include "f2c.h"
+#undef abs
+#undef min
+#undef max
+#ifndef KR_headers
+#include "stdlib.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern void f_exit(void);
+#endif
+
+ void
+#ifdef KR_headers
+exit_(rc) integer *rc;
+#else
+exit_(integer *rc)
+#endif
+{
+#ifdef NO_ONEXIT
+ f_exit();
+#endif
+ exit(*rc);
+ }
+#ifdef __cplusplus
+}
+#endif
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/f2c.h b/contrib/libs/libf2c/f2c.h
new file mode 100644
index 0000000000..5b2297f7a4
--- /dev/null
+++ b/contrib/libs/libf2c/f2c.h
@@ -0,0 +1,248 @@
+/* f2c.h -- Standard Fortran to C header file */
+
+/** barf [ba:rf] 2. "He suggested using FORTRAN, and everybody barfed."
+
+ - From The Shogakukan DICTIONARY OF NEW ENGLISH (Second edition) */
+
+#ifndef F2C_INCLUDE
+#define F2C_INCLUDE
+
+#include <math.h>
+#include <ctype.h>
+#include <stdlib.h>
+/* needed for Windows Mobile */
+#ifdef WINCE
+#undef complex;
+#endif
+#include <string.h>
+#include <stdio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef int integer;
+typedef unsigned int uinteger;
+typedef char *address;
+typedef short int shortint;
+typedef float real;
+typedef double doublereal;
+typedef struct { real r, i; } complex;
+typedef struct { doublereal r, i; } doublecomplex;
+typedef int logical;
+typedef short int shortlogical;
+typedef char logical1;
+typedef char integer1;
+#ifdef INTEGER_STAR_8 /* Adjust for integer*8. */
+typedef long long longint; /* system-dependent */
+typedef unsigned long long ulongint; /* system-dependent */
+#define qbit_clear(a,b) ((a) & ~((ulongint)1 << (b)))
+#define qbit_set(a,b) ((a) | ((ulongint)1 << (b)))
+#endif
+
+#define TRUE_ (1)
+#define FALSE_ (0)
+
+/* Extern is for use with -E */
+#ifndef Extern
+#define Extern extern
+#endif
+
+/* I/O stuff */
+
+#ifdef f2c_i2
+/* for -i2 */
+typedef short flag;
+typedef short ftnlen;
+typedef short ftnint;
+#else
+typedef int flag;
+typedef int ftnlen;
+typedef int ftnint;
+#endif
+
+/*external read, write*/
+typedef struct
+{ flag cierr;
+ ftnint ciunit;
+ flag ciend;
+ char *cifmt;
+ ftnint cirec;
+} cilist;
+
+/*internal read, write*/
+typedef struct
+{ flag icierr;
+ char *iciunit;
+ flag iciend;
+ char *icifmt;
+ ftnint icirlen;
+ ftnint icirnum;
+} icilist;
+
+/*open*/
+typedef struct
+{ flag oerr;
+ ftnint ounit;
+ char *ofnm;
+ ftnlen ofnmlen;
+ char *osta;
+ char *oacc;
+ char *ofm;
+ ftnint orl;
+ char *oblnk;
+} olist;
+
+/*close*/
+typedef struct
+{ flag cerr;
+ ftnint cunit;
+ char *csta;
+} cllist;
+
+/*rewind, backspace, endfile*/
+typedef struct
+{ flag aerr;
+ ftnint aunit;
+} alist;
+
+/* inquire */
+typedef struct
+{ flag inerr;
+ ftnint inunit;
+ char *infile;
+ ftnlen infilen;
+ ftnint *inex; /*parameters in standard's order*/
+ ftnint *inopen;
+ ftnint *innum;
+ ftnint *innamed;
+ char *inname;
+ ftnlen innamlen;
+ char *inacc;
+ ftnlen inacclen;
+ char *inseq;
+ ftnlen inseqlen;
+ char *indir;
+ ftnlen indirlen;
+ char *infmt;
+ ftnlen infmtlen;
+ char *inform;
+ ftnint informlen;
+ char *inunf;
+ ftnlen inunflen;
+ ftnint *inrecl;
+ ftnint *innrec;
+ char *inblank;
+ ftnlen inblanklen;
+} inlist;
+
+#define VOID void
+
+union Multitype { /* for multiple entry points */
+ integer1 g;
+ shortint h;
+ integer i;
+ /* longint j; */
+ real r;
+ doublereal d;
+ complex c;
+ doublecomplex z;
+ };
+
+typedef union Multitype Multitype;
+
+/*typedef long int Long;*/ /* No longer used; formerly in Namelist */
+
+struct Vardesc { /* for Namelist */
+ char *name;
+ char *addr;
+ ftnlen *dims;
+ int type;
+ };
+typedef struct Vardesc Vardesc;
+
+struct Namelist {
+ char *name;
+ Vardesc **vars;
+ int nvars;
+ };
+typedef struct Namelist Namelist;
+
+#ifndef abs
+ #define abs(x) ((x) >= 0 ? (x) : -(x))
+#endif
+ #define dabs(x) (doublereal)abs(x)
+#ifndef min
+ #define min(a,b) ((a) <= (b) ? (a) : (b))
+#endif
+#ifndef max
+ #define max(a,b) ((a) >= (b) ? (a) : (b))
+#endif
+#define dmin(a,b) (doublereal)min(a,b)
+#define dmax(a,b) (doublereal)max(a,b)
+#define bit_test(a,b) ((a) >> (b) & 1)
+#define bit_clear(a,b) ((a) & ~((uinteger)1 << (b)))
+#define bit_set(a,b) ((a) | ((uinteger)1 << (b)))
+
+/* procedure parameter types for -A and -C++ */
+
+#define F2C_proc_par_types 1
+#ifdef __cplusplus
+typedef int /* Unknown procedure type */ (*U_fp)(...);
+typedef shortint (*J_fp)(...);
+typedef integer (*I_fp)(...);
+typedef real (*R_fp)(...);
+typedef doublereal (*D_fp)(...), (*E_fp)(...);
+typedef /* Complex */ VOID (*C_fp)(...);
+typedef /* Double Complex */ VOID (*Z_fp)(...);
+typedef logical (*L_fp)(...);
+typedef shortlogical (*K_fp)(...);
+typedef /* Character */ VOID (*H_fp)(...);
+typedef /* Subroutine */ int (*S_fp)(...);
+#else
+typedef int /* Unknown procedure type */ (*U_fp)();
+typedef shortint (*J_fp)();
+typedef integer (*I_fp)();
+typedef real (*R_fp)();
+typedef doublereal (*D_fp)(), (*E_fp)();
+typedef /* Complex */ VOID (*C_fp)();
+typedef /* Double Complex */ VOID (*Z_fp)();
+typedef logical (*L_fp)();
+typedef shortlogical (*K_fp)();
+typedef /* Character */ VOID (*H_fp)();
+typedef /* Subroutine */ int (*S_fp)();
+#endif
+/* E_fp is for real functions when -R is not specified */
+typedef VOID C_f; /* complex function */
+typedef VOID H_f; /* character function */
+typedef VOID Z_f; /* double complex function */
+typedef doublereal E_f; /* real function with -R not specified */
+
+/* undef any lower-case symbols that your C compiler predefines, e.g.: */
+
+#ifndef Skip_f2c_Undefs
+#undef cray
+#undef gcos
+#undef mc68010
+#undef mc68020
+#undef mips
+#undef pdp11
+#undef sgi
+#undef sparc
+#undef sun
+#undef sun2
+#undef sun3
+#undef sun4
+#undef u370
+#undef u3b
+#undef u3b2
+#undef u3b5
+#undef unix
+#undef vax
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/contrib/libs/libf2c/f77_aloc.c b/contrib/libs/libf2c/f77_aloc.c
new file mode 100644
index 0000000000..f53609906d
--- /dev/null
+++ b/contrib/libs/libf2c/f77_aloc.c
@@ -0,0 +1,44 @@
+#include "f2c.h"
+#undef abs
+#undef min
+#undef max
+#include "stdio.h"
+
+static integer memfailure = 3;
+
+#ifdef KR_headers
+extern char *malloc();
+extern void exit_();
+
+ char *
+F77_aloc(Len, whence) integer Len; char *whence;
+#else
+#include "stdlib.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern void exit_(integer*);
+#ifdef __cplusplus
+ }
+#endif
+
+ char *
+F77_aloc(integer Len, const char *whence)
+#endif
+{
+ char *rv;
+ unsigned int uLen = (unsigned int) Len; /* for K&R C */
+
+ if (!(rv = (char*)malloc(uLen))) {
+ fprintf(stderr, "malloc(%u) failure in %s\n",
+ uLen, whence);
+ exit_(&memfailure);
+ }
+ return rv;
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/f77vers.c b/contrib/libs/libf2c/f77vers.c
new file mode 100644
index 0000000000..70cd6fe79e
--- /dev/null
+++ b/contrib/libs/libf2c/f77vers.c
@@ -0,0 +1,97 @@
+ char
+_libf77_version_f2c[] = "\n@(#) LIBF77 VERSION (f2c) 20051004\n";
+
+/*
+2.00 11 June 1980. File version.c added to library.
+2.01 31 May 1988. s_paus() flushes stderr; names of hl_* fixed
+ [ d]erf[c ] added
+ 8 Aug. 1989: #ifdefs for f2c -i2 added to s_cat.c
+ 29 Nov. 1989: s_cmp returns long (for f2c)
+ 30 Nov. 1989: arg types from f2c.h
+ 12 Dec. 1989: s_rnge allows long names
+ 19 Dec. 1989: getenv_ allows unsorted environment
+ 28 Mar. 1990: add exit(0) to end of main()
+ 2 Oct. 1990: test signal(...) == SIG_IGN rather than & 01 in main
+ 17 Oct. 1990: abort() calls changed to sig_die(...,1)
+ 22 Oct. 1990: separate sig_die from main
+ 25 Apr. 1991: minor, theoretically invisible tweaks to s_cat, sig_die
+ 31 May 1991: make system_ return status
+ 18 Dec. 1991: change long to ftnlen (for -i2) many places
+ 28 Feb. 1992: repair z_sqrt.c (scribbled on input, gave wrong answer)
+ 18 July 1992: for n < 0, repair handling of 0**n in pow_[dr]i.c
+ and m**n in pow_hh.c and pow_ii.c;
+ catch SIGTRAP in main() for error msg before abort
+ 23 July 1992: switch to ANSI prototypes unless KR_headers is #defined
+ 23 Oct. 1992: fix botch in signal_.c (erroneous deref of 2nd arg);
+ change Cabs to f__cabs.
+ 12 March 1993: various tweaks for C++
+ 2 June 1994: adjust so abnormal terminations invoke f_exit just once
+ 16 Sept. 1994: s_cmp: treat characters as unsigned in comparisons.
+ 19 Sept. 1994: s_paus: flush after end of PAUSE; add -DMSDOS
+ 12 Jan. 1995: pow_[dhiqrz][hiq]: adjust x**i to work on machines
+ that sign-extend right shifts when i is the most
+ negative integer.
+ 26 Jan. 1995: adjust s_cat.c, s_copy.c to permit the left-hand side
+ of character assignments to appear on the right-hand
+ side (unless compiled with -DNO_OVERWRITE).
+ 27 Jan. 1995: minor tweak to s_copy.c: copy forward whenever
+ possible (for better cache behavior).
+ 30 May 1995: added subroutine exit(rc) integer rc. Version not changed.
+ 29 Aug. 1995: add F77_aloc.c; use it in s_cat.c and system_.c.
+ 6 Sept. 1995: fix return type of system_ under -DKR_headers.
+ 19 Dec. 1995: s_cat.c: fix bug when 2nd or later arg overlaps lhs.
+ 19 Mar. 1996: s_cat.c: supply missing break after overlap detection.
+ 13 May 1996: add [lq]bitbits.c and [lq]bitshft.c (f90 bit intrinsics).
+ 19 June 1996: add casts to unsigned in [lq]bitshft.c.
+ 26 Feb. 1997: adjust functions with a complex output argument
+ to permit aliasing it with input arguments.
+ (For now, at least, this is just for possible
+ benefit of g77.)
+ 4 April 1997: [cz]_div.c: tweaks invisible on most systems (that may
+ affect systems using gratuitous extra precision).
+ 19 Sept. 1997: [de]time_.c (Unix systems only): change return
+ type to double.
+ 2 May 1999: getenv_.c: omit environ in favor of getenv().
+ c_cos.c, c_exp.c, c_sin.c, d_cnjg.c, r_cnjg.c,
+ z_cos.c, z_exp.c, z_log.c, z_sin.c: cope fully with
+ overlapping arguments caused by equivalence.
+ 3 May 1999: "invisible" tweaks to omit compiler warnings in
+ abort_.c, ef1asc_.c, s_rnge.c, s_stop.c.
+
+ 7 Sept. 1999: [cz]_div.c: arrange for compilation under
+ -DIEEE_COMPLEX_DIVIDE to make these routines
+ avoid calling sig_die when the denominator
+ vanishes; instead, they return pairs of NaNs
+ or Infinities, depending whether the numerator
+ also vanishes or not. VERSION not changed.
+ 15 Nov. 1999: s_rnge.c: add casts for the case of
+ sizeof(ftnint) == sizeof(int) < sizeof(long).
+ 10 March 2000: z_log.c: improve accuracy of Real(log(z)) for, e.g.,
+ z near (+-1,eps) with |eps| small. For the old
+ evaluation, compile with -DPre20000310 .
+ 20 April 2000: s_cat.c: tweak argument types to accord with
+ calls by f2c when ftnint and ftnlen are of
+ different sizes (different numbers of bits).
+ 4 July 2000: adjustments to permit compilation by C++ compilers;
+ VERSION string remains unchanged.
+ 29 Sept. 2000: dtime_.c, etime_.c: use floating-point divide.
+ dtime_.d, erf_.c, erfc_.c, etime.c: for use with
+ "f2c -R", compile with -DREAL=float.
+ 23 June 2001: add uninit.c; [fi]77vers.c: make version strings
+ visible as extern char _lib[fi]77_version_f2c[].
+ 5 July 2001: modify uninit.c for __mc68k__ under Linux.
+ 16 Nov. 2001: uninit.c: Linux Power PC logic supplied by Alan Bain.
+ 18 Jan. 2002: fix glitches in qbit_bits(): wrong return type,
+ missing ~ on y in return value.
+ 14 March 2002: z_log.c: add code to cope with buggy compilers
+ (e.g., some versions of gcc under -O2 or -O3)
+ that do floating-point comparisons against values
+ computed into extended-precision registers on some
+ systems (such as Intel IA32 systems). Compile with
+ -DNO_DOUBLE_EXTENDED to omit the new logic.
+ 4 Oct. 2002: uninit.c: on IRIX systems, omit use of shell variables.
+ 10 Oct 2005: uninit.c: on IA32 Linux systems, leave the rounding
+ precision alone rather than forcing it to 53 bits;
+ compile with -DUNINIT_F2C_PRECISION_53 to get the
+ former behavior.
+*/
diff --git a/contrib/libs/libf2c/fio.h b/contrib/libs/libf2c/fio.h
new file mode 100644
index 0000000000..ebf76965e6
--- /dev/null
+++ b/contrib/libs/libf2c/fio.h
@@ -0,0 +1,141 @@
+#ifndef SYSDEP_H_INCLUDED
+#include "sysdep1.h"
+#endif
+#include "stdio.h"
+#include "errno.h"
+#ifndef NULL
+/* ANSI C */
+#include "stddef.h"
+#endif
+
+#ifndef SEEK_SET
+#define SEEK_SET 0
+#define SEEK_CUR 1
+#define SEEK_END 2
+#endif
+
+#ifndef FOPEN
+#define FOPEN fopen
+#endif
+
+#ifndef FREOPEN
+#define FREOPEN freopen
+#endif
+
+#ifndef FSEEK
+#define FSEEK fseek
+#endif
+
+#ifndef FSTAT
+#define FSTAT fstat
+#endif
+
+#ifndef FTELL
+#define FTELL ftell
+#endif
+
+#ifndef OFF_T
+#define OFF_T long
+#endif
+
+#ifndef STAT_ST
+#define STAT_ST stat
+#endif
+
+#ifndef STAT
+#define STAT stat
+#endif
+
+#ifdef MSDOS
+#ifndef NON_UNIX_STDIO
+#define NON_UNIX_STDIO
+#endif
+#endif
+
+#ifdef UIOLEN_int
+typedef int uiolen;
+#else
+typedef long uiolen;
+#endif
+
+/*units*/
+typedef struct
+{ FILE *ufd; /*0=unconnected*/
+ char *ufnm;
+#ifndef MSDOS
+ long uinode;
+ int udev;
+#endif
+ int url; /*0=sequential*/
+ flag useek; /*true=can backspace, use dir, ...*/
+ flag ufmt;
+ flag urw; /* (1 for can read) | (2 for can write) */
+ flag ublnk;
+ flag uend;
+ flag uwrt; /*last io was write*/
+ flag uscrtch;
+} unit;
+
+#undef Void
+#ifdef KR_headers
+#define Void /*void*/
+extern int (*f__getn)(); /* for formatted input */
+extern void (*f__putn)(); /* for formatted output */
+extern void x_putc();
+extern long f__inode();
+extern VOID sig_die();
+extern int (*f__donewrec)(), t_putc(), x_wSL();
+extern int c_sfe(), err__fl(), xrd_SL(), f__putbuf();
+#else
+#define Void void
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern int (*f__getn)(void); /* for formatted input */
+extern void (*f__putn)(int); /* for formatted output */
+extern void x_putc(int);
+extern long f__inode(char*,int*);
+extern void sig_die(const char*,int);
+extern void f__fatal(int, const char*);
+extern int t_runc(alist*);
+extern int f__nowreading(unit*), f__nowwriting(unit*);
+extern int fk_open(int,int,ftnint);
+extern int en_fio(void);
+extern void f_init(void);
+extern int (*f__donewrec)(void), t_putc(int), x_wSL(void);
+extern void b_char(const char*,char*,ftnlen), g_char(const char*,ftnlen,char*);
+extern int c_sfe(cilist*), z_rnew(void);
+extern int err__fl(int,int,const char*);
+extern int xrd_SL(void);
+extern int f__putbuf(int);
+#endif
+extern flag f__init;
+extern cilist *f__elist; /*active external io list*/
+extern flag f__reading,f__external,f__sequential,f__formatted;
+extern int (*f__doend)(Void);
+extern FILE *f__cf; /*current file*/
+extern unit *f__curunit; /*current unit*/
+extern unit f__units[];
+#define err(f,m,s) {if(f) errno= m; else f__fatal(m,s); return(m);}
+#define errfl(f,m,s) return err__fl((int)f,m,s)
+
+/*Table sizes*/
+#define MXUNIT 100
+
+extern int f__recpos; /*position in current record*/
+extern OFF_T f__cursor; /* offset to move to */
+extern OFF_T f__hiwater; /* so TL doesn't confuse us */
+#ifdef __cplusplus
+ }
+#endif
+
+#define WRITE 1
+#define READ 2
+#define SEQ 3
+#define DIR 4
+#define FMT 5
+#define UNF 6
+#define EXT 7
+#define INT 8
+
+#define buf_end(x) (x->_flag & _IONBF ? x->_ptr : x->_base + BUFSIZ)
diff --git a/contrib/libs/libf2c/fmt.c b/contrib/libs/libf2c/fmt.c
new file mode 100644
index 0000000000..286c98f3c7
--- /dev/null
+++ b/contrib/libs/libf2c/fmt.c
@@ -0,0 +1,530 @@
+#include "f2c.h"
+#include "fio.h"
+#include "fmt.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+#define skip(s) while(*s==' ') s++
+#ifdef interdata
+#define SYLMX 300
+#endif
+#ifdef pdp11
+#define SYLMX 300
+#endif
+#ifdef vax
+#define SYLMX 300
+#endif
+#ifndef SYLMX
+#define SYLMX 300
+#endif
+#define GLITCH '\2'
+ /* special quote character for stu */
+extern flag f__cblank,f__cplus; /*blanks in I and compulsory plus*/
+static struct syl f__syl[SYLMX];
+int f__parenlvl,f__pc,f__revloc;
+#ifdef KR_headers
+#define Const /*nothing*/
+#else
+#define Const const
+#endif
+
+ static
+#ifdef KR_headers
+char *ap_end(s) char *s;
+#else
+const char *ap_end(const char *s)
+#endif
+{ char quote;
+ quote= *s++;
+ for(;*s;s++)
+ { if(*s!=quote) continue;
+ if(*++s!=quote) return(s);
+ }
+ if(f__elist->cierr) {
+ errno = 100;
+ return(NULL);
+ }
+ f__fatal(100, "bad string");
+ /*NOTREACHED*/ return 0;
+}
+ static int
+#ifdef KR_headers
+op_gen(a,b,c,d)
+#else
+op_gen(int a, int b, int c, int d)
+#endif
+{ struct syl *p= &f__syl[f__pc];
+ if(f__pc>=SYLMX)
+ { fprintf(stderr,"format too complicated:\n");
+ sig_die(f__fmtbuf, 1);
+ }
+ p->op=a;
+ p->p1=b;
+ p->p2.i[0]=c;
+ p->p2.i[1]=d;
+ return(f__pc++);
+}
+#ifdef KR_headers
+static char *f_list();
+static char *gt_num(s,n,n1) char *s; int *n, n1;
+#else
+static const char *f_list(const char*);
+static const char *gt_num(const char *s, int *n, int n1)
+#endif
+{ int m=0,f__cnt=0;
+ char c;
+ for(c= *s;;c = *s)
+ { if(c==' ')
+ { s++;
+ continue;
+ }
+ if(c>'9' || c<'0') break;
+ m=10*m+c-'0';
+ f__cnt++;
+ s++;
+ }
+ if(f__cnt==0) {
+ if (!n1)
+ s = 0;
+ *n=n1;
+ }
+ else *n=m;
+ return(s);
+}
+
+ static
+#ifdef KR_headers
+char *f_s(s,curloc) char *s;
+#else
+const char *f_s(const char *s, int curloc)
+#endif
+{
+ skip(s);
+ if(*s++!='(')
+ {
+ return(NULL);
+ }
+ if(f__parenlvl++ ==1) f__revloc=curloc;
+ if(op_gen(RET1,curloc,0,0)<0 ||
+ (s=f_list(s))==NULL)
+ {
+ return(NULL);
+ }
+ skip(s);
+ return(s);
+}
+
+ static int
+#ifdef KR_headers
+ne_d(s,p) char *s,**p;
+#else
+ne_d(const char *s, const char **p)
+#endif
+{ int n,x,sign=0;
+ struct syl *sp;
+ switch(*s)
+ {
+ default:
+ return(0);
+ case ':': (void) op_gen(COLON,0,0,0); break;
+ case '$':
+ (void) op_gen(NONL, 0, 0, 0); break;
+ case 'B':
+ case 'b':
+ if(*++s=='z' || *s == 'Z') (void) op_gen(BZ,0,0,0);
+ else (void) op_gen(BN,0,0,0);
+ break;
+ case 'S':
+ case 's':
+ if(*(s+1)=='s' || *(s+1) == 'S')
+ { x=SS;
+ s++;
+ }
+ else if(*(s+1)=='p' || *(s+1) == 'P')
+ { x=SP;
+ s++;
+ }
+ else x=S;
+ (void) op_gen(x,0,0,0);
+ break;
+ case '/': (void) op_gen(SLASH,0,0,0); break;
+ case '-': sign=1;
+ case '+': s++; /*OUTRAGEOUS CODING TRICK*/
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ if (!(s=gt_num(s,&n,0))) {
+ bad: *p = 0;
+ return 1;
+ }
+ switch(*s)
+ {
+ default:
+ return(0);
+ case 'P':
+ case 'p': if(sign) n= -n; (void) op_gen(P,n,0,0); break;
+ case 'X':
+ case 'x': (void) op_gen(X,n,0,0); break;
+ case 'H':
+ case 'h':
+ sp = &f__syl[op_gen(H,n,0,0)];
+ sp->p2.s = (char*)s + 1;
+ s+=n;
+ break;
+ }
+ break;
+ case GLITCH:
+ case '"':
+ case '\'':
+ sp = &f__syl[op_gen(APOS,0,0,0)];
+ sp->p2.s = (char*)s;
+ if((*p = ap_end(s)) == NULL)
+ return(0);
+ return(1);
+ case 'T':
+ case 't':
+ if(*(s+1)=='l' || *(s+1) == 'L')
+ { x=TL;
+ s++;
+ }
+ else if(*(s+1)=='r'|| *(s+1) == 'R')
+ { x=TR;
+ s++;
+ }
+ else x=T;
+ if (!(s=gt_num(s+1,&n,0)))
+ goto bad;
+ s--;
+ (void) op_gen(x,n,0,0);
+ break;
+ case 'X':
+ case 'x': (void) op_gen(X,1,0,0); break;
+ case 'P':
+ case 'p': (void) op_gen(P,1,0,0); break;
+ }
+ s++;
+ *p=s;
+ return(1);
+}
+
+ static int
+#ifdef KR_headers
+e_d(s,p) char *s,**p;
+#else
+e_d(const char *s, const char **p)
+#endif
+{ int i,im,n,w,d,e,found=0,x=0;
+ Const char *sv=s;
+ s=gt_num(s,&n,1);
+ (void) op_gen(STACK,n,0,0);
+ switch(*s++)
+ {
+ default: break;
+ case 'E':
+ case 'e': x=1;
+ case 'G':
+ case 'g':
+ found=1;
+ if (!(s=gt_num(s,&w,0))) {
+ bad:
+ *p = 0;
+ return 1;
+ }
+ if(w==0) break;
+ if(*s=='.') {
+ if (!(s=gt_num(s+1,&d,0)))
+ goto bad;
+ }
+ else d=0;
+ if(*s!='E' && *s != 'e')
+ (void) op_gen(x==1?E:G,w,d,0); /* default is Ew.dE2 */
+ else {
+ if (!(s=gt_num(s+1,&e,0)))
+ goto bad;
+ (void) op_gen(x==1?EE:GE,w,d,e);
+ }
+ break;
+ case 'O':
+ case 'o':
+ i = O;
+ im = OM;
+ goto finish_I;
+ case 'Z':
+ case 'z':
+ i = Z;
+ im = ZM;
+ goto finish_I;
+ case 'L':
+ case 'l':
+ found=1;
+ if (!(s=gt_num(s,&w,0)))
+ goto bad;
+ if(w==0) break;
+ (void) op_gen(L,w,0,0);
+ break;
+ case 'A':
+ case 'a':
+ found=1;
+ skip(s);
+ if(*s>='0' && *s<='9')
+ { s=gt_num(s,&w,1);
+ if(w==0) break;
+ (void) op_gen(AW,w,0,0);
+ break;
+ }
+ (void) op_gen(A,0,0,0);
+ break;
+ case 'F':
+ case 'f':
+ if (!(s=gt_num(s,&w,0)))
+ goto bad;
+ found=1;
+ if(w==0) break;
+ if(*s=='.') {
+ if (!(s=gt_num(s+1,&d,0)))
+ goto bad;
+ }
+ else d=0;
+ (void) op_gen(F,w,d,0);
+ break;
+ case 'D':
+ case 'd':
+ found=1;
+ if (!(s=gt_num(s,&w,0)))
+ goto bad;
+ if(w==0) break;
+ if(*s=='.') {
+ if (!(s=gt_num(s+1,&d,0)))
+ goto bad;
+ }
+ else d=0;
+ (void) op_gen(D,w,d,0);
+ break;
+ case 'I':
+ case 'i':
+ i = I;
+ im = IM;
+ finish_I:
+ if (!(s=gt_num(s,&w,0)))
+ goto bad;
+ found=1;
+ if(w==0) break;
+ if(*s!='.')
+ { (void) op_gen(i,w,0,0);
+ break;
+ }
+ if (!(s=gt_num(s+1,&d,0)))
+ goto bad;
+ (void) op_gen(im,w,d,0);
+ break;
+ }
+ if(found==0)
+ { f__pc--; /*unSTACK*/
+ *p=sv;
+ return(0);
+ }
+ *p=s;
+ return(1);
+}
+ static
+#ifdef KR_headers
+char *i_tem(s) char *s;
+#else
+const char *i_tem(const char *s)
+#endif
+{ const char *t;
+ int n,curloc;
+ if(*s==')') return(s);
+ if(ne_d(s,&t)) return(t);
+ if(e_d(s,&t)) return(t);
+ s=gt_num(s,&n,1);
+ if((curloc=op_gen(STACK,n,0,0))<0) return(NULL);
+ return(f_s(s,curloc));
+}
+
+ static
+#ifdef KR_headers
+char *f_list(s) char *s;
+#else
+const char *f_list(const char *s)
+#endif
+{
+ for(;*s!=0;)
+ { skip(s);
+ if((s=i_tem(s))==NULL) return(NULL);
+ skip(s);
+ if(*s==',') s++;
+ else if(*s==')')
+ { if(--f__parenlvl==0)
+ {
+ (void) op_gen(REVERT,f__revloc,0,0);
+ return(++s);
+ }
+ (void) op_gen(GOTO,0,0,0);
+ return(++s);
+ }
+ }
+ return(NULL);
+}
+
+ int
+#ifdef KR_headers
+pars_f(s) char *s;
+#else
+pars_f(const char *s)
+#endif
+{
+ f__parenlvl=f__revloc=f__pc=0;
+ if(f_s(s,0) == NULL)
+ {
+ return(-1);
+ }
+ return(0);
+}
+#define STKSZ 10
+int f__cnt[STKSZ],f__ret[STKSZ],f__cp,f__rp;
+flag f__workdone, f__nonl;
+
+ static int
+#ifdef KR_headers
+type_f(n)
+#else
+type_f(int n)
+#endif
+{
+ switch(n)
+ {
+ default:
+ return(n);
+ case RET1:
+ return(RET1);
+ case REVERT: return(REVERT);
+ case GOTO: return(GOTO);
+ case STACK: return(STACK);
+ case X:
+ case SLASH:
+ case APOS: case H:
+ case T: case TL: case TR:
+ return(NED);
+ case F:
+ case I:
+ case IM:
+ case A: case AW:
+ case O: case OM:
+ case L:
+ case E: case EE: case D:
+ case G: case GE:
+ case Z: case ZM:
+ return(ED);
+ }
+}
+#ifdef KR_headers
+integer do_fio(number,ptr,len) ftnint *number; ftnlen len; char *ptr;
+#else
+integer do_fio(ftnint *number, char *ptr, ftnlen len)
+#endif
+{ struct syl *p;
+ int n,i;
+ for(i=0;i<*number;i++,ptr+=len)
+ {
+loop: switch(type_f((p= &f__syl[f__pc])->op))
+ {
+ default:
+ fprintf(stderr,"unknown code in do_fio: %d\n%s\n",
+ p->op,f__fmtbuf);
+ err(f__elist->cierr,100,"do_fio");
+ case NED:
+ if((*f__doned)(p))
+ { f__pc++;
+ goto loop;
+ }
+ f__pc++;
+ continue;
+ case ED:
+ if(f__cnt[f__cp]<=0)
+ { f__cp--;
+ f__pc++;
+ goto loop;
+ }
+ if(ptr==NULL)
+ return((*f__doend)());
+ f__cnt[f__cp]--;
+ f__workdone=1;
+ if((n=(*f__doed)(p,ptr,len))>0)
+ errfl(f__elist->cierr,errno,"fmt");
+ if(n<0)
+ err(f__elist->ciend,(EOF),"fmt");
+ continue;
+ case STACK:
+ f__cnt[++f__cp]=p->p1;
+ f__pc++;
+ goto loop;
+ case RET1:
+ f__ret[++f__rp]=p->p1;
+ f__pc++;
+ goto loop;
+ case GOTO:
+ if(--f__cnt[f__cp]<=0)
+ { f__cp--;
+ f__rp--;
+ f__pc++;
+ goto loop;
+ }
+ f__pc=1+f__ret[f__rp--];
+ goto loop;
+ case REVERT:
+ f__rp=f__cp=0;
+ f__pc = p->p1;
+ if(ptr==NULL)
+ return((*f__doend)());
+ if(!f__workdone) return(0);
+ if((n=(*f__dorevert)()) != 0) return(n);
+ goto loop;
+ case COLON:
+ if(ptr==NULL)
+ return((*f__doend)());
+ f__pc++;
+ goto loop;
+ case NONL:
+ f__nonl = 1;
+ f__pc++;
+ goto loop;
+ case S:
+ case SS:
+ f__cplus=0;
+ f__pc++;
+ goto loop;
+ case SP:
+ f__cplus = 1;
+ f__pc++;
+ goto loop;
+ case P: f__scale=p->p1;
+ f__pc++;
+ goto loop;
+ case BN:
+ f__cblank=0;
+ f__pc++;
+ goto loop;
+ case BZ:
+ f__cblank=1;
+ f__pc++;
+ goto loop;
+ }
+ }
+ return(0);
+}
+
+ int
+en_fio(Void)
+{ ftnint one=1;
+ return(do_fio(&one,(char *)NULL,(ftnint)0));
+}
+
+ VOID
+fmt_bg(Void)
+{
+ f__workdone=f__cp=f__rp=f__pc=f__cursor=0;
+ f__cnt[0]=f__ret[0]=0;
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/fmt.h b/contrib/libs/libf2c/fmt.h
new file mode 100644
index 0000000000..ddfa551d20
--- /dev/null
+++ b/contrib/libs/libf2c/fmt.h
@@ -0,0 +1,105 @@
+struct syl
+{ int op;
+ int p1;
+ union { int i[2]; char *s;} p2;
+ };
+#define RET1 1
+#define REVERT 2
+#define GOTO 3
+#define X 4
+#define SLASH 5
+#define STACK 6
+#define I 7
+#define ED 8
+#define NED 9
+#define IM 10
+#define APOS 11
+#define H 12
+#define TL 13
+#define TR 14
+#define T 15
+#define COLON 16
+#define S 17
+#define SP 18
+#define SS 19
+#define P 20
+#define BN 21
+#define BZ 22
+#define F 23
+#define E 24
+#define EE 25
+#define D 26
+#define G 27
+#define GE 28
+#define L 29
+#define A 30
+#define AW 31
+#define O 32
+#define NONL 33
+#define OM 34
+#define Z 35
+#define ZM 36
+typedef union
+{ real pf;
+ doublereal pd;
+} ufloat;
+typedef union
+{ short is;
+#ifndef KR_headers
+ signed
+#endif
+ char ic;
+ integer il;
+#ifdef Allow_TYQUAD
+ longint ili;
+#endif
+} Uint;
+#ifdef KR_headers
+extern int (*f__doed)(),(*f__doned)();
+extern int (*f__dorevert)();
+extern int rd_ed(),rd_ned();
+extern int w_ed(),w_ned();
+extern int signbit_f2c();
+extern char *f__fmtbuf;
+#else
+#ifdef __cplusplus
+extern "C" {
+#define Cextern extern "C"
+#else
+#define Cextern extern
+#endif
+extern const char *f__fmtbuf;
+extern int (*f__doed)(struct syl*, char*, ftnlen),(*f__doned)(struct syl*);
+extern int (*f__dorevert)(void);
+extern void fmt_bg(void);
+extern int pars_f(const char*);
+extern int rd_ed(struct syl*, char*, ftnlen),rd_ned(struct syl*);
+extern int signbit_f2c(double*);
+extern int w_ed(struct syl*, char*, ftnlen),w_ned(struct syl*);
+extern int wrt_E(ufloat*, int, int, int, ftnlen);
+extern int wrt_F(ufloat*, int, int, ftnlen);
+extern int wrt_L(Uint*, int, ftnlen);
+#endif
+extern int f__pc,f__parenlvl,f__revloc;
+extern flag f__cblank,f__cplus,f__workdone, f__nonl;
+extern int f__scale;
+#ifdef __cplusplus
+ }
+#endif
+#define GET(x) if((x=(*f__getn)())<0) return(x)
+#define VAL(x) (x!='\n'?x:' ')
+#define PUT(x) (*f__putn)(x)
+
+#undef TYQUAD
+#ifndef Allow_TYQUAD
+#undef longint
+#define longint long
+#else
+#define TYQUAD 14
+#endif
+
+#ifdef KR_headers
+extern char *f__icvt();
+#else
+Cextern char *f__icvt(longint, int*, int*, int);
+#endif
diff --git a/contrib/libs/libf2c/fmtlib.c b/contrib/libs/libf2c/fmtlib.c
new file mode 100644
index 0000000000..279f66f439
--- /dev/null
+++ b/contrib/libs/libf2c/fmtlib.c
@@ -0,0 +1,51 @@
+/* @(#)fmtlib.c 1.2 */
+#define MAXINTLENGTH 23
+
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+#ifndef Allow_TYQUAD
+#undef longint
+#define longint long
+#undef ulongint
+#define ulongint unsigned long
+#endif
+
+#ifdef KR_headers
+char *f__icvt(value,ndigit,sign, base) longint value; int *ndigit,*sign;
+ register int base;
+#else
+char *f__icvt(longint value, int *ndigit, int *sign, int base)
+#endif
+{
+ static char buf[MAXINTLENGTH+1];
+ register int i;
+ ulongint uvalue;
+
+ if(value > 0) {
+ uvalue = value;
+ *sign = 0;
+ }
+ else if (value < 0) {
+ uvalue = -value;
+ *sign = 1;
+ }
+ else {
+ *sign = 0;
+ *ndigit = 1;
+ buf[MAXINTLENGTH-1] = '0';
+ return &buf[MAXINTLENGTH-1];
+ }
+ i = MAXINTLENGTH;
+ do {
+ buf[--i] = (uvalue%base) + '0';
+ uvalue /= base;
+ }
+ while(uvalue > 0);
+ *ndigit = MAXINTLENGTH - i;
+ return &buf[i];
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/fp.h b/contrib/libs/libf2c/fp.h
new file mode 100644
index 0000000000..40743d79f7
--- /dev/null
+++ b/contrib/libs/libf2c/fp.h
@@ -0,0 +1,28 @@
+#define FMAX 40
+#define EXPMAXDIGS 8
+#define EXPMAX 99999999
+/* FMAX = max number of nonzero digits passed to atof() */
+/* EXPMAX = 10^EXPMAXDIGS - 1 = largest allowed exponent absolute value */
+
+#ifdef V10 /* Research Tenth-Edition Unix */
+#include "local.h"
+#endif
+
+/* MAXFRACDIGS and MAXINTDIGS are for wrt_F -- bounds (not necessarily
+ tight) on the maximum number of digits to the right and left of
+ * the decimal point.
+ */
+
+#ifdef VAX
+#define MAXFRACDIGS 56
+#define MAXINTDIGS 38
+#else
+#ifdef CRAY
+#define MAXFRACDIGS 9880
+#define MAXINTDIGS 9864
+#else
+/* values that suffice for IEEE double */
+#define MAXFRACDIGS 344
+#define MAXINTDIGS 308
+#endif
+#endif
diff --git a/contrib/libs/libf2c/ftell_.c b/contrib/libs/libf2c/ftell_.c
new file mode 100644
index 0000000000..0acd60fe35
--- /dev/null
+++ b/contrib/libs/libf2c/ftell_.c
@@ -0,0 +1,52 @@
+#include "f2c.h"
+#include "fio.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ static FILE *
+#ifdef KR_headers
+unit_chk(Unit, who) integer Unit; char *who;
+#else
+unit_chk(integer Unit, const char *who)
+#endif
+{
+ if (Unit >= MXUNIT || Unit < 0)
+ f__fatal(101, who);
+ return f__units[Unit].ufd;
+ }
+
+ integer
+#ifdef KR_headers
+ftell_(Unit) integer *Unit;
+#else
+ftell_(integer *Unit)
+#endif
+{
+ FILE *f;
+ return (f = unit_chk(*Unit, "ftell")) ? ftell(f) : -1L;
+ }
+
+ int
+#ifdef KR_headers
+fseek_(Unit, offset, whence) integer *Unit, *offset, *whence;
+#else
+fseek_(integer *Unit, integer *offset, integer *whence)
+#endif
+{
+ FILE *f;
+ int w = (int)*whence;
+#ifdef SEEK_SET
+ static int wohin[3] = { SEEK_SET, SEEK_CUR, SEEK_END };
+#endif
+ if (w < 0 || w > 2)
+ w = 0;
+#ifdef SEEK_SET
+ w = wohin[w];
+#endif
+ return !(f = unit_chk(*Unit, "fseek"))
+ || fseek(f, *offset, w) ? 1 : 0;
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/getarg_.c b/contrib/libs/libf2c/getarg_.c
new file mode 100644
index 0000000000..2b69a1e10c
--- /dev/null
+++ b/contrib/libs/libf2c/getarg_.c
@@ -0,0 +1,36 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * subroutine getarg(k, c)
+ * returns the kth unix command argument in fortran character
+ * variable argument c
+*/
+
+#ifdef KR_headers
+VOID getarg_(n, s, ls) ftnint *n; char *s; ftnlen ls;
+#define Const /*nothing*/
+#else
+#define Const const
+void getarg_(ftnint *n, char *s, ftnlen ls)
+#endif
+{
+ extern int xargc;
+ extern char **xargv;
+ Const char *t;
+ int i;
+
+ if(*n>=0 && *n<xargc)
+ t = xargv[*n];
+ else
+ t = "";
+ for(i = 0; i<ls && *t!='\0' ; ++i)
+ *s++ = *t++;
+ for( ; i<ls ; ++i)
+ *s++ = ' ';
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/getenv_.c b/contrib/libs/libf2c/getenv_.c
new file mode 100644
index 0000000000..b615a37e5e
--- /dev/null
+++ b/contrib/libs/libf2c/getenv_.c
@@ -0,0 +1,62 @@
+#include "f2c.h"
+#undef abs
+#ifdef KR_headers
+extern char *F77_aloc(), *getenv();
+#else
+#include <stdlib.h>
+#include <string.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern char *F77_aloc(ftnlen, const char*);
+#endif
+
+/*
+ * getenv - f77 subroutine to return environment variables
+ *
+ * called by:
+ * call getenv (ENV_NAME, char_var)
+ * where:
+ * ENV_NAME is the name of an environment variable
+ * char_var is a character variable which will receive
+ * the current value of ENV_NAME, or all blanks
+ * if ENV_NAME is not defined
+ */
+
+#ifdef KR_headers
+ VOID
+getenv_(fname, value, flen, vlen) char *value, *fname; ftnlen vlen, flen;
+#else
+ void
+getenv_(char *fname, char *value, ftnlen flen, ftnlen vlen)
+#endif
+{
+ char buf[256], *ep, *fp;
+ integer i;
+
+ if (flen <= 0)
+ goto add_blanks;
+ for(i = 0; i < sizeof(buf); i++) {
+ if (i == flen || (buf[i] = fname[i]) == ' ') {
+ buf[i] = 0;
+ ep = getenv(buf);
+ goto have_ep;
+ }
+ }
+ while(i < flen && fname[i] != ' ')
+ i++;
+ strncpy(fp = F77_aloc(i+1, "getenv_"), fname, (int)i);
+ fp[i] = 0;
+ ep = getenv(fp);
+ free(fp);
+ have_ep:
+ if (ep)
+ while(*ep && vlen-- > 0)
+ *value++ = *ep++;
+ add_blanks:
+ while(vlen-- > 0)
+ *value++ = ' ';
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/h_abs.c b/contrib/libs/libf2c/h_abs.c
new file mode 100644
index 0000000000..db6906869c
--- /dev/null
+++ b/contrib/libs/libf2c/h_abs.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+shortint h_abs(x) shortint *x;
+#else
+shortint h_abs(shortint *x)
+#endif
+{
+if(*x >= 0)
+ return(*x);
+return(- *x);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/h_dim.c b/contrib/libs/libf2c/h_dim.c
new file mode 100644
index 0000000000..443427a9b9
--- /dev/null
+++ b/contrib/libs/libf2c/h_dim.c
@@ -0,0 +1,16 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+shortint h_dim(a,b) shortint *a, *b;
+#else
+shortint h_dim(shortint *a, shortint *b)
+#endif
+{
+return( *a > *b ? *a - *b : 0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/h_dnnt.c b/contrib/libs/libf2c/h_dnnt.c
new file mode 100644
index 0000000000..1ec641c5aa
--- /dev/null
+++ b/contrib/libs/libf2c/h_dnnt.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double floor();
+shortint h_dnnt(x) doublereal *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+shortint h_dnnt(doublereal *x)
+#endif
+{
+return (shortint)(*x >= 0. ? floor(*x + .5) : -floor(.5 - *x));
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/h_indx.c b/contrib/libs/libf2c/h_indx.c
new file mode 100644
index 0000000000..018f2f4386
--- /dev/null
+++ b/contrib/libs/libf2c/h_indx.c
@@ -0,0 +1,32 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+shortint h_indx(a, b, la, lb) char *a, *b; ftnlen la, lb;
+#else
+shortint h_indx(char *a, char *b, ftnlen la, ftnlen lb)
+#endif
+{
+ftnlen i, n;
+char *s, *t, *bend;
+
+n = la - lb + 1;
+bend = b + lb;
+
+for(i = 0 ; i < n ; ++i)
+ {
+ s = a + i;
+ t = b;
+ while(t < bend)
+ if(*s++ != *t++)
+ goto no;
+ return((shortint)i+1);
+ no: ;
+ }
+return(0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/h_len.c b/contrib/libs/libf2c/h_len.c
new file mode 100644
index 0000000000..8b0aea99d2
--- /dev/null
+++ b/contrib/libs/libf2c/h_len.c
@@ -0,0 +1,16 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+shortint h_len(s, n) char *s; ftnlen n;
+#else
+shortint h_len(char *s, ftnlen n)
+#endif
+{
+return(n);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/h_mod.c b/contrib/libs/libf2c/h_mod.c
new file mode 100644
index 0000000000..611ef0aa8b
--- /dev/null
+++ b/contrib/libs/libf2c/h_mod.c
@@ -0,0 +1,16 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+shortint h_mod(a,b) short *a, *b;
+#else
+shortint h_mod(short *a, short *b)
+#endif
+{
+return( *a % *b);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/h_nint.c b/contrib/libs/libf2c/h_nint.c
new file mode 100644
index 0000000000..9e2282f2a7
--- /dev/null
+++ b/contrib/libs/libf2c/h_nint.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double floor();
+shortint h_nint(x) real *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+shortint h_nint(real *x)
+#endif
+{
+return (shortint)(*x >= 0 ? floor(*x + .5) : -floor(.5 - *x));
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/h_sign.c b/contrib/libs/libf2c/h_sign.c
new file mode 100644
index 0000000000..4e214380cf
--- /dev/null
+++ b/contrib/libs/libf2c/h_sign.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+shortint h_sign(a,b) shortint *a, *b;
+#else
+shortint h_sign(shortint *a, shortint *b)
+#endif
+{
+shortint x;
+x = (*a >= 0 ? *a : - *a);
+return( *b >= 0 ? x : -x);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/hl_ge.c b/contrib/libs/libf2c/hl_ge.c
new file mode 100644
index 0000000000..8c72f03d48
--- /dev/null
+++ b/contrib/libs/libf2c/hl_ge.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+extern integer s_cmp();
+shortlogical hl_ge(a,b,la,lb) char *a, *b; ftnlen la, lb;
+#else
+extern integer s_cmp(char *, char *, ftnlen, ftnlen);
+shortlogical hl_ge(char *a, char *b, ftnlen la, ftnlen lb)
+#endif
+{
+return(s_cmp(a,b,la,lb) >= 0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/hl_gt.c b/contrib/libs/libf2c/hl_gt.c
new file mode 100644
index 0000000000..a448522db4
--- /dev/null
+++ b/contrib/libs/libf2c/hl_gt.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+extern integer s_cmp();
+shortlogical hl_gt(a,b,la,lb) char *a, *b; ftnlen la, lb;
+#else
+extern integer s_cmp(char *, char *, ftnlen, ftnlen);
+shortlogical hl_gt(char *a, char *b, ftnlen la, ftnlen lb)
+#endif
+{
+return(s_cmp(a,b,la,lb) > 0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/hl_le.c b/contrib/libs/libf2c/hl_le.c
new file mode 100644
index 0000000000..31cbc431a3
--- /dev/null
+++ b/contrib/libs/libf2c/hl_le.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+extern integer s_cmp();
+shortlogical hl_le(a,b,la,lb) char *a, *b; ftnlen la, lb;
+#else
+extern integer s_cmp(char *, char *, ftnlen, ftnlen);
+shortlogical hl_le(char *a, char *b, ftnlen la, ftnlen lb)
+#endif
+{
+return(s_cmp(a,b,la,lb) <= 0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/hl_lt.c b/contrib/libs/libf2c/hl_lt.c
new file mode 100644
index 0000000000..7ad3c714b1
--- /dev/null
+++ b/contrib/libs/libf2c/hl_lt.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+extern integer s_cmp();
+shortlogical hl_lt(a,b,la,lb) char *a, *b; ftnlen la, lb;
+#else
+extern integer s_cmp(char *, char *, ftnlen, ftnlen);
+shortlogical hl_lt(char *a, char *b, ftnlen la, ftnlen lb)
+#endif
+{
+return(s_cmp(a,b,la,lb) < 0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/i77vers.c b/contrib/libs/libf2c/i77vers.c
new file mode 100644
index 0000000000..60cc24eec1
--- /dev/null
+++ b/contrib/libs/libf2c/i77vers.c
@@ -0,0 +1,343 @@
+ char
+_libi77_version_f2c[] = "\n@(#) LIBI77 VERSION (f2c) pjw,dmg-mods 20030321\n";
+
+/*
+2.01 $ format added
+2.02 Coding bug in open.c repaired
+2.03 fixed bugs in lread.c (read * with negative f-format) and lio.c
+ and lio.h (e-format conforming to spec)
+2.04 changed open.c and err.c (fopen and freopen respectively) to
+ update to new c-library (append mode)
+2.05 added namelist capability
+2.06 allow internal list and namelist I/O
+*/
+
+/*
+close.c:
+ allow upper-case STATUS= values
+endfile.c
+ create fort.nnn if unit nnn not open;
+ else if (file length == 0) use creat() rather than copy;
+ use local copy() rather than forking /bin/cp;
+ rewind, fseek to clear buffer (for no reading past EOF)
+err.c
+ use neither setbuf nor setvbuf; make stderr buffered
+fio.h
+ #define _bufend
+inquire.c
+ upper case responses;
+ omit byfile test from SEQUENTIAL=
+ answer "YES" to DIRECT= for unopened file (open to debate)
+lio.c
+ flush stderr, stdout at end of each stmt
+ space before character strings in list output only at line start
+lio.h
+ adjust LEW, LED consistent with old libI77
+lread.c
+ use atof()
+ allow "nnn*," when reading complex constants
+open.c
+ try opening for writing when open for read fails, with
+ special uwrt value (2) delaying creat() to first write;
+ set curunit so error messages don't drop core;
+ no file name ==> fort.nnn except for STATUS='SCRATCH'
+rdfmt.c
+ use atof(); trust EOF == end-of-file (so don't read past
+ end-of-file after endfile stmt)
+sfe.c
+ flush stderr, stdout at end of each stmt
+wrtfmt.c:
+ use upper case
+ put wrt_E and wrt_F into wref.c, use sprintf()
+ rather than ecvt() and fcvt() [more accurate on VAX]
+*/
+
+/* 16 Oct. 1988: uwrt = 3 after write, rewind, so close won't zap the file. */
+
+/* 10 July 1989: change _bufend to buf_end in fio.h, wsfe.c, wrtfmt.c */
+
+/* 28 Nov. 1989: corrections for IEEE and Cray arithmetic */
+/* 29 Nov. 1989: change various int return types to long for f2c */
+/* 30 Nov. 1989: various types from f2c.h */
+/* 6 Dec. 1989: types corrected various places */
+/* 19 Dec. 1989: make iostat= work right for internal I/O */
+/* 8 Jan. 1990: add rsne, wsne -- routines for handling NAMELIST */
+/* 28 Jan. 1990: have NAMELIST read treat $ as &, general white
+ space as blank */
+/* 27 Mar. 1990: change an = to == in rd_L(rdfmt.c) so formatted reads
+ of logical values reject letters other than fFtT;
+ have nowwriting reset cf */
+/* 14 Aug. 1990: adjust lread.c to treat tabs as spaces in list input */
+/* 17 Aug. 1990: adjust open.c to recognize blank='Z...' as well as
+ blank='z...' when reopening an open file */
+/* 30 Aug. 1990: prevent embedded blanks in list output of complex values;
+ omit exponent field in list output of values of
+ magnitude between 10 and 1e8; prevent writing stdin
+ and reading stdout or stderr; don't close stdin, stdout,
+ or stderr when reopening units 5, 6, 0. */
+/* 18 Sep. 1990: add component udev to unit and consider old == new file
+ iff uinode and udev values agree; use stat rather than
+ access to check existence of file (when STATUS='OLD')*/
+/* 2 Oct. 1990: adjust rewind.c so two successive rewinds after a write
+ don't clobber the file. */
+/* 9 Oct. 1990: add #include "fcntl.h" to endfile.c, err.c, open.c;
+ adjust g_char in util.c for segmented memories. */
+/* 17 Oct. 1990: replace abort() and _cleanup() with calls on
+ sig_die(...,1) (defined in main.c). */
+/* 5 Nov. 1990: changes to open.c: complain if new= is specified and the
+ file already exists; allow file= to be omitted in open stmts
+ and allow status='replace' (Fortran 90 extensions). */
+/* 11 Dec. 1990: adjustments for POSIX. */
+/* 15 Jan. 1991: tweak i_ungetc in rsli.c to allow reading from
+ strings in read-only memory. */
+/* 25 Apr. 1991: adjust namelist stuff to work with f2c -i2 */
+/* 26 Apr. 1991: fix some bugs with NAMELIST read of multi-dim. arrays */
+/* 16 May 1991: increase LEFBL in lio.h to bypass NeXT bug */
+/* 17 Oct. 1991: change type of length field in sequential unformatted
+ records from int to long (for systems where sizeof(int)
+ can vary, depending on the compiler or compiler options). */
+/* 14 Nov. 1991: change uint to Uint in fmt.h, rdfmt.c, wrtfmt.c. */
+/* 25 Nov. 1991: change uint to Uint in lwrite.c; change sizeof(int) to
+ sizeof(uioint) in fseeks in sue.c (missed on 17 Oct.). */
+/* 1 Dec. 1991: uio.c: add test for read failure (seq. unformatted reads);
+ adjust an error return from EOF to off end of record */
+/* 12 Dec. 1991: rsli.c: fix bug with internal list input that caused
+ the last character of each record to be ignored.
+ iio.c: adjust error message in internal formatted
+ input from "end-of-file" to "off end of record" if
+ the format specifies more characters than the
+ record contains. */
+/* 17 Jan. 1992: lread.c, rsne.c: in list and namelist input,
+ treat "r* ," and "r*," alike (where r is a
+ positive integer constant), and fix a bug in
+ handling null values following items with repeat
+ counts (e.g., 2*1,,3); for namelist reading
+ of a numeric array, allow a new name-value subsequence
+ to terminate the current one (as though the current
+ one ended with the right number of null values).
+ lio.h, lwrite.c: omit insignificant zeros in
+ list and namelist output. To get the old
+ behavior, compile with -DOld_list_output . */
+/* 18 Jan. 1992: make list output consistent with F format by
+ printing .1 rather than 0.1 (introduced yesterday). */
+/* 3 Feb. 1992: rsne.c: fix namelist read bug that caused the
+ character following a comma to be ignored. */
+/* 19 May 1992: adjust iio.c, ilnw.c, rdfmt.c and rsli.c to make err=
+ work with internal list and formatted I/O. */
+/* 18 July 1992: adjust rsne.c to allow namelist input to stop at
+ an & (e.g. &end). */
+/* 23 July 1992: switch to ANSI prototypes unless KR_headers is #defined ;
+ recognize Z format (assuming 8-bit bytes). */
+/* 14 Aug. 1992: tweak wrt_E in wref.c to avoid -NaN */
+/* 23 Oct. 1992: Supply missing l_eof = 0 assignment to s_rsne() in rsne.c
+ (so end-of-file on other files won't confuse namelist
+ reads of external files). Prepend f__ to external
+ names that are only of internal interest to lib[FI]77. */
+/* 1 Feb. 1993: backspace.c: fix bug that bit when last char of 2nd
+ buffer == '\n'.
+ endfile.c: guard against tiny L_tmpnam; close and reopen
+ files in t_runc().
+ lio.h: lengthen LINTW (buffer size in lwrite.c).
+ err.c, open.c: more prepending of f__ (to [rw]_mode). */
+/* 5 Feb. 1993: tweaks to NAMELIST: rsne.c: ? prints the namelist being
+ sought; namelists of the wrong name are skipped (after
+ an error message; xwsne.c: namelist writes have a
+ newline before each new variable.
+ open.c: ACCESS='APPEND' positions sequential files
+ at EOF (nonstandard extension -- that doesn't require
+ changing data structures). */
+/* 9 Feb. 1993: Change some #ifdef MSDOS lines to #ifdef NON_UNIX_STDIO.
+ err.c: under NON_UNIX_STDIO, avoid close(creat(name,0666))
+ when the unit has another file descriptor for name. */
+/* 4 March 1993: err.c, open.c: take declaration of fdopen from rawio.h;
+ open.c: always give f__w_mode[] 4 elements for use
+ in t_runc (in endfile.c -- for change of 1 Feb. 1993). */
+/* 6 March 1993: uio.c: adjust off-end-of-record test for sequential
+ unformatted reads to respond to err= rather than end=. */
+/* 12 March 1993: various tweaks for C++ */
+/* 6 April 1993: adjust error returns for formatted inputs to flush
+ the current input line when err=label is specified.
+ To restore the old behavior (input left mid-line),
+ either adjust the #definition of errfl in fio.h or
+ omit the invocation of f__doend in err__fl (in err.c). */
+/* 23 June 1993: iio.c: fix bug in format reversions for internal writes. */
+/* 5 Aug. 1993: lread.c: fix bug in handling repetition counts for
+ logical data (during list or namelist input).
+ Change struct f__syl to struct syl (for buggy compilers). */
+/* 7 Aug. 1993: lread.c: fix bug in namelist reading of incomplete
+ logical arrays. */
+/* 9 Aug. 1993: lread.c: fix bug in namelist reading of an incomplete
+ array of numeric data followed by another namelist
+ item whose name starts with 'd', 'D', 'e', or 'E'. */
+/* 8 Sept. 1993: open.c: protect #include "sys/..." with
+ #ifndef NON_UNIX_STDIO; Version date not changed. */
+/* 10 Nov. 1993: backspace.c: add nonsense for #ifdef MSDOS */
+/* 8 Dec. 1993: iio.c: adjust internal formatted reads to treat
+ short records as though padded with blanks
+ (rather than causing an "off end of record" error). */
+/* 22 Feb. 1994: lread.c: check that realloc did not return NULL. */
+/* 6 June 1994: Under NON_UNIX_STDIO, use binary mode for direct
+ formatted files (avoiding any confusion regarding \n). */
+/* 5 July 1994: Fix bug (introduced 6 June 1994?) in reopening files
+ under NON_UNIX_STDIO. */
+/* 6 July 1994: wref.c: protect with #ifdef GOOD_SPRINTF_EXPONENT an
+ optimization that requires exponents to have 2 digits
+ when 2 digits suffice.
+ lwrite.c wsfe.c (list and formatted external output):
+ omit ' ' carriage-control when compiled with
+ -DOMIT_BLANK_CC . Off-by-one bug fixed in character
+ count for list output of character strings.
+ Omit '.' in list-directed printing of Nan, Infinity. */
+/* 12 July 1994: wrtfmt.c: under G11.4, write 0. as " .0000 " rather
+ than " .0000E+00". */
+/* 3 Aug. 1994: lwrite.c: do not insert a newline when appending an
+ oversize item to an empty line. */
+/* 12 Aug. 1994: rsli.c rsne.c: fix glitch (reset nml_read) that kept
+ ERR= (in list- or format-directed input) from working
+ after a NAMELIST READ. */
+/* 7 Sept. 1994: typesize.c: adjust to allow types LOGICAL*1, LOGICAL*2,
+ INTEGER*1, and (under -DAllow_TYQUAD) INTEGER*8
+ in NAMELISTs. */
+/* 6 Oct. 1994: util.c: omit f__mvgbt, as it is never used. */
+/* 2 Nov. 1994: add #ifdef ALWAYS_FLUSH logic. */
+/* 26 Jan. 1995: wref.c: fix glitch in printing the exponent of 0 when
+ GOOD_SPRINTF_EXPONENT is not #defined. */
+/* 24 Feb. 1995: iio.c: z_getc: insert (unsigned char *) to allow
+ internal reading of characters with high-bit set
+ (on machines that sign-extend characters). */
+/* 14 March 1995:lread.c and rsfe.c: adjust s_rsle and s_rsfe to
+ check for end-of-file (to prevent infinite loops
+ with empty read statements). */
+/* 26 May 1995: iio.c: z_wnew: fix bug in handling T format items
+ in internal writes whose last item is written to
+ an earlier position than some previous item. */
+/* 29 Aug. 1995: backspace.c: adjust MSDOS logic. */
+/* 6 Sept. 1995: Adjust namelist input to treat a subscripted name
+ whose subscripts do not involve colons similarly
+ to the name without a subscript: accept several
+ values, stored in successive elements starting at
+ the indicated subscript. Adjust namelist output
+ to quote character strings (avoiding confusion with
+ arrays of character strings). Adjust f_init calls
+ for people who don't use libF77's main(); now open and
+ namelist read statements invoke f_init if needed. */
+/* 7 Sept. 1995: Fix some bugs with -DAllow_TYQUAD (for integer*8).
+ Add -DNo_Namelist_Comments lines to rsne.c. */
+/* 5 Oct. 1995: wrtfmt.c: fix bug with t editing (f__cursor was not
+ always zeroed in mv_cur). */
+/* 11 Oct. 1995: move defs of f__hiwater, f__svic, f__icptr from wrtfmt.c
+ to err.c */
+/* 15 Mar. 1996: lread.c, rsfe.c: honor END= in READ stmt with empty iolist */
+
+/* 13 May 1996: add ftell_.c and fseek_.c */
+/* 9 June 1996: Adjust rsli.c and lread.c so internal list input with
+ too few items in the input string will honor end= . */
+/* 12 Sept. 1995:fmtlib.c: fix glitch in printing the most negative integer. */
+/* 25 Sept. 1995:fmt.h: for formatted writes of negative integer*1 values,
+ make ic signed on ANSI systems. If formatted writes of
+ integer*1 values trouble you when using a K&R C compiler,
+ switch to an ANSI compiler or use a compiler flag that
+ makes characters signed. */
+/* 9 Dec. 1996: d[fu]e.c, err.c: complain about non-positive rec=
+ in direct read and write statements.
+ ftell_.c: change param "unit" to "Unit" for -DKR_headers. */
+/* 26 Feb. 1997: ftell_.c: on systems that define SEEK_SET, etc., use
+ SEEK_SET, SEEK_CUR, SEEK_END for *whence = 0, 1, 2. */
+/* 7 Apr. 1997: fmt.c: adjust to complain at missing numbers in formats
+ (but still treat missing ".nnn" as ".0"). */
+/* 11 Apr. 1997: err.c: attempt to make stderr line buffered rather
+ than fully buffered. (Buffering is needed for format
+ items T and TR.) */
+/* 27 May 1997: ftell_.c: fix typo (that caused the third argument to be
+ treated as 2 on some systems). */
+/* 5 Aug. 1997: lread.c: adjust to accord with a change to the Fortran 8X
+ draft (in 1990 or 1991) that rescinded permission to elide
+ quote marks in namelist input of character data; compile
+ with -DF8X_NML_ELIDE_QUOTES to get the old behavior.
+ wrtfmt.o: wrt_G: tweak to print the right number of 0's
+ for zero under G format. */
+/* 16 Aug. 1997: iio.c: fix bug in internal writes to an array of character
+ strings that sometimes caused one more array element than
+ required by the format to be blank-filled. Example:
+ format(1x). */
+/* 16 Sept. 1997:fmt.[ch] rdfmt.c wrtfmt.c: tweak struct syl for machines
+ with 64-bit pointers and 32-bit ints that did not 64-bit
+ align struct syl (e.g., Linux on the DEC Alpha). */
+/* 19 Jan. 1998: backspace.c: for b->ufmt==0, change sizeof(int) to
+ sizeof(uiolen). On machines where this would make a
+ difference, it is best for portability to compile libI77 with
+ -DUIOLEN_int (which will render the change invisible). */
+/* 4 March 1998: open.c: fix glitch in comparing file names under
+ -DNON_UNIX_STDIO */
+/* 17 March 1998: endfile.c, open.c: acquire temporary files from tmpfile(),
+ unless compiled with -DNON_ANSI_STDIO, which uses mktemp().
+ New buffering scheme independent of NON_UNIX_STDIO for
+ handling T format items. Now -DNON_UNIX_STDIO is no
+ longer be necessary for Linux, and libf2c no longer
+ causes stderr to be buffered -- the former setbuf or
+ setvbuf call for stderr was to make T format items work.
+ open.c: use the Posix access() function to check existence
+ or nonexistence of files, except under -DNON_POSIX_STDIO,
+ where trial fopen calls are used. */
+/* 5 April 1998: wsfe.c: make $ format item work: this was lost in the
+ changes of 17 March 1998. */
+/* 28 May 1998: backspace.c dfe.c due.c iio.c lread.c rsfe.c sue.c wsfe.c:
+ set f__curunit sooner so various error messages will
+ correctly identify the I/O unit involved. */
+/* 17 June 1998: lread.c: unless compiled with
+ ALLOW_FLOAT_IN_INTEGER_LIST_INPUT #defined, treat
+ floating-point numbers (containing either a decimal point
+ or an exponent field) as errors when they appear as list
+ input for integer data. */
+/* 7 Sept. 1998: move e_wdfe from sfe.c to dfe.c, where it was originally.
+ Why did it ever move to sfe.c? */
+/* 2 May 1999: open.c: set f__external (to get "external" versus "internal"
+ right in the error message if we cannot open the file).
+ err.c: cast a pointer difference to (int) for %d.
+ rdfmt.c: omit fixed-length buffer that could be overwritten
+ by formats Inn or Lnn with nn > 83. */
+/* 3 May 1999: open.c: insert two casts for machines with 64-bit longs. */
+/* 18 June 1999: backspace.c: allow for b->ufd changing in t_runc */
+/* 27 June 1999: rsne.c: fix bug in namelist input: a misplaced increment */
+/* could cause wrong array elements to be assigned; e.g., */
+/* "&input k(5)=10*1 &end" assigned k(5) and k(15..23) */
+/* 15 Nov. 1999: endfile.c: set state to writing (b->uwrt = 1) when an */
+/* endfile statement requires copying the file. */
+/* (Otherwise an immediately following rewind statement */
+/* could make the file appear empty.) Also, supply a */
+/* missing (long) cast in the sprintf call. */
+/* sfe.c: add #ifdef ALWAYS_FLUSH logic, for formatted I/O: */
+/* Compiling libf2c with -DALWAYS_FLUSH should prevent losing */
+/* any data in buffers should the program fault. It also */
+/* makes the program run more slowly. */
+/* 20 April 2000: rsne.c, xwsne.c: tweaks that only matter if ftnint and */
+/* ftnlen are of different fundamental types (different numbers */
+/* of bits). Since these files will not compile when this */
+/* change matters, the above VERSION string remains unchanged. */
+/* 4 July 2000: adjustments to permit compilation by C++ compilers; */
+/* VERSION string remains unchanged. */
+/* 5 Dec. 2000: lread.c: under namelist input, when reading a logical array, */
+/* treat Tstuff= and Fstuff= as new assignments rather than as */
+/* logical constants. */
+/* 22 Feb. 2001: endfile.c: adjust to use truncate() unless compiled with */
+/* -DNO_TRUNCATE (or with -DMSDOS). */
+/* 1 March 2001: endfile.c: switch to ftruncate (absent -DNO_TRUNCATE), */
+/* thus permitting truncation of scratch files on true Unix */
+/* systems, where scratch files have no name. Add an fflush() */
+/* (surprisingly) needed on some Linux systems. */
+/* 11 Oct. 2001: backspac.c dfe.c due.c endfile.c err.c fio.h fmt.c fmt.h */
+/* inquire.c open.c rdfmt.c sue.c util.c: change fseek and */
+/* ftell to FSEEK and FTELL (#defined to be fseek and ftell, */
+/* respectively, in fio.h unless otherwise #defined), and use */
+/* type OFF_T (#defined to be long unless otherwise #defined) */
+/* to permit handling files over 2GB long where possible, */
+/* with suitable -D options, provided for some systems in new */
+/* header file sysdep1.h (copied from sysdep1.h0 by default). */
+/* 15 Nov. 2001: endfile.c: add FSEEK after FTRUNCATE. */
+/* 28 Nov. 2001: fmt.h lwrite.c wref.c and (new) signbit.c: on IEEE systems, */
+/* print -0 as -0 when compiled with -DSIGNED_ZEROS. See */
+/* comments in makefile or (better) libf2c/makefile.* . */
+/* 6 Sept. 2002: rsne.c: fix bug with multiple repeat counts in reading */
+/* namelists, e.g., &nl a(2) = 3*1.0, 2*2.0, 3*3.0 / */
+/* 21 March 2003: err.c: before writing to a file after reading from it, */
+/* f_seek(file, 0, SEEK_CUR) to make writing legal in ANSI C. */
diff --git a/contrib/libs/libf2c/i_abs.c b/contrib/libs/libf2c/i_abs.c
new file mode 100644
index 0000000000..2b92c4aa78
--- /dev/null
+++ b/contrib/libs/libf2c/i_abs.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+integer i_abs(x) integer *x;
+#else
+integer i_abs(integer *x)
+#endif
+{
+if(*x >= 0)
+ return(*x);
+return(- *x);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/i_ceiling.c b/contrib/libs/libf2c/i_ceiling.c
new file mode 100644
index 0000000000..f708a8b76e
--- /dev/null
+++ b/contrib/libs/libf2c/i_ceiling.c
@@ -0,0 +1,36 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+integer i_sceiling(x) real *x;
+#else
+#ifdef __cplusplus
+extern "C" {
+#endif
+integer i_sceiling(real *x)
+#endif
+{
+#define CEIL(x) ((int)(x) + ((x) > 0 && (x) != (int)(x)))
+
+ return (integer) CEIL(*x);
+}
+#ifdef __cplusplus
+}
+#endif
+
+
+#ifdef KR_headers
+integer i_dceiling(x) doublereal *x;
+#else
+#ifdef __cplusplus
+extern "C" {
+#endif
+integer i_dceiling(doublereal *x)
+#endif
+{
+#define CEIL(x) ((int)(x) + ((x) > 0 && (x) != (int)(x)))
+
+ return (integer) CEIL(*x);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/i_dim.c b/contrib/libs/libf2c/i_dim.c
new file mode 100644
index 0000000000..60ed4d8c5b
--- /dev/null
+++ b/contrib/libs/libf2c/i_dim.c
@@ -0,0 +1,16 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+integer i_dim(a,b) integer *a, *b;
+#else
+integer i_dim(integer *a, integer *b)
+#endif
+{
+return( *a > *b ? *a - *b : 0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/i_dnnt.c b/contrib/libs/libf2c/i_dnnt.c
new file mode 100644
index 0000000000..3abc2dc4a5
--- /dev/null
+++ b/contrib/libs/libf2c/i_dnnt.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double floor();
+integer i_dnnt(x) doublereal *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+integer i_dnnt(doublereal *x)
+#endif
+{
+return (integer)(*x >= 0. ? floor(*x + .5) : -floor(.5 - *x));
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/i_indx.c b/contrib/libs/libf2c/i_indx.c
new file mode 100644
index 0000000000..19256393ec
--- /dev/null
+++ b/contrib/libs/libf2c/i_indx.c
@@ -0,0 +1,32 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+integer i_indx(a, b, la, lb) char *a, *b; ftnlen la, lb;
+#else
+integer i_indx(char *a, char *b, ftnlen la, ftnlen lb)
+#endif
+{
+ftnlen i, n;
+char *s, *t, *bend;
+
+n = la - lb + 1;
+bend = b + lb;
+
+for(i = 0 ; i < n ; ++i)
+ {
+ s = a + i;
+ t = b;
+ while(t < bend)
+ if(*s++ != *t++)
+ goto no;
+ return(i+1);
+ no: ;
+ }
+return(0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/i_len.c b/contrib/libs/libf2c/i_len.c
new file mode 100644
index 0000000000..0f7b188d65
--- /dev/null
+++ b/contrib/libs/libf2c/i_len.c
@@ -0,0 +1,16 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+integer i_len(s, n) char *s; ftnlen n;
+#else
+integer i_len(char *s, ftnlen n)
+#endif
+{
+return(n);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/i_len_trim.c b/contrib/libs/libf2c/i_len_trim.c
new file mode 100644
index 0000000000..c7b7680304
--- /dev/null
+++ b/contrib/libs/libf2c/i_len_trim.c
@@ -0,0 +1,22 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+integer i_len_trim(s, n) char *s; ftnlen n;
+#else
+integer i_len_trim(char *s, ftnlen n)
+#endif
+{
+ int i;
+
+ for(i=n-1;i>=0;i--)
+ if(s[i] != ' ')
+ return i + 1;
+
+ return(0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/i_mod.c b/contrib/libs/libf2c/i_mod.c
new file mode 100644
index 0000000000..4a9b5609ba
--- /dev/null
+++ b/contrib/libs/libf2c/i_mod.c
@@ -0,0 +1,16 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+integer i_mod(a,b) integer *a, *b;
+#else
+integer i_mod(integer *a, integer *b)
+#endif
+{
+return( *a % *b);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/i_nint.c b/contrib/libs/libf2c/i_nint.c
new file mode 100644
index 0000000000..fe9fd68a86
--- /dev/null
+++ b/contrib/libs/libf2c/i_nint.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double floor();
+integer i_nint(x) real *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+integer i_nint(real *x)
+#endif
+{
+return (integer)(*x >= 0 ? floor(*x + .5) : -floor(.5 - *x));
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/i_sign.c b/contrib/libs/libf2c/i_sign.c
new file mode 100644
index 0000000000..4c20e9494e
--- /dev/null
+++ b/contrib/libs/libf2c/i_sign.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+integer i_sign(a,b) integer *a, *b;
+#else
+integer i_sign(integer *a, integer *b)
+#endif
+{
+integer x;
+x = (*a >= 0 ? *a : - *a);
+return( *b >= 0 ? x : -x);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/iargc_.c b/contrib/libs/libf2c/iargc_.c
new file mode 100644
index 0000000000..2f29da0eaa
--- /dev/null
+++ b/contrib/libs/libf2c/iargc_.c
@@ -0,0 +1,17 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+ftnint iargc_()
+#else
+ftnint iargc_(void)
+#endif
+{
+extern int xargc;
+return ( xargc - 1 );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/iio.c b/contrib/libs/libf2c/iio.c
new file mode 100644
index 0000000000..8553efcf97
--- /dev/null
+++ b/contrib/libs/libf2c/iio.c
@@ -0,0 +1,159 @@
+#include "f2c.h"
+#include "fio.h"
+#include "fmt.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern char *f__icptr;
+char *f__icend;
+extern icilist *f__svic;
+int f__icnum;
+
+ int
+z_getc(Void)
+{
+ if(f__recpos++ < f__svic->icirlen) {
+ if(f__icptr >= f__icend) err(f__svic->iciend,(EOF),"endfile");
+ return(*(unsigned char *)f__icptr++);
+ }
+ return '\n';
+}
+
+ void
+#ifdef KR_headers
+z_putc(c)
+#else
+z_putc(int c)
+#endif
+{
+ if (f__icptr < f__icend && f__recpos++ < f__svic->icirlen)
+ *f__icptr++ = c;
+}
+
+ int
+z_rnew(Void)
+{
+ f__icptr = f__svic->iciunit + (++f__icnum)*f__svic->icirlen;
+ f__recpos = 0;
+ f__cursor = 0;
+ f__hiwater = 0;
+ return 1;
+}
+
+ static int
+z_endp(Void)
+{
+ (*f__donewrec)();
+ return 0;
+ }
+
+ int
+#ifdef KR_headers
+c_si(a) icilist *a;
+#else
+c_si(icilist *a)
+#endif
+{
+ f__elist = (cilist *)a;
+ f__fmtbuf=a->icifmt;
+ f__curunit = 0;
+ f__sequential=f__formatted=1;
+ f__external=0;
+ if(pars_f(f__fmtbuf)<0)
+ err(a->icierr,100,"startint");
+ fmt_bg();
+ f__cblank=f__cplus=f__scale=0;
+ f__svic=a;
+ f__icnum=f__recpos=0;
+ f__cursor = 0;
+ f__hiwater = 0;
+ f__icptr = a->iciunit;
+ f__icend = f__icptr + a->icirlen*a->icirnum;
+ f__cf = 0;
+ return(0);
+}
+
+ int
+iw_rev(Void)
+{
+ if(f__workdone)
+ z_endp();
+ f__hiwater = f__recpos = f__cursor = 0;
+ return(f__workdone=0);
+ }
+
+#ifdef KR_headers
+integer s_rsfi(a) icilist *a;
+#else
+integer s_rsfi(icilist *a)
+#endif
+{ int n;
+ if(n=c_si(a)) return(n);
+ f__reading=1;
+ f__doed=rd_ed;
+ f__doned=rd_ned;
+ f__getn=z_getc;
+ f__dorevert = z_endp;
+ f__donewrec = z_rnew;
+ f__doend = z_endp;
+ return(0);
+}
+
+ int
+z_wnew(Void)
+{
+ if (f__recpos < f__hiwater) {
+ f__icptr += f__hiwater - f__recpos;
+ f__recpos = f__hiwater;
+ }
+ while(f__recpos++ < f__svic->icirlen)
+ *f__icptr++ = ' ';
+ f__recpos = 0;
+ f__cursor = 0;
+ f__hiwater = 0;
+ f__icnum++;
+ return 1;
+}
+#ifdef KR_headers
+integer s_wsfi(a) icilist *a;
+#else
+integer s_wsfi(icilist *a)
+#endif
+{ int n;
+ if(n=c_si(a)) return(n);
+ f__reading=0;
+ f__doed=w_ed;
+ f__doned=w_ned;
+ f__putn=z_putc;
+ f__dorevert = iw_rev;
+ f__donewrec = z_wnew;
+ f__doend = z_endp;
+ return(0);
+}
+integer e_rsfi(Void)
+{ int n = en_fio();
+ f__fmtbuf = NULL;
+ return(n);
+}
+integer e_wsfi(Void)
+{
+ int n;
+ n = en_fio();
+ f__fmtbuf = NULL;
+ if(f__svic->icirnum != 1
+ && (f__icnum > f__svic->icirnum
+ || (f__icnum == f__svic->icirnum && (f__recpos | f__hiwater))))
+ err(f__svic->icierr,110,"inwrite");
+ if (f__recpos < f__hiwater)
+ f__recpos = f__hiwater;
+ if (f__recpos >= f__svic->icirlen)
+ err(f__svic->icierr,110,"recend");
+ if (!f__recpos && f__icnum)
+ return n;
+ while(f__recpos++ < f__svic->icirlen)
+ *f__icptr++ = ' ';
+ return n;
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/ilnw.c b/contrib/libs/libf2c/ilnw.c
new file mode 100644
index 0000000000..e8b3d49cf1
--- /dev/null
+++ b/contrib/libs/libf2c/ilnw.c
@@ -0,0 +1,83 @@
+#include "f2c.h"
+#include "fio.h"
+#include "lio.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern char *f__icptr;
+extern char *f__icend;
+extern icilist *f__svic;
+extern int f__icnum;
+#ifdef KR_headers
+extern void z_putc();
+#else
+extern void z_putc(int);
+#endif
+
+ static int
+z_wSL(Void)
+{
+ while(f__recpos < f__svic->icirlen)
+ z_putc(' ');
+ return z_rnew();
+ }
+
+ static void
+#ifdef KR_headers
+c_liw(a) icilist *a;
+#else
+c_liw(icilist *a)
+#endif
+{
+ f__reading = 0;
+ f__external = 0;
+ f__formatted = 1;
+ f__putn = z_putc;
+ L_len = a->icirlen;
+ f__donewrec = z_wSL;
+ f__svic = a;
+ f__icnum = f__recpos = 0;
+ f__cursor = 0;
+ f__cf = 0;
+ f__curunit = 0;
+ f__icptr = a->iciunit;
+ f__icend = f__icptr + a->icirlen*a->icirnum;
+ f__elist = (cilist *)a;
+ }
+
+ integer
+#ifdef KR_headers
+s_wsni(a) icilist *a;
+#else
+s_wsni(icilist *a)
+#endif
+{
+ cilist ca;
+
+ c_liw(a);
+ ca.cifmt = a->icifmt;
+ x_wsne(&ca);
+ z_wSL();
+ return 0;
+ }
+
+ integer
+#ifdef KR_headers
+s_wsli(a) icilist *a;
+#else
+s_wsli(icilist *a)
+#endif
+{
+ f__lioproc = l_write;
+ c_liw(a);
+ return(0);
+ }
+
+integer e_wsli(Void)
+{
+ z_wSL();
+ return(0);
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/inquire.c b/contrib/libs/libf2c/inquire.c
new file mode 100644
index 0000000000..5936a674cc
--- /dev/null
+++ b/contrib/libs/libf2c/inquire.c
@@ -0,0 +1,117 @@
+#include "f2c.h"
+#include "fio.h"
+#include "string.h"
+#ifdef NON_UNIX_STDIO
+#ifndef MSDOS
+#include "unistd.h" /* for access() */
+#endif
+#endif
+#ifdef KR_headers
+integer f_inqu(a) inlist *a;
+#else
+#ifdef __cplusplus
+extern "C" integer f_inqu(inlist*);
+#endif
+#ifdef MSDOS
+#undef abs
+#undef min
+#undef max
+#include "io.h"
+#endif
+integer f_inqu(inlist *a)
+#endif
+{ flag byfile;
+ int i;
+#ifndef NON_UNIX_STDIO
+ int n;
+#endif
+ unit *p;
+ char buf[256];
+ long x;
+ if(a->infile!=NULL)
+ { byfile=1;
+ g_char(a->infile,a->infilen,buf);
+#ifdef NON_UNIX_STDIO
+ x = access(buf,0) ? -1 : 0;
+ for(i=0,p=NULL;i<MXUNIT;i++)
+ if(f__units[i].ufd != NULL
+ && f__units[i].ufnm != NULL
+ && !strcmp(f__units[i].ufnm,buf)) {
+ p = &f__units[i];
+ break;
+ }
+#else
+ x=f__inode(buf, &n);
+ for(i=0,p=NULL;i<MXUNIT;i++)
+ if(f__units[i].uinode==x
+ && f__units[i].ufd!=NULL
+ && f__units[i].udev == n) {
+ p = &f__units[i];
+ break;
+ }
+#endif
+ }
+ else
+ {
+ byfile=0;
+ if(a->inunit<MXUNIT && a->inunit>=0)
+ {
+ p= &f__units[a->inunit];
+ }
+ else
+ {
+ p=NULL;
+ }
+ }
+ if(a->inex!=NULL)
+ if(byfile && x != -1 || !byfile && p!=NULL)
+ *a->inex=1;
+ else *a->inex=0;
+ if(a->inopen!=NULL)
+ if(byfile) *a->inopen=(p!=NULL);
+ else *a->inopen=(p!=NULL && p->ufd!=NULL);
+ if(a->innum!=NULL) *a->innum= p-f__units;
+ if(a->innamed!=NULL)
+ if(byfile || p!=NULL && p->ufnm!=NULL)
+ *a->innamed=1;
+ else *a->innamed=0;
+ if(a->inname!=NULL)
+ if(byfile)
+ b_char(buf,a->inname,a->innamlen);
+ else if(p!=NULL && p->ufnm!=NULL)
+ b_char(p->ufnm,a->inname,a->innamlen);
+ if(a->inacc!=NULL && p!=NULL && p->ufd!=NULL)
+ if(p->url)
+ b_char("DIRECT",a->inacc,a->inacclen);
+ else b_char("SEQUENTIAL",a->inacc,a->inacclen);
+ if(a->inseq!=NULL)
+ if(p!=NULL && p->url)
+ b_char("NO",a->inseq,a->inseqlen);
+ else b_char("YES",a->inseq,a->inseqlen);
+ if(a->indir!=NULL)
+ if(p==NULL || p->url)
+ b_char("YES",a->indir,a->indirlen);
+ else b_char("NO",a->indir,a->indirlen);
+ if(a->infmt!=NULL)
+ if(p!=NULL && p->ufmt==0)
+ b_char("UNFORMATTED",a->infmt,a->infmtlen);
+ else b_char("FORMATTED",a->infmt,a->infmtlen);
+ if(a->inform!=NULL)
+ if(p!=NULL && p->ufmt==0)
+ b_char("NO",a->inform,a->informlen);
+ else b_char("YES",a->inform,a->informlen);
+ if(a->inunf)
+ if(p!=NULL && p->ufmt==0)
+ b_char("YES",a->inunf,a->inunflen);
+ else if (p!=NULL) b_char("NO",a->inunf,a->inunflen);
+ else b_char("UNKNOWN",a->inunf,a->inunflen);
+ if(a->inrecl!=NULL && p!=NULL)
+ *a->inrecl=p->url;
+ if(a->innrec!=NULL && p!=NULL && p->url>0)
+ *a->innrec=(ftnint)(FTELL(p->ufd)/p->url+1);
+ if(a->inblank && p!=NULL && p->ufmt)
+ if(p->ublnk)
+ b_char("ZERO",a->inblank,a->inblanklen);
+ else b_char("NULL",a->inblank,a->inblanklen);
+ return(0);
+}
diff --git a/contrib/libs/libf2c/l_ge.c b/contrib/libs/libf2c/l_ge.c
new file mode 100644
index 0000000000..a84f0ee4ab
--- /dev/null
+++ b/contrib/libs/libf2c/l_ge.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+extern integer s_cmp();
+logical l_ge(a,b,la,lb) char *a, *b; ftnlen la, lb;
+#else
+extern integer s_cmp(char *, char *, ftnlen, ftnlen);
+logical l_ge(char *a, char *b, ftnlen la, ftnlen lb)
+#endif
+{
+return(s_cmp(a,b,la,lb) >= 0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/l_gt.c b/contrib/libs/libf2c/l_gt.c
new file mode 100644
index 0000000000..ae6950d139
--- /dev/null
+++ b/contrib/libs/libf2c/l_gt.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+extern integer s_cmp();
+logical l_gt(a,b,la,lb) char *a, *b; ftnlen la, lb;
+#else
+extern integer s_cmp(char *, char *, ftnlen, ftnlen);
+logical l_gt(char *a, char *b, ftnlen la, ftnlen lb)
+#endif
+{
+return(s_cmp(a,b,la,lb) > 0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/l_le.c b/contrib/libs/libf2c/l_le.c
new file mode 100644
index 0000000000..625b49a9ea
--- /dev/null
+++ b/contrib/libs/libf2c/l_le.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+extern integer s_cmp();
+logical l_le(a,b,la,lb) char *a, *b; ftnlen la, lb;
+#else
+extern integer s_cmp(char *, char *, ftnlen, ftnlen);
+logical l_le(char *a, char *b, ftnlen la, ftnlen lb)
+#endif
+{
+return(s_cmp(a,b,la,lb) <= 0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/l_lt.c b/contrib/libs/libf2c/l_lt.c
new file mode 100644
index 0000000000..ab21b362de
--- /dev/null
+++ b/contrib/libs/libf2c/l_lt.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+extern integer s_cmp();
+logical l_lt(a,b,la,lb) char *a, *b; ftnlen la, lb;
+#else
+extern integer s_cmp(char *, char *, ftnlen, ftnlen);
+logical l_lt(char *a, char *b, ftnlen la, ftnlen lb)
+#endif
+{
+return(s_cmp(a,b,la,lb) < 0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/lbitbits.c b/contrib/libs/libf2c/lbitbits.c
new file mode 100644
index 0000000000..5b6ccf72d0
--- /dev/null
+++ b/contrib/libs/libf2c/lbitbits.c
@@ -0,0 +1,68 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef LONGBITS
+#define LONGBITS 32
+#endif
+
+ integer
+#ifdef KR_headers
+lbit_bits(a, b, len) integer a, b, len;
+#else
+lbit_bits(integer a, integer b, integer len)
+#endif
+{
+ /* Assume 2's complement arithmetic */
+
+ unsigned long x, y;
+
+ x = (unsigned long) a;
+ y = (unsigned long)-1L;
+ x >>= b;
+ y <<= len;
+ return (integer)(x & ~y);
+ }
+
+ integer
+#ifdef KR_headers
+lbit_cshift(a, b, len) integer a, b, len;
+#else
+lbit_cshift(integer a, integer b, integer len)
+#endif
+{
+ unsigned long x, y, z;
+
+ x = (unsigned long)a;
+ if (len <= 0) {
+ if (len == 0)
+ return 0;
+ goto full_len;
+ }
+ if (len >= LONGBITS) {
+ full_len:
+ if (b >= 0) {
+ b %= LONGBITS;
+ return (integer)(x << b | x >> LONGBITS -b );
+ }
+ b = -b;
+ b %= LONGBITS;
+ return (integer)(x << LONGBITS - b | x >> b);
+ }
+ y = z = (unsigned long)-1;
+ y <<= len;
+ z &= ~y;
+ y &= x;
+ x &= z;
+ if (b >= 0) {
+ b %= len;
+ return (integer)(y | z & (x << b | x >> len - b));
+ }
+ b = -b;
+ b %= len;
+ return (integer)(y | z & (x >> b | x << len - b));
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/lbitshft.c b/contrib/libs/libf2c/lbitshft.c
new file mode 100644
index 0000000000..fbee94f140
--- /dev/null
+++ b/contrib/libs/libf2c/lbitshft.c
@@ -0,0 +1,17 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ integer
+#ifdef KR_headers
+lbit_shift(a, b) integer a; integer b;
+#else
+lbit_shift(integer a, integer b)
+#endif
+{
+ return b >= 0 ? a << b : (integer)((uinteger)a >> -b);
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/lio.h b/contrib/libs/libf2c/lio.h
new file mode 100644
index 0000000000..f9fd1cda8b
--- /dev/null
+++ b/contrib/libs/libf2c/lio.h
@@ -0,0 +1,74 @@
+/* copy of ftypes from the compiler */
+/* variable types
+ * numeric assumptions:
+ * int < reals < complexes
+ * TYDREAL-TYREAL = TYDCOMPLEX-TYCOMPLEX
+ */
+
+/* 0-10 retain their old (pre LOGICAL*1, etc.) */
+/* values to allow mixing old and new objects. */
+
+#define TYUNKNOWN 0
+#define TYADDR 1
+#define TYSHORT 2
+#define TYLONG 3
+#define TYREAL 4
+#define TYDREAL 5
+#define TYCOMPLEX 6
+#define TYDCOMPLEX 7
+#define TYLOGICAL 8
+#define TYCHAR 9
+#define TYSUBR 10
+#define TYINT1 11
+#define TYLOGICAL1 12
+#define TYLOGICAL2 13
+#ifdef Allow_TYQUAD
+#undef TYQUAD
+#define TYQUAD 14
+#endif
+
+#define LINTW 24
+#define LINE 80
+#define LLOGW 2
+#ifdef Old_list_output
+#define LLOW 1.0
+#define LHIGH 1.e9
+#define LEFMT " %# .8E"
+#define LFFMT " %# .9g"
+#else
+#define LGFMT "%.9G"
+#endif
+/* LEFBL 20 should suffice; 24 overcomes a NeXT bug. */
+#define LEFBL 24
+
+typedef union
+{
+ char flchar;
+ short flshort;
+ ftnint flint;
+#ifdef Allow_TYQUAD
+ longint fllongint;
+#endif
+ real flreal;
+ doublereal fldouble;
+} flex;
+#ifdef KR_headers
+extern int (*f__lioproc)(), (*l_getc)(), (*l_ungetc)();
+extern int l_read(), l_write();
+#else
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern int (*f__lioproc)(ftnint*, char*, ftnlen, ftnint);
+extern int l_write(ftnint*, char*, ftnlen, ftnint);
+extern void x_wsne(cilist*);
+extern int c_le(cilist*), (*l_getc)(void), (*l_ungetc)(int,FILE*);
+extern int l_read(ftnint*,char*,ftnlen,ftnint);
+extern integer e_rsle(void), e_wsle(void), s_wsne(cilist*);
+extern int z_rnew(void);
+#endif
+extern ftnint L_len;
+extern int f__scale;
+#ifdef __cplusplus
+ }
+#endif
diff --git a/contrib/libs/libf2c/lread.c b/contrib/libs/libf2c/lread.c
new file mode 100644
index 0000000000..e2a7b818a0
--- /dev/null
+++ b/contrib/libs/libf2c/lread.c
@@ -0,0 +1,806 @@
+#include "f2c.h"
+#include "fio.h"
+
+/* Compile with -DF8X_NML_ELIDE_QUOTES to permit eliding quotation */
+/* marks in namelist input a la the Fortran 8X Draft published in */
+/* the May 1989 issue of Fortran Forum. */
+
+
+#ifdef Allow_TYQUAD
+static longint f__llx;
+#endif
+
+#ifdef KR_headers
+extern double atof();
+extern char *malloc(), *realloc();
+int (*f__lioproc)(), (*l_getc)(), (*l_ungetc)();
+#else
+#undef abs
+#undef min
+#undef max
+#include "stdlib.h"
+#endif
+
+#include "fmt.h"
+#include "lio.h"
+#include "ctype_.h"
+#include "fp.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+extern char *f__fmtbuf;
+#else
+extern const char *f__fmtbuf;
+int (*f__lioproc)(ftnint*, char*, ftnlen, ftnint), (*l_getc)(void),
+ (*l_ungetc)(int,FILE*);
+#endif
+
+int l_eof;
+
+#define isblnk(x) (f__ltab[x+1]&B)
+#define issep(x) (f__ltab[x+1]&SX)
+#define isapos(x) (f__ltab[x+1]&AX)
+#define isexp(x) (f__ltab[x+1]&EX)
+#define issign(x) (f__ltab[x+1]&SG)
+#define iswhit(x) (f__ltab[x+1]&WH)
+#define SX 1
+#define B 2
+#define AX 4
+#define EX 8
+#define SG 16
+#define WH 32
+char f__ltab[128+1] = { /* offset one for EOF */
+ 0,
+ 0,0,AX,0,0,0,0,0,0,WH|B,SX|WH,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ SX|B|WH,0,AX,0,0,0,0,AX,0,0,0,SG,SX,SG,0,SX,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,EX,EX,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ AX,0,0,0,EX,EX,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+};
+
+#ifdef ungetc
+ static int
+#ifdef KR_headers
+un_getc(x,f__cf) int x; FILE *f__cf;
+#else
+un_getc(int x, FILE *f__cf)
+#endif
+{ return ungetc(x,f__cf); }
+#else
+#define un_getc ungetc
+#ifdef KR_headers
+ extern int ungetc();
+#else
+extern int ungetc(int, FILE*); /* for systems with a buggy stdio.h */
+#endif
+#endif
+
+ int
+t_getc(Void)
+{ int ch;
+ if(f__curunit->uend) return(EOF);
+ if((ch=getc(f__cf))!=EOF) return(ch);
+ if(feof(f__cf))
+ f__curunit->uend = l_eof = 1;
+ return(EOF);
+}
+integer e_rsle(Void)
+{
+ int ch;
+ if(f__curunit->uend) return(0);
+ while((ch=t_getc())!='\n')
+ if (ch == EOF) {
+ if(feof(f__cf))
+ f__curunit->uend = l_eof = 1;
+ return EOF;
+ }
+ return(0);
+}
+
+flag f__lquit;
+int f__lcount,f__ltype,nml_read;
+char *f__lchar;
+double f__lx,f__ly;
+#define ERR(x) if(n=(x)) return(n)
+#define GETC(x) (x=(*l_getc)())
+#define Ungetc(x,y) (*l_ungetc)(x,y)
+
+ static int
+#ifdef KR_headers
+l_R(poststar, reqint) int poststar, reqint;
+#else
+l_R(int poststar, int reqint)
+#endif
+{
+ char s[FMAX+EXPMAXDIGS+4];
+ register int ch;
+ register char *sp, *spe, *sp1;
+ long e, exp;
+ int havenum, havestar, se;
+
+ if (!poststar) {
+ if (f__lcount > 0)
+ return(0);
+ f__lcount = 1;
+ }
+#ifdef Allow_TYQUAD
+ f__llx = 0;
+#endif
+ f__ltype = 0;
+ exp = 0;
+ havestar = 0;
+retry:
+ sp1 = sp = s;
+ spe = sp + FMAX;
+ havenum = 0;
+
+ switch(GETC(ch)) {
+ case '-': *sp++ = ch; sp1++; spe++;
+ case '+':
+ GETC(ch);
+ }
+ while(ch == '0') {
+ ++havenum;
+ GETC(ch);
+ }
+ while(isdigit(ch)) {
+ if (sp < spe) *sp++ = ch;
+ else ++exp;
+ GETC(ch);
+ }
+ if (ch == '*' && !poststar) {
+ if (sp == sp1 || exp || *s == '-') {
+ errfl(f__elist->cierr,112,"bad repetition count");
+ }
+ poststar = havestar = 1;
+ *sp = 0;
+ f__lcount = atoi(s);
+ goto retry;
+ }
+ if (ch == '.') {
+#ifndef ALLOW_FLOAT_IN_INTEGER_LIST_INPUT
+ if (reqint)
+ errfl(f__elist->cierr,115,"invalid integer");
+#endif
+ GETC(ch);
+ if (sp == sp1)
+ while(ch == '0') {
+ ++havenum;
+ --exp;
+ GETC(ch);
+ }
+ while(isdigit(ch)) {
+ if (sp < spe)
+ { *sp++ = ch; --exp; }
+ GETC(ch);
+ }
+ }
+ havenum += sp - sp1;
+ se = 0;
+ if (issign(ch))
+ goto signonly;
+ if (havenum && isexp(ch)) {
+#ifndef ALLOW_FLOAT_IN_INTEGER_LIST_INPUT
+ if (reqint)
+ errfl(f__elist->cierr,115,"invalid integer");
+#endif
+ GETC(ch);
+ if (issign(ch)) {
+signonly:
+ if (ch == '-') se = 1;
+ GETC(ch);
+ }
+ if (!isdigit(ch)) {
+bad:
+ errfl(f__elist->cierr,112,"exponent field");
+ }
+
+ e = ch - '0';
+ while(isdigit(GETC(ch))) {
+ e = 10*e + ch - '0';
+ if (e > EXPMAX)
+ goto bad;
+ }
+ if (se)
+ exp -= e;
+ else
+ exp += e;
+ }
+ (void) Ungetc(ch, f__cf);
+ if (sp > sp1) {
+ ++havenum;
+ while(*--sp == '0')
+ ++exp;
+ if (exp)
+ sprintf(sp+1, "e%ld", exp);
+ else
+ sp[1] = 0;
+ f__lx = atof(s);
+#ifdef Allow_TYQUAD
+ if (reqint&2 && (se = sp - sp1 + exp) > 14 && se < 20) {
+ /* Assuming 64-bit longint and 32-bit long. */
+ if (exp < 0)
+ sp += exp;
+ if (sp1 <= sp) {
+ f__llx = *sp1 - '0';
+ while(++sp1 <= sp)
+ f__llx = 10*f__llx + (*sp1 - '0');
+ }
+ while(--exp >= 0)
+ f__llx *= 10;
+ if (*s == '-')
+ f__llx = -f__llx;
+ }
+#endif
+ }
+ else
+ f__lx = 0.;
+ if (havenum)
+ f__ltype = TYLONG;
+ else
+ switch(ch) {
+ case ',':
+ case '/':
+ break;
+ default:
+ if (havestar && ( ch == ' '
+ ||ch == '\t'
+ ||ch == '\n'))
+ break;
+ if (nml_read > 1) {
+ f__lquit = 2;
+ return 0;
+ }
+ errfl(f__elist->cierr,112,"invalid number");
+ }
+ return 0;
+ }
+
+ static int
+#ifdef KR_headers
+rd_count(ch) register int ch;
+#else
+rd_count(register int ch)
+#endif
+{
+ if (ch < '0' || ch > '9')
+ return 1;
+ f__lcount = ch - '0';
+ while(GETC(ch) >= '0' && ch <= '9')
+ f__lcount = 10*f__lcount + ch - '0';
+ Ungetc(ch,f__cf);
+ return f__lcount <= 0;
+ }
+
+ static int
+l_C(Void)
+{ int ch, nml_save;
+ double lz;
+ if(f__lcount>0) return(0);
+ f__ltype=0;
+ GETC(ch);
+ if(ch!='(')
+ {
+ if (nml_read > 1 && (ch < '0' || ch > '9')) {
+ Ungetc(ch,f__cf);
+ f__lquit = 2;
+ return 0;
+ }
+ if (rd_count(ch))
+ if(!f__cf || !feof(f__cf))
+ errfl(f__elist->cierr,112,"complex format");
+ else
+ err(f__elist->cierr,(EOF),"lread");
+ if(GETC(ch)!='*')
+ {
+ if(!f__cf || !feof(f__cf))
+ errfl(f__elist->cierr,112,"no star");
+ else
+ err(f__elist->cierr,(EOF),"lread");
+ }
+ if(GETC(ch)!='(')
+ { Ungetc(ch,f__cf);
+ return(0);
+ }
+ }
+ else
+ f__lcount = 1;
+ while(iswhit(GETC(ch)));
+ Ungetc(ch,f__cf);
+ nml_save = nml_read;
+ nml_read = 0;
+ if (ch = l_R(1,0))
+ return ch;
+ if (!f__ltype)
+ errfl(f__elist->cierr,112,"no real part");
+ lz = f__lx;
+ while(iswhit(GETC(ch)));
+ if(ch!=',')
+ { (void) Ungetc(ch,f__cf);
+ errfl(f__elist->cierr,112,"no comma");
+ }
+ while(iswhit(GETC(ch)));
+ (void) Ungetc(ch,f__cf);
+ if (ch = l_R(1,0))
+ return ch;
+ if (!f__ltype)
+ errfl(f__elist->cierr,112,"no imaginary part");
+ while(iswhit(GETC(ch)));
+ if(ch!=')') errfl(f__elist->cierr,112,"no )");
+ f__ly = f__lx;
+ f__lx = lz;
+#ifdef Allow_TYQUAD
+ f__llx = 0;
+#endif
+ nml_read = nml_save;
+ return(0);
+}
+
+ static char nmLbuf[256], *nmL_next;
+ static int (*nmL_getc_save)(Void);
+#ifdef KR_headers
+ static int (*nmL_ungetc_save)(/* int, FILE* */);
+#else
+ static int (*nmL_ungetc_save)(int, FILE*);
+#endif
+
+ static int
+nmL_getc(Void)
+{
+ int rv;
+ if (rv = *nmL_next++)
+ return rv;
+ l_getc = nmL_getc_save;
+ l_ungetc = nmL_ungetc_save;
+ return (*l_getc)();
+ }
+
+ static int
+#ifdef KR_headers
+nmL_ungetc(x, f) int x; FILE *f;
+#else
+nmL_ungetc(int x, FILE *f)
+#endif
+{
+ f = f; /* banish non-use warning */
+ return *--nmL_next = x;
+ }
+
+ static int
+#ifdef KR_headers
+Lfinish(ch, dot, rvp) int ch, dot, *rvp;
+#else
+Lfinish(int ch, int dot, int *rvp)
+#endif
+{
+ char *s, *se;
+ static char what[] = "namelist input";
+
+ s = nmLbuf + 2;
+ se = nmLbuf + sizeof(nmLbuf) - 1;
+ *s++ = ch;
+ while(!issep(GETC(ch)) && ch!=EOF) {
+ if (s >= se) {
+ nmLbuf_ovfl:
+ return *rvp = err__fl(f__elist->cierr,131,what);
+ }
+ *s++ = ch;
+ if (ch != '=')
+ continue;
+ if (dot)
+ return *rvp = err__fl(f__elist->cierr,112,what);
+ got_eq:
+ *s = 0;
+ nmL_getc_save = l_getc;
+ l_getc = nmL_getc;
+ nmL_ungetc_save = l_ungetc;
+ l_ungetc = nmL_ungetc;
+ nmLbuf[1] = *(nmL_next = nmLbuf) = ',';
+ *rvp = f__lcount = 0;
+ return 1;
+ }
+ if (dot)
+ goto done;
+ for(;;) {
+ if (s >= se)
+ goto nmLbuf_ovfl;
+ *s++ = ch;
+ if (!isblnk(ch))
+ break;
+ if (GETC(ch) == EOF)
+ goto done;
+ }
+ if (ch == '=')
+ goto got_eq;
+ done:
+ Ungetc(ch, f__cf);
+ return 0;
+ }
+
+ static int
+l_L(Void)
+{
+ int ch, rv, sawdot;
+
+ if(f__lcount>0)
+ return(0);
+ f__lcount = 1;
+ f__ltype=0;
+ GETC(ch);
+ if(isdigit(ch))
+ {
+ rd_count(ch);
+ if(GETC(ch)!='*')
+ if(!f__cf || !feof(f__cf))
+ errfl(f__elist->cierr,112,"no star");
+ else
+ err(f__elist->cierr,(EOF),"lread");
+ GETC(ch);
+ }
+ sawdot = 0;
+ if(ch == '.') {
+ sawdot = 1;
+ GETC(ch);
+ }
+ switch(ch)
+ {
+ case 't':
+ case 'T':
+ if (nml_read && Lfinish(ch, sawdot, &rv))
+ return rv;
+ f__lx=1;
+ break;
+ case 'f':
+ case 'F':
+ if (nml_read && Lfinish(ch, sawdot, &rv))
+ return rv;
+ f__lx=0;
+ break;
+ default:
+ if(isblnk(ch) || issep(ch) || ch==EOF)
+ { (void) Ungetc(ch,f__cf);
+ return(0);
+ }
+ if (nml_read > 1) {
+ Ungetc(ch,f__cf);
+ f__lquit = 2;
+ return 0;
+ }
+ errfl(f__elist->cierr,112,"logical");
+ }
+ f__ltype=TYLONG;
+ while(!issep(GETC(ch)) && ch!=EOF);
+ Ungetc(ch, f__cf);
+ return(0);
+}
+
+#define BUFSIZE 128
+
+ static int
+l_CHAR(Void)
+{ int ch,size,i;
+ static char rafail[] = "realloc failure";
+ char quote,*p;
+ if(f__lcount>0) return(0);
+ f__ltype=0;
+ if(f__lchar!=NULL) free(f__lchar);
+ size=BUFSIZE;
+ p=f__lchar = (char *)malloc((unsigned int)size);
+ if(f__lchar == NULL)
+ errfl(f__elist->cierr,113,"no space");
+
+ GETC(ch);
+ if(isdigit(ch)) {
+ /* allow Fortran 8x-style unquoted string... */
+ /* either find a repetition count or the string */
+ f__lcount = ch - '0';
+ *p++ = ch;
+ for(i = 1;;) {
+ switch(GETC(ch)) {
+ case '*':
+ if (f__lcount == 0) {
+ f__lcount = 1;
+#ifndef F8X_NML_ELIDE_QUOTES
+ if (nml_read)
+ goto no_quote;
+#endif
+ goto noquote;
+ }
+ p = f__lchar;
+ goto have_lcount;
+ case ',':
+ case ' ':
+ case '\t':
+ case '\n':
+ case '/':
+ Ungetc(ch,f__cf);
+ /* no break */
+ case EOF:
+ f__lcount = 1;
+ f__ltype = TYCHAR;
+ return *p = 0;
+ }
+ if (!isdigit(ch)) {
+ f__lcount = 1;
+#ifndef F8X_NML_ELIDE_QUOTES
+ if (nml_read) {
+ no_quote:
+ errfl(f__elist->cierr,112,
+ "undelimited character string");
+ }
+#endif
+ goto noquote;
+ }
+ *p++ = ch;
+ f__lcount = 10*f__lcount + ch - '0';
+ if (++i == size) {
+ f__lchar = (char *)realloc(f__lchar,
+ (unsigned int)(size += BUFSIZE));
+ if(f__lchar == NULL)
+ errfl(f__elist->cierr,113,rafail);
+ p = f__lchar + i;
+ }
+ }
+ }
+ else (void) Ungetc(ch,f__cf);
+ have_lcount:
+ if(GETC(ch)=='\'' || ch=='"') quote=ch;
+ else if(isblnk(ch) || (issep(ch) && ch != '\n') || ch==EOF) {
+ Ungetc(ch,f__cf);
+ return 0;
+ }
+#ifndef F8X_NML_ELIDE_QUOTES
+ else if (nml_read > 1) {
+ Ungetc(ch,f__cf);
+ f__lquit = 2;
+ return 0;
+ }
+#endif
+ else {
+ /* Fortran 8x-style unquoted string */
+ *p++ = ch;
+ for(i = 1;;) {
+ switch(GETC(ch)) {
+ case ',':
+ case ' ':
+ case '\t':
+ case '\n':
+ case '/':
+ Ungetc(ch,f__cf);
+ /* no break */
+ case EOF:
+ f__ltype = TYCHAR;
+ return *p = 0;
+ }
+ noquote:
+ *p++ = ch;
+ if (++i == size) {
+ f__lchar = (char *)realloc(f__lchar,
+ (unsigned int)(size += BUFSIZE));
+ if(f__lchar == NULL)
+ errfl(f__elist->cierr,113,rafail);
+ p = f__lchar + i;
+ }
+ }
+ }
+ f__ltype=TYCHAR;
+ for(i=0;;)
+ { while(GETC(ch)!=quote && ch!='\n'
+ && ch!=EOF && ++i<size) *p++ = ch;
+ if(i==size)
+ {
+ newone:
+ f__lchar= (char *)realloc(f__lchar,
+ (unsigned int)(size += BUFSIZE));
+ if(f__lchar == NULL)
+ errfl(f__elist->cierr,113,rafail);
+ p=f__lchar+i-1;
+ *p++ = ch;
+ }
+ else if(ch==EOF) return(EOF);
+ else if(ch=='\n')
+ { if(*(p-1) != '\\') continue;
+ i--;
+ p--;
+ if(++i<size) *p++ = ch;
+ else goto newone;
+ }
+ else if(GETC(ch)==quote)
+ { if(++i<size) *p++ = ch;
+ else goto newone;
+ }
+ else
+ { (void) Ungetc(ch,f__cf);
+ *p = 0;
+ return(0);
+ }
+ }
+}
+
+ int
+#ifdef KR_headers
+c_le(a) cilist *a;
+#else
+c_le(cilist *a)
+#endif
+{
+ if(!f__init)
+ f_init();
+ f__fmtbuf="list io";
+ f__curunit = &f__units[a->ciunit];
+ if(a->ciunit>=MXUNIT || a->ciunit<0)
+ err(a->cierr,101,"stler");
+ f__scale=f__recpos=0;
+ f__elist=a;
+ if(f__curunit->ufd==NULL && fk_open(SEQ,FMT,a->ciunit))
+ err(a->cierr,102,"lio");
+ f__cf=f__curunit->ufd;
+ if(!f__curunit->ufmt) err(a->cierr,103,"lio")
+ return(0);
+}
+
+ int
+#ifdef KR_headers
+l_read(number,ptr,len,type) ftnint *number,type; char *ptr; ftnlen len;
+#else
+l_read(ftnint *number, char *ptr, ftnlen len, ftnint type)
+#endif
+{
+#define Ptr ((flex *)ptr)
+ int i,n,ch;
+ doublereal *yy;
+ real *xx;
+ for(i=0;i<*number;i++)
+ {
+ if(f__lquit) return(0);
+ if(l_eof)
+ err(f__elist->ciend, EOF, "list in")
+ if(f__lcount == 0) {
+ f__ltype = 0;
+ for(;;) {
+ GETC(ch);
+ switch(ch) {
+ case EOF:
+ err(f__elist->ciend,(EOF),"list in")
+ case ' ':
+ case '\t':
+ case '\n':
+ continue;
+ case '/':
+ f__lquit = 1;
+ goto loopend;
+ case ',':
+ f__lcount = 1;
+ goto loopend;
+ default:
+ (void) Ungetc(ch, f__cf);
+ goto rddata;
+ }
+ }
+ }
+ rddata:
+ switch((int)type)
+ {
+ case TYINT1:
+ case TYSHORT:
+ case TYLONG:
+#ifndef ALLOW_FLOAT_IN_INTEGER_LIST_INPUT
+ ERR(l_R(0,1));
+ break;
+#endif
+ case TYREAL:
+ case TYDREAL:
+ ERR(l_R(0,0));
+ break;
+#ifdef TYQUAD
+ case TYQUAD:
+ n = l_R(0,2);
+ if (n)
+ return n;
+ break;
+#endif
+ case TYCOMPLEX:
+ case TYDCOMPLEX:
+ ERR(l_C());
+ break;
+ case TYLOGICAL1:
+ case TYLOGICAL2:
+ case TYLOGICAL:
+ ERR(l_L());
+ break;
+ case TYCHAR:
+ ERR(l_CHAR());
+ break;
+ }
+ while (GETC(ch) == ' ' || ch == '\t');
+ if (ch != ',' || f__lcount > 1)
+ Ungetc(ch,f__cf);
+ loopend:
+ if(f__lquit) return(0);
+ if(f__cf && ferror(f__cf)) {
+ clearerr(f__cf);
+ errfl(f__elist->cierr,errno,"list in");
+ }
+ if(f__ltype==0) goto bump;
+ switch((int)type)
+ {
+ case TYINT1:
+ case TYLOGICAL1:
+ Ptr->flchar = (char)f__lx;
+ break;
+ case TYLOGICAL2:
+ case TYSHORT:
+ Ptr->flshort = (short)f__lx;
+ break;
+ case TYLOGICAL:
+ case TYLONG:
+ Ptr->flint = (ftnint)f__lx;
+ break;
+#ifdef Allow_TYQUAD
+ case TYQUAD:
+ if (!(Ptr->fllongint = f__llx))
+ Ptr->fllongint = f__lx;
+ break;
+#endif
+ case TYREAL:
+ Ptr->flreal=f__lx;
+ break;
+ case TYDREAL:
+ Ptr->fldouble=f__lx;
+ break;
+ case TYCOMPLEX:
+ xx=(real *)ptr;
+ *xx++ = f__lx;
+ *xx = f__ly;
+ break;
+ case TYDCOMPLEX:
+ yy=(doublereal *)ptr;
+ *yy++ = f__lx;
+ *yy = f__ly;
+ break;
+ case TYCHAR:
+ b_char(f__lchar,ptr,len);
+ break;
+ }
+ bump:
+ if(f__lcount>0) f__lcount--;
+ ptr += len;
+ if (nml_read)
+ nml_read++;
+ }
+ return(0);
+#undef Ptr
+}
+#ifdef KR_headers
+integer s_rsle(a) cilist *a;
+#else
+integer s_rsle(cilist *a)
+#endif
+{
+ int n;
+
+ f__reading=1;
+ f__external=1;
+ f__formatted=1;
+ if(n=c_le(a)) return(n);
+ f__lioproc = l_read;
+ f__lquit = 0;
+ f__lcount = 0;
+ l_eof = 0;
+ if(f__curunit->uwrt && f__nowreading(f__curunit))
+ err(a->cierr,errno,"read start");
+ if(f__curunit->uend)
+ err(f__elist->ciend,(EOF),"read start");
+ l_getc = t_getc;
+ l_ungetc = un_getc;
+ f__doend = xrd_SL;
+ return(0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/lwrite.c b/contrib/libs/libf2c/lwrite.c
new file mode 100644
index 0000000000..9e0d93deb5
--- /dev/null
+++ b/contrib/libs/libf2c/lwrite.c
@@ -0,0 +1,314 @@
+#include "f2c.h"
+#include "fio.h"
+#include "fmt.h"
+#include "lio.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ftnint L_len;
+int f__Aquote;
+
+ static VOID
+donewrec(Void)
+{
+ if (f__recpos)
+ (*f__donewrec)();
+ }
+
+ static VOID
+#ifdef KR_headers
+lwrt_I(n) longint n;
+#else
+lwrt_I(longint n)
+#endif
+{
+ char *p;
+ int ndigit, sign;
+
+ p = f__icvt(n, &ndigit, &sign, 10);
+ if(f__recpos + ndigit >= L_len)
+ donewrec();
+ PUT(' ');
+ if (sign)
+ PUT('-');
+ while(*p)
+ PUT(*p++);
+}
+ static VOID
+#ifdef KR_headers
+lwrt_L(n, len) ftnint n; ftnlen len;
+#else
+lwrt_L(ftnint n, ftnlen len)
+#endif
+{
+ if(f__recpos+LLOGW>=L_len)
+ donewrec();
+ wrt_L((Uint *)&n,LLOGW, len);
+}
+ static VOID
+#ifdef KR_headers
+lwrt_A(p,len) char *p; ftnlen len;
+#else
+lwrt_A(char *p, ftnlen len)
+#endif
+{
+ int a;
+ char *p1, *pe;
+
+ a = 0;
+ pe = p + len;
+ if (f__Aquote) {
+ a = 3;
+ if (len > 1 && p[len-1] == ' ') {
+ while(--len > 1 && p[len-1] == ' ');
+ pe = p + len;
+ }
+ p1 = p;
+ while(p1 < pe)
+ if (*p1++ == '\'')
+ a++;
+ }
+ if(f__recpos+len+a >= L_len)
+ donewrec();
+ if (a
+#ifndef OMIT_BLANK_CC
+ || !f__recpos
+#endif
+ )
+ PUT(' ');
+ if (a) {
+ PUT('\'');
+ while(p < pe) {
+ if (*p == '\'')
+ PUT('\'');
+ PUT(*p++);
+ }
+ PUT('\'');
+ }
+ else
+ while(p < pe)
+ PUT(*p++);
+}
+
+ static int
+#ifdef KR_headers
+l_g(buf, n) char *buf; double n;
+#else
+l_g(char *buf, double n)
+#endif
+{
+#ifdef Old_list_output
+ doublereal absn;
+ char *fmt;
+
+ absn = n;
+ if (absn < 0)
+ absn = -absn;
+ fmt = LLOW <= absn && absn < LHIGH ? LFFMT : LEFMT;
+#ifdef USE_STRLEN
+ sprintf(buf, fmt, n);
+ return strlen(buf);
+#else
+ return sprintf(buf, fmt, n);
+#endif
+
+#else
+ register char *b, c, c1;
+
+ b = buf;
+ *b++ = ' ';
+ if (n < 0) {
+ *b++ = '-';
+ n = -n;
+ }
+ else
+ *b++ = ' ';
+ if (n == 0) {
+#ifdef SIGNED_ZEROS
+ if (signbit_f2c(&n))
+ *b++ = '-';
+#endif
+ *b++ = '0';
+ *b++ = '.';
+ *b = 0;
+ goto f__ret;
+ }
+ sprintf(b, LGFMT, n);
+ switch(*b) {
+#ifndef WANT_LEAD_0
+ case '0':
+ while(b[0] = b[1])
+ b++;
+ break;
+#endif
+ case 'i':
+ case 'I':
+ /* Infinity */
+ case 'n':
+ case 'N':
+ /* NaN */
+ while(*++b);
+ break;
+
+ default:
+ /* Fortran 77 insists on having a decimal point... */
+ for(;; b++)
+ switch(*b) {
+ case 0:
+ *b++ = '.';
+ *b = 0;
+ goto f__ret;
+ case '.':
+ while(*++b);
+ goto f__ret;
+ case 'E':
+ for(c1 = '.', c = 'E'; *b = c1;
+ c1 = c, c = *++b);
+ goto f__ret;
+ }
+ }
+ f__ret:
+ return b - buf;
+#endif
+ }
+
+ static VOID
+#ifdef KR_headers
+l_put(s) register char *s;
+#else
+l_put(register char *s)
+#endif
+{
+#ifdef KR_headers
+ register void (*pn)() = f__putn;
+#else
+ register void (*pn)(int) = f__putn;
+#endif
+ register int c;
+
+ while(c = *s++)
+ (*pn)(c);
+ }
+
+ static VOID
+#ifdef KR_headers
+lwrt_F(n) double n;
+#else
+lwrt_F(double n)
+#endif
+{
+ char buf[LEFBL];
+
+ if(f__recpos + l_g(buf,n) >= L_len)
+ donewrec();
+ l_put(buf);
+}
+ static VOID
+#ifdef KR_headers
+lwrt_C(a,b) double a,b;
+#else
+lwrt_C(double a, double b)
+#endif
+{
+ char *ba, *bb, bufa[LEFBL], bufb[LEFBL];
+ int al, bl;
+
+ al = l_g(bufa, a);
+ for(ba = bufa; *ba == ' '; ba++)
+ --al;
+ bl = l_g(bufb, b) + 1; /* intentionally high by 1 */
+ for(bb = bufb; *bb == ' '; bb++)
+ --bl;
+ if(f__recpos + al + bl + 3 >= L_len)
+ donewrec();
+#ifdef OMIT_BLANK_CC
+ else
+#endif
+ PUT(' ');
+ PUT('(');
+ l_put(ba);
+ PUT(',');
+ if (f__recpos + bl >= L_len) {
+ (*f__donewrec)();
+#ifndef OMIT_BLANK_CC
+ PUT(' ');
+#endif
+ }
+ l_put(bb);
+ PUT(')');
+}
+
+ int
+#ifdef KR_headers
+l_write(number,ptr,len,type) ftnint *number,type; char *ptr; ftnlen len;
+#else
+l_write(ftnint *number, char *ptr, ftnlen len, ftnint type)
+#endif
+{
+#define Ptr ((flex *)ptr)
+ int i;
+ longint x;
+ double y,z;
+ real *xx;
+ doublereal *yy;
+ for(i=0;i< *number; i++)
+ {
+ switch((int)type)
+ {
+ default: f__fatal(117,"unknown type in lio");
+ case TYINT1:
+ x = Ptr->flchar;
+ goto xint;
+ case TYSHORT:
+ x=Ptr->flshort;
+ goto xint;
+#ifdef Allow_TYQUAD
+ case TYQUAD:
+ x = Ptr->fllongint;
+ goto xint;
+#endif
+ case TYLONG:
+ x=Ptr->flint;
+ xint: lwrt_I(x);
+ break;
+ case TYREAL:
+ y=Ptr->flreal;
+ goto xfloat;
+ case TYDREAL:
+ y=Ptr->fldouble;
+ xfloat: lwrt_F(y);
+ break;
+ case TYCOMPLEX:
+ xx= &Ptr->flreal;
+ y = *xx++;
+ z = *xx;
+ goto xcomplex;
+ case TYDCOMPLEX:
+ yy = &Ptr->fldouble;
+ y= *yy++;
+ z = *yy;
+ xcomplex:
+ lwrt_C(y,z);
+ break;
+ case TYLOGICAL1:
+ x = Ptr->flchar;
+ goto xlog;
+ case TYLOGICAL2:
+ x = Ptr->flshort;
+ goto xlog;
+ case TYLOGICAL:
+ x = Ptr->flint;
+ xlog: lwrt_L(Ptr->flint, len);
+ break;
+ case TYCHAR:
+ lwrt_A(ptr,len);
+ break;
+ }
+ ptr += len;
+ }
+ return(0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/open.c b/contrib/libs/libf2c/open.c
new file mode 100644
index 0000000000..a06428dd76
--- /dev/null
+++ b/contrib/libs/libf2c/open.c
@@ -0,0 +1,301 @@
+#include "f2c.h"
+#include "fio.h"
+#include "string.h"
+#ifndef NON_POSIX_STDIO
+#ifdef MSDOS
+#include "io.h"
+#else
+#include "unistd.h" /* for access */
+#endif
+#endif
+
+#ifdef KR_headers
+extern char *malloc();
+#ifdef NON_ANSI_STDIO
+extern char *mktemp();
+#endif
+extern integer f_clos();
+#define Const /*nothing*/
+#else
+#define Const const
+#undef abs
+#undef min
+#undef max
+#include "stdlib.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern int f__canseek(FILE*);
+extern integer f_clos(cllist*);
+#endif
+
+#ifdef NON_ANSI_RW_MODES
+Const char *f__r_mode[2] = {"r", "r"};
+Const char *f__w_mode[4] = {"w", "w", "r+w", "r+w"};
+#else
+Const char *f__r_mode[2] = {"rb", "r"};
+Const char *f__w_mode[4] = {"wb", "w", "r+b", "r+"};
+#endif
+
+ static char f__buf0[400], *f__buf = f__buf0;
+ int f__buflen = (int)sizeof(f__buf0);
+
+ static void
+#ifdef KR_headers
+f__bufadj(n, c) int n, c;
+#else
+f__bufadj(int n, int c)
+#endif
+{
+ unsigned int len;
+ char *nbuf, *s, *t, *te;
+
+ if (f__buf == f__buf0)
+ f__buflen = 1024;
+ while(f__buflen <= n)
+ f__buflen <<= 1;
+ len = (unsigned int)f__buflen;
+ if (len != f__buflen || !(nbuf = (char*)malloc(len)))
+ f__fatal(113, "malloc failure");
+ s = nbuf;
+ t = f__buf;
+ te = t + c;
+ while(t < te)
+ *s++ = *t++;
+ if (f__buf != f__buf0)
+ free(f__buf);
+ f__buf = nbuf;
+ }
+
+ int
+#ifdef KR_headers
+f__putbuf(c) int c;
+#else
+f__putbuf(int c)
+#endif
+{
+ char *s, *se;
+ int n;
+
+ if (f__hiwater > f__recpos)
+ f__recpos = f__hiwater;
+ n = f__recpos + 1;
+ if (n >= f__buflen)
+ f__bufadj(n, f__recpos);
+ s = f__buf;
+ se = s + f__recpos;
+ if (c)
+ *se++ = c;
+ *se = 0;
+ for(;;) {
+ fputs(s, f__cf);
+ s += strlen(s);
+ if (s >= se)
+ break; /* normally happens the first time */
+ putc(*s++, f__cf);
+ }
+ return 0;
+ }
+
+ void
+#ifdef KR_headers
+x_putc(c)
+#else
+x_putc(int c)
+#endif
+{
+ if (f__recpos >= f__buflen)
+ f__bufadj(f__recpos, f__buflen);
+ f__buf[f__recpos++] = c;
+ }
+
+#define opnerr(f,m,s) {if(f) errno= m; else opn_err(m,s,a); return(m);}
+
+ static void
+#ifdef KR_headers
+opn_err(m, s, a) int m; char *s; olist *a;
+#else
+opn_err(int m, const char *s, olist *a)
+#endif
+{
+ if (a->ofnm) {
+ /* supply file name to error message */
+ if (a->ofnmlen >= f__buflen)
+ f__bufadj((int)a->ofnmlen, 0);
+ g_char(a->ofnm, a->ofnmlen, f__curunit->ufnm = f__buf);
+ }
+ f__fatal(m, s);
+ }
+
+#ifdef KR_headers
+integer f_open(a) olist *a;
+#else
+integer f_open(olist *a)
+#endif
+{ unit *b;
+ integer rv;
+ char buf[256], *s;
+ cllist x;
+ int ufmt;
+ FILE *tf;
+#ifndef NON_UNIX_STDIO
+ int n;
+#endif
+ f__external = 1;
+ if(a->ounit>=MXUNIT || a->ounit<0)
+ err(a->oerr,101,"open")
+ if (!f__init)
+ f_init();
+ f__curunit = b = &f__units[a->ounit];
+ if(b->ufd) {
+ if(a->ofnm==0)
+ {
+ same: if (a->oblnk)
+ b->ublnk = *a->oblnk == 'z' || *a->oblnk == 'Z';
+ return(0);
+ }
+#ifdef NON_UNIX_STDIO
+ if (b->ufnm
+ && strlen(b->ufnm) == a->ofnmlen
+ && !strncmp(b->ufnm, a->ofnm, (unsigned)a->ofnmlen))
+ goto same;
+#else
+ g_char(a->ofnm,a->ofnmlen,buf);
+ if (f__inode(buf,&n) == b->uinode && n == b->udev)
+ goto same;
+#endif
+ x.cunit=a->ounit;
+ x.csta=0;
+ x.cerr=a->oerr;
+ if ((rv = f_clos(&x)) != 0)
+ return rv;
+ }
+ b->url = (int)a->orl;
+ b->ublnk = a->oblnk && (*a->oblnk == 'z' || *a->oblnk == 'Z');
+ if(a->ofm==0)
+ { if(b->url>0) b->ufmt=0;
+ else b->ufmt=1;
+ }
+ else if(*a->ofm=='f' || *a->ofm == 'F') b->ufmt=1;
+ else b->ufmt=0;
+ ufmt = b->ufmt;
+#ifdef url_Adjust
+ if (b->url && !ufmt)
+ url_Adjust(b->url);
+#endif
+ if (a->ofnm) {
+ g_char(a->ofnm,a->ofnmlen,buf);
+ if (!buf[0])
+ opnerr(a->oerr,107,"open")
+ }
+ else
+ sprintf(buf, "fort.%ld", (long)a->ounit);
+ b->uscrtch = 0;
+ b->uend=0;
+ b->uwrt = 0;
+ b->ufd = 0;
+ b->urw = 3;
+ switch(a->osta ? *a->osta : 'u')
+ {
+ case 'o':
+ case 'O':
+#ifdef NON_POSIX_STDIO
+ if (!(tf = FOPEN(buf,"r")))
+ opnerr(a->oerr,errno,"open")
+ fclose(tf);
+#else
+ if (access(buf,0))
+ opnerr(a->oerr,errno,"open")
+#endif
+ break;
+ case 's':
+ case 'S':
+ b->uscrtch=1;
+#ifdef NON_ANSI_STDIO
+ (void) strcpy(buf,"tmp.FXXXXXX");
+ (void) mktemp(buf);
+ goto replace;
+#else
+ if (!(b->ufd = tmpfile()))
+ opnerr(a->oerr,errno,"open")
+ b->ufnm = 0;
+#ifndef NON_UNIX_STDIO
+ b->uinode = b->udev = -1;
+#endif
+ b->useek = 1;
+ return 0;
+#endif
+
+ case 'n':
+ case 'N':
+#ifdef NON_POSIX_STDIO
+ if ((tf = FOPEN(buf,"r")) || (tf = FOPEN(buf,"a"))) {
+ fclose(tf);
+ opnerr(a->oerr,128,"open")
+ }
+#else
+ if (!access(buf,0))
+ opnerr(a->oerr,128,"open")
+#endif
+ /* no break */
+ case 'r': /* Fortran 90 replace option */
+ case 'R':
+#ifdef NON_ANSI_STDIO
+ replace:
+#endif
+ if (tf = FOPEN(buf,f__w_mode[0]))
+ fclose(tf);
+ }
+
+ b->ufnm=(char *) malloc((unsigned int)(strlen(buf)+1));
+ if(b->ufnm==NULL) opnerr(a->oerr,113,"no space");
+ (void) strcpy(b->ufnm,buf);
+ if ((s = a->oacc) && b->url)
+ ufmt = 0;
+ if(!(tf = FOPEN(buf, f__w_mode[ufmt|2]))) {
+ if (tf = FOPEN(buf, f__r_mode[ufmt]))
+ b->urw = 1;
+ else if (tf = FOPEN(buf, f__w_mode[ufmt])) {
+ b->uwrt = 1;
+ b->urw = 2;
+ }
+ else
+ err(a->oerr, errno, "open");
+ }
+ b->useek = f__canseek(b->ufd = tf);
+#ifndef NON_UNIX_STDIO
+ if((b->uinode = f__inode(buf,&b->udev)) == -1)
+ opnerr(a->oerr,108,"open")
+#endif
+ if(b->useek)
+ if (a->orl)
+ rewind(b->ufd);
+ else if ((s = a->oacc) && (*s == 'a' || *s == 'A')
+ && FSEEK(b->ufd, 0L, SEEK_END))
+ opnerr(a->oerr,129,"open");
+ return(0);
+}
+
+ int
+#ifdef KR_headers
+fk_open(seq,fmt,n) ftnint n;
+#else
+fk_open(int seq, int fmt, ftnint n)
+#endif
+{ char nbuf[10];
+ olist a;
+ (void) sprintf(nbuf,"fort.%ld",(long)n);
+ a.oerr=1;
+ a.ounit=n;
+ a.ofnm=nbuf;
+ a.ofnmlen=strlen(nbuf);
+ a.osta=NULL;
+ a.oacc= (char*)(seq==SEQ?"s":"d");
+ a.ofm = (char*)(fmt==FMT?"f":"u");
+ a.orl = seq==DIR?1:0;
+ a.oblnk=NULL;
+ return(f_open(&a));
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/pow_ci.c b/contrib/libs/libf2c/pow_ci.c
new file mode 100644
index 0000000000..574e0b1eba
--- /dev/null
+++ b/contrib/libs/libf2c/pow_ci.c
@@ -0,0 +1,26 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+VOID pow_ci(p, a, b) /* p = a**b */
+ complex *p, *a; integer *b;
+#else
+extern void pow_zi(doublecomplex*, doublecomplex*, integer*);
+void pow_ci(complex *p, complex *a, integer *b) /* p = a**b */
+#endif
+{
+doublecomplex p1, a1;
+
+a1.r = a->r;
+a1.i = a->i;
+
+pow_zi(&p1, &a1, b);
+
+p->r = p1.r;
+p->i = p1.i;
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/pow_dd.c b/contrib/libs/libf2c/pow_dd.c
new file mode 100644
index 0000000000..08fc20884a
--- /dev/null
+++ b/contrib/libs/libf2c/pow_dd.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double pow();
+double pow_dd(ap, bp) doublereal *ap, *bp;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double pow_dd(doublereal *ap, doublereal *bp)
+#endif
+{
+return(pow(*ap, *bp) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/pow_di.c b/contrib/libs/libf2c/pow_di.c
new file mode 100644
index 0000000000..abf36cb74c
--- /dev/null
+++ b/contrib/libs/libf2c/pow_di.c
@@ -0,0 +1,41 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+double pow_di(ap, bp) doublereal *ap; integer *bp;
+#else
+double pow_di(doublereal *ap, integer *bp)
+#endif
+{
+double pow, x;
+integer n;
+unsigned long u;
+
+pow = 1;
+x = *ap;
+n = *bp;
+
+if(n != 0)
+ {
+ if(n < 0)
+ {
+ n = -n;
+ x = 1/x;
+ }
+ for(u = n; ; )
+ {
+ if(u & 01)
+ pow *= x;
+ if(u >>= 1)
+ x *= x;
+ else
+ break;
+ }
+ }
+return(pow);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/pow_hh.c b/contrib/libs/libf2c/pow_hh.c
new file mode 100644
index 0000000000..882168501a
--- /dev/null
+++ b/contrib/libs/libf2c/pow_hh.c
@@ -0,0 +1,39 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+shortint pow_hh(ap, bp) shortint *ap, *bp;
+#else
+shortint pow_hh(shortint *ap, shortint *bp)
+#endif
+{
+ shortint pow, x, n;
+ unsigned u;
+
+ x = *ap;
+ n = *bp;
+
+ if (n <= 0) {
+ if (n == 0 || x == 1)
+ return 1;
+ if (x != -1)
+ return x == 0 ? 1/x : 0;
+ n = -n;
+ }
+ u = n;
+ for(pow = 1; ; )
+ {
+ if(u & 01)
+ pow *= x;
+ if(u >>= 1)
+ x *= x;
+ else
+ break;
+ }
+ return(pow);
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/pow_ii.c b/contrib/libs/libf2c/pow_ii.c
new file mode 100644
index 0000000000..748d121773
--- /dev/null
+++ b/contrib/libs/libf2c/pow_ii.c
@@ -0,0 +1,39 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+integer pow_ii(ap, bp) integer *ap, *bp;
+#else
+integer pow_ii(integer *ap, integer *bp)
+#endif
+{
+ integer pow, x, n;
+ unsigned long u;
+
+ x = *ap;
+ n = *bp;
+
+ if (n <= 0) {
+ if (n == 0 || x == 1)
+ return 1;
+ if (x != -1)
+ return x == 0 ? 1/x : 0;
+ n = -n;
+ }
+ u = n;
+ for(pow = 1; ; )
+ {
+ if(u & 01)
+ pow *= x;
+ if(u >>= 1)
+ x *= x;
+ else
+ break;
+ }
+ return(pow);
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/pow_ri.c b/contrib/libs/libf2c/pow_ri.c
new file mode 100644
index 0000000000..e29d416eba
--- /dev/null
+++ b/contrib/libs/libf2c/pow_ri.c
@@ -0,0 +1,41 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+double pow_ri(ap, bp) real *ap; integer *bp;
+#else
+double pow_ri(real *ap, integer *bp)
+#endif
+{
+double pow, x;
+integer n;
+unsigned long u;
+
+pow = 1;
+x = *ap;
+n = *bp;
+
+if(n != 0)
+ {
+ if(n < 0)
+ {
+ n = -n;
+ x = 1/x;
+ }
+ for(u = n; ; )
+ {
+ if(u & 01)
+ pow *= x;
+ if(u >>= 1)
+ x *= x;
+ else
+ break;
+ }
+ }
+return(pow);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/pow_zi.c b/contrib/libs/libf2c/pow_zi.c
new file mode 100644
index 0000000000..1c0a4b07c2
--- /dev/null
+++ b/contrib/libs/libf2c/pow_zi.c
@@ -0,0 +1,60 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+VOID pow_zi(p, a, b) /* p = a**b */
+ doublecomplex *p, *a; integer *b;
+#else
+extern void z_div(doublecomplex*, doublecomplex*, doublecomplex*);
+void pow_zi(doublecomplex *p, doublecomplex *a, integer *b) /* p = a**b */
+#endif
+{
+ integer n;
+ unsigned long u;
+ double t;
+ doublecomplex q, x;
+ static doublecomplex one = {1.0, 0.0};
+
+ n = *b;
+ q.r = 1;
+ q.i = 0;
+
+ if(n == 0)
+ goto done;
+ if(n < 0)
+ {
+ n = -n;
+ z_div(&x, &one, a);
+ }
+ else
+ {
+ x.r = a->r;
+ x.i = a->i;
+ }
+
+ for(u = n; ; )
+ {
+ if(u & 01)
+ {
+ t = q.r * x.r - q.i * x.i;
+ q.i = q.r * x.i + q.i * x.r;
+ q.r = t;
+ }
+ if(u >>= 1)
+ {
+ t = x.r * x.r - x.i * x.i;
+ x.i = 2 * x.r * x.i;
+ x.r = t;
+ }
+ else
+ break;
+ }
+ done:
+ p->i = q.i;
+ p->r = q.r;
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/pow_zz.c b/contrib/libs/libf2c/pow_zz.c
new file mode 100644
index 0000000000..b5ffd33483
--- /dev/null
+++ b/contrib/libs/libf2c/pow_zz.c
@@ -0,0 +1,29 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double log(), exp(), cos(), sin(), atan2(), f__cabs();
+VOID pow_zz(r,a,b) doublecomplex *r, *a, *b;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern double f__cabs(double,double);
+void pow_zz(doublecomplex *r, doublecomplex *a, doublecomplex *b)
+#endif
+{
+double logr, logi, x, y;
+
+logr = log( f__cabs(a->r, a->i) );
+logi = atan2(a->i, a->r);
+
+x = exp( logr * b->r - logi * b->i );
+y = logr * b->i + logi * b->r;
+
+r->r = x * cos(y);
+r->i = x * sin(y);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_abs.c b/contrib/libs/libf2c/r_abs.c
new file mode 100644
index 0000000000..f3291fb4d1
--- /dev/null
+++ b/contrib/libs/libf2c/r_abs.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+double r_abs(x) real *x;
+#else
+double r_abs(real *x)
+#endif
+{
+if(*x >= 0)
+ return(*x);
+return(- *x);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_acos.c b/contrib/libs/libf2c/r_acos.c
new file mode 100644
index 0000000000..103c7ff070
--- /dev/null
+++ b/contrib/libs/libf2c/r_acos.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double acos();
+double r_acos(x) real *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double r_acos(real *x)
+#endif
+{
+return( acos(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_asin.c b/contrib/libs/libf2c/r_asin.c
new file mode 100644
index 0000000000..432b9406ac
--- /dev/null
+++ b/contrib/libs/libf2c/r_asin.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double asin();
+double r_asin(x) real *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double r_asin(real *x)
+#endif
+{
+return( asin(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_atan.c b/contrib/libs/libf2c/r_atan.c
new file mode 100644
index 0000000000..7656982db4
--- /dev/null
+++ b/contrib/libs/libf2c/r_atan.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double atan();
+double r_atan(x) real *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double r_atan(real *x)
+#endif
+{
+return( atan(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_atn2.c b/contrib/libs/libf2c/r_atn2.c
new file mode 100644
index 0000000000..ab957b89d5
--- /dev/null
+++ b/contrib/libs/libf2c/r_atn2.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double atan2();
+double r_atn2(x,y) real *x, *y;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double r_atn2(real *x, real *y)
+#endif
+{
+return( atan2(*x,*y) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_cnjg.c b/contrib/libs/libf2c/r_cnjg.c
new file mode 100644
index 0000000000..cef0e4b092
--- /dev/null
+++ b/contrib/libs/libf2c/r_cnjg.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+VOID r_cnjg(r, z) complex *r, *z;
+#else
+VOID r_cnjg(complex *r, complex *z)
+#endif
+{
+ real zi = z->i;
+ r->r = z->r;
+ r->i = -zi;
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_cos.c b/contrib/libs/libf2c/r_cos.c
new file mode 100644
index 0000000000..4418f0c1bb
--- /dev/null
+++ b/contrib/libs/libf2c/r_cos.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double cos();
+double r_cos(x) real *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double r_cos(real *x)
+#endif
+{
+return( cos(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_cosh.c b/contrib/libs/libf2c/r_cosh.c
new file mode 100644
index 0000000000..f547835580
--- /dev/null
+++ b/contrib/libs/libf2c/r_cosh.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double cosh();
+double r_cosh(x) real *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double r_cosh(real *x)
+#endif
+{
+return( cosh(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_dim.c b/contrib/libs/libf2c/r_dim.c
new file mode 100644
index 0000000000..d573ca36d8
--- /dev/null
+++ b/contrib/libs/libf2c/r_dim.c
@@ -0,0 +1,16 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+double r_dim(a,b) real *a, *b;
+#else
+double r_dim(real *a, real *b)
+#endif
+{
+return( *a > *b ? *a - *b : 0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_exp.c b/contrib/libs/libf2c/r_exp.c
new file mode 100644
index 0000000000..4e679794f2
--- /dev/null
+++ b/contrib/libs/libf2c/r_exp.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double exp();
+double r_exp(x) real *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double r_exp(real *x)
+#endif
+{
+return( exp(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_imag.c b/contrib/libs/libf2c/r_imag.c
new file mode 100644
index 0000000000..1b4de14373
--- /dev/null
+++ b/contrib/libs/libf2c/r_imag.c
@@ -0,0 +1,16 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+double r_imag(z) complex *z;
+#else
+double r_imag(complex *z)
+#endif
+{
+return(z->i);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_int.c b/contrib/libs/libf2c/r_int.c
new file mode 100644
index 0000000000..bff87176ff
--- /dev/null
+++ b/contrib/libs/libf2c/r_int.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double floor();
+double r_int(x) real *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double r_int(real *x)
+#endif
+{
+return( (*x>0) ? floor(*x) : -floor(- *x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_lg10.c b/contrib/libs/libf2c/r_lg10.c
new file mode 100644
index 0000000000..64ffddf48b
--- /dev/null
+++ b/contrib/libs/libf2c/r_lg10.c
@@ -0,0 +1,21 @@
+#include "f2c.h"
+
+#define log10e 0.43429448190325182765
+
+#ifdef KR_headers
+double log();
+double r_lg10(x) real *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double r_lg10(real *x)
+#endif
+{
+return( log10e * log(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_log.c b/contrib/libs/libf2c/r_log.c
new file mode 100644
index 0000000000..94c79b0518
--- /dev/null
+++ b/contrib/libs/libf2c/r_log.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double log();
+double r_log(x) real *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double r_log(real *x)
+#endif
+{
+return( log(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_mod.c b/contrib/libs/libf2c/r_mod.c
new file mode 100644
index 0000000000..63ed175368
--- /dev/null
+++ b/contrib/libs/libf2c/r_mod.c
@@ -0,0 +1,46 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+#ifdef IEEE_drem
+double drem();
+#else
+double floor();
+#endif
+double r_mod(x,y) real *x, *y;
+#else
+#ifdef IEEE_drem
+double drem(double, double);
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+#endif
+double r_mod(real *x, real *y)
+#endif
+{
+#ifdef IEEE_drem
+ double xa, ya, z;
+ if ((ya = *y) < 0.)
+ ya = -ya;
+ z = drem(xa = *x, ya);
+ if (xa > 0) {
+ if (z < 0)
+ z += ya;
+ }
+ else if (z > 0)
+ z -= ya;
+ return z;
+#else
+ double quotient;
+ if( (quotient = (double)*x / *y) >= 0)
+ quotient = floor(quotient);
+ else
+ quotient = -floor(-quotient);
+ return(*x - (*y) * quotient );
+#endif
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_nint.c b/contrib/libs/libf2c/r_nint.c
new file mode 100644
index 0000000000..7cc3f1b5ae
--- /dev/null
+++ b/contrib/libs/libf2c/r_nint.c
@@ -0,0 +1,20 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double floor();
+double r_nint(x) real *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double r_nint(real *x)
+#endif
+{
+return( (*x)>=0 ?
+ floor(*x + .5) : -floor(.5 - *x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_sign.c b/contrib/libs/libf2c/r_sign.c
new file mode 100644
index 0000000000..797db1a4cb
--- /dev/null
+++ b/contrib/libs/libf2c/r_sign.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+double r_sign(a,b) real *a, *b;
+#else
+double r_sign(real *a, real *b)
+#endif
+{
+double x;
+x = (*a >= 0 ? *a : - *a);
+return( *b >= 0 ? x : -x);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_sin.c b/contrib/libs/libf2c/r_sin.c
new file mode 100644
index 0000000000..37e0df25fa
--- /dev/null
+++ b/contrib/libs/libf2c/r_sin.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double sin();
+double r_sin(x) real *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double r_sin(real *x)
+#endif
+{
+return( sin(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_sinh.c b/contrib/libs/libf2c/r_sinh.c
new file mode 100644
index 0000000000..39878f03ae
--- /dev/null
+++ b/contrib/libs/libf2c/r_sinh.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double sinh();
+double r_sinh(x) real *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double r_sinh(real *x)
+#endif
+{
+return( sinh(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_sqrt.c b/contrib/libs/libf2c/r_sqrt.c
new file mode 100644
index 0000000000..e7b2c1c704
--- /dev/null
+++ b/contrib/libs/libf2c/r_sqrt.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double sqrt();
+double r_sqrt(x) real *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double r_sqrt(real *x)
+#endif
+{
+return( sqrt(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_tan.c b/contrib/libs/libf2c/r_tan.c
new file mode 100644
index 0000000000..1774bed73a
--- /dev/null
+++ b/contrib/libs/libf2c/r_tan.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double tan();
+double r_tan(x) real *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double r_tan(real *x)
+#endif
+{
+return( tan(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/r_tanh.c b/contrib/libs/libf2c/r_tanh.c
new file mode 100644
index 0000000000..7739c6ce84
--- /dev/null
+++ b/contrib/libs/libf2c/r_tanh.c
@@ -0,0 +1,19 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double tanh();
+double r_tanh(x) real *x;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+double r_tanh(real *x)
+#endif
+{
+return( tanh(*x) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/rdfmt.c b/contrib/libs/libf2c/rdfmt.c
new file mode 100644
index 0000000000..493bfef877
--- /dev/null
+++ b/contrib/libs/libf2c/rdfmt.c
@@ -0,0 +1,553 @@
+#include "f2c.h"
+#include "fio.h"
+
+#ifdef KR_headers
+extern double atof();
+#define Const /*nothing*/
+#else
+#define Const const
+#undef abs
+#undef min
+#undef max
+#include "stdlib.h"
+#endif
+
+#include "fmt.h"
+#include "fp.h"
+#include "ctype_.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ static int
+#ifdef KR_headers
+rd_Z(n,w,len) Uint *n; ftnlen len;
+#else
+rd_Z(Uint *n, int w, ftnlen len)
+#endif
+{
+ long x[9];
+ char *s, *s0, *s1, *se, *t;
+ Const char *sc;
+ int ch, i, w1, w2;
+ static char hex[256];
+ static int one = 1;
+ int bad = 0;
+
+ if (!hex['0']) {
+ sc = "0123456789";
+ while(ch = *sc++)
+ hex[ch] = ch - '0' + 1;
+ sc = "ABCDEF";
+ while(ch = *sc++)
+ hex[ch] = hex[ch + 'a' - 'A'] = ch - 'A' + 11;
+ }
+ s = s0 = (char *)x;
+ s1 = (char *)&x[4];
+ se = (char *)&x[8];
+ if (len > 4*sizeof(long))
+ return errno = 117;
+ while (w) {
+ GET(ch);
+ if (ch==',' || ch=='\n')
+ break;
+ w--;
+ if (ch > ' ') {
+ if (!hex[ch & 0xff])
+ bad++;
+ *s++ = ch;
+ if (s == se) {
+ /* discard excess characters */
+ for(t = s0, s = s1; t < s1;)
+ *t++ = *s++;
+ s = s1;
+ }
+ }
+ }
+ if (bad)
+ return errno = 115;
+ w = (int)len;
+ w1 = s - s0;
+ w2 = w1+1 >> 1;
+ t = (char *)n;
+ if (*(char *)&one) {
+ /* little endian */
+ t += w - 1;
+ i = -1;
+ }
+ else
+ i = 1;
+ for(; w > w2; t += i, --w)
+ *t = 0;
+ if (!w)
+ return 0;
+ if (w < w2)
+ s0 = s - (w << 1);
+ else if (w1 & 1) {
+ *t = hex[*s0++ & 0xff] - 1;
+ if (!--w)
+ return 0;
+ t += i;
+ }
+ do {
+ *t = hex[*s0 & 0xff]-1 << 4 | hex[s0[1] & 0xff]-1;
+ t += i;
+ s0 += 2;
+ }
+ while(--w);
+ return 0;
+ }
+
+ static int
+#ifdef KR_headers
+rd_I(n,w,len, base) Uint *n; int w; ftnlen len; register int base;
+#else
+rd_I(Uint *n, int w, ftnlen len, register int base)
+#endif
+{
+ int ch, sign;
+ longint x = 0;
+
+ if (w <= 0)
+ goto have_x;
+ for(;;) {
+ GET(ch);
+ if (ch != ' ')
+ break;
+ if (!--w)
+ goto have_x;
+ }
+ sign = 0;
+ switch(ch) {
+ case ',':
+ case '\n':
+ w = 0;
+ goto have_x;
+ case '-':
+ sign = 1;
+ case '+':
+ break;
+ default:
+ if (ch >= '0' && ch <= '9') {
+ x = ch - '0';
+ break;
+ }
+ goto have_x;
+ }
+ while(--w) {
+ GET(ch);
+ if (ch >= '0' && ch <= '9') {
+ x = x*base + ch - '0';
+ continue;
+ }
+ if (ch != ' ') {
+ if (ch == '\n' || ch == ',')
+ w = 0;
+ break;
+ }
+ if (f__cblank)
+ x *= base;
+ }
+ if (sign)
+ x = -x;
+ have_x:
+ if(len == sizeof(integer))
+ n->il=x;
+ else if(len == sizeof(char))
+ n->ic = (char)x;
+#ifdef Allow_TYQUAD
+ else if (len == sizeof(longint))
+ n->ili = x;
+#endif
+ else
+ n->is = (short)x;
+ if (w) {
+ while(--w)
+ GET(ch);
+ return errno = 115;
+ }
+ return 0;
+}
+
+ static int
+#ifdef KR_headers
+rd_L(n,w,len) ftnint *n; ftnlen len;
+#else
+rd_L(ftnint *n, int w, ftnlen len)
+#endif
+{ int ch, dot, lv;
+
+ if (w <= 0)
+ goto bad;
+ for(;;) {
+ GET(ch);
+ --w;
+ if (ch != ' ')
+ break;
+ if (!w)
+ goto bad;
+ }
+ dot = 0;
+ retry:
+ switch(ch) {
+ case '.':
+ if (dot++ || !w)
+ goto bad;
+ GET(ch);
+ --w;
+ goto retry;
+ case 't':
+ case 'T':
+ lv = 1;
+ break;
+ case 'f':
+ case 'F':
+ lv = 0;
+ break;
+ default:
+ bad:
+ for(; w > 0; --w)
+ GET(ch);
+ /* no break */
+ case ',':
+ case '\n':
+ return errno = 116;
+ }
+ switch(len) {
+ case sizeof(char): *(char *)n = (char)lv; break;
+ case sizeof(short): *(short *)n = (short)lv; break;
+ default: *n = lv;
+ }
+ while(w-- > 0) {
+ GET(ch);
+ if (ch == ',' || ch == '\n')
+ break;
+ }
+ return 0;
+}
+
+ static int
+#ifdef KR_headers
+rd_F(p, w, d, len) ufloat *p; ftnlen len;
+#else
+rd_F(ufloat *p, int w, int d, ftnlen len)
+#endif
+{
+ char s[FMAX+EXPMAXDIGS+4];
+ register int ch;
+ register char *sp, *spe, *sp1;
+ double x;
+ int scale1, se;
+ long e, exp;
+
+ sp1 = sp = s;
+ spe = sp + FMAX;
+ exp = -d;
+ x = 0.;
+
+ do {
+ GET(ch);
+ w--;
+ } while (ch == ' ' && w);
+ switch(ch) {
+ case '-': *sp++ = ch; sp1++; spe++;
+ case '+':
+ if (!w) goto zero;
+ --w;
+ GET(ch);
+ }
+ while(ch == ' ') {
+blankdrop:
+ if (!w--) goto zero; GET(ch); }
+ while(ch == '0')
+ { if (!w--) goto zero; GET(ch); }
+ if (ch == ' ' && f__cblank)
+ goto blankdrop;
+ scale1 = f__scale;
+ while(isdigit(ch)) {
+digloop1:
+ if (sp < spe) *sp++ = ch;
+ else ++exp;
+digloop1e:
+ if (!w--) goto done;
+ GET(ch);
+ }
+ if (ch == ' ') {
+ if (f__cblank)
+ { ch = '0'; goto digloop1; }
+ goto digloop1e;
+ }
+ if (ch == '.') {
+ exp += d;
+ if (!w--) goto done;
+ GET(ch);
+ if (sp == sp1) { /* no digits yet */
+ while(ch == '0') {
+skip01:
+ --exp;
+skip0:
+ if (!w--) goto done;
+ GET(ch);
+ }
+ if (ch == ' ') {
+ if (f__cblank) goto skip01;
+ goto skip0;
+ }
+ }
+ while(isdigit(ch)) {
+digloop2:
+ if (sp < spe)
+ { *sp++ = ch; --exp; }
+digloop2e:
+ if (!w--) goto done;
+ GET(ch);
+ }
+ if (ch == ' ') {
+ if (f__cblank)
+ { ch = '0'; goto digloop2; }
+ goto digloop2e;
+ }
+ }
+ switch(ch) {
+ default:
+ break;
+ case '-': se = 1; goto signonly;
+ case '+': se = 0; goto signonly;
+ case 'e':
+ case 'E':
+ case 'd':
+ case 'D':
+ if (!w--)
+ goto bad;
+ GET(ch);
+ while(ch == ' ') {
+ if (!w--)
+ goto bad;
+ GET(ch);
+ }
+ se = 0;
+ switch(ch) {
+ case '-': se = 1;
+ case '+':
+signonly:
+ if (!w--)
+ goto bad;
+ GET(ch);
+ }
+ while(ch == ' ') {
+ if (!w--)
+ goto bad;
+ GET(ch);
+ }
+ if (!isdigit(ch))
+ goto bad;
+
+ e = ch - '0';
+ for(;;) {
+ if (!w--)
+ { ch = '\n'; break; }
+ GET(ch);
+ if (!isdigit(ch)) {
+ if (ch == ' ') {
+ if (f__cblank)
+ ch = '0';
+ else continue;
+ }
+ else
+ break;
+ }
+ e = 10*e + ch - '0';
+ if (e > EXPMAX && sp > sp1)
+ goto bad;
+ }
+ if (se)
+ exp -= e;
+ else
+ exp += e;
+ scale1 = 0;
+ }
+ switch(ch) {
+ case '\n':
+ case ',':
+ break;
+ default:
+bad:
+ return (errno = 115);
+ }
+done:
+ if (sp > sp1) {
+ while(*--sp == '0')
+ ++exp;
+ if (exp -= scale1)
+ sprintf(sp+1, "e%ld", exp);
+ else
+ sp[1] = 0;
+ x = atof(s);
+ }
+zero:
+ if (len == sizeof(real))
+ p->pf = x;
+ else
+ p->pd = x;
+ return(0);
+ }
+
+
+ static int
+#ifdef KR_headers
+rd_A(p,len) char *p; ftnlen len;
+#else
+rd_A(char *p, ftnlen len)
+#endif
+{ int i,ch;
+ for(i=0;i<len;i++)
+ { GET(ch);
+ *p++=VAL(ch);
+ }
+ return(0);
+}
+ static int
+#ifdef KR_headers
+rd_AW(p,w,len) char *p; ftnlen len;
+#else
+rd_AW(char *p, int w, ftnlen len)
+#endif
+{ int i,ch;
+ if(w>=len)
+ { for(i=0;i<w-len;i++)
+ GET(ch);
+ for(i=0;i<len;i++)
+ { GET(ch);
+ *p++=VAL(ch);
+ }
+ return(0);
+ }
+ for(i=0;i<w;i++)
+ { GET(ch);
+ *p++=VAL(ch);
+ }
+ for(i=0;i<len-w;i++) *p++=' ';
+ return(0);
+}
+ static int
+#ifdef KR_headers
+rd_H(n,s) char *s;
+#else
+rd_H(int n, char *s)
+#endif
+{ int i,ch;
+ for(i=0;i<n;i++)
+ if((ch=(*f__getn)())<0) return(ch);
+ else *s++ = ch=='\n'?' ':ch;
+ return(1);
+}
+ static int
+#ifdef KR_headers
+rd_POS(s) char *s;
+#else
+rd_POS(char *s)
+#endif
+{ char quote;
+ int ch;
+ quote= *s++;
+ for(;*s;s++)
+ if(*s==quote && *(s+1)!=quote) break;
+ else if((ch=(*f__getn)())<0) return(ch);
+ else *s = ch=='\n'?' ':ch;
+ return(1);
+}
+
+ int
+#ifdef KR_headers
+rd_ed(p,ptr,len) struct syl *p; char *ptr; ftnlen len;
+#else
+rd_ed(struct syl *p, char *ptr, ftnlen len)
+#endif
+{ int ch;
+ for(;f__cursor>0;f__cursor--) if((ch=(*f__getn)())<0) return(ch);
+ if(f__cursor<0)
+ { if(f__recpos+f__cursor < 0) /*err(elist->cierr,110,"fmt")*/
+ f__cursor = -f__recpos; /* is this in the standard? */
+ if(f__external == 0) {
+ extern char *f__icptr;
+ f__icptr += f__cursor;
+ }
+ else if(f__curunit && f__curunit->useek)
+ (void) FSEEK(f__cf, f__cursor,SEEK_CUR);
+ else
+ err(f__elist->cierr,106,"fmt");
+ f__recpos += f__cursor;
+ f__cursor=0;
+ }
+ switch(p->op)
+ {
+ default: fprintf(stderr,"rd_ed, unexpected code: %d\n", p->op);
+ sig_die(f__fmtbuf, 1);
+ case IM:
+ case I: ch = rd_I((Uint *)ptr,p->p1,len, 10);
+ break;
+
+ /* O and OM don't work right for character, double, complex, */
+ /* or doublecomplex, and they differ from Fortran 90 in */
+ /* showing a minus sign for negative values. */
+
+ case OM:
+ case O: ch = rd_I((Uint *)ptr, p->p1, len, 8);
+ break;
+ case L: ch = rd_L((ftnint *)ptr,p->p1,len);
+ break;
+ case A: ch = rd_A(ptr,len);
+ break;
+ case AW:
+ ch = rd_AW(ptr,p->p1,len);
+ break;
+ case E: case EE:
+ case D:
+ case G:
+ case GE:
+ case F: ch = rd_F((ufloat *)ptr,p->p1,p->p2.i[0],len);
+ break;
+
+ /* Z and ZM assume 8-bit bytes. */
+
+ case ZM:
+ case Z:
+ ch = rd_Z((Uint *)ptr, p->p1, len);
+ break;
+ }
+ if(ch == 0) return(ch);
+ else if(ch == EOF) return(EOF);
+ if (f__cf)
+ clearerr(f__cf);
+ return(errno);
+}
+
+ int
+#ifdef KR_headers
+rd_ned(p) struct syl *p;
+#else
+rd_ned(struct syl *p)
+#endif
+{
+ switch(p->op)
+ {
+ default: fprintf(stderr,"rd_ned, unexpected code: %d\n", p->op);
+ sig_die(f__fmtbuf, 1);
+ case APOS:
+ return(rd_POS(p->p2.s));
+ case H: return(rd_H(p->p1,p->p2.s));
+ case SLASH: return((*f__donewrec)());
+ case TR:
+ case X: f__cursor += p->p1;
+ return(1);
+ case T: f__cursor=p->p1-f__recpos - 1;
+ return(1);
+ case TL: f__cursor -= p->p1;
+ if(f__cursor < -f__recpos) /* TL1000, 1X */
+ f__cursor = -f__recpos;
+ return(1);
+ }
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/rewind.c b/contrib/libs/libf2c/rewind.c
new file mode 100644
index 0000000000..9a0e07e6cf
--- /dev/null
+++ b/contrib/libs/libf2c/rewind.c
@@ -0,0 +1,30 @@
+#include "f2c.h"
+#include "fio.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+#ifdef KR_headers
+integer f_rew(a) alist *a;
+#else
+integer f_rew(alist *a)
+#endif
+{
+ unit *b;
+ if(a->aunit>=MXUNIT || a->aunit<0)
+ err(a->aerr,101,"rewind");
+ b = &f__units[a->aunit];
+ if(b->ufd == NULL || b->uwrt == 3)
+ return(0);
+ if(!b->useek)
+ err(a->aerr,106,"rewind")
+ if(b->uwrt) {
+ (void) t_runc(a);
+ b->uwrt = 3;
+ }
+ rewind(b->ufd);
+ b->uend=0;
+ return(0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/rsfe.c b/contrib/libs/libf2c/rsfe.c
new file mode 100644
index 0000000000..abe9724a7b
--- /dev/null
+++ b/contrib/libs/libf2c/rsfe.c
@@ -0,0 +1,91 @@
+/* read sequential formatted external */
+#include "f2c.h"
+#include "fio.h"
+#include "fmt.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ int
+xrd_SL(Void)
+{ int ch;
+ if(!f__curunit->uend)
+ while((ch=getc(f__cf))!='\n')
+ if (ch == EOF) {
+ f__curunit->uend = 1;
+ break;
+ }
+ f__cursor=f__recpos=0;
+ return(1);
+}
+
+ int
+x_getc(Void)
+{ int ch;
+ if(f__curunit->uend) return(EOF);
+ ch = getc(f__cf);
+ if(ch!=EOF && ch!='\n')
+ { f__recpos++;
+ return(ch);
+ }
+ if(ch=='\n')
+ { (void) ungetc(ch,f__cf);
+ return(ch);
+ }
+ if(f__curunit->uend || feof(f__cf))
+ { errno=0;
+ f__curunit->uend=1;
+ return(-1);
+ }
+ return(-1);
+}
+
+ int
+x_endp(Void)
+{
+ xrd_SL();
+ return f__curunit->uend == 1 ? EOF : 0;
+}
+
+ int
+x_rev(Void)
+{
+ (void) xrd_SL();
+ return(0);
+}
+#ifdef KR_headers
+integer s_rsfe(a) cilist *a; /* start */
+#else
+integer s_rsfe(cilist *a) /* start */
+#endif
+{ int n;
+ if(!f__init) f_init();
+ f__reading=1;
+ f__sequential=1;
+ f__formatted=1;
+ f__external=1;
+ if(n=c_sfe(a)) return(n);
+ f__elist=a;
+ f__cursor=f__recpos=0;
+ f__scale=0;
+ f__fmtbuf=a->cifmt;
+ f__cf=f__curunit->ufd;
+ if(pars_f(f__fmtbuf)<0) err(a->cierr,100,"startio");
+ f__getn= x_getc;
+ f__doed= rd_ed;
+ f__doned= rd_ned;
+ fmt_bg();
+ f__doend=x_endp;
+ f__donewrec=xrd_SL;
+ f__dorevert=x_rev;
+ f__cblank=f__curunit->ublnk;
+ f__cplus=0;
+ if(f__curunit->uwrt && f__nowreading(f__curunit))
+ err(a->cierr,errno,"read start");
+ if(f__curunit->uend)
+ err(f__elist->ciend,(EOF),"read start");
+ return(0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/rsli.c b/contrib/libs/libf2c/rsli.c
new file mode 100644
index 0000000000..3d4ea428fc
--- /dev/null
+++ b/contrib/libs/libf2c/rsli.c
@@ -0,0 +1,109 @@
+#include "f2c.h"
+#include "fio.h"
+#include "lio.h"
+#include "fmt.h" /* for f__doend */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern flag f__lquit;
+extern int f__lcount;
+extern char *f__icptr;
+extern char *f__icend;
+extern icilist *f__svic;
+extern int f__icnum, f__recpos;
+
+static int i_getc(Void)
+{
+ if(f__recpos >= f__svic->icirlen) {
+ if (f__recpos++ == f__svic->icirlen)
+ return '\n';
+ z_rnew();
+ }
+ f__recpos++;
+ if(f__icptr >= f__icend)
+ return EOF;
+ return(*f__icptr++);
+ }
+
+ static
+#ifdef KR_headers
+int i_ungetc(ch, f) int ch; FILE *f;
+#else
+int i_ungetc(int ch, FILE *f)
+#endif
+{
+ if (--f__recpos == f__svic->icirlen)
+ return '\n';
+ if (f__recpos < -1)
+ err(f__svic->icierr,110,"recend");
+ /* *--icptr == ch, and icptr may point to read-only memory */
+ return *--f__icptr /* = ch */;
+ }
+
+ static void
+#ifdef KR_headers
+c_lir(a) icilist *a;
+#else
+c_lir(icilist *a)
+#endif
+{
+ extern int l_eof;
+ f__reading = 1;
+ f__external = 0;
+ f__formatted = 1;
+ f__svic = a;
+ L_len = a->icirlen;
+ f__recpos = -1;
+ f__icnum = f__recpos = 0;
+ f__cursor = 0;
+ l_getc = i_getc;
+ l_ungetc = i_ungetc;
+ l_eof = 0;
+ f__icptr = a->iciunit;
+ f__icend = f__icptr + a->icirlen*a->icirnum;
+ f__cf = 0;
+ f__curunit = 0;
+ f__elist = (cilist *)a;
+ }
+
+
+#ifdef KR_headers
+integer s_rsli(a) icilist *a;
+#else
+integer s_rsli(icilist *a)
+#endif
+{
+ f__lioproc = l_read;
+ f__lquit = 0;
+ f__lcount = 0;
+ c_lir(a);
+ f__doend = 0;
+ return(0);
+ }
+
+integer e_rsli(Void)
+{ return 0; }
+
+#ifdef KR_headers
+integer s_rsni(a) icilist *a;
+#else
+extern int x_rsne(cilist*);
+
+integer s_rsni(icilist *a)
+#endif
+{
+ extern int nml_read;
+ integer rv;
+ cilist ca;
+ ca.ciend = a->iciend;
+ ca.cierr = a->icierr;
+ ca.cifmt = a->icifmt;
+ c_lir(a);
+ rv = x_rsne(&ca);
+ nml_read = 0;
+ return rv;
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/rsne.c b/contrib/libs/libf2c/rsne.c
new file mode 100644
index 0000000000..e8e9daea29
--- /dev/null
+++ b/contrib/libs/libf2c/rsne.c
@@ -0,0 +1,618 @@
+#include "f2c.h"
+#include "fio.h"
+#include "lio.h"
+
+#define MAX_NL_CACHE 3 /* maximum number of namelist hash tables to cache */
+#define MAXDIM 20 /* maximum number of subscripts */
+
+ struct dimen {
+ ftnlen extent;
+ ftnlen curval;
+ ftnlen delta;
+ ftnlen stride;
+ };
+ typedef struct dimen dimen;
+
+ struct hashentry {
+ struct hashentry *next;
+ char *name;
+ Vardesc *vd;
+ };
+ typedef struct hashentry hashentry;
+
+ struct hashtab {
+ struct hashtab *next;
+ Namelist *nl;
+ int htsize;
+ hashentry *tab[1];
+ };
+ typedef struct hashtab hashtab;
+
+ static hashtab *nl_cache;
+ static int n_nlcache;
+ static hashentry **zot;
+ static int colonseen;
+ extern ftnlen f__typesize[];
+
+ extern flag f__lquit;
+ extern int f__lcount, nml_read;
+ extern int t_getc(Void);
+
+#ifdef KR_headers
+ extern char *malloc(), *memset();
+#define Const /*nothing*/
+
+#ifdef ungetc
+ static int
+un_getc(x,f__cf) int x; FILE *f__cf;
+{ return ungetc(x,f__cf); }
+#else
+#define un_getc ungetc
+ extern int ungetc();
+#endif
+
+#else
+#define Const const
+#undef abs
+#undef min
+#undef max
+#include "stdlib.h"
+#include "string.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef ungetc
+ static int
+un_getc(int x, FILE *f__cf)
+{ return ungetc(x,f__cf); }
+#else
+#define un_getc ungetc
+extern int ungetc(int, FILE*); /* for systems with a buggy stdio.h */
+#endif
+#endif
+
+ static Vardesc *
+#ifdef KR_headers
+hash(ht, s) hashtab *ht; register char *s;
+#else
+hash(hashtab *ht, register char *s)
+#endif
+{
+ register int c, x;
+ register hashentry *h;
+ char *s0 = s;
+
+ for(x = 0; c = *s++; x = x & 0x4000 ? ((x << 1) & 0x7fff) + 1 : x << 1)
+ x += c;
+ for(h = *(zot = ht->tab + x % ht->htsize); h; h = h->next)
+ if (!strcmp(s0, h->name))
+ return h->vd;
+ return 0;
+ }
+
+ hashtab *
+#ifdef KR_headers
+mk_hashtab(nl) Namelist *nl;
+#else
+mk_hashtab(Namelist *nl)
+#endif
+{
+ int nht, nv;
+ hashtab *ht;
+ Vardesc *v, **vd, **vde;
+ hashentry *he;
+
+ hashtab **x, **x0, *y;
+ for(x = &nl_cache; y = *x; x0 = x, x = &y->next)
+ if (nl == y->nl)
+ return y;
+ if (n_nlcache >= MAX_NL_CACHE) {
+ /* discard least recently used namelist hash table */
+ y = *x0;
+ free((char *)y->next);
+ y->next = 0;
+ }
+ else
+ n_nlcache++;
+ nv = nl->nvars;
+ if (nv >= 0x4000)
+ nht = 0x7fff;
+ else {
+ for(nht = 1; nht < nv; nht <<= 1);
+ nht += nht - 1;
+ }
+ ht = (hashtab *)malloc(sizeof(hashtab) + (nht-1)*sizeof(hashentry *)
+ + nv*sizeof(hashentry));
+ if (!ht)
+ return 0;
+ he = (hashentry *)&ht->tab[nht];
+ ht->nl = nl;
+ ht->htsize = nht;
+ ht->next = nl_cache;
+ nl_cache = ht;
+ memset((char *)ht->tab, 0, nht*sizeof(hashentry *));
+ vd = nl->vars;
+ vde = vd + nv;
+ while(vd < vde) {
+ v = *vd++;
+ if (!hash(ht, v->name)) {
+ he->next = *zot;
+ *zot = he;
+ he->name = v->name;
+ he->vd = v;
+ he++;
+ }
+ }
+ return ht;
+ }
+
+static char Alpha[256], Alphanum[256];
+
+ static VOID
+nl_init(Void) {
+ Const char *s;
+ int c;
+
+ if(!f__init)
+ f_init();
+ for(s = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"; c = *s++; )
+ Alpha[c]
+ = Alphanum[c]
+ = Alpha[c + 'a' - 'A']
+ = Alphanum[c + 'a' - 'A']
+ = c;
+ for(s = "0123456789_"; c = *s++; )
+ Alphanum[c] = c;
+ }
+
+#define GETC(x) (x=(*l_getc)())
+#define Ungetc(x,y) (*l_ungetc)(x,y)
+
+ static int
+#ifdef KR_headers
+getname(s, slen) register char *s; int slen;
+#else
+getname(register char *s, int slen)
+#endif
+{
+ register char *se = s + slen - 1;
+ register int ch;
+
+ GETC(ch);
+ if (!(*s++ = Alpha[ch & 0xff])) {
+ if (ch != EOF)
+ ch = 115;
+ errfl(f__elist->cierr, ch, "namelist read");
+ }
+ while(*s = Alphanum[GETC(ch) & 0xff])
+ if (s < se)
+ s++;
+ if (ch == EOF)
+ err(f__elist->cierr, EOF, "namelist read");
+ if (ch > ' ')
+ Ungetc(ch,f__cf);
+ return *s = 0;
+ }
+
+ static int
+#ifdef KR_headers
+getnum(chp, val) int *chp; ftnlen *val;
+#else
+getnum(int *chp, ftnlen *val)
+#endif
+{
+ register int ch, sign;
+ register ftnlen x;
+
+ while(GETC(ch) <= ' ' && ch >= 0);
+ if (ch == '-') {
+ sign = 1;
+ GETC(ch);
+ }
+ else {
+ sign = 0;
+ if (ch == '+')
+ GETC(ch);
+ }
+ x = ch - '0';
+ if (x < 0 || x > 9)
+ return 115;
+ while(GETC(ch) >= '0' && ch <= '9')
+ x = 10*x + ch - '0';
+ while(ch <= ' ' && ch >= 0)
+ GETC(ch);
+ if (ch == EOF)
+ return EOF;
+ *val = sign ? -x : x;
+ *chp = ch;
+ return 0;
+ }
+
+ static int
+#ifdef KR_headers
+getdimen(chp, d, delta, extent, x1)
+ int *chp; dimen *d; ftnlen delta, extent, *x1;
+#else
+getdimen(int *chp, dimen *d, ftnlen delta, ftnlen extent, ftnlen *x1)
+#endif
+{
+ register int k;
+ ftnlen x2, x3;
+
+ if (k = getnum(chp, x1))
+ return k;
+ x3 = 1;
+ if (*chp == ':') {
+ if (k = getnum(chp, &x2))
+ return k;
+ x2 -= *x1;
+ if (*chp == ':') {
+ if (k = getnum(chp, &x3))
+ return k;
+ if (!x3)
+ return 123;
+ x2 /= x3;
+ colonseen = 1;
+ }
+ if (x2 < 0 || x2 >= extent)
+ return 123;
+ d->extent = x2 + 1;
+ }
+ else
+ d->extent = 1;
+ d->curval = 0;
+ d->delta = delta;
+ d->stride = x3;
+ return 0;
+ }
+
+#ifndef No_Namelist_Questions
+ static Void
+#ifdef KR_headers
+print_ne(a) cilist *a;
+#else
+print_ne(cilist *a)
+#endif
+{
+ flag intext = f__external;
+ int rpsave = f__recpos;
+ FILE *cfsave = f__cf;
+ unit *usave = f__curunit;
+ cilist t;
+ t = *a;
+ t.ciunit = 6;
+ s_wsne(&t);
+ fflush(f__cf);
+ f__external = intext;
+ f__reading = 1;
+ f__recpos = rpsave;
+ f__cf = cfsave;
+ f__curunit = usave;
+ f__elist = a;
+ }
+#endif
+
+ static char where0[] = "namelist read start ";
+
+ int
+#ifdef KR_headers
+x_rsne(a) cilist *a;
+#else
+x_rsne(cilist *a)
+#endif
+{
+ int ch, got1, k, n, nd, quote, readall;
+ Namelist *nl;
+ static char where[] = "namelist read";
+ char buf[64];
+ hashtab *ht;
+ Vardesc *v;
+ dimen *dn, *dn0, *dn1;
+ ftnlen *dims, *dims1;
+ ftnlen b, b0, b1, ex, no, nomax, size, span;
+ ftnint no1, no2, type;
+ char *vaddr;
+ long iva, ivae;
+ dimen dimens[MAXDIM], substr;
+
+ if (!Alpha['a'])
+ nl_init();
+ f__reading=1;
+ f__formatted=1;
+ got1 = 0;
+ top:
+ for(;;) switch(GETC(ch)) {
+ case EOF:
+ eof:
+ err(a->ciend,(EOF),where0);
+ case '&':
+ case '$':
+ goto have_amp;
+#ifndef No_Namelist_Questions
+ case '?':
+ print_ne(a);
+ continue;
+#endif
+ default:
+ if (ch <= ' ' && ch >= 0)
+ continue;
+#ifndef No_Namelist_Comments
+ while(GETC(ch) != '\n')
+ if (ch == EOF)
+ goto eof;
+#else
+ errfl(a->cierr, 115, where0);
+#endif
+ }
+ have_amp:
+ if (ch = getname(buf,sizeof(buf)))
+ return ch;
+ nl = (Namelist *)a->cifmt;
+ if (strcmp(buf, nl->name))
+#ifdef No_Bad_Namelist_Skip
+ errfl(a->cierr, 118, where0);
+#else
+ {
+ fprintf(stderr,
+ "Skipping namelist \"%s\": seeking namelist \"%s\".\n",
+ buf, nl->name);
+ fflush(stderr);
+ for(;;) switch(GETC(ch)) {
+ case EOF:
+ err(a->ciend, EOF, where0);
+ case '/':
+ case '&':
+ case '$':
+ if (f__external)
+ e_rsle();
+ else
+ z_rnew();
+ goto top;
+ case '"':
+ case '\'':
+ quote = ch;
+ more_quoted:
+ while(GETC(ch) != quote)
+ if (ch == EOF)
+ err(a->ciend, EOF, where0);
+ if (GETC(ch) == quote)
+ goto more_quoted;
+ Ungetc(ch,f__cf);
+ default:
+ continue;
+ }
+ }
+#endif
+ ht = mk_hashtab(nl);
+ if (!ht)
+ errfl(f__elist->cierr, 113, where0);
+ for(;;) {
+ for(;;) switch(GETC(ch)) {
+ case EOF:
+ if (got1)
+ return 0;
+ err(a->ciend, EOF, where0);
+ case '/':
+ case '$':
+ case '&':
+ return 0;
+ default:
+ if (ch <= ' ' && ch >= 0 || ch == ',')
+ continue;
+ Ungetc(ch,f__cf);
+ if (ch = getname(buf,sizeof(buf)))
+ return ch;
+ goto havename;
+ }
+ havename:
+ v = hash(ht,buf);
+ if (!v)
+ errfl(a->cierr, 119, where);
+ while(GETC(ch) <= ' ' && ch >= 0);
+ vaddr = v->addr;
+ type = v->type;
+ if (type < 0) {
+ size = -type;
+ type = TYCHAR;
+ }
+ else
+ size = f__typesize[type];
+ ivae = size;
+ iva = readall = 0;
+ if (ch == '(' /*)*/ ) {
+ dn = dimens;
+ if (!(dims = v->dims)) {
+ if (type != TYCHAR)
+ errfl(a->cierr, 122, where);
+ if (k = getdimen(&ch, dn, (ftnlen)size,
+ (ftnlen)size, &b))
+ errfl(a->cierr, k, where);
+ if (ch != ')')
+ errfl(a->cierr, 115, where);
+ b1 = dn->extent;
+ if (--b < 0 || b + b1 > size)
+ return 124;
+ iva += b;
+ size = b1;
+ while(GETC(ch) <= ' ' && ch >= 0);
+ goto scalar;
+ }
+ nd = (int)dims[0];
+ nomax = span = dims[1];
+ ivae = iva + size*nomax;
+ colonseen = 0;
+ if (k = getdimen(&ch, dn, size, nomax, &b))
+ errfl(a->cierr, k, where);
+ no = dn->extent;
+ b0 = dims[2];
+ dims1 = dims += 3;
+ ex = 1;
+ for(n = 1; n++ < nd; dims++) {
+ if (ch != ',')
+ errfl(a->cierr, 115, where);
+ dn1 = dn + 1;
+ span /= *dims;
+ if (k = getdimen(&ch, dn1, dn->delta**dims,
+ span, &b1))
+ errfl(a->cierr, k, where);
+ ex *= *dims;
+ b += b1*ex;
+ no *= dn1->extent;
+ dn = dn1;
+ }
+ if (ch != ')')
+ errfl(a->cierr, 115, where);
+ readall = 1 - colonseen;
+ b -= b0;
+ if (b < 0 || b >= nomax)
+ errfl(a->cierr, 125, where);
+ iva += size * b;
+ dims = dims1;
+ while(GETC(ch) <= ' ' && ch >= 0);
+ no1 = 1;
+ dn0 = dimens;
+ if (type == TYCHAR && ch == '(' /*)*/) {
+ if (k = getdimen(&ch, &substr, size, size, &b))
+ errfl(a->cierr, k, where);
+ if (ch != ')')
+ errfl(a->cierr, 115, where);
+ b1 = substr.extent;
+ if (--b < 0 || b + b1 > size)
+ return 124;
+ iva += b;
+ b0 = size;
+ size = b1;
+ while(GETC(ch) <= ' ' && ch >= 0);
+ if (b1 < b0)
+ goto delta_adj;
+ }
+ if (readall)
+ goto delta_adj;
+ for(; dn0 < dn; dn0++) {
+ if (dn0->extent != *dims++ || dn0->stride != 1)
+ break;
+ no1 *= dn0->extent;
+ }
+ if (dn0 == dimens && dimens[0].stride == 1) {
+ no1 = dimens[0].extent;
+ dn0++;
+ }
+ delta_adj:
+ ex = 0;
+ for(dn1 = dn0; dn1 <= dn; dn1++)
+ ex += (dn1->extent-1)
+ * (dn1->delta *= dn1->stride);
+ for(dn1 = dn; dn1 > dn0; dn1--) {
+ ex -= (dn1->extent - 1) * dn1->delta;
+ dn1->delta -= ex;
+ }
+ }
+ else if (dims = v->dims) {
+ no = no1 = dims[1];
+ ivae = iva + no*size;
+ }
+ else
+ scalar:
+ no = no1 = 1;
+ if (ch != '=')
+ errfl(a->cierr, 115, where);
+ got1 = nml_read = 1;
+ f__lcount = 0;
+ readloop:
+ for(;;) {
+ if (iva >= ivae || iva < 0) {
+ f__lquit = 1;
+ goto mustend;
+ }
+ else if (iva + no1*size > ivae)
+ no1 = (ivae - iva)/size;
+ f__lquit = 0;
+ if (k = l_read(&no1, vaddr + iva, size, type))
+ return k;
+ if (f__lquit == 1)
+ return 0;
+ if (readall) {
+ iva += dn0->delta;
+ if (f__lcount > 0) {
+ no2 = (ivae - iva)/size;
+ if (no2 > f__lcount)
+ no2 = f__lcount;
+ if (k = l_read(&no2, vaddr + iva,
+ size, type))
+ return k;
+ iva += no2 * dn0->delta;
+ }
+ }
+ mustend:
+ GETC(ch);
+ if (readall)
+ if (iva >= ivae)
+ readall = 0;
+ else for(;;) {
+ switch(ch) {
+ case ' ':
+ case '\t':
+ case '\n':
+ GETC(ch);
+ continue;
+ }
+ break;
+ }
+ if (ch == '/' || ch == '$' || ch == '&') {
+ f__lquit = 1;
+ return 0;
+ }
+ else if (f__lquit) {
+ while(ch <= ' ' && ch >= 0)
+ GETC(ch);
+ Ungetc(ch,f__cf);
+ if (!Alpha[ch & 0xff] && ch >= 0)
+ errfl(a->cierr, 125, where);
+ break;
+ }
+ Ungetc(ch,f__cf);
+ if (readall && !Alpha[ch & 0xff])
+ goto readloop;
+ if ((no -= no1) <= 0)
+ break;
+ for(dn1 = dn0; dn1 <= dn; dn1++) {
+ if (++dn1->curval < dn1->extent) {
+ iva += dn1->delta;
+ goto readloop;
+ }
+ dn1->curval = 0;
+ }
+ break;
+ }
+ }
+ }
+
+ integer
+#ifdef KR_headers
+s_rsne(a) cilist *a;
+#else
+s_rsne(cilist *a)
+#endif
+{
+ extern int l_eof;
+ int n;
+
+ f__external=1;
+ l_eof = 0;
+ if(n = c_le(a))
+ return n;
+ if(f__curunit->uwrt && f__nowreading(f__curunit))
+ err(a->cierr,errno,where0);
+ l_getc = t_getc;
+ l_ungetc = un_getc;
+ f__doend = xrd_SL;
+ n = x_rsne(a);
+ nml_read = 0;
+ if (n)
+ return n;
+ return e_rsle();
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/s_cat.c b/contrib/libs/libf2c/s_cat.c
new file mode 100644
index 0000000000..8d92a637f3
--- /dev/null
+++ b/contrib/libs/libf2c/s_cat.c
@@ -0,0 +1,86 @@
+/* Unless compiled with -DNO_OVERWRITE, this variant of s_cat allows the
+ * target of a concatenation to appear on its right-hand side (contrary
+ * to the Fortran 77 Standard, but in accordance with Fortran 90).
+ */
+
+#include "f2c.h"
+#ifndef NO_OVERWRITE
+#include "stdio.h"
+#undef abs
+#ifdef KR_headers
+ extern char *F77_aloc();
+ extern void free();
+ extern void exit_();
+#else
+#undef min
+#undef max
+#include "stdlib.h"
+extern
+#ifdef __cplusplus
+ "C"
+#endif
+ char *F77_aloc(ftnlen, const char*);
+#endif
+#include "string.h"
+#endif /* NO_OVERWRITE */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ VOID
+#ifdef KR_headers
+s_cat(lp, rpp, rnp, np, ll) char *lp, *rpp[]; ftnint rnp[], *np; ftnlen ll;
+#else
+s_cat(char *lp, char *rpp[], ftnint rnp[], ftnint *np, ftnlen ll)
+#endif
+{
+ ftnlen i, nc;
+ char *rp;
+ ftnlen n = *np;
+#ifndef NO_OVERWRITE
+ ftnlen L, m;
+ char *lp0, *lp1;
+
+ lp0 = 0;
+ lp1 = lp;
+ L = ll;
+ i = 0;
+ while(i < n) {
+ rp = rpp[i];
+ m = rnp[i++];
+ if (rp >= lp1 || rp + m <= lp) {
+ if ((L -= m) <= 0) {
+ n = i;
+ break;
+ }
+ lp1 += m;
+ continue;
+ }
+ lp0 = lp;
+ lp = lp1 = F77_aloc(L = ll, "s_cat");
+ break;
+ }
+ lp1 = lp;
+#endif /* NO_OVERWRITE */
+ for(i = 0 ; i < n ; ++i) {
+ nc = ll;
+ if(rnp[i] < nc)
+ nc = rnp[i];
+ ll -= nc;
+ rp = rpp[i];
+ while(--nc >= 0)
+ *lp++ = *rp++;
+ }
+ while(--ll >= 0)
+ *lp++ = ' ';
+#ifndef NO_OVERWRITE
+ if (lp0) {
+ memcpy(lp0, lp1, L);
+ free(lp1);
+ }
+#endif
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/s_cmp.c b/contrib/libs/libf2c/s_cmp.c
new file mode 100644
index 0000000000..3a2ea67ddf
--- /dev/null
+++ b/contrib/libs/libf2c/s_cmp.c
@@ -0,0 +1,50 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* compare two strings */
+
+#ifdef KR_headers
+integer s_cmp(a0, b0, la, lb) char *a0, *b0; ftnlen la, lb;
+#else
+integer s_cmp(char *a0, char *b0, ftnlen la, ftnlen lb)
+#endif
+{
+register unsigned char *a, *aend, *b, *bend;
+a = (unsigned char *)a0;
+b = (unsigned char *)b0;
+aend = a + la;
+bend = b + lb;
+
+if(la <= lb)
+ {
+ while(a < aend)
+ if(*a != *b)
+ return( *a - *b );
+ else
+ { ++a; ++b; }
+
+ while(b < bend)
+ if(*b != ' ')
+ return( ' ' - *b );
+ else ++b;
+ }
+
+else
+ {
+ while(b < bend)
+ if(*a == *b)
+ { ++a; ++b; }
+ else
+ return( *a - *b );
+ while(a < aend)
+ if(*a != ' ')
+ return(*a - ' ');
+ else ++a;
+ }
+return(0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/s_copy.c b/contrib/libs/libf2c/s_copy.c
new file mode 100644
index 0000000000..3425148820
--- /dev/null
+++ b/contrib/libs/libf2c/s_copy.c
@@ -0,0 +1,68 @@
+/* Unless compiled with -DNO_OVERWRITE, this variant of s_copy allows the
+ * target of an assignment to appear on its right-hand side (contrary
+ * to the Fortran 77 Standard, but in accordance with Fortran 90),
+ * as in a(2:5) = a(4:7) .
+ */
+
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* assign strings: a = b */
+// command to get sanitizer error:
+// ya make -tAr --sanitize=address cv/imgclassifiers/danet/tests/minimize_memory_ut/ -F TMinimizeMemoryTest::TestMinimizeMemoryUsageModeTrain
+#if defined(__has_feature)
+# if __has_feature(address_sanitizer)
+ __attribute__((no_sanitize("address")))
+# endif
+#endif
+#if defined(__has_feature)
+# if __has_feature(memory_sanitizer)
+ __attribute__((no_sanitize("memory")))
+# endif
+#endif
+#ifdef KR_headers
+VOID s_copy(a, b, la, lb) register char *a, *b; ftnlen la, lb;
+#else
+void s_copy(register char *a, register char *b, ftnlen la, ftnlen lb)
+#endif
+{
+ register char *aend, *bend;
+
+ aend = a + la;
+
+ if(la <= lb)
+#ifndef NO_OVERWRITE
+ if (a <= b || a >= b + la)
+#endif
+ while(a < aend)
+ *a++ = *b++;
+#ifndef NO_OVERWRITE
+ else
+ for(b += la; a < aend; )
+ *--aend = *--b;
+#endif
+
+ else {
+ bend = b + lb;
+#ifndef NO_OVERWRITE
+ if (a <= b || a >= bend)
+#endif
+ while(b < bend)
+ *a++ = *b++;
+#ifndef NO_OVERWRITE
+ else {
+ a += lb;
+ while(b < bend)
+ *--a = *--bend;
+ a += lb;
+ }
+#endif
+ while(a < aend)
+ *a++ = ' ';
+ }
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/s_paus.c b/contrib/libs/libf2c/s_paus.c
new file mode 100644
index 0000000000..51d80eb087
--- /dev/null
+++ b/contrib/libs/libf2c/s_paus.c
@@ -0,0 +1,96 @@
+#include "stdio.h"
+#include "f2c.h"
+#define PAUSESIG 15
+
+#include "signal1.h"
+#ifdef KR_headers
+#define Void /* void */
+#define Int /* int */
+#else
+#define Void void
+#define Int int
+#undef abs
+#undef min
+#undef max
+#include "stdlib.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern int getpid(void), isatty(int), pause(void);
+#endif
+
+extern VOID f_exit(Void);
+
+#ifndef MSDOS
+ static VOID
+waitpause(Sigarg)
+{ Use_Sigarg;
+ return;
+ }
+#endif
+
+ static VOID
+#ifdef KR_headers
+s_1paus(fin) FILE *fin;
+#else
+s_1paus(FILE *fin)
+#endif
+{
+ fprintf(stderr,
+ "To resume execution, type go. Other input will terminate the job.\n");
+ fflush(stderr);
+ if( getc(fin)!='g' || getc(fin)!='o' || getc(fin)!='\n' ) {
+ fprintf(stderr, "STOP\n");
+#ifdef NO_ONEXIT
+ f_exit();
+#endif
+ exit(0);
+ }
+ }
+
+ int
+#ifdef KR_headers
+s_paus(s, n) char *s; ftnlen n;
+#else
+s_paus(char *s, ftnlen n)
+#endif
+{
+ fprintf(stderr, "PAUSE ");
+ if(n > 0)
+ fprintf(stderr, " %.*s", (int)n, s);
+ fprintf(stderr, " statement executed\n");
+ if( isatty(fileno(stdin)) )
+ s_1paus(stdin);
+ else {
+#ifdef MSDOS
+ FILE *fin;
+ fin = fopen("con", "r");
+ if (!fin) {
+ fprintf(stderr, "s_paus: can't open con!\n");
+ fflush(stderr);
+ exit(1);
+ }
+ s_1paus(fin);
+ fclose(fin);
+#else
+ fprintf(stderr,
+ "To resume execution, execute a kill -%d %d command\n",
+ PAUSESIG, getpid() );
+ signal1(PAUSESIG, waitpause);
+ fflush(stderr);
+ pause();
+#endif
+ }
+ fprintf(stderr, "Execution resumes after PAUSE.\n");
+ fflush(stderr);
+ return 0; /* NOT REACHED */
+#ifdef __cplusplus
+ }
+#endif
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/s_rnge.c b/contrib/libs/libf2c/s_rnge.c
new file mode 100644
index 0000000000..3dbc513554
--- /dev/null
+++ b/contrib/libs/libf2c/s_rnge.c
@@ -0,0 +1,32 @@
+#include "stdio.h"
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* called when a subscript is out of range */
+
+#ifdef KR_headers
+extern VOID sig_die();
+integer s_rnge(varn, offset, procn, line) char *varn, *procn; ftnint offset, line;
+#else
+extern VOID sig_die(const char*,int);
+integer s_rnge(char *varn, ftnint offset, char *procn, ftnint line)
+#endif
+{
+register int i;
+
+fprintf(stderr, "Subscript out of range on file line %ld, procedure ",
+ (long)line);
+while((i = *procn) && i != '_' && i != ' ')
+ putc(*procn++, stderr);
+fprintf(stderr, ".\nAttempt to access the %ld-th element of variable ",
+ (long)offset+1);
+while((i = *varn) && i != ' ')
+ putc(*varn++, stderr);
+sig_die(".", 1);
+return 0; /* not reached */
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/s_stop.c b/contrib/libs/libf2c/s_stop.c
new file mode 100644
index 0000000000..68233aea7e
--- /dev/null
+++ b/contrib/libs/libf2c/s_stop.c
@@ -0,0 +1,48 @@
+#include "stdio.h"
+#include "f2c.h"
+
+#ifdef KR_headers
+extern void f_exit();
+int s_stop(s, n) char *s; ftnlen n;
+#else
+#undef abs
+#undef min
+#undef max
+#include "stdlib.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+#ifdef __cplusplus
+extern "C" {
+#endif
+void f_exit(void);
+
+int s_stop(char *s, ftnlen n)
+#endif
+{
+int i;
+
+if(n > 0)
+ {
+ fprintf(stderr, "STOP ");
+ for(i = 0; i<n ; ++i)
+ putc(*s++, stderr);
+ fprintf(stderr, " statement executed\n");
+ }
+#ifdef NO_ONEXIT
+f_exit();
+#endif
+exit(0);
+
+/* We cannot avoid (useless) compiler diagnostics here: */
+/* some compilers complain if there is no return statement, */
+/* and others complain that this one cannot be reached. */
+
+return 0; /* NOT REACHED */
+}
+#ifdef __cplusplus
+}
+#endif
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/sfe.c b/contrib/libs/libf2c/sfe.c
new file mode 100644
index 0000000000..d24af6d936
--- /dev/null
+++ b/contrib/libs/libf2c/sfe.c
@@ -0,0 +1,47 @@
+/* sequential formatted external common routines*/
+#include "f2c.h"
+#include "fio.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+extern char *f__fmtbuf;
+#else
+extern const char *f__fmtbuf;
+#endif
+
+integer e_rsfe(Void)
+{ int n;
+ n=en_fio();
+ f__fmtbuf=NULL;
+ return(n);
+}
+
+ int
+#ifdef KR_headers
+c_sfe(a) cilist *a; /* check */
+#else
+c_sfe(cilist *a) /* check */
+#endif
+{ unit *p;
+ f__curunit = p = &f__units[a->ciunit];
+ if(a->ciunit >= MXUNIT || a->ciunit<0)
+ err(a->cierr,101,"startio");
+ if(p->ufd==NULL && fk_open(SEQ,FMT,a->ciunit)) err(a->cierr,114,"sfe")
+ if(!p->ufmt) err(a->cierr,102,"sfe")
+ return(0);
+}
+integer e_wsfe(Void)
+{
+ int n = en_fio();
+ f__fmtbuf = NULL;
+#ifdef ALWAYS_FLUSH
+ if (!n && fflush(f__cf))
+ err(f__elist->cierr, errno, "write end");
+#endif
+ return n;
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/sig_die.c b/contrib/libs/libf2c/sig_die.c
new file mode 100644
index 0000000000..63a73d9118
--- /dev/null
+++ b/contrib/libs/libf2c/sig_die.c
@@ -0,0 +1,51 @@
+#include "stdio.h"
+#include "signal.h"
+
+#ifndef SIGIOT
+#ifdef SIGABRT
+#define SIGIOT SIGABRT
+#endif
+#endif
+
+#ifdef KR_headers
+void sig_die(s, kill) char *s; int kill;
+#else
+#include "stdlib.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+#ifdef __cplusplus
+extern "C" {
+#endif
+ extern void f_exit(void);
+
+void sig_die(const char *s, int kill)
+#endif
+{
+ /* print error message, then clear buffers */
+ fprintf(stderr, "%s\n", s);
+
+ if(kill)
+ {
+ fflush(stderr);
+ f_exit();
+ fflush(stderr);
+ /* now get a core */
+#ifdef SIGIOT
+ signal(SIGIOT, SIG_DFL);
+#endif
+ abort();
+ }
+ else {
+#ifdef NO_ONEXIT
+ f_exit();
+#endif
+ exit(1);
+ }
+ }
+#ifdef __cplusplus
+}
+#endif
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/signal1.h b/contrib/libs/libf2c/signal1.h
new file mode 100644
index 0000000000..a383774b82
--- /dev/null
+++ b/contrib/libs/libf2c/signal1.h
@@ -0,0 +1,35 @@
+/* You may need to adjust the definition of signal1 to supply a */
+/* cast to the correct argument type. This detail is system- and */
+/* compiler-dependent. The #define below assumes signal.h declares */
+/* type SIG_PF for the signal function's second argument. */
+
+/* For some C++ compilers, "#define Sigarg_t ..." may be appropriate. */
+
+#include <signal.h>
+
+#ifndef Sigret_t
+#define Sigret_t void
+#endif
+#ifndef Sigarg_t
+#ifdef KR_headers
+#define Sigarg_t
+#else
+#define Sigarg_t int
+#endif
+#endif /*Sigarg_t*/
+
+#ifdef USE_SIG_PF /* compile with -DUSE_SIG_PF under IRIX */
+#define sig_pf SIG_PF
+#else
+typedef Sigret_t (*sig_pf)(Sigarg_t);
+#endif
+
+#define signal1(a,b) signal(a,(sig_pf)b)
+
+#ifdef __cplusplus
+#define Sigarg ...
+#define Use_Sigarg
+#else
+#define Sigarg Int n
+#define Use_Sigarg n = n /* shut up compiler warning */
+#endif
diff --git a/contrib/libs/libf2c/signal_.c b/contrib/libs/libf2c/signal_.c
new file mode 100644
index 0000000000..3b0e6cfe98
--- /dev/null
+++ b/contrib/libs/libf2c/signal_.c
@@ -0,0 +1,21 @@
+#include "f2c.h"
+#include "signal1.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ ftnint
+#ifdef KR_headers
+signal_(sigp, proc) integer *sigp; sig_pf proc;
+#else
+signal_(integer *sigp, sig_pf proc)
+#endif
+{
+ int sig;
+ sig = (int)*sigp;
+
+ return (ftnint)signal(sig, proc);
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/sue.c b/contrib/libs/libf2c/sue.c
new file mode 100644
index 0000000000..191e32627c
--- /dev/null
+++ b/contrib/libs/libf2c/sue.c
@@ -0,0 +1,90 @@
+#include "f2c.h"
+#include "fio.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern uiolen f__reclen;
+OFF_T f__recloc;
+
+ int
+#ifdef KR_headers
+c_sue(a) cilist *a;
+#else
+c_sue(cilist *a)
+#endif
+{
+ f__external=f__sequential=1;
+ f__formatted=0;
+ f__curunit = &f__units[a->ciunit];
+ if(a->ciunit >= MXUNIT || a->ciunit < 0)
+ err(a->cierr,101,"startio");
+ f__elist=a;
+ if(f__curunit->ufd==NULL && fk_open(SEQ,UNF,a->ciunit))
+ err(a->cierr,114,"sue");
+ f__cf=f__curunit->ufd;
+ if(f__curunit->ufmt) err(a->cierr,103,"sue")
+ if(!f__curunit->useek) err(a->cierr,103,"sue")
+ return(0);
+}
+#ifdef KR_headers
+integer s_rsue(a) cilist *a;
+#else
+integer s_rsue(cilist *a)
+#endif
+{
+ int n;
+ if(!f__init) f_init();
+ f__reading=1;
+ if(n=c_sue(a)) return(n);
+ f__recpos=0;
+ if(f__curunit->uwrt && f__nowreading(f__curunit))
+ err(a->cierr, errno, "read start");
+ if(fread((char *)&f__reclen,sizeof(uiolen),1,f__cf)
+ != 1)
+ { if(feof(f__cf))
+ { f__curunit->uend = 1;
+ err(a->ciend, EOF, "start");
+ }
+ clearerr(f__cf);
+ err(a->cierr, errno, "start");
+ }
+ return(0);
+}
+#ifdef KR_headers
+integer s_wsue(a) cilist *a;
+#else
+integer s_wsue(cilist *a)
+#endif
+{
+ int n;
+ if(!f__init) f_init();
+ if(n=c_sue(a)) return(n);
+ f__reading=0;
+ f__reclen=0;
+ if(f__curunit->uwrt != 1 && f__nowwriting(f__curunit))
+ err(a->cierr, errno, "write start");
+ f__recloc=FTELL(f__cf);
+ FSEEK(f__cf,(OFF_T)sizeof(uiolen),SEEK_CUR);
+ return(0);
+}
+integer e_wsue(Void)
+{ OFF_T loc;
+ fwrite((char *)&f__reclen,sizeof(uiolen),1,f__cf);
+#ifdef ALWAYS_FLUSH
+ if (fflush(f__cf))
+ err(f__elist->cierr, errno, "write end");
+#endif
+ loc=FTELL(f__cf);
+ FSEEK(f__cf,f__recloc,SEEK_SET);
+ fwrite((char *)&f__reclen,sizeof(uiolen),1,f__cf);
+ FSEEK(f__cf,loc,SEEK_SET);
+ return(0);
+}
+integer e_rsue(Void)
+{
+ FSEEK(f__cf,(OFF_T)(f__reclen-f__recpos+sizeof(uiolen)),SEEK_CUR);
+ return(0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/sysdep1.h b/contrib/libs/libf2c/sysdep1.h
new file mode 100644
index 0000000000..441d6964d8
--- /dev/null
+++ b/contrib/libs/libf2c/sysdep1.h
@@ -0,0 +1,71 @@
+#ifndef SYSDEP_H_INCLUDED
+#define SYSDEP_H_INCLUDED
+#undef USE_LARGEFILE
+#ifndef NO_LONG_LONG
+
+#ifdef __sun__
+#define USE_LARGEFILE
+#define OFF_T off64_t
+#endif
+
+#ifdef __linux__
+#define USE_LARGEFILE
+#define OFF_T off64_t
+#endif
+
+#ifdef _AIX43
+#define _LARGE_FILES
+#define _LARGE_FILE_API
+#define USE_LARGEFILE
+#endif /*_AIX43*/
+
+#ifdef __hpux
+#define _FILE64
+#define _LARGEFILE64_SOURCE
+#define USE_LARGEFILE
+#endif /*__hpux*/
+
+#ifdef __sgi
+#define USE_LARGEFILE
+#endif /*__sgi*/
+
+#ifdef __FreeBSD__
+#define OFF_T off_t
+#define FSEEK fseeko
+#define FTELL ftello
+#endif
+
+#ifdef __ANDROID__
+#undef USE_LARGEFILE
+#define OFF_T off64_t
+#endif
+
+#ifdef USE_LARGEFILE
+#ifndef OFF_T
+#define OFF_T off64_t
+#endif
+#define _LARGEFILE_SOURCE
+#define _LARGEFILE64_SOURCE
+#include <sys/types.h>
+#include <sys/stat.h>
+#define FOPEN fopen64
+#define FREOPEN freopen64
+#define FSEEK fseeko64
+#define FSTAT fstat64
+#define FTELL ftello64
+#define FTRUNCATE ftruncate64
+#define STAT stat64
+#define STAT_ST stat64
+#endif /*USE_LARGEFILE*/
+#endif /*NO_LONG_LONG*/
+
+#ifndef NON_UNIX_STDIO
+#ifndef USE_LARGEFILE
+#define _INCLUDE_POSIX_SOURCE /* for HP-UX */
+#define _INCLUDE_XOPEN_SOURCE /* for HP-UX */
+#include "sys/types.h"
+#include "sys/stat.h"
+#endif
+#endif
+
+#endif /*SYSDEP_H_INCLUDED*/
diff --git a/contrib/libs/libf2c/system_.c b/contrib/libs/libf2c/system_.c
new file mode 100644
index 0000000000..153a2ef1c9
--- /dev/null
+++ b/contrib/libs/libf2c/system_.c
@@ -0,0 +1,47 @@
+/* f77 interface to system routine */
+
+#include "f2c.h"
+
+#ifdef KR_headers
+extern char *F77_aloc();
+
+ integer
+system_(s, n) register char *s; ftnlen n;
+#else
+#undef abs
+#undef min
+#undef max
+#include "stdlib.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern char *F77_aloc(ftnlen, const char*);
+
+ integer
+system_(register char *s, ftnlen n)
+#endif
+{
+#if !defined(__IOS__)
+ char buff0[256], *buff;
+ register char *bp, *blast;
+ integer rv;
+
+ buff = bp = n < sizeof(buff0)
+ ? buff0 : F77_aloc(n+1, "system_");
+ blast = bp + n;
+
+ while(bp < blast && *s)
+ *bp++ = *s++;
+ *bp = 0;
+ rv = system(buff);
+ if (buff != buff0)
+ free(buff);
+ return rv;
+#else
+ // system() is not available on iOS
+ return -1;
+#endif
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/typesize.c b/contrib/libs/libf2c/typesize.c
new file mode 100644
index 0000000000..39097f4699
--- /dev/null
+++ b/contrib/libs/libf2c/typesize.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ftnlen f__typesize[] = { 0, 0, sizeof(shortint), sizeof(integer),
+ sizeof(real), sizeof(doublereal),
+ sizeof(complex), sizeof(doublecomplex),
+ sizeof(logical), sizeof(char),
+ 0, sizeof(integer1),
+ sizeof(logical1), sizeof(shortlogical),
+#ifdef Allow_TYQUAD
+ sizeof(longint),
+#endif
+ 0};
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/uio.c b/contrib/libs/libf2c/uio.c
new file mode 100644
index 0000000000..44f768d9a2
--- /dev/null
+++ b/contrib/libs/libf2c/uio.c
@@ -0,0 +1,75 @@
+#include "f2c.h"
+#include "fio.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+uiolen f__reclen;
+
+ int
+#ifdef KR_headers
+do_us(number,ptr,len) ftnint *number; char *ptr; ftnlen len;
+#else
+do_us(ftnint *number, char *ptr, ftnlen len)
+#endif
+{
+ if(f__reading)
+ {
+ f__recpos += (int)(*number * len);
+ if(f__recpos>f__reclen)
+ err(f__elist->cierr, 110, "do_us");
+ if (fread(ptr,(int)len,(int)(*number),f__cf) != *number)
+ err(f__elist->ciend, EOF, "do_us");
+ return(0);
+ }
+ else
+ {
+ f__reclen += *number * len;
+ (void) fwrite(ptr,(int)len,(int)(*number),f__cf);
+ return(0);
+ }
+}
+#ifdef KR_headers
+integer do_ud(number,ptr,len) ftnint *number; char *ptr; ftnlen len;
+#else
+integer do_ud(ftnint *number, char *ptr, ftnlen len)
+#endif
+{
+ f__recpos += (int)(*number * len);
+ if(f__recpos > f__curunit->url && f__curunit->url!=1)
+ err(f__elist->cierr,110,"do_ud");
+ if(f__reading)
+ {
+#ifdef Pad_UDread
+#ifdef KR_headers
+ int i;
+#else
+ size_t i;
+#endif
+ if (!(i = fread(ptr,(int)len,(int)(*number),f__cf))
+ && !(f__recpos - *number*len))
+ err(f__elist->cierr,EOF,"do_ud")
+ if (i < *number)
+ memset(ptr + i*len, 0, (*number - i)*len);
+ return 0;
+#else
+ if(fread(ptr,(int)len,(int)(*number),f__cf) != *number)
+ err(f__elist->cierr,EOF,"do_ud")
+ else return(0);
+#endif
+ }
+ (void) fwrite(ptr,(int)len,(int)(*number),f__cf);
+ return(0);
+}
+#ifdef KR_headers
+integer do_uio(number,ptr,len) ftnint *number; char *ptr; ftnlen len;
+#else
+integer do_uio(ftnint *number, char *ptr, ftnlen len)
+#endif
+{
+ if(f__sequential)
+ return(do_us(number,ptr,len));
+ else return(do_ud(number,ptr,len));
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/uninit.c b/contrib/libs/libf2c/uninit.c
new file mode 100644
index 0000000000..05d162bbac
--- /dev/null
+++ b/contrib/libs/libf2c/uninit.c
@@ -0,0 +1,379 @@
+#include <stdio.h>
+#include <string.h>
+#include "arith.h"
+
+#define TYSHORT 2
+#define TYLONG 3
+#define TYREAL 4
+#define TYDREAL 5
+#define TYCOMPLEX 6
+#define TYDCOMPLEX 7
+#define TYINT1 11
+#define TYQUAD 14
+#ifndef Long
+#define Long long
+#endif
+
+#ifdef __mips
+#define RNAN 0xffc00000
+#define DNAN0 0xfff80000
+#define DNAN1 0
+#endif
+
+#ifdef _PA_RISC1_1
+#define RNAN 0xffc00000
+#define DNAN0 0xfff80000
+#define DNAN1 0
+#endif
+
+#ifndef RNAN
+#define RNAN 0xff800001
+#ifdef IEEE_MC68k
+#define DNAN0 0xfff00000
+#define DNAN1 1
+#else
+#define DNAN0 1
+#define DNAN1 0xfff00000
+#endif
+#endif /*RNAN*/
+
+#ifdef KR_headers
+#define Void /*void*/
+#define FA7UL (unsigned Long) 0xfa7a7a7aL
+#else
+#define Void void
+#define FA7UL 0xfa7a7a7aUL
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static void ieee0(Void);
+
+static unsigned Long rnan = RNAN,
+ dnan0 = DNAN0,
+ dnan1 = DNAN1;
+
+double _0 = 0.;
+
+ void
+#ifdef KR_headers
+_uninit_f2c(x, type, len) void *x; int type; long len;
+#else
+_uninit_f2c(void *x, int type, long len)
+#endif
+{
+ static int first = 1;
+
+ unsigned Long *lx, *lxe;
+
+ if (first) {
+ first = 0;
+ ieee0();
+ }
+ if (len == 1)
+ switch(type) {
+ case TYINT1:
+ *(char*)x = 'Z';
+ return;
+ case TYSHORT:
+ *(short*)x = 0xfa7a;
+ break;
+ case TYLONG:
+ *(unsigned Long*)x = FA7UL;
+ return;
+ case TYQUAD:
+ case TYCOMPLEX:
+ case TYDCOMPLEX:
+ break;
+ case TYREAL:
+ *(unsigned Long*)x = rnan;
+ return;
+ case TYDREAL:
+ lx = (unsigned Long*)x;
+ lx[0] = dnan0;
+ lx[1] = dnan1;
+ return;
+ default:
+ printf("Surprise type %d in _uninit_f2c\n", type);
+ }
+ switch(type) {
+ case TYINT1:
+ memset(x, 'Z', len);
+ break;
+ case TYSHORT:
+ *(short*)x = 0xfa7a;
+ break;
+ case TYQUAD:
+ len *= 2;
+ /* no break */
+ case TYLONG:
+ lx = (unsigned Long*)x;
+ lxe = lx + len;
+ while(lx < lxe)
+ *lx++ = FA7UL;
+ break;
+ case TYCOMPLEX:
+ len *= 2;
+ /* no break */
+ case TYREAL:
+ lx = (unsigned Long*)x;
+ lxe = lx + len;
+ while(lx < lxe)
+ *lx++ = rnan;
+ break;
+ case TYDCOMPLEX:
+ len *= 2;
+ /* no break */
+ case TYDREAL:
+ lx = (unsigned Long*)x;
+ for(lxe = lx + 2*len; lx < lxe; lx += 2) {
+ lx[0] = dnan0;
+ lx[1] = dnan1;
+ }
+ }
+ }
+#ifdef __cplusplus
+}
+#endif
+
+#ifndef MSpc
+#ifdef MSDOS
+#define MSpc
+#else
+#ifdef _WIN32
+#define MSpc
+#endif
+#endif
+#endif
+
+#ifdef MSpc
+#define IEEE0_done
+#include "float.h"
+#include "signal.h"
+
+ static void
+ieee0(Void)
+{
+#ifndef __alpha
+#ifndef EM_DENORMAL
+#define EM_DENORMAL _EM_DENORMAL
+#endif
+#ifndef EM_UNDERFLOW
+#define EM_UNDERFLOW _EM_UNDERFLOW
+#endif
+#ifndef EM_INEXACT
+#define EM_INEXACT _EM_INEXACT
+#endif
+#ifndef MCW_EM
+#define MCW_EM _MCW_EM
+#endif
+ _control87(EM_DENORMAL | EM_UNDERFLOW | EM_INEXACT, MCW_EM);
+#endif
+ /* With MS VC++, compiling and linking with -Zi will permit */
+ /* clicking to invoke the MS C++ debugger, which will show */
+ /* the point of error -- provided SIGFPE is SIG_DFL. */
+ signal(SIGFPE, SIG_DFL);
+ }
+#endif /* MSpc */
+
+#ifdef __mips /* must link with -lfpe */
+#define IEEE0_done
+/* code from Eric Grosse */
+#include <stdlib.h>
+#include <stdio.h>
+#error #include "/usr/include/sigfpe.h" /* full pathname for lcc -N */
+#error #include "/usr/include/sys/fpu.h"
+
+ static void
+#ifdef KR_headers
+ieeeuserhand(exception, val) unsigned exception[5]; int val[2];
+#else
+ieeeuserhand(unsigned exception[5], int val[2])
+#endif
+{
+ fflush(stdout);
+ fprintf(stderr,"ieee0() aborting because of ");
+ if(exception[0]==_OVERFL) fprintf(stderr,"overflow\n");
+ else if(exception[0]==_UNDERFL) fprintf(stderr,"underflow\n");
+ else if(exception[0]==_DIVZERO) fprintf(stderr,"divide by 0\n");
+ else if(exception[0]==_INVALID) fprintf(stderr,"invalid operation\n");
+ else fprintf(stderr,"\tunknown reason\n");
+ fflush(stderr);
+ abort();
+}
+
+ static void
+#ifdef KR_headers
+ieeeuserhand2(j) unsigned int **j;
+#else
+ieeeuserhand2(unsigned int **j)
+#endif
+{
+ fprintf(stderr,"ieee0() aborting because of confusion\n");
+ abort();
+}
+
+ static void
+ieee0(Void)
+{
+ int i;
+ for(i=1; i<=4; i++){
+ sigfpe_[i].count = 1000;
+ sigfpe_[i].trace = 1;
+ sigfpe_[i].repls = _USER_DETERMINED;
+ }
+ sigfpe_[1].repls = _ZERO; /* underflow */
+ handle_sigfpes( _ON,
+ _EN_UNDERFL|_EN_OVERFL|_EN_DIVZERO|_EN_INVALID,
+ ieeeuserhand,_ABORT_ON_ERROR,ieeeuserhand2);
+ }
+#endif /* mips */
+
+#if 0
+#ifdef __linux__
+#define IEEE0_done
+#include "fpu_control.h"
+
+#ifdef __alpha__
+#ifndef USE_setfpucw
+#define __setfpucw(x) __fpu_control = (x)
+#endif
+#endif
+
+#ifndef _FPU_SETCW
+#undef Can_use__setfpucw
+#define Can_use__setfpucw
+#endif
+
+ static void
+ieee0(Void)
+{
+#if (defined(__mc68000__) || defined(__mc68020__) || defined(mc68020) || defined (__mc68k__))
+/* Reported 20010705 by Alan Bain <alanb@chiark.greenend.org.uk> */
+/* Note that IEEE 754 IOP (illegal operation) */
+/* = Signaling NAN (SNAN) + operation error (OPERR). */
+#ifdef Can_use__setfpucw
+ __setfpucw(_FPU_IEEE + _FPU_DOUBLE + _FPU_MASK_OPERR + _FPU_MASK_DZ + _FPU_MASK_SNAN+_FPU_MASK_OVFL);
+#else
+ __fpu_control = _FPU_IEEE + _FPU_DOUBLE + _FPU_MASK_OPERR + _FPU_MASK_DZ + _FPU_MASK_SNAN+_FPU_MASK_OVFL;
+ _FPU_SETCW(__fpu_control);
+#endif
+
+#elif (defined(__powerpc__)||defined(_ARCH_PPC)||defined(_ARCH_PWR)) /* !__mc68k__ */
+/* Reported 20011109 by Alan Bain <alanb@chiark.greenend.org.uk> */
+
+#ifdef Can_use__setfpucw
+
+/* The following is NOT a mistake -- the author of the fpu_control.h
+for the PPC has erroneously defined IEEE mode to turn on exceptions
+other than Inexact! Start from default then and turn on only the ones
+which we want*/
+
+ __setfpucw(_FPU_DEFAULT + _FPU_MASK_IM+_FPU_MASK_OM+_FPU_MASK_UM);
+
+#else /* PPC && !Can_use__setfpucw */
+
+ __fpu_control = _FPU_DEFAULT +_FPU_MASK_OM+_FPU_MASK_IM+_FPU_MASK_UM;
+ _FPU_SETCW(__fpu_control);
+
+#endif /*Can_use__setfpucw*/
+
+#else /* !(mc68000||powerpc) */
+
+#ifdef _FPU_IEEE
+#ifndef _FPU_EXTENDED /* e.g., ARM processor under Linux */
+#define _FPU_EXTENDED 0
+#endif
+#ifndef _FPU_DOUBLE
+#define _FPU_DOUBLE 0
+#endif
+#ifdef Can_use__setfpucw /* pre-1997 (?) Linux */
+ __setfpucw(_FPU_IEEE - _FPU_MASK_IM - _FPU_MASK_ZM - _FPU_MASK_OM);
+#else
+#ifdef UNINIT_F2C_PRECISION_53 /* 20051004 */
+ /* unmask invalid, etc., and change rounding precision to double */
+ __fpu_control = _FPU_IEEE - _FPU_EXTENDED + _FPU_DOUBLE - _FPU_MASK_IM - _FPU_MASK_ZM - _FPU_MASK_OM;
+ _FPU_SETCW(__fpu_control);
+#else
+ /* unmask invalid, etc., and keep current rounding precision */
+ fpu_control_t cw;
+ _FPU_GETCW(cw);
+ cw &= ~(_FPU_MASK_IM | _FPU_MASK_ZM | _FPU_MASK_OM);
+ _FPU_SETCW(cw);
+#endif
+#endif
+
+#else /* !_FPU_IEEE */
+
+ fprintf(stderr, "\n%s\n%s\n%s\n%s\n",
+ "WARNING: _uninit_f2c in libf2c does not know how",
+ "to enable trapping on this system, so f2c's -trapuv",
+ "option will not detect uninitialized variables unless",
+ "you can enable trapping manually.");
+ fflush(stderr);
+
+#endif /* _FPU_IEEE */
+#endif /* __mc68k__ */
+ }
+#endif /* __linux__ */
+#endif
+
+#ifdef __alpha
+#ifndef IEEE0_done
+#define IEEE0_done
+#include <machine/fpu.h>
+ static void
+ieee0(Void)
+{
+ ieee_set_fp_control(IEEE_TRAP_ENABLE_INV);
+ }
+#endif /*IEEE0_done*/
+#endif /*__alpha*/
+
+#ifdef __hpux
+#define IEEE0_done
+#define _INCLUDE_HPUX_SOURCE
+#include <math.h>
+
+#ifndef FP_X_INV
+#include <fenv.h>
+#define fpsetmask fesettrapenable
+#define FP_X_INV FE_INVALID
+#endif
+
+ static void
+ieee0(Void)
+{
+ fpsetmask(FP_X_INV);
+ }
+#endif /*__hpux*/
+
+#ifdef _AIX
+#define IEEE0_done
+#include <fptrap.h>
+
+ static void
+ieee0(Void)
+{
+ fp_enable(TRP_INVALID);
+ fp_trap(FP_TRAP_SYNC);
+ }
+#endif /*_AIX*/
+
+#ifdef __sun
+#define IEEE0_done
+#include <ieeefp.h>
+
+ static void
+ieee0(Void)
+{
+ fpsetmask(FP_X_INV);
+ }
+#endif /*__sparc*/
+
+#ifndef IEEE0_done
+ static void
+ieee0(Void) {}
+#endif
diff --git a/contrib/libs/libf2c/util.c b/contrib/libs/libf2c/util.c
new file mode 100644
index 0000000000..ad4bec5a79
--- /dev/null
+++ b/contrib/libs/libf2c/util.c
@@ -0,0 +1,57 @@
+#include "sysdep1.h" /* here to get stat64 on some badly designed Linux systems */
+#include "f2c.h"
+#include "fio.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ VOID
+#ifdef KR_headers
+#define Const /*nothing*/
+g_char(a,alen,b) char *a,*b; ftnlen alen;
+#else
+#define Const const
+g_char(const char *a, ftnlen alen, char *b)
+#endif
+{
+ Const char *x = a + alen;
+ char *y = b + alen;
+
+ for(;; y--) {
+ if (x <= a) {
+ *b = 0;
+ return;
+ }
+ if (*--x != ' ')
+ break;
+ }
+ *y-- = 0;
+ do *y-- = *x;
+ while(x-- > a);
+ }
+
+ VOID
+#ifdef KR_headers
+b_char(a,b,blen) char *a,*b; ftnlen blen;
+#else
+b_char(const char *a, char *b, ftnlen blen)
+#endif
+{ int i;
+ for(i=0;i<blen && *a!=0;i++) *b++= *a++;
+ for(;i<blen;i++) *b++=' ';
+}
+#ifndef NON_UNIX_STDIO
+#ifdef KR_headers
+long f__inode(a, dev) char *a; int *dev;
+#else
+long f__inode(char *a, int *dev)
+#endif
+{ struct STAT_ST x;
+ if(STAT(a,&x)<0) return(-1);
+ *dev = x.st_dev;
+ return(x.st_ino);
+}
+#endif
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/wref.c b/contrib/libs/libf2c/wref.c
new file mode 100644
index 0000000000..2753a80b16
--- /dev/null
+++ b/contrib/libs/libf2c/wref.c
@@ -0,0 +1,294 @@
+#include "f2c.h"
+#include "fio.h"
+
+#ifndef KR_headers
+#undef abs
+#undef min
+#undef max
+#include "stdlib.h"
+#include "string.h"
+#endif
+
+#include "fmt.h"
+#include "fp.h"
+#ifndef VAX
+#include "ctype_.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+#endif
+
+ int
+#ifdef KR_headers
+wrt_E(p,w,d,e,len) ufloat *p; ftnlen len;
+#else
+wrt_E(ufloat *p, int w, int d, int e, ftnlen len)
+#endif
+{
+ char buf[FMAX+EXPMAXDIGS+4], *s, *se;
+ int d1, delta, e1, i, sign, signspace;
+ double dd;
+#ifdef WANT_LEAD_0
+ int insert0 = 0;
+#endif
+#ifndef VAX
+ int e0 = e;
+#endif
+
+ if(e <= 0)
+ e = 2;
+ if(f__scale) {
+ if(f__scale >= d + 2 || f__scale <= -d)
+ goto nogood;
+ }
+ if(f__scale <= 0)
+ --d;
+ if (len == sizeof(real))
+ dd = p->pf;
+ else
+ dd = p->pd;
+ if (dd < 0.) {
+ signspace = sign = 1;
+ dd = -dd;
+ }
+ else {
+ sign = 0;
+ signspace = (int)f__cplus;
+#ifndef VAX
+ if (!dd) {
+#ifdef SIGNED_ZEROS
+ if (signbit_f2c(&dd))
+ signspace = sign = 1;
+#endif
+ dd = 0.; /* avoid -0 */
+ }
+#endif
+ }
+ delta = w - (2 /* for the . and the d adjustment above */
+ + 2 /* for the E+ */ + signspace + d + e);
+#ifdef WANT_LEAD_0
+ if (f__scale <= 0 && delta > 0) {
+ delta--;
+ insert0 = 1;
+ }
+ else
+#endif
+ if (delta < 0) {
+nogood:
+ while(--w >= 0)
+ PUT('*');
+ return(0);
+ }
+ if (f__scale < 0)
+ d += f__scale;
+ if (d > FMAX) {
+ d1 = d - FMAX;
+ d = FMAX;
+ }
+ else
+ d1 = 0;
+ sprintf(buf,"%#.*E", d, dd);
+#ifndef VAX
+ /* check for NaN, Infinity */
+ if (!isdigit(buf[0])) {
+ switch(buf[0]) {
+ case 'n':
+ case 'N':
+ signspace = 0; /* no sign for NaNs */
+ }
+ delta = w - strlen(buf) - signspace;
+ if (delta < 0)
+ goto nogood;
+ while(--delta >= 0)
+ PUT(' ');
+ if (signspace)
+ PUT(sign ? '-' : '+');
+ for(s = buf; *s; s++)
+ PUT(*s);
+ return 0;
+ }
+#endif
+ se = buf + d + 3;
+#ifdef GOOD_SPRINTF_EXPONENT /* When possible, exponent has 2 digits. */
+ if (f__scale != 1 && dd)
+ sprintf(se, "%+.2d", atoi(se) + 1 - f__scale);
+#else
+ if (dd)
+ sprintf(se, "%+.2d", atoi(se) + 1 - f__scale);
+ else
+ strcpy(se, "+00");
+#endif
+ s = ++se;
+ if (e < 2) {
+ if (*s != '0')
+ goto nogood;
+ }
+#ifndef VAX
+ /* accommodate 3 significant digits in exponent */
+ if (s[2]) {
+#ifdef Pedantic
+ if (!e0 && !s[3])
+ for(s -= 2, e1 = 2; s[0] = s[1]; s++);
+
+ /* Pedantic gives the behavior that Fortran 77 specifies, */
+ /* i.e., requires that E be specified for exponent fields */
+ /* of more than 3 digits. With Pedantic undefined, we get */
+ /* the behavior that Cray displays -- you get a bigger */
+ /* exponent field if it fits. */
+#else
+ if (!e0) {
+ for(s -= 2, e1 = 2; s[0] = s[1]; s++)
+#ifdef CRAY
+ delta--;
+ if ((delta += 4) < 0)
+ goto nogood
+#endif
+ ;
+ }
+#endif
+ else if (e0 >= 0)
+ goto shift;
+ else
+ e1 = e;
+ }
+ else
+ shift:
+#endif
+ for(s += 2, e1 = 2; *s; ++e1, ++s)
+ if (e1 >= e)
+ goto nogood;
+ while(--delta >= 0)
+ PUT(' ');
+ if (signspace)
+ PUT(sign ? '-' : '+');
+ s = buf;
+ i = f__scale;
+ if (f__scale <= 0) {
+#ifdef WANT_LEAD_0
+ if (insert0)
+ PUT('0');
+#endif
+ PUT('.');
+ for(; i < 0; ++i)
+ PUT('0');
+ PUT(*s);
+ s += 2;
+ }
+ else if (f__scale > 1) {
+ PUT(*s);
+ s += 2;
+ while(--i > 0)
+ PUT(*s++);
+ PUT('.');
+ }
+ if (d1) {
+ se -= 2;
+ while(s < se) PUT(*s++);
+ se += 2;
+ do PUT('0'); while(--d1 > 0);
+ }
+ while(s < se)
+ PUT(*s++);
+ if (e < 2)
+ PUT(s[1]);
+ else {
+ while(++e1 <= e)
+ PUT('0');
+ while(*s)
+ PUT(*s++);
+ }
+ return 0;
+ }
+
+ int
+#ifdef KR_headers
+wrt_F(p,w,d,len) ufloat *p; ftnlen len;
+#else
+wrt_F(ufloat *p, int w, int d, ftnlen len)
+#endif
+{
+ int d1, sign, n;
+ double x;
+ char *b, buf[MAXINTDIGS+MAXFRACDIGS+4], *s;
+
+ x= (len==sizeof(real)?p->pf:p->pd);
+ if (d < MAXFRACDIGS)
+ d1 = 0;
+ else {
+ d1 = d - MAXFRACDIGS;
+ d = MAXFRACDIGS;
+ }
+ if (x < 0.)
+ { x = -x; sign = 1; }
+ else {
+ sign = 0;
+#ifndef VAX
+ if (!x) {
+#ifdef SIGNED_ZEROS
+ if (signbit_f2c(&x))
+ sign = 2;
+#endif
+ x = 0.;
+ }
+#endif
+ }
+
+ if (n = f__scale)
+ if (n > 0)
+ do x *= 10.; while(--n > 0);
+ else
+ do x *= 0.1; while(++n < 0);
+
+#ifdef USE_STRLEN
+ sprintf(b = buf, "%#.*f", d, x);
+ n = strlen(b) + d1;
+#else
+ n = sprintf(b = buf, "%#.*f", d, x) + d1;
+#endif
+
+#ifndef WANT_LEAD_0
+ if (buf[0] == '0' && d)
+ { ++b; --n; }
+#endif
+ if (sign == 1) {
+ /* check for all zeros */
+ for(s = b;;) {
+ while(*s == '0') s++;
+ switch(*s) {
+ case '.':
+ s++; continue;
+ case 0:
+ sign = 0;
+ }
+ break;
+ }
+ }
+ if (sign || f__cplus)
+ ++n;
+ if (n > w) {
+#ifdef WANT_LEAD_0
+ if (buf[0] == '0' && --n == w)
+ ++b;
+ else
+#endif
+ {
+ while(--w >= 0)
+ PUT('*');
+ return 0;
+ }
+ }
+ for(w -= n; --w >= 0; )
+ PUT(' ');
+ if (sign)
+ PUT('-');
+ else if (f__cplus)
+ PUT('+');
+ while(n = *b++)
+ PUT(n);
+ while(--d1 >= 0)
+ PUT('0');
+ return 0;
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/wrtfmt.c b/contrib/libs/libf2c/wrtfmt.c
new file mode 100644
index 0000000000..a970db9591
--- /dev/null
+++ b/contrib/libs/libf2c/wrtfmt.c
@@ -0,0 +1,377 @@
+#include "f2c.h"
+#include "fio.h"
+#include "fmt.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern icilist *f__svic;
+extern char *f__icptr;
+
+ static int
+mv_cur(Void) /* shouldn't use fseek because it insists on calling fflush */
+ /* instead we know too much about stdio */
+{
+ int cursor = f__cursor;
+ f__cursor = 0;
+ if(f__external == 0) {
+ if(cursor < 0) {
+ if(f__hiwater < f__recpos)
+ f__hiwater = f__recpos;
+ f__recpos += cursor;
+ f__icptr += cursor;
+ if(f__recpos < 0)
+ err(f__elist->cierr, 110, "left off");
+ }
+ else if(cursor > 0) {
+ if(f__recpos + cursor >= f__svic->icirlen)
+ err(f__elist->cierr, 110, "recend");
+ if(f__hiwater <= f__recpos)
+ for(; cursor > 0; cursor--)
+ (*f__putn)(' ');
+ else if(f__hiwater <= f__recpos + cursor) {
+ cursor -= f__hiwater - f__recpos;
+ f__icptr += f__hiwater - f__recpos;
+ f__recpos = f__hiwater;
+ for(; cursor > 0; cursor--)
+ (*f__putn)(' ');
+ }
+ else {
+ f__icptr += cursor;
+ f__recpos += cursor;
+ }
+ }
+ return(0);
+ }
+ if (cursor > 0) {
+ if(f__hiwater <= f__recpos)
+ for(;cursor>0;cursor--) (*f__putn)(' ');
+ else if(f__hiwater <= f__recpos + cursor) {
+ cursor -= f__hiwater - f__recpos;
+ f__recpos = f__hiwater;
+ for(; cursor > 0; cursor--)
+ (*f__putn)(' ');
+ }
+ else {
+ f__recpos += cursor;
+ }
+ }
+ else if (cursor < 0)
+ {
+ if(cursor + f__recpos < 0)
+ err(f__elist->cierr,110,"left off");
+ if(f__hiwater < f__recpos)
+ f__hiwater = f__recpos;
+ f__recpos += cursor;
+ }
+ return(0);
+}
+
+ static int
+#ifdef KR_headers
+wrt_Z(n,w,minlen,len) Uint *n; int w, minlen; ftnlen len;
+#else
+wrt_Z(Uint *n, int w, int minlen, ftnlen len)
+#endif
+{
+ register char *s, *se;
+ register int i, w1;
+ static int one = 1;
+ static char hex[] = "0123456789ABCDEF";
+ s = (char *)n;
+ --len;
+ if (*(char *)&one) {
+ /* little endian */
+ se = s;
+ s += len;
+ i = -1;
+ }
+ else {
+ se = s + len;
+ i = 1;
+ }
+ for(;; s += i)
+ if (s == se || *s)
+ break;
+ w1 = (i*(se-s) << 1) + 1;
+ if (*s & 0xf0)
+ w1++;
+ if (w1 > w)
+ for(i = 0; i < w; i++)
+ (*f__putn)('*');
+ else {
+ if ((minlen -= w1) > 0)
+ w1 += minlen;
+ while(--w >= w1)
+ (*f__putn)(' ');
+ while(--minlen >= 0)
+ (*f__putn)('0');
+ if (!(*s & 0xf0)) {
+ (*f__putn)(hex[*s & 0xf]);
+ if (s == se)
+ return 0;
+ s += i;
+ }
+ for(;; s += i) {
+ (*f__putn)(hex[*s >> 4 & 0xf]);
+ (*f__putn)(hex[*s & 0xf]);
+ if (s == se)
+ break;
+ }
+ }
+ return 0;
+ }
+
+ static int
+#ifdef KR_headers
+wrt_I(n,w,len, base) Uint *n; ftnlen len; register int base;
+#else
+wrt_I(Uint *n, int w, ftnlen len, register int base)
+#endif
+{ int ndigit,sign,spare,i;
+ longint x;
+ char *ans;
+ if(len==sizeof(integer)) x=n->il;
+ else if(len == sizeof(char)) x = n->ic;
+#ifdef Allow_TYQUAD
+ else if (len == sizeof(longint)) x = n->ili;
+#endif
+ else x=n->is;
+ ans=f__icvt(x,&ndigit,&sign, base);
+ spare=w-ndigit;
+ if(sign || f__cplus) spare--;
+ if(spare<0)
+ for(i=0;i<w;i++) (*f__putn)('*');
+ else
+ { for(i=0;i<spare;i++) (*f__putn)(' ');
+ if(sign) (*f__putn)('-');
+ else if(f__cplus) (*f__putn)('+');
+ for(i=0;i<ndigit;i++) (*f__putn)(*ans++);
+ }
+ return(0);
+}
+ static int
+#ifdef KR_headers
+wrt_IM(n,w,m,len,base) Uint *n; ftnlen len; int base;
+#else
+wrt_IM(Uint *n, int w, int m, ftnlen len, int base)
+#endif
+{ int ndigit,sign,spare,i,xsign;
+ longint x;
+ char *ans;
+ if(sizeof(integer)==len) x=n->il;
+ else if(len == sizeof(char)) x = n->ic;
+#ifdef Allow_TYQUAD
+ else if (len == sizeof(longint)) x = n->ili;
+#endif
+ else x=n->is;
+ ans=f__icvt(x,&ndigit,&sign, base);
+ if(sign || f__cplus) xsign=1;
+ else xsign=0;
+ if(ndigit+xsign>w || m+xsign>w)
+ { for(i=0;i<w;i++) (*f__putn)('*');
+ return(0);
+ }
+ if(x==0 && m==0)
+ { for(i=0;i<w;i++) (*f__putn)(' ');
+ return(0);
+ }
+ if(ndigit>=m)
+ spare=w-ndigit-xsign;
+ else
+ spare=w-m-xsign;
+ for(i=0;i<spare;i++) (*f__putn)(' ');
+ if(sign) (*f__putn)('-');
+ else if(f__cplus) (*f__putn)('+');
+ for(i=0;i<m-ndigit;i++) (*f__putn)('0');
+ for(i=0;i<ndigit;i++) (*f__putn)(*ans++);
+ return(0);
+}
+ static int
+#ifdef KR_headers
+wrt_AP(s) char *s;
+#else
+wrt_AP(char *s)
+#endif
+{ char quote;
+ int i;
+
+ if(f__cursor && (i = mv_cur()))
+ return i;
+ quote = *s++;
+ for(;*s;s++)
+ { if(*s!=quote) (*f__putn)(*s);
+ else if(*++s==quote) (*f__putn)(*s);
+ else return(1);
+ }
+ return(1);
+}
+ static int
+#ifdef KR_headers
+wrt_H(a,s) char *s;
+#else
+wrt_H(int a, char *s)
+#endif
+{
+ int i;
+
+ if(f__cursor && (i = mv_cur()))
+ return i;
+ while(a--) (*f__putn)(*s++);
+ return(1);
+}
+
+ int
+#ifdef KR_headers
+wrt_L(n,len, sz) Uint *n; ftnlen sz;
+#else
+wrt_L(Uint *n, int len, ftnlen sz)
+#endif
+{ int i;
+ long x;
+ if(sizeof(long)==sz) x=n->il;
+ else if(sz == sizeof(char)) x = n->ic;
+ else x=n->is;
+ for(i=0;i<len-1;i++)
+ (*f__putn)(' ');
+ if(x) (*f__putn)('T');
+ else (*f__putn)('F');
+ return(0);
+}
+ static int
+#ifdef KR_headers
+wrt_A(p,len) char *p; ftnlen len;
+#else
+wrt_A(char *p, ftnlen len)
+#endif
+{
+ while(len-- > 0) (*f__putn)(*p++);
+ return(0);
+}
+ static int
+#ifdef KR_headers
+wrt_AW(p,w,len) char * p; ftnlen len;
+#else
+wrt_AW(char * p, int w, ftnlen len)
+#endif
+{
+ while(w>len)
+ { w--;
+ (*f__putn)(' ');
+ }
+ while(w-- > 0)
+ (*f__putn)(*p++);
+ return(0);
+}
+
+ static int
+#ifdef KR_headers
+wrt_G(p,w,d,e,len) ufloat *p; ftnlen len;
+#else
+wrt_G(ufloat *p, int w, int d, int e, ftnlen len)
+#endif
+{ double up = 1,x;
+ int i=0,oldscale,n,j;
+ x = len==sizeof(real)?p->pf:p->pd;
+ if(x < 0 ) x = -x;
+ if(x<.1) {
+ if (x != 0.)
+ return(wrt_E(p,w,d,e,len));
+ i = 1;
+ goto have_i;
+ }
+ for(;i<=d;i++,up*=10)
+ { if(x>=up) continue;
+ have_i:
+ oldscale = f__scale;
+ f__scale = 0;
+ if(e==0) n=4;
+ else n=e+2;
+ i=wrt_F(p,w-n,d-i,len);
+ for(j=0;j<n;j++) (*f__putn)(' ');
+ f__scale=oldscale;
+ return(i);
+ }
+ return(wrt_E(p,w,d,e,len));
+}
+
+ int
+#ifdef KR_headers
+w_ed(p,ptr,len) struct syl *p; char *ptr; ftnlen len;
+#else
+w_ed(struct syl *p, char *ptr, ftnlen len)
+#endif
+{
+ int i;
+
+ if(f__cursor && (i = mv_cur()))
+ return i;
+ switch(p->op)
+ {
+ default:
+ fprintf(stderr,"w_ed, unexpected code: %d\n", p->op);
+ sig_die(f__fmtbuf, 1);
+ case I: return(wrt_I((Uint *)ptr,p->p1,len, 10));
+ case IM:
+ return(wrt_IM((Uint *)ptr,p->p1,p->p2.i[0],len,10));
+
+ /* O and OM don't work right for character, double, complex, */
+ /* or doublecomplex, and they differ from Fortran 90 in */
+ /* showing a minus sign for negative values. */
+
+ case O: return(wrt_I((Uint *)ptr, p->p1, len, 8));
+ case OM:
+ return(wrt_IM((Uint *)ptr,p->p1,p->p2.i[0],len,8));
+ case L: return(wrt_L((Uint *)ptr,p->p1, len));
+ case A: return(wrt_A(ptr,len));
+ case AW:
+ return(wrt_AW(ptr,p->p1,len));
+ case D:
+ case E:
+ case EE:
+ return(wrt_E((ufloat *)ptr,p->p1,p->p2.i[0],p->p2.i[1],len));
+ case G:
+ case GE:
+ return(wrt_G((ufloat *)ptr,p->p1,p->p2.i[0],p->p2.i[1],len));
+ case F: return(wrt_F((ufloat *)ptr,p->p1,p->p2.i[0],len));
+
+ /* Z and ZM assume 8-bit bytes. */
+
+ case Z: return(wrt_Z((Uint *)ptr,p->p1,0,len));
+ case ZM:
+ return(wrt_Z((Uint *)ptr,p->p1,p->p2.i[0],len));
+ }
+}
+
+ int
+#ifdef KR_headers
+w_ned(p) struct syl *p;
+#else
+w_ned(struct syl *p)
+#endif
+{
+ switch(p->op)
+ {
+ default: fprintf(stderr,"w_ned, unexpected code: %d\n", p->op);
+ sig_die(f__fmtbuf, 1);
+ case SLASH:
+ return((*f__donewrec)());
+ case T: f__cursor = p->p1-f__recpos - 1;
+ return(1);
+ case TL: f__cursor -= p->p1;
+ if(f__cursor < -f__recpos) /* TL1000, 1X */
+ f__cursor = -f__recpos;
+ return(1);
+ case TR:
+ case X:
+ f__cursor += p->p1;
+ return(1);
+ case APOS:
+ return(wrt_AP(p->p2.s));
+ case H:
+ return(wrt_H(p->p1,p->p2.s));
+ }
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/wsfe.c b/contrib/libs/libf2c/wsfe.c
new file mode 100644
index 0000000000..8709f3b347
--- /dev/null
+++ b/contrib/libs/libf2c/wsfe.c
@@ -0,0 +1,78 @@
+/*write sequential formatted external*/
+#include "f2c.h"
+#include "fio.h"
+#include "fmt.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ int
+x_wSL(Void)
+{
+ int n = f__putbuf('\n');
+ f__hiwater = f__recpos = f__cursor = 0;
+ return(n == 0);
+}
+
+ static int
+xw_end(Void)
+{
+ int n;
+
+ if(f__nonl) {
+ f__putbuf(n = 0);
+ fflush(f__cf);
+ }
+ else
+ n = f__putbuf('\n');
+ f__hiwater = f__recpos = f__cursor = 0;
+ return n;
+}
+
+ static int
+xw_rev(Void)
+{
+ int n = 0;
+ if(f__workdone) {
+ n = f__putbuf('\n');
+ f__workdone = 0;
+ }
+ f__hiwater = f__recpos = f__cursor = 0;
+ return n;
+}
+
+#ifdef KR_headers
+integer s_wsfe(a) cilist *a; /*start*/
+#else
+integer s_wsfe(cilist *a) /*start*/
+#endif
+{ int n;
+ if(!f__init) f_init();
+ f__reading=0;
+ f__sequential=1;
+ f__formatted=1;
+ f__external=1;
+ if(n=c_sfe(a)) return(n);
+ f__elist=a;
+ f__hiwater = f__cursor=f__recpos=0;
+ f__nonl = 0;
+ f__scale=0;
+ f__fmtbuf=a->cifmt;
+ f__cf=f__curunit->ufd;
+ if(pars_f(f__fmtbuf)<0) err(a->cierr,100,"startio");
+ f__putn= x_putc;
+ f__doed= w_ed;
+ f__doned= w_ned;
+ f__doend=xw_end;
+ f__dorevert=xw_rev;
+ f__donewrec=x_wSL;
+ fmt_bg();
+ f__cplus=0;
+ f__cblank=f__curunit->ublnk;
+ if(f__curunit->uwrt != 1 && f__nowwriting(f__curunit))
+ err(a->cierr,errno,"write start");
+ return(0);
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/wsle.c b/contrib/libs/libf2c/wsle.c
new file mode 100644
index 0000000000..3e602702c0
--- /dev/null
+++ b/contrib/libs/libf2c/wsle.c
@@ -0,0 +1,42 @@
+#include "f2c.h"
+#include "fio.h"
+#include "fmt.h"
+#include "lio.h"
+#include "string.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+integer s_wsle(a) cilist *a;
+#else
+integer s_wsle(cilist *a)
+#endif
+{
+ int n;
+ if(n=c_le(a)) return(n);
+ f__reading=0;
+ f__external=1;
+ f__formatted=1;
+ f__putn = x_putc;
+ f__lioproc = l_write;
+ L_len = LINE;
+ f__donewrec = x_wSL;
+ if(f__curunit->uwrt != 1 && f__nowwriting(f__curunit))
+ err(a->cierr, errno, "list output start");
+ return(0);
+ }
+
+integer e_wsle(Void)
+{
+ int n = f__putbuf('\n');
+ f__recpos=0;
+#ifdef ALWAYS_FLUSH
+ if (!n && fflush(f__cf))
+ err(f__elist->cierr, errno, "write end");
+#endif
+ return(n);
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/wsne.c b/contrib/libs/libf2c/wsne.c
new file mode 100644
index 0000000000..e204a51a4f
--- /dev/null
+++ b/contrib/libs/libf2c/wsne.c
@@ -0,0 +1,32 @@
+#include "f2c.h"
+#include "fio.h"
+#include "lio.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ integer
+#ifdef KR_headers
+s_wsne(a) cilist *a;
+#else
+s_wsne(cilist *a)
+#endif
+{
+ int n;
+
+ if(n=c_le(a))
+ return(n);
+ f__reading=0;
+ f__external=1;
+ f__formatted=1;
+ f__putn = x_putc;
+ L_len = LINE;
+ f__donewrec = x_wSL;
+ if(f__curunit->uwrt != 1 && f__nowwriting(f__curunit))
+ err(a->cierr, errno, "namelist output start");
+ x_wsne(a);
+ return e_wsle();
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/xwsne.c b/contrib/libs/libf2c/xwsne.c
new file mode 100644
index 0000000000..f810d3edbf
--- /dev/null
+++ b/contrib/libs/libf2c/xwsne.c
@@ -0,0 +1,77 @@
+#include "f2c.h"
+#include "fio.h"
+#include "lio.h"
+#include "fmt.h"
+
+extern int f__Aquote;
+
+ static VOID
+nl_donewrec(Void)
+{
+ (*f__donewrec)();
+ PUT(' ');
+ }
+
+#ifdef KR_headers
+x_wsne(a) cilist *a;
+#else
+#include "string.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ VOID
+x_wsne(cilist *a)
+#endif
+{
+ Namelist *nl;
+ char *s;
+ Vardesc *v, **vd, **vde;
+ ftnint number, type;
+ ftnlen *dims;
+ ftnlen size;
+ extern ftnlen f__typesize[];
+
+ nl = (Namelist *)a->cifmt;
+ PUT('&');
+ for(s = nl->name; *s; s++)
+ PUT(*s);
+ PUT(' ');
+ f__Aquote = 1;
+ vd = nl->vars;
+ vde = vd + nl->nvars;
+ while(vd < vde) {
+ v = *vd++;
+ s = v->name;
+#ifdef No_Extra_Namelist_Newlines
+ if (f__recpos+strlen(s)+2 >= L_len)
+#endif
+ nl_donewrec();
+ while(*s)
+ PUT(*s++);
+ PUT(' ');
+ PUT('=');
+ number = (dims = v->dims) ? dims[1] : 1;
+ type = v->type;
+ if (type < 0) {
+ size = -type;
+ type = TYCHAR;
+ }
+ else
+ size = f__typesize[type];
+ l_write(&number, v->addr, size, type);
+ if (vd < vde) {
+ if (f__recpos+2 >= L_len)
+ nl_donewrec();
+ PUT(',');
+ PUT(' ');
+ }
+ else if (f__recpos+1 >= L_len)
+ nl_donewrec();
+ }
+ f__Aquote = 0;
+ PUT('/');
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/z_abs.c b/contrib/libs/libf2c/z_abs.c
new file mode 100644
index 0000000000..4d8a015d38
--- /dev/null
+++ b/contrib/libs/libf2c/z_abs.c
@@ -0,0 +1,18 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+double f__cabs();
+double z_abs(z) doublecomplex *z;
+#else
+double f__cabs(double, double);
+double z_abs(doublecomplex *z)
+#endif
+{
+return( f__cabs( z->r, z->i ) );
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/z_cos.c b/contrib/libs/libf2c/z_cos.c
new file mode 100644
index 0000000000..4abe8bf882
--- /dev/null
+++ b/contrib/libs/libf2c/z_cos.c
@@ -0,0 +1,21 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double sin(), cos(), sinh(), cosh();
+VOID z_cos(r, z) doublecomplex *r, *z;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+void z_cos(doublecomplex *r, doublecomplex *z)
+#endif
+{
+ double zi = z->i, zr = z->r;
+ r->r = cos(zr) * cosh(zi);
+ r->i = - sin(zr) * sinh(zi);
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/z_div.c b/contrib/libs/libf2c/z_div.c
new file mode 100644
index 0000000000..e45f360804
--- /dev/null
+++ b/contrib/libs/libf2c/z_div.c
@@ -0,0 +1,50 @@
+#include "f2c.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef KR_headers
+extern VOID sig_die();
+VOID z_div(c, a, b) doublecomplex *a, *b, *c;
+#else
+extern void sig_die(const char*, int);
+void z_div(doublecomplex *c, doublecomplex *a, doublecomplex *b)
+#endif
+{
+ double ratio, den;
+ double abr, abi, cr;
+
+ if( (abr = b->r) < 0.)
+ abr = - abr;
+ if( (abi = b->i) < 0.)
+ abi = - abi;
+ if( abr <= abi )
+ {
+ if(abi == 0) {
+#ifdef IEEE_COMPLEX_DIVIDE
+ if (a->i != 0 || a->r != 0)
+ abi = 1.;
+ c->i = c->r = abi / abr;
+ return;
+#else
+ sig_die("complex division by zero", 1);
+#endif
+ }
+ ratio = b->r / b->i ;
+ den = b->i * (1 + ratio*ratio);
+ cr = (a->r*ratio + a->i) / den;
+ c->i = (a->i*ratio - a->r) / den;
+ }
+
+ else
+ {
+ ratio = b->i / b->r ;
+ den = b->r * (1 + ratio*ratio);
+ cr = (a->r + a->i*ratio) / den;
+ c->i = (a->i - a->r*ratio) / den;
+ }
+ c->r = cr;
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/z_exp.c b/contrib/libs/libf2c/z_exp.c
new file mode 100644
index 0000000000..7b8edfece0
--- /dev/null
+++ b/contrib/libs/libf2c/z_exp.c
@@ -0,0 +1,23 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double exp(), cos(), sin();
+VOID z_exp(r, z) doublecomplex *r, *z;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+void z_exp(doublecomplex *r, doublecomplex *z)
+#endif
+{
+ double expx, zi = z->i;
+
+ expx = exp(z->r);
+ r->r = expx * cos(zi);
+ r->i = expx * sin(zi);
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/z_log.c b/contrib/libs/libf2c/z_log.c
new file mode 100644
index 0000000000..4f11bbe0c7
--- /dev/null
+++ b/contrib/libs/libf2c/z_log.c
@@ -0,0 +1,121 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double log(), f__cabs(), atan2();
+#define ANSI(x) ()
+#else
+#define ANSI(x) x
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern double f__cabs(double, double);
+#endif
+
+#ifndef NO_DOUBLE_EXTENDED
+#ifndef GCC_COMPARE_BUG_FIXED
+#ifndef Pre20000310
+#ifdef Comment
+Some versions of gcc, such as 2.95.3 and 3.0.4, are buggy under -O2 or -O3:
+on IA32 (Intel 80x87) systems, they may do comparisons on values computed
+in extended-precision registers. This can lead to the test "s > s0" that
+was used below being carried out incorrectly. The fix below cannot be
+spoiled by overzealous optimization, since the compiler cannot know
+whether gcc_bug_bypass_diff_F2C will be nonzero. (We expect it always
+to be zero. The weird name is unlikely to collide with anything.)
+
+An example (provided by Ulrich Jakobus) where the bug fix matters is
+
+ double complex a, b
+ a = (.1099557428756427618354862829619, .9857360542953131909982289471372)
+ b = log(a)
+
+An alternative to the fix below would be to use 53-bit rounding precision,
+but the means of specifying this 80x87 feature are highly unportable.
+#endif /*Comment*/
+#define BYPASS_GCC_COMPARE_BUG
+double (*gcc_bug_bypass_diff_F2C) ANSI((double*,double*));
+ static double
+#ifdef KR_headers
+diff1(a,b) double *a, *b;
+#else
+diff1(double *a, double *b)
+#endif
+{ return *a - *b; }
+#endif /*Pre20000310*/
+#endif /*GCC_COMPARE_BUG_FIXED*/
+#endif /*NO_DOUBLE_EXTENDED*/
+
+#ifdef KR_headers
+VOID z_log(r, z) doublecomplex *r, *z;
+#else
+void z_log(doublecomplex *r, doublecomplex *z)
+#endif
+{
+ double s, s0, t, t2, u, v;
+ double zi = z->i, zr = z->r;
+#ifdef BYPASS_GCC_COMPARE_BUG
+ double (*diff) ANSI((double*,double*));
+#endif
+
+ r->i = atan2(zi, zr);
+#ifdef Pre20000310
+ r->r = log( f__cabs( zr, zi ) );
+#else
+ if (zi < 0)
+ zi = -zi;
+ if (zr < 0)
+ zr = -zr;
+ if (zr < zi) {
+ t = zi;
+ zi = zr;
+ zr = t;
+ }
+ t = zi/zr;
+ s = zr * sqrt(1 + t*t);
+ /* now s = f__cabs(zi,zr), and zr = |zr| >= |zi| = zi */
+ if ((t = s - 1) < 0)
+ t = -t;
+ if (t > .01)
+ r->r = log(s);
+ else {
+
+#ifdef Comment
+
+ log(1+x) = x - x^2/2 + x^3/3 - x^4/4 + - ...
+
+ = x(1 - x/2 + x^2/3 -+...)
+
+ [sqrt(y^2 + z^2) - 1] * [sqrt(y^2 + z^2) + 1] = y^2 + z^2 - 1, so
+
+ sqrt(y^2 + z^2) - 1 = (y^2 + z^2 - 1) / [sqrt(y^2 + z^2) + 1]
+
+#endif /*Comment*/
+
+#ifdef BYPASS_GCC_COMPARE_BUG
+ if (!(diff = gcc_bug_bypass_diff_F2C))
+ diff = diff1;
+#endif
+ t = ((zr*zr - 1.) + zi*zi) / (s + 1);
+ t2 = t*t;
+ s = 1. - 0.5*t;
+ u = v = 1;
+ do {
+ s0 = s;
+ u *= t2;
+ v += 2;
+ s += u/v - t*u/(v+1);
+ }
+#ifdef BYPASS_GCC_COMPARE_BUG
+ while(s - s0 > 1e-18 || (*diff)(&s,&s0) > 0.);
+#else
+ while(s > s0);
+#endif
+ r->r = s*t;
+ }
+#endif
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/z_sin.c b/contrib/libs/libf2c/z_sin.c
new file mode 100644
index 0000000000..01225a9448
--- /dev/null
+++ b/contrib/libs/libf2c/z_sin.c
@@ -0,0 +1,21 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double sin(), cos(), sinh(), cosh();
+VOID z_sin(r, z) doublecomplex *r, *z;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+void z_sin(doublecomplex *r, doublecomplex *z)
+#endif
+{
+ double zi = z->i, zr = z->r;
+ r->r = sin(zr) * cosh(zi);
+ r->i = cos(zr) * sinh(zi);
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/libf2c/z_sqrt.c b/contrib/libs/libf2c/z_sqrt.c
new file mode 100644
index 0000000000..35bd44c8e0
--- /dev/null
+++ b/contrib/libs/libf2c/z_sqrt.c
@@ -0,0 +1,35 @@
+#include "f2c.h"
+
+#ifdef KR_headers
+double sqrt(), f__cabs();
+VOID z_sqrt(r, z) doublecomplex *r, *z;
+#else
+#undef abs
+#include "math.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern double f__cabs(double, double);
+void z_sqrt(doublecomplex *r, doublecomplex *z)
+#endif
+{
+ double mag, zi = z->i, zr = z->r;
+
+ if( (mag = f__cabs(zr, zi)) == 0.)
+ r->r = r->i = 0.;
+ else if(zr > 0)
+ {
+ r->r = sqrt(0.5 * (mag + zr) );
+ r->i = zi / r->r / 2;
+ }
+ else
+ {
+ r->i = sqrt(0.5 * (mag - zr) );
+ if(zi < 0)
+ r->i = - r->i;
+ r->r = zi / r->i / 2;
+ }
+ }
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/libs/python/Include/datetime.h b/contrib/libs/python/Include/datetime.h
new file mode 100644
index 0000000000..b24acd5638
--- /dev/null
+++ b/contrib/libs/python/Include/datetime.h
@@ -0,0 +1,7 @@
+#pragma once
+
+#ifdef USE_PYTHON3
+#include <contrib/tools/python3/src/Include/datetime.h>
+#else
+#include <contrib/tools/python/src/Include/datetime.h>
+#endif
diff --git a/ydb/public/lib/ydb_cli/commands/ydb_service_scheme.cpp b/ydb/public/lib/ydb_cli/commands/ydb_service_scheme.cpp
index a452eb0705..2294be5944 100644
--- a/ydb/public/lib/ydb_cli/commands/ydb_service_scheme.cpp
+++ b/ydb/public/lib/ydb_cli/commands/ydb_service_scheme.cpp
@@ -210,6 +210,7 @@ int TCommandDescribe::PrintTopicResponsePretty(const NYdb::NTopic::TTopicDescrip
}
Cout << Endl << "PartitionsCount: " << description.GetTotalPartitionsCount();
Cout << Endl << "PartitionWriteSpeed: " << description.GetPartitionWriteSpeedBytesPerSecond() / 1_KB << " KB";
+ Cout << Endl << "MeteringMode: " << (TStringBuilder() << description.GetMeteringMode());
if (!description.GetSupportedCodecs().empty()) {
Cout << Endl << "SupportedCodecs: " << FormatCodecs(description.GetSupportedCodecs()) << Endl;
}
diff --git a/ydb/public/lib/ydb_cli/commands/ydb_service_topic.cpp b/ydb/public/lib/ydb_cli/commands/ydb_service_topic.cpp
index 4a48db4764..e8cae0e412 100644
--- a/ydb/public/lib/ydb_cli/commands/ydb_service_topic.cpp
+++ b/ydb/public/lib/ydb_cli/commands/ydb_service_topic.cpp
@@ -28,6 +28,16 @@ namespace NYdb::NConsoleClient {
std::pair<TString, NYdb::NTopic::ECodec>("zstd", NYdb::NTopic::ECodec::ZSTD),
};
+ THashMap<TString, NTopic::EMeteringMode> ExistingMeteringModes = {
+ std::pair<TString, NTopic::EMeteringMode>("request-units", NTopic::EMeteringMode::RequestUnits),
+ std::pair<TString, NTopic::EMeteringMode>("reserved-capacity", NTopic::EMeteringMode::ReservedCapacity),
+ };
+
+ THashMap<NTopic::EMeteringMode, TString> MeteringModesDescriptions = {
+ std::pair<NTopic::EMeteringMode, TString>(NTopic::EMeteringMode::ReservedCapacity, "Throughput and storage limits on hourly basis, write operations"),
+ std::pair<NTopic::EMeteringMode, TString>(NTopic::EMeteringMode::RequestUnits, "Read/write operations valued in request units, storage usage on hourly basis"),
+ };
+
// TODO(shmel1k@): improve docs
THashMap<ETopicMetadataField, TString> TopicMetadataFieldsDescriptions = {
{ETopicMetadataField::Body, "Message data"},
@@ -109,6 +119,44 @@ namespace NYdb::NConsoleClient {
return SupportedCodecs_;
}
+ void TCommandWithMeteringMode::AddAllowedMeteringModes(TClientCommand::TConfig& config) {
+ TStringStream description;
+ description << "Topic metering for serverless databases pricing. Available metering modes: ";
+ NColorizer::TColors colors = NColorizer::AutoColors(Cout);
+ for (const auto& mode: ExistingMeteringModes) {
+ auto findResult = MeteringModesDescriptions.find(mode.second);
+ Y_VERIFY(findResult != MeteringModesDescriptions.end(),
+ "Couldn't find description for %s metering mode", (TStringBuilder() << mode.second).c_str());
+ description << "\n " << colors.BoldColor() << mode.first << colors.OldColor()
+ << "\n " << findResult->second;
+ }
+ config.Opts->AddLongOption("metering-mode", description.Str())
+ .Optional()
+ .StoreResult(&MeteringModeStr_);
+ }
+
+ void TCommandWithMeteringMode::ParseMeteringMode() {
+ if (MeteringModeStr_.empty()) {
+ return;
+ }
+
+ TString toLowerMeteringMode = to_lower(MeteringModeStr_);
+ if (toLowerMeteringMode == "reserved-capacity") {
+ MeteringMode_ = NTopic::EMeteringMode::ReservedCapacity;
+ return;
+ }
+ if (toLowerMeteringMode == "request-units") {
+ MeteringMode_ = NTopic::EMeteringMode::RequestUnits;
+ return;
+ }
+
+ throw TMisuseException() << "Metering mode " << MeteringModeStr_ << " is not available for this command";
+ }
+
+ NTopic::EMeteringMode TCommandWithMeteringMode::GetMeteringMode() const {
+ return MeteringMode_;
+ }
+
TCommandTopic::TCommandTopic()
: TClientCommandTree("topic", {}, "TopicService operations") {
AddCommand(std::make_unique<TCommandTopicCreate>());
@@ -138,12 +186,14 @@ namespace NYdb::NConsoleClient {
config.Opts->SetFreeArgsNum(1);
SetFreeArgTitle(0, "<topic-path>", "New topic path");
AddAllowedCodecs(config, AllowedCodecs);
+ AddAllowedMeteringModes(config);
}
void TCommandTopicCreate::Parse(TConfig& config) {
TYdbCommand::Parse(config);
ParseTopicName(config, 0);
ParseCodecs();
+ ParseMeteringMode();
}
int TCommandTopicCreate::Run(TConfig& config) {
@@ -160,6 +210,11 @@ namespace NYdb::NConsoleClient {
} else {
settings.SetSupportedCodecs(AllowedCodecs);
}
+
+ if (GetMeteringMode() != NTopic::EMeteringMode::Unspecified) {
+ settings.MeteringMode(GetMeteringMode());
+ }
+
settings.RetentionPeriod(TDuration::Hours(RetentionPeriodHours_));
auto status = persQueueClient.CreateTopic(TopicName, settings).GetValueSync();
@@ -184,12 +239,14 @@ namespace NYdb::NConsoleClient {
config.Opts->SetFreeArgsNum(1);
SetFreeArgTitle(0, "<topic-path>", "Topic to alter");
AddAllowedCodecs(config, AllowedCodecs);
+ AddAllowedMeteringModes(config);
}
void TCommandTopicAlter::Parse(TConfig& config) {
TYdbCommand::Parse(config);
ParseTopicName(config, 0);
ParseCodecs();
+ ParseMeteringMode();
}
NYdb::NTopic::TAlterTopicSettings TCommandTopicAlter::PrepareAlterSettings(
@@ -214,11 +271,16 @@ namespace NYdb::NConsoleClient {
settings.SetPartitionWriteBurstBytes(*PartitionWriteSpeedKbps_ * 1_KB);
}
+ if (GetMeteringMode() != NTopic::EMeteringMode::Unspecified && GetMeteringMode() != describeResult.GetTopicDescription().GetMeteringMode()) {
+ settings.SetMeteringMode(GetMeteringMode());
+ }
+
return settings;
}
int TCommandTopicAlter::Run(TConfig& config) {
- if (!PartitionsCount_.Defined() && GetCodecs().empty() && !RetentionPeriodHours_.Defined() && !PartitionWriteSpeedKbps_.Defined()) {
+ if (!PartitionsCount_.Defined() && GetCodecs().empty() && !RetentionPeriodHours_.Defined() && !PartitionWriteSpeedKbps_.Defined() &&
+ GetMeteringMode() != NTopic::EMeteringMode::Unspecified) {
return EXIT_SUCCESS;
}
diff --git a/ydb/public/lib/ydb_cli/commands/ydb_service_topic.h b/ydb/public/lib/ydb_cli/commands/ydb_service_topic.h
index 18eb162e75..87475f5901 100644
--- a/ydb/public/lib/ydb_cli/commands/ydb_service_topic.h
+++ b/ydb/public/lib/ydb_cli/commands/ydb_service_topic.h
@@ -25,12 +25,23 @@ namespace NYdb::NConsoleClient {
TVector<NTopic::ECodec> SupportedCodecs_;
};
+ class TCommandWithMeteringMode {
+ protected:
+ void AddAllowedMeteringModes(TClientCommand::TConfig& config);
+ void ParseMeteringMode();
+ NTopic::EMeteringMode GetMeteringMode() const;
+
+ private:
+ TString MeteringModeStr_;
+ NTopic::EMeteringMode MeteringMode_ = NTopic::EMeteringMode::Unspecified;
+ };
+
class TCommandTopic: public TClientCommandTree {
public:
TCommandTopic();
};
- class TCommandTopicCreate: public TYdbCommand, public TCommandWithTopicName, public TCommandWithSupportedCodecs {
+ class TCommandTopicCreate: public TYdbCommand, public TCommandWithTopicName, public TCommandWithSupportedCodecs, public TCommandWithMeteringMode {
public:
TCommandTopicCreate();
void Config(TConfig& config) override;
@@ -43,7 +54,7 @@ namespace NYdb::NConsoleClient {
ui32 PartitionWriteSpeedKbps_;
};
- class TCommandTopicAlter: public TYdbCommand, public TCommandWithTopicName, public TCommandWithSupportedCodecs {
+ class TCommandTopicAlter: public TYdbCommand, public TCommandWithTopicName, public TCommandWithSupportedCodecs, public TCommandWithMeteringMode {
public:
TCommandTopicAlter();
void Config(TConfig& config) override;
@@ -54,6 +65,7 @@ namespace NYdb::NConsoleClient {
TMaybe<ui64> RetentionPeriodHours_;
TMaybe<ui32> PartitionsCount_;
TMaybe<ui32> PartitionWriteSpeedKbps_;
+
NYdb::NTopic::TAlterTopicSettings PrepareAlterSettings(NYdb::NTopic::TDescribeTopicResult& describeResult);
};